root/net/inet/ip.c

   1 /*
   2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
   3  *              operating system.  INET is implemented using the  BSD Socket
   4  *              interface as the means of communication with the user level.
   5  *
   6  *              The Internet Protocol (IP) module.
   7  *
   8  * Version:     @(#)ip.c        1.0.16b 9/1/93
   9  *
  10  * Authors:     Ross Biro, <bir7@leland.Stanford.Edu>
  11  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12  *              Donald Becker, <becker@super.org>
  13  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
  14  *              Richard Underwood
  15  *              Stefan Becker, <stefanb@yello.ping.de>
  16  *              
  17  *
  18  * Fixes:
  19  *              Alan Cox        :       Commented a couple of minor bits of surplus code
  20  *              Alan Cox        :       Undefining IP_FORWARD doesn't include the code
  21  *                                      (just stops a compiler warning).
  22  *              Alan Cox        :       Frames with >=MAX_ROUTE record routes, strict routes or loose routes
  23  *                                      are junked rather than corrupting things.
  24  *              Alan Cox        :       Frames to bad broadcast subnets are dumped
  25  *                                      We used to process them non broadcast and
  26  *                                      boy could that cause havoc.
  27  *              Alan Cox        :       ip_forward sets the free flag on the
  28  *                                      new frame it queues. Still crap because
  29  *                                      it copies the frame but at least it
  30  *                                      doesn't eat memory too.
  31  *              Alan Cox        :       Generic queue code and memory fixes.
  32  *              Fred Van Kempen :       IP fragment support (borrowed from NET2E)
  33  *              Gerhard Koerting:       Forward fragmented frames correctly.
  34  *              Gerhard Koerting:       Fixes to my fix of the above 8-).
  35  *              Gerhard Koerting:       IP interface addressing fix.
  36  *              Linus Torvalds  :       More robustness checks
  37  *              Alan Cox        :       Even more checks: Still not as robust as it ought to be
  38  *              Alan Cox        :       Save IP header pointer for later
  39  *              Alan Cox        :       ip option setting
  40  *              Alan Cox        :       Use ip_tos/ip_ttl settings
  41  *              Alan Cox        :       Fragmentation bogosity removed
  42  *                                      (Thanks to Mark.Bush@prg.ox.ac.uk)
  43  *              Dmitry Gorodchanin :    Send of a raw packet crash fix.
  44  *              Alan Cox        :       Silly ip bug when an overlength
  45  *                                      fragment turns up. Now frees the
  46  *                                      queue.
   47  *              Linus Torvalds/
   48  *              Alan Cox        :       Memory leakage on fragmentation handling.
  49  *              Gerhard Koerting:       Forwarding uses IP priority hints
  50  *              Teemu Rantanen  :       Fragment problems.
  51  *              Alan Cox        :       General cleanup, comments and reformat
  52  *              Alan Cox        :       SNMP statistics
  53  *              Alan Cox        :       BSD address rule semantics. Also see
  54  *                                      UDP as there is a nasty checksum issue
  55  *                                      if you do things the wrong way.
  56  *              Alan Cox        :       Always defrag, moved IP_FORWARD to the config.in file
  57  *              Alan Cox        :       IP options adjust sk->priority.
  58  *              Pedro Roque     :       Fix mtu/length error in ip_forward.
  59  *              Alan Cox        :       Avoid ip_chk_addr when possible.
   60  *              Richard Underwood:      IP multicasting.
  61  *              Alan Cox        :       Cleaned up multicast handlers.
  62  *              Alan Cox        :       RAW sockets demultiplex in the BSD style.
  63  *              Gunther Mayer   :       Fix the SNMP reporting typo
  64  *              Alan Cox        :       Always in group 224.0.0.1
  65  *              Alan Cox        :       Multicast loopback error for 224.0.0.1
  66  *              Alan Cox        :       IP_MULTICAST_LOOP option.
  67  *              Alan Cox        :       Use notifiers.
  68  *              Bjorn Ekwall    :       Removed ip_csum (from slhc.c too)
  69  *              Bjorn Ekwall    :       Moved ip_fast_csum to ip.h (inline!)
  70  *              Stefan Becker   :       Send out ICMP HOST REDIRECT
  71  *  
  72  *
  73  * To Fix:
  74  *              IP option processing is mostly not needed. ip_forward needs to know about routing rules
  75  *              and time stamp but that's about all. Use the route mtu field here too
  76  *              IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
  77  *              and could be made very efficient with the addition of some virtual memory hacks to permit
  78  *              the allocation of a buffer that can then be 'grown' by twiddling page tables.
  79  *              Output fragmentation wants updating along with the buffer management to use a single 
  80  *              interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
  81  *              output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
  82  *              fragmentation anyway.
  83  *
  84  *              This program is free software; you can redistribute it and/or
  85  *              modify it under the terms of the GNU General Public License
  86  *              as published by the Free Software Foundation; either version
  87  *              2 of the License, or (at your option) any later version.
  88  */
  89 
  90 #include <asm/segment.h>
  91 #include <asm/system.h>
  92 #include <linux/types.h>
  93 #include <linux/kernel.h>
  94 #include <linux/sched.h>
  95 #include <linux/mm.h>
  96 #include <linux/string.h>
  97 #include <linux/errno.h>
  98 #include <linux/config.h>
  99 
 100 #include <linux/socket.h>
 101 #include <linux/sockios.h>
 102 #include <linux/in.h>
 103 #include <linux/inet.h>
 104 #include <linux/netdevice.h>
 105 #include <linux/etherdevice.h>
 106 
 107 #include "snmp.h"
 108 #include "ip.h"
 109 #include "protocol.h"
 110 #include "route.h"
 111 #include "tcp.h"
 112 #include <linux/skbuff.h>
 113 #include "sock.h"
 114 #include "arp.h"
 115 #include "icmp.h"
 116 #include "raw.h"
 117 #include <linux/igmp.h>
 118 #include <linux/ip_fw.h>
 119 
 120 #define CONFIG_IP_DEFRAG
 121 
 122 extern int last_retran;
 123 extern void sort_send(struct sock *sk);
 124 
 125 #define min(a,b)        ((a)<(b)?(a):(b))
 126 #define LOOPBACK(x)     (((x) & htonl(0xff000000)) == htonl(0x7f000000))
 127 
 128 /*
 129  *      SNMP management statistics
 130  */
 131 
 132 #ifdef CONFIG_IP_FORWARD
 133 struct ip_mib ip_statistics={1,64,};    /* Forwarding=Yes, Default TTL=64 */
 134 #else
 135 struct ip_mib ip_statistics={0,64,};    /* Forwarding=No, Default TTL=64 */
 136 #endif
 137 
 138 /*
 139  *      Handle the issuing of an ioctl() request
 140  *      for the ip device. This is scheduled to
 141  *      disappear
 142  */
 143 
 144 int ip_ioctl(struct sock *sk, int cmd, unsigned long arg)
 145 {
 146         switch(cmd)
 147         {
 148                 default:
 149                         return(-EINVAL);
 150         }
 151 }
 152 
 153 
  154 /* These two routines will do strict and loose source routing; both are currently empty stubs. */
 155 
 156 static void
 157 strict_route(struct iphdr *iph, struct options *opt)
 158 {
 159 }
 160 
 161 
 162 static void
 163 loose_route(struct iphdr *iph, struct options *opt)
 164 {
 165 }
 166 
 167 
 168 
 169 
  170 /* This routine will check to see if we have lost a gateway; it is currently an empty stub. */
 171 void
 172 ip_route_check(unsigned long daddr)
 173 {
 174 }
 175 
 176 
 177 #if 0
 178 /* this routine puts the options at the end of an ip header. */
 179 static int
 180 build_options(struct iphdr *iph, struct options *opt)
 181 {
 182   unsigned char *ptr;
 183   /* currently we don't support any options. */
 184   ptr = (unsigned char *)(iph+1);
 185   *ptr = 0;
 186   return (4);
 187 }
 188 #endif
 189 
 190 
 191 /*
 192  *      Take an skb, and fill in the MAC header.
 193  */
 194 
 195 static int ip_send(struct sk_buff *skb, unsigned long daddr, int len, struct device *dev, unsigned long saddr)
 196 {
 197         int mac = 0;
 198 
 199         skb->dev = dev;
 200         skb->arp = 1;
 201         if (dev->hard_header)
 202         {
 203                 /*
 204                  *      Build a hardware header. Source address is our mac, destination unknown
 205                  *      (rebuild header will sort this out)
 206                  */
 207                 mac = dev->hard_header(skb->data, dev, ETH_P_IP, NULL, NULL, len, skb);
 208                 if (mac < 0)
 209                 {
 210                         mac = -mac;
 211                         skb->arp = 0;
 212                         skb->raddr = daddr;     /* next routing address */
 213                 }
 214         }
 215         return mac;
 216 }
 217 
 218 int ip_id_count = 0;
 219 
 220 /*
 221  * This routine builds the appropriate hardware/IP headers for
 222  * the routine.  It assumes that if *dev != NULL then the
 223  * protocol knows what it's doing, otherwise it uses the
 224  * routing/ARP tables to select a device struct.
 225  */
 226 int ip_build_header(struct sk_buff *skb, unsigned long saddr, unsigned long daddr,
 227                 struct device **dev, int type, struct options *opt, int len, int tos, int ttl)
 228 {
 229         static struct options optmem;
 230         struct iphdr *iph;
 231         struct rtable *rt;
 232         unsigned char *buff;
 233         unsigned long raddr;
 234         int tmp;
 235         unsigned long src;
 236 
 237         buff = skb->data;
 238 
 239         /*
 240          *      See if we need to look up the device.
 241          */
 242 
 243 #ifdef CONFIG_INET_MULTICAST    
 244         if(MULTICAST(daddr) && *dev==NULL && skb->sk && *skb->sk->ip_mc_name)
 245                 *dev=dev_get(skb->sk->ip_mc_name);
 246 #endif
 247         if (*dev == NULL)
 248         {
 249                 if(skb->localroute)
 250                         rt = ip_rt_local(daddr, &optmem, &src);
 251                 else
 252                         rt = ip_rt_route(daddr, &optmem, &src);
 253                 if (rt == NULL)
 254                 {
 255                         ip_statistics.IpOutNoRoutes++;
 256                         return(-ENETUNREACH);
 257                 }
 258 
 259                 *dev = rt->rt_dev;
 260                 /*
 261                  *      If the frame is from us and going off machine it MUST MUST MUST
 262                  *      have the output device ip address and never the loopback
 263                  */
 264                 if (LOOPBACK(saddr) && !LOOPBACK(daddr))
 265                         saddr = src;/*rt->rt_dev->pa_addr;*/
 266                 raddr = rt->rt_gateway;
 267 
 268                 opt = &optmem;
 269         }
 270         else
 271         {
 272                 /*
 273                  *      We still need the address of the first hop.
 274                  */
 275                 if(skb->localroute)
 276                         rt = ip_rt_local(daddr, &optmem, &src);
 277                 else
 278                         rt = ip_rt_route(daddr, &optmem, &src);
 279                 /*
 280                  *      If the frame is from us and going off machine it MUST MUST MUST
 281                  *      have the output device ip address and never the loopback
 282                  */
 283                 if (LOOPBACK(saddr) && !LOOPBACK(daddr))
 284                         saddr = src;/*rt->rt_dev->pa_addr;*/
 285 
 286                 raddr = (rt == NULL) ? 0 : rt->rt_gateway;
 287         }
 288 
 289         /*
 290          *      No source addr so make it our addr
 291          */
 292         if (saddr == 0)
 293                 saddr = src;
 294 
 295         /*
 296          *      No gateway so aim at the real destination
 297          */
 298         if (raddr == 0)
 299                 raddr = daddr;
 300 
 301         /*
 302          *      Now build the MAC header.
 303          */
 304 
 305         tmp = ip_send(skb, raddr, len, *dev, saddr);
 306         buff += tmp;
 307         len -= tmp;
 308 
 309         /*
 310          *      Book keeping
 311          */
 312 
 313         skb->dev = *dev;
 314         skb->saddr = saddr;
 315         if (skb->sk)
 316                 skb->sk->saddr = saddr;
 317 
 318         /*
 319          *      Now build the IP header.
 320          */
 321 
 322         /*
 323          *      If we are using IPPROTO_RAW, then we don't need an IP header, since
 324          *      one is being supplied to us by the user
 325          */
 326 
 327         if(type == IPPROTO_RAW)
 328                 return (tmp);
 329 
 330         iph = (struct iphdr *)buff;
 331         iph->version  = 4;
 332         iph->tos      = tos;
 333         iph->frag_off = 0;
 334         iph->ttl      = ttl;
 335         iph->daddr    = daddr;
 336         iph->saddr    = saddr;
 337         iph->protocol = type;
 338         iph->ihl      = 5;
 339         skb->ip_hdr   = iph;
 340 
 341         /* Setup the IP options. */
 342 #ifdef Not_Yet_Avail
 343         build_options(iph, opt);
 344 #endif
 345 
 346         return(20 + tmp);       /* IP header plus MAC header size */
 347 }
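
/*
 *      For illustration only (never compiled, hence the #if 0): a minimal
 *      sketch of how a transport protocol might drive ip_build_header() and
 *      ip_queue_xmit().  The buffer sizing (MAX_HEADER) and the payload
 *      handling are hypothetical and greatly simplified; real callers such
 *      as udp.c do far more bookkeeping (memory charging, MSS checks, etc).
 */
#if 0
static int ip_xmit_sketch(struct sock *sk, unsigned long daddr,
                          unsigned char *payload, int paylen)
{
        struct device *dev = NULL;
        struct sk_buff *skb;
        int tmp;

        /* Room for the MAC header, the IP header and the payload.
           MAX_HEADER is assumed to cover the largest MAC+IP header. */
        skb = alloc_skb(MAX_HEADER + paylen, GFP_KERNEL);
        if (skb == NULL)
                return(-ENOMEM);
        skb->sk = sk;
        skb->free = 1;

        /* Fill in the MAC and IP headers; tmp is the header size used. */
        tmp = ip_build_header(skb, 0, daddr, &dev, IPPROTO_UDP, NULL,
                        MAX_HEADER + paylen, sk->ip_tos, sk->ip_ttl);
        if (tmp < 0)
        {
                kfree_skb(skb, FREE_WRITE);
                return(tmp);
        }

        /* Append the payload and hand the frame to the output queue. */
        memcpy(skb->data + tmp, payload, paylen);
        skb->len = tmp + paylen;
        ip_queue_xmit(sk, dev, skb, 1);
        return(0);
}
#endif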
 348 
 349 
 350 static int
 351 do_options(struct iphdr *iph, struct options *opt)
 352 {
 353   unsigned char *buff;
 354   int done = 0;
 355   int i, len = sizeof(struct iphdr);
 356 
 357   /* Zero out the options. */
 358   opt->record_route.route_size = 0;
 359   opt->loose_route.route_size  = 0;
 360   opt->strict_route.route_size = 0;
 361   opt->tstamp.ptr              = 0;
 362   opt->security                = 0;
 363   opt->compartment             = 0;
 364   opt->handling                = 0;
 365   opt->stream                  = 0;
 366   opt->tcc                     = 0;
  367   return(0);            /* Option parsing below is currently disabled; this early return skips it. */
 368 
 369   /* Advance the pointer to start at the options. */
 370   buff = (unsigned char *)(iph + 1);
 371 
 372   /* Now start the processing. */
 373   while (!done && len < iph->ihl*4) switch(*buff) {
 374         case IPOPT_END:
 375                 done = 1;
 376                 break;
 377         case IPOPT_NOOP:
 378                 buff++;
 379                 len++;
 380                 break;
 381         case IPOPT_SEC:
 382                 buff++;
 383                 if (*buff != 11) return(1);
 384                 buff++;
 385                 opt->security = ntohs(*(unsigned short *)buff);
 386                 buff += 2;
 387                 opt->compartment = ntohs(*(unsigned short *)buff);
 388                 buff += 2;
 389                 opt->handling = ntohs(*(unsigned short *)buff);
 390                 buff += 2;
 391                 opt->tcc = ((*buff) << 16) + ntohs(*(unsigned short *)(buff+1));
 392                 buff += 3;
 393                 len += 11;
 394                 break;
 395         case IPOPT_LSRR:
 396                 buff++;
 397                 if ((*buff - 3)% 4 != 0) return(1);
 398                 len += *buff;
 399                 opt->loose_route.route_size = (*buff -3)/4;
 400                 buff++;
 401                 if (*buff % 4 != 0) return(1);
 402                 opt->loose_route.pointer = *buff/4 - 1;
 403                 buff++;
 404                 buff++;
 405                 for (i = 0; i < opt->loose_route.route_size; i++) {
 406                         if(i>=MAX_ROUTE)
 407                                 return(1);
 408                         opt->loose_route.route[i] = *(unsigned long *)buff;
 409                         buff += 4;
 410                 }
 411                 break;
 412         case IPOPT_SSRR:
 413                 buff++;
 414                 if ((*buff - 3)% 4 != 0) return(1);
 415                 len += *buff;
 416                 opt->strict_route.route_size = (*buff -3)/4;
 417                 buff++;
 418                 if (*buff % 4 != 0) return(1);
 419                 opt->strict_route.pointer = *buff/4 - 1;
 420                 buff++;
 421                 buff++;
 422                 for (i = 0; i < opt->strict_route.route_size; i++) {
 423                         if(i>=MAX_ROUTE)
 424                                 return(1);
 425                         opt->strict_route.route[i] = *(unsigned long *)buff;
 426                         buff += 4;
 427                 }
 428                 break;
 429         case IPOPT_RR:
 430                 buff++;
 431                 if ((*buff - 3)% 4 != 0) return(1);
 432                 len += *buff;
 433                 opt->record_route.route_size = (*buff -3)/4;
 434                 buff++;
 435                 if (*buff % 4 != 0) return(1);
 436                 opt->record_route.pointer = *buff/4 - 1;
 437                 buff++;
 438                 buff++;
 439                 for (i = 0; i < opt->record_route.route_size; i++) {
 440                         if(i>=MAX_ROUTE)
 441                                 return 1;
 442                         opt->record_route.route[i] = *(unsigned long *)buff;
 443                         buff += 4;
 444                 }
 445                 break;
 446         case IPOPT_SID:
 447                 len += 4;
 448                 buff +=2;
 449                 opt->stream = *(unsigned short *)buff;
 450                 buff += 2;
 451                 break;
 452         case IPOPT_TIMESTAMP:
 453                 buff++;
 454                 len += *buff;
 455                 if (*buff % 4 != 0) return(1);
 456                 opt->tstamp.len = *buff / 4 - 1;
 457                 buff++;
 458                 if ((*buff - 1) % 4 != 0) return(1);
 459                 opt->tstamp.ptr = (*buff-1)/4;
 460                 buff++;
 461                 opt->tstamp.x.full_char = *buff;
 462                 buff++;
 463                 for (i = 0; i < opt->tstamp.len; i++) {
 464                         opt->tstamp.data[i] = *(unsigned long *)buff;
 465                         buff += 4;
 466                 }
 467                 break;
 468         default:
 469                 return(1);
 470   }
 471 
 472   if (opt->record_route.route_size == 0) {
 473         if (opt->strict_route.route_size != 0) {
 474                 memcpy(&(opt->record_route), &(opt->strict_route),
 475                                              sizeof(opt->record_route));
 476         } else if (opt->loose_route.route_size != 0) {
 477                 memcpy(&(opt->record_route), &(opt->loose_route),
 478                                              sizeof(opt->record_route));
 479         }
 480   }
 481 
 482   if (opt->strict_route.route_size != 0 &&
 483       opt->strict_route.route_size != opt->strict_route.pointer) {
 484         strict_route(iph, opt);
 485         return(0);
 486   }
 487 
 488   if (opt->loose_route.route_size != 0 &&
 489       opt->loose_route.route_size != opt->loose_route.pointer) {
 490         loose_route(iph, opt);
 491         return(0);
 492   }
 493 
 494   return(0);
 495 }
 496 
 497 /*
 498  * This routine does all the checksum computations that don't
 499  * require anything special (like copying or special headers).
 500  */
 501 
 502 unsigned short ip_compute_csum(unsigned char * buff, int len)
 503 {
 504         unsigned long sum = 0;
 505 
 506         /* Do the first multiple of 4 bytes and convert to 16 bits. */
 507         if (len > 3)
 508         {
 509                 __asm__("clc\n"
 510                 "1:\t"
 511                 "lodsl\n\t"
 512                 "adcl %%eax, %%ebx\n\t"
 513                 "loop 1b\n\t"
 514                 "adcl $0, %%ebx\n\t"
 515                 "movl %%ebx, %%eax\n\t"
 516                 "shrl $16, %%eax\n\t"
 517                 "addw %%ax, %%bx\n\t"
 518                 "adcw $0, %%bx"
 519                 : "=b" (sum) , "=S" (buff)
 520                 : "0" (sum), "c" (len >> 2) ,"1" (buff)
 521                 : "ax", "cx", "si", "bx" );
 522         }
 523         if (len & 2)
 524         {
 525                 __asm__("lodsw\n\t"
 526                 "addw %%ax, %%bx\n\t"
 527                 "adcw $0, %%bx"
 528                 : "=b" (sum), "=S" (buff)
 529                 : "0" (sum), "1" (buff)
 530                 : "bx", "ax", "si");
 531         }
 532         if (len & 1)
 533         {
 534                 __asm__("lodsb\n\t"
 535                 "movb $0, %%ah\n\t"
 536                 "addw %%ax, %%bx\n\t"
 537                 "adcw $0, %%bx"
 538                 : "=b" (sum), "=S" (buff)
 539                 : "0" (sum), "1" (buff)
 540                 : "bx", "ax", "si");
 541         }
 542         sum =~sum;
 543         return(sum & 0xffff);
 544 }
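
/*
 *      For reference only (never compiled, hence the #if 0): a plain C sketch
 *      of the same ones-complement sum that the assembler above computes.
 *      This is the classic RFC 1071 algorithm; the assembler (and the inline
 *      ip_fast_csum() in ip.h) is what the kernel actually uses, for speed.
 */
#if 0
static unsigned short ip_compute_csum_sketch(unsigned char *buff, int len)
{
        unsigned long sum = 0;
        unsigned short last = 0;

        /* Sum the buffer as 16 bit words in memory order. */
        while (len > 1)
        {
                sum += *(unsigned short *) buff;
                buff += 2;
                len -= 2;
        }

        /* Add a trailing odd byte, padded with zero in the same position. */
        if (len == 1)
        {
                *(unsigned char *) &last = *buff;
                sum += last;
        }

        /* Fold the carries back into the low 16 bits and complement. */
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return((unsigned short) ~sum);
}
#endif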
 545 
 546 /*
 547  *      Generate a checksum for an outgoing IP datagram.
 548  */
 549 
 550 void ip_send_check(struct iphdr *iph)
 551 {
 552         iph->check = 0;
 553         iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
 554 }
 555 
 556 /************************ Fragment Handlers From NET2E **********************************/
 557 
 558 
 559 /*
 560  *      This fragment handler is a bit of a heap. On the other hand it works quite
 561  *      happily and handles things quite well.
 562  */
 563 
 564 static struct ipq *ipqueue = NULL;              /* IP fragment queue    */
 565 
 566 /*
 567  *      Create a new fragment entry.
 568  */
 569 
 570 static struct ipfrag *ip_frag_create(int offset, int end, struct sk_buff *skb, unsigned char *ptr)
 571 {
 572         struct ipfrag *fp;
 573 
 574         fp = (struct ipfrag *) kmalloc(sizeof(struct ipfrag), GFP_ATOMIC);
 575         if (fp == NULL)
 576         {
 577                 printk("IP: frag_create: no memory left !\n");
 578                 return(NULL);
 579         }
 580         memset(fp, 0, sizeof(struct ipfrag));
 581 
 582         /* Fill in the structure. */
 583         fp->offset = offset;
 584         fp->end = end;
 585         fp->len = end - offset;
 586         fp->skb = skb;
 587         fp->ptr = ptr;
 588 
 589         return(fp);
 590 }
 591 
 592 
 593 /*
 594  *      Find the correct entry in the "incomplete datagrams" queue for
 595  *      this IP datagram, and return the queue entry address if found.
 596  */
 597 
 598 static struct ipq *ip_find(struct iphdr *iph)
 599 {
 600         struct ipq *qp;
 601         struct ipq *qplast;
 602 
 603         cli();
 604         qplast = NULL;
 605         for(qp = ipqueue; qp != NULL; qplast = qp, qp = qp->next)
 606         {
 607                 if (iph->id== qp->iph->id && iph->saddr == qp->iph->saddr &&
 608                         iph->daddr == qp->iph->daddr && iph->protocol == qp->iph->protocol)
 609                 {
 610                         del_timer(&qp->timer);  /* So it doesn't vanish on us. The timer will be reset anyway */
 611                         sti();
 612                         return(qp);
 613                 }
 614         }
 615         sti();
 616         return(NULL);
 617 }
 618 
 619 
 620 /*
 621  *      Remove an entry from the "incomplete datagrams" queue, either
 622  *      because we completed, reassembled and processed it, or because
 623  *      it timed out.
 624  */
 625 
 626 static void ip_free(struct ipq *qp)
 627 {
 628         struct ipfrag *fp;
 629         struct ipfrag *xp;
 630 
 631         /*
 632          * Stop the timer for this entry.
 633          */
 634 
 635         del_timer(&qp->timer);
 636 
 637         /* Remove this entry from the "incomplete datagrams" queue. */
 638         cli();
 639         if (qp->prev == NULL)
 640         {
 641                 ipqueue = qp->next;
 642                 if (ipqueue != NULL)
 643                         ipqueue->prev = NULL;
 644         }
 645         else
 646         {
 647                 qp->prev->next = qp->next;
 648                 if (qp->next != NULL)
 649                         qp->next->prev = qp->prev;
 650         }
 651 
 652         /* Release all fragment data. */
 653 
 654         fp = qp->fragments;
 655         while (fp != NULL)
 656         {
 657                 xp = fp->next;
 658                 IS_SKB(fp->skb);
 659                 kfree_skb(fp->skb,FREE_READ);
 660                 kfree_s(fp, sizeof(struct ipfrag));
 661                 fp = xp;
 662         }
 663 
 664         /* Release the MAC header. */
 665         kfree_s(qp->mac, qp->maclen);
 666 
 667         /* Release the IP header. */
 668         kfree_s(qp->iph, qp->ihlen + 8);
 669 
 670         /* Finally, release the queue descriptor itself. */
 671         kfree_s(qp, sizeof(struct ipq));
 672         sti();
 673 }
 674 
 675 
 676 /*
 677  *      Oops- a fragment queue timed out.  Kill it and send an ICMP reply.
 678  */
 679 
 680 static void ip_expire(unsigned long arg)
 681 {
 682         struct ipq *qp;
 683 
 684         qp = (struct ipq *)arg;
 685 
 686         /*
 687          *      Send an ICMP "Fragment Reassembly Timeout" message.
 688          */
 689 
 690         ip_statistics.IpReasmTimeout++;
 691         ip_statistics.IpReasmFails++;   
 692         /* This if is always true... shrug */
 693         if(qp->fragments!=NULL)
 694                 icmp_send(qp->fragments->skb,ICMP_TIME_EXCEEDED,
 695                                 ICMP_EXC_FRAGTIME, 0, qp->dev);
 696 
 697         /*
 698          *      Nuke the fragment queue.
 699          */
 700         ip_free(qp);
 701 }
 702 
 703 
 704 /*
 705  *      Add an entry to the 'ipq' queue for a newly received IP datagram.
 706  *      We will (hopefully :-) receive all other fragments of this datagram
 707  *      in time, so we just create a queue for this datagram, in which we
 708  *      will insert the received fragments at their respective positions.
 709  */
 710 
 711 static struct ipq *ip_create(struct sk_buff *skb, struct iphdr *iph, struct device *dev)
 712 {
 713         struct ipq *qp;
 714         int maclen;
 715         int ihlen;
 716 
 717         qp = (struct ipq *) kmalloc(sizeof(struct ipq), GFP_ATOMIC);
 718         if (qp == NULL)
 719         {
 720                 printk("IP: create: no memory left !\n");
 721                 return(NULL);
 723         }
 724         memset(qp, 0, sizeof(struct ipq));
 725 
 726         /*
 727          *      Allocate memory for the MAC header.
 728          *
 729          *      FIXME: We have a maximum MAC address size limit and define
 730          *      elsewhere. We should use it here and avoid the 3 kmalloc() calls
 731          */
 732 
 733         maclen = ((unsigned long) iph) - ((unsigned long) skb->data);
 734         qp->mac = (unsigned char *) kmalloc(maclen, GFP_ATOMIC);
 735         if (qp->mac == NULL)
 736         {
 737                 printk("IP: create: no memory left !\n");
 738                 kfree_s(qp, sizeof(struct ipq));
 739                 return(NULL);
 740         }
 741 
 742         /*
 743          *      Allocate memory for the IP header (plus 8 octets for ICMP).
 744          */
 745 
 746         ihlen = (iph->ihl * sizeof(unsigned long));
 747         qp->iph = (struct iphdr *) kmalloc(ihlen + 8, GFP_ATOMIC);
 748         if (qp->iph == NULL)
 749         {
 750                 printk("IP: create: no memory left !\n");
 751                 kfree_s(qp->mac, maclen);
 752                 kfree_s(qp, sizeof(struct ipq));
 753                 return(NULL);
 754         }
 755 
 756         /* Fill in the structure. */
 757         memcpy(qp->mac, skb->data, maclen);
 758         memcpy(qp->iph, iph, ihlen + 8);
 759         qp->len = 0;
 760         qp->ihlen = ihlen;
 761         qp->maclen = maclen;
 762         qp->fragments = NULL;
 763         qp->dev = dev;
 764 
 765         /* Start a timer for this entry. */
 766         qp->timer.expires = IP_FRAG_TIME;               /* about 30 seconds     */
 767         qp->timer.data = (unsigned long) qp;            /* pointer to queue     */
 768         qp->timer.function = ip_expire;                 /* expire function      */
 769         add_timer(&qp->timer);
 770 
 771         /* Add this entry to the queue. */
 772         qp->prev = NULL;
 773         cli();
 774         qp->next = ipqueue;
 775         if (qp->next != NULL)
 776                 qp->next->prev = qp;
 777         ipqueue = qp;
 778         sti();
 779         return(qp);
 780 }
 781 
 782 
 783 /*
 784  *      See if a fragment queue is complete.
 785  */
 786 
 787 static int ip_done(struct ipq *qp)
 788 {
 789         struct ipfrag *fp;
 790         int offset;
 791 
 792         /* Only possible if we received the final fragment. */
 793         if (qp->len == 0)
 794                 return(0);
 795 
 796         /* Check all fragment offsets to see if they connect. */
 797         fp = qp->fragments;
 798         offset = 0;
 799         while (fp != NULL)
 800         {
 801                 if (fp->offset > offset)
 802                         return(0);      /* fragment(s) missing */
 803                 offset = fp->end;
 804                 fp = fp->next;
 805         }
 806 
 807         /* All fragments are present. */
 808         return(1);
 809 }
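
/*
 *      Worked example (never compiled, hence the #if 0), assuming the struct
 *      ipfrag/ipq fields used elsewhere in this file: two fragments covering
 *      [0,1480) and [1480,2960) with qp->len set to 2960 by the final (MF=0)
 *      fragment form a complete datagram, so ip_done() returns 1.  Remove the
 *      second fragment and the gap makes it return 0.
 */
#if 0
static int ip_done_example(void)
{
        struct ipq q;
        struct ipfrag a, b;

        memset(&q, 0, sizeof(q));
        memset(&a, 0, sizeof(a));
        memset(&b, 0, sizeof(b));

        a.offset = 0;    a.end = 1480; a.len = 1480; a.next = &b;
        b.offset = 1480; b.end = 2960; b.len = 1480; b.prev = &a;

        q.fragments = &a;
        q.len = 2960;                   /* set when the MF=0 fragment arrived */

        return(ip_done(&q));            /* 1: no gaps, reassembly can proceed */
}
#endif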
 810 
 811 
 812 /*
 813  *      Build a new IP datagram from all its fragments.
 814  *
 815  *      FIXME: We copy here because we lack an effective way of handling lists
 816  *      of bits on input. Until the new skb data handling is in I'm not going
 817  *      to touch this with a bargepole. This also causes a 4Kish limit on
 818  *      packet sizes.
 819  */
 820 
 821 static struct sk_buff *ip_glue(struct ipq *qp)
 822 {
 823         struct sk_buff *skb;
 824         struct iphdr *iph;
 825         struct ipfrag *fp;
 826         unsigned char *ptr;
 827         int count, len;
 828 
 829         /*
 830          *      Allocate a new buffer for the datagram.
 831          */
 832 
 833         len = qp->maclen + qp->ihlen + qp->len;
 834 
 835         if ((skb = alloc_skb(len,GFP_ATOMIC)) == NULL)
 836         {
 837                 ip_statistics.IpReasmFails++;
 838                 printk("IP: queue_glue: no memory for gluing queue 0x%X\n", (int) qp);
 839                 ip_free(qp);
 840                 return(NULL);
 841         }
 842 
 843         /* Fill in the basic details. */
 844         skb->len = (len - qp->maclen);
 845         skb->h.raw = skb->data;
 846         skb->free = 1;
 847 
 848         /* Copy the original MAC and IP headers into the new buffer. */
 849         ptr = (unsigned char *) skb->h.raw;
 850         memcpy(ptr, ((unsigned char *) qp->mac), qp->maclen);
 851         ptr += qp->maclen;
 852         memcpy(ptr, ((unsigned char *) qp->iph), qp->ihlen);
 853         ptr += qp->ihlen;
 854         skb->h.raw += qp->maclen;
 855 
 856         count = 0;
 857 
 858         /* Copy the data portions of all fragments into the new buffer. */
 859         fp = qp->fragments;
 860         while(fp != NULL)
 861         {
 862                 if(count+fp->len > skb->len)
 863                 {
 864                         printk("Invalid fragment list: Fragment over size.\n");
 865                         ip_free(qp);
 866                         kfree_skb(skb,FREE_WRITE);
 867                         ip_statistics.IpReasmFails++;
 868                         return NULL;
 869                 }
 870                 memcpy((ptr + fp->offset), fp->ptr, fp->len);
 871                 count += fp->len;
 872                 fp = fp->next;
 873         }
 874 
 875         /* We glued together all fragments, so remove the queue entry. */
 876         ip_free(qp);
 877 
 878         /* Done with all fragments. Fixup the new IP header. */
 879         iph = skb->h.iph;
 880         iph->frag_off = 0;
 881         iph->tot_len = htons((iph->ihl * sizeof(unsigned long)) + count);
 882         skb->ip_hdr = iph;
 883 
 884         ip_statistics.IpReasmOKs++;
 885         return(skb);
 886 }
 887 
 888 
 889 /*
 890  *      Process an incoming IP datagram fragment.
 891  */
 892 
 893 static struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev)
 894 {
 895         struct ipfrag *prev, *next;
 896         struct ipfrag *tfp;
 897         struct ipq *qp;
 898         struct sk_buff *skb2;
 899         unsigned char *ptr;
 900         int flags, offset;
 901         int i, ihl, end;
 902 
 903         ip_statistics.IpReasmReqds++;
 904 
 905         /* Find the entry of this IP datagram in the "incomplete datagrams" queue. */
 906         qp = ip_find(iph);
 907 
 908         /* Is this a non-fragmented datagram? */
 909         offset = ntohs(iph->frag_off);
 910         flags = offset & ~IP_OFFSET;
 911         offset &= IP_OFFSET;
 912         if (((flags & IP_MF) == 0) && (offset == 0))
 913         {
 914                 if (qp != NULL)
 915                         ip_free(qp);    /* Huh? How could this exist?? */
 916                 return(skb);
 917         }
 918 
 919         offset <<= 3;           /* offset is in 8-byte chunks */
 920 
 921         /*
 922          * If the queue already existed, keep restarting its timer as long
 923          * as we still are receiving fragments.  Otherwise, create a fresh
 924          * queue entry.
 925          */
 926 
 927         if (qp != NULL)
 928         {
 929                 del_timer(&qp->timer);
 930                 qp->timer.expires = IP_FRAG_TIME;       /* about 30 seconds */
 931                 qp->timer.data = (unsigned long) qp;    /* pointer to queue */
 932                 qp->timer.function = ip_expire;         /* expire function */
 933                 add_timer(&qp->timer);
 934         }
 935         else
 936         {
 937                 /*
 938                  *      If we failed to create it, then discard the frame
 939                  */
 940                 if ((qp = ip_create(skb, iph, dev)) == NULL)
 941                 {
 942                         skb->sk = NULL;
 943                         kfree_skb(skb, FREE_READ);
 944                         ip_statistics.IpReasmFails++;
 945                         return NULL;
 946                 }
 947         }
 948 
 949         /*
 950          *      Determine the position of this fragment.
 951          */
 952 
 953         ihl = (iph->ihl * sizeof(unsigned long));
 954         end = offset + ntohs(iph->tot_len) - ihl;
 955 
 956         /*
 957          *      Point into the IP datagram 'data' part.
 958          */
 959 
 960         ptr = skb->data + dev->hard_header_len + ihl;
 961 
 962         /*
 963          *      Is this the final fragment?
 964          */
 965 
 966         if ((flags & IP_MF) == 0)
 967                 qp->len = end;
 968 
 969         /*
 970          *      Find out which fragments are in front and at the back of us
 971          *      in the chain of fragments so far.  We must know where to put
 972          *      this fragment, right?
 973          */
 974 
 975         prev = NULL;
 976         for(next = qp->fragments; next != NULL; next = next->next)
 977         {
 978                 if (next->offset > offset)
 979                         break;  /* bingo! */
 980                 prev = next;
 981         }
 982 
 983         /*
 984          *      We found where to put this one.
 985          *      Check for overlap with preceding fragment, and, if needed,
 986          *      align things so that any overlaps are eliminated.
 987          */
 988         if (prev != NULL && offset < prev->end)
 989         {
 990                 i = prev->end - offset;
 991                 offset += i;    /* ptr into datagram */
 992                 ptr += i;       /* ptr into fragment data */
 993         }
 994 
 995         /*
 996          * Look for overlap with succeeding segments.
 997          * If we can merge fragments, do it.
 998          */
 999 
1000         for(; next != NULL; next = tfp)
1001         {
1002                 tfp = next->next;
1003                 if (next->offset >= end)
1004                         break;          /* no overlaps at all */
1005 
1006                 i = end - next->offset;                 /* overlap is 'i' bytes */
1007                 next->len -= i;                         /* so reduce size of    */
1008                 next->offset += i;                      /* next fragment        */
1009                 next->ptr += i;
1010 
1011                 /*
1012                  *      If we get a frag size of <= 0, remove it and the packet
1013                  *      that it goes with.
1014                  */
1015                 if (next->len <= 0)
1016                 {
1017                         if (next->prev != NULL)
1018                                 next->prev->next = next->next;
1019                         else
1020                                 qp->fragments = next->next;
1021 
 1022                 if (next->next != NULL)
1023                                 next->next->prev = next->prev;
1024 
1025                         kfree_skb(next->skb,FREE_READ);
1026                         kfree_s(next, sizeof(struct ipfrag));
1027                 }
1028         }
1029 
1030         /*
1031          *      Insert this fragment in the chain of fragments.
1032          */
1033 
1034         tfp = NULL;
1035         tfp = ip_frag_create(offset, end, skb, ptr);
1036 
1037         /*
1038          *      No memory to save the fragment - so throw the lot
1039          */
1040 
1041         if (!tfp)
1042         {
1043                 skb->sk = NULL;
1044                 kfree_skb(skb, FREE_READ);
1045                 return NULL;
1046         }
1047         tfp->prev = prev;
1048         tfp->next = next;
1049         if (prev != NULL)
1050                 prev->next = tfp;
1051         else
1052                 qp->fragments = tfp;
1053 
1054         if (next != NULL)
1055                 next->prev = tfp;
1056 
1057         /*
1058          *      OK, so we inserted this new fragment into the chain.
1059          *      Check if we now have a full IP datagram which we can
1060          *      bump up to the IP layer...
1061          */
1062 
1063         if (ip_done(qp))
1064         {
1065                 skb2 = ip_glue(qp);             /* glue together the fragments */
1066                 return(skb2);
1067         }
1068         return(NULL);
1069 }
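
/*
 *      For illustration only (never compiled, hence the #if 0): how the 16 bit
 *      frag_off field splits into flag bits and a 13 bit offset counted in
 *      8-octet units, matching the ntohs()/IP_OFFSET/IP_MF handling at the
 *      top of ip_defrag() above.
 */
#if 0
static void ip_frag_off_sketch(struct iphdr *iph)
{
        int raw    = ntohs(iph->frag_off);
        int flags  = raw & ~IP_OFFSET;          /* DF and MF flag bits */
        int offset = (raw & IP_OFFSET) << 3;    /* byte offset of this fragment */

        if (flags & IP_DF)
                printk("do not fragment\n");
        if (flags & IP_MF)
                printk("more fragments follow\n");
        printk("fragment data starts %d bytes into the datagram\n", offset);
}
#endif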
1070 
1071 
1072 /*
1073  *      This IP datagram is too large to be sent in one piece.  Break it up into
1074  *      smaller pieces (each of size equal to the MAC header plus IP header plus
1075  *      a block of the data of the original IP data part) that will yet fit in a
1076  *      single device frame, and queue such a frame for sending by calling the
1077  *      ip_queue_xmit().  Note that this is recursion, and bad things will happen
1078  *      if this function causes a loop...
1079  *
1080  *      Yes this is inefficient, feel free to submit a quicker one.
1081  *
1082  *      **Protocol Violation**
1083  *      We copy all the options to each fragment. !FIXME!
1084  */
1085 void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag)
1086 {
1087         struct iphdr *iph;
1088         unsigned char *raw;
1089         unsigned char *ptr;
1090         struct sk_buff *skb2;
1091         int left, mtu, hlen, len;
1092         int offset;
1093         unsigned long flags;
1094 
1095         /*
1096          *      Point into the IP datagram header.
1097          */
1098 
1099         raw = skb->data;
1100         iph = (struct iphdr *) (raw + dev->hard_header_len);
1101 
1102         skb->ip_hdr = iph;
1103 
1104         /*
1105          *      Setup starting values.
1106          */
1107 
1108         hlen = (iph->ihl * sizeof(unsigned long));
1109         left = ntohs(iph->tot_len) - hlen;      /* Space per frame */
1110         hlen += dev->hard_header_len;           /* Total header size */
1111         mtu = (dev->mtu - hlen);                /* Size of data space */
1112         ptr = (raw + hlen);                     /* Where to start from */
1113 
1114         /*
1115          *      Check for any "DF" flag. [DF means do not fragment]
1116          */
1117 
1118         if (ntohs(iph->frag_off) & IP_DF)
1119         {
1120                 /*
1121                  *      Reply giving the MTU of the failed hop.
1122                  */
1123                 ip_statistics.IpFragFails++;
1124                 icmp_send(skb,ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, dev->mtu, dev);
1125                 return;
1126         }
1127 
1128         /*
 1129  *      The protocol doesn't seem to say what to do when the frame plus
 1130  *      options doesn't fit the mtu. As the old code simply fell over in
 1131  *      this case, we were fortunate it never happened.
1132          */
1133 
1134         if(mtu<8)
1135         {
1136                 /* It's wrong but its better than nothing */
1137                 icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,dev->mtu, dev);
1138                 ip_statistics.IpFragFails++;
1139                 return;
1140         }
1141 
1142         /*
1143          *      Fragment the datagram.
1144          */
1145 
1146         /*
1147          *      The initial offset is 0 for a complete frame. When
1148          *      fragmenting fragments its wherever this one starts.
1149          */
1150 
1151         if (is_frag & 2)
1152                 offset = (ntohs(iph->frag_off) & 0x1fff) << 3;
1153         else
1154                 offset = 0;
1155 
1156 
1157         /*
1158          *      Keep copying data until we run out.
1159          */
1160 
1161         while(left > 0)
1162         {
1163                 len = left;
1164                 /* IF: it doesn't fit, use 'mtu' - the data space left */
1165                 if (len > mtu)
1166                         len = mtu;
1167                 /* IF: we are not sending upto and including the packet end
1168                    then align the next start on an eight byte boundary */
1169                 if (len < left)
1170                 {
1171                         len/=8;
1172                         len*=8;
1173                 }
1174                 /*
1175                  *      Allocate buffer.
1176                  */
1177 
1178                 if ((skb2 = alloc_skb(len + hlen,GFP_ATOMIC)) == NULL)
1179                 {
1180                         printk("IP: frag: no memory for new fragment!\n");
1181                         ip_statistics.IpFragFails++;
1182                         return;
1183                 }
1184 
1185                 /*
1186                  *      Set up data on packet
1187                  */
1188 
1189                 skb2->arp = skb->arp;
1190                 if(skb->free==0)
1191                         printk("IP fragmenter: BUG free!=1 in fragmenter\n");
1192                 skb2->free = 1;
1193                 skb2->len = len + hlen;
1194                 skb2->h.raw=(char *) skb2->data;
1195                 /*
1196                  *      Charge the memory for the fragment to any owner
1197                  *      it might possess
1198                  */
1199 
1200                 save_flags(flags);
1201                 if (sk)
1202                 {
1203                         cli();
1204                         sk->wmem_alloc += skb2->mem_len;
1205                         skb2->sk=sk;
1206                 }
1207                 restore_flags(flags);
1208                 skb2->raddr = skb->raddr;       /* For rebuild_header - must be here */
1209 
1210                 /*
1211                  *      Copy the packet header into the new buffer.
1212                  */
1213 
1214                 memcpy(skb2->h.raw, raw, hlen);
1215 
1216                 /*
1217                  *      Copy a block of the IP datagram.
1218                  */
1219                 memcpy(skb2->h.raw + hlen, ptr, len);
1220                 left -= len;
1221 
1222                 skb2->h.raw+=dev->hard_header_len;
1223 
1224                 /*
1225                  *      Fill in the new header fields.
1226                  */
1227                 iph = (struct iphdr *)(skb2->h.raw/*+dev->hard_header_len*/);
1228                 iph->frag_off = htons((offset >> 3));
1229                 /*
1230                  *      Added AC : If we are fragmenting a fragment thats not the
1231                  *                 last fragment then keep MF on each bit
1232                  */
1233                 if (left > 0 || (is_frag & 1))
1234                         iph->frag_off |= htons(IP_MF);
1235                 ptr += len;
1236                 offset += len;
1237 
1238                 /*
1239                  *      Put this fragment into the sending queue.
1240                  */
1241 
1242                 ip_statistics.IpFragCreates++;
1243 
1244                 ip_queue_xmit(sk, dev, skb2, 2);
1245         }
1246         ip_statistics.IpFragOKs++;
1247 }
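
/*
 *      For illustration only (never compiled, hence the #if 0): the fragment
 *      size arithmetic used by the loop above.  Every fragment except the
 *      last must carry a multiple of 8 data bytes because the offset field
 *      counts in 8-octet units; e.g. 4000 payload bytes over a 1480 byte data
 *      space become fragments of 1480, 1480 and 1040 bytes.  Assumes mtu >= 8,
 *      which ip_fragment() checks before it starts fragmenting.
 */
#if 0
static int ip_count_fragments(int left, int mtu)
{
        int len, n = 0;

        while (left > 0)
        {
                len = (left > mtu) ? mtu : left;
                if (len < left)
                        len &= ~7;      /* align all but the last piece on 8 bytes */
                left -= len;
                n++;
        }
        return(n);                      /* ip_count_fragments(4000, 1480) == 3 */
}
#endif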
1248 
1249 
1250 
1251 #ifdef CONFIG_IP_FORWARD
1252 
1253 /*
1254  *      Forward an IP datagram to its next destination.
1255  */
1256 
1257 static void ip_forward(struct sk_buff *skb, struct device *dev, int is_frag)
1258 {
1259         struct device *dev2;    /* Output device */
1260         struct iphdr *iph;      /* Our header */
1261         struct sk_buff *skb2;   /* Output packet */
1262         struct rtable *rt;      /* Route we use */
1263         unsigned char *ptr;     /* Data pointer */
1264         unsigned long raddr;    /* Router IP address */
1265 
1266         /* 
1267          *      See if we are allowed to forward this.
1268          */
1269 
1270 #ifdef CONFIG_IP_FIREWALL
1271         if(!ip_fw_chk(skb->h.iph, ip_fw_fwd_chain, ip_fw_fwd_policy))
1272         {
1273                 return;
1274         }
1275 #endif
1276         /*
1277          *      According to the RFC, we must first decrease the TTL field. If
1278          *      that reaches zero, we must reply an ICMP control message telling
1279          *      that the packet's lifetime expired.
1280          *
1281          *      Exception:
1282          *      We may not generate an ICMP for an ICMP. icmp_send does the
1283          *      enforcement of this so we can forget it here. It is however
1284          *      sometimes VERY important.
1285          */
1286 
1287         iph = skb->h.iph;
1288         iph->ttl--;
1289         if (iph->ttl <= 0)
1290         {
1291                 /* Tell the sender its packet died... */
1292                 icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0, dev);
1293                 return;
1294         }
1295 
1296         /*
1297          *      Re-compute the IP header checksum.
1298          *      This is inefficient. We know what has happened to the header
1299          *      and could thus adjust the checksum as Phil Karn does in KA9Q
1300          */
1301 
1302         ip_send_check(iph);
1303 
1304         /*
1305          * OK, the packet is still valid.  Fetch its destination address,
1306          * and give it to the IP sender for further processing.
1307          */
1308 
1309         rt = ip_rt_route(iph->daddr, NULL, NULL);
1310         if (rt == NULL)
1311         {
1312                 /*
1313                  *      Tell the sender its packet cannot be delivered. Again
1314                  *      ICMP is screened later.
1315                  */
1316                 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_UNREACH, 0, dev);
1317                 return;
1318         }
1319 
1320 
1321         /*
1322          * Gosh.  Not only is the packet valid; we even know how to
1323          * forward it onto its final destination.  Can we say this
1324          * is being plain lucky?
1325          * If the router told us that there is no GW, use the dest.
1326          * IP address itself- we seem to be connected directly...
1327          */
1328 
1329         raddr = rt->rt_gateway;
1330 
1331         if (raddr != 0)
1332         {
1333                 /*
1334                  *      There is a gateway so find the correct route for it.
1335                  *      Gateways cannot in turn be gatewayed.
1336                  */
1337                 rt = ip_rt_route(raddr, NULL, NULL);
1338                 if (rt == NULL)
1339                 {
1340                         /*
1341                          *      Tell the sender its packet cannot be delivered...
1342                          */
1343                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
1344                         return;
1345                 }
1346                 if (rt->rt_gateway != 0)
1347                         raddr = rt->rt_gateway;
1348         }
1349         else
1350                 raddr = iph->daddr;
1351 
1352         /*
1353          *      Having picked a route we can now send the frame out.
1354          */
1355 
1356         dev2 = rt->rt_dev;
1357 
1358         /*
1359          *      In IP you never have to forward a frame on the interface that it 
1360          *      arrived upon. We now generate an ICMP HOST REDIRECT giving the route
1361          *      we calculated.
1362          */
1363 #ifdef IP_NO_ICMP_REDIRECT
1364         if (dev == dev2)
1365                 return;
1366 #else
1367         if (dev == dev2)
1368                 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, raddr, dev);
1369 #endif          
1370 
1371         /*
1372          * We now allocate a new buffer, and copy the datagram into it.
1373          * If the indicated interface is up and running, kick it.
1374          */
1375 
1376         if (dev2->flags & IFF_UP)
1377         {
1378 
1379                 /*
1380                  *      Current design decrees we copy the packet. For identical header
1381                  *      lengths we could avoid it. The new skb code will let us push
1382                  *      data so the problem goes away then.
1383                  */
1384 
1385                 skb2 = alloc_skb(dev2->hard_header_len + skb->len, GFP_ATOMIC);
1386                 /*
1387                  *      This is rare and since IP is tolerant of network failures
1388                  *      quite harmless.
1389                  */
1390                 if (skb2 == NULL)
1391                 {
1392                         printk("\nIP: No memory available for IP forward\n");
1393                         return;
1394                 }
1395                 ptr = skb2->data;
1396                 skb2->free = 1;
1397                 skb2->len = skb->len + dev2->hard_header_len;
1398                 skb2->h.raw = ptr;
1399 
1400                 /*
1401                  *      Copy the packet data into the new buffer.
1402                  */
1403                 memcpy(ptr + dev2->hard_header_len, skb->h.raw, skb->len);
1404 
1405                 /* Now build the MAC header. */
1406                 (void) ip_send(skb2, raddr, skb->len, dev2, dev2->pa_addr);
1407 
1408                 ip_statistics.IpForwDatagrams++;
1409 
1410                 /*
1411                  *      See if it needs fragmenting. Note in ip_rcv we tagged
1412                  *      the fragment type. This must be right so that
1413                  *      the fragmenter does the right thing.
1414                  */
1415 
1416                 if(skb2->len > dev2->mtu + dev2->hard_header_len)
1417                 {
1418                         ip_fragment(NULL,skb2,dev2, is_frag);
1419                         kfree_skb(skb2,FREE_WRITE);
1420                 }
1421                 else
1422                 {
1423 #ifdef CONFIG_IP_ACCT           
1424                         /*
1425                          *      Count mapping we shortcut
1426                          */
1427                          
1428                         ip_acct_cnt(iph,ip_acct_chain);
1429 #endif                  
1430                         
1431                         /*
1432                          *      Map service types to priority. We lie about
1433                          *      throughput being low priority, but its a good
1434                          *      choice to help improve general usage.
1435                          */
1436                         if(iph->tos & IPTOS_LOWDELAY)
1437                                 dev_queue_xmit(skb2, dev2, SOPRI_INTERACTIVE);
1438                         else if(iph->tos & IPTOS_THROUGHPUT)
1439                                 dev_queue_xmit(skb2, dev2, SOPRI_BACKGROUND);
1440                         else
1441                                 dev_queue_xmit(skb2, dev2, SOPRI_NORMAL);
1442                 }
1443         }
1444 }
1445 
1446 
1447 #endif
1448 
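
/*
 *      For illustration only (never compiled, hence the #if 0): the incremental
 *      checksum update hinted at in ip_forward() above ("adjust the checksum
 *      as Phil Karn does in KA9Q").  When only one 16 bit word of the header
 *      changes (here the word holding the TTL), the checksum can be patched
 *      using the standard identity HC' = ~(~HC + ~m + m') instead of being
 *      recomputed over the whole header.
 */
#if 0
static unsigned short ip_csum_adjust(unsigned short check,
                        unsigned short old_word, unsigned short new_word)
{
        unsigned long sum;

        sum = (~check & 0xffff) + (~old_word & 0xffff) + new_word;
        sum = (sum & 0xffff) + (sum >> 16);     /* fold the carries ... */
        sum = (sum & 0xffff) + (sum >> 16);     /* ... back into 16 bits */
        return((unsigned short) ~sum);
}
#endif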
1449 /*
1450  *      This function receives all incoming IP datagrams.
1451  */
1452 
1453 int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
1454 {
1455         struct iphdr *iph = skb->h.iph;
1456         struct sock *raw_sk=NULL;
1457         unsigned char hash;
1458         unsigned char flag = 0;
1459         unsigned char opts_p = 0;       /* Set iff the packet has options. */
1460         struct inet_protocol *ipprot;
1461         static struct options opt; /* since we don't use these yet, and they
1462                                 take up stack space. */
1463         int brd=IS_MYADDR;
1464         int is_frag=0;
1465 
1466         ip_statistics.IpInReceives++;
1467 
1468         /*
1469          *      Tag the ip header of this packet so we can find it
1470          */
1471 
1472         skb->ip_hdr = iph;
1473 
1474         /*
1475          *      Is the datagram acceptable?
1476          *
1477          *      1.      Length at least the size of an ip header
1478          *      2.      Version of 4
1479          *      3.      Checksums correctly. [Speed optimisation for later, skip loopback checksums]
1480          *      (4.     We ought to check for IP multicast addresses and undefined types.. does this matter ?)
1481          */
1482 
1483         if (skb->len<sizeof(struct iphdr) || iph->ihl<5 || iph->version != 4 || ip_fast_csum((unsigned char *)iph, iph->ihl) !=0)
1484         {
1485                 ip_statistics.IpInHdrErrors++;
1486                 kfree_skb(skb, FREE_WRITE);
1487                 return(0);
1488         }
1489         
1490         /*
1491          *      See if the firewall wants to dispose of the packet. 
1492          */
1493 
1494 #ifdef  CONFIG_IP_FIREWALL
1495         
1496         if(!LOOPBACK(iph->daddr) && !ip_fw_chk(iph,ip_fw_blk_chain,
1497                         ip_fw_blk_policy))
1498         {
1499                 kfree_skb(skb, FREE_WRITE);
1500                 return 0;       
1501         }
1502 
1503 #endif
1504         
1505         /*
1506          *      Our transport medium may have padded the buffer out. Now that we know it
1507          *      is IP we can trim to the true length of the frame.
1508          */
1509 
1510         skb->len=ntohs(iph->tot_len);
1511 
1512         /*
1513          *      Next analyse the packet for options. Studies show under one packet in
1514          *      a thousand has options...
1515          */
1516 
1517         if (iph->ihl != 5)
1518         {       /* Options are present; the typical optionless packet skips this slow path entirely. */
1519                 memset((char *) &opt, 0, sizeof(opt));
1520                 if (do_options(iph, &opt) != 0)
1521                         return 0;
1522                 opts_p = 1;
1523         }
1524 
1525         /*
1526          *      Remember if the frame is fragmented.
1527          */
1528          
1529         if(iph->frag_off)
1530         {
1531                 if (iph->frag_off & 0x0020)
1532                         is_frag|=1;
1533                 /*
1534                  *      Last fragment ?
1535                  */
1536         
1537                 if (ntohs(iph->frag_off) & 0x1fff)
1538                         is_frag|=2;
1539         }
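             /*
              *      Note on the bit tests above: frag_off holds three flag bits and a
              *      13-bit fragment offset (in units of 8 bytes). 0x0020 picks out the
              *      "more fragments" (MF) bit without byte-swapping; it equals
              *      htons(0x2000) on a little-endian host, which the test appears to
              *      assume. A non-zero offset after ntohs() means this is not the
              *      first fragment. Either condition means reassembly work below.
              */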
1540         
1541         /*
1542          *      Do any IP forwarding that is required.  chk_addr() is expensive -- avoid it someday.
1543          *
1544          *      This is inefficient. While finding out if it is for us we could also compute
1545          *      the routing table entry. This is where the great unified cache theory comes
1546          *      in, as and when someone implements it.
1547          *
1548          *      For most hosts over 99% of packets match the first conditional
1549          *      and don't go via ip_chk_addr. Note: brd is set to IS_MYADDR at
1550          *      function entry.
1551          */
1552 
1553         if ( iph->daddr != skb->dev->pa_addr && (brd = ip_chk_addr(iph->daddr)) == 0)
1554         {
1555                 /*
1556                  *      Don't forward multicast or broadcast frames.
1557                  */
1558 
1559                 if(skb->pkt_type!=PACKET_HOST || brd==IS_BROADCAST)
1560                 {
1561                         kfree_skb(skb,FREE_WRITE);
1562                         return 0;
1563                 }
1564 
1565                 /*
1566                  *      The packet is for another target. Forward the frame
1567                  */
1568 
1569 #ifdef CONFIG_IP_FORWARD
1570                 ip_forward(skb, dev, is_frag);
1571 #else
1572 /*              printk("Machine %lx tried to use us as a forwarder to %lx but we have forwarding disabled!\n",
1573                         iph->saddr,iph->daddr);*/
1574                 ip_statistics.IpInAddrErrors++;
1575 #endif
1576                 /*
1577                  *      The forwarder is inefficient and copies the packet. We
1578                  *      free the original now.
1579                  */
1580 
1581                 kfree_skb(skb, FREE_WRITE);
1582                 return(0);
1583         }
1584         
1585 #ifdef CONFIG_IP_MULTICAST      
1586 
1587         if(brd==IS_MULTICAST && iph->daddr!=IGMP_ALL_HOSTS && !(dev->flags&IFF_LOOPBACK))
1588         {
1589                 /*
1590                  *      Check it is for one of our groups
1591                  */
1592                 struct ip_mc_list *ip_mc=dev->ip_mc_list;
1593                 do
1594                 {
1595                         if(ip_mc==NULL)
1596                         {       
1597                                 kfree_skb(skb, FREE_WRITE);
1598                                 return 0;
1599                         }
1600                         if(ip_mc->multiaddr==iph->daddr)
1601                                 break;
1602                         ip_mc=ip_mc->next;
1603                 }
1604                 while(1);
1605         }
1606 #endif
1607         /*
1608          *      Account for the packet
1609          */
1610          
1611 #ifdef CONFIG_IP_ACCT
1612         ip_acct_cnt(iph,ip_acct_chain);
1613 #endif  
1614 
1615         /*
1616          * Reassemble IP fragments.
1617          */
1618 
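             /*
              *      ip_defrag() queues the fragment and returns NULL until the whole
              *      datagram has arrived; only then does it hand back a buffer holding
              *      the complete, reassembled packet, so the header pointer must be
              *      reloaded from the new buffer.
              */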
1619         if(is_frag)
1620         {
1621                 /* Defragment. Obtain the complete packet if there is one */
1622                 skb=ip_defrag(iph,skb,dev);
1623                 if(skb==NULL)
1624                         return 0;
1625                 skb->dev = dev;
1626                 iph=skb->h.iph;
1627         }
1628         
1629                  
1630 
1631         /*
1632          *      Point into the IP datagram, just past the header.
1633          */
1634 
1635         skb->ip_hdr = iph;
1636         skb->h.raw += iph->ihl*4;
1637         
1638         /*
1639          *      Deliver to raw sockets. The fun part is doing this without making any surplus copies.
1640          */
1641          
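             /*
              *      Raw sockets live in an array indexed by a hash of the protocol
              *      number (the mask is a cheap modulo, assuming SOCK_ARRAY_SIZE is a
              *      power of two). Computing the hash first lets us tell cheaply
              *      whether any raw listener could possibly exist.
              */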
1642         hash = iph->protocol & (SOCK_ARRAY_SIZE-1);
1643         
1644         /* If there may be a raw socket we must check - if not we couldn't care less */
1645         if((raw_sk=raw_prot.sock_array[hash])!=NULL)
1646         {
1647                 struct sock *sknext=NULL;
1648                 struct sk_buff *skb1;
1649                 raw_sk=get_sock_raw(raw_sk, hash,  iph->saddr, iph->daddr);
1650                 if(raw_sk)      /* Any raw sockets */
1651                 {
1652                         do
1653                         {
1654                                 /* Find the next */
1655                                 sknext=get_sock_raw(raw_sk->next, hash, iph->saddr, iph->daddr);
1656                                 if(sknext)
1657                                         skb1=skb_clone(skb, GFP_ATOMIC);
1658                                 else
1659                                         break;  /* One pending raw socket left */
1660                                 if(skb1)
1661                                         raw_rcv(raw_sk, skb1, dev, iph->saddr,iph->daddr);
1662                                 raw_sk=sknext;
1663                         }
1664                         while(raw_sk!=NULL);
1665                         /* Here either raw_sk is the last raw socket, or NULL if none */
1666                         /* We deliver to the last raw socket AFTER the protocol checks as it avoids a surplus copy */
1667                 }
1668         }
1669         
1670         /*
1671          *      skb->h.raw now points at the protocol beyond the IP header.
1672          */
1673 
1674         hash = iph->protocol & (MAX_INET_PROTOS -1);
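             /*
              *      Several protocols can share a bucket in inet_protos, so walk the
              *      chain and compare each entry's protocol number with the one in the
              *      header before handing the packet over.
              */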
1675         for (ipprot = (struct inet_protocol *)inet_protos[hash];ipprot != NULL;ipprot=(struct inet_protocol *)ipprot->next)
1676         {
1677                 struct sk_buff *skb2;
1678 
1679                 if (ipprot->protocol != iph->protocol)
1680                         continue;
1681                /*
1682                 *      See if we need to make a copy of it. A copy is only
1683                 *      needed when more than one protocol wants the packet,
1684                 *      and then not for the last one. If there is a pending
1685                 *      raw delivery, wait for that instead.
1686                 */
1687                 if (ipprot->copy || raw_sk)
1688                 {
1689                         skb2 = skb_clone(skb, GFP_ATOMIC);
1690                         if(skb2==NULL)
1691                                 continue;
1692                 }
1693                 else
1694                 {
1695                         skb2 = skb;
1696                 }
1697                 flag = 1;
1698 
1699                /*
1700                 * Pass on the datagram to each protocol that wants it,
1701                 * based on the datagram protocol.  We should really
1702                 * check the protocol handler's return values here...
1703                 */
1704                 ipprot->handler(skb2, dev, opts_p ? &opt : 0, iph->daddr,
1705                                 (ntohs(iph->tot_len) - (iph->ihl * 4)),
1706                                 iph->saddr, 0, ipprot);
1707 
1708         }
1709 
1710         /*
1711          * All protocols checked.
1712          * If this packet was a broadcast, we may *not* reply to it, since that
1713          * causes (proven, grin) ARP storms and a leakage of memory (i.e. all
1714          * ICMP reply messages get queued up for transmission...)
1715          */
1716 
1717         if(raw_sk!=NULL)        /* Shift to last raw user */
1718                 raw_rcv(raw_sk, skb, dev, iph->saddr, iph->daddr);
1719         else if (!flag)         /* Free and report errors */
1720         {
1721                 if (brd != IS_BROADCAST && brd!=IS_MULTICAST)
1722                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0, dev);
1723                 kfree_skb(skb, FREE_WRITE);
1724         }
1725 
1726         return(0);
1727 }
1728 
1729 /*
1730  *      Loop a packet back to the sender.
1731  */
1732  
1733 static void ip_loopback(struct device *old_dev, struct sk_buff *skb)
1734 {
1735         extern struct device loopback_dev;
1736         struct device *dev=&loopback_dev;
1737         int len=skb->len-old_dev->hard_header_len;
1738         struct sk_buff *newskb=alloc_skb(len+dev->hard_header_len, GFP_ATOMIC);
1739         
1740         if(newskb==NULL)
1741                 return;
1742                 
1743         newskb->link3=NULL;
1744         newskb->sk=NULL;
1745         newskb->dev=dev;
1746         newskb->saddr=skb->saddr;
1747         newskb->daddr=skb->daddr;
1748         newskb->raddr=skb->raddr;
1749         newskb->free=1;
1750         newskb->lock=0;
1751         newskb->users=0;
1752         newskb->pkt_type=skb->pkt_type;
1753         newskb->len=len+dev->hard_header_len;
1754         
1755         
1756         newskb->ip_hdr=(struct iphdr *)(newskb->data+ip_send(newskb, skb->ip_hdr->daddr, len, dev, skb->ip_hdr->saddr));
1757         memcpy(newskb->ip_hdr,skb->ip_hdr,len);
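             /*
              *      ip_send() above built the MAC header and returned its length, so
              *      newskb->ip_hdr points just past it. len is the original frame less
              *      its hard header, i.e. the whole IP datagram, copied in one go.
              */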
1758 
1759         /* Recurse. The device check against IFF_LOOPBACK will stop infinite recursion */
1760                 
1761         /*printk("Loopback output queued [%lX to %lX].\n", newskb->ip_hdr->saddr,newskb->ip_hdr->daddr);*/
1762         ip_queue_xmit(NULL, dev, newskb, 1);
1763 }
1764 
1765 
1766 /*
1767  * Queues a packet to be sent, and starts the transmitter
1768  * if necessary. If free = 1 we free the block after
1769  * transmit, otherwise we don't. If free == 2 we not only
1770  * free the block but also don't assign a new IP id to it.
1771  * This routine also needs to fill in the total length
1772  * and compute the checksum.
1773  */
1774 
1775 void ip_queue_xmit(struct sock *sk, struct device *dev,
1776               struct sk_buff *skb, int free)
1777 {
1778         struct iphdr *iph;
1779         unsigned char *ptr;
1780 
1781         /* Sanity check */
1782         if (dev == NULL)
1783         {
1784                 printk("IP: ip_queue_xmit dev = NULL\n");
1785                 return;
1786         }
1787 
1788         IS_SKB(skb);
1789 
1790         /*
1791          *      Do some book-keeping in the packet for later
1792          */
1793 
1794 
1795         skb->dev = dev;
1796         skb->when = jiffies;
1797 
1798         /*
1799          *      Find the IP header and set the length. This is bad,
1800          *      but once we get the new skb data handling code in, the
1801          *      hardware header code will push its header on sensibly and
1802          *      we will set skb->ip_hdr there, avoiding this mess and the
1803          *      fixed header length problem.
1804          */
1805 
1806         ptr = skb->data;
1807         ptr += dev->hard_header_len;
1808         iph = (struct iphdr *)ptr;
1809         skb->ip_hdr = iph;
1810         iph->tot_len = ntohs(skb->len-dev->hard_header_len);
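             /*
              *      ntohs() and htons() are the same 16-bit byte swap, so this does
              *      store tot_len in network byte order despite the back-to-front name.
              */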
1811 
1812         /*
1813          *      No reassigning numbers to fragments...
1814          */
1815 
1816         if(free!=2)
1817                 iph->id      = htons(ip_id_count++);
1818         else
1819                 free=1;
1820 
1821         /* All buffers without an owner socket get freed */
1822         if (sk == NULL)
1823                 free = 1;
1824 
1825         skb->free = free;
1826 
1827         /*
1828          *      Do we need to fragment? Again this is inefficient.
1829          *      We need to somehow lock the original buffer and use
1830          *      bits of it.
1831          */
1832 
1833         if(skb->len > dev->mtu + dev->hard_header_len)
1834         {
1835                 ip_fragment(sk,skb,dev,0);
1836                 IS_SKB(skb);
1837                 kfree_skb(skb,FREE_WRITE);
1838                 return;
1839         }
1840 
1841         /*
1842          *      Add an IP checksum
1843          */
1844 
1845         ip_send_check(iph);
1846 
1847         /*
1848          *      Print the frame when debugging
1849          */
1850 
1851         /*
1852          *      More debugging. You cannot queue a packet that is already on a list.
1853          *      Spot this and moan loudly.
1854          */
1855         if (skb->next != NULL)
1856         {
1857                 printk("ip_queue_xmit: next != NULL\n");
1858                 skb_unlink(skb);
1859         }
1860 
1861         /*
1862          *      If a sender wishes the packet to remain unfreed
1863          *      we add it to his send queue. This arguably belongs
1864          *      in the TCP level since nobody else uses it. BUT
1865          *      remember IPng might change all the rules.
1866          */
1867 
1868         if (!free)
1869         {
1870                 unsigned long flags;
1871                 /* The socket now has more outstanding blocks */
1872 
1873                 sk->packets_out++;
1874 
1875                 /* Protect the list for a moment */
1876                 save_flags(flags);
1877                 cli();
1878 
1879                 if (skb->link3 != NULL)
1880                 {
1881                         printk("ip.c: link3 != NULL\n");
1882                         skb->link3 = NULL;
1883                 }
1884                 if (sk->send_head == NULL)
1885                 {
1886                         sk->send_tail = skb;
1887                         sk->send_head = skb;
1888                 }
1889                 else
1890                 {
1891                         sk->send_tail->link3 = skb;
1892                         sk->send_tail = skb;
1893                 }
1894                 /* skb->link3 is NULL */
1895 
1896                 /* Interrupt restore */
1897                 restore_flags(flags);
1898         }
1899         else
1900                 /* Remember who owns the buffer */
1901                 skb->sk = sk;
1902 
1903         /*
1904          *      If the indicated interface is up and running, send the packet.
1905          */
1906          
1907         ip_statistics.IpOutRequests++;
1908 #ifdef CONFIG_IP_ACCT
1909         ip_acct_cnt(iph,ip_acct_chain);
1910 #endif  
1911         
1912 #ifdef CONFIG_IP_MULTICAST      
1913 
1914         /*
1915          *      Multicasts are looped back for other local users
1916          */
1917          
1918         if (MULTICAST(iph->daddr) && !(dev->flags&IFF_LOOPBACK))
1919         {
1920                 if(sk==NULL || sk->ip_mc_loop)
1921                 {
1922                         if(iph->daddr==IGMP_ALL_HOSTS)
1923                                 ip_loopback(dev,skb);
1924                         else
1925                         {
1926                                 struct ip_mc_list *imc=dev->ip_mc_list;
1927                                 while(imc!=NULL)
1928                                 {
1929                                         if(imc->multiaddr==iph->daddr)
1930                                         {
1931                                                 ip_loopback(dev,skb);
1932                                                 break;
1933                                         }
1934                                         imc=imc->next;
1935                                 }
1936                         }
1937                 }
1938                 /* Multicasts with ttl 0 must not go beyond the host */
1939                 
1940                 if(skb->ip_hdr->ttl==0)
1941                 {
1942                         kfree_skb(skb, FREE_READ);
1943                         return;
1944                 }
1945         }
1946 #endif
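             /*
              *      A frame addressed to this interface's broadcast address is looped
              *      back as well so local listeners see it. The IFF_LOOPBACK test keeps
              *      the copy that ip_loopback() queues on the loopback device from
              *      being looped back yet again.
              */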
1947         if((dev->flags&IFF_BROADCAST) && iph->daddr==dev->pa_brdaddr && !(dev->flags&IFF_LOOPBACK))
1948                 ip_loopback(dev,skb);
1949                 
1950         if (dev->flags & IFF_UP)
1951         {
1952                 /*
1953                  *      If we have an owner use its priority setting,
1954                  *      otherwise use NORMAL
1955                  */
1956 
1957                 if (sk != NULL)
1958                 {
1959                         dev_queue_xmit(skb, dev, sk->priority);
1960                 }
1961                 else
1962                 {
1963                         dev_queue_xmit(skb, dev, SOPRI_NORMAL);
1964                 }
1965         }
1966         else
1967         {
1968                 ip_statistics.IpOutDiscards++;
1969                 if (free)
1970                         kfree_skb(skb, FREE_WRITE);
1971         }
1972 }
1973 
1974 
1975 
1976 #ifdef CONFIG_IP_MULTICAST
1977 
1978 /*
1979  *      Write a multicast group list table for the IGMP daemon to
1980  *      read.
1981  */
1982  
1983 int ip_mc_procinfo(char *buffer, char **start, off_t offset, int length)
1984 {
1985         off_t pos=0, begin=0;
1986         struct ip_mc_list *im;
1987         unsigned long flags;
1988         int len=0;
1989         struct device *dev;
1990         
1991         len=sprintf(buffer,"Device    : Count\tGroup    Users Timer\n");  
1992         save_flags(flags);
1993         cli();
1994         
1995         for(dev = dev_base; dev; dev = dev->next)
1996         {
1997                 if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST))
1998                 {
1999                         len+=sprintf(buffer+len,"%-10s: %5d\n",
2000                                         dev->name, dev->mc_count);
2001                         for(im = dev->ip_mc_list; im; im = im->next)
2002                         {
2003                                 len+=sprintf(buffer+len,
2004                                         "\t\t\t%08lX %5d %d:%08lX\n",
2005                                         im->multiaddr, im->users,
2006                                         im->tm_running, im->timer.expires);
2007                                 pos=begin+len;
2008                                 if(pos<offset)
2009                                 {
2010                                         len=0;
2011                                         begin=pos;
2012                                 }
2013                                 if(pos>offset+length)
2014                                         break;
2015                         }
2016                 }
2017         }
2018         restore_flags(flags);
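             /*
              *      Standard /proc read windowing: the caller wants 'length' bytes
              *      starting at 'offset' into the virtual file. 'begin' tracks how much
              *      output was discarded inside the loop, so point *start at the first
              *      byte actually requested and clip the length to what remains.
              */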
2019         *start=buffer+(offset-begin);
2020         len-=(offset-begin);
2021         if(len>length)
2022                 len=length;     
2023         return len;
2024 }
2025 
2026 
2027 #endif  
2028 /*
2029  *      Socket option code for IP. This is the end of the line after any TCP, UDP etc. options on
2030  *      an IP socket.
2031  *
2032  *      We implement IP_TOS (type of service), IP_TTL (time to live).
2033  *
2034  *      Next release we will sort out IP_OPTIONS, since they are kind of important for some people.
2035  */
2036 
2037 int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
2038 {
2039         int val,err;
2040 #if defined(CONFIG_IP_FIREWALL) || defined(CONFIG_IP_ACCT)
2041         struct ip_fw tmp_fw;
2042 #endif  
2043         if (optval == NULL)
2044                 return(-EINVAL);
2045 
2046         err=verify_area(VERIFY_READ, optval, sizeof(int));
2047         if(err)
2048                 return err;
2049 
2050         val = get_fs_long((unsigned long *)optval);
2051 
2052         if(level!=SOL_IP)
2053                 return -EOPNOTSUPP;
2054 
2055         switch(optname)
2056         {
2057                 case IP_TOS:
2058                         if(val<0||val>255)
2059                                 return -EINVAL;
2060                         sk->ip_tos=val;
2061                         if(val==IPTOS_LOWDELAY)
2062                                 sk->priority=SOPRI_INTERACTIVE;
2063                         if(val==IPTOS_THROUGHPUT)
2064                                 sk->priority=SOPRI_BACKGROUND;
2065                         return 0;
2066                 case IP_TTL:
2067                         if(val<1||val>255)
2068                                 return -EINVAL;
2069                         sk->ip_ttl=val;
2070                         return 0;
2071 #ifdef CONFIG_IP_MULTICAST
2072                 case IP_MULTICAST_TTL: 
2073                 {
2074                         unsigned char ucval;
2075 
2076                         ucval=get_fs_byte((unsigned char *)optval);
2077                         if(ucval<1||ucval>255)
2078                                 return -EINVAL;
2079                         sk->ip_mc_ttl=(int)ucval;
2080                         return 0;
2081                 }
2082                 case IP_MULTICAST_LOOP: 
2083                 {
2084                         unsigned char ucval;
2085 
2086                         ucval=get_fs_byte((unsigned char *)optval);
2087                         if(ucval!=0 && ucval!=1)
2088                                  return -EINVAL;
2089                         sk->ip_mc_loop=(int)ucval;
2090                         return 0;
2091                 }
2092                 case IP_MULTICAST_IF: 
2093                 {
2094                         /* Not fully tested */
2095                         struct in_addr addr;
2096                         struct device *dev=NULL;
2097                         
2098                         /*
2099                          *      Check the arguments are allowable
2100                          */
2101 
2102                         err=verify_area(VERIFY_READ, optval, sizeof(addr));
2103                         if(err)
2104                                 return err;
2105                                 
2106                         memcpy_fromfs(&addr,optval,sizeof(addr));
2107                         
2108                         printk("MC bind %s\n", in_ntoa(addr.s_addr));
2109                         
2110                         /*
2111                          *      What address has been requested
2112                          */
2113                         
2114                         if(addr.s_addr==INADDR_ANY)     /* Default */
2115                         {
2116                                 sk->ip_mc_name[0]=0;
2117                                 return 0;
2118                         }
2119                         
2120                         /*
2121                          *      Find the device
2122                          */
2123                          
2124                         for(dev = dev_base; dev; dev = dev->next)
2125                         {
2126                                 if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST)&&
2127                                         (dev->pa_addr==addr.s_addr))
2128                                         break;
2129                         }
2130                         
2131                         /*
2132                          *      Did we find one
2133                          */
2134                          
2135                         if(dev) 
2136                         {
2137                                 strcpy(sk->ip_mc_name,dev->name);
2138                                 return 0;
2139                         }
2140                         return -EADDRNOTAVAIL;
2141                 }
2142                 
2143                 case IP_ADD_MEMBERSHIP: 
2144                 {
2145                 
2146 /*
2147  *      FIXME: Add/Del membership should have a semaphore protecting them from re-entry
2148  */
2149                         struct ip_mreq mreq;
2150                         static struct options optmem;
2151                         unsigned long route_src;
2152                         struct rtable *rt;
2153                         struct device *dev=NULL;
2154                         
2155                         /*
2156                          *      Check the arguments.
2157                          */
2158 
2159                         err=verify_area(VERIFY_READ, optval, sizeof(mreq));
2160                         if(err)
2161                                 return err;
2162 
2163                         memcpy_fromfs(&mreq,optval,sizeof(mreq));
2164 
2165                         /* 
2166                          *      Get device for use later
2167                          */
2168 
2169                         if(mreq.imr_interface.s_addr==INADDR_ANY) 
2170                         {
2171                                 /*
2172                                  *      Not set so scan.
2173                                  */
2174                                 if((rt=ip_rt_route(mreq.imr_multiaddr.s_addr,&optmem, &route_src))!=NULL)
2175                                 {
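                                              /* The route lookup is only used to find the
                                                 outgoing device; drop rt_use again straight
                                                 away, presumably balancing the count that
                                                 ip_rt_route() took. */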
2176                                         dev=rt->rt_dev;
2177                                         rt->rt_use--;
2178                                 }
2179                         }
2180                         else
2181                         {
2182                                 /*
2183                                  *      Find a suitable device.
2184                                  */
2185                                 for(dev = dev_base; dev; dev = dev->next)
2186                                 {
2187                                         if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST)&&
2188                                                 (dev->pa_addr==mreq.imr_interface.s_addr))
2189                                                 break;
2190                                 }
2191                         }
2192                         
2193                         /*
2194                          *      No device, no cookies.
2195                          */
2196                          
2197                         if(!dev)
2198                                 return -ENODEV;
2199                                 
2200                         /*
2201                          *      Join group.
2202                          */
2203                          
2204                         return ip_mc_join_group(sk,dev,mreq.imr_multiaddr.s_addr);
2205                 }
2206                 
2207                 case IP_DROP_MEMBERSHIP: 
2208                 {
2209                         struct ip_mreq mreq;
2210                         struct rtable *rt;
2211                         static struct options optmem;
2212                         unsigned long route_src;
2213                         struct device *dev=NULL;
2214 
2215                         /*
2216                          *      Check the arguments
2217                          */
2218                          
2219                         err=verify_area(VERIFY_READ, optval, sizeof(mreq));
2220                         if(err)
2221                                 return err;
2222 
2223                         memcpy_fromfs(&mreq,optval,sizeof(mreq));
2224 
2225                         /*
2226                          *      Get device for use later 
2227                          */
2228  
2229                         if(mreq.imr_interface.s_addr==INADDR_ANY) 
2230                         {
2231                                 if((rt=ip_rt_route(mreq.imr_multiaddr.s_addr,&optmem, &route_src))!=NULL)
2232                                 {
2233                                         dev=rt->rt_dev;
2234                                         rt->rt_use--;
2235                                 }
2236                         }
2237                         else 
2238                         {
2239                                 for(dev = dev_base; dev; dev = dev->next)
2240                                 {
2241                                         if((dev->flags&IFF_UP)&& (dev->flags&IFF_MULTICAST)&&
2242                                                         (dev->pa_addr==mreq.imr_interface.s_addr))
2243                                                 break;
2244                                 }
2245                         }
2246                         
2247                         /*
2248                          *      Did we find a suitable device.
2249                          */
2250                          
2251                         if(!dev)
2252                                 return -ENODEV;
2253                                 
2254                         /*
2255                          *      Leave group
2256                          */
2257                          
2258                         return ip_mc_leave_group(sk,dev,mreq.imr_multiaddr.s_addr);
2259                 }
2260 #endif                  
2261 #ifdef CONFIG_IP_FIREWALL
2262                 case IP_FW_ADD_BLK:
2263                 case IP_FW_DEL_BLK:
2264                 case IP_FW_ADD_FWD:
2265                 case IP_FW_DEL_FWD:
2266                 case IP_FW_CHK_BLK:
2267                 case IP_FW_CHK_FWD:
2268                 case IP_FW_FLUSH_BLK:
2269                 case IP_FW_FLUSH_FWD:
2270                 case IP_FW_ZERO_BLK:
2271                 case IP_FW_ZERO_FWD:
2272                 case IP_FW_POLICY_BLK:
2273                 case IP_FW_POLICY_FWD:
2274                         if(!suser())
2275                                 return -EPERM;
2276                         if(optlen>sizeof(tmp_fw) || optlen<1)
2277                                 return -EINVAL;
2278                         err=verify_area(VERIFY_READ,optval,optlen);
2279                         if(err)
2280                                 return err;
2281                         memcpy_fromfs(&tmp_fw,optval,optlen);
2282                         err=ip_fw_ctl(optname, &tmp_fw,optlen);
2283                         return -err;    /* -0 is 0 after all */
2284                         
2285 #endif
2286 #ifdef CONFIG_IP_ACCT
2287                 case IP_ACCT_DEL:
2288                 case IP_ACCT_ADD:
2289                 case IP_ACCT_FLUSH:
2290                 case IP_ACCT_ZERO:
2291                         if(!suser())
2292                                 return -EPERM;
2293                         if(optlen>sizeof(tmp_fw) || optlen<1)
2294                                 return -EINVAL;
2295                         err=verify_area(VERIFY_READ,optval,optlen);
2296                         if(err)
2297                                 return err;
2298                         memcpy_fromfs(&tmp_fw, optval,optlen);
2299                         err=ip_acct_ctl(optname, &tmp_fw,optlen);
2300                         return -err;    /* -0 is 0 after all */
2301 #endif
2302                 /* IP_OPTIONS and friends go here eventually */
2303                 default:
2304                         return(-ENOPROTOOPT);
2305         }
2306 }
2307 
2308 /*
2309  *      Get the options. Note for future reference: the GET of IP options gets the
2310  *      _received_ ones; the SET sets the _sent_ ones.
2311  */
2312 
2313 int ip_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
2314 {
2315         int val,err;
2316 #ifdef CONFIG_IP_MULTICAST
2317         int len;
2318 #endif
2319         
2320         if(level!=SOL_IP)
2321                 return -EOPNOTSUPP;
2322 
2323         switch(optname)
2324         {
2325                 case IP_TOS:
2326                         val=sk->ip_tos;
2327                         break;
2328                 case IP_TTL:
2329                         val=sk->ip_ttl;
2330                         break;
2331 #ifdef CONFIG_IP_MULTICAST                      
2332                 case IP_MULTICAST_TTL:
2333                         val=sk->ip_mc_ttl;
2334                         break;
2335                 case IP_MULTICAST_LOOP:
2336                         val=sk->ip_mc_loop;
2337                         break;
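                     /*
                      *      IP_MULTICAST_IF is the odd one out: it returns the bound
                      *      interface name as a string rather than an integer value,
                      *      hence the early return instead of the shared copy-out below.
                      */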
2338                 case IP_MULTICAST_IF:
2339                         err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
2340                         if(err)
2341                                 return err;
2342                         len=strlen(sk->ip_mc_name);
2343                         err=verify_area(VERIFY_WRITE, optval, len);
2344                         if(err)
2345                                 return err;
2346                         put_fs_long(len,(unsigned long *) optlen);
2347                         memcpy_tofs((void *)optval,sk->ip_mc_name, len);
2348                         return 0;
2349 #endif
2350                 default:
2351                         return(-ENOPROTOOPT);
2352         }
2353         err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
2354         if(err)
2355                 return err;
2356         put_fs_long(sizeof(int),(unsigned long *) optlen);
2357 
2358         err=verify_area(VERIFY_WRITE, optval, sizeof(int));
2359         if(err)
2360                 return err;
2361         put_fs_long(val,(unsigned long *)optval);
2362 
2363         return(0);
2364 }
2365 
2366 /*
2367  *      IP protocol layer initialiser
2368  */
2369 
2370 static struct packet_type ip_packet_type =
2371 {
2372         0,      /* MUTTER ntohs(ETH_P_IP),*/
2373         NULL,   /* All devices */
2374         ip_rcv,
2375         NULL,
2376         NULL,
2377 };
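     /*
      *      The type field above is deliberately left as 0 and filled in with
      *      htons(ETH_P_IP) by ip_init() below, presumably because htons() could
      *      not be used in the static initialiser here (hence the MUTTER).
      */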
2378 
2379 /*
2380  *      Device notifier
2381  */
2382  
2383 static int ip_rt_event(unsigned long event, void *ptr)
2384 {
2385         if(event==NETDEV_DOWN)
2386                 ip_rt_flush(ptr);
2387         return NOTIFY_DONE;
2388 }
2389 
2390 struct notifier_block ip_rt_notifier={
2391         ip_rt_event,
2392         NULL,
2393         0
2394 };
2395 
2396 /*
2397  *      IP registers the packet type and then calls the subprotocol initialisers
2398  */
2399 
2400 void ip_init(void)
2401 {
2402         ip_packet_type.type=htons(ETH_P_IP);
2403         dev_add_pack(&ip_packet_type);
2404 
2405         /* So we flush routes when a device is downed */        
2406         register_netdevice_notifier(&ip_rt_notifier);
2407 /*      ip_raw_init();
2408         ip_packet_init();
2409         ip_tcp_init();
2410         ip_udp_init();*/
2411 }
