root/net/inet/ip.c


DEFINITIONS

This source file includes the following definitions:
  1. ip_ioctl
  2. strict_route
  3. loose_route
  4. ip_route_check
  5. build_options
  6. ip_send
  7. ip_build_header
  8. do_options
  9. ip_compute_csum
  10. ip_send_check
  11. ip_frag_create
  12. ip_find
  13. ip_free
  14. ip_expire
  15. ip_create
  16. ip_done
  17. ip_glue
  18. ip_defrag
  19. ip_fragment
  20. ip_forward
  21. ip_rcv
  22. ip_loopback
  23. ip_queue_xmit
  24. ip_mc_procinfo
  25. ip_setsockopt
  26. ip_getsockopt
  27. ip_rt_event
  28. ip_init

   1 /*
   2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
   3  *              operating system.  INET is implemented using the  BSD Socket
   4  *              interface as the means of communication with the user level.
   5  *
   6  *              The Internet Protocol (IP) module.
   7  *
   8  * Version:     @(#)ip.c        1.0.16b 9/1/93
   9  *
  10  * Authors:     Ross Biro, <bir7@leland.Stanford.Edu>
  11  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12  *              Donald Becker, <becker@super.org>
  13  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
  14  *              Richard Underwood
  15  *              Stefan Becker, <stefanb@yello.ping.de>
  16  *              
  17  *
  18  * Fixes:
  19  *              Alan Cox        :       Commented a couple of minor bits of surplus code
  20  *              Alan Cox        :       Undefining IP_FORWARD doesn't include the code
  21  *                                      (just stops a compiler warning).
  22  *              Alan Cox        :       Frames with >=MAX_ROUTE record routes, strict routes or loose routes
  23  *                                      are junked rather than corrupting things.
  24  *              Alan Cox        :       Frames to bad broadcast subnets are dumped
  25  *                                      We used to process them non broadcast and
  26  *                                      boy could that cause havoc.
  27  *              Alan Cox        :       ip_forward sets the free flag on the
  28  *                                      new frame it queues. Still crap because
  29  *                                      it copies the frame but at least it
  30  *                                      doesn't eat memory too.
  31  *              Alan Cox        :       Generic queue code and memory fixes.
  32  *              Fred Van Kempen :       IP fragment support (borrowed from NET2E)
  33  *              Gerhard Koerting:       Forward fragmented frames correctly.
  34  *              Gerhard Koerting:       Fixes to my fix of the above 8-).
  35  *              Gerhard Koerting:       IP interface addressing fix.
  36  *              Linus Torvalds  :       More robustness checks
  37  *              Alan Cox        :       Even more checks: Still not as robust as it ought to be
  38  *              Alan Cox        :       Save IP header pointer for later
  39  *              Alan Cox        :       ip option setting
  40  *              Alan Cox        :       Use ip_tos/ip_ttl settings
  41  *              Alan Cox        :       Fragmentation bogosity removed
  42  *                                      (Thanks to Mark.Bush@prg.ox.ac.uk)
  43  *              Dmitry Gorodchanin :    Send of a raw packet crash fix.
  44  *              Alan Cox        :       Silly ip bug when an overlength
  45  *                                      fragment turns up. Now frees the
  46  *                                      queue.
  47  *              Linus Torvalds/ :       Memory leakage on fragmentation
  48  *              Alan Cox        :       handling.
  49  *              Gerhard Koerting:       Forwarding uses IP priority hints
  50  *              Teemu Rantanen  :       Fragment problems.
  51  *              Alan Cox        :       General cleanup, comments and reformat
  52  *              Alan Cox        :       SNMP statistics
  53  *              Alan Cox        :       BSD address rule semantics. Also see
  54  *                                      UDP as there is a nasty checksum issue
  55  *                                      if you do things the wrong way.
  56  *              Alan Cox        :       Always defrag, moved IP_FORWARD to the config.in file
  57  *              Alan Cox        :       IP options adjust sk->priority.
  58  *              Pedro Roque     :       Fix mtu/length error in ip_forward.
  59  *              Alan Cox        :       Avoid ip_chk_addr when possible.
  60  *      Richard Underwood       :       IP multicasting.
  61  *              Alan Cox        :       Cleaned up multicast handlers.
  62  *              Alan Cox        :       RAW sockets demultiplex in the BSD style.
  63  *              Gunther Mayer   :       Fix the SNMP reporting typo
  64  *              Alan Cox        :       Always in group 224.0.0.1
  65  *              Alan Cox        :       Multicast loopback error for 224.0.0.1
  66  *              Alan Cox        :       IP_MULTICAST_LOOP option.
  67  *              Alan Cox        :       Use notifiers.
  68  *              Bjorn Ekwall    :       Removed ip_csum (from slhc.c too)
  69  *              Bjorn Ekwall    :       Moved ip_fast_csum to ip.h (inline!)
  70  *              Stefan Becker   :       Send out ICMP HOST REDIRECT
  71  *  
  72  *
  73  * To Fix:
  74  *              IP option processing is mostly not needed. ip_forward needs to know about routing rules
  75  *              and time stamp but that's about all. Use the route mtu field here too
  76  *              IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
  77  *              and could be made very efficient with the addition of some virtual memory hacks to permit
  78  *              the allocation of a buffer that can then be 'grown' by twiddling page tables.
  79  *              Output fragmentation wants updating along with the buffer management to use a single 
  80  *              interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
  81  *              output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
  82  *              fragmentation anyway.
  83  *
  84  *              This program is free software; you can redistribute it and/or
  85  *              modify it under the terms of the GNU General Public License
  86  *              as published by the Free Software Foundation; either version
  87  *              2 of the License, or (at your option) any later version.
  88  */
  89 
  90 #include <asm/segment.h>
  91 #include <asm/system.h>
  92 #include <linux/types.h>
  93 #include <linux/kernel.h>
  94 #include <linux/sched.h>
  95 #include <linux/mm.h>
  96 #include <linux/string.h>
  97 #include <linux/errno.h>
  98 #include <linux/config.h>
  99 
 100 #include <linux/socket.h>
 101 #include <linux/sockios.h>
 102 #include <linux/in.h>
 103 #include <linux/inet.h>
 104 #include <linux/netdevice.h>
 105 #include <linux/etherdevice.h>
 106 
 107 #include "snmp.h"
 108 #include "ip.h"
 109 #include "protocol.h"
 110 #include "route.h"
 111 #include "tcp.h"
 112 #include <linux/skbuff.h>
 113 #include "sock.h"
 114 #include "arp.h"
 115 #include "icmp.h"
 116 #include "raw.h"
 117 #include <linux/igmp.h>
 118 #include <linux/ip_fw.h>
 119 
 120 #define CONFIG_IP_DEFRAG
 121 
 122 extern int last_retran;
 123 extern void sort_send(struct sock *sk);
 124 
 125 #define min(a,b)        ((a)<(b)?(a):(b))
 126 #define LOOPBACK(x)     (((x) & htonl(0xff000000)) == htonl(0x7f000000))
 127 
 128 /*
 129  *      SNMP management statistics
 130  */
 131 
 132 #ifdef CONFIG_IP_FORWARD
 133 struct ip_mib ip_statistics={1,64,};    /* Forwarding=Yes, Default TTL=64 */
 134 #else
 135 struct ip_mib ip_statistics={0,64,};    /* Forwarding=No, Default TTL=64 */
 136 #endif
 137 
 138 /*
 139  *      Handle the issuing of an ioctl() request
 140  *      for the ip device. This is scheduled to
 141  *      disappear
 142  */
 143 
 144 int ip_ioctl(struct sock *sk, int cmd, unsigned long arg)
 145 {
 146         switch(cmd)
 147         {
 148                 default:
 149                         return(-EINVAL);
 150         }
 151 }
 152 
 153 
 154 /* these two routines will do routing. */
 155 
 156 static void
 157 strict_route(struct iphdr *iph, struct options *opt)
 158 {
 159 }
 160 
 161 
 162 static void
 163 loose_route(struct iphdr *iph, struct options *opt)
 164 {
 165 }
 166 
 167 
 168 
 169 
 170 /* This routine will check to see if we have lost a gateway. */
 171 void
 172 ip_route_check(unsigned long daddr)
 173 {
 174 }
 175 
 176 
 177 #if 0
 178 /* this routine puts the options at the end of an ip header. */
 179 static int
 180 build_options(struct iphdr *iph, struct options *opt)
 181 {
 182   unsigned char *ptr;
 183   /* currently we don't support any options. */
 184   ptr = (unsigned char *)(iph+1);
 185   *ptr = 0;
 186   return (4);
 187 }
 188 #endif
 189 
 190 
 191 /*
 192  *      Take an skb, and fill in the MAC header.
 193  */
 194 
 195 static int ip_send(struct sk_buff *skb, unsigned long daddr, int len, struct device *dev, unsigned long saddr)
 196 {
 197         int mac = 0;
 198 
 199         skb->dev = dev;
 200         skb->arp = 1;
 201         if (dev->hard_header)
 202         {
 203                 /*
 204                  *      Build a hardware header. Source address is our mac, destination unknown
 205                  *      (rebuild header will sort this out)
 206                  */
 207                 mac = dev->hard_header(skb->data, dev, ETH_P_IP, NULL, NULL, len, skb);
 208                 if (mac < 0)
 209                 {
 210                         mac = -mac;
 211                         skb->arp = 0;
 212                         skb->raddr = daddr;     /* next routing address */
 213                 }
 214         }
 215         return mac;
 216 }
 217 
 218 int ip_id_count = 0;
 219 
 220 /*
 221  * This routine builds the appropriate hardware/IP headers for
 222  * the routine.  It assumes that if *dev != NULL then the
 223  * protocol knows what it's doing, otherwise it uses the
 224  * routing/ARP tables to select a device struct.
 225  */
 226 int ip_build_header(struct sk_buff *skb, unsigned long saddr, unsigned long daddr,
 227                 struct device **dev, int type, struct options *opt, int len, int tos, int ttl)
 228 {
 229         static struct options optmem;
 230         struct iphdr *iph;
 231         struct rtable *rt;
 232         unsigned char *buff;
 233         unsigned long raddr;
 234         int tmp;
 235         unsigned long src;
 236 
 237         buff = skb->data;
 238 
 239         /*
 240          *      See if we need to look up the device.
 241          */
 242 
 243 #ifdef CONFIG_INET_MULTICAST    
 244         if(MULTICAST(daddr) && *dev==NULL && skb->sk && *skb->sk->ip_mc_name)
 245                 *dev=dev_get(skb->sk->ip_mc_name);
 246 #endif
 247         if (*dev == NULL)
 248         {
 249                 if(skb->localroute)
 250                         rt = ip_rt_local(daddr, &optmem, &src);
 251                 else
 252                         rt = ip_rt_route(daddr, &optmem, &src);
 253                 if (rt == NULL)
 254                 {
 255                         ip_statistics.IpOutNoRoutes++;
 256                         return(-ENETUNREACH);
 257                 }
 258 
 259                 *dev = rt->rt_dev;
 260                 /*
 261                  *      If the frame is from us and going off machine it MUST MUST MUST
 262                  *      have the output device ip address and never the loopback
 263                  */
 264                 if (LOOPBACK(saddr) && !LOOPBACK(daddr))
 265                         saddr = src;/*rt->rt_dev->pa_addr;*/
 266                 raddr = rt->rt_gateway;
 267 
 268                 opt = &optmem;
 269         }
 270         else
 271         {
 272                 /*
 273                  *      We still need the address of the first hop.
 274                  */
 275                 if(skb->localroute)
 276                         rt = ip_rt_local(daddr, &optmem, &src);
 277                 else
 278                         rt = ip_rt_route(daddr, &optmem, &src);
 279                 /*
 280                  *      If the frame is from us and going off machine it MUST MUST MUST
 281                  *      have the output device ip address and never the loopback
 282                  */
 283                 if (LOOPBACK(saddr) && !LOOPBACK(daddr))
 284                         saddr = src;/*rt->rt_dev->pa_addr;*/
 285 
 286                 raddr = (rt == NULL) ? 0 : rt->rt_gateway;
 287         }
 288 
 289         /*
 290          *      No source addr so make it our addr
 291          */
 292         if (saddr == 0)
 293                 saddr = src;
 294 
 295         /*
 296          *      No gateway so aim at the real destination
 297          */
 298         if (raddr == 0)
 299                 raddr = daddr;
 300 
 301         /*
 302          *      Now build the MAC header.
 303          */
 304 
 305         tmp = ip_send(skb, raddr, len, *dev, saddr);
 306         buff += tmp;
 307         len -= tmp;
 308 
 309         /*
 310          *      Book keeping
 311          */
 312 
 313         skb->dev = *dev;
 314         skb->saddr = saddr;
 315         if (skb->sk)
 316                 skb->sk->saddr = saddr;
 317 
 318         /*
 319          *      Now build the IP header.
 320          */
 321 
 322         /*
 323          *      If we are using IPPROTO_RAW, then we don't need an IP header, since
 324          *      one is being supplied to us by the user
 325          */
 326 
 327         if(type == IPPROTO_RAW)
 328                 return (tmp);
 329 
 330         iph = (struct iphdr *)buff;
 331         iph->version  = 4;
 332         iph->tos      = tos;
 333         iph->frag_off = 0;
 334         iph->ttl      = ttl;
 335         iph->daddr    = daddr;
 336         iph->saddr    = saddr;
 337         iph->protocol = type;
 338         iph->ihl      = 5;
 339         skb->ip_hdr   = iph;
 340 
 341         /* Setup the IP options. */
 342 #ifdef Not_Yet_Avail
 343         build_options(iph, opt);
 344 #endif
 345 
 346         return(20 + tmp);       /* IP header plus MAC header size */
 347 }
 348 
 349 
 350 static int
 351 do_options(struct iphdr *iph, struct options *opt)
 352 {
 353   unsigned char *buff;
 354   int done = 0;
 355   int i, len = sizeof(struct iphdr);
 356 
 357   /* Zero out the options. */
 358   opt->record_route.route_size = 0;
 359   opt->loose_route.route_size  = 0;
 360   opt->strict_route.route_size = 0;
 361   opt->tstamp.ptr              = 0;
 362   opt->security                = 0;
 363   opt->compartment             = 0;
 364   opt->handling                = 0;
 365   opt->stream                  = 0;
 366   opt->tcc                     = 0;
  367   return(0);    /* The option parsing below is currently disabled and never reached. */
 368 
 369   /* Advance the pointer to start at the options. */
 370   buff = (unsigned char *)(iph + 1);
 371 
 372   /* Now start the processing. */
 373   while (!done && len < iph->ihl*4) switch(*buff) {
 374         case IPOPT_END:
 375                 done = 1;
 376                 break;
 377         case IPOPT_NOOP:
 378                 buff++;
 379                 len++;
 380                 break;
 381         case IPOPT_SEC:
 382                 buff++;
 383                 if (*buff != 11) return(1);
 384                 buff++;
 385                 opt->security = ntohs(*(unsigned short *)buff);
 386                 buff += 2;
 387                 opt->compartment = ntohs(*(unsigned short *)buff);
 388                 buff += 2;
 389                 opt->handling = ntohs(*(unsigned short *)buff);
 390                 buff += 2;
 391                 opt->tcc = ((*buff) << 16) + ntohs(*(unsigned short *)(buff+1));
 392                 buff += 3;
 393                 len += 11;
 394                 break;
 395         case IPOPT_LSRR:
 396                 buff++;
 397                 if ((*buff - 3)% 4 != 0) return(1);
 398                 len += *buff;
 399                 opt->loose_route.route_size = (*buff -3)/4;
 400                 buff++;
 401                 if (*buff % 4 != 0) return(1);
 402                 opt->loose_route.pointer = *buff/4 - 1;
 403                 buff++;
 404                 buff++;
 405                 for (i = 0; i < opt->loose_route.route_size; i++) {
 406                         if(i>=MAX_ROUTE)
 407                                 return(1);
 408                         opt->loose_route.route[i] = *(unsigned long *)buff;
 409                         buff += 4;
 410                 }
 411                 break;
 412         case IPOPT_SSRR:
 413                 buff++;
 414                 if ((*buff - 3)% 4 != 0) return(1);
 415                 len += *buff;
 416                 opt->strict_route.route_size = (*buff -3)/4;
 417                 buff++;
 418                 if (*buff % 4 != 0) return(1);
 419                 opt->strict_route.pointer = *buff/4 - 1;
 420                 buff++;
 421                 buff++;
 422                 for (i = 0; i < opt->strict_route.route_size; i++) {
 423                         if(i>=MAX_ROUTE)
 424                                 return(1);
 425                         opt->strict_route.route[i] = *(unsigned long *)buff;
 426                         buff += 4;
 427                 }
 428                 break;
 429         case IPOPT_RR:
 430                 buff++;
 431                 if ((*buff - 3)% 4 != 0) return(1);
 432                 len += *buff;
 433                 opt->record_route.route_size = (*buff -3)/4;
 434                 buff++;
 435                 if (*buff % 4 != 0) return(1);
 436                 opt->record_route.pointer = *buff/4 - 1;
 437                 buff++;
 438                 buff++;
 439                 for (i = 0; i < opt->record_route.route_size; i++) {
 440                         if(i>=MAX_ROUTE)
 441                                 return 1;
 442                         opt->record_route.route[i] = *(unsigned long *)buff;
 443                         buff += 4;
 444                 }
 445                 break;
 446         case IPOPT_SID:
 447                 len += 4;
 448                 buff +=2;
 449                 opt->stream = *(unsigned short *)buff;
 450                 buff += 2;
 451                 break;
 452         case IPOPT_TIMESTAMP:
 453                 buff++;
 454                 len += *buff;
 455                 if (*buff % 4 != 0) return(1);
 456                 opt->tstamp.len = *buff / 4 - 1;
 457                 buff++;
 458                 if ((*buff - 1) % 4 != 0) return(1);
 459                 opt->tstamp.ptr = (*buff-1)/4;
 460                 buff++;
 461                 opt->tstamp.x.full_char = *buff;
 462                 buff++;
 463                 for (i = 0; i < opt->tstamp.len; i++) {
 464                         opt->tstamp.data[i] = *(unsigned long *)buff;
 465                         buff += 4;
 466                 }
 467                 break;
 468         default:
 469                 return(1);
 470   }
 471 
 472   if (opt->record_route.route_size == 0) {
 473         if (opt->strict_route.route_size != 0) {
 474                 memcpy(&(opt->record_route), &(opt->strict_route),
 475                                              sizeof(opt->record_route));
 476         } else if (opt->loose_route.route_size != 0) {
 477                 memcpy(&(opt->record_route), &(opt->loose_route),
 478                                              sizeof(opt->record_route));
 479         }
 480   }
 481 
 482   if (opt->strict_route.route_size != 0 &&
 483       opt->strict_route.route_size != opt->strict_route.pointer) {
 484         strict_route(iph, opt);
 485         return(0);
 486   }
 487 
 488   if (opt->loose_route.route_size != 0 &&
 489       opt->loose_route.route_size != opt->loose_route.pointer) {
 490         loose_route(iph, opt);
 491         return(0);
 492   }
 493 
 494   return(0);
 495 }
 496 
 497 /*
 498  * This routine does all the checksum computations that don't
 499  * require anything special (like copying or special headers).
 500  */
 501 
 502 unsigned short ip_compute_csum(unsigned char * buff, int len)
 503 {
 504         unsigned long sum = 0;
 505 
 506         /* Do the first multiple of 4 bytes and convert to 16 bits. */
 507         if (len > 3)
 508         {
 509                 __asm__("clc\n"
 510                 "1:\t"
 511                 "lodsl\n\t"
 512                 "adcl %%eax, %%ebx\n\t"
 513                 "loop 1b\n\t"
 514                 "adcl $0, %%ebx\n\t"
 515                 "movl %%ebx, %%eax\n\t"
 516                 "shrl $16, %%eax\n\t"
 517                 "addw %%ax, %%bx\n\t"
 518                 "adcw $0, %%bx"
 519                 : "=b" (sum) , "=S" (buff)
 520                 : "0" (sum), "c" (len >> 2) ,"1" (buff)
 521                 : "ax", "cx", "si", "bx" );
 522         }
 523         if (len & 2)
 524         {
 525                 __asm__("lodsw\n\t"
 526                 "addw %%ax, %%bx\n\t"
 527                 "adcw $0, %%bx"
 528                 : "=b" (sum), "=S" (buff)
 529                 : "0" (sum), "1" (buff)
 530                 : "bx", "ax", "si");
 531         }
 532         if (len & 1)
 533         {
 534                 __asm__("lodsb\n\t"
 535                 "movb $0, %%ah\n\t"
 536                 "addw %%ax, %%bx\n\t"
 537                 "adcw $0, %%bx"
 538                 : "=b" (sum), "=S" (buff)
 539                 : "0" (sum), "1" (buff)
 540                 : "bx", "ax", "si");
 541         }
  542         sum = ~sum;
 543         return(sum & 0xffff);
 544 }
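
/*
 *	For reference only: a portable C sketch of the same computation, an
 *	RFC 1071 style one's complement sum folded to 16 bits and inverted.
 *	It mirrors the little-endian odd-byte handling of the assembler above.
 *	The helper name is ours and nothing calls it; the inline assembler
 *	version above is what this file actually uses.
 */
#if 0
static unsigned short ip_compute_csum_ref(unsigned char *buff, int len)
{
	unsigned long sum = 0;

	/* Sum the buffer as 16-bit words. */
	while (len > 1)
	{
		sum += *(unsigned short *) buff;
		buff += 2;
		len -= 2;
	}

	/* Add a trailing odd byte, if any (low byte, as lodsb does above). */
	if (len == 1)
		sum += *buff;

	/* Fold the 32 bit sum to 16 bits and take the one's complement. */
	sum = (sum & 0xffff) + (sum >> 16);
	sum += (sum >> 16);
	return((unsigned short) (~sum & 0xffff));
}
#endif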
 545 
 546 /*
 547  *      Generate a checksum for an outgoing IP datagram.
 548  */
 549 
 550 void ip_send_check(struct iphdr *iph)
 551 {
 552         iph->check = 0;
 553         iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
 554 }
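
/*
 *	Illustrative sketch: ip_forward() below recomputes the whole header
 *	checksum after decrementing the TTL, although its comment notes that
 *	an incremental adjustment (as Phil Karn does in KA9Q) would do.  Only
 *	the 16-bit TTL/protocol word changes, and only by htons(0x0100), so
 *	the stored complemented sum can be patched with an end-around carry.
 *	The helper name is our own and is not called anywhere in this file.
 */
#if 0
static void ip_decrease_ttl(struct iphdr *iph)
{
	unsigned long check = iph->check;

	/*
	 *	The ttl/protocol word drops by htons(0x0100), so the
	 *	complemented sum stored in iph->check rises by that amount.
	 */
	check += htons(0x0100);

	/* Fold any carry back in (one's complement end-around carry). */
	iph->check = (check + (check >> 16)) & 0xffff;
	iph->ttl--;
}
#endif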
 555 
 556 /************************ Fragment Handlers From NET2E **********************************/
 557 
 558 
 559 /*
 560  *      This fragment handler is a bit of a heap. On the other hand it works quite
 561  *      happily and handles things quite well.
 562  */
 563 
 564 static struct ipq *ipqueue = NULL;              /* IP fragment queue    */
 565 
 566 /*
 567  *      Create a new fragment entry.
 568  */
 569 
 570 static struct ipfrag *ip_frag_create(int offset, int end, struct sk_buff *skb, unsigned char *ptr)
 571 {
 572         struct ipfrag *fp;
 573 
 574         fp = (struct ipfrag *) kmalloc(sizeof(struct ipfrag), GFP_ATOMIC);
 575         if (fp == NULL)
 576         {
 577                 printk("IP: frag_create: no memory left !\n");
 578                 return(NULL);
 579         }
 580         memset(fp, 0, sizeof(struct ipfrag));
 581 
 582         /* Fill in the structure. */
 583         fp->offset = offset;
 584         fp->end = end;
 585         fp->len = end - offset;
 586         fp->skb = skb;
 587         fp->ptr = ptr;
 588 
 589         return(fp);
 590 }
 591 
 592 
 593 /*
 594  *      Find the correct entry in the "incomplete datagrams" queue for
 595  *      this IP datagram, and return the queue entry address if found.
 596  */
 597 
 598 static struct ipq *ip_find(struct iphdr *iph)
 599 {
 600         struct ipq *qp;
 601         struct ipq *qplast;
 602 
 603         cli();
 604         qplast = NULL;
 605         for(qp = ipqueue; qp != NULL; qplast = qp, qp = qp->next)
 606         {
 607                 if (iph->id== qp->iph->id && iph->saddr == qp->iph->saddr &&
 608                         iph->daddr == qp->iph->daddr && iph->protocol == qp->iph->protocol)
 609                 {
 610                         del_timer(&qp->timer);  /* So it doesn't vanish on us. The timer will be reset anyway */
 611                         sti();
 612                         return(qp);
 613                 }
 614         }
 615         sti();
 616         return(NULL);
 617 }
 618 
 619 
 620 /*
 621  *      Remove an entry from the "incomplete datagrams" queue, either
 622  *      because we completed, reassembled and processed it, or because
 623  *      it timed out.
 624  */
 625 
 626 static void ip_free(struct ipq *qp)
 627 {
 628         struct ipfrag *fp;
 629         struct ipfrag *xp;
 630 
 631         /*
 632          * Stop the timer for this entry.
 633          */
 634 
 635         del_timer(&qp->timer);
 636 
 637         /* Remove this entry from the "incomplete datagrams" queue. */
 638         cli();
 639         if (qp->prev == NULL)
 640         {
 641                 ipqueue = qp->next;
 642                 if (ipqueue != NULL)
 643                         ipqueue->prev = NULL;
 644         }
 645         else
 646         {
 647                 qp->prev->next = qp->next;
 648                 if (qp->next != NULL)
 649                         qp->next->prev = qp->prev;
 650         }
 651 
 652         /* Release all fragment data. */
 653 
 654         fp = qp->fragments;
 655         while (fp != NULL)
 656         {
 657                 xp = fp->next;
 658                 IS_SKB(fp->skb);
 659                 kfree_skb(fp->skb,FREE_READ);
 660                 kfree_s(fp, sizeof(struct ipfrag));
 661                 fp = xp;
 662         }
 663 
 664         /* Release the MAC header. */
 665         kfree_s(qp->mac, qp->maclen);
 666 
 667         /* Release the IP header. */
 668         kfree_s(qp->iph, qp->ihlen + 8);
 669 
 670         /* Finally, release the queue descriptor itself. */
 671         kfree_s(qp, sizeof(struct ipq));
 672         sti();
 673 }
 674 
 675 
 676 /*
 677  *      Oops- a fragment queue timed out.  Kill it and send an ICMP reply.
 678  */
 679 
 680 static void ip_expire(unsigned long arg)
 681 {
 682         struct ipq *qp;
 683 
 684         qp = (struct ipq *)arg;
 685 
 686         /*
 687          *      Send an ICMP "Fragment Reassembly Timeout" message.
 688          */
 689 
 690         ip_statistics.IpReasmTimeout++;
 691         ip_statistics.IpReasmFails++;   
 692         /* This if is always true... shrug */
 693         if(qp->fragments!=NULL)
 694                 icmp_send(qp->fragments->skb,ICMP_TIME_EXCEEDED,
 695                                 ICMP_EXC_FRAGTIME, 0, qp->dev);
 696 
 697         /*
 698          *      Nuke the fragment queue.
 699          */
 700         ip_free(qp);
 701 }
 702 
 703 
 704 /*
 705  *      Add an entry to the 'ipq' queue for a newly received IP datagram.
 706  *      We will (hopefully :-) receive all other fragments of this datagram
 707  *      in time, so we just create a queue for this datagram, in which we
 708  *      will insert the received fragments at their respective positions.
 709  */
 710 
 711 static struct ipq *ip_create(struct sk_buff *skb, struct iphdr *iph, struct device *dev)
 712 {
 713         struct ipq *qp;
 714         int maclen;
 715         int ihlen;
 716 
 717         qp = (struct ipq *) kmalloc(sizeof(struct ipq), GFP_ATOMIC);
 718         if (qp == NULL)
 719         {
 720                 printk("IP: create: no memory left !\n");
 721                 return(NULL);
 723         }
 724         memset(qp, 0, sizeof(struct ipq));
 725 
 726         /*
 727          *      Allocate memory for the MAC header.
 728          *
  729          *      FIXME: We have a maximum MAC header size limit defined
  730          *      elsewhere. We should use it here and avoid the 3 kmalloc() calls
 731          */
 732 
 733         maclen = ((unsigned long) iph) - ((unsigned long) skb->data);
 734         qp->mac = (unsigned char *) kmalloc(maclen, GFP_ATOMIC);
 735         if (qp->mac == NULL)
 736         {
 737                 printk("IP: create: no memory left !\n");
 738                 kfree_s(qp, sizeof(struct ipq));
 739                 return(NULL);
 740         }
 741 
 742         /*
 743          *      Allocate memory for the IP header (plus 8 octets for ICMP).
 744          */
 745 
 746         ihlen = (iph->ihl * sizeof(unsigned long));
 747         qp->iph = (struct iphdr *) kmalloc(ihlen + 8, GFP_ATOMIC);
 748         if (qp->iph == NULL)
 749         {
 750                 printk("IP: create: no memory left !\n");
 751                 kfree_s(qp->mac, maclen);
 752                 kfree_s(qp, sizeof(struct ipq));
 753                 return(NULL);
 754         }
 755 
 756         /* Fill in the structure. */
 757         memcpy(qp->mac, skb->data, maclen);
 758         memcpy(qp->iph, iph, ihlen + 8);
 759         qp->len = 0;
 760         qp->ihlen = ihlen;
 761         qp->maclen = maclen;
 762         qp->fragments = NULL;
 763         qp->dev = dev;
 764 
 765         /* Start a timer for this entry. */
 766         qp->timer.expires = IP_FRAG_TIME;               /* about 30 seconds     */
 767         qp->timer.data = (unsigned long) qp;            /* pointer to queue     */
 768         qp->timer.function = ip_expire;                 /* expire function      */
 769         add_timer(&qp->timer);
 770 
 771         /* Add this entry to the queue. */
 772         qp->prev = NULL;
 773         cli();
 774         qp->next = ipqueue;
 775         if (qp->next != NULL)
 776                 qp->next->prev = qp;
 777         ipqueue = qp;
 778         sti();
 779         return(qp);
 780 }
 781 
 782 
 783 /*
 784  *      See if a fragment queue is complete.
 785  */
 786 
 787 static int ip_done(struct ipq *qp)
 788 {
 789         struct ipfrag *fp;
 790         int offset;
 791 
 792         /* Only possible if we received the final fragment. */
 793         if (qp->len == 0)
 794                 return(0);
 795 
 796         /* Check all fragment offsets to see if they connect. */
 797         fp = qp->fragments;
 798         offset = 0;
 799         while (fp != NULL)
 800         {
 801                 if (fp->offset > offset)
 802                         return(0);      /* fragment(s) missing */
 803                 offset = fp->end;
 804                 fp = fp->next;
 805         }
 806 
 807         /* All fragments are present. */
 808         return(1);
 809 }
 810 
 811 
 812 /*
 813  *      Build a new IP datagram from all its fragments.
 814  *
 815  *      FIXME: We copy here because we lack an effective way of handling lists
 816  *      of bits on input. Until the new skb data handling is in I'm not going
 817  *      to touch this with a bargepole. This also causes a 4Kish limit on
 818  *      packet sizes.
 819  */
 820 
 821 static struct sk_buff *ip_glue(struct ipq *qp)
 822 {
 823         struct sk_buff *skb;
 824         struct iphdr *iph;
 825         struct ipfrag *fp;
 826         unsigned char *ptr;
 827         int count, len;
 828 
 829         /*
 830          *      Allocate a new buffer for the datagram.
 831          */
 832 
 833         len = qp->maclen + qp->ihlen + qp->len;
 834 
 835         if ((skb = alloc_skb(len,GFP_ATOMIC)) == NULL)
 836         {
 837                 ip_statistics.IpReasmFails++;
 838                 printk("IP: queue_glue: no memory for gluing queue 0x%X\n", (int) qp);
 839                 ip_free(qp);
 840                 return(NULL);
 841         }
 842 
 843         /* Fill in the basic details. */
 844         skb->len = (len - qp->maclen);
 845         skb->h.raw = skb->data;
 846         skb->free = 1;
 847 
 848         /* Copy the original MAC and IP headers into the new buffer. */
 849         ptr = (unsigned char *) skb->h.raw;
 850         memcpy(ptr, ((unsigned char *) qp->mac), qp->maclen);
 851         ptr += qp->maclen;
 852         memcpy(ptr, ((unsigned char *) qp->iph), qp->ihlen);
 853         ptr += qp->ihlen;
 854         skb->h.raw += qp->maclen;
 855 
 856         count = 0;
 857 
 858         /* Copy the data portions of all fragments into the new buffer. */
 859         fp = qp->fragments;
 860         while(fp != NULL)
 861         {
 862                 if(count+fp->len > skb->len)
 863                 {
 864                         printk("Invalid fragment list: Fragment over size.\n");
 865                         ip_free(qp);
 866                         kfree_skb(skb,FREE_WRITE);
 867                         ip_statistics.IpReasmFails++;
 868                         return NULL;
 869                 }
 870                 memcpy((ptr + fp->offset), fp->ptr, fp->len);
 871                 count += fp->len;
 872                 fp = fp->next;
 873         }
 874 
 875         /* We glued together all fragments, so remove the queue entry. */
 876         ip_free(qp);
 877 
 878         /* Done with all fragments. Fixup the new IP header. */
 879         iph = skb->h.iph;
 880         iph->frag_off = 0;
 881         iph->tot_len = htons((iph->ihl * sizeof(unsigned long)) + count);
 882         skb->ip_hdr = iph;
 883 
 884         ip_statistics.IpReasmOKs++;
 885         return(skb);
 886 }
 887 
 888 
 889 /*
 890  *      Process an incoming IP datagram fragment.
 891  */
 892 
 893 static struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev)
 894 {
 895         struct ipfrag *prev, *next;
 896         struct ipfrag *tfp;
 897         struct ipq *qp;
 898         struct sk_buff *skb2;
 899         unsigned char *ptr;
 900         int flags, offset;
 901         int i, ihl, end;
 902 
 903         ip_statistics.IpReasmReqds++;
 904 
 905         /* Find the entry of this IP datagram in the "incomplete datagrams" queue. */
 906         qp = ip_find(iph);
 907 
 908         /* Is this a non-fragmented datagram? */
 909         offset = ntohs(iph->frag_off);
 910         flags = offset & ~IP_OFFSET;
 911         offset &= IP_OFFSET;
 912         if (((flags & IP_MF) == 0) && (offset == 0))
 913         {
 914                 if (qp != NULL)
 915                         ip_free(qp);    /* Huh? How could this exist?? */
 916                 return(skb);
 917         }
 918 
 919         offset <<= 3;           /* offset is in 8-byte chunks */
 920 
 921         /*
 922          * If the queue already existed, keep restarting its timer as long
 923          * as we still are receiving fragments.  Otherwise, create a fresh
 924          * queue entry.
 925          */
 926 
 927         if (qp != NULL)
 928         {
 929                 del_timer(&qp->timer);
 930                 qp->timer.expires = IP_FRAG_TIME;       /* about 30 seconds */
 931                 qp->timer.data = (unsigned long) qp;    /* pointer to queue */
 932                 qp->timer.function = ip_expire;         /* expire function */
 933                 add_timer(&qp->timer);
 934         }
 935         else
 936         {
 937                 /*
 938                  *      If we failed to create it, then discard the frame
 939                  */
 940                 if ((qp = ip_create(skb, iph, dev)) == NULL)
 941                 {
 942                         skb->sk = NULL;
 943                         kfree_skb(skb, FREE_READ);
 944                         ip_statistics.IpReasmFails++;
 945                         return NULL;
 946                 }
 947         }
 948 
 949         /*
 950          *      Determine the position of this fragment.
 951          */
 952 
 953         ihl = (iph->ihl * sizeof(unsigned long));
 954         end = offset + ntohs(iph->tot_len) - ihl;
 955 
 956         /*
 957          *      Point into the IP datagram 'data' part.
 958          */
 959 
 960         ptr = skb->data + dev->hard_header_len + ihl;
 961 
 962         /*
 963          *      Is this the final fragment?
 964          */
 965 
 966         if ((flags & IP_MF) == 0)
 967                 qp->len = end;
 968 
 969         /*
 970          *      Find out which fragments are in front and at the back of us
 971          *      in the chain of fragments so far.  We must know where to put
 972          *      this fragment, right?
 973          */
 974 
 975         prev = NULL;
 976         for(next = qp->fragments; next != NULL; next = next->next)
 977         {
 978                 if (next->offset > offset)
 979                         break;  /* bingo! */
 980                 prev = next;
 981         }
 982 
 983         /*
 984          *      We found where to put this one.
 985          *      Check for overlap with preceding fragment, and, if needed,
 986          *      align things so that any overlaps are eliminated.
 987          */
 988         if (prev != NULL && offset < prev->end)
 989         {
 990                 i = prev->end - offset;
 991                 offset += i;    /* ptr into datagram */
 992                 ptr += i;       /* ptr into fragment data */
 993         }
 994 
 995         /*
 996          * Look for overlap with succeeding segments.
 997          * If we can merge fragments, do it.
 998          */
 999 
1000         for(; next != NULL; next = tfp)
1001         {
1002                 tfp = next->next;
1003                 if (next->offset >= end)
1004                         break;          /* no overlaps at all */
1005 
1006                 i = end - next->offset;                 /* overlap is 'i' bytes */
1007                 next->len -= i;                         /* so reduce size of    */
1008                 next->offset += i;                      /* next fragment        */
1009                 next->ptr += i;
1010 
1011                 /*
1012                  *      If we get a frag size of <= 0, remove it and the packet
1013                  *      that it goes with.
1014                  */
1015                 if (next->len <= 0)
1016                 {
1017                         if (next->prev != NULL)
1018                                 next->prev->next = next->next;
1019                         else
1020                                 qp->fragments = next->next;
1021 
 1022                         if (next->next != NULL)
1023                                 next->next->prev = next->prev;
1024 
1025                         kfree_skb(next->skb,FREE_READ);
1026                         kfree_s(next, sizeof(struct ipfrag));
1027                 }
1028         }
1029 
1030         /*
1031          *      Insert this fragment in the chain of fragments.
1032          */
1033 
1034         tfp = NULL;
1035         tfp = ip_frag_create(offset, end, skb, ptr);
1036 
1037         /*
1038          *      No memory to save the fragment - so throw the lot
1039          */
1040 
1041         if (!tfp)
1042         {
1043                 skb->sk = NULL;
1044                 kfree_skb(skb, FREE_READ);
1045                 return NULL;
1046         }
1047         tfp->prev = prev;
1048         tfp->next = next;
1049         if (prev != NULL)
1050                 prev->next = tfp;
1051         else
1052                 qp->fragments = tfp;
1053 
1054         if (next != NULL)
1055                 next->prev = tfp;
1056 
1057         /*
1058          *      OK, so we inserted this new fragment into the chain.
1059          *      Check if we now have a full IP datagram which we can
1060          *      bump up to the IP layer...
1061          */
1062 
1063         if (ip_done(qp))
1064         {
1065                 skb2 = ip_glue(qp);             /* glue together the fragments */
1066                 return(skb2);
1067         }
1068         return(NULL);
1069 }
1070 
1071 
1072 /*
1073  *      This IP datagram is too large to be sent in one piece.  Break it up into
1074  *      smaller pieces (each of size equal to the MAC header plus IP header plus
1075  *      a block of the data of the original IP data part) that will yet fit in a
1076  *      single device frame, and queue such a frame for sending by calling the
1077  *      ip_queue_xmit().  Note that this is recursion, and bad things will happen
1078  *      if this function causes a loop...
1079  *
1080  *      Yes this is inefficient, feel free to submit a quicker one.
1081  *
1082  *      **Protocol Violation**
1083  *      We copy all the options to each fragment. !FIXME!
1084  */
1085 void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag)
1086 {
1087         struct iphdr *iph;
1088         unsigned char *raw;
1089         unsigned char *ptr;
1090         struct sk_buff *skb2;
1091         int left, mtu, hlen, len;
1092         int offset;
1093         unsigned long flags;
1094 
1095         /*
1096          *      Point into the IP datagram header.
1097          */
1098 
1099         raw = skb->data;
1100         iph = (struct iphdr *) (raw + dev->hard_header_len);
1101 
1102         skb->ip_hdr = iph;
1103 
1104         /*
1105          *      Setup starting values.
1106          */
1107 
1108         hlen = (iph->ihl * sizeof(unsigned long));
1109         left = ntohs(iph->tot_len) - hlen;      /* Space per frame */
1110         hlen += dev->hard_header_len;           /* Total header size */
1111         mtu = (dev->mtu - hlen);                /* Size of data space */
1112         ptr = (raw + hlen);                     /* Where to start from */
1113 
1114         /*
1115          *      Check for any "DF" flag. [DF means do not fragment]
1116          */
1117 
1118         if (ntohs(iph->frag_off) & IP_DF)
1119         {
1120                 /*
1121                  *      Reply giving the MTU of the failed hop.
1122                  */
1123                 ip_statistics.IpFragFails++;
1124                 icmp_send(skb,ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, dev->mtu, dev);
1125                 return;
1126         }
1127 
1128         /*
1129          *      The protocol doesn't seem to say what to do in the case that the
1130          *      frame + options doesn't fit the mtu. As it used to fall down dead
1131          *      in this case we were fortunate it didn't happen
1132          */
1133 
1134         if(mtu<8)
1135         {
 1136                 /* It's wrong but it's better than nothing */
1137                 icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,dev->mtu, dev);
1138                 ip_statistics.IpFragFails++;
1139                 return;
1140         }
1141 
1142         /*
1143          *      Fragment the datagram.
1144          */
1145 
1146         /*
1147          *      The initial offset is 0 for a complete frame. When
 1148          *      fragmenting fragments it's wherever this one starts.
1149          */
1150 
1151         if (is_frag & 2)
1152                 offset = (ntohs(iph->frag_off) & 0x1fff) << 3;
1153         else
1154                 offset = 0;
1155 
1156 
1157         /*
1158          *      Keep copying data until we run out.
1159          */
1160 
1161         while(left > 0)
1162         {
1163                 len = left;
1164                 /* IF: it doesn't fit, use 'mtu' - the data space left */
1165                 if (len > mtu)
1166                         len = mtu;
 1167                 /* IF: we are not sending up to and including the packet end
1168                    then align the next start on an eight byte boundary */
1169                 if (len < left)
1170                 {
1171                         len/=8;
1172                         len*=8;
1173                 }
1174                 /*
1175                  *      Allocate buffer.
1176                  */
1177 
1178                 if ((skb2 = alloc_skb(len + hlen,GFP_ATOMIC)) == NULL)
1179                 {
1180                         printk("IP: frag: no memory for new fragment!\n");
1181                         ip_statistics.IpFragFails++;
1182                         return;
1183                 }
1184 
1185                 /*
1186                  *      Set up data on packet
1187                  */
1188 
1189                 skb2->arp = skb->arp;
1190                 if(skb->free==0)
1191                         printk("IP fragmenter: BUG free!=1 in fragmenter\n");
1192                 skb2->free = 1;
1193                 skb2->len = len + hlen;
1194                 skb2->h.raw=(char *) skb2->data;
1195                 /*
1196                  *      Charge the memory for the fragment to any owner
1197                  *      it might possess
1198                  */
1199 
1200                 save_flags(flags);
1201                 if (sk)
1202                 {
1203                         cli();
1204                         sk->wmem_alloc += skb2->mem_len;
1205                         skb2->sk=sk;
1206                 }
1207                 restore_flags(flags);
1208                 skb2->raddr = skb->raddr;       /* For rebuild_header - must be here */
1209 
1210                 /*
1211                  *      Copy the packet header into the new buffer.
1212                  */
1213 
1214                 memcpy(skb2->h.raw, raw, hlen);
1215 
1216                 /*
1217                  *      Copy a block of the IP datagram.
1218                  */
1219                 memcpy(skb2->h.raw + hlen, ptr, len);
1220                 left -= len;
1221 
1222                 skb2->h.raw+=dev->hard_header_len;
1223 
1224                 /*
1225                  *      Fill in the new header fields.
1226                  */
1227                 iph = (struct iphdr *)(skb2->h.raw/*+dev->hard_header_len*/);
1228                 iph->frag_off = htons((offset >> 3));
1229                 /*
 1230                  *      Added AC : If we are fragmenting a fragment that's not the
 1231                  *                 last fragment then keep MF set on each fragment
1232                  */
1233                 if (left > 0 || (is_frag & 1))
1234                         iph->frag_off |= htons(IP_MF);
1235                 ptr += len;
1236                 offset += len;
1237 
1238                 /*
1239                  *      Put this fragment into the sending queue.
1240                  */
1241 
1242                 ip_statistics.IpFragCreates++;
1243 
1244                 ip_queue_xmit(sk, dev, skb2, 2);
1245         }
1246         ip_statistics.IpFragOKs++;
1247 }
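
/*
 *	Worked sketch of the arithmetic above (the helper name is ours and it
 *	is never called): every fragment except the last carries the largest
 *	multiple of eight data bytes that fits in the data space 'mtu', and
 *	the last one carries whatever remains (at most 'mtu' bytes).  'left'
 *	and 'mtu' correspond to the local variables of ip_fragment().
 */
#if 0
static int ip_count_fragments(int left, int mtu)
{
	int per_frag = mtu & ~7;	/* data bytes in each non-final fragment */

	if (mtu < 8)
		return(0);		/* ip_fragment() refuses this case too */
	if (left <= mtu)
		return(1);		/* fits in a single piece */

	/* One final fragment plus enough full-sized ones to get below 'mtu'. */
	return(1 + (left - mtu + per_frag - 1) / per_frag);
}
#endif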
1248 
1249 
1250 
1251 #ifdef CONFIG_IP_FORWARD
1252 
1253 /*
1254  *      Forward an IP datagram to its next destination.
1255  */
1256 
1257 static void ip_forward(struct sk_buff *skb, struct device *dev, int is_frag)
1258 {
1259         struct device *dev2;    /* Output device */
1260         struct iphdr *iph;      /* Our header */
1261         struct sk_buff *skb2;   /* Output packet */
1262         struct rtable *rt;      /* Route we use */
1263         unsigned char *ptr;     /* Data pointer */
1264         unsigned long raddr;    /* Router IP address */
1265 
1266         /* 
1267          *      See if we are allowed to forward this.
1268          */
1269 
1270 #ifdef CONFIG_IP_FIREWALL
1271         if(!ip_fw_chk(skb->h.iph, ip_fw_fwd_chain))
1272         {
1273                 return;
1274         }
1275 #endif
1276         /*
1277          *      According to the RFC, we must first decrease the TTL field. If
 1278          *      that reaches zero, we must send back an ICMP control message telling
 1279          *      the sender that the packet's lifetime expired.
1280          *
1281          *      Exception:
1282          *      We may not generate an ICMP for an ICMP. icmp_send does the
1283          *      enforcement of this so we can forget it here. It is however
1284          *      sometimes VERY important.
1285          */
1286 
1287         iph = skb->h.iph;
1288         iph->ttl--;
1289         if (iph->ttl <= 0)
1290         {
1291                 /* Tell the sender its packet died... */
1292                 icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0, dev);
1293                 return;
1294         }
1295 
1296         /*
1297          *      Re-compute the IP header checksum.
1298          *      This is inefficient. We know what has happened to the header
1299          *      and could thus adjust the checksum as Phil Karn does in KA9Q
1300          */
1301 
1302         ip_send_check(iph);
1303 
1304         /*
1305          * OK, the packet is still valid.  Fetch its destination address,
1306          * and give it to the IP sender for further processing.
1307          */
1308 
1309         rt = ip_rt_route(iph->daddr, NULL, NULL);
1310         if (rt == NULL)
1311         {
1312                 /*
1313                  *      Tell the sender its packet cannot be delivered. Again
1314                  *      ICMP is screened later.
1315                  */
1316                 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_UNREACH, 0, dev);
1317                 return;
1318         }
1319 
1320 
1321         /*
1322          * Gosh.  Not only is the packet valid; we even know how to
1323          * forward it onto its final destination.  Can we say this
1324          * is being plain lucky?
1325          * If the router told us that there is no GW, use the dest.
1326          * IP address itself- we seem to be connected directly...
1327          */
1328 
1329         raddr = rt->rt_gateway;
1330 
1331         if (raddr != 0)
1332         {
1333                 /*
1334                  *      There is a gateway so find the correct route for it.
1335                  *      Gateways cannot in turn be gatewayed.
1336                  */
1337                 rt = ip_rt_route(raddr, NULL, NULL);
1338                 if (rt == NULL)
1339                 {
1340                         /*
1341                          *      Tell the sender its packet cannot be delivered...
1342                          */
1343                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
1344                         return;
1345                 }
1346                 if (rt->rt_gateway != 0)
1347                         raddr = rt->rt_gateway;
1348         }
1349         else
1350                 raddr = iph->daddr;
1351 
1352         /*
1353          *      Having picked a route we can now send the frame out.
1354          */
1355 
1356         dev2 = rt->rt_dev;
1357 
1358         /*
1359          *      In IP you never have to forward a frame on the interface that it 
1360          *      arrived upon. We now generate an ICMP HOST REDIRECT giving the route
1361          *      we calculated.
1362          */
1363 #ifdef IP_NO_ICMP_REDIRECT
1364         if (dev == dev2)
1365                 return;
1366 #else
1367         if (dev == dev2)
1368                 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, raddr, dev);
1369 #endif          
1370 
1371         /*
1372          * We now allocate a new buffer, and copy the datagram into it.
1373          * If the indicated interface is up and running, kick it.
1374          */
1375 
1376         if (dev2->flags & IFF_UP)
1377         {
1378 
1379                 /*
1380                  *      Current design decrees we copy the packet. For identical header
1381                  *      lengths we could avoid it. The new skb code will let us push
1382                  *      data so the problem goes away then.
1383                  */
1384 
1385                 skb2 = alloc_skb(dev2->hard_header_len + skb->len, GFP_ATOMIC);
1386                 /*
1387                  *      This is rare and since IP is tolerant of network failures
1388                  *      quite harmless.
1389                  */
1390                 if (skb2 == NULL)
1391                 {
1392                         printk("\nIP: No memory available for IP forward\n");
1393                         return;
1394                 }
1395                 ptr = skb2->data;
1396                 skb2->free = 1;
1397                 skb2->len = skb->len + dev2->hard_header_len;
1398                 skb2->h.raw = ptr;
1399 
1400                 /*
1401                  *      Copy the packet data into the new buffer.
1402                  */
1403                 memcpy(ptr + dev2->hard_header_len, skb->h.raw, skb->len);
1404 
1405                 /* Now build the MAC header. */
1406                 (void) ip_send(skb2, raddr, skb->len, dev2, dev2->pa_addr);
1407 
1408                 ip_statistics.IpForwDatagrams++;
1409 
1410                 /*
1411                  *      See if it needs fragmenting. Note in ip_rcv we tagged
1412                  *      the fragment type. This must be right so that
1413                  *      the fragmenter does the right thing.
1414                  */
1415 
1416                 if(skb2->len > dev2->mtu + dev2->hard_header_len)
1417                 {
1418                         ip_fragment(NULL,skb2,dev2, is_frag);
1419                         kfree_skb(skb2,FREE_WRITE);
1420                 }
1421                 else
1422                 {
1423 #ifdef CONFIG_IP_ACCT           
1424                         /*
1425                          *      Count mapping we shortcut
1426                          */
1427                          
1428                         ip_acct_cnt(iph,ip_acct_chain,1);
1429 #endif                  
1430                         
1431                         /*
1432                          *      Map service types to priority. We lie about
 1433          *      throughput being low priority, but it's a good
1434                          *      choice to help improve general usage.
1435                          */
1436                         if(iph->tos & IPTOS_LOWDELAY)
1437                                 dev_queue_xmit(skb2, dev2, SOPRI_INTERACTIVE);
1438                         else if(iph->tos & IPTOS_THROUGHPUT)
1439                                 dev_queue_xmit(skb2, dev2, SOPRI_BACKGROUND);
1440                         else
1441                                 dev_queue_xmit(skb2, dev2, SOPRI_NORMAL);
1442                 }
1443         }
1444 }
1445 
1446 
1447 #endif
1448 
1449 /*
1450  *      This function receives all incoming IP datagrams.
1451  */
1452 
1453 int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
1454 {
1455         struct iphdr *iph = skb->h.iph;
1456         struct sock *raw_sk=NULL;
1457         unsigned char hash;
1458         unsigned char flag = 0;
1459         unsigned char opts_p = 0;       /* Set iff the packet has options. */
1460         struct inet_protocol *ipprot;
1461         static struct options opt; /* since we don't use these yet, and they
1462                                 take up stack space. */
1463         int brd=IS_MYADDR;
1464         int is_frag=0;
1465 
1466         ip_statistics.IpInReceives++;
1467 
1468         /*
1469          *      Tag the ip header of this packet so we can find it
1470          */
1471 
1472         skb->ip_hdr = iph;
1473 
1474         /*
1475          *      Is the datagram acceptable?
1476          *
1477          *      1.      Length at least the size of an ip header
1478          *      2.      Version of 4
1479          *      3.      Checksums correctly. [Speed optimisation for later, skip loopback checksums]
1480          *      (4.     We ought to check for IP multicast addresses and undefined types.. does this matter ?)
1481          */
1482 
1483         if (skb->len<sizeof(struct iphdr) || iph->ihl<5 || iph->version != 4 || ip_fast_csum((unsigned char *)iph, iph->ihl) !=0)
1484         {
1485                 ip_statistics.IpInHdrErrors++;
1486                 kfree_skb(skb, FREE_WRITE);
1487                 return(0);
1488         }
1489         
1490         /*
1491          *      See if the firewall wants to dispose of the packet. 
1492          */
1493 
1494 #ifdef  CONFIG_IP_FIREWALL
1495         
1496         if(!LOOPBACK(iph->daddr) && !ip_fw_chk(iph,ip_fw_blk_chain))
1497         {
1498                 kfree_skb(skb, FREE_WRITE);
1499                 return 0;       
1500         }
1501 
1502 #endif
1503         
1504         /*
1505          *      Our transport medium may have padded the buffer out. Now that we know it
1506          *      is IP, we can trim to the true length of the frame.
1507          */
1508 
1509         skb->len=ntohs(iph->tot_len);
1510 
1511         /*
1512          *      Next analyse the packet for options. Studies show under one packet in
1513          *      a thousand have options....
1514          */
1515 
1516         if (iph->ihl != 5)
1517         {       /* Slow path: the packet carries options (the typical optionless packet skips this). */
1518                 memset((char *) &opt, 0, sizeof(opt));
1519                 if (do_options(iph, &opt) != 0)
1520                         return 0;
1521                 opts_p = 1;
1522         }
1523 
1524         /*
1525          *      Remember if the frame is fragmented.
1526          */
1527          
1528         if(iph->frag_off)
1529         {
1530                 if (iph->frag_off & 0x0020)
1531                         is_frag|=1;
1532                 /*
1533                  *      Not the first fragment ? (non-zero offset)
1534                  */
1535         
1536                 if (ntohs(iph->frag_off) & 0x1fff)
1537                         is_frag|=2;
1538         }
1539         
1540         /*
1541          *      Do any IP forwarding required.  chk_addr() is expensive -- avoid it someday.
1542          *
1543          *      This is inefficient. While finding out if it is for us we could also compute
1544          *      the routing table entry. This is where the great unified cache theory comes
1545          *      in as and when someone implements it
1546          *
1547          *      For most hosts over 99% of packets match the first conditional
1548          *      and don't go via ip_chk_addr. Note: brd is set to IS_MYADDR at
1549          *      function entry.
1550          */
1551 
1552         if ( iph->daddr != skb->dev->pa_addr && (brd = ip_chk_addr(iph->daddr)) == 0)
1553         {
1554                 /*
1555                  *      Don't forward multicast or broadcast frames.
1556                  */
1557 
1558                 if(skb->pkt_type!=PACKET_HOST || brd==IS_BROADCAST)
1559                 {
1560                         kfree_skb(skb,FREE_WRITE);
1561                         return 0;
1562                 }
1563 
1564                 /*
1565                  *      The packet is for another target. Forward the frame
1566                  */
1567 
1568 #ifdef CONFIG_IP_FORWARD
1569                 ip_forward(skb, dev, is_frag);
1570 #else
1571 /*              printk("Machine %lx tried to use us as a forwarder to %lx but we have forwarding disabled!\n",
1572                         iph->saddr,iph->daddr);*/
1573                 ip_statistics.IpInAddrErrors++;
1574 #endif
1575                 /*
1576                  *      The forwarder is inefficient and copies the packet. We
1577                  *      free the original now.
1578                  */
1579 
1580                 kfree_skb(skb, FREE_WRITE);
1581                 return(0);
1582         }
1583         
1584 #ifdef CONFIG_IP_MULTICAST      
1585 
1586         if(brd==IS_MULTICAST && iph->daddr!=IGMP_ALL_HOSTS && !(dev->flags&IFF_LOOPBACK))
1587         {
1588                 /*
1589                  *      Check it is for one of our groups
1590                  */
1591                 struct ip_mc_list *ip_mc=dev->ip_mc_list;
1592                 do
1593                 {
1594                         if(ip_mc==NULL)
1595                         {       
1596                                 kfree_skb(skb, FREE_WRITE);
1597                                 return 0;
1598                         }
1599                         if(ip_mc->multiaddr==iph->daddr)
1600                                 break;
1601                         ip_mc=ip_mc->next;
1602                 }
1603                 while(1);
1604         }
1605 #endif
1606         /*
1607          *      Account for the packet
1608          */
1609          
1610 #ifdef CONFIG_IP_ACCT
1611         ip_acct_cnt(iph,ip_acct_chain,1);
1612 #endif  
1613 
1614         /*
1615          * Reassemble IP fragments.
1616          */
1617 
1618         if(is_frag)
1619         {
1620                 /* Defragment. Obtain the complete packet if there is one */
1621                 skb=ip_defrag(iph,skb,dev);
1622                 if(skb==NULL)
1623                         return 0;
1624                 skb->dev = dev;
1625                 iph=skb->h.iph;
1626         }
1627         
1628                  
1629 
1630         /*
1631          *      Point into the IP datagram, just past the header.
1632          */
1633 
1634         skb->ip_hdr = iph;
1635         skb->h.raw += iph->ihl*4;
1636         
1637         /*
1638          *      Deliver to raw sockets. This is fun, as we want to deliver to them without making any surplus copies.
1639          */
1640          
1641         hash = iph->protocol & (SOCK_ARRAY_SIZE-1);
1642         
1643         /* If there may be a raw socket here we must check - if not we don't care */
1644         if((raw_sk=raw_prot.sock_array[hash])!=NULL)
1645         {
1646                 struct sock *sknext=NULL;
1647                 struct sk_buff *skb1;
1648                 raw_sk=get_sock_raw(raw_sk, hash,  iph->saddr, iph->daddr);
1649                 if(raw_sk)      /* Any raw sockets */
1650                 {
1651                         do
1652                         {
1653                                 /* Find the next */
1654                                 sknext=get_sock_raw(raw_sk->next, hash, iph->saddr, iph->daddr);
1655                                 if(sknext)
1656                                         skb1=skb_clone(skb, GFP_ATOMIC);
1657                                 else
1658                                         break;  /* One pending raw socket left */
1659                                 if(skb1)
1660                                         raw_rcv(raw_sk, skb1, dev, iph->saddr,iph->daddr);
1661                                 raw_sk=sknext;
1662                         }
1663                         while(raw_sk!=NULL);
1664                         /* Here either raw_sk is the last raw socket, or NULL if none */
1665                         /* We deliver to the last raw socket AFTER the protocol checks as it avoids a surplus copy */
1666                 }
1667         }
1668         
1669         /*
1670          *      skb->h.raw now points at the protocol beyond the IP header.
1671          */
1672 
1673         hash = iph->protocol & (MAX_INET_PROTOS -1);
1674         for (ipprot = (struct inet_protocol *)inet_protos[hash];ipprot != NULL;ipprot=(struct inet_protocol *)ipprot->next)
1675         {
1676                 struct sk_buff *skb2;
1677 
1678                 if (ipprot->protocol != iph->protocol)
1679                         continue;
1680                 /*
1681                  *      See if we need to make a copy of it.  The copy flag is
1682                  *      only set if more than one protocol wants it, and even
1683                  *      then not for the last one. If there is a pending raw
1684                  *      delivery, wait for that.
1685                  */
1686                 if (ipprot->copy || raw_sk)
1687                 {
1688                         skb2 = skb_clone(skb, GFP_ATOMIC);
1689                         if(skb2==NULL)
1690                                 continue;
1691                 }
1692                 else
1693                 {
1694                         skb2 = skb;
1695                 }
1696                 flag = 1;
1697 
1698                /*
1699                 * Pass on the datagram to each protocol that wants it,
1700                 * based on the datagram protocol.  We should really
1701                 * check the protocol handler's return values here...
1702                 */
1703                 ipprot->handler(skb2, dev, opts_p ? &opt : 0, iph->daddr,
1704                                 (ntohs(iph->tot_len) - (iph->ihl * 4)),
1705                                 iph->saddr, 0, ipprot);
1706 
1707         }
1708 
1709         /*
1710          * All protocols checked.
1711          * If this packet was a broadcast, we may *not* reply to it, since that
1712          * causes (proven, grin) ARP storms and a leakage of memory (i.e. all
1713          * ICMP reply messages get queued up for transmission...)
1714          */
1715 
1716         if(raw_sk!=NULL)        /* Shift to last raw user */
1717                 raw_rcv(raw_sk, skb, dev, iph->saddr, iph->daddr);
1718         else if (!flag)         /* Free and report errors */
1719         {
1720                 if (brd != IS_BROADCAST && brd!=IS_MULTICAST)
1721                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0, dev);
1722                 kfree_skb(skb, FREE_WRITE);
1723         }
1724 
1725         return(0);
1726 }
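
/*
 *      For reference, the frag_off tests in ip_rcv() above decode a field
 *      whose top three bits are flags and whose low thirteen bits are the
 *      fragment offset in 8-byte units. A portable sketch of the same
 *      decoding (names here are illustrative only); the 0x0020 test above
 *      picks out the MF bit from the unswapped field, which works because
 *      on a little-endian host the network-order flag byte lands in the
 *      low byte of the load.
 */

#define IP_SKETCH_MF            0x2000  /* more fragments follow */
#define IP_SKETCH_OFFSET        0x1fff  /* offset, in units of 8 octets */

static int ip_sketch_frag_flags(unsigned short frag_off_net)
{
        unsigned short f = ntohs(frag_off_net);
        int is_frag = 0;

        if (f & IP_SKETCH_MF)
                is_frag |= 1;           /* more fragments follow this one */
        if (f & IP_SKETCH_OFFSET)
                is_frag |= 2;           /* not the first fragment */
        return is_frag;
}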
1727 
1728 /*
1729  *      Loop a packet back to the sender.
1730  */
1731  
1732 static void ip_loopback(struct device *old_dev, struct sk_buff *skb)
1733 {
1734         extern struct device loopback_dev;
1735         struct device *dev=&loopback_dev;
1736         int len=skb->len-old_dev->hard_header_len;
1737         struct sk_buff *newskb=alloc_skb(len+dev->hard_header_len, GFP_ATOMIC);
1738         
1739         if(newskb==NULL)
1740                 return;
1741                 
1742         newskb->link3=NULL;
1743         newskb->sk=NULL;
1744         newskb->dev=dev;
1745         newskb->saddr=skb->saddr;
1746         newskb->daddr=skb->daddr;
1747         newskb->raddr=skb->raddr;
1748         newskb->free=1;
1749         newskb->lock=0;
1750         newskb->users=0;
1751         newskb->pkt_type=skb->pkt_type;
1752         newskb->len=len+dev->hard_header_len;
1753         
1754         
1755         newskb->ip_hdr=(struct iphdr *)(newskb->data+ip_send(newskb, skb->ip_hdr->daddr, len, dev, skb->ip_hdr->saddr));
1756         memcpy(newskb->ip_hdr,skb->ip_hdr,len);
1757 
1758         /* Recurse. The device check against IFF_LOOPBACK will stop infinite recursion */
1759                 
1760         /*printk("Loopback output queued [%lX to %lX].\n", newskb->ip_hdr->saddr,newskb->ip_hdr->daddr);*/
1761         ip_queue_xmit(NULL, dev, newskb, 1);
1762 }
1763 
1764 
1765 /*
1766  * Queues a packet to be sent, and starts the transmitter
1767  * if necessary.  If free == 1 then we free the block after
1768  * transmit, otherwise we don't. If free == 2 we not only
1769  * free the block afterwards but also keep the buffer's existing
1770  * IP id rather than assigning a new one. This routine also fills
1771  * in the total length and computes the header checksum.
1772  */
1773 
1774 void ip_queue_xmit(struct sock *sk, struct device *dev,
1775               struct sk_buff *skb, int free)
1776 {
1777         struct iphdr *iph;
1778         unsigned char *ptr;
1779 
1780         /* Sanity check */
1781         if (dev == NULL)
1782         {
1783                 printk("IP: ip_queue_xmit dev = NULL\n");
1784                 return;
1785         }
1786 
1787         IS_SKB(skb);
1788 
1789         /*
1790          *      Do some book-keeping in the packet for later
1791          */
1792 
1793 
1794         skb->dev = dev;
1795         skb->when = jiffies;
1796 
1797         /*
1798          *      Find the IP header and set the length. This is bad,
1799          *      but once we get the skb data handling code in, the
1800          *      hardware header will be pushed sensibly and we will
1801          *      set skb->ip_hdr to avoid this mess and the fixed
1802          *      header length problem.
1803          */
1804 
1805         ptr = skb->data;
1806         ptr += dev->hard_header_len;
1807         iph = (struct iphdr *)ptr;
1808         skb->ip_hdr = iph;
1809         iph->tot_len = ntohs(skb->len-dev->hard_header_len);
1810 
1811         /*
1812          *      No reassigning numbers to fragments...
1813          */
1814 
1815         if(free!=2)
1816                 iph->id      = htons(ip_id_count++);
1817         else
1818                 free=1;
1819 
1820         /* All buffers without an owner socket get freed */
1821         if (sk == NULL)
1822                 free = 1;
1823 
1824         skb->free = free;
1825 
1826         /*
1827          *      Do we need to fragment ? Again this is inefficient.
1828          *      We need to somehow lock the original buffer and use
1829          *      bits of it.
1830          */
1831 
1832         if(skb->len > dev->mtu + dev->hard_header_len)
1833         {
1834                 ip_fragment(sk,skb,dev,0);
1835                 IS_SKB(skb);
1836                 kfree_skb(skb,FREE_WRITE);
1837                 return;
1838         }
1839 
1840         /*
1841          *      Add an IP checksum
1842          */
1843 
1844         ip_send_check(iph);
1845 
1846         /*
1847          *      Print the frame when debugging
1848          */
1849 
1850         /*
1851          *      More debugging. You cannot queue a packet that is already on a list.
1852          *      Spot this and moan loudly.
1853          */
1854         if (skb->next != NULL)
1855         {
1856                 printk("ip_queue_xmit: next != NULL\n");
1857                 skb_unlink(skb);
1858         }
1859 
1860         /*
1861          *      If a sender wishes the packet to remain unfreed
1862          *      we add it to his send queue. This arguably belongs
1863          *      in the TCP level since nobody else uses it. BUT
1864          *      remember IPng might change all the rules.
1865          */
1866 
1867         if (!free)
1868         {
1869                 unsigned long flags;
1870                 /* The socket now has more outstanding blocks */
1871 
1872                 sk->packets_out++;
1873 
1874                 /* Protect the list for a moment */
1875                 save_flags(flags);
1876                 cli();
1877 
1878                 if (skb->link3 != NULL)
1879                 {
1880                         printk("ip.c: link3 != NULL\n");
1881                         skb->link3 = NULL;
1882                 }
1883                 if (sk->send_head == NULL)
1884                 {
1885                         sk->send_tail = skb;
1886                         sk->send_head = skb;
1887                 }
1888                 else
1889                 {
1890                         sk->send_tail->link3 = skb;
1891                         sk->send_tail = skb;
1892                 }
1893                 /* skb->link3 is NULL */
1894 
1895                 /* Interrupt restore */
1896                 restore_flags(flags);
1897         }
1898         else
1899                 /* Remember who owns the buffer */
1900                 skb->sk = sk;
1901 
1902         /*
1903          *      If the indicated interface is up and running, send the packet.
1904          */
1905          
1906         ip_statistics.IpOutRequests++;
1907 #ifdef CONFIG_IP_ACCT
1908         ip_acct_cnt(iph,ip_acct_chain,1);
1909 #endif  
1910         
1911 #ifdef CONFIG_IP_MULTICAST      
1912 
1913         /*
1914          *      Multicasts are looped back for other local users
1915          */
1916          
1917         if (MULTICAST(iph->daddr) && !(dev->flags&IFF_LOOPBACK))
1918         {
1919                 if(sk==NULL || sk->ip_mc_loop)
1920                 {
1921                         if(iph->daddr==IGMP_ALL_HOSTS)
1922                                 ip_loopback(dev,skb);
1923                         else
1924                         {
1925                                 struct ip_mc_list *imc=dev->ip_mc_list;
1926                                 while(imc!=NULL)
1927                                 {
1928                                         if(imc->multiaddr==iph->daddr)
1929                                         {
1930                                                 ip_loopback(dev,skb);
1931                                                 break;
1932                                         }
1933                                         imc=imc->next;
1934                                 }
1935                         }
1936                 }
1937                 /* Multicasts with ttl 0 must not go beyond the host */
1938                 
1939                 if(skb->ip_hdr->ttl==0)
1940                 {
1941                         kfree_skb(skb, FREE_READ);
1942                         return;
1943                 }
1944         }
1945 #endif
1946         if((dev->flags&IFF_BROADCAST) && iph->daddr==dev->pa_brdaddr && !(dev->flags&IFF_LOOPBACK))
1947                 ip_loopback(dev,skb);
1948                 
1949         if (dev->flags & IFF_UP)
1950         {
1951                 /*
1952                  *      If we have an owner use its priority setting,
1953                  *      otherwise use NORMAL
1954                  */
1955 
1956                 if (sk != NULL)
1957                 {
1958                         dev_queue_xmit(skb, dev, sk->priority);
1959                 }
1960                 else
1961                 {
1962                         dev_queue_xmit(skb, dev, SOPRI_NORMAL);
1963                 }
1964         }
1965         else
1966         {
1967                 ip_statistics.IpOutDiscards++;
1968                 if (free)
1969                         kfree_skb(skb, FREE_WRITE);
1970         }
1971 }
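
/*
 *      How a caller hands a finished datagram to ip_queue_xmit(): free
 *      set to 1 means the buffer is disposed of once it has been sent
 *      (or dropped), exactly as ip_loopback() does above; TCP passes 0
 *      so the buffer stays on the socket's send queue for retransmission,
 *      and 2 behaves like 1 but keeps the buffer's existing IP id. A
 *      sketch only: the wrapper name is hypothetical and the skb is
 *      assumed to already carry its MAC and IP headers.
 */

static void ip_sketch_send_one_shot(struct sock *sk, struct device *dev,
                                    struct sk_buff *skb)
{
        /* Fills in tot_len and the header checksum, fragments if needed,
           then queues the buffer on dev and frees it after transmission. */
        ip_queue_xmit(sk, dev, skb, 1);
}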
1972 
1973 
1974 
1975 #ifdef CONFIG_IP_MULTICAST
1976 
1977 /*
1978  *      Write a multicast group list table for the IGMP daemon to
1979  *      read.
1980  */
1981  
1982 int ip_mc_procinfo(char *buffer, char **start, off_t offset, int length)
1983 {
1984         off_t pos=0, begin=0;
1985         struct ip_mc_list *im;
1986         unsigned long flags;
1987         int len=0;
1988         struct device *dev;
1989         
1990         len=sprintf(buffer,"Device    : Count\tGroup    Users Timer\n");  
1991         save_flags(flags);
1992         cli();
1993         
1994         for(dev = dev_base; dev; dev = dev->next)
1995         {
1996                 if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST))
1997                 {
1998                         len+=sprintf(buffer+len,"%-10s: %5d\n",
1999                                         dev->name, dev->mc_count);
2000                         for(im = dev->ip_mc_list; im; im = im->next)
2001                         {
2002                                 len+=sprintf(buffer+len,
2003                                         "\t\t\t%08lX %5d %d:%08lX\n",
2004                                         im->multiaddr, im->users,
2005                                         im->tm_running, im->timer.expires);
2006                                 pos=begin+len;
2007                                 if(pos<offset)
2008                                 {
2009                                         len=0;
2010                                         begin=pos;
2011                                 }
2012                                 if(pos>offset+length)
2013                                         break;
2014                         }
2015                 }
2016         }
2017         restore_flags(flags);
2018         *start=buffer+(offset-begin);
2019         len-=(offset-begin);
2020         if(len>length)
2021                 len=length;     
2022         return len;
2023 }
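
/*
 *      For reference, the table built above comes out as one line per
 *      multicast capable interface followed by one indented line per
 *      group joined on it, along these lines (the values are invented
 *      and the group address is printed as a raw hex long):
 *
 *      Device    : Count       Group    Users Timer
 *      eth0      :     2
 *                              E0000001     1 0:00000000
 */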
2024 
2025 
2026 #endif  
2027 /*
2028  *      Socket option code for IP. This is the end of the line after any TCP, UDP etc. options on
2029  *      an IP socket.
2030  *
2031  *      We implement IP_TOS (type of service), IP_TTL (time to live).
2032  *
2033  *      Next release we will sort out IP_OPTIONS, since they are kind of important to some people.
2034  */
2035 
2036 int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
2037 {
2038         int val,err;
2039 #if defined(CONFIG_IP_FIREWALL) || defined(CONFIG_IP_ACCT)
2040         struct ip_fw tmp_fw;
2041 #endif  
2042         if (optval == NULL)
2043                 return(-EINVAL);
2044 
2045         err=verify_area(VERIFY_READ, optval, sizeof(int));
2046         if(err)
2047                 return err;
2048 
2049         val = get_fs_long((unsigned long *)optval);
2050 
2051         if(level!=SOL_IP)
2052                 return -EOPNOTSUPP;
2053 
2054 #ifdef CONFIG_IP_MULTICAST
2055         if(optname==IP_MULTICAST_TTL)
2056         {
2057                 unsigned char ucval;
2058                 ucval=get_fs_byte((unsigned char *)optval);
2059                 printk("MC TTL %d\n", ucval);
2060                 if(ucval<1||ucval>255)
2061                         return -EINVAL;
2062                 sk->ip_mc_ttl=(int)ucval;
2063                 return 0;
2064         }
2065 #endif
2066 
2067         switch(optname)
2068         {
2069                 case IP_TOS:
2070                         if(val<0||val>255)
2071                                 return -EINVAL;
2072                         sk->ip_tos=val;
2073                         if(val==IPTOS_LOWDELAY)
2074                                 sk->priority=SOPRI_INTERACTIVE;
2075                         if(val==IPTOS_THROUGHPUT)
2076                                 sk->priority=SOPRI_BACKGROUND;
2077                         return 0;
2078                 case IP_TTL:
2079                         if(val<1||val>255)
2080                                 return -EINVAL;
2081                         sk->ip_ttl=val;
2082                         return 0;
2083 #ifdef CONFIG_IP_MULTICAST
2084 #ifdef GCC_WORKS
2085                 case IP_MULTICAST_TTL: 
2086                 {
2087                         unsigned char ucval;
2088 
2089                         ucval=get_fs_byte((unsigned char *)optval);
2090                         printk("MC TTL %d\n", ucval);
2091                         if(ucval<1||ucval>255)
2092                                 return -EINVAL;
2093                         sk->ip_mc_ttl=(int)ucval;
2094                         return 0;
2095                 }
2096 #endif
2097                 case IP_MULTICAST_LOOP: 
2098                 {
2099                         unsigned char ucval;
2100 
2101                         ucval=get_fs_byte((unsigned char *)optval);
2102                         if(ucval!=0 && ucval!=1)
2103                                  return -EINVAL;
2104                         sk->ip_mc_loop=(int)ucval;
2105                         return 0;
2106                 }
2107                 case IP_MULTICAST_IF: 
2108                 {
2109                         /* Not fully tested */
2110                         struct in_addr addr;
2111                         struct device *dev=NULL;
2112                         
2113                         /*
2114                          *      Check the arguments are allowable
2115                          */
2116 
2117                         err=verify_area(VERIFY_READ, optval, sizeof(addr));
2118                         if(err)
2119                                 return err;
2120                                 
2121                         memcpy_fromfs(&addr,optval,sizeof(addr));
2122                         
2123                         printk("MC bind %s\n", in_ntoa(addr.s_addr));
2124                         
2125                         /*
2126                          *      What address has been requested
2127                          */
2128                         
2129                         if(addr.s_addr==INADDR_ANY)     /* Default */
2130                         {
2131                                 sk->ip_mc_name[0]=0;
2132                                 return 0;
2133                         }
2134                         
2135                         /*
2136                          *      Find the device
2137                          */
2138                          
2139                         for(dev = dev_base; dev; dev = dev->next)
2140                         {
2141                                 if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST)&&
2142                                         (dev->pa_addr==addr.s_addr))
2143                                         break;
2144                         }
2145                         
2146                         /*
2147                          *      Did we find one
2148                          */
2149                          
2150                         if(dev) 
2151                         {
2152                                 strcpy(sk->ip_mc_name,dev->name);
2153                                 return 0;
2154                         }
2155                         return -EADDRNOTAVAIL;
2156                 }
2157                 
2158                 case IP_ADD_MEMBERSHIP: 
2159                 {
2160                 
2161 /*
2162  *      FIXME: Add/Del membership should have a semaphore protecting them from re-entry
2163  */
2164                         struct ip_mreq mreq;
2165                         static struct options optmem;
2166                         unsigned long route_src;
2167                         struct rtable *rt;
2168                         struct device *dev=NULL;
2169                         
2170                         /*
2171                          *      Check the arguments.
2172                          */
2173 
2174                         err=verify_area(VERIFY_READ, optval, sizeof(mreq));
2175                         if(err)
2176                                 return err;
2177 
2178                         memcpy_fromfs(&mreq,optval,sizeof(mreq));
2179 
2180                         /* 
2181                          *      Get device for use later
2182                          */
2183 
2184                         if(mreq.imr_interface.s_addr==INADDR_ANY) 
2185                         {
2186                                 /*
2187                                  *      Not set so scan.
2188                                  */
2189                                 if((rt=ip_rt_route(mreq.imr_multiaddr.s_addr,&optmem, &route_src))!=NULL)
2190                                 {
2191                                         dev=rt->rt_dev;
2192                                         rt->rt_use--;
2193                                 }
2194                         }
2195                         else
2196                         {
2197                                 /*
2198                                  *      Find a suitable device.
2199                                  */
2200                                 for(dev = dev_base; dev; dev = dev->next)
2201                                 {
2202                                         if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST)&&
2203                                                 (dev->pa_addr==mreq.imr_interface.s_addr))
2204                                                 break;
2205                                 }
2206                         }
2207                         
2208                         /*
2209                          *      No device, no cookies.
2210                          */
2211                          
2212                         if(!dev)
2213                                 return -ENODEV;
2214                                 
2215                         /*
2216                          *      Join group.
2217                          */
2218                          
2219                         return ip_mc_join_group(sk,dev,mreq.imr_multiaddr.s_addr);
2220                 }
2221                 
2222                 case IP_DROP_MEMBERSHIP: 
2223                 {
2224                         struct ip_mreq mreq;
2225                         struct rtable *rt;
2226                         static struct options optmem;
2227                         unsigned long route_src;
2228                         struct device *dev=NULL;
2229 
2230                         /*
2231                          *      Check the arguments
2232                          */
2233                          
2234                         err=verify_area(VERIFY_READ, optval, sizeof(mreq));
2235                         if(err)
2236                                 return err;
2237 
2238                         memcpy_fromfs(&mreq,optval,sizeof(mreq));
2239 
2240                         /*
2241                          *      Get device for use later 
2242                          */
2243  
2244                         if(mreq.imr_interface.s_addr==INADDR_ANY) 
2245                         {
2246                                 if((rt=ip_rt_route(mreq.imr_multiaddr.s_addr,&optmem, &route_src))!=NULL)
2247                                 {
2248                                         dev=rt->rt_dev;
2249                                         rt->rt_use--;
2250                                 }
2251                         }
2252                         else 
2253                         {
2254                                 for(dev = dev_base; dev; dev = dev->next)
2255                                 {
2256                                         if((dev->flags&IFF_UP)&& (dev->flags&IFF_MULTICAST)&&
2257                                                         (dev->pa_addr==mreq.imr_interface.s_addr))
2258                                                 break;
2259                                 }
2260                         }
2261                         
2262                         /*
2263                          *      Did we find a suitable device.
2264                          */
2265                          
2266                         if(!dev)
2267                                 return -ENODEV;
2268                                 
2269                         /*
2270                          *      Leave group
2271                          */
2272                          
2273                         return ip_mc_leave_group(sk,dev,mreq.imr_multiaddr.s_addr);
2274                 }
2275 #endif                  
2276 #ifdef CONFIG_IP_FIREWALL
2277                 case IP_FW_ADD_BLK:
2278                 case IP_FW_DEL_BLK:
2279                 case IP_FW_ADD_FWD:
2280                 case IP_FW_DEL_FWD:
2281                 case IP_FW_CHK_BLK:
2282                 case IP_FW_CHK_FWD:
2283                 case IP_FW_FLUSH:
2284                 case IP_FW_POLICY:
2285                         if(!suser())
2286                                 return -EPERM;
2287                         if(optlen>sizeof(tmp_fw) || optlen<1)
2288                                 return -EINVAL;
2289                         err=verify_area(VERIFY_READ,optval,optlen);
2290                         if(err)
2291                                 return err;
2292                         memcpy_fromfs(&tmp_fw,optval,optlen);
2293                         err=ip_fw_ctl(optname, &tmp_fw,optlen);
2294                         return -err;    /* -0 is 0 after all */
2295                         
2296 #endif
2297 #ifdef CONFIG_IP_ACCT
2298                 case IP_ACCT_DEL:
2299                 case IP_ACCT_ADD:
2300                 case IP_ACCT_FLUSH:
2301                 case IP_ACCT_ZERO:
2302                         if(!suser())
2303                                 return -EPERM;
2304                         if(optlen>sizeof(tmp_fw) || optlen<1)
2305                                 return -EINVAL;
2306                         err=verify_area(VERIFY_READ,optval,optlen);
2307                         if(err)
2308                                 return err;
2309                         memcpy_fromfs(&tmp_fw, optval,optlen);
2310                         err=ip_acct_ctl(optname, &tmp_fw,optlen);
2311                         return -err;    /* -0 is 0 after all */
2312 #endif
2313                 /* IP_OPTIONS and friends go here eventually */
2314                 default:
2315                         return(-ENOPROTOOPT);
2316         }
2317 }
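
/*
 *      What the option handling above looks like from user space: set the
 *      type of service and time to live, then join a multicast group with
 *      IP_ADD_MEMBERSHIP. A sketch with error handling left out; the group
 *      address is only an example, and SOL_IP is the Linux spelling of
 *      IPPROTO_IP.
 */

#if 0   /* user-space usage example, not kernel code */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <arpa/inet.h>

void ip_option_example(int fd)
{
        int tos = IPTOS_LOWDELAY;       /* also raises the socket priority, see above */
        int ttl = 64;
        struct ip_mreq mreq;

        setsockopt(fd, SOL_IP, IP_TOS, &tos, sizeof(tos));
        setsockopt(fd, SOL_IP, IP_TTL, &ttl, sizeof(ttl));

        mreq.imr_multiaddr.s_addr = inet_addr("224.0.0.1");
        mreq.imr_interface.s_addr = htonl(INADDR_ANY);  /* let the kernel pick the device */
        setsockopt(fd, SOL_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
}
#endif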
2318 
2319 /*
2320  *      Get the options. Note for future reference: the GET of IP options gets the
2321  *      _received_ ones; the SET sets the _sent_ ones.
2322  */
2323 
2324 int ip_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
2325 {
2326         int val,err;
2327 #ifdef CONFIG_IP_MULTICAST
2328         int len;
2329 #endif
2330         
2331         if(level!=SOL_IP)
2332                 return -EOPNOTSUPP;
2333 
2334         switch(optname)
2335         {
2336                 case IP_TOS:
2337                         val=sk->ip_tos;
2338                         break;
2339                 case IP_TTL:
2340                         val=sk->ip_ttl;
2341                         break;
2342 #ifdef CONFIG_IP_MULTICAST                      
2343                 case IP_MULTICAST_TTL:
2344                         val=sk->ip_mc_ttl;
2345                         break;
2346                 case IP_MULTICAST_LOOP:
2347                         val=sk->ip_mc_loop;
2348                         break;
2349                 case IP_MULTICAST_IF:
2350                         err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
2351                         if(err)
2352                                 return err;
2353                         len=strlen(sk->ip_mc_name);
2354                         err=verify_area(VERIFY_WRITE, optval, len);
2355                         if(err)
2356                                 return err;
2357                         put_fs_long(len,(unsigned long *) optlen);
2358                         memcpy_tofs((void *)optval,sk->ip_mc_name, len);
2359                         return 0;
2360 #endif
2361                 default:
2362                         return(-ENOPROTOOPT);
2363         }
2364         err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
2365         if(err)
2366                 return err;
2367         put_fs_long(sizeof(int),(unsigned long *) optlen);
2368 
2369         err=verify_area(VERIFY_WRITE, optval, sizeof(int));
2370         if(err)
2371                 return err;
2372         put_fs_long(val,(unsigned long *)optval);
2373 
2374         return(0);
2375 }
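
/*
 *      Reading an option back from user space is the mirror image; note
 *      that optlen is value-result and that IP_MULTICAST_IF above hands
 *      back the bound device name rather than an address. A sketch only.
 */

#if 0   /* user-space usage example, not kernel code */
#include <sys/socket.h>
#include <netinet/in.h>

int read_ttl(int fd)
{
        int ttl = 0;
        int len = sizeof(ttl);

        if (getsockopt(fd, SOL_IP, IP_TTL, &ttl, &len) < 0)
                return -1;
        return ttl;
}
#endif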
2376 
2377 /*
2378  *      IP protocol layer initialiser
2379  */
2380 
2381 static struct packet_type ip_packet_type =
2382 {
2383         0,      /* MUTTER ntohs(ETH_P_IP),*/
2384         NULL,   /* All devices */
2385         ip_rcv,
2386         NULL,
2387         NULL,
2388 };
2389 
2390 /*
2391  *      Device notifier
2392  */
2393  
2394 static int ip_rt_event(unsigned long event, void *ptr)
2395 {
2396         if(event==NETDEV_DOWN)
2397                 ip_rt_flush(ptr);
2398         return NOTIFY_DONE;
2399 }
2400 
2401 struct notifier_block ip_rt_notifier={
2402         ip_rt_event,
2403         NULL,
2404         0
2405 };
2406 
2407 /*
2408  *      IP registers the packet type and then calls the subprotocol initialisers
2409  */
2410 
2411 void ip_init(void)
2412 {
2413         ip_packet_type.type=htons(ETH_P_IP);
2414         dev_add_pack(&ip_packet_type);
2415 
2416         /* So we flush routes when a device is downed */        
2417         register_netdevice_notifier(&ip_rt_notifier);
2418 /*      ip_raw_init();
2419         ip_packet_init();
2420         ip_tcp_init();
2421         ip_udp_init();*/
2422 }
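
/*
 *      Another network layer protocol hooks itself in exactly the same
 *      way as ip_packet_type above: fill in a packet_type with an
 *      ethertype and a receive handler, then hand it to dev_add_pack().
 *      A sketch only; the protocol, its handler and the ethertype value
 *      are made up.
 */

static int xyz_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
{
        /* Examine the frame here, then consume it. */
        kfree_skb(skb, FREE_WRITE);
        return 0;
}

static struct packet_type xyz_packet_type =
{
        0,              /* filled in with htons() of the ethertype at init time */
        NULL,           /* All devices */
        xyz_rcv,
        NULL,
        NULL,
};

void xyz_init(void)
{
        xyz_packet_type.type = htons(0x88B5);   /* example ethertype only */
        dev_add_pack(&xyz_packet_type);
}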
