root/net/inet/ip.c


DEFINITIONS

This source file includes the following definitions:
  1. ip_ioctl
  2. strict_route
  3. loose_route
  4. ip_route_check
  5. build_options
  6. ip_send
  7. ip_build_header
  8. do_options
  9. ip_compute_csum
  10. ip_send_check
  11. ip_frag_create
  12. ip_find
  13. ip_free
  14. ip_expire
  15. ip_create
  16. ip_done
  17. ip_glue
  18. ip_defrag
  19. ip_fragment
  20. ip_forward
  21. ip_rcv
  22. ip_loopback
  23. ip_queue_xmit
  24. ip_mc_procinfo
  25. ip_setsockopt
  26. ip_getsockopt
  27. ip_rt_event
  28. ip_init

   1 /*
   2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
   3  *              operating system.  INET is implemented using the  BSD Socket
   4  *              interface as the means of communication with the user level.
   5  *
   6  *              The Internet Protocol (IP) module.
   7  *
   8  * Version:     @(#)ip.c        1.0.16b 9/1/93
   9  *
  10  * Authors:     Ross Biro, <bir7@leland.Stanford.Edu>
  11  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12  *              Donald Becker, <becker@super.org>
  13  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
  14  *              Richard Underwood
  15  *              Stefan Becker, <stefanb@yello.ping.de>
  16  *              
  17  *
  18  * Fixes:
  19  *              Alan Cox        :       Commented a couple of minor bits of surplus code
  20  *              Alan Cox        :       Undefining IP_FORWARD doesn't include the code
  21  *                                      (just stops a compiler warning).
  22  *              Alan Cox        :       Frames with >=MAX_ROUTE record routes, strict routes or loose routes
  23  *                                      are junked rather than corrupting things.
  24  *              Alan Cox        :       Frames to bad broadcast subnets are dumped
  25  *                                      We used to process them non broadcast and
  26  *                                      boy could that cause havoc.
  27  *              Alan Cox        :       ip_forward sets the free flag on the
  28  *                                      new frame it queues. Still crap because
  29  *                                      it copies the frame but at least it
  30  *                                      doesn't eat memory too.
  31  *              Alan Cox        :       Generic queue code and memory fixes.
  32  *              Fred Van Kempen :       IP fragment support (borrowed from NET2E)
  33  *              Gerhard Koerting:       Forward fragmented frames correctly.
  34  *              Gerhard Koerting:       Fixes to my fix of the above 8-).
  35  *              Gerhard Koerting:       IP interface addressing fix.
  36  *              Linus Torvalds  :       More robustness checks
  37  *              Alan Cox        :       Even more checks: Still not as robust as it ought to be
  38  *              Alan Cox        :       Save IP header pointer for later
  39  *              Alan Cox        :       ip option setting
  40  *              Alan Cox        :       Use ip_tos/ip_ttl settings
  41  *              Alan Cox        :       Fragmentation bogosity removed
  42  *                                      (Thanks to Mark.Bush@prg.ox.ac.uk)
  43  *              Dmitry Gorodchanin :    Send of a raw packet crash fix.
  44  *              Alan Cox        :       Silly ip bug when an overlength
  45  *                                      fragment turns up. Now frees the
  46  *                                      queue.
  47  *              Linus Torvalds/ :       Memory leakage on fragmentation
  48  *              Alan Cox        :       handling.
  49  *              Gerhard Koerting:       Forwarding uses IP priority hints
  50  *              Teemu Rantanen  :       Fragment problems.
  51  *              Alan Cox        :       General cleanup, comments and reformat
  52  *              Alan Cox        :       SNMP statistics
  53  *              Alan Cox        :       BSD address rule semantics. Also see
  54  *                                      UDP as there is a nasty checksum issue
  55  *                                      if you do things the wrong way.
  56  *              Alan Cox        :       Always defrag, moved IP_FORWARD to the config.in file
  57  *              Alan Cox        :       IP options adjust sk->priority.
  58  *              Pedro Roque     :       Fix mtu/length error in ip_forward.
  59  *              Alan Cox        :       Avoid ip_chk_addr when possible.
  60  *              Richard Underwood :     IP multicasting.
  61  *              Alan Cox        :       Cleaned up multicast handlers.
  62  *              Alan Cox        :       RAW sockets demultiplex in the BSD style.
  63  *              Gunther Mayer   :       Fix the SNMP reporting typo
  64  *              Alan Cox        :       Always in group 224.0.0.1
  65  *              Alan Cox        :       Multicast loopback error for 224.0.0.1
  66  *              Alan Cox        :       IP_MULTICAST_LOOP option.
  67  *              Alan Cox        :       Use notifiers.
  68  *              Bjorn Ekwall    :       Removed ip_csum (from slhc.c too)
  69  *              Bjorn Ekwall    :       Moved ip_fast_csum to ip.h (inline!)
  70  *              Stefan Becker   :       Send out ICMP HOST REDIRECT
  71  *  
  72  *
  73  * To Fix:
  74  *              IP option processing is mostly not needed. ip_forward needs to know about routing rules
  75  *              and time stamp but that's about all. Use the route mtu field here too
  76  *              IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
  77  *              and could be made very efficient with the addition of some virtual memory hacks to permit
  78  *              the allocation of a buffer that can then be 'grown' by twiddling page tables.
  79  *              Output fragmentation wants updating along with the buffer management to use a single 
  80  *              interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
  81  *              output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
  82  *              fragmentation anyway.
  83  *
  84  *              This program is free software; you can redistribute it and/or
  85  *              modify it under the terms of the GNU General Public License
  86  *              as published by the Free Software Foundation; either version
  87  *              2 of the License, or (at your option) any later version.
  88  */
  89 
  90 #include <asm/segment.h>
  91 #include <asm/system.h>
  92 #include <linux/types.h>
  93 #include <linux/kernel.h>
  94 #include <linux/sched.h>
  95 #include <linux/mm.h>
  96 #include <linux/string.h>
  97 #include <linux/errno.h>
  98 #include <linux/config.h>
  99 
 100 #include <linux/socket.h>
 101 #include <linux/sockios.h>
 102 #include <linux/in.h>
 103 #include <linux/inet.h>
 104 #include <linux/netdevice.h>
 105 #include <linux/etherdevice.h>
 106 
 107 #include "snmp.h"
 108 #include "ip.h"
 109 #include "protocol.h"
 110 #include "route.h"
 111 #include "tcp.h"
 112 #include "udp.h"
 113 #include <linux/skbuff.h>
 114 #include "sock.h"
 115 #include "arp.h"
 116 #include "icmp.h"
 117 #include "raw.h"
 118 #include <linux/igmp.h>
 119 #include <linux/ip_fw.h>
 120 
 121 #define CONFIG_IP_DEFRAG
 122 
 123 extern int last_retran;
 124 extern void sort_send(struct sock *sk);
 125 
 126 #define min(a,b)        ((a)<(b)?(a):(b))
 127 #define LOOPBACK(x)     (((x) & htonl(0xff000000)) == htonl(0x7f000000))
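/* e.g. LOOPBACK(htonl(0x7F000001)) - 127.0.0.1 - is true; anything outside net 127 is not */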
 128 
 129 /*
 130  *      SNMP management statistics
 131  */
 132 
 133 #ifdef CONFIG_IP_FORWARD
 134 struct ip_mib ip_statistics={1,64,};    /* Forwarding=Yes, Default TTL=64 */
 135 #else
 136 struct ip_mib ip_statistics={0,64,};    /* Forwarding=No, Default TTL=64 */
 137 #endif
 138 
 139 /*
 140  *      Handle the issuing of an ioctl() request
 141  *      for the ip device. This is scheduled to
 142  *      disappear
 143  */
 144 
 145 int ip_ioctl(struct sock *sk, int cmd, unsigned long arg)
 146 {
 147         switch(cmd)
 148         {
 149                 default:
 150                         return(-EINVAL);
 151         }
 152 }
 153 
 154 
 155 /* these two routines will do routing. */
 156 
 157 static void
 158 strict_route(struct iphdr *iph, struct options *opt)
 159 {
 160 }
 161 
 162 
 163 static void
 164 loose_route(struct iphdr *iph, struct options *opt)
 165 {
 166 }
 167 
 168 
 169 
 170 
 171 /* This routine will check to see if we have lost a gateway. */
 172 void
 173 ip_route_check(unsigned long daddr)
 174 {
 175 }
 176 
 177 
 178 #if 0
 179 /* this routine puts the options at the end of an ip header. */
 180 static int
 181 build_options(struct iphdr *iph, struct options *opt)
 182 {
 183   unsigned char *ptr;
 184   /* currently we don't support any options. */
 185   ptr = (unsigned char *)(iph+1);
 186   *ptr = 0;
 187   return (4);
 188 }
 189 #endif
 190 
 191 
 192 /*
 193  *      Take an skb, and fill in the MAC header.
 194  */
 195 
 196 static int ip_send(struct sk_buff *skb, unsigned long daddr, int len, struct device *dev, unsigned long saddr)
 197 {
 198         int mac = 0;
 199 
 200         skb->dev = dev;
 201         skb->arp = 1;
 202         if (dev->hard_header)
 203         {
 204                 /*
 205                  *      Build a hardware header. Source address is our mac, destination unknown
 206                  *      (rebuild header will sort this out)
 207                  */
 208                 mac = dev->hard_header(skb->data, dev, ETH_P_IP, NULL, NULL, len, skb);
 209                 if (mac < 0)
 210                 {
 211                         mac = -mac;
 212                         skb->arp = 0;
 213                         skb->raddr = daddr;     /* next routing address */
 214                 }
 215         }
 216         return mac;
 217 }
 218 
 219 int ip_id_count = 0;
 220 
 221 /*
 222  * This routine builds the appropriate hardware/IP headers for
  223  * transmission.  It assumes that if *dev != NULL then the
 224  * protocol knows what it's doing, otherwise it uses the
 225  * routing/ARP tables to select a device struct.
 226  */
 227 int ip_build_header(struct sk_buff *skb, unsigned long saddr, unsigned long daddr,
 228                 struct device **dev, int type, struct options *opt, int len, int tos, int ttl)
 229 {
 230         static struct options optmem;
 231         struct iphdr *iph;
 232         struct rtable *rt;
 233         unsigned char *buff;
 234         unsigned long raddr;
 235         int tmp;
 236         unsigned long src;
 237 
 238         buff = skb->data;
 239 
 240         /*
 241          *      See if we need to look up the device.
 242          */
 243 
 244 #ifdef CONFIG_INET_MULTICAST    
 245         if(MULTICAST(daddr) && *dev==NULL && skb->sk && *skb->sk->ip_mc_name)
 246                 *dev=dev_get(skb->sk->ip_mc_name);
 247 #endif
 248         if (*dev == NULL)
 249         {
 250                 if(skb->localroute)
 251                         rt = ip_rt_local(daddr, &optmem, &src);
 252                 else
 253                         rt = ip_rt_route(daddr, &optmem, &src);
 254                 if (rt == NULL)
 255                 {
 256                         ip_statistics.IpOutNoRoutes++;
 257                         return(-ENETUNREACH);
 258                 }
 259 
 260                 *dev = rt->rt_dev;
 261                 /*
 262                  *      If the frame is from us and going off machine it MUST MUST MUST
 263                  *      have the output device ip address and never the loopback
 264                  */
 265                 if (LOOPBACK(saddr) && !LOOPBACK(daddr))
 266                         saddr = src;/*rt->rt_dev->pa_addr;*/
 267                 raddr = rt->rt_gateway;
 268 
 269                 opt = &optmem;
 270         }
 271         else
 272         {
 273                 /*
 274                  *      We still need the address of the first hop.
 275                  */
 276                 if(skb->localroute)
 277                         rt = ip_rt_local(daddr, &optmem, &src);
 278                 else
 279                         rt = ip_rt_route(daddr, &optmem, &src);
 280                 /*
 281                  *      If the frame is from us and going off machine it MUST MUST MUST
 282                  *      have the output device ip address and never the loopback
 283                  */
 284                 if (LOOPBACK(saddr) && !LOOPBACK(daddr))
 285                         saddr = src;/*rt->rt_dev->pa_addr;*/
 286 
 287                 raddr = (rt == NULL) ? 0 : rt->rt_gateway;
 288         }
 289 
 290         /*
 291          *      No source addr so make it our addr
 292          */
 293         if (saddr == 0)
 294                 saddr = src;
 295 
 296         /*
 297          *      No gateway so aim at the real destination
 298          */
 299         if (raddr == 0)
 300                 raddr = daddr;
 301 
 302         /*
 303          *      Now build the MAC header.
 304          */
 305 
 306         tmp = ip_send(skb, raddr, len, *dev, saddr);
 307         buff += tmp;
 308         len -= tmp;
 309 
 310         /*
 311          *      Book keeping
 312          */
 313 
 314         skb->dev = *dev;
 315         skb->saddr = saddr;
 316         if (skb->sk)
 317                 skb->sk->saddr = saddr;
 318 
 319         /*
 320          *      Now build the IP header.
 321          */
 322 
 323         /*
 324          *      If we are using IPPROTO_RAW, then we don't need an IP header, since
 325          *      one is being supplied to us by the user
 326          */
 327 
 328         if(type == IPPROTO_RAW)
 329                 return (tmp);
 330 
 331         iph = (struct iphdr *)buff;
 332         iph->version  = 4;
 333         iph->tos      = tos;
 334         iph->frag_off = 0;
 335         iph->ttl      = ttl;
 336         iph->daddr    = daddr;
 337         iph->saddr    = saddr;
 338         iph->protocol = type;
 339         iph->ihl      = 5;
 340         skb->ip_hdr   = iph;
 341 
 342         /* Setup the IP options. */
 343 #ifdef Not_Yet_Avail
 344         build_options(iph, opt);
 345 #endif
 346 
 347         return(20 + tmp);       /* IP header plus MAC header size */
 348 }
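
/*
 *	Illustrative sketch only (an editor's addition, compiled out and never
 *	called): roughly how a transport protocol is expected to use
 *	ip_build_header().  The name "example_ip_output" and its arguments are
 *	hypothetical; the real callers live in the transport code (udp.c,
 *	icmp.c and friends).
 */
#if 0
static void example_ip_output(unsigned long daddr, int protocol,
                              unsigned char *data, int dlen)
{
        struct device *dev = NULL;
        struct sk_buff *skb;
        int hlen;

        /* Leave generous room in front of the data for the MAC + IP headers */
        skb = alloc_skb(dlen + 128, GFP_KERNEL);
        if (skb == NULL)
                return;
        skb->free = 1;
        skb->sk = NULL;
        skb->localroute = 0;

        /* Picks an output device from the routing tables and writes both headers */
        hlen = ip_build_header(skb, 0, daddr, &dev, protocol, NULL,
                               dlen + 128, 0, 255);
        if (hlen < 0)
        {
                kfree_skb(skb, FREE_WRITE);
                return;
        }

        memcpy(skb->data + hlen, data, dlen);
        skb->len = hlen + dlen;

        /* Fields that ip_build_header() above does not fill in */
        skb->ip_hdr->id      = htons(ip_id_count++);
        skb->ip_hdr->tot_len = htons(sizeof(struct iphdr) + dlen);
        ip_send_check(skb->ip_hdr);

        ip_queue_xmit(NULL, dev, skb, 1);       /* 1: frame is freed once sent */
}
#endif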
 349 
 350 
 351 static int
 352 do_options(struct iphdr *iph, struct options *opt)
 353 {
 354   unsigned char *buff;
 355   int done = 0;
 356   int i, len = sizeof(struct iphdr);
 357 
 358   /* Zero out the options. */
 359   opt->record_route.route_size = 0;
 360   opt->loose_route.route_size  = 0;
 361   opt->strict_route.route_size = 0;
 362   opt->tstamp.ptr              = 0;
 363   opt->security                = 0;
 364   opt->compartment             = 0;
 365   opt->handling                = 0;
 366   opt->stream                  = 0;
 367   opt->tcc                     = 0;
  368   return(0);            /* NOTE: the option parsing below is never reached at present */
 369 
 370   /* Advance the pointer to start at the options. */
 371   buff = (unsigned char *)(iph + 1);
 372 
 373   /* Now start the processing. */
 374   while (!done && len < iph->ihl*4) switch(*buff) {
 375         case IPOPT_END:
 376                 done = 1;
 377                 break;
 378         case IPOPT_NOOP:
 379                 buff++;
 380                 len++;
 381                 break;
 382         case IPOPT_SEC:
 383                 buff++;
 384                 if (*buff != 11) return(1);
 385                 buff++;
 386                 opt->security = ntohs(*(unsigned short *)buff);
 387                 buff += 2;
 388                 opt->compartment = ntohs(*(unsigned short *)buff);
 389                 buff += 2;
 390                 opt->handling = ntohs(*(unsigned short *)buff);
 391                 buff += 2;
 392                 opt->tcc = ((*buff) << 16) + ntohs(*(unsigned short *)(buff+1));
 393                 buff += 3;
 394                 len += 11;
 395                 break;
 396         case IPOPT_LSRR:
 397                 buff++;
 398                 if ((*buff - 3)% 4 != 0) return(1);
 399                 len += *buff;
 400                 opt->loose_route.route_size = (*buff -3)/4;
 401                 buff++;
 402                 if (*buff % 4 != 0) return(1);
 403                 opt->loose_route.pointer = *buff/4 - 1;
 404                 buff++;
 405                 buff++;
 406                 for (i = 0; i < opt->loose_route.route_size; i++) {
 407                         if(i>=MAX_ROUTE)
 408                                 return(1);
 409                         opt->loose_route.route[i] = *(unsigned long *)buff;
 410                         buff += 4;
 411                 }
 412                 break;
 413         case IPOPT_SSRR:
 414                 buff++;
 415                 if ((*buff - 3)% 4 != 0) return(1);
 416                 len += *buff;
 417                 opt->strict_route.route_size = (*buff -3)/4;
 418                 buff++;
 419                 if (*buff % 4 != 0) return(1);
 420                 opt->strict_route.pointer = *buff/4 - 1;
 421                 buff++;
 422                 buff++;
 423                 for (i = 0; i < opt->strict_route.route_size; i++) {
 424                         if(i>=MAX_ROUTE)
 425                                 return(1);
 426                         opt->strict_route.route[i] = *(unsigned long *)buff;
 427                         buff += 4;
 428                 }
 429                 break;
 430         case IPOPT_RR:
 431                 buff++;
 432                 if ((*buff - 3)% 4 != 0) return(1);
 433                 len += *buff;
 434                 opt->record_route.route_size = (*buff -3)/4;
 435                 buff++;
 436                 if (*buff % 4 != 0) return(1);
 437                 opt->record_route.pointer = *buff/4 - 1;
 438                 buff++;
 439                 buff++;
 440                 for (i = 0; i < opt->record_route.route_size; i++) {
 441                         if(i>=MAX_ROUTE)
 442                                 return 1;
 443                         opt->record_route.route[i] = *(unsigned long *)buff;
 444                         buff += 4;
 445                 }
 446                 break;
 447         case IPOPT_SID:
 448                 len += 4;
 449                 buff +=2;
 450                 opt->stream = *(unsigned short *)buff;
 451                 buff += 2;
 452                 break;
 453         case IPOPT_TIMESTAMP:
 454                 buff++;
 455                 len += *buff;
 456                 if (*buff % 4 != 0) return(1);
 457                 opt->tstamp.len = *buff / 4 - 1;
 458                 buff++;
 459                 if ((*buff - 1) % 4 != 0) return(1);
 460                 opt->tstamp.ptr = (*buff-1)/4;
 461                 buff++;
 462                 opt->tstamp.x.full_char = *buff;
 463                 buff++;
 464                 for (i = 0; i < opt->tstamp.len; i++) {
 465                         opt->tstamp.data[i] = *(unsigned long *)buff;
 466                         buff += 4;
 467                 }
 468                 break;
 469         default:
 470                 return(1);
 471   }
 472 
 473   if (opt->record_route.route_size == 0) {
 474         if (opt->strict_route.route_size != 0) {
 475                 memcpy(&(opt->record_route), &(opt->strict_route),
 476                                              sizeof(opt->record_route));
 477         } else if (opt->loose_route.route_size != 0) {
 478                 memcpy(&(opt->record_route), &(opt->loose_route),
 479                                              sizeof(opt->record_route));
 480         }
 481   }
 482 
 483   if (opt->strict_route.route_size != 0 &&
 484       opt->strict_route.route_size != opt->strict_route.pointer) {
 485         strict_route(iph, opt);
 486         return(0);
 487   }
 488 
 489   if (opt->loose_route.route_size != 0 &&
 490       opt->loose_route.route_size != opt->loose_route.pointer) {
 491         loose_route(iph, opt);
 492         return(0);
 493   }
 494 
 495   return(0);
 496 }
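
/*
 *	Worked example for the (currently short-circuited) parser above: a
 *	loose source route option of 11 bytes - type IPOPT_LSRR, length 11,
 *	pointer 4, then two 4 byte addresses - would give route_size =
 *	(11-3)/4 = 2 and pointer = 4/4 - 1 = 0, i.e. the next hop is route[0].
 */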
 497 
 498 /*
 499  * This routine does all the checksum computations that don't
 500  * require anything special (like copying or special headers).
 501  */
 502 
 503 unsigned short ip_compute_csum(unsigned char * buff, int len)
 504 {
 505         unsigned long sum = 0;
 506 
 507         /* Do the first multiple of 4 bytes and convert to 16 bits. */
 508         if (len > 3)
 509         {
 510                 __asm__("clc\n"
 511                 "1:\t"
 512                 "lodsl\n\t"
 513                 "adcl %%eax, %%ebx\n\t"
 514                 "loop 1b\n\t"
 515                 "adcl $0, %%ebx\n\t"
 516                 "movl %%ebx, %%eax\n\t"
 517                 "shrl $16, %%eax\n\t"
 518                 "addw %%ax, %%bx\n\t"
 519                 "adcw $0, %%bx"
 520                 : "=b" (sum) , "=S" (buff)
 521                 : "0" (sum), "c" (len >> 2) ,"1" (buff)
 522                 : "ax", "cx", "si", "bx" );
 523         }
 524         if (len & 2)
 525         {
 526                 __asm__("lodsw\n\t"
 527                 "addw %%ax, %%bx\n\t"
 528                 "adcw $0, %%bx"
 529                 : "=b" (sum), "=S" (buff)
 530                 : "0" (sum), "1" (buff)
 531                 : "bx", "ax", "si");
 532         }
 533         if (len & 1)
 534         {
 535                 __asm__("lodsb\n\t"
 536                 "movb $0, %%ah\n\t"
 537                 "addw %%ax, %%bx\n\t"
 538                 "adcw $0, %%bx"
 539                 : "=b" (sum), "=S" (buff)
 540                 : "0" (sum), "1" (buff)
 541                 : "bx", "ax", "si");
 542         }
 543         sum =~sum;
 544         return(sum & 0xffff);
 545 }
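
#if 0
/*
 *	Portable equivalent of the assembler above (an editor's sketch,
 *	compiled out - cf. RFC 1071): sum the buffer as 16 bit words, fold
 *	the carries back into the low word and return the one's complement.
 *	The odd trailing byte is added in the low half, as the 386 code does.
 */
static unsigned short ip_compute_csum_portable(unsigned char *buff, int len)
{
        unsigned long sum = 0;

        while (len > 1)
        {
                sum += *(unsigned short *) buff;
                buff += 2;
                len -= 2;
        }
        if (len)                                /* odd trailing byte */
                sum += *buff;
        while (sum >> 16)                       /* fold carries */
                sum = (sum & 0xffff) + (sum >> 16);
        return (unsigned short) ~sum;
}
#endif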
 546 
 547 /*
 548  *      Generate a checksum for an outgoing IP datagram.
 549  */
 550 
 551 void ip_send_check(struct iphdr *iph)
 552 {
 553         iph->check = 0;
 554         iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
 555 }
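
/*
 *	The checksum has to be computed with the check field zeroed first;
 *	a receiver verifying the header (as ip_rcv() does below) simply sums
 *	the whole header, check field included, and expects zero.
 */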
 556 
 557 /************************ Fragment Handlers From NET2E **********************************/
 558 
 559 
 560 /*
 561  *      This fragment handler is a bit of a heap. On the other hand it works quite
 562  *      happily and handles things quite well.
 563  */
 564 
 565 static struct ipq *ipqueue = NULL;              /* IP fragment queue    */
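
/*
 *	One ipq is kept per datagram being reassembled, each holding a doubly
 *	linked list of its fragments sorted by offset:
 *
 *		ipqueue --> ipq <--> ipq <--> ipq ...
 *		             |
 *		             +--> ipfrag <--> ipfrag <--> ipfrag   (ascending offset)
 */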
 566 
 567 /*
 568  *      Create a new fragment entry.
 569  */
 570 
 571 static struct ipfrag *ip_frag_create(int offset, int end, struct sk_buff *skb, unsigned char *ptr)
 572 {
 573         struct ipfrag *fp;
 574 
 575         fp = (struct ipfrag *) kmalloc(sizeof(struct ipfrag), GFP_ATOMIC);
 576         if (fp == NULL)
 577         {
 578                 printk("IP: frag_create: no memory left !\n");
 579                 return(NULL);
 580         }
 581         memset(fp, 0, sizeof(struct ipfrag));
 582 
 583         /* Fill in the structure. */
 584         fp->offset = offset;
 585         fp->end = end;
 586         fp->len = end - offset;
 587         fp->skb = skb;
 588         fp->ptr = ptr;
 589 
 590         return(fp);
 591 }
 592 
 593 
 594 /*
 595  *      Find the correct entry in the "incomplete datagrams" queue for
 596  *      this IP datagram, and return the queue entry address if found.
 597  */
 598 
 599 static struct ipq *ip_find(struct iphdr *iph)
 600 {
 601         struct ipq *qp;
 602         struct ipq *qplast;
 603 
 604         cli();
 605         qplast = NULL;
 606         for(qp = ipqueue; qp != NULL; qplast = qp, qp = qp->next)
 607         {
 608                 if (iph->id== qp->iph->id && iph->saddr == qp->iph->saddr &&
 609                         iph->daddr == qp->iph->daddr && iph->protocol == qp->iph->protocol)
 610                 {
 611                         del_timer(&qp->timer);  /* So it doesn't vanish on us. The timer will be reset anyway */
 612                         sti();
 613                         return(qp);
 614                 }
 615         }
 616         sti();
 617         return(NULL);
 618 }
 619 
 620 
 621 /*
 622  *      Remove an entry from the "incomplete datagrams" queue, either
 623  *      because we completed, reassembled and processed it, or because
 624  *      it timed out.
 625  */
 626 
 627 static void ip_free(struct ipq *qp)
 628 {
 629         struct ipfrag *fp;
 630         struct ipfrag *xp;
 631 
 632         /*
 633          * Stop the timer for this entry.
 634          */
 635 
 636         del_timer(&qp->timer);
 637 
 638         /* Remove this entry from the "incomplete datagrams" queue. */
 639         cli();
 640         if (qp->prev == NULL)
 641         {
 642                 ipqueue = qp->next;
 643                 if (ipqueue != NULL)
 644                         ipqueue->prev = NULL;
 645         }
 646         else
 647         {
 648                 qp->prev->next = qp->next;
 649                 if (qp->next != NULL)
 650                         qp->next->prev = qp->prev;
 651         }
 652 
 653         /* Release all fragment data. */
 654 
 655         fp = qp->fragments;
 656         while (fp != NULL)
 657         {
 658                 xp = fp->next;
 659                 IS_SKB(fp->skb);
 660                 kfree_skb(fp->skb,FREE_READ);
 661                 kfree_s(fp, sizeof(struct ipfrag));
 662                 fp = xp;
 663         }
 664 
 665         /* Release the MAC header. */
 666         kfree_s(qp->mac, qp->maclen);
 667 
 668         /* Release the IP header. */
 669         kfree_s(qp->iph, qp->ihlen + 8);
 670 
 671         /* Finally, release the queue descriptor itself. */
 672         kfree_s(qp, sizeof(struct ipq));
 673         sti();
 674 }
 675 
 676 
 677 /*
 678  *      Oops- a fragment queue timed out.  Kill it and send an ICMP reply.
 679  */
 680 
 681 static void ip_expire(unsigned long arg)
 682 {
 683         struct ipq *qp;
 684 
 685         qp = (struct ipq *)arg;
 686 
 687         /*
 688          *      Send an ICMP "Fragment Reassembly Timeout" message.
 689          */
 690 
 691         ip_statistics.IpReasmTimeout++;
 692         ip_statistics.IpReasmFails++;   
 693         /* This if is always true... shrug */
 694         if(qp->fragments!=NULL)
 695                 icmp_send(qp->fragments->skb,ICMP_TIME_EXCEEDED,
 696                                 ICMP_EXC_FRAGTIME, 0, qp->dev);
 697 
 698         /*
 699          *      Nuke the fragment queue.
 700          */
 701         ip_free(qp);
 702 }
 703 
 704 
 705 /*
 706  *      Add an entry to the 'ipq' queue for a newly received IP datagram.
 707  *      We will (hopefully :-) receive all other fragments of this datagram
 708  *      in time, so we just create a queue for this datagram, in which we
 709  *      will insert the received fragments at their respective positions.
 710  */
 711 
 712 static struct ipq *ip_create(struct sk_buff *skb, struct iphdr *iph, struct device *dev)
 713 {
 714         struct ipq *qp;
 715         int maclen;
 716         int ihlen;
 717 
 718         qp = (struct ipq *) kmalloc(sizeof(struct ipq), GFP_ATOMIC);
 719         if (qp == NULL)
 720         {
 721                 printk("IP: create: no memory left !\n");
 722                 return(NULL);
 724         }
 725         memset(qp, 0, sizeof(struct ipq));
 726 
 727         /*
 728          *      Allocate memory for the MAC header.
 729          *
 730          *      FIXME: We have a maximum MAC address size limit and define
 731          *      elsewhere. We should use it here and avoid the 3 kmalloc() calls
 732          */
 733 
 734         maclen = ((unsigned long) iph) - ((unsigned long) skb->data);
 735         qp->mac = (unsigned char *) kmalloc(maclen, GFP_ATOMIC);
 736         if (qp->mac == NULL)
 737         {
 738                 printk("IP: create: no memory left !\n");
 739                 kfree_s(qp, sizeof(struct ipq));
 740                 return(NULL);
 741         }
 742 
 743         /*
 744          *      Allocate memory for the IP header (plus 8 octets for ICMP).
 745          */
 746 
 747         ihlen = (iph->ihl * sizeof(unsigned long));
 748         qp->iph = (struct iphdr *) kmalloc(ihlen + 8, GFP_ATOMIC);
 749         if (qp->iph == NULL)
 750         {
 751                 printk("IP: create: no memory left !\n");
 752                 kfree_s(qp->mac, maclen);
 753                 kfree_s(qp, sizeof(struct ipq));
 754                 return(NULL);
 755         }
 756 
 757         /* Fill in the structure. */
 758         memcpy(qp->mac, skb->data, maclen);
 759         memcpy(qp->iph, iph, ihlen + 8);
 760         qp->len = 0;
 761         qp->ihlen = ihlen;
 762         qp->maclen = maclen;
 763         qp->fragments = NULL;
 764         qp->dev = dev;
 765 
 766         /* Start a timer for this entry. */
 767         qp->timer.expires = IP_FRAG_TIME;               /* about 30 seconds     */
 768         qp->timer.data = (unsigned long) qp;            /* pointer to queue     */
 769         qp->timer.function = ip_expire;                 /* expire function      */
 770         add_timer(&qp->timer);
 771 
 772         /* Add this entry to the queue. */
 773         qp->prev = NULL;
 774         cli();
 775         qp->next = ipqueue;
 776         if (qp->next != NULL)
 777                 qp->next->prev = qp;
 778         ipqueue = qp;
 779         sti();
 780         return(qp);
 781 }
 782 
 783 
 784 /*
 785  *      See if a fragment queue is complete.
 786  */
 787 
 788 static int ip_done(struct ipq *qp)
 789 {
 790         struct ipfrag *fp;
 791         int offset;
 792 
 793         /* Only possible if we received the final fragment. */
 794         if (qp->len == 0)
 795                 return(0);
 796 
 797         /* Check all fragment offsets to see if they connect. */
 798         fp = qp->fragments;
 799         offset = 0;
 800         while (fp != NULL)
 801         {
 802                 if (fp->offset > offset)
 803                         return(0);      /* fragment(s) missing */
 804                 offset = fp->end;
 805                 fp = fp->next;
 806         }
 807 
 808         /* All fragments are present. */
 809         return(1);
 810 }
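
/*
 *	For example, holding fragments that cover bytes 0-1463 and 2928-3979
 *	of a 3980 byte datagram leaves a hole at offset 1464, so ip_done()
 *	above keeps returning 0 until the middle fragment turns up.
 */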
 811 
 812 
 813 /*
 814  *      Build a new IP datagram from all its fragments.
 815  *
 816  *      FIXME: We copy here because we lack an effective way of handling lists
 817  *      of bits on input. Until the new skb data handling is in I'm not going
 818  *      to touch this with a bargepole. This also causes a 4Kish limit on
 819  *      packet sizes.
 820  */
 821 
 822 static struct sk_buff *ip_glue(struct ipq *qp)
 823 {
 824         struct sk_buff *skb;
 825         struct iphdr *iph;
 826         struct ipfrag *fp;
 827         unsigned char *ptr;
 828         int count, len;
 829 
 830         /*
 831          *      Allocate a new buffer for the datagram.
 832          */
 833 
 834         len = qp->maclen + qp->ihlen + qp->len;
 835 
 836         if ((skb = alloc_skb(len,GFP_ATOMIC)) == NULL)
 837         {
 838                 ip_statistics.IpReasmFails++;
 839                 printk("IP: queue_glue: no memory for gluing queue 0x%X\n", (int) qp);
 840                 ip_free(qp);
 841                 return(NULL);
 842         }
 843 
 844         /* Fill in the basic details. */
 845         skb->len = (len - qp->maclen);
 846         skb->h.raw = skb->data;
 847         skb->free = 1;
 848 
 849         /* Copy the original MAC and IP headers into the new buffer. */
 850         ptr = (unsigned char *) skb->h.raw;
 851         memcpy(ptr, ((unsigned char *) qp->mac), qp->maclen);
 852         ptr += qp->maclen;
 853         memcpy(ptr, ((unsigned char *) qp->iph), qp->ihlen);
 854         ptr += qp->ihlen;
 855         skb->h.raw += qp->maclen;
 856 
 857         count = 0;
 858 
 859         /* Copy the data portions of all fragments into the new buffer. */
 860         fp = qp->fragments;
 861         while(fp != NULL)
 862         {
 863                 if(count+fp->len > skb->len)
 864                 {
 865                         printk("Invalid fragment list: Fragment over size.\n");
 866                         ip_free(qp);
 867                         kfree_skb(skb,FREE_WRITE);
 868                         ip_statistics.IpReasmFails++;
 869                         return NULL;
 870                 }
 871                 memcpy((ptr + fp->offset), fp->ptr, fp->len);
 872                 count += fp->len;
 873                 fp = fp->next;
 874         }
 875 
 876         /* We glued together all fragments, so remove the queue entry. */
 877         ip_free(qp);
 878 
 879         /* Done with all fragments. Fixup the new IP header. */
 880         iph = skb->h.iph;
 881         iph->frag_off = 0;
 882         iph->tot_len = htons((iph->ihl * sizeof(unsigned long)) + count);
 883         skb->ip_hdr = iph;
 884 
 885         ip_statistics.IpReasmOKs++;
 886         return(skb);
 887 }
 888 
 889 
 890 /*
 891  *      Process an incoming IP datagram fragment.
 892  */
 893 
 894 static struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev)
 895 {
 896         struct ipfrag *prev, *next;
 897         struct ipfrag *tfp;
 898         struct ipq *qp;
 899         struct sk_buff *skb2;
 900         unsigned char *ptr;
 901         int flags, offset;
 902         int i, ihl, end;
 903 
 904         ip_statistics.IpReasmReqds++;
 905 
 906         /* Find the entry of this IP datagram in the "incomplete datagrams" queue. */
 907         qp = ip_find(iph);
 908 
 909         /* Is this a non-fragmented datagram? */
 910         offset = ntohs(iph->frag_off);
 911         flags = offset & ~IP_OFFSET;
 912         offset &= IP_OFFSET;
 913         if (((flags & IP_MF) == 0) && (offset == 0))
 914         {
 915                 if (qp != NULL)
 916                         ip_free(qp);    /* Huh? How could this exist?? */
 917                 return(skb);
 918         }
 919 
 920         offset <<= 3;           /* offset is in 8-byte chunks */
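        /*
         *      Worked example: a fragment arriving with frag_off = 0x2001
         *      on the wire has IP_MF set and an offset field of 1, so its
         *      data starts 8 bytes into the original datagram and "offset"
         *      is 8 from here on.
         */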
 921 
 922         /*
 923          * If the queue already existed, keep restarting its timer as long
 924          * as we still are receiving fragments.  Otherwise, create a fresh
 925          * queue entry.
 926          */
 927 
 928         if (qp != NULL)
 929         {
 930                 del_timer(&qp->timer);
 931                 qp->timer.expires = IP_FRAG_TIME;       /* about 30 seconds */
 932                 qp->timer.data = (unsigned long) qp;    /* pointer to queue */
 933                 qp->timer.function = ip_expire;         /* expire function */
 934                 add_timer(&qp->timer);
 935         }
 936         else
 937         {
 938                 /*
 939                  *      If we failed to create it, then discard the frame
 940                  */
 941                 if ((qp = ip_create(skb, iph, dev)) == NULL)
 942                 {
 943                         skb->sk = NULL;
 944                         kfree_skb(skb, FREE_READ);
 945                         ip_statistics.IpReasmFails++;
 946                         return NULL;
 947                 }
 948         }
 949 
 950         /*
 951          *      Determine the position of this fragment.
 952          */
 953 
 954         ihl = (iph->ihl * sizeof(unsigned long));
 955         end = offset + ntohs(iph->tot_len) - ihl;
 956 
 957         /*
 958          *      Point into the IP datagram 'data' part.
 959          */
 960 
 961         ptr = skb->data + dev->hard_header_len + ihl;
 962 
 963         /*
 964          *      Is this the final fragment?
 965          */
 966 
 967         if ((flags & IP_MF) == 0)
 968                 qp->len = end;
 969 
 970         /*
 971          *      Find out which fragments are in front and at the back of us
 972          *      in the chain of fragments so far.  We must know where to put
 973          *      this fragment, right?
 974          */
 975 
 976         prev = NULL;
 977         for(next = qp->fragments; next != NULL; next = next->next)
 978         {
 979                 if (next->offset > offset)
 980                         break;  /* bingo! */
 981                 prev = next;
 982         }
 983 
 984         /*
 985          *      We found where to put this one.
 986          *      Check for overlap with preceding fragment, and, if needed,
 987          *      align things so that any overlaps are eliminated.
 988          */
 989         if (prev != NULL && offset < prev->end)
 990         {
 991                 i = prev->end - offset;
 992                 offset += i;    /* ptr into datagram */
 993                 ptr += i;       /* ptr into fragment data */
 994         }
 995 
 996         /*
 997          * Look for overlap with succeeding segments.
 998          * If we can merge fragments, do it.
 999          */
1000 
1001         for(; next != NULL; next = tfp)
1002         {
1003                 tfp = next->next;
1004                 if (next->offset >= end)
1005                         break;          /* no overlaps at all */
1006 
1007                 i = end - next->offset;                 /* overlap is 'i' bytes */
1008                 next->len -= i;                         /* so reduce size of    */
1009                 next->offset += i;                      /* next fragment        */
1010                 next->ptr += i;
1011 
1012                 /*
1013                  *      If we get a frag size of <= 0, remove it and the packet
1014                  *      that it goes with.
1015                  */
1016                 if (next->len <= 0)
1017                 {
1018                         if (next->prev != NULL)
1019                                 next->prev->next = next->next;
1020                         else
1021                                 qp->fragments = next->next;
1022 
 1023                         if (next->next != NULL)
1024                                 next->next->prev = next->prev;
1025 
1026                         kfree_skb(next->skb,FREE_READ);
1027                         kfree_s(next, sizeof(struct ipfrag));
1028                 }
1029         }
1030 
1031         /*
1032          *      Insert this fragment in the chain of fragments.
1033          */
1034 
1035         tfp = NULL;
1036         tfp = ip_frag_create(offset, end, skb, ptr);
1037 
1038         /*
1039          *      No memory to save the fragment - so throw the lot
1040          */
1041 
1042         if (!tfp)
1043         {
1044                 skb->sk = NULL;
1045                 kfree_skb(skb, FREE_READ);
1046                 return NULL;
1047         }
1048         tfp->prev = prev;
1049         tfp->next = next;
1050         if (prev != NULL)
1051                 prev->next = tfp;
1052         else
1053                 qp->fragments = tfp;
1054 
1055         if (next != NULL)
1056                 next->prev = tfp;
1057 
1058         /*
1059          *      OK, so we inserted this new fragment into the chain.
1060          *      Check if we now have a full IP datagram which we can
1061          *      bump up to the IP layer...
1062          */
1063 
1064         if (ip_done(qp))
1065         {
1066                 skb2 = ip_glue(qp);             /* glue together the fragments */
1067                 return(skb2);
1068         }
1069         return(NULL);
1070 }
1071 
1072 
1073 /*
1074  *      This IP datagram is too large to be sent in one piece.  Break it up into
1075  *      smaller pieces (each of size equal to the MAC header plus IP header plus
1076  *      a block of the data of the original IP data part) that will yet fit in a
1077  *      single device frame, and queue such a frame for sending by calling the
1078  *      ip_queue_xmit().  Note that this is recursion, and bad things will happen
1079  *      if this function causes a loop...
1080  *
1081  *      Yes this is inefficient, feel free to submit a quicker one.
1082  *
1083  *      **Protocol Violation**
1084  *      We copy all the options to each fragment. !FIXME!
1085  */
1086 void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag)
1087 {
1088         struct iphdr *iph;
1089         unsigned char *raw;
1090         unsigned char *ptr;
1091         struct sk_buff *skb2;
1092         int left, mtu, hlen, len;
1093         int offset;
1094         unsigned long flags;
1095 
1096         /*
1097          *      Point into the IP datagram header.
1098          */
1099 
1100         raw = skb->data;
1101         iph = (struct iphdr *) (raw + dev->hard_header_len);
1102 
1103         skb->ip_hdr = iph;
1104 
1105         /*
1106          *      Setup starting values.
1107          */
1108 
1109         hlen = (iph->ihl * sizeof(unsigned long));
1110         left = ntohs(iph->tot_len) - hlen;      /* Space per frame */
1111         hlen += dev->hard_header_len;           /* Total header size */
1112         mtu = (dev->mtu - hlen);                /* Size of data space */
1113         ptr = (raw + hlen);                     /* Where to start from */
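
        /*
         *      Worked example (assuming an Ethernet style device with a 14
         *      byte hard_header_len and an mtu of 1500): a 4000 byte
         *      datagram with a 20 byte header gives left = 3980, hlen = 34
         *      and mtu = 1466, which the loop below rounds down to 1464
         *      byte pieces.  The datagram therefore leaves as fragments
         *      carrying 1464, 1464 and 1052 data bytes at offsets 0, 1464
         *      and 2928.
         */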
1114 
1115         /*
1116          *      Check for any "DF" flag. [DF means do not fragment]
1117          */
1118 
1119         if (ntohs(iph->frag_off) & IP_DF)
1120         {
1121                 /*
1122                  *      Reply giving the MTU of the failed hop.
1123                  */
1124                 ip_statistics.IpFragFails++;
1125                 icmp_send(skb,ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, dev->mtu, dev);
1126                 return;
1127         }
1128 
1129         /*
1130          *      The protocol doesn't seem to say what to do in the case that the
1131          *      frame + options doesn't fit the mtu. As it used to fall down dead
1132          *      in this case we were fortunate it didn't happen
1133          */
1134 
1135         if(mtu<8)
1136         {
1137                 /* It's wrong but it's better than nothing */
1138                 icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,dev->mtu, dev);
1139                 ip_statistics.IpFragFails++;
1140                 return;
1141         }
1142 
1143         /*
1144          *      Fragment the datagram.
1145          */
1146 
1147         /*
1148          *      The initial offset is 0 for a complete frame. When
1149          *      fragmenting fragments it's wherever this one starts.
1150          */
1151 
1152         if (is_frag & 2)
1153                 offset = (ntohs(iph->frag_off) & 0x1fff) << 3;
1154         else
1155                 offset = 0;
1156 
1157 
1158         /*
1159          *      Keep copying data until we run out.
1160          */
1161 
1162         while(left > 0)
1163         {
1164                 len = left;
1165                 /* IF: it doesn't fit, use 'mtu' - the data space left */
1166                 if (len > mtu)
1167                         len = mtu;
 1168                 /* IF: we are not sending up to and including the packet end
 1169                    then align the next start on an eight byte boundary */
1170                 if (len < left)
1171                 {
1172                         len/=8;
1173                         len*=8;
1174                 }
1175                 /*
1176                  *      Allocate buffer.
1177                  */
1178 
1179                 if ((skb2 = alloc_skb(len + hlen,GFP_ATOMIC)) == NULL)
1180                 {
1181                         printk("IP: frag: no memory for new fragment!\n");
1182                         ip_statistics.IpFragFails++;
1183                         return;
1184                 }
1185 
1186                 /*
1187                  *      Set up data on packet
1188                  */
1189 
1190                 skb2->arp = skb->arp;
1191                 if(skb->free==0)
1192                         printk("IP fragmenter: BUG free!=1 in fragmenter\n");
1193                 skb2->free = 1;
1194                 skb2->len = len + hlen;
1195                 skb2->h.raw=(char *) skb2->data;
1196                 /*
1197                  *      Charge the memory for the fragment to any owner
1198                  *      it might possess
1199                  */
1200 
1201                 save_flags(flags);
1202                 if (sk)
1203                 {
1204                         cli();
1205                         sk->wmem_alloc += skb2->mem_len;
1206                         skb2->sk=sk;
1207                 }
1208                 restore_flags(flags);
1209                 skb2->raddr = skb->raddr;       /* For rebuild_header - must be here */
1210 
1211                 /*
1212                  *      Copy the packet header into the new buffer.
1213                  */
1214 
1215                 memcpy(skb2->h.raw, raw, hlen);
1216 
1217                 /*
1218                  *      Copy a block of the IP datagram.
1219                  */
1220                 memcpy(skb2->h.raw + hlen, ptr, len);
1221                 left -= len;
1222 
1223                 skb2->h.raw+=dev->hard_header_len;
1224 
1225                 /*
1226                  *      Fill in the new header fields.
1227                  */
1228                 iph = (struct iphdr *)(skb2->h.raw/*+dev->hard_header_len*/);
1229                 iph->frag_off = htons((offset >> 3));
1230                 /*
 1231                  *      Added AC : If we are fragmenting a fragment that's not the
 1232                  *                 last fragment then keep the MF bit set on each piece
1233                  */
1234                 if (left > 0 || (is_frag & 1))
1235                         iph->frag_off |= htons(IP_MF);
1236                 ptr += len;
1237                 offset += len;
1238 
1239                 /*
1240                  *      Put this fragment into the sending queue.
1241                  */
1242 
1243                 ip_statistics.IpFragCreates++;
1244 
1245                 ip_queue_xmit(sk, dev, skb2, 2);
1246         }
1247         ip_statistics.IpFragOKs++;
1248 }
1249 
1250 
1251 
1252 #ifdef CONFIG_IP_FORWARD
1253 
1254 /*
1255  *      Forward an IP datagram to its next destination.
1256  */
1257 
1258 static void ip_forward(struct sk_buff *skb, struct device *dev, int is_frag)
1259 {
1260         struct device *dev2;    /* Output device */
1261         struct iphdr *iph;      /* Our header */
1262         struct sk_buff *skb2;   /* Output packet */
1263         struct rtable *rt;      /* Route we use */
1264         unsigned char *ptr;     /* Data pointer */
1265         unsigned long raddr;    /* Router IP address */
1266         
1267         /* 
1268          *      See if we are allowed to forward this.
1269          */
1270 
1271 #ifdef CONFIG_IP_FIREWALL
1272         int err;
1273         
1274         if((err=ip_fw_chk(skb->h.iph, dev, ip_fw_fwd_chain, ip_fw_fwd_policy, 0))!=1)
1275         {
1276                 if(err==-1)
1277                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
1278                 return;
1279         }
1280 #endif
1281         /*
1282          *      According to the RFC, we must first decrease the TTL field. If
1283          *      that reaches zero, we must reply an ICMP control message telling
1284          *      that the packet's lifetime expired.
1285          *
1286          *      Exception:
1287          *      We may not generate an ICMP for an ICMP. icmp_send does the
1288          *      enforcement of this so we can forget it here. It is however
1289          *      sometimes VERY important.
1290          */
1291 
1292         iph = skb->h.iph;
1293         iph->ttl--;
1294         if (iph->ttl <= 0)
1295         {
1296                 /* Tell the sender its packet died... */
1297                 icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0, dev);
1298                 return;
1299         }
1300 
1301         /*
1302          *      Re-compute the IP header checksum.
1303          *      This is inefficient. We know what has happened to the header
1304          *      and could thus adjust the checksum as Phil Karn does in KA9Q
1305          */
1306 
1307         ip_send_check(iph);
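
#if 0
        /*
         *      The incremental KA9Q style update mentioned above would look
         *      roughly like this (an editor's sketch, compiled out - see
         *      RFC 1141): only the TTL byte went down by one, so the
         *      ip_send_check() above could be replaced by patching the old
         *      checksum rather than summing the whole header again.
         */
        {
                unsigned long sum = iph->check + htons(0x0100); /* weight of the TTL byte */
                iph->check = sum + (sum >> 16);                 /* fold any carry */
        }
#endif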
1308 
1309         /*
1310          * OK, the packet is still valid.  Fetch its destination address,
1311          * and give it to the IP sender for further processing.
1312          */
1313 
1314         rt = ip_rt_route(iph->daddr, NULL, NULL);
1315         if (rt == NULL)
1316         {
1317                 /*
1318                  *      Tell the sender its packet cannot be delivered. Again
1319                  *      ICMP is screened later.
1320                  */
1321                 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_UNREACH, 0, dev);
1322                 return;
1323         }
1324 
1325 
1326         /*
1327          * Gosh.  Not only is the packet valid; we even know how to
1328          * forward it onto its final destination.  Can we say this
1329          * is being plain lucky?
1330          * If the router told us that there is no GW, use the dest.
1331          * IP address itself- we seem to be connected directly...
1332          */
1333 
1334         raddr = rt->rt_gateway;
1335 
1336         if (raddr != 0)
1337         {
1338                 /*
1339                  *      There is a gateway so find the correct route for it.
1340                  *      Gateways cannot in turn be gatewayed.
1341                  */
1342                 rt = ip_rt_route(raddr, NULL, NULL);
1343                 if (rt == NULL)
1344                 {
1345                         /*
1346                          *      Tell the sender its packet cannot be delivered...
1347                          */
1348                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
1349                         return;
1350                 }
1351                 if (rt->rt_gateway != 0)
1352                         raddr = rt->rt_gateway;
1353         }
1354         else
1355                 raddr = iph->daddr;
1356 
1357         /*
1358          *      Having picked a route we can now send the frame out.
1359          */
1360 
1361         dev2 = rt->rt_dev;
1362 
1363         /*
1364          *      In IP you never have to forward a frame on the interface that it 
1365          *      arrived upon. We now generate an ICMP HOST REDIRECT giving the route
1366          *      we calculated.
1367          */
1368 #ifdef IP_NO_ICMP_REDIRECT
1369         if (dev == dev2)
1370                 return;
1371 #else
1372         if (dev == dev2)
1373                 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, raddr, dev);
1374 #endif          
1375 
1376         /*
1377          * We now allocate a new buffer, and copy the datagram into it.
1378          * If the indicated interface is up and running, kick it.
1379          */
1380 
1381         if (dev2->flags & IFF_UP)
1382         {
1383 
1384                 /*
1385                  *      Current design decrees we copy the packet. For identical header
1386                  *      lengths we could avoid it. The new skb code will let us push
1387                  *      data so the problem goes away then.
1388                  */
1389 
1390                 skb2 = alloc_skb(dev2->hard_header_len + skb->len, GFP_ATOMIC);
1391                 /*
1392                  *      This is rare and since IP is tolerant of network failures
1393                  *      quite harmless.
1394                  */
1395                 if (skb2 == NULL)
1396                 {
1397                         printk("\nIP: No memory available for IP forward\n");
1398                         return;
1399                 }
1400                 ptr = skb2->data;
1401                 skb2->free = 1;
1402                 skb2->len = skb->len + dev2->hard_header_len;
1403                 skb2->h.raw = ptr;
1404 
1405                 /*
1406                  *      Copy the packet data into the new buffer.
1407                  */
1408                 memcpy(ptr + dev2->hard_header_len, skb->h.raw, skb->len);
1409 
1410                 /* Now build the MAC header. */
1411                 (void) ip_send(skb2, raddr, skb->len, dev2, dev2->pa_addr);
1412 
1413                 ip_statistics.IpForwDatagrams++;
1414 
1415                 /*
1416                  *      See if it needs fragmenting. Note in ip_rcv we tagged
1417                  *      the fragment type. This must be right so that
1418                  *      the fragmenter does the right thing.
1419                  */
1420 
1421                 if(skb2->len > dev2->mtu + dev2->hard_header_len)
1422                 {
1423                         ip_fragment(NULL,skb2,dev2, is_frag);
1424                         kfree_skb(skb2,FREE_WRITE);
1425                 }
1426                 else
1427                 {
1428 #ifdef CONFIG_IP_ACCT           
1429                         /*
1430                          *      Count mapping we shortcut
1431                          */
1432                          
1433                         ip_acct_cnt(iph,dev,ip_acct_chain);
1434 #endif                  
1435                         
1436                         /*
1437                          *      Map service types to priority. We lie about
1438                          *      throughput being low priority, but it's a good
1439                          *      choice to help improve general usage.
1440                          */
1441                         if(iph->tos & IPTOS_LOWDELAY)
1442                                 dev_queue_xmit(skb2, dev2, SOPRI_INTERACTIVE);
1443                         else if(iph->tos & IPTOS_THROUGHPUT)
1444                                 dev_queue_xmit(skb2, dev2, SOPRI_BACKGROUND);
1445                         else
1446                                 dev_queue_xmit(skb2, dev2, SOPRI_NORMAL);
1447                 }
1448         }
1449 }
1450 
1451 
1452 #endif
1453 
1454 /*
1455  *      This function receives all incoming IP datagrams.
1456  */
1457 
1458 int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
1459 {
1460         struct iphdr *iph = skb->h.iph;
1461         struct sock *raw_sk=NULL;
1462         unsigned char hash;
1463         unsigned char flag = 0;
1464         unsigned char opts_p = 0;       /* Set iff the packet has options. */
1465         struct inet_protocol *ipprot;
1466         static struct options opt; /* since we don't use these yet, and they
1467                                 take up stack space. */
1468         int brd=IS_MYADDR;
1469         int is_frag=0;
1470 #ifdef CONFIG_IP_FIREWALL
1471         int err;
1472 #endif  
1473 
1474         ip_statistics.IpInReceives++;
1475 
1476         /*
1477          *      Tag the ip header of this packet so we can find it
1478          */
1479 
1480         skb->ip_hdr = iph;
1481 
1482         /*
1483          *      Is the datagram acceptable?
1484          *
1485          *      1.      Length at least the size of an ip header
1486          *      2.      Version of 4
1487          *      3.      Checksums correctly. [Speed optimisation for later, skip loopback checksums]
1488          *      (4.     We ought to check for IP multicast addresses and undefined types.. does this matter ?)
1489          */
1490 
1491         if (skb->len<sizeof(struct iphdr) || iph->ihl<5 || iph->version != 4 || ip_fast_csum((unsigned char *)iph, iph->ihl) !=0)
1492         {
1493                 ip_statistics.IpInHdrErrors++;
1494                 kfree_skb(skb, FREE_WRITE);
1495                 return(0);
1496         }
1497         
1498         /*
1499          *      See if the firewall wants to dispose of the packet. 
1500          */
1501 
1502 #ifdef  CONFIG_IP_FIREWALL
1503         
1504         if ((err=ip_fw_chk(iph,dev,ip_fw_blk_chain,ip_fw_blk_policy, 0))!=1)
1505         {
1506                 if(err==-1)
1507                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0, dev);
1508                 kfree_skb(skb, FREE_WRITE);
1509                 return 0;       
1510         }
1511 
1512 #endif
1513         
1514         /*
1515          *      Our transport medium may have padded the buffer out. Now that we know it
1516          *      is IP we can trim to the true length of the frame.
1517          */
1518 
1519         skb->len=ntohs(iph->tot_len);
1520 
1521         /*
1522          *      Next analyse the packet for options. Studies show that fewer than one packet in
1523          *      a thousand has options....
1524          */
1525 
1526         if (iph->ihl != 5)
1527         {       /* Slow path: this packet has options; the typical optionless packet (ihl == 5) skips this block. */
1528                 memset((char *) &opt, 0, sizeof(opt));
1529                 if (do_options(iph, &opt) != 0)
1530                         return 0;
1531                 opts_p = 1;
1532         }
1533 
1534         /*
1535          *      Remember if the frame is fragmented. The MF flag is tested on the unswapped (network order) field, hence 0x0020 rather than 0x2000.
1536          */
1537          
1538         if(iph->frag_off)
1539         {
1540                 if (iph->frag_off & 0x0020)
1541                         is_frag|=1;
1542                 /*
1543                  *      Nonzero offset: not the first fragment.
1544                  */
1545         
1546                 if (ntohs(iph->frag_off) & 0x1fff)
1547                         is_frag|=2;
1548         }
1549         
1550         /*
1551          *      Do any IP forwarding required.  chk_addr() is expensive -- avoid it someday.
1552          *
1553          *      This is inefficient. While finding out if it is for us we could also compute
1554          *      the routing table entry. This is where the great unified cache theory comes
1555          *      in, as and when someone implements it.
1556          *
1557          *      For most hosts over 99% of packets match the first conditional
1558          *      and don't go via ip_chk_addr. Note: brd is set to IS_MYADDR at
1559          *      function entry.
1560          */
1561 
1562         if ( iph->daddr != skb->dev->pa_addr && (brd = ip_chk_addr(iph->daddr)) == 0)
1563         {
1564                 /*
1565                  *      Don't forward multicast or broadcast frames.
1566                  */
1567 
1568                 if(skb->pkt_type!=PACKET_HOST || brd==IS_BROADCAST)
1569                 {
1570                         kfree_skb(skb,FREE_WRITE);
1571                         return 0;
1572                 }
1573 
1574                 /*
1575                  *      The packet is for another target. Forward the frame
1576                  */
1577 
1578 #ifdef CONFIG_IP_FORWARD
1579                 ip_forward(skb, dev, is_frag);
1580 #else
1581 /*              printk("Machine %lx tried to use us as a forwarder to %lx but we have forwarding disabled!\n",
1582                         iph->saddr,iph->daddr);*/
1583                 ip_statistics.IpInAddrErrors++;
1584 #endif
1585                 /*
1586                  *      The forwarder is inefficient and copies the packet. We
1587                  *      free the original now.
1588                  */
1589 
1590                 kfree_skb(skb, FREE_WRITE);
1591                 return(0);
1592         }
1593         
1594 #ifdef CONFIG_IP_MULTICAST      
1595 
1596         if(brd==IS_MULTICAST && iph->daddr!=IGMP_ALL_HOSTS && !(dev->flags&IFF_LOOPBACK))
1597         {
1598                 /*
1599                  *      Check it is for one of our groups
1600                  */
1601                 struct ip_mc_list *ip_mc=dev->ip_mc_list;
1602                 do
1603                 {
1604                         if(ip_mc==NULL)
1605                         {       
1606                                 kfree_skb(skb, FREE_WRITE);
1607                                 return 0;
1608                         }
1609                         if(ip_mc->multiaddr==iph->daddr)
1610                                 break;
1611                         ip_mc=ip_mc->next;
1612                 }
1613                 while(1);
1614         }
1615 #endif
1616         /*
1617          *      Account for the packet
1618          */
1619          
1620 #ifdef CONFIG_IP_ACCT
1621         ip_acct_cnt(iph,dev, ip_acct_chain);
1622 #endif  
1623 
1624         /*
1625          * Reassemble IP fragments.
1626          */
1627 
1628         if(is_frag)
1629         {
1630                 /* Defragment. Obtain the complete packet if there is one */
1631                 skb=ip_defrag(iph,skb,dev);
1632                 if(skb==NULL)
1633                         return 0;
1634                 skb->dev = dev;
1635                 iph=skb->h.iph;
1636         }
1637         
1638                  
1639 
1640         /*
1641          *      Point into the IP datagram, just past the header.
1642          */
1643 
1644         skb->ip_hdr = iph;
1645         skb->h.raw += iph->ihl*4;
1646         
1647         /*
1648          *      Deliver to raw sockets. This is fun, as we want to avoid making any surplus copies.
1649          */
1650          
1651         hash = iph->protocol & (SOCK_ARRAY_SIZE-1);
1652         
1653         /* If there may be a raw socket we must check - if not we don't care */
1654         if((raw_sk=raw_prot.sock_array[hash])!=NULL)
1655         {
1656                 struct sock *sknext=NULL;
1657                 struct sk_buff *skb1;
1658                 raw_sk=get_sock_raw(raw_sk, hash,  iph->saddr, iph->daddr);
1659                 if(raw_sk)      /* Any raw sockets */
1660                 {
1661                         do
1662                         {
1663                                 /* Find the next */
1664                                 sknext=get_sock_raw(raw_sk->next, hash, iph->saddr, iph->daddr);
1665                                 if(sknext)
1666                                         skb1=skb_clone(skb, GFP_ATOMIC);
1667                                 else
1668                                         break;  /* One pending raw socket left */
1669                                 if(skb1)
1670                                         raw_rcv(raw_sk, skb1, dev, iph->saddr,iph->daddr);
1671                                 raw_sk=sknext;
1672                         }
1673                         while(raw_sk!=NULL);
1674                         /* Here either raw_sk is the last raw socket, or NULL if none */
1675                         /* We deliver to the last raw socket AFTER the protocol checks as it avoids a surplus copy */
1676                 }
1677         }
1678         
1679         /*
1680          *      skb->h.raw now points at the protocol beyond the IP header.
1681          */
1682 
1683         hash = iph->protocol & (MAX_INET_PROTOS -1);
1684         for (ipprot = (struct inet_protocol *)inet_protos[hash];ipprot != NULL;ipprot=(struct inet_protocol *)ipprot->next)
1685         {
1686                 struct sk_buff *skb2;
1687 
1688                 if (ipprot->protocol != iph->protocol)
1689                         continue;
1690                 /*
1691                  *      See if we need to make a copy of it.  This will
1692                  *      only be set if more than one protocol wants it,
1693                  *      and then not for the last one. If there is a pending
1694                  *      raw delivery, wait for that.
1695                  */
1696                 if (ipprot->copy || raw_sk)
1697                 {
1698                         skb2 = skb_clone(skb, GFP_ATOMIC);
1699                         if(skb2==NULL)
1700                                 continue;
1701                 }
1702                 else
1703                 {
1704                         skb2 = skb;
1705                 }
1706                 flag = 1;
1707 
1708                /*
1709                 * Pass on the datagram to each protocol that wants it,
1710                 * based on the datagram protocol.  We should really
1711                 * check the protocol handler's return values here...
1712                 */
1713                 ipprot->handler(skb2, dev, opts_p ? &opt : 0, iph->daddr,
1714                                 (ntohs(iph->tot_len) - (iph->ihl * 4)),
1715                                 iph->saddr, 0, ipprot);
1716 
1717         }
1718 
1719         /*
1720          * All protocols checked.
1721          * If this packet was a broadcast, we may *not* reply to it, since that
1722          * causes (proven, grin) ARP storms and a leakage of memory (i.e. all
1723          * ICMP reply messages get queued up for transmission...)
1724          */
1725 
1726         if(raw_sk!=NULL)        /* Shift to last raw user */
1727                 raw_rcv(raw_sk, skb, dev, iph->saddr, iph->daddr);
1728         else if (!flag)         /* Free and report errors */
1729         {
1730                 if (brd != IS_BROADCAST && brd!=IS_MULTICAST)
1731                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0, dev);
1732                 kfree_skb(skb, FREE_WRITE);
1733         }
1734 
1735         return(0);
1736 }
1737 
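/*
 *      For reference: a minimal, portable sketch of the one's complement
 *      arithmetic behind the "Is the datagram acceptable?" checksum test in
 *      ip_rcv() above. The function name is invented for illustration and this
 *      is not the optimised ip_fast_csum() the kernel actually calls; it only
 *      shows the RFC 1071 style check that test relies on.
 */

static int example_ip_header_csum_ok(const unsigned char *hdr, unsigned int ihl)
{
        unsigned long sum = 0;
        unsigned int i;

        /* Sum the header as 16 bit words, checksum field included (ihl counts 32 bit words) */
        for (i = 0; i < ihl * 2; i++)
                sum += (hdr[2 * i] << 8) | hdr[2 * i + 1];

        /* Fold the carries back in: one's complement addition */
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);

        /* A valid header sums to 0xffff, so its 16 bit complement is zero */
        return (unsigned short)~sum == 0;
}
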
1738 /*
1739  *      Loop a packet back to the sender.
1740  */
1741  
1742 static void ip_loopback(struct device *old_dev, struct sk_buff *skb)
     /* [previous][next][first][last][top][bottom][index][help] */
1743 {
1744         extern struct device loopback_dev;
1745         struct device *dev=&loopback_dev;
1746         int len=skb->len-old_dev->hard_header_len;
1747         struct sk_buff *newskb=alloc_skb(len+dev->hard_header_len, GFP_ATOMIC);
1748         
1749         if(newskb==NULL)
1750                 return;
1751                 
1752         newskb->link3=NULL;
1753         newskb->sk=NULL;
1754         newskb->dev=dev;
1755         newskb->saddr=skb->saddr;
1756         newskb->daddr=skb->daddr;
1757         newskb->raddr=skb->raddr;
1758         newskb->free=1;
1759         newskb->lock=0;
1760         newskb->users=0;
1761         newskb->pkt_type=skb->pkt_type;
1762         newskb->len=len+dev->hard_header_len;
1763         
1764         
1765         newskb->ip_hdr=(struct iphdr *)(newskb->data+ip_send(newskb, skb->ip_hdr->daddr, len, dev, skb->ip_hdr->saddr));
1766         memcpy(newskb->ip_hdr,skb->ip_hdr,len);
1767 
1768         /* Recurse. The device check against IFF_LOOPBACK will stop infinite recursion */
1769                 
1770         /*printk("Loopback output queued [%lX to %lX].\n", newskb->ip_hdr->saddr,newskb->ip_hdr->daddr);*/
1771         ip_queue_xmit(NULL, dev, newskb, 1);
1772 }
1773 
1774 
1775 /*
1776  * Queues a packet to be sent, and starts the transmitter
1777  * if necessary.  If free == 1 we free the block after
1778  * transmit, otherwise we don't. If free == 2 we not only
1779  * free the block but also don't assign a new IP id to it.
1780  * This routine also needs to put in the total length,
1781  * and compute the checksum
1782  */
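/*
 *      Typical call, as in ip_loopback() above: ip_queue_xmit(NULL, dev, skb, 1)
 *      hands over a buffer with no owning socket, so it is freed once sent.
 *      TCP is the caller that passes its own sock with free == 0, keeping the
 *      frame on the send queue built below for possible retransmission.
 */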
1783 
1784 void ip_queue_xmit(struct sock *sk, struct device *dev,
     /* [previous][next][first][last][top][bottom][index][help] */
1785               struct sk_buff *skb, int free)
1786 {
1787         struct iphdr *iph;
1788         unsigned char *ptr;
1789 
1790         /* Sanity check */
1791         if (dev == NULL)
1792         {
1793                 printk("IP: ip_queue_xmit dev = NULL\n");
1794                 return;
1795         }
1796 
1797         IS_SKB(skb);
1798 
1799         /*
1800          *      Do some book-keeping in the packet for later
1801          */
1802 
1803 
1804         skb->dev = dev;
1805         skb->when = jiffies;
1806 
1807         /*
1808          *      Find the IP header and set the length. This is bad
1809          *      but once we get the skb data handling code in, the
1810          *      hardware will push its header sensibly and we will
1811          *      set skb->ip_hdr to avoid this mess and the fixed
1812          *      header length problem
1813          */
1814 
1815         ptr = skb->data;
1816         ptr += dev->hard_header_len;
1817         iph = (struct iphdr *)ptr;
1818         skb->ip_hdr = iph;
1819         iph->tot_len = ntohs(skb->len-dev->hard_header_len);
1820 
1821 #ifdef CONFIG_IP_FIREWALL
1822         if(ip_fw_chk(iph, dev, ip_fw_blk_chain, ip_fw_blk_policy, 0) != 1)
1823                 /* just don't send this packet */
1824                 return;
1825 #endif  
1826 
1827         /*
1828          *      No reassigning numbers to fragments...
1829          */
1830 
1831         if(free!=2)
1832                 iph->id      = htons(ip_id_count++);
1833         else
1834                 free=1;
1835 
1836         /* All buffers without an owner socket get freed */
1837         if (sk == NULL)
1838                 free = 1;
1839 
1840         skb->free = free;
1841 
1842         /*
1843          *      Do we need to fragment. Again this is inefficient.
1844          *      We need to somehow lock the original buffer and use
1845          *      bits of it.
1846          */
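        /*
         *      e.g. on Ethernet (mtu 1500 plus a 14 byte hardware header) anything
         *      longer than 1514 bytes in total takes the fragmentation path below.
         */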
1847 
1848         if(skb->len > dev->mtu + dev->hard_header_len)
1849         {
1850                 ip_fragment(sk,skb,dev,0);
1851                 IS_SKB(skb);
1852                 kfree_skb(skb,FREE_WRITE);
1853                 return;
1854         }
1855 
1856         /*
1857          *      Add an IP checksum
1858          */
1859 
1860         ip_send_check(iph);
1861 
1862         /*
1863          *      Print the frame when debugging
1864          */
1865 
1866         /*
1867          *      More debugging. You cannot queue a packet that is already on a list.
1868          *      Spot this and moan loudly.
1869          */
1870         if (skb->next != NULL)
1871         {
1872                 printk("ip_queue_xmit: next != NULL\n");
1873                 skb_unlink(skb);
1874         }
1875 
1876         /*
1877          *      If a sender wishes the packet to remain unfreed
1878          *      we add it to his send queue. This arguably belongs
1879          *      in the TCP level since nobody else uses it. BUT
1880          *      remember IPng might change all the rules.
1881          */
1882 
1883         if (!free)
1884         {
1885                 unsigned long flags;
1886                 /* The socket now has more outstanding blocks */
1887 
1888                 sk->packets_out++;
1889 
1890                 /* Protect the list for a moment */
1891                 save_flags(flags);
1892                 cli();
1893 
1894                 if (skb->link3 != NULL)
1895                 {
1896                         printk("ip.c: link3 != NULL\n");
1897                         skb->link3 = NULL;
1898                 }
1899                 if (sk->send_head == NULL)
1900                 {
1901                         sk->send_tail = skb;
1902                         sk->send_head = skb;
1903                 }
1904                 else
1905                 {
1906                         sk->send_tail->link3 = skb;
1907                         sk->send_tail = skb;
1908                 }
1909                 /* skb->link3 is NULL */
1910 
1911                 /* Interrupt restore */
1912                 restore_flags(flags);
1913         }
1914         else
1915                 /* Remember who owns the buffer */
1916                 skb->sk = sk;
1917 
1918         /*
1919          *      If the indicated interface is up and running, send the packet.
1920          */
1921          
1922         ip_statistics.IpOutRequests++;
1923 #ifdef CONFIG_IP_ACCT
1924         ip_acct_cnt(iph,dev, ip_acct_chain);
1925 #endif  
1926         
1927 #ifdef CONFIG_IP_MULTICAST      
1928 
1929         /*
1930          *      Multicasts are looped back for other local users
1931          */
1932          
1933         if (MULTICAST(iph->daddr) && !(dev->flags&IFF_LOOPBACK))
1934         {
1935                 if(sk==NULL || sk->ip_mc_loop)
1936                 {
1937                         if(iph->daddr==IGMP_ALL_HOSTS)
1938                                 ip_loopback(dev,skb);
1939                         else
1940                         {
1941                                 struct ip_mc_list *imc=dev->ip_mc_list;
1942                                 while(imc!=NULL)
1943                                 {
1944                                         if(imc->multiaddr==iph->daddr)
1945                                         {
1946                                                 ip_loopback(dev,skb);
1947                                                 break;
1948                                         }
1949                                         imc=imc->next;
1950                                 }
1951                         }
1952                 }
1953                 /* Multicasts with ttl 0 must not go beyond the host */
1954                 
1955                 if(skb->ip_hdr->ttl==0)
1956                 {
1957                         kfree_skb(skb, FREE_READ);
1958                         return;
1959                 }
1960         }
1961 #endif
1962         if((dev->flags&IFF_BROADCAST) && iph->daddr==dev->pa_brdaddr && !(dev->flags&IFF_LOOPBACK))
1963                 ip_loopback(dev,skb);
1964                 
1965         if (dev->flags & IFF_UP)
1966         {
1967                 /*
1968                  *      If we have an owner use its priority setting,
1969                  *      otherwise use NORMAL
1970                  */
1971 
1972                 if (sk != NULL)
1973                 {
1974                         dev_queue_xmit(skb, dev, sk->priority);
1975                 }
1976                 else
1977                 {
1978                         dev_queue_xmit(skb, dev, SOPRI_NORMAL);
1979                 }
1980         }
1981         else
1982         {
1983                 ip_statistics.IpOutDiscards++;
1984                 if (free)
1985                         kfree_skb(skb, FREE_WRITE);
1986         }
1987 }
1988 
1989 
1990 
1991 #ifdef CONFIG_IP_MULTICAST
1992 
1993 /*
1994  *      Write a multicast group list table for the IGMP daemon to
1995  *      read.
1996  */
1997  
1998 int ip_mc_procinfo(char *buffer, char **start, off_t offset, int length)
     /* [previous][next][first][last][top][bottom][index][help] */
1999 {
2000         off_t pos=0, begin=0;
2001         struct ip_mc_list *im;
2002         unsigned long flags;
2003         int len=0;
2004         struct device *dev;
2005         
2006         len=sprintf(buffer,"Device    : Count\tGroup    Users Timer\n");  
2007         save_flags(flags);
2008         cli();
2009         
2010         for(dev = dev_base; dev; dev = dev->next)
2011         {
2012                 if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST))
2013                 {
2014                         len+=sprintf(buffer+len,"%-10s: %5d\n",
2015                                         dev->name, dev->mc_count);
2016                         for(im = dev->ip_mc_list; im; im = im->next)
2017                         {
2018                                 len+=sprintf(buffer+len,
2019                                         "\t\t\t%08lX %5d %d:%08lX\n",
2020                                         im->multiaddr, im->users,
2021                                         im->tm_running, im->timer.expires);
2022                                 pos=begin+len;
2023                                 if(pos<offset)
2024                                 {
2025                                         len=0;
2026                                         begin=pos;
2027                                 }
2028                                 if(pos>offset+length)
2029                                         break;
2030                         }
2031                 }
2032         }
2033         restore_flags(flags);
2034         *start=buffer+(offset-begin);
2035         len-=(offset-begin);
2036         if(len>length)
2037                 len=length;     
2038         return len;
2039 }
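
/*
 *      The table produced above comes out roughly as follows (values invented;
 *      the group column is the raw 32 bit address printed in hex):
 *
 *      Device    : Count       Group    Users Timer
 *      eth0      :     1
 *                              010000E0     1 0:00000000
 *
 *      One header line, one line per multicast capable interface that is up,
 *      and one indented line per group joined on that interface.
 */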
2040 
2041 
2042 #endif  
2043 /*
2044  *      Socket option code for IP. This is the end of the line after any TCP, UDP etc. options on
2045  *      an IP socket.
2046  *
2047  *      We implement IP_TOS (type of service), IP_TTL (time to live).
2048  *
2049  *      Next release we will sort out IP_OPTIONS since they are kind of important for some people.
2050  */
2051 
2052 int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
     /* [previous][next][first][last][top][bottom][index][help] */
2053 {
2054         int val,err;
2055 #if defined(CONFIG_IP_FIREWALL) || defined(CONFIG_IP_ACCT)
2056         struct ip_fw tmp_fw;
2057 #endif  
2058         if (optval == NULL)
2059                 return(-EINVAL);
2060 
2061         err=verify_area(VERIFY_READ, optval, sizeof(int));
2062         if(err)
2063                 return err;
2064 
2065         val = get_fs_long((unsigned long *)optval);
2066 
2067         if(level!=SOL_IP)
2068                 return -EOPNOTSUPP;
2069 
2070         switch(optname)
2071         {
2072                 case IP_TOS:
2073                         if(val<0||val>255)
2074                                 return -EINVAL;
2075                         sk->ip_tos=val;
2076                         if(val==IPTOS_LOWDELAY)
2077                                 sk->priority=SOPRI_INTERACTIVE;
2078                         if(val==IPTOS_THROUGHPUT)
2079                                 sk->priority=SOPRI_BACKGROUND;
2080                         return 0;
2081                 case IP_TTL:
2082                         if(val<1||val>255)
2083                                 return -EINVAL;
2084                         sk->ip_ttl=val;
2085                         return 0;
2086 #ifdef CONFIG_IP_MULTICAST
2087                 case IP_MULTICAST_TTL: 
2088                 {
2089                         unsigned char ucval;
2090 
2091                         ucval=get_fs_byte((unsigned char *)optval);
2092                         if(ucval<1||ucval>255)
2093                                 return -EINVAL;
2094                         sk->ip_mc_ttl=(int)ucval;
2095                         return 0;
2096                 }
2097                 case IP_MULTICAST_LOOP: 
2098                 {
2099                         unsigned char ucval;
2100 
2101                         ucval=get_fs_byte((unsigned char *)optval);
2102                         if(ucval!=0 && ucval!=1)
2103                                  return -EINVAL;
2104                         sk->ip_mc_loop=(int)ucval;
2105                         return 0;
2106                 }
2107                 case IP_MULTICAST_IF: 
2108                 {
2109                         /* Not fully tested */
2110                         struct in_addr addr;
2111                         struct device *dev=NULL;
2112                         
2113                         /*
2114                          *      Check the arguments are allowable
2115                          */
2116 
2117                         err=verify_area(VERIFY_READ, optval, sizeof(addr));
2118                         if(err)
2119                                 return err;
2120                                 
2121                         memcpy_fromfs(&addr,optval,sizeof(addr));
2122                         
2123                         printk("MC bind %s\n", in_ntoa(addr.s_addr));
2124                         
2125                         /*
2126                          *      What address has been requested
2127                          */
2128                         
2129                         if(addr.s_addr==INADDR_ANY)     /* Default */
2130                         {
2131                                 sk->ip_mc_name[0]=0;
2132                                 return 0;
2133                         }
2134                         
2135                         /*
2136                          *      Find the device
2137                          */
2138                          
2139                         for(dev = dev_base; dev; dev = dev->next)
2140                         {
2141                                 if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST)&&
2142                                         (dev->pa_addr==addr.s_addr))
2143                                         break;
2144                         }
2145                         
2146                         /*
2147                          *      Did we find one
2148                          */
2149                          
2150                         if(dev) 
2151                         {
2152                                 strcpy(sk->ip_mc_name,dev->name);
2153                                 return 0;
2154                         }
2155                         return -EADDRNOTAVAIL;
2156                 }
2157                 
2158                 case IP_ADD_MEMBERSHIP: 
2159                 {
2160                 
2161 /*
2162  *      FIXME: Add/Del membership should have a semaphore protecting them from re-entry
2163  */
2164                         struct ip_mreq mreq;
2165                         static struct options optmem;
2166                         unsigned long route_src;
2167                         struct rtable *rt;
2168                         struct device *dev=NULL;
2169                         
2170                         /*
2171                          *      Check the arguments.
2172                          */
2173 
2174                         err=verify_area(VERIFY_READ, optval, sizeof(mreq));
2175                         if(err)
2176                                 return err;
2177 
2178                         memcpy_fromfs(&mreq,optval,sizeof(mreq));
2179 
2180                         /* 
2181                          *      Get device for use later
2182                          */
2183 
2184                         if(mreq.imr_interface.s_addr==INADDR_ANY) 
2185                         {
2186                                 /*
2187                                  *      Not set so scan.
2188                                  */
2189                                 if((rt=ip_rt_route(mreq.imr_multiaddr.s_addr,&optmem, &route_src))!=NULL)
2190                                 {
2191                                         dev=rt->rt_dev;
2192                                         rt->rt_use--;
2193                                 }
2194                         }
2195                         else
2196                         {
2197                                 /*
2198                                  *      Find a suitable device.
2199                                  */
2200                                 for(dev = dev_base; dev; dev = dev->next)
2201                                 {
2202                                         if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST)&&
2203                                                 (dev->pa_addr==mreq.imr_interface.s_addr))
2204                                                 break;
2205                                 }
2206                         }
2207                         
2208                         /*
2209                          *      No device, no cookies.
2210                          */
2211                          
2212                         if(!dev)
2213                                 return -ENODEV;
2214                                 
2215                         /*
2216                          *      Join group.
2217                          */
2218                          
2219                         return ip_mc_join_group(sk,dev,mreq.imr_multiaddr.s_addr);
2220                 }
2221                 
2222                 case IP_DROP_MEMBERSHIP: 
2223                 {
2224                         struct ip_mreq mreq;
2225                         struct rtable *rt;
2226                         static struct options optmem;
2227                         unsigned long route_src;
2228                         struct device *dev=NULL;
2229 
2230                         /*
2231                          *      Check the arguments
2232                          */
2233                          
2234                         err=verify_area(VERIFY_READ, optval, sizeof(mreq));
2235                         if(err)
2236                                 return err;
2237 
2238                         memcpy_fromfs(&mreq,optval,sizeof(mreq));
2239 
2240                         /*
2241                          *      Get device for use later 
2242                          */
2243  
2244                         if(mreq.imr_interface.s_addr==INADDR_ANY) 
2245                         {
2246                                 if((rt=ip_rt_route(mreq.imr_multiaddr.s_addr,&optmem, &route_src))!=NULL)
2247                                 {
2248                                         dev=rt->rt_dev;
2249                                         rt->rt_use--;
2250                                 }
2251                         }
2252                         else 
2253                         {
2254                                 for(dev = dev_base; dev; dev = dev->next)
2255                                 {
2256                                         if((dev->flags&IFF_UP)&& (dev->flags&IFF_MULTICAST)&&
2257                                                         (dev->pa_addr==mreq.imr_interface.s_addr))
2258                                                 break;
2259                                 }
2260                         }
2261                         
2262                         /*
2263                          *      Did we find a suitable device.
2264                          */
2265                          
2266                         if(!dev)
2267                                 return -ENODEV;
2268                                 
2269                         /*
2270                          *      Leave group
2271                          */
2272                          
2273                         return ip_mc_leave_group(sk,dev,mreq.imr_multiaddr.s_addr);
2274                 }
2275 #endif                  
2276 #ifdef CONFIG_IP_FIREWALL
2277                 case IP_FW_ADD_BLK:
2278                 case IP_FW_DEL_BLK:
2279                 case IP_FW_ADD_FWD:
2280                 case IP_FW_DEL_FWD:
2281                 case IP_FW_CHK_BLK:
2282                 case IP_FW_CHK_FWD:
2283                 case IP_FW_FLUSH_BLK:
2284                 case IP_FW_FLUSH_FWD:
2285                 case IP_FW_ZERO_BLK:
2286                 case IP_FW_ZERO_FWD:
2287                 case IP_FW_POLICY_BLK:
2288                 case IP_FW_POLICY_FWD:
2289                         if(!suser())
2290                                 return -EPERM;
2291                         if(optlen>sizeof(tmp_fw) || optlen<1)
2292                                 return -EINVAL;
2293                         err=verify_area(VERIFY_READ,optval,optlen);
2294                         if(err)
2295                                 return err;
2296                         memcpy_fromfs(&tmp_fw,optval,optlen);
2297                         err=ip_fw_ctl(optname, &tmp_fw,optlen);
2298                         return -err;    /* -0 is 0 after all */
2299                         
2300 #endif
2301 #ifdef CONFIG_IP_ACCT
2302                 case IP_ACCT_DEL:
2303                 case IP_ACCT_ADD:
2304                 case IP_ACCT_FLUSH:
2305                 case IP_ACCT_ZERO:
2306                         if(!suser())
2307                                 return -EPERM;
2308                         if(optlen>sizeof(tmp_fw) || optlen<1)
2309                                 return -EINVAL;
2310                         err=verify_area(VERIFY_READ,optval,optlen);
2311                         if(err)
2312                                 return err;
2313                         memcpy_fromfs(&tmp_fw, optval,optlen);
2314                         err=ip_acct_ctl(optname, &tmp_fw,optlen);
2315                         return -err;    /* -0 is 0 after all */
2316 #endif
2317                 /* IP_OPTIONS and friends go here eventually */
2318                 default:
2319                         return(-ENOPROTOOPT);
2320         }
2321 }
2322 
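/*
 *      A minimal userspace sketch of driving the options handled above. The
 *      socket type, group address and error handling are illustrative
 *      assumptions, not taken from this file; the option names and struct
 *      ip_mreq are the standard BSD socket interface this code implements.
 */

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <arpa/inet.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int tos = IPTOS_LOWDELAY;       /* mapped to SOPRI_INTERACTIVE above */
        int ttl = 64;
        struct ip_mreq mreq;

        if (fd < 0)
                return 1;

        /* IP_TOS and IP_TTL take a plain int */
        if (setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos)) < 0)
                perror("IP_TOS");
        if (setsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl)) < 0)
                perror("IP_TTL");

        /*
         *      Join a multicast group. With imr_interface set to INADDR_ANY the
         *      kernel picks the device by routing the group address (see above).
         */
        memset(&mreq, 0, sizeof(mreq));
        mreq.imr_multiaddr.s_addr = inet_addr("224.1.2.3");
        mreq.imr_interface.s_addr = htonl(INADDR_ANY);
        if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0)
                perror("IP_ADD_MEMBERSHIP");

        return 0;
}
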
2323 /*
2324  *      Get the options. Note for future reference. The GET of IP options gets the
2325  *      _received_ ones. The set sets the _sent_ ones.
2326  */
2327 
2328 int ip_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
     /* [previous][next][first][last][top][bottom][index][help] */
2329 {
2330         int val,err;
2331 #ifdef CONFIG_IP_MULTICAST
2332         int len;
2333 #endif
2334         
2335         if(level!=SOL_IP)
2336                 return -EOPNOTSUPP;
2337 
2338         switch(optname)
2339         {
2340                 case IP_TOS:
2341                         val=sk->ip_tos;
2342                         break;
2343                 case IP_TTL:
2344                         val=sk->ip_ttl;
2345                         break;
2346 #ifdef CONFIG_IP_MULTICAST                      
2347                 case IP_MULTICAST_TTL:
2348                         val=sk->ip_mc_ttl;
2349                         break;
2350                 case IP_MULTICAST_LOOP:
2351                         val=sk->ip_mc_loop;
2352                         break;
2353                 case IP_MULTICAST_IF:
2354                         err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
2355                         if(err)
2356                                 return err;
2357                         len=strlen(sk->ip_mc_name);
2358                         err=verify_area(VERIFY_WRITE, optval, len);
2359                         if(err)
2360                                 return err;
2361                         put_fs_long(len,(unsigned long *) optlen);
2362                         memcpy_tofs((void *)optval,sk->ip_mc_name, len);
2363                         return 0;
2364 #endif
2365                 default:
2366                         return(-ENOPROTOOPT);
2367         }
2368         err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
2369         if(err)
2370                 return err;
2371         put_fs_long(sizeof(int),(unsigned long *) optlen);
2372 
2373         err=verify_area(VERIFY_WRITE, optval, sizeof(int));
2374         if(err)
2375                 return err;
2376         put_fs_long(val,(unsigned long *)optval);
2377 
2378         return(0);
2379 }
2380 
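/*
 *      A small userspace sketch of reading options back. Note the quirk visible
 *      above: here IP_MULTICAST_IF reads back as the bound device *name* (not
 *      NUL terminated, length returned in *optlen), not as a struct in_addr.
 *      The file descriptor handling and buffer sizes are illustrative.
 */

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

static void example_dump_ip_options(int fd)
{
        int ttl;
        socklen_t len = sizeof(ttl);
        char ifname[16];
        socklen_t namelen = sizeof(ifname);

        /* IP_TOS, IP_TTL, IP_MULTICAST_TTL and IP_MULTICAST_LOOP return an int */
        if (getsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, &len) == 0)
                printf("ttl %d\n", ttl);

        /* IP_MULTICAST_IF returns the device name set earlier, e.g. "eth0" */
        if (getsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF, ifname, &namelen) == 0)
                printf("mc device %.*s\n", (int)namelen, ifname);
}
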
2381 /*
2382  *      IP protocol layer initialiser
2383  */
2384 
2385 static struct packet_type ip_packet_type =
2386 {
2387         0,      /* MUTTER ntohs(ETH_P_IP),*/
2388         NULL,   /* All devices */
2389         ip_rcv,
2390         NULL,
2391         NULL,
2392 };
2393 
2394 /*
2395  *      Device notifier
2396  */
2397  
2398 static int ip_rt_event(unsigned long event, void *ptr)
     /* [previous][next][first][last][top][bottom][index][help] */
2399 {
2400         if(event==NETDEV_DOWN)
2401                 ip_rt_flush(ptr);
2402         return NOTIFY_DONE;
2403 }
2404 
2405 struct notifier_block ip_rt_notifier={
2406         ip_rt_event,
2407         NULL,
2408         0
2409 };
2410 
2411 /*
2412  *      IP registers the packet type and then calls the subprotocol initialisers
2413  */
2414 
2415 void ip_init(void)
     /* [previous][next][first][last][top][bottom][index][help] */
2416 {
2417         ip_packet_type.type=htons(ETH_P_IP);
2418         dev_add_pack(&ip_packet_type);
2419 
2420         /* So we flush routes when a device is downed */        
2421         register_netdevice_notifier(&ip_rt_notifier);
2422 /*      ip_raw_init();
2423         ip_packet_init();
2424         ip_tcp_init();
2425         ip_udp_init();*/
2426 }
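
/*
 *      For orientation, the shape of a transport protocol handler as the
 *      dispatch loop in ip_rcv() above invokes it through inet_protos[]. The
 *      name and body are invented and the parameter names are read off that
 *      call site, so treat this as a sketch rather than the real prototypes of
 *      tcp_rcv(), udp_rcv() or icmp_rcv(), which live in their own files.
 */

static int example_proto_rcv(struct sk_buff *skb, struct device *dev,
                             struct options *opt, unsigned long daddr,
                             unsigned short len, unsigned long saddr,
                             int redo, struct inet_protocol *protocol)
{
        /*
         *      A real handler parses its own header at skb->h.raw here; this
         *      sketch just drops the datagram and frees the buffer it was given.
         */
        kfree_skb(skb, FREE_WRITE);
        return 0;
}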

/* [previous][next][first][last][top][bottom][index][help] */