net/inet/ip.c


DEFINITIONS

This source file includes the following definitions:
  1. ip_ioctl
  2. strict_route
  3. loose_route
  4. ip_route_check
  5. build_options
  6. ip_send
  7. ip_build_header
  8. do_options
  9. ip_fast_csum
  10. ip_compute_csum
  11. ip_csum
  12. ip_send_check
  13. ip_frag_create
  14. ip_find
  15. ip_free
  16. ip_expire
  17. ip_create
  18. ip_done
  19. ip_glue
  20. ip_defrag
  21. ip_fragment
  22. ip_forward
  23. ip_rcv
  24. ip_loopback
  25. ip_queue_xmit
  26. ip_mc_procinfo
  27. ip_setsockopt
  28. ip_getsockopt
  29. ip_rt_event
  30. ip_init

   1 /*
   2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
   3  *              operating system.  INET is implemented using the  BSD Socket
   4  *              interface as the means of communication with the user level.
   5  *
   6  *              The Internet Protocol (IP) module.
   7  *
   8  * Version:     @(#)ip.c        1.0.16b 9/1/93
   9  *
  10  * Authors:     Ross Biro, <bir7@leland.Stanford.Edu>
  11  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12  *              Donald Becker, <becker@super.org>
  13  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
  14  *              Richard Underwood
  15  *
  16  * Fixes:
  17  *              Alan Cox        :       Commented a couple of minor bits of surplus code
  18  *              Alan Cox        :       Undefining IP_FORWARD doesn't include the code
  19  *                                      (just stops a compiler warning).
  20  *              Alan Cox        :       Frames with >=MAX_ROUTE record routes, strict routes or loose routes
  21  *                                      are junked rather than corrupting things.
  22  *              Alan Cox        :       Frames to bad broadcast subnets are dumped
  23  *                                      We used to process them non broadcast and
  24  *                                      boy could that cause havoc.
  25  *              Alan Cox        :       ip_forward sets the free flag on the
  26  *                                      new frame it queues. Still crap because
  27  *                                      it copies the frame but at least it
  28  *                                      doesn't eat memory too.
  29  *              Alan Cox        :       Generic queue code and memory fixes.
  30  *              Fred Van Kempen :       IP fragment support (borrowed from NET2E)
  31  *              Gerhard Koerting:       Forward fragmented frames correctly.
  32  *              Gerhard Koerting:       Fixes to my fix of the above 8-).
  33  *              Gerhard Koerting:       IP interface addressing fix.
  34  *              Linus Torvalds  :       More robustness checks
  35  *              Alan Cox        :       Even more checks: Still not as robust as it ought to be
  36  *              Alan Cox        :       Save IP header pointer for later
  37  *              Alan Cox        :       ip option setting
  38  *              Alan Cox        :       Use ip_tos/ip_ttl settings
  39  *              Alan Cox        :       Fragmentation bogosity removed
  40  *                                      (Thanks to Mark.Bush@prg.ox.ac.uk)
  41  *              Dmitry Gorodchanin :    Send of a raw packet crash fix.
  42  *              Alan Cox        :       Silly ip bug when an overlength
  43  *                                      fragment turns up. Now frees the
  44  *                                      queue.
  45  *              Linus Torvalds/ :       Memory leakage on fragmentation
  46  *              Alan Cox        :       handling.
  47  *              Gerhard Koerting:       Forwarding uses IP priority hints
  48  *              Teemu Rantanen  :       Fragment problems.
  49  *              Alan Cox        :       General cleanup, comments and reformat
  50  *              Alan Cox        :       SNMP statistics
  51  *              Alan Cox        :       BSD address rule semantics. Also see
  52  *                                      UDP as there is a nasty checksum issue
  53  *                                      if you do things the wrong way.
  54  *              Alan Cox        :       Always defrag, moved IP_FORWARD to the config.in file
  55  *              Alan Cox        :       IP options adjust sk->priority.
  56  *              Pedro Roque     :       Fix mtu/length error in ip_forward.
  57  *              Alan Cox        :       Avoid ip_chk_addr when possible.
  58  *              Richard Underwood:      IP multicasting.
  59  *              Alan Cox        :       Cleaned up multicast handlers.
  60  *              Alan Cox        :       RAW sockets demultiplex in the BSD style.
  61  *              Gunther Mayer   :       Fix the SNMP reporting typo
  62  *              Alan Cox        :       Always in group 224.0.0.1
  63  *              Alan Cox        :       Multicast loopback error for 224.0.0.1
  64  *              Alan Cox        :       IP_MULTICAST_LOOP option.
  65  *              Alan Cox        :       Use notifiers.
  66  *
  67  * To Fix:
  68  *              IP option processing is mostly not needed. ip_forward needs to know about routing rules
  69  *              and time stamp but that's about all. Use the route mtu field here too
  70  *              IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
  71  *              and could be made very efficient with the addition of some virtual memory hacks to permit
  72  *              the allocation of a buffer that can then be 'grown' by twiddling page tables.
  73  *              Output fragmentation wants updating along with the buffer management to use a single 
  74  *              interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
  75  *              output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
  76  *              fragmentation anyway.
  77  *
  78  *              This program is free software; you can redistribute it and/or
  79  *              modify it under the terms of the GNU General Public License
  80  *              as published by the Free Software Foundation; either version
  81  *              2 of the License, or (at your option) any later version.
  82  */
  83 
  84 #include <asm/segment.h>
  85 #include <asm/system.h>
  86 #include <linux/types.h>
  87 #include <linux/kernel.h>
  88 #include <linux/sched.h>
  89 #include <linux/string.h>
  90 #include <linux/errno.h>
  91 #include <linux/config.h>
  92 
  93 #include <linux/socket.h>
  94 #include <linux/sockios.h>
  95 #include <linux/in.h>
  96 #include <linux/inet.h>
  97 #include <linux/netdevice.h>
  98 #include <linux/etherdevice.h>
  99 
 100 #include "snmp.h"
 101 #include "ip.h"
 102 #include "protocol.h"
 103 #include "route.h"
 104 #include "tcp.h"
 105 #include <linux/skbuff.h>
 106 #include "sock.h"
 107 #include "arp.h"
 108 #include "icmp.h"
 109 #include "raw.h"
 110 #include "igmp.h"
 111 #include <linux/ip_fw.h>
 112 
 113 #define CONFIG_IP_DEFRAG
 114 
 115 extern int last_retran;
 116 extern void sort_send(struct sock *sk);
 117 
 118 #define min(a,b)        ((a)<(b)?(a):(b))
 119 #define LOOPBACK(x)     (((x) & htonl(0xff000000)) == htonl(0x7f000000))
 120 
 121 /*
 122  *      SNMP management statistics
 123  */
 124 
 125 #ifdef CONFIG_IP_FORWARD
 126 struct ip_mib ip_statistics={1,64,};    /* Forwarding=Yes, Default TTL=64 */
 127 #else
 128 struct ip_mib ip_statistics={0,64,};    /* Forwarding=No, Default TTL=64 */
 129 #endif
 130 
 131 #ifdef CONFIG_IP_MULTICAST
 132 
 133 struct ip_mc_list *ip_mc_head=NULL;
 134 
 135 #endif
 136 
 137 /*
 138  *      Handle the issuing of an ioctl() request
 139  *      for the ip device. This is scheduled to
 140  *      disappear
 141  */
 142 
 143 int ip_ioctl(struct sock *sk, int cmd, unsigned long arg)
 144 {
 145         switch(cmd)
 146         {
 147                 default:
 148                         return(-EINVAL);
 149         }
 150 }
 151 
 152 
  153 /* These two routines will handle strict and loose source routing; currently they are empty stubs. */
 154 
 155 static void
 156 strict_route(struct iphdr *iph, struct options *opt)
 157 {
 158 }
 159 
 160 
 161 static void
 162 loose_route(struct iphdr *iph, struct options *opt)
 163 {
 164 }
 165 
 166 
 167 
 168 
  169 /* This routine will check to see if we have lost a gateway. Currently an empty stub. */
 170 void
 171 ip_route_check(unsigned long daddr)
 172 {
 173 }
 174 
 175 
 176 #if 0
 177 /* this routine puts the options at the end of an ip header. */
 178 static int
 179 build_options(struct iphdr *iph, struct options *opt)
 180 {
 181   unsigned char *ptr;
 182   /* currently we don't support any options. */
 183   ptr = (unsigned char *)(iph+1);
 184   *ptr = 0;
 185   return (4);
 186 }
 187 #endif
 188 
 189 
 190 /*
 191  *      Take an skb, and fill in the MAC header.
 192  */
 193 
 194 static int ip_send(struct sk_buff *skb, unsigned long daddr, int len, struct device *dev, unsigned long saddr)
 195 {
 196         int mac = 0;
 197 
 198         skb->dev = dev;
 199         skb->arp = 1;
 200         if (dev->hard_header)
 201         {
 202                 /*
 203                  *      Build a hardware header. Source address is our mac, destination unknown
 204                  *      (rebuild header will sort this out)
 205                  */
 206                 mac = dev->hard_header(skb->data, dev, ETH_P_IP, NULL, NULL, len, skb);
 207                 if (mac < 0)
 208                 {
 209                         mac = -mac;
 210                         skb->arp = 0;
 211                         skb->raddr = daddr;     /* next routing address */
 212                 }
 213         }
 214         return mac;
 215 }
 216 
 217 int ip_id_count = 0;
 218 
 219 /*
 220  * This routine builds the appropriate hardware/IP headers for
 221  * the routine.  It assumes that if *dev != NULL then the
 222  * protocol knows what it's doing, otherwise it uses the
 223  * routing/ARP tables to select a device struct.
 224  */
 225 int ip_build_header(struct sk_buff *skb, unsigned long saddr, unsigned long daddr,
 226                 struct device **dev, int type, struct options *opt, int len, int tos, int ttl)
 227 {
 228         static struct options optmem;
 229         struct iphdr *iph;
 230         struct rtable *rt;
 231         unsigned char *buff;
 232         unsigned long raddr;
 233         int tmp;
 234         unsigned long src;
 235 
 236         buff = skb->data;
 237 
 238         /*
 239          *      See if we need to look up the device.
 240          */
 241 
  242 #ifdef CONFIG_IP_MULTICAST
 243         if(MULTICAST(daddr) && *dev==NULL && skb->sk && *skb->sk->ip_mc_name)
 244                 *dev=dev_get(skb->sk->ip_mc_name);
 245 #endif
 246         if (*dev == NULL)
 247         {
 248                 if(skb->localroute)
 249                         rt = ip_rt_local(daddr, &optmem, &src);
 250                 else
 251                         rt = ip_rt_route(daddr, &optmem, &src);
 252                 if (rt == NULL)
 253                 {
 254                         ip_statistics.IpOutNoRoutes++;
 255                         return(-ENETUNREACH);
 256                 }
 257 
 258                 *dev = rt->rt_dev;
 259                 /*
 260                  *      If the frame is from us and going off machine it MUST MUST MUST
 261                  *      have the output device ip address and never the loopback
 262                  */
 263                 if (LOOPBACK(saddr) && !LOOPBACK(daddr))
 264                         saddr = src;/*rt->rt_dev->pa_addr;*/
 265                 raddr = rt->rt_gateway;
 266 
 267                 opt = &optmem;
 268         }
 269         else
 270         {
 271                 /*
 272                  *      We still need the address of the first hop.
 273                  */
 274                 if(skb->localroute)
 275                         rt = ip_rt_local(daddr, &optmem, &src);
 276                 else
 277                         rt = ip_rt_route(daddr, &optmem, &src);
 278                 /*
 279                  *      If the frame is from us and going off machine it MUST MUST MUST
 280                  *      have the output device ip address and never the loopback
 281                  */
 282                 if (LOOPBACK(saddr) && !LOOPBACK(daddr))
 283                         saddr = src;/*rt->rt_dev->pa_addr;*/
 284 
 285                 raddr = (rt == NULL) ? 0 : rt->rt_gateway;
 286         }
 287 
 288         /*
 289          *      No source addr so make it our addr
 290          */
 291         if (saddr == 0)
 292                 saddr = src;
 293 
 294         /*
 295          *      No gateway so aim at the real destination
 296          */
 297         if (raddr == 0)
 298                 raddr = daddr;
 299 
 300         /*
 301          *      Now build the MAC header.
 302          */
 303 
 304         tmp = ip_send(skb, raddr, len, *dev, saddr);
 305         buff += tmp;
 306         len -= tmp;
 307 
 308         /*
 309          *      Book keeping
 310          */
 311 
 312         skb->dev = *dev;
 313         skb->saddr = saddr;
 314         if (skb->sk)
 315                 skb->sk->saddr = saddr;
 316 
 317         /*
 318          *      Now build the IP header.
 319          */
 320 
 321         /*
 322          *      If we are using IPPROTO_RAW, then we don't need an IP header, since
 323          *      one is being supplied to us by the user
 324          */
 325 
 326         if(type == IPPROTO_RAW)
 327                 return (tmp);
 328 
 329         iph = (struct iphdr *)buff;
 330         iph->version  = 4;
 331         iph->tos      = tos;
 332         iph->frag_off = 0;
 333         iph->ttl      = ttl;
 334         iph->daddr    = daddr;
 335         iph->saddr    = saddr;
 336         iph->protocol = type;
 337         iph->ihl      = 5;
 338         skb->ip_hdr   = iph;
 339 
 340         /* Setup the IP options. */
 341 #ifdef Not_Yet_Avail
 342         build_options(iph, opt);
 343 #endif
 344 #ifdef CONFIG_IP_FIREWALL
 345         if(!ip_fw_chk(iph,ip_fw_blk_chain))
 346                 return -EPERM;
 347 #endif          
 348 
 349         return(20 + tmp);       /* IP header plus MAC header size */
 350 }
 351 
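/*
 *      Illustrative only: a minimal sketch (not one of the real callers, which
 *      live in the transport protocols) of how ip_build_header() above is meant
 *      to be used: build the MAC and IP headers at the front of a fresh buffer,
 *      copy the payload in after them, then hand the frame to ip_queue_xmit().
 *      The buffer slack, the example protocol number and the final flag
 *      argument are assumptions made for this sketch.
 */
#if 0
static int ip_xmit_sketch(struct sock *sk, unsigned long saddr,
                          unsigned long daddr, int datalen)
{
        struct sk_buff *skb;
        struct device *dev = NULL;      /* NULL: let ip_build_header route and pick the device */
        int tmp;

        skb = alloc_skb(datalen + 128, GFP_ATOMIC);     /* 128 = assumed slack for MAC + IP headers */
        if (skb == NULL)
                return(-ENOMEM);
        skb->sk = sk;
        skb->free = 1;

        tmp = ip_build_header(skb, saddr, daddr, &dev, IPPROTO_ICMP /* example */,
                              NULL, datalen, 0 /* tos */, 255 /* ttl */);
        if (tmp < 0)
        {
                kfree_skb(skb, FREE_WRITE);
                return(tmp);
        }

        /* ... copy datalen bytes of payload to skb->data + tmp here ... */

        skb->len = tmp + datalen;
        ip_queue_xmit(sk, dev, skb, 1);         /* last argument: free flag (assumed) */
        return(0);
}
#endif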
 352 
 353 static int
 354 do_options(struct iphdr *iph, struct options *opt)
 355 {
 356   unsigned char *buff;
 357   int done = 0;
 358   int i, len = sizeof(struct iphdr);
 359 
 360   /* Zero out the options. */
 361   opt->record_route.route_size = 0;
 362   opt->loose_route.route_size  = 0;
 363   opt->strict_route.route_size = 0;
 364   opt->tstamp.ptr              = 0;
 365   opt->security                = 0;
 366   opt->compartment             = 0;
 367   opt->handling                = 0;
 368   opt->stream                  = 0;
 369   opt->tcc                     = 0;
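  /*
   * NOTE: option parsing is currently disabled.  The option summary fields
   * are zeroed above and we return here, so the parser below is never reached.
   */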
 370   return(0);
 371 
 372   /* Advance the pointer to start at the options. */
 373   buff = (unsigned char *)(iph + 1);
 374 
 375   /* Now start the processing. */
 376   while (!done && len < iph->ihl*4) switch(*buff) {
 377         case IPOPT_END:
 378                 done = 1;
 379                 break;
 380         case IPOPT_NOOP:
 381                 buff++;
 382                 len++;
 383                 break;
 384         case IPOPT_SEC:
 385                 buff++;
 386                 if (*buff != 11) return(1);
 387                 buff++;
 388                 opt->security = ntohs(*(unsigned short *)buff);
 389                 buff += 2;
 390                 opt->compartment = ntohs(*(unsigned short *)buff);
 391                 buff += 2;
 392                 opt->handling = ntohs(*(unsigned short *)buff);
 393                 buff += 2;
 394                 opt->tcc = ((*buff) << 16) + ntohs(*(unsigned short *)(buff+1));
 395                 buff += 3;
 396                 len += 11;
 397                 break;
 398         case IPOPT_LSRR:
 399                 buff++;
 400                 if ((*buff - 3)% 4 != 0) return(1);
 401                 len += *buff;
 402                 opt->loose_route.route_size = (*buff -3)/4;
 403                 buff++;
 404                 if (*buff % 4 != 0) return(1);
 405                 opt->loose_route.pointer = *buff/4 - 1;
 406                 buff++;
 407                 buff++;
 408                 for (i = 0; i < opt->loose_route.route_size; i++) {
 409                         if(i>=MAX_ROUTE)
 410                                 return(1);
 411                         opt->loose_route.route[i] = *(unsigned long *)buff;
 412                         buff += 4;
 413                 }
 414                 break;
 415         case IPOPT_SSRR:
 416                 buff++;
 417                 if ((*buff - 3)% 4 != 0) return(1);
 418                 len += *buff;
 419                 opt->strict_route.route_size = (*buff -3)/4;
 420                 buff++;
 421                 if (*buff % 4 != 0) return(1);
 422                 opt->strict_route.pointer = *buff/4 - 1;
 423                 buff++;
 424                 buff++;
 425                 for (i = 0; i < opt->strict_route.route_size; i++) {
 426                         if(i>=MAX_ROUTE)
 427                                 return(1);
 428                         opt->strict_route.route[i] = *(unsigned long *)buff;
 429                         buff += 4;
 430                 }
 431                 break;
 432         case IPOPT_RR:
 433                 buff++;
 434                 if ((*buff - 3)% 4 != 0) return(1);
 435                 len += *buff;
 436                 opt->record_route.route_size = (*buff -3)/4;
 437                 buff++;
 438                 if (*buff % 4 != 0) return(1);
 439                 opt->record_route.pointer = *buff/4 - 1;
 440                 buff++;
 441                 buff++;
 442                 for (i = 0; i < opt->record_route.route_size; i++) {
 443                         if(i>=MAX_ROUTE)
 444                                 return 1;
 445                         opt->record_route.route[i] = *(unsigned long *)buff;
 446                         buff += 4;
 447                 }
 448                 break;
 449         case IPOPT_SID:
 450                 len += 4;
 451                 buff +=2;
 452                 opt->stream = *(unsigned short *)buff;
 453                 buff += 2;
 454                 break;
 455         case IPOPT_TIMESTAMP:
 456                 buff++;
 457                 len += *buff;
 458                 if (*buff % 4 != 0) return(1);
 459                 opt->tstamp.len = *buff / 4 - 1;
 460                 buff++;
 461                 if ((*buff - 1) % 4 != 0) return(1);
 462                 opt->tstamp.ptr = (*buff-1)/4;
 463                 buff++;
 464                 opt->tstamp.x.full_char = *buff;
 465                 buff++;
 466                 for (i = 0; i < opt->tstamp.len; i++) {
 467                         opt->tstamp.data[i] = *(unsigned long *)buff;
 468                         buff += 4;
 469                 }
 470                 break;
 471         default:
 472                 return(1);
 473   }
 474 
 475   if (opt->record_route.route_size == 0) {
 476         if (opt->strict_route.route_size != 0) {
 477                 memcpy(&(opt->record_route), &(opt->strict_route),
 478                                              sizeof(opt->record_route));
 479         } else if (opt->loose_route.route_size != 0) {
 480                 memcpy(&(opt->record_route), &(opt->loose_route),
 481                                              sizeof(opt->record_route));
 482         }
 483   }
 484 
 485   if (opt->strict_route.route_size != 0 &&
 486       opt->strict_route.route_size != opt->strict_route.pointer) {
 487         strict_route(iph, opt);
 488         return(0);
 489   }
 490 
 491   if (opt->loose_route.route_size != 0 &&
 492       opt->loose_route.route_size != opt->loose_route.pointer) {
 493         loose_route(iph, opt);
 494         return(0);
 495   }
 496 
 497   return(0);
 498 }
 499 
 500 /*
 501  *      This is a version of ip_compute_csum() optimized for IP headers, which
 502  *      always checksum on 4 octet boundaries.
 503  */
 504 
 505 static inline unsigned short ip_fast_csum(unsigned char * buff, int wlen)
 506 {
 507         unsigned long sum = 0;
 508 
 509         if (wlen)
 510         {
 511         unsigned long bogus;
 512          __asm__("clc\n"
 513                 "1:\t"
 514                 "lodsl\n\t"
 515                 "adcl %3, %0\n\t"
 516                 "decl %2\n\t"
 517                 "jne 1b\n\t"
 518                 "adcl $0, %0\n\t"
 519                 "movl %0, %3\n\t"
 520                 "shrl $16, %3\n\t"
 521                 "addw %w3, %w0\n\t"
 522                 "adcw $0, %w0"
 523             : "=r" (sum), "=S" (buff), "=r" (wlen), "=a" (bogus)
 524             : "0"  (sum),  "1" (buff),  "2" (wlen));
 525         }
 526         return (~sum) & 0xffff;
 527 }
 528 
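/*
 *      For reference only: a portable C sketch of what the inline assembly in
 *      ip_fast_csum() above computes, i.e. the 16-bit one's complement checksum
 *      of 'wlen' 32-bit words. This hypothetical helper is not used anywhere in
 *      the kernel; it assumes the buffer is at least 16-bit aligned.
 */
#if 0
static unsigned short ip_fast_csum_sketch(unsigned char *buff, int wlen)
{
        unsigned short *p = (unsigned short *) buff;
        unsigned long sum = 0;
        int i;

        for (i = 0; i < wlen * 2; i++)          /* two 16-bit words per 32-bit word */
                sum += *p++;
        sum = (sum & 0xffff) + (sum >> 16);     /* fold the carries back in */
        sum += (sum >> 16);                     /* one last possible carry */
        return((unsigned short) (~sum & 0xffff));
}
#endif
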
 529 /*
 530  * This routine does all the checksum computations that don't
 531  * require anything special (like copying or special headers).
 532  */
 533 
 534 unsigned short ip_compute_csum(unsigned char * buff, int len)
 535 {
 536         unsigned long sum = 0;
 537 
 538         /* Do the first multiple of 4 bytes and convert to 16 bits. */
 539         if (len > 3)
 540         {
 541                 __asm__("clc\n"
 542                 "1:\t"
 543                 "lodsl\n\t"
 544                 "adcl %%eax, %%ebx\n\t"
 545                 "loop 1b\n\t"
 546                 "adcl $0, %%ebx\n\t"
 547                 "movl %%ebx, %%eax\n\t"
 548                 "shrl $16, %%eax\n\t"
 549                 "addw %%ax, %%bx\n\t"
 550                 "adcw $0, %%bx"
 551                 : "=b" (sum) , "=S" (buff)
 552                 : "0" (sum), "c" (len >> 2) ,"1" (buff)
 553                 : "ax", "cx", "si", "bx" );
 554         }
 555         if (len & 2)
 556         {
 557                 __asm__("lodsw\n\t"
 558                 "addw %%ax, %%bx\n\t"
 559                 "adcw $0, %%bx"
 560                 : "=b" (sum), "=S" (buff)
 561                 : "0" (sum), "1" (buff)
 562                 : "bx", "ax", "si");
 563         }
 564         if (len & 1)
 565         {
 566                 __asm__("lodsb\n\t"
 567                 "movb $0, %%ah\n\t"
 568                 "addw %%ax, %%bx\n\t"
 569                 "adcw $0, %%bx"
 570                 : "=b" (sum), "=S" (buff)
 571                 : "0" (sum), "1" (buff)
 572                 : "bx", "ax", "si");
 573         }
 574         sum =~sum;
 575         return(sum & 0xffff);
 576 }
 577 
 578 /*
 579  *      Check the header of an incoming IP datagram.  This version is still used in slhc.c.
 580  */
 581 
 582 int ip_csum(struct iphdr *iph)
 583 {
 584         return ip_fast_csum((unsigned char *)iph, iph->ihl);
 585 }
 586 
 587 /*
 588  *      Generate a checksum for an outgoing IP datagram.
 589  */
 590 
 591 void ip_send_check(struct iphdr *iph)
 592 {
 593         iph->check = 0;
 594         iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
 595 }
 596 
 597 /************************ Fragment Handlers From NET2E **********************************/
 598 
 599 
 600 /*
 601  *      This fragment handler is a bit of a heap. On the other hand it works quite
 602  *      happily and handles things quite well.
 603  */
 604 
 605 static struct ipq *ipqueue = NULL;              /* IP fragment queue    */
 606 
 607 /*
 608  *      Create a new fragment entry.
 609  */
 610 
 611 static struct ipfrag *ip_frag_create(int offset, int end, struct sk_buff *skb, unsigned char *ptr)
 612 {
 613         struct ipfrag *fp;
 614 
 615         fp = (struct ipfrag *) kmalloc(sizeof(struct ipfrag), GFP_ATOMIC);
 616         if (fp == NULL)
 617         {
 618                 printk("IP: frag_create: no memory left !\n");
 619                 return(NULL);
 620         }
 621         memset(fp, 0, sizeof(struct ipfrag));
 622 
 623         /* Fill in the structure. */
 624         fp->offset = offset;
 625         fp->end = end;
 626         fp->len = end - offset;
 627         fp->skb = skb;
 628         fp->ptr = ptr;
 629 
 630         return(fp);
 631 }
 632 
 633 
 634 /*
 635  *      Find the correct entry in the "incomplete datagrams" queue for
 636  *      this IP datagram, and return the queue entry address if found.
 637  */
 638 
 639 static struct ipq *ip_find(struct iphdr *iph)
 640 {
 641         struct ipq *qp;
 642         struct ipq *qplast;
 643 
 644         cli();
 645         qplast = NULL;
 646         for(qp = ipqueue; qp != NULL; qplast = qp, qp = qp->next)
 647         {
 648                 if (iph->id== qp->iph->id && iph->saddr == qp->iph->saddr &&
 649                         iph->daddr == qp->iph->daddr && iph->protocol == qp->iph->protocol)
 650                 {
 651                         del_timer(&qp->timer);  /* So it doesn't vanish on us. The timer will be reset anyway */
 652                         sti();
 653                         return(qp);
 654                 }
 655         }
 656         sti();
 657         return(NULL);
 658 }
 659 
 660 
 661 /*
 662  *      Remove an entry from the "incomplete datagrams" queue, either
 663  *      because we completed, reassembled and processed it, or because
 664  *      it timed out.
 665  */
 666 
 667 static void ip_free(struct ipq *qp)
 668 {
 669         struct ipfrag *fp;
 670         struct ipfrag *xp;
 671 
 672         /*
 673          * Stop the timer for this entry.
 674          */
 675 
 676         del_timer(&qp->timer);
 677 
 678         /* Remove this entry from the "incomplete datagrams" queue. */
 679         cli();
 680         if (qp->prev == NULL)
 681         {
 682                 ipqueue = qp->next;
 683                 if (ipqueue != NULL)
 684                         ipqueue->prev = NULL;
 685         }
 686         else
 687         {
 688                 qp->prev->next = qp->next;
 689                 if (qp->next != NULL)
 690                         qp->next->prev = qp->prev;
 691         }
 692 
 693         /* Release all fragment data. */
 694 
 695         fp = qp->fragments;
 696         while (fp != NULL)
 697         {
 698                 xp = fp->next;
 699                 IS_SKB(fp->skb);
 700                 kfree_skb(fp->skb,FREE_READ);
 701                 kfree_s(fp, sizeof(struct ipfrag));
 702                 fp = xp;
 703         }
 704 
 705         /* Release the MAC header. */
 706         kfree_s(qp->mac, qp->maclen);
 707 
 708         /* Release the IP header. */
 709         kfree_s(qp->iph, qp->ihlen + 8);
 710 
 711         /* Finally, release the queue descriptor itself. */
 712         kfree_s(qp, sizeof(struct ipq));
 713         sti();
 714 }
 715 
 716 
 717 /*
 718  *      Oops- a fragment queue timed out.  Kill it and send an ICMP reply.
 719  */
 720 
 721 static void ip_expire(unsigned long arg)
 722 {
 723         struct ipq *qp;
 724 
 725         qp = (struct ipq *)arg;
 726 
 727         /*
 728          *      Send an ICMP "Fragment Reassembly Timeout" message.
 729          */
 730 
 731         ip_statistics.IpReasmTimeout++;
 732         ip_statistics.IpReasmFails++;   
 733         /* This if is always true... shrug */
 734         if(qp->fragments!=NULL)
 735                 icmp_send(qp->fragments->skb,ICMP_TIME_EXCEEDED,
 736                                 ICMP_EXC_FRAGTIME, qp->dev);
 737 
 738         /*
 739          *      Nuke the fragment queue.
 740          */
 741         ip_free(qp);
 742 }
 743 
 744 
 745 /*
 746  *      Add an entry to the 'ipq' queue for a newly received IP datagram.
 747  *      We will (hopefully :-) receive all other fragments of this datagram
 748  *      in time, so we just create a queue for this datagram, in which we
 749  *      will insert the received fragments at their respective positions.
 750  */
 751 
 752 static struct ipq *ip_create(struct sk_buff *skb, struct iphdr *iph, struct device *dev)
 753 {
 754         struct ipq *qp;
 755         int maclen;
 756         int ihlen;
 757 
 758         qp = (struct ipq *) kmalloc(sizeof(struct ipq), GFP_ATOMIC);
 759         if (qp == NULL)
 760         {
 761                 printk("IP: create: no memory left !\n");
 762                 return(NULL);
 764         }
 765         memset(qp, 0, sizeof(struct ipq));
 766 
 767         /*
 768          *      Allocate memory for the MAC header.
 769          *
  770          *      FIXME: We have a maximum MAC address size limit defined
 771          *      elsewhere. We should use it here and avoid the 3 kmalloc() calls
 772          */
 773 
 774         maclen = ((unsigned long) iph) - ((unsigned long) skb->data);
 775         qp->mac = (unsigned char *) kmalloc(maclen, GFP_ATOMIC);
 776         if (qp->mac == NULL)
 777         {
 778                 printk("IP: create: no memory left !\n");
 779                 kfree_s(qp, sizeof(struct ipq));
 780                 return(NULL);
 781         }
 782 
 783         /*
 784          *      Allocate memory for the IP header (plus 8 octets for ICMP).
 785          */
 786 
 787         ihlen = (iph->ihl * sizeof(unsigned long));
 788         qp->iph = (struct iphdr *) kmalloc(ihlen + 8, GFP_ATOMIC);
 789         if (qp->iph == NULL)
 790         {
 791                 printk("IP: create: no memory left !\n");
 792                 kfree_s(qp->mac, maclen);
 793                 kfree_s(qp, sizeof(struct ipq));
 794                 return(NULL);
 795         }
 796 
 797         /* Fill in the structure. */
 798         memcpy(qp->mac, skb->data, maclen);
 799         memcpy(qp->iph, iph, ihlen + 8);
 800         qp->len = 0;
 801         qp->ihlen = ihlen;
 802         qp->maclen = maclen;
 803         qp->fragments = NULL;
 804         qp->dev = dev;
 805 
 806         /* Start a timer for this entry. */
 807         qp->timer.expires = IP_FRAG_TIME;               /* about 30 seconds     */
 808         qp->timer.data = (unsigned long) qp;            /* pointer to queue     */
 809         qp->timer.function = ip_expire;                 /* expire function      */
 810         add_timer(&qp->timer);
 811 
 812         /* Add this entry to the queue. */
 813         qp->prev = NULL;
 814         cli();
 815         qp->next = ipqueue;
 816         if (qp->next != NULL)
 817                 qp->next->prev = qp;
 818         ipqueue = qp;
 819         sti();
 820         return(qp);
 821 }
 822 
 823 
 824 /*
 825  *      See if a fragment queue is complete.
 826  */
 827 
 828 static int ip_done(struct ipq *qp)
 829 {
 830         struct ipfrag *fp;
 831         int offset;
 832 
 833         /* Only possible if we received the final fragment. */
 834         if (qp->len == 0)
 835                 return(0);
 836 
 837         /* Check all fragment offsets to see if they connect. */
 838         fp = qp->fragments;
 839         offset = 0;
 840         while (fp != NULL)
 841         {
 842                 if (fp->offset > offset)
 843                         return(0);      /* fragment(s) missing */
 844                 offset = fp->end;
 845                 fp = fp->next;
 846         }
 847 
 848         /* All fragments are present. */
 849         return(1);
 850 }
 851 
 852 
 853 /*
 854  *      Build a new IP datagram from all its fragments.
 855  *
 856  *      FIXME: We copy here because we lack an effective way of handling lists
 857  *      of bits on input. Until the new skb data handling is in I'm not going
 858  *      to touch this with a bargepole. This also causes a 4Kish limit on
 859  *      packet sizes.
 860  */
 861 
 862 static struct sk_buff *ip_glue(struct ipq *qp)
 863 {
 864         struct sk_buff *skb;
 865         struct iphdr *iph;
 866         struct ipfrag *fp;
 867         unsigned char *ptr;
 868         int count, len;
 869 
 870         /*
 871          *      Allocate a new buffer for the datagram.
 872          */
 873 
 874         len = qp->maclen + qp->ihlen + qp->len;
 875 
 876         if ((skb = alloc_skb(len,GFP_ATOMIC)) == NULL)
 877         {
 878                 ip_statistics.IpReasmFails++;
 879                 printk("IP: queue_glue: no memory for gluing queue 0x%X\n", (int) qp);
 880                 ip_free(qp);
 881                 return(NULL);
 882         }
 883 
 884         /* Fill in the basic details. */
 885         skb->len = (len - qp->maclen);
 886         skb->h.raw = skb->data;
 887         skb->free = 1;
 888 
 889         /* Copy the original MAC and IP headers into the new buffer. */
 890         ptr = (unsigned char *) skb->h.raw;
 891         memcpy(ptr, ((unsigned char *) qp->mac), qp->maclen);
 892         ptr += qp->maclen;
 893         memcpy(ptr, ((unsigned char *) qp->iph), qp->ihlen);
 894         ptr += qp->ihlen;
 895         skb->h.raw += qp->maclen;
 896 
 897         count = 0;
 898 
 899         /* Copy the data portions of all fragments into the new buffer. */
 900         fp = qp->fragments;
 901         while(fp != NULL)
 902         {
 903                 if(count+fp->len > skb->len)
 904                 {
 905                         printk("Invalid fragment list: Fragment over size.\n");
 906                         ip_free(qp);
 907                         kfree_skb(skb,FREE_WRITE);
 908                         ip_statistics.IpReasmFails++;
 909                         return NULL;
 910                 }
 911                 memcpy((ptr + fp->offset), fp->ptr, fp->len);
 912                 count += fp->len;
 913                 fp = fp->next;
 914         }
 915 
 916         /* We glued together all fragments, so remove the queue entry. */
 917         ip_free(qp);
 918 
 919         /* Done with all fragments. Fixup the new IP header. */
 920         iph = skb->h.iph;
 921         iph->frag_off = 0;
 922         iph->tot_len = htons((iph->ihl * sizeof(unsigned long)) + count);
 923         skb->ip_hdr = iph;
 924 
 925         ip_statistics.IpReasmOKs++;
 926         return(skb);
 927 }
 928 
 929 
 930 /*
 931  *      Process an incoming IP datagram fragment.
 932  */
 933 
 934 static struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev)
 935 {
 936         struct ipfrag *prev, *next;
 937         struct ipfrag *tfp;
 938         struct ipq *qp;
 939         struct sk_buff *skb2;
 940         unsigned char *ptr;
 941         int flags, offset;
 942         int i, ihl, end;
 943 
 944         ip_statistics.IpReasmReqds++;
 945 
 946         /* Find the entry of this IP datagram in the "incomplete datagrams" queue. */
 947         qp = ip_find(iph);
 948 
 949         /* Is this a non-fragmented datagram? */
 950         offset = ntohs(iph->frag_off);
 951         flags = offset & ~IP_OFFSET;
 952         offset &= IP_OFFSET;
 953         if (((flags & IP_MF) == 0) && (offset == 0))
 954         {
 955                 if (qp != NULL)
 956                         ip_free(qp);    /* Huh? How could this exist?? */
 957                 return(skb);
 958         }
 959 
 960         offset <<= 3;           /* offset is in 8-byte chunks */
 961 
 962         /*
 963          * If the queue already existed, keep restarting its timer as long
 964          * as we still are receiving fragments.  Otherwise, create a fresh
 965          * queue entry.
 966          */
 967 
 968         if (qp != NULL)
 969         {
 970                 del_timer(&qp->timer);
 971                 qp->timer.expires = IP_FRAG_TIME;       /* about 30 seconds */
 972                 qp->timer.data = (unsigned long) qp;    /* pointer to queue */
 973                 qp->timer.function = ip_expire;         /* expire function */
 974                 add_timer(&qp->timer);
 975         }
 976         else
 977         {
 978                 /*
 979                  *      If we failed to create it, then discard the frame
 980                  */
 981                 if ((qp = ip_create(skb, iph, dev)) == NULL)
 982                 {
 983                         skb->sk = NULL;
 984                         kfree_skb(skb, FREE_READ);
 985                         ip_statistics.IpReasmFails++;
 986                         return NULL;
 987                 }
 988         }
 989 
 990         /*
 991          *      Determine the position of this fragment.
 992          */
 993 
 994         ihl = (iph->ihl * sizeof(unsigned long));
 995         end = offset + ntohs(iph->tot_len) - ihl;
 996 
 997         /*
 998          *      Point into the IP datagram 'data' part.
 999          */
1000 
1001         ptr = skb->data + dev->hard_header_len + ihl;
1002 
1003         /*
1004          *      Is this the final fragment?
1005          */
1006 
1007         if ((flags & IP_MF) == 0)
1008                 qp->len = end;
1009 
1010         /*
1011          *      Find out which fragments are in front and at the back of us
1012          *      in the chain of fragments so far.  We must know where to put
1013          *      this fragment, right?
1014          */
1015 
1016         prev = NULL;
1017         for(next = qp->fragments; next != NULL; next = next->next)
1018         {
1019                 if (next->offset > offset)
1020                         break;  /* bingo! */
1021                 prev = next;
1022         }
1023 
1024         /*
1025          *      We found where to put this one.
1026          *      Check for overlap with preceding fragment, and, if needed,
1027          *      align things so that any overlaps are eliminated.
1028          */
1029         if (prev != NULL && offset < prev->end)
1030         {
1031                 i = prev->end - offset;
1032                 offset += i;    /* ptr into datagram */
1033                 ptr += i;       /* ptr into fragment data */
1034         }
1035 
1036         /*
1037          * Look for overlap with succeeding segments.
1038          * If we can merge fragments, do it.
1039          */
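        /*
         *      For example: if this fragment covers bytes [100,300) of the
         *      datagram and an existing fragment starts at offset 250, that
         *      fragment is advanced by 50 bytes; if the trim leaves it empty
         *      it is unlinked and freed below.
         */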
1040 
1041         for(; next != NULL; next = tfp)
1042         {
1043                 tfp = next->next;
1044                 if (next->offset >= end)
1045                         break;          /* no overlaps at all */
1046 
1047                 i = end - next->offset;                 /* overlap is 'i' bytes */
1048                 next->len -= i;                         /* so reduce size of    */
1049                 next->offset += i;                      /* next fragment        */
1050                 next->ptr += i;
1051 
1052                 /*
1053                  *      If we get a frag size of <= 0, remove it and the packet
1054                  *      that it goes with.
1055                  */
1056                 if (next->len <= 0)
1057                 {
1058                         if (next->prev != NULL)
1059                                 next->prev->next = next->next;
1060                         else
1061                                 qp->fragments = next->next;
1062 
 1063                         if (next->next != NULL)
1064                                 next->next->prev = next->prev;
1065 
1066                         kfree_skb(next->skb,FREE_READ);
1067                         kfree_s(next, sizeof(struct ipfrag));
1068                 }
1069         }
1070 
1071         /*
1072          *      Insert this fragment in the chain of fragments.
1073          */
1074 
1075         tfp = NULL;
1076         tfp = ip_frag_create(offset, end, skb, ptr);
1077 
1078         /*
1079          *      No memory to save the fragment - so throw the lot
1080          */
1081 
1082         if (!tfp)
1083         {
1084                 skb->sk = NULL;
1085                 kfree_skb(skb, FREE_READ);
1086                 return NULL;
1087         }
1088         tfp->prev = prev;
1089         tfp->next = next;
1090         if (prev != NULL)
1091                 prev->next = tfp;
1092         else
1093                 qp->fragments = tfp;
1094 
1095         if (next != NULL)
1096                 next->prev = tfp;
1097 
1098         /*
1099          *      OK, so we inserted this new fragment into the chain.
1100          *      Check if we now have a full IP datagram which we can
1101          *      bump up to the IP layer...
1102          */
1103 
1104         if (ip_done(qp))
1105         {
1106                 skb2 = ip_glue(qp);             /* glue together the fragments */
1107                 return(skb2);
1108         }
1109         return(NULL);
1110 }
1111 
1112 
1113 /*
1114  *      This IP datagram is too large to be sent in one piece.  Break it up into
1115  *      smaller pieces (each of size equal to the MAC header plus IP header plus
1116  *      a block of the data of the original IP data part) that will yet fit in a
1117  *      single device frame, and queue such a frame for sending by calling the
1118  *      ip_queue_xmit().  Note that this is recursion, and bad things will happen
1119  *      if this function causes a loop...
1120  *
1121  *      Yes this is inefficient, feel free to submit a quicker one.
1122  *
1123  *      **Protocol Violation**
1124  *      We copy all the options to each fragment. !FIXME!
1125  */
1126 void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag)
1127 {
1128         struct iphdr *iph;
1129         unsigned char *raw;
1130         unsigned char *ptr;
1131         struct sk_buff *skb2;
1132         int left, mtu, hlen, len;
1133         int offset;
1134         unsigned long flags;
1135 
1136         /*
1137          *      Point into the IP datagram header.
1138          */
1139 
1140         raw = skb->data;
1141         iph = (struct iphdr *) (raw + dev->hard_header_len);
1142 
1143         skb->ip_hdr = iph;
1144 
1145         /*
1146          *      Setup starting values.
1147          */
1148 
1149         hlen = (iph->ihl * sizeof(unsigned long));
1150         left = ntohs(iph->tot_len) - hlen;      /* Space per frame */
1151         hlen += dev->hard_header_len;           /* Total header size */
1152         mtu = (dev->mtu - hlen);                /* Size of data space */
1153         ptr = (raw + hlen);                     /* Where to start from */
1154 
1155         /*
1156          *      Check for any "DF" flag. [DF means do not fragment]
1157          */
1158 
1159         if (ntohs(iph->frag_off) & IP_DF)
1160         {
1161                 ip_statistics.IpFragFails++;
1162                 icmp_send(skb,ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, dev);
1163                 return;
1164         }
1165 
1166         /*
1167          *      The protocol doesn't seem to say what to do in the case that the
1168          *      frame + options doesn't fit the mtu. As it used to fall down dead
1169          *      in this case we were fortunate it didn't happen
1170          */
1171 
1172         if(mtu<8)
1173         {
 1174                 /* It's wrong but it's better than nothing */
1175                 icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,dev);
1176                 ip_statistics.IpFragFails++;
1177                 return;
1178         }
1179 
1180         /*
1181          *      Fragment the datagram.
1182          */
1183 
1184         /*
1185          *      The initial offset is 0 for a complete frame. When
1186          *      fragmenting fragments its wherever this one starts.
1187          */
1188 
1189         if (is_frag & 2)
1190                 offset = (ntohs(iph->frag_off) & 0x1fff) << 3;
1191         else
1192                 offset = 0;
1193 
1194 
1195         /*
1196          *      Keep copying data until we run out.
1197          */
1198 
1199         while(left > 0)
1200         {
1201                 len = left;
1202                 /* IF: it doesn't fit, use 'mtu' - the data space left */
1203                 if (len > mtu)
1204                         len = mtu;
 1205                 /* IF: we are not sending up to and including the packet end, then
 1206                    align the next start on an eight byte boundary (worked example below) */
1207                 if (len < left)
1208                 {
1209                         len/=8;
1210                         len*=8;
1211                 }
1212                 /*
1213                  *      Allocate buffer.
1214                  */
1215 
1216                 if ((skb2 = alloc_skb(len + hlen,GFP_ATOMIC)) == NULL)
1217                 {
1218                         printk("IP: frag: no memory for new fragment!\n");
1219                         ip_statistics.IpFragFails++;
1220                         return;
1221                 }
1222 
1223                 /*
1224                  *      Set up data on packet
1225                  */
1226 
1227                 skb2->arp = skb->arp;
1228                 if(skb->free==0)
1229                         printk("IP fragmenter: BUG free!=1 in fragmenter\n");
1230                 skb2->free = 1;
1231                 skb2->len = len + hlen;
1232                 skb2->h.raw=(char *) skb2->data;
1233                 /*
1234                  *      Charge the memory for the fragment to any owner
1235                  *      it might possess
1236                  */
1237 
1238                 save_flags(flags);
1239                 if (sk)
1240                 {
1241                         cli();
1242                         sk->wmem_alloc += skb2->mem_len;
1243                         skb2->sk=sk;
1244                 }
1245                 restore_flags(flags);
1246                 skb2->raddr = skb->raddr;       /* For rebuild_header - must be here */
1247 
1248                 /*
1249                  *      Copy the packet header into the new buffer.
1250                  */
1251 
1252                 memcpy(skb2->h.raw, raw, hlen);
1253 
1254                 /*
1255                  *      Copy a block of the IP datagram.
1256                  */
1257                 memcpy(skb2->h.raw + hlen, ptr, len);
1258                 left -= len;
1259 
1260                 skb2->h.raw+=dev->hard_header_len;
1261 
1262                 /*
1263                  *      Fill in the new header fields.
1264                  */
1265                 iph = (struct iphdr *)(skb2->h.raw/*+dev->hard_header_len*/);
1266                 iph->frag_off = htons((offset >> 3));
1267                 /*
 1268                  *      Added AC : If we are fragmenting a fragment that's not the
 1269                  *                 last fragment then keep MF set on each piece
1270                  */
1271                 if (left > 0 || (is_frag & 1))
1272                         iph->frag_off |= htons(IP_MF);
1273                 ptr += len;
1274                 offset += len;
1275 
1276                 /*
1277                  *      Put this fragment into the sending queue.
1278                  */
1279 
1280                 ip_statistics.IpFragCreates++;
1281 
1282                 ip_queue_xmit(sk, dev, skb2, 2);
1283         }
1284         ip_statistics.IpFragOKs++;
1285 }
1286 
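/*
 *      Worked example of the size arithmetic in ip_fragment() above
 *      (illustrative only, not kernel code): with 3980 data bytes left and
 *      1480 bytes of data space per frame the loop produces pieces of 1480,
 *      1480 and 1020 bytes at offsets 0, 1480 and 2960. Every piece except the
 *      last is trimmed to a multiple of 8 so that the 13-bit fragment offset
 *      field, which counts in units of 8 bytes, can describe it.
 */
#if 0
static void ip_fragment_sizes_sketch(int left, int mtu)
{
        int offset = 0;
        int len;

        while (left > 0)
        {
                len = left > mtu ? mtu : left;
                if (len < left)                 /* not the last piece: keep it 8-byte aligned */
                        len = (len / 8) * 8;
                printk("fragment: offset=%d len=%d MF=%d\n", offset, len, left - len > 0);
                offset += len;
                left -= len;
        }
}
#endif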
1287 
1288 
1289 #ifdef CONFIG_IP_FORWARD
1290 
1291 /*
1292  *      Forward an IP datagram to its next destination.
1293  */
1294 
1295 static void ip_forward(struct sk_buff *skb, struct device *dev, int is_frag)
1296 {
1297         struct device *dev2;    /* Output device */
1298         struct iphdr *iph;      /* Our header */
1299         struct sk_buff *skb2;   /* Output packet */
1300         struct rtable *rt;      /* Route we use */
1301         unsigned char *ptr;     /* Data pointer */
1302         unsigned long raddr;    /* Router IP address */
1303 
1304         /* 
1305          *      See if we are allowed to forward this.
1306          */
1307 
1308 #ifdef CONFIG_IP_FIREWALL
1309         if(!ip_fw_chk(skb->h.iph, ip_fw_fwd_chain))
1310         {
1311                 return;
1312         }
1313 #endif
1314         /*
1315          *      According to the RFC, we must first decrease the TTL field. If
1316          *      that reaches zero, we must reply an ICMP control message telling
1317          *      that the packet's lifetime expired.
1318          *
1319          *      Exception:
1320          *      We may not generate an ICMP for an ICMP. icmp_send does the
1321          *      enforcement of this so we can forget it here. It is however
1322          *      sometimes VERY important.
1323          */
1324 
1325         iph = skb->h.iph;
1326         iph->ttl--;
1327         if (iph->ttl <= 0)
1328         {
1329                 /* Tell the sender its packet died... */
1330                 icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, dev);
1331                 return;
1332         }
1333 
 1334         /*
 1335          *      Re-compute the IP header checksum.  This is inefficient: we know
 1336          *      what has happened to the header and could adjust the checksum
 1337          *      incrementally, as Phil Karn does in KA9Q (a sketch follows this function).
 1338          */
1339 
1340         ip_send_check(iph);
1341 
1342         /*
1343          * OK, the packet is still valid.  Fetch its destination address,
1344          * and give it to the IP sender for further processing.
1345          */
1346 
1347         rt = ip_rt_route(iph->daddr, NULL, NULL);
1348         if (rt == NULL)
1349         {
1350                 /*
1351                  *      Tell the sender its packet cannot be delivered. Again
1352                  *      ICMP is screened later.
1353                  */
1354                 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_UNREACH, dev);
1355                 return;
1356         }
1357 
1358 
1359         /*
1360          * Gosh.  Not only is the packet valid; we even know how to
1361          * forward it onto its final destination.  Can we say this
1362          * is being plain lucky?
1363          * If the router told us that there is no GW, use the dest.
1364          * IP address itself- we seem to be connected directly...
1365          */
1366 
1367         raddr = rt->rt_gateway;
1368 
1369         if (raddr != 0)
1370         {
1371                 /*
1372                  *      There is a gateway so find the correct route for it.
1373                  *      Gateways cannot in turn be gatewayed.
1374                  */
1375                 rt = ip_rt_route(raddr, NULL, NULL);
1376                 if (rt == NULL)
1377                 {
1378                         /*
1379                          *      Tell the sender its packet cannot be delivered...
1380                          */
1381                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, dev);
1382                         return;
1383                 }
1384                 if (rt->rt_gateway != 0)
1385                         raddr = rt->rt_gateway;
1386         }
1387         else
1388                 raddr = iph->daddr;
1389 
1390         /*
1391          *      Having picked a route we can now send the frame out.
1392          */
1393 
1394         dev2 = rt->rt_dev;
1395 
1396         /*
1397          *      In IP you never forward a frame on the interface that it arrived
1398          *      upon. We should generate an ICMP HOST REDIRECT giving the route
1399          *      we calculated.
1400          *      For now just dropping the packet is an acceptable compromise.
1401          */
1402 
1403         if (dev == dev2)
1404                 return;
1405 
1406         /*
1407          * We now allocate a new buffer, and copy the datagram into it.
1408          * If the indicated interface is up and running, kick it.
1409          */
1410 
1411         if (dev2->flags & IFF_UP)
1412         {
1413 
1414                 /*
1415                  *      Current design decrees we copy the packet. For identical header
1416                  *      lengths we could avoid it. The new skb code will let us push
1417                  *      data so the problem goes away then.
1418                  */
1419 
1420                 skb2 = alloc_skb(dev2->hard_header_len + skb->len, GFP_ATOMIC);
1421                 /*
1422                  *      This is rare and since IP is tolerant of network failures
1423                  *      quite harmless.
1424                  */
1425                 if (skb2 == NULL)
1426                 {
1427                         printk("\nIP: No memory available for IP forward\n");
1428                         return;
1429                 }
1430                 ptr = skb2->data;
1431                 skb2->free = 1;
1432                 skb2->len = skb->len + dev2->hard_header_len;
1433                 skb2->h.raw = ptr;
1434 
1435                 /*
1436                  *      Copy the packet data into the new buffer.
1437                  */
1438                 memcpy(ptr + dev2->hard_header_len, skb->h.raw, skb->len);
1439 
1440                 /* Now build the MAC header. */
1441                 (void) ip_send(skb2, raddr, skb->len, dev2, dev2->pa_addr);
1442 
1443                 ip_statistics.IpForwDatagrams++;
1444 
1445                 /*
1446                  *      See if it needs fragmenting. Note in ip_rcv we tagged
1447                  *      the fragment type. This must be right so that
1448                  *      the fragmenter does the right thing.
1449                  */
1450 
1451                 if(skb2->len > dev2->mtu + dev2->hard_header_len)
1452                 {
1453                         ip_fragment(NULL,skb2,dev2, is_frag);
1454                         kfree_skb(skb2,FREE_WRITE);
1455                 }
1456                 else
1457                 {
1458 #ifdef CONFIG_IP_ACCT           
1459                         /*
1460                          *      Count mapping we shortcut
1461                          */
1462                          
1463                         ip_acct_cnt(iph,ip_acct_chain,1);
1464 #endif                  
1465                         
1466                         /*
1467                          *      Map service types to priority. We lie about
 1468                  *      throughput being low priority, but it's a good
1469                          *      choice to help improve general usage.
1470                          */
1471                         if(iph->tos & IPTOS_LOWDELAY)
1472                                 dev_queue_xmit(skb2, dev2, SOPRI_INTERACTIVE);
1473                         else if(iph->tos & IPTOS_THROUGHPUT)
1474                                 dev_queue_xmit(skb2, dev2, SOPRI_BACKGROUND);
1475                         else
1476                                 dev_queue_xmit(skb2, dev2, SOPRI_NORMAL);
1477                 }
1478         }
1479 }
1480 
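/*
 *      Illustrative only: the incremental (RFC 1141 style) checksum update
 *      alluded to in ip_forward() above. Decrementing the TTL lowers the
 *      network-order 16-bit header word holding TTL/protocol by 0x0100, so the
 *      stored checksum can be patched rather than recomputed by ip_send_check().
 *      This hypothetical helper is not used anywhere in this file.
 */
#if 0
static void ip_decrease_ttl_sketch(struct iphdr *iph)
{
        unsigned long check = iph->check;

        check += htons(0x0100);                 /* add back what the TTL drop removed */
        iph->check = check + (check >= 0xffff); /* fold a possible carry, one's complement style */
        iph->ttl--;
}
#endif
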
1481 
1482 #endif
1483 
1484 /*
1485  *      This function receives all incoming IP datagrams.
1486  */
1487 
1488 int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
1489 {
1490         struct iphdr *iph = skb->h.iph;
1491         struct sock *raw_sk=NULL;
1492         unsigned char hash;
1493         unsigned char flag = 0;
1494         unsigned char opts_p = 0;       /* Set iff the packet has options. */
1495         struct inet_protocol *ipprot;
1496         static struct options opt; /* static because we don't use the contents
1497                                 yet and they would just eat stack space. */
1498         int brd=IS_MYADDR;
1499         int is_frag=0;
1500 
1501         ip_statistics.IpInReceives++;
1502 
1503         /*
1504          *      Tag the ip header of this packet so we can find it
1505          */
1506 
1507         skb->ip_hdr = iph;
1508 
1509         /*
1510          *      Is the datagram acceptable?
1511          *
1512          *      1.      Length at least the size of an ip header
1513          *      2.      Version of 4
1514          *      3.      Checksum is correct. [Speed optimisation for later: skip loopback checksums]
1515          *      (4.     We ought to check for IP multicast addresses and undefined types... does this matter?)
1516          */
1517 
1518         if (skb->len<sizeof(struct iphdr) || iph->ihl<5 || iph->version != 4 || ip_fast_csum((unsigned char *)iph, iph->ihl) !=0)
1519         {
1520                 ip_statistics.IpInHdrErrors++;
1521                 kfree_skb(skb, FREE_WRITE);
1522                 return(0);
1523         }
1524         
1525         /*
1526          *      See if the firewall wants to dispose of the packet. 
1527          */
1528 
1529 #ifdef  CONFIG_IP_FIREWALL
1530         
1531         if(!LOOPBACK(iph->daddr) && !ip_fw_chk(iph,ip_fw_blk_chain))
1532         {
1533                 kfree_skb(skb, FREE_WRITE);
1534                 return 0;       
1535         }
1536 
1537 #endif
1538         
1539         /*
1540          *      Our transport medium may have padded the buffer out. Now we know it
1541          *      is IP we can trim to the true length of the frame.
1542          */
1543 
1544         skb->len=ntohs(iph->tot_len);
1545 
1546         /*
1547          *      Next analyse the packet for options. Studies show that fewer than
1548          *      one packet in a thousand has options...
1549          */
1550 
1551         if (iph->ihl != 5)
1552         {       /* Slow path: only packets carrying options get here. The typical optionless packet (ihl == 5) skips all of this. */
1553                 memset((char *) &opt, 0, sizeof(opt));
1554                 if (do_options(iph, &opt) != 0)
1555                         return 0;
1556                 opts_p = 1;
1557         }
1558 
1559         /*
1560          *      Remember if the frame is fragmented.
1561          */
1562          
1563         if(iph->frag_off)
1564         {
1565                 if (iph->frag_off & 0x0020)
1566                         is_frag|=1;
1567                 /*
1568                  *      Not the first fragment (nonzero offset) ?
1569                  */
1570         
1571                 if (ntohs(iph->frag_off) & 0x1fff)
1572                         is_frag|=2;
1573         }
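             /*
              *      For reference: frag_off is held in network byte order. The
              *      top three bits are flags (reserved, DF, MF) and the low 13
              *      bits are the fragment offset in units of 8 bytes. So the
              *      0x0020 test above picks out the MF bit on a little-endian
              *      host, and a nonzero (ntohs(frag_off) & 0x1fff) means the
              *      fragment does not start the datagram.
              */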
1574         
1575         /*
1576          *      Do any IP forwarding required.  chk_addr() is expensive -- avoid it someday.
1577          *
1578          *      This is inefficient. While finding out if it is for us we could also compute
1579          *      the routing table entry. This is where the great unified cache theory comes
1580          *      in as and when someone implements it
1581          *
1582          *      For most hosts over 99% of packets match the first conditional
1583          *      and don't go via ip_chk_addr. Note: brd is set to IS_MYADDR at
1584          *      function entry.
1585          */
1586 
1587         if ( iph->daddr != skb->dev->pa_addr && (brd = ip_chk_addr(iph->daddr)) == 0)
1588         {
1589                 /*
1590                  *      Don't forward multicast or broadcast frames.
1591                  */
1592 
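                     /*
                      *      brd is still zero here (that is how we entered this
                      *      block), so the IS_BROADCAST test below can never match;
                      *      the pkt_type check is what actually rejects link level
                      *      broadcast and multicast frames.
                      */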
1593                 if(skb->pkt_type!=PACKET_HOST || brd==IS_BROADCAST)
1594                 {
1595                         kfree_skb(skb,FREE_WRITE);
1596                         return 0;
1597                 }
1598 
1599                 /*
1600                  *      The packet is for another target. Forward the frame
1601                  */
1602 
1603 #ifdef CONFIG_IP_FORWARD
1604                 ip_forward(skb, dev, is_frag);
1605 #else
1606 /*              printk("Machine %lx tried to use us as a forwarder to %lx but we have forwarding disabled!\n",
1607                         iph->saddr,iph->daddr);*/
1608                 ip_statistics.IpInAddrErrors++;
1609 #endif
1610                 /*
1611                  *      The forwarder is inefficient and copies the packet. We
1612                  *      free the original now.
1613                  */
1614 
1615                 kfree_skb(skb, FREE_WRITE);
1616                 return(0);
1617         }
1618         
1619 #ifdef CONFIG_IP_MULTICAST      
1620 
1621         if(brd==IS_MULTICAST && iph->daddr!=IGMP_ALL_HOSTS && !(dev->flags&IFF_LOOPBACK))
1622         {
1623                 /*
1624                  *      Check it is for one of our groups
1625                  */
1626                 struct ip_mc_list *ip_mc=dev->ip_mc_list;
1627                 do
1628                 {
1629                         if(ip_mc==NULL)
1630                         {       
1631                                 kfree_skb(skb, FREE_WRITE);
1632                                 return 0;
1633                         }
1634                         if(ip_mc->multiaddr==iph->daddr)
1635                                 break;
1636                         ip_mc=ip_mc->next;
1637                 }
1638                 while(1);
1639         }
1640 #endif
1641         /*
1642          *      Account for the packet
1643          */
1644          
1645 #ifdef CONFIG_IP_ACCT
1646         ip_acct_cnt(iph,ip_acct_chain,1);
1647 #endif  
1648 
1649         /*
1650          * Reassemble IP fragments.
1651          */
1652 
1653         if(is_frag)
1654         {
1655                 /* Defragment. Obtain the complete packet if there is one */
1656                 skb=ip_defrag(iph,skb,dev);
1657                 if(skb==NULL)
1658                         return 0;
1659                 skb->dev = dev;
1660                 iph=skb->h.iph;
1661         }
1662         
1663                  
1664 
1665         /*
1666          *      Point into the IP datagram, just past the header.
1667          */
1668 
1669         skb->ip_hdr = iph;
1670         skb->h.raw += iph->ihl*4;
1671         
1672         /*
1673          *      Deliver to raw sockets. This is fun as to avoid copies we want to make no surplus copies.
1674          */
1675          
1676         hash = iph->protocol & (SOCK_ARRAY_SIZE-1);
1677         
1678         /* If there may be a raw socket here we must check - if not we don't care */
1679         if((raw_sk=raw_prot.sock_array[hash])!=NULL)
1680         {
1681                 struct sock *sknext=NULL;
1682                 struct sk_buff *skb1;
1683                 raw_sk=get_sock_raw(raw_sk, hash,  iph->saddr, iph->daddr);
1684                 if(raw_sk)      /* Any raw sockets */
1685                 {
1686                         do
1687                         {
1688                                 /* Find the next */
1689                                 sknext=get_sock_raw(raw_sk->next, hash, iph->saddr, iph->daddr);
1690                                 if(sknext)
1691                                         skb1=skb_clone(skb, GFP_ATOMIC);
1692                                 else
1693                                         break;  /* One pending raw socket left */
1694                                 if(skb1)
1695                                         raw_rcv(raw_sk, skb1, dev, iph->saddr,iph->daddr);
1696                                 raw_sk=sknext;
1697                         }
1698                         while(raw_sk!=NULL);
1699                         /* Here either raw_sk is the last raw socket, or NULL if none */
1700                         /* We deliver to the last raw socket AFTER the protocol checks as it avoids a surplus copy */
1701                 }
1702         }
1703         
1704         /*
1705          *      skb->h.raw now points at the protocol beyond the IP header.
1706          */
1707 
1708         hash = iph->protocol & (MAX_INET_PROTOS -1);
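             /*
              *      inet_protos[] is a small hash table keyed on the low bits of
              *      the protocol number. Entries that collide are chained through
              *      ipprot->next, which is why the loop still compares
              *      ipprot->protocol explicitly.
              */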
1709         for (ipprot = (struct inet_protocol *)inet_protos[hash];ipprot != NULL;ipprot=(struct inet_protocol *)ipprot->next)
1710         {
1711                 struct sk_buff *skb2;
1712 
1713                 if (ipprot->protocol != iph->protocol)
1714                         continue;
1715                 /*
1716                  *      See if we need to make a copy of it.  The copy flag is
1717                  *      only set if more than one protocol wants the packet, and
1718                  *      then not for the last one. If there is a pending raw
1719                  *      delivery we copy as well so the raw socket gets the original.
1720                  */
1721                 if (ipprot->copy || raw_sk)
1722                 {
1723                         skb2 = skb_clone(skb, GFP_ATOMIC);
1724                         if(skb2==NULL)
1725                                 continue;
1726                 }
1727                 else
1728                 {
1729                         skb2 = skb;
1730                 }
1731                 flag = 1;
1732 
1733                /*
1734                 * Pass on the datagram to each protocol that wants it,
1735                 * based on the datagram protocol.  We should really
1736                 * check the protocol handler's return values here...
1737                 */
1738                 ipprot->handler(skb2, dev, opts_p ? &opt : 0, iph->daddr,
1739                                 (ntohs(iph->tot_len) - (iph->ihl * 4)),
1740                                 iph->saddr, 0, ipprot);
1741 
1742         }
1743 
1744         /*
1745          * All protocols checked.
1746          * If this packet was a broadcast, we may *not* reply to it, since that
1747          * causes (proven, grin) ARP storms and a leakage of memory (i.e. all
1748          * ICMP reply messages get queued up for transmission...)
1749          */
1750 
1751         if(raw_sk!=NULL)        /* Shift to last raw user */
1752                 raw_rcv(raw_sk, skb, dev, iph->saddr, iph->daddr);
1753         else if (!flag)         /* Free and report errors */
1754         {
1755                 if (brd != IS_BROADCAST && brd!=IS_MULTICAST)
1756                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, dev);
1757                 kfree_skb(skb, FREE_WRITE);
1758         }
1759 
1760         return(0);
1761 }
1762 
1763 /*
1764  *      Loop a packet back to the sender.
1765  */
1766  
1767 static void ip_loopback(struct device *old_dev, struct sk_buff *skb)
1768 {
1769         extern struct device loopback_dev;
1770         struct device *dev=&loopback_dev;
1771         int len=skb->len-old_dev->hard_header_len;
1772         struct sk_buff *newskb=alloc_skb(len+dev->hard_header_len, GFP_ATOMIC);
1773         
1774         if(newskb==NULL)
1775                 return;
1776                 
1777         newskb->link3=NULL;
1778         newskb->sk=NULL;
1779         newskb->dev=dev;
1780         newskb->saddr=skb->saddr;
1781         newskb->daddr=skb->daddr;
1782         newskb->raddr=skb->raddr;
1783         newskb->free=1;
1784         newskb->lock=0;
1785         newskb->users=0;
1786         newskb->pkt_type=skb->pkt_type;
1787         newskb->len=len+dev->hard_header_len;
1788         
1789         
1790         newskb->ip_hdr=(struct iphdr *)(newskb->data+ip_send(newskb, skb->ip_hdr->daddr, len, dev, skb->ip_hdr->saddr));
1791         memcpy(newskb->ip_hdr,skb->ip_hdr,len);
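             /*
              *      ip_send() has just written the loopback MAC header and returned
              *      its length, so ip_hdr points straight past it; the memcpy above
              *      then duplicated the whole original IP datagram (header plus
              *      payload) into the new buffer.
              */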
1792 
1793         /* Recurse. The device check against IFF_LOOPBACK will stop infinite recursion */
1794                 
1795         /*printk("Loopback output queued [%lX to %lX].\n", newskb->ip_hdr->saddr,newskb->ip_hdr->daddr);*/
1796         ip_queue_xmit(NULL, dev, newskb, 1);
1797 }
1798 
1799 
1800 /*
1801  * Queues a packet to be sent, and starts the transmitter
1802  * if necessary.  If free = 1 then we free the block after
1803  * transmit, otherwise we don't. If free==2 we not only
1804  * free the block but also don't assign a new IP
1805  * identification value. This routine also fills in the total
1806  * length and computes the header checksum.
1807  */
1808 
1809 void ip_queue_xmit(struct sock *sk, struct device *dev,
1810               struct sk_buff *skb, int free)
1811 {
1812         struct iphdr *iph;
1813         unsigned char *ptr;
1814 
1815         /* Sanity check */
1816         if (dev == NULL)
1817         {
1818                 printk("IP: ip_queue_xmit dev = NULL\n");
1819                 return;
1820         }
1821 
1822         IS_SKB(skb);
1823 
1824         /*
1825          *      Do some book-keeping in the packet for later
1826          */
1827 
1828 
1829         skb->dev = dev;
1830         skb->when = jiffies;
1831 
1832         /*
1833          *      Find the IP header and set the length. This is bad
1834          *      but once we get the skb data handling code in, the
1835          *      hardware will push its header sensibly and we will
1836          *      set skb->ip_hdr to avoid this mess and the fixed
1837          *      header length problem.
1838          */
1839 
1840         ptr = skb->data;
1841         ptr += dev->hard_header_len;
1842         iph = (struct iphdr *)ptr;
1843         skb->ip_hdr = iph;
1844         iph->tot_len = htons(skb->len-dev->hard_header_len);
1845 
1846         /*
1847          *      No reassigning numbers to fragments...
1848          */
1849 
1850         if(free!=2)
1851                 iph->id      = htons(ip_id_count++);
1852         else
1853                 free=1;
1854 
1855         /* All buffers without an owner socket get freed */
1856         if (sk == NULL)
1857                 free = 1;
1858 
1859         skb->free = free;
1860 
1861         /*
1862          *      Do we need to fragment. Again this is inefficient.
1863          *      We need to somehow lock the original buffer and use
1864          *      bits of it.
1865          */
1866 
1867         if(skb->len > dev->mtu + dev->hard_header_len)
1868         {
1869                 ip_fragment(sk,skb,dev,0);
1870                 IS_SKB(skb);
1871                 kfree_skb(skb,FREE_WRITE);
1872                 return;
1873         }
1874 
1875         /*
1876          *      Add an IP checksum
1877          */
1878 
1879         ip_send_check(iph);
1880 
1881         /*
1882          *      Print the frame when debugging
1883          */
1884 
1885         /*
1886          *      More debugging. You cannot queue a packet already on a list
1887          *      Spot this and moan loudly.
1888          */
1889         if (skb->next != NULL)
1890         {
1891                 printk("ip_queue_xmit: next != NULL\n");
1892                 skb_unlink(skb);
1893         }
1894 
1895         /*
1896          *      If a sender wishes the packet to remain unfreed
1897          *      we add it to his send queue. This arguably belongs
1898          *      in the TCP level since nobody else uses it. BUT
1899          *      remember IPng might change all the rules.
1900          */
1901 
1902         if (!free)
1903         {
1904                 unsigned long flags;
1905                 /* The socket now has more outstanding blocks */
1906 
1907                 sk->packets_out++;
1908 
1909                 /* Protect the list for a moment */
1910                 save_flags(flags);
1911                 cli();
1912 
1913                 if (skb->link3 != NULL)
1914                 {
1915                         printk("ip.c: link3 != NULL\n");
1916                         skb->link3 = NULL;
1917                 }
1918                 if (sk->send_head == NULL)
1919                 {
1920                         sk->send_tail = skb;
1921                         sk->send_head = skb;
1922                 }
1923                 else
1924                 {
1925                         sk->send_tail->link3 = skb;
1926                         sk->send_tail = skb;
1927                 }
1928                 /* skb->link3 is NULL */
1929 
1930                 /* Interrupt restore */
1931                 restore_flags(flags);
1932         }
1933         else
1934                 /* Remember who owns the buffer */
1935                 skb->sk = sk;
1936 
1937         /*
1938          *      If the indicated interface is up and running, send the packet.
1939          */
1940          
1941         ip_statistics.IpOutRequests++;
1942 #ifdef CONFIG_IP_ACCT
1943         ip_acct_cnt(iph,ip_acct_chain,1);
1944 #endif  
1945         
1946 #ifdef CONFIG_IP_MULTICAST      
1947 
1948         /*
1949          *      Multicasts are looped back for other local users
1950          */
1951          
1952         if (MULTICAST(iph->daddr) && !(dev->flags&IFF_LOOPBACK))
1953         {
1954                 if(sk==NULL || sk->ip_mc_loop)
1955                 {
1956                         if(iph->daddr==IGMP_ALL_HOSTS)
1957                                 ip_loopback(dev,skb);
1958                         else
1959                         {
1960                                 struct ip_mc_list *imc=dev->ip_mc_list;
1961                                 while(imc!=NULL)
1962                                 {
1963                                         if(imc->multiaddr==iph->daddr)
1964                                         {
1965                                                 ip_loopback(dev,skb);
1966                                                 break;
1967                                         }
1968                                         imc=imc->next;
1969                                 }
1970                         }
1971                 }
1972                 /* Multicasts with ttl 0 must not go beyond the host */
1973                 
1974                 if(skb->ip_hdr->ttl==0)
1975                 {
1976                         kfree_skb(skb, FREE_READ);
1977                         return;
1978                 }
1979         }
1980 #endif
1981         if((dev->flags&IFF_BROADCAST) && iph->daddr==dev->pa_brdaddr && !(dev->flags&IFF_LOOPBACK))
1982                 ip_loopback(dev,skb);
1983                 
1984         if (dev->flags & IFF_UP)
1985         {
1986                 /*
1987                  *      If we have an owner use its priority setting,
1988                  *      otherwise use NORMAL
1989                  */
1990 
1991                 if (sk != NULL)
1992                 {
1993                         dev_queue_xmit(skb, dev, sk->priority);
1994                 }
1995                 else
1996                 {
1997                         dev_queue_xmit(skb, dev, SOPRI_NORMAL);
1998                 }
1999         }
2000         else
2001         {
2002                 ip_statistics.IpOutDiscards++;
2003                 if (free)
2004                         kfree_skb(skb, FREE_WRITE);
2005         }
2006 }
2007 
2008 
2009 
2010 #ifdef CONFIG_IP_MULTICAST
2011 
2012 /*
2013  *      Write a multicast group list table for the IGMP daemon to
2014  *      read.
2015  */
2016  
2017 int ip_mc_procinfo(char *buffer, char **start, off_t offset, int length)
2018 {
2019         off_t pos=0, begin=0;
2020         struct ip_mc_list *im;
2021         unsigned long flags;
2022         int len=0;
2023         
2024         
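             /*
              *      Usual /proc read pattern: generate the whole table but hand
              *      back only the [offset, offset+length) window the caller asked
              *      for. 'begin' tracks where the data still kept in the buffer
              *      starts relative to the complete output.
              */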
2025         len=sprintf(buffer,"Device    : Multicast\n");  
2026         save_flags(flags);
2027         cli();
2028         
2029         im=ip_mc_head;
2030         
2031         while(im!=NULL)
2032         {
2033                 len+=sprintf(buffer+len,"%-10s: %08lX\n", im->interface->name, im->multiaddr);
2034                 pos=begin+len;
2035                 if(pos<offset)
2036                 {
2037                         len=0;
2038                         begin=pos;
2039                 }
2040                 if(pos>offset+length)
2041                         break;
2042                 im=im->next;
2043         }
2044         restore_flags(flags);
2045         *start=buffer+(offset-begin);
2046         len-=(offset-begin);
2047         if(len>length)
2048                 len=length;     
2049         return len;
2050 }
2051 
2052 
2053 #endif  
2054 /*
2055  *      Socket option code for IP. This is the end of the line after any TCP, UDP etc. options on
2056  *      an IP socket.
2057  *
2058  *      We implement IP_TOS (type of service), IP_TTL (time to live).
2059  *
2060  *      Next release we will sort out IP_OPTIONS, since they are kind of important to some people.
2061  */
2062 
2063 int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
2064 {
2065         int val,err;
2066 #if defined(CONFIG_IP_FIREWALL) || defined(CONFIG_IP_ACCT)
2067         struct ip_fw tmp_fw;
2068 #endif  
2069         if (optval == NULL)
2070                 return(-EINVAL);
2071 
2072         err=verify_area(VERIFY_READ, optval, sizeof(int));
2073         if(err)
2074                 return err;
2075 
2076         val = get_fs_long((unsigned long *)optval);
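             /*
              *      optval is a user space pointer, hence the verify_area() check
              *      above and the get_fs_byte()/get_fs_long()/memcpy_fromfs()
              *      accessors used for every read from it below.
              */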
2077 
2078         if(level!=SOL_IP)
2079                 return -EOPNOTSUPP;
2080 
2081 #ifdef CONFIG_IP_MULTICAST
2082         if(optname==IP_MULTICAST_TTL)
2083         {
2084                 unsigned char ucval;
2085                 ucval=get_fs_byte((unsigned char *)optval);
2086                 printk("MC TTL %d\n", ucval);
2087                 if(ucval<1||ucval>255)
2088                         return -EINVAL;
2089                 sk->ip_mc_ttl=(int)ucval;
2090                 return 0;
2091         }
2092 #endif
2093 
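             /*
              *      Illustrative user level usage (a sketch only - 'fd' is assumed
              *      to be an already created INET socket):
              *
              *              int tos = IPTOS_LOWDELAY;
              *              setsockopt(fd, SOL_IP, IP_TOS, &tos, sizeof(tos));
              *
              *      Besides storing the value, the IP_TOS case below also nudges
              *      the socket queueing priority to match.
              */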
2094         switch(optname)
2095         {
2096                 case IP_TOS:
2097                         if(val<0||val>255)
2098                                 return -EINVAL;
2099                         sk->ip_tos=val;
2100                         if(val==IPTOS_LOWDELAY)
2101                                 sk->priority=SOPRI_INTERACTIVE;
2102                         if(val==IPTOS_THROUGHPUT)
2103                                 sk->priority=SOPRI_BACKGROUND;
2104                         return 0;
2105                 case IP_TTL:
2106                         if(val<1||val>255)
2107                                 return -EINVAL;
2108                         sk->ip_ttl=val;
2109                         return 0;
2110 #ifdef CONFIG_IP_MULTICAST
2111 #ifdef GCC_WORKS
2112                 case IP_MULTICAST_TTL: 
2113                 {
2114                         unsigned char ucval;
2115 
2116                         ucval=get_fs_byte((unsigned char *)optval);
2117                         printk("MC TTL %d\n", ucval);
2118                         if(ucval<1||ucval>255)
2119                                 return -EINVAL;
2120                         sk->ip_mc_ttl=(int)ucval;
2121                         return 0;
2122                 }
2123 #endif
2124                 case IP_MULTICAST_LOOP: 
2125                 {
2126                         unsigned char ucval;
2127 
2128                         ucval=get_fs_byte((unsigned char *)optval);
2129                         if(ucval!=0 && ucval!=1)
2130                                  return -EINVAL;
2131                         sk->ip_mc_loop=(int)ucval;
2132                         return 0;
2133                 }
2134                 case IP_MULTICAST_IF: 
2135                 {
2136                         /* Not fully tested */
2137                         struct in_addr addr;
2138                         struct device *dev=NULL;
2139                         
2140                         /*
2141                          *      Check the arguments are allowable
2142                          */
2143 
2144                         err=verify_area(VERIFY_READ, optval, sizeof(addr));
2145                         if(err)
2146                                 return err;
2147                                 
2148                         memcpy_fromfs(&addr,optval,sizeof(addr));
2149                         
2150                         printk("MC bind %s\n", in_ntoa(addr.s_addr));
2151                         
2152                         /*
2153                          *      What address has been requested
2154                          */
2155                         
2156                         if(addr.s_addr==INADDR_ANY)     /* Default */
2157                         {
2158                                 sk->ip_mc_name[0]=0;
2159                                 return 0;
2160                         }
2161                         
2162                         /*
2163                          *      Find the device
2164                          */
2165                          
2166                         for(dev = dev_base; dev; dev = dev->next)
2167                         {
2168                                 if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST)&&
2169                                         (dev->pa_addr==addr.s_addr))
2170                                         break;
2171                         }
2172                         
2173                         /*
2174                          *      Did we find one
2175                          */
2176                          
2177                         if(dev) 
2178                         {
2179                                 strcpy(sk->ip_mc_name,dev->name);
2180                                 return 0;
2181                         }
2182                         return -EADDRNOTAVAIL;
2183                 }
2184                 
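                     /*
                      *      Illustrative user level join (a sketch only - 'fd' and the
                      *      group address are example assumptions):
                      *
                      *              struct ip_mreq mreq;
                      *              mreq.imr_multiaddr.s_addr = inet_addr("224.0.0.9");
                      *              mreq.imr_interface.s_addr = INADDR_ANY;
                      *              setsockopt(fd, SOL_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
                      *
                      *      With INADDR_ANY the code below consults the routing table
                      *      to pick the device; otherwise it scans for an UP, multicast
                      *      capable device owning the given local address.
                      */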
2185                 case IP_ADD_MEMBERSHIP: 
2186                 {
2187                 
2188 /*
2189  *      FIXME: Add/Del membership should have a semaphore protecting them from re-entry
2190  */
2191                         struct ip_mreq mreq;
2192                         static struct options optmem;
2193                         unsigned long route_src;
2194                         struct rtable *rt;
2195                         struct device *dev=NULL;
2196                         
2197                         /*
2198                          *      Check the arguments.
2199                          */
2200 
2201                         err=verify_area(VERIFY_READ, optval, sizeof(mreq));
2202                         if(err)
2203                                 return err;
2204 
2205                         memcpy_fromfs(&mreq,optval,sizeof(mreq));
2206 
2207                         /* 
2208                          *      Get device for use later
2209                          */
2210 
2211                         if(mreq.imr_interface.s_addr==INADDR_ANY) 
2212                         {
2213                                 /*
2214                                  *      Not set so scan.
2215                                  */
2216                                 if((rt=ip_rt_route(mreq.imr_multiaddr.s_addr,&optmem, &route_src))!=NULL)
2217                                 {
2218                                         dev=rt->rt_dev;
2219                                         rt->rt_use--;
2220                                 }
2221                         }
2222                         else
2223                         {
2224                                 /*
2225                                  *      Find a suitable device.
2226                                  */
2227                                 for(dev = dev_base; dev; dev = dev->next)
2228                                 {
2229                                         if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST)&&
2230                                                 (dev->pa_addr==mreq.imr_interface.s_addr))
2231                                                 break;
2232                                 }
2233                         }
2234                         
2235                         /*
2236                          *      No device, no cookies.
2237                          */
2238                          
2239                         if(!dev)
2240                                 return -ENODEV;
2241                                 
2242                         /*
2243                          *      Join group.
2244                          */
2245                          
2246                         return ip_mc_join_group(sk,dev,mreq.imr_multiaddr.s_addr);
2247                 }
2248                 
2249                 case IP_DROP_MEMBERSHIP: 
2250                 {
2251                         struct ip_mreq mreq;
2252                         struct rtable *rt;
2253                         static struct options optmem;
2254                         unsigned long route_src;
2255                         struct device *dev=NULL;
2256 
2257                         /*
2258                          *      Check the arguments
2259                          */
2260                          
2261                         err=verify_area(VERIFY_READ, optval, sizeof(mreq));
2262                         if(err)
2263                                 return err;
2264 
2265                         memcpy_fromfs(&mreq,optval,sizeof(mreq));
2266 
2267                         /*
2268                          *      Get device for use later 
2269                          */
2270  
2271                         if(mreq.imr_interface.s_addr==INADDR_ANY) 
2272                         {
2273                                 if((rt=ip_rt_route(mreq.imr_multiaddr.s_addr,&optmem, &route_src))!=NULL)
2274                                 {
2275                                         dev=rt->rt_dev;
2276                                         rt->rt_use--;
2277                                 }
2278                         }
2279                         else 
2280                         {
2281                                 for(dev = dev_base; dev; dev = dev->next)
2282                                 {
2283                                         if((dev->flags&IFF_UP)&& (dev->flags&IFF_MULTICAST)&&
2284                                                         (dev->pa_addr==mreq.imr_interface.s_addr))
2285                                                 break;
2286                                 }
2287                         }
2288                         
2289                         /*
2290                          *      Did we find a suitable device.
2291                          */
2292                          
2293                         if(!dev)
2294                                 return -ENODEV;
2295                                 
2296                         /*
2297                          *      Leave group
2298                          */
2299                          
2300                         return ip_mc_leave_group(sk,dev,mreq.imr_multiaddr.s_addr);
2301                 }
2302 #endif                  
2303 #ifdef CONFIG_IP_FIREWALL
2304                 case IP_FW_ADD_BLK:
2305                 case IP_FW_DEL_BLK:
2306                 case IP_FW_ADD_FWD:
2307                 case IP_FW_DEL_FWD:
2308                 case IP_FW_CHK_BLK:
2309                 case IP_FW_CHK_FWD:
2310                 case IP_FW_FLUSH:
2311                 case IP_FW_POLICY:
2312                         if(!suser())
2313                                 return -EPERM;
2314                         if(optlen>sizeof(tmp_fw) || optlen<1)
2315                                 return -EINVAL;
2316                         err=verify_area(VERIFY_READ,optval,optlen);
2317                         if(err)
2318                                 return err;
2319                         memcpy_fromfs(&tmp_fw,optval,optlen);
2320                         err=ip_fw_ctl(optname, &tmp_fw,optlen);
2321                         return -err;    /* -0 is 0 after all */
2322                         
2323 #endif
2324 #ifdef CONFIG_IP_ACCT
2325                 case IP_ACCT_DEL:
2326                 case IP_ACCT_ADD:
2327                 case IP_ACCT_FLUSH:
2328                 case IP_ACCT_ZERO:
2329                         if(!suser())
2330                                 return -EPERM;
2331                         if(optlen>sizeof(tmp_fw) || optlen<1)
2332                                 return -EINVAL;
2333                         err=verify_area(VERIFY_READ,optval,optlen);
2334                         if(err)
2335                                 return err;
2336                         memcpy_fromfs(&tmp_fw, optval,optlen);
2337                         err=ip_acct_ctl(optname, &tmp_fw,optlen);
2338                         return -err;    /* -0 is 0 after all */
2339 #endif
2340                 /* IP_OPTIONS and friends go here eventually */
2341                 default:
2342                         return(-ENOPROTOOPT);
2343         }
2344 }
2345 
2346 /*
2347  *      Get the options. Note for future reference. The GET of IP options gets the
2348  *      _received_ ones. The set sets the _sent_ ones.
2349  */
2350 
2351 int ip_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
2352 {
2353         int val,err;
2354 #ifdef CONFIG_IP_MULTICAST
2355         int len;
2356 #endif
2357         
2358         if(level!=SOL_IP)
2359                 return -EOPNOTSUPP;
2360 
2361         switch(optname)
2362         {
2363                 case IP_TOS:
2364                         val=sk->ip_tos;
2365                         break;
2366                 case IP_TTL:
2367                         val=sk->ip_ttl;
2368                         break;
2369 #ifdef CONFIG_IP_MULTICAST                      
2370                 case IP_MULTICAST_TTL:
2371                         val=sk->ip_mc_ttl;
2372                         break;
2373                 case IP_MULTICAST_LOOP:
2374                         val=sk->ip_mc_loop;
2375                         break;
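                     /*
                      *      Note that IP_MULTICAST_IF is reported as the bound device
                      *      name string (sk->ip_mc_name): the name length goes to
                      *      *optlen and the characters to optval, not an address.
                      */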
2376                 case IP_MULTICAST_IF:
2377                         err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
2378                         if(err)
2379                                 return err;
2380                         len=strlen(sk->ip_mc_name);
2381                         err=verify_area(VERIFY_WRITE, optval, len);
2382                         if(err)
2383                                 return err;
2384                         put_fs_long(len,(unsigned long *) optlen);
2385                         memcpy_tofs((void *)optval,sk->ip_mc_name, len);
2386                         return 0;
2387 #endif
2388                 default:
2389                         return(-ENOPROTOOPT);
2390         }
2391         err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
2392         if(err)
2393                 return err;
2394         put_fs_long(sizeof(int),(unsigned long *) optlen);
2395 
2396         err=verify_area(VERIFY_WRITE, optval, sizeof(int));
2397         if(err)
2398                 return err;
2399         put_fs_long(val,(unsigned long *)optval);
2400 
2401         return(0);
2402 }
2403 
2404 /*
2405  *      IP protocol layer initialiser
2406  */
2407 
2408 static struct packet_type ip_packet_type =
2409 {
2410         0,      /* MUTTER ntohs(ETH_P_IP),*/
2411         NULL,   /* All devices */
2412         ip_rcv,
2413         NULL,
2414         NULL,
2415 };
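     /*
      *      The type field is left zero here and filled in with htons(ETH_P_IP)
      *      at ip_init() time; a static initialiser was presumably avoided
      *      because htons() is not a compile time constant here (hence the
      *      MUTTER above).
      */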
2416 
2417 /*
2418  *      Device notifier
2419  */
2420  
2421 static int ip_rt_event(unsigned long event, void *ptr)
2422 {
2423         if(event==NETDEV_DOWN)
2424                 ip_rt_flush(ptr);
2425         return NOTIFY_DONE;
2426 }
2427 
2428 struct notifier_block ip_rt_notifier={
2429         ip_rt_event,
2430         NULL,
2431         0
2432 };
2433 
2434 /*
2435  *      IP registers the packet type and then calls the subprotocol initialisers
2436  */
2437 
2438 void ip_init(void)
2439 {
2440         ip_packet_type.type=htons(ETH_P_IP);
2441         dev_add_pack(&ip_packet_type);
2442 
2443         /* So we flush routes when a device is downed */        
2444         register_netdevice_notifier(&ip_rt_notifier);
2445 /*      ip_raw_init();
2446         ip_packet_init();
2447         ip_tcp_init();
2448         ip_udp_init();*/
2449 }
