root/net/inet/ip.c


DEFINITIONS

This source file includes the following definitions:
  1. ip_ioctl
  2. strict_route
  3. loose_route
  4. ip_route_check
  5. build_options
  6. ip_send
  7. ip_build_header
  8. do_options
  9. ip_fast_csum
  10. ip_compute_csum
  11. ip_csum
  12. ip_send_check
  13. ip_frag_create
  14. ip_find
  15. ip_free
  16. ip_expire
  17. ip_create
  18. ip_done
  19. ip_glue
  20. ip_defrag
  21. ip_fragment
  22. ip_forward
  23. ip_rcv
  24. ip_loopback
  25. ip_queue_xmit
  26. ip_mc_procinfo
  27. ip_setsockopt
  28. ip_getsockopt
  29. ip_rt_event
  30. ip_init

   1 /*
   2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
   3  *              operating system.  INET is implemented using the  BSD Socket
   4  *              interface as the means of communication with the user level.
   5  *
   6  *              The Internet Protocol (IP) module.
   7  *
   8  * Version:     @(#)ip.c        1.0.16b 9/1/93
   9  *
  10  * Authors:     Ross Biro, <bir7@leland.Stanford.Edu>
  11  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12  *              Donald Becker, <becker@super.org>
  13  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
  14  *              Richard Underwood
  15  *
  16  * Fixes:
  17  *              Alan Cox        :       Commented a couple of minor bits of surplus code
  18  *              Alan Cox        :       Undefining IP_FORWARD doesn't include the code
  19  *                                      (just stops a compiler warning).
  20  *              Alan Cox        :       Frames with >=MAX_ROUTE record routes, strict routes or loose routes
  21  *                                      are junked rather than corrupting things.
  22  *              Alan Cox        :       Frames to bad broadcast subnets are dumped
  23  *                                      We used to process them non broadcast and
  24  *                                      boy could that cause havoc.
  25  *              Alan Cox        :       ip_forward sets the free flag on the
  26  *                                      new frame it queues. Still crap because
  27  *                                      it copies the frame but at least it
  28  *                                      doesn't eat memory too.
  29  *              Alan Cox        :       Generic queue code and memory fixes.
  30  *              Fred Van Kempen :       IP fragment support (borrowed from NET2E)
  31  *              Gerhard Koerting:       Forward fragmented frames correctly.
  32  *              Gerhard Koerting:       Fixes to my fix of the above 8-).
  33  *              Gerhard Koerting:       IP interface addressing fix.
  34  *              Linus Torvalds  :       More robustness checks
  35  *              Alan Cox        :       Even more checks: Still not as robust as it ought to be
  36  *              Alan Cox        :       Save IP header pointer for later
  37  *              Alan Cox        :       ip option setting
  38  *              Alan Cox        :       Use ip_tos/ip_ttl settings
  39  *              Alan Cox        :       Fragmentation bogosity removed
  40  *                                      (Thanks to Mark.Bush@prg.ox.ac.uk)
  41  *              Dmitry Gorodchanin :    Send of a raw packet crash fix.
  42  *              Alan Cox        :       Silly ip bug when an overlength
  43  *                                      fragment turns up. Now frees the
  44  *                                      queue.
  45  *              Linus Torvalds/ :       Memory leakage on fragmentation
  46  *              Alan Cox        :       handling.
  47  *              Gerhard Koerting:       Forwarding uses IP priority hints
  48  *              Teemu Rantanen  :       Fragment problems.
  49  *              Alan Cox        :       General cleanup, comments and reformat
  50  *              Alan Cox        :       SNMP statistics
  51  *              Alan Cox        :       BSD address rule semantics. Also see
  52  *                                      UDP as there is a nasty checksum issue
  53  *                                      if you do things the wrong way.
  54  *              Alan Cox        :       Always defrag, moved IP_FORWARD to the config.in file
  55  *              Alan Cox        :       IP options adjust sk->priority.
  56  *              Pedro Roque     :       Fix mtu/length error in ip_forward.
  57  *              Alan Cox        :       Avoid ip_chk_addr when possible.
  58  *              Richard Underwood :     IP multicasting.
  59  *              Alan Cox        :       Cleaned up multicast handlers.
  60  *              Alan Cox        :       RAW sockets demultiplex in the BSD style.
  61  *              Gunther Mayer   :       Fix the SNMP reporting typo
  62  *              Alan Cox        :       Always in group 224.0.0.1
  63  *              Alan Cox        :       Multicast loopback error for 224.0.0.1
  64  *              Alan Cox        :       IP_MULTICAST_LOOP option.
  65  *              Alan Cox        :       Use notifiers.
  66  *
  67  * To Fix:
  68  *              IP option processing is mostly not needed. ip_forward needs to know about routing rules
  69  *              and time stamp but that's about all. Use the route mtu field here too
  70  *              IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
  71  *              and could be made very efficient with the addition of some virtual memory hacks to permit
  72  *              the allocation of a buffer that can then be 'grown' by twiddling page tables.
  73  *              Output fragmentation wants updating along with the buffer management to use a single 
  74  *              interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
  75  *              output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
  76  *              fragmentation anyway.
  77  *
  78  *              This program is free software; you can redistribute it and/or
  79  *              modify it under the terms of the GNU General Public License
  80  *              as published by the Free Software Foundation; either version
  81  *              2 of the License, or (at your option) any later version.
  82  */
  83 
  84 #include <asm/segment.h>
  85 #include <asm/system.h>
  86 #include <linux/types.h>
  87 #include <linux/kernel.h>
  88 #include <linux/sched.h>
  89 #include <linux/string.h>
  90 #include <linux/errno.h>
  91 #include <linux/config.h>
  92 
  93 #include <linux/socket.h>
  94 #include <linux/sockios.h>
  95 #include <linux/in.h>
  96 #include <linux/inet.h>
  97 #include <linux/netdevice.h>
  98 #include <linux/etherdevice.h>
  99 
 100 #include "snmp.h"
 101 #include "ip.h"
 102 #include "protocol.h"
 103 #include "route.h"
 104 #include "tcp.h"
 105 #include <linux/skbuff.h>
 106 #include "sock.h"
 107 #include "arp.h"
 108 #include "icmp.h"
 109 #include "raw.h"
 110 #include "igmp.h"
 111 #include <linux/ip_fw.h>
 112 
 113 #define CONFIG_IP_DEFRAG
 114 
 115 extern int last_retran;
 116 extern void sort_send(struct sock *sk);
 117 
 118 #define min(a,b)        ((a)<(b)?(a):(b))
 119 #define LOOPBACK(x)     (((x) & htonl(0xff000000)) == htonl(0x7f000000))
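/*
 *      LOOPBACK(x) is true for any address in the 127.0.0.0/8 block; the
 *      constants are in network byte order, hence the htonl() calls.
 */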
 120 
 121 /*
 122  *      SNMP management statistics
 123  */
 124 
 125 #ifdef CONFIG_IP_FORWARD
 126 struct ip_mib ip_statistics={1,64,};    /* Forwarding=Yes, Default TTL=64 */
 127 #else
 128 struct ip_mib ip_statistics={0,64,};    /* Forwarding=No, Default TTL=64 */
 129 #endif
 130 
 131 /*
 132  *      Handle the issuing of an ioctl() request
 133  *      for the ip device. This is scheduled to
 134  *      disappear
 135  */
 136 
 137 int ip_ioctl(struct sock *sk, int cmd, unsigned long arg)
 138 {
 139         switch(cmd)
 140         {
 141                 default:
 142                         return(-EINVAL);
 143         }
 144 }
 145 
 146 
 147 /* these two routines will do routing. */
 148 
 149 static void
 150 strict_route(struct iphdr *iph, struct options *opt)
 151 {
 152 }
 153 
 154 
 155 static void
 156 loose_route(struct iphdr *iph, struct options *opt)
 157 {
 158 }
 159 
 160 
 161 
 162 
 163 /* This routine will check to see if we have lost a gateway. */
 164 void
 165 ip_route_check(unsigned long daddr)
 166 {
 167 }
 168 
 169 
 170 #if 0
 171 /* this routine puts the options at the end of an ip header. */
 172 static int
 173 build_options(struct iphdr *iph, struct options *opt)
 174 {
 175   unsigned char *ptr;
 176   /* currently we don't support any options. */
 177   ptr = (unsigned char *)(iph+1);
 178   *ptr = 0;
 179   return (4);
 180 }
 181 #endif
 182 
 183 
 184 /*
 185  *      Take an skb, and fill in the MAC header.
 186  */
 187 
 188 static int ip_send(struct sk_buff *skb, unsigned long daddr, int len, struct device *dev, unsigned long saddr)
 189 {
 190         int mac = 0;
 191 
 192         skb->dev = dev;
 193         skb->arp = 1;
 194         if (dev->hard_header)
 195         {
 196                 /*
 197                  *      Build a hardware header. Source address is our mac, destination unknown
 198                  *      (rebuild header will sort this out)
 199                  */
 200                 mac = dev->hard_header(skb->data, dev, ETH_P_IP, NULL, NULL, len, skb);
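                /*
                 *      A negative return from hard_header means the destination
                 *      hardware address could not be filled in yet; its absolute
                 *      value is still the header length. Clearing skb->arp below
                 *      leaves it to the driver's rebuild_header/ARP path to
                 *      complete the header before transmission.
                 */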
 201                 if (mac < 0)
 202                 {
 203                         mac = -mac;
 204                         skb->arp = 0;
 205                         skb->raddr = daddr;     /* next routing address */
 206                 }
 207         }
 208         return mac;
 209 }
 210 
 211 int ip_id_count = 0;
 212 
 213 /*
 214  * This routine builds the appropriate hardware/IP headers for
 215  * the routine.  It assumes that if *dev != NULL then the
 216  * protocol knows what it's doing, otherwise it uses the
 217  * routing/ARP tables to select a device struct.
 218  */
 219 int ip_build_header(struct sk_buff *skb, unsigned long saddr, unsigned long daddr,
 220                 struct device **dev, int type, struct options *opt, int len, int tos, int ttl)
 221 {
 222         static struct options optmem;
 223         struct iphdr *iph;
 224         struct rtable *rt;
 225         unsigned char *buff;
 226         unsigned long raddr;
 227         int tmp;
 228         unsigned long src;
 229 
 230         buff = skb->data;
 231 
 232         /*
 233          *      See if we need to look up the device.
 234          */
 235 
 236 #ifdef CONFIG_INET_MULTICAST    
 237         if(MULTICAST(daddr) && *dev==NULL && skb->sk && *skb->sk->ip_mc_name)
 238                 *dev=dev_get(skb->sk->ip_mc_name);
 239 #endif
 240         if (*dev == NULL)
 241         {
 242                 if(skb->localroute)
 243                         rt = ip_rt_local(daddr, &optmem, &src);
 244                 else
 245                         rt = ip_rt_route(daddr, &optmem, &src);
 246                 if (rt == NULL)
 247                 {
 248                         ip_statistics.IpOutNoRoutes++;
 249                         return(-ENETUNREACH);
 250                 }
 251 
 252                 *dev = rt->rt_dev;
 253                 /*
 254                  *      If the frame is from us and going off machine it MUST MUST MUST
 255                  *      have the output device ip address and never the loopback
 256                  */
 257                 if (LOOPBACK(saddr) && !LOOPBACK(daddr))
 258                         saddr = src;/*rt->rt_dev->pa_addr;*/
 259                 raddr = rt->rt_gateway;
 260 
 261                 opt = &optmem;
 262         }
 263         else
 264         {
 265                 /*
 266                  *      We still need the address of the first hop.
 267                  */
 268                 if(skb->localroute)
 269                         rt = ip_rt_local(daddr, &optmem, &src);
 270                 else
 271                         rt = ip_rt_route(daddr, &optmem, &src);
 272                 /*
 273                  *      If the frame is from us and going off machine it MUST MUST MUST
 274                  *      have the output device ip address and never the loopback
 275                  */
 276                 if (LOOPBACK(saddr) && !LOOPBACK(daddr))
 277                         saddr = src;/*rt->rt_dev->pa_addr;*/
 278 
 279                 raddr = (rt == NULL) ? 0 : rt->rt_gateway;
 280         }
 281 
 282         /*
 283          *      No source addr so make it our addr
 284          */
 285         if (saddr == 0)
 286                 saddr = src;
 287 
 288         /*
 289          *      No gateway so aim at the real destination
 290          */
 291         if (raddr == 0)
 292                 raddr = daddr;
 293 
 294         /*
 295          *      Now build the MAC header.
 296          */
 297 
 298         tmp = ip_send(skb, raddr, len, *dev, saddr);
 299         buff += tmp;
 300         len -= tmp;
 301 
 302         /*
 303          *      Book keeping
 304          */
 305 
 306         skb->dev = *dev;
 307         skb->saddr = saddr;
 308         if (skb->sk)
 309                 skb->sk->saddr = saddr;
 310 
 311         /*
 312          *      Now build the IP header.
 313          */
 314 
 315         /*
 316          *      If we are using IPPROTO_RAW, then we don't need an IP header, since
 317          *      one is being supplied to us by the user
 318          */
 319 
 320         if(type == IPPROTO_RAW)
 321                 return (tmp);
 322 
 323         iph = (struct iphdr *)buff;
 324         iph->version  = 4;
 325         iph->tos      = tos;
 326         iph->frag_off = 0;
 327         iph->ttl      = ttl;
 328         iph->daddr    = daddr;
 329         iph->saddr    = saddr;
 330         iph->protocol = type;
 331         iph->ihl      = 5;
 332         skb->ip_hdr   = iph;
 333 
 334         /* Setup the IP options. */
 335 #ifdef Not_Yet_Avail
 336         build_options(iph, opt);
 337 #endif
 338 #ifdef CONFIG_IP_FIREWALL
 339         if(!ip_fw_chk(iph,ip_fw_blk_chain))
 340                 return -EPERM;
 341 #endif          
 342 
 343         return(20 + tmp);       /* IP header plus MAC header size */
 344 }
 345 
 346 
 347 static int
 348 do_options(struct iphdr *iph, struct options *opt)
 349 {
 350   unsigned char *buff;
 351   int done = 0;
 352   int i, len = sizeof(struct iphdr);
 353 
 354   /* Zero out the options. */
 355   opt->record_route.route_size = 0;
 356   opt->loose_route.route_size  = 0;
 357   opt->strict_route.route_size = 0;
 358   opt->tstamp.ptr              = 0;
 359   opt->security                = 0;
 360   opt->compartment             = 0;
 361   opt->handling                = 0;
 362   opt->stream                  = 0;
 363   opt->tcc                     = 0;
 364   return(0);
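  /*
   * Note that the unconditional return above means the option parser below
   * is never reached: options are simply zeroed out and otherwise ignored.
   */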
 365 
 366   /* Advance the pointer to start at the options. */
 367   buff = (unsigned char *)(iph + 1);
 368 
 369   /* Now start the processing. */
 370   while (!done && len < iph->ihl*4) switch(*buff) {
 371         case IPOPT_END:
 372                 done = 1;
 373                 break;
 374         case IPOPT_NOOP:
 375                 buff++;
 376                 len++;
 377                 break;
 378         case IPOPT_SEC:
 379                 buff++;
 380                 if (*buff != 11) return(1);
 381                 buff++;
 382                 opt->security = ntohs(*(unsigned short *)buff);
 383                 buff += 2;
 384                 opt->compartment = ntohs(*(unsigned short *)buff);
 385                 buff += 2;
 386                 opt->handling = ntohs(*(unsigned short *)buff);
 387                 buff += 2;
 388                 opt->tcc = ((*buff) << 16) + ntohs(*(unsigned short *)(buff+1));
 389                 buff += 3;
 390                 len += 11;
 391                 break;
 392         case IPOPT_LSRR:
 393                 buff++;
 394                 if ((*buff - 3)% 4 != 0) return(1);
 395                 len += *buff;
 396                 opt->loose_route.route_size = (*buff -3)/4;
 397                 buff++;
 398                 if (*buff % 4 != 0) return(1);
 399                 opt->loose_route.pointer = *buff/4 - 1;
 400                 buff++;
 401                 buff++;
 402                 for (i = 0; i < opt->loose_route.route_size; i++) {
 403                         if(i>=MAX_ROUTE)
 404                                 return(1);
 405                         opt->loose_route.route[i] = *(unsigned long *)buff;
 406                         buff += 4;
 407                 }
 408                 break;
 409         case IPOPT_SSRR:
 410                 buff++;
 411                 if ((*buff - 3)% 4 != 0) return(1);
 412                 len += *buff;
 413                 opt->strict_route.route_size = (*buff -3)/4;
 414                 buff++;
 415                 if (*buff % 4 != 0) return(1);
 416                 opt->strict_route.pointer = *buff/4 - 1;
 417                 buff++;
 418                 buff++;
 419                 for (i = 0; i < opt->strict_route.route_size; i++) {
 420                         if(i>=MAX_ROUTE)
 421                                 return(1);
 422                         opt->strict_route.route[i] = *(unsigned long *)buff;
 423                         buff += 4;
 424                 }
 425                 break;
 426         case IPOPT_RR:
 427                 buff++;
 428                 if ((*buff - 3)% 4 != 0) return(1);
 429                 len += *buff;
 430                 opt->record_route.route_size = (*buff -3)/4;
 431                 buff++;
 432                 if (*buff % 4 != 0) return(1);
 433                 opt->record_route.pointer = *buff/4 - 1;
 434                 buff++;
 435                 buff++;
 436                 for (i = 0; i < opt->record_route.route_size; i++) {
 437                         if(i>=MAX_ROUTE)
 438                                 return 1;
 439                         opt->record_route.route[i] = *(unsigned long *)buff;
 440                         buff += 4;
 441                 }
 442                 break;
 443         case IPOPT_SID:
 444                 len += 4;
 445                 buff +=2;
 446                 opt->stream = *(unsigned short *)buff;
 447                 buff += 2;
 448                 break;
 449         case IPOPT_TIMESTAMP:
 450                 buff++;
 451                 len += *buff;
 452                 if (*buff % 4 != 0) return(1);
 453                 opt->tstamp.len = *buff / 4 - 1;
 454                 buff++;
 455                 if ((*buff - 1) % 4 != 0) return(1);
 456                 opt->tstamp.ptr = (*buff-1)/4;
 457                 buff++;
 458                 opt->tstamp.x.full_char = *buff;
 459                 buff++;
 460                 for (i = 0; i < opt->tstamp.len; i++) {
 461                         opt->tstamp.data[i] = *(unsigned long *)buff;
 462                         buff += 4;
 463                 }
 464                 break;
 465         default:
 466                 return(1);
 467   }
 468 
 469   if (opt->record_route.route_size == 0) {
 470         if (opt->strict_route.route_size != 0) {
 471                 memcpy(&(opt->record_route), &(opt->strict_route),
 472                                              sizeof(opt->record_route));
 473         } else if (opt->loose_route.route_size != 0) {
 474                 memcpy(&(opt->record_route), &(opt->loose_route),
 475                                              sizeof(opt->record_route));
 476         }
 477   }
 478 
 479   if (opt->strict_route.route_size != 0 &&
 480       opt->strict_route.route_size != opt->strict_route.pointer) {
 481         strict_route(iph, opt);
 482         return(0);
 483   }
 484 
 485   if (opt->loose_route.route_size != 0 &&
 486       opt->loose_route.route_size != opt->loose_route.pointer) {
 487         loose_route(iph, opt);
 488         return(0);
 489   }
 490 
 491   return(0);
 492 }
 493 
 494 /*
 495  *      This is a version of ip_compute_csum() optimized for IP headers, which
 496  *      always checksum on 4 octet boundaries.
 497  */
 498 
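/*
 *      Both routines below compute the standard Internet checksum (RFC 1071):
 *      a 16-bit one's-complement sum with end-around carry, complemented at
 *      the end. ip_fast_csum() adds 32 bits at a time (wlen counts 32-bit
 *      words, i.e. iph->ihl for an IP header) and then folds the upper half
 *      into the lower half, in effect sum = (sum & 0xffff) + (sum >> 16).
 */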
 499 static inline unsigned short ip_fast_csum(unsigned char * buff, int wlen)
 500 {
 501         unsigned long sum = 0;
 502 
 503         if (wlen)
 504         {
 505         unsigned long bogus;
 506          __asm__("clc\n"
 507                 "1:\t"
 508                 "lodsl\n\t"
 509                 "adcl %3, %0\n\t"
 510                 "decl %2\n\t"
 511                 "jne 1b\n\t"
 512                 "adcl $0, %0\n\t"
 513                 "movl %0, %3\n\t"
 514                 "shrl $16, %3\n\t"
 515                 "addw %w3, %w0\n\t"
 516                 "adcw $0, %w0"
 517             : "=r" (sum), "=S" (buff), "=r" (wlen), "=a" (bogus)
 518             : "0"  (sum),  "1" (buff),  "2" (wlen));
 519         }
 520         return (~sum) & 0xffff;
 521 }
 522 
 523 /*
 524  * This routine does all the checksum computations that don't
 525  * require anything special (like copying or special headers).
 526  */
 527 
 528 unsigned short ip_compute_csum(unsigned char * buff, int len)
 529 {
 530         unsigned long sum = 0;
 531 
 532         /* Do the first multiple of 4 bytes and convert to 16 bits. */
 533         if (len > 3)
 534         {
 535                 __asm__("clc\n"
 536                 "1:\t"
 537                 "lodsl\n\t"
 538                 "adcl %%eax, %%ebx\n\t"
 539                 "loop 1b\n\t"
 540                 "adcl $0, %%ebx\n\t"
 541                 "movl %%ebx, %%eax\n\t"
 542                 "shrl $16, %%eax\n\t"
 543                 "addw %%ax, %%bx\n\t"
 544                 "adcw $0, %%bx"
 545                 : "=b" (sum) , "=S" (buff)
 546                 : "0" (sum), "c" (len >> 2) ,"1" (buff)
 547                 : "ax", "cx", "si", "bx" );
 548         }
 549         if (len & 2)
 550         {
 551                 __asm__("lodsw\n\t"
 552                 "addw %%ax, %%bx\n\t"
 553                 "adcw $0, %%bx"
 554                 : "=b" (sum), "=S" (buff)
 555                 : "0" (sum), "1" (buff)
 556                 : "bx", "ax", "si");
 557         }
 558         if (len & 1)
 559         {
 560                 __asm__("lodsb\n\t"
 561                 "movb $0, %%ah\n\t"
 562                 "addw %%ax, %%bx\n\t"
 563                 "adcw $0, %%bx"
 564                 : "=b" (sum), "=S" (buff)
 565                 : "0" (sum), "1" (buff)
 566                 : "bx", "ax", "si");
 567         }
 568         sum =~sum;
 569         return(sum & 0xffff);
 570 }
 571 
 572 /*
 573  *      Check the header of an incoming IP datagram.  This version is still used in slhc.c.
 574  */
 575 
 576 int ip_csum(struct iphdr *iph)
 577 {
 578         return ip_fast_csum((unsigned char *)iph, iph->ihl);
 579 }
 580 
 581 /*
 582  *      Generate a checksum for an outgoing IP datagram.
 583  */
 584 
 585 void ip_send_check(struct iphdr *iph)
 586 {
 587         iph->check = 0;
 588         iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
 589 }
 590 
 591 /************************ Fragment Handlers From NET2E **********************************/
 592 
 593 
 594 /*
 595  *      This fragment handler is a bit of a heap. On the other hand it works quite
 596  *      happily and handles things quite well.
 597  */
 598 
 599 static struct ipq *ipqueue = NULL;              /* IP fragment queue    */
 600 
 601 /*
 602  *      Create a new fragment entry.
 603  */
 604 
 605 static struct ipfrag *ip_frag_create(int offset, int end, struct sk_buff *skb, unsigned char *ptr)
 606 {
 607         struct ipfrag *fp;
 608 
 609         fp = (struct ipfrag *) kmalloc(sizeof(struct ipfrag), GFP_ATOMIC);
 610         if (fp == NULL)
 611         {
 612                 printk("IP: frag_create: no memory left !\n");
 613                 return(NULL);
 614         }
 615         memset(fp, 0, sizeof(struct ipfrag));
 616 
 617         /* Fill in the structure. */
 618         fp->offset = offset;
 619         fp->end = end;
 620         fp->len = end - offset;
 621         fp->skb = skb;
 622         fp->ptr = ptr;
 623 
 624         return(fp);
 625 }
 626 
 627 
 628 /*
 629  *      Find the correct entry in the "incomplete datagrams" queue for
 630  *      this IP datagram, and return the queue entry address if found.
 631  */
 632 
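/*
 *      Fragments belong to the same datagram when the (id, saddr, daddr,
 *      protocol) tuple in their IP headers matches (RFC 791). Interrupts are
 *      disabled while the list is walked because ip_expire() may run from a
 *      timer and free entries underneath us.
 */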
 633 static struct ipq *ip_find(struct iphdr *iph)
 634 {
 635         struct ipq *qp;
 636         struct ipq *qplast;
 637 
 638         cli();
 639         qplast = NULL;
 640         for(qp = ipqueue; qp != NULL; qplast = qp, qp = qp->next)
 641         {
 642                 if (iph->id== qp->iph->id && iph->saddr == qp->iph->saddr &&
 643                         iph->daddr == qp->iph->daddr && iph->protocol == qp->iph->protocol)
 644                 {
 645                         del_timer(&qp->timer);  /* So it doesn't vanish on us. The timer will be reset anyway */
 646                         sti();
 647                         return(qp);
 648                 }
 649         }
 650         sti();
 651         return(NULL);
 652 }
 653 
 654 
 655 /*
 656  *      Remove an entry from the "incomplete datagrams" queue, either
 657  *      because we completed, reassembled and processed it, or because
 658  *      it timed out.
 659  */
 660 
 661 static void ip_free(struct ipq *qp)
 662 {
 663         struct ipfrag *fp;
 664         struct ipfrag *xp;
 665 
 666         /*
 667          * Stop the timer for this entry.
 668          */
 669 
 670         del_timer(&qp->timer);
 671 
 672         /* Remove this entry from the "incomplete datagrams" queue. */
 673         cli();
 674         if (qp->prev == NULL)
 675         {
 676                 ipqueue = qp->next;
 677                 if (ipqueue != NULL)
 678                         ipqueue->prev = NULL;
 679         }
 680         else
 681         {
 682                 qp->prev->next = qp->next;
 683                 if (qp->next != NULL)
 684                         qp->next->prev = qp->prev;
 685         }
 686 
 687         /* Release all fragment data. */
 688 
 689         fp = qp->fragments;
 690         while (fp != NULL)
 691         {
 692                 xp = fp->next;
 693                 IS_SKB(fp->skb);
 694                 kfree_skb(fp->skb,FREE_READ);
 695                 kfree_s(fp, sizeof(struct ipfrag));
 696                 fp = xp;
 697         }
 698 
 699         /* Release the MAC header. */
 700         kfree_s(qp->mac, qp->maclen);
 701 
 702         /* Release the IP header. */
 703         kfree_s(qp->iph, qp->ihlen + 8);
 704 
 705         /* Finally, release the queue descriptor itself. */
 706         kfree_s(qp, sizeof(struct ipq));
 707         sti();
 708 }
 709 
 710 
 711 /*
 712  *      Oops- a fragment queue timed out.  Kill it and send an ICMP reply.
 713  */
 714 
 715 static void ip_expire(unsigned long arg)
 716 {
 717         struct ipq *qp;
 718 
 719         qp = (struct ipq *)arg;
 720 
 721         /*
 722          *      Send an ICMP "Fragment Reassembly Timeout" message.
 723          */
 724 
 725         ip_statistics.IpReasmTimeout++;
 726         ip_statistics.IpReasmFails++;   
 727         /* This if is always true... shrug */
 728         if(qp->fragments!=NULL)
 729                 icmp_send(qp->fragments->skb,ICMP_TIME_EXCEEDED,
 730                                 ICMP_EXC_FRAGTIME, qp->dev);
 731 
 732         /*
 733          *      Nuke the fragment queue.
 734          */
 735         ip_free(qp);
 736 }
 737 
 738 
 739 /*
 740  *      Add an entry to the 'ipq' queue for a newly received IP datagram.
 741  *      We will (hopefully :-) receive all other fragments of this datagram
 742  *      in time, so we just create a queue for this datagram, in which we
 743  *      will insert the received fragments at their respective positions.
 744  */
 745 
 746 static struct ipq *ip_create(struct sk_buff *skb, struct iphdr *iph, struct device *dev)
 747 {
 748         struct ipq *qp;
 749         int maclen;
 750         int ihlen;
 751 
 752         qp = (struct ipq *) kmalloc(sizeof(struct ipq), GFP_ATOMIC);
 753         if (qp == NULL)
 754         {
 755                 printk("IP: create: no memory left !\n");
 756                 return(NULL);
 758         }
 759         memset(qp, 0, sizeof(struct ipq));
 760 
 761         /*
 762          *      Allocate memory for the MAC header.
 763          *
  764          *      FIXME: We have a maximum MAC header size limit defined
  765          *      elsewhere. We should use it here and avoid the 3 kmalloc() calls
 766          */
 767 
 768         maclen = ((unsigned long) iph) - ((unsigned long) skb->data);
 769         qp->mac = (unsigned char *) kmalloc(maclen, GFP_ATOMIC);
 770         if (qp->mac == NULL)
 771         {
 772                 printk("IP: create: no memory left !\n");
 773                 kfree_s(qp, sizeof(struct ipq));
 774                 return(NULL);
 775         }
 776 
 777         /*
 778          *      Allocate memory for the IP header (plus 8 octets for ICMP).
 779          */
 780 
 781         ihlen = (iph->ihl * sizeof(unsigned long));
 782         qp->iph = (struct iphdr *) kmalloc(ihlen + 8, GFP_ATOMIC);
 783         if (qp->iph == NULL)
 784         {
 785                 printk("IP: create: no memory left !\n");
 786                 kfree_s(qp->mac, maclen);
 787                 kfree_s(qp, sizeof(struct ipq));
 788                 return(NULL);
 789         }
 790 
 791         /* Fill in the structure. */
 792         memcpy(qp->mac, skb->data, maclen);
 793         memcpy(qp->iph, iph, ihlen + 8);
 794         qp->len = 0;
 795         qp->ihlen = ihlen;
 796         qp->maclen = maclen;
 797         qp->fragments = NULL;
 798         qp->dev = dev;
 799 
 800         /* Start a timer for this entry. */
 801         qp->timer.expires = IP_FRAG_TIME;               /* about 30 seconds     */
 802         qp->timer.data = (unsigned long) qp;            /* pointer to queue     */
 803         qp->timer.function = ip_expire;                 /* expire function      */
 804         add_timer(&qp->timer);
 805 
 806         /* Add this entry to the queue. */
 807         qp->prev = NULL;
 808         cli();
 809         qp->next = ipqueue;
 810         if (qp->next != NULL)
 811                 qp->next->prev = qp;
 812         ipqueue = qp;
 813         sti();
 814         return(qp);
 815 }
 816 
 817 
 818 /*
 819  *      See if a fragment queue is complete.
 820  */
 821 
 822 static int ip_done(struct ipq *qp)
 823 {
 824         struct ipfrag *fp;
 825         int offset;
 826 
 827         /* Only possible if we received the final fragment. */
 828         if (qp->len == 0)
 829                 return(0);
 830 
 831         /* Check all fragment offsets to see if they connect. */
 832         fp = qp->fragments;
 833         offset = 0;
 834         while (fp != NULL)
 835         {
 836                 if (fp->offset > offset)
 837                         return(0);      /* fragment(s) missing */
 838                 offset = fp->end;
 839                 fp = fp->next;
 840         }
 841 
 842         /* All fragments are present. */
 843         return(1);
 844 }
 845 
 846 
 847 /*
 848  *      Build a new IP datagram from all its fragments.
 849  *
 850  *      FIXME: We copy here because we lack an effective way of handling lists
 851  *      of bits on input. Until the new skb data handling is in I'm not going
 852  *      to touch this with a bargepole. This also causes a 4Kish limit on
 853  *      packet sizes.
 854  */
 855 
 856 static struct sk_buff *ip_glue(struct ipq *qp)
 857 {
 858         struct sk_buff *skb;
 859         struct iphdr *iph;
 860         struct ipfrag *fp;
 861         unsigned char *ptr;
 862         int count, len;
 863 
 864         /*
 865          *      Allocate a new buffer for the datagram.
 866          */
 867 
 868         len = qp->maclen + qp->ihlen + qp->len;
 869 
 870         if ((skb = alloc_skb(len,GFP_ATOMIC)) == NULL)
 871         {
 872                 ip_statistics.IpReasmFails++;
 873                 printk("IP: queue_glue: no memory for gluing queue 0x%X\n", (int) qp);
 874                 ip_free(qp);
 875                 return(NULL);
 876         }
 877 
 878         /* Fill in the basic details. */
 879         skb->len = (len - qp->maclen);
 880         skb->h.raw = skb->data;
 881         skb->free = 1;
 882 
 883         /* Copy the original MAC and IP headers into the new buffer. */
 884         ptr = (unsigned char *) skb->h.raw;
 885         memcpy(ptr, ((unsigned char *) qp->mac), qp->maclen);
 886         ptr += qp->maclen;
 887         memcpy(ptr, ((unsigned char *) qp->iph), qp->ihlen);
 888         ptr += qp->ihlen;
 889         skb->h.raw += qp->maclen;
 890 
 891         count = 0;
 892 
 893         /* Copy the data portions of all fragments into the new buffer. */
 894         fp = qp->fragments;
 895         while(fp != NULL)
 896         {
 897                 if(count+fp->len > skb->len)
 898                 {
 899                         printk("Invalid fragment list: Fragment over size.\n");
 900                         ip_free(qp);
 901                         kfree_skb(skb,FREE_WRITE);
 902                         ip_statistics.IpReasmFails++;
 903                         return NULL;
 904                 }
 905                 memcpy((ptr + fp->offset), fp->ptr, fp->len);
 906                 count += fp->len;
 907                 fp = fp->next;
 908         }
 909 
 910         /* We glued together all fragments, so remove the queue entry. */
 911         ip_free(qp);
 912 
 913         /* Done with all fragments. Fixup the new IP header. */
 914         iph = skb->h.iph;
 915         iph->frag_off = 0;
 916         iph->tot_len = htons((iph->ihl * sizeof(unsigned long)) + count);
 917         skb->ip_hdr = iph;
 918 
 919         ip_statistics.IpReasmOKs++;
 920         return(skb);
 921 }
 922 
 923 
 924 /*
 925  *      Process an incoming IP datagram fragment.
 926  */
 927 
 928 static struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev)
 929 {
 930         struct ipfrag *prev, *next;
 931         struct ipfrag *tfp;
 932         struct ipq *qp;
 933         struct sk_buff *skb2;
 934         unsigned char *ptr;
 935         int flags, offset;
 936         int i, ihl, end;
 937 
 938         ip_statistics.IpReasmReqds++;
 939 
 940         /* Find the entry of this IP datagram in the "incomplete datagrams" queue. */
 941         qp = ip_find(iph);
 942 
 943         /* Is this a non-fragmented datagram? */
 944         offset = ntohs(iph->frag_off);
 945         flags = offset & ~IP_OFFSET;
 946         offset &= IP_OFFSET;
 947         if (((flags & IP_MF) == 0) && (offset == 0))
 948         {
 949                 if (qp != NULL)
 950                         ip_free(qp);    /* Huh? How could this exist?? */
 951                 return(skb);
 952         }
 953 
 954         offset <<= 3;           /* offset is in 8-byte chunks */
 955 
 956         /*
 957          * If the queue already existed, keep restarting its timer as long
 958          * as we still are receiving fragments.  Otherwise, create a fresh
 959          * queue entry.
 960          */
 961 
 962         if (qp != NULL)
 963         {
 964                 del_timer(&qp->timer);
 965                 qp->timer.expires = IP_FRAG_TIME;       /* about 30 seconds */
 966                 qp->timer.data = (unsigned long) qp;    /* pointer to queue */
 967                 qp->timer.function = ip_expire;         /* expire function */
 968                 add_timer(&qp->timer);
 969         }
 970         else
 971         {
 972                 /*
 973                  *      If we failed to create it, then discard the frame
 974                  */
 975                 if ((qp = ip_create(skb, iph, dev)) == NULL)
 976                 {
 977                         skb->sk = NULL;
 978                         kfree_skb(skb, FREE_READ);
 979                         ip_statistics.IpReasmFails++;
 980                         return NULL;
 981                 }
 982         }
 983 
 984         /*
 985          *      Determine the position of this fragment.
 986          */
 987 
 988         ihl = (iph->ihl * sizeof(unsigned long));
 989         end = offset + ntohs(iph->tot_len) - ihl;
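        /*
         *      'offset' and 'end' are now byte positions of this fragment's
         *      data within the reassembled datagram ('offset' was converted
         *      from 8-byte units above); 'end' points one past the last byte.
         */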
 990 
 991         /*
 992          *      Point into the IP datagram 'data' part.
 993          */
 994 
 995         ptr = skb->data + dev->hard_header_len + ihl;
 996 
 997         /*
 998          *      Is this the final fragment?
 999          */
1000 
1001         if ((flags & IP_MF) == 0)
1002                 qp->len = end;
1003 
1004         /*
1005          *      Find out which fragments are in front and at the back of us
1006          *      in the chain of fragments so far.  We must know where to put
1007          *      this fragment, right?
1008          */
1009 
1010         prev = NULL;
1011         for(next = qp->fragments; next != NULL; next = next->next)
1012         {
1013                 if (next->offset > offset)
1014                         break;  /* bingo! */
1015                 prev = next;
1016         }
1017 
1018         /*
1019          *      We found where to put this one.
1020          *      Check for overlap with preceding fragment, and, if needed,
1021          *      align things so that any overlaps are eliminated.
1022          */
1023         if (prev != NULL && offset < prev->end)
1024         {
1025                 i = prev->end - offset;
1026                 offset += i;    /* ptr into datagram */
1027                 ptr += i;       /* ptr into fragment data */
1028         }
1029 
1030         /*
1031          * Look for overlap with succeeding segments.
1032          * If we can merge fragments, do it.
1033          */
1034 
1035         for(; next != NULL; next = tfp)
1036         {
1037                 tfp = next->next;
1038                 if (next->offset >= end)
1039                         break;          /* no overlaps at all */
1040 
1041                 i = end - next->offset;                 /* overlap is 'i' bytes */
1042                 next->len -= i;                         /* so reduce size of    */
1043                 next->offset += i;                      /* next fragment        */
1044                 next->ptr += i;
1045 
1046                 /*
1047                  *      If we get a frag size of <= 0, remove it and the packet
1048                  *      that it goes with.
1049                  */
1050                 if (next->len <= 0)
1051                 {
1052                         if (next->prev != NULL)
1053                                 next->prev->next = next->next;
1054                         else
1055                                 qp->fragments = next->next;
1056 
1057                         if (next->next != NULL)
1058                                 next->next->prev = next->prev;
1059 
1060                         kfree_skb(next->skb,FREE_READ);
1061                         kfree_s(next, sizeof(struct ipfrag));
1062                 }
1063         }
1064 
1065         /*
1066          *      Insert this fragment in the chain of fragments.
1067          */
1068 
1069         tfp = NULL;
1070         tfp = ip_frag_create(offset, end, skb, ptr);
1071 
1072         /*
1073          *      No memory to save the fragment - so throw the lot
1074          */
1075 
1076         if (!tfp)
1077         {
1078                 skb->sk = NULL;
1079                 kfree_skb(skb, FREE_READ);
1080                 return NULL;
1081         }
1082         tfp->prev = prev;
1083         tfp->next = next;
1084         if (prev != NULL)
1085                 prev->next = tfp;
1086         else
1087                 qp->fragments = tfp;
1088 
1089         if (next != NULL)
1090                 next->prev = tfp;
1091 
1092         /*
1093          *      OK, so we inserted this new fragment into the chain.
1094          *      Check if we now have a full IP datagram which we can
1095          *      bump up to the IP layer...
1096          */
1097 
1098         if (ip_done(qp))
1099         {
1100                 skb2 = ip_glue(qp);             /* glue together the fragments */
1101                 return(skb2);
1102         }
1103         return(NULL);
1104 }
1105 
1106 
1107 /*
1108  *      This IP datagram is too large to be sent in one piece.  Break it up into
1109  *      smaller pieces (each of size equal to the MAC header plus IP header plus
1110  *      a block of the data of the original IP data part) that will yet fit in a
1111  *      single device frame, and queue such a frame for sending by calling the
1112  *      ip_queue_xmit().  Note that this is recursion, and bad things will happen
1113  *      if this function causes a loop...
1114  *
1115  *      Yes this is inefficient, feel free to submit a quicker one.
1116  *
1117  *      **Protocol Violation**
1118  *      We copy all the options to each fragment. !FIXME!
1119  */
1120 void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag)
1121 {
1122         struct iphdr *iph;
1123         unsigned char *raw;
1124         unsigned char *ptr;
1125         struct sk_buff *skb2;
1126         int left, mtu, hlen, len;
1127         int offset;
1128         unsigned long flags;
1129 
1130         /*
1131          *      Point into the IP datagram header.
1132          */
1133 
1134         raw = skb->data;
1135         iph = (struct iphdr *) (raw + dev->hard_header_len);
1136 
1137         skb->ip_hdr = iph;
1138 
1139         /*
1140          *      Setup starting values.
1141          */
1142 
1143         hlen = (iph->ihl * sizeof(unsigned long));
1144         left = ntohs(iph->tot_len) - hlen;      /* Space per frame */
1145         hlen += dev->hard_header_len;           /* Total header size */
1146         mtu = (dev->mtu - hlen);                /* Size of data space */
1147         ptr = (raw + hlen);                     /* Where to start from */
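        /*
         *      For illustration (assuming a 1500 byte dev->mtu, a 14 byte
         *      hardware header and no IP options): hlen = 20 + 14 = 34,
         *      mtu = 1500 - 34 = 1466, and each non-final fragment built
         *      below carries 1464 data bytes after rounding down to a
         *      multiple of 8.
         */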
1148 
1149         /*
1150          *      Check for any "DF" flag. [DF means do not fragment]
1151          */
1152 
1153         if (ntohs(iph->frag_off) & IP_DF)
1154         {
1155                 ip_statistics.IpFragFails++;
1156                 icmp_send(skb,ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, dev);
1157                 return;
1158         }
1159 
1160         /*
1161          *      The protocol doesn't seem to say what to do in the case that the
1162          *      frame + options doesn't fit the mtu. As it used to fall down dead
1163          *      in this case we were fortunate it didn't happen
1164          */
1165 
1166         if(mtu<8)
1167         {
1168                 /* It's wrong but it's better than nothing */
1169                 icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,dev);
1170                 ip_statistics.IpFragFails++;
1171                 return;
1172         }
1173 
1174         /*
1175          *      Fragment the datagram.
1176          */
1177 
1178         /*
1179          *      The initial offset is 0 for a complete frame. When
1180          *      fragmenting fragments its wherever this one starts.
1181          */
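        /*
         *      0x1fff masks the 13-bit fragment offset field (IP_OFFSET) and
         *      the shift by 3 converts its 8-byte units into a byte count.
         */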
1182 
1183         if (is_frag & 2)
1184                 offset = (ntohs(iph->frag_off) & 0x1fff) << 3;
1185         else
1186                 offset = 0;
1187 
1188 
1189         /*
1190          *      Keep copying data until we run out.
1191          */
1192 
1193         while(left > 0)
1194         {
1195                 len = left;
1196                 /* IF: it doesn't fit, use 'mtu' - the data space left */
1197                 if (len > mtu)
1198                         len = mtu;
1199                 /* IF: we are not sending up to and including the packet end
1200                    then align the next start on an eight byte boundary */
1201                 if (len < left)
1202                 {
1203                         len/=8;
1204                         len*=8;
1205                 }
1206                 /*
1207                  *      Allocate buffer.
1208                  */
1209 
1210                 if ((skb2 = alloc_skb(len + hlen,GFP_ATOMIC)) == NULL)
1211                 {
1212                         printk("IP: frag: no memory for new fragment!\n");
1213                         ip_statistics.IpFragFails++;
1214                         return;
1215                 }
1216 
1217                 /*
1218                  *      Set up data on packet
1219                  */
1220 
1221                 skb2->arp = skb->arp;
1222                 if(skb->free==0)
1223                         printk("IP fragmenter: BUG free!=1 in fragmenter\n");
1224                 skb2->free = 1;
1225                 skb2->len = len + hlen;
1226                 skb2->h.raw=(char *) skb2->data;
1227                 /*
1228                  *      Charge the memory for the fragment to any owner
1229                  *      it might possess
1230                  */
1231 
1232                 save_flags(flags);
1233                 if (sk)
1234                 {
1235                         cli();
1236                         sk->wmem_alloc += skb2->mem_len;
1237                         skb2->sk=sk;
1238                 }
1239                 restore_flags(flags);
1240                 skb2->raddr = skb->raddr;       /* For rebuild_header - must be here */
1241 
1242                 /*
1243                  *      Copy the packet header into the new buffer.
1244                  */
1245 
1246                 memcpy(skb2->h.raw, raw, hlen);
1247 
1248                 /*
1249                  *      Copy a block of the IP datagram.
1250                  */
1251                 memcpy(skb2->h.raw + hlen, ptr, len);
1252                 left -= len;
1253 
1254                 skb2->h.raw+=dev->hard_header_len;
1255 
1256                 /*
1257                  *      Fill in the new header fields.
1258                  */
1259                 iph = (struct iphdr *)(skb2->h.raw/*+dev->hard_header_len*/);
1260                 iph->frag_off = htons((offset >> 3));
1261                 /*
1262                  *      Added AC : If we are fragmenting a fragment that's not the
1263                  *                 last fragment then keep the MF bit set on each piece
1264                  */
1265                 if (left > 0 || (is_frag & 1))
1266                         iph->frag_off |= htons(IP_MF);
1267                 ptr += len;
1268                 offset += len;
1269 
1270                 /*
1271                  *      Put this fragment into the sending queue.
1272                  */
1273 
1274                 ip_statistics.IpFragCreates++;
1275 
1276                 ip_queue_xmit(sk, dev, skb2, 2);
1277         }
1278         ip_statistics.IpFragOKs++;
1279 }
1280 
1281 
1282 
1283 #ifdef CONFIG_IP_FORWARD
1284 
1285 /*
1286  *      Forward an IP datagram to its next destination.
1287  */
1288 
1289 static void ip_forward(struct sk_buff *skb, struct device *dev, int is_frag)
1290 {
1291         struct device *dev2;    /* Output device */
1292         struct iphdr *iph;      /* Our header */
1293         struct sk_buff *skb2;   /* Output packet */
1294         struct rtable *rt;      /* Route we use */
1295         unsigned char *ptr;     /* Data pointer */
1296         unsigned long raddr;    /* Router IP address */
1297 
1298         /* 
1299          *      See if we are allowed to forward this.
1300          */
1301 
1302 #ifdef CONFIG_IP_FIREWALL
1303         if(!ip_fw_chk(skb->h.iph, ip_fw_fwd_chain))
1304         {
1305                 return;
1306         }
1307 #endif
1308         /*
1309          *      According to the RFC, we must first decrease the TTL field. If
1310          *      that reaches zero, we must reply an ICMP control message telling
1311          *      that the packet's lifetime expired.
1312          *
1313          *      Exception:
1314          *      We may not generate an ICMP for an ICMP. icmp_send does the
1315          *      enforcement of this so we can forget it here. It is however
1316          *      sometimes VERY important.
1317          */
1318 
1319         iph = skb->h.iph;
1320         iph->ttl--;
1321         if (iph->ttl <= 0)
1322         {
1323                 /* Tell the sender its packet died... */
1324                 icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, dev);
1325                 return;
1326         }
1327 
1328         /*
1329          *      Re-compute the IP header checksum.
1330          *      This is inefficient. We know what has happened to the header
1331          *      and could thus adjust the checksum as Phil Karn does in KA9Q
1332          */
1333 
1334         ip_send_check(iph);
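        /*
         *      (A possible incremental alternative, in the spirit of RFC 1141:
         *      only the TTL byte changed, so one could add htons(0x0100) to
         *      the old checksum with end-around carry instead of recomputing
         *      the whole header sum.)
         */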
1335 
1336         /*
1337          * OK, the packet is still valid.  Fetch its destination address,
1338          * and give it to the IP sender for further processing.
1339          */
1340 
1341         rt = ip_rt_route(iph->daddr, NULL, NULL);
1342         if (rt == NULL)
1343         {
1344                 /*
1345                  *      Tell the sender its packet cannot be delivered. Again
1346                  *      ICMP is screened later.
1347                  */
1348                 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_UNREACH, dev);
1349                 return;
1350         }
1351 
1352 
1353         /*
1354          * Gosh.  Not only is the packet valid; we even know how to
1355          * forward it onto its final destination.  Can we say this
1356          * is being plain lucky?
1357          * If the router told us that there is no GW, use the dest.
1358          * IP address itself- we seem to be connected directly...
1359          */
1360 
1361         raddr = rt->rt_gateway;
1362 
1363         if (raddr != 0)
1364         {
1365                 /*
1366                  *      There is a gateway so find the correct route for it.
1367                  *      Gateways cannot in turn be gatewayed.
1368                  */
1369                 rt = ip_rt_route(raddr, NULL, NULL);
1370                 if (rt == NULL)
1371                 {
1372                         /*
1373                          *      Tell the sender its packet cannot be delivered...
1374                          */
1375                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, dev);
1376                         return;
1377                 }
1378                 if (rt->rt_gateway != 0)
1379                         raddr = rt->rt_gateway;
1380         }
1381         else
1382                 raddr = iph->daddr;
1383 
1384         /*
1385          *      Having picked a route we can now send the frame out.
1386          */
1387 
1388         dev2 = rt->rt_dev;
1389 
1390         /*
1391          *      In IP you never forward a frame on the interface that it arrived
1392          *      upon. We should generate an ICMP HOST REDIRECT giving the route
1393          *      we calculated.
1394          *      For now just dropping the packet is an acceptable compromise.
1395          */
1396 
1397         if (dev == dev2)
1398                 return;
1399 
1400         /*
1401          * We now allocate a new buffer, and copy the datagram into it.
1402          * If the indicated interface is up and running, kick it.
1403          */
1404 
1405         if (dev2->flags & IFF_UP)
1406         {
1407 
1408                 /*
1409                  *      Current design decrees we copy the packet. For identical header
1410                  *      lengths we could avoid it. The new skb code will let us push
1411                  *      data so the problem goes away then.
1412                  */
1413 
1414                 skb2 = alloc_skb(dev2->hard_header_len + skb->len, GFP_ATOMIC);
1415                 /*
1416                  *      This is rare and since IP is tolerant of network failures
1417                  *      quite harmless.
1418                  */
1419                 if (skb2 == NULL)
1420                 {
1421                         printk("\nIP: No memory available for IP forward\n");
1422                         return;
1423                 }
1424                 ptr = skb2->data;
1425                 skb2->free = 1;
1426                 skb2->len = skb->len + dev2->hard_header_len;
1427                 skb2->h.raw = ptr;
1428 
1429                 /*
1430                  *      Copy the packet data into the new buffer.
1431                  */
1432                 memcpy(ptr + dev2->hard_header_len, skb->h.raw, skb->len);
1433 
1434                 /* Now build the MAC header. */
1435                 (void) ip_send(skb2, raddr, skb->len, dev2, dev2->pa_addr);
1436 
1437                 ip_statistics.IpForwDatagrams++;
1438 
1439                 /*
1440                  *      See if it needs fragmenting. Note in ip_rcv we tagged
1441                  *      the fragment type. This must be right so that
1442                  *      the fragmenter does the right thing.
1443                  */
1444 
1445                 if(skb2->len > dev2->mtu + dev2->hard_header_len)
1446                 {
1447                         ip_fragment(NULL,skb2,dev2, is_frag);
1448                         kfree_skb(skb2,FREE_WRITE);
1449                 }
1450                 else
1451                 {
1452 #ifdef CONFIG_IP_ACCT           
1453                         /*
1454                          *      Count mapping we shortcut
1455                          */
1456                          
1457                         ip_acct_cnt(iph,ip_acct_chain,1);
1458 #endif                  
1459                         
1460                         /*
1461                          *      Map service types to priority. We lie about
1462                          *      throughput being low priority, but its a good
1463                          *      choice to help improve general usage.
1464                          */
1465                         if(iph->tos & IPTOS_LOWDELAY)
1466                                 dev_queue_xmit(skb2, dev2, SOPRI_INTERACTIVE);
1467                         else if(iph->tos & IPTOS_THROUGHPUT)
1468                                 dev_queue_xmit(skb2, dev2, SOPRI_BACKGROUND);
1469                         else
1470                                 dev_queue_xmit(skb2, dev2, SOPRI_NORMAL);
1471                 }
1472         }
1473 }
1474 
1475 
1476 #endif
1477 
1478 /*
1479  *      This function receives all incoming IP datagrams.
1480  */
1481 
1482 int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
1483 {
1484         struct iphdr *iph = skb->h.iph;
1485         struct sock *raw_sk=NULL;
1486         unsigned char hash;
1487         unsigned char flag = 0;
1488         unsigned char opts_p = 0;       /* Set iff the packet has options. */
1489         struct inet_protocol *ipprot;
1490         static struct options opt; /* static because we don't really use the
1491                                 options yet and they would eat stack space. */
1492         int brd=IS_MYADDR;
1493         int is_frag=0;
1494 
1495         ip_statistics.IpInReceives++;
1496 
1497         /*
1498          *      Tag the ip header of this packet so we can find it
1499          */
1500 
1501         skb->ip_hdr = iph;
1502 
1503         /*
1504          *      Is the datagram acceptable?
1505          *
1506          *      1.      Length at least the size of an ip header
1507          *      2.      Version of 4
1508          *      3.      Checksum is correct. [Speed optimisation for later: skip loopback checksums]
1509          *      (4.     We ought to check for IP multicast addresses and undefined types... does this matter?)
1510          */
1511 
1512         if (skb->len<sizeof(struct iphdr) || iph->ihl<5 || iph->version != 4 || ip_fast_csum((unsigned char *)iph, iph->ihl) !=0)
1513         {
1514                 ip_statistics.IpInHdrErrors++;
1515                 kfree_skb(skb, FREE_WRITE);
1516                 return(0);
1517         }
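             /*
              *      For reference: iph->check holds the one's complement of the
              *      one's complement sum of the header, so recomputing the
              *      folded sum over the whole header (checksum field included)
              *      gives 0 for an undamaged header. That is why ip_fast_csum()
              *      is simply compared with zero above.
              */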
1518         
1519         /*
1520          *      See if the firewall wants to dispose of the packet. 
1521          */
1522 
1523 #ifdef  CONFIG_IP_FIREWALL
1524         
1525         if(!LOOPBACK(iph->daddr) && !ip_fw_chk(iph,ip_fw_blk_chain))
1526         {
1527                 kfree_skb(skb, FREE_WRITE);
1528                 return 0;       
1529         }
1530 
1531 #endif
1532         
1533         /*
1534          *      Our transport medium may have padded the buffer out. Now we know it
1535          *      is IP we can trim to the true length of the frame.
1536          */
1537 
1538         skb->len=ntohs(iph->tot_len);
1539 
1540         /*
1541          *      Next analyse the packet for options. Studies show fewer than one packet
1542          *      in a thousand has options...
1543          */
1544 
1545         if (iph->ihl != 5)
1546         {       /* The typical optionless IP packet skips this block; here we parse the options. */
1547                 memset((char *) &opt, 0, sizeof(opt));
1548                 if (do_options(iph, &opt) != 0)
1549                         return 0;
1550                 opts_p = 1;
1551         }
1552 
1553         /*
1554          *      Remember if the frame is fragmented.
1555          */
1556          
1557         if(iph->frag_off)
1558         {
1559                 if (iph->frag_off & 0x0020)
1560                         is_frag|=1;
1561                 /*
1562                  *      Non-zero offset, i.e. not the first fragment ?
1563                  */
1564         
1565                 if (ntohs(iph->frag_off) & 0x1fff)
1566                         is_frag|=2;
1567         }
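             /*
              *      frag_off packs three flag bits and a 13 bit fragment offset
              *      (in 8 byte units) into 16 bits. The more-fragments bit is
              *      0x2000 in host order; read unswapped on a little endian
              *      machine, as this code does, it appears as 0x0020, while
              *      0x1fff masks the offset after ntohs(). Hence a first
              *      fragment yields is_frag == 1, a middle fragment 3 and the
              *      final fragment 2.
              */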
1568         
1569         /*
1570          *      Do any IP forwarding required.  chk_addr() is expensive -- avoid it someday.
1571          *
1572          *      This is inefficient. While finding out if it is for us we could also compute
1573          *      the routing table entry. This is where the great unified cache theory comes
1574          *      in, as and when someone implements it.
1575          *
1576          *      For most hosts over 99% of packets match the first conditional
1577          *      and don't go via ip_chk_addr. Note: brd is set to IS_MYADDR at
1578          *      function entry.
1579          */
1580 
1581         if ( iph->daddr != skb->dev->pa_addr && (brd = ip_chk_addr(iph->daddr)) == 0)
1582         {
1583                 /*
1584                  *      Don't forward multicast or broadcast frames.
1585                  */
1586 
1587                 if(skb->pkt_type!=PACKET_HOST || brd==IS_BROADCAST)
1588                 {
1589                         kfree_skb(skb,FREE_WRITE);
1590                         return 0;
1591                 }
1592 
1593                 /*
1594                  *      The packet is for another target. Forward the frame
1595                  */
1596 
1597 #ifdef CONFIG_IP_FORWARD
1598                 ip_forward(skb, dev, is_frag);
1599 #else
1600 /*              printk("Machine %lx tried to use us as a forwarder to %lx but we have forwarding disabled!\n",
1601                         iph->saddr,iph->daddr);*/
1602                 ip_statistics.IpInAddrErrors++;
1603 #endif
1604                 /*
1605                  *      The forwarder is inefficient and copies the packet. We
1606                  *      free the original now.
1607                  */
1608 
1609                 kfree_skb(skb, FREE_WRITE);
1610                 return(0);
1611         }
1612         
1613 #ifdef CONFIG_IP_MULTICAST      
1614 
1615         if(brd==IS_MULTICAST && iph->daddr!=IGMP_ALL_HOSTS && !(dev->flags&IFF_LOOPBACK))
1616         {
1617                 /*
1618                  *      Check it is for one of our groups
1619                  */
1620                 struct ip_mc_list *ip_mc=dev->ip_mc_list;
1621                 do
1622                 {
1623                         if(ip_mc==NULL)
1624                         {       
1625                                 kfree_skb(skb, FREE_WRITE);
1626                                 return 0;
1627                         }
1628                         if(ip_mc->multiaddr==iph->daddr)
1629                                 break;
1630                         ip_mc=ip_mc->next;
1631                 }
1632                 while(1);
1633         }
1634 #endif
1635         /*
1636          *      Account for the packet
1637          */
1638          
1639 #ifdef CONFIG_IP_ACCT
1640         ip_acct_cnt(iph,ip_acct_chain,1);
1641 #endif  
1642 
1643         /*
1644          * Reassemble IP fragments.
1645          */
1646 
1647         if(is_frag)
1648         {
1649                 /* Defragment. Obtain the complete packet if there is one */
1650                 skb=ip_defrag(iph,skb,dev);
1651                 if(skb==NULL)
1652                         return 0;
1653                 skb->dev = dev;
1654                 iph=skb->h.iph;
1655         }
1656
1659         /*
1660          *      Point into the IP datagram, just past the header.
1661          */
1662 
1663         skb->ip_hdr = iph;
1664         skb->h.raw += iph->ihl*4;
1665         
1666         /*
1667          *      Deliver to raw sockets. This is fun as to avoid copies we want to make no surplus copies.
1668          */
1669          
1670         hash = iph->protocol & (SOCK_ARRAY_SIZE-1);
1671         
1672         /* If there may be a raw socket for this protocol we must check; if not we couldn't care less */
1673         if((raw_sk=raw_prot.sock_array[hash])!=NULL)
1674         {
1675                 struct sock *sknext=NULL;
1676                 struct sk_buff *skb1;
1677                 raw_sk=get_sock_raw(raw_sk, hash,  iph->saddr, iph->daddr);
1678                 if(raw_sk)      /* Any raw sockets */
1679                 {
1680                         do
1681                         {
1682                                 /* Find the next */
1683                                 sknext=get_sock_raw(raw_sk->next, hash, iph->saddr, iph->daddr);
1684                                 if(sknext)
1685                                         skb1=skb_clone(skb, GFP_ATOMIC);
1686                                 else
1687                                         break;  /* One pending raw socket left */
1688                                 if(skb1)
1689                                         raw_rcv(raw_sk, skb1, dev, iph->saddr,iph->daddr);
1690                                 raw_sk=sknext;
1691                         }
1692                         while(raw_sk!=NULL);
1693                         /* Here either raw_sk is the last raw socket, or NULL if none */
1694                         /* We deliver to the last raw socket AFTER the protocol checks as it avoids a surplus copy */
1695                 }
1696         }
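             /*
              *      At this point raw_sk, if not NULL, is the last matching raw
              *      socket. Every earlier match received its own clone in the
              *      loop above; the last one gets the original skb after the
              *      protocol pass below, which saves a copy in the common
              *      single listener case.
              */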
1697         
1698         /*
1699          *      skb->h.raw now points at the protocol beyond the IP header.
1700          */
1701 
1702         hash = iph->protocol & (MAX_INET_PROTOS -1);
1703         for (ipprot = (struct inet_protocol *)inet_protos[hash];ipprot != NULL;ipprot=(struct inet_protocol *)ipprot->next)
1704         {
1705                 struct sk_buff *skb2;
1706 
1707                 if (ipprot->protocol != iph->protocol)
1708                         continue;
1709                /*
1710                 *      See if we need to make a copy of the skb.  The copy flag is
1711                 *      only set if more than one protocol wants the packet, and even
1712                 *      then not for the last one. If there is a pending raw delivery
1713                 *      we must also clone, since the original is reserved for it.
1714                 */
1715                 if (ipprot->copy || raw_sk)
1716                 {
1717                         skb2 = skb_clone(skb, GFP_ATOMIC);
1718                         if(skb2==NULL)
1719                                 continue;
1720                 }
1721                 else
1722                 {
1723                         skb2 = skb;
1724                 }
1725                 flag = 1;
1726 
1727                /*
1728                 * Pass on the datagram to each protocol that wants it,
1729                 * based on the datagram protocol.  We should really
1730                 * check the protocol handler's return values here...
1731                 */
1732                 ipprot->handler(skb2, dev, opts_p ? &opt : 0, iph->daddr,
1733                                 (ntohs(iph->tot_len) - (iph->ihl * 4)),
1734                                 iph->saddr, 0, ipprot);
1735 
1736         }
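             /*
              *      For reference, each matching handler is invoked as
              *
              *              handler(skb, dev, options, daddr, payload_len,
              *                      saddr, redo, inet_protocol)
              *
              *      mirroring the call above: payload_len is tot_len minus the
              *      IP header, and the 0 passed here is conventionally the redo
              *      flag, i.e. a first time delivery rather than a packet being
              *      re-presented to the protocol.
              */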
1737 
1738         /*
1739          * All protocols checked.
1740          * If this packet was a broadcast, we may *not* reply to it, since that
1741          * causes (proven, grin) ARP storms and a leakage of memory (i.e. all
1742          * ICMP reply messages get queued up for transmission...)
1743          */
1744 
1745         if(raw_sk!=NULL)        /* Shift to last raw user */
1746                 raw_rcv(raw_sk, skb, dev, iph->saddr, iph->daddr);
1747         else if (!flag)         /* Free and report errors */
1748         {
1749                 if (brd != IS_BROADCAST && brd!=IS_MULTICAST)
1750                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, dev);
1751                 kfree_skb(skb, FREE_WRITE);
1752         }
1753 
1754         return(0);
1755 }
1756 
1757 /*
1758  *      Loop a packet back to the sender.
1759  */
1760  
1761 static void ip_loopback(struct device *old_dev, struct sk_buff *skb)
1762 {
1763         extern struct device loopback_dev;
1764         struct device *dev=&loopback_dev;
1765         int len=skb->len-old_dev->hard_header_len;
1766         struct sk_buff *newskb=alloc_skb(len+dev->hard_header_len, GFP_ATOMIC);
1767         
1768         if(newskb==NULL)
1769                 return;
1770                 
1771         newskb->link3=NULL;
1772         newskb->sk=NULL;
1773         newskb->dev=dev;
1774         newskb->saddr=skb->saddr;
1775         newskb->daddr=skb->daddr;
1776         newskb->raddr=skb->raddr;
1777         newskb->free=1;
1778         newskb->lock=0;
1779         newskb->users=0;
1780         newskb->pkt_type=skb->pkt_type;
1781         newskb->len=len+dev->hard_header_len;
1782         
1783         
1784         newskb->ip_hdr=(struct iphdr *)(newskb->data+ip_send(newskb, skb->ip_hdr->daddr, len, dev, skb->ip_hdr->saddr));
1785         memcpy(newskb->ip_hdr,skb->ip_hdr,len);
1786 
1787         /* Recurse. The device check against IFF_LOOPBACK will stop infinite recursion */
1788                 
1789         /*printk("Loopback output queued [%lX to %lX].\n", newskb->ip_hdr->saddr,newskb->ip_hdr->daddr);*/
1790         ip_queue_xmit(NULL, dev, newskb, 1);
1791 }
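     /*
      *      Note on the recursion warning above: the copy is owned by the
      *      loopback device, and the IFF_LOOPBACK tests in ip_queue_xmit()
      *      stop the multicast/broadcast loopback paths from generating yet
      *      another copy, so the recursion ends after one level.
      */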
1792 
1793 
1794 /*
1795  * Queues a packet to be sent, and starts the transmitter
1796  * if necessary.  If free is 1 we free the block after
1797  * transmit, otherwise we don't. If free is 2 we free the
1798  * block but don't assign a fresh IP ID (the ID is already
1799  * set, e.g. for fragments). This routine also fills in the
1800  * total length and computes the IP header checksum.
1801  */
1802 
1803 void ip_queue_xmit(struct sock *sk, struct device *dev,
1804               struct sk_buff *skb, int free)
1805 {
1806         struct iphdr *iph;
1807         unsigned char *ptr;
1808 
1809         /* Sanity check */
1810         if (dev == NULL)
1811         {
1812                 printk("IP: ip_queue_xmit dev = NULL\n");
1813                 return;
1814         }
1815 
1816         IS_SKB(skb);
1817 
1818         /*
1819          *      Do some book-keeping in the packet for later
1820          */
1821 
1822 
1823         skb->dev = dev;
1824         skb->when = jiffies;
1825 
1826         /*
1827          *      Find the IP header and set the length. This is bad,
1828          *      but once we get the new skb data handling code in, the
1829          *      hardware header will be pushed sensibly and we will set
1830          *      skb->ip_hdr earlier, avoiding this mess and the fixed
1831          *      header length problem.
1832          */
1833 
1834         ptr = skb->data;
1835         ptr += dev->hard_header_len;
1836         iph = (struct iphdr *)ptr;
1837         skb->ip_hdr = iph;
1838         iph->tot_len = htons(skb->len-dev->hard_header_len);   /* host to network order */
1839 
1840         /*
1841          *      No reassigning numbers to fragments...
1842          */
1843 
1844         if(free!=2)
1845                 iph->id      = htons(ip_id_count++);
1846         else
1847                 free=1;
1848 
1849         /* All buffers without an owner socket get freed */
1850         if (sk == NULL)
1851                 free = 1;
1852 
1853         skb->free = free;
1854 
1855         /*
1856          *      Do we need to fragment. Again this is inefficient.
1857          *      We need to somehow lock the original buffer and use
1858          *      bits of it.
1859          */
1860 
1861         if(skb->len > dev->mtu + dev->hard_header_len)
1862         {
1863                 ip_fragment(sk,skb,dev,0);
1864                 IS_SKB(skb);
1865                 kfree_skb(skb,FREE_WRITE);
1866                 return;
1867         }
1868 
1869         /*
1870          *      Add an IP checksum
1871          */
1872 
1873         ip_send_check(iph);
1874 
1875         /*
1876          *      Print the frame when debugging
1877          */
1878 
1879         /*
1880          *      More debugging. You cannot queue a packet already on a list
1881          *      Spot this and moan loudly.
1882          */
1883         if (skb->next != NULL)
1884         {
1885                 printk("ip_queue_xmit: next != NULL\n");
1886                 skb_unlink(skb);
1887         }
1888 
1889         /*
1890          *      If a sender wishes the packet to remain unfreed
1891          *      we add it to his send queue. This arguably belongs
1892          *      in the TCP level since nobody else uses it. BUT
1893          *      remember IPng might change all the rules.
1894          */
1895 
1896         if (!free)
1897         {
1898                 unsigned long flags;
1899                 /* The socket now has more outstanding blocks */
1900 
1901                 sk->packets_out++;
1902 
1903                 /* Protect the list for a moment */
1904                 save_flags(flags);
1905                 cli();
1906 
1907                 if (skb->link3 != NULL)
1908                 {
1909                         printk("ip.c: link3 != NULL\n");
1910                         skb->link3 = NULL;
1911                 }
1912                 if (sk->send_head == NULL)
1913                 {
1914                         sk->send_tail = skb;
1915                         sk->send_head = skb;
1916                 }
1917                 else
1918                 {
1919                         sk->send_tail->link3 = skb;
1920                         sk->send_tail = skb;
1921                 }
1922                 /* skb->link3 is NULL */
1923 
1924                 /* Interrupt restore */
1925                 restore_flags(flags);
1926         }
1927         else
1928                 /* Remember who owns the buffer */
1929                 skb->sk = sk;
1930 
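             /*
              *      The send_head/send_tail list built above is in effect the
              *      socket's retransmission queue: TCP walks it from send_head
              *      when a retransmit timer fires, which is why only unfreed
              *      (free == 0) buffers are linked onto it.
              */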
1931         /*
1932          *      If the indicated interface is up and running, send the packet.
1933          */
1934          
1935         ip_statistics.IpOutRequests++;
1936 #ifdef CONFIG_IP_ACCT
1937         ip_acct_cnt(iph,ip_acct_chain,1);
1938 #endif  
1939         
1940 #ifdef CONFIG_IP_MULTICAST      
1941 
1942         /*
1943          *      Multicasts are looped back for other local users
1944          */
1945          
1946         if (MULTICAST(iph->daddr) && !(dev->flags&IFF_LOOPBACK))
1947         {
1948                 if(sk==NULL || sk->ip_mc_loop)
1949                 {
1950                         if(iph->daddr==IGMP_ALL_HOSTS)
1951                                 ip_loopback(dev,skb);
1952                         else
1953                         {
1954                                 struct ip_mc_list *imc=dev->ip_mc_list;
1955                                 while(imc!=NULL)
1956                                 {
1957                                         if(imc->multiaddr==iph->daddr)
1958                                         {
1959                                                 ip_loopback(dev,skb);
1960                                                 break;
1961                                         }
1962                                         imc=imc->next;
1963                                 }
1964                         }
1965                 }
1966                 /* Multicasts with ttl 0 must not go beyond the host */
1967                 
1968                 if(skb->ip_hdr->ttl==0)
1969                 {
1970                         kfree_skb(skb, FREE_READ);
1971                         return;
1972                 }
1973         }
1974 #endif
1975         if((dev->flags&IFF_BROADCAST) && iph->daddr==dev->pa_brdaddr && !(dev->flags&IFF_LOOPBACK))
1976                 ip_loopback(dev,skb);
1977                 
1978         if (dev->flags & IFF_UP)
1979         {
1980                 /*
1981                  *      If we have an owner use its priority setting,
1982                  *      otherwise use NORMAL
1983                  */
1984 
1985                 if (sk != NULL)
1986                 {
1987                         dev_queue_xmit(skb, dev, sk->priority);
1988                 }
1989                 else
1990                 {
1991                         dev_queue_xmit(skb, dev, SOPRI_NORMAL);
1992                 }
1993         }
1994         else
1995         {
1996                 ip_statistics.IpOutDiscards++;
1997                 if (free)
1998                         kfree_skb(skb, FREE_WRITE);
1999         }
2000 }
2001 
2002 
2003 
2004 #ifdef CONFIG_IP_MULTICAST
2005 
2006 /*
2007  *      Write a multicast group list table for the IGMP daemon to
2008  *      read.
2009  */
2010  
2011 int ip_mc_procinfo(char *buffer, char **start, off_t offset, int length)
2012 {
2013         off_t pos=0, begin=0;
2014         struct ip_mc_list *im;
2015         unsigned long flags;
2016         int len=0;
2017         struct device *dev;
2018         
2019         len=sprintf(buffer,"Device    : Count\tGroup    Users Timer\n");  
2020         save_flags(flags);
2021         cli();
2022         
2023         for(dev = dev_base; dev; dev = dev->next)
2024         {
2025                 if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST))
2026                 {
2027                         len+=sprintf(buffer+len,"%-10s: %5d\n",
2028                                         dev->name, dev->mc_count);
2029                         for(im = dev->ip_mc_list; im; im = im->next)
2030                         {
2031                                 len+=sprintf(buffer+len,
2032                                         "\t\t\t%08lX %5d %d:%08lX\n",
2033                                         im->multiaddr, im->users,
2034                                         im->tm_running, im->timer.expires);
2035                                 pos=begin+len;
2036                                 if(pos<offset)
2037                                 {
2038                                         len=0;
2039                                         begin=pos;
2040                                 }
2041                                 if(pos>offset+length)
2042                                         break;
2043                         }
2044                 }
2045         }
2046         restore_flags(flags);
2047         *start=buffer+(offset-begin);
2048         len-=(offset-begin);
2049         if(len>length)
2050                 len=length;     
2051         return len;
2052 }
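     /*
      *      The pos/begin/offset arithmetic above is the usual windowed /proc
      *      read: begin marks where the retained output starts, anything wholly
      *      before offset is discarded by zeroing len, and the scan stops once
      *      offset+length has been generated. The caller then takes
      *      buffer+(offset-begin) for len bytes via *start. (This is presumably
      *      wired up elsewhere as the IGMP group listing under /proc/net.)
      */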
2053 
2054 
2055 #endif  
2056 /*
2057  *      Socket option code for IP. This is the end of the line after any TCP, UDP etc. options on
2058  *      an IP socket.
2059  *
2060  *      We implement IP_TOS (type of service), IP_TTL (time to live).
2061  *
2062  *      Next release we will sort out IP_OPTIONS, since they are kind of important to some people.
2063  */
2064 
2065 int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
2066 {
2067         int val,err;
2068 #if defined(CONFIG_IP_FIREWALL) || defined(CONFIG_IP_ACCT)
2069         struct ip_fw tmp_fw;
2070 #endif  
2071         if (optval == NULL)
2072                 return(-EINVAL);
2073 
2074         err=verify_area(VERIFY_READ, optval, sizeof(int));
2075         if(err)
2076                 return err;
2077 
2078         val = get_fs_long((unsigned long *)optval);
2079 
2080         if(level!=SOL_IP)
2081                 return -EOPNOTSUPP;
2082 
2083 #ifdef CONFIG_IP_MULTICAST
2084         if(optname==IP_MULTICAST_TTL)
2085         {
2086                 unsigned char ucval;
2087                 ucval=get_fs_byte((unsigned char *)optval);
2088                 printk("MC TTL %d\n", ucval);
2089                 if(ucval<1||ucval>255)
2090                         return -EINVAL;
2091                 sk->ip_mc_ttl=(int)ucval;
2092                 return 0;
2093         }
2094 #endif
2095 
2096         switch(optname)
2097         {
2098                 case IP_TOS:
2099                         if(val<0||val>255)
2100                                 return -EINVAL;
2101                         sk->ip_tos=val;
2102                         if(val==IPTOS_LOWDELAY)
2103                                 sk->priority=SOPRI_INTERACTIVE;
2104                         if(val==IPTOS_THROUGHPUT)
2105                                 sk->priority=SOPRI_BACKGROUND;
2106                         return 0;
2107                 case IP_TTL:
2108                         if(val<1||val>255)
2109                                 return -EINVAL;
2110                         sk->ip_ttl=val;
2111                         return 0;
2112 #ifdef CONFIG_IP_MULTICAST
2113 #ifdef GCC_WORKS
2114                 case IP_MULTICAST_TTL: 
2115                 {
2116                         unsigned char ucval;
2117 
2118                         ucval=get_fs_byte((unsigned char *)optval);
2119                         printk("MC TTL %d\n", ucval);
2120                         if(ucval<1||ucval>255)
2121                                 return -EINVAL;
2122                         sk->ip_mc_ttl=(int)ucval;
2123                         return 0;
2124                 }
2125 #endif
2126                 case IP_MULTICAST_LOOP: 
2127                 {
2128                         unsigned char ucval;
2129 
2130                         ucval=get_fs_byte((unsigned char *)optval);
2131                         if(ucval!=0 && ucval!=1)
2132                                  return -EINVAL;
2133                         sk->ip_mc_loop=(int)ucval;
2134                         return 0;
2135                 }
2136                 case IP_MULTICAST_IF: 
2137                 {
2138                         /* Not fully tested */
2139                         struct in_addr addr;
2140                         struct device *dev=NULL;
2141                         
2142                         /*
2143                          *      Check the arguments are allowable
2144                          */
2145 
2146                         err=verify_area(VERIFY_READ, optval, sizeof(addr));
2147                         if(err)
2148                                 return err;
2149                                 
2150                         memcpy_fromfs(&addr,optval,sizeof(addr));
2151                         
2152                         printk("MC bind %s\n", in_ntoa(addr.s_addr));
2153                         
2154                         /*
2155                          *      What address has been requested
2156                          */
2157                         
2158                         if(addr.s_addr==INADDR_ANY)     /* Default */
2159                         {
2160                                 sk->ip_mc_name[0]=0;
2161                                 return 0;
2162                         }
2163                         
2164                         /*
2165                          *      Find the device
2166                          */
2167                          
2168                         for(dev = dev_base; dev; dev = dev->next)
2169                         {
2170                                 if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST)&&
2171                                         (dev->pa_addr==addr.s_addr))
2172                                         break;
2173                         }
2174                         
2175                         /*
2176                          *      Did we find one
2177                          */
2178                          
2179                         if(dev) 
2180                         {
2181                                 strcpy(sk->ip_mc_name,dev->name);
2182                                 return 0;
2183                         }
2184                         return -EADDRNOTAVAIL;
2185                 }
2186                 
2187                 case IP_ADD_MEMBERSHIP: 
2188                 {
2189                 
2190 /*
2191  *      FIXME: Add/Del membership should have a semaphore protecting them from re-entry
2192  */
2193                         struct ip_mreq mreq;
2194                         static struct options optmem;
2195                         unsigned long route_src;
2196                         struct rtable *rt;
2197                         struct device *dev=NULL;
2198                         
2199                         /*
2200                          *      Check the arguments.
2201                          */
2202 
2203                         err=verify_area(VERIFY_READ, optval, sizeof(mreq));
2204                         if(err)
2205                                 return err;
2206 
2207                         memcpy_fromfs(&mreq,optval,sizeof(mreq));
2208 
2209                         /* 
2210                          *      Get device for use later
2211                          */
2212 
2213                         if(mreq.imr_interface.s_addr==INADDR_ANY) 
2214                         {
2215                                 /*
2216                                  *      Not set so scan.
2217                                  */
2218                                 if((rt=ip_rt_route(mreq.imr_multiaddr.s_addr,&optmem, &route_src))!=NULL)
2219                                 {
2220                                         dev=rt->rt_dev;
2221                                         rt->rt_use--;
2222                                 }
2223                         }
2224                         else
2225                         {
2226                                 /*
2227                                  *      Find a suitable device.
2228                                  */
2229                                 for(dev = dev_base; dev; dev = dev->next)
2230                                 {
2231                                         if((dev->flags&IFF_UP)&&(dev->flags&IFF_MULTICAST)&&
2232                                                 (dev->pa_addr==mreq.imr_interface.s_addr))
2233                                                 break;
2234                                 }
2235                         }
2236                         
2237                         /*
2238                          *      No device, no cookies.
2239                          */
2240                          
2241                         if(!dev)
2242                                 return -ENODEV;
2243                                 
2244                         /*
2245                          *      Join group.
2246                          */
2247                          
2248                         return ip_mc_join_group(sk,dev,mreq.imr_multiaddr.s_addr);
2249                 }
2250                 
2251                 case IP_DROP_MEMBERSHIP: 
2252                 {
2253                         struct ip_mreq mreq;
2254                         struct rtable *rt;
2255                         static struct options optmem;
2256                         unsigned long route_src;
2257                         struct device *dev=NULL;
2258 
2259                         /*
2260                          *      Check the arguments
2261                          */
2262                          
2263                         err=verify_area(VERIFY_READ, optval, sizeof(mreq));
2264                         if(err)
2265                                 return err;
2266 
2267                         memcpy_fromfs(&mreq,optval,sizeof(mreq));
2268 
2269                         /*
2270                          *      Get device for use later 
2271                          */
2272  
2273                         if(mreq.imr_interface.s_addr==INADDR_ANY) 
2274                         {
2275                                 if((rt=ip_rt_route(mreq.imr_multiaddr.s_addr,&optmem, &route_src))!=NULL)
2276                                 {
2277                                         dev=rt->rt_dev;
2278                                         rt->rt_use--;
2279                                 }
2280                         }
2281                         else 
2282                         {
2283                                 for(dev = dev_base; dev; dev = dev->next)
2284                                 {
2285                                         if((dev->flags&IFF_UP)&& (dev->flags&IFF_MULTICAST)&&
2286                                                         (dev->pa_addr==mreq.imr_interface.s_addr))
2287                                                 break;
2288                                 }
2289                         }
2290                         
2291                         /*
2292                          *      Did we find a suitable device.
2293                          */
2294                          
2295                         if(!dev)
2296                                 return -ENODEV;
2297                                 
2298                         /*
2299                          *      Leave group
2300                          */
2301                          
2302                         return ip_mc_leave_group(sk,dev,mreq.imr_multiaddr.s_addr);
2303                 }
2304 #endif                  
2305 #ifdef CONFIG_IP_FIREWALL
2306                 case IP_FW_ADD_BLK:
2307                 case IP_FW_DEL_BLK:
2308                 case IP_FW_ADD_FWD:
2309                 case IP_FW_DEL_FWD:
2310                 case IP_FW_CHK_BLK:
2311                 case IP_FW_CHK_FWD:
2312                 case IP_FW_FLUSH:
2313                 case IP_FW_POLICY:
2314                         if(!suser())
2315                                 return -EPERM;
2316                         if(optlen>sizeof(tmp_fw) || optlen<1)
2317                                 return -EINVAL;
2318                         err=verify_area(VERIFY_READ,optval,optlen);
2319                         if(err)
2320                                 return err;
2321                         memcpy_fromfs(&tmp_fw,optval,optlen);
2322                         err=ip_fw_ctl(optname, &tmp_fw,optlen);
2323                         return -err;    /* -0 is 0 after all */
2324                         
2325 #endif
2326 #ifdef CONFIG_IP_ACCT
2327                 case IP_ACCT_DEL:
2328                 case IP_ACCT_ADD:
2329                 case IP_ACCT_FLUSH:
2330                 case IP_ACCT_ZERO:
2331                         if(!suser())
2332                                 return -EPERM;
2333                         if(optlen>sizeof(tmp_fw) || optlen<1)
2334                                 return -EINVAL;
2335                         err=verify_area(VERIFY_READ,optval,optlen);
2336                         if(err)
2337                                 return err;
2338                         memcpy_fromfs(&tmp_fw, optval,optlen);
2339                         err=ip_acct_ctl(optname, &tmp_fw,optlen);
2340                         return -err;    /* -0 is 0 after all */
2341 #endif
2342                 /* IP_OPTIONS and friends go here eventually */
2343                 default:
2344                         return(-ENOPROTOOPT);
2345         }
2346 }
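     /*
      *      Purely for illustration (user space code, not part of this file):
      *      the multicast membership options above are normally reached via
      *      setsockopt() on a datagram socket, roughly as follows.
      *
      *              struct ip_mreq mreq;
      *              mreq.imr_multiaddr.s_addr = inet_addr("224.0.0.9");
      *              mreq.imr_interface.s_addr = htonl(INADDR_ANY);
      *              setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
      *                         &mreq, sizeof(mreq));
      *
      *      IPPROTO_IP and SOL_IP share the value 0, so either name passes
      *      the level check at the top of ip_setsockopt().
      */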
2347 
2348 /*
2349  *      Get the options. Note for future reference. The GET of IP options gets the
2350  *      _received_ ones. The set sets the _sent_ ones.
2351  */
2352 
2353 int ip_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
2354 {
2355         int val,err;
2356 #ifdef CONFIG_IP_MULTICAST
2357         int len;
2358 #endif
2359         
2360         if(level!=SOL_IP)
2361                 return -EOPNOTSUPP;
2362 
2363         switch(optname)
2364         {
2365                 case IP_TOS:
2366                         val=sk->ip_tos;
2367                         break;
2368                 case IP_TTL:
2369                         val=sk->ip_ttl;
2370                         break;
2371 #ifdef CONFIG_IP_MULTICAST                      
2372                 case IP_MULTICAST_TTL:
2373                         val=sk->ip_mc_ttl;
2374                         break;
2375                 case IP_MULTICAST_LOOP:
2376                         val=sk->ip_mc_loop;
2377                         break;
2378                 case IP_MULTICAST_IF:
2379                         err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
2380                         if(err)
2381                                 return err;
2382                         len=strlen(sk->ip_mc_name);
2383                         err=verify_area(VERIFY_WRITE, optval, len);
2384                         if(err)
2385                                 return err;
2386                         put_fs_long(len,(unsigned long *) optlen);
2387                         memcpy_tofs((void *)optval,sk->ip_mc_name, len);
2388                         return 0;
2389 #endif
2390                 default:
2391                         return(-ENOPROTOOPT);
2392         }
2393         err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
2394         if(err)
2395                 return err;
2396         put_fs_long(sizeof(int),(unsigned long *) optlen);
2397 
2398         err=verify_area(VERIFY_WRITE, optval, sizeof(int));
2399         if(err)
2400                 return err;
2401         put_fs_long(val,(unsigned long *)optval);
2402 
2403         return(0);
2404 }
2405 
2406 /*
2407  *      IP protocol layer initialiser
2408  */
2409 
2410 static struct packet_type ip_packet_type =
2411 {
2412         0,      /* MUTTER ntohs(ETH_P_IP),*/
2413         NULL,   /* All devices */
2414         ip_rcv,
2415         NULL,
2416         NULL,
2417 };
2418 
2419 /*
2420  *      Device notifier
2421  */
2422  
2423 static int ip_rt_event(unsigned long event, void *ptr)
2424 {
2425         if(event==NETDEV_DOWN)
2426                 ip_rt_flush(ptr);
2427         return NOTIFY_DONE;
2428 }
2429 
2430 struct notifier_block ip_rt_notifier={
2431         ip_rt_event,
2432         NULL,
2433         0
2434 };
2435 
2436 /*
2437  *      IP registers the packet type and then calls the subprotocol initialisers
2438  */
2439 
2440 void ip_init(void)
2441 {
2442         ip_packet_type.type=htons(ETH_P_IP);
2443         dev_add_pack(&ip_packet_type);
2444 
2445         /* So we flush routes when a device is downed */        
2446         register_netdevice_notifier(&ip_rt_notifier);
2447 /*      ip_raw_init();
2448         ip_packet_init();
2449         ip_tcp_init();
2450         ip_udp_init();*/
2451 }
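     /*
      *      dev_add_pack() hangs ip_packet_type on the device layer's list of
      *      packet handlers, so every frame arriving with a hardware protocol
      *      of ETH_P_IP is handed to ip_rcv(). The type field is only filled
      *      in here at run time, apparently because htons() could not be used
      *      in the static initialiser above (hence the MUTTER comment there).
      */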
