root/net/ipv4/arp.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. arp_fast_lock
  2. arp_unlock
  3. arp_enqueue
  4. arp_dequeue
  5. arp_purge_send_q
  6. arp_free_entry
  7. arp_count_hhs
  8. arp_update_hhs
  9. arp_invalidate_hhs
  10. arp_set_hh
  11. arp_alloc_hh
  12. empty
  13. arpd_send
  14. arpd_update
  15. arpd_lookup
  16. arpd_flush
  17. arpd_callback
  18. arpd_update
  19. arp_force_expire
  20. arp_check_expire
  21. arp_expire_request
  22. arp_alloc_entry
  23. arp_device_event
  24. arp_send_q
  25. arp_update
  26. arp_lookup
  27. arp_query
  28. arp_set_predefined
  29. arp_new_entry
  30. arp_find
  31. arp_bind_cache
  32. arp_run_bh
  33. arp_send
  34. arp_rcv
  35. arp_req_set
  36. arp_req_get
  37. arp_req_delete
  38. arp_ioctl
  39. arp_get_info
  40. arp_init

   1 /* linux/net/inet/arp.c
   2  *
   3  * Copyright (C) 1994 by Florian  La Roche
   4  *
   5  * This module implements the Address Resolution Protocol ARP (RFC 826),
   6  * which is used to convert IP addresses (or in the future maybe other
   7  * high-level addresses) into a low-level hardware address (like an Ethernet
   8  * address).
   9  *
  10  * FIXME:
  11  *      Experiment with better retransmit timers
  12  *      Clean up the timer deletions
  13  *      If you create a proxy entry set your interface address to the address
  14  *      and then delete it, proxies may get out of sync with reality - check this
  15  *
  16  * This program is free software; you can redistribute it and/or
  17  * modify it under the terms of the GNU General Public License
  18  * as published by the Free Software Foundation; either version
  19  * 2 of the License, or (at your option) any later version.
  20  *
  21  * Fixes:
  22  *              Alan Cox        :       Removed the ethernet assumptions in Florian's code
  23  *              Alan Cox        :       Fixed some small errors in the ARP logic
  24  *              Alan Cox        :       Allow >4K in /proc
  25  *              Alan Cox        :       Make ARP add its own protocol entry
  26  *
  27  *              Ross Martin     :       Rewrote arp_rcv() and arp_get_info()
  28  *              Stephen Henson  :       Add AX25 support to arp_get_info()
  29  *              Alan Cox        :       Drop data when a device is downed.
  30  *              Alan Cox        :       Use init_timer().
  31  *              Alan Cox        :       Double lock fixes.
  32  *              Martin Seine    :       Move the arphdr structure
  33  *                                      to if_arp.h for compatibility.
  34  *                                      with BSD based programs.
  35  *              Andrew Tridgell :       Added ARP netmask code and
  36  *                                      re-arranged proxy handling.
  37  *              Alan Cox        :       Changed to use notifiers.
  38  *              Niibe Yutaka    :       Reply for this device or proxies only.
  39  *              Alan Cox        :       Don't proxy across hardware types!
  40  *              Jonathan Naylor :       Added support for NET/ROM.
  41  *              Mike Shaver     :       RFC1122 checks.
  42  *              Jonathan Naylor :       Only lookup the hardware address for
  43  *                                      the correct hardware type.
  44  *              Germano Caronni :       Assorted subtle races.
  45  *              Craig Schlenter :       Don't modify permanent entry 
  46  *                                      during arp_rcv.
  47  *              Russ Nelson     :       Tidied up a few bits.
  48  *              Alexey Kuznetsov:       Major changes to caching and behaviour,
  49  *                                      eg intelligent arp probing and generation
  50  *                                      of host down events.
  51  *              Alan Cox        :       Missing unlock in device events.
  52  *              Eckes           :       ARP ioctl control errors.
  53  *              Alexey Kuznetsov:       Arp free fix.
  54  *              Manuel Rodriguez:       Gratuitous ARP.
  55  *              Jonathan Layes  :       Added arpd support through kerneld 
  56  *                                      message queue (960314)
  57  */
  58 
  59 /* RFC1122 Status:
  60    2.3.2.1 (ARP Cache Validation):
  61      MUST provide mechanism to flush stale cache entries (OK)
  62      SHOULD be able to configure cache timeout (NOT YET)
  63      MUST throttle ARP retransmits (OK)
  64    2.3.2.2 (ARP Packet Queue):
  65      SHOULD save at least one packet from each "conversation" with an
  66        unresolved IP address.  (OK)
  67    950727 -- MS
  68 */
  69       
  70 #include <linux/types.h>
  71 #include <linux/string.h>
  72 #include <linux/kernel.h>
  73 #include <linux/sched.h>
  74 #include <linux/config.h>
  75 #include <linux/socket.h>
  76 #include <linux/sockios.h>
  77 #include <linux/errno.h>
  78 #include <linux/in.h>
  79 #include <linux/mm.h>
  80 #include <linux/inet.h>
  81 #include <linux/netdevice.h>
  82 #include <linux/etherdevice.h>
  83 #include <linux/if_arp.h>
  84 #include <linux/trdevice.h>
  85 #include <linux/skbuff.h>
  86 #include <linux/proc_fs.h>
  87 #include <linux/stat.h>
  88 
  89 #include <net/ip.h>
  90 #include <net/icmp.h>
  91 #include <net/route.h>
  92 #include <net/protocol.h>
  93 #include <net/tcp.h>
  94 #include <net/sock.h>
  95 #include <net/arp.h>
  96 #ifdef CONFIG_AX25
  97 #include <net/ax25.h>
  98 #ifdef CONFIG_NETROM
  99 #include <net/netrom.h>
 100 #endif
 101 #endif
 102 #ifdef CONFIG_NET_ALIAS
 103 #include <linux/net_alias.h>
 104 #endif
 105 #ifdef CONFIG_ARPD
 106 #include <net/netlink.h>
 107 #endif
 108 
 109 #include <asm/system.h>
 110 #include <asm/segment.h>
 111 
 112 #include <stdarg.h>
 113 
 114 /*
 115  *      Configurable Parameters
 116  */
 117 
 118 /*
 119  *      After that time, an unused entry is deleted from the arp table.
  120  *      RFC1122 recommends setting it to 60*HZ, if your site uses proxy arp
 121  *      and dynamic routing.
 122  */
 123 
 124 #ifndef CONFIG_ARPD
 125 #define ARP_TIMEOUT             (600*HZ)
 126 #else
 127 #define ARP_TIMEOUT             (60*HZ)
 128 #define ARPD_TIMEOUT            (600*HZ)
 129 #endif
 130 
 131 /*
 132  *      How often is ARP cache checked for expire.
 133  *      It is useless to set ARP_CHECK_INTERVAL > ARP_TIMEOUT
 134  */
 135 
 136 #define ARP_CHECK_INTERVAL      (60*HZ)
 137 
 138 /*
 139  *      Soft limit on ARP cache size.
 140  *      Note that this number should be greater, than
 141  *      number of simultaneously opened sockets, else
 142  *      hardware header cache will be not efficient.
 143  */
 144 
 145 #if RT_CACHE_DEBUG >= 2
 146 #define ARP_MAXSIZE     4
 147 #else
 148 #ifdef CONFIG_ARPD
 149 #define ARP_MAXSIZE     64
 150 #else
 151 #define ARP_MAXSIZE     256
 152 #endif /* CONFIG_ARPD */
 153 #endif
 154 
 155 /*
  156  *      If an arp request is sent, ARP_RES_TIME is the timeout value until the
  157  *      next request is sent.
 158  *      RFC1122: OK.  Throttles ARPing, as per 2.3.2.1. (MUST)
 159  *      The recommended minimum timeout is 1 second per destination.
 160  *
 161  */
 162 
 163 #define ARP_RES_TIME            (5*HZ)
 164 
 165 /*
  166  *      The number of times a broadcast arp request is sent, until
 167  *      the host is considered temporarily unreachable.
 168  */
 169 
 170 #define ARP_MAX_TRIES           3
 171 
 172 /*
 173  *      The entry is reconfirmed by sending point-to-point ARP
 174  *      request after ARP_CONFIRM_INTERVAL.
 175  *      RFC1122 recommends 60*HZ.
 176  *
 177  *      Warning: there exist nodes, that answer only broadcast
 178  *      ARP requests (Cisco-4000 in hot standby mode?)
 179  *      Now arp code should work with such nodes, but
 180  *      it still will generate redundant broadcast requests, so that
 181  *      this interval should be enough long.
 182  */
 183 
 184 #define ARP_CONFIRM_INTERVAL    (300*HZ)
 185 
 186 /*
 187  *      We wait for answer to unicast request for ARP_CONFIRM_TIMEOUT.
 188  */
 189 
 190 #define ARP_CONFIRM_TIMEOUT     ARP_RES_TIME
 191 
 192 /*
  193  *      The number of times a unicast arp request is retried, until
 194  *      the cache entry is considered suspicious.
 195  *      Value 0 means that no unicast pings will be sent.
 196  *      RFC1122 recommends 2.
 197  */
 198 
 199 #define ARP_MAX_PINGS           1
 200 
 201 /*
 202  *      When a host is dead, but someone tries to connect it,
 203  *      we do not remove corresponding cache entry (it would
 204  *      be useless, it will be created again immediately)
 205  *      Instead we prolongate interval between broadcasts
 206  *      to ARP_DEAD_RES_TIME.
 207  *      This interval should be not very long.
 208  *      (When the host will be up again, we will notice it only
 209  *      when ARP_DEAD_RES_TIME expires, or when the host will arp us.
 210  */
 211 
 212 #define ARP_DEAD_RES_TIME       (60*HZ)
 213 
/*
 *	This structure defines the ARP mapping cache.
 */

struct arp_table
{
        struct arp_table                *next;                  /* Linked entry list (hash chain or FIFO backlog) */
        unsigned long                   last_used;              /* For expiry                   */
        unsigned long                   last_updated;           /* For expiry                   */
        unsigned int                    flags;                  /* Control status (ATF_* bits)  */
        u32                             ip;                     /* ip address of entry          */
        u32                             mask;                   /* netmask - used for generalised proxy arps (tridge)           */
        unsigned char                   ha[MAX_ADDR_LEN];       /* Hardware address             */
        struct device                   *dev;                   /* Device the entry is tied to  */
        struct hh_cache                 *hh;                    /* Hardware headers chain       */

        /*
         *      The following entries are only used for unresolved hw addresses.
         */
        
        struct timer_list               timer;                  /* expire timer                 */
        int                             retries;                /* remaining retries            */
        struct sk_buff_head             skb;                    /* list of queued packets       */
};
 238 
 239 
 240 static atomic_t arp_size = 0;
 241 
 242 #ifdef CONFIG_ARPD
 243 static int arpd_not_running;
 244 static int arpd_stamp;
 245 #endif
 246 
 247 static unsigned int arp_bh_mask;
 248 
 249 #define ARP_BH_BACKLOG  1
 250 
 251 /*
 252  *      Backlog for ARP updates.
 253  */
 254 static struct arp_table *arp_backlog;
 255 
 256 /*
 257  *      Backlog for incomplete entries.
 258  */
 259 static struct arp_table *arp_req_backlog;
 260 
 261 
 262 static void arp_run_bh(void);
 263 static void arp_check_expire (unsigned long);  
 264 static int  arp_update (u32 sip, char *sha, struct device * dev,
 265             struct arp_table *ientry, int grat);
 266 
 267 static struct timer_list arp_timer =
 268         { NULL, NULL, ARP_CHECK_INTERVAL, 0L, &arp_check_expire };
 269 
 270 /*
 271  * The default arp netmask is just 255.255.255.255 which means it's
 272  * a single machine entry. Only proxy entries can have other netmasks
 273  */
 274 
 275 #define DEF_ARP_NETMASK (~0)
 276 
 277 /*
 278  *      The size of the hash table. Must be a power of two.
 279  */
 280 
 281 #define ARP_TABLE_SIZE          16
 282 #define FULL_ARP_TABLE_SIZE     (ARP_TABLE_SIZE+1)
 283 
 284 struct arp_table *arp_tables[FULL_ARP_TABLE_SIZE] =
 285 {
 286         NULL,
 287 };
 288 
 289 #define arp_proxy_list arp_tables[ARP_TABLE_SIZE]
 290 
 291 /*
 292  *      The last bits in the IP address are used for the cache lookup.
 293  *      A special entry is used for proxy arp entries
 294  */
 295 
 296 #define HASH(paddr)             (htonl(paddr) & (ARP_TABLE_SIZE - 1))
 297 
 298 /*
 299  *      ARP cache semaphore.
 300  *
 301  *      Every time when someone wants to traverse arp table,
 302  *      he MUST call arp_fast_lock.
 303  *      It will guarantee that arp cache list will not change
 304  *      by interrupts and the entry that you found will not
 305  *      disappear unexpectedly.
 306  *      
 307  *      If you want to modify arp cache lists, you MUST
 308  *      call arp_fast_lock, and check that you are the only
 309  *      owner of semaphore (arp_lock == 1). If it is not the case
  310  *      you can defer your operation or forget it,
 311  *      but DO NOT TOUCH lists.
 312  *
 313  *      However, you are allowed to change arp entry contents.
 314  *
 315  *      Assumptions:
 316  *           -- interrupt code MUST have lock/unlock balanced,
 317  *              you cannot lock cache on interrupt and defer unlocking
 318  *              to callback.
 319  *              In particular, it means that lock/unlock are allowed
 320  *              to be non-atomic. They are made atomic, but it was not
 321  *              necessary.
 322  *           -- nobody is allowed to sleep while
 323  *              it keeps arp locked. (route cache has similar locking
 324  *              scheme, but allows sleeping)
 325  *              
 326  */
 327 
 328 static atomic_t arp_lock;
 329 
 330 #define ARP_LOCKED() (arp_lock != 1)
 331 
static __inline__ void arp_fast_lock(void)
{
        /* Take a reference on the ARP cache semaphore.  Any non-zero
         * count keeps the cache lists stable against modification;
         * only the sole holder (arp_lock == 1) may change the lists
         * (see the locking notes above).
         */
        atomic_inc(&arp_lock);
}
 336 
static __inline__ void arp_unlock(void)
{
        /* Drop our reference.  The last unlocker runs the deferred
         * bottom-half work (backlogged entries) if any was queued
         * while the cache was locked.
         */
        if (atomic_dec_and_test(&arp_lock) && arp_bh_mask)
                arp_run_bh();
}
 342 
 343 /*
 344  * Enqueue to FIFO list.
 345  */
 346 
 347 static void arp_enqueue(struct arp_table **q, struct arp_table *entry)
     /* [previous][next][first][last][top][bottom][index][help] */
 348 {
 349         unsigned long flags;
 350         struct arp_table * tail;
 351 
 352         save_flags(flags);
 353         cli();
 354         tail = *q;
 355         if (!tail)
 356                 entry->next = entry;
 357         else
 358         {
 359                 entry->next = tail->next;
 360                 tail->next = entry;
 361         }
 362         *q = entry;
 363         restore_flags(flags);
 364         return;
 365 }
 366 
 367 /*
 368  * Dequeue from FIFO list,
 369  * caller should mask interrupts.
 370  */
 371 
 372 static struct arp_table * arp_dequeue(struct arp_table **q)
     /* [previous][next][first][last][top][bottom][index][help] */
 373 {
 374         struct arp_table * entry;
 375 
 376         if (*q)
 377         {
 378                 entry = (*q)->next;
 379                 (*q)->next = entry->next;
 380                 if (entry->next == entry)
 381                         *q = NULL;
 382                 entry->next = NULL;
 383                 return entry;
 384         }
 385         return NULL;
 386 }
 387 
/*
 * Purge all linked skb's of the entry.
 */

static void arp_purge_send_q(struct arp_table *entry)
{
        struct sk_buff *skb;
        unsigned long flags;

        save_flags(flags);
        cli();
        /* Release the list of `skb' pointers.  Interrupts are re-enabled
         * around each free (dev_kfree_skb may do non-trivial work) and
         * masked again before the queue is touched for the next skb.
         */
        while ((skb = skb_dequeue(&entry->skb)) != NULL)
        {
                skb_device_lock(skb);
                restore_flags(flags);
                dev_kfree_skb(skb, FREE_WRITE);
                cli();
        }
        restore_flags(flags);
        return;
}
 410 
 411 /*
 412  *      Release the entry and all resources linked to it: skb's, hh's, timer
 413  *      and certainly memory.
 414  *      The entry should be already removed from lists.
 415  */
 416 
 417 static void arp_free_entry(struct arp_table *entry)
     /* [previous][next][first][last][top][bottom][index][help] */
 418 {
 419         unsigned long flags;
 420         struct hh_cache *hh, *next;
 421 
 422         del_timer(&entry->timer);
 423         arp_purge_send_q(entry);
 424 
 425         save_flags(flags);
 426         cli();
 427         hh = entry->hh;
 428         entry->hh = NULL;
 429         restore_flags(flags);
 430 
 431         for ( ; hh; hh = next)
 432         {
 433                 next = hh->hh_next;
 434                 hh->hh_uptodate = 0;
 435                 hh->hh_next = NULL;
 436                 hh->hh_arp = NULL;
 437                 if (atomic_dec_and_test(&hh->hh_refcnt))
 438                         kfree_s(hh, sizeof(struct(struct hh_cache)));
 439         }
 440 
 441         kfree_s(entry, sizeof(struct arp_table));
 442         atomic_dec(&arp_size);
 443         return;
 444 }
 445 
 446 /*
 447  *      Hardware header cache.
 448  *
 449  *      BEWARE! Hardware header cache has no locking, so that
 450  *      it requires especially careful handling.
 451  *      It is the only part of arp+route, where a list
 452  *      should be traversed with masked interrupts.
 453  *      Luckily, this list contains one element 8), as rule.
 454  */
 455 
 456 /*
 457  *      How many users has this entry?
 458  *      The answer is reliable only when interrupts are masked.
 459  */
 460 
 461 static __inline__ int arp_count_hhs(struct arp_table * entry)
     /* [previous][next][first][last][top][bottom][index][help] */
 462 {
 463         struct hh_cache *hh;
 464         int count = 0;
 465 
 466         for (hh = entry->hh; hh; hh = hh->hh_next)
 467                 count += hh->hh_refcnt-1;
 468 
 469         return count;
 470 }
 471 
 472 /*
 473  * Signal to device layer, that hardware address may be changed.
 474  */
 475 
 476 static __inline__ void arp_update_hhs(struct arp_table * entry)
     /* [previous][next][first][last][top][bottom][index][help] */
 477 {
 478         struct hh_cache *hh;
 479 
 480         for (hh=entry->hh; hh; hh=hh->hh_next)
 481                 entry->dev->header_cache_update(hh, entry->dev, entry->ha);
 482 }
 483 
 484 /*
 485  *      Invalidate all hh's, so that higher level will not try to use it.
 486  */
 487 
 488 static __inline__ void arp_invalidate_hhs(struct arp_table * entry)
     /* [previous][next][first][last][top][bottom][index][help] */
 489 {
 490         struct hh_cache *hh;
 491 
 492         for (hh=entry->hh; hh; hh=hh->hh_next)
 493                 hh->hh_uptodate = 0;
 494 }
 495 
/*
 *	Atomic attaching new hh entry.
 *	Return 1, if entry has been freed, rather than attached.
 */

static int arp_set_hh(struct hh_cache **hhp, struct hh_cache *hh)
{
        unsigned long flags;
        struct hh_cache *hh1;
        struct arp_table *entry;

        /* Hold a temporary reference on `hh' while we work on it. */
        atomic_inc(&hh->hh_refcnt);

        save_flags(flags);
        cli();
        if ((hh1 = *hhp) == NULL)
        {
                /* Nothing attached yet: install `hh', keeping the
                 * reference taken above, and report "attached".
                 */
                *hhp = hh;
                restore_flags(flags);
                return 0;
        }

        entry = (struct arp_table*)hh->hh_arp;

        /*
         *      An hh1 entry is already attached to this point.
         *      Is it not linked to arp entry? Link it!
         */
        if (!hh1->hh_arp && entry)
        {
                atomic_inc(&hh1->hh_refcnt);
                hh1->hh_next = entry->hh;
                entry->hh = hh1;
                hh1->hh_arp = (void*)entry;
                restore_flags(flags);

                /* Only completed entries carry a valid hardware address. */
                if (entry->flags & ATF_COM)
                        entry->dev->header_cache_update(hh1, entry->dev, entry->ha);
#if RT_CACHE_DEBUG >= 1
                printk("arp_set_hh: %08x is reattached. Good!\n", entry->ip);
#endif
        }
#if RT_CACHE_DEBUG >= 1
        else if (entry)
                printk("arp_set_hh: %08x rr1 ok!\n", entry->ip);
#endif
        /* NOTE(review): if the reattach branch above was taken, flags
         * were already restored once; this second restore is redundant
         * but harmless.
         */
        restore_flags(flags);
        /* Drop our temporary reference, freeing `hh' if it was the last
         * one.  Either way the caller's hh was not attached: return 1.
         */
        if (atomic_dec_and_test(&hh->hh_refcnt))
                kfree_s(hh, sizeof(struct hh_cache));
        return 1;
}
 547 
 548 static __inline__ struct hh_cache * arp_alloc_hh(int htype)
     /* [previous][next][first][last][top][bottom][index][help] */
 549 {
 550         struct hh_cache *hh;
 551         hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
 552         if (hh)
 553         {
 554                 memset(hh, 0, sizeof(struct hh_cache));
 555                 hh->hh_type = htype;
 556         }
 557         return hh;
 558 }
 559 
 560 /*
 561  * Test if a hardware address is all zero
 562  */
 563 
 564 static __inline__ int empty(unsigned char * addr, int len)
     /* [previous][next][first][last][top][bottom][index][help] */
 565 {
 566         while (len > 0)
 567         {
 568                 if (*addr)
 569                         return 0;
 570                 len--;
 571                 addr++;
 572         }
 573         return 1;
 574 }
 575 
 576 
 577 #ifdef CONFIG_ARPD
 578 
/*
 *	Send ARPD message.
 *	Builds an arpd_request (req type, address, device, optional
 *	hardware address and age stamp) and posts it to the arpd
 *	daemon over netlink.  Silently drops the message when the
 *	daemon is absent or memory is tight.
 */
static void arpd_send(int req, u32 addr, struct device * dev, char *ha,
                      unsigned long updated)
{
        int retval;
        struct sk_buff *skb;
        struct arpd_request *arpreq;

        /* Daemon previously marked absent: don't queue for it. */
        if (arpd_not_running)
                return;

        /* Atomic allocation: may be called from timer/interrupt context. */
        skb = alloc_skb(sizeof(struct arpd_request), GFP_ATOMIC);
        if (skb == NULL)
                return;

        skb->free=1;
        arpreq=(struct arpd_request *)skb_put(skb, sizeof(struct arpd_request));
        arpreq->req = req;
        arpreq->ip  = addr;
        arpreq->dev = (unsigned long)dev;
        arpreq->stamp = arpd_stamp;
        arpreq->updated = updated;
        if (ha)
                memcpy(arpreq->ha, ha, sizeof(arpreq->ha));

        retval = netlink_post(NETLINK_ARPD, skb);
        if (retval)
        {
                kfree_skb(skb, FREE_WRITE);
                /* -EUNATCH: nobody is listening; remember arpd is down. */
                if (retval == -EUNATCH)
                        arpd_not_running = 1;
        }
}
 614 
/*
 *	Send ARPD update message.
 *	Reports the entry's current mapping and last-update time
 *	to the arpd daemon.
 */

static __inline__ void arpd_update(struct arp_table * entry)
{
        if (arpd_not_running)
                return;
        arpd_send(ARPD_UPDATE, entry->ip, entry->dev, entry->ha,
                  entry->last_updated);
}
 626 
/*
 *	Send ARPD lookup request.
 *	Asks the daemon whether it has a cached mapping for `addr'
 *	on device `dev'; the answer arrives via arpd_callback.
 */

static __inline__ void arpd_lookup(u32 addr, struct device * dev)
{
        if (arpd_not_running)
                return;
        arpd_send(ARPD_LOOKUP, addr, dev, NULL, 0);
}
 637 
/*
 *	Send ARPD flush message.
 *	Tells the daemon to discard all mappings for device `dev'.
 */

static __inline__ void arpd_flush(struct device * dev)
{
        if (arpd_not_running)
                return;
        arpd_send(ARPD_FLUSH, 0, dev, NULL, 0);
}
 648 
 649 
/*
 *	Receive a reply from the arpd daemon over netlink.
 *	Validates the message, then either broadcasts an ARP request
 *	(missing/empty mapping) or installs the mapping in the cache.
 *	Consumes the skb in every path.
 */
static int arpd_callback(struct sk_buff *skb)
{
        struct device * dev;
        struct arpd_request *retreq;

        /* A message arrived, so the daemon is alive (again). */
        arpd_not_running = 0;

        if (skb->len != sizeof(struct arpd_request))
        {
                kfree_skb(skb, FREE_READ);
                return -EINVAL;
        }

        retreq = (struct arpd_request *)skb->data;
        dev = (struct device*)retreq->dev;

        /* A stale stamp means the reply predates the last flush;
         * discard it (as well as any reply without a device).
         */
        if (retreq->stamp != arpd_stamp || !dev)
        {
                kfree_skb(skb, FREE_READ);
                return -EINVAL;
        }

        if (!retreq->updated || empty(retreq->ha, sizeof(retreq->ha)))
        {
/*
 *      Invalid mapping: drop it and send ARP broadcast.
 */
                arp_send(ARPOP_REQUEST, ETH_P_ARP, retreq->ip, dev, dev->pa_addr, NULL, 
                         dev->dev_addr, NULL);
        }
        else
        {
                arp_fast_lock();
                arp_update(retreq->ip, retreq->ha, dev, NULL, 0);
                arp_unlock();

/*
 *      Old mapping: we cannot trust it, send ARP broadcast to confirm it.
 *      If it will answer, the entry will be updated,
 *      if not ... we are lost. We will use it for ARP_CONFIRM_INTERVAL.
 */
                if (jiffies - retreq->updated < ARPD_TIMEOUT)
                        arp_send(ARPOP_REQUEST, ETH_P_ARP, retreq->ip, dev, dev->pa_addr, NULL, 
                                 dev->dev_addr, NULL);
        }

        kfree_skb(skb, FREE_READ);
        return sizeof(struct arpd_request);
}
 699 
 700 #else
 701 
 702 static __inline__ void arpd_update(struct arp_table * entry)
     /* [previous][next][first][last][top][bottom][index][help] */
 703 {
 704         return;
 705 }
 706 
 707 #endif /* CONFIG_ARPD */
 708 
 709 
 710 
 711 
 712 /*
 713  *      ARP expiration routines.
 714  */
 715 
 716 /*
 717  *      Force the expiry of an entry in the internal cache so the memory
 718  *      can be used for a new request.
 719  */
 720 
 721 static int arp_force_expire(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 722 {
 723         int i;
 724         struct arp_table *entry, **pentry;
 725         struct arp_table **oldest_entry = NULL;
 726         unsigned long oldest_used = ~0;
 727         unsigned long flags;
 728         unsigned long now = jiffies;
 729         int result = 0;
 730 
 731         static last_index;
 732 
 733         if (ARP_LOCKED())
 734                 return 0;
 735 
 736         save_flags(flags);
 737 
 738         if (last_index >= ARP_TABLE_SIZE)
 739                 last_index = 0;
 740 
 741         for (i = 0; i < ARP_TABLE_SIZE; i++, last_index++)
 742         {
 743                 pentry = &arp_tables[last_index & (ARP_TABLE_SIZE-1)];
 744 
 745                 while ((entry = *pentry) != NULL)
 746                 {
 747                         if (!(entry->flags & ATF_PERM))
 748                         {
 749                                 int users;
 750                                 cli();
 751                                 users = arp_count_hhs(entry);
 752 
 753                                 if (!users && now - entry->last_used > ARP_TIMEOUT)
 754                                 {
 755                                         *pentry = entry->next;
 756                                         restore_flags(flags);
 757 #if RT_CACHE_DEBUG >= 2
 758                                         printk("arp_force_expire: %08x expired\n", entry->ip);
 759 #endif
 760                                         arp_free_entry(entry);
 761                                         result++;
 762                                         if (arp_size < ARP_MAXSIZE)
 763                                                 goto done;
 764                                         continue;
 765                                 }
 766                                 restore_flags(flags);
 767                                 if (!users && entry->last_used < oldest_used)
 768                                 {
 769                                         oldest_entry = pentry;
 770                                         oldest_used = entry->last_used;
 771                                 }
 772                         }
 773                         pentry = &entry->next;
 774                 }
 775         }
 776 
 777 done:
 778         if (result || !oldest_entry)
 779                 return result;
 780 
 781         entry = *oldest_entry;
 782         *oldest_entry = entry->next;
 783 #if RT_CACHE_DEBUG >= 2
 784         printk("arp_force_expire: expiring %08x\n", entry->ip);
 785 #endif
 786         arp_free_entry(entry);
 787         return 1;
 788 }
 789 
/*
 *	Check if there are too old entries and remove them. If the ATF_PERM
 *	flag is set, they are always left in the arp cache (permanent entry).
 *	If an entry was not confirmed for ARP_CONFIRM_INTERVAL,
 *	send point-to-point ARP request.
 *	If it will not be confirmed for ARP_CONFIRM_TIMEOUT,
 *	give it to shred by arp_expire_entry.
 */

static void arp_check_expire(unsigned long dummy)
{
        int i;
        unsigned long now = jiffies;

        del_timer(&arp_timer);

#ifdef CONFIG_ARPD
        /* Periodically retry talking to arpd, in case it restarted. */
        arpd_not_running = 0;
#endif

        /* Piggy-back routing cache expiry on the same periodic timer. */
        ip_rt_check_expire();

        arp_fast_lock();

        /* Only walk the table when we are the sole lock holder;
         * otherwise skip this round (we run again in ARP_CHECK_INTERVAL).
         */
        if (!ARP_LOCKED())
        {

                for (i = 0; i < ARP_TABLE_SIZE; i++)
                {
                        struct arp_table *entry, **pentry;
                
                        pentry = &arp_tables[i];

                        while ((entry = *pentry) != NULL)
                        {
                                /* Permanent entries are never expired. */
                                if (entry->flags & ATF_PERM)
                                {
                                        pentry = &entry->next;
                                        continue;
                                }

                                /* hh refcounts are only reliable with
                                 * interrupts masked.
                                 */
                                cli();
                                if (now - entry->last_used > ARP_TIMEOUT
                                    && !arp_count_hhs(entry))
                                {
                                        /* Stale and unreferenced: unlink and free. */
                                        *pentry = entry->next;
                                        sti();
#if RT_CACHE_DEBUG >= 2
                                        printk("arp_expire: %08x expired\n", entry->ip);
#endif
                                        arp_free_entry(entry);
                                        continue;
                                }
                                sti();
                                /* Still in use but unconfirmed too long:
                                 * probe the host with a unicast request and
                                 * arm the per-entry confirmation timer.
                                 */
                                if (entry->last_updated
                                    && now - entry->last_updated > ARP_CONFIRM_INTERVAL
                                    && !(entry->flags & ATF_PERM))
                                {
                                        struct device * dev = entry->dev;
                                        entry->retries = ARP_MAX_TRIES+ARP_MAX_PINGS;
                                        del_timer(&entry->timer);
                                        entry->timer.expires = jiffies + ARP_CONFIRM_TIMEOUT;
                                        add_timer(&entry->timer);
                                        arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip,
                                                 dev, dev->pa_addr, entry->ha,
                                                 dev->dev_addr, NULL);
#if RT_CACHE_DEBUG >= 2
                                        printk("arp_expire: %08x requires confirmation\n", entry->ip);
#endif
                                }
                                pentry = &entry->next;  /* go to next entry */
                        }
                }
        }

        arp_unlock();

        /*
         *      Set the timer again.
         */

        arp_timer.expires = jiffies + ARP_CHECK_INTERVAL;
        add_timer(&arp_timer);
}
 874 
/*
 *      Timer callback: the entry passed via timer.data has not been
 *      resolved within ARP_RES_TIME.  While retries remain, retransmit
 *      the request.  When more than ARP_MAX_TRIES retries were done,
 *      release queued skb's, but do not discard the entry itself if
 *      hardware header caches still refer to it.
 */

static void arp_expire_request (unsigned long arg)
/* [previous][next][first][last][top][bottom][index][help] */
{
        struct arp_table *entry = (struct arp_table *) arg;
        struct arp_table **pentry;      /* link pointer, for unlinking below */
        unsigned long hash;
        unsigned long flags;

        arp_fast_lock();

        save_flags(flags);
        cli();
        del_timer(&entry->timer);

        /*
         *      If the arp table is locked by somebody else, we must not
         *      modify the chains: re-arm the timer for a short (HZ/10)
         *      delay and retry the expire later.
         */
        if (ARP_LOCKED())
        {
#if RT_CACHE_DEBUG >= 1
                printk(KERN_DEBUG "arp_expire_request: %08x deferred\n", entry->ip);
#endif
                entry->timer.expires = jiffies + HZ/10;
                add_timer(&entry->timer);
                restore_flags(flags);
                arp_unlock();
                return;
        }

        /*
         *      Since all timeouts are handled with interrupts enabled, there is a
         *      small chance, that this entry has just been resolved by an incoming
         *      packet. This is the only race condition, but it is handled...
         *
         *      One exception: if entry is COMPLETE but old,
         *      it means that point-to-point ARP ping has been failed
         *      (It really occurs with Cisco 4000 routers)
         *      We should reconfirm it.
         */
        
        if ((entry->flags & ATF_COM) && entry->last_updated
            && jiffies - entry->last_updated <= ARP_CONFIRM_INTERVAL)
        {
                restore_flags(flags);
                arp_unlock();
                return;
        }

        restore_flags(flags);

        /*
         *      Retries remain and the host answered at least once
         *      (last_updated != 0): resend and re-arm the timer.
         */
        if (entry->last_updated && --entry->retries > 0)
        {
                struct device *dev = entry->dev;

#if RT_CACHE_DEBUG >= 2
                printk("arp_expire_request: %08x timed out\n", entry->ip);
#endif
                /* Set new timer. */
                entry->timer.expires = jiffies + ARP_RES_TIME;
                add_timer(&entry->timer);
                /*
                 *      Unicast to the cached hardware address while still in
                 *      the confirmation phase (retries > ARP_MAX_TRIES),
                 *      broadcast (NULL dest) once those pings are used up.
                 */
                arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
                         entry->retries > ARP_MAX_TRIES ? entry->ha : NULL,
                         dev->dev_addr, NULL);
                arp_unlock();
                return;
        }

        /*
         *      The host is really dead.
         */

        arp_purge_send_q(entry);

        cli();
        if (arp_count_hhs(entry))
        {
                /*
                 *      The host is dead, but someone refers to it.
                 *      It is useless to drop this entry just now,
                 *      it will be born again, so that
                 *      we keep it, but slow down retransmitting
                 *      to ARP_DEAD_RES_TIME.
                 */

                struct device *dev = entry->dev;
#if RT_CACHE_DEBUG >= 2
                printk("arp_expire_request: %08x is dead\n", entry->ip);
#endif
                entry->retries = ARP_MAX_TRIES;
                entry->flags &= ~ATF_COM;
                arp_invalidate_hhs(entry);
                restore_flags(flags);

                /*
                 *      Declare the entry dead (last_updated == 0 is the marker).
                 */
                entry->last_updated = 0;
                arpd_update(entry);

                entry->timer.expires = jiffies + ARP_DEAD_RES_TIME;
                add_timer(&entry->timer);
                arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr, 
                         NULL, dev->dev_addr, NULL);
                arp_unlock();
                return;
        }
        restore_flags(flags);

        /*
         *      Dead and unreferenced: mark dead, then unlink from the hash
         *      chain and free.
         */
        entry->last_updated = 0;
        arpd_update(entry);

        hash = HASH(entry->ip);

        pentry = &arp_tables[hash];

        while (*pentry != NULL)
        {
                if (*pentry != entry)
                {
                        pentry = &(*pentry)->next;
                        continue;
                }
                *pentry = entry->next;
#if RT_CACHE_DEBUG >= 2
                printk("arp_expire_request: %08x is killed\n", entry->ip);
#endif
                arp_free_entry(entry);
        }
        arp_unlock();
}
1010 
1011 
1012 /* 
1013  * Allocate memory for a new entry.  If we are at the maximum limit
1014  * of the internal ARP cache, arp_force_expire() an entry.  NOTE:  
1015  * arp_force_expire() needs the cache to be locked, so therefore
1016  * arp_alloc_entry() should only be called with the cache locked too!
1017  */
1018 
1019 static struct arp_table * arp_alloc_entry(void)
     /* [previous][next][first][last][top][bottom][index][help] */
1020 {
1021         struct arp_table * entry;
1022 
1023 
1024         if (arp_size >= ARP_MAXSIZE)
1025                 arp_force_expire();
1026 
1027         entry = (struct arp_table *)
1028                 kmalloc(sizeof(struct arp_table),GFP_ATOMIC);
1029 
1030         if (entry != NULL)
1031         {
1032                 atomic_inc(&arp_size);
1033                 memset(entry, 0, sizeof(struct arp_table));
1034 
1035                 entry->mask = DEF_ARP_NETMASK;
1036                 init_timer(&entry->timer);
1037                 entry->timer.function = arp_expire_request;
1038                 entry->timer.data = (unsigned long)entry;
1039                 entry->last_updated = entry->last_used = jiffies;
1040                 skb_queue_head_init(&entry->skb);
1041         }
1042         return entry;
1043 }
1044 
1045 
1046 
1047 /*
1048  *      Purge a device from the ARP queue
1049  */
1050  
1051 int arp_device_event(struct notifier_block *this, unsigned long event, void *ptr)
     /* [previous][next][first][last][top][bottom][index][help] */
1052 {
1053         struct device *dev=ptr;
1054         int i;
1055         
1056         if (event != NETDEV_DOWN)
1057                 return NOTIFY_DONE;
1058 
1059 #ifdef  CONFIG_ARPD
1060         arpd_flush(dev);
1061         arpd_stamp++;
1062 #endif
1063 
1064         arp_fast_lock();
1065 #if RT_CACHE_DEBUG >= 1  
1066         if (ARP_LOCKED())
1067                 printk("arp_device_event: impossible\n");
1068 #endif
1069 
1070         for (i = 0; i < FULL_ARP_TABLE_SIZE; i++)
1071         {
1072                 struct arp_table *entry;
1073                 struct arp_table **pentry = &arp_tables[i];
1074 
1075                 while ((entry = *pentry) != NULL)
1076                 {
1077                         if (entry->dev == dev)
1078                         {
1079                                 *pentry = entry->next;  /* remove from list */
1080                                 arp_free_entry(entry);
1081                         }
1082                         else
1083                                 pentry = &entry->next;  /* go to next entry */
1084                 }
1085         }
1086         arp_unlock();
1087         return NOTIFY_DONE;
1088 }
1089 
1090 
1091 
/*
 *      Retransmit every skb queued on a (now resolved) entry.
 *      Must only be called on an ATF_COM entry; an incomplete entry is
 *      reported and left untouched.
 */

static void arp_send_q(struct arp_table *entry)
/* [previous][next][first][last][top][bottom][index][help] */
{
        struct sk_buff *skb;

        unsigned long flags;

        /*
         *      Empty the entire queue, building its data up ready to send
         */
        
        if(!(entry->flags&ATF_COM))
        {
                printk("arp_send_q: incomplete entry for %s\n",
                                in_ntoa(entry->ip));
                /* Can't flush the skb, because RFC1122 says to hang on to */
                /* at least one from any unresolved entry.  --MS */
                /* What's happened is that someone has 'unresolved' the entry
                   as we got to use it - this 'can't happen' -- AC */
                return;
        }

        save_flags(flags);
        
        cli();
        while((skb = skb_dequeue(&entry->skb)) != NULL)
        {
                IS_SKB(skb);
                skb_device_lock(skb);
                /* Transmit with interrupts enabled; NOTE(review): the next
                 * skb_dequeue() in the loop condition then also runs with
                 * interrupts on -- presumably skb_dequeue is itself
                 * interrupt-safe here, confirm against skbuff code. */
                restore_flags(flags);
                if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb))
                {
                        /* Header resolved: mark and queue for transmission,
                         * using the owning socket's priority when present. */
                        skb->arp  = 1;
                        if(skb->sk==NULL)
                                dev_queue_xmit(skb, skb->dev, 0);
                        else
                                dev_queue_xmit(skb,skb->dev,skb->sk->priority);
                }
        }
        restore_flags(flags);
}
1136 
1137 
/*
 *      Record that hardware address 'sha' answers for IP 'sip' on 'dev'.
 *      'ientry' is an optional caller-supplied entry to install if no
 *      matching one exists; 'grat' (gratuitous mode) forbids creating a
 *      fresh entry when ientry is NULL.
 *
 *      Returns 1 when an existing entry was found and refreshed (the
 *      caller still owns ientry and must free it -- see arp_run_bh);
 *      returns 0 when ientry/a new entry was installed or backlogged.
 */
static int
arp_update (u32 sip, char *sha, struct device * dev,
/* [previous][next][first][last][top][bottom][index][help] */
            struct arp_table *ientry, int grat)
{
        struct arp_table * entry;
        unsigned long hash;

        hash = HASH(sip);

        /* Look for an existing entry for this (ip, dev) pair. */
        for (entry=arp_tables[hash]; entry; entry = entry->next)
                if (entry->ip == sip && entry->dev == dev)
                        break;

        if (entry)
        {
/*
 *      Entry found; update it only if it is not a permanent entry.
 */
                if (!(entry->flags & ATF_PERM)) 
                {
                        del_timer(&entry->timer);
                        entry->last_updated = jiffies;
                        /* Only touch the hh caches if the address changed. */
                        if (memcmp(entry->ha, sha, dev->addr_len)!=0)
                        {
                                memcpy(entry->ha, sha, dev->addr_len);
                                if (entry->flags & ATF_COM)
                                        arp_update_hhs(entry);
                        }
                        arpd_update(entry);
                }

                if (!(entry->flags & ATF_COM))
                {
/*
 *      This entry was incomplete.  Delete the retransmit timer
 *      and switch to complete status.
 */
                        entry->flags |= ATF_COM;
                        arp_update_hhs(entry);
/* 
 *      Send out waiting packets. We might have problems, if someone is 
 *      manually removing entries right now -- entry might become invalid 
 *      underneath us.
 */
                        arp_send_q(entry);
                }
                return 1;
        }

/*
 *      No entry found.  Need to add a new entry to the arp table.
 */
        entry = ientry;

        /* Gratuitous update with nothing to install: nothing to do. */
        if (grat && !entry)
                return 0;

        if (!entry)
        {
                entry = arp_alloc_entry();
                if (!entry)
                        return 0;

                entry->ip = sip;
                entry->flags = ATF_COM;
                memcpy(entry->ha, sha, dev->addr_len);
                entry->dev = dev;
        }

        entry->last_updated = entry->last_used = jiffies;
        arpd_update(entry);

        /* Install directly when unlocked; otherwise defer to the
         * backlog, processed later by arp_run_bh(). */
        if (!ARP_LOCKED())
        {
                entry->next = arp_tables[hash];
                arp_tables[hash] = entry;
                return 0;
        }
#if RT_CACHE_DEBUG >= 2
        printk("arp_update: %08x backlogged\n", entry->ip);
#endif
        arp_enqueue(&arp_backlog, entry);
        arp_bh_mask |= ARP_BH_BACKLOG;
        return 0;
}
1223 
1224 
1225 
1226 static __inline__ struct arp_table *arp_lookup(u32 paddr, struct device * dev)
     /* [previous][next][first][last][top][bottom][index][help] */
1227 {
1228         struct arp_table *entry;
1229 
1230         for (entry = arp_tables[HASH(paddr)]; entry != NULL; entry = entry->next)
1231                 if (entry->ip == paddr && (!dev || entry->dev == dev))
1232                         return entry;
1233         return NULL;
1234 }
1235 
1236 /*
1237  *      Find an arp mapping in the cache. If not found, return false.
1238  */
1239 
1240 int arp_query(unsigned char *haddr, u32 paddr, struct device * dev)
     /* [previous][next][first][last][top][bottom][index][help] */
1241 {
1242         struct arp_table *entry;
1243 
1244         arp_fast_lock();
1245 
1246         entry = arp_lookup(paddr, dev);
1247 
1248         if (entry != NULL)
1249         {
1250                 entry->last_used = jiffies;
1251                 if (entry->flags & ATF_COM)
1252                 {
1253                         memcpy(haddr, entry->ha, dev->addr_len);
1254                         arp_unlock();
1255                         return 1;
1256                 }
1257         }
1258         arp_unlock();
1259         return 0;
1260 }
1261 
1262 
/*
 *      Handle addresses that never need a real ARP exchange: our own
 *      address, multicast, and broadcast.  Fills 'haddr' and returns 1
 *      when the hint was handled, 0 when normal resolution is required.
 */
static int arp_set_predefined(int addr_hint, unsigned char * haddr, u32 paddr, struct device * dev)
/* [previous][next][first][last][top][bottom][index][help] */
{
        switch (addr_hint)
        {
                case IS_MYADDR:
                        printk(KERN_DEBUG "ARP: arp called for own IP address\n");
                        memcpy(haddr, dev->dev_addr, dev->addr_len);
                        return 1;
#ifdef CONFIG_IP_MULTICAST
                case IS_MULTICAST:
                        if(dev->type==ARPHRD_ETHER || dev->type==ARPHRD_IEEE802)
                        {
                                /* RFC 1112 style mapping: 01:00:5e prefix
                                 * plus the low 23 bits of the IP address. */
                                u32 taddr;
                                haddr[0]=0x01;
                                haddr[1]=0x00;
                                haddr[2]=0x5e;
                                taddr=ntohl(paddr);
                                haddr[5]=taddr&0xff;
                                taddr=taddr>>8;
                                haddr[4]=taddr&0xff;
                                taddr=taddr>>8;
                                haddr[3]=taddr&0x7f;
                                return 1;
                        }
                /*
                 *      If a device does not support multicast broadcast the stuff (eg AX.25 for now)
                 *      -- deliberate fall through to IS_BROADCAST below.
                 */
#endif
                
                case IS_BROADCAST:
                        memcpy(haddr, dev->broadcast, dev->addr_len);
                        return 1;
        }
        return 0;
}
1298 
/*
 *      Create a new unresolved entry for 'paddr' on 'dev', optionally
 *      attaching a hardware-header cache record 'hh' and queueing a
 *      pending 'skb'.  If the table is unlocked the entry is installed
 *      and the first request is sent immediately; otherwise it is put
 *      on the request backlog for arp_run_bh().  Returns the entry, or
 *      NULL if allocation failed.
 */

struct arp_table * arp_new_entry(u32 paddr, struct device *dev, struct hh_cache *hh, struct sk_buff *skb)
/* [previous][next][first][last][top][bottom][index][help] */
{
        struct arp_table *entry;

        entry = arp_alloc_entry();

        if (entry != NULL)
        {
                entry->ip = paddr;
                entry->dev = dev;
                if (hh)
                {
                        /* Cross-link the hh record and take a reference. */
                        entry->hh = hh;
                        atomic_inc(&hh->hh_refcnt);
                        hh->hh_arp = (void*)entry;
                }
                entry->timer.expires = jiffies + ARP_RES_TIME;

                if (skb != NULL)
                {
                        skb_queue_tail(&entry->skb, skb);
                        skb_device_unlock(skb);
                }

                if (!ARP_LOCKED())
                {
                        /* Install in the hash table and start resolving. */
                        unsigned long hash = HASH(paddr);
                        entry->next = arp_tables[hash];
                        arp_tables[hash] = entry;
                        add_timer(&entry->timer);
                        entry->retries = ARP_MAX_TRIES;
#ifdef CONFIG_ARPD
                        if (!arpd_not_running)
                                arpd_lookup(paddr, dev);
                        else
#endif
                                arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, dev->pa_addr, NULL, 
                                         dev->dev_addr, NULL);
                }
                else
                {
                        /* Table busy: defer installation to arp_run_bh(). */
#if RT_CACHE_DEBUG >= 2
                        printk("arp_new_entry: %08x backlogged\n", entry->ip);
#endif
                        arp_enqueue(&arp_req_backlog, entry);
                        arp_bh_mask |= ARP_BH_BACKLOG;
                }
        }
        return entry;
}
1353 
1354 
/*
 *      Find an arp mapping in the cache. If not found, post a request.
 *      Returns 0 when 'haddr' was filled in (resolution complete or
 *      predefined address); returns 1 when resolution is still pending,
 *      in which case 'skb' (if any) has been queued, dropped with an
 *      ICMP error, or freed.
 */

int arp_find(unsigned char *haddr, u32 paddr, struct device *dev,
/* [previous][next][first][last][top][bottom][index][help] */
             u32 saddr, struct sk_buff *skb)
{
        struct arp_table *entry;
        unsigned long hash;

        /* Own/broadcast/multicast addresses need no ARP exchange. */
        if (arp_set_predefined(ip_chk_addr(paddr), haddr, paddr, dev))
        {
                if (skb)
                        skb->arp = 1;
                return 0;
        }

        hash = HASH(paddr);
        arp_fast_lock();

        /*
         *      Find an entry
         */
        entry = arp_lookup(paddr, dev);

        if (entry != NULL)      /* It exists */
        {
                if (entry->flags & ATF_COM)
                {
                        entry->last_used = jiffies;
                        memcpy(haddr, entry->ha, dev->addr_len);
                        if (skb)
                                skb->arp = 1;
                        arp_unlock();
                        return 0;
                }

                /*
                 *      A request was already send, but no reply yet. Thus
                 *      queue the packet with the previous attempt
                 */
                        
                if (skb != NULL)
                {
                        if (entry->last_updated)
                        {
                                skb_queue_tail(&entry->skb, skb);
                                skb_device_unlock(skb);
                        }
                        /*
                         * If last_updated==0 host is dead, so
                         * drop skb's and set socket error.
                         */
                        else
                        {
                                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
                                dev_kfree_skb(skb, FREE_WRITE);
                        }
                }
                arp_unlock();
                return 1;
        }

        /* No entry at all: create an unresolved one (queues skb on
         * success); on allocation failure the skb must be freed here. */
        entry = arp_new_entry(paddr, dev, NULL, skb);

        if (skb != NULL && !entry)
                dev_kfree_skb(skb, FREE_WRITE);

        arp_unlock();
        return 1;
}
1426 
/*
 *      Binding hardware header cache entry.
 *      It is the only really complicated part of arp code.
 *      We have no locking for hh records, so that
 *      all possible race conditions should be resolved by
 *      cli()/sti() pairs.
 *
 *      Important note: hhs never disapear from lists, if ARP_LOCKED,
 *      this fact allows to scan hh lists with enabled interrupts,
 *      but results in generating duplicate hh entries.
 *      It is harmless. (and I've never seen such event)
 *
 *      Returns 0, if hh has been just created, so that
 *      caller should fill it.  Returns 1 when an existing (or already
 *      initialized) hh was bound, or on allocation failure.
 */

int arp_bind_cache(struct hh_cache ** hhp, struct device *dev, unsigned short htype, u32 paddr)
/* [previous][next][first][last][top][bottom][index][help] */
{
        struct arp_table *entry;
        struct hh_cache *hh;
        int addr_hint;
        unsigned long flags;

        save_flags(flags);

        /* Predefined (own/broadcast/multicast) addresses: build a
         * ready-made hh without consulting the arp table. */
        if ((addr_hint = ip_chk_addr(paddr)) != 0)
        {
                unsigned char haddr[MAX_ADDR_LEN];
                if (*hhp)
                        return 1;
                hh = arp_alloc_hh(htype);
                if (!hh)
                        return 1;
                arp_set_predefined(addr_hint, haddr, paddr, dev);
                dev->header_cache_update(hh, dev, haddr);
                return arp_set_hh(hhp, hh);
        }

        arp_fast_lock();

        entry = arp_lookup(paddr, dev);

        if (entry)
        {
                /* Reuse an hh of the right type if the entry has one. */
                for (hh = entry->hh; hh; hh=hh->hh_next)
                        if (hh->hh_type == htype)
                                break;

                if (hh)
                {
                        arp_set_hh(hhp, hh);
                        arp_unlock();
                        return 1;
                }
        }

        hh = arp_alloc_hh(htype);
        if (!hh)
        {
                arp_unlock();
                return 1;
        }

        if (entry)
        {
                /* Entry exists but has no hh of this type: link the new
                 * hh onto the entry with interrupts off. */
                cli();
                hh->hh_arp = (void*)entry;
                hh->hh_next = entry->hh;
                entry->hh = hh;
                atomic_inc(&hh->hh_refcnt);
                restore_flags(flags);

                if (entry->flags & ATF_COM)
                        dev->header_cache_update(hh, dev, entry->ha);

                if (arp_set_hh(hhp, hh))
                {
                        arp_unlock();
                        return 0;
                }

                entry->last_used = jiffies;
                arp_unlock();
                return 0;
        }

        /* No entry yet: create one with the hh pre-attached. */
        entry = arp_new_entry(paddr, dev, hh, NULL);
        if (entry == NULL)
        {
                kfree_s(hh, sizeof(struct hh_cache));
                arp_unlock();
                return 1;
        }

        if (!arp_set_hh(hhp, hh))
        {
                arp_unlock();
                return 0;
        }
        arp_unlock();
        return 1;
}
1530 
/*
 *      Bottom-half worker: drain the two backlog queues that were filled
 *      while the table was locked.  arp_backlog holds completed updates
 *      to merge in; arp_req_backlog holds unresolved entries to install,
 *      or to fold into an entry that appeared in the meantime.
 */
static void arp_run_bh()
/* [previous][next][first][last][top][bottom][index][help] */
{
        unsigned long flags;
        struct arp_table *entry, *entry1;
        struct device  * dev;
        unsigned long hash;
        struct hh_cache *hh;
        u32 sip;

        save_flags(flags);
        cli();
        arp_fast_lock();

        while (arp_bh_mask)
        {
                arp_bh_mask  &= ~ARP_BH_BACKLOG;

                /* Replay deferred updates; arp_update() returns 1 when it
                 * refreshed an existing entry, leaving us to free ours. */
                while ((entry = arp_dequeue(&arp_backlog)) != NULL)
                {
                        restore_flags(flags);
                        if (arp_update(entry->ip, entry->ha, entry->dev, entry, 0))
                                arp_free_entry(entry);
                        cli();
                }

                cli();
                while ((entry = arp_dequeue(&arp_req_backlog)) != NULL)
                {
                        restore_flags(flags);

                        dev = entry->dev;
                        sip = entry->ip;
                        hash = HASH(sip);

                        /* Did an entry for this (ip, dev) appear while we
                         * were backlogged? */
                        for (entry1 = arp_tables[hash]; entry1; entry1 = entry1->next)
                                if (entry1->ip == sip && entry1->dev == dev)
                                        break;

                        if (!entry1)
                        {
                                /* No: install ours and (re)start resolution
                                 * unless it is already complete. */
                                cli();
                                entry->next = arp_tables[hash];
                                arp_tables[hash] = entry;
                                restore_flags(flags);
                                entry->timer.expires = jiffies + ARP_RES_TIME;
                                entry->retries = ARP_MAX_TRIES;
                                entry->last_used = jiffies;
                                if (!(entry->flags & ATF_COM))
                                {
                                        add_timer(&entry->timer);
#ifdef CONFIG_ARPD
                                        if (!arpd_not_running)
                                                arpd_lookup(sip, dev);
                                        else
#endif
                                                arp_send(ARPOP_REQUEST, ETH_P_ARP, sip, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
                                }
#if RT_CACHE_DEBUG >= 1
                                printk(KERN_DEBUG "arp_run_bh: %08x reinstalled\n", sip);
#endif
                        }
                        else
                        {
                                struct sk_buff * skb;
                                struct hh_cache * next;

                                /* Discard entry, but preserve its hh's and
                                 * skb's by grafting them onto entry1.
                                 */
                                cli();
                                for (hh=entry->hh; hh; hh=next)
                                {
                                        next = hh->hh_next;
                                        hh->hh_next = entry1->hh;
                                        entry1->hh = hh;
                                        hh->hh_arp = (void*)entry1;
                                }
                                entry->hh = NULL;

                                /* Prune skb list from entry
                                 * and graft it to entry1.
                                 */
                                while ((skb = skb_dequeue(&entry->skb)) != NULL)
                                {
                                        skb_device_lock(skb);
                                        restore_flags(flags);
                                        skb_queue_tail(&entry1->skb, skb);
                                        skb_device_unlock(skb);
                                        cli();
                                }
                                restore_flags(flags);
                                
                                arp_free_entry(entry);

                                /* If the surviving entry is resolved, push
                                 * the grafted hhs/skbs out right away. */
                                if (entry1->flags & ATF_COM)
                                {
                                        arp_update_hhs(entry1);
                                        arp_send_q(entry1);
                                }
                        }
                        cli();
                }
                cli();
        }
        arp_unlock();
        restore_flags(flags);
}
1638 
1639 
/*
 *      Interface to link layer: send routine and receive handler.
 */

/*
 *      Create and send an arp packet. If (dest_hw == NULL), we create a broadcast
 *      message.  src_hw must be non-NULL (it is copied into the sender
 *      hardware-address field below); a NULL target_hw yields a zeroed
 *      target field, as used in requests.
 */

void arp_send(int type, int ptype, u32 dest_ip, 
/* [previous][next][first][last][top][bottom][index][help] */
              struct device *dev, u32 src_ip, 
              unsigned char *dest_hw, unsigned char *src_hw,
              unsigned char *target_hw)
{
        struct sk_buff *skb;
        struct arphdr *arp;
        unsigned char *arp_ptr;

        /*
         *      No arp on this interface.
         */
        
        if (dev->flags&IFF_NOARP)
                return;

        /*
         *      Allocate a buffer: arp header plus two (hw addr, 4-byte IP)
         *      pairs, plus room for the link-layer header.
         */
        
        skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
                                + dev->hard_header_len, GFP_ATOMIC);
        if (skb == NULL)
        {
                printk("ARP: no memory to send an arp packet\n");
                return;
        }
        skb_reserve(skb, dev->hard_header_len);
        arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4));
        skb->arp = 1;
        skb->dev = dev;
        skb->free = 1;
        skb->protocol = htons (ETH_P_IP);

        /*
         *      Fill the device header for the ARP frame.
         *      (NOTE(review): src_hw?src_hw:NULL is just src_hw.)
         */

        dev->hard_header(skb,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len);

        /* Fill out the arp protocol part. */
        arp->ar_hrd = htons(dev->type);
#ifdef CONFIG_AX25
#ifdef CONFIG_NETROM
        /* AX.25 and NET/ROM carry IP under a different protocol id. */
        arp->ar_pro = (dev->type == ARPHRD_AX25 || dev->type == ARPHRD_NETROM) ? htons(AX25_P_IP) : htons(ETH_P_IP);
#else
        arp->ar_pro = (dev->type != ARPHRD_AX25) ? htons(ETH_P_IP) : htons(AX25_P_IP);
#endif
#else
        arp->ar_pro = htons(ETH_P_IP);
#endif
        arp->ar_hln = dev->addr_len;
        arp->ar_pln = 4;
        arp->ar_op = htons(type);

        /* Variable part: sender hw, sender IP, target hw, target IP. */
        arp_ptr=(unsigned char *)(arp+1);

        memcpy(arp_ptr, src_hw, dev->addr_len);
        arp_ptr+=dev->addr_len;
        memcpy(arp_ptr, &src_ip,4);
        arp_ptr+=4;
        if (target_hw != NULL)
                memcpy(arp_ptr, target_hw, dev->addr_len);
        else
                memset(arp_ptr, 0, dev->addr_len);
        arp_ptr+=dev->addr_len;
        memcpy(arp_ptr, &dest_ip, 4);

        dev_queue_xmit(skb, dev, 0);
}
1719 
1720 
1721 /*
1722  *      Receive an arp request by the device layer.
1723  */
1724 
1725 int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
     /* [previous][next][first][last][top][bottom][index][help] */
1726 {
1727 /*
1728  *      We shouldn't use this type conversion. Check later.
1729  */
1730         
1731         struct arphdr *arp = (struct arphdr *)skb->h.raw;
1732         unsigned char *arp_ptr= (unsigned char *)(arp+1);
1733         unsigned char *sha,*tha;
1734         u32 sip,tip;
1735         
1736 /*
1737  *      The hardware length of the packet should match the hardware length
1738  *      of the device.  Similarly, the hardware types should match.  The
1739  *      device should be ARP-able.  Also, if pln is not 4, then the lookup
1740  *      is not from an IP number.  We can't currently handle this, so toss
1741  *      it. 
1742  */  
1743         if (arp->ar_hln != dev->addr_len    || 
1744                 dev->type != ntohs(arp->ar_hrd) || 
1745                 dev->flags & IFF_NOARP          ||
1746                 arp->ar_pln != 4)
1747         {
1748                 kfree_skb(skb, FREE_READ);
1749                 return 0;
1750                 /* Should this be an error/printk?  Seems like something */
1751                 /* you'd want to know about. Unless it's just !IFF_NOARP. -- MS */
1752         }
1753 
1754 /*
1755  *      Another test.
1756  *      The logic here is that the protocol being looked up by arp should 
1757  *      match the protocol the device speaks.  If it doesn't, there is a
1758  *      problem, so toss the packet.
1759  */
1760 /* Again, should this be an error/printk? -- MS */
1761 
1762         switch (dev->type)
1763         {
1764 #ifdef CONFIG_AX25
1765                 case ARPHRD_AX25:
1766                         if(arp->ar_pro != htons(AX25_P_IP))
1767                         {
1768                                 kfree_skb(skb, FREE_READ);
1769                                 return 0;
1770                         }
1771                         break;
1772 #endif
1773 #ifdef CONFIG_NETROM
1774                 case ARPHRD_NETROM:
1775                         if(arp->ar_pro != htons(AX25_P_IP))
1776                         {
1777                                 kfree_skb(skb, FREE_READ);
1778                                 return 0;
1779                         }
1780                         break;
1781 #endif
1782                 case ARPHRD_ETHER:
1783                 case ARPHRD_ARCNET:
1784                 case ARPHRD_METRICOM:
1785                         if(arp->ar_pro != htons(ETH_P_IP))
1786                         {
1787                                 kfree_skb(skb, FREE_READ);
1788                                 return 0;
1789                         }
1790                         break;
1791 
1792                 case ARPHRD_IEEE802:
1793                         if(arp->ar_pro != htons(ETH_P_IP))
1794                         {
1795                                 kfree_skb(skb, FREE_READ);
1796                                 return 0;
1797                         }
1798                         break;
1799 
1800                 default:
1801                         printk("ARP: dev->type mangled!\n");
1802                         kfree_skb(skb, FREE_READ);
1803                         return 0;
1804         }
1805 
1806 /*
1807  *      Extract fields
1808  */
1809 
1810         sha=arp_ptr;
1811         arp_ptr += dev->addr_len;
1812         memcpy(&sip, arp_ptr, 4);
1813         arp_ptr += 4;
1814         tha=arp_ptr;
1815         arp_ptr += dev->addr_len;
1816         memcpy(&tip, arp_ptr, 4);
1817   
1818 /* 
1819  *      Check for bad requests for 127.x.x.x and requests for multicast
1820  *      addresses.  If this is one such, delete it.
1821  */
1822         if (LOOPBACK(tip) || MULTICAST(tip))
1823         {
1824                 kfree_skb(skb, FREE_READ);
1825                 return 0;
1826         }
1827 
1828 /*
1829  *  Process entry.  The idea here is we want to send a reply if it is a
1830  *  request for us or if it is a request for someone else that we hold
1831  *  a proxy for.  We want to add an entry to our cache if it is a reply
1832  *  to us or if it is a request for our address.  
1833  *  (The assumption for this last is that if someone is requesting our 
1834  *  address, they are probably intending to talk to us, so it saves time 
1835  *  if we cache their address.  Their address is also probably not in 
1836  *  our cache, since ours is not in their cache.)
1837  * 
1838  *  Putting this another way, we only care about replies if they are to
1839  *  us, in which case we add them to the cache.  For requests, we care
1840  *  about those for us and those for our proxies.  We reply to both,
1841  *  and in the case of requests for us we add the requester to the arp 
1842  *  cache.
1843  */
1844 
1845 /*
1846  *      try to switch to alias device whose addr is tip or closest to sip.
1847  */
1848 
1849 #ifdef CONFIG_NET_ALIAS
1850         if (tip != dev->pa_addr && net_alias_has(skb->dev)) 
1851         {
1852                 /*
1853                  *      net_alias_dev_rcv_sel32 returns main dev if it fails to found other.
1854                  */
1855                 dev = net_alias_dev_rcv_sel32(dev, AF_INET, sip, tip);
1856 
1857                 if (dev->type != ntohs(arp->ar_hrd) || dev->flags & IFF_NOARP)
1858                 {
1859                         kfree_skb(skb, FREE_READ);
1860                         return 0;
1861                 }
1862         }
1863 #endif
1864 
1865         if (arp->ar_op == htons(ARPOP_REQUEST))
1866         { 
1867 
1868 /*
1869  * Only reply for the real device address or when it's in our proxy tables
1870  */
1871                 if (tip != dev->pa_addr)
1872                 {
1873                         struct arp_table *proxy_entry;
1874 
1875 /*
1876  *      To get in here, it is a request for someone else.  We need to
1877  *      check if that someone else is one of our proxies.  If it isn't,
1878  *      we can toss it.
1879  *
1880  *      Make "longest match" lookup, a la routing.
1881  */
1882 
1883                         arp_fast_lock();
1884 
1885                         for (proxy_entry = arp_proxy_list; proxy_entry;
1886                              proxy_entry = proxy_entry->next)
1887                         {
1888                                 if (proxy_entry->dev == dev &&
1889                                     !((proxy_entry->ip^tip)&proxy_entry->mask))
1890                                         break;
1891                         }
1892 
1893                         if (proxy_entry && (proxy_entry->mask || ((dev->pa_addr^tip)&dev->pa_mask)))
1894                         {
1895                                 char ha[MAX_ADDR_LEN];
1896                                 struct rtable * rt;
1897 
1898                                 /* Unlock arp tables to make life for
1899                                  * ip_rt_route easy. Note, that we are obliged
1900                                  * to make local copy of hardware address.
1901                                  */
1902 
1903                                 memcpy(ha, proxy_entry->ha, dev->addr_len);
1904                                 arp_unlock();
1905 
1906                                 rt = ip_rt_route(tip, 0);
1907                                 if (rt  && rt->rt_dev != dev)
1908                                         arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,ha,sha);
1909                                 ip_rt_put(rt);
1910 
1911                         }
1912                         else
1913                                 arp_unlock();
1914                 }
1915                 else
1916                         arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
1917 
1918 /*
1919  *      Handle gratuitous arp.
1920  */
1921                 arp_fast_lock();
1922                 arp_update(sip, sha, dev, NULL, 1);
1923                 arp_unlock();
1924                 kfree_skb(skb, FREE_READ);
1925                 return 0;
1926         }
1927 
1928         arp_fast_lock();
1929         arp_update(sip, sha, dev, NULL, ip_chk_addr(tip) != IS_MYADDR);
1930         arp_unlock();
1931         kfree_skb(skb, FREE_READ);
1932         return 0;
1933 }
1934 
1935 
1936 
1937 /*
1938  *      User level interface (ioctl, /proc)
1939  */
1940 
1941 /*
1942  *      Set (create) an ARP cache entry.
1943  */
1944 
1945 static int arp_req_set(struct arpreq *r, struct device * dev)
     /* [previous][next][first][last][top][bottom][index][help] */
1946 {
1947         struct arp_table *entry, **entryp;
1948         struct sockaddr_in *si;
1949         unsigned char *ha;
1950         u32 ip;
1951         u32 mask = DEF_ARP_NETMASK;
1952         unsigned long flags;
1953 
1954         /*
1955          *      Extract netmask (if supplied).
1956          */
1957 
1958         if (r->arp_flags&ATF_NETMASK)
1959         {
1960                 si = (struct sockaddr_in *) &r->arp_netmask;
1961                 mask = si->sin_addr.s_addr;
1962         }
1963 
1964         /*
1965          *      Extract destination.
1966          */
1967         
1968         si = (struct sockaddr_in *) &r->arp_pa;
1969         ip = si->sin_addr.s_addr;
1970 
1971 
1972         if (r->arp_flags&ATF_PUBL)
1973         {
1974                 if (!mask && ip)
1975                         return -EINVAL;
1976                 if (!dev)
1977                         dev = dev_getbytype(r->arp_ha.sa_family);
1978         }
1979         else
1980         {
1981                 if (ip_chk_addr(ip))
1982                         return -EINVAL;
1983                 if (!dev)
1984                 {
1985                         struct rtable * rt;
1986                         rt = ip_rt_route(ip, 0);
1987                         if (!rt)
1988                                 return -ENETUNREACH;
1989                         dev = rt->rt_dev;
1990                         ip_rt_put(rt);
1991                 }
1992         }
1993         if (!dev || (dev->flags&(IFF_LOOPBACK|IFF_NOARP)))
1994                 return -ENODEV;
1995 
1996         if (r->arp_ha.sa_family != dev->type)   
1997                 return -EINVAL;
1998                 
1999         arp_fast_lock();
2000 #if RT_CACHE_DEBUG >= 1
2001         if (ARP_LOCKED())
2002                 printk("arp_req_set: bug\n");
2003 #endif
2004 
2005         if (!(r->arp_flags & ATF_PUBL))
2006                 entryp = &arp_tables[HASH(ip)];
2007         else
2008                 entryp = &arp_proxy_list;
2009 
2010         while ((entry = *entryp) != NULL)
2011         {
2012                 if (entry->ip == ip && entry->mask == mask && entry->dev == dev)
2013                         break;
2014                 if ((entry->mask & mask) != mask)
2015                 {
2016                         entry = NULL;
2017                         break;
2018                 }
2019                 entryp = &entry->next;
2020         }
2021 
2022         /*
2023          *      Do we need to create a new entry?
2024          */
2025         
2026         if (entry == NULL)
2027         {
2028                 entry = arp_alloc_entry();
2029                 if (entry == NULL)
2030                 {
2031                         arp_unlock();
2032                         return -ENOMEM;
2033                 }
2034                 entry->ip = ip;
2035                 entry->dev = dev;
2036                 entry->mask = mask;
2037                 entry->flags = r->arp_flags;
2038 
2039                 entry->next = (*entryp)->next;
2040                 *entryp = entry;
2041         }
2042 
2043         ha = r->arp_ha.sa_data;
2044         if (empty(ha, dev->addr_len))
2045                 ha = dev->dev_addr;
2046 
2047         save_flags(flags);
2048         cli();
2049         memcpy(entry->ha, ha, dev->addr_len);
2050         entry->last_updated = entry->last_used = jiffies;
2051         entry->flags |= ATF_COM;
2052         restore_flags(flags);
2053         arpd_update(entry);
2054         arp_update_hhs(entry);
2055         arp_unlock();
2056         return 0;
2057 }
2058 
2059 
2060 
2061 /*
2062  *      Get an ARP cache entry.
2063  */
2064 
2065 static int arp_req_get(struct arpreq *r, struct device *dev)
     /* [previous][next][first][last][top][bottom][index][help] */
2066 {
2067         struct arp_table *entry;
2068         struct sockaddr_in *si;
2069         u32 mask = DEF_ARP_NETMASK;
2070 
2071         if (r->arp_flags&ATF_NETMASK)
2072         {
2073                 si = (struct sockaddr_in *) &r->arp_netmask;
2074                 mask = si->sin_addr.s_addr;
2075         }
2076 
2077         si = (struct sockaddr_in *) &r->arp_pa;
2078 
2079         arp_fast_lock();
2080 #if RT_CACHE_DEBUG >= 1
2081         if (ARP_LOCKED())
2082                 printk("arp_req_set: impossible\n");
2083 #endif
2084 
2085         if (!(r->arp_flags & ATF_PUBL))
2086                 entry = arp_tables[HASH(si->sin_addr.s_addr)];
2087         else
2088                 entry = arp_proxy_list;
2089 
2090         for ( ; entry ;entry = entry->next)
2091         {
2092                 if (entry->ip == si->sin_addr.s_addr 
2093                     && (!dev || entry->dev == dev)
2094                     && (!(r->arp_flags&ATF_NETMASK) || entry->mask == mask))
2095                 {
2096                         memcpy(r->arp_ha.sa_data, entry->ha, entry->dev->addr_len);
2097                         r->arp_ha.sa_family = entry->dev->type;
2098                         r->arp_flags = entry->flags;
2099                         strncpy(r->arp_dev, entry->dev->name, sizeof(r->arp_dev));
2100                         arp_unlock();
2101                         return 0;
2102                 }
2103         }
2104 
2105         arp_unlock();
2106         return -ENXIO;
2107 }
2108 
2109 static int arp_req_delete(struct arpreq *r, struct device * dev)
     /* [previous][next][first][last][top][bottom][index][help] */
2110 {
2111         struct sockaddr_in      *si;
2112         struct arp_table        *entry, **entryp;
2113         int     retval = -ENXIO;
2114         u32     mask = DEF_ARP_NETMASK;
2115 
2116         if (r->arp_flags&ATF_NETMASK)
2117         {
2118                 si = (struct sockaddr_in *) &r->arp_netmask;
2119                 mask = si->sin_addr.s_addr;
2120         }
2121 
2122         si = (struct sockaddr_in *) &r->arp_pa;
2123 
2124         arp_fast_lock();
2125 #if RT_CACHE_DEBUG >= 1
2126         if (ARP_LOCKED())
2127                 printk("arp_req_delete: impossible\n");
2128 #endif
2129 
2130         if (!(r->arp_flags & ATF_PUBL))
2131                 entryp = &arp_tables[HASH(si->sin_addr.s_addr)];
2132         else
2133                 entryp = &arp_proxy_list;
2134 
2135         while ((entry = *entryp) != NULL)
2136         {
2137                 if (entry->ip == si->sin_addr.s_addr 
2138                     && (!dev || entry->dev == dev)
2139                     && (!(r->arp_flags&ATF_NETMASK) || entry->mask == mask))
2140                 {
2141                         *entryp = entry->next;
2142                         arp_free_entry(entry);
2143                         retval = 0;
2144                         continue;
2145                 }
2146                 entryp = &entry->next;
2147         }
2148 
2149         arp_unlock();
2150         return retval;
2151 }
2152 
2153 /*
2154  *      Handle an ARP layer I/O control request.
2155  */
2156 
2157 int arp_ioctl(unsigned int cmd, void *arg)
     /* [previous][next][first][last][top][bottom][index][help] */
2158 {
2159         int err;
2160         struct arpreq r;
2161 
2162         struct device * dev = NULL;
2163 
2164         switch(cmd)
2165         {
2166                 case SIOCDARP:
2167                 case SIOCSARP:
2168                         if (!suser())
2169                                 return -EPERM;
2170                 case SIOCGARP:
2171                         err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq));
2172                         if (err)
2173                                 return err;
2174                         memcpy_fromfs(&r, arg, sizeof(struct arpreq));
2175                         break;
2176                 case OLD_SIOCDARP:
2177                 case OLD_SIOCSARP:
2178                         if (!suser())
2179                                 return -EPERM;
2180                 case OLD_SIOCGARP:
2181                         err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq_old));
2182                         if (err)
2183                                 return err;
2184                         memcpy_fromfs(&r, arg, sizeof(struct arpreq_old));
2185                         memset(&r.arp_dev, 0, sizeof(r.arp_dev));
2186                         break;
2187                 default:
2188                         return -EINVAL;
2189         }
2190 
2191         if (r.arp_pa.sa_family != AF_INET)
2192                 return -EPFNOSUPPORT;
2193 
2194         if (!(r.arp_flags & ATF_PUBL))
2195                 r.arp_flags &= ~ATF_NETMASK;
2196         if (!(r.arp_flags & ATF_NETMASK))
2197                 ((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr=DEF_ARP_NETMASK;
2198 
2199         if (r.arp_dev[0])
2200         {
2201                 if ((dev = dev_get(r.arp_dev)) == NULL)
2202                         return -ENODEV;
2203 
2204                 if (!r.arp_ha.sa_family)
2205                         r.arp_ha.sa_family = dev->type;
2206                 else if (r.arp_ha.sa_family != dev->type)
2207                         return -EINVAL;
2208         }
2209 
2210         switch(cmd)
2211         {
2212                 case SIOCDARP:
2213                         return arp_req_delete(&r, dev);
2214                 case SIOCSARP:
2215                         return arp_req_set(&r, dev);
2216                 case OLD_SIOCDARP:
2217                         /* old  SIOCDARP destroys both
2218                          * normal and proxy mappings
2219                          */
2220                         r.arp_flags &= ~ATF_PUBL;
2221                         err = arp_req_delete(&r, dev);
2222                         r.arp_flags |= ATF_PUBL;
2223                         if (!err)
2224                                 arp_req_delete(&r, dev);
2225                         else
2226                                 err = arp_req_delete(&r, dev);
2227                         return err;
2228                 case OLD_SIOCSARP:
2229                         err = arp_req_set(&r, dev);
2230                         /* old SIOCSARP works so funny,
2231                          * that its behaviour can be emulated
2232                          * only approximately 8).
2233                          * It should work. --ANK
2234                          */
2235                         if (r.arp_flags & ATF_PUBL)
2236                         {       
2237                                 r.arp_flags &= ~ATF_PUBL;
2238                                 arp_req_delete(&r, dev);
2239                         }
2240                         return err;
2241                 case SIOCGARP:
2242                         err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq));
2243                         if (err)
2244                                 return err;
2245                         err = arp_req_get(&r, dev);
2246                         if (!err)
2247                                 memcpy_tofs(arg, &r, sizeof(r));
2248                         return err;
2249                 case OLD_SIOCGARP:
2250                         err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq_old));
2251                         if (err)
2252                                 return err;
2253                         r.arp_flags &= ~ATF_PUBL;
2254                         err = arp_req_get(&r, dev);
2255                         if (err < 0)
2256                         {
2257                                 r.arp_flags |= ATF_PUBL;
2258                                 err = arp_req_get(&r, dev);
2259                         }
2260                         if (!err)
2261                                 memcpy_tofs(arg, &r, sizeof(struct arpreq_old));
2262                         return err;
2263         }
2264         /*NOTREACHED*/
2265         return 0;
2266 }
2267 
2268 /*
2269  *      Write the contents of the ARP cache to a PROCfs file.
2270  */
2271 
2272 #define HBUFFERLEN 30
2273 
2274 int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
     /* [previous][next][first][last][top][bottom][index][help] */
2275 {
2276         int len=0;
2277         off_t pos=0;
2278         int size;
2279         struct arp_table *entry;
2280         char hbuffer[HBUFFERLEN];
2281         int i,j,k;
2282         const char hexbuf[] =  "0123456789ABCDEF";
2283 
2284         size = sprintf(buffer,"IP address       HW type     Flags       HW address            Mask     Device\n");
2285 
2286         pos+=size;
2287         len+=size;
2288 
2289         arp_fast_lock();
2290 
2291         for(i=0; i<FULL_ARP_TABLE_SIZE; i++)
2292         {
2293                 for(entry=arp_tables[i]; entry!=NULL; entry=entry->next)
2294                 {
2295 /*
2296  *      Convert hardware address to XX:XX:XX:XX ... form.
2297  */
2298 #ifdef CONFIG_AX25
2299 #ifdef CONFIG_NETROM
2300                         if (entry->dev->type == ARPHRD_AX25 || entry->dev->type == ARPHRD_NETROM)
2301                              strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
2302                         else {
2303 #else
2304                         if(entry->dev->type==ARPHRD_AX25)
2305                              strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
2306                         else {
2307 #endif
2308 #endif
2309 
2310                         for(k=0,j=0;k<HBUFFERLEN-3 && j<entry->dev->addr_len;j++)
2311                         {
2312                                 hbuffer[k++]=hexbuf[ (entry->ha[j]>>4)&15 ];
2313                                 hbuffer[k++]=hexbuf[  entry->ha[j]&15     ];
2314                                 hbuffer[k++]=':';
2315                         }
2316                         hbuffer[--k]=0;
2317         
2318 #ifdef CONFIG_AX25
2319                         }
2320 #endif
2321                         size = sprintf(buffer+len,
2322                                 "%-17s0x%-10x0x%-10x%s",
2323                                 in_ntoa(entry->ip),
2324                                 (unsigned int)entry->dev->type,
2325                                 entry->flags,
2326                                 hbuffer);
2327 #if RT_CACHE_DEBUG < 2
2328                         size += sprintf(buffer+len+size,
2329                                  "     %-17s %s\n",
2330                                  entry->mask==DEF_ARP_NETMASK ?
2331                                  "*" : in_ntoa(entry->mask), entry->dev->name);
2332 #else
2333                         size += sprintf(buffer+len+size,
2334                                  "     %-17s %s\t%d\t%1d\n",
2335                                  entry->mask==DEF_ARP_NETMASK ?
2336                                  "*" : in_ntoa(entry->mask), entry->dev->name, 
2337                                  entry->hh ? entry->hh->hh_refcnt : -1,
2338                                  entry->hh ? entry->hh->hh_uptodate : 0);
2339 #endif
2340         
2341                         len += size;
2342                         pos += size;
2343                   
2344                         if (pos <= offset)
2345                                 len=0;
2346                         if (pos >= offset+length)
2347                                 goto done;
2348                 }
2349         }
2350 done:
2351         arp_unlock();
2352   
2353         *start = buffer+len-(pos-offset);       /* Start of wanted data */
2354         len = pos-offset;                       /* Start slop */
2355         if (len>length)
2356                 len = length;                   /* Ending slop */
2357         return len;
2358 }
2359 
2360 
2361 
2362 /*
2363  *      Called once on startup.
2364  */
2365 
/* Packet handler registration for ARP frames; the type field is filled
 * in at runtime by arp_init() because __constant_htons did not fold to
 * a constant here.
 */
static struct packet_type arp_packet_type =
{
	0,	/* Should be: __constant_htons(ETH_P_ARP) - but this _doesn't_ come out constant! */
	NULL,		/* All devices */
	arp_rcv,	/* receive handler */
	NULL,
	NULL
};
2374 
/* Notifier so arp_device_event() can flush entries when a device goes down. */
static struct notifier_block arp_dev_notifier={
	arp_device_event,	/* callback */
	NULL,			/* next in chain (filled by register) */
	0			/* priority */
};
2380 
/*
 *	Called once on startup: wires the ARP subsystem into the network
 *	stack — packet handler, expiry timer, device notifier, /proc entry
 *	and (optionally) the arpd netlink channel.
 */
void arp_init (void)
{
	/* Register the packet type (type computed at runtime, see above). */
	arp_packet_type.type=htons(ETH_P_ARP);
	dev_add_pack(&arp_packet_type);
	/* Start with the regular checks for expired arp entries. */
	add_timer(&arp_timer);
	/* Register for device down reports */
	register_netdevice_notifier(&arp_dev_notifier);

#ifdef CONFIG_PROC_FS
	/* /proc/net/arp, read-only for everyone, served by arp_get_info(). */
	proc_net_register(&(struct proc_dir_entry) {
		PROC_NET_ARP, 3, "arp",
		S_IFREG | S_IRUGO, 1, 0, 0,
		0, &proc_net_inode_operations,
		arp_get_info
	});
#endif

#ifdef CONFIG_ARPD
	/* User-space arpd cache daemon hookup. */
	netlink_attach(NETLINK_ARPD, arpd_callback);
#endif
}

/* [previous][next][first][last][top][bottom][index][help] */