1 /* linux/net/inet/arp.c
2 *
3 * Copyright (C) 1994 by Florian La Roche
4 *
5 * This module implements the Address Resolution Protocol ARP (RFC 826),
6 * which is used to convert IP addresses (or in the future maybe other
 7 * high-level addresses) into a low-level hardware address (like an Ethernet
8 * address).
9 *
10 * FIXME:
11 * Experiment with better retransmit timers
12 * Clean up the timer deletions
 13 * If you create a proxy entry, set your interface address to the address
14 * and then delete it, proxies may get out of sync with reality - check this
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 *
21 *
22 * Fixes:
23 * Alan Cox : Removed the ethernet assumptions in Florian's code
24 * Alan Cox : Fixed some small errors in the ARP logic
25 * Alan Cox : Allow >4K in /proc
26 * Alan Cox : Make ARP add its own protocol entry
27 *
28 * Ross Martin : Rewrote arp_rcv() and arp_get_info()
29 * Stephen Henson : Add AX25 support to arp_get_info()
30 * Alan Cox : Drop data when a device is downed.
31 * Alan Cox : Use init_timer().
32 * Alan Cox : Double lock fixes.
33 * Martin Seine : Move the arphdr structure
34 * to if_arp.h for compatibility.
35 * with BSD based programs.
36 * Andrew Tridgell : Added ARP netmask code and
37 * re-arranged proxy handling.
38 * Alan Cox : Changed to use notifiers.
39 * Niibe Yutaka : Reply for this device or proxies only.
40 * Alan Cox : Don't proxy across hardware types!
41 * Jonathan Naylor : Added support for NET/ROM.
42 * Mike Shaver : RFC1122 checks.
43 * Jonathan Naylor : Only lookup the hardware address for
44 * the correct hardware type.
45 * Germano Caronni : Assorted subtle races.
46 * Craig Schlenter : Don't modify permanent entry
47 * during arp_rcv.
48 * Russ Nelson : Tidied up a few bits.
49 * Alexey Kuznetsov: Major changes to caching and behaviour,
50 * eg intelligent arp probing and generation
51 * of host down events.
52 * Alan Cox : Missing unlock in device events.
53 * Eckes : ARP ioctl control errors.
54 */
55
56 /* RFC1122 Status:
57 2.3.2.1 (ARP Cache Validation):
58 MUST provide mechanism to flush stale cache entries (OK)
59 SHOULD be able to configure cache timeout (NOT YET)
60 MUST throttle ARP retransmits (OK)
61 2.3.2.2 (ARP Packet Queue):
62 SHOULD save at least one packet from each "conversation" with an
63 unresolved IP address. (OK)
64 950727 -- MS
65 */
66
67 #include <linux/types.h>
68 #include <linux/string.h>
69 #include <linux/kernel.h>
70 #include <linux/sched.h>
71 #include <linux/config.h>
72 #include <linux/socket.h>
73 #include <linux/sockios.h>
74 #include <linux/errno.h>
75 #include <linux/if_arp.h>
76 #include <linux/in.h>
77 #include <linux/mm.h>
78 #include <linux/inet.h>
79 #include <linux/netdevice.h>
80 #include <linux/etherdevice.h>
81 #include <linux/trdevice.h>
82 #include <linux/skbuff.h>
83 #include <linux/proc_fs.h>
84 #include <linux/stat.h>
85
86 #include <net/ip.h>
87 #include <net/icmp.h>
88 #include <net/route.h>
89 #include <net/protocol.h>
90 #include <net/tcp.h>
91 #include <net/sock.h>
92 #include <net/arp.h>
93 #ifdef CONFIG_AX25
94 #include <net/ax25.h>
95 #ifdef CONFIG_NETROM
96 #include <net/netrom.h>
97 #endif
98 #endif
99
100 #include <asm/system.h>
101 #include <asm/segment.h>
102
103 #include <stdarg.h>
104
105 /*
 106 * This structure defines the ARP mapping cache. While we are making changes
 107 * to this structure, we keep interrupts off. But normally we can copy the
 108 * hardware address and the device pointer into local variables and then
 109 * make any "long calls" to send a packet out.
110 */
111
112 struct arp_table
113 {
114 struct arp_table *next; /* Linked entry list */
115 unsigned long last_used; /* For expiry */
116 unsigned long last_updated; /* For expiry */
117 unsigned int flags; /* Control status */
118 u32 ip; /* ip address of entry */
119 u32 mask; /* netmask - used for generalised proxy arps (tridge) */
120 unsigned char ha[MAX_ADDR_LEN]; /* Hardware address */
121 struct device *dev; /* Device the entry is tied to */
122
123 /*
124 * The following entries are only used for unresolved hw addresses.
125 */
126
127 struct timer_list timer; /* expire timer */
128 int retries; /* remaining retries */
129 struct sk_buff_head skb; /* list of queued packets */
130 struct hh_cache *hh;
131 };
132
133
134 /*
 135 * Configurable Parameters (don't touch unless you know what you are doing)
136 */
137
138 /*
 139 * If an arp request is sent, ARP_RES_TIME is the timeout value until the
 140 * next request is sent.
141 * RFC1122: OK. Throttles ARPing, as per 2.3.2.1. (MUST)
142 * The recommended minimum timeout is 1 second per destination.
 143 * This timeout is prolonged to ARP_DEAD_RES_TIME if the
 144 * destination does not respond.
145 */
146
147 #define ARP_RES_TIME (5*HZ)
148 #define ARP_DEAD_RES_TIME (60*HZ)
149
150 /*
 151 * The number of times an arp request is sent before the host is
152 * considered temporarily unreachable.
153 */
154
155 #define ARP_MAX_TRIES 3
156
157 /*
158 * After that time, an unused entry is deleted from the arp table.
159 */
160
161 #define ARP_TIMEOUT (600*HZ)
162
163 /*
 164 * How often the function 'arp_check_expire' is called.
165 * An unused entry is invalidated in the time between ARP_TIMEOUT and
166 * (ARP_TIMEOUT+ARP_CHECK_INTERVAL).
167 */
168
169 #define ARP_CHECK_INTERVAL (60*HZ)
170
171 /*
 172 * The entry is reconfirmed by sending a point-to-point ARP
 173 * request after ARP_CONFIRM_INTERVAL. If the destination does not respond
 174 * within ARP_CONFIRM_TIMEOUT, the normal broadcast resolution scheme is started.
175 */
176
177 #define ARP_CONFIRM_INTERVAL (300*HZ)
178 #define ARP_CONFIRM_TIMEOUT ARP_RES_TIME
179
180 static unsigned long arp_lock;
181 static unsigned long arp_bh_mask;
182
183 #define ARP_BH_BACKLOG 1
184
185 static struct arp_table *arp_backlog;
186
187 static void arp_run_bh(void);
188 static void arp_check_expire (unsigned long);
189
190 static struct timer_list arp_timer =
191 { NULL, NULL, ARP_CHECK_INTERVAL, 0L, &arp_check_expire };
192
193 /*
194 * The default arp netmask is just 255.255.255.255 which means it's
195 * a single machine entry. Only proxy entries can have other netmasks
196 */
197
198 #define DEF_ARP_NETMASK (~0)
199
200 /*
201 * The size of the hash table. Must be a power of two.
202 * Maybe we should remove hashing in the future for arp and concentrate
203 * on Patrick Schaaf's Host-Cache-Lookup...
204 */
205
206 #define ARP_TABLE_SIZE 16
207 #define FULL_ARP_TABLE_SIZE (ARP_TABLE_SIZE+1)
208
209 struct arp_table *arp_tables[FULL_ARP_TABLE_SIZE] =
210 {
211 NULL,
212 };
213
214 #define arp_proxy_list arp_tables[ARP_TABLE_SIZE]
215
216 /*
217 * The last bits in the IP address are used for the cache lookup.
218 * A special entry is used for proxy arp entries
219 */
220
221 #define HASH(paddr) (htonl(paddr) & (ARP_TABLE_SIZE - 1))
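/*
 * Illustrative sketch (added for clarity, not part of the original code):
 * how HASH() selects a chain. The address is stored in network byte
 * order, so htonl() recovers host order and the low bits of the final
 * octet pick one of the ARP_TABLE_SIZE chains -- e.g. 10.0.0.23 lands
 * in chain 23 & 15 == 7.
 */
#if 0
static struct arp_table *arp_hash_example(u32 paddr)
{
	return arp_tables[HASH(paddr)];		/* head of the selected chain */
}
#endif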
222
223 /*
224 * Lock/unlock arp_table chains.
225 */
226
227 static __inline__ void arp_fast_lock(void)
228 {
229 ATOMIC_INCR(&arp_lock);
230 }
231
232 static __inline__ void arp_fast_unlock(void)
233 {
234 ATOMIC_DECR(&arp_lock);
235 }
236
237 static __inline__ void arp_unlock(void)
238 {
239 if (!ATOMIC_DECR_AND_CHECK(&arp_lock) && arp_bh_mask)
240 arp_run_bh();
241 }
242
243 /*
244 * Enqueue to FIFO list.
245 */
246
247 static void arp_enqueue(struct arp_table **q, struct arp_table *entry)
248 {
249 unsigned long flags;
250 struct arp_table * tail;
251
252 save_flags(flags);
253 cli();
254 tail = *q;
255 if (!tail)
256 entry->next = entry;
257 else
258 {
259 entry->next = tail->next;
260 tail->next = entry;
261 }
262 *q = entry;
263 restore_flags(flags);
264 return;
265 }
266
267 /*
268 * Dequeue from FIFO list,
269 * caller should mask interrupts.
270 */
271
272 static struct arp_table * arp_dequeue(struct arp_table **q)
273 {
274 struct arp_table * entry;
275
276 if (*q)
277 {
278 entry = (*q)->next;
279 (*q)->next = entry->next;
280 if (entry->next == entry)
281 *q = NULL;
282 entry->next = NULL;
283 return entry;
284 }
285 return NULL;
286 }
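/*
 * Usage sketch (added for illustration, not part of the original code):
 * the queue head pointer (e.g. arp_backlog) always points at the tail,
 * and tail->next is the oldest element, so enqueue/dequeue give FIFO
 * order. Interrupts are masked around arp_dequeue() as its comment
 * requires.
 */
#if 0
static void arp_backlog_example(struct arp_table *entry)
{
	struct arp_table *oldest;

	arp_enqueue(&arp_backlog, entry);	/* append behind the tail */
	cli();
	oldest = arp_dequeue(&arp_backlog);	/* remove the oldest entry */
	sti();
}
#endif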
287
288 /*
289 * Purge all linked skb's of the entry.
290 */
291
292 static void arp_release_entry(struct arp_table *entry)
293 {
294 struct sk_buff *skb;
295 unsigned long flags;
296
297 save_flags(flags);
298 cli();
299 /* Release the list of `skb' pointers. */
300 while ((skb = skb_dequeue(&entry->skb)) != NULL)
301 {
302 skb_device_lock(skb);
303 restore_flags(flags);
304 dev_kfree_skb(skb, FREE_WRITE);
305 cli();
306 }
307 restore_flags(flags);
308 return;
309 }
310
311 /*
312 * Release the entry and all resources linked to it: skb's, hh's, timer
313 * and certainly memory.
314 */
315
316 static void arp_free_entry(struct arp_table *entry)
317 {
318 unsigned long flags;
319 struct hh_cache *hh, *next;
320
321 del_timer(&entry->timer);
322
323 save_flags(flags);
324 cli();
325 arp_release_entry(entry);
326
327 for (hh = entry->hh; hh; hh = next)
328 {
329 next = hh->hh_next;
330 hh->hh_arp = NULL;
331 if (!--hh->hh_refcnt)
 332 kfree_s(hh, sizeof(struct hh_cache));
333 }
334 restore_flags(flags);
335
336 kfree_s(entry, sizeof(struct arp_table));
337 return;
338 }
339
340 /*
341 * How many users has this entry?
342 */
343
344 static __inline__ int arp_count_hhs(struct arp_table * entry)
345 {
346 struct hh_cache *hh, **hhp;
347 int count = 0;
348
349 hhp = &entry->hh;
350 while ((hh=*hhp) != NULL)
351 {
352 if (hh->hh_refcnt == 1)
353 {
354 *hhp = hh->hh_next;
355 kfree_s(hh, sizeof(struct hh_cache));
356 continue;
357 }
358 count += hh->hh_refcnt-1;
359 hhp = &hh->hh_next;
360 }
361
362 return count;
363 }
364
365 /*
366 * Invalidate all hh's, so that higher level will not try to use it.
367 */
368
369 static __inline__ void arp_invalidate_hhs(struct arp_table * entry)
370 {
371 struct hh_cache *hh;
372
373 for (hh=entry->hh; hh; hh=hh->hh_next)
374 hh->hh_uptodate = 0;
375 }
376
377 /*
378 * Signal to device layer, that hardware address may be changed.
379 */
380
381 static __inline__ void arp_update_hhs(struct arp_table * entry)
382 {
383 struct hh_cache *hh;
384
385 for (hh=entry->hh; hh; hh=hh->hh_next)
386 entry->dev->header_cache_update(hh, entry->dev, entry->ha);
387 }
388
389 /*
 390 * Check for entries that are too old and remove them. If the ATF_PERM
 391 * flag is set, they are always left in the arp cache (permanent entries).
 392 * If an entry has not been confirmed for ARP_CONFIRM_INTERVAL,
 393 * declare it invalid and send a point-to-point ARP request.
 394 * If it is still not confirmed within ARP_CONFIRM_TIMEOUT,
 395 * it is shredded by arp_expire_request.
396 */
397
398 static void arp_check_expire(unsigned long dummy)
399 {
400 int i;
401 unsigned long now = jiffies;
402
403 del_timer(&arp_timer);
404
405 if (!arp_lock)
406 {
407 arp_fast_lock();
408
409 for (i = 0; i < ARP_TABLE_SIZE; i++)
410 {
411 struct arp_table *entry;
412 struct arp_table **pentry;
413
414 pentry = &arp_tables[i];
415
416 while ((entry = *pentry) != NULL)
417 {
418 cli();
419 if (now - entry->last_used > ARP_TIMEOUT
420 && !(entry->flags & ATF_PERM)
421 && !arp_count_hhs(entry))
422 {
423 *pentry = entry->next;
424 sti();
425 #if RT_CACHE_DEBUG >= 2
426 printk("arp_expire: %08x expired\n", entry->ip);
427 #endif
428 arp_free_entry(entry);
429 }
430 else if (entry->last_updated
431 && now - entry->last_updated > ARP_CONFIRM_INTERVAL
432 && !(entry->flags & ATF_PERM))
433 {
434 struct device * dev = entry->dev;
435 pentry = &entry->next;
436 entry->flags &= ~ATF_COM;
437 arp_invalidate_hhs(entry);
438 sti();
439 entry->retries = ARP_MAX_TRIES+1;
440 del_timer(&entry->timer);
441 entry->timer.expires = jiffies + ARP_CONFIRM_TIMEOUT;
442 add_timer(&entry->timer);
443 arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip,
444 dev, dev->pa_addr, entry->ha,
445 dev->dev_addr, NULL);
446 #if RT_CACHE_DEBUG >= 2
447 printk("arp_expire: %08x requires confirmation\n", entry->ip);
448 #endif
449 }
450 else
451 pentry = &entry->next; /* go to next entry */
452 }
453 }
454 arp_unlock();
455 }
456
457 ip_rt_check_expire();
458
459 /*
460 * Set the timer again.
461 */
462
463 arp_timer.expires = jiffies + ARP_CHECK_INTERVAL;
464 add_timer(&arp_timer);
465 }
466
467 /*
468 * This function is called, if an entry is not resolved in ARP_RES_TIME.
469 * When more than MAX_ARP_TRIES retries was done, release queued skb's,
470 * but not discard entry itself if it is in use.
471 */
472
473 static void arp_expire_request (unsigned long arg)
474 {
475 struct arp_table *entry = (struct arp_table *) arg;
476 struct arp_table **pentry;
477 unsigned long hash;
478 unsigned long flags;
479
480 save_flags(flags);
481 cli();
482
483 /*
484 * Since all timeouts are handled with interrupts enabled, there is a
 485 * small chance that this entry has just been resolved by an incoming
486 * packet. This is the only race condition, but it is handled...
487 */
488
489 if (entry->flags & ATF_COM)
490 {
491 restore_flags(flags);
492 return;
493 }
494
495 if (arp_lock)
496 {
497 #if RT_CACHE_DEBUG >= 1
498 printk("arp_expire_request: %08x postponed\n", entry->ip);
499 #endif
500 del_timer(&entry->timer);
501 entry->timer.expires = jiffies + HZ/10;
502 add_timer(&entry->timer);
503 restore_flags(flags);
504 return;
505 }
506
507 arp_fast_lock();
508 restore_flags(flags);
509
510 if (entry->last_updated && --entry->retries > 0)
511 {
512 struct device *dev = entry->dev;
513
514 #if RT_CACHE_DEBUG >= 2
515 printk("arp_expire_request: %08x timed out\n", entry->ip);
516 #endif
517 /* Set new timer. */
518 del_timer(&entry->timer);
519 entry->timer.expires = jiffies + ARP_RES_TIME;
520 add_timer(&entry->timer);
521 arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
522 NULL, dev->dev_addr, NULL);
523 arp_unlock();
524 return;
525 }
526
527 arp_release_entry(entry);
528
529 cli();
530 if (arp_count_hhs(entry))
531 {
532 struct device *dev = entry->dev;
533 #if RT_CACHE_DEBUG >= 2
534 printk("arp_expire_request: %08x is dead\n", entry->ip);
535 #endif
536 arp_release_entry(entry);
537 entry->retries = ARP_MAX_TRIES;
538 restore_flags(flags);
539 entry->last_updated = 0;
540 del_timer(&entry->timer);
541 entry->timer.expires = jiffies + ARP_DEAD_RES_TIME;
542 add_timer(&entry->timer);
543 arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
544 NULL, dev->dev_addr, NULL);
545 arp_unlock();
546 return;
547 }
548 restore_flags(flags);
549
550 hash = HASH(entry->ip);
551
552 pentry = &arp_tables[hash];
553
554 while (*pentry != NULL)
555 {
556 if (*pentry == entry)
557 {
558 cli();
559 *pentry = entry->next;
560 restore_flags(flags);
561 #if RT_CACHE_DEBUG >= 2
562 printk("arp_expire_request: %08x is killed\n", entry->ip);
563 #endif
564 arp_free_entry(entry);
565 arp_unlock();
566 return;
567 }
568 pentry = &(*pentry)->next;
569 }
570 printk("arp_expire_request: bug: ARP entry is lost!\n");
571 arp_unlock();
572 }
573
574 /*
575 * Purge a device from the ARP queue
576 */
577
578 int arp_device_event(struct notifier_block *this, unsigned long event, void *ptr)
579 {
580 struct device *dev=ptr;
581 int i;
582
583 if (event != NETDEV_DOWN)
584 return NOTIFY_DONE;
585 /*
586 * This is a bit OTT - maybe we need some arp semaphores instead.
587 */
588
589 #if RT_CACHE_DEBUG >= 1
590 if (arp_lock)
591 printk("arp_device_event: bug\n");
592 #endif
593 arp_fast_lock();
594
595 for (i = 0; i < FULL_ARP_TABLE_SIZE; i++)
596 {
597 struct arp_table *entry;
598 struct arp_table **pentry = &arp_tables[i];
599
600 while ((entry = *pentry) != NULL)
601 {
602 if (entry->dev == dev)
603 {
604 *pentry = entry->next; /* remove from list */
605 arp_free_entry(entry);
606 }
607 else
608 pentry = &entry->next; /* go to next entry */
609 }
610 }
611 arp_unlock();
612 return NOTIFY_DONE;
613 }
614
615
616 /*
617 * Create and send an arp packet. If (dest_hw == NULL), we create a broadcast
618 * message.
619 */
620
621 void arp_send(int type, int ptype, u32 dest_ip,
622 struct device *dev, u32 src_ip,
623 unsigned char *dest_hw, unsigned char *src_hw,
624 unsigned char *target_hw)
625 {
626 struct sk_buff *skb;
627 struct arphdr *arp;
628 unsigned char *arp_ptr;
629
630 /*
631 * No arp on this interface.
632 */
633
634 if (dev->flags&IFF_NOARP)
635 return;
636
637 /*
638 * Allocate a buffer
639 */
640
641 skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
642 + dev->hard_header_len, GFP_ATOMIC);
643 if (skb == NULL)
644 {
645 printk("ARP: no memory to send an arp packet\n");
646 return;
647 }
648 skb_reserve(skb, dev->hard_header_len);
649 arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4));
650 skb->arp = 1;
651 skb->dev = dev;
652 skb->free = 1;
653 skb->protocol = htons (ETH_P_IP);
654
655 /*
656 * Fill the device header for the ARP frame
657 */
658
659 dev->hard_header(skb,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len);
660
661 /* Fill out the arp protocol part. */
662 arp->ar_hrd = htons(dev->type);
663 #ifdef CONFIG_AX25
664 #ifdef CONFIG_NETROM
665 arp->ar_pro = (dev->type == ARPHRD_AX25 || dev->type == ARPHRD_NETROM) ? htons(AX25_P_IP) : htons(ETH_P_IP);
666 #else
667 arp->ar_pro = (dev->type != ARPHRD_AX25) ? htons(ETH_P_IP) : htons(AX25_P_IP);
668 #endif
669 #else
670 arp->ar_pro = htons(ETH_P_IP);
671 #endif
672 arp->ar_hln = dev->addr_len;
673 arp->ar_pln = 4;
674 arp->ar_op = htons(type);
675
676 arp_ptr=(unsigned char *)(arp+1);
677
678 memcpy(arp_ptr, src_hw, dev->addr_len);
679 arp_ptr+=dev->addr_len;
680 memcpy(arp_ptr, &src_ip,4);
681 arp_ptr+=4;
682 if (target_hw != NULL)
683 memcpy(arp_ptr, target_hw, dev->addr_len);
684 else
685 memset(arp_ptr, 0, dev->addr_len);
686 arp_ptr+=dev->addr_len;
687 memcpy(arp_ptr, &dest_ip, 4);
688
689 dev_queue_xmit(skb, dev, 0);
690 }
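/*
 * Layout sketch (added for illustration): the ARP payload built above,
 * for an Ethernet/IP pair (ar_hln == 6, ar_pln == 4), is
 *
 *	struct arphdr		hardware/protocol types, lengths, opcode
 *	unsigned char sha[6]	sender hardware address (src_hw)
 *	unsigned char sip[4]	sender IP address (src_ip)
 *	unsigned char tha[6]	target hardware address (zero in requests)
 *	unsigned char tip[4]	target IP address (dest_ip)
 */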
691
692 /*
693 * This will try to retransmit everything on the queue.
694 */
695
696 static void arp_send_q(struct arp_table *entry)
697 {
698 struct sk_buff *skb;
699
700 unsigned long flags;
701
702 /*
703 * Empty the entire queue, building its data up ready to send
704 */
705
706 if(!(entry->flags&ATF_COM))
707 {
708 printk("arp_send_q: incomplete entry for %s\n",
709 in_ntoa(entry->ip));
710 /* Can't flush the skb, because RFC1122 says to hang on to */
711 /* at least one from any unresolved entry. --MS */
 712 /* What's happened is that someone has 'unresolved' the entry
 713 just as we got to use it - this 'can't happen' -- AC */
714 return;
715 }
716
717 save_flags(flags);
718
719 cli();
720 while((skb = skb_dequeue(&entry->skb)) != NULL)
721 {
722 IS_SKB(skb);
723 skb_device_lock(skb);
724 restore_flags(flags);
725 if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb))
726 {
727 skb->arp = 1;
728 if(skb->sk==NULL)
729 dev_queue_xmit(skb, skb->dev, 0);
730 else
731 dev_queue_xmit(skb,skb->dev,skb->sk->priority);
732 }
733 }
734 restore_flags(flags);
735 }
736
737
738 /*
739 * Delete an ARP mapping entry in the cache.
740 */
741
742 static void arp_destroy(struct arp_table * entry)
743 {
744 struct arp_table *entry1;
745 struct arp_table **pentry;
746
747 if (entry->flags & ATF_PUBL)
748 pentry = &arp_proxy_list;
749 else
750 pentry = &arp_tables[HASH(entry->ip)];
751
752 while ((entry1 = *pentry) != NULL)
753 {
754 if (entry1 == entry)
755 {
756 *pentry = entry1->next;
757 del_timer(&entry->timer);
758 arp_free_entry(entry);
759 return;
760 }
761 pentry = &entry1->next;
762 }
763 }
764
765 /*
 766 * Receive an arp request from the device layer. Maybe this should be
 767 * rewritten to use the incoming packet for the reply. The cost of the
 768 * current "overhead" isn't that high...
769 */
770
771 int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
772 {
773 /*
774 * We shouldn't use this type conversion. Check later.
775 */
776
777 struct arphdr *arp = (struct arphdr *)skb->h.raw;
778 unsigned char *arp_ptr= (unsigned char *)(arp+1);
779 struct arp_table *entry;
780 struct arp_table *proxy_entry;
781 unsigned long hash;
782 unsigned char ha[MAX_ADDR_LEN]; /* So we can enable ints again. */
783 unsigned char *sha,*tha;
784 u32 sip,tip;
785
786 /*
787 * The hardware length of the packet should match the hardware length
788 * of the device. Similarly, the hardware types should match. The
789 * device should be ARP-able. Also, if pln is not 4, then the lookup
790 * is not from an IP number. We can't currently handle this, so toss
791 * it.
792 */
793 if (arp->ar_hln != dev->addr_len ||
794 dev->type != ntohs(arp->ar_hrd) ||
795 dev->flags & IFF_NOARP ||
796 arp->ar_pln != 4)
797 {
798 kfree_skb(skb, FREE_READ);
799 return 0;
800 /* Should this be an error/printk? Seems like something */
801 /* you'd want to know about. Unless it's just !IFF_NOARP. -- MS */
802 }
803
804 /*
805 * Another test.
806 * The logic here is that the protocol being looked up by arp should
807 * match the protocol the device speaks. If it doesn't, there is a
808 * problem, so toss the packet.
809 */
810 /* Again, should this be an error/printk? -- MS */
811
812 switch (dev->type)
813 {
814 #ifdef CONFIG_AX25
815 case ARPHRD_AX25:
816 if(arp->ar_pro != htons(AX25_P_IP))
817 {
818 kfree_skb(skb, FREE_READ);
819 return 0;
820 }
821 break;
822 #endif
823 #ifdef CONFIG_NETROM
824 case ARPHRD_NETROM:
825 if(arp->ar_pro != htons(AX25_P_IP))
826 {
827 kfree_skb(skb, FREE_READ);
828 return 0;
829 }
830 break;
831 #endif
832 case ARPHRD_ETHER:
833 case ARPHRD_ARCNET:
834 if(arp->ar_pro != htons(ETH_P_IP))
835 {
836 kfree_skb(skb, FREE_READ);
837 return 0;
838 }
839 break;
840
841 case ARPHRD_IEEE802:
842 if(arp->ar_pro != htons(ETH_P_IP))
843 {
844 kfree_skb(skb, FREE_READ);
845 return 0;
846 }
847 break;
848
849 default:
850 printk("ARP: dev->type mangled!\n");
851 kfree_skb(skb, FREE_READ);
852 return 0;
853 }
854
855 /*
856 * Extract fields
857 */
858
859 sha=arp_ptr;
860 arp_ptr += dev->addr_len;
861 memcpy(&sip, arp_ptr, 4);
862 arp_ptr += 4;
863 tha=arp_ptr;
864 arp_ptr += dev->addr_len;
865 memcpy(&tip, arp_ptr, 4);
866
867 /*
868 * Check for bad requests for 127.x.x.x and requests for multicast
869 * addresses. If this is one such, delete it.
870 */
871 if (LOOPBACK(tip) || MULTICAST(tip))
872 {
873 kfree_skb(skb, FREE_READ);
874 return 0;
875 }
876
877 /*
878 * Process entry. The idea here is we want to send a reply if it is a
879 * request for us or if it is a request for someone else that we hold
880 * a proxy for. We want to add an entry to our cache if it is a reply
881 * to us or if it is a request for our address.
882 * (The assumption for this last is that if someone is requesting our
883 * address, they are probably intending to talk to us, so it saves time
884 * if we cache their address. Their address is also probably not in
885 * our cache, since ours is not in their cache.)
886 *
887 * Putting this another way, we only care about replies if they are to
888 * us, in which case we add them to the cache. For requests, we care
889 * about those for us and those for our proxies. We reply to both,
890 * and in the case of requests for us we add the requester to the arp
891 * cache.
892 */
893
894 if (arp->ar_op == htons(ARPOP_REQUEST))
895 {
896 /*
897 * Only reply for the real device address or when it's in our proxy tables
898 */
899 if (tip != dev->pa_addr)
900 {
901 /*
902 * To get in here, it is a request for someone else. We need to
903 * check if that someone else is one of our proxies. If it isn't,
904 * we can toss it.
905 */
906 arp_fast_lock();
907
908 for (proxy_entry=arp_proxy_list;
909 proxy_entry;
910 proxy_entry = proxy_entry->next)
911 {
912 /* we will respond to a proxy arp request
913 if the masked arp table ip matches the masked
914 tip. This allows a single proxy arp table
915 entry to be used on a gateway machine to handle
916 all requests for a whole network, rather than
917 having to use a huge number of proxy arp entries
 918 and having to keep them up to date.
919 */
920 if (proxy_entry->dev == dev &&
921 !((proxy_entry->ip^tip)&proxy_entry->mask))
922 break;
923
924 }
925 if (proxy_entry)
926 {
927 memcpy(ha, proxy_entry->ha, dev->addr_len);
928 arp_unlock();
929 arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,ha, sha);
930 kfree_skb(skb, FREE_READ);
931 return 0;
932 }
933 else
934 {
935 arp_unlock();
936 kfree_skb(skb, FREE_READ);
937 return 0;
938 }
939 }
940 else
941 {
942 /*
943 * To get here, it must be an arp request for us. We need to reply.
944 */
945 arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
946 }
947 }
948 /*
949 * It is now an arp reply.
950 */
951 if(ip_chk_addr(tip)!=IS_MYADDR)
952 {
953 /*
954 * Replies to other machines get tossed.
955 */
956 kfree_skb(skb, FREE_READ);
957 return 0;
958 }
959 /*
960 * Now all replies are handled. Next, anything that falls through to here
961 * needs to be added to the arp cache, or have its entry updated if it is
962 * there.
963 */
964
965 arp_fast_lock();
966
967 hash = HASH(sip);
968
969 for (entry=arp_tables[hash]; entry; entry=entry->next)
970 if (entry->ip == sip && entry->dev == dev)
971 break;
972
973 if (entry)
974 {
975 /*
976 * Entry found; update it only if it is not a permanent entry.
977 */
978 if (!(entry->flags & ATF_PERM)) {
979 memcpy(entry->ha, sha, dev->addr_len);
980 entry->last_updated = jiffies;
981 }
982 if (!(entry->flags & ATF_COM))
983 {
984 /*
985 * This entry was incomplete. Delete the retransmit timer
986 * and switch to complete status.
987 */
988 del_timer(&entry->timer);
989 entry->flags |= ATF_COM;
990 arp_update_hhs(entry);
991 /*
992 * Send out waiting packets. We might have problems, if someone is
993 * manually removing entries right now -- entry might become invalid
994 * underneath us.
995 */
996 arp_send_q(entry);
997 }
998 }
999 else
1000 {
1001 /*
1002 * No entry found. Need to add a new entry to the arp table.
1003 */
1004 entry = (struct arp_table *)kmalloc(sizeof(struct arp_table),GFP_ATOMIC);
1005 if(entry == NULL)
1006 {
1007 arp_unlock();
1008 printk("ARP: no memory for new arp entry\n");
1009 kfree_skb(skb, FREE_READ);
1010 return 0;
1011 }
1012
1013 entry->mask = DEF_ARP_NETMASK;
1014 entry->ip = sip;
1015 entry->flags = ATF_COM;
1016 entry->hh = NULL;
1017 init_timer(&entry->timer);
1018 entry->timer.function = arp_expire_request;
1019 entry->timer.data = (unsigned long)entry;
1020 memcpy(entry->ha, sha, dev->addr_len);
1021 entry->last_updated = entry->last_used = jiffies;
1022 entry->dev = skb->dev;
1023 skb_queue_head_init(&entry->skb);
1024 if (arp_lock == 1)
1025 {
1026 entry->next = arp_tables[hash];
1027 arp_tables[hash] = entry;
1028 }
1029 else
1030 {
1031 #if RT_CACHE_DEBUG >= 1
1032 printk("arp_rcv: %08x backlogged\n", entry->ip);
1033 #endif
1034 arp_enqueue(&arp_backlog, entry);
1035 arp_bh_mask |= ARP_BH_BACKLOG;
1036 }
1037 }
1038
1039 /*
1040 * Replies have been sent, and entries have been added. All done.
1041 */
1042 kfree_skb(skb, FREE_READ);
1043 arp_unlock();
1044 return 0;
1045 }
1046
1047 /*
1048 * Lookup ARP entry by (addr, dev) pair.
1049 * Flags: ATF_PUBL - search for proxy entries
1050 * ATF_NETMASK - search for proxy network entry.
1051 * NOTE: should be called with locked ARP tables.
1052 */
1053
1054 static struct arp_table *arp_lookup(u32 paddr, unsigned short flags, struct device * dev)
1055 {
1056 struct arp_table *entry;
1057
1058 if (!(flags & ATF_PUBL))
1059 {
1060 for (entry = arp_tables[HASH(paddr)];
1061 entry != NULL; entry = entry->next)
1062 if (entry->ip == paddr && (!dev || entry->dev == dev))
1063 break;
1064 return entry;
1065 }
1066
1067 if (!(flags & ATF_NETMASK))
1068 {
1069 for (entry = arp_proxy_list;
1070 entry != NULL; entry = entry->next)
1071 if (entry->ip == paddr && (!dev || entry->dev == dev))
1072 break;
1073 return entry;
1074 }
1075
1076 for (entry=arp_proxy_list; entry != NULL; entry = entry->next)
1077 if (!((entry->ip^paddr)&entry->mask) &&
1078 (!dev || entry->dev == dev))
1079 break;
1080 return entry;
1081 }
1082
1083 /*
1084 * Find an arp mapping in the cache. If not found, return false.
1085 */
1086
1087 int arp_query(unsigned char *haddr, u32 paddr, struct device * dev)
1088 {
1089 struct arp_table *entry;
1090
1091 arp_fast_lock();
1092
1093 entry = arp_lookup(paddr, 0, dev);
1094
1095 if (entry != NULL)
1096 {
1097 entry->last_used = jiffies;
1098 if (entry->flags & ATF_COM)
1099 {
1100 memcpy(haddr, entry->ha, dev->addr_len);
1101 arp_unlock();
1102 return 1;
1103 }
1104 }
1105 arp_unlock();
1106 return 0;
1107 }
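/*
 * Usage sketch (added for illustration; the helper name is made up):
 * arp_query() only consults the cache and never queues or transmits,
 * so it suits callers that already hold a buffer for the address.
 */
#if 0
static int arp_query_example(struct device *dev, u32 ip)
{
	unsigned char ha[MAX_ADDR_LEN];

	if (arp_query(ha, ip, dev))
		return 1;	/* ha now holds the resolved hardware address */
	return 0;		/* not in the cache, or still unresolved */
}
#endif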
1108
1109
1110 static int arp_set_predefined(int addr_hint, unsigned char * haddr, __u32 paddr, struct device * dev)
1111 {
1112 switch (addr_hint)
1113 {
1114 case IS_MYADDR:
1115 printk("ARP: arp called for own IP address\n");
1116 memcpy(haddr, dev->dev_addr, dev->addr_len);
1117 return 1;
1118 #ifdef CONFIG_IP_MULTICAST
1119 case IS_MULTICAST:
1120 if(dev->type==ARPHRD_ETHER || dev->type==ARPHRD_IEEE802)
1121 {
1122 u32 taddr;
1123 haddr[0]=0x01;
1124 haddr[1]=0x00;
1125 haddr[2]=0x5e;
1126 taddr=ntohl(paddr);
1127 haddr[5]=taddr&0xff;
1128 taddr=taddr>>8;
1129 haddr[4]=taddr&0xff;
1130 taddr=taddr>>8;
1131 haddr[3]=taddr&0x7f;
1132 return 1;
1133 }
1134 /*
 1135 * If a device does not support multicast, broadcast the stuff (e.g. AX.25 for now)
1136 */
1137 #endif
1138
1139 case IS_BROADCAST:
1140 memcpy(haddr, dev->broadcast, dev->addr_len);
1141 return 1;
1142 }
1143 return 0;
1144 }
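/*
 * Worked example (added for illustration): the multicast mapping above
 * turns 224.1.2.3 (0xe0010203) into the Ethernet group address
 * 01:00:5e:01:02:03, i.e. 01:00:5e plus the low 23 bits of the IP
 * address.
 */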
1145
1146 /*
1147 * Find an arp mapping in the cache. If not found, post a request.
1148 */
1149
1150 int arp_find(unsigned char *haddr, u32 paddr, struct device *dev,
1151 u32 saddr, struct sk_buff *skb)
1152 {
1153 struct arp_table *entry;
1154 unsigned long hash;
1155
1156 if (arp_set_predefined(ip_chk_addr(paddr), haddr, paddr, dev))
1157 {
1158 if (skb)
1159 skb->arp = 1;
1160 return 0;
1161 }
1162
1163 hash = HASH(paddr);
1164 arp_fast_lock();
1165
1166 /*
1167 * Find an entry
1168 */
1169 entry = arp_lookup(paddr, 0, dev);
1170
1171 if (entry != NULL) /* It exists */
1172 {
1173 if (!(entry->flags & ATF_COM))
1174 {
1175 /*
 1176 * A request was already sent, but no reply yet. Thus
 1177 * queue the packet with the previous attempt.
1178 */
1179
1180 if (skb != NULL)
1181 {
1182 if (entry->last_updated)
1183 {
1184 skb_queue_tail(&entry->skb, skb);
1185 skb_device_unlock(skb);
1186 }
1187 /*
1188 * If last_updated==0 host is dead, so
1189 * drop skb's and set socket error.
1190 */
1191 else
1192 {
1193 #if 0
1194 /*
1195 * FIXME: ICMP HOST UNREACHABLE should be
1196 * sent in this situation. --ANK
1197 */
1198 if (skb->sk)
1199 {
1200 skb->sk->err = EHOSTDOWN;
1201 skb->sk->error_report(skb->sk);
1202 }
1203 #else
1204 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
1205 #endif
1206 dev_kfree_skb(skb, FREE_WRITE);
1207 }
1208 }
1209 arp_unlock();
1210 return 1;
1211 }
1212
1213 /*
1214 * Update the record
1215 */
1216
1217 entry->last_used = jiffies;
1218 memcpy(haddr, entry->ha, dev->addr_len);
1219 if (skb)
1220 skb->arp = 1;
1221 arp_unlock();
1222 return 0;
1223 }
1224
1225 /*
1226 * Create a new unresolved entry.
1227 */
1228
1229 entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
1230 GFP_ATOMIC);
1231 if (entry != NULL)
1232 {
1233 entry->last_updated = entry->last_used = jiffies;
1234 entry->flags = 0;
1235 entry->ip = paddr;
1236 entry->mask = DEF_ARP_NETMASK;
1237 memset(entry->ha, 0, dev->addr_len);
1238 entry->dev = dev;
1239 entry->hh = NULL;
1240 init_timer(&entry->timer);
1241 entry->timer.function = arp_expire_request;
1242 entry->timer.data = (unsigned long)entry;
1243 entry->timer.expires = jiffies + ARP_RES_TIME;
1244 skb_queue_head_init(&entry->skb);
1245 if (skb != NULL)
1246 {
1247 skb_queue_tail(&entry->skb, skb);
1248 skb_device_unlock(skb);
1249 }
1250 if (arp_lock == 1)
1251 {
1252 entry->next = arp_tables[hash];
1253 arp_tables[hash] = entry;
1254 add_timer(&entry->timer);
1255 entry->retries = ARP_MAX_TRIES;
1256 }
1257 else
1258 {
1259 #if RT_CACHE_DEBUG >= 1
1260 printk("arp_find: %08x backlogged\n", entry->ip);
1261 #endif
1262 arp_enqueue(&arp_backlog, entry);
1263 arp_bh_mask |= ARP_BH_BACKLOG;
1264 }
1265 }
1266 else if (skb != NULL)
1267 dev_kfree_skb(skb, FREE_WRITE);
1268 arp_unlock();
1269
1270 /*
1271 * If we didn't find an entry, we will try to send an ARP packet.
1272 */
1273
1274 arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, saddr, NULL,
1275 dev->dev_addr, NULL);
1276
1277 return 1;
1278 }
1279
1280
1281 /*
1282 * Write the contents of the ARP cache to a PROCfs file.
1283 */
1284
1285 #define HBUFFERLEN 30
1286
1287 int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
1288 {
1289 int len=0;
1290 off_t pos=0;
1291 int size;
1292 struct arp_table *entry;
1293 char hbuffer[HBUFFERLEN];
1294 int i,j,k;
1295 const char hexbuf[] = "0123456789ABCDEF";
1296
1297 size = sprintf(buffer,"IP address HW type Flags HW address Mask Device\n");
1298
1299 pos+=size;
1300 len+=size;
1301
1302 arp_fast_lock();
1303
1304 for(i=0; i<FULL_ARP_TABLE_SIZE; i++)
1305 {
1306 for(entry=arp_tables[i]; entry!=NULL; entry=entry->next)
1307 {
1308 /*
1309 * Convert hardware address to XX:XX:XX:XX ... form.
1310 */
1311 #ifdef CONFIG_AX25
1312 #ifdef CONFIG_NETROM
1313 if (entry->dev->type == ARPHRD_AX25 || entry->dev->type == ARPHRD_NETROM)
1314 strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
1315 else {
1316 #else
1317 if(entry->dev->type==ARPHRD_AX25)
1318 strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
1319 else {
1320 #endif
1321 #endif
1322
1323 for(k=0,j=0;k<HBUFFERLEN-3 && j<entry->dev->addr_len;j++)
1324 {
1325 hbuffer[k++]=hexbuf[ (entry->ha[j]>>4)&15 ];
1326 hbuffer[k++]=hexbuf[ entry->ha[j]&15 ];
1327 hbuffer[k++]=':';
1328 }
1329 hbuffer[--k]=0;
1330
1331 #ifdef CONFIG_AX25
1332 }
1333 #endif
1334 size = sprintf(buffer+len,
1335 "%-17s0x%-10x0x%-10x%s",
1336 in_ntoa(entry->ip),
1337 (unsigned int)entry->dev->type,
1338 entry->flags,
1339 hbuffer);
1340 #if RT_CACHE_DEBUG < 2
1341 size += sprintf(buffer+len+size,
1342 " %-17s %s\n",
1343 entry->mask==DEF_ARP_NETMASK ?
1344 "*" : in_ntoa(entry->mask), entry->dev->name);
1345 #else
1346 size += sprintf(buffer+len+size,
1347 " %-17s %s\t%ld\t%1d\n",
1348 entry->mask==DEF_ARP_NETMASK ?
1349 "*" : in_ntoa(entry->mask), entry->dev->name,
1350 entry->hh ? entry->hh->hh_refcnt : -1,
1351 entry->hh ? entry->hh->hh_uptodate : 0);
1352 #endif
1353
1354 len += size;
1355 pos += size;
1356
1357 if (pos <= offset)
1358 len=0;
1359 if (pos >= offset+length)
1360 break;
1361 }
1362 }
1363 arp_unlock();
1364
1365 *start = buffer+len-(pos-offset); /* Start of wanted data */
1366 len = pos-offset; /* Start slop */
1367 if (len>length)
1368 len = length; /* Ending slop */
1369 return len;
1370 }
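/*
 * Example of the resulting /proc/net/arp output (illustrative values,
 * column widths approximate):
 *
 * IP address       HW type     Flags       HW address            Mask     Device
 * 192.168.1.1      0x1         0x2         00:A0:24:12:34:56     *        eth0
 *
 * where 0x1 is ARPHRD_ETHER and 0x2 is ATF_COM.
 */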
1371
1372
1373
1374 int arp_bind_cache(struct hh_cache ** hhp, struct device *dev, unsigned short htype, u32 paddr)
1375 {
1376 struct arp_table *entry;
1377 struct hh_cache *hh = *hhp;
1378 int addr_hint;
1379 unsigned long flags;
1380
1381 if (hh)
1382 return 1;
1383
1384 if ((addr_hint = ip_chk_addr(paddr)) != 0)
1385 {
1386 unsigned char haddr[MAX_ADDR_LEN];
1387 if (hh)
1388 return 1;
1389 hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
1390 if (!hh)
1391 return 1;
1392 arp_set_predefined(addr_hint, haddr, paddr, dev);
1393 hh->hh_uptodate = 0;
1394 hh->hh_refcnt = 1;
1395 hh->hh_arp = NULL;
1396 hh->hh_next = NULL;
1397 hh->hh_type = htype;
1398 *hhp = hh;
1399 dev->header_cache_update(hh, dev, haddr);
1400 return 0;
1401 }
1402
1403 save_flags(flags);
1404
1405 arp_fast_lock();
1406
1407 entry = arp_lookup(paddr, 0, dev);
1408
1409 if (entry)
1410 {
1411 cli();
1412 for (hh = entry->hh; hh; hh=hh->hh_next)
1413 if (hh->hh_type == htype)
1414 break;
1415 if (hh)
1416 {
1417 hh->hh_refcnt++;
1418 *hhp = hh;
1419 restore_flags(flags);
1420 arp_unlock();
1421 return 1;
1422 }
1423 restore_flags(flags);
1424 }
1425
1426 hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
1427 if (!hh)
1428 {
1429 arp_unlock();
1430 return 1;
1431 }
1432
1433 hh->hh_uptodate = 0;
1434 hh->hh_refcnt = 1;
1435 hh->hh_arp = NULL;
1436 hh->hh_next = NULL;
1437 hh->hh_type = htype;
1438
1439 if (entry)
1440 {
1441 dev->header_cache_update(hh, dev, entry->ha);
1442 *hhp = hh;
1443 cli();
1444 hh->hh_arp = (void*)entry;
1445 entry->hh = hh;
1446 hh->hh_refcnt++;
1447 restore_flags(flags);
1448 entry->last_used = jiffies;
1449 arp_unlock();
1450 return 0;
1451 }
1452
1453
1454 /*
1455 * Create a new unresolved entry.
1456 */
1457
1458 entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
1459 GFP_ATOMIC);
1460 if (entry == NULL)
1461 {
1462 kfree_s(hh, sizeof(struct hh_cache));
1463 arp_unlock();
1464 return 1;
1465 }
1466
1467 entry->last_updated = entry->last_used = jiffies;
1468 entry->flags = 0;
1469 entry->ip = paddr;
1470 entry->mask = DEF_ARP_NETMASK;
1471 memset(entry->ha, 0, dev->addr_len);
1472 entry->dev = dev;
1473 entry->hh = hh;
1474 ATOMIC_INCR(&hh->hh_refcnt);
1475 init_timer(&entry->timer);
1476 entry->timer.function = arp_expire_request;
1477 entry->timer.data = (unsigned long)entry;
1478 entry->timer.expires = jiffies + ARP_RES_TIME;
1479 skb_queue_head_init(&entry->skb);
1480
1481 if (arp_lock == 1)
1482 {
1483 unsigned long hash = HASH(paddr);
1484 cli();
1485 entry->next = arp_tables[hash];
1486 arp_tables[hash] = entry;
1487 hh->hh_arp = (void*)entry;
1488 entry->retries = ARP_MAX_TRIES;
1489 restore_flags(flags);
1490
1491 add_timer(&entry->timer);
1492 arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
1493 }
1494 else
1495 {
1496 #if RT_CACHE_DEBUG >= 1
1497 printk("arp_cache_bind: %08x backlogged\n", entry->ip);
1498 #endif
1499 arp_enqueue(&arp_backlog, entry);
1500 arp_bh_mask |= ARP_BH_BACKLOG;
1501 }
1502 *hhp = hh;
1503 arp_unlock();
1504 return 0;
1505 }
1506
1507 static void arp_run_bh()
1508 {
1509 unsigned long flags;
1510 struct arp_table *entry, *entry1;
1511 struct hh_cache *hh;
1512 __u32 sip;
1513
1514 save_flags(flags);
1515 cli();
1516 if (!arp_lock)
1517 {
1518 arp_fast_lock();
1519
1520 while ((entry = arp_dequeue(&arp_backlog)) != NULL)
1521 {
1522 unsigned long hash;
1523 sti();
1524 sip = entry->ip;
1525 hash = HASH(sip);
1526
 1527 /* It's possible that an entry with the same pair
1528 * (addr,type) was already created. Our entry is older,
1529 * so it should be discarded.
1530 */
1531 for (entry1=arp_tables[hash]; entry1; entry1=entry1->next)
1532 if (entry1->ip==sip && entry1->dev == entry->dev)
1533 break;
1534
1535 if (!entry1)
1536 {
1537 struct device * dev = entry->dev;
1538 cli();
1539 entry->next = arp_tables[hash];
1540 arp_tables[hash] = entry;
1541 for (hh=entry->hh; hh; hh=hh->hh_next)
1542 hh->hh_arp = (void*)entry;
1543 sti();
1544 del_timer(&entry->timer);
1545 entry->timer.expires = jiffies + ARP_RES_TIME;
1546 add_timer(&entry->timer);
1547 entry->retries = ARP_MAX_TRIES;
1548 arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
1549 #if RT_CACHE_DEBUG >= 1
1550 printk("arp_run_bh: %08x reinstalled\n", sip);
1551 #endif
1552 }
1553 else
1554 {
1555 struct sk_buff * skb;
1556 struct hh_cache * next;
1557
1558 /* Discard entry, but preserve its hh's and
1559 * skb's.
1560 */
1561 cli();
1562 for (hh=entry->hh; hh; hh=next)
1563 {
1564 next = hh->hh_next;
1565 hh->hh_next = entry1->hh;
1566 entry1->hh = hh;
1567 hh->hh_arp = (void*)entry1;
1568 }
1569 entry->hh = NULL;
1570
1571 /* Prune skb list from entry
1572 * and graft it to entry1.
1573 */
1574 while ((skb = skb_dequeue(&entry->skb)) != NULL)
1575 {
1576 skb_device_lock(skb);
1577 sti();
1578 skb_queue_tail(&entry1->skb, skb);
1579 skb_device_unlock(skb);
1580 cli();
1581 }
1582 sti();
1583
1584 #if RT_CACHE_DEBUG >= 1
1585 printk("arp_run_bh: entry %08x was born dead\n", entry->ip);
1586 #endif
1587 arp_free_entry(entry);
1588
1589 if (entry1->flags & ATF_COM)
1590 {
1591 arp_update_hhs(entry1);
1592 arp_send_q(entry1);
1593 }
1594 }
1595 cli();
1596 }
1597 arp_bh_mask &= ~ARP_BH_BACKLOG;
1598 arp_unlock();
1599 }
1600 restore_flags(flags);
1601 }
1602
1603 /*
1604 * Test if a hardware address is all zero
1605 */
1606 static inline int empty(unsigned char * addr, int len)
1607 {
1608 while (len > 0) {
1609 if (*addr)
1610 return 0;
1611 len--;
1612 addr++;
1613 }
1614 return 1;
1615 }
1616
1617 /*
1618 * Set (create) an ARP cache entry.
1619 */
1620
1621 static int arp_req_set(struct arpreq *r, struct device * dev)
1622 {
1623 struct arp_table *entry;
1624 struct sockaddr_in *si;
1625 struct rtable *rt;
1626 struct device *dev1;
1627 unsigned char *ha;
1628 u32 ip;
1629
1630 /*
1631 * Find out about the hardware type.
1632 * We have to be compatible with BSD UNIX, so we have to
1633 * assume that a "not set" value (i.e. 0) means Ethernet.
1634 *
1635 * ANK: Hey, who wrote it? Do you really mean that BSD considers
 1636 * ARPHRD_NETROM as ARPHRD_ETHER, or something else?
1637 */
1638
1639 si = (struct sockaddr_in *) &r->arp_pa;
1640 ip = si->sin_addr.s_addr;
1641
1642 /*
1643 * Is it reachable ?
1644 */
1645
1646 if (ip_chk_addr(ip) == IS_MYADDR)
1647 dev1 = dev_get("lo");
1648 else {
1649 rt = ip_rt_route(ip, 0);
1650 if (!rt)
1651 return -ENETUNREACH;
1652 dev1 = rt->rt_dev;
1653 ip_rt_put(rt);
1654 }
1655
 1656 if (!dev) /* dev can only be NULL if ATF_PUBL is not set */
1657 dev = dev1;
1658
1659 if (((r->arp_flags & ATF_PUBL) && dev == dev1) ||
1660 (!(r->arp_flags & ATF_PUBL) && dev != dev1))
1661 return -EINVAL;
1662
1663 #if RT_CACHE_DEBUG >= 1
1664 if (arp_lock)
1665 printk("arp_req_set: bug\n");
1666 #endif
1667 arp_fast_lock();
1668
1669 /*
1670 * Is there an existing entry for this address?
1671 */
1672
1673 /*
1674 * Find the entry
1675 */
1676
1677 entry = arp_lookup(ip, r->arp_flags & ~ATF_NETMASK, dev);
1678
1679 if (entry)
1680 {
1681 arp_destroy(entry);
1682 entry = NULL;
1683 }
1684
1685 /*
1686 * Do we need to create a new entry
1687 */
1688
1689 if (entry == NULL)
1690 {
1691 entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
1692 GFP_ATOMIC);
1693 if (entry == NULL)
1694 {
1695 arp_unlock();
1696 return -ENOMEM;
1697 }
1698 entry->ip = ip;
1699 entry->hh = NULL;
1700 init_timer(&entry->timer);
1701 entry->timer.function = arp_expire_request;
1702 entry->timer.data = (unsigned long)entry;
1703
1704 if (r->arp_flags & ATF_PUBL)
1705 {
1706 cli();
1707 entry->next = arp_proxy_list;
1708 arp_proxy_list = entry;
1709 sti();
1710 }
1711 else
1712 {
1713 unsigned long hash = HASH(ip);
1714 cli();
1715 entry->next = arp_tables[hash];
1716 arp_tables[hash] = entry;
1717 sti();
1718 }
1719 skb_queue_head_init(&entry->skb);
1720 }
1721 /*
1722 * We now have a pointer to an ARP entry. Update it!
1723 */
1724 ha = r->arp_ha.sa_data;
1725 if ((r->arp_flags & ATF_COM) && empty(ha, dev->addr_len))
1726 ha = dev->dev_addr;
1727 memcpy(entry->ha, ha, dev->addr_len);
1728 entry->last_updated = entry->last_used = jiffies;
1729 entry->flags = r->arp_flags | ATF_COM;
1730 if ((entry->flags & ATF_PUBL) && (entry->flags & ATF_NETMASK))
1731 {
1732 si = (struct sockaddr_in *) &r->arp_netmask;
1733 entry->mask = si->sin_addr.s_addr;
1734 }
1735 else
1736 entry->mask = DEF_ARP_NETMASK;
1737 entry->dev = dev;
1738 arp_update_hhs(entry);
1739 arp_unlock();
1740 return 0;
1741 }
1742
1743
1744
1745 /*
1746 * Get an ARP cache entry.
1747 */
1748
1749 static int arp_req_get(struct arpreq *r, struct device *dev)
1750 {
1751 struct arp_table *entry;
1752 struct sockaddr_in *si;
1753
1754 si = (struct sockaddr_in *) &r->arp_pa;
1755
1756 #if RT_CACHE_DEBUG >= 1
1757 if (arp_lock)
 1758 printk("arp_req_get: bug\n");
1759 #endif
1760 arp_fast_lock();
1761
1762 entry = arp_lookup(si->sin_addr.s_addr, r->arp_flags|ATF_NETMASK, dev);
1763
1764 if (entry == NULL)
1765 {
1766 arp_unlock();
1767 return -ENXIO;
1768 }
1769
1770 /*
1771 * We found it; copy into structure.
1772 */
1773
1774 memcpy(r->arp_ha.sa_data, &entry->ha, entry->dev->addr_len);
1775 r->arp_ha.sa_family = entry->dev->type;
1776 r->arp_flags = entry->flags;
1777 strncpy(r->arp_dev, entry->dev->name, 16);
1778 arp_unlock();
1779 return 0;
1780 }
1781
1782 static int arp_req_delete(struct arpreq *r, struct device * dev)
1783 {
1784 struct arp_table *entry;
1785 struct sockaddr_in *si;
1786
1787 si = (struct sockaddr_in *) &r->arp_pa;
1788 #if RT_CACHE_DEBUG >= 1
1789 if (arp_lock)
1790 printk("arp_req_delete: bug\n");
1791 #endif
1792 arp_fast_lock();
1793
1794 if (!(r->arp_flags & ATF_PUBL))
1795 {
1796 for (entry = arp_tables[HASH(si->sin_addr.s_addr)];
1797 entry != NULL; entry = entry->next)
1798 if (entry->ip == si->sin_addr.s_addr
1799 && (!dev || entry->dev == dev))
1800 {
1801 arp_destroy(entry);
1802 arp_unlock();
1803 return 0;
1804 }
1805 }
1806 else
1807 {
1808 for (entry = arp_proxy_list;
1809 entry != NULL; entry = entry->next)
1810 if (entry->ip == si->sin_addr.s_addr
1811 && (!dev || entry->dev == dev))
1812 {
1813 arp_destroy(entry);
1814 arp_unlock();
1815 return 0;
1816 }
1817 }
1818
1819 arp_unlock();
1820 return -ENXIO;
1821 }
1822
1823 /*
1824 * Handle an ARP layer I/O control request.
1825 */
1826
1827 int arp_ioctl(unsigned int cmd, void *arg)
1828 {
1829 int err;
1830 struct arpreq r;
1831
1832 struct device * dev = NULL;
1833
1834 switch(cmd)
1835 {
1836 case SIOCDARP:
1837 case SIOCSARP:
1838 if (!suser())
1839 return -EPERM;
1840 case SIOCGARP:
1841 err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq));
1842 if (err)
1843 return err;
1844 memcpy_fromfs(&r, arg, sizeof(struct arpreq));
1845 break;
1846 case OLD_SIOCDARP:
1847 case OLD_SIOCSARP:
1848 if (!suser())
1849 return -EPERM;
1850 case OLD_SIOCGARP:
1851 err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq_old));
1852 if (err)
1853 return err;
1854 memcpy_fromfs(&r, arg, sizeof(struct arpreq_old));
1855 memset(&r.arp_dev, 0, sizeof(r.arp_dev));
1856 break;
1857 default:
1858 return -EINVAL;
1859 }
1860
1861 if (r.arp_pa.sa_family != AF_INET)
1862 return -EPFNOSUPPORT;
1863 if (((struct sockaddr_in *)&r.arp_pa)->sin_addr.s_addr == 0)
1864 return -EINVAL;
1865
1866 if (r.arp_dev[0])
1867 {
1868 if ((dev = dev_get(r.arp_dev)) == NULL)
1869 return -ENODEV;
1870
1871 if (!r.arp_ha.sa_family)
1872 r.arp_ha.sa_family = dev->type;
1873 else if (r.arp_ha.sa_family != dev->type)
1874 return -EINVAL;
1875 }
1876 else
1877 {
1878 if ((r.arp_flags & ATF_PUBL) &&
1879 ((cmd == SIOCSARP) || (cmd == OLD_SIOCSARP))) {
1880 if ((dev = dev_getbytype(r.arp_ha.sa_family)) == NULL)
1881 return -ENODEV;
1882 }
1883 }
1884
1885 switch(cmd)
1886 {
1887 case SIOCDARP:
1888 return arp_req_delete(&r, dev);
1889 case SIOCSARP:
1890 return arp_req_set(&r, dev);
1891 case OLD_SIOCDARP:
 1892 /* old SIOCDARP destroys both
1893 * normal and proxy mappings
1894 */
1895 r.arp_flags &= ~ATF_PUBL;
1896 err = arp_req_delete(&r, dev);
1897 r.arp_flags |= ATF_PUBL;
1898 if (!err)
1899 arp_req_delete(&r, dev);
1900 else
1901 err = arp_req_delete(&r, dev);
1902 return err;
1903 case OLD_SIOCSARP:
1904 err = arp_req_set(&r, dev);
1905 /* old SIOCSARP works so funny,
1906 * that its behaviour can be emulated
1907 * only approximately 8).
1908 * It should work. --ANK
1909 */
1910 if (r.arp_flags & ATF_PUBL)
1911 {
1912 r.arp_flags &= ~ATF_PUBL;
1913 arp_req_delete(&r, dev);
1914 }
1915 return err;
1916 case SIOCGARP:
1917 err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq));
1918 if (err)
1919 return err;
1920 err = arp_req_get(&r, dev);
1921 if (!err)
1922 memcpy_tofs(arg, &r, sizeof(r));
1923 return err;
1924 case OLD_SIOCGARP:
1925 err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq_old));
1926 if (err)
1927 return err;
1928 r.arp_flags &= ~ATF_PUBL;
1929 err = arp_req_get(&r, dev);
1930 if (err < 0)
1931 {
1932 r.arp_flags |= ATF_PUBL;
1933 err = arp_req_get(&r, dev);
1934 }
1935 if (!err)
1936 memcpy_tofs(arg, &r, sizeof(struct arpreq_old));
1937 return err;
1938 }
1939 /*NOTREACHED*/
1940 return 0;
1941 }
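/*
 * Userland usage sketch (added for illustration; 'sock' is any AF_INET
 * socket descriptor): installing a permanent entry through the SIOCSARP
 * path handled above.
 *
 *	struct arpreq r;
 *	struct sockaddr_in *sin = (struct sockaddr_in *) &r.arp_pa;
 *
 *	memset(&r, 0, sizeof(r));
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = inet_addr("192.168.1.1");
 *	r.arp_ha.sa_family = ARPHRD_ETHER;
 *	memcpy(r.arp_ha.sa_data, mac, 6);	(mac: 6-byte Ethernet address)
 *	r.arp_flags = ATF_PERM | ATF_COM;
 *	strcpy(r.arp_dev, "eth0");
 *	ioctl(sock, SIOCSARP, &r);
 */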
1942
1943
1944 /*
1945 * Called once on startup.
1946 */
1947
1948 static struct packet_type arp_packet_type =
1949 {
1950 0, /* Should be: __constant_htons(ETH_P_ARP) - but this _doesn't_ come out constant! */
1951 NULL, /* All devices */
1952 arp_rcv,
1953 NULL,
1954 NULL
1955 };
1956
1957 static struct notifier_block arp_dev_notifier={
1958 arp_device_event,
1959 NULL,
1960 0
1961 };
1962
1963 void arp_init (void)
1964 {
1965 /* Register the packet type */
1966 arp_packet_type.type=htons(ETH_P_ARP);
1967 dev_add_pack(&arp_packet_type);
1968 /* Start with the regular checks for expired arp entries. */
1969 add_timer(&arp_timer);
1970 /* Register for device down reports */
1971 register_netdevice_notifier(&arp_dev_notifier);
1972
1973 proc_net_register(&(struct proc_dir_entry) {
1974 PROC_NET_ARP, 3, "arp",
1975 S_IFREG | S_IRUGO, 1, 0, 0,
1976 0, &proc_net_inode_operations,
1977 arp_get_info
1978 });
1979 }
1980