1 /* linux/net/inet/arp.c
2 *
3 * Copyright (C) 1994 by Florian La Roche
4 *
5 * This module implements the Address Resolution Protocol ARP (RFC 826),
6 * which is used to convert IP addresses (or in the future maybe other
 * high-level addresses) into a low-level hardware address (like an Ethernet
8 * address).
9 *
10 * FIXME:
11 * Experiment with better retransmit timers
12 * Clean up the timer deletions
13 * If you create a proxy entry set your interface address to the address
14 * and then delete it, proxies may get out of sync with reality - check this
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 *
21 *
22 * Fixes:
23 * Alan Cox : Removed the ethernet assumptions in Florian's code
24 * Alan Cox : Fixed some small errors in the ARP logic
25 * Alan Cox : Allow >4K in /proc
26 * Alan Cox : Make ARP add its own protocol entry
27 *
28 * Ross Martin : Rewrote arp_rcv() and arp_get_info()
29 * Stephen Henson : Add AX25 support to arp_get_info()
30 * Alan Cox : Drop data when a device is downed.
31 * Alan Cox : Use init_timer().
32 * Alan Cox : Double lock fixes.
33 * Martin Seine : Move the arphdr structure
34 * to if_arp.h for compatibility.
35 * with BSD based programs.
36 * Andrew Tridgell : Added ARP netmask code and
37 * re-arranged proxy handling.
38 * Alan Cox : Changed to use notifiers.
39 * Niibe Yutaka : Reply for this device or proxies only.
40 * Alan Cox : Don't proxy across hardware types!
41 * Jonathan Naylor : Added support for NET/ROM.
42 * Mike Shaver : RFC1122 checks.
43 * Jonathan Naylor : Only lookup the hardware address for
44 * the correct hardware type.
45 * Germano Caronni : Assorted subtle races.
46 * Craig Schlenter : Don't modify permanent entry
47 * during arp_rcv.
48 * Russ Nelson : Tidied up a few bits.
49 * Alexey Kuznetsov: Major changes to caching and behaviour,
50 * eg intelligent arp probing and generation
51 * of host down events.
52 * Alan Cox : Missing unlock in device events.
53 */
54
55 /* RFC1122 Status:
56 2.3.2.1 (ARP Cache Validation):
57 MUST provide mechanism to flush stale cache entries (OK)
58 SHOULD be able to configure cache timeout (NOT YET)
59 MUST throttle ARP retransmits (OK)
60 2.3.2.2 (ARP Packet Queue):
61 SHOULD save at least one packet from each "conversation" with an
62 unresolved IP address. (OK)
63 950727 -- MS
64 */
65
66 #include <linux/types.h>
67 #include <linux/string.h>
68 #include <linux/kernel.h>
69 #include <linux/sched.h>
70 #include <linux/config.h>
71 #include <linux/socket.h>
72 #include <linux/sockios.h>
73 #include <linux/errno.h>
74 #include <linux/if_arp.h>
75 #include <linux/in.h>
76 #include <linux/mm.h>
77 #include <linux/inet.h>
78 #include <linux/netdevice.h>
79 #include <linux/etherdevice.h>
80 #include <linux/trdevice.h>
81 #include <linux/skbuff.h>
82 #include <linux/proc_fs.h>
83 #include <linux/stat.h>
84
85 #include <net/ip.h>
86 #include <net/icmp.h>
87 #include <net/route.h>
88 #include <net/protocol.h>
89 #include <net/tcp.h>
90 #include <net/sock.h>
91 #include <net/arp.h>
92 #ifdef CONFIG_AX25
93 #include <net/ax25.h>
94 #ifdef CONFIG_NETROM
95 #include <net/netrom.h>
96 #endif
97 #endif
98
99 #include <asm/system.h>
100 #include <asm/segment.h>
101
102 #include <stdarg.h>
103
104 /*
105 * This structure defines the ARP mapping cache. As long as we make changes
106 * in this structure, we keep interrupts off. But normally we can copy the
107 * hardware address and the device pointer in a local variable and then
108 * make any "long calls" to send a packet out.
109 */
110
struct arp_table
{
	struct arp_table		*next;			/* Linked entry list 		*/
	unsigned long			last_used;		/* For expiry 			*/
	unsigned long			last_updated;		/* For expiry 			*/
	unsigned int			flags;			/* Control status (ATF_*)	*/
	u32				ip;			/* ip address of entry 		*/
	u32				mask;			/* netmask - used for generalised proxy arps (tridge) 	*/
	unsigned char			ha[MAX_ADDR_LEN];	/* Hardware address		*/
	struct device			*dev;			/* Device the entry is tied to 	*/

	/*
	 *	The following entries are only used for unresolved hw addresses.
	 */

	struct timer_list		timer;			/* expire timer (retransmit)	*/
	int				retries;		/* remaining retries	 	*/
	struct sk_buff_head		skb;			/* list of queued packets 	*/
	struct hh_cache			*hh;			/* cached hardware headers	*/
};
131
132
/*
 *	Configurable Parameters (don't touch unless you know what you are doing)
 */
136
/*
 *	If an ARP request is sent, ARP_RES_TIME is the timeout value until the
 *	next request is sent.
 *	RFC1122: OK. Throttles ARPing, as per 2.3.2.1. (MUST)
 *	The recommended minimum timeout is 1 second per destination.
 *	This timeout is prolonged to ARP_DEAD_RES_TIME, if the
 *	destination does not respond.
 */
145
146 #define ARP_RES_TIME (5*HZ)
147 #define ARP_DEAD_RES_TIME (60*HZ)
148
/*
 *	The number of times an ARP request is sent, until the host is
 *	considered temporarily unreachable.
 */
153
154 #define ARP_MAX_TRIES 3
155
156 /*
157 * After that time, an unused entry is deleted from the arp table.
158 */
159
160 #define ARP_TIMEOUT (600*HZ)
161
162 /*
163 * How often is the function 'arp_check_retries' called.
164 * An unused entry is invalidated in the time between ARP_TIMEOUT and
165 * (ARP_TIMEOUT+ARP_CHECK_INTERVAL).
166 */
167
168 #define ARP_CHECK_INTERVAL (60*HZ)
169
170 /*
171 * The entry is reconfirmed by sending point-to-point ARP
172 * request after ARP_CONFIRM_INTERVAL. If destinations does not respond
173 * for ARP_CONFIRM_TIMEOUT, normal broadcast resolution scheme is started.
174 */
175
176 #define ARP_CONFIRM_INTERVAL (300*HZ)
177 #define ARP_CONFIRM_TIMEOUT ARP_RES_TIME
178
179 static unsigned long arp_lock;
180 static unsigned long arp_bh_mask;
181
182 #define ARP_BH_BACKLOG 1
183
184 static struct arp_table *arp_backlog;
185
186 static void arp_run_bh(void);
187 static void arp_check_expire (unsigned long);
188
189 static struct timer_list arp_timer =
190 { NULL, NULL, ARP_CHECK_INTERVAL, 0L, &arp_check_expire };
191
192 /*
193 * The default arp netmask is just 255.255.255.255 which means it's
194 * a single machine entry. Only proxy entries can have other netmasks
195 */
196
197 #define DEF_ARP_NETMASK (~0)
198
199 /*
200 * The size of the hash table. Must be a power of two.
201 * Maybe we should remove hashing in the future for arp and concentrate
202 * on Patrick Schaaf's Host-Cache-Lookup...
203 */
204
205 #define ARP_TABLE_SIZE 16
206 #define FULL_ARP_TABLE_SIZE (ARP_TABLE_SIZE+1)
207
208 struct arp_table *arp_tables[FULL_ARP_TABLE_SIZE] =
209 {
210 NULL,
211 };
212
213 #define arp_proxy_list arp_tables[ARP_TABLE_SIZE]
214
215 /*
216 * The last bits in the IP address are used for the cache lookup.
217 * A special entry is used for proxy arp entries
218 */
219
220 #define HASH(paddr) (htonl(paddr) & (ARP_TABLE_SIZE - 1))
221
222 /*
223 * Lock/unlock arp_table chains.
224 */
225
/*
 * Take a reference on the ARP tables. While the count is non-zero,
 * other paths must defer table modifications to the backlog.
 */
static __inline__ void arp_fast_lock(void)
{
	ATOMIC_INCR(&arp_lock);
}
230
/*
 * Drop a reference WITHOUT running the deferred backlog -- used when
 * the caller handles pending work itself (compare arp_unlock below).
 */
static __inline__ void arp_fast_unlock(void)
{
	ATOMIC_DECR(&arp_lock);
}
235
/*
 * Drop a reference; if this was the last holder and deferred work
 * piled up (arp_bh_mask set), process the backlog now.
 */
static __inline__ void arp_unlock(void)
{
	if (!ATOMIC_DECR_AND_CHECK(&arp_lock) && arp_bh_mask)
		arp_run_bh();
}
241
242 /*
243 * Enqueue to FIFO list.
244 */
245
246 static void arp_enqueue(struct arp_table **q, struct arp_table *entry)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
247 {
248 unsigned long flags;
249 struct arp_table * tail;
250
251 save_flags(flags);
252 cli();
253 tail = *q;
254 if (!tail)
255 entry->next = entry;
256 else
257 {
258 entry->next = tail->next;
259 tail->next = entry;
260 }
261 *q = entry;
262 restore_flags(flags);
263 return;
264 }
265
266 /*
267 * Dequeue from FIFO list,
268 * caller should mask interrupts.
269 */
270
271 static struct arp_table * arp_dequeue(struct arp_table **q)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
272 {
273 struct arp_table * entry;
274
275 if (*q)
276 {
277 entry = (*q)->next;
278 (*q)->next = entry->next;
279 if (entry->next == entry)
280 *q = NULL;
281 entry->next = NULL;
282 return entry;
283 }
284 return NULL;
285 }
286
287 /*
288 * Purge all linked skb's of the entry.
289 */
290
291 static void arp_release_entry(struct arp_table *entry)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
292 {
293 struct sk_buff *skb;
294 unsigned long flags;
295
296 save_flags(flags);
297 cli();
298 /* Release the list of `skb' pointers. */
299 while ((skb = skb_dequeue(&entry->skb)) != NULL)
300 {
301 skb_device_lock(skb);
302 restore_flags(flags);
303 dev_kfree_skb(skb, FREE_WRITE);
304 cli();
305 }
306 restore_flags(flags);
307 return;
308 }
309
310 /*
311 * Release the entry and all resources linked to it: skb's, hh's, timer
312 * and certainly memory.
313 */
314
315 static void arp_free_entry(struct arp_table *entry)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
316 {
317 unsigned long flags;
318 struct hh_cache *hh, *next;
319
320 del_timer(&entry->timer);
321
322 save_flags(flags);
323 cli();
324 arp_release_entry(entry);
325
326 for (hh = entry->hh; hh; hh = next)
327 {
328 next = hh->hh_next;
329 hh->hh_arp = NULL;
330 if (!--hh->hh_refcnt)
331 kfree_s(hh, sizeof(struct(struct hh_cache)));
332 }
333 restore_flags(flags);
334
335 kfree_s(entry, sizeof(struct arp_table));
336 return;
337 }
338
339 /*
340 * How many users has this entry?
341 */
342
343 static __inline__ int arp_count_hhs(struct arp_table * entry)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
344 {
345 struct hh_cache *hh, **hhp;
346 int count = 0;
347
348 hhp = &entry->hh;
349 while ((hh=*hhp) != NULL)
350 {
351 if (hh->hh_refcnt == 1)
352 {
353 *hhp = hh->hh_next;
354 kfree_s(hh, sizeof(struct hh_cache));
355 continue;
356 }
357 count += hh->hh_refcnt-1;
358 hhp = &hh->hh_next;
359 }
360
361 return count;
362 }
363
364 /*
365 * Invalidate all hh's, so that higher level will not try to use it.
366 */
367
368 static __inline__ void arp_invalidate_hhs(struct arp_table * entry)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
369 {
370 struct hh_cache *hh;
371
372 for (hh=entry->hh; hh; hh=hh->hh_next)
373 hh->hh_uptodate = 0;
374 }
375
376 /*
377 * Signal to device layer, that hardware address may be changed.
378 */
379
380 static __inline__ void arp_update_hhs(struct arp_table * entry)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
381 {
382 struct hh_cache *hh;
383
384 for (hh=entry->hh; hh; hh=hh->hh_next)
385 entry->dev->header_cache_update(hh, entry->dev, entry->ha);
386 }
387
388 /*
389 * Check if there are too old entries and remove them. If the ATF_PERM
390 * flag is set, they are always left in the arp cache (permanent entry).
391 * If an entry was not be confirmed for ARP_CONFIRM_INTERVAL,
392 * declare it invalid and send point-to-point ARP request.
393 * If it will not be confirmed for ARP_CONFIRM_TIMEOUT,
394 * give it to shred by arp_expire_entry.
395 */
396
/*
 * Periodic timer routine (every ARP_CHECK_INTERVAL): sweep the cache.
 * Non-permanent entries unused for ARP_TIMEOUT with no hh users are
 * freed; entries unconfirmed for ARP_CONFIRM_INTERVAL are marked
 * incomplete and probed point-to-point at their old hardware address.
 */
static void arp_check_expire(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;

	del_timer(&arp_timer);

	/* If anyone holds the ARP lock, skip the sweep entirely and
	   just try again at the next interval. */
	if (!arp_lock)
	{
		arp_fast_lock();

		for (i = 0; i < ARP_TABLE_SIZE; i++)
		{
			struct arp_table *entry;
			struct arp_table **pentry;

			pentry = &arp_tables[i];

			while ((entry = *pentry) != NULL)
			{
				cli();
				if (now - entry->last_used > ARP_TIMEOUT
				    && !(entry->flags & ATF_PERM)
				    && !arp_count_hhs(entry))
				{
					/* Stale, not permanent, unreferenced: drop it. */
					*pentry = entry->next;
					sti();
#if RT_CACHE_DEBUG >= 2
					printk("arp_expire: %08x expired\n", entry->ip);
#endif
					arp_free_entry(entry);
				}
				else if (entry->last_updated
					 && now - entry->last_updated > ARP_CONFIRM_INTERVAL
					 && !(entry->flags & ATF_PERM))
				{
					/* In use but unconfirmed too long: invalidate the
					   cached headers and unicast-probe the old address. */
					struct device * dev = entry->dev;
					pentry = &entry->next;
					entry->flags &= ~ATF_COM;
					arp_invalidate_hhs(entry);
					sti();
					entry->retries = ARP_MAX_TRIES+1;
					del_timer(&entry->timer);
					entry->timer.expires = jiffies + ARP_CONFIRM_TIMEOUT;
					add_timer(&entry->timer);
					arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip,
						 dev, dev->pa_addr, entry->ha,
						 dev->dev_addr, NULL);
#if RT_CACHE_DEBUG >= 2
					printk("arp_expire: %08x requires confirmation\n", entry->ip);
#endif
				}
				else
					/* NOTE(review): this path keeps interrupts masked
					   until the next cli() or the loop ends -- matches
					   the original control flow; verify intent. */
					pentry = &entry->next;	/* go to next entry */
			}
		}
		arp_unlock();
	}

	ip_rt_check_expire();

	/*
	 *	Set the timer again.
	 */

	arp_timer.expires = jiffies + ARP_CHECK_INTERVAL;
	add_timer(&arp_timer);
}
465
466 /*
467 * This function is called, if an entry is not resolved in ARP_RES_TIME.
468 * When more than MAX_ARP_TRIES retries was done, release queued skb's,
469 * but not discard entry itself if it is in use.
470 */
471
/*
 * Retransmit timer for an unresolved entry. Rebroadcast the request
 * while retries remain; once exhausted, drop the queued packets and
 * either keep probing slowly (if hardware-header users still hold the
 * entry) or unlink the entry from its hash chain and free it.
 */
static void arp_expire_request (unsigned long arg)
{
	struct arp_table *entry = (struct arp_table *) arg;
	struct arp_table **pentry;
	unsigned long hash;
	unsigned long flags;

	save_flags(flags);
	cli();

	/*
	 *	Since all timeouts are handled with interrupts enabled, there is a
	 *	small chance, that this entry has just been resolved by an incoming
	 *	packet. This is the only race condition, but it is handled...
	 */

	if (entry->flags & ATF_COM)
	{
		restore_flags(flags);
		return;
	}

	/* Tables busy: back off briefly instead of racing the lock holder. */
	if (arp_lock)
	{
#if RT_CACHE_DEBUG >= 1
		printk("arp_expire_request: %08x postponed\n", entry->ip);
#endif
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + HZ/10;
		add_timer(&entry->timer);
		restore_flags(flags);
		return;
	}

	arp_fast_lock();
	restore_flags(flags);

	/* Retries remain and the entry has been updated at least once:
	   send another broadcast request. */
	if (entry->last_updated && --entry->retries > 0)
	{
		struct device *dev = entry->dev;

#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x timed out\n", entry->ip);
#endif
		/* Set new timer. */
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + ARP_RES_TIME;
		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
			 NULL, dev->dev_addr, NULL);
		arp_unlock();
		return;
	}

	/* Out of retries: discard everything queued on the entry. */
	arp_release_entry(entry);

	cli();
	if (arp_count_hhs(entry))
	{
		/* Hardware-header caches still reference the entry, so it
		   cannot be freed; mark it dead and probe at the slow
		   ARP_DEAD_RES_TIME rate instead. */
		struct device *dev = entry->dev;
#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x is dead\n", entry->ip);
#endif
		/* NOTE(review): second release -- the queue was already
		   drained above, so this is a no-op; verify intent. */
		arp_release_entry(entry);
		entry->retries = ARP_MAX_TRIES;
		restore_flags(flags);
		entry->last_updated = 0;
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + ARP_DEAD_RES_TIME;
		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
			 NULL, dev->dev_addr, NULL);
		arp_unlock();
		return;
	}
	restore_flags(flags);

	/* Nobody references the entry: find it on its hash chain,
	   unlink and free it. */
	hash = HASH(entry->ip);

	pentry = &arp_tables[hash];

	while (*pentry != NULL)
	{
		if (*pentry == entry)
		{
			cli();
			*pentry = entry->next;
			restore_flags(flags);
#if RT_CACHE_DEBUG >= 2
			printk("arp_expire_request: %08x is killed\n", entry->ip);
#endif
			arp_free_entry(entry);
			arp_unlock();
			return;
		}
		pentry = &(*pentry)->next;
	}
	printk("arp_expire_request: bug: ARP entry is lost!\n");
	arp_unlock();
}
572
573 /*
574 * Purge a device from the ARP queue
575 */
576
577 int arp_device_event(struct notifier_block *this, unsigned long event, void *ptr)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
578 {
579 struct device *dev=ptr;
580 int i;
581
582 if (event != NETDEV_DOWN)
583 return NOTIFY_DONE;
584 /*
585 * This is a bit OTT - maybe we need some arp semaphores instead.
586 */
587
588 #if RT_CACHE_DEBUG >= 1
589 if (arp_lock)
590 printk("arp_device_event: bug\n");
591 #endif
592 arp_fast_lock();
593
594 for (i = 0; i < FULL_ARP_TABLE_SIZE; i++)
595 {
596 struct arp_table *entry;
597 struct arp_table **pentry = &arp_tables[i];
598
599 while ((entry = *pentry) != NULL)
600 {
601 if (entry->dev == dev)
602 {
603 *pentry = entry->next; /* remove from list */
604 arp_free_entry(entry);
605 }
606 else
607 pentry = &entry->next; /* go to next entry */
608 }
609 }
610 arp_unlock();
611 return NOTIFY_DONE;
612 }
613
614
615 /*
616 * Create and send an arp packet. If (dest_hw == NULL), we create a broadcast
617 * message.
618 */
619
/*
 * Build and transmit one ARP packet of the given operation type.
 * dest_hw == NULL broadcasts the frame; target_hw == NULL zero-fills
 * the target hardware field (the usual case for requests).
 */
void arp_send(int type, int ptype, u32 dest_ip,
	      struct device *dev, u32 src_ip,
	      unsigned char *dest_hw, unsigned char *src_hw,
	      unsigned char *target_hw)
{
	struct sk_buff *skb;
	struct arphdr *arp;
	unsigned char *arp_ptr;

	/*
	 *	No arp on this interface.
	 */

	if (dev->flags&IFF_NOARP)
		return;

	/*
	 *	Allocate a buffer: link header + ARP header + two
	 *	(hardware address, IP address) pairs.
	 */

	skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
				+ dev->hard_header_len, GFP_ATOMIC);
	if (skb == NULL)
	{
		printk("ARP: no memory to send an arp packet\n");
		return;
	}
	skb_reserve(skb, dev->hard_header_len);
	arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4));
	skb->arp = 1;	/* frame is self-contained: never ARP for an ARP frame */
	skb->dev = dev;
	skb->free = 1;

	/*
	 *	Fill the device header for the ARP frame
	 */

	dev->hard_header(skb,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len);

	/* Fill out the arp protocol part. */
	arp->ar_hrd = htons(dev->type);
#ifdef CONFIG_AX25
#ifdef CONFIG_NETROM
	/* AX.25 / NET/ROM links carry IP under AX25_P_IP, not ETH_P_IP. */
	arp->ar_pro = (dev->type == ARPHRD_AX25 || dev->type == ARPHRD_NETROM) ? htons(AX25_P_IP) : htons(ETH_P_IP);
#else
	arp->ar_pro = (dev->type != ARPHRD_AX25) ? htons(ETH_P_IP) : htons(AX25_P_IP);
#endif
#else
	arp->ar_pro = htons(ETH_P_IP);
#endif
	arp->ar_hln = dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr=(unsigned char *)(arp+1);

	/* Payload order per RFC 826: sender hw, sender IP, target hw, target IP. */
	memcpy(arp_ptr, src_hw, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &src_ip,4);
	arp_ptr+=4;
	if (target_hw != NULL)
		memcpy(arp_ptr, target_hw, dev->addr_len);
	else
		memset(arp_ptr, 0, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &dest_ip, 4);

	dev_queue_xmit(skb, dev, 0);
}
689
690 /*
691 * This will try to retransmit everything on the queue.
692 */
693
/*
 * Flush the packet queue of a freshly-resolved entry: rebuild each
 * frame's hardware header now the address is known and hand it to
 * the device layer.
 */
static void arp_send_q(struct arp_table *entry)
{
	struct sk_buff *skb;

	unsigned long flags;

	/*
	 *	Empty the entire queue, building its data up ready to send
	 */

	if(!(entry->flags&ATF_COM))
	{
		printk("arp_send_q: incomplete entry for %s\n",
				in_ntoa(entry->ip));
		/* Can't flush the skb, because RFC1122 says to hang on to */
		/* at least one from any unresolved entry.	--MS */
		/* What's happened is that someone has 'unresolved' the entry
		   as we got to use it - this 'can't happen' -- AC */
		return;
	}

	save_flags(flags);

	cli();
	/* NOTE(review): after restore_flags below, the next skb_dequeue in
	   the loop condition runs with interrupts enabled -- matches the
	   original flow; skb_dequeue is presumably safe here, verify. */
	while((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		IS_SKB(skb);
		skb_device_lock(skb);
		restore_flags(flags);
		if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb))
		{
			skb->arp  = 1;	/* header complete; no further ARP needed */
			if(skb->sk==NULL)
				dev_queue_xmit(skb, skb->dev, 0);
			else
				dev_queue_xmit(skb,skb->dev,skb->sk->priority);
		}
	}
	restore_flags(flags);
}
734
735
736 /*
737 * Delete an ARP mapping entry in the cache.
738 */
739
740 static void arp_destroy(struct arp_table * entry)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
741 {
742 struct arp_table *entry1;
743 struct arp_table **pentry;
744
745 if (entry->flags & ATF_PUBL)
746 pentry = &arp_proxy_list;
747 else
748 pentry = &arp_tables[HASH(entry->ip)];
749
750 while ((entry1 = *pentry) != NULL)
751 {
752 if (entry1 == entry)
753 {
754 *pentry = entry1->next;
755 del_timer(&entry->timer);
756 arp_free_entry(entry);
757 return;
758 }
759 pentry = &entry1->next;
760 }
761 }
762
763 /*
764 * Receive an arp request by the device layer. Maybe I rewrite it, to
765 * use the incoming packet for the reply. The time for the current
766 * "overhead" isn't that high...
767 */
768
/*
 * Input handler for received ARP frames. Validates the header against
 * the receiving device, answers requests for our address or for proxy
 * entries, and caches the sender's mapping for replies to us (and for
 * requests addressed to us, since the sender likely wants to talk).
 * Always consumes the skb and returns 0.
 */
int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
{
/*
 *	We shouldn't use this type conversion. Check later.
 */

	struct arphdr *arp = (struct arphdr *)skb->h.raw;
	unsigned char *arp_ptr= (unsigned char *)(arp+1);
	struct arp_table *entry;
	struct arp_table *proxy_entry;
	unsigned long hash;
	unsigned char ha[MAX_ADDR_LEN];	/* So we can enable ints again. */
	unsigned char *sha,*tha;
	u32 sip,tip;

/*
 *	The hardware length of the packet should match the hardware length
 *	of the device. Similarly, the hardware types should match. The
 *	device should be ARP-able. Also, if pln is not 4, then the lookup
 *	is not from an IP number. We can't currently handle this, so toss
 *	it.
 */
	if (arp->ar_hln != dev->addr_len    ||
     		dev->type != ntohs(arp->ar_hrd) ||
		dev->flags & IFF_NOARP          ||
		arp->ar_pln != 4)
	{
		kfree_skb(skb, FREE_READ);
		return 0;
		/* Should this be an error/printk?  Seems like something */
		/* you'd want to know about. Unless it's just !IFF_NOARP. -- MS */
	}

/*
 *	Another test.
 *	The logic here is that the protocol being looked up by arp should
 *	match the protocol the device speaks.  If it doesn't, there is a
 *	problem, so toss the packet.
 */
/*	Again, should this be an error/printk? -- MS */

	switch (dev->type)
	{
#ifdef CONFIG_AX25
		case ARPHRD_AX25:
			if(arp->ar_pro != htons(AX25_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;
#endif
#ifdef CONFIG_NETROM
		case ARPHRD_NETROM:
			if(arp->ar_pro != htons(AX25_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;
#endif
		case ARPHRD_ETHER:
		case ARPHRD_ARCNET:
			if(arp->ar_pro != htons(ETH_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;

		case ARPHRD_IEEE802:
			if(arp->ar_pro != htons(ETH_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;

		default:
			printk("ARP: dev->type mangled!\n");
			kfree_skb(skb, FREE_READ);
			return 0;
	}

/*
 *	Extract fields: sender hw, sender IP, target hw, target IP.
 */

	sha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	tha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

/*
 *	Check for bad requests for 127.x.x.x and requests for multicast
 *	addresses.  If this is one such, delete it.
 */
	if (LOOPBACK(tip) || MULTICAST(tip))
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

/*
 *  Process entry.  The idea here is we want to send a reply if it is a
 *  request for us or if it is a request for someone else that we hold
 *  a proxy for.  We want to add an entry to our cache if it is a reply
 *  to us or if it is a request for our address.
 *  (The assumption for this last is that if someone is requesting our
 *  address, they are probably intending to talk to us, so it saves time
 *  if we cache their address.  Their address is also probably not in
 *  our cache, since ours is not in their cache.)
 *
 *  Putting this another way, we only care about replies if they are to
 *  us, in which case we add them to the cache.  For requests, we care
 *  about those for us and those for our proxies.  We reply to both,
 *  and in the case of requests for us we add the requester to the arp
 *  cache.
 */

	if (arp->ar_op == htons(ARPOP_REQUEST))
	{
/*
 * Only reply for the real device address or when it's in our proxy tables
 */
		if (tip != dev->pa_addr)
		{
/*
 * 	To get in here, it is a request for someone else.  We need to
 * 	check if that someone else is one of our proxies.  If it isn't,
 * 	we can toss it.
 */
			arp_fast_lock();

			for (proxy_entry=arp_proxy_list;
			     proxy_entry;
			     proxy_entry = proxy_entry->next)
			{
				/* we will respond to a proxy arp request
				   if the masked arp table ip matches the masked
				   tip. This allows a single proxy arp table
				   entry to be used on a gateway machine to handle
				   all requests for a whole network, rather than
				   having to use a huge number of proxy arp entries
				   and having to keep them uptodate.
				 */
				if (proxy_entry->dev == dev &&
				    !((proxy_entry->ip^tip)&proxy_entry->mask))
					break;

			}
			if (proxy_entry)
			{
				/* Copy the address before unlocking: the entry
				   could go away once the lock is dropped. */
				memcpy(ha, proxy_entry->ha, dev->addr_len);
				arp_unlock();
				arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,ha, sha);
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			else
			{
				arp_unlock();
				kfree_skb(skb, FREE_READ);
				return 0;
			}
		}
		else
		{
/*
 * 	To get here, it must be an arp request for us.  We need to reply.
 */
			arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
		}
	}
/*
 *	It is now an arp reply.
 */
	if(ip_chk_addr(tip)!=IS_MYADDR)
	{
/*
 *	Replies to other machines get tossed.
 */
		kfree_skb(skb, FREE_READ);
		return 0;
	}
/*
 * Now all replies are handled.  Next, anything that falls through to here
 * needs to be added to the arp cache, or have its entry updated if it is
 * there.
 */

	arp_fast_lock();

	hash = HASH(sip);

	for (entry=arp_tables[hash]; entry; entry=entry->next)
		if (entry->ip == sip && entry->dev == dev)
			break;

	if (entry)
	{
/*
 *	Entry found; update it only if it is not a permanent entry.
 */
		if (!(entry->flags & ATF_PERM)) {
			memcpy(entry->ha, sha, dev->addr_len);
			entry->last_updated = jiffies;
		}
		if (!(entry->flags & ATF_COM))
		{
/*
 *	This entry was incomplete.  Delete the retransmit timer
 *	and switch to complete status.
 */
			del_timer(&entry->timer);
			entry->flags |= ATF_COM;
			arp_update_hhs(entry);
/*
 *	Send out waiting packets. We might have problems, if someone is
 *	manually removing entries right now -- entry might become invalid
 *	underneath us.
 */
			arp_send_q(entry);
		}
	}
	else
	{
/*
 *	No entry found.  Need to add a new entry to the arp table.
 */
		entry = (struct arp_table *)kmalloc(sizeof(struct arp_table),GFP_ATOMIC);
		if(entry == NULL)
		{
			arp_unlock();
			printk("ARP: no memory for new arp entry\n");
			kfree_skb(skb, FREE_READ);
			return 0;
		}

		entry->mask = DEF_ARP_NETMASK;
		entry->ip = sip;
		entry->flags = ATF_COM;
		entry->hh    = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;
		memcpy(entry->ha, sha, dev->addr_len);
		entry->last_updated = entry->last_used = jiffies;
		entry->dev = skb->dev;
		skb_queue_head_init(&entry->skb);
		/* Only insert directly if we are the sole lock holder;
		   otherwise defer the insert to the backlog bottom-half. */
		if (arp_lock == 1)
		{
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
		}
		else
		{
#if RT_CACHE_DEBUG >= 1
			printk("arp_rcv: %08x backlogged\n", entry->ip);
#endif
			arp_enqueue(&arp_backlog, entry);
			arp_bh_mask |= ARP_BH_BACKLOG;
		}
	}

/*
 *	Replies have been sent, and entries have been added.  All done.
 */
	kfree_skb(skb, FREE_READ);
	arp_unlock();
	return 0;
}
1044
1045 /*
1046 * Lookup ARP entry by (addr, dev) pair.
1047 * Flags: ATF_PUBL - search for proxy entries
1048 * ATF_NETMASK - search for proxy network entry.
1049 * NOTE: should be called with locked ARP tables.
1050 */
1051
1052 static struct arp_table *arp_lookup(u32 paddr, unsigned short flags, struct device * dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
1053 {
1054 struct arp_table *entry;
1055
1056 if (!(flags & ATF_PUBL))
1057 {
1058 for (entry = arp_tables[HASH(paddr)];
1059 entry != NULL; entry = entry->next)
1060 if (entry->ip == paddr && entry->dev == dev)
1061 break;
1062 return entry;
1063 }
1064
1065 if (!(flags & ATF_NETMASK))
1066 {
1067 for (entry = arp_proxy_list;
1068 entry != NULL; entry = entry->next)
1069 if (entry->ip == paddr && entry->dev == dev)
1070 break;
1071 return entry;
1072 }
1073
1074 for (entry=arp_proxy_list; entry != NULL; entry = entry->next)
1075 if (!((entry->ip^paddr)&entry->mask) && entry->dev == dev)
1076 break;
1077 return entry;
1078 }
1079
1080 /*
1081 * Find an arp mapping in the cache. If not found, return false.
1082 */
1083
1084 int arp_query(unsigned char *haddr, u32 paddr, struct device * dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
1085 {
1086 struct arp_table *entry;
1087
1088 arp_fast_lock();
1089
1090 entry = arp_lookup(paddr, 0, dev);
1091
1092 if (entry != NULL)
1093 {
1094 entry->last_used = jiffies;
1095 if (entry->flags & ATF_COM)
1096 {
1097 memcpy(haddr, entry->ha, dev->addr_len);
1098 arp_unlock();
1099 return 1;
1100 }
1101 }
1102 arp_unlock();
1103 return 0;
1104 }
1105
1106
1107 static int arp_set_predefined(int addr_hint, unsigned char * haddr, __u32 paddr, struct device * dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
1108 {
1109 switch (addr_hint)
1110 {
1111 case IS_MYADDR:
1112 printk("ARP: arp called for own IP address\n");
1113 memcpy(haddr, dev->dev_addr, dev->addr_len);
1114 return 1;
1115 #ifdef CONFIG_IP_MULTICAST
1116 case IS_MULTICAST:
1117 if(dev->type==ARPHRD_ETHER || dev->type==ARPHRD_IEEE802)
1118 {
1119 u32 taddr;
1120 haddr[0]=0x01;
1121 haddr[1]=0x00;
1122 haddr[2]=0x5e;
1123 taddr=ntohl(paddr);
1124 haddr[5]=taddr&0xff;
1125 taddr=taddr>>8;
1126 haddr[4]=taddr&0xff;
1127 taddr=taddr>>8;
1128 haddr[3]=taddr&0x7f;
1129 return 1;
1130 }
1131 /*
1132 * If a device does not support multicast broadcast the stuff (eg AX.25 for now)
1133 */
1134 #endif
1135
1136 case IS_BROADCAST:
1137 memcpy(haddr, dev->broadcast, dev->addr_len);
1138 return 1;
1139 }
1140 return 0;
1141 }
1142
1143 /*
1144 * Find an arp mapping in the cache. If not found, post a request.
1145 */
1146
/*
 *	Find an arp mapping in the cache. If not found, post a request.
 *
 *	haddr - receives the hardware address on success.
 *	paddr - protocol (IP) address to resolve.
 *	dev   - device the packet will leave by.
 *	saddr - our source IP, used when an ARP request is sent.
 *	skb   - optional packet waiting on the resolution; it is either
 *	        queued on the entry, freed, or marked resolved.
 *
 *	Returns 0 when haddr is valid and the caller may transmit,
 *	1 when resolution is pending (or failed) and skb was consumed.
 */

int arp_find(unsigned char *haddr, u32 paddr, struct device *dev,
	     u32 saddr, struct sk_buff *skb)
{
	struct arp_table *entry;
	unsigned long hash;

	/* Own / multicast / broadcast addresses need no ARP exchange. */
	if (arp_set_predefined(ip_chk_addr(paddr), haddr, paddr, dev))
	{
		if (skb)
			skb->arp = 1;
		return 0;
	}

	hash = HASH(paddr);
	arp_fast_lock();

	/*
	 *	Find an entry
	 */
	entry = arp_lookup(paddr, 0, dev);

	if (entry != NULL) 	/* It exists */
	{
		if (!(entry->flags & ATF_COM))
		{
			/*
			 *	A request was already send, but no reply yet. Thus
			 *	queue the packet with the previous attempt
			 */

			if (skb != NULL)
			{
				if (entry->last_updated)
				{
					skb_queue_tail(&entry->skb, skb);
					skb_device_unlock(skb);
				}
				/*
				 * If last_updated==0 host is dead, so
				 * drop skb's and set socket error.
				 */
				else
				{
#if 0
					/*
					 * FIXME: ICMP HOST UNREACHABLE should be
					 * sent in this situation. --ANK
					 */
					if (skb->sk)
					{
						skb->sk->err = EHOSTDOWN;
						skb->sk->error_report(skb->sk);
					}
#else
					icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
#endif
					dev_kfree_skb(skb, FREE_WRITE);
				}
			}
			arp_unlock();
			return 1;
		}

		/*
		 *	Update the record
		 */

		entry->last_used = jiffies;
		memcpy(haddr, entry->ha, dev->addr_len);
		if (skb)
			skb->arp = 1;
		arp_unlock();
		return 0;
	}

	/*
	 *	Create a new unresolved entry.
	 */

	entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
					GFP_ATOMIC);
	if (entry != NULL)
	{
		entry->last_updated = entry->last_used = jiffies;
		entry->flags = 0;
		entry->ip = paddr;
		entry->mask = DEF_ARP_NETMASK;
		memset(entry->ha, 0, dev->addr_len);
		entry->dev = dev;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;
		entry->timer.expires = jiffies + ARP_RES_TIME;
		skb_queue_head_init(&entry->skb);
		if (skb != NULL)
		{
			skb_queue_tail(&entry->skb, skb);
			skb_device_unlock(skb);
		}
		/*
		 * arp_lock == 1 means we hold the only lock reference and
		 * may touch the hash table directly; otherwise defer the
		 * insertion to the backlog drained by arp_run_bh().
		 */
		if (arp_lock == 1)
		{
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
			add_timer(&entry->timer);
			entry->retries = ARP_MAX_TRIES;
		}
		else
		{
#if RT_CACHE_DEBUG >= 1
			printk("arp_find: %08x backlogged\n", entry->ip);
#endif
			arp_enqueue(&arp_backlog, entry);
			arp_bh_mask |= ARP_BH_BACKLOG;
		}
	}
	else if (skb != NULL)
		dev_kfree_skb(skb, FREE_WRITE);
	arp_unlock();

	/*
	 *	If we didn't find an entry, we will try to send an ARP packet.
	 */

	arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, saddr, NULL,
		 dev->dev_addr, NULL);

	return 1;
}
1276
1277
1278 /*
1279 * Write the contents of the ARP cache to a PROCfs file.
1280 */
1281
1282 #define HBUFFERLEN 30
1283
1284 int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
1285 {
1286 int len=0;
1287 off_t pos=0;
1288 int size;
1289 struct arp_table *entry;
1290 char hbuffer[HBUFFERLEN];
1291 int i,j,k;
1292 const char hexbuf[] = "0123456789ABCDEF";
1293
1294 size = sprintf(buffer,"IP address HW type Flags HW address Mask Device\n");
1295
1296 pos+=size;
1297 len+=size;
1298
1299 arp_fast_lock();
1300
1301 for(i=0; i<FULL_ARP_TABLE_SIZE; i++)
1302 {
1303 for(entry=arp_tables[i]; entry!=NULL; entry=entry->next)
1304 {
1305 /*
1306 * Convert hardware address to XX:XX:XX:XX ... form.
1307 */
1308 #ifdef CONFIG_AX25
1309 #ifdef CONFIG_NETROM
1310 if (entry->dev->type == ARPHRD_AX25 || entry->dev->type == ARPHRD_NETROM)
1311 strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
1312 else {
1313 #else
1314 if(entry->dev->type==ARPHRD_AX25)
1315 strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
1316 else {
1317 #endif
1318 #endif
1319
1320 for(k=0,j=0;k<HBUFFERLEN-3 && j<entry->dev->addr_len;j++)
1321 {
1322 hbuffer[k++]=hexbuf[ (entry->ha[j]>>4)&15 ];
1323 hbuffer[k++]=hexbuf[ entry->ha[j]&15 ];
1324 hbuffer[k++]=':';
1325 }
1326 hbuffer[--k]=0;
1327
1328 #ifdef CONFIG_AX25
1329 }
1330 #endif
1331 size = sprintf(buffer+len,
1332 "%-17s0x%-10x0x%-10x%s",
1333 in_ntoa(entry->ip),
1334 (unsigned int)entry->dev->type,
1335 entry->flags,
1336 hbuffer);
1337 #if RT_CACHE_DEBUG < 2
1338 size += sprintf(buffer+len+size,
1339 " %-17s %s\n",
1340 entry->mask==DEF_ARP_NETMASK ?
1341 "*" : in_ntoa(entry->mask), entry->dev->name);
1342 #else
1343 size += sprintf(buffer+len+size,
1344 " %-17s %s\t%ld\t%1d\n",
1345 entry->mask==DEF_ARP_NETMASK ?
1346 "*" : in_ntoa(entry->mask), entry->dev->name,
1347 entry->hh ? entry->hh->hh_refcnt : -1,
1348 entry->hh ? entry->hh->hh_uptodate : 0);
1349 #endif
1350
1351 len += size;
1352 pos += size;
1353
1354 if (pos <= offset)
1355 len=0;
1356 if (pos >= offset+length)
1357 break;
1358 }
1359 }
1360 arp_unlock();
1361
1362 *start = buffer+len-(pos-offset); /* Start of wanted data */
1363 len = pos-offset; /* Start slop */
1364 if (len>length)
1365 len = length; /* Ending slop */
1366 return len;
1367 }
1368
1369
1370
/*
 *	Bind a hardware-header cache entry for (paddr, htype) to *hhp.
 *
 *	Returns 0 when a freshly-created hh was bound, 1 when the caller
 *	already had one, an existing one was reused, or allocation failed.
 *
 *	NOTE(review): pre-SMP cli()/sti() style locking; the statement
 *	order around the hh list splices is significant - do not reorder.
 */
int arp_bind_cache(struct hh_cache ** hhp, struct device *dev, unsigned short htype, u32 paddr)
{
	struct arp_table *entry;
	struct hh_cache *hh = *hhp;
	int addr_hint;
	unsigned long flags;

	/* Caller is already bound - nothing to do. */
	if (hh)
		return 1;

	if ((addr_hint = ip_chk_addr(paddr)) != 0)
	{
		/*
		 * Predefined mapping (own / multicast / broadcast):
		 * build a standalone hh that never needs ARP resolution.
		 */
		unsigned char haddr[MAX_ADDR_LEN];
		if (hh)	/* redundant: hh is known NULL at this point */
			return 1;
		hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
		if (!hh)
			return 1;
		arp_set_predefined(addr_hint, haddr, paddr, dev);
		hh->hh_uptodate = 0;
		hh->hh_refcnt = 1;	/* single ref, held by *hhp */
		hh->hh_arp = NULL;
		hh->hh_next = NULL;
		hh->hh_type = htype;
		*hhp = hh;
		dev->header_cache_update(hh, dev, haddr);
		return 0;
	}

	save_flags(flags);

	arp_fast_lock();

	entry = arp_lookup(paddr, 0, dev);

	if (entry)
	{
		/* Reuse an hh of the right type already on the entry. */
		cli();
		for (hh = entry->hh; hh; hh=hh->hh_next)
			if (hh->hh_type == htype)
				break;
		if (hh)
		{
			hh->hh_refcnt++;
			*hhp = hh;
			restore_flags(flags);
			arp_unlock();
			return 1;
		}
		restore_flags(flags);
	}

	hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
	if (!hh)
	{
		arp_unlock();
		return 1;
	}

	hh->hh_uptodate = 0;
	hh->hh_refcnt = 1;	/* ref held by *hhp; entry link adds another */
	hh->hh_arp = NULL;
	hh->hh_next = NULL;
	hh->hh_type = htype;

	if (entry)
	{
		/*
		 * Entry exists already: fill the cached header from its
		 * hardware address and link the hh onto the entry.
		 */
		dev->header_cache_update(hh, dev, entry->ha);
		*hhp = hh;
		cli();
		hh->hh_arp = (void*)entry;
		entry->hh = hh;
		hh->hh_refcnt++;	/* second ref: the entry's link */
		restore_flags(flags);
		entry->last_used = jiffies;
		arp_unlock();
		return 0;
	}


	/*
	 *	Create a new unresolved entry.
	 */

	entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
					GFP_ATOMIC);
	if (entry == NULL)
	{
		kfree_s(hh, sizeof(struct hh_cache));
		arp_unlock();
		return 1;
	}

	entry->last_updated = entry->last_used = jiffies;
	entry->flags = 0;
	entry->ip = paddr;
	entry->mask = DEF_ARP_NETMASK;
	memset(entry->ha, 0, dev->addr_len);
	entry->dev = dev;
	entry->hh = hh;
	ATOMIC_INCR(&hh->hh_refcnt);	/* ref for the entry->hh link */
	init_timer(&entry->timer);
	entry->timer.function = arp_expire_request;
	entry->timer.data = (unsigned long)entry;
	entry->timer.expires = jiffies + ARP_RES_TIME;
	skb_queue_head_init(&entry->skb);

	/*
	 * arp_lock == 1: sole lock holder, so insert into the hash table
	 * and fire the first ARP request now; otherwise defer to the
	 * backlog drained by arp_run_bh().
	 */
	if (arp_lock == 1)
	{
		unsigned long hash = HASH(paddr);
		cli();
		entry->next = arp_tables[hash];
		arp_tables[hash] = entry;
		hh->hh_arp = (void*)entry;
		entry->retries = ARP_MAX_TRIES;
		restore_flags(flags);

		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
	}
	else
	{
#if RT_CACHE_DEBUG >= 1
		printk("arp_cache_bind: %08x backlogged\n", entry->ip);
#endif
		arp_enqueue(&arp_backlog, entry);
		arp_bh_mask |= ARP_BH_BACKLOG;
	}
	*hhp = hh;
	arp_unlock();
	return 0;
}
1503
/*
 *	Deferred (bottom-half) ARP work: drain the backlog of entries
 *	that could not be inserted while the hash table was locked.
 *	Each backlogged entry is either installed fresh, or - if an
 *	equivalent entry appeared meanwhile - merged into it and freed.
 */
static void arp_run_bh()
{
	unsigned long flags;
	struct arp_table *entry, *entry1;
	struct hh_cache *hh;
	__u32 sip;

	save_flags(flags);
	cli();
	/* Only run if nobody else currently holds the table lock. */
	if (!arp_lock)
	{
		arp_fast_lock();

		while ((entry = arp_dequeue(&arp_backlog)) != NULL)
		{
			unsigned long hash;
			sti();
			sip = entry->ip;
			hash = HASH(sip);

			/* It's possible, that an entry with the same pair
			 * (addr,type) was already created. Our entry is older,
			 * so it should be discarded.
			 */
			for (entry1=arp_tables[hash]; entry1; entry1=entry1->next)
				if (entry1->ip==sip && entry1->dev == entry->dev)
					break;

			if (!entry1)
			{
				/* No duplicate: install the backlogged entry
				 * and (re)start its resolution.
				 */
				struct device * dev = entry->dev;
				cli();
				entry->next = arp_tables[hash];
				arp_tables[hash] = entry;
				for (hh=entry->hh; hh; hh=hh->hh_next)
					hh->hh_arp = (void*)entry;
				sti();
				del_timer(&entry->timer);
				entry->timer.expires = jiffies + ARP_RES_TIME;
				add_timer(&entry->timer);
				entry->retries = ARP_MAX_TRIES;
				arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
#if RT_CACHE_DEBUG >= 1
				printk("arp_run_bh: %08x reinstalled\n", sip);
#endif
			}
			else
			{
				struct sk_buff * skb;
				struct hh_cache * next;

				/* Discard entry, but preserve its hh's and
				 * skb's.
				 */
				cli();
				/* Re-point every hh at the surviving entry. */
				for (hh=entry->hh; hh; hh=next)
				{
					next = hh->hh_next;
					hh->hh_next = entry1->hh;
					entry1->hh = hh;
					hh->hh_arp = (void*)entry1;
				}
				entry->hh = NULL;

				/* Prune skb list from entry
				 * and graft it to entry1.
				 */
				while ((skb = skb_dequeue(&entry->skb)) != NULL)
				{
					skb_device_lock(skb);
					sti();
					skb_queue_tail(&entry1->skb, skb);
					skb_device_unlock(skb);
					cli();
				}
				sti();

#if RT_CACHE_DEBUG >= 1
				printk("arp_run_bh: entry %08x was born dead\n", entry->ip);
#endif
				arp_free_entry(entry);

				/* If the survivor is already resolved, push
				 * the grafted packets out immediately.
				 */
				if (entry1->flags & ATF_COM)
				{
					arp_update_hhs(entry1);
					arp_send_q(entry1);
				}
			}
			cli();
		}
		arp_bh_mask &= ~ARP_BH_BACKLOG;
		arp_unlock();
	}
	restore_flags(flags);
}
1599
1600
1601 /*
1602 * Set (create) an ARP cache entry.
1603 */
1604
/*
 *	Set (create) an ARP cache entry - SIOCSARP.
 *	Any existing entry for the same (address, device) is destroyed
 *	and replaced.  Returns 0 on success or a negative errno.
 */
static int arp_req_set(struct arpreq *r, struct device * dev)
{
	struct arp_table *entry;
	struct sockaddr_in *si;
	struct rtable *rt;
	struct device * dev1;
	u32 ip;

	/*
	 *	Find out about the hardware type.
	 *	We have to be compatible with BSD UNIX, so we have to
	 *	assume that a "not set" value (i.e. 0) means Ethernet.
	 *
	 * ANK: Hey, who wrote it? Do you really mean that BSD considers
	 * ARPHRD_NETROM as ARPHRD_ETHER, or something else?
	 */

	si = (struct sockaddr_in *) &r->arp_pa;
	ip = si->sin_addr.s_addr;

	/*
	 *	Is it reachable ?
	 */

	rt = ip_rt_route(ip, 0);
	if (!rt)
		return -ENETUNREACH;
	dev1 = rt->rt_dev;
	ip_rt_put(rt);

	/*
	 * Proxy entries must be on a device OTHER than the routed one
	 * (we answer on the target's behalf); normal entries must match
	 * the routed device.
	 */
	if (((r->arp_flags & ATF_PUBL) && dev == dev1) ||
	    (!(r->arp_flags & ATF_PUBL) && dev != dev1))
		return -EINVAL;

#if RT_CACHE_DEBUG >= 1
	if (arp_lock)
		printk("arp_req_set: bug\n");
#endif
	arp_fast_lock();

	/*
	 *	Is there an existing entry for this address?
	 */

	/*
	 *	Find the entry
	 */

	entry = arp_lookup(ip, r->arp_flags & ~ATF_NETMASK, dev);

	/* Replace rather than update: drop the old entry first. */
	if (entry)
	{
		arp_destroy(entry);
		entry = NULL;
	}

	/*
	 *	Do we need to create a new entry
	 */

	if (entry == NULL)	/* always true here; kept for clarity */
	{
		entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
					GFP_ATOMIC);
		if (entry == NULL)
		{
			arp_unlock();
			return -ENOMEM;
		}
		entry->ip = ip;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;

		/* Proxy entries go on their own list; others are hashed. */
		if (r->arp_flags & ATF_PUBL)
		{
			cli();
			entry->next = arp_proxy_list;
			arp_proxy_list = entry;
			sti();
		}
		else
		{
			unsigned long hash = HASH(ip);
			cli();
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
			sti();
		}
		skb_queue_head_init(&entry->skb);
	}
	/*
	 *	We now have a pointer to an ARP entry. Update it!
	 */

	/* ATF_COM with an empty address means "use this device's own". */
	if ((r->arp_flags & ATF_COM) && !r->arp_ha.sa_data[0])
		memcpy(&entry->ha, dev->dev_addr, dev->addr_len);
	else
		memcpy(&entry->ha, &r->arp_ha.sa_data, dev->addr_len);
	entry->last_updated = entry->last_used = jiffies;
	entry->flags = r->arp_flags | ATF_COM;
	if ((entry->flags & ATF_PUBL) && (entry->flags & ATF_NETMASK))
	{
		si = (struct sockaddr_in *) &r->arp_netmask;
		entry->mask = si->sin_addr.s_addr;
	}
	else
		entry->mask = DEF_ARP_NETMASK;
	entry->dev = dev;
	arp_update_hhs(entry);	/* refresh any bound header caches */
	arp_unlock();
	return 0;
}
1719
1720
1721
1722 /*
1723 * Get an ARP cache entry.
1724 */
1725
1726 static int arp_req_get(struct arpreq *r, struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
1727 {
1728 struct arp_table *entry;
1729 struct sockaddr_in *si;
1730
1731 si = (struct sockaddr_in *) &r->arp_pa;
1732
1733 #if RT_CACHE_DEBUG >= 1
1734 if (arp_lock)
1735 printk("arp_req_set: bug\n");
1736 #endif
1737 arp_fast_lock();
1738
1739 entry = arp_lookup(si->sin_addr.s_addr, r->arp_flags|ATF_NETMASK, dev);
1740
1741 if (entry == NULL)
1742 {
1743 arp_unlock();
1744 return -ENXIO;
1745 }
1746
1747 /*
1748 * We found it; copy into structure.
1749 */
1750
1751 memcpy(r->arp_ha.sa_data, &entry->ha, entry->dev->addr_len);
1752 r->arp_ha.sa_family = entry->dev->type;
1753 r->arp_flags = entry->flags;
1754 strncpy(r->arp_dev, entry->dev->name, 16);
1755 arp_unlock();
1756 return 0;
1757 }
1758
1759 static int arp_req_delete(struct arpreq *r, struct device * dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
1760 {
1761 struct arp_table *entry;
1762 struct sockaddr_in *si;
1763
1764 si = (struct sockaddr_in *) &r->arp_pa;
1765 #if RT_CACHE_DEBUG >= 1
1766 if (arp_lock)
1767 printk("arp_req_delete: bug\n");
1768 #endif
1769 arp_fast_lock();
1770
1771 if (!(r->arp_flags & ATF_PUBL))
1772 {
1773 for (entry = arp_tables[HASH(si->sin_addr.s_addr)];
1774 entry != NULL; entry = entry->next)
1775 if (entry->ip == si->sin_addr.s_addr
1776 && entry->dev == dev)
1777 {
1778 arp_destroy(entry);
1779 arp_unlock();
1780 return 0;
1781 }
1782 }
1783 else
1784 {
1785 for (entry = arp_proxy_list;
1786 entry != NULL; entry = entry->next)
1787 if (entry->ip == si->sin_addr.s_addr
1788 && entry->dev == dev)
1789 {
1790 arp_destroy(entry);
1791 arp_unlock();
1792 return 0;
1793 }
1794 }
1795
1796 arp_unlock();
1797 return -ENXIO;
1798 }
1799
1800 /*
1801 * Handle an ARP layer I/O control request.
1802 */
1803
/*
 *	Handle an ARP layer I/O control request.
 *	Supports both the current (SIOC[DSG]ARP) and the legacy
 *	(OLD_SIOC[DSG]ARP) request layouts.
 */
int arp_ioctl(unsigned int cmd, void *arg)
{
	int err;
	struct arpreq r;

	struct device * dev = NULL;

	switch(cmd)
	{
		case SIOCDARP:
		case SIOCSARP:
			if (!suser())
				return -EPERM;
			/* fall through - fetch the request like SIOCGARP */
		case SIOCGARP:
			err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq));
			if (err)
				return err;
			memcpy_fromfs(&r, arg, sizeof(struct arpreq));
			break;
		case OLD_SIOCDARP:
		case OLD_SIOCSARP:
			if (!suser())
				return -EPERM;
			/* fall through - fetch the old-style request */
		case OLD_SIOCGARP:
			err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq_old));
			if (err)
				return err;
			memcpy_fromfs(&r, arg, sizeof(struct arpreq_old));
			/* old layout has no arp_dev field - clear ours */
			memset(&r.arp_dev, 0, sizeof(r.arp_dev));
			break;
		default:
			return -EINVAL;
	}

	/* Only IPv4 protocol addresses; 0.0.0.0 is never valid. */
	if (r.arp_pa.sa_family != AF_INET)
		return -EPFNOSUPPORT;
	if (((struct sockaddr_in *)&r.arp_pa)->sin_addr.s_addr == 0)
		return -EINVAL;

	if (r.arp_dev[0])
	{
		if ((dev = dev_get(r.arp_dev)) == NULL)
			return -ENODEV;

		/* An unset hardware family defaults to the device's type. */
		if (!r.arp_ha.sa_family)
			r.arp_ha.sa_family = dev->type;
		else if (r.arp_ha.sa_family != dev->type)
			return -EINVAL;
	}
	else
	{
		/*
		 *	Device was not specified. Take the first suitable one.
		 */
		if ((dev = dev_getbytype(r.arp_ha.sa_family)) == NULL)
			return -ENODEV;
	}

	switch(cmd)
	{
		case SIOCDARP:
		        return arp_req_delete(&r, dev);
		case SIOCSARP:
			return arp_req_set(&r, dev);
		case OLD_SIOCDARP:
			/* old SIOCDARP destroys both
			 * normal and proxy mappings
			 */
			r.arp_flags &= ~ATF_PUBL;
			err = arp_req_delete(&r, dev);
			r.arp_flags |= ATF_PUBL;
			if (!err)
				arp_req_delete(&r, dev);
			else
				err = arp_req_delete(&r, dev);
			return err;
		case OLD_SIOCSARP:
			err = arp_req_set(&r, dev);
			/* old SIOCSARP works so funny,
			 * that its behaviour can be emulated
			 * only approximately 8).
			 * It should work. --ANK
			 */
			if (r.arp_flags & ATF_PUBL)
			{
				r.arp_flags &= ~ATF_PUBL;
				arp_req_delete(&r, dev);
			}
			return err;
		case SIOCGARP:
			err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq));
			if (err)
				return err;
			err = arp_req_get(&r, dev);
			if (!err)
				memcpy_tofs(arg, &r, sizeof(r));
			return err;
		case OLD_SIOCGARP:
			err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq_old));
			if (err)
				return err;
			/* Try a normal entry first, then fall back to proxy. */
			r.arp_flags &= ~ATF_PUBL;
			err = arp_req_get(&r, dev);
			if (err < 0)
			{
				r.arp_flags |= ATF_PUBL;
				err = arp_req_get(&r, dev);
			}
			if (!err)
				memcpy_tofs(arg, &r, sizeof(struct arpreq_old));
			return err;
	}
	/*NOTREACHED*/
	return 0;
}
1919
1920
1921 /*
1922 * Called once on startup.
1923 */
1924
/*
 * Packet handler registered via dev_add_pack() in arp_init();
 * arp_rcv() is called for every incoming ARP frame on any device.
 * The type field is filled in at runtime by arp_init().
 */
static struct packet_type arp_packet_type =
{
	0,		/* Should be: __constant_htons(ETH_P_ARP) - but this _doesn't_ come out constant! */
	NULL,		/* All devices */
	arp_rcv,
	NULL,
	NULL
};
1933
/* Notifier hooked in arp_init(): arp_device_event() is invoked on
 * network device status changes (e.g. a device going down). */
static struct notifier_block arp_dev_notifier={
	arp_device_event,
	NULL,
	0
};
1939
/*
 * Called once on startup: register the ARP packet handler, start the
 * periodic cache-expiry timer, hook device-down notifications and
 * publish /proc/net/arp backed by arp_get_info().
 */
void arp_init (void)
{
	/* Register the packet type */
	arp_packet_type.type=htons(ETH_P_ARP);
	dev_add_pack(&arp_packet_type);
	/* Start with the regular checks for expired arp entries. */
	add_timer(&arp_timer);
	/* Register for device down reports */
	register_netdevice_notifier(&arp_dev_notifier);

	proc_net_register(&(struct proc_dir_entry) {
		PROC_NET_ARP, 3, "arp",
		S_IFREG | S_IRUGO, 1, 0, 0,
		0, &proc_net_inode_operations,
		arp_get_info
	});
}
1957