1 /* linux/net/inet/arp.c
2 *
3 * Copyright (C) 1994 by Florian La Roche
4 *
5 * This module implements the Address Resolution Protocol ARP (RFC 826),
6 * which is used to convert IP addresses (or in the future maybe other
7 *	high-level addresses) into a low-level hardware address (like an Ethernet
8 * address).
9 *
10 * FIXME:
11 * Experiment with better retransmit timers
12 * Clean up the timer deletions
13 *	If you create a proxy entry, set your interface address to the address
14 *	and then delete it, proxies may get out of sync with reality - check this
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 *
21 *
22 * Fixes:
23 * Alan Cox : Removed the ethernet assumptions in Florian's code
24 * Alan Cox : Fixed some small errors in the ARP logic
25 * Alan Cox : Allow >4K in /proc
26 * Alan Cox : Make ARP add its own protocol entry
27 *
28 * Ross Martin : Rewrote arp_rcv() and arp_get_info()
29 * Stephen Henson : Add AX25 support to arp_get_info()
30 * Alan Cox : Drop data when a device is downed.
31 * Alan Cox : Use init_timer().
32 * Alan Cox : Double lock fixes.
33 * Martin Seine : Move the arphdr structure
34 *		to if_arp.h for compatibility
35 * with BSD based programs.
36 * Andrew Tridgell : Added ARP netmask code and
37 * re-arranged proxy handling.
38 * Alan Cox : Changed to use notifiers.
39 * Niibe Yutaka : Reply for this device or proxies only.
40 * Alan Cox : Don't proxy across hardware types!
41 * Jonathan Naylor : Added support for NET/ROM.
42 * Mike Shaver : RFC1122 checks.
43 * Jonathan Naylor : Only lookup the hardware address for
44 * the correct hardware type.
45 * Germano Caronni : Assorted subtle races.
46 * Craig Schlenter : Don't modify permanent entry
47 * during arp_rcv.
48 * Russ Nelson : Tidied up a few bits.
49 * Alexey Kuznetsov: Major changes to caching and behaviour,
50 * eg intelligent arp probing and generation
51 * of host down events.
52 * Alan Cox : Missing unlock in device events.
53 * Eckes : ARP ioctl control errors.
54 */
55
56 /* RFC1122 Status:
57 2.3.2.1 (ARP Cache Validation):
58 MUST provide mechanism to flush stale cache entries (OK)
59 SHOULD be able to configure cache timeout (NOT YET)
60 MUST throttle ARP retransmits (OK)
61 2.3.2.2 (ARP Packet Queue):
62 SHOULD save at least one packet from each "conversation" with an
63 unresolved IP address. (OK)
64 950727 -- MS
65 */
66
67 #include <linux/types.h>
68 #include <linux/string.h>
69 #include <linux/kernel.h>
70 #include <linux/sched.h>
71 #include <linux/config.h>
72 #include <linux/socket.h>
73 #include <linux/sockios.h>
74 #include <linux/errno.h>
75 #include <linux/if_arp.h>
76 #include <linux/in.h>
77 #include <linux/mm.h>
78 #include <linux/inet.h>
79 #include <linux/netdevice.h>
80 #include <linux/etherdevice.h>
81 #include <linux/trdevice.h>
82 #include <linux/skbuff.h>
83 #include <linux/proc_fs.h>
84 #include <linux/stat.h>
85
86 #include <net/ip.h>
87 #include <net/icmp.h>
88 #include <net/route.h>
89 #include <net/protocol.h>
90 #include <net/tcp.h>
91 #include <net/sock.h>
92 #include <net/arp.h>
93 #ifdef CONFIG_AX25
94 #include <net/ax25.h>
95 #ifdef CONFIG_NETROM
96 #include <net/netrom.h>
97 #endif
98 #endif
99 #ifdef CONFIG_NET_ALIAS
100 #include <linux/net_alias.h>
101 #endif
102
103 #include <asm/system.h>
104 #include <asm/segment.h>
105
106 #include <stdarg.h>
107
108 /*
109 * This structure defines the ARP mapping cache. While we make changes
110 * to this structure, we keep interrupts off. But normally we can copy the
111 * hardware address and the device pointer into local variables and then
112 * make any "long calls" to send a packet out.
113 */
114
115 struct arp_table
116 {
117 struct arp_table *next; /* Linked entry list */
118 unsigned long last_used; /* For expiry */
119 unsigned long last_updated; /* For expiry */
120 unsigned int flags; /* Control status */
121 u32 ip; /* ip address of entry */
122 u32 mask; /* netmask - used for generalised proxy arps (tridge) */
123 unsigned char ha[MAX_ADDR_LEN]; /* Hardware address */
124 struct device *dev; /* Device the entry is tied to */
125
126 /*
127 * The following entries are only used for unresolved hw addresses.
128 */
129
130 struct timer_list timer; /* expire timer */
131 int retries; /* remaining retries */
132 struct sk_buff_head skb; /* list of queued packets */
133 struct hh_cache *hh;
134 };
135
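
/*
 * A minimal illustrative sketch (compiled out, helper name is hypothetical)
 * of the discipline described above: copy the hardware address and the
 * device pointer into locals with interrupts off, then make the "long call"
 * with interrupts enabled again.  The skb is assumed to be already filled.
 */
#if 0
static void example_use_arp_entry(struct arp_table *entry, struct sk_buff *skb)
{
	unsigned char ha[MAX_ADDR_LEN];
	struct device *dev;
	unsigned long flags;

	save_flags(flags);
	cli();					/* keep the critical section short */
	memcpy(ha, entry->ha, entry->dev->addr_len);
	dev = entry->dev;
	restore_flags(flags);

	/* The "long call": build the link-level header and queue the frame. */
	dev->hard_header(skb, dev, ETH_P_IP, ha, NULL, skb->len);
	dev_queue_xmit(skb, dev, 0);
}
#endif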
136
137 /*
138 *	Configurable Parameters (don't touch unless you know what you are doing)
139 */
140
141 /*
142 *	If an arp request is sent, ARP_RES_TIME is the timeout value until the
143 *	next request is sent.
144 * RFC1122: OK. Throttles ARPing, as per 2.3.2.1. (MUST)
145 * The recommended minimum timeout is 1 second per destination.
146 *	This timeout is prolonged to ARP_DEAD_RES_TIME, if the
147 *	destination does not respond.
148 */
149
150 #define ARP_RES_TIME (5*HZ)
151 #define ARP_DEAD_RES_TIME (60*HZ)
152
153 /*
154 *	The number of times an arp request is sent before the host is
155 *	considered temporarily unreachable.
156 */
157
158 #define ARP_MAX_TRIES 3
159
160 /*
161 * After that time, an unused entry is deleted from the arp table.
162 */
163
164 #define ARP_TIMEOUT (600*HZ)
165
166 /*
167 *	How often the function 'arp_check_expire' is called.
168 * An unused entry is invalidated in the time between ARP_TIMEOUT and
169 * (ARP_TIMEOUT+ARP_CHECK_INTERVAL).
170 */
171
172 #define ARP_CHECK_INTERVAL (60*HZ)
173
174 /*
175 *	The entry is reconfirmed by sending a point-to-point ARP
176 *	request after ARP_CONFIRM_INTERVAL. If the destination does not respond
177 *	for ARP_CONFIRM_TIMEOUT, the normal broadcast resolution scheme is started.
178 */
179
180 #define ARP_CONFIRM_INTERVAL (300*HZ)
181 #define ARP_CONFIRM_TIMEOUT ARP_RES_TIME
182
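
/*
 * Putting the parameters above together, the life of an entry looks roughly
 * like this (a summary of the behaviour implemented below, not an extra knob):
 *
 *   resolve:  request at t=0, retransmitted every ARP_RES_TIME up to
 *             ARP_MAX_TRIES times; after that only one request every
 *             ARP_DEAD_RES_TIME while the entry is still referenced.
 *   confirm:  a resolved entry is re-probed point-to-point once it has not
 *             been updated for ARP_CONFIRM_INTERVAL; if no reply arrives
 *             within ARP_CONFIRM_TIMEOUT, broadcast resolution starts again.
 *   expire:   an unused, non-permanent entry is removed between ARP_TIMEOUT
 *             and ARP_TIMEOUT+ARP_CHECK_INTERVAL after its last use.
 */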
183 static unsigned long arp_lock;
184 static unsigned long arp_bh_mask;
185
186 #define ARP_BH_BACKLOG 1
187
188 static struct arp_table *arp_backlog;
189
190 static void arp_run_bh(void);
191 static void arp_check_expire (unsigned long);
192
193 static struct timer_list arp_timer =
194 { NULL, NULL, ARP_CHECK_INTERVAL, 0L, &arp_check_expire };
195
196 /*
197 *	The default arp netmask is just 255.255.255.255, which means it's
198 *	a single machine entry. Only proxy entries can have other netmasks.
199 */
200
201 #define DEF_ARP_NETMASK (~0)
202
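/*
 * A proxy entry with a non-default mask answers for a whole network.  Below
 * is a compiled-out sketch (the helper name is illustrative only) of the
 * matching test used by arp_rcv() and arp_lookup(): with ip = 192.168.7.0
 * and mask = 255.255.255.0, any target in 192.168.7.0/24 matches, because
 * the masked addresses compare equal.
 */
#if 0
static int example_proxy_match(struct arp_table *proxy, u32 tip)
{
	/* True when tip falls inside the proxy entry's masked network. */
	return !((proxy->ip ^ tip) & proxy->mask);
}
#endif
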
203 /*
204 * The size of the hash table. Must be a power of two.
205 * Maybe we should remove hashing in the future for arp and concentrate
206 * on Patrick Schaaf's Host-Cache-Lookup...
207 */
208
209 #define ARP_TABLE_SIZE 16
210 #define FULL_ARP_TABLE_SIZE (ARP_TABLE_SIZE+1)
211
212 struct arp_table *arp_tables[FULL_ARP_TABLE_SIZE] =
213 {
214 NULL,
215 };
216
217 #define arp_proxy_list arp_tables[ARP_TABLE_SIZE]
218
219 /*
220 * The last bits in the IP address are used for the cache lookup.
221 * A special entry is used for proxy arp entries
222 */
223
224 #define HASH(paddr) (htonl(paddr) & (ARP_TABLE_SIZE - 1))
225
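/*
 * A worked example of the hash above: the last octet of the address supplies
 * the low bits, so with ARP_TABLE_SIZE 16 the entry for 192.168.1.23 lands in
 * bucket 23 & 15 = 7, and 192.168.1.39 lands in the same bucket (39 & 15 = 7).
 */
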
226 /*
227 * Lock/unlock arp_table chains.
228 */
229
230 static __inline__ void arp_fast_lock(void)
231 {
232 ATOMIC_INCR(&arp_lock);
233 }
234
235 static __inline__ void arp_fast_unlock(void)
236 {
237 ATOMIC_DECR(&arp_lock);
238 }
239
240 static __inline__ void arp_unlock(void)
241 {
242 if (!ATOMIC_DECR_AND_CHECK(&arp_lock) && arp_bh_mask)
243 arp_run_bh();
244 }
245
246 /*
247 * Enqueue to FIFO list.
248 */
249
250 static void arp_enqueue(struct arp_table **q, struct arp_table *entry)
251 {
252 unsigned long flags;
253 struct arp_table * tail;
254
255 save_flags(flags);
256 cli();
257 tail = *q;
258 if (!tail)
259 entry->next = entry;
260 else
261 {
262 entry->next = tail->next;
263 tail->next = entry;
264 }
265 *q = entry;
266 restore_flags(flags);
267 return;
268 }
269
270 /*
271 * Dequeue from FIFO list,
272 * caller should mask interrupts.
273 */
274
275 static struct arp_table * arp_dequeue(struct arp_table **q)
276 {
277 struct arp_table * entry;
278
279 if (*q)
280 {
281 entry = (*q)->next;
282 (*q)->next = entry->next;
283 if (entry->next == entry)
284 *q = NULL;
285 entry->next = NULL;
286 return entry;
287 }
288 return NULL;
289 }
290
291 /*
292 * Purge all linked skb's of the entry.
293 */
294
295 static void arp_release_entry(struct arp_table *entry)
296 {
297 struct sk_buff *skb;
298 unsigned long flags;
299
300 save_flags(flags);
301 cli();
302 /* Release the list of `skb' pointers. */
303 while ((skb = skb_dequeue(&entry->skb)) != NULL)
304 {
305 skb_device_lock(skb);
306 restore_flags(flags);
307 dev_kfree_skb(skb, FREE_WRITE);
308 cli();
309 }
310 restore_flags(flags);
311 return;
312 }
313
314 /*
315 * Release the entry and all resources linked to it: skb's, hh's, timer
316 * and certainly memory.
317 */
318
319 static void arp_free_entry(struct arp_table *entry)
320 {
321 unsigned long flags;
322 struct hh_cache *hh, *next;
323
324 del_timer(&entry->timer);
325
326 save_flags(flags);
327 cli();
328 arp_release_entry(entry);
329
330 for (hh = entry->hh; hh; hh = next)
331 {
332 next = hh->hh_next;
333 hh->hh_arp = NULL;
334 if (!--hh->hh_refcnt)
335 			kfree_s(hh, sizeof(struct hh_cache));
336 }
337 restore_flags(flags);
338
339 kfree_s(entry, sizeof(struct arp_table));
340 return;
341 }
342
343 /*
344 * How many users has this entry?
345 */
346
347 static __inline__ int arp_count_hhs(struct arp_table * entry)
348 {
349 struct hh_cache *hh, **hhp;
350 int count = 0;
351
352 hhp = &entry->hh;
353 while ((hh=*hhp) != NULL)
354 {
355 if (hh->hh_refcnt == 1)
356 {
357 *hhp = hh->hh_next;
358 kfree_s(hh, sizeof(struct hh_cache));
359 continue;
360 }
361 count += hh->hh_refcnt-1;
362 hhp = &hh->hh_next;
363 }
364
365 return count;
366 }
367
368 /*
369 * Invalidate all hh's, so that higher level will not try to use it.
370 */
371
372 static __inline__ void arp_invalidate_hhs(struct arp_table * entry)
373 {
374 struct hh_cache *hh;
375
376 for (hh=entry->hh; hh; hh=hh->hh_next)
377 hh->hh_uptodate = 0;
378 }
379
380 /*
381 * Signal to device layer, that hardware address may be changed.
382 */
383
384 static __inline__ void arp_update_hhs(struct arp_table * entry)
385 {
386 struct hh_cache *hh;
387
388 for (hh=entry->hh; hh; hh=hh->hh_next)
389 entry->dev->header_cache_update(hh, entry->dev, entry->ha);
390 }
391
392 /*
393 * Check if there are entries that are too old and remove them. If the
394 * ATF_PERM flag is set, they are always left in the arp cache (permanent entries).
395 * If an entry has not been confirmed for ARP_CONFIRM_INTERVAL,
396 * declare it invalid and send a point-to-point ARP request.
397 * If it is still not confirmed after ARP_CONFIRM_TIMEOUT,
398 * hand it over to arp_expire_request to be shredded.
399 */
400
401 static void arp_check_expire(unsigned long dummy)
402 {
403 int i;
404 unsigned long now = jiffies;
405
406 del_timer(&arp_timer);
407
408 if (!arp_lock)
409 {
410 arp_fast_lock();
411
412 for (i = 0; i < ARP_TABLE_SIZE; i++)
413 {
414 struct arp_table *entry;
415 struct arp_table **pentry;
416
417 pentry = &arp_tables[i];
418
419 while ((entry = *pentry) != NULL)
420 {
421 cli();
422 if (now - entry->last_used > ARP_TIMEOUT
423 && !(entry->flags & ATF_PERM)
424 && !arp_count_hhs(entry))
425 {
426 *pentry = entry->next;
427 sti();
428 #if RT_CACHE_DEBUG >= 2
429 printk("arp_expire: %08x expired\n", entry->ip);
430 #endif
431 arp_free_entry(entry);
432 }
433 else if (entry->last_updated
434 && now - entry->last_updated > ARP_CONFIRM_INTERVAL
435 && !(entry->flags & ATF_PERM))
436 {
437 struct device * dev = entry->dev;
438 pentry = &entry->next;
439 entry->flags &= ~ATF_COM;
440 arp_invalidate_hhs(entry);
441 sti();
442 entry->retries = ARP_MAX_TRIES+1;
443 del_timer(&entry->timer);
444 entry->timer.expires = jiffies + ARP_CONFIRM_TIMEOUT;
445 add_timer(&entry->timer);
446 arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip,
447 dev, dev->pa_addr, entry->ha,
448 dev->dev_addr, NULL);
449 #if RT_CACHE_DEBUG >= 2
450 printk("arp_expire: %08x requires confirmation\n", entry->ip);
451 #endif
452 }
453 else
454 pentry = &entry->next; /* go to next entry */
455 }
456 }
457 arp_unlock();
458 }
459
460 ip_rt_check_expire();
461
462 /*
463 * Set the timer again.
464 */
465
466 arp_timer.expires = jiffies + ARP_CHECK_INTERVAL;
467 add_timer(&arp_timer);
468 }
469
470 /*
471 * This function is called if an entry is not resolved within ARP_RES_TIME.
472 * When more than ARP_MAX_TRIES retries have been done, release the queued skb's,
473 * but do not discard the entry itself if it is in use.
474 */
475
476 static void arp_expire_request (unsigned long arg)
477 {
478 struct arp_table *entry = (struct arp_table *) arg;
479 struct arp_table **pentry;
480 unsigned long hash;
481 unsigned long flags;
482
483 save_flags(flags);
484 cli();
485
486 /*
487 * Since all timeouts are handled with interrupts enabled, there is a
488 * small chance, that this entry has just been resolved by an incoming
489 * packet. This is the only race condition, but it is handled...
490 */
491
492 if (entry->flags & ATF_COM)
493 {
494 restore_flags(flags);
495 return;
496 }
497
498 if (arp_lock)
499 {
500 #if RT_CACHE_DEBUG >= 1
501 printk("arp_expire_request: %08x postponed\n", entry->ip);
502 #endif
503 del_timer(&entry->timer);
504 entry->timer.expires = jiffies + HZ/10;
505 add_timer(&entry->timer);
506 restore_flags(flags);
507 return;
508 }
509
510 arp_fast_lock();
511 restore_flags(flags);
512
513 if (entry->last_updated && --entry->retries > 0)
514 {
515 struct device *dev = entry->dev;
516
517 #if RT_CACHE_DEBUG >= 2
518 printk("arp_expire_request: %08x timed out\n", entry->ip);
519 #endif
520 /* Set new timer. */
521 del_timer(&entry->timer);
522 entry->timer.expires = jiffies + ARP_RES_TIME;
523 add_timer(&entry->timer);
524 arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
525 NULL, dev->dev_addr, NULL);
526 arp_unlock();
527 return;
528 }
529
530 arp_release_entry(entry);
531
532 cli();
533 if (arp_count_hhs(entry))
534 {
535 struct device *dev = entry->dev;
536 #if RT_CACHE_DEBUG >= 2
537 printk("arp_expire_request: %08x is dead\n", entry->ip);
538 #endif
539 arp_release_entry(entry);
540 entry->retries = ARP_MAX_TRIES;
541 restore_flags(flags);
542 entry->last_updated = 0;
543 del_timer(&entry->timer);
544 entry->timer.expires = jiffies + ARP_DEAD_RES_TIME;
545 add_timer(&entry->timer);
546 arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
547 NULL, dev->dev_addr, NULL);
548 arp_unlock();
549 return;
550 }
551 restore_flags(flags);
552
553 hash = HASH(entry->ip);
554
555 pentry = &arp_tables[hash];
556
557 while (*pentry != NULL)
558 {
559 if (*pentry == entry)
560 {
561 cli();
562 *pentry = entry->next;
563 restore_flags(flags);
564 #if RT_CACHE_DEBUG >= 2
565 printk("arp_expire_request: %08x is killed\n", entry->ip);
566 #endif
567 arp_free_entry(entry);
568 arp_unlock();
569 return;
570 }
571 pentry = &(*pentry)->next;
572 }
573 printk("arp_expire_request: bug: ARP entry is lost!\n");
574 arp_unlock();
575 }
576
577 /*
578 * Purge a device from the ARP queue
579 */
580
581 int arp_device_event(struct notifier_block *this, unsigned long event, void *ptr)
582 {
583 struct device *dev=ptr;
584 int i;
585
586 if (event != NETDEV_DOWN)
587 return NOTIFY_DONE;
588 /*
589 * This is a bit OTT - maybe we need some arp semaphores instead.
590 */
591
592 #if RT_CACHE_DEBUG >= 1
593 if (arp_lock)
594 printk("arp_device_event: bug\n");
595 #endif
596 arp_fast_lock();
597
598 for (i = 0; i < FULL_ARP_TABLE_SIZE; i++)
599 {
600 struct arp_table *entry;
601 struct arp_table **pentry = &arp_tables[i];
602
603 while ((entry = *pentry) != NULL)
604 {
605 if (entry->dev == dev)
606 {
607 *pentry = entry->next; /* remove from list */
608 arp_free_entry(entry);
609 }
610 else
611 pentry = &entry->next; /* go to next entry */
612 }
613 }
614 arp_unlock();
615 return NOTIFY_DONE;
616 }
617
618
619 /*
620 * Create and send an arp packet. If (dest_hw == NULL), we create a broadcast
621 * message.
622 */
623
624 void arp_send(int type, int ptype, u32 dest_ip,
625 struct device *dev, u32 src_ip,
626 unsigned char *dest_hw, unsigned char *src_hw,
627 unsigned char *target_hw)
628 {
629 struct sk_buff *skb;
630 struct arphdr *arp;
631 unsigned char *arp_ptr;
632
633 /*
634 * No arp on this interface.
635 */
636
637 if (dev->flags&IFF_NOARP)
638 return;
639
640 /*
641 * Allocate a buffer
642 */
643
644 skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
645 + dev->hard_header_len, GFP_ATOMIC);
646 if (skb == NULL)
647 {
648 printk("ARP: no memory to send an arp packet\n");
649 return;
650 }
651 skb_reserve(skb, dev->hard_header_len);
652 arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4));
653 skb->arp = 1;
654 skb->dev = dev;
655 skb->free = 1;
656 skb->protocol = htons (ETH_P_IP);
657
658 /*
659 * Fill the device header for the ARP frame
660 */
661
662 dev->hard_header(skb,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len);
663
664 /* Fill out the arp protocol part. */
665 arp->ar_hrd = htons(dev->type);
666 #ifdef CONFIG_AX25
667 #ifdef CONFIG_NETROM
668 arp->ar_pro = (dev->type == ARPHRD_AX25 || dev->type == ARPHRD_NETROM) ? htons(AX25_P_IP) : htons(ETH_P_IP);
669 #else
670 arp->ar_pro = (dev->type != ARPHRD_AX25) ? htons(ETH_P_IP) : htons(AX25_P_IP);
671 #endif
672 #else
673 arp->ar_pro = htons(ETH_P_IP);
674 #endif
675 arp->ar_hln = dev->addr_len;
676 arp->ar_pln = 4;
677 arp->ar_op = htons(type);
678
679 arp_ptr=(unsigned char *)(arp+1);
680
681 memcpy(arp_ptr, src_hw, dev->addr_len);
682 arp_ptr+=dev->addr_len;
683 memcpy(arp_ptr, &src_ip,4);
684 arp_ptr+=4;
685 if (target_hw != NULL)
686 memcpy(arp_ptr, target_hw, dev->addr_len);
687 else
688 memset(arp_ptr, 0, dev->addr_len);
689 arp_ptr+=dev->addr_len;
690 memcpy(arp_ptr, &dest_ip, 4);
691
692 dev_queue_xmit(skb, dev, 0);
693 }
694
695 /*
696 * This will try to retransmit everything on the queue.
697 */
698
699 static void arp_send_q(struct arp_table *entry)
700 {
701 struct sk_buff *skb;
702
703 unsigned long flags;
704
705 /*
706 * Empty the entire queue, building its data up ready to send
707 */
708
709 if(!(entry->flags&ATF_COM))
710 {
711 printk("arp_send_q: incomplete entry for %s\n",
712 in_ntoa(entry->ip));
713 /* Can't flush the skb, because RFC1122 says to hang on to */
714 /* at least one from any unresolved entry. --MS */
715 	/* What's happened is that someone has 'unresolved' the entry
716 	   as we got to use it - this 'can't happen' -- AC */
717 return;
718 }
719
720 save_flags(flags);
721
722 cli();
723 while((skb = skb_dequeue(&entry->skb)) != NULL)
724 {
725 IS_SKB(skb);
726 skb_device_lock(skb);
727 restore_flags(flags);
728 if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb))
729 {
730 skb->arp = 1;
731 if(skb->sk==NULL)
732 dev_queue_xmit(skb, skb->dev, 0);
733 else
734 dev_queue_xmit(skb,skb->dev,skb->sk->priority);
735 }
736 }
737 restore_flags(flags);
738 }
739
740
741 /*
742 * Delete an ARP mapping entry in the cache.
743 */
744
745 static void arp_destroy(struct arp_table * entry)
746 {
747 struct arp_table *entry1;
748 struct arp_table **pentry;
749
750 if (entry->flags & ATF_PUBL)
751 pentry = &arp_proxy_list;
752 else
753 pentry = &arp_tables[HASH(entry->ip)];
754
755 while ((entry1 = *pentry) != NULL)
756 {
757 if (entry1 == entry)
758 {
759 *pentry = entry1->next;
760 del_timer(&entry->timer);
761 arp_free_entry(entry);
762 return;
763 }
764 pentry = &entry1->next;
765 }
766 }
767
768 /*
769 * Receive an arp request from the device layer. Maybe I'll rewrite it to
770 * use the incoming packet for the reply. The cost of the current
771 * "overhead" isn't that high...
772 */
773
774 int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
775 {
776 /*
777 * We shouldn't use this type conversion. Check later.
778 */
779
780 struct arphdr *arp = (struct arphdr *)skb->h.raw;
781 unsigned char *arp_ptr= (unsigned char *)(arp+1);
782 struct arp_table *entry;
783 struct arp_table *proxy_entry;
784 unsigned long hash;
785 unsigned char ha[MAX_ADDR_LEN]; /* So we can enable ints again. */
786 unsigned char *sha,*tha;
787 u32 sip,tip;
788
789 /*
790 * The hardware length of the packet should match the hardware length
791 * of the device. Similarly, the hardware types should match. The
792 * device should be ARP-able. Also, if pln is not 4, then the lookup
793 * is not from an IP number. We can't currently handle this, so toss
794 * it.
795 */
796 if (arp->ar_hln != dev->addr_len ||
797 dev->type != ntohs(arp->ar_hrd) ||
798 dev->flags & IFF_NOARP ||
799 arp->ar_pln != 4)
800 {
801 kfree_skb(skb, FREE_READ);
802 return 0;
803 /* Should this be an error/printk? Seems like something */
804 /* you'd want to know about. Unless it's just !IFF_NOARP. -- MS */
805 }
806
807 /*
808 * Another test.
809 * The logic here is that the protocol being looked up by arp should
810 * match the protocol the device speaks. If it doesn't, there is a
811 * problem, so toss the packet.
812 */
813 /* Again, should this be an error/printk? -- MS */
814
815 switch (dev->type)
816 {
817 #ifdef CONFIG_AX25
818 case ARPHRD_AX25:
819 if(arp->ar_pro != htons(AX25_P_IP))
820 {
821 kfree_skb(skb, FREE_READ);
822 return 0;
823 }
824 break;
825 #endif
826 #ifdef CONFIG_NETROM
827 case ARPHRD_NETROM:
828 if(arp->ar_pro != htons(AX25_P_IP))
829 {
830 kfree_skb(skb, FREE_READ);
831 return 0;
832 }
833 break;
834 #endif
835 case ARPHRD_ETHER:
836 case ARPHRD_ARCNET:
837 if(arp->ar_pro != htons(ETH_P_IP))
838 {
839 kfree_skb(skb, FREE_READ);
840 return 0;
841 }
842 break;
843
844 case ARPHRD_IEEE802:
845 if(arp->ar_pro != htons(ETH_P_IP))
846 {
847 kfree_skb(skb, FREE_READ);
848 return 0;
849 }
850 break;
851
852 default:
853 printk("ARP: dev->type mangled!\n");
854 kfree_skb(skb, FREE_READ);
855 return 0;
856 }
857
858 /*
859 * Extract fields
860 */
861
862 sha=arp_ptr;
863 arp_ptr += dev->addr_len;
864 memcpy(&sip, arp_ptr, 4);
865 arp_ptr += 4;
866 tha=arp_ptr;
867 arp_ptr += dev->addr_len;
868 memcpy(&tip, arp_ptr, 4);
869
870 /*
871 * Check for bad requests for 127.x.x.x and requests for multicast
872 * addresses. If this is one such, delete it.
873 */
874 if (LOOPBACK(tip) || MULTICAST(tip))
875 {
876 kfree_skb(skb, FREE_READ);
877 return 0;
878 }
879
880 /*
881 * Process entry. The idea here is we want to send a reply if it is a
882 * request for us or if it is a request for someone else that we hold
883 * a proxy for. We want to add an entry to our cache if it is a reply
884 * to us or if it is a request for our address.
885 * (The assumption for this last is that if someone is requesting our
886 * address, they are probably intending to talk to us, so it saves time
887 * if we cache their address. Their address is also probably not in
888 * our cache, since ours is not in their cache.)
889 *
890 * Putting this another way, we only care about replies if they are to
891 * us, in which case we add them to the cache. For requests, we care
892 * about those for us and those for our proxies. We reply to both,
893 * and in the case of requests for us we add the requester to the arp
894 * cache.
895 */
896
897 /*
898 * try to switch to alias device whose addr is tip or closest to sip.
899 */
900
901 #ifdef CONFIG_NET_ALIAS
902 if (tip != dev->pa_addr && net_alias_has(skb->dev))
903 {
904 /*
905 		 * net_alias_dev_rcv_sel32 returns the main dev if it fails to find another.
906 */
907 dev = net_alias_dev_rcv_sel32(dev, AF_INET, sip, tip);
908
909 if (dev->type != ntohs(arp->ar_hrd) || dev->flags & IFF_NOARP)
910 {
911 kfree_skb(skb, FREE_READ);
912 return 0;
913 }
914 }
915 #endif
916
917 if (arp->ar_op == htons(ARPOP_REQUEST))
918 {
919 /*
920 * Only reply for the real device address or when it's in our proxy tables
921 */
922 if (tip != dev->pa_addr)
923 {
924 /*
925 * To get in here, it is a request for someone else. We need to
926 * check if that someone else is one of our proxies. If it isn't,
927 * we can toss it.
928 */
929 arp_fast_lock();
930
931 for (proxy_entry=arp_proxy_list;
932 proxy_entry;
933 proxy_entry = proxy_entry->next)
934 {
935 /* we will respond to a proxy arp request
936 if the masked arp table ip matches the masked
937 tip. This allows a single proxy arp table
938 entry to be used on a gateway machine to handle
939 all requests for a whole network, rather than
940 having to use a huge number of proxy arp entries
941 			   and having to keep them up to date.
942 */
943 if (proxy_entry->dev == dev &&
944 !((proxy_entry->ip^tip)&proxy_entry->mask))
945 break;
946
947 }
948 if (proxy_entry)
949 {
950 memcpy(ha, proxy_entry->ha, dev->addr_len);
951 arp_unlock();
952 arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,ha, sha);
953 kfree_skb(skb, FREE_READ);
954 return 0;
955 }
956 else
957 {
958 arp_unlock();
959 kfree_skb(skb, FREE_READ);
960 return 0;
961 }
962 }
963 else
964 {
965 /*
966 * To get here, it must be an arp request for us. We need to reply.
967 */
968 arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
969 }
970 }
971 /*
972 * It is now an arp reply.
973 */
974 if(ip_chk_addr(tip)!=IS_MYADDR)
975 {
976 /*
977 * Replies to other machines get tossed.
978 */
979 kfree_skb(skb, FREE_READ);
980 return 0;
981 }
982 /*
983 * Now all replies are handled. Next, anything that falls through to here
984 * needs to be added to the arp cache, or have its entry updated if it is
985 * there.
986 */
987
988 arp_fast_lock();
989
990 hash = HASH(sip);
991
992 for (entry=arp_tables[hash]; entry; entry=entry->next)
993 if (entry->ip == sip && entry->dev == dev)
994 break;
995
996 if (entry)
997 {
998 /*
999 * Entry found; update it only if it is not a permanent entry.
1000 */
1001 if (!(entry->flags & ATF_PERM)) {
1002 memcpy(entry->ha, sha, dev->addr_len);
1003 entry->last_updated = jiffies;
1004 }
1005 if (!(entry->flags & ATF_COM))
1006 {
1007 /*
1008 * This entry was incomplete. Delete the retransmit timer
1009 * and switch to complete status.
1010 */
1011 del_timer(&entry->timer);
1012 entry->flags |= ATF_COM;
1013 arp_update_hhs(entry);
1014 /*
1015 * Send out waiting packets. We might have problems, if someone is
1016 * manually removing entries right now -- entry might become invalid
1017 * underneath us.
1018 */
1019 arp_send_q(entry);
1020 }
1021 }
1022 else
1023 {
1024 /*
1025 * No entry found. Need to add a new entry to the arp table.
1026 */
1027 entry = (struct arp_table *)kmalloc(sizeof(struct arp_table),GFP_ATOMIC);
1028 if(entry == NULL)
1029 {
1030 arp_unlock();
1031 printk("ARP: no memory for new arp entry\n");
1032 kfree_skb(skb, FREE_READ);
1033 return 0;
1034 }
1035
1036 entry->mask = DEF_ARP_NETMASK;
1037 entry->ip = sip;
1038 entry->flags = ATF_COM;
1039 entry->hh = NULL;
1040 init_timer(&entry->timer);
1041 entry->timer.function = arp_expire_request;
1042 entry->timer.data = (unsigned long)entry;
1043 memcpy(entry->ha, sha, dev->addr_len);
1044 entry->last_updated = entry->last_used = jiffies;
1045 /*
1046 * make entry point to 'correct' device
1047 */
1048
1049 #ifdef CONFIG_NET_ALIAS
1050 entry->dev = dev;
1051 #else
1052 entry->dev = skb->dev;
1053 #endif
1054 skb_queue_head_init(&entry->skb);
1055 if (arp_lock == 1)
1056 {
1057 entry->next = arp_tables[hash];
1058 arp_tables[hash] = entry;
1059 }
1060 else
1061 {
1062 #if RT_CACHE_DEBUG >= 1
1063 printk("arp_rcv: %08x backlogged\n", entry->ip);
1064 #endif
1065 arp_enqueue(&arp_backlog, entry);
1066 arp_bh_mask |= ARP_BH_BACKLOG;
1067 }
1068 }
1069
1070 /*
1071 * Replies have been sent, and entries have been added. All done.
1072 */
1073 kfree_skb(skb, FREE_READ);
1074 arp_unlock();
1075 return 0;
1076 }
1077
1078 /*
1079 * Lookup ARP entry by (addr, dev) pair.
1080 * Flags: ATF_PUBL - search for proxy entries
1081 * ATF_NETMASK - search for proxy network entry.
1082 * NOTE: should be called with locked ARP tables.
1083 */
1084
1085 static struct arp_table *arp_lookup(u32 paddr, unsigned short flags, struct device * dev)
1086 {
1087 struct arp_table *entry;
1088
1089 if (!(flags & ATF_PUBL))
1090 {
1091 for (entry = arp_tables[HASH(paddr)];
1092 entry != NULL; entry = entry->next)
1093 if (entry->ip == paddr && (!dev || entry->dev == dev))
1094 break;
1095 return entry;
1096 }
1097
1098 if (!(flags & ATF_NETMASK))
1099 {
1100 for (entry = arp_proxy_list;
1101 entry != NULL; entry = entry->next)
1102 if (entry->ip == paddr && (!dev || entry->dev == dev))
1103 break;
1104 return entry;
1105 }
1106
1107 for (entry=arp_proxy_list; entry != NULL; entry = entry->next)
1108 if (!((entry->ip^paddr)&entry->mask) &&
1109 (!dev || entry->dev == dev))
1110 break;
1111 return entry;
1112 }
1113
1114 /*
1115 * Find an arp mapping in the cache. If not found, return false.
1116 */
1117
1118 int arp_query(unsigned char *haddr, u32 paddr, struct device * dev)
1119 {
1120 struct arp_table *entry;
1121
1122 arp_fast_lock();
1123
1124 entry = arp_lookup(paddr, 0, dev);
1125
1126 if (entry != NULL)
1127 {
1128 entry->last_used = jiffies;
1129 if (entry->flags & ATF_COM)
1130 {
1131 memcpy(haddr, entry->ha, dev->addr_len);
1132 arp_unlock();
1133 return 1;
1134 }
1135 }
1136 arp_unlock();
1137 return 0;
1138 }
1139
1140
1141 static int arp_set_predefined(int addr_hint, unsigned char * haddr, __u32 paddr, struct device * dev)
1142 {
1143 switch (addr_hint)
1144 {
1145 case IS_MYADDR:
1146 printk("ARP: arp called for own IP address\n");
1147 memcpy(haddr, dev->dev_addr, dev->addr_len);
1148 return 1;
1149 #ifdef CONFIG_IP_MULTICAST
1150 case IS_MULTICAST:
1151 if(dev->type==ARPHRD_ETHER || dev->type==ARPHRD_IEEE802)
1152 {
1153 u32 taddr;
1154 haddr[0]=0x01;
1155 haddr[1]=0x00;
1156 haddr[2]=0x5e;
1157 taddr=ntohl(paddr);
1158 haddr[5]=taddr&0xff;
1159 taddr=taddr>>8;
1160 haddr[4]=taddr&0xff;
1161 taddr=taddr>>8;
1162 haddr[3]=taddr&0x7f;
1163 return 1;
1164 }
1165 /*
1166 		 *	If a device does not support multicast, broadcast the stuff (eg AX.25 for now)
1167 */
1168 #endif
1169
1170 case IS_BROADCAST:
1171 memcpy(haddr, dev->broadcast, dev->addr_len);
1172 return 1;
1173 }
1174 return 0;
1175 }
1176
1177 /*
1178 * Find an arp mapping in the cache. If not found, post a request.
1179 */
1180
1181 int arp_find(unsigned char *haddr, u32 paddr, struct device *dev,
1182 u32 saddr, struct sk_buff *skb)
1183 {
1184 struct arp_table *entry;
1185 unsigned long hash;
1186
1187 if (arp_set_predefined(ip_chk_addr(paddr), haddr, paddr, dev))
1188 {
1189 if (skb)
1190 skb->arp = 1;
1191 return 0;
1192 }
1193
1194 hash = HASH(paddr);
1195 arp_fast_lock();
1196
1197 /*
1198 * Find an entry
1199 */
1200 entry = arp_lookup(paddr, 0, dev);
1201
1202 if (entry != NULL) /* It exists */
1203 {
1204 if (!(entry->flags & ATF_COM))
1205 {
1206 /*
1207 			 *	A request was already sent, but no reply yet. Thus
1208 			 *	queue the packet with the previous attempt.
1209 */
1210
1211 if (skb != NULL)
1212 {
1213 if (entry->last_updated)
1214 {
1215 skb_queue_tail(&entry->skb, skb);
1216 skb_device_unlock(skb);
1217 }
1218 /*
1219 * If last_updated==0 host is dead, so
1220 * drop skb's and set socket error.
1221 */
1222 else
1223 {
1224 #if 0
1225 /*
1226 * FIXME: ICMP HOST UNREACHABLE should be
1227 * sent in this situation. --ANK
1228 */
1229 if (skb->sk)
1230 {
1231 skb->sk->err = EHOSTDOWN;
1232 skb->sk->error_report(skb->sk);
1233 }
1234 #else
1235 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
1236 #endif
1237 dev_kfree_skb(skb, FREE_WRITE);
1238 }
1239 }
1240 arp_unlock();
1241 return 1;
1242 }
1243
1244 /*
1245 * Update the record
1246 */
1247
1248 entry->last_used = jiffies;
1249 memcpy(haddr, entry->ha, dev->addr_len);
1250 if (skb)
1251 skb->arp = 1;
1252 arp_unlock();
1253 return 0;
1254 }
1255
1256 /*
1257 * Create a new unresolved entry.
1258 */
1259
1260 entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
1261 GFP_ATOMIC);
1262 if (entry != NULL)
1263 {
1264 entry->last_updated = entry->last_used = jiffies;
1265 entry->flags = 0;
1266 entry->ip = paddr;
1267 entry->mask = DEF_ARP_NETMASK;
1268 memset(entry->ha, 0, dev->addr_len);
1269 entry->dev = dev;
1270 entry->hh = NULL;
1271 init_timer(&entry->timer);
1272 entry->timer.function = arp_expire_request;
1273 entry->timer.data = (unsigned long)entry;
1274 entry->timer.expires = jiffies + ARP_RES_TIME;
1275 skb_queue_head_init(&entry->skb);
1276 if (skb != NULL)
1277 {
1278 skb_queue_tail(&entry->skb, skb);
1279 skb_device_unlock(skb);
1280 }
1281 if (arp_lock == 1)
1282 {
1283 entry->next = arp_tables[hash];
1284 arp_tables[hash] = entry;
1285 add_timer(&entry->timer);
1286 entry->retries = ARP_MAX_TRIES;
1287 }
1288 else
1289 {
1290 #if RT_CACHE_DEBUG >= 1
1291 printk("arp_find: %08x backlogged\n", entry->ip);
1292 #endif
1293 arp_enqueue(&arp_backlog, entry);
1294 arp_bh_mask |= ARP_BH_BACKLOG;
1295 }
1296 }
1297 else if (skb != NULL)
1298 dev_kfree_skb(skb, FREE_WRITE);
1299 arp_unlock();
1300
1301 /*
1302 * If we didn't find an entry, we will try to send an ARP packet.
1303 */
1304
1305 arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, saddr, NULL,
1306 dev->dev_addr, NULL);
1307
1308 return 1;
1309 }
1310
1311
1312 /*
1313 * Write the contents of the ARP cache to a PROCfs file.
1314 */
1315
1316 #define HBUFFERLEN 30
1317
1318 int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
1319 {
1320 int len=0;
1321 off_t pos=0;
1322 int size;
1323 struct arp_table *entry;
1324 char hbuffer[HBUFFERLEN];
1325 int i,j,k;
1326 const char hexbuf[] = "0123456789ABCDEF";
1327
1328 size = sprintf(buffer,"IP address HW type Flags HW address Mask Device\n");
1329
1330 pos+=size;
1331 len+=size;
1332
1333 arp_fast_lock();
1334
1335 for(i=0; i<FULL_ARP_TABLE_SIZE; i++)
1336 {
1337 for(entry=arp_tables[i]; entry!=NULL; entry=entry->next)
1338 {
1339 /*
1340 * Convert hardware address to XX:XX:XX:XX ... form.
1341 */
1342 #ifdef CONFIG_AX25
1343 #ifdef CONFIG_NETROM
1344 if (entry->dev->type == ARPHRD_AX25 || entry->dev->type == ARPHRD_NETROM)
1345 strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
1346 else {
1347 #else
1348 if(entry->dev->type==ARPHRD_AX25)
1349 strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
1350 else {
1351 #endif
1352 #endif
1353
1354 for(k=0,j=0;k<HBUFFERLEN-3 && j<entry->dev->addr_len;j++)
1355 {
1356 hbuffer[k++]=hexbuf[ (entry->ha[j]>>4)&15 ];
1357 hbuffer[k++]=hexbuf[ entry->ha[j]&15 ];
1358 hbuffer[k++]=':';
1359 }
1360 hbuffer[--k]=0;
1361
1362 #ifdef CONFIG_AX25
1363 }
1364 #endif
1365 size = sprintf(buffer+len,
1366 "%-17s0x%-10x0x%-10x%s",
1367 in_ntoa(entry->ip),
1368 (unsigned int)entry->dev->type,
1369 entry->flags,
1370 hbuffer);
1371 #if RT_CACHE_DEBUG < 2
1372 size += sprintf(buffer+len+size,
1373 " %-17s %s\n",
1374 entry->mask==DEF_ARP_NETMASK ?
1375 "*" : in_ntoa(entry->mask), entry->dev->name);
1376 #else
1377 size += sprintf(buffer+len+size,
1378 " %-17s %s\t%ld\t%1d\n",
1379 entry->mask==DEF_ARP_NETMASK ?
1380 "*" : in_ntoa(entry->mask), entry->dev->name,
1381 entry->hh ? entry->hh->hh_refcnt : -1,
1382 entry->hh ? entry->hh->hh_uptodate : 0);
1383 #endif
1384
1385 len += size;
1386 pos += size;
1387
1388 if (pos <= offset)
1389 len=0;
1390 if (pos >= offset+length)
1391 break;
1392 }
1393 }
1394 arp_unlock();
1395
1396 *start = buffer+len-(pos-offset); /* Start of wanted data */
1397 len = pos-offset; /* Start slop */
1398 if (len>length)
1399 len = length; /* Ending slop */
1400 return len;
1401 }
1402
1403
1404
1405 int arp_bind_cache(struct hh_cache ** hhp, struct device *dev, unsigned short htype, u32 paddr)
1406 {
1407 struct arp_table *entry;
1408 struct hh_cache *hh = *hhp;
1409 int addr_hint;
1410 unsigned long flags;
1411
1412 if (hh)
1413 return 1;
1414
1415 if ((addr_hint = ip_chk_addr(paddr)) != 0)
1416 {
1417 unsigned char haddr[MAX_ADDR_LEN];
1418 if (hh)
1419 return 1;
1420 hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
1421 if (!hh)
1422 return 1;
1423 arp_set_predefined(addr_hint, haddr, paddr, dev);
1424 hh->hh_uptodate = 0;
1425 hh->hh_refcnt = 1;
1426 hh->hh_arp = NULL;
1427 hh->hh_next = NULL;
1428 hh->hh_type = htype;
1429 *hhp = hh;
1430 dev->header_cache_update(hh, dev, haddr);
1431 return 0;
1432 }
1433
1434 save_flags(flags);
1435
1436 arp_fast_lock();
1437
1438 entry = arp_lookup(paddr, 0, dev);
1439
1440 if (entry)
1441 {
1442 cli();
1443 for (hh = entry->hh; hh; hh=hh->hh_next)
1444 if (hh->hh_type == htype)
1445 break;
1446 if (hh)
1447 {
1448 hh->hh_refcnt++;
1449 *hhp = hh;
1450 restore_flags(flags);
1451 arp_unlock();
1452 return 1;
1453 }
1454 restore_flags(flags);
1455 }
1456
1457 hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
1458 if (!hh)
1459 {
1460 arp_unlock();
1461 return 1;
1462 }
1463
1464 hh->hh_uptodate = 0;
1465 hh->hh_refcnt = 1;
1466 hh->hh_arp = NULL;
1467 hh->hh_next = NULL;
1468 hh->hh_type = htype;
1469
1470 if (entry)
1471 {
1472 dev->header_cache_update(hh, dev, entry->ha);
1473 *hhp = hh;
1474 cli();
1475 hh->hh_arp = (void*)entry;
1476 entry->hh = hh;
1477 hh->hh_refcnt++;
1478 restore_flags(flags);
1479 entry->last_used = jiffies;
1480 arp_unlock();
1481 return 0;
1482 }
1483
1484
1485 /*
1486 * Create a new unresolved entry.
1487 */
1488
1489 entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
1490 GFP_ATOMIC);
1491 if (entry == NULL)
1492 {
1493 kfree_s(hh, sizeof(struct hh_cache));
1494 arp_unlock();
1495 return 1;
1496 }
1497
1498 entry->last_updated = entry->last_used = jiffies;
1499 entry->flags = 0;
1500 entry->ip = paddr;
1501 entry->mask = DEF_ARP_NETMASK;
1502 memset(entry->ha, 0, dev->addr_len);
1503 entry->dev = dev;
1504 entry->hh = hh;
1505 ATOMIC_INCR(&hh->hh_refcnt);
1506 init_timer(&entry->timer);
1507 entry->timer.function = arp_expire_request;
1508 entry->timer.data = (unsigned long)entry;
1509 entry->timer.expires = jiffies + ARP_RES_TIME;
1510 skb_queue_head_init(&entry->skb);
1511
1512 if (arp_lock == 1)
1513 {
1514 unsigned long hash = HASH(paddr);
1515 cli();
1516 entry->next = arp_tables[hash];
1517 arp_tables[hash] = entry;
1518 hh->hh_arp = (void*)entry;
1519 entry->retries = ARP_MAX_TRIES;
1520 restore_flags(flags);
1521
1522 add_timer(&entry->timer);
1523 arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
1524 }
1525 else
1526 {
1527 #if RT_CACHE_DEBUG >= 1
1528 printk("arp_cache_bind: %08x backlogged\n", entry->ip);
1529 #endif
1530 arp_enqueue(&arp_backlog, entry);
1531 arp_bh_mask |= ARP_BH_BACKLOG;
1532 }
1533 *hhp = hh;
1534 arp_unlock();
1535 return 0;
1536 }
1537
1538 static void arp_run_bh()
1539 {
1540 unsigned long flags;
1541 struct arp_table *entry, *entry1;
1542 struct hh_cache *hh;
1543 __u32 sip;
1544
1545 save_flags(flags);
1546 cli();
1547 if (!arp_lock)
1548 {
1549 arp_fast_lock();
1550
1551 while ((entry = arp_dequeue(&arp_backlog)) != NULL)
1552 {
1553 unsigned long hash;
1554 sti();
1555 sip = entry->ip;
1556 hash = HASH(sip);
1557
1558 			/* It's possible that an entry with the same pair
1559 * (addr,type) was already created. Our entry is older,
1560 * so it should be discarded.
1561 */
1562 for (entry1=arp_tables[hash]; entry1; entry1=entry1->next)
1563 if (entry1->ip==sip && entry1->dev == entry->dev)
1564 break;
1565
1566 if (!entry1)
1567 {
1568 struct device * dev = entry->dev;
1569 cli();
1570 entry->next = arp_tables[hash];
1571 arp_tables[hash] = entry;
1572 for (hh=entry->hh; hh; hh=hh->hh_next)
1573 hh->hh_arp = (void*)entry;
1574 sti();
1575 del_timer(&entry->timer);
1576 entry->timer.expires = jiffies + ARP_RES_TIME;
1577 add_timer(&entry->timer);
1578 entry->retries = ARP_MAX_TRIES;
1579 arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
1580 #if RT_CACHE_DEBUG >= 1
1581 printk("arp_run_bh: %08x reinstalled\n", sip);
1582 #endif
1583 }
1584 else
1585 {
1586 struct sk_buff * skb;
1587 struct hh_cache * next;
1588
1589 /* Discard entry, but preserve its hh's and
1590 * skb's.
1591 */
1592 cli();
1593 for (hh=entry->hh; hh; hh=next)
1594 {
1595 next = hh->hh_next;
1596 hh->hh_next = entry1->hh;
1597 entry1->hh = hh;
1598 hh->hh_arp = (void*)entry1;
1599 }
1600 entry->hh = NULL;
1601
1602 /* Prune skb list from entry
1603 * and graft it to entry1.
1604 */
1605 while ((skb = skb_dequeue(&entry->skb)) != NULL)
1606 {
1607 skb_device_lock(skb);
1608 sti();
1609 skb_queue_tail(&entry1->skb, skb);
1610 skb_device_unlock(skb);
1611 cli();
1612 }
1613 sti();
1614
1615 #if RT_CACHE_DEBUG >= 1
1616 printk("arp_run_bh: entry %08x was born dead\n", entry->ip);
1617 #endif
1618 arp_free_entry(entry);
1619
1620 if (entry1->flags & ATF_COM)
1621 {
1622 arp_update_hhs(entry1);
1623 arp_send_q(entry1);
1624 }
1625 }
1626 cli();
1627 }
1628 arp_bh_mask &= ~ARP_BH_BACKLOG;
1629 arp_unlock();
1630 }
1631 restore_flags(flags);
1632 }
1633
1634 /*
1635 * Test if a hardware address is all zero
1636 */
1637
1638 static inline int empty(unsigned char * addr, int len)
1639 {
1640 while (len > 0) {
1641 if (*addr)
1642 return 0;
1643 len--;
1644 addr++;
1645 }
1646 return 1;
1647 }
1648
1649 /*
1650 * Set (create) an ARP cache entry.
1651 */
1652
1653 static int arp_req_set(struct arpreq *r, struct device * dev)
1654 {
1655 struct arp_table *entry;
1656 struct sockaddr_in *si;
1657 struct rtable *rt;
1658 struct device *dev1;
1659 unsigned char *ha;
1660 u32 ip;
1661
1662 /*
1663 * Extract destination.
1664 */
1665
1666 si = (struct sockaddr_in *) &r->arp_pa;
1667 ip = si->sin_addr.s_addr;
1668
1669 /*
1670 * Is it reachable ?
1671 */
1672
1673 if (ip_chk_addr(ip) == IS_MYADDR)
1674 dev1 = dev_get("lo");
1675 else {
1676 rt = ip_rt_route(ip, 0);
1677 if (!rt)
1678 return -ENETUNREACH;
1679 dev1 = rt->rt_dev;
1680 ip_rt_put(rt);
1681 }
1682
1683 	/* good guess about the device if it isn't an ATF_PUBL entry */
1684 if (!dev) {
1685 if (dev1->flags&(IFF_LOOPBACK|IFF_NOARP))
1686 return -ENODEV;
1687 dev = dev1;
1688 }
1689
1690 	/* this needs to be checked only for dev=dev1 but it doesn't hurt */
1691 if (r->arp_ha.sa_family != dev->type)
1692 return -EINVAL;
1693
1694 if (((r->arp_flags & ATF_PUBL) && dev == dev1) ||
1695 (!(r->arp_flags & ATF_PUBL) && dev != dev1))
1696 return -EINVAL;
1697
1698 #if RT_CACHE_DEBUG >= 1
1699 if (arp_lock)
1700 printk("arp_req_set: bug\n");
1701 #endif
1702 arp_fast_lock();
1703
1704 /*
1705 * Is there an existing entry for this address?
1706 */
1707
1708 /*
1709 * Find the entry
1710 */
1711
1712 entry = arp_lookup(ip, r->arp_flags & ~ATF_NETMASK, dev);
1713
1714 if (entry)
1715 {
1716 arp_destroy(entry);
1717 entry = NULL;
1718 }
1719
1720 /*
1721 * Do we need to create a new entry
1722 */
1723
1724 if (entry == NULL)
1725 {
1726 entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
1727 GFP_ATOMIC);
1728 if (entry == NULL)
1729 {
1730 arp_unlock();
1731 return -ENOMEM;
1732 }
1733 entry->ip = ip;
1734 entry->hh = NULL;
1735 init_timer(&entry->timer);
1736 entry->timer.function = arp_expire_request;
1737 entry->timer.data = (unsigned long)entry;
1738
1739 if (r->arp_flags & ATF_PUBL)
1740 {
1741 cli();
1742 entry->next = arp_proxy_list;
1743 arp_proxy_list = entry;
1744 sti();
1745 }
1746 else
1747 {
1748 unsigned long hash = HASH(ip);
1749 cli();
1750 entry->next = arp_tables[hash];
1751 arp_tables[hash] = entry;
1752 sti();
1753 }
1754 skb_queue_head_init(&entry->skb);
1755 }
1756 /*
1757 * We now have a pointer to an ARP entry. Update it!
1758 */
1759 ha = r->arp_ha.sa_data;
1760 if ((r->arp_flags & ATF_COM) && empty(ha, dev->addr_len))
1761 ha = dev->dev_addr;
1762 memcpy(entry->ha, ha, dev->addr_len);
1763 entry->last_updated = entry->last_used = jiffies;
1764 entry->flags = r->arp_flags | ATF_COM;
1765 if ((entry->flags & ATF_PUBL) && (entry->flags & ATF_NETMASK))
1766 {
1767 si = (struct sockaddr_in *) &r->arp_netmask;
1768 entry->mask = si->sin_addr.s_addr;
1769 }
1770 else
1771 entry->mask = DEF_ARP_NETMASK;
1772 entry->dev = dev;
1773 arp_update_hhs(entry);
1774 arp_unlock();
1775 return 0;
1776 }
1777
1778
1779
1780 /*
1781 * Get an ARP cache entry.
1782 */
1783
1784 static int arp_req_get(struct arpreq *r, struct device *dev)
1785 {
1786 struct arp_table *entry;
1787 struct sockaddr_in *si;
1788
1789 si = (struct sockaddr_in *) &r->arp_pa;
1790
1791 #if RT_CACHE_DEBUG >= 1
1792 if (arp_lock)
1793 		printk("arp_req_get: bug\n");
1794 #endif
1795 arp_fast_lock();
1796
1797 entry = arp_lookup(si->sin_addr.s_addr, r->arp_flags|ATF_NETMASK, dev);
1798
1799 if (entry == NULL)
1800 {
1801 arp_unlock();
1802 return -ENXIO;
1803 }
1804
1805 /*
1806 * We found it; copy into structure.
1807 */
1808
1809 memcpy(r->arp_ha.sa_data, &entry->ha, entry->dev->addr_len);
1810 r->arp_ha.sa_family = entry->dev->type;
1811 r->arp_flags = entry->flags;
1812 strncpy(r->arp_dev, entry->dev->name, 16);
1813 arp_unlock();
1814 return 0;
1815 }
1816
1817 static int arp_req_delete(struct arpreq *r, struct device * dev)
1818 {
1819 struct arp_table *entry;
1820 struct sockaddr_in *si;
1821
1822 si = (struct sockaddr_in *) &r->arp_pa;
1823 #if RT_CACHE_DEBUG >= 1
1824 if (arp_lock)
1825 printk("arp_req_delete: bug\n");
1826 #endif
1827 arp_fast_lock();
1828
1829 if (!(r->arp_flags & ATF_PUBL))
1830 {
1831 for (entry = arp_tables[HASH(si->sin_addr.s_addr)];
1832 entry != NULL; entry = entry->next)
1833 if (entry->ip == si->sin_addr.s_addr
1834 && (!dev || entry->dev == dev))
1835 {
1836 arp_destroy(entry);
1837 arp_unlock();
1838 return 0;
1839 }
1840 }
1841 else
1842 {
1843 for (entry = arp_proxy_list;
1844 entry != NULL; entry = entry->next)
1845 if (entry->ip == si->sin_addr.s_addr
1846 && (!dev || entry->dev == dev))
1847 {
1848 arp_destroy(entry);
1849 arp_unlock();
1850 return 0;
1851 }
1852 }
1853
1854 arp_unlock();
1855 return -ENXIO;
1856 }
1857
1858 /*
1859 * Handle an ARP layer I/O control request.
1860 */
1861
1862 int arp_ioctl(unsigned int cmd, void *arg)
1863 {
1864 int err;
1865 struct arpreq r;
1866
1867 struct device * dev = NULL;
1868
1869 switch(cmd)
1870 {
1871 case SIOCDARP:
1872 case SIOCSARP:
1873 if (!suser())
1874 return -EPERM;
1875 case SIOCGARP:
1876 err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq));
1877 if (err)
1878 return err;
1879 memcpy_fromfs(&r, arg, sizeof(struct arpreq));
1880 break;
1881 case OLD_SIOCDARP:
1882 case OLD_SIOCSARP:
1883 if (!suser())
1884 return -EPERM;
1885 case OLD_SIOCGARP:
1886 err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq_old));
1887 if (err)
1888 return err;
1889 memcpy_fromfs(&r, arg, sizeof(struct arpreq_old));
1890 memset(&r.arp_dev, 0, sizeof(r.arp_dev));
1891 break;
1892 default:
1893 return -EINVAL;
1894 }
1895
1896 if (r.arp_pa.sa_family != AF_INET)
1897 return -EPFNOSUPPORT;
1898 if (((struct sockaddr_in *)&r.arp_pa)->sin_addr.s_addr == 0)
1899 return -EINVAL;
1900
1901 if (r.arp_dev[0])
1902 {
1903 if ((dev = dev_get(r.arp_dev)) == NULL)
1904 return -ENODEV;
1905
1906 if (!r.arp_ha.sa_family)
1907 r.arp_ha.sa_family = dev->type;
1908 else if (r.arp_ha.sa_family != dev->type)
1909 return -EINVAL;
1910 }
1911 else
1912 {
1913 if ((r.arp_flags & ATF_PUBL) &&
1914 ((cmd == SIOCSARP) || (cmd == OLD_SIOCSARP))) {
1915 if ((dev = dev_getbytype(r.arp_ha.sa_family)) == NULL)
1916 return -ENODEV;
1917 }
1918 }
1919
1920 switch(cmd)
1921 {
1922 case SIOCDARP:
1923 return arp_req_delete(&r, dev);
1924 case SIOCSARP:
1925 return arp_req_set(&r, dev);
1926 case OLD_SIOCDARP:
1927 			/* old SIOCDARP destroys both
1928 * normal and proxy mappings
1929 */
1930 r.arp_flags &= ~ATF_PUBL;
1931 err = arp_req_delete(&r, dev);
1932 r.arp_flags |= ATF_PUBL;
1933 if (!err)
1934 arp_req_delete(&r, dev);
1935 else
1936 err = arp_req_delete(&r, dev);
1937 return err;
1938 case OLD_SIOCSARP:
1939 err = arp_req_set(&r, dev);
1940 /* old SIOCSARP works so funny,
1941 * that its behaviour can be emulated
1942 * only approximately 8).
1943 * It should work. --ANK
1944 */
1945 if (r.arp_flags & ATF_PUBL)
1946 {
1947 r.arp_flags &= ~ATF_PUBL;
1948 arp_req_delete(&r, dev);
1949 }
1950 return err;
1951 case SIOCGARP:
1952 err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq));
1953 if (err)
1954 return err;
1955 err = arp_req_get(&r, dev);
1956 if (!err)
1957 memcpy_tofs(arg, &r, sizeof(r));
1958 return err;
1959 case OLD_SIOCGARP:
1960 err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq_old));
1961 if (err)
1962 return err;
1963 r.arp_flags &= ~ATF_PUBL;
1964 err = arp_req_get(&r, dev);
1965 if (err < 0)
1966 {
1967 r.arp_flags |= ATF_PUBL;
1968 err = arp_req_get(&r, dev);
1969 }
1970 if (!err)
1971 memcpy_tofs(arg, &r, sizeof(struct arpreq_old));
1972 return err;
1973 }
1974 /*NOTREACHED*/
1975 return 0;
1976 }
1977
1978
1979 /*
1980 * Called once on startup.
1981 */
1982
1983 static struct packet_type arp_packet_type =
1984 {
1985 0, /* Should be: __constant_htons(ETH_P_ARP) - but this _doesn't_ come out constant! */
1986 NULL, /* All devices */
1987 arp_rcv,
1988 NULL,
1989 NULL
1990 };
1991
1992 static struct notifier_block arp_dev_notifier={
1993 arp_device_event,
1994 NULL,
1995 0
1996 };
1997
1998 void arp_init (void)
1999 {
2000 /* Register the packet type */
2001 arp_packet_type.type=htons(ETH_P_ARP);
2002 dev_add_pack(&arp_packet_type);
2003 /* Start with the regular checks for expired arp entries. */
2004 add_timer(&arp_timer);
2005 /* Register for device down reports */
2006 register_netdevice_notifier(&arp_dev_notifier);
2007
2008 #ifdef CONFIG_PROC_FS
2009 proc_net_register(&(struct proc_dir_entry) {
2010 PROC_NET_ARP, 3, "arp",
2011 S_IFREG | S_IRUGO, 1, 0, 0,
2012 0, &proc_net_inode_operations,
2013 arp_get_info
2014 });
2015 #endif
2016 }
2017