1 /* linux/net/inet/arp.c
2 *
3 * Copyright (C) 1994 by Florian La Roche
4 *
5 * This module implements the Address Resolution Protocol ARP (RFC 826),
6 * which is used to convert IP addresses (or in the future maybe other
7 * high-level addresses) into a low-level hardware address (like an Ethernet
8 * address).
9 *
10 * FIXME:
11 * Experiment with better retransmit timers
12 * Clean up the timer deletions
13 * If you create a proxy entry set your interface address to the address
14 * and then delete it, proxies may get out of sync with reality - check this
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 *
21 *
22 * Fixes:
23 * Alan Cox : Removed the ethernet assumptions in Florian's code
24 * Alan Cox : Fixed some small errors in the ARP logic
25 * Alan Cox : Allow >4K in /proc
26 * Alan Cox : Make ARP add its own protocol entry
27 *
28 * Ross Martin : Rewrote arp_rcv() and arp_get_info()
29 * Stephen Henson : Add AX25 support to arp_get_info()
30 * Alan Cox : Drop data when a device is downed.
31 * Alan Cox : Use init_timer().
32 * Alan Cox : Double lock fixes.
33 * Martin Seine : Move the arphdr structure
34 * to if_arp.h for compatibility
35 * with BSD based programs.
36 * Andrew Tridgell : Added ARP netmask code and
37 * re-arranged proxy handling.
38 * Alan Cox : Changed to use notifiers.
39 * Niibe Yutaka : Reply for this device or proxies only.
40 * Alan Cox : Don't proxy across hardware types!
41 * Jonathan Naylor : Added support for NET/ROM.
42 * Mike Shaver : RFC1122 checks.
43 * Jonathan Naylor : Only lookup the hardware address for
44 * the correct hardware type.
45 * Germano Caronni : Assorted subtle races.
46 * Craig Schlenter : Don't modify permanent entry
47 * during arp_rcv.
48 * Russ Nelson : Tidied up a few bits.
49 * Alexey Kuznetsov: Major changes to caching and behaviour,
50 * eg intelligent arp probing and generation
51 * of host down events.
52 * Alan Cox : Missing unlock in device events.
53 * Eckes : ARP ioctl control errors.
54 * Alexey Kuznetsov: Arp free fix.
55 */
56
57 /* RFC1122 Status:
58 2.3.2.1 (ARP Cache Validation):
59 MUST provide mechanism to flush stale cache entries (OK)
60 SHOULD be able to configure cache timeout (NOT YET)
61 MUST throttle ARP retransmits (OK)
62 2.3.2.2 (ARP Packet Queue):
63 SHOULD save at least one packet from each "conversation" with an
64 unresolved IP address. (OK)
65 950727 -- MS
66 */
67
68 #include <linux/types.h>
69 #include <linux/string.h>
70 #include <linux/kernel.h>
71 #include <linux/sched.h>
72 #include <linux/config.h>
73 #include <linux/socket.h>
74 #include <linux/sockios.h>
75 #include <linux/errno.h>
76 #include <linux/if_arp.h>
77 #include <linux/in.h>
78 #include <linux/mm.h>
79 #include <linux/inet.h>
80 #include <linux/netdevice.h>
81 #include <linux/etherdevice.h>
82 #include <linux/trdevice.h>
83 #include <linux/skbuff.h>
84 #include <linux/proc_fs.h>
85 #include <linux/stat.h>
86
87 #include <net/ip.h>
88 #include <net/icmp.h>
89 #include <net/route.h>
90 #include <net/protocol.h>
91 #include <net/tcp.h>
92 #include <net/sock.h>
93 #include <net/arp.h>
94 #ifdef CONFIG_AX25
95 #include <net/ax25.h>
96 #ifdef CONFIG_NETROM
97 #include <net/netrom.h>
98 #endif
99 #endif
100 #ifdef CONFIG_NET_ALIAS
101 #include <linux/net_alias.h>
102 #endif
103
104 #include <asm/system.h>
105 #include <asm/segment.h>
106
107 #include <stdarg.h>
108
109 /*
110 * This structure defines the ARP mapping cache. While we make changes
111 * to an entry, we keep interrupts off. But normally we can copy the
112 * hardware address and the device pointer into local variables and then
113 * make any "long calls" to send a packet out.
114 */
115
116 struct arp_table
117 {
118 struct arp_table *next; /* Linked entry list */
119 unsigned long last_used; /* For expiry */
120 unsigned long last_updated; /* For expiry */
121 unsigned int flags; /* Control status */
122 u32 ip; /* ip address of entry */
123 u32 mask; /* netmask - used for generalised proxy arps (tridge) */
124 unsigned char ha[MAX_ADDR_LEN]; /* Hardware address */
125 struct device *dev; /* Device the entry is tied to */
126
127 /*
128 * The following entries are only used for unresolved hw addresses.
129 */
130
131 struct timer_list timer; /* expire timer */
132 int retries; /* remaining retries */
133 struct sk_buff_head skb; /* list of queued packets */
134 struct hh_cache *hh;
135 };
136
137
138 /*
139 * Configurable Parameters (don't touch unless you know what you are doing)
140 */
141
142 /*
143 * If an arp request is sent, ARP_RES_TIME is the timeout value until the
144 * next request is sent.
145 * RFC1122: OK. Throttles ARPing, as per 2.3.2.1. (MUST)
146 * The recommended minimum timeout is 1 second per destination.
147 * This timeout is extended to ARP_DEAD_RES_TIME if the
148 * destination does not respond.
149 */
150
151 #define ARP_RES_TIME (5*HZ)
152 #define ARP_DEAD_RES_TIME (60*HZ)
153
154 /*
155 * The number of times an arp request is sent before the host is
156 * considered temporarily unreachable.
157 */
158
159 #define ARP_MAX_TRIES 3
160
161 /*
162 * After that time, an unused entry is deleted from the arp table.
163 */
164
165 #define ARP_TIMEOUT (600*HZ)
166
167 /*
168 * How often the function 'arp_check_expire' is called.
169 * An unused entry is invalidated in the time between ARP_TIMEOUT and
170 * (ARP_TIMEOUT+ARP_CHECK_INTERVAL).
171 */
172
173 #define ARP_CHECK_INTERVAL (60*HZ)
174
175 /*
176 * The entry is reconfirmed by sending a point-to-point ARP
177 * request after ARP_CONFIRM_INTERVAL. If the destination does not respond
178 * within ARP_CONFIRM_TIMEOUT, the normal broadcast resolution scheme is started.
179 */
180
181 #define ARP_CONFIRM_INTERVAL (300*HZ)
182 #define ARP_CONFIRM_TIMEOUT ARP_RES_TIME
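
/*
 * For illustration, the schedule these defaults produce: a new unresolved
 * entry is probed at t = 0, 5 and 10 seconds (ARP_MAX_TRIES requests
 * spaced ARP_RES_TIME apart); after the third unanswered request the
 * queued packets are dropped and the entry is either removed or, if it
 * still has users, re-probed only every ARP_DEAD_RES_TIME. A resolved
 * but idle entry is re-confirmed by a unicast request every
 * ARP_CONFIRM_INTERVAL, and an unused entry is removed after roughly
 * ARP_TIMEOUT.
 */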
183
184 static unsigned int arp_lock;
185 static unsigned int arp_bh_mask;
186
187 #define ARP_BH_BACKLOG 1
188
189 static struct arp_table *arp_backlog;
190
191 static void arp_run_bh(void);
192 static void arp_check_expire (unsigned long);
193
194 static struct timer_list arp_timer =
195 { NULL, NULL, ARP_CHECK_INTERVAL, 0L, &arp_check_expire };
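
/*
 * The static initializer above fills struct timer_list in field order:
 * next, prev, expires (initially ARP_CHECK_INTERVAL, re-armed to
 * jiffies + ARP_CHECK_INTERVAL at the end of every arp_check_expire run),
 * data (unused by arp_check_expire) and the handler function itself.
 */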
196
197 /*
198 * The default arp netmask is just 255.255.255.255 which means it's
199 * a single machine entry. Only proxy entries can have other netmasks
200 */
201
202 #define DEF_ARP_NETMASK (~0)
203
204 /*
205 * The size of the hash table. Must be a power of two.
206 * Maybe we should remove hashing in the future for arp and concentrate
207 * on Patrick Schaaf's Host-Cache-Lookup...
208 */
209
210 #define ARP_TABLE_SIZE 16
211 #define FULL_ARP_TABLE_SIZE (ARP_TABLE_SIZE+1)
212
213 struct arp_table *arp_tables[FULL_ARP_TABLE_SIZE] =
214 {
215 NULL,
216 };
217
218 #define arp_proxy_list arp_tables[ARP_TABLE_SIZE]
219
220 /*
221 * The last bits in the IP address are used for the cache lookup.
222 * A special entry is used for proxy arp entries
223 */
224
225 #define HASH(paddr) (htonl(paddr) & (ARP_TABLE_SIZE - 1))
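
/*
 * For illustration, a stand-alone user-space sketch of the bucket
 * calculation above (assuming the same 16-entry table; the DEMO_* names
 * are ours, not part of this file). htonl() leaves the final octet of the
 * dotted quad in the low-order bits on either endianness, so 192.168.1.77
 * lands in bucket 77 & 15 == 13.
 */

#include <stdio.h>
#include <arpa/inet.h>

#define DEMO_TABLE_SIZE	16
#define DEMO_HASH(paddr) (htonl(paddr) & (DEMO_TABLE_SIZE - 1))

static void demo_hash(void)
{
	in_addr_t paddr = inet_addr("192.168.1.77");	/* network byte order */

	printf("bucket = %lu\n", (unsigned long) DEMO_HASH(paddr));
}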
226
227 /*
228 * Lock/unlock arp_table chains.
229 */
230
231 static __inline__ void arp_fast_lock(void)
232 {
233 ATOMIC_INCR(&arp_lock);
234 }
235
236 static __inline__ void arp_fast_unlock(void)
237 {
238 ATOMIC_DECR(&arp_lock);
239 }
240
241 static __inline__ void arp_unlock(void)
242 {
243 if (!ATOMIC_DECR_AND_CHECK(&arp_lock) && arp_bh_mask)
244 arp_run_bh();
245 }
246
247 /*
248 * Enqueue to FIFO list.
249 */
250
251 static void arp_enqueue(struct arp_table **q, struct arp_table *entry)
252 {
253 unsigned long flags;
254 struct arp_table * tail;
255
256 save_flags(flags);
257 cli();
258 tail = *q;
259 if (!tail)
260 entry->next = entry;
261 else
262 {
263 entry->next = tail->next;
264 tail->next = entry;
265 }
266 *q = entry;
267 restore_flags(flags);
268 return;
269 }
270
271 /*
272 * Dequeue from FIFO list,
273 * caller should mask interrupts.
274 */
275
276 static struct arp_table * arp_dequeue(struct arp_table **q)
277 {
278 struct arp_table * entry;
279
280 if (*q)
281 {
282 entry = (*q)->next;
283 (*q)->next = entry->next;
284 if (entry->next == entry)
285 *q = NULL;
286 entry->next = NULL;
287 return entry;
288 }
289 return NULL;
290 }
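
/*
 * The backlog above is a circular, singly-linked list whose queue pointer
 * always addresses the *tail*; tail->next is the head, so both enqueue and
 * dequeue are O(1). For illustration, a stand-alone user-space sketch of
 * the same idea (the demo_node type and names are ours, not part of this
 * file):
 */

#include <stddef.h>

struct demo_node {
	struct demo_node *next;
	int value;
};

/* Append at the tail; *q always points at the most recently added element. */
static void demo_enqueue(struct demo_node **q, struct demo_node *n)
{
	if (*q == NULL)
		n->next = n;		/* a sole element points at itself */
	else {
		n->next = (*q)->next;	/* new tail -> old head */
		(*q)->next = n;		/* old tail -> new tail */
	}
	*q = n;
}

/* Remove the head (tail->next); clear *q when the last element goes. */
static struct demo_node *demo_dequeue(struct demo_node **q)
{
	struct demo_node *head;

	if (*q == NULL)
		return NULL;
	head = (*q)->next;
	(*q)->next = head->next;
	if (head->next == head)		/* head was the only element */
		*q = NULL;
	head->next = NULL;
	return head;
}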
291
292 /*
293 * Purge all linked skb's of the entry.
294 */
295
296 static void arp_release_entry(struct arp_table *entry)
297 {
298 struct sk_buff *skb;
299 unsigned long flags;
300
301 save_flags(flags);
302 cli();
303 /* Release the list of `skb' pointers. */
304 while ((skb = skb_dequeue(&entry->skb)) != NULL)
305 {
306 skb_device_lock(skb);
307 restore_flags(flags);
308 dev_kfree_skb(skb, FREE_WRITE);
309 cli();
310 }
311 restore_flags(flags);
312 return;
313 }
314
315 /*
316 * Release the entry and all resources linked to it: skb's, hh's, timer
317 * and certainly memory.
318 */
319
320 static void arp_free_entry(struct arp_table *entry)
321 {
322 unsigned long flags;
323 struct hh_cache *hh, *next;
324
325 del_timer(&entry->timer);
326
327 save_flags(flags);
328 cli();
329 arp_release_entry(entry);
330
331 for (hh = entry->hh; hh; hh = next)
332 {
333 next = hh->hh_next;
334 hh->hh_arp = NULL;
335 hh->hh_uptodate = 0;
336 if (!--hh->hh_refcnt)
337 kfree_s(hh, sizeof(struct hh_cache));
338 }
339 restore_flags(flags);
340
341 kfree_s(entry, sizeof(struct arp_table));
342 return;
343 }
344
345 /*
346 * How many users has this entry?
347 */
348
349 static __inline__ int arp_count_hhs(struct arp_table * entry)
350 {
351 struct hh_cache *hh, **hhp;
352 int count = 0;
353
354 hhp = &entry->hh;
355 while ((hh=*hhp) != NULL)
356 {
357 if (hh->hh_refcnt == 1)
358 {
359 *hhp = hh->hh_next;
360 kfree_s(hh, sizeof(struct hh_cache));
361 continue;
362 }
363 count += hh->hh_refcnt-1;
364 hhp = &hh->hh_next;
365 }
366
367 return count;
368 }
369
370 /*
371 * Invalidate all hh's, so that higher level will not try to use it.
372 */
373
374 static __inline__ void arp_invalidate_hhs(struct arp_table * entry)
375 {
376 struct hh_cache *hh;
377
378 for (hh=entry->hh; hh; hh=hh->hh_next)
379 hh->hh_uptodate = 0;
380 }
381
382 /*
383 * Signal to device layer, that hardware address may be changed.
384 */
385
386 static __inline__ void arp_update_hhs(struct arp_table * entry)
387 {
388 struct hh_cache *hh;
389
390 for (hh=entry->hh; hh; hh=hh->hh_next)
391 entry->dev->header_cache_update(hh, entry->dev, entry->ha);
392 }
393
394 /*
395 * Check for entries that are too old and remove them. If the ATF_PERM
396 * flag is set, an entry is always left in the arp cache (permanent entry).
397 * If an entry has not been confirmed for ARP_CONFIRM_INTERVAL,
398 * declare it invalid and send a point-to-point ARP request.
399 * If it is still not confirmed within ARP_CONFIRM_TIMEOUT,
400 * it is handed over to arp_expire_request for the normal broadcast scheme.
401 */
402
403 static void arp_check_expire(unsigned long dummy)
404 {
405 int i;
406 unsigned long now = jiffies;
407
408 del_timer(&arp_timer);
409
410 if (!arp_lock)
411 {
412 arp_fast_lock();
413
414 for (i = 0; i < ARP_TABLE_SIZE; i++)
415 {
416 struct arp_table *entry;
417 struct arp_table **pentry;
418
419 pentry = &arp_tables[i];
420
421 while ((entry = *pentry) != NULL)
422 {
423 cli();
424 if (now - entry->last_used > ARP_TIMEOUT
425 && !(entry->flags & ATF_PERM)
426 && !arp_count_hhs(entry))
427 {
428 *pentry = entry->next;
429 sti();
430 #if RT_CACHE_DEBUG >= 2
431 printk("arp_expire: %08x expired\n", entry->ip);
432 #endif
433 arp_free_entry(entry);
434 }
435 else if (entry->last_updated
436 && now - entry->last_updated > ARP_CONFIRM_INTERVAL
437 && !(entry->flags & ATF_PERM))
438 {
439 struct device * dev = entry->dev;
440 pentry = &entry->next;
441 entry->flags &= ~ATF_COM;
442 arp_invalidate_hhs(entry);
443 sti();
444 entry->retries = ARP_MAX_TRIES+1;
445 del_timer(&entry->timer);
446 entry->timer.expires = jiffies + ARP_CONFIRM_TIMEOUT;
447 add_timer(&entry->timer);
448 arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip,
449 dev, dev->pa_addr, entry->ha,
450 dev->dev_addr, NULL);
451 #if RT_CACHE_DEBUG >= 2
452 printk("arp_expire: %08x requires confirmation\n", entry->ip);
453 #endif
454 }
455 else
456 pentry = &entry->next; /* go to next entry */
457 }
458 }
459 arp_unlock();
460 }
461
462 ip_rt_check_expire();
463
464 /*
465 * Set the timer again.
466 */
467
468 arp_timer.expires = jiffies + ARP_CHECK_INTERVAL;
469 add_timer(&arp_timer);
470 }
471
472 /*
473 * This function is called if an entry is not resolved within ARP_RES_TIME.
474 * When more than ARP_MAX_TRIES retries have been made, release the queued skb's,
475 * but do not discard the entry itself if it is in use.
476 */
477
478 static void arp_expire_request (unsigned long arg)
479 {
480 struct arp_table *entry = (struct arp_table *) arg;
481 struct arp_table **pentry;
482 unsigned long hash;
483 unsigned long flags;
484
485 save_flags(flags);
486 cli();
487
488 /*
489 * Since all timeouts are handled with interrupts enabled, there is a
490 * small chance that this entry has just been resolved by an incoming
491 * packet. This is the only race condition, but it is handled...
492 */
493
494 if (entry->flags & ATF_COM)
495 {
496 restore_flags(flags);
497 return;
498 }
499
500 if (arp_lock)
501 {
502 #if RT_CACHE_DEBUG >= 1
503 printk("arp_expire_request: %08x postponed\n", entry->ip);
504 #endif
505 del_timer(&entry->timer);
506 entry->timer.expires = jiffies + HZ/10;
507 add_timer(&entry->timer);
508 restore_flags(flags);
509 return;
510 }
511
512 arp_fast_lock();
513 restore_flags(flags);
514
515 if (entry->last_updated && --entry->retries > 0)
516 {
517 struct device *dev = entry->dev;
518
519 #if RT_CACHE_DEBUG >= 2
520 printk("arp_expire_request: %08x timed out\n", entry->ip);
521 #endif
522 /* Set new timer. */
523 del_timer(&entry->timer);
524 entry->timer.expires = jiffies + ARP_RES_TIME;
525 add_timer(&entry->timer);
526 arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
527 NULL, dev->dev_addr, NULL);
528 arp_unlock();
529 return;
530 }
531
532 arp_release_entry(entry);
533
534 cli();
535 if (arp_count_hhs(entry))
536 {
537 struct device *dev = entry->dev;
538 #if RT_CACHE_DEBUG >= 2
539 printk("arp_expire_request: %08x is dead\n", entry->ip);
540 #endif
541 arp_release_entry(entry);
542 entry->retries = ARP_MAX_TRIES;
543 restore_flags(flags);
544 entry->last_updated = 0;
545 del_timer(&entry->timer);
546 entry->timer.expires = jiffies + ARP_DEAD_RES_TIME;
547 add_timer(&entry->timer);
548 arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
549 NULL, dev->dev_addr, NULL);
550 arp_unlock();
551 return;
552 }
553 restore_flags(flags);
554
555 hash = HASH(entry->ip);
556
557 pentry = &arp_tables[hash];
558
559 while (*pentry != NULL)
560 {
561 if (*pentry == entry)
562 {
563 cli();
564 *pentry = entry->next;
565 restore_flags(flags);
566 #if RT_CACHE_DEBUG >= 2
567 printk("arp_expire_request: %08x is killed\n", entry->ip);
568 #endif
569 arp_free_entry(entry);
570 arp_unlock();
571 return;
572 }
573 pentry = &(*pentry)->next;
574 }
575 printk("arp_expire_request: bug: ARP entry is lost!\n");
576 arp_unlock();
577 }
578
579 /*
580 * Purge a device from the ARP queue
581 */
582
583 int arp_device_event(struct notifier_block *this, unsigned long event, void *ptr)
584 {
585 struct device *dev=ptr;
586 int i;
587
588 if (event != NETDEV_DOWN)
589 return NOTIFY_DONE;
590 /*
591 * This is a bit OTT - maybe we need some arp semaphores instead.
592 */
593
594 #if RT_CACHE_DEBUG >= 1
595 if (arp_lock)
596 printk("arp_device_event: bug\n");
597 #endif
598 arp_fast_lock();
599
600 for (i = 0; i < FULL_ARP_TABLE_SIZE; i++)
601 {
602 struct arp_table *entry;
603 struct arp_table **pentry = &arp_tables[i];
604
605 while ((entry = *pentry) != NULL)
606 {
607 if (entry->dev == dev)
608 {
609 *pentry = entry->next; /* remove from list */
610 arp_free_entry(entry);
611 }
612 else
613 pentry = &entry->next; /* go to next entry */
614 }
615 }
616 arp_unlock();
617 return NOTIFY_DONE;
618 }
619
620
621 /*
622 * Create and send an arp packet. If (dest_hw == NULL), we create a broadcast
623 * message.
624 */
625
626 void arp_send(int type, int ptype, u32 dest_ip,
627 struct device *dev, u32 src_ip,
628 unsigned char *dest_hw, unsigned char *src_hw,
629 unsigned char *target_hw)
630 {
631 struct sk_buff *skb;
632 struct arphdr *arp;
633 unsigned char *arp_ptr;
634
635 /*
636 * No arp on this interface.
637 */
638
639 if (dev->flags&IFF_NOARP)
640 return;
641
642 /*
643 * Allocate a buffer
644 */
645
646 skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
647 + dev->hard_header_len, GFP_ATOMIC);
648 if (skb == NULL)
649 {
650 printk("ARP: no memory to send an arp packet\n");
651 return;
652 }
653 skb_reserve(skb, dev->hard_header_len);
654 arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4));
655 skb->arp = 1;
656 skb->dev = dev;
657 skb->free = 1;
658 skb->protocol = htons (ETH_P_IP);
659
660 /*
661 * Fill the device header for the ARP frame
662 */
663
664 dev->hard_header(skb,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len);
665
666 /* Fill out the arp protocol part. */
667 arp->ar_hrd = htons(dev->type);
668 #ifdef CONFIG_AX25
669 #ifdef CONFIG_NETROM
670 arp->ar_pro = (dev->type == ARPHRD_AX25 || dev->type == ARPHRD_NETROM) ? htons(AX25_P_IP) : htons(ETH_P_IP);
671 #else
672 arp->ar_pro = (dev->type != ARPHRD_AX25) ? htons(ETH_P_IP) : htons(AX25_P_IP);
673 #endif
674 #else
675 arp->ar_pro = htons(ETH_P_IP);
676 #endif
677 arp->ar_hln = dev->addr_len;
678 arp->ar_pln = 4;
679 arp->ar_op = htons(type);
680
681 arp_ptr=(unsigned char *)(arp+1);
682
683 memcpy(arp_ptr, src_hw, dev->addr_len);
684 arp_ptr+=dev->addr_len;
685 memcpy(arp_ptr, &src_ip,4);
686 arp_ptr+=4;
687 if (target_hw != NULL)
688 memcpy(arp_ptr, target_hw, dev->addr_len);
689 else
690 memset(arp_ptr, 0, dev->addr_len);
691 arp_ptr+=dev->addr_len;
692 memcpy(arp_ptr, &dest_ip, 4);
693
694 dev_queue_xmit(skb, dev, 0);
695 }
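
/*
 * For illustration, the sizes involved in the allocation above, assuming
 * Ethernet (addr_len = 6, hard_header_len = 14) and IPv4 (ar_pln = 4):
 *
 *	sizeof(struct arphdr)		=  8	(ar_hrd, ar_pro, ar_hln, ar_pln, ar_op)
 *	2 * (dev->addr_len + 4)		= 20	(sender and target MAC/IP pairs)
 *	dev->hard_header_len		= 14	(Ethernet header)
 *					 ----
 *	alloc_skb() size		= 42 bytes
 *
 * skb_reserve() set aside the 14-byte link header and skb_put() claimed
 * the remaining 28-byte ARP payload that the code above fills in.
 */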
696
697 /*
698 * This will try to retransmit everything on the queue.
699 */
700
701 static void arp_send_q(struct arp_table *entry)
702 {
703 struct sk_buff *skb;
704
705 unsigned long flags;
706
707 /*
708 * Empty the entire queue, building its data up ready to send
709 */
710
711 if(!(entry->flags&ATF_COM))
712 {
713 printk("arp_send_q: incomplete entry for %s\n",
714 in_ntoa(entry->ip));
715 /* Can't flush the skb, because RFC1122 says to hang on to */
716 /* at least one from any unresolved entry. --MS */
717 /* What has happened is that someone has 'unresolved' the entry
718 as we got to use it - this 'can't happen' -- AC */
719 return;
720 }
721
722 save_flags(flags);
723
724 cli();
725 while((skb = skb_dequeue(&entry->skb)) != NULL)
726 {
727 IS_SKB(skb);
728 skb_device_lock(skb);
729 restore_flags(flags);
730 if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb))
731 {
732 skb->arp = 1;
733 if(skb->sk==NULL)
734 dev_queue_xmit(skb, skb->dev, 0);
735 else
736 dev_queue_xmit(skb,skb->dev,skb->sk->priority);
737 }
738 }
739 restore_flags(flags);
740 }
741
742
743 /*
744 * Delete an ARP mapping entry in the cache.
745 */
746
747 static void arp_destroy(struct arp_table * entry)
748 {
749 struct arp_table *entry1;
750 struct arp_table **pentry;
751
752 if (entry->flags & ATF_PUBL)
753 pentry = &arp_proxy_list;
754 else
755 pentry = &arp_tables[HASH(entry->ip)];
756
757 while ((entry1 = *pentry) != NULL)
758 {
759 if (entry1 == entry)
760 {
761 *pentry = entry1->next;
762 del_timer(&entry->timer);
763 arp_free_entry(entry);
764 return;
765 }
766 pentry = &entry1->next;
767 }
768 }
769
770 /*
771 * Receive an arp request from the device layer. Maybe I'll rewrite it
772 * to reuse the incoming packet for the reply. The overhead of the
773 * current scheme isn't that high...
774 */
775
776 int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
777 {
778 /*
779 * We shouldn't use this type conversion. Check later.
780 */
781
782 struct arphdr *arp = (struct arphdr *)skb->h.raw;
783 unsigned char *arp_ptr= (unsigned char *)(arp+1);
784 struct arp_table *entry;
785 struct arp_table *proxy_entry;
786 unsigned long hash;
787 unsigned char ha[MAX_ADDR_LEN]; /* So we can enable ints again. */
788 unsigned char *sha,*tha;
789 u32 sip,tip;
790
791 /*
792 * The hardware length of the packet should match the hardware length
793 * of the device. Similarly, the hardware types should match. The
794 * device should be ARP-able. Also, if pln is not 4, then the lookup
795 * is not from an IP number. We can't currently handle this, so toss
796 * it.
797 */
798 if (arp->ar_hln != dev->addr_len ||
799 dev->type != ntohs(arp->ar_hrd) ||
800 dev->flags & IFF_NOARP ||
801 arp->ar_pln != 4)
802 {
803 kfree_skb(skb, FREE_READ);
804 return 0;
805 /* Should this be an error/printk? Seems like something */
806 /* you'd want to know about. Unless it's just !IFF_NOARP. -- MS */
807 }
808
809 /*
810 * Another test.
811 * The logic here is that the protocol being looked up by arp should
812 * match the protocol the device speaks. If it doesn't, there is a
813 * problem, so toss the packet.
814 */
815 /* Again, should this be an error/printk? -- MS */
816
817 switch (dev->type)
818 {
819 #ifdef CONFIG_AX25
820 case ARPHRD_AX25:
821 if(arp->ar_pro != htons(AX25_P_IP))
822 {
823 kfree_skb(skb, FREE_READ);
824 return 0;
825 }
826 break;
827 #endif
828 #ifdef CONFIG_NETROM
829 case ARPHRD_NETROM:
830 if(arp->ar_pro != htons(AX25_P_IP))
831 {
832 kfree_skb(skb, FREE_READ);
833 return 0;
834 }
835 break;
836 #endif
837 case ARPHRD_ETHER:
838 case ARPHRD_ARCNET:
839 if(arp->ar_pro != htons(ETH_P_IP))
840 {
841 kfree_skb(skb, FREE_READ);
842 return 0;
843 }
844 break;
845
846 case ARPHRD_IEEE802:
847 if(arp->ar_pro != htons(ETH_P_IP))
848 {
849 kfree_skb(skb, FREE_READ);
850 return 0;
851 }
852 break;
853
854 default:
855 printk("ARP: dev->type mangled!\n");
856 kfree_skb(skb, FREE_READ);
857 return 0;
858 }
859
860 /*
861 * Extract fields
862 */
863
864 sha=arp_ptr;
865 arp_ptr += dev->addr_len;
866 memcpy(&sip, arp_ptr, 4);
867 arp_ptr += 4;
868 tha=arp_ptr;
869 arp_ptr += dev->addr_len;
870 memcpy(&tip, arp_ptr, 4);
871
872 /*
873 * Check for bad requests for 127.x.x.x and requests for multicast
874 * addresses. If this is one such, delete it.
875 */
876 if (LOOPBACK(tip) || MULTICAST(tip))
877 {
878 kfree_skb(skb, FREE_READ);
879 return 0;
880 }
881
882 /*
883 * Process entry. The idea here is we want to send a reply if it is a
884 * request for us or if it is a request for someone else that we hold
885 * a proxy for. We want to add an entry to our cache if it is a reply
886 * to us or if it is a request for our address.
887 * (The assumption for this last is that if someone is requesting our
888 * address, they are probably intending to talk to us, so it saves time
889 * if we cache their address. Their address is also probably not in
890 * our cache, since ours is not in their cache.)
891 *
892 * Putting this another way, we only care about replies if they are to
893 * us, in which case we add them to the cache. For requests, we care
894 * about those for us and those for our proxies. We reply to both,
895 * and in the case of requests for us we add the requester to the arp
896 * cache.
897 */
898
899 /*
900 * Try to switch to the alias device whose address is tip, or closest to sip.
901 */
902
903 #ifdef CONFIG_NET_ALIAS
904 if (tip != dev->pa_addr && net_alias_has(skb->dev))
905 {
906 /*
907 * net_alias_dev_rcv_sel32 returns the main dev if it fails to find another.
908 */
909 dev = net_alias_dev_rcv_sel32(dev, AF_INET, sip, tip);
910
911 if (dev->type != ntohs(arp->ar_hrd) || dev->flags & IFF_NOARP)
912 {
913 kfree_skb(skb, FREE_READ);
914 return 0;
915 }
916 }
917 #endif
918
919 if (arp->ar_op == htons(ARPOP_REQUEST))
920 {
921 /*
922 * Only reply for the real device address or when it's in our proxy tables
923 */
924 if (tip != dev->pa_addr)
925 {
926 /*
927 * To get in here, it is a request for someone else. We need to
928 * check if that someone else is one of our proxies. If it isn't,
929 * we can toss it.
930 */
931 arp_fast_lock();
932
933 for (proxy_entry=arp_proxy_list;
934 proxy_entry;
935 proxy_entry = proxy_entry->next)
936 {
937 /* we will respond to a proxy arp request
938 if the masked arp table ip matches the masked
939 tip. This allows a single proxy arp table
940 entry to be used on a gateway machine to handle
941 all requests for a whole network, rather than
942 having to use a huge number of proxy arp entries
943 and having to keep them up to date.
944 */
945 if (proxy_entry->dev == dev &&
946 !((proxy_entry->ip^tip)&proxy_entry->mask))
947 break;
948
949 }
950 if (proxy_entry)
951 {
952 memcpy(ha, proxy_entry->ha, dev->addr_len);
953 arp_unlock();
954 arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,ha, sha);
955 kfree_skb(skb, FREE_READ);
956 return 0;
957 }
958 else
959 {
960 arp_unlock();
961 kfree_skb(skb, FREE_READ);
962 return 0;
963 }
964 }
965 else
966 {
967 /*
968 * To get here, it must be an arp request for us. We need to reply.
969 */
970 arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
971 }
972 }
973 /*
974 * It is now an arp reply.
975 */
976 if(ip_chk_addr(tip)!=IS_MYADDR)
977 {
978 /*
979 * Replies to other machines get tossed.
980 */
981 kfree_skb(skb, FREE_READ);
982 return 0;
983 }
984 /*
985 * Now all replies are handled. Next, anything that falls through to here
986 * needs to be added to the arp cache, or have its entry updated if it is
987 * there.
988 */
989
990 arp_fast_lock();
991
992 hash = HASH(sip);
993
994 for (entry=arp_tables[hash]; entry; entry=entry->next)
995 if (entry->ip == sip && entry->dev == dev)
996 break;
997
998 if (entry)
999 {
1000 /*
1001 * Entry found; update it only if it is not a permanent entry.
1002 */
1003 if (!(entry->flags & ATF_PERM)) {
1004 memcpy(entry->ha, sha, dev->addr_len);
1005 entry->last_updated = jiffies;
1006 }
1007 if (!(entry->flags & ATF_COM))
1008 {
1009 /*
1010 * This entry was incomplete. Delete the retransmit timer
1011 * and switch to complete status.
1012 */
1013 del_timer(&entry->timer);
1014 entry->flags |= ATF_COM;
1015 arp_update_hhs(entry);
1016 /*
1017 * Send out waiting packets. We might have problems, if someone is
1018 * manually removing entries right now -- entry might become invalid
1019 * underneath us.
1020 */
1021 arp_send_q(entry);
1022 }
1023 }
1024 else
1025 {
1026 /*
1027 * No entry found. Need to add a new entry to the arp table.
1028 */
1029 entry = (struct arp_table *)kmalloc(sizeof(struct arp_table),GFP_ATOMIC);
1030 if(entry == NULL)
1031 {
1032 arp_unlock();
1033 printk("ARP: no memory for new arp entry\n");
1034 kfree_skb(skb, FREE_READ);
1035 return 0;
1036 }
1037
1038 entry->mask = DEF_ARP_NETMASK;
1039 entry->ip = sip;
1040 entry->flags = ATF_COM;
1041 entry->hh = NULL;
1042 init_timer(&entry->timer);
1043 entry->timer.function = arp_expire_request;
1044 entry->timer.data = (unsigned long)entry;
1045 memcpy(entry->ha, sha, dev->addr_len);
1046 entry->last_updated = entry->last_used = jiffies;
1047 /*
1048 * make entry point to 'correct' device
1049 */
1050
1051 #ifdef CONFIG_NET_ALIAS
1052 entry->dev = dev;
1053 #else
1054 entry->dev = skb->dev;
1055 #endif
1056 skb_queue_head_init(&entry->skb);
1057 if (arp_lock == 1)
1058 {
1059 entry->next = arp_tables[hash];
1060 arp_tables[hash] = entry;
1061 }
1062 else
1063 {
1064 #if RT_CACHE_DEBUG >= 1
1065 printk("arp_rcv: %08x backlogged\n", entry->ip);
1066 #endif
1067 arp_enqueue(&arp_backlog, entry);
1068 arp_bh_mask |= ARP_BH_BACKLOG;
1069 }
1070 }
1071
1072 /*
1073 * Replies have been sent, and entries have been added. All done.
1074 */
1075 kfree_skb(skb, FREE_READ);
1076 arp_unlock();
1077 return 0;
1078 }
1079
1080 /*
1081 * Lookup ARP entry by (addr, dev) pair.
1082 * Flags: ATF_PUBL - search for proxy entries
1083 * ATF_NETMASK - search for proxy network entry.
1084 * NOTE: should be called with locked ARP tables.
1085 */
1086
1087 static struct arp_table *arp_lookup(u32 paddr, unsigned short flags, struct device * dev)
1088 {
1089 struct arp_table *entry;
1090
1091 if (!(flags & ATF_PUBL))
1092 {
1093 for (entry = arp_tables[HASH(paddr)];
1094 entry != NULL; entry = entry->next)
1095 if (entry->ip == paddr && (!dev || entry->dev == dev))
1096 break;
1097 return entry;
1098 }
1099
1100 if (!(flags & ATF_NETMASK))
1101 {
1102 for (entry = arp_proxy_list;
1103 entry != NULL; entry = entry->next)
1104 if (entry->ip == paddr && (!dev || entry->dev == dev))
1105 break;
1106 return entry;
1107 }
1108
1109 for (entry=arp_proxy_list; entry != NULL; entry = entry->next)
1110 if (!((entry->ip^paddr)&entry->mask) &&
1111 (!dev || entry->dev == dev))
1112 break;
1113 return entry;
1114 }
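
/*
 * For illustration, a stand-alone user-space sketch of the masked proxy
 * match used above -- !((entry->ip ^ paddr) & entry->mask). Address and
 * mask are both in network byte order, so the test is endian-independent.
 * The values and the demo name below are only an example, not part of
 * this file.
 */

#include <stdio.h>
#include <arpa/inet.h>

static void demo_proxy_match(void)
{
	in_addr_t proxy_ip   = inet_addr("10.1.2.0");		/* proxy entry */
	in_addr_t proxy_mask = inet_addr("255.255.255.0");	/* covers a /24 */
	in_addr_t tip        = inet_addr("10.1.2.77");		/* requested target */

	if (!((proxy_ip ^ tip) & proxy_mask))
		printf("10.1.2.77 matches the proxy entry for 10.1.2.0/24\n");
}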
1115
1116 /*
1117 * Find an arp mapping in the cache. If not found, return false.
1118 */
1119
1120 int arp_query(unsigned char *haddr, u32 paddr, struct device * dev)
1121 {
1122 struct arp_table *entry;
1123
1124 arp_fast_lock();
1125
1126 entry = arp_lookup(paddr, 0, dev);
1127
1128 if (entry != NULL)
1129 {
1130 entry->last_used = jiffies;
1131 if (entry->flags & ATF_COM)
1132 {
1133 memcpy(haddr, entry->ha, dev->addr_len);
1134 arp_unlock();
1135 return 1;
1136 }
1137 }
1138 arp_unlock();
1139 return 0;
1140 }
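
/*
 * For illustration, a typical (hypothetical) call: look up the hardware
 * address for paddr on dev without triggering a new resolution.
 *
 *	unsigned char ha[MAX_ADDR_LEN];
 *
 *	if (arp_query(ha, paddr, dev))
 *		...	use ha[]: it now holds dev->addr_len valid bytes ...
 */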
1141
1142
1143 static int arp_set_predefined(int addr_hint, unsigned char * haddr, __u32 paddr, struct device * dev)
1144 {
1145 switch (addr_hint)
1146 {
1147 case IS_MYADDR:
1148 printk("ARP: arp called for own IP address\n");
1149 memcpy(haddr, dev->dev_addr, dev->addr_len);
1150 return 1;
1151 #ifdef CONFIG_IP_MULTICAST
1152 case IS_MULTICAST:
1153 if(dev->type==ARPHRD_ETHER || dev->type==ARPHRD_IEEE802)
1154 {
1155 u32 taddr;
1156 haddr[0]=0x01;
1157 haddr[1]=0x00;
1158 haddr[2]=0x5e;
1159 taddr=ntohl(paddr);
1160 haddr[5]=taddr&0xff;
1161 taddr=taddr>>8;
1162 haddr[4]=taddr&0xff;
1163 taddr=taddr>>8;
1164 haddr[3]=taddr&0x7f;
1165 return 1;
1166 }
1167 /*
1168 * If a device does not support multicast, broadcast the stuff (e.g. AX.25 for now)
1169 */
1170 #endif
1171
1172 case IS_BROADCAST:
1173 memcpy(haddr, dev->broadcast, dev->addr_len);
1174 return 1;
1175 }
1176 return 0;
1177 }
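
/*
 * For illustration, a stand-alone user-space sketch of the IS_MULTICAST
 * mapping above (RFC 1112: 01:00:5e followed by the low 23 bits of the
 * group address), so 224.1.2.3 maps to 01:00:5e:01:02:03. The demo name
 * is ours, not part of this file.
 */

#include <stdio.h>
#include <arpa/inet.h>

static void demo_multicast_map(void)
{
	unsigned long group = ntohl(inet_addr("224.1.2.3"));
	unsigned char haddr[6];

	haddr[0] = 0x01;
	haddr[1] = 0x00;
	haddr[2] = 0x5e;
	haddr[3] = (group >> 16) & 0x7f;	/* the 24th bit is dropped */
	haddr[4] = (group >> 8) & 0xff;
	haddr[5] = group & 0xff;

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       haddr[0], haddr[1], haddr[2], haddr[3], haddr[4], haddr[5]);
}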
1178
1179 /*
1180 * Find an arp mapping in the cache. If not found, post a request.
1181 */
1182
1183 int arp_find(unsigned char *haddr, u32 paddr, struct device *dev,
1184 u32 saddr, struct sk_buff *skb)
1185 {
1186 struct arp_table *entry;
1187 unsigned long hash;
1188
1189 if (arp_set_predefined(ip_chk_addr(paddr), haddr, paddr, dev))
1190 {
1191 if (skb)
1192 skb->arp = 1;
1193 return 0;
1194 }
1195
1196 hash = HASH(paddr);
1197 arp_fast_lock();
1198
1199 /*
1200 * Find an entry
1201 */
1202 entry = arp_lookup(paddr, 0, dev);
1203
1204 if (entry != NULL) /* It exists */
1205 {
1206 if (!(entry->flags & ATF_COM))
1207 {
1208 /*
1209 * A request was already sent, but no reply yet. Thus
1210 * queue the packet with the previous attempt
1211 */
1212
1213 if (skb != NULL)
1214 {
1215 if (entry->last_updated)
1216 {
1217 skb_queue_tail(&entry->skb, skb);
1218 skb_device_unlock(skb);
1219 }
1220 /*
1221 * If last_updated==0 host is dead, so
1222 * drop skb's and set socket error.
1223 */
1224 else
1225 {
1226 #if 0
1227 /*
1228 * FIXME: ICMP HOST UNREACHABLE should be
1229 * sent in this situation. --ANK
1230 */
1231 if (skb->sk)
1232 {
1233 skb->sk->err = EHOSTDOWN;
1234 skb->sk->error_report(skb->sk);
1235 }
1236 #else
1237 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
1238 #endif
1239 dev_kfree_skb(skb, FREE_WRITE);
1240 }
1241 }
1242 arp_unlock();
1243 return 1;
1244 }
1245
1246 /*
1247 * Update the record
1248 */
1249
1250 entry->last_used = jiffies;
1251 memcpy(haddr, entry->ha, dev->addr_len);
1252 if (skb)
1253 skb->arp = 1;
1254 arp_unlock();
1255 return 0;
1256 }
1257
1258 /*
1259 * Create a new unresolved entry.
1260 */
1261
1262 entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
1263 GFP_ATOMIC);
1264 if (entry != NULL)
1265 {
1266 entry->last_updated = entry->last_used = jiffies;
1267 entry->flags = 0;
1268 entry->ip = paddr;
1269 entry->mask = DEF_ARP_NETMASK;
1270 memset(entry->ha, 0, dev->addr_len);
1271 entry->dev = dev;
1272 entry->hh = NULL;
1273 init_timer(&entry->timer);
1274 entry->timer.function = arp_expire_request;
1275 entry->timer.data = (unsigned long)entry;
1276 entry->timer.expires = jiffies + ARP_RES_TIME;
1277 skb_queue_head_init(&entry->skb);
1278 if (skb != NULL)
1279 {
1280 skb_queue_tail(&entry->skb, skb);
1281 skb_device_unlock(skb);
1282 }
1283 if (arp_lock == 1)
1284 {
1285 entry->next = arp_tables[hash];
1286 arp_tables[hash] = entry;
1287 add_timer(&entry->timer);
1288 entry->retries = ARP_MAX_TRIES;
1289 }
1290 else
1291 {
1292 #if RT_CACHE_DEBUG >= 1
1293 printk("arp_find: %08x backlogged\n", entry->ip);
1294 #endif
1295 arp_enqueue(&arp_backlog, entry);
1296 arp_bh_mask |= ARP_BH_BACKLOG;
1297 }
1298 }
1299 else if (skb != NULL)
1300 dev_kfree_skb(skb, FREE_WRITE);
1301 arp_unlock();
1302
1303 /*
1304 * If we didn't find an entry, we will try to send an ARP packet.
1305 */
1306
1307 arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, saddr, NULL,
1308 dev->dev_addr, NULL);
1309
1310 return 1;
1311 }
1312
1313
1314 /*
1315 * Write the contents of the ARP cache to a PROCfs file.
1316 */
1317
1318 #define HBUFFERLEN 30
1319
1320 int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
1321 {
1322 int len=0;
1323 off_t pos=0;
1324 int size;
1325 struct arp_table *entry;
1326 char hbuffer[HBUFFERLEN];
1327 int i,j,k;
1328 const char hexbuf[] = "0123456789ABCDEF";
1329
1330 size = sprintf(buffer,"IP address HW type Flags HW address Mask Device\n");
1331
1332 pos+=size;
1333 len+=size;
1334
1335 arp_fast_lock();
1336
1337 for(i=0; i<FULL_ARP_TABLE_SIZE; i++)
1338 {
1339 for(entry=arp_tables[i]; entry!=NULL; entry=entry->next)
1340 {
1341 /*
1342 * Convert hardware address to XX:XX:XX:XX ... form.
1343 */
1344 #ifdef CONFIG_AX25
1345 #ifdef CONFIG_NETROM
1346 if (entry->dev->type == ARPHRD_AX25 || entry->dev->type == ARPHRD_NETROM)
1347 strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
1348 else {
1349 #else
1350 if(entry->dev->type==ARPHRD_AX25)
1351 strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
1352 else {
1353 #endif
1354 #endif
1355
1356 for(k=0,j=0;k<HBUFFERLEN-3 && j<entry->dev->addr_len;j++)
1357 {
1358 hbuffer[k++]=hexbuf[ (entry->ha[j]>>4)&15 ];
1359 hbuffer[k++]=hexbuf[ entry->ha[j]&15 ];
1360 hbuffer[k++]=':';
1361 }
1362 hbuffer[--k]=0;
1363
1364 #ifdef CONFIG_AX25
1365 }
1366 #endif
1367 size = sprintf(buffer+len,
1368 "%-17s0x%-10x0x%-10x%s",
1369 in_ntoa(entry->ip),
1370 (unsigned int)entry->dev->type,
1371 entry->flags,
1372 hbuffer);
1373 #if RT_CACHE_DEBUG < 2
1374 size += sprintf(buffer+len+size,
1375 " %-17s %s\n",
1376 entry->mask==DEF_ARP_NETMASK ?
1377 "*" : in_ntoa(entry->mask), entry->dev->name);
1378 #else
1379 size += sprintf(buffer+len+size,
1380 " %-17s %s\t%ld\t%1d\n",
1381 entry->mask==DEF_ARP_NETMASK ?
1382 "*" : in_ntoa(entry->mask), entry->dev->name,
1383 entry->hh ? entry->hh->hh_refcnt : -1,
1384 entry->hh ? entry->hh->hh_uptodate : 0);
1385 #endif
1386
1387 len += size;
1388 pos += size;
1389
1390 if (pos <= offset)
1391 len=0;
1392 if (pos >= offset+length)
1393 break;
1394 }
1395 }
1396 arp_unlock();
1397
1398 *start = buffer+len-(pos-offset); /* Start of wanted data */
1399 len = pos-offset; /* Start slop */
1400 if (len>length)
1401 len = length; /* Ending slop */
1402 return len;
1403 }
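
/*
 * For illustration, a resolved Ethernet entry comes out of the format
 * strings above roughly like this (values are made up):
 *
 *	IP address       HW type     Flags       HW address            Mask     Device
 *	192.168.1.1      0x1         0x2         00:A0:24:12:34:56     *        eth0
 *
 * where 0x1 is ARPHRD_ETHER, 0x2 is ATF_COM and "*" stands for the
 * default 255.255.255.255 netmask.
 */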
1404
1405
1406
1407 int arp_bind_cache(struct hh_cache ** hhp, struct device *dev, unsigned short htype, u32 paddr)
1408 {
1409 struct arp_table *entry;
1410 struct hh_cache *hh = *hhp;
1411 int addr_hint;
1412 unsigned long flags;
1413
1414 if (hh)
1415 return 1;
1416
1417 if ((addr_hint = ip_chk_addr(paddr)) != 0)
1418 {
1419 unsigned char haddr[MAX_ADDR_LEN];
1420 if (hh)
1421 return 1;
1422 hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
1423 if (!hh)
1424 return 1;
1425 arp_set_predefined(addr_hint, haddr, paddr, dev);
1426 hh->hh_uptodate = 0;
1427 hh->hh_refcnt = 1;
1428 hh->hh_arp = NULL;
1429 hh->hh_next = NULL;
1430 hh->hh_type = htype;
1431 *hhp = hh;
1432 dev->header_cache_update(hh, dev, haddr);
1433 return 0;
1434 }
1435
1436 save_flags(flags);
1437
1438 arp_fast_lock();
1439
1440 entry = arp_lookup(paddr, 0, dev);
1441
1442 if (entry)
1443 {
1444 cli();
1445 for (hh = entry->hh; hh; hh=hh->hh_next)
1446 if (hh->hh_type == htype)
1447 break;
1448 if (hh)
1449 {
1450 hh->hh_refcnt++;
1451 *hhp = hh;
1452 restore_flags(flags);
1453 arp_unlock();
1454 return 1;
1455 }
1456 restore_flags(flags);
1457 }
1458
1459 hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
1460 if (!hh)
1461 {
1462 arp_unlock();
1463 return 1;
1464 }
1465
1466 hh->hh_uptodate = 0;
1467 hh->hh_refcnt = 1;
1468 hh->hh_arp = NULL;
1469 hh->hh_next = NULL;
1470 hh->hh_type = htype;
1471
1472 if (entry)
1473 {
1474 dev->header_cache_update(hh, dev, entry->ha);
1475 *hhp = hh;
1476 cli();
1477 hh->hh_arp = (void*)entry;
1478 entry->hh = hh;
1479 hh->hh_refcnt++;
1480 restore_flags(flags);
1481 entry->last_used = jiffies;
1482 arp_unlock();
1483 return 0;
1484 }
1485
1486
1487 /*
1488 * Create a new unresolved entry.
1489 */
1490
1491 entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
1492 GFP_ATOMIC);
1493 if (entry == NULL)
1494 {
1495 kfree_s(hh, sizeof(struct hh_cache));
1496 arp_unlock();
1497 return 1;
1498 }
1499
1500 entry->last_updated = entry->last_used = jiffies;
1501 entry->flags = 0;
1502 entry->ip = paddr;
1503 entry->mask = DEF_ARP_NETMASK;
1504 memset(entry->ha, 0, dev->addr_len);
1505 entry->dev = dev;
1506 entry->hh = hh;
1507 ATOMIC_INCR(&hh->hh_refcnt);
1508 init_timer(&entry->timer);
1509 entry->timer.function = arp_expire_request;
1510 entry->timer.data = (unsigned long)entry;
1511 entry->timer.expires = jiffies + ARP_RES_TIME;
1512 skb_queue_head_init(&entry->skb);
1513
1514 if (arp_lock == 1)
1515 {
1516 unsigned long hash = HASH(paddr);
1517 cli();
1518 entry->next = arp_tables[hash];
1519 arp_tables[hash] = entry;
1520 hh->hh_arp = (void*)entry;
1521 entry->retries = ARP_MAX_TRIES;
1522 restore_flags(flags);
1523
1524 add_timer(&entry->timer);
1525 arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
1526 }
1527 else
1528 {
1529 #if RT_CACHE_DEBUG >= 1
1530 printk("arp_cache_bind: %08x backlogged\n", entry->ip);
1531 #endif
1532 arp_enqueue(&arp_backlog, entry);
1533 arp_bh_mask |= ARP_BH_BACKLOG;
1534 }
1535 *hhp = hh;
1536 arp_unlock();
1537 return 0;
1538 }
1539
1540 static void arp_run_bh()
1541 {
1542 unsigned long flags;
1543 struct arp_table *entry, *entry1;
1544 struct hh_cache *hh;
1545 __u32 sip;
1546
1547 save_flags(flags);
1548 cli();
1549 if (!arp_lock)
1550 {
1551 arp_fast_lock();
1552
1553 while ((entry = arp_dequeue(&arp_backlog)) != NULL)
1554 {
1555 unsigned long hash;
1556 sti();
1557 sip = entry->ip;
1558 hash = HASH(sip);
1559
1560 /* It's possible that an entry with the same
1561 * (addr, dev) pair was already created. Our entry is older,
1562 * so it should be discarded.
1563 */
1564 for (entry1=arp_tables[hash]; entry1; entry1=entry1->next)
1565 if (entry1->ip==sip && entry1->dev == entry->dev)
1566 break;
1567
1568 if (!entry1)
1569 {
1570 struct device * dev = entry->dev;
1571 cli();
1572 entry->next = arp_tables[hash];
1573 arp_tables[hash] = entry;
1574 for (hh=entry->hh; hh; hh=hh->hh_next)
1575 hh->hh_arp = (void*)entry;
1576 sti();
1577 del_timer(&entry->timer);
1578 entry->timer.expires = jiffies + ARP_RES_TIME;
1579 add_timer(&entry->timer);
1580 entry->retries = ARP_MAX_TRIES;
1581 arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
1582 #if RT_CACHE_DEBUG >= 1
1583 printk("arp_run_bh: %08x reinstalled\n", sip);
1584 #endif
1585 }
1586 else
1587 {
1588 struct sk_buff * skb;
1589 struct hh_cache * next;
1590
1591 /* Discard entry, but preserve its hh's and
1592 * skb's.
1593 */
1594 cli();
1595 for (hh=entry->hh; hh; hh=next)
1596 {
1597 next = hh->hh_next;
1598 hh->hh_next = entry1->hh;
1599 entry1->hh = hh;
1600 hh->hh_arp = (void*)entry1;
1601 }
1602 entry->hh = NULL;
1603
1604 /* Prune skb list from entry
1605 * and graft it to entry1.
1606 */
1607 while ((skb = skb_dequeue(&entry->skb)) != NULL)
1608 {
1609 skb_device_lock(skb);
1610 sti();
1611 skb_queue_tail(&entry1->skb, skb);
1612 skb_device_unlock(skb);
1613 cli();
1614 }
1615 sti();
1616
1617 #if RT_CACHE_DEBUG >= 1
1618 printk("arp_run_bh: entry %08x was born dead\n", entry->ip);
1619 #endif
1620 arp_free_entry(entry);
1621
1622 if (entry1->flags & ATF_COM)
1623 {
1624 arp_update_hhs(entry1);
1625 arp_send_q(entry1);
1626 }
1627 }
1628 cli();
1629 }
1630 arp_bh_mask &= ~ARP_BH_BACKLOG;
1631 arp_unlock();
1632 }
1633 restore_flags(flags);
1634 }
1635
1636 /*
1637 * Test if a hardware address is all zero
1638 */
1639
1640 static inline int empty(unsigned char * addr, int len)
1641 {
1642 while (len > 0) {
1643 if (*addr)
1644 return 0;
1645 len--;
1646 addr++;
1647 }
1648 return 1;
1649 }
1650
1651 /*
1652 * Set (create) an ARP cache entry.
1653 */
1654
1655 static int arp_req_set(struct arpreq *r, struct device * dev)
1656 {
1657 struct arp_table *entry;
1658 struct sockaddr_in *si;
1659 struct rtable *rt;
1660 struct device *dev1;
1661 unsigned char *ha;
1662 u32 ip;
1663
1664 /*
1665 * Extract destination.
1666 */
1667
1668 si = (struct sockaddr_in *) &r->arp_pa;
1669 ip = si->sin_addr.s_addr;
1670
1671 /*
1672 * Is it reachable ?
1673 */
1674
1675 if (ip_chk_addr(ip) == IS_MYADDR)
1676 dev1 = dev_get("lo");
1677 else {
1678 rt = ip_rt_route(ip, 0);
1679 if (!rt)
1680 return -ENETUNREACH;
1681 dev1 = rt->rt_dev;
1682 ip_rt_put(rt);
1683 }
1684
1685 /* good guess about the device if it isn't an ATF_PUBL entry */
1686 if (!dev) {
1687 if (dev1->flags&(IFF_LOOPBACK|IFF_NOARP))
1688 return -ENODEV;
1689 dev = dev1;
1690 }
1691
1692 /* this needs to be checked only for dev=dev1 but it doesn't hurt */
1693 if (r->arp_ha.sa_family != dev->type)
1694 return -EINVAL;
1695
1696 if (((r->arp_flags & ATF_PUBL) && dev == dev1) ||
1697 (!(r->arp_flags & ATF_PUBL) && dev != dev1))
1698 return -EINVAL;
1699
1700 #if RT_CACHE_DEBUG >= 1
1701 if (arp_lock)
1702 printk("arp_req_set: bug\n");
1703 #endif
1704 arp_fast_lock();
1705
1706 /*
1707 * Is there an existing entry for this address?
1708 */
1709
1710 /*
1711 * Find the entry
1712 */
1713
1714 entry = arp_lookup(ip, r->arp_flags & ~ATF_NETMASK, dev);
1715
1716 if (entry)
1717 {
1718 arp_destroy(entry);
1719 entry = NULL;
1720 }
1721
1722 /*
1723 * Do we need to create a new entry
1724 */
1725
1726 if (entry == NULL)
1727 {
1728 entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
1729 GFP_ATOMIC);
1730 if (entry == NULL)
1731 {
1732 arp_unlock();
1733 return -ENOMEM;
1734 }
1735 entry->ip = ip;
1736 entry->hh = NULL;
1737 init_timer(&entry->timer);
1738 entry->timer.function = arp_expire_request;
1739 entry->timer.data = (unsigned long)entry;
1740
1741 if (r->arp_flags & ATF_PUBL)
1742 {
1743 cli();
1744 entry->next = arp_proxy_list;
1745 arp_proxy_list = entry;
1746 sti();
1747 }
1748 else
1749 {
1750 unsigned long hash = HASH(ip);
1751 cli();
1752 entry->next = arp_tables[hash];
1753 arp_tables[hash] = entry;
1754 sti();
1755 }
1756 skb_queue_head_init(&entry->skb);
1757 }
1758 /*
1759 * We now have a pointer to an ARP entry. Update it!
1760 */
1761 ha = r->arp_ha.sa_data;
1762 if ((r->arp_flags & ATF_COM) && empty(ha, dev->addr_len))
1763 ha = dev->dev_addr;
1764 memcpy(entry->ha, ha, dev->addr_len);
1765 entry->last_updated = entry->last_used = jiffies;
1766 entry->flags = r->arp_flags | ATF_COM;
1767 if ((entry->flags & ATF_PUBL) && (entry->flags & ATF_NETMASK))
1768 {
1769 si = (struct sockaddr_in *) &r->arp_netmask;
1770 entry->mask = si->sin_addr.s_addr;
1771 }
1772 else
1773 entry->mask = DEF_ARP_NETMASK;
1774 entry->dev = dev;
1775 arp_update_hhs(entry);
1776 arp_unlock();
1777 return 0;
1778 }
1779
1780
1781
1782 /*
1783 * Get an ARP cache entry.
1784 */
1785
1786 static int arp_req_get(struct arpreq *r, struct device *dev)
1787 {
1788 struct arp_table *entry;
1789 struct sockaddr_in *si;
1790
1791 si = (struct sockaddr_in *) &r->arp_pa;
1792
1793 #if RT_CACHE_DEBUG >= 1
1794 if (arp_lock)
1795 printk("arp_req_set: bug\n");
1796 #endif
1797 arp_fast_lock();
1798
1799 entry = arp_lookup(si->sin_addr.s_addr, r->arp_flags|ATF_NETMASK, dev);
1800
1801 if (entry == NULL)
1802 {
1803 arp_unlock();
1804 return -ENXIO;
1805 }
1806
1807 /*
1808 * We found it; copy into structure.
1809 */
1810
1811 memcpy(r->arp_ha.sa_data, &entry->ha, entry->dev->addr_len);
1812 r->arp_ha.sa_family = entry->dev->type;
1813 r->arp_flags = entry->flags;
1814 strncpy(r->arp_dev, entry->dev->name, 16);
1815 arp_unlock();
1816 return 0;
1817 }
1818
1819 static int arp_req_delete(struct arpreq *r, struct device * dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
1820 {
1821 struct arp_table *entry;
1822 struct sockaddr_in *si;
1823
1824 si = (struct sockaddr_in *) &r->arp_pa;
1825 #if RT_CACHE_DEBUG >= 1
1826 if (arp_lock)
1827 printk("arp_req_delete: bug\n");
1828 #endif
1829 arp_fast_lock();
1830
1831 if (!(r->arp_flags & ATF_PUBL))
1832 {
1833 for (entry = arp_tables[HASH(si->sin_addr.s_addr)];
1834 entry != NULL; entry = entry->next)
1835 if (entry->ip == si->sin_addr.s_addr
1836 && (!dev || entry->dev == dev))
1837 {
1838 arp_destroy(entry);
1839 arp_unlock();
1840 return 0;
1841 }
1842 }
1843 else
1844 {
1845 for (entry = arp_proxy_list;
1846 entry != NULL; entry = entry->next)
1847 if (entry->ip == si->sin_addr.s_addr
1848 && (!dev || entry->dev == dev))
1849 {
1850 arp_destroy(entry);
1851 arp_unlock();
1852 return 0;
1853 }
1854 }
1855
1856 arp_unlock();
1857 return -ENXIO;
1858 }
1859
1860 /*
1861 * Handle an ARP layer I/O control request.
1862 */
1863
1864 int arp_ioctl(unsigned int cmd, void *arg)
1865 {
1866 int err;
1867 struct arpreq r;
1868
1869 struct device * dev = NULL;
1870
1871 switch(cmd)
1872 {
1873 case SIOCDARP:
1874 case SIOCSARP:
1875 if (!suser())
1876 return -EPERM;
1877 case SIOCGARP:
1878 err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq));
1879 if (err)
1880 return err;
1881 memcpy_fromfs(&r, arg, sizeof(struct arpreq));
1882 break;
1883 case OLD_SIOCDARP:
1884 case OLD_SIOCSARP:
1885 if (!suser())
1886 return -EPERM;
1887 case OLD_SIOCGARP:
1888 err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq_old));
1889 if (err)
1890 return err;
1891 memcpy_fromfs(&r, arg, sizeof(struct arpreq_old));
1892 memset(&r.arp_dev, 0, sizeof(r.arp_dev));
1893 break;
1894 default:
1895 return -EINVAL;
1896 }
1897
1898 if (r.arp_pa.sa_family != AF_INET)
1899 return -EPFNOSUPPORT;
1900 if (((struct sockaddr_in *)&r.arp_pa)->sin_addr.s_addr == 0)
1901 return -EINVAL;
1902
1903 if (r.arp_dev[0])
1904 {
1905 if ((dev = dev_get(r.arp_dev)) == NULL)
1906 return -ENODEV;
1907
1908 if (!r.arp_ha.sa_family)
1909 r.arp_ha.sa_family = dev->type;
1910 else if (r.arp_ha.sa_family != dev->type)
1911 return -EINVAL;
1912 }
1913 else
1914 {
1915 if ((r.arp_flags & ATF_PUBL) &&
1916 ((cmd == SIOCSARP) || (cmd == OLD_SIOCSARP))) {
1917 if ((dev = dev_getbytype(r.arp_ha.sa_family)) == NULL)
1918 return -ENODEV;
1919 }
1920 }
1921
1922 switch(cmd)
1923 {
1924 case SIOCDARP:
1925 return arp_req_delete(&r, dev);
1926 case SIOCSARP:
1927 return arp_req_set(&r, dev);
1928 case OLD_SIOCDARP:
1929 /* old SIOCDARP destroys both
1930 * normal and proxy mappings
1931 */
1932 r.arp_flags &= ~ATF_PUBL;
1933 err = arp_req_delete(&r, dev);
1934 r.arp_flags |= ATF_PUBL;
1935 if (!err)
1936 arp_req_delete(&r, dev);
1937 else
1938 err = arp_req_delete(&r, dev);
1939 return err;
1940 case OLD_SIOCSARP:
1941 err = arp_req_set(&r, dev);
1942 /* old SIOCSARP works so oddly
1943 * that its behaviour can only be
1944 * emulated approximately 8).
1945 * It should work. --ANK
1946 */
1947 if (r.arp_flags & ATF_PUBL)
1948 {
1949 r.arp_flags &= ~ATF_PUBL;
1950 arp_req_delete(&r, dev);
1951 }
1952 return err;
1953 case SIOCGARP:
1954 err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq));
1955 if (err)
1956 return err;
1957 err = arp_req_get(&r, dev);
1958 if (!err)
1959 memcpy_tofs(arg, &r, sizeof(r));
1960 return err;
1961 case OLD_SIOCGARP:
1962 err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq_old));
1963 if (err)
1964 return err;
1965 r.arp_flags &= ~ATF_PUBL;
1966 err = arp_req_get(&r, dev);
1967 if (err < 0)
1968 {
1969 r.arp_flags |= ATF_PUBL;
1970 err = arp_req_get(&r, dev);
1971 }
1972 if (!err)
1973 memcpy_tofs(arg, &r, sizeof(struct arpreq_old));
1974 return err;
1975 }
1976 /*NOTREACHED*/
1977 return 0;
1978 }
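
/*
 * For illustration, a minimal user-space sketch of how the SIOCGARP path
 * above is typically exercised (error handling trimmed; the address and
 * interface name are placeholders and the demo name is ours).
 */

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if_arp.h>

static void demo_print_arp_entry(const char *ip, const char *ifname)
{
	struct arpreq req;
	struct sockaddr_in *sin = (struct sockaddr_in *) &req.arp_pa;
	int s = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&req, 0, sizeof(req));
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = inet_addr(ip);
	strncpy(req.arp_dev, ifname, sizeof(req.arp_dev) - 1);

	if (ioctl(s, SIOCGARP, &req) == 0) {
		unsigned char *ha = (unsigned char *) req.arp_ha.sa_data;

		printf("%s is at %02x:%02x:%02x:%02x:%02x:%02x (flags 0x%x)\n",
		       ip, ha[0], ha[1], ha[2], ha[3], ha[4], ha[5],
		       req.arp_flags);
	}
	close(s);
}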
1979
1980
1981 /*
1982 * Called once on startup.
1983 */
1984
1985 static struct packet_type arp_packet_type =
1986 {
1987 0, /* Should be: __constant_htons(ETH_P_ARP) - but this _doesn't_ come out constant! */
1988 NULL, /* All devices */
1989 arp_rcv,
1990 NULL,
1991 NULL
1992 };
1993
1994 static struct notifier_block arp_dev_notifier={
1995 arp_device_event,
1996 NULL,
1997 0
1998 };
1999
2000 void arp_init (void)
2001 {
2002 /* Register the packet type */
2003 arp_packet_type.type=htons(ETH_P_ARP);
2004 dev_add_pack(&arp_packet_type);
2005 /* Start with the regular checks for expired arp entries. */
2006 add_timer(&arp_timer);
2007 /* Register for device down reports */
2008 register_netdevice_notifier(&arp_dev_notifier);
2009
2010 #ifdef CONFIG_PROC_FS
2011 proc_net_register(&(struct proc_dir_entry) {
2012 PROC_NET_ARP, 3, "arp",
2013 S_IFREG | S_IRUGO, 1, 0, 0,
2014 0, &proc_net_inode_operations,
2015 arp_get_info
2016 });
2017 #endif
2018 }
2019