This source file includes following definitions.
- arp_fast_lock
- arp_fast_unlock
- arp_unlock
- arp_enqueue
- arp_dequeue
- arp_release_entry
- arp_free_entry
- arp_count_hhs
- arp_force_expire
- arpd_update
- arp_add_entry
- arpd_lookup
- arp_invalidate_hhs
- arp_update_hhs
- arp_check_expire
- arp_expire_request
- arp_device_event
- arp_send
- arp_send_q
- arp_destroy
- arp_rcv
- arp_lookup
- arp_query
- arp_set_predefined
- arp_find
- arp_get_info
- arp_bind_cache
- arp_run_bh
- empty
- arp_req_set
- arp_req_get
- arp_req_delete
- arp_ioctl
- arp_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70 #include <linux/types.h>
71 #include <linux/string.h>
72 #include <linux/kernel.h>
73 #include <linux/sched.h>
74 #include <linux/config.h>
75 #include <linux/socket.h>
76 #include <linux/sockios.h>
77 #include <linux/errno.h>
78 #include <linux/in.h>
79 #include <linux/mm.h>
80 #include <linux/inet.h>
81 #include <linux/netdevice.h>
82 #include <linux/etherdevice.h>
83 #include <linux/if_arp.h>
84 #include <linux/trdevice.h>
85 #include <linux/skbuff.h>
86 #include <linux/proc_fs.h>
87 #include <linux/stat.h>
88
89 #include <net/ip.h>
90 #include <net/icmp.h>
91 #include <net/route.h>
92 #include <net/protocol.h>
93 #include <net/tcp.h>
94 #include <net/sock.h>
95 #include <net/arp.h>
96 #ifdef CONFIG_AX25
97 #include <net/ax25.h>
98 #ifdef CONFIG_NETROM
99 #include <net/netrom.h>
100 #endif
101 #endif
102 #ifdef CONFIG_NET_ALIAS
103 #include <linux/net_alias.h>
104 #endif
105 #ifdef CONFIG_ARPD
106 #include <linux/kerneld.h>
107 #endif
108
109 #include <asm/system.h>
110 #include <asm/segment.h>
111
112 #include <stdarg.h>
113
114
115
116
117
118
119
120
/*
 *	One IP -> hardware address mapping (resolved or in the process of
 *	being resolved).  Entries hang off arp_tables[] hash buckets,
 *	chained through 'next'.
 */
struct arp_table
{
	struct arp_table *next;			/* hash-bucket / list chain		*/
	unsigned long last_used;		/* jiffies of last lookup hit		*/
	unsigned long last_updated;		/* jiffies of last confirmed update	*/
	unsigned int flags;			/* ATF_* state bits (ATF_COM, ATF_PERM…) */
	u32 ip;					/* protocol (IP) address		*/
	u32 mask;				/* netmask — meaningful for proxy entries */
	unsigned char ha[MAX_ADDR_LEN];		/* hardware address			*/
	struct device *dev;			/* device this entry is bound to	*/

	/*
	 *	Resolution state: retransmit timer, retry budget, packets
	 *	queued while the address is unresolved, and the chain of
	 *	hardware-header caches referencing this entry.
	 */
	struct timer_list timer;
	int retries;
	struct sk_buff_head skb;
	struct hh_cache *hh;
};
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156 #define ARP_RES_TIME (5*HZ)
157 #define ARP_DEAD_RES_TIME (60*HZ)
158
159
160
161
162
163
164 #define ARP_MAX_TRIES 3
165
166
167
168
169
170 #define ARP_TIMEOUT (600*HZ)
171
172
173
174
175
176
177
178 #define ARP_CHECK_INTERVAL (60*HZ)
179
180
181
182
183
184
185
186 #define ARP_CONFIRM_INTERVAL (300*HZ)
187 #define ARP_CONFIRM_TIMEOUT ARP_RES_TIME
188
189 static unsigned int arp_lock;
190 static unsigned int arp_bh_mask;
191
192 #define ARP_BH_BACKLOG 1
193
194 static struct arp_table *arp_backlog;
195
196
197
198 #ifdef CONFIG_ARPD
199 #define ARP_MAXSIZE 256
200 #endif
201
202 static unsigned int arp_size = 0;
203
204 static void arp_run_bh(void);
205 static void arp_check_expire (unsigned long);
206
207 static struct timer_list arp_timer =
208 { NULL, NULL, ARP_CHECK_INTERVAL, 0L, &arp_check_expire };
209
210
211
212
213
214
215 #define DEF_ARP_NETMASK (~0)
216
217
218
219
220
221
222
223 #define ARP_TABLE_SIZE 16
224 #define FULL_ARP_TABLE_SIZE (ARP_TABLE_SIZE+1)
225
226 struct arp_table *arp_tables[FULL_ARP_TABLE_SIZE] =
227 {
228 NULL,
229 };
230
231 #define arp_proxy_list arp_tables[ARP_TABLE_SIZE]
232
233
234
235
236
237
238 #define HASH(paddr) (htonl(paddr) & (ARP_TABLE_SIZE - 1))
239
240
241
242
243
/*
 *	Take a reference on the ARP table lock (a bare counter, not a
 *	sleeping lock).  Pair with arp_fast_unlock()/arp_unlock().
 */
static __inline__ void arp_fast_lock(void)
{
	ATOMIC_INCR(&arp_lock);
}
248
/*
 *	Drop a table-lock reference without running deferred bottom-half
 *	work; use arp_unlock() when backlog processing should be kicked.
 */
static __inline__ void arp_fast_unlock(void)
{
	ATOMIC_DECR(&arp_lock);
}
253
/*
 *	Drop a table-lock reference; if we were the last holder and work
 *	was deferred while the table was locked (arp_bh_mask set), run
 *	the bottom half now.
 */
static __inline__ void arp_unlock(void)
{
	if (!ATOMIC_DECR_AND_CHECK(&arp_lock) && arp_bh_mask)
		arp_run_bh();
}
259
260
261
262
263
/*
 *	Append 'entry' to the circular backlog queue '*q'.  The queue
 *	pointer always references the TAIL; tail->next is the head.
 *	Interrupts are disabled across the link manipulation.
 */
static void arp_enqueue(struct arp_table **q, struct arp_table *entry)
{
	unsigned long flags;
	struct arp_table * tail;

	save_flags(flags);
	cli();
	tail = *q;
	if (!tail)
		entry->next = entry;	/* first element: points at itself */
	else
	{
		entry->next = tail->next;	/* new element becomes new tail */
		tail->next = entry;
	}
	*q = entry;
	restore_flags(flags);
	return;
}
283
284
285
286
287
288
289 static struct arp_table * arp_dequeue(struct arp_table **q)
290 {
291 struct arp_table * entry;
292
293 if (*q)
294 {
295 entry = (*q)->next;
296 (*q)->next = entry->next;
297 if (entry->next == entry)
298 *q = NULL;
299 entry->next = NULL;
300 return entry;
301 }
302 return NULL;
303 }
304
305
306
307
308
/*
 *	Free every packet queued on an unresolved entry.  Interrupts are
 *	re-enabled around each dev_kfree_skb() call (which may be slow),
 *	then disabled again before touching the queue.
 */
static void arp_release_entry(struct arp_table *entry)
{
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);
	cli();

	while ((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		skb_device_lock(skb);	/* pin the buffer before dropping cli */
		restore_flags(flags);
		dev_kfree_skb(skb, FREE_WRITE);
		cli();
	}
	restore_flags(flags);
	return;
}
327
328
329
330
331
332
333 static void arp_free_entry(struct arp_table *entry)
334 {
335 unsigned long flags;
336 struct hh_cache *hh, *next;
337
338 del_timer(&entry->timer);
339
340 save_flags(flags);
341 cli();
342 arp_release_entry(entry);
343
344 for (hh = entry->hh; hh; hh = next)
345 {
346 next = hh->hh_next;
347 hh->hh_arp = NULL;
348 hh->hh_uptodate = 0;
349 if (!--hh->hh_refcnt)
350 kfree_s(hh, sizeof(struct(struct hh_cache)));
351 }
352 restore_flags(flags);
353
354 kfree_s(entry, sizeof(struct arp_table));
355 --arp_size;
356 return;
357 }
358
359
360
361
362
/*
 *	Count external users of this entry's hardware-header caches.
 *	Side effect: hh_cache nodes whose only reference is the entry
 *	itself (refcnt == 1) are unlinked and freed on the way through.
 *	Returns the number of references held by parties other than us.
 */
static __inline__ int arp_count_hhs(struct arp_table * entry)
{
	struct hh_cache *hh, **hhp;
	int count = 0;

	hhp = &entry->hh;
	while ((hh=*hhp) != NULL)
	{
		if (hh->hh_refcnt == 1)
		{
			/* Only we hold it: reap it now. */
			*hhp = hh->hh_next;
			kfree_s(hh, sizeof(struct hh_cache));
			continue;
		}
		count += hh->hh_refcnt-1;	/* exclude our own reference */
		hhp = &hh->hh_next;
	}

	return count;
}
383
384
385
386
387
388
389
390
391
392
393
#ifdef CONFIG_ARPD
/*
 *	The cache is full: evict one entry to make room.  Prefer the
 *	least-recently-used entry with no hardware-header users; fall
 *	back to the LRU entry regardless of users ("last resort").
 *	Returns 0 on success, -1 if the table was empty.
 */
static int arp_force_expire(void)
{
	int i;
	struct arp_table *entry = NULL;
	struct arp_table **pentry = NULL;
	struct arp_table **oldest_entry = NULL, **last_resort = NULL;
	unsigned long oldest_used = ~0;

#if RT_CACHE_DEBUG >= 2
	printk("Looking for something to force expire.\n");
#endif
	for (i = 0; i < ARP_TABLE_SIZE; i++)
	{
		pentry = &arp_tables[i];

		while ((entry = *pentry) != NULL)
		{
			if (entry->last_used < oldest_used)
			{
				/* arp_count_hhs() also reaps dead hh nodes */
				if (arp_count_hhs(entry) == 0)
				{
					oldest_entry = pentry;
				}
				last_resort = pentry;
				oldest_used = entry->last_used;
			}
			pentry = &entry->next;
		}
	}
	if (oldest_entry == NULL)
	{
		if (last_resort == NULL)
			return -1;	/* nothing to evict at all */
		oldest_entry = last_resort;
	}

	entry = *oldest_entry;
	*oldest_entry = (*oldest_entry)->next;
#if RT_CACHE_DEBUG >= 2
	printk("Force expiring %08x\n", entry->ip);
#endif
	arp_free_entry(entry);
	return 0;
}
#endif
440
441
/*
 *	Push this entry's current state to the user-level arpd daemon via
 *	kerneld (fire-and-forget).  'loc' is the caller's __LINE__, used
 *	by arpd for diagnostics.  No-op unless CONFIG_ARPD is set.
 *	NOTE: 'arpreq' is static — callers must hold the table lock, or
 *	concurrent calls would clobber the request buffer.
 */
static void arpd_update(struct arp_table * entry, int loc)
{
#ifdef CONFIG_ARPD
	static struct arpd_request arpreq;

	arpreq.req = ARPD_UPDATE;
	arpreq.ip = entry->ip;
	arpreq.mask = entry->mask;
	memcpy (arpreq.ha, entry->ha, MAX_ADDR_LEN);
	arpreq.loc = loc;
	arpreq.last_used = entry->last_used;
	arpreq.last_updated = entry->last_updated;
	arpreq.flags = entry->flags;
	arpreq.dev = entry->dev;

	kerneld_send(KERNELD_ARP, 0, sizeof(arpreq),
		     (char *) &arpreq, NULL);
#endif
}
461
462
463
464
465
466
467
468
469 static struct arp_table * arp_add_entry(void)
470 {
471 struct arp_table * entry;
472
473 #ifdef CONFIG_ARPD
474 if (arp_size >= ARP_MAXSIZE)
475 {
476 if (arp_force_expire() < 0)
477 return NULL;
478 }
479 #endif
480
481 entry = (struct arp_table *)
482 kmalloc(sizeof(struct arp_table),GFP_ATOMIC);
483
484 if (entry != NULL)
485 ++arp_size;
486 return entry;
487 }
488
489
490
491
492
493
494 static struct arp_table * arpd_lookup(u32 addr, unsigned short flags,
495 struct device * dev,
496 int loc)
497 {
498 #ifdef CONFIG_ARPD
499 static struct arpd_request arpreq, retreq;
500 struct arp_table * entry;
501 int rv, i;
502
503 arpreq.req = ARPD_LOOKUP;
504 arpreq.ip = addr;
505 arpreq.loc = loc;
506
507 rv = kerneld_send(KERNELD_ARP,
508 sizeof(retreq) | KERNELD_WAIT,
509 sizeof(arpreq),
510 (char *) &arpreq,
511 (char *) &retreq);
512
513
514
515
516 if (rv != 0)
517 return NULL;
518 if (dev != retreq.dev)
519 return NULL;
520 if (! memcmp (retreq.ha, "\0\0\0\0\0\0", 6))
521 return NULL;
522
523 arp_fast_lock();
524 entry = arp_add_entry();
525 arp_unlock();
526
527 if (entry == NULL)
528 return NULL;
529
530 entry->next = NULL;
531 entry->last_used = retreq.last_used;
532 entry->last_updated = retreq.last_updated;
533 entry->flags = retreq.flags;
534 entry->ip = retreq.ip;
535 entry->mask = retreq.mask;
536 memcpy (entry->ha, retreq.ha, MAX_ADDR_LEN);
537 arpreq.dev = entry->dev;
538
539 skb_queue_head_init(&entry->skb);
540 entry->hh = NULL;
541 entry->retries = 0;
542
543 #if RT_CACHE_DEBUG >= 2
544 printk("Inserting arpd entry %08x\n in local cache.", entry->ip);
545 #endif
546 i = HASH(entry->ip);
547 arp_fast_lock();
548 entry->next = arp_tables[i]->next;
549 arp_tables[i]->next = entry;
550 arp_unlock();
551 return entry;
552 #endif
553 return NULL;
554 }
555
556
557
558
559
560
561 static __inline__ void arp_invalidate_hhs(struct arp_table * entry)
562 {
563 struct hh_cache *hh;
564
565 for (hh=entry->hh; hh; hh=hh->hh_next)
566 hh->hh_uptodate = 0;
567 }
568
569
570
571
572
573 static __inline__ void arp_update_hhs(struct arp_table * entry)
574 {
575 struct hh_cache *hh;
576
577 for (hh=entry->hh; hh; hh=hh->hh_next)
578 entry->dev->header_cache_update(hh, entry->dev, entry->ha);
579 }
580
581
582
583
584
585
586
587
588
589
/*
 *	Periodic garbage collector (runs every ARP_CHECK_INTERVAL).
 *	Two actions per entry:
 *	 - unused for ARP_TIMEOUT, not permanent, no hh users: free it;
 *	 - stale for ARP_CONFIRM_INTERVAL, not permanent: mark incomplete
 *	   and unicast a confirmation request to the cached address.
 *	The whole pass is skipped if the table is locked.
 */
static void arp_check_expire(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;

	del_timer(&arp_timer);

	if (!arp_lock)
	{
		arp_fast_lock();

		for (i = 0; i < ARP_TABLE_SIZE; i++)
		{
			struct arp_table *entry;
			struct arp_table **pentry;

			pentry = &arp_tables[i];

			while ((entry = *pentry) != NULL)
			{
				cli();
				if (now - entry->last_used > ARP_TIMEOUT
				    && !(entry->flags & ATF_PERM)
				    && !arp_count_hhs(entry))
				{
					/* Dead entry: unlink and free. */
					*pentry = entry->next;
					sti();
#if RT_CACHE_DEBUG >= 2
					printk("arp_expire: %08x expired\n", entry->ip);
#endif
					arp_free_entry(entry);
				}
				else if (entry->last_updated
					 && now - entry->last_updated > ARP_CONFIRM_INTERVAL
					 && !(entry->flags & ATF_PERM))
				{
					/* Still in use but stale: re-confirm. */
					struct device * dev = entry->dev;
					pentry = &entry->next;
					entry->flags &= ~ATF_COM;
					arp_invalidate_hhs(entry);
					sti();
					entry->retries = ARP_MAX_TRIES+1;
					del_timer(&entry->timer);
					entry->timer.expires = jiffies + ARP_CONFIRM_TIMEOUT;
					add_timer(&entry->timer);
					/* Unicast to the known address first. */
					arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip,
						 dev, dev->pa_addr, entry->ha,
						 dev->dev_addr, NULL);
#if RT_CACHE_DEBUG >= 2
					printk("arp_expire: %08x requires confirmation\n", entry->ip);
#endif
				}
				else
					/* NOTE(review): this path leaves interrupts
					   disabled until the next iteration's cli()
					   (or loop exit) — confirm intentional. */
					pentry = &entry->next;
			}
		}
		arp_unlock();
	}

	ip_rt_check_expire();

	/* Re-arm for the next sweep. */
	arp_timer.expires = jiffies + ARP_CHECK_INTERVAL;
	add_timer(&arp_timer);
}
658
659
660
661
662
663
664
/*
 *	Per-entry resolution timer.  Fires when a pending ARP request has
 *	gone unanswered: either retransmit (retries left), park the entry
 *	as "dead" while hh users still reference it, or unlink and free it.
 *	If the table is locked the work is postponed by re-arming the
 *	timer for HZ/10 later.
 */
static void arp_expire_request (unsigned long arg)
{
	struct arp_table *entry = (struct arp_table *) arg;
	struct arp_table **pentry;
	unsigned long hash;
	unsigned long flags;

	save_flags(flags);
	cli();

	/* A reply may have completed the entry after the timer fired. */
	if (entry->flags & ATF_COM)
	{
		restore_flags(flags);
		return;
	}

	if (arp_lock)
	{
		/* Table busy: try again shortly. */
#if RT_CACHE_DEBUG >= 1
		printk("arp_expire_request: %08x postponed\n", entry->ip);
#endif
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + HZ/10;
		add_timer(&entry->timer);
		restore_flags(flags);
		return;
	}

	arp_fast_lock();
	restore_flags(flags);

	if (entry->last_updated && --entry->retries > 0)
	{
		/* Retransmit the (broadcast) request. */
		struct device *dev = entry->dev;

#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x timed out\n", entry->ip);
#endif

		del_timer(&entry->timer);
		entry->timer.expires = jiffies + ARP_RES_TIME;
		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
			 NULL, dev->dev_addr, NULL);
		arp_unlock();
		return;
	}

	/* Out of retries: drop any packets still queued on the entry. */
	arp_release_entry(entry);

	cli();
	if (arp_count_hhs(entry))
	{
		/*
		 *	Header caches still reference us, so the entry cannot
		 *	be freed.  Keep it as a "dead" entry and retry slowly.
		 */
		struct device *dev = entry->dev;
#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x is dead\n", entry->ip);
#endif
		arp_release_entry(entry);
		entry->retries = ARP_MAX_TRIES;
		restore_flags(flags);
		entry->last_updated = 0;	/* marks the entry dead */
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + ARP_DEAD_RES_TIME;
		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
			 NULL, dev->dev_addr, NULL);
		arp_unlock();
		return;
	}
	restore_flags(flags);

	/* No users left: unlink from its hash chain and destroy. */
	hash = HASH(entry->ip);

	pentry = &arp_tables[hash];

	while (*pentry != NULL)
	{
		if (*pentry == entry)
		{
			cli();
			*pentry = entry->next;
			restore_flags(flags);
#if RT_CACHE_DEBUG >= 2
			printk("arp_expire_request: %08x is killed\n", entry->ip);
#endif
			arp_free_entry(entry);
			arp_unlock();
			return;
		}
		pentry = &(*pentry)->next;
	}
	printk("arp_expire_request: bug: ARP entry is lost!\n");
	arp_unlock();
}
765
766
767
768
769
770 int arp_device_event(struct notifier_block *this, unsigned long event, void *ptr)
771 {
772 struct device *dev=ptr;
773 int i;
774
775 if (event != NETDEV_DOWN)
776 return NOTIFY_DONE;
777
778
779
780
781 #if RT_CACHE_DEBUG >= 1
782 if (arp_lock)
783 printk("arp_device_event: bug\n");
784 #endif
785 arp_fast_lock();
786
787 for (i = 0; i < FULL_ARP_TABLE_SIZE; i++)
788 {
789 struct arp_table *entry;
790 struct arp_table **pentry = &arp_tables[i];
791
792 while ((entry = *pentry) != NULL)
793 {
794 if (entry->dev == dev)
795 {
796 *pentry = entry->next;
797 arp_free_entry(entry);
798 }
799 else
800 pentry = &entry->next;
801 }
802 }
803 arp_unlock();
804 return NOTIFY_DONE;
805 }
806
807
808
809
810
811
812
/*
 *	Build and transmit one ARP packet of the given operation 'type'
 *	(request/reply).  A NULL dest_hw means broadcast; a NULL target_hw
 *	zero-fills the target hardware field (the usual case for requests).
 *	Silently does nothing on devices flagged IFF_NOARP.
 */
void arp_send(int type, int ptype, u32 dest_ip,
	      struct device *dev, u32 src_ip,
	      unsigned char *dest_hw, unsigned char *src_hw,
	      unsigned char *target_hw)
{
	struct sk_buff *skb;
	struct arphdr *arp;
	unsigned char *arp_ptr;

	if (dev->flags&IFF_NOARP)
		return;

	/* Room for the link header plus arphdr plus two (hw,ip) pairs. */
	skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
			+ dev->hard_header_len, GFP_ATOMIC);
	if (skb == NULL)
	{
		printk("ARP: no memory to send an arp packet\n");
		return;
	}
	skb_reserve(skb, dev->hard_header_len);
	arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4));
	skb->arp = 1;		/* no further resolution needed for this skb */
	skb->dev = dev;
	skb->free = 1;
	skb->protocol = htons (ETH_P_IP);

	/* Link-layer header: broadcast when no destination is known. */
	dev->hard_header(skb,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len);

	arp->ar_hrd = htons(dev->type);
#ifdef CONFIG_AX25
#ifdef CONFIG_NETROM
	arp->ar_pro = (dev->type == ARPHRD_AX25 || dev->type == ARPHRD_NETROM) ? htons(AX25_P_IP) : htons(ETH_P_IP);
#else
	arp->ar_pro = (dev->type != ARPHRD_AX25) ? htons(ETH_P_IP) : htons(AX25_P_IP);
#endif
#else
	arp->ar_pro = htons(ETH_P_IP);
#endif
	arp->ar_hln = dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	/* Fill the variable part: sender hw, sender ip, target hw, target ip. */
	arp_ptr=(unsigned char *)(arp+1);

	memcpy(arp_ptr, src_hw, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &src_ip,4);
	arp_ptr+=4;
	if (target_hw != NULL)
		memcpy(arp_ptr, target_hw, dev->addr_len);
	else
		memset(arp_ptr, 0, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &dest_ip, 4);

	dev_queue_xmit(skb, dev, 0);
}
883
884
885
886
887
/*
 *	An entry has just been resolved: rebuild the link header of every
 *	packet that was queued waiting for it and transmit them.  Must be
 *	called only on completed (ATF_COM) entries.
 */
static void arp_send_q(struct arp_table *entry)
{
	struct sk_buff *skb;

	unsigned long flags;

	if(!(entry->flags&ATF_COM))
	{
		/* Should never happen — flushing an unresolved entry. */
		printk("arp_send_q: incomplete entry for %s\n",
		       in_ntoa(entry->ip));
		return;
	}

	save_flags(flags);

	cli();
	while((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		IS_SKB(skb);
		skb_device_lock(skb);
		restore_flags(flags);	/* re-enable interrupts for the send */
		if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb))
		{
			skb->arp  = 1;
			if(skb->sk==NULL)
				dev_queue_xmit(skb, skb->dev, 0);
			else
				dev_queue_xmit(skb,skb->dev,skb->sk->priority);
		}
	}
	restore_flags(flags);
}
928
929
930
931
932
933
934 static void arp_destroy(struct arp_table * entry)
935 {
936 struct arp_table *entry1;
937 struct arp_table **pentry;
938
939 if (entry->flags & ATF_PUBL)
940 pentry = &arp_proxy_list;
941 else
942 pentry = &arp_tables[HASH(entry->ip)];
943
944 while ((entry1 = *pentry) != NULL)
945 {
946 if (entry1 == entry)
947 {
948 *pentry = entry1->next;
949 del_timer(&entry->timer);
950 arp_free_entry(entry);
951 return;
952 }
953 pentry = &entry1->next;
954 }
955 }
956
957
958
959
960
961
962
/*
 *	Receive one ARP packet.  Validates the header against the device,
 *	answers requests for our own address, performs proxy-ARP replies,
 *	and opportunistically learns/updates the sender's mapping.
 *	Always consumes the skb and returns 0.
 */
int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
{
	struct arphdr *arp = (struct arphdr *)skb->h.raw;
	unsigned char *arp_ptr= (unsigned char *)(arp+1);
	struct arp_table *entry;
	struct arp_table *proxy_entry;
	unsigned long hash, grat=0;
	unsigned char ha[MAX_ADDR_LEN];
	unsigned char *sha,*tha;
	u32 sip,tip;

	/*
	 *	Sanity: hardware type/length must match the device, the
	 *	protocol address must be IPv4 (4 bytes), and the device
	 *	must do ARP at all.
	 */
	if (arp->ar_hln != dev->addr_len ||
	    dev->type != ntohs(arp->ar_hrd) ||
	    dev->flags & IFF_NOARP ||
	    arp->ar_pln != 4)
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	/*
	 *	Per-media protocol-id check (AX.25/NetROM carry IP under
	 *	AX25_P_IP rather than ETH_P_IP).
	 */
	switch (dev->type)
	{
#ifdef CONFIG_AX25
		case ARPHRD_AX25:
			if(arp->ar_pro != htons(AX25_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;
#endif
#ifdef CONFIG_NETROM
		case ARPHRD_NETROM:
			if(arp->ar_pro != htons(AX25_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;
#endif
		case ARPHRD_ETHER:
		case ARPHRD_ARCNET:
			if(arp->ar_pro != htons(ETH_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;

		case ARPHRD_IEEE802:
			if(arp->ar_pro != htons(ETH_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;

		default:
			printk("ARP: dev->type mangled!\n");
			kfree_skb(skb, FREE_READ);
			return 0;
	}

	/*
	 *	Extract sender/target hardware and protocol addresses from
	 *	the variable-length tail of the packet.
	 */
	sha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	tha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Nobody legitimately ARPs for loopback or multicast targets. */
	if (LOOPBACK(tip) || MULTICAST(tip))
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

#ifdef CONFIG_NET_ALIAS
	/* Re-select the (alias) device that actually owns the target IP. */
	if (tip != dev->pa_addr && net_alias_has(skb->dev))
	{
		dev = net_alias_dev_rcv_sel32(dev, AF_INET, sip, tip);

		if (dev->type != ntohs(arp->ar_hrd) || dev->flags & IFF_NOARP)
		{
			kfree_skb(skb, FREE_READ);
			return 0;
		}
	}
#endif

	if (arp->ar_op == htons(ARPOP_REQUEST))
	{
		if (tip != dev->pa_addr)
		{
			/*
			 *	Request for someone else: proxy-ARP territory.
			 *	NOTE(review): "sha == tha" compares the two
			 *	POINTERS into the packet (always different),
			 *	so grat is effectively always 0 here — a
			 *	memcmp of the addresses was likely intended;
			 *	confirm against the upstream source.
			 */
			grat = (sip == tip) && (sha == tha);
			arp_fast_lock();

			for (proxy_entry=arp_proxy_list;
			     proxy_entry;
			     proxy_entry = proxy_entry->next)
			{
				/* Match device and (masked) target address. */
				if (proxy_entry->dev == dev &&
				    !((proxy_entry->ip^tip)&proxy_entry->mask))
					break;

			}
			if (proxy_entry)
			{
				if (grat)
				{
					/* Gratuitous ARP for a proxied host:
					   drop the stale proxy entry. */
					if(!(proxy_entry->flags&ATF_PERM))
						arp_destroy(proxy_entry);
					goto gratuitous;
				}
				/* Answer on behalf of the proxied host. */
				memcpy(ha, proxy_entry->ha, dev->addr_len);
				arp_unlock();
				arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,ha, sha);
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			else
			{
				if (grat)
					goto gratuitous;
				arp_unlock();
				kfree_skb(skb, FREE_READ);
				return 0;
			}
		}
		else
		{
			/* Request for our own address: reply directly. */
			arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
		}
	}

	/* Replies (and fall-through requests) must target one of our
	   addresses to be worth learning from. */
	if(ip_chk_addr(tip)!=IS_MYADDR)
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	/*
	 *	Learn/update the sender's mapping.  The 'gratuitous' label
	 *	is entered from above with the lock already held.
	 */
	arp_fast_lock();

gratuitous:

	hash = HASH(sip);

	for (entry=arp_tables[hash]; entry; entry=entry->next)
		if (entry->ip == sip && entry->dev == dev)
			break;

	if (entry)
	{
		/* Known sender: refresh its hardware address. */
		if (!(entry->flags & ATF_PERM)) {
			memcpy(entry->ha, sha, dev->addr_len);
			entry->last_updated = jiffies;
			arpd_update(entry, __LINE__);
		}
		if (!(entry->flags & ATF_COM))
		{
			/* This completes a pending resolution: stop the
			   retransmit timer and flush queued packets. */
			del_timer(&entry->timer);
			entry->flags |= ATF_COM;
			arp_update_hhs(entry);

			arp_send_q(entry);
		}
	}
	else
	{
		/* Unknown sender: create an entry, unless this was only a
		   gratuitous announcement. */
		if (grat)
			goto end;

		entry = arp_add_entry();
		if(entry == NULL)
		{
			arp_unlock();
#if RT_CACHE_DEBUG >= 2
			printk("ARP: no memory for new arp entry\n");
#endif
			kfree_skb(skb, FREE_READ);
			return 0;
		}

		entry->mask = DEF_ARP_NETMASK;
		entry->ip = sip;
		entry->flags = ATF_COM;		/* born complete */
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;
		memcpy(entry->ha, sha, dev->addr_len);
		entry->last_updated = entry->last_used = jiffies;
		arpd_update(entry, __LINE__);

#ifdef CONFIG_NET_ALIAS
		entry->dev = dev;
#else
		entry->dev = skb->dev;
#endif
		skb_queue_head_init(&entry->skb);
		if (arp_lock == 1)
		{
			/* Sole lock holder: insert directly. */
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
		}
		else
		{
			/* Table busy elsewhere: defer to the bottom half. */
#if RT_CACHE_DEBUG >= 2
			printk("arp_rcv: %08x backlogged\n", entry->ip);
#endif
			arp_enqueue(&arp_backlog, entry);
			arp_bh_mask |= ARP_BH_BACKLOG;
		}
	}

end:
	kfree_skb(skb, FREE_READ);
	arp_unlock();
	return 0;
}
1287
1288
1289
1290
1291
1292
1293
1294
1295 static struct arp_table *arp_lookup(u32 paddr, unsigned short flags, struct device * dev)
1296 {
1297 struct arp_table *entry;
1298
1299 if (!(flags & ATF_PUBL))
1300 {
1301 for (entry = arp_tables[HASH(paddr)];
1302 entry != NULL; entry = entry->next)
1303 if (entry->ip == paddr && (!dev || entry->dev == dev))
1304 break;
1305 return entry;
1306 }
1307
1308 if (!(flags & ATF_NETMASK))
1309 {
1310 for (entry = arp_proxy_list;
1311 entry != NULL; entry = entry->next)
1312 if (entry->ip == paddr && (!dev || entry->dev == dev))
1313 break;
1314 return entry;
1315 }
1316
1317 for (entry=arp_proxy_list; entry != NULL; entry = entry->next)
1318 if (!((entry->ip^paddr)&entry->mask) &&
1319 (!dev || entry->dev == dev))
1320 break;
1321 return entry;
1322 }
1323
1324
1325
1326
1327
1328 int arp_query(unsigned char *haddr, u32 paddr, struct device * dev)
1329 {
1330 struct arp_table *entry;
1331
1332 arp_fast_lock();
1333
1334 entry = arp_lookup(paddr, 0, dev);
1335 if (entry == NULL)
1336 entry = arpd_lookup(paddr, 0, dev, __LINE__);
1337
1338 if (entry != NULL)
1339 {
1340 entry->last_used = jiffies;
1341 if (entry->flags & ATF_COM)
1342 {
1343 memcpy(haddr, entry->ha, dev->addr_len);
1344 arpd_update(entry, __LINE__);
1345 arp_unlock();
1346 return 1;
1347 }
1348 }
1349 arpd_update(entry, __LINE__);
1350 arp_unlock();
1351 return 0;
1352 }
1353
1354
/*
 *	Handle addresses that never need a wire lookup: our own address,
 *	IP multicast (algorithmically mapped on Ethernet/802.2), and
 *	broadcast.  Fills 'haddr' and returns 1 when handled; returns 0
 *	when normal ARP resolution is required.
 */
static int arp_set_predefined(int addr_hint, unsigned char * haddr, __u32 paddr, struct device * dev)
{
	switch (addr_hint)
	{
		case IS_MYADDR:
			printk("ARP: arp called for own IP address\n");
			memcpy(haddr, dev->dev_addr, dev->addr_len);
			return 1;
#ifdef CONFIG_IP_MULTICAST
		case IS_MULTICAST:
			if(dev->type==ARPHRD_ETHER || dev->type==ARPHRD_IEEE802)
			{
				/* RFC 1112 mapping: 01:00:5e + low 23 bits of
				   the group address. */
				u32 taddr;
				haddr[0]=0x01;
				haddr[1]=0x00;
				haddr[2]=0x5e;
				taddr=ntohl(paddr);
				haddr[5]=taddr&0xff;
				taddr=taddr>>8;
				haddr[4]=taddr&0xff;
				taddr=taddr>>8;
				haddr[3]=taddr&0x7f;
				return 1;
			}
			/* fallthrough: media without multicast support
			   (e.g. AX.25) use the broadcast address instead. */
#endif

		case IS_BROADCAST:
			memcpy(haddr, dev->broadcast, dev->addr_len);
			return 1;
	}
	return 0;
}
1390
1391
1392
1393
1394
/*
 *	Main output-path resolver.  Returns 0 with 'haddr' filled when the
 *	address is known (or predefined: own/broadcast/multicast), and 1
 *	when resolution is pending — in which case 'skb' (if any) has been
 *	queued on the entry, rejected with ICMP host-unreachable (dead
 *	entry), or freed, and a request has been broadcast.
 */
int arp_find(unsigned char *haddr, u32 paddr, struct device *dev,
	     u32 saddr, struct sk_buff *skb)
{
	struct arp_table *entry;
	unsigned long hash;

	if (arp_set_predefined(ip_chk_addr(paddr), haddr, paddr, dev))
	{
		/* No lookup needed at all. */
		if (skb)
			skb->arp = 1;
		return 0;
	}

	hash = HASH(paddr);
	arp_fast_lock();

	entry = arp_lookup(paddr, 0, dev);
	if (entry == NULL)
		entry = arpd_lookup(paddr, 0, dev, __LINE__);

	if (entry != NULL)
	{
		if (!(entry->flags & ATF_COM))
		{
			/*
			 *	Resolution already in flight.  Queue the
			 *	packet on a live entry; on a dead one
			 *	(last_updated == 0) bounce it immediately.
			 */
			if (skb != NULL)
			{
				if (entry->last_updated)
				{
					skb_queue_tail(&entry->skb, skb);
					skb_device_unlock(skb);
				}
				else
				{
#if 0
					/* Alternative: report EHOSTDOWN on the socket. */
					if (skb->sk)
					{
						skb->sk->err = EHOSTDOWN;
						skb->sk->error_report(skb->sk);
					}
#else
					icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
#endif
					dev_kfree_skb(skb, FREE_WRITE);
				}
			}
			arp_unlock();
			return 1;
		}

		/* Completed entry: hand back the hardware address. */
		entry->last_used = jiffies;
		memcpy(haddr, entry->ha, dev->addr_len);
		arpd_update(entry, __LINE__);
		if (skb)
			skb->arp = 1;
		arp_unlock();
		return 0;
	}

	/* Cache miss: create an incomplete entry and start resolving. */
	entry = arp_add_entry();
	if (entry != NULL)
	{
		entry->last_updated = entry->last_used = jiffies;
		entry->flags = 0;		/* incomplete */
		entry->ip = paddr;
		entry->mask = DEF_ARP_NETMASK;
		memset(entry->ha, 0, dev->addr_len);
		entry->dev = dev;
		entry->hh = NULL;
		arpd_update(entry, __LINE__);
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;
		entry->timer.expires = jiffies + ARP_RES_TIME;
		skb_queue_head_init(&entry->skb);
		if (skb != NULL)
		{
			skb_queue_tail(&entry->skb, skb);
			skb_device_unlock(skb);
		}
		if (arp_lock == 1)
		{
			/* Sole lock holder: insert and arm the timer now. */
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
			add_timer(&entry->timer);
			entry->retries = ARP_MAX_TRIES;
		}
		else
		{
			/* Table busy: hand the entry to the bottom half. */
#if RT_CACHE_DEBUG >= 2
			printk("arp_find: %08x backlogged\n", entry->ip);
#endif
			arp_enqueue(&arp_backlog, entry);
			arp_bh_mask |= ARP_BH_BACKLOG;
		}
	}
	else if (skb != NULL)
		dev_kfree_skb(skb, FREE_WRITE);
	arp_unlock();

	/* Broadcast the request whether or not the entry was created. */
	arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, saddr, NULL,
		 dev->dev_addr, NULL);

	return 1;
}
1527
1528
1529
1530
1531
1532
1533 #define HBUFFERLEN 30
1534
/*
 *	/proc interface: format the whole ARP table (including proxy
 *	entries) into 'buffer', honouring the offset/length windowing
 *	protocol of the old procfs get_info callbacks.
 */
int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
	int len=0;
	off_t pos=0;
	int size;
	struct arp_table *entry;
	char hbuffer[HBUFFERLEN];
	int i,j,k;
	const char hexbuf[] = "0123456789ABCDEF";

	size = sprintf(buffer,"IP address HW type Flags HW address Mask Device\n");

	pos+=size;
	len+=size;

	arp_fast_lock();

	for(i=0; i<FULL_ARP_TABLE_SIZE; i++)
	{
		for(entry=arp_tables[i]; entry!=NULL; entry=entry->next)
		{
			/*
			 *	AX.25/NetROM addresses print via ax2asc();
			 *	everything else as colon-separated hex bytes.
			 */
#ifdef CONFIG_AX25
#ifdef CONFIG_NETROM
			if (entry->dev->type == ARPHRD_AX25 || entry->dev->type == ARPHRD_NETROM)
				strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
			else {
#else
			if(entry->dev->type==ARPHRD_AX25)
				strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
			else {
#endif
#endif

			for(k=0,j=0;k<HBUFFERLEN-3 && j<entry->dev->addr_len;j++)
			{
				hbuffer[k++]=hexbuf[ (entry->ha[j]>>4)&15 ];
				hbuffer[k++]=hexbuf[ entry->ha[j]&15 ];
				hbuffer[k++]=':';
			}
			hbuffer[--k]=0;	/* strip the trailing ':' */

#ifdef CONFIG_AX25
			}
#endif
			size = sprintf(buffer+len,
				"%-17s0x%-10x0x%-10x%s",
				in_ntoa(entry->ip),
				(unsigned int)entry->dev->type,
				entry->flags,
				hbuffer);
#if RT_CACHE_DEBUG < 2
			size += sprintf(buffer+len+size,
				" %-17s %s\n",
				entry->mask==DEF_ARP_NETMASK ?
				"*" : in_ntoa(entry->mask), entry->dev->name);
#else
			/* Debug build also shows hh refcount/validity. */
			size += sprintf(buffer+len+size,
				" %-17s %s\t%ld\t%1d\n",
				entry->mask==DEF_ARP_NETMASK ?
				"*" : in_ntoa(entry->mask), entry->dev->name,
				entry->hh ? entry->hh->hh_refcnt : -1,
				entry->hh ? entry->hh->hh_uptodate : 0);
#endif

			len += size;
			pos += size;

			/* procfs windowing: skip until 'offset', stop at 'length'. */
			if (pos <= offset)
				len=0;
			if (pos >= offset+length)
				goto done;
		}
	}
done:
	arp_unlock();

	*start = buffer+len-(pos-offset);
	len = pos-offset;
	if (len>length)
		len = length;
	return len;
}
1620
1621
1622
/*
 *	Attach a hardware-header cache of type 'htype' for 'paddr' to
 *	'*hhp'.  Reuses an existing hh on the matching ARP entry when
 *	possible; otherwise allocates one and, if no entry exists yet,
 *	creates an incomplete entry and starts resolution.
 *	Returns 0 when a (possibly not yet up-to-date) hh was bound,
 *	1 when the caller should proceed without one.
 */
int arp_bind_cache(struct hh_cache ** hhp, struct device *dev, unsigned short htype, u32 paddr)
{
	struct arp_table *entry;
	struct hh_cache *hh = *hhp;
	int addr_hint;
	unsigned long flags;

	if (hh)
		return 1;	/* already bound */

	if ((addr_hint = ip_chk_addr(paddr)) != 0)
	{
		/* Predefined address (own/broadcast/multicast): build a
		   standalone hh not linked to any ARP entry. */
		unsigned char haddr[MAX_ADDR_LEN];
		if (hh)
			return 1;
		hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
		if (!hh)
			return 1;
		arp_set_predefined(addr_hint, haddr, paddr, dev);
		hh->hh_uptodate = 0;
		hh->hh_refcnt = 1;
		hh->hh_arp = NULL;
		hh->hh_next = NULL;
		hh->hh_type = htype;
		*hhp = hh;
		dev->header_cache_update(hh, dev, haddr);
		return 0;
	}

	save_flags(flags);

	arp_fast_lock();

	entry = arp_lookup(paddr, 0, dev);
	if (entry == NULL)
		entry = arpd_lookup(paddr, 0, dev, __LINE__);

	if (entry)
	{
		/* Entry exists: share an hh of the right type if present. */
		cli();
		for (hh = entry->hh; hh; hh=hh->hh_next)
			if (hh->hh_type == htype)
				break;
		if (hh)
		{
			hh->hh_refcnt++;
			*hhp = hh;
			restore_flags(flags);
			arp_unlock();
			return 1;
		}
		restore_flags(flags);
	}

	/* Need a fresh hh either way. */
	hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
	if (!hh)
	{
		arp_unlock();
		return 1;
	}

	hh->hh_uptodate = 0;
	hh->hh_refcnt = 1;
	hh->hh_arp = NULL;
	hh->hh_next = NULL;
	hh->hh_type = htype;

	if (entry)
	{
		/* Link the new hh onto the existing entry. */
		dev->header_cache_update(hh, dev, entry->ha);
		*hhp = hh;
		cli();
		hh->hh_arp = (void*)entry;
		entry->hh = hh;
		hh->hh_refcnt++;	/* entry's own reference */
		restore_flags(flags);
		entry->last_used = jiffies;
		arpd_update(entry, __LINE__);
		arp_unlock();
		return 0;
	}

	/* No entry at all: create an incomplete one carrying the hh and
	   start resolving it. */
	entry = arp_add_entry();
	if (entry == NULL)
	{
		kfree_s(hh, sizeof(struct hh_cache));
		arp_unlock();
		return 1;
	}

	entry->last_updated = entry->last_used = jiffies;
	entry->flags = 0;
	entry->ip = paddr;
	entry->mask = DEF_ARP_NETMASK;
	memset(entry->ha, 0, dev->addr_len);
	entry->dev = dev;
	entry->hh = hh;
	arpd_update(entry, __LINE__);
	ATOMIC_INCR(&hh->hh_refcnt);	/* entry's own reference */
	init_timer(&entry->timer);
	entry->timer.function = arp_expire_request;
	entry->timer.data = (unsigned long)entry;
	entry->timer.expires = jiffies + ARP_RES_TIME;
	skb_queue_head_init(&entry->skb);

	if (arp_lock == 1)
	{
		/* Sole lock holder: insert and arm the timer directly. */
		unsigned long hash = HASH(paddr);
		cli();
		entry->next = arp_tables[hash];
		arp_tables[hash] = entry;
		hh->hh_arp = (void*)entry;
		entry->retries = ARP_MAX_TRIES;
		restore_flags(flags);

		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
	}
	else
	{
		/* Table busy: defer insertion to the bottom half. */
#if RT_CACHE_DEBUG >= 1
		printk("arp_cache_bind: %08x backlogged\n", entry->ip);
#endif
		arp_enqueue(&arp_backlog, entry);
		arp_bh_mask |= ARP_BH_BACKLOG;
	}
	*hhp = hh;
	arp_unlock();
	return 0;
}
1758
/*
 *	Bottom half: flush the ARP backlog queue.
 *
 *	Entries that could not be inserted into the hash table while the
 *	cache was locked were queued on arp_backlog with ARP_BH_BACKLOG set
 *	in arp_bh_mask.  Here we retry the insertion.  Runs with interrupts
 *	managed explicitly via save_flags/cli/sti — the exact ordering of
 *	those calls is critical and must not be changed.
 */
static void arp_run_bh()
{
	unsigned long flags;
	struct arp_table *entry, *entry1;
	struct hh_cache *hh;
	__u32 sip;

	save_flags(flags);
	cli();
	/* Only proceed if nobody currently holds the cache lock; otherwise
	 * the lock holder will re-run us (backlog bit stays set). */
	if (!arp_lock)
	{
		arp_fast_lock();

		while ((entry = arp_dequeue(&arp_backlog)) != NULL)
		{
			unsigned long hash;
			sti();
			sip = entry->ip;
			hash = HASH(sip);

			/* While this entry sat on the backlog, a duplicate for
			 * the same (ip, dev) may have been created; look for one. */
			for (entry1=arp_tables[hash]; entry1; entry1=entry1->next)
				if (entry1->ip==sip && entry1->dev == entry->dev)
					break;

			if (!entry1)
			{
				/* No duplicate: install the backlogged entry in the
				 * hash table and (re)start its resolution timer. */
				struct device * dev = entry->dev;
				cli();
				entry->next = arp_tables[hash];
				arp_tables[hash] = entry;
				for (hh=entry->hh; hh; hh=hh->hh_next)
					hh->hh_arp = (void*)entry;
				sti();
				del_timer(&entry->timer);
				entry->timer.expires = jiffies + ARP_RES_TIME;
				add_timer(&entry->timer);
				entry->retries = ARP_MAX_TRIES;
				arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
#if RT_CACHE_DEBUG >= 1
				printk("arp_run_bh: %08x reinstalled\n", sip);
#endif
			}
			else
			{
				struct sk_buff * skb;
				struct hh_cache * next;

				/* Duplicate exists: migrate this entry's cached
				 * hardware headers onto the surviving entry. */
				cli();
				for (hh=entry->hh; hh; hh=next)
				{
					next = hh->hh_next;
					hh->hh_next = entry1->hh;
					entry1->hh = hh;
					hh->hh_arp = (void*)entry1;
				}
				entry->hh = NULL;

				/* Move any packets queued for resolution over to the
				 * surviving entry; interrupts re-enabled per packet. */
				while ((skb = skb_dequeue(&entry->skb)) != NULL)
				{
					skb_device_lock(skb);
					sti();
					skb_queue_tail(&entry1->skb, skb);
					skb_device_unlock(skb);
					cli();
				}
				sti();

#if RT_CACHE_DEBUG >= 1
				printk("arp_run_bh: entry %08x was born dead\n", entry->ip);
#endif
				arp_free_entry(entry);

				/* If the survivor is already resolved, push out the
				 * merged headers and queued packets now. */
				if (entry1->flags & ATF_COM)
				{
					arp_update_hhs(entry1);
					arp_send_q(entry1);
				}
			}
			cli();
		}
		arp_bh_mask &= ~ARP_BH_BACKLOG;
		arp_unlock();
	}
	restore_flags(flags);
}
1854
1855
1856
1857
1858
/*
 *	Test whether a hardware address is all zeroes.
 *	Returns 1 when every one of the first 'len' bytes at 'addr' is 0
 *	(or when len <= 0), 0 as soon as a non-zero byte is found.
 */
static inline int empty(unsigned char * addr, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (addr[i] != 0)
			return 0;
	return 1;
}
1869
1870
1871
1872
1873
/*
 *	Create or replace an ARP cache entry (SIOCSARP ioctl back end).
 *	'dev' may be NULL, in which case the device is derived from the
 *	routing table.  Returns 0 on success or a negative errno.
 */
static int arp_req_set(struct arpreq *r, struct device * dev)
{
	struct arp_table *entry;
	struct sockaddr_in *si;
	struct rtable *rt;
	struct device *dev1;
	unsigned char *ha;
	u32 ip;

	/*
	 *	Extract the protocol (IPv4) address being mapped.
	 */
	si = (struct sockaddr_in *) &r->arp_pa;
	ip = si->sin_addr.s_addr;

	/*
	 *	Work out which device the address would be routed through;
	 *	our own addresses belong to the loopback device.
	 */
	if (ip_chk_addr(ip) == IS_MYADDR)
		dev1 = dev_get("lo");
	else {
		rt = ip_rt_route(ip, 0);
		if (!rt)
			return -ENETUNREACH;
		dev1 = rt->rt_dev;
		ip_rt_put(rt);
	}

	/* No device specified by the caller: use the routed device,
	 * but refuse devices that cannot do ARP at all. */
	if (!dev) {
		if (dev1->flags&(IFF_LOOPBACK|IFF_NOARP))
			return -ENODEV;
		dev = dev1;
	}

	/* The hardware address family must match the device's type. */
	if (r->arp_ha.sa_family != dev->type)
		return -EINVAL;

	/*
	 *	Sanity: a proxy (ATF_PUBL) entry answers for a host reachable
	 *	through a *different* device; a normal entry must be on the
	 *	device the address actually routes to.
	 */
	if (((r->arp_flags & ATF_PUBL) && dev == dev1) ||
	    (!(r->arp_flags & ATF_PUBL) && dev != dev1))
		return -EINVAL;

#if RT_CACHE_DEBUG >= 1
	if (arp_lock)
		printk("arp_req_set: bug\n")
#endif
	arp_fast_lock();

	/*
	 *	Destroy any existing entry for this address so we can
	 *	install a fresh one below.
	 */
	entry = arp_lookup(ip, r->arp_flags & ~ATF_NETMASK, dev);
	if (entry == NULL)
		entry = arpd_lookup(ip, r->arp_flags & ~ATF_NETMASK, dev, __LINE__);

	if (entry)
	{
		arp_destroy(entry);
		entry = NULL;
	}

	/*
	 *	entry is always NULL at this point; allocate and chain a
	 *	new one (proxy entries on arp_proxy_list, normal entries
	 *	in the hash table).
	 */
	if (entry == NULL)
	{
		entry = arp_add_entry();
		if (entry == NULL)
		{
			arp_unlock();
			return -ENOMEM;
		}
		entry->ip = ip;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;

		if (r->arp_flags & ATF_PUBL)
		{
			cli();
			entry->next = arp_proxy_list;
			arp_proxy_list = entry;
			sti();
		}
		else
		{
			unsigned long hash = HASH(ip);
			cli();
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
			sti();
		}
		skb_queue_head_init(&entry->skb);
	}

	/*
	 *	Fill in the hardware address.  An all-zero address combined
	 *	with ATF_COM means "use this device's own address".
	 */
	ha = r->arp_ha.sa_data;
	if ((r->arp_flags & ATF_COM) && empty(ha, dev->addr_len))
		ha = dev->dev_addr;
	memcpy(entry->ha, ha, dev->addr_len);
	entry->last_updated = entry->last_used = jiffies;
	arpd_update(entry, __LINE__);
	entry->flags = r->arp_flags | ATF_COM;	/* manual entries are complete */
	if ((entry->flags & ATF_PUBL) && (entry->flags & ATF_NETMASK))
	{
		/* Proxy-for-a-subnet entry: take the caller's netmask. */
		si = (struct sockaddr_in *) &r->arp_netmask;
		entry->mask = si->sin_addr.s_addr;
	}
	else
		entry->mask = DEF_ARP_NETMASK;
	entry->dev = dev;
	arp_update_hhs(entry);	/* refresh cached hardware headers */
	arp_unlock();
	return 0;
}
2000
2001
2002
2003
2004
2005
2006
2007 static int arp_req_get(struct arpreq *r, struct device *dev)
2008 {
2009 struct arp_table *entry;
2010 struct sockaddr_in *si;
2011
2012 si = (struct sockaddr_in *) &r->arp_pa;
2013
2014 #if RT_CACHE_DEBUG >= 1
2015 if (arp_lock)
2016 printk("arp_req_set: bug\n");
2017 #endif
2018 arp_fast_lock();
2019
2020 entry = arp_lookup(si->sin_addr.s_addr, r->arp_flags|ATF_NETMASK, dev);
2021 if (entry == NULL)
2022 entry = arpd_lookup(si->sin_addr.s_addr,
2023 r->arp_flags|ATF_NETMASK, dev, __LINE__);
2024
2025 if (entry == NULL)
2026 {
2027 arp_unlock();
2028 return -ENXIO;
2029 }
2030
2031
2032
2033
2034
2035 memcpy(r->arp_ha.sa_data, &entry->ha, entry->dev->addr_len);
2036 r->arp_ha.sa_family = entry->dev->type;
2037 r->arp_flags = entry->flags;
2038 strncpy(r->arp_dev, entry->dev->name, 16);
2039 arp_unlock();
2040 return 0;
2041 }
2042
2043 static int arp_req_delete(struct arpreq *r, struct device * dev)
2044 {
2045 struct arp_table *entry;
2046 struct sockaddr_in *si;
2047
2048 si = (struct sockaddr_in *) &r->arp_pa;
2049 #if RT_CACHE_DEBUG >= 1
2050 if (arp_lock)
2051 printk("arp_req_delete: bug\n");
2052 #endif
2053 arp_fast_lock();
2054
2055 if (!(r->arp_flags & ATF_PUBL))
2056 {
2057 for (entry = arp_tables[HASH(si->sin_addr.s_addr)];
2058 entry != NULL; entry = entry->next)
2059 if (entry->ip == si->sin_addr.s_addr
2060 && (!dev || entry->dev == dev))
2061 {
2062 arp_destroy(entry);
2063 arp_unlock();
2064 return 0;
2065 }
2066 }
2067 else
2068 {
2069 for (entry = arp_proxy_list;
2070 entry != NULL; entry = entry->next)
2071 if (entry->ip == si->sin_addr.s_addr
2072 && (!dev || entry->dev == dev))
2073 {
2074 arp_destroy(entry);
2075 arp_unlock();
2076 return 0;
2077 }
2078 }
2079
2080 arp_unlock();
2081 return -ENXIO;
2082 }
2083
2084
2085
2086
2087
/*
 *	Handle the ARP ioctls: SIOC[SGD]ARP and the pre-1.2 OLD_SIOC[SGD]ARP
 *	variants (the old struct arpreq lacks the arp_dev field).
 *	Returns 0 on success or a negative errno.
 */
int arp_ioctl(unsigned int cmd, void *arg)
{
	int err;
	struct arpreq r;

	struct device * dev = NULL;

	/* Copy the user's request in; set/delete require superuser. */
	switch(cmd)
	{
		case SIOCDARP:
		case SIOCSARP:
			if (!suser())
				return -EPERM;
			/* fall through to the common read for the new struct */
		case SIOCGARP:
			err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq));
			if (err)
				return err;
			memcpy_fromfs(&r, arg, sizeof(struct arpreq));
			break;
		case OLD_SIOCDARP:
		case OLD_SIOCSARP:
			if (!suser())
				return -EPERM;
			/* fall through to the common read for the old struct */
		case OLD_SIOCGARP:
			err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq_old));
			if (err)
				return err;
			memcpy_fromfs(&r, arg, sizeof(struct arpreq_old));
			/* The old struct has no device name; clear the field. */
			memset(&r.arp_dev, 0, sizeof(r.arp_dev));
			break;
		default:
			return -EINVAL;
	}

	/* Only IPv4 protocol addresses, and never address 0. */
	if (r.arp_pa.sa_family != AF_INET)
		return -EPFNOSUPPORT;
	if (((struct sockaddr_in *)&r.arp_pa)->sin_addr.s_addr == 0)
		return -EINVAL;

	/*
	 *	Resolve the device: either named explicitly in the request,
	 *	or (for proxy sets) found by hardware address family.
	 */
	if (r.arp_dev[0])
	{
		if ((dev = dev_get(r.arp_dev)) == NULL)
			return -ENODEV;

		/* An unset hardware family defaults to the device's type. */
		if (!r.arp_ha.sa_family)
			r.arp_ha.sa_family = dev->type;
		else if (r.arp_ha.sa_family != dev->type)
			return -EINVAL;
	}
	else
	{
		if ((r.arp_flags & ATF_PUBL) &&
		    ((cmd == SIOCSARP) || (cmd == OLD_SIOCSARP))) {
			if ((dev = dev_getbytype(r.arp_ha.sa_family)) == NULL)
				return -ENODEV;
		}
	}

	/* Dispatch to the per-command back ends. */
	switch(cmd)
	{
		case SIOCDARP:
		        return arp_req_delete(&r, dev);
		case SIOCSARP:
			return arp_req_set(&r, dev);
		case OLD_SIOCDARP:
			/*
			 *	The old ioctl cannot distinguish proxy from
			 *	normal entries, so delete both forms: first
			 *	without ATF_PUBL, then with it.
			 */
			r.arp_flags &= ~ATF_PUBL;
			err = arp_req_delete(&r, dev);
			r.arp_flags |= ATF_PUBL;
			if (!err)
				arp_req_delete(&r, dev);
			else
				err = arp_req_delete(&r, dev);
			return err;
		case OLD_SIOCSARP:
			err = arp_req_set(&r, dev);
			/*
			 *	Setting a proxy entry via the old ioctl must
			 *	also remove any plain entry for the address,
			 *	to mimic the pre-1.2 behaviour.
			 */
			if (r.arp_flags & ATF_PUBL)
			{
				r.arp_flags &= ~ATF_PUBL;
				arp_req_delete(&r, dev);
			}
			return err;
		case SIOCGARP:
			err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq));
			if (err)
				return err;
			err = arp_req_get(&r, dev);
			if (!err)
				memcpy_tofs(arg, &r, sizeof(r));
			return err;
		case OLD_SIOCGARP:
			err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq_old));
			if (err)
				return err;
			/* Old get: prefer a plain entry, fall back to proxy. */
			r.arp_flags &= ~ATF_PUBL;
			err = arp_req_get(&r, dev);
			if (err < 0)
			{
				r.arp_flags |= ATF_PUBL;
				err = arp_req_get(&r, dev);
			}
			if (!err)
				memcpy_tofs(arg, &r, sizeof(struct arpreq_old));
			return err;
	}
	/* not reached: every valid cmd returned above */
	return 0;
}
2203
2204
2205
2206
2207
2208
/* Packet handler hook: delivers every received ARP frame to arp_rcv().
 * Positional initializer for struct packet_type. */
static struct packet_type arp_packet_type =
{
	0,		/* type: set to htons(ETH_P_ARP) in arp_init() */
	NULL,		/* dev: NULL = match packets from any device */
	arp_rcv,	/* func: receive handler */
	NULL,		/* data */
	NULL		/* next */
};
2217
/* Notifier block: arp_device_event() is called on device state changes
 * (e.g. a device going down) so stale entries can be flushed. */
static struct notifier_block arp_dev_notifier={
	arp_device_event,	/* notifier_call */
	NULL,			/* next */
	0			/* priority */
};
2223
/*
 *	Called once at boot: register the ARP packet type, start the
 *	cache-expiry timer, watch for device events and (optionally)
 *	publish /proc/net/arp.
 */
void arp_init (void)
{
	/* Register the ARP packet type on all devices */
	arp_packet_type.type=htons(ETH_P_ARP);
	dev_add_pack(&arp_packet_type);
	/* Kick off the periodic cache expiry checks */
	add_timer(&arp_timer);
	/* Hear about devices going up/down */
	register_netdevice_notifier(&arp_dev_notifier);

#ifdef CONFIG_PROC_FS
	/* Registers /proc/net/arp via a compound-literal proc_dir_entry
	 * (GCC extension, standard in this kernel era). */
	proc_net_register(&(struct proc_dir_entry) {
		PROC_NET_ARP, 3, "arp",
		S_IFREG | S_IRUGO, 1, 0, 0,
		0, &proc_net_inode_operations,
		arp_get_info
	});
#endif
}
2243