This source file includes following definitions.
- arp_fast_lock
- arp_fast_unlock
- arp_unlock
- arp_enqueue
- arp_dequeue
- arp_release_entry
- arp_free_entry
- arp_count_hhs
- arp_invalidate_hhs
- arp_update_hhs
- arp_check_expire
- arp_expire_request
- arp_device_event
- arp_send
- arp_send_q
- arp_destroy
- arp_rcv
- arp_lookup
- arp_query
- arp_set_predefined
- arp_find
- arp_get_info
- arp_bind_cache
- arp_run_bh
- empty
- arp_req_set
- arp_req_get
- arp_req_delete
- arp_ioctl
- arp_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68 #include <linux/types.h>
69 #include <linux/string.h>
70 #include <linux/kernel.h>
71 #include <linux/sched.h>
72 #include <linux/config.h>
73 #include <linux/socket.h>
74 #include <linux/sockios.h>
75 #include <linux/errno.h>
76 #include <linux/if_arp.h>
77 #include <linux/in.h>
78 #include <linux/mm.h>
79 #include <linux/inet.h>
80 #include <linux/netdevice.h>
81 #include <linux/etherdevice.h>
82 #include <linux/trdevice.h>
83 #include <linux/skbuff.h>
84 #include <linux/proc_fs.h>
85 #include <linux/stat.h>
86
87 #include <net/ip.h>
88 #include <net/icmp.h>
89 #include <net/route.h>
90 #include <net/protocol.h>
91 #include <net/tcp.h>
92 #include <net/sock.h>
93 #include <net/arp.h>
94 #ifdef CONFIG_AX25
95 #include <net/ax25.h>
96 #ifdef CONFIG_NETROM
97 #include <net/netrom.h>
98 #endif
99 #endif
100 #ifdef CONFIG_NET_ALIAS
101 #include <linux/net_alias.h>
102 #endif
103
104 #include <asm/system.h>
105 #include <asm/segment.h>
106
107 #include <stdarg.h>
108
109
110
111
112
113
114
115
/*
 *	One ARP cache entry: a resolved (or in-progress) mapping from an
 *	IPv4 address to a hardware address on a specific device.
 */
struct arp_table
{
	struct arp_table *next;			/* bucket chain link */
	unsigned long last_used;		/* jiffies of last lookup hit (expiry) */
	unsigned long last_updated;		/* jiffies of last hw-address refresh */
	unsigned int flags;			/* ATF_* control flags */
	u32 ip;					/* protocol (IP) address */
	u32 mask;				/* netmask — used for subnet proxy ARP entries */
	unsigned char ha[MAX_ADDR_LEN];		/* hardware address */
	struct device *dev;			/* device this entry is bound to */

	/*
	 *	The following fields only matter while the entry is
	 *	unresolved (or being re-confirmed).
	 */
	struct timer_list timer;		/* request retransmit / expiry timer */
	int retries;				/* remaining retransmissions */
	struct sk_buff_head skb;		/* packets queued pending resolution */
	struct hh_cache *hh;			/* hardware header caches bound to us */
};
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
/*
 *	Interval between retransmissions of an outstanding ARP request.
 */
#define ARP_RES_TIME (5*HZ)

/*
 *	Probe interval once a host has stopped answering but the entry is
 *	still referenced by hardware-header caches ("dead" state).
 */
#define ARP_DEAD_RES_TIME (60*HZ)

/*
 *	Number of request retransmissions before giving up.
 */
#define ARP_MAX_TRIES 3

/*
 *	An unused, non-permanent entry is dropped after this long.
 */
#define ARP_TIMEOUT (600*HZ)

/*
 *	How often the periodic timer sweeps the cache.
 */
#define ARP_CHECK_INTERVAL (60*HZ)

/*
 *	A completed entry older than ARP_CONFIRM_INTERVAL is re-confirmed
 *	with a unicast probe, which itself times out after
 *	ARP_CONFIRM_TIMEOUT.
 */
#define ARP_CONFIRM_INTERVAL (300*HZ)
#define ARP_CONFIRM_TIMEOUT ARP_RES_TIME

static unsigned int arp_lock;		/* non-zero while tables are being walked */
static unsigned int arp_bh_mask;	/* pending deferred work (ARP_BH_* bits) */

#define ARP_BH_BACKLOG 1		/* entries waiting on arp_backlog */

static struct arp_table *arp_backlog;	/* circular queue of deferred insertions (tail pointer) */

static void arp_run_bh(void);
static void arp_check_expire (unsigned long);

/* Periodic cache sweep timer; re-armed by arp_check_expire itself. */
static struct timer_list arp_timer =
	{ NULL, NULL, ARP_CHECK_INTERVAL, 0L, &arp_check_expire };

/*
 *	Netmask meaning "match this exact host" for proxy entries.
 */
#define DEF_ARP_NETMASK (~0)

/*
 *	Hash table size — must be a power of two for HASH() below.
 *	One extra slot holds the proxy ARP list.
 */
#define ARP_TABLE_SIZE 16
#define FULL_ARP_TABLE_SIZE (ARP_TABLE_SIZE+1)

struct arp_table *arp_tables[FULL_ARP_TABLE_SIZE] =
{
	NULL,
};

/* The proxy list lives in the spare slot of the hash array. */
#define arp_proxy_list arp_tables[ARP_TABLE_SIZE]

/*
 *	Bucket selector for the cache; uses the low bits of the address
 *	in host order.
 */
#define HASH(paddr) (htonl(paddr) & (ARP_TABLE_SIZE - 1))
226
227
228
229
230
/*
 *	Take a reference on the ARP tables.  While arp_lock is non-zero,
 *	insertions from other contexts go to the backlog queue instead of
 *	modifying the tables directly.
 */
static __inline__ void arp_fast_lock(void)
{
	ATOMIC_INCR(&arp_lock);
}
235
/*
 *	Drop a lock reference WITHOUT running deferred work.  Used where
 *	the caller knows the backlog cannot need service.
 */
static __inline__ void arp_fast_unlock(void)
{
	ATOMIC_DECR(&arp_lock);
}
240
/*
 *	Drop a lock reference; if we were the last holder and deferred
 *	work is pending, run the bottom half now.
 */
static __inline__ void arp_unlock(void)
{
	if (!ATOMIC_DECR_AND_CHECK(&arp_lock) && arp_bh_mask)
		arp_run_bh();
}
246
247
248
249
250
251 static void arp_enqueue(struct arp_table **q, struct arp_table *entry)
252 {
253 unsigned long flags;
254 struct arp_table * tail;
255
256 save_flags(flags);
257 cli();
258 tail = *q;
259 if (!tail)
260 entry->next = entry;
261 else
262 {
263 entry->next = tail->next;
264 tail->next = entry;
265 }
266 *q = entry;
267 restore_flags(flags);
268 return;
269 }
270
271
272
273
274
275
276 static struct arp_table * arp_dequeue(struct arp_table **q)
277 {
278 struct arp_table * entry;
279
280 if (*q)
281 {
282 entry = (*q)->next;
283 (*q)->next = entry->next;
284 if (entry->next == entry)
285 *q = NULL;
286 entry->next = NULL;
287 return entry;
288 }
289 return NULL;
290 }
291
292
293
294
295
/*
 *	Free every sk_buff queued on an entry awaiting resolution.
 *	The queue is unlinked with interrupts off, but each buffer is
 *	freed with interrupts restored, since dev_kfree_skb() may need
 *	device locks.
 */
static void arp_release_entry(struct arp_table *entry)
{
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);
	cli();
	/* Dequeue atomically, free with interrupts enabled. */
	while ((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		skb_device_lock(skb);
		restore_flags(flags);
		dev_kfree_skb(skb, FREE_WRITE);
		cli();
	}
	restore_flags(flags);
	return;
}
314
315
316
317
318
319
320 static void arp_free_entry(struct arp_table *entry)
321 {
322 unsigned long flags;
323 struct hh_cache *hh, *next;
324
325 del_timer(&entry->timer);
326
327 save_flags(flags);
328 cli();
329 arp_release_entry(entry);
330
331 for (hh = entry->hh; hh; hh = next)
332 {
333 next = hh->hh_next;
334 hh->hh_arp = NULL;
335 hh->hh_uptodate = 0;
336 if (!--hh->hh_refcnt)
337 kfree_s(hh, sizeof(struct(struct hh_cache)));
338 }
339 restore_flags(flags);
340
341 kfree_s(entry, sizeof(struct arp_table));
342 return;
343 }
344
345
346
347
348
349 static __inline__ int arp_count_hhs(struct arp_table * entry)
350 {
351 struct hh_cache *hh, **hhp;
352 int count = 0;
353
354 hhp = &entry->hh;
355 while ((hh=*hhp) != NULL)
356 {
357 if (hh->hh_refcnt == 1)
358 {
359 *hhp = hh->hh_next;
360 kfree_s(hh, sizeof(struct hh_cache));
361 continue;
362 }
363 count += hh->hh_refcnt-1;
364 hhp = &hh->hh_next;
365 }
366
367 return count;
368 }
369
370
371
372
373
374 static __inline__ void arp_invalidate_hhs(struct arp_table * entry)
375 {
376 struct hh_cache *hh;
377
378 for (hh=entry->hh; hh; hh=hh->hh_next)
379 hh->hh_uptodate = 0;
380 }
381
382
383
384
385
386 static __inline__ void arp_update_hhs(struct arp_table * entry)
387 {
388 struct hh_cache *hh;
389
390 for (hh=entry->hh; hh; hh=hh->hh_next)
391 entry->dev->header_cache_update(hh, entry->dev, entry->ha);
392 }
393
394
395
396
397
398
399
400
401
402
/*
 *	Periodic cache sweep, run from arp_timer every ARP_CHECK_INTERVAL.
 *	Drops stale unreferenced entries and re-confirms aging ones with a
 *	unicast probe.  If the tables are locked the sweep is skipped until
 *	the next interval; the timer is always re-armed.
 */
static void arp_check_expire(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;

	del_timer(&arp_timer);

	if (!arp_lock)
	{
		arp_fast_lock();

		for (i = 0; i < ARP_TABLE_SIZE; i++)
		{
			struct arp_table *entry;
			struct arp_table **pentry;

			pentry = &arp_tables[i];

			while ((entry = *pentry) != NULL)
			{
				cli();
				if (now - entry->last_used > ARP_TIMEOUT
				    && !(entry->flags & ATF_PERM)
				    && !arp_count_hhs(entry))
				{
					/* Unused, not permanent, no hh users: drop it. */
					*pentry = entry->next;
					sti();
#if RT_CACHE_DEBUG >= 2
					printk("arp_expire: %08x expired\n", entry->ip);
#endif
					arp_free_entry(entry);
				}
				else if (entry->last_updated
					 && now - entry->last_updated > ARP_CONFIRM_INTERVAL
					 && !(entry->flags & ATF_PERM))
				{
					/*
					 *	Still in use but aging: mark incomplete,
					 *	invalidate cached headers and send a
					 *	unicast probe to re-confirm the mapping.
					 */
					struct device * dev = entry->dev;
					pentry = &entry->next;
					entry->flags &= ~ATF_COM;
					arp_invalidate_hhs(entry);
					sti();
					entry->retries = ARP_MAX_TRIES+1;
					del_timer(&entry->timer);
					entry->timer.expires = jiffies + ARP_CONFIRM_TIMEOUT;
					add_timer(&entry->timer);
					arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip,
						 dev, dev->pa_addr, entry->ha,
						 dev->dev_addr, NULL);
#if RT_CACHE_DEBUG >= 2
					printk("arp_expire: %08x requires confirmation\n", entry->ip);
#endif
				}
				else
					pentry = &entry->next;
			}
		}
		arp_unlock();
	}

	ip_rt_check_expire();

	/*
	 *	Re-arm ourselves for the next sweep.
	 */
	arp_timer.expires = jiffies + ARP_CHECK_INTERVAL;
	add_timer(&arp_timer);
}
471
472
473
474
475
476
477
/*
 *	Per-entry timer handler: an outstanding request (or confirmation
 *	probe) timed out.  Retransmit while retries remain; otherwise drop
 *	queued packets and either park the entry in "dead" state (if hh
 *	caches still reference it) or unlink and free it.
 */
static void arp_expire_request (unsigned long arg)
{
	struct arp_table *entry = (struct arp_table *) arg;
	struct arp_table **pentry;
	unsigned long hash;
	unsigned long flags;

	save_flags(flags);
	cli();

	/*
	 *	The entry completed between timer firing and us running:
	 *	nothing to do.
	 */
	if (entry->flags & ATF_COM)
	{
		restore_flags(flags);
		return;
	}

	/*
	 *	Tables are busy — retry shortly rather than racing the
	 *	current walker.
	 */
	if (arp_lock)
	{
#if RT_CACHE_DEBUG >= 1
		printk("arp_expire_request: %08x postponed\n", entry->ip);
#endif
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + HZ/10;
		add_timer(&entry->timer);
		restore_flags(flags);
		return;
	}

	arp_fast_lock();
	restore_flags(flags);

	/* Retries remain (and the host once answered): probe again. */
	if (entry->last_updated && --entry->retries > 0)
	{
		struct device *dev = entry->dev;

#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x timed out\n", entry->ip);
#endif
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + ARP_RES_TIME;
		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
			 NULL, dev->dev_addr, NULL);
		arp_unlock();
		return;
	}

	/* Give up on the queued packets. */
	arp_release_entry(entry);

	cli();
	if (arp_count_hhs(entry))
	{
		/*
		 *	Header caches still reference us: keep the entry as
		 *	"dead" and probe again at the slow rate.
		 */
		struct device *dev = entry->dev;
#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x is dead\n", entry->ip);
#endif
		arp_release_entry(entry);
		entry->retries = ARP_MAX_TRIES;
		restore_flags(flags);
		entry->last_updated = 0;
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + ARP_DEAD_RES_TIME;
		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
			 NULL, dev->dev_addr, NULL);
		arp_unlock();
		return;
	}
	restore_flags(flags);

	/* No users left: unlink from the hash chain and free. */
	hash = HASH(entry->ip);

	pentry = &arp_tables[hash];

	while (*pentry != NULL)
	{
		if (*pentry == entry)
		{
			cli();
			*pentry = entry->next;
			restore_flags(flags);
#if RT_CACHE_DEBUG >= 2
			printk("arp_expire_request: %08x is killed\n", entry->ip);
#endif
			arp_free_entry(entry);
			arp_unlock();
			return;
		}
		pentry = &(*pentry)->next;
	}
	printk("arp_expire_request: bug: ARP entry is lost!\n");
	arp_unlock();
}
578
579
580
581
582
583 int arp_device_event(struct notifier_block *this, unsigned long event, void *ptr)
584 {
585 struct device *dev=ptr;
586 int i;
587
588 if (event != NETDEV_DOWN)
589 return NOTIFY_DONE;
590
591
592
593
594 #if RT_CACHE_DEBUG >= 1
595 if (arp_lock)
596 printk("arp_device_event: bug\n");
597 #endif
598 arp_fast_lock();
599
600 for (i = 0; i < FULL_ARP_TABLE_SIZE; i++)
601 {
602 struct arp_table *entry;
603 struct arp_table **pentry = &arp_tables[i];
604
605 while ((entry = *pentry) != NULL)
606 {
607 if (entry->dev == dev)
608 {
609 *pentry = entry->next;
610 arp_free_entry(entry);
611 }
612 else
613 pentry = &entry->next;
614 }
615 }
616 arp_unlock();
617 return NOTIFY_DONE;
618 }
619
620
621
622
623
624
625
/*
 *	Build and transmit one ARP packet (request or reply).
 *
 *	type	  ARPOP_* opcode
 *	ptype	  link-level protocol id (normally ETH_P_ARP)
 *	dest_ip   target protocol address
 *	dev	  device to send on
 *	src_ip	  our protocol address to advertise
 *	dest_hw   destination hardware address (NULL => broadcast)
 *	src_hw	  source hardware address (NULL => device default)
 *	target_hw target hardware address field (NULL => zero-filled)
 */
void arp_send(int type, int ptype, u32 dest_ip,
	      struct device *dev, u32 src_ip,
	      unsigned char *dest_hw, unsigned char *src_hw,
	      unsigned char *target_hw)
{
	struct sk_buff *skb;
	struct arphdr *arp;
	unsigned char *arp_ptr;

	/*
	 *	No arp on this interface.
	 */
	if (dev->flags&IFF_NOARP)
		return;

	/*
	 *	Allocate a buffer: header + two (hw,ip) address pairs,
	 *	plus room for the link-level header.
	 */
	skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
		+ dev->hard_header_len, GFP_ATOMIC);
	if (skb == NULL)
	{
		printk("ARP: no memory to send an arp packet\n");
		return;
	}
	skb_reserve(skb, dev->hard_header_len);
	arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4));
	skb->arp = 1;			/* already resolved — don't ARP for an ARP */
	skb->dev = dev;
	skb->free = 1;
	skb->protocol = htons (ETH_P_IP);

	/*
	 *	Fill the device header for the ARP frame.
	 */
	dev->hard_header(skb,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len);

	/* Fixed-format ARP header. */
	arp->ar_hrd = htons(dev->type);
#ifdef CONFIG_AX25
#ifdef CONFIG_NETROM
	/* AX.25/NET/ROM carry IP under their own protocol id. */
	arp->ar_pro = (dev->type == ARPHRD_AX25 || dev->type == ARPHRD_NETROM) ? htons(AX25_P_IP) : htons(ETH_P_IP);
#else
	arp->ar_pro = (dev->type != ARPHRD_AX25) ? htons(ETH_P_IP) : htons(AX25_P_IP);
#endif
#else
	arp->ar_pro = htons(ETH_P_IP);
#endif
	arp->ar_hln = dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	/* Variable part: sender hw, sender ip, target hw, target ip. */
	arp_ptr=(unsigned char *)(arp+1);

	memcpy(arp_ptr, src_hw, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &src_ip,4);
	arp_ptr+=4;
	if (target_hw != NULL)
		memcpy(arp_ptr, target_hw, dev->addr_len);
	else
		memset(arp_ptr, 0, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &dest_ip, 4);

	dev_queue_xmit(skb, dev, 0);
}
696
697
698
699
700
/*
 *	Flush the queue of packets that were waiting for this entry to
 *	resolve.  The entry must be complete (ATF_COM) — each packet gets
 *	its link-layer header rebuilt with the now-known address and is
 *	transmitted.
 */
static void arp_send_q(struct arp_table *entry)
{
	struct sk_buff *skb;

	unsigned long flags;

	/*
	 *	Should never happen: only completed entries may be flushed.
	 *	Refuse rather than emit frames with a bogus destination.
	 */
	if(!(entry->flags&ATF_COM))
	{
		printk("arp_send_q: incomplete entry for %s\n",
		       in_ntoa(entry->ip));
		return;
	}

	save_flags(flags);

	cli();
	while((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		IS_SKB(skb);
		skb_device_lock(skb);
		restore_flags(flags);
		/* Rebuild the hardware header now the address is known. */
		if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb))
		{
			skb->arp = 1;
			if(skb->sk==NULL)
				dev_queue_xmit(skb, skb->dev, 0);
			else
				dev_queue_xmit(skb,skb->dev,skb->sk->priority);
		}
	}
	restore_flags(flags);
}
741
742
743
744
745
746
747 static void arp_destroy(struct arp_table * entry)
748 {
749 struct arp_table *entry1;
750 struct arp_table **pentry;
751
752 if (entry->flags & ATF_PUBL)
753 pentry = &arp_proxy_list;
754 else
755 pentry = &arp_tables[HASH(entry->ip)];
756
757 while ((entry1 = *pentry) != NULL)
758 {
759 if (entry1 == entry)
760 {
761 *pentry = entry1->next;
762 del_timer(&entry->timer);
763 arp_free_entry(entry);
764 return;
765 }
766 pentry = &entry1->next;
767 }
768 }
769
770
771
772
773
774
775
/*
 *	Receive one ARP packet.  Validates the header against the device,
 *	answers requests for our addresses (including proxy ARP), and
 *	learns/updates the sender's mapping in the cache.
 *	Always consumes skb and returns 0.
 */
int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
{
	struct arphdr *arp = (struct arphdr *)skb->h.raw;
	unsigned char *arp_ptr= (unsigned char *)(arp+1);
	struct arp_table *entry;
	struct arp_table *proxy_entry;
	unsigned long hash;
	unsigned char ha[MAX_ADDR_LEN];
	unsigned char *sha,*tha;
	u32 sip,tip;

	/*
	 *	Sanity: hardware type/length must match the receiving
	 *	device, protocol address must be IPv4-sized, and the
	 *	device must do ARP at all.
	 */
	if (arp->ar_hln != dev->addr_len ||
	    dev->type != ntohs(arp->ar_hrd) ||
	    dev->flags & IFF_NOARP ||
	    arp->ar_pln != 4)
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	/*
	 *	The protocol id must be the right "IP" value for the
	 *	link type (AX.25/NET/ROM use their own).
	 */
	switch (dev->type)
	{
#ifdef CONFIG_AX25
	case ARPHRD_AX25:
		if(arp->ar_pro != htons(AX25_P_IP))
		{
			kfree_skb(skb, FREE_READ);
			return 0;
		}
		break;
#endif
#ifdef CONFIG_NETROM
	case ARPHRD_NETROM:
		if(arp->ar_pro != htons(AX25_P_IP))
		{
			kfree_skb(skb, FREE_READ);
			return 0;
		}
		break;
#endif
	case ARPHRD_ETHER:
	case ARPHRD_ARCNET:
		if(arp->ar_pro != htons(ETH_P_IP))
		{
			kfree_skb(skb, FREE_READ);
			return 0;
		}
		break;

	case ARPHRD_IEEE802:
		if(arp->ar_pro != htons(ETH_P_IP))
		{
			kfree_skb(skb, FREE_READ);
			return 0;
		}
		break;

	default:
		printk("ARP: dev->type mangled!\n");
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	/*
	 *	Extract sender/target (hw, ip) address pairs from the
	 *	variable part of the packet.
	 */
	sha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	tha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/*
	 *	Nobody legitimately ARPs for loopback or multicast.
	 */
	if (LOOPBACK(tip) || MULTICAST(tip))
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

#ifdef CONFIG_NET_ALIAS
	/*
	 *	Re-select the device if the target belongs to an alias of
	 *	the receiving interface, then re-check its suitability.
	 */
	if (tip != dev->pa_addr && net_alias_has(skb->dev))
	{
		dev = net_alias_dev_rcv_sel32(dev, AF_INET, sip, tip);

		if (dev->type != ntohs(arp->ar_hrd) || dev->flags & IFF_NOARP)
		{
			kfree_skb(skb, FREE_READ);
			return 0;
		}
	}
#endif

	if (arp->ar_op == htons(ARPOP_REQUEST))
	{
		/*
		 *	Request for an address that isn't ours: answer only
		 *	if we have a matching proxy ARP entry for this device.
		 */
		if (tip != dev->pa_addr)
		{
			arp_fast_lock();

			for (proxy_entry=arp_proxy_list;
			     proxy_entry;
			     proxy_entry = proxy_entry->next)
			{
				/* Masked match supports subnet proxy entries. */
				if (proxy_entry->dev == dev &&
				    !((proxy_entry->ip^tip)&proxy_entry->mask))
					break;

			}
			if (proxy_entry)
			{
				memcpy(ha, proxy_entry->ha, dev->addr_len);
				arp_unlock();
				arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,ha, sha);
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			else
			{
				arp_unlock();
				kfree_skb(skb, FREE_READ);
				return 0;
			}
		}
		else
		{
			/* Request for us: reply with our hardware address. */
			arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
		}
	}

	/*
	 *	Only learn from packets addressed to one of our addresses.
	 */
	if(ip_chk_addr(tip)!=IS_MYADDR)
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	/*
	 *	Update or create the sender's cache entry.
	 */
	arp_fast_lock();

	hash = HASH(sip);

	for (entry=arp_tables[hash]; entry; entry=entry->next)
		if (entry->ip == sip && entry->dev == dev)
			break;

	if (entry)
	{
		/* Refresh the hardware address unless pinned. */
		if (!(entry->flags & ATF_PERM)) {
			memcpy(entry->ha, sha, dev->addr_len);
			entry->last_updated = jiffies;
		}
		if (!(entry->flags & ATF_COM))
		{
			/*
			 *	Resolution just completed: cancel the retry
			 *	timer, refresh header caches and flush the
			 *	packets queued on this entry.
			 */
			del_timer(&entry->timer);
			entry->flags |= ATF_COM;
			arp_update_hhs(entry);
			arp_send_q(entry);
		}
	}
	else
	{
		/*
		 *	Unsolicited mapping: create a completed entry.
		 */
		entry = (struct arp_table *)kmalloc(sizeof(struct arp_table),GFP_ATOMIC);
		if(entry == NULL)
		{
			arp_unlock();
			printk("ARP: no memory for new arp entry\n");
			kfree_skb(skb, FREE_READ);
			return 0;
		}

		entry->mask = DEF_ARP_NETMASK;
		entry->ip = sip;
		entry->flags = ATF_COM;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;
		memcpy(entry->ha, sha, dev->addr_len);
		entry->last_updated = entry->last_used = jiffies;

#ifdef CONFIG_NET_ALIAS
		entry->dev = dev;
#else
		entry->dev = skb->dev;
#endif
		skb_queue_head_init(&entry->skb);
		/*
		 *	If we are the sole lock holder we may insert
		 *	directly; otherwise defer via the backlog queue.
		 */
		if (arp_lock == 1)
		{
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
		}
		else
		{
#if RT_CACHE_DEBUG >= 1
			printk("arp_rcv: %08x backlogged\n", entry->ip);
#endif
			arp_enqueue(&arp_backlog, entry);
			arp_bh_mask |= ARP_BH_BACKLOG;
		}
	}

	kfree_skb(skb, FREE_READ);
	arp_unlock();
	return 0;
}
1079
1080
1081
1082
1083
1084
1085
1086
1087 static struct arp_table *arp_lookup(u32 paddr, unsigned short flags, struct device * dev)
1088 {
1089 struct arp_table *entry;
1090
1091 if (!(flags & ATF_PUBL))
1092 {
1093 for (entry = arp_tables[HASH(paddr)];
1094 entry != NULL; entry = entry->next)
1095 if (entry->ip == paddr && (!dev || entry->dev == dev))
1096 break;
1097 return entry;
1098 }
1099
1100 if (!(flags & ATF_NETMASK))
1101 {
1102 for (entry = arp_proxy_list;
1103 entry != NULL; entry = entry->next)
1104 if (entry->ip == paddr && (!dev || entry->dev == dev))
1105 break;
1106 return entry;
1107 }
1108
1109 for (entry=arp_proxy_list; entry != NULL; entry = entry->next)
1110 if (!((entry->ip^paddr)&entry->mask) &&
1111 (!dev || entry->dev == dev))
1112 break;
1113 return entry;
1114 }
1115
1116
1117
1118
1119
1120 int arp_query(unsigned char *haddr, u32 paddr, struct device * dev)
1121 {
1122 struct arp_table *entry;
1123
1124 arp_fast_lock();
1125
1126 entry = arp_lookup(paddr, 0, dev);
1127
1128 if (entry != NULL)
1129 {
1130 entry->last_used = jiffies;
1131 if (entry->flags & ATF_COM)
1132 {
1133 memcpy(haddr, entry->ha, dev->addr_len);
1134 arp_unlock();
1135 return 1;
1136 }
1137 }
1138 arp_unlock();
1139 return 0;
1140 }
1141
1142
/*
 *	Handle destinations that never need a real ARP exchange: our own
 *	address, IP multicast and broadcast.  Fills *haddr and returns 1
 *	when the mapping is predefined, 0 when a normal lookup is needed.
 */
static int arp_set_predefined(int addr_hint, unsigned char * haddr, __u32 paddr, struct device * dev)
{
	switch (addr_hint)
	{
	case IS_MYADDR:
		printk("ARP: arp called for own IP address\n");
		memcpy(haddr, dev->dev_addr, dev->addr_len);
		return 1;
#ifdef CONFIG_IP_MULTICAST
	case IS_MULTICAST:
		if(dev->type==ARPHRD_ETHER || dev->type==ARPHRD_IEEE802)
		{
			u32 taddr;
			/*
			 *	RFC 1112 style mapping: 01:00:5e prefix plus
			 *	the low 23 bits of the group address.
			 */
			haddr[0]=0x01;
			haddr[1]=0x00;
			haddr[2]=0x5e;
			taddr=ntohl(paddr);
			haddr[5]=taddr&0xff;
			taddr=taddr>>8;
			haddr[4]=taddr&0xff;
			taddr=taddr>>8;
			haddr[3]=taddr&0x7f;
			return 1;
		}
		/*
		 *	Intentional fallthrough: devices without a hardware
		 *	multicast mapping use the broadcast address instead.
		 */
#endif

	case IS_BROADCAST:
		memcpy(haddr, dev->broadcast, dev->addr_len);
		return 1;
	}
	return 0;
}
1178
1179
1180
1181
1182
/*
 *	Main resolver entry point used by the output path.  Returns 0 and
 *	fills haddr when the hardware address is available now; returns 1
 *	when resolution is in progress — in that case skb (if any) has
 *	been queued on the entry (or dropped) and must not be sent yet.
 */
int arp_find(unsigned char *haddr, u32 paddr, struct device *dev,
	     u32 saddr, struct sk_buff *skb)
{
	struct arp_table *entry;
	unsigned long hash;

	/* Own / multicast / broadcast addresses need no ARP exchange. */
	if (arp_set_predefined(ip_chk_addr(paddr), haddr, paddr, dev))
	{
		if (skb)
			skb->arp = 1;
		return 0;
	}

	hash = HASH(paddr);
	arp_fast_lock();

	/*
	 *	Find an entry.
	 */
	entry = arp_lookup(paddr, 0, dev);

	if (entry != NULL)
	{
		if (!(entry->flags & ATF_COM))
		{
			/*
			 *	Resolution pending: queue the packet on the
			 *	entry — unless the target is known-dead
			 *	(last_updated == 0), in which case report
			 *	host unreachable and drop.
			 */
			if (skb != NULL)
			{
				if (entry->last_updated)
				{
					skb_queue_tail(&entry->skb, skb);
					skb_device_unlock(skb);
				}
				else
				{
#if 0
					if (skb->sk)
					{
						skb->sk->err = EHOSTDOWN;
						skb->sk->error_report(skb->sk);
					}
#else
					icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
#endif
					dev_kfree_skb(skb, FREE_WRITE);
				}
			}
			arp_unlock();
			return 1;
		}

		/*
		 *	Resolved: hand back the hardware address.
		 */
		entry->last_used = jiffies;
		memcpy(haddr, entry->ha, dev->addr_len);
		if (skb)
			skb->arp = 1;
		arp_unlock();
		return 0;
	}

	/*
	 *	No entry: create an unresolved one and start resolving.
	 */
	entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
					     GFP_ATOMIC);
	if (entry != NULL)
	{
		entry->last_updated = entry->last_used = jiffies;
		entry->flags = 0;
		entry->ip = paddr;
		entry->mask = DEF_ARP_NETMASK;
		memset(entry->ha, 0, dev->addr_len);
		entry->dev = dev;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;
		entry->timer.expires = jiffies + ARP_RES_TIME;
		skb_queue_head_init(&entry->skb);
		if (skb != NULL)
		{
			skb_queue_tail(&entry->skb, skb);
			skb_device_unlock(skb);
		}
		/*
		 *	Insert directly if we are the sole lock holder,
		 *	otherwise defer via the backlog queue.
		 */
		if (arp_lock == 1)
		{
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
			add_timer(&entry->timer);
			entry->retries = ARP_MAX_TRIES;
		}
		else
		{
#if RT_CACHE_DEBUG >= 1
			printk("arp_find: %08x backlogged\n", entry->ip);
#endif
			arp_enqueue(&arp_backlog, entry);
			arp_bh_mask |= ARP_BH_BACKLOG;
		}
	}
	else if (skb != NULL)
		dev_kfree_skb(skb, FREE_WRITE);
	arp_unlock();

	/*
	 *	Broadcast the initial request.
	 */
	arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, saddr, NULL,
		 dev->dev_addr, NULL);

	return 1;
}
1312
1313
1314
1315
1316
1317
1318 #define HBUFFERLEN 30
1319
/*
 *	/proc interface: format the ARP cache (and proxy list) into
 *	buffer, honouring the usual offset/length windowing protocol of
 *	procfs read handlers.  Returns the number of bytes produced.
 */
int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
	int len=0;
	off_t pos=0;
	int size;
	struct arp_table *entry;
	char hbuffer[HBUFFERLEN];
	int i,j,k;
	const char hexbuf[] = "0123456789ABCDEF";

	size = sprintf(buffer,"IP address HW type Flags HW address Mask Device\n");

	pos+=size;
	len+=size;

	arp_fast_lock();

	/* FULL size so the proxy list (the extra slot) is shown too. */
	for(i=0; i<FULL_ARP_TABLE_SIZE; i++)
	{
		for(entry=arp_tables[i]; entry!=NULL; entry=entry->next)
		{
			/*
			 *	Render the hardware address: AX.25/NET/ROM
			 *	addresses print as callsigns, everything else
			 *	as colon-separated hex.
			 */
#ifdef CONFIG_AX25
#ifdef CONFIG_NETROM
			if (entry->dev->type == ARPHRD_AX25 || entry->dev->type == ARPHRD_NETROM)
				strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
			else {
#else
			if(entry->dev->type==ARPHRD_AX25)
				strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
			else {
#endif
#endif

			for(k=0,j=0;k<HBUFFERLEN-3 && j<entry->dev->addr_len;j++)
			{
				hbuffer[k++]=hexbuf[ (entry->ha[j]>>4)&15 ];
				hbuffer[k++]=hexbuf[ entry->ha[j]&15 ];
				hbuffer[k++]=':';
			}
			hbuffer[--k]=0;	/* drop the trailing ':' */

#ifdef CONFIG_AX25
			}
#endif
			size = sprintf(buffer+len,
				"%-17s0x%-10x0x%-10x%s",
				in_ntoa(entry->ip),
				(unsigned int)entry->dev->type,
				entry->flags,
				hbuffer);
#if RT_CACHE_DEBUG < 2
			size += sprintf(buffer+len+size,
				" %-17s %s\n",
				entry->mask==DEF_ARP_NETMASK ?
				"*" : in_ntoa(entry->mask), entry->dev->name);
#else
			/* Debug build: also show hh refcount/validity. */
			size += sprintf(buffer+len+size,
				" %-17s %s\t%ld\t%1d\n",
				entry->mask==DEF_ARP_NETMASK ?
				"*" : in_ntoa(entry->mask), entry->dev->name,
				entry->hh ? entry->hh->hh_refcnt : -1,
				entry->hh ? entry->hh->hh_uptodate : 0);
#endif

			len += size;
			pos += size;

			/* procfs windowing: skip before offset, stop after window. */
			if (pos <= offset)
				len=0;
			if (pos >= offset+length)
				break;
		}
	}
	arp_unlock();

	*start = buffer+len-(pos-offset);	/* Start of wanted data */
	len = pos-offset;			/* Start slop */
	if (len>length)
		len = length;			/* Ending slop */
	return len;
}
1404
1405
1406
/*
 *	Bind a hardware-header cache slot (*hhp) for paddr on dev.
 *	Reuses an existing hh on the matching ARP entry when possible,
 *	otherwise allocates one (creating an unresolved ARP entry too if
 *	needed).  Returns 0 when a new hh was installed into *hhp, 1 when
 *	nothing was installed (already bound, OOM, or an existing hh was
 *	shared).
 */
int arp_bind_cache(struct hh_cache ** hhp, struct device *dev, unsigned short htype, u32 paddr)
{
	struct arp_table *entry;
	struct hh_cache *hh = *hhp;
	int addr_hint;
	unsigned long flags;

	if (hh)
		return 1;

	/*
	 *	Predefined mappings (own/multicast/broadcast) get a
	 *	standalone hh not tied to any ARP entry.
	 */
	if ((addr_hint = ip_chk_addr(paddr)) != 0)
	{
		unsigned char haddr[MAX_ADDR_LEN];
		if (hh)
			return 1;
		hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
		if (!hh)
			return 1;
		arp_set_predefined(addr_hint, haddr, paddr, dev);
		hh->hh_uptodate = 0;
		hh->hh_refcnt = 1;
		hh->hh_arp = NULL;
		hh->hh_next = NULL;
		hh->hh_type = htype;
		*hhp = hh;
		dev->header_cache_update(hh, dev, haddr);
		return 0;
	}

	save_flags(flags);

	arp_fast_lock();

	entry = arp_lookup(paddr, 0, dev);

	if (entry)
	{
		/*
		 *	Entry exists: share an hh of the right type if one
		 *	is already bound to it.
		 */
		cli();
		for (hh = entry->hh; hh; hh=hh->hh_next)
			if (hh->hh_type == htype)
				break;
		if (hh)
		{
			hh->hh_refcnt++;
			*hhp = hh;
			restore_flags(flags);
			arp_unlock();
			return 1;
		}
		restore_flags(flags);
	}

	/* Need a fresh hh. */
	hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
	if (!hh)
	{
		arp_unlock();
		return 1;
	}

	hh->hh_uptodate = 0;
	hh->hh_refcnt = 1;
	hh->hh_arp = NULL;
	hh->hh_next = NULL;
	hh->hh_type = htype;

	if (entry)
	{
		/* Attach the new hh to the existing entry. */
		dev->header_cache_update(hh, dev, entry->ha);
		*hhp = hh;
		cli();
		hh->hh_arp = (void*)entry;
		entry->hh = hh;
		hh->hh_refcnt++;		/* the entry's reference */
		restore_flags(flags);
		entry->last_used = jiffies;
		arp_unlock();
		return 0;
	}

	/*
	 *	No entry at all: create an unresolved one carrying the hh
	 *	and kick off resolution.
	 */
	entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
					     GFP_ATOMIC);
	if (entry == NULL)
	{
		kfree_s(hh, sizeof(struct hh_cache));
		arp_unlock();
		return 1;
	}

	entry->last_updated = entry->last_used = jiffies;
	entry->flags = 0;
	entry->ip = paddr;
	entry->mask = DEF_ARP_NETMASK;
	memset(entry->ha, 0, dev->addr_len);
	entry->dev = dev;
	entry->hh = hh;
	ATOMIC_INCR(&hh->hh_refcnt);		/* the entry's reference */
	init_timer(&entry->timer);
	entry->timer.function = arp_expire_request;
	entry->timer.data = (unsigned long)entry;
	entry->timer.expires = jiffies + ARP_RES_TIME;
	skb_queue_head_init(&entry->skb);

	/* Direct insert if sole lock holder, else defer to the backlog. */
	if (arp_lock == 1)
	{
		unsigned long hash = HASH(paddr);
		cli();
		entry->next = arp_tables[hash];
		arp_tables[hash] = entry;
		hh->hh_arp = (void*)entry;
		entry->retries = ARP_MAX_TRIES;
		restore_flags(flags);

		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
	}
	else
	{
#if RT_CACHE_DEBUG >= 1
		printk("arp_cache_bind: %08x backlogged\n", entry->ip);
#endif
		arp_enqueue(&arp_backlog, entry);
		arp_bh_mask |= ARP_BH_BACKLOG;
	}
	*hhp = hh;
	arp_unlock();
	return 0;
}
1539
1540 static void arp_run_bh()
1541 {
1542 unsigned long flags;
1543 struct arp_table *entry, *entry1;
1544 struct hh_cache *hh;
1545 __u32 sip;
1546
1547 save_flags(flags);
1548 cli();
1549 if (!arp_lock)
1550 {
1551 arp_fast_lock();
1552
1553 while ((entry = arp_dequeue(&arp_backlog)) != NULL)
1554 {
1555 unsigned long hash;
1556 sti();
1557 sip = entry->ip;
1558 hash = HASH(sip);
1559
1560
1561
1562
1563
1564 for (entry1=arp_tables[hash]; entry1; entry1=entry1->next)
1565 if (entry1->ip==sip && entry1->dev == entry->dev)
1566 break;
1567
1568 if (!entry1)
1569 {
1570 struct device * dev = entry->dev;
1571 cli();
1572 entry->next = arp_tables[hash];
1573 arp_tables[hash] = entry;
1574 for (hh=entry->hh; hh; hh=hh->hh_next)
1575 hh->hh_arp = (void*)entry;
1576 sti();
1577 del_timer(&entry->timer);
1578 entry->timer.expires = jiffies + ARP_RES_TIME;
1579 add_timer(&entry->timer);
1580 entry->retries = ARP_MAX_TRIES;
1581 arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
1582 #if RT_CACHE_DEBUG >= 1
1583 printk("arp_run_bh: %08x reinstalled\n", sip);
1584 #endif
1585 }
1586 else
1587 {
1588 struct sk_buff * skb;
1589 struct hh_cache * next;
1590
1591
1592
1593
1594 cli();
1595 for (hh=entry->hh; hh; hh=next)
1596 {
1597 next = hh->hh_next;
1598 hh->hh_next = entry1->hh;
1599 entry1->hh = hh;
1600 hh->hh_arp = (void*)entry1;
1601 }
1602 entry->hh = NULL;
1603
1604
1605
1606
1607 while ((skb = skb_dequeue(&entry->skb)) != NULL)
1608 {
1609 skb_device_lock(skb);
1610 sti();
1611 skb_queue_tail(&entry1->skb, skb);
1612 skb_device_unlock(skb);
1613 cli();
1614 }
1615 sti();
1616
1617 #if RT_CACHE_DEBUG >= 1
1618 printk("arp_run_bh: entry %08x was born dead\n", entry->ip);
1619 #endif
1620 arp_free_entry(entry);
1621
1622 if (entry1->flags & ATF_COM)
1623 {
1624 arp_update_hhs(entry1);
1625 arp_send_q(entry1);
1626 }
1627 }
1628 cli();
1629 }
1630 arp_bh_mask &= ~ARP_BH_BACKLOG;
1631 arp_unlock();
1632 }
1633 restore_flags(flags);
1634 }
1635
1636
1637
1638
1639
/*
 *	Return 1 if the first len bytes at addr are all zero, else 0.
 *	(A zero or negative len is trivially "empty".)
 */
static inline int empty(unsigned char * addr, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (addr[i] != 0)
			return 0;
	return 1;
}
1650
1651
1652
1653
1654
/*
 *	SIOCSARP: create or replace a (possibly proxy) ARP entry from a
 *	userspace arpreq.  Returns 0 or a negative errno.
 */
static int arp_req_set(struct arpreq *r, struct device * dev)
{
	struct arp_table *entry;
	struct sockaddr_in *si;
	struct rtable *rt;
	struct device *dev1;
	unsigned char *ha;
	u32 ip;

	/*
	 *	Extract the protocol address being mapped.
	 */
	si = (struct sockaddr_in *) &r->arp_pa;
	ip = si->sin_addr.s_addr;

	/*
	 *	Work out which device the address is reached through: our
	 *	own addresses go via loopback, everything else via routing.
	 */
	if (ip_chk_addr(ip) == IS_MYADDR)
		dev1 = dev_get("lo");
	else {
		rt = ip_rt_route(ip, 0);
		if (!rt)
			return -ENETUNREACH;
		dev1 = rt->rt_dev;
		ip_rt_put(rt);
	}

	/* No device given: default to the routed one (must do ARP). */
	if (!dev) {
		if (dev1->flags&(IFF_LOOPBACK|IFF_NOARP))
			return -ENODEV;
		dev = dev1;
	}

	/* Hardware family must match the device. */
	if (r->arp_ha.sa_family != dev->type)
		return -EINVAL;

	/*
	 *	Proxy entries must be for a foreign device; normal entries
	 *	must be on the device that routes to the address.
	 */
	if (((r->arp_flags & ATF_PUBL) && dev == dev1) ||
	    (!(r->arp_flags & ATF_PUBL) && dev != dev1))
		return -EINVAL;

#if RT_CACHE_DEBUG >= 1
	if (arp_lock)
		printk("arp_req_set: bug\n");
#endif
	arp_fast_lock();

	/*
	 *	Replace semantics: destroy any existing entry first.
	 *	(Mask out ATF_NETMASK so the lookup is an exact match.)
	 */
	entry = arp_lookup(ip, r->arp_flags & ~ATF_NETMASK, dev);

	if (entry)
	{
		arp_destroy(entry);
		entry = NULL;
	}

	/*
	 *	Allocate and link a fresh entry (proxy list or hash table).
	 */
	if (entry == NULL)
	{
		entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
					GFP_ATOMIC);
		if (entry == NULL)
		{
			arp_unlock();
			return -ENOMEM;
		}
		entry->ip = ip;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;

		if (r->arp_flags & ATF_PUBL)
		{
			cli();
			entry->next = arp_proxy_list;
			arp_proxy_list = entry;
			sti();
		}
		else
		{
			unsigned long hash = HASH(ip);
			cli();
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
			sti();
		}
		skb_queue_head_init(&entry->skb);
	}

	/*
	 *	Fill in the rest.  An all-zero hardware address with
	 *	ATF_COM means "use the device's own address".
	 */
	ha = r->arp_ha.sa_data;
	if ((r->arp_flags & ATF_COM) && empty(ha, dev->addr_len))
		ha = dev->dev_addr;
	memcpy(entry->ha, ha, dev->addr_len);
	entry->last_updated = entry->last_used = jiffies;
	entry->flags = r->arp_flags | ATF_COM;
	if ((entry->flags & ATF_PUBL) && (entry->flags & ATF_NETMASK))
	{
		si = (struct sockaddr_in *) &r->arp_netmask;
		entry->mask = si->sin_addr.s_addr;
	}
	else
		entry->mask = DEF_ARP_NETMASK;
	entry->dev = dev;
	arp_update_hhs(entry);
	arp_unlock();
	return 0;
}
1779
1780
1781
1782
1783
1784
1785
1786 static int arp_req_get(struct arpreq *r, struct device *dev)
1787 {
1788 struct arp_table *entry;
1789 struct sockaddr_in *si;
1790
1791 si = (struct sockaddr_in *) &r->arp_pa;
1792
1793 #if RT_CACHE_DEBUG >= 1
1794 if (arp_lock)
1795 printk("arp_req_set: bug\n");
1796 #endif
1797 arp_fast_lock();
1798
1799 entry = arp_lookup(si->sin_addr.s_addr, r->arp_flags|ATF_NETMASK, dev);
1800
1801 if (entry == NULL)
1802 {
1803 arp_unlock();
1804 return -ENXIO;
1805 }
1806
1807
1808
1809
1810
1811 memcpy(r->arp_ha.sa_data, &entry->ha, entry->dev->addr_len);
1812 r->arp_ha.sa_family = entry->dev->type;
1813 r->arp_flags = entry->flags;
1814 strncpy(r->arp_dev, entry->dev->name, 16);
1815 arp_unlock();
1816 return 0;
1817 }
1818
1819 static int arp_req_delete(struct arpreq *r, struct device * dev)
1820 {
1821 struct arp_table *entry;
1822 struct sockaddr_in *si;
1823
1824 si = (struct sockaddr_in *) &r->arp_pa;
1825 #if RT_CACHE_DEBUG >= 1
1826 if (arp_lock)
1827 printk("arp_req_delete: bug\n");
1828 #endif
1829 arp_fast_lock();
1830
1831 if (!(r->arp_flags & ATF_PUBL))
1832 {
1833 for (entry = arp_tables[HASH(si->sin_addr.s_addr)];
1834 entry != NULL; entry = entry->next)
1835 if (entry->ip == si->sin_addr.s_addr
1836 && (!dev || entry->dev == dev))
1837 {
1838 arp_destroy(entry);
1839 arp_unlock();
1840 return 0;
1841 }
1842 }
1843 else
1844 {
1845 for (entry = arp_proxy_list;
1846 entry != NULL; entry = entry->next)
1847 if (entry->ip == si->sin_addr.s_addr
1848 && (!dev || entry->dev == dev))
1849 {
1850 arp_destroy(entry);
1851 arp_unlock();
1852 return 0;
1853 }
1854 }
1855
1856 arp_unlock();
1857 return -ENXIO;
1858 }
1859
1860
1861
1862
1863
/*
 *	Handle the ARP ioctls: the current SIOC{D,S,G}ARP calls and the
 *	pre-1.3 OLD_SIOC{D,S,G}ARP variants, whose arpreq had no arp_dev
 *	field and encoded proxy entries differently.
 *
 *	Returns 0 on success or a negative errno.
 */
int arp_ioctl(unsigned int cmd, void *arg)
{
	int err;
	struct arpreq r;

	struct device * dev = NULL;

	switch(cmd)
	{
		case SIOCDARP:
		case SIOCSARP:
			/* Modifying the table requires superuser rights. */
			if (!suser())
				return -EPERM;
			/* fall through -- copy in the request like a GET */
		case SIOCGARP:
			err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq));
			if (err)
				return err;
			memcpy_fromfs(&r, arg, sizeof(struct arpreq));
			break;
		case OLD_SIOCDARP:
		case OLD_SIOCSARP:
			if (!suser())
				return -EPERM;
			/* fall through */
		case OLD_SIOCGARP:
			/* Old-style request is shorter: no arp_dev member,
			 * so clear it after the copy. */
			err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq_old));
			if (err)
				return err;
			memcpy_fromfs(&r, arg, sizeof(struct arpreq_old));
			memset(&r.arp_dev, 0, sizeof(r.arp_dev));
			break;
		default:
			return -EINVAL;
	}

	/* Only IPv4 protocol addresses are supported, and 0.0.0.0 is
	 * never a valid target. */
	if (r.arp_pa.sa_family != AF_INET)
		return -EPFNOSUPPORT;
	if (((struct sockaddr_in *)&r.arp_pa)->sin_addr.s_addr == 0)
		return -EINVAL;

	if (r.arp_dev[0])
	{
		/* Caller named a device: it must exist, and the hardware
		 * family (if given) must agree with the device type. */
		if ((dev = dev_get(r.arp_dev)) == NULL)
			return -ENODEV;

		if (!r.arp_ha.sa_family)
			r.arp_ha.sa_family = dev->type;
		else if (r.arp_ha.sa_family != dev->type)
			return -EINVAL;
	}
	else
	{
		/* No device named: for a proxy SET, pick the first device
		 * of the requested hardware type. */
		if ((r.arp_flags & ATF_PUBL) &&
		    ((cmd == SIOCSARP) || (cmd == OLD_SIOCSARP))) {
			if ((dev = dev_getbytype(r.arp_ha.sa_family)) == NULL)
				return -ENODEV;
		}
	}

	switch(cmd)
	{
		case SIOCDARP:
			return arp_req_delete(&r, dev);
		case SIOCSARP:
			return arp_req_set(&r, dev);
		case OLD_SIOCDARP:
			/* Old semantics: try to delete the plain entry and
			 * the proxy entry; succeed if either delete worked. */
			r.arp_flags &= ~ATF_PUBL;
			err = arp_req_delete(&r, dev);
			r.arp_flags |= ATF_PUBL;
			if (!err)
				arp_req_delete(&r, dev);
			else
				err = arp_req_delete(&r, dev);
			return err;
		case OLD_SIOCSARP:
			err = arp_req_set(&r, dev);
			/*
			 *	Old setting of a proxy (PUBL) entry also
			 *	removed any plain entry for the address.
			 */
			if (r.arp_flags & ATF_PUBL)
			{
				r.arp_flags &= ~ATF_PUBL;
				arp_req_delete(&r, dev);
			}
			return err;
		case SIOCGARP:
			err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq));
			if (err)
				return err;
			err = arp_req_get(&r, dev);
			if (!err)
				memcpy_tofs(arg, &r, sizeof(r));
			return err;
		case OLD_SIOCGARP:
			err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq_old));
			if (err)
				return err;
			/* Look for a plain entry first, then fall back to a
			 * proxy entry. */
			r.arp_flags &= ~ATF_PUBL;
			err = arp_req_get(&r, dev);
			if (err < 0)
			{
				r.arp_flags |= ATF_PUBL;
				err = arp_req_get(&r, dev);
			}
			if (!err)
				memcpy_tofs(arg, &r, sizeof(struct arpreq_old));
			return err;
	}

	return 0;
}
1979
1980
1981
1982
1983
1984
/*
 *	Packet type hook: makes the device layer hand every incoming ARP
 *	frame to arp_rcv().  The type field is filled in with
 *	htons(ETH_P_ARP) by arp_init() before registration.
 */
static struct packet_type arp_packet_type =
{
	0,		/* type: set to htons(ETH_P_ARP) in arp_init() */
	NULL,		/* no device restriction -- listen on all */
	arp_rcv,	/* receive handler */
	NULL,
	NULL
};
1993
/*
 *	Notifier hook so arp_device_event() runs on device up/down
 *	events; registered in arp_init().
 */
static struct notifier_block arp_dev_notifier={
	arp_device_event,	/* callback */
	NULL,			/* next in chain */
	0			/* priority */
};
1999
/*
 *	Called once at boot to plug ARP into the networking stack:
 *	registers the ARP packet handler, starts the cache timer, hooks
 *	device state changes, and exports /proc/net/arp.
 */
void arp_init (void)
{
	/* Register the packet type so arp_rcv() sees ARP frames. */
	arp_packet_type.type=htons(ETH_P_ARP);
	dev_add_pack(&arp_packet_type);

	/* Start the periodic cache timer (presumably runs
	 * arp_check_expire -- arp_timer is set up elsewhere in the file). */
	add_timer(&arp_timer);

	/* Learn about device up/down events via arp_device_event(). */
	register_netdevice_notifier(&arp_dev_notifier);

#ifdef CONFIG_PROC_FS
	/* Export the ARP cache as /proc/net/arp via arp_get_info(). */
	proc_net_register(&(struct proc_dir_entry) {
		PROC_NET_ARP, 3, "arp",
		S_IFREG | S_IRUGO, 1, 0, 0,
		0, &proc_net_inode_operations,
		arp_get_info
	});
#endif
}
2019