This source file includes following definitions.
- arp_fast_lock
- arp_fast_unlock
- arp_unlock
- arp_enqueue
- arp_dequeue
- arp_release_entry
- arp_free_entry
- arp_count_hhs
- arp_invalidate_hhs
- arp_update_hhs
- arp_check_expire
- arp_expire_request
- arp_device_event
- arp_send
- arp_send_q
- arp_destroy
- arp_rcv
- arp_lookup
- arp_query
- arp_set_predefined
- arp_find
- arp_get_info
- arp_bind_cache
- arp_run_bh
- empty
- arp_req_set
- arp_req_get
- arp_req_delete
- arp_ioctl
- arp_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68 #include <linux/types.h>
69 #include <linux/string.h>
70 #include <linux/kernel.h>
71 #include <linux/sched.h>
72 #include <linux/config.h>
73 #include <linux/socket.h>
74 #include <linux/sockios.h>
75 #include <linux/errno.h>
76 #include <linux/if_arp.h>
77 #include <linux/in.h>
78 #include <linux/mm.h>
79 #include <linux/inet.h>
80 #include <linux/netdevice.h>
81 #include <linux/etherdevice.h>
82 #include <linux/trdevice.h>
83 #include <linux/skbuff.h>
84 #include <linux/proc_fs.h>
85 #include <linux/stat.h>
86
87 #include <net/ip.h>
88 #include <net/icmp.h>
89 #include <net/route.h>
90 #include <net/protocol.h>
91 #include <net/tcp.h>
92 #include <net/sock.h>
93 #include <net/arp.h>
94 #ifdef CONFIG_AX25
95 #include <net/ax25.h>
96 #ifdef CONFIG_NETROM
97 #include <net/netrom.h>
98 #endif
99 #endif
100 #ifdef CONFIG_NET_ALIAS
101 #include <linux/net_alias.h>
102 #endif
103
104 #include <asm/system.h>
105 #include <asm/segment.h>
106
107 #include <stdarg.h>
108
109
110
111
112
113
114
115
/*
 *	One IP -> hardware address mapping (resolved or resolving).
 *	Entries are chained per hash bucket through 'next'; the extra
 *	bucket at arp_tables[ARP_TABLE_SIZE] holds proxy-ARP entries.
 */
struct arp_table
{
	struct arp_table *next;			/* hash-bucket / list chain */
	unsigned long last_used;		/* jiffies of last lookup hit */
	unsigned long last_updated;		/* jiffies of last hw-address update; 0 => dead */
	unsigned int flags;			/* ATF_* flags (ATF_COM, ATF_PERM, ATF_PUBL, ...) */
	u32 ip;					/* protocol (IPv4) address */
	u32 mask;				/* netmask; only meaningful for proxy entries */
	unsigned char ha[MAX_ADDR_LEN];		/* hardware address */
	struct device *dev;			/* device the mapping belongs to */

	/*
	 *	Resolution state.
	 */
	struct timer_list timer;		/* retransmit / expiry timer */
	int retries;				/* remaining ARP request retries */
	struct sk_buff_head skb;		/* packets queued until resolution completes */
	struct hh_cache *hh;			/* chain of cached hardware headers */
};
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
/* Time to wait for an ARP reply before retransmitting a request. */
#define ARP_RES_TIME		(5*HZ)
/* Retransmit interval once a host is known to be dead. */
#define ARP_DEAD_RES_TIME	(60*HZ)

/* Number of unanswered requests before an entry is given up on. */
#define ARP_MAX_TRIES		3

/* Unused, unreferenced entries are evicted after this idle time. */
#define ARP_TIMEOUT		(600*HZ)

/* How often the garbage-collection timer scans the whole table. */
#define ARP_CHECK_INTERVAL	(60*HZ)

/* Re-confirm a mapping this long after its last update, and how long
   to wait for the confirmation reply. */
#define ARP_CONFIRM_INTERVAL	(300*HZ)
#define ARP_CONFIRM_TIMEOUT	ARP_RES_TIME

static unsigned int arp_lock;		/* recursion count: table busy while != 0 */
static unsigned int arp_bh_mask;	/* pending deferred-work bits (ARP_BH_*) */

#define ARP_BH_BACKLOG	1

/* Entries created while the table was locked, awaiting insertion. */
static struct arp_table *arp_backlog;

static void arp_run_bh(void);
static void arp_check_expire (unsigned long);

static struct timer_list arp_timer =
	{ NULL, NULL, ARP_CHECK_INTERVAL, 0L, &arp_check_expire };

/* Netmask used for ordinary (non-proxy) entries: exact host match. */
#define DEF_ARP_NETMASK (~0)

#define ARP_TABLE_SIZE  16
/* One extra slot: the proxy-ARP list lives at index ARP_TABLE_SIZE. */
#define FULL_ARP_TABLE_SIZE (ARP_TABLE_SIZE+1)

struct arp_table *arp_tables[FULL_ARP_TABLE_SIZE] =
{
	NULL,
};

#define arp_proxy_list arp_tables[ARP_TABLE_SIZE]

/* Hash an IP address into a bucket index (network byte order). */
#define HASH(paddr) 	(htonl(paddr) & (ARP_TABLE_SIZE - 1))
226
227
228
229
230
/*
 *	Take the table lock.  The "fast" variants never run the deferred
 *	bottom-half work on release (see arp_unlock for the full version).
 */
static __inline__ void arp_fast_lock(void)
{
	ATOMIC_INCR(&arp_lock);
}
235
/*
 *	Release the table lock without checking for pending deferred work.
 */
static __inline__ void arp_fast_unlock(void)
{
	ATOMIC_DECR(&arp_lock);
}
240
/*
 *	Release the table lock; if we were the last holder and deferred
 *	work (e.g. the insertion backlog) is pending, run it now.
 */
static __inline__ void arp_unlock(void)
{
	if (!ATOMIC_DECR_AND_CHECK(&arp_lock) && arp_bh_mask)
		arp_run_bh();
}
246
247
248
249
250
/*
 *	Append an entry to a circular singly-linked queue.
 *	*q always points at the tail; tail->next is the head.
 */
static void arp_enqueue(struct arp_table **q, struct arp_table *entry)
{
	unsigned long flags;
	struct arp_table * tail;

	save_flags(flags);
	cli();			/* the queue is also touched at interrupt time */
	tail = *q;
	if (!tail)
		entry->next = entry;	/* sole element points at itself */
	else
	{
		entry->next = tail->next;	/* new tail -> old head */
		tail->next = entry;
	}
	*q = entry;		/* the new element becomes the tail */
	restore_flags(flags);
	return;
}
270
271
272
273
274
275
276 static struct arp_table * arp_dequeue(struct arp_table **q)
277 {
278 struct arp_table * entry;
279
280 if (*q)
281 {
282 entry = (*q)->next;
283 (*q)->next = entry->next;
284 if (entry->next == entry)
285 *q = NULL;
286 entry->next = NULL;
287 return entry;
288 }
289 return NULL;
290 }
291
292
293
294
295
/*
 *	Free every sk_buff still queued on an entry awaiting resolution.
 *	Interrupts are re-enabled around each free, since dev_kfree_skb
 *	may take a while; the queue itself is only touched with cli().
 */
static void arp_release_entry(struct arp_table *entry)
{
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);
	cli();

	while ((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		skb_device_lock(skb);
		restore_flags(flags);
		dev_kfree_skb(skb, FREE_WRITE);
		cli();
	}
	restore_flags(flags);
	return;
}
314
315
316
317
318
319
320 static void arp_free_entry(struct arp_table *entry)
321 {
322 unsigned long flags;
323 struct hh_cache *hh, *next;
324
325 del_timer(&entry->timer);
326
327 save_flags(flags);
328 cli();
329 arp_release_entry(entry);
330
331 for (hh = entry->hh; hh; hh = next)
332 {
333 next = hh->hh_next;
334 hh->hh_arp = NULL;
335 hh->hh_uptodate = 0;
336 if (!--hh->hh_refcnt)
337 kfree_s(hh, sizeof(struct(struct hh_cache)));
338 }
339 restore_flags(flags);
340
341 kfree_s(entry, sizeof(struct arp_table));
342 return;
343 }
344
345
346
347
348
/*
 *	Count external references to the entry's hh_cache chain.
 *	Side effect: chain members whose only reference is the chain
 *	itself (hh_refcnt == 1) are unlinked and freed along the way.
 *	Returns the number of remaining outside references.
 */
static __inline__ int arp_count_hhs(struct arp_table * entry)
{
	struct hh_cache *hh, **hhp;
	int count = 0;

	hhp = &entry->hh;
	while ((hh=*hhp) != NULL)
	{
		if (hh->hh_refcnt == 1)
		{
			/* Only we hold it: garbage-collect it now. */
			*hhp = hh->hh_next;
			kfree_s(hh, sizeof(struct hh_cache));
			continue;
		}
		count += hh->hh_refcnt-1;	/* exclude the chain's own ref */
		hhp = &hh->hh_next;
	}

	return count;
}
369
370
371
372
373
374 static __inline__ void arp_invalidate_hhs(struct arp_table * entry)
375 {
376 struct hh_cache *hh;
377
378 for (hh=entry->hh; hh; hh=hh->hh_next)
379 hh->hh_uptodate = 0;
380 }
381
382
383
384
385
386 static __inline__ void arp_update_hhs(struct arp_table * entry)
387 {
388 struct hh_cache *hh;
389
390 for (hh=entry->hh; hh; hh=hh->hh_next)
391 entry->dev->header_cache_update(hh, entry->dev, entry->ha);
392 }
393
394
395
396
397
398
399
400
401
402
/*
 *	Periodic garbage collection (runs from arp_timer).
 *	Evicts idle, unreferenced entries and re-confirms stale ones with
 *	a unicast ARP request.  Skipped entirely when the table is locked;
 *	the work simply waits for the next interval.
 */
static void arp_check_expire(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;

	del_timer(&arp_timer);

	if (!arp_lock)
	{
		arp_fast_lock();

		for (i = 0; i < ARP_TABLE_SIZE; i++)
		{
			struct arp_table *entry;
			struct arp_table **pentry;

			pentry = &arp_tables[i];

			while ((entry = *pentry) != NULL)
			{
				cli();
				if (now - entry->last_used > ARP_TIMEOUT
				    && !(entry->flags & ATF_PERM)
				    && !arp_count_hhs(entry))
				{
					/* Idle, not permanent, no hh users: evict. */
					*pentry = entry->next;
					sti();
#if RT_CACHE_DEBUG >= 2
					printk("arp_expire: %08x expired\n", entry->ip);
#endif
					arp_free_entry(entry);
				}
				else if (entry->last_updated
					 && now - entry->last_updated > ARP_CONFIRM_INTERVAL
					 && !(entry->flags & ATF_PERM))
				{
					/* Stale: mark incomplete and re-confirm with
					   a unicast request to the cached address. */
					struct device * dev = entry->dev;
					pentry = &entry->next;
					entry->flags &= ~ATF_COM;
					arp_invalidate_hhs(entry);
					sti();
					entry->retries = ARP_MAX_TRIES+1;
					del_timer(&entry->timer);
					entry->timer.expires = jiffies + ARP_CONFIRM_TIMEOUT;
					add_timer(&entry->timer);
					arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip,
						 dev, dev->pa_addr, entry->ha,
						 dev->dev_addr, NULL);
#if RT_CACHE_DEBUG >= 2
					printk("arp_expire: %08x requires confirmation\n", entry->ip);
#endif
				}
				else
					pentry = &entry->next;
			}
		}
		arp_unlock();
	}

	ip_rt_check_expire();

	/*
	 *	Re-arm ourselves for the next scan.
	 */
	arp_timer.expires = jiffies + ARP_CHECK_INTERVAL;
	add_timer(&arp_timer);
}
471
472
473
474
475
476
477
/*
 *	Per-entry timer handler: an ARP request went unanswered.
 *	Retransmits while retries remain; afterwards drops queued packets
 *	and either keeps the entry alive as "dead" (if hh_cache users
 *	still reference it) or unlinks and frees it.
 */
static void arp_expire_request (unsigned long arg)
{
	struct arp_table *entry = (struct arp_table *) arg;
	struct arp_table **pentry;
	unsigned long hash;
	unsigned long flags;

	save_flags(flags);
	cli();

	/* A reply may have completed the entry after the timer fired. */
	if (entry->flags & ATF_COM)
	{
		restore_flags(flags);
		return;
	}

	if (arp_lock)
	{
		/* Table busy: retry shortly rather than race its owner. */
#if RT_CACHE_DEBUG >= 1
		printk("arp_expire_request: %08x postponed\n", entry->ip);
#endif
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + HZ/10;
		add_timer(&entry->timer);
		restore_flags(flags);
		return;
	}

	arp_fast_lock();
	restore_flags(flags);

	if (entry->last_updated && --entry->retries > 0)
	{
		/* Still trying: broadcast the request again. */
		struct device *dev = entry->dev;

#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x timed out\n", entry->ip);
#endif

		del_timer(&entry->timer);
		entry->timer.expires = jiffies + ARP_RES_TIME;
		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
			 NULL, dev->dev_addr, NULL);
		arp_unlock();
		return;
	}

	/* Out of retries: throw away whatever is queued on the entry. */
	arp_release_entry(entry);

	cli();
	if (arp_count_hhs(entry))
	{
		/* hh_cache users still reference the entry: keep it as
		   "dead" (last_updated = 0) and probe at a slow rate. */
		struct device *dev = entry->dev;
#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x is dead\n", entry->ip);
#endif
		arp_release_entry(entry);
		entry->retries = ARP_MAX_TRIES;
		restore_flags(flags);
		entry->last_updated = 0;
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + ARP_DEAD_RES_TIME;
		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
			 NULL, dev->dev_addr, NULL);
		arp_unlock();
		return;
	}
	restore_flags(flags);

	/* Nobody references it: unlink from its hash chain and free. */
	hash = HASH(entry->ip);

	pentry = &arp_tables[hash];

	while (*pentry != NULL)
	{
		if (*pentry == entry)
		{
			cli();
			*pentry = entry->next;
			restore_flags(flags);
#if RT_CACHE_DEBUG >= 2
			printk("arp_expire_request: %08x is killed\n", entry->ip);
#endif
			arp_free_entry(entry);
			arp_unlock();
			return;
		}
		pentry = &(*pentry)->next;
	}
	printk("arp_expire_request: bug: ARP entry is lost!\n");
	arp_unlock();
}
578
579
580
581
582
/*
 *	Netdevice notifier: when an interface goes down, purge every ARP
 *	entry (including proxy entries) that points at it.
 */
int arp_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct device *dev=ptr;
	int i;

	if (event != NETDEV_DOWN)
		return NOTIFY_DONE;

#if RT_CACHE_DEBUG >= 1
	if (arp_lock)
		printk("arp_device_event: bug\n");
#endif
	arp_fast_lock();

	for (i = 0; i < FULL_ARP_TABLE_SIZE; i++)
	{
		struct arp_table *entry;
		struct arp_table **pentry = &arp_tables[i];

		while ((entry = *pentry) != NULL)
		{
			if (entry->dev == dev)
			{
				*pentry = entry->next;	/* unlink, then free */
				arp_free_entry(entry);
			}
			else
				pentry = &entry->next;
		}
	}
	arp_unlock();
	return NOTIFY_DONE;
}
619
620
621
622
623
624
625
/*
 *	Build and transmit an ARP packet of the given operation type.
 *	dest_hw == NULL broadcasts the frame; target_hw == NULL zero-fills
 *	the target hardware field (as in a request).
 */
void arp_send(int type, int ptype, u32 dest_ip,
	      struct device *dev, u32 src_ip,
	      unsigned char *dest_hw, unsigned char *src_hw,
	      unsigned char *target_hw)
{
	struct sk_buff *skb;
	struct arphdr *arp;
	unsigned char *arp_ptr;

	/*
	 *	No ARP on this interface (e.g. loopback, PPP).
	 */
	if (dev->flags&IFF_NOARP)
		return;

	/*
	 *	Header + two (hw, ip) address pairs + link-level header.
	 */
	skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
				+ dev->hard_header_len, GFP_ATOMIC);
	if (skb == NULL)
	{
		printk("ARP: no memory to send an arp packet\n");
		return;
	}
	skb_reserve(skb, dev->hard_header_len);
	arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4));
	skb->arp = 1;		/* already resolved: don't ARP for ARP */
	skb->dev = dev;
	skb->free = 1;
	skb->protocol = htons (ETH_P_IP);

	/*
	 *	Fill the link-level header.
	 */
	dev->hard_header(skb,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len);

	arp->ar_hrd = htons(dev->type);
#ifdef CONFIG_AX25
#ifdef CONFIG_NETROM
	arp->ar_pro = (dev->type == ARPHRD_AX25 || dev->type == ARPHRD_NETROM) ? htons(AX25_P_IP) : htons(ETH_P_IP);
#else
	arp->ar_pro = (dev->type != ARPHRD_AX25) ? htons(ETH_P_IP) : htons(AX25_P_IP);
#endif
#else
	arp->ar_pro = htons(ETH_P_IP);
#endif
	arp->ar_hln = dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	/* Variable part: sender hw, sender ip, target hw, target ip. */
	arp_ptr=(unsigned char *)(arp+1);

	memcpy(arp_ptr, src_hw, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &src_ip,4);
	arp_ptr+=4;
	if (target_hw != NULL)
		memcpy(arp_ptr, target_hw, dev->addr_len);
	else
		memset(arp_ptr, 0, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &dest_ip, 4);

	dev_queue_xmit(skb, dev, 0);
}
696
697
698
699
700
/*
 *	Flush the packets queued on a newly completed entry: rebuild each
 *	one's link-level header and hand it to the device.
 */
static void arp_send_q(struct arp_table *entry)
{
	struct sk_buff *skb;

	unsigned long flags;

	/*
	 *	The entry must be resolved before its queue can be sent.
	 */
	if(!(entry->flags&ATF_COM))
	{
		printk("arp_send_q: incomplete entry for %s\n",
				in_ntoa(entry->ip));
		/* Can't flush the queue until the address is known. */
		return;
	}

	save_flags(flags);

	cli();
	while((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		IS_SKB(skb);
		skb_device_lock(skb);
		restore_flags(flags);
		if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb))
		{
			skb->arp = 1;
			if(skb->sk==NULL)
				dev_queue_xmit(skb, skb->dev, 0);
			else
				dev_queue_xmit(skb,skb->dev,skb->sk->priority);
		}
	}
	restore_flags(flags);
}
741
742
743
744
745
746
/*
 *	Unlink an entry from its list (proxy list or hash bucket) and
 *	free it.  Quietly does nothing if the entry is not found.
 */
static void arp_destroy(struct arp_table * entry)
{
	struct arp_table *entry1;
	struct arp_table **pentry;

	if (entry->flags & ATF_PUBL)
		pentry = &arp_proxy_list;
	else
		pentry = &arp_tables[HASH(entry->ip)];

	while ((entry1 = *pentry) != NULL)
	{
		if (entry1 == entry)
		{
			*pentry = entry1->next;
			del_timer(&entry->timer);
			arp_free_entry(entry);
			return;
		}
		pentry = &entry1->next;
	}
}
769
770
771
772
773
774
775
/*
 *	Receive an ARP packet.  Answers requests addressed to us (or to a
 *	configured proxy entry) and opportunistically caches the sender's
 *	mapping when the target is one of our addresses.  Always consumes
 *	the skb and returns 0.
 */
int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
{
	struct arphdr *arp = (struct arphdr *)skb->h.raw;
	unsigned char *arp_ptr= (unsigned char *)(arp+1);
	struct arp_table *entry;
	struct arp_table *proxy_entry;
	unsigned long hash;
	unsigned char ha[MAX_ADDR_LEN];
	unsigned char *sha,*tha;
	u32 sip,tip;

	/*
	 *	Sanity: address lengths and hardware type must match the
	 *	device, and the device must do ARP at all.
	 */
	if (arp->ar_hln != dev->addr_len ||
	    dev->type != ntohs(arp->ar_hrd) ||
	    dev->flags & IFF_NOARP ||
	    arp->ar_pln != 4)
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	/*
	 *	Per-media check of the protocol identifier.
	 */
	switch (dev->type)
	{
#ifdef CONFIG_AX25
		case ARPHRD_AX25:
			if(arp->ar_pro != htons(AX25_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;
#endif
#ifdef CONFIG_NETROM
		case ARPHRD_NETROM:
			if(arp->ar_pro != htons(AX25_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;
#endif
		case ARPHRD_ETHER:
		case ARPHRD_ARCNET:
			if(arp->ar_pro != htons(ETH_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;

		case ARPHRD_IEEE802:
			if(arp->ar_pro != htons(ETH_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;

		default:
			printk("ARP: dev->type mangled!\n");
			kfree_skb(skb, FREE_READ);
			return 0;
	}

	/*
	 *	Extract the variable-length fields:
	 *	sender hw, sender ip, target hw, target ip.
	 */
	sha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	tha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/*
	 *	Loopback and multicast targets are never valid for ARP.
	 */
	if (LOOPBACK(tip) || MULTICAST(tip))
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

#ifdef CONFIG_NET_ALIAS
	if (tip != dev->pa_addr && net_alias_has(skb->dev))
	{
		/*
		 *	The target may be one of our alias addresses:
		 *	pick the matching alias device.
		 */
		dev = net_alias_dev_rcv_sel32(dev, AF_INET, sip, tip);

		if (dev->type != ntohs(arp->ar_hrd) || dev->flags & IFF_NOARP)
		{
			kfree_skb(skb, FREE_READ);
			return 0;
		}
	}
#endif

	if (arp->ar_op == htons(ARPOP_REQUEST))
	{
		if (tip != dev->pa_addr)
		{
			/*
			 *	Not for us: see whether a proxy-ARP entry
			 *	covers the target on this device.
			 */
			arp_fast_lock();

			for (proxy_entry=arp_proxy_list;
			     proxy_entry;
			     proxy_entry = proxy_entry->next)
			{
				/* Netmask match against the proxy entry. */
				if (proxy_entry->dev == dev &&
				    !((proxy_entry->ip^tip)&proxy_entry->mask))
					break;

			}
			if (proxy_entry)
			{
				/* Answer on behalf of the proxied host. */
				memcpy(ha, proxy_entry->ha, dev->addr_len);
				arp_unlock();
				arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,ha, sha);
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			else
			{
				arp_unlock();
				kfree_skb(skb, FREE_READ);
				return 0;
			}
		}
		else
		{
			/*
			 *	Request for our own address: reply directly.
			 */
			arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
		}
	}

	/* Only cache sender mappings for packets targeted at us. */
	if(ip_chk_addr(tip)!=IS_MYADDR)
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	/*
	 *	Update or create the cache entry for the sender.
	 */
	arp_fast_lock();

	hash = HASH(sip);

	for (entry=arp_tables[hash]; entry; entry=entry->next)
		if (entry->ip == sip && entry->dev == dev)
			break;

	if (entry)
	{
		/*
		 *	Refresh the hardware address unless the entry is
		 *	administratively pinned.
		 */
		if (!(entry->flags & ATF_PERM)) {
			memcpy(entry->ha, sha, dev->addr_len);
			entry->last_updated = jiffies;
		}
		if (!(entry->flags & ATF_COM))
		{
			/*
			 *	Resolution just completed: stop the retry
			 *	timer, refresh hh caches and flush the queue.
			 */
			del_timer(&entry->timer);
			entry->flags |= ATF_COM;
			arp_update_hhs(entry);

			arp_send_q(entry);
		}
	}
	else
	{
		/*
		 *	New sender: create a completed entry for it.
		 */
		entry = (struct arp_table *)kmalloc(sizeof(struct arp_table),GFP_ATOMIC);
		if(entry == NULL)
		{
			arp_unlock();
			printk("ARP: no memory for new arp entry\n");
			kfree_skb(skb, FREE_READ);
			return 0;
		}

		entry->mask = DEF_ARP_NETMASK;
		entry->ip = sip;
		entry->flags = ATF_COM;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;
		memcpy(entry->ha, sha, dev->addr_len);
		entry->last_updated = entry->last_used = jiffies;

#ifdef CONFIG_NET_ALIAS
		entry->dev = dev;
#else
		entry->dev = skb->dev;
#endif
		skb_queue_head_init(&entry->skb);
		if (arp_lock == 1)
		{
			/* Sole lock holder: insert directly. */
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
		}
		else
		{
			/* Table busy: defer insertion to the bottom half. */
#if RT_CACHE_DEBUG >= 1
			printk("arp_rcv: %08x backlogged\n", entry->ip);
#endif
			arp_enqueue(&arp_backlog, entry);
			arp_bh_mask |= ARP_BH_BACKLOG;
		}
	}

	kfree_skb(skb, FREE_READ);
	arp_unlock();
	return 0;
}
1079
1080
1081
1082
1083
1084
1085
1086
1087 static struct arp_table *arp_lookup(u32 paddr, unsigned short flags, struct device * dev)
1088 {
1089 struct arp_table *entry;
1090
1091 if (!(flags & ATF_PUBL))
1092 {
1093 for (entry = arp_tables[HASH(paddr)];
1094 entry != NULL; entry = entry->next)
1095 if (entry->ip == paddr && (!dev || entry->dev == dev))
1096 break;
1097 return entry;
1098 }
1099
1100 if (!(flags & ATF_NETMASK))
1101 {
1102 for (entry = arp_proxy_list;
1103 entry != NULL; entry = entry->next)
1104 if (entry->ip == paddr && (!dev || entry->dev == dev))
1105 break;
1106 return entry;
1107 }
1108
1109 for (entry=arp_proxy_list; entry != NULL; entry = entry->next)
1110 if (!((entry->ip^paddr)&entry->mask) &&
1111 (!dev || entry->dev == dev))
1112 break;
1113 return entry;
1114 }
1115
1116
1117
1118
1119
/*
 *	Resolve paddr to a hardware address without generating traffic.
 *	Copies the address into haddr and returns 1 only for a completed
 *	entry; returns 0 otherwise.
 */
int arp_query(unsigned char *haddr, u32 paddr, struct device * dev)
{
	struct arp_table *entry;

	arp_fast_lock();

	entry = arp_lookup(paddr, 0, dev);

	if (entry != NULL)
	{
		entry->last_used = jiffies;	/* keep the entry warm */
		if (entry->flags & ATF_COM)
		{
			memcpy(haddr, entry->ha, dev->addr_len);
			arp_unlock();
			return 1;
		}
	}
	arp_unlock();
	return 0;
}
1141
1142
1143 static int arp_set_predefined(int addr_hint, unsigned char * haddr, __u32 paddr, struct device * dev)
1144 {
1145 switch (addr_hint)
1146 {
1147 case IS_MYADDR:
1148 printk("ARP: arp called for own IP address\n");
1149 memcpy(haddr, dev->dev_addr, dev->addr_len);
1150 return 1;
1151 #ifdef CONFIG_IP_MULTICAST
1152 case IS_MULTICAST:
1153 if(dev->type==ARPHRD_ETHER || dev->type==ARPHRD_IEEE802)
1154 {
1155 u32 taddr;
1156 haddr[0]=0x01;
1157 haddr[1]=0x00;
1158 haddr[2]=0x5e;
1159 taddr=ntohl(paddr);
1160 haddr[5]=taddr&0xff;
1161 taddr=taddr>>8;
1162 haddr[4]=taddr&0xff;
1163 taddr=taddr>>8;
1164 haddr[3]=taddr&0x7f;
1165 return 1;
1166 }
1167
1168
1169
1170 #endif
1171
1172 case IS_BROADCAST:
1173 memcpy(haddr, dev->broadcast, dev->addr_len);
1174 return 1;
1175 }
1176 return 0;
1177 }
1178
1179
1180
1181
1182
/*
 *	Main resolver entry used on the transmit path.  Returns 0 with
 *	haddr filled in when the address is known (skb, if any, is marked
 *	resolved); returns 1 when resolution is in progress, in which case
 *	the skb has been queued (or discarded for a dead host) and an ARP
 *	request has been sent.
 */
int arp_find(unsigned char *haddr, u32 paddr, struct device *dev,
	     u32 saddr, struct sk_buff *skb)
{
	struct arp_table *entry;
	unsigned long hash;

	/* Own / broadcast / multicast addresses resolve immediately. */
	if (arp_set_predefined(ip_chk_addr(paddr), haddr, paddr, dev))
	{
		if (skb)
			skb->arp = 1;
		return 0;
	}

	hash = HASH(paddr);
	arp_fast_lock();

	entry = arp_lookup(paddr, 0, dev);

	if (entry != NULL)
	{
		if (!(entry->flags & ATF_COM))
		{
			/*
			 *	Resolution still pending: queue the packet,
			 *	unless the host is known dead (last_updated
			 *	cleared), in which case reject it.
			 */
			if (skb != NULL)
			{
				if (entry->last_updated)
				{
					skb_queue_tail(&entry->skb, skb);
					skb_device_unlock(skb);
				}
				else
				{
#if 0
					if (skb->sk)
					{
						skb->sk->err = EHOSTDOWN;
						skb->sk->error_report(skb->sk);
					}
#else
					icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
#endif
					dev_kfree_skb(skb, FREE_WRITE);
				}
			}
			arp_unlock();
			return 1;
		}

		/*
		 *	Completed entry: hand back the address.
		 */
		entry->last_used = jiffies;
		memcpy(haddr, entry->ha, dev->addr_len);
		if (skb)
			skb->arp = 1;
		arp_unlock();
		return 0;
	}

	/*
	 *	Unknown address: create an incomplete entry and resolve.
	 */
	entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
					     GFP_ATOMIC);
	if (entry != NULL)
	{
		entry->last_updated = entry->last_used = jiffies;
		entry->flags = 0;
		entry->ip = paddr;
		entry->mask = DEF_ARP_NETMASK;
		memset(entry->ha, 0, dev->addr_len);
		entry->dev = dev;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;
		entry->timer.expires = jiffies + ARP_RES_TIME;
		skb_queue_head_init(&entry->skb);
		if (skb != NULL)
		{
			skb_queue_tail(&entry->skb, skb);
			skb_device_unlock(skb);
		}
		if (arp_lock == 1)
		{
			/* Sole lock holder: insert and arm the timer. */
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
			add_timer(&entry->timer);
			entry->retries = ARP_MAX_TRIES;
		}
		else
		{
			/* Table busy: defer insertion to the bottom half. */
#if RT_CACHE_DEBUG >= 1
			printk("arp_find: %08x backlogged\n", entry->ip);
#endif
			arp_enqueue(&arp_backlog, entry);
			arp_bh_mask |= ARP_BH_BACKLOG;
		}
	}
	else if (skb != NULL)
		dev_kfree_skb(skb, FREE_WRITE);
	arp_unlock();

	/*
	 *	Send the initial request even if the entry could not be
	 *	allocated; a reply may still be useful later.
	 */
	arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, saddr, NULL,
		 dev->dev_addr, NULL);

	return 1;
}
1312
1313
1314
1315
1316
1317
/* Worst-case text length of one formatted hardware address. */
#define HBUFFERLEN 30

/*
 *	/proc/net/arp read handler: format the whole table (including the
 *	proxy list) into 'buffer', honouring the offset/length windowing
 *	protocol used by get_info handlers.
 */
int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
	int len=0;
	off_t pos=0;
	int size;
	struct arp_table *entry;
	char hbuffer[HBUFFERLEN];
	int i,j,k;
	const char hexbuf[] = "0123456789ABCDEF";

	size = sprintf(buffer,"IP address HW type Flags HW address Mask Device\n");

	pos+=size;
	len+=size;

	arp_fast_lock();

	for(i=0; i<FULL_ARP_TABLE_SIZE; i++)
	{
		for(entry=arp_tables[i]; entry!=NULL; entry=entry->next)
		{
			/*
			 *	AX.25/NET/ROM addresses have their own text
			 *	form; everything else is printed as hex.
			 */
#ifdef CONFIG_AX25
#ifdef CONFIG_NETROM
			if (entry->dev->type == ARPHRD_AX25 || entry->dev->type == ARPHRD_NETROM)
				strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
			else {
#else
			if(entry->dev->type==ARPHRD_AX25)
				strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
			else {
#endif
#endif

			for(k=0,j=0;k<HBUFFERLEN-3 && j<entry->dev->addr_len;j++)
			{
				hbuffer[k++]=hexbuf[ (entry->ha[j]>>4)&15 ];
				hbuffer[k++]=hexbuf[ entry->ha[j]&15 ];
				hbuffer[k++]=':';
			}
			hbuffer[--k]=0;	/* strip the trailing ':' */

#ifdef CONFIG_AX25
			}
#endif
			size = sprintf(buffer+len,
				"%-17s0x%-10x0x%-10x%s",
				in_ntoa(entry->ip),
				(unsigned int)entry->dev->type,
				entry->flags,
				hbuffer);
#if RT_CACHE_DEBUG < 2
			size += sprintf(buffer+len+size,
				 " %-17s %s\n",
				 entry->mask==DEF_ARP_NETMASK ?
				 "*" : in_ntoa(entry->mask), entry->dev->name);
#else
			size += sprintf(buffer+len+size,
				 " %-17s %s\t%ld\t%1d\n",
				 entry->mask==DEF_ARP_NETMASK ?
				 "*" : in_ntoa(entry->mask), entry->dev->name,
				 entry->hh ? entry->hh->hh_refcnt : -1,
				 entry->hh ? entry->hh->hh_uptodate : 0);
#endif

			len += size;
			pos += size;

			if (pos <= offset)
				len=0;	/* not yet inside the window */
			if (pos >= offset+length)
				goto done;
		}
	}
done:
	arp_unlock();

	*start = buffer+len-(pos-offset);	/* start of wanted data */
	len = pos-offset;			/* length of wanted data */
	if (len>length)
		len = length;
	return len;
}
1405
1406
1407
/*
 *	Bind a hardware-header cache slot (*hhp) for (dev, htype, paddr).
 *	Reuses an hh of the right type already on the ARP entry when one
 *	exists; otherwise allocates a new hh and, if needed, a new ARP
 *	entry (which starts resolving).  Returns 0 when a new hh was
 *	installed in *hhp, 1 otherwise (already bound, shared, or OOM).
 */
int arp_bind_cache(struct hh_cache ** hhp, struct device *dev, unsigned short htype, u32 paddr)
{
	struct arp_table *entry;
	struct hh_cache *hh = *hhp;
	int addr_hint;
	unsigned long flags;

	if (hh)
		return 1;

	if ((addr_hint = ip_chk_addr(paddr)) != 0)
	{
		/*
		 *	Special address (own/broadcast/multicast):
		 *	resolve immediately, no ARP entry needed.
		 */
		unsigned char haddr[MAX_ADDR_LEN];
		if (hh)
			return 1;
		hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
		if (!hh)
			return 1;
		arp_set_predefined(addr_hint, haddr, paddr, dev);
		hh->hh_uptodate = 0;
		hh->hh_refcnt = 1;
		hh->hh_arp = NULL;
		hh->hh_next = NULL;
		hh->hh_type = htype;
		*hhp = hh;
		dev->header_cache_update(hh, dev, haddr);
		return 0;
	}

	save_flags(flags);

	arp_fast_lock();

	entry = arp_lookup(paddr, 0, dev);

	if (entry)
	{
		cli();
		for (hh = entry->hh; hh; hh=hh->hh_next)
			if (hh->hh_type == htype)
				break;
		if (hh)
		{
			/* Matching hh already on the entry: share it. */
			hh->hh_refcnt++;
			*hhp = hh;
			restore_flags(flags);
			arp_unlock();
			return 1;
		}
		restore_flags(flags);
	}

	hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
	if (!hh)
	{
		arp_unlock();
		return 1;
	}

	hh->hh_uptodate = 0;
	hh->hh_refcnt = 1;
	hh->hh_arp = NULL;
	hh->hh_next = NULL;
	hh->hh_type = htype;

	if (entry)
	{
		/* Existing ARP entry: chain the new hh onto it. */
		dev->header_cache_update(hh, dev, entry->ha);
		*hhp = hh;
		cli();
		hh->hh_arp = (void*)entry;
		entry->hh = hh;
		hh->hh_refcnt++;
		restore_flags(flags);
		entry->last_used = jiffies;
		arp_unlock();
		return 0;
	}

	/*
	 *	No entry yet: create one and start resolving.
	 */
	entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
					     GFP_ATOMIC);
	if (entry == NULL)
	{
		kfree_s(hh, sizeof(struct hh_cache));
		arp_unlock();
		return 1;
	}

	entry->last_updated = entry->last_used = jiffies;
	entry->flags = 0;
	entry->ip = paddr;
	entry->mask = DEF_ARP_NETMASK;
	memset(entry->ha, 0, dev->addr_len);
	entry->dev = dev;
	entry->hh = hh;
	ATOMIC_INCR(&hh->hh_refcnt);	/* the entry's own reference */
	init_timer(&entry->timer);
	entry->timer.function = arp_expire_request;
	entry->timer.data = (unsigned long)entry;
	entry->timer.expires = jiffies + ARP_RES_TIME;
	skb_queue_head_init(&entry->skb);

	if (arp_lock == 1)
	{
		/* Sole lock holder: insert directly and send the request. */
		unsigned long hash = HASH(paddr);
		cli();
		entry->next = arp_tables[hash];
		arp_tables[hash] = entry;
		hh->hh_arp = (void*)entry;
		entry->retries = ARP_MAX_TRIES;
		restore_flags(flags);

		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
	}
	else
	{
		/* Table busy: defer insertion to the bottom half. */
#if RT_CACHE_DEBUG >= 1
		printk("arp_cache_bind: %08x backlogged\n", entry->ip);
#endif
		arp_enqueue(&arp_backlog, entry);
		arp_bh_mask |= ARP_BH_BACKLOG;
	}
	*hhp = hh;
	arp_unlock();
	return 0;
}
1540
/*
 *	Deferred ("bottom half") maintenance, run when the last lock
 *	holder releases the table: install backlogged entries, merging
 *	any duplicate that was created while the table was locked.
 */
static void arp_run_bh()
{
	unsigned long flags;
	struct arp_table *entry, *entry1;
	struct hh_cache *hh;
	__u32 sip;

	save_flags(flags);
	cli();
	if (!arp_lock)
	{
		arp_fast_lock();

		while ((entry = arp_dequeue(&arp_backlog)) != NULL)
		{
			unsigned long hash;
			sti();
			sip = entry->ip;
			hash = HASH(sip);

			/* Did an entry for this (ip, dev) race in already? */
			for (entry1=arp_tables[hash]; entry1; entry1=entry1->next)
				if (entry1->ip==sip && entry1->dev == entry->dev)
					break;

			if (!entry1)
			{
				/* No duplicate: install and start resolving. */
				struct device * dev = entry->dev;
				cli();
				entry->next = arp_tables[hash];
				arp_tables[hash] = entry;
				for (hh=entry->hh; hh; hh=hh->hh_next)
					hh->hh_arp = (void*)entry;
				sti();
				del_timer(&entry->timer);
				entry->timer.expires = jiffies + ARP_RES_TIME;
				add_timer(&entry->timer);
				entry->retries = ARP_MAX_TRIES;
				arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
#if RT_CACHE_DEBUG >= 1
				printk("arp_run_bh: %08x reinstalled\n", sip);
#endif
			}
			else
			{
				struct sk_buff * skb;
				struct hh_cache * next;

				/*
				 *	Duplicate: migrate the hh chain and
				 *	queued packets to the survivor, then
				 *	free the backlogged entry.
				 */
				cli();
				for (hh=entry->hh; hh; hh=next)
				{
					next = hh->hh_next;
					hh->hh_next = entry1->hh;
					entry1->hh = hh;
					hh->hh_arp = (void*)entry1;
				}
				entry->hh = NULL;

				while ((skb = skb_dequeue(&entry->skb)) != NULL)
				{
					skb_device_lock(skb);
					sti();
					skb_queue_tail(&entry1->skb, skb);
					skb_device_unlock(skb);
					cli();
				}
				sti();

#if RT_CACHE_DEBUG >= 1
				printk("arp_run_bh: entry %08x was born dead\n", entry->ip);
#endif
				arp_free_entry(entry);

				if (entry1->flags & ATF_COM)
				{
					arp_update_hhs(entry1);
					arp_send_q(entry1);
				}
			}
			cli();
		}
		arp_bh_mask &= ~ARP_BH_BACKLOG;
		arp_unlock();
	}
	restore_flags(flags);
}
1636
1637
1638
1639
1640
/*
 *	Return 1 when the first 'len' bytes at 'addr' are all zero,
 *	0 otherwise (len <= 0 counts as empty).
 */
static inline int empty(unsigned char * addr, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (addr[i] != 0)
			return 0;
	return 1;
}
1651
1652
1653
1654
1655
/*
 *	SIOCSARP: create or replace an ARP entry from user parameters
 *	(already copied to kernel space by arp_ioctl).  dev may be NULL,
 *	in which case it is derived from the routing table.
 */
static int arp_req_set(struct arpreq *r, struct device * dev)
{
	struct arp_table *entry;
	struct sockaddr_in *si;
	struct rtable *rt;
	struct device *dev1;
	unsigned char *ha;
	u32 ip;

	/*
	 *	Protocol address to map.
	 */
	si = (struct sockaddr_in *) &r->arp_pa;
	ip = si->sin_addr.s_addr;

	/*
	 *	Find the device the address is reached through.
	 */
	if (ip_chk_addr(ip) == IS_MYADDR)
		dev1 = dev_get("lo");
	else {
		rt = ip_rt_route(ip, 0);
		if (!rt)
			return -ENETUNREACH;
		dev1 = rt->rt_dev;
		ip_rt_put(rt);
	}

	if (!dev) {
		if (dev1->flags&(IFF_LOOPBACK|IFF_NOARP))
			return -ENODEV;
		dev = dev1;
	}

	/* Hardware address family must match the device. */
	if (r->arp_ha.sa_family != dev->type)
		return -EINVAL;

	/* Proxy entries must be for a foreign device; normal ones for
	   the device that actually routes to the address. */
	if (((r->arp_flags & ATF_PUBL) && dev == dev1) ||
	    (!(r->arp_flags & ATF_PUBL) && dev != dev1))
		return -EINVAL;

#if RT_CACHE_DEBUG >= 1
	if (arp_lock)
		printk("arp_req_set: bug\n");
#endif
	arp_fast_lock();

	/*
	 *	Replace any existing entry for this address.
	 */
	entry = arp_lookup(ip, r->arp_flags & ~ATF_NETMASK, dev);

	if (entry)
	{
		arp_destroy(entry);
		entry = NULL;
	}

	if (entry == NULL)
	{
		entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
					GFP_ATOMIC);
		if (entry == NULL)
		{
			arp_unlock();
			return -ENOMEM;
		}
		entry->ip = ip;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;

		if (r->arp_flags & ATF_PUBL)
		{
			cli();
			entry->next = arp_proxy_list;
			arp_proxy_list = entry;
			sti();
		}
		else
		{
			unsigned long hash = HASH(ip);
			cli();
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
			sti();
		}
		skb_queue_head_init(&entry->skb);
	}

	/* An all-zero hardware address with ATF_COM means "use ours". */
	ha = r->arp_ha.sa_data;
	if ((r->arp_flags & ATF_COM) && empty(ha, dev->addr_len))
		ha = dev->dev_addr;
	memcpy(entry->ha, ha, dev->addr_len);
	entry->last_updated = entry->last_used = jiffies;
	entry->flags = r->arp_flags | ATF_COM;
	if ((entry->flags & ATF_PUBL) && (entry->flags & ATF_NETMASK))
	{
		si = (struct sockaddr_in *) &r->arp_netmask;
		entry->mask = si->sin_addr.s_addr;
	}
	else
		entry->mask = DEF_ARP_NETMASK;
	entry->dev = dev;
	arp_update_hhs(entry);
	arp_unlock();
	return 0;
}
1780
1781
1782
1783
1784
1785
1786
1787 static int arp_req_get(struct arpreq *r, struct device *dev)
1788 {
1789 struct arp_table *entry;
1790 struct sockaddr_in *si;
1791
1792 si = (struct sockaddr_in *) &r->arp_pa;
1793
1794 #if RT_CACHE_DEBUG >= 1
1795 if (arp_lock)
1796 printk("arp_req_set: bug\n");
1797 #endif
1798 arp_fast_lock();
1799
1800 entry = arp_lookup(si->sin_addr.s_addr, r->arp_flags|ATF_NETMASK, dev);
1801
1802 if (entry == NULL)
1803 {
1804 arp_unlock();
1805 return -ENXIO;
1806 }
1807
1808
1809
1810
1811
1812 memcpy(r->arp_ha.sa_data, &entry->ha, entry->dev->addr_len);
1813 r->arp_ha.sa_family = entry->dev->type;
1814 r->arp_flags = entry->flags;
1815 strncpy(r->arp_dev, entry->dev->name, 16);
1816 arp_unlock();
1817 return 0;
1818 }
1819
1820 static int arp_req_delete(struct arpreq *r, struct device * dev)
1821 {
1822 struct arp_table *entry;
1823 struct sockaddr_in *si;
1824
1825 si = (struct sockaddr_in *) &r->arp_pa;
1826 #if RT_CACHE_DEBUG >= 1
1827 if (arp_lock)
1828 printk("arp_req_delete: bug\n");
1829 #endif
1830 arp_fast_lock();
1831
1832 if (!(r->arp_flags & ATF_PUBL))
1833 {
1834 for (entry = arp_tables[HASH(si->sin_addr.s_addr)];
1835 entry != NULL; entry = entry->next)
1836 if (entry->ip == si->sin_addr.s_addr
1837 && (!dev || entry->dev == dev))
1838 {
1839 arp_destroy(entry);
1840 arp_unlock();
1841 return 0;
1842 }
1843 }
1844 else
1845 {
1846 for (entry = arp_proxy_list;
1847 entry != NULL; entry = entry->next)
1848 if (entry->ip == si->sin_addr.s_addr
1849 && (!dev || entry->dev == dev))
1850 {
1851 arp_destroy(entry);
1852 arp_unlock();
1853 return 0;
1854 }
1855 }
1856
1857 arp_unlock();
1858 return -ENXIO;
1859 }
1860
1861
1862
1863
1864
/*
 *	Handle the ARP ioctls, both the current (SIOC?ARP, with a device
 *	name in struct arpreq) and the old (OLD_SIOC?ARP) variants.
 *	Copies the request from user space, resolves the target device,
 *	then dispatches to arp_req_set/get/delete.
 */
int arp_ioctl(unsigned int cmd, void *arg)
{
	int err;
	struct arpreq r;

	struct device * dev = NULL;

	switch(cmd)
	{
		case SIOCDARP:
		case SIOCSARP:
			if (!suser())
				return -EPERM;
			/* fall through: fetch the request like SIOCGARP */
		case SIOCGARP:
			err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq));
			if (err)
				return err;
			memcpy_fromfs(&r, arg, sizeof(struct arpreq));
			break;
		case OLD_SIOCDARP:
		case OLD_SIOCSARP:
			if (!suser())
				return -EPERM;
			/* fall through: fetch the shorter old-style request */
		case OLD_SIOCGARP:
			err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq_old));
			if (err)
				return err;
			memcpy_fromfs(&r, arg, sizeof(struct arpreq_old));
			/* Old requests carry no device name; mark it unset. */
			memset(&r.arp_dev, 0, sizeof(r.arp_dev));
			break;
		default:
			return -EINVAL;
	}

	/* Only IPv4 protocol addresses, and 0.0.0.0 is never valid. */
	if (r.arp_pa.sa_family != AF_INET)
		return -EPFNOSUPPORT;
	if (((struct sockaddr_in *)&r.arp_pa)->sin_addr.s_addr == 0)
		return -EINVAL;

	/*
	 *	Resolve the target device: an explicit name wins; otherwise a
	 *	proxy "set" request picks any device of the requested hardware
	 *	type, and everything else leaves dev NULL (match any device).
	 */
	if (r.arp_dev[0])
	{
		if ((dev = dev_get(r.arp_dev)) == NULL)
			return -ENODEV;

		if (!r.arp_ha.sa_family)
			r.arp_ha.sa_family = dev->type;
		else if (r.arp_ha.sa_family != dev->type)
			return -EINVAL;
	}
	else
	{
		if ((r.arp_flags & ATF_PUBL) &&
		    ((cmd == SIOCSARP) || (cmd == OLD_SIOCSARP))) {
			if ((dev = dev_getbytype(r.arp_ha.sa_family)) == NULL)
				return -ENODEV;
		}
	}

	switch(cmd)
	{
		case SIOCDARP:
		        return arp_req_delete(&r, dev);
		case SIOCSARP:
			return arp_req_set(&r, dev);
		case OLD_SIOCDARP:
			/*
			 *	The old ioctl cannot say whether it means a
			 *	proxy or a normal entry: delete the normal one
			 *	first, then try the proxy entry as well (or as
			 *	a fallback if the first delete failed).
			 */
			r.arp_flags &= ~ATF_PUBL;
			err = arp_req_delete(&r, dev);
			r.arp_flags |= ATF_PUBL;
			if (!err)
				arp_req_delete(&r, dev);
			else
				err = arp_req_delete(&r, dev);
			return err;
		case OLD_SIOCSARP:
			err = arp_req_set(&r, dev);
			/*
			 *	Old-style set of a proxy entry also removes any
			 *	normal entry for the same address, mirroring the
			 *	historic behaviour user space expects.
			 */
			if (r.arp_flags & ATF_PUBL)
			{
				r.arp_flags &= ~ATF_PUBL;
				arp_req_delete(&r, dev);
			}
			return err;
		case SIOCGARP:
			err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq));
			if (err)
				return err;
			err = arp_req_get(&r, dev);
			if (!err)
				memcpy_tofs(arg, &r, sizeof(r));
			return err;
		case OLD_SIOCGARP:
			err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq_old));
			if (err)
				return err;
			/* Prefer a normal entry; fall back to a proxy one. */
			r.arp_flags &= ~ATF_PUBL;
			err = arp_req_get(&r, dev);
			if (err < 0)
			{
				r.arp_flags |= ATF_PUBL;
				err = arp_req_get(&r, dev);
			}
			if (!err)
				memcpy_tofs(arg, &r, sizeof(struct arpreq_old));
			return err;
	}

	return 0;
}
1980
1981
1982
1983
1984
1985
/*
 *	Packet type descriptor for incoming ARP frames; the protocol id
 *	field is filled with htons(ETH_P_ARP) at init time.
 */
static struct packet_type arp_packet_type =
{
	0,		/* type: set to htons(ETH_P_ARP) in arp_init() */
	NULL,		/* dev: NULL means accept from any device */
	arp_rcv,	/* handler for received ARP packets */
	NULL,
	NULL
};
1994
/* Notifier block so ARP is told about network device events
   (handled by arp_device_event). */
static struct notifier_block arp_dev_notifier={
	arp_device_event,
	NULL,
	0
};
2000
/*
 *	Called once at boot: register the ARP packet type, start the
 *	periodic ARP timer, hook device-event notifications and, when
 *	procfs is configured, export /proc/net/arp.
 */
void arp_init (void)
{
	/* Register the packet type so incoming ARP frames reach arp_rcv. */
	arp_packet_type.type=htons(ETH_P_ARP);
	dev_add_pack(&arp_packet_type);
	/* Kick off the periodic timer (presumably arp_check_expire —
	   arp_timer is defined outside this chunk). */
	add_timer(&arp_timer);
	/* Hear about device up/down/unregister events. */
	register_netdevice_notifier(&arp_dev_notifier);

#ifdef CONFIG_PROC_FS
	/* Register /proc/net/arp, served by arp_get_info. */
	proc_net_register(&(struct proc_dir_entry) {
		PROC_NET_ARP, 3, "arp",
		S_IFREG | S_IRUGO, 1, 0, 0,
		0, &proc_net_inode_operations,
		arp_get_info
	});
#endif
}
2020