This source file includes the following definitions.
- arp_fast_lock
- arp_fast_unlock
- arp_unlock
- arp_enqueue
- arp_dequeue
- arp_release_entry
- arp_free_entry
- arp_count_hhs
- arp_invalidate_hhs
- arp_update_hhs
- arp_check_expire
- arp_expire_request
- arp_device_event
- arp_send
- arp_send_q
- arp_destroy
- arp_rcv
- arp_lookup
- arp_query
- arp_set_predefined
- arp_find
- arp_get_info
- arp_bind_cache
- arp_run_bh
- empty
- arp_req_set
- arp_req_get
- arp_req_delete
- arp_ioctl
- arp_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69 #include <linux/types.h>
70 #include <linux/string.h>
71 #include <linux/kernel.h>
72 #include <linux/sched.h>
73 #include <linux/config.h>
74 #include <linux/socket.h>
75 #include <linux/sockios.h>
76 #include <linux/errno.h>
77 #include <linux/if_arp.h>
78 #include <linux/in.h>
79 #include <linux/mm.h>
80 #include <linux/inet.h>
81 #include <linux/netdevice.h>
82 #include <linux/etherdevice.h>
83 #include <linux/trdevice.h>
84 #include <linux/skbuff.h>
85 #include <linux/proc_fs.h>
86 #include <linux/stat.h>
87
88 #include <net/ip.h>
89 #include <net/icmp.h>
90 #include <net/route.h>
91 #include <net/protocol.h>
92 #include <net/tcp.h>
93 #include <net/sock.h>
94 #include <net/arp.h>
95 #ifdef CONFIG_AX25
96 #include <net/ax25.h>
97 #ifdef CONFIG_NETROM
98 #include <net/netrom.h>
99 #endif
100 #endif
101 #ifdef CONFIG_NET_ALIAS
102 #include <linux/net_alias.h>
103 #endif
104
105 #include <asm/system.h>
106 #include <asm/segment.h>
107
108 #include <stdarg.h>
109
110
111
112
113
114
115
116
/*
 *	One cached IP -> hardware address mapping.  Entries live on the
 *	hash chains of arp_tables[] (proxy entries on arp_proxy_list) and
 *	keep their hardware-header cache records chained through 'hh'.
 */
struct arp_table
{
	struct arp_table *next;			/* hash-chain / backlog link		*/
	unsigned long last_used;		/* jiffies of last lookup (expiry)	*/
	unsigned long last_updated;		/* jiffies of last confirmation		*/
	unsigned int flags;			/* ATF_COM, ATF_PERM, ATF_PUBL ...	*/
	u32 ip;					/* protocol (IPv4) address		*/
	u32 mask;				/* netmask — only used by proxy entries	*/
	unsigned char ha[MAX_ADDR_LEN];		/* resolved hardware address		*/
	struct device *dev;			/* device this mapping belongs to	*/

	/*
	 *	The following fields matter mainly while the entry is
	 *	unresolved or being re-confirmed.
	 */

	struct timer_list timer;		/* retransmit / expire timer		*/
	int retries;				/* remaining ARP request retries	*/
	struct sk_buff_head skb;		/* packets queued awaiting resolution	*/
	struct hh_cache *hh;			/* cached hardware headers		*/
};
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152 #define ARP_RES_TIME (5*HZ)
153 #define ARP_DEAD_RES_TIME (60*HZ)
154
155
156
157
158
159
160 #define ARP_MAX_TRIES 3
161
162
163
164
165
166 #define ARP_TIMEOUT (600*HZ)
167
168
169
170
171
172
173
174 #define ARP_CHECK_INTERVAL (60*HZ)
175
176
177
178
179
180
181
182 #define ARP_CONFIRM_INTERVAL (300*HZ)
183 #define ARP_CONFIRM_TIMEOUT ARP_RES_TIME
184
185 static unsigned int arp_lock;
186 static unsigned int arp_bh_mask;
187
188 #define ARP_BH_BACKLOG 1
189
190 static struct arp_table *arp_backlog;
191
192 static void arp_run_bh(void);
193 static void arp_check_expire (unsigned long);
194
195 static struct timer_list arp_timer =
196 { NULL, NULL, ARP_CHECK_INTERVAL, 0L, &arp_check_expire };
197
198
199
200
201
202
203 #define DEF_ARP_NETMASK (~0)
204
205
206
207
208
209
210
211 #define ARP_TABLE_SIZE 16
212 #define FULL_ARP_TABLE_SIZE (ARP_TABLE_SIZE+1)
213
214 struct arp_table *arp_tables[FULL_ARP_TABLE_SIZE] =
215 {
216 NULL,
217 };
218
219 #define arp_proxy_list arp_tables[ARP_TABLE_SIZE]
220
221
222
223
224
225
226 #define HASH(paddr) (htonl(paddr) & (ARP_TABLE_SIZE - 1))
227
228
229
230
231
/*
 *	Grab a reference on the ARP table lock counter; while non-zero,
 *	chain restructuring is deferred to the backlog (arp_run_bh).
 */
static __inline__ void arp_fast_lock(void)
{
	ATOMIC_INCR(&arp_lock);
}
236
/*
 *	Drop the lock counter WITHOUT running any deferred backlog work.
 *	(Compare arp_unlock(), which may run arp_run_bh().)
 */
static __inline__ void arp_fast_unlock(void)
{
	ATOMIC_DECR(&arp_lock);
}
241
/*
 *	Drop the lock counter; if this released the last reference and
 *	bottom-half work is pending (backlogged entries), run it now.
 */
static __inline__ void arp_unlock(void)
{
	if (!ATOMIC_DECR_AND_CHECK(&arp_lock) && arp_bh_mask)
		arp_run_bh();
}
247
248
249
250
251
252 static void arp_enqueue(struct arp_table **q, struct arp_table *entry)
253 {
254 unsigned long flags;
255 struct arp_table * tail;
256
257 save_flags(flags);
258 cli();
259 tail = *q;
260 if (!tail)
261 entry->next = entry;
262 else
263 {
264 entry->next = tail->next;
265 tail->next = entry;
266 }
267 *q = entry;
268 restore_flags(flags);
269 return;
270 }
271
272
273
274
275
276
277 static struct arp_table * arp_dequeue(struct arp_table **q)
278 {
279 struct arp_table * entry;
280
281 if (*q)
282 {
283 entry = (*q)->next;
284 (*q)->next = entry->next;
285 if (entry->next == entry)
286 *q = NULL;
287 entry->next = NULL;
288 return entry;
289 }
290 return NULL;
291 }
292
293
294
295
296
/*
 *	Free every sk_buff queued on an (unresolved) entry.  Interrupts
 *	are re-enabled around each dev_kfree_skb() call, so the queue is
 *	drained one packet at a time with interrupts off only while the
 *	queue itself is being manipulated.
 */
static void arp_release_entry(struct arp_table *entry)
{
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);
	cli();

	/* Release the list of queued skbs one by one. */
	while ((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		skb_device_lock(skb);
		restore_flags(flags);		/* allow interrupts during the free */
		dev_kfree_skb(skb, FREE_WRITE);
		cli();				/* re-protect the queue before next dequeue */
	}
	restore_flags(flags);
	return;
}
315
316
317
318
319
320
321 static void arp_free_entry(struct arp_table *entry)
322 {
323 unsigned long flags;
324 struct hh_cache *hh, *next;
325
326 del_timer(&entry->timer);
327
328 save_flags(flags);
329 cli();
330 arp_release_entry(entry);
331
332 for (hh = entry->hh; hh; hh = next)
333 {
334 next = hh->hh_next;
335 hh->hh_arp = NULL;
336 hh->hh_uptodate = 0;
337 if (!--hh->hh_refcnt)
338 kfree_s(hh, sizeof(struct(struct hh_cache)));
339 }
340 restore_flags(flags);
341
342 kfree_s(entry, sizeof(struct arp_table));
343 return;
344 }
345
346
347
348
349
350 static __inline__ int arp_count_hhs(struct arp_table * entry)
351 {
352 struct hh_cache *hh, **hhp;
353 int count = 0;
354
355 hhp = &entry->hh;
356 while ((hh=*hhp) != NULL)
357 {
358 if (hh->hh_refcnt == 1)
359 {
360 *hhp = hh->hh_next;
361 kfree_s(hh, sizeof(struct hh_cache));
362 continue;
363 }
364 count += hh->hh_refcnt-1;
365 hhp = &hh->hh_next;
366 }
367
368 return count;
369 }
370
371
372
373
374
375 static __inline__ void arp_invalidate_hhs(struct arp_table * entry)
376 {
377 struct hh_cache *hh;
378
379 for (hh=entry->hh; hh; hh=hh->hh_next)
380 hh->hh_uptodate = 0;
381 }
382
383
384
385
386
387 static __inline__ void arp_update_hhs(struct arp_table * entry)
388 {
389 struct hh_cache *hh;
390
391 for (hh=entry->hh; hh; hh=hh->hh_next)
392 entry->dev->header_cache_update(hh, entry->dev, entry->ha);
393 }
394
395
396
397
398
399
400
401
402
403
/*
 *	Periodic ARP garbage collector, driven by arp_timer every
 *	ARP_CHECK_INTERVAL.  If the table is locked the whole scan is
 *	simply skipped until the next tick.  For each non-permanent
 *	entry: free it when it has been unused for ARP_TIMEOUT and has
 *	no live hh references; otherwise, if it is stale
 *	(ARP_CONFIRM_INTERVAL since last update), demote it to
 *	incomplete and send a unicast ARP request to re-confirm.
 *	Also kicks the routing-cache expiry.
 */
static void arp_check_expire(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;

	del_timer(&arp_timer);

	if (!arp_lock)
	{
		arp_fast_lock();

		for (i = 0; i < ARP_TABLE_SIZE; i++)
		{
			struct arp_table *entry;
			struct arp_table **pentry;

			pentry = &arp_tables[i];

			while ((entry = *pentry) != NULL)
			{
				cli();
				if (now - entry->last_used > ARP_TIMEOUT
				    && !(entry->flags & ATF_PERM)
				    && !arp_count_hhs(entry))
				{
					/* Idle and unreferenced: unlink and free. */
					*pentry = entry->next;
					sti();
#if RT_CACHE_DEBUG >= 2
					printk("arp_expire: %08x expired\n", entry->ip);
#endif
					arp_free_entry(entry);
				}
				else if (entry->last_updated
					 && now - entry->last_updated > ARP_CONFIRM_INTERVAL
					 && !(entry->flags & ATF_PERM))
				{
					/* Stale: mark incomplete and re-ARP (unicast to
					   the old hardware address) to confirm it. */
					struct device * dev = entry->dev;
					pentry = &entry->next;
					entry->flags &= ~ATF_COM;
					arp_invalidate_hhs(entry);
					sti();
					entry->retries = ARP_MAX_TRIES+1;
					del_timer(&entry->timer);
					entry->timer.expires = jiffies + ARP_CONFIRM_TIMEOUT;
					add_timer(&entry->timer);
					arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip,
						 dev, dev->pa_addr, entry->ha,
						 dev->dev_addr, NULL);
#if RT_CACHE_DEBUG >= 2
					printk("arp_expire: %08x requires confirmation\n", entry->ip);
#endif
				}
				else
					pentry = &entry->next;	/* keep as-is */
			}
		}
		arp_unlock();
	}

	ip_rt_check_expire();

	/*
	 *	Set the timer again.
	 */

	arp_timer.expires = jiffies + ARP_CHECK_INTERVAL;
	add_timer(&arp_timer);
}
472
473
474
475
476
477
478
/*
 *	Per-entry timer handler: an outstanding ARP request timed out.
 *	Either retransmit the request, park the entry as "dead" (if hh
 *	cache users still reference it), or unlink and destroy it.
 *	If the table is locked, the whole decision is postponed by
 *	rescheduling the timer for HZ/10.
 */
static void arp_expire_request (unsigned long arg)
{
	struct arp_table *entry = (struct arp_table *) arg;
	struct arp_table **pentry;
	unsigned long hash;
	unsigned long flags;

	save_flags(flags);
	cli();

	/*
	 *	The entry may have been resolved between timer firing and
	 *	us getting here; in that case there is nothing to do.
	 */
	if (entry->flags & ATF_COM)
	{
		restore_flags(flags);
		return;
	}

	if (arp_lock)
	{
#if RT_CACHE_DEBUG >= 1
		printk("arp_expire_request: %08x postponed\n", entry->ip);
#endif
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + HZ/10;
		add_timer(&entry->timer);
		restore_flags(flags);
		return;
	}

	arp_fast_lock();
	restore_flags(flags);

	if (entry->last_updated && --entry->retries > 0)
	{
		/* Retries left: broadcast another request and rearm. */
		struct device *dev = entry->dev;

#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x timed out\n", entry->ip);
#endif

		del_timer(&entry->timer);
		entry->timer.expires = jiffies + ARP_RES_TIME;
		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
			 NULL, dev->dev_addr, NULL);
		arp_unlock();
		return;
	}

	arp_release_entry(entry);

	cli();
	if (arp_count_hhs(entry))
	{
		/*
		 *	hh cache records still point here, so we cannot free
		 *	the entry.  Keep it as a "dead" entry and retry much
		 *	later.  NOTE(review): arp_release_entry() is called a
		 *	second time here — redundant (the queue is already
		 *	empty) but harmless.
		 */
		struct device *dev = entry->dev;
#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x is dead\n", entry->ip);
#endif
		arp_release_entry(entry);
		entry->retries = ARP_MAX_TRIES;
		restore_flags(flags);
		entry->last_updated = 0;
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + ARP_DEAD_RES_TIME;
		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
			 NULL, dev->dev_addr, NULL);
		arp_unlock();
		return;
	}
	restore_flags(flags);

	/* No users left: unlink from the hash chain and destroy. */
	hash = HASH(entry->ip);

	pentry = &arp_tables[hash];

	while (*pentry != NULL)
	{
		if (*pentry == entry)
		{
			cli();
			*pentry = entry->next;
			restore_flags(flags);
#if RT_CACHE_DEBUG >= 2
			printk("arp_expire_request: %08x is killed\n", entry->ip);
#endif
			arp_free_entry(entry);
			arp_unlock();
			return;
		}
		pentry = &(*pentry)->next;
	}
	/* Entry not found on its chain — table corruption. */
	printk("arp_expire_request: bug: ARP entry is lost!\n");
	arp_unlock();
}
579
580
581
582
583
584 int arp_device_event(struct notifier_block *this, unsigned long event, void *ptr)
585 {
586 struct device *dev=ptr;
587 int i;
588
589 if (event != NETDEV_DOWN)
590 return NOTIFY_DONE;
591
592
593
594
595 #if RT_CACHE_DEBUG >= 1
596 if (arp_lock)
597 printk("arp_device_event: bug\n");
598 #endif
599 arp_fast_lock();
600
601 for (i = 0; i < FULL_ARP_TABLE_SIZE; i++)
602 {
603 struct arp_table *entry;
604 struct arp_table **pentry = &arp_tables[i];
605
606 while ((entry = *pentry) != NULL)
607 {
608 if (entry->dev == dev)
609 {
610 *pentry = entry->next;
611 arp_free_entry(entry);
612 }
613 else
614 pentry = &entry->next;
615 }
616 }
617 arp_unlock();
618 return NOTIFY_DONE;
619 }
620
621
622
623
624
625
626
/*
 *	Build and transmit one ARP packet of the given opcode.
 *	dest_hw == NULL means broadcast; target_hw == NULL fills the
 *	target hardware field with zeros (as in a request).  Silently
 *	does nothing on IFF_NOARP devices.  Allocation is GFP_ATOMIC
 *	since we may be called from timers/BH context.
 */
void arp_send(int type, int ptype, u32 dest_ip,
	      struct device *dev, u32 src_ip,
	      unsigned char *dest_hw, unsigned char *src_hw,
	      unsigned char *target_hw)
{
	struct sk_buff *skb;
	struct arphdr *arp;
	unsigned char *arp_ptr;

	/*
	 *	No arp on this interface.
	 */

	if (dev->flags&IFF_NOARP)
		return;

	/*
	 *	Allocate a buffer: arp header + two (hw,ip) address pairs,
	 *	plus room for the link-layer header.
	 */

	skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
			+ dev->hard_header_len, GFP_ATOMIC);
	if (skb == NULL)
	{
		printk("ARP: no memory to send an arp packet\n");
		return;
	}
	skb_reserve(skb, dev->hard_header_len);
	arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4));
	skb->arp = 1;			/* already resolved: don't re-enter ARP */
	skb->dev = dev;
	skb->free = 1;
	skb->protocol = htons (ETH_P_IP);

	/*
	 *	Fill the device header for the ARP frame.
	 */

	dev->hard_header(skb,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len);

	/* Fill out the arp protocol part. */
	arp->ar_hrd = htons(dev->type);
#ifdef CONFIG_AX25
#ifdef CONFIG_NETROM
	arp->ar_pro = (dev->type == ARPHRD_AX25 || dev->type == ARPHRD_NETROM) ? htons(AX25_P_IP) : htons(ETH_P_IP);
#else
	arp->ar_pro = (dev->type != ARPHRD_AX25) ? htons(ETH_P_IP) : htons(AX25_P_IP);
#endif
#else
	arp->ar_pro = htons(ETH_P_IP);
#endif
	arp->ar_hln = dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	/* Addresses follow the fixed header: sha, sip, tha, tip. */
	arp_ptr=(unsigned char *)(arp+1);

	memcpy(arp_ptr, src_hw, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &src_ip,4);
	arp_ptr+=4;
	if (target_hw != NULL)
		memcpy(arp_ptr, target_hw, dev->addr_len);
	else
		memset(arp_ptr, 0, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &dest_ip, 4);

	dev_queue_xmit(skb, dev, 0);
}
697
698
699
700
701
/*
 *	Flush the packets queued on a newly-resolved entry: rebuild each
 *	packet's hardware header (now that we know the address) and hand
 *	it to the device.  Must only be called on ATF_COM entries.
 */
static void arp_send_q(struct arp_table *entry)
{
	struct sk_buff *skb;

	unsigned long flags;

	/*
	 *	Empty the entire queue, building its data up ready to send
	 */

	if(!(entry->flags&ATF_COM))
	{
		printk("arp_send_q: incomplete entry for %s\n",
		       in_ntoa(entry->ip));
		/* Can't flush the queue until the address is known. */
		return;
	}

	save_flags(flags);

	cli();
	while((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		IS_SKB(skb);
		skb_device_lock(skb);
		restore_flags(flags);	/* interrupts back on while transmitting */
		if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb))
		{
			skb->arp  = 1;	/* header complete: no further ARP needed */
			if(skb->sk==NULL)
				dev_queue_xmit(skb, skb->dev, 0);
			else
				dev_queue_xmit(skb,skb->dev,skb->sk->priority);
		}
	}
	restore_flags(flags);
}
742
743
744
745
746
747
/*
 *	Unlink an entry from its chain (proxy list for ATF_PUBL entries,
 *	otherwise the hash bucket for its IP) and free it.  Does nothing
 *	if the entry is not found on the expected chain.
 *	NOTE(review): del_timer() is called both here and again inside
 *	arp_free_entry() — redundant but harmless.
 */
static void arp_destroy(struct arp_table * entry)
{
	struct arp_table *entry1;
	struct arp_table **pentry;

	if (entry->flags & ATF_PUBL)
		pentry = &arp_proxy_list;
	else
		pentry = &arp_tables[HASH(entry->ip)];

	while ((entry1 = *pentry) != NULL)
	{
		if (entry1 == entry)
		{
			*pentry = entry1->next;
			del_timer(&entry->timer);
			arp_free_entry(entry);
			return;
		}
		pentry = &entry1->next;
	}
}
770
771
772
773
774
775
776
/*
 *	Receive one ARP frame.  Validates header lengths / hardware and
 *	protocol types, answers requests for our addresses (including
 *	proxy-ARP entries), and learns or refreshes the sender's mapping.
 *	Always consumes the skb and returns 0.
 */
int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
{
	struct arphdr *arp = (struct arphdr *)skb->h.raw;
	unsigned char *arp_ptr= (unsigned char *)(arp+1);
	struct arp_table *entry;
	struct arp_table *proxy_entry;
	unsigned long hash, grat=0;
	unsigned char ha[MAX_ADDR_LEN];
	unsigned char *sha,*tha;
	u32 sip,tip;

	/*
	 *	Sanity-check the header against the receiving device: the
	 *	hardware length/type must match, protocol must be 4-byte
	 *	IPv4, and the device must actually do ARP.
	 */
	if (arp->ar_hln != dev->addr_len ||
	    dev->type != ntohs(arp->ar_hrd) ||
	    dev->flags & IFF_NOARP ||
	    arp->ar_pln != 4)
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	/*
	 *	The protocol identifier for IP depends on the link type.
	 */
	switch (dev->type)
	{
#ifdef CONFIG_AX25
		case ARPHRD_AX25:
			if(arp->ar_pro != htons(AX25_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;
#endif
#ifdef CONFIG_NETROM
		case ARPHRD_NETROM:
			if(arp->ar_pro != htons(AX25_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;
#endif
		case ARPHRD_ETHER:
		case ARPHRD_ARCNET:
			if(arp->ar_pro != htons(ETH_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;

		case ARPHRD_IEEE802:
			if(arp->ar_pro != htons(ETH_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;

		default:
			printk("ARP: dev->type mangled!\n");
			kfree_skb(skb, FREE_READ);
			return 0;
	}

	/*
	 *	Extract sender/target hardware and IP addresses:
	 *	layout is sha, sip, tha, tip.
	 */
	sha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	tha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Loopback and multicast targets are never legal in ARP. */
	if (LOOPBACK(tip) || MULTICAST(tip))
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

#ifdef CONFIG_NET_ALIAS
	/* Possibly redirect to the alias device that owns tip. */
	if (tip != dev->pa_addr && net_alias_has(skb->dev))
	{
		dev = net_alias_dev_rcv_sel32(dev, AF_INET, sip, tip);

		if (dev->type != ntohs(arp->ar_hrd) || dev->flags & IFF_NOARP)
		{
			kfree_skb(skb, FREE_READ);
			return 0;
		}
	}
#endif

	if (arp->ar_op == htons(ARPOP_REQUEST))
	{
		if (tip != dev->pa_addr)
		{
			/*
			 *	Not for us: maybe a proxy-ARP target, or a
			 *	gratuitous announcement (sender == target).
			 *	NOTE(review): `sha == tha` compares the two
			 *	POINTERS into the packet, which are always
			 *	distinct — so grat is effectively always 0
			 *	here.  Looks like a memcmp of the hardware
			 *	addresses was intended; confirm upstream.
			 */
			grat = (sip == tip) && (sha == tha);
			arp_fast_lock();

			for (proxy_entry=arp_proxy_list;
			     proxy_entry;
			     proxy_entry = proxy_entry->next)
			{
				/* Masked match on this device's proxy entries. */
				if (proxy_entry->dev == dev &&
				    !((proxy_entry->ip^tip)&proxy_entry->mask))
					break;

			}
			if (proxy_entry)
			{
				if (grat)
				{
					if(!(proxy_entry->flags&ATF_PERM))
						arp_destroy(proxy_entry);
					goto gratuitous;
				}
				/* Reply on behalf of the proxied host. */
				memcpy(ha, proxy_entry->ha, dev->addr_len);
				arp_unlock();
				arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,ha, sha);
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			else
			{
				if (grat)
					goto gratuitous;
				arp_unlock();
				kfree_skb(skb, FREE_READ);
				return 0;
			}
		}
		else
		{
			/* Request for our own address: answer it. */
			arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
		}
	}

	/* Only learn from packets actually addressed to us. */
	if(ip_chk_addr(tip)!=IS_MYADDR)
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	/*
	 *	Update or create the cache entry for the sender.
	 */
	arp_fast_lock();

gratuitous:

	hash = HASH(sip);

	for (entry=arp_tables[hash]; entry; entry=entry->next)
		if (entry->ip == sip && entry->dev == dev)
			break;

	if (entry)
	{
		/* Refresh the mapping unless it is administratively fixed. */
		if (!(entry->flags & ATF_PERM)) {
			memcpy(entry->ha, sha, dev->addr_len);
			entry->last_updated = jiffies;
		}
		if (!(entry->flags & ATF_COM))
		{
			/* Was pending: mark complete, refresh the hardware
			   header caches and flush the packet queue. */
			del_timer(&entry->timer);
			entry->flags |= ATF_COM;
			arp_update_hhs(entry);

			arp_send_q(entry);
		}
	}
	else
	{
		/* Learn a brand-new mapping (gratuitous frames never
		   create entries). */
		if (grat)
			goto end;

		entry = (struct arp_table *)kmalloc(sizeof(struct arp_table),GFP_ATOMIC);
		if(entry == NULL)
		{
			arp_unlock();
			printk("ARP: no memory for new arp entry\n");
			kfree_skb(skb, FREE_READ);
			return 0;
		}

		entry->mask = DEF_ARP_NETMASK;
		entry->ip = sip;
		entry->flags = ATF_COM;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;
		memcpy(entry->ha, sha, dev->addr_len);
		entry->last_updated = entry->last_used = jiffies;

#ifdef CONFIG_NET_ALIAS
		entry->dev = dev;
#else
		entry->dev = skb->dev;
#endif
		skb_queue_head_init(&entry->skb);
		if (arp_lock == 1)
		{
			/* We hold the only lock reference: safe to insert. */
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
		}
		else
		{
			/* Table busy: defer insertion to the bottom half. */
#if RT_CACHE_DEBUG >= 1
			printk("arp_rcv: %08x backlogged\n", entry->ip);
#endif
			arp_enqueue(&arp_backlog, entry);
			arp_bh_mask |= ARP_BH_BACKLOG;
		}
	}

end:
	kfree_skb(skb, FREE_READ);
	arp_unlock();
	return 0;
}
1097
1098
1099
1100
1101
1102
1103
1104
1105 static struct arp_table *arp_lookup(u32 paddr, unsigned short flags, struct device * dev)
1106 {
1107 struct arp_table *entry;
1108
1109 if (!(flags & ATF_PUBL))
1110 {
1111 for (entry = arp_tables[HASH(paddr)];
1112 entry != NULL; entry = entry->next)
1113 if (entry->ip == paddr && (!dev || entry->dev == dev))
1114 break;
1115 return entry;
1116 }
1117
1118 if (!(flags & ATF_NETMASK))
1119 {
1120 for (entry = arp_proxy_list;
1121 entry != NULL; entry = entry->next)
1122 if (entry->ip == paddr && (!dev || entry->dev == dev))
1123 break;
1124 return entry;
1125 }
1126
1127 for (entry=arp_proxy_list; entry != NULL; entry = entry->next)
1128 if (!((entry->ip^paddr)&entry->mask) &&
1129 (!dev || entry->dev == dev))
1130 break;
1131 return entry;
1132 }
1133
1134
1135
1136
1137
1138 int arp_query(unsigned char *haddr, u32 paddr, struct device * dev)
1139 {
1140 struct arp_table *entry;
1141
1142 arp_fast_lock();
1143
1144 entry = arp_lookup(paddr, 0, dev);
1145
1146 if (entry != NULL)
1147 {
1148 entry->last_used = jiffies;
1149 if (entry->flags & ATF_COM)
1150 {
1151 memcpy(haddr, entry->ha, dev->addr_len);
1152 arp_unlock();
1153 return 1;
1154 }
1155 }
1156 arp_unlock();
1157 return 0;
1158 }
1159
1160
/*
 *	Handle addresses that never need a cache lookup: our own address,
 *	IP multicast (mapped algorithmically on Ethernet/802), and
 *	broadcast.  Fills 'haddr' and returns 1 when handled, 0 when the
 *	caller must do a real ARP resolution.
 */
static int arp_set_predefined(int addr_hint, unsigned char * haddr, __u32 paddr, struct device * dev)
{
	switch (addr_hint)
	{
		case IS_MYADDR:
			printk("ARP: arp called for own IP address\n");
			memcpy(haddr, dev->dev_addr, dev->addr_len);
			return 1;
#ifdef CONFIG_IP_MULTICAST
		case IS_MULTICAST:
			if(dev->type==ARPHRD_ETHER || dev->type==ARPHRD_IEEE802)
			{
				/* RFC 1112: 01:00:5e + low 23 bits of the group. */
				u32 taddr;
				haddr[0]=0x01;
				haddr[1]=0x00;
				haddr[2]=0x5e;
				taddr=ntohl(paddr);
				haddr[5]=taddr&0xff;
				taddr=taddr>>8;
				haddr[4]=taddr&0xff;
				taddr=taddr>>8;
				haddr[3]=taddr&0x7f;
				return 1;
			}

		/*
		 *	Intentional fallthrough: on link types without a
		 *	multicast mapping, treat multicast as broadcast.
		 */
#endif

		case IS_BROADCAST:
			memcpy(haddr, dev->broadcast, dev->addr_len);
			return 1;
	}
	return 0;
}
1196
1197
1198
1199
1200
/*
 *	Main resolver entry point used by the output path.  Returns 0
 *	with 'haddr' filled in when the hardware address is known
 *	(predefined or cached-complete); returns 1 when resolution is in
 *	progress — in that case 'skb' (if any) has been queued on the
 *	entry, dropped with an ICMP host-unreachable (dead entry), or
 *	freed (allocation failure), and an ARP request has been sent for
 *	new entries.
 */
int arp_find(unsigned char *haddr, u32 paddr, struct device *dev,
	     u32 saddr, struct sk_buff *skb)
{
	struct arp_table *entry;
	unsigned long hash;

	/* Self/broadcast/multicast need no table lookup. */
	if (arp_set_predefined(ip_chk_addr(paddr), haddr, paddr, dev))
	{
		if (skb)
			skb->arp = 1;
		return 0;
	}

	hash = HASH(paddr);
	arp_fast_lock();

	/*
	 *	Find an entry
	 */
	entry = arp_lookup(paddr, 0, dev);

	if (entry != NULL) 	/* It exists */
	{
		if (!(entry->flags & ATF_COM))
		{
			/*
			 *	Still resolving: queue the packet on the entry,
			 *	unless the entry is "dead" (last_updated == 0),
			 *	in which case report host unreachable.
			 */
			if (skb != NULL)
			{
				if (entry->last_updated)
				{
					skb_queue_tail(&entry->skb, skb);
					skb_device_unlock(skb);
				}
				else
				{
#if 0
					/* Alternative: report EHOSTDOWN on the socket. */
					if (skb->sk)
					{
						skb->sk->err = EHOSTDOWN;
						skb->sk->error_report(skb->sk);
					}
#else
					icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
#endif
					dev_kfree_skb(skb, FREE_WRITE);
				}
			}
			arp_unlock();
			return 1;
		}

		/*
		 *	Resolved: hand back the cached hardware address.
		 */
		entry->last_used = jiffies;
		memcpy(haddr, entry->ha, dev->addr_len);
		if (skb)
			skb->arp = 1;
		arp_unlock();
		return 0;
	}

	/*
	 *	No entry: create one and start resolving.
	 */
	entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
					     GFP_ATOMIC);
	if (entry != NULL)
	{
		entry->last_updated = entry->last_used = jiffies;
		entry->flags = 0;
		entry->ip = paddr;
		entry->mask = DEF_ARP_NETMASK;
		memset(entry->ha, 0, dev->addr_len);
		entry->dev = dev;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;
		entry->timer.expires = jiffies + ARP_RES_TIME;
		skb_queue_head_init(&entry->skb);
		if (skb != NULL)
		{
			skb_queue_tail(&entry->skb, skb);
			skb_device_unlock(skb);
		}
		if (arp_lock == 1)
		{
			/* We hold the only lock reference: insert directly. */
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
			add_timer(&entry->timer);
			entry->retries = ARP_MAX_TRIES;
		}
		else
		{
			/* Table busy: defer insertion to the bottom half. */
#if RT_CACHE_DEBUG >= 1
			printk("arp_find: %08x backlogged\n", entry->ip);
#endif
			arp_enqueue(&arp_backlog, entry);
			arp_bh_mask |= ARP_BH_BACKLOG;
		}
	}
	else if (skb != NULL)
		dev_kfree_skb(skb, FREE_WRITE);
	arp_unlock();

	/*
	 *	If we didn't find an entry, we will try to send an ARP packet.
	 */

	arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, saddr, NULL,
		 dev->dev_addr, NULL);

	return 1;
}
1330
1331
1332
1333
1334
1335
1336 #define HBUFFERLEN 30
1337
/*
 *	/proc/net/arp read handler: format the whole table (normal and
 *	proxy entries) into 'buffer', honouring the usual proc-fs
 *	offset/length windowing protocol.
 */
int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
	int len=0;
	off_t pos=0;
	int size;
	struct arp_table *entry;
	char hbuffer[HBUFFERLEN];
	int i,j,k;
	const char hexbuf[] = "0123456789ABCDEF";

	size = sprintf(buffer,"IP address       HW type     Flags       HW address            Mask     Device\n");

	pos+=size;
	len+=size;

	arp_fast_lock();

	for(i=0; i<FULL_ARP_TABLE_SIZE; i++)
	{
		for(entry=arp_tables[i]; entry!=NULL; entry=entry->next)
		{
			/*
			 *	AX.25/NET/ROM addresses print via ax2asc;
			 *	everything else as colon-separated hex bytes.
			 */
#ifdef CONFIG_AX25
#ifdef CONFIG_NETROM
			if (entry->dev->type == ARPHRD_AX25 || entry->dev->type == ARPHRD_NETROM)
				strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
			else {
#else
			if(entry->dev->type==ARPHRD_AX25)
				strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
			else {
#endif
#endif

			for(k=0,j=0;k<HBUFFERLEN-3 && j<entry->dev->addr_len;j++)
			{
				hbuffer[k++]=hexbuf[ (entry->ha[j]>>4)&15 ];
				hbuffer[k++]=hexbuf[ entry->ha[j]&15     ];
				hbuffer[k++]=':';
			}
			hbuffer[--k]=0;	/* strip the trailing ':' */

#ifdef CONFIG_AX25
			}
#endif
			size = sprintf(buffer+len,
				"%-17s0x%-10x0x%-10x%s",
				in_ntoa(entry->ip),
				(unsigned int)entry->dev->type,
				entry->flags,
				hbuffer);
#if RT_CACHE_DEBUG < 2
			size += sprintf(buffer+len+size,
				 "     %-17s %s\n",
				 entry->mask==DEF_ARP_NETMASK ?
				 "*" : in_ntoa(entry->mask), entry->dev->name);
#else
			/* Debug build: also dump hh refcount/validity. */
			size += sprintf(buffer+len+size,
				 "     %-17s %s\t%ld\t%1d\n",
				 entry->mask==DEF_ARP_NETMASK ?
				 "*" : in_ntoa(entry->mask), entry->dev->name,
				 entry->hh ? entry->hh->hh_refcnt : -1,
				 entry->hh ? entry->hh->hh_uptodate : 0);
#endif

			len += size;
			pos += size;

			if (pos <= offset)
				len=0;
			if (pos >= offset+length)
				goto done;
		}
	}
done:
	arp_unlock();

	*start = buffer+len-(pos-offset);	/* Start of wanted data */
	len = pos-offset;			/* Start slop */
	if (len>length)
		len = length;			/* Ending slop */
	return len;
}
1423
1424
1425
/*
 *	Attach a hardware-header cache record to the route for 'paddr'.
 *	Returns 1 when the caller should NOT use the cache record as a
 *	fresh binding (already bound, or allocation failed) and 0 when a
 *	new record has been installed in *hhp.  For predefined addresses
 *	(self/broadcast/multicast) a standalone record is built at once;
 *	otherwise the record is tied to an (existing or newly created)
 *	ARP entry and resolution is started if needed.
 */
int arp_bind_cache(struct hh_cache ** hhp, struct device *dev, unsigned short htype, u32 paddr)
{
	struct arp_table *entry;
	struct hh_cache *hh = *hhp;
	int addr_hint;
	unsigned long flags;

	if (hh)
		return 1;

	if ((addr_hint = ip_chk_addr(paddr)) != 0)
	{
		/* Predefined mapping: build a self-contained record. */
		unsigned char haddr[MAX_ADDR_LEN];
		if (hh)
			return 1;	/* NOTE(review): dead check — hh is known NULL here */
		hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
		if (!hh)
			return 1;
		arp_set_predefined(addr_hint, haddr, paddr, dev);
		hh->hh_uptodate = 0;
		hh->hh_refcnt = 1;
		hh->hh_arp = NULL;
		hh->hh_next = NULL;
		hh->hh_type = htype;
		*hhp = hh;
		dev->header_cache_update(hh, dev, haddr);
		return 0;
	}

	save_flags(flags);

	arp_fast_lock();

	entry = arp_lookup(paddr, 0, dev);

	if (entry)
	{
		/* Reuse an existing record of the same header type. */
		cli();
		for (hh = entry->hh; hh; hh=hh->hh_next)
			if (hh->hh_type == htype)
				break;
		if (hh)
		{
			hh->hh_refcnt++;
			*hhp = hh;
			restore_flags(flags);
			arp_unlock();
			return 1;
		}
		restore_flags(flags);
	}

	hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
	if (!hh)
	{
		arp_unlock();
		return 1;
	}

	hh->hh_uptodate = 0;
	hh->hh_refcnt = 1;
	hh->hh_arp = NULL;
	hh->hh_next = NULL;
	hh->hh_type = htype;

	if (entry)
	{
		/* Known host: link record to the entry (entry holds a ref). */
		dev->header_cache_update(hh, dev, entry->ha);
		*hhp = hh;
		cli();
		hh->hh_arp = (void*)entry;
		entry->hh = hh;
		hh->hh_refcnt++;
		restore_flags(flags);
		entry->last_used = jiffies;
		arp_unlock();
		return 0;
	}

	/*
	 *	Unknown host: create a fresh ARP entry carrying the record
	 *	and start resolution.
	 */
	entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
					     GFP_ATOMIC);
	if (entry == NULL)
	{
		kfree_s(hh, sizeof(struct hh_cache));
		arp_unlock();
		return 1;
	}

	entry->last_updated = entry->last_used = jiffies;
	entry->flags = 0;
	entry->ip = paddr;
	entry->mask = DEF_ARP_NETMASK;
	memset(entry->ha, 0, dev->addr_len);
	entry->dev = dev;
	entry->hh = hh;
	ATOMIC_INCR(&hh->hh_refcnt);
	init_timer(&entry->timer);
	entry->timer.function = arp_expire_request;
	entry->timer.data = (unsigned long)entry;
	entry->timer.expires = jiffies + ARP_RES_TIME;
	skb_queue_head_init(&entry->skb);

	if (arp_lock == 1)
	{
		/* We hold the only lock reference: insert and ARP now. */
		unsigned long hash = HASH(paddr);
		cli();
		entry->next = arp_tables[hash];
		arp_tables[hash] = entry;
		hh->hh_arp = (void*)entry;
		entry->retries = ARP_MAX_TRIES;
		restore_flags(flags);

		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
	}
	else
	{
		/* Table busy: defer insertion to the bottom half. */
#if RT_CACHE_DEBUG >= 1
		printk("arp_cache_bind: %08x backlogged\n", entry->ip);
#endif
		arp_enqueue(&arp_backlog, entry);
		arp_bh_mask |= ARP_BH_BACKLOG;
	}
	*hhp = hh;
	arp_unlock();
	return 0;
}
1558
/*
 *	Deferred work: install entries that were backlogged because the
 *	table was locked at creation time.  For each backlogged entry,
 *	either insert it into its hash chain and start resolution, or —
 *	if a duplicate appeared meanwhile — migrate its hh records and
 *	queued packets onto the existing entry and free the duplicate.
 *	Runs only when the table is unlocked (called from arp_unlock()).
 */
static void arp_run_bh()
{
	unsigned long flags;
	struct arp_table *entry, *entry1;
	struct hh_cache *hh;
	__u32 sip;

	save_flags(flags);
	cli();
	if (!arp_lock)
	{
		arp_fast_lock();

		while ((entry = arp_dequeue(&arp_backlog)) != NULL)
		{
			unsigned long hash;
			sti();
			sip = entry->ip;
			hash = HASH(sip);

			/* Has somebody inserted the same mapping meanwhile? */
			for (entry1=arp_tables[hash]; entry1; entry1=entry1->next)
				if (entry1->ip==sip && entry1->dev == entry->dev)
					break;

			if (!entry1)
			{
				/* No duplicate: install and start resolving. */
				struct device * dev = entry->dev;
				cli();
				entry->next = arp_tables[hash];
				arp_tables[hash] = entry;
				for (hh=entry->hh; hh; hh=hh->hh_next)
					hh->hh_arp = (void*)entry;
				sti();
				del_timer(&entry->timer);
				entry->timer.expires = jiffies + ARP_RES_TIME;
				add_timer(&entry->timer);
				entry->retries = ARP_MAX_TRIES;
				arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
#if RT_CACHE_DEBUG >= 1
				printk("arp_run_bh: %08x reinstalled\n", sip);
#endif
			}
			else
			{
				struct sk_buff * skb;
				struct hh_cache * next;

				/* Duplicate exists: move our hh records over... */
				cli();
				for (hh=entry->hh; hh; hh=next)
				{
					next = hh->hh_next;
					hh->hh_next = entry1->hh;
					entry1->hh = hh;
					hh->hh_arp = (void*)entry1;
				}
				entry->hh = NULL;

				/* ...and requeue our pending packets on it. */
				while ((skb = skb_dequeue(&entry->skb)) != NULL)
				{
					skb_device_lock(skb);
					sti();
					skb_queue_tail(&entry1->skb, skb);
					skb_device_unlock(skb);
					cli();
				}
				sti();

#if RT_CACHE_DEBUG >= 1
				printk("arp_run_bh: entry %08x was born dead\n", entry->ip);
#endif
				arp_free_entry(entry);

				if (entry1->flags & ATF_COM)
				{
					arp_update_hhs(entry1);
					arp_send_q(entry1);
				}
			}
			cli();
		}
		arp_bh_mask  &= ~ARP_BH_BACKLOG;
		arp_unlock();
	}
	restore_flags(flags);
}
1654
1655
1656
1657
1658
/*
 *	Return 1 if the first 'len' bytes at 'addr' are all zero,
 *	0 otherwise.  len <= 0 counts as empty.
 */
static inline int empty(unsigned char * addr, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (addr[i] != 0)
			return 0;
	return 1;
}
1669
1670
1671
1672
1673
/*
 *	SIOCSARP handler: create or replace an ARP entry from a
 *	userspace struct arpreq.  Validates that the target is routable,
 *	that the hardware family matches the device, and that proxy
 *	(ATF_PUBL) entries are set on a device other than the route's
 *	own.  Any pre-existing entry for the address is destroyed first.
 *	Returns 0 or a negative errno.
 */
static int arp_req_set(struct arpreq *r, struct device * dev)
{
	struct arp_table *entry;
	struct sockaddr_in *si;
	struct rtable *rt;
	struct device *dev1;
	unsigned char *ha;
	u32 ip;

	/*
	 *	Extract destination.
	 */
	si = (struct sockaddr_in *) &r->arp_pa;
	ip = si->sin_addr.s_addr;

	/*
	 *	Locate the device the route to 'ip' uses (loopback for
	 *	our own addresses).
	 */
	if (ip_chk_addr(ip) == IS_MYADDR)
		dev1 = dev_get("lo");
	else {
		rt = ip_rt_route(ip, 0);
		if (!rt)
			return -ENETUNREACH;
		dev1 = rt->rt_dev;
		ip_rt_put(rt);
	}

	/* With no explicit device, default to the route's device. */
	if (!dev) {
		if (dev1->flags&(IFF_LOOPBACK|IFF_NOARP))
			return -ENODEV;
		dev = dev1;
	}

	/* The hardware family in the request must match the device. */
	if (r->arp_ha.sa_family != dev->type)
		return -EINVAL;

	/*
	 *	Proxy entries must be for a foreign device; normal entries
	 *	must be on the device the route actually uses.
	 */
	if (((r->arp_flags & ATF_PUBL) && dev == dev1) ||
	    (!(r->arp_flags & ATF_PUBL) && dev != dev1))
		return -EINVAL;

#if RT_CACHE_DEBUG >= 1
	if (arp_lock)
		printk("arp_req_set: bug\n");
#endif
	arp_fast_lock();

	/*
	 *	Is there an existing entry for this address?  If so,
	 *	destroy it and build a fresh one below.
	 */
	entry = arp_lookup(ip, r->arp_flags & ~ATF_NETMASK, dev);

	if (entry)
	{
		arp_destroy(entry);
		entry = NULL;
	}

	/*
	 *	Do we need to create a new entry?
	 */
	if (entry == NULL)
	{
		entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
					GFP_ATOMIC);
		if (entry == NULL)
		{
			arp_unlock();
			return -ENOMEM;
		}
		entry->ip = ip;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;

		if (r->arp_flags & ATF_PUBL)
		{
			cli();
			entry->next = arp_proxy_list;
			arp_proxy_list = entry;
			sti();
		}
		else
		{
			unsigned long hash = HASH(ip);
			cli();
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
			sti();
		}
		skb_queue_head_init(&entry->skb);
	}

	/*
	 *	Fill in the address; an empty ATF_COM address means
	 *	"use the device's own hardware address".
	 */
	ha = r->arp_ha.sa_data;
	if ((r->arp_flags & ATF_COM) && empty(ha, dev->addr_len))
		ha = dev->dev_addr;
	memcpy(entry->ha, ha, dev->addr_len);
	entry->last_updated = entry->last_used = jiffies;
	entry->flags = r->arp_flags | ATF_COM;
	if ((entry->flags & ATF_PUBL) && (entry->flags & ATF_NETMASK))
	{
		si = (struct sockaddr_in *) &r->arp_netmask;
		entry->mask = si->sin_addr.s_addr;
	}
	else
		entry->mask = DEF_ARP_NETMASK;
	entry->dev = dev;
	arp_update_hhs(entry);
	arp_unlock();
	return 0;
}
1798
1799
1800
1801
1802
1803
1804
1805 static int arp_req_get(struct arpreq *r, struct device *dev)
1806 {
1807 struct arp_table *entry;
1808 struct sockaddr_in *si;
1809
1810 si = (struct sockaddr_in *) &r->arp_pa;
1811
1812 #if RT_CACHE_DEBUG >= 1
1813 if (arp_lock)
1814 printk("arp_req_set: bug\n");
1815 #endif
1816 arp_fast_lock();
1817
1818 entry = arp_lookup(si->sin_addr.s_addr, r->arp_flags|ATF_NETMASK, dev);
1819
1820 if (entry == NULL)
1821 {
1822 arp_unlock();
1823 return -ENXIO;
1824 }
1825
1826
1827
1828
1829
1830 memcpy(r->arp_ha.sa_data, &entry->ha, entry->dev->addr_len);
1831 r->arp_ha.sa_family = entry->dev->type;
1832 r->arp_flags = entry->flags;
1833 strncpy(r->arp_dev, entry->dev->name, 16);
1834 arp_unlock();
1835 return 0;
1836 }
1837
1838 static int arp_req_delete(struct arpreq *r, struct device * dev)
1839 {
1840 struct arp_table *entry;
1841 struct sockaddr_in *si;
1842
1843 si = (struct sockaddr_in *) &r->arp_pa;
1844 #if RT_CACHE_DEBUG >= 1
1845 if (arp_lock)
1846 printk("arp_req_delete: bug\n");
1847 #endif
1848 arp_fast_lock();
1849
1850 if (!(r->arp_flags & ATF_PUBL))
1851 {
1852 for (entry = arp_tables[HASH(si->sin_addr.s_addr)];
1853 entry != NULL; entry = entry->next)
1854 if (entry->ip == si->sin_addr.s_addr
1855 && (!dev || entry->dev == dev))
1856 {
1857 arp_destroy(entry);
1858 arp_unlock();
1859 return 0;
1860 }
1861 }
1862 else
1863 {
1864 for (entry = arp_proxy_list;
1865 entry != NULL; entry = entry->next)
1866 if (entry->ip == si->sin_addr.s_addr
1867 && (!dev || entry->dev == dev))
1868 {
1869 arp_destroy(entry);
1870 arp_unlock();
1871 return 0;
1872 }
1873 }
1874
1875 arp_unlock();
1876 return -ENXIO;
1877 }
1878
1879
1880
1881
1882
/*
 *	Handle an ARP layer ioctl: SIOC[SGD]ARP and the pre-1.3 OLD_*
 *	variants (which use the smaller struct arpreq_old that carries
 *	no device name and no proxy flag distinction).  arg points at a
 *	user-space arpreq.  Returns 0 or a negative errno.
 */
int arp_ioctl(unsigned int cmd, void *arg)
{
	int err;
	struct arpreq r;

	struct device * dev = NULL;

	/* Copy the request in; the modifying calls require superuser.
	   Note the deliberate fallthrough from the privileged cases
	   into the shared copy-in code. */
	switch(cmd)
	{
		case SIOCDARP:
		case SIOCSARP:
			if (!suser())
				return -EPERM;
			/* fallthrough */
		case SIOCGARP:
			err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq));
			if (err)
				return err;
			memcpy_fromfs(&r, arg, sizeof(struct arpreq));
			break;
		case OLD_SIOCDARP:
		case OLD_SIOCSARP:
			if (!suser())
				return -EPERM;
			/* fallthrough */
		case OLD_SIOCGARP:
			err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq_old));
			if (err)
				return err;
			memcpy_fromfs(&r, arg, sizeof(struct arpreq_old));
			/* old requests carry no device name */
			memset(&r.arp_dev, 0, sizeof(r.arp_dev));
			break;
		default:
			return -EINVAL;
	}

	/* Only IPv4 protocol addresses, and never address 0. */
	if (r.arp_pa.sa_family != AF_INET)
		return -EPFNOSUPPORT;
	if (((struct sockaddr_in *)&r.arp_pa)->sin_addr.s_addr == 0)
		return -EINVAL;

	/* Resolve an explicitly named device, defaulting or checking
	   the hardware address family against its type. */
	if (r.arp_dev[0])
	{
		if ((dev = dev_get(r.arp_dev)) == NULL)
			return -ENODEV;

		if (!r.arp_ha.sa_family)
			r.arp_ha.sa_family = dev->type;
		else if (r.arp_ha.sa_family != dev->type)
			return -EINVAL;
	}
	else
	{
		/* Creating a proxy entry with no device named: pick any
		   device of the requested hardware type. */
		if ((r.arp_flags & ATF_PUBL) &&
		    ((cmd == SIOCSARP) || (cmd == OLD_SIOCSARP))) {
			if ((dev = dev_getbytype(r.arp_ha.sa_family)) == NULL)
				return -ENODEV;
		}
	}

	/* Dispatch to the worker for the specific request. */
	switch(cmd)
	{
		case SIOCDARP:
			return arp_req_delete(&r, dev);
		case SIOCSARP:
			return arp_req_set(&r, dev);
		case OLD_SIOCDARP:
			/* The old ioctl cannot say whether the entry is a
			   proxy one: try the plain table first, then the
			   proxy list, succeeding if either delete did. */
			r.arp_flags &= ~ATF_PUBL;
			err = arp_req_delete(&r, dev);
			r.arp_flags |= ATF_PUBL;
			if (!err)
				arp_req_delete(&r, dev);
			else
				err = arp_req_delete(&r, dev);
			return err;
		case OLD_SIOCSARP:
			err = arp_req_set(&r, dev);
			/* Old set semantics: a new proxy entry displaces
			   any plain entry for the same address. */
			if (r.arp_flags & ATF_PUBL)
			{
				r.arp_flags &= ~ATF_PUBL;
				arp_req_delete(&r, dev);
			}
			return err;
		case SIOCGARP:
			err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq));
			if (err)
				return err;
			err = arp_req_get(&r, dev);
			if (!err)
				memcpy_tofs(arg, &r, sizeof(r));
			return err;
		case OLD_SIOCGARP:
			err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq_old));
			if (err)
				return err;
			/* Prefer a plain entry, fall back to a proxy one. */
			r.arp_flags &= ~ATF_PUBL;
			err = arp_req_get(&r, dev);
			if (err < 0)
			{
				r.arp_flags |= ATF_PUBL;
				err = arp_req_get(&r, dev);
			}
			if (!err)
				memcpy_tofs(arg, &r, sizeof(struct arpreq_old));
			return err;
	}

	return 0;
}
1998
1999
2000
2001
2002
2003
/*
 *	Packet type hook through which the device layer hands us ARP
 *	frames.  The type field is filled in (htons(ETH_P_ARP)) by
 *	arp_init() before registration.
 */
static struct packet_type arp_packet_type =
{
	0,		/* type: set at init time */
	NULL,		/* dev: NULL means all devices */
	arp_rcv,	/* receive handler */
	NULL,
	NULL
};
2012
/*
 *	Device state-change notifier: lets arp_device_event() flush
 *	cache entries when an interface goes down or disappears.
 */
static struct notifier_block arp_dev_notifier={
	arp_device_event,
	NULL,
	0
};
2018
/*
 *	Called once at protocol initialisation: register the ARP packet
 *	type, start the periodic cache-expiry timer, hook device state
 *	changes, and publish /proc/net/arp.
 */
void arp_init (void)
{
	/* Register the ARP packet type with the device layer. */
	arp_packet_type.type=htons(ETH_P_ARP);
	dev_add_pack(&arp_packet_type);
	/* Start the periodic expiry checks (arp_check_expire). */
	add_timer(&arp_timer);
	/* Get told when devices go down so their entries can be freed. */
	register_netdevice_notifier(&arp_dev_notifier);

#ifdef CONFIG_PROC_FS
	/* Export the cache as /proc/net/arp via arp_get_info. */
	proc_net_register(&(struct proc_dir_entry) {
		PROC_NET_ARP, 3, "arp",
		S_IFREG | S_IRUGO, 1, 0, 0,
		0, &proc_net_inode_operations,
		arp_get_info
	});
#endif
}
2038