This source file includes the following definitions.
- arp_fast_lock
- arp_fast_unlock
- arp_unlock
- arp_enqueue
- arp_dequeue
- arp_release_entry
- arp_free_entry
- arp_count_hhs
- arp_invalidate_hhs
- arp_update_hhs
- arp_check_expire
- arp_expire_request
- arp_device_event
- arp_send
- arp_send_q
- arp_destroy
- arp_rcv
- arp_lookup
- arp_query
- arp_set_predefined
- arp_find
- arp_get_info
- arp_bind_cache
- arp_run_bh
- empty
- arp_req_set
- arp_req_get
- arp_req_delete
- arp_ioctl
- arp_init
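/*
 *	IPv4 Address Resolution Protocol (RFC 826) for the Linux network
 *	stack: the ARP cache and its expiry timer, request/reply handling
 *	(arp_rcv/arp_send), proxy-ARP entries, the hardware-header cache
 *	binding used by the routing code, the SIOC*ARP ioctls and the
 *	/proc/net/arp listing.
 */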
67 #include <linux/types.h>
68 #include <linux/string.h>
69 #include <linux/kernel.h>
70 #include <linux/sched.h>
71 #include <linux/config.h>
72 #include <linux/socket.h>
73 #include <linux/sockios.h>
74 #include <linux/errno.h>
75 #include <linux/if_arp.h>
76 #include <linux/in.h>
77 #include <linux/mm.h>
78 #include <linux/inet.h>
79 #include <linux/netdevice.h>
80 #include <linux/etherdevice.h>
81 #include <linux/trdevice.h>
82 #include <linux/skbuff.h>
83 #include <linux/proc_fs.h>
84 #include <linux/stat.h>
85
86 #include <net/ip.h>
87 #include <net/icmp.h>
88 #include <net/route.h>
89 #include <net/protocol.h>
90 #include <net/tcp.h>
91 #include <net/sock.h>
92 #include <net/arp.h>
93 #ifdef CONFIG_AX25
94 #include <net/ax25.h>
95 #ifdef CONFIG_NETROM
96 #include <net/netrom.h>
97 #endif
98 #endif
99 #ifdef CONFIG_NET_ALIAS
100 #include <linux/net_alias.h>
101 #endif
102
103 #include <asm/system.h>
104 #include <asm/segment.h>
105
106 #include <stdarg.h>
107
108
109
110
111
112
113
114
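/*
 *	A single ARP cache entry.  last_used/last_updated drive expiry and
 *	re-confirmation, `retries' counts outstanding requests, `skb' holds
 *	packets queued until the address resolves, and `hh' chains the
 *	hardware-header cache entries bound to this destination.
 */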
115 struct arp_table
116 {
117 struct arp_table *next;
118 unsigned long last_used;
119 unsigned long last_updated;
120 unsigned int flags;
121 u32 ip;
122 u32 mask;
123 unsigned char ha[MAX_ADDR_LEN];
124 struct device *dev;
125
126
127
128
129
130 struct timer_list timer;
131 int retries;
132 struct sk_buff_head skb;
133 struct hh_cache *hh;
134 };
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
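/*
 *	Interval between retransmitted requests while an entry is being
 *	resolved (ARP_RES_TIME), and the much slower retry rate used once
 *	a host has stopped answering but is still pinned by hardware-header
 *	cache references (ARP_DEAD_RES_TIME).
 */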
150 #define ARP_RES_TIME (5*HZ)
151 #define ARP_DEAD_RES_TIME (60*HZ)
152
153
154
155
156
157
158 #define ARP_MAX_TRIES 3
159
160
161
162
163
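/*
 *	A non-permanent, unreferenced entry that has not been used for
 *	ARP_TIMEOUT is discarded by the periodic sweep.
 */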
164 #define ARP_TIMEOUT (600*HZ)
165
166
167
168
169
170
171
172 #define ARP_CHECK_INTERVAL (60*HZ)
173
174
175
176
177
178
179
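/*
 *	A resolved entry that has not been updated for ARP_CONFIRM_INTERVAL
 *	is re-verified: it is marked incomplete and a request is unicast to
 *	the cached hardware address; arp_expire_request() takes over if no
 *	reply arrives within ARP_CONFIRM_TIMEOUT.
 */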
180 #define ARP_CONFIRM_INTERVAL (300*HZ)
181 #define ARP_CONFIRM_TIMEOUT ARP_RES_TIME
182
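/*
 *	The cache is guarded by a counting lock.  While arp_lock is held,
 *	contexts that cannot insert directly park new entries on
 *	arp_backlog and set ARP_BH_BACKLOG in arp_bh_mask; arp_unlock()
 *	runs the deferred work (arp_run_bh) once the count drops to zero.
 */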
183 static unsigned long arp_lock;
184 static unsigned long arp_bh_mask;
185
186 #define ARP_BH_BACKLOG 1
187
188 static struct arp_table *arp_backlog;
189
190 static void arp_run_bh(void);
191 static void arp_check_expire (unsigned long);
192
193 static struct timer_list arp_timer =
194 { NULL, NULL, ARP_CHECK_INTERVAL, 0L, &arp_check_expire };
195
196
197
198
199
200
201 #define DEF_ARP_NETMASK (~0)
202
203
204
205
206
207
208
209 #define ARP_TABLE_SIZE 16
210 #define FULL_ARP_TABLE_SIZE (ARP_TABLE_SIZE+1)
211
212 struct arp_table *arp_tables[FULL_ARP_TABLE_SIZE] =
213 {
214 NULL,
215 };
216
217 #define arp_proxy_list arp_tables[ARP_TABLE_SIZE]
218
219
220
221
222
223
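/*
 *	Hash an IP address into one of the ARP_TABLE_SIZE chains; the
 *	extra table slot (arp_proxy_list) holds the proxy-ARP entries.
 */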
224 #define HASH(paddr) (htonl(paddr) & (ARP_TABLE_SIZE - 1))
225
226
227
228
229
230 static __inline__ void arp_fast_lock(void)
231 {
232 ATOMIC_INCR(&arp_lock);
233 }
234
235 static __inline__ void arp_fast_unlock(void)
236 {
237 ATOMIC_DECR(&arp_lock);
238 }
239
240 static __inline__ void arp_unlock(void)
241 {
242 if (!ATOMIC_DECR_AND_CHECK(&arp_lock) && arp_bh_mask)
243 arp_run_bh();
244 }
245
246
247
248
249
250 static void arp_enqueue(struct arp_table **q, struct arp_table *entry)
251 {
252 unsigned long flags;
253 struct arp_table * tail;
254
255 save_flags(flags);
256 cli();
257 tail = *q;
258 if (!tail)
259 entry->next = entry;
260 else
261 {
262 entry->next = tail->next;
263 tail->next = entry;
264 }
265 *q = entry;
266 restore_flags(flags);
267 return;
268 }
269
270
271
272
273
274
275 static struct arp_table * arp_dequeue(struct arp_table **q)
276 {
277 struct arp_table * entry;
278
279 if (*q)
280 {
281 entry = (*q)->next;
282 (*q)->next = entry->next;
283 if (entry->next == entry)
284 *q = NULL;
285 entry->next = NULL;
286 return entry;
287 }
288 return NULL;
289 }
290
291
292
293
294
295 static void arp_release_entry(struct arp_table *entry)
296 {
297 struct sk_buff *skb;
298 unsigned long flags;
299
300 save_flags(flags);
301 cli();
302
303 while ((skb = skb_dequeue(&entry->skb)) != NULL)
304 {
305 skb_device_lock(skb);
306 restore_flags(flags);
307 dev_kfree_skb(skb, FREE_WRITE);
308 cli();
309 }
310 restore_flags(flags);
311 return;
312 }
313
314
315
316
317
318
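/*
 *	Completely destroy an entry: stop its timer, drop any queued
 *	packets, detach (and free unreferenced) hardware-header cache
 *	entries, then free the entry itself.
 */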
319 static void arp_free_entry(struct arp_table *entry)
320 {
321 unsigned long flags;
322 struct hh_cache *hh, *next;
323
324 del_timer(&entry->timer);
325
326 save_flags(flags);
327 cli();
328 arp_release_entry(entry);
329
330 for (hh = entry->hh; hh; hh = next)
331 {
332 next = hh->hh_next;
333 hh->hh_arp = NULL;
334 if (!--hh->hh_refcnt)
335 kfree_s(hh, sizeof(struct hh_cache));
336 }
337 restore_flags(flags);
338
339 kfree_s(entry, sizeof(struct arp_table));
340 return;
341 }
342
343
344
345
346
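/*
 *	Count external references to this entry's hardware-header cache
 *	entries, freeing any hh_cache that only the ARP code still holds.
 */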
347 static __inline__ int arp_count_hhs(struct arp_table * entry)
348 {
349 struct hh_cache *hh, **hhp;
350 int count = 0;
351
352 hhp = &entry->hh;
353 while ((hh=*hhp) != NULL)
354 {
355 if (hh->hh_refcnt == 1)
356 {
357 *hhp = hh->hh_next;
358 kfree_s(hh, sizeof(struct hh_cache));
359 continue;
360 }
361 count += hh->hh_refcnt-1;
362 hhp = &hh->hh_next;
363 }
364
365 return count;
366 }
367
368
369
370
371
372 static __inline__ void arp_invalidate_hhs(struct arp_table * entry)
373 {
374 struct hh_cache *hh;
375
376 for (hh=entry->hh; hh; hh=hh->hh_next)
377 hh->hh_uptodate = 0;
378 }
379
380
381
382
383
384 static __inline__ void arp_update_hhs(struct arp_table * entry)
385 {
386 struct hh_cache *hh;
387
388 for (hh=entry->hh; hh; hh=hh->hh_next)
389 entry->dev->header_cache_update(hh, entry->dev, entry->ha);
390 }
391
392
393
394
395
396
397
398
399
400
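/*
 *	Periodic sweep, run from arp_timer every ARP_CHECK_INTERVAL: drop
 *	stale unreferenced entries, send confirmation requests for old but
 *	still complete ones, and kick the routing cache expiry.  If the
 *	table is locked, the sweep is simply skipped this round.
 */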
401 static void arp_check_expire(unsigned long dummy)
402 {
403 int i;
404 unsigned long now = jiffies;
405
406 del_timer(&arp_timer);
407
408 if (!arp_lock)
409 {
410 arp_fast_lock();
411
412 for (i = 0; i < ARP_TABLE_SIZE; i++)
413 {
414 struct arp_table *entry;
415 struct arp_table **pentry;
416
417 pentry = &arp_tables[i];
418
419 while ((entry = *pentry) != NULL)
420 {
421 cli();
422 if (now - entry->last_used > ARP_TIMEOUT
423 && !(entry->flags & ATF_PERM)
424 && !arp_count_hhs(entry))
425 {
426 *pentry = entry->next;
427 sti();
428 #if RT_CACHE_DEBUG >= 2
429 printk("arp_expire: %08x expired\n", entry->ip);
430 #endif
431 arp_free_entry(entry);
432 }
433 else if (entry->last_updated
434 && now - entry->last_updated > ARP_CONFIRM_INTERVAL
435 && !(entry->flags & ATF_PERM))
436 {
437 struct device * dev = entry->dev;
438 pentry = &entry->next;
439 entry->flags &= ~ATF_COM;
440 arp_invalidate_hhs(entry);
441 sti();
442 entry->retries = ARP_MAX_TRIES+1;
443 del_timer(&entry->timer);
444 entry->timer.expires = jiffies + ARP_CONFIRM_TIMEOUT;
445 add_timer(&entry->timer);
446 arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip,
447 dev, dev->pa_addr, entry->ha,
448 dev->dev_addr, NULL);
449 #if RT_CACHE_DEBUG >= 2
450 printk("arp_expire: %08x requires confirmation\n", entry->ip);
451 #endif
452 }
453 else
454 pentry = &entry->next;
455 }
456 }
457 arp_unlock();
458 }
459
460 ip_rt_check_expire();
461
462
463
464
465
466 arp_timer.expires = jiffies + ARP_CHECK_INTERVAL;
467 add_timer(&arp_timer);
468 }
469
470
471
472
473
474
475
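/*
 *	Per-entry timer handler: retransmit the request while retries
 *	remain, keep a "dead" entry alive (at ARP_DEAD_RES_TIME intervals)
 *	while hardware-header cache entries still reference it, and
 *	otherwise unlink and free the entry.  If the table is locked the
 *	timer is just re-armed for a short while.
 */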
476 static void arp_expire_request (unsigned long arg)
477 {
478 struct arp_table *entry = (struct arp_table *) arg;
479 struct arp_table **pentry;
480 unsigned long hash;
481 unsigned long flags;
482
483 save_flags(flags);
484 cli();
485
486
487
488
489
490
491
492 if (entry->flags & ATF_COM)
493 {
494 restore_flags(flags);
495 return;
496 }
497
498 if (arp_lock)
499 {
500 #if RT_CACHE_DEBUG >= 1
501 printk("arp_expire_request: %08x postponed\n", entry->ip);
502 #endif
503 del_timer(&entry->timer);
504 entry->timer.expires = jiffies + HZ/10;
505 add_timer(&entry->timer);
506 restore_flags(flags);
507 return;
508 }
509
510 arp_fast_lock();
511 restore_flags(flags);
512
513 if (entry->last_updated && --entry->retries > 0)
514 {
515 struct device *dev = entry->dev;
516
517 #if RT_CACHE_DEBUG >= 2
518 printk("arp_expire_request: %08x timed out\n", entry->ip);
519 #endif
520
521 del_timer(&entry->timer);
522 entry->timer.expires = jiffies + ARP_RES_TIME;
523 add_timer(&entry->timer);
524 arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
525 NULL, dev->dev_addr, NULL);
526 arp_unlock();
527 return;
528 }
529
530 arp_release_entry(entry);
531
532 cli();
533 if (arp_count_hhs(entry))
534 {
535 struct device *dev = entry->dev;
536 #if RT_CACHE_DEBUG >= 2
537 printk("arp_expire_request: %08x is dead\n", entry->ip);
538 #endif
539 arp_release_entry(entry);
540 entry->retries = ARP_MAX_TRIES;
541 restore_flags(flags);
542 entry->last_updated = 0;
543 del_timer(&entry->timer);
544 entry->timer.expires = jiffies + ARP_DEAD_RES_TIME;
545 add_timer(&entry->timer);
546 arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
547 NULL, dev->dev_addr, NULL);
548 arp_unlock();
549 return;
550 }
551 restore_flags(flags);
552
553 hash = HASH(entry->ip);
554
555 pentry = &arp_tables[hash];
556
557 while (*pentry != NULL)
558 {
559 if (*pentry == entry)
560 {
561 cli();
562 *pentry = entry->next;
563 restore_flags(flags);
564 #if RT_CACHE_DEBUG >= 2
565 printk("arp_expire_request: %08x is killed\n", entry->ip);
566 #endif
567 arp_free_entry(entry);
568 arp_unlock();
569 return;
570 }
571 pentry = &(*pentry)->next;
572 }
573 printk("arp_expire_request: bug: ARP entry is lost!\n");
574 arp_unlock();
575 }
576
577
578
579
580
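/*
 *	Netdevice notifier: when an interface goes down, purge every cache
 *	and proxy entry that points at it.
 */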
581 int arp_device_event(struct notifier_block *this, unsigned long event, void *ptr)
582 {
583 struct device *dev=ptr;
584 int i;
585
586 if (event != NETDEV_DOWN)
587 return NOTIFY_DONE;
588
589
590
591
592 #if RT_CACHE_DEBUG >= 1
593 if (arp_lock)
594 printk("arp_device_event: bug\n");
595 #endif
596 arp_fast_lock();
597
598 for (i = 0; i < FULL_ARP_TABLE_SIZE; i++)
599 {
600 struct arp_table *entry;
601 struct arp_table **pentry = &arp_tables[i];
602
603 while ((entry = *pentry) != NULL)
604 {
605 if (entry->dev == dev)
606 {
607 *pentry = entry->next;
608 arp_free_entry(entry);
609 }
610 else
611 pentry = &entry->next;
612 }
613 }
614 arp_unlock();
615 return NOTIFY_DONE;
616 }
617
618
619
620
621
622
623
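/*
 *	Build and transmit one ARP packet of the given type.  A NULL
 *	dest_hw means broadcast; a NULL target_hw puts zeroes in the
 *	target hardware field, as for a request.
 */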
624 void arp_send(int type, int ptype, u32 dest_ip,
625 struct device *dev, u32 src_ip,
626 unsigned char *dest_hw, unsigned char *src_hw,
627 unsigned char *target_hw)
628 {
629 struct sk_buff *skb;
630 struct arphdr *arp;
631 unsigned char *arp_ptr;
632
633
634
635
636
637 if (dev->flags&IFF_NOARP)
638 return;
639
640
641
642
643
644 skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
645 + dev->hard_header_len, GFP_ATOMIC);
646 if (skb == NULL)
647 {
648 printk("ARP: no memory to send an arp packet\n");
649 return;
650 }
651 skb_reserve(skb, dev->hard_header_len);
652 arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4));
653 skb->arp = 1;
654 skb->dev = dev;
655 skb->free = 1;
656 skb->protocol = htons (ETH_P_IP);
657
658
659
660
661
662 dev->hard_header(skb,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len);
663
664
665 arp->ar_hrd = htons(dev->type);
666 #ifdef CONFIG_AX25
667 #ifdef CONFIG_NETROM
668 arp->ar_pro = (dev->type == ARPHRD_AX25 || dev->type == ARPHRD_NETROM) ? htons(AX25_P_IP) : htons(ETH_P_IP);
669 #else
670 arp->ar_pro = (dev->type != ARPHRD_AX25) ? htons(ETH_P_IP) : htons(AX25_P_IP);
671 #endif
672 #else
673 arp->ar_pro = htons(ETH_P_IP);
674 #endif
675 arp->ar_hln = dev->addr_len;
676 arp->ar_pln = 4;
677 arp->ar_op = htons(type);
678
679 arp_ptr=(unsigned char *)(arp+1);
680
681 memcpy(arp_ptr, src_hw, dev->addr_len);
682 arp_ptr+=dev->addr_len;
683 memcpy(arp_ptr, &src_ip,4);
684 arp_ptr+=4;
685 if (target_hw != NULL)
686 memcpy(arp_ptr, target_hw, dev->addr_len);
687 else
688 memset(arp_ptr, 0, dev->addr_len);
689 arp_ptr+=dev->addr_len;
690 memcpy(arp_ptr, &dest_ip, 4);
691
692 dev_queue_xmit(skb, dev, 0);
693 }
694
695
696
697
698
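/*
 *	Flush the packets queued on a newly completed entry, rebuilding
 *	each frame's hardware header before transmission.
 */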
699 static void arp_send_q(struct arp_table *entry)
700 {
701 struct sk_buff *skb;
702
703 unsigned long flags;
704
705
706
707
708
709 if(!(entry->flags&ATF_COM))
710 {
711 printk("arp_send_q: incomplete entry for %s\n",
712 in_ntoa(entry->ip));
713
714
715
716
717 return;
718 }
719
720 save_flags(flags);
721
722 cli();
723 while((skb = skb_dequeue(&entry->skb)) != NULL)
724 {
725 IS_SKB(skb);
726 skb_device_lock(skb);
727 restore_flags(flags);
728 if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb))
729 {
730 skb->arp = 1;
731 if(skb->sk==NULL)
732 dev_queue_xmit(skb, skb->dev, 0);
733 else
734 dev_queue_xmit(skb,skb->dev,skb->sk->priority);
735 }
736 }
737 restore_flags(flags);
738 }
739
740
741
742
743
744
745 static void arp_destroy(struct arp_table * entry)
746 {
747 struct arp_table *entry1;
748 struct arp_table **pentry;
749
750 if (entry->flags & ATF_PUBL)
751 pentry = &arp_proxy_list;
752 else
753 pentry = &arp_tables[HASH(entry->ip)];
754
755 while ((entry1 = *pentry) != NULL)
756 {
757 if (entry1 == entry)
758 {
759 *pentry = entry1->next;
760 del_timer(&entry->timer);
761 arp_free_entry(entry);
762 return;
763 }
764 pentry = &entry1->next;
765 }
766 }
767
768
769
770
771
772
773
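/*
 *	Process an incoming ARP packet: validate it for this device type,
 *	answer requests for our own address (or for proxy-ARP entries on
 *	behalf of other hosts), and use the sender information to update
 *	or create a cache entry, flushing any packets queued on it.
 */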
774 int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
775 {
776
777
778
779
780 struct arphdr *arp = (struct arphdr *)skb->h.raw;
781 unsigned char *arp_ptr= (unsigned char *)(arp+1);
782 struct arp_table *entry;
783 struct arp_table *proxy_entry;
784 unsigned long hash;
785 unsigned char ha[MAX_ADDR_LEN];
786 unsigned char *sha,*tha;
787 u32 sip,tip;
788
789
790
791
792
793
794
795
796 if (arp->ar_hln != dev->addr_len ||
797 dev->type != ntohs(arp->ar_hrd) ||
798 dev->flags & IFF_NOARP ||
799 arp->ar_pln != 4)
800 {
801 kfree_skb(skb, FREE_READ);
802 return 0;
803
804
805 }
806
807
808
809
810
811
812
813
814
815 switch (dev->type)
816 {
817 #ifdef CONFIG_AX25
818 case ARPHRD_AX25:
819 if(arp->ar_pro != htons(AX25_P_IP))
820 {
821 kfree_skb(skb, FREE_READ);
822 return 0;
823 }
824 break;
825 #endif
826 #ifdef CONFIG_NETROM
827 case ARPHRD_NETROM:
828 if(arp->ar_pro != htons(AX25_P_IP))
829 {
830 kfree_skb(skb, FREE_READ);
831 return 0;
832 }
833 break;
834 #endif
835 case ARPHRD_ETHER:
836 case ARPHRD_ARCNET:
837 if(arp->ar_pro != htons(ETH_P_IP))
838 {
839 kfree_skb(skb, FREE_READ);
840 return 0;
841 }
842 break;
843
844 case ARPHRD_IEEE802:
845 if(arp->ar_pro != htons(ETH_P_IP))
846 {
847 kfree_skb(skb, FREE_READ);
848 return 0;
849 }
850 break;
851
852 default:
853 printk("ARP: dev->type mangled!\n");
854 kfree_skb(skb, FREE_READ);
855 return 0;
856 }
857
858
859
860
861
862 sha=arp_ptr;
863 arp_ptr += dev->addr_len;
864 memcpy(&sip, arp_ptr, 4);
865 arp_ptr += 4;
866 tha=arp_ptr;
867 arp_ptr += dev->addr_len;
868 memcpy(&tip, arp_ptr, 4);
869
870
871
872
873
874 if (LOOPBACK(tip) || MULTICAST(tip))
875 {
876 kfree_skb(skb, FREE_READ);
877 return 0;
878 }
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901 #ifdef CONFIG_NET_ALIAS
902 if (net_alias_has(dev))
903 {
904 struct device *adev;
905 adev = net_alias_chk32(dev,AF_INET,tip,IFF_UP,IFF_NOARP);
906 if (adev != NULL) dev = adev;
907 }
908 #endif
909
910 if (arp->ar_op == htons(ARPOP_REQUEST))
911 {
912
913
914
915 if (tip != dev->pa_addr)
916 {
917
918
919
920
921
922 arp_fast_lock();
923
924 for (proxy_entry=arp_proxy_list;
925 proxy_entry;
926 proxy_entry = proxy_entry->next)
927 {
928
929
930
931
932
933
934
935
936 if (proxy_entry->dev == dev &&
937 !((proxy_entry->ip^tip)&proxy_entry->mask))
938 break;
939
940 }
941 if (proxy_entry)
942 {
943 memcpy(ha, proxy_entry->ha, dev->addr_len);
944 arp_unlock();
945 arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,ha, sha);
946 kfree_skb(skb, FREE_READ);
947 return 0;
948 }
949 else
950 {
951 arp_unlock();
952 kfree_skb(skb, FREE_READ);
953 return 0;
954 }
955 }
956 else
957 {
958
959
960
961 arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
962 }
963 }
964
965
966
967 if(ip_chk_addr(tip)!=IS_MYADDR)
968 {
969
970
971
972 kfree_skb(skb, FREE_READ);
973 return 0;
974 }
975
976
977
978
979
980
981 arp_fast_lock();
982
983 hash = HASH(sip);
984
985 for (entry=arp_tables[hash]; entry; entry=entry->next)
986 if (entry->ip == sip && entry->dev == dev)
987 break;
988
989 if (entry)
990 {
991
992
993
994 if (!(entry->flags & ATF_PERM)) {
995 memcpy(entry->ha, sha, dev->addr_len);
996 entry->last_updated = jiffies;
997 }
998 if (!(entry->flags & ATF_COM))
999 {
1000
1001
1002
1003
1004 del_timer(&entry->timer);
1005 entry->flags |= ATF_COM;
1006 arp_update_hhs(entry);
1007
1008
1009
1010
1011
1012 arp_send_q(entry);
1013 }
1014 }
1015 else
1016 {
1017
1018
1019
1020 entry = (struct arp_table *)kmalloc(sizeof(struct arp_table),GFP_ATOMIC);
1021 if(entry == NULL)
1022 {
1023 arp_unlock();
1024 printk("ARP: no memory for new arp entry\n");
1025 kfree_skb(skb, FREE_READ);
1026 return 0;
1027 }
1028
1029 entry->mask = DEF_ARP_NETMASK;
1030 entry->ip = sip;
1031 entry->flags = ATF_COM;
1032 entry->hh = NULL;
1033 init_timer(&entry->timer);
1034 entry->timer.function = arp_expire_request;
1035 entry->timer.data = (unsigned long)entry;
1036 memcpy(entry->ha, sha, dev->addr_len);
1037 entry->last_updated = entry->last_used = jiffies;
1038
1039
1040
1041
1042 #ifdef CONFIG_NET_ALIAS
1043 entry->dev = dev;
1044 #else
1045 entry->dev = skb->dev;
1046 #endif
1047 skb_queue_head_init(&entry->skb);
1048 if (arp_lock == 1)
1049 {
1050 entry->next = arp_tables[hash];
1051 arp_tables[hash] = entry;
1052 }
1053 else
1054 {
1055 #if RT_CACHE_DEBUG >= 1
1056 printk("arp_rcv: %08x backlogged\n", entry->ip);
1057 #endif
1058 arp_enqueue(&arp_backlog, entry);
1059 arp_bh_mask |= ARP_BH_BACKLOG;
1060 }
1061 }
1062
1063
1064
1065
1066 kfree_skb(skb, FREE_READ);
1067 arp_unlock();
1068 return 0;
1069 }
1070
1071
1072
1073
1074
1075
1076
1077
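/*
 *	Find an entry for paddr.  Without ATF_PUBL this searches the hash
 *	table; with ATF_PUBL it searches the proxy list, either by exact
 *	address or, when ATF_NETMASK is set, by masked match.
 */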
1078 static struct arp_table *arp_lookup(u32 paddr, unsigned short flags, struct device * dev)
1079 {
1080 struct arp_table *entry;
1081
1082 if (!(flags & ATF_PUBL))
1083 {
1084 for (entry = arp_tables[HASH(paddr)];
1085 entry != NULL; entry = entry->next)
1086 if (entry->ip == paddr && (!dev || entry->dev == dev))
1087 break;
1088 return entry;
1089 }
1090
1091 if (!(flags & ATF_NETMASK))
1092 {
1093 for (entry = arp_proxy_list;
1094 entry != NULL; entry = entry->next)
1095 if (entry->ip == paddr && (!dev || entry->dev == dev))
1096 break;
1097 return entry;
1098 }
1099
1100 for (entry=arp_proxy_list; entry != NULL; entry = entry->next)
1101 if (!((entry->ip^paddr)&entry->mask) &&
1102 (!dev || entry->dev == dev))
1103 break;
1104 return entry;
1105 }
1106
1107
1108
1109
1110
1111 int arp_query(unsigned char *haddr, u32 paddr, struct device * dev)
1112 {
1113 struct arp_table *entry;
1114
1115 arp_fast_lock();
1116
1117 entry = arp_lookup(paddr, 0, dev);
1118
1119 if (entry != NULL)
1120 {
1121 entry->last_used = jiffies;
1122 if (entry->flags & ATF_COM)
1123 {
1124 memcpy(haddr, entry->ha, dev->addr_len);
1125 arp_unlock();
1126 return 1;
1127 }
1128 }
1129 arp_unlock();
1130 return 0;
1131 }
1132
1133
1134 static int arp_set_predefined(int addr_hint, unsigned char * haddr, __u32 paddr, struct device * dev)
1135 {
1136 switch (addr_hint)
1137 {
1138 case IS_MYADDR:
1139 printk("ARP: arp called for own IP address\n");
1140 memcpy(haddr, dev->dev_addr, dev->addr_len);
1141 return 1;
1142 #ifdef CONFIG_IP_MULTICAST
1143 case IS_MULTICAST:
1144 if(dev->type==ARPHRD_ETHER || dev->type==ARPHRD_IEEE802)
1145 {
1146 u32 taddr;
1147 haddr[0]=0x01;
1148 haddr[1]=0x00;
1149 haddr[2]=0x5e;
1150 taddr=ntohl(paddr);
1151 haddr[5]=taddr&0xff;
1152 taddr=taddr>>8;
1153 haddr[4]=taddr&0xff;
1154 taddr=taddr>>8;
1155 haddr[3]=taddr&0x7f;
1156 return 1;
1157 }
1158
1159
1160
1161 #endif
1162
1163 case IS_BROADCAST:
1164 memcpy(haddr, dev->broadcast, dev->addr_len);
1165 return 1;
1166 }
1167 return 0;
1168 }
1169
1170
1171
1172
1173
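/*
 *	Main resolver entry point used on the transmit path: fill in the
 *	hardware address for paddr if it is known (returning 0), otherwise
 *	create an incomplete entry, queue the skb on it, send a request
 *	and return 1 so the caller knows the packet was taken over.
 */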
1174 int arp_find(unsigned char *haddr, u32 paddr, struct device *dev,
1175 u32 saddr, struct sk_buff *skb)
1176 {
1177 struct arp_table *entry;
1178 unsigned long hash;
1179
1180 if (arp_set_predefined(ip_chk_addr(paddr), haddr, paddr, dev))
1181 {
1182 if (skb)
1183 skb->arp = 1;
1184 return 0;
1185 }
1186
1187 hash = HASH(paddr);
1188 arp_fast_lock();
1189
1190
1191
1192
1193 entry = arp_lookup(paddr, 0, dev);
1194
1195 if (entry != NULL)
1196 {
1197 if (!(entry->flags & ATF_COM))
1198 {
1199
1200
1201
1202
1203
1204 if (skb != NULL)
1205 {
1206 if (entry->last_updated)
1207 {
1208 skb_queue_tail(&entry->skb, skb);
1209 skb_device_unlock(skb);
1210 }
1211
1212
1213
1214
1215 else
1216 {
1217 #if 0
1218
1219
1220
1221
1222 if (skb->sk)
1223 {
1224 skb->sk->err = EHOSTDOWN;
1225 skb->sk->error_report(skb->sk);
1226 }
1227 #else
1228 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
1229 #endif
1230 dev_kfree_skb(skb, FREE_WRITE);
1231 }
1232 }
1233 arp_unlock();
1234 return 1;
1235 }
1236
1237
1238
1239
1240
1241 entry->last_used = jiffies;
1242 memcpy(haddr, entry->ha, dev->addr_len);
1243 if (skb)
1244 skb->arp = 1;
1245 arp_unlock();
1246 return 0;
1247 }
1248
1249
1250
1251
1252
1253 entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
1254 GFP_ATOMIC);
1255 if (entry != NULL)
1256 {
1257 entry->last_updated = entry->last_used = jiffies;
1258 entry->flags = 0;
1259 entry->ip = paddr;
1260 entry->mask = DEF_ARP_NETMASK;
1261 memset(entry->ha, 0, dev->addr_len);
1262 entry->dev = dev;
1263 entry->hh = NULL;
1264 init_timer(&entry->timer);
1265 entry->timer.function = arp_expire_request;
1266 entry->timer.data = (unsigned long)entry;
1267 entry->timer.expires = jiffies + ARP_RES_TIME;
1268 skb_queue_head_init(&entry->skb);
1269 if (skb != NULL)
1270 {
1271 skb_queue_tail(&entry->skb, skb);
1272 skb_device_unlock(skb);
1273 }
1274 if (arp_lock == 1)
1275 {
1276 entry->next = arp_tables[hash];
1277 arp_tables[hash] = entry;
1278 add_timer(&entry->timer);
1279 entry->retries = ARP_MAX_TRIES;
1280 }
1281 else
1282 {
1283 #if RT_CACHE_DEBUG >= 1
1284 printk("arp_find: %08x backlogged\n", entry->ip);
1285 #endif
1286 arp_enqueue(&arp_backlog, entry);
1287 arp_bh_mask |= ARP_BH_BACKLOG;
1288 }
1289 }
1290 else if (skb != NULL)
1291 dev_kfree_skb(skb, FREE_WRITE);
1292 arp_unlock();
1293
1294
1295
1296
1297
1298 arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, saddr, NULL,
1299 dev->dev_addr, NULL);
1300
1301 return 1;
1302 }
1303
1304
1305
1306
1307
1308
1309 #define HBUFFERLEN 30
1310
1311 int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
1312 {
1313 int len=0;
1314 off_t pos=0;
1315 int size;
1316 struct arp_table *entry;
1317 char hbuffer[HBUFFERLEN];
1318 int i,j,k;
1319 const char hexbuf[] = "0123456789ABCDEF";
1320
1321 size = sprintf(buffer,"IP address HW type Flags HW address Mask Device\n");
1322
1323 pos+=size;
1324 len+=size;
1325
1326 arp_fast_lock();
1327
1328 for(i=0; i<FULL_ARP_TABLE_SIZE; i++)
1329 {
1330 for(entry=arp_tables[i]; entry!=NULL; entry=entry->next)
1331 {
1332
1333
1334
1335 #ifdef CONFIG_AX25
1336 #ifdef CONFIG_NETROM
1337 if (entry->dev->type == ARPHRD_AX25 || entry->dev->type == ARPHRD_NETROM)
1338 strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
1339 else {
1340 #else
1341 if(entry->dev->type==ARPHRD_AX25)
1342 strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
1343 else {
1344 #endif
1345 #endif
1346
1347 for(k=0,j=0;k<HBUFFERLEN-3 && j<entry->dev->addr_len;j++)
1348 {
1349 hbuffer[k++]=hexbuf[ (entry->ha[j]>>4)&15 ];
1350 hbuffer[k++]=hexbuf[ entry->ha[j]&15 ];
1351 hbuffer[k++]=':';
1352 }
1353 hbuffer[--k]=0;
1354
1355 #ifdef CONFIG_AX25
1356 }
1357 #endif
1358 size = sprintf(buffer+len,
1359 "%-17s0x%-10x0x%-10x%s",
1360 in_ntoa(entry->ip),
1361 (unsigned int)entry->dev->type,
1362 entry->flags,
1363 hbuffer);
1364 #if RT_CACHE_DEBUG < 2
1365 size += sprintf(buffer+len+size,
1366 " %-17s %s\n",
1367 entry->mask==DEF_ARP_NETMASK ?
1368 "*" : in_ntoa(entry->mask), entry->dev->name);
1369 #else
1370 size += sprintf(buffer+len+size,
1371 " %-17s %s\t%ld\t%1d\n",
1372 entry->mask==DEF_ARP_NETMASK ?
1373 "*" : in_ntoa(entry->mask), entry->dev->name,
1374 entry->hh ? entry->hh->hh_refcnt : -1,
1375 entry->hh ? entry->hh->hh_uptodate : 0);
1376 #endif
1377
1378 len += size;
1379 pos += size;
1380
1381 if (pos <= offset)
1382 len=0;
1383 if (pos >= offset+length)
1384 break;
1385 }
1386 }
1387 arp_unlock();
1388
1389 *start = buffer+len-(pos-offset);
1390 len = pos-offset;
1391 if (len>length)
1392 len = length;
1393 return len;
1394 }
1395
1396
1397
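/*
 *	Attach a hardware-header cache entry (hh_cache) for paddr to the
 *	ARP entry that covers it, creating either object as needed.
 *	Returns 0 when a fresh hh_cache was installed, 1 otherwise
 *	(already bound, an existing one was reused, or out of memory).
 */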
1398 int arp_bind_cache(struct hh_cache ** hhp, struct device *dev, unsigned short htype, u32 paddr)
1399 {
1400 struct arp_table *entry;
1401 struct hh_cache *hh = *hhp;
1402 int addr_hint;
1403 unsigned long flags;
1404
1405 if (hh)
1406 return 1;
1407
1408 if ((addr_hint = ip_chk_addr(paddr)) != 0)
1409 {
1410 unsigned char haddr[MAX_ADDR_LEN];
1411 if (hh)
1412 return 1;
1413 hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
1414 if (!hh)
1415 return 1;
1416 arp_set_predefined(addr_hint, haddr, paddr, dev);
1417 hh->hh_uptodate = 0;
1418 hh->hh_refcnt = 1;
1419 hh->hh_arp = NULL;
1420 hh->hh_next = NULL;
1421 hh->hh_type = htype;
1422 *hhp = hh;
1423 dev->header_cache_update(hh, dev, haddr);
1424 return 0;
1425 }
1426
1427 save_flags(flags);
1428
1429 arp_fast_lock();
1430
1431 entry = arp_lookup(paddr, 0, dev);
1432
1433 if (entry)
1434 {
1435 cli();
1436 for (hh = entry->hh; hh; hh=hh->hh_next)
1437 if (hh->hh_type == htype)
1438 break;
1439 if (hh)
1440 {
1441 hh->hh_refcnt++;
1442 *hhp = hh;
1443 restore_flags(flags);
1444 arp_unlock();
1445 return 1;
1446 }
1447 restore_flags(flags);
1448 }
1449
1450 hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
1451 if (!hh)
1452 {
1453 arp_unlock();
1454 return 1;
1455 }
1456
1457 hh->hh_uptodate = 0;
1458 hh->hh_refcnt = 1;
1459 hh->hh_arp = NULL;
1460 hh->hh_next = NULL;
1461 hh->hh_type = htype;
1462
1463 if (entry)
1464 {
1465 dev->header_cache_update(hh, dev, entry->ha);
1466 *hhp = hh;
1467 cli();
1468 hh->hh_arp = (void*)entry;
1469 entry->hh = hh;
1470 hh->hh_refcnt++;
1471 restore_flags(flags);
1472 entry->last_used = jiffies;
1473 arp_unlock();
1474 return 0;
1475 }
1476
1477
1478
1479
1480
1481
1482 entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
1483 GFP_ATOMIC);
1484 if (entry == NULL)
1485 {
1486 kfree_s(hh, sizeof(struct hh_cache));
1487 arp_unlock();
1488 return 1;
1489 }
1490
1491 entry->last_updated = entry->last_used = jiffies;
1492 entry->flags = 0;
1493 entry->ip = paddr;
1494 entry->mask = DEF_ARP_NETMASK;
1495 memset(entry->ha, 0, dev->addr_len);
1496 entry->dev = dev;
1497 entry->hh = hh;
1498 ATOMIC_INCR(&hh->hh_refcnt);
1499 init_timer(&entry->timer);
1500 entry->timer.function = arp_expire_request;
1501 entry->timer.data = (unsigned long)entry;
1502 entry->timer.expires = jiffies + ARP_RES_TIME;
1503 skb_queue_head_init(&entry->skb);
1504
1505 if (arp_lock == 1)
1506 {
1507 unsigned long hash = HASH(paddr);
1508 cli();
1509 entry->next = arp_tables[hash];
1510 arp_tables[hash] = entry;
1511 hh->hh_arp = (void*)entry;
1512 entry->retries = ARP_MAX_TRIES;
1513 restore_flags(flags);
1514
1515 add_timer(&entry->timer);
1516 arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
1517 }
1518 else
1519 {
1520 #if RT_CACHE_DEBUG >= 1
1521 printk("arp_cache_bind: %08x backlogged\n", entry->ip);
1522 #endif
1523 arp_enqueue(&arp_backlog, entry);
1524 arp_bh_mask |= ARP_BH_BACKLOG;
1525 }
1526 *hhp = hh;
1527 arp_unlock();
1528 return 0;
1529 }
1530
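/*
 *	Deferred (bottom-half style) work: once the lock is free, move
 *	entries parked on arp_backlog into the hash table, or, if a
 *	matching entry appeared in the meantime, merge the parked entry's
 *	hh_cache chain and queued packets into it and free the duplicate.
 */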
1531 static void arp_run_bh()
1532 {
1533 unsigned long flags;
1534 struct arp_table *entry, *entry1;
1535 struct hh_cache *hh;
1536 __u32 sip;
1537
1538 save_flags(flags);
1539 cli();
1540 if (!arp_lock)
1541 {
1542 arp_fast_lock();
1543
1544 while ((entry = arp_dequeue(&arp_backlog)) != NULL)
1545 {
1546 unsigned long hash;
1547 sti();
1548 sip = entry->ip;
1549 hash = HASH(sip);
1550
1551
1552
1553
1554
1555 for (entry1=arp_tables[hash]; entry1; entry1=entry1->next)
1556 if (entry1->ip==sip && entry1->dev == entry->dev)
1557 break;
1558
1559 if (!entry1)
1560 {
1561 struct device * dev = entry->dev;
1562 cli();
1563 entry->next = arp_tables[hash];
1564 arp_tables[hash] = entry;
1565 for (hh=entry->hh; hh; hh=hh->hh_next)
1566 hh->hh_arp = (void*)entry;
1567 sti();
1568 del_timer(&entry->timer);
1569 entry->timer.expires = jiffies + ARP_RES_TIME;
1570 add_timer(&entry->timer);
1571 entry->retries = ARP_MAX_TRIES;
1572 arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
1573 #if RT_CACHE_DEBUG >= 1
1574 printk("arp_run_bh: %08x reinstalled\n", sip);
1575 #endif
1576 }
1577 else
1578 {
1579 struct sk_buff * skb;
1580 struct hh_cache * next;
1581
1582
1583
1584
1585 cli();
1586 for (hh=entry->hh; hh; hh=next)
1587 {
1588 next = hh->hh_next;
1589 hh->hh_next = entry1->hh;
1590 entry1->hh = hh;
1591 hh->hh_arp = (void*)entry1;
1592 }
1593 entry->hh = NULL;
1594
1595
1596
1597
1598 while ((skb = skb_dequeue(&entry->skb)) != NULL)
1599 {
1600 skb_device_lock(skb);
1601 sti();
1602 skb_queue_tail(&entry1->skb, skb);
1603 skb_device_unlock(skb);
1604 cli();
1605 }
1606 sti();
1607
1608 #if RT_CACHE_DEBUG >= 1
1609 printk("arp_run_bh: entry %08x was born dead\n", entry->ip);
1610 #endif
1611 arp_free_entry(entry);
1612
1613 if (entry1->flags & ATF_COM)
1614 {
1615 arp_update_hhs(entry1);
1616 arp_send_q(entry1);
1617 }
1618 }
1619 cli();
1620 }
1621 arp_bh_mask &= ~ARP_BH_BACKLOG;
1622 arp_unlock();
1623 }
1624 restore_flags(flags);
1625 }
1626
1627
1628
1629
1630
1631 static inline int empty(unsigned char * addr, int len)
1632 {
1633 while (len > 0) {
1634 if (*addr)
1635 return 0;
1636 len--;
1637 addr++;
1638 }
1639 return 1;
1640 }
1641
1642
1643
1644
1645
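/*
 *	SIOCSARP: create or replace an entry from user space.  The device
 *	is taken from the route to the requested address, ATF_PUBL entries
 *	go onto the proxy list, and an all-zero hardware address with
 *	ATF_COM means "use this device's own address".
 */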
1646 static int arp_req_set(struct arpreq *r, struct device * dev)
1647 {
1648 struct arp_table *entry;
1649 struct sockaddr_in *si;
1650 struct rtable *rt;
1651 struct device *dev1;
1652 unsigned char *ha;
1653 u32 ip;
1654
1655
1656
1657
1658
1659 si = (struct sockaddr_in *) &r->arp_pa;
1660 ip = si->sin_addr.s_addr;
1661
1662
1663
1664
1665
1666 if (ip_chk_addr(ip) == IS_MYADDR)
1667 dev1 = dev_get("lo");
1668 else {
1669 rt = ip_rt_route(ip, 0);
1670 if (!rt)
1671 return -ENETUNREACH;
1672 dev1 = rt->rt_dev;
1673 ip_rt_put(rt);
1674 }
1675
1676 if (!dev)
1677 dev = dev1;
1678
1679 if (((r->arp_flags & ATF_PUBL) && dev == dev1) ||
1680 (!(r->arp_flags & ATF_PUBL) && dev != dev1))
1681 return -EINVAL;
1682
1683 #if RT_CACHE_DEBUG >= 1
1684 if (arp_lock)
1685 printk("arp_req_set: bug\n");
1686 #endif
1687 arp_fast_lock();
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697 entry = arp_lookup(ip, r->arp_flags & ~ATF_NETMASK, dev);
1698
1699 if (entry)
1700 {
1701 arp_destroy(entry);
1702 entry = NULL;
1703 }
1704
1705
1706
1707
1708
1709 if (entry == NULL)
1710 {
1711 entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
1712 GFP_ATOMIC);
1713 if (entry == NULL)
1714 {
1715 arp_unlock();
1716 return -ENOMEM;
1717 }
1718 entry->ip = ip;
1719 entry->hh = NULL;
1720 init_timer(&entry->timer);
1721 entry->timer.function = arp_expire_request;
1722 entry->timer.data = (unsigned long)entry;
1723
1724 if (r->arp_flags & ATF_PUBL)
1725 {
1726 cli();
1727 entry->next = arp_proxy_list;
1728 arp_proxy_list = entry;
1729 sti();
1730 }
1731 else
1732 {
1733 unsigned long hash = HASH(ip);
1734 cli();
1735 entry->next = arp_tables[hash];
1736 arp_tables[hash] = entry;
1737 sti();
1738 }
1739 skb_queue_head_init(&entry->skb);
1740 }
1741
1742
1743
1744 ha = r->arp_ha.sa_data;
1745 if ((r->arp_flags & ATF_COM) && empty(ha, dev->addr_len))
1746 ha = dev->dev_addr;
1747 memcpy(entry->ha, ha, dev->addr_len);
1748 entry->last_updated = entry->last_used = jiffies;
1749 entry->flags = r->arp_flags | ATF_COM;
1750 if ((entry->flags & ATF_PUBL) && (entry->flags & ATF_NETMASK))
1751 {
1752 si = (struct sockaddr_in *) &r->arp_netmask;
1753 entry->mask = si->sin_addr.s_addr;
1754 }
1755 else
1756 entry->mask = DEF_ARP_NETMASK;
1757 entry->dev = dev;
1758 arp_update_hhs(entry);
1759 arp_unlock();
1760 return 0;
1761 }
1762
1763
1764
1765
1766
1767
1768
1769 static int arp_req_get(struct arpreq *r, struct device *dev)
1770 {
1771 struct arp_table *entry;
1772 struct sockaddr_in *si;
1773
1774 si = (struct sockaddr_in *) &r->arp_pa;
1775
1776 #if RT_CACHE_DEBUG >= 1
1777 if (arp_lock)
778 printk("arp_req_get: bug\n");
1779 #endif
1780 arp_fast_lock();
1781
1782 entry = arp_lookup(si->sin_addr.s_addr, r->arp_flags|ATF_NETMASK, dev);
1783
1784 if (entry == NULL)
1785 {
1786 arp_unlock();
1787 return -ENXIO;
1788 }
1789
1790
1791
1792
1793
1794 memcpy(r->arp_ha.sa_data, &entry->ha, entry->dev->addr_len);
1795 r->arp_ha.sa_family = entry->dev->type;
1796 r->arp_flags = entry->flags;
1797 strncpy(r->arp_dev, entry->dev->name, 16);
1798 arp_unlock();
1799 return 0;
1800 }
1801
1802 static int arp_req_delete(struct arpreq *r, struct device * dev)
1803 {
1804 struct arp_table *entry;
1805 struct sockaddr_in *si;
1806
1807 si = (struct sockaddr_in *) &r->arp_pa;
1808 #if RT_CACHE_DEBUG >= 1
1809 if (arp_lock)
1810 printk("arp_req_delete: bug\n");
1811 #endif
1812 arp_fast_lock();
1813
1814 if (!(r->arp_flags & ATF_PUBL))
1815 {
1816 for (entry = arp_tables[HASH(si->sin_addr.s_addr)];
1817 entry != NULL; entry = entry->next)
1818 if (entry->ip == si->sin_addr.s_addr
1819 && (!dev || entry->dev == dev))
1820 {
1821 arp_destroy(entry);
1822 arp_unlock();
1823 return 0;
1824 }
1825 }
1826 else
1827 {
1828 for (entry = arp_proxy_list;
1829 entry != NULL; entry = entry->next)
1830 if (entry->ip == si->sin_addr.s_addr
1831 && (!dev || entry->dev == dev))
1832 {
1833 arp_destroy(entry);
1834 arp_unlock();
1835 return 0;
1836 }
1837 }
1838
1839 arp_unlock();
1840 return -ENXIO;
1841 }
1842
1843
1844
1845
1846
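/*
 *	Handle the ARP ioctls.  SIOCD/S/GARP use struct arpreq; the OLD_*
 *	variants take the older struct arpreq_old and are mapped onto the
 *	new handlers, trying both the proxy and non-proxy tables where the
 *	old interface was ambiguous.
 */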
1847 int arp_ioctl(unsigned int cmd, void *arg)
1848 {
1849 int err;
1850 struct arpreq r;
1851
1852 struct device * dev = NULL;
1853
1854 switch(cmd)
1855 {
1856 case SIOCDARP:
1857 case SIOCSARP:
1858 if (!suser())
1859 return -EPERM;
1860 case SIOCGARP:
1861 err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq));
1862 if (err)
1863 return err;
1864 memcpy_fromfs(&r, arg, sizeof(struct arpreq));
1865 break;
1866 case OLD_SIOCDARP:
1867 case OLD_SIOCSARP:
1868 if (!suser())
1869 return -EPERM;
1870 case OLD_SIOCGARP:
1871 err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq_old));
1872 if (err)
1873 return err;
1874 memcpy_fromfs(&r, arg, sizeof(struct arpreq_old));
1875 memset(&r.arp_dev, 0, sizeof(r.arp_dev));
1876 break;
1877 default:
1878 return -EINVAL;
1879 }
1880
1881 if (r.arp_pa.sa_family != AF_INET)
1882 return -EPFNOSUPPORT;
1883 if (((struct sockaddr_in *)&r.arp_pa)->sin_addr.s_addr == 0)
1884 return -EINVAL;
1885
1886 if (r.arp_dev[0])
1887 {
1888 if ((dev = dev_get(r.arp_dev)) == NULL)
1889 return -ENODEV;
1890
1891 if (!r.arp_ha.sa_family)
1892 r.arp_ha.sa_family = dev->type;
1893 else if (r.arp_ha.sa_family != dev->type)
1894 return -EINVAL;
1895 }
1896 else
1897 {
1898 if ((r.arp_flags & ATF_PUBL) &&
1899 ((cmd == SIOCSARP) || (cmd == OLD_SIOCSARP))) {
1900 if ((dev = dev_getbytype(r.arp_ha.sa_family)) == NULL)
1901 return -ENODEV;
1902 }
1903 }
1904
1905 switch(cmd)
1906 {
1907 case SIOCDARP:
1908 return arp_req_delete(&r, dev);
1909 case SIOCSARP:
1910 return arp_req_set(&r, dev);
1911 case OLD_SIOCDARP:
1912
1913
1914
1915 r.arp_flags &= ~ATF_PUBL;
1916 err = arp_req_delete(&r, dev);
1917 r.arp_flags |= ATF_PUBL;
1918 if (!err)
1919 arp_req_delete(&r, dev);
1920 else
1921 err = arp_req_delete(&r, dev);
1922 return err;
1923 case OLD_SIOCSARP:
1924 err = arp_req_set(&r, dev);
1925
1926
1927
1928
1929
1930 if (r.arp_flags & ATF_PUBL)
1931 {
1932 r.arp_flags &= ~ATF_PUBL;
1933 arp_req_delete(&r, dev);
1934 }
1935 return err;
1936 case SIOCGARP:
1937 err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq));
1938 if (err)
1939 return err;
1940 err = arp_req_get(&r, dev);
1941 if (!err)
1942 memcpy_tofs(arg, &r, sizeof(r));
1943 return err;
1944 case OLD_SIOCGARP:
1945 err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq_old));
1946 if (err)
1947 return err;
1948 r.arp_flags &= ~ATF_PUBL;
1949 err = arp_req_get(&r, dev);
1950 if (err < 0)
1951 {
1952 r.arp_flags |= ATF_PUBL;
1953 err = arp_req_get(&r, dev);
1954 }
1955 if (!err)
1956 memcpy_tofs(arg, &r, sizeof(struct arpreq_old));
1957 return err;
1958 }
1959
1960 return 0;
1961 }
1962
1963
1964
1965
1966
1967
1968 static struct packet_type arp_packet_type =
1969 {
1970 0,
1971 NULL,
1972 arp_rcv,
1973 NULL,
1974 NULL
1975 };
1976
1977 static struct notifier_block arp_dev_notifier={
1978 arp_device_event,
1979 NULL,
1980 0
1981 };
1982
1983 void arp_init (void)
1984 {
1985
1986 arp_packet_type.type=htons(ETH_P_ARP);
1987 dev_add_pack(&arp_packet_type);
1988
1989 add_timer(&arp_timer);
1990
1991 register_netdevice_notifier(&arp_dev_notifier);
1992
1993 proc_net_register(&(struct proc_dir_entry) {
1994 PROC_NET_ARP, 3, "arp",
1995 S_IFREG | S_IRUGO, 1, 0, 0,
1996 0, &proc_net_inode_operations,
1997 arp_get_info
1998 });
1999 }
2000