This source file includes following definitions.
- arp_fast_lock
- arp_fast_unlock
- arp_unlock
- arp_enqueue
- arp_dequeue
- arp_release_entry
- arp_free_entry
- arp_count_hhs
- arp_invalidate_hhs
- arp_update_hhs
- arp_check_expire
- arp_expire_request
- arp_device_event
- arp_send
- arp_send_q
- arp_destroy
- arp_rcv
- arp_lookup
- arp_query
- arp_set_predefined
- arp_find
- arp_get_info
- arp_bind_cache
- arp_run_bh
- empty
- arp_req_set
- arp_req_get
- arp_req_delete
- arp_ioctl
- arp_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67 #include <linux/types.h>
68 #include <linux/string.h>
69 #include <linux/kernel.h>
70 #include <linux/sched.h>
71 #include <linux/config.h>
72 #include <linux/socket.h>
73 #include <linux/sockios.h>
74 #include <linux/errno.h>
75 #include <linux/if_arp.h>
76 #include <linux/in.h>
77 #include <linux/mm.h>
78 #include <linux/inet.h>
79 #include <linux/netdevice.h>
80 #include <linux/etherdevice.h>
81 #include <linux/trdevice.h>
82 #include <linux/skbuff.h>
83 #include <linux/proc_fs.h>
84 #include <linux/stat.h>
85
86 #include <net/ip.h>
87 #include <net/icmp.h>
88 #include <net/route.h>
89 #include <net/protocol.h>
90 #include <net/tcp.h>
91 #include <net/sock.h>
92 #include <net/arp.h>
93 #ifdef CONFIG_AX25
94 #include <net/ax25.h>
95 #ifdef CONFIG_NETROM
96 #include <net/netrom.h>
97 #endif
98 #endif
99
100 #include <asm/system.h>
101 #include <asm/segment.h>
102
103 #include <stdarg.h>
104
105
106
107
108
109
110
111
/*
 *	One ARP mapping: protocol (IP) address -> hardware address, bound to
 *	a device.  Entries chain through `next' in a bucket of arp_tables[]
 *	(or on arp_proxy_list for proxy entries).
 */
struct arp_table
{
	struct arp_table		*next;			/* next entry in the same bucket	*/
	unsigned long			last_used;		/* jiffies of last lookup hit		*/
	unsigned long			last_updated;		/* jiffies of last hw-address update;	*/
								/* 0 marks a "dead" entry		*/
	unsigned int			flags;			/* ATF_* flags (ATF_COM, ATF_PERM...)	*/
	u32				ip;			/* protocol (IP) address		*/
	u32				mask;			/* netmask; only used by proxy entries	*/
	unsigned char			ha[MAX_ADDR_LEN];	/* hardware address			*/
	struct device			*dev;			/* device this entry is bound to	*/

	/*
	 *	The following entries are only used for unresolved
	 *	(incomplete) entries and for header caching.
	 */

	struct timer_list		timer;			/* expire/retransmit timer		*/
	int				retries;		/* remaining request retransmits	*/
	struct sk_buff_head		skb;			/* packets queued awaiting resolution	*/
	struct hh_cache			*hh;			/* cached hardware headers bound here	*/
};
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
/*
 *	Retransmit an unanswered ARP request after ARP_RES_TIME; probe a
 *	dead (unanswering) entry only every ARP_DEAD_RES_TIME.
 */
#define ARP_RES_TIME		(5*HZ)
#define ARP_DEAD_RES_TIME	(60*HZ)

/*
 *	How many request retransmits before giving up.
 */
#define ARP_MAX_TRIES		3

/*
 *	An entry unused for this long may be expired.
 */
#define ARP_TIMEOUT		(600*HZ)

/*
 *	How often the expiry scan (arp_check_expire) runs.
 */
#define ARP_CHECK_INTERVAL	(60*HZ)

/*
 *	A resolved entry is re-confirmed by a unicast probe after
 *	ARP_CONFIRM_INTERVAL; the probe times out like a normal request.
 */
#define ARP_CONFIRM_INTERVAL	(300*HZ)
#define ARP_CONFIRM_TIMEOUT	ARP_RES_TIME

/*
 *	Reference-count style table lock (see arp_fast_lock/arp_unlock) and
 *	the pending bottom-half work mask.
 */
static unsigned long arp_lock;
static unsigned long arp_bh_mask;

#define ARP_BH_BACKLOG	1

/* Entries created while the table was locked wait here for arp_run_bh(). */
static struct arp_table *arp_backlog;

static void arp_run_bh(void);
static void arp_check_expire (unsigned long);

static struct timer_list arp_timer =
	{ NULL, NULL, ARP_CHECK_INTERVAL, 0L, &arp_check_expire };

/*
 *	Default (host) netmask for non-proxy entries.
 */
#define DEF_ARP_NETMASK (~0)

/*
 *	Hash table: ARP_TABLE_SIZE buckets plus one extra slot that holds
 *	the proxy-ARP list.
 */
#define ARP_TABLE_SIZE  16
#define FULL_ARP_TABLE_SIZE (ARP_TABLE_SIZE+1)

struct arp_table *arp_tables[FULL_ARP_TABLE_SIZE] =
{
	NULL,
};

#define arp_proxy_list arp_tables[ARP_TABLE_SIZE]

/*
 *	Bucket selection for a protocol address.
 */
#define HASH(paddr) 	(htonl(paddr) & (ARP_TABLE_SIZE - 1))
222
223
224
225
226
/*
 *	Take a reference on the ARP table lock.  While arp_lock is non-zero,
 *	other contexts must not restructure the tables directly and instead
 *	queue work on the backlog (see arp_bh_mask / arp_run_bh).
 */
static __inline__ void arp_fast_lock(void)
{
	ATOMIC_INCR(&arp_lock);
}
231
/*
 *	Drop a lock reference WITHOUT running any deferred bottom-half work
 *	(contrast with arp_unlock below).
 */
static __inline__ void arp_fast_unlock(void)
{
	ATOMIC_DECR(&arp_lock);
}
236
/*
 *	Drop a lock reference; if we were the last holder and bottom-half
 *	work is pending (arp_bh_mask set), run it now.
 */
static __inline__ void arp_unlock(void)
{
	if (!ATOMIC_DECR_AND_CHECK(&arp_lock) && arp_bh_mask)
		arp_run_bh();
}
242
243
244
245
246
247 static void arp_enqueue(struct arp_table **q, struct arp_table *entry)
248 {
249 unsigned long flags;
250 struct arp_table * tail;
251
252 save_flags(flags);
253 cli();
254 tail = *q;
255 if (!tail)
256 entry->next = entry;
257 else
258 {
259 entry->next = tail->next;
260 tail->next = entry;
261 }
262 *q = entry;
263 restore_flags(flags);
264 return;
265 }
266
267
268
269
270
271
272 static struct arp_table * arp_dequeue(struct arp_table **q)
273 {
274 struct arp_table * entry;
275
276 if (*q)
277 {
278 entry = (*q)->next;
279 (*q)->next = entry->next;
280 if (entry->next == entry)
281 *q = NULL;
282 entry->next = NULL;
283 return entry;
284 }
285 return NULL;
286 }
287
288
289
290
291
/*
 *	Purge all packets queued on an (unresolved) entry.  Interrupts are
 *	re-enabled around each dev_kfree_skb() call, draining the queue one
 *	buffer at a time with the skb locked against the device.
 */
static void arp_release_entry(struct arp_table *entry)
{
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);
	cli();

	/* Release the list of `skb' pointers. */
	while ((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		skb_device_lock(skb);
		restore_flags(flags);
		dev_kfree_skb(skb, FREE_WRITE);
		cli();
	}
	restore_flags(flags);
	return;
}
310
311
312
313
314
315
316 static void arp_free_entry(struct arp_table *entry)
317 {
318 unsigned long flags;
319 struct hh_cache *hh, *next;
320
321 del_timer(&entry->timer);
322
323 save_flags(flags);
324 cli();
325 arp_release_entry(entry);
326
327 for (hh = entry->hh; hh; hh = next)
328 {
329 next = hh->hh_next;
330 hh->hh_arp = NULL;
331 if (!--hh->hh_refcnt)
332 kfree_s(hh, sizeof(struct(struct hh_cache)));
333 }
334 restore_flags(flags);
335
336 kfree_s(entry, sizeof(struct arp_table));
337 return;
338 }
339
340
341
342
343
344 static __inline__ int arp_count_hhs(struct arp_table * entry)
345 {
346 struct hh_cache *hh, **hhp;
347 int count = 0;
348
349 hhp = &entry->hh;
350 while ((hh=*hhp) != NULL)
351 {
352 if (hh->hh_refcnt == 1)
353 {
354 *hhp = hh->hh_next;
355 kfree_s(hh, sizeof(struct hh_cache));
356 continue;
357 }
358 count += hh->hh_refcnt-1;
359 hhp = &hh->hh_next;
360 }
361
362 return count;
363 }
364
365
366
367
368
369 static __inline__ void arp_invalidate_hhs(struct arp_table * entry)
370 {
371 struct hh_cache *hh;
372
373 for (hh=entry->hh; hh; hh=hh->hh_next)
374 hh->hh_uptodate = 0;
375 }
376
377
378
379
380
381 static __inline__ void arp_update_hhs(struct arp_table * entry)
382 {
383 struct hh_cache *hh;
384
385 for (hh=entry->hh; hh; hh=hh->hh_next)
386 entry->dev->header_cache_update(hh, entry->dev, entry->ha);
387 }
388
389
390
391
392
393
394
395
396
397
/*
 *	Periodic scan (every ARP_CHECK_INTERVAL): expire non-permanent
 *	entries unused for ARP_TIMEOUT, and send unicast confirmation
 *	probes to entries not updated for ARP_CONFIRM_INTERVAL.  The whole
 *	scan is skipped when the table is locked elsewhere.  Also kicks the
 *	routing-cache expiry and rearms itself.
 */
static void arp_check_expire(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;

	del_timer(&arp_timer);

	if (!arp_lock)
	{
		arp_fast_lock();

		for (i = 0; i < ARP_TABLE_SIZE; i++)
		{
			struct arp_table *entry;
			struct arp_table **pentry;

			pentry = &arp_tables[i];

			while ((entry = *pentry) != NULL)
			{
				cli();
				/* Idle too long, not permanent, and no cached
				 * headers reference it: unlink and free.
				 */
				if (now - entry->last_used > ARP_TIMEOUT
				    && !(entry->flags & ATF_PERM)
				    && !arp_count_hhs(entry))
				{
					*pentry = entry->next;
					sti();
#if RT_CACHE_DEBUG >= 2
					printk("arp_expire: %08x expired\n", entry->ip);
#endif
					arp_free_entry(entry);
				}
				/* Stale but alive: mark incomplete, invalidate
				 * cached headers and unicast-probe the known
				 * hardware address for confirmation.
				 */
				else if (entry->last_updated
					 && now - entry->last_updated > ARP_CONFIRM_INTERVAL
					 && !(entry->flags & ATF_PERM))
				{
					struct device * dev = entry->dev;
					pentry = &entry->next;
					entry->flags &= ~ATF_COM;
					arp_invalidate_hhs(entry);
					sti();
					entry->retries = ARP_MAX_TRIES+1;
					del_timer(&entry->timer);
					entry->timer.expires = jiffies + ARP_CONFIRM_TIMEOUT;
					add_timer(&entry->timer);
					arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip,
						 dev, dev->pa_addr, entry->ha,
						 dev->dev_addr, NULL);
#if RT_CACHE_DEBUG >= 2
					printk("arp_expire: %08x requires confirmation\n", entry->ip);
#endif
				}
				else
					pentry = &entry->next;	/* go to next entry */
			}
		}
		arp_unlock();
	}

	ip_rt_check_expire();

	/*
	 *	Set the timer again.
	 */
	arp_timer.expires = jiffies + ARP_CHECK_INTERVAL;
	add_timer(&arp_timer);
}
466
467
468
469
470
471
472
/*
 *	Per-entry timer handler for an unresolved (or being-confirmed)
 *	entry: retransmit the request while retries remain, park the entry
 *	as "dead" if hardware-header references still pin it, or unlink and
 *	destroy it.
 */
static void arp_expire_request (unsigned long arg)
{
	struct arp_table *entry = (struct arp_table *) arg;
	struct arp_table **pentry;
	unsigned long hash;
	unsigned long flags;

	save_flags(flags);
	cli();

	/*
	 *	The entry completed between the timer firing and us getting
	 *	here: nothing to do.
	 */
	if (entry->flags & ATF_COM)
	{
		restore_flags(flags);
		return;
	}

	/*
	 *	Table is locked elsewhere: retry shortly rather than racing
	 *	with the lock holder.
	 */
	if (arp_lock)
	{
#if RT_CACHE_DEBUG >= 1
		printk("arp_expire_request: %08x postponed\n", entry->ip);
#endif
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + HZ/10;
		add_timer(&entry->timer);
		restore_flags(flags);
		return;
	}

	arp_fast_lock();
	restore_flags(flags);

	/*
	 *	Entry is alive (last_updated != 0) and retries remain:
	 *	retransmit the request and rearm the timer.
	 */
	if (entry->last_updated && --entry->retries > 0)
	{
		struct device *dev = entry->dev;

#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x timed out\n", entry->ip);
#endif

		del_timer(&entry->timer);
		entry->timer.expires = jiffies + ARP_RES_TIME;
		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
			 NULL, dev->dev_addr, NULL);
		arp_unlock();
		return;
	}

	/*
	 *	Out of retries: drop any packets still queued on the entry.
	 */
	arp_release_entry(entry);

	cli();
	if (arp_count_hhs(entry))
	{
		/*
		 *	Hardware-header cache slots still reference us, so the
		 *	entry cannot be freed; keep it as a "dead" entry
		 *	(last_updated = 0) probed only every ARP_DEAD_RES_TIME.
		 *	NOTE(review): arp_release_entry() is called a second
		 *	time here although the queue was just drained above;
		 *	redundant but harmless.
		 */
		struct device *dev = entry->dev;
#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x is dead\n", entry->ip);
#endif
		arp_release_entry(entry);
		entry->retries = ARP_MAX_TRIES;
		restore_flags(flags);
		entry->last_updated = 0;
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + ARP_DEAD_RES_TIME;
		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
			 NULL, dev->dev_addr, NULL);
		arp_unlock();
		return;
	}
	restore_flags(flags);

	/*
	 *	No references left: unlink from the hash chain and free.
	 */
	hash = HASH(entry->ip);

	pentry = &arp_tables[hash];

	while (*pentry != NULL)
	{
		if (*pentry == entry)
		{
			cli();
			*pentry = entry->next;
			restore_flags(flags);
#if RT_CACHE_DEBUG >= 2
			printk("arp_expire_request: %08x is killed\n", entry->ip);
#endif
			arp_free_entry(entry);
			arp_unlock();
			return;
		}
		pentry = &(*pentry)->next;
	}
	printk("arp_expire_request: bug: ARP entry is lost!\n");
	arp_unlock();
}
573
574
575
576
577
578 int arp_device_event(struct notifier_block *this, unsigned long event, void *ptr)
579 {
580 struct device *dev=ptr;
581 int i;
582
583 if (event != NETDEV_DOWN)
584 return NOTIFY_DONE;
585
586
587
588
589 #if RT_CACHE_DEBUG >= 1
590 if (arp_lock)
591 printk("arp_device_event: bug\n");
592 #endif
593 arp_fast_lock();
594
595 for (i = 0; i < FULL_ARP_TABLE_SIZE; i++)
596 {
597 struct arp_table *entry;
598 struct arp_table **pentry = &arp_tables[i];
599
600 while ((entry = *pentry) != NULL)
601 {
602 if (entry->dev == dev)
603 {
604 *pentry = entry->next;
605 arp_free_entry(entry);
606 }
607 else
608 pentry = &entry->next;
609 }
610 }
611 arp_unlock();
612 return NOTIFY_DONE;
613 }
614
615
616
617
618
619
620
/*
 *	Create and transmit an ARP packet.  A NULL dest_hw means broadcast;
 *	a NULL target_hw zeroes the target hardware field.
 *	NOTE(review): src_hw is passed straight to memcpy() for the sender
 *	field, so callers must supply a non-NULL src_hw even though the
 *	hard_header call tolerates NULL.
 */
void arp_send(int type, int ptype, u32 dest_ip,
	      struct device *dev, u32 src_ip,
	      unsigned char *dest_hw, unsigned char *src_hw,
	      unsigned char *target_hw)
{
	struct sk_buff *skb;
	struct arphdr *arp;
	unsigned char *arp_ptr;

	/*
	 *	No arp on this interface.
	 */
	if (dev->flags&IFF_NOARP)
		return;

	/*
	 *	Allocate a buffer: ARP header plus two (hw,ip) address pairs,
	 *	with room reserved for the link-layer header.
	 */
	skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
				+ dev->hard_header_len, GFP_ATOMIC);
	if (skb == NULL)
	{
		printk("ARP: no memory to send an arp packet\n");
		return;
	}
	skb_reserve(skb, dev->hard_header_len);
	arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4));
	skb->arp = 1;
	skb->dev = dev;
	skb->free = 1;
	skb->protocol = htons (ETH_P_IP);

	/*
	 *	Fill the device header for the ARP frame
	 */
	dev->hard_header(skb,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len);

	/*
	 *	Fill out the arp protocol part.  The protocol type field
	 *	depends on the link layer: AX.25/NET/ROM carry IP inside
	 *	AX.25 framing.
	 */
	arp->ar_hrd = htons(dev->type);
#ifdef CONFIG_AX25
#ifdef CONFIG_NETROM
	arp->ar_pro = (dev->type == ARPHRD_AX25 || dev->type == ARPHRD_NETROM) ? htons(AX25_P_IP) : htons(ETH_P_IP);
#else
	arp->ar_pro = (dev->type != ARPHRD_AX25) ? htons(ETH_P_IP) : htons(AX25_P_IP);
#endif
#else
	arp->ar_pro = htons(ETH_P_IP);
#endif
	arp->ar_hln = dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr=(unsigned char *)(arp+1);

	/* Payload layout: sender hw, sender ip, target hw, target ip. */
	memcpy(arp_ptr, src_hw, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &src_ip,4);
	arp_ptr+=4;
	if (target_hw != NULL)
		memcpy(arp_ptr, target_hw, dev->addr_len);
	else
		memset(arp_ptr, 0, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &dest_ip, 4);

	dev_queue_xmit(skb, dev, 0);
}
691
692
693
694
695
/*
 *	Transmit every packet queued on the entry while it was being
 *	resolved.  Must only be called on a completed (ATF_COM) entry.
 */
static void arp_send_q(struct arp_table *entry)
{
	struct sk_buff *skb;

	unsigned long flags;

	/*
	 *	Incomplete entries cannot be sent; warn and bail out.
	 */
	if(!(entry->flags&ATF_COM))
	{
		printk("arp_send_q: incomplete entry for %s\n",
				in_ntoa(entry->ip));
		return;
	}

	save_flags(flags);

	cli();
	while((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		IS_SKB(skb);
		skb_device_lock(skb);
		restore_flags(flags);
		/* Rebuild the link-layer header now that the hardware
		 * address is known; only send if the rebuild succeeded.
		 */
		if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb))
		{
			skb->arp = 1;
			if(skb->sk==NULL)
				dev_queue_xmit(skb, skb->dev, 0);
			else
				dev_queue_xmit(skb,skb->dev,skb->sk->priority);
		}
	}
	restore_flags(flags);
}
736
737
738
739
740
741
742 static void arp_destroy(struct arp_table * entry)
743 {
744 struct arp_table *entry1;
745 struct arp_table **pentry;
746
747 if (entry->flags & ATF_PUBL)
748 pentry = &arp_proxy_list;
749 else
750 pentry = &arp_tables[HASH(entry->ip)];
751
752 while ((entry1 = *pentry) != NULL)
753 {
754 if (entry1 == entry)
755 {
756 *pentry = entry1->next;
757 del_timer(&entry->timer);
758 arp_free_entry(entry);
759 return;
760 }
761 pentry = &entry1->next;
762 }
763 }
764
765
766
767
768
769
770
/*
 *	Receive an ARP packet on `dev'.  Answers requests for our own and
 *	proxied addresses, and learns/updates the sender's mapping when the
 *	packet targets one of our addresses.  The skb is always consumed.
 */
int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
{
	struct arphdr *arp = (struct arphdr *)skb->h.raw;
	unsigned char *arp_ptr= (unsigned char *)(arp+1);
	struct arp_table *entry;
	struct arp_table *proxy_entry;
	unsigned long hash;
	unsigned char ha[MAX_ADDR_LEN];		/* proxy hw address scratch */
	unsigned char *sha,*tha;
	u32 sip,tip;

	/*
	 *	Sanity: hardware length must match the device, the device
	 *	must be ARP-able, and the protocol length must be 4 (IPv4).
	 */
	if (arp->ar_hln != dev->addr_len ||
	    dev->type != ntohs(arp->ar_hrd) ||
	    dev->flags & IFF_NOARP ||
	    arp->ar_pln != 4)
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	/*
	 *	The protocol field must carry IP for the given link type
	 *	(AX.25 and NET/ROM use AX25_P_IP rather than ETH_P_IP).
	 */
	switch (dev->type)
	{
#ifdef CONFIG_AX25
		case ARPHRD_AX25:
			if(arp->ar_pro != htons(AX25_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;
#endif
#ifdef CONFIG_NETROM
		case ARPHRD_NETROM:
			if(arp->ar_pro != htons(AX25_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;
#endif
		case ARPHRD_ETHER:
		case ARPHRD_ARCNET:
			if(arp->ar_pro != htons(ETH_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;

		case ARPHRD_IEEE802:
			if(arp->ar_pro != htons(ETH_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;

		default:
			printk("ARP: dev->type mangled!\n");
			kfree_skb(skb, FREE_READ);
			return 0;
	}

	/*
	 *	Extract the fields: sender hw/ip, target hw/ip.
	 */
	sha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	tha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/*
	 *	Discard bogus requests for loopback or multicast targets.
	 */
	if (LOOPBACK(tip) || MULTICAST(tip))
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	/*
	 *	Process a request.
	 */
	if (arp->ar_op == htons(ARPOP_REQUEST))
	{
		/* Not our own address: check the proxy-ARP list. */
		if (tip != dev->pa_addr)
		{
			/*
			 *	Find a proxy entry on this device whose
			 *	(masked) address matches the target.
			 */
			arp_fast_lock();

			for (proxy_entry=arp_proxy_list;
			     proxy_entry;
			     proxy_entry = proxy_entry->next)
			{
				if (proxy_entry->dev == dev &&
				    !((proxy_entry->ip^tip)&proxy_entry->mask))
					break;

			}
			if (proxy_entry)
			{
				/* Reply on behalf of the proxied host. */
				memcpy(ha, proxy_entry->ha, dev->addr_len);
				arp_unlock();
				arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,ha, sha);
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			else
			{
				arp_unlock();
				kfree_skb(skb, FREE_READ);
				return 0;
			}
		}
		else
		{
			/*
			 *	Request for us: reply with our own hardware
			 *	address, then fall through to learn the sender.
			 */
			arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
		}
	}

	/* Only learn mappings from packets addressed to us. */
	if(ip_chk_addr(tip)!=IS_MYADDR)
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	/*
	 *	Update or create the table entry for the sender.
	 */
	arp_fast_lock();

	hash = HASH(sip);

	for (entry=arp_tables[hash]; entry; entry=entry->next)
		if (entry->ip == sip && entry->dev == dev)
			break;

	if (entry)
	{
		/*
		 *	Entry found; refresh it (unless permanent).
		 */
		if (!(entry->flags & ATF_PERM)) {
			memcpy(entry->ha, sha, dev->addr_len);
			entry->last_updated = jiffies;
		}
		if (!(entry->flags & ATF_COM))
		{
			/*
			 *	The entry was waiting for this reply: mark it
			 *	complete, refresh the cached headers, and flush
			 *	the packets queued on it.
			 */
			del_timer(&entry->timer);
			entry->flags |= ATF_COM;
			arp_update_hhs(entry);

			arp_send_q(entry);
		}
	}
	else
	{
		/*
		 *	No entry found.  Create a new one.
		 */
		entry = (struct arp_table *)kmalloc(sizeof(struct arp_table),GFP_ATOMIC);
		if(entry == NULL)
		{
			arp_unlock();
			printk("ARP: no memory for new arp entry\n");
			kfree_skb(skb, FREE_READ);
			return 0;
		}

		entry->mask = DEF_ARP_NETMASK;
		entry->ip = sip;
		entry->flags = ATF_COM;		/* complete from birth */
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;
		memcpy(entry->ha, sha, dev->addr_len);
		entry->last_updated = entry->last_used = jiffies;
		entry->dev = skb->dev;
		skb_queue_head_init(&entry->skb);
		/* Install directly only when we are the sole lock holder;
		 * otherwise defer insertion to the backlog bottom half.
		 */
		if (arp_lock == 1)
		{
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
		}
		else
		{
#if RT_CACHE_DEBUG >= 1
			printk("arp_rcv: %08x backlogged\n", entry->ip);
#endif
			arp_enqueue(&arp_backlog, entry);
			arp_bh_mask |= ARP_BH_BACKLOG;
		}
	}

	/*
	 *	Done; discard the packet.
	 */
	kfree_skb(skb, FREE_READ);
	arp_unlock();
	return 0;
}
1046
1047
1048
1049
1050
1051
1052
1053
1054 static struct arp_table *arp_lookup(u32 paddr, unsigned short flags, struct device * dev)
1055 {
1056 struct arp_table *entry;
1057
1058 if (!(flags & ATF_PUBL))
1059 {
1060 for (entry = arp_tables[HASH(paddr)];
1061 entry != NULL; entry = entry->next)
1062 if (entry->ip == paddr && (!dev || entry->dev == dev))
1063 break;
1064 return entry;
1065 }
1066
1067 if (!(flags & ATF_NETMASK))
1068 {
1069 for (entry = arp_proxy_list;
1070 entry != NULL; entry = entry->next)
1071 if (entry->ip == paddr && (!dev || entry->dev == dev))
1072 break;
1073 return entry;
1074 }
1075
1076 for (entry=arp_proxy_list; entry != NULL; entry = entry->next)
1077 if (!((entry->ip^paddr)&entry->mask) &&
1078 (!dev || entry->dev == dev))
1079 break;
1080 return entry;
1081 }
1082
1083
1084
1085
1086
1087 int arp_query(unsigned char *haddr, u32 paddr, struct device * dev)
1088 {
1089 struct arp_table *entry;
1090
1091 arp_fast_lock();
1092
1093 entry = arp_lookup(paddr, 0, dev);
1094
1095 if (entry != NULL)
1096 {
1097 entry->last_used = jiffies;
1098 if (entry->flags & ATF_COM)
1099 {
1100 memcpy(haddr, entry->ha, dev->addr_len);
1101 arp_unlock();
1102 return 1;
1103 }
1104 }
1105 arp_unlock();
1106 return 0;
1107 }
1108
1109
/*
 *	Fill in haddr for addresses that need no table lookup: our own
 *	address, IP multicast (mapped into a 01:00:5e MAC for ethernet-like
 *	devices) and broadcast.  Returns 1 when haddr was filled, 0 when a
 *	real lookup is required.
 */
static int arp_set_predefined(int addr_hint, unsigned char * haddr, __u32 paddr, struct device * dev)
{
	switch (addr_hint)
	{
		case IS_MYADDR:
			printk("ARP: arp called for own IP address\n");
			memcpy(haddr, dev->dev_addr, dev->addr_len);
			return 1;
#ifdef CONFIG_IP_MULTICAST
		case IS_MULTICAST:
			if(dev->type==ARPHRD_ETHER || dev->type==ARPHRD_IEEE802)
			{
				/* Map the low 23 bits of the group address
				 * into the 01:00:5e multicast MAC prefix.
				 */
				u32 taddr;
				haddr[0]=0x01;
				haddr[1]=0x00;
				haddr[2]=0x5e;
				taddr=ntohl(paddr);
				haddr[5]=taddr&0xff;
				taddr=taddr>>8;
				haddr[4]=taddr&0xff;
				taddr=taddr>>8;
				haddr[3]=taddr&0x7f;
				return 1;
			}
		/*
		 *	Devices without a multicast mapping broadcast instead:
		 *	intentional fallthrough to IS_BROADCAST.
		 */
#endif

		case IS_BROADCAST:
			memcpy(haddr, dev->broadcast, dev->addr_len);
			return 1;
	}
	return 0;
}
1145
1146
1147
1148
1149
/*
 *	Main resolver entry point.  Fill haddr with the hardware address
 *	for paddr and return 0, or - when unresolved - queue/drop the skb,
 *	kick off an ARP request and return 1.
 */
int arp_find(unsigned char *haddr, u32 paddr, struct device *dev,
	     u32 saddr, struct sk_buff *skb)
{
	struct arp_table *entry;
	unsigned long hash;

	/* Own address / multicast / broadcast need no table lookup. */
	if (arp_set_predefined(ip_chk_addr(paddr), haddr, paddr, dev))
	{
		if (skb)
			skb->arp = 1;
		return 0;
	}

	hash = HASH(paddr);
	arp_fast_lock();

	/*
	 *	Find an entry
	 */
	entry = arp_lookup(paddr, 0, dev);

	if (entry != NULL)
	{
		if (!(entry->flags & ATF_COM))
		{
			/*
			 *	A request is already outstanding.  Queue the
			 *	packet on the entry if it is alive; if the
			 *	entry has gone dead (last_updated == 0), fail
			 *	the packet immediately.
			 */
			if (skb != NULL)
			{
				if (entry->last_updated)
				{
					skb_queue_tail(&entry->skb, skb);
					skb_device_unlock(skb);
				}
				else
				{
#if 0
					/* Alternative: report EHOSTDOWN on the
					 * owning socket instead of ICMP.
					 */
					if (skb->sk)
					{
						skb->sk->err = EHOSTDOWN;
						skb->sk->error_report(skb->sk);
					}
#else
					icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
#endif
					dev_kfree_skb(skb, FREE_WRITE);
				}
			}
			arp_unlock();
			return 1;
		}

		/*
		 *	Resolved: refresh usage stamp and copy the address.
		 */
		entry->last_used = jiffies;
		memcpy(haddr, entry->ha, dev->addr_len);
		if (skb)
			skb->arp = 1;
		arp_unlock();
		return 0;
	}

	/*
	 *	No entry: create an unresolved one and queue the packet on it.
	 */
	entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
					GFP_ATOMIC);
	if (entry != NULL)
	{
		entry->last_updated = entry->last_used = jiffies;
		entry->flags = 0;
		entry->ip = paddr;
		entry->mask = DEF_ARP_NETMASK;
		memset(entry->ha, 0, dev->addr_len);
		entry->dev = dev;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;
		entry->timer.expires = jiffies + ARP_RES_TIME;
		skb_queue_head_init(&entry->skb);
		if (skb != NULL)
		{
			skb_queue_tail(&entry->skb, skb);
			skb_device_unlock(skb);
		}
		/* Install directly only when we are the sole lock holder;
		 * otherwise defer insertion to the backlog bottom half.
		 */
		if (arp_lock == 1)
		{
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
			add_timer(&entry->timer);
			entry->retries = ARP_MAX_TRIES;
		}
		else
		{
#if RT_CACHE_DEBUG >= 1
			printk("arp_find: %08x backlogged\n", entry->ip);
#endif
			arp_enqueue(&arp_backlog, entry);
			arp_bh_mask |= ARP_BH_BACKLOG;
		}
	}
	else if (skb != NULL)
		dev_kfree_skb(skb, FREE_WRITE);
	arp_unlock();

	/*
	 *	Fire off the request (even if the entry could not be made:
	 *	a reply may still be useful to a later attempt).
	 */
	arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, saddr, NULL,
		 dev->dev_addr, NULL);

	return 1;
}
1279
1280
1281
1282
1283
1284
#define HBUFFERLEN 30

/*
 *	Dump the ARP table in /proc get_info style: fill `buffer', honour
 *	`offset'/`length', set *start, and return the byte count produced.
 */
int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
	int len=0;
	off_t pos=0;
	int size;
	struct arp_table *entry;
	char hbuffer[HBUFFERLEN];
	int i,j,k;
	const char hexbuf[] = "0123456789ABCDEF";

	size = sprintf(buffer,"IP address HW type Flags HW address Mask Device\n");

	pos+=size;
	len+=size;

	arp_fast_lock();

	for(i=0; i<FULL_ARP_TABLE_SIZE; i++)
	{
		for(entry=arp_tables[i]; entry!=NULL; entry=entry->next)
		{
/*
 *	Convert hardware address to XX:XX:XX:XX ... form
 *	(AX.25/NET/ROM addresses use their own textual form).
 */
#ifdef CONFIG_AX25
#ifdef CONFIG_NETROM
			if (entry->dev->type == ARPHRD_AX25 || entry->dev->type == ARPHRD_NETROM)
				strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
			else {
#else
			if(entry->dev->type==ARPHRD_AX25)
				strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
			else {
#endif
#endif

			for(k=0,j=0;k<HBUFFERLEN-3 && j<entry->dev->addr_len;j++)
			{
				hbuffer[k++]=hexbuf[ (entry->ha[j]>>4)&15 ];
				hbuffer[k++]=hexbuf[ entry->ha[j]&15 ];
				hbuffer[k++]=':';
			}
			hbuffer[--k]=0;	/* strip trailing ':' */

#ifdef CONFIG_AX25
			}
#endif
			size = sprintf(buffer+len,
				"%-17s0x%-10x0x%-10x%s",
				in_ntoa(entry->ip),
				(unsigned int)entry->dev->type,
				entry->flags,
				hbuffer);
#if RT_CACHE_DEBUG < 2
			size += sprintf(buffer+len+size,
				 " %-17s %s\n",
				 entry->mask==DEF_ARP_NETMASK ?
				 "*" : in_ntoa(entry->mask), entry->dev->name);
#else
			/* Debug build: also show hh refcount / uptodate. */
			size += sprintf(buffer+len+size,
				 " %-17s %s\t%ld\t%1d\n",
				 entry->mask==DEF_ARP_NETMASK ?
				 "*" : in_ntoa(entry->mask), entry->dev->name,
				 entry->hh ? entry->hh->hh_refcnt : -1,
				 entry->hh ? entry->hh->hh_uptodate : 0);
#endif

			len += size;
			pos += size;

			if (pos <= offset)
				len=0;
			if (pos >= offset+length)
				break;
		}
	}
	arp_unlock();

	*start = buffer+len-(pos-offset);	/* Start of wanted data */
	len = pos-offset;			/* Start slop */
	if (len>length)
		len = length;			/* Ending slop */
	return len;
}
1371
1372
1373
/*
 *	Bind a hardware-header cache slot (*hhp, of type htype) to the ARP
 *	entry for paddr on dev, creating entry and/or header as needed.
 *	Returns 0 when a header was bound here, 1 when nothing more is to
 *	be done (already bound, reused an existing header, or allocation
 *	failed).
 */
int arp_bind_cache(struct hh_cache ** hhp, struct device *dev, unsigned short htype, u32 paddr)
{
	struct arp_table *entry;
	struct hh_cache *hh = *hhp;
	int addr_hint;
	unsigned long flags;

	if (hh)
		return 1;

	/*
	 *	Own / broadcast / multicast address: build the header once
	 *	from the predefined hardware address; no ARP entry needed.
	 */
	if ((addr_hint = ip_chk_addr(paddr)) != 0)
	{
		unsigned char haddr[MAX_ADDR_LEN];
		if (hh)
			return 1;
		hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
		if (!hh)
			return 1;
		arp_set_predefined(addr_hint, haddr, paddr, dev);
		hh->hh_uptodate = 0;
		hh->hh_refcnt = 1;
		hh->hh_arp = NULL;
		hh->hh_next = NULL;
		hh->hh_type = htype;
		*hhp = hh;
		dev->header_cache_update(hh, dev, haddr);
		return 0;
	}

	save_flags(flags);

	arp_fast_lock();

	entry = arp_lookup(paddr, 0, dev);

	if (entry)
	{
		/* Reuse an existing cached header of the same type. */
		cli();
		for (hh = entry->hh; hh; hh=hh->hh_next)
			if (hh->hh_type == htype)
				break;
		if (hh)
		{
			hh->hh_refcnt++;
			*hhp = hh;
			restore_flags(flags);
			arp_unlock();
			return 1;
		}
		restore_flags(flags);
	}

	hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
	if (!hh)
	{
		arp_unlock();
		return 1;
	}

	hh->hh_uptodate = 0;
	hh->hh_refcnt = 1;
	hh->hh_arp = NULL;
	hh->hh_next = NULL;
	hh->hh_type = htype;

	if (entry)
	{
		/* Attach the fresh header to the existing entry. */
		dev->header_cache_update(hh, dev, entry->ha);
		*hhp = hh;
		cli();
		hh->hh_arp = (void*)entry;
		entry->hh = hh;
		hh->hh_refcnt++;
		restore_flags(flags);
		entry->last_used = jiffies;
		arp_unlock();
		return 0;
	}

	/*
	 *	No entry yet: create an unresolved one carrying the header.
	 */
	entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
					GFP_ATOMIC);
	if (entry == NULL)
	{
		kfree_s(hh, sizeof(struct hh_cache));
		arp_unlock();
		return 1;
	}

	entry->last_updated = entry->last_used = jiffies;
	entry->flags = 0;
	entry->ip = paddr;
	entry->mask = DEF_ARP_NETMASK;
	memset(entry->ha, 0, dev->addr_len);
	entry->dev = dev;
	entry->hh = hh;
	ATOMIC_INCR(&hh->hh_refcnt);	/* the entry's own reference */
	init_timer(&entry->timer);
	entry->timer.function = arp_expire_request;
	entry->timer.data = (unsigned long)entry;
	entry->timer.expires = jiffies + ARP_RES_TIME;
	skb_queue_head_init(&entry->skb);

	/* Install directly only when we are the sole lock holder;
	 * otherwise defer insertion to the backlog bottom half.
	 */
	if (arp_lock == 1)
	{
		unsigned long hash = HASH(paddr);
		cli();
		entry->next = arp_tables[hash];
		arp_tables[hash] = entry;
		hh->hh_arp = (void*)entry;
		entry->retries = ARP_MAX_TRIES;
		restore_flags(flags);

		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
	}
	else
	{
#if RT_CACHE_DEBUG >= 1
		printk("arp_cache_bind: %08x backlogged\n", entry->ip);
#endif
		arp_enqueue(&arp_backlog, entry);
		arp_bh_mask |= ARP_BH_BACKLOG;
	}
	*hhp = hh;
	arp_unlock();
	return 0;
}
1506
/*
 *	Bottom half: install entries queued on arp_backlog while the table
 *	was locked.  If an equivalent entry appeared in the meantime, the
 *	backlogged one is merged into it (hh slots and queued packets
 *	migrated) and freed.
 */
static void arp_run_bh()
{
	unsigned long flags;
	struct arp_table *entry, *entry1;
	struct hh_cache *hh;
	__u32 sip;

	save_flags(flags);
	cli();
	if (!arp_lock)
	{
		arp_fast_lock();

		while ((entry = arp_dequeue(&arp_backlog)) != NULL)
		{
			unsigned long hash;
			sti();
			sip = entry->ip;
			hash = HASH(sip);

			/* Did a competing entry get installed meanwhile? */
			for (entry1=arp_tables[hash]; entry1; entry1=entry1->next)
				if (entry1->ip==sip && entry1->dev == entry->dev)
					break;

			if (!entry1)
			{
				/* No duplicate: install and start resolving. */
				struct device * dev = entry->dev;
				cli();
				entry->next = arp_tables[hash];
				arp_tables[hash] = entry;
				for (hh=entry->hh; hh; hh=hh->hh_next)
					hh->hh_arp = (void*)entry;
				sti();
				del_timer(&entry->timer);
				entry->timer.expires = jiffies + ARP_RES_TIME;
				add_timer(&entry->timer);
				entry->retries = ARP_MAX_TRIES;
				arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
#if RT_CACHE_DEBUG >= 1
				printk("arp_run_bh: %08x reinstalled\n", sip);
#endif
			}
			else
			{
				struct sk_buff * skb;
				struct hh_cache * next;

				/* Duplicate exists: migrate our header-cache
				 * slots onto it...
				 */
				cli();
				for (hh=entry->hh; hh; hh=next)
				{
					next = hh->hh_next;
					hh->hh_next = entry1->hh;
					entry1->hh = hh;
					hh->hh_arp = (void*)entry1;
				}
				entry->hh = NULL;

				/* ...and requeue our pending packets on it. */
				while ((skb = skb_dequeue(&entry->skb)) != NULL)
				{
					skb_device_lock(skb);
					sti();
					skb_queue_tail(&entry1->skb, skb);
					skb_device_unlock(skb);
					cli();
				}
				sti();

#if RT_CACHE_DEBUG >= 1
				printk("arp_run_bh: entry %08x was born dead\n", entry->ip);
#endif
				arp_free_entry(entry);

				/* If the survivor is already resolved, push
				 * everything out straight away.
				 */
				if (entry1->flags & ATF_COM)
				{
					arp_update_hhs(entry1);
					arp_send_q(entry1);
				}
			}
			cli();
		}
		arp_bh_mask &= ~ARP_BH_BACKLOG;
		arp_unlock();
	}
	restore_flags(flags);
}
1602
1603
1604
1605
/*
 *	Return 1 when the first `len' bytes at `addr' are all zero,
 *	0 otherwise.  A non-positive `len' counts as empty.
 */
static inline int empty(unsigned char * addr, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (addr[i])
			return 0;
	return 1;
}
1616
1617
1618
1619
1620
/*
 *	Create or replace an ARP entry from a SIOCSARP request.  `dev' may
 *	be NULL, in which case the device is derived from the route to the
 *	requested address.  Returns 0 or a negative errno.
 */
static int arp_req_set(struct arpreq *r, struct device * dev)
{
	struct arp_table *entry;
	struct sockaddr_in *si;
	struct rtable *rt;
	struct device *dev1;
	unsigned char *ha;
	u32 ip;

	/*
	 *	Extract destination.
	 */
	si = (struct sockaddr_in *) &r->arp_pa;
	ip = si->sin_addr.s_addr;

	/*
	 *	Resolve the device the entry should live on: loopback for
	 *	our own address, otherwise the route's output device.
	 */
	if (ip_chk_addr(ip) == IS_MYADDR)
		dev1 = dev_get("lo");
	else {
		rt = ip_rt_route(ip, 0);
		if (!rt)
			return -ENETUNREACH;
		dev1 = rt->rt_dev;
		ip_rt_put(rt);
	}

	if (!dev)
		dev = dev1;

	/* A proxy (ATF_PUBL) entry must NOT be on the route's own device;
	 * a normal entry must be.
	 */
	if (((r->arp_flags & ATF_PUBL) && dev == dev1) ||
	    (!(r->arp_flags & ATF_PUBL) && dev != dev1))
		return -EINVAL;

#if RT_CACHE_DEBUG >= 1
	if (arp_lock)
		printk("arp_req_set: bug\n");
#endif
	arp_fast_lock();

	/*
	 *	Is there an existing entry for this address?  Replace it.
	 */
	entry = arp_lookup(ip, r->arp_flags & ~ATF_NETMASK, dev);

	if (entry)
	{
		arp_destroy(entry);
		entry = NULL;
	}

	/*
	 *	Do we need to create a new entry?
	 */
	if (entry == NULL)
	{
		entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
					GFP_ATOMIC);
		if (entry == NULL)
		{
			arp_unlock();
			return -ENOMEM;
		}
		entry->ip = ip;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;

		if (r->arp_flags & ATF_PUBL)
		{
			cli();
			entry->next = arp_proxy_list;
			arp_proxy_list = entry;
			sti();
		}
		else
		{
			unsigned long hash = HASH(ip);
			cli();
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
			sti();
		}
		skb_queue_head_init(&entry->skb);
	}

	/*
	 *	Fill in the entry.  An all-zero hardware address together
	 *	with ATF_COM means "use the device's own address".
	 */
	ha = r->arp_ha.sa_data;
	if ((r->arp_flags & ATF_COM) && empty(ha, dev->addr_len))
		ha = dev->dev_addr;
	memcpy(entry->ha, ha, dev->addr_len);
	entry->last_updated = entry->last_used = jiffies;
	entry->flags = r->arp_flags | ATF_COM;
	if ((entry->flags & ATF_PUBL) && (entry->flags & ATF_NETMASK))
	{
		si = (struct sockaddr_in *) &r->arp_netmask;
		entry->mask = si->sin_addr.s_addr;
	}
	else
		entry->mask = DEF_ARP_NETMASK;
	entry->dev = dev;
	arp_update_hhs(entry);
	arp_unlock();
	return 0;
}
1742
1743
1744
1745
1746
1747
1748
1749 static int arp_req_get(struct arpreq *r, struct device *dev)
1750 {
1751 struct arp_table *entry;
1752 struct sockaddr_in *si;
1753
1754 si = (struct sockaddr_in *) &r->arp_pa;
1755
1756 #if RT_CACHE_DEBUG >= 1
1757 if (arp_lock)
1758 printk("arp_req_set: bug\n");
1759 #endif
1760 arp_fast_lock();
1761
1762 entry = arp_lookup(si->sin_addr.s_addr, r->arp_flags|ATF_NETMASK, dev);
1763
1764 if (entry == NULL)
1765 {
1766 arp_unlock();
1767 return -ENXIO;
1768 }
1769
1770
1771
1772
1773
1774 memcpy(r->arp_ha.sa_data, &entry->ha, entry->dev->addr_len);
1775 r->arp_ha.sa_family = entry->dev->type;
1776 r->arp_flags = entry->flags;
1777 strncpy(r->arp_dev, entry->dev->name, 16);
1778 arp_unlock();
1779 return 0;
1780 }
1781
1782 static int arp_req_delete(struct arpreq *r, struct device * dev)
1783 {
1784 struct arp_table *entry;
1785 struct sockaddr_in *si;
1786
1787 si = (struct sockaddr_in *) &r->arp_pa;
1788 #if RT_CACHE_DEBUG >= 1
1789 if (arp_lock)
1790 printk("arp_req_delete: bug\n");
1791 #endif
1792 arp_fast_lock();
1793
1794 if (!(r->arp_flags & ATF_PUBL))
1795 {
1796 for (entry = arp_tables[HASH(si->sin_addr.s_addr)];
1797 entry != NULL; entry = entry->next)
1798 if (entry->ip == si->sin_addr.s_addr
1799 && (!dev || entry->dev == dev))
1800 {
1801 arp_destroy(entry);
1802 arp_unlock();
1803 return 0;
1804 }
1805 }
1806 else
1807 {
1808 for (entry = arp_proxy_list;
1809 entry != NULL; entry = entry->next)
1810 if (entry->ip == si->sin_addr.s_addr
1811 && (!dev || entry->dev == dev))
1812 {
1813 arp_destroy(entry);
1814 arp_unlock();
1815 return 0;
1816 }
1817 }
1818
1819 arp_unlock();
1820 return -ENXIO;
1821 }
1822
1823
1824
1825
1826
/*
 *	Handle an ARP layer ioctl: SIOCSARP/SIOCDARP/SIOCGARP set, delete
 *	and read cache entries; the OLD_* variants service the pre-netmask
 *	struct arpreq_old layout for binary compatibility.
 *	Returns 0 on success or a negative errno.
 */
int arp_ioctl(unsigned int cmd, void *arg)
{
	int err;
	struct arpreq r;

	struct device * dev = NULL;

	/* Copy the request in from user space, in whichever layout the
	 * command uses. */
	switch(cmd)
	{
		case SIOCDARP:
		case SIOCSARP:
			/* Modifying the cache is privileged. */
			if (!suser())
				return -EPERM;
			/* fall through — shares the copy-in with SIOCGARP */
		case SIOCGARP:
			err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq));
			if (err)
				return err;
			memcpy_fromfs(&r, arg, sizeof(struct arpreq));
			break;
		case OLD_SIOCDARP:
		case OLD_SIOCSARP:
			if (!suser())
				return -EPERM;
			/* fall through — shares the copy-in with OLD_SIOCGARP */
		case OLD_SIOCGARP:
			err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq_old));
			if (err)
				return err;
			memcpy_fromfs(&r, arg, sizeof(struct arpreq_old));
			/* The old layout has no arp_dev field; clear ours. */
			memset(&r.arp_dev, 0, sizeof(r.arp_dev));
			break;
		default:
			return -EINVAL;
	}

	/* Only IPv4 protocol addresses are handled here. */
	if (r.arp_pa.sa_family != AF_INET)
		return -EPFNOSUPPORT;
	if (((struct sockaddr_in *)&r.arp_pa)->sin_addr.s_addr == 0)
		return -EINVAL;

	/* Resolve the target device if the caller named one. */
	if (r.arp_dev[0])
	{
		if ((dev = dev_get(r.arp_dev)) == NULL)
			return -ENODEV;

		/* Default the hardware family from the device, otherwise
		 * the two must agree. */
		if (!r.arp_ha.sa_family)
			r.arp_ha.sa_family = dev->type;
		else if (r.arp_ha.sa_family != dev->type)
			return -EINVAL;
	}
	else
	{
		/* Setting a published (proxy) entry with no device named:
		 * pick any device of the requested hardware type. */
		if ((r.arp_flags & ATF_PUBL) &&
		    ((cmd == SIOCSARP) || (cmd == OLD_SIOCSARP))) {
			if ((dev = dev_getbytype(r.arp_ha.sa_family)) == NULL)
				return -ENODEV;
		}
	}

	/* Dispatch the actual operation. */
	switch(cmd)
	{
		case SIOCDARP:
			return arp_req_delete(&r, dev);
		case SIOCSARP:
			return arp_req_set(&r, dev);
		case OLD_SIOCDARP:
			/* The old ioctl cannot distinguish proxy and ordinary
			 * entries: delete the ordinary one first, then the
			 * proxy one, succeeding if either delete worked. */
			r.arp_flags &= ~ATF_PUBL;
			err = arp_req_delete(&r, dev);
			r.arp_flags |= ATF_PUBL;
			if (!err)
				arp_req_delete(&r, dev);
			else
				err = arp_req_delete(&r, dev);
			return err;
		case OLD_SIOCSARP:
			err = arp_req_set(&r, dev);
			/* Old semantics: setting a proxy entry also removes
			 * any ordinary entry for the same address. */
			if (r.arp_flags & ATF_PUBL)
			{
				r.arp_flags &= ~ATF_PUBL;
				arp_req_delete(&r, dev);
			}
			return err;
		case SIOCGARP:
			err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq));
			if (err)
				return err;
			err = arp_req_get(&r, dev);
			if (!err)
				memcpy_tofs(arg, &r, sizeof(r));
			return err;
		case OLD_SIOCGARP:
			err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq_old));
			if (err)
				return err;
			/* Look for an ordinary entry first, then a proxy one. */
			r.arp_flags &= ~ATF_PUBL;
			err = arp_req_get(&r, dev);
			if (err < 0)
			{
				r.arp_flags |= ATF_PUBL;
				err = arp_req_get(&r, dev);
			}
			if (!err)
				memcpy_tofs(arg, &r, sizeof(struct arpreq_old));
			return err;
	}
	/* not reached: every valid cmd returns from the switch above */
	return 0;
}
1942
1943
1944
1945
1946
1947
/*
 *	Packet handler hooked into the demultiplexer for ARP frames.
 *	Registered by arp_init(), which also fills in the protocol id.
 */
static struct packet_type arp_packet_type =
{
	0,		/* protocol type: set to htons(ETH_P_ARP) in arp_init() */
	NULL,		/* device: NULL — presumably matches all devices; confirm */
	arp_rcv,	/* receive handler */
	NULL,		/* private data: unused */
	NULL		/* next in chain: filled by dev_add_pack() */
};
1956
/*
 *	Notifier block so the ARP cache hears about device state changes;
 *	registered via register_netdevice_notifier() in arp_init() and
 *	serviced by arp_device_event().
 */
static struct notifier_block arp_dev_notifier={
	arp_device_event,	/* callback */
	NULL,			/* next: filled in on registration */
	0			/* priority */
};
1962
1963 void arp_init (void)
1964 {
1965
1966 arp_packet_type.type=htons(ETH_P_ARP);
1967 dev_add_pack(&arp_packet_type);
1968
1969 add_timer(&arp_timer);
1970
1971 register_netdevice_notifier(&arp_dev_notifier);
1972
1973 proc_net_register(&(struct proc_dir_entry) {
1974 PROC_NET_ARP, 3, "arp",
1975 S_IFREG | S_IRUGO, 1, 0, 0,
1976 0, &proc_net_inode_operations,
1977 arp_get_info
1978 });
1979 }
1980