This source file includes following definitions.
- arp_fast_lock
- arp_fast_unlock
- arp_unlock
- arp_enqueue
- arp_dequeue
- arp_release_entry
- arp_free_entry
- arp_count_hhs
- arp_invalidate_hhs
- arp_update_hhs
- arp_check_expire
- arp_expire_request
- arp_device_event
- arp_send
- arp_send_q
- arp_destroy
- arp_rcv
- arp_lookup
- arp_query
- arp_set_predefined
- arp_find
- arp_get_info
- arp_bind_cache
- arp_run_bh
- arp_req_set
- arp_req_get
- arp_req_delete
- arp_ioctl
- arp_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66 #include <linux/types.h>
67 #include <linux/string.h>
68 #include <linux/kernel.h>
69 #include <linux/sched.h>
70 #include <linux/config.h>
71 #include <linux/socket.h>
72 #include <linux/sockios.h>
73 #include <linux/errno.h>
74 #include <linux/if_arp.h>
75 #include <linux/in.h>
76 #include <linux/mm.h>
77 #include <linux/inet.h>
78 #include <linux/netdevice.h>
79 #include <linux/etherdevice.h>
80 #include <linux/trdevice.h>
81 #include <linux/skbuff.h>
82 #include <linux/proc_fs.h>
83 #include <linux/stat.h>
84
85 #include <net/ip.h>
86 #include <net/icmp.h>
87 #include <net/route.h>
88 #include <net/protocol.h>
89 #include <net/tcp.h>
90 #include <net/sock.h>
91 #include <net/arp.h>
92 #ifdef CONFIG_AX25
93 #include <net/ax25.h>
94 #ifdef CONFIG_NETROM
95 #include <net/netrom.h>
96 #endif
97 #endif
98
99 #include <asm/system.h>
100 #include <asm/segment.h>
101
102 #include <stdarg.h>
103
104
105
106
107
108
109
110
/*
 * One cached ARP binding: maps an IP address to a hardware address on a
 * particular device.  Entries are chained into hash buckets
 * (arp_tables[]) via 'next'; the last bucket holds proxy entries.
 */
struct arp_table
{
	struct arp_table	*next;			/* hash-bucket / proxy-list chain	*/
	unsigned long		last_used;		/* jiffies of last lookup hit		*/
	unsigned long		last_updated;		/* jiffies of last confirmed update;
							   0 marks a "dead" entry		*/
	unsigned int		flags;			/* ATF_* flags (ATF_COM, ATF_PERM, ...)	*/
	u32			ip;			/* protocol (IPv4) address		*/
	u32			mask;			/* netmask; meaningful for proxy entries */
	unsigned char		ha[MAX_ADDR_LEN];	/* hardware address			*/
	struct device		*dev;			/* device this binding belongs to	*/

	/*
	 * The following fields drive resolution of incomplete entries.
	 */
	struct timer_list	timer;			/* retransmit/expire timer		*/
	int			retries;		/* remaining request retransmissions	*/
	struct sk_buff_head	skb;			/* packets queued awaiting resolution	*/
	struct hh_cache		*hh;			/* cached hardware headers built on us	*/
};
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
/* Interval between retransmitted ARP requests for an unresolved entry. */
#define ARP_RES_TIME		(5*HZ)
/* Probe interval once an entry has been declared dead. */
#define ARP_DEAD_RES_TIME	(60*HZ)

/* How many requests to send before giving an address up. */
#define ARP_MAX_TRIES		3

/* An entry unused for this long is removed by the expiry scan. */
#define ARP_TIMEOUT		(600*HZ)

/* How often arp_check_expire() sweeps the table. */
#define ARP_CHECK_INTERVAL	(60*HZ)

/* Re-confirm a resolved entry after this much time without an update;
   the confirmation probe itself times out after ARP_CONFIRM_TIMEOUT. */
#define ARP_CONFIRM_INTERVAL	(300*HZ)
#define ARP_CONFIRM_TIMEOUT	ARP_RES_TIME

static unsigned long arp_lock;		/* table lock: count of current holders */
static unsigned long arp_bh_mask;	/* pending bottom-half work bits */

#define ARP_BH_BACKLOG	1		/* backlog queue needs processing */

/* Entries created while the table was locked, awaiting insertion. */
static struct arp_table *arp_backlog;

static void arp_run_bh(void);
static void arp_check_expire (unsigned long);

/* Periodic garbage-collection timer. */
static struct timer_list arp_timer =
	{ NULL, NULL, ARP_CHECK_INTERVAL, 0L, &arp_check_expire };

/* Default mask: match the whole address (host entry, not a proxy range). */
#define DEF_ARP_NETMASK (~0)

/* Hash-table size must stay a power of two (see HASH below). */
#define ARP_TABLE_SIZE  16
/* One extra slot holds the proxy-ARP list. */
#define FULL_ARP_TABLE_SIZE (ARP_TABLE_SIZE+1)

struct arp_table *arp_tables[FULL_ARP_TABLE_SIZE] =
{
	NULL,
};

/* The spare slot at the end of the table is the proxy list head. */
#define arp_proxy_list arp_tables[ARP_TABLE_SIZE]

/* Hash an IP address (network order in memory) into a bucket index. */
#define HASH(paddr) 	(htonl(paddr) & (ARP_TABLE_SIZE - 1))
221
222
223
224
225
/* Take a reference on the ARP table lock (non-blocking counter). */
static __inline__ void arp_fast_lock(void)
{
	ATOMIC_INCR(&arp_lock);
}
230
/* Drop a lock reference WITHOUT running deferred bottom-half work. */
static __inline__ void arp_fast_unlock(void)
{
	ATOMIC_DECR(&arp_lock);
}
235
/* Drop a lock reference; the last holder out runs any pending
   bottom-half work (e.g. backlogged entry insertion). */
static __inline__ void arp_unlock(void)
{
	if (!ATOMIC_DECR_AND_CHECK(&arp_lock) && arp_bh_mask)
		arp_run_bh();
}
241
242
243
244
245
246 static void arp_enqueue(struct arp_table **q, struct arp_table *entry)
247 {
248 unsigned long flags;
249 struct arp_table * tail;
250
251 save_flags(flags);
252 cli();
253 tail = *q;
254 if (!tail)
255 entry->next = entry;
256 else
257 {
258 entry->next = tail->next;
259 tail->next = entry;
260 }
261 *q = entry;
262 restore_flags(flags);
263 return;
264 }
265
266
267
268
269
270
271 static struct arp_table * arp_dequeue(struct arp_table **q)
272 {
273 struct arp_table * entry;
274
275 if (*q)
276 {
277 entry = (*q)->next;
278 (*q)->next = entry->next;
279 if (entry->next == entry)
280 *q = NULL;
281 entry->next = NULL;
282 return entry;
283 }
284 return NULL;
285 }
286
287
288
289
290
/*
 * Free every sk_buff still queued on an entry.  Interrupts are
 * re-enabled around each dev_kfree_skb() call (freeing may be slow);
 * the queue itself is only touched with interrupts off.
 */
static void arp_release_entry(struct arp_table *entry)
{
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);
	cli();

	/* Release the list of queued `skb' pointers. */
	while ((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		skb_device_lock(skb);
		restore_flags(flags);
		dev_kfree_skb(skb, FREE_WRITE);
		cli();
	}
	restore_flags(flags);
	return;
}
309
310
311
312
313
314
315 static void arp_free_entry(struct arp_table *entry)
316 {
317 unsigned long flags;
318 struct hh_cache *hh, *next;
319
320 del_timer(&entry->timer);
321
322 save_flags(flags);
323 cli();
324 arp_release_entry(entry);
325
326 for (hh = entry->hh; hh; hh = next)
327 {
328 next = hh->hh_next;
329 hh->hh_arp = NULL;
330 if (!--hh->hh_refcnt)
331 kfree_s(hh, sizeof(struct(struct hh_cache)));
332 }
333 restore_flags(flags);
334
335 kfree_s(entry, sizeof(struct arp_table));
336 return;
337 }
338
339
340
341
342
343 static __inline__ int arp_count_hhs(struct arp_table * entry)
344 {
345 struct hh_cache *hh, **hhp;
346 int count = 0;
347
348 hhp = &entry->hh;
349 while ((hh=*hhp) != NULL)
350 {
351 if (hh->hh_refcnt == 1)
352 {
353 *hhp = hh->hh_next;
354 kfree_s(hh, sizeof(struct hh_cache));
355 continue;
356 }
357 count += hh->hh_refcnt-1;
358 hhp = &hh->hh_next;
359 }
360
361 return count;
362 }
363
364
365
366
367
368 static __inline__ void arp_invalidate_hhs(struct arp_table * entry)
369 {
370 struct hh_cache *hh;
371
372 for (hh=entry->hh; hh; hh=hh->hh_next)
373 hh->hh_uptodate = 0;
374 }
375
376
377
378
379
380 static __inline__ void arp_update_hhs(struct arp_table * entry)
381 {
382 struct hh_cache *hh;
383
384 for (hh=entry->hh; hh; hh=hh->hh_next)
385 entry->dev->header_cache_update(hh, entry->dev, entry->ha);
386 }
387
388
389
390
391
392
393
394
395
396
/*
 * Periodic garbage collection of the ARP table (runs from arp_timer).
 *
 * Non-permanent entries that have been unused for ARP_TIMEOUT and have
 * no cached-header references are unlinked and freed.  Resolved entries
 * not confirmed within ARP_CONFIRM_INTERVAL are marked incomplete again
 * and re-probed with a unicast request to their known address.  If the
 * table is locked by someone else, the sweep is skipped this round.
 */
static void arp_check_expire(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;

	del_timer(&arp_timer);

	if (!arp_lock)
	{
		arp_fast_lock();

		for (i = 0; i < ARP_TABLE_SIZE; i++)
		{
			struct arp_table *entry;
			struct arp_table **pentry;

			pentry = &arp_tables[i];

			while ((entry = *pentry) != NULL)
			{
				cli();
				if (now - entry->last_used > ARP_TIMEOUT
				    && !(entry->flags & ATF_PERM)
				    && !arp_count_hhs(entry))
				{
					/* Stale and unreferenced: unlink and free. */
					*pentry = entry->next;
					sti();
#if RT_CACHE_DEBUG >= 2
					printk("arp_expire: %08x expired\n", entry->ip);
#endif
					arp_free_entry(entry);
				}
				else if (entry->last_updated
					 && now - entry->last_updated > ARP_CONFIRM_INTERVAL
					 && !(entry->flags & ATF_PERM))
				{
					/* Needs re-confirmation: mark incomplete,
					   invalidate cached headers and probe the
					   previously known hardware address. */
					struct device * dev = entry->dev;
					pentry = &entry->next;
					entry->flags &= ~ATF_COM;
					arp_invalidate_hhs(entry);
					sti();
					entry->retries = ARP_MAX_TRIES+1;
					del_timer(&entry->timer);
					entry->timer.expires = jiffies + ARP_CONFIRM_TIMEOUT;
					add_timer(&entry->timer);
					arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip,
						 dev, dev->pa_addr, entry->ha,
						 dev->dev_addr, NULL);
#if RT_CACHE_DEBUG >= 2
					printk("arp_expire: %08x requires confirmation\n", entry->ip);
#endif
				}
				else
					pentry = &entry->next;	/* keep entry, advance */
			}
		}
		arp_unlock();
	}

	ip_rt_check_expire();

	/* Re-arm the periodic timer. */
	arp_timer.expires = jiffies + ARP_CHECK_INTERVAL;
	add_timer(&arp_timer);
}
465
466
467
468
469
470
471
/*
 * Per-entry timer handler for an unresolved (or unconfirmed) entry.
 * Retransmits the request while retries remain; once exhausted, the
 * entry is either kept in a "dead" state (if hardware-header caches
 * still reference it) or unlinked from its bucket and freed.
 */
static void arp_expire_request (unsigned long arg)
{
	struct arp_table *entry = (struct arp_table *) arg;
	struct arp_table **pentry;
	unsigned long hash;
	unsigned long flags;

	save_flags(flags);
	cli();

	/* The entry completed while the timer was pending: nothing to do. */
	if (entry->flags & ATF_COM)
	{
		restore_flags(flags);
		return;
	}

	/* Someone else holds the table lock: retry shortly. */
	if (arp_lock)
	{
#if RT_CACHE_DEBUG >= 1
		printk("arp_expire_request: %08x postponed\n", entry->ip);
#endif
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + HZ/10;
		add_timer(&entry->timer);
		restore_flags(flags);
		return;
	}

	arp_fast_lock();
	restore_flags(flags);

	/* Retries left and entry not marked dead: retransmit the request. */
	if (entry->last_updated && --entry->retries > 0)
	{
		struct device *dev = entry->dev;

#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x timed out\n", entry->ip);
#endif
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + ARP_RES_TIME;
		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
			 NULL, dev->dev_addr, NULL);
		arp_unlock();
		return;
	}

	/* Out of retries: drop any packets queued on the entry. */
	arp_release_entry(entry);

	cli();
	if (arp_count_hhs(entry))
	{
		/* Header caches still point at us: keep the entry alive as
		   "dead" (last_updated = 0) and probe again much later.
		   NOTE(review): arp_release_entry() is invoked a second time
		   below; the queue was just emptied, so it appears to be a
		   harmless no-op — confirm before removing. */
		struct device *dev = entry->dev;
#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x is dead\n", entry->ip);
#endif
		arp_release_entry(entry);
		entry->retries = ARP_MAX_TRIES;
		restore_flags(flags);
		entry->last_updated = 0;
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + ARP_DEAD_RES_TIME;
		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
			 NULL, dev->dev_addr, NULL);
		arp_unlock();
		return;
	}
	restore_flags(flags);

	/* No references remain: unlink from the hash chain and free. */
	hash = HASH(entry->ip);

	pentry = &arp_tables[hash];

	while (*pentry != NULL)
	{
		if (*pentry == entry)
		{
			cli();
			*pentry = entry->next;
			restore_flags(flags);
#if RT_CACHE_DEBUG >= 2
			printk("arp_expire_request: %08x is killed\n", entry->ip);
#endif
			arp_free_entry(entry);
			arp_unlock();
			return;
		}
		pentry = &(*pentry)->next;
	}
	printk("arp_expire_request: bug: ARP entry is lost!\n");
	arp_unlock();
}
572
573
574
575
576
577 int arp_device_event(struct notifier_block *this, unsigned long event, void *ptr)
578 {
579 struct device *dev=ptr;
580 int i;
581
582 if (event != NETDEV_DOWN)
583 return NOTIFY_DONE;
584
585
586
587
588 #if RT_CACHE_DEBUG >= 1
589 if (arp_lock)
590 printk("arp_device_event: bug\n");
591 #endif
592 arp_fast_lock();
593
594 for (i = 0; i < FULL_ARP_TABLE_SIZE; i++)
595 {
596 struct arp_table *entry;
597 struct arp_table **pentry = &arp_tables[i];
598
599 while ((entry = *pentry) != NULL)
600 {
601 if (entry->dev == dev)
602 {
603 *pentry = entry->next;
604 arp_free_entry(entry);
605 }
606 else
607 pentry = &entry->next;
608 }
609 }
610 arp_unlock();
611 return NOTIFY_DONE;
612 }
613
614
615
616
617
618
619
/*
 * Build and transmit one ARP packet.
 *
 * type/ptype:  ARP opcode and link-level protocol id.
 * dest_hw:     link destination; NULL means broadcast.
 * target_hw:   ARP target hardware field; NULL yields zeros (the normal
 *              form for a request).
 * Silently does nothing on IFF_NOARP devices or on allocation failure.
 */
void arp_send(int type, int ptype, u32 dest_ip,
	      struct device *dev, u32 src_ip,
	      unsigned char *dest_hw, unsigned char *src_hw,
	      unsigned char *target_hw)
{
	struct sk_buff *skb;
	struct arphdr *arp;
	unsigned char *arp_ptr;

	/* No arp on this interface. */
	if (dev->flags&IFF_NOARP)
		return;

	/* Room for the ARP header, two (hw,ip) address pairs, and the
	   link-level header in front. */
	skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
				+ dev->hard_header_len, GFP_ATOMIC);
	if (skb == NULL)
	{
		printk("ARP: no memory to send an arp packet\n");
		return;
	}
	skb_reserve(skb, dev->hard_header_len);
	arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4));
	skb->arp = 1;		/* already resolved — don't ARP for ARP */
	skb->dev = dev;
	skb->free = 1;

	/* Fill the device header; broadcast when no destination hardware
	   address was supplied. */
	dev->hard_header(skb,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len);

	/* Fixed part of the ARP header. */
	arp->ar_hrd = htons(dev->type);
#ifdef CONFIG_AX25
#ifdef CONFIG_NETROM
	arp->ar_pro = (dev->type == ARPHRD_AX25 || dev->type == ARPHRD_NETROM) ? htons(AX25_P_IP) : htons(ETH_P_IP);
#else
	arp->ar_pro = (dev->type != ARPHRD_AX25) ? htons(ETH_P_IP) : htons(AX25_P_IP);
#endif
#else
	arp->ar_pro = htons(ETH_P_IP);
#endif
	arp->ar_hln = dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr=(unsigned char *)(arp+1);

	/* Variable part: sender hw, sender ip, target hw, target ip. */
	memcpy(arp_ptr, src_hw, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &src_ip,4);
	arp_ptr+=4;
	if (target_hw != NULL)
		memcpy(arp_ptr, target_hw, dev->addr_len);
	else
		memset(arp_ptr, 0, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &dest_ip, 4);

	dev_queue_xmit(skb, dev, 0);
}
689
690
691
692
693
/*
 * Transmit every packet that was queued on an entry while it was being
 * resolved.  The entry must be complete (ATF_COM): rebuild_header()
 * patches each frame with the now-known hardware address.
 */
static void arp_send_q(struct arp_table *entry)
{
	struct sk_buff *skb;

	unsigned long flags;

	/* The queue cannot be flushed until the address is known. */
	if(!(entry->flags&ATF_COM))
	{
		printk("arp_send_q: incomplete entry for %s\n",
				in_ntoa(entry->ip));
		/* Should never happen; callers flush only after completion. */
		return;
	}

	save_flags(flags);

	cli();
	while((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		IS_SKB(skb);
		skb_device_lock(skb);
		restore_flags(flags);	/* transmit with interrupts enabled */
		if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb))
		{
			skb->arp = 1;
			if(skb->sk==NULL)
				dev_queue_xmit(skb, skb->dev, 0);
			else
				dev_queue_xmit(skb,skb->dev,skb->sk->priority);
		}
	}
	restore_flags(flags);
}
734
735
736
737
738
739
740 static void arp_destroy(struct arp_table * entry)
741 {
742 struct arp_table *entry1;
743 struct arp_table **pentry;
744
745 if (entry->flags & ATF_PUBL)
746 pentry = &arp_proxy_list;
747 else
748 pentry = &arp_tables[HASH(entry->ip)];
749
750 while ((entry1 = *pentry) != NULL)
751 {
752 if (entry1 == entry)
753 {
754 *pentry = entry1->next;
755 del_timer(&entry->timer);
756 arp_free_entry(entry);
757 return;
758 }
759 pentry = &entry1->next;
760 }
761 }
762
763
764
765
766
767
768
/*
 * Receive an incoming ARP packet (registered packet handler).
 *
 * Validates the header against the receiving device, answers requests
 * for our own address (and proxied addresses), and records/updates the
 * sender's <IP, hardware> binding when the packet is addressed to us.
 * The skb is always consumed; the return value is always 0.
 */
int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
{
	struct arphdr *arp = (struct arphdr *)skb->h.raw;
	unsigned char *arp_ptr= (unsigned char *)(arp+1);
	struct arp_table *entry;
	struct arp_table *proxy_entry;
	unsigned long hash;
	unsigned char ha[MAX_ADDR_LEN];
	unsigned char *sha,*tha;
	u32 sip,tip;

	/* Hardware length must match the device, the protocol must be
	   4-byte IPv4, and the device must do ARP at all. */
	if (arp->ar_hln != dev->addr_len ||
	    dev->type != ntohs(arp->ar_hrd) ||
	    dev->flags & IFF_NOARP ||
	    arp->ar_pln != 4)
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	/* The protocol identifier for IP differs per media type; reject
	   anything that is not IP for this device type. */
	switch (dev->type)
	{
#ifdef CONFIG_AX25
		case ARPHRD_AX25:
			if(arp->ar_pro != htons(AX25_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;
#endif
#ifdef CONFIG_NETROM
		case ARPHRD_NETROM:
			if(arp->ar_pro != htons(AX25_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;
#endif
		case ARPHRD_ETHER:
		case ARPHRD_ARCNET:
			if(arp->ar_pro != htons(ETH_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;

		case ARPHRD_IEEE802:
			if(arp->ar_pro != htons(ETH_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;

		default:
			printk("ARP: dev->type mangled!\n");
			kfree_skb(skb, FREE_READ);
			return 0;
	}

	/* Extract the address fields, laid out back to back after the
	   fixed header: sender hw, sender ip, target hw, target ip. */
	sha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	tha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Loopback or multicast targets are never valid in ARP. */
	if (LOOPBACK(tip) || MULTICAST(tip))
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	if (arp->ar_op == htons(ARPOP_REQUEST))
	{
		/* Request not for us: maybe we proxy-ARP for the target. */
		if (tip != dev->pa_addr)
		{
			arp_fast_lock();

			for (proxy_entry=arp_proxy_list;
			     proxy_entry;
			     proxy_entry = proxy_entry->next)
			{
				/* Match on device and (masked) target address. */
				if (proxy_entry->dev == dev &&
				    !((proxy_entry->ip^tip)&proxy_entry->mask))
					break;

			}
			if (proxy_entry)
			{
				/* Reply on behalf of the proxied host. */
				memcpy(ha, proxy_entry->ha, dev->addr_len);
				arp_unlock();
				arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,ha, sha);
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			else
			{
				arp_unlock();
				kfree_skb(skb, FREE_READ);
				return 0;
			}
		}
		else
		{
			/* A request for our address: answer it. */
			arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
		}
	}

	/* Only learn sender bindings from packets addressed to us. */
	if(ip_chk_addr(tip)!=IS_MYADDR)
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	/* Look the sender up; update an existing entry or create one. */
	arp_fast_lock();

	hash = HASH(sip);

	for (entry=arp_tables[hash]; entry; entry=entry->next)
		if (entry->ip == sip && entry->dev == dev)
			break;

	if (entry)
	{
		/* Refresh the hardware address unless the entry is locked. */
		if (!(entry->flags & ATF_PERM)) {
			memcpy(entry->ha, sha, dev->addr_len);
			entry->last_updated = jiffies;
		}
		if (!(entry->flags & ATF_COM))
		{
			/* The entry just completed: stop resolution, refresh
			   cached headers and flush packets queued on it. */
			del_timer(&entry->timer);
			entry->flags |= ATF_COM;
			arp_update_hhs(entry);

			arp_send_q(entry);
		}
	}
	else
	{
		/* Unknown sender: create a fresh, already-complete entry. */
		entry = (struct arp_table *)kmalloc(sizeof(struct arp_table),GFP_ATOMIC);
		if(entry == NULL)
		{
			arp_unlock();
			printk("ARP: no memory for new arp entry\n");
			kfree_skb(skb, FREE_READ);
			return 0;
		}

		entry->mask = DEF_ARP_NETMASK;
		entry->ip = sip;
		entry->flags = ATF_COM;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;
		memcpy(entry->ha, sha, dev->addr_len);
		entry->last_updated = entry->last_used = jiffies;
		entry->dev = skb->dev;
		skb_queue_head_init(&entry->skb);
		if (arp_lock == 1)
		{
			/* Only our own lock held: insert directly. */
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
		}
		else
		{
			/* Table busy: defer insertion to the bottom half. */
#if RT_CACHE_DEBUG >= 1
			printk("arp_rcv: %08x backlogged\n", entry->ip);
#endif
			arp_enqueue(&arp_backlog, entry);
			arp_bh_mask |= ARP_BH_BACKLOG;
		}
	}

	/* Any reply has been sent by now; the packet can go. */
	kfree_skb(skb, FREE_READ);
	arp_unlock();
	return 0;
}
1044
1045
1046
1047
1048
1049
1050
1051
1052 static struct arp_table *arp_lookup(u32 paddr, unsigned short flags, struct device * dev)
1053 {
1054 struct arp_table *entry;
1055
1056 if (!(flags & ATF_PUBL))
1057 {
1058 for (entry = arp_tables[HASH(paddr)];
1059 entry != NULL; entry = entry->next)
1060 if (entry->ip == paddr && entry->dev == dev)
1061 break;
1062 return entry;
1063 }
1064
1065 if (!(flags & ATF_NETMASK))
1066 {
1067 for (entry = arp_proxy_list;
1068 entry != NULL; entry = entry->next)
1069 if (entry->ip == paddr && entry->dev == dev)
1070 break;
1071 return entry;
1072 }
1073
1074 for (entry=arp_proxy_list; entry != NULL; entry = entry->next)
1075 if (!((entry->ip^paddr)&entry->mask) && entry->dev == dev)
1076 break;
1077 return entry;
1078 }
1079
1080
1081
1082
1083
1084 int arp_query(unsigned char *haddr, u32 paddr, struct device * dev)
1085 {
1086 struct arp_table *entry;
1087
1088 arp_fast_lock();
1089
1090 entry = arp_lookup(paddr, 0, dev);
1091
1092 if (entry != NULL)
1093 {
1094 entry->last_used = jiffies;
1095 if (entry->flags & ATF_COM)
1096 {
1097 memcpy(haddr, entry->ha, dev->addr_len);
1098 arp_unlock();
1099 return 1;
1100 }
1101 }
1102 arp_unlock();
1103 return 0;
1104 }
1105
1106
/*
 * Handle addresses that never need a real ARP lookup: our own address,
 * multicast (mapped algorithmically on ethernet/802) and broadcast.
 * Fills *haddr and returns 1 when the address was handled, else 0.
 */
static int arp_set_predefined(int addr_hint, unsigned char * haddr, __u32 paddr, struct device * dev)
{
	switch (addr_hint)
	{
		case IS_MYADDR:
			printk("ARP: arp called for own IP address\n");
			memcpy(haddr, dev->dev_addr, dev->addr_len);
			return 1;
#ifdef CONFIG_IP_MULTICAST
		case IS_MULTICAST:
			if(dev->type==ARPHRD_ETHER || dev->type==ARPHRD_IEEE802)
			{
				/* Map the low 23 bits of the group address
				   under the 01:00:5e multicast MAC prefix. */
				u32 taddr;
				haddr[0]=0x01;
				haddr[1]=0x00;
				haddr[2]=0x5e;
				taddr=ntohl(paddr);
				haddr[5]=taddr&0xff;
				taddr=taddr>>8;
				haddr[4]=taddr&0xff;
				taddr=taddr>>8;
				haddr[3]=taddr&0x7f;
				return 1;
			}
			/* fallthrough: devices without a multicast mapping
			   use the broadcast address instead */
#endif

		case IS_BROADCAST:
			memcpy(haddr, dev->broadcast, dev->addr_len);
			return 1;
	}
	return 0;
}
1142
1143
1144
1145
1146
/*
 * Resolve paddr for transmission.  Returns 0 with *haddr filled when
 * the hardware address is known (predefined or cached & complete).
 * Otherwise returns 1: the skb (if any) is queued on the entry or
 * bounced, a new unresolved entry is created when none exists, and a
 * broadcast request is sent to start/continue resolution.
 */
int arp_find(unsigned char *haddr, u32 paddr, struct device *dev,
	     u32 saddr, struct sk_buff *skb)
{
	struct arp_table *entry;
	unsigned long hash;

	/* Our own / multicast / broadcast addresses need no lookup. */
	if (arp_set_predefined(ip_chk_addr(paddr), haddr, paddr, dev))
	{
		if (skb)
			skb->arp = 1;
		return 0;
	}

	hash = HASH(paddr);
	arp_fast_lock();

	/* Find an existing entry. */
	entry = arp_lookup(paddr, 0, dev);

	if (entry != NULL)	/* It exists */
	{
		if (!(entry->flags & ATF_COM))
		{
			/* A request is already outstanding: park the packet
			   on the entry's queue... */
			if (skb != NULL)
			{
				if (entry->last_updated)
				{
					skb_queue_tail(&entry->skb, skb);
					skb_device_unlock(skb);
				}
				/* ...unless the entry is dead (last_updated
				   zeroed): then bounce the packet instead. */
				else
				{
#if 0
					/* Alternative: report EHOSTDOWN on the
					   owning socket instead of via ICMP. */
					if (skb->sk)
					{
						skb->sk->err = EHOSTDOWN;
						skb->sk->error_report(skb->sk);
					}
#else
					icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
#endif
					dev_kfree_skb(skb, FREE_WRITE);
				}
			}
			arp_unlock();
			return 1;
		}

		/* Resolved: hand the cached address back. */
		entry->last_used = jiffies;
		memcpy(haddr, entry->ha, dev->addr_len);
		if (skb)
			skb->arp = 1;
		arp_unlock();
		return 0;
	}

	/* No entry: create an unresolved one and start resolution. */
	entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
					GFP_ATOMIC);
	if (entry != NULL)
	{
		entry->last_updated = entry->last_used = jiffies;
		entry->flags = 0;
		entry->ip = paddr;
		entry->mask = DEF_ARP_NETMASK;
		memset(entry->ha, 0, dev->addr_len);
		entry->dev = dev;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;
		entry->timer.expires = jiffies + ARP_RES_TIME;
		skb_queue_head_init(&entry->skb);
		if (skb != NULL)
		{
			skb_queue_tail(&entry->skb, skb);
			skb_device_unlock(skb);
		}
		if (arp_lock == 1)
		{
			/* Only our own lock held: insert directly. */
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
			add_timer(&entry->timer);
			entry->retries = ARP_MAX_TRIES;
		}
		else
		{
			/* Table busy: defer insertion to the bottom half. */
#if RT_CACHE_DEBUG >= 1
			printk("arp_find: %08x backlogged\n", entry->ip);
#endif
			arp_enqueue(&arp_backlog, entry);
			arp_bh_mask |= ARP_BH_BACKLOG;
		}
	}
	else if (skb != NULL)
		dev_kfree_skb(skb, FREE_WRITE);
	arp_unlock();

	/* Probe for the address (even if entry allocation failed). */
	arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, saddr, NULL,
		 dev->dev_addr, NULL);

	return 1;
}
1276
1277
1278
1279
1280
1281
/* Buffer for the printable hardware address ("xx:xx:...:xx"). */
#define HBUFFERLEN 30

/*
 * /proc/net/arp read handler: render the ARP (and proxy) tables as a
 * text table, implementing the usual proc offset/length windowing.
 */
int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
	int len=0;
	off_t pos=0;
	int size;
	struct arp_table *entry;
	char hbuffer[HBUFFERLEN];
	int i,j,k;
	const char hexbuf[] = "0123456789ABCDEF";

	size = sprintf(buffer,"IP address HW type Flags HW address Mask Device\n");

	pos+=size;
	len+=size;

	arp_fast_lock();

	for(i=0; i<FULL_ARP_TABLE_SIZE; i++)
	{
		for(entry=arp_tables[i]; entry!=NULL; entry=entry->next)
		{
			/* Convert the hardware address to a printable form
			   (AX.25/NET/ROM use their own callsign format). */
#ifdef CONFIG_AX25
#ifdef CONFIG_NETROM
			if (entry->dev->type == ARPHRD_AX25 || entry->dev->type == ARPHRD_NETROM)
				strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
			else {
#else
			if(entry->dev->type==ARPHRD_AX25)
				strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
			else {
#endif
#endif

			/* -3 keeps room for two hex digits plus the NUL. */
			for(k=0,j=0;k<HBUFFERLEN-3 && j<entry->dev->addr_len;j++)
			{
				hbuffer[k++]=hexbuf[ (entry->ha[j]>>4)&15 ];
				hbuffer[k++]=hexbuf[ entry->ha[j]&15 ];
				hbuffer[k++]=':';
			}
			hbuffer[--k]=0;	/* strip the trailing ':' */

#ifdef CONFIG_AX25
			}
#endif
			size = sprintf(buffer+len,
				"%-17s0x%-10x0x%-10x%s",
				in_ntoa(entry->ip),
				(unsigned int)entry->dev->type,
				entry->flags,
				hbuffer);
#if RT_CACHE_DEBUG < 2
			size += sprintf(buffer+len+size,
				 " %-17s %s\n",
				 entry->mask==DEF_ARP_NETMASK ?
				 "*" : in_ntoa(entry->mask), entry->dev->name);
#else
			/* Debug build also shows hh refcount and validity. */
			size += sprintf(buffer+len+size,
				 " %-17s %s\t%ld\t%1d\n",
				 entry->mask==DEF_ARP_NETMASK ?
				 "*" : in_ntoa(entry->mask), entry->dev->name,
				 entry->hh ? entry->hh->hh_refcnt : -1,
				 entry->hh ? entry->hh->hh_uptodate : 0);
#endif

			len += size;
			pos += size;

			/* proc windowing: skip lines before the requested
			   offset, stop once the window is filled. */
			if (pos <= offset)
				len=0;
			if (pos >= offset+length)
				break;
		}
	}
	arp_unlock();

	*start = buffer+len-(pos-offset);	/* start of wanted data */
	len = pos-offset;
	if (len>length)
		len = length;
	return len;
}
1368
1369
1370
/*
 * Attach (or create) a hardware-header cache entry for paddr/htype and
 * store it in *hhp.  Returns 0 when a new hh was installed into *hhp,
 * 1 when the caller already had one, an existing hh was reused, or the
 * binding could not be made (allocation failure).
 */
int arp_bind_cache(struct hh_cache ** hhp, struct device *dev, unsigned short htype, u32 paddr)
{
	struct arp_table *entry;
	struct hh_cache *hh = *hhp;
	int addr_hint;
	unsigned long flags;

	if (hh)
		return 1;	/* caller already holds a cache entry */

	/* Predefined addresses (own/broadcast/multicast) need no ARP:
	   build the header cache immediately. */
	if ((addr_hint = ip_chk_addr(paddr)) != 0)
	{
		unsigned char haddr[MAX_ADDR_LEN];
		if (hh)
			return 1;
		hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
		if (!hh)
			return 1;
		arp_set_predefined(addr_hint, haddr, paddr, dev);
		hh->hh_uptodate = 0;
		hh->hh_refcnt = 1;
		hh->hh_arp = NULL;
		hh->hh_next = NULL;
		hh->hh_type = htype;
		*hhp = hh;
		dev->header_cache_update(hh, dev, haddr);
		return 0;
	}

	save_flags(flags);

	arp_fast_lock();

	entry = arp_lookup(paddr, 0, dev);

	if (entry)
	{
		/* Reuse an existing hh of the right type, if present. */
		cli();
		for (hh = entry->hh; hh; hh=hh->hh_next)
			if (hh->hh_type == htype)
				break;
		if (hh)
		{
			hh->hh_refcnt++;
			*hhp = hh;
			restore_flags(flags);
			arp_unlock();
			return 1;
		}
		restore_flags(flags);
	}

	/* Allocate a fresh hh for this (address, type). */
	hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
	if (!hh)
	{
		arp_unlock();
		return 1;
	}

	hh->hh_uptodate = 0;
	hh->hh_refcnt = 1;
	hh->hh_arp = NULL;
	hh->hh_next = NULL;
	hh->hh_type = htype;

	if (entry)
	{
		/* Entry exists: link the new hh onto it.  The extra
		   refcnt++ accounts for the entry's own reference. */
		dev->header_cache_update(hh, dev, entry->ha);
		*hhp = hh;
		cli();
		hh->hh_arp = (void*)entry;
		entry->hh = hh;
		hh->hh_refcnt++;
		restore_flags(flags);
		entry->last_used = jiffies;
		arp_unlock();
		return 0;
	}

	/* No ARP entry yet: create an unresolved one carrying this hh
	   and start resolution. */
	entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
					GFP_ATOMIC);
	if (entry == NULL)
	{
		kfree_s(hh, sizeof(struct hh_cache));
		arp_unlock();
		return 1;
	}

	entry->last_updated = entry->last_used = jiffies;
	entry->flags = 0;
	entry->ip = paddr;
	entry->mask = DEF_ARP_NETMASK;
	memset(entry->ha, 0, dev->addr_len);
	entry->dev = dev;
	entry->hh = hh;
	ATOMIC_INCR(&hh->hh_refcnt);	/* the entry's own reference */
	init_timer(&entry->timer);
	entry->timer.function = arp_expire_request;
	entry->timer.data = (unsigned long)entry;
	entry->timer.expires = jiffies + ARP_RES_TIME;
	skb_queue_head_init(&entry->skb);

	if (arp_lock == 1)
	{
		/* Only our own lock held: insert into the table directly. */
		unsigned long hash = HASH(paddr);
		cli();
		entry->next = arp_tables[hash];
		arp_tables[hash] = entry;
		hh->hh_arp = (void*)entry;
		entry->retries = ARP_MAX_TRIES;
		restore_flags(flags);

		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
	}
	else
	{
		/* Table busy: defer insertion to the bottom half. */
#if RT_CACHE_DEBUG >= 1
		printk("arp_cache_bind: %08x backlogged\n", entry->ip);
#endif
		arp_enqueue(&arp_backlog, entry);
		arp_bh_mask |= ARP_BH_BACKLOG;
	}
	*hhp = hh;
	arp_unlock();
	return 0;
}
1503
/*
 * Bottom half: drain the backlog of entries created while the table was
 * locked.  Each backlogged entry is either inserted into its hash chain
 * (and resolution restarted), or — if a duplicate appeared meanwhile —
 * merged into the existing entry (hh list and packet queue) and freed.
 * Only runs when nobody else holds the lock.
 */
static void arp_run_bh()
{
	unsigned long flags;
	struct arp_table *entry, *entry1;
	struct hh_cache *hh;
	__u32 sip;

	save_flags(flags);
	cli();
	if (!arp_lock)
	{
		arp_fast_lock();

		while ((entry = arp_dequeue(&arp_backlog)) != NULL)
		{
			unsigned long hash;
			sti();
			sip = entry->ip;
			hash = HASH(sip);

			/* An entry for the same <ip, dev> pair may have been
			   created while this one sat in the backlog. */
			for (entry1=arp_tables[hash]; entry1; entry1=entry1->next)
				if (entry1->ip==sip && entry1->dev == entry->dev)
					break;

			if (!entry1)
			{
				/* No duplicate: install and (re)start
				   resolution for it. */
				struct device * dev = entry->dev;
				cli();
				entry->next = arp_tables[hash];
				arp_tables[hash] = entry;
				for (hh=entry->hh; hh; hh=hh->hh_next)
					hh->hh_arp = (void*)entry;
				sti();
				del_timer(&entry->timer);
				entry->timer.expires = jiffies + ARP_RES_TIME;
				add_timer(&entry->timer);
				entry->retries = ARP_MAX_TRIES;
				arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
#if RT_CACHE_DEBUG >= 1
				printk("arp_run_bh: %08x reinstalled\n", sip);
#endif
			}
			else
			{
				struct sk_buff * skb;
				struct hh_cache * next;

				/* Duplicate found: move our hh list onto the
				   surviving entry... */
				cli();
				for (hh=entry->hh; hh; hh=next)
				{
					next = hh->hh_next;
					hh->hh_next = entry1->hh;
					entry1->hh = hh;
					hh->hh_arp = (void*)entry1;
				}
				entry->hh = NULL;

				/* ...and requeue our pending packets there. */
				while ((skb = skb_dequeue(&entry->skb)) != NULL)
				{
					skb_device_lock(skb);
					sti();
					skb_queue_tail(&entry1->skb, skb);
					skb_device_unlock(skb);
					cli();
				}
				sti();

#if RT_CACHE_DEBUG >= 1
				printk("arp_run_bh: entry %08x was born dead\n", entry->ip);
#endif
				arp_free_entry(entry);

				/* If the survivor is already resolved, push
				   the transferred work through immediately. */
				if (entry1->flags & ATF_COM)
				{
					arp_update_hhs(entry1);
					arp_send_q(entry1);
				}
			}
			cli();
		}
		arp_bh_mask &= ~ARP_BH_BACKLOG;
		arp_unlock();
	}
	restore_flags(flags);
}
1599
1600
1601
1602
1603
1604
/*
 * SIOCSARP: install or replace an ARP entry from userspace.  Proxy
 * entries (ATF_PUBL) must name a device other than the one the address
 * routes through; ordinary entries must match it.  Any existing entry
 * for the address is destroyed and rebuilt from the request.
 */
static int arp_req_set(struct arpreq *r, struct device * dev)
{
	struct arp_table *entry;
	struct sockaddr_in *si;
	struct rtable *rt;
	struct device * dev1;
	u32 ip;

	/* Extract the destination address from the request. */
	si = (struct sockaddr_in *) &r->arp_pa;
	ip = si->sin_addr.s_addr;

	/* Route the address to learn which device it would use. */
	rt = ip_rt_route(ip, 0);
	if (!rt)
		return -ENETUNREACH;
	dev1 = rt->rt_dev;
	ip_rt_put(rt);

	/* Proxy entries must be for a different device than the route's;
	   ordinary entries for the same one. */
	if (((r->arp_flags & ATF_PUBL) && dev == dev1) ||
	    (!(r->arp_flags & ATF_PUBL) && dev != dev1))
		return -EINVAL;

#if RT_CACHE_DEBUG >= 1
	if (arp_lock)
		printk("arp_req_set: bug\n");
#endif
	arp_fast_lock();

	/* Replace any existing entry for this address. */
	entry = arp_lookup(ip, r->arp_flags & ~ATF_NETMASK, dev);

	if (entry)
	{
		arp_destroy(entry);
		entry = NULL;
	}

	/* Build a new entry on the proxy list or in the hash table. */
	if (entry == NULL)
	{
		entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
					GFP_ATOMIC);
		if (entry == NULL)
		{
			arp_unlock();
			return -ENOMEM;
		}
		entry->ip = ip;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;

		if (r->arp_flags & ATF_PUBL)
		{
			cli();
			entry->next = arp_proxy_list;
			arp_proxy_list = entry;
			sti();
		}
		else
		{
			unsigned long hash = HASH(ip);
			cli();
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
			sti();
		}
		skb_queue_head_init(&entry->skb);
	}

	/* An empty hardware address with ATF_COM means "use our own". */
	if ((r->arp_flags & ATF_COM) && !r->arp_ha.sa_data[0])
		memcpy(&entry->ha, dev->dev_addr, dev->addr_len);
	else
		memcpy(&entry->ha, &r->arp_ha.sa_data, dev->addr_len);
	entry->last_updated = entry->last_used = jiffies;
	entry->flags = r->arp_flags | ATF_COM;
	if ((entry->flags & ATF_PUBL) && (entry->flags & ATF_NETMASK))
	{
		/* Masked proxy entry: record the caller's netmask. */
		si = (struct sockaddr_in *) &r->arp_netmask;
		entry->mask = si->sin_addr.s_addr;
	}
	else
		entry->mask = DEF_ARP_NETMASK;
	entry->dev = dev;
	arp_update_hhs(entry);
	arp_unlock();
	return 0;
}
1719
1720
1721
1722
1723
1724
1725
1726 static int arp_req_get(struct arpreq *r, struct device *dev)
1727 {
1728 struct arp_table *entry;
1729 struct sockaddr_in *si;
1730
1731 si = (struct sockaddr_in *) &r->arp_pa;
1732
1733 #if RT_CACHE_DEBUG >= 1
1734 if (arp_lock)
1735 printk("arp_req_set: bug\n");
1736 #endif
1737 arp_fast_lock();
1738
1739 entry = arp_lookup(si->sin_addr.s_addr, r->arp_flags|ATF_NETMASK, dev);
1740
1741 if (entry == NULL)
1742 {
1743 arp_unlock();
1744 return -ENXIO;
1745 }
1746
1747
1748
1749
1750
1751 memcpy(r->arp_ha.sa_data, &entry->ha, entry->dev->addr_len);
1752 r->arp_ha.sa_family = entry->dev->type;
1753 r->arp_flags = entry->flags;
1754 strncpy(r->arp_dev, entry->dev->name, 16);
1755 arp_unlock();
1756 return 0;
1757 }
1758
1759 static int arp_req_delete(struct arpreq *r, struct device * dev)
1760 {
1761 struct arp_table *entry;
1762 struct sockaddr_in *si;
1763
1764 si = (struct sockaddr_in *) &r->arp_pa;
1765 #if RT_CACHE_DEBUG >= 1
1766 if (arp_lock)
1767 printk("arp_req_delete: bug\n");
1768 #endif
1769 arp_fast_lock();
1770
1771 if (!(r->arp_flags & ATF_PUBL))
1772 {
1773 for (entry = arp_tables[HASH(si->sin_addr.s_addr)];
1774 entry != NULL; entry = entry->next)
1775 if (entry->ip == si->sin_addr.s_addr
1776 && entry->dev == dev)
1777 {
1778 arp_destroy(entry);
1779 arp_unlock();
1780 return 0;
1781 }
1782 }
1783 else
1784 {
1785 for (entry = arp_proxy_list;
1786 entry != NULL; entry = entry->next)
1787 if (entry->ip == si->sin_addr.s_addr
1788 && entry->dev == dev)
1789 {
1790 arp_destroy(entry);
1791 arp_unlock();
1792 return 0;
1793 }
1794 }
1795
1796 arp_unlock();
1797 return -ENXIO;
1798 }
1799
1800
1801
1802
1803
/*
 *	Handle the ARP ioctls (SIOCSARP/SIOCGARP/SIOCDARP and their
 *	pre-netmask OLD_* ancestors).  Copies the request from user
 *	space, validates it, resolves the target device, and dispatches
 *	to arp_req_set / arp_req_get / arp_req_delete.
 */
int arp_ioctl(unsigned int cmd, void *arg)
{
	int err;
	struct arpreq r;

	struct device * dev = NULL;

	switch(cmd)
	{
		case SIOCDARP:
		case SIOCSARP:
			/* Modifying the cache is privileged; reading is not. */
			if (!suser())
				return -EPERM;
			/* fall through — shares the copy-in with SIOCGARP */
		case SIOCGARP:
			err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq));
			if (err)
				return err;
			memcpy_fromfs(&r, arg, sizeof(struct arpreq));
			break;
		case OLD_SIOCDARP:
		case OLD_SIOCSARP:
			if (!suser())
				return -EPERM;
			/* fall through — shares the copy-in with OLD_SIOCGARP */
		case OLD_SIOCGARP:
			/* Old-style request lacks arp_dev; copy the shorter
			 * struct and blank the device name ourselves. */
			err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq_old));
			if (err)
				return err;
			memcpy_fromfs(&r, arg, sizeof(struct arpreq_old));
			memset(&r.arp_dev, 0, sizeof(r.arp_dev));
			break;
		default:
			return -EINVAL;
	}

	/* Only IPv4 protocol addresses are supported, and 0.0.0.0 is invalid. */
	if (r.arp_pa.sa_family != AF_INET)
		return -EPFNOSUPPORT;
	if (((struct sockaddr_in *)&r.arp_pa)->sin_addr.s_addr == 0)
		return -EINVAL;

	if (r.arp_dev[0])
	{
		/* Caller named a device explicitly: its hardware type must
		 * match (or fill in) the requested hardware family. */
		if ((dev = dev_get(r.arp_dev)) == NULL)
			return -ENODEV;

		if (!r.arp_ha.sa_family)
			r.arp_ha.sa_family = dev->type;
		else if (r.arp_ha.sa_family != dev->type)
			return -EINVAL;
	}
	else
	{
		/* No device named: pick the first device of the requested
		 * hardware type. */
		if ((dev = dev_getbytype(r.arp_ha.sa_family)) == NULL)
			return -ENODEV;
	}

	switch(cmd)
	{
		case SIOCDARP:
		        return arp_req_delete(&r, dev);
		case SIOCSARP:
			return arp_req_set(&r, dev);
		case OLD_SIOCDARP:
			/* Old ioctl had no proxy flag: try deleting the
			 * plain entry first, then the proxy one; succeed
			 * if either delete worked. */
			r.arp_flags &= ~ATF_PUBL;
			err = arp_req_delete(&r, dev);
			r.arp_flags |= ATF_PUBL;
			if (!err)
				arp_req_delete(&r, dev);
			else
				err = arp_req_delete(&r, dev);
			return err;
		case OLD_SIOCSARP:
			err = arp_req_set(&r, dev);
			/*
			 *	Setting a proxy entry must also remove any
			 *	stale non-proxy entry for the same address,
			 *	since the old interface could not express
			 *	both at once.
			 */
			if (r.arp_flags & ATF_PUBL)
			{
				r.arp_flags &= ~ATF_PUBL;
				arp_req_delete(&r, dev);
			}
			return err;
		case SIOCGARP:
			err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq));
			if (err)
				return err;
			err = arp_req_get(&r, dev);
			if (!err)
				memcpy_tofs(arg, &r, sizeof(r));
			return err;
		case OLD_SIOCGARP:
			err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq_old));
			if (err)
				return err;
			/* Old get: prefer the plain entry, fall back to the
			 * proxy entry if the plain lookup failed. */
			r.arp_flags &= ~ATF_PUBL;
			err = arp_req_get(&r, dev);
			if (err < 0)
			{
				r.arp_flags |= ATF_PUBL;
				err = arp_req_get(&r, dev);
			}
			if (!err)
				memcpy_tofs(arg, &r, sizeof(struct arpreq_old));
			return err;
	}
	/* Unreachable: every cmd accepted above returns from the switch. */
	return 0;
}
1919
1920
1921
1922
1923
1924
/*
 *	Protocol handler hooked into the network core so every received
 *	ARP frame is delivered to arp_rcv().  The type field is filled in
 *	with htons(ETH_P_ARP) at arp_init() time.
 */
static struct packet_type arp_packet_type =
{
	0,		/* type: set to htons(ETH_P_ARP) in arp_init() */
	NULL,		/* dev: NULL = listen on all devices */
	arp_rcv,	/* func: receive handler */
	NULL,		/* data */
	NULL		/* next: filled in by dev_add_pack() */
};
1933
/*
 *	Notifier so arp_device_event() can flush cache entries when a
 *	network device goes down or is unregistered.
 */
static struct notifier_block arp_dev_notifier={
	arp_device_event,	/* notifier_call */
	NULL,			/* next: linked in by the notifier core */
	0			/* priority */
};
1939
/*
 *	One-time ARP subsystem initialisation, called at boot:
 *	register the ARP ethertype handler, start the cache-expiry
 *	timer, hook device up/down events, and publish /proc/net/arp.
 */
void arp_init (void)
{
	/* Register the packet type on all devices */
	arp_packet_type.type=htons(ETH_P_ARP);
	dev_add_pack(&arp_packet_type);
	/* Start with the regular checks for expired arp entries
	 * (arp_timer is presumably pre-armed with interval/handler
	 * elsewhere in this file — set up before this chunk). */
	add_timer(&arp_timer);
	/* Register for device down reports */
	register_netdevice_notifier(&arp_dev_notifier);

	/* Publish the cache as /proc/net/arp via arp_get_info().
	 * NOTE(review): the proc_dir_entry is a compound literal with
	 * automatic storage — assumes proc_net_register() copies it;
	 * confirm before restructuring. */
	proc_net_register(&(struct proc_dir_entry) {
		PROC_NET_ARP, 3, "arp",
		S_IFREG | S_IRUGO, 1, 0, 0,
		0, &proc_net_inode_operations,
		arp_get_info
	});
}
1957