This source file includes the following definitions.
- arp_fast_lock
- arp_fast_unlock
- arp_unlock
- arp_enqueue
- arp_dequeue
- arp_release_entry
- arp_free_entry
- arp_count_hhs
- arp_invalidate_hhs
- arp_update_hhs
- arp_check_expire
- arp_expire_request
- arp_device_event
- arp_send
- arp_send_q
- arp_destroy
- arp_rcv
- arp_lookup
- arp_query
- arp_set_predefined
- arp_find
- arp_get_info
- arp_bind_cache
- arp_run_bh
- arp_req_set
- arp_req_get
- arp_req_delete
- arp_ioctl
- arp_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62 #include <linux/types.h>
63 #include <linux/string.h>
64 #include <linux/kernel.h>
65 #include <linux/sched.h>
66 #include <linux/config.h>
67 #include <linux/socket.h>
68 #include <linux/sockios.h>
69 #include <linux/errno.h>
70 #include <linux/if_arp.h>
71 #include <linux/in.h>
72 #include <linux/mm.h>
73 #include <asm/system.h>
74 #include <asm/segment.h>
75 #include <stdarg.h>
76 #include <linux/inet.h>
77 #include <linux/netdevice.h>
78 #include <linux/etherdevice.h>
79 #include <linux/trdevice.h>
80 #include <net/ip.h>
81 #include <net/route.h>
82 #include <net/protocol.h>
83 #include <net/tcp.h>
84 #include <linux/skbuff.h>
85 #include <net/sock.h>
86 #include <net/arp.h>
87 #ifdef CONFIG_AX25
88 #include <net/ax25.h>
89 #ifdef CONFIG_NETROM
90 #include <net/netrom.h>
91 #endif
92 #endif
93 #include <linux/proc_fs.h>
94 #include <linux/stat.h>
95
96
97
98
99
100
101
102
103
104 struct arp_table
105 {
106 struct arp_table *next;
107 unsigned long last_used;
108 unsigned long last_updated;
109 unsigned int flags;
110 u32 ip;
111 u32 mask;
112 unsigned char ha[MAX_ADDR_LEN];
113 struct device *dev;
114
115
116
117
118
119 struct timer_list timer;
120 int retries;
121 struct sk_buff_head skb;
122 struct hh_cache *hh;
123 };
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139 #define ARP_RES_TIME (5*HZ)
140 #define ARP_DEAD_RES_TIME (60*HZ)
141
142
143
144
145
146
147 #define ARP_MAX_TRIES 3
148
149
150
151
152
153 #define ARP_TIMEOUT (600*HZ)
154
155
156
157
158
159
160
161 #define ARP_CHECK_INTERVAL (60*HZ)
162
163
164
165
166
167
168
169 #define ARP_CONFIRM_INTERVAL (300*HZ)
170 #define ARP_CONFIRM_TIMEOUT ARP_RES_TIME
171
172 static unsigned long arp_lock;
173 static unsigned long arp_bh_mask;
174
175 #define ARP_BH_BACKLOG 1
176
177 static struct arp_table *arp_backlog;
178
179 static void arp_run_bh(void);
180 static void arp_check_expire (unsigned long);
181
182 static struct timer_list arp_timer =
183 { NULL, NULL, ARP_CHECK_INTERVAL, 0L, &arp_check_expire };
184
185
186
187
188
189
190 #define DEF_ARP_NETMASK (~0)
191
192
193
194
195
196
197
198 #define ARP_TABLE_SIZE 16
199 #define FULL_ARP_TABLE_SIZE (ARP_TABLE_SIZE+1)
200
201 struct arp_table *arp_tables[FULL_ARP_TABLE_SIZE] =
202 {
203 NULL,
204 };
205
206 #define arp_proxy_list arp_tables[ARP_TABLE_SIZE]
207
208
209
210
211
212
213 #define HASH(paddr) (htonl(paddr) & (ARP_TABLE_SIZE - 1))
214
215
216
217
218
/*
 *	Take the ARP soft lock: bump the atomic counter.  While the counter
 *	is non-zero, table mutation is deferred to the backlog (see
 *	arp_bh_mask / arp_run_bh).
 */
static __inline__ void arp_fast_lock(void)
{
	ATOMIC_INCR(&arp_lock);
}
223
/*
 *	Drop the ARP soft lock WITHOUT running deferred work — use
 *	arp_unlock() when pending backlog processing must be kicked.
 */
static __inline__ void arp_fast_unlock(void)
{
	ATOMIC_DECR(&arp_lock);
}
228
/*
 *	Drop the ARP soft lock; if this was the last holder and bottom-half
 *	work is pending (arp_bh_mask set), run it now.
 */
static __inline__ void arp_unlock(void)
{
	if (!ATOMIC_DECR_AND_CHECK(&arp_lock) && arp_bh_mask)
		arp_run_bh();
}
234
235
236
237
238
239 static void arp_enqueue(struct arp_table **q, struct arp_table *entry)
240 {
241 unsigned long flags;
242 struct arp_table * tail;
243
244 save_flags(flags);
245 cli();
246 tail = *q;
247 if (!tail)
248 entry->next = entry;
249 else
250 {
251 entry->next = tail->next;
252 tail->next = entry;
253 }
254 *q = entry;
255 restore_flags(flags);
256 return;
257 }
258
259
260
261
262
263
264 static struct arp_table * arp_dequeue(struct arp_table **q)
265 {
266 struct arp_table * entry;
267
268 if (*q)
269 {
270 entry = (*q)->next;
271 (*q)->next = entry->next;
272 if (entry->next == entry)
273 *q = NULL;
274 entry->next = NULL;
275 return entry;
276 }
277 return NULL;
278 }
279
280
281
282
283
/*
 *	Free every packet queued on an (unresolved) entry.  The queue is
 *	drained with interrupts disabled, but they are re-enabled around
 *	each dev_kfree_skb() since freeing may be slow; the skb is device-
 *	locked first so it cannot be requeued meanwhile.
 */
static void arp_release_entry(struct arp_table *entry)
{
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);
	cli();

	while ((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		skb_device_lock(skb);
		restore_flags(flags);	/* interrupts back on for the free */
		dev_kfree_skb(skb, FREE_WRITE);
		cli();			/* off again before the next dequeue */
	}
	restore_flags(flags);
	return;
}
302
303
304
305
306
307
308 static void arp_free_entry(struct arp_table *entry)
309 {
310 unsigned long flags;
311 struct hh_cache *hh, *next;
312
313 del_timer(&entry->timer);
314
315 save_flags(flags);
316 cli();
317 arp_release_entry(entry);
318
319 for (hh = entry->hh; hh; hh = next)
320 {
321 next = hh->hh_next;
322 hh->hh_arp = NULL;
323 if (!--hh->hh_refcnt)
324 kfree_s(hh, sizeof(struct(struct hh_cache)));
325 }
326 restore_flags(flags);
327
328 kfree_s(entry, sizeof(struct arp_table));
329 return;
330 }
331
332
333
334
335
336 static __inline__ int arp_count_hhs(struct arp_table * entry)
337 {
338 struct hh_cache *hh, **hhp;
339 int count = 0;
340
341 hhp = &entry->hh;
342 while ((hh=*hhp) != NULL)
343 {
344 if (hh->hh_refcnt == 1)
345 {
346 *hhp = hh->hh_next;
347 kfree_s(hh, sizeof(struct hh_cache));
348 continue;
349 }
350 count += hh->hh_refcnt-1;
351 hhp = &hh->hh_next;
352 }
353
354 return count;
355 }
356
357
358
359
360
361 static __inline__ void arp_invalidate_hhs(struct arp_table * entry)
362 {
363 struct hh_cache *hh;
364
365 for (hh=entry->hh; hh; hh=hh->hh_next)
366 hh->hh_uptodate = 0;
367 }
368
369
370
371
372
373 static __inline__ void arp_update_hhs(struct arp_table * entry)
374 {
375 struct hh_cache *hh;
376
377 for (hh=entry->hh; hh; hh=hh->hh_next)
378 entry->dev->header_cache_update(hh, entry->dev, entry->ha);
379 }
380
381
382
383
384
385
386
387
388
389
/*
 *	Periodic (ARP_CHECK_INTERVAL) sweep of the ARP table.  Entries idle
 *	longer than ARP_TIMEOUT are freed; entries not confirmed within
 *	ARP_CONFIRM_INTERVAL are demoted to incomplete and re-requested.
 *	The whole sweep is skipped if the table is soft-locked.  Also kicks
 *	the routing-cache expiry and re-arms itself.
 */
static void arp_check_expire(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;

	del_timer(&arp_timer);

	if (!arp_lock)
	{
		arp_fast_lock();

		for (i = 0; i < ARP_TABLE_SIZE; i++)
		{
			struct arp_table *entry;
			struct arp_table **pentry;

			pentry = &arp_tables[i];

			while ((entry = *pentry) != NULL)
			{
				cli();
				/* dead: idle too long, not permanent, and no
				   external hh references keep it alive */
				if (now - entry->last_used > ARP_TIMEOUT
				    && !(entry->flags & ATF_PERM)
				    && !arp_count_hhs(entry))
				{
					*pentry = entry->next;
					sti();
#if RT_CACHE_DEBUG >= 2
					printk("arp_expire: %08x expired\n", entry->ip);
#endif
					arp_free_entry(entry);
				}
				/* stale: confirmed long ago — re-verify it */
				else if (entry->last_updated
					 && now - entry->last_updated > ARP_CONFIRM_INTERVAL
					 && !(entry->flags & ATF_PERM))
				{
					struct device * dev = entry->dev;
					pentry = &entry->next;
					entry->flags &= ~ATF_COM;	/* back to incomplete */
					arp_invalidate_hhs(entry);
					sti();
					entry->retries = ARP_MAX_TRIES+1;
					del_timer(&entry->timer);
					entry->timer.expires = jiffies + ARP_CONFIRM_TIMEOUT;
					add_timer(&entry->timer);
					/* unicast probe to the cached address */
					arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip,
						 dev, dev->pa_addr, entry->ha,
						 dev->dev_addr, NULL);
#if RT_CACHE_DEBUG >= 2
					printk("arp_expire: %08x requires confirmation\n", entry->ip);
#endif
				}
				else
					pentry = &entry->next;
			}
		}
		arp_unlock();
	}

	ip_rt_check_expire();

	/*
	 *	Set the timer again.
	 */

	arp_timer.expires = jiffies + ARP_CHECK_INTERVAL;
	add_timer(&arp_timer);
}
458
459
460
461
462
463
464
/*
 *	Per-entry timer handler: an ARP request went unanswered.  Retry up
 *	to the retry budget; if exhausted but hardware-header caches still
 *	reference the entry, keep it as "dead" and probe slowly; otherwise
 *	unlink and free it.  If the table is soft-locked, postpone briefly.
 */
static void arp_expire_request (unsigned long arg)
{
	struct arp_table *entry = (struct arp_table *) arg;
	struct arp_table **pentry;
	unsigned long hash;
	unsigned long flags;

	save_flags(flags);
	cli();

	/* entry resolved while the timer was in flight: nothing to do */
	if (entry->flags & ATF_COM)
	{
		restore_flags(flags);
		return;
	}

	/* table busy: re-arm shortly and try again */
	if (arp_lock)
	{
#if RT_CACHE_DEBUG >= 1
		printk("arp_expire_request: %08x postponed\n", entry->ip);
#endif
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + HZ/10;
		add_timer(&entry->timer);
		restore_flags(flags);
		return;
	}

	arp_fast_lock();
	restore_flags(flags);

	/* retries remain (and entry not marked dead): resend the request */
	if (entry->last_updated && --entry->retries > 0)
	{
		struct device *dev = entry->dev;

#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x timed out\n", entry->ip);
#endif
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + ARP_RES_TIME;
		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
			 NULL, dev->dev_addr, NULL);
		arp_unlock();
		return;
	}

	arp_release_entry(entry);

	cli();
	/* still referenced by hh caches: can't free — mark dead and keep
	   probing at the slow ARP_DEAD_RES_TIME rate */
	if (arp_count_hhs(entry))
	{
		struct device *dev = entry->dev;
#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x is dead\n", entry->ip);
#endif
		arp_release_entry(entry);
		entry->retries = ARP_MAX_TRIES;
		restore_flags(flags);
		entry->last_updated = 0;	/* flags the entry as dead */
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + ARP_DEAD_RES_TIME;
		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
			 NULL, dev->dev_addr, NULL);
		arp_unlock();
		return;
	}
	restore_flags(flags);

	/* unreferenced and out of retries: unlink from its bucket and free */
	hash = HASH(entry->ip);

	pentry = &arp_tables[hash];

	while (*pentry != NULL)
	{
		if (*pentry == entry)
		{
			cli();
			*pentry = entry->next;
			restore_flags(flags);
#if RT_CACHE_DEBUG >= 2
			printk("arp_expire_request: %08x is killed\n", entry->ip);
#endif
			arp_free_entry(entry);
			arp_unlock();
			return;
		}
		pentry = &(*pentry)->next;
	}
	printk("arp_expire_request: bug: ARP entry is lost!\n");
	arp_unlock();
}
565
566
567
568
569
570 int arp_device_event(struct notifier_block *this, unsigned long event, void *ptr)
571 {
572 struct device *dev=ptr;
573 int i;
574
575 if (event != NETDEV_DOWN)
576 return NOTIFY_DONE;
577
578
579
580
581 #if RT_CACHE_DEBUG >= 1
582 if (arp_lock)
583 printk("arp_device_event: bug\n");
584 #endif
585 arp_fast_lock();
586
587 for (i = 0; i < FULL_ARP_TABLE_SIZE; i++)
588 {
589 struct arp_table *entry;
590 struct arp_table **pentry = &arp_tables[i];
591
592 while ((entry = *pentry) != NULL)
593 {
594 if (entry->dev == dev)
595 {
596 *pentry = entry->next;
597 arp_free_entry(entry);
598 }
599 else
600 pentry = &entry->next;
601 }
602 }
603 return NOTIFY_DONE;
604 }
605
606
607
608
609
610
611
/*
 *	Build and transmit one ARP packet of the given opcode.
 *	dest_hw == NULL means link-level broadcast; target_hw == NULL
 *	zero-fills the target hardware field (as in a request).
 *	Silently does nothing on IFF_NOARP devices or allocation failure.
 */
void arp_send(int type, int ptype, u32 dest_ip,
	      struct device *dev, u32 src_ip,
	      unsigned char *dest_hw, unsigned char *src_hw,
	      unsigned char *target_hw)
{
	struct sk_buff *skb;
	struct arphdr *arp;
	unsigned char *arp_ptr;

	/*
	 *	No arp on this interface.
	 */

	if (dev->flags&IFF_NOARP)
		return;

	/*
	 *	Allocate a buffer: arp header + two (hw,ip) address pairs,
	 *	plus room for the device's link-level header.
	 */

	skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
				+ dev->hard_header_len, GFP_ATOMIC);
	if (skb == NULL)
	{
		printk("ARP: no memory to send an arp packet\n");
		return;
	}
	skb_reserve(skb, dev->hard_header_len);
	arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4));
	skb->arp = 1;		/* resolved: never queue this skb on ARP */
	skb->dev = dev;
	skb->free = 1;

	/*
	 *	Fill the device header for the ARP frame
	 */

	dev->hard_header(skb,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len);

	/*
	 *	Fill out the arp protocol part.  The protocol type depends
	 *	on the link type (AX.25/NET/ROM carry IP as AX25_P_IP).
	 */
	arp->ar_hrd = htons(dev->type);
#ifdef CONFIG_AX25
#ifdef CONFIG_NETROM
	arp->ar_pro = (dev->type == ARPHRD_AX25 || dev->type == ARPHRD_NETROM) ? htons(AX25_P_IP) : htons(ETH_P_IP);
#else
	arp->ar_pro = (dev->type != ARPHRD_AX25) ? htons(ETH_P_IP) : htons(AX25_P_IP);
#endif
#else
	arp->ar_pro = htons(ETH_P_IP);
#endif
	arp->ar_hln = dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr=(unsigned char *)(arp+1);

	/* body layout: sender hw, sender ip, target hw, target ip.
	   NOTE(review): src_hw may legally be NULL for the hard_header call
	   above, but the memcpy below always dereferences it — callers in
	   this file always pass a real address; confirm before reusing. */
	memcpy(arp_ptr, src_hw, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &src_ip,4);
	arp_ptr+=4;
	if (target_hw != NULL)
		memcpy(arp_ptr, target_hw, dev->addr_len);
	else
		memset(arp_ptr, 0, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &dest_ip, 4);

	dev_queue_xmit(skb, dev, 0);
}
681
682
683
684
685
/*
 *	Transmit every packet that was queued waiting for this entry to
 *	resolve.  Must only be called on a complete (ATF_COM) entry; an
 *	incomplete one is reported and left untouched.
 */
static void arp_send_q(struct arp_table *entry)
{
	struct sk_buff *skb;

	unsigned long flags;

	if(!(entry->flags&ATF_COM))
	{
		printk("arp_send_q: incomplete entry for %s\n",
				in_ntoa(entry->ip));
		/* queue deliberately left intact: the entry may resolve yet */
		return;
	}

	save_flags(flags);

	cli();
	while((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		IS_SKB(skb);
		skb_device_lock(skb);
		restore_flags(flags);	/* interrupts on while transmitting */
		/* rebuild_header fills in the now-known hardware address */
		if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb))
		{
			skb->arp = 1;
			if(skb->sk==NULL)
				dev_queue_xmit(skb, skb->dev, 0);
			else
				dev_queue_xmit(skb,skb->dev,skb->sk->priority);
		}
	}
	restore_flags(flags);
}
726
727
728
729
730
731
732 static void arp_destroy(struct arp_table * entry)
733 {
734 struct arp_table *entry1;
735 struct arp_table **pentry;
736
737 if (entry->flags & ATF_PUBL)
738 pentry = &arp_proxy_list;
739 else
740 pentry = &arp_tables[HASH(entry->ip)];
741
742 while ((entry1 = *pentry) != NULL)
743 {
744 if (entry1 == entry)
745 {
746 *pentry = entry1->next;
747 del_timer(&entry->timer);
748 arp_free_entry(entry);
749 return;
750 }
751 pentry = &entry1->next;
752 }
753 }
754
755
756
757
758
759
760
/*
 *	Receive one ARP packet: validate it against the device, answer
 *	requests for our own address (or proxied addresses), and learn /
 *	refresh the sender's mapping if the packet targets us.
 *	Always consumes the skb and returns 0.
 */
int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
{
	struct arphdr *arp = (struct arphdr *)skb->h.raw;
	unsigned char *arp_ptr= (unsigned char *)(arp+1);
	struct arp_table *entry;
	struct arp_table *proxy_entry;
	unsigned long hash;
	unsigned char ha[MAX_ADDR_LEN];
	unsigned char *sha,*tha;
	u32 sip,tip;

	/*
	 *	Sanity: hardware type/length must match the receiving device,
	 *	protocol length must be IPv4, and the device must do ARP.
	 */

	if (arp->ar_hln != dev->addr_len ||
	    dev->type != ntohs(arp->ar_hrd) ||
	    dev->flags & IFF_NOARP ||
	    arp->ar_pln != 4)
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	/*
	 *	The carried protocol must be IP, in the encoding proper to
	 *	the link type (AX25_P_IP on AX.25/NET/ROM links).
	 */

	switch (dev->type)
	{
#ifdef CONFIG_AX25
	case ARPHRD_AX25:
		if(arp->ar_pro != htons(AX25_P_IP))
		{
			kfree_skb(skb, FREE_READ);
			return 0;
		}
		break;
#endif
#ifdef CONFIG_NETROM
	case ARPHRD_NETROM:
		if(arp->ar_pro != htons(AX25_P_IP))
		{
			kfree_skb(skb, FREE_READ);
			return 0;
		}
		break;
#endif
	case ARPHRD_ETHER:
	case ARPHRD_ARCNET:
		if(arp->ar_pro != htons(ETH_P_IP))
		{
			kfree_skb(skb, FREE_READ);
			return 0;
		}
		break;

	case ARPHRD_IEEE802:
		if(arp->ar_pro != htons(ETH_P_IP))
		{
			kfree_skb(skb, FREE_READ);
			return 0;
		}
		break;

	default:
		printk("ARP: dev->type mangled!\n");
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	/*
	 *	Extract the (sender hw, sender ip, target hw, target ip)
	 *	fields that follow the fixed header.
	 */

	sha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	tha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* never answer for loopback or multicast targets */
	if (LOOPBACK(tip) || MULTICAST(tip))
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	if (arp->ar_op == htons(ARPOP_REQUEST))
	{
		/* not our address: maybe we proxy for it */
		if (tip != dev->pa_addr)
		{
			arp_fast_lock();

			for (proxy_entry=arp_proxy_list;
			     proxy_entry;
			     proxy_entry = proxy_entry->next)
			{
				/* masked match on the right device */
				if (proxy_entry->dev == dev &&
				    !((proxy_entry->ip^tip)&proxy_entry->mask))
					break;

			}
			if (proxy_entry)
			{
				/* copy out before unlocking, then reply with
				   the proxied hardware address */
				memcpy(ha, proxy_entry->ha, dev->addr_len);
				arp_unlock();
				arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,ha, sha);
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			else
			{
				arp_unlock();
				kfree_skb(skb, FREE_READ);
				return 0;
			}
		}
		else
		{
			/* request for us: reply with our own address */
			arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
		}
	}

	/* only learn from packets addressed to one of our addresses */
	if(ip_chk_addr(tip)!=IS_MYADDR)
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	/*
	 *	Learn / refresh the sender's mapping.
	 */

	arp_fast_lock();

	hash = HASH(sip);

	for (entry=arp_tables[hash]; entry; entry=entry->next)
		if (entry->ip == sip && entry->dev == dev)
			break;

	if (entry)
	{
		/* existing entry: refresh unless user-pinned */
		if (!(entry->flags & ATF_PERM)) {
			memcpy(entry->ha, sha, dev->addr_len);
			entry->last_updated = jiffies;
		}
		if (!(entry->flags & ATF_COM))
		{
			/* was unresolved: complete it and flush waiters */
			del_timer(&entry->timer);
			entry->flags |= ATF_COM;
			arp_update_hhs(entry);

			arp_send_q(entry);
		}
	}
	else
	{
		/* unknown sender talking to us: create a complete entry */
		entry = (struct arp_table *)kmalloc(sizeof(struct arp_table),GFP_ATOMIC);
		if(entry == NULL)
		{
			arp_unlock();
			printk("ARP: no memory for new arp entry\n");
			kfree_skb(skb, FREE_READ);
			return 0;
		}

		entry->mask = DEF_ARP_NETMASK;
		entry->ip = sip;
		entry->flags = ATF_COM;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;
		memcpy(entry->ha, sha, dev->addr_len);
		entry->last_updated = entry->last_used = jiffies;
		entry->dev = skb->dev;
		skb_queue_head_init(&entry->skb);
		/* sole lock holder may insert directly; otherwise defer to
		   the backlog processed by arp_run_bh() */
		if (arp_lock == 1)
		{
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
		}
		else
		{
#if RT_CACHE_DEBUG >= 1
			printk("arp_rcv: %08x backlogged\n", entry->ip);
#endif
			arp_enqueue(&arp_backlog, entry);
			arp_bh_mask |= ARP_BH_BACKLOG;
		}
	}

	kfree_skb(skb, FREE_READ);
	arp_unlock();
	return 0;
}
1036
1037
1038
1039
1040
1041
1042
1043
1044 static struct arp_table *arp_lookup(u32 paddr, unsigned short flags, struct device * dev)
1045 {
1046 struct arp_table *entry;
1047
1048 if (!(flags & ATF_PUBL))
1049 {
1050 for (entry = arp_tables[HASH(paddr)];
1051 entry != NULL; entry = entry->next)
1052 if (entry->ip == paddr && entry->dev == dev)
1053 break;
1054 return entry;
1055 }
1056
1057 if (!(flags & ATF_NETMASK))
1058 {
1059 for (entry = arp_proxy_list;
1060 entry != NULL; entry = entry->next)
1061 if (entry->ip == paddr && entry->dev == dev)
1062 break;
1063 return entry;
1064 }
1065
1066 for (entry=arp_proxy_list; entry != NULL; entry = entry->next)
1067 if (!((entry->ip^paddr)&entry->mask) && entry->dev == dev)
1068 break;
1069 return entry;
1070 }
1071
1072
1073
1074
1075
1076 int arp_query(unsigned char *haddr, u32 paddr, struct device * dev)
1077 {
1078 struct arp_table *entry;
1079
1080 arp_fast_lock();
1081
1082 entry = arp_lookup(paddr, 0, dev);
1083
1084 if (entry != NULL)
1085 {
1086 entry->last_used = jiffies;
1087 if (entry->flags & ATF_COM)
1088 {
1089 memcpy(haddr, entry->ha, dev->addr_len);
1090 arp_unlock();
1091 return 1;
1092 }
1093 }
1094 arp_unlock();
1095 return 0;
1096 }
1097
1098
1099 static int arp_set_predefined(int addr_hint, unsigned char * haddr, __u32 paddr, struct device * dev)
1100 {
1101 switch (addr_hint)
1102 {
1103 case IS_MYADDR:
1104 printk("ARP: arp called for own IP address\n");
1105 memcpy(haddr, dev->dev_addr, dev->addr_len);
1106 return 1;
1107 #ifdef CONFIG_IP_MULTICAST
1108 case IS_MULTICAST:
1109 if(dev->type==ARPHRD_ETHER || dev->type==ARPHRD_IEEE802)
1110 {
1111 u32 taddr;
1112 haddr[0]=0x01;
1113 haddr[1]=0x00;
1114 haddr[2]=0x5e;
1115 taddr=ntohl(paddr);
1116 haddr[5]=taddr&0xff;
1117 taddr=taddr>>8;
1118 haddr[4]=taddr&0xff;
1119 taddr=taddr>>8;
1120 haddr[3]=taddr&0x7f;
1121 return 1;
1122 }
1123
1124
1125
1126 #endif
1127
1128 case IS_BROADCAST:
1129 memcpy(haddr, dev->broadcast, dev->addr_len);
1130 return 1;
1131 }
1132 return 0;
1133 }
1134
1135
1136
1137
1138
/*
 *	Main resolution entry point for transmit: fill haddr with the
 *	hardware address for paddr.  Returns 0 when resolved (skb, if any,
 *	is marked arp-done); returns 1 when resolution is pending — the skb
 *	is queued on the entry (or dropped if the entry is dead) and an ARP
 *	request is broadcast.
 */
int arp_find(unsigned char *haddr, u32 paddr, struct device *dev,
	     u32 saddr, struct sk_buff *skb)
{
	struct arp_table *entry;
	unsigned long hash;

	/* addresses that need no ARP (self/broadcast/multicast) */
	if (arp_set_predefined(ip_chk_addr(paddr), haddr, paddr, dev))
	{
		if (skb)
			skb->arp = 1;
		return 0;
	}

	hash = HASH(paddr);
	arp_fast_lock();

	/*
	 *	Find an entry
	 */
	entry = arp_lookup(paddr, 0, dev);

	if (entry != NULL)
	{
		if (!(entry->flags & ATF_COM))
		{
			/*
			 *	A request is already pending.  Queue the
			 *	packet — unless the entry is dead
			 *	(last_updated == 0), in which case report
			 *	host-down and drop.
			 */
			if (skb != NULL)
			{
				if (entry->last_updated)
				{
					skb_queue_tail(&entry->skb, skb);
					skb_device_unlock(skb);
				}
				else
				{
					if (skb->sk)
					{
						skb->sk->err = EHOSTDOWN;
						skb->sk->error_report(skb->sk);
					}
					dev_kfree_skb(skb, FREE_WRITE);
				}
			}
			arp_unlock();
			return 1;
		}

		/* resolved: hand out the cached hardware address */
		entry->last_used = jiffies;
		memcpy(haddr, entry->ha, dev->addr_len);
		if (skb)
			skb->arp = 1;
		arp_unlock();
		return 0;
	}

	/*
	 *	No entry: create an unresolved one and start resolving.
	 */
	entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
					     GFP_ATOMIC);
	if (entry != NULL)
	{
		entry->last_updated = entry->last_used = jiffies;
		entry->flags = 0;
		entry->ip = paddr;
		entry->mask = DEF_ARP_NETMASK;
		memset(entry->ha, 0, dev->addr_len);
		entry->dev = dev;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;
		entry->timer.expires = jiffies + ARP_RES_TIME;
		skb_queue_head_init(&entry->skb);
		if (skb != NULL)
		{
			skb_queue_tail(&entry->skb, skb);
			skb_device_unlock(skb);
		}
		/* sole lock holder inserts directly; otherwise the entry
		   goes to the backlog for arp_run_bh() to install */
		if (arp_lock == 1)
		{
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
			add_timer(&entry->timer);
			entry->retries = ARP_MAX_TRIES;
		}
		else
		{
#if RT_CACHE_DEBUG >= 1
			printk("arp_find: %08x backlogged\n", entry->ip);
#endif
			arp_enqueue(&arp_backlog, entry);
			arp_bh_mask |= ARP_BH_BACKLOG;
		}
	}
	else if (skb != NULL)
		dev_kfree_skb(skb, FREE_WRITE);
	arp_unlock();

	/*
	 *	If we didn't find an entry, we will try to send an ARP packet.
	 */

	arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, saddr, NULL,
		 dev->dev_addr, NULL);

	return 1;
}
1264
1265
1266
1267
1268
1269
1270 #define HBUFFERLEN 30
1271
/*
 *	/proc read handler: format the whole ARP table (normal buckets and
 *	the proxy list) into `buffer', honouring the offset/length window
 *	per the standard get_info contract.  Returns the byte count made
 *	available at *start.
 */
int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
	int len=0;
	off_t pos=0;
	int size;
	struct arp_table *entry;
	char hbuffer[HBUFFERLEN];
	int i,j,k;
	const char hexbuf[] = "0123456789ABCDEF";

	size = sprintf(buffer,"IP address       HW type     Flags       HW address            Mask     Device\n");

	pos+=size;
	len+=size;

	arp_fast_lock();

	/* FULL_ARP_TABLE_SIZE includes the proxy list slot */
	for(i=0; i<FULL_ARP_TABLE_SIZE; i++)
	{
		for(entry=arp_tables[i]; entry!=NULL; entry=entry->next)
		{
			/*
			 *	Convert hardware address to XX:XX:XX:XX ... form.
			 *	AX.25/NET/ROM addresses get their own textual form.
			 */
#ifdef CONFIG_AX25
#ifdef CONFIG_NETROM
			if (entry->dev->type == ARPHRD_AX25 || entry->dev->type == ARPHRD_NETROM)
				strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
			else {
#else
			if(entry->dev->type==ARPHRD_AX25)
				strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
			else {
#endif
#endif

			for(k=0,j=0;k<HBUFFERLEN-3 && j<entry->dev->addr_len;j++)
			{
				hbuffer[k++]=hexbuf[ (entry->ha[j]>>4)&15 ];
				hbuffer[k++]=hexbuf[ entry->ha[j]&15     ];
				hbuffer[k++]=':';
			}
			hbuffer[--k]=0;	/* drop the trailing ':' */

#ifdef CONFIG_AX25
			}
#endif
			size = sprintf(buffer+len,
				"%-17s0x%-10x0x%-10x%s",
				in_ntoa(entry->ip),
				(unsigned int)entry->dev->type,
				entry->flags,
				hbuffer);
#if RT_CACHE_DEBUG < 2
			size += sprintf(buffer+len+size,
				 "     %-17s %s\n",
				 entry->mask==DEF_ARP_NETMASK ?
				 "*" : in_ntoa(entry->mask), entry->dev->name);
#else
			/* debug build also shows hh refcount/uptodate */
			size += sprintf(buffer+len+size,
				 "     %-17s %s\t%ld\t%1d\n",
				 entry->mask==DEF_ARP_NETMASK ?
				 "*" : in_ntoa(entry->mask), entry->dev->name,
				 entry->hh ? entry->hh->hh_refcnt : -1,
				 entry->hh ? entry->hh->hh_uptodate : 0);
#endif

			len += size;
			pos += size;

			if (pos <= offset)
				len=0;
			if (pos >= offset+length)
				break;
		}
	}
	arp_unlock();

	*start = buffer+len-(pos-offset);	/* Start of wanted data */
	len = pos-offset;			/* Start slop */
	if (len>length)
		len = length;			/* Ending slop */
	return len;
}
1356
1357
1358
/*
 *	Bind a hardware-header cache slot (*hhp) for paddr/htype on dev.
 *	Reuses an existing hh attached to the matching ARP entry when one
 *	exists; otherwise allocates a new hh (and, if necessary, a new
 *	unresolved ARP entry and a resolution request).
 *	Return value: 1 when nothing further is needed or on failure,
 *	0 when a new binding was installed.
 */
int arp_bind_cache(struct hh_cache ** hhp, struct device *dev, unsigned short htype, u32 paddr)
{
	struct arp_table *entry;
	struct hh_cache *hh = *hhp;
	int addr_hint;
	unsigned long flags;

	if (hh)
		return 1;

	/* addresses needing no ARP: build the header cache immediately */
	if ((addr_hint = ip_chk_addr(paddr)) != 0)
	{
		unsigned char haddr[MAX_ADDR_LEN];
		if (hh)
			return 1;
		hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
		if (!hh)
			return 1;
		arp_set_predefined(addr_hint, haddr, paddr, dev);
		hh->hh_uptodate = 0;
		hh->hh_refcnt = 1;
		hh->hh_arp = NULL;
		hh->hh_next = NULL;
		hh->hh_type = htype;
		*hhp = hh;
		dev->header_cache_update(hh, dev, haddr);
		return 0;
	}

	save_flags(flags);

	arp_fast_lock();

	entry = arp_lookup(paddr, 0, dev);

	if (entry)
	{
		/* reuse an existing hh of the right type, if present */
		cli();
		for (hh = entry->hh; hh; hh=hh->hh_next)
			if (hh->hh_type == htype)
				break;
		if (hh)
		{
			hh->hh_refcnt++;
			*hhp = hh;
			restore_flags(flags);
			arp_unlock();
			return 1;
		}
		restore_flags(flags);
	}

	hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
	if (!hh)
	{
		arp_unlock();
		return 1;
	}

	hh->hh_uptodate = 0;
	hh->hh_refcnt = 1;
	hh->hh_arp = NULL;
	hh->hh_next = NULL;
	hh->hh_type = htype;

	if (entry)
	{
		/* entry already known: attach the new hh to it.
		   NOTE(review): entry->hh is overwritten rather than
		   chained here — safe only if the entry had no other hh;
		   confirm against the caller's usage. */
		dev->header_cache_update(hh, dev, entry->ha);
		*hhp = hh;
		cli();
		hh->hh_arp = (void*)entry;
		entry->hh = hh;
		hh->hh_refcnt++;	/* second ref: the entry's own */
		restore_flags(flags);
		entry->last_used = jiffies;
		arp_unlock();
		return 0;
	}

	/*
	 *	No ARP entry yet: create an unresolved one carrying the hh
	 *	and start resolution.
	 */
	entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
					     GFP_ATOMIC);
	if (entry == NULL)
	{
		kfree_s(hh, sizeof(struct hh_cache));
		arp_unlock();
		return 1;
	}

	entry->last_updated = entry->last_used = jiffies;
	entry->flags = 0;
	entry->ip = paddr;
	entry->mask = DEF_ARP_NETMASK;
	memset(entry->ha, 0, dev->addr_len);
	entry->dev = dev;
	entry->hh = hh;
	ATOMIC_INCR(&hh->hh_refcnt);	/* entry's reference */
	init_timer(&entry->timer);
	entry->timer.function = arp_expire_request;
	entry->timer.data = (unsigned long)entry;
	entry->timer.expires = jiffies + ARP_RES_TIME;
	skb_queue_head_init(&entry->skb);

	/* sole lock holder installs directly; otherwise defer to backlog */
	if (arp_lock == 1)
	{
		unsigned long hash = HASH(paddr);
		cli();
		entry->next = arp_tables[hash];
		arp_tables[hash] = entry;
		hh->hh_arp = (void*)entry;
		entry->retries = ARP_MAX_TRIES;
		restore_flags(flags);

		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
	}
	else
	{
#if RT_CACHE_DEBUG >= 1
		printk("arp_cache_bind: %08x backlogged\n", entry->ip);
#endif
		arp_enqueue(&arp_backlog, entry);
		arp_bh_mask |= ARP_BH_BACKLOG;
	}
	*hhp = hh;
	arp_unlock();
	return 0;
}
1491
/*
 *	Deferred ("bottom half") processing: install entries that were
 *	queued on arp_backlog because the table was locked when they were
 *	created.  A backlogged entry either becomes a real table entry
 *	(with resolution restarted), or — if an equivalent entry appeared
 *	meanwhile — donates its hh caches and queued packets to the
 *	survivor and is freed.  Runs only when the table is unlocked.
 */
static void arp_run_bh()
{
	unsigned long flags;
	struct arp_table *entry, *entry1;
	struct hh_cache *hh;
	__u32 sip;

	save_flags(flags);
	cli();
	if (!arp_lock)
	{
		arp_fast_lock();

		while ((entry = arp_dequeue(&arp_backlog)) != NULL)
		{
			unsigned long hash;
			sti();
			sip = entry->ip;
			hash = HASH(sip);

			/* did a duplicate get installed while we waited? */
			for (entry1=arp_tables[hash]; entry1; entry1=entry1->next)
				if (entry1->ip==sip && entry1->dev == entry->dev)
					break;

			if (!entry1)
			{
				/* no duplicate: install and (re)start resolution */
				struct device * dev = entry->dev;
				cli();
				entry->next = arp_tables[hash];
				arp_tables[hash] = entry;
				for (hh=entry->hh; hh; hh=hh->hh_next)
					hh->hh_arp = (void*)entry;
				sti();
				del_timer(&entry->timer);
				entry->timer.expires = jiffies + ARP_RES_TIME;
				add_timer(&entry->timer);
				entry->retries = ARP_MAX_TRIES;
				arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
#if RT_CACHE_DEBUG >= 1
				printk("arp_run_bh: %08x reinstalled\n", sip);
#endif
			}
			else
			{
				struct sk_buff * skb;
				struct hh_cache * next;

				/* duplicate exists: move our hh caches onto it */
				cli();
				for (hh=entry->hh; hh; hh=next)
				{
					next = hh->hh_next;
					hh->hh_next = entry1->hh;
					entry1->hh = hh;
					hh->hh_arp = (void*)entry1;
				}
				entry->hh = NULL;

				/* ... and our queued packets too */
				while ((skb = skb_dequeue(&entry->skb)) != NULL)
				{
					skb_device_lock(skb);
					sti();
					skb_queue_tail(&entry1->skb, skb);
					skb_device_unlock(skb);
					cli();
				}
				sti();

#if RT_CACHE_DEBUG >= 1
				printk("arp_run_bh: entry %08x was born dead\n", entry->ip);
#endif
				arp_free_entry(entry);

				/* survivor already resolved: propagate it */
				if (entry1->flags & ATF_COM)
				{
					arp_update_hhs(entry1);
					arp_send_q(entry1);
				}
			}
			cli();
		}
		arp_bh_mask &= ~ARP_BH_BACKLOG;
		arp_unlock();
	}
	restore_flags(flags);
}
1587
1588
1589
1590
1591
1592
/*
 *	SIOCSARP handler: create or replace an ARP entry from a user
 *	request.  The route to r->arp_pa must exist; for proxy (ATF_PUBL)
 *	entries the route must leave by a *different* device than dev,
 *	for normal entries by dev itself.
 *	Returns 0, -ENETUNREACH, -EINVAL or -ENOMEM.
 */
static int arp_req_set(struct arpreq *r, struct device * dev)
{
	struct arp_table *entry;
	struct sockaddr_in *si;
	struct rtable *rt;
	struct device * dev1;
	u32 ip;

	/*
	 *	Extract destination.
	 */
	si = (struct sockaddr_in *) &r->arp_pa;
	ip = si->sin_addr.s_addr;

	/*
	 *	Check which device the address routes through.
	 */
	rt = ip_rt_route(ip, 0);
	if (!rt)
		return -ENETUNREACH;
	dev1 = rt->rt_dev;
	ip_rt_put(rt);

	/* proxy entries must NOT route via dev; normal entries must */
	if (((r->arp_flags & ATF_PUBL) && dev == dev1) ||
	    (!(r->arp_flags & ATF_PUBL) && dev != dev1))
		return -EINVAL;

#if RT_CACHE_DEBUG >= 1
	if (arp_lock)
		printk("arp_req_set: bug\n");
#endif
	arp_fast_lock();

	/*
	 *	Is there an existing entry for this address?  If so, destroy
	 *	it and build a fresh one below.
	 */
	entry = arp_lookup(ip, r->arp_flags & ~ATF_NETMASK, dev);

	if (entry)
	{
		arp_destroy(entry);
		entry = NULL;
	}

	/*
	 *	Do we need a new entry?  (Always, after the destroy above.)
	 */
	if (entry == NULL)
	{
		entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
					GFP_ATOMIC);
		if (entry == NULL)
		{
			arp_unlock();
			return -ENOMEM;
		}
		entry->ip = ip;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;

		/* proxy entries live on their own list, others are hashed */
		if (r->arp_flags & ATF_PUBL)
		{
			cli();
			entry->next = arp_proxy_list;
			arp_proxy_list = entry;
			sti();
		}
		else
		{
			unsigned long hash = HASH(ip);
			cli();
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
			sti();
		}
		skb_queue_head_init(&entry->skb);
	}

	/*
	 *	Fill in the entry.  An empty hardware address with ATF_COM
	 *	means "use the device's own address".
	 */
	if ((r->arp_flags & ATF_COM) && !r->arp_ha.sa_data[0])
		memcpy(&entry->ha, dev->dev_addr, dev->addr_len);
	else
		memcpy(&entry->ha, &r->arp_ha.sa_data, dev->addr_len);
	entry->last_updated = entry->last_used = jiffies;
	entry->flags = r->arp_flags | ATF_COM;
	if ((entry->flags & ATF_PUBL) && (entry->flags & ATF_NETMASK))
	{
		si = (struct sockaddr_in *) &r->arp_netmask;
		entry->mask = si->sin_addr.s_addr;
	}
	else
		entry->mask = DEF_ARP_NETMASK;
	entry->dev = dev;
	arp_update_hhs(entry);
	arp_unlock();
	return 0;
}
1707
1708
1709
1710
1711
1712
1713
1714 static int arp_req_get(struct arpreq *r, struct device *dev)
1715 {
1716 struct arp_table *entry;
1717 struct sockaddr_in *si;
1718
1719 si = (struct sockaddr_in *) &r->arp_pa;
1720
1721 #if RT_CACHE_DEBUG >= 1
1722 if (arp_lock)
1723 printk("arp_req_set: bug\n");
1724 #endif
1725 arp_fast_lock();
1726
1727 entry = arp_lookup(si->sin_addr.s_addr, r->arp_flags|ATF_NETMASK, dev);
1728
1729 if (entry == NULL)
1730 {
1731 arp_unlock();
1732 return -ENXIO;
1733 }
1734
1735
1736
1737
1738
1739 memcpy(r->arp_ha.sa_data, &entry->ha, entry->dev->addr_len);
1740 r->arp_ha.sa_family = entry->dev->type;
1741 r->arp_flags = entry->flags;
1742 strncpy(r->arp_dev, entry->dev->name, 16);
1743 arp_unlock();
1744 return 0;
1745 }
1746
1747 static int arp_req_delete(struct arpreq *r, struct device * dev)
1748 {
1749 struct arp_table *entry;
1750 struct sockaddr_in *si;
1751
1752 si = (struct sockaddr_in *) &r->arp_pa;
1753 #if RT_CACHE_DEBUG >= 1
1754 if (arp_lock)
1755 printk("arp_req_delete: bug\n");
1756 #endif
1757 arp_fast_lock();
1758
1759 if (!(r->arp_flags & ATF_PUBL))
1760 {
1761 for (entry = arp_tables[HASH(si->sin_addr.s_addr)];
1762 entry != NULL; entry = entry->next)
1763 if (entry->ip == si->sin_addr.s_addr
1764 && entry->dev == dev)
1765 {
1766 arp_destroy(entry);
1767 arp_unlock();
1768 return 0;
1769 }
1770 }
1771 else
1772 {
1773 for (entry = arp_proxy_list;
1774 entry != NULL; entry = entry->next)
1775 if (entry->ip == si->sin_addr.s_addr
1776 && entry->dev == dev)
1777 {
1778 arp_destroy(entry);
1779 arp_unlock();
1780 return 0;
1781 }
1782 }
1783
1784 arp_unlock();
1785 return -ENXIO;
1786 }
1787
1788
1789
1790
1791
/*
 *	Handle an ARP layer ioctl: set (SIOCSARP), get (SIOCGARP) or
 *	delete (SIOCDARP) a cache entry, plus the pre-netmask "old"
 *	variants which use the smaller struct arpreq_old and cannot
 *	express the proxy/netmask flags directly.
 *	Returns 0 or a negative errno.
 */

int arp_ioctl(unsigned int cmd, void *arg)
{
	int err;
	struct arpreq r;

	struct device * dev = NULL;

	switch(cmd)
	{
		case SIOCDARP:
		case SIOCSARP:
			if (!suser())
				return -EPERM;
			/* fall through: set/delete/get all copy the request in */
		case SIOCGARP:
			err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq));
			if (err)
				return err;
			memcpy_fromfs(&r, arg, sizeof(struct arpreq));
			break;
		case OLD_SIOCDARP:
		case OLD_SIOCSARP:
			if (!suser())
				return -EPERM;
			/* fall through */
		case OLD_SIOCGARP:
			err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq_old));
			if (err)
				return err;
			memcpy_fromfs(&r, arg, sizeof(struct arpreq_old));
			/* The old request format carries no device name;
			   clear the field the short copy left untouched. */
			memset(&r.arp_dev, 0, sizeof(r.arp_dev));
			break;
		default:
			return -EINVAL;
	}

	/* Only IPv4 protocol addresses are supported, and 0.0.0.0 is
	   never a valid entry. */
	if (r.arp_pa.sa_family != AF_INET)
		return -EPFNOSUPPORT;
	if (((struct sockaddr_in *)&r.arp_pa)->sin_addr.s_addr == 0)
		return -EINVAL;

	if (r.arp_dev[0])
	{
		/* An explicit device name was given: the hardware address
		   family must match that device's type (or be left 0). */
		if ((dev = dev_get(r.arp_dev)) == NULL)
			return -ENODEV;

		if (!r.arp_ha.sa_family)
			r.arp_ha.sa_family = dev->type;
		else if (r.arp_ha.sa_family != dev->type)
			return -EINVAL;
	}
	else
	{
		/* No device named: fall back to the first device of the
		   requested hardware type. */
		if ((dev = dev_getbytype(r.arp_ha.sa_family)) == NULL)
			return -ENODEV;
	}

	switch(cmd)
	{
		case SIOCDARP:
			return arp_req_delete(&r, dev);
		case SIOCSARP:
			return arp_req_set(&r, dev);
		case OLD_SIOCDARP:
			/* The old ioctl cannot say whether the target is a
			   proxy entry: try the ordinary entry first, then
			   the ATF_PUBL one.  Success if either delete worked. */
			r.arp_flags &= ~ATF_PUBL;
			err = arp_req_delete(&r, dev);
			r.arp_flags |= ATF_PUBL;
			if (!err)
				arp_req_delete(&r, dev);
			else
				err = arp_req_delete(&r, dev);
			return err;
		case OLD_SIOCSARP:
			err = arp_req_set(&r, dev);
			/* Setting a proxy entry via the old ioctl may leave a
			   stale ordinary entry for the same address behind;
			   remove it so lookups see the proxy entry. */
			if (r.arp_flags & ATF_PUBL)
			{
				r.arp_flags &= ~ATF_PUBL;
				arp_req_delete(&r, dev);
			}
			return err;
		case SIOCGARP:
			err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq));
			if (err)
				return err;
			err = arp_req_get(&r, dev);
			if (!err)
				memcpy_tofs(arg, &r, sizeof(r));
			return err;
		case OLD_SIOCGARP:
			err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq_old));
			if (err)
				return err;
			/* Look for an ordinary entry first; retry with the
			   proxy flag if none was found. */
			r.arp_flags &= ~ATF_PUBL;
			err = arp_req_get(&r, dev);
			if (err < 0)
			{
				r.arp_flags |= ATF_PUBL;
				err = arp_req_get(&r, dev);
			}
			if (!err)
				memcpy_tofs(arg, &r, sizeof(struct arpreq_old));
			return err;
	}

	return 0;
}
1907
1908
1909
1910
1911
1912
/*
 *	Packet handler registration for inbound ARP frames.  The
 *	protocol id is filled in at run time by arp_init(), which
 *	then hands this structure to dev_add_pack().
 */

static struct packet_type arp_packet_type =
{
	0,		/* type: set to htons(ETH_P_ARP) in arp_init() */
	NULL,		/* dev: NULL means listen on every device */
	arp_rcv,	/* func: receive handler */
	NULL,		/* data */
	NULL		/* next */
};
1921
/*
 *	Notifier hook for network device events, registered by
 *	arp_init(); arp_device_event() is called on device changes.
 */

static struct notifier_block arp_dev_notifier={
	arp_device_event,	/* notifier_call */
	NULL,			/* next in chain */
	0			/* priority */
};
1927
/*
 *	Called once at boot to plug ARP into the network stack:
 *	register the packet handler, start the cache expiry timer,
 *	hook device events and create the /proc/net/arp entry.
 */

void arp_init (void)
{
	/* Register the ARP packet type and start receiving frames. */
	arp_packet_type.type=htons(ETH_P_ARP);
	dev_add_pack(&arp_packet_type);
	/* Kick off the periodic cache expiry checks. */
	add_timer(&arp_timer);
	/* Be told when devices come and go. */
	register_netdevice_notifier(&arp_dev_notifier);
	/* /proc/net/arp, read via arp_get_info(); the proc entry is a
	   GNU compound literal passed by address. */
	proc_net_register(&(struct proc_dir_entry) {
		PROC_NET_ARP, 3, "arp",
		S_IFREG | S_IRUGO, 1, 0, 0,
		0, &proc_net_inode_operations,
		arp_get_info
	});
}
1945