This source file includes the following definitions:
- arp_fast_lock
- arp_fast_unlock
- arp_unlock
- arp_enqueue
- arp_dequeue
- arp_release_entry
- arp_free_entry
- arp_count_hhs
- arp_invalidate_hhs
- arp_update_hhs
- arp_check_expire
- arp_expire_request
- arp_device_event
- arp_send
- arp_send_q
- arp_destroy
- arp_rcv
- arp_lookup
- arp_query
- arp_set_predefined
- arp_find
- arp_get_info
- arp_bind_cache
- arp_run_bh
- empty
- arp_req_set
- arp_req_get
- arp_req_delete
- arp_ioctl
- arp_init
67 #include <linux/types.h>
68 #include <linux/string.h>
69 #include <linux/kernel.h>
70 #include <linux/sched.h>
71 #include <linux/config.h>
72 #include <linux/socket.h>
73 #include <linux/sockios.h>
74 #include <linux/errno.h>
75 #include <linux/if_arp.h>
76 #include <linux/in.h>
77 #include <linux/mm.h>
78 #include <linux/inet.h>
79 #include <linux/netdevice.h>
80 #include <linux/etherdevice.h>
81 #include <linux/trdevice.h>
82 #include <linux/skbuff.h>
83 #include <linux/proc_fs.h>
84 #include <linux/stat.h>
85
86 #include <net/ip.h>
87 #include <net/icmp.h>
88 #include <net/route.h>
89 #include <net/protocol.h>
90 #include <net/tcp.h>
91 #include <net/sock.h>
92 #include <net/arp.h>
93 #ifdef CONFIG_AX25
94 #include <net/ax25.h>
95 #ifdef CONFIG_NETROM
96 #include <net/netrom.h>
97 #endif
98 #endif
99 #ifdef CONFIG_NET_ALIAS
100 #include <linux/net_alias.h>
101 #endif
102
103 #include <asm/system.h>
104 #include <asm/segment.h>
105
106 #include <stdarg.h>
107
108
109
110
111
112
113
114
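/*
 * One entry in the ARP cache: the IP address and the hardware address it
 * maps to, bookkeeping timestamps and flags, the resolution retry timer,
 * a queue of packets waiting for the address to resolve, and a chain of
 * hardware header cache entries bound to this address.
 */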
115 struct arp_table
116 {
117 struct arp_table *next;
118 unsigned long last_used;
119 unsigned long last_updated;
120 unsigned int flags;
121 u32 ip;
122 u32 mask;
123 unsigned char ha[MAX_ADDR_LEN];
124 struct device *dev;
125
126
127
128
129
130 struct timer_list timer;
131 int retries;
132 struct sk_buff_head skb;
133 struct hh_cache *hh;
134 };
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
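/*
 * Resolution and cache-expiry parameters (times in jiffies):
 *
 *   ARP_RES_TIME          interval between retransmitted requests while an
 *                         entry is being resolved
 *   ARP_DEAD_RES_TIME     retry interval once a host has stopped answering
 *   ARP_MAX_TRIES         number of requests to send before giving up
 *   ARP_TIMEOUT           how long an unused, non-permanent entry is kept
 *   ARP_CHECK_INTERVAL    how often the expiry sweep (arp_check_expire) runs
 *   ARP_CONFIRM_INTERVAL  entries not updated for this long are re-verified
 *                         with a unicast request
 *   ARP_CONFIRM_TIMEOUT   how long to wait for that confirmation
 */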
150 #define ARP_RES_TIME (5*HZ)
151 #define ARP_DEAD_RES_TIME (60*HZ)
152
153
154
155
156
157
158 #define ARP_MAX_TRIES 3
159
160
161
162
163
164 #define ARP_TIMEOUT (600*HZ)
165
166
167
168
169
170
171
172 #define ARP_CHECK_INTERVAL (60*HZ)
173
174
175
176
177
178
179
180 #define ARP_CONFIRM_INTERVAL (300*HZ)
181 #define ARP_CONFIRM_TIMEOUT ARP_RES_TIME
182
183 static unsigned long arp_lock;
184 static unsigned long arp_bh_mask;
185
186 #define ARP_BH_BACKLOG 1
187
188 static struct arp_table *arp_backlog;
189
190 static void arp_run_bh(void);
191 static void arp_check_expire (unsigned long);
192
193 static struct timer_list arp_timer =
194 { NULL, NULL, ARP_CHECK_INTERVAL, 0L, &arp_check_expire };
195
196
197
198
199
200
201 #define DEF_ARP_NETMASK (~0)
202
203
204
205
206
207
208
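/*
 * The cache proper is an array of ARP_TABLE_SIZE hash buckets, with one
 * extra slot at the end (arp_proxy_list) holding the proxy-ARP entries.
 */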
209 #define ARP_TABLE_SIZE 16
210 #define FULL_ARP_TABLE_SIZE (ARP_TABLE_SIZE+1)
211
212 struct arp_table *arp_tables[FULL_ARP_TABLE_SIZE] =
213 {
214 NULL,
215 };
216
217 #define arp_proxy_list arp_tables[ARP_TABLE_SIZE]
218
219
220
221
222
223
224 #define HASH(paddr) (htonl(paddr) & (ARP_TABLE_SIZE - 1))
225
226
227
228
229
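/*
 * Access to the cache is serialised by a simple counting lock.  The fast
 * lock/unlock primitives only adjust the counter; arp_unlock additionally
 * runs the deferred backlog work (arp_run_bh) once the count drops back to
 * zero and arp_bh_mask shows work pending.
 */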
230 static __inline__ void arp_fast_lock(void)
231 {
232 ATOMIC_INCR(&arp_lock);
233 }
234
235 static __inline__ void arp_fast_unlock(void)
236 {
237 ATOMIC_DECR(&arp_lock);
238 }
239
240 static __inline__ void arp_unlock(void)
241 {
242 if (!ATOMIC_DECR_AND_CHECK(&arp_lock) && arp_bh_mask)
243 arp_run_bh();
244 }
245
246
247
248
249
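/*
 * Append an entry to a backlog queue.  The queue is a circular singly
 * linked list addressed by its tail: *q points at the last element and
 * (*q)->next is the head, so both ends are reachable through one pointer.
 */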
250 static void arp_enqueue(struct arp_table **q, struct arp_table *entry)
251 {
252 unsigned long flags;
253 struct arp_table * tail;
254
255 save_flags(flags);
256 cli();
257 tail = *q;
258 if (!tail)
259 entry->next = entry;
260 else
261 {
262 entry->next = tail->next;
263 tail->next = entry;
264 }
265 *q = entry;
266 restore_flags(flags);
267 return;
268 }
269
270
271
272
273
274
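/*
 * Remove and return the head of a backlog queue, or NULL if it is empty.
 */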
275 static struct arp_table * arp_dequeue(struct arp_table **q)
276 {
277 struct arp_table * entry;
278
279 if (*q)
280 {
281 entry = (*q)->next;
282 (*q)->next = entry->next;
283 if (entry->next == entry)
284 *q = NULL;
285 entry->next = NULL;
286 return entry;
287 }
288 return NULL;
289 }
290
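/*
 * A minimal, self-contained userspace sketch of the same single-pointer
 * circular queue technique used by arp_enqueue()/arp_dequeue() above; the
 * names are hypothetical and plain integers stand in for ARP entries.
 */

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int val; };

/* *q is the tail of a circular list; tail->next is the head. */
static void ring_enqueue(struct node **q, struct node *n)
{
	struct node *tail = *q;

	if (!tail)
		n->next = n;		/* single element points at itself */
	else {
		n->next = tail->next;	/* new tail points at the old head */
		tail->next = n;
	}
	*q = n;				/* the new element becomes the tail */
}

static struct node *ring_dequeue(struct node **q)
{
	struct node *head;

	if (!*q)
		return NULL;
	head = (*q)->next;
	(*q)->next = head->next;
	if (head->next == head)		/* it was the only element */
		*q = NULL;
	head->next = NULL;
	return head;
}

int main(void)
{
	struct node *q = NULL, *n;
	int i;

	for (i = 0; i < 3; i++) {
		n = malloc(sizeof(*n));
		n->val = i;
		ring_enqueue(&q, n);
	}
	while ((n = ring_dequeue(&q)) != NULL) {
		printf("%d\n", n->val);	/* FIFO order: 0 1 2 */
		free(n);
	}
	return 0;
}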
291
292
293
294
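/*
 * Throw away every packet still queued on an entry waiting for resolution.
 */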
295 static void arp_release_entry(struct arp_table *entry)
296 {
297 struct sk_buff *skb;
298 unsigned long flags;
299
300 save_flags(flags);
301 cli();
302
303 while ((skb = skb_dequeue(&entry->skb)) != NULL)
304 {
305 skb_device_lock(skb);
306 restore_flags(flags);
307 dev_kfree_skb(skb, FREE_WRITE);
308 cli();
309 }
310 restore_flags(flags);
311 return;
312 }
313
314
315
316
317
318
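/*
 * Destroy an entry completely: stop its timer, purge its packet queue,
 * detach its hardware header cache entries (freeing those nobody else
 * references any more) and release the entry itself.
 */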
319 static void arp_free_entry(struct arp_table *entry)
320 {
321 unsigned long flags;
322 struct hh_cache *hh, *next;
323
324 del_timer(&entry->timer);
325
326 save_flags(flags);
327 cli();
328 arp_release_entry(entry);
329
330 for (hh = entry->hh; hh; hh = next)
331 {
332 next = hh->hh_next;
333 hh->hh_arp = NULL;
334 if (!--hh->hh_refcnt)
335 kfree_s(hh, sizeof(struct hh_cache));
336 }
337 restore_flags(flags);
338
339 kfree_s(entry, sizeof(struct arp_table));
340 return;
341 }
342
343
344
345
346
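/*
 * Count the outside references held on the entry's hardware header cache
 * chain, dropping on the way any hh entry that only the ARP code itself
 * still references.
 */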
347 static __inline__ int arp_count_hhs(struct arp_table * entry)
348 {
349 struct hh_cache *hh, **hhp;
350 int count = 0;
351
352 hhp = &entry->hh;
353 while ((hh=*hhp) != NULL)
354 {
355 if (hh->hh_refcnt == 1)
356 {
357 *hhp = hh->hh_next;
358 kfree_s(hh, sizeof(struct hh_cache));
359 continue;
360 }
361 count += hh->hh_refcnt-1;
362 hhp = &hh->hh_next;
363 }
364
365 return count;
366 }
367
368
369
370
371
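/*
 * Mark every hardware header cached for this entry as stale.
 */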
372 static __inline__ void arp_invalidate_hhs(struct arp_table * entry)
373 {
374 struct hh_cache *hh;
375
376 for (hh=entry->hh; hh; hh=hh->hh_next)
377 hh->hh_uptodate = 0;
378 }
379
380
381
382
383
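/*
 * Refresh every cached hardware header with the entry's current hardware
 * address.
 */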
384 static __inline__ void arp_update_hhs(struct arp_table * entry)
385 {
386 struct hh_cache *hh;
387
388 for (hh=entry->hh; hh; hh=hh->hh_next)
389 entry->dev->header_cache_update(hh, entry->dev, entry->ha);
390 }
391
392
393
394
395
396
397
398
399
400
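/*
 * Periodic sweep, run from arp_timer.  Non-permanent, unreferenced entries
 * that have not been used for ARP_TIMEOUT are freed; entries that have not
 * been updated within ARP_CONFIRM_INTERVAL are marked incomplete again and
 * re-verified with a unicast request.  The sweep is skipped if the cache
 * is currently locked.
 */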
401 static void arp_check_expire(unsigned long dummy)
402 {
403 int i;
404 unsigned long now = jiffies;
405
406 del_timer(&arp_timer);
407
408 if (!arp_lock)
409 {
410 arp_fast_lock();
411
412 for (i = 0; i < ARP_TABLE_SIZE; i++)
413 {
414 struct arp_table *entry;
415 struct arp_table **pentry;
416
417 pentry = &arp_tables[i];
418
419 while ((entry = *pentry) != NULL)
420 {
421 cli();
422 if (now - entry->last_used > ARP_TIMEOUT
423 && !(entry->flags & ATF_PERM)
424 && !arp_count_hhs(entry))
425 {
426 *pentry = entry->next;
427 sti();
428 #if RT_CACHE_DEBUG >= 2
429 printk("arp_expire: %08x expired\n", entry->ip);
430 #endif
431 arp_free_entry(entry);
432 }
433 else if (entry->last_updated
434 && now - entry->last_updated > ARP_CONFIRM_INTERVAL
435 && !(entry->flags & ATF_PERM))
436 {
437 struct device * dev = entry->dev;
438 pentry = &entry->next;
439 entry->flags &= ~ATF_COM;
440 arp_invalidate_hhs(entry);
441 sti();
442 entry->retries = ARP_MAX_TRIES+1;
443 del_timer(&entry->timer);
444 entry->timer.expires = jiffies + ARP_CONFIRM_TIMEOUT;
445 add_timer(&entry->timer);
446 arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip,
447 dev, dev->pa_addr, entry->ha,
448 dev->dev_addr, NULL);
449 #if RT_CACHE_DEBUG >= 2
450 printk("arp_expire: %08x requires confirmation\n", entry->ip);
451 #endif
452 }
453 else
454 pentry = &entry->next;
455 }
456 }
457 arp_unlock();
458 }
459
460 ip_rt_check_expire();
461
462
463
464
465
466 arp_timer.expires = jiffies + ARP_CHECK_INTERVAL;
467 add_timer(&arp_timer);
468 }
469
470
471
472
473
474
475
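/*
 * Per-entry timer handler: a pending resolution or confirmation has timed
 * out.  Retransmit the request while retries remain; otherwise drop the
 * queued packets and either keep the entry on a slow "dead host" retry
 * cycle (if header caches still reference it) or unlink and free it.
 * If the cache is locked the work is simply rescheduled.
 */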
476 static void arp_expire_request (unsigned long arg)
477 {
478 struct arp_table *entry = (struct arp_table *) arg;
479 struct arp_table **pentry;
480 unsigned long hash;
481 unsigned long flags;
482
483 save_flags(flags);
484 cli();
485
486
487
488
489
490
491
492 if (entry->flags & ATF_COM)
493 {
494 restore_flags(flags);
495 return;
496 }
497
498 if (arp_lock)
499 {
500 #if RT_CACHE_DEBUG >= 1
501 printk("arp_expire_request: %08x postponed\n", entry->ip);
502 #endif
503 del_timer(&entry->timer);
504 entry->timer.expires = jiffies + HZ/10;
505 add_timer(&entry->timer);
506 restore_flags(flags);
507 return;
508 }
509
510 arp_fast_lock();
511 restore_flags(flags);
512
513 if (entry->last_updated && --entry->retries > 0)
514 {
515 struct device *dev = entry->dev;
516
517 #if RT_CACHE_DEBUG >= 2
518 printk("arp_expire_request: %08x timed out\n", entry->ip);
519 #endif
520
521 del_timer(&entry->timer);
522 entry->timer.expires = jiffies + ARP_RES_TIME;
523 add_timer(&entry->timer);
524 arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
525 NULL, dev->dev_addr, NULL);
526 arp_unlock();
527 return;
528 }
529
530 arp_release_entry(entry);
531
532 cli();
533 if (arp_count_hhs(entry))
534 {
535 struct device *dev = entry->dev;
536 #if RT_CACHE_DEBUG >= 2
537 printk("arp_expire_request: %08x is dead\n", entry->ip);
538 #endif
539 arp_release_entry(entry);
540 entry->retries = ARP_MAX_TRIES;
541 restore_flags(flags);
542 entry->last_updated = 0;
543 del_timer(&entry->timer);
544 entry->timer.expires = jiffies + ARP_DEAD_RES_TIME;
545 add_timer(&entry->timer);
546 arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
547 NULL, dev->dev_addr, NULL);
548 arp_unlock();
549 return;
550 }
551 restore_flags(flags);
552
553 hash = HASH(entry->ip);
554
555 pentry = &arp_tables[hash];
556
557 while (*pentry != NULL)
558 {
559 if (*pentry == entry)
560 {
561 cli();
562 *pentry = entry->next;
563 restore_flags(flags);
564 #if RT_CACHE_DEBUG >= 2
565 printk("arp_expire_request: %08x is killed\n", entry->ip);
566 #endif
567 arp_free_entry(entry);
568 arp_unlock();
569 return;
570 }
571 pentry = &(*pentry)->next;
572 }
573 printk("arp_expire_request: bug: ARP entry is lost!\n");
574 arp_unlock();
575 }
576
577
578
579
580
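/*
 * Device-down notifier: flush every cache and proxy entry that refers to
 * the disappearing device.
 */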
581 int arp_device_event(struct notifier_block *this, unsigned long event, void *ptr)
582 {
583 struct device *dev=ptr;
584 int i;
585
586 if (event != NETDEV_DOWN)
587 return NOTIFY_DONE;
588
589
590
591
592 #if RT_CACHE_DEBUG >= 1
593 if (arp_lock)
594 printk("arp_device_event: bug\n");
595 #endif
596 arp_fast_lock();
597
598 for (i = 0; i < FULL_ARP_TABLE_SIZE; i++)
599 {
600 struct arp_table *entry;
601 struct arp_table **pentry = &arp_tables[i];
602
603 while ((entry = *pentry) != NULL)
604 {
605 if (entry->dev == dev)
606 {
607 *pentry = entry->next;
608 arp_free_entry(entry);
609 }
610 else
611 pentry = &entry->next;
612 }
613 }
614 arp_unlock();
615 return NOTIFY_DONE;
616 }
617
618
619
620
621
622
623
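/*
 * Build and transmit one ARP packet (request or reply).  A NULL dest_hw
 * means broadcast; a NULL target_hw leaves the target hardware field
 * zeroed.  Nothing is sent for devices flagged IFF_NOARP.
 */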
624 void arp_send(int type, int ptype, u32 dest_ip,
625 struct device *dev, u32 src_ip,
626 unsigned char *dest_hw, unsigned char *src_hw,
627 unsigned char *target_hw)
628 {
629 struct sk_buff *skb;
630 struct arphdr *arp;
631 unsigned char *arp_ptr;
632
633
634
635
636
637 if (dev->flags&IFF_NOARP)
638 return;
639
640
641
642
643
644 skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
645 + dev->hard_header_len, GFP_ATOMIC);
646 if (skb == NULL)
647 {
648 printk("ARP: no memory to send an arp packet\n");
649 return;
650 }
651 skb_reserve(skb, dev->hard_header_len);
652 arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4));
653 skb->arp = 1;
654 skb->dev = dev;
655 skb->free = 1;
656 skb->protocol = htons (ETH_P_IP);
657
658
659
660
661
662 dev->hard_header(skb,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len);
663
664
665 arp->ar_hrd = htons(dev->type);
666 #ifdef CONFIG_AX25
667 #ifdef CONFIG_NETROM
668 arp->ar_pro = (dev->type == ARPHRD_AX25 || dev->type == ARPHRD_NETROM) ? htons(AX25_P_IP) : htons(ETH_P_IP);
669 #else
670 arp->ar_pro = (dev->type != ARPHRD_AX25) ? htons(ETH_P_IP) : htons(AX25_P_IP);
671 #endif
672 #else
673 arp->ar_pro = htons(ETH_P_IP);
674 #endif
675 arp->ar_hln = dev->addr_len;
676 arp->ar_pln = 4;
677 arp->ar_op = htons(type);
678
679 arp_ptr=(unsigned char *)(arp+1);
680
681 memcpy(arp_ptr, src_hw, dev->addr_len);
682 arp_ptr+=dev->addr_len;
683 memcpy(arp_ptr, &src_ip,4);
684 arp_ptr+=4;
685 if (target_hw != NULL)
686 memcpy(arp_ptr, target_hw, dev->addr_len);
687 else
688 memset(arp_ptr, 0, dev->addr_len);
689 arp_ptr+=dev->addr_len;
690 memcpy(arp_ptr, &dest_ip, 4);
691
692 dev_queue_xmit(skb, dev, 0);
693 }
694
695
696
697
698
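/*
 * An entry has just been resolved: rebuild the hardware header of every
 * packet queued on it and hand the packets to the device.
 */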
699 static void arp_send_q(struct arp_table *entry)
700 {
701 struct sk_buff *skb;
702
703 unsigned long flags;
704
705
706
707
708
709 if(!(entry->flags&ATF_COM))
710 {
711 printk("arp_send_q: incomplete entry for %s\n",
712 in_ntoa(entry->ip));
713
714
715
716
717 return;
718 }
719
720 save_flags(flags);
721
722 cli();
723 while((skb = skb_dequeue(&entry->skb)) != NULL)
724 {
725 IS_SKB(skb);
726 skb_device_lock(skb);
727 restore_flags(flags);
728 if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb))
729 {
730 skb->arp = 1;
731 if(skb->sk==NULL)
732 dev_queue_xmit(skb, skb->dev, 0);
733 else
734 dev_queue_xmit(skb,skb->dev,skb->sk->priority);
735 }
736 }
737 restore_flags(flags);
738 }
739
740
741
742
743
744
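/*
 * Unlink an entry from its hash bucket (or from the proxy list) and free
 * it.
 */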
745 static void arp_destroy(struct arp_table * entry)
746 {
747 struct arp_table *entry1;
748 struct arp_table **pentry;
749
750 if (entry->flags & ATF_PUBL)
751 pentry = &arp_proxy_list;
752 else
753 pentry = &arp_tables[HASH(entry->ip)];
754
755 while ((entry1 = *pentry) != NULL)
756 {
757 if (entry1 == entry)
758 {
759 *pentry = entry1->next;
760 del_timer(&entry->timer);
761 arp_free_entry(entry);
762 return;
763 }
764 pentry = &entry1->next;
765 }
766 }
767
768
769
770
771
772
773
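/*
 * Receive an ARP packet.  The packet is sanity-checked against the device,
 * requests for our own (or proxied) addresses are answered, and the
 * sender's mapping is used to update or create a cache entry, completing
 * any resolution that was waiting on it.
 */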
774 int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
775 {
776
777
778
779
780 struct arphdr *arp = (struct arphdr *)skb->h.raw;
781 unsigned char *arp_ptr= (unsigned char *)(arp+1);
782 struct arp_table *entry;
783 struct arp_table *proxy_entry;
784 unsigned long hash;
785 unsigned char ha[MAX_ADDR_LEN];
786 unsigned char *sha,*tha;
787 u32 sip,tip;
788
789
790
791
792
793
794
795
796 if (arp->ar_hln != dev->addr_len ||
797 dev->type != ntohs(arp->ar_hrd) ||
798 dev->flags & IFF_NOARP ||
799 arp->ar_pln != 4)
800 {
801 kfree_skb(skb, FREE_READ);
802 return 0;
803
804
805 }
806
807
808
809
810
811
812
813
814
815 switch (dev->type)
816 {
817 #ifdef CONFIG_AX25
818 case ARPHRD_AX25:
819 if(arp->ar_pro != htons(AX25_P_IP))
820 {
821 kfree_skb(skb, FREE_READ);
822 return 0;
823 }
824 break;
825 #endif
826 #ifdef CONFIG_NETROM
827 case ARPHRD_NETROM:
828 if(arp->ar_pro != htons(AX25_P_IP))
829 {
830 kfree_skb(skb, FREE_READ);
831 return 0;
832 }
833 break;
834 #endif
835 case ARPHRD_ETHER:
836 case ARPHRD_ARCNET:
837 if(arp->ar_pro != htons(ETH_P_IP))
838 {
839 kfree_skb(skb, FREE_READ);
840 return 0;
841 }
842 break;
843
844 case ARPHRD_IEEE802:
845 if(arp->ar_pro != htons(ETH_P_IP))
846 {
847 kfree_skb(skb, FREE_READ);
848 return 0;
849 }
850 break;
851
852 default:
853 printk("ARP: dev->type mangled!\n");
854 kfree_skb(skb, FREE_READ);
855 return 0;
856 }
857
858
859
860
861
862 sha=arp_ptr;
863 arp_ptr += dev->addr_len;
864 memcpy(&sip, arp_ptr, 4);
865 arp_ptr += 4;
866 tha=arp_ptr;
867 arp_ptr += dev->addr_len;
868 memcpy(&tip, arp_ptr, 4);
869
870
871
872
873
874 if (LOOPBACK(tip) || MULTICAST(tip))
875 {
876 kfree_skb(skb, FREE_READ);
877 return 0;
878 }
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901 #ifdef CONFIG_NET_ALIAS
902 if (tip != dev->pa_addr && net_alias_has(skb->dev))
903 {
904
905
906
907 dev = net_alias_dev_rcv_sel32(dev, AF_INET, sip, tip);
908
909 if (dev->type != ntohs(arp->ar_hrd) || dev->flags & IFF_NOARP)
910 {
911 kfree_skb(skb, FREE_READ);
912 return 0;
913 }
914 }
915 #endif
916
917 if (arp->ar_op == htons(ARPOP_REQUEST))
918 {
919
920
921
922 if (tip != dev->pa_addr)
923 {
924
925
926
927
928
929 arp_fast_lock();
930
931 for (proxy_entry=arp_proxy_list;
932 proxy_entry;
933 proxy_entry = proxy_entry->next)
934 {
935
936
937
938
939
940
941
942
943 if (proxy_entry->dev == dev &&
944 !((proxy_entry->ip^tip)&proxy_entry->mask))
945 break;
946
947 }
948 if (proxy_entry)
949 {
950 memcpy(ha, proxy_entry->ha, dev->addr_len);
951 arp_unlock();
952 arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,ha, sha);
953 kfree_skb(skb, FREE_READ);
954 return 0;
955 }
956 else
957 {
958 arp_unlock();
959 kfree_skb(skb, FREE_READ);
960 return 0;
961 }
962 }
963 else
964 {
965
966
967
968 arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
969 }
970 }
971
972
973
974 if(ip_chk_addr(tip)!=IS_MYADDR)
975 {
976
977
978
979 kfree_skb(skb, FREE_READ);
980 return 0;
981 }
982
983
984
985
986
987
988 arp_fast_lock();
989
990 hash = HASH(sip);
991
992 for (entry=arp_tables[hash]; entry; entry=entry->next)
993 if (entry->ip == sip && entry->dev == dev)
994 break;
995
996 if (entry)
997 {
998
999
1000
1001 if (!(entry->flags & ATF_PERM)) {
1002 memcpy(entry->ha, sha, dev->addr_len);
1003 entry->last_updated = jiffies;
1004 }
1005 if (!(entry->flags & ATF_COM))
1006 {
1007
1008
1009
1010
1011 del_timer(&entry->timer);
1012 entry->flags |= ATF_COM;
1013 arp_update_hhs(entry);
1014
1015
1016
1017
1018
1019 arp_send_q(entry);
1020 }
1021 }
1022 else
1023 {
1024
1025
1026
1027 entry = (struct arp_table *)kmalloc(sizeof(struct arp_table),GFP_ATOMIC);
1028 if(entry == NULL)
1029 {
1030 arp_unlock();
1031 printk("ARP: no memory for new arp entry\n");
1032 kfree_skb(skb, FREE_READ);
1033 return 0;
1034 }
1035
1036 entry->mask = DEF_ARP_NETMASK;
1037 entry->ip = sip;
1038 entry->flags = ATF_COM;
1039 entry->hh = NULL;
1040 init_timer(&entry->timer);
1041 entry->timer.function = arp_expire_request;
1042 entry->timer.data = (unsigned long)entry;
1043 memcpy(entry->ha, sha, dev->addr_len);
1044 entry->last_updated = entry->last_used = jiffies;
1045
1046
1047
1048
1049 #ifdef CONFIG_NET_ALIAS
1050 entry->dev = dev;
1051 #else
1052 entry->dev = skb->dev;
1053 #endif
1054 skb_queue_head_init(&entry->skb);
1055 if (arp_lock == 1)
1056 {
1057 entry->next = arp_tables[hash];
1058 arp_tables[hash] = entry;
1059 }
1060 else
1061 {
1062 #if RT_CACHE_DEBUG >= 1
1063 printk("arp_rcv: %08x backlogged\n", entry->ip);
1064 #endif
1065 arp_enqueue(&arp_backlog, entry);
1066 arp_bh_mask |= ARP_BH_BACKLOG;
1067 }
1068 }
1069
1070
1071
1072
1073 kfree_skb(skb, FREE_READ);
1074 arp_unlock();
1075 return 0;
1076 }
1077
1078
1079
1080
1081
1082
1083
1084
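/*
 * Find the entry for an address.  Without ATF_PUBL the hash buckets are
 * searched; with ATF_PUBL the proxy list is searched instead, matching on
 * each proxy entry's netmask when ATF_NETMASK is also set.
 */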
1085 static struct arp_table *arp_lookup(u32 paddr, unsigned short flags, struct device * dev)
1086 {
1087 struct arp_table *entry;
1088
1089 if (!(flags & ATF_PUBL))
1090 {
1091 for (entry = arp_tables[HASH(paddr)];
1092 entry != NULL; entry = entry->next)
1093 if (entry->ip == paddr && (!dev || entry->dev == dev))
1094 break;
1095 return entry;
1096 }
1097
1098 if (!(flags & ATF_NETMASK))
1099 {
1100 for (entry = arp_proxy_list;
1101 entry != NULL; entry = entry->next)
1102 if (entry->ip == paddr && (!dev || entry->dev == dev))
1103 break;
1104 return entry;
1105 }
1106
1107 for (entry=arp_proxy_list; entry != NULL; entry = entry->next)
1108 if (!((entry->ip^paddr)&entry->mask) &&
1109 (!dev || entry->dev == dev))
1110 break;
1111 return entry;
1112 }
1113
1114
1115
1116
1117
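/*
 * Copy the hardware address for an IP address into haddr.  Returns 1 when
 * a completed entry exists, 0 otherwise.
 */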
1118 int arp_query(unsigned char *haddr, u32 paddr, struct device * dev)
1119 {
1120 struct arp_table *entry;
1121
1122 arp_fast_lock();
1123
1124 entry = arp_lookup(paddr, 0, dev);
1125
1126 if (entry != NULL)
1127 {
1128 entry->last_used = jiffies;
1129 if (entry->flags & ATF_COM)
1130 {
1131 memcpy(haddr, entry->ha, dev->addr_len);
1132 arp_unlock();
1133 return 1;
1134 }
1135 }
1136 arp_unlock();
1137 return 0;
1138 }
1139
1140
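/*
 * Handle addresses that never need a lookup: our own address, IP multicast
 * (mapped algorithmically onto a hardware multicast address) and
 * broadcast.  Returns 1 if haddr was filled in.
 */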
1141 static int arp_set_predefined(int addr_hint, unsigned char * haddr, __u32 paddr, struct device * dev)
1142 {
1143 switch (addr_hint)
1144 {
1145 case IS_MYADDR:
1146 printk("ARP: arp called for own IP address\n");
1147 memcpy(haddr, dev->dev_addr, dev->addr_len);
1148 return 1;
1149 #ifdef CONFIG_IP_MULTICAST
1150 case IS_MULTICAST:
1151 if(dev->type==ARPHRD_ETHER || dev->type==ARPHRD_IEEE802)
1152 {
1153 u32 taddr;
1154 haddr[0]=0x01;
1155 haddr[1]=0x00;
1156 haddr[2]=0x5e;
1157 taddr=ntohl(paddr);
1158 haddr[5]=taddr&0xff;
1159 taddr=taddr>>8;
1160 haddr[4]=taddr&0xff;
1161 taddr=taddr>>8;
1162 haddr[3]=taddr&0x7f;
1163 return 1;
1164 }
1165
1166
1167
1168 #endif
1169
1170 case IS_BROADCAST:
1171 memcpy(haddr, dev->broadcast, dev->addr_len);
1172 return 1;
1173 }
1174 return 0;
1175 }
1176
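/*
 * The IS_MULTICAST branch above is the standard IPv4-to-Ethernet multicast
 * mapping: the low 23 bits of the group address placed behind the fixed
 * 01:00:5e prefix.  A self-contained userspace sketch of the same mapping
 * follows; the function and variable names are hypothetical, chosen for
 * the illustration only.
 */

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Map an IPv4 multicast group (network byte order) to its Ethernet
 * multicast MAC address: 01:00:5e plus the low 23 bits of the group. */
static void ip_mc_map(uint32_t group_be, unsigned char mac[6])
{
	uint32_t g = ntohl(group_be);

	mac[0] = 0x01;
	mac[1] = 0x00;
	mac[2] = 0x5e;
	mac[3] = (g >> 16) & 0x7f;	/* the 24th bit is dropped */
	mac[4] = (g >> 8) & 0xff;
	mac[5] = g & 0xff;
}

int main(void)
{
	unsigned char mac[6];

	ip_mc_map(inet_addr("224.0.0.251"), mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	/* prints 01:00:5e:00:00:fb */
	return 0;
}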
1177
1178
1179
1180
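/*
 * Resolve an IP address for an outgoing packet.  Returns 0 with haddr
 * filled in when the answer is already known (predefined address or a
 * completed entry).  Otherwise the skb, if any, is queued on the existing
 * or newly created incomplete entry (or discarded if the host looks dead),
 * resolution proceeds via the entry's timer, and 1 is returned so the
 * caller knows the packet could not be sent yet.
 */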
1181 int arp_find(unsigned char *haddr, u32 paddr, struct device *dev,
1182 u32 saddr, struct sk_buff *skb)
1183 {
1184 struct arp_table *entry;
1185 unsigned long hash;
1186
1187 if (arp_set_predefined(ip_chk_addr(paddr), haddr, paddr, dev))
1188 {
1189 if (skb)
1190 skb->arp = 1;
1191 return 0;
1192 }
1193
1194 hash = HASH(paddr);
1195 arp_fast_lock();
1196
1197
1198
1199
1200 entry = arp_lookup(paddr, 0, dev);
1201
1202 if (entry != NULL)
1203 {
1204 if (!(entry->flags & ATF_COM))
1205 {
1206
1207
1208
1209
1210
1211 if (skb != NULL)
1212 {
1213 if (entry->last_updated)
1214 {
1215 skb_queue_tail(&entry->skb, skb);
1216 skb_device_unlock(skb);
1217 }
1218
1219
1220
1221
1222 else
1223 {
1224 #if 0
1225
1226
1227
1228
1229 if (skb->sk)
1230 {
1231 skb->sk->err = EHOSTDOWN;
1232 skb->sk->error_report(skb->sk);
1233 }
1234 #else
1235 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
1236 #endif
1237 dev_kfree_skb(skb, FREE_WRITE);
1238 }
1239 }
1240 arp_unlock();
1241 return 1;
1242 }
1243
1244
1245
1246
1247
1248 entry->last_used = jiffies;
1249 memcpy(haddr, entry->ha, dev->addr_len);
1250 if (skb)
1251 skb->arp = 1;
1252 arp_unlock();
1253 return 0;
1254 }
1255
1256
1257
1258
1259
1260 entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
1261 GFP_ATOMIC);
1262 if (entry != NULL)
1263 {
1264 entry->last_updated = entry->last_used = jiffies;
1265 entry->flags = 0;
1266 entry->ip = paddr;
1267 entry->mask = DEF_ARP_NETMASK;
1268 memset(entry->ha, 0, dev->addr_len);
1269 entry->dev = dev;
1270 entry->hh = NULL;
1271 init_timer(&entry->timer);
1272 entry->timer.function = arp_expire_request;
1273 entry->timer.data = (unsigned long)entry;
1274 entry->timer.expires = jiffies + ARP_RES_TIME;
1275 skb_queue_head_init(&entry->skb);
1276 if (skb != NULL)
1277 {
1278 skb_queue_tail(&entry->skb, skb);
1279 skb_device_unlock(skb);
1280 }
1281 if (arp_lock == 1)
1282 {
1283 entry->next = arp_tables[hash];
1284 arp_tables[hash] = entry;
1285 add_timer(&entry->timer);
1286 entry->retries = ARP_MAX_TRIES;
1287 }
1288 else
1289 {
1290 #if RT_CACHE_DEBUG >= 1
1291 printk("arp_find: %08x backlogged\n", entry->ip);
1292 #endif
1293 arp_enqueue(&arp_backlog, entry);
1294 arp_bh_mask |= ARP_BH_BACKLOG;
1295 }
1296 }
1297 else if (skb != NULL)
1298 dev_kfree_skb(skb, FREE_WRITE);
1299 arp_unlock();
1300
1301
1302
1303
1304
1305 arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, saddr, NULL,
1306 dev->dev_addr, NULL);
1307
1308 return 1;
1309 }
1310
1311
1312
1313
1314
1315
1316 #define HBUFFERLEN 30
1317
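/*
 * Produce the contents of /proc/net/arp: one line per cache and proxy
 * entry.
 */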
1318 int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
1319 {
1320 int len=0;
1321 off_t pos=0;
1322 int size;
1323 struct arp_table *entry;
1324 char hbuffer[HBUFFERLEN];
1325 int i,j,k;
1326 const char hexbuf[] = "0123456789ABCDEF";
1327
1328 size = sprintf(buffer,"IP address HW type Flags HW address Mask Device\n");
1329
1330 pos+=size;
1331 len+=size;
1332
1333 arp_fast_lock();
1334
1335 for(i=0; i<FULL_ARP_TABLE_SIZE; i++)
1336 {
1337 for(entry=arp_tables[i]; entry!=NULL; entry=entry->next)
1338 {
1339
1340
1341
1342 #ifdef CONFIG_AX25
1343 #ifdef CONFIG_NETROM
1344 if (entry->dev->type == ARPHRD_AX25 || entry->dev->type == ARPHRD_NETROM)
1345 strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
1346 else {
1347 #else
1348 if(entry->dev->type==ARPHRD_AX25)
1349 strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
1350 else {
1351 #endif
1352 #endif
1353
1354 for(k=0,j=0;k<HBUFFERLEN-3 && j<entry->dev->addr_len;j++)
1355 {
1356 hbuffer[k++]=hexbuf[ (entry->ha[j]>>4)&15 ];
1357 hbuffer[k++]=hexbuf[ entry->ha[j]&15 ];
1358 hbuffer[k++]=':';
1359 }
1360 hbuffer[--k]=0;
1361
1362 #ifdef CONFIG_AX25
1363 }
1364 #endif
1365 size = sprintf(buffer+len,
1366 "%-17s0x%-10x0x%-10x%s",
1367 in_ntoa(entry->ip),
1368 (unsigned int)entry->dev->type,
1369 entry->flags,
1370 hbuffer);
1371 #if RT_CACHE_DEBUG < 2
1372 size += sprintf(buffer+len+size,
1373 " %-17s %s\n",
1374 entry->mask==DEF_ARP_NETMASK ?
1375 "*" : in_ntoa(entry->mask), entry->dev->name);
1376 #else
1377 size += sprintf(buffer+len+size,
1378 " %-17s %s\t%ld\t%1d\n",
1379 entry->mask==DEF_ARP_NETMASK ?
1380 "*" : in_ntoa(entry->mask), entry->dev->name,
1381 entry->hh ? entry->hh->hh_refcnt : -1,
1382 entry->hh ? entry->hh->hh_uptodate : 0);
1383 #endif
1384
1385 len += size;
1386 pos += size;
1387
1388 if (pos <= offset)
1389 len=0;
1390 if (pos >= offset+length)
1391 break;
1392 }
1393 }
1394 arp_unlock();
1395
1396 *start = buffer+len-(pos-offset);
1397 len = pos-offset;
1398 if (len>length)
1399 len = length;
1400 return len;
1401 }
1402
1403
1404
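/*
 * Bind a hardware header cache entry (hh_cache) for the given protocol
 * address to *hhp.  Predefined addresses get their header built on the
 * spot; otherwise the hh is attached to an existing ARP entry, or to a
 * newly created one whose resolution is started here.
 */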
1405 int arp_bind_cache(struct hh_cache ** hhp, struct device *dev, unsigned short htype, u32 paddr)
1406 {
1407 struct arp_table *entry;
1408 struct hh_cache *hh = *hhp;
1409 int addr_hint;
1410 unsigned long flags;
1411
1412 if (hh)
1413 return 1;
1414
1415 if ((addr_hint = ip_chk_addr(paddr)) != 0)
1416 {
1417 unsigned char haddr[MAX_ADDR_LEN];
1418 if (hh)
1419 return 1;
1420 hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
1421 if (!hh)
1422 return 1;
1423 arp_set_predefined(addr_hint, haddr, paddr, dev);
1424 hh->hh_uptodate = 0;
1425 hh->hh_refcnt = 1;
1426 hh->hh_arp = NULL;
1427 hh->hh_next = NULL;
1428 hh->hh_type = htype;
1429 *hhp = hh;
1430 dev->header_cache_update(hh, dev, haddr);
1431 return 0;
1432 }
1433
1434 save_flags(flags);
1435
1436 arp_fast_lock();
1437
1438 entry = arp_lookup(paddr, 0, dev);
1439
1440 if (entry)
1441 {
1442 cli();
1443 for (hh = entry->hh; hh; hh=hh->hh_next)
1444 if (hh->hh_type == htype)
1445 break;
1446 if (hh)
1447 {
1448 hh->hh_refcnt++;
1449 *hhp = hh;
1450 restore_flags(flags);
1451 arp_unlock();
1452 return 1;
1453 }
1454 restore_flags(flags);
1455 }
1456
1457 hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
1458 if (!hh)
1459 {
1460 arp_unlock();
1461 return 1;
1462 }
1463
1464 hh->hh_uptodate = 0;
1465 hh->hh_refcnt = 1;
1466 hh->hh_arp = NULL;
1467 hh->hh_next = NULL;
1468 hh->hh_type = htype;
1469
1470 if (entry)
1471 {
1472 dev->header_cache_update(hh, dev, entry->ha);
1473 *hhp = hh;
1474 cli();
1475 hh->hh_arp = (void*)entry;
1476 entry->hh = hh;
1477 hh->hh_refcnt++;
1478 restore_flags(flags);
1479 entry->last_used = jiffies;
1480 arp_unlock();
1481 return 0;
1482 }
1483
1484
1485
1486
1487
1488
1489 entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
1490 GFP_ATOMIC);
1491 if (entry == NULL)
1492 {
1493 kfree_s(hh, sizeof(struct hh_cache));
1494 arp_unlock();
1495 return 1;
1496 }
1497
1498 entry->last_updated = entry->last_used = jiffies;
1499 entry->flags = 0;
1500 entry->ip = paddr;
1501 entry->mask = DEF_ARP_NETMASK;
1502 memset(entry->ha, 0, dev->addr_len);
1503 entry->dev = dev;
1504 entry->hh = hh;
1505 ATOMIC_INCR(&hh->hh_refcnt);
1506 init_timer(&entry->timer);
1507 entry->timer.function = arp_expire_request;
1508 entry->timer.data = (unsigned long)entry;
1509 entry->timer.expires = jiffies + ARP_RES_TIME;
1510 skb_queue_head_init(&entry->skb);
1511
1512 if (arp_lock == 1)
1513 {
1514 unsigned long hash = HASH(paddr);
1515 cli();
1516 entry->next = arp_tables[hash];
1517 arp_tables[hash] = entry;
1518 hh->hh_arp = (void*)entry;
1519 entry->retries = ARP_MAX_TRIES;
1520 restore_flags(flags);
1521
1522 add_timer(&entry->timer);
1523 arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
1524 }
1525 else
1526 {
1527 #if RT_CACHE_DEBUG >= 1
1528 printk("arp_cache_bind: %08x backlogged\n", entry->ip);
1529 #endif
1530 arp_enqueue(&arp_backlog, entry);
1531 arp_bh_mask |= ARP_BH_BACKLOG;
1532 }
1533 *hhp = hh;
1534 arp_unlock();
1535 return 0;
1536 }
1537
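/*
 * Deferred work: entries created while the cache was locked sit on
 * arp_backlog.  Move them into the hash table now, merging each one with
 * any entry that appeared for the same address in the meantime.
 */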
1538 static void arp_run_bh()
1539 {
1540 unsigned long flags;
1541 struct arp_table *entry, *entry1;
1542 struct hh_cache *hh;
1543 __u32 sip;
1544
1545 save_flags(flags);
1546 cli();
1547 if (!arp_lock)
1548 {
1549 arp_fast_lock();
1550
1551 while ((entry = arp_dequeue(&arp_backlog)) != NULL)
1552 {
1553 unsigned long hash;
1554 sti();
1555 sip = entry->ip;
1556 hash = HASH(sip);
1557
1558
1559
1560
1561
1562 for (entry1=arp_tables[hash]; entry1; entry1=entry1->next)
1563 if (entry1->ip==sip && entry1->dev == entry->dev)
1564 break;
1565
1566 if (!entry1)
1567 {
1568 struct device * dev = entry->dev;
1569 cli();
1570 entry->next = arp_tables[hash];
1571 arp_tables[hash] = entry;
1572 for (hh=entry->hh; hh; hh=hh->hh_next)
1573 hh->hh_arp = (void*)entry;
1574 sti();
1575 del_timer(&entry->timer);
1576 entry->timer.expires = jiffies + ARP_RES_TIME;
1577 add_timer(&entry->timer);
1578 entry->retries = ARP_MAX_TRIES;
1579 arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
1580 #if RT_CACHE_DEBUG >= 1
1581 printk("arp_run_bh: %08x reinstalled\n", sip);
1582 #endif
1583 }
1584 else
1585 {
1586 struct sk_buff * skb;
1587 struct hh_cache * next;
1588
1589
1590
1591
1592 cli();
1593 for (hh=entry->hh; hh; hh=next)
1594 {
1595 next = hh->hh_next;
1596 hh->hh_next = entry1->hh;
1597 entry1->hh = hh;
1598 hh->hh_arp = (void*)entry1;
1599 }
1600 entry->hh = NULL;
1601
1602
1603
1604
1605 while ((skb = skb_dequeue(&entry->skb)) != NULL)
1606 {
1607 skb_device_lock(skb);
1608 sti();
1609 skb_queue_tail(&entry1->skb, skb);
1610 skb_device_unlock(skb);
1611 cli();
1612 }
1613 sti();
1614
1615 #if RT_CACHE_DEBUG >= 1
1616 printk("arp_run_bh: entry %08x was born dead\n", entry->ip);
1617 #endif
1618 arp_free_entry(entry);
1619
1620 if (entry1->flags & ATF_COM)
1621 {
1622 arp_update_hhs(entry1);
1623 arp_send_q(entry1);
1624 }
1625 }
1626 cli();
1627 }
1628 arp_bh_mask &= ~ARP_BH_BACKLOG;
1629 arp_unlock();
1630 }
1631 restore_flags(flags);
1632 }
1633
1634
1635
1636
1637
1638 static inline int empty(unsigned char * addr, int len)
1639 {
1640 while (len > 0) {
1641 if (*addr)
1642 return 0;
1643 len--;
1644 addr++;
1645 }
1646 return 1;
1647 }
1648
1649
1650
1651
1652
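/*
 * Create or replace an entry from an SIOCSARP request.
 */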
1653 static int arp_req_set(struct arpreq *r, struct device * dev)
1654 {
1655 struct arp_table *entry;
1656 struct sockaddr_in *si;
1657 struct rtable *rt;
1658 struct device *dev1;
1659 unsigned char *ha;
1660 u32 ip;
1661
1662
1663
1664
1665
1666 si = (struct sockaddr_in *) &r->arp_pa;
1667 ip = si->sin_addr.s_addr;
1668
1669
1670
1671
1672
1673 if (ip_chk_addr(ip) == IS_MYADDR)
1674 dev1 = dev_get("lo");
1675 else {
1676 rt = ip_rt_route(ip, 0);
1677 if (!rt)
1678 return -ENETUNREACH;
1679 dev1 = rt->rt_dev;
1680 ip_rt_put(rt);
1681 }
1682
1683 if (!dev)
1684 dev = dev1;
1685
1686 if (((r->arp_flags & ATF_PUBL) && dev == dev1) ||
1687 (!(r->arp_flags & ATF_PUBL) && dev != dev1))
1688 return -EINVAL;
1689
1690 #if RT_CACHE_DEBUG >= 1
1691 if (arp_lock)
1692 printk("arp_req_set: bug\n");
1693 #endif
1694 arp_fast_lock();
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704 entry = arp_lookup(ip, r->arp_flags & ~ATF_NETMASK, dev);
1705
1706 if (entry)
1707 {
1708 arp_destroy(entry);
1709 entry = NULL;
1710 }
1711
1712
1713
1714
1715
1716 if (entry == NULL)
1717 {
1718 entry = (struct arp_table *) kmalloc(sizeof(struct arp_table),
1719 GFP_ATOMIC);
1720 if (entry == NULL)
1721 {
1722 arp_unlock();
1723 return -ENOMEM;
1724 }
1725 entry->ip = ip;
1726 entry->hh = NULL;
1727 init_timer(&entry->timer);
1728 entry->timer.function = arp_expire_request;
1729 entry->timer.data = (unsigned long)entry;
1730
1731 if (r->arp_flags & ATF_PUBL)
1732 {
1733 cli();
1734 entry->next = arp_proxy_list;
1735 arp_proxy_list = entry;
1736 sti();
1737 }
1738 else
1739 {
1740 unsigned long hash = HASH(ip);
1741 cli();
1742 entry->next = arp_tables[hash];
1743 arp_tables[hash] = entry;
1744 sti();
1745 }
1746 skb_queue_head_init(&entry->skb);
1747 }
1748
1749
1750
1751 ha = r->arp_ha.sa_data;
1752 if ((r->arp_flags & ATF_COM) && empty(ha, dev->addr_len))
1753 ha = dev->dev_addr;
1754 memcpy(entry->ha, ha, dev->addr_len);
1755 entry->last_updated = entry->last_used = jiffies;
1756 entry->flags = r->arp_flags | ATF_COM;
1757 if ((entry->flags & ATF_PUBL) && (entry->flags & ATF_NETMASK))
1758 {
1759 si = (struct sockaddr_in *) &r->arp_netmask;
1760 entry->mask = si->sin_addr.s_addr;
1761 }
1762 else
1763 entry->mask = DEF_ARP_NETMASK;
1764 entry->dev = dev;
1765 arp_update_hhs(entry);
1766 arp_unlock();
1767 return 0;
1768 }
1769
1770
1771
1772
1773
1774
1775
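/*
 * Fill in an arpreq from the matching cache or proxy entry (SIOCGARP).
 */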
1776 static int arp_req_get(struct arpreq *r, struct device *dev)
1777 {
1778 struct arp_table *entry;
1779 struct sockaddr_in *si;
1780
1781 si = (struct sockaddr_in *) &r->arp_pa;
1782
1783 #if RT_CACHE_DEBUG >= 1
1784 if (arp_lock)
1785 printk("arp_req_set: bug\n");
1786 #endif
1787 arp_fast_lock();
1788
1789 entry = arp_lookup(si->sin_addr.s_addr, r->arp_flags|ATF_NETMASK, dev);
1790
1791 if (entry == NULL)
1792 {
1793 arp_unlock();
1794 return -ENXIO;
1795 }
1796
1797
1798
1799
1800
1801 memcpy(r->arp_ha.sa_data, &entry->ha, entry->dev->addr_len);
1802 r->arp_ha.sa_family = entry->dev->type;
1803 r->arp_flags = entry->flags;
1804 strncpy(r->arp_dev, entry->dev->name, 16);
1805 arp_unlock();
1806 return 0;
1807 }
1808
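/*
 * Remove a cache or proxy entry (SIOCDARP).
 */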
1809 static int arp_req_delete(struct arpreq *r, struct device * dev)
1810 {
1811 struct arp_table *entry;
1812 struct sockaddr_in *si;
1813
1814 si = (struct sockaddr_in *) &r->arp_pa;
1815 #if RT_CACHE_DEBUG >= 1
1816 if (arp_lock)
1817 printk("arp_req_delete: bug\n");
1818 #endif
1819 arp_fast_lock();
1820
1821 if (!(r->arp_flags & ATF_PUBL))
1822 {
1823 for (entry = arp_tables[HASH(si->sin_addr.s_addr)];
1824 entry != NULL; entry = entry->next)
1825 if (entry->ip == si->sin_addr.s_addr
1826 && (!dev || entry->dev == dev))
1827 {
1828 arp_destroy(entry);
1829 arp_unlock();
1830 return 0;
1831 }
1832 }
1833 else
1834 {
1835 for (entry = arp_proxy_list;
1836 entry != NULL; entry = entry->next)
1837 if (entry->ip == si->sin_addr.s_addr
1838 && (!dev || entry->dev == dev))
1839 {
1840 arp_destroy(entry);
1841 arp_unlock();
1842 return 0;
1843 }
1844 }
1845
1846 arp_unlock();
1847 return -ENXIO;
1848 }
1849
1850
1851
1852
1853
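/*
 * Entry point for the ARP ioctls, including the old-style OLD_SIOC*ARP
 * variants whose request structure carried no device name.
 */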
1854 int arp_ioctl(unsigned int cmd, void *arg)
1855 {
1856 int err;
1857 struct arpreq r;
1858
1859 struct device * dev = NULL;
1860
1861 switch(cmd)
1862 {
1863 case SIOCDARP:
1864 case SIOCSARP:
1865 if (!suser())
1866 return -EPERM;
1867 case SIOCGARP:
1868 err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq));
1869 if (err)
1870 return err;
1871 memcpy_fromfs(&r, arg, sizeof(struct arpreq));
1872 break;
1873 case OLD_SIOCDARP:
1874 case OLD_SIOCSARP:
1875 if (!suser())
1876 return -EPERM;
1877 case OLD_SIOCGARP:
1878 err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq_old));
1879 if (err)
1880 return err;
1881 memcpy_fromfs(&r, arg, sizeof(struct arpreq_old));
1882 memset(&r.arp_dev, 0, sizeof(r.arp_dev));
1883 break;
1884 default:
1885 return -EINVAL;
1886 }
1887
1888 if (r.arp_pa.sa_family != AF_INET)
1889 return -EPFNOSUPPORT;
1890 if (((struct sockaddr_in *)&r.arp_pa)->sin_addr.s_addr == 0)
1891 return -EINVAL;
1892
1893 if (r.arp_dev[0])
1894 {
1895 if ((dev = dev_get(r.arp_dev)) == NULL)
1896 return -ENODEV;
1897
1898 if (!r.arp_ha.sa_family)
1899 r.arp_ha.sa_family = dev->type;
1900 else if (r.arp_ha.sa_family != dev->type)
1901 return -EINVAL;
1902 }
1903 else
1904 {
1905 if ((r.arp_flags & ATF_PUBL) &&
1906 ((cmd == SIOCSARP) || (cmd == OLD_SIOCSARP))) {
1907 if ((dev = dev_getbytype(r.arp_ha.sa_family)) == NULL)
1908 return -ENODEV;
1909 }
1910 }
1911
1912 switch(cmd)
1913 {
1914 case SIOCDARP:
1915 return arp_req_delete(&r, dev);
1916 case SIOCSARP:
1917 return arp_req_set(&r, dev);
1918 case OLD_SIOCDARP:
1919
1920
1921
1922 r.arp_flags &= ~ATF_PUBL;
1923 err = arp_req_delete(&r, dev);
1924 r.arp_flags |= ATF_PUBL;
1925 if (!err)
1926 arp_req_delete(&r, dev);
1927 else
1928 err = arp_req_delete(&r, dev);
1929 return err;
1930 case OLD_SIOCSARP:
1931 err = arp_req_set(&r, dev);
1932
1933
1934
1935
1936
1937 if (r.arp_flags & ATF_PUBL)
1938 {
1939 r.arp_flags &= ~ATF_PUBL;
1940 arp_req_delete(&r, dev);
1941 }
1942 return err;
1943 case SIOCGARP:
1944 err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq));
1945 if (err)
1946 return err;
1947 err = arp_req_get(&r, dev);
1948 if (!err)
1949 memcpy_tofs(arg, &r, sizeof(r));
1950 return err;
1951 case OLD_SIOCGARP:
1952 err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq_old));
1953 if (err)
1954 return err;
1955 r.arp_flags &= ~ATF_PUBL;
1956 err = arp_req_get(&r, dev);
1957 if (err < 0)
1958 {
1959 r.arp_flags |= ATF_PUBL;
1960 err = arp_req_get(&r, dev);
1961 }
1962 if (!err)
1963 memcpy_tofs(arg, &r, sizeof(struct arpreq_old));
1964 return err;
1965 }
1966
1967 return 0;
1968 }
1969
1970
1971
1972
1973
1974
1975 static struct packet_type arp_packet_type =
1976 {
1977 0,
1978 NULL,
1979 arp_rcv,
1980 NULL,
1981 NULL
1982 };
1983
1984 static struct notifier_block arp_dev_notifier={
1985 arp_device_event,
1986 NULL,
1987 0
1988 };
1989
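/*
 * Called once at boot: register the ARP packet type, start the expiry
 * timer, hook the device notifier and register /proc/net/arp.
 */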
1990 void arp_init (void)
1991 {
1992
1993 arp_packet_type.type=htons(ETH_P_ARP);
1994 dev_add_pack(&arp_packet_type);
1995
1996 add_timer(&arp_timer);
1997
1998 register_netdevice_notifier(&arp_dev_notifier);
1999
2000 proc_net_register(&(struct proc_dir_entry) {
2001 PROC_NET_ARP, 3, "arp",
2002 S_IFREG | S_IRUGO, 1, 0, 0,
2003 0, &proc_net_inode_operations,
2004 arp_get_info
2005 });
2006 }
2007