This source file includes the following definitions:
- arp_fast_lock
- arp_unlock
- arp_enqueue
- arp_dequeue
- arp_purge_send_q
- arp_free_entry
- arp_count_hhs
- arp_update_hhs
- arp_invalidate_hhs
- arp_set_hh
- arp_alloc_hh
- empty
- arpd_send
- arpd_update
- arpd_lookup
- arpd_flush
- arpd_callback
- arpd_update
- arp_force_expire
- arp_check_expire
- arp_expire_request
- arp_alloc_entry
- arp_device_event
- arp_send_q
- arp_update
- arp_lookup
- arp_query
- arp_set_predefined
- arp_new_entry
- arp_find
- arp_bind_cache
- arp_run_bh
- arp_send
- arp_rcv
- arp_req_set
- arp_req_get
- arp_req_delete
- arp_ioctl
- arp_get_info
- arp_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70 #include <linux/types.h>
71 #include <linux/string.h>
72 #include <linux/kernel.h>
73 #include <linux/sched.h>
74 #include <linux/config.h>
75 #include <linux/socket.h>
76 #include <linux/sockios.h>
77 #include <linux/errno.h>
78 #include <linux/in.h>
79 #include <linux/mm.h>
80 #include <linux/inet.h>
81 #include <linux/netdevice.h>
82 #include <linux/etherdevice.h>
83 #include <linux/if_arp.h>
84 #include <linux/trdevice.h>
85 #include <linux/skbuff.h>
86 #include <linux/proc_fs.h>
87 #include <linux/stat.h>
88
89 #include <net/ip.h>
90 #include <net/icmp.h>
91 #include <net/route.h>
92 #include <net/protocol.h>
93 #include <net/tcp.h>
94 #include <net/sock.h>
95 #include <net/arp.h>
96 #ifdef CONFIG_AX25
97 #include <net/ax25.h>
98 #ifdef CONFIG_NETROM
99 #include <net/netrom.h>
100 #endif
101 #endif
102 #ifdef CONFIG_NET_ALIAS
103 #include <linux/net_alias.h>
104 #endif
105 #ifdef CONFIG_ARPD
106 #include <net/netlink.h>
107 #endif
108
109 #include <asm/system.h>
110 #include <asm/segment.h>
111
112 #include <stdarg.h>
113
114
115
116
117
118
119
120
121
122
123
#ifndef CONFIG_ARPD
/* Lifetime of an unused, unreferenced cache entry. */
#define ARP_TIMEOUT		(600*HZ)
#else
/* With a user-space ARP daemon the in-kernel cache can be short-lived;
 * long-term state lives in the daemon (see ARPD_TIMEOUT). */
#define ARP_TIMEOUT		(60*HZ)
#define ARPD_TIMEOUT		(600*HZ)
#endif

/* How often the periodic sweep (arp_check_expire) runs. */
#define ARP_CHECK_INTERVAL	(60*HZ)

/* Soft limit on the number of cache entries; arp_force_expire()
 * reclaims space when it is reached. */
#if RT_CACHE_DEBUG >= 2
#define ARP_MAXSIZE	4
#else
#ifdef CONFIG_ARPD
#define ARP_MAXSIZE	64
#else
#define ARP_MAXSIZE	256
#endif
#endif

/* Delay between retransmitted requests while resolving an address. */
#define ARP_RES_TIME		(5*HZ)

/* Broadcast request attempts before an address is declared dead. */
#define ARP_MAX_TRIES		3

/* Entries not updated for this long get re-confirmed with a
 * unicast probe (see arp_check_expire). */
#define ARP_CONFIRM_INTERVAL	(300*HZ)

/* Timeout for such a confirmation probe. */
#define ARP_CONFIRM_TIMEOUT	ARP_RES_TIME

/* Unicast confirmation probes sent before falling back to broadcast
 * (retries is initialised to ARP_MAX_TRIES+ARP_MAX_PINGS). */
#define ARP_MAX_PINGS		1

/* Retransmission interval for entries known dead but still pinned
 * by hardware-header cache references. */
#define ARP_DEAD_RES_TIME	(60*HZ)
213
214
215
216
217
/*
 *	One resolved or pending ARP mapping.  Entries are chained into
 *	the hash buckets of arp_tables[] via 'next'.
 */
struct arp_table
{
	struct arp_table		*next;			/* hash-bucket chain			*/
	unsigned long			last_used;		/* jiffies of last lookup hit		*/
	unsigned long			last_updated;		/* jiffies of last hw-addr refresh; 0 marks a dead entry */
	unsigned int			flags;			/* ATF_* flags (ATF_COM, ATF_PERM, ...)	*/
	u32				ip;			/* protocol (IP) address		*/
	u32				mask;			/* netmask, used by proxy entries	*/
	unsigned char			ha[MAX_ADDR_LEN];	/* hardware address			*/
	struct device			*dev;			/* device this entry belongs to		*/
	struct hh_cache			*hh;			/* hardware-header caches bound here	*/

	/*
	 *	Resolution state: retransmit timer, remaining retries,
	 *	and packets queued while the address is being resolved.
	 */
	struct timer_list		timer;
	int				retries;
	struct sk_buff_head		skb;
};
238
239
/* Current number of entries in the cache. */
static atomic_t arp_size = 0;

#ifdef CONFIG_ARPD
static int arpd_not_running;	/* set when netlink_post() returns -EUNATCH */
static int arpd_stamp;		/* epoch counter; bumped on device changes so stale daemon replies are rejected */
#endif

/* Bitmask of deferred work for arp_run_bh(). */
static unsigned int arp_bh_mask;

#define ARP_BH_BACKLOG	1

/* Updated entries deferred because the table was locked. */
static struct arp_table *arp_backlog;

/* New, unresolved entries deferred because the table was locked. */
static struct arp_table *arp_req_backlog;


static void arp_run_bh(void);
static void arp_check_expire (unsigned long);
static int arp_update (u32 sip, char *sha, struct device * dev,
	    struct arp_table *ientry, int grat);

/* Periodic expiry timer, re-armed by arp_check_expire itself. */
static struct timer_list arp_timer =
	{ NULL, NULL, ARP_CHECK_INTERVAL, 0L, &arp_check_expire };

#define DEF_ARP_NETMASK (~0)

/*
 *	Hash table: ARP_TABLE_SIZE buckets plus one extra bucket that
 *	holds the proxy-ARP entries (arp_proxy_list).
 */
#define ARP_TABLE_SIZE  16
#define FULL_ARP_TABLE_SIZE (ARP_TABLE_SIZE+1)

struct arp_table *arp_tables[FULL_ARP_TABLE_SIZE] =
{
	NULL,
};

#define arp_proxy_list arp_tables[ARP_TABLE_SIZE]

/* Bucket index for an IP address. */
#define HASH(paddr) 	(htonl(paddr) & (ARP_TABLE_SIZE - 1))
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
/*
 *	The ARP lock: a counter incremented by every code path that is
 *	currently working on the table.  Paths that find the table busy
 *	(ARP_LOCKED()) defer their modifications onto the backlog
 *	queues, which the last unlocker replays via arp_run_bh().
 */
static atomic_t arp_lock;

/* True when someone besides the calling (fast-locked) path holds the lock. */
#define ARP_LOCKED() (arp_lock != 1)

static __inline__ void arp_fast_lock(void)
{
	atomic_inc(&arp_lock);
}

static __inline__ void arp_unlock(void)
{
	/* Last holder out runs whatever was backlogged meanwhile. */
	if (atomic_dec_and_test(&arp_lock) && arp_bh_mask)
		arp_run_bh();
}
342
343
344
345
346
/*
 *	Append an entry to a circular backlog queue.  *q points at the
 *	tail; tail->next is the head.  Runs with interrupts disabled
 *	because the backlogs are also touched from timer context.
 */
static void arp_enqueue(struct arp_table **q, struct arp_table *entry)
{
	unsigned long flags;
	struct arp_table * tail;

	save_flags(flags);
	cli();
	tail = *q;
	if (!tail)
		entry->next = entry;		/* lone element points at itself */
	else
	{
		entry->next = tail->next;	/* new tail points at head */
		tail->next = entry;
	}
	*q = entry;				/* entry becomes the new tail */
	restore_flags(flags);
	return;
}
366
367
368
369
370
371
/*
 *	Pop the head of a circular backlog queue (see arp_enqueue).
 *	Caller must have interrupts disabled.  Returns NULL when the
 *	queue is empty.
 */
static struct arp_table * arp_dequeue(struct arp_table **q)
{
	struct arp_table * entry;

	if (*q)
	{
		entry = (*q)->next;		/* head of the ring */
		(*q)->next = entry->next;
		if (entry->next == entry)	/* it was the only element */
			*q = NULL;
		entry->next = NULL;
		return entry;
	}
	return NULL;
}
387
388
389
390
391
/*
 *	Free every packet queued on an entry awaiting resolution.
 *	Interrupts are re-enabled around each free because
 *	dev_kfree_skb may do further work.
 */
static void arp_purge_send_q(struct arp_table *entry)
{
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);
	cli();

	/* Release the buffers attached to this entry, one at a time. */
	while ((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		skb_device_lock(skb);
		restore_flags(flags);
		dev_kfree_skb(skb, FREE_WRITE);
		cli();
	}
	restore_flags(flags);
	return;
}
410
411
412
413
414
415
416
417 static void arp_free_entry(struct arp_table *entry)
418 {
419 unsigned long flags;
420 struct hh_cache *hh, *next;
421
422 del_timer(&entry->timer);
423 arp_purge_send_q(entry);
424
425 save_flags(flags);
426 cli();
427 hh = entry->hh;
428 entry->hh = NULL;
429 restore_flags(flags);
430
431 for ( ; hh; hh = next)
432 {
433 next = hh->hh_next;
434 hh->hh_uptodate = 0;
435 hh->hh_next = NULL;
436 hh->hh_arp = NULL;
437 if (atomic_dec_and_test(&hh->hh_refcnt))
438 kfree_s(hh, sizeof(struct(struct hh_cache)));
439 }
440
441 kfree_s(entry, sizeof(struct arp_table));
442 atomic_dec(&arp_size);
443 return;
444 }
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461 static __inline__ int arp_count_hhs(struct arp_table * entry)
462 {
463 struct hh_cache *hh;
464 int count = 0;
465
466 for (hh = entry->hh; hh; hh = hh->hh_next)
467 count += hh->hh_refcnt-1;
468
469 return count;
470 }
471
472
473
474
475
476 static __inline__ void arp_update_hhs(struct arp_table * entry)
477 {
478 struct hh_cache *hh;
479
480 for (hh=entry->hh; hh; hh=hh->hh_next)
481 entry->dev->header_cache_update(hh, entry->dev, entry->ha);
482 }
483
484
485
486
487
488 static __inline__ void arp_invalidate_hhs(struct arp_table * entry)
489 {
490 struct hh_cache *hh;
491
492 for (hh=entry->hh; hh; hh=hh->hh_next)
493 hh->hh_uptodate = 0;
494 }
495
496
497
498
499
500
/*
 *	Install hh at *hhp unless a cache is already present there.
 *	Returns 0 when hh was installed (the caller's reference is
 *	consumed), 1 when an existing cache won the race; in that case
 *	the existing cache is re-attached to hh's ARP entry if it had
 *	come loose, and hh itself is released.
 */
static int arp_set_hh(struct hh_cache **hhp, struct hh_cache *hh)
{
	unsigned long flags;
	struct hh_cache *hh1;
	struct arp_table *entry;

	/* Hold an extra reference while we decide hh's fate. */
	atomic_inc(&hh->hh_refcnt);

	save_flags(flags);
	cli();
	if ((hh1 = *hhp) == NULL)
	{
		/* Slot was free: install hh. */
		*hhp = hh;
		restore_flags(flags);
		return 0;
	}

	entry = (struct arp_table*)hh->hh_arp;

	/*
	 *	The slot already holds a cache that lost its ARP entry;
	 *	re-attach it to the entry hh was bound to.
	 */
	if (!hh1->hh_arp && entry)
	{
		atomic_inc(&hh1->hh_refcnt);
		hh1->hh_next = entry->hh;
		entry->hh = hh1;
		hh1->hh_arp = (void*)entry;
		restore_flags(flags);

		if (entry->flags & ATF_COM)
			entry->dev->header_cache_update(hh1, entry->dev, entry->ha);
#if RT_CACHE_DEBUG >= 1
		printk("arp_set_hh: %08x is reattached. Good!\n", entry->ip);
#endif
	}
#if RT_CACHE_DEBUG >= 1
	else if (entry)
		printk("arp_set_hh: %08x rr1 ok!\n", entry->ip);
#endif
	restore_flags(flags);
	/* Drop our reference; frees hh if the caller's was the last. */
	if (atomic_dec_and_test(&hh->hh_refcnt))
		kfree_s(hh, sizeof(struct hh_cache));
	return 1;
}
547
548 static __inline__ struct hh_cache * arp_alloc_hh(int htype)
549 {
550 struct hh_cache *hh;
551 hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
552 if (hh)
553 {
554 memset(hh, 0, sizeof(struct hh_cache));
555 hh->hh_type = htype;
556 }
557 return hh;
558 }
559
560
561
562
563
/*
 *	Return 1 if the len bytes at addr are all zero, 0 otherwise.
 *	(A zero hardware address from the daemon means "no answer".)
 */
static __inline__ int empty(unsigned char * addr, int len)
{
	int i;

	for (i = 0; i < len; i++)
	{
		if (addr[i] != 0)
			return 0;
	}
	return 1;
}
575
576
577 #ifdef CONFIG_ARPD
578
579
580
581
/*
 *	Send a control message (ARPD_UPDATE/LOOKUP/FLUSH) to the
 *	user-space ARP daemon over the NETLINK_ARPD channel.  If the
 *	daemon is gone (-EUNATCH) remember that and stop trying until
 *	the flag is reset by the periodic timer.
 */
static void arpd_send(int req, u32 addr, struct device * dev, char *ha,
		      unsigned long updated)
{
	int retval;
	struct sk_buff *skb;
	struct arpd_request *arpreq;

	if (arpd_not_running)
		return;

	skb = alloc_skb(sizeof(struct arpd_request), GFP_ATOMIC);
	if (skb == NULL)
		return;

	skb->free=1;
	arpreq=(struct arpd_request *)skb_put(skb, sizeof(struct arpd_request));
	arpreq->req = req;
	arpreq->ip = addr;
	arpreq->dev = (unsigned long)dev;
	arpreq->stamp = arpd_stamp;	/* lets replies be matched to the current epoch */
	arpreq->updated = updated;
	if (ha)
		memcpy(arpreq->ha, ha, sizeof(arpreq->ha));

	retval = netlink_post(NETLINK_ARPD, skb);
	if (retval)
	{
		kfree_skb(skb, FREE_WRITE);
		if (retval == -EUNATCH)
			arpd_not_running = 1;
	}
}
614
615
616
617
618
/*
 *	Tell the user-space ARP daemon about a new or changed mapping.
 */
static __inline__ void arpd_update(struct arp_table * entry)
{
	if (arpd_not_running)
		return;
	arpd_send(ARPD_UPDATE, entry->ip, entry->dev, entry->ha,
		  entry->last_updated);
}
626
627
628
629
630
/*
 *	Ask the user-space ARP daemon whether it knows addr on dev.
 */
static __inline__ void arpd_lookup(u32 addr, struct device * dev)
{
	if (arpd_not_running)
		return;
	arpd_send(ARPD_LOOKUP, addr, dev, NULL, 0);
}
637
638
639
640
641
/*
 *	Tell the user-space ARP daemon to forget everything about dev.
 */
static __inline__ void arpd_flush(struct device * dev)
{
	if (arpd_not_running)
		return;
	arpd_send(ARPD_FLUSH, 0, dev, NULL, 0);
}
648
649
/*
 *	Receive a reply from the ARP daemon.  A usable answer is fed to
 *	arp_update(); an empty or stale one falls back to a real ARP
 *	request on the wire.  Returns bytes consumed, or -EINVAL for a
 *	malformed or out-of-epoch message.
 */
static int arpd_callback(struct sk_buff *skb)
{
	struct device * dev;
	struct arpd_request *retreq;

	arpd_not_running = 0;	/* daemon is evidently alive */

	if (skb->len != sizeof(struct arpd_request))
	{
		kfree_skb(skb, FREE_READ);
		return -EINVAL;
	}

	retreq = (struct arpd_request *)skb->data;
	dev = (struct device*)retreq->dev;

	/* Replies from before a device change carry a stale stamp. */
	if (retreq->stamp != arpd_stamp || !dev)
	{
		kfree_skb(skb, FREE_READ);
		return -EINVAL;
	}

	if (!retreq->updated || empty(retreq->ha, sizeof(retreq->ha)))
	{
		/*
		 *	Daemon has no usable answer: resolve on the wire.
		 */
		arp_send(ARPOP_REQUEST, ETH_P_ARP, retreq->ip, dev, dev->pa_addr, NULL,
			 dev->dev_addr, NULL);
	}
	else
	{
		arp_fast_lock();
		arp_update(retreq->ip, retreq->ha, dev, NULL, 0);
		arp_unlock();

		/*
		 *	NOTE(review): a reply younger than ARPD_TIMEOUT
		 *	additionally triggers an on-wire request --
		 *	presumably to re-validate recently-seen addresses;
		 *	confirm intent before changing.
		 */
		if (jiffies - retreq->updated < ARPD_TIMEOUT)
			arp_send(ARPOP_REQUEST, ETH_P_ARP, retreq->ip, dev, dev->pa_addr, NULL,
				 dev->dev_addr, NULL);
	}

	kfree_skb(skb, FREE_READ);
	return sizeof(struct arpd_request);
}
699
700 #else
701
/*
 *	No ARP daemon configured: daemon notifications are a no-op.
 */
static __inline__ void arpd_update(struct arp_table * entry)
{
	return;
}
706
707 #endif
708
709
710
711
712
713
714
715
716
717
718
719
720
721 static int arp_force_expire(void)
722 {
723 int i;
724 struct arp_table *entry, **pentry;
725 struct arp_table **oldest_entry = NULL;
726 unsigned long oldest_used = ~0;
727 unsigned long flags;
728 unsigned long now = jiffies;
729 int result = 0;
730
731 static last_index;
732
733 if (ARP_LOCKED())
734 return 0;
735
736 save_flags(flags);
737
738 if (last_index >= ARP_TABLE_SIZE)
739 last_index = 0;
740
741 for (i = 0; i < ARP_TABLE_SIZE; i++, last_index++)
742 {
743 pentry = &arp_tables[last_index & (ARP_TABLE_SIZE-1)];
744
745 while ((entry = *pentry) != NULL)
746 {
747 if (!(entry->flags & ATF_PERM))
748 {
749 int users;
750 cli();
751 users = arp_count_hhs(entry);
752
753 if (!users && now - entry->last_used > ARP_TIMEOUT)
754 {
755 *pentry = entry->next;
756 restore_flags(flags);
757 #if RT_CACHE_DEBUG >= 2
758 printk("arp_force_expire: %08x expired\n", entry->ip);
759 #endif
760 arp_free_entry(entry);
761 result++;
762 if (arp_size < ARP_MAXSIZE)
763 goto done;
764 continue;
765 }
766 restore_flags(flags);
767 if (!users && entry->last_used < oldest_used)
768 {
769 oldest_entry = pentry;
770 oldest_used = entry->last_used;
771 }
772 }
773 pentry = &entry->next;
774 }
775 }
776
777 done:
778 if (result || !oldest_entry)
779 return result;
780
781 entry = *oldest_entry;
782 *oldest_entry = entry->next;
783 #if RT_CACHE_DEBUG >= 2
784 printk("arp_force_expire: expiring %08x\n", entry->ip);
785 #endif
786 arp_free_entry(entry);
787 return 1;
788 }
789
790
791
792
793
794
795
796
797
798
/*
 *	Periodic garbage collector, run from arp_timer every
 *	ARP_CHECK_INTERVAL.  Frees unreferenced entries idle longer
 *	than ARP_TIMEOUT and sends unicast confirmation probes for
 *	entries not updated within ARP_CONFIRM_INTERVAL.  If the table
 *	is busy the sweep is simply skipped until the next interval.
 */
static void arp_check_expire(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;

	del_timer(&arp_timer);

#ifdef CONFIG_ARPD
	/* Give a restarted daemon a chance to be noticed again. */
	arpd_not_running = 0;
#endif

	ip_rt_check_expire();

	arp_fast_lock();

	if (!ARP_LOCKED())
	{

		for (i = 0; i < ARP_TABLE_SIZE; i++)
		{
			struct arp_table *entry, **pentry;

			pentry = &arp_tables[i];

			while ((entry = *pentry) != NULL)
			{
				if (entry->flags & ATF_PERM)
				{
					pentry = &entry->next;
					continue;
				}

				cli();
				if (now - entry->last_used > ARP_TIMEOUT
				    && !arp_count_hhs(entry))
				{
					/* Idle and unreferenced: unlink and free. */
					*pentry = entry->next;
					sti();
#if RT_CACHE_DEBUG >= 2
					printk("arp_expire: %08x expired\n", entry->ip);
#endif
					arp_free_entry(entry);
					continue;
				}
				sti();
				if (entry->last_updated
				    && now - entry->last_updated > ARP_CONFIRM_INTERVAL
				    && !(entry->flags & ATF_PERM))
				{
					/* Stale but in use: probe the neighbour unicast. */
					struct device * dev = entry->dev;
					entry->retries = ARP_MAX_TRIES+ARP_MAX_PINGS;
					del_timer(&entry->timer);
					entry->timer.expires = jiffies + ARP_CONFIRM_TIMEOUT;
					add_timer(&entry->timer);
					arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip,
						 dev, dev->pa_addr, entry->ha,
						 dev->dev_addr, NULL);
#if RT_CACHE_DEBUG >= 2
					printk("arp_expire: %08x requires confirmation\n", entry->ip);
#endif
				}
				pentry = &entry->next;
			}
		}
	}

	arp_unlock();

	/*
	 *	Re-arm ourselves for the next sweep.
	 */
	arp_timer.expires = jiffies + ARP_CHECK_INTERVAL;
	add_timer(&arp_timer);
}
874
875
876
877
878
879
880
/*
 *	Per-entry resolution timer.  Fires when an ARP request has gone
 *	unanswered: retransmits while retries remain, then either keeps
 *	a "dead" entry alive (if hardware-header caches still reference
 *	it) or unlinks and frees it.
 */
static void arp_expire_request (unsigned long arg)
{
	struct arp_table *entry = (struct arp_table *) arg;
	struct arp_table **pentry;
	unsigned long hash;
	unsigned long flags;

	arp_fast_lock();

	save_flags(flags);
	cli();
	del_timer(&entry->timer);

	/*
	 *	Table busy: we cannot modify it now, retry shortly.
	 */
	if (ARP_LOCKED())
	{
#if RT_CACHE_DEBUG >= 1
		printk(KERN_DEBUG "arp_expire_request: %08x deferred\n", entry->ip);
#endif
		entry->timer.expires = jiffies + HZ/10;
		add_timer(&entry->timer);
		restore_flags(flags);
		arp_unlock();
		return;
	}

	/*
	 *	A confirmation probe that was answered in the meantime
	 *	needs no further action.
	 */
	if ((entry->flags & ATF_COM) && entry->last_updated
	    && jiffies - entry->last_updated <= ARP_CONFIRM_INTERVAL)
	{
		restore_flags(flags);
		arp_unlock();
		return;
	}

	restore_flags(flags);

	if (entry->last_updated && --entry->retries > 0)
	{
		struct device *dev = entry->dev;

#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x timed out\n", entry->ip);
#endif

		/* Retransmit: unicast while retries > ARP_MAX_TRIES, then broadcast. */
		entry->timer.expires = jiffies + ARP_RES_TIME;
		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
			 entry->retries > ARP_MAX_TRIES ? entry->ha : NULL,
			 dev->dev_addr, NULL);
		arp_unlock();
		return;
	}

	/*
	 *	Resolution failed for good: drop packets queued on it.
	 */
	arp_purge_send_q(entry);

	cli();
	if (arp_count_hhs(entry))
	{
		/*
		 *	Header caches still reference the entry, so it
		 *	cannot be freed.  Mark it incomplete ("dead") and
		 *	keep probing at the slow ARP_DEAD_RES_TIME rate.
		 */
		struct device *dev = entry->dev;
#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x is dead\n", entry->ip);
#endif
		entry->retries = ARP_MAX_TRIES;
		entry->flags &= ~ATF_COM;
		arp_invalidate_hhs(entry);
		restore_flags(flags);

		/* last_updated == 0 marks the entry as dead. */
		entry->last_updated = 0;
		arpd_update(entry);

		entry->timer.expires = jiffies + ARP_DEAD_RES_TIME;
		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
			 NULL, dev->dev_addr, NULL);
		arp_unlock();
		return;
	}
	restore_flags(flags);

	entry->last_updated = 0;
	arpd_update(entry);

	/* Unlink the entry from its hash chain and free it. */
	hash = HASH(entry->ip);

	pentry = &arp_tables[hash];

	while (*pentry != NULL)
	{
		if (*pentry != entry)
		{
			pentry = &(*pentry)->next;
			continue;
		}
		*pentry = entry->next;
#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x is killed\n", entry->ip);
#endif
		arp_free_entry(entry);
	}
	arp_unlock();
}
1010
1011
1012
1013
1014
1015
1016
1017
1018
/*
 *	Allocate a fresh ARP table entry, evicting old entries first
 *	when the table is full.  The entry comes zeroed with its timer
 *	and packet queue initialised.  Returns NULL on allocation
 *	failure.
 */
static struct arp_table * arp_alloc_entry(void)
{
	struct arp_table * entry;

	/* Over the limit: make room before allocating more. */
	if (arp_size >= ARP_MAXSIZE)
		arp_force_expire();

	entry = (struct arp_table *)
		kmalloc(sizeof(struct arp_table),GFP_ATOMIC);

	if (entry != NULL)
	{
		atomic_inc(&arp_size);
		memset(entry, 0, sizeof(struct arp_table));

		entry->mask = DEF_ARP_NETMASK;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;
		entry->last_updated = entry->last_used = jiffies;
		skb_queue_head_init(&entry->skb);
	}
	return entry;
}
1044
1045
1046
1047
1048
1049
1050
/*
 *	Netdevice notifier: on NETDEV_DOWN purge every table entry
 *	(including proxy entries) belonging to the departing device.
 */
int arp_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct device *dev=ptr;
	int i;

	if (event != NETDEV_DOWN)
		return NOTIFY_DONE;

#ifdef CONFIG_ARPD
	arpd_flush(dev);
	arpd_stamp++;	/* invalidate daemon replies already in flight */
#endif

	arp_fast_lock();
#if RT_CACHE_DEBUG >= 1
	if (ARP_LOCKED())
		printk("arp_device_event: impossible\n");
#endif

	/* Note FULL size: the proxy bucket is swept too. */
	for (i = 0; i < FULL_ARP_TABLE_SIZE; i++)
	{
		struct arp_table *entry;
		struct arp_table **pentry = &arp_tables[i];

		while ((entry = *pentry) != NULL)
		{
			if (entry->dev == dev)
			{
				*pentry = entry->next;
				arp_free_entry(entry);
			}
			else
				pentry = &entry->next;
		}
	}
	arp_unlock();
	return NOTIFY_DONE;
}
1089
1090
1091
1092
1093
1094
1095
/*
 *	Transmit the packets that were queued on an entry while its
 *	hardware address was being resolved.  The entry must be
 *	complete (ATF_COM) by the time this is called.
 */
static void arp_send_q(struct arp_table *entry)
{
	struct sk_buff *skb;

	unsigned long flags;

	/*
	 *	Cannot flush the queue without a resolved address.
	 */
	if(!(entry->flags&ATF_COM))
	{
		printk("arp_send_q: incomplete entry for %s\n",
		       in_ntoa(entry->ip));
		return;
	}

	save_flags(flags);

	cli();
	while((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		IS_SKB(skb);
		skb_device_lock(skb);
		restore_flags(flags);
		/* Fill in the hardware header, then hand off to the device. */
		if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb))
		{
			skb->arp = 1;
			if(skb->sk==NULL)
				dev_queue_xmit(skb, skb->dev, 0);
			else
				dev_queue_xmit(skb,skb->dev,skb->sk->priority);
		}
	}
	restore_flags(flags);
}
1136
1137
/*
 *	Insert or refresh the mapping sip -> sha on dev.  An existing
 *	entry has its hardware address refreshed (unless permanent) and
 *	any packets waiting on it are released.  Otherwise a new,
 *	complete entry is created -- the caller-supplied ientry if
 *	given.  With grat != 0 only existing entries are updated.
 *	Returns 1 when an existing entry was found (so a caller-owned
 *	ientry was NOT consumed and may be freed), 0 otherwise.
 *	Caller holds the ARP fast lock.
 */
static int
arp_update (u32 sip, char *sha, struct device * dev,
	    struct arp_table *ientry, int grat)
{
	struct arp_table * entry;
	unsigned long hash;

	hash = HASH(sip);

	for (entry=arp_tables[hash]; entry; entry = entry->next)
		if (entry->ip == sip && entry->dev == dev)
			break;

	if (entry)
	{
		/*
		 *	Refresh the existing entry; permanent entries keep
		 *	their configured hardware address.
		 */
		if (!(entry->flags & ATF_PERM))
		{
			del_timer(&entry->timer);
			entry->last_updated = jiffies;
			if (memcmp(entry->ha, sha, dev->addr_len)!=0)
			{
				memcpy(entry->ha, sha, dev->addr_len);
				if (entry->flags & ATF_COM)
					arp_update_hhs(entry);
			}
			arpd_update(entry);
		}

		if (!(entry->flags & ATF_COM))
		{
			/*
			 *	Entry just became resolved: propagate the
			 *	address into the header caches and flush
			 *	the packets that were waiting on it.
			 */
			entry->flags |= ATF_COM;
			arp_update_hhs(entry);

			arp_send_q(entry);
		}
		return 1;
	}

	/*
	 *	No entry found: create one, unless this was only a
	 *	gratuitous/confirming update.
	 */
	entry = ientry;

	if (grat && !entry)
		return 0;

	if (!entry)
	{
		entry = arp_alloc_entry();
		if (!entry)
			return 0;

		entry->ip = sip;
		entry->flags = ATF_COM;
		memcpy(entry->ha, sha, dev->addr_len);
		entry->dev = dev;
	}

	entry->last_updated = entry->last_used = jiffies;
	arpd_update(entry);

	if (!ARP_LOCKED())
	{
		entry->next = arp_tables[hash];
		arp_tables[hash] = entry;
		return 0;
	}
	/* Table busy: park the entry on the backlog for arp_run_bh(). */
#if RT_CACHE_DEBUG >= 2
	printk("arp_update: %08x backlogged\n", entry->ip);
#endif
	arp_enqueue(&arp_backlog, entry);
	arp_bh_mask |= ARP_BH_BACKLOG;
	return 0;
}
1223
1224
1225
1226 static __inline__ struct arp_table *arp_lookup(u32 paddr, struct device * dev)
1227 {
1228 struct arp_table *entry;
1229
1230 for (entry = arp_tables[HASH(paddr)]; entry != NULL; entry = entry->next)
1231 if (entry->ip == paddr && (!dev || entry->dev == dev))
1232 return entry;
1233 return NULL;
1234 }
1235
1236
1237
1238
1239
/*
 *	Public lookup: copy the resolved hardware address for paddr on
 *	dev into haddr.  Returns 1 on success, 0 when the address is
 *	unknown or still resolving.
 */
int arp_query(unsigned char *haddr, u32 paddr, struct device * dev)
{
	struct arp_table *entry;

	arp_fast_lock();

	entry = arp_lookup(paddr, dev);

	if (entry != NULL)
	{
		entry->last_used = jiffies;
		if (entry->flags & ATF_COM)
		{
			memcpy(haddr, entry->ha, dev->addr_len);
			arp_unlock();
			return 1;
		}
	}
	arp_unlock();
	return 0;
}
1261
1262
/*
 *	Handle addresses that need no ARP at all: our own address,
 *	multicast (mapped algorithmically on ethernet/802.2) and
 *	broadcast.  Fills haddr and returns 1 when handled, 0 when a
 *	real table lookup is required.
 */
static int arp_set_predefined(int addr_hint, unsigned char * haddr, u32 paddr, struct device * dev)
{
	switch (addr_hint)
	{
		case IS_MYADDR:
			printk(KERN_DEBUG "ARP: arp called for own IP address\n");
			memcpy(haddr, dev->dev_addr, dev->addr_len);
			return 1;
#ifdef CONFIG_IP_MULTICAST
		case IS_MULTICAST:
			if(dev->type==ARPHRD_ETHER || dev->type==ARPHRD_IEEE802)
			{
				/* RFC 1112 mapping: 01:00:5e + low 23 bits of group. */
				u32 taddr;
				haddr[0]=0x01;
				haddr[1]=0x00;
				haddr[2]=0x5e;
				taddr=ntohl(paddr);
				haddr[5]=taddr&0xff;
				taddr=taddr>>8;
				haddr[4]=taddr&0xff;
				taddr=taddr>>8;
				haddr[3]=taddr&0x7f;
				return 1;
			}

			/* fallthrough: other media send multicast as broadcast */

#endif

		case IS_BROADCAST:
			memcpy(haddr, dev->broadcast, dev->addr_len);
			return 1;
	}
	return 0;
}
1298
1299
1300
1301
1302
/*
 *	Create an unresolved entry for paddr on dev, optionally binding
 *	a hardware-header cache and queueing one packet on it.  If the
 *	table is free the entry is hashed in and a request is sent (or
 *	delegated to the ARP daemon); otherwise it goes onto the
 *	request backlog for arp_run_bh().  Returns the new entry, or
 *	NULL on allocation failure.  Caller holds the ARP fast lock.
 */
struct arp_table * arp_new_entry(u32 paddr, struct device *dev, struct hh_cache *hh, struct sk_buff *skb)
{
	struct arp_table *entry;

	entry = arp_alloc_entry();

	if (entry != NULL)
	{
		entry->ip = paddr;
		entry->dev = dev;
		if (hh)
		{
			entry->hh = hh;
			atomic_inc(&hh->hh_refcnt);
			hh->hh_arp = (void*)entry;
		}
		entry->timer.expires = jiffies + ARP_RES_TIME;

		if (skb != NULL)
		{
			skb_queue_tail(&entry->skb, skb);
			skb_device_unlock(skb);
		}

		if (!ARP_LOCKED())
		{
			unsigned long hash = HASH(paddr);
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
			add_timer(&entry->timer);
			entry->retries = ARP_MAX_TRIES;
#ifdef CONFIG_ARPD
			if (!arpd_not_running)
				arpd_lookup(paddr, dev);
			else
#endif
				arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, dev->pa_addr, NULL,
					 dev->dev_addr, NULL);
		}
		else
		{
			/* Table busy: install later from arp_run_bh(). */
#if RT_CACHE_DEBUG >= 2
			printk("arp_new_entry: %08x backlogged\n", entry->ip);
#endif
			arp_enqueue(&arp_req_backlog, entry);
			arp_bh_mask |= ARP_BH_BACKLOG;
		}
	}
	return entry;
}
1353
1354
1355
1356
1357
1358
/*
 *	Resolve paddr for an outgoing packet.  Returns 0 with haddr
 *	filled in (the packet may be sent now), or 1 when resolution is
 *	in progress -- in that case skb has been queued on the entry,
 *	or dropped (with an ICMP host-unreachable for dead entries).
 */
int arp_find(unsigned char *haddr, u32 paddr, struct device *dev,
	     u32 saddr, struct sk_buff *skb)
{
	struct arp_table *entry;
	unsigned long hash;

	/* Own/multicast/broadcast addresses need no table lookup. */
	if (arp_set_predefined(ip_chk_addr(paddr), haddr, paddr, dev))
	{
		if (skb)
			skb->arp = 1;
		return 0;
	}

	hash = HASH(paddr);
	arp_fast_lock();

	entry = arp_lookup(paddr, dev);

	if (entry != NULL)
	{
		if (entry->flags & ATF_COM)
		{
			entry->last_used = jiffies;
			memcpy(haddr, entry->ha, dev->addr_len);
			if (skb)
				skb->arp = 1;
			arp_unlock();
			return 0;
		}

		/*
		 *	Entry exists but is still resolving: park the
		 *	packet on it.  last_updated == 0 marks a dead
		 *	entry -- reject instead of queueing forever.
		 */
		if (skb != NULL)
		{
			if (entry->last_updated)
			{
				skb_queue_tail(&entry->skb, skb);
				skb_device_unlock(skb);
			}
			else
			{
				icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
				dev_kfree_skb(skb, FREE_WRITE);
			}
		}
		arp_unlock();
		return 1;
	}

	/* Unknown address: create an entry and start resolving. */
	entry = arp_new_entry(paddr, dev, NULL, skb);

	if (skb != NULL && !entry)
		dev_kfree_skb(skb, FREE_WRITE);

	arp_unlock();
	return 1;
}
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
/*
 *	Bind a hardware-header cache for (dev, htype, paddr) at *hhp.
 *	Predefined addresses get a ready-made header immediately; other
 *	addresses have the cache attached to their ARP entry (created
 *	if needed) and filled in once resolution completes.  Returns 0
 *	when *hhp ended up holding a cache bound to an ARP entry, 1
 *	otherwise.
 */
int arp_bind_cache(struct hh_cache ** hhp, struct device *dev, unsigned short htype, u32 paddr)
{
	struct arp_table *entry;
	struct hh_cache *hh;
	int addr_hint;
	unsigned long flags;

	save_flags(flags);

	/* Predefined (own/multicast/broadcast): build the header now. */
	if ((addr_hint = ip_chk_addr(paddr)) != 0)
	{
		unsigned char haddr[MAX_ADDR_LEN];
		if (*hhp)
			return 1;
		hh = arp_alloc_hh(htype);
		if (!hh)
			return 1;
		arp_set_predefined(addr_hint, haddr, paddr, dev);
		dev->header_cache_update(hh, dev, haddr);
		return arp_set_hh(hhp, hh);
	}

	arp_fast_lock();

	entry = arp_lookup(paddr, dev);

	if (entry)
	{
		/* Reuse an attached cache of the right type if present. */
		for (hh = entry->hh; hh; hh=hh->hh_next)
			if (hh->hh_type == htype)
				break;

		if (hh)
		{
			arp_set_hh(hhp, hh);
			arp_unlock();
			return 1;
		}
	}

	hh = arp_alloc_hh(htype);
	if (!hh)
	{
		arp_unlock();
		return 1;
	}

	if (entry)
	{
		/* Attach the new cache to the existing entry. */
		cli();
		hh->hh_arp = (void*)entry;
		hh->hh_next = entry->hh;
		entry->hh = hh;
		atomic_inc(&hh->hh_refcnt);
		restore_flags(flags);

		if (entry->flags & ATF_COM)
			dev->header_cache_update(hh, dev, entry->ha);

		if (arp_set_hh(hhp, hh))
		{
			arp_unlock();
			return 0;
		}

		entry->last_used = jiffies;
		arp_unlock();
		return 0;
	}

	/* No entry yet: create one with the cache pre-bound. */
	entry = arp_new_entry(paddr, dev, hh, NULL);
	if (entry == NULL)
	{
		kfree_s(hh, sizeof(struct hh_cache));
		arp_unlock();
		return 1;
	}

	if (!arp_set_hh(hhp, hh))
	{
		arp_unlock();
		return 0;
	}
	arp_unlock();
	return 1;
}
1530
/*
 *	Deferred table maintenance, invoked from arp_unlock() when the
 *	last lock holder leaves.  Replays the update backlog and
 *	reinstalls -- or merges with newer duplicates -- the entries
 *	parked on the request backlog while the table was locked.
 */
static void arp_run_bh()
{
	unsigned long flags;
	struct arp_table *entry, *entry1;
	struct device * dev;
	unsigned long hash;
	struct hh_cache *hh;
	u32 sip;

	save_flags(flags);
	cli();
	arp_fast_lock();	/* hold the lock while draining the backlogs */

	while (arp_bh_mask)
	{
		arp_bh_mask &= ~ARP_BH_BACKLOG;

		/* Replay deferred updates; free entries that merged into existing ones. */
		while ((entry = arp_dequeue(&arp_backlog)) != NULL)
		{
			restore_flags(flags);
			if (arp_update(entry->ip, entry->ha, entry->dev, entry, 0))
				arp_free_entry(entry);
			cli();
		}

		cli();
		while ((entry = arp_dequeue(&arp_req_backlog)) != NULL)
		{
			restore_flags(flags);

			dev = entry->dev;
			sip = entry->ip;
			hash = HASH(sip);

			for (entry1 = arp_tables[hash]; entry1; entry1 = entry1->next)
				if (entry1->ip == sip && entry1->dev == dev)
					break;

			if (!entry1)
			{
				/* No duplicate appeared meanwhile: install the entry. */
				cli();
				entry->next = arp_tables[hash];
				arp_tables[hash] = entry;
				restore_flags(flags);
				entry->timer.expires = jiffies + ARP_RES_TIME;
				entry->retries = ARP_MAX_TRIES;
				entry->last_used = jiffies;
				if (!(entry->flags & ATF_COM))
				{
					add_timer(&entry->timer);
#ifdef CONFIG_ARPD
					if (!arpd_not_running)
						arpd_lookup(sip, dev);
					else
#endif
						arp_send(ARPOP_REQUEST, ETH_P_ARP, sip, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
				}
#if RT_CACHE_DEBUG >= 1
				printk(KERN_DEBUG "arp_run_bh: %08x reinstalled\n", sip);
#endif
			}
			else
			{
				struct sk_buff * skb;
				struct hh_cache * next;

				/*
				 *	A duplicate entry was created while we
				 *	were parked: move our header caches...
				 */
				cli();
				for (hh=entry->hh; hh; hh=next)
				{
					next = hh->hh_next;
					hh->hh_next = entry1->hh;
					entry1->hh = hh;
					hh->hh_arp = (void*)entry1;
				}
				entry->hh = NULL;

				/*
				 *	...and our queued packets onto it.
				 */
				while ((skb = skb_dequeue(&entry->skb)) != NULL)
				{
					skb_device_lock(skb);
					restore_flags(flags);
					skb_queue_tail(&entry1->skb, skb);
					skb_device_unlock(skb);
					cli();
				}
				restore_flags(flags);

				arp_free_entry(entry);

				if (entry1->flags & ATF_COM)
				{
					arp_update_hhs(entry1);
					arp_send_q(entry1);
				}
			}
			cli();
		}
		cli();
	}
	arp_unlock();
	restore_flags(flags);
}
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
/*
 *	Build and transmit an ARP packet of the given operation type.
 *	dest_hw == NULL broadcasts the frame; target_hw == NULL
 *	zero-fills the target hardware field (as in a request).
 */
void arp_send(int type, int ptype, u32 dest_ip,
	      struct device *dev, u32 src_ip,
	      unsigned char *dest_hw, unsigned char *src_hw,
	      unsigned char *target_hw)
{
	struct sk_buff *skb;
	struct arphdr *arp;
	unsigned char *arp_ptr;

	/*
	 *	No arp on this interface.
	 */
	if (dev->flags&IFF_NOARP)
		return;

	/*
	 *	Allocate a buffer: link header + arp header + two
	 *	(hardware, IP) address pairs.
	 */
	skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
				+ dev->hard_header_len, GFP_ATOMIC);
	if (skb == NULL)
	{
		printk("ARP: no memory to send an arp packet\n");
		return;
	}
	skb_reserve(skb, dev->hard_header_len);
	arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4));
	skb->arp = 1;
	skb->dev = dev;
	skb->free = 1;
	skb->protocol = htons (ETH_P_IP);

	/*
	 *	Fill the device header for the ARP frame.
	 */
	dev->hard_header(skb,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len);

	/* Fill out the arp protocol part. */
	arp->ar_hrd = htons(dev->type);
#ifdef CONFIG_AX25
#ifdef CONFIG_NETROM
	arp->ar_pro = (dev->type == ARPHRD_AX25 || dev->type == ARPHRD_NETROM) ? htons(AX25_P_IP) : htons(ETH_P_IP);
#else
	arp->ar_pro = (dev->type != ARPHRD_AX25) ? htons(ETH_P_IP) : htons(AX25_P_IP);
#endif
#else
	arp->ar_pro = htons(ETH_P_IP);
#endif
	arp->ar_hln = dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr=(unsigned char *)(arp+1);

	/* Payload layout: sender hw, sender ip, target hw, target ip. */
	memcpy(arp_ptr, src_hw, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &src_ip,4);
	arp_ptr+=4;
	if (target_hw != NULL)
		memcpy(arp_ptr, target_hw, dev->addr_len);
	else
		memset(arp_ptr, 0, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &dest_ip, 4);

	dev_queue_xmit(skb, dev, 0);
}
1719
1720
1721
1722
1723
1724
/*
 *	Receive an ARP packet.  Validates it, answers ARP requests addressed
 *	to us (including proxy-ARP entries), and opportunistically updates the
 *	ARP cache with the sender's mapping.  Always consumes the skb and
 *	returns 0.
 */

int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
{
/*
 *	We shouldn't use this type conversion. Check later.
 */

	struct arphdr *arp = (struct arphdr *)skb->h.raw;
	unsigned char *arp_ptr= (unsigned char *)(arp+1);
	unsigned char *sha,*tha;
	u32 sip,tip;

/*
 *	The hardware length of the packet should match the hardware length
 *	of the device.  Similarly, the hardware types should match.  The
 *	device should be ARP-able.  Also, if pln is not 4, then the lookup
 *	is not from an IP number.  We can't currently handle this, so toss
 *	the packet.
 */
	if (arp->ar_hln != dev->addr_len ||
	    dev->type != ntohs(arp->ar_hrd) ||
	    dev->flags & IFF_NOARP ||
	    arp->ar_pln != 4)
	{
		kfree_skb(skb, FREE_READ);
		return 0;
		/* Should this be an error/printk?  Seems like something */
		/* you'd want to know about. Unless it's just !IFF_NOARP. -- MS */
	}

/*
 *	Another test.
 *	The logic here is that the protocol being looked up by arp should
 *	match the protocol the device speaks.  If it doesn't, there is a
 *	problem, so toss the packet.
 */

	switch (dev->type)
	{
#ifdef CONFIG_AX25
		case ARPHRD_AX25:
			if(arp->ar_pro != htons(AX25_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;
#endif
#ifdef CONFIG_NETROM
		case ARPHRD_NETROM:
			if(arp->ar_pro != htons(AX25_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;
#endif
		case ARPHRD_ETHER:
		case ARPHRD_ARCNET:
		case ARPHRD_METRICOM:
			if(arp->ar_pro != htons(ETH_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;

		case ARPHRD_IEEE802:
			if(arp->ar_pro != htons(ETH_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;

		default:
			/* Validity already checked against dev->type above, so
			   reaching here indicates table/driver corruption. */
			printk("ARP: dev->type mangled!\n");
			kfree_skb(skb, FREE_READ);
			return 0;
	}

/*
 *	Extract fields: sender hw/IP, target hw/IP.  The addresses follow
 *	the arphdr in wire order; copy the IPs out because they may be
 *	unaligned in the frame.
 */

	sha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	tha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

/*
 *	Check for bad requests for 127.x.x.x and requests for multicast
 *	addresses.  If this is one such, delete it.
 */
	if (LOOPBACK(tip) || MULTICAST(tip))
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

/*
 *	Process the ARP message.
 */

#ifdef CONFIG_NET_ALIAS
	/* If the target IP is not the receiving device's own address and the
	 * device has aliases, re-select the (alias) device the packet is
	 * really meant for, then re-validate hw type / NOARP on it. */
	if (tip != dev->pa_addr && net_alias_has(skb->dev))
	{
		/*
		 *	net_alias_dev_rcv_sel32 returns main dev if it fails to found other.
		 */
		dev = net_alias_dev_rcv_sel32(dev, AF_INET, sip, tip);

		if (dev->type != ntohs(arp->ar_hrd) || dev->flags & IFF_NOARP)
		{
			kfree_skb(skb, FREE_READ);
			return 0;
		}
	}
#endif

	if (arp->ar_op == htons(ARPOP_REQUEST))
	{
/*
 * Only reply for the real device address or when it's in our proxy tables
 */
		if (tip != dev->pa_addr)
		{
			struct arp_table *proxy_entry;

/*
 * 	To get in here, it is a request for someone else.  We need to
 * 	check if that someone else is one of our proxies.  If it isn't,
 * 	we can toss it.
 */
			arp_fast_lock();

			for (proxy_entry = arp_proxy_list; proxy_entry;
			     proxy_entry = proxy_entry->next)
			{
				/* mask==0 entries are host proxies; masked
				   entries proxy for a whole subnet. */
				if (proxy_entry->dev == dev &&
				    !((proxy_entry->ip^tip)&proxy_entry->mask))
					break;
			}

			if (proxy_entry && (proxy_entry->mask || ((dev->pa_addr^tip)&dev->pa_mask)))
			{
				char ha[MAX_ADDR_LEN];
				struct rtable * rt;

				/* Copy the hw address out of the entry before
				 * dropping the lock: the entry may be freed
				 * once we unlock. */
				memcpy(ha, proxy_entry->ha, dev->addr_len);
				arp_unlock();

				/* Only proxy-reply when the target is reached
				 * through a different interface (classic
				 * proxy-ARP forwarding check). */
				rt = ip_rt_route(tip, 0);
				if (rt && rt->rt_dev != dev)
					arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,ha,sha);
				ip_rt_put(rt);

			}
			else
				arp_unlock();
		}
		else
			/* Request for one of our own addresses: answer with
			 * this device's hardware address. */
			arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);

/*
 *	Handle the request: update/create the cache entry for the sender.
 *	A request implies the sender will talk to us, so resolve eagerly.
 */
		arp_fast_lock();
		arp_update(sip, sha, dev, NULL, 1);
		arp_unlock();
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	/* Reply (or other opcode): update the cache, but only create a new
	 * entry if the target address is ours. */
	arp_fast_lock();
	arp_update(sip, sha, dev, NULL, ip_chk_addr(tip) != IS_MYADDR);
	arp_unlock();
	kfree_skb(skb, FREE_READ);
	return 0;
}
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945 static int arp_req_set(struct arpreq *r, struct device * dev)
1946 {
1947 struct arp_table *entry, **entryp;
1948 struct sockaddr_in *si;
1949 unsigned char *ha;
1950 u32 ip;
1951 u32 mask = DEF_ARP_NETMASK;
1952 unsigned long flags;
1953
1954
1955
1956
1957
1958 if (r->arp_flags&ATF_NETMASK)
1959 {
1960 si = (struct sockaddr_in *) &r->arp_netmask;
1961 mask = si->sin_addr.s_addr;
1962 }
1963
1964
1965
1966
1967
1968 si = (struct sockaddr_in *) &r->arp_pa;
1969 ip = si->sin_addr.s_addr;
1970
1971
1972 if (r->arp_flags&ATF_PUBL)
1973 {
1974 if (!mask && ip)
1975 return -EINVAL;
1976 if (!dev)
1977 dev = dev_getbytype(r->arp_ha.sa_family);
1978 }
1979 else
1980 {
1981 if (ip_chk_addr(ip))
1982 return -EINVAL;
1983 if (!dev)
1984 {
1985 struct rtable * rt;
1986 rt = ip_rt_route(ip, 0);
1987 if (!rt)
1988 return -ENETUNREACH;
1989 dev = rt->rt_dev;
1990 ip_rt_put(rt);
1991 }
1992 }
1993 if (!dev || (dev->flags&(IFF_LOOPBACK|IFF_NOARP)))
1994 return -ENODEV;
1995
1996 if (r->arp_ha.sa_family != dev->type)
1997 return -EINVAL;
1998
1999 arp_fast_lock();
2000 #if RT_CACHE_DEBUG >= 1
2001 if (ARP_LOCKED())
2002 printk("arp_req_set: bug\n");
2003 #endif
2004
2005 if (!(r->arp_flags & ATF_PUBL))
2006 entryp = &arp_tables[HASH(ip)];
2007 else
2008 entryp = &arp_proxy_list;
2009
2010 while ((entry = *entryp) != NULL)
2011 {
2012 if (entry->ip == ip && entry->mask == mask && entry->dev == dev)
2013 break;
2014 if ((entry->mask & mask) != mask)
2015 {
2016 entry = NULL;
2017 break;
2018 }
2019 entryp = &entry->next;
2020 }
2021
2022
2023
2024
2025
2026 if (entry == NULL)
2027 {
2028 entry = arp_alloc_entry();
2029 if (entry == NULL)
2030 {
2031 arp_unlock();
2032 return -ENOMEM;
2033 }
2034 entry->ip = ip;
2035 entry->dev = dev;
2036 entry->mask = mask;
2037 entry->flags = r->arp_flags;
2038
2039 entry->next = (*entryp)->next;
2040 *entryp = entry;
2041 }
2042
2043 ha = r->arp_ha.sa_data;
2044 if (empty(ha, dev->addr_len))
2045 ha = dev->dev_addr;
2046
2047 save_flags(flags);
2048 cli();
2049 memcpy(entry->ha, ha, dev->addr_len);
2050 entry->last_updated = entry->last_used = jiffies;
2051 entry->flags |= ATF_COM;
2052 restore_flags(flags);
2053 arpd_update(entry);
2054 arp_update_hhs(entry);
2055 arp_unlock();
2056 return 0;
2057 }
2058
2059
2060
2061
2062
2063
2064
2065 static int arp_req_get(struct arpreq *r, struct device *dev)
2066 {
2067 struct arp_table *entry;
2068 struct sockaddr_in *si;
2069 u32 mask = DEF_ARP_NETMASK;
2070
2071 if (r->arp_flags&ATF_NETMASK)
2072 {
2073 si = (struct sockaddr_in *) &r->arp_netmask;
2074 mask = si->sin_addr.s_addr;
2075 }
2076
2077 si = (struct sockaddr_in *) &r->arp_pa;
2078
2079 arp_fast_lock();
2080 #if RT_CACHE_DEBUG >= 1
2081 if (ARP_LOCKED())
2082 printk("arp_req_set: impossible\n");
2083 #endif
2084
2085 if (!(r->arp_flags & ATF_PUBL))
2086 entry = arp_tables[HASH(si->sin_addr.s_addr)];
2087 else
2088 entry = arp_proxy_list;
2089
2090 for ( ; entry ;entry = entry->next)
2091 {
2092 if (entry->ip == si->sin_addr.s_addr
2093 && (!dev || entry->dev == dev)
2094 && (!(r->arp_flags&ATF_NETMASK) || entry->mask == mask))
2095 {
2096 memcpy(r->arp_ha.sa_data, entry->ha, entry->dev->addr_len);
2097 r->arp_ha.sa_family = entry->dev->type;
2098 r->arp_flags = entry->flags;
2099 strncpy(r->arp_dev, entry->dev->name, sizeof(r->arp_dev));
2100 arp_unlock();
2101 return 0;
2102 }
2103 }
2104
2105 arp_unlock();
2106 return -ENXIO;
2107 }
2108
2109 static int arp_req_delete(struct arpreq *r, struct device * dev)
2110 {
2111 struct sockaddr_in *si;
2112 struct arp_table *entry, **entryp;
2113 int retval = -ENXIO;
2114 u32 mask = DEF_ARP_NETMASK;
2115
2116 if (r->arp_flags&ATF_NETMASK)
2117 {
2118 si = (struct sockaddr_in *) &r->arp_netmask;
2119 mask = si->sin_addr.s_addr;
2120 }
2121
2122 si = (struct sockaddr_in *) &r->arp_pa;
2123
2124 arp_fast_lock();
2125 #if RT_CACHE_DEBUG >= 1
2126 if (ARP_LOCKED())
2127 printk("arp_req_delete: impossible\n");
2128 #endif
2129
2130 if (!(r->arp_flags & ATF_PUBL))
2131 entryp = &arp_tables[HASH(si->sin_addr.s_addr)];
2132 else
2133 entryp = &arp_proxy_list;
2134
2135 while ((entry = *entryp) != NULL)
2136 {
2137 if (entry->ip == si->sin_addr.s_addr
2138 && (!dev || entry->dev == dev)
2139 && (!(r->arp_flags&ATF_NETMASK) || entry->mask == mask))
2140 {
2141 *entryp = entry->next;
2142 arp_free_entry(entry);
2143 retval = 0;
2144 continue;
2145 }
2146 entryp = &entry->next;
2147 }
2148
2149 arp_unlock();
2150 return retval;
2151 }
2152
2153
2154
2155
2156
/*
 *	Handle an ARP layer ioctl request.  Supports both the current
 *	(struct arpreq) and the old (struct arpreq_old, no arp_dev field)
 *	user interfaces.  Set/delete require superuser; get does not.
 */

int arp_ioctl(unsigned int cmd, void *arg)
{
	int err;
	struct arpreq r;

	struct device * dev = NULL;

	switch(cmd)
	{
		case SIOCDARP:
		case SIOCSARP:
			if (!suser())
				return -EPERM;
			/* fall through: copy-in is shared with SIOCGARP */
		case SIOCGARP:
			err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq));
			if (err)
				return err;
			memcpy_fromfs(&r, arg, sizeof(struct arpreq));
			break;
		case OLD_SIOCDARP:
		case OLD_SIOCSARP:
			if (!suser())
				return -EPERM;
			/* fall through: copy-in is shared with OLD_SIOCGARP */
		case OLD_SIOCGARP:
			/* Old layout lacks arp_dev; copy the smaller struct
			   and blank the device name. */
			err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq_old));
			if (err)
				return err;
			memcpy_fromfs(&r, arg, sizeof(struct arpreq_old));
			memset(&r.arp_dev, 0, sizeof(r.arp_dev));
			break;
		default:
			return -EINVAL;
	}

	if (r.arp_pa.sa_family != AF_INET)
		return -EPFNOSUPPORT;

	/* Netmasks are only meaningful on proxy (ATF_PUBL) entries;
	   normalise the flags/mask pair before dispatching. */
	if (!(r.arp_flags & ATF_PUBL))
		r.arp_flags &= ~ATF_NETMASK;
	if (!(r.arp_flags & ATF_NETMASK))
		((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr=DEF_ARP_NETMASK;

	if (r.arp_dev[0])
	{
		/* Caller named a device: resolve it and make the hardware
		   family agree with it. */
		if ((dev = dev_get(r.arp_dev)) == NULL)
			return -ENODEV;

		if (!r.arp_ha.sa_family)
			r.arp_ha.sa_family = dev->type;
		else if (r.arp_ha.sa_family != dev->type)
			return -EINVAL;
	}

	switch(cmd)
	{
		case SIOCDARP:
		        return arp_req_delete(&r, dev);
		case SIOCSARP:
			return arp_req_set(&r, dev);
		case OLD_SIOCDARP:
			/* old  SIOCDARP destroys both
			 * normal and proxy mappings: try the normal entry
			 * first, then the proxy one. */
			r.arp_flags &= ~ATF_PUBL;
			err = arp_req_delete(&r, dev);
			r.arp_flags |= ATF_PUBL;
			if (!err)
				arp_req_delete(&r, dev);
			else
				err = arp_req_delete(&r, dev);
			return err;
		case OLD_SIOCSARP:
			err = arp_req_set(&r, dev);
			/*
			 * old SIOCSARP cannot set a proxy and a normal entry
			 * for the same address at once; setting one kind
			 * removes the other.
			 */
			if (r.arp_flags & ATF_PUBL)
			{
				r.arp_flags &= ~ATF_PUBL;
				arp_req_delete(&r, dev);
			}
			return err;
		case SIOCGARP:
			err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq));
			if (err)
				return err;
			err = arp_req_get(&r, dev);
			if (!err)
				memcpy_tofs(arg, &r, sizeof(r));
			return err;
		case OLD_SIOCGARP:
			err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq_old));
			if (err)
				return err;
			/* Prefer a normal entry; fall back to a proxy one. */
			r.arp_flags &= ~ATF_PUBL;
			err = arp_req_get(&r, dev);
			if (err < 0)
			{
				r.arp_flags |= ATF_PUBL;
				err = arp_req_get(&r, dev);
			}
			if (!err)
				memcpy_tofs(arg, &r, sizeof(struct arpreq_old));
			return err;
	}
	/* Unreachable: every cmd accepted above returns in the switch. */
	return 0;
}
2267
2268
2269
2270
2271
2272 #define HBUFFERLEN 30
2273
/*
 *	Produce the contents of /proc/net/arp: one header line, then one
 *	line per cache entry (including proxy entries, which live in the
 *	trailing buckets of arp_tables).  Implements the usual proc-read
 *	windowing: only the bytes in [offset, offset+length) are returned.
 */
int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
	int len=0;
	off_t pos=0;
	int size;
	struct arp_table *entry;
	char hbuffer[HBUFFERLEN];
	int i,j,k;
	const char hexbuf[] =  "0123456789ABCDEF";

	size = sprintf(buffer,"IP address       HW type     Flags       HW address            Mask     Device\n");

	pos+=size;
	len+=size;

	arp_fast_lock();

	for(i=0; i<FULL_ARP_TABLE_SIZE; i++)
	{
		for(entry=arp_tables[i]; entry!=NULL; entry=entry->next)
		{
/*
 *	Convert hardware address to XX:XX:XX:XX ... form.
 *	AX.25/NET/ROM addresses are printed as callsigns instead.
 */
#ifdef CONFIG_AX25
#ifdef CONFIG_NETROM
			if (entry->dev->type == ARPHRD_AX25 || entry->dev->type == ARPHRD_NETROM)
			     strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
			else {
#else
			if(entry->dev->type==ARPHRD_AX25)
			     strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
			else {
#endif
#endif

			/* Two hex digits plus ':' per byte; the k bound
			   leaves room for one last pair + terminator. */
			for(k=0,j=0;k<HBUFFERLEN-3 && j<entry->dev->addr_len;j++)
			{
				hbuffer[k++]=hexbuf[ (entry->ha[j]>>4)&15 ];
				hbuffer[k++]=hexbuf[ entry->ha[j]&15  ];
				hbuffer[k++]=':';
			}
			hbuffer[--k]=0;	/* overwrite trailing ':' */

#ifdef CONFIG_AX25
			}
#endif
			size = sprintf(buffer+len,
				"%-17s0x%-10x0x%-10x%s",
				in_ntoa(entry->ip),
				(unsigned int)entry->dev->type,
				entry->flags,
				hbuffer);
#if RT_CACHE_DEBUG < 2
			size += sprintf(buffer+len+size,
				 "     %-17s %s\n",
				 entry->mask==DEF_ARP_NETMASK ?
				 "*" : in_ntoa(entry->mask), entry->dev->name);
#else
			/* Debug build: also show hardware-header cache
			   refcount and validity. */
			size += sprintf(buffer+len+size,
				 "     %-17s %s\t%d\t%1d\n",
				 entry->mask==DEF_ARP_NETMASK ?
				 "*" : in_ntoa(entry->mask), entry->dev->name,
				 entry->hh ? entry->hh->hh_refcnt : -1,
				 entry->hh ? entry->hh->hh_uptodate : 0);
#endif

			len += size;
			pos += size;

			/* Discard output that lies before the requested
			   window; stop once we've passed its end. */
			if (pos <= offset)
				len=0;
			if (pos >= offset+length)
				goto done;
		}
	}
done:
	arp_unlock();

	*start = buffer+len-(pos-offset);	/* Start of wanted data */
	len = pos-offset;			/* Start slop */
	if (len>length)
		len = length;			/* Ending slop */
	return len;
}
2359
2360
2361
2362
2363
2364
2365
/* Packet-type handler registered for ETH_P_ARP frames; the type field is
 * filled in (byte-swapped) by arp_init() before registration. */
static struct packet_type arp_packet_type =
{
	0,		/* type: set to htons(ETH_P_ARP) in arp_init() */
	NULL,		/* dev: all devices */
	arp_rcv,	/* func: receive handler */
	NULL,		/* data */
	NULL		/* next */
};
2374
/* Notifier so the ARP cache can be flushed when a network device changes
 * state (handled by arp_device_event). */
static struct notifier_block arp_dev_notifier={
	arp_device_event,	/* notifier_call */
	NULL,			/* next */
	0			/* priority */
};
2380
/*
 *	Called once at boot to wire the ARP layer into the stack: register
 *	the ETH_P_ARP packet handler, start the cache-expiry timer, hook
 *	device state changes, and (optionally) the /proc entry and the
 *	arpd netlink channel.
 */
void arp_init (void)
{
	/* Register the packet type */
	arp_packet_type.type=htons(ETH_P_ARP);
	dev_add_pack(&arp_packet_type);
	/* Start with the regular checks */
	add_timer(&arp_timer);
	/* Register for device down reports */
	register_netdevice_notifier(&arp_dev_notifier);

#ifdef CONFIG_PROC_FS
	proc_net_register(&(struct proc_dir_entry) {
		PROC_NET_ARP, 3, "arp",
		S_IFREG | S_IRUGO, 1, 0, 0,
		0, &proc_net_inode_operations,
		arp_get_info
	});
#endif

#ifdef CONFIG_ARPD
	netlink_attach(NETLINK_ARPD, arpd_callback);
#endif
}
2403 }