This source file includes following definitions.
- arp_fast_lock
- arp_fast_unlock
- arp_unlock
- arp_enqueue
- arp_dequeue
- arp_release_entry
- arp_free_entry
- arp_count_hhs
- arp_force_expire
- arpd_update
- arp_add_entry
- arpd_lookup
- arp_invalidate_hhs
- arp_update_hhs
- arp_check_expire
- arp_expire_request
- arp_device_event
- arp_send
- arp_send_q
- arp_destroy
- arp_rcv
- arp_lookup
- arp_query
- arp_set_predefined
- arp_find
- arp_get_info
- arp_bind_cache
- arp_run_bh
- empty
- arp_req_set
- arp_req_get
- arp_req_delete
- arp_ioctl
- arp_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70 #include <linux/types.h>
71 #include <linux/string.h>
72 #include <linux/kernel.h>
73 #include <linux/sched.h>
74 #include <linux/config.h>
75 #include <linux/socket.h>
76 #include <linux/sockios.h>
77 #include <linux/errno.h>
78 #include <linux/in.h>
79 #include <linux/mm.h>
80 #include <linux/inet.h>
81 #include <linux/netdevice.h>
82 #include <linux/etherdevice.h>
83 #include <linux/if_arp.h>
84 #include <linux/trdevice.h>
85 #include <linux/skbuff.h>
86 #include <linux/proc_fs.h>
87 #include <linux/stat.h>
88
89 #include <net/ip.h>
90 #include <net/icmp.h>
91 #include <net/route.h>
92 #include <net/protocol.h>
93 #include <net/tcp.h>
94 #include <net/sock.h>
95 #include <net/arp.h>
96 #ifdef CONFIG_AX25
97 #include <net/ax25.h>
98 #ifdef CONFIG_NETROM
99 #include <net/netrom.h>
100 #endif
101 #endif
102 #ifdef CONFIG_NET_ALIAS
103 #include <linux/net_alias.h>
104 #endif
105 #ifdef CONFIG_ARPD
106 #include <linux/kerneld.h>
107 #endif
108
109 #include <asm/system.h>
110 #include <asm/segment.h>
111
112 #include <stdarg.h>
113
114
115
116
117
118
119
120
/*
 *	One ARP cache entry: a protocol (IPv4) address to hardware address
 *	mapping on a specific device, plus the state needed to resolve,
 *	retry, confirm and expire it.
 */
struct arp_table
{
	struct arp_table *next;			/* hash-bucket / proxy-list chain */
	unsigned long last_used;		/* jiffies of last lookup hit */
	unsigned long last_updated;		/* jiffies of last confirmation from the wire */
	unsigned int flags;			/* ATF_COM / ATF_PERM / ATF_PUBL / ... */
	u32 ip;					/* protocol address */
	u32 mask;				/* netmask -- only meaningful for proxy entries */
	unsigned char ha[MAX_ADDR_LEN];		/* hardware address */
	struct device *dev;			/* device the mapping belongs to */

	/*
	 *	The following fields are used while the hardware address
	 *	is unresolved (or being re-confirmed).
	 */

	struct timer_list timer;		/* retransmit / expiry timer */
	int retries;				/* requests left before giving up */
	struct sk_buff_head skb;		/* packets queued awaiting resolution */
	struct hh_cache *hh;			/* cached hardware headers bound to us */
};
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
/* Retransmit interval for a live resolution, and the (slow) interval used
   once a host has stopped answering but is still referenced. */
#define ARP_RES_TIME (5*HZ)
#define ARP_DEAD_RES_TIME (60*HZ)

/* Give up a resolution after this many unanswered requests. */
#define ARP_MAX_TRIES 3

/* Unreferenced, non-permanent entries idle longer than this are dropped
   by the periodic scan. */
#define ARP_TIMEOUT (600*HZ)

/* Period of the table garbage-collection timer (arp_check_expire). */
#define ARP_CHECK_INTERVAL (60*HZ)

/* Re-confirm a resolved entry this often, allowing ARP_CONFIRM_TIMEOUT
   for the peer to answer. */
#define ARP_CONFIRM_INTERVAL (300*HZ)
#define ARP_CONFIRM_TIMEOUT ARP_RES_TIME

/* arp_lock is a recursion counter protecting the table; arp_bh_mask
   records bottom-half work postponed while it was held. */
static unsigned int arp_lock;
static unsigned int arp_bh_mask;

#define ARP_BH_BACKLOG 1

/* Entries created while the table was locked wait here until the last
   unlocker installs them (see arp_run_bh). */
static struct arp_table *arp_backlog;

#ifdef CONFIG_ARPD
#define ARP_MAXSIZE 256		/* in-kernel cache cap when arpd holds the rest */
#endif

static unsigned int arp_size = 0;	/* current number of cache entries */

static void arp_run_bh(void);
static void arp_check_expire (unsigned long);

/* Periodic GC timer; re-armed by arp_check_expire itself. */
static struct timer_list arp_timer =
	{ NULL, NULL, ARP_CHECK_INTERVAL, 0L, &arp_check_expire };

/* Netmask stored in ordinary (non-proxy) entries: exact-host match. */
#define DEF_ARP_NETMASK (~0)

/* Hash buckets, plus one extra chain (the last slot) for proxy entries. */
#define ARP_TABLE_SIZE 16
#define FULL_ARP_TABLE_SIZE (ARP_TABLE_SIZE+1)

struct arp_table *arp_tables[FULL_ARP_TABLE_SIZE] =
{
	NULL,
};

#define arp_proxy_list arp_tables[ARP_TABLE_SIZE]

/* Bucket selector: low bits of the host-order IP address. */
#define HASH(paddr) (htonl(paddr) & (ARP_TABLE_SIZE - 1))
239
240
241
242
243
/*
 *	Take the ARP table lock.  The "lock" is a recursion counter: any
 *	non-zero value keeps arp_run_bh() and re-entrant paths away from
 *	the table structure.
 */
static __inline__ void arp_fast_lock(void)
{
	ATOMIC_INCR(&arp_lock);
}
248
/*
 *	Drop the lock without running any postponed bottom-half work.
 */
static __inline__ void arp_fast_unlock(void)
{
	ATOMIC_DECR(&arp_lock);
}
253
/*
 *	Drop the lock; if we were the last holder and work was deferred
 *	while the table was busy, run it now.
 */
static __inline__ void arp_unlock(void)
{
	if (!ATOMIC_DECR_AND_CHECK(&arp_lock) && arp_bh_mask)
		arp_run_bh();
}
259
260
261
262
263
264 static void arp_enqueue(struct arp_table **q, struct arp_table *entry)
265 {
266 unsigned long flags;
267 struct arp_table * tail;
268
269 save_flags(flags);
270 cli();
271 tail = *q;
272 if (!tail)
273 entry->next = entry;
274 else
275 {
276 entry->next = tail->next;
277 tail->next = entry;
278 }
279 *q = entry;
280 restore_flags(flags);
281 return;
282 }
283
284
285
286
287
288
289 static struct arp_table * arp_dequeue(struct arp_table **q)
290 {
291 struct arp_table * entry;
292
293 if (*q)
294 {
295 entry = (*q)->next;
296 (*q)->next = entry->next;
297 if (entry->next == entry)
298 *q = NULL;
299 entry->next = NULL;
300 return entry;
301 }
302 return NULL;
303 }
304
305
306
307
308
/*
 *	Free every sk_buff queued on an unresolved entry.  The queue is
 *	only manipulated with interrupts off, but interrupts are restored
 *	around each dev_kfree_skb() since freeing may be slow.
 */
static void arp_release_entry(struct arp_table *entry)
{
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);
	cli();

	while ((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		skb_device_lock(skb);
		restore_flags(flags);
		dev_kfree_skb(skb, FREE_WRITE);
		cli();
	}
	restore_flags(flags);
	return;
}
327
328
329
330
331
332
333 static void arp_free_entry(struct arp_table *entry)
334 {
335 unsigned long flags;
336 struct hh_cache *hh, *next;
337
338 del_timer(&entry->timer);
339
340 save_flags(flags);
341 cli();
342 arp_release_entry(entry);
343
344 for (hh = entry->hh; hh; hh = next)
345 {
346 next = hh->hh_next;
347 hh->hh_arp = NULL;
348 hh->hh_uptodate = 0;
349 if (!--hh->hh_refcnt)
350 kfree_s(hh, sizeof(struct(struct hh_cache)));
351 }
352 restore_flags(flags);
353
354 kfree_s(entry, sizeof(struct arp_table));
355 --arp_size;
356 return;
357 }
358
359
360
361
362
/*
 *	Count outside references to this entry's hardware-header cache
 *	records.  Records held only by the entry itself (hh_refcnt == 1)
 *	are garbage and are unlinked and freed on the way through.
 *	Returns the number of remaining external references.
 */
static __inline__ int arp_count_hhs(struct arp_table * entry)
{
	struct hh_cache *hh, **hhp;
	int count = 0;

	hhp = &entry->hh;
	while ((hh=*hhp) != NULL)
	{
		if (hh->hh_refcnt == 1)
		{
			/* Only our own reference remains: free in place. */
			*hhp = hh->hh_next;
			kfree_s(hh, sizeof(struct hh_cache));
			continue;
		}
		count += hh->hh_refcnt-1;	/* exclude our own reference */
		hhp = &hh->hh_next;
	}

	return count;
}
383
384
385
386
387
388
389
390
391
392
393
#ifdef CONFIG_ARPD
/*
 *	The bounded in-kernel cache is full: evict the least recently used
 *	entry to make room.  An entry with no outstanding hh_cache
 *	references is preferred; failing that, the overall-oldest entry is
 *	used as a last resort.  Returns 0 on success, -1 if the table was
 *	empty.  Caller holds the ARP lock.
 */
static int arp_force_expire(void)
{
	int i;
	struct arp_table *entry = NULL;
	struct arp_table **pentry = NULL;
	struct arp_table **oldest_entry = NULL, **last_resort = NULL;
	unsigned long oldest_used = ~0;

#if RT_CACHE_DEBUG >= 2
	printk("Looking for something to force expire.\n");
#endif
	for (i = 0; i < ARP_TABLE_SIZE; i++)
	{
		pentry = &arp_tables[i];

		while ((entry = *pentry) != NULL)
		{
			if (entry->last_used < oldest_used)
			{
				if (arp_count_hhs(entry) == 0)
				{
					oldest_entry = pentry;
				}
				last_resort = pentry;
				oldest_used = entry->last_used;
			}
			pentry = &entry->next;
		}
	}
	if (oldest_entry == NULL)
	{
		if (last_resort == NULL)
			return -1;
		oldest_entry = last_resort;
	}

	/* Unlink the victim from its chain, then free it. */
	entry = *oldest_entry;
	*oldest_entry = (*oldest_entry)->next;
#if RT_CACHE_DEBUG >= 2
	printk("Force expiring %08x\n", entry->ip);
#endif
	arp_free_entry(entry);
	return 0;
}
#endif
440
441
/*
 *	Push one entry's current state out to the user-mode ARP daemon via
 *	kerneld.  `loc' identifies the call site (callers pass __LINE__).
 *	Compiles to a no-op unless CONFIG_ARPD is set.
 */
static void arpd_update(struct arp_table * entry, int loc)
{
#ifdef CONFIG_ARPD
	static struct arpd_request arpreq;

	arpreq.req = ARPD_UPDATE;
	arpreq.ip = entry->ip;
	arpreq.mask = entry->mask;
	memcpy (arpreq.ha, entry->ha, MAX_ADDR_LEN);
	arpreq.loc = loc;
	arpreq.last_used = entry->last_used;
	arpreq.last_updated = entry->last_updated;
	arpreq.flags = entry->flags;
	arpreq.dev = entry->dev;

	/* Fire-and-forget: no reply is awaited. */
	kerneld_send(KERNELD_ARP, 0, sizeof(arpreq),
		(char *) &arpreq, NULL);
#endif
}
461
462
463
464
465
466
467
468
469 static struct arp_table * arp_add_entry(void)
470 {
471 struct arp_table * entry;
472
473 #ifdef CONFIG_ARPD
474 if (arp_size >= ARP_MAXSIZE)
475 {
476 if (arp_force_expire() < 0)
477 return NULL;
478 }
479 #endif
480
481 entry = (struct arp_table *)
482 kmalloc(sizeof(struct arp_table),GFP_ATOMIC);
483
484 if (entry != NULL)
485 ++arp_size;
486 return entry;
487 }
488
489
490
491
492
493
494 static struct arp_table * arpd_lookup(u32 addr, unsigned short flags,
495 struct device * dev,
496 int loc)
497 {
498 #ifdef CONFIG_ARPD
499 static struct arpd_request arpreq, retreq;
500 struct arp_table * entry;
501 int rv, i;
502
503 arpreq.req = ARPD_LOOKUP;
504 arpreq.ip = addr;
505 arpreq.loc = loc;
506
507 rv = kerneld_send(KERNELD_ARP,
508 sizeof(retreq) | KERNELD_WAIT,
509 sizeof(arpreq),
510 (char *) &arpreq,
511 (char *) &retreq);
512
513
514
515
516 if (rv != 0)
517 return NULL;
518 if (dev != retreq.dev)
519 return NULL;
520 if (! memcmp (retreq.ha, "\0\0\0\0\0\0", 6))
521 return NULL;
522
523 arp_fast_lock();
524 entry = arp_add_entry();
525 arp_unlock();
526
527 if (entry == NULL)
528 return NULL;
529
530 entry->next = NULL;
531 entry->last_used = retreq.last_used;
532 entry->last_updated = retreq.last_updated;
533 entry->flags = retreq.flags;
534 entry->ip = retreq.ip;
535 entry->mask = retreq.mask;
536 memcpy (entry->ha, retreq.ha, MAX_ADDR_LEN);
537 arpreq.dev = entry->dev;
538
539 skb_queue_head_init(&entry->skb);
540 entry->hh = NULL;
541 entry->retries = 0;
542
543 #if RT_CACHE_DEBUG >= 2
544 printk("Inserting arpd entry %08x\n in local cache.", entry->ip);
545 #endif
546 i = HASH(entry->ip);
547 arp_fast_lock();
548 entry->next = arp_tables[i]->next;
549 arp_tables[i]->next = entry;
550 arp_unlock();
551 return entry;
552 #endif
553 return NULL;
554 }
555
556
557
558
559
560
561 static __inline__ void arp_invalidate_hhs(struct arp_table * entry)
562 {
563 struct hh_cache *hh;
564
565 for (hh=entry->hh; hh; hh=hh->hh_next)
566 hh->hh_uptodate = 0;
567 }
568
569
570
571
572
573 static __inline__ void arp_update_hhs(struct arp_table * entry)
574 {
575 struct hh_cache *hh;
576
577 for (hh=entry->hh; hh; hh=hh->hh_next)
578 entry->dev->header_cache_update(hh, entry->dev, entry->ha);
579 }
580
581
582
583
584
585
586
587
588
589
/*
 *	Periodic garbage collector (runs every ARP_CHECK_INTERVAL from
 *	arp_timer).  Unreferenced, non-permanent entries idle longer than
 *	ARP_TIMEOUT are freed; resolved entries unconfirmed for longer
 *	than ARP_CONFIRM_INTERVAL are demoted and re-probed.  If the table
 *	is locked the whole pass is simply skipped until next time.
 */
static void arp_check_expire(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;

	del_timer(&arp_timer);

	if (!arp_lock)
	{
		arp_fast_lock();

		for (i = 0; i < ARP_TABLE_SIZE; i++)
		{
			struct arp_table *entry;
			struct arp_table **pentry;

			pentry = &arp_tables[i];

			while ((entry = *pentry) != NULL)
			{
				cli();
				if (now - entry->last_used > ARP_TIMEOUT
				    && !(entry->flags & ATF_PERM)
				    && !arp_count_hhs(entry))
				{
					/* Idle and unreferenced: unlink and free. */
					*pentry = entry->next;
					sti();
#if RT_CACHE_DEBUG >= 2
					printk("arp_expire: %08x expired\n", entry->ip);
#endif
					arp_free_entry(entry);
				}
				else if (entry->last_updated
					 && now - entry->last_updated > ARP_CONFIRM_INTERVAL
					 && !(entry->flags & ATF_PERM))
				{
					/* Stale: drop ATF_COM and send a unicast
					   probe to re-confirm the mapping. */
					struct device * dev = entry->dev;
					pentry = &entry->next;
					entry->flags &= ~ATF_COM;
					arp_invalidate_hhs(entry);
					sti();
					entry->retries = ARP_MAX_TRIES+1;
					del_timer(&entry->timer);
					entry->timer.expires = jiffies + ARP_CONFIRM_TIMEOUT;
					add_timer(&entry->timer);
					arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip,
						 dev, dev->pa_addr, entry->ha,
						 dev->dev_addr, NULL);
#if RT_CACHE_DEBUG >= 2
					printk("arp_expire: %08x requires confirmation\n", entry->ip);
#endif
				}
				else
					pentry = &entry->next;
			}
		}
		arp_unlock();
	}

	ip_rt_check_expire();

	/* Re-arm ourselves for the next pass. */
	arp_timer.expires = jiffies + ARP_CHECK_INTERVAL;
	add_timer(&arp_timer);
}
658
659
660
661
662
663
664
/*
 *	Per-entry timer handler: a resolution (or re-confirmation) has not
 *	completed in time.  Retransmit while retries remain; once they are
 *	exhausted, keep the entry alive at the slow "dead" rate if header
 *	caches still reference it, otherwise unlink and free it.
 */
static void arp_expire_request (unsigned long arg)
{
	struct arp_table *entry = (struct arp_table *) arg;
	struct arp_table **pentry;
	unsigned long hash;
	unsigned long flags;

	save_flags(flags);
	cli();

	/*
	 *	Timeouts run with interrupts enabled, so the entry may have
	 *	just been resolved by an incoming packet: nothing to do.
	 */
	if (entry->flags & ATF_COM)
	{
		restore_flags(flags);
		return;
	}

	/*
	 *	Somebody holds the table lock: retry shortly instead of
	 *	touching the table now.
	 */
	if (arp_lock)
	{
#if RT_CACHE_DEBUG >= 1
		printk("arp_expire_request: %08x postponed\n", entry->ip);
#endif
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + HZ/10;
		add_timer(&entry->timer);
		restore_flags(flags);
		return;
	}

	arp_fast_lock();
	restore_flags(flags);

	if (entry->last_updated && --entry->retries > 0)
	{
		struct device *dev = entry->dev;

#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x timed out\n", entry->ip);
#endif
		/* Re-arm and retransmit the request (broadcast). */
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + ARP_RES_TIME;
		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
			 NULL, dev->dev_addr, NULL);
		arp_unlock();
		return;
	}

	/* Out of retries: drop whatever packets were waiting on it. */
	arp_release_entry(entry);

	cli();
	if (arp_count_hhs(entry))
	{
		/*
		 *	Header caches still point at this (dead) entry, so it
		 *	cannot be freed; keep probing at the slow dead-host rate.
		 */
		struct device *dev = entry->dev;
#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x is dead\n", entry->ip);
#endif
		arp_release_entry(entry);
		entry->retries = ARP_MAX_TRIES;
		restore_flags(flags);
		entry->last_updated = 0;
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + ARP_DEAD_RES_TIME;
		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
			 NULL, dev->dev_addr, NULL);
		arp_unlock();
		return;
	}
	restore_flags(flags);

	/* Nobody references it: unlink it from its bucket and free it. */
	hash = HASH(entry->ip);

	pentry = &arp_tables[hash];

	while (*pentry != NULL)
	{
		if (*pentry == entry)
		{
			cli();
			*pentry = entry->next;
			restore_flags(flags);
#if RT_CACHE_DEBUG >= 2
			printk("arp_expire_request: %08x is killed\n", entry->ip);
#endif
			arp_free_entry(entry);
			arp_unlock();
			return;
		}
		pentry = &(*pentry)->next;
	}
	printk("arp_expire_request: bug: ARP entry is lost!\n");
	arp_unlock();
}
765
766
767
768
769
770 int arp_device_event(struct notifier_block *this, unsigned long event, void *ptr)
771 {
772 struct device *dev=ptr;
773 int i;
774
775 if (event != NETDEV_DOWN)
776 return NOTIFY_DONE;
777
778
779
780
781 #if RT_CACHE_DEBUG >= 1
782 if (arp_lock)
783 printk("arp_device_event: bug\n");
784 #endif
785 arp_fast_lock();
786
787 for (i = 0; i < FULL_ARP_TABLE_SIZE; i++)
788 {
789 struct arp_table *entry;
790 struct arp_table **pentry = &arp_tables[i];
791
792 while ((entry = *pentry) != NULL)
793 {
794 if (entry->dev == dev)
795 {
796 *pentry = entry->next;
797 arp_free_entry(entry);
798 }
799 else
800 pentry = &entry->next;
801 }
802 }
803 arp_unlock();
804 return NOTIFY_DONE;
805 }
806
807
808
809
810
811
812
/*
 *	Build and transmit one ARP packet of the given operation `type'
 *	(request/reply) with frame protocol id `ptype'.  A NULL dest_hw
 *	broadcasts the frame; a NULL target_hw zero-fills the target
 *	hardware field (the usual form for requests).
 */
void arp_send(int type, int ptype, u32 dest_ip,
	      struct device *dev, u32 src_ip,
	      unsigned char *dest_hw, unsigned char *src_hw,
	      unsigned char *target_hw)
{
	struct sk_buff *skb;
	struct arphdr *arp;
	unsigned char *arp_ptr;

	/*
	 *	No arp on this interface.
	 */
	if (dev->flags&IFF_NOARP)
		return;

	/*
	 *	Allocate a buffer: header + two (hw,ip) address pairs, plus
	 *	room for the link-layer header in front.
	 */
	skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
				+ dev->hard_header_len, GFP_ATOMIC);
	if (skb == NULL)
	{
		printk("ARP: no memory to send an arp packet\n");
		return;
	}
	skb_reserve(skb, dev->hard_header_len);
	arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4));
	skb->arp = 1;
	skb->dev = dev;
	skb->free = 1;
	skb->protocol = htons (ETH_P_IP);

	/*
	 *	Fill the device header for the ARP frame.
	 */
	dev->hard_header(skb,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len);

	/* Fixed ARP header fields.  AX.25/NET/ROM links carry IP under
	   AX25_P_IP rather than ETH_P_IP. */
	arp->ar_hrd = htons(dev->type);
#ifdef CONFIG_AX25
#ifdef CONFIG_NETROM
	arp->ar_pro = (dev->type == ARPHRD_AX25 || dev->type == ARPHRD_NETROM) ? htons(AX25_P_IP) : htons(ETH_P_IP);
#else
	arp->ar_pro = (dev->type != ARPHRD_AX25) ? htons(ETH_P_IP) : htons(AX25_P_IP);
#endif
#else
	arp->ar_pro = htons(ETH_P_IP);
#endif
	arp->ar_hln = dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	/* Variable part: sender hw, sender ip, target hw, target ip. */
	arp_ptr=(unsigned char *)(arp+1);

	memcpy(arp_ptr, src_hw, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &src_ip,4);
	arp_ptr+=4;
	if (target_hw != NULL)
		memcpy(arp_ptr, target_hw, dev->addr_len);
	else
		memset(arp_ptr, 0, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &dest_ip, 4);

	dev_queue_xmit(skb, dev, 0);
}
883
884
885
886
887
/*
 *	Flush the packets queued on a newly-completed entry: rebuild each
 *	one's link-layer header (now that the hardware address is known)
 *	and transmit it.  Must only be called on an ATF_COM entry.
 */
static void arp_send_q(struct arp_table *entry)
{
	struct sk_buff *skb;

	unsigned long flags;

	/*
	 *	This should never happen: the queue is only flushed once the
	 *	entry has been marked complete.
	 */
	if(!(entry->flags&ATF_COM))
	{
		printk("arp_send_q: incomplete entry for %s\n",
				in_ntoa(entry->ip));
		return;
	}

	save_flags(flags);

	cli();
	while((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		IS_SKB(skb);
		skb_device_lock(skb);
		restore_flags(flags);
		/* rebuild_header() returns 0 once the frame is ready to go. */
		if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb))
		{
			skb->arp = 1;
			if(skb->sk==NULL)
				dev_queue_xmit(skb, skb->dev, 0);
			else
				dev_queue_xmit(skb,skb->dev,skb->sk->priority);
		}
	}
	restore_flags(flags);
}
928
929
930
931
932
933
934 static void arp_destroy(struct arp_table * entry)
935 {
936 struct arp_table *entry1;
937 struct arp_table **pentry;
938
939 if (entry->flags & ATF_PUBL)
940 pentry = &arp_proxy_list;
941 else
942 pentry = &arp_tables[HASH(entry->ip)];
943
944 while ((entry1 = *pentry) != NULL)
945 {
946 if (entry1 == entry)
947 {
948 *pentry = entry1->next;
949 del_timer(&entry->timer);
950 arp_free_entry(entry);
951 return;
952 }
953 pentry = &entry1->next;
954 }
955 }
956
957
958
959
960
961
962
/*
 *	Receive an ARP packet (registered handler for ETH_P_ARP frames).
 *	Requests aimed at us -- or at an address we proxy for -- are
 *	answered; in either case the sender's mapping is folded into the
 *	cache.  The skb is always consumed; the return value is always 0.
 */
int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
{
	struct arphdr *arp = (struct arphdr *)skb->h.raw;
	unsigned char *arp_ptr= (unsigned char *)(arp+1);
	struct arp_table *entry;
	struct arp_table *proxy_entry;
	unsigned long hash, grat=0;
	unsigned char ha[MAX_ADDR_LEN];
	unsigned char *sha,*tha;
	u32 sip,tip;

	/*
	 *	Sanity-check against the receiving device: hardware address
	 *	length and type must match, the device must do ARP at all,
	 *	and the protocol address must be IPv4 (4 bytes).
	 */
	if (arp->ar_hln != dev->addr_len ||
	    dev->type != ntohs(arp->ar_hrd) ||
	    dev->flags & IFF_NOARP ||
	    arp->ar_pln != 4)
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	/*
	 *	Check that the protocol field carries the value this link
	 *	type uses for IP (AX.25/NET/ROM use AX25_P_IP).
	 */
	switch (dev->type)
	{
#ifdef CONFIG_AX25
		case ARPHRD_AX25:
			if(arp->ar_pro != htons(AX25_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;
#endif
#ifdef CONFIG_NETROM
		case ARPHRD_NETROM:
			if(arp->ar_pro != htons(AX25_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;
#endif
		case ARPHRD_ETHER:
		case ARPHRD_ARCNET:
			if(arp->ar_pro != htons(ETH_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;

		case ARPHRD_IEEE802:
			if(arp->ar_pro != htons(ETH_P_IP))
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			break;

		default:
			printk("ARP: dev->type mangled!\n");
			kfree_skb(skb, FREE_READ);
			return 0;
	}

	/*
	 *	Extract the variable-length fields: sender hw/IP address,
	 *	then target hw/IP address.
	 */
	sha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	tha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/*
	 *	Nobody legitimately ARPs for a loopback or multicast address.
	 */
	if (LOOPBACK(tip) || MULTICAST(tip))
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

#ifdef CONFIG_NET_ALIAS
	/*
	 *	The target may be one of this device's aliases: re-select
	 *	the device and re-check that it still speaks ARP of this type.
	 */
	if (tip != dev->pa_addr && net_alias_has(skb->dev))
	{
		dev = net_alias_dev_rcv_sel32(dev, AF_INET, sip, tip);

		if (dev->type != ntohs(arp->ar_hrd) || dev->flags & IFF_NOARP)
		{
			kfree_skb(skb, FREE_READ);
			return 0;
		}
	}
#endif

	if (arp->ar_op == htons(ARPOP_REQUEST))
	{
		/*
		 *	Request for an address that is not ours: answer only
		 *	if a proxy entry on this device covers it.
		 */
		if (tip != dev->pa_addr)
		{
			arp_fast_lock();

			for (proxy_entry=arp_proxy_list;
			     proxy_entry;
			     proxy_entry = proxy_entry->next)
			{
				/* Masked compare: a proxy entry may publish a
				   whole subnet, not just one host. */
				if (proxy_entry->dev == dev &&
				    !((proxy_entry->ip^tip)&proxy_entry->mask))
					break;

			}
			if (proxy_entry)
			{
				memcpy(ha, proxy_entry->ha, dev->addr_len);
				arp_unlock();
				arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,ha, sha);
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			else
			{
				arp_unlock();
			}
		}
		else
		{
			/*
			 *	Request for one of our own addresses: reply
			 *	with this device's hardware address.
			 */
			arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
		}
		/* Fall through to refresh the sender's mapping, but never
		   create a fresh entry from unsolicited traffic (grat). */
		grat = 1;
		goto gratuitous;
	}

	/*
	 *	It is a reply.  Ignore it unless it resolves one of our own
	 *	addresses -- we did not ask otherwise.
	 */
	if(ip_chk_addr(tip)!=IS_MYADDR)
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

gratuitous:

	arp_fast_lock();

	/* Look the sender up in its hash bucket. */
	hash = HASH(sip);
	for (entry=arp_tables[hash]; entry; entry=entry->next)
		if (entry->ip == sip && entry->dev == dev)
			break;

	if (entry)
	{
		/*
		 *	Refresh an existing mapping (unless it is pinned with
		 *	ATF_PERM), updating cached hardware headers whenever
		 *	the address actually changed.
		 */
		if (!(entry->flags & ATF_PERM))
		{
			if(memcmp(entry->ha, sha,dev->addr_len)!=0)
			{
				memcpy(entry->ha, sha, dev->addr_len);
				if(entry->flags & ATF_COM)
					arp_update_hhs(entry);
			}
			entry->last_updated = jiffies;
			arpd_update(entry, __LINE__);
		}
		if (!(entry->flags & ATF_COM))
		{
			/*
			 *	This answers a pending resolution: complete the
			 *	entry and flush its queued packets.
			 */
			del_timer(&entry->timer);
			entry->flags |= ATF_COM;
			arp_update_hhs(entry);

			arp_send_q(entry);
		}
	}
	else
	{
		/*
		 *	Unknown sender.  Gratuitous traffic may refresh
		 *	existing entries but never creates new ones.
		 */
		if (grat)
			goto end;

		entry = arp_add_entry();
		if(entry == NULL)
		{
			arp_unlock();
#if RT_CACHE_DEBUG >= 2
			printk("ARP: no memory for new arp entry\n");
#endif
			kfree_skb(skb, FREE_READ);
			return 0;
		}

		entry->mask = DEF_ARP_NETMASK;
		entry->ip = sip;
		entry->flags = ATF_COM;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;
		memcpy(entry->ha, sha, dev->addr_len);
		entry->last_updated = entry->last_used = jiffies;
		arpd_update(entry, __LINE__);

#ifdef CONFIG_NET_ALIAS
		entry->dev = dev;
#else
		entry->dev = skb->dev;
#endif
		skb_queue_head_init(&entry->skb);
		if (arp_lock == 1)
		{
			/* We are the only lock holder: install directly. */
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
		}
		else
		{
			/* Table busy: park on the backlog; the last unlocker
			   installs it (arp_run_bh). */
#if RT_CACHE_DEBUG >= 2
			printk("arp_rcv: %08x backlogged\n", entry->ip);
#endif
			arp_enqueue(&arp_backlog, entry);
			arp_bh_mask |= ARP_BH_BACKLOG;
		}
	}

end:
	kfree_skb(skb, FREE_READ);
	arp_unlock();
	return 0;
}
1284
1285
1286
1287
1288
1289
1290
1291
1292 static struct arp_table *arp_lookup(u32 paddr, unsigned short flags, struct device * dev)
1293 {
1294 struct arp_table *entry;
1295
1296 if (!(flags & ATF_PUBL))
1297 {
1298 for (entry = arp_tables[HASH(paddr)];
1299 entry != NULL; entry = entry->next)
1300 if (entry->ip == paddr && (!dev || entry->dev == dev))
1301 break;
1302 return entry;
1303 }
1304
1305 if (!(flags & ATF_NETMASK))
1306 {
1307 for (entry = arp_proxy_list;
1308 entry != NULL; entry = entry->next)
1309 if (entry->ip == paddr && (!dev || entry->dev == dev))
1310 break;
1311 return entry;
1312 }
1313
1314 for (entry=arp_proxy_list; entry != NULL; entry = entry->next)
1315 if (!((entry->ip^paddr)&entry->mask) &&
1316 (!dev || entry->dev == dev))
1317 break;
1318 return entry;
1319 }
1320
1321
1322
1323
1324
1325 int arp_query(unsigned char *haddr, u32 paddr, struct device * dev)
1326 {
1327 struct arp_table *entry;
1328
1329 arp_fast_lock();
1330
1331 entry = arp_lookup(paddr, 0, dev);
1332 if (entry == NULL)
1333 entry = arpd_lookup(paddr, 0, dev, __LINE__);
1334
1335 if (entry != NULL)
1336 {
1337 entry->last_used = jiffies;
1338 if (entry->flags & ATF_COM)
1339 {
1340 memcpy(haddr, entry->ha, dev->addr_len);
1341 arpd_update(entry, __LINE__);
1342 arp_unlock();
1343 return 1;
1344 }
1345 }
1346 arpd_update(entry, __LINE__);
1347 arp_unlock();
1348 return 0;
1349 }
1350
1351
/*
 *	Handle addresses that never need a table lookup: our own address,
 *	IP multicast (algorithmically mapped onto a link-layer multicast
 *	address on Ethernet/802.2), and broadcast.  Returns 1 with *haddr
 *	filled in when handled, 0 when a real lookup is required.
 */
static int arp_set_predefined(int addr_hint, unsigned char * haddr, __u32 paddr, struct device * dev)
{
	switch (addr_hint)
	{
		case IS_MYADDR:
			printk("ARP: arp called for own IP address\n");
			memcpy(haddr, dev->dev_addr, dev->addr_len);
			return 1;
#ifdef CONFIG_IP_MULTICAST
		case IS_MULTICAST:
			if(dev->type==ARPHRD_ETHER || dev->type==ARPHRD_IEEE802)
			{
				/* 01:00:5e + low 23 bits of the group address. */
				u32 taddr;
				haddr[0]=0x01;
				haddr[1]=0x00;
				haddr[2]=0x5e;
				taddr=ntohl(paddr);
				haddr[5]=taddr&0xff;
				taddr=taddr>>8;
				haddr[4]=taddr&0xff;
				taddr=taddr>>8;
				haddr[3]=taddr&0x7f;
				return 1;
			}
			/*
			 *	No hardware multicast mapping for this device
			 *	type: deliberately fall through to broadcast.
			 */
#endif

		case IS_BROADCAST:
			memcpy(haddr, dev->broadcast, dev->addr_len);
			return 1;
	}
	return 0;
}
1387
1388
1389
1390
1391
/*
 *	Main resolver entry point, called when building an outgoing frame.
 *	Returns 0 with *haddr filled in when the hardware address is
 *	available now, or 1 when resolution is pending (in which case the
 *	skb, if any, has been queued or dropped and a request was sent).
 */
int arp_find(unsigned char *haddr, u32 paddr, struct device *dev,
	     u32 saddr, struct sk_buff *skb)
{
	struct arp_table *entry;
	unsigned long hash;

	/* Addresses with a fixed mapping never touch the table. */
	if (arp_set_predefined(ip_chk_addr(paddr), haddr, paddr, dev))
	{
		if (skb)
			skb->arp = 1;
		return 0;
	}

	hash = HASH(paddr);
	arp_fast_lock();

	/* Try the kernel cache first, then the user-mode daemon. */
	entry = arp_lookup(paddr, 0, dev);
	if (entry == NULL)
		entry = arpd_lookup(paddr, 0, dev, __LINE__);

	if (entry != NULL)
	{
		if (!(entry->flags & ATF_COM))
		{
			/*
			 *	Resolution already in progress.  Queue the
			 *	packet on it -- unless the host is known dead
			 *	(last_updated == 0), in which case report
			 *	unreachability and drop.
			 */
			if (skb != NULL)
			{
				if (entry->last_updated)
				{
					skb_queue_tail(&entry->skb, skb);
					skb_device_unlock(skb);
				}
				else
				{
#if 0
					if (skb->sk)
					{
						skb->sk->err = EHOSTDOWN;
						skb->sk->error_report(skb->sk);
					}
#else
					icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
#endif
					dev_kfree_skb(skb, FREE_WRITE);
				}
			}
			arp_unlock();
			return 1;
		}

		/* Completed entry: hand back the cached address. */
		entry->last_used = jiffies;
		memcpy(haddr, entry->ha, dev->addr_len);
		arpd_update(entry, __LINE__);
		if (skb)
			skb->arp = 1;
		arp_unlock();
		return 0;
	}

	/*
	 *	Unknown address: create an incomplete entry, queue the skb
	 *	on it and broadcast a request.
	 */
	entry = arp_add_entry();
	if (entry != NULL)
	{
		entry->last_updated = entry->last_used = jiffies;
		entry->flags = 0;
		entry->ip = paddr;
		entry->mask = DEF_ARP_NETMASK;
		memset(entry->ha, 0, dev->addr_len);
		entry->dev = dev;
		entry->hh = NULL;
		arpd_update(entry, __LINE__);
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;
		entry->timer.expires = jiffies + ARP_RES_TIME;
		skb_queue_head_init(&entry->skb);
		if (skb != NULL)
		{
			skb_queue_tail(&entry->skb, skb);
			skb_device_unlock(skb);
		}
		if (arp_lock == 1)
		{
			/* Sole lock holder: install into the table directly. */
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
			add_timer(&entry->timer);
			entry->retries = ARP_MAX_TRIES;
		}
		else
		{
			/* Table busy: defer installation to arp_run_bh(). */
#if RT_CACHE_DEBUG >= 2
			printk("arp_find: %08x backlogged\n", entry->ip);
#endif
			arp_enqueue(&arp_backlog, entry);
			arp_bh_mask |= ARP_BH_BACKLOG;
		}
	}
	else if (skb != NULL)
		dev_kfree_skb(skb, FREE_WRITE);
	arp_unlock();

	/* Send the first request regardless of whether the entry fit. */
	arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, saddr, NULL,
		 dev->dev_addr, NULL);

	return 1;
}
1524
1525
1526
1527
1528
1529
#define HBUFFERLEN 30

/*
 *	/proc/net/arp read handler: format the whole table (including the
 *	proxy chain in the extra slot) into `buffer', honouring the
 *	offset/length windowing protocol used by the proc filesystem.
 */
int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
	int len=0;
	off_t pos=0;
	int size;
	struct arp_table *entry;
	char hbuffer[HBUFFERLEN];
	int i,j,k;
	const char hexbuf[] = "0123456789ABCDEF";

	size = sprintf(buffer,"IP address HW type Flags HW address Mask Device\n");

	pos+=size;
	len+=size;

	arp_fast_lock();

	for(i=0; i<FULL_ARP_TABLE_SIZE; i++)
	{
		for(entry=arp_tables[i]; entry!=NULL; entry=entry->next)
		{
			/*
			 *	Render the hardware address: AX.25/NET/ROM
			 *	callsigns get a textual form, everything else
			 *	colon-separated hex.
			 */
#ifdef CONFIG_AX25
#ifdef CONFIG_NETROM
			if (entry->dev->type == ARPHRD_AX25 || entry->dev->type == ARPHRD_NETROM)
				strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
			else {
#else
			if(entry->dev->type==ARPHRD_AX25)
				strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
			else {
#endif
#endif

			for(k=0,j=0;k<HBUFFERLEN-3 && j<entry->dev->addr_len;j++)
			{
				hbuffer[k++]=hexbuf[ (entry->ha[j]>>4)&15 ];
				hbuffer[k++]=hexbuf[ entry->ha[j]&15 ];
				hbuffer[k++]=':';
			}
			hbuffer[--k]=0;	/* strip the trailing ':' */

#ifdef CONFIG_AX25
			}
#endif
			size = sprintf(buffer+len,
				"%-17s0x%-10x0x%-10x%s",
				in_ntoa(entry->ip),
				(unsigned int)entry->dev->type,
				entry->flags,
				hbuffer);
#if RT_CACHE_DEBUG < 2
			size += sprintf(buffer+len+size,
				" %-17s %s\n",
				entry->mask==DEF_ARP_NETMASK ?
				"*" : in_ntoa(entry->mask), entry->dev->name);
#else
			/* Debug builds also show hh refcount/validity. */
			size += sprintf(buffer+len+size,
				" %-17s %s\t%ld\t%1d\n",
				entry->mask==DEF_ARP_NETMASK ?
				"*" : in_ntoa(entry->mask), entry->dev->name,
				entry->hh ? entry->hh->hh_refcnt : -1,
				entry->hh ? entry->hh->hh_uptodate : 0);
#endif

			len += size;
			pos += size;

			if (pos <= offset)
				len=0;		/* still before the window */
			if (pos >= offset+length)
				goto done;	/* window filled */
		}
	}
done:
	arp_unlock();

	*start = buffer+len-(pos-offset);	/* where the wanted data starts */
	len = pos-offset;
	if (len>length)
		len = length;
	return len;
}
1617
1618
1619
/*
 *	Bind (or create) a hardware-header cache record of type `htype'
 *	for `paddr' on `dev', storing it through *hhp.  Returns 1 when the
 *	caller should not start a resolution (already bound, predefined
 *	address handled, or allocation failed), 0 when a new record was
 *	installed.
 */
int arp_bind_cache(struct hh_cache ** hhp, struct device *dev, unsigned short htype, u32 paddr)
{
	struct arp_table *entry;
	struct hh_cache *hh = *hhp;
	int addr_hint;
	unsigned long flags;

	if (hh)
		return 1;

	/* Predefined (own/multicast/broadcast) addresses get a standalone
	   record not tied to any table entry. */
	if ((addr_hint = ip_chk_addr(paddr)) != 0)
	{
		unsigned char haddr[MAX_ADDR_LEN];
		if (hh)
			return 1;
		hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
		if (!hh)
			return 1;
		arp_set_predefined(addr_hint, haddr, paddr, dev);
		hh->hh_uptodate = 0;
		hh->hh_refcnt = 1;
		hh->hh_arp = NULL;
		hh->hh_next = NULL;
		hh->hh_type = htype;
		*hhp = hh;
		dev->header_cache_update(hh, dev, haddr);
		return 0;
	}

	save_flags(flags);

	arp_fast_lock();

	entry = arp_lookup(paddr, 0, dev);
	if (entry == NULL)
		entry = arpd_lookup(paddr, 0, dev, __LINE__);

	if (entry)
	{
		/* Reuse an existing record of the same type if bound. */
		cli();
		for (hh = entry->hh; hh; hh=hh->hh_next)
			if (hh->hh_type == htype)
				break;
		if (hh)
		{
			hh->hh_refcnt++;
			*hhp = hh;
			restore_flags(flags);
			arp_unlock();
			return 1;
		}
		restore_flags(flags);
	}

	hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
	if (!hh)
	{
		arp_unlock();
		return 1;
	}

	hh->hh_uptodate = 0;
	hh->hh_refcnt = 1;
	hh->hh_arp = NULL;
	hh->hh_next = NULL;
	hh->hh_type = htype;

	if (entry)
	{
		/* Bind the new record to the existing entry.
		 * NOTE(review): hh->hh_next stays NULL here, so any records
		 * of a different htype already on entry->hh become
		 * unreachable from the entry -- mainline links the new
		 * record with hh->hh_next = entry->hh first.  Verify
		 * against the reference tree before changing. */
		dev->header_cache_update(hh, dev, entry->ha);
		*hhp = hh;
		cli();
		hh->hh_arp = (void*)entry;
		entry->hh = hh;
		hh->hh_refcnt++;
		restore_flags(flags);
		entry->last_used = jiffies;
		arpd_update(entry, __LINE__);
		arp_unlock();
		return 0;
	}

	/*
	 *	No entry at all: create an incomplete one, bind the record
	 *	to it and start resolution.
	 */
	entry = arp_add_entry();
	if (entry == NULL)
	{
		kfree_s(hh, sizeof(struct hh_cache));
		arp_unlock();
		return 1;
	}

	entry->last_updated = entry->last_used = jiffies;
	entry->flags = 0;
	entry->ip = paddr;
	entry->mask = DEF_ARP_NETMASK;
	memset(entry->ha, 0, dev->addr_len);
	entry->dev = dev;
	entry->hh = hh;
	arpd_update(entry, __LINE__);
	ATOMIC_INCR(&hh->hh_refcnt);
	init_timer(&entry->timer);
	entry->timer.function = arp_expire_request;
	entry->timer.data = (unsigned long)entry;
	entry->timer.expires = jiffies + ARP_RES_TIME;
	skb_queue_head_init(&entry->skb);

	if (arp_lock == 1)
	{
		/* Sole lock holder: install and probe immediately. */
		unsigned long hash = HASH(paddr);
		cli();
		entry->next = arp_tables[hash];
		arp_tables[hash] = entry;
		hh->hh_arp = (void*)entry;
		entry->retries = ARP_MAX_TRIES;
		restore_flags(flags);

		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
	}
	else
	{
		/* Table busy: defer installation to arp_run_bh(). */
#if RT_CACHE_DEBUG >= 1
		printk("arp_cache_bind: %08x backlogged\n", entry->ip);
#endif
		arp_enqueue(&arp_backlog, entry);
		arp_bh_mask |= ARP_BH_BACKLOG;
	}
	*hhp = hh;
	arp_unlock();
	return 0;
}
1755
/*
 *	Deferred ("bottom half") processing of the ARP backlog queue.
 *
 *	Entries that could not be inserted into the hash table because the
 *	table was locked are queued on arp_backlog.  Once the lock is free,
 *	install each queued entry; if a duplicate for the same (ip,dev)
 *	pair appeared meanwhile, migrate the queued entry's hh cache chain
 *	and pending skbs to the existing entry and free the queued one.
 */
static void arp_run_bh()
{
	unsigned long flags;
	struct arp_table *entry, *entry1;
	struct hh_cache *hh;
	__u32 sip;

	save_flags(flags);
	cli();
	/* Only run when nobody holds the ARP lock; take it ourselves. */
	if (!arp_lock)
	{
		arp_fast_lock();

		while ((entry = arp_dequeue(&arp_backlog)) != NULL)
		{
			unsigned long hash;
			sti();
			sip = entry->ip;
			hash = HASH(sip);

			/*
			 *	An entry with the same (ip,dev) pair may have
			 *	been created while this one sat in the backlog;
			 *	look for such a duplicate first.
			 */
			for (entry1=arp_tables[hash]; entry1; entry1=entry1->next)
				if (entry1->ip==sip && entry1->dev == entry->dev)
					break;

			if (!entry1)
			{
				/* No duplicate: install the entry into the hash
				 * table, (re)start its resolution timer and send
				 * the ARP request.
				 */
				struct device * dev = entry->dev;
				cli();
				entry->next = arp_tables[hash];
				arp_tables[hash] = entry;
				for (hh=entry->hh; hh; hh=hh->hh_next)
					hh->hh_arp = (void*)entry;
				sti();
				del_timer(&entry->timer);
				entry->timer.expires = jiffies + ARP_RES_TIME;
				add_timer(&entry->timer);
				entry->retries = ARP_MAX_TRIES;
				arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
#if RT_CACHE_DEBUG >= 1
				printk("arp_run_bh: %08x reinstalled\n", sip);
#endif
			}
			else
			{
				struct sk_buff * skb;
				struct hh_cache * next;

				/*
				 *	Duplicate found: move the backlogged
				 *	entry's hardware-header cache chain over
				 *	to the already-installed entry.
				 */
				cli();
				for (hh=entry->hh; hh; hh=next)
				{
					next = hh->hh_next;
					hh->hh_next = entry1->hh;
					entry1->hh = hh;
					hh->hh_arp = (void*)entry1;
				}
				entry->hh = NULL;

				/*
				 *	Requeue any packets waiting on the dead
				 *	entry onto the surviving one.
				 */
				while ((skb = skb_dequeue(&entry->skb)) != NULL)
				{
					skb_device_lock(skb);
					sti();
					skb_queue_tail(&entry1->skb, skb);
					skb_device_unlock(skb);
					cli();
				}
				sti();

#if RT_CACHE_DEBUG >= 1
				printk("arp_run_bh: entry %08x was born dead\n", entry->ip);
#endif
				arp_free_entry(entry);

				/* If the survivor is already resolved, refresh
				 * its hh caches and flush the queued packets
				 * right away.
				 */
				if (entry1->flags & ATF_COM)
				{
					arp_update_hhs(entry1);
					arp_send_q(entry1);
				}
			}
			cli();
		}
		arp_bh_mask &= ~ARP_BH_BACKLOG;
		arp_unlock();
	}
	restore_flags(flags);
}
1851
1852
1853
1854
1855
/*
 *	Test whether a hardware address consists entirely of zero bytes.
 *	Returns 1 for an all-zero (or zero-length) buffer, 0 otherwise.
 */
static inline int empty(unsigned char * addr, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (addr[i] != 0)
			return 0;
	return 1;
}
1866
1867
1868
1869
1870
/*
 *	Set (create or replace) an ARP cache entry from an arpreq.
 *	Any existing entry for the address is destroyed and rebuilt.
 */
static int arp_req_set(struct arpreq *r, struct device * dev)
{
	struct arp_table *entry;
	struct sockaddr_in *si;
	struct rtable *rt;
	struct device *dev1;
	unsigned char *ha;
	u32 ip;

	/*
	 *	Extract the destination protocol (IP) address.
	 */
	si = (struct sockaddr_in *) &r->arp_pa;
	ip = si->sin_addr.s_addr;

	/*
	 *	Work out which device the address is reached through: our
	 *	own addresses via loopback, everything else via the
	 *	routing table.
	 */
	if (ip_chk_addr(ip) == IS_MYADDR)
		dev1 = dev_get("lo");
	else {
		rt = ip_rt_route(ip, 0);
		if (!rt)
			return -ENETUNREACH;
		dev1 = rt->rt_dev;
		ip_rt_put(rt);
	}

	/* If no device was named, default to the routed device (but
	 * refuse devices that never do ARP). */
	if (!dev) {
		if (dev1->flags&(IFF_LOOPBACK|IFF_NOARP))
			return -ENODEV;
		dev = dev1;
	}

	/* The hardware address family must match the device type. */
	if (r->arp_ha.sa_family != dev->type)
		return -EINVAL;

	/* A proxy (ATF_PUBL) entry answers on behalf of another device,
	 * so its device must differ from the routed one; an ordinary
	 * entry must be on the routed device itself. */
	if (((r->arp_flags & ATF_PUBL) && dev == dev1) ||
	    (!(r->arp_flags & ATF_PUBL) && dev != dev1))
		return -EINVAL;

#if RT_CACHE_DEBUG >= 1
	if (arp_lock)
		printk("arp_req_set: bug\n");
#endif
	arp_fast_lock();

	/*
	 *	Look for an existing entry.  ATF_NETMASK is masked out of
	 *	the flags so the lookup matches the address exactly rather
	 *	than by netmask.
	 */
	entry = arp_lookup(ip, r->arp_flags & ~ATF_NETMASK, dev);
	if (entry == NULL)
		entry = arpd_lookup(ip, r->arp_flags & ~ATF_NETMASK, dev, __LINE__);

	/* Any old entry is destroyed and rebuilt from scratch. */
	if (entry)
	{
		arp_destroy(entry);
		entry = NULL;
	}

	/*
	 *	NOTE(review): entry is always NULL at this point (it was
	 *	cleared just above), so this test is always true.
	 */
	if (entry == NULL)
	{
		entry = arp_add_entry();
		if (entry == NULL)
		{
			arp_unlock();
			return -ENOMEM;
		}
		entry->ip = ip;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;

		/* Proxy entries live on their own list; normal entries
		 * are hashed by IP address. */
		if (r->arp_flags & ATF_PUBL)
		{
			cli();
			entry->next = arp_proxy_list;
			arp_proxy_list = entry;
			sti();
		}
		else
		{
			unsigned long hash = HASH(ip);
			cli();
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
			sti();
		}
		skb_queue_head_init(&entry->skb);
	}

	/*
	 *	Fill in the hardware address.  An empty address with
	 *	ATF_COM set means "use this device's own address".
	 */
	ha = r->arp_ha.sa_data;
	if ((r->arp_flags & ATF_COM) && empty(ha, dev->addr_len))
		ha = dev->dev_addr;
	memcpy(entry->ha, ha, dev->addr_len);
	entry->last_updated = entry->last_used = jiffies;
	arpd_update(entry, __LINE__);
	entry->flags = r->arp_flags | ATF_COM;
	/* A proxy entry may carry a netmask so that one entry can answer
	 * for a whole subnet. */
	if ((entry->flags & ATF_PUBL) && (entry->flags & ATF_NETMASK))
	{
		si = (struct sockaddr_in *) &r->arp_netmask;
		entry->mask = si->sin_addr.s_addr;
	}
	else
		entry->mask = DEF_ARP_NETMASK;
	entry->dev = dev;
	/* Propagate the new address into any cached hardware headers. */
	arp_update_hhs(entry);
	arp_unlock();
	return 0;
}
1997
1998
1999
2000
2001
2002
2003
2004 static int arp_req_get(struct arpreq *r, struct device *dev)
2005 {
2006 struct arp_table *entry;
2007 struct sockaddr_in *si;
2008
2009 si = (struct sockaddr_in *) &r->arp_pa;
2010
2011 #if RT_CACHE_DEBUG >= 1
2012 if (arp_lock)
2013 printk("arp_req_set: bug\n");
2014 #endif
2015 arp_fast_lock();
2016
2017 entry = arp_lookup(si->sin_addr.s_addr, r->arp_flags|ATF_NETMASK, dev);
2018 if (entry == NULL)
2019 entry = arpd_lookup(si->sin_addr.s_addr,
2020 r->arp_flags|ATF_NETMASK, dev, __LINE__);
2021
2022 if (entry == NULL)
2023 {
2024 arp_unlock();
2025 return -ENXIO;
2026 }
2027
2028
2029
2030
2031
2032 memcpy(r->arp_ha.sa_data, &entry->ha, entry->dev->addr_len);
2033 r->arp_ha.sa_family = entry->dev->type;
2034 r->arp_flags = entry->flags;
2035 strncpy(r->arp_dev, entry->dev->name, 16);
2036 arp_unlock();
2037 return 0;
2038 }
2039
2040 static int arp_req_delete(struct arpreq *r, struct device * dev)
2041 {
2042 struct arp_table *entry;
2043 struct sockaddr_in *si;
2044
2045 si = (struct sockaddr_in *) &r->arp_pa;
2046 #if RT_CACHE_DEBUG >= 1
2047 if (arp_lock)
2048 printk("arp_req_delete: bug\n");
2049 #endif
2050 arp_fast_lock();
2051
2052 if (!(r->arp_flags & ATF_PUBL))
2053 {
2054 for (entry = arp_tables[HASH(si->sin_addr.s_addr)];
2055 entry != NULL; entry = entry->next)
2056 if (entry->ip == si->sin_addr.s_addr
2057 && (!dev || entry->dev == dev))
2058 {
2059 arp_destroy(entry);
2060 arp_unlock();
2061 return 0;
2062 }
2063 }
2064 else
2065 {
2066 for (entry = arp_proxy_list;
2067 entry != NULL; entry = entry->next)
2068 if (entry->ip == si->sin_addr.s_addr
2069 && (!dev || entry->dev == dev))
2070 {
2071 arp_destroy(entry);
2072 arp_unlock();
2073 return 0;
2074 }
2075 }
2076
2077 arp_unlock();
2078 return -ENXIO;
2079 }
2080
2081
2082
2083
2084
/*
 *	Handle an ARP-layer ioctl request: SIOC[GSD]ARP and the old
 *	pre-arp_dev variants (OLD_SIOC[GSD]ARP) that carried no device
 *	name field.
 */
int arp_ioctl(unsigned int cmd, void *arg)
{
	int err;
	struct arpreq r;

	struct device * dev = NULL;

	switch(cmd)
	{
		case SIOCDARP:
		case SIOCSARP:
			if (!suser())
				return -EPERM;
			/* fall through - copy-in is shared with GET */
		case SIOCGARP:
			err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq));
			if (err)
				return err;
			memcpy_fromfs(&r, arg, sizeof(struct arpreq));
			break;
		case OLD_SIOCDARP:
		case OLD_SIOCSARP:
			if (!suser())
				return -EPERM;
			/* fall through - copy-in is shared with GET */
		case OLD_SIOCGARP:
			err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq_old));
			if (err)
				return err;
			memcpy_fromfs(&r, arg, sizeof(struct arpreq_old));
			/* old requests have no arp_dev field - clear ours */
			memset(&r.arp_dev, 0, sizeof(r.arp_dev));
			break;
		default:
			return -EINVAL;
	}

	/* Only non-zero IPv4 protocol addresses are acceptable. */
	if (r.arp_pa.sa_family != AF_INET)
		return -EPFNOSUPPORT;
	if (((struct sockaddr_in *)&r.arp_pa)->sin_addr.s_addr == 0)
		return -EINVAL;

	/* If a device name was supplied, resolve it and check (or fill
	 * in) the hardware address family. */
	if (r.arp_dev[0])
	{
		if ((dev = dev_get(r.arp_dev)) == NULL)
			return -ENODEV;

		if (!r.arp_ha.sa_family)
			r.arp_ha.sa_family = dev->type;
		else if (r.arp_ha.sa_family != dev->type)
			return -EINVAL;
	}
	else
	{
		/* Setting a proxy entry with no device name: pick the
		 * first device of the requested hardware type. */
		if ((r.arp_flags & ATF_PUBL) &&
		    ((cmd == SIOCSARP) || (cmd == OLD_SIOCSARP))) {
			if ((dev = dev_getbytype(r.arp_ha.sa_family)) == NULL)
				return -ENODEV;
		}
	}

	switch(cmd)
	{
		case SIOCDARP:
			return arp_req_delete(&r, dev);
		case SIOCSARP:
			return arp_req_set(&r, dev);
		case OLD_SIOCDARP:
			/*
			 *	Old delete removes both the normal and the
			 *	proxy entry for the address; try the normal
			 *	one first, and report success if either
			 *	delete worked.
			 */
			r.arp_flags &= ~ATF_PUBL;
			err = arp_req_delete(&r, dev);
			r.arp_flags |= ATF_PUBL;
			if (!err)
				arp_req_delete(&r, dev);
			else
				err = arp_req_delete(&r, dev);
			return err;
		case OLD_SIOCSARP:
			err = arp_req_set(&r, dev);
			/*
			 *	Setting a proxy entry via the old interface
			 *	also removes any normal entry for the same
			 *	address.
			 */
			if (r.arp_flags & ATF_PUBL)
			{
				r.arp_flags &= ~ATF_PUBL;
				arp_req_delete(&r, dev);
			}
			return err;
		case SIOCGARP:
			err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq));
			if (err)
				return err;
			err = arp_req_get(&r, dev);
			if (!err)
				memcpy_tofs(arg, &r, sizeof(r));
			return err;
		case OLD_SIOCGARP:
			err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq_old));
			if (err)
				return err;
			/* Old get: try a normal entry first, then fall
			 * back to a proxy entry. */
			r.arp_flags &= ~ATF_PUBL;
			err = arp_req_get(&r, dev);
			if (err < 0)
			{
				r.arp_flags |= ATF_PUBL;
				err = arp_req_get(&r, dev);
			}
			if (!err)
				memcpy_tofs(arg, &r, sizeof(struct arpreq_old));
			return err;
	}

	return 0;
}
2200
2201
2202
2203
2204
2205
/*
 *	Packet-type handler for incoming ARP frames; the type field is
 *	filled in with htons(ETH_P_ARP) by arp_init.
 */
static struct packet_type arp_packet_type =
{
	0,		/* type: set in arp_init */
	NULL,		/* dev: NULL means all devices */
	arp_rcv,	/* receive handler */
	NULL,		/* data */
	NULL		/* next */
};
2214
/* Notifier block so ARP learns about device state changes. */
static struct notifier_block arp_dev_notifier={
	arp_device_event,	/* notifier callback */
	NULL,			/* next */
	0			/* priority */
};
2220
/*
 *	ARP subsystem initialisation: register the packet type, start
 *	the expiry timer, hook device events and (optionally) register
 *	the /proc/net/arp entry.
 */
void arp_init (void)
{
	/* Register the ARP packet type on all devices */
	arp_packet_type.type=htons(ETH_P_ARP);
	dev_add_pack(&arp_packet_type);
	/* Start the periodic check for expired entries */
	add_timer(&arp_timer);
	/* Register for device up/down reports */
	register_netdevice_notifier(&arp_dev_notifier);

#ifdef CONFIG_PROC_FS
	proc_net_register(&(struct proc_dir_entry) {
		PROC_NET_ARP, 3, "arp",
		S_IFREG | S_IRUGO, 1, 0, 0,
		0, &proc_net_inode_operations,
		arp_get_info		/* /proc/net/arp dump routine */
	});
#endif
}
2240