This source file includes following definitions.
- arp_fast_lock
- arp_fast_unlock
- arp_unlock
- arp_enqueue
- arp_dequeue
- arp_release_entry
- arp_free_entry
- arp_count_hhs
- arp_force_expire
- arpd_update
- arp_add_entry
- arpd_lookup
- arp_invalidate_hhs
- arp_update_hhs
- arp_check_expire
- arp_expire_request
- arp_device_event
- arp_send
- arp_send_q
- arp_destroy
- arp_rcv
- arp_lookup
- arp_query
- arp_set_predefined
- arp_find
- arp_get_info
- arp_bind_cache
- arp_run_bh
- empty
- arp_req_set
- arp_req_get
- arp_req_delete
- arp_ioctl
- arp_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70 #include <linux/types.h>
71 #include <linux/string.h>
72 #include <linux/kernel.h>
73 #include <linux/sched.h>
74 #include <linux/config.h>
75 #include <linux/socket.h>
76 #include <linux/sockios.h>
77 #include <linux/errno.h>
78 #include <linux/in.h>
79 #include <linux/mm.h>
80 #include <linux/inet.h>
81 #include <linux/netdevice.h>
82 #include <linux/etherdevice.h>
83 #include <linux/if_arp.h>
84 #include <linux/trdevice.h>
85 #include <linux/skbuff.h>
86 #include <linux/proc_fs.h>
87 #include <linux/stat.h>
88
89 #include <net/ip.h>
90 #include <net/icmp.h>
91 #include <net/route.h>
92 #include <net/protocol.h>
93 #include <net/tcp.h>
94 #include <net/sock.h>
95 #include <net/arp.h>
96 #ifdef CONFIG_AX25
97 #include <net/ax25.h>
98 #ifdef CONFIG_NETROM
99 #include <net/netrom.h>
100 #endif
101 #endif
102 #ifdef CONFIG_NET_ALIAS
103 #include <linux/net_alias.h>
104 #endif
105 #ifdef CONFIG_ARPD
106 #include <linux/kerneld.h>
107 #endif
108
109 #include <asm/system.h>
110 #include <asm/segment.h>
111
112 #include <stdarg.h>
113
114
115
116
117
118
119
120
/*
 *	One ARP mapping (resolved or still resolving): an IP address bound
 *	to a hardware address on a specific device.  Entries are chained
 *	singly through ->next in the arp_tables[] hash buckets, or in
 *	arp_proxy_list for proxy-ARP entries.
 */
struct arp_table
{
	struct arp_table		*next;			/* hash-bucket / proxy-list chain	*/
	unsigned long			last_used;		/* jiffies of last lookup hit		*/
	unsigned long			last_updated;		/* jiffies of last confirmed update	*/
	unsigned int			flags;			/* ATF_* flags (ATF_COM, ATF_PERM, ...)	*/
	u32				ip;			/* protocol (IP) address		*/
	u32				mask;			/* netmask — proxy subnet entries only	*/
	unsigned char			ha[MAX_ADDR_LEN];	/* hardware address			*/
	struct device			*dev;			/* device the entry is valid for	*/

	/*
	 *	Resolution state for entries that are not yet ATF_COM.
	 */
	struct timer_list		timer;			/* expiry / retransmit timer		*/
	int				retries;		/* remaining request retransmits	*/
	struct sk_buff_head		skb;			/* packets queued awaiting resolution	*/
	struct hh_cache			*hh;			/* cached hardware headers bound here	*/
};
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156 #define ARP_RES_TIME (5*HZ)
157 #define ARP_DEAD_RES_TIME (60*HZ)
158
159
160
161
162
163
164 #define ARP_MAX_TRIES 3
165
166
167
168
169
170 #define ARP_TIMEOUT (600*HZ)
171
172
173
174
175
176
177
178 #define ARP_CHECK_INTERVAL (60*HZ)
179
180
181
182
183
184
185
186 #define ARP_CONFIRM_INTERVAL (300*HZ)
187 #define ARP_CONFIRM_TIMEOUT ARP_RES_TIME
188
189 static unsigned int arp_lock;
190 static unsigned int arp_bh_mask;
191
192 #define ARP_BH_BACKLOG 1
193
194 static struct arp_table *arp_backlog;
195
196
197
198 #ifdef CONFIG_ARPD
199 #define ARP_MAXSIZE 256
200 #endif
201
202 static unsigned int arp_size = 0;
203
204 static void arp_run_bh(void);
205 static void arp_check_expire (unsigned long);
206
207 static struct timer_list arp_timer =
208 { NULL, NULL, ARP_CHECK_INTERVAL, 0L, &arp_check_expire };
209
210
211
212
213
214
215 #define DEF_ARP_NETMASK (~0)
216
217
218
219
220
221
222
223 #define ARP_TABLE_SIZE 16
224 #define FULL_ARP_TABLE_SIZE (ARP_TABLE_SIZE+1)
225
226 struct arp_table *arp_tables[FULL_ARP_TABLE_SIZE] =
227 {
228 NULL,
229 };
230
231 #define arp_proxy_list arp_tables[ARP_TABLE_SIZE]
232
233
234
235
236
237
238 #define HASH(paddr) (htonl(paddr) & (ARP_TABLE_SIZE - 1))
239
240
241
242
243
/*
 *	Take the ARP table lock.  The lock is an atomic depth counter; the
 *	"fast" variants never trigger the deferred bottom-half work.
 */
static __inline__ void arp_fast_lock(void)
{
	ATOMIC_INCR(&arp_lock);
}
248
/*
 *	Drop one level of the ARP table lock WITHOUT running any postponed
 *	bottom-half work (compare arp_unlock below).
 */
static __inline__ void arp_fast_unlock(void)
{
	ATOMIC_DECR(&arp_lock);
}
253
/*
 *	Drop the lock; if we were the last holder and work was postponed
 *	while the table was locked (arp_bh_mask set), run it now.
 */
static __inline__ void arp_unlock(void)
{
	if (!ATOMIC_DECR_AND_CHECK(&arp_lock) && arp_bh_mask)
		arp_run_bh();
}
259
260
261
262
263
264 static void arp_enqueue(struct arp_table **q, struct arp_table *entry)
265 {
266 unsigned long flags;
267 struct arp_table * tail;
268
269 save_flags(flags);
270 cli();
271 tail = *q;
272 if (!tail)
273 entry->next = entry;
274 else
275 {
276 entry->next = tail->next;
277 tail->next = entry;
278 }
279 *q = entry;
280 restore_flags(flags);
281 return;
282 }
283
284
285
286
287
288
289 static struct arp_table * arp_dequeue(struct arp_table **q)
290 {
291 struct arp_table * entry;
292
293 if (*q)
294 {
295 entry = (*q)->next;
296 (*q)->next = entry->next;
297 if (entry->next == entry)
298 *q = NULL;
299 entry->next = NULL;
300 return entry;
301 }
302 return NULL;
303 }
304
305
306
307
308
/*
 *	Throw away every packet queued on an entry awaiting resolution.
 *	Interrupts are re-enabled around each dev_kfree_skb() call, and
 *	disabled again before the next dequeue, so the queue itself is
 *	always manipulated with interrupts off.
 */
static void arp_release_entry(struct arp_table *entry)
{
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);
	cli();

	while ((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		skb_device_lock(skb);		/* keep the device from grabbing it */
		restore_flags(flags);
		dev_kfree_skb(skb, FREE_WRITE);
		cli();				/* protect the next dequeue */
	}
	restore_flags(flags);
	return;
}
327
328
329
330
331
332
333 static void arp_free_entry(struct arp_table *entry)
334 {
335 unsigned long flags;
336 struct hh_cache *hh, *next;
337
338 del_timer(&entry->timer);
339
340 save_flags(flags);
341 cli();
342 arp_release_entry(entry);
343
344 for (hh = entry->hh; hh; hh = next)
345 {
346 next = hh->hh_next;
347 hh->hh_arp = NULL;
348 hh->hh_uptodate = 0;
349 if (!--hh->hh_refcnt)
350 kfree_s(hh, sizeof(struct(struct hh_cache)));
351 }
352 restore_flags(flags);
353
354 kfree_s(entry, sizeof(struct arp_table));
355 --arp_size;
356 return;
357 }
358
359
360
361
362
/*
 *	Count outside references to the entry's cached hardware headers.
 *	hh_cache records whose only reference is the entry itself are
 *	garbage-collected on the way.  A nonzero result means external
 *	users still hold headers, so the entry cannot simply be freed.
 *	Caller must hold interrupts off (the list is edited in place).
 */
static __inline__ int arp_count_hhs(struct arp_table * entry)
{
	struct hh_cache *hh, **hhp;
	int count = 0;

	hhp = &entry->hh;
	while ((hh=*hhp) != NULL)
	{
		if (hh->hh_refcnt == 1)
		{
			/* Only we hold it: unlink and free in place. */
			*hhp = hh->hh_next;
			kfree_s(hh, sizeof(struct hh_cache));
			continue;
		}
		count += hh->hh_refcnt-1;	/* references besides our own */
		hhp = &hh->hh_next;
	}

	return count;
}
383
384
385
386
387
388
389
390
391
392
393
#ifdef CONFIG_ARPD
/*
 *	The local cache is full (arp_size >= ARP_MAXSIZE): evict the
 *	least-recently-used entry to make room.  Entries with no external
 *	hardware-header users are preferred; failing that, the LRU entry
 *	is taken regardless ("last resort").  Returns 0 on success, -1 if
 *	the table is empty.  Called under the ARP lock.
 */
static int arp_force_expire(void)
{
	int i;
	struct arp_table *entry = NULL;
	struct arp_table **pentry = NULL;
	struct arp_table **oldest_entry = NULL, **last_resort = NULL;
	unsigned long oldest_used = ~0;

#if RT_CACHE_DEBUG >= 2
	printk("Looking for something to force expire.\n");
#endif
	for (i = 0; i < ARP_TABLE_SIZE; i++)
	{
		pentry = &arp_tables[i];

		while ((entry = *pentry) != NULL)
		{
			if (entry->last_used < oldest_used)
			{
				if (arp_count_hhs(entry) == 0)
				{
					oldest_entry = pentry;
				}
				last_resort = pentry;
				oldest_used = entry->last_used;
			}
			pentry = &entry->next;
		}
	}
	if (oldest_entry == NULL)
	{
		if (last_resort == NULL)
			return -1;		/* nothing at all to evict */
		oldest_entry = last_resort;
	}

	/* Unlink the victim and destroy it. */
	entry = *oldest_entry;
	*oldest_entry = (*oldest_entry)->next;
#if RT_CACHE_DEBUG >= 2
	printk("Force expiring %08x\n", entry->ip);
#endif
	arp_free_entry(entry);
	return 0;
}
#endif
440
441
/*
 *	Push the current state of an entry to the user-space arpd daemon
 *	(fire and forget — the send result is ignored).  `loc' is the
 *	caller's __LINE__, for tracing on the daemon side.  Compiles to a
 *	no-op without CONFIG_ARPD.
 */
static void arpd_update(struct arp_table * entry, int loc)
{
#ifdef CONFIG_ARPD
	static struct arpd_request arpreq;

	arpreq.req = ARPD_UPDATE;
	arpreq.ip = entry->ip;
	arpreq.mask = entry->mask;
	memcpy (arpreq.ha, entry->ha, MAX_ADDR_LEN);
	arpreq.loc = loc;
	arpreq.last_used = entry->last_used;
	arpreq.last_updated = entry->last_updated;
	arpreq.flags = entry->flags;
	arpreq.dev = entry->dev;

	kerneld_send(KERNELD_ARP, 0, sizeof(arpreq),
		     (char *) &arpreq, NULL);
#endif
}
461
462
463
464
465
466
467
468
/*
 *	Allocate a fresh, UNINITIALIZED cache entry and bump arp_size.
 *	With CONFIG_ARPD the in-kernel cache is bounded by ARP_MAXSIZE,
 *	so a victim may be evicted first.  Returns NULL if allocation
 *	fails.  Called under the ARP lock; the caller fills every field.
 */
static struct arp_table * arp_add_entry(void)
{
	struct arp_table * entry;

#ifdef CONFIG_ARPD
	if (arp_size >= ARP_MAXSIZE)
	{
		if (arp_force_expire() < 0)
			return NULL;
	}
#endif

	entry = (struct arp_table *)
		kmalloc(sizeof(struct arp_table),GFP_ATOMIC);

	if (entry != NULL)
		++arp_size;
	return entry;
}
488
489
490
491
492
493
494 static struct arp_table * arpd_lookup(u32 addr, unsigned short flags,
495 struct device * dev,
496 int loc)
497 {
498 #ifdef CONFIG_ARPD
499 static struct arpd_request arpreq, retreq;
500 struct arp_table * entry;
501 int rv, i;
502
503 arpreq.req = ARPD_LOOKUP;
504 arpreq.ip = addr;
505 arpreq.loc = loc;
506
507 rv = kerneld_send(KERNELD_ARP,
508 sizeof(retreq) | KERNELD_WAIT,
509 sizeof(arpreq),
510 (char *) &arpreq,
511 (char *) &retreq);
512
513
514
515
516 if (rv != 0)
517 return NULL;
518 if (dev != retreq.dev)
519 return NULL;
520 if (! memcmp (retreq.ha, "\0\0\0\0\0\0", 6))
521 return NULL;
522
523 arp_fast_lock();
524 entry = arp_add_entry();
525 arp_unlock();
526
527 if (entry == NULL)
528 return NULL;
529
530 entry->next = NULL;
531 entry->last_used = retreq.last_used;
532 entry->last_updated = retreq.last_updated;
533 entry->flags = retreq.flags;
534 entry->ip = retreq.ip;
535 entry->mask = retreq.mask;
536 memcpy (entry->ha, retreq.ha, MAX_ADDR_LEN);
537 arpreq.dev = entry->dev;
538
539 skb_queue_head_init(&entry->skb);
540 entry->hh = NULL;
541 entry->retries = 0;
542
543 #if RT_CACHE_DEBUG >= 2
544 printk("Inserting arpd entry %08x\n in local cache.", entry->ip);
545 #endif
546 i = HASH(entry->ip);
547 arp_fast_lock();
548 entry->next = arp_tables[i]->next;
549 arp_tables[i]->next = entry;
550 arp_unlock();
551 return entry;
552 #endif
553 return NULL;
554 }
555
556
557
558
559
560
/*
 *	Mark every cached hardware header bound to this entry as stale so
 *	the next transmit rebuilds it.
 */
static __inline__ void arp_invalidate_hhs(struct arp_table * entry)
{
	struct hh_cache *hh;

	for (hh=entry->hh; hh; hh=hh->hh_next)
		hh->hh_uptodate = 0;
}
568
569
570
571
572
/*
 *	Refresh every cached hardware header bound to this entry with its
 *	(possibly new) hardware address, via the device's update hook.
 */
static __inline__ void arp_update_hhs(struct arp_table * entry)
{
	struct hh_cache *hh;

	for (hh=entry->hh; hh; hh=hh->hh_next)
		entry->dev->header_cache_update(hh, entry->dev, entry->ha);
}
580
581
582
583
584
585
586
587
588
589
/*
 *	Periodic sweep, run every ARP_CHECK_INTERVAL from arp_timer.
 *	Entries idle for more than ARP_TIMEOUT with no hardware-header
 *	users are freed; entries not confirmed within ARP_CONFIRM_INTERVAL
 *	are demoted to unconfirmed and re-probed by unicast.  ATF_PERM
 *	entries are never touched.  If the table is locked the sweep is
 *	simply skipped until the next tick.
 */
static void arp_check_expire(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;

	del_timer(&arp_timer);

	if (!arp_lock)
	{
		arp_fast_lock();

		for (i = 0; i < ARP_TABLE_SIZE; i++)
		{
			struct arp_table *entry;
			struct arp_table **pentry;

			pentry = &arp_tables[i];

			while ((entry = *pentry) != NULL)
			{
				cli();	/* each entry examined with interrupts off */
				if (now - entry->last_used > ARP_TIMEOUT
				    && !(entry->flags & ATF_PERM)
				    && !arp_count_hhs(entry))
				{
					*pentry = entry->next;	/* unlink, then free */
					sti();
#if RT_CACHE_DEBUG >= 2
					printk("arp_expire: %08x expired\n", entry->ip);
#endif
					arp_free_entry(entry);
				}
				else if (entry->last_updated
					 && now - entry->last_updated > ARP_CONFIRM_INTERVAL
					 && !(entry->flags & ATF_PERM))
				{
					struct device * dev = entry->dev;
					pentry = &entry->next;
					entry->flags &= ~ATF_COM;	/* back to unconfirmed */
					arp_invalidate_hhs(entry);
					sti();
					entry->retries = ARP_MAX_TRIES+1;
					del_timer(&entry->timer);
					entry->timer.expires = jiffies + ARP_CONFIRM_TIMEOUT;
					add_timer(&entry->timer);
					/* Unicast probe to the cached hardware address. */
					arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip,
						 dev, dev->pa_addr, entry->ha,
						 dev->dev_addr, NULL);
#if RT_CACHE_DEBUG >= 2
					printk("arp_expire: %08x requires confirmation\n", entry->ip);
#endif
				}
				else
					pentry = &entry->next;	/* keep this entry */
			}
		}
		arp_unlock();
	}

	ip_rt_check_expire();

	/* Re-arm ourselves for the next sweep. */
	arp_timer.expires = jiffies + ARP_CHECK_INTERVAL;
	add_timer(&arp_timer);
}
658
659
660
661
662
663
664
/*
 *	Per-entry timer handler: an ARP request for this entry timed out.
 *	Retransmits while retries remain; once exhausted, the queued
 *	packets are dropped and the entry is either kept in a slow
 *	"dead host" probe cycle (if hardware-header caches still point at
 *	it) or unlinked and freed.  If the table is locked the handler
 *	reschedules itself for a short while later.
 */
static void arp_expire_request (unsigned long arg)
{
	struct arp_table *entry = (struct arp_table *) arg;
	struct arp_table **pentry;
	unsigned long hash;
	unsigned long flags;

	save_flags(flags);
	cli();

	/* The entry may have been resolved while this timer was queued. */
	if (entry->flags & ATF_COM)
	{
		restore_flags(flags);
		return;
	}

	if (arp_lock)
	{
		/* Table busy: try again shortly. */
#if RT_CACHE_DEBUG >= 1
		printk("arp_expire_request: %08x postponed\n", entry->ip);
#endif
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + HZ/10;
		add_timer(&entry->timer);
		restore_flags(flags);
		return;
	}

	arp_fast_lock();
	restore_flags(flags);

	if (entry->last_updated && --entry->retries > 0)
	{
		struct device *dev = entry->dev;

#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x timed out\n", entry->ip);
#endif
		/* Retry with a broadcast request. */
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + ARP_RES_TIME;
		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
			 NULL, dev->dev_addr, NULL);
		arp_unlock();
		return;
	}

	/* Out of retries: discard anything still queued on the entry. */
	arp_release_entry(entry);

	cli();
	if (arp_count_hhs(entry))
	{
		/*
		 * Hardware-header caches still reference the entry, so it
		 * cannot be freed.  Keep probing at the much slower
		 * ARP_DEAD_RES_TIME rate instead.
		 */
		struct device *dev = entry->dev;
#if RT_CACHE_DEBUG >= 2
		printk("arp_expire_request: %08x is dead\n", entry->ip);
#endif
		arp_release_entry(entry);
		entry->retries = ARP_MAX_TRIES;
		restore_flags(flags);
		entry->last_updated = 0;
		del_timer(&entry->timer);
		entry->timer.expires = jiffies + ARP_DEAD_RES_TIME;
		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
			 NULL, dev->dev_addr, NULL);
		arp_unlock();
		return;
	}
	restore_flags(flags);

	/* No references remain: unlink from the hash chain and free. */
	hash = HASH(entry->ip);

	pentry = &arp_tables[hash];

	while (*pentry != NULL)
	{
		if (*pentry == entry)
		{
			cli();
			*pentry = entry->next;
			restore_flags(flags);
#if RT_CACHE_DEBUG >= 2
			printk("arp_expire_request: %08x is killed\n", entry->ip);
#endif
			arp_free_entry(entry);
			arp_unlock();
			return;
		}
		pentry = &(*pentry)->next;
	}
	printk("arp_expire_request: bug: ARP entry is lost!\n");
	arp_unlock();
}
765
766
767
768
769
/*
 *	Network-device notifier: when an interface goes down, purge every
 *	cache entry — including proxy entries — bound to that device.
 */
int arp_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct device *dev=ptr;
	int i;

	if (event != NETDEV_DOWN)
		return NOTIFY_DONE;

#if RT_CACHE_DEBUG >= 1
	if (arp_lock)
		printk("arp_device_event: bug\n");
#endif
	arp_fast_lock();

	/* FULL_ARP_TABLE_SIZE: sweep the proxy list too. */
	for (i = 0; i < FULL_ARP_TABLE_SIZE; i++)
	{
		struct arp_table *entry;
		struct arp_table **pentry = &arp_tables[i];

		while ((entry = *pentry) != NULL)
		{
			if (entry->dev == dev)
			{
				*pentry = entry->next;	/* unlink and free */
				arp_free_entry(entry);
			}
			else
				pentry = &entry->next;
		}
	}
	arp_unlock();
	return NOTIFY_DONE;
}
806
807
808
809
810
811
812
/*
 *	Build and transmit one ARP packet of the given operation type.
 *	A NULL dest_hw means broadcast the frame; a NULL target_hw puts a
 *	zero-filled target address into the ARP body.  Silently does
 *	nothing on IFF_NOARP devices or when no memory is available.
 */
void arp_send(int type, int ptype, u32 dest_ip,
	      struct device *dev, u32 src_ip,
	      unsigned char *dest_hw, unsigned char *src_hw,
	      unsigned char *target_hw)
{
	struct sk_buff *skb;
	struct arphdr *arp;
	unsigned char *arp_ptr;

	/* No arp on this interface. */
	if (dev->flags&IFF_NOARP)
		return;

	/*
	 *	Allocate a buffer: arp header, two (hw,ip) address pairs,
	 *	plus headroom for the link-level header.
	 */
	skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
				+ dev->hard_header_len, GFP_ATOMIC);
	if (skb == NULL)
	{
		printk("ARP: no memory to send an arp packet\n");
		return;
	}
	skb_reserve(skb, dev->hard_header_len);
	arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4));
	skb->arp = 1;			/* already resolved, don't arp for the arp frame */
	skb->dev = dev;
	skb->free = 1;
	skb->protocol = htons (ETH_P_IP);

	/* Fill the device (link-level) header. */
	dev->hard_header(skb,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len);

	/* Fill out the arp protocol part. */
	arp->ar_hrd = htons(dev->type);
#ifdef CONFIG_AX25
#ifdef CONFIG_NETROM
	arp->ar_pro = (dev->type == ARPHRD_AX25 || dev->type == ARPHRD_NETROM) ? htons(AX25_P_IP) : htons(ETH_P_IP);
#else
	arp->ar_pro = (dev->type != ARPHRD_AX25) ? htons(ETH_P_IP) : htons(AX25_P_IP);
#endif
#else
	arp->ar_pro = htons(ETH_P_IP);
#endif
	arp->ar_hln = dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	/* Address pairs follow the fixed header: sender hw/ip, target hw/ip. */
	arp_ptr=(unsigned char *)(arp+1);

	memcpy(arp_ptr, src_hw, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &src_ip,4);
	arp_ptr+=4;
	if (target_hw != NULL)
		memcpy(arp_ptr, target_hw, dev->addr_len);
	else
		memset(arp_ptr, 0, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &dest_ip, 4);

	dev_queue_xmit(skb, dev, 0);
}
883
884
885
886
887
/*
 *	Transmit every packet that was queued on an entry while it was
 *	being resolved.  Only meaningful once the entry is complete
 *	(ATF_COM); an incomplete entry is reported and left alone.
 */
static void arp_send_q(struct arp_table *entry)
{
	struct sk_buff *skb;

	unsigned long flags;

	/* We need the address resolved before link headers can be built. */
	if(!(entry->flags&ATF_COM))
	{
		printk("arp_send_q: incomplete entry for %s\n",
				in_ntoa(entry->ip));
		/* Can't flush the queue without a hardware address. */
		return;
	}

	save_flags(flags);

	cli();
	while((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		IS_SKB(skb);
		skb_device_lock(skb);
		restore_flags(flags);	/* interrupts back on for the actual send */
		if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb))
		{
			skb->arp = 1;
			if(skb->sk==NULL)
				dev_queue_xmit(skb, skb->dev, 0);
			else
				dev_queue_xmit(skb,skb->dev,skb->sk->priority);
		}
	}
	restore_flags(flags);
}
928
929
930
931
932
933
/*
 *	Unlink an entry from its list — the proxy list for ATF_PUBL
 *	entries, otherwise its hash bucket — and free it.  Quietly does
 *	nothing if the entry is not found.  Called under the ARP lock.
 */
static void arp_destroy(struct arp_table * entry)
{
	struct arp_table *entry1;
	struct arp_table **pentry;

	if (entry->flags & ATF_PUBL)
		pentry = &arp_proxy_list;
	else
		pentry = &arp_tables[HASH(entry->ip)];

	while ((entry1 = *pentry) != NULL)
	{
		if (entry1 == entry)
		{
			*pentry = entry1->next;
			del_timer(&entry->timer);
			arp_free_entry(entry);
			return;
		}
		pentry = &entry1->next;
	}
}
956
957
958
959
960
961
962
/*
 *	Receive one ARP frame from a device.  Validates the header,
 *	answers requests for our own address (and proxied addresses),
 *	and opportunistically learns/refreshes the sender's mapping.
 *	Always consumes the skb and returns 0.
 */
int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
{
	struct arphdr *arp = (struct arphdr *)skb->h.raw;
	unsigned char *arp_ptr= (unsigned char *)(arp+1);
	struct arp_table *entry;
	struct arp_table *proxy_entry;
	unsigned long hash, grat=0;
	unsigned char ha[MAX_ADDR_LEN];
	unsigned char *sha,*tha;
	u32 sip,tip;

	/*
	 *	Basic sanity: hardware type/length must match the device,
	 *	protocol address must be IPv4 (4 bytes), and the device
	 *	must do ARP at all.
	 */
	if (arp->ar_hln != dev->addr_len ||
	    dev->type != ntohs(arp->ar_hrd) ||
	    dev->flags & IFF_NOARP ||
	    arp->ar_pln != 4)
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	/*
	 *	The protocol field must carry the value this link type
	 *	uses for IP.
	 */
	switch (dev->type)
	{
#ifdef CONFIG_AX25
	case ARPHRD_AX25:
		if(arp->ar_pro != htons(AX25_P_IP))
		{
			kfree_skb(skb, FREE_READ);
			return 0;
		}
		break;
#endif
#ifdef CONFIG_NETROM
	case ARPHRD_NETROM:
		if(arp->ar_pro != htons(AX25_P_IP))
		{
			kfree_skb(skb, FREE_READ);
			return 0;
		}
		break;
#endif
	case ARPHRD_ETHER:
	case ARPHRD_ARCNET:
	case ARPHRD_METRICOM:
		if(arp->ar_pro != htons(ETH_P_IP))
		{
			kfree_skb(skb, FREE_READ);
			return 0;
		}
		break;

	case ARPHRD_IEEE802:
		if(arp->ar_pro != htons(ETH_P_IP))
		{
			kfree_skb(skb, FREE_READ);
			return 0;
		}
		break;

	default:
		printk("ARP: dev->type mangled!\n");
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	/*
	 *	Extract sender/target hardware and protocol addresses
	 *	(variable-length fields behind the fixed header).
	 */
	sha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	tha=arp_ptr;
	arp_ptr += dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Loopback and multicast targets make no sense for ARP. */
	if (LOOPBACK(tip) || MULTICAST(tip))
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

#ifdef CONFIG_NET_ALIAS
	/*
	 *	With aliases, redirect the frame to the alias device that
	 *	actually owns the target address, then re-check its flags.
	 */
	if (tip != dev->pa_addr && net_alias_has(skb->dev))
	{
		dev = net_alias_dev_rcv_sel32(dev, AF_INET, sip, tip);

		if (dev->type != ntohs(arp->ar_hrd) || dev->flags & IFF_NOARP)
		{
			kfree_skb(skb, FREE_READ);
			return 0;
		}
	}
#endif

	if (arp->ar_op == htons(ARPOP_REQUEST))
	{
		/*
		 *	Request for an address that is not ours: see if we
		 *	proxy for it.
		 */
		if (tip != dev->pa_addr)
		{
			arp_fast_lock();

			for (proxy_entry=arp_proxy_list;
			     proxy_entry;
			     proxy_entry = proxy_entry->next)
			{
				/* Subnet match under the proxy entry's own mask. */
				if (proxy_entry->dev == dev &&
				    !((proxy_entry->ip^tip)&proxy_entry->mask))
					break;

			}
			if (proxy_entry)
			{
				/* Answer with the proxied hardware address. */
				memcpy(ha, proxy_entry->ha, dev->addr_len);
				arp_unlock();
				arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,ha, sha);
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			else
			{
				arp_unlock();
			}
		}
		else
		{
			/* Request for our own address: reply directly. */
			arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
		}
		/* Still learn the sender, but don't CREATE an entry for it. */
		grat = 1;
		goto gratuitous;
	}

	/* Replies not addressed to us are ignored. */
	if(ip_chk_addr(tip)!=IS_MYADDR)
	{
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	/*
	 *	Learn or refresh the sender's mapping.
	 */
gratuitous:

	arp_fast_lock();

	hash = HASH(sip);
	for (entry=arp_tables[hash]; entry; entry=entry->next)
		if (entry->ip == sip && entry->dev == dev)
			break;

	if (entry)
	{
		/* Known host: refresh, unless the entry is permanent. */
		if (!(entry->flags & ATF_PERM))
		{
			if(memcmp(entry->ha, sha,dev->addr_len)!=0)
			{
				memcpy(entry->ha, sha, dev->addr_len);
				if(entry->flags & ATF_COM)
					arp_update_hhs(entry);
			}
			entry->last_updated = jiffies;
			arpd_update(entry, __LINE__);
		}
		if (!(entry->flags & ATF_COM))
		{
			/* It was pending: complete it and flush its queue. */
			del_timer(&entry->timer);
			entry->flags |= ATF_COM;
			arp_update_hhs(entry);

			arp_send_q(entry);
		}
	}
	else
	{
		/* Unknown host: create an entry, except for gratuitous learns. */
		if (grat)
			goto end;

		entry = arp_add_entry();
		if(entry == NULL)
		{
			arp_unlock();
#if RT_CACHE_DEBUG >= 2
			printk("ARP: no memory for new arp entry\n");
#endif
			kfree_skb(skb, FREE_READ);
			return 0;
		}

		entry->mask = DEF_ARP_NETMASK;
		entry->ip = sip;
		entry->flags = ATF_COM;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;
		memcpy(entry->ha, sha, dev->addr_len);
		entry->last_updated = entry->last_used = jiffies;
		arpd_update(entry, __LINE__);

#ifdef CONFIG_NET_ALIAS
		entry->dev = dev;		/* possibly the alias device */
#else
		entry->dev = skb->dev;
#endif
		skb_queue_head_init(&entry->skb);
		if (arp_lock == 1)
		{
			/* Sole lock holder: insert directly. */
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
		}
		else
		{
			/* Table busy: defer insertion to the bottom half. */
#if RT_CACHE_DEBUG >= 2
			printk("arp_rcv: %08x backlogged\n", entry->ip);
#endif
			arp_enqueue(&arp_backlog, entry);
			arp_bh_mask |= ARP_BH_BACKLOG;
		}
	}

end:
	kfree_skb(skb, FREE_READ);
	arp_unlock();
	return 0;
}
1285
1286
1287
1288
1289
1290
1291
1292
1293 static struct arp_table *arp_lookup(u32 paddr, unsigned short flags, struct device * dev)
1294 {
1295 struct arp_table *entry;
1296
1297 if (!(flags & ATF_PUBL))
1298 {
1299 for (entry = arp_tables[HASH(paddr)];
1300 entry != NULL; entry = entry->next)
1301 if (entry->ip == paddr && (!dev || entry->dev == dev))
1302 break;
1303 return entry;
1304 }
1305
1306 if (!(flags & ATF_NETMASK))
1307 {
1308 for (entry = arp_proxy_list;
1309 entry != NULL; entry = entry->next)
1310 if (entry->ip == paddr && (!dev || entry->dev == dev))
1311 break;
1312 return entry;
1313 }
1314
1315 for (entry=arp_proxy_list; entry != NULL; entry = entry->next)
1316 if (!((entry->ip^paddr)&entry->mask) &&
1317 (!dev || entry->dev == dev))
1318 break;
1319 return entry;
1320 }
1321
1322
1323
1324
1325
1326 int arp_query(unsigned char *haddr, u32 paddr, struct device * dev)
1327 {
1328 struct arp_table *entry;
1329
1330 arp_fast_lock();
1331
1332 entry = arp_lookup(paddr, 0, dev);
1333 if (entry == NULL)
1334 entry = arpd_lookup(paddr, 0, dev, __LINE__);
1335
1336 if (entry != NULL)
1337 {
1338 entry->last_used = jiffies;
1339 if (entry->flags & ATF_COM)
1340 {
1341 memcpy(haddr, entry->ha, dev->addr_len);
1342 arpd_update(entry, __LINE__);
1343 arp_unlock();
1344 return 1;
1345 }
1346 }
1347 arpd_update(entry, __LINE__);
1348 arp_unlock();
1349 return 0;
1350 }
1351
1352
/*
 *	Resolve addresses that never need a real ARP exchange: our own
 *	address, IP multicast (mapped algorithmically on Ethernet/802.2),
 *	and broadcast.  Fills `haddr' and returns 1 when handled, 0 when
 *	a normal lookup is needed.
 */
static int arp_set_predefined(int addr_hint, unsigned char * haddr, __u32 paddr, struct device * dev)
{
	switch (addr_hint)
	{
		case IS_MYADDR:
			printk("ARP: arp called for own IP address\n");
			memcpy(haddr, dev->dev_addr, dev->addr_len);
			return 1;
#ifdef CONFIG_IP_MULTICAST
		case IS_MULTICAST:
			if(dev->type==ARPHRD_ETHER || dev->type==ARPHRD_IEEE802)
			{
				/* 01:00:5e + low 23 bits of the group address. */
				u32 taddr;
				haddr[0]=0x01;
				haddr[1]=0x00;
				haddr[2]=0x5e;
				taddr=ntohl(paddr);
				haddr[5]=taddr&0xff;
				taddr=taddr>>8;
				haddr[4]=taddr&0xff;
				taddr=taddr>>8;
				haddr[3]=taddr&0x7f;
				return 1;
			}
			/* fallthrough: devices without hardware multicast
			   use the broadcast address instead */
#endif

		case IS_BROADCAST:
			memcpy(haddr, dev->broadcast, dev->addr_len);
			return 1;
	}
	return 0;
}
1388
1389
1390
1391
1392
/*
 *	Main resolver entry point: find the hardware address for `paddr'
 *	and place it in `haddr'.  Returns 0 when haddr is valid (packet
 *	may be sent), 1 when resolution is in progress — in that case a
 *	supplied skb is queued on the entry (or ICMP-rejected for a dead
 *	host) and an ARP request goes out.
 */
int arp_find(unsigned char *haddr, u32 paddr, struct device *dev,
	     u32 saddr, struct sk_buff *skb)
{
	struct arp_table *entry;
	unsigned long hash;

	/* Special addresses (self, multicast, broadcast) need no lookup. */
	if (arp_set_predefined(ip_chk_addr(paddr), haddr, paddr, dev))
	{
		if (skb)
			skb->arp = 1;
		return 0;
	}

	hash = HASH(paddr);
	arp_fast_lock();

	/* Local cache first, then the arpd daemon. */
	entry = arp_lookup(paddr, 0, dev);
	if (entry == NULL)
		entry = arpd_lookup(paddr, 0, dev, __LINE__);

	if (entry != NULL)
	{
		if (!(entry->flags & ATF_COM))
		{
			/*
			 *	Resolution already in progress for a live
			 *	host: park the packet on the entry.  A
			 *	"dead" entry (last_updated == 0) gets an
			 *	immediate host-unreachable instead.
			 */
			if (skb != NULL)
			{
				if (entry->last_updated)
				{
					skb_queue_tail(&entry->skb, skb);
					skb_device_unlock(skb);
				}
				else
				{
#if 0
					/* Alternative: report straight to the socket. */
					if (skb->sk)
					{
						skb->sk->err = EHOSTDOWN;
						skb->sk->error_report(skb->sk);
					}
#else
					icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
#endif
					dev_kfree_skb(skb, FREE_WRITE);
				}
			}
			arp_unlock();
			return 1;
		}

		/* Completed entry: hand back the hardware address. */
		entry->last_used = jiffies;
		memcpy(haddr, entry->ha, dev->addr_len);
		arpd_update(entry, __LINE__);
		if (skb)
			skb->arp = 1;
		arp_unlock();
		return 0;
	}

	/*
	 *	Unknown host: create a pending entry and send a request.
	 */
	entry = arp_add_entry();
	if (entry != NULL)
	{
		entry->last_updated = entry->last_used = jiffies;
		entry->flags = 0;		/* not ATF_COM yet */
		entry->ip = paddr;
		entry->mask = DEF_ARP_NETMASK;
		memset(entry->ha, 0, dev->addr_len);
		entry->dev = dev;
		entry->hh = NULL;
		arpd_update(entry, __LINE__);
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;
		entry->timer.expires = jiffies + ARP_RES_TIME;
		skb_queue_head_init(&entry->skb);
		if (skb != NULL)
		{
			skb_queue_tail(&entry->skb, skb);
			skb_device_unlock(skb);
		}
		if (arp_lock == 1)
		{
			/* Sole lock holder: insert directly and arm the timer. */
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
			add_timer(&entry->timer);
			entry->retries = ARP_MAX_TRIES;
		}
		else
		{
			/* Table busy: the bottom half will insert it. */
#if RT_CACHE_DEBUG >= 2
			printk("arp_find: %08x backlogged\n", entry->ip);
#endif
			arp_enqueue(&arp_backlog, entry);
			arp_bh_mask |= ARP_BH_BACKLOG;
		}
	}
	else if (skb != NULL)
		dev_kfree_skb(skb, FREE_WRITE);
	arp_unlock();

	/* Broadcast the request whether or not the entry was created. */
	arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, saddr, NULL,
		 dev->dev_addr, NULL);

	return 1;
}
1525
1526
1527
1528
1529
1530
#define HBUFFERLEN 30

/*
 *	/proc interface: format the whole ARP table (including proxy
 *	entries) into `buffer' for reading at `offset'/`length'.
 *	Standard get_info windowing: returns the number of valid bytes
 *	and sets *start for the caller.
 */
int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
	int len=0;
	off_t pos=0;
	int size;
	struct arp_table *entry;
	char hbuffer[HBUFFERLEN];
	int i,j,k;
	const char hexbuf[] = "0123456789ABCDEF";

	size = sprintf(buffer,"IP address HW type Flags HW address Mask Device\n");

	pos+=size;
	len+=size;

	arp_fast_lock();

	for(i=0; i<FULL_ARP_TABLE_SIZE; i++)
	{
		for(entry=arp_tables[i]; entry!=NULL; entry=entry->next)
		{
			/*
			 *	Render the hardware address: AX.25/NET/ROM
			 *	callsigns as text, everything else as
			 *	colon-separated hex.
			 */
#ifdef CONFIG_AX25
#ifdef CONFIG_NETROM
			if (entry->dev->type == ARPHRD_AX25 || entry->dev->type == ARPHRD_NETROM)
				strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
			else {
#else
			if(entry->dev->type==ARPHRD_AX25)
				strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
			else {
#endif
#endif

			for(k=0,j=0;k<HBUFFERLEN-3 && j<entry->dev->addr_len;j++)
			{
				hbuffer[k++]=hexbuf[ (entry->ha[j]>>4)&15 ];
				hbuffer[k++]=hexbuf[ entry->ha[j]&15 ];
				hbuffer[k++]=':';
			}
			hbuffer[--k]=0;	/* strip the trailing ':' */

#ifdef CONFIG_AX25
			}
#endif
			size = sprintf(buffer+len,
				"%-17s0x%-10x0x%-10x%s",
				in_ntoa(entry->ip),
				(unsigned int)entry->dev->type,
				entry->flags,
				hbuffer);
#if RT_CACHE_DEBUG < 2
			size += sprintf(buffer+len+size,
				" %-17s %s\n",
				entry->mask==DEF_ARP_NETMASK ?
				"*" : in_ntoa(entry->mask), entry->dev->name);
#else
			/* Debug build adds hh refcount/uptodate columns. */
			size += sprintf(buffer+len+size,
				" %-17s %s\t%ld\t%1d\n",
				entry->mask==DEF_ARP_NETMASK ?
				"*" : in_ntoa(entry->mask), entry->dev->name,
				entry->hh ? entry->hh->hh_refcnt : -1,
				entry->hh ? entry->hh->hh_uptodate : 0);
#endif

			len += size;
			pos += size;

			if (pos <= offset)
				len=0;		/* still before the window */
			if (pos >= offset+length)
				goto done;	/* window filled */
		}
	}
done:
	arp_unlock();

	*start = buffer+len-(pos-offset);	/* start of requested data */
	len = pos-offset;			/* bytes available */
	if (len>length)
		len = length;			/* clip to the request */
	return len;
}
1618
1619
1620
/*
 *	Bind a hardware-header cache slot (*hhp) for `paddr' on `dev'.
 *	Reuses an existing hh of the same type when the entry already has
 *	one; otherwise allocates a new hh and, if the host is unknown,
 *	also creates a pending ARP entry and starts resolution.
 *	Returns 0 when a new hh was installed, 1 otherwise (already
 *	bound, reused, or failure).
 */
int arp_bind_cache(struct hh_cache ** hhp, struct device *dev, unsigned short htype, u32 paddr)
{
	struct arp_table *entry;
	struct hh_cache *hh = *hhp;
	int addr_hint;
	unsigned long flags;

	if (hh)
		return 1;	/* already bound */

	/* Predefined addresses (self/mcast/bcast) need no ARP entry. */
	if ((addr_hint = ip_chk_addr(paddr)) != 0)
	{
		unsigned char haddr[MAX_ADDR_LEN];
		if (hh)
			return 1;
		hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
		if (!hh)
			return 1;
		arp_set_predefined(addr_hint, haddr, paddr, dev);
		hh->hh_uptodate = 0;
		hh->hh_refcnt = 1;
		hh->hh_arp = NULL;
		hh->hh_next = NULL;
		hh->hh_type = htype;
		*hhp = hh;
		dev->header_cache_update(hh, dev, haddr);
		return 0;
	}

	save_flags(flags);

	arp_fast_lock();

	entry = arp_lookup(paddr, 0, dev);
	if (entry == NULL)
		entry = arpd_lookup(paddr, 0, dev, __LINE__);

	if (entry)
	{
		/* Reuse an existing hh of the same header type, if any. */
		cli();
		for (hh = entry->hh; hh; hh=hh->hh_next)
			if (hh->hh_type == htype)
				break;
		if (hh)
		{
			hh->hh_refcnt++;
			*hhp = hh;
			restore_flags(flags);
			arp_unlock();
			return 1;
		}
		restore_flags(flags);
	}

	hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
	if (!hh)
	{
		arp_unlock();
		return 1;
	}

	hh->hh_uptodate = 0;
	hh->hh_refcnt = 1;
	hh->hh_arp = NULL;
	hh->hh_next = NULL;
	hh->hh_type = htype;

	if (entry)
	{
		/* Known host: attach the new hh to the existing entry. */
		dev->header_cache_update(hh, dev, entry->ha);
		*hhp = hh;
		cli();
		hh->hh_arp = (void*)entry;
		entry->hh = hh;
		hh->hh_refcnt++;	/* the entry's own reference */
		restore_flags(flags);
		entry->last_used = jiffies;
		arpd_update(entry, __LINE__);
		arp_unlock();
		return 0;
	}

	/*
	 *	Unknown host: create a pending entry carrying the hh and
	 *	kick off resolution.
	 */
	entry = arp_add_entry();
	if (entry == NULL)
	{
		kfree_s(hh, sizeof(struct hh_cache));
		arp_unlock();
		return 1;
	}

	entry->last_updated = entry->last_used = jiffies;
	entry->flags = 0;		/* not resolved yet */
	entry->ip = paddr;
	entry->mask = DEF_ARP_NETMASK;
	memset(entry->ha, 0, dev->addr_len);
	entry->dev = dev;
	entry->hh = hh;
	arpd_update(entry, __LINE__);
	ATOMIC_INCR(&hh->hh_refcnt);	/* the entry's own reference */
	init_timer(&entry->timer);
	entry->timer.function = arp_expire_request;
	entry->timer.data = (unsigned long)entry;
	entry->timer.expires = jiffies + ARP_RES_TIME;
	skb_queue_head_init(&entry->skb);

	if (arp_lock == 1)
	{
		/* Sole lock holder: insert directly and send the request. */
		unsigned long hash = HASH(paddr);
		cli();
		entry->next = arp_tables[hash];
		arp_tables[hash] = entry;
		hh->hh_arp = (void*)entry;
		entry->retries = ARP_MAX_TRIES;
		restore_flags(flags);

		add_timer(&entry->timer);
		arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
	}
	else
	{
		/* Table busy: defer insertion to the bottom half. */
#if RT_CACHE_DEBUG >= 1
		printk("arp_cache_bind: %08x backlogged\n", entry->ip);
#endif
		arp_enqueue(&arp_backlog, entry);
		arp_bh_mask |= ARP_BH_BACKLOG;
	}
	*hhp = hh;
	arp_unlock();
	return 0;
}
1756
/*
 *	Bottom-half worker: drain the ARP backlog queue.
 *
 *	Entries land on arp_backlog when arp_bind_cache (or similar) could
 *	not take the table lock.  Here, once the lock is free, each queued
 *	entry is either installed into the hash table or — if a duplicate
 *	(same IP, same device) appeared in the meantime — merged into the
 *	existing entry and freed.  Runs with interrupts carefully toggled:
 *	cli() protects list manipulation, sti() reopens windows around the
 *	slower work.
 */
static void arp_run_bh()
{
	unsigned long flags;
	struct arp_table *entry, *entry1;
	struct hh_cache *hh;
	__u32 sip;

	save_flags(flags);
	cli();
	/* Only proceed if nobody holds the ARP lock; otherwise the holder
	 * will re-trigger us via arp_bh_mask when it unlocks. */
	if (!arp_lock)
	{
		arp_fast_lock();

		while ((entry = arp_dequeue(&arp_backlog)) != NULL)
		{
			unsigned long hash;
			sti();
			sip = entry->ip;
			hash = HASH(sip);

			/* Check whether an entry for the same (ip, dev) pair
			 * was created while this one sat on the backlog. */
			for (entry1=arp_tables[hash]; entry1; entry1=entry1->next)
				if (entry1->ip==sip && entry1->dev == entry->dev)
					break;

			if (!entry1)
			{
				/* No duplicate: install the backlogged entry
				 * into the hash table and (re)start its
				 * resolution timer and ARP request. */
				struct device * dev = entry->dev;
				cli();
				entry->next = arp_tables[hash];
				arp_tables[hash] = entry;
				for (hh=entry->hh; hh; hh=hh->hh_next)
					hh->hh_arp = (void*)entry;
				sti();
				del_timer(&entry->timer);
				entry->timer.expires = jiffies + ARP_RES_TIME;
				add_timer(&entry->timer);
				entry->retries = ARP_MAX_TRIES;
				arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
#if RT_CACHE_DEBUG >= 1
				printk("arp_run_bh: %08x reinstalled\n", sip);
#endif
			}
			else
			{
				struct sk_buff * skb;
				struct hh_cache * next;

				/* Duplicate found: migrate this entry's
				 * hardware-header cache records onto the
				 * surviving entry. */
				cli();
				for (hh=entry->hh; hh; hh=next)
				{
					next = hh->hh_next;
					hh->hh_next = entry1->hh;
					entry1->hh = hh;
					hh->hh_arp = (void*)entry1;
				}
				entry->hh = NULL;

				/* Requeue any packets that were waiting on
				 * this entry to the surviving entry. */
				while ((skb = skb_dequeue(&entry->skb)) != NULL)
				{
					skb_device_lock(skb);
					sti();
					skb_queue_tail(&entry1->skb, skb);
					skb_device_unlock(skb);
					cli();
				}
				sti();

#if RT_CACHE_DEBUG >= 1
				printk("arp_run_bh: entry %08x was born dead\n", entry->ip);
#endif
				arp_free_entry(entry);

				/* If the survivor is already resolved, its
				 * new hh records and queued packets can be
				 * completed immediately. */
				if (entry1->flags & ATF_COM)
				{
					arp_update_hhs(entry1);
					arp_send_q(entry1);
				}
			}
			cli();
		}
		arp_bh_mask &= ~ARP_BH_BACKLOG;
		arp_unlock();
	}
	restore_flags(flags);
}
1852
1853
1854
1855
1856
/*
 *	Return 1 if the first 'len' bytes at 'addr' are all zero,
 *	0 otherwise.  A zero or negative 'len' is trivially empty.
 */
static inline int empty(unsigned char * addr, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (addr[i] != 0)
			return 0;
	return 1;
}
1867
1868
1869
1870
1871
/*
 *	Set (create or replace) an ARP cache entry from a SIOCSARP
 *	ioctl request.  Any existing entry for the address is destroyed
 *	first, then a fresh entry is built from the request.  Returns 0
 *	on success or a negative errno.
 */
static int arp_req_set(struct arpreq *r, struct device * dev)
{
	struct arp_table *entry;
	struct sockaddr_in *si;
	struct rtable *rt;
	struct device *dev1;
	unsigned char *ha;
	u32 ip;

	/*
	 *	Extract the protocol (IP) address being mapped.
	 */
	si = (struct sockaddr_in *) &r->arp_pa;
	ip = si->sin_addr.s_addr;

	/*
	 *	Find the device the route to this address actually uses;
	 *	our own addresses map to the loopback device.
	 */
	if (ip_chk_addr(ip) == IS_MYADDR)
		dev1 = dev_get("lo");
	else {
		rt = ip_rt_route(ip, 0);
		if (!rt)
			return -ENETUNREACH;
		dev1 = rt->rt_dev;
		ip_rt_put(rt);
	}

	/* If the caller did not name a device, use the routed one —
	 * but refuse devices that never do ARP. */
	if (!dev) {
		if (dev1->flags&(IFF_LOOPBACK|IFF_NOARP))
			return -ENODEV;
		dev = dev1;
	}

	/* Hardware address family must match the device type. */
	if (r->arp_ha.sa_family != dev->type)
		return -EINVAL;

	/* A proxy (ATF_PUBL) entry must be for a *different* device than
	 * the route; a normal entry must be for the routed device. */
	if (((r->arp_flags & ATF_PUBL) && dev == dev1) ||
	    (!(r->arp_flags & ATF_PUBL) && dev != dev1))
		return -EINVAL;

#if RT_CACHE_DEBUG >= 1
	if (arp_lock)
		printk("arp_req_set: bug\n");
#endif
	arp_fast_lock();

	/*
	 *	Look for an existing entry for this address; the netmask
	 *	flag is masked out so exact-address entries are matched.
	 */
	entry = arp_lookup(ip, r->arp_flags & ~ATF_NETMASK, dev);
	if (entry == NULL)
		entry = arpd_lookup(ip, r->arp_flags & ~ATF_NETMASK, dev, __LINE__);

	/* Replace rather than update: destroy any old entry. */
	if (entry)
	{
		arp_destroy(entry);
		entry = NULL;
	}

	/*
	 *	Allocate and link a fresh entry.
	 */
	if (entry == NULL)
	{
		entry = arp_add_entry();
		if (entry == NULL)
		{
			arp_unlock();
			return -ENOMEM;
		}
		entry->ip = ip;
		entry->hh = NULL;
		init_timer(&entry->timer);
		entry->timer.function = arp_expire_request;
		entry->timer.data = (unsigned long)entry;

		if (r->arp_flags & ATF_PUBL)
		{
			/* Proxy entries live on their own list. */
			cli();
			entry->next = arp_proxy_list;
			arp_proxy_list = entry;
			sti();
		}
		else
		{
			/* Normal entries go into the hash table. */
			unsigned long hash = HASH(ip);
			cli();
			entry->next = arp_tables[hash];
			arp_tables[hash] = entry;
			sti();
		}
		skb_queue_head_init(&entry->skb);
	}

	/*
	 *	Fill in the hardware address; an all-zero address with
	 *	ATF_COM set means "use this device's own address".
	 */
	ha = r->arp_ha.sa_data;
	if ((r->arp_flags & ATF_COM) && empty(ha, dev->addr_len))
		ha = dev->dev_addr;
	memcpy(entry->ha, ha, dev->addr_len);
	entry->last_updated = entry->last_used = jiffies;
	arpd_update(entry, __LINE__);
	/* Manually-set entries are always complete. */
	entry->flags = r->arp_flags | ATF_COM;
	if ((entry->flags & ATF_PUBL) && (entry->flags & ATF_NETMASK))
	{
		si = (struct sockaddr_in *) &r->arp_netmask;
		entry->mask = si->sin_addr.s_addr;
	}
	else
		entry->mask = DEF_ARP_NETMASK;
	entry->dev = dev;
	/* Refresh any cached hardware headers pointing at this entry. */
	arp_update_hhs(entry);
	arp_unlock();
	return 0;
}
1998
1999
2000
2001
2002
2003
2004
2005 static int arp_req_get(struct arpreq *r, struct device *dev)
2006 {
2007 struct arp_table *entry;
2008 struct sockaddr_in *si;
2009
2010 si = (struct sockaddr_in *) &r->arp_pa;
2011
2012 #if RT_CACHE_DEBUG >= 1
2013 if (arp_lock)
2014 printk("arp_req_set: bug\n");
2015 #endif
2016 arp_fast_lock();
2017
2018 entry = arp_lookup(si->sin_addr.s_addr, r->arp_flags|ATF_NETMASK, dev);
2019 if (entry == NULL)
2020 entry = arpd_lookup(si->sin_addr.s_addr,
2021 r->arp_flags|ATF_NETMASK, dev, __LINE__);
2022
2023 if (entry == NULL)
2024 {
2025 arp_unlock();
2026 return -ENXIO;
2027 }
2028
2029
2030
2031
2032
2033 memcpy(r->arp_ha.sa_data, &entry->ha, entry->dev->addr_len);
2034 r->arp_ha.sa_family = entry->dev->type;
2035 r->arp_flags = entry->flags;
2036 strncpy(r->arp_dev, entry->dev->name, 16);
2037 arp_unlock();
2038 return 0;
2039 }
2040
2041 static int arp_req_delete(struct arpreq *r, struct device * dev)
2042 {
2043 struct arp_table *entry;
2044 struct sockaddr_in *si;
2045
2046 si = (struct sockaddr_in *) &r->arp_pa;
2047 #if RT_CACHE_DEBUG >= 1
2048 if (arp_lock)
2049 printk("arp_req_delete: bug\n");
2050 #endif
2051 arp_fast_lock();
2052
2053 if (!(r->arp_flags & ATF_PUBL))
2054 {
2055 for (entry = arp_tables[HASH(si->sin_addr.s_addr)];
2056 entry != NULL; entry = entry->next)
2057 if (entry->ip == si->sin_addr.s_addr
2058 && (!dev || entry->dev == dev))
2059 {
2060 arp_destroy(entry);
2061 arp_unlock();
2062 return 0;
2063 }
2064 }
2065 else
2066 {
2067 for (entry = arp_proxy_list;
2068 entry != NULL; entry = entry->next)
2069 if (entry->ip == si->sin_addr.s_addr
2070 && (!dev || entry->dev == dev))
2071 {
2072 arp_destroy(entry);
2073 arp_unlock();
2074 return 0;
2075 }
2076 }
2077
2078 arp_unlock();
2079 return -ENXIO;
2080 }
2081
2082
2083
2084
2085
/*
 *	Handle the ARP ioctls (SIOC[GSD]ARP and their obsolete OLD_*
 *	variants).  Copies the request from user space, resolves the
 *	target device, and dispatches to arp_req_set/get/delete.
 *	Returns 0 or a negative errno.
 */
int arp_ioctl(unsigned int cmd, void *arg)
{
	int err;
	struct arpreq r;

	struct device * dev = NULL;

	switch(cmd)
	{
		case SIOCDARP:
		case SIOCSARP:
			if (!suser())
				return -EPERM;
			/* fall through — read the request like SIOCGARP */
		case SIOCGARP:
			err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq));
			if (err)
				return err;
			memcpy_fromfs(&r, arg, sizeof(struct arpreq));
			break;
		case OLD_SIOCDARP:
		case OLD_SIOCSARP:
			if (!suser())
				return -EPERM;
			/* fall through — read the old-format request */
		case OLD_SIOCGARP:
			/* Old struct lacks arp_dev; clear it after the copy. */
			err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq_old));
			if (err)
				return err;
			memcpy_fromfs(&r, arg, sizeof(struct arpreq_old));
			memset(&r.arp_dev, 0, sizeof(r.arp_dev));
			break;
		default:
			return -EINVAL;
	}

	if (r.arp_pa.sa_family != AF_INET)
		return -EPFNOSUPPORT;
	if (((struct sockaddr_in *)&r.arp_pa)->sin_addr.s_addr == 0)
		return -EINVAL;

	if (r.arp_dev[0])
	{
		/* Caller named a device: fetch it and sanity-check the
		 * hardware address family against the device type. */
		if ((dev = dev_get(r.arp_dev)) == NULL)
			return -ENODEV;

		if (!r.arp_ha.sa_family)
			r.arp_ha.sa_family = dev->type;
		else if (r.arp_ha.sa_family != dev->type)
			return -EINVAL;
	}
	else
	{
		/* No device named: for a proxy set, pick any device of the
		 * requested hardware type. */
		if ((r.arp_flags & ATF_PUBL) &&
		    ((cmd == SIOCSARP) || (cmd == OLD_SIOCSARP))) {
			if ((dev = dev_getbytype(r.arp_ha.sa_family)) == NULL)
				return -ENODEV;
		}
	}

	switch(cmd)
	{
		case SIOCDARP:
		        return arp_req_delete(&r, dev);
		case SIOCSARP:
			return arp_req_set(&r, dev);
		case OLD_SIOCDARP:
			/* Old ioctl cannot distinguish proxy from normal
			 * entries, so try deleting the normal entry first,
			 * then the proxy one; report the first failure only
			 * if both attempts fail. */
			r.arp_flags &= ~ATF_PUBL;
			err = arp_req_delete(&r, dev);
			r.arp_flags |= ATF_PUBL;
			if (!err)
				arp_req_delete(&r, dev);
			else
				err = arp_req_delete(&r, dev);
			return err;
		case OLD_SIOCSARP:
			err = arp_req_set(&r, dev);
			/*
			 *	Old setups used a proxy set to shadow a
			 *	normal entry; remove any normal entry so the
			 *	proxy one takes effect.
			 */
			if (r.arp_flags & ATF_PUBL)
			{
				r.arp_flags &= ~ATF_PUBL;
				arp_req_delete(&r, dev);
			}
			return err;
		case SIOCGARP:
			err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq));
			if (err)
				return err;
			err = arp_req_get(&r, dev);
			if (!err)
				memcpy_tofs(arg, &r, sizeof(r));
			return err;
		case OLD_SIOCGARP:
			err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq_old));
			if (err)
				return err;
			/* Try the normal table first, then the proxy list. */
			r.arp_flags &= ~ATF_PUBL;
			err = arp_req_get(&r, dev);
			if (err < 0)
			{
				r.arp_flags |= ATF_PUBL;
				err = arp_req_get(&r, dev);
			}
			if (!err)
				memcpy_tofs(arg, &r, sizeof(struct arpreq_old));
			return err;
	}

	return 0;
}
2201
2202
2203
2204
2205
2206
/*
 *	Packet-type handler for ARP frames; registered in arp_init().
 */
static struct packet_type arp_packet_type =
{
	0,		/* type: set to htons(ETH_P_ARP) in arp_init() */
	NULL,		/* dev: NULL = listen on all devices */
	arp_rcv,	/* receive handler for incoming ARP packets */
	NULL,		/* data */
	NULL		/* next */
};
2215
/*
 *	Notifier so ARP entries can be flushed on device state changes.
 */
static struct notifier_block arp_dev_notifier={
	arp_device_event,	/* callback invoked on device events */
	NULL,			/* next notifier in chain */
	0			/* priority */
};
2221
/*
 *	Called once at boot to wire ARP into the network stack:
 *	register the ARP packet type, start the cache-expiry timer,
 *	hook device-event notifications, and export /proc/net/arp.
 */
void arp_init (void)
{
	/* Register to receive all ETH_P_ARP frames. */
	arp_packet_type.type=htons(ETH_P_ARP);
	dev_add_pack(&arp_packet_type);
	/* Start the periodic cache-expiry timer. */
	add_timer(&arp_timer);
	/* Flush/adjust entries when devices come and go. */
	register_netdevice_notifier(&arp_dev_notifier);

#ifdef CONFIG_PROC_FS
	/* Export the ARP table as /proc/net/arp. */
	proc_net_register(&(struct proc_dir_entry) {
		PROC_NET_ARP, 3, "arp",
		S_IFREG | S_IRUGO, 1, 0, 0,
		0, &proc_net_inode_operations,
		arp_get_info
	});
#endif
}
2241