This source file includes following definitions.
- min
- dev_add_pack
- dev_remove_pack
- dev_get
- dev_open
- dev_close
- register_netdevice_notifier
- unregister_netdevice_notifier
- dev_queue_xmit
- netif_rx
- dev_rint
- dev_transmit
- in_net_bh
- net_bh
- dev_tint
- dev_ifconf
- sprintf_stats
- dev_get_info
- bad_mask
- dev_ifsioc
- dev_ioctl
- dev_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48 #include <asm/segment.h>
49 #include <asm/system.h>
50 #include <asm/bitops.h>
51 #include <linux/config.h>
52 #include <linux/types.h>
53 #include <linux/kernel.h>
54 #include <linux/sched.h>
55 #include <linux/string.h>
56 #include <linux/mm.h>
57 #include <linux/socket.h>
58 #include <linux/sockios.h>
59 #include <linux/in.h>
60 #include <linux/errno.h>
61 #include <linux/interrupt.h>
62 #include <linux/if_ether.h>
63 #include <linux/inet.h>
64 #include <linux/netdevice.h>
65 #include <linux/etherdevice.h>
66 #include <linux/notifier.h>
67 #include <net/ip.h>
68 #include <net/route.h>
69 #include <linux/skbuff.h>
70 #include <net/sock.h>
71 #include <net/arp.h>
72
73
74
75
76
77
78
79 struct packet_type *ptype_base[16];
80 struct packet_type *ptype_all = NULL;
81
82
83
84
85
86 struct notifier_block *netdev_chain=NULL;
87
88
89
90
91
92
93 static struct sk_buff_head backlog =
94 {
95 (struct sk_buff *)&backlog, (struct sk_buff *)&backlog
96 #if CONFIG_SKB_CHECK
97 ,SK_HEAD_SKB
98 #endif
99 };
100
101
102
103
104
105 static int backlog_size = 0;
106
107
108
109
110
/*
 *	Return the smaller of two unsigned values.  Local helper used by
 *	dev_rint() when copying out of a device receive ring.
 */
static __inline__ unsigned long min(unsigned long a, unsigned long b)
{
	if (b < a)
		return b;
	return a;
}
115
116
117
118
119
120
121
122
123
124
125
126
127 static int dev_nit=0;
128
129
130
131
132
133
134
135 void dev_add_pack(struct packet_type *pt)
136 {
137 int hash;
138 if(pt->type==htons(ETH_P_ALL))
139 {
140 dev_nit++;
141 pt->next=ptype_all;
142 ptype_all=pt;
143 }
144 else
145 {
146 hash=ntohs(pt->type)&15;
147 pt->next = ptype_base[hash];
148 ptype_base[hash] = pt;
149 }
150 }
151
152
153
154
155
156
157 void dev_remove_pack(struct packet_type *pt)
158 {
159 struct packet_type **pt1;
160 if(pt->type==htons(ETH_P_ALL))
161 {
162 dev_nit--;
163 pt1=&ptype_all;
164 }
165 else
166 pt1=&ptype_base[ntohs(pt->type)&15];
167 for(; (*pt1)!=NULL; pt1=&((*pt1)->next))
168 {
169 if(pt==(*pt1))
170 {
171 *pt1=pt->next;
172 return;
173 }
174 }
175 printk("dev_remove_pack: %p not found.\n", pt);
176 }
177
178
179
180
181
182
183
184
185
186
187
188 struct device *dev_get(char *name)
189 {
190 struct device *dev;
191
192 for (dev = dev_base; dev != NULL; dev = dev->next)
193 {
194 if (strcmp(dev->name, name) == 0)
195 return(dev);
196 }
197 return(NULL);
198 }
199
200
201
202
203
204
205 int dev_open(struct device *dev)
206 {
207 int ret = 0;
208
209
210
211
212 if (dev->open)
213 ret = dev->open(dev);
214
215
216
217
218
219 if (ret == 0)
220 {
221 dev->flags |= (IFF_UP | IFF_RUNNING);
222
223
224
225 #ifdef CONFIG_IP_MULTICAST
226
227
228
229 ip_mc_allhost(dev);
230 #endif
231 dev_mc_upload(dev);
232 notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
233 }
234 return(ret);
235 }
236
237
238
239
240
241
242 int dev_close(struct device *dev)
243 {
244
245
246
247
248 if (dev->flags != 0)
249 {
250 int ct=0;
251 dev->flags = 0;
252
253
254
255 if (dev->stop)
256 dev->stop(dev);
257
258
259
260 notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
261
262
263
264 dev_mc_discard(dev);
265
266
267
268 dev->pa_addr = 0;
269 dev->pa_dstaddr = 0;
270 dev->pa_brdaddr = 0;
271 dev->pa_mask = 0;
272
273
274
275 while(ct<DEV_NUMBUFFS)
276 {
277 struct sk_buff *skb;
278 while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
279 if(skb->free)
280 kfree_skb(skb,FREE_WRITE);
281 ct++;
282 }
283 }
284 return(0);
285 }
286
287
288
289
290
291
292
/*
 *	Register a callback for network device events (NETDEV_UP /
 *	NETDEV_DOWN, raised by dev_open/dev_close).  Thin wrapper over
 *	the generic notifier chain.
 */
int register_netdevice_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&netdev_chain, nb);
}
297
/*
 *	Remove a callback previously added with
 *	register_netdevice_notifier().
 */
int unregister_netdevice_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&netdev_chain,nb);
}
302
303
304
305
306
307
308
309
310
311
312
/*
 *	Queue a buffer for transmission on a device.  pri selects one of
 *	the DEV_NUMBUFFS priority queues; a NEGATIVE pri means this is a
 *	requeue from dev_tint() (encoded as -queue - 1) and the buffer
 *	must not be re-queued behind others or re-delivered to taps.
 *	If the driver refuses the frame it is pushed back on the head of
 *	its queue for a later retry.
 */
void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
	unsigned long flags;
	struct packet_type *ptype;
	int where = 0;		/* nonzero: requeue path, skip queueing/taps */

	/* Fresh submissions must hold the device lock on the skb so it
	   is not freed while a driver owns it. */
	if(pri>=0 && !skb_device_locked(skb))
		skb_device_lock(skb);
#if CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	skb->dev = dev;

	/* Decode a dev_tint() requeue: recover the real queue number. */
	if (pri < 0)
	{
		pri = -pri-1;
		where = 1;
	}

#ifdef CONFIG_NET_DEBUG
	if (pri >= DEV_NUMBUFFS)
	{
		printk("bad priority in dev_queue_xmit.\n");
		pri = 1;
	}
#endif

	/* If the link-layer header is unresolved, let ARP take over;
	   rebuild_header() returning nonzero means it kept the buffer. */
	if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
		return;
	}

	save_flags(flags);
	cli();
	if (!where)
	{
		/* FIFO fairness: append this buffer, then transmit
		   whatever is at the head of the queue instead (which
		   may or may not be the same buffer). */
		skb_queue_tail(dev->buffs + pri,skb);
		skb_device_unlock(skb);
		skb = skb_dequeue(dev->buffs + pri);
		skb_device_lock(skb);
	}
	restore_flags(flags);

	/* Feed a clone to every wildcard tap (e.g. sniffers), but never
	   echo a packet back to the socket that sent it. */
	if(!where && dev_nit)
	{
		skb->stamp=xtime;
		for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next)
		{
			if ((ptype->dev == dev || !ptype->dev) &&
			   ((struct sock *)ptype->data != skb->sk))
			{
				struct sk_buff *skb2;
				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
					break;
				skb2->h.raw = skb2->data + dev->hard_header_len;
				skb2->mac.raw = skb2->data;
				ptype->func(skb2, skb->dev, ptype);
			}
		}
	}
	/* Hand the frame to the driver with bottom halves held off. */
	start_bh_atomic();
	if (dev->hard_start_xmit(skb, dev) == 0) {
		/* Driver accepted it: ownership has passed to hardware. */
		end_bh_atomic();
		return;
	}
	end_bh_atomic();

	/* Driver was busy: put the buffer back at the head of its
	   queue so ordering is preserved for the retry. */
	cli();
	skb_device_unlock(skb);
	skb_queue_head(dev->buffs + pri,skb);
	restore_flags(flags);
}
409
410
411
412
413
414
415
/*
 *	Receive entry point for device drivers, called from interrupt
 *	context.  The buffer is appended to the global backlog queue and
 *	the network bottom half is scheduled (or run directly when
 *	CONFIG_NET_RUNONIRQ is set) to process it later.
 */
void netif_rx(struct sk_buff *skb)
{
	/* Sticky drop flag: once the backlog overflows we keep
	   dropping until it has drained completely. */
	static int dropping = 0;

	/* The buffer now belongs to the stack, not to any socket. */
	skb->sk = NULL;
	skb->free = 1;
	/* Timestamp on arrival unless the driver already did. */
	if(skb->stamp.tv_sec==0)
		skb->stamp = xtime;

	/* Overflow hysteresis: start dropping above 300 queued
	   buffers, stop only when the backlog is empty again. */
	if (!backlog_size)
		dropping = 0;
	else if (backlog_size > 300)
		dropping = 1;

	if (dropping)
	{
		kfree_skb(skb, FREE_READ);
		return;
	}

#if CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	skb_queue_tail(&backlog,skb);
	backlog_size++;

	/* Hand the work off to net_bh(). */
#ifdef CONFIG_NET_RUNONIRQ
	net_bh();
#else
	mark_bh(NET_BH);
#endif
	return;
}
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
/*
 *	Legacy driver receive entry point.  Accepts either a ready-made
 *	sk_buff (flags & IN_SKBUFF, buff is really the skb) or a raw byte
 *	range that may wrap around the device receive ring
 *	(rmem_start..rmem_end), which is copied into a fresh skb.  The
 *	result is fed to netif_rx().  Returns 1 if the packet was
 *	rejected or dropped, 0 on success.
 */
int dev_rint(unsigned char *buff, long len, int flags, struct device *dev)
{
	static int dropping = 0;	/* sticky no-memory drop state */
	struct sk_buff *skb = NULL;
	unsigned char *to;
	int amount, left;
	int len2;

	if (dev == NULL || buff == NULL || len <= 0)
		return(1);

	if (flags & IN_SKBUFF)
	{
		/* The driver already built an skb for us. */
		skb = (struct sk_buff *) buff;
	}
	else
	{
		if (dropping)
		{
			/* Keep dropping until the backlog has drained. */
			if (skb_peek(&backlog) != NULL)
				return(1);
			printk("INET: dev_rint: no longer dropping packets.\n");
			dropping = 0;
		}

		skb = alloc_skb(len, GFP_ATOMIC);
		if (skb == NULL)
		{
			printk("dev_rint: packet dropped on %s (no memory) !\n",
			       dev->name);
			dropping = 1;
			return(1);
		}

		/* Copy out of the ring buffer, wrapping to rmem_start
		   whenever buff reaches rmem_end. */
		to = skb_put(skb,len);
		left = len;

		len2 = len;
		while (len2 > 0)
		{
			/* Bytes available before the ring wraps. */
			amount = min(len2, (unsigned long) dev->rmem_end -
				     (unsigned long) buff);
			memcpy(to, buff, amount);
			len2 -= amount;
			left -= amount;
			buff += amount;
			to += amount;
			if ((unsigned long) buff == dev->rmem_end)
				buff = (unsigned char *) dev->rmem_start;
		}
	}

	/* Stack owns and will free this buffer. */
	skb->dev = dev;
	skb->free = 1;

	netif_rx(skb);

	return(0);
}
552
553
554
555
556
557
558 void dev_transmit(void)
559 {
560 struct device *dev;
561
562 for (dev = dev_base; dev != NULL; dev = dev->next)
563 {
564 if (dev->flags != 0 && !dev->tbusy) {
565
566
567
568 dev_tint(dev);
569 }
570 }
571 }
572
573
574
575
576
577
578
579
580
581
582
583
584
585
volatile char in_bh = 0;	/* nonzero while net_bh() is running */

/*
 *	Report whether we are currently inside the network bottom half.
 *	Returns 1 if so, 0 otherwise.
 */
int in_net_bh()
{
	return in_bh != 0;
}
592
593
594
595
596
597
598
599
600
/*
 *	The network bottom half.  Drains the backlog queue filled by
 *	netif_rx() and delivers each buffer to every matching protocol
 *	handler.  Interrupts are enabled during packet processing and
 *	disabled only around backlog queue manipulation.
 */
void net_bh(void *tmp)
{
	struct sk_buff *skb;
	struct packet_type *ptype;
	struct packet_type *pt_prev;
	unsigned short type;

	/* Atomically take the re-entry lock; if the bottom half is
	   already running somewhere, just leave. */
	if (set_bit(1, (void*)&in_bh))
		return;

	/* Push out any pending transmits before receive work. */
	dev_transmit();

	/* Interrupts off while we pull from the shared backlog. */
	cli();

	while((skb=skb_dequeue(&backlog))!=NULL)
	{
		backlog_size--;

		/* Process this packet with interrupts enabled. */
		sti();

		/* Protocol handlers expect h.raw at the payload start. */
		skb->h.raw = skb->data;

		type = skb->protocol;

		/* Deliver to every wildcard (ETH_P_ALL) tap.  pt_prev
		   trails one entry behind so the LAST matching handler
		   can consume the original skb while all earlier ones
		   receive clones. */
		pt_prev = NULL;
		for (ptype = ptype_all; ptype!=NULL; ptype=ptype->next)
		{
			if(pt_prev)
			{
				struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC);
				if(skb2)
					pt_prev->func(skb2,skb->dev, pt_prev);
			}
			pt_prev=ptype;
		}

		/* Same deferred-delivery walk over the hashed per-type
		   handler list for this protocol. */
		for (ptype = ptype_base[ntohs(type)&15]; ptype != NULL; ptype = ptype->next)
		{
			if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev))
			{
				if(pt_prev)
				{
					struct sk_buff *skb2;

					skb2=skb_clone(skb, GFP_ATOMIC);

					/* A failed clone simply skips
					   that handler. */
					if(skb2)
						pt_prev->func(skb2, skb->dev, pt_prev);
				}

				pt_prev=ptype;
			}
		}

		/* The final matching handler gets the skb itself... */
		if(pt_prev)
			pt_prev->func(skb, skb->dev, pt_prev);
		/* ...or nobody wanted it and it is freed here. */
		else
			kfree_skb(skb, FREE_WRITE);

#ifdef CONFIG_XMIT_EVERY
		dev_transmit();
#endif
		/* Interrupts off again for the next dequeue. */
		cli();
	}

	/* Release the re-entry lock and re-enable interrupts. */
	in_bh = 0;
	sti();

	/* One last transmit kick now that receive work is done. */
	dev_transmit();
}
741
742
743
744
745
746
747
/*
 *	Re-submit every buffer queued on a device, highest-priority
 *	queue first, until the device reports busy.  Called when a
 *	driver can accept more output (see dev_transmit).
 */
void dev_tint(struct device *dev)
{
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);

	/* Queue 0 is the most urgent. */
	for(i = 0;i < DEV_NUMBUFFS; i++)
	{
		/* Interrupts off while the queue itself is touched. */
		cli();
		while((skb=skb_dequeue(&dev->buffs[i]))!=NULL)
		{
			/* Pin the buffer while the driver owns it, then
			   let interrupts back in for the actual send. */
			skb_device_lock(skb);
			restore_flags(flags);

			/* Negative priority tells dev_queue_xmit() this
			   is a requeue; -i - 1 encodes queue i. */
			dev_queue_xmit(skb,dev,-i - 1);

			/* Device filled up again: stop feeding it. */
			if (dev->tbusy)
				return;
			cli();
		}
	}
	restore_flags(flags);
}
789
790
791
792
793
794
795
796
/*
 *	SIOCGIFCONF handler: copy one struct ifreq (name + primary
 *	address) per UP interface into the user buffer described by the
 *	struct ifconf at arg.  Returns a negative errno from
 *	verify_area on a bad user pointer.
 */
static int dev_ifconf(char *arg)
{
	struct ifconf ifc;
	struct ifreq ifr;
	struct device *dev;
	char *pos;
	int len;
	int err;

	/* Fetch the user's request block. */
	err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
	if(err)
		return err;
	memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
	len = ifc.ifc_len;
	pos = ifc.ifc_buf;

	/* The result buffer is user memory too; validate all of it
	   up front. */
	err=verify_area(VERIFY_WRITE,pos,len);
	if(err)
		return err;

	/* Emit one entry per running interface, stopping when the
	   buffer has no room left for a whole ifreq. */
	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		if(!(dev->flags & IFF_UP))
			continue;
		memset(&ifr, 0, sizeof(struct ifreq));
		strcpy(ifr.ifr_name, dev->name);
		(*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
		(*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;

		if (len < sizeof(struct ifreq))
			break;

		memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
		pos += sizeof(struct ifreq);
		len -= sizeof(struct ifreq);
	}

	/* Report back how much of the buffer was actually used. */
	ifc.ifc_len = (pos - ifc.ifc_buf);
	ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
	memcpy_tofs(arg, &ifc, sizeof(struct ifconf));

	/* NOTE(review): this returns pos - arg, i.e. relative to the
	   ifconf struct rather than to ifc_buf; dev_ioctl discards the
	   value, so callers only see 0 — confirm before relying on it. */
	return(pos - arg);
}
869
870
871
872
873
874
875
876 static int sprintf_stats(char *buffer, struct device *dev)
877 {
878 struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
879 int size;
880
881 if (stats)
882 size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
883 dev->name,
884 stats->rx_packets, stats->rx_errors,
885 stats->rx_dropped + stats->rx_missed_errors,
886 stats->rx_fifo_errors,
887 stats->rx_length_errors + stats->rx_over_errors
888 + stats->rx_crc_errors + stats->rx_frame_errors,
889 stats->tx_packets, stats->tx_errors, stats->tx_dropped,
890 stats->tx_fifo_errors, stats->collisions,
891 stats->tx_carrier_errors + stats->tx_aborted_errors
892 + stats->tx_window_errors + stats->tx_heartbeat_errors);
893 else
894 size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);
895
896 return size;
897 }
898
899
900
901
902
903
/*
 *	/proc/net/dev read handler.  Emits the header plus one
 *	sprintf_stats() line per device, implementing the classic procfs
 *	windowing protocol: the caller wants `length` bytes starting at
 *	`offset` of the virtual file; begin/pos track how much of that
 *	file precedes the data currently held in buffer.
 */
int dev_get_info(char *buffer, char **start, off_t offset, int length)
{
	int len=0;		/* bytes of wanted data in buffer */
	off_t begin=0;		/* file offset where buffer[0] maps */
	off_t pos=0;		/* file offset after last byte written */
	int size;

	struct device *dev;

	size = sprintf(buffer, "Inter-| Receive | Transmit\n"
	" face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");

	pos+=size;
	len+=size;

	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		size = sprintf_stats(buffer+len, dev);
		len+=size;
		pos=begin+len;

		/* Everything so far is before the window: discard it
		   and remember where the buffer now starts. */
		if(pos<offset)
		{
			len=0;
			begin=pos;
		}
		/* Past the end of the window: stop generating. */
		if(pos>offset+length)
			break;
	}

	/* Point the caller at the start of its window and clamp. */
	*start=buffer+(offset-begin);
	len-=(offset-begin);
	if(len>length)
		len=length;
	return len;
}
942
943
944
945
946
947
/*
 *	Validate a netmask given in network byte order.  A good mask is a
 *	single contiguous run of one bits starting at the top, and the
 *	supplied address must not have host bits set outside it.
 *	Returns 1 if the mask is unacceptable, 0 if it is fine.
 */
static inline int bad_mask(unsigned long mask, unsigned long addr)
{
	unsigned long inverted = ~mask;

	/* Address bits outside the mask are an error. */
	if (addr & inverted)
		return 1;
	/* In host order the inverted mask must look like 2^n - 1. */
	if (ntohl(inverted) & (ntohl(inverted) + 1))
		return 1;
	return 0;
}
957
958
959
960
961
962
963
964
/*
 *	Handle one per-interface ioctl.  The struct ifreq at arg names
 *	the interface and carries the get/set payload; getset is the
 *	ioctl command.  GET-style commands fill ifr and jump to rarok,
 *	which copies it back to user space; SET-style commands update
 *	the device directly.  Returns 0 or a negative errno.
 */
static int dev_ifsioc(void *arg, unsigned int getset)
{
	struct ifreq ifr;
	struct device *dev;
	int ret;

	/* Validate and fetch the user's ifreq. */
	int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
	if(err)
		return err;

	memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));

	/* Look the interface up by name. */
	if ((dev = dev_get(ifr.ifr_name)) == NULL)
		return(-ENODEV);

	switch(getset)
	{
		case SIOCGIFFLAGS:
			ifr.ifr_flags = dev->flags;
			goto rarok;

		case SIOCSIFFLAGS:
			{
				int old_flags = dev->flags;
				/* Only user-settable flag bits pass through. */
				dev->flags = ifr.ifr_flags & (
					IFF_UP | IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
					IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
					IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER
					| IFF_MULTICAST);

				/* Multicast-related bits may have changed;
				   resync the hardware filter. */
				dev_mc_upload(dev);

				/* UP transitions drive dev_open/dev_close. */
				if ((old_flags & IFF_UP) && ((dev->flags & IFF_UP) == 0))
				{
					ret = dev_close(dev);
				}
				else
				{
					ret = (! (old_flags & IFF_UP) && (dev->flags & IFF_UP))
						? dev_open(dev) : 0;

					/* Open failed: take IFF_UP back out. */
					if(ret<0)
						dev->flags&=~IFF_UP;
				}
			}
			break;

		case SIOCGIFADDR:
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_port = 0;
			goto rarok;

		case SIOCSIFADDR:
			dev->pa_addr = (*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_addr.s_addr;
			dev->family = ifr.ifr_addr.sa_family;

#ifdef CONFIG_INET
			/* Derive the classful default mask for the new
			   address; the broadcast follows from it. */
			dev->pa_mask = ip_get_mask(dev->pa_addr);
#endif
			dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
			ret = 0;
			break;

		case SIOCGIFBRDADDR:
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_port = 0;
			goto rarok;

		case SIOCSIFBRDADDR:
			dev->pa_brdaddr = (*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr;
			ret = 0;
			break;

		case SIOCGIFDSTADDR:
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_port = 0;
			goto rarok;

		case SIOCSIFDSTADDR:
			dev->pa_dstaddr = (*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr;
			ret = 0;
			break;

		case SIOCGIFNETMASK:
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_port = 0;
			goto rarok;

		case SIOCSIFNETMASK:
			{
				unsigned long mask = (*(struct sockaddr_in *)
					&ifr.ifr_netmask).sin_addr.s_addr;
				ret = -EINVAL;
				/* Only contiguous masks are accepted. */
				if (bad_mask(mask,0))
					break;
				dev->pa_mask = mask;
				ret = 0;
			}
			break;

		case SIOCGIFMETRIC:

			ifr.ifr_metric = dev->metric;
			goto rarok;

		case SIOCSIFMETRIC:
			dev->metric = ifr.ifr_metric;
			ret=0;
			break;

		case SIOCGIFMTU:
			ifr.ifr_mtu = dev->mtu;
			goto rarok;

		case SIOCSIFMTU:

			/* 68 is the minimum IP MTU we will accept. */
			if(ifr.ifr_mtu<68)
				return -EINVAL;
			dev->mtu = ifr.ifr_mtu;
			ret = 0;
			break;

		case SIOCGIFMEM:
			/* Not supported. */
			ret = -EINVAL;
			break;

		case SIOCSIFMEM:
			ret = -EINVAL;
			break;

		case OLD_SIOCGIFHWADDR:
			/* Old-style call: raw bytes, no address family. */
			memcpy(ifr.old_ifr_hwaddr,dev->dev_addr, MAX_ADDR_LEN);
			goto rarok;

		case SIOCGIFHWADDR:
			memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
			ifr.ifr_hwaddr.sa_family=dev->type;
			goto rarok;

		case SIOCSIFHWADDR:
			if(dev->set_mac_address==NULL)
				return -EOPNOTSUPP;
			/* The new address must match the link type. */
			if(ifr.ifr_hwaddr.sa_family!=dev->type)
				return -EINVAL;
			ret=dev->set_mac_address(dev,ifr.ifr_hwaddr.sa_data);
			break;

		case SIOCGIFMAP:
			ifr.ifr_map.mem_start=dev->mem_start;
			ifr.ifr_map.mem_end=dev->mem_end;
			ifr.ifr_map.base_addr=dev->base_addr;
			ifr.ifr_map.irq=dev->irq;
			ifr.ifr_map.dma=dev->dma;
			ifr.ifr_map.port=dev->if_port;
			goto rarok;

		case SIOCSIFMAP:
			if(dev->set_config==NULL)
				return -EOPNOTSUPP;
			return dev->set_config(dev,&ifr.ifr_map);

		case SIOCADDMULTI:
			if(dev->set_multicast_list==NULL)
				return -EINVAL;
			if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
				return -EINVAL;
			dev_mc_add(dev,ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
			return 0;

		case SIOCDELMULTI:
			if(dev->set_multicast_list==NULL)
				return -EINVAL;
			if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
				return -EINVAL;
			dev_mc_delete(dev,ifr.ifr_hwaddr.sa_data,dev->addr_len, 1);
			return 0;

		/* Driver-private range: delegate to the driver and copy
		   the (possibly modified) ifreq back. */
		default:
			if((getset >= SIOCDEVPRIVATE) &&
			   (getset <= (SIOCDEVPRIVATE + 15))) {
				if(dev->do_ioctl==NULL)
					return -EOPNOTSUPP;
				ret=dev->do_ioctl(dev, &ifr, getset);
				memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
				break;
			}

			ret = -EINVAL;
	}
	return(ret);

/*
 *	Common exit for GET commands: copy the filled ifreq back to
 *	user space and report success.
 */
rarok:
	memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
	return 0;
}
1215
1216
1217
1218
1219
1220
1221
1222 int dev_ioctl(unsigned int cmd, void *arg)
1223 {
1224 switch(cmd)
1225 {
1226 case SIOCGIFCONF:
1227 (void) dev_ifconf((char *) arg);
1228 return 0;
1229
1230
1231
1232
1233
1234 case SIOCGIFFLAGS:
1235 case SIOCGIFADDR:
1236 case SIOCGIFDSTADDR:
1237 case SIOCGIFBRDADDR:
1238 case SIOCGIFNETMASK:
1239 case SIOCGIFMETRIC:
1240 case SIOCGIFMTU:
1241 case SIOCGIFMEM:
1242 case SIOCGIFHWADDR:
1243 case SIOCSIFHWADDR:
1244 case OLD_SIOCGIFHWADDR:
1245 case SIOCGIFSLAVE:
1246 case SIOCGIFMAP:
1247 return dev_ifsioc(arg, cmd);
1248
1249
1250
1251
1252
1253 case SIOCSIFFLAGS:
1254 case SIOCSIFADDR:
1255 case SIOCSIFDSTADDR:
1256 case SIOCSIFBRDADDR:
1257 case SIOCSIFNETMASK:
1258 case SIOCSIFMETRIC:
1259 case SIOCSIFMTU:
1260 case SIOCSIFMEM:
1261 case SIOCSIFMAP:
1262 case SIOCSIFSLAVE:
1263 case SIOCADDMULTI:
1264 case SIOCDELMULTI:
1265 if (!suser())
1266 return -EPERM;
1267 return dev_ifsioc(arg, cmd);
1268
1269 case SIOCSIFLINK:
1270 return -EINVAL;
1271
1272
1273
1274
1275
1276 default:
1277 if((cmd >= SIOCDEVPRIVATE) &&
1278 (cmd <= (SIOCDEVPRIVATE + 15))) {
1279 return dev_ifsioc(arg, cmd);
1280 }
1281 return -EINVAL;
1282 }
1283 }
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293 void dev_init(void)
1294 {
1295 struct device *dev, *dev2;
1296
1297
1298
1299
1300
1301
1302
1303
1304 dev2 = NULL;
1305 for (dev = dev_base; dev != NULL; dev=dev->next)
1306 {
1307 if (dev->init && dev->init(dev))
1308 {
1309
1310
1311
1312
1313 if (dev2 == NULL)
1314 dev_base = dev->next;
1315 else
1316 dev2->next = dev->next;
1317 }
1318 else
1319 {
1320 dev2 = dev;
1321 }
1322 }
1323 }
1324