This source file includes the following definitions:
- min
- dev_add_pack
- dev_remove_pack
- dev_get
- dev_open
- dev_close
- register_netdevice_notifier
- unregister_netdevice_notifier
- dev_queue_xmit
- netif_rx
- dev_rint
- dev_transmit
- in_net_bh
- net_bh
- dev_tint
- dev_ifconf
- sprintf_stats
- dev_get_info
- bad_mask
- dev_ifsioc
- dev_ioctl
- dev_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40 #include <asm/segment.h>
41 #include <asm/system.h>
42 #include <asm/bitops.h>
43 #include <linux/config.h>
44 #include <linux/types.h>
45 #include <linux/kernel.h>
46 #include <linux/sched.h>
47 #include <linux/string.h>
48 #include <linux/mm.h>
49 #include <linux/socket.h>
50 #include <linux/sockios.h>
51 #include <linux/in.h>
52 #include <linux/errno.h>
53 #include <linux/interrupt.h>
54 #include <linux/if_ether.h>
55 #include <linux/inet.h>
56 #include <linux/netdevice.h>
57 #include <linux/etherdevice.h>
58 #include <linux/notifier.h>
59 #include "ip.h"
60 #include "route.h"
61 #include <linux/skbuff.h>
62 #include "sock.h"
63 #include "arp.h"
64
65
66
67
68
69
70
/*
 *	List of installed packet receive handlers (the protocol
 *	demultiplex list).  New entries are pushed on the front by
 *	dev_add_pack().
 */
struct packet_type *ptype_base = NULL;

/*
 *	Chain of subsystems wanting NETDEV_UP/NETDEV_DOWN notification
 *	when a device changes state (see dev_open()/dev_close()).
 */
struct notifier_block *netdev_chain=NULL;

/*
 *	Queue of packets received at interrupt time, waiting for
 *	net_bh() to process them.  Statically initialised as an empty
 *	circular list pointing back at itself.
 */
static struct sk_buff_head backlog =
{
	(struct sk_buff *)&backlog, (struct sk_buff *)&backlog
#ifdef CONFIG_SKB_CHECK
	,SK_HEAD_SKB
#endif
};

/* Number of packets currently sitting on the backlog queue. */
static int backlog_size = 0;
97
98
99
100
101
/*
 *	Return the smaller of two unsigned long values.
 */
static __inline__ unsigned long min(unsigned long a, unsigned long b)
{
	if (a < b)
		return a;
	return b;
}
106
107
108
109
110
111
112
113
114
115
116
117
/* Count of ETH_P_ALL taps installed; lets the transmit path skip the tap loop quickly. */
static int dev_nit=0;
119
120
121
122
123
124
125
126 void dev_add_pack(struct packet_type *pt)
127 {
128 if(pt->type==htons(ETH_P_ALL))
129 dev_nit++;
130 pt->next = ptype_base;
131 ptype_base = pt;
132 }
133
134
135
136
137
138
139 void dev_remove_pack(struct packet_type *pt)
140 {
141 struct packet_type **pt1;
142 if(pt->type==htons(ETH_P_ALL))
143 dev_nit--;
144 for(pt1=&ptype_base; (*pt1)!=NULL; pt1=&((*pt1)->next))
145 {
146 if(pt==(*pt1))
147 {
148 *pt1=pt->next;
149 return;
150 }
151 }
152 }
153
154
155
156
157
158
159
160
161
162
163
164 struct device *dev_get(char *name)
165 {
166 struct device *dev;
167
168 for (dev = dev_base; dev != NULL; dev = dev->next)
169 {
170 if (strcmp(dev->name, name) == 0)
171 return(dev);
172 }
173 return(NULL);
174 }
175
176
177
178
179
180
/*
 *	Bring a device up: call the driver's open routine (if any), and
 *	on success mark the interface IFF_UP|IFF_RUNNING, reload its
 *	multicast filters and tell interested subsystems via the
 *	notifier chain.
 *
 *	Returns 0 on success, or the driver's error code.
 */
int dev_open(struct device *dev)
{
	int ret = 0;

	/* Ask the driver to initialise the hardware. */
	if (dev->open)
		ret = dev->open(dev);

	/* Only flag the device up if the driver succeeded. */
	if (ret == 0)
	{
		dev->flags |= (IFF_UP | IFF_RUNNING);
#ifdef CONFIG_IP_MULTICAST
		/* Join the all-hosts multicast group. */
		ip_mc_allhost(dev);
#endif
		/* Load the hardware multicast filter list. */
		dev_mc_upload(dev);
		notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
	}
	return(ret);
}
212
213
214
215
216
217
/*
 *	Take a device down: stop the hardware, notify other subsystems,
 *	discard multicast state and protocol addresses, and free any
 *	packets still queued for transmission.  Always returns 0.
 */
int dev_close(struct device *dev)
{
	/* Only devices that are actually up need shutting down. */
	if (dev->flags != 0)
	{
		int ct=0;
		dev->flags = 0;

		/* Let the driver shut the hardware down. */
		if (dev->stop)
			dev->stop(dev);

		notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
#if 0
		/* Old direct-call teardown, superseded by the notifier chain. */
#ifdef CONFIG_INET
		ip_rt_flush(dev);
		arp_device_down(dev);
#endif
#ifdef CONFIG_IPX
		ipxrtr_device_down(dev);
#endif
#endif
		/* Blank the hardware multicast filter list. */
		dev_mc_discard(dev);

		/* Forget the protocol addresses. */
		dev->pa_addr = 0;
		dev->pa_dstaddr = 0;
		dev->pa_brdaddr = 0;
		dev->pa_mask = 0;

		/* Purge every priority queue of pending packets. */
		while(ct<DEV_NUMBUFFS)
		{
			struct sk_buff *skb;
			while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
				if(skb->free)
					kfree_skb(skb,FREE_WRITE);
			ct++;
		}
	}
	return(0);
}
272
273
274
275
276
277
278
279 int register_netdevice_notifier(struct notifier_block *nb)
280 {
281 return notifier_chain_register(&netdev_chain, nb);
282 }
283
284 int unregister_netdevice_notifier(struct notifier_block *nb)
285 {
286 return notifier_chain_unregister(&netdev_chain,nb);
287 }
288
289
290
291
292
293
294
295
296
297
298
/*
 *	Queue a buffer for transmission on 'dev' at priority 'pri' and
 *	try to start the transmitter.  A negative priority marks a
 *	packet re-submitted from dev_tint(); such packets must not be
 *	run through the queue again (it would reorder traffic).
 */
void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
	unsigned long flags;
	int nitcount;
	struct packet_type *ptype;
	int where = 0;		/* 1 = resubmitted packet, skip queueing */

	if (dev == NULL)
	{
		printk("dev.c: dev_queue_xmit: dev = NULL\n");
		return;
	}

	/* Lock the buffer against freeing while the driver owns it. */
	if(pri>=0 && !skb_device_locked(skb))
		skb_device_lock(skb);
#ifdef CONFIG_SLAVE_BALANCING
	/* Divert to a less loaded slave device when one is attached and up. */
	save_flags(flags);
	cli();
	if(dev->slave!=NULL && dev->slave->pkt_queue < dev->pkt_queue &&
				(dev->slave->flags & IFF_UP))
		dev=dev->slave;
	restore_flags(flags);
#endif
#ifdef CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	skb->dev = dev;

	/*
	 *	A buffer still chained onto a list here means a transmit
	 *	interrupt was lost somewhere; poke the driver to restart.
	 */
	if (skb->next != NULL)
	{
		printk("dev_queue_xmit: worked around a missed interrupt\n");
		dev->hard_start_xmit(NULL, dev);
		return;
	}

	/* Negative priority encodes "came from dev_tint()". */
	if (pri < 0)
	{
		pri = -pri-1;
		where = 1;
	}

	if (pri >= DEV_NUMBUFFS)
	{
		printk("bad priority in dev_queue_xmit.\n");
		pri = 1;
	}

	/*
	 *	Fill in the MAC header if ARP has not resolved it yet; a
	 *	nonzero return means the packet was handed off (to ARP)
	 *	and will be transmitted later.
	 */
	if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
		return;
	}

	save_flags(flags);
	cli();
	if (!where) {
		/*
		 *	Run the buffer through the queue (tail in, head
		 *	out) so the oldest waiting packet is sent first.
		 */
#ifdef CONFIG_SLAVE_BALANCING
		skb->in_dev_queue=1;
#endif
		skb_queue_tail(dev->buffs + pri,skb);
		skb_device_unlock(skb);		/* on the queue, not ours any more */
		skb = skb_dequeue(dev->buffs + pri);
		skb_device_lock(skb);		/* the dequeued buffer is ours now */
#ifdef CONFIG_SLAVE_BALANCING
		skb->in_dev_queue=0;
#endif
	}
	restore_flags(flags);

	/* Copy outgoing packets to any ETH_P_ALL taps (sniffers). */
	if(!where)
	{
		for (nitcount= dev_nit, ptype = ptype_base; nitcount > 0 && ptype != NULL; ptype = ptype->next)
		{
			if (ptype->type == htons(ETH_P_ALL) && (ptype->dev==dev || !ptype->dev))
			{
				struct sk_buff *skb2;
				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
					break;
				ptype->func(skb2, skb->dev, ptype);
				nitcount--;
			}
		}
	}

	/* Hand the buffer to the driver; 0 means it was accepted. */
	if (dev->hard_start_xmit(skb, dev) == 0) {
		return;
	}

	/*
	 *	Transmission failed (device busy): put the buffer back
	 *	on the head of its queue to be retried by dev_tint().
	 */
	cli();
#ifdef CONFIG_SLAVE_BALANCING
	skb->in_dev_queue=1;
	dev->pkt_queue++;
#endif
	skb_device_unlock(skb);
	skb_queue_head(dev->buffs + pri,skb);
	restore_flags(flags);
}
421
422
423
424
425
426
427
/*
 *	Called by drivers (typically at interrupt time) to hand a
 *	received buffer to the networking core.  The buffer is queued
 *	on the backlog and the network bottom half is marked to run.
 *	Packets are shed while the backlog is congested.
 */
void netif_rx(struct sk_buff *skb)
{
	static int dropping = 0;	/* nonzero while shedding load */

	/* The core owns the buffer now; stamp its arrival time. */
	skb->sk = NULL;
	skb->free = 1;
	if(skb->stamp.tv_sec==0)
		skb->stamp = xtime;

	/*
	 *	Hysteresis: start dropping above 300 queued packets,
	 *	stop again only once the backlog has fully drained.
	 */
	if (!backlog_size)
		dropping = 0;
	else if (backlog_size > 300)
		dropping = 1;

	if (dropping)
	{
		kfree_skb(skb, FREE_READ);
		return;
	}

#ifdef CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	skb_queue_tail(&backlog,skb);
	backlog_size++;

	/* Schedule net_bh() to process the queue. */
	mark_bh(NET_BH);
	return;
}
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
/*
 *	Old-style receive entry point.  The driver hands in either a
 *	ready-made sk_buff (flags & IN_SKBUFF) or a raw pointer into
 *	its receive ring, in which case the data is copied into a
 *	freshly allocated buffer, wrapping from rmem_end back to
 *	rmem_start as needed.
 *
 *	Returns 0 when the packet was queued, 1 when it was dropped.
 */
int dev_rint(unsigned char *buff, long len, int flags, struct device *dev)
{
	static int dropping = 0;	/* set while we are out of memory */
	struct sk_buff *skb = NULL;
	unsigned char *to;
	int amount, left;
	int len2;

	if (dev == NULL || buff == NULL || len <= 0)
		return(1);

	if (flags & IN_SKBUFF)
	{
		/* Driver already built an sk_buff for us. */
		skb = (struct sk_buff *) buff;
	}
	else
	{
		/* Stay in drop mode until the backlog has drained. */
		if (dropping)
		{
			if (skb_peek(&backlog) != NULL)
				return(1);
			printk("INET: dev_rint: no longer dropping packets.\n");
			dropping = 0;
		}

		skb = alloc_skb(len, GFP_ATOMIC);
		if (skb == NULL)
		{
			printk("dev_rint: packet dropped on %s (no memory) !\n",
			       dev->name);
			dropping = 1;
			return(1);
		}

		/*
		 *	Copy out of the (possibly circular) receive
		 *	ring, one contiguous chunk per iteration.
		 */
		to = skb->data;
		left = len;

		len2 = len;
		while (len2 > 0)
		{
			amount = min(len2, (unsigned long) dev->rmem_end -
						(unsigned long) buff);
			memcpy(to, buff, amount);
			len2 -= amount;
			left -= amount;
			buff += amount;
			to += amount;
			if ((unsigned long) buff == dev->rmem_end)
				buff = (unsigned char *) dev->rmem_start;
		}
	}

	/* Tag the buffer and feed it into the backlog queue. */
	skb->len = len;
	skb->dev = dev;
	skb->free = 1;

	netif_rx(skb);
	return(0);
}
561
562
563
564
565
566
567 void dev_transmit(void)
568 {
569 struct device *dev;
570
571 for (dev = dev_base; dev != NULL; dev = dev->next)
572 {
573 if (dev->flags != 0 && !dev->tbusy) {
574
575
576
577 dev_tint(dev);
578 }
579 }
580 }
581
582
583
584
585
586
587
588
589
590
591
592
593
594
/* Nonzero while net_bh() is executing; guards against re-entry. */
volatile char in_bh = 0;

/*
 *	Report whether the network bottom half is currently running.
 *	Returns 1 if so, 0 otherwise.
 */
int in_net_bh()
{
	return (in_bh != 0);
}
601
602
603
604
605
606
607
608
609
/*
 *	The network bottom half.  Runs after mark_bh(NET_BH): drains
 *	the backlog queue, demultiplexes each packet to the matching
 *	protocol handler(s), and kicks the transmitters before, between
 *	and after packets.  Never re-entered: a flag bit in in_bh is
 *	test-and-set on entry.
 */
void net_bh(void *tmp)
{
	struct sk_buff *skb;
	struct packet_type *ptype;
	struct packet_type *pt_prev;
	unsigned short type;

	/* Atomically refuse to run twice at once. */
	if (set_bit(1, (void*)&in_bh))
		return;

	/* Let the devices push out anything already queued. */
	dev_transmit();

	/*
	 *	Interrupts are disabled only around the backlog queue
	 *	manipulation; they are re-enabled while each packet is
	 *	processed.
	 */
	cli();

	while((skb=skb_dequeue(&backlog))!=NULL)
	{
		backlog_size--;

		sti();

		/* Skip the link-level header; h.raw points at the payload. */
		skb->h.raw = skb->data + skb->dev->hard_header_len;
		skb->len -= skb->dev->hard_header_len;

		/* Ask the device which protocol this frame carries. */
		type = skb->dev->type_trans(skb, skb->dev);

		/*
		 *	Deliver to every matching handler.  pt_prev lags
		 *	one match behind so that only the LAST match
		 *	consumes the real buffer; earlier matches get
		 *	clones.
		 */
		pt_prev = NULL;
		for (ptype = ptype_base; ptype != NULL; ptype = ptype->next)
		{
			if ((ptype->type == type || ptype->type == htons(ETH_P_ALL)) && (!ptype->dev || ptype->dev==skb->dev))
			{
				if(pt_prev)
				{
					struct sk_buff *skb2;

					skb2=skb_clone(skb, GFP_ATOMIC);

					/* Clone failed: that handler just misses this packet. */
					if(skb2)
						pt_prev->func(skb2, skb->dev, pt_prev);
				}

				pt_prev=ptype;
			}
		}

		/* Final (or only) match gets the original buffer... */
		if(pt_prev)
			pt_prev->func(skb, skb->dev, pt_prev);

		/* ...no match at all: discard the packet. */
		else
			kfree_skb(skb, FREE_WRITE);

		/* Receiving may have generated replies: push them out. */
		dev_transmit();
		cli();		/* protect the queue for the next pass */
	}

	/* Done: drop the re-entry guard. */
	in_bh = 0;
	sti();

	/* One last transmit kick now that the guard is clear. */
	dev_transmit();
}
751
752
753
754
755
756
757
/*
 *	Feed a device's queued packets back to dev_queue_xmit(), with
 *	the priority encoded negatively so they are recognised as
 *	resubmissions and not re-queued at the tail.  Called when a
 *	transmitter frees up; returns the moment the device goes busy
 *	again.
 */
void dev_tint(struct device *dev)
{
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);

	/* Work through the queues in priority order (0 = highest). */
	for(i = 0;i < DEV_NUMBUFFS; i++)
	{
		/* The queue itself is only touched with interrupts off. */
		cli();
		while((skb=skb_dequeue(&dev->buffs[i]))!=NULL)
		{
			/* Stop the buffer being freed while the driver has it. */
			skb_device_lock(skb);
			restore_flags(flags);

			/* -i - 1 tells dev_queue_xmit this is a resubmission. */
			dev_queue_xmit(skb,dev,-i - 1);

			/* Transmitter busy again: stop early. */
			if (dev->tbusy)
				return;
			cli();
		}
	}
	restore_flags(flags);
}
799
800
801
802
803
804
805
806
807 static int dev_ifconf(char *arg)
808 {
809 struct ifconf ifc;
810 struct ifreq ifr;
811 struct device *dev;
812 char *pos;
813 int len;
814 int err;
815
816
817
818
819
820 err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
821 if(err)
822 return err;
823 memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
824 len = ifc.ifc_len;
825 pos = ifc.ifc_buf;
826
827
828
829
830
831
832 err=verify_area(VERIFY_WRITE,pos,len);
833 if(err)
834 return err;
835
836
837
838
839
840 for (dev = dev_base; dev != NULL; dev = dev->next)
841 {
842 if(!(dev->flags & IFF_UP))
843 continue;
844 memset(&ifr, 0, sizeof(struct ifreq));
845 strcpy(ifr.ifr_name, dev->name);
846 (*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
847 (*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
848
849
850
851
852
853 memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
854 pos += sizeof(struct ifreq);
855 len -= sizeof(struct ifreq);
856
857
858
859
860
861 if (len < sizeof(struct ifreq))
862 break;
863 }
864
865
866
867
868
869 ifc.ifc_len = (pos - ifc.ifc_buf);
870 ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
871 memcpy_tofs(arg, &ifc, sizeof(struct ifconf));
872
873
874
875
876
877 return(pos - arg);
878 }
879
880
881
882
883
884
885
/*
 *	Format one line of /proc/net/dev statistics for 'dev' into
 *	'buffer'.  Returns the number of characters written.  Related
 *	error counters are summed into the few columns the table has
 *	room for.
 */
static int sprintf_stats(char *buffer, struct device *dev)
{
	/* Not every driver exports statistics. */
	struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
	int size;

	if (stats)
		size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
		   dev->name,
		   stats->rx_packets, stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors
		   + stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->tx_packets, stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors + stats->tx_aborted_errors
		   + stats->tx_window_errors + stats->tx_heartbeat_errors);
	else
		size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);

	return size;
}
908
909
910
911
912
913
/*
 *	/proc/net/dev read handler.  Produces the interface statistics
 *	table, honouring the proc-fs windowing protocol: only output
 *	falling inside [offset, offset+length) is returned, with
 *	*start pointing at where the window begins inside 'buffer'.
 */
int dev_get_info(char *buffer, char **start, off_t offset, int length)
{
	int len=0;		/* bytes currently kept in buffer */
	off_t begin=0;		/* file offset buffer[0] corresponds to */
	off_t pos=0;		/* file offset of the end of output so far */
	int size;

	struct device *dev;

	/* Table heading. */
	size = sprintf(buffer, "Inter-| Receive | Transmit\n"
	       " face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");

	pos+=size;
	len+=size;

	/* One line per registered interface. */
	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		size = sprintf_stats(buffer+len, dev);
		len+=size;
		pos=begin+len;

		/* Everything entirely before 'offset' can be discarded. */
		if(pos<offset)
		{
			len=0;
			begin=pos;
		}
		/* Past the requested window: stop formatting. */
		if(pos>offset+length)
			break;
	}

	/* Trim to exactly the requested window. */
	*start=buffer+(offset-begin);
	len-=(offset-begin);
	if(len>length)
		len=length;
	return len;
}
952
953
954
955
956
957
/*
 *	Validate a netmask (network byte order).  A good mask has
 *	contiguous one-bits from the top and must not claim any of the
 *	host bits set in 'addr'.  Returns 1 when the mask is bad,
 *	0 when it is acceptable.
 */
static inline int bad_mask(unsigned long mask, unsigned long addr)
{
	unsigned long host_bits = ~mask;

	/* The address must not use bits outside the network part. */
	if (addr & host_bits)
		return 1;

	/* In host order the inverted mask must look like 2^n - 1. */
	host_bits = ntohl(host_bits);
	if (host_bits & (host_bits + 1))
		return 1;
	return 0;
}
967
968
969
970
971
972
973
974
975 static int dev_ifsioc(void *arg, unsigned int getset)
976 {
977 struct ifreq ifr;
978 struct device *dev;
979 int ret;
980
981
982
983
984
985 int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
986 if(err)
987 return err;
988
989 memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));
990
991
992
993
994
995 if ((dev = dev_get(ifr.ifr_name)) == NULL)
996 return(-ENODEV);
997
998 switch(getset)
999 {
1000 case SIOCGIFFLAGS:
1001 ifr.ifr_flags = dev->flags;
1002 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1003 ret = 0;
1004 break;
1005 case SIOCSIFFLAGS:
1006 {
1007 int old_flags = dev->flags;
1008 #ifdef CONFIG_SLAVE_BALANCING
1009 if(dev->flags&IFF_SLAVE)
1010 return -EBUSY;
1011 #endif
1012 dev->flags = ifr.ifr_flags & (
1013 IFF_UP | IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
1014 IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
1015 IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER
1016 | IFF_MULTICAST);
1017 #ifdef CONFIG_SLAVE_BALANCING
1018 if(!(dev->flags&IFF_MASTER) && dev->slave)
1019 {
1020 dev->slave->flags&=~IFF_SLAVE;
1021 dev->slave=NULL;
1022 }
1023 #endif
1024
1025
1026
1027
1028 dev_mc_upload(dev);
1029 #if 0
1030 if( dev->set_multicast_list!=NULL)
1031 {
1032
1033
1034
1035
1036
1037 if ( (old_flags & IFF_PROMISC) && ((dev->flags & IFF_PROMISC) == 0))
1038 dev->set_multicast_list(dev,0,NULL);
1039
1040
1041
1042
1043
1044 if ( (dev->flags & IFF_PROMISC) && ((old_flags & IFF_PROMISC) == 0))
1045 dev->set_multicast_list(dev,-1,NULL);
1046 }
1047 #endif
1048
1049
1050
1051
1052 if ((old_flags & IFF_UP) && ((dev->flags & IFF_UP) == 0))
1053 {
1054 ret = dev_close(dev);
1055 }
1056 else
1057 {
1058
1059
1060
1061
1062 ret = (! (old_flags & IFF_UP) && (dev->flags & IFF_UP))
1063 ? dev_open(dev) : 0;
1064
1065
1066
1067 if(ret<0)
1068 dev->flags&=~IFF_UP;
1069 }
1070 }
1071 break;
1072
1073 case SIOCGIFADDR:
1074 (*(struct sockaddr_in *)
1075 &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
1076 (*(struct sockaddr_in *)
1077 &ifr.ifr_addr).sin_family = dev->family;
1078 (*(struct sockaddr_in *)
1079 &ifr.ifr_addr).sin_port = 0;
1080 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1081 ret = 0;
1082 break;
1083
1084 case SIOCSIFADDR:
1085 dev->pa_addr = (*(struct sockaddr_in *)
1086 &ifr.ifr_addr).sin_addr.s_addr;
1087 dev->family = ifr.ifr_addr.sa_family;
1088
1089 #ifdef CONFIG_INET
1090
1091
1092 dev->pa_mask = ip_get_mask(dev->pa_addr);
1093 #endif
1094 dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
1095 ret = 0;
1096 break;
1097
1098 case SIOCGIFBRDADDR:
1099 (*(struct sockaddr_in *)
1100 &ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
1101 (*(struct sockaddr_in *)
1102 &ifr.ifr_broadaddr).sin_family = dev->family;
1103 (*(struct sockaddr_in *)
1104 &ifr.ifr_broadaddr).sin_port = 0;
1105 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1106 ret = 0;
1107 break;
1108
1109 case SIOCSIFBRDADDR:
1110 dev->pa_brdaddr = (*(struct sockaddr_in *)
1111 &ifr.ifr_broadaddr).sin_addr.s_addr;
1112 ret = 0;
1113 break;
1114
1115 case SIOCGIFDSTADDR:
1116 (*(struct sockaddr_in *)
1117 &ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
1118 (*(struct sockaddr_in *)
1119 &ifr.ifr_broadaddr).sin_family = dev->family;
1120 (*(struct sockaddr_in *)
1121 &ifr.ifr_broadaddr).sin_port = 0;
1122 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1123 ret = 0;
1124 break;
1125
1126 case SIOCSIFDSTADDR:
1127 dev->pa_dstaddr = (*(struct sockaddr_in *)
1128 &ifr.ifr_dstaddr).sin_addr.s_addr;
1129 ret = 0;
1130 break;
1131
1132 case SIOCGIFNETMASK:
1133 (*(struct sockaddr_in *)
1134 &ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
1135 (*(struct sockaddr_in *)
1136 &ifr.ifr_netmask).sin_family = dev->family;
1137 (*(struct sockaddr_in *)
1138 &ifr.ifr_netmask).sin_port = 0;
1139 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1140 ret = 0;
1141 break;
1142
1143 case SIOCSIFNETMASK:
1144 {
1145 unsigned long mask = (*(struct sockaddr_in *)
1146 &ifr.ifr_netmask).sin_addr.s_addr;
1147 ret = -EINVAL;
1148
1149
1150
1151 if (bad_mask(mask,0))
1152 break;
1153 dev->pa_mask = mask;
1154 ret = 0;
1155 }
1156 break;
1157
1158 case SIOCGIFMETRIC:
1159
1160 ifr.ifr_metric = dev->metric;
1161 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1162 ret = 0;
1163 break;
1164
1165 case SIOCSIFMETRIC:
1166 dev->metric = ifr.ifr_metric;
1167 ret = 0;
1168 break;
1169
1170 case SIOCGIFMTU:
1171 ifr.ifr_mtu = dev->mtu;
1172 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1173 ret = 0;
1174 break;
1175
1176 case SIOCSIFMTU:
1177
1178
1179
1180
1181
1182 if(ifr.ifr_mtu<1 || ifr.ifr_mtu>3800)
1183 return -EINVAL;
1184 dev->mtu = ifr.ifr_mtu;
1185 ret = 0;
1186 break;
1187
1188 case SIOCGIFMEM:
1189
1190 printk("NET: ioctl(SIOCGIFMEM, %p)\n", arg);
1191 ret = -EINVAL;
1192 break;
1193
1194 case SIOCSIFMEM:
1195 printk("NET: ioctl(SIOCSIFMEM, %p)\n", arg);
1196 ret = -EINVAL;
1197 break;
1198
1199 case OLD_SIOCGIFHWADDR:
1200 memcpy(ifr.old_ifr_hwaddr,dev->dev_addr, MAX_ADDR_LEN);
1201 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1202 ret=0;
1203 break;
1204
1205 case SIOCGIFHWADDR:
1206 memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
1207 ifr.ifr_hwaddr.sa_family=dev->type;
1208 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1209 ret=0;
1210 break;
1211
1212 case SIOCSIFHWADDR:
1213 if(dev->set_mac_address==NULL)
1214 return -EOPNOTSUPP;
1215 if(ifr.ifr_hwaddr.sa_family!=dev->type)
1216 return -EINVAL;
1217 ret=dev->set_mac_address(dev,ifr.ifr_hwaddr.sa_data);
1218 break;
1219
1220 case SIOCGIFMAP:
1221 ifr.ifr_map.mem_start=dev->mem_start;
1222 ifr.ifr_map.mem_end=dev->mem_end;
1223 ifr.ifr_map.base_addr=dev->base_addr;
1224 ifr.ifr_map.irq=dev->irq;
1225 ifr.ifr_map.dma=dev->dma;
1226 ifr.ifr_map.port=dev->if_port;
1227 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1228 ret=0;
1229 break;
1230
1231 case SIOCSIFMAP:
1232 if(dev->set_config==NULL)
1233 return -EOPNOTSUPP;
1234 return dev->set_config(dev,&ifr.ifr_map);
1235
1236 case SIOCGIFSLAVE:
1237 #ifdef CONFIG_SLAVE_BALANCING
1238 if(dev->slave==NULL)
1239 return -ENOENT;
1240 strncpy(ifr.ifr_name,dev->name,sizeof(ifr.ifr_name));
1241 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1242 ret=0;
1243 #else
1244 return -ENOENT;
1245 #endif
1246 break;
1247 #ifdef CONFIG_SLAVE_BALANCING
1248 case SIOCSIFSLAVE:
1249 {
1250
1251
1252
1253
1254
1255 unsigned long flags;
1256 struct device *slave=dev_get(ifr.ifr_slave);
1257 save_flags(flags);
1258 if(slave==NULL)
1259 {
1260 return -ENODEV;
1261 }
1262 cli();
1263 if((slave->flags&(IFF_UP|IFF_RUNNING))!=(IFF_UP|IFF_RUNNING))
1264 {
1265 restore_flags(flags);
1266 return -EINVAL;
1267 }
1268 if(dev->flags&IFF_SLAVE)
1269 {
1270 restore_flags(flags);
1271 return -EBUSY;
1272 }
1273 if(dev->slave!=NULL)
1274 {
1275 restore_flags(flags);
1276 return -EBUSY;
1277 }
1278 if(slave->flags&IFF_SLAVE)
1279 {
1280 restore_flags(flags);
1281 return -EBUSY;
1282 }
1283 dev->slave=slave;
1284 slave->flags|=IFF_SLAVE;
1285 dev->flags|=IFF_MASTER;
1286 restore_flags(flags);
1287 ret=0;
1288 }
1289 break;
1290 #endif
1291
1292 case SIOCADDMULTI:
1293 if(dev->set_multicast_list==NULL)
1294 return -EINVAL;
1295 if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
1296 return -EINVAL;
1297 dev_mc_add(dev,ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
1298 return 0;
1299
1300 case SIOCDELMULTI:
1301 if(dev->set_multicast_list==NULL)
1302 return -EINVAL;
1303 if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
1304 return -EINVAL;
1305 dev_mc_delete(dev,ifr.ifr_hwaddr.sa_data,dev->addr_len, 1);
1306 return 0;
1307
1308
1309
1310
1311 default:
1312 if((getset >= SIOCDEVPRIVATE) &&
1313 (getset <= (SIOCDEVPRIVATE + 15))) {
1314 if(dev->do_ioctl==NULL)
1315 return -EOPNOTSUPP;
1316 ret=dev->do_ioctl(dev, &ifr, getset);
1317 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1318 break;
1319 }
1320
1321 ret = -EINVAL;
1322 }
1323 return(ret);
1324 }
1325
1326
1327
1328
1329
1330
1331
1332 int dev_ioctl(unsigned int cmd, void *arg)
1333 {
1334 switch(cmd)
1335 {
1336 case SIOCGIFCONF:
1337 (void) dev_ifconf((char *) arg);
1338 return 0;
1339
1340
1341
1342
1343
1344 case SIOCGIFFLAGS:
1345 case SIOCGIFADDR:
1346 case SIOCGIFDSTADDR:
1347 case SIOCGIFBRDADDR:
1348 case SIOCGIFNETMASK:
1349 case SIOCGIFMETRIC:
1350 case SIOCGIFMTU:
1351 case SIOCGIFMEM:
1352 case SIOCGIFHWADDR:
1353 case SIOCSIFHWADDR:
1354 case OLD_SIOCGIFHWADDR:
1355 case SIOCGIFSLAVE:
1356 case SIOCGIFMAP:
1357 return dev_ifsioc(arg, cmd);
1358
1359
1360
1361
1362
1363 case SIOCSIFFLAGS:
1364 case SIOCSIFADDR:
1365 case SIOCSIFDSTADDR:
1366 case SIOCSIFBRDADDR:
1367 case SIOCSIFNETMASK:
1368 case SIOCSIFMETRIC:
1369 case SIOCSIFMTU:
1370 case SIOCSIFMEM:
1371 case SIOCSIFMAP:
1372 case SIOCSIFSLAVE:
1373 case SIOCADDMULTI:
1374 case SIOCDELMULTI:
1375 if (!suser())
1376 return -EPERM;
1377 return dev_ifsioc(arg, cmd);
1378
1379 case SIOCSIFLINK:
1380 return -EINVAL;
1381
1382
1383
1384
1385
1386 default:
1387 if((cmd >= SIOCDEVPRIVATE) &&
1388 (cmd <= (SIOCDEVPRIVATE + 15))) {
1389 return dev_ifsioc(arg, cmd);
1390 }
1391 return -EINVAL;
1392 }
1393 }
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406 void dev_init(void)
1407 {
1408 struct device *dev, *dev2;
1409
1410
1411
1412
1413
1414
1415
1416
1417 dev2 = NULL;
1418 for (dev = dev_base; dev != NULL; dev=dev->next)
1419 {
1420 if (dev->init && dev->init(dev))
1421 {
1422
1423
1424
1425
1426 if (dev2 == NULL)
1427 dev_base = dev->next;
1428 else
1429 dev2->next = dev->next;
1430 }
1431 else
1432 {
1433 dev2 = dev;
1434 }
1435 }
1436 }