This source file includes following definitions.
- min
- dev_add_pack
- dev_remove_pack
- dev_get
- dev_open
- dev_close
- register_netdevice_notifier
- unregister_netdevice_notifier
- dev_queue_xmit
- netif_rx
- dev_rint
- dev_transmit
- in_net_bh
- net_bh
- dev_tint
- dev_ifconf
- sprintf_stats
- dev_get_info
- bad_mask
- dev_ifsioc
- dev_ioctl
- dev_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40 #include <asm/segment.h>
41 #include <asm/system.h>
42 #include <asm/bitops.h>
43 #include <linux/config.h>
44 #include <linux/types.h>
45 #include <linux/kernel.h>
46 #include <linux/sched.h>
47 #include <linux/string.h>
48 #include <linux/mm.h>
49 #include <linux/socket.h>
50 #include <linux/sockios.h>
51 #include <linux/in.h>
52 #include <linux/errno.h>
53 #include <linux/interrupt.h>
54 #include <linux/if_ether.h>
55 #include <linux/inet.h>
56 #include <linux/netdevice.h>
57 #include <linux/etherdevice.h>
58 #include <linux/notifier.h>
59 #include "ip.h"
60 #include "route.h"
61 #include <linux/skbuff.h>
62 #include "sock.h"
63 #include "arp.h"
64
65
66
67
68
69
70
/* Head of the linked list of registered packet type handlers. */
struct packet_type *ptype_base = NULL;
72
73
74
75
76
/* Chain of subsystems wanting notification of device up/down events. */
struct notifier_block *netdev_chain=NULL;
78
79
80
81
82
83
/*
 *	Queue of packets received at interrupt time, waiting to be
 *	processed by the network bottom half (net_bh).  Statically
 *	initialised as an empty circular list pointing at itself.
 */
static struct sk_buff_head backlog =
{
	(struct sk_buff *)&backlog, (struct sk_buff *)&backlog
#ifdef CONFIG_SKB_CHECK
	,SK_HEAD_SKB
#endif
};

/* Number of packets currently sitting on the backlog queue. */
static int backlog_size = 0;
97
98
99
100
101
/*
 *	Return the smaller of two unsigned long values.
 */
static __inline__ unsigned long min(unsigned long a, unsigned long b)
{
	if (a < b)
		return a;
	return b;
}
106
107
108
109
110
111
112
113
114
115
116
117
/* Count of ETH_P_ALL wildcard taps (sniffers) currently registered. */
static int dev_nit=0;
119
120
121
122
123
124
125
126 void dev_add_pack(struct packet_type *pt)
127 {
128 if(pt->type==htons(ETH_P_ALL))
129 dev_nit++;
130 pt->next = ptype_base;
131 ptype_base = pt;
132 }
133
134
135
136
137
138
139 void dev_remove_pack(struct packet_type *pt)
140 {
141 struct packet_type **pt1;
142 if(pt->type==htons(ETH_P_ALL))
143 dev_nit--;
144 for(pt1=&ptype_base; (*pt1)!=NULL; pt1=&((*pt1)->next))
145 {
146 if(pt==(*pt1))
147 {
148 *pt1=pt->next;
149 return;
150 }
151 }
152 }
153
154
155
156
157
158
159
160
161
162
163
164 struct device *dev_get(char *name)
165 {
166 struct device *dev;
167
168 for (dev = dev_base; dev != NULL; dev = dev->next)
169 {
170 if (strcmp(dev->name, name) == 0)
171 return(dev);
172 }
173 return(NULL);
174 }
175
176
177
178
179
180
/*
 *	Prepare an interface for use.  Calls the driver's open routine
 *	(if any) and, on success, marks the device up and running and
 *	tells the rest of the system about the state change.
 *	Returns 0 on success or the driver's error code.
 */
int dev_open(struct device *dev)
{
	int ret = 0;

	/*
	 *	Call the device-specific open method.
	 */
	if (dev->open)
		ret = dev->open(dev);

	/*
	 *	If it went open, mark the device up and running.
	 */
	if (ret == 0)
	{
		dev->flags |= (IFF_UP | IFF_RUNNING);
#ifdef CONFIG_IP_MULTICAST
		/*
		 *	Join the all-hosts multicast group on this interface.
		 */
		ip_mc_allhost(dev);
#endif
		/* Load the hardware multicast filter for the new state. */
		dev_mc_upload(dev);
		/* Tell interested parties the interface is up. */
		notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
	}
	return(ret);
}
212
213
214
215
216
217
/*
 *	Take an interface down.  Stops the driver, notifies the rest of
 *	the system, discards multicast state and any queued packets, and
 *	clears the protocol addresses.  Calling this on an interface
 *	that is already down (flags == 0) is a harmless no-op.
 */
int dev_close(struct device *dev)
{
	/*
	 *	Only shut the device down if it is currently marked up
	 *	in some way.
	 */
	if (dev->flags != 0)
	{
		int ct=0;
		dev->flags = 0;		/* mark it fully down first */
		/*
		 *	Call the device-specific close routine, if any.
		 */
		if (dev->stop)
			dev->stop(dev);

		notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
#if 0
		/*
		 *	Old per-protocol teardown, now handled by the
		 *	notifier chain above.  Kept disabled.
		 */
#ifdef CONFIG_INET
		ip_rt_flush(dev);
		arp_device_down(dev);
#endif
#ifdef CONFIG_IPX
		ipxrtr_device_down(dev);
#endif
#endif
		/*
		 *	Flush the multicast address chain.
		 */
		dev_mc_discard(dev);
		/*
		 *	Blank the protocol-level addresses.
		 */
		dev->pa_addr = 0;
		dev->pa_dstaddr = 0;
		dev->pa_brdaddr = 0;
		dev->pa_mask = 0;
		/*
		 *	Purge any packets still queued on the device.
		 *	Only free buffers the owner has already released.
		 */
		while(ct<DEV_NUMBUFFS)
		{
			struct sk_buff *skb;
			while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
				if(skb->free)
					kfree_skb(skb,FREE_WRITE);
			ct++;
		}
	}
	return(0);
}
272
273
274
275
276
277
278
279 int register_netdevice_notifier(struct notifier_block *nb)
280 {
281 return notifier_chain_register(&netdev_chain, nb);
282 }
283
284 int unregister_netdevice_notifier(struct notifier_block *nb)
285 {
286 return notifier_chain_unregister(&netdev_chain,nb);
287 }
288
289
290
291
292
293
294
295
296
297
298
/*
 *	Send (or queue for later sending) a packet on a device.
 *
 *	pri >= 0 is a fresh submission queued at the tail of the priority
 *	queue; a negative pri is used by dev_tint() to resubmit an
 *	already-dequeued (and already-locked) frame, which on failure
 *	goes back on the HEAD of its queue to preserve ordering.
 */
void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
	unsigned long flags;
	int nitcount;
	struct packet_type *ptype;
	int where = 0;		/* set when the frame must requeue at the front */

	if (dev == NULL)
	{
		printk("dev.c: dev_queue_xmit: dev = NULL\n");
		return;
	}

	/* Fresh submissions must hold the device lock on the buffer. */
	if(pri>=0 && !skb_device_locked(skb))
		skb_device_lock(skb);
#ifdef CONFIG_SLAVE_BALANCING
	save_flags(flags);
	cli();
	/* Divert the frame to a less loaded, running slave device. */
	if(dev->slave!=NULL && dev->slave->pkt_queue < dev->pkt_queue &&
				(dev->slave->flags & IFF_UP))
		dev=dev->slave;
	restore_flags(flags);
#endif
#ifdef CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	skb->dev = dev;

	/*
	 *	A buffer that is still chained on a queue means a transmit
	 *	interrupt was missed; kick the driver and let the queued
	 *	copy go out in its turn.
	 */
	if (skb->next != NULL)
	{
		printk("dev_queue_xmit: worked around a missed interrupt\n");
		dev->hard_start_xmit(NULL, dev);
		return;
	}

	/*
	 *	Negative priority flags a retransmit attempt pulled off the
	 *	queue front; decode the real priority and remember it.
	 */
	if (pri < 0)
	{
		pri = -pri-1;
		where = 1;
	}

	if (pri >= DEV_NUMBUFFS)
	{
		printk("bad priority in dev_queue_xmit.\n");
		pri = 1;
	}

	/*
	 *	If the link-level address has not been resolved, let the
	 *	device rebuild the header (e.g. wait on ARP).  A non-zero
	 *	return means the frame is held pending resolution.
	 */
	if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
		return;
	}

	save_flags(flags);
	cli();
	if (!where) {
		/*
		 *	Queue at the tail, then immediately dequeue the head,
		 *	so frames leave in FIFO order even under load.  The
		 *	lock follows whichever buffer we end up holding.
		 */
#ifdef CONFIG_SLAVE_BALANCING
		skb->in_dev_queue=1;
#endif
		skb_queue_tail(dev->buffs + pri,skb);
		skb_device_unlock(skb);		/* on the queue - safe to free */
		skb = skb_dequeue(dev->buffs + pri);
		skb_device_lock(skb);		/* new head needs locking down */
#ifdef CONFIG_SLAVE_BALANCING
		skb->in_dev_queue=0;
#endif
	}
	restore_flags(flags);

	/* Copy outgoing frames to any wildcard (sniffer) handlers. */
	if(!where)
	{
		for (nitcount= dev_nit, ptype = ptype_base; nitcount > 0 && ptype != NULL; ptype = ptype->next)
		{
			/*
			 *	Never hand a packet back to the socket it
			 *	originated from.
			 */
			if (ptype->type == htons(ETH_P_ALL) &&
			   (ptype->dev == dev || !ptype->dev) &&
			   ((struct sock *)ptype->data != skb->sk))
			{
				struct sk_buff *skb2;
				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
					break;
				ptype->func(skb2, skb->dev, ptype);
				nitcount--;
			}
		}
	}
	if (dev->hard_start_xmit(skb, dev) == 0) {
		/*
		 *	Packet is now solely the responsibility of the driver.
		 */
		return;
	}

	/*
	 *	Transmission failed; put the skb back on a list.  Once on
	 *	the list it is safe and no longer device locked.
	 */
	cli();
#ifdef CONFIG_SLAVE_BALANCING
	skb->in_dev_queue=1;
	dev->pkt_queue++;
#endif
	skb_device_unlock(skb);
	skb_queue_head(dev->buffs + pri,skb);
	restore_flags(flags);
}
426
427
428
429
430
431
432
/*
 *	Receive a packet from a device driver and queue it for the
 *	protocol layers.  Runs at interrupt time, so the real work is
 *	deferred to the network bottom half via mark_bh(NET_BH).
 */
void netif_rx(struct sk_buff *skb)
{
	static int dropping = 0;	/* set while we are shedding load */

	/*
	 *	Received buffers are un-owned and freeable; stamp the
	 *	arrival time if the driver did not.
	 */
	skb->sk = NULL;
	skb->free = 1;
	if(skb->stamp.tv_sec==0)
		skb->stamp = xtime;

	/*
	 *	Load shedding with hysteresis: start dropping once the
	 *	backlog exceeds 300 frames, stop only when fully drained.
	 */
	if (!backlog_size)
		dropping = 0;
	else if (backlog_size > 300)
		dropping = 1;

	if (dropping)
	{
		kfree_skb(skb, FREE_READ);
		return;
	}

	/*
	 *	Add it to the backlog queue.
	 */
#ifdef CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	skb_queue_tail(&backlog,skb);
	backlog_size++;

	/*
	 *	Schedule the bottom half to run once the hardware
	 *	interrupt returns.
	 */
	mark_bh(NET_BH);
	return;
}
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
/*
 *	Old-style low level receive entry: take a raw buffer (or, with
 *	IN_SKBUFF set in 'flags', a ready-made sk_buff), copy it out of
 *	the possibly circular device receive area, and hand it to
 *	netif_rx().  Returns 1 if the packet was dropped or ignored,
 *	0 if it was queued.  New drivers should call netif_rx directly.
 */
int dev_rint(unsigned char *buff, long len, int flags, struct device *dev)
{
	static int dropping = 0;	/* dropping because we are out of memory */
	struct sk_buff *skb = NULL;
	unsigned char *to;
	int amount, left;
	int len2;

	if (dev == NULL || buff == NULL || len <= 0)
		return(1);

	if (flags & IN_SKBUFF)
	{
		/* The driver has already built an sk_buff for us. */
		skb = (struct sk_buff *) buff;
	}
	else
	{
		if (dropping)
		{
			/* Keep dropping until the backlog has drained. */
			if (skb_peek(&backlog) != NULL)
				return(1);
			printk("INET: dev_rint: no longer dropping packets.\n");
			dropping = 0;
		}

		skb = alloc_skb(len, GFP_ATOMIC);
		if (skb == NULL)
		{
			printk("dev_rint: packet dropped on %s (no memory) !\n",
			       dev->name);
			dropping = 1;
			return(1);
		}

		/*
		 *	Copy the packet out of the device's receive area,
		 *	which may be a circular buffer wrapping at rmem_end
		 *	back to rmem_start.
		 */
		to = skb->data;
		left = len;

		len2 = len;
		while (len2 > 0)
		{
			/* Copy up to the end of the ring, then wrap. */
			amount = min(len2, (unsigned long) dev->rmem_end -
						(unsigned long) buff);
			memcpy(to, buff, amount);
			len2 -= amount;
			left -= amount;
			buff += amount;
			to += amount;
			if ((unsigned long) buff == dev->rmem_end)
				buff = (unsigned char *) dev->rmem_start;
		}
	}

	/*
	 *	Tag the frame and pass it up to the protocol layers.
	 */
	skb->len = len;
	skb->dev = dev;
	skb->free = 1;

	netif_rx(skb);
	/*
	 *	OK, all done.
	 */
	return(0);
}
566
567
568
569
570
571
572 void dev_transmit(void)
573 {
574 struct device *dev;
575
576 for (dev = dev_base; dev != NULL; dev = dev->next)
577 {
578 if (dev->flags != 0 && !dev->tbusy) {
579
580
581
582 dev_tint(dev);
583 }
584 }
585 }
586
587
588
589
590
591
592
593
594
595
596
597
598
599
/*
 *	Flag set while the network bottom half is executing.  Used to
 *	prevent re-entry of net_bh and queried via in_net_bh().
 */
volatile char in_bh = 0;

/* Report whether we are currently inside the network bottom half. */
int in_net_bh()
{
	return in_bh != 0;
}
606
607
608
609
610
611
612
613
614
/*
 *	The network bottom half handler.  Runs after drivers have queued
 *	frames via netif_rx().  Drains the backlog, feeding each frame to
 *	every matching protocol handler, and pushes pending transmits.
 */
void net_bh(void *tmp)
{
	struct sk_buff *skb;
	struct packet_type *ptype;
	struct packet_type *pt_prev;
	unsigned short type;

	/*
	 *	Atomically test-and-set our BUSY flag.  If the bottom half
	 *	is already running, just return.
	 */
	if (set_bit(1, (void*)&in_bh))
		return;

	/*
	 *	Start any transmissions first; this keeps the devices busy
	 *	while we process the receive backlog.
	 */
	dev_transmit();

	/*
	 *	Disable interrupts around the dequeue so a driver cannot
	 *	race us modifying the backlog queue.
	 */
	cli();

	/*
	 *	While the queue is not empty...
	 */
	while((skb=skb_dequeue(&backlog))!=NULL)
	{
		/*
		 *	We have a packet, therefore the queue has shrunk.
		 */
		backlog_size--;

		sti();	/* interrupts may run while we process the frame */

		/*
		 *	Skip past the link-level header.  This assumes the
		 *	skb data pointer is at the MAC header.
		 */
		skb->h.raw = skb->data + skb->dev->hard_header_len;
		skb->len -= skb->dev->hard_header_len;

		/*
		 *	Ask the interface to extract the packet protocol ID.
		 */
		type = skb->dev->type_trans(skb, skb->dev);

		/*
		 *	Deliver to every handler matching this type (or the
		 *	ETH_P_ALL wildcard) and this device.  To save one
		 *	clone per handler we clone only when a SECOND match
		 *	is found; the last match consumes the original skb.
		 */
		pt_prev = NULL;
		for (ptype = ptype_base; ptype != NULL; ptype = ptype->next)
		{
			if ((ptype->type == type || ptype->type == htons(ETH_P_ALL)) && (!ptype->dev || ptype->dev==skb->dev))
			{
				/*
				 *	A match is already queued - deliver a
				 *	clone of the packet to it now.
				 */
				if(pt_prev)
				{
					struct sk_buff *skb2;

					skb2=skb_clone(skb, GFP_ATOMIC);

					/*
					 *	The clone can fail under memory
					 *	pressure; that handler simply
					 *	misses this frame.
					 */
					if(skb2)
						pt_prev->func(skb2, skb->dev, pt_prev);
				}
				/* Remember the latest match. */
				pt_prev=ptype;
			}
		}

		/*
		 *	Hand the original buffer to the final match, or free
		 *	it if nobody wanted this packet type.
		 */
		if(pt_prev)
			pt_prev->func(skb, skb->dev, pt_prev);
		else
			kfree_skb(skb, FREE_WRITE);

		/*
		 *	Again, see if we can transmit anything now.
		 */
		dev_transmit();
		cli();	/* protect the queue for the next dequeue */
	}	/* End of queue loop */

	/*
	 *	We have emptied the queue; clear the busy flag.
	 */
	in_bh = 0;
	sti();

	/*
	 *	One last output flush.
	 */
	dev_transmit();
}
756
757
758
759
760
761
762
/*
 *	Called when a device driver is ready to transmit a packet.
 *	Pulls frames off the device's priority queues (highest priority
 *	first) and resubmits them via dev_queue_xmit with a NEGATIVE
 *	priority, so a failed send requeues at the head of its queue.
 */
void dev_tint(struct device *dev)
{
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);
	/*
	 *	Work the queues in priority order.
	 */
	for(i = 0;i < DEV_NUMBUFFS; i++)
	{
		/*
		 *	Interrupts stay off across the dequeue/lock step so
		 *	the buffer cannot be freed underneath us.
		 */
		cli();
		while((skb=skb_dequeue(&dev->buffs[i]))!=NULL)
		{
			/*
			 *	Stop anyone freeing the buffer while we
			 *	retransmit it.
			 */
			skb_device_lock(skb);
			restore_flags(flags);
			/*
			 *	Feed it to the output stage; the negative
			 *	priority makes a failure requeue at the front.
			 */
			dev_queue_xmit(skb,dev,-i - 1);
			/*
			 *	If the device can take no more, stop here.
			 */
			if (dev->tbusy)
				return;
			cli();
		}
	}
	restore_flags(flags);
}
804
805
806
807
808
809
810
811
812 static int dev_ifconf(char *arg)
813 {
814 struct ifconf ifc;
815 struct ifreq ifr;
816 struct device *dev;
817 char *pos;
818 int len;
819 int err;
820
821
822
823
824
825 err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
826 if(err)
827 return err;
828 memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
829 len = ifc.ifc_len;
830 pos = ifc.ifc_buf;
831
832
833
834
835
836
837 err=verify_area(VERIFY_WRITE,pos,len);
838 if(err)
839 return err;
840
841
842
843
844
845 for (dev = dev_base; dev != NULL; dev = dev->next)
846 {
847 if(!(dev->flags & IFF_UP))
848 continue;
849 memset(&ifr, 0, sizeof(struct ifreq));
850 strcpy(ifr.ifr_name, dev->name);
851 (*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
852 (*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
853
854
855
856
857
858 memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
859 pos += sizeof(struct ifreq);
860 len -= sizeof(struct ifreq);
861
862
863
864
865
866 if (len < sizeof(struct ifreq))
867 break;
868 }
869
870
871
872
873
874 ifc.ifc_len = (pos - ifc.ifc_buf);
875 ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
876 memcpy_tofs(arg, &ifc, sizeof(struct ifconf));
877
878
879
880
881
882 return(pos - arg);
883 }
884
885
886
887
888
889
890
891 static int sprintf_stats(char *buffer, struct device *dev)
892 {
893 struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
894 int size;
895
896 if (stats)
897 size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
898 dev->name,
899 stats->rx_packets, stats->rx_errors,
900 stats->rx_dropped + stats->rx_missed_errors,
901 stats->rx_fifo_errors,
902 stats->rx_length_errors + stats->rx_over_errors
903 + stats->rx_crc_errors + stats->rx_frame_errors,
904 stats->tx_packets, stats->tx_errors, stats->tx_dropped,
905 stats->tx_fifo_errors, stats->collisions,
906 stats->tx_carrier_errors + stats->tx_aborted_errors
907 + stats->tx_window_errors + stats->tx_heartbeat_errors);
908 else
909 size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);
910
911 return size;
912 }
913
914
915
916
917
918
/*
 *	Called from the PROCfs module to build /proc/net/dev: a header
 *	followed by one statistics line per device, honouring the
 *	offset/length windowing protocol used by proc reads.
 */
int dev_get_info(char *buffer, char **start, off_t offset, int length)
{
	int len=0;		/* bytes currently held in buffer */
	off_t begin=0;		/* file offset of buffer[0] */
	off_t pos=0;		/* file offset just past what we produced */
	int size;

	struct device *dev;

	/* Column headers. */
	size = sprintf(buffer, "Inter-| Receive | Transmit\n"
			    " face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");

	pos+=size;
	len+=size;

	/* One line per device. */
	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		size = sprintf_stats(buffer+len, dev);
		len+=size;
		pos=begin+len;

		/* Still before the requested window: discard and restart. */
		if(pos<offset)
		{
			len=0;
			begin=pos;
		}
		/* Past the end of the requested window: stop producing. */
		if(pos>offset+length)
			break;
	}

	*start=buffer+(offset-begin);	/* start of wanted data */
	len-=(offset-begin);		/* trim data before the window */
	if(len>length)
		len=length;		/* clip to the window size */
	return len;
}
957
958
959
960
961
962
/*
 *	Sanity-check a netmask.  Both arguments are in network byte
 *	order.  Returns 1 (bad) if 'addr' has bits set outside the mask,
 *	or if the mask is not a contiguous run of high-order bits in
 *	host order; returns 0 if the mask is acceptable.
 */
static inline int bad_mask(unsigned long mask, unsigned long addr)
{
	unsigned long inv = ~mask;	/* the host-part bits of the mask */

	if (addr & inv)
		return 1;		/* address uses host-part bits */
	inv = ntohl(inv);
	/* A contiguous mask inverts to 2^n - 1, for which inv&(inv+1)==0. */
	return (inv & (inv + 1)) != 0;
}
972
973
974
975
976
977
978
979
/*
 *	Perform the per-interface SIOCxIFxxx ioctls.  'arg' points at a
 *	user-space struct ifreq naming the device; 'getset' is the ioctl
 *	command.  The structure is copied in, acted upon, and (for the
 *	"get" calls) copied back out.  Returns 0 or a negative errno.
 */
static int dev_ifsioc(void *arg, unsigned int getset)
{
	struct ifreq ifr;
	struct device *dev;
	int ret;

	/*
	 *	Fetch the caller's info block into kernel space.
	 */
	int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
	if(err)
		return err;

	memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));

	/*
	 *	See which interface the caller is talking about.
	 */
	if ((dev = dev_get(ifr.ifr_name)) == NULL)
		return(-ENODEV);

	switch(getset)
	{
		case SIOCGIFFLAGS:	/* Get interface flags */
			ifr.ifr_flags = dev->flags;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;
		case SIOCSIFFLAGS:	/* Set interface flags */
			{
				int old_flags = dev->flags;
#ifdef CONFIG_SLAVE_BALANCING
				/* A slave's flags belong to its master. */
				if(dev->flags&IFF_SLAVE)
					return -EBUSY;
#endif
				/* Only the defined flag bits may be set. */
				dev->flags = ifr.ifr_flags & (
					IFF_UP | IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
					IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
					IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER
					| IFF_MULTICAST);
#ifdef CONFIG_SLAVE_BALANCING
				/* Dropping MASTER releases any attached slave. */
				if(!(dev->flags&IFF_MASTER) && dev->slave)
				{
					dev->slave->flags&=~IFF_SLAVE;
					dev->slave=NULL;
				}
#endif
				/*
				 *	Load the hardware filter state for the
				 *	new flags.
				 */
				dev_mc_upload(dev);
#if 0
				/*
				 *	Old explicit promiscuous-mode toggling,
				 *	now handled by dev_mc_upload.  Disabled.
				 */
				if( dev->set_multicast_list!=NULL)
				{
					/*
					 *	Has promiscuous mode been turned off?
					 */
					if ( (old_flags & IFF_PROMISC) && ((dev->flags & IFF_PROMISC) == 0))
						dev->set_multicast_list(dev,0,NULL);
					/*
					 *	Has it been turned on?
					 */
					if ( (dev->flags & IFF_PROMISC) && ((old_flags & IFF_PROMISC) == 0))
						dev->set_multicast_list(dev,-1,NULL);
				}
#endif
				/*
				 *	Have we downed the interface?
				 */
				if ((old_flags & IFF_UP) && ((dev->flags & IFF_UP) == 0))
				{
					ret = dev_close(dev);
				}
				else
				{
					/*
					 *	Have we upped the interface?
					 */
					ret = (! (old_flags & IFF_UP) && (dev->flags & IFF_UP))
						? dev_open(dev) : 0;
					/*
					 *	If the open failed, undo the UP flag.
					 */
					if(ret<0)
						dev->flags&=~IFF_UP;
				}
			}
			break;

		case SIOCGIFADDR:	/* Get interface address (and family) */
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_port = 0;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFADDR:	/* Set interface address (and family) */
			dev->pa_addr = (*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_addr.s_addr;
			dev->family = ifr.ifr_addr.sa_family;

#ifdef CONFIG_INET
			/* Derive the classful default mask for the address. */
			dev->pa_mask = ip_get_mask(dev->pa_addr);
#endif
			dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
			ret = 0;
			break;

		case SIOCGIFBRDADDR:	/* Get the broadcast address */
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_port = 0;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFBRDADDR:	/* Set the broadcast address */
			dev->pa_brdaddr = (*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr;
			ret = 0;
			break;

		case SIOCGIFDSTADDR:	/* Get the point-to-point destination */
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_port = 0;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFDSTADDR:	/* Set the point-to-point destination */
			dev->pa_dstaddr = (*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr;
			ret = 0;
			break;

		case SIOCGIFNETMASK:	/* Get the netmask */
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_port = 0;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFNETMASK:	/* Set the netmask */
			{
				unsigned long mask = (*(struct sockaddr_in *)
					&ifr.ifr_netmask).sin_addr.s_addr;
				ret = -EINVAL;
				/*
				 *	Reject non-contiguous masks.
				 */
				if (bad_mask(mask,0))
					break;
				dev->pa_mask = mask;
				ret = 0;
			}
			break;

		case SIOCGIFMETRIC:	/* Get the routing metric */

			ifr.ifr_metric = dev->metric;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFMETRIC:	/* Set the routing metric */
			dev->metric = ifr.ifr_metric;
			ret = 0;
			break;

		case SIOCGIFMTU:	/* Get the MTU */
			ifr.ifr_mtu = dev->mtu;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFMTU:	/* Set the MTU */
			/*
			 *	Sanity-check the requested MTU; the upper
			 *	bound of 3800 reflects the largest frame the
			 *	supported media can carry.
			 */
			if(ifr.ifr_mtu<1 || ifr.ifr_mtu>3800)
				return -EINVAL;
			dev->mtu = ifr.ifr_mtu;
			ret = 0;
			break;

		case SIOCGIFMEM:	/* Get the per-device memory space - not implemented */

			printk("NET: ioctl(SIOCGIFMEM, %p)\n", arg);
			ret = -EINVAL;
			break;

		case SIOCSIFMEM:	/* Set the per-device memory space - not implemented */
			printk("NET: ioctl(SIOCSIFMEM, %p)\n", arg);
			ret = -EINVAL;
			break;

		case OLD_SIOCGIFHWADDR:	/* Get hardware address - old (family-less) layout */
			memcpy(ifr.old_ifr_hwaddr,dev->dev_addr, MAX_ADDR_LEN);
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			ret=0;
			break;

		case SIOCGIFHWADDR:	/* Get hardware address */
			memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
			ifr.ifr_hwaddr.sa_family=dev->type;
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			ret=0;
			break;

		case SIOCSIFHWADDR:	/* Set hardware address, if the driver allows it */
			if(dev->set_mac_address==NULL)
				return -EOPNOTSUPP;
			if(ifr.ifr_hwaddr.sa_family!=dev->type)
				return -EINVAL;
			ret=dev->set_mac_address(dev,ifr.ifr_hwaddr.sa_data);
			break;

		case SIOCGIFMAP:	/* Get the hardware configuration */
			ifr.ifr_map.mem_start=dev->mem_start;
			ifr.ifr_map.mem_end=dev->mem_end;
			ifr.ifr_map.base_addr=dev->base_addr;
			ifr.ifr_map.irq=dev->irq;
			ifr.ifr_map.dma=dev->dma;
			ifr.ifr_map.port=dev->if_port;
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			ret=0;
			break;

		case SIOCSIFMAP:	/* Set the hardware configuration, if supported */
			if(dev->set_config==NULL)
				return -EOPNOTSUPP;
			return dev->set_config(dev,&ifr.ifr_map);

		case SIOCGIFSLAVE:	/* Get the attached slave device name */
#ifdef CONFIG_SLAVE_BALANCING
			if(dev->slave==NULL)
				return -ENOENT;
			strncpy(ifr.ifr_name,dev->name,sizeof(ifr.ifr_name));
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			ret=0;
#else
			return -ENOENT;
#endif
			break;
#ifdef CONFIG_SLAVE_BALANCING
		case SIOCSIFSLAVE:	/* Attach a slave device */
			{
				/*
				 *	The slave must exist, be up and
				 *	running, and neither side may already
				 *	be in a master/slave relationship.
				 *	All checks run with interrupts off so
				 *	the state cannot change under us.
				 */
				unsigned long flags;
				struct device *slave=dev_get(ifr.ifr_slave);
				save_flags(flags);
				if(slave==NULL)
				{
					return -ENODEV;
				}
				cli();
				if((slave->flags&(IFF_UP|IFF_RUNNING))!=(IFF_UP|IFF_RUNNING))
				{
					restore_flags(flags);
					return -EINVAL;
				}
				if(dev->flags&IFF_SLAVE)
				{
					restore_flags(flags);
					return -EBUSY;
				}
				if(dev->slave!=NULL)
				{
					restore_flags(flags);
					return -EBUSY;
				}
				if(slave->flags&IFF_SLAVE)
				{
					restore_flags(flags);
					return -EBUSY;
				}
				dev->slave=slave;
				slave->flags|=IFF_SLAVE;
				dev->flags|=IFF_MASTER;
				restore_flags(flags);
				ret=0;
			}
			break;
#endif

		case SIOCADDMULTI:	/* Add a multicast address */
			if(dev->set_multicast_list==NULL)
				return -EINVAL;
			if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
				return -EINVAL;
			dev_mc_add(dev,ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
			return 0;

		case SIOCDELMULTI:	/* Delete a multicast address */
			if(dev->set_multicast_list==NULL)
				return -EINVAL;
			if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
				return -EINVAL;
			dev_mc_delete(dev,ifr.ifr_hwaddr.sa_data,dev->addr_len, 1);
			return 0;

		/*
		 *	Unknown or private ioctl.
		 */
		default:
			/* Device-private range is passed through to the driver. */
			if((getset >= SIOCDEVPRIVATE) &&
			   (getset <= (SIOCDEVPRIVATE + 15))) {
				if(dev->do_ioctl==NULL)
					return -EOPNOTSUPP;
				ret=dev->do_ioctl(dev, &ifr, getset);
				memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
				break;
			}

			ret = -EINVAL;
	}
	return(ret);
}
1330
1331
1332
1333
1334
1335
1336
/*
 *	This function handles all "interface"-type I/O control requests.
 *	The actual work is done by dev_ifsioc().  Get-type calls need no
 *	privilege; set-type calls require superuser.
 */
int dev_ioctl(unsigned int cmd, void *arg)
{
	switch(cmd)
	{
		case SIOCGIFCONF:
			(void) dev_ifconf((char *) arg);
			return 0;

		/*
		 *	Ioctl calls that can be done by all.
		 */
		case SIOCGIFFLAGS:
		case SIOCGIFADDR:
		case SIOCGIFDSTADDR:
		case SIOCGIFBRDADDR:
		case SIOCGIFNETMASK:
		case SIOCGIFMETRIC:
		case SIOCGIFMTU:
		case SIOCGIFMEM:
		case SIOCGIFHWADDR:
		case SIOCSIFHWADDR:
		case OLD_SIOCGIFHWADDR:
		case SIOCGIFSLAVE:
		case SIOCGIFMAP:
			return dev_ifsioc(arg, cmd);

		/*
		 *	Ioctl calls requiring the power of a superuser.
		 */
		case SIOCSIFFLAGS:
		case SIOCSIFADDR:
		case SIOCSIFDSTADDR:
		case SIOCSIFBRDADDR:
		case SIOCSIFNETMASK:
		case SIOCSIFMETRIC:
		case SIOCSIFMTU:
		case SIOCSIFMEM:
		case SIOCSIFMAP:
		case SIOCSIFSLAVE:
		case SIOCADDMULTI:
		case SIOCDELMULTI:
			if (!suser())
				return -EPERM;
			return dev_ifsioc(arg, cmd);

		case SIOCSIFLINK:	/* Obsolete - never supported here */
			return -EINVAL;

		/*
		 *	Unknown or private ioctl; the device-private range
		 *	is also passed through to dev_ifsioc.
		 */
		default:
			if((cmd >= SIOCDEVPRIVATE) &&
			   (cmd <= (SIOCDEVPRIVATE + 15))) {
				return dev_ifsioc(arg, cmd);
			}
			return -EINVAL;
	}
}
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411 void dev_init(void)
1412 {
1413 struct device *dev, *dev2;
1414
1415
1416
1417
1418
1419
1420
1421
1422 dev2 = NULL;
1423 for (dev = dev_base; dev != NULL; dev=dev->next)
1424 {
1425 if (dev->init && dev->init(dev))
1426 {
1427
1428
1429
1430
1431 if (dev2 == NULL)
1432 dev_base = dev->next;
1433 else
1434 dev2->next = dev->next;
1435 }
1436 else
1437 {
1438 dev2 = dev;
1439 }
1440 }
1441 }