This source file includes the following definitions (a short usage sketch follows the list):
- min
- dev_add_pack
- dev_remove_pack
- dev_get
- dev_open
- dev_close
- register_netdevice_notifier
- unregister_netdevice_notifier
- dev_queue_xmit
- netif_rx
- dev_rint
- dev_transmit
- in_net_bh
- net_bh
- dev_tint
- dev_ifconf
- sprintf_stats
- dev_get_info
- bad_mask
- dev_ifsioc
- dev_ioctl
- dev_init
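
As a quick orientation before the listing, here is a hedged sketch of how a protocol module plugs into this layer. The names my_tap, my_tap_rcv and my_tap_init are hypothetical, the handler's int return type is an assumption, and only the packet_type fields this file itself touches (type, dev, func, data) are filled in.

static int my_tap_rcv(struct sk_buff *skb, struct device *dev,
                      struct packet_type *pt)
{
	/* examine the frame, then release the buffer */
	kfree_skb(skb, FREE_READ);
	return 0;
}

static struct packet_type my_tap;

void my_tap_init(void)
{
	my_tap.type = htons(ETH_P_ALL);	/* tap every protocol...              */
	my_tap.dev  = NULL;		/* ...on every device                 */
	my_tap.func = my_tap_rcv;
	my_tap.data = NULL;		/* or a struct sock * for socket taps */
	dev_add_pack(&my_tap);		/* dev_remove_pack() undoes this      */
}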
41 #include <asm/segment.h>
42 #include <asm/system.h>
43 #include <asm/bitops.h>
44 #include <linux/config.h>
45 #include <linux/types.h>
46 #include <linux/kernel.h>
47 #include <linux/sched.h>
48 #include <linux/string.h>
49 #include <linux/mm.h>
50 #include <linux/socket.h>
51 #include <linux/sockios.h>
52 #include <linux/in.h>
53 #include <linux/errno.h>
54 #include <linux/interrupt.h>
55 #include <linux/if_ether.h>
56 #include <linux/inet.h>
57 #include <linux/netdevice.h>
58 #include <linux/etherdevice.h>
59 #include <linux/notifier.h>
60 #include "ip.h"
61 #include "route.h"
62 #include <linux/skbuff.h>
63 #include "sock.h"
64 #include "arp.h"
65
66 /*
67  *	The list of packet type handlers registered with dev_add_pack().
68  *	Incoming frames are matched against this list in net_bh().
69  */
70
71
72 struct packet_type *ptype_base = NULL;
73
74 /*
75  *	Notifier chain run when a device is brought up or taken down.
76  */
77
78 struct notifier_block *netdev_chain=NULL;
79
80 /*
81  *	Queue of received packets waiting to be processed by the
82  *	network bottom half (net_bh).
83  */
84
85 static struct sk_buff_head backlog =
86 {
87 (struct sk_buff *)&backlog, (struct sk_buff *)&backlog
88 #ifdef CONFIG_SKB_CHECK
89 ,SK_HEAD_SKB
90 #endif
91 };
92
93 /*
94  *	Number of packets on the backlog queue; netif_rx() starts
95  *	dropping when this grows too large.
96  */
97 static int backlog_size = 0;
98
99 /*
100  *	Return the smaller of two values.
101  */
102
103 static __inline__ unsigned long min(unsigned long a, unsigned long b)
104 {
105 return (a < b)? a : b;
106 }
107
108
109 /*
110  *	dev_nit counts the handlers registered for ETH_P_ALL (packet taps).
111  *	When it is nonzero, dev_queue_xmit() hands a copy of each outgoing
112  *	packet to those handlers before transmitting it.
113  */
114
115
116
117
118
119 static int dev_nit=0;
120
121
122 /*
123  *	Add a packet type handler to the front of the list.  ETH_P_ALL
124  *	handlers are counted separately in dev_nit.
125  */
126
127 void dev_add_pack(struct packet_type *pt)
128 {
129 if(pt->type==htons(ETH_P_ALL))
130 dev_nit++;
131 pt->next = ptype_base;
132 ptype_base = pt;
133 }
134
135 /*
136  *	Remove a packet type handler from the list, if it is present.
137  */
138
139
140 void dev_remove_pack(struct packet_type *pt)
141 {
142 struct packet_type **pt1;
143 if(pt->type==htons(ETH_P_ALL))
144 dev_nit--;
145 for(pt1=&ptype_base; (*pt1)!=NULL; pt1=&((*pt1)->next))
146 {
147 if(pt==(*pt1))
148 {
149 *pt1=pt->next;
150 return;
151 }
152 }
153 }
154
155
156
157
158
159
160
161 /*
162  *	Find a network device by name.
163  */
164
165 struct device *dev_get(char *name)
166 {
167 struct device *dev;
168
169 for (dev = dev_base; dev != NULL; dev = dev->next)
170 {
171 if (strcmp(dev->name, name) == 0)
172 return(dev);
173 }
174 return(NULL);
175 }
176
177 /*
178  *	Prepare an interface for use: call the driver's open routine,
179  *	set IFF_UP|IFF_RUNNING and run the NETDEV_UP notifier chain.
180  */
181
182 int dev_open(struct device *dev)
183 {
184 int ret = 0;
185
186
187
188
189 if (dev->open)
190 ret = dev->open(dev);
191
192
193
194
195
196 if (ret == 0)
197 {
198 dev->flags |= (IFF_UP | IFF_RUNNING);
199
200
201
202 #ifdef CONFIG_IP_MULTICAST
203
204
205
206 ip_mc_allhost(dev);
207 #endif
208 dev_mc_upload(dev);
209 notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
210 }
211 return(ret);
212 }
213
214 /*
215  *	Take a device down: stop it, run the NETDEV_DOWN notifiers,
216  *	clear its protocol addresses and flush its output queues.
217  */
218
219 int dev_close(struct device *dev)
220 {
221
222
223
224
225 if (dev->flags != 0)
226 {
227 int ct=0;
228 dev->flags = 0;
229
230
231
232 if (dev->stop)
233 dev->stop(dev);
234
235 notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
236 #if 0
237
238
239
240 #ifdef CONFIG_INET
241 ip_rt_flush(dev);
242 arp_device_down(dev);
243 #endif
244 #ifdef CONFIG_IPX
245 ipxrtr_device_down(dev);
246 #endif
247 #endif
248
249
250
251 dev_mc_discard(dev);
252
253
254
255 dev->pa_addr = 0;
256 dev->pa_dstaddr = 0;
257 dev->pa_brdaddr = 0;
258 dev->pa_mask = 0;
259
260
261
262 while(ct<DEV_NUMBUFFS)
263 {
264 struct sk_buff *skb;
265 while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
266 if(skb->free)
267 kfree_skb(skb,FREE_WRITE);
268 ct++;
269 }
270 }
271 return(0);
272 }
273
274
275 /*
276  *	Register and unregister for device up/down events.
277  */
278
279
280 int register_netdevice_notifier(struct notifier_block *nb)
281 {
282 return notifier_chain_register(&netdev_chain, nb);
283 }
284
285 int unregister_netdevice_notifier(struct notifier_block *nb)
286 {
287 return notifier_chain_unregister(&netdev_chain,nb);
288 }
289
290
291 /*
292  *	Send (or queue) a packet for transmission on a device.
293  *
294  *	Non-negative priorities select one of the DEV_NUMBUFFS output
295  *	queues; a negative priority is used by dev_tint() to push out a
296  *	packet that was already queued, without feeding it to the taps
297  *	again.
298  */
299
300 void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
301 {
302 unsigned long flags;
303 int nitcount;
304 struct packet_type *ptype;
305 int where = 0;
306
307
308
309 if (dev == NULL)
310 {
311 printk("dev.c: dev_queue_xmit: dev = NULL\n");
312 return;
313 }
314
315 if(pri>=0 && !skb_device_locked(skb))
316 skb_device_lock(skb);
317 #ifdef CONFIG_SLAVE_BALANCING
318 save_flags(flags);
319 cli();
320 if(dev->slave!=NULL && dev->slave->pkt_queue < dev->pkt_queue &&
321 (dev->slave->flags & IFF_UP))
322 dev=dev->slave;
323 restore_flags(flags);
324 #endif
325 #ifdef CONFIG_SKB_CHECK
326 IS_SKB(skb);
327 #endif
328 skb->dev = dev;
329
330
331
332
333
334 if (skb->next != NULL)
335 {
336
337
338
339 printk("dev_queue_xmit: worked around a missed interrupt\n");
340 dev->hard_start_xmit(NULL, dev);
341 return;
342 }
343
344
345
346
347
348
349
350 if (pri < 0)
351 {
352 pri = -pri-1;
353 where = 1;
354 }
355
356 if (pri >= DEV_NUMBUFFS)
357 {
358 printk("bad priority in dev_queue_xmit.\n");
359 pri = 1;
360 }
361
362
363
364
365
366
367 if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
368 return;
369 }
370
371 save_flags(flags);
372 cli();
373 if (!where) {
374 #ifdef CONFIG_SLAVE_BALANCING
375 skb->in_dev_queue=1;
376 #endif
377 skb_queue_tail(dev->buffs + pri,skb);
378 skb_device_unlock(skb);
379 skb = skb_dequeue(dev->buffs + pri);
380 skb_device_lock(skb);
381 #ifdef CONFIG_SLAVE_BALANCING
382 skb->in_dev_queue=0;
383 #endif
384 }
385 restore_flags(flags);
386
387
388 if(!where)
389 {
390 for (nitcount= dev_nit, ptype = ptype_base; nitcount > 0 && ptype != NULL; ptype = ptype->next)
391 {
392
393
394
395 if (ptype->type == htons(ETH_P_ALL) &&
396 (ptype->dev == dev || !ptype->dev) &&
397 ((struct sock *)ptype->data != skb->sk))
398 {
399 struct sk_buff *skb2;
400 if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
401 break;
402
403
404
405
406 skb2->len-=skb->dev->hard_header_len;
407 ptype->func(skb2, skb->dev, ptype);
408 nitcount--;
409 }
410 }
411 }
412 if (dev->hard_start_xmit(skb, dev) == 0) {
413
414
415
416 return;
417 }
418
419
420
421
422
423 cli();
424 #ifdef CONFIG_SLAVE_BALANCING
425 skb->in_dev_queue=1;
426 dev->pkt_queue++;
427 #endif
428 skb_device_unlock(skb);
429 skb_queue_head(dev->buffs + pri,skb);
430 restore_flags(flags);
431 }
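
For orientation, a hedged sketch of the transmit side as seen by a caller of dev_queue_xmit(); my_proto_send and the way the frame is built are illustrative, and only sk_buff fields used elsewhere in this file are set. The priority selects one of the DEV_NUMBUFFS output queues.

static void my_proto_send(struct device *dev, unsigned char *frame, int len)
{
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (skb == NULL)
		return;			/* out of memory: drop it             */
	skb->sk   = NULL;		/* no owning socket                   */
	skb->free = 1;			/* may be freed after transmission    */
	skb->arp  = 1;			/* link-level header already resolved */
	skb->len  = len;
	memcpy(skb->data, frame, len);	/* frame includes the hardware header */
	dev_queue_xmit(skb, dev, 0);
}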
432
433
434 /*
435  *	Receive a packet from a device driver and queue it on the
436  *	backlog for the bottom half to process.
437  */
438
439 void netif_rx(struct sk_buff *skb)
440 {
441 static int dropping = 0;
442
443
444
445
446
447
448 skb->sk = NULL;
449 skb->free = 1;
450 if(skb->stamp.tv_sec==0)
451 skb->stamp = xtime;
452
453
454
455
456
457 if (!backlog_size)
458 dropping = 0;
459 else if (backlog_size > 300)
460 dropping = 1;
461
462 if (dropping)
463 {
464 kfree_skb(skb, FREE_READ);
465 return;
466 }
467
468
469
470
471 #ifdef CONFIG_SKB_CHECK
472 IS_SKB(skb);
473 #endif
474 skb_queue_tail(&backlog,skb);
475 backlog_size++;
476
477
478
479
480
481
482 mark_bh(NET_BH);
483 return;
484 }
485
486
487
488
489
490
491 /*
492  *	The old-style receive interface: a driver hands in either a flat
493  *	buffer (possibly wrapping around its receive ring between
494  *	rmem_start and rmem_end) or, with the IN_SKBUFF flag, a ready
495  *	sk_buff.  The data is wrapped in an sk_buff if necessary and
496  *	passed to netif_rx().  Returns 0 on success, 1 if the packet
497  *	could not be accepted.
498  */
499
500
501 int dev_rint(unsigned char *buff, long len, int flags, struct device *dev)
502 {
503 static int dropping = 0;
504 struct sk_buff *skb = NULL;
505 unsigned char *to;
506 int amount, left;
507 int len2;
508
509 if (dev == NULL || buff == NULL || len <= 0)
510 return(1);
511
512 if (flags & IN_SKBUFF)
513 {
514 skb = (struct sk_buff *) buff;
515 }
516 else
517 {
518 if (dropping)
519 {
520 if (skb_peek(&backlog) != NULL)
521 return(1);
522 printk("INET: dev_rint: no longer dropping packets.\n");
523 dropping = 0;
524 }
525
526 skb = alloc_skb(len, GFP_ATOMIC);
527 if (skb == NULL)
528 {
529 printk("dev_rint: packet dropped on %s (no memory) !\n",
530 dev->name);
531 dropping = 1;
532 return(1);
533 }
534
535
536
537
538
539
540 to = skb->data;
541 left = len;
542
543 len2 = len;
544 while (len2 > 0)
545 {
546 amount = min(len2, (unsigned long) dev->rmem_end -
547 (unsigned long) buff);
548 memcpy(to, buff, amount);
549 len2 -= amount;
550 left -= amount;
551 buff += amount;
552 to += amount;
553 if ((unsigned long) buff == dev->rmem_end)
554 buff = (unsigned char *) dev->rmem_start;
555 }
556 }
557
558
559
560
561
562 skb->len = len;
563 skb->dev = dev;
564 skb->free = 1;
565
566 netif_rx(skb);
567
568
569
570 return(0);
571 }
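
A hedged sketch of the sk_buff-based receive path a driver can use instead of dev_rint(); mydev_receive and its arguments are illustrative. The driver builds an sk_buff, fills in the fields netif_rx() and net_bh() rely on, and hands it over.

static void mydev_receive(struct device *dev, unsigned char *frame, int len)
{
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (skb == NULL)
		return;			/* no memory: drop the frame    */
	skb->len  = len;		/* includes the hardware header */
	skb->dev  = dev;
	skb->free = 1;
	memcpy(skb->data, frame, len);
	netif_rx(skb);			/* queue it and mark NET_BH     */
}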
572
573 /*
574  *	Try to start transmission on every interface that is up and
575  *	not busy.
576  */
577
578 void dev_transmit(void)
579 {
580 struct device *dev;
581
582 for (dev = dev_base; dev != NULL; dev = dev->next)
583 {
584 if (dev->flags != 0 && !dev->tbusy) {
585
586
587
588 dev_tint(dev);
589 }
590 }
591 }
592
593
594
595
596
597
598
599 /*
600  *	The network bottom half protects itself with the in_bh flag;
601  *	in_net_bh() lets other code ask whether it is currently running.
602  */
603
604
605
606 volatile char in_bh = 0;
607
608 int in_net_bh()
609 {
610 return(in_bh==0?0:1);
611 }
612
613
614 /*
615  *	The network bottom half: drain the backlog queue, hand each
616  *	packet to every matching packet type handler, and kick device
617  *	transmission before, during and after.
618  */
619
620
621 void net_bh(void *tmp)
622 {
623 struct sk_buff *skb;
624 struct packet_type *ptype;
625 struct packet_type *pt_prev;
626 unsigned short type;
627
628
629
630
631
632 if (set_bit(1, (void*)&in_bh))
633 return;
634
635
636
637
638
639
640
641 dev_transmit();
642
643
644
645
646
647
648
649 cli();
650
651
652
653
654
655 while((skb=skb_dequeue(&backlog))!=NULL)
656 {
657
658
659
660 backlog_size--;
661
662 sti();
663
664
665
666
667
668
669
670
671 skb->h.raw = skb->data + skb->dev->hard_header_len;
672 skb->len -= skb->dev->hard_header_len;
673
674
675
676
677
678
679
680
681
682
683
684
685 type = skb->dev->type_trans(skb, skb->dev);
686
687
688
689
690
691
692
693
694
695
696
697
698 pt_prev = NULL;
699 for (ptype = ptype_base; ptype != NULL; ptype = ptype->next)
700 {
701 if ((ptype->type == type || ptype->type == htons(ETH_P_ALL)) && (!ptype->dev || ptype->dev==skb->dev))
702 {
703
704
705
706
707 if(pt_prev)
708 {
709 struct sk_buff *skb2;
710
711 skb2=skb_clone(skb, GFP_ATOMIC);
712
713
714
715
716
717
718 if(skb2)
719 pt_prev->func(skb2, skb->dev, pt_prev);
720 }
721
722 pt_prev=ptype;
723 }
724 }
725
726
727
728
729
730 if(pt_prev)
731 pt_prev->func(skb, skb->dev, pt_prev);
732
733
734
735
736 else
737 kfree_skb(skb, FREE_WRITE);
738
739
740
741
742
743
744
745 dev_transmit();
746 cli();
747 }
748
749
750
751
752
753 in_bh = 0;
754 sti();
755
756
757
758
759
760 dev_transmit();
761 }
762
763
764 /*
765  *	Feed a device's queued output buffers back to dev_queue_xmit()
766  *	with a negative priority, so they are transmitted immediately
767  *	rather than re-queued behind new traffic.
768  */
769 void dev_tint(struct device *dev)
770 {
771 int i;
772 struct sk_buff *skb;
773 unsigned long flags;
774
775 save_flags(flags);
776
777
778
779
780 for(i = 0;i < DEV_NUMBUFFS; i++)
781 {
782
783
784
785
786
787 cli();
788 while((skb=skb_dequeue(&dev->buffs[i]))!=NULL)
789 {
790
791
792
793 skb_device_lock(skb);
794 restore_flags(flags);
795
796
797
798
799 dev_queue_xmit(skb,dev,-i - 1);
800
801
802
803 if (dev->tbusy)
804 return;
805 cli();
806 }
807 }
808 restore_flags(flags);
809 }
810
811
812
813 /*
814  *	Handle SIOCGIFCONF: copy one struct ifreq per interface that is
815  *	up into the user buffer, and report how much space was used.
816  */
817
818 static int dev_ifconf(char *arg)
819 {
820 struct ifconf ifc;
821 struct ifreq ifr;
822 struct device *dev;
823 char *pos;
824 int len;
825 int err;
826
827
828
829
830
831 err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
832 if(err)
833 return err;
834 memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
835 len = ifc.ifc_len;
836 pos = ifc.ifc_buf;
837
838
839
840
841
842
843 err=verify_area(VERIFY_WRITE,pos,len);
844 if(err)
845 return err;
846
847
848
849
850
851 for (dev = dev_base; dev != NULL; dev = dev->next)
852 {
853 if(!(dev->flags & IFF_UP))
854 continue;
855 memset(&ifr, 0, sizeof(struct ifreq));
856 strcpy(ifr.ifr_name, dev->name);
857 (*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
858 (*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
859
860
861
862
863
864 memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
865 pos += sizeof(struct ifreq);
866 len -= sizeof(struct ifreq);
867
868
869
870
871
872 if (len < sizeof(struct ifreq))
873 break;
874 }
875
876
877
878
879
880 ifc.ifc_len = (pos - ifc.ifc_buf);
881 ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
882 memcpy_tofs(arg, &ifc, sizeof(struct ifconf));
883
884
885
886
887
888 return(pos - arg);
889 }
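
To show what dev_ifconf() serves, here is a hedged user-space sketch of SIOCGIFCONF (list_interfaces and the fixed-size array are illustrative): the kernel fills ifc_buf with one struct ifreq per interface that is up and rewrites ifc_len to the number of bytes actually used.

/* user-space example, not part of this file */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <stdio.h>
#include <unistd.h>

int list_interfaces(void)
{
	struct ifreq reqs[16];
	struct ifconf ifc;
	int fd, i, n;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	ifc.ifc_len = sizeof(reqs);	/* room we offer the kernel        */
	ifc.ifc_req = reqs;
	if (ioctl(fd, SIOCGIFCONF, &ifc) < 0) {
		close(fd);
		return -1;
	}
	n = ifc.ifc_len / sizeof(struct ifreq);	/* entries actually filled */
	for (i = 0; i < n; i++)
		printf("%s\n", reqs[i].ifr_name);
	close(fd);
	return 0;
}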
890
891
892 /*
893  *	Format one device's statistics line for /proc/net/dev.
894  */
895
896
897 static int sprintf_stats(char *buffer, struct device *dev)
898 {
899 struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
900 int size;
901
902 if (stats)
903 size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
904 dev->name,
905 stats->rx_packets, stats->rx_errors,
906 stats->rx_dropped + stats->rx_missed_errors,
907 stats->rx_fifo_errors,
908 stats->rx_length_errors + stats->rx_over_errors
909 + stats->rx_crc_errors + stats->rx_frame_errors,
910 stats->tx_packets, stats->tx_errors, stats->tx_dropped,
911 stats->tx_fifo_errors, stats->collisions,
912 stats->tx_carrier_errors + stats->tx_aborted_errors
913 + stats->tx_window_errors + stats->tx_heartbeat_errors);
914 else
915 size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);
916
917 return size;
918 }
919
920
921 /*
922  *	Produce the /proc/net/dev listing.
923  */
924
925 int dev_get_info(char *buffer, char **start, off_t offset, int length)
926 {
927 int len=0;
928 off_t begin=0;
929 off_t pos=0;
930 int size;
931
932 struct device *dev;
933
934
935 size = sprintf(buffer, "Inter-| Receive | Transmit\n"
936 " face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");
937
938 pos+=size;
939 len+=size;
940
941
942 for (dev = dev_base; dev != NULL; dev = dev->next)
943 {
944 size = sprintf_stats(buffer+len, dev);
945 len+=size;
946 pos=begin+len;
947
948 if(pos<offset)
949 {
950 len=0;
951 begin=pos;
952 }
953 if(pos>offset+length)
954 break;
955 }
956
957 *start=buffer+(offset-begin);
958 len-=(offset-begin);
959 if(len>length)
960 len=length;
961 return len;
962 }
963
964 /*
965  *	Sanity-check a netmask: it must be a contiguous run of one bits,
966  *	and the address must not have bits set outside it.
967  */
968
969 static inline int bad_mask(unsigned long mask, unsigned long addr)
970 {
971 if (addr & (mask = ~mask))
972 return 1;
973 mask = ntohl(mask);
974 if (mask & (mask+1))
975 return 1;
976 return 0;
977 }
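
A short worked example of the check above, added for illustration (dotted-quad values shown for readability; the code operates on network-byte-order words):

/*
 *	mask 255.255.255.0          -> good: ~mask is 0.0.0.255, i.e. of the
 *	                               form 2^k-1, so (~mask) & (~mask+1) == 0.
 *	mask 255.255.0.255          -> bad:  ~mask is 0.0.255.0, not of the
 *	                               form 2^k-1 (non-contiguous mask).
 *	mask 255.255.255.0 with
 *	addr 192.0.2.5              -> bad:  the address has bits set outside
 *	                               the mask (addr & ~mask != 0).
 */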
978
979
980
981 /*
982  *	Perform the per-interface SIOC* ioctls: copy the struct ifreq in,
983  *	act on the named device and copy any result back to user space.
984  */
985
986 static int dev_ifsioc(void *arg, unsigned int getset)
987 {
988 struct ifreq ifr;
989 struct device *dev;
990 int ret;
991
992
993
994
995
996 int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
997 if(err)
998 return err;
999
1000 memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));
1001
1002
1003
1004
1005
1006 if ((dev = dev_get(ifr.ifr_name)) == NULL)
1007 return(-ENODEV);
1008
1009 switch(getset)
1010 {
1011 case SIOCGIFFLAGS:
1012 ifr.ifr_flags = dev->flags;
1013 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1014 ret = 0;
1015 break;
1016 case SIOCSIFFLAGS:
1017 {
1018 int old_flags = dev->flags;
1019 #ifdef CONFIG_SLAVE_BALANCING
1020 if(dev->flags&IFF_SLAVE)
1021 return -EBUSY;
1022 #endif
1023 dev->flags = ifr.ifr_flags & (
1024 IFF_UP | IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
1025 IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
1026 IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER
1027 | IFF_MULTICAST);
1028 #ifdef CONFIG_SLAVE_BALANCING
1029 if(!(dev->flags&IFF_MASTER) && dev->slave)
1030 {
1031 dev->slave->flags&=~IFF_SLAVE;
1032 dev->slave=NULL;
1033 }
1034 #endif
1035
1036
1037
1038
1039 dev_mc_upload(dev);
1040 #if 0
1041 if( dev->set_multicast_list!=NULL)
1042 {
1043
1044
1045
1046
1047
1048 if ( (old_flags & IFF_PROMISC) && ((dev->flags & IFF_PROMISC) == 0))
1049 dev->set_multicast_list(dev,0,NULL);
1050
1051
1052
1053
1054
1055 if ( (dev->flags & IFF_PROMISC) && ((old_flags & IFF_PROMISC) == 0))
1056 dev->set_multicast_list(dev,-1,NULL);
1057 }
1058 #endif
1059
1060
1061
1062
1063 if ((old_flags & IFF_UP) && ((dev->flags & IFF_UP) == 0))
1064 {
1065 ret = dev_close(dev);
1066 }
1067 else
1068 {
1069
1070
1071
1072
1073 ret = (! (old_flags & IFF_UP) && (dev->flags & IFF_UP))
1074 ? dev_open(dev) : 0;
1075
1076
1077
1078 if(ret<0)
1079 dev->flags&=~IFF_UP;
1080 }
1081 }
1082 break;
1083
1084 case SIOCGIFADDR:
1085 (*(struct sockaddr_in *)
1086 &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
1087 (*(struct sockaddr_in *)
1088 &ifr.ifr_addr).sin_family = dev->family;
1089 (*(struct sockaddr_in *)
1090 &ifr.ifr_addr).sin_port = 0;
1091 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1092 ret = 0;
1093 break;
1094
1095 case SIOCSIFADDR:
1096 dev->pa_addr = (*(struct sockaddr_in *)
1097 &ifr.ifr_addr).sin_addr.s_addr;
1098 dev->family = ifr.ifr_addr.sa_family;
1099
1100 #ifdef CONFIG_INET
1101
1102
1103 dev->pa_mask = ip_get_mask(dev->pa_addr);
1104 #endif
1105 dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
1106 ret = 0;
1107 break;
1108
1109 case SIOCGIFBRDADDR:
1110 (*(struct sockaddr_in *)
1111 &ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
1112 (*(struct sockaddr_in *)
1113 &ifr.ifr_broadaddr).sin_family = dev->family;
1114 (*(struct sockaddr_in *)
1115 &ifr.ifr_broadaddr).sin_port = 0;
1116 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1117 ret = 0;
1118 break;
1119
1120 case SIOCSIFBRDADDR:
1121 dev->pa_brdaddr = (*(struct sockaddr_in *)
1122 &ifr.ifr_broadaddr).sin_addr.s_addr;
1123 ret = 0;
1124 break;
1125
1126 case SIOCGIFDSTADDR:
1127 (*(struct sockaddr_in *)
1128 &ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
1129 (*(struct sockaddr_in *)
1130 &ifr.ifr_broadaddr).sin_family = dev->family;
1131 (*(struct sockaddr_in *)
1132 &ifr.ifr_broadaddr).sin_port = 0;
1133 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1134 ret = 0;
1135 break;
1136
1137 case SIOCSIFDSTADDR:
1138 dev->pa_dstaddr = (*(struct sockaddr_in *)
1139 &ifr.ifr_dstaddr).sin_addr.s_addr;
1140 ret = 0;
1141 break;
1142
1143 case SIOCGIFNETMASK:
1144 (*(struct sockaddr_in *)
1145 &ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
1146 (*(struct sockaddr_in *)
1147 &ifr.ifr_netmask).sin_family = dev->family;
1148 (*(struct sockaddr_in *)
1149 &ifr.ifr_netmask).sin_port = 0;
1150 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1151 ret = 0;
1152 break;
1153
1154 case SIOCSIFNETMASK:
1155 {
1156 unsigned long mask = (*(struct sockaddr_in *)
1157 &ifr.ifr_netmask).sin_addr.s_addr;
1158 ret = -EINVAL;
1159
1160
1161
1162 if (bad_mask(mask,0))
1163 break;
1164 dev->pa_mask = mask;
1165 ret = 0;
1166 }
1167 break;
1168
1169 case SIOCGIFMETRIC:
1170
1171 ifr.ifr_metric = dev->metric;
1172 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1173 ret = 0;
1174 break;
1175
1176 case SIOCSIFMETRIC:
1177 dev->metric = ifr.ifr_metric;
1178 ret = 0;
1179 break;
1180
1181 case SIOCGIFMTU:
1182 ifr.ifr_mtu = dev->mtu;
1183 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1184 ret = 0;
1185 break;
1186
1187 case SIOCSIFMTU:
1188
1189
1190
1191
1192
1193 if(ifr.ifr_mtu<1 || ifr.ifr_mtu>3800)
1194 return -EINVAL;
1195 dev->mtu = ifr.ifr_mtu;
1196 ret = 0;
1197 break;
1198
1199 case SIOCGIFMEM:
1200
1201 printk("NET: ioctl(SIOCGIFMEM, %p)\n", arg);
1202 ret = -EINVAL;
1203 break;
1204
1205 case SIOCSIFMEM:
1206 printk("NET: ioctl(SIOCSIFMEM, %p)\n", arg);
1207 ret = -EINVAL;
1208 break;
1209
1210 case OLD_SIOCGIFHWADDR:
1211 memcpy(ifr.old_ifr_hwaddr,dev->dev_addr, MAX_ADDR_LEN);
1212 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1213 ret=0;
1214 break;
1215
1216 case SIOCGIFHWADDR:
1217 memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
1218 ifr.ifr_hwaddr.sa_family=dev->type;
1219 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1220 ret=0;
1221 break;
1222
1223 case SIOCSIFHWADDR:
1224 if(dev->set_mac_address==NULL)
1225 return -EOPNOTSUPP;
1226 if(ifr.ifr_hwaddr.sa_family!=dev->type)
1227 return -EINVAL;
1228 ret=dev->set_mac_address(dev,ifr.ifr_hwaddr.sa_data);
1229 break;
1230
1231 case SIOCGIFMAP:
1232 ifr.ifr_map.mem_start=dev->mem_start;
1233 ifr.ifr_map.mem_end=dev->mem_end;
1234 ifr.ifr_map.base_addr=dev->base_addr;
1235 ifr.ifr_map.irq=dev->irq;
1236 ifr.ifr_map.dma=dev->dma;
1237 ifr.ifr_map.port=dev->if_port;
1238 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1239 ret=0;
1240 break;
1241
1242 case SIOCSIFMAP:
1243 if(dev->set_config==NULL)
1244 return -EOPNOTSUPP;
1245 return dev->set_config(dev,&ifr.ifr_map);
1246
1247 case SIOCGIFSLAVE:
1248 #ifdef CONFIG_SLAVE_BALANCING
1249 if(dev->slave==NULL)
1250 return -ENOENT;
1251 strncpy(ifr.ifr_name,dev->name,sizeof(ifr.ifr_name));
1252 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1253 ret=0;
1254 #else
1255 return -ENOENT;
1256 #endif
1257 break;
1258 #ifdef CONFIG_SLAVE_BALANCING
1259 case SIOCSIFSLAVE:
1260 {
1261
1262
1263
1264
1265
1266 unsigned long flags;
1267 struct device *slave=dev_get(ifr.ifr_slave);
1268 save_flags(flags);
1269 if(slave==NULL)
1270 {
1271 return -ENODEV;
1272 }
1273 cli();
1274 if((slave->flags&(IFF_UP|IFF_RUNNING))!=(IFF_UP|IFF_RUNNING))
1275 {
1276 restore_flags(flags);
1277 return -EINVAL;
1278 }
1279 if(dev->flags&IFF_SLAVE)
1280 {
1281 restore_flags(flags);
1282 return -EBUSY;
1283 }
1284 if(dev->slave!=NULL)
1285 {
1286 restore_flags(flags);
1287 return -EBUSY;
1288 }
1289 if(slave->flags&IFF_SLAVE)
1290 {
1291 restore_flags(flags);
1292 return -EBUSY;
1293 }
1294 dev->slave=slave;
1295 slave->flags|=IFF_SLAVE;
1296 dev->flags|=IFF_MASTER;
1297 restore_flags(flags);
1298 ret=0;
1299 }
1300 break;
1301 #endif
1302
1303 case SIOCADDMULTI:
1304 if(dev->set_multicast_list==NULL)
1305 return -EINVAL;
1306 if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
1307 return -EINVAL;
1308 dev_mc_add(dev,ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
1309 return 0;
1310
1311 case SIOCDELMULTI:
1312 if(dev->set_multicast_list==NULL)
1313 return -EINVAL;
1314 if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
1315 return -EINVAL;
1316 dev_mc_delete(dev,ifr.ifr_hwaddr.sa_data,dev->addr_len, 1);
1317 return 0;
1318
1319
1320
1321
1322 default:
1323 if((getset >= SIOCDEVPRIVATE) &&
1324 (getset <= (SIOCDEVPRIVATE + 15))) {
1325 if(dev->do_ioctl==NULL)
1326 return -EOPNOTSUPP;
1327 ret=dev->do_ioctl(dev, &ifr, getset);
1328 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1329 break;
1330 }
1331
1332 ret = -EINVAL;
1333 }
1334 return(ret);
1335 }
1336
1337
1338 /*
1339  *	Dispatch a device ioctl: the "get" calls are allowed for everyone,
1340  *	most "set" calls require superuser rights.
1341  */
1342
1343 int dev_ioctl(unsigned int cmd, void *arg)
1344 {
1345 switch(cmd)
1346 {
1347 case SIOCGIFCONF:
1348 (void) dev_ifconf((char *) arg);
1349 return 0;
1350
1351
1352
1353
1354
1355 case SIOCGIFFLAGS:
1356 case SIOCGIFADDR:
1357 case SIOCGIFDSTADDR:
1358 case SIOCGIFBRDADDR:
1359 case SIOCGIFNETMASK:
1360 case SIOCGIFMETRIC:
1361 case SIOCGIFMTU:
1362 case SIOCGIFMEM:
1363 case SIOCGIFHWADDR:
1364 case SIOCSIFHWADDR:
1365 case OLD_SIOCGIFHWADDR:
1366 case SIOCGIFSLAVE:
1367 case SIOCGIFMAP:
1368 return dev_ifsioc(arg, cmd);
1369
1370
1371
1372
1373
1374 case SIOCSIFFLAGS:
1375 case SIOCSIFADDR:
1376 case SIOCSIFDSTADDR:
1377 case SIOCSIFBRDADDR:
1378 case SIOCSIFNETMASK:
1379 case SIOCSIFMETRIC:
1380 case SIOCSIFMTU:
1381 case SIOCSIFMEM:
1382 case SIOCSIFMAP:
1383 case SIOCSIFSLAVE:
1384 case SIOCADDMULTI:
1385 case SIOCDELMULTI:
1386 if (!suser())
1387 return -EPERM;
1388 return dev_ifsioc(arg, cmd);
1389
1390 case SIOCSIFLINK:
1391 return -EINVAL;
1392
1393
1394
1395
1396
1397 default:
1398 if((cmd >= SIOCDEVPRIVATE) &&
1399 (cmd <= (SIOCDEVPRIVATE + 15))) {
1400 return dev_ifsioc(arg, cmd);
1401 }
1402 return -EINVAL;
1403 }
1404 }
1405
1406
1407
1408
1409 /*
1410  *	Initialise the device list at boot time: call each device's init
1411  *	routine and unlink from dev_base any device whose init fails
1412  *	(typically because the hardware is not present).
1413  */
1414
1415
1416
1417 void dev_init(void)
1418 {
1419 struct device *dev, *dev2;
1420
1421
1422
1423
1424
1425
1426
1427
1428 dev2 = NULL;
1429 for (dev = dev_base; dev != NULL; dev=dev->next)
1430 {
1431 if (dev->init && dev->init(dev))
1432 {
1433
1434
1435
1436
1437 if (dev2 == NULL)
1438 dev_base = dev->next;
1439 else
1440 dev2->next = dev->next;
1441 }
1442 else
1443 {
1444 dev2 = dev;
1445 }
1446 }
1447 }