This source file includes following definitions.
- min
- dev_add_pack
- dev_remove_pack
- dev_get
- dev_open
- dev_close
- dev_queue_xmit
- netif_rx
- dev_rint
- dev_transmit
- in_net_bh
- net_bh
- dev_tint
- dev_ifconf
- sprintf_stats
- dev_get_info
- bad_mask
- dev_ifsioc
- dev_ioctl
- dev_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34 #include <asm/segment.h>
35 #include <asm/system.h>
36 #include <asm/bitops.h>
37 #include <linux/config.h>
38 #include <linux/types.h>
39 #include <linux/kernel.h>
40 #include <linux/sched.h>
41 #include <linux/string.h>
42 #include <linux/mm.h>
43 #include <linux/socket.h>
44 #include <linux/sockios.h>
45 #include <linux/in.h>
46 #include <linux/errno.h>
47 #include <linux/interrupt.h>
48 #include <linux/if_ether.h>
49 #include <linux/inet.h>
50 #include <linux/netdevice.h>
51 #include <linux/etherdevice.h>
52 #include "ip.h"
53 #include "route.h"
54 #include <linux/skbuff.h>
55 #include "sock.h"
56 #include "arp.h"
57
58
59
60
61
62
63
/*
 *	Head of the list of registered protocol handlers (packet_type).
 *	dev_add_pack()/dev_remove_pack() maintain it; net_bh() walks it
 *	to deliver received frames.
 */
struct packet_type *ptype_base = NULL;

/*
 *	Queue of received packets waiting for net_bh() to process them.
 *	The two pointers are initialised to the head itself, i.e. an
 *	empty circular list.
 */
static struct sk_buff_head backlog =
	{
		(struct sk_buff *)&backlog, (struct sk_buff *)&backlog
#ifdef CONFIG_SKB_CHECK
		,SK_HEAD_SKB
#endif
	};

/* Number of packets currently sitting on the backlog queue
   (netif_rx() uses it to decide when to start dropping). */
static int backlog_size = 0;

/* Count of ETH_P_ALL ("network tap") handlers currently registered. */
static int dev_nit=0;
91
92
93
94
95
/*
 *	Return the smaller of two unsigned long values.
 */
static __inline__ unsigned long min(unsigned long a, unsigned long b)
{
	if (a < b)
		return a;
	return b;
}
100
101
102
103
104
105
106
107
108
109
110
111
112
/*
 *	Register a protocol handler.
 *
 *	ETH_P_ALL handlers (taps) are counted in dev_nit and appended at
 *	the END of the list; ordinary handlers are pushed onto the FRONT.
 *	pt->copy is set when another handler for the same type already
 *	exists, so net_bh() knows the packet must be cloned for each one.
 *
 *	NOTE(review): the list is spliced with no cli()/sti() protection
 *	here, unlike dev_remove_pack() — presumably callers run at a safe
 *	time; verify against the callers.
 */
void dev_add_pack(struct packet_type *pt)
{
	struct packet_type *p1;
	pt->next = ptype_base;

	/* Assume we are the only handler for this type until proven otherwise. */
	pt->copy=0;

	if(pt->type == htons(ETH_P_ALL))
		dev_nit++;	/* one more tap to feed in net_bh()/dev_queue_xmit() */
	else
	{
		/* Is there already a handler for this exact type?  If so
		   this one must receive a copy, not the original buffer. */
		for (p1 = ptype_base; p1 != NULL; p1 = p1->next)
		{
			if (p1->type == pt->type)
			{
				pt->copy = 1;
				break;
			}
		}
	}

	/* Taps go at the tail so real protocols are matched first. */
	if (pt->type == htons(ETH_P_ALL))
	{
		pt->next=NULL;
		if(ptype_base==NULL)
			ptype_base=pt;
		else
		{
			/* Walk to the last node... */
			for(p1=ptype_base;p1->next!=NULL;p1=p1->next);

			/* ...and append. */
			p1->next=pt;
		}
	}
	else
		/* Ordinary handlers simply become the new head
		   (pt->next was set to the old head above). */
		ptype_base = pt;
}
173
174
175
176
177
178
/*
 *	Unregister a protocol handler previously added with dev_add_pack().
 *
 *	If the removed handler was the one holding the "original" (copy==0)
 *	for its type, the copy flag is cleared on the last remaining
 *	same-type handler (lpt) so that exactly one handler per type keeps
 *	receiving the un-cloned buffer.
 */
void dev_remove_pack(struct packet_type *pt)
{
	struct packet_type *lpt, *pt1;

	/* One tap fewer to feed. */
	if (pt->type == htons(ETH_P_ALL))
		dev_nit--;

	/* Removing the head is trivial. */
	if (pt == ptype_base)
	{
		ptype_base = pt->next;
		return;
	}

	lpt = NULL;

	/* Walk the list looking for pt, remembering in lpt the most recent
	   other handler of the same type (candidate to inherit copy==0). */
	for (pt1 = ptype_base; pt1->next != NULL; pt1 = pt1->next)
	{
		if (pt1->next == pt )
		{
			cli();	/* unlink atomically w.r.t. net_bh() */
			if (!pt->copy && lpt)
				lpt->copy = 0;	/* hand the original-buffer role on */
			pt1->next = pt->next;
			sti();
			return;
		}
		if (pt1->next->type == pt->type && pt->type != htons(ETH_P_ALL))
			lpt = pt1->next;
	}
}
224
225
226
227
228
229
230
231
232
233
234
235 struct device *dev_get(char *name)
236 {
237 struct device *dev;
238
239 for (dev = dev_base; dev != NULL; dev = dev->next)
240 {
241 if (strcmp(dev->name, name) == 0)
242 return(dev);
243 }
244 return(NULL);
245 }
246
247
248
249
250
251
252 int dev_open(struct device *dev)
253 {
254 int ret = 0;
255
256
257
258
259 if (dev->open)
260 ret = dev->open(dev);
261
262
263
264
265
266 if (ret == 0)
267 dev->flags |= (IFF_UP | IFF_RUNNING);
268
269 return(ret);
270 }
271
272
273
274
275
276
277
278
279
280
281
282 int dev_close(struct device *dev)
283 {
284
285
286
287
288 if (dev->flags != 0)
289 {
290 int ct=0;
291 dev->flags = 0;
292
293
294
295 if (dev->stop)
296 dev->stop(dev);
297
298
299
300 #ifdef CONFIG_INET
301 ip_rt_flush(dev);
302 arp_device_down(dev);
303 #endif
304 #ifdef CONFIG_IPX
305 ipxrtr_device_down(dev);
306 #endif
307
308
309
310 dev->pa_addr = 0;
311 dev->pa_dstaddr = 0;
312 dev->pa_brdaddr = 0;
313 dev->pa_mask = 0;
314
315
316
317 while(ct<DEV_NUMBUFFS)
318 {
319 struct sk_buff *skb;
320 while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
321 if(skb->free)
322 kfree_skb(skb,FREE_WRITE);
323 ct++;
324 }
325 }
326 return(0);
327 }
328
329
330
331
332
333
334
335
336
337
/*
 *	Queue (or directly send) a buffer for transmission on a device.
 *
 *	skb	- buffer to send (locked on entry when pri < 0, see below)
 *	dev	- device to send on (may be swapped for its slave)
 *	pri	- priority band; a NEGATIVE pri (-i - 1) marks a retransmit
 *		  from dev_tint(): the frame skips the queue and the taps.
 *
 *	NOTE(review): a negative pri is assumed to mean the caller already
 *	holds the device lock on skb (dev_tint() does this) — confirm for
 *	any other caller.
 */
void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
	unsigned long flags;
	int nitcount;
	struct packet_type *ptype;
	int where = 0;	/* 1 = retransmit: don't re-queue, don't tap */

	if (dev == NULL)
	{
		printk("dev.c: dev_queue_xmit: dev = NULL\n");
		return;
	}

	/* Fresh frames must be locked against freeing while we work. */
	if(pri>=0 && !skb_device_locked(skb))
		skb_device_lock(skb);
#ifdef CONFIG_SLAVE_BALANCING
	/* Load-share: divert to the slave device if it is up and
	   has the shorter queue. */
	save_flags(flags);
	cli();
	if(dev->slave!=NULL && dev->slave->pkt_queue < dev->pkt_queue &&
				(dev->slave->flags & IFF_UP))
		dev=dev->slave;
	restore_flags(flags);
#endif

	IS_SKB(skb);

	skb->dev = dev;

	/* A buffer still chained onto a list means a transmit-complete
	   interrupt was lost; kick the driver to restart instead. */
	if (skb->next != NULL)
	{
		printk("dev_queue_xmit: worked around a missed interrupt\n");
		dev->hard_start_xmit(NULL, dev);
		return;
	}

	/* Negative priority encodes "retransmit from band -pri-1". */
	if (pri < 0)
	{
		pri = -pri-1;
		where = 1;
	}

	if (pri >= DEV_NUMBUFFS)
	{
		printk("bad priority in dev_queue_xmit.\n");
		pri = 1;
	}

	/* If the hardware header is unresolved (awaiting ARP), the resolver
	   keeps the buffer and we are done for now. */
	if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
		return;
	}

	save_flags(flags);
	cli();
	if (!where) {
#ifdef CONFIG_SLAVE_BALANCING
		skb->in_dev_queue=1;
#endif
		/* Put the new frame at the tail, then take the head frame:
		   preserves FIFO order within the priority band. */
		skb_queue_tail(dev->buffs + pri,skb);
		skb_device_unlock(skb);		/* safe: it is on the queue now */
		skb = skb_dequeue(dev->buffs + pri);
		skb_device_lock(skb);		/* lock the one we will transmit */
#ifdef CONFIG_SLAVE_BALANCING
		skb->in_dev_queue=0;
#endif
	}
	restore_flags(flags);

	/* Feed a copy of each outgoing frame to the ETH_P_ALL taps
	   (but never for retransmits, to avoid duplicates). */
	if(!where)
	{
		for (nitcount = dev_nit, ptype = ptype_base; nitcount > 0 && ptype != NULL; ptype = ptype->next)
		{
			if (ptype->type == htons(ETH_P_ALL)) {
				struct sk_buff *skb2;
				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
					break;
				ptype->func(skb2, skb->dev, ptype);
				nitcount--;
			}
		}
	}
	/* Hand the frame to the driver; 0 means it was accepted. */
	if (dev->hard_start_xmit(skb, dev) == 0) {
		return;
	}

	/* Driver was busy: push the frame back on the HEAD of its queue
	   so ordering is preserved for the next attempt. */
	cli();
#ifdef CONFIG_SLAVE_BALANCING
	skb->in_dev_queue=1;
	dev->pkt_queue++;
#endif
	skb_device_unlock(skb);
	skb_queue_head(dev->buffs + pri,skb);
	restore_flags(flags);
}
459
460
461
462
463
464
465
466 void netif_rx(struct sk_buff *skb)
467 {
468 static int dropping = 0;
469
470
471
472
473
474
475 skb->sk = NULL;
476 skb->free = 1;
477 if(skb->stamp.tv_sec==0)
478 skb->stamp = xtime;
479
480
481
482
483
484 if (!backlog_size)
485 dropping = 0;
486 else if (backlog_size > 100)
487 dropping = 1;
488
489 if (dropping)
490 {
491 kfree_skb(skb, FREE_READ);
492 return;
493 }
494
495
496
497
498
499 IS_SKB(skb);
500 skb_queue_tail(&backlog,skb);
501 backlog_size++;
502
503
504
505
506
507
508 mark_bh(NET_BH);
509 return;
510 }
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
/*
 *	Legacy receive entry point for drivers that hand us raw bytes
 *	rather than an sk_buff.
 *
 *	buff	- either raw data in the device's (circular) receive ring,
 *		  or an sk_buff pointer when IN_SKBUFF is set in flags.
 *	len	- length of the frame.
 *	flags	- IN_SKBUFF selects the sk_buff interpretation of buff.
 *	dev	- receiving device.
 *
 *	Returns 1 if the frame was dropped (bad args or no memory),
 *	0 once it has been queued via netif_rx().
 */
int dev_rint(unsigned char *buff, long len, int flags, struct device *dev)
{
	static int dropping = 0;
	struct sk_buff *skb = NULL;
	unsigned char *to;
	int amount, left;
	int len2;

	if (dev == NULL || buff == NULL || len <= 0)
		return(1);

	if (flags & IN_SKBUFF)
	{
		/* Driver already built an sk_buff for us. */
		skb = (struct sk_buff *) buff;
	}
	else
	{
		/* If we were dropping for lack of memory, keep dropping
		   until the backlog has drained completely. */
		if (dropping)
		{
			if (skb_peek(&backlog) != NULL)
				return(1);
			printk("INET: dev_rint: no longer dropping packets.\n");
			dropping = 0;
		}

		skb = alloc_skb(len, GFP_ATOMIC);
		if (skb == NULL)
		{
			printk("dev_rint: packet dropped on %s (no memory) !\n",
			       dev->name);
			dropping = 1;
			return(1);
		}

		/* Copy the frame out of the device's receive ring into the
		   new buffer, wrapping at rmem_end back to rmem_start. */
		to = skb->data;
		left = len;

		len2 = len;
		while (len2 > 0)
		{
			/* Bytes available before the ring wraps. */
			amount = min(len2, (unsigned long) dev->rmem_end -
						(unsigned long) buff);
			memcpy(to, buff, amount);
			len2 -= amount;
			left -= amount;
			buff += amount;
			to += amount;
			if ((unsigned long) buff == dev->rmem_end)
				buff = (unsigned char *) dev->rmem_start;
		}
	}

	/* Fill in the bookkeeping the upper layers expect. */
	skb->len = len;
	skb->dev = dev;
	skb->free = 1;

	netif_rx(skb);

	/* Frame accepted. */
	return(0);
}
598
599
600
601
602
603
604 void dev_transmit(void)
605 {
606 struct device *dev;
607
608 for (dev = dev_base; dev != NULL; dev = dev->next)
609 {
610 if (dev->flags != 0 && !dev->tbusy) {
611
612
613
614 dev_tint(dev);
615 }
616 }
617 }
618
619
620
621
622
623
624
625
626
627
628
629
630
631
/* Nonzero while net_bh() is executing; used as a re-entry latch. */
volatile char in_bh = 0;

/*
 *	Report whether the network bottom half is currently running.
 *	Returns 1 if so, 0 otherwise.
 */
int in_net_bh()
{
	return in_bh != 0;
}
638
639
640
641
642
643
644
645
646
/*
 *	The network bottom half: drain the backlog queue, dispatching each
 *	packet to every matching protocol handler, and keep the transmit
 *	queues moving in between.
 *
 *	Runs with interrupts enabled while delivering packets; the backlog
 *	queue itself is only touched under cli().
 */
void net_bh(void *tmp)
{
	struct sk_buff *skb;
	struct packet_type *ptype;
	unsigned short type;
	unsigned char flag = 0;
	int nitcount;

	/* Atomically latch ourselves; if the bit was already set another
	   invocation is active and we must not re-enter.
	   NOTE(review): assumes set_bit returns the previous bit value —
	   standard kernel semantics, confirm against asm/bitops.h. */
	if (set_bit(1, (void*)&in_bh))
		return;

	/* Push any pending transmits out first. */
	dev_transmit();

	/* Interrupts off while we manipulate the backlog queue. */
	cli();

	while((skb=skb_dequeue(&backlog))!=NULL)
	{
		backlog_size--;

		/* Snapshot the tap count for this packet, then let
		   interrupts back in for the (slow) delivery work. */
		nitcount=dev_nit;
		flag=0;
		sti();

		/* Strip the link-level header: point h.raw at the network-
		   layer payload and shrink len accordingly. */
		skb->h.raw = skb->data + skb->dev->hard_header_len;
		skb->len -= skb->dev->hard_header_len;

		/* Ask the device what protocol this frame carries. */
		type = skb->dev->type_trans(skb, skb->dev);

		/* Offer the packet to every handler that matches its type,
		   plus all ETH_P_ALL taps.  A handler flagged 'copy' (or any
		   tap while others still want the buffer) gets a clone; the
		   last/only consumer gets the original. */
		for (ptype = ptype_base; ptype != NULL; ptype = ptype->next)
		{
			if (ptype->type == type || ptype->type == htons(ETH_P_ALL))
			{
				struct sk_buff *skb2;

				if (ptype->type == htons(ETH_P_ALL))
					nitcount--;
				if (ptype->copy || nitcount)
				{
					/* This handler must not consume the original. */
#ifdef OLD
					skb2 = alloc_skb(skb->len, GFP_ATOMIC);
					if (skb2 == NULL)
						continue;
					memcpy(skb2, skb, skb2->mem_len);
					skb2->mem_addr = skb2;
					skb2->h.raw = (unsigned char *)(
						(unsigned long) skb2 +
						(unsigned long) skb->h.raw -
						(unsigned long) skb
					);
					skb2->free = 1;
#else
					skb2=skb_clone(skb, GFP_ATOMIC);
					if(skb2==NULL)
						continue;
#endif
				}
				else
				{
					/* Sole remaining consumer: hand over the original. */
					skb2 = skb;
				}

				/* Remember someone took (a copy of) the packet. */
				flag = 1;

				ptype->func(skb2, skb->dev, ptype);
			}
		}

		/* Nobody wanted it: free the buffer ourselves. */
		if (!flag)
		{
			kfree_skb(skb, FREE_WRITE);
		}

		/* Handlers may have queued replies; push them out, then
		   mask interrupts again before touching the backlog. */
		dev_transmit();
		cli();
	}

	/* Release the re-entry latch. */
	in_bh = 0;
	sti();

	/* One final transmit kick now that we are done. */
	dev_transmit();
}
809
810
811
812
813
814
815
/*
 *	Retransmit helper: re-feed every queued buffer on the device back
 *	through dev_queue_xmit(), highest priority band first, until the
 *	queues are empty or the transmitter reports busy.
 *
 *	The negative priority (-i - 1) tells dev_queue_xmit() this is a
 *	retransmit of an already-locked buffer.
 */
void dev_tint(struct device *dev)
{
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);
	for(i = 0;i < DEV_NUMBUFFS; i++)
	{
		/* Queue manipulation happens with interrupts off. */
		cli();
		while((skb=skb_dequeue(&dev->buffs[i]))!=NULL)
		{
			/* Lock the buffer before re-enabling interrupts so it
			   cannot be freed underneath us. */
			skb_device_lock(skb);
			restore_flags(flags);

			/* Hand it back to the transmit path as a retransmit. */
			dev_queue_xmit(skb,dev,-i - 1);

			/* Transmitter saturated: stop; dev_queue_xmit has
			   already requeued the frame. */
			if (dev->tbusy)
				return;
			cli();
		}
	}
	restore_flags(flags);
}
857
858
859
860
861
862
863
864
865 static int dev_ifconf(char *arg)
866 {
867 struct ifconf ifc;
868 struct ifreq ifr;
869 struct device *dev;
870 char *pos;
871 int len;
872 int err;
873
874
875
876
877
878 err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
879 if(err)
880 return err;
881 memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
882 len = ifc.ifc_len;
883 pos = ifc.ifc_buf;
884
885
886
887
888
889
890 err=verify_area(VERIFY_WRITE,pos,len);
891 if(err)
892 return err;
893
894
895
896
897
898 for (dev = dev_base; dev != NULL; dev = dev->next)
899 {
900 if(!(dev->flags & IFF_UP))
901 continue;
902 memset(&ifr, 0, sizeof(struct ifreq));
903 strcpy(ifr.ifr_name, dev->name);
904 (*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
905 (*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
906
907
908
909
910
911 memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
912 pos += sizeof(struct ifreq);
913 len -= sizeof(struct ifreq);
914
915
916
917
918
919 if (len < sizeof(struct ifreq))
920 break;
921 }
922
923
924
925
926
927 ifc.ifc_len = (pos - ifc.ifc_buf);
928 ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
929 memcpy_tofs(arg, &ifc, sizeof(struct ifconf));
930
931
932
933
934
935 return(pos - arg);
936 }
937
938
939
940
941
942
943
944 static int sprintf_stats(char *buffer, struct device *dev)
945 {
946 struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
947 int size;
948
949 if (stats)
950 size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
951 dev->name,
952 stats->rx_packets, stats->rx_errors,
953 stats->rx_dropped + stats->rx_missed_errors,
954 stats->rx_fifo_errors,
955 stats->rx_length_errors + stats->rx_over_errors
956 + stats->rx_crc_errors + stats->rx_frame_errors,
957 stats->tx_packets, stats->tx_errors, stats->tx_dropped,
958 stats->tx_fifo_errors, stats->collisions,
959 stats->tx_carrier_errors + stats->tx_aborted_errors
960 + stats->tx_window_errors + stats->tx_heartbeat_errors);
961 else
962 size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);
963
964 return size;
965 }
966
967
968
969
970
971
/*
 *	/proc read handler: produce the interface statistics table.
 *
 *	buffer	- page to format into.
 *	start	- out: where within buffer the requested window begins.
 *	offset	- byte offset into the virtual file the caller wants.
 *	length	- number of bytes the caller wants.
 *
 *	Returns the number of bytes available at *start (standard procfs
 *	windowing protocol: 'begin' tracks the file offset of buffer[0],
 *	and formatting stops once the window has been passed).
 */
int dev_get_info(char *buffer, char **start, off_t offset, int length)
{
	int len=0;
	off_t begin=0;
	off_t pos=0;
	int size;

	struct device *dev;

	/* Table header. */
	size = sprintf(buffer, "Inter-| Receive | Transmit\n"
			       " face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");

	pos+=size;
	len+=size;

	/* One line per device. */
	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		size = sprintf_stats(buffer+len, dev);
		len+=size;
		pos=begin+len;

		/* Everything so far is before the requested window:
		   discard it and restart the buffer. */
		if(pos<offset)
		{
			len=0;
			begin=pos;
		}
		/* Past the end of the window: stop formatting. */
		if(pos>offset+length)
			break;
	}

	/* Point the caller at the start of its window and clamp. */
	*start=buffer+(offset-begin);
	len-=(offset-begin);
	if(len>length)
		len=length;
	return len;
}
1010
1011
1012
1013
1014
1015
/*
 *	Sanity-check a netmask (network byte order) against an address.
 *	Returns 1 if the address has bits set outside the mask, or if the
 *	mask is not a contiguous run of one-bits from the top; 0 if OK.
 */
static inline int bad_mask(unsigned long mask, unsigned long addr)
{
	unsigned long inv = ~mask;

	/* Host bits set in the address? */
	if (addr & inv)
		return 1;

	/* In host order a valid mask's complement is 2^k - 1, i.e. it
	   shares no bits with its successor. */
	inv = ntohl(inv);
	if (inv & (inv + 1))
		return 1;

	return 0;
}
1025
1026
1027
1028
1029
1030
1031
1032
1033 static int dev_ifsioc(void *arg, unsigned int getset)
1034 {
1035 struct ifreq ifr;
1036 struct device *dev;
1037 int ret;
1038
1039
1040
1041
1042
1043 int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
1044 if(err)
1045 return err;
1046
1047 memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));
1048
1049
1050
1051
1052
1053 if ((dev = dev_get(ifr.ifr_name)) == NULL)
1054 return(-ENODEV);
1055
1056 switch(getset)
1057 {
1058 case SIOCGIFFLAGS:
1059 ifr.ifr_flags = dev->flags;
1060 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1061 ret = 0;
1062 break;
1063 case SIOCSIFFLAGS:
1064 {
1065 int old_flags = dev->flags;
1066 #ifdef CONFIG_SLAVE_BALANCING
1067 if(dev->flags&IFF_SLAVE)
1068 return -EBUSY;
1069 #endif
1070 dev->flags = ifr.ifr_flags & (
1071 IFF_UP | IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
1072 IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
1073 IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER);
1074 #ifdef CONFIG_SLAVE_BALANCING
1075 if(!(dev->flags&IFF_MASTER) && dev->slave)
1076 {
1077 dev->slave->flags&=~IFF_SLAVE;
1078 dev->slave=NULL;
1079 }
1080 #endif
1081
1082 if( dev->set_multicast_list!=NULL)
1083 {
1084
1085
1086
1087
1088
1089 if ( (old_flags & IFF_PROMISC) && ((dev->flags & IFF_PROMISC) == 0))
1090 dev->set_multicast_list(dev,0,NULL);
1091
1092
1093
1094
1095
1096 if ( (dev->flags & IFF_PROMISC) && ((old_flags & IFF_PROMISC) == 0))
1097 dev->set_multicast_list(dev,-1,NULL);
1098 }
1099
1100
1101
1102
1103
1104 if ((old_flags & IFF_UP) && ((dev->flags & IFF_UP) == 0))
1105 {
1106 ret = dev_close(dev);
1107 }
1108 else
1109 {
1110
1111
1112
1113
1114 ret = (! (old_flags & IFF_UP) && (dev->flags & IFF_UP))
1115 ? dev_open(dev) : 0;
1116
1117
1118
1119 if(ret<0)
1120 dev->flags&=~IFF_UP;
1121 }
1122 }
1123 break;
1124
1125 case SIOCGIFADDR:
1126 (*(struct sockaddr_in *)
1127 &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
1128 (*(struct sockaddr_in *)
1129 &ifr.ifr_addr).sin_family = dev->family;
1130 (*(struct sockaddr_in *)
1131 &ifr.ifr_addr).sin_port = 0;
1132 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1133 ret = 0;
1134 break;
1135
1136 case SIOCSIFADDR:
1137 dev->pa_addr = (*(struct sockaddr_in *)
1138 &ifr.ifr_addr).sin_addr.s_addr;
1139 dev->family = ifr.ifr_addr.sa_family;
1140
1141 #ifdef CONFIG_INET
1142
1143
1144 dev->pa_mask = ip_get_mask(dev->pa_addr);
1145 #endif
1146 dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
1147 ret = 0;
1148 break;
1149
1150 case SIOCGIFBRDADDR:
1151 (*(struct sockaddr_in *)
1152 &ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
1153 (*(struct sockaddr_in *)
1154 &ifr.ifr_broadaddr).sin_family = dev->family;
1155 (*(struct sockaddr_in *)
1156 &ifr.ifr_broadaddr).sin_port = 0;
1157 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1158 ret = 0;
1159 break;
1160
1161 case SIOCSIFBRDADDR:
1162 dev->pa_brdaddr = (*(struct sockaddr_in *)
1163 &ifr.ifr_broadaddr).sin_addr.s_addr;
1164 ret = 0;
1165 break;
1166
1167 case SIOCGIFDSTADDR:
1168 (*(struct sockaddr_in *)
1169 &ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
1170 (*(struct sockaddr_in *)
1171 &ifr.ifr_broadaddr).sin_family = dev->family;
1172 (*(struct sockaddr_in *)
1173 &ifr.ifr_broadaddr).sin_port = 0;
1174 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1175 ret = 0;
1176 break;
1177
1178 case SIOCSIFDSTADDR:
1179 dev->pa_dstaddr = (*(struct sockaddr_in *)
1180 &ifr.ifr_dstaddr).sin_addr.s_addr;
1181 ret = 0;
1182 break;
1183
1184 case SIOCGIFNETMASK:
1185 (*(struct sockaddr_in *)
1186 &ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
1187 (*(struct sockaddr_in *)
1188 &ifr.ifr_netmask).sin_family = dev->family;
1189 (*(struct sockaddr_in *)
1190 &ifr.ifr_netmask).sin_port = 0;
1191 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1192 ret = 0;
1193 break;
1194
1195 case SIOCSIFNETMASK:
1196 {
1197 unsigned long mask = (*(struct sockaddr_in *)
1198 &ifr.ifr_netmask).sin_addr.s_addr;
1199 ret = -EINVAL;
1200
1201
1202
1203 if (bad_mask(mask,0))
1204 break;
1205 dev->pa_mask = mask;
1206 ret = 0;
1207 }
1208 break;
1209
1210 case SIOCGIFMETRIC:
1211
1212 ifr.ifr_metric = dev->metric;
1213 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1214 ret = 0;
1215 break;
1216
1217 case SIOCSIFMETRIC:
1218 dev->metric = ifr.ifr_metric;
1219 ret = 0;
1220 break;
1221
1222 case SIOCGIFMTU:
1223 ifr.ifr_mtu = dev->mtu;
1224 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1225 ret = 0;
1226 break;
1227
1228 case SIOCSIFMTU:
1229
1230
1231
1232
1233
1234 if(ifr.ifr_mtu<1 || ifr.ifr_mtu>3800)
1235 return -EINVAL;
1236 dev->mtu = ifr.ifr_mtu;
1237 ret = 0;
1238 break;
1239
1240 case SIOCGIFMEM:
1241
1242 printk("NET: ioctl(SIOCGIFMEM, 0x%08X)\n", (int)arg);
1243 ret = -EINVAL;
1244 break;
1245
1246 case SIOCSIFMEM:
1247 printk("NET: ioctl(SIOCSIFMEM, 0x%08X)\n", (int)arg);
1248 ret = -EINVAL;
1249 break;
1250
1251 case OLD_SIOCGIFHWADDR:
1252 memcpy(ifr.old_ifr_hwaddr,dev->dev_addr, MAX_ADDR_LEN);
1253 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1254 ret=0;
1255 break;
1256
1257 case SIOCGIFHWADDR:
1258 memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
1259 ifr.ifr_hwaddr.sa_family=dev->type;
1260 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1261 ret=0;
1262 break;
1263
1264 case SIOCSIFHWADDR:
1265 if(dev->set_mac_address==NULL)
1266 return -EOPNOTSUPP;
1267 if(ifr.ifr_hwaddr.sa_family!=dev->type)
1268 return -EINVAL;
1269 ret=dev->set_mac_address(dev,ifr.ifr_hwaddr.sa_data);
1270 break;
1271
1272 case SIOCGIFMAP:
1273 ifr.ifr_map.mem_start=dev->mem_start;
1274 ifr.ifr_map.mem_end=dev->mem_end;
1275 ifr.ifr_map.base_addr=dev->base_addr;
1276 ifr.ifr_map.irq=dev->irq;
1277 ifr.ifr_map.dma=dev->dma;
1278 ifr.ifr_map.port=dev->if_port;
1279 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1280 ret=0;
1281 break;
1282
1283 case SIOCSIFMAP:
1284 if(dev->set_config==NULL)
1285 return -EOPNOTSUPP;
1286 return dev->set_config(dev,&ifr.ifr_map);
1287
1288 case SIOCGIFSLAVE:
1289 #ifdef CONFIG_SLAVE_BALANCING
1290 if(dev->slave==NULL)
1291 return -ENOENT;
1292 strncpy(ifr.ifr_name,dev->name,sizeof(ifr.ifr_name));
1293 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1294 ret=0;
1295 #else
1296 return -ENOENT;
1297 #endif
1298 break;
1299 #ifdef CONFIG_SLAVE_BALANCING
1300 case SIOCSIFSLAVE:
1301 {
1302
1303
1304
1305
1306
1307 unsigned long flags;
1308 struct device *slave=dev_get(ifr.ifr_slave);
1309 save_flags(flags);
1310 if(slave==NULL)
1311 {
1312 return -ENODEV;
1313 }
1314 cli();
1315 if((slave->flags&(IFF_UP|IFF_RUNNING))!=(IFF_UP|IFF_RUNNING))
1316 {
1317 restore_flags(flags);
1318 return -EINVAL;
1319 }
1320 if(dev->flags&IFF_SLAVE)
1321 {
1322 restore_flags(flags);
1323 return -EBUSY;
1324 }
1325 if(dev->slave!=NULL)
1326 {
1327 restore_flags(flags);
1328 return -EBUSY;
1329 }
1330 if(slave->flags&IFF_SLAVE)
1331 {
1332 restore_flags(flags);
1333 return -EBUSY;
1334 }
1335 dev->slave=slave;
1336 slave->flags|=IFF_SLAVE;
1337 dev->flags|=IFF_MASTER;
1338 restore_flags(flags);
1339 ret=0;
1340 }
1341 break;
1342 #endif
1343
1344
1345
1346
1347 default:
1348 if((getset >= SIOCDEVPRIVATE) &&
1349 (getset <= (SIOCDEVPRIVATE + 15))) {
1350 if(dev->do_ioctl==NULL)
1351 return -EOPNOTSUPP;
1352 ret=dev->do_ioctl(dev, &ifr, getset);
1353 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1354 break;
1355 }
1356
1357 ret = -EINVAL;
1358 }
1359 return(ret);
1360 }
1361
1362
1363
1364
1365
1366
1367
1368 int dev_ioctl(unsigned int cmd, void *arg)
1369 {
1370 switch(cmd)
1371 {
1372 case SIOCGIFCONF:
1373 (void) dev_ifconf((char *) arg);
1374 return 0;
1375
1376
1377
1378
1379
1380 case SIOCGIFFLAGS:
1381 case SIOCGIFADDR:
1382 case SIOCGIFDSTADDR:
1383 case SIOCGIFBRDADDR:
1384 case SIOCGIFNETMASK:
1385 case SIOCGIFMETRIC:
1386 case SIOCGIFMTU:
1387 case SIOCGIFMEM:
1388 case SIOCGIFHWADDR:
1389 case SIOCSIFHWADDR:
1390 case OLD_SIOCGIFHWADDR:
1391 case SIOCGIFSLAVE:
1392 case SIOCGIFMAP:
1393 return dev_ifsioc(arg, cmd);
1394
1395
1396
1397
1398
1399 case SIOCSIFFLAGS:
1400 case SIOCSIFADDR:
1401 case SIOCSIFDSTADDR:
1402 case SIOCSIFBRDADDR:
1403 case SIOCSIFNETMASK:
1404 case SIOCSIFMETRIC:
1405 case SIOCSIFMTU:
1406 case SIOCSIFMEM:
1407 case SIOCSIFMAP:
1408 case SIOCSIFSLAVE:
1409 if (!suser())
1410 return -EPERM;
1411 return dev_ifsioc(arg, cmd);
1412
1413 case SIOCSIFLINK:
1414 return -EINVAL;
1415
1416
1417
1418
1419
1420 default:
1421 if((cmd >= SIOCDEVPRIVATE) &&
1422 (cmd <= (SIOCDEVPRIVATE + 15))) {
1423 if (!suser())
1424 return -EPERM;
1425 return dev_ifsioc(arg, cmd);
1426 }
1427 return -EINVAL;
1428 }
1429 }
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442 void dev_init(void)
1443 {
1444 struct device *dev, *dev2;
1445
1446
1447
1448
1449
1450
1451
1452
1453 dev2 = NULL;
1454 for (dev = dev_base; dev != NULL; dev=dev->next)
1455 {
1456 if (dev->init && dev->init(dev))
1457 {
1458
1459
1460
1461
1462 if (dev2 == NULL)
1463 dev_base = dev->next;
1464 else
1465 dev2->next = dev->next;
1466 }
1467 else
1468 {
1469 dev2 = dev;
1470 }
1471 }
1472 }