This source file includes following definitions.
- min
- dev_add_pack
- dev_remove_pack
- dev_get
- dev_open
- dev_close
- register_netdevice_notifier
- unregister_netdevice_notifier
- dev_queue_xmit
- netif_rx
- dev_rint
- dev_transmit
- in_net_bh
- net_bh
- dev_tint
- dev_ifconf
- sprintf_stats
- dev_get_info
- bad_mask
- dev_ifsioc
- dev_ioctl
- net_dev_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47 #include <asm/segment.h>
48 #include <asm/system.h>
49 #include <asm/bitops.h>
50 #include <linux/config.h>
51 #include <linux/types.h>
52 #include <linux/kernel.h>
53 #include <linux/sched.h>
54 #include <linux/string.h>
55 #include <linux/mm.h>
56 #include <linux/socket.h>
57 #include <linux/sockios.h>
58 #include <linux/in.h>
59 #include <linux/errno.h>
60 #include <linux/interrupt.h>
61 #include <linux/if_ether.h>
62 #include <linux/inet.h>
63 #include <linux/netdevice.h>
64 #include <linux/etherdevice.h>
65 #include <linux/notifier.h>
66 #include <net/ip.h>
67 #include <net/route.h>
68 #include <linux/skbuff.h>
69 #include <net/sock.h>
70 #include <net/arp.h>
71 #include <linux/proc_fs.h>
72 #include <linux/stat.h>
73
74
75
76
77
78
/*
 *	Protocol handler lists: handlers for a specific ethertype are
 *	chained on ptype_base[ntohs(type) & 15]; ETH_P_ALL handlers
 *	(packet taps / sniffers) live on the separate ptype_all list.
 */
struct packet_type *ptype_base[16];
struct packet_type *ptype_all = NULL;

/* Count of device-lock holders (see dev_lock_wait / SIOCSIFFLAGS). */
int dev_lockct=0;

/* Notifier chain run on device state changes (NETDEV_UP / NETDEV_DOWN). */
struct notifier_block *netdev_chain=NULL;

/*
 *	Queue of packets received at interrupt time (filled by netif_rx),
 *	waiting for net_bh() to deliver them to the protocol handlers.
 *	Statically initialised to an empty circular list.
 */
static struct sk_buff_head backlog =
{
	(struct sk_buff *)&backlog, (struct sk_buff *)&backlog
#if CONFIG_SKB_CHECK
	,SK_HEAD_SKB
#endif
};

/* Number of frames currently sitting on the backlog queue. */
static int backlog_size = 0;
112
113
114
115
116
/*
 *	Return the smaller of two unsigned long values.
 */
static __inline__ unsigned long min(unsigned long a, unsigned long b)
{
	if (b < a)
		return b;
	return a;
}
121
122
123
124
125
126
127
128
129
130
131
132
133 static int dev_nit=0;
134
135
136
137
138
139
140
141 void dev_add_pack(struct packet_type *pt)
142 {
143 int hash;
144 if(pt->type==htons(ETH_P_ALL))
145 {
146 dev_nit++;
147 pt->next=ptype_all;
148 ptype_all=pt;
149 }
150 else
151 {
152 hash=ntohs(pt->type)&15;
153 pt->next = ptype_base[hash];
154 ptype_base[hash] = pt;
155 }
156 }
157
158
159
160
161
162
163 void dev_remove_pack(struct packet_type *pt)
164 {
165 struct packet_type **pt1;
166 if(pt->type==htons(ETH_P_ALL))
167 {
168 dev_nit--;
169 pt1=&ptype_all;
170 }
171 else
172 pt1=&ptype_base[ntohs(pt->type)&15];
173 for(; (*pt1)!=NULL; pt1=&((*pt1)->next))
174 {
175 if(pt==(*pt1))
176 {
177 *pt1=pt->next;
178 return;
179 }
180 }
181 printk("dev_remove_pack: %p not found.\n", pt);
182 }
183
184
185
186
187
188
189
190
191
192
193
194 struct device *dev_get(const char *name)
195 {
196 struct device *dev;
197
198 for (dev = dev_base; dev != NULL; dev = dev->next)
199 {
200 if (strcmp(dev->name, name) == 0)
201 return(dev);
202 }
203 return(NULL);
204 }
205
206
207
208
209
210
/*
 *	Prepare an interface for use: call the driver's open routine and,
 *	if it succeeds, mark the device up, reload its multicast filters
 *	and tell interested parties via the netdev notifier chain.
 *	Returns 0 on success or the driver's error code.
 */
int dev_open(struct device *dev)
{
	int ret = 0;

	/* Let the driver bring the hardware up. */
	if (dev->open)
		ret = dev->open(dev);

	/* Only mark the device usable if the driver reported success. */
	if (ret == 0)
	{
		dev->flags |= (IFF_UP | IFF_RUNNING);
		/* Push the current multicast list down to the opened card. */
		dev_mc_upload(dev);
		/* Notify the protocol layers that the device is up. */
		notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
	}
	return(ret);
}
236
237
238
239
240
241
/*
 *	Take an interface down: stop the driver, clear the UP/RUNNING
 *	flags, run the NETDEV_DOWN notifiers, drop multicast state and
 *	protocol addresses, then purge every pending transmit queue.
 *	Always returns 0.
 */
int dev_close(struct device *dev)
{
	int ct=0;

	/* Only call the driver's stop routine if the device was up. */
	if ((dev->flags & IFF_UP) && dev->stop)
		dev->stop(dev);

	dev->flags&=~(IFF_UP|IFF_RUNNING);

	/* Let the protocol layers react before we wipe the addresses. */
	notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);

	/* Forget any multicast memberships held on this device. */
	dev_mc_discard(dev);

	/* Blank the protocol-level addresses bound to the interface. */
	dev->pa_addr = 0;
	dev->pa_dstaddr = 0;
	dev->pa_brdaddr = 0;
	dev->pa_mask = 0;

	/* Flush all priority queues; only free buffers we own (skb->free). */
	while(ct<DEV_NUMBUFFS)
	{
		struct sk_buff *skb;
		while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
			if(skb->free)
				kfree_skb(skb,FREE_WRITE);
		ct++;
	}
	return(0);
}
288
289
290
291
292
293
294
/*
 *	Register a callback to be run on device state changes
 *	(NETDEV_UP / NETDEV_DOWN events on the netdev chain).
 */
int register_netdevice_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&netdev_chain, nb);
}
299
/*
 *	Remove a previously registered netdev-chain callback.
 */
int unregister_netdevice_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&netdev_chain,nb);
}
304
305
306
307
308
309
310
311
312
313
314
/*
 *	Queue a buffer for transmission on a device.
 *
 *	pri >= 0 : normal send; skb is tail-queued on dev->buffs[pri] and
 *	           the head of that queue is taken for transmission, which
 *	           preserves FIFO order against earlier frames.
 *	pri <  0 : re-entry from dev_tint(); the real priority is -pri-1
 *	           and the queueing step is bypassed ("where" is set).
 *
 *	If the hardware refuses the frame (hard_start_xmit returns
 *	non-zero) the skb is put back at the head of its queue for retry.
 */
void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
	unsigned long flags;
	struct packet_type *ptype;
	int where = 0;		/* non-zero: retransmit from dev_tint(), skip queueing */

	/* Fresh submissions must hold the device lock on the skb. */
	if(pri>=0 && !skb_device_locked(skb))
		skb_device_lock(skb);
#if CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	skb->dev = dev;

	/* Negative priority marks a requeued frame from dev_tint(). */
	if (pri < 0)
	{
		pri = -pri-1;
		where = 1;
	}

#ifdef CONFIG_NET_DEBUG
	if (pri >= DEV_NUMBUFFS)
	{
		printk("bad priority in dev_queue_xmit.\n");
		pri = 1;
	}
#endif

	/*
	 *	If the link-level address has not been resolved yet, ask
	 *	the device to rebuild the header; a non-zero return means
	 *	the frame was consumed (e.g. handed to ARP) — drop out.
	 */
	if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
		return;
	}

	save_flags(flags);
	cli();
	if (!where)
	{
		/*
		 *	Tail-insert then take the head: strict FIFO even
		 *	against frames that were queued earlier.
		 */
		skb_queue_tail(dev->buffs + pri,skb);
		skb_device_unlock(skb);		/* on the queue, safe again */
		skb = skb_dequeue(dev->buffs + pri);
		skb_device_lock(skb);		/* new head needs locking down */
	}
	restore_flags(flags);

	/* Copy outgoing packets to any registered taps (ETH_P_ALL). */
	if(!where && dev_nit)
	{
		skb->stamp=xtime;
		for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next)
		{
			/*
			 *	Never hand a packet back to the socket it
			 *	originated from (ptype->data is that sock).
			 */
			if ((ptype->dev == dev || !ptype->dev) &&
			   ((struct sock *)ptype->data != skb->sk))
			{
				struct sk_buff *skb2;
				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
					break;
				skb2->h.raw = skb2->data + dev->hard_header_len;
				skb2->mac.raw = skb2->data;
				ptype->func(skb2, skb->dev, ptype);
			}
		}
	}
	start_bh_atomic();
	if (dev->hard_start_xmit(skb, dev) == 0) {
		/* The driver accepted the frame; it now owns the skb. */
		end_bh_atomic();
		return;
	}
	end_bh_atomic();

	/*
	 *	Transmission failed: put the skb back at the head of its
	 *	queue so it goes out first on the next attempt.
	 */
	cli();
	skb_device_unlock(skb);
	skb_queue_head(dev->buffs + pri,skb);
	restore_flags(flags);
}
411
412
413
414
415
416
417
/*
 *	Receive a packet from a device driver and queue it for the
 *	protocol levels.  Runs at interrupt time, so all real work is
 *	deferred to net_bh().  If the backlog grows past 300 frames we
 *	start dropping and keep dropping until it drains completely
 *	(simple hysteresis to survive receive livelock).
 */
void netif_rx(struct sk_buff *skb)
{
	static int dropping = 0;

	/* Driver-owned buffer: no socket yet, freeable by the net layer. */
	skb->sk = NULL;
	skb->free = 1;
	if(skb->stamp.tv_sec==0)
		skb->stamp = xtime;	/* stamp it if the driver did not */

	/* Congestion control: drop above 300 queued, resume when empty. */
	if (!backlog_size)
		dropping = 0;
	else if (backlog_size > 300)
		dropping = 1;

	if (dropping)
	{
		kfree_skb(skb, FREE_READ);
		return;
	}

#if CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	skb_queue_tail(&backlog,skb);
	backlog_size++;

	/* Kick the bottom half to process the backlog. */
#ifdef CONFIG_NET_RUNONIRQ
	net_bh();
#else
	mark_bh(NET_BH);
#endif
	return;
}
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
/*
 *	Backwards-compatible receive entry point for older drivers.
 *	Either accepts a ready-made sk_buff (flags & IN_SKBUFF, in which
 *	case buff is really the skb) or copies len bytes out of the
 *	driver's circular receive memory — buff may wrap at dev->rmem_end
 *	back to dev->rmem_start — into a freshly allocated skb.
 *	Returns 0 on success, 1 if the packet had to be dropped.
 */
int dev_rint(unsigned char *buff, long len, int flags, struct device *dev)
{
	static int dropping = 0;
	struct sk_buff *skb = NULL;
	unsigned char *to;
	int amount, left;
	int len2;

	if (dev == NULL || buff == NULL || len <= 0)
		return(1);

	if (flags & IN_SKBUFF)
	{
		skb = (struct sk_buff *) buff;	/* buff is already an skb */
	}
	else
	{
		if (dropping)
		{
			/* Keep dropping until the backlog fully drains. */
			if (skb_peek(&backlog) != NULL)
				return(1);
			printk("INET: dev_rint: no longer dropping packets.\n");
			dropping = 0;
		}

		skb = alloc_skb(len, GFP_ATOMIC);
		if (skb == NULL)
		{
			printk("dev_rint: packet dropped on %s (no memory) !\n",
			       dev->name);
			dropping = 1;
			return(1);
		}

		/*
		 *	Copy the packet out of the (possibly wrapping) ring
		 *	buffer into the skb's linear data area.
		 */
		to = skb_put(skb,len);
		left = len;

		len2 = len;
		while (len2 > 0)
		{
			/* Copy up to the wrap point, then restart at rmem_start. */
			amount = min(len2, (unsigned long) dev->rmem_end -
						(unsigned long) buff);
			memcpy(to, buff, amount);
			len2 -= amount;
			left -= amount;
			buff += amount;
			to += amount;
			if ((unsigned long) buff == dev->rmem_end)
				buff = (unsigned char *) dev->rmem_start;
		}
	}

	/* Hand the completed buffer to the upper-level receive queue. */
	skb->dev = dev;
	skb->free = 1;

	netif_rx(skb);
	return(0);
}
554
555
556
557
558
559
560 void dev_transmit(void)
561 {
562 struct device *dev;
563
564 for (dev = dev_base; dev != NULL; dev = dev->next)
565 {
566 if (dev->flags != 0 && !dev->tbusy) {
567
568
569
570 dev_tint(dev);
571 }
572 }
573 }
574
575
576
577
578
579
580
581
582
583
584
585
586
587
/*
 *	Flag set (via set_bit) while net_bh() is running; in_net_bh()
 *	lets other code ask whether it is executing inside the network
 *	bottom half.
 */
volatile unsigned long in_bh = 0;

int in_net_bh()
{
	return (in_bh != 0);
}
594
595
596
597
598
599
600
601
602
/*
 *	The network bottom half.  Drains the receive backlog filled by
 *	netif_rx(), delivering each frame first to all ETH_P_ALL taps and
 *	then to every matching protocol handler.  Handlers other than the
 *	last matching one receive clones; the final handler consumes the
 *	original skb.  Device transmit queues are kicked before and after
 *	the receive pass.  Re-entry is prevented with the in_bh flag.
 */
void net_bh(void *tmp)
{
	struct sk_buff *skb;
	struct packet_type *ptype;
	struct packet_type *pt_prev;
	unsigned short type;

	/* Atomically check-and-set the BUSY flag; bail if already running. */
	if (set_bit(1, (void*)&in_bh))
		return;

	/*
	 *	Clear the transmit decks first so sends queued while we
	 *	process input have room to go out.
	 */
	dev_transmit();

	/* Interrupts off around the dequeue; netif_rx also touches backlog. */
	cli();

	/*
	 *	Process the backlog until it is empty.
	 */
	while((skb=skb_dequeue(&backlog))!=NULL)
	{
		backlog_size--;

		sti();	/* let interrupts refill the backlog while we deliver */

		/*
		 *	skb->data points at the network-level header here;
		 *	record it as the raw header position.
		 */
		skb->h.raw = skb->data;

		/* Fetch the packet protocol ID (network byte order). */
		type = skb->protocol;

		/*
		 *	Two handler lists: the ptype_all taps (normally
		 *	empty) and the hashed per-protocol list.  pt_prev
		 *	trails one behind so that every handler but the
		 *	last gets a clone and the last gets skb itself.
		 */
		pt_prev = NULL;
		for (ptype = ptype_all; ptype!=NULL; ptype=ptype->next)
		{
			if(pt_prev)
			{
				struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC);
				if(skb2)
					pt_prev->func(skb2,skb->dev, pt_prev);
			}
			pt_prev=ptype;
		}

		for (ptype = ptype_base[ntohs(type)&15]; ptype != NULL; ptype = ptype->next)
		{
			if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev))
			{
				/*
				 *	A match is already pending: feed it a
				 *	clone, then remember the new match.
				 */
				if(pt_prev)
				{
					struct sk_buff *skb2;

					skb2=skb_clone(skb, GFP_ATOMIC);

					/* Clone may fail under memory pressure;
					 * that handler simply misses the frame. */
					if(skb2)
						pt_prev->func(skb2, skb->dev, pt_prev);
				}

				pt_prev=ptype;
			}
		}

		/*
		 *	Last matching handler consumes the original skb;
		 *	if nothing matched, the packet is unknown — free it.
		 */
		if(pt_prev)
			pt_prev->func(skb, skb->dev, pt_prev);
		else
			kfree_skb(skb, FREE_WRITE);

		/*
		 *	Optionally flush transmit queues after every frame
		 *	(measured to slow things down, hence config-gated).
		 */
#ifdef CONFIG_XMIT_EVERY
		dev_transmit();
#endif
		cli();	/* protect the dequeue on the next iteration */
	}

	/* Queue empty: drop the BUSY flag and re-enable interrupts. */
	in_bh = 0;
	sti();

	/* One last transmit flush on the way out. */
	dev_transmit();
}
743
744
745
746
747
748
749
/*
 *	Called when a device is free to transmit again.  Walks the
 *	device's priority queues (highest priority first) and feeds each
 *	pending frame back through dev_queue_xmit() with a negative
 *	priority so it is transmitted rather than re-queued.  Stops as
 *	soon as the device reports busy again.
 */
void dev_tint(struct device *dev)
{
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);

	/* Work the queues in priority order. */
	for(i = 0;i < DEV_NUMBUFFS; i++)
	{
		/* Interrupts off while we pull from the queue. */
		cli();
		while((skb=skb_dequeue(&dev->buffs[i]))!=NULL)
		{
			/* Stop anyone freeing the buffer while we retransmit it. */
			skb_device_lock(skb);
			restore_flags(flags);

			/*
			 *	Negative priority: transmit now; on failure
			 *	dev_queue_xmit re-queues at the head itself.
			 */
			dev_queue_xmit(skb,dev,-i - 1);

			/* Device filled up again: give up for now. */
			if (dev->tbusy)
				return;
			cli();
		}
	}
	restore_flags(flags);
}
791
792
793
794
795
796
797
798
/*
 *	Service the SIOCGIFCONF ioctl: copy an ifreq record (name plus
 *	primary address) for every interface that is UP into the user
 *	buffer described by the struct ifconf at arg, then write the
 *	updated ifconf (actual length used) back to user space.
 *
 *	NOTE(review): the final return value (pos - arg) subtracts the
 *	ifconf pointer from a user buffer pointer and is meaningless;
 *	the only caller, dev_ioctl(), discards it.
 */
static int dev_ifconf(char *arg)
{
	struct ifconf ifc;
	struct ifreq ifr;
	struct device *dev;
	char *pos;
	int len;
	int err;

	/* Fetch the caller's control block. */
	err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
	if(err)
		return err;
	memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
	len = ifc.ifc_len;
	pos = ifc.ifc_buf;

	/* Make sure the whole result buffer is writable up front. */
	err=verify_area(VERIFY_WRITE,pos,len);
	if(err)
		return err;

	/* Walk the device list, writing an info block for each UP device. */
	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		if(!(dev->flags & IFF_UP))	/* downed devices are skipped */
			continue;
		memset(&ifr, 0, sizeof(struct ifreq));
		strcpy(ifr.ifr_name, dev->name);
		(*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
		(*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;

		/* Out of room in the caller's buffer? */
		if (len < sizeof(struct ifreq))
			break;

		/* Copy this record out and advance. */
		memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
		pos += sizeof(struct ifreq);
		len -= sizeof(struct ifreq);
	}

	/* Write the updated control block (actual bytes used) back. */
	ifc.ifc_len = (pos - ifc.ifc_buf);
	ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
	memcpy_tofs(arg, &ifc, sizeof(struct ifconf));

	/* See NOTE above: this value is bogus but unused. */
	return(pos - arg);
}
871
872
873
874
875
876
877
878 static int sprintf_stats(char *buffer, struct device *dev)
879 {
880 struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
881 int size;
882
883 if (stats)
884 size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
885 dev->name,
886 stats->rx_packets, stats->rx_errors,
887 stats->rx_dropped + stats->rx_missed_errors,
888 stats->rx_fifo_errors,
889 stats->rx_length_errors + stats->rx_over_errors
890 + stats->rx_crc_errors + stats->rx_frame_errors,
891 stats->tx_packets, stats->tx_errors, stats->tx_dropped,
892 stats->tx_fifo_errors, stats->collisions,
893 stats->tx_carrier_errors + stats->tx_aborted_errors
894 + stats->tx_window_errors + stats->tx_heartbeat_errors);
895 else
896 size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);
897
898 return size;
899 }
900
901
902
903
904
905
/*
 *	/proc/net/dev read handler: produce the two header lines followed
 *	by one sprintf_stats() line per device, honouring the byte window
 *	[offset, offset+length) requested by the procfs layer.
 */
int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
	int len=0;		/* bytes kept in buffer so far */
	off_t begin=0;		/* file offset of buffer[0] */
	off_t pos=0;		/* current file offset */
	int size;

	struct device *dev;

	size = sprintf(buffer, "Inter-| Receive | Transmit\n"
		" face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");

	pos+=size;
	len+=size;

	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		size = sprintf_stats(buffer+len, dev);
		len+=size;
		pos=begin+len;

		if(pos<offset)
		{
			/* Everything so far precedes the window: discard it. */
			len=0;
			begin=pos;
		}
		if(pos>offset+length)	/* window filled — stop early */
			break;
	}

	/* Trim the output to exactly the requested window. */
	*start=buffer+(offset-begin);
	len-=(offset-begin);
	if(len>length)
		len=length;
	return len;
}
944
945
946
947
948
949
/*
 *	Validate a netmask (network byte order) against an address.
 *	Returns 1 if addr has bits outside the mask, or if the mask is
 *	not a contiguous run of leading one bits; 0 if the mask is sane.
 */
static inline int bad_mask(unsigned long mask, unsigned long addr)
{
	unsigned long host_bits = ~mask;	/* host part of the mask */

	if (addr & host_bits)
		return 1;			/* address spills into host bits */

	/* In host order a valid host part is 2^k - 1: x & (x+1) == 0. */
	host_bits = ntohl(host_bits);
	if (host_bits & (host_bits + 1))
		return 1;
	return 0;
}
959
960
961
962
963
964
965
966
/*
 *	Perform the SIOCxIFxxx device ioctls: copy the struct ifreq in
 *	from user space, look up the named device, carry out the get/set
 *	request, and — for the "get" calls, via the shared rarok: exit —
 *	copy the result back to user space.
 *
 *	Returns 0 on success or a negative errno.
 */
static int dev_ifsioc(void *arg, unsigned int getset)
{
	struct ifreq ifr;
	struct device *dev;
	int ret;

	/* Fetch the caller's info block into kernel space. */
	int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
	if(err)
		return err;

	memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));

	/* See which interface the caller is talking about. */
	if ((dev = dev_get(ifr.ifr_name)) == NULL)
		return(-ENODEV);

	switch(getset)
	{
		case SIOCGIFFLAGS:	/* Get interface flags */
			ifr.ifr_flags = dev->flags;
			goto rarok;

		case SIOCSIFFLAGS:	/* Set interface flags */
			{
				int old_flags = dev->flags;

				/*
				 *	We are not allowed to potentially
				 *	close/unload a device until we get
				 *	this lock.
				 */
				dev_lock_wait();

				/*
				 *	Set the user-controllable flags;
				 *	IFF_UP is handled separately below.
				 */
				dev->flags = (ifr.ifr_flags & (
					IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
					IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
					IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER
					| IFF_MULTICAST)) | (dev->flags & IFF_UP);

				/* Reload the multicast list for the new flags. */
				dev_mc_upload(dev);

				/*
				 *	Handle IFF_UP transitions explicitly:
				 *	bring the device up or down only when
				 *	the caller actually toggled the bit.
				 */
				if ((old_flags^ifr.ifr_flags)&IFF_UP)
				{
					if(old_flags&IFF_UP)	/* gone down */
						ret=dev_close(dev);
					else			/* come up */
					{
						ret=dev_open(dev);
						if(ret<0)
							dev->flags&=~IFF_UP;	/* open failed */
					}
				}
				else
					ret=0;

				/* Reload multicast state again post-transition. */
				dev_mc_upload(dev);
			}
			break;

		case SIOCGIFADDR:	/* Get interface address (or hw address for AF_UNSPEC) */
			if(ifr.ifr_addr.sa_family==AF_UNSPEC)
			{
				memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
				ifr.ifr_hwaddr.sa_family=dev->type;
				goto rarok;
			}
			else
			{
				(*(struct sockaddr_in *)
					&ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
				(*(struct sockaddr_in *)
					&ifr.ifr_addr).sin_family = dev->family;
				(*(struct sockaddr_in *)
					&ifr.ifr_addr).sin_port = 0;
			}
			goto rarok;

		case SIOCSIFADDR:	/* Set interface address (and family) */

			/*
			 *	BSDism: SIOCSIFADDR with family AF_UNSPEC
			 *	sets the physical (hardware) address.
			 */
			if(ifr.ifr_addr.sa_family==AF_UNSPEC)
			{
				if(dev->set_mac_address==NULL)
					return -EOPNOTSUPP;
				ret=dev->set_mac_address(dev,&ifr.ifr_addr);
			}
			else
			{
				dev->pa_addr = (*(struct sockaddr_in *)
					&ifr.ifr_addr).sin_addr.s_addr;
				dev->family = ifr.ifr_addr.sa_family;

#ifdef CONFIG_INET
				/* Derive the default classful netmask for the address. */
				dev->pa_mask = ip_get_mask(dev->pa_addr);
#endif
				dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
				ret = 0;
			}
			break;

		case SIOCGIFBRDADDR:	/* Get the broadcast address */
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_port = 0;
			goto rarok;

		case SIOCSIFBRDADDR:	/* Set the broadcast address */
			dev->pa_brdaddr = (*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr;
			ret = 0;
			break;

		case SIOCGIFDSTADDR:	/* Get the point-to-point destination */
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_port = 0;
			goto rarok;

		case SIOCSIFDSTADDR:	/* Set the point-to-point destination */
			dev->pa_dstaddr = (*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr;
			ret = 0;
			break;

		case SIOCGIFNETMASK:	/* Get the netmask */
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_port = 0;
			goto rarok;

		case SIOCSIFNETMASK:	/* Set the netmask */
			{
				unsigned long mask = (*(struct sockaddr_in *)
					&ifr.ifr_netmask).sin_addr.s_addr;
				ret = -EINVAL;
				/* The mask must be a legal contiguous netmask. */
				if (bad_mask(mask,0))
					break;
				dev->pa_mask = mask;
				ret = 0;
			}
			break;

		case SIOCGIFMETRIC:	/* Get the routing metric */
			ifr.ifr_metric = dev->metric;
			goto rarok;

		case SIOCSIFMETRIC:	/* Set the routing metric */
			dev->metric = ifr.ifr_metric;
			ret=0;
			break;

		case SIOCGIFMTU:	/* Get the device MTU */
			ifr.ifr_mtu = dev->mtu;
			goto rarok;

		case SIOCSIFMTU:	/* Set the device MTU */
			/* 68 is the smallest MTU IP permits (RFC 791). */
			if(ifr.ifr_mtu<68)
				return -EINVAL;
			dev->mtu = ifr.ifr_mtu;
			ret = 0;
			break;

		case SIOCGIFMEM:	/* Per-device memory: not supported */
			ret = -EINVAL;
			break;

		case SIOCSIFMEM:	/* Per-device memory: not supported */
			ret = -EINVAL;
			break;

		case SIOCGIFHWADDR:	/* Get the hardware address */
			memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
			ifr.ifr_hwaddr.sa_family=dev->type;
			goto rarok;

		case SIOCSIFHWADDR:	/* Set the hardware address */
			if(dev->set_mac_address==NULL)
				return -EOPNOTSUPP;
			if(ifr.ifr_hwaddr.sa_family!=dev->type)
				return -EINVAL;
			ret=dev->set_mac_address(dev,&ifr.ifr_hwaddr);
			break;

		case SIOCGIFMAP:	/* Get the hardware configuration */
			ifr.ifr_map.mem_start=dev->mem_start;
			ifr.ifr_map.mem_end=dev->mem_end;
			ifr.ifr_map.base_addr=dev->base_addr;
			ifr.ifr_map.irq=dev->irq;
			ifr.ifr_map.dma=dev->dma;
			ifr.ifr_map.port=dev->if_port;
			goto rarok;

		case SIOCSIFMAP:	/* Set the hardware configuration */
			if(dev->set_config==NULL)
				return -EOPNOTSUPP;
			return dev->set_config(dev,&ifr.ifr_map);

		case SIOCADDMULTI:	/* Add a hardware multicast address */
			if(dev->set_multicast_list==NULL)
				return -EINVAL;
			if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
				return -EINVAL;
			dev_mc_add(dev,ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
			return 0;

		case SIOCDELMULTI:	/* Delete a hardware multicast address */
			if(dev->set_multicast_list==NULL)
				return -EINVAL;
			if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
				return -EINVAL;
			dev_mc_delete(dev,ifr.ifr_hwaddr.sa_data,dev->addr_len, 1);
			return 0;

		/*
		 *	Unknown ioctl: pass device-private codes down to
		 *	the driver's own ioctl handler, if it has one.
		 */
		default:
			if((getset >= SIOCDEVPRIVATE) &&
			   (getset <= (SIOCDEVPRIVATE + 15))) {
				if(dev->do_ioctl==NULL)
					return -EOPNOTSUPP;
				ret=dev->do_ioctl(dev, &ifr, getset);
				memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
				break;
			}

			ret = -EINVAL;
	}
	return(ret);

/*
 *	Shared exit for the "get" calls: copy the filled-in ifreq back
 *	to user space and report success.
 */
rarok:
	memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
	return 0;
}
1251
1252
1253
1254
1255
1256
1257
1258 int dev_ioctl(unsigned int cmd, void *arg)
1259 {
1260 switch(cmd)
1261 {
1262 case SIOCGIFCONF:
1263 (void) dev_ifconf((char *) arg);
1264 return 0;
1265
1266
1267
1268
1269
1270 case SIOCGIFFLAGS:
1271 case SIOCGIFADDR:
1272 case SIOCGIFDSTADDR:
1273 case SIOCGIFBRDADDR:
1274 case SIOCGIFNETMASK:
1275 case SIOCGIFMETRIC:
1276 case SIOCGIFMTU:
1277 case SIOCGIFMEM:
1278 case SIOCGIFHWADDR:
1279 case SIOCSIFHWADDR:
1280 case SIOCGIFSLAVE:
1281 case SIOCGIFMAP:
1282 return dev_ifsioc(arg, cmd);
1283
1284
1285
1286
1287
1288 case SIOCSIFFLAGS:
1289 case SIOCSIFADDR:
1290 case SIOCSIFDSTADDR:
1291 case SIOCSIFBRDADDR:
1292 case SIOCSIFNETMASK:
1293 case SIOCSIFMETRIC:
1294 case SIOCSIFMTU:
1295 case SIOCSIFMEM:
1296 case SIOCSIFMAP:
1297 case SIOCSIFSLAVE:
1298 case SIOCADDMULTI:
1299 case SIOCDELMULTI:
1300 if (!suser())
1301 return -EPERM;
1302 return dev_ifsioc(arg, cmd);
1303
1304 case SIOCSIFLINK:
1305 return -EINVAL;
1306
1307
1308
1309
1310
1311 default:
1312 if((cmd >= SIOCDEVPRIVATE) &&
1313 (cmd <= (SIOCDEVPRIVATE + 15))) {
1314 return dev_ifsioc(arg, cmd);
1315 }
1316 return -EINVAL;
1317 }
1318 }
1319
1320
1321
1322
1323
1324
1325
1326
1327 extern int lance_init(void);
1328 extern int pi_init(void);
1329 extern int dec21040_init(void);
1330
/*
 *	Initialise the device layer at boot: run the bus-style probes
 *	that register many devices at once, initialise the statically
 *	linked device list (pruning devices whose init fails), register
 *	/proc/net/dev, and hook and enable the network bottom half.
 *	Always returns 0.
 */
int net_dev_init(void)
{
	struct device *dev, **dp;

	/*
	 *	Bus probes that add entries to dev_base themselves.
	 */
#if defined(CONFIG_LANCE)
	lance_init();
#endif
#if defined(CONFIG_PI)
	pi_init();
#endif
#if defined(CONFIG_DEC_ELCP)
	dec21040_init();
#endif

	/*
	 *	Walk the device chain: set up each device's transmit
	 *	queues, then call its init hook.  A failing init unhooks
	 *	the device from the chain until the next reboot.
	 */
	dp = &dev_base;
	while ((dev = *dp) != NULL)
	{
		int i;
		for (i = 0; i < DEV_NUMBUFFS; i++) {
			skb_queue_head_init(dev->buffs + i);
		}

		if (dev->init && dev->init(dev))
		{
			/* It failed to come up: unlink it. */
			*dp = dev->next;
		}
		else
		{
			dp = &dev->next;
		}
	}

	/* Publish per-device statistics as /proc/net/dev. */
	proc_net_register(&(struct proc_dir_entry) {
		PROC_NET_DEV, 3, "dev",
		S_IFREG | S_IRUGO, 1, 0, 0,
		0, &proc_net_inode_operations,
		dev_get_info
	});

	/* Hook the network bottom half handler and turn it on. */
	bh_base[NET_BH].routine = net_bh;
	enable_bh(NET_BH);
	return 0;
}