This source file includes following definitions.
- min
- dev_add_pack
- dev_remove_pack
- dev_get
- dev_load
- dev_open
- dev_close
- register_netdevice_notifier
- unregister_netdevice_notifier
- dev_queue_xmit
- netif_rx
- dev_transmit
- in_net_bh
- net_bh
- dev_tint
- dev_ifconf
- sprintf_stats
- dev_get_info
- bad_mask
- dev_ifsioc
- dev_ioctl
- net_dev_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49 #include <asm/segment.h>
50 #include <asm/system.h>
51 #include <asm/bitops.h>
52 #include <linux/config.h>
53 #include <linux/types.h>
54 #include <linux/kernel.h>
55 #include <linux/sched.h>
56 #include <linux/string.h>
57 #include <linux/mm.h>
58 #include <linux/socket.h>
59 #include <linux/sockios.h>
60 #include <linux/in.h>
61 #include <linux/errno.h>
62 #include <linux/interrupt.h>
63 #include <linux/if_ether.h>
64 #include <linux/inet.h>
65 #include <linux/netdevice.h>
66 #include <linux/etherdevice.h>
67 #include <linux/notifier.h>
68 #include <net/ip.h>
69 #include <net/route.h>
70 #include <linux/skbuff.h>
71 #include <net/sock.h>
72 #include <net/arp.h>
73 #include <net/slhc.h>
74 #include <linux/proc_fs.h>
75 #include <linux/stat.h>
76 #ifdef CONFIG_NET_ALIAS
77 #include <linux/net_alias.h>
78 #endif
79 #ifdef CONFIG_KERNELD
80 #include <linux/kerneld.h>
81 #endif
82
83
84
85
86
87
88 struct packet_type *ptype_base[16];
89 struct packet_type *ptype_all = NULL;
90
91
92
93
94
95 int dev_lockct=0;
96
97
98
99
100
101 struct notifier_block *netdev_chain=NULL;
102
103
104
105
106
107
108 static struct sk_buff_head backlog;
109
110
111
112
113
114 static int backlog_size = 0;
115
116
117
118
119
/*
 *	Return the smaller of two unsigned long values.
 */
static __inline__ unsigned long min(unsigned long x, unsigned long y)
{
	if (x < y)
		return x;
	return y;
}
124
125
126
127
128
129
130
131
132
133
134
135
136 static int dev_nit=0;
137
138
139
140
141
142
143
144 void dev_add_pack(struct packet_type *pt)
145 {
146 int hash;
147 if(pt->type==htons(ETH_P_ALL))
148 {
149 dev_nit++;
150 pt->next=ptype_all;
151 ptype_all=pt;
152 }
153 else
154 {
155 hash=ntohs(pt->type)&15;
156 pt->next = ptype_base[hash];
157 ptype_base[hash] = pt;
158 }
159 }
160
161
162
163
164
165
166 void dev_remove_pack(struct packet_type *pt)
167 {
168 struct packet_type **pt1;
169 if(pt->type==htons(ETH_P_ALL))
170 {
171 dev_nit--;
172 pt1=&ptype_all;
173 }
174 else
175 pt1=&ptype_base[ntohs(pt->type)&15];
176 for(; (*pt1)!=NULL; pt1=&((*pt1)->next))
177 {
178 if(pt==(*pt1))
179 {
180 *pt1=pt->next;
181 return;
182 }
183 }
184 printk("dev_remove_pack: %p not found.\n", pt);
185 }
186
187
188
189
190
191
192
193
194
195
196
197 struct device *dev_get(const char *name)
198 {
199 struct device *dev;
200
201 for (dev = dev_base; dev != NULL; dev = dev->next)
202 {
203 if (strcmp(dev->name, name) == 0)
204 return(dev);
205 }
206 return NULL;
207 }
208
209
210
211
212
213 #ifdef CONFIG_KERNELD
214
215 extern __inline__ void dev_load(const char *name)
216 {
217 if(!dev_get(name))
218 request_module(name);
219 }
220
221 #endif
222
223
224
225
226
227 int dev_open(struct device *dev)
228 {
229 int ret = 0;
230
231
232
233
234 if (dev->open)
235 ret = dev->open(dev);
236
237
238
239
240
241 if (ret == 0)
242 {
243 dev->flags |= (IFF_UP | IFF_RUNNING);
244
245
246
247 dev_mc_upload(dev);
248 notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
249 }
250 return(ret);
251 }
252
253
254
255
256
257
258 int dev_close(struct device *dev)
259 {
260 int ct=0;
261
262
263
264
265
266
267 if ((dev->flags & IFF_UP) && dev->stop)
268 dev->stop(dev);
269
270
271
272
273
274 dev->flags&=~(IFF_UP|IFF_RUNNING);
275
276
277
278
279 notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
280
281
282
283 dev_mc_discard(dev);
284
285
286
287 dev->pa_addr = 0;
288 dev->pa_dstaddr = 0;
289 dev->pa_brdaddr = 0;
290 dev->pa_mask = 0;
291
292
293
294 while(ct<DEV_NUMBUFFS)
295 {
296 struct sk_buff *skb;
297 while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
298 if(skb->free)
299 kfree_skb(skb,FREE_WRITE);
300 ct++;
301 }
302 return(0);
303 }
304
305
306
307
308
309
310
311 int register_netdevice_notifier(struct notifier_block *nb)
312 {
313 return notifier_chain_register(&netdev_chain, nb);
314 }
315
316 int unregister_netdevice_notifier(struct notifier_block *nb)
317 {
318 return notifier_chain_unregister(&netdev_chain,nb);
319 }
320
321
322
323
324
325
326
327
328
/*
 *	Queue a buffer for transmission on a device at priority 'pri'.
 *	A negative priority encodes a retransmission of a buffer pulled
 *	back off a device queue by dev_tint() (real priority is -pri-1);
 *	retransmits skip the queue-length check and the packet-tap copies.
 *	Ownership of skb passes to the driver on success; on failure it is
 *	requeued at the head of the priority list.
 */
void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
	unsigned long flags;
	struct sk_buff_head *list;
	int retransmission = 0;	/* set once we decode a negative priority */

	/* A buffer handed to a device must be device-locked against reuse. */
	if(pri>=0 && !skb_device_locked(skb))
		skb_device_lock(skb);
#if CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	skb->dev = dev;

	/* Decode the "-real_pri - 1" retransmission encoding. */
	if (pri < 0)
	{
		pri = -pri-1;
		retransmission = 1;
	}

#ifdef CONFIG_NET_DEBUG
	if (pri >= DEV_NUMBUFFS)
	{
		printk("bad priority in dev_queue_xmit.\n");
		pri = 1;
	}
#endif

	/*
	 *	If the link-layer header is unresolved, ask the driver to
	 *	rebuild it.  A non-zero return means the frame was taken over
	 *	(e.g. parked on an ARP pending queue) - do not touch it again.
	 */
	if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
		return;
	}

#ifdef CONFIG_NET_ALIAS
	/* Alias devices transmit through their main device. */
	if (net_alias_is(dev))
		skb->dev = dev = net_alias_main_dev(dev);
#endif
	list = dev->buffs + pri;

	save_flags(flags);

	if (!retransmission) {
		if (skb_queue_len(list)) {

			/* Queue over its limit: drop rather than grow unbounded. */
			if (skb_queue_len(list) > dev->tx_queue_len) {
				dev_kfree_skb(skb, FREE_WRITE);
				return;
			}
			/*
			 *	FIFO fairness: append the new buffer, then pull
			 *	the oldest one off to transmit instead.
			 */
			cli();
			skb_device_unlock(skb);
			__skb_queue_tail(list, skb);
			skb = __skb_dequeue(list);
			skb_device_lock(skb);
			restore_flags(flags);
		}

		/* Hand a clone to each ETH_P_ALL tap (never back to the sender). */
		if (dev_nit) {
			struct packet_type *ptype;
			skb->stamp=xtime;
			for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next)
			{
				/* ptype->data holds the tap's own socket, to skip. */
				if ((ptype->dev == dev || !ptype->dev) &&
				   ((struct sock *)ptype->data != skb->sk))
				{
					struct sk_buff *skb2;
					if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
						break;	/* out of memory: stop cloning */
					skb2->h.raw = skb2->data + dev->hard_header_len;
					skb2->mac.raw = skb2->data;
					ptype->func(skb2, skb->dev, ptype);
				}
			}
		}
	}
	start_bh_atomic();
	if (dev->hard_start_xmit(skb, dev) == 0) {
		/* Driver accepted the buffer - we are done. */
		end_bh_atomic();
		return;
	}
	end_bh_atomic();

	/*
	 *	The device was busy: requeue at the head so this buffer goes
	 *	out first on the next dev_tint().
	 */
	cli();
	skb_device_unlock(skb);
	__skb_queue_head(list,skb);
	restore_flags(flags);
}
445
446
447
448
449
450
451
452 void netif_rx(struct sk_buff *skb)
453 {
454 static int dropping = 0;
455
456
457
458
459
460
461
462 skb->sk = NULL;
463 skb->free = 1;
464 if(skb->stamp.tv_sec==0)
465 skb->stamp = xtime;
466
467
468
469
470
471 if (!backlog_size)
472 dropping = 0;
473 else if (backlog_size > 300)
474 dropping = 1;
475
476 if (dropping)
477 {
478 kfree_skb(skb, FREE_READ);
479 return;
480 }
481
482
483
484
485 #if CONFIG_SKB_CHECK
486 IS_SKB(skb);
487 #endif
488 skb_queue_tail(&backlog,skb);
489 backlog_size++;
490
491
492
493
494
495
496 #ifdef CONFIG_NET_RUNONIRQ
497 net_bh();
498 #else
499 mark_bh(NET_BH);
500 #endif
501 return;
502 }
503
504
505
506
507
508 void dev_transmit(void)
509 {
510 struct device *dev;
511
512 for (dev = dev_base; dev != NULL; dev = dev->next)
513 {
514 if (dev->flags != 0 && !dev->tbusy) {
515
516
517
518 dev_tint(dev);
519 }
520 }
521 }
522
523
524
525
526
527
528
529
530
531
532
533
534
535
/* Non-zero while net_bh() is executing; used as a re-entry guard. */
volatile unsigned long in_bh = 0;

/*
 *	Report whether the network bottom half is currently running.
 *	Returns 1 if so, 0 otherwise.
 */
int in_net_bh()
{
	return in_bh ? 1 : 0;
}
542
543
544
545
546
547
548
549
550
/*
 *	The network bottom half: drains the backlog queue filled by
 *	netif_rx() and delivers each buffer to all matching protocol
 *	handlers.  Runs with interrupts enabled except around backlog
 *	manipulation; the in_bh flag prevents re-entry.
 */
void net_bh(void *tmp)
{
	struct sk_buff *skb;
	struct packet_type *ptype;
	struct packet_type *pt_prev;
	unsigned short type;

	/* Atomically refuse to run twice at once. */
	if (set_bit(1, (void*)&in_bh))
		return;

	/* Flush any pending transmits before processing input. */
	dev_transmit();

	/* Interrupts off while we manipulate the backlog queue. */
	cli();

	while((skb=__skb_dequeue(&backlog))!=NULL)
	{
		backlog_size--;

		/* Safe to take interrupts while we process this buffer. */
		sti();

		/* Protocol data starts right at the buffer head here. */
		skb->h.raw = skb->data;

		type = skb->protocol;

		/*
		 *	Deliver to the wildcard (ETH_P_ALL) taps first.
		 *	pt_prev delays each delivery by one step so the final
		 *	matching handler can be given the original skb below
		 *	without an extra clone.
		 */
		pt_prev = NULL;
		for (ptype = ptype_all; ptype!=NULL; ptype=ptype->next)
		{
			if(pt_prev)
			{
				struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC);
				if(skb2)
					pt_prev->func(skb2,skb->dev, pt_prev);
			}
			pt_prev=ptype;
		}

		/* Then the handlers hashed on the protocol type. */
		for (ptype = ptype_base[ntohs(type)&15]; ptype != NULL; ptype = ptype->next)
		{
			if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev))
			{
				/* Earlier match pending: it gets a clone... */
				if(pt_prev)
				{
					struct sk_buff *skb2;

					skb2=skb_clone(skb, GFP_ATOMIC);

					/* Clone failure just skips that handler. */
					if(skb2)
						pt_prev->func(skb2, skb->dev, pt_prev);
				}

				pt_prev=ptype;
			}
		}

		/* ...and the last match consumes the original buffer. */
		if(pt_prev)
			pt_prev->func(skb, skb->dev, pt_prev);

		/* Nobody wanted it: throw it away. */
		else
			kfree_skb(skb, FREE_WRITE);

		/* Optionally interleave transmits with receive processing. */
#ifdef XMIT_EVERY
		dev_transmit();
#endif
		cli();	/* interrupts off again for the next dequeue */
	}

	/* Done: clear the re-entry guard and re-enable interrupts. */
	in_bh = 0;
	sti();

	/* Optionally flush transmits once more on the way out. */
#ifdef XMIT_AFTER
	dev_transmit();
#endif
}
693
694
695
696
697
698
699
/*
 *	Called when a device can accept more output ("transmit interrupt"
 *	service): pull buffers off the device's priority queues, highest
 *	priority first, and resubmit each via dev_queue_xmit() using the
 *	negative-priority retransmit encoding.
 */
void dev_tint(struct device *dev)
{
	int i;
	unsigned long flags;
	struct sk_buff_head * head;

	/* Alias devices have no queues of their own. */
#ifdef CONFIG_NET_ALIAS
	if (net_alias_is(dev)) return;
#endif
	head = dev->buffs;
	save_flags(flags);
	cli();

	for(i = 0;i < DEV_NUMBUFFS; i++,head++)
	{
		struct sk_buff *skb = skb_peek(head);

		if (skb) {
			__skb_unlink(skb, head);

			/* Lock the buffer to the device before re-enabling IRQs. */
			skb_device_lock(skb);
			restore_flags(flags);

			/* "-i - 1" marks this as a retransmission at priority i. */
			dev_queue_xmit(skb,dev,-i - 1);

			/* Device filled up again: stop feeding it. */
			if (dev->tbusy)
				return;
			cli();
		}
	}
	restore_flags(flags);
}
746
747
748
749
750
751
752
753
/*
 *	SIOCGIFCONF: write the name and primary address of every interface
 *	that is up into the caller's buffer, stopping when the buffer is
 *	full.  'arg' points at a user-space struct ifconf.
 *	NOTE(review): the return value (pos - arg) subtracts two unrelated
 *	pointers; the only caller, dev_ioctl(), discards it and returns 0,
 *	so the real result is the ifc_len written back to user space.
 */
static int dev_ifconf(char *arg)
{
	struct ifconf ifc;
	struct ifreq ifr;
	struct device *dev;
	char *pos;
	int len;
	int err;

	/* Fetch the user's ifconf request. */
	err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
	if(err)
		return err;
	memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
	len = ifc.ifc_len;
	pos = ifc.ifc_buf;

	/* Verify the whole result buffer is writable before starting. */
	err=verify_area(VERIFY_WRITE,pos,len);
	if(err)
		return err;

	/* Emit one ifreq per interface that is up. */
	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		if(!(dev->flags & IFF_UP))	/* only report running devices */
			continue;
		memset(&ifr, 0, sizeof(struct ifreq));
		strcpy(ifr.ifr_name, dev->name);
		(*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
		(*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;

		/* Stop when the buffer cannot hold another entry. */
		if (len < sizeof(struct ifreq))
			break;

		/* Copy this entry out and advance. */
		memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
		pos += sizeof(struct ifreq);
		len -= sizeof(struct ifreq);
	}

	/* Report back how many bytes were actually written. */
	ifc.ifc_len = (pos - ifc.ifc_buf);
	ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
	memcpy_tofs(arg, &ifc, sizeof(struct ifconf));

	return(pos - arg);
}
826
827
828
829
830
831
832
833 #ifdef CONFIG_PROC_FS
834 static int sprintf_stats(char *buffer, struct device *dev)
835 {
836 struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
837 int size;
838
839 if (stats)
840 size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
841 dev->name,
842 stats->rx_packets, stats->rx_errors,
843 stats->rx_dropped + stats->rx_missed_errors,
844 stats->rx_fifo_errors,
845 stats->rx_length_errors + stats->rx_over_errors
846 + stats->rx_crc_errors + stats->rx_frame_errors,
847 stats->tx_packets, stats->tx_errors, stats->tx_dropped,
848 stats->tx_fifo_errors, stats->collisions,
849 stats->tx_carrier_errors + stats->tx_aborted_errors
850 + stats->tx_window_errors + stats->tx_heartbeat_errors);
851 else
852 size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);
853
854 return size;
855 }
856
857
858
859
860
861
/*
 *	/proc/net/dev read handler: print a two-line header followed by one
 *	sprintf_stats() line per device, implementing the usual procfs
 *	windowing protocol via offset/length/*start.
 */
int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
	int len=0;	/* bytes currently kept in the buffer */
	off_t begin=0;	/* file offset corresponding to buffer[0] */
	off_t pos=0;	/* file offset of the end of formatted output */
	int size;

	struct device *dev;


	size = sprintf(buffer, "Inter-| Receive | Transmit\n"
	" face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");

	pos+=size;
	len+=size;


	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		size = sprintf_stats(buffer+len, dev);
		len+=size;
		pos=begin+len;

		/* Everything so far lies before the requested window: discard. */
		if(pos<offset)
		{
			len=0;
			begin=pos;
		}
		/* Past the end of the window: stop formatting. */
		if(pos>offset+length)
			break;
	}

	/* Trim the result to exactly the requested window. */
	*start=buffer+(offset-begin);
	len-=(offset-begin);
	if(len>length)
		len=length;
	return len;
}
900 #endif
901
902
903
904
905
906
/*
 *	Validate a netmask (network byte order) against an address.
 *	Returns 1 when the address has bits outside the mask, or when the
 *	mask is not a contiguous run of leading one bits; 0 when OK.
 */
static inline int bad_mask(unsigned long mask, unsigned long addr)
{
	unsigned long inverse = ~mask;

	/* Host bits set in the address itself? */
	if (addr & inverse)
		return 1;

	/* A valid mask's complement, in host order, must be 2^k - 1. */
	inverse = ntohl(inverse);
	return (inverse & (inverse + 1)) ? 1 : 0;
}
916
917
918
919
920
921
922
923
/*
 *	Perform one interface ioctl.  'arg' points at a user-space
 *	struct ifreq; 'getset' is the ioctl code.  Get-type requests copy
 *	the updated ifreq back to user space (the 'rarok' exit); set-type
 *	requests return 0 or a negative error.
 */
static int dev_ifsioc(void *arg, unsigned int getset)
{
	struct ifreq ifr;
	struct device *dev;
	int ret;

	/* Fetch the ifreq from user space (and verify writability for replies). */
	int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
	if(err)
		return err;

	memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));

	/* Demand-load the driver module if the device does not exist yet. */
#ifdef CONFIG_KERNELD
	dev_load(ifr.ifr_name);
#endif

	/* Look the device up; with aliasing, SIOCSIFADDR may create one. */
#ifdef CONFIG_NET_ALIAS
	if ((dev = net_alias_dev_get(ifr.ifr_name, getset == SIOCSIFADDR, &err, NULL, NULL)) == NULL)
		return(err);
#else
	if ((dev = dev_get(ifr.ifr_name)) == NULL)
		return(-ENODEV);
#endif
	switch(getset)
	{
		case SIOCGIFFLAGS:
			ifr.ifr_flags = dev->flags;
			goto rarok;

		case SIOCSIFFLAGS:
			{
				int old_flags = dev->flags;

				/* Wait until nothing is transmitting on the device. */
				dev_lock_wait();

				/*
				 *	Accept only the settable flag bits; IFF_UP is
				 *	preserved here and handled explicitly below.
				 */
				dev->flags = (ifr.ifr_flags & (
					IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
					IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
					IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER
					| IFF_MULTICAST)) | (dev->flags & IFF_UP);

				/* Flag changes may affect the hardware filter. */
				dev_mc_upload(dev);

				/* Did the caller toggle the UP state? */
				if ((old_flags^ifr.ifr_flags)&IFF_UP)
				{
					if(old_flags&IFF_UP)
						ret=dev_close(dev);
					else
					{
						ret=dev_open(dev);
						if(ret<0)
							dev->flags&=~IFF_UP;
					}
				}
				else
					ret=0;

				/* Re-sync the multicast filter after open/close. */
				dev_mc_upload(dev);
			}
			break;

		case SIOCGIFADDR:
			/* AF_UNSPEC means "give me the hardware address". */
			if(ifr.ifr_addr.sa_family==AF_UNSPEC)
			{
				memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
				ifr.ifr_hwaddr.sa_family=dev->type;
				goto rarok;
			}
			else
			{
				(*(struct sockaddr_in *)
					&ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
				(*(struct sockaddr_in *)
					&ifr.ifr_addr).sin_family = dev->family;
				(*(struct sockaddr_in *)
					&ifr.ifr_addr).sin_port = 0;
			}
			goto rarok;

		case SIOCSIFADDR:

			/* AF_UNSPEC here means "set the hardware address". */
			if(ifr.ifr_addr.sa_family==AF_UNSPEC)
			{
				if(dev->set_mac_address==NULL)
					return -EOPNOTSUPP;
				ret=dev->set_mac_address(dev,&ifr.ifr_addr);
			}
			else
			{
				/* Alias devices are hashed by address: rehash first. */
#ifdef CONFIG_NET_ALIAS
				if (net_alias_is(dev))
					net_alias_dev_rehash(dev ,&ifr.ifr_addr);
#endif
				dev->pa_addr = (*(struct sockaddr_in *)
					&ifr.ifr_addr).sin_addr.s_addr;
				dev->family = ifr.ifr_addr.sa_family;

				/* Derive a default class mask from the address. */
#ifdef CONFIG_INET
				dev->pa_mask = ip_get_mask(dev->pa_addr);
#endif
				dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
				ret = 0;
			}
			break;

		case SIOCGIFBRDADDR:
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_port = 0;
			goto rarok;

		case SIOCSIFBRDADDR:
			dev->pa_brdaddr = (*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr;
			ret = 0;
			break;

		case SIOCGIFDSTADDR:
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_port = 0;
			goto rarok;

		case SIOCSIFDSTADDR:
			dev->pa_dstaddr = (*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr;
			ret = 0;
			break;

		case SIOCGIFNETMASK:
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_port = 0;
			goto rarok;

		case SIOCSIFNETMASK:
			{
				unsigned long mask = (*(struct sockaddr_in *)
					&ifr.ifr_netmask).sin_addr.s_addr;
				ret = -EINVAL;

				/* Reject non-contiguous masks. */
				if (bad_mask(mask,0))
					break;
				dev->pa_mask = mask;
				ret = 0;
			}
			break;

		case SIOCGIFMETRIC:

			ifr.ifr_metric = dev->metric;
			goto rarok;

		case SIOCSIFMETRIC:
			dev->metric = ifr.ifr_metric;
			ret=0;
			break;

		case SIOCGIFMTU:
			ifr.ifr_mtu = dev->mtu;
			goto rarok;

		case SIOCSIFMTU:

			/* 68 is the minimum IP MTU. */
			if(ifr.ifr_mtu<68)
				return -EINVAL;
			dev->mtu = ifr.ifr_mtu;
			ret = 0;
			break;

		case SIOCGIFMEM:
			/* Not supported. */
			ret = -EINVAL;
			break;

		case SIOCSIFMEM:
			ret = -EINVAL;
			break;

		case SIOCGIFHWADDR:
			memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
			ifr.ifr_hwaddr.sa_family=dev->type;
			goto rarok;

		case SIOCSIFHWADDR:
			if(dev->set_mac_address==NULL)
				return -EOPNOTSUPP;
			if(ifr.ifr_hwaddr.sa_family!=dev->type)
				return -EINVAL;
			ret=dev->set_mac_address(dev,&ifr.ifr_hwaddr);
			break;

		case SIOCGIFMAP:
			ifr.ifr_map.mem_start=dev->mem_start;
			ifr.ifr_map.mem_end=dev->mem_end;
			ifr.ifr_map.base_addr=dev->base_addr;
			ifr.ifr_map.irq=dev->irq;
			ifr.ifr_map.dma=dev->dma;
			ifr.ifr_map.port=dev->if_port;
			goto rarok;

		case SIOCSIFMAP:
			if(dev->set_config==NULL)
				return -EOPNOTSUPP;
			return dev->set_config(dev,&ifr.ifr_map);

		case SIOCADDMULTI:
			if(dev->set_multicast_list==NULL)
				return -EINVAL;
			if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
				return -EINVAL;
			dev_mc_add(dev,ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
			return 0;

		case SIOCDELMULTI:
			if(dev->set_multicast_list==NULL)
				return -EINVAL;
			if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
				return -EINVAL;
			dev_mc_delete(dev,ifr.ifr_hwaddr.sa_data,dev->addr_len, 1);
			return 0;

		/* Driver-private ioctls get passed through verbatim. */
		default:
			if((getset >= SIOCDEVPRIVATE) &&
			   (getset <= (SIOCDEVPRIVATE + 15))) {
				if(dev->do_ioctl==NULL)
					return -EOPNOTSUPP;
				ret=dev->do_ioctl(dev, &ifr, getset);
				memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
				break;
			}

			ret = -EINVAL;
	}
	return(ret);

	/* Shared exit for get-type requests: copy the reply back. */
rarok:
	memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
	return 0;
}
1233
1234
1235
1236
1237
1238
1239
1240 int dev_ioctl(unsigned int cmd, void *arg)
1241 {
1242 switch(cmd)
1243 {
1244 case SIOCGIFCONF:
1245 (void) dev_ifconf((char *) arg);
1246 return 0;
1247
1248
1249
1250
1251
1252 case SIOCGIFFLAGS:
1253 case SIOCGIFADDR:
1254 case SIOCGIFDSTADDR:
1255 case SIOCGIFBRDADDR:
1256 case SIOCGIFNETMASK:
1257 case SIOCGIFMETRIC:
1258 case SIOCGIFMTU:
1259 case SIOCGIFMEM:
1260 case SIOCGIFHWADDR:
1261 case SIOCSIFHWADDR:
1262 case SIOCGIFSLAVE:
1263 case SIOCGIFMAP:
1264 return dev_ifsioc(arg, cmd);
1265
1266
1267
1268
1269
1270 case SIOCSIFFLAGS:
1271 case SIOCSIFADDR:
1272 case SIOCSIFDSTADDR:
1273 case SIOCSIFBRDADDR:
1274 case SIOCSIFNETMASK:
1275 case SIOCSIFMETRIC:
1276 case SIOCSIFMTU:
1277 case SIOCSIFMEM:
1278 case SIOCSIFMAP:
1279 case SIOCSIFSLAVE:
1280 case SIOCADDMULTI:
1281 case SIOCDELMULTI:
1282 if (!suser())
1283 return -EPERM;
1284 return dev_ifsioc(arg, cmd);
1285
1286 case SIOCSIFLINK:
1287 return -EINVAL;
1288
1289
1290
1291
1292
1293 default:
1294 if((cmd >= SIOCDEVPRIVATE) &&
1295 (cmd <= (SIOCDEVPRIVATE + 15))) {
1296 return dev_ifsioc(arg, cmd);
1297 }
1298 return -EINVAL;
1299 }
1300 }
1301
1302
1303
1304
1305
1306
1307
1308
1309 extern int lance_init(void);
1310 extern int pi_init(void);
1311 extern int dec21040_init(void);
1312
/*
 *	Boot-time network initialisation: set up the receive backlog,
 *	probe the special bus drivers, initialise every device on the
 *	static dev_base chain (unlinking failures), register
 *	/proc/net/dev and install the NET_BH bottom half.
 *	Always returns 0.
 */
int net_dev_init(void)
{
	struct device *dev, **dp;

	/* Receive backlog queue shared by netif_rx() and net_bh(). */
	skb_queue_head_init(&backlog);

	/* Probe drivers that must run before the dev_base scan below. */
#if defined(CONFIG_LANCE)
	lance_init();
#endif
#if defined(CONFIG_PI)
	pi_init();
#endif
#if defined(CONFIG_PT)
	pt_init();
#endif
#if defined(CONFIG_DEC_ELCP)
	dec21040_init();
#endif

	/* SLIP/PPP header compression, when built into the kernel. */
#if (defined(CONFIG_SLIP_COMPRESSED) || defined(CONFIG_PPP)) && defined(CONFIG_SLHC_BUILTIN)
	slhc_install();
#endif

	/*
	 *	Walk the statically-built device list: initialise each
	 *	device's priority queues and call its init routine.  A
	 *	non-zero init return means the hardware is absent, so the
	 *	device is unlinked from the chain.
	 */
	dp = &dev_base;
	while ((dev = *dp) != NULL)
	{
		int i;
		for (i = 0; i < DEV_NUMBUFFS; i++) {
			skb_queue_head_init(dev->buffs + i);
		}

		if (dev->init && dev->init(dev))
		{
			/* Init failed: drop the device from the list. */
			*dp = dev->next;
		}
		else
		{
			dp = &dev->next;
		}
	}

	/* Publish interface statistics under /proc/net/dev. */
#ifdef CONFIG_PROC_FS
	proc_net_register(&(struct proc_dir_entry) {
		PROC_NET_DEV, 3, "dev",
		S_IFREG | S_IRUGO, 1, 0, 0,
		0, &proc_net_inode_operations,
		dev_get_info
	});
#endif

	/* Device aliasing support, if configured. */
#ifdef CONFIG_NET_ALIAS
	net_alias_init();
#endif

	/* Install and enable the network bottom half. */
	bh_base[NET_BH].routine = net_bh;
	enable_bh(NET_BH);
	return 0;
}