This source file includes the following definitions:
- min
- dev_add_pack
- dev_remove_pack
- dev_get
- dev_open
- dev_close
- register_netdevice_notifier
- unregister_netdevice_notifier
- dev_queue_xmit
- netif_rx
- dev_rint
- dev_transmit
- in_net_bh
- net_bh
- dev_tint
- dev_ifconf
- sprintf_stats
- dev_get_info
- bad_mask
- dev_ifsioc
- dev_ioctl
- net_dev_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48 #include <asm/segment.h>
49 #include <asm/system.h>
50 #include <asm/bitops.h>
51 #include <linux/config.h>
52 #include <linux/types.h>
53 #include <linux/kernel.h>
54 #include <linux/sched.h>
55 #include <linux/string.h>
56 #include <linux/mm.h>
57 #include <linux/socket.h>
58 #include <linux/sockios.h>
59 #include <linux/in.h>
60 #include <linux/errno.h>
61 #include <linux/interrupt.h>
62 #include <linux/if_ether.h>
63 #include <linux/inet.h>
64 #include <linux/netdevice.h>
65 #include <linux/etherdevice.h>
66 #include <linux/notifier.h>
67 #include <net/ip.h>
68 #include <net/route.h>
69 #include <linux/skbuff.h>
70 #include <net/sock.h>
71 #include <net/arp.h>
72 #include <linux/proc_fs.h>
73 #include <linux/stat.h>
74 #ifdef CONFIG_NET_ALIAS
75 #include <linux/net_alias.h>
76 #endif
77 #ifdef CONFIG_KERNELD
78 #include <linux/kerneld.h>
79 #endif
80
81
82
83
84
85
86 struct packet_type *ptype_base[16];
87 struct packet_type *ptype_all = NULL;
88
89
90
91
92
93 int dev_lockct=0;
94
95
96
97
98
99 struct notifier_block *netdev_chain=NULL;
100
101
102
103
104
105
106 static struct sk_buff_head backlog =
107 {
108 (struct sk_buff *)&backlog, (struct sk_buff *)&backlog
109 #if CONFIG_SKB_CHECK
110 ,SK_HEAD_SKB
111 #endif
112 };
113
114
115
116
117
118 static int backlog_size = 0;
119
120
121
122
123
/*
 *	Return the smaller of two unsigned long values.
 */

static __inline__ unsigned long min(unsigned long a, unsigned long b)
{
	if (a < b)
		return a;
	return b;
}
128
129
130
131
132
133
134
135
136
137
138
139
static int dev_nit = 0;		/* number of ETH_P_ALL taps currently installed */
141
142
143
144
145
146
147
148 void dev_add_pack(struct packet_type *pt)
149 {
150 int hash;
151 if(pt->type==htons(ETH_P_ALL))
152 {
153 dev_nit++;
154 pt->next=ptype_all;
155 ptype_all=pt;
156 }
157 else
158 {
159 hash=ntohs(pt->type)&15;
160 pt->next = ptype_base[hash];
161 ptype_base[hash] = pt;
162 }
163 }
164
165
166
167
168
169
170 void dev_remove_pack(struct packet_type *pt)
171 {
172 struct packet_type **pt1;
173 if(pt->type==htons(ETH_P_ALL))
174 {
175 dev_nit--;
176 pt1=&ptype_all;
177 }
178 else
179 pt1=&ptype_base[ntohs(pt->type)&15];
180 for(; (*pt1)!=NULL; pt1=&((*pt1)->next))
181 {
182 if(pt==(*pt1))
183 {
184 *pt1=pt->next;
185 return;
186 }
187 }
188 printk("dev_remove_pack: %p not found.\n", pt);
189 }
190
191
192
193
194
195
196
197
198
199
200
201 struct device *dev_get(const char *name)
202 {
203 struct device *dev;
204
205 for (dev = dev_base; dev != NULL; dev = dev->next)
206 {
207 if (strcmp(dev->name, name) == 0)
208 return(dev);
209 }
210 #ifdef CONFIG_KERNELD
211 if (request_module(name) == 0)
212 for (dev = dev_base; dev != NULL; dev = dev->next) {
213 if (strcmp(dev->name, name) == 0)
214 return(dev);
215 }
216 #endif
217 return(NULL);
218 }
219
220
221
222
223
224
225 int dev_open(struct device *dev)
226 {
227 int ret = 0;
228
229
230
231
232 if (dev->open)
233 ret = dev->open(dev);
234
235
236
237
238
239 if (ret == 0)
240 {
241 dev->flags |= (IFF_UP | IFF_RUNNING);
242
243
244
245 dev_mc_upload(dev);
246 notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
247 }
248 return(ret);
249 }
250
251
252
253
254
255
256 int dev_close(struct device *dev)
257 {
258 int ct=0;
259
260
261
262
263
264
265 if ((dev->flags & IFF_UP) && dev->stop)
266 dev->stop(dev);
267
268
269
270
271
272 dev->flags&=~(IFF_UP|IFF_RUNNING);
273
274
275
276
277 notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
278
279
280
281 dev_mc_discard(dev);
282
283
284
285 dev->pa_addr = 0;
286 dev->pa_dstaddr = 0;
287 dev->pa_brdaddr = 0;
288 dev->pa_mask = 0;
289
290
291
292 while(ct<DEV_NUMBUFFS)
293 {
294 struct sk_buff *skb;
295 while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
296 if(skb->free)
297 kfree_skb(skb,FREE_WRITE);
298 ct++;
299 }
300 return(0);
301 }
302
303
304
305
306
307
308
309 int register_netdevice_notifier(struct notifier_block *nb)
310 {
311 return notifier_chain_register(&netdev_chain, nb);
312 }
313
314 int unregister_netdevice_notifier(struct notifier_block *nb)
315 {
316 return notifier_chain_unregister(&netdev_chain,nb);
317 }
318
319
320
321
322
323
324
325
326
327
328
329 void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
330 {
331 unsigned long flags;
332 struct packet_type *ptype;
333 int where = 0;
334
335
336
337 if(pri>=0 && !skb_device_locked(skb))
338 skb_device_lock(skb);
339 #if CONFIG_SKB_CHECK
340 IS_SKB(skb);
341 #endif
342 skb->dev = dev;
343
344
345
346
347
348
349
350 if (pri < 0)
351 {
352 pri = -pri-1;
353 where = 1;
354 }
355
356 #ifdef CONFIG_NET_DEBUG
357 if (pri >= DEV_NUMBUFFS)
358 {
359 printk("bad priority in dev_queue_xmit.\n");
360 pri = 1;
361 }
362 #endif
363
364
365
366
367
368
369 if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
370 return;
371 }
372
373
374
375
376
377
378
379
380
381 #ifdef CONFIG_NET_ALIAS
382 if (net_alias_is(dev))
383 skb->dev = dev = net_alias_main_dev(dev);
384 #endif
385
386 save_flags(flags);
387 cli();
388 if (!where)
389
390 {
391 skb_queue_tail(dev->buffs + pri,skb);
392 skb_device_unlock(skb);
393 skb = skb_dequeue(dev->buffs + pri);
394 skb_device_lock(skb);
395 }
396 restore_flags(flags);
397
398
399 if(!where && dev_nit)
400 {
401 skb->stamp=xtime;
402 for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next)
403 {
404
405
406
407 if ((ptype->dev == dev || !ptype->dev) &&
408 ((struct sock *)ptype->data != skb->sk))
409 {
410 struct sk_buff *skb2;
411 if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
412 break;
413 skb2->h.raw = skb2->data + dev->hard_header_len;
414 skb2->mac.raw = skb2->data;
415 ptype->func(skb2, skb->dev, ptype);
416 }
417 }
418 }
419 start_bh_atomic();
420 if (dev->hard_start_xmit(skb, dev) == 0) {
421
422
423
424 end_bh_atomic();
425 return;
426 }
427 end_bh_atomic();
428
429
430
431
432
433 cli();
434 skb_device_unlock(skb);
435 skb_queue_head(dev->buffs + pri,skb);
436 restore_flags(flags);
437 }
438
439
440
441
442
443
444
445 void netif_rx(struct sk_buff *skb)
446 {
447 static int dropping = 0;
448
449
450
451
452
453
454 skb->sk = NULL;
455 skb->free = 1;
456 if(skb->stamp.tv_sec==0)
457 skb->stamp = xtime;
458
459
460
461
462
463 if (!backlog_size)
464 dropping = 0;
465 else if (backlog_size > 300)
466 dropping = 1;
467
468 if (dropping)
469 {
470 kfree_skb(skb, FREE_READ);
471 return;
472 }
473
474
475
476
477 #if CONFIG_SKB_CHECK
478 IS_SKB(skb);
479 #endif
480 skb_queue_tail(&backlog,skb);
481 backlog_size++;
482
483
484
485
486
487
488 #ifdef CONFIG_NET_RUNONIRQ
489 net_bh();
490 #else
491 mark_bh(NET_BH);
492 #endif
493 return;
494 }
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511 int dev_rint(unsigned char *buff, long len, int flags, struct device *dev)
512 {
513 static int dropping = 0;
514 struct sk_buff *skb = NULL;
515 unsigned char *to;
516 int amount, left;
517 int len2;
518
519 if (dev == NULL || buff == NULL || len <= 0)
520 return(1);
521
522 if (flags & IN_SKBUFF)
523 {
524 skb = (struct sk_buff *) buff;
525 }
526 else
527 {
528 if (dropping)
529 {
530 if (skb_peek(&backlog) != NULL)
531 return(1);
532 printk("INET: dev_rint: no longer dropping packets.\n");
533 dropping = 0;
534 }
535
536 skb = alloc_skb(len, GFP_ATOMIC);
537 if (skb == NULL)
538 {
539 printk("dev_rint: packet dropped on %s (no memory) !\n",
540 dev->name);
541 dropping = 1;
542 return(1);
543 }
544
545
546
547
548
549
550 to = skb_put(skb,len);
551 left = len;
552
553 len2 = len;
554 while (len2 > 0)
555 {
556 amount = min(len2, (unsigned long) dev->rmem_end -
557 (unsigned long) buff);
558 memcpy(to, buff, amount);
559 len2 -= amount;
560 left -= amount;
561 buff += amount;
562 to += amount;
563 if ((unsigned long) buff == dev->rmem_end)
564 buff = (unsigned char *) dev->rmem_start;
565 }
566 }
567
568
569
570
571
572 skb->dev = dev;
573 skb->free = 1;
574
575 netif_rx(skb);
576
577
578
579 return(0);
580 }
581
582
583
584
585
586
587 void dev_transmit(void)
588 {
589 struct device *dev;
590
591 for (dev = dev_base; dev != NULL; dev = dev->next)
592 {
593 if (dev->flags != 0 && !dev->tbusy) {
594
595
596
597 dev_tint(dev);
598 }
599 }
600 }
601
602
603
604
605
606
607
608
609
610
611
612
613
614
volatile unsigned long in_bh = 0;	/* non-zero while net_bh() is executing */

/*
 *	Report whether the network bottom half is currently running.
 *	Returns 1 inside net_bh(), 0 otherwise.
 *
 *	Fixed: the declarator used the obsolescent old-style empty
 *	parameter list "()" — now a proper "(void)" prototype.
 */

int in_net_bh(void)
{
	return (in_bh == 0 ? 0 : 1);
}
621
622
623
624
625
626
627
628
629
630 void net_bh(void *tmp)
631 {
632 struct sk_buff *skb;
633 struct packet_type *ptype;
634 struct packet_type *pt_prev;
635 unsigned short type;
636
637
638
639
640
641 if (set_bit(1, (void*)&in_bh))
642 return;
643
644
645
646
647
648
649
650
651 dev_transmit();
652
653
654
655
656
657
658
659 cli();
660
661
662
663
664
665 while((skb=skb_dequeue(&backlog))!=NULL)
666 {
667
668
669
670 backlog_size--;
671
672 sti();
673
674
675
676
677
678
679
680
681 skb->h.raw = skb->data;
682
683
684
685
686
687 type = skb->protocol;
688
689
690
691
692
693
694 pt_prev = NULL;
695 for (ptype = ptype_all; ptype!=NULL; ptype=ptype->next)
696 {
697 if(pt_prev)
698 {
699 struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC);
700 if(skb2)
701 pt_prev->func(skb2,skb->dev, pt_prev);
702 }
703 pt_prev=ptype;
704 }
705
706 for (ptype = ptype_base[ntohs(type)&15]; ptype != NULL; ptype = ptype->next)
707 {
708 if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev))
709 {
710
711
712
713
714 if(pt_prev)
715 {
716 struct sk_buff *skb2;
717
718 skb2=skb_clone(skb, GFP_ATOMIC);
719
720
721
722
723
724
725 if(skb2)
726 pt_prev->func(skb2, skb->dev, pt_prev);
727 }
728
729 pt_prev=ptype;
730 }
731 }
732
733
734
735
736
737 if(pt_prev)
738 pt_prev->func(skb, skb->dev, pt_prev);
739
740
741
742
743 else
744 kfree_skb(skb, FREE_WRITE);
745
746
747
748
749
750
751 #ifdef CONFIG_XMIT_EVERY
752 dev_transmit();
753 #endif
754 cli();
755 }
756
757
758
759
760
761 in_bh = 0;
762 sti();
763
764
765
766
767
768 dev_transmit();
769 }
770
771
772
773
774
775
776
777 void dev_tint(struct device *dev)
778 {
779 int i;
780 struct sk_buff *skb;
781 unsigned long flags;
782
783
784
785
786
787 #ifdef CONFIG_NET_ALIAS
788 if (net_alias_is(dev)) return;
789 #endif
790 save_flags(flags);
791
792
793
794
795 for(i = 0;i < DEV_NUMBUFFS; i++)
796 {
797
798
799
800
801
802 cli();
803 while((skb=skb_dequeue(&dev->buffs[i]))!=NULL)
804 {
805
806
807
808 skb_device_lock(skb);
809 restore_flags(flags);
810
811
812
813
814 dev_queue_xmit(skb,dev,-i - 1);
815
816
817
818 if (dev->tbusy)
819 return;
820 cli();
821 }
822 }
823 restore_flags(flags);
824 }
825
826
827
828
829
830
831
832
833 static int dev_ifconf(char *arg)
834 {
835 struct ifconf ifc;
836 struct ifreq ifr;
837 struct device *dev;
838 char *pos;
839 int len;
840 int err;
841
842
843
844
845
846 err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
847 if(err)
848 return err;
849 memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
850 len = ifc.ifc_len;
851 pos = ifc.ifc_buf;
852
853
854
855
856
857
858 err=verify_area(VERIFY_WRITE,pos,len);
859 if(err)
860 return err;
861
862
863
864
865
866 for (dev = dev_base; dev != NULL; dev = dev->next)
867 {
868 if(!(dev->flags & IFF_UP))
869 continue;
870 memset(&ifr, 0, sizeof(struct ifreq));
871 strcpy(ifr.ifr_name, dev->name);
872 (*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
873 (*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
874
875
876
877
878
879 if (len < sizeof(struct ifreq))
880 break;
881
882
883
884
885
886 memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
887 pos += sizeof(struct ifreq);
888 len -= sizeof(struct ifreq);
889 }
890
891
892
893
894
895 ifc.ifc_len = (pos - ifc.ifc_buf);
896 ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
897 memcpy_tofs(arg, &ifc, sizeof(struct ifconf));
898
899
900
901
902
903 return(pos - arg);
904 }
905
906
907
908
909
910
911
912 static int sprintf_stats(char *buffer, struct device *dev)
913 {
914 struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
915 int size;
916
917 if (stats)
918 size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
919 dev->name,
920 stats->rx_packets, stats->rx_errors,
921 stats->rx_dropped + stats->rx_missed_errors,
922 stats->rx_fifo_errors,
923 stats->rx_length_errors + stats->rx_over_errors
924 + stats->rx_crc_errors + stats->rx_frame_errors,
925 stats->tx_packets, stats->tx_errors, stats->tx_dropped,
926 stats->tx_fifo_errors, stats->collisions,
927 stats->tx_carrier_errors + stats->tx_aborted_errors
928 + stats->tx_window_errors + stats->tx_heartbeat_errors);
929 else
930 size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);
931
932 return size;
933 }
934
935
936
937
938
939
940 int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
941 {
942 int len=0;
943 off_t begin=0;
944 off_t pos=0;
945 int size;
946
947 struct device *dev;
948
949
950 size = sprintf(buffer, "Inter-| Receive | Transmit\n"
951 " face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");
952
953 pos+=size;
954 len+=size;
955
956
957 for (dev = dev_base; dev != NULL; dev = dev->next)
958 {
959 size = sprintf_stats(buffer+len, dev);
960 len+=size;
961 pos=begin+len;
962
963 if(pos<offset)
964 {
965 len=0;
966 begin=pos;
967 }
968 if(pos>offset+length)
969 break;
970 }
971
972 *start=buffer+(offset-begin);
973 len-=(offset-begin);
974 if(len>length)
975 len=length;
976 return len;
977 }
978
979
980
981
982
983
/*
 *	Sanity-check a netmask (network byte order) against an address.
 *	Returns 1 if the address has bits outside the mask, or if the
 *	mask's set bits are not contiguous from the top; 0 if acceptable.
 */

static inline int bad_mask(unsigned long mask, unsigned long addr)
{
	unsigned long inv = ~mask;

	/* Host bits of the address must not leak past the mask. */
	if (addr & inv)
		return 1;
	/* In host order, a valid inverted mask is of the form 2^n - 1. */
	inv = ntohl(inv);
	if (inv & (inv + 1))
		return 1;
	return 0;
}
993
994
995
996
997
998
999
1000
1001 static int dev_ifsioc(void *arg, unsigned int getset)
1002 {
1003 struct ifreq ifr;
1004 struct device *dev;
1005 int ret;
1006
1007
1008
1009
1010
1011 int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
1012 if(err)
1013 return err;
1014
1015 memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028 #ifdef CONFIG_NET_ALIAS
1029 if ((dev = net_alias_dev_get(ifr.ifr_name, getset == SIOCSIFADDR, &err, NULL, NULL)) == NULL)
1030 return(err);
1031 #else
1032 if ((dev = dev_get(ifr.ifr_name)) == NULL)
1033 return(-ENODEV);
1034 #endif
1035 switch(getset)
1036 {
1037 case SIOCGIFFLAGS:
1038 ifr.ifr_flags = dev->flags;
1039 goto rarok;
1040
1041 case SIOCSIFFLAGS:
1042 {
1043 int old_flags = dev->flags;
1044
1045
1046
1047
1048
1049
1050 dev_lock_wait();
1051
1052
1053
1054
1055
1056 dev->flags = (ifr.ifr_flags & (
1057 IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
1058 IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
1059 IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER
1060 | IFF_MULTICAST)) | (dev->flags & IFF_UP);
1061
1062
1063
1064
1065 dev_mc_upload(dev);
1066
1067
1068
1069
1070
1071
1072
1073 if ((old_flags^ifr.ifr_flags)&IFF_UP)
1074 {
1075 if(old_flags&IFF_UP)
1076 ret=dev_close(dev);
1077 else
1078 {
1079 ret=dev_open(dev);
1080 if(ret<0)
1081 dev->flags&=~IFF_UP;
1082 }
1083 }
1084 else
1085 ret=0;
1086
1087
1088
1089
1090 dev_mc_upload(dev);
1091 }
1092 break;
1093
1094 case SIOCGIFADDR:
1095 if(ifr.ifr_addr.sa_family==AF_UNSPEC)
1096 {
1097 memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
1098 ifr.ifr_hwaddr.sa_family=dev->type;
1099 goto rarok;
1100 }
1101 else
1102 {
1103 (*(struct sockaddr_in *)
1104 &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
1105 (*(struct sockaddr_in *)
1106 &ifr.ifr_addr).sin_family = dev->family;
1107 (*(struct sockaddr_in *)
1108 &ifr.ifr_addr).sin_port = 0;
1109 }
1110 goto rarok;
1111
1112 case SIOCSIFADDR:
1113
1114
1115
1116
1117
1118
1119 if(ifr.ifr_addr.sa_family==AF_UNSPEC)
1120 {
1121 if(dev->set_mac_address==NULL)
1122 return -EOPNOTSUPP;
1123 ret=dev->set_mac_address(dev,&ifr.ifr_addr);
1124 }
1125 else
1126 {
1127
1128
1129
1130
1131
1132
1133 #ifdef CONFIG_NET_ALIAS
1134 if (net_alias_is(dev))
1135 net_alias_dev_rehash(dev ,&ifr.ifr_addr);
1136 #endif
1137 dev->pa_addr = (*(struct sockaddr_in *)
1138 &ifr.ifr_addr).sin_addr.s_addr;
1139 dev->family = ifr.ifr_addr.sa_family;
1140
1141 #ifdef CONFIG_INET
1142
1143
1144 dev->pa_mask = ip_get_mask(dev->pa_addr);
1145 #endif
1146 dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
1147 ret = 0;
1148 }
1149 break;
1150
1151 case SIOCGIFBRDADDR:
1152 (*(struct sockaddr_in *)
1153 &ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
1154 (*(struct sockaddr_in *)
1155 &ifr.ifr_broadaddr).sin_family = dev->family;
1156 (*(struct sockaddr_in *)
1157 &ifr.ifr_broadaddr).sin_port = 0;
1158 goto rarok;
1159
1160 case SIOCSIFBRDADDR:
1161 dev->pa_brdaddr = (*(struct sockaddr_in *)
1162 &ifr.ifr_broadaddr).sin_addr.s_addr;
1163 ret = 0;
1164 break;
1165
1166 case SIOCGIFDSTADDR:
1167 (*(struct sockaddr_in *)
1168 &ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
1169 (*(struct sockaddr_in *)
1170 &ifr.ifr_dstaddr).sin_family = dev->family;
1171 (*(struct sockaddr_in *)
1172 &ifr.ifr_dstaddr).sin_port = 0;
1173 goto rarok;
1174
1175 case SIOCSIFDSTADDR:
1176 dev->pa_dstaddr = (*(struct sockaddr_in *)
1177 &ifr.ifr_dstaddr).sin_addr.s_addr;
1178 ret = 0;
1179 break;
1180
1181 case SIOCGIFNETMASK:
1182 (*(struct sockaddr_in *)
1183 &ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
1184 (*(struct sockaddr_in *)
1185 &ifr.ifr_netmask).sin_family = dev->family;
1186 (*(struct sockaddr_in *)
1187 &ifr.ifr_netmask).sin_port = 0;
1188 goto rarok;
1189
1190 case SIOCSIFNETMASK:
1191 {
1192 unsigned long mask = (*(struct sockaddr_in *)
1193 &ifr.ifr_netmask).sin_addr.s_addr;
1194 ret = -EINVAL;
1195
1196
1197
1198 if (bad_mask(mask,0))
1199 break;
1200 dev->pa_mask = mask;
1201 ret = 0;
1202 }
1203 break;
1204
1205 case SIOCGIFMETRIC:
1206
1207 ifr.ifr_metric = dev->metric;
1208 goto rarok;
1209
1210 case SIOCSIFMETRIC:
1211 dev->metric = ifr.ifr_metric;
1212 ret=0;
1213 break;
1214
1215 case SIOCGIFMTU:
1216 ifr.ifr_mtu = dev->mtu;
1217 goto rarok;
1218
1219 case SIOCSIFMTU:
1220
1221
1222
1223
1224
1225 if(ifr.ifr_mtu<68)
1226 return -EINVAL;
1227 dev->mtu = ifr.ifr_mtu;
1228 ret = 0;
1229 break;
1230
1231 case SIOCGIFMEM:
1232
1233 ret = -EINVAL;
1234 break;
1235
1236 case SIOCSIFMEM:
1237 ret = -EINVAL;
1238 break;
1239
1240 case SIOCGIFHWADDR:
1241 memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
1242 ifr.ifr_hwaddr.sa_family=dev->type;
1243 goto rarok;
1244
1245 case SIOCSIFHWADDR:
1246 if(dev->set_mac_address==NULL)
1247 return -EOPNOTSUPP;
1248 if(ifr.ifr_hwaddr.sa_family!=dev->type)
1249 return -EINVAL;
1250 ret=dev->set_mac_address(dev,&ifr.ifr_hwaddr);
1251 break;
1252
1253 case SIOCGIFMAP:
1254 ifr.ifr_map.mem_start=dev->mem_start;
1255 ifr.ifr_map.mem_end=dev->mem_end;
1256 ifr.ifr_map.base_addr=dev->base_addr;
1257 ifr.ifr_map.irq=dev->irq;
1258 ifr.ifr_map.dma=dev->dma;
1259 ifr.ifr_map.port=dev->if_port;
1260 goto rarok;
1261
1262 case SIOCSIFMAP:
1263 if(dev->set_config==NULL)
1264 return -EOPNOTSUPP;
1265 return dev->set_config(dev,&ifr.ifr_map);
1266
1267 case SIOCADDMULTI:
1268 if(dev->set_multicast_list==NULL)
1269 return -EINVAL;
1270 if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
1271 return -EINVAL;
1272 dev_mc_add(dev,ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
1273 return 0;
1274
1275 case SIOCDELMULTI:
1276 if(dev->set_multicast_list==NULL)
1277 return -EINVAL;
1278 if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
1279 return -EINVAL;
1280 dev_mc_delete(dev,ifr.ifr_hwaddr.sa_data,dev->addr_len, 1);
1281 return 0;
1282
1283
1284
1285
1286 default:
1287 if((getset >= SIOCDEVPRIVATE) &&
1288 (getset <= (SIOCDEVPRIVATE + 15))) {
1289 if(dev->do_ioctl==NULL)
1290 return -EOPNOTSUPP;
1291 ret=dev->do_ioctl(dev, &ifr, getset);
1292 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1293 break;
1294 }
1295
1296 ret = -EINVAL;
1297 }
1298 return(ret);
1299
1300
1301
1302 rarok:
1303 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1304 return 0;
1305 }
1306
1307
1308
1309
1310
1311
1312
1313 int dev_ioctl(unsigned int cmd, void *arg)
1314 {
1315 switch(cmd)
1316 {
1317 case SIOCGIFCONF:
1318 (void) dev_ifconf((char *) arg);
1319 return 0;
1320
1321
1322
1323
1324
1325 case SIOCGIFFLAGS:
1326 case SIOCGIFADDR:
1327 case SIOCGIFDSTADDR:
1328 case SIOCGIFBRDADDR:
1329 case SIOCGIFNETMASK:
1330 case SIOCGIFMETRIC:
1331 case SIOCGIFMTU:
1332 case SIOCGIFMEM:
1333 case SIOCGIFHWADDR:
1334 case SIOCSIFHWADDR:
1335 case SIOCGIFSLAVE:
1336 case SIOCGIFMAP:
1337 return dev_ifsioc(arg, cmd);
1338
1339
1340
1341
1342
1343 case SIOCSIFFLAGS:
1344 case SIOCSIFADDR:
1345 case SIOCSIFDSTADDR:
1346 case SIOCSIFBRDADDR:
1347 case SIOCSIFNETMASK:
1348 case SIOCSIFMETRIC:
1349 case SIOCSIFMTU:
1350 case SIOCSIFMEM:
1351 case SIOCSIFMAP:
1352 case SIOCSIFSLAVE:
1353 case SIOCADDMULTI:
1354 case SIOCDELMULTI:
1355 if (!suser())
1356 return -EPERM;
1357 return dev_ifsioc(arg, cmd);
1358
1359 case SIOCSIFLINK:
1360 return -EINVAL;
1361
1362
1363
1364
1365
1366 default:
1367 if((cmd >= SIOCDEVPRIVATE) &&
1368 (cmd <= (SIOCDEVPRIVATE + 15))) {
1369 return dev_ifsioc(arg, cmd);
1370 }
1371 return -EINVAL;
1372 }
1373 }
1374
1375
1376
1377
1378
1379
1380
1381
1382 extern int lance_init(void);
1383 extern int pi_init(void);
1384 extern int dec21040_init(void);
1385
1386 int net_dev_init(void)
1387 {
1388 struct device *dev, **dp;
1389
1390
1391
1392
1393
1394
1395 #if defined(CONFIG_LANCE)
1396 lance_init();
1397 #endif
1398 #if defined(CONFIG_PI)
1399 pi_init();
1400 #endif
1401 #if defined(CONFIG_PT)
1402 pt_init();
1403 #endif
1404 #if defined(CONFIG_DEC_ELCP)
1405 dec21040_init();
1406 #endif
1407
1408
1409
1410
1411
1412
1413
1414
1415 dp = &dev_base;
1416 while ((dev = *dp) != NULL)
1417 {
1418 int i;
1419 for (i = 0; i < DEV_NUMBUFFS; i++) {
1420 skb_queue_head_init(dev->buffs + i);
1421 }
1422
1423 if (dev->init && dev->init(dev))
1424 {
1425
1426
1427
1428 *dp = dev->next;
1429 }
1430 else
1431 {
1432 dp = &dev->next;
1433 }
1434 }
1435
1436 proc_net_register(&(struct proc_dir_entry) {
1437 PROC_NET_DEV, 3, "dev",
1438 S_IFREG | S_IRUGO, 1, 0, 0,
1439 0, &proc_net_inode_operations,
1440 dev_get_info
1441 });
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451 #ifdef CONFIG_NET_ALIAS
1452 net_alias_init();
1453 #endif
1454
1455 bh_base[NET_BH].routine = net_bh;
1456 enable_bh(NET_BH);
1457 return 0;
1458 }