This source file includes the following definitions:
- min
- dev_add_pack
- dev_remove_pack
- dev_get
- dev_open
- dev_close
- register_netdevice_notifier
- unregister_netdevice_notifier
- dev_queue_xmit
- netif_rx
- dev_rint
- dev_transmit
- in_net_bh
- net_bh
- dev_tint
- dev_ifconf
- sprintf_stats
- dev_get_info
- bad_mask
- dev_ifsioc
- dev_ioctl
- net_dev_init
1 /*
2  *	Protocol-independent network device support routines: packet-type
3  *	registration, device open/close, transmit queueing, the net_bh
4  *	receive bottom half, /proc/net/dev and the device ioctl calls.
5  */
47 #include <asm/segment.h>
48 #include <asm/system.h>
49 #include <asm/bitops.h>
50 #include <linux/config.h>
51 #include <linux/types.h>
52 #include <linux/kernel.h>
53 #include <linux/sched.h>
54 #include <linux/string.h>
55 #include <linux/mm.h>
56 #include <linux/socket.h>
57 #include <linux/sockios.h>
58 #include <linux/in.h>
59 #include <linux/errno.h>
60 #include <linux/interrupt.h>
61 #include <linux/if_ether.h>
62 #include <linux/inet.h>
63 #include <linux/netdevice.h>
64 #include <linux/etherdevice.h>
65 #include <linux/notifier.h>
66 #include <net/ip.h>
67 #include <net/route.h>
68 #include <linux/skbuff.h>
69 #include <net/sock.h>
70 #include <net/arp.h>
71 #include <linux/proc_fs.h>
72 #include <linux/stat.h>
73 #ifdef CONFIG_NET_ALIAS
74 #include <linux/net_alias.h>
75 #endif
76 /*
77  *	The list of packet types we will receive (as opposed to discard),
78  *	and the routines to invoke; ptype_all holds the ETH_P_ALL taps.
79  */
82 struct packet_type *ptype_base[16];
83 struct packet_type *ptype_all = NULL;
85 /*
86  *	Device lock counter, held while interface settings are being
87  *	changed (see dev_lock_wait() in the SIOCSIFFLAGS handling below).
88  */
89 int dev_lockct=0;
91 /*
92  *	Chain of notifiers run when a device is brought up or down.
93  */
95 struct notifier_block *netdev_chain=NULL;
97 /*
98  *	Device drivers queue received packets here; net_bh() empties it.
99  */
102 static struct sk_buff_head backlog =
103 {
104 (struct sk_buff *)&backlog, (struct sk_buff *)&backlog
105 #if CONFIG_SKB_CHECK
106 ,SK_HEAD_SKB
107 #endif
108 };
110 /*
111  *	Current length of the backlog queue, used to bound memory use.
112  */
114 static int backlog_size = 0;
116 /*
117  *	Return the lesser of the two values.
118  */
120 static __inline__ unsigned long min(unsigned long a, unsigned long b)
121 {
122 return (a < b)? a : b;
123 }
124 /****************************************************************************
125
126 		Protocol management and registration routines
127
128  ****************************************************************************/
129
130 /*
131  *	Count of ETH_P_ALL taps, kept so that dev_queue_xmit() can skip
132  *	the tap loop entirely when nobody is listening.
133  */
136 static int dev_nit=0;
138 /*
139  *	Add a protocol handler to the networking stack.  The passed
140  *	packet_type is linked into kernel lists and may not be freed
141  *	until it has been removed with dev_remove_pack().
142  */
144 void dev_add_pack(struct packet_type *pt)
145 {
146 int hash;
147 if(pt->type==htons(ETH_P_ALL))
148 {
149 dev_nit++;
150 pt->next=ptype_all;
151 ptype_all=pt;
152 }
153 else
154 {
155 hash=ntohs(pt->type)&15;
156 pt->next = ptype_base[hash];
157 ptype_base[hash] = pt;
158 }
159 }
161 /*
162  *	Remove a protocol handler that was previously added with
163  *	dev_add_pack().
164  */
166 void dev_remove_pack(struct packet_type *pt)
167 {
168 struct packet_type **pt1;
169 if(pt->type==htons(ETH_P_ALL))
170 {
171 dev_nit--;
172 pt1=&ptype_all;
173 }
174 else
175 pt1=&ptype_base[ntohs(pt->type)&15];
176 for(; (*pt1)!=NULL; pt1=&((*pt1)->next))
177 {
178 if(pt==(*pt1))
179 {
180 *pt1=pt->next;
181 return;
182 }
183 }
184 printk("dev_remove_pack: %p not found.\n", pt);
185 }
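
/*
 * Illustrative sketch only: how a protocol would register and later
 * remove a receive handler with dev_add_pack()/dev_remove_pack().
 * The packet_type field order, the handler signature and ETH_P_IP are
 * assumed from this kernel generation's headers; the example_* names
 * are hypothetical, not part of this file.
 */

static struct packet_type example_pt =
{
	0,		/* type: filled in at run time with htons(ETH_P_IP) */
	NULL,		/* dev:  NULL means "any device"                     */
	NULL,		/* func: set below; called from net_bh()             */
	NULL,		/* data: private pointer (e.g. an owning socket)     */
	NULL		/* next: maintained by dev_add_pack()                */
};

void example_register(int (*handler)(struct sk_buff *, struct device *,
				     struct packet_type *))
{
	example_pt.type = htons(ETH_P_IP);
	example_pt.func = handler;
	dev_add_pack(&example_pt);
}

void example_unregister(void)
{
	dev_remove_pack(&example_pt);
}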
186 /****************************************************************************
187
188 			Device Interface Subroutines
189
190  ****************************************************************************/
191
192 /*
193  *	Find an interface by name.
194  */
197 struct device *dev_get(const char *name)
198 {
199 struct device *dev;
200
201 for (dev = dev_base; dev != NULL; dev = dev->next)
202 {
203 if (strcmp(dev->name, name) == 0)
204 return(dev);
205 }
206 return(NULL);
207 }
208 /*
209  *	Prepare an interface for use: call the driver's open routine and,
210  *	on success, mark it up and running, load its multicast filter and
211  *	run the NETDEV_UP notifiers.
212  */
214 int dev_open(struct device *dev)
215 {
216 int ret = 0;
217
218
219
220
221 if (dev->open)
222 ret = dev->open(dev);
223
224
225
226
227
228 if (ret == 0)
229 {
230 dev->flags |= (IFF_UP | IFF_RUNNING);
231
232
233
234 dev_mc_upload(dev);
235 notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
236 }
237 return(ret);
238 }
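
/*
 * Illustrative sketch only: bringing an interface up by name with
 * dev_get() and dev_open().  The function name example_bring_up is
 * hypothetical.
 */

int example_bring_up(const char *name)
{
	struct device *dev = dev_get(name);

	if (dev == NULL)
		return -ENODEV;
	if (dev->flags & IFF_UP)
		return 0;		/* already up */
	return dev_open(dev);
}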
240 /*
241  *	Take an interface down: stop the driver, clear the UP/RUNNING
242  *	flags, run the NETDEV_DOWN notifiers, discard the multicast lists
243  *	and purge anything still queued for transmission.
244  */
245 int dev_close(struct device *dev)
246 {
247 int ct=0;
248
249
250
251
252
253
254 if ((dev->flags & IFF_UP) && dev->stop)
255 dev->stop(dev);
256
257
258
259
260
261 dev->flags&=~(IFF_UP|IFF_RUNNING);
262
263
264
265
266 notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
267
268
269
270 dev_mc_discard(dev);
271
272
273
274 dev->pa_addr = 0;
275 dev->pa_dstaddr = 0;
276 dev->pa_brdaddr = 0;
277 dev->pa_mask = 0;
278
279
280
281 while(ct<DEV_NUMBUFFS)
282 {
283 struct sk_buff *skb;
284 while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
285 if(skb->free)
286 kfree_skb(skb,FREE_WRITE);
287 ct++;
288 }
289 return(0);
290 }
292 /*
293  *	Register and unregister entries on the network device notifier
294  *	chain used by dev_open()/dev_close() above.
295  */
298 int register_netdevice_notifier(struct notifier_block *nb)
299 {
300 return notifier_chain_register(&netdev_chain, nb);
301 }
302
303 int unregister_netdevice_notifier(struct notifier_block *nb)
304 {
305 return notifier_chain_unregister(&netdev_chain,nb);
306 }
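
/*
 * Illustrative sketch only: watching interfaces come and go through the
 * notifier chain above.  The notifier_block field order and the callback
 * signature are assumptions about this kernel generation's
 * <linux/notifier.h> (later kernels also pass the notifier_block itself
 * to the callback); the example_* names are hypothetical.
 */

static int example_netdev_event(unsigned long event, void *ptr)
{
	struct device *dev = (struct device *) ptr;

	if (event == NETDEV_UP)
		printk("example: %s is up\n", dev->name);
	else if (event == NETDEV_DOWN)
		printk("example: %s is down\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block example_notifier =
{
	example_netdev_event,	/* notifier_call */
	NULL,			/* next          */
	0			/* priority      */
};

void example_watch(void)
{
	register_netdevice_notifier(&example_notifier);
}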
308 /*
309  *	Queue a buffer for transmission on a device.  A negative priority
310  *	marks a buffer being re-fed from dev_tint(); such buffers are
311  *	already locked and bypass the queueing and protocol-tap steps.
312  */
318 void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
319 {
320 unsigned long flags;
321 struct packet_type *ptype;
322 int where = 0;
323
324
325
326 if(pri>=0 && !skb_device_locked(skb))
327 skb_device_lock(skb);
328 #if CONFIG_SKB_CHECK
329 IS_SKB(skb);
330 #endif
331 skb->dev = dev;
332
333
334
335
336
337
338
339 if (pri < 0)
340 {
341 pri = -pri-1;
342 where = 1;
343 }
344
345 #ifdef CONFIG_NET_DEBUG
346 if (pri >= DEV_NUMBUFFS)
347 {
348 printk("bad priority in dev_queue_xmit.\n");
349 pri = 1;
350 }
351 #endif
352
353
354
355
356
357
358 if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
359 return;
360 }
361
362
363
364
365
366
367
368
369
370 #ifdef CONFIG_NET_ALIAS
371 if (net_alias_is(dev))
372 skb->dev = dev = net_alias_main_dev(dev);
373 #endif
374
375 save_flags(flags);
376 cli();
377 if (!where)
378
379 {
380 skb_queue_tail(dev->buffs + pri,skb);
381 skb_device_unlock(skb);
382 skb = skb_dequeue(dev->buffs + pri);
383 skb_device_lock(skb);
384 }
385 restore_flags(flags);
386
387
388 if(!where && dev_nit)
389 {
390 skb->stamp=xtime;
391 for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next)
392 {
393
394
395
396 if ((ptype->dev == dev || !ptype->dev) &&
397 ((struct sock *)ptype->data != skb->sk))
398 {
399 struct sk_buff *skb2;
400 if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
401 break;
402 skb2->h.raw = skb2->data + dev->hard_header_len;
403 skb2->mac.raw = skb2->data;
404 ptype->func(skb2, skb->dev, ptype);
405 }
406 }
407 }
408 start_bh_atomic();
409 if (dev->hard_start_xmit(skb, dev) == 0) {
410
411
412
413 end_bh_atomic();
414 return;
415 }
416 end_bh_atomic();
417
418
419
420
421
422 cli();
423 skb_device_unlock(skb);
424 skb_queue_head(dev->buffs + pri,skb);
425 restore_flags(flags);
426 }
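
/*
 * Illustrative sketch only: how a protocol hands a finished buffer to
 * dev_queue_xmit().  The sk_buff field names and the SOPRI_NORMAL
 * priority constant are assumed from this kernel generation's headers;
 * example_send is a hypothetical name.
 */

static void example_send(struct sk_buff *skb, struct device *dev)
{
	skb->dev = dev;
	skb->free = 1;		/* buffer is freed once it has been sent   */
	skb->sk = NULL;		/* no owning socket in this sketch         */

	/* The caller should have completed the hardware header (skb->arp */
	/* non-zero), otherwise dev_queue_xmit() calls rebuild_header().  */
	dev_queue_xmit(skb, dev, SOPRI_NORMAL);
}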
428 /*
429  *	Receive a packet from a device driver and queue it for the bottom
430  *	half to process.  Packets are dropped once the backlog passes 300
431  *	buffers, until the queue has drained completely.
432  */
434 void netif_rx(struct sk_buff *skb)
435 {
436 static int dropping = 0;
437
438
439
440
441
442
443 skb->sk = NULL;
444 skb->free = 1;
445 if(skb->stamp.tv_sec==0)
446 skb->stamp = xtime;
447
448
449
450
451
452 if (!backlog_size)
453 dropping = 0;
454 else if (backlog_size > 300)
455 dropping = 1;
456
457 if (dropping)
458 {
459 kfree_skb(skb, FREE_READ);
460 return;
461 }
462
463
464
465
466 #if CONFIG_SKB_CHECK
467 IS_SKB(skb);
468 #endif
469 skb_queue_tail(&backlog,skb);
470 backlog_size++;
471
472
473
474
475
476
477 #ifdef CONFIG_NET_RUNONIRQ
478 net_bh();
479 #else
480 mark_bh(NET_BH);
481 #endif
482 return;
483 }
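
/*
 * Illustrative sketch only: the usual way an Ethernet driver's interrupt
 * handler feeds a received frame to netif_rx().  dev_alloc_skb(),
 * skb_reserve(), skb_put() and eth_type_trans() are assumed to be
 * available as in drivers of this kernel generation; example_rx is a
 * hypothetical name.
 */

static void example_rx(struct device *dev, unsigned char *data, int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + 2);

	if (skb == NULL)
		return;			/* drop: out of memory        */
	skb_reserve(skb, 2);		/* align the IP header        */
	skb->dev = dev;
	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);			/* queue it and mark NET_BH   */
}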
485 /*
486  *	Older receive entry point.  Either accepts a ready-made sk_buff
487  *	(IN_SKBUFF set in flags) or copies the data, wrapping around the
488  *	device's receive ring between rmem_start and rmem_end, into a
489  *	freshly allocated buffer, and then feeds it to netif_rx().
490  */
500 int dev_rint(unsigned char *buff, long len, int flags, struct device *dev)
501 {
502 static int dropping = 0;
503 struct sk_buff *skb = NULL;
504 unsigned char *to;
505 int amount, left;
506 int len2;
507
508 if (dev == NULL || buff == NULL || len <= 0)
509 return(1);
510
511 if (flags & IN_SKBUFF)
512 {
513 skb = (struct sk_buff *) buff;
514 }
515 else
516 {
517 if (dropping)
518 {
519 if (skb_peek(&backlog) != NULL)
520 return(1);
521 printk("INET: dev_rint: no longer dropping packets.\n");
522 dropping = 0;
523 }
524
525 skb = alloc_skb(len, GFP_ATOMIC);
526 if (skb == NULL)
527 {
528 printk("dev_rint: packet dropped on %s (no memory) !\n",
529 dev->name);
530 dropping = 1;
531 return(1);
532 }
533
534
535
536
537
538
539 to = skb_put(skb,len);
540 left = len;
541
542 len2 = len;
543 while (len2 > 0)
544 {
545 amount = min(len2, (unsigned long) dev->rmem_end -
546 (unsigned long) buff);
547 memcpy(to, buff, amount);
548 len2 -= amount;
549 left -= amount;
550 buff += amount;
551 to += amount;
552 if ((unsigned long) buff == dev->rmem_end)
553 buff = (unsigned char *) dev->rmem_start;
554 }
555 }
556
557
558
559
560
561 skb->dev = dev;
562 skb->free = 1;
563
564 netif_rx(skb);
565
566
567
568 return(0);
569 }
571 /*
572  *	Ask every configured interface that is not busy to transmit
573  *	anything still sitting on its output queues.
574  */
576 void dev_transmit(void)
577 {
578 struct device *dev;
579
580 for (dev = dev_base; dev != NULL; dev = dev->next)
581 {
582 if (dev->flags != 0 && !dev->tbusy) {
583
584
585
586 dev_tint(dev);
587 }
588 }
589 }
591 /****************************************************************************
592
593 			Receive Queue Processor
594
595  ****************************************************************************/
596
597 /*
598  *	Non-zero while the network bottom half is running; in_net_bh()
599  *	lets other code test for this.
600  */
604 volatile unsigned long in_bh = 0;
605
606 int in_net_bh()
607 {
608 return(in_bh==0?0:1);
609 }
611 /*
612  *	The network bottom half.  Flush pending transmits, then pull each
613  *	packet off the backlog queue and hand it to every matching
614  *	packet_type handler, cloning the buffer when more than one handler
615  *	wants it so that the last handler consumes the original.  Packets
616  *	nobody wants are freed here.
617  */
619 void net_bh(void *tmp)
620 {
621 struct sk_buff *skb;
622 struct packet_type *ptype;
623 struct packet_type *pt_prev;
624 unsigned short type;
625
626
627
628
629
630 if (set_bit(1, (void*)&in_bh))
631 return;
632
633
634
635
636
637
638
639
640 dev_transmit();
641
642
643
644
645
646
647
648 cli();
649
650
651
652
653
654 while((skb=skb_dequeue(&backlog))!=NULL)
655 {
656
657
658
659 backlog_size--;
660
661 sti();
662
663
664
665
666
667
668
669
670 skb->h.raw = skb->data;
671
672
673
674
675
676 type = skb->protocol;
677
678
679
680
681
682
683 pt_prev = NULL;
684 for (ptype = ptype_all; ptype!=NULL; ptype=ptype->next)
685 {
686 if(pt_prev)
687 {
688 struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC);
689 if(skb2)
690 pt_prev->func(skb2,skb->dev, pt_prev);
691 }
692 pt_prev=ptype;
693 }
694
695 for (ptype = ptype_base[ntohs(type)&15]; ptype != NULL; ptype = ptype->next)
696 {
697 if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev))
698 {
699
700
701
702
703 if(pt_prev)
704 {
705 struct sk_buff *skb2;
706
707 skb2=skb_clone(skb, GFP_ATOMIC);
708
709
710
711
712
713
714 if(skb2)
715 pt_prev->func(skb2, skb->dev, pt_prev);
716 }
717
718 pt_prev=ptype;
719 }
720 }
721
722
723
724
725
726 if(pt_prev)
727 pt_prev->func(skb, skb->dev, pt_prev);
728
729
730
731
732 else
733 kfree_skb(skb, FREE_WRITE);
734
735
736
737
738
739
740 #ifdef CONFIG_XMIT_EVERY
741 dev_transmit();
742 #endif
743 cli();
744 }
745
746
747
748
749
750 in_bh = 0;
751 sti();
752
753
754
755
756
757 dev_transmit();
758 }
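
/*
 * Illustrative sketch only: the contract a packet_type handler has with
 * net_bh() above.  The handler may be given the original buffer or a
 * clone, it owns whichever it gets, and it must free the buffer when it
 * is finished with it.  net_bh() has already set skb->h.raw to skb->data,
 * the start of the protocol header.  example_handler is a hypothetical
 * name.
 */

static int example_handler(struct sk_buff *skb, struct device *dev,
			   struct packet_type *pt)
{
	printk("example: packet received on %s\n", dev->name);

	/* The buffer is ours now: free it (or queue it onwards). */
	kfree_skb(skb, FREE_READ);
	return 0;
}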
760 /*
761  *	Called when a device can accept more output: re-submit anything
762  *	queued on its priority queues via dev_queue_xmit().
763  */
766 void dev_tint(struct device *dev)
767 {
768 int i;
769 struct sk_buff *skb;
770 unsigned long flags;
771
772
773
774
775
776 #ifdef CONFIG_NET_ALIAS
777 if (net_alias_is(dev)) return;
778 #endif
779 save_flags(flags);
780
781
782
783
784 for(i = 0;i < DEV_NUMBUFFS; i++)
785 {
786
787
788
789
790
791 cli();
792 while((skb=skb_dequeue(&dev->buffs[i]))!=NULL)
793 {
794
795
796
797 skb_device_lock(skb);
798 restore_flags(flags);
799
800
801
802
803 dev_queue_xmit(skb,dev,-i - 1);
804
805
806
807 if (dev->tbusy)
808 return;
809 cli();
810 }
811 }
812 restore_flags(flags);
813 }
815 /*
816  *	Produce the list of running interfaces for the SIOCGIFCONF ioctl,
817  *	copying one struct ifreq per interface to user space.
818  */
822 static int dev_ifconf(char *arg)
823 {
824 struct ifconf ifc;
825 struct ifreq ifr;
826 struct device *dev;
827 char *pos;
828 int len;
829 int err;
830
831
832
833
834
835 err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
836 if(err)
837 return err;
838 memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
839 len = ifc.ifc_len;
840 pos = ifc.ifc_buf;
841
842
843
844
845
846
847 err=verify_area(VERIFY_WRITE,pos,len);
848 if(err)
849 return err;
850
851
852
853
854
855 for (dev = dev_base; dev != NULL; dev = dev->next)
856 {
857 if(!(dev->flags & IFF_UP))
858 continue;
859 memset(&ifr, 0, sizeof(struct ifreq));
860 strcpy(ifr.ifr_name, dev->name);
861 (*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
862 (*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
863
864
865
866
867
868 if (len < sizeof(struct ifreq))
869 break;
870
871
872
873
874
875 memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
876 pos += sizeof(struct ifreq);
877 len -= sizeof(struct ifreq);
878 }
879
880
881
882
883
884 ifc.ifc_len = (pos - ifc.ifc_buf);
885 ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
886 memcpy_tofs(arg, &ifc, sizeof(struct ifconf));
887
888
889
890
891
892 return(pos - arg);
893 }
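
/*
 * Illustrative example only: the user-space view of the SIOCGIFCONF call
 * handled by dev_ifconf() above.  This is ordinary application code,
 * shown purely to make the kernel side easier to follow, so it is kept
 * out of the build.
 */

#if 0	/* user-space program, not kernel code */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq reqs[16];
	struct ifconf ifc;
	int fd = socket(AF_INET, SOCK_DGRAM, 0), i, n;

	ifc.ifc_len = sizeof(reqs);
	ifc.ifc_req = reqs;
	if (fd < 0 || ioctl(fd, SIOCGIFCONF, &ifc) < 0)
		return 1;
	n = ifc.ifc_len / sizeof(struct ifreq);
	for (i = 0; i < n; i++)
		printf("%s\n", reqs[i].ifr_name);	/* running interfaces */
	return 0;
}
#endif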
895 /*
896  *	Format one device's statistics for /proc/net/dev.
897  */
901 static int sprintf_stats(char *buffer, struct device *dev)
902 {
903 struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
904 int size;
905
906 if (stats)
907 size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
908 dev->name,
909 stats->rx_packets, stats->rx_errors,
910 stats->rx_dropped + stats->rx_missed_errors,
911 stats->rx_fifo_errors,
912 stats->rx_length_errors + stats->rx_over_errors
913 + stats->rx_crc_errors + stats->rx_frame_errors,
914 stats->tx_packets, stats->tx_errors, stats->tx_dropped,
915 stats->tx_fifo_errors, stats->collisions,
916 stats->tx_carrier_errors + stats->tx_aborted_errors
917 + stats->tx_window_errors + stats->tx_heartbeat_errors);
918 else
919 size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);
920
921 return size;
922 }
924 /*
925  *	Called from the /proc filesystem to build /proc/net/dev.
926  */
929 int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
930 {
931 int len=0;
932 off_t begin=0;
933 off_t pos=0;
934 int size;
935
936 struct device *dev;
937
938
939 size = sprintf(buffer, "Inter-| Receive | Transmit\n"
940 " face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");
941
942 pos+=size;
943 len+=size;
944
945
946 for (dev = dev_base; dev != NULL; dev = dev->next)
947 {
948 size = sprintf_stats(buffer+len, dev);
949 len+=size;
950 pos=begin+len;
951
952 if(pos<offset)
953 {
954 len=0;
955 begin=pos;
956 }
957 if(pos>offset+length)
958 break;
959 }
960
961 *start=buffer+(offset-begin);
962 len-=(offset-begin);
963 if(len>length)
964 len=length;
965 return len;
966 }
968 /*
969  *	Sanity-check a netmask: it must be contiguous and must not leave
970  *	any of the supplied address bits uncovered.
971  */
973 static inline int bad_mask(unsigned long mask, unsigned long addr)
974 {
975 if (addr & (mask = ~mask))
976 return 1;
977 mask = ntohl(mask);
978 if (mask & (mask+1))
979 return 1;
980 return 0;
981 }
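
/*
 * Illustrative example only: what bad_mask() accepts and rejects.  The
 * masks are written in host order and converted with htonl() because
 * bad_mask() expects network byte order; example_mask_checks is a
 * hypothetical name.
 */

void example_mask_checks(void)
{
	/* Contiguous /24 mask, no stray address bits: accepted (returns 0). */
	if (bad_mask(htonl(0xFFFFFF00UL), 0) == 0)
		printk("255.255.255.0 is a valid mask\n");

	/* Non-contiguous mask: rejected (returns 1). */
	if (bad_mask(htonl(0xFF00FF00UL), 0))
		printk("255.0.255.0 is rejected\n");

	/* Address bits falling outside the mask: rejected as well. */
	if (bad_mask(htonl(0xFFFFFF00UL), htonl(0x000000FFUL)))
		printk("host bits outside the mask are rejected\n");
}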
983 /*
984  *	Perform the per-interface SIOCxIFxxx calls on behalf of
985  *	dev_ioctl() below.
986  */
990 static int dev_ifsioc(void *arg, unsigned int getset)
991 {
992 struct ifreq ifr;
993 struct device *dev;
994 int ret;
995
996
997
998
999
1000 int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
1001 if(err)
1002 return err;
1003
1004 memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017 #ifdef CONFIG_NET_ALIAS
1018 if ((dev = net_alias_dev_get(ifr.ifr_name, getset == SIOCSIFADDR, &err, NULL, NULL)) == NULL)
1019 return(err);
1020 #else
1021 if ((dev = dev_get(ifr.ifr_name)) == NULL)
1022 return(-ENODEV);
1023 #endif
1024 switch(getset)
1025 {
1026 case SIOCGIFFLAGS:
1027 ifr.ifr_flags = dev->flags;
1028 goto rarok;
1029
1030 case SIOCSIFFLAGS:
1031 {
1032 int old_flags = dev->flags;
1033
1034
1035
1036
1037
1038
1039 dev_lock_wait();
1040
1041
1042
1043
1044
1045 dev->flags = (ifr.ifr_flags & (
1046 IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
1047 IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
1048 IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER
1049 | IFF_MULTICAST)) | (dev->flags & IFF_UP);
1050
1051
1052
1053
1054 dev_mc_upload(dev);
1055
1056
1057
1058
1059
1060
1061
1062 if ((old_flags^ifr.ifr_flags)&IFF_UP)
1063 {
1064 if(old_flags&IFF_UP)
1065 ret=dev_close(dev);
1066 else
1067 {
1068 ret=dev_open(dev);
1069 if(ret<0)
1070 dev->flags&=~IFF_UP;
1071 }
1072 }
1073 else
1074 ret=0;
1075
1076
1077
1078
1079 dev_mc_upload(dev);
1080 }
1081 break;
1082
1083 case SIOCGIFADDR:
1084 if(ifr.ifr_addr.sa_family==AF_UNSPEC)
1085 {
1086 memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
1087 ifr.ifr_hwaddr.sa_family=dev->type;
1088 goto rarok;
1089 }
1090 else
1091 {
1092 (*(struct sockaddr_in *)
1093 &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
1094 (*(struct sockaddr_in *)
1095 &ifr.ifr_addr).sin_family = dev->family;
1096 (*(struct sockaddr_in *)
1097 &ifr.ifr_addr).sin_port = 0;
1098 }
1099 goto rarok;
1100
1101 case SIOCSIFADDR:
1102
1103
1104
1105
1106
1107
1108 if(ifr.ifr_addr.sa_family==AF_UNSPEC)
1109 {
1110 if(dev->set_mac_address==NULL)
1111 return -EOPNOTSUPP;
1112 ret=dev->set_mac_address(dev,&ifr.ifr_addr);
1113 }
1114 else
1115 {
1116
1117
1118
1119
1120
1121
1122 #ifdef CONFIG_NET_ALIAS
1123 if (net_alias_is(dev))
1124 net_alias_rehash(dev->my_alias,&ifr.ifr_addr);
1125 #endif
1126 dev->pa_addr = (*(struct sockaddr_in *)
1127 &ifr.ifr_addr).sin_addr.s_addr;
1128 dev->family = ifr.ifr_addr.sa_family;
1129
1130 #ifdef CONFIG_INET
1131
1132
1133 dev->pa_mask = ip_get_mask(dev->pa_addr);
1134 #endif
1135 dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
1136 ret = 0;
1137 }
1138 break;
1139
1140 case SIOCGIFBRDADDR:
1141 (*(struct sockaddr_in *)
1142 &ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
1143 (*(struct sockaddr_in *)
1144 &ifr.ifr_broadaddr).sin_family = dev->family;
1145 (*(struct sockaddr_in *)
1146 &ifr.ifr_broadaddr).sin_port = 0;
1147 goto rarok;
1148
1149 case SIOCSIFBRDADDR:
1150 dev->pa_brdaddr = (*(struct sockaddr_in *)
1151 &ifr.ifr_broadaddr).sin_addr.s_addr;
1152 ret = 0;
1153 break;
1154
1155 case SIOCGIFDSTADDR:
1156 (*(struct sockaddr_in *)
1157 &ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
1158 (*(struct sockaddr_in *)
1159 &ifr.ifr_dstaddr).sin_family = dev->family;
1160 (*(struct sockaddr_in *)
1161 &ifr.ifr_dstaddr).sin_port = 0;
1162 goto rarok;
1163
1164 case SIOCSIFDSTADDR:
1165 dev->pa_dstaddr = (*(struct sockaddr_in *)
1166 &ifr.ifr_dstaddr).sin_addr.s_addr;
1167 ret = 0;
1168 break;
1169
1170 case SIOCGIFNETMASK:
1171 (*(struct sockaddr_in *)
1172 &ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
1173 (*(struct sockaddr_in *)
1174 &ifr.ifr_netmask).sin_family = dev->family;
1175 (*(struct sockaddr_in *)
1176 &ifr.ifr_netmask).sin_port = 0;
1177 goto rarok;
1178
1179 case SIOCSIFNETMASK:
1180 {
1181 unsigned long mask = (*(struct sockaddr_in *)
1182 &ifr.ifr_netmask).sin_addr.s_addr;
1183 ret = -EINVAL;
1184
1185
1186
1187 if (bad_mask(mask,0))
1188 break;
1189 dev->pa_mask = mask;
1190 ret = 0;
1191 }
1192 break;
1193
1194 case SIOCGIFMETRIC:
1195
1196 ifr.ifr_metric = dev->metric;
1197 goto rarok;
1198
1199 case SIOCSIFMETRIC:
1200 dev->metric = ifr.ifr_metric;
1201 ret=0;
1202 break;
1203
1204 case SIOCGIFMTU:
1205 ifr.ifr_mtu = dev->mtu;
1206 goto rarok;
1207
1208 case SIOCSIFMTU:
1209
1210
1211
1212
1213
1214 if(ifr.ifr_mtu<68)
1215 return -EINVAL;
1216 dev->mtu = ifr.ifr_mtu;
1217 ret = 0;
1218 break;
1219
1220 case SIOCGIFMEM:
1221
1222 ret = -EINVAL;
1223 break;
1224
1225 case SIOCSIFMEM:
1226 ret = -EINVAL;
1227 break;
1228
1229 case SIOCGIFHWADDR:
1230 memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
1231 ifr.ifr_hwaddr.sa_family=dev->type;
1232 goto rarok;
1233
1234 case SIOCSIFHWADDR:
1235 if(dev->set_mac_address==NULL)
1236 return -EOPNOTSUPP;
1237 if(ifr.ifr_hwaddr.sa_family!=dev->type)
1238 return -EINVAL;
1239 ret=dev->set_mac_address(dev,&ifr.ifr_hwaddr);
1240 break;
1241
1242 case SIOCGIFMAP:
1243 ifr.ifr_map.mem_start=dev->mem_start;
1244 ifr.ifr_map.mem_end=dev->mem_end;
1245 ifr.ifr_map.base_addr=dev->base_addr;
1246 ifr.ifr_map.irq=dev->irq;
1247 ifr.ifr_map.dma=dev->dma;
1248 ifr.ifr_map.port=dev->if_port;
1249 goto rarok;
1250
1251 case SIOCSIFMAP:
1252 if(dev->set_config==NULL)
1253 return -EOPNOTSUPP;
1254 return dev->set_config(dev,&ifr.ifr_map);
1255
1256 case SIOCADDMULTI:
1257 if(dev->set_multicast_list==NULL)
1258 return -EINVAL;
1259 if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
1260 return -EINVAL;
1261 dev_mc_add(dev,ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
1262 return 0;
1263
1264 case SIOCDELMULTI:
1265 if(dev->set_multicast_list==NULL)
1266 return -EINVAL;
1267 if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
1268 return -EINVAL;
1269 dev_mc_delete(dev,ifr.ifr_hwaddr.sa_data,dev->addr_len, 1);
1270 return 0;
1271
1272
1273
1274
1275 default:
1276 if((getset >= SIOCDEVPRIVATE) &&
1277 (getset <= (SIOCDEVPRIVATE + 15))) {
1278 if(dev->do_ioctl==NULL)
1279 return -EOPNOTSUPP;
1280 ret=dev->do_ioctl(dev, &ifr, getset);
1281 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1282 break;
1283 }
1284
1285 ret = -EINVAL;
1286 }
1287 return(ret);
1288
1289
1290
1291 rarok:
1292 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1293 return 0;
1294 }
1296 /*
1297  *	The main device ioctl entry point.  Permission checks for the SET
1298  *	calls are made here; the work is done by dev_ifsioc().
1299  */
1302 int dev_ioctl(unsigned int cmd, void *arg)
1303 {
1304 switch(cmd)
1305 {
1306 case SIOCGIFCONF:
1307 (void) dev_ifconf((char *) arg);
1308 return 0;
1309
1310
1311
1312
1313
1314 case SIOCGIFFLAGS:
1315 case SIOCGIFADDR:
1316 case SIOCGIFDSTADDR:
1317 case SIOCGIFBRDADDR:
1318 case SIOCGIFNETMASK:
1319 case SIOCGIFMETRIC:
1320 case SIOCGIFMTU:
1321 case SIOCGIFMEM:
1322 case SIOCGIFHWADDR:
1323 case SIOCSIFHWADDR:
1324 case SIOCGIFSLAVE:
1325 case SIOCGIFMAP:
1326 return dev_ifsioc(arg, cmd);
1327
1328
1329
1330
1331
1332 case SIOCSIFFLAGS:
1333 case SIOCSIFADDR:
1334 case SIOCSIFDSTADDR:
1335 case SIOCSIFBRDADDR:
1336 case SIOCSIFNETMASK:
1337 case SIOCSIFMETRIC:
1338 case SIOCSIFMTU:
1339 case SIOCSIFMEM:
1340 case SIOCSIFMAP:
1341 case SIOCSIFSLAVE:
1342 case SIOCADDMULTI:
1343 case SIOCDELMULTI:
1344 if (!suser())
1345 return -EPERM;
1346 return dev_ifsioc(arg, cmd);
1347
1348 case SIOCSIFLINK:
1349 return -EINVAL;
1350
1351
1352
1353
1354
1355 default:
1356 if((cmd >= SIOCDEVPRIVATE) &&
1357 (cmd <= (SIOCDEVPRIVATE + 15))) {
1358 return dev_ifsioc(arg, cmd);
1359 }
1360 return -EINVAL;
1361 }
1362 }
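
/*
 * Illustrative example only: the user-space side of the SIOCGIFFLAGS /
 * SIOCSIFFLAGS path dispatched by dev_ioctl() above.  The SET call only
 * reaches dev_ifsioc() for the superuser.  Application code, kept out of
 * the build.
 */

#if 0	/* user-space program, not kernel code */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int example_set_up(const char *name)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, name, IFNAMSIZ);
	if (ioctl(fd, SIOCGIFFLAGS, &ifr) < 0)	/* read current flags */
		return -1;
	ifr.ifr_flags |= IFF_UP;
	return ioctl(fd, SIOCSIFFLAGS, &ifr);	/* requires root */
}
#endif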
1364 /*
1365  *	Initialize the device subsystem: probe the statically linked
1366  *	device families, set up each device's queues, unhook any device
1367  *	whose init routine fails, register /proc/net/dev and install the
1368  *	net_bh() handler.
1369  */
1371 extern int lance_init(void);
1372 extern int pi_init(void);
1373 extern int dec21040_init(void);
1374
1375 int net_dev_init(void)
1376 {
1377 struct device *dev, **dp;
1378
1379
1380
1381
1382
1383
1384 #if defined(CONFIG_LANCE)
1385 lance_init();
1386 #endif
1387 #if defined(CONFIG_PI)
1388 pi_init();
1389 #endif
1390 #if defined(CONFIG_DEC_ELCP)
1391 dec21040_init();
1392 #endif
1393
1394
1395
1396
1397
1398
1399
1400
1401 dp = &dev_base;
1402 while ((dev = *dp) != NULL)
1403 {
1404 int i;
1405 for (i = 0; i < DEV_NUMBUFFS; i++) {
1406 skb_queue_head_init(dev->buffs + i);
1407 }
1408
1409 if (dev->init && dev->init(dev))
1410 {
1411
1412
1413
1414 *dp = dev->next;
1415 }
1416 else
1417 {
1418 dp = &dev->next;
1419 }
1420 }
1421
1422 proc_net_register(&(struct proc_dir_entry) {
1423 PROC_NET_DEV, 3, "dev",
1424 S_IFREG | S_IRUGO, 1, 0, 0,
1425 0, &proc_net_inode_operations,
1426 dev_get_info
1427 });
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437 #ifdef CONFIG_NET_ALIAS
1438 net_alias_init();
1439 #endif
1440
1441 bh_base[NET_BH].routine = net_bh;
1442 enable_bh(NET_BH);
1443 return 0;
1444 }