This source file includes the following definitions:
- min
- dev_add_pack
- dev_remove_pack
- dev_get
- dev_open
- dev_close
- register_netdevice_notifier
- unregister_netdevice_notifier
- dev_queue_xmit
- netif_rx
- dev_rint
- dev_transmit
- in_net_bh
- net_bh
- dev_tint
- dev_ifconf
- sprintf_stats
- dev_get_info
- bad_mask
- dev_ifsioc
- dev_ioctl
- dev_init
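
As a quick orientation before the source itself: a protocol layer normally uses these entry points by registering a struct packet_type (so net_bh() will hand it matching frames) and by queueing outgoing buffers with dev_queue_xmit() at a priority between 0 and DEV_NUMBUFFS-1. The fragment below is only an illustrative sketch of the receive-side registration, written against the types this file uses; my_proto_rcv, my_proto_init and ETH_P_MYPROTO are invented names, not definitions from dev.c.

	/* Sketch only -- not part of dev.c. */
	static int my_proto_rcv(struct sk_buff *skb, struct device *dev,
				struct packet_type *pt)
	{
		/* ... examine skb->data ... */
		kfree_skb(skb, FREE_READ);	/* the handler owns the buffer */
		return 0;
	}

	static struct packet_type my_proto_pt;

	void my_proto_init(void)
	{
		my_proto_pt.type = htons(ETH_P_MYPROTO);	/* invented protocol id		*/
		my_proto_pt.dev  = NULL;			/* NULL matches any device	*/
		my_proto_pt.func = my_proto_rcv;		/* called from net_bh()		*/
		my_proto_pt.data = NULL;			/* compared against skb->sk	*/
		dev_add_pack(&my_proto_pt);
	}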

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>

/*
 *	The list of packet types we will receive (as opposed to discard).
 *	ptype_base[] is hashed on the protocol id; ptype_all holds the
 *	ETH_P_ALL taps that want to see every frame.
 */

struct packet_type *ptype_base[16];
struct packet_type *ptype_all = NULL;

int dev_lockct=0;

/*
 *	Chain of routines to call when a device comes up or goes down
 *	(see register_netdevice_notifier below).
 */

struct notifier_block *netdev_chain=NULL;

/*
 *	Packets received from drivers wait on this queue until net_bh()
 *	gets around to processing them.
 */

static struct sk_buff_head backlog =
{
	(struct sk_buff *)&backlog, (struct sk_buff *)&backlog
#if CONFIG_SKB_CHECK
	,SK_HEAD_SKB
#endif
};

static int backlog_size = 0;

static __inline__ unsigned long min(unsigned long a, unsigned long b)
{
	return (a < b)? a : b;
}

/*
 *	Number of handlers registered for ETH_P_ALL (kept so the transmit
 *	path can skip the tap loop when there are none).
 */

static int dev_nit=0;
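
/*
 *	Add a protocol handler to the receive path.  ETH_P_ALL handlers are
 *	kept on the ptype_all list and see every frame; anything else is
 *	hashed into ptype_base[] on the low four bits of the protocol id.
 */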
void dev_add_pack(struct packet_type *pt)
{
	int hash;
	if(pt->type==htons(ETH_P_ALL))
	{
		dev_nit++;
		pt->next=ptype_all;
		ptype_all=pt;
	}
	else
	{
		hash=ntohs(pt->type)&15;
		pt->next = ptype_base[hash];
		ptype_base[hash] = pt;
	}
}
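
/*
 *	Remove a previously registered packet handler, complaining if it was
 *	never added.
 */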
void dev_remove_pack(struct packet_type *pt)
{
	struct packet_type **pt1;
	if(pt->type==htons(ETH_P_ALL))
	{
		dev_nit--;
		pt1=&ptype_all;
	}
	else
		pt1=&ptype_base[ntohs(pt->type)&15];
	for(; (*pt1)!=NULL; pt1=&((*pt1)->next))
	{
		if(pt==(*pt1))
		{
			*pt1=pt->next;
			return;
		}
	}
	printk("dev_remove_pack: %p not found.\n", pt);
}
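
/*
 *	Find an interface by name.  Returns NULL if no device with that name
 *	exists.
 */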
struct device *dev_get(const char *name)
{
	struct device *dev;

	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		if (strcmp(dev->name, name) == 0)
			return(dev);
	}
	return(NULL);
}
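
/*
 *	Bring an interface up: call the driver's open routine, set IFF_UP and
 *	IFF_RUNNING, load the multicast list and run the NETDEV_UP notifiers.
 */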
int dev_open(struct device *dev)
{
	int ret = 0;

	if (dev->open)
		ret = dev->open(dev);

	if (ret == 0)
	{
		dev->flags |= (IFF_UP | IFF_RUNNING);

#ifdef CONFIG_IP_MULTICAST
		ip_mc_allhost(dev);
#endif
		dev_mc_upload(dev);
		notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
	}
	return(ret);
}
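
/*
 *	Take an interface down: stop the driver, clear the flags and protocol
 *	addresses, run the NETDEV_DOWN notifiers, discard the multicast list
 *	and free anything still sitting on the transmit queues.
 */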
int dev_close(struct device *dev)
{
	int ct=0;

	if ((dev->flags & IFF_UP) && dev->stop)
		dev->stop(dev);

	dev->flags = 0;

	notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);

	dev_mc_discard(dev);

	dev->pa_addr = 0;
	dev->pa_dstaddr = 0;
	dev->pa_brdaddr = 0;
	dev->pa_mask = 0;

	while(ct<DEV_NUMBUFFS)
	{
		struct sk_buff *skb;
		while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
			if(skb->free)
				kfree_skb(skb,FREE_WRITE);
		ct++;
	}
	return(0);
}
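
/*
 *	Register and unregister callers who want to hear about NETDEV_UP and
 *	NETDEV_DOWN events on netdev_chain.
 */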
int register_netdevice_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&netdev_chain, nb);
}

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&netdev_chain,nb);
}
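
/*
 *	Queue a buffer for transmission on a device.  A negative priority
 *	marks a retransmit being fed back in from dev_tint(), which must go
 *	back to the front of its queue on failure.  Outgoing frames are also
 *	copied to any ETH_P_ALL taps before being handed to the driver.
 */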
void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
	unsigned long flags;
	struct packet_type *ptype;
	int where = 0;		/* set for retransmits pulled back in by dev_tint() */

	if(pri>=0 && !skb_device_locked(skb))
		skb_device_lock(skb);	/* Shove a lock on the frame */
#if CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	skb->dev = dev;

	/*
	 *	A negative priority marks a frame being retransmitted from the
	 *	front of its queue; on failure it goes back on the front.
	 */
	if (pri < 0)
	{
		pri = -pri-1;
		where = 1;
	}

#ifdef CONFIG_NET_DEBUG
	if (pri >= DEV_NUMBUFFS)
	{
		printk("bad priority in dev_queue_xmit.\n");
		pri = 1;
	}
#endif

	/*
	 *	If the hardware address has not been resolved yet, let the
	 *	device rebuild the header; a non-zero return means the frame
	 *	cannot be sent yet and has been queued elsewhere (e.g. behind
	 *	an ARP request).
	 */
	if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
		return;
	}

	save_flags(flags);
	cli();
	if (!where)		/* keep ordering: tail the frame, then take the head */
	{
		skb_queue_tail(dev->buffs + pri,skb);
		skb_device_unlock(skb);
		skb = skb_dequeue(dev->buffs + pri);
		skb_device_lock(skb);
	}
	restore_flags(flags);

	/*
	 *	Copy outgoing frames to the ETH_P_ALL taps, but never back to
	 *	the socket they came from.
	 */
	if(!where && dev_nit)
	{
		skb->stamp=xtime;
		for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next)
		{
			if ((ptype->dev == dev || !ptype->dev) &&
			   ((struct sock *)ptype->data != skb->sk))
			{
				struct sk_buff *skb2;
				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
					break;
				skb2->h.raw = skb2->data + dev->hard_header_len;
				skb2->mac.raw = skb2->data;
				ptype->func(skb2, skb->dev, ptype);
			}
		}
	}
	start_bh_atomic();
	if (dev->hard_start_xmit(skb, dev) == 0) {
		/*
		 *	The driver accepted the frame; it is now its problem.
		 */
		end_bh_atomic();
		return;
	}
	end_bh_atomic();

	/*
	 *	Transmission failed: unlock the buffer and put it back at the
	 *	head of its queue for another try.
	 */
	cli();
	skb_device_unlock(skb);
	skb_queue_head(dev->buffs + pri,skb);
	restore_flags(flags);
}
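
/*
 *	Receive a buffer from a device driver and queue it on the backlog for
 *	net_bh() to process.  Packets are dropped once the backlog passes 300
 *	buffers, until it has drained again.
 */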
void netif_rx(struct sk_buff *skb)
{
	static int dropping = 0;

	skb->sk = NULL;
	skb->free = 1;
	if(skb->stamp.tv_sec==0)
		skb->stamp = xtime;

	if (!backlog_size)
		dropping = 0;
	else if (backlog_size > 300)
		dropping = 1;

	if (dropping)
	{
		kfree_skb(skb, FREE_READ);
		return;
	}

#if CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	skb_queue_tail(&backlog,skb);
	backlog_size++;

#ifdef CONFIG_NET_RUNONIRQ
	net_bh();
#else
	mark_bh(NET_BH);
#endif
	return;
}
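
/*
 *	Alternative receive entry point used by some drivers: either accept a
 *	ready-made sk_buff (IN_SKBUFF) or copy len bytes out of the driver's
 *	(possibly circular) receive buffer into a fresh one, then hand the
 *	result to netif_rx().
 */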
int dev_rint(unsigned char *buff, long len, int flags, struct device *dev)
{
	static int dropping = 0;
	struct sk_buff *skb = NULL;
	unsigned char *to;
	int amount, left;
	int len2;

	if (dev == NULL || buff == NULL || len <= 0)
		return(1);

	if (flags & IN_SKBUFF)
	{
		skb = (struct sk_buff *) buff;
	}
	else
	{
		if (dropping)
		{
			if (skb_peek(&backlog) != NULL)
				return(1);
			printk("INET: dev_rint: no longer dropping packets.\n");
			dropping = 0;
		}

		skb = alloc_skb(len, GFP_ATOMIC);
		if (skb == NULL)
		{
			printk("dev_rint: packet dropped on %s (no memory) !\n",
				dev->name);
			dropping = 1;
			return(1);
		}

		/*
		 *	Copy the data out of the driver's buffer, wrapping at
		 *	rmem_end back to rmem_start for ring buffers.
		 */
		to = skb_put(skb,len);
		left = len;

		len2 = len;
		while (len2 > 0)
		{
			amount = min(len2, (unsigned long) dev->rmem_end -
					(unsigned long) buff);
			memcpy(to, buff, amount);
			len2 -= amount;
			left -= amount;
			buff += amount;
			to += amount;
			if ((unsigned long) buff == dev->rmem_end)
				buff = (unsigned char *) dev->rmem_start;
		}
	}

	skb->dev = dev;
	skb->free = 1;

	netif_rx(skb);
	return(0);
}
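
/*
 *	Give every interface that is up and not busy a chance to push out its
 *	queued frames.
 */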
void dev_transmit(void)
{
	struct device *dev;

	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		if (dev->flags != 0 && !dev->tbusy) {
			dev_tint(dev);
		}
	}
}
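
/*
 *	Flag and test whether the network bottom half is running, so callers
 *	can avoid re-entering it.
 */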
volatile char in_bh = 0;

int in_net_bh()
{
	return(in_bh==0?0:1);
}
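
/*
 *	The network bottom half.  Flushes pending transmits, then pops each
 *	received buffer off the backlog and delivers it to every matching
 *	packet_type handler, cloning the buffer whenever more than one
 *	handler wants it.
 */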
void net_bh(void *tmp)
{
	struct sk_buff *skb;
	struct packet_type *ptype;
	struct packet_type *pt_prev;
	unsigned short type;

	/*
	 *	Atomically check and mark our BUSY state; bail out if the
	 *	bottom half is already running.
	 */
	if (set_bit(1, (void*)&in_bh))
		return;

	/*
	 *	Clear the transmit queues first, so anything we send while
	 *	processing input has room behind it.
	 */
	dev_transmit();

	/*
	 *	The backlog is shared with interrupt handlers, so disable
	 *	interrupts while dequeueing from it.
	 */
	cli();

	while((skb=skb_dequeue(&backlog))!=NULL)
	{
		backlog_size--;

		sti();

		/* set up the header pointer for the protocol handlers */
		skb->h.raw = skb->data;

		/* fetch the packet protocol id */
		type = skb->protocol;

		/*
		 *	Deliver to every ETH_P_ALL tap, then to every handler
		 *	registered for this protocol.  pt_prev lags one behind
		 *	so the final handler can be given the original buffer
		 *	while earlier ones get clones.
		 */
		pt_prev = NULL;
		for (ptype = ptype_all; ptype!=NULL; ptype=ptype->next)
		{
			if(pt_prev)
			{
				struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC);
				if(skb2)
					pt_prev->func(skb2,skb->dev, pt_prev);
			}
			pt_prev=ptype;
		}

		for (ptype = ptype_base[ntohs(type)&15]; ptype != NULL; ptype = ptype->next)
		{
			if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev))
			{
				if(pt_prev)
				{
					struct sk_buff *skb2;

					skb2=skb_clone(skb, GFP_ATOMIC);

					if(skb2)
						pt_prev->func(skb2, skb->dev, pt_prev);
				}

				pt_prev=ptype;
			}
		}

		/*
		 *	The last match gets the original buffer; if nobody
		 *	wanted the packet it is simply freed.
		 */
		if(pt_prev)
			pt_prev->func(skb, skb->dev, pt_prev);
		else
			kfree_skb(skb, FREE_WRITE);

#ifdef CONFIG_XMIT_EVERY
		dev_transmit();
#endif
		cli();		/* protect the backlog for the next dequeue */
	}

	/*
	 *	The queue is empty: drop the BUSY flag and flush output once more.
	 */
	in_bh = 0;
	sti();

	dev_transmit();
}
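
/*
 *	Run the transmit queues of a device: pull each pending frame off
 *	dev->buffs[] and feed it back through dev_queue_xmit() with a
 *	negative priority so a failed send keeps its place.  Gives up as
 *	soon as the device reports itself busy.
 */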
void dev_tint(struct device *dev)
{
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);

	for(i = 0;i < DEV_NUMBUFFS; i++)
	{
		cli();
		while((skb=skb_dequeue(&dev->buffs[i]))!=NULL)
		{
			skb_device_lock(skb);
			restore_flags(flags);

			dev_queue_xmit(skb,dev,-i - 1);

			if (dev->tbusy)
				return;
			cli();
		}
	}
	restore_flags(flags);
}
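
/*
 *	SIOCGIFCONF: fill the caller's buffer with one ifreq (name and
 *	address) for every interface that is up.
 */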
static int dev_ifconf(char *arg)
{
	struct ifconf ifc;
	struct ifreq ifr;
	struct device *dev;
	char *pos;
	int len;
	int err;

	/*
	 *	Fetch the caller's info block and verify we may write back to it.
	 */
	err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
	if(err)
		return err;
	memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
	len = ifc.ifc_len;
	pos = ifc.ifc_buf;

	/*
	 *	The caller's buffer must be writable too.
	 */
	err=verify_area(VERIFY_WRITE,pos,len);
	if(err)
		return err;

	/*
	 *	Loop over the interfaces, copying out one ifreq per device
	 *	that is up, until the buffer runs out.
	 */
	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		if(!(dev->flags & IFF_UP))
			continue;
		memset(&ifr, 0, sizeof(struct ifreq));
		strcpy(ifr.ifr_name, dev->name);
		(*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
		(*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;

		if (len < sizeof(struct ifreq))
			break;

		memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
		pos += sizeof(struct ifreq);
		len -= sizeof(struct ifreq);
	}

	/*
	 *	Report back how much of the buffer was used.
	 */
	ifc.ifc_len = (pos - ifc.ifc_buf);
	ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
	memcpy_tofs(arg, &ifc, sizeof(struct ifconf));

	return(pos - arg);
}
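
/*
 *	/proc/net/dev support: sprintf_stats() prints one line of statistics
 *	for a device, dev_get_info() assembles the whole file.
 */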
static int sprintf_stats(char *buffer, struct device *dev)
{
	struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
	int size;

	if (stats)
		size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
			dev->name,
			stats->rx_packets, stats->rx_errors,
			stats->rx_dropped + stats->rx_missed_errors,
			stats->rx_fifo_errors,
			stats->rx_length_errors + stats->rx_over_errors
			+ stats->rx_crc_errors + stats->rx_frame_errors,
			stats->tx_packets, stats->tx_errors, stats->tx_dropped,
			stats->tx_fifo_errors, stats->collisions,
			stats->tx_carrier_errors + stats->tx_aborted_errors
			+ stats->tx_window_errors + stats->tx_heartbeat_errors);
	else
		size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);

	return size;
}

int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
	int len=0;
	off_t begin=0;
	off_t pos=0;
	int size;

	struct device *dev;

	size = sprintf(buffer, "Inter-| Receive | Transmit\n"
		" face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");

	pos+=size;
	len+=size;

	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		size = sprintf_stats(buffer+len, dev);
		len+=size;
		pos=begin+len;

		if(pos<offset)
		{
			len=0;
			begin=pos;
		}
		if(pos>offset+length)
			break;
	}

	*start=buffer+(offset-begin);
	len-=(offset-begin);
	if(len>length)
		len=length;
	return len;
}
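
/*
 *	A netmask is bad if the supplied address has bits set outside it, or
 *	if its one bits are not contiguous from the top of the word.
 */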
static inline int bad_mask(unsigned long mask, unsigned long addr)
{
	if (addr & (mask = ~mask))
		return 1;
	mask = ntohl(mask);
	if (mask & (mask+1))
		return 1;
	return 0;
}
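
/*
 *	Perform the SIOCxIFxxx calls: copy the caller's ifreq into kernel
 *	space, find the device it names, carry out the request and, for the
 *	"get" calls, copy the result back to user space.
 */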
static int dev_ifsioc(void *arg, unsigned int getset)
{
	struct ifreq ifr;
	struct device *dev;
	int ret;

	/*
	 *	Copy the caller's ifreq into kernel space.
	 */
	int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
	if(err)
		return err;

	memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));

	/*
	 *	See which interface the caller is talking about.
	 */
	if ((dev = dev_get(ifr.ifr_name)) == NULL)
		return(-ENODEV);

	switch(getset)
	{
		case SIOCGIFFLAGS:
			ifr.ifr_flags = dev->flags;
			goto rarok;

		case SIOCSIFFLAGS:
		{
			int old_flags = dev->flags;

			dev_lock_wait();

			dev->flags = ifr.ifr_flags & (
				IFF_UP | IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
				IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
				IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER
				| IFF_MULTICAST);

			/*
			 *	Reload the multicast list now the flags have changed.
			 */
			dev_mc_upload(dev);

			/*
			 *	Did the flag change take the interface down, or bring it up?
			 */
			if ((old_flags & IFF_UP) && ((dev->flags & IFF_UP) == 0))
			{
				ret = dev_close(dev);
			}
			else
			{
				ret = (! (old_flags & IFF_UP) && (dev->flags & IFF_UP))
					? dev_open(dev) : 0;

				if(ret<0)
					dev->flags&=~IFF_UP;	/* open failed; mark it down again */
			}
		}
		break;

		case SIOCGIFADDR:
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_port = 0;
			goto rarok;

		case SIOCSIFADDR:
			dev->pa_addr = (*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_addr.s_addr;
			dev->family = ifr.ifr_addr.sa_family;

#ifdef CONFIG_INET
			dev->pa_mask = ip_get_mask(dev->pa_addr);
#endif
			dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
			ret = 0;
			break;

		case SIOCGIFBRDADDR:
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_port = 0;
			goto rarok;

		case SIOCSIFBRDADDR:
			dev->pa_brdaddr = (*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr;
			ret = 0;
			break;

		case SIOCGIFDSTADDR:
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_port = 0;
			goto rarok;

		case SIOCSIFDSTADDR:
			dev->pa_dstaddr = (*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr;
			ret = 0;
			break;

		case SIOCGIFNETMASK:
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_port = 0;
			goto rarok;

		case SIOCSIFNETMASK:
		{
			unsigned long mask = (*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_addr.s_addr;
			ret = -EINVAL;

			if (bad_mask(mask,0))
				break;
			dev->pa_mask = mask;
			ret = 0;
		}
		break;

		case SIOCGIFMETRIC:
			ifr.ifr_metric = dev->metric;
			goto rarok;

		case SIOCSIFMETRIC:
			dev->metric = ifr.ifr_metric;
			ret=0;
			break;

		case SIOCGIFMTU:
			ifr.ifr_mtu = dev->mtu;
			goto rarok;

		case SIOCSIFMTU:
			if(ifr.ifr_mtu<68)
				return -EINVAL;
			dev->mtu = ifr.ifr_mtu;
			ret = 0;
			break;

		case SIOCGIFMEM:
			ret = -EINVAL;
			break;

		case SIOCSIFMEM:
			ret = -EINVAL;
			break;

		case OLD_SIOCGIFHWADDR:
			memcpy(ifr.old_ifr_hwaddr,dev->dev_addr, MAX_ADDR_LEN);
			goto rarok;

		case SIOCGIFHWADDR:
			memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
			ifr.ifr_hwaddr.sa_family=dev->type;
			goto rarok;

		case SIOCSIFHWADDR:
			if(dev->set_mac_address==NULL)
				return -EOPNOTSUPP;
			if(ifr.ifr_hwaddr.sa_family!=dev->type)
				return -EINVAL;
			ret=dev->set_mac_address(dev,ifr.ifr_hwaddr.sa_data);
			break;

		case SIOCGIFMAP:
			ifr.ifr_map.mem_start=dev->mem_start;
			ifr.ifr_map.mem_end=dev->mem_end;
			ifr.ifr_map.base_addr=dev->base_addr;
			ifr.ifr_map.irq=dev->irq;
			ifr.ifr_map.dma=dev->dma;
			ifr.ifr_map.port=dev->if_port;
			goto rarok;

		case SIOCSIFMAP:
			if(dev->set_config==NULL)
				return -EOPNOTSUPP;
			return dev->set_config(dev,&ifr.ifr_map);

		case SIOCADDMULTI:
			if(dev->set_multicast_list==NULL)
				return -EINVAL;
			if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
				return -EINVAL;
			dev_mc_add(dev,ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
			return 0;

		case SIOCDELMULTI:
			if(dev->set_multicast_list==NULL)
				return -EINVAL;
			if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
				return -EINVAL;
			dev_mc_delete(dev,ifr.ifr_hwaddr.sa_data,dev->addr_len, 1);
			return 0;

		/*
		 *	Unknown or device-private ioctl.
		 */
		default:
			if((getset >= SIOCDEVPRIVATE) &&
			   (getset <= (SIOCDEVPRIVATE + 15))) {
				if(dev->do_ioctl==NULL)
					return -EOPNOTSUPP;
				ret=dev->do_ioctl(dev, &ifr, getset);
				memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
				break;
			}

			ret = -EINVAL;
	}
	return(ret);

	/*
	 *	Common exit for the "get" calls that hand back an ifreq.
	 */
rarok:
	memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
	return 0;
}
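
/*
 *	Entry point for device ioctls from the socket layer: SIOCGIFCONF is
 *	handled here, the "get" calls may be issued by anyone, the "set"
 *	calls require superuser rights, and driver-private ioctls are passed
 *	through to dev_ifsioc().
 */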
int dev_ioctl(unsigned int cmd, void *arg)
{
	switch(cmd)
	{
		case SIOCGIFCONF:
			(void) dev_ifconf((char *) arg);
			return 0;

		/*
		 *	Ioctl calls that can be done by all.
		 */
		case SIOCGIFFLAGS:
		case SIOCGIFADDR:
		case SIOCGIFDSTADDR:
		case SIOCGIFBRDADDR:
		case SIOCGIFNETMASK:
		case SIOCGIFMETRIC:
		case SIOCGIFMTU:
		case SIOCGIFMEM:
		case SIOCGIFHWADDR:
		case SIOCSIFHWADDR:
		case OLD_SIOCGIFHWADDR:
		case SIOCGIFSLAVE:
		case SIOCGIFMAP:
			return dev_ifsioc(arg, cmd);

		/*
		 *	Ioctl calls that require superuser rights.
		 */
		case SIOCSIFFLAGS:
		case SIOCSIFADDR:
		case SIOCSIFDSTADDR:
		case SIOCSIFBRDADDR:
		case SIOCSIFNETMASK:
		case SIOCSIFMETRIC:
		case SIOCSIFMTU:
		case SIOCSIFMEM:
		case SIOCSIFMAP:
		case SIOCSIFSLAVE:
		case SIOCADDMULTI:
		case SIOCDELMULTI:
			if (!suser())
				return -EPERM;
			return dev_ifsioc(arg, cmd);

		case SIOCSIFLINK:
			return -EINVAL;

		/*
		 *	Unknown or device-private ioctl.
		 */
		default:
			if((cmd >= SIOCDEVPRIVATE) &&
			   (cmd <= (SIOCDEVPRIVATE + 15))) {
				return dev_ifsioc(arg, cmd);
			}
			return -EINVAL;
	}
}
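
/*
 *	Boot-time initialisation: give every compiled-in device its transmit
 *	queues, call its init routine, unhook it from the chain if that
 *	fails, and finally register /proc/net/dev.
 */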
void dev_init(void)
{
	struct device *dev, **dp;

	dp = &dev_base;
	while ((dev = *dp) != NULL)
	{
		int i;
		for (i = 0; i < DEV_NUMBUFFS; i++) {
			skb_queue_head_init(dev->buffs + i);
		}

		if (dev->init && dev->init(dev))
		{
			*dp = dev->next;
		}
		else
		{
			dp = &dev->next;
		}
	}
	proc_net_register(&(struct proc_dir_entry) {
		PROC_NET_DEV, 3, "dev",
		S_IFREG | S_IRUGO, 1, 0, 0,
		0, &proc_net_inode_operations,
		dev_get_info
	});
}