This source file includes following definitions.
- min
- dev_add_pack
- dev_remove_pack
- dev_get
- dev_open
- dev_close
- register_netdevice_notifier
- unregister_netdevice_notifier
- dev_queue_xmit
- netif_rx
- dev_rint
- dev_transmit
- in_net_bh
- net_bh
- dev_tint
- dev_ifconf
- sprintf_stats
- dev_get_info
- bad_mask
- dev_ifsioc
- dev_ioctl
- dev_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50 #include <asm/segment.h>
51 #include <asm/system.h>
52 #include <asm/bitops.h>
53 #include <linux/config.h>
54 #include <linux/types.h>
55 #include <linux/kernel.h>
56 #include <linux/sched.h>
57 #include <linux/string.h>
58 #include <linux/mm.h>
59 #include <linux/socket.h>
60 #include <linux/sockios.h>
61 #include <linux/in.h>
62 #include <linux/errno.h>
63 #include <linux/interrupt.h>
64 #include <linux/if_ether.h>
65 #include <linux/inet.h>
66 #include <linux/netdevice.h>
67 #include <linux/etherdevice.h>
68 #include <linux/notifier.h>
69 #include <net/ip.h>
70 #include <net/route.h>
71 #include <linux/skbuff.h>
72 #include <net/sock.h>
73 #include <net/arp.h>
74 #include <linux/proc_fs.h>
75 #include <linux/stat.h>
76
77
78
79
80
81
/* Chains of registered protocol handlers, hashed by protocol type
   into 16 buckets (see dev_add_pack/dev_remove_pack). */
struct packet_type *ptype_base[16];
/* Handlers registered for ETH_P_ALL: taps that see every packet. */
struct packet_type *ptype_all = NULL;

/* Device lock count.  NOTE(review): apparently used by the
   dev_lock_wait()/skb_device_lock() machinery declared elsewhere —
   not manipulated in this file; confirm against netdevice.h. */
int dev_lockct=0;

/* Notifier chain called on device state changes (NETDEV_UP/DOWN). */
struct notifier_block *netdev_chain=NULL;

/*
 *	Queue of packets accepted at interrupt time by netif_rx() and
 *	awaiting processing in net_bh().  Statically initialised as an
 *	empty circular list whose head points at itself.
 */
static struct sk_buff_head backlog =
	{
		(struct sk_buff *)&backlog, (struct sk_buff *)&backlog
#if CONFIG_SKB_CHECK
		,SK_HEAD_SKB
#endif
	};

/* Number of packets currently sitting on the backlog queue; used by
   netif_rx() to decide when to start/stop dropping. */
static int backlog_size = 0;
115
116
117
118
119
/* Return the smaller of two unsigned long values. */
static __inline__ unsigned long min(unsigned long a, unsigned long b)
{
	if (a < b)
		return a;
	return b;
}
124
125
126
127
128
129
130
131
132
133
134
135
static int dev_nit=0;	/* number of ETH_P_ALL taps currently registered */
137
138
139
140
141
142
143
144 void dev_add_pack(struct packet_type *pt)
145 {
146 int hash;
147 if(pt->type==htons(ETH_P_ALL))
148 {
149 dev_nit++;
150 pt->next=ptype_all;
151 ptype_all=pt;
152 }
153 else
154 {
155 hash=ntohs(pt->type)&15;
156 pt->next = ptype_base[hash];
157 ptype_base[hash] = pt;
158 }
159 }
160
161
162
163
164
165
166 void dev_remove_pack(struct packet_type *pt)
167 {
168 struct packet_type **pt1;
169 if(pt->type==htons(ETH_P_ALL))
170 {
171 dev_nit--;
172 pt1=&ptype_all;
173 }
174 else
175 pt1=&ptype_base[ntohs(pt->type)&15];
176 for(; (*pt1)!=NULL; pt1=&((*pt1)->next))
177 {
178 if(pt==(*pt1))
179 {
180 *pt1=pt->next;
181 return;
182 }
183 }
184 printk("dev_remove_pack: %p not found.\n", pt);
185 }
186
187
188
189
190
191
192
193
194
195
196
197 struct device *dev_get(const char *name)
198 {
199 struct device *dev;
200
201 for (dev = dev_base; dev != NULL; dev = dev->next)
202 {
203 if (strcmp(dev->name, name) == 0)
204 return(dev);
205 }
206 return(NULL);
207 }
208
209
210
211
212
213
/*
 *	Bring an interface up: call the driver's open routine and, on
 *	success, mark the device IFF_UP|IFF_RUNNING and notify interested
 *	parties.  Returns 0 on success or the driver's error code.
 */
int dev_open(struct device *dev)
{
	int ret = 0;

	/* Let the driver bring the hardware up first. */
	if (dev->open)
		ret = dev->open(dev);

	if (ret == 0)
	{
		dev->flags |= (IFF_UP | IFF_RUNNING);

#ifdef CONFIG_IP_MULTICAST
		/* Join the all-hosts multicast group. */
		ip_mc_allhost(dev);
#endif
		/* Push the current multicast list to the hardware. */
		dev_mc_upload(dev);
		/* Tell protocol layers the device is now up. */
		notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
	}
	return(ret);
}
245
246
247
248
249
250
/*
 *	Take an interface down: stop the driver, clear its flags and
 *	protocol addresses, notify listeners, and purge queued transmit
 *	buffers.  Always returns 0.
 */
int dev_close(struct device *dev)
{
	int ct=0;

	/* Only call the driver's stop routine if the device was up. */
	if ((dev->flags & IFF_UP) && dev->stop)
		dev->stop(dev);

	dev->flags = 0;

	/* Tell anyone who cares the device has gone down. */
	notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);

	/* Throw away the device's multicast list. */
	dev_mc_discard(dev);

	/* Blank the protocol (IP) addresses. */
	dev->pa_addr = 0;
	dev->pa_dstaddr = 0;
	dev->pa_brdaddr = 0;
	dev->pa_mask = 0;

	/* Purge any frames still queued for transmission on each
	   priority queue; only buffers marked free may be released. */
	while(ct<DEV_NUMBUFFS)
	{
		struct sk_buff *skb;
		while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
			if(skb->free)
				kfree_skb(skb,FREE_WRITE);
		ct++;
	}
	return(0);
}
292
293
294
295
296
297
298
299 int register_netdevice_notifier(struct notifier_block *nb)
300 {
301 return notifier_chain_register(&netdev_chain, nb);
302 }
303
304 int unregister_netdevice_notifier(struct notifier_block *nb)
305 {
306 return notifier_chain_unregister(&netdev_chain,nb);
307 }
308
309
310
311
312
313
314
315
316
317
318
/*
 *	Queue a buffer for transmission on a device.
 *
 *	pri >= 0 is a normal send at that priority; pri < 0 means the
 *	buffer is being re-sent from dev_tint() (priority -pri-1) and
 *	must not be re-queued, cloned to taps, or re-locked.
 *
 *	On driver failure the buffer is put back at the head of its
 *	queue to be retried later.  Interrupt-disable (cli/sti) and the
 *	skb device lock protect the queues; statement order here is
 *	load-bearing — do not reorder.
 */
void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
	unsigned long flags;
	struct packet_type *ptype;
	int where = 0;		/* nonzero: resend from dev_tint, skip queueing */

	/* New submissions must hold the device lock on the buffer. */
	if(pri>=0 && !skb_device_locked(skb))
		skb_device_lock(skb);
#if CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	skb->dev = dev;

	/* Negative priority encodes "already dequeued by dev_tint". */
	if (pri < 0)
	{
		pri = -pri-1;
		where = 1;
	}

#ifdef CONFIG_NET_DEBUG
	if (pri >= DEV_NUMBUFFS)
	{
		printk("bad priority in dev_queue_xmit.\n");
		pri = 1;
	}
#endif

	/* If the hardware header is unresolved, rebuild_header() queues
	   the frame for ARP and returns nonzero: nothing more to do. */
	if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
		return;
	}

	save_flags(flags);
	cli();
	if (!where)
	/* Run through the queue: append this buffer, then transmit the
	   oldest queued buffer instead, preserving FIFO order. */
	{
		skb_queue_tail(dev->buffs + pri,skb);
		skb_device_unlock(skb);
		skb = skb_dequeue(dev->buffs + pri);
		skb_device_lock(skb);
	}
	restore_flags(flags);

	/* Feed a copy of outgoing frames to any ETH_P_ALL taps. */
	if(!where && dev_nit)
	{
		skb->stamp=xtime;
		for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next)
		{
			/* Skip taps bound to another device, and never echo a
			   packet back to the socket that sent it. */
			if ((ptype->dev == dev || !ptype->dev) &&
			   ((struct sock *)ptype->data != skb->sk))
			{
				struct sk_buff *skb2;
				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
					break;
				skb2->h.raw = skb2->data + dev->hard_header_len;
				skb2->mac.raw = skb2->data;
				ptype->func(skb2, skb->dev, ptype);
			}
		}
	}
	start_bh_atomic();
	if (dev->hard_start_xmit(skb, dev) == 0) {
		/* Driver accepted the frame: it now owns the buffer. */
		end_bh_atomic();
		return;
	}
	end_bh_atomic();

	/* Driver was busy: put the buffer back at the head of its queue
	   so dev_tint() retries it in order later. */
	cli();
	skb_device_unlock(skb);
	skb_queue_head(dev->buffs + pri,skb);
	restore_flags(flags);
}
415
416
417
418
419
420
421
/*
 *	Receive a buffer from a device driver, usually at interrupt
 *	time.  The buffer is stamped and appended to the backlog queue;
 *	actual protocol processing happens later in net_bh().  Packets
 *	are dropped wholesale while the backlog exceeds 300 entries.
 */
void netif_rx(struct sk_buff *skb)
{
	static int dropping = 0;	/* latched drop mode, see below */

	/* The buffer belongs to no socket yet and may be freed by the
	   stack once processed. */
	skb->sk = NULL;
	skb->free = 1;
	if(skb->stamp.tv_sec==0)
		skb->stamp = xtime;

	/* Hysteresis: start dropping above 300 queued packets, stop
	   only once the backlog has fully drained. */
	if (!backlog_size)
		dropping = 0;
	else if (backlog_size > 300)
		dropping = 1;

	if (dropping)
	{
		kfree_skb(skb, FREE_READ);
		return;
	}

#if CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	skb_queue_tail(&backlog,skb);
	backlog_size++;

	/* Schedule (or, if configured, run immediately) the network
	   bottom half to process the backlog. */
#ifdef CONFIG_NET_RUNONIRQ
	net_bh();
#else
	mark_bh(NET_BH);
#endif
	return;
}
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
/*
 *	Older driver receive entry point.  Either takes ownership of an
 *	already-built sk_buff (IN_SKBUFF flag), or copies 'len' bytes
 *	out of the device's receive ring (wrapping at rmem_end back to
 *	rmem_start) into a freshly allocated buffer.  The result is
 *	handed to netif_rx().  Returns 0 on success, 1 if the packet
 *	was rejected or dropped.
 */
int dev_rint(unsigned char *buff, long len, int flags, struct device *dev)
{
	static int dropping = 0;	/* latched out-of-memory drop mode */
	struct sk_buff *skb = NULL;
	unsigned char *to;
	int amount, left;
	int len2;

	if (dev == NULL || buff == NULL || len <= 0)
		return(1);

	if (flags & IN_SKBUFF)
	{
		/* 'buff' is really a ready-made sk_buff. */
		skb = (struct sk_buff *) buff;
	}
	else
	{
		/* Keep dropping until the backlog has drained. */
		if (dropping)
		{
			if (skb_peek(&backlog) != NULL)
				return(1);
			printk("INET: dev_rint: no longer dropping packets.\n");
			dropping = 0;
		}

		skb = alloc_skb(len, GFP_ATOMIC);
		if (skb == NULL)
		{
			printk("dev_rint: packet dropped on %s (no memory) !\n",
			       dev->name);
			dropping = 1;
			return(1);
		}

		/* Copy out of the ring buffer, wrapping at rmem_end. */
		to = skb_put(skb,len);
		left = len;

		len2 = len;
		while (len2 > 0)
		{
			/* Copy at most up to the end of the ring each pass. */
			amount = min(len2, (unsigned long) dev->rmem_end -
						(unsigned long) buff);
			memcpy(to, buff, amount);
			len2 -= amount;
			left -= amount;
			buff += amount;
			to += amount;
			if ((unsigned long) buff == dev->rmem_end)
				buff = (unsigned char *) dev->rmem_start;
		}
	}

	skb->dev = dev;
	skb->free = 1;

	netif_rx(skb);

	return(0);
}
558
559
560
561
562
563
564 void dev_transmit(void)
565 {
566 struct device *dev;
567
568 for (dev = dev_base; dev != NULL; dev = dev->next)
569 {
570 if (dev->flags != 0 && !dev->tbusy) {
571
572
573
574 dev_tint(dev);
575 }
576 }
577 }
578
579
580
581
582
583
584
585
586
587
588
589
590
591
volatile char in_bh = 0;	/* nonzero while net_bh() is executing */

/* Report whether the network bottom half is currently running. */
int in_net_bh()
{
	return in_bh != 0;
}
598
599
600
601
602
603
604
605
606
/*
 *	The network bottom half.  Drains the backlog queue filled by
 *	netif_rx(), delivering each packet first to every ETH_P_ALL tap
 *	and then to the matching protocol handlers, with transmit kicks
 *	before and after.  Guarded against re-entry by the in_bh flag.
 *
 *	Locking: the backlog is manipulated with interrupts off (cli);
 *	interrupts are re-enabled (sti) around protocol processing so
 *	devices can keep receiving.  The cli/sti placement is precise —
 *	do not reorder statements in this loop.
 */
void net_bh(void *tmp)
{
	struct sk_buff *skb;
	struct packet_type *ptype;
	struct packet_type *pt_prev;
	unsigned short type;

	/* Atomically set the re-entry guard; bail if already running. */
	if (set_bit(1, (void*)&in_bh))
		return;

	/* Flush pending transmits before processing receives. */
	dev_transmit();

	cli();

	/* Pop packets off the backlog one at a time. */
	while((skb=skb_dequeue(&backlog))!=NULL)
	{
		backlog_size--;

		/* Safe to take interrupts again while we work on this one. */
		sti();

		/* Protocol header starts where the driver left the data. */
		skb->h.raw = skb->data;

		type = skb->protocol;

		/* Deliver to ETH_P_ALL taps.  pt_prev lags one handler
		   behind so the final handler can consume the original
		   buffer while earlier ones get clones. */
		pt_prev = NULL;
		for (ptype = ptype_all; ptype!=NULL; ptype=ptype->next)
		{
			if(pt_prev)
			{
				struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC);
				if(skb2)
					pt_prev->func(skb2,skb->dev, pt_prev);
			}
			pt_prev=ptype;
		}

		/* Deliver to handlers registered for this protocol type
		   (and bound to this device, if they specify one). */
		for (ptype = ptype_base[ntohs(type)&15]; ptype != NULL; ptype = ptype->next)
		{
			if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev))
			{
				/* Clone for the previous match; clone failure
				   silently skips that handler. */
				if(pt_prev)
				{
					struct sk_buff *skb2;

					skb2=skb_clone(skb, GFP_ATOMIC);

					if(skb2)
						pt_prev->func(skb2, skb->dev, pt_prev);
				}

				pt_prev=ptype;
			}
		}

		/* The last matching handler gets the original buffer; if
		   nobody wanted it, free it. */
		if(pt_prev)
			pt_prev->func(skb, skb->dev, pt_prev);
		else
			kfree_skb(skb, FREE_WRITE);

		/* Optionally kick transmit after every packet. */
#ifdef CONFIG_XMIT_EVERY
		dev_transmit();
#endif
		/* Interrupts off again before touching the backlog. */
		cli();
	}

	/* Done: drop the re-entry guard and re-enable interrupts. */
	in_bh = 0;
	sti();

	/* One final transmit kick for anything queued meanwhile. */
	dev_transmit();
}
747
748
749
750
751
752
753
/*
 *	Run the transmit queues of a device, highest priority first,
 *	re-submitting each buffer via dev_queue_xmit() with a negative
 *	priority (meaning "already dequeued, don't requeue or tap").
 *	Bails out as soon as the device reports busy.
 */
void dev_tint(struct device *dev)
{
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);

	/* Work through the priority queues in order. */
	for(i = 0;i < DEV_NUMBUFFS; i++)
	{
		/* Interrupts must be off while the queue is manipulated. */
		cli();
		while((skb=skb_dequeue(&dev->buffs[i]))!=NULL)
		{
			/* Stop anyone freeing the buffer while we retransmit it,
			   then let interrupts back in for the actual send. */
			skb_device_lock(skb);
			restore_flags(flags);

			/* -i - 1 encodes the priority as a resend. */
			dev_queue_xmit(skb,dev,-i - 1);

			/* Device saturated: give up until the next kick.
			   (Returns with flags already restored above.) */
			if (dev->tbusy)
				return;
			cli();
		}
	}
	restore_flags(flags);
}
795
796
797
798
799
800
801
802
/*
 *	SIOCGIFCONF: copy a list of struct ifreq entries, one per
 *	interface that is up, into the user buffer described by the
 *	struct ifconf at 'arg'.  The ifconf is rewritten with the
 *	number of bytes actually produced.
 *
 *	Returns a negative errno on bad user memory, otherwise the
 *	value (pos - arg) — note the caller in dev_ioctl() ignores the
 *	return value on success.
 */
static int dev_ifconf(char *arg)
{
	struct ifconf ifc;
	struct ifreq ifr;
	struct device *dev;
	char *pos;
	int len;
	int err;

	/* Fetch the caller's ifconf descriptor. */
	err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
	if(err)
		return err;
	memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
	len = ifc.ifc_len;
	pos = ifc.ifc_buf;

	/* The caller's result buffer must be writable for the length
	   it claimed. */
	err=verify_area(VERIFY_WRITE,pos,len);
	if(err)
		return err;

	/* Emit one ifreq per interface that is up, while room lasts. */
	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		if(!(dev->flags & IFF_UP))
			continue;
		memset(&ifr, 0, sizeof(struct ifreq));
		strcpy(ifr.ifr_name, dev->name);
		(*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
		(*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;

		/* Out of room in the user buffer: stop (entry prepared
		   above is simply discarded). */
		if (len < sizeof(struct ifreq))
			break;

		memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
		pos += sizeof(struct ifreq);
		len -= sizeof(struct ifreq);
	}

	/* Report back how many bytes were actually written. */
	ifc.ifc_len = (pos - ifc.ifc_buf);
	ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
	memcpy_tofs(arg, &ifc, sizeof(struct ifconf));

	return(pos - arg);
}
875
876
877
878
879
880
881
882 static int sprintf_stats(char *buffer, struct device *dev)
883 {
884 struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
885 int size;
886
887 if (stats)
888 size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
889 dev->name,
890 stats->rx_packets, stats->rx_errors,
891 stats->rx_dropped + stats->rx_missed_errors,
892 stats->rx_fifo_errors,
893 stats->rx_length_errors + stats->rx_over_errors
894 + stats->rx_crc_errors + stats->rx_frame_errors,
895 stats->tx_packets, stats->tx_errors, stats->tx_dropped,
896 stats->tx_fifo_errors, stats->collisions,
897 stats->tx_carrier_errors + stats->tx_aborted_errors
898 + stats->tx_window_errors + stats->tx_heartbeat_errors);
899 else
900 size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);
901
902 return size;
903 }
904
905
906
907
908
909
/*
 *	/proc/net/dev read handler.  Renders the header plus one stats
 *	line per device into 'buffer', then uses the standard procfs
 *	begin/offset bookkeeping to return only the window of at most
 *	'length' bytes starting at 'offset'.  *start is set to the
 *	first byte of that window; the return value is its length.
 */
int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
	int len=0;		/* bytes kept so far (relative to begin) */
	off_t begin=0;		/* file offset of buffer[0] */
	off_t pos=0;		/* file offset just past what we produced */
	int size;

	struct device *dev;

	size = sprintf(buffer, "Inter-|   Receive                  |  Transmit\n"
			    " face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");

	pos+=size;
	len+=size;

	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		size = sprintf_stats(buffer+len, dev);
		len+=size;
		pos=begin+len;

		/* Everything before 'offset' can be discarded. */
		if(pos<offset)
		{
			len=0;
			begin=pos;
		}
		/* Produced past the requested window: stop. */
		if(pos>offset+length)
			break;
	}

	/* Trim the result to exactly the requested window. */
	*start=buffer+(offset-begin);
	len-=(offset-begin);
	if(len>length)
		len=length;
	return len;
}
948
949
950
951
952
953
/*
 *	Sanity-check a netmask/address pair (both in network byte
 *	order).  The mask is bad if the address has bits outside it, or
 *	if the mask (in host order) is not a contiguous run of one bits
 *	from the top.  Returns 1 if bad, 0 if acceptable.
 *
 *	The arithmetic is done in an explicitly 32-bit type: the old
 *	code complemented in 'unsigned long' and then truncated through
 *	ntohl(), which only worked because unsigned long was 32 bits on
 *	the original target; on 64-bit the contiguity test broke.
 */
static inline int bad_mask(unsigned long mask, unsigned long addr)
{
	unsigned int inv = ~(unsigned int) mask;	/* 32-bit complement */

	/* Address bits falling outside the mask => invalid. */
	if ((unsigned int) addr & inv)
		return 1;
	/* Host-order complement of a contiguous mask is 2^n - 1,
	   i.e. inv & (inv+1) == 0. */
	inv = ntohl(inv);
	if (inv & (inv + 1))
		return 1;
	return 0;
}
963
964
965
966
967
968
969
970
/*
 *	Handle one per-interface ioctl.  'arg' points at a user-space
 *	struct ifreq naming the device; 'getset' is the ioctl number.
 *	GET-style requests fill in the ifreq and copy it back (the
 *	'rarok' exit); SET-style requests update the device and return
 *	0 or a negative errno.
 */
static int dev_ifsioc(void *arg, unsigned int getset)
{
	struct ifreq ifr;
	struct device *dev;
	int ret;

	/* Fetch the request block from user space. */
	int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
	if(err)
		return err;

	memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));

	/* Look the device up by the name the caller supplied. */
	if ((dev = dev_get(ifr.ifr_name)) == NULL)
		return(-ENODEV);

	switch(getset)
	{
		case SIOCGIFFLAGS:	/* read interface flags */
			ifr.ifr_flags = dev->flags;
			goto rarok;

		case SIOCSIFFLAGS:	/* set interface flags */
		{
			int old_flags = dev->flags;

			/* Wait for any transmit locks to clear before the
			   flags change takes effect. */
			dev_lock_wait();

			/* Only the user-settable flag bits are accepted. */
			dev->flags = ifr.ifr_flags & (
				IFF_UP | IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
				IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
				IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER
				| IFF_MULTICAST);

			/* Reload the hardware multicast filter; promiscuity
			   or allmulti may just have changed. */
			dev_mc_upload(dev);

			/* Device going down? */
			if ((old_flags & IFF_UP) && ((dev->flags & IFF_UP) == 0))
			{
				/* dev_close() expects IFF_UP still set so its
				   stop path runs; restore it first. */
				dev->flags|=IFF_UP;
				ret = dev_close(dev);
			}
			else
			{
				/* Device coming up (or flags-only change). */
				ret = (! (old_flags & IFF_UP) && (dev->flags & IFF_UP))
					? dev_open(dev) : 0;

				/* Open failed: the device is not really up. */
				if(ret<0)
					dev->flags&=~IFF_UP;
			}
		}
		break;

		case SIOCGIFADDR:	/* read interface address */
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_port = 0;
			goto rarok;

		case SIOCSIFADDR:	/* set interface address */
			dev->pa_addr = (*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_addr.s_addr;
			dev->family = ifr.ifr_addr.sa_family;

#ifdef CONFIG_INET
			/* Derive the classful default mask for the address. */
			dev->pa_mask = ip_get_mask(dev->pa_addr);
#endif
			dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
			ret = 0;
			break;

		case SIOCGIFBRDADDR:	/* read broadcast address */
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_port = 0;
			goto rarok;

		case SIOCSIFBRDADDR:	/* set broadcast address */
			dev->pa_brdaddr = (*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr;
			ret = 0;
			break;

		case SIOCGIFDSTADDR:	/* read point-to-point peer address */
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_port = 0;
			goto rarok;

		case SIOCSIFDSTADDR:	/* set point-to-point peer address */
			dev->pa_dstaddr = (*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr;
			ret = 0;
			break;

		case SIOCGIFNETMASK:	/* read netmask */
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_port = 0;
			goto rarok;

		case SIOCSIFNETMASK:	/* set netmask (validated) */
		{
			unsigned long mask = (*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_addr.s_addr;
			ret = -EINVAL;

			/* Reject non-contiguous masks. */
			if (bad_mask(mask,0))
				break;
			dev->pa_mask = mask;
			ret = 0;
		}
		break;

		case SIOCGIFMETRIC:	/* read routing metric */

			ifr.ifr_metric = dev->metric;
			goto rarok;

		case SIOCSIFMETRIC:	/* set routing metric */
			dev->metric = ifr.ifr_metric;
			ret=0;
			break;

		case SIOCGIFMTU:	/* read MTU */
			ifr.ifr_mtu = dev->mtu;
			goto rarok;

		case SIOCSIFMTU:	/* set MTU */

			/* 68 is the minimum IP MTU; reject anything smaller. */
			if(ifr.ifr_mtu<68)
				return -EINVAL;
			dev->mtu = ifr.ifr_mtu;
			ret = 0;
			break;

		case SIOCGIFMEM:	/* not supported */

			ret = -EINVAL;
			break;

		case SIOCSIFMEM:	/* not supported */
			ret = -EINVAL;
			break;

		case OLD_SIOCGIFHWADDR:	/* old-style hardware address read */
			memcpy(ifr.old_ifr_hwaddr,dev->dev_addr, MAX_ADDR_LEN);
			goto rarok;

		case SIOCGIFHWADDR:	/* read hardware address */
			memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
			ifr.ifr_hwaddr.sa_family=dev->type;
			goto rarok;

		case SIOCSIFHWADDR:	/* set hardware address via the driver */
			if(dev->set_mac_address==NULL)
				return -EOPNOTSUPP;
			if(ifr.ifr_hwaddr.sa_family!=dev->type)
				return -EINVAL;
			ret=dev->set_mac_address(dev,ifr.ifr_hwaddr.sa_data);
			break;

		case SIOCGIFMAP:	/* read hardware parameters */
			ifr.ifr_map.mem_start=dev->mem_start;
			ifr.ifr_map.mem_end=dev->mem_end;
			ifr.ifr_map.base_addr=dev->base_addr;
			ifr.ifr_map.irq=dev->irq;
			ifr.ifr_map.dma=dev->dma;
			ifr.ifr_map.port=dev->if_port;
			goto rarok;

		case SIOCSIFMAP:	/* set hardware parameters via the driver */
			if(dev->set_config==NULL)
				return -EOPNOTSUPP;
			return dev->set_config(dev,&ifr.ifr_map);

		case SIOCADDMULTI:	/* add a hardware multicast address */
			if(dev->set_multicast_list==NULL)
				return -EINVAL;
			if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
				return -EINVAL;
			dev_mc_add(dev,ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
			return 0;

		case SIOCDELMULTI:	/* remove a hardware multicast address */
			if(dev->set_multicast_list==NULL)
				return -EINVAL;
			if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
				return -EINVAL;
			dev_mc_delete(dev,ifr.ifr_hwaddr.sa_data,dev->addr_len, 1);
			return 0;

		/* Unknown ioctls in the device-private range are handed
		   to the driver; everything else is rejected. */
		default:
			if((getset >= SIOCDEVPRIVATE) &&
			   (getset <= (SIOCDEVPRIVATE + 15))) {
				if(dev->do_ioctl==NULL)
					return -EOPNOTSUPP;
				ret=dev->do_ioctl(dev, &ifr, getset);
				memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
				break;
			}

			ret = -EINVAL;
	}
	return(ret);

	/* Common exit for GET requests: copy the filled-in ifreq back
	   to user space and report success. */
rarok:
	memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
	return 0;
}
1234
1235
1236
1237
1238
1239
1240
/*
 *	Top-level entry for network-device ioctls.  Dispatches to
 *	dev_ifconf()/dev_ifsioc(), enforcing superuser permission on
 *	all state-changing (SET) requests; GET requests are allowed
 *	for anyone.
 */
int dev_ioctl(unsigned int cmd, void *arg)
{
	switch(cmd)
	{
		case SIOCGIFCONF:
			/* List interfaces; dev_ifconf's return value is
			   deliberately discarded. */
			(void) dev_ifconf((char *) arg);
			return 0;

		/* Read-only requests: no privilege required. */
		case SIOCGIFFLAGS:
		case SIOCGIFADDR:
		case SIOCGIFDSTADDR:
		case SIOCGIFBRDADDR:
		case SIOCGIFNETMASK:
		case SIOCGIFMETRIC:
		case SIOCGIFMTU:
		case SIOCGIFMEM:
		case SIOCGIFHWADDR:
		case SIOCSIFHWADDR:
		case OLD_SIOCGIFHWADDR:
		case SIOCGIFSLAVE:
		case SIOCGIFMAP:
			return dev_ifsioc(arg, cmd);

		/* State-changing requests: superuser only. */
		case SIOCSIFFLAGS:
		case SIOCSIFADDR:
		case SIOCSIFDSTADDR:
		case SIOCSIFBRDADDR:
		case SIOCSIFNETMASK:
		case SIOCSIFMETRIC:
		case SIOCSIFMTU:
		case SIOCSIFMEM:
		case SIOCSIFMAP:
		case SIOCSIFSLAVE:
		case SIOCADDMULTI:
		case SIOCDELMULTI:
			if (!suser())
				return -EPERM;
			return dev_ifsioc(arg, cmd);

		case SIOCSIFLINK:	/* unsupported */
			return -EINVAL;

		/* Device-private ioctls pass straight through; the
		   per-driver handler does its own checking. */
		default:
			if((cmd >= SIOCDEVPRIVATE) &&
			   (cmd <= (SIOCDEVPRIVATE + 15))) {
				return dev_ifsioc(arg, cmd);
			}
			return -EINVAL;
	}
}
1303
1304
1305
1306
1307
1308
1309
1310
1311
/*
 *	Boot-time device initialisation: initialise every device's
 *	transmit queues, call each driver's probe/init routine, and
 *	unlink from dev_base any device whose init fails.  Finally
 *	registers the /proc/net/dev entry.
 */
void dev_init(void)
{
	struct device *dev, **dp;

	/* Walk dev_base with a pointer-to-link so failed devices can be
	   spliced out of the list in place. */
	dp = &dev_base;
	while ((dev = *dp) != NULL)
	{
		int i;
		for (i = 0; i < DEV_NUMBUFFS; i++) {
			skb_queue_head_init(dev->buffs + i);
		}

		if (dev->init && dev->init(dev))
		{
			/* Probe failed: drop this device from the chain
			   (dp stays put, now pointing at the successor). */
			*dp = dev->next;
		}
		else
		{
			dp = &dev->next;
		}
	}
	/* Register /proc/net/dev, backed by dev_get_info(). */
	proc_net_register(&(struct proc_dir_entry) {
		PROC_NET_DEV, 3, "dev",
		S_IFREG | S_IRUGO, 1, 0, 0,
		0, &proc_net_inode_operations,
		dev_get_info
	});
}
1350