This source file includes the following definitions:
- min
- dev_add_pack
- dev_remove_pack
- dev_get
- dev_open
- dev_close
- register_netdevice_notifier
- unregister_netdevice_notifier
- dev_queue_xmit
- netif_rx
- dev_rint
- dev_transmit
- in_net_bh
- net_bh
- dev_tint
- dev_ifconf
- sprintf_stats
- dev_get_info
- bad_mask
- dev_ifsioc
- dev_ioctl
- dev_init
#include <asm/segment.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>

/*
 *	The lists of packet handlers. ptype_all holds the ETH_P_ALL taps;
 *	everything else is hashed on the low four bits of the protocol type.
 */

struct packet_type *ptype_base[16];
struct packet_type *ptype_all = NULL;

/*
 *	Device lock count.
 */

int dev_lockct=0;

/*
 *	The chain of notifiers interested in device state changes.
 */

struct notifier_block *netdev_chain=NULL;

/*
 *	Received frames are queued here by netif_rx() and drained by net_bh().
 */

static struct sk_buff_head backlog =
{
	(struct sk_buff *)&backlog, (struct sk_buff *)&backlog
#if CONFIG_SKB_CHECK
	,SK_HEAD_SKB
#endif
};

static int backlog_size = 0;

/*
 *	Return the lesser of the two values.
 */

static __inline__ unsigned long min(unsigned long a, unsigned long b)
{
	return (a < b)? a : b;
}

/*
 *	Number of ETH_P_ALL taps currently registered.
 */

static int dev_nit=0;

/*
 *	Add a protocol handler to the receive lists. ETH_P_ALL handlers are
 *	chained on ptype_all; all others are hashed by protocol type.
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;
	if(pt->type==htons(ETH_P_ALL))
	{
		dev_nit++;
		pt->next=ptype_all;
		ptype_all=pt;
	}
	else
	{
		hash=ntohs(pt->type)&15;
		pt->next = ptype_base[hash];
		ptype_base[hash] = pt;
	}
}

/*
 *	Remove a protocol handler from the receive lists. Complains if the
 *	entry was never registered.
 */

void dev_remove_pack(struct packet_type *pt)
{
	struct packet_type **pt1;
	if(pt->type==htons(ETH_P_ALL))
	{
		dev_nit--;
		pt1=&ptype_all;
	}
	else
		pt1=&ptype_base[ntohs(pt->type)&15];
	for(; (*pt1)!=NULL; pt1=&((*pt1)->next))
	{
		if(pt==(*pt1))
		{
			*pt1=pt->next;
			return;
		}
	}
	printk("dev_remove_pack: %p not found.\n", pt);
}

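/*
 *	Illustrative sketch (not part of this file): how a protocol module
 *	registers a receive handler with dev_add_pack(). The handler and
 *	structure names are hypothetical; the field assignments use the
 *	packet_type members referenced above (type, dev, func, data, next).
 *
 *	static int my_proto_rcv(struct sk_buff *skb, struct device *dev,
 *				struct packet_type *pt)
 *	{
 *		... process the frame ...
 *		kfree_skb(skb, FREE_READ);
 *		return 0;
 *	}
 *
 *	static struct packet_type my_packet_type;
 *
 *	my_packet_type.type = htons(ETH_P_IP);	(or ETH_P_ALL for a tap)
 *	my_packet_type.dev  = NULL;		(NULL means any device)
 *	my_packet_type.func = my_proto_rcv;
 *	my_packet_type.data = NULL;
 *	dev_add_pack(&my_packet_type);
 *
 *	...and dev_remove_pack(&my_packet_type) before the handler goes away.
 */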
/*
 *	Find an interface by name. Returns NULL if no matching device exists.
 */

struct device *dev_get(const char *name)
{
	struct device *dev;

	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		if (strcmp(dev->name, name) == 0)
			return(dev);
	}
	return(NULL);
}

/*
 *	Prepare an interface for use: call the device open routine and, if it
 *	succeeds, mark the interface up and running and tell the world.
 */

int dev_open(struct device *dev)
{
	int ret = 0;

	/*
	 *	Call device private open method
	 */
	if (dev->open)
		ret = dev->open(dev);

	/*
	 *	If it went open OK then set the flags
	 */
	if (ret == 0)
	{
		dev->flags |= (IFF_UP | IFF_RUNNING);
#ifdef CONFIG_IP_MULTICAST
		/*
		 *	Initialise multicasting status
		 */
		ip_mc_allhost(dev);
#endif
		dev_mc_upload(dev);
		notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
	}
	return(ret);
}

/*
 *	Take an interface down: stop it, clear its flags and addresses, and
 *	purge anything still queued for transmission on it.
 */

int dev_close(struct device *dev)
{
	int ct=0;

	/*
	 *	Call the device specific close method, if the device is up.
	 */
	if ((dev->flags & IFF_UP) && dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */
	dev->flags&=~(IFF_UP|IFF_RUNNING);

	/*
	 *	Tell people we are going down
	 */
	notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);

	/*
	 *	Flush the multicast chain
	 */
	dev_mc_discard(dev);

	/*
	 *	Blank the IP addresses
	 */
	dev->pa_addr = 0;
	dev->pa_dstaddr = 0;
	dev->pa_brdaddr = 0;
	dev->pa_mask = 0;

	/*
	 *	Purge any queued packets when we down the link
	 */
	while(ct<DEV_NUMBUFFS)
	{
		struct sk_buff *skb;
		while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
			if(skb->free)
				kfree_skb(skb,FREE_WRITE);
		ct++;
	}
	return(0);
}

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&netdev_chain, nb);
}

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&netdev_chain,nb);
}

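/*
 *	Illustrative sketch (not part of this file): how another part of the
 *	kernel listens for interfaces coming up or going down. The names are
 *	hypothetical, and the exact callback signature is an assumption that
 *	should be checked against <linux/notifier.h> for this kernel
 *	generation; it is assumed here to receive the event code and a pointer
 *	to the struct device.
 *
 *	static int example_device_event(unsigned long event, void *ptr)
 *	{
 *		struct device *dev = (struct device *) ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk("%s is up\n", dev->name);
 *		else if (event == NETDEV_DOWN)
 *			printk("%s is down\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_notifier;
 *
 *	example_notifier.notifier_call = example_device_event;
 *	example_notifier.priority = 0;
 *	register_netdevice_notifier(&example_notifier);
 */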
/*
 *	Send (or queue for sending) a packet. A negative priority flags a
 *	buffer being retransmitted by dev_tint(): it is not re-queued at the
 *	tail and is not copied to the taps.
 */

void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
	unsigned long flags;
	struct packet_type *ptype;
	int where = 0;		/* 1 = retransmit attempt, keep queue order */

	if(pri>=0 && !skb_device_locked(skb))
		skb_device_lock(skb);	/* Shove a lock on the frame */
#if CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	skb->dev = dev;

	/*
	 *	Negative priority means the frame came back from dev_tint()
	 *	and must keep its place at the head of the queue.
	 */
	if (pri < 0)
	{
		pri = -pri-1;
		where = 1;
	}

#ifdef CONFIG_NET_DEBUG
	if (pri >= DEV_NUMBUFFS)
	{
		printk("bad priority in dev_queue_xmit.\n");
		pri = 1;
	}
#endif

	/*
	 *	If the link level address has not been resolved yet, let the
	 *	device header rebuilder have a go; if it says the frame is not
	 *	ready, it keeps the buffer and we are done for now.
	 */
	if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
		return;
	}

	save_flags(flags);
	cli();
	if (!where)
	{
		/*
		 *	Preserve queue order: append the new frame, then pull
		 *	the oldest one off the front to transmit.
		 */
		skb_queue_tail(dev->buffs + pri,skb);
		skb_device_unlock(skb);		/* Safe on the queue */
		skb = skb_dequeue(dev->buffs + pri);
		skb_device_lock(skb);		/* New buffer needs locking down */
	}
	restore_flags(flags);

	/*
	 *	Copy outgoing frames to any registered taps, but never back to
	 *	the socket they originated from.
	 */
	if(!where && dev_nit)
	{
		skb->stamp=xtime;
		for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next)
		{
			if ((ptype->dev == dev || !ptype->dev) &&
			   ((struct sock *)ptype->data != skb->sk))
			{
				struct sk_buff *skb2;
				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
					break;
				skb2->h.raw = skb2->data + dev->hard_header_len;
				skb2->mac.raw = skb2->data;
				ptype->func(skb2, skb->dev, ptype);
			}
		}
	}
	start_bh_atomic();
	if (dev->hard_start_xmit(skb, dev) == 0) {
		/*
		 *	Packet is now solely the responsibility of the driver.
		 */
		end_bh_atomic();
		return;
	}
	end_bh_atomic();

	/*
	 *	Transmission failed. Put the buffer back on the head of its
	 *	queue; once on the list it is safe and no longer device locked.
	 */
	cli();
	skb_device_unlock(skb);
	skb_queue_head(dev->buffs + pri,skb);
	restore_flags(flags);
}

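/*
 *	Illustrative sketch (not part of this file): how a protocol hands a
 *	frame to the device layer. Everything except dev_get(), alloc_skb(),
 *	skb_put() and dev_queue_xmit() is hypothetical, and header building
 *	is elided.
 *
 *	struct device *dev = dev_get("eth0");
 *	struct sk_buff *skb;
 *
 *	if (dev == NULL || (skb = alloc_skb(dev->hard_header_len + data_len,
 *					    GFP_KERNEL)) == NULL)
 *		return;
 *	skb->dev = dev;
 *	skb->free = 1;
 *	skb->arp = 1;				(link header already resolved here)
 *	... build the link level header and payload with skb_put() ...
 *	dev_queue_xmit(skb, dev, 0);		(priority indexes dev->buffs[])
 */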
/*
 *	Receive a packet from a device driver and queue it for the upper
 *	(protocol) levels. It always succeeds, although the buffer may be
 *	dropped for congestion control.
 */

void netif_rx(struct sk_buff *skb)
{
	static int dropping = 0;

	/*
	 *	Any received buffers are un-owned and should be timestamped.
	 */
	skb->sk = NULL;
	skb->free = 1;
	if(skb->stamp.tv_sec==0)
		skb->stamp = xtime;

	/*
	 *	Check that we aren't overdoing things. Once we start dropping
	 *	we keep dropping until the backlog has drained completely.
	 */
	if (!backlog_size)
		dropping = 0;
	else if (backlog_size > 300)
		dropping = 1;

	if (dropping)
	{
		kfree_skb(skb, FREE_READ);
		return;
	}

	/*
	 *	Add it to the "backlog" queue.
	 */
#if CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	skb_queue_tail(&backlog,skb);
	backlog_size++;

	/*
	 *	Mark the network bottom half for running once the hardware
	 *	interrupt returns (or run it at once when so configured).
	 */
#ifdef CONFIG_NET_RUNONIRQ
	net_bh();
#else
	mark_bh(NET_BH);
#endif
	return;
}

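/*
 *	Illustrative sketch (not part of this file): the usual driver receive
 *	path feeding netif_rx(). pkt_len and rx_buffer are hypothetical, and
 *	eth_type_trans() is assumed available from <linux/etherdevice.h> for
 *	Ethernet devices.
 *
 *	struct sk_buff *skb = alloc_skb(pkt_len, GFP_ATOMIC);
 *
 *	if (skb == NULL)
 *		return;				(frame is dropped)
 *	skb->dev = dev;
 *	memcpy(skb_put(skb, pkt_len), rx_buffer, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */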
/*
 *	An older interface by which a device driver hands a received frame to
 *	the network layer. It copies the data (coping with a wrap around the
 *	end of the device receive ring), builds an sk_buff and passes it to
 *	netif_rx(). Drivers that build their own sk_buff call netif_rx()
 *	directly instead.
 */

int dev_rint(unsigned char *buff, long len, int flags, struct device *dev)
{
	static int dropping = 0;
	struct sk_buff *skb = NULL;
	unsigned char *to;
	int amount, left;
	int len2;

	if (dev == NULL || buff == NULL || len <= 0)
		return(1);

	if (flags & IN_SKBUFF)
	{
		skb = (struct sk_buff *) buff;
	}
	else
	{
		if (dropping)
		{
			if (skb_peek(&backlog) != NULL)
				return(1);
			printk("INET: dev_rint: no longer dropping packets.\n");
			dropping = 0;
		}

		skb = alloc_skb(len, GFP_ATOMIC);
		if (skb == NULL)
		{
			printk("dev_rint: packet dropped on %s (no memory) !\n",
			       dev->name);
			dropping = 1;
			return(1);
		}

		/*
		 *	Copy the frame into the new buffer, wrapping around the
		 *	end of the device receive area if necessary.
		 */
		to = skb_put(skb,len);
		left = len;

		len2 = len;
		while (len2 > 0)
		{
			amount = min(len2, (unsigned long) dev->rmem_end -
					   (unsigned long) buff);
			memcpy(to, buff, amount);
			len2 -= amount;
			left -= amount;
			buff += amount;
			to += amount;
			if ((unsigned long) buff == dev->rmem_end)
				buff = (unsigned char *) dev->rmem_start;
		}
	}

	/*
	 *	Tag the frame and pass it to the upper layers.
	 */
	skb->dev = dev;
	skb->free = 1;

	netif_rx(skb);

	/*
	 *	OK, all done.
	 */
	return(0);
}

/*
 *	This routine causes all interfaces to try to send some data.
 */

void dev_transmit(void)
{
	struct device *dev;

	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		if (dev->flags != 0 && !dev->tbusy) {
			/*
			 *	Kick the device
			 */
			dev_tint(dev);
		}
	}
}

/*
 *	in_bh is set while net_bh() runs so that the handler is never
 *	re-entered; in_net_bh() reports that state to other parts of the
 *	kernel.
 */

volatile char in_bh = 0;

int in_net_bh()
{
	return(in_bh==0?0:1);
}

/*
 *	The network bottom half handler. Frames queued by netif_rx() are
 *	pulled off the backlog and handed to every matching packet handler.
 *	Most of the work runs with hardware interrupts enabled.
 */

void net_bh(void *tmp)
{
	struct sk_buff *skb;
	struct packet_type *ptype;
	struct packet_type *pt_prev;
	unsigned short type;

	/*
	 *	Atomically mark ourselves busy; refuse to run re-entrantly.
	 */
	if (set_bit(1, (void*)&in_bh))
		return;

	/*
	 *	Give the devices a chance to flush their send queues first.
	 */
	dev_transmit();

	/*
	 *	Drain the backlog of received frames. Interrupts are disabled
	 *	only while the queue itself is being manipulated.
	 */
	cli();

	while((skb=skb_dequeue(&backlog))!=NULL)
	{
		/*
		 *	We have a packet, so the queue has shrunk.
		 */
		backlog_size--;

		sti();

		/*
		 *	Reset the upper layer header pointer to the start of
		 *	the frame data.
		 */
		skb->h.raw = skb->data;

		/*
		 *	Fetch the packet protocol ID set by the driver.
		 */
		type = skb->protocol;

		/*
		 *	Deliver the frame first to every ETH_P_ALL tap, then to
		 *	the handlers hashed by protocol type. Each handler
		 *	except the last gets a clone; the final one consumes the
		 *	original buffer.
		 */
		pt_prev = NULL;
		for (ptype = ptype_all; ptype!=NULL; ptype=ptype->next)
		{
			if(pt_prev)
			{
				struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC);
				if(skb2)
					pt_prev->func(skb2,skb->dev, pt_prev);
			}
			pt_prev=ptype;
		}

		for (ptype = ptype_base[ntohs(type)&15]; ptype != NULL; ptype = ptype->next)
		{
			if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev))
			{
				/*
				 *	Deliver to the previously remembered
				 *	match, then remember this one.
				 */
				if(pt_prev)
				{
					struct sk_buff *skb2;

					skb2=skb_clone(skb, GFP_ATOMIC);

					if(skb2)
						pt_prev->func(skb2, skb->dev, pt_prev);
				}

				pt_prev=ptype;
			}
		}

		/*
		 *	Hand the original buffer to the last matching handler,
		 *	or free it if nobody wanted the frame.
		 */
		if(pt_prev)
			pt_prev->func(skb, skb->dev, pt_prev);
		else
			kfree_skb(skb, FREE_WRITE);

		/*
		 *	Optionally try to transmit after every received frame.
		 */
#ifdef CONFIG_XMIT_EVERY
		dev_transmit();
#endif
		cli();
	}	/* End of backlog loop */

	/*
	 *	The queue is empty; drop the busy flag.
	 */
	in_bh = 0;
	sti();

	/*
	 *	One last output flush.
	 */
	dev_transmit();
}

/*
 *	This routine is called when a device interface is ready to transmit a
 *	packet. Queued buffers are fed back to dev_queue_xmit() with a
 *	negative priority so they keep their place at the head of the queue.
 */

void dev_tint(struct device *dev)
{
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);
	/*
	 *	Work the queues in priority order.
	 */
	for(i = 0;i < DEV_NUMBUFFS; i++)
	{
		/*
		 *	Pull packets from the queue. Interrupts must stay off
		 *	while the queue is being manipulated.
		 */
		cli();
		while((skb=skb_dequeue(&dev->buffs[i]))!=NULL)
		{
			/*
			 *	Stop anyone freeing the buffer while we retransmit it.
			 */
			skb_device_lock(skb);
			restore_flags(flags);
			/*
			 *	Feed it to the output stage; the negative
			 *	priority makes it re-queue at the front on failure.
			 */
			dev_queue_xmit(skb,dev,-i - 1);
			/*
			 *	If the device can take no more then stop here.
			 */
			if (dev->tbusy)
				return;
			cli();
		}
	}
	restore_flags(flags);
}

/*
 *	Perform the SIOCGIFCONF call. Fill a user-supplied buffer with one
 *	ifreq per running interface and write back how many bytes were used.
 *	Called from dev_ioctl().
 */

static int dev_ifconf(char *arg)
{
	struct ifconf ifc;
	struct ifreq ifr;
	struct device *dev;
	char *pos;
	int len;
	int err;

	/*
	 *	Fetch the caller's info block.
	 */
	err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
	if(err)
		return err;
	memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
	len = ifc.ifc_len;
	pos = ifc.ifc_buf;

	/*
	 *	The caller's result buffer must also be writable.
	 */
	err=verify_area(VERIFY_WRITE,pos,len);
	if(err)
		return err;

	/*
	 *	Loop over the interfaces and write an entry for each one that
	 *	is up.
	 */
	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		if(!(dev->flags & IFF_UP))	/* Downed devices don't count */
			continue;
		memset(&ifr, 0, sizeof(struct ifreq));
		strcpy(ifr.ifr_name, dev->name);
		(*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
		(*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;

		/*
		 *	Have we run out of space here?
		 */
		if (len < sizeof(struct ifreq))
			break;

		/*
		 *	Write this block to the caller's space.
		 */
		memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
		pos += sizeof(struct ifreq);
		len -= sizeof(struct ifreq);
	}

	/*
	 *	All done. Write the updated control block back to the caller.
	 */
	ifc.ifc_len = (pos - ifc.ifc_buf);
	ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
	memcpy_tofs(arg, &ifc, sizeof(struct ifconf));

	/*
	 *	Report how much was filled in.
	 */
	return(pos - arg);
}

/*
 *	Build one line of the /proc/net/dev table for a single device,
 *	summarising its receive and transmit statistics.
 */

static int sprintf_stats(char *buffer, struct device *dev)
{
	struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
	int size;

	if (stats)
		size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
			dev->name,
			stats->rx_packets, stats->rx_errors,
			stats->rx_dropped + stats->rx_missed_errors,
			stats->rx_fifo_errors,
			stats->rx_length_errors + stats->rx_over_errors
			+ stats->rx_crc_errors + stats->rx_frame_errors,
			stats->tx_packets, stats->tx_errors, stats->tx_dropped,
			stats->tx_fifo_errors, stats->collisions,
			stats->tx_carrier_errors + stats->tx_aborted_errors
			+ stats->tx_window_errors + stats->tx_heartbeat_errors);
	else
		size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);

	return size;
}

/*
 *	Called from the proc filesystem to generate /proc/net/dev. The
 *	offset/length juggling below lets the file be read in arbitrarily
 *	sized chunks.
 */

int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
	int len=0;
	off_t begin=0;
	off_t pos=0;
	int size;

	struct device *dev;

	size = sprintf(buffer, "Inter-| Receive | Transmit\n"
		" face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");

	pos+=size;
	len+=size;

	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		size = sprintf_stats(buffer+len, dev);
		len+=size;
		pos=begin+len;

		if(pos<offset)
		{
			len=0;
			begin=pos;
		}
		if(pos>offset+length)
			break;
	}

	*start=buffer+(offset-begin);	/* Start of wanted data */
	len-=(offset-begin);
	if(len>length)
		len=length;
	return len;
}

/*
 *	Sanity check a netmask: the supplied address must have no bits set in
 *	the host part, and the mask itself (in host byte order) must be a
 *	contiguous run of leading one bits.
 */

static inline int bad_mask(unsigned long mask, unsigned long addr)
{
	if (addr & (mask = ~mask))
		return 1;
	mask = ntohl(mask);
	if (mask & (mask+1))
		return 1;
	return 0;
}

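/*
 *	Example, in host byte order for clarity: 255.255.255.0 (0xffffff00) is
 *	accepted, because its complement 0x000000ff plus one no longer shares
 *	any bits with the complement. 255.0.255.0 (0xff00ff00) is rejected:
 *	its complement 0x00ff00ff plus one still overlaps it, so the mask is
 *	not a contiguous run of leading one bits and bad_mask() returns 1.
 */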
/*
 *	Perform the SIOCxIFxxx calls. The socket layer has already checked
 *	permissions for the SET operations; here we copy the ifreq in, find
 *	the device and dispatch on the ioctl number.
 */

static int dev_ifsioc(void *arg, unsigned int getset)
{
	struct ifreq ifr;
	struct device *dev;
	int ret;

	/*
	 *	Fetch the caller's info block into kernel space.
	 */
	int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
	if(err)
		return err;

	memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));

	/*
	 *	See which interface the caller is talking about.
	 */
	if ((dev = dev_get(ifr.ifr_name)) == NULL)
		return(-ENODEV);

	switch(getset)
	{
		case SIOCGIFFLAGS:	/* Get interface flags */
			ifr.ifr_flags = dev->flags;
			goto rarok;

		case SIOCSIFFLAGS:	/* Set interface flags */
			{
				int old_flags = dev->flags;

				/*
				 *	We are not allowed to potentially
				 *	close/unload a device until we get this
				 *	lock.
				 */
				dev_lock_wait();

				/*
				 *	Set the flags on our device, keeping
				 *	IFF_UP under our own control.
				 */
				dev->flags = (ifr.ifr_flags & (
					IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
					IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
					IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER
					| IFF_MULTICAST)) | (dev->flags & IFF_UP);

				/*
				 *	Push the new promiscuity/multicast state
				 *	down to the hardware.
				 */
				dev_mc_upload(dev);

				/*
				 *	Handle IFF_UP ourselves: bring the
				 *	interface up or down according to what
				 *	the caller asked for.
				 */
				if ((old_flags^ifr.ifr_flags)&IFF_UP)
				{
					if(old_flags&IFF_UP)		/* Gone down */
						ret=dev_close(dev);
					else				/* Come up */
					{
						ret=dev_open(dev);
						if(ret<0)
							dev->flags&=~IFF_UP;	/* Open failed */
					}
				}
				else
					ret=0;
				/*
				 *	Reload the hardware address filters in
				 *	case the up/down change affected them.
				 */
				dev_mc_upload(dev);
			}
			break;

		case SIOCGIFADDR:	/* Get interface address (and family) */
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_port = 0;
			goto rarok;

		case SIOCSIFADDR:	/* Set interface address (and family) */
			dev->pa_addr = (*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_addr.s_addr;
			dev->family = ifr.ifr_addr.sa_family;

#ifdef CONFIG_INET
			/*
			 *	Derive a default netmask from the new address.
			 */
			dev->pa_mask = ip_get_mask(dev->pa_addr);
#endif
			dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
			ret = 0;
			break;

		case SIOCGIFBRDADDR:	/* Get the broadcast address */
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_port = 0;
			goto rarok;

		case SIOCSIFBRDADDR:	/* Set the broadcast address */
			dev->pa_brdaddr = (*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr;
			ret = 0;
			break;

		case SIOCGIFDSTADDR:	/* Get the destination (point to point) address */
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_port = 0;
			goto rarok;

		case SIOCSIFDSTADDR:	/* Set the destination address */
			dev->pa_dstaddr = (*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr;
			ret = 0;
			break;

		case SIOCGIFNETMASK:	/* Get the netmask for the interface */
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_port = 0;
			goto rarok;

		case SIOCSIFNETMASK:	/* Set the netmask for the interface */
			{
				unsigned long mask = (*(struct sockaddr_in *)
					&ifr.ifr_netmask).sin_addr.s_addr;
				ret = -EINVAL;
				/*
				 *	The mask we set must be legal.
				 */
				if (bad_mask(mask,0))
					break;
				dev->pa_mask = mask;
				ret = 0;
			}
			break;

		case SIOCGIFMETRIC:	/* Get the interface metric */
			ifr.ifr_metric = dev->metric;
			goto rarok;

		case SIOCSIFMETRIC:	/* Set the interface metric */
			dev->metric = ifr.ifr_metric;
			ret=0;
			break;

		case SIOCGIFMTU:	/* Get the MTU of a device */
			ifr.ifr_mtu = dev->mtu;
			goto rarok;

		case SIOCSIFMTU:	/* Set the MTU of a device */
			/*
			 *	An MTU below 68 cannot carry a minimum sized IP
			 *	datagram, so refuse it.
			 */
			if(ifr.ifr_mtu<68)
				return -EINVAL;
			dev->mtu = ifr.ifr_mtu;
			ret = 0;
			break;

		case SIOCGIFMEM:	/* Per device memory space: not supported */
			ret = -EINVAL;
			break;

		case SIOCSIFMEM:	/* Per device memory space: not supported */
			ret = -EINVAL;
			break;

		case OLD_SIOCGIFHWADDR:	/* Old way of getting the hardware address */
			memcpy(ifr.old_ifr_hwaddr,dev->dev_addr, MAX_ADDR_LEN);
			goto rarok;

		case SIOCGIFHWADDR:	/* Get the hardware address */
			memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
			ifr.ifr_hwaddr.sa_family=dev->type;
			goto rarok;

		case SIOCSIFHWADDR:	/* Set the hardware address */
			if(dev->set_mac_address==NULL)
				return -EOPNOTSUPP;
			if(ifr.ifr_hwaddr.sa_family!=dev->type)
				return -EINVAL;
			ret=dev->set_mac_address(dev,ifr.ifr_hwaddr.sa_data);
			break;

		case SIOCGIFMAP:	/* Get the hardware configuration */
			ifr.ifr_map.mem_start=dev->mem_start;
			ifr.ifr_map.mem_end=dev->mem_end;
			ifr.ifr_map.base_addr=dev->base_addr;
			ifr.ifr_map.irq=dev->irq;
			ifr.ifr_map.dma=dev->dma;
			ifr.ifr_map.port=dev->if_port;
			goto rarok;

		case SIOCSIFMAP:	/* Set the hardware configuration */
			if(dev->set_config==NULL)
				return -EOPNOTSUPP;
			return dev->set_config(dev,&ifr.ifr_map);

		case SIOCADDMULTI:	/* Add a multicast address */
			if(dev->set_multicast_list==NULL)
				return -EINVAL;
			if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
				return -EINVAL;
			dev_mc_add(dev,ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
			return 0;

		case SIOCDELMULTI:	/* Delete a multicast address */
			if(dev->set_multicast_list==NULL)
				return -EINVAL;
			if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
				return -EINVAL;
			dev_mc_delete(dev,ifr.ifr_hwaddr.sa_data,dev->addr_len, 1);
			return 0;

		/*
		 *	Unknown ioctl: pass device-private requests to the driver.
		 */
		default:
			if((getset >= SIOCDEVPRIVATE) &&
			   (getset <= (SIOCDEVPRIVATE + 15))) {
				if(dev->do_ioctl==NULL)
					return -EOPNOTSUPP;
				ret=dev->do_ioctl(dev, &ifr, getset);
				memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
				break;
			}

			ret = -EINVAL;
	}
	return(ret);

/*
 *	Common exit for the calls that just copy the ifreq back and
 *	return success.
 */
rarok:
	memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
	return 0;
}

/*
 *	This function handles all the "interface"-type ioctl calls coming in
 *	from the socket layer. The SET calls require superuser privilege;
 *	the GET calls do not.
 */

int dev_ioctl(unsigned int cmd, void *arg)
{
	switch(cmd)
	{
		case SIOCGIFCONF:
			(void) dev_ifconf((char *) arg);
			return 0;

		/*
		 *	Ioctl calls that can be read by anyone.
		 */
		case SIOCGIFFLAGS:
		case SIOCGIFADDR:
		case SIOCGIFDSTADDR:
		case SIOCGIFBRDADDR:
		case SIOCGIFNETMASK:
		case SIOCGIFMETRIC:
		case SIOCGIFMTU:
		case SIOCGIFMEM:
		case SIOCGIFHWADDR:
		case SIOCSIFHWADDR:
		case OLD_SIOCGIFHWADDR:
		case SIOCGIFSLAVE:
		case SIOCGIFMAP:
			return dev_ifsioc(arg, cmd);

		/*
		 *	Ioctl calls requiring the power of a superuser.
		 */
		case SIOCSIFFLAGS:
		case SIOCSIFADDR:
		case SIOCSIFDSTADDR:
		case SIOCSIFBRDADDR:
		case SIOCSIFNETMASK:
		case SIOCSIFMETRIC:
		case SIOCSIFMTU:
		case SIOCSIFMEM:
		case SIOCSIFMAP:
		case SIOCSIFSLAVE:
		case SIOCADDMULTI:
		case SIOCDELMULTI:
			if (!suser())
				return -EPERM;
			return dev_ifsioc(arg, cmd);

		case SIOCSIFLINK:
			return -EINVAL;

		/*
		 *	Unknown or device-private ioctl.
		 */
		default:
			if((cmd >= SIOCDEVPRIVATE) &&
			   (cmd <= (SIOCDEVPRIVATE + 15))) {
				return dev_ifsioc(arg, cmd);
			}
			return -EINVAL;
	}
}

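/*
 *	Illustrative sketch (not part of this file): how these ioctls are
 *	reached from user space. Any socket will do, since the request names
 *	the interface rather than the socket; the interface name is
 *	hypothetical.
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strcpy(ifr.ifr_name, "eth0");
 *	if (ioctl(fd, SIOCGIFADDR, &ifr) == 0)
 *		... ((struct sockaddr_in *) &ifr.ifr_addr)->sin_addr holds the address ...
 */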
/*
 *	Initialise the DEV module at boot time. It walks the statically linked
 *	dev_base list, sets up each device's packet queues, calls its init
 *	routine and unhooks any device whose probe fails. Finally it registers
 *	/proc/net/dev.
 */

void dev_init(void)
{
	struct device *dev, **dp;

	/*
	 *	Add the devices. If the call to dev->init fails, the device is
	 *	removed from the chain.
	 */
	dp = &dev_base;
	while ((dev = *dp) != NULL)
	{
		int i;
		for (i = 0; i < DEV_NUMBUFFS; i++) {
			skb_queue_head_init(dev->buffs + i);
		}

		if (dev->init && dev->init(dev))
		{
			/*
			 *	It failed to come up. Unhook it.
			 */
			*dp = dev->next;
		}
		else
		{
			dp = &dev->next;
		}
	}
	proc_net_register(&(struct proc_dir_entry) {
		PROC_NET_DEV, 3, "dev",
		S_IFREG | S_IRUGO, 1, 0, 0,
		0, &proc_net_inode_operations,
		dev_get_info
	});
}