1 /* 2 * NET3 Protocol independent device support routines. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public License 6 * as published by the Free Software Foundation; either version 7 * 2 of the License, or (at your option) any later version. 8 * 9 * Derived from the non IP parts of dev.c 1.0.19 10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu> 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 13 * 14 * Additional Authors: 15 * Florian la Roche <rzsfl@rz.uni-sb.de> 16 * Alan Cox <gw4pts@gw4pts.ampr.org> 17 * David Hinds <dhinds@allegro.stanford.edu> 18 * 19 * Changes: 20 * Alan Cox : device private ioctl copies fields back. 21 * Alan Cox : Transmit queue code does relevant stunts to 22 * keep the queue safe. 23 * Alan Cox : Fixed double lock. 24 * Alan Cox : Fixed promisc NULL pointer trap 25 * 26 * Cleaned up and recommented by Alan Cox 2nd April 1994. I hope to have 27 * the rest as well commented in the end. 28 */ 29
30 /* 31 * A lot of these includes will be going walkies very soon 32 */ 33
34 #include <asm/segment.h>
35 #include <asm/system.h>
36 #include <asm/bitops.h>
37 #include <linux/config.h>
38 #include <linux/types.h>
39 #include <linux/kernel.h>
40 #include <linux/sched.h>
41 #include <linux/string.h>
42 #include <linux/mm.h>
43 #include <linux/socket.h>
44 #include <linux/sockios.h>
45 #include <linux/in.h>
46 #include <linux/errno.h>
47 #include <linux/interrupt.h>
48 #include <linux/if_ether.h>
49 #include <linux/inet.h>
50 #include <linux/netdevice.h>
51 #include <linux/etherdevice.h>
52 #include "ip.h"
53 #include "route.h"
54 #include <linux/skbuff.h>
55 #include "sock.h"
56 #include "arp.h"
57
58
59 /* 60 * The list of packet types we will receive (as opposed to discard) 61 * and the routines to invoke. 62 */ 63
64 structpacket_type *ptype_base = NULL;
65
66 /* 67 * Device drivers call our routines to queue packets here. We empty the 68 * queue in the bottom half handler. 69 */ 70
71 staticstructsk_buff_headbacklog =
72 { 73 (structsk_buff *)&backlog, (structsk_buff *)&backlog 74 #ifdefCONFIG_SKB_CHECK 75 ,SK_HEAD_SKB 76 #endif 77 };
78
79 /* 80 * We don't overdo the queue or we will thrash memory badly. 81 */ 82
83 staticintbacklog_size = 0;
84
85 /* 86 * The number of sockets open for 'all' protocol use. We have to 87 * know this to copy a buffer the correct number of times. 88 */ 89
90 staticintdev_nit=0;
91
92 /* 93 * Return the lesser of the two values. 94 */ 95
/*
 *	Local helper: the smaller of two unsigned longs.
 */
static __inline__ unsigned long min(unsigned long a, unsigned long b)
{
	if (a < b)
		return a;
	return b;
}
101
102 /****************************************************************************************** 103
104 Protocol management and registration routines 105
106 *******************************************************************************************/ 107
108
109 /* 110 * Add a protocol ID to the list. 111 */ 112
/*
 *	Register a packet handler on the global ptype_base list.
 *
 *	ETH_P_ALL ("network tap") handlers are counted in dev_nit and are
 *	appended at the TAIL of the list; ordinary protocol handlers are
 *	pushed on the head, with pt->copy set if another handler for the
 *	same type already exists.
 *
 *	NOTE(review): the list is updated without cli() protection while
 *	net_bh() walks it — presumably callers only run at a safe time;
 *	confirm before relying on this from interrupt context.
 */
void dev_add_pack(struct packet_type *pt)
{
	struct packet_type *p1;
	pt->next = ptype_base;	/* Provisional: assume head insertion */

	/*
	 *	Don't use copy counts on ETH_P_ALL. Instead keep a global
	 *	count of number of these and use it and pt->copy to decide
	 *	copies.
	 */

	pt->copy=0;	/* Assume we will not be copying the buffer before
			 * this routine gets it
			 */

	if(pt->type == htons(ETH_P_ALL))
		dev_nit++;	/* I'd like a /dev/nit too one day 8) */
	else
	{
		/*
		 *	See if we need to copy it - that is another process
		 *	also wishes to receive this type of packet.
		 */
		for (p1 = ptype_base; p1 != NULL; p1 = p1->next)
		{
			if (p1->type == pt->type)
			{
				pt->copy = 1;	/* We will need to copy */
				break;
			}
		}
	}

	/*
	 *	NIT taps must go at the end or net_bh will leak!
	 *	(net_bh decrements nitcount as it passes each tap.)
	 */

	if (pt->type == htons(ETH_P_ALL))
	{
		pt->next=NULL;	/* Tap becomes the new tail */
		if(ptype_base==NULL)
			ptype_base=pt;
		else
		{
			/*
			 *	Move to the end of the list
			 */
			for(p1=ptype_base;p1->next!=NULL;p1=p1->next);
			/*
			 *	Hook on the end
			 */
			p1->next=pt;
		}
	}
	else
	/*
	 *	It goes on the start (pt->next already points at old head)
	 */
		ptype_base = pt;
}
174
175 /* 176 * Remove a protocol ID from the list. 177 */ 178
/*
 *	Remove a packet handler from the ptype_base list.
 *
 *	Besides unlinking 'pt', this maintains two invariants:
 *	- dev_nit stays in step with the number of ETH_P_ALL taps;
 *	- if 'pt' was the last (non-copying) handler of its type, the
 *	  previous handler of the same type is demoted to non-copying.
 */
void dev_remove_pack(struct packet_type *pt)
{
	struct packet_type *lpt, *pt1;

	/*
	 *	Keep the count of nit (Network Interface Tap) sockets correct.
	 */

	if (pt->type == htons(ETH_P_ALL))
		dev_nit--;

	/*
	 *	If we are first, just unhook us.
	 *	NOTE(review): this path does not run under cli() and does not
	 *	fix up a predecessor's copy flag — confirm this is intended.
	 */

	if (pt == ptype_base)
	{
		ptype_base = pt->next;
		return;
	}

	lpt = NULL;	/* Last earlier handler seen with the same type */

	/*
	 *	This is harder. What we do is to walk the list of sockets
	 *	for this type. We unhook the entry, and if there is a previous
	 *	entry that is copying _and_ we are not copying, (ie we are the
	 *	last entry for this type) then the previous one is set to
	 *	non-copying as it is now the last.
	 */
	for (pt1 = ptype_base; pt1->next != NULL; pt1 = pt1->next)
	{
		if (pt1->next == pt )
		{
			cli();	/* Keep net_bh off the list while we splice */
			if (!pt->copy && lpt)
				lpt->copy = 0;
			pt1->next = pt->next;
			sti();
			return;
		}
		/* Remember the most recent same-type entry before 'pt' */
		if (pt1->next->type == pt->type && pt->type != htons(ETH_P_ALL))
			lpt = pt1->next;
	}
}
225 /***************************************************************************************** 226
227 Device Interface Subroutines 228
229 ******************************************************************************************/ 230
231 /* 232 * Find an interface by name. 233 */ 234
235 structdevice *dev_get(char *name)
/* */ 236 { 237 structdevice *dev;
238
239 for (dev = dev_base; dev != NULL; dev = dev->next)
240 { 241 if (strcmp(dev->name, name) == 0)
242 return(dev);
243 } 244 return(NULL);
245 } 246
247
248 /* 249 * Prepare an interface for use. 250 */ 251
252 intdev_open(structdevice *dev)
/* */ 253 { 254 intret = 0;
255
256 /* 257 * Call device private open method 258 */ 259 if (dev->open)
260 ret = dev->open(dev);
261
262 /* 263 * If it went open OK then set the flags 264 */ 265
266 if (ret == 0)
267 dev->flags |= (IFF_UP | IFF_RUNNING);
268
269 return(ret);
270 } 271
272
273 /* 274 * Completely shutdown an interface. 275 * 276 * WARNING: Both because of the way the upper layers work (that can be fixed) 277 * and because of races during a close (that can't be fixed any other way) 278 * a device may be given things to transmit EVEN WHEN IT IS DOWN. The driver 279 * MUST cope with this (eg by freeing and dumping the frame). 280 */ 281
/*
 *	Completely shut down an interface: driver stop, route/ARP flush,
 *	address blanking and queue purge.  Only acts if any flag is set.
 *	Always returns 0.
 */
int dev_close(struct device *dev)
{
	/*
	 *	Only close a device if it is up.
	 *	NOTE(review): this tests dev->flags != 0 rather than
	 *	IFF_UP specifically — any stray flag triggers a close.
	 */

	if (dev->flags != 0)
	{
		int ct=0;
		dev->flags = 0;		/* Down before the driver stop runs */
		/*
		 *	Call the device specific close. This cannot fail.
		 */
		if (dev->stop)
			dev->stop(dev);
		/*
		 *	Delete the routes and ARP entries for the device.
		 */
#ifdef CONFIG_INET
		ip_rt_flush(dev);
		arp_device_down(dev);
#endif
#ifdef CONFIG_IPX
		ipxrtr_device_down(dev);
#endif
		/*
		 *	Blank the IP addresses
		 */
		dev->pa_addr = 0;
		dev->pa_dstaddr = 0;
		dev->pa_brdaddr = 0;
		dev->pa_mask = 0;
		/*
		 *	Purge any queued packets when we down the link.
		 *	Only 'free' buffers are released; locked ones are
		 *	still owned by a sender.
		 */
		while(ct<DEV_NUMBUFFS)
		{
			struct sk_buff *skb;
			while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
				if(skb->free)
					kfree_skb(skb,FREE_WRITE);
			ct++;
		}
	}
	return(0);
}
329
330 /* 331 * Send (or queue for sending) a packet. 332 * 333 * IMPORTANT: When this is called to resend frames. The caller MUST 334 * already have locked the sk_buff. Apart from that we do the 335 * rest of the magic. 336 */ 337
/*
 *	Send (or queue for sending) a packet on 'dev' at priority 'pri'.
 *
 *	A negative 'pri' flags a retransmit pulled from the queue front
 *	(the caller MUST already hold the sk_buff device lock in that
 *	case — see the comment block above).  On driver failure the frame
 *	is put back on the head of its priority queue.
 */
void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
	unsigned long flags;
	int nitcount;
	struct packet_type *ptype;
	int where = 0;		/* used to say if the packet should go	*/
				/* at the front or the back of the	*/
				/* queue - front is a retransmit try	*/

	if (dev == NULL)
	{
		printk("dev.c: dev_queue_xmit: dev = NULL\n");
		return;
	}

	/* Fresh (non-retransmit) frames get locked here */
	if(pri>=0 && !skb_device_locked(skb))
		skb_device_lock(skb);	/* Shove a lock on the frame */
#ifdef CONFIG_SLAVE_BALANCING
	/* Divert to a less-loaded, up slave device when one exists */
	save_flags(flags);
	cli();
	if(dev->slave!=NULL && dev->slave->pkt_queue < dev->pkt_queue &&
				(dev->slave->flags & IFF_UP))
		dev=dev->slave;
	restore_flags(flags);
#endif

	IS_SKB(skb);

	skb->dev = dev;

	/*
	 *	This just eliminates some race conditions, but not all...
	 *	A linked skb means it is still on a queue: kick the driver
	 *	instead of queuing it twice.
	 */

	if (skb->next != NULL)
	{
		/*
		 *	Make sure we haven't missed an interrupt.
		 */
		printk("dev_queue_xmit: worked around a missed interrupt\n");
		dev->hard_start_xmit(NULL, dev);
		return;
	}

	/*
	 *	Negative priority is used to flag a frame that is being pulled from the
	 *	queue front as a retransmit attempt. It therefore goes back on the queue
	 *	start on a failure.
	 */

	if (pri < 0)
	{
		pri = -pri-1;
		where = 1;
	}

	if (pri >= DEV_NUMBUFFS)
	{
		printk("bad priority in dev_queue_xmit.\n");
		pri = 1;
	}

	/*
	 *	If the address has not been resolved. Call the device header rebuilder.
	 *	This can cover all protocols and technically not just ARP either.
	 *	A non-zero return means resolution is pending; the frame is held.
	 */

	if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
		return;
	}

	save_flags(flags);
	cli();
	if (!where) {
		/*
		 *	Normal send: append to the tail, then take whatever is
		 *	at the head — FIFO order is preserved and 'skb' may
		 *	now be an older queued frame.
		 */
#ifdef CONFIG_SLAVE_BALANCING
		skb->in_dev_queue=1;
#endif
		skb_queue_tail(dev->buffs + pri,skb);
		skb_device_unlock(skb);		/* Buffer is on the device queue and can be freed safely */
		skb = skb_dequeue(dev->buffs + pri);
		skb_device_lock(skb);		/* New buffer needs locking down */
#ifdef CONFIG_SLAVE_BALANCING
		skb->in_dev_queue=0;
#endif
	}
	restore_flags(flags);

	/* copy outgoing packets to any sniffer packet handlers */
	if(!where)
	{
		for (nitcount = dev_nit, ptype = ptype_base; nitcount > 0 && ptype != NULL; ptype = ptype->next)
		{
			if (ptype->type == htons(ETH_P_ALL)) {
				struct sk_buff *skb2;
				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
					break;
				ptype->func(skb2, skb->dev, ptype);
				nitcount--;
			}
		}
	}
	if (dev->hard_start_xmit(skb, dev) == 0) {
		/*
		 *	Packet is now solely the responsibility of the driver
		 */
		return;
	}

	/*
	 *	Transmission failed, put skb back into a list. Once on the list its safe and
	 *	no longer device locked (it can be freed safely from the device queue)
	 */
	cli();
#ifdef CONFIG_SLAVE_BALANCING
	skb->in_dev_queue=1;
	dev->pkt_queue++;
#endif
	skb_device_unlock(skb);
	skb_queue_head(dev->buffs + pri,skb);
	restore_flags(flags);
}
460 /* 461 * Receive a packet from a device driver and queue it for the upper 462 * (protocol) levels. It always succeeds. This is the recommended 463 * interface to use. 464 */ 465
466 voidnetif_rx(structsk_buff *skb)
/* */ 467 { 468 staticintdropping = 0;
469
470 /* 471 * Any received buffers are un-owned and should be discarded 472 * when freed. These will be updated later as the frames get 473 * owners. 474 */ 475 skb->sk = NULL;
476 skb->free = 1;
477 if(skb->stamp.tv_sec==0)
478 skb->stamp = xtime;
479
480 /* 481 * Check that we aren't overdoing things. 482 */ 483
484 if (!backlog_size)
485 dropping = 0;
486 elseif (backlog_size > 100)
487 dropping = 1;
488
489 if (dropping)
490 { 491 kfree_skb(skb, FREE_READ);
492 return;
493 } 494
495 /* 496 * Add it to the "backlog" queue. 497 */ 498
499 IS_SKB(skb);
500 skb_queue_tail(&backlog,skb);
501 backlog_size++;
502
503 /* 504 * If any packet arrived, mark it for processing after the 505 * hardware interrupt returns. 506 */ 507
508 mark_bh(NET_BH);
509 return;
510 } 511
512
513 /* 514 * The old interface to fetch a packet from a device driver. 515 * This function is the base level entry point for all drivers that 516 * want to send a packet to the upper (protocol) levels. It takes 517 * care of de-multiplexing the packet to the various modules based 518 * on their protocol ID. 519 * 520 * Return values: 1 <- exit I can't do any more 521 * 0 <- feed me more (i.e. "done", "OK"). 522 * 523 * This function is OBSOLETE and should not be used by any new 524 * device. 525 */ 526
/*
 *	OBSOLETE driver receive entry point (see comment block above).
 *	Either takes a ready sk_buff (IN_SKBUFF) or copies 'len' bytes out
 *	of the driver's circular receive memory into a fresh sk_buff,
 *	then hands the frame to netif_rx().
 *
 *	Returns 1 on "can't take more" (bad args / dropping / no memory),
 *	0 on "frame accepted, feed me more".
 */
int dev_rint(unsigned char *buff, long len, int flags, struct device *dev)
{
	static int dropping = 0;	/* Sticky OOM-drop state for this path */
	struct sk_buff *skb = NULL;
	unsigned char *to;
	int amount, left;
	int len2;

	if (dev == NULL || buff == NULL || len <= 0)
		return(1);

	if (flags & IN_SKBUFF)
	{
		/* 'buff' is already a fully formed sk_buff */
		skb = (struct sk_buff *) buff;
	}
	else
	{
		/* Stop dropping only once the backlog has emptied */
		if (dropping)
		{
			if (skb_peek(&backlog) != NULL)
				return(1);
			printk("INET: dev_rint: no longer dropping packets.\n");
			dropping = 0;
		}

		skb = alloc_skb(len, GFP_ATOMIC);
		if (skb == NULL)
		{
			printk("dev_rint: packet dropped on %s (no memory) !\n",
			       dev->name);
			dropping = 1;
			return(1);
		}

		/*
		 *	First we copy the packet into a buffer, and save it for later. We
		 *	in effect handle the incoming data as if it were from a circular buffer:
		 *	a copy may wrap from rmem_end back to rmem_start.
		 */

		to = skb->data;
		left = len;

		len2 = len;
		while (len2 > 0)
		{
			/* Copy up to the end of the driver's ring, then wrap */
			amount = min(len2, (unsigned long) dev->rmem_end -
						(unsigned long) buff);
			memcpy(to, buff, amount);
			len2 -= amount;
			left -= amount;
			buff += amount;
			to += amount;
			if ((unsigned long) buff == dev->rmem_end)
				buff = (unsigned char *) dev->rmem_start;
		}
	}

	/*
	 *	Tag the frame and kick it to the proper receive routine
	 */

	skb->len = len;
	skb->dev = dev;
	skb->free = 1;

	netif_rx(skb);
	/*
	 *	OK, all done.
	 */
	return(0);
}
599
600 /* 601 * This routine causes all interfaces to try to send some data. 602 */ 603
604 voiddev_transmit(void)
/* */ 605 { 606 structdevice *dev;
607
608 for (dev = dev_base; dev != NULL; dev = dev->next)
609 { 610 if (dev->flags != 0 && !dev->tbusy) { 611 /* 612 * Kick the device 613 */ 614 dev_tint(dev);
615 } 616 } 617 } 618
619
620 /********************************************************************************** 621
622 Receive Queue Processor 623 624 ***********************************************************************************/ 625
626 /* 627 * This is a single non-reentrant routine which takes the received packet 628 * queue and throws it at the networking layers in the hope that something 629 * useful will emerge. 630 */ 631
volatile char in_bh = 0;	/* Non-reentrant remember: set while net_bh runs */

/*
 *	Report whether the network bottom half is currently executing
 *	(1) or not (0).  Used by timer.c.
 */
int in_net_bh()	/* Used by timer.c */
{
	return(in_bh==0?0:1);
}
639 /* 640 * When we are called the queue is ready to grab, the interrupts are 641 * on and hardware can interrupt and queue to the receive queue a we 642 * run with no problems. 643 * This is run as a bottom half after an interrupt handler that does 644 * mark_bh(NET_BH); 645 */ 646
/*
 *	The network bottom half: drain the receive backlog and feed each
 *	frame to every matching protocol handler on ptype_base.  Runs
 *	after an interrupt handler has done mark_bh(NET_BH); guarded
 *	against re-entry by the in_bh flag.
 */
void net_bh(void *tmp)
{
	struct sk_buff *skb;
	struct packet_type *ptype;
	unsigned short type;
	unsigned char flag = 0;		/* Did any handler take the frame? */
	int nitcount;			/* ETH_P_ALL taps left to feed */

	/*
	 *	Atomically check and mark our BUSY state.  A non-zero return
	 *	means another invocation is already running.
	 */

	if (set_bit(1, (void*)&in_bh))
		return;

	/*
	 *	Can we send anything now? We want to clear the
	 *	decks for any more sends that get done as we
	 *	process the input.
	 */

	dev_transmit();

	/*
	 *	Any data left to process. This may occur because a
	 *	mark_bh() is done after we empty the queue including
	 *	that from the device which does a mark_bh() just after
	 */

	cli();

	/*
	 *	While the queue is not empty.  Interrupts are re-enabled
	 *	inside the loop body and masked again before each dequeue.
	 */

	while((skb=skb_dequeue(&backlog))!=NULL)
	{
		/*
		 *	We have a packet. Therefore the queue has shrunk
		 */
		backlog_size--;

		nitcount=dev_nit;
		flag=0;
		sti();

		/*
		 *	Point h.raw past the link-level header; the 'skb'
		 *	data pointer is assumed to sit on the MAC header.
		 */

		skb->h.raw = skb->data + skb->dev->hard_header_len;
		skb->len -= skb->dev->hard_header_len;

		/*
		 *	Fetch the packet protocol ID.  The driver's
		 *	type_trans hook knows where to find it (eg the
		 *	h_proto field of an Ethernet header).
		 */

		type = skb->dev->type_trans(skb, skb->dev);

		/*
		 *	Loop over the known protocols and forward the frame
		 *	to each matching handler, cloning when the frame is
		 *	wanted more than once (ptype->copy, or further NIT
		 *	taps still pending in nitcount).
		 */

		for (ptype = ptype_base; ptype != NULL; ptype = ptype->next)
		{
			if (ptype->type == type || ptype->type == htons(ETH_P_ALL))
			{
				struct sk_buff *skb2;

				if (ptype->type == htons(ETH_P_ALL))
					nitcount--;
				if (ptype->copy || nitcount)
				{
					/*
					 *	copy if we need to
					 */
#ifdef OLD
					skb2 = alloc_skb(skb->len, GFP_ATOMIC);
					if (skb2 == NULL)
						continue;
					memcpy(skb2, skb, skb2->mem_len);
					skb2->mem_addr = skb2;
					skb2->h.raw = (unsigned char *)(
						(unsigned long) skb2 +
						(unsigned long) skb->h.raw -
						(unsigned long) skb
					);
					skb2->free = 1;
#else
					skb2=skb_clone(skb, GFP_ATOMIC);
					if(skb2==NULL)
						continue;
#endif
				}
				else
				{
					/* Last taker gets the original */
					skb2 = skb;
				}

				/*
				 *	Protocol located.
				 */

				flag = 1;

				/*
				 *	Kick the protocol handler. This should be fast
				 *	and efficient code.
				 */

				ptype->func(skb2, skb->dev, ptype);
			}
		}	/* End of protocol list loop */

		/*
		 *	Nobody wanted the frame: free it ourselves.
		 */

		if (!flag)
		{
			kfree_skb(skb, FREE_WRITE);
		}

		/*
		 *	Again, see if we can transmit anything now.
		 */

		dev_transmit();
		cli();
	}	/* End of queue loop */

	/*
	 *	We have emptied the queue
	 */

	in_bh = 0;
	sti();

	/*
	 *	One last output flush.
	 */

	dev_transmit();
}
810
811 /* 812 * This routine is called when an device driver (i.e. an 813 * interface) is ready to transmit a packet. 814 */ 815
/*
 *	Called when a device driver is ready to transmit: pull queued
 *	frames off the device's priority queues (highest priority first)
 *	and resubmit them via dev_queue_xmit() with a negative priority
 *	(the retransmit convention).  Stops early once the device is busy.
 */
void dev_tint(struct device *dev)
{
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);
	/*
	 *	Work the queues in priority order
	 */

	for(i = 0;i < DEV_NUMBUFFS; i++)
	{
		/*
		 *	Pull packets from the queue with interrupts masked;
		 *	they are re-enabled (via restore_flags) around the
		 *	actual transmit call.
		 */

		cli();
		while((skb=skb_dequeue(&dev->buffs[i]))!=NULL)
		{
			/*
			 *	Stop anyone freeing the buffer while we retransmit it
			 */
			skb_device_lock(skb);
			restore_flags(flags);
			/*
			 *	Feed them to the output stage and if it fails
			 *	indicate they re-queue at the front.
			 */
			dev_queue_xmit(skb,dev,-i - 1);
			/*
			 *	If we can take no more then stop here.
			 *	(flags were restored above, so returning is safe.)
			 */
			if (dev->tbusy)
				return;
			cli();
		}
	}
	restore_flags(flags);
}
858
859 /* 860 * Perform a SIOCGIFCONF call. This structure will change 861 * size shortly, and there is nothing I can do about it. 862 * Thus we will need a 'compatibility mode'. 863 */ 864
/*
 *	Perform a SIOCGIFCONF call: copy one ifreq (name + primary
 *	address) per UP interface into the caller's buffer, then write
 *	the updated ifconf block back.
 */
static int dev_ifconf(char *arg)
{
	struct ifconf ifc;
	struct ifreq ifr;
	struct device *dev;
	char *pos;	/* Cursor into the caller's buffer */
	int len;	/* Space remaining in that buffer */
	int err;

	/*
	 *	Fetch the caller's info block.
	 */

	err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
	if(err)
		return err;
	memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
	len = ifc.ifc_len;
	pos = ifc.ifc_buf;

	/*
	 *	We now walk the device list filling each active device
	 *	into the array.  Validate the whole result buffer first.
	 */

	err=verify_area(VERIFY_WRITE,pos,len);
	if(err)
		return err;

	/*
	 *	Loop over the interfaces, and write an info block for each.
	 */

	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		if(!(dev->flags & IFF_UP))	/* Downed devices don't count */
			continue;
		memset(&ifr, 0, sizeof(struct ifreq));
		strcpy(ifr.ifr_name, dev->name);
		(*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
		(*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;

		/*
		 *	Write this block to the caller's space.
		 */

		memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
		pos += sizeof(struct ifreq);
		len -= sizeof(struct ifreq);

		/*
		 *	Have we run out of space here ?
		 */

		if (len < sizeof(struct ifreq))
			break;
	}

	/*
	 *	All done. Write the updated control block back to the caller.
	 */

	ifc.ifc_len = (pos - ifc.ifc_buf);
	ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
	memcpy_tofs(arg, &ifc, sizeof(struct ifconf));

	/*
	 *	Report how much was filled in.
	 *	NOTE(review): this is pos - arg, the distance from the
	 *	ifconf struct pointer, not pos - ifc.ifc_buf (the bytes
	 *	actually written) — looks suspicious; confirm callers.
	 */

	return(pos - arg);
}
938
939 /* 940 * This is invoked by the /proc filesystem handler to display a device 941 * in detail. 942 */ 943
944 staticintsprintf_stats(char *buffer, structdevice *dev)
/* */ 945 { 946 structenet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
947 intsize;
948
949 if (stats)
950 size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
951 dev->name,
952 stats->rx_packets, stats->rx_errors,
953 stats->rx_dropped + stats->rx_missed_errors,
954 stats->rx_fifo_errors,
955 stats->rx_length_errors + stats->rx_over_errors 956 + stats->rx_crc_errors + stats->rx_frame_errors,
957 stats->tx_packets, stats->tx_errors, stats->tx_dropped,
958 stats->tx_fifo_errors, stats->collisions,
959 stats->tx_carrier_errors + stats->tx_aborted_errors 960 + stats->tx_window_errors + stats->tx_heartbeat_errors);
961 else 962 size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);
963
964 returnsize;
965 } 966
967 /* 968 * Called from the PROCfs module. This now uses the new arbitrary sized /proc/net interface 969 * to create /proc/net/dev 970 */ 971
/*
 *	/proc/net/dev read handler (arbitrary-sized /proc/net interface).
 *	Builds the header plus one sprintf_stats() line per device, then
 *	windows the result to the requested (offset, length) slice using
 *	the standard begin/pos bookkeeping.
 */
int dev_get_info(char *buffer, char **start, off_t offset, int length)
{
	int len=0;	/* Bytes currently held in 'buffer' */
	off_t begin=0;	/* File offset of buffer[0] */
	off_t pos=0;	/* File offset just past what we've generated */
	int size;

	struct device *dev;


	size = sprintf(buffer, "Inter-| Receive | Transmit\n"
		" face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");

	pos+=size;
	len+=size;


	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		size = sprintf_stats(buffer+len, dev);
		len+=size;
		pos=begin+len;

		/* Everything before 'offset' can be discarded */
		if(pos<offset)
		{
			len=0;
			begin=pos;
		}
		/* Stop once we have covered the requested window */
		if(pos>offset+length)
			break;
	}

	*start=buffer+(offset-begin);	/* Start of wanted data */
	len-=(offset-begin);		/* Start slop */
	if(len>length)
		len=length;		/* Ending slop */
	return len;
}
1011
1012 /*1013 * This checks bitmasks for the ioctl calls for devices.1014 */1015
/*
 *	Validate a netmask (network byte order) for the ioctl calls.
 *	The mask is bad if 'addr' uses bits outside it, or if the mask's
 *	one-bits are not a contiguous run from the most significant end.
 *	Returns 1 for a bad mask, 0 for a good one.
 */
static inline int bad_mask(unsigned long mask, unsigned long addr)
{
	unsigned long inv = ~mask;	/* The host-part bits of the mask */

	/* The address must not set any bits the mask excludes */
	if (addr & inv)
		return 1;

	/* In host order, ~mask must look like 2^n - 1 (contiguous low bits) */
	inv = ntohl(inv);
	if (inv & (inv + 1))
		return 1;

	return 0;
}
1026 /*1027 * Perform the SIOCxIFxxx calls. 1028 *1029 * The socket layer has seen an ioctl the address family thinks is1030 * for the device. At this point we get invoked to make a decision1031 */1032
/*
 *	Perform the SIOCxIFxxx interface ioctls: copy the caller's ifreq
 *	into kernel space, look up the named device, dispatch on the
 *	ioctl number, and (for the "get" calls) copy the result back.
 *	Returns 0 or a negative errno.
 */
static int dev_ifsioc(void *arg, unsigned int getset)
{
	struct ifreq ifr;
	struct device *dev;
	int ret;

	/*
	 *	Fetch the caller's info block into kernel space
	 */

	int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
	if(err)
		return err;

	memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));

	/*
	 *	See which interface the caller is talking about.
	 */

	if ((dev = dev_get(ifr.ifr_name)) == NULL)
		return(-ENODEV);

	switch(getset)
	{
		case SIOCGIFFLAGS:	/* Get interface flags */
			ifr.ifr_flags = dev->flags;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;
		case SIOCSIFFLAGS:	/* Set interface flags */
			{
				int old_flags = dev->flags;
#ifdef CONFIG_SLAVE_BALANCING
				/* A slave's flags are owned by its master */
				if(dev->flags&IFF_SLAVE)
					return -EBUSY;
#endif
				/* Only accept the flag bits the user may control */
				dev->flags = ifr.ifr_flags & (
					IFF_UP | IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
					IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
					IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER);
#ifdef CONFIG_SLAVE_BALANCING
				/* Dropping IFF_MASTER releases any slave */
				if(!(dev->flags&IFF_MASTER) && dev->slave)
				{
					dev->slave->flags&=~IFF_SLAVE;
					dev->slave=NULL;
				}
#endif

				if( dev->set_multicast_list!=NULL)
				{

					/*
					 *	Has promiscuous mode been turned off
					 */

					if ( (old_flags & IFF_PROMISC) && ((dev->flags & IFF_PROMISC) == 0))
						dev->set_multicast_list(dev,0,NULL);

					/*
					 *	Has it been turned on
					 */

					if ( (dev->flags & IFF_PROMISC) && ((old_flags & IFF_PROMISC) == 0))
						dev->set_multicast_list(dev,-1,NULL);
				}

				/*
				 *	Have we downed the interface
				 */

				if ((old_flags & IFF_UP) && ((dev->flags & IFF_UP) == 0))
				{
					ret = dev_close(dev);
				}
				else
				{
					/*
					 *	Have we upped the interface
					 */

					ret = (! (old_flags & IFF_UP) && (dev->flags & IFF_UP))
						? dev_open(dev) : 0;
					/*
					 *	Check the flags.
					 */
					if(ret<0)
						dev->flags&=~IFF_UP;	/* Didn't open so down the if */
				}
			}
			break;

		case SIOCGIFADDR:	/* Get interface address (and family) */
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_port = 0;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFADDR:	/* Set interface address (and family) */
			dev->pa_addr = (*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_addr.s_addr;
			dev->family = ifr.ifr_addr.sa_family;

#ifdef CONFIG_INET
			/* This is naughty. When net-032e comes out It wants moving into the net032
			   code not the kernel. Till then it can sit here (SIGH) */
			dev->pa_mask = ip_get_mask(dev->pa_addr);
#endif
			/* Derive a default broadcast address from the new mask */
			dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
			ret = 0;
			break;

		case SIOCGIFBRDADDR:	/* Get the broadcast address */
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_port = 0;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFBRDADDR:	/* Set the broadcast address */
			dev->pa_brdaddr = (*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr;
			ret = 0;
			break;

		case SIOCGIFDSTADDR:	/* Get the destination address (for point-to-point links) */
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
			/* NOTE(review): family/port are written through
			   ifr_broadaddr here, not ifr_dstaddr — harmless if the
			   union overlays, but confirm. */
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_port = 0;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFDSTADDR:	/* Set the destination address (for point-to-point links) */
			dev->pa_dstaddr = (*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr;
			ret = 0;
			break;

		case SIOCGIFNETMASK:	/* Get the netmask for the interface */
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_port = 0;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
			{
				unsigned long mask = (*(struct sockaddr_in *)
					&ifr.ifr_netmask).sin_addr.s_addr;
				ret = -EINVAL;
				/*
				 *	The mask we set must be legal.
				 */
				if (bad_mask(mask,0))
					break;
				dev->pa_mask = mask;
				ret = 0;
			}
			break;

		case SIOCGIFMETRIC:	/* Get the metric on the interface (currently unused) */

			ifr.ifr_metric = dev->metric;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFMETRIC:	/* Set the metric on the interface (currently unused) */
			dev->metric = ifr.ifr_metric;
			ret = 0;
			break;

		case SIOCGIFMTU:	/* Get the MTU of a device */
			ifr.ifr_mtu = dev->mtu;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFMTU:	/* Set the MTU of a device */

			/*
			 *	MTU must be positive and under the page size problem
			 */

			if(ifr.ifr_mtu<1 || ifr.ifr_mtu>3800)
				return -EINVAL;
			dev->mtu = ifr.ifr_mtu;
			ret = 0;
			break;

		case SIOCGIFMEM:	/* Get the per device memory space. We can add this but currently
					   do not support it */
			printk("NET: ioctl(SIOCGIFMEM, 0x%08X)\n", (int)arg);
			ret = -EINVAL;
			break;

		case SIOCSIFMEM:	/* Set the per device memory buffer space. Not applicable in our case */
			printk("NET: ioctl(SIOCSIFMEM, 0x%08X)\n", (int)arg);
			ret = -EINVAL;
			break;

		case OLD_SIOCGIFHWADDR:	/* Get the hardware address. This will change and SIFHWADDR will be added */
			memcpy(ifr.old_ifr_hwaddr,dev->dev_addr, MAX_ADDR_LEN);
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			ret=0;
			break;

		case SIOCGIFHWADDR:	/* Get the hardware address with family tag */
			memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
			ifr.ifr_hwaddr.sa_family=dev->type;
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			ret=0;
			break;

		case SIOCSIFHWADDR:	/* Set the hardware (MAC) address, if the driver allows it */
			if(dev->set_mac_address==NULL)
				return -EOPNOTSUPP;
			if(ifr.ifr_hwaddr.sa_family!=dev->type)
				return -EINVAL;
			ret=dev->set_mac_address(dev,ifr.ifr_hwaddr.sa_data);
			break;

		case SIOCDEVPRIVATE:	/* Pass through to the driver's private ioctl */
			if(dev->do_ioctl==NULL)
				return -EOPNOTSUPP;
			ret=dev->do_ioctl(dev, &ifr);
			/* Copy fields back so private ioctls can return data */
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			break;

		case SIOCGIFMAP:	/* Report the device's hardware configuration */
			ifr.ifr_map.mem_start=dev->mem_start;
			ifr.ifr_map.mem_end=dev->mem_end;
			ifr.ifr_map.base_addr=dev->base_addr;
			ifr.ifr_map.irq=dev->irq;
			ifr.ifr_map.dma=dev->dma;
			ifr.ifr_map.port=dev->if_port;
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			ret=0;
			break;

		case SIOCSIFMAP:	/* Reconfigure hardware, if the driver supports it */
			if(dev->set_config==NULL)
				return -EOPNOTSUPP;
			return dev->set_config(dev,&ifr.ifr_map);

		case SIOCGIFSLAVE:	/* Report the device's slave, if any */
#ifdef CONFIG_SLAVE_BALANCING
			if(dev->slave==NULL)
				return -ENOENT;
			/* NOTE(review): this copies dev->name, not
			   dev->slave->name — looks suspicious; confirm. */
			strncpy(ifr.ifr_name,dev->name,sizeof(ifr.ifr_name));
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			ret=0;
#else
			return -ENOENT;
#endif
			break;
#ifdef CONFIG_SLAVE_BALANCING
		case SIOCSIFSLAVE:
		{

		/*
		 *	Fun game. Get the device up and the flags right without
		 *	letting some scummy user confuse us.
		 */
			unsigned long flags;
			struct device *slave=dev_get(ifr.ifr_slave);
			save_flags(flags);
			if(slave==NULL)
			{
				return -ENODEV;
			}
			cli();
			/* The slave must already be up and running */
			if((slave->flags&(IFF_UP|IFF_RUNNING))!=(IFF_UP|IFF_RUNNING))
			{
				restore_flags(flags);
				return -EINVAL;
			}
			/* Neither end may already be enslaved/mastered */
			if(dev->flags&IFF_SLAVE)
			{
				restore_flags(flags);
				return -EBUSY;
			}
			if(dev->slave!=NULL)
			{
				restore_flags(flags);
				return -EBUSY;
			}
			if(slave->flags&IFF_SLAVE)
			{
				restore_flags(flags);
				return -EBUSY;
			}
			dev->slave=slave;
			slave->flags|=IFF_SLAVE;
			dev->flags|=IFF_MASTER;
			restore_flags(flags);
			ret=0;
		}
		break;
#endif
		/*
		 *	Unknown ioctl
		 */

		default:
			ret = -EINVAL;
	}
	return(ret);
}
1360
1361 /*1362 * This function handles all "interface"-type I/O control requests. The actual1363 * 'doing' part of this is dev_ifsioc above.1364 */1365
1366 intdev_ioctl(unsignedintcmd, void *arg)
/* */1367 {1368 switch(cmd)
1369 {1370 caseSIOCGIFCONF:
1371 (void) dev_ifconf((char *) arg);
1372 return 0;
1373
1374 /*1375 * Ioctl calls that can be done by all.1376 */1377
1378 caseSIOCGIFFLAGS:
1379 caseSIOCGIFADDR:
1380 caseSIOCGIFDSTADDR:
1381 caseSIOCGIFBRDADDR:
1382 caseSIOCGIFNETMASK:
1383 caseSIOCGIFMETRIC:
1384 caseSIOCGIFMTU:
1385 caseSIOCGIFMEM:
1386 caseSIOCGIFHWADDR:
1387 caseSIOCSIFHWADDR:
1388 caseOLD_SIOCGIFHWADDR:
1389 caseSIOCGIFSLAVE:
1390 caseSIOCGIFMAP:
1391 returndev_ifsioc(arg, cmd);
1392
1393 /*1394 * Ioctl calls requiring the power of a superuser1395 */1396
1397 caseSIOCSIFFLAGS:
1398 caseSIOCSIFADDR:
1399 caseSIOCSIFDSTADDR:
1400 caseSIOCSIFBRDADDR:
1401 caseSIOCSIFNETMASK:
1402 caseSIOCSIFMETRIC:
1403 caseSIOCSIFMTU:
1404 caseSIOCSIFMEM:
1405 caseSIOCSIFMAP:
1406 caseSIOCSIFSLAVE:
1407 caseSIOCDEVPRIVATE:
1408 if (!suser())
1409 return -EPERM;
1410 returndev_ifsioc(arg, cmd);
1411
1412 caseSIOCSIFLINK:
1413 return -EINVAL;
1414
1415 /*1416 * Unknown ioctl.1417 */1418
1419 default:
1420 return -EINVAL;
1421 }1422 }1423
1424
1425 /*1426 * Initialize the DEV module. At boot time this walks the device list and1427 * unhooks any devices that fail to initialise (normally hardware not 1428 * present) and leaves us with a valid list of present and active devices.1429 *1430 * The PCMCIA code may need to change this a little, and add a pair1431 * of register_inet_device() unregister_inet_device() calls. This will be1432 * needed for ethernet as modules support.1433 */1434
1435 voiddev_init(void)
/* */1436 {1437 structdevice *dev, *dev2;
1438
1439 /*1440 * Add the devices.1441 * If the call to dev->init fails, the dev is removed1442 * from the chain disconnecting the device until the1443 * next reboot.1444 */1445
1446 dev2 = NULL;
1447 for (dev = dev_base; dev != NULL; dev=dev->next)
1448 {1449 if (dev->init && dev->init(dev))
1450 {1451 /*1452 * It failed to come up. Unhook it.1453 */1454
1455 if (dev2 == NULL)
1456 dev_base = dev->next;
1457 else1458 dev2->next = dev->next;
1459 }1460 else1461 {1462 dev2 = dev;
1463 }1464 }1465 }