/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 * 		Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dhinds@allegro.stanford.edu>
 *
 *	Changes:
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant stunts to
 *					keep the queue safe.
 *
 *	Cleaned up and recommented by Alan Cox 2nd April 1994. I hope to have
 *	the rest as well commented in the end.
 */
28 /* 29 * A lot of these includes will be going walkies very soon 30 */ 31
32 #include <asm/segment.h>
33 #include <asm/system.h>
34 #include <asm/bitops.h>
35 #include <linux/config.h>
36 #include <linux/types.h>
37 #include <linux/kernel.h>
38 #include <linux/sched.h>
39 #include <linux/string.h>
40 #include <linux/mm.h>
41 #include <linux/socket.h>
42 #include <linux/sockios.h>
43 #include <linux/in.h>
44 #include <linux/errno.h>
45 #include <linux/interrupt.h>
46 #include <linux/if_ether.h>
47 #include <linux/inet.h>
48 #include <linux/netdevice.h>
49 #include <linux/etherdevice.h>
50 #include "ip.h"
51 #include "route.h"
52 #include <linux/skbuff.h>
53 #include "sock.h"
54 #include "arp.h"
55
56
/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.  Singly linked; taps (ETH_P_ALL) are
 *	kept at the tail by dev_add_pack().
 */

struct packet_type *ptype_base = NULL;

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the bottom half handler (net_bh).  The head is statically
 *	initialised to point at itself, i.e. an empty sk_buff ring.
 */

static struct sk_buff_head backlog =
{
	(struct sk_buff *)&backlog, (struct sk_buff *)&backlog
#ifdef CONFIG_SKB_CHECK
	,SK_HEAD_SKB
#endif
};

/*
 *	We don't overdo the queue or we will thrash memory badly.
 *	Current length of the backlog queue; netif_rx() starts dropping
 *	frames when this passes 100.
 */

static int backlog_size = 0;

/*
 *	The number of sockets open for 'all' protocol use. We have to
 *	know this to copy a buffer the correct number of times.
 */

static int dev_nit=0;
89
/*
 *	Return the lesser of the two values.
 */

static __inline__ unsigned long min(unsigned long a, unsigned long b)
{
	if (a < b)
		return a;
	return b;
}
99
100 /****************************************************************************************** 101
102 Protocol management and registration routines 103
104 *******************************************************************************************/ 105
106
/*
 *	Add a protocol ID to the list.  Ordinary protocols go on the front
 *	of the list; ETH_P_ALL taps must go on the tail so that net_bh's
 *	nitcount accounting works and frames are not leaked.
 *
 *	NOTE(review): the list is updated without cli()/sti() protection —
 *	presumably callers only register at init time when net_bh cannot
 *	run concurrently; verify before reusing elsewhere.
 */

void dev_add_pack(struct packet_type *pt)
{
	struct packet_type *p1;
	pt->next = ptype_base;	/* Pre-link to the head; overwritten below for taps */

	/*
	 *	Don't use copy counts on ETH_P_ALL. Instead keep a global
	 *	count of number of these and use it and pt->copy to decide
	 *	copies
	 */

	pt->copy=0;	/* Assume we will not be copying the buffer before
			 * this routine gets it
			 */

	if(pt->type == htons(ETH_P_ALL))
		dev_nit++;	/* I'd like a /dev/nit too one day 8) */
	else
	{
		/*
		 *	See if we need to copy it - that is another process also
		 *	wishes to receive this type of packet.
		 */
		for (p1 = ptype_base; p1 != NULL; p1 = p1->next)
		{
			if (p1->type == pt->type)
			{
				pt->copy = 1;	/* We will need to copy */
				break;
			}
		}
	}

	/*
	 *	NIT taps must go at the end or net_bh will leak!
	 */

	if (pt->type == htons(ETH_P_ALL))
	{
		pt->next=NULL;
		if(ptype_base==NULL)
			ptype_base=pt;
		else
		{
			/*
			 *	Move to the end of the list
			 */
			for(p1=ptype_base;p1->next!=NULL;p1=p1->next);
			/*
			 *	Hook on the end
			 */
			p1->next=pt;
		}
	}
	else
		/*
		 *	It goes on the start (pt->next was set to the old head above)
		 */
		ptype_base = pt;
}
172
/*
 *	Remove a protocol ID from the list.
 */

void dev_remove_pack(struct packet_type *pt)
{
	struct packet_type *lpt, *pt1;

	/*
	 *	Keep the count of nit (Network Interface Tap) sockets correct.
	 */

	if (pt->type == htons(ETH_P_ALL))
		dev_nit--;

	/*
	 *	If we are first, just unhook us.
	 *	NOTE(review): this head-of-list unhook is done without cli(),
	 *	unlike the mid-list case below — confirm the race is benign.
	 */

	if (pt == ptype_base)
	{
		ptype_base = pt->next;
		return;
	}

	lpt = NULL;	/* Last previous entry seen with the same type */

	/*
	 *	This is harder. What we do is to walk the list of sockets
	 *	for this type. We unhook the entry, and if there is a previous
	 *	entry that is copying _and_ we are not copying, (ie we are the
	 *	last entry for this type) then the previous one is set to
	 *	non-copying as it is now the last.
	 */
	for (pt1 = ptype_base; pt1->next != NULL; pt1 = pt1->next)
	{
		if (pt1->next == pt )
		{
			cli();	/* Unhook atomically w.r.t. net_bh */
			if (!pt->copy && lpt)
				lpt->copy = 0;	/* Previous same-type entry is now last */
			pt1->next = pt->next;
			sti();
			return;
		}
		/* Track the most recent same-type entry (taps excluded) */
		if (pt1->next->type == pt->type && pt->type != htons(ETH_P_ALL))
			lpt = pt1->next;
	}
}
223 /***************************************************************************************** 224
		Device Interface Subroutines
227 ******************************************************************************************/ 228
229 /* 230 * Find an interface by name. 231 */ 232
233 structdevice *dev_get(char *name)
/* */ 234 { 235 structdevice *dev;
236
237 for (dev = dev_base; dev != NULL; dev = dev->next)
238 { 239 if (strcmp(dev->name, name) == 0)
240 return(dev);
241 } 242 return(NULL);
243 } 244
245
246 /* 247 * Prepare an interface for use. 248 */ 249
250 intdev_open(structdevice *dev)
/* */ 251 { 252 intret = 0;
253
254 /* 255 * Call device private open method 256 */ 257 if (dev->open)
258 ret = dev->open(dev);
259
260 /* 261 * If it went open OK then set the flags 262 */ 263
264 if (ret == 0)
265 dev->flags |= (IFF_UP | IFF_RUNNING);
266
267 return(ret);
268 } 269
270
/*
 *	Completely shutdown an interface.
 *
 *	WARNING: Both because of the way the upper layers work (that can be fixed)
 *	and because of races during a close (that can't be fixed any other way)
 *	a device may be given things to transmit EVEN WHEN IT IS DOWN. The driver
 *	MUST cope with this (eg by freeing and dumping the frame).
 */

int dev_close(struct device *dev)
{
	/*
	 *	Only close a device if it is up.
	 *	NOTE(review): this tests flags != 0, not IFF_UP specifically,
	 *	so any stray flag bit will trigger the full teardown.
	 */

	if (dev->flags != 0)
	{
		int ct=0;
		dev->flags = 0;		/* Mark down first so new work is refused */
		/*
		 *	Call the device specific close. This cannot fail.
		 */
		if (dev->stop)
			dev->stop(dev);
		/*
		 *	Delete the route to the device.
		 */
#ifdef CONFIG_INET
		ip_rt_flush(dev);
		arp_device_down(dev);
#endif
#ifdef CONFIG_IPX
		ipxrtr_device_down(dev);
#endif
		/*
		 *	Blank the IP addresses
		 */
		dev->pa_addr = 0;
		dev->pa_dstaddr = 0;
		dev->pa_brdaddr = 0;
		dev->pa_mask = 0;
		/*
		 *	Purge any queued packets when we down the link.
		 *	Only frames marked free may be released here; others
		 *	are owned by a socket and freed elsewhere.
		 */
		while(ct<DEV_NUMBUFFS)
		{
			struct sk_buff *skb;
			while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
				if(skb->free)
					kfree_skb(skb,FREE_WRITE);
			ct++;
		}
	}
	return(0);
}
327
/*
 *	Send (or queue for sending) a packet.
 *
 *	IMPORTANT: When this is called to resend frames. The caller MUST
 *	already have locked the sk_buff. Apart from that we do the
 *	rest of the magic.
 *
 *	A negative pri means "retransmit attempt pulled from the queue
 *	front"; on failure such frames are re-queued at the front.
 */

void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
	unsigned long flags;
	int nitcount;
	struct packet_type *ptype;
	int where = 0;		/* used to say if the packet should go	*/
				/* at the front or the back of the	*/
				/* queue - front is a retransmit try	*/

	if (dev == NULL)
	{
		printk("dev.c: dev_queue_xmit: dev = NULL\n");
		return;
	}

	if(pri>=0 && !skb_device_locked(skb))
		skb_device_lock(skb);	/* Shove a lock on the frame */
#ifdef CONFIG_SLAVE_BALANCING
	/* Divert to the less-loaded slave device if one is up */
	save_flags(flags);
	cli();
	if(dev->slave!=NULL && dev->slave->pkt_queue < dev->pkt_queue &&
				(dev->slave->flags & IFF_UP))
		dev=dev->slave;
	restore_flags(flags);
#endif

	IS_SKB(skb);

	skb->dev = dev;

	/*
	 *	This just eliminates some race conditions, but not all...
	 *	A frame still chained into a queue means a transmit completion
	 *	was missed; kick the driver with a NULL frame to restart it.
	 */

	if (skb->next != NULL)
	{
		/*
		 *	Make sure we haven't missed an interrupt.
		 */
		printk("dev_queue_xmit: worked around a missed interrupt\n");
		dev->hard_start_xmit(NULL, dev);
		return;
	}

	/*
	 *	Negative priority is used to flag a frame that is being pulled from the
	 *	queue front as a retransmit attempt. It therefore goes back on the queue
	 *	start on a failure.
	 */

	if (pri < 0)
	{
		pri = -pri-1;
		where = 1;
	}

	if (pri >= DEV_NUMBUFFS)
	{
		printk("bad priority in dev_queue_xmit.\n");
		pri = 1;
	}

	/*
	 *	If the address has not been resolved. Call the device header rebuilder.
	 *	This can cover all protocols and technically not just ARP either.
	 *	A non-zero return means the frame is parked (e.g. on the arp queue).
	 */

	if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
		skb_device_unlock(skb);	/* It's now safely on the arp queue */
		return;
	}

	save_flags(flags);
	cli();
	if (!where) {
		/*
		 *	Normal case: append the frame, then transmit whatever
		 *	is at the head of the queue (FIFO fairness) — which may
		 *	or may not be the frame we just queued.
		 */
#ifdef CONFIG_SLAVE_BALANCING
		skb->in_dev_queue=1;
#endif
		skb_queue_tail(dev->buffs + pri,skb);
		skb_device_unlock(skb);		/* Buffer is on the device queue and can be freed safely */
		skb = skb_dequeue(dev->buffs + pri);
		skb_device_lock(skb);		/* New buffer needs locking down */
#ifdef CONFIG_SLAVE_BALANCING
		skb->in_dev_queue=0;
#endif
	}
	restore_flags(flags);

	/* copy outgoing packets to any sniffer packet handlers */
	for (nitcount = dev_nit, ptype = ptype_base; nitcount > 0 && ptype != NULL; ptype = ptype->next) {
		if (ptype->type == htons(ETH_P_ALL)) {
			struct sk_buff *skb2;
			if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
				break;
			/* Tap handler owns and frees skb2 */
			ptype->func(skb2, skb->dev, ptype);
			nitcount--;
		}
	}

	if (dev->hard_start_xmit(skb, dev) == 0) {
		/*
		 *	Packet is now solely the responsibility of the driver
		 */
#ifdef CONFIG_SLAVE_BALANCING
		dev->pkt_queue--;
#endif
		return;
	}

	/*
	 *	Transmission failed, put skb back into a list. Once on the list its safe and
	 *	no longer device locked (it can be freed safely from the device queue)
	 */
	cli();
#ifdef CONFIG_SLAVE_BALANCING
	skb->in_dev_queue=1;
	dev->pkt_queue++;
#endif
	skb_device_unlock(skb);
	skb_queue_head(dev->buffs + pri,skb);
	restore_flags(flags);
}
/*
 *	Receive a packet from a device driver and queue it for the upper
 *	(protocol) levels.  It always succeeds (a dropped frame is still a
 *	"success" from the driver's point of view).  This is the recommended
 *	interface to use.
 */

void netif_rx(struct sk_buff *skb)
{
	static int dropping = 0;	/* Latched while the backlog is too long */
	extern struct timeval xtime;	/* Kernel wall clock, for arrival stamps */

	/*
	 *	Any received buffers are un-owned and should be discarded
	 *	when freed. These will be updated later as the frames get
	 *	owners.
	 */
	skb->sk = NULL;
	skb->free = 1;
	if(skb->stamp.tv_sec==0)
		skb->stamp = xtime;	/* Stamp arrival time if the driver didn't */

	/*
	 *	Check that we aren't overdoing things.  Dropping latches on
	 *	above 100 queued frames and only clears once the backlog has
	 *	drained completely (hysteresis).
	 */

	if (!backlog_size)
		dropping = 0;
	else if (backlog_size > 100)
		dropping = 1;

	if (dropping)
	{
		kfree_skb(skb, FREE_READ);
		return;
	}

	/*
	 *	Add it to the "backlog" queue.
	 */

	IS_SKB(skb);
	skb_queue_tail(&backlog,skb);
	backlog_size++;

	/*
	 *	If any packet arrived, mark it for processing after the
	 *	hardware interrupt returns.
	 */

	mark_bh(NET_BH);
	return;
}
512
/*
 *	The old interface to fetch a packet from a device driver.
 *	This function is the base level entry point for all drivers that
 *	want to send a packet to the upper (protocol) levels.  It takes
 *	care of de-multiplexing the packet to the various modules based
 *	on their protocol ID.
 *
 *	Return values:	1 <- exit I can't do any more
 *			0 <- feed me more (i.e. "done", "OK").
 *
 *	This function is OBSOLETE and should not be used by any new
 *	device.
 */

int dev_rint(unsigned char *buff, long len, int flags, struct device *dev)
{
	static int dropping = 0;	/* Set while allocation failures persist */
	struct sk_buff *skb = NULL;
	unsigned char *to;
	int amount, left;
	int len2;

	if (dev == NULL || buff == NULL || len <= 0)
		return(1);

	if (flags & IN_SKBUFF)
	{
		/* Caller already handed us a ready-made sk_buff */
		skb = (struct sk_buff *) buff;
	}
	else
	{
		if (dropping)
		{
			/* Stay in drop mode until the backlog has emptied */
			if (skb_peek(&backlog) != NULL)
				return(1);
			printk("INET: dev_rint: no longer dropping packets.\n");
			dropping = 0;
		}

		skb = alloc_skb(len, GFP_ATOMIC);
		if (skb == NULL)
		{
			printk("dev_rint: packet dropped on %s (no memory) !\n",
			       dev->name);
			dropping = 1;
			return(1);
		}

		/*
		 *	First we copy the packet into a buffer, and save it for later. We
		 *	in effect handle the incoming data as if it were from a circular buffer:
		 *	when buff hits dev->rmem_end it wraps to dev->rmem_start.
		 */

		to = skb->data;
		left = len;

		len2 = len;
		while (len2 > 0)
		{
			/* Copy at most up to the end of the ring in one go */
			amount = min(len2, (unsigned long) dev->rmem_end -
						(unsigned long) buff);
			memcpy(to, buff, amount);
			len2 -= amount;
			left -= amount;
			buff += amount;
			to += amount;
			if ((unsigned long) buff == dev->rmem_end)
				buff = (unsigned char *) dev->rmem_start;
		}
	}

	/*
	 *	Tag the frame and kick it to the proper receive routine
	 */

	skb->len = len;
	skb->dev = dev;
	skb->free = 1;

	netif_rx(skb);
	/*
	 *	OK, all done.
	 */
	return(0);
}
599
600 /* 601 * This routine causes all interfaces to try to send some data. 602 */ 603
604 voiddev_transmit(void)
/* */ 605 { 606 structdevice *dev;
607
608 for (dev = dev_base; dev != NULL; dev = dev->next)
609 { 610 if (dev->flags != 0 && !dev->tbusy) { 611 /* 612 * Kick the device 613 */ 614 dev_tint(dev);
615 } 616 } 617 } 618
619
620 /********************************************************************************** 621
622 Receive Queue Processor 623 624 ***********************************************************************************/ 625
/*
 *	net_bh() below is a single non-reentrant routine which takes the
 *	received packet queue and throws it at the networking layers in
 *	the hope that something useful will emerge.  This flag marks it
 *	as running.
 */

volatile char in_bh = 0;	/* Non-reentrant remember */

/*
 *	Report whether the network bottom half is currently executing.
 *	Used by timer.c.  Returns 1 while net_bh runs, else 0.
 */

int in_net_bh()
{
	if (in_bh == 0)
		return(0);
	return(1);
}
/*
 *	When we are called the queue is ready to grab, the interrupts are
 *	on and hardware can interrupt and queue to the receive queue a we
 *	run with no problems.
 *	This is run as a bottom half after an interrupt handler that does
 *	mark_bh(NET_BH);
 */

void net_bh(void *tmp)
{
	struct sk_buff *skb;
	struct packet_type *ptype;
	unsigned short type;
	unsigned char flag = 0;		/* Set when some handler took the frame */
	int nitcount;			/* Remaining ETH_P_ALL taps for this frame */

	/*
	 *	Atomically check and mark our BUSY state.
	 */

	if (set_bit(1, (void*)&in_bh))
		return;		/* Already running - non-reentrant */

	/*
	 *	Can we send anything now? We want to clear the
	 *	decks for any more sends that get done as we
	 *	process the input.
	 */

	dev_transmit();

	/*
	 *	Any data left to process. This may occur because a
	 *	mark_bh() is done after we empty the queue including
	 *	that from the device which does a mark_bh() just after
	 */

	cli();

	/*
	 *	While the queue is not empty.  Interrupts are disabled only
	 *	around the dequeue; re-enabled (sti below) while processing.
	 */

	while((skb=skb_dequeue(&backlog))!=NULL)
	{
		/*
		 *	We have a packet. Therefore the queue has shrunk
		 */
		backlog_size--;

		nitcount=dev_nit;
		flag=0;
		sti();

		/*
		 *	Bump the pointer to the next structure.
		 *	This assumes that the basic 'skb' pointer points to
		 *	the MAC header, if any (as indicated by its "length"
		 *	field). Take care now!
		 */

		skb->h.raw = skb->data + skb->dev->hard_header_len;
		skb->len -= skb->dev->hard_header_len;

		/*
		 *	Fetch the packet protocol ID. This is also quite ugly, as
		 *	it depends on the protocol driver (the interface itself) to
		 *	know what the type is, or where to get it from. The Ethernet
		 *	interfaces fetch the ID from the two bytes in the Ethernet MAC
		 *	header (the h_proto field in struct ethhdr), but other drivers
		 *	may either use the ethernet ID's or extra ones that do not
		 *	clash (eg ETH_P_AX25). We could set this before we queue the
		 *	frame. In fact I may change this when I have time.
		 */

		type = skb->dev->type_trans(skb, skb->dev);

		/*
		 *	We got a packet ID. Now loop over the "known protocols"
		 *	table (which is actually a linked list, but this will
		 *	change soon if I get my way- FvK), and forward the packet
		 *	to anyone who wants it.
		 *
		 *	[FvK didn't get his way but he is right this ought to be
		 *	hashed so we typically get a single hit. The speed cost
		 *	here is minimal but no doubt adds up at the 4,000+ pkts/second
		 *	rate we can hit flat out]
		 */

		for (ptype = ptype_base; ptype != NULL; ptype = ptype->next)
		{
			if (ptype->type == type || ptype->type == htons(ETH_P_ALL))
			{
				struct sk_buff *skb2;

				if (ptype->type == htons(ETH_P_ALL))
					nitcount--;
				/*
				 *	Clone if another handler also wants this
				 *	frame (ptype->copy) or taps remain; the
				 *	last consumer gets the original.
				 */
				if (ptype->copy || nitcount)
				{
					/*
					 *	copy if we need to
					 */
#ifdef OLD
					skb2 = alloc_skb(skb->len, GFP_ATOMIC);
					if (skb2 == NULL)
						continue;
					memcpy(skb2, skb, skb2->mem_len);
					skb2->mem_addr = skb2;
					skb2->h.raw = (unsigned char *)(
						(unsigned long) skb2 +
						(unsigned long) skb->h.raw -
						(unsigned long) skb
					);
					skb2->free = 1;
#else
					skb2=skb_clone(skb, GFP_ATOMIC);
					if(skb2==NULL)
						continue;
#endif
				}
				else
				{
					skb2 = skb;
				}

				/*
				 *	Protocol located.
				 */

				flag = 1;

				/*
				 *	Kick the protocol handler. This should be fast
				 *	and efficient code.  The handler owns skb2.
				 */

				ptype->func(skb2, skb->dev, ptype);
			}
		}	/* End of protocol list loop */

		/*
		 *	Has an unknown packet has been received ?
		 *	Nobody claimed the original frame, so free it here.
		 */

		if (!flag)
		{
			kfree_skb(skb, FREE_WRITE);
		}

		/*
		 *	Again, see if we can transmit anything now.
		 */

		dev_transmit();
		cli();		/* Back to interrupts-off for the next dequeue */
	}	/* End of queue loop */

	/*
	 *	We have emptied the queue
	 */

	in_bh = 0;
	sti();

	/*
	 *	One last output flush.
	 */

	dev_transmit();
}
810
/*
 *	This routine is called when an device driver (i.e. an
 *	interface) is ready to transmit a packet.  It drains the device's
 *	priority queues through dev_queue_xmit() until either everything
 *	is sent or the device reports busy again.
 */

void dev_tint(struct device *dev)
{
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);
	/*
	 *	Work the queues in priority order
	 */

	for(i = 0;i < DEV_NUMBUFFS; i++)
	{
		/*
		 *	Pull packets from the queue with interrupts off
		 */

		cli();
		while((skb=skb_dequeue(&dev->buffs[i]))!=NULL)
		{
			/*
			 *	Stop anyone freeing the buffer while we retransmit it
			 */
			skb_device_lock(skb);
			restore_flags(flags);
			/*
			 *	Feed them to the output stage and if it fails
			 *	indicate they re-queue at the front.  The negative
			 *	priority encodes "retransmit attempt" for queue i.
			 */
			dev_queue_xmit(skb,dev,-i - 1);
			/*
			 *	If we can take no more then stop here.
			 */
			if (dev->tbusy)
				return;
			cli();
		}
	}
	restore_flags(flags);
}
858
859 /* 860 * Perform a SIOCGIFCONF call. This structure will change 861 * size shortly, and there is nothing I can do about it. 862 * Thus we will need a 'compatibility mode'. 863 */ 864
865 staticintdev_ifconf(char *arg)
/* */ 866 { 867 structifconfifc;
868 structifreqifr;
869 structdevice *dev;
870 char *pos;
871 intlen;
872 interr;
873
874 /* 875 * Fetch the caller's info block. 876 */ 877
878 err=verify_area(VERIFY_WRITE, arg, sizeof(structifconf));
879 if(err)
880 returnerr;
881 memcpy_fromfs(&ifc, arg, sizeof(structifconf));
882 len = ifc.ifc_len;
883 pos = ifc.ifc_buf;
884
885 /* 886 * We now walk the device list filling each active device 887 * into the array. 888 */ 889
890 err=verify_area(VERIFY_WRITE,pos,len);
891 if(err)
892 returnerr;
893
894 /* 895 * Loop over the interfaces, and write an info block for each. 896 */ 897
898 for (dev = dev_base; dev != NULL; dev = dev->next)
899 { 900 if(!(dev->flags & IFF_UP)) /* Downed devices don't count */ 901 continue;
902 memset(&ifr, 0, sizeof(structifreq));
903 strcpy(ifr.ifr_name, dev->name);
904 (*(structsockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
905 (*(structsockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
906
907 /* 908 * Write this block to the caller's space. 909 */ 910
911 memcpy_tofs(pos, &ifr, sizeof(structifreq));
912 pos += sizeof(structifreq);
913 len -= sizeof(structifreq);
914
915 /* 916 * Have we run out of space here ? 917 */ 918
919 if (len < sizeof(structifreq))
920 break;
921 } 922
923 /* 924 * All done. Write the updated control block back to the caller. 925 */ 926
927 ifc.ifc_len = (pos - ifc.ifc_buf);
928 ifc.ifc_req = (structifreq *) ifc.ifc_buf;
929 memcpy_tofs(arg, &ifc, sizeof(structifconf));
930
931 /* 932 * Report how much was filled in 933 */ 934
935 return(pos - arg);
936 } 937
938
/*
 *	This is invoked by the /proc filesystem handler to display a device
 *	in detail.  Formats one line of /proc/net/dev statistics for the
 *	given device into 'buffer' and returns the number of characters
 *	written.  Devices without a get_stats method report a placeholder.
 */

static int sprintf_stats(char *buffer, struct device *dev)
{
	struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
	int size;

	if (stats)
		/* Columns: rx pkts/errs/drop/fifo/frame, tx pkts/errs/drop/fifo/colls/carrier */
		size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
		   dev->name,
		   stats->rx_packets, stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors
		   + stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->tx_packets, stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors + stats->tx_aborted_errors
		   + stats->tx_window_errors + stats->tx_heartbeat_errors);
	else
		size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);

	return size;
}
/*
 *	Called from the PROCfs module. This now uses the new arbitrary sized
 *	/proc/net interface to create /proc/net/dev.  The offset/length pair
 *	selects a window of the full output; *start is set to the first
 *	wanted byte within 'buffer' and the usable length is returned.
 */

int dev_get_info(char *buffer, char **start, off_t offset, int length)
{
	int len=0;		/* Bytes currently held in buffer */
	off_t begin=0;		/* File offset corresponding to buffer[0] */
	off_t pos=0;		/* File offset just past what we've generated */
	int size;

	struct device *dev;


	/* NOTE(review): exact column spacing of this banner reconstructed
	 * from context — verify against the matching /proc/net/dev reader. */
	size = sprintf(buffer, "Inter-|   Receive                  |  Transmit\n"
			    " face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");

	pos+=size;
	len+=size;


	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		size = sprintf_stats(buffer+len, dev);
		len+=size;
		pos=begin+len;

		if(pos<offset)
		{
			/* Everything so far is before the window; discard it */
			len=0;
			begin=pos;
		}
		if(pos>offset+length)
			break;		/* Window filled - stop generating */
	}

	*start=buffer+(offset-begin);	/* Start of wanted data */
	len-=(offset-begin);		/* Start slop */
	if(len>length)
		len=length;		/* Ending slop */
	return len;
}
1011
/*
 *	This checks bitmasks for the ioctl calls for devices.
 *	A mask is bad if 'addr' has bits outside it, or if (in host order)
 *	its set bits are not one contiguous run from the top.
 *	Returns 1 for a bad mask, 0 for a good one.
 */

static inline int bad_mask(unsigned long mask, unsigned long addr)
{
	unsigned long host;

	mask = ~mask;			/* Bits NOT covered by the mask */
	if (addr & mask)
		return 1;		/* Address has bits outside the mask */
	host = ntohl(mask);		/* Inverted mask in host byte order */
	if (host & (host+1))
		return 1;		/* Set bits are not a low contiguous run */
	return 0;
}
/*
 *	Perform the SIOCxIFxxx calls.
 *
 *	The socket layer has seen an ioctl the address family thinks is
 *	for the device. At this point we get invoked to make a decision.
 *	'arg' is a user-space struct ifreq; 'getset' is the ioctl number.
 *	Returns 0 on success or a negative errno.
 */

static int dev_ifsioc(void *arg, unsigned int getset)
{
	struct ifreq ifr;
	struct device *dev;
	int ret;

	/*
	 *	Fetch the caller's info block into kernel space
	 */

	int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
	if(err)
		return err;

	memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));

	/*
	 *	See which interface the caller is talking about.
	 */

	if ((dev = dev_get(ifr.ifr_name)) == NULL)
		return(-ENODEV);

	switch(getset)
	{
		case SIOCGIFFLAGS:	/* Get interface flags */
			ifr.ifr_flags = dev->flags;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;
		case SIOCSIFFLAGS:	/* Set interface flags */
		{
			int old_flags = dev->flags;
#ifdef CONFIG_SLAVE_BALANCING
			if(dev->flags&IFF_SLAVE)
				return -EBUSY;
#endif
			/* Only user-settable flag bits may be written */
			dev->flags = ifr.ifr_flags & (
				IFF_UP | IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
				IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
				IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER);
#ifdef CONFIG_SLAVE_BALANCING
			/* Dropping IFF_MASTER releases any attached slave */
			if(!(dev->flags&IFF_MASTER) && dev->slave)
			{
				dev->slave->flags&=~IFF_SLAVE;
				dev->slave=NULL;
			}
#endif
			/*
			 *	Has promiscuous mode been turned off
			 */
			if ( (old_flags & IFF_PROMISC) && ((dev->flags & IFF_PROMISC) == 0))
				dev->set_multicast_list(dev,0,NULL);

			/*
			 *	Has it been turned on
			 */

			if ( (dev->flags & IFF_PROMISC) && ((old_flags & IFF_PROMISC) == 0))
				dev->set_multicast_list(dev,-1,NULL);

			/*
			 *	Have we downed the interface
			 */

			if ((old_flags & IFF_UP) && ((dev->flags & IFF_UP) == 0))
			{
				ret = dev_close(dev);
			}
			else
			{
				/*
				 *	Have we upped the interface
				 */

				ret = (! (old_flags & IFF_UP) && (dev->flags & IFF_UP))
					? dev_open(dev) : 0;
				/*
				 *	Check the flags.
				 */
				if(ret<0)
					dev->flags&=~IFF_UP;	/* Didnt open so down the if */
			}
		}
		break;

		case SIOCGIFADDR:	/* Get interface address (and family) */
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_port = 0;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFADDR:	/* Set interface address (and family) */
			dev->pa_addr = (*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_addr.s_addr;
			dev->family = ifr.ifr_addr.sa_family;

#ifdef CONFIG_INET
			/* This is naughty. When net-032e comes out It wants moving into the net032
			   code not the kernel. Till then it can sit here (SIGH) */
			dev->pa_mask = ip_get_mask(dev->pa_addr);
#endif
			/* Derive a broadcast address from the new address/mask */
			dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
			ret = 0;
			break;

		case SIOCGIFBRDADDR:	/* Get the broadcast address */
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_port = 0;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFBRDADDR:	/* Set the broadcast address */
			dev->pa_brdaddr = (*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr;
			ret = 0;
			break;

		case SIOCGIFDSTADDR:	/* Get the destination address (for point-to-point links) */
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
			/* NOTE(review): family/port are written through ifr_broadaddr
			 * here, not ifr_dstaddr — harmless only if these share the
			 * same union storage in struct ifreq; verify. */
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_port = 0;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFDSTADDR:	/* Set the destination address (for point-to-point links) */
			dev->pa_dstaddr = (*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr;
			ret = 0;
			break;

		case SIOCGIFNETMASK:	/* Get the netmask for the interface */
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_port = 0;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFNETMASK: 	/* Set the netmask for the interface */
		{
			unsigned long mask = (*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_addr.s_addr;
			ret = -EINVAL;
			/*
			 *	The mask we set must be legal.
			 */
			if (bad_mask(mask,0))
				break;
			dev->pa_mask = mask;
			ret = 0;
		}
		break;

		case SIOCGIFMETRIC:	/* Get the metric on the interface (currently unused) */

			ifr.ifr_metric = dev->metric;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFMETRIC:	/* Set the metric on the interface (currently unused) */
			dev->metric = ifr.ifr_metric;
			ret = 0;
			break;

		case SIOCGIFMTU:	/* Get the MTU of a device */
			ifr.ifr_mtu = dev->mtu;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFMTU:	/* Set the MTU of a device */

			/*
			 *	MTU must be positive and under the page size problem
			 */

			if(ifr.ifr_mtu<1 || ifr.ifr_mtu>3800)
				return -EINVAL;
			dev->mtu = ifr.ifr_mtu;
			ret = 0;
			break;

		case SIOCGIFMEM:	/* Get the per device memory space. We can add this but currently
					   do not support it */
			printk("NET: ioctl(SIOCGIFMEM, 0x%08X)\n", (int)arg);
			ret = -EINVAL;
			break;

		case SIOCSIFMEM:	/* Set the per device memory buffer space. Not applicable in our case */
			printk("NET: ioctl(SIOCSIFMEM, 0x%08X)\n", (int)arg);
			ret = -EINVAL;
			break;

		case OLD_SIOCGIFHWADDR:	/* Get the hardware address. This will change and SIFHWADDR will be added */
			memcpy(ifr.old_ifr_hwaddr,dev->dev_addr, MAX_ADDR_LEN);
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			ret=0;
			break;

		case SIOCGIFHWADDR:	/* Get the hardware address and its type */
			memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
			ifr.ifr_hwaddr.sa_family=dev->type;
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			ret=0;
			break;

		case SIOCSIFHWADDR:	/* Set the hardware (MAC) address, if the driver allows it */
			if(dev->set_mac_address==NULL)
				return -EOPNOTSUPP;
			if(ifr.ifr_hwaddr.sa_family!=dev->type)
				return -EINVAL;
			ret=dev->set_mac_address(dev,ifr.ifr_hwaddr.sa_data);
			break;

		case SIOCDEVPRIVATE:	/* Device private ioctl; fields are copied back to the caller */
			if(dev->do_ioctl==NULL)
				return -EOPNOTSUPP;
			ret=dev->do_ioctl(dev, &ifr);
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			break;

		case SIOCGIFMAP:	/* Get the hardware configuration (I/O, IRQ, DMA...) */
			ifr.ifr_map.mem_start=dev->mem_start;
			ifr.ifr_map.mem_end=dev->mem_end;
			ifr.ifr_map.base_addr=dev->base_addr;
			ifr.ifr_map.irq=dev->irq;
			ifr.ifr_map.dma=dev->dma;
			ifr.ifr_map.port=dev->if_port;
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			ret=0;
			break;

		case SIOCSIFMAP:	/* Set the hardware configuration via the driver */
			if(dev->set_config==NULL)
				return -EOPNOTSUPP;
			return dev->set_config(dev,&ifr.ifr_map);

		case SIOCGIFSLAVE:	/* Report the attached load-balancing slave, if any */
#ifdef CONFIG_SLAVE_BALANCING
			if(dev->slave==NULL)
				return -ENOENT;
			/* NOTE(review): this copies dev->name, not dev->slave->name —
			 * the caller apparently gets the master's own name back;
			 * confirm this is the intended reply. */
			strncpy(ifr.ifr_name,dev->name,sizeof(ifr.ifr_name));
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			ret=0;
#else
			return -ENOENT;
#endif
			break;
#ifdef CONFIG_SLAVE_BALANCING
		case SIOCSIFSLAVE:
		{

		/*
		 *	Fun game. Get the device up and the flags right without
		 *	letting some scummy user confuse us.
		 */
			unsigned long flags;
			struct device *slave=dev_get(ifr.ifr_slave);
			save_flags(flags);
			if(slave==NULL)
			{
				return -ENODEV;
			}
			cli();
			/* Slave must already be up and running */
			if((slave->flags&(IFF_UP|IFF_RUNNING))!=(IFF_UP|IFF_RUNNING))
			{
				restore_flags(flags);
				return -EINVAL;
			}
			/* A slave cannot itself enslave, nor can we double-attach */
			if(dev->flags&IFF_SLAVE)
			{
				restore_flags(flags);
				return -EBUSY;
			}
			if(dev->slave!=NULL)
			{
				restore_flags(flags);
				return -EBUSY;
			}
			if(slave->flags&IFF_SLAVE)
			{
				restore_flags(flags);
				return -EBUSY;
			}
			dev->slave=slave;
			slave->flags|=IFF_SLAVE;
			dev->flags|=IFF_MASTER;
			restore_flags(flags);
			ret=0;
		}
		break;
#endif
		/*
		 *	Unknown ioctl
		 */

		default:
			ret = -EINVAL;
	}
	return(ret);
}
1355
1356 /*1357 * This function handles all "interface"-type I/O control requests. The actual1358 * 'doing' part of this is dev_ifsioc above.1359 */1360
1361 intdev_ioctl(unsignedintcmd, void *arg)
/* */1362 {1363 switch(cmd)
1364 {1365 /*1366 * The old old setup ioctl. Even its name and this entry will soon be1367 * just so much ionization on a backup tape.1368 */1369
1370 caseSIOCGIFCONF:
1371 (void) dev_ifconf((char *) arg);
1372 return 0;
1373
1374 /*1375 * Ioctl calls that can be done by all.1376 */1377
1378 caseSIOCGIFFLAGS:
1379 caseSIOCGIFADDR:
1380 caseSIOCGIFDSTADDR:
1381 caseSIOCGIFBRDADDR:
1382 caseSIOCGIFNETMASK:
1383 caseSIOCGIFMETRIC:
1384 caseSIOCGIFMTU:
1385 caseSIOCGIFMEM:
1386 caseSIOCGIFHWADDR:
1387 caseSIOCSIFHWADDR:
1388 caseOLD_SIOCGIFHWADDR:
1389 caseSIOCGIFSLAVE:
1390 caseSIOCGIFMAP:
1391 returndev_ifsioc(arg, cmd);
1392
1393 /*1394 * Ioctl calls requiring the power of a superuser1395 */1396
1397 caseSIOCSIFFLAGS:
1398 caseSIOCSIFADDR:
1399 caseSIOCSIFDSTADDR:
1400 caseSIOCSIFBRDADDR:
1401 caseSIOCSIFNETMASK:
1402 caseSIOCSIFMETRIC:
1403 caseSIOCSIFMTU:
1404 caseSIOCSIFMEM:
1405 caseSIOCSIFMAP:
1406 caseSIOCSIFSLAVE:
1407 caseSIOCDEVPRIVATE:
1408 if (!suser())
1409 return -EPERM;
1410 returndev_ifsioc(arg, cmd);
1411
1412 caseSIOCSIFLINK:
1413 return -EINVAL;
1414
1415 /*1416 * Unknown ioctl.1417 */1418
1419 default:
1420 return -EINVAL;
1421 }1422 }1423
1424
1425 /*1426 * Initialize the DEV module. At boot time this walks the device list and1427 * unhooks any devices that fail to initialise (normally hardware not 1428 * present) and leaves us with a valid list of present and active devices.1429 *1430 * The PCMICA code may need to change this a little, and add a pair1431 * of register_inet_device() unregister_inet_device() calls. This will be1432 * needed for ethernet as modules support.1433 */1434
1435 voiddev_init(void)
/* */1436 {1437 structdevice *dev, *dev2;
1438
1439 /*1440 * Add the devices.1441 * If the call to dev->init fails, the dev is removed1442 * from the chain disconnecting the device until the1443 * next reboot.1444 */1445
1446 dev2 = NULL;
1447 for (dev = dev_base; dev != NULL; dev=dev->next)
1448 {1449 if (dev->init && dev->init(dev))
1450 {1451 /*1452 * It failed to come up. Unhook it.1453 */1454
1455 if (dev2 == NULL)
1456 dev_base = dev->next;
1457 else1458 dev2->next = dev->next;
1459 }1460 else1461 {1462 dev2 = dev;
1463 }1464 }1465 }