/*
 * NET3	Protocol independent device support routines.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *	Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *			Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *			Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *			Florian la Roche <rzsfl@rz.uni-sb.de>
 *			Alan Cox <gw4pts@gw4pts.ampr.org>
 *			David Hinds <dhinds@allegro.stanford.edu>
 *
 *	Changes:
 *			Alan Cox:	device private ioctl copies fields back.
 *			Alan Cox:	Transmit queue code does relevant stunts to
 *					keep the queue safe.
 *
 *	Cleaned up and recommented by Alan Cox 2nd April 1994. I hope to have
 *	the rest as well commented in the end.
 */
28 /* 29 * A lot of these includes will be going walkies very soon 30 */ 31
32 #include <asm/segment.h>
33 #include <asm/system.h>
34 #include <asm/bitops.h>
35 #include <linux/config.h>
36 #include <linux/types.h>
37 #include <linux/kernel.h>
38 #include <linux/sched.h>
39 #include <linux/string.h>
40 #include <linux/mm.h>
41 #include <linux/socket.h>
42 #include <linux/sockios.h>
43 #include <linux/in.h>
44 #include <linux/errno.h>
45 #include <linux/interrupt.h>
46 #include <linux/if_ether.h>
47 #include <linux/inet.h>
48 #include <linux/netdevice.h>
49 #include <linux/etherdevice.h>
50 #include "ip.h"
51 #include "route.h"
52 #include <linux/skbuff.h>
53 #include "sock.h"
54 #include "arp.h"
55
56
57 /* 58 * The list of packet types we will receive (as opposed to discard) 59 * and the routines to invoke. 60 */ 61
62 structpacket_type *ptype_base = NULL;
63
64 /* 65 * Device drivers call our routines to queue packets here. We empty the 66 * queue in the bottom half handler. 67 */ 68
69 staticstructsk_buff_headbacklog =
70 { 71 (structsk_buff *)&backlog, (structsk_buff *)&backlog 72 #ifdefCONFIG_SKB_CHECK 73 ,SK_HEAD_SKB 74 #endif 75 };
76
77 /* 78 * We don't overdo the queue or we will thrash memory badly. 79 */ 80
81 staticintbacklog_size = 0;
82
83 /* 84 * The number of sockets open for 'all' protocol use. We have to 85 * know this to copy a buffer the correct number of times. 86 */ 87
88 staticintdev_nit=0;
89
/*
 * Return the lesser of the two values.
 */

static __inline__ unsigned long min(unsigned long a, unsigned long b)
{
	if (a < b)
		return a;
	return b;
}
99
100 /****************************************************************************************** 101
102 Protocol management and registration routines 103
104 *******************************************************************************************/ 105
106
107 /* 108 * Add a protocol ID to the list. 109 */ 110
111 voiddev_add_pack(structpacket_type *pt)
/* */ 112 { 113 structpacket_type *p1;
114 pt->next = ptype_base;
115
116 /* 117 * Don't use copy counts on ETH_P_ALL. Instead keep a global 118 * count of number of these and use it and pt->copy to decide 119 * copies 120 */ 121
122 pt->copy=0; /* Assume we will not be copying the buffer before 123 * this routine gets it 124 */ 125
126 if(pt->type == htons(ETH_P_ALL))
127 dev_nit++; /* I'd like a /dev/nit too one day 8) */ 128 else 129 { 130 /* 131 * See if we need to copy it - that is another process also 132 * wishes to receive this type of packet. 133 */ 134 for (p1 = ptype_base; p1 != NULL; p1 = p1->next)
135 { 136 if (p1->type == pt->type)
137 { 138 pt->copy = 1; /* We will need to copy */ 139 break;
140 } 141 } 142 } 143
144 /* 145 * NIT taps must go at the end or net_bh will leak! 146 */ 147
148 if (pt->type == htons(ETH_P_ALL))
149 { 150 pt->next=NULL;
151 if(ptype_base==NULL)
152 ptype_base=pt;
153 else 154 { 155 /* 156 * Move to the end of the list 157 */ 158 for(p1=ptype_base;p1->next!=NULL;p1=p1->next);
159 /* 160 * Hook on the end 161 */ 162 p1->next=pt;
163 } 164 } 165 else 166 /* 167 * It goes on the start 168 */ 169 ptype_base = pt;
170 } 171
172
173 /* 174 * Remove a protocol ID from the list. 175 */ 176
177 voiddev_remove_pack(structpacket_type *pt)
/* */ 178 { 179 structpacket_type *lpt, *pt1;
180
181 /* 182 * Keep the count of nit (Network Interface Tap) sockets correct. 183 */ 184
185 if (pt->type == htons(ETH_P_ALL))
186 dev_nit--;
187
188 /* 189 * If we are first, just unhook us. 190 */ 191
192 if (pt == ptype_base)
193 { 194 ptype_base = pt->next;
195 return;
196 } 197
198 lpt = NULL;
199
200 /* 201 * This is harder. What we do is to walk the list of sockets 202 * for this type. We unhook the entry, and if there is a previous 203 * entry that is copying _and_ we are not copying, (ie we are the 204 * last entry for this type) then the previous one is set to 205 * non-copying as it is now the last. 206 */ 207 for (pt1 = ptype_base; pt1->next != NULL; pt1 = pt1->next)
208 { 209 if (pt1->next == pt )
210 { 211 cli();
212 if (!pt->copy && lpt)
213 lpt->copy = 0;
214 pt1->next = pt->next;
215 sti();
216 return;
217 } 218 if (pt1->next->type == pt->type && pt->type != htons(ETH_P_ALL))
219 lpt = pt1->next;
220 } 221 } 222
/*****************************************************************************************

			Device Interface Subroutines

******************************************************************************************/
229 /* 230 * Find an interface by name. 231 */ 232
233 structdevice *dev_get(char *name)
/* */ 234 { 235 structdevice *dev;
236
237 for (dev = dev_base; dev != NULL; dev = dev->next)
238 { 239 if (strcmp(dev->name, name) == 0)
240 return(dev);
241 } 242 return(NULL);
243 } 244
245
246 /* 247 * Prepare an interface for use. 248 */ 249
250 intdev_open(structdevice *dev)
/* */ 251 { 252 intret = 0;
253
254 /* 255 * Call device private open method 256 */ 257 if (dev->open)
258 ret = dev->open(dev);
259
260 /* 261 * If it went open OK then set the flags 262 */ 263
264 if (ret == 0)
265 dev->flags |= (IFF_UP | IFF_RUNNING);
266
267 return(ret);
268 } 269
270
271 /* 272 * Completely shutdown an interface. 273 * 274 * WARNING: Both because of the way the upper layers work (that can be fixed) 275 * and because of races during a close (that can't be fixed any other way) 276 * a device may be given things to transmit EVEN WHEN IT IS DOWN. The driver 277 * MUST cope with this (eg by freeing and dumping the frame). 278 */ 279
280 intdev_close(structdevice *dev)
/* */ 281 { 282 /* 283 * Only close a device if it is up. 284 */ 285
286 if (dev->flags != 0)
287 { 288 intct=0;
289 dev->flags = 0;
290 /* 291 * Call the device specific close. This cannot fail. 292 */ 293 if (dev->stop)
294 dev->stop(dev);
295 /* 296 * Delete the route to the device. 297 */ 298 #ifdefCONFIG_INET 299 ip_rt_flush(dev);
300 arp_device_down(dev);
301 #endif 302 #ifdefCONFIG_IPX 303 ipxrtr_device_down(dev);
304 #endif 305 /* 306 * Blank the IP addresses 307 */ 308 dev->pa_addr = 0;
309 dev->pa_dstaddr = 0;
310 dev->pa_brdaddr = 0;
311 dev->pa_mask = 0;
312 /* 313 * Purge any queued packets when we down the link 314 */ 315 while(ct<DEV_NUMBUFFS)
316 { 317 structsk_buff *skb;
318 while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
319 if(skb->free)
320 kfree_skb(skb,FREE_WRITE);
321 ct++;
322 } 323 } 324 return(0);
325 } 326
327
328 /* 329 * Send (or queue for sending) a packet. 330 * 331 * IMPORTANT: When this is called to resend frames. The caller MUST 332 * already have locked the sk_buff. Apart from that we do the 333 * rest of the magic. 334 */ 335
336 voiddev_queue_xmit(structsk_buff *skb, structdevice *dev, intpri)
/* */ 337 { 338 unsignedlongflags;
339 intnitcount;
340 structpacket_type *ptype;
341 intwhere = 0; /* used to say if the packet should go */ 342 /* at the front or the back of the */ 343 /* queue - front is a retranmsit try */ 344
345 if (dev == NULL)
346 { 347 printk("dev.c: dev_queue_xmit: dev = NULL\n");
348 return;
349 } 350
351 if(pri>=0 && !skb_device_locked(skb))
352 skb_device_lock(skb); /* Shove a lock on the frame */ 353 #ifdefCONFIG_SLAVE_BALANCING 354 save_flags(flags);
355 cli();
356 if(dev->slave!=NULL && dev->slave->pkt_queue < dev->pkt_queue &&
357 (dev->slave->flags & IFF_UP))
358 dev=dev->slave;
359 restore_flags(flags);
360 #endif 361
362 IS_SKB(skb);
363
364 skb->dev = dev;
365
366 /* 367 * This just eliminates some race conditions, but not all... 368 */ 369
370 if (skb->next != NULL)
371 { 372 /* 373 * Make sure we haven't missed an interrupt. 374 */ 375 printk("dev_queue_xmit: worked around a missed interrupt\n");
376 dev->hard_start_xmit(NULL, dev);
377 return;
378 } 379
380 /* 381 * Negative priority is used to flag a frame that is being pulled from the 382 * queue front as a retransmit attempt. It therefore goes back on the queue 383 * start on a failure. 384 */ 385
386 if (pri < 0)
387 { 388 pri = -pri-1;
389 where = 1;
390 } 391
392 if (pri >= DEV_NUMBUFFS)
393 { 394 printk("bad priority in dev_queue_xmit.\n");
395 pri = 1;
396 } 397
398 /* 399 * If the address has not been resolved. Call the device header rebuilder. 400 * This can cover all protocols and technically not just ARP either. 401 */ 402
403 if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) { 404 skb_device_unlock(skb); /* It's now safely on the arp queue */ 405 return;
406 } 407
408 save_flags(flags);
409 cli();
410 if (!where) { 411 #ifdefCONFIG_SLAVE_BALANCING 412 skb->in_dev_queue=1;
413 #endif 414 skb_queue_tail(dev->buffs + pri,skb);
415 skb_device_unlock(skb); /* Buffer is on the device queue and can be freed safely */ 416 skb = skb_dequeue(dev->buffs + pri);
417 skb_device_lock(skb); /* New buffer needs locking down */ 418 #ifdefCONFIG_SLAVE_BALANCING 419 skb->in_dev_queue=0;
420 #endif 421 } 422 restore_flags(flags);
423
424 /* copy outgoing packets to any sniffer packet handlers */ 425 if(!where)
426 { 427 for (nitcount = dev_nit, ptype = ptype_base; nitcount > 0 && ptype != NULL; ptype = ptype->next)
428 { 429 if (ptype->type == htons(ETH_P_ALL)) { 430 structsk_buff *skb2;
431 if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
432 break;
433 ptype->func(skb2, skb->dev, ptype);
434 nitcount--;
435 } 436 } 437 } 438 if (dev->hard_start_xmit(skb, dev) == 0) { 439 /* 440 * Packet is now solely the responsibility of the driver 441 */ 442 #ifdefCONFIG_SLAVE_BALANCING 443 dev->pkt_queue--;
444 #endif 445 return;
446 } 447
448 /* 449 * Transmission failed, put skb back into a list. Once on the list its safe and 450 * no longer device locked (it can be freed safely from the device queue) 451 */ 452 cli();
453 #ifdefCONFIG_SLAVE_BALANCING 454 skb->in_dev_queue=1;
455 dev->pkt_queue++;
456 #endif 457 skb_device_unlock(skb);
458 skb_queue_head(dev->buffs + pri,skb);
459 restore_flags(flags);
460 } 461
462 /* 463 * Receive a packet from a device driver and queue it for the upper 464 * (protocol) levels. It always succeeds. This is the recommended 465 * interface to use. 466 */ 467
468 voidnetif_rx(structsk_buff *skb)
/* */ 469 { 470 staticintdropping = 0;
471
472 /* 473 * Any received buffers are un-owned and should be discarded 474 * when freed. These will be updated later as the frames get 475 * owners. 476 */ 477 skb->sk = NULL;
478 skb->free = 1;
479 if(skb->stamp.tv_sec==0)
480 skb->stamp = xtime;
481
482 /* 483 * Check that we aren't oevrdoing things. 484 */ 485
486 if (!backlog_size)
487 dropping = 0;
488 elseif (backlog_size > 100)
489 dropping = 1;
490
491 if (dropping)
492 { 493 kfree_skb(skb, FREE_READ);
494 return;
495 } 496
497 /* 498 * Add it to the "backlog" queue. 499 */ 500
501 IS_SKB(skb);
502 skb_queue_tail(&backlog,skb);
503 backlog_size++;
504
505 /* 506 * If any packet arrived, mark it for processing after the 507 * hardware interrupt returns. 508 */ 509
510 mark_bh(NET_BH);
511 return;
512 } 513
514
515 /* 516 * The old interface to fetch a packet from a device driver. 517 * This function is the base level entry point for all drivers that 518 * want to send a packet to the upper (protocol) levels. It takes 519 * care of de-multiplexing the packet to the various modules based 520 * on their protocol ID. 521 * 522 * Return values: 1 <- exit I can't do any more 523 * 0 <- feed me more (i.e. "done", "OK"). 524 * 525 * This function is OBSOLETE and should not be used by any new 526 * device. 527 */ 528
529 intdev_rint(unsignedchar *buff, longlen, intflags, structdevice *dev)
/* */ 530 { 531 staticintdropping = 0;
532 structsk_buff *skb = NULL;
533 unsignedchar *to;
534 intamount, left;
535 intlen2;
536
537 if (dev == NULL || buff == NULL || len <= 0)
538 return(1);
539
540 if (flags & IN_SKBUFF)
541 { 542 skb = (structsk_buff *) buff;
543 } 544 else 545 { 546 if (dropping)
547 { 548 if (skb_peek(&backlog) != NULL)
549 return(1);
550 printk("INET: dev_rint: no longer dropping packets.\n");
551 dropping = 0;
552 } 553
554 skb = alloc_skb(len, GFP_ATOMIC);
555 if (skb == NULL)
556 { 557 printk("dev_rint: packet dropped on %s (no memory) !\n",
558 dev->name);
559 dropping = 1;
560 return(1);
561 } 562
563 /* 564 * First we copy the packet into a buffer, and save it for later. We 565 * in effect handle the incoming data as if it were from a circular buffer 566 */ 567
568 to = skb->data;
569 left = len;
570
571 len2 = len;
572 while (len2 > 0)
573 { 574 amount = min(len2, (unsignedlong) dev->rmem_end -
575 (unsignedlong) buff);
576 memcpy(to, buff, amount);
577 len2 -= amount;
578 left -= amount;
579 buff += amount;
580 to += amount;
581 if ((unsignedlong) buff == dev->rmem_end)
582 buff = (unsignedchar *) dev->rmem_start;
583 } 584 } 585
586 /* 587 * Tag the frame and kick it to the proper receive routine 588 */ 589
590 skb->len = len;
591 skb->dev = dev;
592 skb->free = 1;
593
594 netif_rx(skb);
595 /* 596 * OK, all done. 597 */ 598 return(0);
599 } 600
601
602 /* 603 * This routine causes all interfaces to try to send some data. 604 */ 605
606 voiddev_transmit(void)
/* */ 607 { 608 structdevice *dev;
609
610 for (dev = dev_base; dev != NULL; dev = dev->next)
611 { 612 if (dev->flags != 0 && !dev->tbusy) { 613 /* 614 * Kick the device 615 */ 616 dev_tint(dev);
617 } 618 } 619 } 620
621
622 /********************************************************************************** 623
624 Receive Queue Processor 625 626 ***********************************************************************************/ 627
/*
 * This is a single non-reentrant routine which takes the received packet
 * queue and throws it at the networking layers in the hope that something
 * useful will emerge.
 */

volatile char in_bh = 0;	/* Non-reentrant, remember */

/* Used by timer.c: report whether net_bh is currently running. */
int in_net_bh()
{
	return in_bh != 0;
}
641 /* 642 * When we are called the queue is ready to grab, the interrupts are 643 * on and hardware can interrupt and queue to the receive queue a we 644 * run with no problems. 645 * This is run as a bottom half after an interrupt handler that does 646 * mark_bh(NET_BH); 647 */ 648
649 voidnet_bh(void *tmp)
/* */ 650 { 651 structsk_buff *skb;
652 structpacket_type *ptype;
653 unsignedshorttype;
654 unsignedcharflag = 0;
655 intnitcount;
656
657 /* 658 * Atomically check and mark our BUSY state. 659 */ 660
661 if (set_bit(1, (void*)&in_bh))
662 return;
663
664 /* 665 * Can we send anything now? We want to clear the 666 * decks for any more sends that get done as we 667 * process the input. 668 */ 669
670 dev_transmit();
671
672 /* 673 * Any data left to process. This may occur because a 674 * mark_bh() is done after we empty the queue including 675 * that from the device which does a mark_bh() just after 676 */ 677
678 cli();
679
680 /* 681 * While the queue is not empty 682 */ 683
684 while((skb=skb_dequeue(&backlog))!=NULL)
685 { 686 /* 687 * We have a packet. Therefore the queue has shrunk 688 */ 689 backlog_size--;
690
691 nitcount=dev_nit;
692 flag=0;
693 sti();
694
695 /* 696 * Bump the pointer to the next structure. 697 * This assumes that the basic 'skb' pointer points to 698 * the MAC header, if any (as indicated by its "length" 699 * field). Take care now! 700 */ 701
702 skb->h.raw = skb->data + skb->dev->hard_header_len;
703 skb->len -= skb->dev->hard_header_len;
704
705 /* 706 * Fetch the packet protocol ID. This is also quite ugly, as 707 * it depends on the protocol driver (the interface itself) to 708 * know what the type is, or where to get it from. The Ethernet 709 * interfaces fetch the ID from the two bytes in the Ethernet MAC 710 * header (the h_proto field in struct ethhdr), but other drivers 711 * may either use the ethernet ID's or extra ones that do not 712 * clash (eg ETH_P_AX25). We could set this before we queue the 713 * frame. In fact I may change this when I have time. 714 */ 715
716 type = skb->dev->type_trans(skb, skb->dev);
717
718 /* 719 * We got a packet ID. Now loop over the "known protocols" 720 * table (which is actually a linked list, but this will 721 * change soon if I get my way- FvK), and forward the packet 722 * to anyone who wants it. 723 * 724 * [FvK didn't get his way but he is right this ought to be 725 * hashed so we typically get a single hit. The speed cost 726 * here is minimal but no doubt adds up at the 4,000+ pkts/second 727 * rate we can hit flat out] 728 */ 729
730 for (ptype = ptype_base; ptype != NULL; ptype = ptype->next)
731 { 732 if (ptype->type == type || ptype->type == htons(ETH_P_ALL))
733 { 734 structsk_buff *skb2;
735
736 if (ptype->type == htons(ETH_P_ALL))
737 nitcount--;
738 if (ptype->copy || nitcount)
739 { 740 /* 741 * copy if we need to 742 */ 743 #ifdef OLD
744 skb2 = alloc_skb(skb->len, GFP_ATOMIC);
745 if (skb2 == NULL)
746 continue;
747 memcpy(skb2, skb, skb2->mem_len);
748 skb2->mem_addr = skb2;
749 skb2->h.raw = (unsignedchar *)(
750 (unsignedlong) skb2 +
751 (unsignedlong) skb->h.raw -
752 (unsignedlong) skb 753 );
754 skb2->free = 1;
755 #else 756 skb2=skb_clone(skb, GFP_ATOMIC);
757 if(skb2==NULL)
758 continue;
759 #endif 760 } 761 else 762 { 763 skb2 = skb;
764 } 765
766 /* 767 * Protocol located. 768 */ 769
770 flag = 1;
771
772 /* 773 * Kick the protocol handler. This should be fast 774 * and efficient code. 775 */ 776
777 ptype->func(skb2, skb->dev, ptype);
778 } 779 }/* End of protocol list loop */ 780
781 /* 782 * Has an unknown packet has been received ? 783 */ 784
785 if (!flag)
786 { 787 kfree_skb(skb, FREE_WRITE);
788 } 789
790 /* 791 * Again, see if we can transmit anything now. 792 */ 793
794 dev_transmit();
795 cli();
796 }/* End of queue loop */ 797
798 /* 799 * We have emptied the queue 800 */ 801
802 in_bh = 0;
803 sti();
804
805 /* 806 * One last output flush. 807 */ 808
809 dev_transmit();
810 } 811
812
813 /* 814 * This routine is called when an device driver (i.e. an 815 * interface) is ready to transmit a packet. 816 */ 817
818 voiddev_tint(structdevice *dev)
/* */ 819 { 820 inti;
821 structsk_buff *skb;
822 unsignedlongflags;
823
824 save_flags(flags);
825 /* 826 * Work the queues in priority order 827 */ 828
829 for(i = 0;i < DEV_NUMBUFFS; i++)
830 { 831 /* 832 * Pull packets from the queue 833 */ 834
835
836 cli();
837 while((skb=skb_dequeue(&dev->buffs[i]))!=NULL)
838 { 839 /* 840 * Stop anyone freeing the buffer while we retransmit it 841 */ 842 skb_device_lock(skb);
843 restore_flags(flags);
844 /* 845 * Feed them to the output stage and if it fails 846 * indicate they re-queue at the front. 847 */ 848 dev_queue_xmit(skb,dev,-i - 1);
849 /* 850 * If we can take no more then stop here. 851 */ 852 if (dev->tbusy)
853 return;
854 cli();
855 } 856 } 857 restore_flags(flags);
858 } 859
860
861 /* 862 * Perform a SIOCGIFCONF call. This structure will change 863 * size shortly, and there is nothing I can do about it. 864 * Thus we will need a 'compatibility mode'. 865 */ 866
867 staticintdev_ifconf(char *arg)
/* */ 868 { 869 structifconfifc;
870 structifreqifr;
871 structdevice *dev;
872 char *pos;
873 intlen;
874 interr;
875
876 /* 877 * Fetch the caller's info block. 878 */ 879
880 err=verify_area(VERIFY_WRITE, arg, sizeof(structifconf));
881 if(err)
882 returnerr;
883 memcpy_fromfs(&ifc, arg, sizeof(structifconf));
884 len = ifc.ifc_len;
885 pos = ifc.ifc_buf;
886
887 /* 888 * We now walk the device list filling each active device 889 * into the array. 890 */ 891
892 err=verify_area(VERIFY_WRITE,pos,len);
893 if(err)
894 returnerr;
895
896 /* 897 * Loop over the interfaces, and write an info block for each. 898 */ 899
900 for (dev = dev_base; dev != NULL; dev = dev->next)
901 { 902 if(!(dev->flags & IFF_UP)) /* Downed devices don't count */ 903 continue;
904 memset(&ifr, 0, sizeof(structifreq));
905 strcpy(ifr.ifr_name, dev->name);
906 (*(structsockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
907 (*(structsockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
908
909 /* 910 * Write this block to the caller's space. 911 */ 912
913 memcpy_tofs(pos, &ifr, sizeof(structifreq));
914 pos += sizeof(structifreq);
915 len -= sizeof(structifreq);
916
917 /* 918 * Have we run out of space here ? 919 */ 920
921 if (len < sizeof(structifreq))
922 break;
923 } 924
925 /* 926 * All done. Write the updated control block back to the caller. 927 */ 928
929 ifc.ifc_len = (pos - ifc.ifc_buf);
930 ifc.ifc_req = (structifreq *) ifc.ifc_buf;
931 memcpy_tofs(arg, &ifc, sizeof(structifconf));
932
933 /* 934 * Report how much was filled in 935 */ 936
937 return(pos - arg);
938 } 939
940
941 /* 942 * This is invoked by the /proc filesystem handler to display a device 943 * in detail. 944 */ 945
946 staticintsprintf_stats(char *buffer, structdevice *dev)
/* */ 947 { 948 structenet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
949 intsize;
950
951 if (stats)
952 size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
953 dev->name,
954 stats->rx_packets, stats->rx_errors,
955 stats->rx_dropped + stats->rx_missed_errors,
956 stats->rx_fifo_errors,
957 stats->rx_length_errors + stats->rx_over_errors 958 + stats->rx_crc_errors + stats->rx_frame_errors,
959 stats->tx_packets, stats->tx_errors, stats->tx_dropped,
960 stats->tx_fifo_errors, stats->collisions,
961 stats->tx_carrier_errors + stats->tx_aborted_errors 962 + stats->tx_window_errors + stats->tx_heartbeat_errors);
963 else 964 size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);
965
966 returnsize;
967 } 968
969 /* 970 * Called from the PROCfs module. This now uses the new arbitary sized /proc/net interface 971 * to create /proc/net/dev 972 */ 973
974 intdev_get_info(char *buffer, char **start, off_toffset, intlength)
/* */ 975 { 976 intlen=0;
977 off_tbegin=0;
978 off_tpos=0;
979 intsize;
980
981 structdevice *dev;
982
983
984 size = sprintf(buffer, "Inter-| Receive | Transmit\n"
985 " face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");
986
987 pos+=size;
988 len+=size;
989
990
991 for (dev = dev_base; dev != NULL; dev = dev->next)
992 { 993 size = sprintf_stats(buffer+len, dev);
994 len+=size;
995 pos=begin+len;
996
997 if(pos<offset)
998 { 999 len=0;
1000 begin=pos;
1001 }1002 if(pos>offset+length)
1003 break;
1004 }1005
1006 *start=buffer+(offset-begin); /* Start of wanted data */1007 len-=(offset-begin); /* Start slop */1008 if(len>length)
1009 len=length; /* Ending slop */1010 returnlen;
1011 }1012
1013
/*
 * This checks bitmasks for the ioctl calls for devices.
 *
 * A valid mask (in network byte order) must cover the address and be a
 * contiguous run of high bits.  Returns non-zero for a bad mask.
 */

static inline int bad_mask(unsigned long mask, unsigned long addr)
{
	unsigned long inverse = ~mask;

	/* The address must not have bits outside the mask. */
	if (addr & inverse)
		return 1;

	/* In host order the inverse of a contiguous mask is 2^n - 1. */
	inverse = ntohl(inverse);
	return (inverse & (inverse + 1)) != 0;
}
1028 /*1029 * Perform the SIOCxIFxxx calls. 1030 *1031 * The socket layer has seen an ioctl the address family thinks is1032 * for the device. At this point we get invoked to make a decision1033 */1034
1035 staticintdev_ifsioc(void *arg, unsignedintgetset)
/* */1036 {1037 structifreqifr;
1038 structdevice *dev;
1039 intret;
1040
1041 /*1042 * Fetch the caller's info block into kernel space1043 */1044
1045 interr=verify_area(VERIFY_WRITE, arg, sizeof(structifreq));
1046 if(err)
1047 returnerr;
1048
1049 memcpy_fromfs(&ifr, arg, sizeof(structifreq));
1050
1051 /*1052 * See which interface the caller is talking about. 1053 */1054
1055 if ((dev = dev_get(ifr.ifr_name)) == NULL)
1056 return(-ENODEV);
1057
1058 switch(getset)
1059 {1060 caseSIOCGIFFLAGS: /* Get interface flags */1061 ifr.ifr_flags = dev->flags;
1062 memcpy_tofs(arg, &ifr, sizeof(structifreq));
1063 ret = 0;
1064 break;
1065 caseSIOCSIFFLAGS: /* Set interface flags */1066 {1067 intold_flags = dev->flags;
1068 #ifdefCONFIG_SLAVE_BALANCING1069 if(dev->flags&IFF_SLAVE)
1070 return -EBUSY;
1071 #endif1072 dev->flags = ifr.ifr_flags & (
1073 IFF_UP | IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
1074 IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
1075 IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER);
1076 #ifdefCONFIG_SLAVE_BALANCING1077 if(!(dev->flags&IFF_MASTER) && dev->slave)
1078 {1079 dev->slave->flags&=~IFF_SLAVE;
1080 dev->slave=NULL;
1081 }1082 #endif1083
1084 /*1085 * Has promiscuous mode been turned off1086 */1087 if ( (old_flags & IFF_PROMISC) && ((dev->flags & IFF_PROMISC) == 0))
1088 dev->set_multicast_list(dev,0,NULL);
1089
1090 /*1091 * Has it been turned on1092 */1093
1094 if ( (dev->flags & IFF_PROMISC) && ((old_flags & IFF_PROMISC) == 0))
1095 dev->set_multicast_list(dev,-1,NULL);
1096
1097 /*1098 * Have we downed the interface1099 */1100
1101 if ((old_flags & IFF_UP) && ((dev->flags & IFF_UP) == 0))
1102 {1103 ret = dev_close(dev);
1104 }1105 else1106 {1107 /*1108 * Have we upped the interface 1109 */1110
1111 ret = (! (old_flags & IFF_UP) && (dev->flags & IFF_UP))
1112 ? dev_open(dev) : 0;
1113 /* 1114 * Check the flags.1115 */1116 if(ret<0)
1117 dev->flags&=~IFF_UP; /* Didnt open so down the if */1118 }1119 }1120 break;
1121
1122 caseSIOCGIFADDR: /* Get interface address (and family) */1123 (*(structsockaddr_in *)
1124 &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
1125 (*(structsockaddr_in *)
1126 &ifr.ifr_addr).sin_family = dev->family;
1127 (*(structsockaddr_in *)
1128 &ifr.ifr_addr).sin_port = 0;
1129 memcpy_tofs(arg, &ifr, sizeof(structifreq));
1130 ret = 0;
1131 break;
1132
1133 caseSIOCSIFADDR: /* Set interface address (and family) */1134 dev->pa_addr = (*(structsockaddr_in *)
1135 &ifr.ifr_addr).sin_addr.s_addr;
1136 dev->family = ifr.ifr_addr.sa_family;
1137
1138 #ifdefCONFIG_INET1139 /* This is naughty. When net-032e comes out It wants moving into the net0321140 code not the kernel. Till then it can sit here (SIGH) */1141 dev->pa_mask = ip_get_mask(dev->pa_addr);
1142 #endif1143 dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
1144 ret = 0;
1145 break;
1146
1147 caseSIOCGIFBRDADDR: /* Get the broadcast address */1148 (*(structsockaddr_in *)
1149 &ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
1150 (*(structsockaddr_in *)
1151 &ifr.ifr_broadaddr).sin_family = dev->family;
1152 (*(structsockaddr_in *)
1153 &ifr.ifr_broadaddr).sin_port = 0;
1154 memcpy_tofs(arg, &ifr, sizeof(structifreq));
1155 ret = 0;
1156 break;
1157
1158 caseSIOCSIFBRDADDR: /* Set the broadcast address */1159 dev->pa_brdaddr = (*(structsockaddr_in *)
1160 &ifr.ifr_broadaddr).sin_addr.s_addr;
1161 ret = 0;
1162 break;
1163
1164 caseSIOCGIFDSTADDR: /* Get the destination address (for point-to-point links) */1165 (*(structsockaddr_in *)
1166 &ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
1167 (*(structsockaddr_in *)
1168 &ifr.ifr_broadaddr).sin_family = dev->family;
1169 (*(structsockaddr_in *)
1170 &ifr.ifr_broadaddr).sin_port = 0;
1171 memcpy_tofs(arg, &ifr, sizeof(structifreq));
1172 ret = 0;
1173 break;
1174
1175 caseSIOCSIFDSTADDR: /* Set the destination address (for point-to-point links) */1176 dev->pa_dstaddr = (*(structsockaddr_in *)
1177 &ifr.ifr_dstaddr).sin_addr.s_addr;
1178 ret = 0;
1179 break;
1180
1181 caseSIOCGIFNETMASK: /* Get the netmask for the interface */1182 (*(structsockaddr_in *)
1183 &ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
1184 (*(structsockaddr_in *)
1185 &ifr.ifr_netmask).sin_family = dev->family;
1186 (*(structsockaddr_in *)
1187 &ifr.ifr_netmask).sin_port = 0;
1188 memcpy_tofs(arg, &ifr, sizeof(structifreq));
1189 ret = 0;
1190 break;
1191
1192 caseSIOCSIFNETMASK: /* Set the netmask for the interface */1193 {1194 unsignedlongmask = (*(structsockaddr_in *)
1195 &ifr.ifr_netmask).sin_addr.s_addr;
1196 ret = -EINVAL;
1197 /*1198 * The mask we set must be legal.1199 */1200 if (bad_mask(mask,0))
1201 break;
1202 dev->pa_mask = mask;
1203 ret = 0;
1204 }1205 break;
1206
1207 caseSIOCGIFMETRIC: /* Get the metric on the inteface (currently unused) */1208
1209 ifr.ifr_metric = dev->metric;
1210 memcpy_tofs(arg, &ifr, sizeof(structifreq));
1211 ret = 0;
1212 break;
1213
1214 caseSIOCSIFMETRIC: /* Set the metric on the interface (currently unused) */1215 dev->metric = ifr.ifr_metric;
1216 ret = 0;
1217 break;
1218
1219 caseSIOCGIFMTU: /* Get the MTU of a device */1220 ifr.ifr_mtu = dev->mtu;
1221 memcpy_tofs(arg, &ifr, sizeof(structifreq));
1222 ret = 0;
1223 break;
1224
1225 caseSIOCSIFMTU: /* Set the MTU of a device */1226
1227 /*1228 * MTU must be positive and under the page size problem1229 */1230
1231 if(ifr.ifr_mtu<1 || ifr.ifr_mtu>3800)
1232 return -EINVAL;
1233 dev->mtu = ifr.ifr_mtu;
1234 ret = 0;
1235 break;
1236
1237 caseSIOCGIFMEM: /* Get the per device memory space. We can add this but currently1238 do not support it */1239 printk("NET: ioctl(SIOCGIFMEM, 0x%08X)\n", (int)arg);
1240 ret = -EINVAL;
1241 break;
1242
1243 caseSIOCSIFMEM: /* Set the per device memory buffer space. Not applicable in our case */1244 printk("NET: ioctl(SIOCSIFMEM, 0x%08X)\n", (int)arg);
1245 ret = -EINVAL;
1246 break;
1247
1248 caseOLD_SIOCGIFHWADDR: /* Get the hardware address. This will change and SIFHWADDR will be added */1249 memcpy(ifr.old_ifr_hwaddr,dev->dev_addr, MAX_ADDR_LEN);
1250 memcpy_tofs(arg,&ifr,sizeof(structifreq));
1251 ret=0;
1252 break;
1253
1254 caseSIOCGIFHWADDR:
1255 memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
1256 ifr.ifr_hwaddr.sa_family=dev->type;
1257 memcpy_tofs(arg,&ifr,sizeof(structifreq));
1258 ret=0;
1259 break;
1260
1261 caseSIOCSIFHWADDR:
1262 if(dev->set_mac_address==NULL)
1263 return -EOPNOTSUPP;
1264 if(ifr.ifr_hwaddr.sa_family!=dev->type)
1265 return -EINVAL;
1266 ret=dev->set_mac_address(dev,ifr.ifr_hwaddr.sa_data);
1267 break;
1268
1269 caseSIOCDEVPRIVATE:
1270 if(dev->do_ioctl==NULL)
1271 return -EOPNOTSUPP;
1272 ret=dev->do_ioctl(dev, &ifr);
1273 memcpy_tofs(arg,&ifr,sizeof(structifreq));
1274 break;
1275
1276 caseSIOCGIFMAP:
1277 ifr.ifr_map.mem_start=dev->mem_start;
1278 ifr.ifr_map.mem_end=dev->mem_end;
1279 ifr.ifr_map.base_addr=dev->base_addr;
1280 ifr.ifr_map.irq=dev->irq;
1281 ifr.ifr_map.dma=dev->dma;
1282 ifr.ifr_map.port=dev->if_port;
1283 memcpy_tofs(arg,&ifr,sizeof(structifreq));
1284 ret=0;
1285 break;
1286
1287 caseSIOCSIFMAP:
1288 if(dev->set_config==NULL)
1289 return -EOPNOTSUPP;
1290 returndev->set_config(dev,&ifr.ifr_map);
1291
1292 caseSIOCGIFSLAVE:
1293 #ifdefCONFIG_SLAVE_BALANCING1294 if(dev->slave==NULL)
1295 return -ENOENT;
1296 strncpy(ifr.ifr_name,dev->name,sizeof(ifr.ifr_name));
1297 memcpy_tofs(arg,&ifr,sizeof(structifreq));
1298 ret=0;
1299 #else1300 return -ENOENT;
1301 #endif1302 break;
1303 #ifdefCONFIG_SLAVE_BALANCING1304 caseSIOCSIFSLAVE:
1305 {1306
1307 /*1308 * Fun game. Get the device up and the flags right without1309 * letting some scummy user confuse us.1310 */1311 unsignedlongflags;
1312 structdevice *slave=dev_get(ifr.ifr_slave);
1313 save_flags(flags);
1314 if(slave==NULL)
1315 {1316 return -ENODEV;
1317 }1318 cli();
1319 if((slave->flags&(IFF_UP|IFF_RUNNING))!=(IFF_UP|IFF_RUNNING))
1320 {1321 restore_flags(flags);
1322 return -EINVAL;
1323 }1324 if(dev->flags&IFF_SLAVE)
1325 {1326 restore_flags(flags);
1327 return -EBUSY;
1328 }1329 if(dev->slave!=NULL)
1330 {1331 restore_flags(flags);
1332 return -EBUSY;
1333 }1334 if(slave->flags&IFF_SLAVE)
1335 {1336 restore_flags(flags);
1337 return -EBUSY;
1338 }1339 dev->slave=slave;
1340 slave->flags|=IFF_SLAVE;
1341 dev->flags|=IFF_MASTER;
1342 restore_flags(flags);
1343 ret=0;
1344 }1345 break;
1346 #endif1347 /*1348 * Unknown ioctl1349 */1350
1351 default:
1352 ret = -EINVAL;
1353 }1354 return(ret);
1355 }1356
1357
1358 /*1359 * This function handles all "interface"-type I/O control requests. The actual1360 * 'doing' part of this is dev_ifsioc above.1361 */1362
1363 intdev_ioctl(unsignedintcmd, void *arg)
/* */1364 {1365 switch(cmd)
1366 {1367 /*1368 * The old old setup ioctl. Even its name and this entry will soon be1369 * just so much ionization on a backup tape.1370 */1371
1372 caseSIOCGIFCONF:
1373 (void) dev_ifconf((char *) arg);
1374 return 0;
1375
1376 /*1377 * Ioctl calls that can be done by all.1378 */1379
1380 caseSIOCGIFFLAGS:
1381 caseSIOCGIFADDR:
1382 caseSIOCGIFDSTADDR:
1383 caseSIOCGIFBRDADDR:
1384 caseSIOCGIFNETMASK:
1385 caseSIOCGIFMETRIC:
1386 caseSIOCGIFMTU:
1387 caseSIOCGIFMEM:
1388 caseSIOCGIFHWADDR:
1389 caseSIOCSIFHWADDR:
1390 caseOLD_SIOCGIFHWADDR:
1391 caseSIOCGIFSLAVE:
1392 caseSIOCGIFMAP:
1393 returndev_ifsioc(arg, cmd);
1394
1395 /*1396 * Ioctl calls requiring the power of a superuser1397 */1398
1399 caseSIOCSIFFLAGS:
1400 caseSIOCSIFADDR:
1401 caseSIOCSIFDSTADDR:
1402 caseSIOCSIFBRDADDR:
1403 caseSIOCSIFNETMASK:
1404 caseSIOCSIFMETRIC:
1405 caseSIOCSIFMTU:
1406 caseSIOCSIFMEM:
1407 caseSIOCSIFMAP:
1408 caseSIOCSIFSLAVE:
1409 caseSIOCDEVPRIVATE:
1410 if (!suser())
1411 return -EPERM;
1412 returndev_ifsioc(arg, cmd);
1413
1414 caseSIOCSIFLINK:
1415 return -EINVAL;
1416
1417 /*1418 * Unknown ioctl.1419 */1420
1421 default:
1422 return -EINVAL;
1423 }1424 }1425
1426
1427 /*1428 * Initialize the DEV module. At boot time this walks the device list and1429 * unhooks any devices that fail to initialise (normally hardware not 1430 * present) and leaves us with a valid list of present and active devices.1431 *1432 * The PCMICA code may need to change this a little, and add a pair1433 * of register_inet_device() unregister_inet_device() calls. This will be1434 * needed for ethernet as modules support.1435 */1436
1437 voiddev_init(void)
/* */1438 {1439 structdevice *dev, *dev2;
1440
1441 /*1442 * Add the devices.1443 * If the call to dev->init fails, the dev is removed1444 * from the chain disconnecting the device until the1445 * next reboot.1446 */1447
1448 dev2 = NULL;
1449 for (dev = dev_base; dev != NULL; dev=dev->next)
1450 {1451 if (dev->init && dev->init(dev))
1452 {1453 /*1454 * It failed to come up. Unhook it.1455 */1456
1457 if (dev2 == NULL)
1458 dev_base = dev->next;
1459 else1460 dev2->next = dev->next;
1461 }1462 else1463 {1464 dev2 = dev;
1465 }1466 }1467 }