/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 * 		Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dhinds@allegro.stanford.edu>
 *
 *	Changes:
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant stunts to
 *					keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *
 *	Cleaned up and recommented by Alan Cox 2nd April 1994. I hope to have
 *	the rest as well commented in the end.
 */
/*
 *	A lot of these includes will be going walkies very soon
 */
#include <asm/segment.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "ip.h"
#include "route.h"
#include <linux/skbuff.h>
#include "sock.h"
#include "arp.h"

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 */

struct packet_type *ptype_base = NULL;

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the bottom half handler.
 */

static struct sk_buff_head backlog =
	{
		(struct sk_buff *)&backlog, (struct sk_buff *)&backlog
#ifdef CONFIG_SKB_CHECK
		,SK_HEAD_SKB
#endif
	};

/*
 *	We don't overdo the queue or we will thrash memory badly.
 */

static int backlog_size = 0;

/*
 *	The number of sockets open for 'all' protocol use. We have to
 *	know this to copy a buffer the correct number of times.
 */

static int dev_nit=0;

/*
 *	Return the lesser of the two values.
 */

static __inline__ unsigned long min(unsigned long a, unsigned long b)
{
	return (a < b)? a : b;
}

/******************************************************************************************

		Protocol management and registration routines

*******************************************************************************************/

/*
 *	Add a protocol ID to the list.
 */
void dev_add_pack(struct packet_type *pt)
{
	struct packet_type *p1;
	pt->next = ptype_base;

	/*
	 *	Don't use copy counts on ETH_P_ALL. Instead keep a global
	 *	count of number of these and use it and pt->copy to decide
	 *	copies
	 */

	pt->copy=0;	/* Assume we will not be copying the buffer before
			 * this routine gets it
			 */

	if(pt->type == htons(ETH_P_ALL))
		dev_nit++;	/* I'd like a /dev/nit too one day 8) */
	else
	{
		/*
		 *	See if we need to copy it - that is another process also
		 *	wishes to receive this type of packet.
		 */
		for (p1 = ptype_base; p1 != NULL; p1 = p1->next)
		{
			if (p1->type == pt->type)
			{
				pt->copy = 1;	/* We will need to copy */
				break;
			}
		}
	}

	/*
	 *	NIT taps must go at the end or net_bh will leak!
	 */

	if (pt->type == htons(ETH_P_ALL))
	{
		pt->next=NULL;
		if(ptype_base==NULL)
			ptype_base=pt;
		else
		{
			/*
			 *	Move to the end of the list
			 */
			for(p1=ptype_base;p1->next!=NULL;p1=p1->next);
			/*
			 *	Hook on the end
			 */
			p1->next=pt;
		}
	}
	else
		/*
		 *	It goes on the start
		 */
		ptype_base = pt;
}

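/*
 *	Illustrative sketch only (not part of this file): a protocol module
 *	would typically register a receive handler roughly as below. The names
 *	myproto_rcv, myproto_packet_type and ETH_P_MYPROTO are assumptions for
 *	the example; dev_add_pack() fills in the copy and next fields itself,
 *	and net_bh() later calls the func hook for each matching frame.
 *
 *	static int myproto_rcv(struct sk_buff *skb, struct device *dev,
 *			       struct packet_type *pt);
 *
 *	static struct packet_type myproto_packet_type;
 *
 *	void myproto_init(void)
 *	{
 *		myproto_packet_type.type = htons(ETH_P_MYPROTO);
 *		myproto_packet_type.func = myproto_rcv;
 *		dev_add_pack(&myproto_packet_type);
 *	}
 */
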
/*
 *	Remove a protocol ID from the list.
 */

void dev_remove_pack(struct packet_type *pt)
{
	struct packet_type *lpt, *pt1;

	/*
	 *	Keep the count of nit (Network Interface Tap) sockets correct.
	 */

	if (pt->type == htons(ETH_P_ALL))
		dev_nit--;

	/*
	 *	If we are first, just unhook us.
	 */

	if (pt == ptype_base)
	{
		ptype_base = pt->next;
		return;
	}

	lpt = NULL;

	/*
	 *	This is harder. What we do is to walk the list of sockets
	 *	for this type. We unhook the entry, and if there is a previous
	 *	entry that is copying _and_ we are not copying, (ie we are the
	 *	last entry for this type) then the previous one is set to
	 *	non-copying as it is now the last.
	 */
	for (pt1 = ptype_base; pt1->next != NULL; pt1 = pt1->next)
	{
		if (pt1->next == pt )
		{
			cli();
			if (!pt->copy && lpt)
				lpt->copy = 0;
			pt1->next = pt->next;
			sti();
			return;
		}
		if (pt1->next->type == pt->type && pt->type != htons(ETH_P_ALL))
			lpt = pt1->next;
	}
}

/*****************************************************************************************

		Device Interface Subroutines

******************************************************************************************/

/*
 *	Find an interface by name.
 */
struct device *dev_get(char *name)
{
	struct device *dev;

	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		if (strcmp(dev->name, name) == 0)
			return(dev);
	}
	return(NULL);
}

/*
 *	Prepare an interface for use.
 */

int dev_open(struct device *dev)
{
	int ret = 0;

	/*
	 *	Call device private open method
	 */
	if (dev->open)
		ret = dev->open(dev);

	/*
	 *	If it went open OK then set the flags
	 */

	if (ret == 0)
		dev->flags |= (IFF_UP | IFF_RUNNING);

	return(ret);
}

/*
 *	Completely shutdown an interface.
 *
 *	WARNING: Both because of the way the upper layers work (that can be fixed)
 *	and because of races during a close (that can't be fixed any other way)
 *	a device may be given things to transmit EVEN WHEN IT IS DOWN. The driver
 *	MUST cope with this (eg by freeing and dumping the frame).
 */

int dev_close(struct device *dev)
{
	/*
	 *	Only close a device if it is up.
	 */

	if (dev->flags != 0)
	{
		int ct=0;
		dev->flags = 0;
		/*
		 *	Call the device specific close. This cannot fail.
		 */
		if (dev->stop)
			dev->stop(dev);
		/*
		 *	Delete the route to the device.
		 */
#ifdef CONFIG_INET
		ip_rt_flush(dev);
		arp_device_down(dev);
#endif
#ifdef CONFIG_IPX
		ipxrtr_device_down(dev);
#endif
		/*
		 *	Blank the IP addresses
		 */
		dev->pa_addr = 0;
		dev->pa_dstaddr = 0;
		dev->pa_brdaddr = 0;
		dev->pa_mask = 0;
		/*
		 *	Purge any queued packets when we down the link
		 */
		while(ct<DEV_NUMBUFFS)
		{
			struct sk_buff *skb;
			while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
				if(skb->free)
					kfree_skb(skb,FREE_WRITE);
			ct++;
		}
	}
	return(0);
}

/*
 *	Send (or queue for sending) a packet.
 *
 *	IMPORTANT: When this is called to resend frames, the caller MUST
 *	already have locked the sk_buff. Apart from that we do the
 *	rest of the magic.
 */

void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
	unsigned long flags;
	int nitcount;
	struct packet_type *ptype;
	int where = 0;		/* used to say if the packet should go	*/
				/* at the front or the back of the	*/
				/* queue - front is a retransmit try	*/

	if (dev == NULL)
	{
		printk("dev.c: dev_queue_xmit: dev = NULL\n");
		return;
	}

	if(pri>=0 && !skb_device_locked(skb))
		skb_device_lock(skb);	/* Shove a lock on the frame */
#ifdef CONFIG_SLAVE_BALANCING
	save_flags(flags);
	cli();
	if(dev->slave!=NULL && dev->slave->pkt_queue < dev->pkt_queue &&
				(dev->slave->flags & IFF_UP))
		dev=dev->slave;
	restore_flags(flags);
#endif

	IS_SKB(skb);

	skb->dev = dev;

	/*
	 *	This just eliminates some race conditions, but not all...
	 */

	if (skb->next != NULL)
	{
		/*
		 *	Make sure we haven't missed an interrupt.
		 */
		printk("dev_queue_xmit: worked around a missed interrupt\n");
		dev->hard_start_xmit(NULL, dev);
		return;
	}

	/*
	 *	Negative priority is used to flag a frame that is being pulled from the
	 *	queue front as a retransmit attempt. It therefore goes back on the queue
	 *	start on a failure.
	 */

	if (pri < 0)
	{
		pri = -pri-1;
		where = 1;
	}

	if (pri >= DEV_NUMBUFFS)
	{
		printk("bad priority in dev_queue_xmit.\n");
		pri = 1;
	}

	/*
	 *	If the address has not been resolved, call the device header rebuilder.
	 *	This can cover all protocols and technically not just ARP either.
	 */

	if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
		return;
	}

	save_flags(flags);
	cli();
	if (!where) {
#ifdef CONFIG_SLAVE_BALANCING
		skb->in_dev_queue=1;
#endif
		skb_queue_tail(dev->buffs + pri,skb);
		skb_device_unlock(skb);		/* Buffer is on the device queue and can be freed safely */
		skb = skb_dequeue(dev->buffs + pri);
		skb_device_lock(skb);		/* New buffer needs locking down */
#ifdef CONFIG_SLAVE_BALANCING
		skb->in_dev_queue=0;
#endif
	}
	restore_flags(flags);

	/* copy outgoing packets to any sniffer packet handlers */
	if(!where)
	{
		for (nitcount = dev_nit, ptype = ptype_base; nitcount > 0 && ptype != NULL; ptype = ptype->next)
		{
			if (ptype->type == htons(ETH_P_ALL)) {
				struct sk_buff *skb2;
				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
					break;
				ptype->func(skb2, skb->dev, ptype);
				nitcount--;
			}
		}
	}

	if (dev->hard_start_xmit(skb, dev) == 0) {
		/*
		 *	Packet is now solely the responsibility of the driver
		 */
#ifdef CONFIG_SLAVE_BALANCING
		dev->pkt_queue--;
#endif
		return;
	}

	/*
	 *	Transmission failed, put skb back into a list. Once on the list it's safe and
	 *	no longer device locked (it can be freed safely from the device queue)
	 */
	cli();
#ifdef CONFIG_SLAVE_BALANCING
	skb->in_dev_queue=1;
	dev->pkt_queue++;
#endif
	skb_device_unlock(skb);
	skb_queue_head(dev->buffs + pri,skb);
	restore_flags(flags);
}

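/*
 *	Illustrative sketch only: how dev_queue_xmit() is normally driven.
 *	A protocol layer hands over a freshly built frame with a non-negative
 *	priority, while dev_tint() below re-queues frames it has pulled off a
 *	queue by passing the negative form of that priority. SOPRI_NORMAL is
 *	used here only as an assumed priority constant for the example; any
 *	value in 0..DEV_NUMBUFFS-1 follows the same path.
 *
 *	skb->dev = dev;
 *	skb->free = 1;
 *	dev_queue_xmit(skb, dev, SOPRI_NORMAL);	(fresh frame, tail of queue)
 *
 *	dev_queue_xmit(skb, dev, -i - 1);	(retransmit, back to queue head)
 */
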
/*
 *	Receive a packet from a device driver and queue it for the upper
 *	(protocol) levels. It always succeeds. This is the recommended
 *	interface to use.
 */

void netif_rx(struct sk_buff *skb)
{
	static int dropping = 0;

	/*
	 *	Any received buffers are un-owned and should be discarded
	 *	when freed. These will be updated later as the frames get
	 *	owners.
	 */
	skb->sk = NULL;
	skb->free = 1;
	if(skb->stamp.tv_sec==0)
		skb->stamp = xtime;

	/*
	 *	Check that we aren't overdoing things.
	 */

	if (!backlog_size)
		dropping = 0;
	else if (backlog_size > 100)
		dropping = 1;

	if (dropping)
	{
		kfree_skb(skb, FREE_READ);
		return;
	}

	/*
	 *	Add it to the "backlog" queue.
	 */

	IS_SKB(skb);
	skb_queue_tail(&backlog,skb);
	backlog_size++;

	/*
	 *	If any packet arrived, mark it for processing after the
	 *	hardware interrupt returns.
	 */

	mark_bh(NET_BH);
	return;
}

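/*
 *	Illustrative sketch only: the receive path a typical driver interrupt
 *	handler follows when handing a frame to netif_rx(). This mirrors what
 *	dev_rint() below does for the old-style interface; "length" and the
 *	source of the packet data are assumptions for the example. netif_rx()
 *	itself clears skb->sk, sets skb->free and marks NET_BH.
 *
 *	struct sk_buff *skb = alloc_skb(length, GFP_ATOMIC);
 *	if (skb == NULL)
 *		return;				(no memory - drop the frame)
 *	memcpy(skb->data, packet_data, length);
 *	skb->len = length;
 *	skb->dev = dev;
 *	netif_rx(skb);
 */
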
/*
 *	The old interface to fetch a packet from a device driver.
 *	This function is the base level entry point for all drivers that
 *	want to send a packet to the upper (protocol) levels. It takes
 *	care of de-multiplexing the packet to the various modules based
 *	on their protocol ID.
 *
 *	Return values:	1 <- exit I can't do any more
 *			0 <- feed me more (i.e. "done", "OK").
 *
 *	This function is OBSOLETE and should not be used by any new
 *	device.
 */

int dev_rint(unsigned char *buff, long len, int flags, struct device *dev)
{
	static int dropping = 0;
	struct sk_buff *skb = NULL;
	unsigned char *to;
	int amount, left;
	int len2;

	if (dev == NULL || buff == NULL || len <= 0)
		return(1);

	if (flags & IN_SKBUFF)
	{
		skb = (struct sk_buff *) buff;
	}
	else
	{
		if (dropping)
		{
			if (skb_peek(&backlog) != NULL)
				return(1);
			printk("INET: dev_rint: no longer dropping packets.\n");
			dropping = 0;
		}

		skb = alloc_skb(len, GFP_ATOMIC);
		if (skb == NULL)
		{
			printk("dev_rint: packet dropped on %s (no memory) !\n",
			       dev->name);
			dropping = 1;
			return(1);
		}

		/*
		 *	First we copy the packet into a buffer, and save it for later. We
		 *	in effect handle the incoming data as if it were from a circular buffer
		 */

		to = skb->data;
		left = len;

		len2 = len;
		while (len2 > 0)
		{
			amount = min(len2, (unsigned long) dev->rmem_end -
						(unsigned long) buff);
			memcpy(to, buff, amount);
			len2 -= amount;
			left -= amount;
			buff += amount;
			to += amount;
			if ((unsigned long) buff == dev->rmem_end)
				buff = (unsigned char *) dev->rmem_start;
		}
	}

	/*
	 *	Tag the frame and kick it to the proper receive routine
	 */

	skb->len = len;
	skb->dev = dev;
	skb->free = 1;

	netif_rx(skb);
	/*
	 *	OK, all done.
	 */
	return(0);
}

/*
 *	This routine causes all interfaces to try to send some data.
 */

void dev_transmit(void)
{
	struct device *dev;

	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		if (dev->flags != 0 && !dev->tbusy) {
			/*
			 *	Kick the device
			 */
			dev_tint(dev);
		}
	}
}

/**********************************************************************************

			Receive Queue Processor

***********************************************************************************/

/*
 *	This is a single non-reentrant routine which takes the received packet
 *	queue and throws it at the networking layers in the hope that something
 *	useful will emerge.
 */

volatile char in_bh = 0;	/* Non-reentrant remember */

int in_net_bh()	/* Used by timer.c */
{
	return(in_bh==0?0:1);
}

/*
 *	When we are called the queue is ready to grab, the interrupts are
 *	on and hardware can interrupt and queue to the receive queue as we
 *	run with no problems.
 *	This is run as a bottom half after an interrupt handler that does
 *	mark_bh(NET_BH);
 */
void net_bh(void *tmp)
{
	struct sk_buff *skb;
	struct packet_type *ptype;
	unsigned short type;
	unsigned char flag = 0;
	int nitcount;

	/*
	 *	Atomically check and mark our BUSY state.
	 */

	if (set_bit(1, (void*)&in_bh))
		return;

	/*
	 *	Can we send anything now? We want to clear the
	 *	decks for any more sends that get done as we
	 *	process the input.
	 */

	dev_transmit();

	/*
	 *	Any data left to process? This may occur because a
	 *	mark_bh() is done after we empty the queue, including
	 *	one from a device which does its mark_bh() just after.
	 */

	cli();

	/*
	 *	While the queue is not empty
	 */

	while((skb=skb_dequeue(&backlog))!=NULL)
	{
		/*
		 *	We have a packet. Therefore the queue has shrunk
		 */
		backlog_size--;

		nitcount=dev_nit;
		flag=0;
		sti();

		/*
		 *	Bump the pointer to the next structure.
		 *	This assumes that the basic 'skb' pointer points to
		 *	the MAC header, if any (as indicated by its "length"
		 *	field). Take care now!
		 */

		skb->h.raw = skb->data + skb->dev->hard_header_len;
		skb->len -= skb->dev->hard_header_len;

		/*
		 *	Fetch the packet protocol ID. This is also quite ugly, as
		 *	it depends on the protocol driver (the interface itself) to
		 *	know what the type is, or where to get it from. The Ethernet
		 *	interfaces fetch the ID from the two bytes in the Ethernet MAC
		 *	header (the h_proto field in struct ethhdr), but other drivers
		 *	may either use the ethernet IDs or extra ones that do not
		 *	clash (eg ETH_P_AX25). We could set this before we queue the
		 *	frame. In fact I may change this when I have time.
		 */

		type = skb->dev->type_trans(skb, skb->dev);

		/*
		 *	We got a packet ID. Now loop over the "known protocols"
		 *	table (which is actually a linked list, but this will
		 *	change soon if I get my way- FvK), and forward the packet
		 *	to anyone who wants it.
		 *
		 *	[FvK didn't get his way but he is right this ought to be
		 *	hashed so we typically get a single hit. The speed cost
		 *	here is minimal but no doubt adds up at the 4,000+ pkts/second
		 *	rate we can hit flat out]
		 */

		for (ptype = ptype_base; ptype != NULL; ptype = ptype->next)
		{
			if (ptype->type == type || ptype->type == htons(ETH_P_ALL))
			{
				struct sk_buff *skb2;

				if (ptype->type == htons(ETH_P_ALL))
					nitcount--;
				if (ptype->copy || nitcount)
				{
					/*
					 *	copy if we need to
					 */
#ifdef OLD
					skb2 = alloc_skb(skb->len, GFP_ATOMIC);
					if (skb2 == NULL)
						continue;
					memcpy(skb2, skb, skb2->mem_len);
					skb2->mem_addr = skb2;
					skb2->h.raw = (unsigned char *)(
						(unsigned long) skb2 +
						(unsigned long) skb->h.raw -
						(unsigned long) skb
					);
					skb2->free = 1;
#else
					skb2=skb_clone(skb, GFP_ATOMIC);
					if(skb2==NULL)
						continue;
#endif
				}
				else
				{
					skb2 = skb;
				}

				/*
				 *	Protocol located.
				 */

				flag = 1;

				/*
				 *	Kick the protocol handler. This should be fast
				 *	and efficient code.
				 */

				ptype->func(skb2, skb->dev, ptype);
			}
		}	/* End of protocol list loop */

		/*
		 *	Has an unknown packet been received ?
		 */

		if (!flag)
		{
			kfree_skb(skb, FREE_WRITE);
		}

		/*
		 *	Again, see if we can transmit anything now.
		 */

		dev_transmit();
		cli();
	}	/* End of queue loop */

	/*
	 *	We have emptied the queue
	 */

	in_bh = 0;
	sti();

	/*
	 *	One last output flush.
	 */

	dev_transmit();
}

/*
 *	This routine is called when a device driver (i.e. an
 *	interface) is ready to transmit a packet.
 */

void dev_tint(struct device *dev)
{
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);
	/*
	 *	Work the queues in priority order
	 */

	for(i = 0;i < DEV_NUMBUFFS; i++)
	{
		/*
		 *	Pull packets from the queue
		 */

		cli();
		while((skb=skb_dequeue(&dev->buffs[i]))!=NULL)
		{
			/*
			 *	Stop anyone freeing the buffer while we retransmit it
			 */
			skb_device_lock(skb);
			restore_flags(flags);
			/*
			 *	Feed them to the output stage and if it fails
			 *	indicate they re-queue at the front.
			 */
			dev_queue_xmit(skb,dev,-i - 1);
			/*
			 *	If we can take no more then stop here.
			 */
			if (dev->tbusy)
				return;
			cli();
		}
	}
	restore_flags(flags);
}

/*
 *	Perform a SIOCGIFCONF call. This structure will change
 *	size shortly, and there is nothing I can do about it.
 *	Thus we will need a 'compatibility mode'.
 */

static int dev_ifconf(char *arg)
{
	struct ifconf ifc;
	struct ifreq ifr;
	struct device *dev;
	char *pos;
	int len;
	int err;

	/*
	 *	Fetch the caller's info block.
	 */

	err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
	if(err)
		return err;
	memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
	len = ifc.ifc_len;
	pos = ifc.ifc_buf;

	/*
	 *	We now walk the device list filling each active device
	 *	into the array.
	 */

	err=verify_area(VERIFY_WRITE,pos,len);
	if(err)
		return err;

	/*
	 *	Loop over the interfaces, and write an info block for each.
	 */

	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		if(!(dev->flags & IFF_UP))	/* Downed devices don't count */
			continue;
		memset(&ifr, 0, sizeof(struct ifreq));
		strcpy(ifr.ifr_name, dev->name);
		(*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
		(*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;

		/*
		 *	Write this block to the caller's space.
		 */

		memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
		pos += sizeof(struct ifreq);
		len -= sizeof(struct ifreq);

		/*
		 *	Have we run out of space here ?
		 */

		if (len < sizeof(struct ifreq))
			break;
	}

	/*
	 *	All done. Write the updated control block back to the caller.
	 */

	ifc.ifc_len = (pos - ifc.ifc_buf);
	ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
	memcpy_tofs(arg, &ifc, sizeof(struct ifconf));

	/*
	 *	Report how much was filled in
	 */

	return(pos - arg);
}

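/*
 *	Illustrative sketch only: what a user process does to drive the
 *	SIOCGIFCONF path above. The buffer size of 16 entries is an assumption
 *	for the example.
 *
 *	struct ifreq reqs[16];
 *	struct ifconf ifc;
 *	int n, fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	ifc.ifc_len = sizeof(reqs);
 *	ifc.ifc_buf = (char *) reqs;
 *	if (ioctl(fd, SIOCGIFCONF, &ifc) >= 0)
 *		n = ifc.ifc_len / sizeof(struct ifreq);
 *
 *	Each of the n entries in reqs then holds the name and address of one
 *	interface that is up.
 */
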
/*
 *	This is invoked by the /proc filesystem handler to display a device
 *	in detail.
 */

static int sprintf_stats(char *buffer, struct device *dev)
{
	struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
	int size;

	if (stats)
		size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
		   dev->name,
		   stats->rx_packets, stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors
		   + stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->tx_packets, stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors + stats->tx_aborted_errors
		   + stats->tx_window_errors + stats->tx_heartbeat_errors);
	else
		size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);

	return size;
}

/*
 *	Called from the PROCfs module. This now uses the new arbitrary sized /proc/net interface
 *	to create /proc/net/dev
 */

int dev_get_info(char *buffer, char **start, off_t offset, int length)
{
	int len=0;
	off_t begin=0;
	off_t pos=0;
	int size;

	struct device *dev;


	size = sprintf(buffer, "Inter-|   Receive                  |  Transmit\n"
			       " face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");

	pos+=size;
	len+=size;


	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		size = sprintf_stats(buffer+len, dev);
		len+=size;
		pos=begin+len;

		if(pos<offset)
		{
			len=0;
			begin=pos;
		}
		if(pos>offset+length)
			break;
	}

	*start=buffer+(offset-begin);	/* Start of wanted data */
	len-=(offset-begin);		/* Start slop */
	if(len>length)
		len=length;		/* Ending slop */
	return len;
}

/*
 *	This checks bitmasks for the ioctl calls for devices.
 */

static inline int bad_mask(unsigned long mask, unsigned long addr)
{
	if (addr & (mask = ~mask))
		return 1;
	mask = ntohl(mask);
	if (mask & (mask+1))
		return 1;
	return 0;
}

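/*
 *	Worked example of the check above (masks written dotted-quad, i.e. in
 *	the network byte order they are passed in):
 *
 *	mask = 255.255.255.0  ->  ~mask = 0.0.0.255, ntohl() gives 0x000000ff,
 *				  (0xff & 0x100) == 0            -> good mask
 *	mask = 255.0.255.0    ->  ~mask = 0.255.0.255, ntohl() gives 0x00ff00ff,
 *				  (0x00ff00ff & 0x00ff0100) != 0 -> rejected
 *
 *	In other words a legal mask is a contiguous run of one bits from the
 *	top of the address downwards, and (when addr is non-zero) it must not
 *	leave any of addr's host bits set.
 */
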
/*
 *	Perform the SIOCxIFxxx calls.
 *
 *	The socket layer has seen an ioctl the address family thinks is
 *	for the device. At this point we get invoked to make a decision
 */

static int dev_ifsioc(void *arg, unsigned int getset)
{
	struct ifreq ifr;
	struct device *dev;
	int ret;

	/*
	 *	Fetch the caller's info block into kernel space
	 */

	int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
	if(err)
		return err;

	memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));

	/*
	 *	See which interface the caller is talking about.
	 */

	if ((dev = dev_get(ifr.ifr_name)) == NULL)
		return(-ENODEV);

	switch(getset)
	{
		case SIOCGIFFLAGS:	/* Get interface flags */
			ifr.ifr_flags = dev->flags;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;
		case SIOCSIFFLAGS:	/* Set interface flags */
			{
				int old_flags = dev->flags;
#ifdef CONFIG_SLAVE_BALANCING
				if(dev->flags&IFF_SLAVE)
					return -EBUSY;
#endif
				dev->flags = ifr.ifr_flags & (
					IFF_UP | IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
					IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
					IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER);
#ifdef CONFIG_SLAVE_BALANCING
				if(!(dev->flags&IFF_MASTER) && dev->slave)
				{
					dev->slave->flags&=~IFF_SLAVE;
					dev->slave=NULL;
				}
#endif

				/*
				 *	Has promiscuous mode been turned off
				 */
				if ( (old_flags & IFF_PROMISC) && ((dev->flags & IFF_PROMISC) == 0))
					dev->set_multicast_list(dev,0,NULL);

				/*
				 *	Has it been turned on
				 */

				if ( (dev->flags & IFF_PROMISC) && ((old_flags & IFF_PROMISC) == 0))
					dev->set_multicast_list(dev,-1,NULL);

				/*
				 *	Have we downed the interface
				 */

				if ((old_flags & IFF_UP) && ((dev->flags & IFF_UP) == 0))
				{
					ret = dev_close(dev);
				}
				else
				{
					/*
					 *	Have we upped the interface
					 */

					ret = (! (old_flags & IFF_UP) && (dev->flags & IFF_UP))
						? dev_open(dev) : 0;
					/*
					 *	Check the flags.
					 */
					if(ret<0)
						dev->flags&=~IFF_UP;	/* Didn't open so down the if */
				}
			}
			break;

		case SIOCGIFADDR:	/* Get interface address (and family) */
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_port = 0;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFADDR:	/* Set interface address (and family) */
			dev->pa_addr = (*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_addr.s_addr;
			dev->family = ifr.ifr_addr.sa_family;

#ifdef CONFIG_INET
			/* This is naughty. When net-032e comes out it wants moving into the net032
			   code not the kernel. Till then it can sit here (SIGH) */
			dev->pa_mask = ip_get_mask(dev->pa_addr);
#endif
			dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
			ret = 0;
			break;

		case SIOCGIFBRDADDR:	/* Get the broadcast address */
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_port = 0;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFBRDADDR:	/* Set the broadcast address */
			dev->pa_brdaddr = (*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr;
			ret = 0;
			break;

		case SIOCGIFDSTADDR:	/* Get the destination address (for point-to-point links) */
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_port = 0;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFDSTADDR:	/* Set the destination address (for point-to-point links) */
			dev->pa_dstaddr = (*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr;
			ret = 0;
			break;

		case SIOCGIFNETMASK:	/* Get the netmask for the interface */
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_port = 0;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFNETMASK:	/* Set the netmask for the interface */
			{
				unsigned long mask = (*(struct sockaddr_in *)
					&ifr.ifr_netmask).sin_addr.s_addr;
				ret = -EINVAL;
				/*
				 *	The mask we set must be legal.
				 */
				if (bad_mask(mask,0))
					break;
				dev->pa_mask = mask;
				ret = 0;
			}
			break;

		case SIOCGIFMETRIC:	/* Get the metric on the interface (currently unused) */

			ifr.ifr_metric = dev->metric;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFMETRIC:	/* Set the metric on the interface (currently unused) */
			dev->metric = ifr.ifr_metric;
			ret = 0;
			break;

		case SIOCGIFMTU:	/* Get the MTU of a device */
			ifr.ifr_mtu = dev->mtu;
			memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
			ret = 0;
			break;

		case SIOCSIFMTU:	/* Set the MTU of a device */

			/*
			 *	MTU must be positive and under the page size problem
			 */

			if(ifr.ifr_mtu<1 || ifr.ifr_mtu>3800)
				return -EINVAL;
			dev->mtu = ifr.ifr_mtu;
			ret = 0;
			break;

		case SIOCGIFMEM:	/* Get the per device memory space. We can add this but currently
					   do not support it */
			printk("NET: ioctl(SIOCGIFMEM, 0x%08X)\n", (int)arg);
			ret = -EINVAL;
			break;

		case SIOCSIFMEM:	/* Set the per device memory buffer space. Not applicable in our case */
			printk("NET: ioctl(SIOCSIFMEM, 0x%08X)\n", (int)arg);
			ret = -EINVAL;
			break;

		case OLD_SIOCGIFHWADDR:	/* Get the hardware address. This will change and SIFHWADDR will be added */
			memcpy(ifr.old_ifr_hwaddr,dev->dev_addr, MAX_ADDR_LEN);
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			ret=0;
			break;

		case SIOCGIFHWADDR:
			memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
			ifr.ifr_hwaddr.sa_family=dev->type;
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			ret=0;
			break;

		case SIOCSIFHWADDR:
			if(dev->set_mac_address==NULL)
				return -EOPNOTSUPP;
			if(ifr.ifr_hwaddr.sa_family!=dev->type)
				return -EINVAL;
			ret=dev->set_mac_address(dev,ifr.ifr_hwaddr.sa_data);
			break;

		case SIOCDEVPRIVATE:
			if(dev->do_ioctl==NULL)
				return -EOPNOTSUPP;
			ret=dev->do_ioctl(dev, &ifr);
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			break;

		case SIOCGIFMAP:
			ifr.ifr_map.mem_start=dev->mem_start;
			ifr.ifr_map.mem_end=dev->mem_end;
			ifr.ifr_map.base_addr=dev->base_addr;
			ifr.ifr_map.irq=dev->irq;
			ifr.ifr_map.dma=dev->dma;
			ifr.ifr_map.port=dev->if_port;
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			ret=0;
			break;

		case SIOCSIFMAP:
			if(dev->set_config==NULL)
				return -EOPNOTSUPP;
			return dev->set_config(dev,&ifr.ifr_map);

		case SIOCGIFSLAVE:
#ifdef CONFIG_SLAVE_BALANCING
			if(dev->slave==NULL)
				return -ENOENT;
			strncpy(ifr.ifr_name,dev->name,sizeof(ifr.ifr_name));
			memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
			ret=0;
#else
			return -ENOENT;
#endif
			break;
#ifdef CONFIG_SLAVE_BALANCING
		case SIOCSIFSLAVE:
		{

		/*
		 *	Fun game. Get the device up and the flags right without
		 *	letting some scummy user confuse us.
		 */
			unsigned long flags;
			struct device *slave=dev_get(ifr.ifr_slave);
			save_flags(flags);
			if(slave==NULL)
			{
				return -ENODEV;
			}
			cli();
			if((slave->flags&(IFF_UP|IFF_RUNNING))!=(IFF_UP|IFF_RUNNING))
			{
				restore_flags(flags);
				return -EINVAL;
			}
			if(dev->flags&IFF_SLAVE)
			{
				restore_flags(flags);
				return -EBUSY;
			}
			if(dev->slave!=NULL)
			{
				restore_flags(flags);
				return -EBUSY;
			}
			if(slave->flags&IFF_SLAVE)
			{
				restore_flags(flags);
				return -EBUSY;
			}
			dev->slave=slave;
			slave->flags|=IFF_SLAVE;
			dev->flags|=IFF_MASTER;
			restore_flags(flags);
			ret=0;
		}
		break;
#endif
		/*
		 *	Unknown ioctl
		 */

		default:
			ret = -EINVAL;
	}
	return(ret);
}

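/*
 *	Illustrative sketch only: a user process reaching the SIOCGIFADDR case
 *	above. The interface name "eth0" is an assumption for the example.
 *
 *	struct ifreq ifr;
 *	unsigned long addr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strcpy(ifr.ifr_name, "eth0");
 *	if (ioctl(fd, SIOCGIFADDR, &ifr) == 0)
 *		addr = (*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr;
 *
 *	The SIOCSIFxxx calls work the same way but require superuser rights
 *	(see dev_ioctl() below).
 */
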
/*
 *	This function handles all "interface"-type I/O control requests. The actual
 *	'doing' part of this is dev_ifsioc above.
 */

int dev_ioctl(unsigned int cmd, void *arg)
{
	switch(cmd)
	{
		/*
		 *	The old old setup ioctl. Even its name and this entry will soon be
		 *	just so much ionization on a backup tape.
		 */

		case SIOCGIFCONF:
			(void) dev_ifconf((char *) arg);
			return 0;

		/*
		 *	Ioctl calls that can be done by all.
		 */

		case SIOCGIFFLAGS:
		case SIOCGIFADDR:
		case SIOCGIFDSTADDR:
		case SIOCGIFBRDADDR:
		case SIOCGIFNETMASK:
		case SIOCGIFMETRIC:
		case SIOCGIFMTU:
		case SIOCGIFMEM:
		case SIOCGIFHWADDR:
		case SIOCSIFHWADDR:
		case OLD_SIOCGIFHWADDR:
		case SIOCGIFSLAVE:
		case SIOCGIFMAP:
			return dev_ifsioc(arg, cmd);

		/*
		 *	Ioctl calls requiring the power of a superuser
		 */

		case SIOCSIFFLAGS:
		case SIOCSIFADDR:
		case SIOCSIFDSTADDR:
		case SIOCSIFBRDADDR:
		case SIOCSIFNETMASK:
		case SIOCSIFMETRIC:
		case SIOCSIFMTU:
		case SIOCSIFMEM:
		case SIOCSIFMAP:
		case SIOCSIFSLAVE:
		case SIOCDEVPRIVATE:
			if (!suser())
				return -EPERM;
			return dev_ifsioc(arg, cmd);

		case SIOCSIFLINK:
			return -EINVAL;

		/*
		 *	Unknown ioctl.
		 */

		default:
			return -EINVAL;
	}
}

/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 *	The PCMCIA code may need to change this a little, and add a pair
 *	of register_inet_device() unregister_inet_device() calls. This will be
 *	needed for ethernet-as-modules support.
 */

void dev_init(void)
{
	struct device *dev, *dev2;

	/*
	 *	Add the devices.
	 *	If the call to dev->init fails, the dev is removed
	 *	from the chain disconnecting the device until the
	 *	next reboot.
	 */

	dev2 = NULL;
	for (dev = dev_base; dev != NULL; dev=dev->next)
	{
		if (dev->init && dev->init(dev))
		{
			/*
			 *	It failed to come up. Unhook it.
			 */

			if (dev2 == NULL)
				dev_base = dev->next;
			else
				dev2->next = dev->next;
		}
		else
		{
			dev2 = dev;
		}
	}
}