1 /*
 * NET3 Protocol independent device support routines.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Derived from the non IP parts of dev.c 1.0.19
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 *
14 * Additional Authors:
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dhinds@allegro.stanford.edu>
18 *
19 * Cleaned up and recommented by Alan Cox 2nd April 1994. I hope to have
20 * the rest as well commented in the end.
21 */
22
23 /*
24 * A lot of these includes will be going walkies very soon
25 */
26
27 #include <asm/segment.h>
28 #include <asm/system.h>
29 #include <asm/bitops.h>
30 #include <linux/config.h>
31 #include <linux/types.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/string.h>
35 #include <linux/mm.h>
36 #include <linux/socket.h>
37 #include <linux/sockios.h>
38 #include <linux/in.h>
39 #include <linux/errno.h>
40 #include <linux/interrupt.h>
41 #include <linux/if_ether.h>
42 #include <linux/inet.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include "ip.h"
46 #include "route.h"
47 #include <linux/skbuff.h>
48 #include "sock.h"
49 #include "arp.h"
50
51
/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 */

struct packet_type *ptype_base = NULL;

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the bottom half handler (net_bh). The head points at itself
 *	both ways, i.e. the queue starts out empty.
 */

static struct sk_buff_head backlog =
{
	(struct sk_buff *)&backlog, (struct sk_buff *)&backlog
#ifdef CONFIG_SKB_CHECK
	,SK_HEAD_SKB
#endif
};

/*
 *	We don't overdo the queue or we will thrash memory badly.
 *	netif_rx() starts dropping frames once this exceeds 100 and keeps
 *	dropping until the backlog drains completely.
 */

static int backlog_size = 0;

/*
 *	The number of sockets open for 'all' protocol use (ETH_P_ALL taps).
 *	We have to know this to copy a buffer the correct number of times.
 */

static int dev_nit=0;
84
/*
 *	Return the lesser of the two values.
 */

static __inline__ unsigned long min(unsigned long a, unsigned long b)
{
	if (a < b)
		return a;
	return b;
}
93
94
95 /******************************************************************************************
96
97 Protocol management and registration routines
98
99 *******************************************************************************************/
100
101
/*
 *	Add a protocol ID to the list. ETH_P_ALL taps are appended to the
 *	tail of the list so ordinary protocol handlers run first; every
 *	other handler is pushed on the front.
 */

void dev_add_pack(struct packet_type *pt)
{
	struct packet_type *p1;
	pt->next = ptype_base;	/* speculative: assume front insertion */

	/*
	 *	Don't use copy counts on ETH_P_ALL. Instead keep a global
	 *	count of number of these and use it and pt->copy to decide
	 *	copies
	 */

	pt->copy=0;	/* Assume we will not be copying the buffer before
			 * this routine gets it
			 */

	if(pt->type == htons(ETH_P_ALL))
		dev_nit++;	/* I'd like a /dev/nit too one day 8) */
	else
	{
		/*
		 *	See if we need to copy it - that is another process also
		 *	wishes to receive this type of packet.
		 */
		for (p1 = ptype_base; p1 != NULL; p1 = p1->next)
		{
			if (p1->type == pt->type)
			{
				pt->copy = 1;	/* We will need to copy */
				break;
			}
		}
	}

	/*
	 *	NIT taps must go at the end or net_bh will leak!
	 */

	if (pt->type == htons(ETH_P_ALL))
	{
		pt->next=NULL;	/* undo the speculative link made above */
		if(ptype_base==NULL)
			ptype_base=pt;
		else
		{
			/*
			 *	Move to the end of the list
			 */
			for(p1=ptype_base;p1->next!=NULL;p1=p1->next);
			/*
			 *	Hook on the end
			 */
			p1->next=pt;
		}
	}
	else
		/*
		 *	It goes on the start: pt->next already points at the
		 *	old head from the assignment at the top.
		 */
		ptype_base = pt;
}
166
167
/*
 *	Remove a protocol ID from the list.
 */

void dev_remove_pack(struct packet_type *pt)
{
	struct packet_type *lpt, *pt1;

	/*
	 *	Keep the count of nit (Network Interface Tap) sockets correct.
	 */

	if (pt->type == htons(ETH_P_ALL))
		dev_nit--;

	/*
	 *	If we are first, just unhook us.
	 */

	if (pt == ptype_base)
	{
		ptype_base = pt->next;
		return;
	}

	lpt = NULL;

	/*
	 *	This is harder. What we do is to walk the list of sockets
	 *	for this type. We unhook the entry, and if there is a previous
	 *	entry that is copying _and_ we are not copying, (ie we are the
	 *	last entry for this type) then the previous one is set to
	 *	non-copying as it is now the last.
	 */
	for (pt1 = ptype_base; pt1->next != NULL; pt1 = pt1->next)
	{
		if (pt1->next == pt )
		{
			cli();	/* keep net_bh from walking a half-spliced list */
			if (!pt->copy && lpt)
				lpt->copy = 0;	/* previous same-type entry is now last */
			pt1->next = pt->next;
			sti();
			return;
		}
		/* Remember the most recent earlier handler of the same type. */
		if (pt1->next->type == pt->type && pt->type != htons(ETH_P_ALL))
			lpt = pt1->next;
	}
}
217
218 /*****************************************************************************************
219
	Device Interface Subroutines
221
222 ******************************************************************************************/
223
224 /*
225 * Find an interface by name.
226 */
227
228 struct device *dev_get(char *name)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
229 {
230 struct device *dev;
231
232 for (dev = dev_base; dev != NULL; dev = dev->next)
233 {
234 if (strcmp(dev->name, name) == 0)
235 return(dev);
236 }
237 return(NULL);
238 }
239
240
241 /*
242 * Prepare an interface for use.
243 */
244
245 int dev_open(struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
246 {
247 int ret = 0;
248
249 /*
250 * Call device private open method
251 */
252 if (dev->open)
253 ret = dev->open(dev);
254
255 /*
256 * If it went open OK then set the flags
257 */
258
259 if (ret == 0)
260 dev->flags |= (IFF_UP | IFF_RUNNING);
261
262 return(ret);
263 }
264
265
/*
 *	Completely shutdown an interface.
 *
 *	WARNING: Both because of the way the upper layers work (that can be fixed)
 *	and because of races during a close (that can't be fixed any other way)
 *	a device may be given things to transmit EVEN WHEN IT IS DOWN. The driver
 *	MUST cope with this (eg by freeing and dumping the frame).
 */

int dev_close(struct device *dev)
{
	/*
	 *	Only close a device if it is up (any flag set counts).
	 */

	if (dev->flags != 0)
	{
		int ct=0;
		dev->flags = 0;	/* down: clears IFF_UP and everything else */
		/*
		 *	Call the device specific close. This cannot fail.
		 */
		if (dev->stop)
			dev->stop(dev);
		/*
		 *	Delete the route to the device.
		 */
#ifdef CONFIG_INET
		ip_rt_flush(dev);
#endif
#ifdef CONFIG_IPX
		ipxrtr_device_down(dev);
#endif
		/*
		 *	Blank the IP addresses
		 */
		dev->pa_addr = 0;
		dev->pa_dstaddr = 0;
		dev->pa_brdaddr = 0;
		dev->pa_mask = 0;
		/*
		 *	Purge any queued packets when we down the link.
		 *	Only buffers marked free may be discarded here;
		 *	the rest still belong to their sockets.
		 */
		while(ct<DEV_NUMBUFFS)
		{
			struct sk_buff *skb;
			while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
				if(skb->free)
					kfree_skb(skb,FREE_WRITE);
			ct++;
		}
	}
	return(0);
}
320
321
/*
 *	Send (or queue for sending) a packet.
 */

void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
	unsigned long flags;
	int where = 0;		/* used to say if the packet should go	*/
				/* at the front or the back of the	*/
				/* queue.				*/

	if (dev == NULL)
	{
		printk("dev.c: dev_queue_xmit: dev = NULL\n");
		return;
	}
#ifdef CONFIG_SLAVE_BALANCING
	/* Divert to a less loaded slave device when one is up. */
	save_flags(flags);
	cli();
	if(dev->slave!=NULL && dev->slave->pkt_queue < dev->pkt_queue &&
		(dev->slave->flags & IFF_UP))
		dev=dev->slave;
	restore_flags(flags);
#endif

	IS_SKB(skb);

	skb->dev = dev;

	/*
	 *	This just eliminates some race conditions, but not all...
	 *	A linked skb is already on some queue; poke the driver
	 *	rather than queue it twice.
	 */

	if (skb->next != NULL)
	{
		/*
		 *	Make sure we haven't missed an interrupt.
		 */
		printk("dev_queue_xmit: worked around a missed interrupt\n");
		dev->hard_start_xmit(NULL, dev);
		return;
	}

	/*
	 *	Negative priority is used to flag a frame that is being pulled from the
	 *	queue front as a retransmit attempt. It therefore goes back on the queue
	 *	start on a failure.
	 */

	if (pri < 0)
	{
		pri = -pri-1;
		where = 1;
	}

	if (pri >= DEV_NUMBUFFS)
	{
		printk("bad priority in dev_queue_xmit.\n");
		pri = 1;
	}

	/*
	 *	If the address has not been resolved. Call the device header rebuilder.
	 *	This can cover all protocols and technically not just ARP either.
	 *	A non-zero return means the frame was swallowed (e.g. queued on ARP).
	 */

	if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
		return;
	}

	save_flags(flags);
	cli();
	if (!where) {
		/*
		 *	New frame: add to the tail, then pull the head so
		 *	frames leave in FIFO order.
		 */
#ifdef CONFIG_SLAVE_BALANCING
		skb->in_dev_queue=1;
#endif
		skb_queue_tail(dev->buffs + pri,skb);
		skb = skb_dequeue(dev->buffs + pri);
#ifdef CONFIG_SLAVE_BALANCING
		skb->in_dev_queue=0;
#endif
	}
	restore_flags(flags);

	if (dev->hard_start_xmit(skb, dev) == 0) {
#ifdef CONFIG_SLAVE_BALANCING
		dev->pkt_queue--;
#endif
		return;
	}

	/*
	 *	Transmission failed, put skb back into a list (at the front,
	 *	so it is retried first).
	 */
	cli();
#ifdef CONFIG_SLAVE_BALANCING
	skb->in_dev_queue=1;
	dev->pkt_queue++;
#endif
	skb_queue_head(dev->buffs + pri,skb);
	restore_flags(flags);
}
424
425 /*
426 * Receive a packet from a device driver and queue it for the upper
427 * (protocol) levels. It always succeeds. This is the recommended
428 * interface to use.
429 */
430
431 void netif_rx(struct sk_buff *skb)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
432 {
433 static int dropping = 0;
434 extern struct timeval xtime;
435
436 /*
437 * Any received buffers are un-owned and should be discarded
438 * when freed. These will be updated later as the frames get
439 * owners.
440 */
441 skb->sk = NULL;
442 skb->free = 1;
443 if(skb->stamp.tv_sec==0)
444 skb->stamp = xtime;
445
446 /*
447 * Check that we aren't oevrdoing things.
448 */
449
450 if (!backlog_size)
451 dropping = 0;
452 else if (backlog_size > 100)
453 dropping = 1;
454
455 if (dropping)
456 {
457 kfree_skb(skb, FREE_READ);
458 return;
459 }
460
461 /*
462 * Add it to the "backlog" queue.
463 */
464
465 IS_SKB(skb);
466 skb_queue_tail(&backlog,skb);
467 backlog_size++;
468
469 /*
470 * If any packet arrived, mark it for processing after the
471 * hardware interrupt returns.
472 */
473
474 mark_bh(NET_BH);
475 return;
476 }
477
478
479 /*
480 * The old interface to fetch a packet from a device driver.
481 * This function is the base level entry point for all drivers that
482 * want to send a packet to the upper (protocol) levels. It takes
483 * care of de-multiplexing the packet to the various modules based
484 * on their protocol ID.
485 *
486 * Return values: 1 <- exit I can't do any more
487 * 0 <- feed me more (i.e. "done", "OK").
488 *
489 * This function is OBSOLETE and should not be used by any new
490 * device.
491 */
492
493 int dev_rint(unsigned char *buff, long len, int flags, struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
494 {
495 static int dropping = 0;
496 struct sk_buff *skb = NULL;
497 unsigned char *to;
498 int amount, left;
499 int len2;
500
501 if (dev == NULL || buff == NULL || len <= 0)
502 return(1);
503
504 if (flags & IN_SKBUFF)
505 {
506 skb = (struct sk_buff *) buff;
507 }
508 else
509 {
510 if (dropping)
511 {
512 if (skb_peek(&backlog) != NULL)
513 return(1);
514 printk("INET: dev_rint: no longer dropping packets.\n");
515 dropping = 0;
516 }
517
518 skb = alloc_skb(len, GFP_ATOMIC);
519 if (skb == NULL)
520 {
521 printk("dev_rint: packet dropped on %s (no memory) !\n",
522 dev->name);
523 dropping = 1;
524 return(1);
525 }
526
527 /*
528 * First we copy the packet into a buffer, and save it for later. We
529 * in effect handle the incoming data as if it were from a circular buffer
530 */
531
532 to = skb->data;
533 left = len;
534
535 len2 = len;
536 while (len2 > 0)
537 {
538 amount = min(len2, (unsigned long) dev->rmem_end -
539 (unsigned long) buff);
540 memcpy(to, buff, amount);
541 len2 -= amount;
542 left -= amount;
543 buff += amount;
544 to += amount;
545 if ((unsigned long) buff == dev->rmem_end)
546 buff = (unsigned char *) dev->rmem_start;
547 }
548 }
549
550 /*
551 * Tag the frame and kick it to the proper receive routine
552 */
553
554 skb->len = len;
555 skb->dev = dev;
556 skb->free = 1;
557
558 netif_rx(skb);
559 /*
560 * OK, all done.
561 */
562 return(0);
563 }
564
565
566 /*
567 * This routine causes all interfaces to try to send some data.
568 */
569
570 void dev_transmit(void)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
571 {
572 struct device *dev;
573
574 for (dev = dev_base; dev != NULL; dev = dev->next)
575 {
576 if (dev->flags != 0 && !dev->tbusy) {
577 /*
578 * Kick the device
579 */
580 dev_tint(dev);
581 }
582 }
583 }
584
585
586 /**********************************************************************************
587
588 Receive Queue Processor
589
590 ***********************************************************************************/
591
/*
 *	This is a single non-reentrant routine which takes the received
 *	packet queue and throws it at the networking layers in the hope
 *	that something useful will emerge.
 */

volatile char in_bh = 0;	/* Non-reentrant remember */

/*
 *	Report whether the network bottom half is currently running.
 *	Used by timer.c.
 */
int in_net_bh()
{
	if (in_bh)
		return 1;
	return 0;
}
604
/*
 *	When we are called the queue is ready to grab, the interrupts are
 *	on and hardware can interrupt and queue to the receive queue as we
 *	run with no problems.
 *	This is run as a bottom half after an interrupt handler that does
 *	mark_bh(NET_BH);
 */

void net_bh(void *tmp)
{
	struct sk_buff *skb;
	struct packet_type *ptype;
	unsigned short type;
	unsigned char flag = 0;
	int nitcount;

	/*
	 *	Atomically check and mark our BUSY state.
	 */

	if (set_bit(1, (void*)&in_bh))
		return;		/* already running - non-reentrant */

	/*
	 *	Can we send anything now? We want to clear the
	 *	decks for any more sends that get done as we
	 *	process the input.
	 */

	dev_transmit();

	/*
	 *	Any data left to process. This may occur because a
	 *	mark_bh() is done after we empty the queue including
	 *	that from the device which does a mark_bh() just after
	 */

	cli();

	/*
	 *	While the queue is not empty
	 */

	while((skb=skb_dequeue(&backlog))!=NULL)
	{
		/*
		 *	We have a packet. Therefore the queue has shrunk
		 */
		backlog_size--;

		/* Snapshot the tap count for this frame, then let
		 * interrupts back in while we process it. */
		nitcount=dev_nit;
		flag=0;
		sti();

		/*
		 *	Bump the pointer to the next structure.
		 *	This assumes that the basic 'skb' pointer points to
		 *	the MAC header, if any (as indicated by its "length"
		 *	field). Take care now!
		 */

		skb->h.raw = skb->data + skb->dev->hard_header_len;
		skb->len -= skb->dev->hard_header_len;

		/*
		 *	Fetch the packet protocol ID. This is also quite ugly, as
		 *	it depends on the protocol driver (the interface itself) to
		 *	know what the type is, or where to get it from. The Ethernet
		 *	interfaces fetch the ID from the two bytes in the Ethernet MAC
		 *	header (the h_proto field in struct ethhdr), but other drivers
		 *	may either use the ethernet ID's or extra ones that do not
		 *	clash (eg ETH_P_AX25). We could set this before we queue the
		 *	frame. In fact I may change this when I have time.
		 */

		type = skb->dev->type_trans(skb, skb->dev);

		/*
		 *	We got a packet ID. Now loop over the "known protocols"
		 *	table (which is actually a linked list, but this will
		 *	change soon if I get my way- FvK), and forward the packet
		 *	to anyone who wants it.
		 *
		 *	[FvK didn't get his way but he is right this ought to be
		 *	hashed so we typically get a single hit. The speed cost
		 *	here is minimal but no doubt adds up at the 4,000+ pkts/second
		 *	rate we can hit flat out]
		 */

		for (ptype = ptype_base; ptype != NULL; ptype = ptype->next)
		{
			if (ptype->type == type || ptype->type == htons(ETH_P_ALL))
			{
				struct sk_buff *skb2;

				if (ptype->type == htons(ETH_P_ALL))
					nitcount--;
				/* Copy when another handler of this type follows,
				 * or when further NIT taps still want the frame. */
				if (ptype->copy || nitcount)
				{
					/*
					 *	copy if we need to
					 */
#ifdef OLD
					skb2 = alloc_skb(skb->len, GFP_ATOMIC);
					if (skb2 == NULL)
						continue;
					memcpy(skb2, skb, skb2->mem_len);
					skb2->mem_addr = skb2;
					skb2->h.raw = (unsigned char *)(
						(unsigned long) skb2 +
						(unsigned long) skb->h.raw -
						(unsigned long) skb
					);
					skb2->free = 1;
#else
					skb2=skb_clone(skb, GFP_ATOMIC);
					if(skb2==NULL)
						continue;
#endif
				}
				else
				{
					/* Last consumer: hand over the original. */
					skb2 = skb;
				}

				/*
				 *	Protocol located.
				 */

				flag = 1;

				/*
				 *	Kick the protocol handler. This should be fast
				 *	and efficient code.
				 */

				ptype->func(skb2, skb->dev, ptype);
			}
		} /* End of protocol list loop */

		/*
		 *	Has an unknown packet been received ? If so, nobody
		 *	took ownership and we must free it ourselves.
		 */

		if (!flag)
		{
			kfree_skb(skb, FREE_WRITE);
		}

		/*
		 *	Again, see if we can transmit anything now.
		 */

		dev_transmit();
		cli();	/* interrupts off again before re-testing the queue */
	} /* End of queue loop */

	/*
	 *	We have emptied the queue
	 */

	in_bh = 0;
	sti();

	/*
	 *	One last output flush.
	 */

	dev_transmit();
}
775
776
777 /*
778 * This routine is called when an device driver (i.e. an
779 * interface) is ready to transmit a packet.
780 */
781
782 void dev_tint(struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
783 {
784 int i;
785 struct sk_buff *skb;
786
787 /*
788 * Work the queues in priority order
789 */
790
791 for(i = 0;i < DEV_NUMBUFFS; i++)
792 {
793 /*
794 * Pull packets from the queue
795 */
796
797 while((skb=skb_dequeue(&dev->buffs[i]))!=NULL)
798 {
799 /*
800 * Feed them to the output stage and if it fails
801 * indicate they re-queue at the front.
802 */
803 dev_queue_xmit(skb,dev,-i - 1);
804 /*
805 * If we can take no more then stop here.
806 */
807 if (dev->tbusy)
808 return;
809 }
810 }
811 }
812
813
814 /*
815 * Perform a SIOCGIFCONF call. This structure will change
816 * size shortly, and there is nothing I can do about it.
817 * Thus we will need a 'compatibility mode'.
818 */
819
820 static int dev_ifconf(char *arg)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
821 {
822 struct ifconf ifc;
823 struct ifreq ifr;
824 struct device *dev;
825 char *pos;
826 int len;
827 int err;
828
829 /*
830 * Fetch the caller's info block.
831 */
832
833 err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
834 if(err)
835 return err;
836 memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
837 len = ifc.ifc_len;
838 pos = ifc.ifc_buf;
839
840 /*
841 * We now walk the device list filling each active device
842 * into the array.
843 */
844
845 err=verify_area(VERIFY_WRITE,pos,len);
846 if(err)
847 return err;
848
849 /*
850 * Loop over the interfaces, and write an info block for each.
851 */
852
853 for (dev = dev_base; dev != NULL; dev = dev->next)
854 {
855 if(!(dev->flags & IFF_UP)) /* Downed devices don't count */
856 continue;
857 memset(&ifr, 0, sizeof(struct ifreq));
858 strcpy(ifr.ifr_name, dev->name);
859 (*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
860 (*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
861
862 /*
863 * Write this block to the caller's space.
864 */
865
866 memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
867 pos += sizeof(struct ifreq);
868 len -= sizeof(struct ifreq);
869
870 /*
871 * Have we run out of space here ?
872 */
873
874 if (len < sizeof(struct ifreq))
875 break;
876 }
877
878 /*
879 * All done. Write the updated control block back to the caller.
880 */
881
882 ifc.ifc_len = (pos - ifc.ifc_buf);
883 ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
884 memcpy_tofs(arg, &ifc, sizeof(struct ifconf));
885
886 /*
887 * Report how much was filled in
888 */
889
890 return(pos - arg);
891 }
892
893
894 /*
895 * This is invoked by the /proc filesystem handler to display a device
896 * in detail.
897 */
898
899 static int sprintf_stats(char *buffer, struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
900 {
901 struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
902 int size;
903
904 if (stats)
905 size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
906 dev->name,
907 stats->rx_packets, stats->rx_errors,
908 stats->rx_dropped + stats->rx_missed_errors,
909 stats->rx_fifo_errors,
910 stats->rx_length_errors + stats->rx_over_errors
911 + stats->rx_crc_errors + stats->rx_frame_errors,
912 stats->tx_packets, stats->tx_errors, stats->tx_dropped,
913 stats->tx_fifo_errors, stats->collisions,
914 stats->tx_carrier_errors + stats->tx_aborted_errors
915 + stats->tx_window_errors + stats->tx_heartbeat_errors);
916 else
917 size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);
918
919 return size;
920 }
921
/*
 *	Called from the PROCfs module. This now uses the new arbitrary sized /proc/net interface
 *	to create /proc/net/dev
 */

int dev_get_info(char *buffer, char **start, off_t offset, int length)
{
	int len=0;		/* bytes of wanted data currently in buffer */
	off_t begin=0;		/* file offset corresponding to buffer[0] */
	off_t pos=0;		/* file offset of the end of generated output */
	int size;

	struct device *dev;


	size = sprintf(buffer, "Inter-| Receive | Transmit\n"
			" face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");

	pos+=size;
	len+=size;


	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		size = sprintf_stats(buffer+len, dev);
		len+=size;
		pos=begin+len;

		/* Everything generated so far lies before the requested
		 * window: discard it and restart accumulating. */
		if(pos<offset)
		{
			len=0;
			begin=pos;
		}
		/* Generated past the end of the window: stop early. */
		if(pos>offset+length)
			break;
	}

	*start=buffer+(offset-begin);	/* Start of wanted data */
	len-=(offset-begin);		/* Start slop */
	if(len>length)
		len=length;		/* Ending slop */
	return len;
}
965
966
/*
 *	This checks bitmasks for the ioctl calls for devices.
 *	A mask is bad if the address has bits set outside it, or if
 *	(in host order) its complement is not of the form 2^k - 1,
 *	i.e. the mask is not a contiguous run of high bits.
 */

static inline int bad_mask(unsigned long mask, unsigned long addr)
{
	unsigned long inverse = ~mask;

	if (addr & inverse)
		return 1;
	inverse = ntohl(inverse);
	if (inverse & (inverse + 1))
		return 1;
	return 0;
}
980
981 /*
982 * Perform the SIOCxIFxxx calls.
983 *
984 * The socket layer has seen an ioctl the address family thinks is
985 * for the device. At this point we get invoked to make a decision
986 */
987
988 static int dev_ifsioc(void *arg, unsigned int getset)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
989 {
990 struct ifreq ifr;
991 struct device *dev;
992 int ret;
993
994 /*
995 * Fetch the caller's info block into kernel space
996 */
997
998 int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
999 if(err)
1000 return err;
1001
1002 memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));
1003
1004 /*
1005 * See which interface the caller is talking about.
1006 */
1007
1008 if ((dev = dev_get(ifr.ifr_name)) == NULL)
1009 return(-ENODEV);
1010
1011 switch(getset)
1012 {
1013 case SIOCGIFFLAGS: /* Get interface flags */
1014 ifr.ifr_flags = dev->flags;
1015 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1016 ret = 0;
1017 break;
1018 case SIOCSIFFLAGS: /* Set interface flags */
1019 {
1020 int old_flags = dev->flags;
1021 #ifdef CONFIG_SLAVE_BALANCING
1022 if(dev->flags&IFF_SLAVE)
1023 return -EBUSY;
1024 #endif
1025 dev->flags = ifr.ifr_flags & (
1026 IFF_UP | IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
1027 IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
1028 IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER);
1029 #ifdef CONFIG_SLAVE_BALANCING
1030 if(!(dev->flags&IFF_MASTER) && dev->slave)
1031 {
1032 dev->slave->flags&=~IFF_SLAVE;
1033 dev->slave=NULL;
1034 }
1035 #endif
1036
1037 /*
1038 * Has promiscuous mode been turned off
1039 */
1040 if ( (old_flags & IFF_PROMISC) && ((dev->flags & IFF_PROMISC) == 0))
1041 dev->set_multicast_list(dev,0,NULL);
1042
1043 /*
1044 * Has it been turned on
1045 */
1046
1047 if ( (dev->flags & IFF_PROMISC) && ((old_flags & IFF_PROMISC) == 0))
1048 dev->set_multicast_list(dev,-1,NULL);
1049
1050 /*
1051 * Have we downed the interface
1052 */
1053
1054 if ((old_flags & IFF_UP) && ((dev->flags & IFF_UP) == 0))
1055 {
1056 ret = dev_close(dev);
1057 }
1058 else
1059 {
1060 /*
1061 * Have we upped the interface
1062 */
1063
1064 ret = (! (old_flags & IFF_UP) && (dev->flags & IFF_UP))
1065 ? dev_open(dev) : 0;
1066 /*
1067 * Check the flags.
1068 */
1069 if(ret<0)
1070 dev->flags&=~IFF_UP; /* Didnt open so down the if */
1071 }
1072 }
1073 break;
1074
1075 case SIOCGIFADDR: /* Get interface address (and family) */
1076 (*(struct sockaddr_in *)
1077 &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
1078 (*(struct sockaddr_in *)
1079 &ifr.ifr_addr).sin_family = dev->family;
1080 (*(struct sockaddr_in *)
1081 &ifr.ifr_addr).sin_port = 0;
1082 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1083 ret = 0;
1084 break;
1085
1086 case SIOCSIFADDR: /* Set interface address (and family) */
1087 dev->pa_addr = (*(struct sockaddr_in *)
1088 &ifr.ifr_addr).sin_addr.s_addr;
1089 dev->family = ifr.ifr_addr.sa_family;
1090
1091 #ifdef CONFIG_INET
1092 /* This is naughty. When net-032e comes out It wants moving into the net032
1093 code not the kernel. Till then it can sit here (SIGH) */
1094 dev->pa_mask = ip_get_mask(dev->pa_addr);
1095 #endif
1096 dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
1097 ret = 0;
1098 break;
1099
1100 case SIOCGIFBRDADDR: /* Get the broadcast address */
1101 (*(struct sockaddr_in *)
1102 &ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
1103 (*(struct sockaddr_in *)
1104 &ifr.ifr_broadaddr).sin_family = dev->family;
1105 (*(struct sockaddr_in *)
1106 &ifr.ifr_broadaddr).sin_port = 0;
1107 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1108 ret = 0;
1109 break;
1110
1111 case SIOCSIFBRDADDR: /* Set the broadcast address */
1112 dev->pa_brdaddr = (*(struct sockaddr_in *)
1113 &ifr.ifr_broadaddr).sin_addr.s_addr;
1114 ret = 0;
1115 break;
1116
1117 case SIOCGIFDSTADDR: /* Get the destination address (for point-to-point links) */
1118 (*(struct sockaddr_in *)
1119 &ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
1120 (*(struct sockaddr_in *)
1121 &ifr.ifr_broadaddr).sin_family = dev->family;
1122 (*(struct sockaddr_in *)
1123 &ifr.ifr_broadaddr).sin_port = 0;
1124 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1125 ret = 0;
1126 break;
1127
1128 case SIOCSIFDSTADDR: /* Set the destination address (for point-to-point links) */
1129 dev->pa_dstaddr = (*(struct sockaddr_in *)
1130 &ifr.ifr_dstaddr).sin_addr.s_addr;
1131 ret = 0;
1132 break;
1133
1134 case SIOCGIFNETMASK: /* Get the netmask for the interface */
1135 (*(struct sockaddr_in *)
1136 &ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
1137 (*(struct sockaddr_in *)
1138 &ifr.ifr_netmask).sin_family = dev->family;
1139 (*(struct sockaddr_in *)
1140 &ifr.ifr_netmask).sin_port = 0;
1141 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1142 ret = 0;
1143 break;
1144
1145 case SIOCSIFNETMASK: /* Set the netmask for the interface */
1146 {
1147 unsigned long mask = (*(struct sockaddr_in *)
1148 &ifr.ifr_netmask).sin_addr.s_addr;
1149 ret = -EINVAL;
1150 /*
1151 * The mask we set must be legal.
1152 */
1153 if (bad_mask(mask,0))
1154 break;
1155 dev->pa_mask = mask;
1156 ret = 0;
1157 }
1158 break;
1159
1160 case SIOCGIFMETRIC: /* Get the metric on the inteface (currently unused) */
1161
1162 ifr.ifr_metric = dev->metric;
1163 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1164 ret = 0;
1165 break;
1166
1167 case SIOCSIFMETRIC: /* Set the metric on the interface (currently unused) */
1168 dev->metric = ifr.ifr_metric;
1169 ret = 0;
1170 break;
1171
1172 case SIOCGIFMTU: /* Get the MTU of a device */
1173 ifr.ifr_mtu = dev->mtu;
1174 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1175 ret = 0;
1176 break;
1177
1178 case SIOCSIFMTU: /* Set the MTU of a device */
1179
1180 /*
1181 * MTU must be positive and under the page size problem
1182 */
1183
1184 if(ifr.ifr_mtu<1 || ifr.ifr_mtu>3800)
1185 return -EINVAL;
1186 dev->mtu = ifr.ifr_mtu;
1187 ret = 0;
1188 break;
1189
1190 case SIOCGIFMEM: /* Get the per device memory space. We can add this but currently
1191 do not support it */
1192 printk("NET: ioctl(SIOCGIFMEM, 0x%08X)\n", (int)arg);
1193 ret = -EINVAL;
1194 break;
1195
1196 case SIOCSIFMEM: /* Set the per device memory buffer space. Not applicable in our case */
1197 printk("NET: ioctl(SIOCSIFMEM, 0x%08X)\n", (int)arg);
1198 ret = -EINVAL;
1199 break;
1200
1201 case OLD_SIOCGIFHWADDR: /* Get the hardware address. This will change and SIFHWADDR will be added */
1202 memcpy(ifr.old_ifr_hwaddr,dev->dev_addr, MAX_ADDR_LEN);
1203 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1204 ret=0;
1205 break;
1206
1207 case SIOCGIFHWADDR:
1208 memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
1209 ifr.ifr_hwaddr.sa_family=dev->type;
1210 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1211 ret=0;
1212 break;
1213
1214 case SIOCSIFHWADDR:
1215 if(dev->set_mac_address==NULL)
1216 return -EOPNOTSUPP;
1217 if(ifr.ifr_hwaddr.sa_family!=dev->type)
1218 return -EINVAL;
1219 ret=dev->set_mac_address(dev,ifr.ifr_hwaddr.sa_data);
1220 break;
1221
1222 case SIOCDEVPRIVATE:
1223 if(dev->do_ioctl==NULL)
1224 return -EOPNOTSUPP;
1225 return dev->do_ioctl(dev, &ifr);
1226
1227 case SIOCGIFMAP:
1228 ifr.ifr_map.mem_start=dev->mem_start;
1229 ifr.ifr_map.mem_end=dev->mem_end;
1230 ifr.ifr_map.base_addr=dev->base_addr;
1231 ifr.ifr_map.irq=dev->irq;
1232 ifr.ifr_map.dma=dev->dma;
1233 ifr.ifr_map.port=dev->if_port;
1234 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1235 ret=0;
1236 break;
1237
1238 case SIOCSIFMAP:
1239 if(dev->set_config==NULL)
1240 return -EOPNOTSUPP;
1241 return dev->set_config(dev,&ifr.ifr_map);
1242
1243 case SIOCGIFSLAVE:
1244 #ifdef CONFIG_SLAVE_BALANCING
1245 if(dev->slave==NULL)
1246 return -ENOENT;
1247 strncpy(ifr.ifr_name,dev->name,sizeof(ifr.ifr_name));
1248 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1249 ret=0;
1250 #else
1251 return -ENOENT;
1252 #endif
1253 break;
1254 #ifdef CONFIG_SLAVE_BALANCING
1255 case SIOCSIFSLAVE:
1256 {
1257
1258 /*
1259 * Fun game. Get the device up and the flags right without
1260 * letting some scummy user confuse us.
1261 */
1262 unsigned long flags;
1263 struct device *slave=dev_get(ifr.ifr_slave);
1264 save_flags(flags);
1265 if(slave==NULL)
1266 {
1267 return -ENODEV;
1268 }
1269 cli();
1270 if(slave->flags&(IFF_UP|IFF_RUNNING)!=(IFF_UP|IFF_RUNNING))
1271 {
1272 restore_flags(flags);
1273 return -EINVAL;
1274 }
1275 if(dev->flags&IFF_SLAVE)
1276 {
1277 restore_flags(flags);
1278 return -EINVAL;
1279 }
1280 if(dev->slave!=NULL)
1281 {
1282 restore_flags(flags);
1283 return -EBUSY;
1284 }
1285 if(slave->flags&IFF_SLAVE)
1286 {
1287 restore_flags(flags);
1288 return -EBUSY;
1289 }
1290 dev->slave=slave;
1291 slave->flags|=IFF_SLAVE;
1292 dev->flags|=IFF_MASTER;
1293 restore_flags(flags);
1294 ret=0;
1295 }
1296 break;
1297 #endif
1298 /*
1299 * Unknown ioctl
1300 */
1301
1302 default:
1303 ret = -EINVAL;
1304 }
1305 return(ret);
1306 }
1307
1308
1309 /*
1310 * This function handles all "interface"-type I/O control requests. The actual
1311 * 'doing' part of this is dev_ifsioc above.
1312 */
1313
1314 int dev_ioctl(unsigned int cmd, void *arg)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
1315 {
1316 switch(cmd)
1317 {
1318 /*
1319 * The old old setup ioctl. Even its name and this entry will soon be
1320 * just so much ionization on a backup tape.
1321 */
1322
1323 case SIOCGIFCONF:
1324 (void) dev_ifconf((char *) arg);
1325 return 0;
1326
1327 /*
1328 * Ioctl calls that can be done by all.
1329 */
1330
1331 case SIOCGIFFLAGS:
1332 case SIOCGIFADDR:
1333 case SIOCGIFDSTADDR:
1334 case SIOCGIFBRDADDR:
1335 case SIOCGIFNETMASK:
1336 case SIOCGIFMETRIC:
1337 case SIOCGIFMTU:
1338 case SIOCGIFMEM:
1339 case SIOCGIFHWADDR:
1340 case SIOCSIFHWADDR:
1341 case OLD_SIOCGIFHWADDR:
1342 case SIOCGIFSLAVE:
1343 case SIOCGIFMAP:
1344 return dev_ifsioc(arg, cmd);
1345
1346 /*
1347 * Ioctl calls requiring the power of a superuser
1348 */
1349
1350 case SIOCSIFFLAGS:
1351 case SIOCSIFADDR:
1352 case SIOCSIFDSTADDR:
1353 case SIOCSIFBRDADDR:
1354 case SIOCSIFNETMASK:
1355 case SIOCSIFMETRIC:
1356 case SIOCSIFMTU:
1357 case SIOCSIFMEM:
1358 case SIOCSIFMAP:
1359 case SIOCSIFSLAVE:
1360 case SIOCDEVPRIVATE:
1361 if (!suser())
1362 return -EPERM;
1363 return dev_ifsioc(arg, cmd);
1364
1365 case SIOCSIFLINK:
1366 return -EINVAL;
1367
1368 /*
1369 * Unknown ioctl.
1370 */
1371
1372 default:
1373 return -EINVAL;
1374 }
1375 }
1376
1377
1378 /*
1379 * Initialize the DEV module. At boot time this walks the device list and
1380 * unhooks any devices that fail to initialise (normally hardware not
1381 * present) and leaves us with a valid list of present and active devices.
1382 *
1383 * The PCMCIA code may need to change this a little, and add a pair of
1384 * register_inet_device()/unregister_inet_device() calls. This will be
1385 * needed for ethernet as module support.
1386 */
1387
1388 void dev_init(void)
/* ![[previous]](../icons/left.png)
![[next]](../icons/n_right.png)
![[first]](../icons/first.png)
![[last]](../icons/n_last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
1389 {
1390 struct device *dev, *dev2;
1391
1392 /*
1393 * Add the devices.
1394 * If the call to dev->init fails, the dev is removed
1395 * from the chain disconnecting the device until the
1396 * next reboot.
1397 */
1398
1399 dev2 = NULL;
1400 for (dev = dev_base; dev != NULL; dev=dev->next)
1401 {
1402 if (dev->init && dev->init(dev))
1403 {
1404 /*
1405 * It failed to come up. Unhook it.
1406 */
1407
1408 if (dev2 == NULL)
1409 dev_base = dev->next;
1410 else
1411 dev2->next = dev->next;
1412 }
1413 else
1414 {
1415 dev2 = dev;
1416 }
1417 }
1418 }