1 /*
2 * NET3 Protocol independent device support routines.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Derived from the non IP parts of dev.c 1.0.19
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 *
14 * Additional Authors:
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dhinds@allegro.stanford.edu>
18 *
19 * Changes:
20 * Alan Cox : device private ioctl copies fields back.
21 * Alan Cox : Transmit queue code does relevant stunts to
22 * keep the queue safe.
23 * Alan Cox : Fixed double lock.
24 * Alan Cox : Fixed promisc NULL pointer trap
25 * ???????? : Support the full private ioctl range
26 * Alan Cox : Moved ioctl permission check into drivers
27 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
28 * Alan Cox : 100 backlog just doesn't cut it when
29 * you start doing multicast video 8)
30 * Alan Cox : Rewrote net_bh and list manager.
31 * Alan Cox : Fix ETH_P_ALL echoback lengths.
32 * Alan Cox : Took out transmit every packet pass
33 * Saved a few bytes in the ioctl handler
34 * Alan Cox : Network driver sets packet type before calling netif_rx. Saves
35 * a function call a packet.
36 * Alan Cox : Hashed net_bh()
37 * Richard Kooijman : Timestamp fixes.
38 * Alan Cox : Wrong field in SIOCGIFDSTADDR
39 *
40 * Cleaned up and recommented by Alan Cox 2nd April 1994. I hope to have
41 * the rest as well commented in the end.
42 */
43
44 /*
45 * A lot of these includes will be going walkies very soon
46 */
47
48 #include <asm/segment.h>
49 #include <asm/system.h>
50 #include <asm/bitops.h>
51 #include <linux/config.h>
52 #include <linux/types.h>
53 #include <linux/kernel.h>
54 #include <linux/sched.h>
55 #include <linux/string.h>
56 #include <linux/mm.h>
57 #include <linux/socket.h>
58 #include <linux/sockios.h>
59 #include <linux/in.h>
60 #include <linux/errno.h>
61 #include <linux/interrupt.h>
62 #include <linux/if_ether.h>
63 #include <linux/inet.h>
64 #include <linux/netdevice.h>
65 #include <linux/etherdevice.h>
66 #include <linux/notifier.h>
67 #include <net/ip.h>
68 #include <net/route.h>
69 #include <linux/skbuff.h>
70 #include <net/sock.h>
71 #include <net/arp.h>
72
73
/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.  ptype_base is a 16-bucket hash table
 *	keyed by (host-order protocol & 15); ptype_all holds the ETH_P_ALL
 *	taps that see every packet (see dev_add_pack()).
 */

struct packet_type *ptype_base[16];
struct packet_type *ptype_all = NULL;		/* Taps */

/*
 *	Our notifier list.  Modules register here to hear about device
 *	state changes (NETDEV_UP / NETDEV_DOWN).
 */

struct notifier_block *netdev_chain=NULL;

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the bottom half handler.  The head is statically initialised
 *	to point at itself, i.e. an empty queue.
 */

static struct sk_buff_head backlog =
	{
		(struct sk_buff *)&backlog, (struct sk_buff *)&backlog
#ifdef CONFIG_SKB_CHECK
		,SK_HEAD_SKB
#endif
	};

/*
 *	We don't overdo the queue or we will thrash memory badly.
 *	Number of sk_buffs currently on the backlog queue; netif_rx()
 *	starts dropping once this exceeds 300.
 */

static int backlog_size = 0;
106
/*
 *	Return the lesser of the two values.
 */

static __inline__ unsigned long min(unsigned long a, unsigned long b)
{
	if (a < b)
		return a;
	return b;
}
115
116
117 /******************************************************************************************
118
119 Protocol management and registration routines
120
121 *******************************************************************************************/
122
123 /*
124 * For efficiency
125 */
126
127 static int dev_nit=0;
128
129 /*
130 * Add a protocol ID to the list. Now that the input handler is
131 * smarter we can dispense with all the messy stuff that used to be
132 * here.
133 */
134
135 void dev_add_pack(struct packet_type *pt)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
136 {
137 int hash;
138 if(pt->type==htons(ETH_P_ALL))
139 {
140 dev_nit++;
141 pt->next=ptype_all;
142 ptype_all=pt;
143 }
144 else
145 {
146 hash=ntohs(pt->type)&15;
147 pt->next = ptype_base[hash];
148 ptype_base[hash] = pt;
149 }
150 }
151
152
153 /*
154 * Remove a protocol ID from the list.
155 */
156
157 void dev_remove_pack(struct packet_type *pt)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
158 {
159 struct packet_type **pt1;
160 if(pt->type==htons(ETH_P_ALL))
161 {
162 dev_nit--;
163 pt1=&ptype_all;
164 }
165 else
166 pt1=&ptype_base[ntohs(pt->type)&15];
167 for(; (*pt1)!=NULL; pt1=&((*pt1)->next))
168 {
169 if(pt==(*pt1))
170 {
171 *pt1=pt->next;
172 return;
173 }
174 }
175 }
176
177 /*****************************************************************************************
178
179 Device Interface Subroutines
180
181 ******************************************************************************************/
182
183 /*
184 * Find an interface by name.
185 */
186
187 struct device *dev_get(char *name)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
188 {
189 struct device *dev;
190
191 for (dev = dev_base; dev != NULL; dev = dev->next)
192 {
193 if (strcmp(dev->name, name) == 0)
194 return(dev);
195 }
196 return(NULL);
197 }
198
199
200 /*
201 * Prepare an interface for use.
202 */
203
204 int dev_open(struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
205 {
206 int ret = 0;
207
208 /*
209 * Call device private open method
210 */
211 if (dev->open)
212 ret = dev->open(dev);
213
214 /*
215 * If it went open OK then set the flags
216 */
217
218 if (ret == 0)
219 {
220 dev->flags |= (IFF_UP | IFF_RUNNING);
221 /*
222 * Initialise multicasting status
223 */
224 #ifdef CONFIG_IP_MULTICAST
225 /*
226 * Join the all host group
227 */
228 ip_mc_allhost(dev);
229 #endif
230 dev_mc_upload(dev);
231 notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
232 }
233 return(ret);
234 }
235
236
237 /*
238 * Completely shutdown an interface.
239 */
240
241 int dev_close(struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
242 {
243 /*
244 * Only close a device if it is up.
245 */
246
247 if (dev->flags != 0)
248 {
249 int ct=0;
250 dev->flags = 0;
251 /*
252 * Call the device specific close. This cannot fail.
253 */
254 if (dev->stop)
255 dev->stop(dev);
256 /*
257 * Tell people we are going down
258 */
259 notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
260 /*
261 * Flush the multicast chain
262 */
263 dev_mc_discard(dev);
264 /*
265 * Blank the IP addresses
266 */
267 dev->pa_addr = 0;
268 dev->pa_dstaddr = 0;
269 dev->pa_brdaddr = 0;
270 dev->pa_mask = 0;
271 /*
272 * Purge any queued packets when we down the link
273 */
274 while(ct<DEV_NUMBUFFS)
275 {
276 struct sk_buff *skb;
277 while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
278 if(skb->free)
279 kfree_skb(skb,FREE_WRITE);
280 ct++;
281 }
282 }
283 return(0);
284 }
285
286
/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/*
 *	Register to be told about device state changes (NETDEV_UP /
 *	NETDEV_DOWN events raised on the netdev_chain notifier list).
 *	Returns whatever notifier_chain_register() reports.
 */
int register_netdevice_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&netdev_chain, nb);
}
296
/*
 *	Remove a previously registered device-change notifier from
 *	netdev_chain.  Returns whatever notifier_chain_unregister()
 *	reports.
 */
int unregister_netdevice_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&netdev_chain,nb);
}
301
302
303
304 /*
305 * Send (or queue for sending) a packet.
306 *
307 * IMPORTANT: When this is called to resend frames. The caller MUST
308 * already have locked the sk_buff. Apart from that we do the
309 * rest of the magic.
310 */
311
312 void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
313 {
314 unsigned long flags;
315 int nitcount;
316 struct packet_type *ptype;
317 int where = 0; /* used to say if the packet should go */
318 /* at the front or the back of the */
319 /* queue - front is a retransmit try */
320
321 if(pri>=0 && !skb_device_locked(skb))
322 skb_device_lock(skb); /* Shove a lock on the frame */
323 #ifdef CONFIG_SKB_CHECK
324 IS_SKB(skb);
325 #endif
326 skb->dev = dev;
327
328 /*
329 * Negative priority is used to flag a frame that is being pulled from the
330 * queue front as a retransmit attempt. It therefore goes back on the queue
331 * start on a failure.
332 */
333
334 if (pri < 0)
335 {
336 pri = -pri-1;
337 where = 1;
338 }
339
340 #ifdef CONFIG_NET_DEBUG
341 if (pri >= DEV_NUMBUFFS)
342 {
343 printk("bad priority in dev_queue_xmit.\n");
344 pri = 1;
345 }
346 #endif
347
348 /*
349 * If the address has not been resolved. Call the device header rebuilder.
350 * This can cover all protocols and technically not just ARP either.
351 */
352
353 if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
354 return;
355 }
356
357 save_flags(flags);
358 cli();
359 if (dev_nit && !where)
360 {
361 skb_queue_tail(dev->buffs + pri,skb);
362 skb_device_unlock(skb); /* Buffer is on the device queue and can be freed safely */
363 skb = skb_dequeue(dev->buffs + pri);
364 skb_device_lock(skb); /* New buffer needs locking down */
365 }
366 restore_flags(flags);
367
368 /* copy outgoing packets to any sniffer packet handlers */
369 if(!where)
370 {
371 skb->stamp=xtime;
372 for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next)
373 {
374 /* Never send packets back to the socket
375 * they originated from - MvS (miquels@drinkel.ow.org)
376 */
377 if ((ptype->dev == dev || !ptype->dev) &&
378 ((struct sock *)ptype->data != skb->sk))
379 {
380 struct sk_buff *skb2;
381 if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
382 break;
383 /*
384 * The protocol knows this has (for other paths) been taken off
385 * and adds it back.
386 */
387 skb2->len-=skb->dev->hard_header_len;
388 ptype->func(skb2, skb->dev, ptype);
389 nitcount--;
390 }
391 }
392 }
393 start_bh_atomic();
394 if (dev->hard_start_xmit(skb, dev) == 0) {
395 /*
396 * Packet is now solely the responsibility of the driver
397 */
398 end_bh_atomic();
399 return;
400 }
401 end_bh_atomic();
402
403 /*
404 * Transmission failed, put skb back into a list. Once on the list it's safe and
405 * no longer device locked (it can be freed safely from the device queue)
406 */
407 cli();
408 skb_device_unlock(skb);
409 skb_queue_head(dev->buffs + pri,skb);
410 restore_flags(flags);
411 }
412
413 /*
414 * Receive a packet from a device driver and queue it for the upper
415 * (protocol) levels. It always succeeds. This is the recommended
416 * interface to use.
417 */
418
419 void netif_rx(struct sk_buff *skb)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
420 {
421 static int dropping = 0;
422
423 /*
424 * Any received buffers are un-owned and should be discarded
425 * when freed. These will be updated later as the frames get
426 * owners.
427 */
428 skb->sk = NULL;
429 skb->free = 1;
430 if(skb->stamp.tv_sec==0)
431 skb->stamp = xtime;
432
433 /*
434 * Check that we aren't overdoing things.
435 */
436
437 if (!backlog_size)
438 dropping = 0;
439 else if (backlog_size > 300)
440 dropping = 1;
441
442 if (dropping)
443 {
444 kfree_skb(skb, FREE_READ);
445 return;
446 }
447
448 /*
449 * Add it to the "backlog" queue.
450 */
451 #ifdef CONFIG_SKB_CHECK
452 IS_SKB(skb);
453 #endif
454 skb_queue_tail(&backlog,skb);
455 backlog_size++;
456
457 /*
458 * If any packet arrived, mark it for processing after the
459 * hardware interrupt returns.
460 */
461
462 #ifdef CONFIG_NET_RUNONIRQ /* Dont enable yet, needs some driver mods */
463 inet_bh();
464 #else
465 mark_bh(NET_BH);
466 #endif
467 return;
468 }
469
470
471 /*
472 * The old interface to fetch a packet from a device driver.
473 * This function is the base level entry point for all drivers that
474 * want to send a packet to the upper (protocol) levels. It takes
475 * care of de-multiplexing the packet to the various modules based
476 * on their protocol ID.
477 *
478 * Return values: 1 <- exit I can't do any more
479 * 0 <- feed me more (i.e. "done", "OK").
480 *
481 * This function is OBSOLETE and should not be used by any new
482 * device.
483 */
484
485 int dev_rint(unsigned char *buff, long len, int flags, struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
486 {
487 static int dropping = 0;
488 struct sk_buff *skb = NULL;
489 unsigned char *to;
490 int amount, left;
491 int len2;
492
493 if (dev == NULL || buff == NULL || len <= 0)
494 return(1);
495
496 if (flags & IN_SKBUFF)
497 {
498 skb = (struct sk_buff *) buff;
499 }
500 else
501 {
502 if (dropping)
503 {
504 if (skb_peek(&backlog) != NULL)
505 return(1);
506 printk("INET: dev_rint: no longer dropping packets.\n");
507 dropping = 0;
508 }
509
510 skb = alloc_skb(len, GFP_ATOMIC);
511 if (skb == NULL)
512 {
513 printk("dev_rint: packet dropped on %s (no memory) !\n",
514 dev->name);
515 dropping = 1;
516 return(1);
517 }
518
519 /*
520 * First we copy the packet into a buffer, and save it for later. We
521 * in effect handle the incoming data as if it were from a circular buffer
522 */
523
524 to = skb->data;
525 left = len;
526
527 len2 = len;
528 while (len2 > 0)
529 {
530 amount = min(len2, (unsigned long) dev->rmem_end -
531 (unsigned long) buff);
532 memcpy(to, buff, amount);
533 len2 -= amount;
534 left -= amount;
535 buff += amount;
536 to += amount;
537 if ((unsigned long) buff == dev->rmem_end)
538 buff = (unsigned char *) dev->rmem_start;
539 }
540 }
541
542 /*
543 * Tag the frame and kick it to the proper receive routine
544 */
545
546 skb->len = len;
547 skb->dev = dev;
548 skb->free = 1;
549
550 netif_rx(skb);
551 /*
552 * OK, all done.
553 */
554 return(0);
555 }
556
557
558 /*
559 * This routine causes all interfaces to try to send some data.
560 */
561
562 void dev_transmit(void)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
563 {
564 struct device *dev;
565
566 for (dev = dev_base; dev != NULL; dev = dev->next)
567 {
568 if (dev->flags != 0 && !dev->tbusy) {
569 /*
570 * Kick the device
571 */
572 dev_tint(dev);
573 }
574 }
575 }
576
577
578 /**********************************************************************************
579
580 Receive Queue Processor
581
582 ***********************************************************************************/
583
/*
 *	This is a single non-reentrant routine which takes the received packet
 *	queue and throws it at the networking layers in the hope that something
 *	useful will emerge.
 */

volatile char in_bh = 0;	/* Non-reentrant remember */

/*
 *	Report whether the network bottom half is currently running.
 *	Used by timer.c.
 */
int in_net_bh()
{
	if (in_bh)
		return 1;
	return 0;
}
596
/*
 *	When we are called the queue is ready to grab, the interrupts are
 *	on and hardware can interrupt and queue to the receive queue as we
 *	run with no problems.
 *	This is run as a bottom half after an interrupt handler that does
 *	mark_bh(NET_BH);
 */
/*
 *	net_bh() - the network receive bottom half.
 *
 *	Drains the backlog queue and hands each frame to every matching
 *	protocol handler: first the ETH_P_ALL taps on ptype_all, then
 *	the hashed ptype_base chain for the frame's protocol.  The last
 *	matching handler receives the original skb; all earlier matches
 *	get clones.  Non-reentrant - guarded by the in_bh flag.  The
 *	tmp argument is unused.
 */
void net_bh(void *tmp)
{
	struct sk_buff *skb;
	struct packet_type *ptype;
	struct packet_type *pt_prev;	/* deferred "previous match" - see loop below */
	unsigned short type;

	/*
	 *	Atomically check and mark our BUSY state.  If the bit was
	 *	already set another pass is running, so bail out.
	 */

	if (set_bit(1, (void*)&in_bh))
		return;

	/*
	 *	Can we send anything now? We want to clear the
	 *	decks for any more sends that get done as we
	 *	process the input.
	 */

	dev_transmit();

	/*
	 *	Any data left to process. This may occur because a
	 *	mark_bh() is done after we empty the queue including
	 *	that from the device which does a mark_bh() just after
	 */

	cli();

	/*
	 *	While the queue is not empty.  Interrupts are re-enabled
	 *	(sti) while each frame is processed and disabled again
	 *	(cli) before re-testing the queue.
	 */

	while((skb=skb_dequeue(&backlog))!=NULL)
	{
		/*
		 *	We have a packet. Therefore the queue has shrunk
		 */
		backlog_size--;

		sti();

		/*
		 *	Bump the pointer to the next structure.
		 *	This assumes that the basic 'skb' pointer points to
		 *	the MAC header, if any (as indicated by its "length"
		 *	field). Take care now!
		 */

		skb->h.raw = skb->data + skb->dev->hard_header_len;
		skb->len -= skb->dev->hard_header_len;

		/*
		 *	Fetch the packet protocol ID.
		 */

		type = skb->protocol;

		/*
		 *	We got a packet ID. Now loop over the "known protocols"
		 *	list. There are two lists. The ptype_all list of taps (normally empty)
		 *	and the main protocol list which is hashed perfectly for normal protocols.
		 *
		 *	pt_prev delays each delivery by one match so the FINAL
		 *	match can consume the skb itself while every earlier
		 *	match gets a clone.
		 */
		pt_prev = NULL;
		for (ptype = ptype_all; ptype!=NULL; ptype=ptype->next)
		{
			if(pt_prev)
			{
				struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC);
				if(skb2)
					pt_prev->func(skb2,skb->dev, pt_prev);
			}
			pt_prev=ptype;
		}

		for (ptype = ptype_base[ntohs(type)&15]; ptype != NULL; ptype = ptype->next)
		{
			if ((ptype->type == type || ptype->type == htons(ETH_P_ALL)) && (!ptype->dev || ptype->dev==skb->dev))
			{
				/*
				 *	We already have a match queued. Deliver
				 *	to it and then remember the new match
				 */
				if(pt_prev)
				{
					struct sk_buff *skb2;

					skb2=skb_clone(skb, GFP_ATOMIC);

					/*
					 *	Kick the protocol handler. This should be fast
					 *	and efficient code.
					 */

					if(skb2)
						pt_prev->func(skb2, skb->dev, pt_prev);
				}
				/* Remember the current last to do */
				pt_prev=ptype;
			}
		} /* End of protocol list loop */

		/*
		 *	Is there a last item to send to ?  It gets the original
		 *	skb (no clone needed).
		 */

		if(pt_prev)
			pt_prev->func(skb, skb->dev, pt_prev);
		/*
		 *	Has an unknown packet has been received ?
		 */

		else
			kfree_skb(skb, FREE_WRITE);

		/*
		 *	Again, see if we can transmit anything now.
		 *	[Ought to take this out judging by tests it slows
		 *	us down not speeds us up]
		 */
#ifdef CONFIG_XMIT_EVERY
		dev_transmit();
#endif
		cli();
	} /* End of queue loop */

	/*
	 *	We have emptied the queue
	 */

	in_bh = 0;
	sti();

	/*
	 *	One last output flush.
	 */

	dev_transmit();
}
745
746
/*
 *	This routine is called when a device driver (i.e. an
 *	interface) is ready to transmit a packet.
 */
/*
 *	Feed queued frames back into dev_queue_xmit() for the device.
 *	A negative priority (-i - 1) marks each frame as a retransmit
 *	attempt so a failed send re-queues at the queue front.  Interrupt
 *	flags are saved on entry and restored around each transmit; on
 *	the early tbusy return they have already been restored.
 */
void dev_tint(struct device *dev)
{
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);
	/*
	 *	Work the queues in priority order
	 */

	for(i = 0;i < DEV_NUMBUFFS; i++)
	{
		/*
		 *	Pull packets from the queue
		 */


		cli();
		while((skb=skb_dequeue(&dev->buffs[i]))!=NULL)
		{
			/*
			 *	Stop anyone freeing the buffer while we retransmit it
			 */
			skb_device_lock(skb);
			restore_flags(flags);
			/*
			 *	Feed them to the output stage and if it fails
			 *	indicate they re-queue at the front.
			 */
			dev_queue_xmit(skb,dev,-i - 1);
			/*
			 *	If we can take no more then stop here.
			 */
			if (dev->tbusy)
				return;
			cli();
		}
	}
	restore_flags(flags);
}
793
794
795 /*
796 * Perform a SIOCGIFCONF call. This structure will change
797 * size shortly, and there is nothing I can do about it.
798 * Thus we will need a 'compatibility mode'.
799 */
800
801 static int dev_ifconf(char *arg)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
802 {
803 struct ifconf ifc;
804 struct ifreq ifr;
805 struct device *dev;
806 char *pos;
807 int len;
808 int err;
809
810 /*
811 * Fetch the caller's info block.
812 */
813
814 err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
815 if(err)
816 return err;
817 memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
818 len = ifc.ifc_len;
819 pos = ifc.ifc_buf;
820
821 /*
822 * We now walk the device list filling each active device
823 * into the array.
824 */
825
826 err=verify_area(VERIFY_WRITE,pos,len);
827 if(err)
828 return err;
829
830 /*
831 * Loop over the interfaces, and write an info block for each.
832 */
833
834 for (dev = dev_base; dev != NULL; dev = dev->next)
835 {
836 if(!(dev->flags & IFF_UP)) /* Downed devices don't count */
837 continue;
838 memset(&ifr, 0, sizeof(struct ifreq));
839 strcpy(ifr.ifr_name, dev->name);
840 (*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
841 (*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
842
843 /*
844 * Write this block to the caller's space.
845 */
846
847 memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
848 pos += sizeof(struct ifreq);
849 len -= sizeof(struct ifreq);
850
851 /*
852 * Have we run out of space here ?
853 */
854
855 if (len < sizeof(struct ifreq))
856 break;
857 }
858
859 /*
860 * All done. Write the updated control block back to the caller.
861 */
862
863 ifc.ifc_len = (pos - ifc.ifc_buf);
864 ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
865 memcpy_tofs(arg, &ifc, sizeof(struct ifconf));
866
867 /*
868 * Report how much was filled in
869 */
870
871 return(pos - arg);
872 }
873
874
/*
 *	This is invoked by the /proc filesystem handler to display a device
 *	in detail.  Formats one line of statistics for dev into buffer and
 *	returns the number of characters written.  When the driver supplies
 *	no get_stats hook a "No statistics available" line is emitted
 *	instead.
 */

static int sprintf_stats(char *buffer, struct device *dev)
{
	/* Ask the driver for its counters; the hook may legitimately be absent. */
	struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
	int size;

	if (stats)
		size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
		   dev->name,
		   stats->rx_packets, stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors
		   + stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->tx_packets, stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors + stats->tx_aborted_errors
		   + stats->tx_window_errors + stats->tx_heartbeat_errors);
	else
		size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);

	return size;
}
902
/*
 *	Called from the PROCfs module. This now uses the new arbitrary sized /proc/net interface
 *	to create /proc/net/dev
 *
 *	buffer/start/offset/length implement the classic procfs windowing
 *	protocol: format everything, keep track of the running position
 *	(pos) and hand back only the [offset, offset+length) slice.
 */

int dev_get_info(char *buffer, char **start, off_t offset, int length)
{
	int len=0;		/* bytes currently held in buffer */
	off_t begin=0;		/* file offset buffer[0] corresponds to */
	off_t pos=0;		/* running file position */
	int size;

	struct device *dev;


	size = sprintf(buffer, "Inter-| Receive | Transmit\n"
	    " face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");

	pos+=size;
	len+=size;


	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		size = sprintf_stats(buffer+len, dev);
		len+=size;
		pos=begin+len;

		/* Everything before the requested offset can be discarded. */
		if(pos<offset)
		{
			len=0;
			begin=pos;
		}
		/* We have formatted past the requested window - stop. */
		if(pos>offset+length)
			break;
	}

	*start=buffer+(offset-begin);	/* Start of wanted data */
	len-=(offset-begin);		/* Start slop */
	if(len>length)
		len=length;		/* Ending slop */
	return len;
}
946
947
/*
 *	This checks bitmasks for the ioctl calls for devices.
 *	A mask is bad when the address has bits set outside it, or when
 *	the (host-order) mask is not a contiguous run of high-order ones.
 *	Returns 1 for a bad mask, 0 for a good one.
 */

static inline int bad_mask(unsigned long mask, unsigned long addr)
{
	unsigned long inv = ~mask;

	if (addr & inv)
		return 1;
	inv = ntohl(inv);
	if (inv & (inv + 1))
		return 1;
	return 0;
}
961
962 /*
963 * Perform the SIOCxIFxxx calls.
964 *
965 * The socket layer has seen an ioctl the address family thinks is
966 * for the device. At this point we get invoked to make a decision
967 */
968
969 static int dev_ifsioc(void *arg, unsigned int getset)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
970 {
971 struct ifreq ifr;
972 struct device *dev;
973 int ret;
974
975 /*
976 * Fetch the caller's info block into kernel space
977 */
978
979 int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
980 if(err)
981 return err;
982
983 memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));
984
985 /*
986 * See which interface the caller is talking about.
987 */
988
989 if ((dev = dev_get(ifr.ifr_name)) == NULL)
990 return(-ENODEV);
991
992 switch(getset)
993 {
994 case SIOCGIFFLAGS: /* Get interface flags */
995 ifr.ifr_flags = dev->flags;
996 goto rarok;
997
998 case SIOCSIFFLAGS: /* Set interface flags */
999 {
1000 int old_flags = dev->flags;
1001 dev->flags = ifr.ifr_flags & (
1002 IFF_UP | IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
1003 IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
1004 IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER
1005 | IFF_MULTICAST);
1006 /*
1007 * Load in the correct multicast list now the flags have changed.
1008 */
1009
1010 dev_mc_upload(dev);
1011
1012 /*
1013 * Have we downed the interface
1014 */
1015
1016 if ((old_flags & IFF_UP) && ((dev->flags & IFF_UP) == 0))
1017 {
1018 ret = dev_close(dev);
1019 }
1020 else
1021 {
1022 /*
1023 * Have we upped the interface
1024 */
1025
1026 ret = (! (old_flags & IFF_UP) && (dev->flags & IFF_UP))
1027 ? dev_open(dev) : 0;
1028 /*
1029 * Check the flags.
1030 */
1031 if(ret<0)
1032 dev->flags&=~IFF_UP; /* Didn't open so down the if */
1033 }
1034 }
1035 break;
1036
1037 case SIOCGIFADDR: /* Get interface address (and family) */
1038 (*(struct sockaddr_in *)
1039 &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
1040 (*(struct sockaddr_in *)
1041 &ifr.ifr_addr).sin_family = dev->family;
1042 (*(struct sockaddr_in *)
1043 &ifr.ifr_addr).sin_port = 0;
1044 goto rarok;
1045
1046 case SIOCSIFADDR: /* Set interface address (and family) */
1047 dev->pa_addr = (*(struct sockaddr_in *)
1048 &ifr.ifr_addr).sin_addr.s_addr;
1049 dev->family = ifr.ifr_addr.sa_family;
1050
1051 #ifdef CONFIG_INET
1052 /* This is naughty. When net-032e comes out It wants moving into the net032
1053 code not the kernel. Till then it can sit here (SIGH) */
1054 dev->pa_mask = ip_get_mask(dev->pa_addr);
1055 #endif
1056 dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
1057 ret = 0;
1058 break;
1059
1060 case SIOCGIFBRDADDR: /* Get the broadcast address */
1061 (*(struct sockaddr_in *)
1062 &ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
1063 (*(struct sockaddr_in *)
1064 &ifr.ifr_broadaddr).sin_family = dev->family;
1065 (*(struct sockaddr_in *)
1066 &ifr.ifr_broadaddr).sin_port = 0;
1067 goto rarok;
1068 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1069 ret = 0;
1070 break;
1071
1072 case SIOCSIFBRDADDR: /* Set the broadcast address */
1073 dev->pa_brdaddr = (*(struct sockaddr_in *)
1074 &ifr.ifr_broadaddr).sin_addr.s_addr;
1075 ret = 0;
1076 break;
1077
1078 case SIOCGIFDSTADDR: /* Get the destination address (for point-to-point links) */
1079 (*(struct sockaddr_in *)
1080 &ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
1081 (*(struct sockaddr_in *)
1082 &ifr.ifr_dstaddr).sin_family = dev->family;
1083 (*(struct sockaddr_in *)
1084 &ifr.ifr_dstaddr).sin_port = 0;
1085 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1086 ret = 0;
1087 break;
1088
1089 case SIOCSIFDSTADDR: /* Set the destination address (for point-to-point links) */
1090 dev->pa_dstaddr = (*(struct sockaddr_in *)
1091 &ifr.ifr_dstaddr).sin_addr.s_addr;
1092 ret = 0;
1093 break;
1094
1095 case SIOCGIFNETMASK: /* Get the netmask for the interface */
1096 (*(struct sockaddr_in *)
1097 &ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
1098 (*(struct sockaddr_in *)
1099 &ifr.ifr_netmask).sin_family = dev->family;
1100 (*(struct sockaddr_in *)
1101 &ifr.ifr_netmask).sin_port = 0;
1102 goto rarok;
1103
1104 case SIOCSIFNETMASK: /* Set the netmask for the interface */
1105 {
1106 unsigned long mask = (*(struct sockaddr_in *)
1107 &ifr.ifr_netmask).sin_addr.s_addr;
1108 ret = -EINVAL;
1109 /*
1110 * The mask we set must be legal.
1111 */
1112 if (bad_mask(mask,0))
1113 break;
1114 dev->pa_mask = mask;
1115 ret = 0;
1116 }
1117 break;
1118
1119 case SIOCGIFMETRIC: /* Get the metric on the interface (currently unused) */
1120
1121 ifr.ifr_metric = dev->metric;
1122 goto rarok;
1123
1124 case SIOCSIFMETRIC: /* Set the metric on the interface (currently unused) */
1125 dev->metric = ifr.ifr_metric;
1126 ret=0;
1127 break;
1128
1129 case SIOCGIFMTU: /* Get the MTU of a device */
1130 ifr.ifr_mtu = dev->mtu;
1131 goto rarok;
1132
1133 case SIOCSIFMTU: /* Set the MTU of a device */
1134
1135 /*
1136 * MTU must be positive.
1137 */
1138
1139 if(ifr.ifr_mtu<68)
1140 return -EINVAL;
1141 dev->mtu = ifr.ifr_mtu;
1142 ret = 0;
1143 break;
1144
1145 case SIOCGIFMEM: /* Get the per device memory space. We can add this but currently
1146 do not support it */
1147 ret = -EINVAL;
1148 break;
1149
1150 case SIOCSIFMEM: /* Set the per device memory buffer space. Not applicable in our case */
1151 ret = -EINVAL;
1152 break;
1153
1154 case OLD_SIOCGIFHWADDR: /* Get the hardware address. This will change and SIFHWADDR will be added */
1155 memcpy(ifr.old_ifr_hwaddr,dev->dev_addr, MAX_ADDR_LEN);
1156 goto rarok;
1157
1158 case SIOCGIFHWADDR:
1159 memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
1160 ifr.ifr_hwaddr.sa_family=dev->type;
1161 goto rarok;
1162
1163 case SIOCSIFHWADDR:
1164 if(dev->set_mac_address==NULL)
1165 return -EOPNOTSUPP;
1166 if(ifr.ifr_hwaddr.sa_family!=dev->type)
1167 return -EINVAL;
1168 ret=dev->set_mac_address(dev,ifr.ifr_hwaddr.sa_data);
1169 break;
1170
1171 case SIOCGIFMAP:
1172 ifr.ifr_map.mem_start=dev->mem_start;
1173 ifr.ifr_map.mem_end=dev->mem_end;
1174 ifr.ifr_map.base_addr=dev->base_addr;
1175 ifr.ifr_map.irq=dev->irq;
1176 ifr.ifr_map.dma=dev->dma;
1177 ifr.ifr_map.port=dev->if_port;
1178 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1179 ret=0;
1180 break;
1181
1182 case SIOCSIFMAP:
1183 if(dev->set_config==NULL)
1184 return -EOPNOTSUPP;
1185 return dev->set_config(dev,&ifr.ifr_map);
1186
1187 case SIOCADDMULTI:
1188 if(dev->set_multicast_list==NULL)
1189 return -EINVAL;
1190 if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
1191 return -EINVAL;
1192 dev_mc_add(dev,ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
1193 return 0;
1194
1195 case SIOCDELMULTI:
1196 if(dev->set_multicast_list==NULL)
1197 return -EINVAL;
1198 if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
1199 return -EINVAL;
1200 dev_mc_delete(dev,ifr.ifr_hwaddr.sa_data,dev->addr_len, 1);
1201 return 0;
1202 /*
1203 * Unknown or private ioctl
1204 */
1205
1206 default:
1207 if((getset >= SIOCDEVPRIVATE) &&
1208 (getset <= (SIOCDEVPRIVATE + 15))) {
1209 if(dev->do_ioctl==NULL)
1210 return -EOPNOTSUPP;
1211 ret=dev->do_ioctl(dev, &ifr, getset);
1212 memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
1213 break;
1214 }
1215
1216 ret = -EINVAL;
1217 }
1218 return(ret);
1219 /*
1220 * The load of calls that return an ifreq and ok (saves memory).
1221 */
1222 rarok:
1223 memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
1224 return 0;
1225 }
1226
1227
1228 /*
1229 * This function handles all "interface"-type I/O control requests. The actual
1230 * 'doing' part of this is dev_ifsioc above.
1231 */
1232
1233 int dev_ioctl(unsigned int cmd, void *arg)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
1234 {
1235 switch(cmd)
1236 {
1237 case SIOCGIFCONF:
1238 (void) dev_ifconf((char *) arg);
1239 return 0;
1240
1241 /*
1242 * Ioctl calls that can be done by all.
1243 */
1244
1245 case SIOCGIFFLAGS:
1246 case SIOCGIFADDR:
1247 case SIOCGIFDSTADDR:
1248 case SIOCGIFBRDADDR:
1249 case SIOCGIFNETMASK:
1250 case SIOCGIFMETRIC:
1251 case SIOCGIFMTU:
1252 case SIOCGIFMEM:
1253 case SIOCGIFHWADDR:
1254 case SIOCSIFHWADDR:
1255 case OLD_SIOCGIFHWADDR:
1256 case SIOCGIFSLAVE:
1257 case SIOCGIFMAP:
1258 return dev_ifsioc(arg, cmd);
1259
1260 /*
1261 * Ioctl calls requiring the power of a superuser
1262 */
1263
1264 case SIOCSIFFLAGS:
1265 case SIOCSIFADDR:
1266 case SIOCSIFDSTADDR:
1267 case SIOCSIFBRDADDR:
1268 case SIOCSIFNETMASK:
1269 case SIOCSIFMETRIC:
1270 case SIOCSIFMTU:
1271 case SIOCSIFMEM:
1272 case SIOCSIFMAP:
1273 case SIOCSIFSLAVE:
1274 case SIOCADDMULTI:
1275 case SIOCDELMULTI:
1276 if (!suser())
1277 return -EPERM;
1278 return dev_ifsioc(arg, cmd);
1279
1280 case SIOCSIFLINK:
1281 return -EINVAL;
1282
1283 /*
1284 * Unknown or private ioctl.
1285 */
1286
1287 default:
1288 if((cmd >= SIOCDEVPRIVATE) &&
1289 (cmd <= (SIOCDEVPRIVATE + 15))) {
1290 return dev_ifsioc(arg, cmd);
1291 }
1292 return -EINVAL;
1293 }
1294 }
1295
1296
1297 /*
1298 * Initialize the DEV module. At boot time this walks the device list and
1299 * unhooks any devices that fail to initialise (normally hardware not
1300 * present) and leaves us with a valid list of present and active devices.
1301 *
1302 */
1303
1304 void dev_init(void)
/* ![[previous]](../icons/left.png)
![[next]](../icons/n_right.png)
![[first]](../icons/first.png)
![[last]](../icons/n_last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
1305 {
1306 struct device *dev, *dev2;
1307
1308 /*
1309 * Add the devices.
1310 * If the call to dev->init fails, the dev is removed
1311 * from the chain disconnecting the device until the
1312 * next reboot.
1313 */
1314
1315 dev2 = NULL;
1316 for (dev = dev_base; dev != NULL; dev=dev->next)
1317 {
1318 if (dev->init && dev->init(dev))
1319 {
1320 /*
1321 * It failed to come up. Unhook it.
1322 */
1323
1324 if (dev2 == NULL)
1325 dev_base = dev->next;
1326 else
1327 dev2->next = dev->next;
1328 }
1329 else
1330 {
1331 dev2 = dev;
1332 }
1333 }
1334 }
1335