/*
 *      NET3    Protocol independent device support routines.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *      Derived from the non IP parts of dev.c 1.0.19
 *              Authors:        Ross Biro, <bir7@leland.Stanford.Edu>
 *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *      Additional Authors:
 *              Florian la Roche <rzsfl@rz.uni-sb.de>
 *              Alan Cox <gw4pts@gw4pts.ampr.org>
 *              David Hinds <dhinds@allegro.stanford.edu>
 *
 *      Changes:
 *              Alan Cox        :       device private ioctl copies fields back.
 *              Alan Cox        :       Transmit queue code does relevant stunts to
 *                                      keep the queue safe.
 *              Alan Cox        :       Fixed double lock.
 *              Alan Cox        :       Fixed promisc NULL pointer trap
 *              ????????        :       Support the full private ioctl range
 *              Alan Cox        :       Moved ioctl permission check into drivers
 *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
 *              Alan Cox        :       100 backlog just doesn't cut it when
 *                                      you start doing multicast video 8)
 *              Alan Cox        :       Rewrote net_bh and list manager.
 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
 *              Alan Cox        :       Took out transmit every packet pass
 *                                      Saved a few bytes in the ioctl handler
 *              Alan Cox        :       Network driver sets packet type before
 *                                      calling netif_rx. Saves a function call
 *                                      a packet.
 *              Alan Cox        :       Hashed net_bh()
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
 *              Alan Cox        :       Device lock protection.
 *              Alan Cox        :       Fixed nasty side effect of device close changes.
 *              Rudi Cilibrasi  :       Pass the right thing to set_mac_address()
 *
 */

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>

/*
 *      The list of packet types we will receive (as opposed to discard)
 *      and the routines to invoke.
 */

struct packet_type *ptype_base[16];
struct packet_type *ptype_all = NULL;           /* Taps */

/*
 *      Device list lock
 */

int dev_lockct = 0;

/*
 *      Our notifier list
 */

struct notifier_block *netdev_chain = NULL;

/*
 *      Device drivers call our routines to queue packets here. We empty the
 *      queue in the bottom half handler.
 */

static struct sk_buff_head backlog =
{
        (struct sk_buff *)&backlog, (struct sk_buff *)&backlog
#if CONFIG_SKB_CHECK
        ,SK_HEAD_SKB
#endif
};

/*
 *      We don't overdo the queue or we will thrash memory badly.
 */

static int backlog_size = 0;

/*
 *      Return the lesser of the two values.
 */

static __inline__ unsigned long min(unsigned long a, unsigned long b)
{
        return (a < b) ? a : b;
}

/******************************************************************************************

                Protocol management and registration routines

*******************************************************************************************/

/*
 *      For efficiency
 */

static int dev_nit = 0;

/*
 *      Add a protocol ID to the list. Now that the input handler is
 *      smarter we can dispense with all the messy stuff that used to be
 *      here.
 */

void dev_add_pack(struct packet_type *pt)
{
        int hash;

        if (pt->type == htons(ETH_P_ALL))
        {
                dev_nit++;
                pt->next = ptype_all;
                ptype_all = pt;
        }
        else
        {
                hash = ntohs(pt->type) & 15;
                pt->next = ptype_base[hash];
                ptype_base[hash] = pt;
        }
}


/*
 *      Remove a protocol ID from the list.
 */

void dev_remove_pack(struct packet_type *pt)
{
        struct packet_type **pt1;

        if (pt->type == htons(ETH_P_ALL))
        {
                dev_nit--;
                pt1 = &ptype_all;
        }
        else
                pt1 = &ptype_base[ntohs(pt->type) & 15];

        for (; (*pt1) != NULL; pt1 = &((*pt1)->next))
        {
                if (pt == (*pt1))
                {
                        *pt1 = pt->next;
                        return;
                }
        }
        printk("dev_remove_pack: %p not found.\n", pt);
}
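
/*
 *      Usage sketch (illustrative only, kept out of the build with #if 0):
 *      a protocol module registers a receive handler by filling in a
 *      struct packet_type and calling dev_add_pack(). The names below
 *      (my_proto_rcv, my_proto_init) are hypothetical; a real protocol
 *      uses its own ETH_P_* value.
 */
#if 0
static int my_proto_rcv(struct sk_buff *skb, struct device *dev,
                        struct packet_type *pt)
{
        /* ... examine the frame ... */
        kfree_skb(skb, FREE_READ);      /* Drop it when we are done */
        return 0;
}

static struct packet_type my_proto_type;

void my_proto_init(void)
{
        my_proto_type.type = htons(ETH_P_IP);   /* Hypothetical: pick your own protocol */
        my_proto_type.dev = NULL;               /* NULL matches any device */
        my_proto_type.func = my_proto_rcv;
        my_proto_type.data = NULL;              /* Private pointer, often a struct sock * */
        dev_add_pack(&my_proto_type);
        /* ... and dev_remove_pack(&my_proto_type) on shutdown. */
}
#endif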

/*****************************************************************************************

                          Device Interface Subroutines

******************************************************************************************/

/*
 *      Find an interface by name.
 */

struct device *dev_get(const char *name)
{
        struct device *dev;

        for (dev = dev_base; dev != NULL; dev = dev->next)
        {
                if (strcmp(dev->name, name) == 0)
                        return(dev);
        }
        return(NULL);
}


/*
 *      Prepare an interface for use.
 */

int dev_open(struct device *dev)
{
        int ret = 0;

        /*
         *      Call the device's private open method.
         */
        if (dev->open)
                ret = dev->open(dev);

        /*
         *      If the open succeeded, set the flags.
         */

        if (ret == 0)
        {
                dev->flags |= (IFF_UP | IFF_RUNNING);
                /*
                 *      Initialise the multicasting status.
                 */
                dev_mc_upload(dev);
                notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
        }
        return(ret);
}


/*
 *      Completely shut down an interface.
 */

int dev_close(struct device *dev)
{
        int ct = 0;

        /*
         *      Call the device-specific close routine. This cannot fail.
         *      Only do it if the device is currently UP.
         */

        if ((dev->flags & IFF_UP) && dev->stop)
                dev->stop(dev);

        /*
         *      The device is now down.
         */

        dev->flags &= ~(IFF_UP | IFF_RUNNING);

        /*
         *      Tell people we are going down.
         */
        notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
        /*
         *      Flush the multicast chain.
         */
        dev_mc_discard(dev);
        /*
         *      Blank the IP addresses.
         */
        dev->pa_addr = 0;
        dev->pa_dstaddr = 0;
        dev->pa_brdaddr = 0;
        dev->pa_mask = 0;
        /*
         *      Purge any packets queued while the link was up.
         */
        while (ct < DEV_NUMBUFFS)
        {
                struct sk_buff *skb;

                while ((skb = skb_dequeue(&dev->buffs[ct])) != NULL)
                        if (skb->free)
                                kfree_skb(skb, FREE_WRITE);
                ct++;
        }
        return(0);
}


/*
 *      Device change register/unregister. These are not inline or static
 *      as we export them to the world.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
        return notifier_chain_register(&netdev_chain, nb);
}

int unregister_netdevice_notifier(struct notifier_block *nb)
{
        return notifier_chain_unregister(&netdev_chain, nb);
}
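
/*
 *      Usage sketch (illustrative only, not built): a subsystem that wants
 *      to hear about interfaces coming up or going down hangs a
 *      notifier_block on the chain. The names are hypothetical, and the
 *      exact notifier_call prototype is whatever <linux/notifier.h>
 *      declares; the two-argument (event, pointer) form is assumed here.
 */
#if 0
static int my_netdev_event(unsigned long event, void *ptr)
{
        struct device *dev = (struct device *) ptr;

        if (event == NETDEV_DOWN)
                printk("my_watcher: %s is going down\n", dev->name);
        return NOTIFY_DONE;
}

static struct notifier_block my_netdev_notifier;

void my_watcher_init(void)
{
        my_netdev_notifier.notifier_call = my_netdev_event;
        my_netdev_notifier.priority = 0;
        register_netdevice_notifier(&my_netdev_notifier);
}
#endif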

/*
 *      Send (or queue for sending) a packet.
 *
 *      IMPORTANT: When this is called to resend frames, the caller MUST
 *      already have locked the sk_buff. Apart from that we do the
 *      rest of the magic.
 */

void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
        unsigned long flags;
        struct packet_type *ptype;
        int where = 0;          /* Used to say if the packet should go  */
                                /* at the front or the back of the      */
                                /* queue - front is a retransmit try    */

        if (pri >= 0 && !skb_device_locked(skb))
                skb_device_lock(skb);   /* Shove a lock on the frame */
#if CONFIG_SKB_CHECK
        IS_SKB(skb);
#endif
        skb->dev = dev;

        /*
         *      Negative priority is used to flag a frame that is being pulled from the
         *      queue front as a retransmit attempt. It therefore goes back on the queue
         *      start on a failure.
         */

        if (pri < 0)
        {
                pri = -pri - 1;
                where = 1;
        }

#ifdef CONFIG_NET_DEBUG
        if (pri >= DEV_NUMBUFFS)
        {
                printk("bad priority in dev_queue_xmit.\n");
                pri = 1;
        }
#endif

        /*
         *      If the address has not been resolved, call the device header rebuilder.
         *      This can cover all protocols and technically not just ARP either.
         */

        if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
                return;
        }

        save_flags(flags);
        cli();
        if (!where)             /* Always keep order. It helps other hosts
                                   far more than it costs us */
        {
                skb_queue_tail(dev->buffs + pri, skb);
                skb_device_unlock(skb); /* Buffer is on the device queue and can be freed safely */
                skb = skb_dequeue(dev->buffs + pri);
                skb_device_lock(skb);   /* New buffer needs locking down */
        }
        restore_flags(flags);

        /* Copy outgoing packets to any sniffer packet handlers */
        if (!where && dev_nit)
        {
                skb->stamp = xtime;
                for (ptype = ptype_all; ptype != NULL; ptype = ptype->next)
                {
                        /* Never send packets back to the socket
                         * they originated from - MvS (miquels@drinkel.ow.org)
                         */
                        if ((ptype->dev == dev || !ptype->dev) &&
                            ((struct sock *) ptype->data != skb->sk))
                        {
                                struct sk_buff *skb2;
                                if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
                                        break;
                                skb2->h.raw = skb2->data + dev->hard_header_len;
                                skb2->mac.raw = skb2->data;
                                ptype->func(skb2, skb->dev, ptype);
                        }
                }
        }

        start_bh_atomic();
        if (dev->hard_start_xmit(skb, dev) == 0) {
                /*
                 *      The packet is now solely the responsibility of the driver.
                 */
                end_bh_atomic();
                return;
        }
        end_bh_atomic();

        /*
         *      Transmission failed, put the skb back onto a list. Once on the list it's safe and
         *      no longer device locked (it can be freed safely from the device queue).
         */
        cli();
        skb_device_unlock(skb);
        skb_queue_head(dev->buffs + pri, skb);
        restore_flags(flags);
}
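
/*
 *      Caller-side sketch (illustrative only, not built): a protocol that
 *      has built a complete frame hands it over like this. The function
 *      name is hypothetical. Priority 0 is the most urgent of the
 *      DEV_NUMBUFFS queues; higher-level callers typically pass a
 *      per-socket priority instead.
 */
#if 0
static void my_proto_send(struct sk_buff *skb, struct device *dev)
{
        skb->dev = dev;
        skb->free = 1;          /* May be freed once it has been sent */
        dev_queue_xmit(skb, dev, 0);
}
#endif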

/*
 *      Receive a packet from a device driver and queue it for the upper
 *      (protocol) levels. It always succeeds. This is the recommended
 *      interface to use.
 */

void netif_rx(struct sk_buff *skb)
{
        static int dropping = 0;

        /*
         *      Any received buffers are un-owned and should be discarded
         *      when freed. These will be updated later as the frames get
         *      owners.
         */
        skb->sk = NULL;
        skb->free = 1;
        if (skb->stamp.tv_sec == 0)
                skb->stamp = xtime;

        /*
         *      Check that we aren't overdoing things.
         */

        if (!backlog_size)
                dropping = 0;
        else if (backlog_size > 300)
                dropping = 1;

        if (dropping)
        {
                kfree_skb(skb, FREE_READ);
                return;
        }

        /*
         *      Add it to the "backlog" queue.
         */
#if CONFIG_SKB_CHECK
        IS_SKB(skb);
#endif
        skb_queue_tail(&backlog, skb);
        backlog_size++;

        /*
         *      If any packet arrived, mark it for processing after the
         *      hardware interrupt returns.
         */

#ifdef CONFIG_NET_RUNONIRQ      /* Don't enable yet, needs some driver mods */
        net_bh();
#else
        mark_bh(NET_BH);
#endif
        return;
}
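
/*
 *      Driver-side sketch (illustrative only, not built): the recommended
 *      receive path. The driver copies the frame out of its hardware
 *      buffer, tags it, and calls netif_rx(). The driver name is
 *      hypothetical; eth_type_trans() is the usual helper for setting
 *      skb->protocol on Ethernet.
 */
#if 0
static void my_driver_rx(struct device *dev, unsigned char *buf, int len)
{
        struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

        if (skb == NULL)
                return;                 /* Drop silently under memory pressure */
        skb->dev = dev;
        memcpy(skb_put(skb, len), buf, len);
        skb->protocol = eth_type_trans(skb, dev);
        netif_rx(skb);
}
#endif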


/*
 *      The old interface to fetch a packet from a device driver.
 *      This function is the base level entry point for all drivers that
 *      want to send a packet to the upper (protocol) levels. It takes
 *      care of de-multiplexing the packet to the various modules based
 *      on their protocol ID.
 *
 *      Return values:  1 <- exit I can't do any more
 *                      0 <- feed me more (i.e. "done", "OK").
 *
 *      This function is OBSOLETE and should not be used by any new
 *      device.
 */

int dev_rint(unsigned char *buff, long len, int flags, struct device *dev)
{
        static int dropping = 0;
        struct sk_buff *skb = NULL;
        unsigned char *to;
        int amount, left;
        int len2;

        if (dev == NULL || buff == NULL || len <= 0)
                return(1);

        if (flags & IN_SKBUFF)
        {
                skb = (struct sk_buff *) buff;
        }
        else
        {
                if (dropping)
                {
                        if (skb_peek(&backlog) != NULL)
                                return(1);
                        printk("INET: dev_rint: no longer dropping packets.\n");
                        dropping = 0;
                }

                skb = alloc_skb(len, GFP_ATOMIC);
                if (skb == NULL)
                {
                        printk("dev_rint: packet dropped on %s (no memory) !\n",
                               dev->name);
                        dropping = 1;
                        return(1);
                }

                /*
                 *      First we copy the packet into a buffer, and save it for later. We
                 *      in effect handle the incoming data as if it were from a circular buffer.
                 */

                to = skb_put(skb, len);
                left = len;

                len2 = len;
                while (len2 > 0)
                {
                        amount = min(len2, (unsigned long) dev->rmem_end -
                                           (unsigned long) buff);
                        memcpy(to, buff, amount);
                        len2 -= amount;
                        left -= amount;
                        buff += amount;
                        to += amount;
                        if ((unsigned long) buff == dev->rmem_end)
                                buff = (unsigned char *) dev->rmem_start;
                }
        }

        /*
         *      Tag the frame and kick it to the proper receive routine.
         */

        skb->dev = dev;
        skb->free = 1;

        netif_rx(skb);
        /*
         *      OK, all done.
         */
        return(0);
}


/*
 *      This routine causes all interfaces to try to send some data.
 */

void dev_transmit(void)
{
        struct device *dev;

        for (dev = dev_base; dev != NULL; dev = dev->next)
        {
                if (dev->flags != 0 && !dev->tbusy) {
                        /*
                         *      Kick the device.
                         */
                        dev_tint(dev);
                }
        }
}


/**********************************************************************************

                          Receive Queue Processor

***********************************************************************************/

/*
 *      This is a single non-reentrant routine which takes the received packet
 *      queue and throws it at the networking layers in the hope that something
 *      useful will emerge.
 */

volatile unsigned long in_bh = 0;       /* Non-reentrant remember */

int in_net_bh()                         /* Used by timer.c */
{
        return(in_bh == 0 ? 0 : 1);
}

/*
 *      When we are called the queue is ready to grab, the interrupts are
 *      on and hardware can interrupt and queue to the receive queue as we
 *      run with no problems.
 *      This is run as a bottom half after an interrupt handler that does
 *      mark_bh(NET_BH);
 */

void net_bh(void *tmp)
{
        struct sk_buff *skb;
        struct packet_type *ptype;
        struct packet_type *pt_prev;
        unsigned short type;

        /*
         *      Atomically check and mark our BUSY state.
         */

        if (set_bit(1, (void *) &in_bh))
                return;

        /*
         *      Can we send anything now? We want to clear the
         *      decks for any more sends that get done as we
         *      process the input. This also minimises the
         *      latency on a transmit interrupt bh.
         */

        dev_transmit();

        /*
         *      Is there any data left to process? This may occur because a
         *      mark_bh() is done after we empty the queue, including by a
         *      device which does a mark_bh() just after queueing.
         */

        cli();

        /*
         *      While the queue is not empty...
         */

        while ((skb = skb_dequeue(&backlog)) != NULL)
        {
                /*
                 *      We have a packet. Therefore the queue has shrunk.
                 */
                backlog_size--;

                sti();

                /*
                 *      Bump the pointer to the next structure.
                 *
                 *      On entry to the protocol layer, skb->data and
                 *      skb->h.raw point to the MAC and encapsulated data.
                 */

                skb->h.raw = skb->data;

                /*
                 *      Fetch the packet protocol ID.
                 */

                type = skb->protocol;

                /*
                 *      We got a packet ID. Now loop over the "known protocols"
                 *      lists. There are two: the ptype_all list of taps (normally empty)
                 *      and the main protocol list which is hashed perfectly for normal protocols.
                 */
                pt_prev = NULL;
                for (ptype = ptype_all; ptype != NULL; ptype = ptype->next)
                {
                        if (pt_prev)
                        {
                                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                                if (skb2)
                                        pt_prev->func(skb2, skb->dev, pt_prev);
                        }
                        pt_prev = ptype;
                }

                for (ptype = ptype_base[ntohs(type) & 15]; ptype != NULL; ptype = ptype->next)
                {
                        if (ptype->type == type && (!ptype->dev || ptype->dev == skb->dev))
                        {
                                /*
                                 *      We already have a match queued. Deliver
                                 *      to it and then remember the new match.
                                 */
                                if (pt_prev)
                                {
                                        struct sk_buff *skb2;

                                        skb2 = skb_clone(skb, GFP_ATOMIC);

                                        /*
                                         *      Kick the protocol handler. This should be fast
                                         *      and efficient code.
                                         */

                                        if (skb2)
                                                pt_prev->func(skb2, skb->dev, pt_prev);
                                }
                                /* Remember the current last to do */
                                pt_prev = ptype;
                        }
                }       /* End of protocol list loop */

                /*
                 *      Is there a last item to send to?
                 */

                if (pt_prev)
                        pt_prev->func(skb, skb->dev, pt_prev);

                /*
                 *      Has an unknown packet been received?
                 */

                else
                        kfree_skb(skb, FREE_WRITE);

                /*
                 *      Again, see if we can transmit anything now.
                 *      [Ought to take this out judging by tests it slows
                 *      us down not speeds us up]
                 */
#ifdef CONFIG_XMIT_EVERY
                dev_transmit();
#endif
                cli();
        }       /* End of queue loop */

        /*
         *      We have emptied the queue.
         */

        in_bh = 0;
        sti();

        /*
         *      One last output flush.
         */

        dev_transmit();
}


/*
 *      This routine is called when a device driver (i.e. an
 *      interface) is ready to transmit a packet.
 */

void dev_tint(struct device *dev)
{
        int i;
        struct sk_buff *skb;
        unsigned long flags;

        save_flags(flags);
        /*
         *      Work the queues in priority order.
         */

        for (i = 0; i < DEV_NUMBUFFS; i++)
        {
                /*
                 *      Pull packets from the queue.
                 */

                cli();
                while ((skb = skb_dequeue(&dev->buffs[i])) != NULL)
                {
                        /*
                         *      Stop anyone freeing the buffer while we retransmit it.
                         */
                        skb_device_lock(skb);
                        restore_flags(flags);
                        /*
                         *      Feed them to the output stage and if it fails
                         *      indicate they re-queue at the front.
                         */
                        dev_queue_xmit(skb, dev, -i - 1);
                        /*
                         *      If we can take no more then stop here.
                         */
                        if (dev->tbusy)
                                return;
                        cli();
                }
        }
        restore_flags(flags);
}


/*
 *      Perform a SIOCGIFCONF call. This structure will change
 *      size shortly, and there is nothing I can do about it.
 *      Thus we will need a 'compatibility mode'.
 */

static int dev_ifconf(char *arg)
{
        struct ifconf ifc;
        struct ifreq ifr;
        struct device *dev;
        char *pos;
        int len;
        int err;

        /*
         *      Fetch the caller's info block.
         */

        err = verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
        if (err)
                return err;
        memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
        len = ifc.ifc_len;
        pos = ifc.ifc_buf;

        /*
         *      We now walk the device list filling each active device
         *      into the array.
         */

        err = verify_area(VERIFY_WRITE, pos, len);
        if (err)
                return err;

        /*
         *      Loop over the interfaces, and write an info block for each.
         */

        for (dev = dev_base; dev != NULL; dev = dev->next)
        {
                if (!(dev->flags & IFF_UP))     /* Downed devices don't count */
                        continue;
                memset(&ifr, 0, sizeof(struct ifreq));
                strcpy(ifr.ifr_name, dev->name);
                (*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
                (*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;

                /*
                 *      Have we run out of space here?
                 */

                if (len < sizeof(struct ifreq))
                        break;

                /*
                 *      Write this block to the caller's space.
                 */

                memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
                pos += sizeof(struct ifreq);
                len -= sizeof(struct ifreq);
        }

        /*
         *      All done. Write the updated control block back to the caller.
         */

        ifc.ifc_len = (pos - ifc.ifc_buf);
        ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
        memcpy_tofs(arg, &ifc, sizeof(struct ifconf));

        /*
         *      Report how much was filled in.
         */

        return(pos - arg);
}
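
/*
 *      User-space sketch (illustrative only, not built): how a program
 *      consumes SIOCGIFCONF. Error handling is minimal and the buffer
 *      size is an arbitrary choice.
 */
#if 0
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

void list_interfaces(int sock)
{
        struct ifreq reqs[16];          /* Arbitrary upper bound */
        struct ifconf ifc;
        int i, n;

        ifc.ifc_len = sizeof(reqs);
        ifc.ifc_req = reqs;
        if (ioctl(sock, SIOCGIFCONF, &ifc) < 0)
                return;
        n = ifc.ifc_len / sizeof(struct ifreq);
        for (i = 0; i < n; i++)
                printf("%s\n", reqs[i].ifr_name);
}
#endif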


/*
 *      This is invoked by the /proc filesystem handler to display a device
 *      in detail.
 */

static int sprintf_stats(char *buffer, struct device *dev)
{
        struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev) : NULL);
        int size;

        if (stats)
                size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
                               dev->name,
                               stats->rx_packets, stats->rx_errors,
                               stats->rx_dropped + stats->rx_missed_errors,
                               stats->rx_fifo_errors,
                               stats->rx_length_errors + stats->rx_over_errors
                               + stats->rx_crc_errors + stats->rx_frame_errors,
                               stats->tx_packets, stats->tx_errors, stats->tx_dropped,
                               stats->tx_fifo_errors, stats->collisions,
                               stats->tx_carrier_errors + stats->tx_aborted_errors
                               + stats->tx_window_errors + stats->tx_heartbeat_errors);
        else
                size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);

        return size;
}

/*
 *      Called from the PROCfs module. This now uses the new arbitrary sized
 *      /proc/net interface to create /proc/net/dev.
 */

int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
        int len = 0;
        off_t begin = 0;
        off_t pos = 0;
        int size;
        struct device *dev;

        size = sprintf(buffer, "Inter-|   Receive                  |  Transmit\n"
                       " face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");

        pos += size;
        len += size;

        for (dev = dev_base; dev != NULL; dev = dev->next)
        {
                size = sprintf_stats(buffer + len, dev);
                len += size;
                pos = begin + len;

                if (pos < offset)
                {
                        len = 0;
                        begin = pos;
                }
                if (pos > offset + length)
                        break;
        }

        *start = buffer + (offset - begin);     /* Start of wanted data */
        len -= (offset - begin);                /* Start slop */
        if (len > length)
                len = length;                   /* Ending slop */
        return len;
}
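
/*
 *      The resulting /proc/net/dev looks roughly like this (figures
 *      invented for illustration):
 *
 *      Inter-|   Receive                  |  Transmit
 *       face |packets errs drop fifo frame|packets errs drop fifo colls carrier
 *           lo:    536    0    0    0    0      536    0    0    0     0    0
 *         eth0:  12345    1    0    0    0     9876    0    0    0     3    0
 */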


/*
 *      This checks bitmasks for the ioctl calls for devices.
 */

static inline int bad_mask(unsigned long mask, unsigned long addr)
{
        if (addr & (mask = ~mask))
                return 1;
        mask = ntohl(mask);
        if (mask & (mask + 1))
                return 1;
        return 0;
}
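
/*
 *      Worked example: 255.255.255.0 stored in network order complements
 *      to 0.0.0.255, which ntohl() turns into 0x000000FF. Then
 *      0x000000FF & 0x00000100 == 0, so the mask is accepted (its one
 *      bits are contiguous from the top). A mask such as 255.0.255.0
 *      complements to 0.255.0.255, i.e. 0x00FF00FF after ntohl(), and
 *      0x00FF00FF & 0x00FF0100 == 0x00FF0000 != 0, so it is rejected.
 */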

/*
 *      Perform the SIOCxIFxxx calls.
 *
 *      The socket layer has seen an ioctl that the address family thinks is
 *      for the device. At this point we get invoked to make a decision.
 */

static int dev_ifsioc(void *arg, unsigned int getset)
{
        struct ifreq ifr;
        struct device *dev;
        int ret;

        /*
         *      Fetch the caller's info block into kernel space.
         */

        int err = verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
        if (err)
                return err;

        memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));

        /*
         *      See which interface the caller is talking about.
         */

        if ((dev = dev_get(ifr.ifr_name)) == NULL)
                return(-ENODEV);

        switch (getset)
        {
                case SIOCGIFFLAGS:      /* Get interface flags */
                        ifr.ifr_flags = dev->flags;
                        goto rarok;

                case SIOCSIFFLAGS:      /* Set interface flags */
                {
                        int old_flags = dev->flags;

                        /*
                         *      We are not allowed to potentially close/unload
                         *      a device until we get this lock.
                         */

                        dev_lock_wait();

                        /*
                         *      Set the flags on our device.
                         */

                        dev->flags = (ifr.ifr_flags & (
                                IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
                                IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
                                IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER
                                | IFF_MULTICAST)) | (dev->flags & IFF_UP);

                        /*
                         *      Load in the correct multicast list now the flags have changed.
                         */

                        dev_mc_upload(dev);

                        /*
                         *      Have we downed the interface? We handle IFF_UP ourselves
                         *      according to user attempts to set it, rather than blindly
                         *      setting it.
                         */

                        if ((old_flags ^ ifr.ifr_flags) & IFF_UP)       /* Bit is different? */
                        {
                                if (old_flags & IFF_UP)         /* Gone down */
                                        ret = dev_close(dev);
                                else                            /* Come up */
                                {
                                        ret = dev_open(dev);
                                        if (ret < 0)
                                                dev->flags &= ~IFF_UP;  /* Open failed */
                                }
                        }
                        else
                                ret = 0;
                        /*
                         *      Load in the correct multicast list again, now the flags have changed.
                         */

                        dev_mc_upload(dev);
                }
                break;

                case SIOCGIFADDR:       /* Get interface address (and family) */
                        if (ifr.ifr_addr.sa_family == AF_UNSPEC)
                        {
                                memcpy(ifr.ifr_hwaddr.sa_data, dev->dev_addr, MAX_ADDR_LEN);
                                ifr.ifr_hwaddr.sa_family = dev->type;
                                goto rarok;
                        }
                        else
                        {
                                (*(struct sockaddr_in *)
                                        &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
                                (*(struct sockaddr_in *)
                                        &ifr.ifr_addr).sin_family = dev->family;
                                (*(struct sockaddr_in *)
                                        &ifr.ifr_addr).sin_port = 0;
                        }
                        goto rarok;

                case SIOCSIFADDR:       /* Set interface address (and family) */

                        /*
                         *      BSDism. SIOCSIFADDR with family=AF_UNSPEC sets the
                         *      physical address. We can cope with this now.
                         */

                        if (ifr.ifr_addr.sa_family == AF_UNSPEC)
                        {
                                if (dev->set_mac_address == NULL)
                                        return -EOPNOTSUPP;
                                ret = dev->set_mac_address(dev, &ifr.ifr_addr);
                        }
                        else
                        {
                                dev->pa_addr = (*(struct sockaddr_in *)
                                        &ifr.ifr_addr).sin_addr.s_addr;
                                dev->family = ifr.ifr_addr.sa_family;

#ifdef CONFIG_INET
                                /*
                                 *      This is naughty. When net-032e comes out it wants
                                 *      moving into the net032 code, not the kernel. Till
                                 *      then it can sit here (SIGH).
                                 */
                                dev->pa_mask = ip_get_mask(dev->pa_addr);
#endif
                                dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
                                ret = 0;
                        }
                        break;

                case SIOCGIFBRDADDR:    /* Get the broadcast address */
                        (*(struct sockaddr_in *)
                                &ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
                        (*(struct sockaddr_in *)
                                &ifr.ifr_broadaddr).sin_family = dev->family;
                        (*(struct sockaddr_in *)
                                &ifr.ifr_broadaddr).sin_port = 0;
                        goto rarok;

                case SIOCSIFBRDADDR:    /* Set the broadcast address */
                        dev->pa_brdaddr = (*(struct sockaddr_in *)
                                &ifr.ifr_broadaddr).sin_addr.s_addr;
                        ret = 0;
                        break;

                case SIOCGIFDSTADDR:    /* Get the destination address (for point-to-point links) */
                        (*(struct sockaddr_in *)
                                &ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
                        (*(struct sockaddr_in *)
                                &ifr.ifr_dstaddr).sin_family = dev->family;
                        (*(struct sockaddr_in *)
                                &ifr.ifr_dstaddr).sin_port = 0;
                        goto rarok;

                case SIOCSIFDSTADDR:    /* Set the destination address (for point-to-point links) */
                        dev->pa_dstaddr = (*(struct sockaddr_in *)
                                &ifr.ifr_dstaddr).sin_addr.s_addr;
                        ret = 0;
                        break;

                case SIOCGIFNETMASK:    /* Get the netmask for the interface */
                        (*(struct sockaddr_in *)
                                &ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
                        (*(struct sockaddr_in *)
                                &ifr.ifr_netmask).sin_family = dev->family;
                        (*(struct sockaddr_in *)
                                &ifr.ifr_netmask).sin_port = 0;
                        goto rarok;

                case SIOCSIFNETMASK:    /* Set the netmask for the interface */
                {
                        unsigned long mask = (*(struct sockaddr_in *)
                                &ifr.ifr_netmask).sin_addr.s_addr;
                        ret = -EINVAL;
                        /*
                         *      The mask we set must be legal.
                         */
                        if (bad_mask(mask, 0))
                                break;
                        dev->pa_mask = mask;
                        ret = 0;
                }
                break;

                case SIOCGIFMETRIC:     /* Get the metric on the interface (currently unused) */
                        ifr.ifr_metric = dev->metric;
                        goto rarok;

                case SIOCSIFMETRIC:     /* Set the metric on the interface (currently unused) */
                        dev->metric = ifr.ifr_metric;
                        ret = 0;
                        break;

                case SIOCGIFMTU:        /* Get the MTU of a device */
                        ifr.ifr_mtu = dev->mtu;
                        goto rarok;

                case SIOCSIFMTU:        /* Set the MTU of a device */

                        /*
                         *      The MTU must be at least 68 bytes.
                         */

                        if (ifr.ifr_mtu < 68)
                                return -EINVAL;
                        dev->mtu = ifr.ifr_mtu;
                        ret = 0;
                        break;

                case SIOCGIFMEM:        /* Get the per device memory space. We can add this but
                                           currently do not support it */
                        ret = -EINVAL;
                        break;

                case SIOCSIFMEM:        /* Set the per device memory buffer space.
                                           Not applicable in our case */
                        ret = -EINVAL;
                        break;

                case SIOCGIFHWADDR:
                        memcpy(ifr.ifr_hwaddr.sa_data, dev->dev_addr, MAX_ADDR_LEN);
                        ifr.ifr_hwaddr.sa_family = dev->type;
                        goto rarok;

                case SIOCSIFHWADDR:
                        if (dev->set_mac_address == NULL)
                                return -EOPNOTSUPP;
                        if (ifr.ifr_hwaddr.sa_family != dev->type)
                                return -EINVAL;
                        ret = dev->set_mac_address(dev, &ifr.ifr_hwaddr);
                        break;

                case SIOCGIFMAP:
                        ifr.ifr_map.mem_start = dev->mem_start;
                        ifr.ifr_map.mem_end = dev->mem_end;
                        ifr.ifr_map.base_addr = dev->base_addr;
                        ifr.ifr_map.irq = dev->irq;
                        ifr.ifr_map.dma = dev->dma;
                        ifr.ifr_map.port = dev->if_port;
                        goto rarok;

                case SIOCSIFMAP:
                        if (dev->set_config == NULL)
                                return -EOPNOTSUPP;
                        return dev->set_config(dev, &ifr.ifr_map);

                case SIOCADDMULTI:
                        if (dev->set_multicast_list == NULL)
                                return -EINVAL;
                        if (ifr.ifr_hwaddr.sa_family != AF_UNSPEC)
                                return -EINVAL;
                        dev_mc_add(dev, ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
                        return 0;

                case SIOCDELMULTI:
                        if (dev->set_multicast_list == NULL)
                                return -EINVAL;
                        if (ifr.ifr_hwaddr.sa_family != AF_UNSPEC)
                                return -EINVAL;
                        dev_mc_delete(dev, ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
                        return 0;

                /*
                 *      Unknown or private ioctl.
                 */

                default:
                        if ((getset >= SIOCDEVPRIVATE) &&
                            (getset <= (SIOCDEVPRIVATE + 15))) {
                                if (dev->do_ioctl == NULL)
                                        return -EOPNOTSUPP;
                                ret = dev->do_ioctl(dev, &ifr, getset);
                                memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
                                break;
                        }

                        ret = -EINVAL;
        }
        return(ret);
/*
 *      The bulk of the calls that return an ifreq and 0 (saves memory).
 */
rarok:
        memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
        return 0;
}


/*
 *      This function handles all "interface"-type I/O control requests.
 *      The actual 'doing' part of this is dev_ifsioc above.
 */

int dev_ioctl(unsigned int cmd, void *arg)
{
        switch (cmd)
        {
                case SIOCGIFCONF:
                        (void) dev_ifconf((char *) arg);
                        return 0;

                /*
                 *      Ioctl calls that can be done by all.
                 */

                case SIOCGIFFLAGS:
                case SIOCGIFADDR:
                case SIOCGIFDSTADDR:
                case SIOCGIFBRDADDR:
                case SIOCGIFNETMASK:
                case SIOCGIFMETRIC:
                case SIOCGIFMTU:
                case SIOCGIFMEM:
                case SIOCGIFHWADDR:
                case SIOCSIFHWADDR:
                case SIOCGIFSLAVE:
                case SIOCGIFMAP:
                        return dev_ifsioc(arg, cmd);

                /*
                 *      Ioctl calls requiring the power of a superuser.
                 */

                case SIOCSIFFLAGS:
                case SIOCSIFADDR:
                case SIOCSIFDSTADDR:
                case SIOCSIFBRDADDR:
                case SIOCSIFNETMASK:
                case SIOCSIFMETRIC:
                case SIOCSIFMTU:
                case SIOCSIFMEM:
                case SIOCSIFMAP:
                case SIOCSIFSLAVE:
                case SIOCADDMULTI:
                case SIOCDELMULTI:
                        if (!suser())
                                return -EPERM;
                        return dev_ifsioc(arg, cmd);

                case SIOCSIFLINK:
                        return -EINVAL;

                /*
                 *      Unknown or private ioctl.
                 */

                default:
                        if ((cmd >= SIOCDEVPRIVATE) &&
                            (cmd <= (SIOCDEVPRIVATE + 15))) {
                                return dev_ifsioc(arg, cmd);
                        }
                        return -EINVAL;
        }
}
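
/*
 *      User-space sketch (illustrative only, not built): fetching the
 *      address of one interface with SIOCGIFADDR, the per-interface
 *      counterpart of the SIOCGIFCONF example above. The function name
 *      is hypothetical.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <net/if.h>

unsigned long get_if_addr(int sock, const char *name)
{
        struct ifreq ifr;

        memset(&ifr, 0, sizeof(ifr));
        strcpy(ifr.ifr_name, name);
        if (ioctl(sock, SIOCGIFADDR, &ifr) < 0)
                return 0;
        return ((struct sockaddr_in *) &ifr.ifr_addr)->sin_addr.s_addr;
}
#endif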


/*
 *      Initialize the DEV module. At boot time this walks the device list and
 *      unhooks any devices that fail to initialise (normally hardware not
 *      present) and leaves us with a valid list of present and active devices.
 */

extern int lance_init(void);
extern int pi_init(void);
extern int dec21040_init(void);

int net_dev_init(void)
{
        struct device *dev, **dp;

        /*
         *      This is VeryUgly(tm).
         *
         *      Some devices want to be initialized early..
         */

#if defined(CONFIG_LANCE)
        lance_init();
#endif
#if defined(CONFIG_PI)
        pi_init();
#endif
#if defined(CONFIG_DEC_ELCP)
        dec21040_init();
#endif

        /*
         *      Add the devices.
         *      If the call to dev->init fails, the dev is removed
         *      from the chain, disconnecting the device until the
         *      next reboot.
         */

        dp = &dev_base;
        while ((dev = *dp) != NULL)
        {
                int i;
                for (i = 0; i < DEV_NUMBUFFS; i++) {
                        skb_queue_head_init(dev->buffs + i);
                }

                if (dev->init && dev->init(dev))
                {
                        /*
                         *      It failed to come up. Unhook it.
                         */
                        *dp = dev->next;
                }
                else
                {
                        dp = &dev->next;
                }
        }

        proc_net_register(&(struct proc_dir_entry) {
                PROC_NET_DEV, 3, "dev",
                S_IFREG | S_IRUGO, 1, 0, 0,
                0, &proc_net_inode_operations,
                dev_get_info
        });

        bh_base[NET_BH].routine = net_bh;
        enable_bh(NET_BH);
        return 0;
}