/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dhinds@allegro.stanford.edu>
 *
 *	Changes:
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant stunts to
 *					keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before calling netif_rx. Saves
 *					a function call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close changes.
 *
 *	Cleaned up and recommented by Alan Cox 2nd April 1994. I hope to have
 *	the rest as well commented in the end.
 */

/*
 *	A lot of these includes will be going walkies very soon
 */

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 */

struct packet_type *ptype_base[16];
struct packet_type *ptype_all = NULL;		/* Taps */

/*
 *	Device list lock
 */

int dev_lockct = 0;

/*
 *	Our notifier list
 */

struct notifier_block *netdev_chain = NULL;

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the bottom half handler.
 */

static struct sk_buff_head backlog =
{
	(struct sk_buff *)&backlog, (struct sk_buff *)&backlog
#if CONFIG_SKB_CHECK
	,SK_HEAD_SKB
#endif
};

/*
 *	We don't overdo the queue or we will thrash memory badly.
 */

static int backlog_size = 0;

/*
 *	Return the lesser of the two values.
 */

static __inline__ unsigned long min(unsigned long a, unsigned long b)
{
	return (a < b) ? a : b;
}


/******************************************************************************************

		Protocol management and registration routines

*******************************************************************************************/

/*
 *	For efficiency
 */

static int dev_nit = 0;

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;
	if (pt->type == htons(ETH_P_ALL))
	{
		dev_nit++;
		pt->next = ptype_all;
		ptype_all = pt;
	}
	else
	{
		hash = ntohs(pt->type) & 15;
		pt->next = ptype_base[hash];
		ptype_base[hash] = pt;
	}
}


/*
 *	Remove a protocol ID from the list.
 */

void dev_remove_pack(struct packet_type *pt)
{
	struct packet_type **pt1;
	if (pt->type == htons(ETH_P_ALL))
	{
		dev_nit--;
		pt1 = &ptype_all;
	}
	else
		pt1 = &ptype_base[ntohs(pt->type) & 15];
	for (; (*pt1) != NULL; pt1 = &((*pt1)->next))
	{
		if (pt == (*pt1))
		{
			*pt1 = pt->next;
			return;
		}
	}
	printk("dev_remove_pack: %p not found.\n", pt);
}
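
/*
 *	Usage sketch (the names below are illustrative, not part of this
 *	file): a protocol module fills in a packet_type and registers it
 *	once at initialisation, removing it again on shutdown.
 *
 *		static int my_rcv(struct sk_buff *skb, struct device *dev,
 *				  struct packet_type *pt)
 *		{
 *			...			process, then free the buffer
 *			kfree_skb(skb, FREE_READ);
 *			return 0;
 *		}
 *
 *		static struct packet_type my_pt;
 *
 *		my_pt.type = htons(ETH_P_IP);	hashed by ntohs(type)&15 above
 *		my_pt.dev  = NULL;		NULL matches any device
 *		my_pt.func = my_rcv;
 *		dev_add_pack(&my_pt);
 *		...
 *		dev_remove_pack(&my_pt);
 *
 *	A type of htons(ETH_P_ALL) instead joins the ptype_all tap list and
 *	bumps dev_nit, so the handler also sees copies of outgoing frames.
 */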

/*****************************************************************************************

			Device Interface Subroutines

******************************************************************************************/

/*
 *	Find an interface by name.
 */

struct device *dev_get(const char *name)
{
	struct device *dev;

	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		if (strcmp(dev->name, name) == 0)
			return(dev);
	}
	return(NULL);
}
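
/*
 *	Example: dev_get("eth0") walks dev_base and returns the first
 *	device whose name matches exactly, or NULL if none is present:
 *
 *		struct device *dev = dev_get("eth0");
 *		if (dev == NULL)
 *			return -ENODEV;
 */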


/*
 *	Prepare an interface for use.
 */

int dev_open(struct device *dev)
{
	int ret = 0;

	/*
	 *	Call device private open method
	 */
	if (dev->open)
		ret = dev->open(dev);

	/*
	 *	If it opened OK then set the flags
	 */

	if (ret == 0)
	{
		dev->flags |= (IFF_UP | IFF_RUNNING);
		/*
		 *	Initialise multicasting status
		 */
		dev_mc_upload(dev);
		notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
	}
	return(ret);
}


/*
 *	Completely shut down an interface.
 */

int dev_close(struct device *dev)
{
	int ct = 0;

	/*
	 *	Call the device-specific close. This cannot fail.
	 *	Only do it if the device is UP.
	 */

	if ((dev->flags & IFF_UP) && dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~(IFF_UP|IFF_RUNNING);

	/*
	 *	Tell people we are going down
	 */
	notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
	/*
	 *	Flush the multicast chain
	 */
	dev_mc_discard(dev);
	/*
	 *	Blank the IP addresses
	 */
	dev->pa_addr = 0;
	dev->pa_dstaddr = 0;
	dev->pa_brdaddr = 0;
	dev->pa_mask = 0;
	/*
	 *	Purge any queued packets when we down the link
	 */
	while (ct < DEV_NUMBUFFS)
	{
		struct sk_buff *skb;
		while ((skb = skb_dequeue(&dev->buffs[ct])) != NULL)
			if (skb->free)
				kfree_skb(skb, FREE_WRITE);
		ct++;
	}
	return(0);
}


/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&netdev_chain, nb);
}

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&netdev_chain, nb);
}
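
/*
 *	Usage sketch (the callback is hypothetical; the exact notifier_call
 *	prototype is the one declared in <linux/notifier.h>): a subsystem
 *	that must track interfaces coming and going hangs a notifier_block
 *	on the chain and switches on the event code. The pointer argument
 *	passed down the chain here is the struct device concerned.
 *
 *		static int my_dev_event(unsigned long event, void *ptr)
 *		{
 *			struct device *dev = (struct device *) ptr;
 *			if (event == NETDEV_DOWN)
 *				...		forget anything using dev
 *			return NOTIFY_DONE;
 *		}
 *
 *		static struct notifier_block my_notifier;
 *
 *		my_notifier.notifier_call = my_dev_event;
 *		register_netdevice_notifier(&my_notifier);
 */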



/*
 *	Send (or queue for sending) a packet.
 *
 *	IMPORTANT: When this is called to resend frames, the caller MUST
 *	already have locked the sk_buff. Apart from that we do the
 *	rest of the magic.
 */

void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
	unsigned long flags;
	struct packet_type *ptype;
	int where = 0;		/* used to say if the packet should go	*/
				/* at the front or the back of the	*/
				/* queue - front is a retransmit try	*/

	if (pri >= 0 && !skb_device_locked(skb))
		skb_device_lock(skb);	/* Shove a lock on the frame */
#if CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	skb->dev = dev;

	/*
	 *	Negative priority is used to flag a frame that is being pulled from the
	 *	queue front as a retransmit attempt. It therefore goes back on the queue
	 *	start on a failure.
	 */

	if (pri < 0)
	{
		pri = -pri-1;
		where = 1;
	}

#ifdef CONFIG_NET_DEBUG
	if (pri >= DEV_NUMBUFFS)
	{
		printk("bad priority in dev_queue_xmit.\n");
		pri = 1;
	}
#endif

	/*
	 *	If the address has not been resolved, call the device header rebuilder.
	 *	This can cover all protocols, not just ARP.
	 */

	if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
		return;
	}

	save_flags(flags);
	cli();
	if (!where)		/* Always keep order. It helps other hosts
				   far more than it costs us */
	{
		skb_queue_tail(dev->buffs + pri, skb);
		skb_device_unlock(skb);		/* Buffer is on the device queue and can be freed safely */
		skb = skb_dequeue(dev->buffs + pri);
		skb_device_lock(skb);		/* New buffer needs locking down */
	}
	restore_flags(flags);

	/* copy outgoing packets to any sniffer packet handlers */
	if (!where && dev_nit)
	{
		skb->stamp = xtime;
		for (ptype = ptype_all; ptype != NULL; ptype = ptype->next)
		{
			/* Never send packets back to the socket
			 * they originated from - MvS (miquels@drinkel.ow.org)
			 */
			if ((ptype->dev == dev || !ptype->dev) &&
			    ((struct sock *)ptype->data != skb->sk))
			{
				struct sk_buff *skb2;
				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
					break;
				skb2->h.raw = skb2->data + dev->hard_header_len;
				skb2->mac.raw = skb2->data;
				ptype->func(skb2, skb->dev, ptype);
			}
		}
	}
	start_bh_atomic();
	if (dev->hard_start_xmit(skb, dev) == 0) {
		/*
		 *	Packet is now solely the responsibility of the driver
		 */
		end_bh_atomic();
		return;
	}
	end_bh_atomic();

	/*
	 *	Transmission failed, put skb back into a list. Once on the list it's safe and
	 *	no longer device locked (it can be freed safely from the device queue)
	 */
	cli();
	skb_device_unlock(skb);
	skb_queue_head(dev->buffs + pri, skb);
	restore_flags(flags);
}
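
/*
 *	Calling convention sketch: a protocol hands over a freshly built
 *	frame with a non-negative priority selecting one of the
 *	DEV_NUMBUFFS bands in dev->buffs[],
 *
 *		dev_queue_xmit(skb, dev, 1);
 *
 *	while dev_tint() below re-submits a dequeued frame with pri set
 *	to -i - 1, so that on failure it is re-queued at the *front* of
 *	queue i rather than the back (the `where' flag above).
 */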

/*
 *	Receive a packet from a device driver and queue it for the upper
 *	(protocol) levels. It always succeeds. This is the recommended
 *	interface to use.
 */

void netif_rx(struct sk_buff *skb)
{
	static int dropping = 0;

	/*
	 *	Any received buffers are un-owned and should be discarded
	 *	when freed. These will be updated later as the frames get
	 *	owners.
	 */
	skb->sk = NULL;
	skb->free = 1;
	if (skb->stamp.tv_sec == 0)
		skb->stamp = xtime;

	/*
	 *	Check that we aren't overdoing things.
	 */

	if (!backlog_size)
		dropping = 0;
	else if (backlog_size > 300)
		dropping = 1;

	if (dropping)
	{
		kfree_skb(skb, FREE_READ);
		return;
	}

	/*
	 *	Add it to the "backlog" queue.
	 */
#if CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	skb_queue_tail(&backlog, skb);
	backlog_size++;

	/*
	 *	If any packet arrived, mark it for processing after the
	 *	hardware interrupt returns.
	 */

#ifdef CONFIG_NET_RUNONIRQ	/* Don't enable yet, needs some driver mods */
	net_bh();
#else
	mark_bh(NET_BH);
#endif
	return;
}
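
/*
 *	Driver-side sketch (illustrative, not from this file): a typical
 *	Ethernet receive interrupt handler of this vintage copies the
 *	frame into a fresh skb, tags its protocol and hands it over;
 *	netif_rx() queues it, marks NET_BH and never fails:
 *
 *		skb = alloc_skb(length, GFP_ATOMIC);
 *		if (skb == NULL)
 *			... count it as rx_dropped and return ...
 *		skb->dev = dev;
 *		memcpy(skb_put(skb, length), frame, length);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 */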


/*
 *	The old interface to fetch a packet from a device driver.
 *	This function is the base level entry point for all drivers that
 *	want to send a packet to the upper (protocol) levels. It takes
 *	care of de-multiplexing the packet to the various modules based
 *	on their protocol ID.
 *
 *	Return values:	1 <- exit I can't do any more
 *			0 <- feed me more (i.e. "done", "OK").
 *
 *	This function is OBSOLETE and should not be used by any new
 *	device.
 */

int dev_rint(unsigned char *buff, long len, int flags, struct device *dev)
{
	static int dropping = 0;
	struct sk_buff *skb = NULL;
	unsigned char *to;
	int amount, left;
	int len2;

	if (dev == NULL || buff == NULL || len <= 0)
		return(1);

	if (flags & IN_SKBUFF)
	{
		skb = (struct sk_buff *) buff;
	}
	else
	{
		if (dropping)
		{
			if (skb_peek(&backlog) != NULL)
				return(1);
			printk("INET: dev_rint: no longer dropping packets.\n");
			dropping = 0;
		}

		skb = alloc_skb(len, GFP_ATOMIC);
		if (skb == NULL)
		{
			printk("dev_rint: packet dropped on %s (no memory) !\n",
			       dev->name);
			dropping = 1;
			return(1);
		}

		/*
		 *	First we copy the packet into a buffer, and save it for later. We
		 *	in effect handle the incoming data as if it were from a circular buffer
		 */

		to = skb_put(skb, len);
		left = len;

		len2 = len;
		while (len2 > 0)
		{
			amount = min(len2, (unsigned long) dev->rmem_end -
				     (unsigned long) buff);
			memcpy(to, buff, amount);
			len2 -= amount;
			left -= amount;
			buff += amount;
			to += amount;
			if ((unsigned long) buff == dev->rmem_end)
				buff = (unsigned char *) dev->rmem_start;
		}
	}

	/*
	 *	Tag the frame and kick it to the proper receive routine
	 */

	skb->dev = dev;
	skb->free = 1;

	netif_rx(skb);
	/*
	 *	OK, all done.
	 */
	return(0);
}


/*
 *	This routine causes all interfaces to try to send some data.
 */

void dev_transmit(void)
{
	struct device *dev;

	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		if (dev->flags != 0 && !dev->tbusy) {
			/*
			 *	Kick the device
			 */
			dev_tint(dev);
		}
	}
}


/**********************************************************************************

			Receive Queue Processor

***********************************************************************************/

/*
 *	This is a single non-reentrant routine which takes the received packet
 *	queue and throws it at the networking layers in the hope that something
 *	useful will emerge.
 */

volatile char in_bh = 0;	/* Non-reentrant remember */

int in_net_bh()	/* Used by timer.c */
{
	return(in_bh == 0 ? 0 : 1);
}

/*
 *	When we are called the queue is ready to grab, the interrupts are
 *	on and hardware can interrupt and queue to the receive queue as we
 *	run with no problems.
 *	This is run as a bottom half after an interrupt handler that does
 *	mark_bh(NET_BH);
 */

void net_bh(void *tmp)
{
	struct sk_buff *skb;
	struct packet_type *ptype;
	struct packet_type *pt_prev;
	unsigned short type;

	/*
	 *	Atomically check and mark our BUSY state.
	 */

	if (set_bit(1, (void *)&in_bh))
		return;

	/*
	 *	Can we send anything now? We want to clear the
	 *	decks for any more sends that get done as we
	 *	process the input. This also minimises the
	 *	latency on a transmit interrupt bh.
	 */

	dev_transmit();

	/*
	 *	Is there any data left to process? This may occur because a
	 *	mark_bh() is done after we empty the queue, including by the
	 *	device that does a mark_bh() just after.
	 */

	cli();

	/*
	 *	While the queue is not empty
	 */

	while ((skb = skb_dequeue(&backlog)) != NULL)
	{
		/*
		 *	We have a packet. Therefore the queue has shrunk
		 */
		backlog_size--;

		sti();

		/*
		 *	Bump the pointer to the next structure.
		 *
		 *	On entry to the protocol layer, skb->data and
		 *	skb->h.raw point to the MAC and encapsulated data.
		 */

		skb->h.raw = skb->data;

		/*
		 *	Fetch the packet protocol ID.
		 */

		type = skb->protocol;

		/*
		 *	We got a packet ID. Now loop over the "known protocols"
		 *	list. There are two lists. The ptype_all list of taps (normally empty)
		 *	and the main protocol list which is hashed perfectly for normal protocols.
		 */
		pt_prev = NULL;
		for (ptype = ptype_all; ptype != NULL; ptype = ptype->next)
		{
			if (pt_prev)
			{
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					pt_prev->func(skb2, skb->dev, pt_prev);
			}
			pt_prev = ptype;
		}

		for (ptype = ptype_base[ntohs(type)&15]; ptype != NULL; ptype = ptype->next)
		{
			if (ptype->type == type && (!ptype->dev || ptype->dev == skb->dev))
			{
				/*
				 *	We already have a match queued. Deliver
				 *	to it and then remember the new match
				 */
				if (pt_prev)
				{
					struct sk_buff *skb2;

					skb2 = skb_clone(skb, GFP_ATOMIC);

					/*
					 *	Kick the protocol handler. This should be fast
					 *	and efficient code.
					 */

					if (skb2)
						pt_prev->func(skb2, skb->dev, pt_prev);
				}
				/* Remember the current last to do */
				pt_prev = ptype;
			}
		} /* End of protocol list loop */

		/*
		 *	Is there a last item to send to?
		 */

		if (pt_prev)
			pt_prev->func(skb, skb->dev, pt_prev);
		/*
		 *	Has an unknown packet been received?
		 */

		else
			kfree_skb(skb, FREE_WRITE);

		/*
		 *	Again, see if we can transmit anything now.
		 *	[Ought to take this out judging by tests it slows
		 *	us down not speeds us up]
		 */
#ifdef CONFIG_XMIT_EVERY
		dev_transmit();
#endif
		cli();
	} /* End of queue loop */

	/*
	 *	We have emptied the queue
	 */

	in_bh = 0;
	sti();

	/*
	 *	One last output flush.
	 */

	dev_transmit();
}


/*
 *	This routine is called when a device driver (i.e. an
 *	interface) is ready to transmit a packet.
 */

void dev_tint(struct device *dev)
{
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);
	/*
	 *	Work the queues in priority order
	 */

	for (i = 0; i < DEV_NUMBUFFS; i++)
	{
		/*
		 *	Pull packets from the queue
		 */

		cli();
		while ((skb = skb_dequeue(&dev->buffs[i])) != NULL)
		{
			/*
			 *	Stop anyone freeing the buffer while we retransmit it
			 */
			skb_device_lock(skb);
			restore_flags(flags);
			/*
			 *	Feed them to the output stage and if it fails
			 *	indicate that they re-queue at the front.
			 */
			dev_queue_xmit(skb, dev, -i - 1);
			/*
			 *	If we can take no more then stop here.
			 */
			if (dev->tbusy)
				return;
			cli();
		}
	}
	restore_flags(flags);
}


/*
 *	Perform a SIOCGIFCONF call. This structure will change
 *	size shortly, and there is nothing I can do about it.
 *	Thus we will need a 'compatibility mode'.
 */

static int dev_ifconf(char *arg)
{
	struct ifconf ifc;
	struct ifreq ifr;
	struct device *dev;
	char *pos;
	int len;
	int err;

	/*
	 *	Fetch the caller's info block.
	 */

	err = verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
	if (err)
		return err;
	memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
	len = ifc.ifc_len;
	pos = ifc.ifc_buf;

	/*
	 *	We now walk the device list filling each active device
	 *	into the array.
	 */

	err = verify_area(VERIFY_WRITE, pos, len);
	if (err)
		return err;

	/*
	 *	Loop over the interfaces, and write an info block for each.
	 */

	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		if (!(dev->flags & IFF_UP))	/* Downed devices don't count */
			continue;
		memset(&ifr, 0, sizeof(struct ifreq));
		strcpy(ifr.ifr_name, dev->name);
		(*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
		(*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;

		/*
		 *	Have we run out of space here?
		 */

		if (len < sizeof(struct ifreq))
			break;

		/*
		 *	Write this block to the caller's space.
		 */

		memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
		pos += sizeof(struct ifreq);
		len -= sizeof(struct ifreq);
	}

	/*
	 *	All done. Write the updated control block back to the caller.
	 */

	ifc.ifc_len = (pos - ifc.ifc_buf);
	ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
	memcpy_tofs(arg, &ifc, sizeof(struct ifconf));

	/*
	 *	Report how much was filled in. This is measured from the
	 *	start of the caller's buffer (pos - ifc.ifc_buf), not from
	 *	arg, which points at the ifconf block itself.
	 */

	return(pos - ifc.ifc_buf);
}
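
/*
 *	User-space view (sketch): the caller supplies a buffer and gets
 *	back one ifreq per running interface, with ifc_len adjusted to
 *	the amount actually filled in:
 *
 *		struct ifconf ifc;
 *		struct ifreq reqs[8];
 *
 *		ifc.ifc_len = sizeof(reqs);
 *		ifc.ifc_req = reqs;
 *		ioctl(fd, SIOCGIFCONF, &ifc);
 *		n = ifc.ifc_len / sizeof(struct ifreq);
 */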


/*
 *	This is invoked by the /proc filesystem handler to display a device
 *	in detail.
 */

static int sprintf_stats(char *buffer, struct device *dev)
{
	struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev) : NULL);
	int size;

	if (stats)
		size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
			       dev->name,
			       stats->rx_packets, stats->rx_errors,
			       stats->rx_dropped + stats->rx_missed_errors,
			       stats->rx_fifo_errors,
			       stats->rx_length_errors + stats->rx_over_errors
			       + stats->rx_crc_errors + stats->rx_frame_errors,
			       stats->tx_packets, stats->tx_errors, stats->tx_dropped,
			       stats->tx_fifo_errors, stats->collisions,
			       stats->tx_carrier_errors + stats->tx_aborted_errors
			       + stats->tx_window_errors + stats->tx_heartbeat_errors);
	else
		size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);

	return size;
}

/*
 *	Called from the PROCfs module. This now uses the new arbitrary sized /proc/net interface
 *	to create /proc/net/dev
 */

int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
	int len = 0;
	off_t begin = 0;
	off_t pos = 0;
	int size;

	struct device *dev;

	size = sprintf(buffer, "Inter-|  Receive                   | Transmit\n"
		       " face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");

	pos += size;
	len += size;

	for (dev = dev_base; dev != NULL; dev = dev->next)
	{
		size = sprintf_stats(buffer + len, dev);
		len += size;
		pos = begin + len;

		if (pos < offset)
		{
			len = 0;
			begin = pos;
		}
		if (pos > offset + length)
			break;
	}

	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);		/* Start slop */
	if (len > length)
		len = length;			/* Ending slop */
	return len;
}


/*
 *	This checks bitmasks for the ioctl calls for devices.
 */

static inline int bad_mask(unsigned long mask, unsigned long addr)
{
	if (addr & (mask = ~mask))
		return 1;
	mask = ntohl(mask);
	if (mask & (mask+1))
		return 1;
	return 0;
}
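
/*
 *	Worked example: a mask is accepted only if its complement shares
 *	no bits with addr and the host-order complement plus one is a
 *	power of two, i.e. the mask is one contiguous run of high bits.
 *	255.255.255.0 passes: ~mask = 0x000000ff and 0xff & 0x100 == 0.
 *	255.0.255.0 fails: ~mask = 0x00ff00ff, and
 *	0x00ff00ff & 0x00ff0100 == 0x00ff0000, which is non-zero.
 */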

/*
 *	Perform the SIOCxIFxxx calls.
 *
 *	The socket layer has seen an ioctl that the address family thinks
 *	is for a device. At this point we get invoked to make a decision.
 */

static int dev_ifsioc(void *arg, unsigned int getset)
{
	struct ifreq ifr;
	struct device *dev;
	int ret;

	/*
	 *	Fetch the caller's info block into kernel space
	 */

	int err = verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
	if (err)
		return err;

	memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));

	/*
	 *	See which interface the caller is talking about.
	 */

	if ((dev = dev_get(ifr.ifr_name)) == NULL)
		return(-ENODEV);

	switch (getset)
	{
		case SIOCGIFFLAGS:	/* Get interface flags */
			ifr.ifr_flags = dev->flags;
			goto rarok;

		case SIOCSIFFLAGS:	/* Set interface flags */
		{
			int old_flags = dev->flags;

			/*
			 *	We are not allowed to potentially close/unload
			 *	a device until we get this lock.
			 */

			dev_lock_wait();

			/*
			 *	Set the flags on our device.
			 */

			dev->flags = (ifr.ifr_flags & (
				IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
				IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
				IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER
				| IFF_MULTICAST)) | (dev->flags & IFF_UP);
			/*
			 *	Load in the correct multicast list now the flags have changed.
			 */

			dev_mc_upload(dev);

			/*
			 *	Have we downed the interface? We handle IFF_UP ourselves
			 *	according to user attempts to set it, rather than blindly
			 *	setting it.
			 */

			if ((old_flags ^ ifr.ifr_flags) & IFF_UP)	/* Bit is different? */
			{
				if (old_flags & IFF_UP)		/* Gone down */
					ret = dev_close(dev);
				else				/* Come up */
				{
					ret = dev_open(dev);
					if (ret < 0)
						dev->flags &= ~IFF_UP;	/* Open failed */
				}
			}
			else
				ret = 0;
			/*
			 *	Load in the correct multicast list now the flags have changed.
			 */

			dev_mc_upload(dev);
		}
		break;

		case SIOCGIFADDR:	/* Get interface address (and family) */
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_port = 0;
			goto rarok;

		case SIOCSIFADDR:	/* Set interface address (and family) */
			dev->pa_addr = (*(struct sockaddr_in *)
				&ifr.ifr_addr).sin_addr.s_addr;
			dev->family = ifr.ifr_addr.sa_family;

#ifdef CONFIG_INET
			/* This is naughty. When net-032e comes out it wants moving into the net032
			   code not the kernel. Till then it can sit here (SIGH) */
			dev->pa_mask = ip_get_mask(dev->pa_addr);
#endif
			dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
			ret = 0;
			break;

		case SIOCGIFBRDADDR:	/* Get the broadcast address */
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_port = 0;
			goto rarok;

		case SIOCSIFBRDADDR:	/* Set the broadcast address */
			dev->pa_brdaddr = (*(struct sockaddr_in *)
				&ifr.ifr_broadaddr).sin_addr.s_addr;
			ret = 0;
			break;

		case SIOCGIFDSTADDR:	/* Get the destination address (for point-to-point links) */
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_port = 0;
			goto rarok;

		case SIOCSIFDSTADDR:	/* Set the destination address (for point-to-point links) */
			dev->pa_dstaddr = (*(struct sockaddr_in *)
				&ifr.ifr_dstaddr).sin_addr.s_addr;
			ret = 0;
			break;

		case SIOCGIFNETMASK:	/* Get the netmask for the interface */
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_family = dev->family;
			(*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_port = 0;
			goto rarok;

		case SIOCSIFNETMASK:	/* Set the netmask for the interface */
		{
			unsigned long mask = (*(struct sockaddr_in *)
				&ifr.ifr_netmask).sin_addr.s_addr;
			ret = -EINVAL;
			/*
			 *	The mask we set must be legal.
			 */
			if (bad_mask(mask, 0))
				break;
			dev->pa_mask = mask;
			ret = 0;
		}
		break;

		case SIOCGIFMETRIC:	/* Get the metric on the interface (currently unused) */
			ifr.ifr_metric = dev->metric;
			goto rarok;

		case SIOCSIFMETRIC:	/* Set the metric on the interface (currently unused) */
			dev->metric = ifr.ifr_metric;
			ret = 0;
			break;

		case SIOCGIFMTU:	/* Get the MTU of a device */
			ifr.ifr_mtu = dev->mtu;
			goto rarok;

		case SIOCSIFMTU:	/* Set the MTU of a device */

			/*
			 *	The MTU must be at least 68 octets, the minimum
			 *	an IP host is required to handle.
			 */

			if (ifr.ifr_mtu < 68)
				return -EINVAL;
			dev->mtu = ifr.ifr_mtu;
			ret = 0;
			break;

		case SIOCGIFMEM:	/* Get the per device memory space. We can add this but currently
					   do not support it */
			ret = -EINVAL;
			break;

		case SIOCSIFMEM:	/* Set the per device memory buffer space. Not applicable in our case */
			ret = -EINVAL;
			break;

		case OLD_SIOCGIFHWADDR:	/* Get the hardware address. This will change and SIFHWADDR will be added */
			memcpy(ifr.old_ifr_hwaddr, dev->dev_addr, MAX_ADDR_LEN);
			goto rarok;

		case SIOCGIFHWADDR:
			memcpy(ifr.ifr_hwaddr.sa_data, dev->dev_addr, MAX_ADDR_LEN);
			ifr.ifr_hwaddr.sa_family = dev->type;
			goto rarok;

		case SIOCSIFHWADDR:
			if (dev->set_mac_address == NULL)
				return -EOPNOTSUPP;
			if (ifr.ifr_hwaddr.sa_family != dev->type)
				return -EINVAL;
			ret = dev->set_mac_address(dev, ifr.ifr_hwaddr.sa_data);
			break;

		case SIOCGIFMAP:
			ifr.ifr_map.mem_start = dev->mem_start;
			ifr.ifr_map.mem_end = dev->mem_end;
			ifr.ifr_map.base_addr = dev->base_addr;
			ifr.ifr_map.irq = dev->irq;
			ifr.ifr_map.dma = dev->dma;
			ifr.ifr_map.port = dev->if_port;
			goto rarok;

		case SIOCSIFMAP:
			if (dev->set_config == NULL)
				return -EOPNOTSUPP;
			return dev->set_config(dev, &ifr.ifr_map);

		case SIOCADDMULTI:
			if (dev->set_multicast_list == NULL)
				return -EINVAL;
			if (ifr.ifr_hwaddr.sa_family != AF_UNSPEC)
				return -EINVAL;
			dev_mc_add(dev, ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
			return 0;

		case SIOCDELMULTI:
			if (dev->set_multicast_list == NULL)
				return -EINVAL;
			if (ifr.ifr_hwaddr.sa_family != AF_UNSPEC)
				return -EINVAL;
			dev_mc_delete(dev, ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
			return 0;

		/*
		 *	Unknown or private ioctl
		 */

		default:
			if ((getset >= SIOCDEVPRIVATE) &&
			    (getset <= (SIOCDEVPRIVATE + 15))) {
				if (dev->do_ioctl == NULL)
					return -EOPNOTSUPP;
				ret = dev->do_ioctl(dev, &ifr, getset);
				memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
				break;
			}

			ret = -EINVAL;
	}
	return(ret);
/*
 *	The load of calls that return an ifreq and ok (saves memory).
 */
rarok:
	memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
	return 0;
}


/*
 *	This function handles all "interface"-type I/O control requests. The actual
 *	'doing' part of this is dev_ifsioc above.
 */

int dev_ioctl(unsigned int cmd, void *arg)
{
	switch (cmd)
	{
		case SIOCGIFCONF:
			(void) dev_ifconf((char *) arg);
			return 0;

		/*
		 *	Ioctl calls that can be done by all.
		 */

		case SIOCGIFFLAGS:
		case SIOCGIFADDR:
		case SIOCGIFDSTADDR:
		case SIOCGIFBRDADDR:
		case SIOCGIFNETMASK:
		case SIOCGIFMETRIC:
		case SIOCGIFMTU:
		case SIOCGIFMEM:
		case SIOCGIFHWADDR:
		case SIOCSIFHWADDR:
		case OLD_SIOCGIFHWADDR:
		case SIOCGIFSLAVE:
		case SIOCGIFMAP:
			return dev_ifsioc(arg, cmd);

		/*
		 *	Ioctl calls requiring the power of a superuser
		 */

		case SIOCSIFFLAGS:
		case SIOCSIFADDR:
		case SIOCSIFDSTADDR:
		case SIOCSIFBRDADDR:
		case SIOCSIFNETMASK:
		case SIOCSIFMETRIC:
		case SIOCSIFMTU:
		case SIOCSIFMEM:
		case SIOCSIFMAP:
		case SIOCSIFSLAVE:
		case SIOCADDMULTI:
		case SIOCDELMULTI:
			if (!suser())
				return -EPERM;
			return dev_ifsioc(arg, cmd);

		case SIOCSIFLINK:
			return -EINVAL;

		/*
		 *	Unknown or private ioctl.
		 */

		default:
			if ((cmd >= SIOCDEVPRIVATE) &&
			    (cmd <= (SIOCDEVPRIVATE + 15))) {
				return dev_ifsioc(arg, cmd);
			}
			return -EINVAL;
	}
}
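
/*
 *	User-space view (sketch): interface ioctls name the device in
 *	ifr_name and read or write one field each, e.g. bringing eth0
 *	up on any socket fd:
 *
 *		struct ifreq ifr;
 *
 *		strcpy(ifr.ifr_name, "eth0");
 *		ioctl(fd, SIOCGIFFLAGS, &ifr);
 *		ifr.ifr_flags |= IFF_UP;
 *		ioctl(fd, SIOCSIFFLAGS, &ifr);	(superuser only, see above)
 */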


/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */
extern int lance_init(void);
extern int pi_init(void);
extern int dec21040_init(void);

int net_dev_init(void)
{
	struct device *dev, **dp;

	/*
	 *	This is VeryUgly(tm).
	 *
	 *	Some devices want to be initialized early..
	 */
#if defined(CONFIG_LANCE)
	lance_init();
#endif
#if defined(CONFIG_PI)
	pi_init();
#endif
#if defined(CONFIG_DEC_ELCP)
	dec21040_init();
#endif

	/*
	 *	Add the devices.
	 *	If the call to dev->init fails, the dev is removed
	 *	from the chain, disconnecting the device until the
	 *	next reboot.
	 */

	dp = &dev_base;
	while ((dev = *dp) != NULL)
	{
		int i;
		for (i = 0; i < DEV_NUMBUFFS; i++) {
			skb_queue_head_init(dev->buffs + i);
		}

		if (dev->init && dev->init(dev))
		{
			/*
			 *	It failed to come up. Unhook it.
			 */
			*dp = dev->next;
		}
		else
		{
			dp = &dev->next;
		}
	}

	proc_net_register(&(struct proc_dir_entry) {
		PROC_NET_DEV, 3, "dev",
		S_IFREG | S_IRUGO, 1, 0, 0,
		0, &proc_net_inode_operations,
		dev_get_info
	});

	bh_base[NET_BH].routine = net_bh;
	enable_bh(NET_BH);
	return 0;
}