This source file includes following definitions.
- de4x5_probe
- de4x5_hw_init
- de4x5_open
- de4x5_init
- de4x5_queue_pkt
- de4x5_interrupt
- de4x5_rx
- de4x5_tx
- de4x5_close
- de4x5_get_stats
- load_packet
- set_multicast_list
- SetMulticastFilter
- eisa_probe
- pci_probe
- alloc_device
- autoconf_media
- create_packet
- EISA_signature
- DevicePresent
- aprom_crc
- de4x5_ioctl
- asc2hex
- init_module
- cleanup_module
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97 static char *version = "de4x5.c:v0.21 1/19/95 davies@wanton.lkg.dec.com\n";
98
99 #include <stdarg.h>
100 #include <linux/config.h>
101 #include <linux/kernel.h>
102 #include <linux/sched.h>
103 #include <linux/string.h>
104 #include <linux/ptrace.h>
105 #include <linux/errno.h>
106 #include <linux/ioport.h>
107 #include <linux/malloc.h>
108 #include <linux/interrupt.h>
109 #include <linux/pci.h>
110 #include <asm/bitops.h>
111 #include <asm/io.h>
112 #include <asm/dma.h>
113 #include <asm/segment.h>
114
115 #include <linux/netdevice.h>
116 #include <linux/etherdevice.h>
117 #include <linux/skbuff.h>
118
119 #include <linux/time.h>
120 #include <linux/types.h>
121 #include <linux/unistd.h>
122
123 #ifdef MODULE
124 #include <linux/module.h>
125 #include <linux/version.h>
126 #endif
127
128 #include "de4x5.h"
129
130 #ifdef DE4X5_DEBUG
131 static int de4x5_debug = DE4X5_DEBUG;
132 #else
133 static int de4x5_debug = 1;
134 #endif
135
136 #ifndef PROBE_LENGTH
137 #define PROBE_LENGTH 32
138 #endif
139
140 #define ETH_PROM_SIG "FF0055AAFF0055AA"
141
142 #define DE4X5_SIGNATURE {"DE425",""}
143 #define DE4X5_NAME_LENGTH 8
144
145 #define DE4X5_EISA_IO_PORTS 0x0c00
146
147 #define MAX_EISA_SLOTS 16
148 #define EISA_SLOT_INC 0x1000
149 #define DE4X5_EISA_SEARCH 0x00000001
150 static u_long eisa_slots_full =
151 DE4X5_EISA_SEARCH;
152
153 #define PCI_MAX_BUS_NUM 8
154 static u_long pci_slots_full[PCI_MAX_BUS_NUM];
155
156
157 #define CRC_POLYNOMIAL_BE 0x04c11db7UL
158 #define CRC_POLYNOMIAL_LE 0xedb88320UL
159
160 #define LWPAD ((long)(sizeof(long) - 1))
161
162
163
164
165 static u_long irq_mask = IMR_RIM | IMR_TIM | IMR_TUM ;
166
167 static u_long irq_en = IMR_NIM | IMR_AIM;
168
169 #define ENABLE_IRQs \
170 imr |= irq_en;\
171 outl(imr, DE4X5_IMR)
172
173 #define DISABLE_IRQs \
174 imr = inl(DE4X5_IMR);\
175 imr &= ~irq_en;\
176 outl(imr, DE4X5_IMR)
177
178 #define UNMASK_IRQs \
179 imr |= irq_mask;\
180 outl(imr, DE4X5_IMR)
181
182 #define MASK_IRQs \
183 imr = inl(DE4X5_IMR);\
184 imr &= ~irq_mask;\
185 outl(imr, DE4X5_IMR)
186
187
188
189
190 #define START_DE4X5 \
191 omr = inl(DE4X5_OMR);\
192 omr |= OMR_ST | OMR_SR;\
193 outl(omr, DE4X5_OMR)
194
195 #define STOP_DE4X5 \
196 omr = inl(DE4X5_OMR);\
197 omr &= ~(OMR_ST|OMR_SR);\
198 outl(omr, DE4X5_OMR)
199
200
201
202
203 #define RESET_SIA \
204 outl(SICR_RESET, DE4X5_SICR); \
205 outl(STRR_RESET, DE4X5_STRR); \
206 outl(SIGR_RESET, DE4X5_SIGR)
207
208
209
210
211 #define PKT_BUF_SZ 1544
212 #define MAX_PKT_SZ 1514
213 #define MAX_DAT_SZ 1500
214 #define MIN_DAT_SZ 1
215 #define PKT_HDR_LEN 14
216
217
218
219
220
221
222
223 #define NUM_RX_DESC 64
224 #define NUM_TX_DESC 8
225 #define BUFF_ALLOC_RETRIES 10
226 #define RX_BUFF_SZ 256
227
228
/*
** DC21040 receive/transmit descriptor. Rings (not chains) are used:
** 'next' stays NULL and wrap-around is marked with RD_RER/TD_TER in des1.
*/
struct de4x5_desc {
    volatile long status;   /* ownership bit (R_OWN/T_OWN) + completion status */
    u_long des1;            /* control flags and buffer byte count */
    char *buf;              /* buffer address handed to the chip (assumes virt==phys on this platform — TODO confirm) */
    char *next;             /* unused: descriptors are ring-mode, not chained */
};
235
236
237
238
239 #define DE4X5_PKT_STAT_SZ 16
240 #define DE4X5_PKT_BIN_SZ 128
241
242
/*
** Per-adapter state, hung off dev->priv. NOTE: rx_ring is the FIRST member,
** so &lp->rx_ring[0] aliases the struct's own base address.
*/
struct de4x5_private {
    struct de4x5_desc rx_ring[NUM_RX_DESC]; /* RX descriptor ring (chip DMA's into these) */
    struct de4x5_desc tx_ring[NUM_TX_DESC]; /* TX descriptor ring */
    struct sk_buff *skb[NUM_TX_DESC];       /* skb owned by each TX slot; freed on completion */
    int rx_new, rx_old;                     /* RX ring head (next to fill) / tail */
    int tx_new, tx_old;                     /* TX ring head (next free) / tail (oldest pending) */
    char setup_frame[SETUP_FRAME_LEN];      /* perfect/hash filter setup frame for the chip */
    struct enet_statistics stats;           /* standard ifconfig statistics */
    struct {                                /* driver-private packet accounting */
        unsigned long bins[DE4X5_PKT_STAT_SZ]; /* size histogram; bins[0] = total count */
        unsigned long unicast;
        unsigned long multicast;
        unsigned long broadcast;
        unsigned long excessive_collisions;
        unsigned long tx_underruns;
        unsigned long excessive_underruns;
    } pktStats;
    char rxRingSize;                        /* number of RX descriptors in use */
    char txRingSize;                        /* number of TX descriptors in use */
    char bus;                               /* EISA or PCI */
    char lostMedia;                         /* consecutive TX media failures; >3 forces re-init */
};
265
266 #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
267 lp->tx_old+lp->txRingSize-lp->tx_new-1:\
268 lp->tx_old -lp->tx_new-1)
269 #define TX_SUSPENDED (((sts & STS_TS) ^ TS_SUSP)==0)
270
271
272
273
274 static int de4x5_open(struct device *dev);
275 static int de4x5_queue_pkt(struct sk_buff *skb, struct device *dev);
276 static void de4x5_interrupt(int reg_ptr);
277 static int de4x5_close(struct device *dev);
278 static struct enet_statistics *de4x5_get_stats(struct device *dev);
279 static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);
280 static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd);
281
282
283
284
285 static int de4x5_hw_init(struct device *dev, short iobase);
286 static int de4x5_init(struct device *dev);
287 static int de4x5_rx(struct device *dev);
288 static int de4x5_tx(struct device *dev);
289
290 static int autoconf_media(struct device *dev);
291 static void create_packet(struct device *dev, char *frame, int len);
292 static void load_packet(struct device *dev, char *buf, u_long flags, struct sk_buff *skb);
293 static void EISA_signature(char * name, short iobase);
294 static int DevicePresent(short iobase);
295 static void SetMulticastFilter(struct device *dev, int num_addrs, char *addrs, char *multicast_table);
296
297 static int aprom_crc (struct device *dev);
298
299 static void eisa_probe(struct device *dev, short iobase);
300 static void pci_probe(struct device *dev, short iobase);
301 static struct device *alloc_device(struct device *dev, int iobase);
302
303 #ifdef MODULE
304 int init_module(void);
305 void cleanup_module(void);
306 # else
307 static unsigned char de4x5_irq[] = {5,9,10,11};
308 #endif
309
310 static int num_de4x5s = 0, num_eth = 0, autoprobed = 0;
311
312
313
314
315
316
317 static struct bus_type {
318 int bus;
319 int device;
320 } bus;
321
322
323
324
325 #define RESET_DE4X5 {\
326 long i;\
327 i=inl(DE4X5_BMR);\
328 outl(i | BMR_SWR, DE4X5_BMR);\
329 outl(i, DE4X5_BMR);\
330 for (i=0;i<5;i++) inl(DE4X5_BMR);\
331 }
332
333
334
335
/*
** Kernel probe entry point. Rejects iobase values in legacy ISA space
** (0 < iobase < 0x100), then searches EISA and PCI for DE4x5 adapters.
** Autoprobe (iobase == 0) is refused when built as a module.
** Returns 0 if any probed device ended up with a private area attached.
*/
int de4x5_probe(struct device *dev)
{
  int tmp = num_de4x5s, iobase = dev->base_addr;
  int status = -ENODEV;

  if ((iobase > 0) && (iobase <0x100)) {  /* ISA I/O range: not ours */
    status = -ENXIO;

#ifdef MODULE
  } else if (iobase == 0){
    printk("Autoprobing is not supported when loading a module based driver.\n");
    status = -EIO;
#endif
  } else {

    eisa_probe(dev, iobase);
    pci_probe(dev, iobase);

    /* num_de4x5s unchanged => nothing found at the requested address */
    if ((tmp == num_de4x5s) && (iobase != 0)) {
      printk("%s: de4x5_probe() cannot find device at 0x%04x.\n", dev->name,
	     iobase);
    }

    /* Walk the device list to the entry the probes initialised (if any) */
    for (; dev->priv == NULL && dev->next != NULL; dev = dev->next);

    if (dev->priv) status = 0;
    if (iobase == 0) autoprobed = 1;  /* only autoprobe the buses once */
  }

  return status;
}
371
/*
** One-time hardware initialisation for a detected adapter at 'iobase':
** resets the chip, identifies it (EISA signature or assumed DE435 on PCI),
** reads/validates the station address PROM, allocates the private area and
** RX buffer pool, probes the IRQ line (non-module builds) and installs the
** net-device method pointers. Returns 0 on success, -ENXIO on any failure.
*/
static int
de4x5_hw_init(struct device *dev, short iobase)
{
  struct bus_type *lp = &bus;   /* NB: shadowed below by de4x5_private *lp */
  int tmpbus, i, j, status=0;
  char *tmp, name[DE4X5_NAME_LENGTH + 1];
  u_long nicsr;

  RESET_DE4X5;

  /* After reset both TX and RX processes must be stopped */
  if (((nicsr=inl(DE4X5_STS)) & (STS_TS | STS_RS)) == 0) {

    if (lp->bus == PCI) {
      strcpy(name,"DE435");             /* PCI parts carry no EISA ID */
    } else {
      EISA_signature(name, EISA_ID0);
    }

    if (*name != '\0') {
      dev->base_addr = iobase;

      if (lp->bus == EISA) {
	printk("%s: %s at %#3x (EISA slot %d)",
	       dev->name, name, (u_short)iobase, (((u_short)iobase>>12)&0x0f));
      } else {
	printk("%s: %s at %#3x (PCI device %d)", dev->name, name, (u_short)iobase,lp->device);
      }

      printk(", h/w address ");
      status = aprom_crc(dev);          /* fills dev->dev_addr, checks CRC */
      for (i = 0; i < ETH_ALEN - 1; i++) {
	printk("%2.2x:", dev->dev_addr[i]);
      }
      printk("%2.2x,\n", dev->dev_addr[i]);

      tmpbus = lp->bus;                 /* save before 'lp' is shadowed */

      if (status == 0) {
	struct de4x5_private *lp;       /* shadows the bus_type pointer above */

	/* Over-allocate by LWPAD so the struct can be long-word aligned.
	** NOTE(review): kmalloc result is not checked for NULL before the
	** alignment fix-up and memset — verify this cannot fail here.
	*/
	dev->priv = (void *) kmalloc(sizeof(struct de4x5_private) + LWPAD,
				     GFP_KERNEL);

	dev->priv = (void *)(((u_long)dev->priv + LWPAD) & ~LWPAD);
	lp = (struct de4x5_private *)dev->priv;
	memset(dev->priv, 0, sizeof(struct de4x5_private));
	lp->bus = tmpbus;

	/* Allocate one contiguous pool for all RX buffers, retrying a few
	** times, and point every RX descriptor into it.
	*/
	for (tmp=NULL, j=0; j<BUFF_ALLOC_RETRIES && tmp==NULL; j++) {
	  if ((tmp = (void *)kmalloc(RX_BUFF_SZ * NUM_RX_DESC + LWPAD,
				     GFP_KERNEL)) != NULL) {
	    tmp = (void *)(((u_long) tmp + LWPAD) & ~LWPAD);
	    for (i=0; i<NUM_RX_DESC; i++) {
	      lp->rx_ring[i].status = 0;
	      lp->rx_ring[i].des1 = RX_BUFF_SZ;
	      lp->rx_ring[i].buf = tmp + i * RX_BUFF_SZ;
	      lp->rx_ring[i].next = NULL;
	    }
	  }
	}

	if (tmp != NULL) {
	  lp->rxRingSize = NUM_RX_DESC;
	  lp->txRingSize = NUM_TX_DESC;

	  /* Mark the last descriptor of each ring so the chip wraps */
	  lp->rx_ring[lp->rxRingSize - 1].des1 |= RD_RER;
	  lp->tx_ring[lp->txRingSize - 1].des1 |= TD_TER;

	  /* Tell the chip where the rings live */
	  outl((u_long)lp->rx_ring, DE4X5_RRBA);
	  outl((u_long)lp->tx_ring, DE4X5_TRBA);

	  if (dev->irq < 2) {
#ifndef MODULE
	    /* Provoke an interrupt by starting RX and let autoirq find it */
	    unsigned char irqnum;
	    u_long omr;
	    autoirq_setup(0);

	    omr = inl(DE4X5_OMR);
	    outl(IMR_AIM|IMR_RUM, DE4X5_IMR);
	    outl(OMR_SR | omr, DE4X5_OMR);

	    irqnum = autoirq_report(1);
	    if (!irqnum) {
	      printk(" and failed to detect IRQ line.\n");
	      status = -ENXIO;
	    } else {
	      /* Accept only IRQs a DE4x5 can legitimately use */
	      for (dev->irq=0,i=0; i<sizeof(de4x5_irq) && !dev->irq; i++) {
		if (irqnum == de4x5_irq[i]) {
		  dev->irq = irqnum;
		  printk(" and uses IRQ%d.\n", dev->irq);
		}
	      }

	      if (!dev->irq) {
		printk(" but incorrect IRQ line detected.\n");
		status = -ENXIO;
	      }
	    }

	    outl(0, DE4X5_IMR);   /* mask everything again */
#endif
	  } else {
	    printk(" and requires IRQ%d (not probed).\n", dev->irq);
	  }
	} else {
	  printk("%s: Kernel could not allocate RX buffer memory.\n",
		 dev->name);
	  status = -ENXIO;
	}
      } else {
	printk(" which has an Ethernet PROM CRC error.\n");
	status = -ENXIO;
      }
    } else {
      status = -ENXIO;
    }
  } else {
    status = -ENXIO;
  }

  if (!status) {
    if (de4x5_debug > 0) {
      printk(version);
    }

    /* Install the driver methods */
    dev->open = &de4x5_open;
    dev->hard_start_xmit = &de4x5_queue_pkt;
    dev->stop = &de4x5_close;
    dev->get_stats = &de4x5_get_stats;
#ifdef HAVE_MULTICAST
    dev->set_multicast_list = &set_multicast_list;
#endif
    dev->do_ioctl = &de4x5_ioctl;

    dev->mem_start = 0;

    ether_setup(dev);
  } else {
    /* NOTE(review): rx_ring is the FIRST member of de4x5_private, so
    ** kfree_s(lp->rx_ring, ...) frees the same pointer as dev->priv below —
    ** this looks like a double free, and the RX buffer pool ('tmp') is
    ** never freed. Also both pointers were alignment-adjusted after
    ** kmalloc, so they may not be what kmalloc returned. Needs rework.
    */
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    if (lp) {
      kfree_s(lp->rx_ring, RX_BUFF_SZ * NUM_RX_DESC + LWPAD);
    }
    if (dev->priv) {
      kfree_s(dev->priv, sizeof(struct de4x5_private) + LWPAD);
      dev->priv = NULL;
    }
  }

  return status;
}
543
544
/*
** Bring the interface up: stop the chip, claim the IRQ, run the full
** de4x5_init() sequence, then clear/unmask interrupts and start TX/RX.
** Returns 0 on success, -EAGAIN if the IRQ is busy, or de4x5_init()'s
** error status.
*/
static int
de4x5_open(struct device *dev)
{
  struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
  short iobase = dev->base_addr;
  int i, status = 0;
  u_long imr, omr, sts;

  /* Quiesce the chip before touching anything else */
  STOP_DE4X5;

  if (request_irq(dev->irq, (void *)de4x5_interrupt, 0, "de4x5")) {
    printk("de4x5_open(): Requested IRQ%d is busy\n",dev->irq);
    status = -EAGAIN;
  } else {

    irq2dev_map[dev->irq] = dev;   /* let the ISR find us from the IRQ */

    status = de4x5_init(dev);

    if (de4x5_debug > 1){
      /* Dump ring layout and addresses for debugging */
      printk("%s: de4x5 open with irq %d\n",dev->name,dev->irq);
      printk("\tphysical address: ");
      for (i=0;i<6;i++){
	printk("%2.2x:",(short)dev->dev_addr[i]);
      }
      printk("\n");
      printk("\tchecked memory: 0x%08lx\n",eisa_slots_full);
      printk("Descriptor head addresses:\n");
      printk("\t0x%8.8lx 0x%8.8lx\n",(long)lp->rx_ring,(long)lp->tx_ring);
      printk("Descriptor addresses:\nRX: ");
      for (i=0;i<lp->rxRingSize-1;i++){
	if (i < 3) {
	  printk("0x%8.8lx ",(long)&lp->rx_ring[i].status);
	}
      }
      printk("...0x%8.8lx\n",(long)&lp->rx_ring[i].status);
      printk("TX: ");
      for (i=0;i<lp->txRingSize-1;i++){
	if (i < 3) {
	  printk("0x%8.8lx ", (long)&lp->tx_ring[i].status);
	}
      }
      printk("...0x%8.8lx\n", (long)&lp->tx_ring[i].status);
      printk("Descriptor buffers:\nRX: ");
      for (i=0;i<lp->rxRingSize-1;i++){
	if (i < 3) {
	  printk("0x%8.8lx ",(long)lp->rx_ring[i].buf);
	}
      }
      printk("...0x%8.8lx\n",(long)lp->rx_ring[i].buf);
      printk("TX: ");
      for (i=0;i<lp->txRingSize-1;i++){
	if (i < 3) {
	  printk("0x%8.8lx ", (long)lp->tx_ring[i].buf);
	}
      }
      printk("...0x%8.8lx\n", (long)lp->tx_ring[i].buf);
      printk("Ring size: \nRX: %d\nTX: %d\n",
	     (short)lp->rxRingSize,
	     (short)lp->txRingSize);
      printk("\tstatus: %d\n", status);
    }

    if (!status) {
      dev->tbusy = 0;
      dev->start = 1;
      dev->interrupt = UNMASK_INTERRUPTS;

      /* Acknowledge any stale status bits before enabling interrupts */
      sts = inl(DE4X5_STS);
      outl(sts, DE4X5_STS);

      imr = 0;
      UNMASK_IRQs;
      ENABLE_IRQs;

      START_DE4X5;
    }
    if (de4x5_debug > 1) {
      printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
      printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
      printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
      printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
      printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
      printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
      printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
      printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
    }
  }

#ifdef MODULE
  MOD_INC_USE_COUNT;
#endif

  return status;
}
651
652
653
654
655 static int
656 de4x5_init(struct device *dev)
657 {
658 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
659 short iobase = dev->base_addr;
660 int offset, status = 0;
661 u_long i, j, bmr, omr;
662 char *pa;
663
664
665 RESET_DE4X5;
666
667
668 bmr = inl(DE4X5_BMR);
669 bmr |= TAP_1_6MS | CAL_16LONG;
670 outl(bmr, DE4X5_BMR);
671
672
673 omr = OMR_HP;
674 offset = IMPERF_PA_OFFSET;
675
676
677 set_bit(0, (void *)&dev->tbusy);
678
679
680 outl((u_long)lp->rx_ring, DE4X5_RRBA);
681 outl((u_long)lp->tx_ring, DE4X5_TRBA);
682
683
684 lp->rx_new = lp->rx_old = 0;
685 lp->tx_new = lp->tx_old = 0;
686
687
688 for (i = 0; i < lp->rxRingSize; i++) {
689 lp->rx_ring[i].status = R_OWN;
690 }
691
692
693 for (i = 0; i < lp->txRingSize; i++) {
694 lp->tx_ring[i].status = 0;
695 }
696
697
698 memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
699
700
701 for (pa=lp->setup_frame+offset, j=0; j<ETH_ALEN; j++) {
702 *(pa + j) = dev->dev_addr[j];
703 if (j & 0x01) pa += 2;
704 }
705
706
707 set_multicast_list(dev, 0, NULL);
708
709
710 load_packet(dev, lp->setup_frame, HASH_F | TD_SET | SETUP_FRAME_LEN, NULL);
711
712
713 outl(omr|OMR_ST, DE4X5_OMR);
714
715
716 for (j=0, i=0;i<100 && j==0;i++) {
717 if (lp->tx_ring[lp->tx_new].status >= 0) j=1;
718 }
719 outl(omr, DE4X5_OMR);
720
721 if (i == 100) {
722 printk("%s: Setup frame timed out, status %08x\n", dev->name,
723 inl(DE4X5_STS));
724 status = -EIO;
725 }
726
727
728 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
729 lp->tx_old = lp->tx_new;
730
731
732 if (autoconf_media(dev) == 0) {
733 status = -EIO;
734 }
735
736 return 0;
737 }
738
739
740
741
742 static int
743 de4x5_queue_pkt(struct sk_buff *skb, struct device *dev)
744 {
745 volatile struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
746 int iobase = dev->base_addr;
747 int status = 0;
748 u_long imr, omr, sts;
749
750 sts = inl(DE4X5_STS);
751
752
753
754
755
756
757 if (dev->tbusy || (lp->lostMedia > 3)) {
758 int tickssofar = jiffies - dev->trans_start;
759 if (tickssofar < 10 && !lp->lostMedia) {
760
761 if ((TX_BUFFS_AVAIL > 0) && dev->tbusy) {
762 dev->tbusy = 0;
763 }
764 status = -1;
765 } else {
766 printk("%s: transmit timed out, status %08x, tbusy:%d, lostMedia:%d tickssofar:%d, resetting.\n",dev->name, inl(DE4X5_STS), dev->tbusy, lp->lostMedia, tickssofar);
767
768
769 STOP_DE4X5;
770 status = de4x5_init(dev);
771
772
773 if (!status) {
774
775 dev->trans_start = jiffies;
776 START_DE4X5;
777
778
779 sts = inl(DE4X5_STS);
780 outl(sts, DE4X5_STS);
781
782
783 imr = 0;
784 UNMASK_IRQs;
785
786 dev->interrupt = UNMASK_INTERRUPTS;
787 dev->start = 1;
788 dev->tbusy = 0;
789
790 ENABLE_IRQs;
791 } else {
792 printk("%s: hardware initialisation failure, status %08x.\n",
793 dev->name, inl(DE4X5_STS));
794 }
795 }
796 } else if (skb == NULL) {
797 dev_tint(dev);
798 } else if (skb->len > 0) {
799
800
801
802
803
804 if (set_bit(0, (void*)&dev->tbusy) != 0)
805 printk("%s: Transmitter access conflict.\n", dev->name);
806
807 if (TX_BUFFS_AVAIL > 0) {
808 if (((u_long)skb->data & ~0x03) != (u_long)skb->data) {
809 printk("%s: TX skb buffer alignment prob..\n", dev->name);
810 }
811
812 load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
813 outl(POLL_DEMAND, DE4X5_TPD);
814
815 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
816
817 dev->trans_start = jiffies;
818 }
819
820 if (TX_BUFFS_AVAIL > 0) {
821 dev->tbusy = 0;
822 }
823 }
824
825 return status;
826 }
827
828
829
830
/*
** Interrupt service routine. Recovers the device from the IRQ number via
** irq2dev_map, acknowledges all pending status bits, then dispatches to
** the RX and TX completion handlers. TX-ring interrupts stay masked while
** the handler runs (MASK_IRQs/UNMASK_IRQs) to avoid re-entry on the rings.
** The 'orig_eax' arithmetic is the 1.x kernel convention for extracting
** the IRQ number from the saved register frame.
*/
static void
de4x5_interrupt(int reg_ptr)
{
  int irq = -(((struct pt_regs *)reg_ptr)->orig_eax+2);
  struct device *dev = (struct device *)(irq2dev_map[irq]);
  struct de4x5_private *lp;
  int iobase;
  u_long imr, sts;

  if (dev == NULL) {
    printk ("de4x5_interrupt(): irq %d for unknown device.\n", irq);
  } else {
    lp = (struct de4x5_private *)dev->priv;
    iobase = dev->base_addr;

    if (dev->interrupt)
      printk("%s: Re-entering the interrupt handler.\n", dev->name);

    dev->interrupt = MASK_INTERRUPTS;

    /* Latch then mask the ring interrupts for the duration */
    sts = inl(DE4X5_STS);
    MASK_IRQs;

    /* Acknowledge everything we saw */
    outl(sts, DE4X5_STS);

    if (sts & STS_RI)               /* receive complete */
      de4x5_rx(dev);

    if (sts & STS_TI)               /* transmit complete */
      de4x5_tx(dev);

    /* TX room freed up: let the net layer retry queued packets */
    if ((TX_BUFFS_AVAIL > 0) && dev->tbusy) {
      dev->tbusy = 0;
      mark_bh(NET_BH);
    }

    dev->interrupt = UNMASK_INTERRUPTS;

    UNMASK_IRQs;
  }

  return;
}
883
884 static int
885 de4x5_rx(struct device *dev)
886 {
887 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
888 int i, entry;
889 volatile long status;
890 char *buf;
891
892
893 for (entry = lp->rx_new; lp->rx_ring[entry].status >= 0;entry = lp->rx_new) {
894 status = lp->rx_ring[entry].status;
895
896 if (status & RD_FS) {
897 lp->rx_old = entry;
898 }
899
900 if (status & RD_LS) {
901 if (status & RD_ES) {
902 lp->stats.rx_errors++;
903 if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
904 if (status & RD_CE) lp->stats.rx_crc_errors++;
905 if (status & RD_OF) lp->stats.rx_fifo_errors++;
906 } else {
907 struct sk_buff *skb;
908 short pkt_len = (short)(lp->rx_ring[entry].status >> 16);
909
910 if ((skb = alloc_skb(pkt_len, GFP_ATOMIC)) != NULL) {
911 skb->len = pkt_len;
912 skb->dev = dev;
913
914 if (entry < lp->rx_old) {
915 short len = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
916 memcpy(skb->data, lp->rx_ring[lp->rx_old].buf, len);
917 memcpy(skb->data + len, lp->rx_ring[0].buf, pkt_len - len);
918 } else {
919 memcpy(skb->data, lp->rx_ring[lp->rx_old].buf, pkt_len);
920 }
921
922
923
924
925
926 netif_rx(skb);
927
928
929
930
931 lp->stats.rx_packets++;
932 for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
933 if (pkt_len < i*DE4X5_PKT_BIN_SZ) {
934 lp->pktStats.bins[i]++;
935 i = DE4X5_PKT_STAT_SZ;
936 }
937 }
938 buf = skb->data;
939 if (buf[0] & 0x01) {
940 if ((*(long *)&buf[0] == -1) && (*(short *)&buf[4] == -1)) {
941 lp->pktStats.broadcast++;
942 } else {
943 lp->pktStats.multicast++;
944 }
945 } else if ((*(long *)&buf[0] == *(long *)&dev->dev_addr[0]) &&
946 (*(short *)&buf[4] == *(short *)&dev->dev_addr[4])) {
947 lp->pktStats.unicast++;
948 }
949
950 lp->pktStats.bins[0]++;
951 if (lp->pktStats.bins[0] == 0) {
952 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
953 }
954 } else {
955 printk("%s: Insufficient memory; nuking packet.\n", dev->name);
956 lp->stats.rx_dropped++;
957 break;
958 }
959 }
960
961
962 for (; lp->rx_old!=entry; lp->rx_old=(++lp->rx_old)%lp->rxRingSize) {
963 lp->rx_ring[lp->rx_old].status = R_OWN;
964 }
965 lp->rx_ring[entry].status = R_OWN;
966 }
967
968
969
970
971 lp->rx_new = (++lp->rx_new) % lp->rxRingSize;
972 }
973
974 return 0;
975 }
976
977
978
979
980 static int
981 de4x5_tx(struct device *dev)
982 {
983 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
984 int entry, iobase = dev->base_addr;
985 volatile long status;
986
987 for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
988 status = lp->tx_ring[entry].status;
989 if (status < 0) {
990 break;
991 } else if (status & TD_ES) {
992 lp->stats.tx_errors++;
993 if (status & TD_NC) lp->stats.tx_carrier_errors++;
994 if (status & TD_LC) lp->stats.tx_window_errors++;
995 if (status & TD_UF) lp->stats.tx_fifo_errors++;
996 if (status & TD_LC) lp->stats.collisions++;
997 if (status & TD_EC) lp->pktStats.excessive_collisions++;
998 if (status & TD_DE) lp->stats.tx_aborted_errors++;
999
1000 if (status & (TD_LO | TD_NC | TD_EC | TD_LF)) {
1001 lp->lostMedia++;
1002 } else {
1003 outl(POLL_DEMAND, DE4X5_TPD);
1004 }
1005 } else {
1006 lp->stats.tx_packets++;
1007 lp->lostMedia = 0;
1008 }
1009
1010 if (lp->skb[entry] != NULL) {
1011 dev_kfree_skb(lp->skb[entry], FREE_WRITE);
1012 }
1013
1014
1015 lp->tx_old = (++lp->tx_old) % lp->txRingSize;
1016 }
1017
1018 return 0;
1019 }
1020
1021 static int
1022 de4x5_close(struct device *dev)
1023 {
1024 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1025 int iobase = dev->base_addr;
1026 u_long imr, omr;
1027
1028 dev->start = 0;
1029 dev->tbusy = 1;
1030
1031 if (de4x5_debug > 1) {
1032 printk("%s: Shutting down ethercard, status was %8.8x.\n",
1033 dev->name, inl(DE4X5_STS));
1034 }
1035
1036
1037
1038
1039 DISABLE_IRQs;
1040
1041 STOP_DE4X5;
1042
1043
1044
1045
1046 free_irq(dev->irq);
1047 irq2dev_map[dev->irq] = 0;
1048
1049 #ifdef MODULE
1050 MOD_DEC_USE_COUNT;
1051 #endif
1052
1053 return 0;
1054 }
1055
1056 static struct enet_statistics *
1057 de4x5_get_stats(struct device *dev)
1058 {
1059 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1060 int iobase = dev->base_addr;
1061
1062 lp->stats.rx_missed_errors = (int) inl(DE4X5_MFC);
1063
1064 return &lp->stats;
1065 }
1066
1067 static void load_packet(struct device *dev, char *buf, u_long flags, struct sk_buff *skb)
1068 {
1069 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1070
1071 lp->tx_ring[lp->tx_new].buf = buf;
1072 lp->tx_ring[lp->tx_new].des1 &= TD_TER;
1073 lp->tx_ring[lp->tx_new].des1 |= flags;
1074 lp->skb[lp->tx_new] = skb;
1075 lp->tx_ring[lp->tx_new].status = T_OWN;
1076
1077 return;
1078 }
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088 static void
1089 set_multicast_list(struct device *dev, int num_addrs, void *addrs)
1090 {
1091 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1092 int iobase = dev->base_addr;
1093 u_long omr;
1094
1095
1096 if (irq2dev_map[dev->irq] != NULL) {
1097 omr = inl(DE4X5_OMR);
1098
1099 if (num_addrs >= 0) {
1100 SetMulticastFilter(dev, num_addrs, (char *)addrs, lp->setup_frame);
1101
1102
1103 load_packet(dev, lp->setup_frame, TD_IC | HASH_F | TD_SET |
1104 SETUP_FRAME_LEN, NULL);
1105 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
1106 outl(POLL_DEMAND, DE4X5_TPD);
1107
1108 omr &= ~OMR_PR;
1109 omr |= OMR_PM;
1110 outl(omr, DE4X5_OMR);
1111 } else {
1112 omr |= OMR_PR;
1113 omr &= ~OMR_PM;
1114 outl(omr, DE4X5_OMR);
1115 }
1116 }
1117 }
1118
1119
1120
1121
1122
1123
/*
** Build the 512-bit hash filter table in 'multicast_table' for the setup
** frame. num_addrs == HASH_TABLE_LEN requests an all-ones filter (accept
** all multicast). Otherwise each multicast address in 'addrs' is CRC-32'd
** (little-endian polynomial) and the low 9 bits select a bit in the table.
** NOTE(review): the fixed 0x80 write and the 'byte <<= 1 / byte -= 1'
** swizzle appear to map bit positions into the chip's 16-bit-word setup
** frame layout (broadcast bit pre-set) — confirm against the DC21040
** setup-frame format before modifying.
*/
static void SetMulticastFilter(struct device *dev, int num_addrs, char *addrs, char *multicast_table)
{
  char j, bit, byte;
  long *p = (long *) multicast_table;
  int i;
  u_short hashcode;
  u_long crc, poly = CRC_POLYNOMIAL_LE;

  if (num_addrs == HASH_TABLE_LEN) {
    /* Accept-all-multicast: set every hash bit */
    for (i=0; i<(HASH_TABLE_LEN >> 4); i++) {
      *p++ = 0x0000ffff;
    }
  } else {
    /* Clear the table, then pre-set the broadcast address bit */
    memset(multicast_table, 0, (HASH_TABLE_LEN >> 2));
    *(multicast_table + (HASH_TABLE_LEN >> 3) - 3) = 0x80;

    for (i=0;i<num_addrs;i++) {
      if ((*addrs & 0x01) == 1) {   /* multicast addresses only */
	crc = 0xffffffff;           /* CRC-32 over all 6 address bytes */
	for (byte=0;byte<ETH_ALEN;byte++) {
	  /* Process each address byte LSB first */
	  for (bit = *addrs++,j=0;j<8;j++, bit>>=1) {
	    crc = (crc >> 1) ^ (((crc ^ bit) & 0x01) ? poly : 0);
	  }
	}
	hashcode = crc & ((1 << 9) - 1);  /* hash = low 9 CRC bits */

	byte = hashcode >> 3;       /* table byte index */
	bit = 1 << (hashcode & 0x07);  /* bit within that byte */

	/* Remap into the setup frame's 16-bit word layout */
	byte <<= 1;
	if (byte & 0x02) {
	  byte -= 1;
	}
	multicast_table[byte] |= bit;

      } else {                      /* skip non-multicast entries */
	addrs += ETH_ALEN;
      }
    }
  }

  return;
}
1170
1171
1172
1173
1174
/*
** Scan EISA slots for DE4x5 adapters. ioaddr == 0 scans every slot
** (skipped entirely if a full autoprobe already ran); otherwise only the
** slot containing 'ioaddr' is checked. 'eisa_slots_full' remembers slots
** already claimed so they are not probed twice. Each hit is given a
** device structure via alloc_device() and initialised by de4x5_hw_init().
*/
static void eisa_probe(struct device *dev, short ioaddr)
{
  int i, maxSlots;
  int status;
  u_short iobase;
  struct bus_type *lp = &bus;

  if (!ioaddr && autoprobed) return ;   /* already scanned the bus */

  lp->bus = EISA;

  if (ioaddr == 0) {                    /* full scan: slots 1..15 */
    iobase = EISA_SLOT_INC;
    i = 1;
    maxSlots = MAX_EISA_SLOTS;
  } else {                              /* single slot derived from ioaddr */
    iobase = ioaddr;
    i = (ioaddr >> 12);
    maxSlots = i + 1;
  }

  for (status = -ENODEV; i<maxSlots && dev!=NULL; i++, iobase+=EISA_SLOT_INC) {
    if (((eisa_slots_full >> i) & 0x01) == 0) {
      if (DevicePresent(EISA_APROM) == 0) {
	eisa_slots_full |= (0x01 << i); /* claim the slot */
	if ((dev = alloc_device(dev, iobase)) != NULL) {
	  if ((status = de4x5_hw_init(dev, iobase)) == 0) {
	    num_de4x5s++;
	  }
	  num_eth++;
	}
      }
    } else {
      printk("%s: EISA device already allocated at 0x%04x.\n", dev->name, iobase);
    }
  }

  return;
}
1214
1215
1216
1217
1218 #define PCI_DEVICE (dev_num << 3)
1219
/*
** Scan PCI bus 0 for DC21040 devices via the BIOS32 pcibios interface.
** For each matching vendor/device ID: read the I/O base and IRQ from
** config space, enable I/O access and bus mastering, then hand off to
** alloc_device()/de4x5_hw_init() as for EISA. 'pci_slots_full' marks
** device numbers already claimed. NOTE(review): only bus 0 is walked
** (pb is never advanced) even though pci_slots_full has PCI_MAX_BUS_NUM
** entries — confirm whether multi-bus scanning was intended.
*/
static void pci_probe(struct device *dev, short ioaddr)

{
  u_char irq;
  u_short pb, dev_num;
  u_short i, vendor, device, status;
  u_long class, iobase;
  struct bus_type *lp = &bus;
  static char pci_init = 0;

  if (!ioaddr && autoprobed) return ;   /* already scanned */

  if (!pci_init) {                      /* one-time table clear */
    for (i=0;i<PCI_MAX_BUS_NUM; i++) {
      pci_slots_full[i] = 0;
      pci_init = 1;
    }
  }

  if (pcibios_present()) {
    lp->bus = PCI;

    for (pb = 0, dev_num = 0; dev_num < 32 && dev != NULL; dev_num++) {
      /* 0xffffffff class => no device in this slot */
      pcibios_read_config_dword(pb, PCI_DEVICE, PCI_CLASS_REVISION, &class);
      if (class != 0xffffffff) {
	if (((pci_slots_full[pb] >> dev_num) & 0x01) == 0) {
	  pcibios_read_config_word(pb, PCI_DEVICE, PCI_VENDOR_ID, &vendor);
	  pcibios_read_config_word(pb, PCI_DEVICE, PCI_DEVICE_ID, &device);
	  if ((vendor == DC21040_VID) && (device == DC21040_DID)) {

	    lp->device = dev_num;

	    /* I/O base from BAR0 */
	    pcibios_read_config_dword(pb, PCI_DEVICE, PCI_BASE_ADDRESS_0, &iobase);
	    iobase &= CBIO_MASK;

	    /* Interrupt line assigned by the BIOS */
	    pcibios_read_config_byte(pb, PCI_DEVICE, PCI_INTERRUPT_LINE, &irq);

	    /* Enable I/O decoding and bus mastering */
	    pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
	    status |= PCI_COMMAND_IO | PCI_COMMAND_MASTER;
	    pcibios_write_config_word(pb, PCI_DEVICE, PCI_COMMAND, status);

	    if (DevicePresent(DE4X5_APROM) == 0) {
	      pci_slots_full[pb] |= (0x01 << dev_num);  /* claim the slot */
	      if ((dev = alloc_device(dev, iobase)) != NULL) {
		dev->irq = irq;
		if ((status = de4x5_hw_init(dev, iobase)) == 0) {
		  num_de4x5s++;
		}
		num_eth++;
	      }
	    }
	  }
	} else {
	  printk("%s: PCI device already allocated at slot %d.\n", dev->name, dev_num);
	}
      }
    }
  }

  return;
}
1285
1286
1287
1288
1289
1290 static struct device *alloc_device(struct device *dev, int iobase)
1291 {
1292 int addAutoProbe = 0;
1293 struct device *tmp = NULL, *ret;
1294 int (*init)(struct device *) = NULL;
1295
1296
1297
1298
1299 while (dev->next != NULL) {
1300 if ((dev->base_addr == 0xffe0) || (dev->base_addr == 0)) break;
1301 dev = dev->next;
1302 num_eth++;
1303 }
1304
1305
1306
1307
1308
1309 if ((dev->base_addr == 0) && (num_de4x5s > 0)) {
1310 addAutoProbe++;
1311 tmp = dev->next;
1312 init = dev->init;
1313 }
1314
1315
1316
1317
1318
1319 if ((dev->next == NULL) &&
1320 !((dev->base_addr == 0xffe0) || (dev->base_addr == 0))){
1321 dev->next = (struct device *)kmalloc(sizeof(struct device) + 8,
1322 GFP_KERNEL);
1323
1324 dev = dev->next;
1325 if (dev == NULL) {
1326 printk("eth%d: Device not initialised, insufficient memory\n",
1327 num_eth);
1328 } else {
1329
1330
1331
1332
1333
1334 dev->name = (char *)(dev + sizeof(struct device));
1335 if (num_eth > 9999) {
1336 sprintf(dev->name,"eth????");
1337 } else {
1338 sprintf(dev->name,"eth%d", num_eth);
1339 }
1340 dev->base_addr = iobase;
1341 dev->next = NULL;
1342 dev->init = &de4x5_probe;
1343 num_de4x5s++;
1344 }
1345 }
1346 ret = dev;
1347
1348
1349
1350
1351
1352 if (ret != NULL) {
1353 if (addAutoProbe) {
1354 for (; (tmp->next!=NULL) && (tmp->base_addr!=0xffe0); tmp=tmp->next);
1355
1356
1357
1358
1359
1360 if ((tmp->next == NULL) && !(tmp->base_addr == 0xffe0)) {
1361 tmp->next = (struct device *)kmalloc(sizeof(struct device) + 8,
1362 GFP_KERNEL);
1363 tmp = tmp->next;
1364 if (tmp == NULL) {
1365 printk("%s: Insufficient memory to extend the device list.\n",
1366 dev->name);
1367 } else {
1368
1369
1370
1371
1372
1373 tmp->name = (char *)(tmp + sizeof(struct device));
1374 if (num_eth > 9999) {
1375 sprintf(tmp->name,"eth????");
1376 } else {
1377 sprintf(tmp->name,"eth%d", num_eth);
1378 }
1379 tmp->base_addr = 0;
1380 tmp->next = NULL;
1381 tmp->init = init;
1382 }
1383 } else {
1384 tmp->base_addr = 0;
1385 }
1386 }
1387 }
1388
1389 return ret;
1390 }
1391
1392
1393
1394
1395
1396
1397
1398 static int autoconf_media(struct device *dev)
1399 {
1400 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1401 int media, entry, iobase = dev->base_addr;
1402 char frame[64];
1403 u_long i, omr, sisr, linkBad;
1404 u_long t_330ms = 920000;
1405 u_long t_3s = 8000000;
1406
1407
1408 media = TP;
1409 RESET_SIA;
1410 outl(SICR_OE57 | SICR_SEL | SICR_SRL, DE4X5_SICR);
1411
1412
1413 for (linkBad=1,i=0;i<t_3s && linkBad;i++) {
1414 if (((sisr = inl(DE4X5_SISR)) & SISR_LKF) == 0) linkBad = 0;
1415 if (sisr & SISR_NCR) break;
1416 }
1417
1418 if (linkBad) {
1419
1420 media = BNC;
1421 RESET_SIA;
1422 outl(SIGR_JCK | SIGR_HUJ, DE4X5_SIGR);
1423 outl(STRR_CLD | STRR_CSQ | STRR_RSQ | STRR_DREN | STRR_ECEN, DE4X5_STRR);
1424 outl(SICR_OE57| SICR_OE24 | SICR_OE13 | SICR_SEL |
1425 SICR_AUI | SICR_SRL, DE4X5_SICR);
1426
1427
1428 for (i=0; i<t_330ms; i++) {
1429 sisr = inl(DE4X5_SISR);
1430 }
1431
1432
1433 create_packet(dev, frame, sizeof(frame));
1434
1435
1436 entry = lp->tx_new;
1437 load_packet(dev, frame, TD_LS | TD_FS | TD_AC | sizeof(frame), NULL);
1438
1439
1440 omr = inl(DE4X5_OMR);
1441 outl(omr|OMR_ST, DE4X5_OMR);
1442
1443
1444 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
1445 lp->tx_old = lp->tx_new;
1446
1447
1448
1449
1450
1451 for (linkBad=1,i=0;i<t_3s && linkBad;i++) {
1452 if ((inl(DE4X5_SISR) & SISR_NCR) == 1) break;
1453 if (lp->tx_ring[entry].status >= 0) linkBad=0;
1454 }
1455
1456 outl(omr, DE4X5_OMR);
1457
1458 if (linkBad || (lp->tx_ring[entry].status & TD_ES)) {
1459
1460 media = AUI;
1461 RESET_SIA;
1462 outl(SIGR_JCK | SIGR_HUJ, DE4X5_SIGR);
1463 outl(STRR_CLD | STRR_CSQ | STRR_RSQ | STRR_DREN | STRR_ECEN, DE4X5_STRR);
1464 outl(SICR_OE57| SICR_SEL | SICR_AUI | SICR_SRL, DE4X5_SICR);
1465
1466
1467 entry = lp->tx_new;
1468 load_packet(dev, frame, TD_LS | TD_FS | TD_AC | sizeof(frame), NULL);
1469
1470
1471 omr = inl(DE4X5_OMR);
1472 outl(omr|OMR_ST, DE4X5_OMR);
1473
1474
1475 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
1476 lp->tx_old = lp->tx_new;
1477
1478
1479
1480
1481
1482 for (linkBad=1,i=0;i<t_3s && linkBad;i++) {
1483 if ((inl(DE4X5_SISR) & SISR_NCR) == 1) break;
1484 if (lp->tx_ring[entry].status >= 0) linkBad=0;
1485 }
1486
1487 outl(omr, DE4X5_OMR);
1488
1489 if (linkBad || (lp->tx_ring[entry].status & TD_ES)) {
1490
1491 outl(SICR_RESET, DE4X5_SICR);
1492 outl(STRR_RESET, DE4X5_STRR);
1493 outl(SIGR_RESET, DE4X5_SIGR);
1494
1495 media = NC;
1496 }
1497 }
1498 }
1499
1500 if (de4x5_debug >= 1 ) {
1501 printk("%s: Media is %s.\n",dev->name,
1502 (media == NC ? "unconnected to this device" :
1503 (media == TP ? "TP" :
1504 (media == BNC ? "BNC" :
1505 "AUI"))));
1506 }
1507
1508 if (media) lp->lostMedia = 0;
1509
1510 return media;
1511 }
1512
1513
1514
1515
1516 static void create_packet(struct device *dev, char *frame, int len)
1517 {
1518 int i, j;
1519 char *buf = frame;
1520
1521 for (i=0; i<ETH_ALEN; i++) {
1522 *buf++ = dev->dev_addr[i];
1523 }
1524 for (i=0; i<ETH_ALEN; i++) {
1525 *buf++ = dev->dev_addr[i];
1526 }
1527 for (j=1; j>=0; j--) {
1528 *buf++ = (char) ((len >> 8*j) & 0xff);
1529 }
1530 *buf++ = 0;
1531
1532 for (i=len-4; i<len; i++) {
1533 buf[i] = 0;
1534 }
1535
1536 return;
1537 }
1538
1539
1540
1541 static void EISA_signature(char *name, short iobase)
1542 {
1543 unsigned long i;
1544 char *signatures[] = DE4X5_SIGNATURE;
1545 char ManCode[8];
1546 union {
1547 u_long ID;
1548 u_char Id[4];
1549 } Eisa;
1550
1551 strcpy(name, "");
1552 Eisa.ID = inl(iobase);
1553
1554 ManCode[0]=(((Eisa.Id[0]>>2)&0x1f)+0x40);
1555 ManCode[1]=(((Eisa.Id[1]&0xe0)>>5)+((Eisa.Id[0]&0x03)<<3)+0x40);
1556 ManCode[2]=(((Eisa.Id[2]>>4)&0x0f)+0x30);
1557 ManCode[3]=((Eisa.Id[2]&0x0f)+0x30);
1558 ManCode[4]=(((Eisa.Id[3]>>4)&0x0f)+0x30);
1559 ManCode[5]='\0';
1560
1561 for (i=0;*signatures[i] != '\0' && *name == '\0';i++) {
1562 if (strstr(ManCode, signatures[i]) != NULL) {
1563 strcpy(name,ManCode);
1564 }
1565 }
1566
1567 return;
1568 }
1569
1570
1571
1572
1573
1574
1575 static int DevicePresent(short aprom_addr)
1576 {
1577 static short fp=1, sigLength=0;
1578 static char devSig[] = ETH_PROM_SIG;
1579 char data;
1580 long i, j;
1581 int status = 0;
1582 struct bus_type *lp = &bus;
1583 static char asc2hex(char value);
1584
1585
1586
1587
1588 if (fp) {
1589 for (i=0,j=0;devSig[i] != '\0' && !status;i+=2,j++) {
1590 if ((devSig[i]=asc2hex(devSig[i]))>=0) {
1591 devSig[i]<<=4;
1592 if((devSig[i+1]=asc2hex(devSig[i+1]))>=0){
1593 devSig[j]=devSig[i]+devSig[i+1];
1594 } else {
1595 status= -1;
1596 }
1597 } else {
1598 status= -1;
1599 }
1600 }
1601 sigLength=j;
1602 fp = 0;
1603 }
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613 if (!status) {
1614 long tmp;
1615 for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
1616 if (lp->bus == PCI) {
1617 while ((tmp = inl(aprom_addr)) < 0);
1618 data = (char)tmp;
1619 } else {
1620 data = inb(aprom_addr);
1621 }
1622 if (devSig[j] == data) {
1623 j++;
1624 } else {
1625 j=0;
1626 }
1627 }
1628
1629 if (j!=sigLength) {
1630 status = -ENODEV;
1631 }
1632 }
1633
1634 return status;
1635 }
1636
1637 static int aprom_crc(struct device *dev)
1638 {
1639 int iobase = dev->base_addr;
1640 long i, k, tmp;
1641 unsigned short j,chksum;
1642 unsigned char status = 0;
1643 struct bus_type *lp = &bus;
1644
1645 for (i=0,k=0,j=0;j<3;j++) {
1646 k <<= 1 ;
1647 if (k > 0xffff) k-=0xffff;
1648
1649 if (lp->bus == PCI) {
1650 while ((tmp = inl(DE4X5_APROM)) < 0);
1651 k += (u_char) tmp;
1652 dev->dev_addr[i++] = (u_char) tmp;
1653 while ((tmp = inl(DE4X5_APROM)) < 0);
1654 k += (u_short) (tmp << 8);
1655 dev->dev_addr[i++] = (u_char) tmp;
1656 } else {
1657 k += (u_char) (tmp = inb(EISA_APROM));
1658 dev->dev_addr[i++] = (u_char) tmp;
1659 k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
1660 dev->dev_addr[i++] = (u_char) tmp;
1661 }
1662
1663 if (k > 0xffff) k-=0xffff;
1664 }
1665 if (k == 0xffff) k=0;
1666
1667 if (lp->bus == PCI) {
1668 while ((tmp = inl(DE4X5_APROM)) < 0);
1669 chksum = (u_char) tmp;
1670 while ((tmp = inl(DE4X5_APROM)) < 0);
1671 chksum |= (u_short) (tmp << 8);
1672 } else {
1673 chksum = (u_char) inb(EISA_APROM);
1674 chksum |= (u_short) (inb(EISA_APROM) << 8);
1675 }
1676
1677 if (k != chksum) status = -1;
1678
1679 return status;
1680 }
1681
1682
1683
1684
1685
1686 static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd)
1687 {
1688 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1689 struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_data;
1690 int i, j, iobase = dev->base_addr, status = 0;
1691 u_long omr;
1692 union {
1693 unsigned char addr[HASH_TABLE_LEN * ETH_ALEN];
1694 unsigned short sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
1695 unsigned long lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2];
1696 } tmp;
1697
1698 switch(ioc->cmd) {
1699 case DE4X5_GET_HWADDR:
1700 for (i=0; i<ETH_ALEN; i++) {
1701 tmp.addr[i] = dev->dev_addr[i];
1702 }
1703 ioc->len = ETH_ALEN;
1704 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
1705
1706 break;
1707 case DE4X5_SET_HWADDR:
1708 if (suser()) {
1709 int offset;
1710 char *pa;
1711 u_long omr;
1712
1713 memcpy_fromfs(tmp.addr,ioc->data,ETH_ALEN);
1714 for (i=0; i<ETH_ALEN; i++) {
1715 dev->dev_addr[i] = tmp.addr[i];
1716 }
1717 omr = inl(DE4X5_OMR);
1718 if (omr & OMR_HP) {
1719 offset = IMPERF_PA_OFFSET;
1720 } else {
1721 offset = PERF_PA_OFFSET;
1722 }
1723
1724 for (pa=lp->setup_frame+offset, i=0; i<ETH_ALEN; i++) {
1725 *(pa + i) = dev->dev_addr[i];
1726 if (i & 0x01) pa += 2;
1727 }
1728
1729 while (set_bit(0, (void *)&dev->tbusy) != 0);
1730 load_packet(dev, lp->setup_frame, TD_IC | HASH_F | TD_SET |
1731 SETUP_FRAME_LEN, NULL);
1732 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
1733 outl(POLL_DEMAND, DE4X5_TPD);
1734 dev->tbusy = 0;
1735
1736 } else {
1737 status = -EPERM;
1738 }
1739
1740 break;
1741 case DE4X5_SET_PROM:
1742 if (suser()) {
1743 omr = inl(DE4X5_OMR);
1744 omr |= OMR_PR;
1745 omr &= ~OMR_PM;
1746 outl(omr, DE4X5_OMR);
1747 } else {
1748 status = -EPERM;
1749 }
1750
1751 break;
1752 case DE4X5_CLR_PROM:
1753 if (suser()) {
1754 omr = inl(DE4X5_OMR);
1755 omr &= ~OMR_PR;
1756 outb(omr, DE4X5_OMR);
1757 } else {
1758 status = -EPERM;
1759 }
1760
1761 break;
1762 case DE4X5_SAY_BOO:
1763 printk("%s: Boo!\n", dev->name);
1764
1765 break;
1766 case DE4X5_GET_MCA:
1767 ioc->len = (HASH_TABLE_LEN >> 3);
1768 memcpy_tofs(ioc->data, lp->setup_frame, 192);
1769
1770 break;
1771 case DE4X5_SET_MCA:
1772 if (suser()) {
1773 if (ioc->len != HASH_TABLE_LEN) {
1774 memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN * ioc->len);
1775 }
1776 set_multicast_list(dev, ioc->len, tmp.addr);
1777 } else {
1778 status = -EPERM;
1779 }
1780
1781 break;
1782 case DE4X5_CLR_MCA:
1783 if (suser()) {
1784 set_multicast_list(dev, 0, NULL);
1785 } else {
1786 status = -EPERM;
1787 }
1788
1789 break;
1790 case DE4X5_MCA_EN:
1791 if (suser()) {
1792 omr = inl(DE4X5_OMR);
1793 omr |= OMR_PM;
1794 omr &= ~OMR_PR;
1795 outl(omr, DE4X5_OMR);
1796 } else {
1797 status = -EPERM;
1798 }
1799
1800 break;
1801 case DE4X5_GET_STATS:
1802 cli();
1803 memcpy_tofs(ioc->data, &lp->pktStats, sizeof(lp->pktStats));
1804 ioc->len = DE4X5_PKT_STAT_SZ;
1805 sti();
1806
1807 break;
1808 case DE4X5_CLR_STATS:
1809 if (suser()) {
1810 cli();
1811 memset(&lp->pktStats, 0, sizeof(lp->pktStats));
1812 sti();
1813 } else {
1814 status = -EPERM;
1815 }
1816
1817 break;
1818 case DE4X5_GET_OMR:
1819 tmp.addr[0] = inl(DE4X5_OMR);
1820 memcpy_tofs(ioc->data, tmp.addr, 1);
1821
1822 break;
1823 case DE4X5_SET_OMR:
1824 if (suser()) {
1825 memcpy_fromfs(tmp.addr, ioc->data, 1);
1826 outl(tmp.addr[0], DE4X5_OMR);
1827 } else {
1828 status = -EPERM;
1829 }
1830
1831 break;
1832 case DE4X5_GET_REG:
1833 tmp.lval[0] = inl(DE4X5_STS);
1834 tmp.lval[1] = inl(DE4X5_BMR);
1835 tmp.lval[2] = inl(DE4X5_IMR);
1836 tmp.lval[3] = inl(DE4X5_OMR);
1837 tmp.lval[4] = inl(DE4X5_SISR);
1838 tmp.lval[5] = inl(DE4X5_SICR);
1839 tmp.lval[6] = inl(DE4X5_STRR);
1840 tmp.lval[7] = inl(DE4X5_SIGR);
1841 memcpy_tofs(ioc->data, tmp.addr, 32);
1842
1843 break;
1844
1845 #define DE4X5_DUMP 0x0f
1846
1847 case DE4X5_DUMP:
1848 j = 0;
1849 tmp.addr[j++] = dev->irq;
1850 for (i=0; i<ETH_ALEN; i++) {
1851 tmp.addr[j++] = dev->dev_addr[i];
1852 }
1853 tmp.addr[j++] = lp->rxRingSize;
1854 tmp.lval[j>>2] = eisa_slots_full; j+=4;
1855 tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
1856 tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
1857
1858 for (i=0;i<lp->rxRingSize-1;i++){
1859 if (i < 3) {
1860 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
1861 }
1862 }
1863 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
1864 for (i=0;i<lp->txRingSize-1;i++){
1865 if (i < 3) {
1866 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
1867 }
1868 }
1869 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
1870
1871 for (i=0;i<lp->rxRingSize-1;i++){
1872 if (i < 3) {
1873 tmp.lval[j>>2] = (long)lp->rx_ring[i].buf; j+=4;
1874 }
1875 }
1876 tmp.lval[j>>2] = (long)lp->rx_ring[i].buf; j+=4;
1877 for (i=0;i<lp->txRingSize-1;i++){
1878 if (i < 3) {
1879 tmp.lval[j>>2] = (long)lp->tx_ring[i].buf; j+=4;
1880 }
1881 }
1882 tmp.lval[j>>2] = (long)lp->tx_ring[i].buf; j+=4;
1883
1884 for (i=0;i<lp->rxRingSize;i++){
1885 tmp.lval[j>>2] = lp->rx_ring[i].status; j+=4;
1886 }
1887 for (i=0;i<lp->txRingSize;i++){
1888 tmp.lval[j>>2] = lp->tx_ring[i].status; j+=4;
1889 }
1890
1891 tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
1892 tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
1893 tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
1894 tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
1895 tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
1896 tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
1897 tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
1898 tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
1899
1900 tmp.addr[j++] = lp->txRingSize;
1901 tmp.addr[j++] = dev->tbusy;
1902
1903 ioc->len = j;
1904 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
1905
1906 break;
1907 default:
1908 status = -EOPNOTSUPP;
1909 }
1910
1911 return status;
1912 }
1913
/*
** Convert an ASCII hex digit ('0'-'9', 'A'-'F', 'a'-'f') to its numeric
** value (0-15).  Returns -1 for any other character.
**
** Rewritten with explicit range checks: the previous bit-twiddling
** version depended on plain 'char' being signed and was hard to verify
** by inspection.  Results are identical for every valid hex digit.
*/
static char asc2hex(char value)
{
  if ((value >= '0') && (value <= '9')) {
    return value - '0';
  }
  if ((value >= 'A') && (value <= 'F')) {
    return value - 'A' + 0x0a;
  }
  if ((value >= 'a') && (value <= 'f')) {
    return value - 'a' + 0x0a;
  }
  return -1;
}
1930
1931 #ifdef MODULE
char kernel_version[] = UTS_RELEASE;  /* kernel version the module was built for */
/* Template device for module loading; de4x5_probe() fills in the rest.
** The positional initializers below presumably cover name, the four
** rmem/mem bounds, base_addr, irq, then flags -- verify against the
** struct device layout in <linux/netdevice.h> before reordering.
*/
static struct device thisDE4X5 = {
  " ",                               /* name: blank, assigned by the kernel */
  0, 0, 0, 0,                        /* memory bounds (unused here) */
  0x2000, 10,                        /* default I/O base and IRQ; overwritten
                                     ** from 'io'/'irq' in init_module() */
  0, 0, 0, NULL, de4x5_probe };

int io=0x2000;                       /* insmod-settable I/O base */
int irq=10;                          /* insmod-settable IRQ */
1941
1942 int
1943 init_module(void)
1944 {
1945 thisDE4X5.base_addr=io;
1946 thisDE4X5.irq=irq;
1947 if (register_netdev(&thisDE4X5) != 0)
1948 return -EIO;
1949 return 0;
1950 }
1951
1952 void
1953 cleanup_module(void)
1954 {
1955 if (MOD_IN_USE) {
1956 printk("%s: device busy, remove delayed\n",thisDE4X5.name);
1957 } else {
1958 unregister_netdev(&thisDE4X5);
1959 }
1960 }
1961 #endif
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973