This source file includes the following definitions.
- de4x5_probe
- de4x5_hw_init
- de4x5_open
- de4x5_init
- de4x5_queue_pkt
- de4x5_interrupt
- de4x5_rx
- de4x5_tx
- de4x5_ast
- de4x5_close
- de4x5_get_stats
- load_packet
- set_multicast_list
- SetMulticastFilter
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144 static const char *version = "de4x5.c:v0.32 6/26/95 davies@wanton.lkg.dec.com\n";
145
146 #include <linux/module.h>
147
148 #include <linux/kernel.h>
149 #include <linux/sched.h>
150 #include <linux/string.h>
151 #include <linux/interrupt.h>
152 #include <linux/ptrace.h>
153 #include <linux/errno.h>
154 #include <linux/ioport.h>
155 #include <linux/malloc.h>
156 #include <linux/bios32.h>
157 #include <linux/pci.h>
158 #include <linux/delay.h>
159 #include <asm/bitops.h>
160 #include <asm/io.h>
161 #include <asm/dma.h>
162 #include <asm/segment.h>
163
164 #include <linux/netdevice.h>
165 #include <linux/etherdevice.h>
166 #include <linux/skbuff.h>
167
168 #include <linux/time.h>
169 #include <linux/types.h>
170 #include <linux/unistd.h>
171
172 #include "de4x5.h"
173
174 #ifdef DE4X5_DEBUG
175 static int de4x5_debug = DE4X5_DEBUG;
176 #else
177 static int de4x5_debug = 1;
178 #endif
179
180 #ifdef DE4X5_AUTOSENSE
181 static int de4x5_autosense = DE4X5_AUTOSENSE;
182 #else
183 static int de4x5_autosense = AUTO;
184 #endif
185
186 #ifdef DE4X5_FULL_DUPLEX
187 static s32 de4x5_full_duplex = 1;
188 #else
189 static s32 de4x5_full_duplex = 0;
190 #endif
191
192 #define DE4X5_NDA 0xffe0
193
194
195
196
197 #define PROBE_LENGTH 32
198 #define ETH_PROM_SIG 0xAA5500FFUL
199
200
201
202
203 #define PKT_BUF_SZ 1536
204 #define MAX_PKT_SZ 1514
205 #define MAX_DAT_SZ 1500
206 #define MIN_DAT_SZ 1
207 #define PKT_HDR_LEN 14
208 #define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
209 #define QUEUE_PKT_TIMEOUT (3*HZ)
210
211
212 #define CRC_POLYNOMIAL_BE 0x04c11db7UL
213 #define CRC_POLYNOMIAL_LE 0xedb88320UL
214
215
216
217
218 #define DE4X5_EISA_IO_PORTS 0x0c00
219 #define DE4X5_EISA_TOTAL_SIZE 0xfff
220
221 #define MAX_EISA_SLOTS 16
222 #define EISA_SLOT_INC 0x1000
223
224 #define DE4X5_SIGNATURE {"DE425",""}
225 #define DE4X5_NAME_LENGTH 8
226
227
228
229
230 #define PCI_MAX_BUS_NUM 8
231 #define DE4X5_PCI_TOTAL_SIZE 0x80
232 #define DE4X5_CLASS_CODE 0x00020000
233
234
235
236
237
238
239
240 #define ALIGN4 ((u_long)4 - 1)
241 #define ALIGN8 ((u_long)8 - 1)
242 #define ALIGN16 ((u_long)16 - 1)
243 #define ALIGN32 ((u_long)32 - 1)
244 #define ALIGN64 ((u_long)64 - 1)
245 #define ALIGN128 ((u_long)128 - 1)
246
247 #define ALIGN ALIGN32
248 #define CACHE_ALIGN CAL_16LONG
249 #define DESC_SKIP_LEN DSL_0
250
251 #define DESC_ALIGN
252
253 #ifndef IS_NOT_DEC
254 static int is_not_dec = 0;
255 #else
256 static int is_not_dec = 1;
257 #endif
258
259
260
261
/*
** Interrupt-mask helpers. All four expand in a scope that must provide
** `imr` (s32 scratch), `lp` (struct de4x5_private *) and `iobase` (used
** inside the DE4X5_IMR register macro). ENABLE/UNMASK assume `imr`
** already holds the current mask; DISABLE/MASK re-read it from CSR7.
*/
#define ENABLE_IRQs { \
    imr |= lp->irq_en;\
    outl(imr, DE4X5_IMR); \
}

#define DISABLE_IRQs {\
    imr = inl(DE4X5_IMR);\
    imr &= ~lp->irq_en;\
    outl(imr, DE4X5_IMR); \
}

#define UNMASK_IRQs {\
    imr |= lp->irq_mask;\
    outl(imr, DE4X5_IMR); \
}

#define MASK_IRQs {\
    imr = inl(DE4X5_IMR);\
    imr &= ~lp->irq_mask;\
    outl(imr, DE4X5_IMR); \
}
283
284
285
286
/*
** Start/stop the TX and RX state machines via the operation-mode
** register (CSR6). Expect `omr` (s32 scratch) and `iobase` in scope.
*/
#define START_DE4X5 {\
    omr = inl(DE4X5_OMR);\
    omr |= OMR_ST | OMR_SR;\
    outl(omr, DE4X5_OMR); \
}

#define STOP_DE4X5 {\
    omr = inl(DE4X5_OMR);\
    omr &= ~(OMR_ST|OMR_SR);\
    outl(omr, DE4X5_OMR); \
}
298
299
300
301
302 #define RESET_SIA outl(0, DE4X5_SICR);
303
304
305
306
307 #define DE4X5_AUTOSENSE_MS 250
308
309
310
311
/*
** Layout of the serial ROM (SROM) holding the adapter identity.
** The IEEE (MAC) address lives at offset 20; `chksum` covers the
** preceding bytes.
*/
struct de4x5_srom {
    char reserved[18];
    char version;
    char num_adapters;
    char ieee_addr[6];      /* factory-assigned station address */
    char info[100];
    short chksum;
};
320
321
322
323
324
325
326
327
328
329 #define NUM_RX_DESC 8
330 #define NUM_TX_DESC 32
331 #define BUFF_ALLOC_RETRIES 10
332 #define RX_BUFF_SZ 1536
333
/*
** Hardware DMA descriptor, shared with the DC21x4x chip — field order
** and sizes are fixed by the hardware; do not reorder. `status` is
** volatile because the chip writes it asynchronously (sign bit = OWN).
*/
struct de4x5_desc {
    volatile s32 status;    /* ownership + completion status (chip-written) */
    u32 des1;               /* control bits and buffer byte counts */
    u32 buf;                /* bus address of the data buffer */
    u32 next;               /* bus address of second buffer / next descriptor */
    DESC_ALIGN
};
341
342
343
344
345 #define DE4X5_PKT_STAT_SZ 16
346 #define DE4X5_PKT_BIN_SZ 128
347
348
/*
** Per-adapter driver state, hung off dev->priv (allocated cache-aligned
** in de4x5_hw_init since the descriptor rings are embedded here).
*/
struct de4x5_private {
    char adapter_name[80];                  /* e.g. "DE500 (eth0)" for request_region */
    struct de4x5_desc rx_ring[NUM_RX_DESC]; /* RX descriptor ring (chip-shared) */
    struct de4x5_desc tx_ring[NUM_TX_DESC]; /* TX descriptor ring (chip-shared) */
    struct sk_buff *skb[NUM_TX_DESC];       /* skb owned by each TX slot, freed on completion */
    int rx_new, rx_old;                     /* RX ring head/tail indices */
    int tx_new, tx_old;                     /* TX ring head/tail indices */
    char setup_frame[SETUP_FRAME_LEN];      /* address-filter setup frame buffer */
    struct enet_statistics stats;
    struct {                                /* private packet accounting */
        u_int bins[DE4X5_PKT_STAT_SZ];      /* RX size histogram; bins[0] is total */
        u_int unicast;
        u_int multicast;
        u_int broadcast;
        u_int excessive_collisions;
        u_int tx_underruns;
        u_int excessive_underruns;
    } pktStats;
    char rxRingSize;
    char txRingSize;
    int bus;                                /* EISA or PCI */
    int bus_num;                            /* PCI bus number */
    int chipset;                            /* DC21040/41/140 */
    s32 irq_mask;                           /* normal event interrupt mask */
    s32 irq_en;                             /* summary (normal/abnormal) enables */
    int media;                              /* current media (TP, BNC, _100Mb, ...) */
    int linkProb;                           /* link-problem flag for the autosense timer */
    int autosense;                          /* requested media-sense mode */
    int tx_enable;                          /* issue TX poll demands when TRUE */
    int lostMedia;                          /* consecutive lost-media events */
    int setup_f;                            /* HASH_PERF or PERFECT filtering mode */
};
381
382
383
384
385
386
387
388
389
/*
** Free TX descriptor count; one slot is always kept empty so that
** tx_new == tx_old unambiguously means "ring empty". Needs `lp` in scope.
*/
#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
                        lp->tx_old+lp->txRingSize-lp->tx_new-1:\
                        lp->tx_old -lp->tx_new-1)
393
394
395
396
397 static int de4x5_open(struct device *dev);
398 static int de4x5_queue_pkt(struct sk_buff *skb, struct device *dev);
399 static void de4x5_interrupt(int irq, struct pt_regs *regs);
400 static int de4x5_close(struct device *dev);
401 static struct enet_statistics *de4x5_get_stats(struct device *dev);
402 static void set_multicast_list(struct device *dev);
403 static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd);
404
405
406
407
408 static int de4x5_hw_init(struct device *dev, u_long iobase);
409 static int de4x5_init(struct device *dev);
410 static int de4x5_rx(struct device *dev);
411 static int de4x5_tx(struct device *dev);
412 static int de4x5_ast(struct device *dev);
413
414 static int autoconf_media(struct device *dev);
415 static void create_packet(struct device *dev, char *frame, int len);
416 static void dce_us_delay(u32 usec);
417 static void dce_ms_delay(u32 msec);
418 static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb);
419 static void dc21040_autoconf(struct device *dev);
420 static void dc21041_autoconf(struct device *dev);
421 static void dc21140_autoconf(struct device *dev);
422 static int test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
423
424 static int ping_media(struct device *dev);
425 static void reset_init_sia(struct device *dev, s32 sicr, s32 strr, s32 sigr);
426 static int test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec);
427 static void load_ms_timer(struct device *dev, u32 msec);
428 static int EISA_signature(char *name, s32 eisa_id);
429 static int DevicePresent(u_long iobase);
430 static short srom_rd(u_long address, u_char offset);
431 static void srom_latch(u_int command, u_long address);
432 static void srom_command(u_int command, u_long address);
433 static void srom_address(u_int command, u_long address, u_char offset);
434 static short srom_data(u_int command, u_long address);
435
436 static void sendto_srom(u_int command, u_long addr);
437 static int getfrom_srom(u_long addr);
438 static void SetMulticastFilter(struct device *dev, int num_addrs, char *addrs);
439 static int get_hw_addr(struct device *dev);
440
441 static void eisa_probe(struct device *dev, u_long iobase);
442 static void pci_probe(struct device *dev, u_long iobase);
443 static struct device *alloc_device(struct device *dev, u_long iobase);
444 static char *build_setup_frame(struct device *dev, int mode);
445 static void disable_ast(struct device *dev);
446 static void enable_ast(struct device *dev, u32 time_out);
447 static void kick_tx(struct device *dev);
448
449 #ifdef MODULE
450 int init_module(void);
451 void cleanup_module(void);
452 static int autoprobed = 1, loading_module = 1;
453 # else
454 static unsigned char de4x5_irq[] = {5,9,10,11};
455 static int autoprobed = 0, loading_module = 0;
456 #endif
457
458 static char name[DE4X5_NAME_LENGTH + 1];
459 static int num_de4x5s = 0, num_eth = 0;
460
461
462
463
464
465
/*
** Probe-time scratch state describing the bus/chip currently being
** probed; copied into de4x5_private once an adapter is accepted.
** Single global instance — probing is strictly sequential.
*/
static struct bus_type {
    int bus;                /* EISA or PCI */
    int bus_num;
    int device;
    int chipset;
    struct de4x5_srom srom;
    int autosense;
} bus;
474
475
476
477
/*
** Full software reset of the chip via the bus-mode register (CSR0):
** pulse BMR_SWR then restore the original BMR value, with settling
** delays. The dummy CSR0 reads give the chip time to complete the
** reset. Needs `iobase` in scope.
*/
#define RESET_DE4X5 {\
    int i;\
    i=inl(DE4X5_BMR);\
    dce_ms_delay(1);\
    outl(i | BMR_SWR, DE4X5_BMR);\
    dce_ms_delay(1);\
    outl(i, DE4X5_BMR);\
    dce_ms_delay(1);\
    for (i=0;i<5;i++) {inl(DE4X5_BMR); dce_ms_delay(1);}\
    dce_ms_delay(1);\
}
489
490
491
/*
** Driver entry point: probe the EISA and PCI buses for DE4x5 adapters.
**
** dev->base_addr semantics: 0 => autoprobe all slots; non-zero => probe
** only that address. Returns 0 if at least one device was initialised
** (detected via a non-NULL dev->priv somewhere on the device list),
** -ENODEV otherwise, or -EIO for the unsupported module-autoprobe case.
*/
int de4x5_probe(struct device *dev)
{
    int tmp = num_de4x5s, status = -ENODEV;
    u_long iobase = dev->base_addr;

    if ((iobase == 0) && loading_module){
        printk("Autoprobing is not supported when loading a module based driver.\n");
        status = -EIO;
    } else {
        eisa_probe(dev, iobase);
        pci_probe(dev, iobase);

        /* num_de4x5s unchanged => the explicitly requested address failed */
        if ((tmp == num_de4x5s) && (iobase != 0) && loading_module) {
            printk("%s: de4x5_probe() cannot find device at 0x%04lx.\n", dev->name,
                   iobase);
        }

        /* Walk to the first device the probes actually initialised */
        for (; (dev->priv == NULL) && (dev->next != NULL); dev = dev->next);

        if (dev->priv) status = 0;
        if (iobase == 0) autoprobed = 1;
    }

    return status;
}
521
/*
** One-time hardware initialisation for a detected adapter at `iobase`:
** reset the chip, identify it, read the station address, allocate the
** cache-aligned private area and RX buffers, discover the IRQ, and
** install the net-device method pointers.
**
** NOTE(review): the outer `lp` is the probe-global struct bus_type; it
** is deliberately shadowed by a struct de4x5_private *lp inside the
** (status == 0) branch — tmpbus/tmpchs carry the values across.
** Returns 0 on success, -ENOMEM/-ENXIO on failure.
*/
static int
de4x5_hw_init(struct device *dev, u_long iobase)
{
    struct bus_type *lp = &bus;
    int tmpbus, tmpchs, i, j, status=0;
    char *tmp;

    /* Wake a DC21041 out of PCI power-save (sleep/snooze) mode */
    if (lp->chipset == DC21041) {
        outl(0, PCI_CFDA);
        dce_ms_delay(10);
    }

    RESET_DE4X5;

    /* After reset both state machines must be stopped */
    if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) == 0) {
        /* Name the board from the chip type (PCI) or EISA signature */
        if (lp->bus == PCI) {
            if (!is_not_dec) {
                if ((lp->chipset == DC21040) || (lp->chipset == DC21041)) {
                    strcpy(name, "DE435");
                } else if (lp->chipset == DC21140) {
                    strcpy(name, "DE500");
                }
            } else {
                strcpy(name, "UNKNOWN");
            }
        } else {
            EISA_signature(name, EISA_ID0);
        }

        if (*name != '\0') {
            dev->base_addr = iobase;
            if (lp->bus == EISA) {
                printk("%s: %s at %04lx (EISA slot %ld)",
                       dev->name, name, iobase, ((iobase>>12)&0x0f));
            } else {
                printk("%s: %s at %04lx (PCI bus %d, device %d)", dev->name, name,
                       iobase, lp->bus_num, lp->device);
            }

            printk(", h/w address ");
            status = get_hw_addr(dev);  /* non-zero => PROM CRC error */
            for (i = 0; i < ETH_ALEN - 1; i++) {
                printk("%2.2x:", dev->dev_addr[i]);
            }
            printk("%2.2x,\n", dev->dev_addr[i]);

            tmpbus = lp->bus;
            tmpchs = lp->chipset;

            if (status == 0) {
                struct de4x5_private *lp;  /* shadows the bus_type pointer */

                /* Over-allocate so the private area (and the descriptor
                ** rings inside it) can be aligned to a cache boundary. */
                dev->priv = (void *) kmalloc(sizeof(struct de4x5_private) + ALIGN,
                                             GFP_KERNEL);
                if (dev->priv == NULL)
                    return -ENOMEM;

                /* NOTE(review): the unaligned kmalloc pointer is discarded
                ** here, so the later kfree_s of dev->priv frees the
                ** aligned (possibly offset) address — verify. */
                dev->priv = (void *)(((u_long)dev->priv + ALIGN) & ~ALIGN);
                lp = (struct de4x5_private *)dev->priv;
                memset(dev->priv, 0, sizeof(struct de4x5_private));
                lp->bus = tmpbus;
                lp->chipset = tmpchs;

                /* Resolve the requested media-sense mode per chip type */
                if (de4x5_autosense & AUTO) {
                    lp->autosense = AUTO;
                } else {
                    if (lp->chipset != DC21140) {
                        if ((lp->chipset == DC21040) && (de4x5_autosense & TP_NW)) {
                            de4x5_autosense = TP;
                        }
                        if ((lp->chipset == DC21041) && (de4x5_autosense & BNC_AUI)) {
                            de4x5_autosense = BNC;
                        }
                        lp->autosense = de4x5_autosense & 0x001f;
                    } else {
                        lp->autosense = de4x5_autosense & 0x00c0;
                    }
                }

                sprintf(lp->adapter_name,"%s (%s)", name, dev->name);
                request_region(iobase, (lp->bus == PCI ? DE4X5_PCI_TOTAL_SIZE :
                                        DE4X5_EISA_TOTAL_SIZE),
                               lp->adapter_name);

                /* Allocate one contiguous, aligned block for all RX
                ** buffers; retry a few times in case memory is tight. */
                for (tmp=NULL, j=0; (j<BUFF_ALLOC_RETRIES) && (tmp==NULL); j++) {
                    if ((tmp = (void *)kmalloc(RX_BUFF_SZ * NUM_RX_DESC + ALIGN,
                                               GFP_KERNEL)) != NULL) {
                        tmp = (char *)(((u_long) tmp + ALIGN) & ~ALIGN);
                        for (i=0; i<NUM_RX_DESC; i++) {
                            lp->rx_ring[i].status = 0;
                            lp->rx_ring[i].des1 = RX_BUFF_SZ;
                            lp->rx_ring[i].buf = virt_to_bus(tmp + i * RX_BUFF_SZ);
                            lp->rx_ring[i].next = (u32)NULL;
                        }
                        barrier();
                    }
                }

                if (tmp != NULL) {
                    lp->rxRingSize = NUM_RX_DESC;
                    lp->txRingSize = NUM_TX_DESC;

                    /* Mark the ring ends so the chip wraps around */
                    lp->rx_ring[lp->rxRingSize - 1].des1 |= RD_RER;
                    lp->tx_ring[lp->txRingSize - 1].des1 |= TD_TER;

                    /* Tell the chip where the rings live */
                    outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
                    outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);

                    lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM ;
                    lp->irq_en = IMR_NIM | IMR_AIM;

                    lp->tx_enable = TRUE;

                    if (dev->irq < 2) {
#ifndef MODULE
                        /* Autodetect the IRQ by briefly starting the
                        ** receiver with abnormal interrupts enabled. */
                        unsigned char irqnum;
                        s32 omr;
                        autoirq_setup(0);

                        omr = inl(DE4X5_OMR);
                        outl(IMR_AIM|IMR_RUM, DE4X5_IMR);
                        outl(OMR_SR | omr, DE4X5_OMR);

                        irqnum = autoirq_report(1);
                        if (!irqnum) {
                            printk(" and failed to detect IRQ line.\n");
                            status = -ENXIO;
                        } else {
                            /* Accept only IRQs this hardware can use */
                            for (dev->irq=0,i=0; (i<sizeof(de4x5_irq)) && (!dev->irq); i++) {
                                if (irqnum == de4x5_irq[i]) {
                                    dev->irq = irqnum;
                                    printk(" and uses IRQ%d.\n", dev->irq);
                                }
                            }

                            if (!dev->irq) {
                                printk(" but incorrect IRQ line detected.\n");
                                status = -ENXIO;
                            }
                        }

                        outl(0, DE4X5_IMR);  /* mask everything again */

#endif
                    } else {
                        printk(" and requires IRQ%d (not probed).\n", dev->irq);
                    }
                } else {
                    printk("%s: Kernel could not allocate RX buffer memory.\n",
                           dev->name);
                    status = -ENXIO;
                }
                if (status) release_region(iobase, (lp->bus == PCI ?
                                                    DE4X5_PCI_TOTAL_SIZE :
                                                    DE4X5_EISA_TOTAL_SIZE));
            } else {
                printk(" which has an Ethernet PROM CRC error.\n");
                status = -ENXIO;
            }
        } else {
            status = -ENXIO;
        }
    } else {
        status = -ENXIO;
    }

    if (!status) {
        if (de4x5_debug > 0) {
            printk(version);
        }

        /* Install the device methods */
        dev->open = &de4x5_open;
        dev->hard_start_xmit = &de4x5_queue_pkt;
        dev->stop = &de4x5_close;
        dev->get_stats = &de4x5_get_stats;
#ifdef HAVE_MULTICAST
        dev->set_multicast_list = &set_multicast_list;
#endif
        dev->do_ioctl = &de4x5_ioctl;

        dev->mem_start = 0;

        ether_setup(dev);

        /* Put a DC21041 back to sleep until the device is opened */
        if (lp->chipset == DC21041) {
            outl(0, DE4X5_SICR);
            outl(CFDA_PSM, PCI_CFDA);
        }
    } else {
        /* Failure: release the RX buffer block and the private area */
        struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
        if (lp) {
            kfree_s(bus_to_virt(lp->rx_ring[0].buf),
                    RX_BUFF_SZ * NUM_RX_DESC + ALIGN);
        }
        if (dev->priv) {
            kfree_s(dev->priv, sizeof(struct de4x5_private) + ALIGN);
            dev->priv = NULL;
        }
    }

    return status;
}
748
749
/*
** Open the interface: wake the chip (DC21041), claim the IRQ, run the
** full ring/filter/media initialisation, then start the state machines
** and enable interrupts. Returns 0 on success, -EAGAIN if the IRQ is
** busy, or the de4x5_init() status.
*/
static int
de4x5_open(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;
    int i, status = 0;
    s32 imr, omr, sts;

    /* Wake a sleeping DC21041 before touching any CSRs */
    if (lp->chipset == DC21041) {
        outl(0, PCI_CFDA);
        dce_ms_delay(10);
    }

    if (request_irq(dev->irq, (void *)de4x5_interrupt, 0, lp->adapter_name)) {
        printk("de4x5_open(): Requested IRQ%d is busy\n",dev->irq);
        status = -EAGAIN;
    } else {

        irq2dev_map[dev->irq] = dev;  /* ISR uses this to find dev */

        status = de4x5_init(dev);

        if (de4x5_debug > 1){
            /* Dump the ring layout for debugging */
            printk("%s: de4x5 open with irq %d\n",dev->name,dev->irq);
            printk("\tphysical address: ");
            for (i=0;i<6;i++){
                printk("%2.2x:",(short)dev->dev_addr[i]);
            }
            printk("\n");
            printk("Descriptor head addresses:\n");
            printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
            printk("Descriptor addresses:\nRX: ");
            for (i=0;i<lp->rxRingSize-1;i++){
                if (i < 3) {
                    printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
                }
            }
            printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
            printk("TX: ");
            for (i=0;i<lp->txRingSize-1;i++){
                if (i < 3) {
                    printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
                }
            }
            printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
            printk("Descriptor buffers:\nRX: ");
            for (i=0;i<lp->rxRingSize-1;i++){
                if (i < 3) {
                    printk("0x%8.8x ",lp->rx_ring[i].buf);
                }
            }
            printk("...0x%8.8x\n",lp->rx_ring[i].buf);
            printk("TX: ");
            for (i=0;i<lp->txRingSize-1;i++){
                if (i < 3) {
                    printk("0x%8.8x ", lp->tx_ring[i].buf);
                }
            }
            printk("...0x%8.8x\n", lp->tx_ring[i].buf);
            printk("Ring size: \nRX: %d\nTX: %d\n",
                   (short)lp->rxRingSize,
                   (short)lp->txRingSize);
            printk("\tstatus: %d\n", status);
        }

        if (!status) {
            dev->tbusy = 0;
            dev->start = 1;
            dev->interrupt = UNMASK_INTERRUPTS;
            dev->trans_start = jiffies;

            START_DE4X5;

            imr = 0;
            UNMASK_IRQs;

            /* Acknowledge any stale status before enabling IRQs */
            sts = inl(DE4X5_STS);
            outl(sts, DE4X5_STS);

            ENABLE_IRQs;
        }
        if (de4x5_debug > 1) {
            printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
            printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
            printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
            printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
            printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
            printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
            printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
            printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
        }
    }

    MOD_INC_USE_COUNT;

    return status;
}
854
855
856
857
858
859
860
861
862
863 static int
864 de4x5_init(struct device *dev)
865 {
866 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
867 u_long iobase = dev->base_addr;
868 int i, j, status = 0;
869 s32 bmr, omr;
870
871
872 set_bit(0, (void *)&dev->tbusy);
873
874 RESET_DE4X5;
875
876 bmr = inl(DE4X5_BMR);
877 bmr |= PBL_8 | DESC_SKIP_LEN | CACHE_ALIGN;
878 outl(bmr, DE4X5_BMR);
879
880 if (lp->chipset != DC21140) {
881 omr = TR_96;
882 lp->setup_f = HASH_PERF;
883 } else {
884 omr = OMR_SDP | OMR_SF;
885 lp->setup_f = PERFECT;
886 }
887 outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
888 outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
889
890 lp->rx_new = lp->rx_old = 0;
891 lp->tx_new = lp->tx_old = 0;
892
893 for (i = 0; i < lp->rxRingSize; i++) {
894 lp->rx_ring[i].status = R_OWN;
895 }
896
897 for (i = 0; i < lp->txRingSize; i++) {
898 lp->tx_ring[i].status = 0;
899 }
900
901 barrier();
902
903
904 SetMulticastFilter(dev, 0, NULL);
905
906 if (lp->chipset != DC21140) {
907 load_packet(dev, lp->setup_frame, HASH_F|TD_SET|SETUP_FRAME_LEN, NULL);
908 } else {
909 load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, NULL);
910 }
911 outl(omr|OMR_ST, DE4X5_OMR);
912
913
914 for (j=0, i=jiffies;(i<=jiffies+HZ/100) && (j==0);) {
915 if (lp->tx_ring[lp->tx_new].status >= 0) j=1;
916 }
917 outl(omr, DE4X5_OMR);
918
919 if (j == 0) {
920 printk("%s: Setup frame timed out, status %08x\n", dev->name,
921 inl(DE4X5_STS));
922 status = -EIO;
923 }
924
925 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
926 lp->tx_old = lp->tx_new;
927
928
929 if (autoconf_media(dev) == 0) {
930 status = -EIO;
931 }
932
933 return 0;
934 }
935
936
937
938
939 static int
940 de4x5_queue_pkt(struct sk_buff *skb, struct device *dev)
941 {
942 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
943 u_long iobase = dev->base_addr;
944 int i, status = 0;
945 s32 imr, omr, sts;
946
947
948
949
950
951
952 if (set_bit(0, (void*)&dev->tbusy) == 0) {
953 cli();
954 de4x5_tx(dev);
955 dev->tbusy = 0;
956 sti();
957 }
958
959
960
961
962
963
964 if (dev->tbusy || (lp->lostMedia > LOST_MEDIA_THRESHOLD)) {
965 u_long tickssofar = jiffies - dev->trans_start;
966 if ((tickssofar < QUEUE_PKT_TIMEOUT) &&
967 (lp->lostMedia <= LOST_MEDIA_THRESHOLD)) {
968 status = -1;
969 } else {
970 if (de4x5_debug >= 1) {
971 printk("%s: transmit timed out, status %08x, tbusy:%ld, lostMedia:%d tickssofar:%ld, resetting.\n",dev->name, inl(DE4X5_STS), dev->tbusy, lp->lostMedia, tickssofar);
972 }
973
974
975 STOP_DE4X5;
976
977
978 for (i=lp->tx_old; i!=lp->tx_new; i=(++i)%lp->txRingSize) {
979 if (lp->skb[i] != NULL) {
980 if (lp->skb[i]->len != FAKE_FRAME_LEN) {
981 if (lp->tx_ring[i].status == T_OWN) {
982 dev_queue_xmit(lp->skb[i], dev, SOPRI_NORMAL);
983 } else {
984 dev_kfree_skb(lp->skb[i], FREE_WRITE);
985 }
986 } else {
987 dev_kfree_skb(lp->skb[i], FREE_WRITE);
988 }
989 lp->skb[i] = NULL;
990 }
991 }
992 if (skb->len != FAKE_FRAME_LEN) {
993 dev_queue_xmit(skb, dev, SOPRI_NORMAL);
994 } else {
995 dev_kfree_skb(skb, FREE_WRITE);
996 }
997
998
999 status = de4x5_init(dev);
1000
1001
1002 if (!status) {
1003
1004 dev->interrupt = UNMASK_INTERRUPTS;
1005 dev->start = 1;
1006 dev->tbusy = 0;
1007 dev->trans_start = jiffies;
1008
1009 START_DE4X5;
1010
1011
1012 imr = 0;
1013 UNMASK_IRQs;
1014
1015
1016 sts = inl(DE4X5_STS);
1017 outl(sts, DE4X5_STS);
1018
1019 ENABLE_IRQs;
1020 } else {
1021 printk("%s: hardware initialisation failure, status %08x.\n",
1022 dev->name, inl(DE4X5_STS));
1023 }
1024 }
1025 } else if (skb == NULL) {
1026 dev_tint(dev);
1027 } else if (skb->len == FAKE_FRAME_LEN) {
1028 dev_kfree_skb(skb, FREE_WRITE);
1029 } else if (skb->len > 0) {
1030
1031 if (set_bit(0, (void*)&dev->tbusy) != 0) {
1032 printk("%s: Transmitter access conflict.\n", dev->name);
1033 status = -1;
1034 } else {
1035 cli();
1036 if (TX_BUFFS_AVAIL) {
1037 load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
1038 if (lp->tx_enable) {
1039 outl(POLL_DEMAND, DE4X5_TPD);
1040 }
1041
1042 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
1043 dev->trans_start = jiffies;
1044
1045 if (TX_BUFFS_AVAIL) {
1046 dev->tbusy = 0;
1047 }
1048 } else {
1049 status = -1;
1050 }
1051 sti();
1052 }
1053 }
1054
1055 return status;
1056 }
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
/*
** Interrupt service routine. Loops while the status register (CSR5)
** still shows masked-in events, acknowledging each batch by writing the
** status back, and dispatching to the RX/TX/timer handlers. Summary
** interrupts are disabled for the duration and re-enabled on exit.
*/
static void
de4x5_interrupt(int irq, struct pt_regs *regs)
{
    struct device *dev = (struct device *)(irq2dev_map[irq]);
    struct de4x5_private *lp;
    s32 imr, omr, sts;
    u_long iobase;

    if (dev == NULL) {
        printk ("de4x5_interrupt(): irq %d for unknown device.\n", irq);
    } else {
        lp = (struct de4x5_private *)dev->priv;
        iobase = dev->base_addr;

        if (dev->interrupt)
            printk("%s: Re-entering the interrupt handler.\n", dev->name);

        DISABLE_IRQs;
        dev->interrupt = MASK_INTERRUPTS;

        while ((sts = inl(DE4X5_STS)) & lp->irq_mask) {
            outl(sts, DE4X5_STS);  /* ack this batch of events */

            if (sts & (STS_RI | STS_RU))    /* RX done / RX buffer unavailable */
                de4x5_rx(dev);

            if (sts & (STS_TI | STS_TU))    /* TX done / TX buffer unavailable */
                de4x5_tx(dev);

            if (sts & STS_TM)               /* autosense timer expired */
                de4x5_ast(dev);

            if (sts & STS_LNF) {            /* link fail: force media rescan */
                lp->lostMedia = LOST_MEDIA_THRESHOLD + 1;
                lp->irq_mask &= ~IMR_LFM;
                kick_tx(dev);
            }

            if (sts & STS_SE) {             /* fatal system/bus error */
                STOP_DE4X5;
                printk("%s: Fatal bus error occured, sts=%#8x, device stopped.\n",
                       dev->name, sts);
            }
        }

        /* TX room freed up: let the network layer send again */
        if (TX_BUFFS_AVAIL && dev->tbusy) {
            dev->tbusy = 0;
            mark_bh(NET_BH);
        }

        dev->interrupt = UNMASK_INTERRUPTS;
        ENABLE_IRQs;
    }

    return;
}
1125
1126 static int
1127 de4x5_rx(struct device *dev)
1128 {
1129 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1130 int i, entry;
1131 s32 status;
1132 char *buf;
1133
1134 for (entry = lp->rx_new; lp->rx_ring[entry].status >= 0;entry = lp->rx_new) {
1135 status = lp->rx_ring[entry].status;
1136
1137 if (status & RD_FS) {
1138 lp->rx_old = entry;
1139 }
1140
1141 if (status & RD_LS) {
1142 if (status & RD_ES) {
1143 lp->stats.rx_errors++;
1144 if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
1145 if (status & RD_CE) lp->stats.rx_crc_errors++;
1146 if (status & RD_OF) lp->stats.rx_fifo_errors++;
1147 } else {
1148 struct sk_buff *skb;
1149 short pkt_len = (short)(lp->rx_ring[entry].status >> 16) - 4;
1150
1151 if ((skb = dev_alloc_skb(pkt_len+2)) != NULL) {
1152 skb->dev = dev;
1153
1154 skb_reserve(skb,2);
1155 if (entry < lp->rx_old) {
1156 short len = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
1157 memcpy(skb_put(skb,len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), len);
1158 memcpy(skb_put(skb,pkt_len-len), bus_to_virt(lp->rx_ring[0].buf), pkt_len - len);
1159 } else {
1160 memcpy(skb_put(skb,pkt_len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), pkt_len);
1161 }
1162
1163
1164 skb->protocol=eth_type_trans(skb,dev);
1165 netif_rx(skb);
1166
1167
1168 lp->stats.rx_packets++;
1169 for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
1170 if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
1171 lp->pktStats.bins[i]++;
1172 i = DE4X5_PKT_STAT_SZ;
1173 }
1174 }
1175 buf = skb->data;
1176 if (buf[0] & 0x01) {
1177 if ((*(s32 *)&buf[0] == -1) && (*(s16 *)&buf[4] == -1)) {
1178 lp->pktStats.broadcast++;
1179 } else {
1180 lp->pktStats.multicast++;
1181 }
1182 } else if ((*(s32 *)&buf[0] == *(s32 *)&dev->dev_addr[0]) &&
1183 (*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) {
1184 lp->pktStats.unicast++;
1185 }
1186
1187 lp->pktStats.bins[0]++;
1188 if (lp->pktStats.bins[0] == 0) {
1189 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
1190 }
1191 } else {
1192 printk("%s: Insufficient memory; nuking packet.\n", dev->name);
1193 lp->stats.rx_dropped++;
1194 break;
1195 }
1196 }
1197
1198
1199 for (; lp->rx_old!=entry; lp->rx_old=(++lp->rx_old)%lp->rxRingSize) {
1200 lp->rx_ring[lp->rx_old].status = R_OWN;
1201 barrier();
1202 }
1203 lp->rx_ring[entry].status = R_OWN;
1204 barrier();
1205 }
1206
1207
1208
1209
1210 lp->rx_new = (++lp->rx_new) % lp->rxRingSize;
1211 }
1212
1213 return 0;
1214 }
1215
1216
1217
1218
1219 static int
1220 de4x5_tx(struct device *dev)
1221 {
1222 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1223 u_long iobase = dev->base_addr;
1224 int entry;
1225 s32 status;
1226
1227 for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
1228 status = lp->tx_ring[entry].status;
1229 if (status < 0) {
1230 break;
1231 } else if (status & TD_ES) {
1232 lp->stats.tx_errors++;
1233 if (status & TD_NC) lp->stats.tx_carrier_errors++;
1234 if (status & TD_LC) lp->stats.tx_window_errors++;
1235 if (status & TD_UF) lp->stats.tx_fifo_errors++;
1236 if (status & TD_LC) lp->stats.collisions++;
1237 if (status & TD_EC) lp->pktStats.excessive_collisions++;
1238 if (status & TD_DE) lp->stats.tx_aborted_errors++;
1239
1240 if ((status != 0x7fffffff) &&
1241 (status & (TD_LO | TD_NC | TD_EC | TD_LF))) {
1242 lp->lostMedia++;
1243 if (lp->lostMedia > LOST_MEDIA_THRESHOLD) {
1244 kick_tx(dev);
1245 }
1246 } else {
1247 outl(POLL_DEMAND, DE4X5_TPD);
1248 }
1249 } else {
1250 lp->stats.tx_packets++;
1251 lp->lostMedia = 0;
1252 }
1253
1254 if (lp->skb[entry] != NULL) {
1255 dev_kfree_skb(lp->skb[entry], FREE_WRITE);
1256 lp->skb[entry] = NULL;
1257 }
1258
1259
1260 lp->tx_old = (++lp->tx_old) % lp->txRingSize;
1261 }
1262
1263 return 0;
1264 }
1265
/*
** Autosense timer handler (DC21140 only): inspect the general-purpose
** port link bits and either schedule another sense interval or, if a
** link problem persists, force a full media rescan by inflating
** lostMedia and kicking the transmitter. Always returns 0.
*/
static int
de4x5_ast(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;
    s32 gep;

    disable_ast(dev);

    if (lp->chipset == DC21140) {
        gep = inl(DE4X5_GEP);
        /* Any of these combinations suggests the current media choice
        ** no longer matches what the link partner presents. */
        if (((lp->media == _100Mb) && (gep & GEP_SLNK)) ||
            ((lp->media == _10Mb) && (gep & GEP_LNP)) ||
            ((lp->media == _10Mb) && !(gep & GEP_SLNK)) ||
            (lp->media == NC)) {
            if (lp->linkProb || ((lp->media == NC) && (!(gep & GEP_LNP)))) {
                /* Second strike (or no link at all): force a rescan */
                lp->lostMedia = LOST_MEDIA_THRESHOLD + 1;
                lp->linkProb = 0;
                kick_tx(dev);
            } else {
                /* First strike: remember it and re-check after a
                ** media-dependent grace period. */
                switch(lp->media) {
                case NC:
                    lp->linkProb = 0;
                    enable_ast(dev, DE4X5_AUTOSENSE_MS);
                    break;

                case _10Mb:
                    lp->linkProb = 1;
                    enable_ast(dev, 1500);
                    break;

                case _100Mb:
                    lp->linkProb = 1;
                    enable_ast(dev, 4000);
                    break;
                }
            }
        } else {
            /* Link looks fine: clear any pending problem and re-arm */
            lp->linkProb = 0;
            enable_ast(dev, DE4X5_AUTOSENSE_MS);
        }
    }

    return 0;
}
1311
/*
** Close the interface: stop the chip, disable and release the IRQ, and
** put a DC21041 back into PCI power-save mode. Always returns 0.
*/
static int
de4x5_close(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;
    s32 imr, omr;

    dev->start = 0;
    dev->tbusy = 1;

    if (de4x5_debug > 1) {
        printk("%s: Shutting down ethercard, status was %8.8x.\n",
               dev->name, inl(DE4X5_STS));
    }

    /* Stop interrupts, then the TX/RX state machines */
    DISABLE_IRQs;

    STOP_DE4X5;

    free_irq(dev->irq);
    irq2dev_map[dev->irq] = 0;

    MOD_DEC_USE_COUNT;

    /* Put a DC21041 to sleep until the next open */
    if (lp->chipset == DC21041) {
        outl(0, DE4X5_SICR);
        outl(CFDA_PSM, PCI_CFDA);
    }

    return 0;
}
1350
1351 static struct enet_statistics *
1352 de4x5_get_stats(struct device *dev)
1353 {
1354 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1355 u_long iobase = dev->base_addr;
1356
1357 lp->stats.rx_missed_errors = (int) (inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
1358
1359 return &lp->stats;
1360 }
1361
/*
** Fill the tx_new descriptor with `buf`/`flags` and hand it to the
** chip. `skb` is remembered so de4x5_tx() can free it on completion
** (NULL for setup frames). The des1 mask keeps only TD_TER so the
** end-of-ring mark survives. Ordering is critical: all fields must be
** visible in memory (barrier) before the OWN bit is flipped, since the
** chip may fetch the descriptor immediately.
*/
static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;

    lp->tx_ring[lp->tx_new].buf = virt_to_bus(buf);
    lp->tx_ring[lp->tx_new].des1 &= TD_TER;   /* preserve end-of-ring bit only */
    lp->tx_ring[lp->tx_new].des1 |= flags;
    lp->skb[lp->tx_new] = skb;
    barrier();
    lp->tx_ring[lp->tx_new].status = T_OWN;   /* give it to the chip last */
    barrier();

    return;
}
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386 static void
1387 set_multicast_list(struct device *dev)
1388 {
1389 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1390 u_long iobase = dev->base_addr;
1391
1392
1393 if (irq2dev_map[dev->irq] != NULL)
1394 {
1395 if (num_addrs >= 0)
1396 {
1397 SetMulticastFilter(dev);
1398 if (lp->setup_f == HASH_PERF)
1399 {
1400 load_packet(dev, lp->setup_frame, TD_IC | HASH_F | TD_SET |
1401 SETUP_FRAME_LEN, NULL);
1402 }
1403 else
1404 {
1405 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
1406 SETUP_FRAME_LEN, NULL);
1407 }
1408
1409 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
1410 outl(POLL_DEMAND, DE4X5_TPD);
1411 dev->trans_start = jiffies;
1412 }
1413 else
1414 {
1415
1416 u32 omr;
1417 omr = inl(DE4X5_OMR);
1418 omr |= OMR_PR;
1419 outl(omr, DE4X5_OMR);
1420 }
1421 }
1422 return;
1423 }
1424
1425
1426
1427
1428
1429
1430 static void SetMulticastFilter(struct device *dev)
1431 {
1432 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1433 struct dev_mc_list *dmi=dev->mc_list;
1434 u_long iobase = dev->base_addr;
1435 int i, j, bit, byte;
1436 u16 hashcode;
1437 u32 omr, crc, poly = CRC_POLYNOMIAL_LE;
1438 char *pa;
1439 unsigned char *addrs;
1440
1441 omr = inl(DE4X5_OMR);
1442 pa = build_setup_frame(dev, ALL);
1443
1444 if (lp->setup_f == HASH_PERF)
1445 {
1446 if (num_addrs >= HASH_TABLE_LEN || (dev->flags&IFF_ALLMULTI))
1447 {
1448
1449 omr |= OMR_PM;
1450 }
1451 else
1452 {
1453 omr &= ~OMR_PM;
1454
1455 for (i=0;i<num_addrs;i++)
1456 {
1457
1458 addrs=dmi->dmi_addr;
1459 dmi=dmi->next;
1460 if ((*addrs & 0x01) == 1)
1461 {
1462
1463 crc = 0xffffffff;
1464 for (byte=0;byte<ETH_ALEN;byte++)
1465 {
1466
1467
1468 for (bit = *addrs++,j=0;j<8;j++, bit>>=1)
1469 {
1470 crc = (crc >> 1) ^ (((crc ^ bit) & 0x01) ? poly : 0);
1471 }
1472 }
1473 hashcode = crc & HASH_BITS;
1474
1475 byte = hashcode >> 3;
1476 bit = 1 << (hashcode & 0x07);
1477
1478 byte <<= 1;
1479 if (byte & 0x02)
1480 {
1481 byte -= 1;
1482 }
1483 lp->setup_frame[byte] |= bit;
1484
1485 }
1486 else
1487 {
1488 addrs += ETH_ALEN;
1489 }
1490 }
1491 }
1492 else
1493 {
1494 omr &= ~OMR_PM;
1495 for (j=0; j<dev->mc_count; j++)
1496 {
1497 addrs=dmi->dmi_addr;
1498 dmi=dmi->next;
1499 for (i=0; i<ETH_ALEN; i++)
1500 {
1501 *(pa + (i&1)) = *addrs++;
1502 if (i & 0x01) pa += 4;
1503 }
1504 }
1505 }
1506
1507 if (dev->mc_count == 0)
1508 omr &= ~OMR_PR;
1509 outl(omr, DE4X5_OMR);
1510
1511 return;
1512 }
1513
1514
1515
1516
1517
/*
** Probe EISA slots for DE425 boards. ioaddr == 0 scans all slots
** (one per 0x1000 of I/O space); otherwise only the slot containing
** ioaddr is checked. For each signature match the board is enabled via
** its PCI-compatible configuration registers and handed to
** de4x5_hw_init().
*/
static void eisa_probe(struct device *dev, u_long ioaddr)
{
    int i, maxSlots, status;
    u_short vendor, device;
    s32 cfid;
    u_long iobase;
    struct bus_type *lp = &bus;
    char name[DE4X5_STRLEN];

    if (!ioaddr && autoprobed) return ;           /* already scanned the bus */
    if ((ioaddr < 0x1000) && (ioaddr > 0)) return; /* below the EISA slot space */

    lp->bus = EISA;

    if (ioaddr == 0) {
        iobase = EISA_SLOT_INC;   /* start at slot 1 */
        i = 1;
        maxSlots = MAX_EISA_SLOTS;
    } else {
        iobase = ioaddr;
        i = (ioaddr >> 12);       /* slot number encoded in the address */
        maxSlots = i + 1;
    }

    for (status = -ENODEV; (i<maxSlots) && (dev!=NULL); i++, iobase+=EISA_SLOT_INC) {
        if (EISA_signature(name, EISA_ID)) {
            cfid = inl(PCI_CFID);
            device = (u_short)(cfid >> 16);
            vendor = (u_short) cfid;

            lp->bus = EISA;
            lp->chipset = device;
            if (DevicePresent(EISA_APROM) == 0) {
                /* Enable I/O and bus mastering, set latency, set base */
                outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
                outl(0x00004000, PCI_CFLT);
                outl(iobase, PCI_CBIO);

                if (check_region(iobase, DE4X5_EISA_TOTAL_SIZE) == 0) {
                    if ((dev = alloc_device(dev, iobase)) != NULL) {
                        if ((status = de4x5_hw_init(dev, iobase)) == 0) {
                            num_de4x5s++;
                        }
                        num_eth++;
                    }
                } else if (autoprobed) {
                    printk("%s: region already allocated at 0x%04lx.\n", dev->name, iobase);
                }
            }
        }
    }

    return;
}
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
/* PCI_DEVICE expands using the *local* dev_num of pci_probe(): it builds a
** devfn with function number 0 (devfn = slot<<3 | func). */
#define PCI_DEVICE (dev_num << 3)
#define PCI_LAST_DEV 32

/*
** Probe for PCI DC21040/21041/21140 boards via the PCI BIOS.  An ioaddr
** below 0x1000 encodes a specific bus/device to probe (bus<<8 | dev);
** otherwise every device of the right class code is considered.
*/
static void pci_probe(struct device *dev, u_long ioaddr)
{
  u_char irq;
  u_char pb, pbus, dev_num, dnum, dev_fn;
  u_short vendor, device, index, status;
  u_int class = DE4X5_CLASS_CODE;
  u_int iobase;
  struct bus_type *lp = &bus;

  if (!ioaddr && autoprobed) return ;    /* already autoprobed once */

  if (pcibios_present()) {
    lp->bus = PCI;

    if (ioaddr < 0x1000) {               /* decode bus/device from ioaddr */
      pbus = (u_short)(ioaddr >> 8);
      dnum = (u_short)(ioaddr & 0xff);
    } else {                             /* 0/0 means "match anything" */
      pbus = 0;
      dnum = 0;
    }

    for (index=0;
         (pcibios_find_class(class, index, &pb, &dev_fn)!= PCIBIOS_DEVICE_NOT_FOUND);
         index++) {
      dev_num = PCI_SLOT(dev_fn);

      if ((!pbus && !dnum) || ((pbus == pb) && (dnum == dev_num))) {
        pcibios_read_config_word(pb, PCI_DEVICE, PCI_VENDOR_ID, &vendor);
        pcibios_read_config_word(pb, PCI_DEVICE, PCI_DEVICE_ID, &device);
        if (is_DC21040 || is_DC21041 || is_DC21140) {
          /* remember where we found the chip */
          lp->device = dev_num;
          lp->bus_num = pb;

          lp->chipset = device;

          /* I/O base address, low bits masked off */
          pcibios_read_config_dword(pb, PCI_DEVICE, PCI_BASE_ADDRESS_0, &iobase);
          iobase &= CBIO_MASK;

          /* fetch the assigned IRQ line */
          pcibios_read_config_byte(pb, PCI_DEVICE, PCI_INTERRUPT_LINE, &irq);

          /* the board must decode I/O space; enable bus mastering if the
          ** BIOS didn't, and re-read to confirm it stuck */
          pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
          if (status & PCI_COMMAND_IO) {
            if (!(status & PCI_COMMAND_MASTER)) {
              status |= PCI_COMMAND_MASTER;
              pcibios_write_config_word(pb, PCI_DEVICE, PCI_COMMAND, status);
              pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
            }
            if (status & PCI_COMMAND_MASTER) {
              if ((DevicePresent(DE4X5_APROM) == 0) || is_not_dec) {
                if (check_region(iobase, DE4X5_PCI_TOTAL_SIZE) == 0) {
                  if ((dev = alloc_device(dev, iobase)) != NULL) {
                    dev->irq = irq;
                    if ((status = de4x5_hw_init(dev, iobase)) == 0) {
                      num_de4x5s++;
                    }
                    num_eth++;
                  }
                } else if (autoprobed) {
                  printk("%s: region already allocated at 0x%04x.\n", dev->name, (u_short)iobase);
                }
              }
            }
          }
        }
      }
    }
  }

  return;
}
1664
1665
1666
1667
1668
1669 static struct device *alloc_device(struct device *dev, u_long iobase)
1670 {
1671 int addAutoProbe = 0;
1672 struct device *tmp = NULL, *ret;
1673 int (*init)(struct device *) = NULL;
1674
1675
1676
1677
1678 if (!loading_module) {
1679 while (dev->next != NULL) {
1680 if ((dev->base_addr == DE4X5_NDA) || (dev->base_addr == 0)) break;
1681 dev = dev->next;
1682 num_eth++;
1683 }
1684
1685
1686
1687
1688
1689 if ((dev->base_addr == 0) && (num_de4x5s > 0)) {
1690 addAutoProbe++;
1691 tmp = dev->next;
1692 init = dev->init;
1693 }
1694
1695
1696
1697
1698
1699 if ((dev->next == NULL) &&
1700 !((dev->base_addr == DE4X5_NDA) || (dev->base_addr == 0))){
1701 dev->next = (struct device *)kmalloc(sizeof(struct device) + 8,
1702 GFP_KERNEL);
1703
1704 dev = dev->next;
1705 if (dev == NULL) {
1706 printk("eth%d: Device not initialised, insufficient memory\n",
1707 num_eth);
1708 } else {
1709
1710
1711
1712
1713
1714 dev->name = (char *)(dev + sizeof(struct device));
1715 if (num_eth > 9999) {
1716 sprintf(dev->name,"eth????");
1717 } else {
1718 sprintf(dev->name,"eth%d", num_eth);
1719 }
1720 dev->base_addr = iobase;
1721 dev->next = NULL;
1722 dev->init = &de4x5_probe;
1723 num_de4x5s++;
1724 }
1725 }
1726 ret = dev;
1727
1728
1729
1730
1731
1732 if (ret != NULL) {
1733 if (addAutoProbe) {
1734 for (; (tmp->next!=NULL) && (tmp->base_addr!=DE4X5_NDA); tmp=tmp->next);
1735
1736
1737
1738
1739
1740 if ((tmp->next == NULL) && !(tmp->base_addr == DE4X5_NDA)) {
1741 tmp->next = (struct device *)kmalloc(sizeof(struct device) + 8,
1742 GFP_KERNEL);
1743 tmp = tmp->next;
1744 if (tmp == NULL) {
1745 printk("%s: Insufficient memory to extend the device list.\n",
1746 dev->name);
1747 } else {
1748
1749
1750
1751
1752
1753 tmp->name = (char *)(tmp + sizeof(struct device));
1754 if (num_eth > 9999) {
1755 sprintf(tmp->name,"eth????");
1756 } else {
1757 sprintf(tmp->name,"eth%d", num_eth);
1758 }
1759 tmp->base_addr = 0;
1760 tmp->next = NULL;
1761 tmp->init = init;
1762 }
1763 } else {
1764 tmp->base_addr = 0;
1765 }
1766 }
1767 }
1768 } else {
1769 ret = dev;
1770 }
1771
1772 return ret;
1773 }
1774
1775
1776
1777
1778
1779
1780
/*
** Top-level media autodetection: dispatch to the chip-specific autoconf
** routine, report what was found, and arm the link-fail interrupt for
** twisted-pair media.  Returns the detected media (0/NC == nothing found).
*/
static int autoconf_media(struct device *dev)
{
  struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
  u_long iobase = dev->base_addr;

  lp->tx_enable = YES;
  if (de4x5_debug > 0 ) {
    if (lp->chipset != DC21140) {
      printk("%s: Searching for media... ",dev->name);
    } else {
      printk("%s: Searching for mode... ",dev->name);
    }
  }

  /* seed the search with either the configured medium or the chip's
  ** preferred starting point, then let the state machine walk the rest */
  if (lp->chipset == DC21040) {
    lp->media = (lp->autosense == AUTO ? TP : lp->autosense);
    dc21040_autoconf(dev);
  } else if (lp->chipset == DC21041) {
    lp->media = (lp->autosense == AUTO ? TP_NW : lp->autosense);
    dc21041_autoconf(dev);
  } else if (lp->chipset == DC21140) {
    disable_ast(dev);                  /* stop the autosense timer first */
    lp->media = (lp->autosense == AUTO ? _10Mb : lp->autosense);
    dc21140_autoconf(dev);
  }

  if (de4x5_debug > 0 ) {
    if (lp->chipset != DC21140) {
      printk("media is %s\n", (lp->media == NC ? "unconnected!" :
                               (lp->media == TP ? "TP." :
                                (lp->media == ANS ? "TP/Nway." :
                                 (lp->media == BNC ? "BNC." :
                                  (lp->media == AUI ? "AUI." :
                                   "BNC/AUI."
                                   ))))));
    } else {
      printk("mode is %s\n",(lp->media == NC ? "link down.":
                             (lp->media == _100Mb ? "100Mb/s." :
                              (lp->media == _10Mb ? "10Mb/s." :
                               "\?\?\?"
                               ))));
    }
  }

  if (lp->media) {
    lp->lostMedia = 0;
    inl(DE4X5_MFC);                    /* clear the missed-frame counter */
    if ((lp->media == TP) || (lp->media == ANS)) {
      lp->irq_mask |= IMR_LFM;         /* watch for link fail on TP media */
    }
  }
  dce_ms_delay(10);

  return (lp->media);
}
1836
/*
** DC21040 media search: recursive state machine TP -> BNC/AUI -> EXT_SIA
** -> NC.  Each state programs the SIA for that medium and tests the link
** (link-fail bit for TP, loopback ping for the others); on failure with
** autosense enabled it falls through to the next state.
*/
static void dc21040_autoconf(struct device *dev)
{
  struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
  u_long iobase = dev->base_addr;
  int i, linkBad;
  s32 sisr = 0, t_3s = 3000;           /* ~3s poll limit, 1ms per loop */

  switch (lp->media) {
  case TP:
    reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);  /* SIA values for TP */
    /* wait for link pass, bailing early on non-compliant receive */
    for (linkBad=1,i=0;(i<t_3s) && linkBad && !(sisr & SISR_NCR);i++) {
      if (((sisr = inl(DE4X5_SISR)) & SISR_LKF) == 0) linkBad = 0;
      dce_ms_delay(1);
    }
    if (linkBad && (lp->autosense == AUTO)) {
      lp->media = BNC_AUI;
      dc21040_autoconf(dev);
    }
    break;

  case BNC:
  case AUI:
  case BNC_AUI:
    reset_init_sia(dev, 0x8f09, 0x0705, 0x0006);  /* SIA values for BNC/AUI */
    dce_ms_delay(500);                 /* let the SIA settle */
    linkBad = ping_media(dev);         /* loopback probe */
    if (linkBad && (lp->autosense == AUTO)) {
      lp->media = EXT_SIA;
      dc21040_autoconf(dev);
    }
    break;

  case EXT_SIA:
    reset_init_sia(dev, 0x3041, 0x0000, 0x0006);  /* external SIA */
    dce_ms_delay(500);
    linkBad = ping_media(dev);
    if (linkBad && (lp->autosense == AUTO)) {
      lp->media = NC;                  /* nothing worked */
      dc21040_autoconf(dev);
    }
    break;

  case NC:
#ifndef __alpha__
    reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);  /* park on TP */
    break;
#else
    /* Alpha: park on BNC/AUI instead (presumably a platform quirk) */
    reset_init_sia(dev, 0x8f09, 0x0705, 0x0006);
#endif
  }

  return;
}
1891
1892
1893
1894
1895
1896
1897
/*
** DC21041 media search: recursive state machine
** TP_NW -> (ANS | AUI), ANS -> TP, TP -> (AUI | BNC), AUI -> BNC, BNC -> NC.
** Each state programs the SIA via test_media() and inspects the resulting
** status/interrupt bits to decide the next state.
*/
static void dc21041_autoconf(struct device *dev)
{
  struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
  u_long iobase = dev->base_addr;
  s32 sts, irqs, irq_mask, omr;

  switch (lp->media) {
  case TP_NW:                          /* TP with Nway autonegotiation */
    omr = inl(DE4X5_OMR);
    outl(omr | OMR_FD, DE4X5_OMR);     /* full duplex for the negotiation */
    irqs = STS_LNF | STS_LNP;
    irq_mask = IMR_LFM | IMR_LPM;
    sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
    if (sts & STS_LNP) {
      lp->media = ANS;                 /* link pulses seen: try autoneg */
    } else {
      lp->media = AUI;
    }
    dc21041_autoconf(dev);
    break;

  case ANS:                            /* autonegotiation */
    irqs = STS_LNP;
    irq_mask = IMR_LPM;
    sts = test_ans(dev, irqs, irq_mask, 3000);
    if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
      lp->media = TP;                  /* fall back to plain TP */
      dc21041_autoconf(dev);
    }
    break;

  case TP:
    omr = inl(DE4X5_OMR);
    outl(omr & ~OMR_FD, DE4X5_OMR);    /* half duplex */
    irqs = STS_LNF | STS_LNP;
    irq_mask = IMR_LFM | IMR_LPM;
    sts = test_media(dev, irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
    if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
      if (inl(DE4X5_SISR) & SISR_NRA) {  /* AUI activity seen? */
        lp->media = AUI;
      } else {
        lp->media = BNC;
      }
      dc21041_autoconf(dev);
    }
    break;

  case AUI:
    omr = inl(DE4X5_OMR);
    outl(omr & ~OMR_FD, DE4X5_OMR);
    irqs = 0;                          /* no interrupts: just time out */
    irq_mask = 0;
    sts = test_media(dev, irqs, irq_mask, 0xef09, 0xf7fd, 0x000e, 1000);
    if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
      lp->media = BNC;
      dc21041_autoconf(dev);
    }
    break;

  case BNC:
    omr = inl(DE4X5_OMR);
    outl(omr & ~OMR_FD, DE4X5_OMR);
    irqs = 0;
    irq_mask = 0;
    sts = test_media(dev, irqs, irq_mask, 0xef09, 0xf7fd, 0x0006, 1000);
    if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
      lp->media = NC;
    } else {                           /* activity seen: confirm with a ping */
      if (ping_media(dev)) lp->media = NC;
    }
    break;

  case NC:                             /* nothing found: park on TP/Nway */
    omr = inl(DE4X5_OMR);
    outl(omr | OMR_FD, DE4X5_OMR);
    reset_init_sia(dev, 0xef01, 0xffff, 0x0008);
    break;
  }

  return;
}
1979
1980
1981
1982
/*
** DC21140 mode setup: program the operating-mode register and the general
** purpose port for either 100Mb/s (symbol mode, scrambler on) or 10Mb/s
** (serial/TTM mode).  Full duplex is applied per the module option.
*/
static void dc21140_autoconf(struct device *dev)
{
  struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
  u_long iobase = dev->base_addr;
  s32 omr;

  switch(lp->media) {
  case _100Mb:
    /* clear all mode bits, then set port select, heartbeat disable,
    ** PCS and scrambler for 100Mb operation */
    omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR));
    omr |= (de4x5_full_duplex ? OMR_FD : 0);
    outl(omr | OMR_PS | OMR_HBD | OMR_PCS | OMR_SCR, DE4X5_OMR);
    outl(GEP_FDXD | GEP_MODE, DE4X5_GEP);
    break;

  case _10Mb:
    /* 10Mb: only the TTM (10baseT) bit set */
    omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR));
    omr |= (de4x5_full_duplex ? OMR_FD : 0);
    outl(omr | OMR_TTM, DE4X5_OMR);
    outl(GEP_FDXD, DE4X5_GEP);
    break;
  }

  return;
}
2007
/*
** Program the SIA with the given CSR13/14/15 values, then wait up to 'msec'
** milliseconds (via the chip's general-purpose timer) for any of the status
** bits in 'irqs' to assert.  Returns the final status register value.
** NOTE(review): 'irq_mask' and 'csr12' are computed/cleared but the mask is
** never written to the IMR here -- polling only.
*/
static int
test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
{
  struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
  u_long iobase = dev->base_addr;
  s32 sts, time, csr12;

  reset_init_sia(dev, csr13, csr14, csr15);

  /* set up the timeout */
  load_ms_timer(dev, msec);

  /* clear all pending status interrupts */
  sts = inl(DE4X5_STS);
  outl(sts, DE4X5_STS);

  /* clear pending SIA status (write-1-to-clear) */
  csr12 = inl(DE4X5_SISR);
  outl(csr12, DE4X5_SISR);

  /* poll until an interesting bit asserts or the timer expires */
  do {
    time = inl(DE4X5_GPT) & GPT_VAL;
    sts = inl(DE4X5_STS);
  } while ((time != 0) && !(sts & irqs));

  sts = inl(DE4X5_STS);

  return sts;
}
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061 static int ping_media(struct device *dev)
2062 {
2063 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2064 u_long iobase = dev->base_addr;
2065 int i, entry, linkBad;
2066 s32 omr, t_3s = 4000;
2067 char frame[64];
2068
2069 create_packet(dev, frame, sizeof(frame));
2070
2071 entry = lp->tx_new;
2072 load_packet(dev, frame, TD_LS | TD_FS | sizeof(frame),NULL);
2073
2074 omr = inl(DE4X5_OMR);
2075 outl(omr|OMR_ST, DE4X5_OMR);
2076
2077 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
2078 lp->tx_old = lp->tx_new;
2079
2080
2081 for (linkBad=1,i=0;(i<t_3s) && linkBad;i++) {
2082 if ((inl(DE4X5_SISR) & SISR_NCR) == 1) break;
2083 if (lp->tx_ring[entry].status >= 0) linkBad=0;
2084 dce_ms_delay(1);
2085 }
2086 outl(omr, DE4X5_OMR);
2087
2088 return ((linkBad || (lp->tx_ring[entry].status & TD_ES)) ? 1 : 0);
2089 }
2090
2091
2092
2093
2094
/*
** Wait up to 'msec' milliseconds for Nway autonegotiation to complete.
** Polls the SIA autonegotiation state field until it reads ANS_NWOK or one
** of the 'irqs' status bits asserts.  Returns STS_LNP on success, 0 else.
*/
static int test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec)
{
  struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
  u_long iobase = dev->base_addr;
  s32 sts, ans;

  outl(irq_mask, DE4X5_IMR);

  /* set up the timeout */
  load_ms_timer(dev, msec);

  /* clear all pending status interrupts */
  sts = inl(DE4X5_STS);
  outl(sts, DE4X5_STS);

  /* poll for negotiation complete (ANS field == NWOK) or an irq bit.
  ** NOTE(review): the loop does not check the timer; it relies on the
  ** timer interrupt appearing in 'sts & irqs' -- verify against caller. */
  do {
    ans = inl(DE4X5_SISR) & SISR_ANS;
    sts = inl(DE4X5_STS);
  } while (!(sts & irqs) && (ans ^ ANS_NWOK) != 0);

  return ((sts & STS_LNP) && ((ans ^ ANS_NWOK) == 0) ? STS_LNP : 0);
}
2118
2119
2120
2121
/*
** Reset the SIA (serial interface adapter) and program its three control
** registers.  Write order (general -> TX/RX -> connectivity) matters to
** the hardware, so do not reorder.
*/
static void reset_init_sia(struct device *dev, s32 sicr, s32 strr, s32 sigr)
{
  struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
  u_long iobase = dev->base_addr;

  RESET_SIA;                           /* project macro: resets via CSR13 */
  outl(sigr, DE4X5_SIGR);              /* SIA general register  (CSR15) */
  outl(strr, DE4X5_STRR);              /* SIA TX/RX register    (CSR14) */
  outl(sicr, DE4X5_SICR);              /* SIA connectivity reg. (CSR13) */

  return;
}
2134
2135
2136
2137
2138 static void load_ms_timer(struct device *dev, u32 msec)
2139 {
2140 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2141 u_long iobase = dev->base_addr;
2142 s32 i = 2048, j;
2143
2144 if (lp->chipset == DC21140) {
2145 j = inl(DE4X5_OMR);
2146 if ((j & OMR_TTM) && (j & OMR_PS)) {
2147 i = 8192;
2148 } else if ((~j & OMR_TTM) && (j & OMR_PS)) {
2149 i = 819;
2150 }
2151 }
2152
2153 outl((s32)(msec * 10000)/i, DE4X5_GPT);
2154
2155 return;
2156 }
2157
2158
2159
2160
2161 static void create_packet(struct device *dev, char *frame, int len)
2162 {
2163 int i;
2164 char *buf = frame;
2165
2166 for (i=0; i<ETH_ALEN; i++) {
2167 *buf++ = dev->dev_addr[i];
2168 }
2169 for (i=0; i<ETH_ALEN; i++) {
2170 *buf++ = dev->dev_addr[i];
2171 }
2172
2173 *buf++ = 0;
2174 *buf++ = 1;
2175
2176 return;
2177 }
2178
2179
2180
2181
/*
** Busy-wait for 'usec' microseconds (thin wrapper over the kernel's
** udelay()).
*/
static void dce_us_delay(u32 usec)
{
  udelay(usec);

  return;
}
2188
2189
2190
2191
2192 static void dce_ms_delay(u32 msec)
2193 {
2194 u_int i;
2195
2196 for (i=0; i<msec; i++) {
2197 dce_us_delay(1000);
2198 }
2199
2200 return;
2201 }
2202
2203
2204
2205
2206
2207 static int EISA_signature(char *name, s32 eisa_id)
2208 {
2209 u_int i;
2210 const char *signatures[] = DE4X5_SIGNATURE;
2211 char ManCode[DE4X5_STRLEN];
2212 union {
2213 s32 ID;
2214 char Id[4];
2215 } Eisa;
2216 int status = 0;
2217
2218 *name = '\0';
2219 Eisa.ID = inl(eisa_id);
2220
2221 ManCode[0]=(((Eisa.Id[0]>>2)&0x1f)+0x40);
2222 ManCode[1]=(((Eisa.Id[1]&0xe0)>>5)+((Eisa.Id[0]&0x03)<<3)+0x40);
2223 ManCode[2]=(((Eisa.Id[2]>>4)&0x0f)+0x30);
2224 ManCode[3]=((Eisa.Id[2]&0x0f)+0x30);
2225 ManCode[4]=(((Eisa.Id[3]>>4)&0x0f)+0x30);
2226 ManCode[5]='\0';
2227
2228 for (i=0;(*signatures[i] != '\0') && (*name == '\0');i++) {
2229 if (strstr(ManCode, signatures[i]) != NULL) {
2230 strcpy(name,ManCode);
2231 status = 1;
2232 }
2233 }
2234
2235 return status;
2236 }
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
/*
** Look for the Ethernet address PROM.  For the DC21040 the address ROM is
** streamed a byte at a time and must begin with the 8-byte signature
** (0xAA5500FF twice); we scan for it with a simple match/restart loop.
** For later chips the whole SROM is read into lp->srom instead (always
** "present").  Returns 0 on success, -ENODEV if the signature never shows.
*/
static int DevicePresent(u_long aprom_addr)
{
  union {
    struct {
      u32 a;
      u32 b;
    } llsig;
    char Sig[sizeof(u32) << 1];
  } dev;                               /* NB: local 'dev' is the signature,
                                          not a struct device */
  char data;
  int i, j, tmp, status = 0;
  short sigLength;
  struct bus_type *lp = &bus;

  dev.llsig.a = ETH_PROM_SIG;
  dev.llsig.b = ETH_PROM_SIG;
  sigLength = sizeof(u32) << 1;        /* 8 signature bytes */

  if (lp->chipset == DC21040) {
    for (i=0,j=0;(j<sigLength) && (i<PROBE_LENGTH+sigLength-1);i++) {
      if (lp->bus == PCI) {
        /* PCI APROM reads return < 0 while data is not ready */
        while ((tmp = inl(aprom_addr)) < 0);
        data = (char)tmp;
      } else {
        data = inb(aprom_addr);
      }
      if (dev.Sig[j] == data) {        /* track signature match progress */
        j++;
      } else {                         /* mismatch: maybe restart a match */
        if (data == dev.Sig[0]) {
          j=1;
        } else {
          j=0;
        }
      }
    }

    if (j!=sigLength) {
      status = -ENODEV;                /* signature never completed */
    }

  } else {                             /* DC21041/21140: copy the SROM */
    short *p = (short *)&lp->srom;
    for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
      *p++ = srom_rd(aprom_addr, i);
    }
  }

  return status;
}
2300
/*
** Read the 6-byte station address from the address PROM / SROM into
** dev->dev_addr, accumulating the DEC rotating checksum over the three
** 16-bit words, then compare against the stored checksum.  Returns 0 when
** the checksum matches, -1 otherwise.
*/
static int get_hw_addr(struct device *dev)
{
  u_long iobase = dev->base_addr;
  int i, k, tmp, status = 0;
  u_short j,chksum;
  struct bus_type *lp = &bus;

  for (i=0,k=0,j=0;j<3;j++) {          /* three 16-bit words of address */
    k <<= 1 ;                          /* rotate-with-carry accumulate */
    if (k > 0xffff) k-=0xffff;

    if (lp->bus == PCI) {
      if (lp->chipset == DC21040) {
        /* APROM reads return < 0 until the byte is ready */
        while ((tmp = inl(DE4X5_APROM)) < 0);
        k += (u_char) tmp;
        dev->dev_addr[i++] = (u_char) tmp;
        while ((tmp = inl(DE4X5_APROM)) < 0);
        k += (u_short) (tmp << 8);
        dev->dev_addr[i++] = (u_char) tmp;
      } else {                         /* later chips: address is in SROM */
        dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
        dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
      }
    } else {                           /* EISA: byte-wide APROM port */
      k += (u_char) (tmp = inb(EISA_APROM));
      dev->dev_addr[i++] = (u_char) tmp;
      k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
      dev->dev_addr[i++] = (u_char) tmp;
    }

    if (k > 0xffff) k-=0xffff;
  }
  if (k == 0xffff) k=0;                /* checksum is computed mod 0xffff */

  if (lp->bus == PCI) {
    if (lp->chipset == DC21040) {
      while ((tmp = inl(DE4X5_APROM)) < 0);
      chksum = (u_char) tmp;
      while ((tmp = inl(DE4X5_APROM)) < 0);
      chksum |= (u_short) (tmp << 8);
      if (k != chksum) status = -1;
    }
    /* NOTE(review): SROM-based chips skip the checksum test entirely */
  } else {
    chksum = (u_char) inb(EISA_APROM);
    chksum |= (u_short) (inb(EISA_APROM) << 8);
    if (k != chksum) status = -1;
  }

  return status;
}
2352
2353
2354
2355
/*
** Read one 16-bit word at 'offset' from the serial ROM by bit-banging the
** standard read sequence: select, read command, address, then clock out
** the data.
*/
static short srom_rd(u_long addr, u_char offset)
{
  sendto_srom(SROM_RD | SROM_SR, addr);          /* select the SROM */

  srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
  srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
  srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);

  return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
}
2366
/*
** Latch 'command' into the SROM by pulsing the clock line: data stable,
** clock high, clock low.
*/
static void srom_latch(u_int command, u_long addr)
{
  sendto_srom(command, addr);
  sendto_srom(command | DT_CLK, addr);
  sendto_srom(command, addr);

  return;
}
2375
/*
** Clock the (read) command bits into the SROM, then drop back to chip
** select only.
*/
static void srom_command(u_int command, u_long addr)
{
  srom_latch(command, addr);
  srom_latch(command, addr);
  srom_latch((command & 0x0000ff00) | DT_CS, addr);

  return;
}
2384
/*
** Clock the 6-bit word address into the SROM, MSB first (the sign bit of
** the shifted 'a' selects the data-in level), then check the SROM
** acknowledges the address phase by pulling its data-out line low.
*/
static void srom_address(u_int command, u_long addr, u_char offset)
{
  int i;
  char a;

  a = (char)(offset << 2);             /* put address MSB at the sign bit */
  for (i=0; i<6; i++, a <<= 1) {
    srom_latch(command | ((a < 0) ? DT_IN : 0), addr);
  }
  dce_us_delay(1);

  i = (getfrom_srom(addr) >> 3) & 0x01;  /* data-out should now be 0 */
  if (i != 0) {
    printk("Bad SROM address phase.....\n");
  }

  return;
}
2404
/*
** Clock 16 data bits out of the SROM, MSB first, then deselect the chip.
** Returns the assembled word.
*/
static short srom_data(u_int command, u_long addr)
{
  int i;
  short word = 0;
  s32 tmp;

  for (i=0; i<16; i++) {
    sendto_srom(command | DT_CLK, addr);  /* clock high: bit becomes valid */
    tmp = getfrom_srom(addr);
    sendto_srom(command, addr);           /* clock low */

    word = (word << 1) | ((tmp >> 3) & 0x01);  /* data-out is bit 3 */
  }

  sendto_srom(command & 0x0000ff00, addr);     /* deselect */

  return word;
}
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
/*
** Write a raw control word to the SROM interface register, with a short
** settle delay for the serial timing.
*/
static void sendto_srom(u_int command, u_long addr)
{
  outl(command, addr);
  dce_us_delay(1);

  return;
}
2446
/*
** Read the raw SROM interface register, with a short settle delay for the
** serial timing.
*/
static int getfrom_srom(u_long addr)
{
  s32 tmp;

  tmp = inl(addr);
  dce_us_delay(1);

  return tmp;
}
2456
/*
** Initialise lp->setup_frame with this station's address in the layout the
** chip expects (address bytes packed two per 32-bit word).  With mode==ALL
** the frame is zeroed first.  In hash mode the station address goes in the
** imperfect-filter slot and a marker bit is set in the hash table; in
** perfect mode the address is followed by a broadcast (all-0xff) entry.
** Returns a pointer just past the last entry written (where the caller can
** append multicast addresses).
*/
static char *build_setup_frame(struct device *dev, int mode)
{
  struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
  int i;
  char *pa = lp->setup_frame;

  /* initialize the setup frame */
  if (mode == ALL) {
    memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
  }

  if (lp->setup_f == HASH_PERF) {
    for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
      *(pa + i) = dev->dev_addr[i];      /* our physical address */
      if (i & 0x01) pa += 2;
    }
    /* marker bit for the hash filter -- offset per chip spec, presumably */
    *(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80;
  } else {
    for (i=0; i<ETH_ALEN; i++) {         /* our physical address */
      *(pa + (i&1)) = dev->dev_addr[i];
      if (i & 0x01) pa += 4;
    }
    for (i=0; i<ETH_ALEN; i++) {         /* broadcast address entry */
      *(pa + (i&1)) = (char) 0xff;
      if (i & 0x01) pa += 4;
    }
  }

  return pa;
}
2487
/*
** Enable the autosense timer: unmask the timer interrupt and start the
** general-purpose timer with 'time_out' milliseconds.
*/
static void enable_ast(struct device *dev, u32 time_out)
{
  struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
  u_long iobase = dev->base_addr;

  lp->irq_mask |= IMR_TMM;
  outl(lp->irq_mask, DE4X5_IMR);
  load_ms_timer(dev, time_out);

  return;
}
2499
/*
** Disable the autosense timer: mask the timer interrupt and clear the
** general-purpose timer.
*/
static void disable_ast(struct device *dev)
{
  struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
  u_long iobase = dev->base_addr;

  lp->irq_mask &= ~IMR_TMM;
  outl(lp->irq_mask, DE4X5_IMR);
  load_ms_timer(dev, 0);

  return;
}
2511
/*
** Kick the transmitter back to life by queueing a zero-data fake frame
** through the normal transmit path (old 1.x skb API: skb->arp marks the
** frame ready to send).
*/
static void kick_tx(struct device *dev)
{
  struct sk_buff *skb;

  if ((skb = alloc_skb(0, GFP_ATOMIC)) != NULL) {
    skb->len= FAKE_FRAME_LEN;
    skb->arp=1;
    skb->dev=dev;
    dev_queue_xmit(skb, dev, SOPRI_NORMAL);
  }
  /* allocation failure is silently ignored -- best effort only */

  return;
}
2525
2526
2527
2528
2529
2530 static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd)
2531 {
2532 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2533 struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_data;
2534 u_long iobase = dev->base_addr;
2535 int i, j, status = 0;
2536 s32 omr;
2537 union {
2538 u8 addr[(HASH_TABLE_LEN * ETH_ALEN)];
2539 u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
2540 u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2];
2541 } tmp;
2542
2543 switch(ioc->cmd) {
2544 case DE4X5_GET_HWADDR:
2545 ioc->len = ETH_ALEN;
2546 status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
2547 if (status)
2548 break;
2549 for (i=0; i<ETH_ALEN; i++) {
2550 tmp.addr[i] = dev->dev_addr[i];
2551 }
2552 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
2553
2554 break;
2555 case DE4X5_SET_HWADDR:
2556 status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN);
2557 if (status)
2558 break;
2559 status = -EPERM;
2560 if (!suser())
2561 break;
2562 status = 0;
2563 memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN);
2564 for (i=0; i<ETH_ALEN; i++) {
2565 dev->dev_addr[i] = tmp.addr[i];
2566 }
2567 build_setup_frame(dev, PHYS_ADDR_ONLY);
2568
2569 while (set_bit(0, (void *)&dev->tbusy) != 0);
2570 if (lp->setup_f == HASH_PERF) {
2571 load_packet(dev, lp->setup_frame, TD_IC | HASH_F | TD_SET |
2572 SETUP_FRAME_LEN, NULL);
2573 } else {
2574 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
2575 SETUP_FRAME_LEN, NULL);
2576 }
2577 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
2578 outl(POLL_DEMAND, DE4X5_TPD);
2579 dev->tbusy = 0;
2580
2581 break;
2582 case DE4X5_SET_PROM:
2583 if (suser()) {
2584 omr = inl(DE4X5_OMR);
2585 omr |= OMR_PR;
2586 outl(omr, DE4X5_OMR);
2587 } else {
2588 status = -EPERM;
2589 }
2590
2591 break;
2592 case DE4X5_CLR_PROM:
2593 if (suser()) {
2594 omr = inl(DE4X5_OMR);
2595 omr &= ~OMR_PR;
2596 outb(omr, DE4X5_OMR);
2597 } else {
2598 status = -EPERM;
2599 }
2600
2601 break;
2602 case DE4X5_SAY_BOO:
2603 printk("%s: Boo!\n", dev->name);
2604
2605 break;
2606 case DE4X5_GET_MCA:
2607 ioc->len = (HASH_TABLE_LEN >> 3);
2608 status = verify_area(VERIFY_WRITE, ioc->data, ioc->len);
2609 if (status)
2610 break;
2611 memcpy_tofs(ioc->data, lp->setup_frame, ioc->len);
2612
2613 break;
2614 case DE4X5_SET_MCA:
2615 if (suser()) {
2616 if (ioc->len != HASH_TABLE_LEN) {
2617 if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN * ioc->len))) {
2618 memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN * ioc->len);
2619 set_multicast_list(dev, ioc->len, tmp.addr);
2620 }
2621 } else {
2622 set_multicast_list(dev, ioc->len, NULL);
2623 }
2624 } else {
2625 status = -EPERM;
2626 }
2627
2628 break;
2629 case DE4X5_CLR_MCA:
2630 if (suser()) {
2631 set_multicast_list(dev, 0, NULL);
2632 } else {
2633 status = -EPERM;
2634 }
2635
2636 break;
2637 case DE4X5_MCA_EN:
2638 if (suser()) {
2639 omr = inl(DE4X5_OMR);
2640 omr |= OMR_PM;
2641 outl(omr, DE4X5_OMR);
2642 } else {
2643 status = -EPERM;
2644 }
2645
2646 break;
2647 case DE4X5_GET_STATS:
2648 ioc->len = sizeof(lp->pktStats);
2649 status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
2650 if (status)
2651 break;
2652
2653 cli();
2654 memcpy_tofs(ioc->data, &lp->pktStats, ioc->len);
2655 sti();
2656
2657 break;
2658 case DE4X5_CLR_STATS:
2659 if (suser()) {
2660 cli();
2661 memset(&lp->pktStats, 0, sizeof(lp->pktStats));
2662 sti();
2663 } else {
2664 status = -EPERM;
2665 }
2666
2667 break;
2668 case DE4X5_GET_OMR:
2669 tmp.addr[0] = inl(DE4X5_OMR);
2670 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, 1))) {
2671 memcpy_tofs(ioc->data, tmp.addr, 1);
2672 }
2673
2674 break;
2675 case DE4X5_SET_OMR:
2676 if (suser()) {
2677 if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, 1))) {
2678 memcpy_fromfs(tmp.addr, ioc->data, 1);
2679 outl(tmp.addr[0], DE4X5_OMR);
2680 }
2681 } else {
2682 status = -EPERM;
2683 }
2684
2685 break;
2686 case DE4X5_GET_REG:
2687 j = 0;
2688 tmp.lval[0] = inl(DE4X5_STS); j+=4;
2689 tmp.lval[1] = inl(DE4X5_BMR); j+=4;
2690 tmp.lval[2] = inl(DE4X5_IMR); j+=4;
2691 tmp.lval[3] = inl(DE4X5_OMR); j+=4;
2692 tmp.lval[4] = inl(DE4X5_SISR); j+=4;
2693 tmp.lval[5] = inl(DE4X5_SICR); j+=4;
2694 tmp.lval[6] = inl(DE4X5_STRR); j+=4;
2695 tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
2696 ioc->len = j;
2697 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
2698 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
2699 }
2700 break;
2701
2702 #define DE4X5_DUMP 0x0f
2703
2704 case DE4X5_DUMP:
2705 j = 0;
2706 tmp.addr[j++] = dev->irq;
2707 for (i=0; i<ETH_ALEN; i++) {
2708 tmp.addr[j++] = dev->dev_addr[i];
2709 }
2710 tmp.addr[j++] = lp->rxRingSize;
2711 tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
2712 tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
2713
2714 for (i=0;i<lp->rxRingSize-1;i++){
2715 if (i < 3) {
2716 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
2717 }
2718 }
2719 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
2720 for (i=0;i<lp->txRingSize-1;i++){
2721 if (i < 3) {
2722 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
2723 }
2724 }
2725 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
2726
2727 for (i=0;i<lp->rxRingSize-1;i++){
2728 if (i < 3) {
2729 tmp.lval[j>>2] = (s32)lp->rx_ring[i].buf; j+=4;
2730 }
2731 }
2732 tmp.lval[j>>2] = (s32)lp->rx_ring[i].buf; j+=4;
2733 for (i=0;i<lp->txRingSize-1;i++){
2734 if (i < 3) {
2735 tmp.lval[j>>2] = (s32)lp->tx_ring[i].buf; j+=4;
2736 }
2737 }
2738 tmp.lval[j>>2] = (s32)lp->tx_ring[i].buf; j+=4;
2739
2740 for (i=0;i<lp->rxRingSize;i++){
2741 tmp.lval[j>>2] = lp->rx_ring[i].status; j+=4;
2742 }
2743 for (i=0;i<lp->txRingSize;i++){
2744 tmp.lval[j>>2] = lp->tx_ring[i].status; j+=4;
2745 }
2746
2747 tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
2748 tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
2749 tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
2750 tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
2751 tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
2752 tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
2753 tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
2754 tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
2755
2756 tmp.addr[j++] = lp->txRingSize;
2757 tmp.addr[j++] = dev->tbusy;
2758
2759 ioc->len = j;
2760 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
2761 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
2762 }
2763
2764 break;
2765 default:
2766 status = -EOPNOTSUPP;
2767 }
2768
2769 return status;
2770 }
2771
#ifdef MODULE
/* Module-load device template: name filled in by the probe; default
** base_addr 0x2000 and irq 10 are overridden by the io/irq parameters. */
static char devicename[9] = { 0, };
static struct device thisDE4X5 = {
  devicename,                          /* device name inserted by /linux/drivers/net/net_init.c */
  0, 0, 0, 0,
  0x2000, 10,                          /* base I/O address, IRQ */
  0, 0, 0, NULL, de4x5_probe };

static int io=0x000b;                  /* EISA, or PCI (bus<<8 | dev) probe code */
static int irq=10;                     /* or use the insmod io= irq= options */
2782
2783 int
2784 init_module(void)
2785 {
2786 thisDE4X5.base_addr=io;
2787 thisDE4X5.irq=irq;
2788 if (register_netdev(&thisDE4X5) != 0)
2789 return -EIO;
2790 return 0;
2791 }
2792
2793 void
2794 cleanup_module(void)
2795 {
2796 struct de4x5_private *lp = (struct de4x5_private *) thisDE4X5.priv;
2797
2798 if (lp) {
2799 kfree_s(bus_to_virt(lp->rx_ring[0].buf), RX_BUFF_SZ * NUM_RX_DESC + ALIGN);
2800 }
2801 kfree_s(thisDE4X5.priv, sizeof(struct de4x5_private) + ALIGN);
2802 thisDE4X5.priv = NULL;
2803
2804 release_region(thisDE4X5.base_addr, (lp->bus == PCI ?
2805 DE4X5_PCI_TOTAL_SIZE :
2806 DE4X5_EISA_TOTAL_SIZE));
2807 unregister_netdev(&thisDE4X5);
2808 }
2809 #endif
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820