This source file includes the following definitions:
- de4x5_probe
- de4x5_hw_init
- de4x5_open
- de4x5_init
- de4x5_queue_pkt
- de4x5_interrupt
- de4x5_rx
- de4x5_tx
- de4x5_ast
- de4x5_close
- de4x5_get_stats
- load_packet
- set_multicast_list
- SetMulticastFilter
- eisa_probe
- pci_probe
- alloc_device
- autoconf_media
- dc21040_autoconf
- dc21041_autoconf
- dc21140_autoconf
- test_media
- ping_media
- test_ans
- reset_init_sia
- load_ms_timer
- create_packet
- dce_us_delay
- dce_ms_delay
- EISA_signature
- DevicePresent
- get_hw_addr
- srom_rd
- srom_latch
- srom_command
- srom_address
- srom_data
- sendto_srom
- getfrom_srom
- build_setup_frame
- enable_ast
- disable_ast
- kick_tx
- de4x5_ioctl
- init_module
- cleanup_module
144 static const char *version = "de4x5.c:v0.32 6/26/95 davies@wanton.lkg.dec.com\n";
145
146 #include <linux/module.h>
147
148 #include <linux/kernel.h>
149 #include <linux/sched.h>
150 #include <linux/string.h>
151 #include <linux/interrupt.h>
152 #include <linux/ptrace.h>
153 #include <linux/errno.h>
154 #include <linux/ioport.h>
155 #include <linux/malloc.h>
156 #include <linux/bios32.h>
157 #include <linux/pci.h>
158 #include <linux/delay.h>
159 #include <asm/bitops.h>
160 #include <asm/io.h>
161 #include <asm/dma.h>
162 #include <asm/segment.h>
163
164 #include <linux/netdevice.h>
165 #include <linux/etherdevice.h>
166 #include <linux/skbuff.h>
167
168 #include <linux/time.h>
169 #include <linux/types.h>
170 #include <linux/unistd.h>
171
172 #include "de4x5.h"
173
174 #ifdef DE4X5_DEBUG
175 static int de4x5_debug = DE4X5_DEBUG;
176 #else
177 static int de4x5_debug = 1;
178 #endif
179
180 #ifdef DE4X5_AUTOSENSE
181 static int de4x5_autosense = DE4X5_AUTOSENSE;
182 #else
183 static int de4x5_autosense = AUTO;
184 #endif
185
186 #ifdef DE4X5_FULL_DUPLEX
187 static s32 de4x5_full_duplex = 1;
188 #else
189 static s32 de4x5_full_duplex = 0;
190 #endif
191
192 #define DE4X5_NDA 0xffe0
193
194
195
196
197 #define PROBE_LENGTH 32
198 #define ETH_PROM_SIG 0xAA5500FFUL
199
200
201
202
203 #define PKT_BUF_SZ 1536
204 #define MAX_PKT_SZ 1514
205 #define MAX_DAT_SZ 1500
206 #define MIN_DAT_SZ 1
207 #define PKT_HDR_LEN 14
208 #define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
209 #define QUEUE_PKT_TIMEOUT (3*HZ)
210
211
212 #define CRC_POLYNOMIAL_BE 0x04c11db7UL
213 #define CRC_POLYNOMIAL_LE 0xedb88320UL
214
215
216
217
218 #define DE4X5_EISA_IO_PORTS 0x0c00
219 #define DE4X5_EISA_TOTAL_SIZE 0xfff
220
221 #define MAX_EISA_SLOTS 16
222 #define EISA_SLOT_INC 0x1000
223
224 #define DE4X5_SIGNATURE {"DE425",""}
225 #define DE4X5_NAME_LENGTH 8
226
227
228
229
230 #define PCI_MAX_BUS_NUM 8
231 #define DE4X5_PCI_TOTAL_SIZE 0x80
232 #define DE4X5_CLASS_CODE 0x00020000
233
234
235
236
237
238
239
240 #define ALIGN4 ((u_long)4 - 1)
241 #define ALIGN8 ((u_long)8 - 1)
242 #define ALIGN16 ((u_long)16 - 1)
243 #define ALIGN32 ((u_long)32 - 1)
244 #define ALIGN64 ((u_long)64 - 1)
245 #define ALIGN128 ((u_long)128 - 1)
246
247 #define ALIGN ALIGN32
248 #define CACHE_ALIGN CAL_16LONG
249 #define DESC_SKIP_LEN DSL_0
250
251 #define DESC_ALIGN
252
253 #ifndef IS_NOT_DEC
254 static int is_not_dec = 0;
255 #else
256 static int is_not_dec = 1;
257 #endif
258
259
260
261
262 #define ENABLE_IRQs { \
263 imr |= lp->irq_en;\
264 outl(imr, DE4X5_IMR); \
265 }
266
267 #define DISABLE_IRQs {\
268 imr = inl(DE4X5_IMR);\
269 imr &= ~lp->irq_en;\
270 outl(imr, DE4X5_IMR); \
271 }
272
273 #define UNMASK_IRQs {\
274 imr |= lp->irq_mask;\
275 outl(imr, DE4X5_IMR); \
276 }
277
278 #define MASK_IRQs {\
279 imr = inl(DE4X5_IMR);\
280 imr &= ~lp->irq_mask;\
281 outl(imr, DE4X5_IMR); \
282 }
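/* The interrupt mask macros above expect `imr', `lp' and `iobase' to be in
   scope at the call site: ENABLE_IRQs/DISABLE_IRQs gate the summary enables
   held in lp->irq_en, UNMASK_IRQs/MASK_IRQs toggle the per-event bits in
   lp->irq_mask, and the ENABLE/UNMASK forms assume `imr' already holds the
   current register value. */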
283
284
285
286
287 #define START_DE4X5 {\
288 omr = inl(DE4X5_OMR);\
289 omr |= OMR_ST | OMR_SR;\
290 outl(omr, DE4X5_OMR); \
291 }
292
293 #define STOP_DE4X5 {\
294 omr = inl(DE4X5_OMR);\
295 omr &= ~(OMR_ST|OMR_SR);\
296 outl(omr, DE4X5_OMR); \
297 }
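/* START_DE4X5/STOP_DE4X5 set or clear the transmit (OMR_ST) and receive
   (OMR_SR) start bits in the operating mode register; `omr' and `iobase'
   must be declared by the caller. */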
298
299
300
301
302 #define RESET_SIA outl(0, DE4X5_SICR);
303
304
305
306
307 #define DE4X5_AUTOSENSE_MS 250
308
309
310
311
312 struct de4x5_srom {
313 char reserved[18];
314 char version;
315 char num_adapters;
316 char ieee_addr[6];
317 char info[100];
318 short chksum;
319 };
320
321
322
323
324
325
326
327
328
329 #define NUM_RX_DESC 8
330 #define NUM_TX_DESC 32
331 #define BUFF_ALLOC_RETRIES 10
332 #define RX_BUFF_SZ 1536
333
334 struct de4x5_desc {
335 volatile s32 status;
336 u32 des1;
337 u32 buf;
338 u32 next;
339 DESC_ALIGN
340 };
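/* One receive/transmit ring descriptor in the 21040/21041/21140 layout: the
   OWN bit lives in the sign bit of `status' (hence the signed type and the
   `>= 0' ownership tests below), `des1' carries the control flags and buffer
   byte counts, and `buf'/`next' hold bus addresses.  This driver marks the
   end of each ring with RD_RER/TD_TER rather than chaining, so `next' is
   left at zero. */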
341
342
343
344
345 #define DE4X5_PKT_STAT_SZ 16
346 #define DE4X5_PKT_BIN_SZ 128
347
348
349 struct de4x5_private {
350 char adapter_name[80];
351 struct de4x5_desc rx_ring[NUM_RX_DESC];
352 struct de4x5_desc tx_ring[NUM_TX_DESC];
353 struct sk_buff *skb[NUM_TX_DESC];
354 int rx_new, rx_old;
355 int tx_new, tx_old;
356 char setup_frame[SETUP_FRAME_LEN];
357 struct enet_statistics stats;
358 struct {
359 u_int bins[DE4X5_PKT_STAT_SZ];
360 u_int unicast;
361 u_int multicast;
362 u_int broadcast;
363 u_int excessive_collisions;
364 u_int tx_underruns;
365 u_int excessive_underruns;
366 } pktStats;
367 char rxRingSize;
368 char txRingSize;
369 int bus;
370 int bus_num;
371 int chipset;
372 s32 irq_mask;
373 s32 irq_en;
374 int media;
375 int linkProb;
376 int autosense;
377 int tx_enable;
378 int lostMedia;
379 int setup_f;
380 };
381
382
383
384
385
386
387
388
389
390 #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
391 lp->tx_old+lp->txRingSize-lp->tx_new-1:\
392 lp->tx_old -lp->tx_new-1)
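/* Free transmit descriptors between the producer (tx_new) and consumer
   (tx_old) indices; one slot is always kept unused so that a completely full
   ring cannot be confused with an empty one. */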
393
394
395
396
397 static int de4x5_open(struct device *dev);
398 static int de4x5_queue_pkt(struct sk_buff *skb, struct device *dev);
399 static void de4x5_interrupt(int irq, struct pt_regs *regs);
400 static int de4x5_close(struct device *dev);
401 static struct enet_statistics *de4x5_get_stats(struct device *dev);
402 static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);
403 static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd);
404
405
406
407
408 static int de4x5_hw_init(struct device *dev, u_long iobase);
409 static int de4x5_init(struct device *dev);
410 static int de4x5_rx(struct device *dev);
411 static int de4x5_tx(struct device *dev);
412 static int de4x5_ast(struct device *dev);
413
414 static int autoconf_media(struct device *dev);
415 static void create_packet(struct device *dev, char *frame, int len);
416 static void dce_us_delay(u32 usec);
417 static void dce_ms_delay(u32 msec);
418 static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb);
419 static void dc21040_autoconf(struct device *dev);
420 static void dc21041_autoconf(struct device *dev);
421 static void dc21140_autoconf(struct device *dev);
422 static int test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
423
424 static int ping_media(struct device *dev);
425 static void reset_init_sia(struct device *dev, s32 sicr, s32 strr, s32 sigr);
426 static int test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec);
427 static void load_ms_timer(struct device *dev, u32 msec);
428 static int EISA_signature(char *name, s32 eisa_id);
429 static int DevicePresent(u_long iobase);
430 static short srom_rd(u_long address, u_char offset);
431 static void srom_latch(u_int command, u_long address);
432 static void srom_command(u_int command, u_long address);
433 static void srom_address(u_int command, u_long address, u_char offset);
434 static short srom_data(u_int command, u_long address);
435
436 static void sendto_srom(u_int command, u_long addr);
437 static int getfrom_srom(u_long addr);
438 static void SetMulticastFilter(struct device *dev, int num_addrs, char *addrs);
439 static int get_hw_addr(struct device *dev);
440
441 static void eisa_probe(struct device *dev, u_long iobase);
442 static void pci_probe(struct device *dev, u_long iobase);
443 static struct device *alloc_device(struct device *dev, u_long iobase);
444 static char *build_setup_frame(struct device *dev, int mode);
445 static void disable_ast(struct device *dev);
446 static void enable_ast(struct device *dev, u32 time_out);
447 static void kick_tx(struct device *dev);
448
449 #ifdef MODULE
450 int init_module(void);
451 void cleanup_module(void);
452 static int autoprobed = 1, loading_module = 1;
453 #else
454 static unsigned char de4x5_irq[] = {5,9,10,11};
455 static int autoprobed = 0, loading_module = 0;
456 #endif
457
458 static char name[DE4X5_NAME_LENGTH + 1];
459 static int num_de4x5s = 0, num_eth = 0;
460
461
462
463
464
465
466 static struct bus_type {
467 int bus;
468 int bus_num;
469 int device;
470 int chipset;
471 struct de4x5_srom srom;
472 int autosense;
473 } bus;
474
475
476
477
478 #define RESET_DE4X5 {\
479 int i;\
480 i=inl(DE4X5_BMR);\
481 dce_ms_delay(1);\
482 outl(i | BMR_SWR, DE4X5_BMR);\
483 dce_ms_delay(1);\
484 outl(i, DE4X5_BMR);\
485 dce_ms_delay(1);\
486 for (i=0;i<5;i++) {inl(DE4X5_BMR); dce_ms_delay(1);}\
487 dce_ms_delay(1);\
488 }
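/* Software reset: pulse BMR_SWR in the bus mode register, restore the
   original value and then perform a few dummy reads with millisecond delays
   to give the chip time to settle before it is reprogrammed. */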
489
490
491
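/* Probe entry point: scan the EISA slots and the PCI bus for supported
   chips, attach a device structure to each board found and report success if
   this device ended up with a private area. */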
492 int de4x5_probe(struct device *dev)
493 {
494 int tmp = num_de4x5s, status = -ENODEV;
495 u_long iobase = dev->base_addr;
496
497 if ((iobase == 0) && loading_module){
498 printk("Autoprobing is not supported when loading a module-based driver.\n");
499 status = -EIO;
500 } else {
501 eisa_probe(dev, iobase);
502 pci_probe(dev, iobase);
503
504 if ((tmp == num_de4x5s) && (iobase != 0) && loading_module) {
505 printk("%s: de4x5_probe() cannot find device at 0x%04lx.\n", dev->name,
506 iobase);
507 }
508
509
510
511
512
513 for (; (dev->priv == NULL) && (dev->next != NULL); dev = dev->next);
514
515 if (dev->priv) status = 0;
516 if (iobase == 0) autoprobed = 1;
517 }
518
519 return status;
520 }
521
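/* One-time initialisation of a detected board: identify the chip, read and
   verify the station address, allocate the aligned private area and receive
   buffers, set up the descriptor rings and hook up the device methods. */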
522 static int
523 de4x5_hw_init(struct device *dev, u_long iobase)
524 {
525 struct bus_type *lp = &bus;
526 int tmpbus, tmpchs, i, j, status=0;
527 char *tmp;
528
529
530 if (lp->chipset == DC21041) {
531 outl(0, PCI_CFDA);
532 dce_ms_delay(10);
533 }
534
535 RESET_DE4X5;
536
537 if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) == 0) {
538
539
540
541 if (lp->bus == PCI) {
542 if (!is_not_dec) {
543 if ((lp->chipset == DC21040) || (lp->chipset == DC21041)) {
544 strcpy(name, "DE435");
545 } else if (lp->chipset == DC21140) {
546 strcpy(name, "DE500");
547 }
548 } else {
549 strcpy(name, "UNKNOWN");
550 }
551 } else {
552 EISA_signature(name, EISA_ID0);
553 }
554
555 if (*name != '\0') {
556 dev->base_addr = iobase;
557 if (lp->bus == EISA) {
558 printk("%s: %s at %04lx (EISA slot %ld)",
559 dev->name, name, iobase, ((iobase>>12)&0x0f));
560 } else {
561 printk("%s: %s at %04lx (PCI bus %d, device %d)", dev->name, name,
562 iobase, lp->bus_num, lp->device);
563 }
564
565 printk(", h/w address ");
566 status = get_hw_addr(dev);
567 for (i = 0; i < ETH_ALEN - 1; i++) {
568 printk("%2.2x:", dev->dev_addr[i]);
569 }
570 printk("%2.2x,\n", dev->dev_addr[i]);
571
572 tmpbus = lp->bus;
573 tmpchs = lp->chipset;
574
575 if (status == 0) {
576 struct de4x5_private *lp;
577
578
579
580
581
582 dev->priv = (void *) kmalloc(sizeof(struct de4x5_private) + ALIGN,
583 GFP_KERNEL);
584 if (dev->priv == NULL)
585 return -ENOMEM;
586
587
588
589 dev->priv = (void *)(((u_long)dev->priv + ALIGN) & ~ALIGN);
590 lp = (struct de4x5_private *)dev->priv;
591 memset(dev->priv, 0, sizeof(struct de4x5_private));
592 lp->bus = tmpbus;
593 lp->chipset = tmpchs;
594
595
596
597
598 if (de4x5_autosense & AUTO) {
599 lp->autosense = AUTO;
600 } else {
601 if (lp->chipset != DC21140) {
602 if ((lp->chipset == DC21040) && (de4x5_autosense & TP_NW)) {
603 de4x5_autosense = TP;
604 }
605 if ((lp->chipset == DC21041) && (de4x5_autosense & BNC_AUI)) {
606 de4x5_autosense = BNC;
607 }
608 lp->autosense = de4x5_autosense & 0x001f;
609 } else {
610 lp->autosense = de4x5_autosense & 0x00c0;
611 }
612 }
613
614 sprintf(lp->adapter_name,"%s (%s)", name, dev->name);
615 request_region(iobase, (lp->bus == PCI ? DE4X5_PCI_TOTAL_SIZE :
616 DE4X5_EISA_TOTAL_SIZE),
617 lp->adapter_name);
618
619
620
621
622
623
624 for (tmp=NULL, j=0; (j<BUFF_ALLOC_RETRIES) && (tmp==NULL); j++) {
625 if ((tmp = (void *)kmalloc(RX_BUFF_SZ * NUM_RX_DESC + ALIGN,
626 GFP_KERNEL)) != NULL) {
627 tmp = (char *)(((u_long) tmp + ALIGN) & ~ALIGN);
628 for (i=0; i<NUM_RX_DESC; i++) {
629 lp->rx_ring[i].status = 0;
630 lp->rx_ring[i].des1 = RX_BUFF_SZ;
631 lp->rx_ring[i].buf = virt_to_bus(tmp + i * RX_BUFF_SZ);
632 lp->rx_ring[i].next = (u32)NULL;
633 }
634 barrier();
635 }
636 }
637
638 if (tmp != NULL) {
639 lp->rxRingSize = NUM_RX_DESC;
640 lp->txRingSize = NUM_TX_DESC;
641
642
643 lp->rx_ring[lp->rxRingSize - 1].des1 |= RD_RER;
644 lp->tx_ring[lp->txRingSize - 1].des1 |= TD_TER;
645
646
647 outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
648 outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
649
650
651 lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM ;
652 lp->irq_en = IMR_NIM | IMR_AIM;
653
654 lp->tx_enable = TRUE;
655
656 if (dev->irq < 2) {
657 #ifndef MODULE
658 unsigned char irqnum;
659 s32 omr;
660 autoirq_setup(0);
661
662 omr = inl(DE4X5_OMR);
663 outl(IMR_AIM|IMR_RUM, DE4X5_IMR);
664 outl(OMR_SR | omr, DE4X5_OMR);
665
666 irqnum = autoirq_report(1);
667 if (!irqnum) {
668 printk(" and failed to detect IRQ line.\n");
669 status = -ENXIO;
670 } else {
671 for (dev->irq=0,i=0; (i<sizeof(de4x5_irq)) && (!dev->irq); i++) {
672 if (irqnum == de4x5_irq[i]) {
673 dev->irq = irqnum;
674 printk(" and uses IRQ%d.\n", dev->irq);
675 }
676 }
677
678 if (!dev->irq) {
679 printk(" but incorrect IRQ line detected.\n");
680 status = -ENXIO;
681 }
682 }
683
684 outl(0, DE4X5_IMR);
685
686 #endif
687 } else {
688 printk(" and requires IRQ%d (not probed).\n", dev->irq);
689 }
690 } else {
691 printk("%s: Kernel could not allocate RX buffer memory.\n",
692 dev->name);
693 status = -ENXIO;
694 }
695 if (status) release_region(iobase, (lp->bus == PCI ?
696 DE4X5_PCI_TOTAL_SIZE :
697 DE4X5_EISA_TOTAL_SIZE));
698 } else {
699 printk(" which has an Ethernet PROM CRC error.\n");
700 status = -ENXIO;
701 }
702 } else {
703 status = -ENXIO;
704 }
705 } else {
706 status = -ENXIO;
707 }
708
709 if (!status) {
710 if (de4x5_debug > 0) {
711 printk(version);
712 }
713
714
715 dev->open = &de4x5_open;
716 dev->hard_start_xmit = &de4x5_queue_pkt;
717 dev->stop = &de4x5_close;
718 dev->get_stats = &de4x5_get_stats;
719 #ifdef HAVE_MULTICAST
720 dev->set_multicast_list = &set_multicast_list;
721 #endif
722 dev->do_ioctl = &de4x5_ioctl;
723
724 dev->mem_start = 0;
725
726
727 ether_setup(dev);
728
729
730 if (lp->chipset == DC21041) {
731 outl(0, DE4X5_SICR);
732 outl(CFDA_PSM, PCI_CFDA);
733 }
734 } else {
735 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
736 if (lp) {
737 kfree_s(bus_to_virt(lp->rx_ring[0].buf),
738 RX_BUFF_SZ * NUM_RX_DESC + ALIGN);
739 }
740 if (dev->priv) {
741 kfree_s(dev->priv, sizeof(struct de4x5_private) + ALIGN);
742 dev->priv = NULL;
743 }
744 }
745
746 return status;
747 }
748
749
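/* Bring the interface up: take the 21041 out of its power-save state (via
   PCI_CFDA) if necessary, claim the IRQ, (re)initialise the chip and rings
   through de4x5_init() and enable interrupts. */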
750 static int
751 de4x5_open(struct device *dev)
752 {
753 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
754 u_long iobase = dev->base_addr;
755 int i, status = 0;
756 s32 imr, omr, sts;
757
758
759
760
761 if (lp->chipset == DC21041) {
762 outl(0, PCI_CFDA);
763 dce_ms_delay(10);
764 }
765
766 if (request_irq(dev->irq, (void *)de4x5_interrupt, 0, lp->adapter_name)) {
767 printk("de4x5_open(): Requested IRQ%d is busy\n",dev->irq);
768 status = -EAGAIN;
769 } else {
770
771 irq2dev_map[dev->irq] = dev;
772
773
774
775 status = de4x5_init(dev);
776
777 if (de4x5_debug > 1){
778 printk("%s: de4x5 open with irq %d\n",dev->name,dev->irq);
779 printk("\tphysical address: ");
780 for (i=0;i<6;i++){
781 printk("%2.2x:",(short)dev->dev_addr[i]);
782 }
783 printk("\n");
784 printk("Descriptor head addresses:\n");
785 printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
786 printk("Descriptor addresses:\nRX: ");
787 for (i=0;i<lp->rxRingSize-1;i++){
788 if (i < 3) {
789 printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
790 }
791 }
792 printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
793 printk("TX: ");
794 for (i=0;i<lp->txRingSize-1;i++){
795 if (i < 3) {
796 printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
797 }
798 }
799 printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
800 printk("Descriptor buffers:\nRX: ");
801 for (i=0;i<lp->rxRingSize-1;i++){
802 if (i < 3) {
803 printk("0x%8.8x ",lp->rx_ring[i].buf);
804 }
805 }
806 printk("...0x%8.8x\n",lp->rx_ring[i].buf);
807 printk("TX: ");
808 for (i=0;i<lp->txRingSize-1;i++){
809 if (i < 3) {
810 printk("0x%8.8x ", lp->tx_ring[i].buf);
811 }
812 }
813 printk("...0x%8.8x\n", lp->tx_ring[i].buf);
814 printk("Ring size: \nRX: %d\nTX: %d\n",
815 (short)lp->rxRingSize,
816 (short)lp->txRingSize);
817 printk("\tstatus: %d\n", status);
818 }
819
820 if (!status) {
821 dev->tbusy = 0;
822 dev->start = 1;
823 dev->interrupt = UNMASK_INTERRUPTS;
824 dev->trans_start = jiffies;
825
826 START_DE4X5;
827
828
829 imr = 0;
830 UNMASK_IRQs;
831
832
833 sts = inl(DE4X5_STS);
834 outl(sts, DE4X5_STS);
835
836 ENABLE_IRQs;
837 }
838 if (de4x5_debug > 1) {
839 printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
840 printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
841 printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
842 printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
843 printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
844 printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
845 printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
846 printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
847 }
848 }
849
850 MOD_INC_USE_COUNT;
851
852 return status;
853 }
854
855
856
857
858
859
860
861
862
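/* (Re)initialise the chip: reset it, programme the bus mode, reset both
   descriptor rings, download a setup frame carrying the current address
   filter and then run the media/mode autosense. */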
863 static int
864 de4x5_init(struct device *dev)
865 {
866 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
867 u_long iobase = dev->base_addr;
868 int i, j, status = 0;
869 s32 bmr, omr;
870
871
872 set_bit(0, (void *)&dev->tbusy);
873
874 RESET_DE4X5;
875
876 bmr = inl(DE4X5_BMR);
877 bmr |= PBL_8 | DESC_SKIP_LEN | CACHE_ALIGN;
878 outl(bmr, DE4X5_BMR);
879
880 if (lp->chipset != DC21140) {
881 omr = TR_96;
882 lp->setup_f = HASH_PERF;
883 } else {
884 omr = OMR_SDP | OMR_SF;
885 lp->setup_f = PERFECT;
886 }
887 outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
888 outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
889
890 lp->rx_new = lp->rx_old = 0;
891 lp->tx_new = lp->tx_old = 0;
892
893 for (i = 0; i < lp->rxRingSize; i++) {
894 lp->rx_ring[i].status = R_OWN;
895 }
896
897 for (i = 0; i < lp->txRingSize; i++) {
898 lp->tx_ring[i].status = 0;
899 }
900
901 barrier();
902
903
904 SetMulticastFilter(dev, 0, NULL);
905
906 if (lp->chipset != DC21140) {
907 load_packet(dev, lp->setup_frame, HASH_F|TD_SET|SETUP_FRAME_LEN, NULL);
908 } else {
909 load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, NULL);
910 }
911 outl(omr|OMR_ST, DE4X5_OMR);
912
913
914 for (j=0, i=jiffies;(jiffies<=i+HZ/100) && (j==0);) {
915 if (lp->tx_ring[lp->tx_new].status >= 0) j=1;
916 }
917 outl(omr, DE4X5_OMR);
918
919 if (j == 0) {
920 printk("%s: Setup frame timed out, status %08x\n", dev->name,
921 inl(DE4X5_STS));
922 status = -EIO;
923 }
924
925 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
926 lp->tx_old = lp->tx_new;
927
928
929 if (autoconf_media(dev) == 0) {
930 status = -EIO;
931 }
932
933 return 0;
934 }
935
936
937
938
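/* Queue a frame for transmission.  Frames of FAKE_FRAME_LEN (queued by
   kick_tx()) exist only to force the timeout/restart path when the medium
   has been lost; real frames are loaded into the next free descriptor and
   the chip is prodded with a transmit poll demand. */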
939 static int
940 de4x5_queue_pkt(struct sk_buff *skb, struct device *dev)
941 {
942 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
943 u_long iobase = dev->base_addr;
944 int i, status = 0;
945 s32 imr, omr, sts;
946
947
948
949
950
951
952 if (set_bit(0, (void*)&dev->tbusy) == 0) {
953 cli();
954 de4x5_tx(dev);
955 dev->tbusy = 0;
956 sti();
957 }
958
959
960
961
962
963
964 if (dev->tbusy || (lp->lostMedia > LOST_MEDIA_THRESHOLD)) {
965 u_long tickssofar = jiffies - dev->trans_start;
966 if ((tickssofar < QUEUE_PKT_TIMEOUT) &&
967 (lp->lostMedia <= LOST_MEDIA_THRESHOLD)) {
968 status = -1;
969 } else {
970 if (de4x5_debug >= 1) {
971 printk("%s: transmit timed out, status %08x, tbusy:%ld, lostMedia:%d tickssofar:%ld, resetting.\n",dev->name, inl(DE4X5_STS), dev->tbusy, lp->lostMedia, tickssofar);
972 }
973
974
975 STOP_DE4X5;
976
977
978 for (i=lp->tx_old; i!=lp->tx_new; i=(i+1)%lp->txRingSize) {
979 if (lp->skb[i] != NULL) {
980 if (lp->skb[i]->len != FAKE_FRAME_LEN) {
981 if (lp->tx_ring[i].status == T_OWN) {
982 dev_queue_xmit(lp->skb[i], dev, SOPRI_NORMAL);
983 } else {
984 dev_kfree_skb(lp->skb[i], FREE_WRITE);
985 }
986 } else {
987 dev_kfree_skb(lp->skb[i], FREE_WRITE);
988 }
989 lp->skb[i] = NULL;
990 }
991 }
992 if (skb->len != FAKE_FRAME_LEN) {
993 dev_queue_xmit(skb, dev, SOPRI_NORMAL);
994 } else {
995 dev_kfree_skb(skb, FREE_WRITE);
996 }
997
998
999 status = de4x5_init(dev);
1000
1001
1002 if (!status) {
1003
1004 dev->interrupt = UNMASK_INTERRUPTS;
1005 dev->start = 1;
1006 dev->tbusy = 0;
1007 dev->trans_start = jiffies;
1008
1009 START_DE4X5;
1010
1011
1012 imr = 0;
1013 UNMASK_IRQs;
1014
1015
1016 sts = inl(DE4X5_STS);
1017 outl(sts, DE4X5_STS);
1018
1019 ENABLE_IRQs;
1020 } else {
1021 printk("%s: hardware initialisation failure, status %08x.\n",
1022 dev->name, inl(DE4X5_STS));
1023 }
1024 }
1025 } else if (skb == NULL) {
1026 dev_tint(dev);
1027 } else if (skb->len == FAKE_FRAME_LEN) {
1028 dev_kfree_skb(skb, FREE_WRITE);
1029 } else if (skb->len > 0) {
1030
1031 if (set_bit(0, (void*)&dev->tbusy) != 0) {
1032 printk("%s: Transmitter access conflict.\n", dev->name);
1033 status = -1;
1034 } else {
1035 cli();
1036 if (TX_BUFFS_AVAIL) {
1037 load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
1038 if (lp->tx_enable) {
1039 outl(POLL_DEMAND, DE4X5_TPD);
1040 }
1041
1042 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1043 dev->trans_start = jiffies;
1044
1045 if (TX_BUFFS_AVAIL) {
1046 dev->tbusy = 0;
1047 }
1048 } else {
1049 status = -1;
1050 }
1051 sti();
1052 }
1053 }
1054
1055 return status;
1056 }
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
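/* Interrupt handler: acknowledge and service receive, transmit, timer and
   link events until the status register shows nothing more of interest, then
   let the network bottom half run if transmit descriptors have been freed. */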
1069 static void
1070 de4x5_interrupt(int irq, struct pt_regs *regs)
1071 {
1072 struct device *dev = (struct device *)(irq2dev_map[irq]);
1073 struct de4x5_private *lp;
1074 s32 imr, omr, sts;
1075 u_long iobase;
1076
1077 if (dev == NULL) {
1078 printk ("de4x5_interrupt(): irq %d for unknown device.\n", irq);
1079 } else {
1080 lp = (struct de4x5_private *)dev->priv;
1081 iobase = dev->base_addr;
1082
1083 if (dev->interrupt)
1084 printk("%s: Re-entering the interrupt handler.\n", dev->name);
1085
1086 DISABLE_IRQs;
1087 dev->interrupt = MASK_INTERRUPTS;
1088
1089 while ((sts = inl(DE4X5_STS)) & lp->irq_mask) {
1090 outl(sts, DE4X5_STS);
1091
1092 if (sts & (STS_RI | STS_RU))
1093 de4x5_rx(dev);
1094
1095 if (sts & (STS_TI | STS_TU))
1096 de4x5_tx(dev);
1097
1098 if (sts & STS_TM)
1099 de4x5_ast(dev);
1100
1101 if (sts & STS_LNF) {
1102 lp->lostMedia = LOST_MEDIA_THRESHOLD + 1;
1103 lp->irq_mask &= ~IMR_LFM;
1104 kick_tx(dev);
1105 }
1106
1107 if (sts & STS_SE) {
1108 STOP_DE4X5;
1109 printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
1110 dev->name, sts);
1111 }
1112 }
1113
1114 if (TX_BUFFS_AVAIL && dev->tbusy) {
1115 dev->tbusy = 0;
1116 mark_bh(NET_BH);
1117 }
1118
1119 dev->interrupt = UNMASK_INTERRUPTS;
1120 ENABLE_IRQs;
1121 }
1122
1123 return;
1124 }
1125
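/* Receive handler: walk the RX ring while descriptors are owned by the host
   (status >= 0), copy each completed frame into a freshly allocated sk_buff,
   update the statistics and hand the descriptors back to the chip. */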
1126 static int
1127 de4x5_rx(struct device *dev)
1128 {
1129 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1130 int i, entry;
1131 s32 status;
1132 char *buf;
1133
1134 for (entry = lp->rx_new; lp->rx_ring[entry].status >= 0;entry = lp->rx_new) {
1135 status = lp->rx_ring[entry].status;
1136
1137 if (status & RD_FS) {
1138 lp->rx_old = entry;
1139 }
1140
1141 if (status & RD_LS) {
1142 if (status & RD_ES) {
1143 lp->stats.rx_errors++;
1144 if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
1145 if (status & RD_CE) lp->stats.rx_crc_errors++;
1146 if (status & RD_OF) lp->stats.rx_fifo_errors++;
1147 } else {
1148 struct sk_buff *skb;
1149 short pkt_len = (short)(lp->rx_ring[entry].status >> 16) - 4;
1150
1151 if ((skb = dev_alloc_skb(pkt_len+2)) != NULL) {
1152 skb->dev = dev;
1153
1154 skb_reserve(skb,2);
1155 if (entry < lp->rx_old) {
1156 short len = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
1157 memcpy(skb_put(skb,len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), len);
1158 memcpy(skb_put(skb,pkt_len-len), bus_to_virt(lp->rx_ring[0].buf), pkt_len - len);
1159 } else {
1160 memcpy(skb_put(skb,pkt_len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), pkt_len);
1161 }
1162
1163
1164 skb->protocol=eth_type_trans(skb,dev);
1165 netif_rx(skb);
1166
1167
1168 lp->stats.rx_packets++;
1169 for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
1170 if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
1171 lp->pktStats.bins[i]++;
1172 i = DE4X5_PKT_STAT_SZ;
1173 }
1174 }
1175 buf = skb->data;
1176 if (buf[0] & 0x01) {
1177 if ((*(s32 *)&buf[0] == -1) && (*(s16 *)&buf[4] == -1)) {
1178 lp->pktStats.broadcast++;
1179 } else {
1180 lp->pktStats.multicast++;
1181 }
1182 } else if ((*(s32 *)&buf[0] == *(s32 *)&dev->dev_addr[0]) &&
1183 (*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) {
1184 lp->pktStats.unicast++;
1185 }
1186
1187 lp->pktStats.bins[0]++;
1188 if (lp->pktStats.bins[0] == 0) {
1189 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
1190 }
1191 } else {
1192 printk("%s: Insufficient memory; nuking packet.\n", dev->name);
1193 lp->stats.rx_dropped++;
1194 break;
1195 }
1196 }
1197
1198
1199 for (; lp->rx_old!=entry; lp->rx_old=(lp->rx_old+1)%lp->rxRingSize) {
1200 lp->rx_ring[lp->rx_old].status = R_OWN;
1201 barrier();
1202 }
1203 lp->rx_ring[entry].status = R_OWN;
1204 barrier();
1205 }
1206
1207
1208
1209
1210 lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
1211 }
1212
1213 return 0;
1214 }
1215
1216
1217
1218
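/* Transmit completion: reclaim descriptors between tx_old and tx_new,
   account for any errors, free the associated sk_buffs and kick the
   transmitter if the medium appears to have gone away. */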
1219 static int
1220 de4x5_tx(struct device *dev)
1221 {
1222 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1223 u_long iobase = dev->base_addr;
1224 int entry;
1225 s32 status;
1226
1227 for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
1228 status = lp->tx_ring[entry].status;
1229 if (status < 0) {
1230 break;
1231 } else if (status & TD_ES) {
1232 lp->stats.tx_errors++;
1233 if (status & TD_NC) lp->stats.tx_carrier_errors++;
1234 if (status & TD_LC) lp->stats.tx_window_errors++;
1235 if (status & TD_UF) lp->stats.tx_fifo_errors++;
1236 if (status & TD_LC) lp->stats.collisions++;
1237 if (status & TD_EC) lp->pktStats.excessive_collisions++;
1238 if (status & TD_DE) lp->stats.tx_aborted_errors++;
1239
1240 if ((status != 0x7fffffff) &&
1241 (status & (TD_LO | TD_NC | TD_EC | TD_LF))) {
1242 lp->lostMedia++;
1243 if (lp->lostMedia > LOST_MEDIA_THRESHOLD) {
1244 kick_tx(dev);
1245 }
1246 } else {
1247 outl(POLL_DEMAND, DE4X5_TPD);
1248 }
1249 } else {
1250 lp->stats.tx_packets++;
1251 lp->lostMedia = 0;
1252 }
1253
1254 if (lp->skb[entry] != NULL) {
1255 dev_kfree_skb(lp->skb[entry], FREE_WRITE);
1256 lp->skb[entry] = NULL;
1257 }
1258
1259
1260 lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
1261 }
1262
1263 return 0;
1264 }
1265
1266 static int
1267 de4x5_ast(struct device *dev)
1268 {
1269 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1270 u_long iobase = dev->base_addr;
1271 s32 gep;
1272
1273 disable_ast(dev);
1274
1275 if (lp->chipset == DC21140) {
1276 gep = inl(DE4X5_GEP);
1277 if (((lp->media == _100Mb) && (gep & GEP_SLNK)) ||
1278 ((lp->media == _10Mb) && (gep & GEP_LNP)) ||
1279 ((lp->media == _10Mb) && !(gep & GEP_SLNK)) ||
1280 (lp->media == NC)) {
1281 if (lp->linkProb || ((lp->media == NC) && (!(gep & GEP_LNP)))) {
1282 lp->lostMedia = LOST_MEDIA_THRESHOLD + 1;
1283 lp->linkProb = 0;
1284 kick_tx(dev);
1285 } else {
1286 switch(lp->media) {
1287 case NC:
1288 lp->linkProb = 0;
1289 enable_ast(dev, DE4X5_AUTOSENSE_MS);
1290 break;
1291
1292 case _10Mb:
1293 lp->linkProb = 1;
1294 enable_ast(dev, 1500);
1295 break;
1296
1297 case _100Mb:
1298 lp->linkProb = 1;
1299 enable_ast(dev, 4000);
1300 break;
1301 }
1302 }
1303 } else {
1304 lp->linkProb = 0;
1305 enable_ast(dev, DE4X5_AUTOSENSE_MS);
1306 }
1307 }
1308
1309 return 0;
1310 }
1311
1312 static int
1313 de4x5_close(struct device *dev)
1314 {
1315 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1316 u_long iobase = dev->base_addr;
1317 s32 imr, omr;
1318
1319 dev->start = 0;
1320 dev->tbusy = 1;
1321
1322 if (de4x5_debug > 1) {
1323 printk("%s: Shutting down ethercard, status was %8.8x.\n",
1324 dev->name, inl(DE4X5_STS));
1325 }
1326
1327
1328
1329
1330 DISABLE_IRQs;
1331
1332 STOP_DE4X5;
1333
1334
1335
1336
1337 free_irq(dev->irq);
1338 irq2dev_map[dev->irq] = 0;
1339
1340 MOD_DEC_USE_COUNT;
1341
1342
1343 if (lp->chipset == DC21041) {
1344 outl(0, DE4X5_SICR);
1345 outl(CFDA_PSM, PCI_CFDA);
1346 }
1347
1348 return 0;
1349 }
1350
1351 static struct enet_statistics *
1352 de4x5_get_stats(struct device *dev)
1353 {
1354 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1355 u_long iobase = dev->base_addr;
1356
1357 lp->stats.rx_missed_errors = (int) (inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
1358
1359 return &lp->stats;
1360 }
1361
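/* Hand a buffer to the chip: fill in the descriptor's buffer address and
   control bits first and set T_OWN last, with barrier()s, so the 21x4x never
   sees a partially written descriptor. */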
1362 static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb)
1363 {
1364 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1365
1366 lp->tx_ring[lp->tx_new].buf = virt_to_bus(buf);
1367 lp->tx_ring[lp->tx_new].des1 &= TD_TER;
1368 lp->tx_ring[lp->tx_new].des1 |= flags;
1369 lp->skb[lp->tx_new] = skb;
1370 barrier();
1371 lp->tx_ring[lp->tx_new].status = T_OWN;
1372 barrier();
1373
1374 return;
1375 }
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386 static void
1387 set_multicast_list(struct device *dev, int num_addrs, void *addrs)
1388 {
1389 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1390 u_long iobase = dev->base_addr;
1391
1392
1393 if (irq2dev_map[dev->irq] != NULL) {
1394 if (num_addrs >= 0) {
1395 SetMulticastFilter(dev, num_addrs, (char *)addrs);
1396 if (lp->setup_f == HASH_PERF) {
1397 load_packet(dev, lp->setup_frame, TD_IC | HASH_F | TD_SET |
1398 SETUP_FRAME_LEN, NULL);
1399 } else {
1400 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
1401 SETUP_FRAME_LEN, NULL);
1402 }
1403
1404 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1405 outl(POLL_DEMAND, DE4X5_TPD);
1406 dev->trans_start = jiffies;
1407 } else {
1408 u32 omr;
1409 omr = inl(DE4X5_OMR);
1410 omr |= OMR_PR;
1411 outl(omr, DE4X5_OMR);
1412 }
1413 }
1414
1415 return;
1416 }
1417
1418
1419
1420
1421
1422
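/* Rebuild the setup frame for the current filtering mode.  With HASH_PERF
   each multicast address is run through the little-endian CRC used by the
   chip and the low-order bits selected by HASH_BITS pick a bit in the hash
   table; with PERFECT the addresses are simply copied into the frame behind
   the station address. */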
1423 static void SetMulticastFilter(struct device *dev, int num_addrs, char *addrs)
1424 {
1425 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1426 u_long iobase = dev->base_addr;
1427 int i, j, bit, byte;
1428 u16 hashcode;
1429 u32 omr, crc, poly = CRC_POLYNOMIAL_LE;
1430 char *pa;
1431
1432 omr = inl(DE4X5_OMR);
1433 pa = build_setup_frame(dev, ALL);
1434
1435 if (lp->setup_f == HASH_PERF) {
1436 if (num_addrs == HASH_TABLE_LEN) {
1437 omr |= OMR_PM;
1438 } else {
1439 omr &= ~OMR_PM;
1440
1441 for (i=0;i<num_addrs;i++) {
1442 if ((*addrs & 0x01) == 1) {
1443 crc = 0xffffffff;
1444 for (byte=0;byte<ETH_ALEN;byte++) {
1445
1446 for (bit = *addrs++,j=0;j<8;j++, bit>>=1) {
1447 crc = (crc >> 1) ^ (((crc ^ bit) & 0x01) ? poly : 0);
1448 }
1449 }
1450 hashcode = crc & HASH_BITS;
1451
1452 byte = hashcode >> 3;
1453 bit = 1 << (hashcode & 0x07);
1454
1455 byte <<= 1;
1456 if (byte & 0x02) {
1457 byte -= 1;
1458 }
1459 lp->setup_frame[byte] |= bit;
1460
1461 } else {
1462 addrs += ETH_ALEN;
1463 }
1464 }
1465 }
1466 } else {
1467 omr &= ~OMR_PM;
1468 for (j=0; j<num_addrs; j++) {
1469 for (i=0; i<ETH_ALEN; i++) {
1470 *(pa + (i&1)) = *addrs++;
1471 if (i & 0x01) pa += 4;
1472 }
1473 }
1474 }
1475
1476 if (num_addrs == 0)
1477 omr &= ~OMR_PR;
1478 outl(omr, DE4X5_OMR);
1479
1480 return;
1481 }
1482
1483
1484
1485
1486
1487 static void eisa_probe(struct device *dev, u_long ioaddr)
1488 {
1489 int i, maxSlots, status;
1490 u_short vendor, device;
1491 s32 cfid;
1492 u_long iobase;
1493 struct bus_type *lp = &bus;
1494 char name[DE4X5_STRLEN];
1495
1496 if (!ioaddr && autoprobed) return ;
1497 if ((ioaddr < 0x1000) && (ioaddr > 0)) return;
1498
1499 lp->bus = EISA;
1500
1501 if (ioaddr == 0) {
1502 iobase = EISA_SLOT_INC;
1503 i = 1;
1504 maxSlots = MAX_EISA_SLOTS;
1505 } else {
1506 iobase = ioaddr;
1507 i = (ioaddr >> 12);
1508 maxSlots = i + 1;
1509 }
1510
1511 for (status = -ENODEV; (i<maxSlots) && (dev!=NULL); i++, iobase+=EISA_SLOT_INC) {
1512 if (EISA_signature(name, EISA_ID)) {
1513 cfid = inl(PCI_CFID);
1514 device = (u_short)(cfid >> 16);
1515 vendor = (u_short) cfid;
1516
1517 lp->bus = EISA;
1518 lp->chipset = device;
1519 if (DevicePresent(EISA_APROM) == 0) {
1520
1521 outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
1522 outl(0x00004000, PCI_CFLT);
1523 outl(iobase, PCI_CBIO);
1524
1525 if (check_region(iobase, DE4X5_EISA_TOTAL_SIZE) == 0) {
1526 if ((dev = alloc_device(dev, iobase)) != NULL) {
1527 if ((status = de4x5_hw_init(dev, iobase)) == 0) {
1528 num_de4x5s++;
1529 }
1530 num_eth++;
1531 }
1532 } else if (autoprobed) {
1533 printk("%s: region already allocated at 0x%04lx.\n", dev->name, iobase);
1534 }
1535 }
1536 }
1537 }
1538
1539 return;
1540 }
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554 #define PCI_DEVICE (dev_num << 3)
1555 #define PCI_LAST_DEV 32
1556
1557 static void pci_probe(struct device *dev, u_long ioaddr)
1558 {
1559 u_char irq;
1560 u_char pb, pbus, dev_num, dnum, dev_fn;
1561 u_short vendor, device, index, status;
1562 u_int class = DE4X5_CLASS_CODE;
1563 u_int iobase;
1564 struct bus_type *lp = &bus;
1565
1566 if (!ioaddr && autoprobed) return ;
1567
1568 if (pcibios_present()) {
1569 lp->bus = PCI;
1570
1571 if (ioaddr < 0x1000) {
1572 pbus = (u_short)(ioaddr >> 8);
1573 dnum = (u_short)(ioaddr & 0xff);
1574 } else {
1575 pbus = 0;
1576 dnum = 0;
1577 }
1578
1579 for (index=0;
1580 (pcibios_find_class(class, index, &pb, &dev_fn)!= PCIBIOS_DEVICE_NOT_FOUND);
1581 index++) {
1582 dev_num = PCI_SLOT(dev_fn);
1583
1584 if ((!pbus && !dnum) || ((pbus == pb) && (dnum == dev_num))) {
1585 pcibios_read_config_word(pb, PCI_DEVICE, PCI_VENDOR_ID, &vendor);
1586 pcibios_read_config_word(pb, PCI_DEVICE, PCI_DEVICE_ID, &device);
1587 if (is_DC21040 || is_DC21041 || is_DC21140) {
1588
1589 lp->device = dev_num;
1590 lp->bus_num = pb;
1591
1592
1593 lp->chipset = device;
1594
1595
1596 pcibios_read_config_dword(pb, PCI_DEVICE, PCI_BASE_ADDRESS_0, &iobase);
1597 iobase &= CBIO_MASK;
1598
1599
1600 pcibios_read_config_byte(pb, PCI_DEVICE, PCI_INTERRUPT_LINE, &irq);
1601
1602
1603 pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
1604 if (status & PCI_COMMAND_IO) {
1605 if (!(status & PCI_COMMAND_MASTER)) {
1606 status |= PCI_COMMAND_MASTER;
1607 pcibios_write_config_word(pb, PCI_DEVICE, PCI_COMMAND, status);
1608 pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
1609 }
1610 if (status & PCI_COMMAND_MASTER) {
1611 if ((DevicePresent(DE4X5_APROM) == 0) || is_not_dec) {
1612 if (check_region(iobase, DE4X5_PCI_TOTAL_SIZE) == 0) {
1613 if ((dev = alloc_device(dev, iobase)) != NULL) {
1614 dev->irq = irq;
1615 if ((status = de4x5_hw_init(dev, iobase)) == 0) {
1616 num_de4x5s++;
1617 }
1618 num_eth++;
1619 }
1620 } else if (autoprobed) {
1621 printk("%s: region already allocated at 0x%04x.\n", dev->name, (u_short)iobase);
1622 }
1623 }
1624 }
1625 }
1626 }
1627 }
1628 }
1629 }
1630
1631 return;
1632 }
1633
1634
1635
1636
1637
1638 static struct device *alloc_device(struct device *dev, u_long iobase)
1639 {
1640 int addAutoProbe = 0;
1641 struct device *tmp = NULL, *ret;
1642 int (*init)(struct device *) = NULL;
1643
1644
1645
1646
1647 if (!loading_module) {
1648 while (dev->next != NULL) {
1649 if ((dev->base_addr == DE4X5_NDA) || (dev->base_addr == 0)) break;
1650 dev = dev->next;
1651 num_eth++;
1652 }
1653
1654
1655
1656
1657
1658 if ((dev->base_addr == 0) && (num_de4x5s > 0)) {
1659 addAutoProbe++;
1660 tmp = dev->next;
1661 init = dev->init;
1662 }
1663
1664
1665
1666
1667
1668 if ((dev->next == NULL) &&
1669 !((dev->base_addr == DE4X5_NDA) || (dev->base_addr == 0))){
1670 dev->next = (struct device *)kmalloc(sizeof(struct device) + 8,
1671 GFP_KERNEL);
1672
1673 dev = dev->next;
1674 if (dev == NULL) {
1675 printk("eth%d: Device not initialised, insufficient memory\n",
1676 num_eth);
1677 } else {
1678
1679
1680
1681
1682
1683 dev->name = (char *)(dev + 1);
1684 if (num_eth > 9999) {
1685 sprintf(dev->name,"eth????");
1686 } else {
1687 sprintf(dev->name,"eth%d", num_eth);
1688 }
1689 dev->base_addr = iobase;
1690 dev->next = NULL;
1691 dev->init = &de4x5_probe;
1692 num_de4x5s++;
1693 }
1694 }
1695 ret = dev;
1696
1697
1698
1699
1700
1701 if (ret != NULL) {
1702 if (addAutoProbe) {
1703 for (; (tmp->next!=NULL) && (tmp->base_addr!=DE4X5_NDA); tmp=tmp->next);
1704
1705
1706
1707
1708
1709 if ((tmp->next == NULL) && !(tmp->base_addr == DE4X5_NDA)) {
1710 tmp->next = (struct device *)kmalloc(sizeof(struct device) + 8,
1711 GFP_KERNEL);
1712 tmp = tmp->next;
1713 if (tmp == NULL) {
1714 printk("%s: Insufficient memory to extend the device list.\n",
1715 dev->name);
1716 } else {
1717
1718
1719
1720
1721
1722 tmp->name = (char *)(tmp + 1);
1723 if (num_eth > 9999) {
1724 sprintf(tmp->name,"eth????");
1725 } else {
1726 sprintf(tmp->name,"eth%d", num_eth);
1727 }
1728 tmp->base_addr = 0;
1729 tmp->next = NULL;
1730 tmp->init = init;
1731 }
1732 } else {
1733 tmp->base_addr = 0;
1734 }
1735 }
1736 }
1737 } else {
1738 ret = dev;
1739 }
1740
1741 return ret;
1742 }
1743
1744
1745
1746
1747
1748
1749
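/* Media/mode selection.  Each chip has its own autoconf routine: the DC21040
   tries TP, then BNC/AUI, then the external SIA; the DC21041 runs a small
   state machine through TP/Nway, TP, AUI and BNC; the DC21140 simply
   programmes 10 or 100Mb/s operation through the OMR and GEP registers. */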
1750 static int autoconf_media(struct device *dev)
1751 {
1752 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1753 u_long iobase = dev->base_addr;
1754
1755 lp->tx_enable = YES;
1756 if (de4x5_debug > 0 ) {
1757 if (lp->chipset != DC21140) {
1758 printk("%s: Searching for media... ",dev->name);
1759 } else {
1760 printk("%s: Searching for mode... ",dev->name);
1761 }
1762 }
1763
1764 if (lp->chipset == DC21040) {
1765 lp->media = (lp->autosense == AUTO ? TP : lp->autosense);
1766 dc21040_autoconf(dev);
1767 } else if (lp->chipset == DC21041) {
1768 lp->media = (lp->autosense == AUTO ? TP_NW : lp->autosense);
1769 dc21041_autoconf(dev);
1770 } else if (lp->chipset == DC21140) {
1771 disable_ast(dev);
1772 lp->media = (lp->autosense == AUTO ? _10Mb : lp->autosense);
1773 dc21140_autoconf(dev);
1774 }
1775
1776 if (de4x5_debug > 0 ) {
1777 if (lp->chipset != DC21140) {
1778 printk("media is %s\n", (lp->media == NC ? "unconnected!" :
1779 (lp->media == TP ? "TP." :
1780 (lp->media == ANS ? "TP/Nway." :
1781 (lp->media == BNC ? "BNC." :
1782 (lp->media == AUI ? "AUI." :
1783 "BNC/AUI."
1784 ))))));
1785 } else {
1786 printk("mode is %s\n",(lp->media == NC ? "link down.":
1787 (lp->media == _100Mb ? "100Mb/s." :
1788 (lp->media == _10Mb ? "10Mb/s." :
1789 "\?\?\?"
1790 ))));
1791 }
1792 }
1793
1794 if (lp->media) {
1795 lp->lostMedia = 0;
1796 inl(DE4X5_MFC);
1797 if ((lp->media == TP) || (lp->media == ANS)) {
1798 lp->irq_mask |= IMR_LFM;
1799 }
1800 }
1801 dce_ms_delay(10);
1802
1803 return (lp->media);
1804 }
1805
1806 static void dc21040_autoconf(struct device *dev)
1807 {
1808 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1809 u_long iobase = dev->base_addr;
1810 int i, linkBad;
1811 s32 sisr = 0, t_3s = 3000;
1812
1813 switch (lp->media) {
1814 case TP:
1815 reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
1816 for (linkBad=1,i=0;(i<t_3s) && linkBad && !(sisr & SISR_NCR);i++) {
1817 if (((sisr = inl(DE4X5_SISR)) & SISR_LKF) == 0) linkBad = 0;
1818 dce_ms_delay(1);
1819 }
1820 if (linkBad && (lp->autosense == AUTO)) {
1821 lp->media = BNC_AUI;
1822 dc21040_autoconf(dev);
1823 }
1824 break;
1825
1826 case BNC:
1827 case AUI:
1828 case BNC_AUI:
1829 reset_init_sia(dev, 0x8f09, 0x0705, 0x0006);
1830 dce_ms_delay(500);
1831 linkBad = ping_media(dev);
1832 if (linkBad && (lp->autosense == AUTO)) {
1833 lp->media = EXT_SIA;
1834 dc21040_autoconf(dev);
1835 }
1836 break;
1837
1838 case EXT_SIA:
1839 reset_init_sia(dev, 0x3041, 0x0000, 0x0006);
1840 dce_ms_delay(500);
1841 linkBad = ping_media(dev);
1842 if (linkBad && (lp->autosense == AUTO)) {
1843 lp->media = NC;
1844 dc21040_autoconf(dev);
1845 }
1846 break;
1847
1848 case NC:
1849 #ifndef __alpha__
1850 reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
1851 break;
1852 #else
1853
1854 reset_init_sia(dev, 0x8f09, 0x0705, 0x0006);
1855 #endif
1856 }
1857
1858 return;
1859 }
1860
1861
1862
1863
1864
1865
1866
1867 static void dc21041_autoconf(struct device *dev)
1868 {
1869 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1870 u_long iobase = dev->base_addr;
1871 s32 sts, irqs, irq_mask, omr;
1872
1873 switch (lp->media) {
1874 case TP_NW:
1875 omr = inl(DE4X5_OMR);
1876 outl(omr | OMR_FD, DE4X5_OMR);
1877 irqs = STS_LNF | STS_LNP;
1878 irq_mask = IMR_LFM | IMR_LPM;
1879 sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
1880 if (sts & STS_LNP) {
1881 lp->media = ANS;
1882 } else {
1883 lp->media = AUI;
1884 }
1885 dc21041_autoconf(dev);
1886 break;
1887
1888 case ANS:
1889 irqs = STS_LNP;
1890 irq_mask = IMR_LPM;
1891 sts = test_ans(dev, irqs, irq_mask, 3000);
1892 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
1893 lp->media = TP;
1894 dc21041_autoconf(dev);
1895 }
1896 break;
1897
1898 case TP:
1899 omr = inl(DE4X5_OMR);
1900 outl(omr & ~OMR_FD, DE4X5_OMR);
1901 irqs = STS_LNF | STS_LNP;
1902 irq_mask = IMR_LFM | IMR_LPM;
1903 sts = test_media(dev, irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
1904 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
1905 if (inl(DE4X5_SISR) & SISR_NRA) {
1906 lp->media = AUI;
1907 } else {
1908 lp->media = BNC;
1909 }
1910 dc21041_autoconf(dev);
1911 }
1912 break;
1913
1914 case AUI:
1915 omr = inl(DE4X5_OMR);
1916 outl(omr & ~OMR_FD, DE4X5_OMR);
1917 irqs = 0;
1918 irq_mask = 0;
1919 sts = test_media(dev, irqs, irq_mask, 0xef09, 0xf7fd, 0x000e, 1000);
1920 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
1921 lp->media = BNC;
1922 dc21041_autoconf(dev);
1923 }
1924 break;
1925
1926 case BNC:
1927 omr = inl(DE4X5_OMR);
1928 outl(omr & ~OMR_FD, DE4X5_OMR);
1929 irqs = 0;
1930 irq_mask = 0;
1931 sts = test_media(dev, irqs, irq_mask, 0xef09, 0xf7fd, 0x0006, 1000);
1932 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
1933 lp->media = NC;
1934 } else {
1935 if (ping_media(dev)) lp->media = NC;
1936 }
1937 break;
1938
1939 case NC:
1940 omr = inl(DE4X5_OMR);
1941 outl(omr | OMR_FD, DE4X5_OMR);
1942 reset_init_sia(dev, 0xef01, 0xffff, 0x0008);
1943 break;
1944 }
1945
1946 return;
1947 }
1948
1949
1950
1951
1952 static void dc21140_autoconf(struct device *dev)
1953 {
1954 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1955 u_long iobase = dev->base_addr;
1956 s32 omr;
1957
1958 switch(lp->media) {
1959 case _100Mb:
1960 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR));
1961 omr |= (de4x5_full_duplex ? OMR_FD : 0);
1962 outl(omr | OMR_PS | OMR_HBD | OMR_PCS | OMR_SCR, DE4X5_OMR);
1963 outl(GEP_FDXD | GEP_MODE, DE4X5_GEP);
1964 break;
1965
1966 case _10Mb:
1967 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR));
1968 omr |= (de4x5_full_duplex ? OMR_FD : 0);
1969 outl(omr | OMR_TTM, DE4X5_OMR);
1970 outl(GEP_FDXD, DE4X5_GEP);
1971 break;
1972 }
1973
1974 return;
1975 }
1976
1977 static int
1978 test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
1979 {
1980 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1981 u_long iobase = dev->base_addr;
1982 s32 sts, time, csr12;
1983
1984 reset_init_sia(dev, csr13, csr14, csr15);
1985
1986
1987 load_ms_timer(dev, msec);
1988
1989
1990 sts = inl(DE4X5_STS);
1991 outl(sts, DE4X5_STS);
1992
1993
1994 csr12 = inl(DE4X5_SISR);
1995 outl(csr12, DE4X5_SISR);
1996
1997
1998 do {
1999 time = inl(DE4X5_GPT) & GPT_VAL;
2000 sts = inl(DE4X5_STS);
2001 } while ((time != 0) && !(sts & irqs));
2002
2003 sts = inl(DE4X5_STS);
2004
2005 return sts;
2006 }
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030 static int ping_media(struct device *dev)
2031 {
2032 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2033 u_long iobase = dev->base_addr;
2034 int i, entry, linkBad;
2035 s32 omr, t_3s = 4000;
2036 char frame[64];
2037
2038 create_packet(dev, frame, sizeof(frame));
2039
2040 entry = lp->tx_new;
2041 load_packet(dev, frame, TD_LS | TD_FS | sizeof(frame),NULL);
2042
2043 omr = inl(DE4X5_OMR);
2044 outl(omr|OMR_ST, DE4X5_OMR);
2045
2046 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
2047 lp->tx_old = lp->tx_new;
2048
2049
2050 for (linkBad=1,i=0;(i<t_3s) && linkBad;i++) {
2051 if ((inl(DE4X5_SISR) & SISR_NCR) != 0) break;
2052 if (lp->tx_ring[entry].status >= 0) linkBad=0;
2053 dce_ms_delay(1);
2054 }
2055 outl(omr, DE4X5_OMR);
2056
2057 return ((linkBad || (lp->tx_ring[entry].status & TD_ES)) ? 1 : 0);
2058 }
2059
2060
2061
2062
2063
2064 static int test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec)
2065 {
2066 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2067 u_long iobase = dev->base_addr;
2068 s32 sts, ans;
2069
2070 outl(irq_mask, DE4X5_IMR);
2071
2072
2073 load_ms_timer(dev, msec);
2074
2075
2076 sts = inl(DE4X5_STS);
2077 outl(sts, DE4X5_STS);
2078
2079
2080 do {
2081 ans = inl(DE4X5_SISR) & SISR_ANS;
2082 sts = inl(DE4X5_STS);
2083 } while (!(sts & irqs) && (ans ^ ANS_NWOK) != 0);
2084
2085 return ((sts & STS_LNP) && ((ans ^ ANS_NWOK) == 0) ? STS_LNP : 0);
2086 }
2087
2088
2089
2090
2091 static void reset_init_sia(struct device *dev, s32 sicr, s32 strr, s32 sigr)
2092 {
2093 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2094 u_long iobase = dev->base_addr;
2095
2096 RESET_SIA;
2097 outl(sigr, DE4X5_SIGR);
2098 outl(strr, DE4X5_STRR);
2099 outl(sicr, DE4X5_SICR);
2100
2101 return;
2102 }
2103
2104
2105
2106
2107 static void load_ms_timer(struct device *dev, u32 msec)
2108 {
2109 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2110 u_long iobase = dev->base_addr;
2111 s32 i = 2048, j;
2112
2113 if (lp->chipset == DC21140) {
2114 j = inl(DE4X5_OMR);
2115 if ((j & OMR_TTM) && (j & OMR_PS)) {
2116 i = 8192;
2117 } else if ((~j & OMR_TTM) && (j & OMR_PS)) {
2118 i = 819;
2119 }
2120 }
2121
2122 outl((s32)(msec * 10000)/i, DE4X5_GPT);
2123
2124 return;
2125 }
2126
2127
2128
2129
2130 static void create_packet(struct device *dev, char *frame, int len)
2131 {
2132 int i;
2133 char *buf = frame;
2134
2135 for (i=0; i<ETH_ALEN; i++) {
2136 *buf++ = dev->dev_addr[i];
2137 }
2138 for (i=0; i<ETH_ALEN; i++) {
2139 *buf++ = dev->dev_addr[i];
2140 }
2141
2142 *buf++ = 0;
2143 *buf++ = 1;
2144
2145 return;
2146 }
2147
2148
2149
2150
2151 static void dce_us_delay(u32 usec)
2152 {
2153 udelay(usec);
2154
2155 return;
2156 }
2157
2158
2159
2160
2161 static void dce_ms_delay(u32 msec)
2162 {
2163 u_int i;
2164
2165 for (i=0; i<msec; i++) {
2166 dce_us_delay(1000);
2167 }
2168
2169 return;
2170 }
2171
2172
2173
2174
2175
2176 static int EISA_signature(char *name, s32 eisa_id)
2177 {
2178 u_int i;
2179 const char *signatures[] = DE4X5_SIGNATURE;
2180 char ManCode[DE4X5_STRLEN];
2181 union {
2182 s32 ID;
2183 char Id[4];
2184 } Eisa;
2185 int status = 0;
2186
2187 *name = '\0';
2188 Eisa.ID = inl(eisa_id);
2189
2190 ManCode[0]=(((Eisa.Id[0]>>2)&0x1f)+0x40);
2191 ManCode[1]=(((Eisa.Id[1]&0xe0)>>5)+((Eisa.Id[0]&0x03)<<3)+0x40);
2192 ManCode[2]=(((Eisa.Id[2]>>4)&0x0f)+0x30);
2193 ManCode[3]=((Eisa.Id[2]&0x0f)+0x30);
2194 ManCode[4]=(((Eisa.Id[3]>>4)&0x0f)+0x30);
2195 ManCode[5]='\0';
2196
2197 for (i=0;(*signatures[i] != '\0') && (*name == '\0');i++) {
2198 if (strstr(ManCode, signatures[i]) != NULL) {
2199 strcpy(name,ManCode);
2200 status = 1;
2201 }
2202 }
2203
2204 return status;
2205 }
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
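/* Decide whether a DECchip is present: on the DC21040 look for the Ethernet
   address PROM signature in the serial address ROM stream; on the later
   chips read the whole SROM into the bus structure for use by
   get_hw_addr(). */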
2219 static int DevicePresent(u_long aprom_addr)
2220 {
2221 union {
2222 struct {
2223 u32 a;
2224 u32 b;
2225 } llsig;
2226 char Sig[sizeof(u32) << 1];
2227 } dev;
2228 char data;
2229 int i, j, tmp, status = 0;
2230 short sigLength;
2231 struct bus_type *lp = &bus;
2232
2233 dev.llsig.a = ETH_PROM_SIG;
2234 dev.llsig.b = ETH_PROM_SIG;
2235 sigLength = sizeof(u32) << 1;
2236
2237 if (lp->chipset == DC21040) {
2238 for (i=0,j=0;(j<sigLength) && (i<PROBE_LENGTH+sigLength-1);i++) {
2239 if (lp->bus == PCI) {
2240 while ((tmp = inl(aprom_addr)) < 0);
2241 data = (char)tmp;
2242 } else {
2243 data = inb(aprom_addr);
2244 }
2245 if (dev.Sig[j] == data) {
2246 j++;
2247 } else {
2248 if (data == dev.Sig[0]) {
2249 j=1;
2250 } else {
2251 j=0;
2252 }
2253 }
2254 }
2255
2256 if (j!=sigLength) {
2257 status = -ENODEV;
2258 }
2259
2260 } else {
2261 short *p = (short *)&lp->srom;
2262 for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
2263 *p++ = srom_rd(aprom_addr, i);
2264 }
2265 }
2266
2267 return status;
2268 }
2269
2270 static int get_hw_addr(struct device *dev)
2271 {
2272 u_long iobase = dev->base_addr;
2273 int i, k, tmp, status = 0;
2274 u_short j,chksum;
2275 struct bus_type *lp = &bus;
2276
2277 for (i=0,k=0,j=0;j<3;j++) {
2278 k <<= 1 ;
2279 if (k > 0xffff) k-=0xffff;
2280
2281 if (lp->bus == PCI) {
2282 if (lp->chipset == DC21040) {
2283 while ((tmp = inl(DE4X5_APROM)) < 0);
2284 k += (u_char) tmp;
2285 dev->dev_addr[i++] = (u_char) tmp;
2286 while ((tmp = inl(DE4X5_APROM)) < 0);
2287 k += (u_short) (tmp << 8);
2288 dev->dev_addr[i++] = (u_char) tmp;
2289 } else {
2290 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
2291 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
2292 }
2293 } else {
2294 k += (u_char) (tmp = inb(EISA_APROM));
2295 dev->dev_addr[i++] = (u_char) tmp;
2296 k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
2297 dev->dev_addr[i++] = (u_char) tmp;
2298 }
2299
2300 if (k > 0xffff) k-=0xffff;
2301 }
2302 if (k == 0xffff) k=0;
2303
2304 if (lp->bus == PCI) {
2305 if (lp->chipset == DC21040) {
2306 while ((tmp = inl(DE4X5_APROM)) < 0);
2307 chksum = (u_char) tmp;
2308 while ((tmp = inl(DE4X5_APROM)) < 0);
2309 chksum |= (u_short) (tmp << 8);
2310 if (k != chksum) status = -1;
2311 }
2312 } else {
2313 chksum = (u_char) inb(EISA_APROM);
2314 chksum |= (u_short) (inb(EISA_APROM) << 8);
2315 if (k != chksum) status = -1;
2316 }
2317
2318
2319 return status;
2320 }
2321
2322
2323
2324
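/* Bit-banged SROM access: srom_rd() selects the serial ROM, clocks out the
   read command and word address and then clocks the 16 data bits back in via
   the helper routines below. */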
2325 static short srom_rd(u_long addr, u_char offset)
2326 {
2327 sendto_srom(SROM_RD | SROM_SR, addr);
2328
2329 srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
2330 srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
2331 srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
2332
2333 return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
2334 }
2335
2336 static void srom_latch(u_int command, u_long addr)
2337 {
2338 sendto_srom(command, addr);
2339 sendto_srom(command | DT_CLK, addr);
2340 sendto_srom(command, addr);
2341
2342 return;
2343 }
2344
2345 static void srom_command(u_int command, u_long addr)
2346 {
2347 srom_latch(command, addr);
2348 srom_latch(command, addr);
2349 srom_latch((command & 0x0000ff00) | DT_CS, addr);
2350
2351 return;
2352 }
2353
2354 static void srom_address(u_int command, u_long addr, u_char offset)
2355 {
2356 int i;
2357 char a;
2358
2359 a = (char)(offset << 2);
2360 for (i=0; i<6; i++, a <<= 1) {
2361 srom_latch(command | ((a < 0) ? DT_IN : 0), addr);
2362 }
2363 dce_us_delay(1);
2364
2365 i = (getfrom_srom(addr) >> 3) & 0x01;
2366 if (i != 0) {
2367 printk("Bad SROM address phase.....\n");
2368
2369 }
2370
2371 return;
2372 }
2373
2374 static short srom_data(u_int command, u_long addr)
2375 {
2376 int i;
2377 short word = 0;
2378 s32 tmp;
2379
2380 for (i=0; i<16; i++) {
2381 sendto_srom(command | DT_CLK, addr);
2382 tmp = getfrom_srom(addr);
2383 sendto_srom(command, addr);
2384
2385 word = (word << 1) | ((tmp >> 3) & 0x01);
2386 }
2387
2388 sendto_srom(command & 0x0000ff00, addr);
2389
2390 return word;
2391 }
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408 static void sendto_srom(u_int command, u_long addr)
2409 {
2410 outl(command, addr);
2411 dce_us_delay(1);
2412
2413 return;
2414 }
2415
2416 static int getfrom_srom(u_long addr)
2417 {
2418 s32 tmp;
2419
2420 tmp = inl(addr);
2421 dce_us_delay(1);
2422
2423 return tmp;
2424 }
2425
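/* Seed the setup frame with the station address, either in the imperfect
   (hash) filter's physical address slot or as perfect filter entries plus a
   broadcast entry; the returned pointer is where SetMulticastFilter()
   appends further perfect entries. */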
2426 static char *build_setup_frame(struct device *dev, int mode)
2427 {
2428 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2429 int i;
2430 char *pa = lp->setup_frame;
2431
2432
2433 if (mode == ALL) {
2434 memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
2435 }
2436
2437 if (lp->setup_f == HASH_PERF) {
2438 for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
2439 *(pa + i) = dev->dev_addr[i];
2440 if (i & 0x01) pa += 2;
2441 }
2442 *(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80;
2443 } else {
2444 for (i=0; i<ETH_ALEN; i++) {
2445 *(pa + (i&1)) = dev->dev_addr[i];
2446 if (i & 0x01) pa += 4;
2447 }
2448 for (i=0; i<ETH_ALEN; i++) {
2449 *(pa + (i&1)) = (char) 0xff;
2450 if (i & 0x01) pa += 4;
2451 }
2452 }
2453
2454 return pa;
2455 }
2456
2457 static void enable_ast(struct device *dev, u32 time_out)
2458 {
2459 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2460 u_long iobase = dev->base_addr;
2461
2462 lp->irq_mask |= IMR_TMM;
2463 outl(lp->irq_mask, DE4X5_IMR);
2464 load_ms_timer(dev, time_out);
2465
2466 return;
2467 }
2468
2469 static void disable_ast(struct device *dev)
2470 {
2471 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2472 u_long iobase = dev->base_addr;
2473
2474 lp->irq_mask &= ~IMR_TMM;
2475 outl(lp->irq_mask, DE4X5_IMR);
2476 load_ms_timer(dev, 0);
2477
2478 return;
2479 }
2480
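/* Queue a minimal skb marked with FAKE_FRAME_LEN; de4x5_queue_pkt()
   recognises it and uses it purely to trigger its lost-media restart
   logic. */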
2481 static void kick_tx(struct device *dev)
2482 {
2483 struct sk_buff *skb;
2484
2485 if ((skb = alloc_skb(0, GFP_ATOMIC)) != NULL) {
2486 skb->len= FAKE_FRAME_LEN;
2487 skb->arp=1;
2488 skb->dev=dev;
2489 dev_queue_xmit(skb, dev, SOPRI_NORMAL);
2490 }
2491
2492 return;
2493 }
2494
2495
2496
2497
2498
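/* Private ioctl interface: the DE4X5_* commands below read or change the
   station address, promiscuous and multicast state, the driver's packet
   statistics and the chip registers. */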
2499 static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd)
2500 {
2501 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2502 struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_data;
2503 u_long iobase = dev->base_addr;
2504 int i, j, status = 0;
2505 s32 omr;
2506 union {
2507 u8 addr[(HASH_TABLE_LEN * ETH_ALEN)];
2508 u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
2509 u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2];
2510 } tmp;
2511
2512 switch(ioc->cmd) {
2513 case DE4X5_GET_HWADDR:
2514 ioc->len = ETH_ALEN;
2515 status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
2516 if (status)
2517 break;
2518 for (i=0; i<ETH_ALEN; i++) {
2519 tmp.addr[i] = dev->dev_addr[i];
2520 }
2521 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
2522
2523 break;
2524 case DE4X5_SET_HWADDR:
2525 status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN);
2526 if (status)
2527 break;
2528 status = -EPERM;
2529 if (!suser())
2530 break;
2531 status = 0;
2532 memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN);
2533 for (i=0; i<ETH_ALEN; i++) {
2534 dev->dev_addr[i] = tmp.addr[i];
2535 }
2536 build_setup_frame(dev, PHYS_ADDR_ONLY);
2537
2538 while (set_bit(0, (void *)&dev->tbusy) != 0);
2539 if (lp->setup_f == HASH_PERF) {
2540 load_packet(dev, lp->setup_frame, TD_IC | HASH_F | TD_SET |
2541 SETUP_FRAME_LEN, NULL);
2542 } else {
2543 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
2544 SETUP_FRAME_LEN, NULL);
2545 }
2546 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
2547 outl(POLL_DEMAND, DE4X5_TPD);
2548 dev->tbusy = 0;
2549
2550 break;
2551 case DE4X5_SET_PROM:
2552 if (suser()) {
2553 omr = inl(DE4X5_OMR);
2554 omr |= OMR_PR;
2555 outl(omr, DE4X5_OMR);
2556 } else {
2557 status = -EPERM;
2558 }
2559
2560 break;
2561 case DE4X5_CLR_PROM:
2562 if (suser()) {
2563 omr = inl(DE4X5_OMR);
2564 omr &= ~OMR_PR;
2565 outb(omr, DE4X5_OMR);
2566 } else {
2567 status = -EPERM;
2568 }
2569
2570 break;
2571 case DE4X5_SAY_BOO:
2572 printk("%s: Boo!\n", dev->name);
2573
2574 break;
2575 case DE4X5_GET_MCA:
2576 ioc->len = (HASH_TABLE_LEN >> 3);
2577 status = verify_area(VERIFY_WRITE, ioc->data, ioc->len);
2578 if (status)
2579 break;
2580 memcpy_tofs(ioc->data, lp->setup_frame, ioc->len);
2581
2582 break;
2583 case DE4X5_SET_MCA:
2584 if (suser()) {
2585 if (ioc->len != HASH_TABLE_LEN) {
2586 if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN * ioc->len))) {
2587 memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN * ioc->len);
2588 set_multicast_list(dev, ioc->len, tmp.addr);
2589 }
2590 } else {
2591 set_multicast_list(dev, ioc->len, NULL);
2592 }
2593 } else {
2594 status = -EPERM;
2595 }
2596
2597 break;
2598 case DE4X5_CLR_MCA:
2599 if (suser()) {
2600 set_multicast_list(dev, 0, NULL);
2601 } else {
2602 status = -EPERM;
2603 }
2604
2605 break;
2606 case DE4X5_MCA_EN:
2607 if (suser()) {
2608 omr = inl(DE4X5_OMR);
2609 omr |= OMR_PM;
2610 outl(omr, DE4X5_OMR);
2611 } else {
2612 status = -EPERM;
2613 }
2614
2615 break;
2616 case DE4X5_GET_STATS:
2617 ioc->len = sizeof(lp->pktStats);
2618 status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
2619 if (status)
2620 break;
2621
2622 cli();
2623 memcpy_tofs(ioc->data, &lp->pktStats, ioc->len);
2624 sti();
2625
2626 break;
2627 case DE4X5_CLR_STATS:
2628 if (suser()) {
2629 cli();
2630 memset(&lp->pktStats, 0, sizeof(lp->pktStats));
2631 sti();
2632 } else {
2633 status = -EPERM;
2634 }
2635
2636 break;
2637 case DE4X5_GET_OMR:
2638 tmp.addr[0] = inl(DE4X5_OMR);
2639 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, 1))) {
2640 memcpy_tofs(ioc->data, tmp.addr, 1);
2641 }
2642
2643 break;
2644 case DE4X5_SET_OMR:
2645 if (suser()) {
2646 if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, 1))) {
2647 memcpy_fromfs(tmp.addr, ioc->data, 1);
2648 outl(tmp.addr[0], DE4X5_OMR);
2649 }
2650 } else {
2651 status = -EPERM;
2652 }
2653
2654 break;
2655 case DE4X5_GET_REG:
2656 j = 0;
2657 tmp.lval[0] = inl(DE4X5_STS); j+=4;
2658 tmp.lval[1] = inl(DE4X5_BMR); j+=4;
2659 tmp.lval[2] = inl(DE4X5_IMR); j+=4;
2660 tmp.lval[3] = inl(DE4X5_OMR); j+=4;
2661 tmp.lval[4] = inl(DE4X5_SISR); j+=4;
2662 tmp.lval[5] = inl(DE4X5_SICR); j+=4;
2663 tmp.lval[6] = inl(DE4X5_STRR); j+=4;
2664 tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
2665 ioc->len = j;
2666 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
2667 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
2668 }
2669 break;
2670
2671 #define DE4X5_DUMP 0x0f
2672
2673 case DE4X5_DUMP:
2674 j = 0;
2675 tmp.addr[j++] = dev->irq;
2676 for (i=0; i<ETH_ALEN; i++) {
2677 tmp.addr[j++] = dev->dev_addr[i];
2678 }
2679 tmp.addr[j++] = lp->rxRingSize;
2680 tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
2681 tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
2682
2683 for (i=0;i<lp->rxRingSize-1;i++){
2684 if (i < 3) {
2685 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
2686 }
2687 }
2688 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
2689 for (i=0;i<lp->txRingSize-1;i++){
2690 if (i < 3) {
2691 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
2692 }
2693 }
2694 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
2695
2696 for (i=0;i<lp->rxRingSize-1;i++){
2697 if (i < 3) {
2698 tmp.lval[j>>2] = (s32)lp->rx_ring[i].buf; j+=4;
2699 }
2700 }
2701 tmp.lval[j>>2] = (s32)lp->rx_ring[i].buf; j+=4;
2702 for (i=0;i<lp->txRingSize-1;i++){
2703 if (i < 3) {
2704 tmp.lval[j>>2] = (s32)lp->tx_ring[i].buf; j+=4;
2705 }
2706 }
2707 tmp.lval[j>>2] = (s32)lp->tx_ring[i].buf; j+=4;
2708
2709 for (i=0;i<lp->rxRingSize;i++){
2710 tmp.lval[j>>2] = lp->rx_ring[i].status; j+=4;
2711 }
2712 for (i=0;i<lp->txRingSize;i++){
2713 tmp.lval[j>>2] = lp->tx_ring[i].status; j+=4;
2714 }
2715
2716 tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
2717 tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
2718 tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
2719 tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
2720 tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
2721 tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
2722 tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
2723 tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
2724
2725 tmp.addr[j++] = lp->txRingSize;
2726 tmp.addr[j++] = dev->tbusy;
2727
2728 ioc->len = j;
2729 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
2730 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
2731 }
2732
2733 break;
2734 default:
2735 status = -EOPNOTSUPP;
2736 }
2737
2738 return status;
2739 }
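/* Illustrative sketch only -- not part of the driver.  A user-space utility
   would embed a struct de4x5_ioctl (the cmd/len/data triple from de4x5.h) in
   the ifreq union and issue a private ioctl, roughly as below; the socket,
   buffer and interface names are hypothetical and the request is assumed to
   be routed here via SIOCDEVPRIVATE:

       struct ifreq ifr;
       struct de4x5_ioctl *ioc = (struct de4x5_ioctl *)&ifr.ifr_data;
       u_char hwaddr[ETH_ALEN];

       strcpy(ifr.ifr_name, "eth0");
       ioc->cmd  = DE4X5_GET_HWADDR;
       ioc->len  = ETH_ALEN;
       ioc->data = (char *)hwaddr;
       ioctl(fd, SIOCDEVPRIVATE, &ifr);     (fd: any AF_INET socket)
*/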
2740
2741 #ifdef MODULE
2742 static char devicename[9] = { 0, };
2743 static struct device thisDE4X5 = {
2744 devicename,
2745 0, 0, 0, 0,
2746 0x2000, 10,
2747 0, 0, 0, NULL, de4x5_probe };
2748
2749 static int io=0x000b;
2750 static int irq=10;
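/* For a PCI board, pci_probe() interprets `io' values below 0x1000 as
   (bus << 8) | device, so the default 0x000b selects bus 0, device 11; both
   io and irq are normally overridden when the module is loaded. */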
2751
2752 int
2753 init_module(void)
2754 {
2755 thisDE4X5.base_addr=io;
2756 thisDE4X5.irq=irq;
2757 if (register_netdev(&thisDE4X5) != 0)
2758 return -EIO;
2759 return 0;
2760 }
2761
2762 void
2763 cleanup_module(void)
2764 {
2765 struct de4x5_private *lp = (struct de4x5_private *) thisDE4X5.priv;
2766
2767 if (lp) {
2768 release_region(thisDE4X5.base_addr, (lp->bus == PCI ?
2769 DE4X5_PCI_TOTAL_SIZE :
2770 DE4X5_EISA_TOTAL_SIZE));
2771 kfree_s(bus_to_virt(lp->rx_ring[0].buf), RX_BUFF_SZ * NUM_RX_DESC + ALIGN);
2772 kfree_s(thisDE4X5.priv, sizeof(struct de4x5_private) + ALIGN);
2773 thisDE4X5.priv = NULL;
2774 }
2775
2776 unregister_netdev(&thisDE4X5);
2777 }
2778 #endif
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789