This source file includes the following definitions:
- de4x5_probe
- de4x5_hw_init
- de4x5_open
- de4x5_init
- de4x5_queue_pkt
- de4x5_interrupt
- de4x5_rx
- de4x5_tx
- de4x5_ast
- de4x5_close
- de4x5_get_stats
- load_packet
- set_multicast_list
- SetMulticastFilter
- eisa_probe
- pci_probe
- alloc_device
- autoconf_media
- dc21040_autoconf
- dc21041_autoconf
- dc21140_autoconf
- test_media
- ping_media
- test_ans
- reset_init_sia
- load_ms_timer
- create_packet
- dce_us_delay
- dce_ms_delay
- EISA_signature
- DevicePresent
- get_hw_addr
- srom_rd
- srom_latch
- srom_command
- srom_address
- srom_data
- sendto_srom
- getfrom_srom
- build_setup_frame
- enable_ast
- disable_ast
- kick_tx
- de4x5_ioctl
- init_module
- cleanup_module
144 static const char *version = "de4x5.c:v0.32 6/26/95 davies@wanton.lkg.dec.com\n";
145
146 #include <linux/config.h>
147 #ifdef MODULE
148 #include <linux/module.h>
149 #include <linux/version.h>
150 #else
151 #define MOD_INC_USE_COUNT
152 #define MOD_DEC_USE_COUNT
153 #endif
154
155 #include <linux/kernel.h>
156 #include <linux/sched.h>
157 #include <linux/string.h>
158 #include <linux/interrupt.h>
159 #include <linux/ptrace.h>
160 #include <linux/errno.h>
161 #include <linux/ioport.h>
162 #include <linux/malloc.h>
163 #include <linux/bios32.h>
164 #include <linux/pci.h>
165 #include <linux/delay.h>
166 #include <asm/bitops.h>
167 #include <asm/io.h>
168 #include <asm/dma.h>
169 #include <asm/segment.h>
170
171 #include <linux/netdevice.h>
172 #include <linux/etherdevice.h>
173 #include <linux/skbuff.h>
174
175 #include <linux/time.h>
176 #include <linux/types.h>
177 #include <linux/unistd.h>
178
179 #include "de4x5.h"
180
181 #ifdef DE4X5_DEBUG
182 static int de4x5_debug = DE4X5_DEBUG;
183 #else
184 static int de4x5_debug = 1;
185 #endif
186
187 #ifdef DE4X5_AUTOSENSE
188 static int de4x5_autosense = DE4X5_AUTOSENSE;
189 #else
190 static int de4x5_autosense = AUTO;
191 #endif
192
193 #ifdef DE4X5_FULL_DUPLEX
194 static s32 de4x5_full_duplex = 1;
195 #else
196 static s32 de4x5_full_duplex = 0;
197 #endif
198
199 #define DE4X5_NDA 0xffe0
200
201
202
203
204 #define PROBE_LENGTH 32
205 #define ETH_PROM_SIG 0xAA5500FFUL
206
207
208
209
210 #define PKT_BUF_SZ 1536
211 #define MAX_PKT_SZ 1514
212 #define MAX_DAT_SZ 1500
213 #define MIN_DAT_SZ 1
214 #define PKT_HDR_LEN 14
215 #define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
216 #define QUEUE_PKT_TIMEOUT (3*HZ)
217
218
219 #define CRC_POLYNOMIAL_BE 0x04c11db7UL
220 #define CRC_POLYNOMIAL_LE 0xedb88320UL
221
222
223
224
225 #define DE4X5_EISA_IO_PORTS 0x0c00
226 #define DE4X5_EISA_TOTAL_SIZE 0xfff
227
228 #define MAX_EISA_SLOTS 16
229 #define EISA_SLOT_INC 0x1000
230
231 #define DE4X5_SIGNATURE {"DE425",""}
232 #define DE4X5_NAME_LENGTH 8
233
234
235
236
237 #define PCI_MAX_BUS_NUM 8
238 #define DE4X5_PCI_TOTAL_SIZE 0x80
239 #define DE4X5_CLASS_CODE 0x00020000
240
241
242
243
244
245
246
247 #define ALIGN4 ((u_long)4 - 1)
248 #define ALIGN8 ((u_long)8 - 1)
249 #define ALIGN16 ((u_long)16 - 1)
250 #define ALIGN32 ((u_long)32 - 1)
251 #define ALIGN64 ((u_long)64 - 1)
252 #define ALIGN128 ((u_long)128 - 1)
253
254 #define ALIGN ALIGN32
255 #define CACHE_ALIGN CAL_16LONG
256 #define DESC_SKIP_LEN DSL_0
257
258 #define DESC_ALIGN
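/*
** Editor's note: the ALIGNn values above are masks (boundary - 1), not sizes.
** A pointer is rounded up to the next boundary with, for example:
**
**     p = (void *)(((u_long)p + ALIGN) & ~ALIGN);
**
** which is the idiom used below to align dev->priv and the Rx data buffers.
*/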
259
260 #ifndef IS_NOT_DEC
261 static int is_not_dec = 0;
262 #else
263 static int is_not_dec = 1;
264 #endif
265
266
267
268
269 #define ENABLE_IRQs { \
270 imr |= lp->irq_en;\
271 outl(imr, DE4X5_IMR); \
272 }
273
274 #define DISABLE_IRQs {\
275 imr = inl(DE4X5_IMR);\
276 imr &= ~lp->irq_en;\
277 outl(imr, DE4X5_IMR); \
278 }
279
280 #define UNMASK_IRQs {\
281 imr |= lp->irq_mask;\
282 outl(imr, DE4X5_IMR); \
283 }
284
285 #define MASK_IRQs {\
286 imr = inl(DE4X5_IMR);\
287 imr &= ~lp->irq_mask;\
288 outl(imr, DE4X5_IMR); \
289 }
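/*
** Editor's note: these macros assume a local 's32 imr' in the caller, and the
** DE4X5_* register names from de4x5.h appear to resolve against locals that
** every caller declares (typically 'u_long iobase = dev->base_addr' and the
** private 'lp').  Typical use:
**
**     s32 imr;
**     DISABLE_IRQs;       -- mask this board's interrupt sources
**     ... critical section ...
**     ENABLE_IRQs;
*/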
290
291
292
293
294 #define START_DE4X5 {\
295 omr = inl(DE4X5_OMR);\
296 omr |= OMR_ST | OMR_SR;\
297 outl(omr, DE4X5_OMR); \
298 }
299
300 #define STOP_DE4X5 {\
301 omr = inl(DE4X5_OMR);\
302 omr &= ~(OMR_ST|OMR_SR);\
303 outl(omr, DE4X5_OMR); \
304 }
305
306
307
308
309 #define RESET_SIA outl(0, DE4X5_SICR);
310
311
312
313
314 #define DE4X5_AUTOSENSE_MS 250
315
316
317
318
319 struct de4x5_srom {
320 char reserved[18];
321 char version;
322 char num_adapters;
323 char ieee_addr[6];
324 char info[100];
325 short chksum;
326 };
327
328
329
330
331
332
333
334
335
336 #define NUM_RX_DESC 8
337 #define NUM_TX_DESC 32
338 #define BUFF_ALLOC_RETRIES 10
339 #define RX_BUFF_SZ 1536
340
341 struct de4x5_desc {
342 volatile s32 status;
343 u32 des1;
344 u32 buf;
345 u32 next;
346 DESC_ALIGN
347 };
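/*
** Editor's note: this struct mirrors the 21x4x in-memory descriptor layout --
** status (ownership and error bits), des1 (control bits and buffer byte
** counts) and two addresses as seen from the bus.  'buf' is therefore filled
** with virt_to_bus() values (see load_packet() and the ring setup in
** de4x5_hw_init()); the rings here are laid out contiguously and terminated
** with the RD_RER/TD_TER end-of-ring bits rather than through 'next'.
*/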
348
349
350
351
352 #define DE4X5_PKT_STAT_SZ 16
353 #define DE4X5_PKT_BIN_SZ 128
354
355
356 struct de4x5_private {
357 char adapter_name[80];
358 struct de4x5_desc rx_ring[NUM_RX_DESC];
359 struct de4x5_desc tx_ring[NUM_TX_DESC];
360 struct sk_buff *skb[NUM_TX_DESC];
361 int rx_new, rx_old;
362 int tx_new, tx_old;
363 char setup_frame[SETUP_FRAME_LEN];
364 struct enet_statistics stats;
365 struct {
366 u_int bins[DE4X5_PKT_STAT_SZ];
367 u_int unicast;
368 u_int multicast;
369 u_int broadcast;
370 u_int excessive_collisions;
371 u_int tx_underruns;
372 u_int excessive_underruns;
373 } pktStats;
374 char rxRingSize;
375 char txRingSize;
376 int bus;
377 int bus_num;
378 int chipset;
379 s32 irq_mask;
380 s32 irq_en;
381 int media;
382 int linkProb;
383 int autosense;
384 int tx_enable;
385 int lostMedia;
386 int setup_f;
387 };
388
389
390
391
392
393
394
395
396
397 #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
398 lp->tx_old+lp->txRingSize-lp->tx_new-1:\
399 lp->tx_old -lp->tx_new-1)
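/*
** Editor's note: TX_BUFFS_AVAIL counts the free Tx descriptors between the
** producer index (tx_new) and the consumer index (tx_old), always keeping one
** slot unused so a full ring can be distinguished from an empty one.  For
** example, with txRingSize = 32, tx_old = 5 and tx_new = 4 the macro yields
** 0 (ring full); with tx_old == tx_new it yields 31 (ring empty).
*/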
400
401
402
403
404 static int de4x5_open(struct device *dev);
405 static int de4x5_queue_pkt(struct sk_buff *skb, struct device *dev);
406 static void de4x5_interrupt(int irq, struct pt_regs *regs);
407 static int de4x5_close(struct device *dev);
408 static struct enet_statistics *de4x5_get_stats(struct device *dev);
409 static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);
410 static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd);
411
412
413
414
415 static int de4x5_hw_init(struct device *dev, u_long iobase);
416 static int de4x5_init(struct device *dev);
417 static int de4x5_rx(struct device *dev);
418 static int de4x5_tx(struct device *dev);
419 static int de4x5_ast(struct device *dev);
420
421 static int autoconf_media(struct device *dev);
422 static void create_packet(struct device *dev, char *frame, int len);
423 static void dce_us_delay(u32 usec);
424 static void dce_ms_delay(u32 msec);
425 static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb);
426 static void dc21040_autoconf(struct device *dev);
427 static void dc21041_autoconf(struct device *dev);
428 static void dc21140_autoconf(struct device *dev);
429 static int test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
430
431 static int ping_media(struct device *dev);
432 static void reset_init_sia(struct device *dev, s32 sicr, s32 strr, s32 sigr);
433 static int test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec);
434 static void load_ms_timer(struct device *dev, u32 msec);
435 static int EISA_signature(char *name, s32 eisa_id);
436 static int DevicePresent(u_long iobase);
437 static short srom_rd(u_long address, u_char offset);
438 static void srom_latch(u_int command, u_long address);
439 static void srom_command(u_int command, u_long address);
440 static void srom_address(u_int command, u_long address, u_char offset);
441 static short srom_data(u_int command, u_long address);
442
443 static void sendto_srom(u_int command, u_long addr);
444 static int getfrom_srom(u_long addr);
445 static void SetMulticastFilter(struct device *dev, int num_addrs, char *addrs);
446 static int get_hw_addr(struct device *dev);
447
448 static void eisa_probe(struct device *dev, u_long iobase);
449 static void pci_probe(struct device *dev, u_long iobase);
450 static struct device *alloc_device(struct device *dev, u_long iobase);
451 static char *build_setup_frame(struct device *dev, int mode);
452 static void disable_ast(struct device *dev);
453 static void enable_ast(struct device *dev, u32 time_out);
454 static void kick_tx(struct device *dev);
455
456 #ifdef MODULE
457 int init_module(void);
458 void cleanup_module(void);
459 static int autoprobed = 1, loading_module = 1;
460 # else
461 static unsigned char de4x5_irq[] = {5,9,10,11};
462 static int autoprobed = 0, loading_module = 0;
463 #endif
464
465 static char name[DE4X5_NAME_LENGTH + 1];
466 static int num_de4x5s = 0, num_eth = 0;
467
468
469
470
471
472
473 static struct bus_type {
474 int bus;
475 int bus_num;
476 int device;
477 int chipset;
478 struct de4x5_srom srom;
479 int autosense;
480 } bus;
481
482
483
484
485 #define RESET_DE4X5 {\
486 int i;\
487 i=inl(DE4X5_BMR);\
488 dce_ms_delay(1);\
489 outl(i | BMR_SWR, DE4X5_BMR);\
490 dce_ms_delay(1);\
491 outl(i, DE4X5_BMR);\
492 dce_ms_delay(1);\
493 for (i=0;i<5;i++) {inl(DE4X5_BMR); dce_ms_delay(1);}\
494 dce_ms_delay(1);\
495 }
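/*
** Editor's note: RESET_DE4X5 performs the 21x4x software reset -- read the
** current bus mode, set BMR_SWR, wait, restore the original BMR value, then
** pad with dummy BMR reads and millisecond delays so the chip has finished
** its internal reset before any other CSR is touched.
*/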
496
497
498
499 int de4x5_probe(struct device *dev)
500 {
501 int tmp = num_de4x5s, status = -ENODEV;
502 u_long iobase = dev->base_addr;
503
504 if ((iobase == 0) && loading_module){
505 printk("Autoprobing is not supported when loading a module based driver.\n");
506 status = -EIO;
507 } else {
508 eisa_probe(dev, iobase);
509 pci_probe(dev, iobase);
510
511 if ((tmp == num_de4x5s) && (iobase != 0) && loading_module) {
512 printk("%s: de4x5_probe() cannot find device at 0x%04lx.\n", dev->name,
513 iobase);
514 }
515
516
517
518
519
520 for (; (dev->priv == NULL) && (dev->next != NULL); dev = dev->next);
521
522 if (dev->priv) status = 0;
523 if (iobase == 0) autoprobed = 1;
524 }
525
526 return status;
527 }
528
529 static int
530 de4x5_hw_init(struct device *dev, u_long iobase)
531 {
532 struct bus_type *lp = &bus;
533 int tmpbus, tmpchs, i, j, status=0;
534 char *tmp;
535
536
537 if (lp->chipset == DC21041) {
538 outl(0, PCI_CFDA);
539 dce_ms_delay(10);
540 }
541
542 RESET_DE4X5;
543
544 if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) == 0) {
545
546
547
548 if (lp->bus == PCI) {
549 if (!is_not_dec) {
550 if ((lp->chipset == DC21040) || (lp->chipset == DC21041)) {
551 strcpy(name, "DE435");
552 } else if (lp->chipset == DC21140) {
553 strcpy(name, "DE500");
554 }
555 } else {
556 strcpy(name, "UNKNOWN");
557 }
558 } else {
559 EISA_signature(name, EISA_ID0);
560 }
561
562 if (*name != '\0') {
563 dev->base_addr = iobase;
564 if (lp->bus == EISA) {
565 printk("%s: %s at %04lx (EISA slot %ld)",
566 dev->name, name, iobase, ((iobase>>12)&0x0f));
567 } else {
568 printk("%s: %s at %04lx (PCI bus %d, device %d)", dev->name, name,
569 iobase, lp->bus_num, lp->device);
570 }
571
572 printk(", h/w address ");
573 status = get_hw_addr(dev);
574 for (i = 0; i < ETH_ALEN - 1; i++) {
575 printk("%2.2x:", dev->dev_addr[i]);
576 }
577 printk("%2.2x,\n", dev->dev_addr[i]);
578
579 tmpbus = lp->bus;
580 tmpchs = lp->chipset;
581
582 if (status == 0) {
583 struct de4x5_private *lp;
584
585
586
587
588
589 dev->priv = (void *) kmalloc(sizeof(struct de4x5_private) + ALIGN,
590 GFP_KERNEL);
591
592
593
594 dev->priv = (void *)(((u_long)dev->priv + ALIGN) & ~ALIGN);
595 lp = (struct de4x5_private *)dev->priv;
596 memset(dev->priv, 0, sizeof(struct de4x5_private));
597 lp->bus = tmpbus;
598 lp->chipset = tmpchs;
599
600
601
602
603 if (de4x5_autosense & AUTO) {
604 lp->autosense = AUTO;
605 } else {
606 if (lp->chipset != DC21140) {
607 if ((lp->chipset == DC21040) && (de4x5_autosense & TP_NW)) {
608 de4x5_autosense = TP;
609 }
610 if ((lp->chipset == DC21041) && (de4x5_autosense & BNC_AUI)) {
611 de4x5_autosense = BNC;
612 }
613 lp->autosense = de4x5_autosense & 0x001f;
614 } else {
615 lp->autosense = de4x5_autosense & 0x00c0;
616 }
617 }
618
619 sprintf(lp->adapter_name,"%s (%s)", name, dev->name);
620 request_region(iobase, (lp->bus == PCI ? DE4X5_PCI_TOTAL_SIZE :
621 DE4X5_EISA_TOTAL_SIZE),
622 lp->adapter_name);
623
624
625
626
627
628
629 for (tmp=NULL, j=0; (j<BUFF_ALLOC_RETRIES) && (tmp==NULL); j++) {
630 if ((tmp = (void *)kmalloc(RX_BUFF_SZ * NUM_RX_DESC + ALIGN,
631 GFP_KERNEL)) != NULL) {
632 tmp = (char *)(((u_long) tmp + ALIGN) & ~ALIGN);
633 for (i=0; i<NUM_RX_DESC; i++) {
634 lp->rx_ring[i].status = 0;
635 lp->rx_ring[i].des1 = RX_BUFF_SZ;
636 lp->rx_ring[i].buf = virt_to_bus(tmp + i * RX_BUFF_SZ);
637 lp->rx_ring[i].next = (u32)NULL;
638 }
639 barrier();
640 }
641 }
642
643 if (tmp != NULL) {
644 lp->rxRingSize = NUM_RX_DESC;
645 lp->txRingSize = NUM_TX_DESC;
646
647
648 lp->rx_ring[lp->rxRingSize - 1].des1 |= RD_RER;
649 lp->tx_ring[lp->txRingSize - 1].des1 |= TD_TER;
650
651
652 outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
653 outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
654
655
656 lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM ;
657 lp->irq_en = IMR_NIM | IMR_AIM;
658
659 lp->tx_enable = TRUE;
660
661 if (dev->irq < 2) {
662 #ifndef MODULE
663 unsigned char irqnum;
664 s32 omr;
665 autoirq_setup(0);
666
667 omr = inl(DE4X5_OMR);
668 outl(IMR_AIM|IMR_RUM, DE4X5_IMR);
669 outl(OMR_SR | omr, DE4X5_OMR);
670
671 irqnum = autoirq_report(1);
672 if (!irqnum) {
673 printk(" and failed to detect IRQ line.\n");
674 status = -ENXIO;
675 } else {
676 for (dev->irq=0,i=0; (i<sizeof(de4x5_irq)) && (!dev->irq); i++) {
677 if (irqnum == de4x5_irq[i]) {
678 dev->irq = irqnum;
679 printk(" and uses IRQ%d.\n", dev->irq);
680 }
681 }
682
683 if (!dev->irq) {
684 printk(" but incorrect IRQ line detected.\n");
685 status = -ENXIO;
686 }
687 }
688
689 outl(0, DE4X5_IMR);
690
691 #endif
692 } else {
693 printk(" and requires IRQ%d (not probed).\n", dev->irq);
694 }
695 } else {
696 printk("%s: Kernel could not allocate RX buffer memory.\n",
697 dev->name);
698 status = -ENXIO;
699 }
700 if (status) release_region(iobase, (lp->bus == PCI ?
701 DE4X5_PCI_TOTAL_SIZE :
702 DE4X5_EISA_TOTAL_SIZE));
703 } else {
704 printk(" which has an Ethernet PROM CRC error.\n");
705 status = -ENXIO;
706 }
707 } else {
708 status = -ENXIO;
709 }
710 } else {
711 status = -ENXIO;
712 }
713
714 if (!status) {
715 if (de4x5_debug > 0) {
716 printk(version);
717 }
718
719
720 dev->open = &de4x5_open;
721 dev->hard_start_xmit = &de4x5_queue_pkt;
722 dev->stop = &de4x5_close;
723 dev->get_stats = &de4x5_get_stats;
724 #ifdef HAVE_MULTICAST
725 dev->set_multicast_list = &set_multicast_list;
726 #endif
727 dev->do_ioctl = &de4x5_ioctl;
728
729 dev->mem_start = 0;
730
731
732 ether_setup(dev);
733
734
735 if (lp->chipset == DC21041) {
736 outl(0, DE4X5_SICR);
737 outl(CFDA_PSM, PCI_CFDA);
738 }
739 } else {
740 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
741 if (lp) {
742 kfree_s(bus_to_virt(lp->rx_ring[0].buf),
743 RX_BUFF_SZ * NUM_RX_DESC + ALIGN);
744 }
745 if (dev->priv) {
746 kfree_s(dev->priv, sizeof(struct de4x5_private) + ALIGN);
747 dev->priv = NULL;
748 }
749 }
750
751 return status;
752 }
753
754
755 static int
756 de4x5_open(struct device *dev)
757 {
758 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
759 u_long iobase = dev->base_addr;
760 int i, status = 0;
761 s32 imr, omr, sts;
762
763
764
765
766 if (lp->chipset == DC21041) {
767 outl(0, PCI_CFDA);
768 dce_ms_delay(10);
769 }
770
771 if (request_irq(dev->irq, (void *)de4x5_interrupt, 0, lp->adapter_name)) {
772 printk("de4x5_open(): Requested IRQ%d is busy\n",dev->irq);
773 status = -EAGAIN;
774 } else {
775
776 irq2dev_map[dev->irq] = dev;
777
778
779
780 status = de4x5_init(dev);
781
782 if (de4x5_debug > 1){
783 printk("%s: de4x5 open with irq %d\n",dev->name,dev->irq);
784 printk("\tphysical address: ");
785 for (i=0;i<6;i++){
786 printk("%2.2x:",(short)dev->dev_addr[i]);
787 }
788 printk("\n");
789 printk("Descriptor head addresses:\n");
790 printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
791 printk("Descriptor addresses:\nRX: ");
792 for (i=0;i<lp->rxRingSize-1;i++){
793 if (i < 3) {
794 printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
795 }
796 }
797 printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
798 printk("TX: ");
799 for (i=0;i<lp->txRingSize-1;i++){
800 if (i < 3) {
801 printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
802 }
803 }
804 printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
805 printk("Descriptor buffers:\nRX: ");
806 for (i=0;i<lp->rxRingSize-1;i++){
807 if (i < 3) {
808 printk("0x%8.8x ",lp->rx_ring[i].buf);
809 }
810 }
811 printk("...0x%8.8x\n",lp->rx_ring[i].buf);
812 printk("TX: ");
813 for (i=0;i<lp->txRingSize-1;i++){
814 if (i < 3) {
815 printk("0x%8.8x ", lp->tx_ring[i].buf);
816 }
817 }
818 printk("...0x%8.8x\n", lp->tx_ring[i].buf);
819 printk("Ring size: \nRX: %d\nTX: %d\n",
820 (short)lp->rxRingSize,
821 (short)lp->txRingSize);
822 printk("\tstatus: %d\n", status);
823 }
824
825 if (!status) {
826 dev->tbusy = 0;
827 dev->start = 1;
828 dev->interrupt = UNMASK_INTERRUPTS;
829 dev->trans_start = jiffies;
830
831 START_DE4X5;
832
833
834 imr = 0;
835 UNMASK_IRQs;
836
837
838 sts = inl(DE4X5_STS);
839 outl(sts, DE4X5_STS);
840
841 ENABLE_IRQs;
842 }
843 if (de4x5_debug > 1) {
844 printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
845 printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
846 printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
847 printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
848 printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
849 printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
850 printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
851 printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
852 }
853 }
854
855 MOD_INC_USE_COUNT;
856
857 return status;
858 }
859
860
861
862
863
864
865
866
867
868 static int
869 de4x5_init(struct device *dev)
870 {
871 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
872 u_long iobase = dev->base_addr;
873 int i, j, status = 0;
874 s32 bmr, omr;
875
876
877 set_bit(0, (void *)&dev->tbusy);
878
879 RESET_DE4X5;
880
881 bmr = inl(DE4X5_BMR);
882 bmr |= PBL_8 | DESC_SKIP_LEN | CACHE_ALIGN;
883 outl(bmr, DE4X5_BMR);
884
885 if (lp->chipset != DC21140) {
886 omr = TR_96;
887 lp->setup_f = HASH_PERF;
888 } else {
889 omr = OMR_SDP | OMR_SF;
890 lp->setup_f = PERFECT;
891 }
892 outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
893 outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
894
895 lp->rx_new = lp->rx_old = 0;
896 lp->tx_new = lp->tx_old = 0;
897
898 for (i = 0; i < lp->rxRingSize; i++) {
899 lp->rx_ring[i].status = R_OWN;
900 }
901
902 for (i = 0; i < lp->txRingSize; i++) {
903 lp->tx_ring[i].status = 0;
904 }
905
906 barrier();
907
908
909 SetMulticastFilter(dev, 0, NULL);
910
911 if (lp->chipset != DC21140) {
912 load_packet(dev, lp->setup_frame, HASH_F|TD_SET|SETUP_FRAME_LEN, NULL);
913 } else {
914 load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, NULL);
915 }
916 outl(omr|OMR_ST, DE4X5_OMR);
917
918
919     for (j=0, i=jiffies+HZ/100;(jiffies<=i) && (j==0);) {  /* ~10ms timeout */
920 if (lp->tx_ring[lp->tx_new].status >= 0) j=1;
921 }
922 outl(omr, DE4X5_OMR);
923
924 if (j == 0) {
925 printk("%s: Setup frame timed out, status %08x\n", dev->name,
926 inl(DE4X5_STS));
927 status = -EIO;
928 }
929
930     lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
931 lp->tx_old = lp->tx_new;
932
933
934 if (autoconf_media(dev) == 0) {
935 status = -EIO;
936 }
937
938     return status;
939 }
940
941
942
943
944 static int
945 de4x5_queue_pkt(struct sk_buff *skb, struct device *dev)
946 {
947 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
948 u_long iobase = dev->base_addr;
949 int i, status = 0;
950 s32 imr, omr, sts;
951
952
953
954
955
956
957 if (set_bit(0, (void*)&dev->tbusy) == 0) {
958 cli();
959 de4x5_tx(dev);
960 dev->tbusy = 0;
961 sti();
962 }
963
964
965
966
967
968
969 if (dev->tbusy || (lp->lostMedia > LOST_MEDIA_THRESHOLD)) {
970 u_long tickssofar = jiffies - dev->trans_start;
971 if ((tickssofar < QUEUE_PKT_TIMEOUT) &&
972 (lp->lostMedia <= LOST_MEDIA_THRESHOLD)) {
973 status = -1;
974 } else {
975 if (de4x5_debug >= 1) {
976 printk("%s: transmit timed out, status %08x, tbusy:%ld, lostMedia:%d tickssofar:%ld, resetting.\n",dev->name, inl(DE4X5_STS), dev->tbusy, lp->lostMedia, tickssofar);
977 }
978
979
980 STOP_DE4X5;
981
982
983       for (i=lp->tx_old; i!=lp->tx_new; i=(i+1)%lp->txRingSize) {
984 if (lp->skb[i] != NULL) {
985 if (lp->skb[i]->len != FAKE_FRAME_LEN) {
986 if (lp->tx_ring[i].status == T_OWN) {
987 dev_queue_xmit(lp->skb[i], dev, SOPRI_NORMAL);
988 } else {
989 dev_kfree_skb(lp->skb[i], FREE_WRITE);
990 }
991 } else {
992 dev_kfree_skb(lp->skb[i], FREE_WRITE);
993 }
994 lp->skb[i] = NULL;
995 }
996 }
997 if (skb->len != FAKE_FRAME_LEN) {
998 dev_queue_xmit(skb, dev, SOPRI_NORMAL);
999 } else {
1000 dev_kfree_skb(skb, FREE_WRITE);
1001 }
1002
1003
1004 status = de4x5_init(dev);
1005
1006
1007 if (!status) {
1008
1009 dev->interrupt = UNMASK_INTERRUPTS;
1010 dev->start = 1;
1011 dev->tbusy = 0;
1012 dev->trans_start = jiffies;
1013
1014 START_DE4X5;
1015
1016
1017 imr = 0;
1018 UNMASK_IRQs;
1019
1020
1021 sts = inl(DE4X5_STS);
1022 outl(sts, DE4X5_STS);
1023
1024 ENABLE_IRQs;
1025 } else {
1026 printk("%s: hardware initialisation failure, status %08x.\n",
1027 dev->name, inl(DE4X5_STS));
1028 }
1029 }
1030 } else if (skb == NULL) {
1031 dev_tint(dev);
1032 } else if (skb->len == FAKE_FRAME_LEN) {
1033 dev_kfree_skb(skb, FREE_WRITE);
1034 } else if (skb->len > 0) {
1035
1036 if (set_bit(0, (void*)&dev->tbusy) != 0) {
1037 printk("%s: Transmitter access conflict.\n", dev->name);
1038 status = -1;
1039 } else {
1040 cli();
1041 if (TX_BUFFS_AVAIL) {
1042 load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
1043 if (lp->tx_enable) {
1044 outl(POLL_DEMAND, DE4X5_TPD);
1045 }
1046
1047        lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1048 dev->trans_start = jiffies;
1049
1050 if (TX_BUFFS_AVAIL) {
1051 dev->tbusy = 0;
1052 }
1053 } else {
1054 status = -1;
1055 }
1056 sti();
1057 }
1058 }
1059
1060 return status;
1061 }
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074 static void
1075 de4x5_interrupt(int irq, struct pt_regs *regs)
1076 {
1077 struct device *dev = (struct device *)(irq2dev_map[irq]);
1078 struct de4x5_private *lp;
1079 s32 imr, omr, sts;
1080 u_long iobase;
1081
1082 if (dev == NULL) {
1083 printk ("de4x5_interrupt(): irq %d for unknown device.\n", irq);
1084 } else {
1085 lp = (struct de4x5_private *)dev->priv;
1086 iobase = dev->base_addr;
1087
1088 if (dev->interrupt)
1089 printk("%s: Re-entering the interrupt handler.\n", dev->name);
1090
1091 DISABLE_IRQs;
1092 dev->interrupt = MASK_INTERRUPTS;
1093
1094 while ((sts = inl(DE4X5_STS)) & lp->irq_mask) {
1095 outl(sts, DE4X5_STS);
1096
1097 if (sts & (STS_RI | STS_RU))
1098 de4x5_rx(dev);
1099
1100 if (sts & (STS_TI | STS_TU))
1101 de4x5_tx(dev);
1102
1103 if (sts & STS_TM)
1104 de4x5_ast(dev);
1105
1106 if (sts & STS_LNF) {
1107 lp->lostMedia = LOST_MEDIA_THRESHOLD + 1;
1108 lp->irq_mask &= ~IMR_LFM;
1109 kick_tx(dev);
1110 }
1111
1112 if (sts & STS_SE) {
1113 STOP_DE4X5;
1114         printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
1115 dev->name, sts);
1116 }
1117 }
1118
1119 if (TX_BUFFS_AVAIL && dev->tbusy) {
1120 dev->tbusy = 0;
1121 mark_bh(NET_BH);
1122 }
1123
1124 dev->interrupt = UNMASK_INTERRUPTS;
1125 ENABLE_IRQs;
1126 }
1127
1128 return;
1129 }
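/*
** Editor's note: the handler above acknowledges work by writing the status
** bits back to DE4X5_STS before servicing them, and loops until no masked-in
** source remains set.  The board's interrupts stay disabled (DISABLE_IRQs)
** for the duration, so de4x5_rx()/de4x5_tx() run without re-entry from this
** IRQ, and the Tx queue is only re-opened (mark_bh) once descriptors free up.
*/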
1130
1131 static int
1132 de4x5_rx(struct device *dev)
1133 {
1134 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1135 int i, entry;
1136 s32 status;
1137 char *buf;
1138
1139 for (entry = lp->rx_new; lp->rx_ring[entry].status >= 0;entry = lp->rx_new) {
1140 status = lp->rx_ring[entry].status;
1141
1142 if (status & RD_FS) {
1143 lp->rx_old = entry;
1144 }
1145
1146 if (status & RD_LS) {
1147 if (status & RD_ES) {
1148 lp->stats.rx_errors++;
1149 if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
1150 if (status & RD_CE) lp->stats.rx_crc_errors++;
1151 if (status & RD_OF) lp->stats.rx_fifo_errors++;
1152 } else {
1153 struct sk_buff *skb;
1154 short pkt_len = (short)(lp->rx_ring[entry].status >> 16) - 4;
1155
1156 if ((skb = dev_alloc_skb(pkt_len+2)) != NULL) {
1157 skb->dev = dev;
1158
1159 skb_reserve(skb,2);
1160 if (entry < lp->rx_old) {
1161 short len = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
1162 memcpy(skb_put(skb,len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), len);
1163 memcpy(skb_put(skb,pkt_len-len), bus_to_virt(lp->rx_ring[0].buf), pkt_len - len);
1164 } else {
1165 memcpy(skb_put(skb,pkt_len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), pkt_len);
1166 }
1167
1168
1169 skb->protocol=eth_type_trans(skb,dev);
1170 netif_rx(skb);
1171
1172
1173 lp->stats.rx_packets++;
1174 for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
1175 if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
1176 lp->pktStats.bins[i]++;
1177 i = DE4X5_PKT_STAT_SZ;
1178 }
1179 }
1180 buf = skb->data;
1181 if (buf[0] & 0x01) {
1182 if ((*(s32 *)&buf[0] == -1) && (*(s16 *)&buf[4] == -1)) {
1183 lp->pktStats.broadcast++;
1184 } else {
1185 lp->pktStats.multicast++;
1186 }
1187 } else if ((*(s32 *)&buf[0] == *(s32 *)&dev->dev_addr[0]) &&
1188 (*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) {
1189 lp->pktStats.unicast++;
1190 }
1191
1192 lp->pktStats.bins[0]++;
1193 if (lp->pktStats.bins[0] == 0) {
1194 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
1195 }
1196 } else {
1197 printk("%s: Insufficient memory; nuking packet.\n", dev->name);
1198 lp->stats.rx_dropped++;
1199 break;
1200 }
1201 }
1202
1203
1204       for (; lp->rx_old!=entry; lp->rx_old=(lp->rx_old+1)%lp->rxRingSize) {
1205 lp->rx_ring[lp->rx_old].status = R_OWN;
1206 barrier();
1207 }
1208 lp->rx_ring[entry].status = R_OWN;
1209 barrier();
1210 }
1211
1212
1213
1214
1215     lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
1216 }
1217
1218 return 0;
1219 }
1220
1221
1222
1223
1224 static int
1225 de4x5_tx(struct device *dev)
1226 {
1227 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1228 u_long iobase = dev->base_addr;
1229 int entry;
1230 s32 status;
1231
1232 for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
1233 status = lp->tx_ring[entry].status;
1234 if (status < 0) {
1235 break;
1236 } else if (status & TD_ES) {
1237 lp->stats.tx_errors++;
1238 if (status & TD_NC) lp->stats.tx_carrier_errors++;
1239 if (status & TD_LC) lp->stats.tx_window_errors++;
1240 if (status & TD_UF) lp->stats.tx_fifo_errors++;
1241 if (status & TD_LC) lp->stats.collisions++;
1242 if (status & TD_EC) lp->pktStats.excessive_collisions++;
1243 if (status & TD_DE) lp->stats.tx_aborted_errors++;
1244
1245 if ((status != 0x7fffffff) &&
1246 (status & (TD_LO | TD_NC | TD_EC | TD_LF))) {
1247 lp->lostMedia++;
1248 if (lp->lostMedia > LOST_MEDIA_THRESHOLD) {
1249 kick_tx(dev);
1250 }
1251 } else {
1252 outl(POLL_DEMAND, DE4X5_TPD);
1253 }
1254 } else {
1255 lp->stats.tx_packets++;
1256 lp->lostMedia = 0;
1257 }
1258
1259 if (lp->skb[entry] != NULL) {
1260 dev_kfree_skb(lp->skb[entry], FREE_WRITE);
1261 lp->skb[entry] = NULL;
1262 }
1263
1264
1265     lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
1266 }
1267
1268 return 0;
1269 }
1270
1271 static int
1272 de4x5_ast(struct device *dev)
1273 {
1274 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1275 u_long iobase = dev->base_addr;
1276 s32 gep;
1277
1278 disable_ast(dev);
1279
1280 if (lp->chipset == DC21140) {
1281 gep = inl(DE4X5_GEP);
1282 if (((lp->media == _100Mb) && (gep & GEP_SLNK)) ||
1283 ((lp->media == _10Mb) && (gep & GEP_LNP)) ||
1284 ((lp->media == _10Mb) && !(gep & GEP_SLNK)) ||
1285 (lp->media == NC)) {
1286 if (lp->linkProb || ((lp->media == NC) && (!(gep & GEP_LNP)))) {
1287 lp->lostMedia = LOST_MEDIA_THRESHOLD + 1;
1288 lp->linkProb = 0;
1289 kick_tx(dev);
1290 } else {
1291 switch(lp->media) {
1292 case NC:
1293 lp->linkProb = 0;
1294 enable_ast(dev, DE4X5_AUTOSENSE_MS);
1295 break;
1296
1297 case _10Mb:
1298 lp->linkProb = 1;
1299 enable_ast(dev, 1500);
1300 break;
1301
1302 case _100Mb:
1303 lp->linkProb = 1;
1304 enable_ast(dev, 4000);
1305 break;
1306 }
1307 }
1308 } else {
1309 lp->linkProb = 0;
1310 enable_ast(dev, DE4X5_AUTOSENSE_MS);
1311 }
1312 }
1313
1314 return 0;
1315 }
1316
1317 static int
1318 de4x5_close(struct device *dev)
1319 {
1320 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1321 u_long iobase = dev->base_addr;
1322 s32 imr, omr;
1323
1324 dev->start = 0;
1325 dev->tbusy = 1;
1326
1327 if (de4x5_debug > 1) {
1328 printk("%s: Shutting down ethercard, status was %8.8x.\n",
1329 dev->name, inl(DE4X5_STS));
1330 }
1331
1332
1333
1334
1335 DISABLE_IRQs;
1336
1337 STOP_DE4X5;
1338
1339
1340
1341
1342 free_irq(dev->irq);
1343 irq2dev_map[dev->irq] = 0;
1344
1345 MOD_DEC_USE_COUNT;
1346
1347
1348 if (lp->chipset == DC21041) {
1349 outl(0, DE4X5_SICR);
1350 outl(CFDA_PSM, PCI_CFDA);
1351 }
1352
1353 return 0;
1354 }
1355
1356 static struct enet_statistics *
1357 de4x5_get_stats(struct device *dev)
1358 {
1359 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1360 u_long iobase = dev->base_addr;
1361
1362 lp->stats.rx_missed_errors = (int) (inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
1363
1364 return &lp->stats;
1365 }
1366
1367 static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb)
1368 {
1369 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1370
1371 lp->tx_ring[lp->tx_new].buf = virt_to_bus(buf);
1372 lp->tx_ring[lp->tx_new].des1 &= TD_TER;
1373 lp->tx_ring[lp->tx_new].des1 |= flags;
1374 lp->skb[lp->tx_new] = skb;
1375 barrier();
1376 lp->tx_ring[lp->tx_new].status = T_OWN;
1377 barrier();
1378
1379 return;
1380 }
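/*
** Editor's note: ordering matters above -- the buffer address and des1 flags
** are written first, then barrier(), then ownership (T_OWN) is handed to the
** chip, so the 21x4x never sees a partially built descriptor.  On the x86
** targets of this era barrier() is only a compiler barrier, which is assumed
** to be sufficient here.
*/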
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391 static void
1392 set_multicast_list(struct device *dev, int num_addrs, void *addrs)
1393 {
1394 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1395 u_long iobase = dev->base_addr;
1396
1397
1398 if (irq2dev_map[dev->irq] != NULL) {
1399 if (num_addrs >= 0) {
1400 SetMulticastFilter(dev, num_addrs, (char *)addrs);
1401 if (lp->setup_f == HASH_PERF) {
1402 load_packet(dev, lp->setup_frame, TD_IC | HASH_F | TD_SET |
1403 SETUP_FRAME_LEN, NULL);
1404 } else {
1405 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
1406 SETUP_FRAME_LEN, NULL);
1407 }
1408
1409       lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1410 outl(POLL_DEMAND, DE4X5_TPD);
1411 dev->trans_start = jiffies;
1412 } else {
1413 u32 omr;
1414 omr = inl(DE4X5_OMR);
1415 omr |= OMR_PR;
1416 outl(omr, DE4X5_OMR);
1417 }
1418 }
1419
1420 return;
1421 }
1422
1423
1424
1425
1426
1427
1428 static void SetMulticastFilter(struct device *dev, int num_addrs, char *addrs)
1429 {
1430 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1431 u_long iobase = dev->base_addr;
1432 int i, j, bit, byte;
1433 u16 hashcode;
1434 u32 omr, crc, poly = CRC_POLYNOMIAL_LE;
1435 char *pa;
1436
1437 omr = inl(DE4X5_OMR);
1438 pa = build_setup_frame(dev, ALL);
1439
1440 if (lp->setup_f == HASH_PERF) {
1441 if (num_addrs == HASH_TABLE_LEN) {
1442 omr |= OMR_PM;
1443 } else {
1444 omr &= ~OMR_PM;
1445
1446 for (i=0;i<num_addrs;i++) {
1447 if ((*addrs & 0x01) == 1) {
1448 crc = 0xffffffff;
1449 for (byte=0;byte<ETH_ALEN;byte++) {
1450
1451 for (bit = *addrs++,j=0;j<8;j++, bit>>=1) {
1452 crc = (crc >> 1) ^ (((crc ^ bit) & 0x01) ? poly : 0);
1453 }
1454 }
1455 hashcode = crc & HASH_BITS;
1456
1457 byte = hashcode >> 3;
1458 bit = 1 << (hashcode & 0x07);
1459
1460 byte <<= 1;
1461 if (byte & 0x02) {
1462 byte -= 1;
1463 }
1464 lp->setup_frame[byte] |= bit;
1465
1466 } else {
1467 addrs += ETH_ALEN;
1468 }
1469 }
1470 }
1471 } else {
1472 omr &= ~OMR_PM;
1473 for (j=0; j<num_addrs; j++) {
1474 for (i=0; i<ETH_ALEN; i++) {
1475 *(pa + (i&1)) = *addrs++;
1476 if (i & 0x01) pa += 4;
1477 }
1478 }
1479 }
1480
1481 if (num_addrs == 0)
1482 omr &= ~OMR_PR;
1483 outl(omr, DE4X5_OMR);
1484
1485 return;
1486 }
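/*
** Editor's note (sketch of the hash filter arithmetic above): for each
** multicast address a little-endian CRC-32 is computed over all six bytes,
** the low-order bits kept by HASH_BITS select one bit of the 512-bit hash
** table, and that bit is set in the setup frame.  The byte<<=1 / byte-=1
** juggling maps the logical table byte onto the setup frame layout, where
** each 16-bit word of filter data sits in the low half of a 32-bit longword.
*/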
1487
1488
1489
1490
1491
1492 static void eisa_probe(struct device *dev, u_long ioaddr)
1493 {
1494 int i, maxSlots, status;
1495 u_short vendor, device;
1496 s32 cfid;
1497 u_long iobase;
1498 struct bus_type *lp = &bus;
1499 char name[DE4X5_STRLEN];
1500
1501 if (!ioaddr && autoprobed) return ;
1502 if ((ioaddr < 0x1000) && (ioaddr > 0)) return;
1503
1504 lp->bus = EISA;
1505
1506 if (ioaddr == 0) {
1507 iobase = EISA_SLOT_INC;
1508 i = 1;
1509 maxSlots = MAX_EISA_SLOTS;
1510 } else {
1511 iobase = ioaddr;
1512 i = (ioaddr >> 12);
1513 maxSlots = i + 1;
1514 }
1515
1516 for (status = -ENODEV; (i<maxSlots) && (dev!=NULL); i++, iobase+=EISA_SLOT_INC) {
1517 if (EISA_signature(name, EISA_ID)) {
1518 cfid = inl(PCI_CFID);
1519 device = (u_short)(cfid >> 16);
1520 vendor = (u_short) cfid;
1521
1522 lp->bus = EISA;
1523 lp->chipset = device;
1524 if (DevicePresent(EISA_APROM) == 0) {
1525
1526 outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
1527 outl(0x00004000, PCI_CFLT);
1528 outl(iobase, PCI_CBIO);
1529
1530 if (check_region(iobase, DE4X5_EISA_TOTAL_SIZE) == 0) {
1531 if ((dev = alloc_device(dev, iobase)) != NULL) {
1532 if ((status = de4x5_hw_init(dev, iobase)) == 0) {
1533 num_de4x5s++;
1534 }
1535 num_eth++;
1536 }
1537 } else if (autoprobed) {
1538 printk("%s: region already allocated at 0x%04lx.\n", dev->name, iobase);
1539 }
1540 }
1541 }
1542 }
1543
1544 return;
1545 }
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559 #define PCI_DEVICE (dev_num << 3)
1560 #define PCI_LAST_DEV 32
1561
1562 static void pci_probe(struct device *dev, u_long ioaddr)
1563 {
1564 u_char irq;
1565 u_char pb, pbus, dev_num, dnum, dev_fn;
1566 u_short vendor, device, index, status;
1567 u_int class = DE4X5_CLASS_CODE;
1568 u_int iobase;
1569 struct bus_type *lp = &bus;
1570
1571 if (!ioaddr && autoprobed) return ;
1572
1573 if (pcibios_present()) {
1574 lp->bus = PCI;
1575
1576 if (ioaddr < 0x1000) {
1577 pbus = (u_short)(ioaddr >> 8);
1578 dnum = (u_short)(ioaddr & 0xff);
1579 } else {
1580 pbus = 0;
1581 dnum = 0;
1582 }
1583
1584 for (index=0;
1585 (pcibios_find_class(class, index, &pb, &dev_fn)!= PCIBIOS_DEVICE_NOT_FOUND);
1586 index++) {
1587 dev_num = PCI_SLOT(dev_fn);
1588
1589 if ((!pbus && !dnum) || ((pbus == pb) && (dnum == dev_num))) {
1590 pcibios_read_config_word(pb, PCI_DEVICE, PCI_VENDOR_ID, &vendor);
1591 pcibios_read_config_word(pb, PCI_DEVICE, PCI_DEVICE_ID, &device);
1592 if (is_DC21040 || is_DC21041 || is_DC21140) {
1593
1594 lp->device = dev_num;
1595 lp->bus_num = pb;
1596
1597
1598 lp->chipset = device;
1599
1600
1601 pcibios_read_config_dword(pb, PCI_DEVICE, PCI_BASE_ADDRESS_0, &iobase);
1602 iobase &= CBIO_MASK;
1603
1604
1605 pcibios_read_config_byte(pb, PCI_DEVICE, PCI_INTERRUPT_LINE, &irq);
1606
1607
1608 pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
1609 if (status & PCI_COMMAND_IO) {
1610 if (!(status & PCI_COMMAND_MASTER)) {
1611 status |= PCI_COMMAND_MASTER;
1612 pcibios_write_config_word(pb, PCI_DEVICE, PCI_COMMAND, status);
1613 pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
1614 }
1615 if (status & PCI_COMMAND_MASTER) {
1616 if ((DevicePresent(DE4X5_APROM) == 0) || is_not_dec) {
1617 if (check_region(iobase, DE4X5_PCI_TOTAL_SIZE) == 0) {
1618 if ((dev = alloc_device(dev, iobase)) != NULL) {
1619 dev->irq = irq;
1620 if ((status = de4x5_hw_init(dev, iobase)) == 0) {
1621 num_de4x5s++;
1622 }
1623 num_eth++;
1624 }
1625 } else if (autoprobed) {
1626 printk("%s: region already allocated at 0x%04x.\n", dev->name, (u_short)iobase);
1627 }
1628 }
1629 }
1630 }
1631 }
1632 }
1633 }
1634 }
1635
1636 return;
1637 }
1638
1639
1640
1641
1642
1643 static struct device *alloc_device(struct device *dev, u_long iobase)
1644 {
1645 int addAutoProbe = 0;
1646 struct device *tmp = NULL, *ret;
1647 int (*init)(struct device *) = NULL;
1648
1649
1650
1651
1652 if (!loading_module) {
1653 while (dev->next != NULL) {
1654 if ((dev->base_addr == DE4X5_NDA) || (dev->base_addr == 0)) break;
1655 dev = dev->next;
1656 num_eth++;
1657 }
1658
1659
1660
1661
1662
1663 if ((dev->base_addr == 0) && (num_de4x5s > 0)) {
1664 addAutoProbe++;
1665 tmp = dev->next;
1666 init = dev->init;
1667 }
1668
1669
1670
1671
1672
1673 if ((dev->next == NULL) &&
1674 !((dev->base_addr == DE4X5_NDA) || (dev->base_addr == 0))){
1675 dev->next = (struct device *)kmalloc(sizeof(struct device) + 8,
1676 GFP_KERNEL);
1677
1678 dev = dev->next;
1679 if (dev == NULL) {
1680 printk("eth%d: Device not initialised, insufficient memory\n",
1681 num_eth);
1682 } else {
1683
1684
1685
1686
1687
1688        dev->name = (char *)(dev + 1);   /* name storage follows the struct */
1689 if (num_eth > 9999) {
1690 sprintf(dev->name,"eth????");
1691 } else {
1692 sprintf(dev->name,"eth%d", num_eth);
1693 }
1694 dev->base_addr = iobase;
1695 dev->next = NULL;
1696 dev->init = &de4x5_probe;
1697 num_de4x5s++;
1698 }
1699 }
1700 ret = dev;
1701
1702
1703
1704
1705
1706 if (ret != NULL) {
1707 if (addAutoProbe) {
1708 for (; (tmp->next!=NULL) && (tmp->base_addr!=DE4X5_NDA); tmp=tmp->next);
1709
1710
1711
1712
1713
1714 if ((tmp->next == NULL) && !(tmp->base_addr == DE4X5_NDA)) {
1715 tmp->next = (struct device *)kmalloc(sizeof(struct device) + 8,
1716 GFP_KERNEL);
1717 tmp = tmp->next;
1718 if (tmp == NULL) {
1719 printk("%s: Insufficient memory to extend the device list.\n",
1720 dev->name);
1721 } else {
1722
1723
1724
1725
1726
1727          tmp->name = (char *)(tmp + 1); /* name storage follows the struct */
1728 if (num_eth > 9999) {
1729 sprintf(tmp->name,"eth????");
1730 } else {
1731 sprintf(tmp->name,"eth%d", num_eth);
1732 }
1733 tmp->base_addr = 0;
1734 tmp->next = NULL;
1735 tmp->init = init;
1736 }
1737 } else {
1738 tmp->base_addr = 0;
1739 }
1740 }
1741 }
1742 } else {
1743 ret = dev;
1744 }
1745
1746 return ret;
1747 }
1748
1749
1750
1751
1752
1753
1754
1755 static int autoconf_media(struct device *dev)
1756 {
1757 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1758 u_long iobase = dev->base_addr;
1759
1760 lp->tx_enable = YES;
1761 if (de4x5_debug > 0 ) {
1762 if (lp->chipset != DC21140) {
1763 printk("%s: Searching for media... ",dev->name);
1764 } else {
1765 printk("%s: Searching for mode... ",dev->name);
1766 }
1767 }
1768
1769 if (lp->chipset == DC21040) {
1770 lp->media = (lp->autosense == AUTO ? TP : lp->autosense);
1771 dc21040_autoconf(dev);
1772 } else if (lp->chipset == DC21041) {
1773 lp->media = (lp->autosense == AUTO ? TP_NW : lp->autosense);
1774 dc21041_autoconf(dev);
1775 } else if (lp->chipset == DC21140) {
1776 disable_ast(dev);
1777 lp->media = (lp->autosense == AUTO ? _10Mb : lp->autosense);
1778 dc21140_autoconf(dev);
1779 }
1780
1781 if (de4x5_debug > 0 ) {
1782 if (lp->chipset != DC21140) {
1783 printk("media is %s\n", (lp->media == NC ? "unconnected!" :
1784 (lp->media == TP ? "TP." :
1785 (lp->media == ANS ? "TP/Nway." :
1786 (lp->media == BNC ? "BNC." :
1787 (lp->media == AUI ? "AUI." :
1788 "BNC/AUI."
1789 ))))));
1790 } else {
1791 printk("mode is %s\n",(lp->media == NC ? "link down.":
1792 (lp->media == _100Mb ? "100Mb/s." :
1793 (lp->media == _10Mb ? "10Mb/s." :
1794 "\?\?\?"
1795 ))));
1796 }
1797 }
1798
1799 if (lp->media) {
1800 lp->lostMedia = 0;
1801 inl(DE4X5_MFC);
1802 if ((lp->media == TP) || (lp->media == ANS)) {
1803 lp->irq_mask |= IMR_LFM;
1804 }
1805 }
1806 dce_ms_delay(10);
1807
1808 return (lp->media);
1809 }
1810
1811 static void dc21040_autoconf(struct device *dev)
1812 {
1813 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1814 u_long iobase = dev->base_addr;
1815 int i, linkBad;
1816 s32 sisr = 0, t_3s = 3000;
1817
1818 switch (lp->media) {
1819 case TP:
1820 reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
1821 for (linkBad=1,i=0;(i<t_3s) && linkBad && !(sisr & SISR_NCR);i++) {
1822 if (((sisr = inl(DE4X5_SISR)) & SISR_LKF) == 0) linkBad = 0;
1823 dce_ms_delay(1);
1824 }
1825 if (linkBad && (lp->autosense == AUTO)) {
1826 lp->media = BNC_AUI;
1827 dc21040_autoconf(dev);
1828 }
1829 break;
1830
1831 case BNC:
1832 case AUI:
1833 case BNC_AUI:
1834 reset_init_sia(dev, 0x8f09, 0x0705, 0x0006);
1835 dce_ms_delay(500);
1836 linkBad = ping_media(dev);
1837 if (linkBad && (lp->autosense == AUTO)) {
1838 lp->media = NC;
1839 dc21040_autoconf(dev);
1840 }
1841 break;
1842
1843 case NC:
1844 #ifdef i386
1845 reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
1846 break;
1847 #else
1848
1849 reset_init_sia(dev, 0x8f09, 0x0705, 0x0006);
1850 #endif
1851 }
1852
1853 return;
1854 }
1855
1856
1857
1858
1859
1860
1861
1862 static void dc21041_autoconf(struct device *dev)
1863 {
1864 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1865 u_long iobase = dev->base_addr;
1866 s32 sts, irqs, irq_mask, omr;
1867
1868 switch (lp->media) {
1869 case TP_NW:
1870 omr = inl(DE4X5_OMR);
1871 outl(omr | OMR_FD, DE4X5_OMR);
1872 irqs = STS_LNF | STS_LNP;
1873 irq_mask = IMR_LFM | IMR_LPM;
1874 sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
1875 if (sts & STS_LNP) {
1876 lp->media = ANS;
1877 } else {
1878 lp->media = AUI;
1879 }
1880 dc21041_autoconf(dev);
1881 break;
1882
1883 case ANS:
1884 irqs = STS_LNP;
1885 irq_mask = IMR_LPM;
1886 sts = test_ans(dev, irqs, irq_mask, 3000);
1887 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
1888 lp->media = TP;
1889 dc21041_autoconf(dev);
1890 }
1891 break;
1892
1893 case TP:
1894 omr = inl(DE4X5_OMR);
1895 outl(omr & ~OMR_FD, DE4X5_OMR);
1896 irqs = STS_LNF | STS_LNP;
1897 irq_mask = IMR_LFM | IMR_LPM;
1898 sts = test_media(dev, irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
1899 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
1900 if (inl(DE4X5_SISR) & SISR_NRA) {
1901 lp->media = AUI;
1902 } else {
1903 lp->media = BNC;
1904 }
1905 dc21041_autoconf(dev);
1906 }
1907 break;
1908
1909 case AUI:
1910 omr = inl(DE4X5_OMR);
1911 outl(omr & ~OMR_FD, DE4X5_OMR);
1912 irqs = 0;
1913 irq_mask = 0;
1914 sts = test_media(dev, irqs, irq_mask, 0xef09, 0xf7fd, 0x000e, 1000);
1915 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
1916 lp->media = BNC;
1917 dc21041_autoconf(dev);
1918 }
1919 break;
1920
1921 case BNC:
1922 omr = inl(DE4X5_OMR);
1923 outl(omr & ~OMR_FD, DE4X5_OMR);
1924 irqs = 0;
1925 irq_mask = 0;
1926 sts = test_media(dev, irqs, irq_mask, 0xef09, 0xf7fd, 0x0006, 1000);
1927 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
1928 lp->media = NC;
1929 } else {
1930 if (ping_media(dev)) lp->media = NC;
1931 }
1932 break;
1933
1934 case NC:
1935 omr = inl(DE4X5_OMR);
1936 outl(omr | OMR_FD, DE4X5_OMR);
1937 reset_init_sia(dev, 0xef01, 0xffff, 0x0008);
1938 break;
1939 }
1940
1941 return;
1942 }
1943
1944
1945
1946
1947 static void dc21140_autoconf(struct device *dev)
1948 {
1949 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1950 u_long iobase = dev->base_addr;
1951 s32 omr;
1952
1953 switch(lp->media) {
1954 case _100Mb:
1955 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR));
1956 omr |= (de4x5_full_duplex ? OMR_FD : 0);
1957 outl(omr | OMR_PS | OMR_HBD | OMR_PCS | OMR_SCR, DE4X5_OMR);
1958 outl(GEP_FDXD | GEP_MODE, DE4X5_GEP);
1959 break;
1960
1961 case _10Mb:
1962 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR));
1963 omr |= (de4x5_full_duplex ? OMR_FD : 0);
1964 outl(omr | OMR_TTM, DE4X5_OMR);
1965 outl(GEP_FDXD, DE4X5_GEP);
1966 break;
1967 }
1968
1969 return;
1970 }
1971
1972 static int
1973 test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
1974 {
1975 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1976 u_long iobase = dev->base_addr;
1977 s32 sts, time, csr12;
1978
1979 reset_init_sia(dev, csr13, csr14, csr15);
1980
1981
1982 load_ms_timer(dev, msec);
1983
1984
1985 sts = inl(DE4X5_STS);
1986 outl(sts, DE4X5_STS);
1987
1988
1989 csr12 = inl(DE4X5_SISR);
1990 outl(csr12, DE4X5_SISR);
1991
1992
1993 do {
1994 time = inl(DE4X5_GPT) & GPT_VAL;
1995 sts = inl(DE4X5_STS);
1996 } while ((time != 0) && !(sts & irqs));
1997
1998 sts = inl(DE4X5_STS);
1999
2000 return sts;
2001 }
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025 static int ping_media(struct device *dev)
2026 {
2027 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2028 u_long iobase = dev->base_addr;
2029 int i, entry, linkBad;
2030 s32 omr, t_3s = 4000;
2031 char frame[64];
2032
2033 create_packet(dev, frame, sizeof(frame));
2034
2035 entry = lp->tx_new;
2036 load_packet(dev, frame, TD_LS | TD_FS | sizeof(frame),NULL);
2037
2038 omr = inl(DE4X5_OMR);
2039 outl(omr|OMR_ST, DE4X5_OMR);
2040
2041     lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
2042 lp->tx_old = lp->tx_new;
2043
2044
2045 for (linkBad=1,i=0;(i<t_3s) && linkBad;i++) {
2046       if (inl(DE4X5_SISR) & SISR_NCR) break;
2047 if (lp->tx_ring[entry].status >= 0) linkBad=0;
2048 dce_ms_delay(1);
2049 }
2050 outl(omr, DE4X5_OMR);
2051
2052 return ((linkBad || (lp->tx_ring[entry].status & TD_ES)) ? 1 : 0);
2053 }
2054
2055
2056
2057
2058
2059 static int test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec)
2060 {
2061 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2062 u_long iobase = dev->base_addr;
2063 s32 sts, ans;
2064
2065 outl(irq_mask, DE4X5_IMR);
2066
2067
2068 load_ms_timer(dev, msec);
2069
2070
2071 sts = inl(DE4X5_STS);
2072 outl(sts, DE4X5_STS);
2073
2074
2075 do {
2076 ans = inl(DE4X5_SISR) & SISR_ANS;
2077 sts = inl(DE4X5_STS);
2078 } while (!(sts & irqs) && (ans ^ ANS_NWOK) != 0);
2079
2080 return ((sts & STS_LNP) && ((ans ^ ANS_NWOK) == 0) ? STS_LNP : 0);
2081 }
2082
2083
2084
2085
2086 static void reset_init_sia(struct device *dev, s32 sicr, s32 strr, s32 sigr)
2087 {
2088 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2089 u_long iobase = dev->base_addr;
2090
2091 RESET_SIA;
2092 outl(sigr, DE4X5_SIGR);
2093 outl(strr, DE4X5_STRR);
2094 outl(sicr, DE4X5_SICR);
2095
2096 return;
2097 }
2098
2099
2100
2101
2102 static void load_ms_timer(struct device *dev, u32 msec)
2103 {
2104 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2105 u_long iobase = dev->base_addr;
2106 s32 i = 2048, j;
2107
2108 if (lp->chipset == DC21140) {
2109 j = inl(DE4X5_OMR);
2110 if ((j & OMR_TTM) && (j & OMR_PS)) {
2111 i = 8192;
2112 } else if ((~j & OMR_TTM) && (j & OMR_PS)) {
2113 i = 819;
2114 }
2115 }
2116
2117 outl((s32)(msec * 10000)/i, DE4X5_GPT);
2118
2119 return;
2120 }
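/*
** Editor's note: the divisor 'i' above is the general purpose timer tick in
** tenths of a microsecond (2048 = 204.8us, 819 = 81.9us, 8192 = 819.2us), so
** (msec * 10000) / i converts milliseconds into timer ticks.  The 21140
** cases simply follow the OMR_TTM/OMR_PS port settings chosen during
** autoconfiguration; the values are taken from this code, not re-derived
** from the datasheet.
*/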
2121
2122
2123
2124
2125 static void create_packet(struct device *dev, char *frame, int len)
2126 {
2127 int i;
2128 char *buf = frame;
2129
2130 for (i=0; i<ETH_ALEN; i++) {
2131 *buf++ = dev->dev_addr[i];
2132 }
2133 for (i=0; i<ETH_ALEN; i++) {
2134 *buf++ = dev->dev_addr[i];
2135 }
2136
2137 *buf++ = 0;
2138 *buf++ = 1;
2139
2140 return;
2141 }
2142
2143
2144
2145
2146 static void dce_us_delay(u32 usec)
2147 {
2148 udelay(usec);
2149
2150 return;
2151 }
2152
2153
2154
2155
2156 static void dce_ms_delay(u32 msec)
2157 {
2158 u_int i;
2159
2160 for (i=0; i<msec; i++) {
2161 dce_us_delay(1000);
2162 }
2163
2164 return;
2165 }
2166
2167
2168
2169
2170
2171 static int EISA_signature(char *name, s32 eisa_id)
2172 {
2173 u_int i;
2174 const char *signatures[] = DE4X5_SIGNATURE;
2175 char ManCode[DE4X5_STRLEN];
2176 union {
2177 s32 ID;
2178 char Id[4];
2179 } Eisa;
2180 int status = 0;
2181
2182 *name = '\0';
2183 Eisa.ID = inl(eisa_id);
2184
2185 ManCode[0]=(((Eisa.Id[0]>>2)&0x1f)+0x40);
2186 ManCode[1]=(((Eisa.Id[1]&0xe0)>>5)+((Eisa.Id[0]&0x03)<<3)+0x40);
2187 ManCode[2]=(((Eisa.Id[2]>>4)&0x0f)+0x30);
2188 ManCode[3]=((Eisa.Id[2]&0x0f)+0x30);
2189 ManCode[4]=(((Eisa.Id[3]>>4)&0x0f)+0x30);
2190 ManCode[5]='\0';
2191
2192 for (i=0;(*signatures[i] != '\0') && (*name == '\0');i++) {
2193 if (strstr(ManCode, signatures[i]) != NULL) {
2194 strcpy(name,ManCode);
2195 status = 1;
2196 }
2197 }
2198
2199 return status;
2200 }
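/*
** Editor's note: the shifts above appear to unpack part of the compressed
** EISA ID register -- two letters of the manufacturer code (5 bits each,
** 'A' == 1) and three product-ID hex digits -- into a printable string such
** as "DE425", which is then matched against the DE4X5_SIGNATURE list.
*/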
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214 static int DevicePresent(u_long aprom_addr)
2215 {
2216 union {
2217 struct {
2218 u32 a;
2219 u32 b;
2220 } llsig;
2221 char Sig[sizeof(u32) << 1];
2222 } dev;
2223 char data;
2224 int i, j, tmp, status = 0;
2225 short sigLength;
2226 struct bus_type *lp = &bus;
2227
2228 dev.llsig.a = ETH_PROM_SIG;
2229 dev.llsig.b = ETH_PROM_SIG;
2230 sigLength = sizeof(u32) << 1;
2231
2232 if (lp->chipset == DC21040) {
2233 for (i=0,j=0;(j<sigLength) && (i<PROBE_LENGTH+sigLength-1);i++) {
2234 if (lp->bus == PCI) {
2235 while ((tmp = inl(aprom_addr)) < 0);
2236 data = (char)tmp;
2237 } else {
2238 data = inb(aprom_addr);
2239 }
2240 if (dev.Sig[j] == data) {
2241 j++;
2242 } else {
2243 if (data == dev.Sig[0]) {
2244 j=1;
2245 } else {
2246 j=0;
2247 }
2248 }
2249 }
2250
2251 if (j!=sigLength) {
2252 status = -ENODEV;
2253 }
2254
2255 } else {
2256 short *p = (short *)&lp->srom;
2257 for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
2258 *p++ = srom_rd(aprom_addr, i);
2259 }
2260 }
2261
2262 return status;
2263 }
2264
2265 static int get_hw_addr(struct device *dev)
2266 {
2267 u_long iobase = dev->base_addr;
2268 int i, k, tmp, status = 0;
2269 u_short j,chksum;
2270 struct bus_type *lp = &bus;
2271
2272 for (i=0,k=0,j=0;j<3;j++) {
2273 k <<= 1 ;
2274 if (k > 0xffff) k-=0xffff;
2275
2276 if (lp->bus == PCI) {
2277 if (lp->chipset == DC21040) {
2278 while ((tmp = inl(DE4X5_APROM)) < 0);
2279 k += (u_char) tmp;
2280 dev->dev_addr[i++] = (u_char) tmp;
2281 while ((tmp = inl(DE4X5_APROM)) < 0);
2282 k += (u_short) (tmp << 8);
2283 dev->dev_addr[i++] = (u_char) tmp;
2284 } else {
2285 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
2286 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
2287 }
2288 } else {
2289 k += (u_char) (tmp = inb(EISA_APROM));
2290 dev->dev_addr[i++] = (u_char) tmp;
2291 k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
2292 dev->dev_addr[i++] = (u_char) tmp;
2293 }
2294
2295 if (k > 0xffff) k-=0xffff;
2296 }
2297 if (k == 0xffff) k=0;
2298
2299 if (lp->bus == PCI) {
2300 if (lp->chipset == DC21040) {
2301 while ((tmp = inl(DE4X5_APROM)) < 0);
2302 chksum = (u_char) tmp;
2303 while ((tmp = inl(DE4X5_APROM)) < 0);
2304 chksum |= (u_short) (tmp << 8);
2305 if (k != chksum) status = -1;
2306 }
2307 } else {
2308 chksum = (u_char) inb(EISA_APROM);
2309 chksum |= (u_short) (inb(EISA_APROM) << 8);
2310 if (k != chksum) status = -1;
2311 }
2312
2313
2314 return status;
2315 }
2316
2317
2318
2319
2320 static short srom_rd(u_long addr, u_char offset)
2321 {
2322 sendto_srom(SROM_RD | SROM_SR, addr);
2323
2324 srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
2325 srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
2326 srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
2327
2328 return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
2329 }
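/*
** Editor's note: srom_rd() and its helpers below bit-bang a 93C46-style
** serial EEPROM through the SROM CSR -- assert chip select, clock out the
** read opcode (srom_command), clock out a 6-bit word address (srom_address),
** then clock in 16 data bits (srom_data).  DevicePresent() reads the
** DC21041/DC21140 SROM contents one 16-bit word at a time this way.
*/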
2330
2331 static void srom_latch(u_int command, u_long addr)
2332 {
2333 sendto_srom(command, addr);
2334 sendto_srom(command | DT_CLK, addr);
2335 sendto_srom(command, addr);
2336
2337 return;
2338 }
2339
2340 static void srom_command(u_int command, u_long addr)
2341 {
2342 srom_latch(command, addr);
2343 srom_latch(command, addr);
2344 srom_latch((command & 0x0000ff00) | DT_CS, addr);
2345
2346 return;
2347 }
2348
2349 static void srom_address(u_int command, u_long addr, u_char offset)
2350 {
2351 int i;
2352 char a;
2353
2354 a = (char)(offset << 2);
2355 for (i=0; i<6; i++, a <<= 1) {
2356 srom_latch(command | ((a < 0) ? DT_IN : 0), addr);
2357 }
2358 dce_us_delay(1);
2359
2360 i = (getfrom_srom(addr) >> 3) & 0x01;
2361 if (i != 0) {
2362 printk("Bad SROM address phase.....\n");
2363
2364 }
2365
2366 return;
2367 }
2368
2369 static short srom_data(u_int command, u_long addr)
2370 {
2371 int i;
2372 short word = 0;
2373 s32 tmp;
2374
2375 for (i=0; i<16; i++) {
2376 sendto_srom(command | DT_CLK, addr);
2377 tmp = getfrom_srom(addr);
2378 sendto_srom(command, addr);
2379
2380 word = (word << 1) | ((tmp >> 3) & 0x01);
2381 }
2382
2383 sendto_srom(command & 0x0000ff00, addr);
2384
2385 return word;
2386 }
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403 static void sendto_srom(u_int command, u_long addr)
2404 {
2405 outl(command, addr);
2406 dce_us_delay(1);
2407
2408 return;
2409 }
2410
2411 static int getfrom_srom(u_long addr)
2412 {
2413 s32 tmp;
2414
2415 tmp = inl(addr);
2416 dce_us_delay(1);
2417
2418 return tmp;
2419 }
2420
2421 static char *build_setup_frame(struct device *dev, int mode)
2422 {
2423 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2424 int i;
2425 char *pa = lp->setup_frame;
2426
2427
2428 if (mode == ALL) {
2429 memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
2430 }
2431
2432 if (lp->setup_f == HASH_PERF) {
2433 for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
2434 *(pa + i) = dev->dev_addr[i];
2435 if (i & 0x01) pa += 2;
2436 }
2437 *(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80;
2438 } else {
2439 for (i=0; i<ETH_ALEN; i++) {
2440 *(pa + (i&1)) = dev->dev_addr[i];
2441 if (i & 0x01) pa += 4;
2442 }
2443 for (i=0; i<ETH_ALEN; i++) {
2444 *(pa + (i&1)) = (char) 0xff;
2445 if (i & 0x01) pa += 4;
2446 }
2447 }
2448
2449 return pa;
2450 }
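/*
** Editor's note on the layouts built above: in hash filtering mode the setup
** frame carries the 512-bit multicast hash table plus one physical address
** at IMPERF_PA_OFFSET; in perfect filtering mode it carries full addresses
** written as 16-bit pieces into the low half of consecutive 32-bit longwords
** (hence the '(i & 1)' indexing and 'pa += 4' stepping).  The second loop in
** the perfect case pre-loads a broadcast (ff:ff:ff:ff:ff:ff) entry.
*/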
2451
2452 static void enable_ast(struct device *dev, u32 time_out)
2453 {
2454 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2455 u_long iobase = dev->base_addr;
2456
2457 lp->irq_mask |= IMR_TMM;
2458 outl(lp->irq_mask, DE4X5_IMR);
2459 load_ms_timer(dev, time_out);
2460
2461 return;
2462 }
2463
2464 static void disable_ast(struct device *dev)
2465 {
2466 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2467 u_long iobase = dev->base_addr;
2468
2469 lp->irq_mask &= ~IMR_TMM;
2470 outl(lp->irq_mask, DE4X5_IMR);
2471 load_ms_timer(dev, 0);
2472
2473 return;
2474 }
2475
2476 static void kick_tx(struct device *dev)
2477 {
2478 struct sk_buff *skb;
2479
2480 if ((skb = alloc_skb(0, GFP_ATOMIC)) != NULL) {
2481 skb->len= FAKE_FRAME_LEN;
2482 skb->arp=1;
2483 skb->dev=dev;
2484 dev_queue_xmit(skb, dev, SOPRI_NORMAL);
2485 }
2486
2487 return;
2488 }
2489
2490
2491
2492
2493
2494 static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd)
2495 {
2496 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2497 struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_data;
2498 u_long iobase = dev->base_addr;
2499 int i, j, status = 0;
2500 s32 omr;
2501 union {
2502 u8 addr[(HASH_TABLE_LEN * ETH_ALEN)];
2503 u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
2504 u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2];
2505 } tmp;
2506
2507 switch(ioc->cmd) {
2508 case DE4X5_GET_HWADDR:
2509 ioc->len = ETH_ALEN;
2510 status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
2511 if (status)
2512 break;
2513 for (i=0; i<ETH_ALEN; i++) {
2514 tmp.addr[i] = dev->dev_addr[i];
2515 }
2516 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
2517
2518 break;
2519 case DE4X5_SET_HWADDR:
2520 status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN);
2521 if (status)
2522 break;
2523 status = -EPERM;
2524 if (!suser())
2525 break;
2526 status = 0;
2527 memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN);
2528 for (i=0; i<ETH_ALEN; i++) {
2529 dev->dev_addr[i] = tmp.addr[i];
2530 }
2531 build_setup_frame(dev, PHYS_ADDR_ONLY);
2532
2533 while (set_bit(0, (void *)&dev->tbusy) != 0);
2534 if (lp->setup_f == HASH_PERF) {
2535 load_packet(dev, lp->setup_frame, TD_IC | HASH_F | TD_SET |
2536 SETUP_FRAME_LEN, NULL);
2537 } else {
2538 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
2539 SETUP_FRAME_LEN, NULL);
2540 }
2541     lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
2542 outl(POLL_DEMAND, DE4X5_TPD);
2543 dev->tbusy = 0;
2544
2545 break;
2546 case DE4X5_SET_PROM:
2547 if (suser()) {
2548 omr = inl(DE4X5_OMR);
2549 omr |= OMR_PR;
2550 outl(omr, DE4X5_OMR);
2551 } else {
2552 status = -EPERM;
2553 }
2554
2555 break;
2556 case DE4X5_CLR_PROM:
2557 if (suser()) {
2558 omr = inl(DE4X5_OMR);
2559 omr &= ~OMR_PR;
2560       outl(omr, DE4X5_OMR);
2561 } else {
2562 status = -EPERM;
2563 }
2564
2565 break;
2566 case DE4X5_SAY_BOO:
2567 printk("%s: Boo!\n", dev->name);
2568
2569 break;
2570 case DE4X5_GET_MCA:
2571 ioc->len = (HASH_TABLE_LEN >> 3);
2572 status = verify_area(VERIFY_WRITE, ioc->data, ioc->len);
2573 if (status)
2574 break;
2575 memcpy_tofs(ioc->data, lp->setup_frame, ioc->len);
2576
2577 break;
2578 case DE4X5_SET_MCA:
2579 if (suser()) {
2580 if (ioc->len != HASH_TABLE_LEN) {
2581 if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN * ioc->len))) {
2582 memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN * ioc->len);
2583 set_multicast_list(dev, ioc->len, tmp.addr);
2584 }
2585 } else {
2586 set_multicast_list(dev, ioc->len, NULL);
2587 }
2588 } else {
2589 status = -EPERM;
2590 }
2591
2592 break;
2593 case DE4X5_CLR_MCA:
2594 if (suser()) {
2595 set_multicast_list(dev, 0, NULL);
2596 } else {
2597 status = -EPERM;
2598 }
2599
2600 break;
2601 case DE4X5_MCA_EN:
2602 if (suser()) {
2603 omr = inl(DE4X5_OMR);
2604 omr |= OMR_PM;
2605 outl(omr, DE4X5_OMR);
2606 } else {
2607 status = -EPERM;
2608 }
2609
2610 break;
2611 case DE4X5_GET_STATS:
2612 ioc->len = sizeof(lp->pktStats);
2613 status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
2614 if (status)
2615 break;
2616
2617 cli();
2618 memcpy_tofs(ioc->data, &lp->pktStats, ioc->len);
2619 sti();
2620
2621 break;
2622 case DE4X5_CLR_STATS:
2623 if (suser()) {
2624 cli();
2625 memset(&lp->pktStats, 0, sizeof(lp->pktStats));
2626 sti();
2627 } else {
2628 status = -EPERM;
2629 }
2630
2631 break;
2632 case DE4X5_GET_OMR:
2633 tmp.addr[0] = inl(DE4X5_OMR);
2634 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, 1))) {
2635 memcpy_tofs(ioc->data, tmp.addr, 1);
2636 }
2637
2638 break;
2639 case DE4X5_SET_OMR:
2640 if (suser()) {
2641 if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, 1))) {
2642 memcpy_fromfs(tmp.addr, ioc->data, 1);
2643 outl(tmp.addr[0], DE4X5_OMR);
2644 }
2645 } else {
2646 status = -EPERM;
2647 }
2648
2649 break;
2650 case DE4X5_GET_REG:
2651 j = 0;
2652 tmp.lval[0] = inl(DE4X5_STS); j+=4;
2653 tmp.lval[1] = inl(DE4X5_BMR); j+=4;
2654 tmp.lval[2] = inl(DE4X5_IMR); j+=4;
2655 tmp.lval[3] = inl(DE4X5_OMR); j+=4;
2656 tmp.lval[4] = inl(DE4X5_SISR); j+=4;
2657 tmp.lval[5] = inl(DE4X5_SICR); j+=4;
2658 tmp.lval[6] = inl(DE4X5_STRR); j+=4;
2659 tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
2660 ioc->len = j;
2661 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
2662 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
2663 }
2664 break;
2665
2666 #define DE4X5_DUMP 0x0f
2667
2668 case DE4X5_DUMP:
2669 j = 0;
2670 tmp.addr[j++] = dev->irq;
2671 for (i=0; i<ETH_ALEN; i++) {
2672 tmp.addr[j++] = dev->dev_addr[i];
2673 }
2674 tmp.addr[j++] = lp->rxRingSize;
2675 tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
2676 tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
2677
2678 for (i=0;i<lp->rxRingSize-1;i++){
2679 if (i < 3) {
2680 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
2681 }
2682 }
2683 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
2684 for (i=0;i<lp->txRingSize-1;i++){
2685 if (i < 3) {
2686 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
2687 }
2688 }
2689 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
2690
2691 for (i=0;i<lp->rxRingSize-1;i++){
2692 if (i < 3) {
2693 tmp.lval[j>>2] = (s32)lp->rx_ring[i].buf; j+=4;
2694 }
2695 }
2696 tmp.lval[j>>2] = (s32)lp->rx_ring[i].buf; j+=4;
2697 for (i=0;i<lp->txRingSize-1;i++){
2698 if (i < 3) {
2699 tmp.lval[j>>2] = (s32)lp->tx_ring[i].buf; j+=4;
2700 }
2701 }
2702 tmp.lval[j>>2] = (s32)lp->tx_ring[i].buf; j+=4;
2703
2704 for (i=0;i<lp->rxRingSize;i++){
2705 tmp.lval[j>>2] = lp->rx_ring[i].status; j+=4;
2706 }
2707 for (i=0;i<lp->txRingSize;i++){
2708 tmp.lval[j>>2] = lp->tx_ring[i].status; j+=4;
2709 }
2710
2711 tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
2712 tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
2713 tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
2714 tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
2715 tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
2716 tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
2717 tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
2718 tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
2719
2720 tmp.addr[j++] = lp->txRingSize;
2721 tmp.addr[j++] = dev->tbusy;
2722
2723 ioc->len = j;
2724 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
2725 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
2726 }
2727
2728 break;
2729 default:
2730 status = -EOPNOTSUPP;
2731 }
2732
2733 return status;
2734 }
2735
2736 #ifdef MODULE
2737 char kernel_version[] = UTS_RELEASE;
2738 static char devicename[9] = { 0, };
2739 static struct device thisDE4X5 = {
2740 devicename,
2741 0, 0, 0, 0,
2742 0x2000, 10,
2743 0, 0, 0, NULL, de4x5_probe };
2744
2745 static int io=0x000b;
2746 static int irq=10;
2747
2748 int
2749 init_module(void)
2750 {
2751 thisDE4X5.base_addr=io;
2752 thisDE4X5.irq=irq;
2753 if (register_netdev(&thisDE4X5) != 0)
2754 return -EIO;
2755 return 0;
2756 }
2757
2758 void
2759 cleanup_module(void)
2760 {
2761 struct de4x5_private *lp = (struct de4x5_private *) thisDE4X5.priv;
2762
2763 if (MOD_IN_USE) {
2764 printk("%s: device busy, remove delayed\n",thisDE4X5.name);
2765 } else {
2766 if (lp) {
2767 kfree_s(bus_to_virt(lp->rx_ring[0].buf), RX_BUFF_SZ * NUM_RX_DESC + ALIGN);
2768 }
2769     release_region(thisDE4X5.base_addr, (lp->bus == PCI ?
2770                                          DE4X5_PCI_TOTAL_SIZE :
2771                                          DE4X5_EISA_TOTAL_SIZE));
2772     kfree_s(thisDE4X5.priv, sizeof(struct de4x5_private) + ALIGN);
2773     thisDE4X5.priv = NULL;
2774 
2775 unregister_netdev(&thisDE4X5);
2776 }
2777 }
2778 #endif
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789