This source file includes following definitions.
- de4x5_probe
- de4x5_hw_init
- de4x5_open
- de4x5_init
- de4x5_queue_pkt
- de4x5_interrupt
- de4x5_rx
- de4x5_tx
- de4x5_ast
- de4x5_close
- de4x5_get_stats
- load_packet
- set_multicast_list
- SetMulticastFilter
- eisa_probe
- pci_probe
- alloc_device
- autoconf_media
- dc21040_autoconf
- dc21041_autoconf
- dc21140_autoconf
- test_media
- ping_media
- test_ans
- reset_init_sia
- load_ms_timer
- create_packet
- dce_us_delay
- dce_ms_delay
- EISA_signature
- DevicePresent
- get_hw_addr
- srom_rd
- srom_latch
- srom_command
- srom_address
- srom_data
- sendto_srom
- getfrom_srom
- build_setup_frame
- enable_ast
- disable_ast
- kick_tx
- de4x5_ioctl
- init_module
- cleanup_module
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144 static char *version = "de4x5.c:v0.32 6/26/95 davies@wanton.lkg.dec.com\n";
145
146 #include <linux/config.h>
147 #ifdef MODULE
148 #include <linux/module.h>
149 #include <linux/version.h>
150 #else
151 #define MOD_INC_USE_COUNT
152 #define MOD_DEC_USE_COUNT
153 #endif
154
155 #include <linux/kernel.h>
156 #include <linux/sched.h>
157 #include <linux/string.h>
158 #include <linux/interrupt.h>
159 #include <linux/ptrace.h>
160 #include <linux/errno.h>
161 #include <linux/ioport.h>
162 #include <linux/malloc.h>
163 #include <linux/bios32.h>
164 #include <linux/pci.h>
165 #include <linux/delay.h>
166 #include <asm/bitops.h>
167 #include <asm/io.h>
168 #include <asm/dma.h>
169 #include <asm/segment.h>
170
171 #include <linux/netdevice.h>
172 #include <linux/etherdevice.h>
173 #include <linux/skbuff.h>
174
175 #include <linux/time.h>
176 #include <linux/types.h>
177 #include <linux/unistd.h>
178
179 #include "de4x5.h"
180
181 #ifdef DE4X5_DEBUG
182 static int de4x5_debug = DE4X5_DEBUG;
183 #else
184 static int de4x5_debug = 1;
185 #endif
186
187 #ifdef DE4X5_AUTOSENSE
188 static int de4x5_autosense = DE4X5_AUTOSENSE;
189 #else
190 static int de4x5_autosense = AUTO;
191 #endif
192
193 #ifdef DE4X5_FULL_DUPLEX
194 static s32 de4x5_full_duplex = 1;
195 #else
196 static s32 de4x5_full_duplex = 0;
197 #endif
198
199 #define DE4X5_NDA 0xffe0
200
201
202
203
204 #define PROBE_LENGTH 32
205 #define ETH_PROM_SIG 0xAA5500FFUL
206
207
208
209
210 #define PKT_BUF_SZ 1536
211 #define MAX_PKT_SZ 1514
212 #define MAX_DAT_SZ 1500
213 #define MIN_DAT_SZ 1
214 #define PKT_HDR_LEN 14
215 #define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
216 #define QUEUE_PKT_TIMEOUT (3*HZ)
217
218
219 #define CRC_POLYNOMIAL_BE 0x04c11db7UL
220 #define CRC_POLYNOMIAL_LE 0xedb88320UL
221
222
223
224
225 #define DE4X5_EISA_IO_PORTS 0x0c00
226 #define DE4X5_EISA_TOTAL_SIZE 0xfff
227
228 #define MAX_EISA_SLOTS 16
229 #define EISA_SLOT_INC 0x1000
230
231 #define DE4X5_SIGNATURE {"DE425",""}
232 #define DE4X5_NAME_LENGTH 8
233
234
235
236
237 #define PCI_MAX_BUS_NUM 8
238 #define DE4X5_PCI_TOTAL_SIZE 0x80
239 #define DE4X5_CLASS_CODE 0x00020000
240
241
242
243
244
245
246
247 #define ALIGN4 ((u_long)4 - 1)
248 #define ALIGN8 ((u_long)8 - 1)
249 #define ALIGN16 ((u_long)16 - 1)
250 #define ALIGN32 ((u_long)32 - 1)
251 #define ALIGN64 ((u_long)64 - 1)
252 #define ALIGN128 ((u_long)128 - 1)
253
254 #define ALIGN ALIGN32
255 #define CACHE_ALIGN CAL_16LONG
256 #define DESC_SKIP_LEN DSL_0
257
258 #define DESC_ALIGN
259
260 #ifndef IS_NOT_DEC
261 static int is_not_dec = 0;
262 #else
263 static int is_not_dec = 1;
264 #endif
265
266
267
268
269 #define ENABLE_IRQs { \
270 imr |= lp->irq_en;\
271 outl(imr, DE4X5_IMR); \
272 }
273
274 #define DISABLE_IRQs {\
275 imr = inl(DE4X5_IMR);\
276 imr &= ~lp->irq_en;\
277 outl(imr, DE4X5_IMR); \
278 }
279
280 #define UNMASK_IRQs {\
281 imr |= lp->irq_mask;\
282 outl(imr, DE4X5_IMR); \
283 }
284
285 #define MASK_IRQs {\
286 imr = inl(DE4X5_IMR);\
287 imr &= ~lp->irq_mask;\
288 outl(imr, DE4X5_IMR); \
289 }
290
291
292
293
294 #define START_DE4X5 {\
295 omr = inl(DE4X5_OMR);\
296 omr |= OMR_ST | OMR_SR;\
297 outl(omr, DE4X5_OMR); \
298 }
299
300 #define STOP_DE4X5 {\
301 omr = inl(DE4X5_OMR);\
302 omr &= ~(OMR_ST|OMR_SR);\
303 outl(omr, DE4X5_OMR); \
304 }
305
306
307
308
309 #define RESET_SIA outl(0, DE4X5_SICR);
310
311
312
313
314 #define DE4X5_AUTOSENSE_MS 250
315
316
317
318
319 struct de4x5_srom {
320 char reserved[18];
321 char version;
322 char num_adapters;
323 char ieee_addr[6];
324 char info[100];
325 short chksum;
326 };
327
328
329
330
331
332
333
334
335
336 #define NUM_RX_DESC 8
337 #define NUM_TX_DESC 32
338 #define BUFF_ALLOC_RETRIES 10
339 #define RX_BUFF_SZ 1536
340
341 struct de4x5_desc {
342 volatile s32 status;
343 u32 des1;
344 u32 buf;
345 u32 next;
346 DESC_ALIGN
347 };
348
349
350
351
352 #define DE4X5_PKT_STAT_SZ 16
353 #define DE4X5_PKT_BIN_SZ 128
354
355
356 struct de4x5_private {
357 char adapter_name[80];
358 struct de4x5_desc rx_ring[NUM_RX_DESC];
359 struct de4x5_desc tx_ring[NUM_TX_DESC];
360 struct sk_buff *skb[NUM_TX_DESC];
361 int rx_new, rx_old;
362 int tx_new, tx_old;
363 char setup_frame[SETUP_FRAME_LEN];
364 struct enet_statistics stats;
365 struct {
366 u_int bins[DE4X5_PKT_STAT_SZ];
367 u_int unicast;
368 u_int multicast;
369 u_int broadcast;
370 u_int excessive_collisions;
371 u_int tx_underruns;
372 u_int excessive_underruns;
373 } pktStats;
374 char rxRingSize;
375 char txRingSize;
376 int bus;
377 int bus_num;
378 int chipset;
379 s32 irq_mask;
380 s32 irq_en;
381 int media;
382 int linkProb;
383 int autosense;
384 int tx_enable;
385 int lostMedia;
386 int setup_f;
387 };
388
389
390
391
392
393
394
395
396
397 #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
398 lp->tx_old+lp->txRingSize-lp->tx_new-1:\
399 lp->tx_old -lp->tx_new-1)
400
401
402
403
404 static int de4x5_open(struct device *dev);
405 static int de4x5_queue_pkt(struct sk_buff *skb, struct device *dev);
406 static void de4x5_interrupt(int irq, struct pt_regs *regs);
407 static int de4x5_close(struct device *dev);
408 static struct enet_statistics *de4x5_get_stats(struct device *dev);
409 static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);
410 static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd);
411
412
413
414
415 static int de4x5_hw_init(struct device *dev, u_long iobase);
416 static int de4x5_init(struct device *dev);
417 static int de4x5_rx(struct device *dev);
418 static int de4x5_tx(struct device *dev);
419 static int de4x5_ast(struct device *dev);
420
421 static int autoconf_media(struct device *dev);
422 static void create_packet(struct device *dev, char *frame, int len);
423 static void dce_us_delay(u32 usec);
424 static void dce_ms_delay(u32 msec);
425 static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb);
426 static void dc21040_autoconf(struct device *dev);
427 static void dc21041_autoconf(struct device *dev);
428 static void dc21140_autoconf(struct device *dev);
429 static int test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
430
431 static int ping_media(struct device *dev);
432 static void reset_init_sia(struct device *dev, s32 sicr, s32 strr, s32 sigr);
433 static int test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec);
434 static void load_ms_timer(struct device *dev, u32 msec);
435 static int EISA_signature(char *name, s32 eisa_id);
436 static int DevicePresent(u_long iobase);
437 static short srom_rd(u_long address, u_char offset);
438 static void srom_latch(u_int command, u_long address);
439 static void srom_command(u_int command, u_long address);
440 static void srom_address(u_int command, u_long address, u_char offset);
441 static short srom_data(u_int command, u_long address);
442
443 static void sendto_srom(u_int command, u_long addr);
444 static int getfrom_srom(u_long addr);
445 static void SetMulticastFilter(struct device *dev, int num_addrs, char *addrs);
446 static int get_hw_addr(struct device *dev);
447
448 static void eisa_probe(struct device *dev, u_long iobase);
449 static void pci_probe(struct device *dev, u_long iobase);
450 static struct device *alloc_device(struct device *dev, u_long iobase);
451 static char *build_setup_frame(struct device *dev, int mode);
452 static void disable_ast(struct device *dev);
453 static void enable_ast(struct device *dev, u32 time_out);
454 static void kick_tx(struct device *dev);
455
456 #ifdef MODULE
457 int init_module(void);
458 void cleanup_module(void);
459 static int autoprobed = 1, loading_module = 1;
460 # else
461 static unsigned char de4x5_irq[] = {5,9,10,11};
462 static int autoprobed = 0, loading_module = 0;
463 #endif
464
465 static char name[DE4X5_NAME_LENGTH + 1];
466 static int num_de4x5s = 0, num_eth = 0;
467
468
469
470
471
472
473 static struct bus_type {
474 int bus;
475 int bus_num;
476 int device;
477 int chipset;
478 struct de4x5_srom srom;
479 int autosense;
480 } bus;
481
482
483
484
485 #define RESET_DE4X5 {\
486 int i;\
487 i=inl(DE4X5_BMR);\
488 dce_ms_delay(1);\
489 outl(i | BMR_SWR, DE4X5_BMR);\
490 dce_ms_delay(1);\
491 outl(i, DE4X5_BMR);\
492 dce_ms_delay(1);\
493 for (i=0;i<5;i++) {inl(DE4X5_BMR); dce_ms_delay(1);}\
494 dce_ms_delay(1);\
495 }
496
497
498
499 int de4x5_probe(struct device *dev)
500 {
501 int tmp = num_de4x5s, status = -ENODEV;
502 u_long iobase = dev->base_addr;
503
504 if ((iobase == 0) && loading_module){
505 printk("Autoprobing is not supported when loading a module based driver.\n");
506 status = -EIO;
507 } else {
508 eisa_probe(dev, iobase);
509 pci_probe(dev, iobase);
510
511 if ((tmp == num_de4x5s) && (iobase != 0) && loading_module) {
512 printk("%s: de4x5_probe() cannot find device at 0x%04lx.\n", dev->name,
513 iobase);
514 }
515
516
517
518
519
520 for (; (dev->priv == NULL) && (dev->next != NULL); dev = dev->next);
521
522 if (dev->priv) status = 0;
523 if (iobase == 0) autoprobed = 1;
524 }
525
526 return status;
527 }
528
/*
** One-time hardware initialisation for a detected adapter at `iobase`.
** Identifies the board, reads the station address, allocates and aligns
** the private area and RX buffer memory, sets up the descriptor rings,
** probes the IRQ line (non-module builds) and fills in the struct device
** method pointers. Returns 0 on success, -ENXIO on any failure (the
** region and private memory are released on the failure path).
*/
static int
de4x5_hw_init(struct device *dev, u_long iobase)
{
    struct bus_type *lp = &bus;
    int tmpbus, tmpchs, i, j, status=0;
    char *tmp;

    /* Wake a DC21041 from its power-saving state before touching CSRs. */
    if (lp->chipset == DC21041) {
	outl(0, PCI_CFDA);
	dce_ms_delay(10);
    }

    RESET_DE4X5;

    /* After a software reset both TX and RX processes must be stopped. */
    if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) == 0) {
	/* Derive a printable board name from the chipset (PCI) or the
	** EISA configuration registers.
	*/
	if (lp->bus == PCI) {
	    if (!is_not_dec) {
		if ((lp->chipset == DC21040) || (lp->chipset == DC21041)) {
		    strcpy(name, "DE435");
		} else if (lp->chipset == DC21140) {
		    strcpy(name, "DE500");
		}
	    } else {
		strcpy(name, "UNKNOWN");
	    }
	} else {
	    EISA_signature(name, EISA_ID0);
	}

	if (*name != '\0') {
	    dev->base_addr = iobase;
	    if (lp->bus == EISA) {
		printk("%s: %s at %04lx (EISA slot %ld)",
		       dev->name, name, iobase, ((iobase>>12)&0x0f));
	    } else {
		printk("%s: %s at %04lx (PCI bus %d, device %d)", dev->name, name,
		       iobase, lp->bus_num, lp->device);
	    }

	    /* Read and report the hardware (station) address; non-zero
	    ** status here means a SROM/PROM CRC error.
	    */
	    printk(", h/w address ");
	    status = get_hw_addr(dev);
	    for (i = 0; i < ETH_ALEN - 1; i++) {
		printk("%2.2x:", dev->dev_addr[i]);
	    }
	    printk("%2.2x,\n", dev->dev_addr[i]);

	    /* Save bus info before `lp` is shadowed by the private struct. */
	    tmpbus = lp->bus;
	    tmpchs = lp->chipset;

	    if (status == 0) {
		struct de4x5_private *lp;

		/* Over-allocate so the private area (and hence the
		** descriptor rings inside it) can be rounded up to an
		** ALIGN(+1)-byte boundary, as the DMA engine requires.
		** NOTE(review): the kmalloc() result is not checked for
		** NULL before being aligned and memset — verify upstream
		** guarantees or add a check.
		*/
		dev->priv = (void *) kmalloc(sizeof(struct de4x5_private) + ALIGN,
					     GFP_KERNEL);

		dev->priv = (void *)(((u_long)dev->priv + ALIGN) & ~ALIGN);
		lp = (struct de4x5_private *)dev->priv;
		memset(dev->priv, 0, sizeof(struct de4x5_private));
		lp->bus = tmpbus;
		lp->chipset = tmpchs;

		/* Reduce the user-requested autosense mode to what this
		** chipset actually supports.
		*/
		if (de4x5_autosense & AUTO) {
		    lp->autosense = AUTO;
		} else {
		    if (lp->chipset != DC21140) {
			if ((lp->chipset == DC21040) && (de4x5_autosense & TP_NW)) {
			    de4x5_autosense = TP;
			}
			if ((lp->chipset == DC21041) && (de4x5_autosense & BNC_AUI)) {
			    de4x5_autosense = BNC;
			}
			lp->autosense = de4x5_autosense & 0x001f;
		    } else {
			lp->autosense = de4x5_autosense & 0x00c0;
		    }
		}

		sprintf(lp->adapter_name,"%s (%s)", name, dev->name);
		request_region(iobase, (lp->bus == PCI ? DE4X5_PCI_TOTAL_SIZE :
					DE4X5_EISA_TOTAL_SIZE),
			       lp->adapter_name);

		/* Allocate one contiguous, aligned chunk for all RX
		** buffers; retry the allocation a few times before
		** giving up.
		*/
		for (tmp=NULL, j=0; (j<BUFF_ALLOC_RETRIES) && (tmp==NULL); j++) {
		    if ((tmp = (void *)kmalloc(RX_BUFF_SZ * NUM_RX_DESC + ALIGN,
					       GFP_KERNEL)) != NULL) {
			tmp = (char *)(((u_long) tmp + ALIGN) & ~ALIGN);
			for (i=0; i<NUM_RX_DESC; i++) {
			    lp->rx_ring[i].status = 0;
			    lp->rx_ring[i].des1 = RX_BUFF_SZ;
			    lp->rx_ring[i].buf = virt_to_bus(tmp + i * RX_BUFF_SZ);
			    lp->rx_ring[i].next = (u32)NULL;
			}
			barrier();
		    }
		}

		if (tmp != NULL) {
		    lp->rxRingSize = NUM_RX_DESC;
		    lp->txRingSize = NUM_TX_DESC;

		    /* Mark the last descriptor of each ring so the chip
		    ** wraps back to the base address.
		    */
		    lp->rx_ring[lp->rxRingSize - 1].des1 |= RD_RER;
		    lp->tx_ring[lp->txRingSize - 1].des1 |= TD_TER;

		    /* Tell the chip where the rings live (bus addresses). */
		    outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
		    outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);

		    lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM ;
		    lp->irq_en = IMR_NIM | IMR_AIM;

		    lp->tx_enable = TRUE;

		    if (dev->irq < 2) {
#ifndef MODULE
			/* Autoprobe the IRQ by enabling the receiver and
			** watching which line fires, then validate it
			** against the known-good list.
			*/
			unsigned char irqnum;
			s32 omr;
			autoirq_setup(0);

			omr = inl(DE4X5_OMR);
			outl(IMR_AIM|IMR_RUM, DE4X5_IMR);
			outl(OMR_SR | omr, DE4X5_OMR);

			irqnum = autoirq_report(1);
			if (!irqnum) {
			    printk(" and failed to detect IRQ line.\n");
			    status = -ENXIO;
			} else {
			    for (dev->irq=0,i=0; (i<sizeof(de4x5_irq)) && (!dev->irq); i++) {
				if (irqnum == de4x5_irq[i]) {
				    dev->irq = irqnum;
				    printk(" and uses IRQ%d.\n", dev->irq);
				}
			    }

			    if (!dev->irq) {
				printk(" but incorrect IRQ line detected.\n");
				status = -ENXIO;
			    }
			}

			/* Mask all interrupts again until de4x5_open(). */
			outl(0, DE4X5_IMR);

#endif
		    } else {
			printk(" and requires IRQ%d (not probed).\n", dev->irq);
		    }
		} else {
		    printk("%s: Kernel could not allocate RX buffer memory.\n",
			   dev->name);
		    status = -ENXIO;
		}
		if (status) release_region(iobase, (lp->bus == PCI ?
						    DE4X5_PCI_TOTAL_SIZE :
						    DE4X5_EISA_TOTAL_SIZE));
	    } else {
		printk(" which has an Ethernet PROM CRC error.\n");
		status = -ENXIO;
	    }
	} else {
	    status = -ENXIO;
	}
    } else {
	status = -ENXIO;
    }

    if (!status) {
	if (de4x5_debug > 0) {
	    printk(version);
	}

	/* Install the driver entry points. */
	dev->open = &de4x5_open;
	dev->hard_start_xmit = &de4x5_queue_pkt;
	dev->stop = &de4x5_close;
	dev->get_stats = &de4x5_get_stats;
#ifdef HAVE_MULTICAST
	dev->set_multicast_list = &set_multicast_list;
#endif
	dev->do_ioctl = &de4x5_ioctl;

	dev->mem_start = 0;

	/* Fill in the generic ethernet fields. */
	ether_setup(dev);

	/* Put a DC21041 back to sleep until the device is opened. */
	if (lp->chipset == DC21041) {
	    outl(0, DE4X5_SICR);
	    outl(CFDA_PSM, PCI_CFDA);
	}
    } else {
	/* Failure: release the RX buffer memory and the private area. */
	struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
	if (lp) {
	    kfree_s(bus_to_virt(lp->rx_ring[0].buf),
		    RX_BUFF_SZ * NUM_RX_DESC + ALIGN);
	}
	if (dev->priv) {
	    kfree_s(dev->priv, sizeof(struct de4x5_private) + ALIGN);
	    dev->priv = NULL;
	}
    }

    return status;
}
753
754
/*
** Device open. Wakes a DC21041 from snooze mode, claims the IRQ,
** (re)initialises the rings and hardware via de4x5_init(), then starts
** the TX/RX processes and enables interrupts.
** Returns 0 on success or -EAGAIN if the IRQ is busy.
*/
static int
de4x5_open(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;
    int i, status = 0;
    s32 imr, omr, sts;

    /* Wake the chip before touching any other CSRs (DC21041 power
    ** management lives in the PCI CFDA register).
    */
    if (lp->chipset == DC21041) {
	outl(0, PCI_CFDA);
	dce_ms_delay(10);
    }

    if (request_irq(dev->irq, (void *)de4x5_interrupt, 0, lp->adapter_name)) {
	printk("de4x5_open(): Requested IRQ%d is busy\n",dev->irq);
	status = -EAGAIN;
    } else {
	/* Record the IRQ->device mapping used by the interrupt handler. */
	irq2dev_map[dev->irq] = dev;

	status = de4x5_init(dev);

	/* Optional verbose dump of the descriptor rings and buffers. */
	if (de4x5_debug > 1){
	    printk("%s: de4x5 open with irq %d\n",dev->name,dev->irq);
	    printk("\tphysical address: ");
	    for (i=0;i<6;i++){
		printk("%2.2x:",(short)dev->dev_addr[i]);
	    }
	    printk("\n");
	    printk("Descriptor head addresses:\n");
	    printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
	    printk("Descriptor addresses:\nRX: ");
	    for (i=0;i<lp->rxRingSize-1;i++){
		if (i < 3) {
		    printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
		}
	    }
	    printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
	    printk("TX: ");
	    for (i=0;i<lp->txRingSize-1;i++){
		if (i < 3) {
		    printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
		}
	    }
	    printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
	    printk("Descriptor buffers:\nRX: ");
	    for (i=0;i<lp->rxRingSize-1;i++){
		if (i < 3) {
		    printk("0x%8.8x ",lp->rx_ring[i].buf);
		}
	    }
	    printk("...0x%8.8x\n",lp->rx_ring[i].buf);
	    printk("TX: ");
	    for (i=0;i<lp->txRingSize-1;i++){
		if (i < 3) {
		    printk("0x%8.8x ", lp->tx_ring[i].buf);
		}
	    }
	    printk("...0x%8.8x\n", lp->tx_ring[i].buf);
	    printk("Ring size: \nRX: %d\nTX: %d\n",
		   (short)lp->rxRingSize,
		   (short)lp->txRingSize);
	    printk("\tstatus: %d\n", status);
	}

	if (!status) {
	    dev->tbusy = 0;
	    dev->start = 1;
	    dev->interrupt = UNMASK_INTERRUPTS;
	    dev->trans_start = jiffies;

	    /* Start the TX and RX processes... */
	    START_DE4X5;

	    /* ...unmask the per-event interrupt sources... */
	    imr = 0;
	    UNMASK_IRQs;

	    /* ...acknowledge any stale status bits... */
	    sts = inl(DE4X5_STS);
	    outl(sts, DE4X5_STS);

	    /* ...and finally enable the summary interrupt lines. */
	    ENABLE_IRQs;
	}
	if (de4x5_debug > 1) {
	    printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
	    printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
	    printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
	    printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
	    printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
	    printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
	    printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
	    printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
	}
    }

    MOD_INC_USE_COUNT;

    return status;
}
859
860
861
862
863
864
865
866
867
868 static int
869 de4x5_init(struct device *dev)
870 {
871 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
872 u_long iobase = dev->base_addr;
873 int i, j, status = 0;
874 s32 bmr, omr;
875
876
877 set_bit(0, (void *)&dev->tbusy);
878
879 RESET_DE4X5;
880
881 bmr = inl(DE4X5_BMR);
882 bmr |= PBL_8 | DESC_SKIP_LEN | CACHE_ALIGN;
883 outl(bmr, DE4X5_BMR);
884
885 if (lp->chipset != DC21140) {
886 omr = TR_96;
887 lp->setup_f = HASH_PERF;
888 } else {
889 omr = OMR_SDP | OMR_SF;
890 lp->setup_f = PERFECT;
891 }
892 outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
893 outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
894
895 lp->rx_new = lp->rx_old = 0;
896 lp->tx_new = lp->tx_old = 0;
897
898 for (i = 0; i < lp->rxRingSize; i++) {
899 lp->rx_ring[i].status = R_OWN;
900 }
901
902 for (i = 0; i < lp->txRingSize; i++) {
903 lp->tx_ring[i].status = 0;
904 }
905
906 barrier();
907
908
909 SetMulticastFilter(dev, 0, NULL);
910
911 if (lp->chipset != DC21140) {
912 load_packet(dev, lp->setup_frame, HASH_F|TD_SET|SETUP_FRAME_LEN, NULL);
913 } else {
914 load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, NULL);
915 }
916 outl(omr|OMR_ST, DE4X5_OMR);
917
918
919 for (j=0, i=jiffies;(i<=jiffies+HZ/100) && (j==0);) {
920 if (lp->tx_ring[lp->tx_new].status >= 0) j=1;
921 }
922 outl(omr, DE4X5_OMR);
923
924 if (j == 0) {
925 printk("%s: Setup frame timed out, status %08x\n", dev->name,
926 inl(DE4X5_STS));
927 status = -EIO;
928 }
929
930 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
931 lp->tx_old = lp->tx_new;
932
933
934 if (autoconf_media(dev) == 0) {
935 status = -EIO;
936 }
937
938 return 0;
939 }
940
941
942
943
944 static int
945 de4x5_queue_pkt(struct sk_buff *skb, struct device *dev)
946 {
947 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
948 u_long iobase = dev->base_addr;
949 int i, status = 0;
950 s32 imr, omr, sts;
951
952
953
954
955
956
957 if (set_bit(0, (void*)&dev->tbusy) == 0) {
958 cli();
959 de4x5_tx(dev);
960 dev->tbusy = 0;
961 sti();
962 }
963
964
965
966
967
968
969 if (dev->tbusy || (lp->lostMedia > LOST_MEDIA_THRESHOLD)) {
970 u_long tickssofar = jiffies - dev->trans_start;
971 if ((tickssofar < QUEUE_PKT_TIMEOUT) &&
972 (lp->lostMedia <= LOST_MEDIA_THRESHOLD)) {
973 status = -1;
974 } else {
975 if (de4x5_debug >= 1) {
976 printk("%s: transmit timed out, status %08x, tbusy:%ld, lostMedia:%d tickssofar:%ld, resetting.\n",dev->name, inl(DE4X5_STS), dev->tbusy, lp->lostMedia, tickssofar);
977 }
978
979
980 STOP_DE4X5;
981
982
983 for (i=lp->tx_old; i!=lp->tx_new; i=(++i)%lp->txRingSize) {
984 if (lp->skb[i] != NULL) {
985 if (lp->skb[i]->len != FAKE_FRAME_LEN) {
986 if (lp->tx_ring[i].status == T_OWN) {
987 dev_queue_xmit(lp->skb[i], dev, SOPRI_NORMAL);
988 } else {
989 dev_kfree_skb(lp->skb[i], FREE_WRITE);
990 }
991 } else {
992 dev_kfree_skb(lp->skb[i], FREE_WRITE);
993 }
994 lp->skb[i] = NULL;
995 }
996 }
997 if (skb->len != FAKE_FRAME_LEN) {
998 dev_queue_xmit(skb, dev, SOPRI_NORMAL);
999 } else {
1000 dev_kfree_skb(skb, FREE_WRITE);
1001 }
1002
1003
1004 status = de4x5_init(dev);
1005
1006
1007 if (!status) {
1008
1009 dev->interrupt = UNMASK_INTERRUPTS;
1010 dev->start = 1;
1011 dev->tbusy = 0;
1012 dev->trans_start = jiffies;
1013
1014 START_DE4X5;
1015
1016
1017 imr = 0;
1018 UNMASK_IRQs;
1019
1020
1021 sts = inl(DE4X5_STS);
1022 outl(sts, DE4X5_STS);
1023
1024 ENABLE_IRQs;
1025 } else {
1026 printk("%s: hardware initialisation failure, status %08x.\n",
1027 dev->name, inl(DE4X5_STS));
1028 }
1029 }
1030 } else if (skb == NULL) {
1031 dev_tint(dev);
1032 } else if (skb->len == FAKE_FRAME_LEN) {
1033 dev_kfree_skb(skb, FREE_WRITE);
1034 } else if (skb->len > 0) {
1035
1036 if (set_bit(0, (void*)&dev->tbusy) != 0) {
1037 printk("%s: Transmitter access conflict.\n", dev->name);
1038 status = -1;
1039 } else {
1040 cli();
1041 if (TX_BUFFS_AVAIL) {
1042 load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
1043 if (lp->tx_enable) {
1044 outl(POLL_DEMAND, DE4X5_TPD);
1045 }
1046
1047 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
1048 dev->trans_start = jiffies;
1049
1050 if (TX_BUFFS_AVAIL) {
1051 dev->tbusy = 0;
1052 }
1053 } else {
1054 status = -1;
1055 }
1056 sti();
1057 }
1058 }
1059
1060 return status;
1061 }
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
/*
** Interrupt service routine. Masks the board's interrupt sources,
** repeatedly acknowledges and dispatches pending status bits (RX, TX,
** timer, link fail, system error) until none remain, then restarts the
** upper layer if TX descriptors freed up and re-enables interrupts.
*/
static void
de4x5_interrupt(int irq, struct pt_regs *regs)
{
    struct device *dev = (struct device *)(irq2dev_map[irq]);
    struct de4x5_private *lp;
    s32 imr, omr, sts;
    u_long iobase;

    if (dev == NULL) {
	printk ("de4x5_interrupt(): irq %d for unknown device.\n", irq);
    } else {
	lp = (struct de4x5_private *)dev->priv;
	iobase = dev->base_addr;

	if (dev->interrupt)
	    printk("%s: Re-entering the interrupt handler.\n", dev->name);

	DISABLE_IRQs;
	dev->interrupt = MASK_INTERRUPTS;

	/* Service events until the status register is quiet; writing the
	** bits back acknowledges them.
	*/
	while ((sts = inl(DE4X5_STS)) & lp->irq_mask) {
	    outl(sts, DE4X5_STS);

	    if (sts & (STS_RI | STS_RU))	/* RX interrupt / RX buffer unavailable */
		de4x5_rx(dev);

	    if (sts & (STS_TI | STS_TU))	/* TX interrupt / TX buffer unavailable */
		de4x5_tx(dev);

	    if (sts & STS_TM)			/* autosense timer expired */
		de4x5_ast(dev);

	    if (sts & STS_LNF) {		/* link fail */
		lp->lostMedia = LOST_MEDIA_THRESHOLD + 1;
		lp->irq_mask &= ~IMR_LFM;
		kick_tx(dev);
	    }

	    if (sts & STS_SE) {			/* fatal bus (system) error */
		STOP_DE4X5;
		printk("%s: Fatal bus error occured, sts=%#8x, device stopped.\n",
		       dev->name, sts);
	    }
	}

	/* Wake the queueing layer if descriptors became available. */
	if (TX_BUFFS_AVAIL && dev->tbusy) {
	    dev->tbusy = 0;
	    mark_bh(NET_BH);
	}

	dev->interrupt = UNMASK_INTERRUPTS;
	ENABLE_IRQs;
    }

    return;
}
1130
1131 static int
1132 de4x5_rx(struct device *dev)
1133 {
1134 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1135 int i, entry;
1136 s32 status;
1137 char *buf;
1138
1139 for (entry = lp->rx_new; lp->rx_ring[entry].status >= 0;entry = lp->rx_new) {
1140 status = lp->rx_ring[entry].status;
1141
1142 if (status & RD_FS) {
1143 lp->rx_old = entry;
1144 }
1145
1146 if (status & RD_LS) {
1147 if (status & RD_ES) {
1148 lp->stats.rx_errors++;
1149 if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
1150 if (status & RD_CE) lp->stats.rx_crc_errors++;
1151 if (status & RD_OF) lp->stats.rx_fifo_errors++;
1152 } else {
1153 struct sk_buff *skb;
1154 short pkt_len = (short)(lp->rx_ring[entry].status >> 16) - 4;
1155
1156 if ((skb = dev_alloc_skb(pkt_len+2)) != NULL) {
1157 skb->dev = dev;
1158
1159 skb_reserve(skb,2);
1160 if (entry < lp->rx_old) {
1161 short len = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
1162 memcpy(skb_put(skb,len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), len);
1163 memcpy(skb_put(skb,pkt_len-len), bus_to_virt(lp->rx_ring[0].buf), pkt_len - len);
1164 } else {
1165 memcpy(skb_put(skb,pkt_len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), pkt_len);
1166 }
1167
1168
1169 skb->protocol=eth_type_trans(skb,dev);
1170 netif_rx(skb);
1171
1172
1173 lp->stats.rx_packets++;
1174 for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
1175 if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
1176 lp->pktStats.bins[i]++;
1177 i = DE4X5_PKT_STAT_SZ;
1178 }
1179 }
1180 buf = skb->data;
1181 if (buf[0] & 0x01) {
1182 if ((*(s32 *)&buf[0] == -1) && (*(s16 *)&buf[4] == -1)) {
1183 lp->pktStats.broadcast++;
1184 } else {
1185 lp->pktStats.multicast++;
1186 }
1187 } else if ((*(s32 *)&buf[0] == *(s32 *)&dev->dev_addr[0]) &&
1188 (*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) {
1189 lp->pktStats.unicast++;
1190 }
1191
1192 lp->pktStats.bins[0]++;
1193 if (lp->pktStats.bins[0] == 0) {
1194 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
1195 }
1196 } else {
1197 printk("%s: Insufficient memory; nuking packet.\n", dev->name);
1198 lp->stats.rx_dropped++;
1199 break;
1200 }
1201 }
1202
1203
1204 for (; lp->rx_old!=entry; lp->rx_old=(++lp->rx_old)%lp->rxRingSize) {
1205 lp->rx_ring[lp->rx_old].status = R_OWN;
1206 barrier();
1207 }
1208 lp->rx_ring[entry].status = R_OWN;
1209 barrier();
1210 }
1211
1212
1213
1214
1215 lp->rx_new = (++lp->rx_new) % lp->rxRingSize;
1216 }
1217
1218 return 0;
1219 }
1220
1221
1222
1223
1224 static int
1225 de4x5_tx(struct device *dev)
1226 {
1227 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1228 u_long iobase = dev->base_addr;
1229 int entry;
1230 s32 status;
1231
1232 for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
1233 status = lp->tx_ring[entry].status;
1234 if (status < 0) {
1235 break;
1236 } else if (status & TD_ES) {
1237 lp->stats.tx_errors++;
1238 if (status & TD_NC) lp->stats.tx_carrier_errors++;
1239 if (status & TD_LC) lp->stats.tx_window_errors++;
1240 if (status & TD_UF) lp->stats.tx_fifo_errors++;
1241 if (status & TD_LC) lp->stats.collisions++;
1242 if (status & TD_EC) lp->pktStats.excessive_collisions++;
1243 if (status & TD_DE) lp->stats.tx_aborted_errors++;
1244
1245 if ((status != 0x7fffffff) &&
1246 (status & (TD_LO | TD_NC | TD_EC | TD_LF))) {
1247 lp->lostMedia++;
1248 if (lp->lostMedia > LOST_MEDIA_THRESHOLD) {
1249 kick_tx(dev);
1250 }
1251 } else {
1252 outl(POLL_DEMAND, DE4X5_TPD);
1253 }
1254 } else {
1255 lp->stats.tx_packets++;
1256 lp->lostMedia = 0;
1257 }
1258
1259 if (lp->skb[entry] != NULL) {
1260 dev_kfree_skb(lp->skb[entry], FREE_WRITE);
1261 lp->skb[entry] = NULL;
1262 }
1263
1264
1265 lp->tx_old = (++lp->tx_old) % lp->txRingSize;
1266 }
1267
1268 return 0;
1269 }
1270
1271 static int
1272 de4x5_ast(struct device *dev)
1273 {
1274 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1275 u_long iobase = dev->base_addr;
1276 s32 gep;
1277
1278 disable_ast(dev);
1279
1280 if (lp->chipset == DC21140) {
1281 gep = inl(DE4X5_GEP);
1282 if (((lp->media == _100Mb) && (gep & GEP_SLNK)) ||
1283 ((lp->media == _10Mb) && (gep & GEP_LNP)) ||
1284 ((lp->media == _10Mb) && !(gep & GEP_SLNK)) ||
1285 (lp->media == NC)) {
1286 if (lp->linkProb || ((lp->media == NC) && (!(gep & GEP_LNP)))) {
1287 lp->lostMedia = LOST_MEDIA_THRESHOLD + 1;
1288 lp->linkProb = 0;
1289 kick_tx(dev);
1290 } else {
1291 switch(lp->media) {
1292 case NC:
1293 lp->linkProb = 0;
1294 enable_ast(dev, DE4X5_AUTOSENSE_MS);
1295 break;
1296
1297 case _10Mb:
1298 lp->linkProb = 1;
1299 enable_ast(dev, 1500);
1300 break;
1301
1302 case _100Mb:
1303 lp->linkProb = 1;
1304 enable_ast(dev, 4000);
1305 break;
1306 }
1307 }
1308 } else {
1309 lp->linkProb = 0;
1310 enable_ast(dev, DE4X5_AUTOSENSE_MS);
1311 }
1312 }
1313
1314 return 0;
1315 }
1316
/*
** Device close. Marks the interface down, disables interrupts, stops
** the TX/RX processes, releases the IRQ and puts a DC21041 back into
** its power-saving state. Always returns 0.
*/
static int
de4x5_close(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;
    s32 imr, omr;

    dev->start = 0;
    dev->tbusy = 1;

    if (de4x5_debug > 1) {
	printk("%s: Shutting down ethercard, status was %8.8x.\n",
	       dev->name, inl(DE4X5_STS));
    }

    /* Silence the chip before stopping it so no late interrupt fires. */
    DISABLE_IRQs;

    STOP_DE4X5;

    /* Release the IRQ and clear the handler's device mapping. */
    free_irq(dev->irq);
    irq2dev_map[dev->irq] = 0;

    MOD_DEC_USE_COUNT;

    /* Put a DC21041 to sleep until the next open. */
    if (lp->chipset == DC21041) {
	outl(0, DE4X5_SICR);
	outl(CFDA_PSM, PCI_CFDA);
    }

    return 0;
}
1355
1356 static struct enet_statistics *
1357 de4x5_get_stats(struct device *dev)
1358 {
1359 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1360 u_long iobase = dev->base_addr;
1361
1362 lp->stats.rx_missed_errors = (int) (inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
1363
1364 return &lp->stats;
1365 }
1366
/*
** Fill the next TX descriptor (lp->tx_new) with `buf`/`flags` and hand
** it to the chip. `skb` (may be NULL for setup frames) is remembered so
** de4x5_tx() can free it on completion. The des1 field keeps only the
** ring-wrap bit (TD_TER) before the new flags are ORed in, and the
** ownership bit is written last, after a barrier, so the chip never
** sees a half-built descriptor. The caller advances lp->tx_new.
*/
static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;

    lp->tx_ring[lp->tx_new].buf = virt_to_bus(buf);
    lp->tx_ring[lp->tx_new].des1 &= TD_TER;
    lp->tx_ring[lp->tx_new].des1 |= flags;
    lp->skb[lp->tx_new] = skb;
    barrier();
    /* Transfer ownership to the chip only once everything else is set. */
    lp->tx_ring[lp->tx_new].status = T_OWN;
    barrier();

    return;
}
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391 static void
1392 set_multicast_list(struct device *dev, int num_addrs, void *addrs)
1393 {
1394 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1395 u_long iobase = dev->base_addr;
1396
1397
1398 if (irq2dev_map[dev->irq] != NULL) {
1399 if (num_addrs >= 0) {
1400 SetMulticastFilter(dev, num_addrs, (char *)addrs);
1401 if (lp->setup_f == HASH_PERF) {
1402 load_packet(dev, lp->setup_frame, TD_IC | HASH_F | TD_SET |
1403 SETUP_FRAME_LEN, NULL);
1404 } else {
1405 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
1406 SETUP_FRAME_LEN, NULL);
1407 }
1408
1409 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
1410 outl(POLL_DEMAND, DE4X5_TPD);
1411 dev->trans_start = jiffies;
1412 }
1413 }
1414
1415 return;
1416 }
1417
1418
1419
1420
1421
1422
/*
** Build the setup frame in lp->setup_frame from the `num_addrs`
** addresses at `addrs`. In HASH_PERF mode each multicast address is
** CRC-32 hashed (little-endian polynomial) into a 512-bit table; in
** PERFECT mode the addresses are written directly into the frame's
** perfect-filter slots. Also updates OMR_PM (pass-all-multicast) when
** the hash table would be full.
*/
static void SetMulticastFilter(struct device *dev, int num_addrs, char *addrs)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;
    int i, j, bit, byte;
    u16 hashcode;
    u32 omr, crc, poly = CRC_POLYNOMIAL_LE;
    char *pa;

    omr = inl(DE4X5_OMR);
    /* Start from a clean frame containing the station address. */
    pa = build_setup_frame(dev, ALL);

    if (lp->setup_f == HASH_PERF) {
	if (num_addrs == HASH_TABLE_LEN) {
	    /* Table full: fall back to pass-all-multicast. */
	    omr |= OMR_PM;
	} else {
	    omr &= ~OMR_PM;

	    for (i=0;i<num_addrs;i++) {
		if ((*addrs & 0x01) == 1) {	/* multicast address? */
		    /* CRC-32 the 6-byte address, bit-serial, LSB first. */
		    crc = 0xffffffff;
		    for (byte=0;byte<ETH_ALEN;byte++) {

			for (bit = *addrs++,j=0;j<8;j++, bit>>=1) {
			    crc = (crc >> 1) ^ (((crc ^ bit) & 0x01) ? poly : 0);
			}
		    }
		    hashcode = crc & HASH_BITS;	/* keep the low-order bits */

		    byte = hashcode >> 3;	/* bit position -> byte/bit */
		    bit = 1 << (hashcode & 0x07);

		    /* Map the table byte index onto the setup frame layout
		    ** (only alternate 16-bit words are used).
		    ** NOTE(review): the <<1 / -1 juggling assumes the frame
		    ** layout from build_setup_frame() — confirm there.
		    */
		    byte <<= 1;
		    if (byte & 0x02) {
			byte -= 1;
		    }
		    lp->setup_frame[byte] |= bit;

		} else {			/* skip unicast entries */
		    addrs += ETH_ALEN;
		}
	    }
	}
    } else {
	/* Perfect filtering: copy each address into the frame, two bytes
	** per 32-bit word.
	*/
	omr &= ~OMR_PM;
	for (j=0; j<num_addrs; j++) {
	    for (i=0; i<ETH_ALEN; i++) {
		*(pa + (i&1)) = *addrs++;
		if (i & 0x01) pa += 4;
	    }
	}
    }
    outl(omr, DE4X5_OMR);

    return;
}
1479
1480
1481
1482
1483
/*
** Probe the EISA bus for DE4x5 boards, either scanning every slot
** (ioaddr == 0) or just the slot encoded in <ioaddr>. For each board
** found, enable it through the EISA-mapped PCI configuration registers
** and hand it to alloc_device()/de4x5_hw_init().
*/
static void eisa_probe(struct device *dev, u_long ioaddr)
{
    int i, maxSlots, status;
    u_short vendor, device;
    s32 cfid;
    u_long iobase;
    struct bus_type *lp = &bus;
    char name[DE4X5_STRLEN];

    if (!ioaddr && autoprobed) return ;            /* Been here before */
    if ((ioaddr < 0x1000) && (ioaddr > 0)) return; /* not an EISA slot address */

    lp->bus = EISA;

    if (ioaddr == 0) {                  /* autoprobe all slots */
	iobase = EISA_SLOT_INC;         /* start at slot 1 */
	i = 1;
	maxSlots = MAX_EISA_SLOTS;
    } else {                            /* probe just the given slot */
	iobase = ioaddr;
	i = (ioaddr >> 12);             /* slot number from the address */
	maxSlots = i + 1;
    }

    for (status = -ENODEV; (i<maxSlots) && (dev!=NULL); i++, iobase+=EISA_SLOT_INC) {
	if (EISA_signature(name, EISA_ID)) {
	    /* Read the chip's configuration ID to identify it */
	    cfid = inl(PCI_CFID);
	    device = (u_short)(cfid >> 16);
	    vendor = (u_short) cfid;

	    lp->bus = EISA;
	    lp->chipset = device;
	    if (DevicePresent(EISA_APROM) == 0) {
		/* Enable I/O accesses and bus mastering, set the bus
		   latency and program the board's base address */
		outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
		outl(0x00004000, PCI_CFLT);
		outl(iobase, PCI_CBIO);

		if (check_region(iobase, DE4X5_EISA_TOTAL_SIZE) == 0) {
		    if ((dev = alloc_device(dev, iobase)) != NULL) {
			if ((status = de4x5_hw_init(dev, iobase)) == 0) {
			    num_de4x5s++;
			}
			num_eth++;       /* count it even if init failed */
		    }
		} else if (autoprobed) {
		    printk("%s: region already allocated at 0x%04lx.\n", dev->name, iobase);
		}
	    }
	}
    }

    return;
}
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
#define PCI_DEVICE (dev_num << 3)    /* devfn for the current device, fn 0 */
#define PCI_LAST_DEV 32              /* max devices per PCI bus */

/*
** Probe the PCI bus(es) for DE4x5 chips via the PCI BIOS. <ioaddr> may
** encode a specific bus/device pair (bus in bits 8+, device in the low
** byte); ioaddr >= 0x1000 or 0 means scan everything. Each matching chip
** is checked for I/O and bus-master enables before being handed to
** alloc_device()/de4x5_hw_init().
*/
static void pci_probe(struct device *dev, u_long ioaddr)
{
    u_char irq;
    u_char pb, pbus, dev_num, dnum, dev_fn;
    u_short vendor, device, index, status;
    u_int class = DE4X5_CLASS_CODE;
    u_int iobase;
    struct bus_type *lp = &bus;

    if (!ioaddr && autoprobed) return ;     /* Been here before */

    if (pcibios_present()) {
	lp->bus = PCI;

	if (ioaddr < 0x1000) {              /* ioaddr selects bus/device */
	    pbus = (u_short)(ioaddr >> 8);  /* bus number */
	    dnum = (u_short)(ioaddr & 0xff);/* device number */
	} else {                            /* scan all buses/devices */
	    pbus = 0;
	    dnum = 0;
	}

	/* Walk every device of the ethernet class code */
	for (index=0;
	     (pcibios_find_class(class, index, &pb, &dev_fn)!= PCIBIOS_DEVICE_NOT_FOUND);
	     index++) {
	    dev_num = PCI_SLOT(dev_fn);

	    if ((!pbus && !dnum) || ((pbus == pb) && (dnum == dev_num))) {
		pcibios_read_config_word(pb, PCI_DEVICE, PCI_VENDOR_ID, &vendor);
		pcibios_read_config_word(pb, PCI_DEVICE, PCI_DEVICE_ID, &device);
		if (is_DC21040 || is_DC21041 || is_DC21140) {
		    /* Remember where this chip lives */
		    lp->device = dev_num;
		    lp->bus_num = pb;

		    /* Set the chip type */
		    lp->chipset = device;

		    /* Get the board's I/O base address */
		    pcibios_read_config_dword(pb, PCI_DEVICE, PCI_BASE_ADDRESS_0, &iobase);
		    iobase &= CBIO_MASK;

		    /* Fetch the IRQ routed to this slot */
		    pcibios_read_config_byte(pb, PCI_DEVICE, PCI_INTERRUPT_LINE, &irq);

		    /* Check that I/O accesses are enabled, and try to
		       enable bus mastering if the BIOS didn't */
		    pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
		    if (status & PCI_COMMAND_IO) {
			if (!(status & PCI_COMMAND_MASTER)) {
			    status |= PCI_COMMAND_MASTER;
			    pcibios_write_config_word(pb, PCI_DEVICE, PCI_COMMAND, status);
			    pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
			}
			if (status & PCI_COMMAND_MASTER) {
			    if ((DevicePresent(DE4X5_APROM) == 0) || is_not_dec) {
				if (check_region(iobase, DE4X5_PCI_TOTAL_SIZE) == 0) {
				    if ((dev = alloc_device(dev, iobase)) != NULL) {
					dev->irq = irq;
					if ((status = de4x5_hw_init(dev, iobase)) == 0) {
					    num_de4x5s++;
					}
					num_eth++;
				    }
				} else if (autoprobed) {
				    printk("%s: region already allocated at 0x%04x.\n", dev->name, (u_short)iobase);
				}
			    }
			}
		    }
		}
	    }
	}
    }

    return;
}
1630
1631
1632
1633
1634
1635 static struct device *alloc_device(struct device *dev, u_long iobase)
1636 {
1637 int addAutoProbe = 0;
1638 struct device *tmp = NULL, *ret;
1639 int (*init)(struct device *) = NULL;
1640
1641
1642
1643
1644 if (!loading_module) {
1645 while (dev->next != NULL) {
1646 if ((dev->base_addr == DE4X5_NDA) || (dev->base_addr == 0)) break;
1647 dev = dev->next;
1648 num_eth++;
1649 }
1650
1651
1652
1653
1654
1655 if ((dev->base_addr == 0) && (num_de4x5s > 0)) {
1656 addAutoProbe++;
1657 tmp = dev->next;
1658 init = dev->init;
1659 }
1660
1661
1662
1663
1664
1665 if ((dev->next == NULL) &&
1666 !((dev->base_addr == DE4X5_NDA) || (dev->base_addr == 0))){
1667 dev->next = (struct device *)kmalloc(sizeof(struct device) + 8,
1668 GFP_KERNEL);
1669
1670 dev = dev->next;
1671 if (dev == NULL) {
1672 printk("eth%d: Device not initialised, insufficient memory\n",
1673 num_eth);
1674 } else {
1675
1676
1677
1678
1679
1680 dev->name = (char *)(dev + sizeof(struct device));
1681 if (num_eth > 9999) {
1682 sprintf(dev->name,"eth????");
1683 } else {
1684 sprintf(dev->name,"eth%d", num_eth);
1685 }
1686 dev->base_addr = iobase;
1687 dev->next = NULL;
1688 dev->init = &de4x5_probe;
1689 num_de4x5s++;
1690 }
1691 }
1692 ret = dev;
1693
1694
1695
1696
1697
1698 if (ret != NULL) {
1699 if (addAutoProbe) {
1700 for (; (tmp->next!=NULL) && (tmp->base_addr!=DE4X5_NDA); tmp=tmp->next);
1701
1702
1703
1704
1705
1706 if ((tmp->next == NULL) && !(tmp->base_addr == DE4X5_NDA)) {
1707 tmp->next = (struct device *)kmalloc(sizeof(struct device) + 8,
1708 GFP_KERNEL);
1709 tmp = tmp->next;
1710 if (tmp == NULL) {
1711 printk("%s: Insufficient memory to extend the device list.\n",
1712 dev->name);
1713 } else {
1714
1715
1716
1717
1718
1719 tmp->name = (char *)(tmp + sizeof(struct device));
1720 if (num_eth > 9999) {
1721 sprintf(tmp->name,"eth????");
1722 } else {
1723 sprintf(tmp->name,"eth%d", num_eth);
1724 }
1725 tmp->base_addr = 0;
1726 tmp->next = NULL;
1727 tmp->init = init;
1728 }
1729 } else {
1730 tmp->base_addr = 0;
1731 }
1732 }
1733 }
1734 } else {
1735 ret = dev;
1736 }
1737
1738 return ret;
1739 }
1740
1741
1742
1743
1744
1745
1746
/*
** Search for the connected media (TP/BNC/AUI on the 21040/21041) or the
** operating mode (10/100Mb/s on the 21140), starting from the user's
** autosense choice or a chip-specific default, and dispatching to the
** chip-specific autoconf routine. Returns the media finally selected
** (0 / NC means nothing found).
*/
static int autoconf_media(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;

    lp->tx_enable = YES;
    if (de4x5_debug > 0 ) {
	if (lp->chipset != DC21140) {
	    printk("%s: Searching for media... ",dev->name);
	} else {
	    printk("%s: Searching for mode... ",dev->name);
	}
    }

    /* Seed the search with the user's choice, or a per-chip default */
    if (lp->chipset == DC21040) {
	lp->media = (lp->autosense == AUTO ? TP : lp->autosense);
	dc21040_autoconf(dev);
    } else if (lp->chipset == DC21041) {
	lp->media = (lp->autosense == AUTO ? TP_NW : lp->autosense);
	dc21041_autoconf(dev);
    } else if (lp->chipset == DC21140) {
	disable_ast(dev);               /* stop the autosense timer first */
	lp->media = (lp->autosense == AUTO ? _10Mb : lp->autosense);
	dc21140_autoconf(dev);
    }

    if (de4x5_debug > 0 ) {
	if (lp->chipset != DC21140) {
	    printk("media is %s\n", (lp->media == NC ? "unconnected!" :
				     (lp->media == TP ? "TP." :
				      (lp->media == ANS ? "TP/Nway." :
				       (lp->media == BNC ? "BNC." :
					(lp->media == AUI ? "AUI." :
					 "BNC/AUI."
					 ))))));
	} else {
	    printk("mode is %s\n",(lp->media == NC ? "link down.":
				   (lp->media == _100Mb ? "100Mb/s." :
				    (lp->media == _10Mb ? "10Mb/s." :
				     "\?\?\?"
				     ))));
	}
    }

    if (lp->media) {
	lp->lostMedia = 0;
	inl(DE4X5_MFC);                 /* read clears the missed frame count */
	if ((lp->media == TP) || (lp->media == ANS)) {
	    lp->irq_mask |= IMR_LFM;    /* watch for TP link failures */
	}
    }
    dce_ms_delay(10);

    return (lp->media);
}
1802
/*
** DC21040 (on-chip SIA) media search: try TP first, fall back to BNC/AUI,
** then give up (NC). The fallback is driven by recursive calls with
** lp->media advanced to the next candidate.
*/
static void dc21040_autoconf(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;
    int i, linkBad;
    s32 sisr = 0, t_3s = 3000;

    switch (lp->media) {
    case TP:
	reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);  /* SIA set up for TP */
	/* Wait up to ~3s for link pass; abandon early on non-collision
	   activity (SISR_NCR) */
	for (linkBad=1,i=0;(i<t_3s) && linkBad && !(sisr & SISR_NCR);i++) {
	    if (((sisr = inl(DE4X5_SISR)) & SISR_LKF) == 0) linkBad = 0;
	    dce_ms_delay(1);
	}
	if (linkBad && (lp->autosense == AUTO)) {
	    lp->media = BNC_AUI;                      /* try the other port */
	    dc21040_autoconf(dev);
	}
	break;

    case BNC:
    case AUI:
    case BNC_AUI:
	reset_init_sia(dev, 0x8f09, 0x0705, 0x0006);  /* SIA for BNC/AUI */
	dce_ms_delay(500);                            /* let the SIA settle */
	linkBad = ping_media(dev);                    /* loopback link test */
	if (linkBad && (lp->autosense == AUTO)) {
	    lp->media = NC;
	    dc21040_autoconf(dev);
	}
	break;

    case NC:
	/* Nothing found: park the SIA on a sensible default port */
#ifdef i386
	reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);  /* TP on x86 */
	break;
#else
	/* BNC/AUI on other architectures */
	reset_init_sia(dev, 0x8f09, 0x0705, 0x0006);
#endif
    }

    return;
}
1847
1848
1849
1850
1851
1852
1853
/*
** DC21041 media search state machine. Candidates are tried in the order
** TP_NW (TP with Nway) -> ANS or AUI -> TP -> AUI/BNC -> NC, advanced by
** the recursive calls below until one shows link/activity. The magic
** numbers are SIA CSR13/14/15 values for each port.
*/
static void dc21041_autoconf(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;
    s32 sts, irqs, irq_mask, omr;

    switch (lp->media) {
    case TP_NW:                         /* TP with Nway negotiation */
	omr = inl(DE4X5_OMR);           /* full duplex for the negotiation */
	outl(omr | OMR_FD, DE4X5_OMR);
	irqs = STS_LNF | STS_LNP;
	irq_mask = IMR_LFM | IMR_LPM;
	sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
	if (sts & STS_LNP) {
	    lp->media = ANS;            /* link pass: run autonegotiation */
	} else {
	    lp->media = AUI;
	}
	dc21041_autoconf(dev);
	break;

    case ANS:                           /* wait for Nway to complete */
	irqs = STS_LNP;
	irq_mask = IMR_LPM;
	sts = test_ans(dev, irqs, irq_mask, 3000);
	if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
	    lp->media = TP;             /* no Nway partner: plain TP */
	    dc21041_autoconf(dev);
	}
	break;

    case TP:
	omr = inl(DE4X5_OMR);           /* back to half duplex */
	outl(omr & ~OMR_FD, DE4X5_OMR);
	irqs = STS_LNF | STS_LNP;
	irq_mask = IMR_LFM | IMR_LPM;
	sts = test_media(dev, irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
	if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
	    /* NOTE(review): SISR_NRA presumably flags activity on the
	       non-selected port - confirm against de4x5.h */
	    if (inl(DE4X5_SISR) & SISR_NRA) {
		lp->media = AUI;
	    } else {
		lp->media = BNC;
	    }
	    dc21041_autoconf(dev);
	}
	break;

    case AUI:
	omr = inl(DE4X5_OMR);
	outl(omr & ~OMR_FD, DE4X5_OMR);
	irqs = 0;
	irq_mask = 0;
	sts = test_media(dev, irqs, irq_mask, 0xef09, 0xf7fd, 0x000e, 1000);
	if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
	    lp->media = BNC;            /* no receive activity: try BNC */
	    dc21041_autoconf(dev);
	}
	break;

    case BNC:
	omr = inl(DE4X5_OMR);
	outl(omr & ~OMR_FD, DE4X5_OMR);
	irqs = 0;
	irq_mask = 0;
	sts = test_media(dev, irqs, irq_mask, 0xef09, 0xf7fd, 0x0006, 1000);
	if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
	    lp->media = NC;
	} else {                        /* activity: confirm with loopback */
	    if (ping_media(dev)) lp->media = NC;
	}
	break;

    case NC:                            /* nothing found: park in TP/Nway */
	omr = inl(DE4X5_OMR);
	outl(omr | OMR_FD, DE4X5_OMR);
	reset_init_sia(dev, 0xef01, 0xffff, 0x0008);
	break;
    }

    return;
}
1935
1936
1937
1938
1939 static void dc21140_autoconf(struct device *dev)
1940 {
1941 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1942 u_long iobase = dev->base_addr;
1943 s32 omr;
1944
1945 switch(lp->media) {
1946 case _100Mb:
1947 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR));
1948 omr |= (de4x5_full_duplex ? OMR_FD : 0);
1949 outl(omr | OMR_PS | OMR_HBD | OMR_PCS | OMR_SCR, DE4X5_OMR);
1950 outl(GEP_FDXD | GEP_MODE, DE4X5_GEP);
1951 break;
1952
1953 case _10Mb:
1954 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR));
1955 omr |= (de4x5_full_duplex ? OMR_FD : 0);
1956 outl(omr | OMR_TTM, DE4X5_OMR);
1957 outl(GEP_FDXD, DE4X5_GEP);
1958 break;
1959 }
1960
1961 return;
1962 }
1963
/*
** Reset/program the SIA with the given CSR13/14/15 values, then poll for
** up to <msec> ms (via the general purpose timer) until any of the status
** bits in <irqs> is raised. Returns the final status register contents.
** NOTE(review): the irq_mask parameter is currently unused here.
*/
static int
test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;
    s32 sts, time, csr12;

    reset_init_sia(dev, csr13, csr14, csr15);

    /* Arm the general purpose timer as the timeout */
    load_ms_timer(dev, msec);

    /* Clear all pending interrupts (write-1-to-clear) */
    sts = inl(DE4X5_STS);
    outl(sts, DE4X5_STS);

    /* Clear the latched SIA status bits the same way */
    csr12 = inl(DE4X5_SISR);
    outl(csr12, DE4X5_SISR);

    /* Spin until the timer expires or a requested event appears */
    do {
	time = inl(DE4X5_GPT) & GPT_VAL;
	sts = inl(DE4X5_STS);
    } while ((time != 0) && !(sts & irqs));

    sts = inl(DE4X5_STS);

    return sts;
}
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017 static int ping_media(struct device *dev)
2018 {
2019 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2020 u_long iobase = dev->base_addr;
2021 int i, entry, linkBad;
2022 s32 omr, t_3s = 4000;
2023 char frame[64];
2024
2025 create_packet(dev, frame, sizeof(frame));
2026
2027 entry = lp->tx_new;
2028 load_packet(dev, frame, TD_LS | TD_FS | sizeof(frame),NULL);
2029
2030 omr = inl(DE4X5_OMR);
2031 outl(omr|OMR_ST, DE4X5_OMR);
2032
2033 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
2034 lp->tx_old = lp->tx_new;
2035
2036
2037 for (linkBad=1,i=0;(i<t_3s) && linkBad;i++) {
2038 if ((inl(DE4X5_SISR) & SISR_NCR) == 1) break;
2039 if (lp->tx_ring[entry].status >= 0) linkBad=0;
2040 dce_ms_delay(1);
2041 }
2042 outl(omr, DE4X5_OMR);
2043
2044 return ((linkBad || (lp->tx_ring[entry].status & TD_ES)) ? 1 : 0);
2045 }
2046
2047
2048
2049
2050
/*
** Wait up to <msec> ms (general purpose timer) for Nway autonegotiation
** to finish. Returns STS_LNP when negotiation completed with link pass,
** 0 otherwise.
*/
static int test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;
    s32 sts, ans;

    outl(irq_mask, DE4X5_IMR);

    /* Arm the general purpose timer as the timeout */
    load_ms_timer(dev, msec);

    /* Clear all pending interrupts (write-1-to-clear) */
    sts = inl(DE4X5_STS);
    outl(sts, DE4X5_STS);

    /* Poll for a requested event or the negotiation-complete state */
    do {
	ans = inl(DE4X5_SISR) & SISR_ANS;
	sts = inl(DE4X5_STS);
    } while (!(sts & irqs) && (ans ^ ANS_NWOK) != 0);

    return ((sts & STS_LNP) && ((ans ^ ANS_NWOK) == 0) ? STS_LNP : 0);
}
2074
2075
2076
2077
/*
** Reset the SIA and program its three configuration registers. The
** write order (general, TX/RX, connectivity last) is deliberate - do
** not reorder these MMIO accesses.
*/
static void reset_init_sia(struct device *dev, s32 sicr, s32 strr, s32 sigr)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;

    RESET_SIA;
    outl(sigr, DE4X5_SIGR);     /* SIA general register */
    outl(strr, DE4X5_STRR);     /* SIA TX/RX register */
    outl(sicr, DE4X5_SICR);     /* SIA connectivity register, written last */

    return;
}
2090
2091
2092
2093
/*
** Load the general purpose timer with a count corresponding to <msec>
** milliseconds. The tick period depends on the 21140's current port and
** speed selection (OMR_TTM/OMR_PS), hence the divisor adjustment.
*/
static void load_ms_timer(struct device *dev, u32 msec)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;
    s32 i = 2048, j;            /* default divisor (21040/21041) */

    if (lp->chipset == DC21140) {
	j = inl(DE4X5_OMR);
	if ((j & OMR_TTM) && (j & OMR_PS)) {         /* TTM and PS both set */
	    i = 8192;
	} else if ((~j & OMR_TTM) && (j & OMR_PS)) { /* PS set, TTM clear */
	    i = 819;
	}
    }

    /* Convert ms to timer ticks and start the countdown */
    outl((s32)(msec * 10000)/i, DE4X5_GPT);

    return;
}
2113
2114
2115
2116
2117 static void create_packet(struct device *dev, char *frame, int len)
2118 {
2119 int i;
2120 char *buf = frame;
2121
2122 for (i=0; i<ETH_ALEN; i++) {
2123 *buf++ = dev->dev_addr[i];
2124 }
2125 for (i=0; i<ETH_ALEN; i++) {
2126 *buf++ = dev->dev_addr[i];
2127 }
2128
2129 *buf++ = 0;
2130 *buf++ = 1;
2131
2132 return;
2133 }
2134
2135
2136
2137
2138 static void dce_us_delay(u32 usec)
2139 {
2140 udelay(usec);
2141
2142 return;
2143 }
2144
2145
2146
2147
2148 static void dce_ms_delay(u32 msec)
2149 {
2150 u_int i;
2151
2152 for (i=0; i<msec; i++) {
2153 dce_us_delay(1000);
2154 }
2155
2156 return;
2157 }
2158
2159
2160
2161
2162
2163 static int EISA_signature(char *name, s32 eisa_id)
2164 {
2165 u_int i;
2166 char *signatures[] = DE4X5_SIGNATURE;
2167 char ManCode[DE4X5_STRLEN];
2168 union {
2169 s32 ID;
2170 char Id[4];
2171 } Eisa;
2172 int status = 0;
2173
2174 *name = '\0';
2175 Eisa.ID = inl(eisa_id);
2176
2177 ManCode[0]=(((Eisa.Id[0]>>2)&0x1f)+0x40);
2178 ManCode[1]=(((Eisa.Id[1]&0xe0)>>5)+((Eisa.Id[0]&0x03)<<3)+0x40);
2179 ManCode[2]=(((Eisa.Id[2]>>4)&0x0f)+0x30);
2180 ManCode[3]=((Eisa.Id[2]&0x0f)+0x30);
2181 ManCode[4]=(((Eisa.Id[3]>>4)&0x0f)+0x30);
2182 ManCode[5]='\0';
2183
2184 for (i=0;(*signatures[i] != '\0') && (*name == '\0');i++) {
2185 if (strstr(ManCode, signatures[i]) != NULL) {
2186 strcpy(name,ManCode);
2187 status = 1;
2188 }
2189 }
2190
2191 return status;
2192 }
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
/*
** Determine whether an adaptor answers at <aprom_addr>. For the DC21040,
** slide along the serial address-PROM byte stream looking for the
** signature (ETH_PROM_SIG repeated twice); for later chips the whole
** SROM is simply read into lp->srom (presence is assumed).
** Returns 0 on success, -ENODEV when the 21040 signature is not found.
*/
static int DevicePresent(u_long aprom_addr)
{
    union {
	struct {
	    u32 a;
	    u32 b;
	} llsig;
	char Sig[sizeof(u32) << 1];
    } dev;
    char data;
    int i, j, tmp, status = 0;
    short sigLength;
    struct bus_type *lp = &bus;

    dev.llsig.a = ETH_PROM_SIG;         /* 8-byte signature pattern */
    dev.llsig.b = ETH_PROM_SIG;
    sigLength = sizeof(u32) << 1;

    if (lp->chipset == DC21040) {
	for (i=0,j=0;(j<sigLength) && (i<PROBE_LENGTH+sigLength-1);i++) {
	    if (lp->bus == PCI) {
		/* Negative reads mean "data not yet valid" - spin */
		while ((tmp = inl(aprom_addr)) < 0);
		data = (char)tmp;
	    } else {
		data = inb(aprom_addr);
	    }
	    /* Track how much of the signature has matched so far */
	    if (dev.Sig[j] == data) {
		j++;
	    } else {
		if (data == dev.Sig[0]) {   /* could be a fresh start */
		    j=1;
		} else {
		    j=0;
		}
	    }
	}

	if (j!=sigLength) {             /* signature never completed */
	    status = -ENODEV;
	}

    } else {
	/* 21041/21140: copy the entire SROM image, one word at a time */
	short *p = (short *)&lp->srom;
	for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
	    *p++ = srom_rd(aprom_addr, i);
	}
    }

    return status;
}
2256
/*
** Read the 6-byte Ethernet address from the address PROM (21040/EISA)
** or the pre-loaded SROM image (21041/21140) into dev->dev_addr, while
** maintaining a rotate-and-add checksum over the three 16-bit words.
** Returns 0 when the stored checksum matches, -1 otherwise.
*/
static int get_hw_addr(struct device *dev)
{
    u_long iobase = dev->base_addr;
    int i, k, tmp, status = 0;
    u_short j,chksum;
    struct bus_type *lp = &bus;

    for (i=0,k=0,j=0;j<3;j++) {
	k <<= 1 ;                       /* rotate left ... */
	if (k > 0xffff) k-=0xffff;      /* ... with end-around carry */

	if (lp->bus == PCI) {
	    if (lp->chipset == DC21040) {
		/* Negative reads mean "not yet valid" - spin */
		while ((tmp = inl(DE4X5_APROM)) < 0);
		k += (u_char) tmp;
		dev->dev_addr[i++] = (u_char) tmp;
		while ((tmp = inl(DE4X5_APROM)) < 0);
		k += (u_short) (tmp << 8);
		dev->dev_addr[i++] = (u_char) tmp;
	    } else {
		/* 21041/21140: address was read into the SROM image by
		   DevicePresent().
		   NOTE(review): the checksum is not accumulated on this
		   path, so k stays a pure rotation - the final compare
		   is skipped below for these chips anyway. */
		dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
		dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
	    }
	} else {
	    k += (u_char) (tmp = inb(EISA_APROM));
	    dev->dev_addr[i++] = (u_char) tmp;
	    k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
	    dev->dev_addr[i++] = (u_char) tmp;
	}

	if (k > 0xffff) k-=0xffff;      /* fold the carry back in */
    }
    if (k == 0xffff) k=0;

    /* Compare against the checksum word stored after the address */
    if (lp->bus == PCI) {
	if (lp->chipset == DC21040) {
	    while ((tmp = inl(DE4X5_APROM)) < 0);
	    chksum = (u_char) tmp;
	    while ((tmp = inl(DE4X5_APROM)) < 0);
	    chksum |= (u_short) (tmp << 8);
	    if (k != chksum) status = -1;
	}
    } else {
	chksum = (u_char) inb(EISA_APROM);
	chksum |= (u_short) (inb(EISA_APROM) << 8);
	if (k != chksum) status = -1;
    }

    return status;
}
2308
2309
2310
2311
/*
** Read one 16-bit word at <offset> from the serial ROM via the
** bit-banged interface. The phases below (select, chip-select, read
** command, address, data) implement the EEPROM read protocol; the call
** order IS the protocol - do not reorder.
*/
static short srom_rd(u_long addr, u_char offset)
{
    sendto_srom(SROM_RD | SROM_SR, addr);           /* select the SROM */

    srom_latch(SROM_RD | SROM_SR | DT_CS, addr);    /* raise chip select */
    srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);  /* READ opcode */
    srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);  /* word address */

    return srom_data(SROM_RD | SROM_SR | DT_CS, addr);      /* clock data in */
}
2322
/* Latch one bit into the SROM: data set-up, clock high, clock low. */
static void srom_latch(u_int command, u_long addr)
{
    sendto_srom(command, addr);
    sendto_srom(command | DT_CLK, addr);
    sendto_srom(command, addr);

    return;
}
2331
/*
** Clock the command start bits into the SROM, then drop the data-in
** line (keeping chip select) ready for the address phase.
*/
static void srom_command(u_int command, u_long addr)
{
    srom_latch(command, addr);
    srom_latch(command, addr);
    srom_latch((command & 0x0000ff00) | DT_CS, addr);

    return;
}
2340
/*
** Clock the 6-bit word address out to the SROM, most significant bit
** first, then check that the SROM pulled its data-out line low to
** acknowledge the address phase.
*/
static void srom_address(u_int command, u_long addr, u_char offset)
{
    int i;
    char a;

    a = (char)(offset << 2);            /* left-justify the 6 address bits */
    for (i=0; i<6; i++, a <<= 1) {
	/* sign bit of 'a' is the current (MSB-first) address bit */
	srom_latch(command | ((a < 0) ? DT_IN : 0), addr);
    }
    dce_us_delay(1);

    /* Data-out (bit 3) should now be 0 if the SROM accepted the address */
    i = (getfrom_srom(addr) >> 3) & 0x01;
    if (i != 0) {
	printk("Bad SROM address phase.....\n");
    }

    return;
}
2360
/*
** Clock 16 data bits out of the SROM, most significant bit first, then
** deselect the device. Returns the assembled word.
*/
static short srom_data(u_int command, u_long addr)
{
    int i;
    short word = 0;
    s32 tmp;

    for (i=0; i<16; i++) {
	sendto_srom(command | DT_CLK, addr);    /* clock high */
	tmp = getfrom_srom(addr);               /* sample data-out (bit 3) */
	sendto_srom(command, addr);             /* clock low */

	word = (word << 1) | ((tmp >> 3) & 0x01);
    }

    sendto_srom(command & 0x0000ff00, addr);    /* deselect the SROM */

    return word;
}
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
/* Write to the SROM control register, then pause for the SROM timing. */
static void sendto_srom(u_int command, u_long addr)
{
    outl(command, addr);
    dce_us_delay(1);

    return;
}
2402
/* Read the SROM control register, with a settling delay after access. */
static int getfrom_srom(u_long addr)
{
    s32 tmp;

    tmp = inl(addr);
    dce_us_delay(1);

    return tmp;
}
2412
/*
** (Re)build the setup frame in lp->setup_frame and insert this
** interface's own physical address. In HASH_PERF mode the address goes
** into the imperfect-filter slot; in perfect-filter mode the address
** plus the broadcast address are laid out 2 bytes per 32-bit word.
** Returns a pointer just past the last entry written, so the caller can
** append further perfect-filter addresses.
*/
static char *build_setup_frame(struct device *dev, int mode)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    int i;
    char *pa = lp->setup_frame;

    /* Start from a clean frame when rebuilding everything */
    if (mode == ALL) {
	memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
    }

    if (lp->setup_f == HASH_PERF) {
	for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
	    *(pa + i) = dev->dev_addr[i];       /* host address */
	    if (i & 0x01) pa += 2;              /* skip the pad bytes */
	}
	/* NOTE(review): presumably sets the broadcast bit in the hash
	   table - confirm the offset against the setup frame layout. */
	*(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80;
    } else {
	for (i=0; i<ETH_ALEN; i++) {
	    *(pa + (i&1)) = dev->dev_addr[i];   /* host address */
	    if (i & 0x01) pa += 4;              /* 2 bytes per 32-bit word */
	}
	for (i=0; i<ETH_ALEN; i++) {
	    *(pa + (i&1)) = (char) 0xff;        /* broadcast address */
	    if (i & 0x01) pa += 4;
	}
    }

    return pa;
}
2443
/*
** Enable the autosense function: unmask the general purpose timer
** interrupt, then arm the timer with <time_out> milliseconds.
*/
static void enable_ast(struct device *dev, u32 time_out)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;

    lp->irq_mask |= IMR_TMM;            /* unmask the timer interrupt */
    outl(lp->irq_mask, DE4X5_IMR);
    load_ms_timer(dev, time_out);       /* start the countdown */

    return;
}
2455
/*
** Disable the autosense function: mask the general purpose timer
** interrupt and stop the timer (load it with zero).
*/
static void disable_ast(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;

    lp->irq_mask &= ~IMR_TMM;           /* mask the timer interrupt */
    outl(lp->irq_mask, DE4X5_IMR);
    load_ms_timer(dev, 0);              /* stop the timer */

    return;
}
2467
2468 static void kick_tx(struct device *dev)
2469 {
2470 struct sk_buff *skb;
2471
2472 if ((skb = alloc_skb(0, GFP_ATOMIC)) != NULL) {
2473 skb->len= FAKE_FRAME_LEN;
2474 skb->arp=1;
2475 skb->dev=dev;
2476 dev_queue_xmit(skb, dev, SOPRI_NORMAL);
2477 }
2478
2479 return;
2480 }
2481
2482
2483
2484
2485
2486 static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd)
2487 {
2488 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2489 struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_data;
2490 u_long iobase = dev->base_addr;
2491 int i, j, status = 0;
2492 s32 omr;
2493 union {
2494 u8 addr[(HASH_TABLE_LEN * ETH_ALEN)];
2495 u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
2496 u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2];
2497 } tmp;
2498
2499 switch(ioc->cmd) {
2500 case DE4X5_GET_HWADDR:
2501 ioc->len = ETH_ALEN;
2502 status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
2503 if (status)
2504 break;
2505 for (i=0; i<ETH_ALEN; i++) {
2506 tmp.addr[i] = dev->dev_addr[i];
2507 }
2508 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
2509
2510 break;
2511 case DE4X5_SET_HWADDR:
2512 status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN);
2513 if (status)
2514 break;
2515 status = -EPERM;
2516 if (!suser())
2517 break;
2518 status = 0;
2519 memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN);
2520 for (i=0; i<ETH_ALEN; i++) {
2521 dev->dev_addr[i] = tmp.addr[i];
2522 }
2523 build_setup_frame(dev, PHYS_ADDR_ONLY);
2524
2525 while (set_bit(0, (void *)&dev->tbusy) != 0);
2526 if (lp->setup_f == HASH_PERF) {
2527 load_packet(dev, lp->setup_frame, TD_IC | HASH_F | TD_SET |
2528 SETUP_FRAME_LEN, NULL);
2529 } else {
2530 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
2531 SETUP_FRAME_LEN, NULL);
2532 }
2533 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
2534 outl(POLL_DEMAND, DE4X5_TPD);
2535 dev->tbusy = 0;
2536
2537 break;
2538 case DE4X5_SET_PROM:
2539 if (suser()) {
2540 omr = inl(DE4X5_OMR);
2541 omr |= OMR_PR;
2542 outl(omr, DE4X5_OMR);
2543 } else {
2544 status = -EPERM;
2545 }
2546
2547 break;
2548 case DE4X5_CLR_PROM:
2549 if (suser()) {
2550 omr = inl(DE4X5_OMR);
2551 omr &= ~OMR_PR;
2552 outb(omr, DE4X5_OMR);
2553 } else {
2554 status = -EPERM;
2555 }
2556
2557 break;
2558 case DE4X5_SAY_BOO:
2559 printk("%s: Boo!\n", dev->name);
2560
2561 break;
2562 case DE4X5_GET_MCA:
2563 ioc->len = (HASH_TABLE_LEN >> 3);
2564 status = verify_area(VERIFY_WRITE, ioc->data, ioc->len);
2565 if (status)
2566 break;
2567 memcpy_tofs(ioc->data, lp->setup_frame, ioc->len);
2568
2569 break;
2570 case DE4X5_SET_MCA:
2571 if (suser()) {
2572 if (ioc->len != HASH_TABLE_LEN) {
2573 if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN * ioc->len))) {
2574 memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN * ioc->len);
2575 set_multicast_list(dev, ioc->len, tmp.addr);
2576 }
2577 } else {
2578 set_multicast_list(dev, ioc->len, NULL);
2579 }
2580 } else {
2581 status = -EPERM;
2582 }
2583
2584 break;
2585 case DE4X5_CLR_MCA:
2586 if (suser()) {
2587 set_multicast_list(dev, 0, NULL);
2588 } else {
2589 status = -EPERM;
2590 }
2591
2592 break;
2593 case DE4X5_MCA_EN:
2594 if (suser()) {
2595 omr = inl(DE4X5_OMR);
2596 omr |= OMR_PM;
2597 outl(omr, DE4X5_OMR);
2598 } else {
2599 status = -EPERM;
2600 }
2601
2602 break;
2603 case DE4X5_GET_STATS:
2604 ioc->len = sizeof(lp->pktStats);
2605 status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
2606 if (status)
2607 break;
2608
2609 cli();
2610 memcpy_tofs(ioc->data, &lp->pktStats, ioc->len);
2611 sti();
2612
2613 break;
2614 case DE4X5_CLR_STATS:
2615 if (suser()) {
2616 cli();
2617 memset(&lp->pktStats, 0, sizeof(lp->pktStats));
2618 sti();
2619 } else {
2620 status = -EPERM;
2621 }
2622
2623 break;
2624 case DE4X5_GET_OMR:
2625 tmp.addr[0] = inl(DE4X5_OMR);
2626 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, 1))) {
2627 memcpy_tofs(ioc->data, tmp.addr, 1);
2628 }
2629
2630 break;
2631 case DE4X5_SET_OMR:
2632 if (suser()) {
2633 if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, 1))) {
2634 memcpy_fromfs(tmp.addr, ioc->data, 1);
2635 outl(tmp.addr[0], DE4X5_OMR);
2636 }
2637 } else {
2638 status = -EPERM;
2639 }
2640
2641 break;
2642 case DE4X5_GET_REG:
2643 j = 0;
2644 tmp.lval[0] = inl(DE4X5_STS); j+=4;
2645 tmp.lval[1] = inl(DE4X5_BMR); j+=4;
2646 tmp.lval[2] = inl(DE4X5_IMR); j+=4;
2647 tmp.lval[3] = inl(DE4X5_OMR); j+=4;
2648 tmp.lval[4] = inl(DE4X5_SISR); j+=4;
2649 tmp.lval[5] = inl(DE4X5_SICR); j+=4;
2650 tmp.lval[6] = inl(DE4X5_STRR); j+=4;
2651 tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
2652 ioc->len = j;
2653 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
2654 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
2655 }
2656 break;
2657
2658 #define DE4X5_DUMP 0x0f
2659
2660 case DE4X5_DUMP:
2661 j = 0;
2662 tmp.addr[j++] = dev->irq;
2663 for (i=0; i<ETH_ALEN; i++) {
2664 tmp.addr[j++] = dev->dev_addr[i];
2665 }
2666 tmp.addr[j++] = lp->rxRingSize;
2667 tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
2668 tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
2669
2670 for (i=0;i<lp->rxRingSize-1;i++){
2671 if (i < 3) {
2672 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
2673 }
2674 }
2675 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
2676 for (i=0;i<lp->txRingSize-1;i++){
2677 if (i < 3) {
2678 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
2679 }
2680 }
2681 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
2682
2683 for (i=0;i<lp->rxRingSize-1;i++){
2684 if (i < 3) {
2685 tmp.lval[j>>2] = (s32)lp->rx_ring[i].buf; j+=4;
2686 }
2687 }
2688 tmp.lval[j>>2] = (s32)lp->rx_ring[i].buf; j+=4;
2689 for (i=0;i<lp->txRingSize-1;i++){
2690 if (i < 3) {
2691 tmp.lval[j>>2] = (s32)lp->tx_ring[i].buf; j+=4;
2692 }
2693 }
2694 tmp.lval[j>>2] = (s32)lp->tx_ring[i].buf; j+=4;
2695
2696 for (i=0;i<lp->rxRingSize;i++){
2697 tmp.lval[j>>2] = lp->rx_ring[i].status; j+=4;
2698 }
2699 for (i=0;i<lp->txRingSize;i++){
2700 tmp.lval[j>>2] = lp->tx_ring[i].status; j+=4;
2701 }
2702
2703 tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
2704 tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
2705 tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
2706 tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
2707 tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
2708 tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
2709 tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
2710 tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
2711
2712 tmp.addr[j++] = lp->txRingSize;
2713 tmp.addr[j++] = dev->tbusy;
2714
2715 ioc->len = j;
2716 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
2717 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
2718 }
2719
2720 break;
2721 default:
2722 status = -EOPNOTSUPP;
2723 }
2724
2725 return status;
2726 }
2727
#ifdef MODULE
char kernel_version[] = UTS_RELEASE;
/* Template net device used when loaded as a module; de4x5_probe() fills
   in the rest at init time. */
static struct device thisDE4X5 = {
    " ",                /* device name (filled in on registration) */
    0, 0, 0, 0,
    0x2000, 10,         /* base I/O address, IRQ (overridden by io/irq below) */
    0, 0, 0, NULL, de4x5_probe };

/* Load-time parameters.
   NOTE(review): io=0x000b looks like an encoded bus/device pair as
   consumed by pci_probe() (values < 0x1000), not a raw I/O address -
   confirm before changing. */
static int io=0x000b;
static int irq=10;
2738
2739 int
2740 init_module(void)
2741 {
2742 thisDE4X5.base_addr=io;
2743 thisDE4X5.irq=irq;
2744 if (register_netdev(&thisDE4X5) != 0)
2745 return -EIO;
2746 return 0;
2747 }
2748
2749 void
2750 cleanup_module(void)
2751 {
2752 struct de4x5_private *lp = (struct de4x5_private *) thisDE4X5.priv;
2753
2754 if (MOD_IN_USE) {
2755 printk("%s: device busy, remove delayed\n",thisDE4X5.name);
2756 } else {
2757 if (lp) {
2758 kfree_s(bus_to_virt(lp->rx_ring[0].buf), RX_BUFF_SZ * NUM_RX_DESC + ALIGN);
2759 }
2760 kfree_s(thisDE4X5.priv, sizeof(struct de4x5_private) + ALIGN);
2761 thisDE4X5.priv = NULL;
2762
2763 release_region(thisDE4X5.base_addr, (lp->bus == PCI ?
2764 DE4X5_PCI_TOTAL_SIZE :
2765 DE4X5_EISA_TOTAL_SIZE));
2766 unregister_netdev(&thisDE4X5);
2767 }
2768 }
2769 #endif
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780