This source file includes the following definitions.
- de4x5_probe
- de4x5_hw_init
- de4x5_open
- de4x5_init
- de4x5_queue_pkt
- de4x5_interrupt
- de4x5_rx
- de4x5_tx
- de4x5_ast
- de4x5_close
- de4x5_get_stats
- load_packet
- set_multicast_list
- SetMulticastFilter
- eisa_probe
- pci_probe
- alloc_device
- autoconf_media
- dc21040_autoconf
- dc21041_autoconf
- dc21140_autoconf
- test_media
- ping_media
- test_ans
- reset_init_sia
- load_ms_timer
- create_packet
- dce_us_delay
- dce_ms_delay
- EISA_signature
- DevicePresent
- get_hw_addr
- srom_rd
- srom_latch
- srom_command
- srom_address
- srom_data
- sendto_srom
- getfrom_srom
- build_setup_frame
- enable_ast
- disable_ast
- kick_tx
- de4x5_ioctl
- init_module
- cleanup_module
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144 static const char *version = "de4x5.c:v0.32 6/26/95 davies@wanton.lkg.dec.com\n";
145
146 #include <linux/config.h>
147 #ifdef MODULE
148 #include <linux/module.h>
149 #include <linux/version.h>
150 #else
151 #define MOD_INC_USE_COUNT
152 #define MOD_DEC_USE_COUNT
153 #endif
154
155 #include <linux/kernel.h>
156 #include <linux/sched.h>
157 #include <linux/string.h>
158 #include <linux/interrupt.h>
159 #include <linux/ptrace.h>
160 #include <linux/errno.h>
161 #include <linux/ioport.h>
162 #include <linux/malloc.h>
163 #include <linux/bios32.h>
164 #include <linux/pci.h>
165 #include <linux/delay.h>
166 #include <asm/bitops.h>
167 #include <asm/io.h>
168 #include <asm/dma.h>
169 #include <asm/segment.h>
170
171 #include <linux/netdevice.h>
172 #include <linux/etherdevice.h>
173 #include <linux/skbuff.h>
174
175 #include <linux/time.h>
176 #include <linux/types.h>
177 #include <linux/unistd.h>
178
179 #include "de4x5.h"
180
181 #ifdef DE4X5_DEBUG
182 static int de4x5_debug = DE4X5_DEBUG;
183 #else
184 static int de4x5_debug = 1;
185 #endif
186
187 #ifdef DE4X5_AUTOSENSE
188 static int de4x5_autosense = DE4X5_AUTOSENSE;
189 #else
190 static int de4x5_autosense = AUTO;
191 #endif
192
193 #ifdef DE4X5_FULL_DUPLEX
194 static s32 de4x5_full_duplex = 1;
195 #else
196 static s32 de4x5_full_duplex = 0;
197 #endif
198
199 #define DE4X5_NDA 0xffe0
200
201
202
203
204 #define PROBE_LENGTH 32
205 #define ETH_PROM_SIG 0xAA5500FFUL
206
207
208
209
210 #define PKT_BUF_SZ 1536
211 #define MAX_PKT_SZ 1514
212 #define MAX_DAT_SZ 1500
213 #define MIN_DAT_SZ 1
214 #define PKT_HDR_LEN 14
215 #define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
216 #define QUEUE_PKT_TIMEOUT (3*HZ)
217
218
219 #define CRC_POLYNOMIAL_BE 0x04c11db7UL
220 #define CRC_POLYNOMIAL_LE 0xedb88320UL
221
222
223
224
225 #define DE4X5_EISA_IO_PORTS 0x0c00
226 #define DE4X5_EISA_TOTAL_SIZE 0xfff
227
228 #define MAX_EISA_SLOTS 16
229 #define EISA_SLOT_INC 0x1000
230
231 #define DE4X5_SIGNATURE {"DE425",""}
232 #define DE4X5_NAME_LENGTH 8
233
234
235
236
237 #define PCI_MAX_BUS_NUM 8
238 #define DE4X5_PCI_TOTAL_SIZE 0x80
239 #define DE4X5_CLASS_CODE 0x00020000
240
241
242
243
244
245
246
247 #define ALIGN4 ((u_long)4 - 1)
248 #define ALIGN8 ((u_long)8 - 1)
249 #define ALIGN16 ((u_long)16 - 1)
250 #define ALIGN32 ((u_long)32 - 1)
251 #define ALIGN64 ((u_long)64 - 1)
252 #define ALIGN128 ((u_long)128 - 1)
253
254 #define ALIGN ALIGN32
255 #define CACHE_ALIGN CAL_16LONG
256 #define DESC_SKIP_LEN DSL_0
257
258 #define DESC_ALIGN
259
260 #ifndef IS_NOT_DEC
261 static int is_not_dec = 0;
262 #else
263 static int is_not_dec = 1;
264 #endif
265
266
267
268
269 #define ENABLE_IRQs { \
270 imr |= lp->irq_en;\
271 outl(imr, DE4X5_IMR); \
272 }
273
274 #define DISABLE_IRQs {\
275 imr = inl(DE4X5_IMR);\
276 imr &= ~lp->irq_en;\
277 outl(imr, DE4X5_IMR); \
278 }
279
280 #define UNMASK_IRQs {\
281 imr |= lp->irq_mask;\
282 outl(imr, DE4X5_IMR); \
283 }
284
285 #define MASK_IRQs {\
286 imr = inl(DE4X5_IMR);\
287 imr &= ~lp->irq_mask;\
288 outl(imr, DE4X5_IMR); \
289 }
290
291
292
293
294 #define START_DE4X5 {\
295 omr = inl(DE4X5_OMR);\
296 omr |= OMR_ST | OMR_SR;\
297 outl(omr, DE4X5_OMR); \
298 }
299
300 #define STOP_DE4X5 {\
301 omr = inl(DE4X5_OMR);\
302 omr &= ~(OMR_ST|OMR_SR);\
303 outl(omr, DE4X5_OMR); \
304 }
305
306
307
308
309 #define RESET_SIA outl(0, DE4X5_SICR);
310
311
312
313
314 #define DE4X5_AUTOSENSE_MS 250
315
316
317
318
319 struct de4x5_srom {
320 char reserved[18];
321 char version;
322 char num_adapters;
323 char ieee_addr[6];
324 char info[100];
325 short chksum;
326 };
327
328
329
330
331
332
333
334
335
336 #define NUM_RX_DESC 8
337 #define NUM_TX_DESC 32
338 #define BUFF_ALLOC_RETRIES 10
339 #define RX_BUFF_SZ 1536
340
341 struct de4x5_desc {
342 volatile s32 status;
343 u32 des1;
344 u32 buf;
345 u32 next;
346 DESC_ALIGN
347 };
348
349
350
351
352 #define DE4X5_PKT_STAT_SZ 16
353 #define DE4X5_PKT_BIN_SZ 128
354
355
356 struct de4x5_private {
357 char adapter_name[80];
358 struct de4x5_desc rx_ring[NUM_RX_DESC];
359 struct de4x5_desc tx_ring[NUM_TX_DESC];
360 struct sk_buff *skb[NUM_TX_DESC];
361 int rx_new, rx_old;
362 int tx_new, tx_old;
363 char setup_frame[SETUP_FRAME_LEN];
364 struct enet_statistics stats;
365 struct {
366 u_int bins[DE4X5_PKT_STAT_SZ];
367 u_int unicast;
368 u_int multicast;
369 u_int broadcast;
370 u_int excessive_collisions;
371 u_int tx_underruns;
372 u_int excessive_underruns;
373 } pktStats;
374 char rxRingSize;
375 char txRingSize;
376 int bus;
377 int bus_num;
378 int chipset;
379 s32 irq_mask;
380 s32 irq_en;
381 int media;
382 int linkProb;
383 int autosense;
384 int tx_enable;
385 int lostMedia;
386 int setup_f;
387 };
388
389
390
391
392
393
394
395
396
397 #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
398 lp->tx_old+lp->txRingSize-lp->tx_new-1:\
399 lp->tx_old -lp->tx_new-1)
400
401
402
403
404 static int de4x5_open(struct device *dev);
405 static int de4x5_queue_pkt(struct sk_buff *skb, struct device *dev);
406 static void de4x5_interrupt(int irq, struct pt_regs *regs);
407 static int de4x5_close(struct device *dev);
408 static struct enet_statistics *de4x5_get_stats(struct device *dev);
409 static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);
410 static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd);
411
412
413
414
415 static int de4x5_hw_init(struct device *dev, u_long iobase);
416 static int de4x5_init(struct device *dev);
417 static int de4x5_rx(struct device *dev);
418 static int de4x5_tx(struct device *dev);
419 static int de4x5_ast(struct device *dev);
420
421 static int autoconf_media(struct device *dev);
422 static void create_packet(struct device *dev, char *frame, int len);
423 static void dce_us_delay(u32 usec);
424 static void dce_ms_delay(u32 msec);
425 static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb);
426 static void dc21040_autoconf(struct device *dev);
427 static void dc21041_autoconf(struct device *dev);
428 static void dc21140_autoconf(struct device *dev);
429 static int test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
430
431 static int ping_media(struct device *dev);
432 static void reset_init_sia(struct device *dev, s32 sicr, s32 strr, s32 sigr);
433 static int test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec);
434 static void load_ms_timer(struct device *dev, u32 msec);
435 static int EISA_signature(char *name, s32 eisa_id);
436 static int DevicePresent(u_long iobase);
437 static short srom_rd(u_long address, u_char offset);
438 static void srom_latch(u_int command, u_long address);
439 static void srom_command(u_int command, u_long address);
440 static void srom_address(u_int command, u_long address, u_char offset);
441 static short srom_data(u_int command, u_long address);
442
443 static void sendto_srom(u_int command, u_long addr);
444 static int getfrom_srom(u_long addr);
445 static void SetMulticastFilter(struct device *dev, int num_addrs, char *addrs);
446 static int get_hw_addr(struct device *dev);
447
448 static void eisa_probe(struct device *dev, u_long iobase);
449 static void pci_probe(struct device *dev, u_long iobase);
450 static struct device *alloc_device(struct device *dev, u_long iobase);
451 static char *build_setup_frame(struct device *dev, int mode);
452 static void disable_ast(struct device *dev);
453 static void enable_ast(struct device *dev, u32 time_out);
454 static void kick_tx(struct device *dev);
455
456 #ifdef MODULE
457 int init_module(void);
458 void cleanup_module(void);
459 static int autoprobed = 1, loading_module = 1;
460 # else
461 static unsigned char de4x5_irq[] = {5,9,10,11};
462 static int autoprobed = 0, loading_module = 0;
463 #endif
464
465 static char name[DE4X5_NAME_LENGTH + 1];
466 static int num_de4x5s = 0, num_eth = 0;
467
468
469
470
471
472
473 static struct bus_type {
474 int bus;
475 int bus_num;
476 int device;
477 int chipset;
478 struct de4x5_srom srom;
479 int autosense;
480 } bus;
481
482
483
484
485 #define RESET_DE4X5 {\
486 int i;\
487 i=inl(DE4X5_BMR);\
488 dce_ms_delay(1);\
489 outl(i | BMR_SWR, DE4X5_BMR);\
490 dce_ms_delay(1);\
491 outl(i, DE4X5_BMR);\
492 dce_ms_delay(1);\
493 for (i=0;i<5;i++) {inl(DE4X5_BMR); dce_ms_delay(1);}\
494 dce_ms_delay(1);\
495 }
496
497
498
499 int de4x5_probe(struct device *dev)
500 {
501 int tmp = num_de4x5s, status = -ENODEV;
502 u_long iobase = dev->base_addr;
503
504 if ((iobase == 0) && loading_module){
505 printk("Autoprobing is not supported when loading a module based driver.\n");
506 status = -EIO;
507 } else {
508 eisa_probe(dev, iobase);
509 pci_probe(dev, iobase);
510
511 if ((tmp == num_de4x5s) && (iobase != 0) && loading_module) {
512 printk("%s: de4x5_probe() cannot find device at 0x%04lx.\n", dev->name,
513 iobase);
514 }
515
516
517
518
519
520 for (; (dev->priv == NULL) && (dev->next != NULL); dev = dev->next);
521
522 if (dev->priv) status = 0;
523 if (iobase == 0) autoprobed = 1;
524 }
525
526 return status;
527 }
528
529 static int
530 de4x5_hw_init(struct device *dev, u_long iobase)
531 {
532 struct bus_type *lp = &bus;
533 int tmpbus, tmpchs, i, j, status=0;
534 char *tmp;
535
536
537 if (lp->chipset == DC21041) {
538 outl(0, PCI_CFDA);
539 dce_ms_delay(10);
540 }
541
542 RESET_DE4X5;
543
544 if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) == 0) {
545
546
547
548 if (lp->bus == PCI) {
549 if (!is_not_dec) {
550 if ((lp->chipset == DC21040) || (lp->chipset == DC21041)) {
551 strcpy(name, "DE435");
552 } else if (lp->chipset == DC21140) {
553 strcpy(name, "DE500");
554 }
555 } else {
556 strcpy(name, "UNKNOWN");
557 }
558 } else {
559 EISA_signature(name, EISA_ID0);
560 }
561
562 if (*name != '\0') {
563 dev->base_addr = iobase;
564 if (lp->bus == EISA) {
565 printk("%s: %s at %04lx (EISA slot %ld)",
566 dev->name, name, iobase, ((iobase>>12)&0x0f));
567 } else {
568 printk("%s: %s at %04lx (PCI bus %d, device %d)", dev->name, name,
569 iobase, lp->bus_num, lp->device);
570 }
571
572 printk(", h/w address ");
573 status = get_hw_addr(dev);
574 for (i = 0; i < ETH_ALEN - 1; i++) {
575 printk("%2.2x:", dev->dev_addr[i]);
576 }
577 printk("%2.2x,\n", dev->dev_addr[i]);
578
579 tmpbus = lp->bus;
580 tmpchs = lp->chipset;
581
582 if (status == 0) {
583 struct de4x5_private *lp;
584
585
586
587
588
589 dev->priv = (void *) kmalloc(sizeof(struct de4x5_private) + ALIGN,
590 GFP_KERNEL);
591 if (dev->priv == NULL)
592 return -ENOMEM;
593
594
595
596 dev->priv = (void *)(((u_long)dev->priv + ALIGN) & ~ALIGN);
597 lp = (struct de4x5_private *)dev->priv;
598 memset(dev->priv, 0, sizeof(struct de4x5_private));
599 lp->bus = tmpbus;
600 lp->chipset = tmpchs;
601
602
603
604
605 if (de4x5_autosense & AUTO) {
606 lp->autosense = AUTO;
607 } else {
608 if (lp->chipset != DC21140) {
609 if ((lp->chipset == DC21040) && (de4x5_autosense & TP_NW)) {
610 de4x5_autosense = TP;
611 }
612 if ((lp->chipset == DC21041) && (de4x5_autosense & BNC_AUI)) {
613 de4x5_autosense = BNC;
614 }
615 lp->autosense = de4x5_autosense & 0x001f;
616 } else {
617 lp->autosense = de4x5_autosense & 0x00c0;
618 }
619 }
620
621 sprintf(lp->adapter_name,"%s (%s)", name, dev->name);
622 request_region(iobase, (lp->bus == PCI ? DE4X5_PCI_TOTAL_SIZE :
623 DE4X5_EISA_TOTAL_SIZE),
624 lp->adapter_name);
625
626
627
628
629
630
631 for (tmp=NULL, j=0; (j<BUFF_ALLOC_RETRIES) && (tmp==NULL); j++) {
632 if ((tmp = (void *)kmalloc(RX_BUFF_SZ * NUM_RX_DESC + ALIGN,
633 GFP_KERNEL)) != NULL) {
634 tmp = (char *)(((u_long) tmp + ALIGN) & ~ALIGN);
635 for (i=0; i<NUM_RX_DESC; i++) {
636 lp->rx_ring[i].status = 0;
637 lp->rx_ring[i].des1 = RX_BUFF_SZ;
638 lp->rx_ring[i].buf = virt_to_bus(tmp + i * RX_BUFF_SZ);
639 lp->rx_ring[i].next = (u32)NULL;
640 }
641 barrier();
642 }
643 }
644
645 if (tmp != NULL) {
646 lp->rxRingSize = NUM_RX_DESC;
647 lp->txRingSize = NUM_TX_DESC;
648
649
650 lp->rx_ring[lp->rxRingSize - 1].des1 |= RD_RER;
651 lp->tx_ring[lp->txRingSize - 1].des1 |= TD_TER;
652
653
654 outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
655 outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
656
657
658 lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM ;
659 lp->irq_en = IMR_NIM | IMR_AIM;
660
661 lp->tx_enable = TRUE;
662
663 if (dev->irq < 2) {
664 #ifndef MODULE
665 unsigned char irqnum;
666 s32 omr;
667 autoirq_setup(0);
668
669 omr = inl(DE4X5_OMR);
670 outl(IMR_AIM|IMR_RUM, DE4X5_IMR);
671 outl(OMR_SR | omr, DE4X5_OMR);
672
673 irqnum = autoirq_report(1);
674 if (!irqnum) {
675 printk(" and failed to detect IRQ line.\n");
676 status = -ENXIO;
677 } else {
678 for (dev->irq=0,i=0; (i<sizeof(de4x5_irq)) && (!dev->irq); i++) {
679 if (irqnum == de4x5_irq[i]) {
680 dev->irq = irqnum;
681 printk(" and uses IRQ%d.\n", dev->irq);
682 }
683 }
684
685 if (!dev->irq) {
686 printk(" but incorrect IRQ line detected.\n");
687 status = -ENXIO;
688 }
689 }
690
691 outl(0, DE4X5_IMR);
692
693 #endif
694 } else {
695 printk(" and requires IRQ%d (not probed).\n", dev->irq);
696 }
697 } else {
698 printk("%s: Kernel could not allocate RX buffer memory.\n",
699 dev->name);
700 status = -ENXIO;
701 }
702 if (status) release_region(iobase, (lp->bus == PCI ?
703 DE4X5_PCI_TOTAL_SIZE :
704 DE4X5_EISA_TOTAL_SIZE));
705 } else {
706 printk(" which has an Ethernet PROM CRC error.\n");
707 status = -ENXIO;
708 }
709 } else {
710 status = -ENXIO;
711 }
712 } else {
713 status = -ENXIO;
714 }
715
716 if (!status) {
717 if (de4x5_debug > 0) {
718 printk(version);
719 }
720
721
722 dev->open = &de4x5_open;
723 dev->hard_start_xmit = &de4x5_queue_pkt;
724 dev->stop = &de4x5_close;
725 dev->get_stats = &de4x5_get_stats;
726 #ifdef HAVE_MULTICAST
727 dev->set_multicast_list = &set_multicast_list;
728 #endif
729 dev->do_ioctl = &de4x5_ioctl;
730
731 dev->mem_start = 0;
732
733
734 ether_setup(dev);
735
736
737 if (lp->chipset == DC21041) {
738 outl(0, DE4X5_SICR);
739 outl(CFDA_PSM, PCI_CFDA);
740 }
741 } else {
742 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
743 if (lp) {
744 kfree_s(bus_to_virt(lp->rx_ring[0].buf),
745 RX_BUFF_SZ * NUM_RX_DESC + ALIGN);
746 }
747 if (dev->priv) {
748 kfree_s(dev->priv, sizeof(struct de4x5_private) + ALIGN);
749 dev->priv = NULL;
750 }
751 }
752
753 return status;
754 }
755
756
757 static int
758 de4x5_open(struct device *dev)
759 {
760 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
761 u_long iobase = dev->base_addr;
762 int i, status = 0;
763 s32 imr, omr, sts;
764
765
766
767
768 if (lp->chipset == DC21041) {
769 outl(0, PCI_CFDA);
770 dce_ms_delay(10);
771 }
772
773 if (request_irq(dev->irq, (void *)de4x5_interrupt, 0, lp->adapter_name)) {
774 printk("de4x5_open(): Requested IRQ%d is busy\n",dev->irq);
775 status = -EAGAIN;
776 } else {
777
778 irq2dev_map[dev->irq] = dev;
779
780
781
782 status = de4x5_init(dev);
783
784 if (de4x5_debug > 1){
785 printk("%s: de4x5 open with irq %d\n",dev->name,dev->irq);
786 printk("\tphysical address: ");
787 for (i=0;i<6;i++){
788 printk("%2.2x:",(short)dev->dev_addr[i]);
789 }
790 printk("\n");
791 printk("Descriptor head addresses:\n");
792 printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
793 printk("Descriptor addresses:\nRX: ");
794 for (i=0;i<lp->rxRingSize-1;i++){
795 if (i < 3) {
796 printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
797 }
798 }
799 printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
800 printk("TX: ");
801 for (i=0;i<lp->txRingSize-1;i++){
802 if (i < 3) {
803 printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
804 }
805 }
806 printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
807 printk("Descriptor buffers:\nRX: ");
808 for (i=0;i<lp->rxRingSize-1;i++){
809 if (i < 3) {
810 printk("0x%8.8x ",lp->rx_ring[i].buf);
811 }
812 }
813 printk("...0x%8.8x\n",lp->rx_ring[i].buf);
814 printk("TX: ");
815 for (i=0;i<lp->txRingSize-1;i++){
816 if (i < 3) {
817 printk("0x%8.8x ", lp->tx_ring[i].buf);
818 }
819 }
820 printk("...0x%8.8x\n", lp->tx_ring[i].buf);
821 printk("Ring size: \nRX: %d\nTX: %d\n",
822 (short)lp->rxRingSize,
823 (short)lp->txRingSize);
824 printk("\tstatus: %d\n", status);
825 }
826
827 if (!status) {
828 dev->tbusy = 0;
829 dev->start = 1;
830 dev->interrupt = UNMASK_INTERRUPTS;
831 dev->trans_start = jiffies;
832
833 START_DE4X5;
834
835
836 imr = 0;
837 UNMASK_IRQs;
838
839
840 sts = inl(DE4X5_STS);
841 outl(sts, DE4X5_STS);
842
843 ENABLE_IRQs;
844 }
845 if (de4x5_debug > 1) {
846 printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
847 printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
848 printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
849 printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
850 printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
851 printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
852 printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
853 printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
854 }
855 }
856
857 MOD_INC_USE_COUNT;
858
859 return status;
860 }
861
862
863
864
865
866
867
868
869
870 static int
871 de4x5_init(struct device *dev)
872 {
873 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
874 u_long iobase = dev->base_addr;
875 int i, j, status = 0;
876 s32 bmr, omr;
877
878
879 set_bit(0, (void *)&dev->tbusy);
880
881 RESET_DE4X5;
882
883 bmr = inl(DE4X5_BMR);
884 bmr |= PBL_8 | DESC_SKIP_LEN | CACHE_ALIGN;
885 outl(bmr, DE4X5_BMR);
886
887 if (lp->chipset != DC21140) {
888 omr = TR_96;
889 lp->setup_f = HASH_PERF;
890 } else {
891 omr = OMR_SDP | OMR_SF;
892 lp->setup_f = PERFECT;
893 }
894 outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
895 outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
896
897 lp->rx_new = lp->rx_old = 0;
898 lp->tx_new = lp->tx_old = 0;
899
900 for (i = 0; i < lp->rxRingSize; i++) {
901 lp->rx_ring[i].status = R_OWN;
902 }
903
904 for (i = 0; i < lp->txRingSize; i++) {
905 lp->tx_ring[i].status = 0;
906 }
907
908 barrier();
909
910
911 SetMulticastFilter(dev, 0, NULL);
912
913 if (lp->chipset != DC21140) {
914 load_packet(dev, lp->setup_frame, HASH_F|TD_SET|SETUP_FRAME_LEN, NULL);
915 } else {
916 load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, NULL);
917 }
918 outl(omr|OMR_ST, DE4X5_OMR);
919
920
921 for (j=0, i=jiffies;(i<=jiffies+HZ/100) && (j==0);) {
922 if (lp->tx_ring[lp->tx_new].status >= 0) j=1;
923 }
924 outl(omr, DE4X5_OMR);
925
926 if (j == 0) {
927 printk("%s: Setup frame timed out, status %08x\n", dev->name,
928 inl(DE4X5_STS));
929 status = -EIO;
930 }
931
932 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
933 lp->tx_old = lp->tx_new;
934
935
936 if (autoconf_media(dev) == 0) {
937 status = -EIO;
938 }
939
940 return 0;
941 }
942
943
944
945
946 static int
947 de4x5_queue_pkt(struct sk_buff *skb, struct device *dev)
948 {
949 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
950 u_long iobase = dev->base_addr;
951 int i, status = 0;
952 s32 imr, omr, sts;
953
954
955
956
957
958
959 if (set_bit(0, (void*)&dev->tbusy) == 0) {
960 cli();
961 de4x5_tx(dev);
962 dev->tbusy = 0;
963 sti();
964 }
965
966
967
968
969
970
971 if (dev->tbusy || (lp->lostMedia > LOST_MEDIA_THRESHOLD)) {
972 u_long tickssofar = jiffies - dev->trans_start;
973 if ((tickssofar < QUEUE_PKT_TIMEOUT) &&
974 (lp->lostMedia <= LOST_MEDIA_THRESHOLD)) {
975 status = -1;
976 } else {
977 if (de4x5_debug >= 1) {
978 printk("%s: transmit timed out, status %08x, tbusy:%ld, lostMedia:%d tickssofar:%ld, resetting.\n",dev->name, inl(DE4X5_STS), dev->tbusy, lp->lostMedia, tickssofar);
979 }
980
981
982 STOP_DE4X5;
983
984
985 for (i=lp->tx_old; i!=lp->tx_new; i=(++i)%lp->txRingSize) {
986 if (lp->skb[i] != NULL) {
987 if (lp->skb[i]->len != FAKE_FRAME_LEN) {
988 if (lp->tx_ring[i].status == T_OWN) {
989 dev_queue_xmit(lp->skb[i], dev, SOPRI_NORMAL);
990 } else {
991 dev_kfree_skb(lp->skb[i], FREE_WRITE);
992 }
993 } else {
994 dev_kfree_skb(lp->skb[i], FREE_WRITE);
995 }
996 lp->skb[i] = NULL;
997 }
998 }
999 if (skb->len != FAKE_FRAME_LEN) {
1000 dev_queue_xmit(skb, dev, SOPRI_NORMAL);
1001 } else {
1002 dev_kfree_skb(skb, FREE_WRITE);
1003 }
1004
1005
1006 status = de4x5_init(dev);
1007
1008
1009 if (!status) {
1010
1011 dev->interrupt = UNMASK_INTERRUPTS;
1012 dev->start = 1;
1013 dev->tbusy = 0;
1014 dev->trans_start = jiffies;
1015
1016 START_DE4X5;
1017
1018
1019 imr = 0;
1020 UNMASK_IRQs;
1021
1022
1023 sts = inl(DE4X5_STS);
1024 outl(sts, DE4X5_STS);
1025
1026 ENABLE_IRQs;
1027 } else {
1028 printk("%s: hardware initialisation failure, status %08x.\n",
1029 dev->name, inl(DE4X5_STS));
1030 }
1031 }
1032 } else if (skb == NULL) {
1033 dev_tint(dev);
1034 } else if (skb->len == FAKE_FRAME_LEN) {
1035 dev_kfree_skb(skb, FREE_WRITE);
1036 } else if (skb->len > 0) {
1037
1038 if (set_bit(0, (void*)&dev->tbusy) != 0) {
1039 printk("%s: Transmitter access conflict.\n", dev->name);
1040 status = -1;
1041 } else {
1042 cli();
1043 if (TX_BUFFS_AVAIL) {
1044 load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
1045 if (lp->tx_enable) {
1046 outl(POLL_DEMAND, DE4X5_TPD);
1047 }
1048
1049 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
1050 dev->trans_start = jiffies;
1051
1052 if (TX_BUFFS_AVAIL) {
1053 dev->tbusy = 0;
1054 }
1055 } else {
1056 status = -1;
1057 }
1058 sti();
1059 }
1060 }
1061
1062 return status;
1063 }
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076 static void
1077 de4x5_interrupt(int irq, struct pt_regs *regs)
1078 {
1079 struct device *dev = (struct device *)(irq2dev_map[irq]);
1080 struct de4x5_private *lp;
1081 s32 imr, omr, sts;
1082 u_long iobase;
1083
1084 if (dev == NULL) {
1085 printk ("de4x5_interrupt(): irq %d for unknown device.\n", irq);
1086 } else {
1087 lp = (struct de4x5_private *)dev->priv;
1088 iobase = dev->base_addr;
1089
1090 if (dev->interrupt)
1091 printk("%s: Re-entering the interrupt handler.\n", dev->name);
1092
1093 DISABLE_IRQs;
1094 dev->interrupt = MASK_INTERRUPTS;
1095
1096 while ((sts = inl(DE4X5_STS)) & lp->irq_mask) {
1097 outl(sts, DE4X5_STS);
1098
1099 if (sts & (STS_RI | STS_RU))
1100 de4x5_rx(dev);
1101
1102 if (sts & (STS_TI | STS_TU))
1103 de4x5_tx(dev);
1104
1105 if (sts & STS_TM)
1106 de4x5_ast(dev);
1107
1108 if (sts & STS_LNF) {
1109 lp->lostMedia = LOST_MEDIA_THRESHOLD + 1;
1110 lp->irq_mask &= ~IMR_LFM;
1111 kick_tx(dev);
1112 }
1113
1114 if (sts & STS_SE) {
1115 STOP_DE4X5;
1116 printk("%s: Fatal bus error occured, sts=%#8x, device stopped.\n",
1117 dev->name, sts);
1118 }
1119 }
1120
1121 if (TX_BUFFS_AVAIL && dev->tbusy) {
1122 dev->tbusy = 0;
1123 mark_bh(NET_BH);
1124 }
1125
1126 dev->interrupt = UNMASK_INTERRUPTS;
1127 ENABLE_IRQs;
1128 }
1129
1130 return;
1131 }
1132
1133 static int
1134 de4x5_rx(struct device *dev)
1135 {
1136 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1137 int i, entry;
1138 s32 status;
1139 char *buf;
1140
1141 for (entry = lp->rx_new; lp->rx_ring[entry].status >= 0;entry = lp->rx_new) {
1142 status = lp->rx_ring[entry].status;
1143
1144 if (status & RD_FS) {
1145 lp->rx_old = entry;
1146 }
1147
1148 if (status & RD_LS) {
1149 if (status & RD_ES) {
1150 lp->stats.rx_errors++;
1151 if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
1152 if (status & RD_CE) lp->stats.rx_crc_errors++;
1153 if (status & RD_OF) lp->stats.rx_fifo_errors++;
1154 } else {
1155 struct sk_buff *skb;
1156 short pkt_len = (short)(lp->rx_ring[entry].status >> 16) - 4;
1157
1158 if ((skb = dev_alloc_skb(pkt_len+2)) != NULL) {
1159 skb->dev = dev;
1160
1161 skb_reserve(skb,2);
1162 if (entry < lp->rx_old) {
1163 short len = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
1164 memcpy(skb_put(skb,len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), len);
1165 memcpy(skb_put(skb,pkt_len-len), bus_to_virt(lp->rx_ring[0].buf), pkt_len - len);
1166 } else {
1167 memcpy(skb_put(skb,pkt_len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), pkt_len);
1168 }
1169
1170
1171 skb->protocol=eth_type_trans(skb,dev);
1172 netif_rx(skb);
1173
1174
1175 lp->stats.rx_packets++;
1176 for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
1177 if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
1178 lp->pktStats.bins[i]++;
1179 i = DE4X5_PKT_STAT_SZ;
1180 }
1181 }
1182 buf = skb->data;
1183 if (buf[0] & 0x01) {
1184 if ((*(s32 *)&buf[0] == -1) && (*(s16 *)&buf[4] == -1)) {
1185 lp->pktStats.broadcast++;
1186 } else {
1187 lp->pktStats.multicast++;
1188 }
1189 } else if ((*(s32 *)&buf[0] == *(s32 *)&dev->dev_addr[0]) &&
1190 (*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) {
1191 lp->pktStats.unicast++;
1192 }
1193
1194 lp->pktStats.bins[0]++;
1195 if (lp->pktStats.bins[0] == 0) {
1196 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
1197 }
1198 } else {
1199 printk("%s: Insufficient memory; nuking packet.\n", dev->name);
1200 lp->stats.rx_dropped++;
1201 break;
1202 }
1203 }
1204
1205
1206 for (; lp->rx_old!=entry; lp->rx_old=(++lp->rx_old)%lp->rxRingSize) {
1207 lp->rx_ring[lp->rx_old].status = R_OWN;
1208 barrier();
1209 }
1210 lp->rx_ring[entry].status = R_OWN;
1211 barrier();
1212 }
1213
1214
1215
1216
1217 lp->rx_new = (++lp->rx_new) % lp->rxRingSize;
1218 }
1219
1220 return 0;
1221 }
1222
1223
1224
1225
1226 static int
1227 de4x5_tx(struct device *dev)
1228 {
1229 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1230 u_long iobase = dev->base_addr;
1231 int entry;
1232 s32 status;
1233
1234 for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
1235 status = lp->tx_ring[entry].status;
1236 if (status < 0) {
1237 break;
1238 } else if (status & TD_ES) {
1239 lp->stats.tx_errors++;
1240 if (status & TD_NC) lp->stats.tx_carrier_errors++;
1241 if (status & TD_LC) lp->stats.tx_window_errors++;
1242 if (status & TD_UF) lp->stats.tx_fifo_errors++;
1243 if (status & TD_LC) lp->stats.collisions++;
1244 if (status & TD_EC) lp->pktStats.excessive_collisions++;
1245 if (status & TD_DE) lp->stats.tx_aborted_errors++;
1246
1247 if ((status != 0x7fffffff) &&
1248 (status & (TD_LO | TD_NC | TD_EC | TD_LF))) {
1249 lp->lostMedia++;
1250 if (lp->lostMedia > LOST_MEDIA_THRESHOLD) {
1251 kick_tx(dev);
1252 }
1253 } else {
1254 outl(POLL_DEMAND, DE4X5_TPD);
1255 }
1256 } else {
1257 lp->stats.tx_packets++;
1258 lp->lostMedia = 0;
1259 }
1260
1261 if (lp->skb[entry] != NULL) {
1262 dev_kfree_skb(lp->skb[entry], FREE_WRITE);
1263 lp->skb[entry] = NULL;
1264 }
1265
1266
1267 lp->tx_old = (++lp->tx_old) % lp->txRingSize;
1268 }
1269
1270 return 0;
1271 }
1272
1273 static int
1274 de4x5_ast(struct device *dev)
1275 {
1276 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1277 u_long iobase = dev->base_addr;
1278 s32 gep;
1279
1280 disable_ast(dev);
1281
1282 if (lp->chipset == DC21140) {
1283 gep = inl(DE4X5_GEP);
1284 if (((lp->media == _100Mb) && (gep & GEP_SLNK)) ||
1285 ((lp->media == _10Mb) && (gep & GEP_LNP)) ||
1286 ((lp->media == _10Mb) && !(gep & GEP_SLNK)) ||
1287 (lp->media == NC)) {
1288 if (lp->linkProb || ((lp->media == NC) && (!(gep & GEP_LNP)))) {
1289 lp->lostMedia = LOST_MEDIA_THRESHOLD + 1;
1290 lp->linkProb = 0;
1291 kick_tx(dev);
1292 } else {
1293 switch(lp->media) {
1294 case NC:
1295 lp->linkProb = 0;
1296 enable_ast(dev, DE4X5_AUTOSENSE_MS);
1297 break;
1298
1299 case _10Mb:
1300 lp->linkProb = 1;
1301 enable_ast(dev, 1500);
1302 break;
1303
1304 case _100Mb:
1305 lp->linkProb = 1;
1306 enable_ast(dev, 4000);
1307 break;
1308 }
1309 }
1310 } else {
1311 lp->linkProb = 0;
1312 enable_ast(dev, DE4X5_AUTOSENSE_MS);
1313 }
1314 }
1315
1316 return 0;
1317 }
1318
1319 static int
1320 de4x5_close(struct device *dev)
1321 {
1322 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1323 u_long iobase = dev->base_addr;
1324 s32 imr, omr;
1325
1326 dev->start = 0;
1327 dev->tbusy = 1;
1328
1329 if (de4x5_debug > 1) {
1330 printk("%s: Shutting down ethercard, status was %8.8x.\n",
1331 dev->name, inl(DE4X5_STS));
1332 }
1333
1334
1335
1336
1337 DISABLE_IRQs;
1338
1339 STOP_DE4X5;
1340
1341
1342
1343
1344 free_irq(dev->irq);
1345 irq2dev_map[dev->irq] = 0;
1346
1347 MOD_DEC_USE_COUNT;
1348
1349
1350 if (lp->chipset == DC21041) {
1351 outl(0, DE4X5_SICR);
1352 outl(CFDA_PSM, PCI_CFDA);
1353 }
1354
1355 return 0;
1356 }
1357
1358 static struct enet_statistics *
1359 de4x5_get_stats(struct device *dev)
1360 {
1361 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1362 u_long iobase = dev->base_addr;
1363
1364 lp->stats.rx_missed_errors = (int) (inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
1365
1366 return &lp->stats;
1367 }
1368
1369 static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb)
1370 {
1371 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1372
1373 lp->tx_ring[lp->tx_new].buf = virt_to_bus(buf);
1374 lp->tx_ring[lp->tx_new].des1 &= TD_TER;
1375 lp->tx_ring[lp->tx_new].des1 |= flags;
1376 lp->skb[lp->tx_new] = skb;
1377 barrier();
1378 lp->tx_ring[lp->tx_new].status = T_OWN;
1379 barrier();
1380
1381 return;
1382 }
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393 static void
1394 set_multicast_list(struct device *dev, int num_addrs, void *addrs)
1395 {
1396 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1397 u_long iobase = dev->base_addr;
1398
1399
1400 if (irq2dev_map[dev->irq] != NULL) {
1401 if (num_addrs >= 0) {
1402 SetMulticastFilter(dev, num_addrs, (char *)addrs);
1403 if (lp->setup_f == HASH_PERF) {
1404 load_packet(dev, lp->setup_frame, TD_IC | HASH_F | TD_SET |
1405 SETUP_FRAME_LEN, NULL);
1406 } else {
1407 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
1408 SETUP_FRAME_LEN, NULL);
1409 }
1410
1411 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
1412 outl(POLL_DEMAND, DE4X5_TPD);
1413 dev->trans_start = jiffies;
1414 } else {
1415 u32 omr;
1416 omr = inl(DE4X5_OMR);
1417 omr |= OMR_PR;
1418 outl(omr, DE4X5_OMR);
1419 }
1420 }
1421
1422 return;
1423 }
1424
1425
1426
1427
1428
1429
/*
** Build the multicast filter into lp->setup_frame from the `num_addrs'
** addresses at `addrs'.  For HASH_PERF filtering each multicast address
** is CRC-32 hashed (little-endian polynomial) into the hash table area
** of the setup frame; otherwise the addresses become perfect filter
** entries.  Promiscuous mode is turned off here when num_addrs == 0.
*/
static void SetMulticastFilter(struct device *dev, int num_addrs, char *addrs)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;
    int i, j, bit, byte;
    u16 hashcode;
    u32 omr, crc, poly = CRC_POLYNOMIAL_LE;
    char *pa;

    omr = inl(DE4X5_OMR);
    pa = build_setup_frame(dev, ALL);        /* Zero the frame, insert own address */

    if (lp->setup_f == HASH_PERF) {
	if (num_addrs == HASH_TABLE_LEN) {   /* Hash table full: pass all multicasts */
	    omr |= OMR_PM;
	} else {
	    omr &= ~OMR_PM;
	    for (i=0;i<num_addrs;i++) {              /* for each address in the list */
		if ((*addrs & 0x01) == 1) {          /* is it really a multicast address? */
		    crc = 0xffffffff;                /* CRC-32 seed, per address */
		    for (byte=0;byte<ETH_ALEN;byte++) {
			/* fold each address octet into the CRC, LSB first */
			for (bit = *addrs++,j=0;j<8;j++, bit>>=1) {
			    crc = (crc >> 1) ^ (((crc ^ bit) & 0x01) ? poly : 0);
			}
		    }
		    hashcode = crc & HASH_BITS;      /* hashcode is low-order CRC bits */

		    byte = hashcode >> 3;            /* bits [3..] select the filter byte */
		    bit = 1 << (hashcode & 0x07);    /* bits [0-2] select the bit in it */

		    /* map the filter byte index onto the 16-bit-interleaved
		       setup frame layout */
		    byte <<= 1;
		    if (byte & 0x02) {
			byte -= 1;
		    }
		    lp->setup_frame[byte] |= bit;

		} else {                             /* skip this unicast address */
		    addrs += ETH_ALEN;
		}
	    }
	}
    } else {                                 /* Perfect filtering */
	omr &= ~OMR_PM;
	/* append each address after those build_setup_frame() inserted;
	   entries are stored 2 bytes per 32-bit word */
	for (j=0; j<num_addrs; j++) {
	    for (i=0; i<ETH_ALEN; i++) {
		*(pa + (i&1)) = *addrs++;
		if (i & 0x01) pa += 4;
	    }
	}
    }

    if (num_addrs == 0)
	omr &= ~OMR_PR;                      /* empty list also drops promiscuous mode */
    outl(omr, DE4X5_OMR);

    return;
}
1489
1490
1491
1492
1493
/*
** Probe each EISA slot (or just the slot implied by a non-zero
** `ioaddr') for a board carrying the EISA signature.  On a match the
** chip ID is read from the EISA/PCI configuration window, I/O access
** and bus mastering are enabled, the I/O base is programmed, and the
** device is handed to alloc_device()/de4x5_hw_init().
*/
static void eisa_probe(struct device *dev, u_long ioaddr)
{
    int i, maxSlots, status;
    u_short vendor, device;
    s32 cfid;
    u_long iobase;
    struct bus_type *lp = &bus;
    char name[DE4X5_STRLEN];

    if (!ioaddr && autoprobed) return ;            /* Been here before ! */
    if ((ioaddr < 0x1000) && (ioaddr > 0)) return; /* low addresses encode PCI probes */

    lp->bus = EISA;

    if (ioaddr == 0) {                   /* autoprobe all slots */
	iobase = EISA_SLOT_INC;          /* Get the first slot address */
	i = 1;
	maxSlots = MAX_EISA_SLOTS;
    } else {                             /* probe a specific slot only */
	iobase = ioaddr;
	i = (ioaddr >> 12);              /* slot number from the I/O address */
	maxSlots = i + 1;
    }

    for (status = -ENODEV; (i<maxSlots) && (dev!=NULL); i++, iobase+=EISA_SLOT_INC) {
	if (EISA_signature(name, EISA_ID)) {
	    cfid = inl(PCI_CFID);
	    device = (u_short)(cfid >> 16);
	    vendor = (u_short) cfid;

	    lp->bus = EISA;
	    lp->chipset = device;
	    if (DevicePresent(EISA_APROM) == 0) {
		/* Enable I/O accesses and bus mastering */
		outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
		/* Set the latency timer */
		outl(0x00004000, PCI_CFLT);
		/* Program the board's I/O base address */
		outl(iobase, PCI_CBIO);

		if (check_region(iobase, DE4X5_EISA_TOTAL_SIZE) == 0) {
		    if ((dev = alloc_device(dev, iobase)) != NULL) {
			if ((status = de4x5_hw_init(dev, iobase)) == 0) {
			    num_de4x5s++;
			}
			num_eth++;
		    }
		} else if (autoprobed) {
		    printk("%s: region already allocated at 0x%04lx.\n", dev->name, iobase);
		}
	    }
	}
    }

    return;
}
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
#define PCI_DEVICE (dev_num << 3)  /* devfn value for PCI config cycles */
#define PCI_LAST_DEV 32            /* max PCI devices per bus */

/*
** Walk the PCI devices of class DE4X5_CLASS_CODE via the PCI BIOS.
** For each DC21040/21041/21140 found, record the bus/device/chipset,
** read the I/O base and IRQ from config space, make sure I/O access
** and bus mastering are enabled, and register the device through
** alloc_device()/de4x5_hw_init().  An `ioaddr' < 0x1000 encodes a
** specific (bus << 8 | device) pair to probe instead of all devices.
*/
static void pci_probe(struct device *dev, u_long ioaddr)
{
    u_char irq;
    u_char pb, pbus, dev_num, dnum, dev_fn;
    u_short vendor, device, index, status;
    u_int class = DE4X5_CLASS_CODE;
    u_int iobase;
    struct bus_type *lp = &bus;

    if (!ioaddr && autoprobed) return ;  /* Been here before ! */

    if (pcibios_present()) {
	lp->bus = PCI;

	if (ioaddr < 0x1000) {           /* specific bus/device requested */
	    pbus = (u_short)(ioaddr >> 8);
	    dnum = (u_short)(ioaddr & 0xff);
	} else {                         /* probe everything */
	    pbus = 0;
	    dnum = 0;
	}

	for (index=0;
	     (pcibios_find_class(class, index, &pb, &dev_fn)!= PCIBIOS_DEVICE_NOT_FOUND);
	     index++) {
	    dev_num = PCI_SLOT(dev_fn);

	    if ((!pbus && !dnum) || ((pbus == pb) && (dnum == dev_num))) {
		pcibios_read_config_word(pb, PCI_DEVICE, PCI_VENDOR_ID, &vendor);
		pcibios_read_config_word(pb, PCI_DEVICE, PCI_DEVICE_ID, &device);
		if (is_DC21040 || is_DC21041 || is_DC21140) {
		    /* Record the device location */
		    lp->device = dev_num;
		    lp->bus_num = pb;

		    /* Record the chipset */
		    lp->chipset = device;

		    /* Get the board I/O address */
		    pcibios_read_config_dword(pb, PCI_DEVICE, PCI_BASE_ADDRESS_0, &iobase);
		    iobase &= CBIO_MASK;

		    /* Fetch the IRQ to be used */
		    pcibios_read_config_byte(pb, PCI_DEVICE, PCI_INTERRUPT_LINE, &irq);

		    /* Check if I/O accesses and bus mastering are enabled;
		       try to turn mastering on if it isn't */
		    pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
		    if (status & PCI_COMMAND_IO) {
			if (!(status & PCI_COMMAND_MASTER)) {
			    status |= PCI_COMMAND_MASTER;
			    pcibios_write_config_word(pb, PCI_DEVICE, PCI_COMMAND, status);
			    pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
			}
			if (status & PCI_COMMAND_MASTER) {
			    if ((DevicePresent(DE4X5_APROM) == 0) || is_not_dec) {
				if (check_region(iobase, DE4X5_PCI_TOTAL_SIZE) == 0) {
				    if ((dev = alloc_device(dev, iobase)) != NULL) {
					dev->irq = irq;
					if ((status = de4x5_hw_init(dev, iobase)) == 0) {
					    num_de4x5s++;
					}
					num_eth++;
				    }
				} else if (autoprobed) {
				    printk("%s: region already allocated at 0x%04x.\n", dev->name, (u_short)iobase);
				}
			    }
			}
		    }
		}
	    }
	}
    }

    return;
}
1640
1641
1642
1643
1644
1645 static struct device *alloc_device(struct device *dev, u_long iobase)
1646 {
1647 int addAutoProbe = 0;
1648 struct device *tmp = NULL, *ret;
1649 int (*init)(struct device *) = NULL;
1650
1651
1652
1653
1654 if (!loading_module) {
1655 while (dev->next != NULL) {
1656 if ((dev->base_addr == DE4X5_NDA) || (dev->base_addr == 0)) break;
1657 dev = dev->next;
1658 num_eth++;
1659 }
1660
1661
1662
1663
1664
1665 if ((dev->base_addr == 0) && (num_de4x5s > 0)) {
1666 addAutoProbe++;
1667 tmp = dev->next;
1668 init = dev->init;
1669 }
1670
1671
1672
1673
1674
1675 if ((dev->next == NULL) &&
1676 !((dev->base_addr == DE4X5_NDA) || (dev->base_addr == 0))){
1677 dev->next = (struct device *)kmalloc(sizeof(struct device) + 8,
1678 GFP_KERNEL);
1679
1680 dev = dev->next;
1681 if (dev == NULL) {
1682 printk("eth%d: Device not initialised, insufficient memory\n",
1683 num_eth);
1684 } else {
1685
1686
1687
1688
1689
1690 dev->name = (char *)(dev + sizeof(struct device));
1691 if (num_eth > 9999) {
1692 sprintf(dev->name,"eth????");
1693 } else {
1694 sprintf(dev->name,"eth%d", num_eth);
1695 }
1696 dev->base_addr = iobase;
1697 dev->next = NULL;
1698 dev->init = &de4x5_probe;
1699 num_de4x5s++;
1700 }
1701 }
1702 ret = dev;
1703
1704
1705
1706
1707
1708 if (ret != NULL) {
1709 if (addAutoProbe) {
1710 for (; (tmp->next!=NULL) && (tmp->base_addr!=DE4X5_NDA); tmp=tmp->next);
1711
1712
1713
1714
1715
1716 if ((tmp->next == NULL) && !(tmp->base_addr == DE4X5_NDA)) {
1717 tmp->next = (struct device *)kmalloc(sizeof(struct device) + 8,
1718 GFP_KERNEL);
1719 tmp = tmp->next;
1720 if (tmp == NULL) {
1721 printk("%s: Insufficient memory to extend the device list.\n",
1722 dev->name);
1723 } else {
1724
1725
1726
1727
1728
1729 tmp->name = (char *)(tmp + sizeof(struct device));
1730 if (num_eth > 9999) {
1731 sprintf(tmp->name,"eth????");
1732 } else {
1733 sprintf(tmp->name,"eth%d", num_eth);
1734 }
1735 tmp->base_addr = 0;
1736 tmp->next = NULL;
1737 tmp->init = init;
1738 }
1739 } else {
1740 tmp->base_addr = 0;
1741 }
1742 }
1743 }
1744 } else {
1745 ret = dev;
1746 }
1747
1748 return ret;
1749 }
1750
1751
1752
1753
1754
1755
1756
/*
** Search for the connected media (or, on the DC21140, the operating
** mode).  lp->autosense either fixes the medium or selects an AUTO
** search; each chip has its own autosense state machine.  Returns the
** medium found (0/NC means none).
*/
static int autoconf_media(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;

    lp->tx_enable = YES;
    if (de4x5_debug > 0 ) {
	if (lp->chipset != DC21140) {
	    printk("%s: Searching for media... ",dev->name);
	} else {
	    printk("%s: Searching for mode... ",dev->name);
	}
    }

    /* Seed the search with either the fixed medium or the chip's
       preferred starting point, then run its state machine */
    if (lp->chipset == DC21040) {
	lp->media = (lp->autosense == AUTO ? TP : lp->autosense);
	dc21040_autoconf(dev);
    } else if (lp->chipset == DC21041) {
	lp->media = (lp->autosense == AUTO ? TP_NW : lp->autosense);
	dc21041_autoconf(dev);
    } else if (lp->chipset == DC21140) {
	disable_ast(dev);
	lp->media = (lp->autosense == AUTO ? _10Mb : lp->autosense);
	dc21140_autoconf(dev);
    }

    if (de4x5_debug > 0 ) {
	if (lp->chipset != DC21140) {
	    printk("media is %s\n", (lp->media == NC ? "unconnected!" :
				     (lp->media == TP ? "TP." :
				      (lp->media == ANS ? "TP/Nway." :
				       (lp->media == BNC ? "BNC." :
					(lp->media == AUI ? "AUI." :
					 "BNC/AUI."
					 ))))));
	} else {
	    printk("mode is %s\n",(lp->media == NC ? "link down.":
				   (lp->media == _100Mb ? "100Mb/s." :
				    (lp->media == _10Mb ? "10Mb/s." :
				     "\?\?\?"
				     ))));
	}
    }

    if (lp->media) {
	lp->lostMedia = 0;
	inl(DE4X5_MFC);      /* read the missed frame counter
				(NOTE(review): presumably clear-on-read) */
	if ((lp->media == TP) || (lp->media == ANS)) {
	    lp->irq_mask |= IMR_LFM;  /* watch for link fail on TP media */
	}
    }
    dce_ms_delay(10);

    return (lp->media);
}
1812
/*
** DC21040 autosense: try TP first (wait up to 3s for link pass), then
** BNC/AUI (checked with a loopback ping), finally give up (NC).
** Recurses with the next media value on failure when autosensing.
*/
static void dc21040_autoconf(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;
    int i, linkBad;
    s32 sisr = 0, t_3s = 3000;

    switch (lp->media) {
    case TP:
	reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);  /* SIA values for 10Base-T */
	/* Poll (ms granularity) for link OK; bail early on non-compliance */
	for (linkBad=1,i=0;(i<t_3s) && linkBad && !(sisr & SISR_NCR);i++) {
	    if (((sisr = inl(DE4X5_SISR)) & SISR_LKF) == 0) linkBad = 0;
	    dce_ms_delay(1);
	}
	if (linkBad && (lp->autosense == AUTO)) {
	    lp->media = BNC_AUI;                      /* TP failed: try BNC/AUI */
	    dc21040_autoconf(dev);
	}
	break;

    case BNC:
    case AUI:
    case BNC_AUI:
	reset_init_sia(dev, 0x8f09, 0x0705, 0x0006);  /* SIA values for BNC/AUI */
	dce_ms_delay(500);
	linkBad = ping_media(dev);                    /* send a frame to ourselves */
	if (linkBad && (lp->autosense == AUTO)) {
	    lp->media = NC;                           /* nothing worked */
	    dc21040_autoconf(dev);
	}
	break;

    case NC:
#ifdef i386
	reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);  /* park the SIA for TP on x86 */
	break;
#else
	/* non-x86: leave the SIA set up for BNC/AUI instead */
	reset_init_sia(dev, 0x8f09, 0x0705, 0x0006);
#endif
    }

    return;
}
1857
1858
1859
1860
1861
1862
1863
/*
** DC21041 autosense state machine.  When lp->autosense == AUTO the
** order tried is: TP with Nway -> (ANS or AUI) -> TP -> (AUI or BNC)
** -> BNC -> NC.  Each state programs OMR/SIA and tests for link or
** activity via test_media()/test_ans()/ping_media().
*/
static void dc21041_autoconf(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;
    s32 sts, irqs, irq_mask, omr;

    switch (lp->media) {
    case TP_NW:                        /* TP with Nway autonegotiation */
	omr = inl(DE4X5_OMR);          /* full duplex for the negotiation */
	outl(omr | OMR_FD, DE4X5_OMR);
	irqs = STS_LNF | STS_LNP;
	irq_mask = IMR_LFM | IMR_LPM;
	sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
	if (sts & STS_LNP) {           /* link pass seen: try autonegotiation */
	    lp->media = ANS;
	} else {                       /* no link at all: try AUI */
	    lp->media = AUI;
	}
	dc21041_autoconf(dev);
	break;

    case ANS:                          /* autonegotiation */
	irqs = STS_LNP;
	irq_mask = IMR_LPM;
	sts = test_ans(dev, irqs, irq_mask, 3000);
	if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
	    lp->media = TP;            /* negotiation failed: plain TP */
	    dc21041_autoconf(dev);
	}
	break;

    case TP:                           /* 10Base-T, half duplex */
	omr = inl(DE4X5_OMR);
	outl(omr & ~OMR_FD, DE4X5_OMR);
	irqs = STS_LNF | STS_LNP;
	irq_mask = IMR_LFM | IMR_LPM;
	sts = test_media(dev, irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
	if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
	    if (inl(DE4X5_SISR) & SISR_NRA) {  /* activity elsewhere: try AUI */
		lp->media = AUI;
	    } else {
		lp->media = BNC;
	    }
	    dc21041_autoconf(dev);
	}
	break;

    case AUI:
	omr = inl(DE4X5_OMR);
	outl(omr & ~OMR_FD, DE4X5_OMR);
	irqs = 0;
	irq_mask = 0;
	sts = test_media(dev, irqs, irq_mask, 0xef09, 0xf7fd, 0x000e, 1000);
	if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
	    lp->media = BNC;           /* no receive activity seen on AUI */
	    dc21041_autoconf(dev);
	}
	break;

    case BNC:
	omr = inl(DE4X5_OMR);
	outl(omr & ~OMR_FD, DE4X5_OMR);
	irqs = 0;
	irq_mask = 0;
	sts = test_media(dev, irqs, irq_mask, 0xef09, 0xf7fd, 0x0006, 1000);
	if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
	    lp->media = NC;            /* no activity at all: give up */
	} else {                       /* activity seen: confirm with a ping */
	    if (ping_media(dev)) lp->media = NC;
	}
	break;

    case NC:                           /* nothing worked: park the SIA for TP */
	omr = inl(DE4X5_OMR);
	outl(omr | OMR_FD, DE4X5_OMR);
	reset_init_sia(dev, 0xef01, 0xffff, 0x0008);
	break;
    }

    return;
}
1945
1946
1947
1948
/*
** DC21140 mode setup: program the operating mode register and the
** general purpose port for either 100Mb/s (port select, heartbeat
** disable, PCS, scrambler) or 10Mb/s (transmit threshold mode)
** operation.  Full duplex is applied when de4x5_full_duplex is set.
*/
static void dc21140_autoconf(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;
    s32 omr;

    switch(lp->media) {
    case _100Mb:                                   /* 100Base- operation */
	omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR));
	omr |= (de4x5_full_duplex ? OMR_FD : 0);   /* Set up full duplex */
	outl(omr | OMR_PS | OMR_HBD | OMR_PCS | OMR_SCR, DE4X5_OMR);
	outl(GEP_FDXD | GEP_MODE, DE4X5_GEP);      /* set GEP for 100Mb/s */
	break;

    case _10Mb:                                    /* 10Base- operation */
	omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR));
	omr |= (de4x5_full_duplex ? OMR_FD : 0);   /* Set up full duplex */
	outl(omr | OMR_TTM, DE4X5_OMR);
	outl(GEP_FDXD, DE4X5_GEP);
	break;
    }

    return;
}
1973
/*
** Program the SIA (csr13/14/15), clear any pending status, then wait
** up to `msec' ms (via the chip's general purpose timer) for one of
** the conditions in `irqs' to appear in the status register.  Returns
** the final status register contents.
** NB: the irq_mask argument is currently unused here.
*/
static int
test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;
    s32 sts, time, csr12;

    reset_init_sia(dev, csr13, csr14, csr15);

    /* Start the general purpose timer with the requested period */
    load_ms_timer(dev, msec);

    /* Clear all pending interrupts (write-1-to-clear) */
    sts = inl(DE4X5_STS);
    outl(sts, DE4X5_STS);

    /* Clear the csr12 (SISR) connection status indications */
    csr12 = inl(DE4X5_SISR);
    outl(csr12, DE4X5_SISR);

    /* Poll until the timer expires or a wanted condition appears */
    do {
	time = inl(DE4X5_GPT) & GPT_VAL;
	sts = inl(DE4X5_STS);
    } while ((time != 0) && !(sts & irqs));

    sts = inl(DE4X5_STS);

    return sts;
}
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027 static int ping_media(struct device *dev)
2028 {
2029 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2030 u_long iobase = dev->base_addr;
2031 int i, entry, linkBad;
2032 s32 omr, t_3s = 4000;
2033 char frame[64];
2034
2035 create_packet(dev, frame, sizeof(frame));
2036
2037 entry = lp->tx_new;
2038 load_packet(dev, frame, TD_LS | TD_FS | sizeof(frame),NULL);
2039
2040 omr = inl(DE4X5_OMR);
2041 outl(omr|OMR_ST, DE4X5_OMR);
2042
2043 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
2044 lp->tx_old = lp->tx_new;
2045
2046
2047 for (linkBad=1,i=0;(i<t_3s) && linkBad;i++) {
2048 if ((inl(DE4X5_SISR) & SISR_NCR) == 1) break;
2049 if (lp->tx_ring[entry].status >= 0) linkBad=0;
2050 dce_ms_delay(1);
2051 }
2052 outl(omr, DE4X5_OMR);
2053
2054 return ((linkBad || (lp->tx_ring[entry].status & TD_ES)) ? 1 : 0);
2055 }
2056
2057
2058
2059
2060
/*
** Wait up to `msec' ms for Nway autonegotiation to reach the
** "negotiation OK" state (SISR ANS field == ANS_NWOK), or for one of
** the conditions in `irqs' to appear.  Returns STS_LNP when the link
** passed and negotiation completed, 0 otherwise.
*/
static int test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;
    s32 sts, ans;

    outl(irq_mask, DE4X5_IMR);

    /* Start the general purpose timer with the requested period */
    load_ms_timer(dev, msec);

    /* Clear all pending interrupts (write-1-to-clear) */
    sts = inl(DE4X5_STS);
    outl(sts, DE4X5_STS);

    /* Poll for negotiation completion or a wanted status condition */
    do {
	ans = inl(DE4X5_SISR) & SISR_ANS;
	sts = inl(DE4X5_STS);
    } while (!(sts & irqs) && (ans ^ ANS_NWOK) != 0);

    return ((sts & STS_LNP) && ((ans ^ ANS_NWOK) == 0) ? STS_LNP : 0);
}
2084
2085
2086
2087
/*
** Reset the SIA, then load the general purpose (csr15/SIGR), the
** transmit/receive (csr14/STRR) and finally the connectivity
** (csr13/SICR) registers - the write order is deliberate.
*/
static void reset_init_sia(struct device *dev, s32 sicr, s32 strr, s32 sigr)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;

    RESET_SIA;
    outl(sigr, DE4X5_SIGR);
    outl(strr, DE4X5_STRR);
    outl(sicr, DE4X5_SICR);

    return;
}
2100
2101
2102
2103
2104 static void load_ms_timer(struct device *dev, u32 msec)
2105 {
2106 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2107 u_long iobase = dev->base_addr;
2108 s32 i = 2048, j;
2109
2110 if (lp->chipset == DC21140) {
2111 j = inl(DE4X5_OMR);
2112 if ((j & OMR_TTM) && (j & OMR_PS)) {
2113 i = 8192;
2114 } else if ((~j & OMR_TTM) && (j & OMR_PS)) {
2115 i = 819;
2116 }
2117 }
2118
2119 outl((s32)(msec * 10000)/i, DE4X5_GPT);
2120
2121 return;
2122 }
2123
2124
2125
2126
2127 static void create_packet(struct device *dev, char *frame, int len)
2128 {
2129 int i;
2130 char *buf = frame;
2131
2132 for (i=0; i<ETH_ALEN; i++) {
2133 *buf++ = dev->dev_addr[i];
2134 }
2135 for (i=0; i<ETH_ALEN; i++) {
2136 *buf++ = dev->dev_addr[i];
2137 }
2138
2139 *buf++ = 0;
2140 *buf++ = 1;
2141
2142 return;
2143 }
2144
2145
2146
2147
2148 static void dce_us_delay(u32 usec)
2149 {
2150 udelay(usec);
2151
2152 return;
2153 }
2154
2155
2156
2157
2158 static void dce_ms_delay(u32 msec)
2159 {
2160 u_int i;
2161
2162 for (i=0; i<msec; i++) {
2163 dce_us_delay(1000);
2164 }
2165
2166 return;
2167 }
2168
2169
2170
2171
2172
/*
** Look for a DE4X5 board signature in the EISA ID registers.  The
** 32-bit compressed EISA ID is expanded into its 5 character
** manufacturer/product code and compared against the DE4X5_SIGNATURE
** list (which is terminated by an empty string).  On a match the code
** is copied into `name' and 1 is returned; otherwise 0.
*/
static int EISA_signature(char *name, s32 eisa_id)
{
    u_int i;
    const char *signatures[] = DE4X5_SIGNATURE;
    char ManCode[DE4X5_STRLEN];
    union {
	s32 ID;
	char Id[4];
    } Eisa;
    int status = 0;

    *name = '\0';
    Eisa.ID = inl(eisa_id);

    /* Decompress the 3-letter manufacturer code and product/revision
       digits from the packed EISA ID bytes */
    ManCode[0]=(((Eisa.Id[0]>>2)&0x1f)+0x40);
    ManCode[1]=(((Eisa.Id[1]&0xe0)>>5)+((Eisa.Id[0]&0x03)<<3)+0x40);
    ManCode[2]=(((Eisa.Id[2]>>4)&0x0f)+0x30);
    ManCode[3]=((Eisa.Id[2]&0x0f)+0x30);
    ManCode[4]=(((Eisa.Id[3]>>4)&0x0f)+0x30);
    ManCode[5]='\0';

    for (i=0;(*signatures[i] != '\0') && (*name == '\0');i++) {
	if (strstr(ManCode, signatures[i]) != NULL) {
	    strcpy(name,ManCode);
	    status = 1;
	}
    }

    return status;
}
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
/*
** Probe the Ethernet address PROM for the signature that tells us a
** chip is present.  On a DC21040 the bit-serial address ROM is scanned
** for two consecutive copies of ETH_PROM_SIG; -ENODEV is returned if
** the pattern isn't found.  On later chips the whole SROM is simply
** read into lp->srom (and 0 is always returned).
*/
static int DevicePresent(u_long aprom_addr)
{
    union {
	struct {
	    u32 a;
	    u32 b;
	} llsig;
	char Sig[sizeof(u32) << 1];
    } dev;
    char data;
    int i, j, tmp, status = 0;
    short sigLength;
    struct bus_type *lp = &bus;

    /* The expected pattern: two copies of the PROM signature word */
    dev.llsig.a = ETH_PROM_SIG;
    dev.llsig.b = ETH_PROM_SIG;
    sigLength = sizeof(u32) << 1;

    if (lp->chipset == DC21040) {
	/* Match the signature byte-by-byte, restarting on a mismatch */
	for (i=0,j=0;(j<sigLength) && (i<PROBE_LENGTH+sigLength-1);i++) {
	    if (lp->bus == PCI) {
		while ((tmp = inl(aprom_addr)) < 0); /* spin until data valid */
		data = (char)tmp;
	    } else {
		data = inb(aprom_addr);
	    }
	    if (dev.Sig[j] == data) {   /* track signature match progress */
		j++;
	    } else {                    /* lost signature: restart the match */
		if (data == dev.Sig[0]) {
		    j=1;
		} else {
		    j=0;
		}
	    }
	}

	if (j!=sigLength) {
	    status = -ENODEV;           /* search failed */
	}

    } else {                            /* 21041/21140: copy the SROM contents */
	short *p = (short *)&lp->srom;
	for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
	    *p++ = srom_rd(aprom_addr, i);
	}
    }

    return status;
}
2266
/*
** Read the Ethernet station address from the address PROM (or, for
** non-DC21040 PCI chips, from the SROM contents already read by
** DevicePresent()) into dev->dev_addr, folding a rotating 16-bit
** checksum over the three address words as we go.  Returns 0 when the
** computed checksum matches the stored one, -1 otherwise.  The SROM
** path performs no checksum test.
*/
static int get_hw_addr(struct device *dev)
{
    u_long iobase = dev->base_addr;
    int i, k, tmp, status = 0;
    u_short j,chksum;
    struct bus_type *lp = &bus;

    for (i=0,k=0,j=0;j<3;j++) {          /* three 16-bit address words */
	k <<= 1 ;                        /* rotate the checksum */
	if (k > 0xffff) k-=0xffff;

	if (lp->bus == PCI) {
	    if (lp->chipset == DC21040) {
		while ((tmp = inl(DE4X5_APROM)) < 0); /* spin until data valid */
		k += (u_char) tmp;
		dev->dev_addr[i++] = (u_char) tmp;
		while ((tmp = inl(DE4X5_APROM)) < 0);
		k += (u_short) (tmp << 8);
		dev->dev_addr[i++] = (u_char) tmp;
	    } else {                     /* address is in the pre-read SROM */
		dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
		dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
	    }
	} else {                         /* EISA: byte-wide APROM reads */
	    k += (u_char) (tmp = inb(EISA_APROM));
	    dev->dev_addr[i++] = (u_char) tmp;
	    k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
	    dev->dev_addr[i++] = (u_char) tmp;
	}

	if (k > 0xffff) k-=0xffff;       /* end-around carry */
    }
    if (k == 0xffff) k=0;

    /* Compare against the checksum stored after the address */
    if (lp->bus == PCI) {
	if (lp->chipset == DC21040) {
	    while ((tmp = inl(DE4X5_APROM)) < 0);
	    chksum = (u_char) tmp;
	    while ((tmp = inl(DE4X5_APROM)) < 0);
	    chksum |= (u_short) (tmp << 8);
	    if (k != chksum) status = -1;
	}
    } else {
	chksum = (u_char) inb(EISA_APROM);
	chksum |= (u_short) (inb(EISA_APROM) << 8);
	if (k != chksum) status = -1;
    }


    return status;
}
2318
2319
2320
2321
/*
** Read a 16-bit word from the SROM at word offset `offset' by bit
** banging the serial interface: select the part, issue the read
** command, clock in the address, then clock out the data word.
*/
static short srom_rd(u_long addr, u_char offset)
{
    sendto_srom(SROM_RD | SROM_SR, addr);      /* select SROM access mode */

    srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
    srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
    srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);

    return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
}
2332
/*
** Latch one bit into the SROM: present the data, pulse the clock
** high, then drop it again.
*/
static void srom_latch(u_int command, u_long addr)
{
    sendto_srom(command, addr);
    sendto_srom(command | DT_CLK, addr);
    sendto_srom(command, addr);

    return;
}
2341
/*
** Clock the command opcode bits into the SROM, then leave only the
** chip select (and the command's high byte) asserted.
*/
static void srom_command(u_int command, u_long addr)
{
    srom_latch(command, addr);
    srom_latch(command, addr);
    srom_latch((command & 0x0000ff00) | DT_CS, addr);

    return;
}
2350
/*
** Clock the 6-bit word address into the SROM, most significant bit
** first, then verify the SROM's data-out line goes low (the expected
** end-of-address handshake); complain to the log if it doesn't.
*/
static void srom_address(u_int command, u_long addr, u_char offset)
{
    int i;
    char a;

    a = (char)(offset << 2);             /* left-justify the 6 address bits */
    for (i=0; i<6; i++, a <<= 1) {
	/* shift out MSB first; the sign bit carries the current address bit */
	srom_latch(command | ((a < 0) ? DT_IN : 0), addr);
    }
    dce_us_delay(1);

    /* data-out (bit 3) should now be low */
    i = (getfrom_srom(addr) >> 3) & 0x01;
    if (i != 0) {
	printk("Bad SROM address phase.....\n");

    }

    return;
}
2370
/*
** Clock 16 data bits out of the SROM, most significant bit first, and
** assemble them into a word.  Deselects the SROM before returning.
*/
static short srom_data(u_int command, u_long addr)
{
    int i;
    short word = 0;
    s32 tmp;

    for (i=0; i<16; i++) {
	sendto_srom(command | DT_CLK, addr);  /* clock high: next bit valid */
	tmp = getfrom_srom(addr);
	sendto_srom(command, addr);           /* clock low */

	word = (word << 1) | ((tmp >> 3) & 0x01); /* data-out is bit 3 */
    }

    sendto_srom(command & 0x0000ff00, addr);  /* deselect the SROM */

    return word;
}
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
/*
** Write a command/data pattern to the SROM control register, with a
** short settling delay afterwards.
*/
static void sendto_srom(u_int command, u_long addr)
{
    outl(command, addr);
    dce_us_delay(1);

    return;
}
2412
2413 static int getfrom_srom(u_long addr)
2414 {
2415 s32 tmp;
2416
2417 tmp = inl(addr);
2418 dce_us_delay(1);
2419
2420 return tmp;
2421 }
2422
/*
** Build the start of the setup frame: zero it first when mode == ALL,
** then insert our own station address.  For hash/imperfect filtering
** the address goes into the physical address slot at IMPERF_PA_OFFSET
** (and one hash bit is set - NOTE(review): presumably the broadcast
** hash entry, confirm against the chip manual).  For perfect filtering
** the first two entries become our address and the broadcast address.
** Returns a pointer past the last inserted entry, ready for callers
** (e.g. SetMulticastFilter) to append further perfect filter entries.
*/
static char *build_setup_frame(struct device *dev, int mode)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    int i;
    char *pa = lp->setup_frame;

    /* Initialise the setup frame */
    if (mode == ALL) {
	memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
    }

    if (lp->setup_f == HASH_PERF) {
	/* entries are stored 2 bytes per 16-bit-interleaved slot */
	for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
	    *(pa + i) = dev->dev_addr[i];
	    if (i & 0x01) pa += 2;
	}
	*(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80;
    } else {
	/* perfect filter entry 0: our own address */
	for (i=0; i<ETH_ALEN; i++) {
	    *(pa + (i&1)) = dev->dev_addr[i];
	    if (i & 0x01) pa += 4;
	}
	/* perfect filter entry 1: the broadcast address */
	for (i=0; i<ETH_ALEN; i++) {
	    *(pa + (i&1)) = (char) 0xff;
	    if (i & 0x01) pa += 4;
	}
    }

    return pa;
}
2453
/*
** Enable the autoconf/media-sense timer: unmask the general purpose
** timer interrupt and start the timer with a `time_out' ms period.
*/
static void enable_ast(struct device *dev, u32 time_out)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;

    lp->irq_mask |= IMR_TMM;
    outl(lp->irq_mask, DE4X5_IMR);
    load_ms_timer(dev, time_out);

    return;
}
2465
/*
** Disable the autoconf/media-sense timer: mask the general purpose
** timer interrupt and stop the timer (period 0).
*/
static void disable_ast(struct device *dev)
{
    struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
    u_long iobase = dev->base_addr;

    lp->irq_mask &= ~IMR_TMM;
    outl(lp->irq_mask, DE4X5_IMR);
    load_ms_timer(dev, 0);

    return;
}
2477
/*
** Queue a dummy frame (FAKE_FRAME_LEN, flagged as already ARP
** resolved) through the net layer so that our transmit routine gets
** called again - used to prod a stalled transmitter.
*/
static void kick_tx(struct device *dev)
{
    struct sk_buff *skb;

    if ((skb = alloc_skb(0, GFP_ATOMIC)) != NULL) {
	skb->len= FAKE_FRAME_LEN;
	skb->arp=1;              /* no ARP resolution needed */
	skb->dev=dev;
	dev_queue_xmit(skb, dev, SOPRI_NORMAL);
    }

    return;
}
2491
2492
2493
2494
2495
2496 static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd)
2497 {
2498 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2499 struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_data;
2500 u_long iobase = dev->base_addr;
2501 int i, j, status = 0;
2502 s32 omr;
2503 union {
2504 u8 addr[(HASH_TABLE_LEN * ETH_ALEN)];
2505 u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
2506 u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2];
2507 } tmp;
2508
2509 switch(ioc->cmd) {
2510 case DE4X5_GET_HWADDR:
2511 ioc->len = ETH_ALEN;
2512 status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
2513 if (status)
2514 break;
2515 for (i=0; i<ETH_ALEN; i++) {
2516 tmp.addr[i] = dev->dev_addr[i];
2517 }
2518 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
2519
2520 break;
2521 case DE4X5_SET_HWADDR:
2522 status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN);
2523 if (status)
2524 break;
2525 status = -EPERM;
2526 if (!suser())
2527 break;
2528 status = 0;
2529 memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN);
2530 for (i=0; i<ETH_ALEN; i++) {
2531 dev->dev_addr[i] = tmp.addr[i];
2532 }
2533 build_setup_frame(dev, PHYS_ADDR_ONLY);
2534
2535 while (set_bit(0, (void *)&dev->tbusy) != 0);
2536 if (lp->setup_f == HASH_PERF) {
2537 load_packet(dev, lp->setup_frame, TD_IC | HASH_F | TD_SET |
2538 SETUP_FRAME_LEN, NULL);
2539 } else {
2540 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
2541 SETUP_FRAME_LEN, NULL);
2542 }
2543 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
2544 outl(POLL_DEMAND, DE4X5_TPD);
2545 dev->tbusy = 0;
2546
2547 break;
2548 case DE4X5_SET_PROM:
2549 if (suser()) {
2550 omr = inl(DE4X5_OMR);
2551 omr |= OMR_PR;
2552 outl(omr, DE4X5_OMR);
2553 } else {
2554 status = -EPERM;
2555 }
2556
2557 break;
2558 case DE4X5_CLR_PROM:
2559 if (suser()) {
2560 omr = inl(DE4X5_OMR);
2561 omr &= ~OMR_PR;
2562 outb(omr, DE4X5_OMR);
2563 } else {
2564 status = -EPERM;
2565 }
2566
2567 break;
2568 case DE4X5_SAY_BOO:
2569 printk("%s: Boo!\n", dev->name);
2570
2571 break;
2572 case DE4X5_GET_MCA:
2573 ioc->len = (HASH_TABLE_LEN >> 3);
2574 status = verify_area(VERIFY_WRITE, ioc->data, ioc->len);
2575 if (status)
2576 break;
2577 memcpy_tofs(ioc->data, lp->setup_frame, ioc->len);
2578
2579 break;
2580 case DE4X5_SET_MCA:
2581 if (suser()) {
2582 if (ioc->len != HASH_TABLE_LEN) {
2583 if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN * ioc->len))) {
2584 memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN * ioc->len);
2585 set_multicast_list(dev, ioc->len, tmp.addr);
2586 }
2587 } else {
2588 set_multicast_list(dev, ioc->len, NULL);
2589 }
2590 } else {
2591 status = -EPERM;
2592 }
2593
2594 break;
2595 case DE4X5_CLR_MCA:
2596 if (suser()) {
2597 set_multicast_list(dev, 0, NULL);
2598 } else {
2599 status = -EPERM;
2600 }
2601
2602 break;
2603 case DE4X5_MCA_EN:
2604 if (suser()) {
2605 omr = inl(DE4X5_OMR);
2606 omr |= OMR_PM;
2607 outl(omr, DE4X5_OMR);
2608 } else {
2609 status = -EPERM;
2610 }
2611
2612 break;
2613 case DE4X5_GET_STATS:
2614 ioc->len = sizeof(lp->pktStats);
2615 status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
2616 if (status)
2617 break;
2618
2619 cli();
2620 memcpy_tofs(ioc->data, &lp->pktStats, ioc->len);
2621 sti();
2622
2623 break;
2624 case DE4X5_CLR_STATS:
2625 if (suser()) {
2626 cli();
2627 memset(&lp->pktStats, 0, sizeof(lp->pktStats));
2628 sti();
2629 } else {
2630 status = -EPERM;
2631 }
2632
2633 break;
2634 case DE4X5_GET_OMR:
2635 tmp.addr[0] = inl(DE4X5_OMR);
2636 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, 1))) {
2637 memcpy_tofs(ioc->data, tmp.addr, 1);
2638 }
2639
2640 break;
2641 case DE4X5_SET_OMR:
2642 if (suser()) {
2643 if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, 1))) {
2644 memcpy_fromfs(tmp.addr, ioc->data, 1);
2645 outl(tmp.addr[0], DE4X5_OMR);
2646 }
2647 } else {
2648 status = -EPERM;
2649 }
2650
2651 break;
2652 case DE4X5_GET_REG:
2653 j = 0;
2654 tmp.lval[0] = inl(DE4X5_STS); j+=4;
2655 tmp.lval[1] = inl(DE4X5_BMR); j+=4;
2656 tmp.lval[2] = inl(DE4X5_IMR); j+=4;
2657 tmp.lval[3] = inl(DE4X5_OMR); j+=4;
2658 tmp.lval[4] = inl(DE4X5_SISR); j+=4;
2659 tmp.lval[5] = inl(DE4X5_SICR); j+=4;
2660 tmp.lval[6] = inl(DE4X5_STRR); j+=4;
2661 tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
2662 ioc->len = j;
2663 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
2664 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
2665 }
2666 break;
2667
2668 #define DE4X5_DUMP 0x0f
2669
2670 case DE4X5_DUMP:
2671 j = 0;
2672 tmp.addr[j++] = dev->irq;
2673 for (i=0; i<ETH_ALEN; i++) {
2674 tmp.addr[j++] = dev->dev_addr[i];
2675 }
2676 tmp.addr[j++] = lp->rxRingSize;
2677 tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
2678 tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
2679
2680 for (i=0;i<lp->rxRingSize-1;i++){
2681 if (i < 3) {
2682 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
2683 }
2684 }
2685 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
2686 for (i=0;i<lp->txRingSize-1;i++){
2687 if (i < 3) {
2688 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
2689 }
2690 }
2691 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
2692
2693 for (i=0;i<lp->rxRingSize-1;i++){
2694 if (i < 3) {
2695 tmp.lval[j>>2] = (s32)lp->rx_ring[i].buf; j+=4;
2696 }
2697 }
2698 tmp.lval[j>>2] = (s32)lp->rx_ring[i].buf; j+=4;
2699 for (i=0;i<lp->txRingSize-1;i++){
2700 if (i < 3) {
2701 tmp.lval[j>>2] = (s32)lp->tx_ring[i].buf; j+=4;
2702 }
2703 }
2704 tmp.lval[j>>2] = (s32)lp->tx_ring[i].buf; j+=4;
2705
2706 for (i=0;i<lp->rxRingSize;i++){
2707 tmp.lval[j>>2] = lp->rx_ring[i].status; j+=4;
2708 }
2709 for (i=0;i<lp->txRingSize;i++){
2710 tmp.lval[j>>2] = lp->tx_ring[i].status; j+=4;
2711 }
2712
2713 tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
2714 tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
2715 tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
2716 tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
2717 tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
2718 tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
2719 tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
2720 tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
2721
2722 tmp.addr[j++] = lp->txRingSize;
2723 tmp.addr[j++] = dev->tbusy;
2724
2725 ioc->len = j;
2726 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
2727 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
2728 }
2729
2730 break;
2731 default:
2732 status = -EOPNOTSUPP;
2733 }
2734
2735 return status;
2736 }
2737
#ifdef MODULE
char kernel_version[] = UTS_RELEASE;
/* Buffer for this module instance's "ethN" device name */
static char devicename[9] = { 0, };
static struct device thisDE4X5 = {
    devicename, /* device name */
    0, 0, 0, 0,
    0x2000, 10, /* default base I/O address and IRQ */
    0, 0, 0, NULL, de4x5_probe };

/* Probe location and IRQ, copied into thisDE4X5 by init_module() */
static int io=0x000b;
static int irq=10;
2749
2750 int
2751 init_module(void)
2752 {
2753 thisDE4X5.base_addr=io;
2754 thisDE4X5.irq=irq;
2755 if (register_netdev(&thisDE4X5) != 0)
2756 return -EIO;
2757 return 0;
2758 }
2759
2760 void
2761 cleanup_module(void)
2762 {
2763 struct de4x5_private *lp = (struct de4x5_private *) thisDE4X5.priv;
2764
2765 if (MOD_IN_USE) {
2766 printk("%s: device busy, remove delayed\n",thisDE4X5.name);
2767 } else {
2768 if (lp) {
2769 kfree_s(bus_to_virt(lp->rx_ring[0].buf), RX_BUFF_SZ * NUM_RX_DESC + ALIGN);
2770 }
2771 kfree_s(thisDE4X5.priv, sizeof(struct de4x5_private) + ALIGN);
2772 thisDE4X5.priv = NULL;
2773
2774 release_region(thisDE4X5.base_addr, (lp->bus == PCI ?
2775 DE4X5_PCI_TOTAL_SIZE :
2776 DE4X5_EISA_TOTAL_SIZE));
2777 unregister_netdev(&thisDE4X5);
2778 }
2779 }
2780 #endif
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791