This source file includes the following definitions:
- de4x5_probe
- de4x5_hw_init
- de4x5_open
- de4x5_init
- de4x5_queue_pkt
- de4x5_interrupt
- de4x5_rx
- de4x5_tx
- de4x5_ast
- de4x5_close
- de4x5_get_stats
- load_packet
- set_multicast_list
- SetMulticastFilter
- eisa_probe
- pci_probe
- alloc_device
- autoconf_media
- dc21040_autoconf
- dc21041_autoconf
- dc21140_autoconf
- test_media
- ping_media
- test_ans
- reset_init_sia
- load_ms_timer
- create_packet
- dce_us_delay
- dce_ms_delay
- EISA_signature
- DevicePresent
- get_hw_addr
- srom_rd
- srom_latch
- srom_command
- srom_address
- srom_data
- sendto_srom
- getfrom_srom
- build_setup_frame
- enable_ast
- disable_ast
- kick_tx
- de4x5_ioctl
- init_module
- cleanup_module
144 static const char *version = "de4x5.c:v0.32 6/26/95 davies@wanton.lkg.dec.com\n";
145
146 #include <linux/module.h>
147
148 #include <linux/kernel.h>
149 #include <linux/sched.h>
150 #include <linux/string.h>
151 #include <linux/interrupt.h>
152 #include <linux/ptrace.h>
153 #include <linux/errno.h>
154 #include <linux/ioport.h>
155 #include <linux/malloc.h>
156 #include <linux/bios32.h>
157 #include <linux/pci.h>
158 #include <linux/delay.h>
159 #include <asm/bitops.h>
160 #include <asm/io.h>
161 #include <asm/dma.h>
162 #include <asm/segment.h>
163
164 #include <linux/netdevice.h>
165 #include <linux/etherdevice.h>
166 #include <linux/skbuff.h>
167
168 #include <linux/time.h>
169 #include <linux/types.h>
170 #include <linux/unistd.h>
171
172 #include "de4x5.h"
173
174 #ifdef DE4X5_DEBUG
175 static int de4x5_debug = DE4X5_DEBUG;
176 #else
177 static int de4x5_debug = 1;
178 #endif
179
180 #ifdef DE4X5_AUTOSENSE
181 static int de4x5_autosense = DE4X5_AUTOSENSE;
182 #else
183 static int de4x5_autosense = AUTO;
184 #endif
185
186 #ifdef DE4X5_FULL_DUPLEX
187 static s32 de4x5_full_duplex = 1;
188 #else
189 static s32 de4x5_full_duplex = 0;
190 #endif
191
192 #define DE4X5_NDA 0xffe0
193
194
195
196
197 #define PROBE_LENGTH 32
198 #define ETH_PROM_SIG 0xAA5500FFUL
199
200
201
202
203 #define PKT_BUF_SZ 1536
204 #define MAX_PKT_SZ 1514
205 #define MAX_DAT_SZ 1500
206 #define MIN_DAT_SZ 1
207 #define PKT_HDR_LEN 14
208 #define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
209 #define QUEUE_PKT_TIMEOUT (3*HZ)
210
211
212 #define CRC_POLYNOMIAL_BE 0x04c11db7UL
213 #define CRC_POLYNOMIAL_LE 0xedb88320UL
214
215
216
217
218 #define DE4X5_EISA_IO_PORTS 0x0c00
219 #define DE4X5_EISA_TOTAL_SIZE 0xfff
220
221 #define MAX_EISA_SLOTS 16
222 #define EISA_SLOT_INC 0x1000
223
224 #define DE4X5_SIGNATURE {"DE425",""}
225 #define DE4X5_NAME_LENGTH 8
226
227
228
229
230 #define PCI_MAX_BUS_NUM 8
231 #define DE4X5_PCI_TOTAL_SIZE 0x80
232 #define DE4X5_CLASS_CODE 0x00020000
233
234
235
236
237
238
239
240 #define ALIGN4 ((u_long)4 - 1)
241 #define ALIGN8 ((u_long)8 - 1)
242 #define ALIGN16 ((u_long)16 - 1)
243 #define ALIGN32 ((u_long)32 - 1)
244 #define ALIGN64 ((u_long)64 - 1)
245 #define ALIGN128 ((u_long)128 - 1)
246
247 #define ALIGN ALIGN32
248 #define CACHE_ALIGN CAL_16LONG
249 #define DESC_SKIP_LEN DSL_0
250
251 #define DESC_ALIGN
252
253 #ifndef IS_NOT_DEC
254 static int is_not_dec = 0;
255 #else
256 static int is_not_dec = 1;
257 #endif
258
259
260
261
262 #define ENABLE_IRQs { \
263 imr |= lp->irq_en;\
264 outl(imr, DE4X5_IMR); \
265 }
266
267 #define DISABLE_IRQs {\
268 imr = inl(DE4X5_IMR);\
269 imr &= ~lp->irq_en;\
270 outl(imr, DE4X5_IMR); \
271 }
272
273 #define UNMASK_IRQs {\
274 imr |= lp->irq_mask;\
275 outl(imr, DE4X5_IMR); \
276 }
277
278 #define MASK_IRQs {\
279 imr = inl(DE4X5_IMR);\
280 imr &= ~lp->irq_mask;\
281 outl(imr, DE4X5_IMR); \
282 }
283
284
285
286
287 #define START_DE4X5 {\
288 omr = inl(DE4X5_OMR);\
289 omr |= OMR_ST | OMR_SR;\
290 outl(omr, DE4X5_OMR); \
291 }
292
293 #define STOP_DE4X5 {\
294 omr = inl(DE4X5_OMR);\
295 omr &= ~(OMR_ST|OMR_SR);\
296 outl(omr, DE4X5_OMR); \
297 }
298
299
300
301
302 #define RESET_SIA outl(0, DE4X5_SICR);
303
304
305
306
307 #define DE4X5_AUTOSENSE_MS 250
308
309
310
311
312 struct de4x5_srom {
313 char reserved[18];
314 char version;
315 char num_adapters;
316 char ieee_addr[6];
317 char info[100];
318 short chksum;
319 };
320
321
322
323
324
325
326
327
328
329 #define NUM_RX_DESC 8
330 #define NUM_TX_DESC 32
331 #define BUFF_ALLOC_RETRIES 10
332 #define RX_BUFF_SZ 1536
333
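/*
** Receive/transmit descriptor layout shared with the DC21040/41/140:
** an ownership/status word, a control/byte-count word and two buffer
** pointers (current buffer and chain/next buffer).
*/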
334 struct de4x5_desc {
335 volatile s32 status;
336 u32 des1;
337 u32 buf;
338 u32 next;
339 DESC_ALIGN
340 };
341
342
343
344
345 #define DE4X5_PKT_STAT_SZ 16
346 #define DE4X5_PKT_BIN_SZ 128
347
348
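/*
** Per-adapter private data. The descriptor rings live inside this block,
** which is why the whole structure is aligned (see ALIGN) before use:
** the chip fetches the descriptors by DMA.
*/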
349 struct de4x5_private {
350 char adapter_name[80];
351 struct de4x5_desc rx_ring[NUM_RX_DESC];
352 struct de4x5_desc tx_ring[NUM_TX_DESC];
353 struct sk_buff *skb[NUM_TX_DESC];
354 int rx_new, rx_old;
355 int tx_new, tx_old;
356 char setup_frame[SETUP_FRAME_LEN];
357 struct enet_statistics stats;
358 struct {
359 u_int bins[DE4X5_PKT_STAT_SZ];
360 u_int unicast;
361 u_int multicast;
362 u_int broadcast;
363 u_int excessive_collisions;
364 u_int tx_underruns;
365 u_int excessive_underruns;
366 } pktStats;
367 char rxRingSize;
368 char txRingSize;
369 int bus;
370 int bus_num;
371 int chipset;
372 s32 irq_mask;
373 s32 irq_en;
374 int media;
375 int linkProb;
376 int autosense;
377 int tx_enable;
378 int lostMedia;
379 int setup_f;
380 };
381
382
383
384
385
386
387
388
389
390 #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
391 lp->tx_old+lp->txRingSize-lp->tx_new-1:\
392 lp->tx_old -lp->tx_new-1)
393
394
395
396
397 static int de4x5_open(struct device *dev);
398 static int de4x5_queue_pkt(struct sk_buff *skb, struct device *dev);
399 static void de4x5_interrupt(int irq, struct pt_regs *regs);
400 static int de4x5_close(struct device *dev);
401 static struct enet_statistics *de4x5_get_stats(struct device *dev);
402 static void set_multicast_list(struct device *dev);
403 static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd);
404
405
406
407
408 static int de4x5_hw_init(struct device *dev, u_long iobase);
409 static int de4x5_init(struct device *dev);
410 static int de4x5_rx(struct device *dev);
411 static int de4x5_tx(struct device *dev);
412 static int de4x5_ast(struct device *dev);
413
414 static int autoconf_media(struct device *dev);
415 static void create_packet(struct device *dev, char *frame, int len);
416 static void dce_us_delay(u32 usec);
417 static void dce_ms_delay(u32 msec);
418 static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb);
419 static void dc21040_autoconf(struct device *dev);
420 static void dc21041_autoconf(struct device *dev);
421 static void dc21140_autoconf(struct device *dev);
422 static int test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
423
424 static int ping_media(struct device *dev);
425 static void reset_init_sia(struct device *dev, s32 sicr, s32 strr, s32 sigr);
426 static int test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec);
427 static void load_ms_timer(struct device *dev, u32 msec);
428 static int EISA_signature(char *name, s32 eisa_id);
429 static int DevicePresent(u_long iobase);
430 static short srom_rd(u_long address, u_char offset);
431 static void srom_latch(u_int command, u_long address);
432 static void srom_command(u_int command, u_long address);
433 static void srom_address(u_int command, u_long address, u_char offset);
434 static short srom_data(u_int command, u_long address);
435
436 static void sendto_srom(u_int command, u_long addr);
437 static int getfrom_srom(u_long addr);
438 static void SetMulticastFilter(struct device *dev);
439 static int get_hw_addr(struct device *dev);
440
441 static void eisa_probe(struct device *dev, u_long iobase);
442 static void pci_probe(struct device *dev, u_long iobase);
443 static struct device *alloc_device(struct device *dev, u_long iobase);
444 static char *build_setup_frame(struct device *dev, int mode);
445 static void disable_ast(struct device *dev);
446 static void enable_ast(struct device *dev, u32 time_out);
447 static void kick_tx(struct device *dev);
448
449 #ifdef MODULE
450 int init_module(void);
451 void cleanup_module(void);
452 static int autoprobed = 1, loading_module = 1;
453 #else
454 static unsigned char de4x5_irq[] = {5,9,10,11};
455 static int autoprobed = 0, loading_module = 0;
456 #endif
457
458 static char name[DE4X5_NAME_LENGTH + 1];
459 static int num_de4x5s = 0, num_eth = 0;
460
461
462
463
464
465
466 static struct bus_type {
467 int bus;
468 int bus_num;
469 int device;
470 int chipset;
471 struct de4x5_srom srom;
472 int autosense;
473 } bus;
474
475
476
477
478 #define RESET_DE4X5 {\
479 int i;\
480 i=inl(DE4X5_BMR);\
481 dce_ms_delay(1);\
482 outl(i | BMR_SWR, DE4X5_BMR);\
483 dce_ms_delay(1);\
484 outl(i, DE4X5_BMR);\
485 dce_ms_delay(1);\
486 for (i=0;i<5;i++) {inl(DE4X5_BMR); dce_ms_delay(1);}\
487 dce_ms_delay(1);\
488 }
489
490
491
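/*
** Probe entry point: search the EISA slots and the PCI bus(es) for
** supported boards. The real set up is done in de4x5_hw_init(); success
** is reported if any device on the list acquired a private area.
*/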
492 int de4x5_probe(struct device *dev)
493 {
494 int tmp = num_de4x5s, status = -ENODEV;
495 u_long iobase = dev->base_addr;
496
497 if ((iobase == 0) && loading_module){
498 printk("Autoprobing is not supported when loading a module based driver.\n");
499 status = -EIO;
500 } else {
501 eisa_probe(dev, iobase);
502 pci_probe(dev, iobase);
503
504 if ((tmp == num_de4x5s) && (iobase != 0) && loading_module) {
505 printk("%s: de4x5_probe() cannot find device at 0x%04lx.\n", dev->name,
506 iobase);
507 }
508
509
510
511
512
513 for (; (dev->priv == NULL) && (dev->next != NULL); dev = dev->next);
514
515 if (dev->priv) status = 0;
516 if (iobase == 0) autoprobed = 1;
517 }
518
519 return status;
520 }
521
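/*
** Identify the board (DE425/DE435/DE500), read and verify the hardware
** address, allocate an aligned private area and the receive buffers,
** work out the IRQ and hook up the device methods.
*/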
522 static int
523 de4x5_hw_init(struct device *dev, u_long iobase)
524 {
525 struct bus_type *lp = &bus;
526 int tmpbus, tmpchs, i, j, status=0;
527 char *tmp;
528
529
530 if (lp->chipset == DC21041) {
531 outl(0, PCI_CFDA);
532 dce_ms_delay(10);
533 }
534
535 RESET_DE4X5;
536
537 if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) == 0) {
538
539
540
541 if (lp->bus == PCI) {
542 if (!is_not_dec) {
543 if ((lp->chipset == DC21040) || (lp->chipset == DC21041)) {
544 strcpy(name, "DE435");
545 } else if (lp->chipset == DC21140) {
546 strcpy(name, "DE500");
547 }
548 } else {
549 strcpy(name, "UNKNOWN");
550 }
551 } else {
552 EISA_signature(name, EISA_ID0);
553 }
554
555 if (*name != '\0') {
556 dev->base_addr = iobase;
557 if (lp->bus == EISA) {
558 printk("%s: %s at %04lx (EISA slot %ld)",
559 dev->name, name, iobase, ((iobase>>12)&0x0f));
560 } else {
561 printk("%s: %s at %04lx (PCI bus %d, device %d)", dev->name, name,
562 iobase, lp->bus_num, lp->device);
563 }
564
565 printk(", h/w address ");
566 status = get_hw_addr(dev);
567 for (i = 0; i < ETH_ALEN - 1; i++) {
568 printk("%2.2x:", dev->dev_addr[i]);
569 }
570 printk("%2.2x,\n", dev->dev_addr[i]);
571
572 tmpbus = lp->bus;
573 tmpchs = lp->chipset;
574
575 if (status == 0) {
576 struct de4x5_private *lp;
577
578
579
580
581
582 dev->priv = (void *) kmalloc(sizeof(struct de4x5_private) + ALIGN,
583 GFP_KERNEL);
584 if (dev->priv == NULL)
585 return -ENOMEM;
586
587
588
589 dev->priv = (void *)(((u_long)dev->priv + ALIGN) & ~ALIGN);
590 lp = (struct de4x5_private *)dev->priv;
591 memset(dev->priv, 0, sizeof(struct de4x5_private));
592 lp->bus = tmpbus;
593 lp->chipset = tmpchs;
594
595
596
597
598 if (de4x5_autosense & AUTO) {
599 lp->autosense = AUTO;
600 } else {
601 if (lp->chipset != DC21140) {
602 if ((lp->chipset == DC21040) && (de4x5_autosense & TP_NW)) {
603 de4x5_autosense = TP;
604 }
605 if ((lp->chipset == DC21041) && (de4x5_autosense & BNC_AUI)) {
606 de4x5_autosense = BNC;
607 }
608 lp->autosense = de4x5_autosense & 0x001f;
609 } else {
610 lp->autosense = de4x5_autosense & 0x00c0;
611 }
612 }
613
614 sprintf(lp->adapter_name,"%s (%s)", name, dev->name);
615 request_region(iobase, (lp->bus == PCI ? DE4X5_PCI_TOTAL_SIZE :
616 DE4X5_EISA_TOTAL_SIZE),
617 lp->adapter_name);
618
619
620
621
622
623
624 for (tmp=NULL, j=0; (j<BUFF_ALLOC_RETRIES) && (tmp==NULL); j++) {
625 if ((tmp = (void *)kmalloc(RX_BUFF_SZ * NUM_RX_DESC + ALIGN,
626 GFP_KERNEL)) != NULL) {
627 tmp = (char *)(((u_long) tmp + ALIGN) & ~ALIGN);
628 for (i=0; i<NUM_RX_DESC; i++) {
629 lp->rx_ring[i].status = 0;
630 lp->rx_ring[i].des1 = RX_BUFF_SZ;
631 lp->rx_ring[i].buf = virt_to_bus(tmp + i * RX_BUFF_SZ);
632 lp->rx_ring[i].next = (u32)NULL;
633 }
634 barrier();
635 }
636 }
637
638 if (tmp != NULL) {
639 lp->rxRingSize = NUM_RX_DESC;
640 lp->txRingSize = NUM_TX_DESC;
641
642
643 lp->rx_ring[lp->rxRingSize - 1].des1 |= RD_RER;
644 lp->tx_ring[lp->txRingSize - 1].des1 |= TD_TER;
645
646
647 outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
648 outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
649
650
651 lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM ;
652 lp->irq_en = IMR_NIM | IMR_AIM;
653
654 lp->tx_enable = TRUE;
655
656 if (dev->irq < 2) {
657 #ifndef MODULE
658 unsigned char irqnum;
659 s32 omr;
660 autoirq_setup(0);
661
662 omr = inl(DE4X5_OMR);
663 outl(IMR_AIM|IMR_RUM, DE4X5_IMR);
664 outl(OMR_SR | omr, DE4X5_OMR);
665
666 irqnum = autoirq_report(1);
667 if (!irqnum) {
668 printk(" and failed to detect IRQ line.\n");
669 status = -ENXIO;
670 } else {
671 for (dev->irq=0,i=0; (i<sizeof(de4x5_irq)) && (!dev->irq); i++) {
672 if (irqnum == de4x5_irq[i]) {
673 dev->irq = irqnum;
674 printk(" and uses IRQ%d.\n", dev->irq);
675 }
676 }
677
678 if (!dev->irq) {
679 printk(" but incorrect IRQ line detected.\n");
680 status = -ENXIO;
681 }
682 }
683
684 outl(0, DE4X5_IMR);
685
686 #endif
687 } else {
688 printk(" and requires IRQ%d (not probed).\n", dev->irq);
689 }
690 } else {
691 printk("%s: Kernel could not allocate RX buffer memory.\n",
692 dev->name);
693 status = -ENXIO;
694 }
695 if (status) release_region(iobase, (lp->bus == PCI ?
696 DE4X5_PCI_TOTAL_SIZE :
697 DE4X5_EISA_TOTAL_SIZE));
698 } else {
699 printk(" which has an Ethernet PROM CRC error.\n");
700 status = -ENXIO;
701 }
702 } else {
703 status = -ENXIO;
704 }
705 } else {
706 status = -ENXIO;
707 }
708
709 if (!status) {
710 if (de4x5_debug > 0) {
711 printk(version);
712 }
713
714
715 dev->open = &de4x5_open;
716 dev->hard_start_xmit = &de4x5_queue_pkt;
717 dev->stop = &de4x5_close;
718 dev->get_stats = &de4x5_get_stats;
719 dev->set_multicast_list = &set_multicast_list;
720 dev->do_ioctl = &de4x5_ioctl;
721
722 dev->mem_start = 0;
723
724
725 ether_setup(dev);
726
727
728 if (lp->chipset == DC21041) {
729 outl(0, DE4X5_SICR);
730 outl(CFDA_PSM, PCI_CFDA);
731 }
732 } else {
733 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
734 if (lp) {
735 kfree_s(bus_to_virt(lp->rx_ring[0].buf),
736 RX_BUFF_SZ * NUM_RX_DESC + ALIGN);
737 }
738 if (dev->priv) {
739 kfree_s(dev->priv, sizeof(struct de4x5_private) + ALIGN);
740 dev->priv = NULL;
741 }
742 }
743
744 return status;
745 }
746
747
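/*
** Open the device: grab the IRQ, (re)initialise the chip and rings via
** de4x5_init(), then start the receiver/transmitter and enable the
** board interrupts.
*/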
748 static int
749 de4x5_open(struct device *dev)
750 {
751 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
752 u_long iobase = dev->base_addr;
753 int i, status = 0;
754 s32 imr, omr, sts;
755
756
757
758
759 if (lp->chipset == DC21041) {
760 outl(0, PCI_CFDA);
761 dce_ms_delay(10);
762 }
763
764 if (request_irq(dev->irq, (void *)de4x5_interrupt, 0, lp->adapter_name)) {
765 printk("de4x5_open(): Requested IRQ%d is busy\n",dev->irq);
766 status = -EAGAIN;
767 } else {
768
769 irq2dev_map[dev->irq] = dev;
770
771
772
773 status = de4x5_init(dev);
774
775 if (de4x5_debug > 1){
776 printk("%s: de4x5 open with irq %d\n",dev->name,dev->irq);
777 printk("\tphysical address: ");
778 for (i=0;i<6;i++){
779 printk("%2.2x:",(short)dev->dev_addr[i]);
780 }
781 printk("\n");
782 printk("Descriptor head addresses:\n");
783 printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
784 printk("Descriptor addresses:\nRX: ");
785 for (i=0;i<lp->rxRingSize-1;i++){
786 if (i < 3) {
787 printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
788 }
789 }
790 printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
791 printk("TX: ");
792 for (i=0;i<lp->txRingSize-1;i++){
793 if (i < 3) {
794 printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
795 }
796 }
797 printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
798 printk("Descriptor buffers:\nRX: ");
799 for (i=0;i<lp->rxRingSize-1;i++){
800 if (i < 3) {
801 printk("0x%8.8x ",lp->rx_ring[i].buf);
802 }
803 }
804 printk("...0x%8.8x\n",lp->rx_ring[i].buf);
805 printk("TX: ");
806 for (i=0;i<lp->txRingSize-1;i++){
807 if (i < 3) {
808 printk("0x%8.8x ", lp->tx_ring[i].buf);
809 }
810 }
811 printk("...0x%8.8x\n", lp->tx_ring[i].buf);
812 printk("Ring size: \nRX: %d\nTX: %d\n",
813 (short)lp->rxRingSize,
814 (short)lp->txRingSize);
815 printk("\tstatus: %d\n", status);
816 }
817
818 if (!status) {
819 dev->tbusy = 0;
820 dev->start = 1;
821 dev->interrupt = UNMASK_INTERRUPTS;
822 dev->trans_start = jiffies;
823
824 START_DE4X5;
825
826
827 imr = 0;
828 UNMASK_IRQs;
829
830
831 sts = inl(DE4X5_STS);
832 outl(sts, DE4X5_STS);
833
834 ENABLE_IRQs;
835 }
836 if (de4x5_debug > 1) {
837 printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
838 printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
839 printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
840 printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
841 printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
842 printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
843 printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
844 printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
845 }
846 }
847
848 MOD_INC_USE_COUNT;
849
850 return status;
851 }
852
853
854
855
856
857
858
859
860
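/*
** Initialise the chip: soft reset, program the bus mode register, reset
** both descriptor rings, load the setup (address filter) frame and then
** autosense the connected media. dev->tbusy is set to hold off
** transmits while this runs.
*/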
861 static int
862 de4x5_init(struct device *dev)
863 {
864 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
865 u_long iobase = dev->base_addr;
866 int i, j, status = 0;
867 s32 bmr, omr;
868
869
870 set_bit(0, (void *)&dev->tbusy);
871
872 RESET_DE4X5;
873
874 bmr = inl(DE4X5_BMR);
875 bmr |= PBL_8 | DESC_SKIP_LEN | CACHE_ALIGN;
876 outl(bmr, DE4X5_BMR);
877
878 if (lp->chipset != DC21140) {
879 omr = TR_96;
880 lp->setup_f = HASH_PERF;
881 } else {
882 omr = OMR_SDP | OMR_SF;
883 lp->setup_f = PERFECT;
884 }
885 outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
886 outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
887
888 lp->rx_new = lp->rx_old = 0;
889 lp->tx_new = lp->tx_old = 0;
890
891 for (i = 0; i < lp->rxRingSize; i++) {
892 lp->rx_ring[i].status = R_OWN;
893 }
894
895 for (i = 0; i < lp->txRingSize; i++) {
896 lp->tx_ring[i].status = 0;
897 }
898
899 barrier();
900
901
902 SetMulticastFilter(dev);
903
904 if (lp->chipset != DC21140) {
905 load_packet(dev, lp->setup_frame, HASH_F|TD_SET|SETUP_FRAME_LEN, NULL);
906 } else {
907 load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, NULL);
908 }
909 outl(omr|OMR_ST, DE4X5_OMR);
910
911
912 for (j=0, i=jiffies; (jiffies<=i+HZ/100) && (j==0);) {   /* wait up to 10ms */
913 if (lp->tx_ring[lp->tx_new].status >= 0) j=1;
914 }
915 outl(omr, DE4X5_OMR);
916
917 if (j == 0) {
918 printk("%s: Setup frame timed out, status %08x\n", dev->name,
919 inl(DE4X5_STS));
920 status = -EIO;
921 }
922
923 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
924 lp->tx_old = lp->tx_new;
925
926
927 if (autoconf_media(dev) == 0) {
928 status = -EIO;
929 }
930
931 return status;
932 }
933
934
935
936
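/*
** Transmit a packet. Completed buffers are reclaimed first; a transmit
** timeout (or too many lost-media events) causes the ring to be flushed
** and the chip to be re-initialised before the frame is requeued.
*/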
937 static int
938 de4x5_queue_pkt(struct sk_buff *skb, struct device *dev)
939 {
940 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
941 u_long iobase = dev->base_addr;
942 int i, status = 0;
943 s32 imr, omr, sts;
944
945
946
947
948
949
950 if (set_bit(0, (void*)&dev->tbusy) == 0) {
951 cli();
952 de4x5_tx(dev);
953 dev->tbusy = 0;
954 sti();
955 }
956
957
958
959
960
961
962 if (dev->tbusy || (lp->lostMedia > LOST_MEDIA_THRESHOLD)) {
963 u_long tickssofar = jiffies - dev->trans_start;
964 if ((tickssofar < QUEUE_PKT_TIMEOUT) &&
965 (lp->lostMedia <= LOST_MEDIA_THRESHOLD)) {
966 status = -1;
967 } else {
968 if (de4x5_debug >= 1) {
969 printk("%s: transmit timed out, status %08x, tbusy:%ld, lostMedia:%d tickssofar:%ld, resetting.\n",dev->name, inl(DE4X5_STS), dev->tbusy, lp->lostMedia, tickssofar);
970 }
971
972
973 STOP_DE4X5;
974
975
976 for (i=lp->tx_old; i!=lp->tx_new; i=(i+1)%lp->txRingSize) {
977 if (lp->skb[i] != NULL) {
978 if (lp->skb[i]->len != FAKE_FRAME_LEN) {
979 if (lp->tx_ring[i].status == T_OWN) {
980 dev_queue_xmit(lp->skb[i], dev, SOPRI_NORMAL);
981 } else {
982 dev_kfree_skb(lp->skb[i], FREE_WRITE);
983 }
984 } else {
985 dev_kfree_skb(lp->skb[i], FREE_WRITE);
986 }
987 lp->skb[i] = NULL;
988 }
989 }
990 if (skb->len != FAKE_FRAME_LEN) {
991 dev_queue_xmit(skb, dev, SOPRI_NORMAL);
992 } else {
993 dev_kfree_skb(skb, FREE_WRITE);
994 }
995
996
997 status = de4x5_init(dev);
998
999
1000 if (!status) {
1001
1002 dev->interrupt = UNMASK_INTERRUPTS;
1003 dev->start = 1;
1004 dev->tbusy = 0;
1005 dev->trans_start = jiffies;
1006
1007 START_DE4X5;
1008
1009
1010 imr = 0;
1011 UNMASK_IRQs;
1012
1013
1014 sts = inl(DE4X5_STS);
1015 outl(sts, DE4X5_STS);
1016
1017 ENABLE_IRQs;
1018 } else {
1019 printk("%s: hardware initialisation failure, status %08x.\n",
1020 dev->name, inl(DE4X5_STS));
1021 }
1022 }
1023 } else if (skb == NULL) {
1024 dev_tint(dev);
1025 } else if (skb->len == FAKE_FRAME_LEN) {
1026 dev_kfree_skb(skb, FREE_WRITE);
1027 } else if (skb->len > 0) {
1028
1029 if (set_bit(0, (void*)&dev->tbusy) != 0) {
1030 printk("%s: Transmitter access conflict.\n", dev->name);
1031 status = -1;
1032 } else {
1033 cli();
1034 if (TX_BUFFS_AVAIL) {
1035 load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
1036 if (lp->tx_enable) {
1037 outl(POLL_DEMAND, DE4X5_TPD);
1038 }
1039
1040 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1041 dev->trans_start = jiffies;
1042
1043 if (TX_BUFFS_AVAIL) {
1044 dev->tbusy = 0;
1045 }
1046 } else {
1047 status = -1;
1048 }
1049 sti();
1050 }
1051 }
1052
1053 return status;
1054 }
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
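/*
** Interrupt handler: acknowledge and service RX, TX, timer, link fail
** and system error events until the status register is quiet, then
** re-enable the board interrupts.
*/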
1067 static void
1068 de4x5_interrupt(int irq, struct pt_regs *regs)
1069 {
1070 struct device *dev = (struct device *)(irq2dev_map[irq]);
1071 struct de4x5_private *lp;
1072 s32 imr, omr, sts;
1073 u_long iobase;
1074
1075 if (dev == NULL) {
1076 printk ("de4x5_interrupt(): irq %d for unknown device.\n", irq);
1077 } else {
1078 lp = (struct de4x5_private *)dev->priv;
1079 iobase = dev->base_addr;
1080
1081 if (dev->interrupt)
1082 printk("%s: Re-entering the interrupt handler.\n", dev->name);
1083
1084 DISABLE_IRQs;
1085 dev->interrupt = MASK_INTERRUPTS;
1086
1087 while ((sts = inl(DE4X5_STS)) & lp->irq_mask) {
1088 outl(sts, DE4X5_STS);
1089
1090 if (sts & (STS_RI | STS_RU))
1091 de4x5_rx(dev);
1092
1093 if (sts & (STS_TI | STS_TU))
1094 de4x5_tx(dev);
1095
1096 if (sts & STS_TM)
1097 de4x5_ast(dev);
1098
1099 if (sts & STS_LNF) {
1100 lp->lostMedia = LOST_MEDIA_THRESHOLD + 1;
1101 lp->irq_mask &= ~IMR_LFM;
1102 kick_tx(dev);
1103 }
1104
1105 if (sts & STS_SE) {
1106 STOP_DE4X5;
1107 printk("%s: Fatal bus error occured, sts=%#8x, device stopped.\n",
1108 dev->name, sts);
1109 }
1110 }
1111
1112 if (TX_BUFFS_AVAIL && dev->tbusy) {
1113 dev->tbusy = 0;
1114 mark_bh(NET_BH);
1115 }
1116
1117 dev->interrupt = UNMASK_INTERRUPTS;
1118 ENABLE_IRQs;
1119 }
1120
1121 return;
1122 }
1123
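/*
** Receive handler. Buffers are fixed in the ring, so each good frame is
** copied into a freshly allocated sk_buff (handling the wrap across the
** end of the ring) and the descriptors are handed back to the chip.
*/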
1124 static int
1125 de4x5_rx(struct device *dev)
1126 {
1127 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1128 int i, entry;
1129 s32 status;
1130 char *buf;
1131
1132 for (entry = lp->rx_new; lp->rx_ring[entry].status >= 0;entry = lp->rx_new) {
1133 status = lp->rx_ring[entry].status;
1134
1135 if (status & RD_FS) {
1136 lp->rx_old = entry;
1137 }
1138
1139 if (status & RD_LS) {
1140 if (status & RD_ES) {
1141 lp->stats.rx_errors++;
1142 if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
1143 if (status & RD_CE) lp->stats.rx_crc_errors++;
1144 if (status & RD_OF) lp->stats.rx_fifo_errors++;
1145 } else {
1146 struct sk_buff *skb;
1147 short pkt_len = (short)(lp->rx_ring[entry].status >> 16) - 4;
1148
1149 if ((skb = dev_alloc_skb(pkt_len+2)) != NULL) {
1150 skb->dev = dev;
1151
1152 skb_reserve(skb,2);
1153 if (entry < lp->rx_old) {
1154 short len = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
1155 memcpy(skb_put(skb,len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), len);
1156 memcpy(skb_put(skb,pkt_len-len), bus_to_virt(lp->rx_ring[0].buf), pkt_len - len);
1157 } else {
1158 memcpy(skb_put(skb,pkt_len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), pkt_len);
1159 }
1160
1161
1162 skb->protocol=eth_type_trans(skb,dev);
1163 netif_rx(skb);
1164
1165
1166 lp->stats.rx_packets++;
1167 for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
1168 if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
1169 lp->pktStats.bins[i]++;
1170 i = DE4X5_PKT_STAT_SZ;
1171 }
1172 }
1173 buf = skb->data;
1174 if (buf[0] & 0x01) {
1175 if ((*(s32 *)&buf[0] == -1) && (*(s16 *)&buf[4] == -1)) {
1176 lp->pktStats.broadcast++;
1177 } else {
1178 lp->pktStats.multicast++;
1179 }
1180 } else if ((*(s32 *)&buf[0] == *(s32 *)&dev->dev_addr[0]) &&
1181 (*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) {
1182 lp->pktStats.unicast++;
1183 }
1184
1185 lp->pktStats.bins[0]++;
1186 if (lp->pktStats.bins[0] == 0) {
1187 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
1188 }
1189 } else {
1190 printk("%s: Insufficient memory; nuking packet.\n", dev->name);
1191 lp->stats.rx_dropped++;
1192 break;
1193 }
1194 }
1195
1196
1197 for (; lp->rx_old!=entry; lp->rx_old=(lp->rx_old+1)%lp->rxRingSize) {
1198 lp->rx_ring[lp->rx_old].status = R_OWN;
1199 barrier();
1200 }
1201 lp->rx_ring[entry].status = R_OWN;
1202 barrier();
1203 }
1204
1205
1206
1207
1208 lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
1209 }
1210
1211 return 0;
1212 }
1213
1214
1215
1216
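/*
** Reclaim transmitted buffers, update the error counters and keep track
** of lost-media events so the autosense logic can kick the transmitter.
*/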
1217 static int
1218 de4x5_tx(struct device *dev)
1219 {
1220 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1221 u_long iobase = dev->base_addr;
1222 int entry;
1223 s32 status;
1224
1225 for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
1226 status = lp->tx_ring[entry].status;
1227 if (status < 0) {
1228 break;
1229 } else if (status & TD_ES) {
1230 lp->stats.tx_errors++;
1231 if (status & TD_NC) lp->stats.tx_carrier_errors++;
1232 if (status & TD_LC) lp->stats.tx_window_errors++;
1233 if (status & TD_UF) lp->stats.tx_fifo_errors++;
1234 if (status & TD_LC) lp->stats.collisions++;
1235 if (status & TD_EC) lp->pktStats.excessive_collisions++;
1236 if (status & TD_DE) lp->stats.tx_aborted_errors++;
1237
1238 if ((status != 0x7fffffff) &&
1239 (status & (TD_LO | TD_NC | TD_EC | TD_LF))) {
1240 lp->lostMedia++;
1241 if (lp->lostMedia > LOST_MEDIA_THRESHOLD) {
1242 kick_tx(dev);
1243 }
1244 } else {
1245 outl(POLL_DEMAND, DE4X5_TPD);
1246 }
1247 } else {
1248 lp->stats.tx_packets++;
1249 lp->lostMedia = 0;
1250 }
1251
1252 if (lp->skb[entry] != NULL) {
1253 dev_kfree_skb(lp->skb[entry], FREE_WRITE);
1254 lp->skb[entry] = NULL;
1255 }
1256
1257
1258 lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
1259 }
1260
1261 return 0;
1262 }
1263
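/*
** Autosense timer service routine (DC21140 only): watch the general
** purpose port for link changes and either reschedule the timer or
** force a transmitter kick when the link looks wrong.
*/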
1264 static int
1265 de4x5_ast(struct device *dev)
1266 {
1267 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1268 u_long iobase = dev->base_addr;
1269 s32 gep;
1270
1271 disable_ast(dev);
1272
1273 if (lp->chipset == DC21140) {
1274 gep = inl(DE4X5_GEP);
1275 if (((lp->media == _100Mb) && (gep & GEP_SLNK)) ||
1276 ((lp->media == _10Mb) && (gep & GEP_LNP)) ||
1277 ((lp->media == _10Mb) && !(gep & GEP_SLNK)) ||
1278 (lp->media == NC)) {
1279 if (lp->linkProb || ((lp->media == NC) && (!(gep & GEP_LNP)))) {
1280 lp->lostMedia = LOST_MEDIA_THRESHOLD + 1;
1281 lp->linkProb = 0;
1282 kick_tx(dev);
1283 } else {
1284 switch(lp->media) {
1285 case NC:
1286 lp->linkProb = 0;
1287 enable_ast(dev, DE4X5_AUTOSENSE_MS);
1288 break;
1289
1290 case _10Mb:
1291 lp->linkProb = 1;
1292 enable_ast(dev, 1500);
1293 break;
1294
1295 case _100Mb:
1296 lp->linkProb = 1;
1297 enable_ast(dev, 4000);
1298 break;
1299 }
1300 }
1301 } else {
1302 lp->linkProb = 0;
1303 enable_ast(dev, DE4X5_AUTOSENSE_MS);
1304 }
1305 }
1306
1307 return 0;
1308 }
1309
1310 static int
1311 de4x5_close(struct device *dev)
1312 {
1313 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1314 u_long iobase = dev->base_addr;
1315 s32 imr, omr;
1316
1317 dev->start = 0;
1318 dev->tbusy = 1;
1319
1320 if (de4x5_debug > 1) {
1321 printk("%s: Shutting down ethercard, status was %8.8x.\n",
1322 dev->name, inl(DE4X5_STS));
1323 }
1324
1325
1326
1327
1328 DISABLE_IRQs;
1329
1330 STOP_DE4X5;
1331
1332
1333
1334
1335 free_irq(dev->irq);
1336 irq2dev_map[dev->irq] = 0;
1337
1338 MOD_DEC_USE_COUNT;
1339
1340
1341 if (lp->chipset == DC21041) {
1342 outl(0, DE4X5_SICR);
1343 outl(CFDA_PSM, PCI_CFDA);
1344 }
1345
1346 return 0;
1347 }
1348
1349 static struct enet_statistics *
1350 de4x5_get_stats(struct device *dev)
1351 {
1352 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1353 u_long iobase = dev->base_addr;
1354
1355 lp->stats.rx_missed_errors = (int) (inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
1356
1357 return &lp->stats;
1358 }
1359
1360 static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb)
1361 {
1362 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1363
1364 lp->tx_ring[lp->tx_new].buf = virt_to_bus(buf);
1365 lp->tx_ring[lp->tx_new].des1 &= TD_TER;
1366 lp->tx_ring[lp->tx_new].des1 |= flags;
1367 lp->skb[lp->tx_new] = skb;
1368 barrier();
1369 lp->tx_ring[lp->tx_new].status = T_OWN;
1370 barrier();
1371
1372 return;
1373 }
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
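/*
** Set or clear promiscuous/multicast modes. A new setup frame is loaded
** through the transmit ring when the filter itself has to change.
*/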
1384 static void
1385 set_multicast_list(struct device *dev)
1386 {
1387 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1388 u_long iobase = dev->base_addr;
1389
1390
1391 if (irq2dev_map[dev->irq] != NULL) {
1392 if (dev->flags & IFF_PROMISC) {
1393 u32 omr;
1394 omr = inl(DE4X5_OMR);
1395 omr |= OMR_PR;
1396 outl(omr, DE4X5_OMR);
1397 } else {
1398 SetMulticastFilter(dev);
1399 if (lp->setup_f == HASH_PERF) {
1400 load_packet(dev, lp->setup_frame, TD_IC | HASH_F | TD_SET |
1401 SETUP_FRAME_LEN, NULL);
1402 } else {
1403 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
1404 SETUP_FRAME_LEN, NULL);
1405 }
1406
1407 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1408 outl(POLL_DEMAND, DE4X5_TPD);
1409 dev->trans_start = jiffies;
1410 }
1411 }
1412
1413 return;
1414 }
1415
1416
1417
1418
1419
1420
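/*
** Build the setup frame: either the imperfect hash filter (CRC-32 over
** each multicast address) or a list of perfect filter addresses,
** depending on the chip in use.
*/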
1421 static void SetMulticastFilter(struct device *dev)
1422 {
1423 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1424 struct dev_mc_list *dmi=dev->mc_list;
1425 u_long iobase = dev->base_addr;
1426 int i, j, bit, byte;
1427 u16 hashcode;
1428 u32 omr, crc, poly = CRC_POLYNOMIAL_LE;
1429 char *pa;
1430 unsigned char *addrs;
1431
1432 omr = inl(DE4X5_OMR);
1433 omr &= ~OMR_PR;
1434 pa = build_setup_frame(dev, ALL);
1435
1436 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 14)) {
1437 omr |= OMR_PM;
1438 } else if (lp->setup_f == HASH_PERF) {
1439
1440 for (i=0;i<dev->mc_count;i++) {
1441 addrs=dmi->dmi_addr;
1442 dmi=dmi->next;
1443 if ((*addrs & 0x01) == 1) {
1444 crc = 0xffffffff;
1445 for (byte=0;byte<ETH_ALEN;byte++) {
1446
1447 for (bit = *addrs++,j=0;j<8;j++, bit>>=1) {
1448 crc = (crc >> 1) ^ (((crc ^ bit) & 0x01) ? poly : 0);
1449 }
1450 }
1451 hashcode = crc & HASH_BITS;
1452
1453 byte = hashcode >> 3;
1454 bit = 1 << (hashcode & 0x07);
1455
1456 byte <<= 1;
1457 if (byte & 0x02) {
1458 byte -= 1;
1459 }
1460 lp->setup_frame[byte] |= bit;
1461 }
1462 }
1463 } else {
1464 for (j=0; j<dev->mc_count; j++) {
1465 addrs=dmi->dmi_addr;
1466 dmi=dmi->next;
1467 for (i=0; i<ETH_ALEN; i++) {
1468 *(pa + (i&1)) = *addrs++;
1469 if (i & 0x01) pa += 4;
1470 }
1471 }
1472 }
1473 outl(omr, DE4X5_OMR);
1474
1475 return;
1476 }
1477
1478
1479
1480
1481
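/*
** Probe each EISA slot (or just the one implied by ioaddr) for a DE425,
** enable I/O and bus mastering and hand the board to de4x5_hw_init().
*/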
1482 static void eisa_probe(struct device *dev, u_long ioaddr)
1483 {
1484 int i, maxSlots, status;
1485 u_short vendor, device;
1486 s32 cfid;
1487 u_long iobase;
1488 struct bus_type *lp = &bus;
1489 char name[DE4X5_STRLEN];
1490
1491 if (!ioaddr && autoprobed) return ;
1492 if ((ioaddr < 0x1000) && (ioaddr > 0)) return;
1493
1494 lp->bus = EISA;
1495
1496 if (ioaddr == 0) {
1497 iobase = EISA_SLOT_INC;
1498 i = 1;
1499 maxSlots = MAX_EISA_SLOTS;
1500 } else {
1501 iobase = ioaddr;
1502 i = (ioaddr >> 12);
1503 maxSlots = i + 1;
1504 }
1505
1506 for (status = -ENODEV; (i<maxSlots) && (dev!=NULL); i++, iobase+=EISA_SLOT_INC) {
1507 if (EISA_signature(name, EISA_ID)) {
1508 cfid = inl(PCI_CFID);
1509 device = (u_short)(cfid >> 16);
1510 vendor = (u_short) cfid;
1511
1512 lp->bus = EISA;
1513 lp->chipset = device;
1514 if (DevicePresent(EISA_APROM) == 0) {
1515
1516 outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
1517 outl(0x00004000, PCI_CFLT);
1518 outl(iobase, PCI_CBIO);
1519
1520 if (check_region(iobase, DE4X5_EISA_TOTAL_SIZE) == 0) {
1521 if ((dev = alloc_device(dev, iobase)) != NULL) {
1522 if ((status = de4x5_hw_init(dev, iobase)) == 0) {
1523 num_de4x5s++;
1524 }
1525 num_eth++;
1526 }
1527 } else if (autoprobed) {
1528 printk("%s: region already allocated at 0x%04lx.\n", dev->name, iobase);
1529 }
1530 }
1531 }
1532 }
1533
1534 return;
1535 }
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
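/*
** PCI probe: walk the devices of network class looking for DC21040,
** DC21041 or DC21140 chips, pick up the I/O base and IRQ from
** configuration space and enable bus mastering if needed.
*/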
1549 #define PCI_DEVICE (dev_num << 3)
1550 #define PCI_LAST_DEV 32
1551
1552 static void pci_probe(struct device *dev, u_long ioaddr)
1553 {
1554 u_char irq;
1555 u_char pb, pbus, dev_num, dnum, dev_fn;
1556 u_short vendor, device, index, status;
1557 u_int class = DE4X5_CLASS_CODE;
1558 u_int iobase;
1559 struct bus_type *lp = &bus;
1560
1561 if (!ioaddr && autoprobed) return ;
1562
1563 if (pcibios_present()) {
1564 lp->bus = PCI;
1565
1566 if (ioaddr < 0x1000) {
1567 pbus = (u_short)(ioaddr >> 8);
1568 dnum = (u_short)(ioaddr & 0xff);
1569 } else {
1570 pbus = 0;
1571 dnum = 0;
1572 }
1573
1574 for (index=0;
1575 (pcibios_find_class(class, index, &pb, &dev_fn)!= PCIBIOS_DEVICE_NOT_FOUND);
1576 index++) {
1577 dev_num = PCI_SLOT(dev_fn);
1578
1579 if ((!pbus && !dnum) || ((pbus == pb) && (dnum == dev_num))) {
1580 pcibios_read_config_word(pb, PCI_DEVICE, PCI_VENDOR_ID, &vendor);
1581 pcibios_read_config_word(pb, PCI_DEVICE, PCI_DEVICE_ID, &device);
1582 if (is_DC21040 || is_DC21041 || is_DC21140) {
1583
1584 lp->device = dev_num;
1585 lp->bus_num = pb;
1586
1587
1588 lp->chipset = device;
1589
1590
1591 pcibios_read_config_dword(pb, PCI_DEVICE, PCI_BASE_ADDRESS_0, &iobase);
1592 iobase &= CBIO_MASK;
1593
1594
1595 pcibios_read_config_byte(pb, PCI_DEVICE, PCI_INTERRUPT_LINE, &irq);
1596
1597
1598 pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
1599 if (status & PCI_COMMAND_IO) {
1600 if (!(status & PCI_COMMAND_MASTER)) {
1601 status |= PCI_COMMAND_MASTER;
1602 pcibios_write_config_word(pb, PCI_DEVICE, PCI_COMMAND, status);
1603 pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
1604 }
1605 if (status & PCI_COMMAND_MASTER) {
1606 if ((DevicePresent(DE4X5_APROM) == 0) || is_not_dec) {
1607 if (check_region(iobase, DE4X5_PCI_TOTAL_SIZE) == 0) {
1608 if ((dev = alloc_device(dev, iobase)) != NULL) {
1609 dev->irq = irq;
1610 if ((status = de4x5_hw_init(dev, iobase)) == 0) {
1611 num_de4x5s++;
1612 }
1613 num_eth++;
1614 }
1615 } else if (autoprobed) {
1616 printk("%s: region already allocated at 0x%04x.\n", dev->name, (u_short)iobase);
1617 }
1618 }
1619 }
1620 }
1621 }
1622 }
1623 }
1624 }
1625
1626 return;
1627 }
1628
1629
1630
1631
1632
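/*
** Find a free entry in the device list (or extend the list) for the
** board just probed, fixing up the autoprobe placeholder entry when
** necessary.
*/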
1633 static struct device *alloc_device(struct device *dev, u_long iobase)
1634 {
1635 int addAutoProbe = 0;
1636 struct device *tmp = NULL, *ret;
1637 int (*init)(struct device *) = NULL;
1638
1639
1640
1641
1642 if (!loading_module) {
1643 while (dev->next != NULL) {
1644 if ((dev->base_addr == DE4X5_NDA) || (dev->base_addr == 0)) break;
1645 dev = dev->next;
1646 num_eth++;
1647 }
1648
1649
1650
1651
1652
1653 if ((dev->base_addr == 0) && (num_de4x5s > 0)) {
1654 addAutoProbe++;
1655 tmp = dev->next;
1656 init = dev->init;
1657 }
1658
1659
1660
1661
1662
1663 if ((dev->next == NULL) &&
1664 !((dev->base_addr == DE4X5_NDA) || (dev->base_addr == 0))){
1665 dev->next = (struct device *)kmalloc(sizeof(struct device) + 8,
1666 GFP_KERNEL);
1667
1668 dev = dev->next;
1669 if (dev == NULL) {
1670 printk("eth%d: Device not initialised, insufficient memory\n",
1671 num_eth);
1672 } else {
1673
1674
1675
1676
1677
1678 dev->name = (char *)(dev + 1);   /* name storage follows the struct */
1679 if (num_eth > 9999) {
1680 sprintf(dev->name,"eth????");
1681 } else {
1682 sprintf(dev->name,"eth%d", num_eth);
1683 }
1684 dev->base_addr = iobase;
1685 dev->next = NULL;
1686 dev->init = &de4x5_probe;
1687 num_de4x5s++;
1688 }
1689 }
1690 ret = dev;
1691
1692
1693
1694
1695
1696 if (ret != NULL) {
1697 if (addAutoProbe) {
1698 for (; (tmp->next!=NULL) && (tmp->base_addr!=DE4X5_NDA); tmp=tmp->next);
1699
1700
1701
1702
1703
1704 if ((tmp->next == NULL) && !(tmp->base_addr == DE4X5_NDA)) {
1705 tmp->next = (struct device *)kmalloc(sizeof(struct device) + 8,
1706 GFP_KERNEL);
1707 tmp = tmp->next;
1708 if (tmp == NULL) {
1709 printk("%s: Insufficient memory to extend the device list.\n",
1710 dev->name);
1711 } else {
1712
1713
1714
1715
1716
1717 tmp->name = (char *)(tmp + 1);   /* name storage follows the struct */
1718 if (num_eth > 9999) {
1719 sprintf(tmp->name,"eth????");
1720 } else {
1721 sprintf(tmp->name,"eth%d", num_eth);
1722 }
1723 tmp->base_addr = 0;
1724 tmp->next = NULL;
1725 tmp->init = init;
1726 }
1727 } else {
1728 tmp->base_addr = 0;
1729 }
1730 }
1731 }
1732 } else {
1733 ret = dev;
1734 }
1735
1736 return ret;
1737 }
1738
1739
1740
1741
1742
1743
1744
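/*
** Figure out which media port (or, for the DC21140, which speed) to
** use, starting from the user supplied autosense value or a chip
** specific default.
*/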
1745 static int autoconf_media(struct device *dev)
1746 {
1747 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1748 u_long iobase = dev->base_addr;
1749
1750 lp->tx_enable = YES;
1751 if (de4x5_debug > 0 ) {
1752 if (lp->chipset != DC21140) {
1753 printk("%s: Searching for media... ",dev->name);
1754 } else {
1755 printk("%s: Searching for mode... ",dev->name);
1756 }
1757 }
1758
1759 if (lp->chipset == DC21040) {
1760 lp->media = (lp->autosense == AUTO ? TP : lp->autosense);
1761 dc21040_autoconf(dev);
1762 } else if (lp->chipset == DC21041) {
1763 lp->media = (lp->autosense == AUTO ? TP_NW : lp->autosense);
1764 dc21041_autoconf(dev);
1765 } else if (lp->chipset == DC21140) {
1766 disable_ast(dev);
1767 lp->media = (lp->autosense == AUTO ? _10Mb : lp->autosense);
1768 dc21140_autoconf(dev);
1769 }
1770
1771 if (de4x5_debug > 0 ) {
1772 if (lp->chipset != DC21140) {
1773 printk("media is %s\n", (lp->media == NC ? "unconnected!" :
1774 (lp->media == TP ? "TP." :
1775 (lp->media == ANS ? "TP/Nway." :
1776 (lp->media == BNC ? "BNC." :
1777 (lp->media == AUI ? "AUI." :
1778 "BNC/AUI."
1779 ))))));
1780 } else {
1781 printk("mode is %s\n",(lp->media == NC ? "link down.":
1782 (lp->media == _100Mb ? "100Mb/s." :
1783 (lp->media == _10Mb ? "10Mb/s." :
1784 "\?\?\?"
1785 ))));
1786 }
1787 }
1788
1789 if (lp->media) {
1790 lp->lostMedia = 0;
1791 inl(DE4X5_MFC);
1792 if ((lp->media == TP) || (lp->media == ANS)) {
1793 lp->irq_mask |= IMR_LFM;
1794 }
1795 }
1796 dce_ms_delay(10);
1797
1798 return (lp->media);
1799 }
1800
1801 static void dc21040_autoconf(struct device *dev)
1802 {
1803 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1804 u_long iobase = dev->base_addr;
1805 int i, linkBad;
1806 s32 sisr = 0, t_3s = 3000;
1807
1808 switch (lp->media) {
1809 case TP:
1810 reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
1811 for (linkBad=1,i=0;(i<t_3s) && linkBad && !(sisr & SISR_NCR);i++) {
1812 if (((sisr = inl(DE4X5_SISR)) & SISR_LKF) == 0) linkBad = 0;
1813 dce_ms_delay(1);
1814 }
1815 if (linkBad && (lp->autosense == AUTO)) {
1816 lp->media = BNC_AUI;
1817 dc21040_autoconf(dev);
1818 }
1819 break;
1820
1821 case BNC:
1822 case AUI:
1823 case BNC_AUI:
1824 reset_init_sia(dev, 0x8f09, 0x0705, 0x0006);
1825 dce_ms_delay(500);
1826 linkBad = ping_media(dev);
1827 if (linkBad && (lp->autosense == AUTO)) {
1828 lp->media = EXT_SIA;
1829 dc21040_autoconf(dev);
1830 }
1831 break;
1832
1833 case EXT_SIA:
1834 reset_init_sia(dev, 0x3041, 0x0000, 0x0006);
1835 dce_ms_delay(500);
1836 linkBad = ping_media(dev);
1837 if (linkBad && (lp->autosense == AUTO)) {
1838 lp->media = NC;
1839 dc21040_autoconf(dev);
1840 }
1841 break;
1842
1843 case NC:
1844 #ifndef __alpha__
1845 reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
1846 break;
1847 #else
1848
1849 reset_init_sia(dev, 0x8f09, 0x0705, 0x0006);
1850 #endif
1851 }
1852
1853 return;
1854 }
1855
1856
1857
1858
1859
1860
1861
1862 static void dc21041_autoconf(struct device *dev)
1863 {
1864 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1865 u_long iobase = dev->base_addr;
1866 s32 sts, irqs, irq_mask, omr;
1867
1868 switch (lp->media) {
1869 case TP_NW:
1870 omr = inl(DE4X5_OMR);
1871 outl(omr | OMR_FD, DE4X5_OMR);
1872 irqs = STS_LNF | STS_LNP;
1873 irq_mask = IMR_LFM | IMR_LPM;
1874 sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
1875 if (sts & STS_LNP) {
1876 lp->media = ANS;
1877 } else {
1878 lp->media = AUI;
1879 }
1880 dc21041_autoconf(dev);
1881 break;
1882
1883 case ANS:
1884 irqs = STS_LNP;
1885 irq_mask = IMR_LPM;
1886 sts = test_ans(dev, irqs, irq_mask, 3000);
1887 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
1888 lp->media = TP;
1889 dc21041_autoconf(dev);
1890 }
1891 break;
1892
1893 case TP:
1894 omr = inl(DE4X5_OMR);
1895 outl(omr & ~OMR_FD, DE4X5_OMR);
1896 irqs = STS_LNF | STS_LNP;
1897 irq_mask = IMR_LFM | IMR_LPM;
1898 sts = test_media(dev, irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
1899 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
1900 if (inl(DE4X5_SISR) & SISR_NRA) {
1901 lp->media = AUI;
1902 } else {
1903 lp->media = BNC;
1904 }
1905 dc21041_autoconf(dev);
1906 }
1907 break;
1908
1909 case AUI:
1910 omr = inl(DE4X5_OMR);
1911 outl(omr & ~OMR_FD, DE4X5_OMR);
1912 irqs = 0;
1913 irq_mask = 0;
1914 sts = test_media(dev, irqs, irq_mask, 0xef09, 0xf7fd, 0x000e, 1000);
1915 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
1916 lp->media = BNC;
1917 dc21041_autoconf(dev);
1918 }
1919 break;
1920
1921 case BNC:
1922 omr = inl(DE4X5_OMR);
1923 outl(omr & ~OMR_FD, DE4X5_OMR);
1924 irqs = 0;
1925 irq_mask = 0;
1926 sts = test_media(dev, irqs, irq_mask, 0xef09, 0xf7fd, 0x0006, 1000);
1927 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
1928 lp->media = NC;
1929 } else {
1930 if (ping_media(dev)) lp->media = NC;
1931 }
1932 break;
1933
1934 case NC:
1935 omr = inl(DE4X5_OMR);
1936 outl(omr | OMR_FD, DE4X5_OMR);
1937 reset_init_sia(dev, 0xef01, 0xffff, 0x0008);
1938 break;
1939 }
1940
1941 return;
1942 }
1943
1944
1945
1946
1947 static void dc21140_autoconf(struct device *dev)
1948 {
1949 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1950 u_long iobase = dev->base_addr;
1951 s32 omr;
1952
1953 switch(lp->media) {
1954 case _100Mb:
1955 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR));
1956 omr |= (de4x5_full_duplex ? OMR_FD : 0);
1957 outl(omr | OMR_PS | OMR_HBD | OMR_PCS | OMR_SCR, DE4X5_OMR);
1958 outl(GEP_FDXD | GEP_MODE, DE4X5_GEP);
1959 break;
1960
1961 case _10Mb:
1962 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR));
1963 omr |= (de4x5_full_duplex ? OMR_FD : 0);
1964 outl(omr | OMR_TTM, DE4X5_OMR);
1965 outl(GEP_FDXD, DE4X5_GEP);
1966 break;
1967 }
1968
1969 return;
1970 }
1971
1972 static int
1973 test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
1974 {
1975 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1976 u_long iobase = dev->base_addr;
1977 s32 sts, time, csr12;
1978
1979 reset_init_sia(dev, csr13, csr14, csr15);
1980
1981
1982 load_ms_timer(dev, msec);
1983
1984
1985 sts = inl(DE4X5_STS);
1986 outl(sts, DE4X5_STS);
1987
1988
1989 csr12 = inl(DE4X5_SISR);
1990 outl(csr12, DE4X5_SISR);
1991
1992
1993 do {
1994 time = inl(DE4X5_GPT) & GPT_VAL;
1995 sts = inl(DE4X5_STS);
1996 } while ((time != 0) && !(sts & irqs));
1997
1998 sts = inl(DE4X5_STS);
1999
2000 return sts;
2001 }
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025 static int ping_media(struct device *dev)
2026 {
2027 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2028 u_long iobase = dev->base_addr;
2029 int i, entry, linkBad;
2030 s32 omr, t_3s = 4000;
2031 char frame[64];
2032
2033 create_packet(dev, frame, sizeof(frame));
2034
2035 entry = lp->tx_new;
2036 load_packet(dev, frame, TD_LS | TD_FS | sizeof(frame),NULL);
2037
2038 omr = inl(DE4X5_OMR);
2039 outl(omr|OMR_ST, DE4X5_OMR);
2040
2041 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
2042 lp->tx_old = lp->tx_new;
2043
2044
2045 for (linkBad=1,i=0;(i<t_3s) && linkBad;i++) {
2046 if (inl(DE4X5_SISR) & SISR_NCR) break;
2047 if (lp->tx_ring[entry].status >= 0) linkBad=0;
2048 dce_ms_delay(1);
2049 }
2050 outl(omr, DE4X5_OMR);
2051
2052 return ((linkBad || (lp->tx_ring[entry].status & TD_ES)) ? 1 : 0);
2053 }
2054
2055
2056
2057
2058
2059 static int test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec)
2060 {
2061 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2062 u_long iobase = dev->base_addr;
2063 s32 sts, ans;
2064
2065 outl(irq_mask, DE4X5_IMR);
2066
2067
2068 load_ms_timer(dev, msec);
2069
2070
2071 sts = inl(DE4X5_STS);
2072 outl(sts, DE4X5_STS);
2073
2074
2075 do {
2076 ans = inl(DE4X5_SISR) & SISR_ANS;
2077 sts = inl(DE4X5_STS);
2078 } while (!(sts & irqs) && (ans ^ ANS_NWOK) != 0);
2079
2080 return ((sts & STS_LNP) && ((ans ^ ANS_NWOK) == 0) ? STS_LNP : 0);
2081 }
2082
2083
2084
2085
2086 static void reset_init_sia(struct device *dev, s32 sicr, s32 strr, s32 sigr)
2087 {
2088 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2089 u_long iobase = dev->base_addr;
2090
2091 RESET_SIA;
2092 outl(sigr, DE4X5_SIGR);
2093 outl(strr, DE4X5_STRR);
2094 outl(sicr, DE4X5_SICR);
2095
2096 return;
2097 }
2098
2099
2100
2101
2102 static void load_ms_timer(struct device *dev, u32 msec)
2103 {
2104 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2105 u_long iobase = dev->base_addr;
2106 s32 i = 2048, j;
2107
2108 if (lp->chipset == DC21140) {
2109 j = inl(DE4X5_OMR);
2110 if ((j & OMR_TTM) && (j & OMR_PS)) {
2111 i = 8192;
2112 } else if ((~j & OMR_TTM) && (j & OMR_PS)) {
2113 i = 819;
2114 }
2115 }
2116
2117 outl((s32)(msec * 10000)/i, DE4X5_GPT);
2118
2119 return;
2120 }
2121
2122
2123
2124
2125 static void create_packet(struct device *dev, char *frame, int len)
2126 {
2127 int i;
2128 char *buf = frame;
2129
2130 for (i=0; i<ETH_ALEN; i++) {
2131 *buf++ = dev->dev_addr[i];
2132 }
2133 for (i=0; i<ETH_ALEN; i++) {
2134 *buf++ = dev->dev_addr[i];
2135 }
2136
2137 *buf++ = 0;
2138 *buf++ = 1;
2139
2140 return;
2141 }
2142
2143
2144
2145
2146 static void dce_us_delay(u32 usec)
2147 {
2148 udelay(usec);
2149
2150 return;
2151 }
2152
2153
2154
2155
2156 static void dce_ms_delay(u32 msec)
2157 {
2158 u_int i;
2159
2160 for (i=0; i<msec; i++) {
2161 dce_us_delay(1000);
2162 }
2163
2164 return;
2165 }
2166
2167
2168
2169
2170
2171 static int EISA_signature(char *name, s32 eisa_id)
2172 {
2173 u_int i;
2174 const char *signatures[] = DE4X5_SIGNATURE;
2175 char ManCode[DE4X5_STRLEN];
2176 union {
2177 s32 ID;
2178 char Id[4];
2179 } Eisa;
2180 int status = 0;
2181
2182 *name = '\0';
2183 Eisa.ID = inl(eisa_id);
2184
2185 ManCode[0]=(((Eisa.Id[0]>>2)&0x1f)+0x40);
2186 ManCode[1]=(((Eisa.Id[1]&0xe0)>>5)+((Eisa.Id[0]&0x03)<<3)+0x40);
2187 ManCode[2]=(((Eisa.Id[2]>>4)&0x0f)+0x30);
2188 ManCode[3]=((Eisa.Id[2]&0x0f)+0x30);
2189 ManCode[4]=(((Eisa.Id[3]>>4)&0x0f)+0x30);
2190 ManCode[5]='\0';
2191
2192 for (i=0;(*signatures[i] != '\0') && (*name == '\0');i++) {
2193 if (strstr(ManCode, signatures[i]) != NULL) {
2194 strcpy(name,ManCode);
2195 status = 1;
2196 }
2197 }
2198
2199 return status;
2200 }
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
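/*
** Look for the Ethernet address PROM signature on a DC21040, or simply
** read the whole SROM contents on the newer chips.
*/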
2214 static int DevicePresent(u_long aprom_addr)
2215 {
2216 union {
2217 struct {
2218 u32 a;
2219 u32 b;
2220 } llsig;
2221 char Sig[sizeof(u32) << 1];
2222 } dev;
2223 char data;
2224 int i, j, tmp, status = 0;
2225 short sigLength;
2226 struct bus_type *lp = &bus;
2227
2228 dev.llsig.a = ETH_PROM_SIG;
2229 dev.llsig.b = ETH_PROM_SIG;
2230 sigLength = sizeof(u32) << 1;
2231
2232 if (lp->chipset == DC21040) {
2233 for (i=0,j=0;(j<sigLength) && (i<PROBE_LENGTH+sigLength-1);i++) {
2234 if (lp->bus == PCI) {
2235 while ((tmp = inl(aprom_addr)) < 0);
2236 data = (char)tmp;
2237 } else {
2238 data = inb(aprom_addr);
2239 }
2240 if (dev.Sig[j] == data) {
2241 j++;
2242 } else {
2243 if (data == dev.Sig[0]) {
2244 j=1;
2245 } else {
2246 j=0;
2247 }
2248 }
2249 }
2250
2251 if (j!=sigLength) {
2252 status = -ENODEV;
2253 }
2254
2255 } else {
2256 short *p = (short *)&lp->srom;
2257 for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
2258 *p++ = srom_rd(aprom_addr, i);
2259 }
2260 }
2261
2262 return status;
2263 }
2264
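/*
** Read the 6 byte hardware address from the address PROM / SROM and
** check it against the stored checksum.
*/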
2265 static int get_hw_addr(struct device *dev)
2266 {
2267 u_long iobase = dev->base_addr;
2268 int i, k, tmp, status = 0;
2269 u_short j,chksum;
2270 struct bus_type *lp = &bus;
2271
2272 for (i=0,k=0,j=0;j<3;j++) {
2273 k <<= 1 ;
2274 if (k > 0xffff) k-=0xffff;
2275
2276 if (lp->bus == PCI) {
2277 if (lp->chipset == DC21040) {
2278 while ((tmp = inl(DE4X5_APROM)) < 0);
2279 k += (u_char) tmp;
2280 dev->dev_addr[i++] = (u_char) tmp;
2281 while ((tmp = inl(DE4X5_APROM)) < 0);
2282 k += (u_short) (tmp << 8);
2283 dev->dev_addr[i++] = (u_char) tmp;
2284 } else {
2285 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
2286 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
2287 }
2288 } else {
2289 k += (u_char) (tmp = inb(EISA_APROM));
2290 dev->dev_addr[i++] = (u_char) tmp;
2291 k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
2292 dev->dev_addr[i++] = (u_char) tmp;
2293 }
2294
2295 if (k > 0xffff) k-=0xffff;
2296 }
2297 if (k == 0xffff) k=0;
2298
2299 if (lp->bus == PCI) {
2300 if (lp->chipset == DC21040) {
2301 while ((tmp = inl(DE4X5_APROM)) < 0);
2302 chksum = (u_char) tmp;
2303 while ((tmp = inl(DE4X5_APROM)) < 0);
2304 chksum |= (u_short) (tmp << 8);
2305 if (k != chksum) status = -1;
2306 }
2307 } else {
2308 chksum = (u_char) inb(EISA_APROM);
2309 chksum |= (u_short) (inb(EISA_APROM) << 8);
2310 if (k != chksum) status = -1;
2311 }
2312
2313
2314 return status;
2315 }
2316
2317
2318
2319
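/*
** Bit-banged access to the serial ROM (SROM): clock a read command and
** address out to the SROM interface register and shift the 16 data bits
** back in.
*/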
2320 static short srom_rd(u_long addr, u_char offset)
2321 {
2322 sendto_srom(SROM_RD | SROM_SR, addr);
2323
2324 srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
2325 srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
2326 srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
2327
2328 return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
2329 }
2330
2331 static void srom_latch(u_int command, u_long addr)
2332 {
2333 sendto_srom(command, addr);
2334 sendto_srom(command | DT_CLK, addr);
2335 sendto_srom(command, addr);
2336
2337 return;
2338 }
2339
2340 static void srom_command(u_int command, u_long addr)
2341 {
2342 srom_latch(command, addr);
2343 srom_latch(command, addr);
2344 srom_latch((command & 0x0000ff00) | DT_CS, addr);
2345
2346 return;
2347 }
2348
2349 static void srom_address(u_int command, u_long addr, u_char offset)
2350 {
2351 int i;
2352 char a;
2353
2354 a = (char)(offset << 2);
2355 for (i=0; i<6; i++, a <<= 1) {
2356 srom_latch(command | ((a < 0) ? DT_IN : 0), addr);
2357 }
2358 dce_us_delay(1);
2359
2360 i = (getfrom_srom(addr) >> 3) & 0x01;
2361 if (i != 0) {
2362 printk("Bad SROM address phase.....\n");
2363
2364 }
2365
2366 return;
2367 }
2368
2369 static short srom_data(u_int command, u_long addr)
2370 {
2371 int i;
2372 short word = 0;
2373 s32 tmp;
2374
2375 for (i=0; i<16; i++) {
2376 sendto_srom(command | DT_CLK, addr);
2377 tmp = getfrom_srom(addr);
2378 sendto_srom(command, addr);
2379
2380 word = (word << 1) | ((tmp >> 3) & 0x01);
2381 }
2382
2383 sendto_srom(command & 0x0000ff00, addr);
2384
2385 return word;
2386 }
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403 static void sendto_srom(u_int command, u_long addr)
2404 {
2405 outl(command, addr);
2406 dce_us_delay(1);
2407
2408 return;
2409 }
2410
2411 static int getfrom_srom(u_long addr)
2412 {
2413 s32 tmp;
2414
2415 tmp = inl(addr);
2416 dce_us_delay(1);
2417
2418 return tmp;
2419 }
2420
2421 static char *build_setup_frame(struct device *dev, int mode)
2422 {
2423 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2424 int i;
2425 char *pa = lp->setup_frame;
2426
2427
2428 if (mode == ALL) {
2429 memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
2430 }
2431
2432 if (lp->setup_f == HASH_PERF) {
2433 for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
2434 *(pa + i) = dev->dev_addr[i];
2435 if (i & 0x01) pa += 2;
2436 }
2437 *(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80;
2438 } else {
2439 for (i=0; i<ETH_ALEN; i++) {
2440 *(pa + (i&1)) = dev->dev_addr[i];
2441 if (i & 0x01) pa += 4;
2442 }
2443 for (i=0; i<ETH_ALEN; i++) {
2444 *(pa + (i&1)) = (char) 0xff;
2445 if (i & 0x01) pa += 4;
2446 }
2447 }
2448
2449 return pa;
2450 }
2451
2452 static void enable_ast(struct device *dev, u32 time_out)
2453 {
2454 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2455 u_long iobase = dev->base_addr;
2456
2457 lp->irq_mask |= IMR_TMM;
2458 outl(lp->irq_mask, DE4X5_IMR);
2459 load_ms_timer(dev, time_out);
2460
2461 return;
2462 }
2463
2464 static void disable_ast(struct device *dev)
2465 {
2466 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2467 u_long iobase = dev->base_addr;
2468
2469 lp->irq_mask &= ~IMR_TMM;
2470 outl(lp->irq_mask, DE4X5_IMR);
2471 load_ms_timer(dev, 0);
2472
2473 return;
2474 }
2475
2476 static void kick_tx(struct device *dev)
2477 {
2478 struct sk_buff *skb;
2479
2480 if ((skb = alloc_skb(0, GFP_ATOMIC)) != NULL) {
2481 skb->len= FAKE_FRAME_LEN;
2482 skb->arp=1;
2483 skb->dev=dev;
2484 dev_queue_xmit(skb, dev, SOPRI_NORMAL);
2485 }
2486
2487 return;
2488 }
2489
2490
2491
2492
2493
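/*
** Private ioctl interface: get/set the hardware address, toggle
** promiscuous and multicast modes, and dump registers, rings and the
** driver's packet statistics.
*/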
2494 static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd)
2495 {
2496 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2497 struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_data;
2498 u_long iobase = dev->base_addr;
2499 int i, j, status = 0;
2500 s32 omr;
2501 union {
2502 u8 addr[(HASH_TABLE_LEN * ETH_ALEN)];
2503 u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
2504 u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2];
2505 } tmp;
2506
2507 switch(ioc->cmd) {
2508 case DE4X5_GET_HWADDR:
2509 ioc->len = ETH_ALEN;
2510 status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
2511 if (status)
2512 break;
2513 for (i=0; i<ETH_ALEN; i++) {
2514 tmp.addr[i] = dev->dev_addr[i];
2515 }
2516 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
2517
2518 break;
2519 case DE4X5_SET_HWADDR:
2520 status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN);
2521 if (status)
2522 break;
2523 status = -EPERM;
2524 if (!suser())
2525 break;
2526 status = 0;
2527 memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN);
2528 for (i=0; i<ETH_ALEN; i++) {
2529 dev->dev_addr[i] = tmp.addr[i];
2530 }
2531 build_setup_frame(dev, PHYS_ADDR_ONLY);
2532
2533 while (set_bit(0, (void *)&dev->tbusy) != 0);
2534 if (lp->setup_f == HASH_PERF) {
2535 load_packet(dev, lp->setup_frame, TD_IC | HASH_F | TD_SET |
2536 SETUP_FRAME_LEN, NULL);
2537 } else {
2538 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
2539 SETUP_FRAME_LEN, NULL);
2540 }
2541 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
2542 outl(POLL_DEMAND, DE4X5_TPD);
2543 dev->tbusy = 0;
2544
2545 break;
2546 case DE4X5_SET_PROM:
2547 if (suser()) {
2548 omr = inl(DE4X5_OMR);
2549 omr |= OMR_PR;
2550 outl(omr, DE4X5_OMR);
2551 } else {
2552 status = -EPERM;
2553 }
2554
2555 break;
2556 case DE4X5_CLR_PROM:
2557 if (suser()) {
2558 omr = inl(DE4X5_OMR);
2559 omr &= ~OMR_PR;
2560 outb(omr, DE4X5_OMR);
2561 } else {
2562 status = -EPERM;
2563 }
2564
2565 break;
2566 case DE4X5_SAY_BOO:
2567 printk("%s: Boo!\n", dev->name);
2568
2569 break;
2570 case DE4X5_GET_MCA:
2571 ioc->len = (HASH_TABLE_LEN >> 3);
2572 status = verify_area(VERIFY_WRITE, ioc->data, ioc->len);
2573 if (status)
2574 break;
2575 memcpy_tofs(ioc->data, lp->setup_frame, ioc->len);
2576
2577 break;
2578 case DE4X5_SET_MCA:
2579 if (suser()) {
2580 if (ioc->len != HASH_TABLE_LEN) {
2581 if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN * ioc->len))) {
2582 memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN * ioc->len);
2583 set_multicast_list(dev);
2584 }
2585 } else {
2586 set_multicast_list(dev);
2587 }
2588 } else {
2589 status = -EPERM;
2590 }
2591
2592 break;
2593 case DE4X5_CLR_MCA:
2594 if (suser()) {
2595 set_multicast_list(dev);
2596 } else {
2597 status = -EPERM;
2598 }
2599
2600 break;
2601 case DE4X5_MCA_EN:
2602 if (suser()) {
2603 omr = inl(DE4X5_OMR);
2604 omr |= OMR_PM;
2605 outl(omr, DE4X5_OMR);
2606 } else {
2607 status = -EPERM;
2608 }
2609
2610 break;
2611 case DE4X5_GET_STATS:
2612 ioc->len = sizeof(lp->pktStats);
2613 status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
2614 if (status)
2615 break;
2616
2617 cli();
2618 memcpy_tofs(ioc->data, &lp->pktStats, ioc->len);
2619 sti();
2620
2621 break;
2622 case DE4X5_CLR_STATS:
2623 if (suser()) {
2624 cli();
2625 memset(&lp->pktStats, 0, sizeof(lp->pktStats));
2626 sti();
2627 } else {
2628 status = -EPERM;
2629 }
2630
2631 break;
2632 case DE4X5_GET_OMR:
2633 tmp.addr[0] = inl(DE4X5_OMR);
2634 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, 1))) {
2635 memcpy_tofs(ioc->data, tmp.addr, 1);
2636 }
2637
2638 break;
2639 case DE4X5_SET_OMR:
2640 if (suser()) {
2641 if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, 1))) {
2642 memcpy_fromfs(tmp.addr, ioc->data, 1);
2643 outl(tmp.addr[0], DE4X5_OMR);
2644 }
2645 } else {
2646 status = -EPERM;
2647 }
2648
2649 break;
2650 case DE4X5_GET_REG:
2651 j = 0;
2652 tmp.lval[0] = inl(DE4X5_STS); j+=4;
2653 tmp.lval[1] = inl(DE4X5_BMR); j+=4;
2654 tmp.lval[2] = inl(DE4X5_IMR); j+=4;
2655 tmp.lval[3] = inl(DE4X5_OMR); j+=4;
2656 tmp.lval[4] = inl(DE4X5_SISR); j+=4;
2657 tmp.lval[5] = inl(DE4X5_SICR); j+=4;
2658 tmp.lval[6] = inl(DE4X5_STRR); j+=4;
2659 tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
2660 ioc->len = j;
2661 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
2662 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
2663 }
2664 break;
2665
2666 #define DE4X5_DUMP 0x0f
2667
2668 case DE4X5_DUMP:
2669 j = 0;
2670 tmp.addr[j++] = dev->irq;
2671 for (i=0; i<ETH_ALEN; i++) {
2672 tmp.addr[j++] = dev->dev_addr[i];
2673 }
2674 tmp.addr[j++] = lp->rxRingSize;
2675 tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
2676 tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
2677
2678 for (i=0;i<lp->rxRingSize-1;i++){
2679 if (i < 3) {
2680 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
2681 }
2682 }
2683 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
2684 for (i=0;i<lp->txRingSize-1;i++){
2685 if (i < 3) {
2686 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
2687 }
2688 }
2689 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
2690
2691 for (i=0;i<lp->rxRingSize-1;i++){
2692 if (i < 3) {
2693 tmp.lval[j>>2] = (s32)lp->rx_ring[i].buf; j+=4;
2694 }
2695 }
2696 tmp.lval[j>>2] = (s32)lp->rx_ring[i].buf; j+=4;
2697 for (i=0;i<lp->txRingSize-1;i++){
2698 if (i < 3) {
2699 tmp.lval[j>>2] = (s32)lp->tx_ring[i].buf; j+=4;
2700 }
2701 }
2702 tmp.lval[j>>2] = (s32)lp->tx_ring[i].buf; j+=4;
2703
2704 for (i=0;i<lp->rxRingSize;i++){
2705 tmp.lval[j>>2] = lp->rx_ring[i].status; j+=4;
2706 }
2707 for (i=0;i<lp->txRingSize;i++){
2708 tmp.lval[j>>2] = lp->tx_ring[i].status; j+=4;
2709 }
2710
2711 tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
2712 tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
2713 tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
2714 tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
2715 tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
2716 tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
2717 tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
2718 tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
2719
2720 tmp.addr[j++] = lp->txRingSize;
2721 tmp.addr[j++] = dev->tbusy;
2722
2723 ioc->len = j;
2724 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
2725 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
2726 }
2727
2728 break;
2729 default:
2730 status = -EOPNOTSUPP;
2731 }
2732
2733 return status;
2734 }
2735
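/*
** Loadable module support: the io and irq values below select the board
** to attach when the module is loaded (autoprobing is not supported).
*/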
2736 #ifdef MODULE
2737 static char devicename[9] = { 0, };
2738 static struct device thisDE4X5 = {
2739 devicename,
2740 0, 0, 0, 0,
2741 0x2000, 10,
2742 0, 0, 0, NULL, de4x5_probe };
2743
2744 static int io=0x000b;
2745 static int irq=10;
2746
2747 int
2748 init_module(void)
2749 {
2750 thisDE4X5.base_addr=io;
2751 thisDE4X5.irq=irq;
2752 if (register_netdev(&thisDE4X5) != 0)
2753 return -EIO;
2754 return 0;
2755 }
2756
2757 void
2758 cleanup_module(void)
2759 {
2760 struct de4x5_private *lp = (struct de4x5_private *) thisDE4X5.priv;
2761
2762 if (lp) {
2763 release_region(thisDE4X5.base_addr, (lp->bus == PCI ?
2764 DE4X5_PCI_TOTAL_SIZE :
2765 DE4X5_EISA_TOTAL_SIZE));
2766 kfree_s(bus_to_virt(lp->rx_ring[0].buf), RX_BUFF_SZ * NUM_RX_DESC + ALIGN);
2767 kfree_s(thisDE4X5.priv, sizeof(struct de4x5_private) + ALIGN);
2768 thisDE4X5.priv = NULL;
2769 }
2770
2771 unregister_netdev(&thisDE4X5);
2772 }
2773 #endif
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784