This source file includes the following definitions:
- de4x5_probe
- de4x5_hw_init
- de4x5_open
- de4x5_init
- de4x5_sw_reset
- de4x5_queue_pkt
- de4x5_interrupt
- de4x5_rx
- de4x5_tx
- de4x5_ast
- de4x5_close
- de4x5_get_stats
- load_packet
- set_multicast_list
- SetMulticastFilter
- eisa_probe
- pci_probe
- alloc_device
- autoconf_media
- dc21040_autoconf
- dc21040_state
- de4x5_suspect_state
- dc21041_autoconf
- dc21140m_autoconf
- de4x5_init_connection
- de4x5_reset_phy
- test_media
- test_tp
- test_mii_reg
- is_spd_100
- is_100_up
- is_10_up
- is_anc_capable
- ping_media
- de4x5_save_skbs
- de4x5_restore_skbs
- de4x5_cache_state
- de4x5_put_cache
- de4x5_putb_cache
- de4x5_get_cache
- test_ans
- de4x5_setup_intr
- reset_init_sia
- create_packet
- de4x5_us_delay
- de4x5_ms_delay
- EISA_signature
- PCI_signature
- DevicePresent
- get_hw_addr
- de4x5_bad_srom
- de4x5_strncmp
- srom_rd
- srom_latch
- srom_command
- srom_address
- srom_data
- sendto_srom
- getfrom_srom
- mii_rd
- mii_wr
- mii_rdata
- mii_wdata
- mii_address
- mii_ta
- mii_swap
- sendto_mii
- getfrom_mii
- mii_get_oui
- mii_get_phy
- build_setup_frame
- enable_ast
- disable_ast
- de4x5_switch_to_mii
- de4x5_switch_to_srl
- timeout
- de4x5_dbg_open
- de4x5_dbg_mii
- de4x5_dbg_media
- de4x5_dbg_srom
- de4x5_ioctl
- init_module
- cleanup_module
189 static const char *version = "de4x5.c:v0.41 96/3/21 davies@wanton.lkg.dec.com\n";
190
191 #include <linux/module.h>
192
193 #include <linux/kernel.h>
194 #include <linux/sched.h>
195 #include <linux/string.h>
196 #include <linux/interrupt.h>
197 #include <linux/ptrace.h>
198 #include <linux/errno.h>
199 #include <linux/ioport.h>
200 #include <linux/malloc.h>
201 #include <linux/bios32.h>
202 #include <linux/pci.h>
203 #include <linux/delay.h>
204 #include <asm/bitops.h>
205 #include <asm/io.h>
206 #include <asm/dma.h>
207 #include <asm/segment.h>
208
209 #include <linux/netdevice.h>
210 #include <linux/etherdevice.h>
211 #include <linux/skbuff.h>
212
213 #include <linux/time.h>
214 #include <linux/types.h>
215 #include <linux/unistd.h>
216
217 #include "de4x5.h"
218
219 #define c_char const char
220
221
222
223
224 struct phy_table {
225 int reset;
226 int id;
227 int ta;
228 struct {
229 int reg;
230 int mask;
231 int value;
232 } spd;
233 };
234
235 struct mii_phy {
236 int reset;
237 int id;
238 int ta;
239 struct {
240 int reg;
241 int mask;
242 int value;
243 } spd;
244 int addr;
245 };
246
247 #define DE4X5_MAX_PHY 8
248
249
250
251
252
253 static struct phy_table phy_info[] = {
254 {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}},
255 {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}},
256 {0, SEEQ_T4 , 1, {0x12, 0x10, 0x10}},
257 {0, CYPRESS_T4 , 1, {0x05, 0x20, 0x20}}
258 };
259
260
261
262
263 static c_char enet_det[][ETH_ALEN] = {
264 {0x00, 0x00, 0xc0, 0x00, 0x00, 0x00}
265 };
266
267 #define SMC 1
268
269
270 #ifdef DE4X5_DEBUG
271 static int de4x5_debug = DE4X5_DEBUG;
272 #else
273 static int de4x5_debug = 1;
274 #endif
275
276 #ifdef DE4X5_AUTOSENSE
277 static int de4x5_autosense = DE4X5_AUTOSENSE;
278 #else
279 static int de4x5_autosense = AUTO;
280 #endif
281 #define DE4X5_AUTOSENSE_MS 250
282
283 #ifdef DE4X5_FULL_DUPLEX
284 static s32 de4x5_full_duplex = 1;
285 #else
286 static s32 de4x5_full_duplex = 0;
287 #endif
288
289 #define DE4X5_NDA 0xffe0
290
291
292
293
294 #define PROBE_LENGTH 32
295 #define ETH_PROM_SIG 0xAA5500FFUL
296
297
298
299
300 #define PKT_BUF_SZ 1536
301 #define MAX_PKT_SZ 1514
302 #define MAX_DAT_SZ 1500
303 #define MIN_DAT_SZ 1
304 #define PKT_HDR_LEN 14
305 #define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
306 #define QUEUE_PKT_TIMEOUT (3*HZ)
307
308
309 #define CRC_POLYNOMIAL_BE 0x04c11db7UL
310 #define CRC_POLYNOMIAL_LE 0xedb88320UL
311
312
313
314
315 #define DE4X5_EISA_IO_PORTS 0x0c00
316 #define DE4X5_EISA_TOTAL_SIZE 0x100
317
318 #define MAX_EISA_SLOTS 16
319 #define EISA_SLOT_INC 0x1000
320
321 #define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"}
322 #define DE4X5_NAME_LENGTH 8
323
324
325
326
327 #define PCI_MAX_BUS_NUM 8
328 #define DE4X5_PCI_TOTAL_SIZE 0x80
329 #define DE4X5_CLASS_CODE 0x00020000
330
331
332
333
334
335
336
337 #define ALIGN4 ((u_long)4 - 1)
338 #define ALIGN8 ((u_long)8 - 1)
339 #define ALIGN16 ((u_long)16 - 1)
340 #define ALIGN32 ((u_long)32 - 1)
341 #define ALIGN64 ((u_long)64 - 1)
342 #define ALIGN128 ((u_long)128 - 1)
343
344 #define ALIGN ALIGN32
345 #define CACHE_ALIGN CAL_16LONG
346 #define DESC_SKIP_LEN DSL_0
347
348 #define DESC_ALIGN
349
350 #ifndef DEC_ONLY
351 static int dec_only = 0;
352 #else
353 static int dec_only = 1;
354 #endif
355
356
357
358
359 #define ENABLE_IRQs { \
360 imr |= lp->irq_en;\
361 outl(imr, DE4X5_IMR); \
362 }
363
364 #define DISABLE_IRQs {\
365 imr = inl(DE4X5_IMR);\
366 imr &= ~lp->irq_en;\
367 outl(imr, DE4X5_IMR); \
368 }
369
370 #define UNMASK_IRQs {\
371 imr |= lp->irq_mask;\
372 outl(imr, DE4X5_IMR); \
373 }
374
375 #define MASK_IRQs {\
376 imr = inl(DE4X5_IMR);\
377 imr &= ~lp->irq_mask;\
378 outl(imr, DE4X5_IMR); \
379 }
380
381
382
383
384 #define START_DE4X5 {\
385 omr = inl(DE4X5_OMR);\
386 omr |= OMR_ST | OMR_SR;\
387 outl(omr, DE4X5_OMR); \
388 }
389
390 #define STOP_DE4X5 {\
391 omr = inl(DE4X5_OMR);\
392 omr &= ~(OMR_ST|OMR_SR);\
393 outl(omr, DE4X5_OMR); \
394 }
395
396
397
398
399 #define RESET_SIA outl(0, DE4X5_SICR);
400
401
402
403
404 #define DE4X5_AUTOSENSE_MS 250
405
406
407
408
409 struct de4x5_srom {
410 char sub_vendor_id[2];
411 char sub_system_id[2];
412 char reserved[12];
413 char id_block_crc;
414 char reserved2;
415 char version;
416 char num_adapters;
417 char ieee_addr[6];
418 char info[100];
419 short chksum;
420 };
421
422
423
424
425
426
427
428
429
430 #define NUM_RX_DESC 8
431 #define NUM_TX_DESC 32
432 #define RX_BUFF_SZ 1536
433
434
435 struct de4x5_desc {
436 volatile s32 status;
437 u32 des1;
438 u32 buf;
439 u32 next;
440 DESC_ALIGN
441 };
442
443
444
445
446 #define DE4X5_PKT_STAT_SZ 16
447 #define DE4X5_PKT_BIN_SZ 128
448
449
450 struct de4x5_private {
451 char adapter_name[80];
452 struct de4x5_desc rx_ring[NUM_RX_DESC];
453 struct de4x5_desc tx_ring[NUM_TX_DESC];
454 struct sk_buff *skb[NUM_TX_DESC];
455 int rx_new, rx_old;
456 int tx_new, tx_old;
457 char setup_frame[SETUP_FRAME_LEN];
458 char frame[64];
459 struct enet_statistics stats;
460 struct {
461 u_int bins[DE4X5_PKT_STAT_SZ];
462 u_int unicast;
463 u_int multicast;
464 u_int broadcast;
465 u_int excessive_collisions;
466 u_int tx_underruns;
467 u_int excessive_underruns;
468 u_int rx_runt_frames;
469 u_int rx_collision;
470 u_int rx_dribble;
471 u_int rx_overflow;
472 } pktStats;
473 char rxRingSize;
474 char txRingSize;
475 int bus;
476 int bus_num;
477 int state;
478 int chipset;
479 s32 irq_mask;
480 s32 irq_en;
481 int media;
482 int c_media;
483 int linkOK;
484 int autosense;
485 int tx_enable;
486 int lostMedia;
487 int setup_f;
488 int local_state;
489 struct mii_phy phy[DE4X5_MAX_PHY];
490 int active;
491 int mii_cnt;
492 int timeout;
493 struct timer_list timer;
494 int tmp;
495 struct {
496 void *priv;
497 void *buf;
498 s32 csr0;
499 s32 csr6;
500 s32 csr7;
501 s32 csr13;
502 s32 csr14;
503 s32 csr15;
504 int save_cnt;
505 struct sk_buff *skb;
506 } cache;
507 };
508
509
510
511
512
513
514 static struct bus_type {
515 int bus;
516 int bus_num;
517 int device;
518 int chipset;
519 struct de4x5_srom srom;
520 int autosense;
521 } bus;
522
523
524
525
526
527
528
529
530 #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
531 lp->tx_old+lp->txRingSize-lp->tx_new-1:\
532 lp->tx_old -lp->tx_new-1)
533
534
535
536
537 static int de4x5_open(struct device *dev);
538 static int de4x5_queue_pkt(struct sk_buff *skb, struct device *dev);
539 static void de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs);
540 static int de4x5_close(struct device *dev);
541 static struct enet_statistics *de4x5_get_stats(struct device *dev);
542 static void set_multicast_list(struct device *dev);
543 static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd);
544
545
546
547
548 static int de4x5_hw_init(struct device *dev, u_long iobase);
549 static int de4x5_init(struct device *dev);
550 static int de4x5_sw_reset(struct device *dev);
551 static int de4x5_rx(struct device *dev);
552 static int de4x5_tx(struct device *dev);
553 static int de4x5_ast(struct device *dev);
554
555 static int autoconf_media(struct device *dev);
556 static void create_packet(struct device *dev, char *frame, int len);
557 static void de4x5_us_delay(u32 usec);
558 static void de4x5_ms_delay(u32 msec);
559 static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb);
560 static int dc21040_autoconf(struct device *dev);
561 static int dc21041_autoconf(struct device *dev);
562 static int dc21140m_autoconf(struct device *dev);
563 static int de4x5_suspect_state(struct device *dev, int timeout, int prev_state, int (*fn)(struct device *, int), int (*asfn)(struct device *));
564 static int dc21040_state(struct device *dev, int csr13, int csr14, int csr15, int timeout, int next_state, int suspect_state, int (*fn)(struct device *, int));
565 static int test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
566
567 static int test_mii_reg(struct device *dev, int reg, int mask, int pol, long msec);
568 static int is_spd_100(struct device *dev);
569 static int is_100_up(struct device *dev);
570 static int is_10_up(struct device *dev);
571 static int is_anc_capable(struct device *dev);
572 static int ping_media(struct device *dev, int msec);
573 static void de4x5_save_skbs(struct device *dev);
574 static void de4x5_restore_skbs(struct device *dev);
575 static void de4x5_cache_state(struct device *dev, int flag);
576 static void de4x5_put_cache(struct device *dev, struct sk_buff *skb);
577 static void de4x5_putb_cache(struct device *dev, struct sk_buff *skb);
578 static struct sk_buff *de4x5_get_cache(struct device *dev);
579 static void de4x5_setup_intr(struct device *dev);
580 static void de4x5_init_connection(struct device *dev);
581 static int de4x5_reset_phy(struct device *dev);
582 static void reset_init_sia(struct device *dev, s32 sicr, s32 strr, s32 sigr);
583 static int test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec);
584 static int test_tp(struct device *dev, s32 msec);
585 static int EISA_signature(char *name, s32 eisa_id);
586 static int PCI_signature(char *name, struct bus_type *lp);
587 static void DevicePresent(u_long iobase);
588 static int de4x5_bad_srom(struct bus_type *lp);
589 static short srom_rd(u_long address, u_char offset);
590 static void srom_latch(u_int command, u_long address);
591 static void srom_command(u_int command, u_long address);
592 static void srom_address(u_int command, u_long address, u_char offset);
593 static short srom_data(u_int command, u_long address);
594
595 static void sendto_srom(u_int command, u_long addr);
596 static int getfrom_srom(u_long addr);
597 static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr);
598 static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr);
599 static int mii_rdata(u_long ioaddr);
600 static void mii_wdata(int data, int len, u_long ioaddr);
601 static void mii_ta(u_long rw, u_long ioaddr);
602 static int mii_swap(int data, int len);
603 static void mii_address(u_char addr, u_long ioaddr);
604 static void sendto_mii(u32 command, int data, u_long ioaddr);
605 static int getfrom_mii(u32 command, u_long ioaddr);
606 static int mii_get_oui(u_char phyaddr, u_long ioaddr);
607 static int mii_get_phy(struct device *dev);
608 static void SetMulticastFilter(struct device *dev);
609 static int get_hw_addr(struct device *dev);
610
611 static void eisa_probe(struct device *dev, u_long iobase);
612 static void pci_probe(struct device *dev, u_long iobase);
613 static struct device *alloc_device(struct device *dev, u_long iobase);
614 static char *build_setup_frame(struct device *dev, int mode);
615 static void disable_ast(struct device *dev);
616 static void enable_ast(struct device *dev, u32 time_out);
617 static long de4x5_switch_to_srl(struct device *dev);
618 static long de4x5_switch_to_mii(struct device *dev);
619 static void timeout(struct device *dev, void (*fn)(u_long data), u_long data, u_long msec);
620 static void de4x5_dbg_open(struct device *dev);
621 static void de4x5_dbg_mii(struct device *dev, int k);
622 static void de4x5_dbg_media(struct device *dev);
623 static void de4x5_dbg_srom(struct de4x5_srom *p);
624 static int de4x5_strncmp(char *a, char *b, int n);
625
626 #ifdef MODULE
627 int init_module(void);
628 void cleanup_module(void);
629 static int autoprobed = 0, loading_module = 1;
630 # else
631 static int autoprobed = 0, loading_module = 0;
632 #endif
633
634 static char name[DE4X5_NAME_LENGTH + 1];
635 static int num_de4x5s = 0, num_eth = 0;
636
637
638
639
640 #define RESET_DE4X5 {\
641 int i;\
642 i=inl(DE4X5_BMR);\
643 de4x5_ms_delay(1);\
644 outl(i | BMR_SWR, DE4X5_BMR);\
645 de4x5_ms_delay(1);\
646 outl(i, DE4X5_BMR);\
647 de4x5_ms_delay(1);\
648 for (i=0;i<5;i++) {inl(DE4X5_BMR); de4x5_ms_delay(1);}\
649 de4x5_ms_delay(1);\
650 }
651
652
653
654
655
656
657
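/*
** Probe the EISA slots and the PCI bus for DE4X5 adapters. Returns 0 once a
** device in the list has been initialised (dev->priv set), -ENODEV otherwise.
*/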
658 int de4x5_probe(struct device *dev)
659 {
660 int tmp = num_de4x5s, status = -ENODEV;
661 u_long iobase = dev->base_addr;
662
663 eisa_probe(dev, iobase);
664 pci_probe(dev, iobase);
665
666 if ((tmp == num_de4x5s) && (iobase != 0) && loading_module) {
667 printk("%s: de4x5_probe() cannot find device at 0x%04lx.\n", dev->name,
668 iobase);
669 }
670
671
672
673
674
675 for (; (dev->priv == NULL) && (dev->next != NULL); dev = dev->next);
676
677 if (dev->priv) status = 0;
678 if (iobase == 0) autoprobed = 1;
679
680 return status;
681 }
682
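/*
** Per-adapter initialisation: reset the chip, identify it from its EISA/PCI
** signature, read and print the Ethernet address, allocate the cache-aligned
** private area and receive buffers, and install the driver entry points.
*/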
683 static int
684 de4x5_hw_init(struct device *dev, u_long iobase)
685 {
686 struct bus_type *lp = &bus;
687 int tmpbus, tmpchs, status=0;
688 int i, media = *((char *)&(lp->srom) + *((char *)&(lp->srom) + 19) * 3);
689 char *tmp;
690
691
692 if (lp->chipset == DC21041) {
693 outl(0, PCI_CFDA);
694 de4x5_ms_delay(10);
695 }
696
697 RESET_DE4X5;
698
699 if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
700 return -ENXIO;
701 }
702
703
704
705
706 if (lp->bus == PCI) {
707 PCI_signature(name, lp);
708 } else {
709 EISA_signature(name, EISA_ID0);
710 }
711
712 if (*name == '\0') {
713 return -ENXIO;
714 }
715
716 dev->base_addr = iobase;
717 if (lp->bus == EISA) {
718 printk("%s: %s at %04lx (EISA slot %ld)",
719 dev->name, name, iobase, ((iobase>>12)&0x0f));
720 } else {
721 printk("%s: %s at %04lx (PCI bus %d, device %d)", dev->name, name,
722 iobase, lp->bus_num, lp->device);
723 }
724
725 printk(", h/w address ");
726 status = get_hw_addr(dev);
727 for (i = 0; i < ETH_ALEN - 1; i++) {
728 printk("%2.2x:", dev->dev_addr[i]);
729 }
730 printk("%2.2x,\n", dev->dev_addr[i]);
731
732 tmpbus = lp->bus;
733 tmpchs = lp->chipset;
734
735 if (status != 0) {
736 printk(" which has an Ethernet PROM CRC error.\n");
737 return -ENXIO;
738 } else {
739 struct de4x5_private *lp;
740
741
742
743
744
745 dev->priv = (void *) kmalloc(sizeof(struct de4x5_private) + ALIGN,
746 GFP_KERNEL);
747 if (dev->priv == NULL) {
748 return -ENOMEM;
749 }
750
751
752
753
754 tmp = dev->priv;
755 dev->priv = (void *)(((u_long)dev->priv + ALIGN) & ~ALIGN);
756 lp = (struct de4x5_private *)dev->priv;
757 memset(dev->priv, 0, sizeof(struct de4x5_private));
758 lp->bus = tmpbus;
759 lp->chipset = tmpchs;
760 lp->cache.priv = tmp;
761
762
763
764
765 if (media & MEDIA_MII) {
766 if (!mii_get_phy(dev)) {
767 printk("%s: MII search failed, no device found when one was expected\n", dev->name);
768 return -ENXIO;
769 }
770 } else {
771 mii_get_phy(dev);
772 }
773
774
775
776
777 if (de4x5_autosense & AUTO) {
778 lp->autosense = AUTO;
779 } else {
780 if (lp->chipset != DC21140) {
781 if ((lp->chipset == DC21040) && (de4x5_autosense & TP_NW)) {
782 de4x5_autosense = TP;
783 }
784 if ((lp->chipset == DC21041) && (de4x5_autosense & BNC_AUI)) {
785 de4x5_autosense = BNC;
786 }
787 lp->autosense = de4x5_autosense & 0x001f;
788 } else {
789 lp->autosense = de4x5_autosense & 0x00c0;
790 }
791 }
792
793 sprintf(lp->adapter_name,"%s (%s)", name, dev->name);
794
795
796
797
798 if ((tmp = (void *)kmalloc(RX_BUFF_SZ * NUM_RX_DESC + ALIGN,
799 GFP_KERNEL)) != NULL) {
800 lp->cache.buf = tmp;
801 tmp = (char *)(((u_long) tmp + ALIGN) & ~ALIGN);
802 for (i=0; i<NUM_RX_DESC; i++) {
803 lp->rx_ring[i].status = 0;
804 lp->rx_ring[i].des1 = RX_BUFF_SZ;
805 lp->rx_ring[i].buf = virt_to_bus(tmp + i * RX_BUFF_SZ);
806 lp->rx_ring[i].next = (u32)NULL;
807 }
808 barrier();
809
810 request_region(iobase, (lp->bus == PCI ? DE4X5_PCI_TOTAL_SIZE :
811 DE4X5_EISA_TOTAL_SIZE),
812 lp->adapter_name);
813
814 lp->rxRingSize = NUM_RX_DESC;
815 lp->txRingSize = NUM_TX_DESC;
816
817
818 lp->rx_ring[lp->rxRingSize - 1].des1 |= RD_RER;
819 lp->tx_ring[lp->txRingSize - 1].des1 |= TD_TER;
820
821
822 outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
823 outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
824
825
826 lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM ;
827 lp->irq_en = IMR_NIM | IMR_AIM;
828
829
830 create_packet(dev, lp->frame, sizeof(lp->frame));
831
832
833 lp->state = CLOSED;
834
835 printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
836 ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
837 } else {
838 printk("%s: Kernel could not allocate RX buffer memory.\n", dev->name);
839 }
840 if (status) {
841 release_region(iobase, (lp->bus == PCI ?
842 DE4X5_PCI_TOTAL_SIZE :
843 DE4X5_EISA_TOTAL_SIZE));
844 if (lp->rx_ring[0].buf) {
845 kfree(bus_to_virt(lp->rx_ring[0].buf));
846 }
847 kfree(dev->priv);
848 dev->priv = NULL;
849
850 return -ENXIO;
851 }
852 }
853
854 if (de4x5_debug > 0) {
855 printk(version);
856 }
857
858
859 dev->open = &de4x5_open;
860 dev->hard_start_xmit = &de4x5_queue_pkt;
861 dev->stop = &de4x5_close;
862 dev->get_stats = &de4x5_get_stats;
863 dev->set_multicast_list = &set_multicast_list;
864 dev->do_ioctl = &de4x5_ioctl;
865
866 dev->mem_start = 0;
867
868
869 ether_setup(dev);
870
871
872 if (lp->chipset == DC21041) {
873 outl(0, DE4X5_SICR);
874 outl(CFDA_PSM, PCI_CFDA);
875 }
876
877 return status;
878 }
879
880
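/*
** Open the device: wake a powered-down DC21041, initialise the rings and
** media, claim the (shared) IRQ, then start the chip and enable interrupts.
*/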
881 static int
882 de4x5_open(struct device *dev)
883 {
884 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
885 u_long iobase = dev->base_addr;
886 int status = 0;
887 s32 omr;
888
889
890
891
892 if (lp->chipset == DC21041) {
893 outl(0, PCI_CFDA);
894 de4x5_ms_delay(10);
895 }
896
897 lp->state = OPEN;
898
899
900
901
902 status = de4x5_init(dev);
903
904 de4x5_dbg_open(dev);
905
906 if (request_irq(dev->irq, (void *)de4x5_interrupt, SA_SHIRQ,
907 lp->adapter_name, dev)) {
908 printk("de4x5_open(): Requested IRQ%d is busy\n",dev->irq);
909 status = -EAGAIN;
910 } else {
911 dev->tbusy = 0;
912 dev->start = 1;
913 dev->interrupt = UNMASK_INTERRUPTS;
914 dev->trans_start = jiffies;
915
916 START_DE4X5;
917
918 de4x5_setup_intr(dev);
919 }
920
921 if (de4x5_debug > 1) {
922 printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
923 printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
924 printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
925 printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
926 printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
927 printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
928 printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
929 printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
930 }
931
932 MOD_INC_USE_COUNT;
933
934 return status;
935 }
936
937
938
939
940
941
942
943
944
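/*
** Initialise the adapter: perform a software reset and then autosense the
** attached media.
*/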
945 static int
946 de4x5_init(struct device *dev)
947 {
948
949 set_bit(0, (void *)&dev->tbusy);
950
951 de4x5_sw_reset(dev);
952
953
954 autoconf_media(dev);
955
956 return 0;
957 }
958
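/*
** Software reset: select SRL or MII operation, program the bus mode and
** operating mode registers, reinitialise the descriptor rings and load a
** setup frame carrying the current address filter.
*/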
959 static int de4x5_sw_reset(struct device *dev)
960 {
961 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
962 u_long iobase = dev->base_addr;
963 int i, j, status = 0;
964 s32 bmr, omr;
965
966
967 if (lp->phy[lp->active].id == 0) {
968 de4x5_switch_to_srl(dev);
969 } else {
970 de4x5_switch_to_mii(dev);
971 }
972
973
974
975
976
977
978 bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | CACHE_ALIGN;
979 outl(bmr, DE4X5_BMR);
980
981 omr = inl(DE4X5_OMR) & ~OMR_PR;
982 if (lp->chipset != DC21140) {
983 omr |= TR_96;
984 lp->setup_f = HASH_PERF;
985 } else {
986 omr |= OMR_SDP | OMR_SB | (!lp->phy[lp->active].id ? OMR_SF : 0);
987 lp->setup_f = PERFECT;
988 }
989 outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
990 outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
991
992 lp->rx_new = lp->rx_old = 0;
993 lp->tx_new = lp->tx_old = 0;
994
995 for (i = 0; i < lp->rxRingSize; i++) {
996 lp->rx_ring[i].status = R_OWN;
997 }
998
999 for (i = 0; i < lp->txRingSize; i++) {
1000 lp->tx_ring[i].status = 0;
1001 }
1002
1003 barrier();
1004
1005
1006 SetMulticastFilter(dev);
1007
1008 if (lp->chipset != DC21140) {
1009 load_packet(dev, lp->setup_frame, HASH_F|TD_SET|SETUP_FRAME_LEN, NULL);
1010 } else {
1011 load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, NULL);
1012 }
1013 outl(omr|OMR_ST, DE4X5_OMR);
1014
1015
1016 sti();
1017 for (j=0, i=0;(i<500) && (j==0);i++) {
1018 udelay(1000);
1019 if (lp->tx_ring[lp->tx_new].status >= 0) j=1;
1020 }
1021 outl(omr, DE4X5_OMR);
1022
1023 if (j == 0) {
1024 printk("%s: Setup frame timed out, status %08x\n", dev->name,
1025 inl(DE4X5_STS));
1026 status = -EIO;
1027 }
1028
1029 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
1030 lp->tx_old = lp->tx_new;
1031
1032 return status;
1033 }
1034
1035
1036
1037
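/*
** Queue a packet for transmission. Packets are parked in the skb cache while
** the transmitter is disabled or busy and loaded into the TX ring as
** descriptors become free.
*/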
1038 static int
1039 de4x5_queue_pkt(struct sk_buff *skb, struct device *dev)
1040 {
1041 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1042 u_long iobase = dev->base_addr;
1043 int status = 0;
1044
1045 if (skb == NULL) {
1046 dev_tint(dev);
1047 return 0;
1048 }
1049
1050 if (lp->tx_enable == NO) {
1051 de4x5_put_cache(dev, skb);
1052 return 0;
1053 }
1054
1055
1056
1057
1058
1059
1060 set_bit(0, (void*)&dev->tbusy);
1061 cli();
1062 de4x5_tx(dev);
1063 sti();
1064
1065
1066 if (dev->tbusy || lp->skb[lp->tx_new]) {
1067 if (dev->interrupt) {
1068 de4x5_putb_cache(dev, skb);
1069 } else {
1070 de4x5_put_cache(dev, skb);
1071 }
1072 if (de4x5_debug > 1) {
1073 printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%ld\n lostMedia:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), dev->tbusy, lp->lostMedia, inl(DE4X5_IMR), inl(DE4X5_OMR), (lp->skb[lp->tx_new] ? "YES" : "NO"));
1074 }
1075 } else if (skb->len > 0) {
1076
1077 if (lp->cache.skb && !dev->interrupt) {
1078 de4x5_put_cache(dev, skb);
1079 skb = de4x5_get_cache(dev);
1080 }
1081
1082 while (skb && !dev->tbusy && !lp->skb[lp->tx_new]) {
1083 set_bit(0, (void*)&dev->tbusy);
1084 cli();
1085 if (TX_BUFFS_AVAIL) {
1086 load_packet(dev, skb->data,
1087 TD_IC | TD_LS | TD_FS | skb->len, skb);
1088 outl(POLL_DEMAND, DE4X5_TPD);
1089
1090 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
1091 dev->trans_start = jiffies;
1092
1093 if (TX_BUFFS_AVAIL) {
1094 dev->tbusy = 0;
1095 }
1096 skb = de4x5_get_cache(dev);
1097 }
1098 sti();
1099 }
1100 if (skb && (dev->tbusy || lp->skb[lp->tx_new])) {
1101 de4x5_putb_cache(dev, skb);
1102 }
1103 }
1104
1105 return status;
1106 }
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
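/*
** The interrupt handler: acknowledge and service receive, transmit, link fail
** and system error events, then flush any packets that were cached while the
** handler was active.
*/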
1119 static void
1120 de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1121 {
1122 struct device *dev = (struct device *)dev_id;
1123 struct de4x5_private *lp;
1124 s32 imr, omr, sts;
1125 u_long iobase;
1126
1127 if (dev == NULL) {
1128 printk ("de4x5_interrupt(): irq %d for unknown device.\n", irq);
1129 return;
1130 }
1131 lp = (struct de4x5_private *)dev->priv;
1132 iobase = dev->base_addr;
1133
1134 if (dev->interrupt)
1135 printk("%s: Re-entering the interrupt handler.\n", dev->name);
1136
1137 DISABLE_IRQs;
1138 dev->interrupt = MASK_INTERRUPTS;
1139
1140 for (;;) {
1141 sts = inl(DE4X5_STS);
1142 outl(sts, DE4X5_STS);
1143
1144 if (!(sts & lp->irq_mask)) break;
1145
1146 if (sts & (STS_RI | STS_RU))
1147 de4x5_rx(dev);
1148
1149 if (sts & (STS_TI | STS_TU))
1150 de4x5_tx(dev);
1151
1152 if (sts & STS_LNF) {
1153 lp->lostMedia = LOST_MEDIA_THRESHOLD + 1;
1154 lp->irq_mask &= ~IMR_LFM;
1155 }
1156
1157 if (sts & STS_SE) {
1158 STOP_DE4X5;
1159 printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
1160 dev->name, sts);
1161 return;
1162 }
1163 }
1164
1165
1166 while (lp->cache.skb && !dev->tbusy && lp->tx_enable) {
1167 de4x5_queue_pkt(de4x5_get_cache(dev), dev);
1168 }
1169
1170 dev->interrupt = UNMASK_INTERRUPTS;
1171 ENABLE_IRQs;
1172
1173 return;
1174 }
1175
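/*
** Receive: walk the RX ring, count errors, copy each good frame (which may
** wrap around the end of the ring) into a freshly allocated skb and pass it
** up the stack, then return the descriptors to the chip.
*/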
1176 static int
1177 de4x5_rx(struct device *dev)
1178 {
1179 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1180 int i, entry;
1181 s32 status;
1182 char *buf;
1183
1184 for (entry=lp->rx_new; lp->rx_ring[entry].status>=0;entry=lp->rx_new) {
1185 status = lp->rx_ring[entry].status;
1186
1187 if (status & RD_FS) {
1188 lp->rx_old = entry;
1189 }
1190
1191 if (status & RD_LS) {
1192 lp->linkOK++;
1193 if (status & RD_ES) {
1194 lp->stats.rx_errors++;
1195 if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
1196 if (status & RD_CE) lp->stats.rx_crc_errors++;
1197 if (status & RD_OF) lp->stats.rx_fifo_errors++;
1198 if (status & RD_TL) lp->stats.rx_length_errors++;
1199 if (status & RD_RF) lp->pktStats.rx_runt_frames++;
1200 if (status & RD_CS) lp->pktStats.rx_collision++;
1201 if (status & RD_DB) lp->pktStats.rx_dribble++;
1202 if (status & RD_OF) lp->pktStats.rx_overflow++;
1203 } else {
1204 struct sk_buff *skb;
1205 short pkt_len = (short)(lp->rx_ring[entry].status >> 16) - 4;
1206
1207 if ((skb = dev_alloc_skb(pkt_len+2)) == NULL) {
1208 printk("%s: Insufficient memory; nuking packet.\n",
1209 dev->name);
1210 lp->stats.rx_dropped++;
1211 break;
1212 }
1213
1214 skb->dev = dev;
1215 skb_reserve(skb,2);
1216 if (entry < lp->rx_old) {
1217 short len = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
1218 memcpy(skb_put(skb,len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), len);
1219 memcpy(skb_put(skb,pkt_len-len), bus_to_virt(lp->rx_ring[0].buf), pkt_len - len);
1220 } else {
1221 memcpy(skb_put(skb,pkt_len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), pkt_len);
1222 }
1223
1224
1225 skb->protocol=eth_type_trans(skb,dev);
1226 netif_rx(skb);
1227
1228
1229 lp->stats.rx_packets++;
1230 for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
1231 if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
1232 lp->pktStats.bins[i]++;
1233 i = DE4X5_PKT_STAT_SZ;
1234 }
1235 }
1236 buf = skb->data;
1237 if (buf[0] & 0x01) {
1238 if ((*(s32 *)&buf[0] == -1) && (*(s16 *)&buf[4] == -1)) {
1239 lp->pktStats.broadcast++;
1240 } else {
1241 lp->pktStats.multicast++;
1242 }
1243 } else if ((*(s32 *)&buf[0] == *(s32 *)&dev->dev_addr[0]) &&
1244 (*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) {
1245 lp->pktStats.unicast++;
1246 }
1247
1248 lp->pktStats.bins[0]++;
1249 if (lp->pktStats.bins[0] == 0) {
1250 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
1251 }
1252 }
1253
1254
1255 for (;lp->rx_old!=entry;lp->rx_old=(++lp->rx_old)%lp->rxRingSize) {
1256 lp->rx_ring[lp->rx_old].status = R_OWN;
1257 barrier();
1258 }
1259 lp->rx_ring[entry].status = R_OWN;
1260 barrier();
1261 }
1262
1263
1264
1265
1266 lp->rx_new = (++lp->rx_new) % lp->rxRingSize;
1267 }
1268
1269 return 0;
1270 }
1271
1272
1273
1274
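/*
** Reclaim transmitted descriptors: record errors and collisions, free the
** associated skbs, advance tx_old and re-enable the queue when buffers
** become available.
*/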
1275 static int
1276 de4x5_tx(struct device *dev)
1277 {
1278 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1279 u_long iobase = dev->base_addr;
1280 int entry;
1281 s32 status;
1282
1283 for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
1284 status = lp->tx_ring[entry].status;
1285 if (status < 0) {
1286 break;
1287 } else if (status & TD_ES) {
1288 lp->stats.tx_errors++;
1289 if (status & TD_NC) lp->stats.tx_carrier_errors++;
1290 if (status & TD_LC) lp->stats.tx_window_errors++;
1291 if (status & TD_UF) lp->stats.tx_fifo_errors++;
1292 if (status & TD_LC) lp->stats.collisions++;
1293 if (status & TD_EC) lp->pktStats.excessive_collisions++;
1294 if (status & TD_DE) lp->stats.tx_aborted_errors++;
1295
1296 if ((status != 0x7fffffff) &&
1297 (status & (TD_LO | TD_NC | TD_EC | TD_LF))) {
1298 lp->lostMedia++;
1299 } else {
1300 outl(POLL_DEMAND, DE4X5_TPD);
1301 }
1302 } else {
1303 lp->stats.tx_packets++;
1304 lp->lostMedia = 0;
1305 }
1306
1307 if (lp->skb[entry] != NULL) {
1308 dev_kfree_skb(lp->skb[entry], FREE_WRITE);
1309 lp->skb[entry] = NULL;
1310 }
1311
1312
1313 lp->tx_old = (++lp->tx_old) % lp->txRingSize;
1314 }
1315
1316 if (TX_BUFFS_AVAIL && dev->tbusy) {
1317 dev->tbusy = 0;
1318 if (dev->interrupt) mark_bh(NET_BH);
1319 }
1320
1321 return 0;
1322 }
1323
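/*
** Autosense timer callback: re-run the chip specific media autoconfiguration
** and re-arm the timer with the interval it returns.
*/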
1324 static int
1325 de4x5_ast(struct device *dev)
1326 {
1327 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1328 int next_tick = DE4X5_AUTOSENSE_MS;
1329
1330 disable_ast(dev);
1331
1332 if (lp->chipset == DC21140) {
1333 next_tick = dc21140m_autoconf(dev);
1334 } else if (lp->chipset == DC21041) {
1335 next_tick = dc21041_autoconf(dev);
1336 } else if (lp->chipset == DC21040) {
1337 next_tick = dc21040_autoconf(dev);
1338 }
1339 lp->linkOK = 0;
1340 enable_ast(dev, next_tick);
1341
1342 return 0;
1343 }
1344
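/*
** Close the device: stop autosensing, disable interrupts, halt the chip,
** release the IRQ and (for the DC21041) switch to power-saving mode.
*/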
1345 static int
1346 de4x5_close(struct device *dev)
1347 {
1348 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1349 u_long iobase = dev->base_addr;
1350 s32 imr, omr;
1351
1352 disable_ast(dev);
1353 dev->start = 0;
1354 dev->tbusy = 1;
1355
1356 if (de4x5_debug > 1) {
1357 printk("%s: Shutting down ethercard, status was %8.8x.\n",
1358 dev->name, inl(DE4X5_STS));
1359 }
1360
1361
1362
1363
1364 DISABLE_IRQs;
1365
1366 STOP_DE4X5;
1367
1368
1369
1370
1371 free_irq(dev->irq, dev);
1372 lp->state = CLOSED;
1373
1374 MOD_DEC_USE_COUNT;
1375
1376
1377 if (lp->chipset == DC21041) {
1378 outl(0, DE4X5_SICR);
1379 outl(CFDA_PSM, PCI_CFDA);
1380 }
1381
1382 return 0;
1383 }
1384
1385 static struct enet_statistics *
1386 de4x5_get_stats(struct device *dev)
1387 {
1388 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1389 u_long iobase = dev->base_addr;
1390
1391 lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
1392
1393 return &lp->stats;
1394 }
1395
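/*
** Fill the next TX descriptor with the buffer address and control flags,
** remember the skb for later freeing, then hand ownership to the chip.
*/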
1396 static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb)
1397 {
1398 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1399
1400 lp->tx_ring[lp->tx_new].buf = virt_to_bus(buf);
1401 lp->tx_ring[lp->tx_new].des1 &= TD_TER;
1402 lp->tx_ring[lp->tx_new].des1 |= flags;
1403 lp->skb[lp->tx_new] = skb;
1404 barrier();
1405 lp->tx_ring[lp->tx_new].status = T_OWN;
1406 barrier();
1407
1408 return;
1409 }
1410
1411
1412
1413
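/*
** Set or clear the promiscuous/multicast modes. Outside promiscuous mode a
** new setup frame is loaded into the TX ring to update the address filter.
*/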
1414 static void
1415 set_multicast_list(struct device *dev)
1416 {
1417 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1418 u_long iobase = dev->base_addr;
1419
1420
1421 if (lp->state == OPEN) {
1422 if (dev->flags & IFF_PROMISC) {
1423 u32 omr;
1424 omr = inl(DE4X5_OMR);
1425 omr |= OMR_PR;
1426 outl(omr, DE4X5_OMR);
1427 } else {
1428 SetMulticastFilter(dev);
1429 if (lp->setup_f == HASH_PERF) {
1430 load_packet(dev, lp->setup_frame, TD_IC | HASH_F | TD_SET |
1431 SETUP_FRAME_LEN, NULL);
1432 } else {
1433 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
1434 SETUP_FRAME_LEN, NULL);
1435 }
1436
1437 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
1438 outl(POLL_DEMAND, DE4X5_TPD);
1439 dev->trans_start = jiffies;
1440 }
1441 }
1442
1443 return;
1444 }
1445
1446
1447
1448
1449
1450
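/*
** Build the setup frame address filter: either a hashed filter (CRC-32 of
** each multicast address) or a perfect filter of the listed addresses,
** according to the chip's setup frame mode.
*/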
1451 static void SetMulticastFilter(struct device *dev)
1452 {
1453 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1454 struct dev_mc_list *dmi=dev->mc_list;
1455 u_long iobase = dev->base_addr;
1456 int i, j, bit, byte;
1457 u16 hashcode;
1458 u32 omr, crc, poly = CRC_POLYNOMIAL_LE;
1459 char *pa;
1460 unsigned char *addrs;
1461
1462 omr = inl(DE4X5_OMR);
1463 omr &= ~(OMR_PR | OMR_PM);
1464 pa = build_setup_frame(dev, ALL);
1465
1466 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 14)) {
1467 omr |= OMR_PM;
1468 } else if (lp->setup_f == HASH_PERF) {
1469 for (i=0;i<dev->mc_count;i++) {
1470 addrs=dmi->dmi_addr;
1471 dmi=dmi->next;
1472 if ((*addrs & 0x01) == 1) {
1473 crc = 0xffffffff;
1474 for (byte=0;byte<ETH_ALEN;byte++) {
1475
1476 for (bit = *addrs++,j=0;j<8;j++, bit>>=1) {
1477 crc = (crc >> 1) ^ (((crc ^ bit) & 0x01) ? poly : 0);
1478 }
1479 }
1480 hashcode = crc & HASH_BITS;
1481
1482 byte = hashcode >> 3;
1483 bit = 1 << (hashcode & 0x07);
1484
1485 byte <<= 1;
1486 if (byte & 0x02) {
1487 byte -= 1;
1488 }
1489 lp->setup_frame[byte] |= bit;
1490 }
1491 }
1492 } else {
1493 for (j=0; j<dev->mc_count; j++) {
1494 addrs=dmi->dmi_addr;
1495 dmi=dmi->next;
1496 for (i=0; i<ETH_ALEN; i++) {
1497 *(pa + (i&1)) = *addrs++;
1498 if (i & 0x01) pa += 4;
1499 }
1500 }
1501 }
1502 outl(omr, DE4X5_OMR);
1503
1504 return;
1505 }
1506
1507
1508
1509
1510
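/*
** Scan the EISA slot address space for boards with a matching EISA signature,
** program their I/O base, latency and bus mastering, and initialise each
** adapter whose I/O region is free.
*/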
1511 static void eisa_probe(struct device *dev, u_long ioaddr)
1512 {
1513 int i, maxSlots, status;
1514 u_short vendor, device;
1515 s32 cfid;
1516 u_long iobase;
1517 struct bus_type *lp = &bus;
1518 char name[DE4X5_STRLEN];
1519
1520 if (!ioaddr && autoprobed) return;
1521
1522 lp->bus = EISA;
1523
1524 if (ioaddr == 0) {
1525 iobase = EISA_SLOT_INC;
1526 i = 1;
1527 maxSlots = MAX_EISA_SLOTS;
1528 } else {
1529 iobase = ioaddr;
1530 i = (ioaddr >> 12);
1531 maxSlots = i + 1;
1532 }
1533
1534 for (status = -ENODEV; (i<maxSlots) && (dev!=NULL); i++, iobase+=EISA_SLOT_INC) {
1535 if (EISA_signature(name, EISA_ID)) {
1536 cfid = inl(PCI_CFID);
1537 device = (u_short)(cfid >> 16);
1538 vendor = (u_short) cfid;
1539
1540 lp->chipset = device;
1541 DevicePresent(EISA_APROM);
1542
1543 outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
1544 outl(0x00004000, PCI_CFLT);
1545 outl(iobase, PCI_CBIO);
1546
1547 if (check_region(iobase, DE4X5_EISA_TOTAL_SIZE) == 0) {
1548 if ((dev = alloc_device(dev, iobase)) != NULL) {
1549 if ((status = de4x5_hw_init(dev, iobase)) == 0) {
1550 num_de4x5s++;
1551 }
1552 num_eth++;
1553 }
1554 } else if (autoprobed) {
1555 printk("%s: region already allocated at 0x%04lx.\n", dev->name,iobase);
1556 }
1557 }
1558 }
1559
1560 return;
1561 }
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575 #define PCI_DEVICE (dev_num << 3)
1576 #define PCI_LAST_DEV 32
1577
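/*
** Use the BIOS32 services to find DC21040/DC21041/DC21140 class devices,
** check their I/O base, IRQ and bus master status, and initialise each
** adapter whose I/O region is free.
*/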
1578 static void pci_probe(struct device *dev, u_long ioaddr)
1579 {
1580 u_char irq;
1581 u_char pb, pbus, dev_num, dnum, dev_fn;
1582 u_short vendor, device, index, status;
1583 u_int class = DE4X5_CLASS_CODE;
1584 u_int iobase;
1585 struct bus_type *lp = &bus;
1586
1587 if ((!ioaddr || !loading_module) && autoprobed) return;
1588
1589 if (!pcibios_present()) return;
1590
1591 lp->bus = PCI;
1592
1593 if (ioaddr < 0x1000) {
1594 pbus = (u_short)(ioaddr >> 8);
1595 dnum = (u_short)(ioaddr & 0xff);
1596 } else {
1597 pbus = 0;
1598 dnum = 0;
1599 }
1600
1601 for (index=0;
1602 (pcibios_find_class(class, index, &pb, &dev_fn)!= PCIBIOS_DEVICE_NOT_FOUND);
1603 index++) {
1604 dev_num = PCI_SLOT(dev_fn);
1605
1606 if ((!pbus && !dnum) || ((pbus == pb) && (dnum == dev_num))) {
1607 pcibios_read_config_word(pb, PCI_DEVICE, PCI_VENDOR_ID, &vendor);
1608 pcibios_read_config_word(pb, PCI_DEVICE, PCI_DEVICE_ID, &device);
1609 if (!(is_DC21040 || is_DC21041 || is_DC21140)) continue;
1610
1611
1612 lp->device = dev_num;
1613 lp->bus_num = pb;
1614
1615
1616 lp->chipset = device;
1617
1618
1619 pcibios_read_config_dword(pb, PCI_DEVICE, PCI_BASE_ADDRESS_0, &iobase);
1620 iobase &= CBIO_MASK;
1621
1622
1623 pcibios_read_config_byte(pb, PCI_DEVICE, PCI_INTERRUPT_LINE, &irq);
1624 if ((irq == 0) || (irq == (u_char) 0xff)) continue;
1625
1626
1627 pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
1628 if (!(status & PCI_COMMAND_IO)) continue;
1629
1630 if (!(status & PCI_COMMAND_MASTER)) {
1631 status |= PCI_COMMAND_MASTER;
1632 pcibios_write_config_word(pb, PCI_DEVICE, PCI_COMMAND, status);
1633 pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
1634 }
1635 if (!(status & PCI_COMMAND_MASTER)) continue;
1636
1637 DevicePresent(DE4X5_APROM);
1638 if (check_region(iobase, DE4X5_PCI_TOTAL_SIZE) == 0) {
1639 if ((dev = alloc_device(dev, iobase)) != NULL) {
1640 dev->irq = irq;
1641 if ((status = de4x5_hw_init(dev, iobase)) == 0) {
1642 num_de4x5s++;
1643 }
1644 num_eth++;
1645 }
1646 } else if (autoprobed) {
1647 printk("%s: region already allocated at 0x%04x.\n", dev->name,
1648 (u_short)iobase);
1649 }
1650 }
1651 }
1652
1653 return;
1654 }
1655
1656
1657
1658
1659
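/*
** Allocate or reuse a 'struct device' entry for the adapter, name it ethN
** and, if the autoprobe placeholder was consumed, extend the device list so
** later boards can still be found.
*/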
1660 static struct device *alloc_device(struct device *dev, u_long iobase)
1661 {
1662 int addAutoProbe = 0;
1663 struct device *tmp = NULL, *ret;
1664 int (*init)(struct device *) = NULL;
1665
1666 if (loading_module) return dev;
1667
1668
1669
1670
1671 while (dev->next != NULL) {
1672 if ((dev->base_addr == DE4X5_NDA) || (dev->base_addr == 0)) break;
1673 dev = dev->next;
1674 num_eth++;
1675 }
1676
1677
1678
1679
1680
1681 if ((dev->base_addr == 0) && (num_de4x5s > 0)) {
1682 addAutoProbe++;
1683 tmp = dev->next;
1684 init = dev->init;
1685 }
1686
1687
1688
1689
1690
1691 if ((dev->next == NULL) &&
1692 !((dev->base_addr == DE4X5_NDA) || (dev->base_addr == 0))) {
1693 dev->next = (struct device *)kmalloc(sizeof(struct device)+8, GFP_KERNEL);
1694 dev = dev->next;
1695 if (dev == NULL) {
1696 printk("eth%d: Device not initialised, insufficient memory\n", num_eth);
1697 } else {
1698
1699
1700
1701
1702
1703 dev->name = (char *)dev + sizeof(struct device);
1704 if (num_eth > 9999) {
1705 sprintf(dev->name,"eth????");
1706 } else {
1707 sprintf(dev->name,"eth%d", num_eth);
1708 }
1709 dev->base_addr = iobase;
1710 dev->next = NULL;
1711 dev->init = &de4x5_probe;
1712 num_de4x5s++;
1713 }
1714 }
1715 ret = dev;
1716
1717
1718
1719
1720
1721 if (ret != NULL) {
1722 if (addAutoProbe) {
1723 for (; (tmp->next!=NULL) && (tmp->base_addr!=DE4X5_NDA); tmp=tmp->next);
1724
1725
1726
1727
1728
1729 if ((tmp->next == NULL) && !(tmp->base_addr == DE4X5_NDA)) {
1730 tmp->next = (struct device *)kmalloc(sizeof(struct device) + 8,
1731 GFP_KERNEL);
1732 tmp = tmp->next;
1733 if (tmp == NULL) {
1734 printk("%s: Insufficient memory to extend the device list.\n",
1735 dev->name);
1736 } else {
1737
1738
1739
1740
1741
1742 tmp->name = (char *)tmp + sizeof(struct device);
1743 if (num_eth > 9999) {
1744 sprintf(tmp->name,"eth????");
1745 } else {
1746 sprintf(tmp->name,"eth%d", num_eth);
1747 }
1748 tmp->base_addr = 0;
1749 tmp->next = NULL;
1750 tmp->init = init;
1751 }
1752 } else {
1753 tmp->base_addr = 0;
1754 }
1755 }
1756 }
1757
1758 return ret;
1759 }
1760
1761
1762
1763
1764
1765
1766
1767
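/*
** Start the chip specific media autosensing state machine and, in AUTO mode,
** arm the autosense timer with the interval it returns.
*/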
1768 static int autoconf_media(struct device *dev)
1769 {
1770 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1771 u_long iobase = dev->base_addr;
1772 int next_tick = DE4X5_AUTOSENSE_MS;
1773
1774 lp->linkOK = 0;
1775 lp->c_media = AUTO;
1776 disable_ast(dev);
1777 inl(DE4X5_MFC);
1778 lp->media = INIT;
1779 if (lp->chipset == DC21040) {
1780 next_tick = dc21040_autoconf(dev);
1781 } else if (lp->chipset == DC21041) {
1782 next_tick = dc21041_autoconf(dev);
1783 } else if (lp->chipset == DC21140) {
1784 next_tick = dc21140m_autoconf(dev);
1785 }
1786 if (lp->autosense == AUTO) enable_ast(dev, next_tick);
1787
1788 return (lp->media);
1789 }
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
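/*
** DC21040 media autoconfiguration state machine: try TP first, then BNC/AUI,
** then an external SIA, falling back to 'no connection' if nothing responds.
*/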
1803 static int dc21040_autoconf(struct device *dev)
1804 {
1805 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1806 u_long iobase = dev->base_addr;
1807 int next_tick = DE4X5_AUTOSENSE_MS;
1808 s32 imr;
1809
1810 switch (lp->media) {
1811 case INIT:
1812 DISABLE_IRQs;
1813 lp->tx_enable = NO;
1814 lp->timeout = -1;
1815 de4x5_save_skbs(dev);
1816 if ((lp->autosense == AUTO) || (lp->autosense == TP)) {
1817 lp->media = TP;
1818 } else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) {
1819 lp->media = BNC_AUI;
1820 } else if (lp->autosense == EXT_SIA) {
1821 lp->media = EXT_SIA;
1822 } else {
1823 lp->media = NC;
1824 }
1825 lp->local_state = 0;
1826 next_tick = dc21040_autoconf(dev);
1827 break;
1828
1829 case TP:
1830 dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
1831 TP_SUSPECT, test_tp);
1832 break;
1833
1834 case TP_SUSPECT:
1835 de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
1836 break;
1837
1838 case BNC:
1839 case AUI:
1840 case BNC_AUI:
1841 dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
1842 BNC_AUI_SUSPECT, ping_media);
1843 break;
1844
1845 case BNC_AUI_SUSPECT:
1846 de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
1847 break;
1848
1849 case EXT_SIA:
1850 dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
1851 NC, EXT_SIA_SUSPECT, ping_media);
1852 break;
1853
1854 case EXT_SIA_SUSPECT:
1855 de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
1856 break;
1857
1858 case NC:
1859 #ifndef __alpha__
1860 reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
1861 #else
1862
1863 reset_init_sia(dev, 0x8f09, 0x0705, 0x0006);
1864 #endif
1865 de4x5_dbg_media(dev);
1866 lp->media = INIT;
1867 lp->tx_enable = NO;
1868 break;
1869 }
1870
1871 return next_tick;
1872 }
1873
1874 static int
1875 dc21040_state(struct device *dev, int csr13, int csr14, int csr15, int timeout,
1876 int next_state, int suspect_state,
1877 int (*fn)(struct device *, int))
1878 {
1879 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1880 int next_tick = DE4X5_AUTOSENSE_MS;
1881 int linkBad;
1882
1883 switch (lp->local_state) {
1884 case 0:
1885 reset_init_sia(dev, csr13, csr14, csr15);
1886 lp->local_state++;
1887 next_tick = 500;
1888 break;
1889
1890 case 1:
1891 if (!lp->tx_enable) {
1892 linkBad = fn(dev, timeout);
1893 if (linkBad < 0) {
1894 next_tick = linkBad & ~TIMER_CB;
1895 } else {
1896 if (linkBad && (lp->autosense == AUTO)) {
1897 lp->local_state = 0;
1898 lp->media = next_state;
1899 } else {
1900 de4x5_init_connection(dev);
1901 }
1902 }
1903 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
1904 lp->media = suspect_state;
1905 next_tick = 3000;
1906 }
1907 break;
1908 }
1909
1910 return next_tick;
1911 }
1912
1913 static int
1914 de4x5_suspect_state(struct device *dev, int timeout, int prev_state,
1915 int (*fn)(struct device *, int),
1916 int (*asfn)(struct device *))
1917 {
1918 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1919 int next_tick = DE4X5_AUTOSENSE_MS;
1920 int linkBad;
1921
1922 switch (lp->local_state) {
1923 case 1:
1924 if (lp->linkOK && !LOST_MEDIA) {
1925 lp->media = prev_state;
1926 } else {
1927 lp->local_state++;
1928 next_tick = asfn(dev);
1929 }
1930 break;
1931
1932 case 2:
1933 linkBad = fn(dev, timeout);
1934 if (linkBad < 0) {
1935 next_tick = linkBad & ~TIMER_CB;
1936 } else if (!linkBad) {
1937 lp->local_state--;
1938 lp->media = prev_state;
1939 } else {
1940 lp->media = INIT;
1941 }
1942 }
1943
1944 return next_tick;
1945 }
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
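/*
** DC21041 media autoconfiguration state machine: negotiate on TP (with link
** pass/fail and ANS states), then fall back through AUI and BNC before
** declaring no connection.
*/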
1956 static int dc21041_autoconf(struct device *dev)
1957 {
1958 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1959 u_long iobase = dev->base_addr;
1960 s32 sts, irqs, irq_mask, imr, omr;
1961 int next_tick = DE4X5_AUTOSENSE_MS;
1962
1963 switch (lp->media) {
1964 case INIT:
1965 DISABLE_IRQs;
1966 lp->tx_enable = NO;
1967 lp->timeout = -1;
1968 de4x5_save_skbs(dev);
1969 if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) {
1970 lp->media = TP;
1971 } else if (lp->autosense == TP) {
1972 lp->media = TP;
1973 } else if (lp->autosense == BNC) {
1974 lp->media = BNC;
1975 } else if (lp->autosense == AUI) {
1976 lp->media = AUI;
1977 } else {
1978 lp->media = NC;
1979 }
1980 lp->local_state = 0;
1981 next_tick = dc21041_autoconf(dev);
1982 break;
1983
1984 case TP_NW:
1985 if (lp->timeout < 0) {
1986 omr = inl(DE4X5_OMR);
1987 outl(omr | OMR_FD, DE4X5_OMR);
1988 }
1989 irqs = STS_LNF | STS_LNP;
1990 irq_mask = IMR_LFM | IMR_LPM;
1991 sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
1992 if (sts < 0) {
1993 next_tick = sts & ~TIMER_CB;
1994 } else {
1995 if (sts & STS_LNP) {
1996 lp->media = ANS;
1997 } else {
1998 lp->media = AUI;
1999 }
2000 next_tick = dc21041_autoconf(dev);
2001 }
2002 break;
2003
2004 case ANS:
2005 if (!lp->tx_enable) {
2006 irqs = STS_LNP;
2007 irq_mask = IMR_LPM;
2008 sts = test_ans(dev, irqs, irq_mask, 3000);
2009 if (sts < 0) {
2010 next_tick = sts & ~TIMER_CB;
2011 } else {
2012 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
2013 lp->media = TP;
2014 next_tick = dc21041_autoconf(dev);
2015 } else {
2016 de4x5_init_connection(dev);
2017 }
2018 }
2019 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2020 lp->media = ANS_SUSPECT;
2021 next_tick = 3000;
2022 }
2023 break;
2024
2025 case ANS_SUSPECT:
2026 de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
2027 break;
2028
2029 case TP:
2030 if (!lp->tx_enable) {
2031 if (lp->timeout < 0) {
2032 omr = inl(DE4X5_OMR);
2033 outl(omr & ~OMR_FD, DE4X5_OMR);
2034 }
2035 irqs = STS_LNF | STS_LNP;
2036 irq_mask = IMR_LFM | IMR_LPM;
2037 sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
2038 if (sts < 0) {
2039 next_tick = sts & ~TIMER_CB;
2040 } else {
2041 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
2042 if (inl(DE4X5_SISR) & SISR_NRA) {
2043 lp->media = AUI;
2044 } else {
2045 lp->media = BNC;
2046 }
2047 next_tick = dc21041_autoconf(dev);
2048 } else {
2049 de4x5_init_connection(dev);
2050 }
2051 }
2052 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2053 lp->media = TP_SUSPECT;
2054 next_tick = 3000;
2055 }
2056 break;
2057
2058 case TP_SUSPECT:
2059 de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
2060 break;
2061
2062 case AUI:
2063 if (!lp->tx_enable) {
2064 if (lp->timeout < 0) {
2065 omr = inl(DE4X5_OMR);
2066 outl(omr & ~OMR_FD, DE4X5_OMR);
2067 }
2068 irqs = 0;
2069 irq_mask = 0;
2070 sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf7fd, 0x000e, 1000);
2071 if (sts < 0) {
2072 next_tick = sts & ~TIMER_CB;
2073 } else {
2074 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
2075 lp->media = BNC;
2076 next_tick = dc21041_autoconf(dev);
2077 } else {
2078 de4x5_init_connection(dev);
2079 }
2080 }
2081 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2082 lp->media = AUI_SUSPECT;
2083 next_tick = 3000;
2084 }
2085 break;
2086
2087 case AUI_SUSPECT:
2088 de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
2089 break;
2090
2091 case BNC:
2092 switch (lp->local_state) {
2093 case 0:
2094 if (lp->timeout < 0) {
2095 omr = inl(DE4X5_OMR);
2096 outl(omr & ~OMR_FD, DE4X5_OMR);
2097 }
2098 irqs = 0;
2099 irq_mask = 0;
2100 sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf7fd, 0x0006, 1000);
2101 if (sts < 0) {
2102 next_tick = sts & ~TIMER_CB;
2103 } else {
2104 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
2105 lp->media = NC;
2106 } else {
2107 lp->local_state++;
2108 next_tick = dc21041_autoconf(dev);
2109 }
2110 }
2111 break;
2112
2113 case 1:
2114 if (!lp->tx_enable) {
2115 if ((sts = ping_media(dev, 3000)) < 0) {
2116 next_tick = sts & ~TIMER_CB;
2117 } else {
2118 if (sts) {
2119 lp->local_state = 0;
2120 lp->media = NC;
2121 } else {
2122 de4x5_init_connection(dev);
2123 }
2124 }
2125 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2126 lp->media = BNC_SUSPECT;
2127 next_tick = 3000;
2128 }
2129 break;
2130 }
2131 break;
2132
2133 case BNC_SUSPECT:
2134 de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
2135 break;
2136
2137 case NC:
2138 omr = inl(DE4X5_OMR);
2139 outl(omr | OMR_FD, DE4X5_OMR);
2140 reset_init_sia(dev, 0xef01, 0xffff, 0x0008);
2141 de4x5_dbg_media(dev);
2142 lp->media = INIT;
2143 lp->tx_enable = NO;
2144 break;
2145 }
2146
2147 return next_tick;
2148 }
2149
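/*
** DC21140(A) media autoconfiguration state machine: reset the PHY, run MII
** autonegotiation where available, otherwise detect the 100Mb/10Mb link
** status directly, and fall back to 'no connection' if neither is up.
*/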
2150 static int dc21140m_autoconf(struct device *dev)
2151 {
2152 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2153 int ana, anlpa, cap, cr, sr, iobase = dev->base_addr;
2154 int next_tick = DE4X5_AUTOSENSE_MS;
2155 u_long imr, omr;
2156
2157 switch(lp->media) {
2158 case INIT:
2159 DISABLE_IRQs;
2160 lp->tx_enable = FALSE;
2161 lp->timeout = -1;
2162 if ((next_tick = de4x5_reset_phy(dev)) < 0) {
2163 next_tick &= ~TIMER_CB;
2164 } else {
2165 de4x5_save_skbs(dev);
2166 SET_10Mb;
2167 if (lp->autosense == _100Mb) {
2168 lp->media = _100Mb;
2169 } else if (lp->autosense == _10Mb) {
2170 lp->media = _10Mb;
2171 } else if ((lp->autosense == AUTO) && (sr=is_anc_capable(dev))) {
2172 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
2173 ana &= (de4x5_full_duplex ? ~0 : ~MII_ANA_FDAM);
2174 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2175 lp->media = ANS;
2176 } else if (lp->autosense == AUTO) {
2177 lp->media = SPD_DET;
2178 } else if (is_spd_100(dev) && is_100_up(dev)) {
2179 lp->media = _100Mb;
2180 } else {
2181 lp->media = NC;
2182 }
2183 lp->local_state = 0;
2184 next_tick = dc21140m_autoconf(dev);
2185 }
2186 break;
2187
2188 case ANS:
2189 switch (lp->local_state) {
2190 case 0:
2191 if (lp->timeout < 0) {
2192 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2193 }
2194 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, FALSE, 500);
2195 if (cr < 0) {
2196 next_tick = cr & ~TIMER_CB;
2197 } else {
2198 if (cr) {
2199 lp->local_state = 0;
2200 lp->media = SPD_DET;
2201 } else {
2202 lp->local_state++;
2203 }
2204 next_tick = dc21140m_autoconf(dev);
2205 }
2206 break;
2207
2208 case 1:
2209 if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 3000)) < 0) {
2210 next_tick = sr & ~TIMER_CB;
2211 } else {
2212 lp->media = SPD_DET;
2213 lp->local_state = 0;
2214 if (sr) {
2215 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
2216 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2217 if ((anlpa & MII_ANLPA_ACK) && !(anlpa & MII_ANLPA_RF) &&
2218 (cap = anlpa & MII_ANLPA_TAF & ana)) {
2219 if (cap & MII_ANA_100M) {
2220 de4x5_full_duplex = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE);
2221 lp->media = _100Mb;
2222 } else if (cap & MII_ANA_10M) {
2223 de4x5_full_duplex = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) ? TRUE : FALSE);
2224 lp->media = _10Mb;
2225 }
2226 }
2227 }
2228 next_tick = dc21140m_autoconf(dev);
2229 }
2230 break;
2231 }
2232 break;
2233
2234 case SPD_DET:
2235 if (!lp->phy[lp->active].id) {
2236 outl(GEP_FDXD | GEP_MODE, DE4X5_GEP);
2237 }
2238 if (is_spd_100(dev) && is_100_up(dev)) {
2239 lp->media = _100Mb;
2240 } else if (!is_spd_100(dev) && is_10_up(dev)) {
2241 lp->media = _10Mb;
2242 } else {
2243 lp->media = NC;
2244 }
2245 next_tick = dc21140m_autoconf(dev);
2246 break;
2247
2248 case _100Mb:
2249 next_tick = 3000;
2250 if (!lp->tx_enable) {
2251 SET_100Mb;
2252 de4x5_init_connection(dev);
2253 } else {
2254 if (!lp->linkOK && (lp->autosense == AUTO)) {
2255 if (!(is_spd_100(dev) && is_100_up(dev))) {
2256 lp->media = INIT;
2257 next_tick = DE4X5_AUTOSENSE_MS;
2258 }
2259 }
2260 }
2261 break;
2262
2263 case _10Mb:
2264 next_tick = 3000;
2265 if (!lp->tx_enable) {
2266 SET_10Mb;
2267 de4x5_init_connection(dev);
2268 } else {
2269 if (!lp->linkOK && (lp->autosense == AUTO)) {
2270 if (!(!is_spd_100(dev) && is_10_up(dev))) {
2271 lp->media = INIT;
2272 next_tick = DE4X5_AUTOSENSE_MS;
2273 }
2274 }
2275 }
2276 break;
2277
2278 case NC:
2279 SET_10Mb;
2280 de4x5_dbg_media(dev);
2281 lp->media = INIT;
2282 lp->tx_enable = FALSE;
2283 break;
2284 }
2285
2286 return next_tick;
2287 }
2288
2289 static void de4x5_init_connection(struct device *dev)
2290 {
2291 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2292 u_long iobase = dev->base_addr;
2293
2294 de4x5_dbg_media(dev);
2295 de4x5_restore_skbs(dev);
2296 cli();
2297 de4x5_rx(dev);
2298 de4x5_setup_intr(dev);
2299 lp->lostMedia = 0;
2300 lp->tx_enable = YES;
2301 sti();
2302 outl(POLL_DEMAND, DE4X5_TPD);
2303
2304 return;
2305 }
2306
2307 static int de4x5_reset_phy(struct device *dev)
2308 {
2309 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2310 u_long iobase = dev->base_addr;
2311 int next_tick = 0;
2312
2313 if (lp->phy[lp->active].id) {
2314 if (lp->timeout < 0) {
2315 outl(GEP_HRST, DE4X5_GEP);
2316 udelay(1000);
2317 outl(0x00, DE4X5_GEP);
2318 udelay(2000);
2319 mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2320 }
2321 next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, FALSE, 500);
2322 }
2323
2324 return next_tick;
2325 }
2326
2327 static int
2328 test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
2329 {
2330 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2331 u_long iobase = dev->base_addr;
2332 s32 sts, csr12;
2333
2334 if (lp->timeout < 0) {
2335 lp->timeout = msec/100;
2336 reset_init_sia(dev, csr13, csr14, csr15);
2337
2338
2339 outl(irq_mask, DE4X5_IMR);
2340
2341
2342 sts = inl(DE4X5_STS);
2343 outl(sts, DE4X5_STS);
2344
2345
2346 if (lp->chipset == DC21041) {
2347 csr12 = inl(DE4X5_SISR);
2348 outl(csr12, DE4X5_SISR);
2349 }
2350 }
2351
2352 sts = inl(DE4X5_STS) & ~TIMER_CB;
2353
2354 if (!(sts & irqs) && --lp->timeout) {
2355 sts = 100 | TIMER_CB;
2356 } else {
2357 lp->timeout = -1;
2358 }
2359
2360 return sts;
2361 }
2362
2363 static int test_tp(struct device *dev, s32 msec)
2364 {
2365 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2366 u_long iobase = dev->base_addr;
2367 int sisr;
2368
2369 if (lp->timeout < 0) {
2370 lp->timeout = msec/100;
2371 }
2372
2373 sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);
2374
2375 if (sisr && --lp->timeout) {
2376 sisr = 100 | TIMER_CB;
2377 } else {
2378 lp->timeout = -1;
2379 }
2380
2381 return sisr;
2382 }
2383
2384
2385
2386
2387
2388 static int test_mii_reg(struct device *dev, int reg, int mask, int pol, long msec)
2389 {
2390 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2391 int test, iobase = dev->base_addr;
2392
2393 if (lp->timeout < 0) {
2394 lp->timeout = msec/100;
2395 }
2396
2397 if (pol) pol = ~0;
2398 reg = mii_rd(reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
2399 test = (reg ^ pol) & mask;
2400
2401 if (test && --lp->timeout) {
2402 reg = 100 | TIMER_CB;
2403 } else {
2404 lp->timeout = -1;
2405 }
2406
2407 return reg;
2408 }
2409
2410 static int is_spd_100(struct device *dev)
2411 {
2412 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2413 u_long iobase = dev->base_addr;
2414 int spd;
2415
2416 if (lp->phy[lp->active].id) {
2417 spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
2418 spd = ~(spd ^ lp->phy[lp->active].spd.value);
2419 spd &= lp->phy[lp->active].spd.mask;
2420 } else {
2421 spd = ((~inl(DE4X5_GEP)) & GEP_SLNK);
2422 }
2423
2424 return spd;
2425 }
2426
2427 static int is_100_up(struct device *dev)
2428 {
2429 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2430 u_long iobase = dev->base_addr;
2431
2432 if (lp->phy[lp->active].id) {
2433
2434 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
2435 return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS);
2436 } else {
2437 return ((~inl(DE4X5_GEP)) & GEP_SLNK);
2438 }
2439 }
2440
2441 static int is_10_up(struct device *dev)
2442 {
2443 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2444 u_long iobase = dev->base_addr;
2445
2446 if (lp->phy[lp->active].id) {
2447
2448 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
2449 return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS);
2450 } else {
2451 return ((~inl(DE4X5_GEP)) & GEP_LNP);
2452 }
2453 }
2454
2455 static int is_anc_capable(struct device *dev)
2456 {
2457 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2458 u_long iobase = dev->base_addr;
2459
2460 if (lp->phy[lp->active].id) {
2461 return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_ANC);
2462 } else {
2463 return 0;
2464 }
2465 }
2466
2467
2468
2469
2470
2471 static int ping_media(struct device *dev, int msec)
2472 {
2473 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2474 u_long iobase = dev->base_addr;
2475 int sisr;
2476
2477 if (lp->timeout < 0) {
2478 lp->timeout = msec/100;
2479
2480 lp->tmp = lp->tx_new;
2481 load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), NULL);
2482 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
2483 outl(POLL_DEMAND, DE4X5_TPD);
2484 }
2485
2486 sisr = inl(DE4X5_SISR);
2487
2488 if ((!(sisr & SISR_NCR)) && (lp->tx_ring[lp->tmp].status < 0) && (--lp->timeout)) {
2489 sisr = 100 | TIMER_CB;
2490 } else {
2491 if ((!(sisr & SISR_NCR)) &&
2492 !(lp->tx_ring[lp->tmp].status & (T_OWN | TD_ES)) && lp->timeout) {
2493 sisr = 0;
2494 } else {
2495 sisr = 1;
2496 }
2497 lp->timeout = -1;
2498 }
2499
2500 return sisr;
2501 }
2502
2503
2504
2505
2506
2507
2508
2509
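/*
** The next routines let the driver change media without losing queued
** transmits: de4x5_save_skbs() stops the chip, moves any skb's still on the
** TX ring into the driver's cache queue and does a soft reset, while
** de4x5_restore_skbs() reloads the cached skb's back onto the ring.
** cache.save_cnt guards against nested saves.
*/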
2510 static void de4x5_save_skbs(struct device *dev)
2511 {
2512 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2513 u_long iobase = dev->base_addr;
2514 int i;
2515 s32 omr;
2516
2517 if (!lp->cache.save_cnt) {
2518 STOP_DE4X5;
2519 de4x5_tx(dev);
2520 for (i=lp->tx_new; i!=lp->tx_old; i--) {
2521 if (lp->skb[i]) {
2522 de4x5_putb_cache(dev, lp->skb[i]);
2523 lp->skb[i] = NULL;
2524 }
2525 if (i==0) i=lp->txRingSize;
2526 }
2527 if (lp->skb[i]) {
2528 de4x5_putb_cache(dev, lp->skb[i]);
2529 lp->skb[i] = NULL;
2530 }
2531
2532 de4x5_cache_state(dev, DE4X5_SAVE_STATE);
2533 de4x5_sw_reset(dev);
2534 de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
2535 dev->tbusy = 0;
2536 lp->cache.save_cnt++;
2537 START_DE4X5;
2538 }
2539
2540 return;
2541 }
2542
2543 static void de4x5_restore_skbs(struct device *dev)
2544 {
2545 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2546 u_long iobase = dev->base_addr;
2547 struct sk_buff *skb;
2548 int i;
2549 s32 omr;
2550
2551 if (lp->cache.save_cnt) {
2552 STOP_DE4X5;
2553 de4x5_cache_state(dev, DE4X5_SAVE_STATE);
2554 de4x5_sw_reset(dev);
2555 de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
2556 dev->tbusy = 1;
2557
2558 for (i=0; TX_BUFFS_AVAIL && lp->cache.skb; i++) {
2559 skb = de4x5_get_cache(dev);
2560 load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
2561 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
2562 }
2563 if (TX_BUFFS_AVAIL) {
2564 dev->tbusy = 0;
2565 }
2566 lp->cache.save_cnt--;
2567 START_DE4X5;
2568 }
2569
2570 return;
2571 }
2572
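/*
** Save or restore the CSR state (bus mode, operating mode, interrupt mask
** and, on chips other than the DC21140, CSR13-15) around a soft reset. For
** the DC21140 the general purpose port is reprogrammed for the current
** media instead.
*/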
2573 static void de4x5_cache_state(struct device *dev, int flag)
2574 {
2575 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2576 u_long iobase = dev->base_addr;
2577 s32 gep;
2578
2579 switch(flag) {
2580 case DE4X5_SAVE_STATE:
2581 lp->cache.csr0 = inl(DE4X5_BMR);
2582 lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR));
2583 lp->cache.csr7 = inl(DE4X5_IMR);
2584 if (lp->chipset != DC21140) {
2585 lp->cache.csr13 = inl(DE4X5_SICR);
2586 lp->cache.csr14 = inl(DE4X5_STRR);
2587 lp->cache.csr15 = inl(DE4X5_SIGR);
2588 }
2589 break;
2590
2591 case DE4X5_RESTORE_STATE:
2592 outl(lp->cache.csr0, DE4X5_BMR);
2593 outl(lp->cache.csr6, DE4X5_OMR);
2594 outl(lp->cache.csr7, DE4X5_IMR);
2595 if (lp->chipset == DC21140) {
2596 outl(GEP_INIT, DE4X5_GEP);
2597 gep = (lp->media == _100Mb ? GEP_MODE : 0);
2598 if (!lp->phy[lp->active].id && !de4x5_full_duplex) {
2599 gep |= GEP_FDXD;
2600 }
2601 outl(gep, DE4X5_GEP);
2602 } else {
2603 reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
2604 lp->cache.csr15);
2605 }
2606 break;
2607 }
2608
2609 return;
2610 }
2611
2612 static void de4x5_put_cache(struct device *dev, struct sk_buff *skb)
2613 {
2614 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2615 struct sk_buff *p;
2616
2617 if (lp->cache.skb) {
2618 for (p=lp->cache.skb; p->next; p=p->next);
2619 p->next = skb;
2620 } else {
2621 lp->cache.skb = skb;
2622 }
2623 skb->next = NULL;
2624
2625 return;
2626 }
2627
2628 static void de4x5_putb_cache(struct device *dev, struct sk_buff *skb)
2629 {
2630 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2631 struct sk_buff *p = lp->cache.skb;
2632
2633 lp->cache.skb = skb;
2634 skb->next = p;
2635
2636 return;
2637 }
2638
2639 static struct sk_buff *de4x5_get_cache(struct device *dev)
2640 {
2641 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2642 struct sk_buff *p = lp->cache.skb;
2643
2644 if (p) {
2645 lp->cache.skb = p->next;
2646 p->next = NULL;
2647 }
2648
2649 return p;
2650 }
2651
2652
2653
2654
2655
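/*
** Wait for the SIA Nway autonegotiation state field (SISR_ANS) to reach
** ANS_NWOK, or for one of the interrupts in 'irqs' to fire, using the usual
** (100 | TIMER_CB) polling convention.
*/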
2656 static int test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec)
2657 {
2658 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2659 u_long iobase = dev->base_addr;
2660 s32 sts, ans;
2661
2662 if (lp->timeout < 0) {
2663 lp->timeout = msec/100;
2664 outl(irq_mask, DE4X5_IMR);
2665
2666
2667 sts = inl(DE4X5_STS);
2668 outl(sts, DE4X5_STS);
2669 }
2670
2671 ans = inl(DE4X5_SISR) & SISR_ANS;
2672 sts = inl(DE4X5_STS) & ~TIMER_CB;
2673
2674 if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
2675 sts = 100 | TIMER_CB;
2676 } else {
2677 lp->timeout = -1;
2678 }
2679
2680 return sts;
2681 }
2682
2683 static void de4x5_setup_intr(struct device *dev)
2684 {
2685 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2686 u_long iobase = dev->base_addr;
2687 s32 imr, sts;
2688
2689 if (inl(DE4X5_OMR) & OMR_SR) {
2690 imr = 0;
2691 UNMASK_IRQs;
2692 sts = inl(DE4X5_STS);
2693 outl(sts, DE4X5_STS);
2694 ENABLE_IRQs;
2695 }
2696
2697 return;
2698 }
2699
2700
2701
2702
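/*
** Reset the SIA and reload the general purpose (CSR15), TX/RX (CSR14) and
** connection (CSR13) registers, writing CSR13 last.
*/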
2703 static void reset_init_sia(struct device *dev, s32 sicr, s32 strr, s32 sigr)
2704 {
2705 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2706 u_long iobase = dev->base_addr;
2707
2708 RESET_SIA;
2709 outl(sigr, DE4X5_SIGR);
2710 outl(strr, DE4X5_STRR);
2711 outl(sicr, DE4X5_SICR);
2712
2713 return;
2714 }
2715
2716
2717
2718
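/*
** Build the frame used by ping_media(): destination and source are both our
** own hardware address and the length field is one byte. ping_media() only
** watches the transmit status, so the frame need not actually come back.
*/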
2719 static void create_packet(struct device *dev, char *frame, int len)
2720 {
2721 int i;
2722 char *buf = frame;
2723
2724 for (i=0; i<ETH_ALEN; i++) {
2725 *buf++ = dev->dev_addr[i];
2726 }
2727 for (i=0; i<ETH_ALEN; i++) {
2728 *buf++ = dev->dev_addr[i];
2729 }
2730
2731 *buf++ = 0;
2732 *buf++ = 1;
2733
2734 return;
2735 }
2736
2737
2738
2739
2740 static void de4x5_us_delay(u32 usec)
2741 {
2742 udelay(usec);
2743
2744 return;
2745 }
2746
2747
2748
2749
2750 static void de4x5_ms_delay(u32 msec)
2751 {
2752 u_int i;
2753
2754 for (i=0; i<msec; i++) {
2755 de4x5_us_delay(1000);
2756 }
2757
2758 return;
2759 }
2760
2761
2762
2763
2764
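/*
** Decode the EISA ID register into its compressed ASCII form and compare it
** against the known DE4X5 signatures. Returns 1 and copies the string into
** 'name' on a match.
*/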
2765 static int EISA_signature(char *name, s32 eisa_id)
2766 {
2767 c_char *signatures[] = DE4X5_SIGNATURE;
2768 char ManCode[DE4X5_STRLEN];
2769 union {
2770 s32 ID;
2771 char Id[4];
2772 } Eisa;
2773 int i, status = 0, siglen = sizeof(signatures)/sizeof(c_char *);
2774
2775 *name = '\0';
2776 Eisa.ID = inl(eisa_id);
2777
2778 ManCode[0]=(((Eisa.Id[0]>>2)&0x1f)+0x40);
2779 ManCode[1]=(((Eisa.Id[1]&0xe0)>>5)+((Eisa.Id[0]&0x03)<<3)+0x40);
2780 ManCode[2]=(((Eisa.Id[2]>>4)&0x0f)+0x30);
2781 ManCode[3]=((Eisa.Id[2]&0x0f)+0x30);
2782 ManCode[4]=(((Eisa.Id[3]>>4)&0x0f)+0x30);
2783 ManCode[5]='\0';
2784
2785 for (i=0;i<siglen;i++) {
2786 if (strstr(ManCode, signatures[i]) != NULL) {
2787 strcpy(name,ManCode);
2788 status = 1;
2789 break;
2790 }
2791 }
2792
2793 return status;
2794 }
2795
2796
2797
2798
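/*
** Work out the board name: DC21040 based boards have no SROM name and are
** reported as "DE434/5"; for the DC21041/DC21140 the product name is taken
** from the SROM. If the name does not match a known DE4X5 signature, the
** bare chip name is used instead (or the name is blanked when 'dec_only'
** is set).
*/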
2799 static int PCI_signature(char *name, struct bus_type *lp)
2800 {
2801 c_char *de4x5_signatures[] = DE4X5_SIGNATURE;
2802 int i, status = 0, siglen = sizeof(de4x5_signatures)/sizeof(c_char *);
2803
2804 if (lp->chipset == DC21040) {
2805 strcpy(name, "DE434/5");
2806 } else {
2807 int i = *((char *)&lp->srom + 19) * 3;
2808 if (lp->chipset == DC21041) {
2809 strncpy(name, (char *)&lp->srom + 26 + i, 8);
2810 } else if (lp->chipset == DC21140) {
2811 strncpy(name, (char *)&lp->srom + 26 + i, 8);
2812 }
2813 }
2814 name[8] = '\0';
2815 for (i=0; i<siglen; i++) {
2816 if (strstr(name,de4x5_signatures[i])!=NULL) break;
2817 }
2818 if (i == siglen) {
2819 if (dec_only) {
2820 *name = '\0';
2821 } else {
2822 strcpy(name, (((lp->chipset == DC21040) ? "DC21040" :
2823 ((lp->chipset == DC21041) ? "DC21041" :
2824 ((lp->chipset == DC21140) ? "DC21140" : "UNKNOWN"
2825 )))));
2826 }
2827 }
2828
2829 return status;
2830 }
2831
2832
2833
2834
2835
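/*
** For the DC21040, reset the Ethernet address ROM pointer so the address can
** be read serially from the APROM register; for later chips read the entire
** serial ROM contents into lp->srom.
*/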
2836 static void DevicePresent(u_long aprom_addr)
2837 {
2838 int i;
2839 struct bus_type *lp = &bus;
2840
2841 if (lp->chipset == DC21040) {
2842 outl(0, aprom_addr);
2843 } else {
2844 short *p = (short *)&lp->srom;
2845 for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
2846 *p++ = srom_rd(aprom_addr, i);
2847 }
2848 de4x5_dbg_srom((struct de4x5_srom *)&lp->srom);
2849 }
2850
2851 return;
2852 }
2853
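/*
** Extract the hardware address from the Ethernet address ROM (DC21040 and
** EISA boards) or from the SROM image read by DevicePresent(), computing the
** DEC checksum as we go. A bad checksum only fails the probe when 'dec_only'
** is set; non-DEC SROM layouts are handled via de4x5_bad_srom().
*/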
2854 static int get_hw_addr(struct device *dev)
2855 {
2856 u_long iobase = dev->base_addr;
2857 int broken, i, k, tmp, status = 0;
2858 u_short j,chksum;
2859 struct bus_type *lp = &bus;
2860
2861 broken = de4x5_bad_srom(lp);
2862 for (i=0,k=0,j=0;j<3;j++) {
2863 k <<= 1;
2864 if (k > 0xffff) k-=0xffff;
2865
2866 if (lp->bus == PCI) {
2867 if (lp->chipset == DC21040) {
2868 while ((tmp = inl(DE4X5_APROM)) < 0);
2869 k += (u_char) tmp;
2870 dev->dev_addr[i++] = (u_char) tmp;
2871 while ((tmp = inl(DE4X5_APROM)) < 0);
2872 k += (u_short) (tmp << 8);
2873 dev->dev_addr[i++] = (u_char) tmp;
2874 } else if (!broken) {
2875 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
2876 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
2877 } else if (broken == SMC) {
2878 dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
2879 dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
2880 }
2881 } else {
2882 k += (u_char) (tmp = inb(EISA_APROM));
2883 dev->dev_addr[i++] = (u_char) tmp;
2884 k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
2885 dev->dev_addr[i++] = (u_char) tmp;
2886 }
2887
2888 if (k > 0xffff) k-=0xffff;
2889 }
2890 if (k == 0xffff) k=0;
2891
2892 if (lp->bus == PCI) {
2893 if (lp->chipset == DC21040) {
2894 while ((tmp = inl(DE4X5_APROM)) < 0);
2895 chksum = (u_char) tmp;
2896 while ((tmp = inl(DE4X5_APROM)) < 0);
2897 chksum |= (u_short) (tmp << 8);
2898 if ((k != chksum) && (dec_only)) status = -1;
2899 }
2900 } else {
2901 chksum = (u_char) inb(EISA_APROM);
2902 chksum |= (u_short) (inb(EISA_APROM) << 8);
2903 if ((k != chksum) && (dec_only)) status = -1;
2904 }
2905
2906 return status;
2907 }
2908
2909
2910
2911
2912
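/*
** Recognise SROMs that do not follow the DEC layout by checking whether the
** start of the SROM (and offset 0x10) looks like one of the OUIs in
** enet_det[] rather than the expected ID block. Returns a non-zero vendor
** code (currently only SMC) so get_hw_addr() can compensate.
*/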
2913 static int de4x5_bad_srom(struct bus_type *lp)
2914 {
2915 int i, status = 0;
2916
2917 for (i=0; i<sizeof(enet_det)/ETH_ALEN; i++) {
2918 if (!de4x5_strncmp((char *)&lp->srom, (char *)&enet_det[i], 3) &&
2919 !de4x5_strncmp((char *)&lp->srom+0x10, (char *)&enet_det[i], 3)) {
2920 status = SMC;
2921 break;
2922 }
2923 }
2924
2925 return status;
2926 }
2927
2928 static int de4x5_strncmp(char *a, char *b, int n)
2929 {
2930 int ret=0;
2931
2932 for (;n && !ret;n--) {
2933 ret = *a++ - *b++;
2934 }
2935
2936 return ret;
2937 }
2938
2939
2940
2941
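/*
** Bit-banged read of one 16 bit word from the serial EEPROM (93C46 style)
** through CSR9: assert chip select, clock out the read command and the 6 bit
** word address, then clock in 16 data bits MSB first.
*/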
2942 static short srom_rd(u_long addr, u_char offset)
2943 {
2944 sendto_srom(SROM_RD | SROM_SR, addr);
2945
2946 srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
2947 srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
2948 srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
2949
2950 return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
2951 }
2952
2953 static void srom_latch(u_int command, u_long addr)
2954 {
2955 sendto_srom(command, addr);
2956 sendto_srom(command | DT_CLK, addr);
2957 sendto_srom(command, addr);
2958
2959 return;
2960 }
2961
2962 static void srom_command(u_int command, u_long addr)
2963 {
2964 srom_latch(command, addr);
2965 srom_latch(command, addr);
2966 srom_latch((command & 0x0000ff00) | DT_CS, addr);
2967
2968 return;
2969 }
2970
2971 static void srom_address(u_int command, u_long addr, u_char offset)
2972 {
2973 int i;
2974 char a;
2975
2976 a = (char)(offset << 2);
2977 for (i=0; i<6; i++, a <<= 1) {
2978 srom_latch(command | ((a < 0) ? DT_IN : 0), addr);
2979 }
2980 de4x5_us_delay(1);
2981
2982 i = (getfrom_srom(addr) >> 3) & 0x01;
2983 if (i != 0) {
2984 printk("Bad SROM address phase.....\n");
2985 }
2986
2987 return;
2988 }
2989
2990 static short srom_data(u_int command, u_long addr)
2991 {
2992 int i;
2993 short word = 0;
2994 s32 tmp;
2995
2996 for (i=0; i<16; i++) {
2997 sendto_srom(command | DT_CLK, addr);
2998 tmp = getfrom_srom(addr);
2999 sendto_srom(command, addr);
3000
3001 word = (word << 1) | ((tmp >> 3) & 0x01);
3002 }
3003
3004 sendto_srom(command & 0x0000ff00, addr);
3005
3006 return word;
3007 }
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
3024 static void sendto_srom(u_int command, u_long addr)
3025 {
3026 outl(command, addr);
3027 udelay(1);
3028
3029 return;
3030 }
3031
3032 static int getfrom_srom(u_long addr)
3033 {
3034 s32 tmp;
3035
3036 tmp = inl(addr);
3037 udelay(1);
3038
3039 return tmp;
3040 }
3041
3042
3043
3044
3045
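/*
** Bit-banged MII management (MDIO) access through CSR9: each transaction is
** a long preamble, start and opcode bits, the 5 bit PHY and register
** addresses, a turnaround, and 16 data bits. mii_swap() reverses the bit
** order since mii_wdata() shifts the data out LSB first.
*/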
3046 static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
3047 {
3048 mii_wdata(MII_PREAMBLE, 2, ioaddr);
3049 mii_wdata(MII_PREAMBLE, 32, ioaddr);
3050 mii_wdata(MII_STRD, 4, ioaddr);
3051 mii_address(phyaddr, ioaddr);
3052 mii_address(phyreg, ioaddr);
3053 mii_ta(MII_STRD, ioaddr);
3054
3055 return mii_rdata(ioaddr);
3056 }
3057
3058 static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
3059 {
3060 mii_wdata(MII_PREAMBLE, 2, ioaddr);
3061 mii_wdata(MII_PREAMBLE, 32, ioaddr);
3062 mii_wdata(MII_STWR, 4, ioaddr);
3063 mii_address(phyaddr, ioaddr);
3064 mii_address(phyreg, ioaddr);
3065 mii_ta(MII_STWR, ioaddr);
3066 data = mii_swap(data, 16);
3067 mii_wdata(data, 16, ioaddr);
3068
3069 return;
3070 }
3071
3072 static int mii_rdata(u_long ioaddr)
3073 {
3074 int i;
3075 s32 tmp = 0;
3076
3077 for (i=0; i<16; i++) {
3078 tmp <<= 1;
3079 tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
3080 }
3081
3082 return tmp;
3083 }
3084
3085 static void mii_wdata(int data, int len, u_long ioaddr)
3086 {
3087 int i;
3088
3089 for (i=0; i<len; i++) {
3090 sendto_mii(MII_MWR | MII_WR, data, ioaddr);
3091 data >>= 1;
3092 }
3093
3094 return;
3095 }
3096
3097 static void mii_address(u_char addr, u_long ioaddr)
3098 {
3099 int i;
3100
3101 addr = mii_swap(addr, 5);
3102 for (i=0; i<5; i++) {
3103 sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
3104 addr >>= 1;
3105 }
3106
3107 return;
3108 }
3109
3110 static void mii_ta(u_long rw, u_long ioaddr)
3111 {
3112 if (rw == MII_STWR) {
3113 sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
3114 getfrom_mii(MII_MRD | MII_RD, ioaddr);
3115 } else {
3116 getfrom_mii(MII_MRD | MII_RD, ioaddr);
3117 }
3118
3119 return;
3120 }
3121
3122 static int mii_swap(int data, int len)
3123 {
3124 int i, tmp = 0;
3125
3126 for (i=0; i<len; i++) {
3127 tmp <<= 1;
3128 tmp |= (data & 1);
3129 data >>= 1;
3130 }
3131
3132 return tmp;
3133 }
3134
3135 static void sendto_mii(u32 command, int data, u_long ioaddr)
3136 {
3137 u32 j;
3138
3139 j = (data & 1) << 17;
3140 outl(command | j, ioaddr);
3141 udelay(1);
3142 outl(command | MII_MDC | j, ioaddr);
3143 udelay(1);
3144
3145 return;
3146 }
3147
3148 static int getfrom_mii(u32 command, u_long ioaddr)
3149 {
3150 outl(command, ioaddr);
3151 udelay(1);
3152 outl(command | MII_MDC, ioaddr);
3153 udelay(1);
3154
3155 return ((inl(ioaddr) >> 19) & 1);
3156 }
3157
3158
3159
3160
3161
3162 static int mii_get_oui(u_char phyaddr, u_long ioaddr)
3163 {
3164
3165
3166
3167
3168
3169
3170 int r2, r3;
3171
3172
3173 r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
3174 r3 = mii_rd(MII_ID1, phyaddr, ioaddr);
3175
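/*
** The OUI is not actually reassembled from the two ID registers here; the
** raw MII_ID0 contents are returned, and those are the values the
** phy_info[] table is matched against.
*/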
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202 return r2;
3203 }
3204
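/*
** Scan MII addresses 1..31 for attached PHYs, match the device ID against
** the phy_info[] table and record up to DE4X5_MAX_PHY of them in lp->phy[].
** Any recognised PHYs are then reset. Returns the number of PHYs found.
*/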
3205 static int mii_get_phy(struct device *dev)
3206 {
3207 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3208 int iobase = dev->base_addr;
3209 int i, j, k, limit=sizeof(phy_info)/sizeof(struct phy_table);
3210 int id;
3211
3212
3213 outl(GEP_HRST, DE4X5_GEP);
3214 udelay(1000);
3215 outl(0x00, DE4X5_GEP);
3216 udelay(2000);
3217
3218
3219 lp->active = 0;
3220 for (lp->mii_cnt=0, i=1; i<DE4X5_MAX_MII; i++) {
3221 id = mii_get_oui(i, DE4X5_MII);
3222 if ((id == 0) || (id == -1)) continue;
3223 for (j=0; j<limit; j++) {
3224 if (id != phy_info[j].id) continue;
3225 for (k=0; (k < DE4X5_MAX_PHY) && lp->phy[k].id; k++);
3226 if (k < DE4X5_MAX_PHY) {
3227 memcpy((char *)&lp->phy[k],
3228 (char *)&phy_info[j], sizeof(struct phy_table));
3229 lp->phy[k].addr = i;
3230 lp->mii_cnt++;
3231 } else {
3232 i = DE4X5_MAX_MII;
3233 j = limit;
3234 }
3235 }
3236 }
3237 if (lp->phy[lp->active].id) {
3238 for (k=0; (k < DE4X5_MAX_PHY) && lp->phy[k].id; k++) {
3239 mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
3240 while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
3241
3242 de4x5_dbg_mii(dev, k);
3243 }
3244 }
3245
3246 return lp->mii_cnt;
3247 }
3248
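/*
** Build the setup frame holding our station address, in either the hash
** perfect format (our address in the physical address slot plus one preset
** hash table bit) or the perfect filtering format (our address followed by
** an all ones broadcast entry, two bytes per longword as the chip expects).
*/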
3249 static char *build_setup_frame(struct device *dev, int mode)
3250 {
3251 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3252 int i;
3253 char *pa = lp->setup_frame;
3254
3255
3256 if (mode == ALL) {
3257 memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
3258 }
3259
3260 if (lp->setup_f == HASH_PERF) {
3261 for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
3262 *(pa + i) = dev->dev_addr[i];
3263 if (i & 0x01) pa += 2;
3264 }
3265 *(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80;
3266 } else {
3267 for (i=0; i<ETH_ALEN; i++) {
3268 *(pa + (i&1)) = dev->dev_addr[i];
3269 if (i & 0x01) pa += 4;
3270 }
3271 for (i=0; i<ETH_ALEN; i++) {
3272 *(pa + (i&1)) = (char) 0xff;
3273 if (i & 0x01) pa += 4;
3274 }
3275 }
3276
3277 return pa;
3278 }
3279
3280 static void enable_ast(struct device *dev, u32 time_out)
3281 {
3282 timeout(dev, (void *)&de4x5_ast, (u_long)dev, time_out);
3283
3284 return;
3285 }
3286
3287 static void disable_ast(struct device *dev)
3288 {
3289 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3290
3291 del_timer(&lp->timer);
3292
3293 return;
3294 }
3295
3296 static long de4x5_switch_to_mii(struct device *dev)
3297 {
3298 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3299 int iobase = dev->base_addr;
3300 long omr;
3301
3302
3303 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR));
3304 omr |= (OMR_PS | OMR_HBD);
3305 outl(omr, DE4X5_OMR);
3306
3307
3308 RESET_DE4X5;
3309
3310
3311 if (lp->chipset == DC21140) {
3312 outl(GEP_INIT, DE4X5_GEP);
3313 outl(0, DE4X5_GEP);
3314 }
3315
3316
3317 outl(omr, DE4X5_OMR);
3318
3319 return omr;
3320 }
3321
3322 static long de4x5_switch_to_srl(struct device *dev)
3323 {
3324 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3325 int iobase = dev->base_addr;
3326 long omr;
3327
3328
3329 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR));
3330 outl(omr | OMR_TTM, DE4X5_OMR);
3331 outl(omr, DE4X5_OMR);
3332
3333
3334 RESET_DE4X5;
3335
3336
3337 if (lp->chipset == DC21140) {
3338 outl(GEP_INIT, DE4X5_GEP);
3339 outl(0, DE4X5_GEP);
3340 }
3341
3342
3343 outl(omr, DE4X5_OMR);
3344
3345 return omr;
3346 }
3347
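/*
** Arm the driver's single autosense timer to call 'fn(data)' after 'msec'
** milliseconds (minimum one tick), cancelling any previously queued timer.
*/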
3348 static void timeout(struct device *dev, void (*fn)(u_long data), u_long data, u_long msec)
3349 {
3350 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3351 int dt;
3352
3353
3354 del_timer(&lp->timer);
3355
3356
3357 dt = (msec * HZ) / 1000;
3358 if (dt==0) dt=1;
3359
3360
3361 lp->timer.expires = jiffies + dt;
3362 lp->timer.function = fn;
3363 lp->timer.data = data;
3364 add_timer(&lp->timer);
3365
3366 return;
3367 }
3368
3369 static void de4x5_dbg_open(struct device *dev)
3370 {
3371 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3372 int i;
3373
3374 if (de4x5_debug > 1) {
3375 printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
3376 printk("\tphysical address: ");
3377 for (i=0;i<6;i++) {
3378 printk("%2.2x:",(short)dev->dev_addr[i]);
3379 }
3380 printk("\n");
3381 printk("Descriptor head addresses:\n");
3382 printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
3383 printk("Descriptor addresses:\nRX: ");
3384 for (i=0;i<lp->rxRingSize-1;i++){
3385 if (i < 3) {
3386 printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
3387 }
3388 }
3389 printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
3390 printk("TX: ");
3391 for (i=0;i<lp->txRingSize-1;i++){
3392 if (i < 3) {
3393 printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
3394 }
3395 }
3396 printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
3397 printk("Descriptor buffers:\nRX: ");
3398 for (i=0;i<lp->rxRingSize-1;i++){
3399 if (i < 3) {
3400 printk("0x%8.8x ",lp->rx_ring[i].buf);
3401 }
3402 }
3403 printk("...0x%8.8x\n",lp->rx_ring[i].buf);
3404 printk("TX: ");
3405 for (i=0;i<lp->txRingSize-1;i++){
3406 if (i < 3) {
3407 printk("0x%8.8x ", lp->tx_ring[i].buf);
3408 }
3409 }
3410 printk("...0x%8.8x\n", lp->tx_ring[i].buf);
3411 printk("Ring size: \nRX: %d\nTX: %d\n",
3412 (short)lp->rxRingSize,
3413 (short)lp->txRingSize);
3414 }
3415
3416 return;
3417 }
3418
3419 static void de4x5_dbg_mii(struct device *dev, int k)
3420 {
3421 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3422 int iobase = dev->base_addr;
3423
3424 if (de4x5_debug > 2) {
3425 printk("\nMII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII));
3426 printk("MII SR: %x\n",mii_rd(MII_SR,lp->phy[k].addr,DE4X5_MII));
3427 printk("MII ID0: %x\n",mii_rd(MII_ID0,lp->phy[k].addr,DE4X5_MII));
3428 printk("MII ID1: %x\n",mii_rd(MII_ID1,lp->phy[k].addr,DE4X5_MII));
3429 if (lp->phy[k].id != BROADCOM_T4) {
3430 printk("MII ANA: %x\n",mii_rd(0x04,lp->phy[k].addr,DE4X5_MII));
3431 printk("MII ANC: %x\n",mii_rd(0x05,lp->phy[k].addr,DE4X5_MII));
3432 }
3433 printk("MII 16: %x\n",mii_rd(0x10,lp->phy[k].addr,DE4X5_MII));
3434 if (lp->phy[k].id != BROADCOM_T4) {
3435 printk("MII 17: %x\n",mii_rd(0x11,lp->phy[k].addr,DE4X5_MII));
3436 printk("MII 18: %x\n",mii_rd(0x12,lp->phy[k].addr,DE4X5_MII));
3437 } else {
3438 printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
3439 }
3440 }
3441
3442 return;
3443 }
3444
3445 static void de4x5_dbg_media(struct device *dev)
3446 {
3447 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3448
3449 if (lp->media != lp->c_media) {
3450 if (de4x5_debug > 0) {
3451 if (lp->chipset != DC21140) {
3452 printk("%s: media is %s\n", dev->name,
3453 (lp->media == NC ? "unconnected!" :
3454 (lp->media == TP ? "TP." :
3455 (lp->media == ANS ? "TP/Nway." :
3456 (lp->media == BNC ? "BNC." :
3457 (lp->media == BNC_AUI ? "BNC/AUI." :
3458 (lp->media == EXT_SIA ? "EXT SIA." :
3459 "???."
3460 )))))));
3461 } else {
3462 printk("%s: mode is %s\n", dev->name,
3463 (lp->media == NC ? "link down or incompatible connection.":
3464 (lp->media == _100Mb ? "100Mb/s." :
3465 (lp->media == _10Mb ? "10Mb/s." :
3466 "\?\?\?"
3467 ))));
3468 }
3469 }
3470 lp->c_media = lp->media;
3471 }
3472
3473 return;
3474 }
3475
3476 static void de4x5_dbg_srom(struct de4x5_srom *p)
3477 {
3478 int i;
3479
3480 if (de4x5_debug > 1) {
3481 printk("Sub-system Vendor ID: %04x\n", (u_short)*(p->sub_vendor_id));
3482 printk("Sub-system ID: %04x\n", (u_short)*(p->sub_system_id));
3483 printk("ID Block CRC: %02x\n", (u_char)(p->id_block_crc));
3484
3485 printk("Hardware Address: ");
3486 for (i=0;i<ETH_ALEN-1;i++) {
3487 printk("%02x:", (u_char)*(p->ieee_addr+i));
3488 }
3489 printk("%02x\n", (u_char)*(p->ieee_addr+i));
3490 printk("CRC checksum: %04x\n", (u_short)(p->chksum));
3491 for (i=0; i<64; i++) {
3492 printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
3493 }
3494 }
3495
3496 return;
3497 }
3498
3499
3500
3501
3502
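/*
** Private ioctl interface, reached through dev->do_ioctl for the
** SIOCDEVPRIVATE commands. The struct de4x5_ioctl block lives inline in the
** ifreq data area, so a rough user level sketch (assuming the struct layout
** in de4x5.h) looks like:
**
**	struct ifreq ifr;
**	struct de4x5_ioctl *ioc = (struct de4x5_ioctl *)&ifr.ifr_data;
**	u_char addr[ETH_ALEN];
**
**	strcpy(ifr.ifr_name, "eth0");
**	ioc->cmd  = DE4X5_GET_HWADDR;
**	ioc->len  = ETH_ALEN;
**	ioc->data = addr;
**	ioctl(skt, SIOCDEVPRIVATE, &ifr);
**
** where 'skt' is an open AF_INET socket descriptor.
*/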
3503 static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd)
3504 {
3505 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3506 struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_data;
3507 u_long iobase = dev->base_addr;
3508 int i, j, status = 0;
3509 s32 omr;
3510 union {
3511 u8 addr[(HASH_TABLE_LEN * ETH_ALEN)];
3512 u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
3513 u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2];
3514 } tmp;
3515
3516 switch(ioc->cmd) {
3517 case DE4X5_GET_HWADDR:
3518 ioc->len = ETH_ALEN;
3519 status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
3520 if (status)
3521 break;
3522 for (i=0; i<ETH_ALEN; i++) {
3523 tmp.addr[i] = dev->dev_addr[i];
3524 }
3525 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
3526
3527 break;
3528 case DE4X5_SET_HWADDR:
3529 status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN);
3530 if (status)
3531 break;
3532 status = -EPERM;
3533 if (!suser())
3534 break;
3535 status = 0;
3536 memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN);
3537 for (i=0; i<ETH_ALEN; i++) {
3538 dev->dev_addr[i] = tmp.addr[i];
3539 }
3540 build_setup_frame(dev, PHYS_ADDR_ONLY);
3541
3542 while (set_bit(0, (void *)&dev->tbusy) != 0);
3543 if (lp->setup_f == HASH_PERF) {
3544 load_packet(dev, lp->setup_frame, TD_IC | HASH_F | TD_SET |
3545 SETUP_FRAME_LEN, NULL);
3546 } else {
3547 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
3548 SETUP_FRAME_LEN, NULL);
3549 }
3550 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
3551 outl(POLL_DEMAND, DE4X5_TPD);
3552 dev->tbusy = 0;
3553
3554 break;
3555 case DE4X5_SET_PROM:
3556 if (suser()) {
3557 omr = inl(DE4X5_OMR);
3558 omr |= OMR_PR;
3559 outl(omr, DE4X5_OMR);
3560 } else {
3561 status = -EPERM;
3562 }
3563
3564 break;
3565 case DE4X5_CLR_PROM:
3566 if (suser()) {
3567 omr = inl(DE4X5_OMR);
3568 omr &= ~OMR_PR;
3569 outl(omr, DE4X5_OMR); /* CSR6 is a 32 bit register */
3570 } else {
3571 status = -EPERM;
3572 }
3573
3574 break;
3575 case DE4X5_SAY_BOO:
3576 printk("%s: Boo!\n", dev->name);
3577
3578 break;
3579 case DE4X5_GET_MCA:
3580 ioc->len = (HASH_TABLE_LEN >> 3);
3581 status = verify_area(VERIFY_WRITE, ioc->data, ioc->len);
3582 if (!status) {
3583 memcpy_tofs(ioc->data, lp->setup_frame, ioc->len);
3584 }
3585
3586 break;
3587 case DE4X5_SET_MCA:
3588 if (suser()) {
3589
3590 if (ioc->len != HASH_TABLE_LEN) {
3591 if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN * ioc->len))) {
3592 memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN * ioc->len);
3593 set_multicast_list(dev);
3594 }
3595 } else {
3596 set_multicast_list(dev);
3597 }
3598 } else {
3599 status = -EPERM;
3600 }
3601
3602 break;
3603 case DE4X5_CLR_MCA:
3604 if (suser()) {
3605
3606 set_multicast_list(dev);
3607 } else {
3608 status = -EPERM;
3609 }
3610
3611 break;
3612 case DE4X5_MCA_EN:
3613 if (suser()) {
3614 omr = inl(DE4X5_OMR);
3615 omr |= OMR_PM;
3616 outl(omr, DE4X5_OMR);
3617 } else {
3618 status = -EPERM;
3619 }
3620
3621 break;
3622 case DE4X5_GET_STATS:
3623 ioc->len = sizeof(lp->pktStats);
3624 status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
3625 if (status)
3626 break;
3627
3628 cli();
3629 memcpy_tofs(ioc->data, &lp->pktStats, ioc->len);
3630 sti();
3631
3632 break;
3633 case DE4X5_CLR_STATS:
3634 if (suser()) {
3635 cli();
3636 memset(&lp->pktStats, 0, sizeof(lp->pktStats));
3637 sti();
3638 } else {
3639 status = -EPERM;
3640 }
3641
3642 break;
3643 case DE4X5_GET_OMR:
3644 tmp.addr[0] = inl(DE4X5_OMR);
3645 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, 1))) {
3646 memcpy_tofs(ioc->data, tmp.addr, 1);
3647 }
3648
3649 break;
3650 case DE4X5_SET_OMR:
3651 if (suser()) {
3652 if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, 1))) {
3653 memcpy_fromfs(tmp.addr, ioc->data, 1);
3654 outl(tmp.addr[0], DE4X5_OMR);
3655 }
3656 } else {
3657 status = -EPERM;
3658 }
3659
3660 break;
3661 case DE4X5_GET_REG:
3662 j = 0;
3663 tmp.lval[0] = inl(DE4X5_STS); j+=4;
3664 tmp.lval[1] = inl(DE4X5_BMR); j+=4;
3665 tmp.lval[2] = inl(DE4X5_IMR); j+=4;
3666 tmp.lval[3] = inl(DE4X5_OMR); j+=4;
3667 tmp.lval[4] = inl(DE4X5_SISR); j+=4;
3668 tmp.lval[5] = inl(DE4X5_SICR); j+=4;
3669 tmp.lval[6] = inl(DE4X5_STRR); j+=4;
3670 tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
3671 ioc->len = j;
3672 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
3673 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
3674 }
3675 break;
3676
3677 #define DE4X5_DUMP 0x0f
3678
3679 case DE4X5_DUMP:
3680 j = 0;
3681 tmp.addr[j++] = dev->irq;
3682 for (i=0; i<ETH_ALEN; i++) {
3683 tmp.addr[j++] = dev->dev_addr[i];
3684 }
3685 tmp.addr[j++] = lp->rxRingSize;
3686 tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
3687 tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
3688
3689 for (i=0;i<lp->rxRingSize-1;i++){
3690 if (i < 3) {
3691 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
3692 }
3693 }
3694 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
3695 for (i=0;i<lp->txRingSize-1;i++){
3696 if (i < 3) {
3697 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
3698 }
3699 }
3700 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
3701
3702 for (i=0;i<lp->rxRingSize-1;i++){
3703 if (i < 3) {
3704 tmp.lval[j>>2] = (s32)lp->rx_ring[i].buf; j+=4;
3705 }
3706 }
3707 tmp.lval[j>>2] = (s32)lp->rx_ring[i].buf; j+=4;
3708 for (i=0;i<lp->txRingSize-1;i++){
3709 if (i < 3) {
3710 tmp.lval[j>>2] = (s32)lp->tx_ring[i].buf; j+=4;
3711 }
3712 }
3713 tmp.lval[j>>2] = (s32)lp->tx_ring[i].buf; j+=4;
3714
3715 for (i=0;i<lp->rxRingSize;i++){
3716 tmp.lval[j>>2] = lp->rx_ring[i].status; j+=4;
3717 }
3718 for (i=0;i<lp->txRingSize;i++){
3719 tmp.lval[j>>2] = lp->tx_ring[i].status; j+=4;
3720 }
3721
3722 tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
3723 tmp.lval[j>>2] = inl(DE4X5_TPD); j+=4;
3724 tmp.lval[j>>2] = inl(DE4X5_RPD); j+=4;
3725 tmp.lval[j>>2] = inl(DE4X5_RRBA); j+=4;
3726 tmp.lval[j>>2] = inl(DE4X5_TRBA); j+=4;
3727 tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
3728 tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
3729 tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
3730 tmp.lval[j>>2] = lp->chipset; j+=4;
3731 if (lp->chipset == DC21140) {
3732 tmp.lval[j>>2] = inl(DE4X5_GEP); j+=4;
3733 } else {
3734 tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
3735 tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
3736 tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
3737 tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
3738 }
3739 tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4;
3740 if (lp->phy[lp->active].id) {
3741 tmp.lval[j>>2] = lp->active; j+=4;
3742 tmp.lval[j>>2]=mii_rd(MII_CR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
3743 tmp.lval[j>>2]=mii_rd(MII_SR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
3744 tmp.lval[j>>2]=mii_rd(MII_ID0,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
3745 tmp.lval[j>>2]=mii_rd(MII_ID1,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
3746 if (lp->phy[lp->active].id != BROADCOM_T4) {
3747 tmp.lval[j>>2]=mii_rd(MII_ANA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
3748 tmp.lval[j>>2]=mii_rd(MII_ANLPA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
3749 }
3750 tmp.lval[j>>2]=mii_rd(0x10,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
3751 if (lp->phy[lp->active].id != BROADCOM_T4) {
3752 tmp.lval[j>>2]=mii_rd(0x11,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
3753 tmp.lval[j>>2]=mii_rd(0x12,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
3754 } else {
3755 tmp.lval[j>>2]=mii_rd(0x14,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
3756 }
3757 }
3758
3759 tmp.addr[j++] = lp->txRingSize;
3760 tmp.addr[j++] = dev->tbusy;
3761
3762 ioc->len = j;
3763 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
3764 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
3765 }
3766
3767 break;
3768 default:
3769 status = -EOPNOTSUPP;
3770 }
3771
3772 return status;
3773 }
3774
3775 #ifdef MODULE
3776
3777
3778
3779
3780
3781
3782
3783
3784
3785
3786 static char devicename[9] = { 0, };
3787 static struct device thisDE4X5 = {
3788 devicename,
3789 0, 0, 0, 0,
3790 0, 0,
3791 0, 0, 0, NULL, de4x5_probe };
3792
3793 static int io=0x0b;
3794
3795 int
3796 init_module(void)
3797 {
3798 struct device *p = (struct device *)&thisDE4X5;
3799
3800 thisDE4X5.base_addr = io;
3801 thisDE4X5.irq = 0;
3802
3803 for (; p!=NULL; p=p->next) {
3804 if (register_netdev(p) != 0)
3805 return -EIO;
3806 }
3807 io=0;
3808 return 0;
3809 }
3810
3811 void
3812 cleanup_module(void)
3813 {
3814 struct de4x5_private *lp = (struct de4x5_private *) thisDE4X5.priv;
3815 struct device *p = (struct device *)&thisDE4X5;
3816 int keep_loaded = 0;
3817
3818 for (; p!=NULL; p=p->next) {
3819 keep_loaded += (p->flags & IFF_UP);
3820 }
3821
3822 if (keep_loaded) {
3823 printk("de4x5: Cannot unload modules - %d interface%s%s still active.\n",
3824 keep_loaded, (keep_loaded>1 ? "s ": " "),
3825 (keep_loaded>1 ? "are": "is"));
3826 return;
3827 }
3828
3829 for (p=thisDE4X5.next; p!=NULL; p=p->next) {
3830 if (p->priv) {
3831 struct de4x5_private *lp = (struct de4x5_private *)p->priv;
3832 if (lp->cache.buf) {
3833 kfree(lp->cache.buf);
3834 }
3835 release_region(p->base_addr, (lp->bus == PCI ?
3836 DE4X5_PCI_TOTAL_SIZE :
3837 DE4X5_EISA_TOTAL_SIZE));
3838 kfree(lp->cache.priv);
3839 }
3840 unregister_netdev(p);
3841 kfree(p);
3842 }
3843
3844 if (thisDE4X5.priv) {
3845 if (lp->cache.buf) {
3846 kfree(lp->cache.buf);
3847 }
3848 release_region(thisDE4X5.base_addr,
3849 (lp->bus == PCI ?
3850 DE4X5_PCI_TOTAL_SIZE :
3851 DE4X5_EISA_TOTAL_SIZE));
3852 kfree(lp->cache.priv);
3853 thisDE4X5.priv = NULL;
3854 }
3855 unregister_netdev(&thisDE4X5);
3856
3857 return;
3858 }
3859 #endif
3860
3861
3862
3863
3864
3865
3866
3867
3868