This source file includes the following definitions:
- de4x5_probe
- de4x5_hw_init
- de4x5_open
- de4x5_init
- de4x5_sw_reset
- de4x5_queue_pkt
- de4x5_interrupt
- de4x5_rx
- de4x5_tx
- de4x5_ast
- de4x5_close
- de4x5_get_stats
- load_packet
- set_multicast_list
- SetMulticastFilter
- eisa_probe
- pci_probe
- alloc_device
- autoconf_media
- dc21040_autoconf
- dc21040_state
- de4x5_suspect_state
- dc21041_autoconf
- dc21140m_autoconf
- de4x5_init_connection
- de4x5_reset_phy
- test_media
- test_tp
- test_mii_reg
- is_spd_100
- is_100_up
- is_10_up
- is_anc_capable
- ping_media
- de4x5_save_skbs
- de4x5_restore_skbs
- de4x5_cache_state
- de4x5_put_cache
- de4x5_putb_cache
- de4x5_get_cache
- test_ans
- de4x5_setup_intr
- reset_init_sia
- create_packet
- de4x5_us_delay
- de4x5_ms_delay
- EISA_signature
- PCI_signature
- DevicePresent
- get_hw_addr
- de4x5_bad_srom
- de4x5_strncmp
- srom_rd
- srom_latch
- srom_command
- srom_address
- srom_data
- sendto_srom
- getfrom_srom
- mii_rd
- mii_wr
- mii_rdata
- mii_wdata
- mii_address
- mii_ta
- mii_swap
- sendto_mii
- getfrom_mii
- mii_get_oui
- mii_get_phy
- build_setup_frame
- enable_ast
- disable_ast
- de4x5_switch_to_mii
- de4x5_switch_to_srl
- timeout
- de4x5_dbg_open
- de4x5_dbg_mii
- de4x5_dbg_media
- de4x5_dbg_srom
- de4x5_ioctl
- init_module
- cleanup_module
183 static const char *version = "de4x5.c:v0.40 96/3/5 davies@wanton.lkg.dec.com\n";
184
185 #include <linux/module.h>
186
187 #include <linux/kernel.h>
188 #include <linux/sched.h>
189 #include <linux/string.h>
190 #include <linux/interrupt.h>
191 #include <linux/ptrace.h>
192 #include <linux/errno.h>
193 #include <linux/ioport.h>
194 #include <linux/malloc.h>
195 #include <linux/bios32.h>
196 #include <linux/pci.h>
197 #include <linux/delay.h>
198 #include <asm/bitops.h>
199 #include <asm/io.h>
200 #include <asm/dma.h>
201 #include <asm/segment.h>
202
203 #include <linux/netdevice.h>
204 #include <linux/etherdevice.h>
205 #include <linux/skbuff.h>
206
207 #include <linux/time.h>
208 #include <linux/types.h>
209 #include <linux/unistd.h>
210
211 #include "de4x5.h"
212
213 #define c_char const char
214
215
216
217
218 struct phy_table {
219 int reset;
220 int id;
221 int ta;
222 struct {
223 int reg;
224 int mask;
225 int value;
226 } spd;
227 };
228
229 struct mii_phy {
230 int reset;
231 int id;
232 int ta;
233 struct {
234 int reg;
235 int mask;
236 int value;
237 } spd;
238 int addr;
239 };
240
241 #define DE4X5_MAX_PHY 8
242
243
244
245
246
247 static struct phy_table phy_info[] = {
248 {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}},
249 {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}},
250 {0, SEEQ_T4 , 1, {0x12, 0x10, 0x10}},
251 {0, CYPRESS_T4 , 1, {0x05, 0x20, 0x20}}
252 };
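/*
** Added commentary (not in the original source): each phy_info[] entry fills
** in struct phy_table above -- whether the PHY wants an explicit reset, the
** MII identifier used to recognise it, whether it apparently needs a
** turn-around cycle on MII transfers, and the spd register/mask/value triple
** that is_spd_100() reads to decide whether the link is running at 100Mb/s.
*/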
253
254
255
256
257 static c_char enet_det[][ETH_ALEN] = {
258 {0x00, 0x00, 0x0c, 0x00, 0x00, 0x00}
259 };
260
261 #define SMC 1
262
263
264 #ifdef DE4X5_DEBUG
265 static int de4x5_debug = DE4X5_DEBUG;
266 #else
267 static int de4x5_debug = 1;
268 #endif
269
270 #ifdef DE4X5_AUTOSENSE
271 static int de4x5_autosense = DE4X5_AUTOSENSE;
272 #else
273 static int de4x5_autosense = AUTO;
274 #endif
275 #define DE4X5_AUTOSENSE_MS 250
276
277 #ifdef DE4X5_FULL_DUPLEX
278 static s32 de4x5_full_duplex = 1;
279 #else
280 static s32 de4x5_full_duplex = 0;
281 #endif
282
283 #define DE4X5_NDA 0xffe0
284
285
286
287
288 #define PROBE_LENGTH 32
289 #define ETH_PROM_SIG 0xAA5500FFUL
290
291
292
293
294 #define PKT_BUF_SZ 1536
295 #define MAX_PKT_SZ 1514
296 #define MAX_DAT_SZ 1500
297 #define MIN_DAT_SZ 1
298 #define PKT_HDR_LEN 14
299 #define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
300 #define QUEUE_PKT_TIMEOUT (3*HZ)
301
302
303 #define CRC_POLYNOMIAL_BE 0x04c11db7UL
304 #define CRC_POLYNOMIAL_LE 0xedb88320UL
305
306
307
308
309 #define DE4X5_EISA_IO_PORTS 0x0c00
310 #define DE4X5_EISA_TOTAL_SIZE 0x100
311
312 #define MAX_EISA_SLOTS 16
313 #define EISA_SLOT_INC 0x1000
314
315 #define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"}
316 #define DE4X5_NAME_LENGTH 8
317
318
319
320
321 #define PCI_MAX_BUS_NUM 8
322 #define DE4X5_PCI_TOTAL_SIZE 0x80
323 #define DE4X5_CLASS_CODE 0x00020000
324
325
326
327
328
329
330
331 #define ALIGN4 ((u_long)4 - 1)
332 #define ALIGN8 ((u_long)8 - 1)
333 #define ALIGN16 ((u_long)16 - 1)
334 #define ALIGN32 ((u_long)32 - 1)
335 #define ALIGN64 ((u_long)64 - 1)
336 #define ALIGN128 ((u_long)128 - 1)
337
338 #define ALIGN ALIGN32
339 #define CACHE_ALIGN CAL_16LONG
340 #define DESC_SKIP_LEN DSL_0
341
342 #define DESC_ALIGN
343
344 #ifndef DEC_ONLY
345 static int dec_only = 0;
346 #else
347 static int dec_only = 1;
348 #endif
349
350
351
352
353 #define ENABLE_IRQs { \
354 imr |= lp->irq_en;\
355 outl(imr, DE4X5_IMR); \
356 }
357
358 #define DISABLE_IRQs {\
359 imr = inl(DE4X5_IMR);\
360 imr &= ~lp->irq_en;\
361 outl(imr, DE4X5_IMR); \
362 }
363
364 #define UNMASK_IRQs {\
365 imr |= lp->irq_mask;\
366 outl(imr, DE4X5_IMR); \
367 }
368
369 #define MASK_IRQs {\
370 imr = inl(DE4X5_IMR);\
371 imr &= ~lp->irq_mask;\
372 outl(imr, DE4X5_IMR); \
373 }
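/*
** Added commentary: ENABLE_IRQs/DISABLE_IRQs toggle the interrupt summary
** bits kept in lp->irq_en, while UNMASK_IRQs/MASK_IRQs toggle the per-event
** bits kept in lp->irq_mask.  Like the other register macros here, they
** assume 'imr', 'lp' and 'iobase' are in scope in the calling function.
*/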
374
375
376
377
378 #define START_DE4X5 {\
379 omr = inl(DE4X5_OMR);\
380 omr |= OMR_ST | OMR_SR;\
381 outl(omr, DE4X5_OMR); \
382 }
383
384 #define STOP_DE4X5 {\
385 omr = inl(DE4X5_OMR);\
386 omr &= ~(OMR_ST|OMR_SR);\
387 outl(omr, DE4X5_OMR); \
388 }
389
390
391
392
393 #define RESET_SIA outl(0, DE4X5_SICR);
394
395
396
397
398 #define DE4X5_AUTOSENSE_MS 250
399
400
401
402
403 struct de4x5_srom {
404 char sub_vendor_id[2];
405 char sub_system_id[2];
406 char reserved[12];
407 char id_block_crc;
408 char reserved2;
409 char version;
410 char num_adapters;
411 char ieee_addr[6];
412 char info[100];
413 short chksum;
414 };
415
416
417
418
419
420
421
422
423
424 #define NUM_RX_DESC 8
425 #define NUM_TX_DESC 32
426 #define RX_BUFF_SZ 1536
427
428
429 struct de4x5_desc {
430 volatile s32 status;
431 u32 des1;
432 u32 buf;
433 u32 next;
434 DESC_ALIGN
435 };
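/*
** Added commentary: the four longwords mirror the DC21x4x hardware
** descriptor -- ownership/status, the des1 control and buffer-size field,
** the buffer address, and a second buffer/chain address (left at zero here,
** since the rings are closed with the RD_RER/TD_TER end-of-ring bits).
** DESC_ALIGN, defined empty above, appears to be a hook for padding the
** descriptors out to a larger stride if that were ever needed.
*/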
436
437
438
439
440 #define DE4X5_PKT_STAT_SZ 16
441 #define DE4X5_PKT_BIN_SZ 128
442
443
444 struct de4x5_private {
445 char adapter_name[80];
446 struct de4x5_desc rx_ring[NUM_RX_DESC];
447 struct de4x5_desc tx_ring[NUM_TX_DESC];
448 struct sk_buff *skb[NUM_TX_DESC];
449 int rx_new, rx_old;
450 int tx_new, tx_old;
451 char setup_frame[SETUP_FRAME_LEN];
452 char frame[64];
453 struct enet_statistics stats;
454 struct {
455 u_int bins[DE4X5_PKT_STAT_SZ];
456 u_int unicast;
457 u_int multicast;
458 u_int broadcast;
459 u_int excessive_collisions;
460 u_int tx_underruns;
461 u_int excessive_underruns;
462 u_int rx_runt_frames;
463 u_int rx_collision;
464 u_int rx_dribble;
465 u_int rx_overflow;
466 } pktStats;
467 char rxRingSize;
468 char txRingSize;
469 int bus;
470 int bus_num;
471 int state;
472 int chipset;
473 s32 irq_mask;
474 s32 irq_en;
475 int media;
476 int c_media;
477 int linkOK;
478 int autosense;
479 int tx_enable;
480 int lostMedia;
481 int setup_f;
482 int local_state;
483 struct mii_phy phy[DE4X5_MAX_PHY];
484 int active;
485 int mii_cnt;
486 int timeout;
487 struct timer_list timer;
488 int tmp;
489 struct {
490 void *priv;
491 void *buf;
492 s32 csr0;
493 s32 csr6;
494 s32 csr7;
495 s32 csr13;
496 s32 csr14;
497 s32 csr15;
498 int save_cnt;
499 struct sk_buff *skb;
500 } cache;
501 };
502
503
504
505
506
507
508 static struct bus_type {
509 int bus;
510 int bus_num;
511 int device;
512 int chipset;
513 struct de4x5_srom srom;
514 int autosense;
515 } bus;
516
517
518
519
520
521
522
523
524 #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
525 lp->tx_old+lp->txRingSize-lp->tx_new-1:\
526 lp->tx_old -lp->tx_new-1)
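/*
** Added commentary: TX_BUFFS_AVAIL counts the free transmit descriptors,
** always keeping one slot unused so that tx_new == tx_old means "ring empty"
** rather than "ring full".  For example, with txRingSize = 32, tx_old = 5
** and tx_new = 30, there are 5 + 32 - 30 - 1 = 6 descriptors free.
*/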
527
528
529
530
531 static int de4x5_open(struct device *dev);
532 static int de4x5_queue_pkt(struct sk_buff *skb, struct device *dev);
533 static void de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs);
534 static int de4x5_close(struct device *dev);
535 static struct enet_statistics *de4x5_get_stats(struct device *dev);
536 static void set_multicast_list(struct device *dev);
537 static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd);
538
539
540
541
542 static int de4x5_hw_init(struct device *dev, u_long iobase);
543 static int de4x5_init(struct device *dev);
544 static int de4x5_sw_reset(struct device *dev);
545 static int de4x5_rx(struct device *dev);
546 static int de4x5_tx(struct device *dev);
547 static int de4x5_ast(struct device *dev);
548
549 static int autoconf_media(struct device *dev);
550 static void create_packet(struct device *dev, char *frame, int len);
551 static void de4x5_us_delay(u32 usec);
552 static void de4x5_ms_delay(u32 msec);
553 static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb);
554 static int dc21040_autoconf(struct device *dev);
555 static int dc21041_autoconf(struct device *dev);
556 static int dc21140m_autoconf(struct device *dev);
557 static int de4x5_suspect_state(struct device *dev, int timeout, int prev_state, int (*fn)(struct device *, int), int (*asfn)(struct device *));
558 static int dc21040_state(struct device *dev, int csr13, int csr14, int csr15, int timeout, int next_state, int suspect_state, int (*fn)(struct device *, int));
559 static int test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
560
561 static int test_mii_reg(struct device *dev, int reg, int mask, int pol, long msec);
562 static int is_spd_100(struct device *dev);
563 static int is_100_up(struct device *dev);
564 static int is_10_up(struct device *dev);
565 static int is_anc_capable(struct device *dev);
566 static int ping_media(struct device *dev, int msec);
567 static void de4x5_save_skbs(struct device *dev);
568 static void de4x5_restore_skbs(struct device *dev);
569 static void de4x5_cache_state(struct device *dev, int flag);
570 static void de4x5_put_cache(struct device *dev, struct sk_buff *skb);
571 static void de4x5_putb_cache(struct device *dev, struct sk_buff *skb);
572 static struct sk_buff *de4x5_get_cache(struct device *dev);
573 static void de4x5_setup_intr(struct device *dev);
574 static void de4x5_init_connection(struct device *dev);
575 static int de4x5_reset_phy(struct device *dev);
576 static void reset_init_sia(struct device *dev, s32 sicr, s32 strr, s32 sigr);
577 static int test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec);
578 static int test_tp(struct device *dev, s32 msec);
579 static int EISA_signature(char *name, s32 eisa_id);
580 static int PCI_signature(char *name, struct bus_type *lp);
581 static void DevicePresent(u_long iobase);
582 static int de4x5_bad_srom(struct bus_type *lp);
583 static short srom_rd(u_long address, u_char offset);
584 static void srom_latch(u_int command, u_long address);
585 static void srom_command(u_int command, u_long address);
586 static void srom_address(u_int command, u_long address, u_char offset);
587 static short srom_data(u_int command, u_long address);
588
589 static void sendto_srom(u_int command, u_long addr);
590 static int getfrom_srom(u_long addr);
591 static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr);
592 static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr);
593 static int mii_rdata(u_long ioaddr);
594 static void mii_wdata(int data, int len, u_long ioaddr);
595 static void mii_ta(u_long rw, u_long ioaddr);
596 static int mii_swap(int data, int len);
597 static void mii_address(u_char addr, u_long ioaddr);
598 static void sendto_mii(u32 command, int data, u_long ioaddr);
599 static int getfrom_mii(u32 command, u_long ioaddr);
600 static int mii_get_oui(u_char phyaddr, u_long ioaddr);
601 static int mii_get_phy(struct device *dev);
602 static void SetMulticastFilter(struct device *dev);
603 static int get_hw_addr(struct device *dev);
604
605 static void eisa_probe(struct device *dev, u_long iobase);
606 static void pci_probe(struct device *dev, u_long iobase);
607 static struct device *alloc_device(struct device *dev, u_long iobase);
608 static char *build_setup_frame(struct device *dev, int mode);
609 static void disable_ast(struct device *dev);
610 static void enable_ast(struct device *dev, u32 time_out);
611 static long de4x5_switch_to_srl(struct device *dev);
612 static long de4x5_switch_to_mii(struct device *dev);
613 static void timeout(struct device *dev, void (*fn)(u_long data), u_long data, u_long msec);
614 static void de4x5_dbg_open(struct device *dev);
615 static void de4x5_dbg_mii(struct device *dev, int k);
616 static void de4x5_dbg_media(struct device *dev);
617 static void de4x5_dbg_srom(struct de4x5_srom *p);
618 static int de4x5_strncmp(char *a, char *b, int n);
619
620 #ifdef MODULE
621 int init_module(void);
622 void cleanup_module(void);
623 static int autoprobed = 0, loading_module = 1;
624 # else
625 static int autoprobed = 0, loading_module = 0;
626 #endif
627
628 static char name[DE4X5_NAME_LENGTH + 1];
629 static int num_de4x5s = 0, num_eth = 0;
630
631
632
633
634 #define RESET_DE4X5 {\
635 int i;\
636 i=inl(DE4X5_BMR);\
637 de4x5_ms_delay(1);\
638 outl(i | BMR_SWR, DE4X5_BMR);\
639 de4x5_ms_delay(1);\
640 outl(i, DE4X5_BMR);\
641 de4x5_ms_delay(1);\
642 for (i=0;i<5;i++) {inl(DE4X5_BMR); de4x5_ms_delay(1);}\
643 de4x5_ms_delay(1);\
644 }
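/*
** Added commentary: RESET_DE4X5 saves the current bus mode register, pulses
** the software reset bit (BMR_SWR), restores the original bus mode and then
** performs a few dummy BMR reads with 1ms delays so the chip has settled
** before any other CSR is touched.
*/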
645
646
647
648
649
650
651
652 int de4x5_probe(struct device *dev)
653 {
654 int tmp = num_de4x5s, status = -ENODEV;
655 u_long iobase = dev->base_addr;
656
657 eisa_probe(dev, iobase);
658 pci_probe(dev, iobase);
659
660 if ((tmp == num_de4x5s) && (iobase != 0) && loading_module) {
661 printk("%s: de4x5_probe() cannot find device at 0x%04lx.\n", dev->name,
662 iobase);
663 }
664
665
666
667
668
669 for (; (dev->priv == NULL) && (dev->next != NULL); dev = dev->next);
670
671 if (dev->priv) status = 0;
672 if (iobase == 0) autoprobed = 1;
673
674 return status;
675 }
676
677 static int
678 de4x5_hw_init(struct device *dev, u_long iobase)
679 {
680 struct bus_type *lp = &bus;
681 int tmpbus, tmpchs, status=0;
682 int i, media = *((char *)&(lp->srom) + *((char *)&(lp->srom) + 19) * 3);
683 char *tmp;
684
685
686 if (lp->chipset == DC21041) {
687 outl(0, PCI_CFDA);
688 de4x5_ms_delay(10);
689 }
690
691 RESET_DE4X5;
692
693 if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
694 return -ENXIO;
695 }
696
697
698
699
700 if (lp->bus == PCI) {
701 PCI_signature(name, lp);
702 } else {
703 EISA_signature(name, EISA_ID0);
704 }
705
706 if (*name == '\0') {
707 return -ENXIO;
708 }
709
710 dev->base_addr = iobase;
711 if (lp->bus == EISA) {
712 printk("%s: %s at %04lx (EISA slot %ld)",
713 dev->name, name, iobase, ((iobase>>12)&0x0f));
714 } else {
715 printk("%s: %s at %04lx (PCI bus %d, device %d)", dev->name, name,
716 iobase, lp->bus_num, lp->device);
717 }
718
719 printk(", h/w address ");
720 status = get_hw_addr(dev);
721 for (i = 0; i < ETH_ALEN - 1; i++) {
722 printk("%2.2x:", dev->dev_addr[i]);
723 }
724 printk("%2.2x,\n", dev->dev_addr[i]);
725
726 tmpbus = lp->bus;
727 tmpchs = lp->chipset;
728
729 if (status != 0) {
730 printk(" which has an Ethernet PROM CRC error.\n");
731 return -ENXIO;
732 } else {
733 struct de4x5_private *lp;
734
735
736
737
738
739 dev->priv = (void *) kmalloc(sizeof(struct de4x5_private) + ALIGN,
740 GFP_KERNEL);
741 if (dev->priv == NULL) {
742 return -ENOMEM;
743 }
744
745
746
747
748 tmp = dev->priv;
749 dev->priv = (void *)(((u_long)dev->priv + ALIGN) & ~ALIGN);
750 lp = (struct de4x5_private *)dev->priv;
751 memset(dev->priv, 0, sizeof(struct de4x5_private));
752 lp->bus = tmpbus;
753 lp->chipset = tmpchs;
754 lp->cache.priv = tmp;
755
756
757
758
759 if (media & MEDIA_MII) {
760 if (!mii_get_phy(dev)) {
761 printk("%s: MII search failed, no device found when one was expected\n", dev->name);
762 return -ENXIO;
763 }
764 } else {
765 mii_get_phy(dev);
766 }
767
768
769
770
771 if (de4x5_autosense & AUTO) {
772 lp->autosense = AUTO;
773 } else {
774 if (lp->chipset != DC21140) {
775 if ((lp->chipset == DC21040) && (de4x5_autosense & TP_NW)) {
776 de4x5_autosense = TP;
777 }
778 if ((lp->chipset == DC21041) && (de4x5_autosense & BNC_AUI)) {
779 de4x5_autosense = BNC;
780 }
781 lp->autosense = de4x5_autosense & 0x001f;
782 } else {
783 lp->autosense = de4x5_autosense & 0x00c0;
784 }
785 }
786
787 sprintf(lp->adapter_name,"%s (%s)", name, dev->name);
788
789
790
791
792 if ((tmp = (void *)kmalloc(RX_BUFF_SZ * NUM_RX_DESC + ALIGN,
793 GFP_KERNEL)) != NULL) {
794 lp->cache.buf = tmp;
795 tmp = (char *)(((u_long) tmp + ALIGN) & ~ALIGN);
796 for (i=0; i<NUM_RX_DESC; i++) {
797 lp->rx_ring[i].status = 0;
798 lp->rx_ring[i].des1 = RX_BUFF_SZ;
799 lp->rx_ring[i].buf = virt_to_bus(tmp + i * RX_BUFF_SZ);
800 lp->rx_ring[i].next = (u32)NULL;
801 }
802 barrier();
803
804 request_region(iobase, (lp->bus == PCI ? DE4X5_PCI_TOTAL_SIZE :
805 DE4X5_EISA_TOTAL_SIZE),
806 lp->adapter_name);
807
808 lp->rxRingSize = NUM_RX_DESC;
809 lp->txRingSize = NUM_TX_DESC;
810
811
812 lp->rx_ring[lp->rxRingSize - 1].des1 |= RD_RER;
813 lp->tx_ring[lp->txRingSize - 1].des1 |= TD_TER;
814
815
816 outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
817 outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
818
819
820 lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM ;
821 lp->irq_en = IMR_NIM | IMR_AIM;
822
823
824 create_packet(dev, lp->frame, sizeof(lp->frame));
825
826
827 lp->state = CLOSED;
828
829 printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
830 ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
831 } else {
832 printk("%s: Kernel could not allocate RX buffer memory.\n", dev->name);
833 }
834 if (status) {
835 release_region(iobase, (lp->bus == PCI ?
836 DE4X5_PCI_TOTAL_SIZE :
837 DE4X5_EISA_TOTAL_SIZE));
838 if (lp->rx_ring[0].buf) {
839 kfree(bus_to_virt(lp->rx_ring[0].buf));
840 }
841 kfree(dev->priv);
842 dev->priv = NULL;
843
844 return -ENXIO;
845 }
846 }
847
848 if (de4x5_debug > 0) {
849 printk(version);
850 }
851
852
853 dev->open = &de4x5_open;
854 dev->hard_start_xmit = &de4x5_queue_pkt;
855 dev->stop = &de4x5_close;
856 dev->get_stats = &de4x5_get_stats;
857 dev->set_multicast_list = &set_multicast_list;
858 dev->do_ioctl = &de4x5_ioctl;
859
860 dev->mem_start = 0;
861
862
863 ether_setup(dev);
864
865
866 if (lp->chipset == DC21041) {
867 outl(0, DE4X5_SICR);
868 outl(CFDA_PSM, PCI_CFDA);
869 }
870
871 return status;
872 }
873
874
875 static int
876 de4x5_open(struct device *dev)
877 {
878 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
879 u_long iobase = dev->base_addr;
880 int status = 0;
881 s32 omr;
882
883
884
885
886 if (lp->chipset == DC21041) {
887 outl(0, PCI_CFDA);
888 de4x5_ms_delay(10);
889 }
890
891 lp->state = OPEN;
892
893
894
895
896 status = de4x5_init(dev);
897
898 de4x5_dbg_open(dev);
899
900 if (request_irq(dev->irq, (void *)de4x5_interrupt, 0, lp->adapter_name, dev)) {
901 printk("de4x5_open(): Requested IRQ%d is busy\n",dev->irq);
902 status = -EAGAIN;
903 } else {
904 dev->tbusy = 0;
905 dev->start = 1;
906 dev->interrupt = UNMASK_INTERRUPTS;
907 dev->trans_start = jiffies;
908
909 START_DE4X5;
910
911 de4x5_setup_intr(dev);
912 }
913
914 if (de4x5_debug > 1) {
915 printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
916 printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
917 printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
918 printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
919 printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
920 printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
921 printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
922 printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
923 }
924
925 MOD_INC_USE_COUNT;
926
927 return status;
928 }
929
930
931
932
933
934
935
936
937
938 static int
939 de4x5_init(struct device *dev)
940 {
941
942 set_bit(0, (void *)&dev->tbusy);
943
944 de4x5_sw_reset(dev);
945
946
947 autoconf_media(dev);
948
949 return 0;
950 }
951
952 static int de4x5_sw_reset(struct device *dev)
953 {
954 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
955 u_long iobase = dev->base_addr;
956 int i, j, status = 0;
957 s32 bmr, omr;
958
959
960 if (lp->phy[lp->active].id == 0) {
961 de4x5_switch_to_srl(dev);
962 } else {
963 de4x5_switch_to_mii(dev);
964 }
965
966
967
968
969
970
971 bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | CACHE_ALIGN;
972 outl(bmr, DE4X5_BMR);
973
974 omr = inl(DE4X5_OMR) & ~OMR_PR;
975 if (lp->chipset != DC21140) {
976 omr |= TR_96;
977 lp->setup_f = HASH_PERF;
978 } else {
979 omr |= OMR_SDP | OMR_SB | (!lp->phy[lp->active].id ? OMR_SF : 0);
980 lp->setup_f = PERFECT;
981 }
982 outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
983 outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
984
985 lp->rx_new = lp->rx_old = 0;
986 lp->tx_new = lp->tx_old = 0;
987
988 for (i = 0; i < lp->rxRingSize; i++) {
989 lp->rx_ring[i].status = R_OWN;
990 }
991
992 for (i = 0; i < lp->txRingSize; i++) {
993 lp->tx_ring[i].status = 0;
994 }
995
996 barrier();
997
998
999 SetMulticastFilter(dev);
1000
1001 if (lp->chipset != DC21140) {
1002 load_packet(dev, lp->setup_frame, HASH_F|TD_SET|SETUP_FRAME_LEN, NULL);
1003 } else {
1004 load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, NULL);
1005 }
1006 outl(omr|OMR_ST, DE4X5_OMR);
1007
1008
1009 sti();
1010 for (j=0, i=0;(i<500) && (j==0);i++) {
1011 udelay(1000);
1012 if (lp->tx_ring[lp->tx_new].status >= 0) j=1;
1013 }
1014 outl(omr, DE4X5_OMR);
1015
1016 if (j == 0) {
1017 printk("%s: Setup frame timed out, status %08x\n", dev->name,
1018 inl(DE4X5_STS));
1019 status = -EIO;
1020 }
1021
1022 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1023 lp->tx_old = lp->tx_new;
1024
1025 return status;
1026 }
1027
1028
1029
1030
1031 static int
1032 de4x5_queue_pkt(struct sk_buff *skb, struct device *dev)
1033 {
1034 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1035 u_long iobase = dev->base_addr;
1036 int status = 0;
1037
1038 if (skb == NULL) {
1039 dev_tint(dev);
1040 return 0;
1041 }
1042
1043 if (lp->tx_enable == NO) {
1044 de4x5_put_cache(dev, skb);
1045 return 0;
1046 }
1047
1048
1049
1050
1051
1052
1053 set_bit(0, (void*)&dev->tbusy);
1054 cli();
1055 de4x5_tx(dev);
1056 sti();
1057
1058
1059 if (dev->tbusy || lp->skb[lp->tx_new]) {
1060 if (dev->interrupt) {
1061 de4x5_putb_cache(dev, skb);
1062 } else {
1063 de4x5_put_cache(dev, skb);
1064 }
1065 if (de4x5_debug > 1) {
1066 printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%ld\n lostMedia:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), dev->tbusy, lp->lostMedia, inl(DE4X5_IMR), inl(DE4X5_OMR), (lp->skb[lp->tx_new] ? "YES" : "NO"));
1067 }
1068 } else if (skb->len > 0) {
1069
1070 if (lp->cache.skb && !dev->interrupt) {
1071 de4x5_put_cache(dev, skb);
1072 skb = de4x5_get_cache(dev);
1073 }
1074
1075 while (skb && !dev->tbusy && !lp->skb[lp->tx_new]) {
1076 set_bit(0, (void*)&dev->tbusy);
1077 cli();
1078 if (TX_BUFFS_AVAIL) {
1079 load_packet(dev, skb->data,
1080 TD_IC | TD_LS | TD_FS | skb->len, skb);
1081 outl(POLL_DEMAND, DE4X5_TPD);
1082
1083 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1084 dev->trans_start = jiffies;
1085
1086 if (TX_BUFFS_AVAIL) {
1087 dev->tbusy = 0;
1088 }
1089 skb = de4x5_get_cache(dev);
1090 }
1091 sti();
1092 }
1093 if (skb && (dev->tbusy || lp->skb[lp->tx_new])) {
1094 de4x5_putb_cache(dev, skb);
1095 }
1096 }
1097
1098 return status;
1099 }
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112 static void
1113 de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1114 {
1115 struct device *dev = (struct device *)dev_id;
1116 struct de4x5_private *lp;
1117 s32 imr, omr, sts;
1118 u_long iobase;
1119
1120 if (dev == NULL) {
1121 printk ("de4x5_interrupt(): irq %d for unknown device.\n", irq);
1122 return;
1123 }
1124 lp = (struct de4x5_private *)dev->priv;
1125 iobase = dev->base_addr;
1126
1127 if (dev->interrupt)
1128 printk("%s: Re-entering the interrupt handler.\n", dev->name);
1129
1130 DISABLE_IRQs;
1131 dev->interrupt = MASK_INTERRUPTS;
1132
1133 for (;;) {
1134 sts = inl(DE4X5_STS);
1135 outl(sts, DE4X5_STS);
1136
1137 if (!(sts & lp->irq_mask)) break;
1138
1139 if (sts & (STS_RI | STS_RU))
1140 de4x5_rx(dev);
1141
1142 if (sts & (STS_TI | STS_TU))
1143 de4x5_tx(dev);
1144
1145 if (sts & STS_LNF) {
1146 lp->lostMedia = LOST_MEDIA_THRESHOLD + 1;
1147 lp->irq_mask &= ~IMR_LFM;
1148 }
1149
1150 if (sts & STS_SE) {
1151 STOP_DE4X5;
1152 printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
1153 dev->name, sts);
1154 return;
1155 }
1156 }
1157
1158
1159 while (lp->cache.skb && !dev->tbusy && lp->tx_enable) {
1160 de4x5_queue_pkt(de4x5_get_cache(dev), dev);
1161 }
1162
1163 dev->interrupt = UNMASK_INTERRUPTS;
1164 ENABLE_IRQs;
1165
1166 return;
1167 }
1168
1169 static int
1170 de4x5_rx(struct device *dev)
1171 {
1172 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1173 int i, entry;
1174 s32 status;
1175 char *buf;
1176
1177 for (entry=lp->rx_new; lp->rx_ring[entry].status>=0;entry=lp->rx_new) {
1178 status = lp->rx_ring[entry].status;
1179
1180 if (status & RD_FS) {
1181 lp->rx_old = entry;
1182 }
1183
1184 if (status & RD_LS) {
1185 lp->linkOK++;
1186 if (status & RD_ES) {
1187 lp->stats.rx_errors++;
1188 if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
1189 if (status & RD_CE) lp->stats.rx_crc_errors++;
1190 if (status & RD_OF) lp->stats.rx_fifo_errors++;
1191 if (status & RD_TL) lp->stats.rx_length_errors++;
1192 if (status & RD_RF) lp->pktStats.rx_runt_frames++;
1193 if (status & RD_CS) lp->pktStats.rx_collision++;
1194 if (status & RD_DB) lp->pktStats.rx_dribble++;
1195 if (status & RD_OF) lp->pktStats.rx_overflow++;
1196 } else {
1197 struct sk_buff *skb;
1198 short pkt_len = (short)(lp->rx_ring[entry].status >> 16) - 4;
1199
1200 if ((skb = dev_alloc_skb(pkt_len+2)) == NULL) {
1201 printk("%s: Insufficient memory; nuking packet.\n",
1202 dev->name);
1203 lp->stats.rx_dropped++;
1204 break;
1205 }
1206
1207 skb->dev = dev;
1208 skb_reserve(skb,2);
1209 if (entry < lp->rx_old) {
1210 short len = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
1211 memcpy(skb_put(skb,len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), len);
1212 memcpy(skb_put(skb,pkt_len-len), bus_to_virt(lp->rx_ring[0].buf), pkt_len - len);
1213 } else {
1214 memcpy(skb_put(skb,pkt_len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), pkt_len);
1215 }
1216
1217
1218 skb->protocol=eth_type_trans(skb,dev);
1219 netif_rx(skb);
1220
1221
1222 lp->stats.rx_packets++;
1223 for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
1224 if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
1225 lp->pktStats.bins[i]++;
1226 i = DE4X5_PKT_STAT_SZ;
1227 }
1228 }
1229 buf = skb->data;
1230 if (buf[0] & 0x01) {
1231 if ((*(s32 *)&buf[0] == -1) && (*(s16 *)&buf[4] == -1)) {
1232 lp->pktStats.broadcast++;
1233 } else {
1234 lp->pktStats.multicast++;
1235 }
1236 } else if ((*(s32 *)&buf[0] == *(s32 *)&dev->dev_addr[0]) &&
1237 (*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) {
1238 lp->pktStats.unicast++;
1239 }
1240
1241 lp->pktStats.bins[0]++;
1242 if (lp->pktStats.bins[0] == 0) {
1243 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
1244 }
1245 }
1246
1247
1248 for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old+1)%lp->rxRingSize) {
1249 lp->rx_ring[lp->rx_old].status = R_OWN;
1250 barrier();
1251 }
1252 lp->rx_ring[entry].status = R_OWN;
1253 barrier();
1254 }
1255
1256
1257
1258
1259 lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
1260 }
1261
1262 return 0;
1263 }
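/*
** Added commentary: in de4x5_rx() above, RD_FS marks the descriptor where a
** frame starts (remembered in rx_old) and RD_LS the one where it ends.  When
** the frame wraps past the end of the ring (entry < rx_old) the copy into
** the new skb is made in two pieces, and afterwards every descriptor the
** frame occupied is handed back to the chip with R_OWN.
*/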
1264
1265
1266
1267
1268 static int
1269 de4x5_tx(struct device *dev)
1270 {
1271 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1272 u_long iobase = dev->base_addr;
1273 int entry;
1274 s32 status;
1275
1276 for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
1277 status = lp->tx_ring[entry].status;
1278 if (status < 0) {
1279 break;
1280 } else if (status & TD_ES) {
1281 lp->stats.tx_errors++;
1282 if (status & TD_NC) lp->stats.tx_carrier_errors++;
1283 if (status & TD_LC) lp->stats.tx_window_errors++;
1284 if (status & TD_UF) lp->stats.tx_fifo_errors++;
1285 if (status & TD_LC) lp->stats.collisions++;
1286 if (status & TD_EC) lp->pktStats.excessive_collisions++;
1287 if (status & TD_DE) lp->stats.tx_aborted_errors++;
1288
1289 if ((status != 0x7fffffff) &&
1290 (status & (TD_LO | TD_NC | TD_EC | TD_LF))) {
1291 lp->lostMedia++;
1292 } else {
1293 outl(POLL_DEMAND, DE4X5_TPD);
1294 }
1295 } else {
1296 lp->stats.tx_packets++;
1297 lp->lostMedia = 0;
1298 }
1299
1300 if (lp->skb[entry] != NULL) {
1301 dev_kfree_skb(lp->skb[entry], FREE_WRITE);
1302 lp->skb[entry] = NULL;
1303 }
1304
1305
1306 lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
1307 }
1308
1309 if (TX_BUFFS_AVAIL && dev->tbusy) {
1310 dev->tbusy = 0;
1311 if (dev->interrupt) mark_bh(NET_BH);
1312 }
1313
1314 return 0;
1315 }
1316
1317 static int
1318 de4x5_ast(struct device *dev)
1319 {
1320 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1321 int next_tick = DE4X5_AUTOSENSE_MS;
1322
1323 disable_ast(dev);
1324
1325 if (lp->chipset == DC21140) {
1326 next_tick = dc21140m_autoconf(dev);
1327 } else if (lp->chipset == DC21041) {
1328 next_tick = dc21041_autoconf(dev);
1329 } else if (lp->chipset == DC21040) {
1330 next_tick = dc21040_autoconf(dev);
1331 }
1332 lp->linkOK = 0;
1333 enable_ast(dev, next_tick);
1334
1335 return 0;
1336 }
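/*
** Added commentary: the autosense code below follows one convention -- each
** state routine returns the number of milliseconds until it wants to be
** polled again, and a return with TIMER_CB set (tested as 'ret < 0' by the
** callers, so TIMER_CB evidently carries the sign bit) means "still waiting,
** re-poll in (ret & ~TIMER_CB) ms".  de4x5_ast() just re-arms that timer
** around the chip-specific state machine.
*/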
1337
1338 static int
1339 de4x5_close(struct device *dev)
1340 {
1341 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1342 u_long iobase = dev->base_addr;
1343 s32 imr, omr;
1344
1345 disable_ast(dev);
1346 dev->start = 0;
1347 dev->tbusy = 1;
1348
1349 if (de4x5_debug > 1) {
1350 printk("%s: Shutting down ethercard, status was %8.8x.\n",
1351 dev->name, inl(DE4X5_STS));
1352 }
1353
1354
1355
1356
1357 DISABLE_IRQs;
1358
1359 STOP_DE4X5;
1360
1361
1362
1363
1364 free_irq(dev->irq, dev);
1365 lp->state = CLOSED;
1366
1367 MOD_DEC_USE_COUNT;
1368
1369
1370 if (lp->chipset == DC21041) {
1371 outl(0, DE4X5_SICR);
1372 outl(CFDA_PSM, PCI_CFDA);
1373 }
1374
1375 return 0;
1376 }
1377
1378 static struct enet_statistics *
1379 de4x5_get_stats(struct device *dev)
1380 {
1381 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1382 u_long iobase = dev->base_addr;
1383
1384 lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
1385
1386 return &lp->stats;
1387 }
1388
1389 static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb)
1390 {
1391 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1392
1393 lp->tx_ring[lp->tx_new].buf = virt_to_bus(buf);
1394 lp->tx_ring[lp->tx_new].des1 &= TD_TER;
1395 lp->tx_ring[lp->tx_new].des1 |= flags;
1396 lp->skb[lp->tx_new] = skb;
1397 barrier();
1398 lp->tx_ring[lp->tx_new].status = T_OWN;
1399 barrier();
1400
1401 return;
1402 }
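/*
** Added commentary: note the ordering in load_packet() -- the buffer address,
** des1 flags and skb pointer are written first, a barrier() is issued, and
** only then is T_OWN set (followed by another barrier) so the chip can never
** see a half-built descriptor.
*/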
1403
1404
1405
1406
1407 static void
1408 set_multicast_list(struct device *dev)
1409 {
1410 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1411 u_long iobase = dev->base_addr;
1412
1413
1414 if (lp->state == OPEN) {
1415 if (dev->flags & IFF_PROMISC) {
1416 u32 omr;
1417 omr = inl(DE4X5_OMR);
1418 omr |= OMR_PR;
1419 outl(omr, DE4X5_OMR);
1420 } else {
1421 SetMulticastFilter(dev);
1422 if (lp->setup_f == HASH_PERF) {
1423 load_packet(dev, lp->setup_frame, TD_IC | HASH_F | TD_SET |
1424 SETUP_FRAME_LEN, NULL);
1425 } else {
1426 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
1427 SETUP_FRAME_LEN, NULL);
1428 }
1429
1430 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1431 outl(POLL_DEMAND, DE4X5_TPD);
1432 dev->trans_start = jiffies;
1433 }
1434 }
1435
1436 return;
1437 }
1438
1439
1440
1441
1442
1443
1444 static void SetMulticastFilter(struct device *dev)
1445 {
1446 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1447 struct dev_mc_list *dmi=dev->mc_list;
1448 u_long iobase = dev->base_addr;
1449 int i, j, bit, byte;
1450 u16 hashcode;
1451 u32 omr, crc, poly = CRC_POLYNOMIAL_LE;
1452 char *pa;
1453 unsigned char *addrs;
1454
1455 omr = inl(DE4X5_OMR);
1456 omr &= ~(OMR_PR | OMR_PM);
1457 pa = build_setup_frame(dev, ALL);
1458
1459 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 14)) {
1460 omr |= OMR_PM;
1461 } else if (lp->setup_f == HASH_PERF) {
1462 for (i=0;i<dev->mc_count;i++) {
1463 addrs=dmi->dmi_addr;
1464 dmi=dmi->next;
1465 if ((*addrs & 0x01) == 1) {
1466 crc = 0xffffffff;
1467 for (byte=0;byte<ETH_ALEN;byte++) {
1468
1469 for (bit = *addrs++,j=0;j<8;j++, bit>>=1) {
1470 crc = (crc >> 1) ^ (((crc ^ bit) & 0x01) ? poly : 0);
1471 }
1472 }
1473 hashcode = crc & HASH_BITS;
1474
1475 byte = hashcode >> 3;
1476 bit = 1 << (hashcode & 0x07);
1477
1478 byte <<= 1;
1479 if (byte & 0x02) {
1480 byte -= 1;
1481 }
1482 lp->setup_frame[byte] |= bit;
1483 }
1484 }
1485 } else {
1486 for (j=0; j<dev->mc_count; j++) {
1487 addrs=dmi->dmi_addr;
1488 dmi=dmi->next;
1489 for (i=0; i<ETH_ALEN; i++) {
1490 *(pa + (i&1)) = *addrs++;
1491 if (i & 0x01) pa += 4;
1492 }
1493 }
1494 }
1495 outl(omr, DE4X5_OMR);
1496
1497 return;
1498 }
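/*
** Added commentary: the hash filter bit for a multicast address is derived
** from a little-endian CRC-32 over the six address bytes, as in the inner
** loop of SetMulticastFilter() above.  The sketch below (disabled, purely
** illustrative and not part of the original driver) restates that
** computation as a standalone helper; HASH_BITS is presumed to come from
** de4x5.h and de4x5_hash_sketch is a hypothetical name.
*/
#if 0
static u16 de4x5_hash_sketch(unsigned char *addr)
{
    u32 crc = 0xffffffff;                      /* CRC-32 seed                   */
    int byte, j, bit;

    for (byte = 0; byte < ETH_ALEN; byte++) {  /* feed in all 6 address bytes   */
        for (bit = addr[byte], j = 0; j < 8; j++, bit >>= 1) {
            crc = (crc >> 1) ^ (((crc ^ bit) & 0x01) ? CRC_POLYNOMIAL_LE : 0);
        }
    }

    return (u16)(crc & HASH_BITS);             /* low-order bits select the bin */
}
#endif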
1499
1500
1501
1502
1503
1504 static void eisa_probe(struct device *dev, u_long ioaddr)
1505 {
1506 int i, maxSlots, status;
1507 u_short vendor, device;
1508 s32 cfid;
1509 u_long iobase;
1510 struct bus_type *lp = &bus;
1511 char name[DE4X5_STRLEN];
1512
1513 if (!ioaddr && autoprobed) return;
1514
1515 lp->bus = EISA;
1516
1517 if (ioaddr == 0) {
1518 iobase = EISA_SLOT_INC;
1519 i = 1;
1520 maxSlots = MAX_EISA_SLOTS;
1521 } else {
1522 iobase = ioaddr;
1523 i = (ioaddr >> 12);
1524 maxSlots = i + 1;
1525 }
1526
1527 for (status = -ENODEV; (i<maxSlots) && (dev!=NULL); i++, iobase+=EISA_SLOT_INC) {
1528 if (EISA_signature(name, EISA_ID)) {
1529 cfid = inl(PCI_CFID);
1530 device = (u_short)(cfid >> 16);
1531 vendor = (u_short) cfid;
1532
1533 lp->chipset = device;
1534 DevicePresent(EISA_APROM);
1535
1536 outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
1537 outl(0x00004000, PCI_CFLT);
1538 outl(iobase, PCI_CBIO);
1539
1540 if (check_region(iobase, DE4X5_EISA_TOTAL_SIZE) == 0) {
1541 if ((dev = alloc_device(dev, iobase)) != NULL) {
1542 if ((status = de4x5_hw_init(dev, iobase)) == 0) {
1543 num_de4x5s++;
1544 }
1545 num_eth++;
1546 }
1547 } else if (autoprobed) {
1548 printk("%s: region already allocated at 0x%04lx.\n", dev->name,iobase);
1549 }
1550 }
1551 }
1552
1553 return;
1554 }
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568 #define PCI_DEVICE (dev_num << 3)
1569 #define PCI_LAST_DEV 32
1570
1571 static void pci_probe(struct device *dev, u_long ioaddr)
1572 {
1573 u_char irq;
1574 u_char pb, pbus, dev_num, dnum, dev_fn;
1575 u_short vendor, device, index, status;
1576 u_int class = DE4X5_CLASS_CODE;
1577 u_int iobase;
1578 struct bus_type *lp = &bus;
1579
1580 if (!ioaddr && autoprobed) return;
1581
1582 if (!pcibios_present()) return;
1583
1584 lp->bus = PCI;
1585
1586 if (ioaddr < 0x1000) {
1587 pbus = (u_short)(ioaddr >> 8);
1588 dnum = (u_short)(ioaddr & 0xff);
1589 } else {
1590 pbus = 0;
1591 dnum = 0;
1592 }
1593
1594 for (index=0;
1595 (pcibios_find_class(class, index, &pb, &dev_fn)!= PCIBIOS_DEVICE_NOT_FOUND);
1596 index++) {
1597 dev_num = PCI_SLOT(dev_fn);
1598
1599 if ((!pbus && !dnum) || ((pbus == pb) && (dnum == dev_num))) {
1600 pcibios_read_config_word(pb, PCI_DEVICE, PCI_VENDOR_ID, &vendor);
1601 pcibios_read_config_word(pb, PCI_DEVICE, PCI_DEVICE_ID, &device);
1602 if (!(is_DC21040 || is_DC21041 || is_DC21140)) continue;
1603
1604
1605 lp->device = dev_num;
1606 lp->bus_num = pb;
1607
1608
1609 lp->chipset = device;
1610
1611
1612 pcibios_read_config_dword(pb, PCI_DEVICE, PCI_BASE_ADDRESS_0, &iobase);
1613 iobase &= CBIO_MASK;
1614
1615
1616 pcibios_read_config_byte(pb, PCI_DEVICE, PCI_INTERRUPT_LINE, &irq);
1617 if ((irq == 0) || (irq == (u_char) 0xff)) continue;
1618
1619
1620 pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
1621 if (!(status & PCI_COMMAND_IO)) continue;
1622
1623 if (!(status & PCI_COMMAND_MASTER)) {
1624 status |= PCI_COMMAND_MASTER;
1625 pcibios_write_config_word(pb, PCI_DEVICE, PCI_COMMAND, status);
1626 pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
1627 }
1628 if (!(status & PCI_COMMAND_MASTER)) continue;
1629
1630 DevicePresent(DE4X5_APROM);
1631 if (check_region(iobase, DE4X5_PCI_TOTAL_SIZE) == 0) {
1632 if ((dev = alloc_device(dev, iobase)) != NULL) {
1633 dev->irq = irq;
1634 if ((status = de4x5_hw_init(dev, iobase)) == 0) {
1635 num_de4x5s++;
1636 }
1637 num_eth++;
1638 }
1639 } else if (autoprobed) {
1640 printk("%s: region already allocated at 0x%04x.\n", dev->name,
1641 (u_short)iobase);
1642 }
1643 }
1644 }
1645
1646 return;
1647 }
1648
1649
1650
1651
1652
1653 static struct device *alloc_device(struct device *dev, u_long iobase)
1654 {
1655 int addAutoProbe = 0;
1656 struct device *tmp = NULL, *ret;
1657 int (*init)(struct device *) = NULL;
1658
1659 if (loading_module) return dev;
1660
1661
1662
1663
1664 while (dev->next != NULL) {
1665 if ((dev->base_addr == DE4X5_NDA) || (dev->base_addr == 0)) break;
1666 dev = dev->next;
1667 num_eth++;
1668 }
1669
1670
1671
1672
1673
1674 if ((dev->base_addr == 0) && (num_de4x5s > 0)) {
1675 addAutoProbe++;
1676 tmp = dev->next;
1677 init = dev->init;
1678 }
1679
1680
1681
1682
1683
1684 if ((dev->next == NULL) &&
1685 !((dev->base_addr == DE4X5_NDA) || (dev->base_addr == 0))) {
1686 dev->next = (struct device *)kmalloc(sizeof(struct device)+8, GFP_KERNEL);
1687 dev = dev->next;
1688 if (dev == NULL) {
1689 printk("eth%d: Device not initialised, insufficient memory\n", num_eth);
1690 } else {
1691
1692
1693
1694
1695
1696 dev->name = (char *)(dev + 1);   /* name storage is the 8 bytes after the struct */
1697 if (num_eth > 9999) {
1698 sprintf(dev->name,"eth????");
1699 } else {
1700 sprintf(dev->name,"eth%d", num_eth);
1701 }
1702 dev->base_addr = iobase;
1703 dev->next = NULL;
1704 dev->init = &de4x5_probe;
1705 num_de4x5s++;
1706 }
1707 }
1708 ret = dev;
1709
1710
1711
1712
1713
1714 if (ret != NULL) {
1715 if (addAutoProbe) {
1716 for (; (tmp->next!=NULL) && (tmp->base_addr!=DE4X5_NDA); tmp=tmp->next);
1717
1718
1719
1720
1721
1722 if ((tmp->next == NULL) && !(tmp->base_addr == DE4X5_NDA)) {
1723 tmp->next = (struct device *)kmalloc(sizeof(struct device) + 8,
1724 GFP_KERNEL);
1725 tmp = tmp->next;
1726 if (tmp == NULL) {
1727 printk("%s: Insufficient memory to extend the device list.\n",
1728 dev->name);
1729 } else {
1730
1731
1732
1733
1734
1735 tmp->name = (char *)(tmp + 1);   /* name storage is the 8 bytes after the struct */
1736 if (num_eth > 9999) {
1737 sprintf(tmp->name,"eth????");
1738 } else {
1739 sprintf(tmp->name,"eth%d", num_eth);
1740 }
1741 tmp->base_addr = 0;
1742 tmp->next = NULL;
1743 tmp->init = init;
1744 }
1745 } else {
1746 tmp->base_addr = 0;
1747 }
1748 }
1749 }
1750
1751 return ret;
1752 }
1753
1754
1755
1756
1757
1758
1759
1760
1761 static int autoconf_media(struct device *dev)
1762 {
1763 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1764 u_long iobase = dev->base_addr;
1765 int next_tick = DE4X5_AUTOSENSE_MS;;
1766
1767 lp->linkOK = 0;
1768 lp->c_media = AUTO;
1769 disable_ast(dev);
1770 inl(DE4X5_MFC);
1771 lp->media = INIT;
1772 if (lp->chipset == DC21040) {
1773 next_tick = dc21040_autoconf(dev);
1774 } else if (lp->chipset == DC21041) {
1775 next_tick = dc21041_autoconf(dev);
1776 } else if (lp->chipset == DC21140) {
1777 next_tick = dc21140m_autoconf(dev);
1778 }
1779 if (lp->autosense == AUTO) enable_ast(dev, next_tick);
1780
1781 return (lp->media);
1782 }
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796 static int dc21040_autoconf(struct device *dev)
1797 {
1798 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1799 u_long iobase = dev->base_addr;
1800 int next_tick = DE4X5_AUTOSENSE_MS;
1801 s32 imr;
1802
1803 switch (lp->media) {
1804 case INIT:
1805 DISABLE_IRQs;
1806 lp->tx_enable = NO;
1807 lp->timeout = -1;
1808 de4x5_save_skbs(dev);
1809 if ((lp->autosense == AUTO) || (lp->autosense == TP)) {
1810 lp->media = TP;
1811 } else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) {
1812 lp->media = BNC_AUI;
1813 } else if (lp->autosense == EXT_SIA) {
1814 lp->media = EXT_SIA;
1815 } else {
1816 lp->media = NC;
1817 }
1818 lp->local_state = 0;
1819 next_tick = dc21040_autoconf(dev);
1820 break;
1821
1822 case TP:
1823 dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
1824 TP_SUSPECT, test_tp);
1825 break;
1826
1827 case TP_SUSPECT:
1828 de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
1829 break;
1830
1831 case BNC:
1832 case AUI:
1833 case BNC_AUI:
1834 dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
1835 BNC_AUI_SUSPECT, ping_media);
1836 break;
1837
1838 case BNC_AUI_SUSPECT:
1839 de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
1840 break;
1841
1842 case EXT_SIA:
1843 dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
1844 NC, EXT_SIA_SUSPECT, ping_media);
1845 break;
1846
1847 case EXT_SIA_SUSPECT:
1848 de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
1849 break;
1850
1851 case NC:
1852 #ifndef __alpha__
1853 reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
1854 #else
1855
1856 reset_init_sia(dev, 0x8f09, 0x0705, 0x0006);
1857 #endif
1858 de4x5_dbg_media(dev);
1859 lp->media = INIT;
1860 lp->tx_enable = NO;
1861 break;
1862 }
1863
1864 return next_tick;
1865 }
1866
1867 static int
1868 dc21040_state(struct device *dev, int csr13, int csr14, int csr15, int timeout,
1869 int next_state, int suspect_state,
1870 int (*fn)(struct device *, int))
1871 {
1872 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1873 int next_tick = DE4X5_AUTOSENSE_MS;
1874 int linkBad;
1875
1876 switch (lp->local_state) {
1877 case 0:
1878 reset_init_sia(dev, csr13, csr14, csr15);
1879 lp->local_state++;
1880 next_tick = 500;
1881 break;
1882
1883 case 1:
1884 if (!lp->tx_enable) {
1885 linkBad = fn(dev, timeout);
1886 if (linkBad < 0) {
1887 next_tick = linkBad & ~TIMER_CB;
1888 } else {
1889 if (linkBad && (lp->autosense == AUTO)) {
1890 lp->local_state = 0;
1891 lp->media = next_state;
1892 } else {
1893 de4x5_init_connection(dev);
1894 }
1895 }
1896 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
1897 lp->media = suspect_state;
1898 next_tick = 3000;
1899 }
1900 break;
1901 }
1902
1903 return next_tick;
1904 }
1905
1906 static int
1907 de4x5_suspect_state(struct device *dev, int timeout, int prev_state,
1908 int (*fn)(struct device *, int),
1909 int (*asfn)(struct device *))
1910 {
1911 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1912 int next_tick = DE4X5_AUTOSENSE_MS;
1913 int linkBad;
1914
1915 switch (lp->local_state) {
1916 case 1:
1917 if (lp->linkOK && !LOST_MEDIA) {
1918 lp->media = prev_state;
1919 } else {
1920 lp->local_state++;
1921 next_tick = asfn(dev);
1922 }
1923 break;
1924
1925 case 2:
1926 linkBad = fn(dev, timeout);
1927 if (linkBad < 0) {
1928 next_tick = linkBad & ~TIMER_CB;
1929 } else if (!linkBad) {
1930 lp->local_state--;
1931 lp->media = prev_state;
1932 } else {
1933 lp->media = INIT;
1934 }
1935 }
1936
1937 return next_tick;
1938 }
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949 static int dc21041_autoconf(struct device *dev)
1950 {
1951 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1952 u_long iobase = dev->base_addr;
1953 s32 sts, irqs, irq_mask, imr, omr;
1954 int next_tick = DE4X5_AUTOSENSE_MS;
1955
1956 switch (lp->media) {
1957 case INIT:
1958 DISABLE_IRQs;
1959 lp->tx_enable = NO;
1960 lp->timeout = -1;
1961 de4x5_save_skbs(dev);
1962 if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) {
1963 lp->media = TP;
1964 } else if (lp->autosense == TP) {
1965 lp->media = TP;
1966 } else if (lp->autosense == BNC) {
1967 lp->media = BNC;
1968 } else if (lp->autosense == AUI) {
1969 lp->media = AUI;
1970 } else {
1971 lp->media = NC;
1972 }
1973 lp->local_state = 0;
1974 next_tick = dc21041_autoconf(dev);
1975 break;
1976
1977 case TP_NW:
1978 if (lp->timeout < 0) {
1979 omr = inl(DE4X5_OMR);
1980 outl(omr | OMR_FD, DE4X5_OMR);
1981 }
1982 irqs = STS_LNF | STS_LNP;
1983 irq_mask = IMR_LFM | IMR_LPM;
1984 sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
1985 if (sts < 0) {
1986 next_tick = sts & ~TIMER_CB;
1987 } else {
1988 if (sts & STS_LNP) {
1989 lp->media = ANS;
1990 } else {
1991 lp->media = AUI;
1992 }
1993 next_tick = dc21041_autoconf(dev);
1994 }
1995 break;
1996
1997 case ANS:
1998 if (!lp->tx_enable) {
1999 irqs = STS_LNP;
2000 irq_mask = IMR_LPM;
2001 sts = test_ans(dev, irqs, irq_mask, 3000);
2002 if (sts < 0) {
2003 next_tick = sts & ~TIMER_CB;
2004 } else {
2005 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
2006 lp->media = TP;
2007 next_tick = dc21041_autoconf(dev);
2008 } else {
2009 de4x5_init_connection(dev);
2010 }
2011 }
2012 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2013 lp->media = ANS_SUSPECT;
2014 next_tick = 3000;
2015 }
2016 break;
2017
2018 case ANS_SUSPECT:
2019 de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
2020 break;
2021
2022 case TP:
2023 if (!lp->tx_enable) {
2024 if (lp->timeout < 0) {
2025 omr = inl(DE4X5_OMR);
2026 outl(omr & ~OMR_FD, DE4X5_OMR);
2027 }
2028 irqs = STS_LNF | STS_LNP;
2029 irq_mask = IMR_LFM | IMR_LPM;
2030 sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
2031 if (sts < 0) {
2032 next_tick = sts & ~TIMER_CB;
2033 } else {
2034 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
2035 if (inl(DE4X5_SISR) & SISR_NRA) {
2036 lp->media = AUI;
2037 } else {
2038 lp->media = BNC;
2039 }
2040 next_tick = dc21041_autoconf(dev);
2041 } else {
2042 de4x5_init_connection(dev);
2043 }
2044 }
2045 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2046 lp->media = TP_SUSPECT;
2047 next_tick = 3000;
2048 }
2049 break;
2050
2051 case TP_SUSPECT:
2052 de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
2053 break;
2054
2055 case AUI:
2056 if (!lp->tx_enable) {
2057 if (lp->timeout < 0) {
2058 omr = inl(DE4X5_OMR);
2059 outl(omr & ~OMR_FD, DE4X5_OMR);
2060 }
2061 irqs = 0;
2062 irq_mask = 0;
2063 sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf7fd, 0x000e, 1000);
2064 if (sts < 0) {
2065 next_tick = sts & ~TIMER_CB;
2066 } else {
2067 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
2068 lp->media = BNC;
2069 next_tick = dc21041_autoconf(dev);
2070 } else {
2071 de4x5_init_connection(dev);
2072 }
2073 }
2074 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2075 lp->media = AUI_SUSPECT;
2076 next_tick = 3000;
2077 }
2078 break;
2079
2080 case AUI_SUSPECT:
2081 de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
2082 break;
2083
2084 case BNC:
2085 switch (lp->local_state) {
2086 case 0:
2087 if (lp->timeout < 0) {
2088 omr = inl(DE4X5_OMR);
2089 outl(omr & ~OMR_FD, DE4X5_OMR);
2090 }
2091 irqs = 0;
2092 irq_mask = 0;
2093 sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf7fd, 0x0006, 1000);
2094 if (sts < 0) {
2095 next_tick = sts & ~TIMER_CB;
2096 } else {
2097 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
2098 lp->media = NC;
2099 } else {
2100 lp->local_state++;
2101 next_tick = dc21041_autoconf(dev);
2102 }
2103 }
2104 break;
2105
2106 case 1:
2107 if (!lp->tx_enable) {
2108 if ((sts = ping_media(dev, 3000)) < 0) {
2109 next_tick = sts & ~TIMER_CB;
2110 } else {
2111 if (sts) {
2112 lp->local_state = 0;
2113 lp->media = NC;
2114 } else {
2115 de4x5_init_connection(dev);
2116 }
2117 }
2118 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2119 lp->media = BNC_SUSPECT;
2120 next_tick = 3000;
2121 }
2122 break;
2123 }
2124 break;
2125
2126 case BNC_SUSPECT:
2127 de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
2128 break;
2129
2130 case NC:
2131 omr = inl(DE4X5_OMR);
2132 outl(omr | OMR_FD, DE4X5_OMR);
2133 reset_init_sia(dev, 0xef01, 0xffff, 0x0008);
2134 de4x5_dbg_media(dev);
2135 lp->media = INIT;
2136 lp->tx_enable = NO;
2137 break;
2138 }
2139
2140 return next_tick;
2141 }
2142
2143 static int dc21140m_autoconf(struct device *dev)
2144 {
2145 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2146 int ana, anlpa, cap, cr, sr, iobase = dev->base_addr;
2147 int next_tick = DE4X5_AUTOSENSE_MS;
2148 u_long imr, omr;
2149
2150 switch(lp->media) {
2151 case INIT:
2152 DISABLE_IRQs;
2153 lp->tx_enable = FALSE;
2154 lp->timeout = -1;
2155 if ((next_tick = de4x5_reset_phy(dev)) < 0) {
2156 next_tick &= ~TIMER_CB;
2157 } else {
2158 de4x5_save_skbs(dev);
2159 SET_10Mb;
2160 if (lp->autosense == _100Mb) {
2161 lp->media = _100Mb;
2162 } else if (lp->autosense == _10Mb) {
2163 lp->media = _10Mb;
2164 } else if ((lp->autosense == AUTO) && (sr=is_anc_capable(dev))) {
2165 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
2166 ana &= (de4x5_full_duplex ? ~0 : ~MII_ANA_FDAM);
2167 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2168 lp->media = ANS;
2169 } else if (lp->autosense == AUTO) {
2170 lp->media = SPD_DET;
2171 } else if (is_spd_100(dev) && is_100_up(dev)) {
2172 lp->media = _100Mb;
2173 } else {
2174 lp->media = NC;
2175 }
2176 lp->local_state = 0;
2177 next_tick = dc21140m_autoconf(dev);
2178 }
2179 break;
2180
2181 case ANS:
2182 switch (lp->local_state) {
2183 case 0:
2184 if (lp->timeout < 0) {
2185 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2186 }
2187 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, FALSE, 500);
2188 if (cr < 0) {
2189 next_tick = cr & ~TIMER_CB;
2190 } else {
2191 if (cr) {
2192 lp->local_state = 0;
2193 lp->media = SPD_DET;
2194 } else {
2195 lp->local_state++;
2196 }
2197 next_tick = dc21140m_autoconf(dev);
2198 }
2199 break;
2200
2201 case 1:
2202 if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 3000)) < 0) {
2203 next_tick = sr & ~TIMER_CB;
2204 } else {
2205 lp->media = SPD_DET;
2206 lp->local_state = 0;
2207 if (sr) {
2208 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
2209 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2210 if ((anlpa & MII_ANLPA_ACK) && !(anlpa & MII_ANLPA_RF) &&
2211 (cap = anlpa & MII_ANLPA_TAF & ana)) {
2212 if (cap & MII_ANA_100M) {
2213 de4x5_full_duplex = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE);
2214 lp->media = _100Mb;
2215 } else if (cap & MII_ANA_10M) {
2216 de4x5_full_duplex = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) ? TRUE : FALSE);
2217 lp->media = _10Mb;
2218 }
2219 }
2220 }
2221 next_tick = dc21140m_autoconf(dev);
2222 }
2223 break;
2224 }
2225 break;
2226
2227 case SPD_DET:
2228 if (!lp->phy[lp->active].id) {
2229 outl(GEP_FDXD | GEP_MODE, DE4X5_GEP);
2230 }
2231 if (is_spd_100(dev) && is_100_up(dev)) {
2232 lp->media = _100Mb;
2233 } else if (!is_spd_100(dev) && is_10_up(dev)) {
2234 lp->media = _10Mb;
2235 } else {
2236 lp->media = NC;
2237 }
2238 next_tick = dc21140m_autoconf(dev);
2239 break;
2240
2241 case _100Mb:
2242 next_tick = 3000;
2243 if (!lp->tx_enable) {
2244 SET_100Mb;
2245 de4x5_init_connection(dev);
2246 } else {
2247 if (!lp->linkOK && (lp->autosense == AUTO)) {
2248 if (!(is_spd_100(dev) && is_100_up(dev))) {
2249 lp->media = INIT;
2250 next_tick = DE4X5_AUTOSENSE_MS;
2251 }
2252 }
2253 }
2254 break;
2255
2256 case _10Mb:
2257 next_tick = 3000;
2258 if (!lp->tx_enable) {
2259 SET_10Mb;
2260 de4x5_init_connection(dev);
2261 } else {
2262 if (!lp->linkOK && (lp->autosense == AUTO)) {
2263 if (!(!is_spd_100(dev) && is_10_up(dev))) {
2264 lp->media = INIT;
2265 next_tick = DE4X5_AUTOSENSE_MS;
2266 }
2267 }
2268 }
2269 break;
2270
2271 case NC:
2272 SET_10Mb;
2273 de4x5_dbg_media(dev);
2274 lp->media = INIT;
2275 lp->tx_enable = FALSE;
2276 break;
2277 }
2278
2279 return next_tick;
2280 }
2281
2282 static void de4x5_init_connection(struct device *dev)
2283 {
2284 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2285 u_long iobase = dev->base_addr;
2286
2287 de4x5_dbg_media(dev);
2288 de4x5_restore_skbs(dev);
2289 cli();
2290 de4x5_rx(dev);
2291 de4x5_setup_intr(dev);
2292 lp->lostMedia = 0;
2293 lp->tx_enable = YES;
2294 sti();
2295 outl(POLL_DEMAND, DE4X5_TPD);
2296
2297 return;
2298 }
2299
2300 static int de4x5_reset_phy(struct device *dev)
2301 {
2302 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2303 u_long iobase = dev->base_addr;
2304 int next_tick = 0;
2305
2306 if (lp->phy[lp->active].id) {
2307 if (lp->timeout < 0) {
2308 outl(GEP_HRST, DE4X5_GEP);
2309 udelay(1000);
2310 outl(0x00, DE4X5_GEP);
2311 udelay(2000);
2312 mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2313 }
2314 next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, FALSE, 500);
2315 }
2316
2317 return next_tick;
2318 }
2319
2320 static int
2321 test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
2322 {
2323 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2324 u_long iobase = dev->base_addr;
2325 s32 sts, csr12;
2326
2327 if (lp->timeout < 0) {
2328 lp->timeout = msec/100;
2329 reset_init_sia(dev, csr13, csr14, csr15);
2330
2331
2332 outl(irq_mask, DE4X5_IMR);
2333
2334
2335 sts = inl(DE4X5_STS);
2336 outl(sts, DE4X5_STS);
2337
2338
2339 if (lp->chipset == DC21041) {
2340 csr12 = inl(DE4X5_SISR);
2341 outl(csr12, DE4X5_SISR);
2342 }
2343 }
2344
2345 sts = inl(DE4X5_STS) & ~TIMER_CB;
2346
2347 if (!(sts & irqs) && --lp->timeout) {
2348 sts = 100 | TIMER_CB;
2349 } else {
2350 lp->timeout = -1;
2351 }
2352
2353 return sts;
2354 }
2355
2356 static int test_tp(struct device *dev, s32 msec)
2357 {
2358 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2359 u_long iobase = dev->base_addr;
2360 int sisr;
2361
2362 if (lp->timeout < 0) {
2363 lp->timeout = msec/100;
2364 }
2365
2366 sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);
2367
2368 if (sisr && --lp->timeout) {
2369 sisr = 100 | TIMER_CB;
2370 } else {
2371 lp->timeout = -1;
2372 }
2373
2374 return sisr;
2375 }
2376
2377
2378
2379
2380
2381 static int test_mii_reg(struct device *dev, int reg, int mask, int pol, long msec)
2382 {
2383 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2384 int test, iobase = dev->base_addr;
2385
2386 if (lp->timeout < 0) {
2387 lp->timeout = msec/100;
2388 }
2389
2390 if (pol) pol = ~0;
2391 reg = mii_rd(reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
2392 test = (reg ^ pol) & mask;
2393
2394 if (test && --lp->timeout) {
2395 reg = 100 | TIMER_CB;
2396 } else {
2397 lp->timeout = -1;
2398 }
2399
2400 return reg;
2401 }
2402
2403 static int is_spd_100(struct device *dev)
2404 {
2405 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2406 u_long iobase = dev->base_addr;
2407 int spd;
2408
2409 if (lp->phy[lp->active].id) {
2410 spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
2411 spd = ~(spd ^ lp->phy[lp->active].spd.value);
2412 spd &= lp->phy[lp->active].spd.mask;
2413 } else {
2414 spd = ((~inl(DE4X5_GEP)) & GEP_SLNK);
2415 }
2416
2417 return spd;
2418 }
2419
2420 static int is_100_up(struct device *dev)
2421 {
2422 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2423 u_long iobase = dev->base_addr;
2424
2425 if (lp->phy[lp->active].id) {
2426
2427 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
2428 return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS);
2429 } else {
2430 return ((~inl(DE4X5_GEP)) & GEP_SLNK);
2431 }
2432 }
2433
2434 static int is_10_up(struct device *dev)
2435 {
2436 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2437 u_long iobase = dev->base_addr;
2438
2439 if (lp->phy[lp->active].id) {
2440
2441 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
2442 return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS);
2443 } else {
2444 return ((~inl(DE4X5_GEP)) & GEP_LNP);
2445 }
2446 }
2447
2448 static int is_anc_capable(struct device *dev)
2449 {
2450 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2451 u_long iobase = dev->base_addr;
2452
2453 if (lp->phy[lp->active].id) {
2454 return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_ANC);
2455 } else {
2456 return 0;
2457 }
2458 }
2459
2460
2461
2462
2463
2464 static int ping_media(struct device *dev, int msec)
2465 {
2466 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2467 u_long iobase = dev->base_addr;
2468 int sisr;
2469
2470 if (lp->timeout < 0) {
2471 lp->timeout = msec/100;
2472
2473 lp->tmp = lp->tx_new;
2474 load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), NULL);
2475 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
2476 outl(POLL_DEMAND, DE4X5_TPD);
2477 }
2478
2479 sisr = inl(DE4X5_SISR);
2480
2481 if ((!(sisr & SISR_NCR)) && (lp->tx_ring[lp->tmp].status < 0) && (--lp->timeout)) {
2482 sisr = 100 | TIMER_CB;
2483 } else {
2484 if ((!(sisr & SISR_NCR)) &&
2485 !(lp->tx_ring[lp->tmp].status & (T_OWN | TD_ES)) && lp->timeout) {
2486 sisr = 0;
2487 } else {
2488 sisr = 1;
2489 }
2490 lp->timeout = -1;
2491 }
2492
2493 return sisr;
2494 }
2495
2496
2497
2498
2499
2500
2501
2502
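/*
** Media-change buffer handling: de4x5_save_skbs() pulls any pending skbs
** off the TX ring into the private cache queue and soft resets the chip;
** de4x5_restore_skbs() reloads the cached skbs onto the fresh ring.
** lp->cache.save_cnt prevents nested saves/restores.
*/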
2503 static void de4x5_save_skbs(struct device *dev)
2504 {
2505 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2506 u_long iobase = dev->base_addr;
2507 int i;
2508 s32 omr;
2509
2510 if (!lp->cache.save_cnt) {
2511 STOP_DE4X5;
2512 de4x5_tx(dev);
2513 for (i=lp->tx_new; i!=lp->tx_old; i--) {
2514 if (lp->skb[i]) {
2515 de4x5_putb_cache(dev, lp->skb[i]);
2516 lp->skb[i] = NULL;
2517 }
2518 if (i==0) i=lp->txRingSize;
2519 }
2520 if (lp->skb[i]) {
2521 de4x5_putb_cache(dev, lp->skb[i]);
2522 lp->skb[i] = NULL;
2523 }
2524
2525 de4x5_cache_state(dev, DE4X5_SAVE_STATE);
2526 de4x5_sw_reset(dev);
2527 de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
2528 dev->tbusy = 0;
2529 lp->cache.save_cnt++;
2530 START_DE4X5;
2531 }
2532
2533 return;
2534 }
2535
2536 static void de4x5_restore_skbs(struct device *dev)
2537 {
2538 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2539 u_long iobase = dev->base_addr;
2540 struct sk_buff *skb;
2541 int i;
2542 s32 omr;
2543
2544 if (lp->cache.save_cnt) {
2545 STOP_DE4X5;
2546 de4x5_cache_state(dev, DE4X5_SAVE_STATE);
2547 de4x5_sw_reset(dev);
2548 de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
2549 dev->tbusy = 1;
2550
2551 for (i=0; TX_BUFFS_AVAIL && lp->cache.skb; i++) {
2552 skb = de4x5_get_cache(dev);
2553 load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
2554 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
2555 }
2556 if (TX_BUFFS_AVAIL) {
2557 dev->tbusy = 0;
2558 }
2559 lp->cache.save_cnt--;
2560 START_DE4X5;
2561 }
2562
2563 return;
2564 }
2565
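/*
** Save/restore the CSRs that a software reset disturbs (bus mode,
** operating mode and interrupt mask). The SIA registers (CSR13-15) are
** only cached on the SIA based chips; on the DC21140 the general purpose
** port is simply reprogrammed for the current media on restore.
*/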
2566 static void de4x5_cache_state(struct device *dev, int flag)
2567 {
2568 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2569 u_long iobase = dev->base_addr;
2570 s32 gep;
2571
2572 switch(flag) {
2573 case DE4X5_SAVE_STATE:
2574 lp->cache.csr0 = inl(DE4X5_BMR);
2575 lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR));
2576 lp->cache.csr7 = inl(DE4X5_IMR);
2577 if (lp->chipset != DC21140) {
2578 lp->cache.csr13 = inl(DE4X5_SICR);
2579 lp->cache.csr14 = inl(DE4X5_STRR);
2580 lp->cache.csr15 = inl(DE4X5_SIGR);
2581 }
2582 break;
2583
2584 case DE4X5_RESTORE_STATE:
2585 outl(lp->cache.csr0, DE4X5_BMR);
2586 outl(lp->cache.csr6, DE4X5_OMR);
2587 outl(lp->cache.csr7, DE4X5_IMR);
2588 if (lp->chipset == DC21140) {
2589 outl(GEP_INIT, DE4X5_GEP);
2590 gep = (lp->media == _100Mb ? GEP_MODE : 0);
2591 if (!lp->phy[lp->active].id && !de4x5_full_duplex) {
2592 gep |= GEP_FDXD;
2593 }
2594 outl(gep, DE4X5_GEP);
2595 } else {
2596 reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
2597 lp->cache.csr15);
2598 }
2599 break;
2600 }
2601
2602 return;
2603 }
2604
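/*
** The skb cache is a simple singly linked list threaded through skb->next:
** de4x5_put_cache() appends at the tail, de4x5_putb_cache() pushes onto the
** head and de4x5_get_cache() pops from the head.
*/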
2605 static void de4x5_put_cache(struct device *dev, struct sk_buff *skb)
2606 {
2607 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2608 struct sk_buff *p;
2609
2610 if (lp->cache.skb) {
2611 for (p=lp->cache.skb; p->next; p=p->next);
2612 p->next = skb;
2613 } else {
2614 lp->cache.skb = skb;
2615 }
2616 skb->next = NULL;
2617
2618 return;
2619 }
2620
2621 static void de4x5_putb_cache(struct device *dev, struct sk_buff *skb)
2622 {
2623 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2624 struct sk_buff *p = lp->cache.skb;
2625
2626 lp->cache.skb = skb;
2627 skb->next = p;
2628
2629 return;
2630 }
2631
2632 static struct sk_buff *de4x5_get_cache(struct device *dev)
2633 {
2634 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2635 struct sk_buff *p = lp->cache.skb;
2636
2637 if (p) {
2638 lp->cache.skb = p->next;
2639 p->next = NULL;
2640 }
2641
2642 return p;
2643 }
2644
2645
2646
2647
2648
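/*
** Wait for the SIA autonegotiation state (SISR_ANS) to reach ANS_NWOK, or
** for one of the requested status interrupts, using the same
** (100 | TIMER_CB) re-poll convention as the other media test helpers.
*/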
2649 static int test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec)
2650 {
2651 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2652 u_long iobase = dev->base_addr;
2653 s32 sts, ans;
2654
2655 if (lp->timeout < 0) {
2656 lp->timeout = msec/100;
2657 outl(irq_mask, DE4X5_IMR);
2658
2659
2660 sts = inl(DE4X5_STS);
2661 outl(sts, DE4X5_STS);
2662 }
2663
2664 ans = inl(DE4X5_SISR) & SISR_ANS;
2665 sts = inl(DE4X5_STS) & ~TIMER_CB;
2666
2667 if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
2668 sts = 100 | TIMER_CB;
2669 } else {
2670 lp->timeout = -1;
2671 }
2672
2673 return sts;
2674 }
2675
2676 static void de4x5_setup_intr(struct device *dev)
2677 {
2678 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2679 u_long iobase = dev->base_addr;
2680 s32 imr, sts;
2681
2682 if (inl(DE4X5_OMR) & OMR_SR) {
2683 imr = 0;
2684 UNMASK_IRQs;
2685 sts = inl(DE4X5_STS);
2686 outl(sts, DE4X5_STS);
2687 ENABLE_IRQs;
2688 }
2689
2690 return;
2691 }
2692
2693
2694
2695
2696 static void reset_init_sia(struct device *dev, s32 sicr, s32 strr, s32 sigr)
2697 {
2698 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2699 u_long iobase = dev->base_addr;
2700
2701 RESET_SIA;
2702 outl(sigr, DE4X5_SIGR);
2703 outl(strr, DE4X5_STRR);
2704 outl(sicr, DE4X5_SICR);
2705
2706 return;
2707 }
2708
2709
2710
2711
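/*
** Build a minimal self-addressed frame: destination and source are both
** this interface's MAC address, followed by a 2 byte length field of
** 0x0001. Presumably used as the loopback frame for ping_media().
*/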
2712 static void create_packet(struct device *dev, char *frame, int len)
2713 {
2714 int i;
2715 char *buf = frame;
2716
2717 for (i=0; i<ETH_ALEN; i++) {
2718 *buf++ = dev->dev_addr[i];
2719 }
2720 for (i=0; i<ETH_ALEN; i++) {
2721 *buf++ = dev->dev_addr[i];
2722 }
2723
2724 *buf++ = 0;
2725 *buf++ = 1;
2726
2727 return;
2728 }
2729
2730
2731
2732
2733 static void de4x5_us_delay(u32 usec)
2734 {
2735 udelay(usec);
2736
2737 return;
2738 }
2739
2740
2741
2742
2743 static void de4x5_ms_delay(u32 msec)
2744 {
2745 u_int i;
2746
2747 for (i=0; i<msec; i++) {
2748 de4x5_us_delay(1000);
2749 }
2750
2751 return;
2752 }
2753
2754
2755
2756
2757
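/*
** Decode the 4 byte EISA ID register into its 5 character compressed ASCII
** manufacturer/product code and match it against the known DE4X5
** signatures. On a match the code is copied into 'name' and 1 is returned.
*/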
2758 static int EISA_signature(char *name, s32 eisa_id)
2759 {
2760 c_char *signatures[] = DE4X5_SIGNATURE;
2761 char ManCode[DE4X5_STRLEN];
2762 union {
2763 s32 ID;
2764 char Id[4];
2765 } Eisa;
2766 int i, status = 0, siglen = sizeof(signatures)/sizeof(c_char *);
2767
2768 *name = '\0';
2769 Eisa.ID = inl(eisa_id);
2770
2771 ManCode[0]=(((Eisa.Id[0]>>2)&0x1f)+0x40);
2772 ManCode[1]=(((Eisa.Id[1]&0xe0)>>5)+((Eisa.Id[0]&0x03)<<3)+0x40);
2773 ManCode[2]=(((Eisa.Id[2]>>4)&0x0f)+0x30);
2774 ManCode[3]=((Eisa.Id[2]&0x0f)+0x30);
2775 ManCode[4]=(((Eisa.Id[3]>>4)&0x0f)+0x30);
2776 ManCode[5]='\0';
2777
2778 for (i=0;i<siglen;i++) {
2779 if (strstr(ManCode, signatures[i]) != NULL) {
2780 strcpy(name,ManCode);
2781 status = 1;
2782 break;
2783 }
2784 }
2785
2786 return status;
2787 }
2788
2789
2790
2791
2792 static int PCI_signature(char *name, struct bus_type *lp)
2793 {
2794 c_char *de4x5_signatures[] = DE4X5_SIGNATURE;
2795 int i, status = 0, siglen = sizeof(de4x5_signatures)/sizeof(c_char *);
2796
2797 if (lp->chipset == DC21040) {
2798 strcpy(name, "DE434/5");
2799 } else {
2800 int j = *((char *)&lp->srom + 19) * 3;
2801 if (lp->chipset == DC21041) {
2802 strncpy(name, (char *)&lp->srom + 26 + j, 8);
2803 } else if (lp->chipset == DC21140) {
2804 strncpy(name, (char *)&lp->srom + 26 + j, 8);
2805 }
2806 }
2807 name[8] = '\0';
2808 for (i=0; i<siglen; i++) {
2809 if (strstr(name,de4x5_signatures[i])!=NULL) break;
2810 }
2811 if (i == siglen) {
2812 if (dec_only) {
2813 *name = '\0';
2814 } else {
2815 strcpy(name, "UNKNOWN");
2816 }
2817 }
2818
2819 return status;
2820 }
2821
2822
2823
2824
2825
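/*
** For the DC21040, writing zero to the Ethernet address PROM register
** resets its read pointer; for the SROM based chips the entire SROM is
** read into the bus structure for later parsing.
*/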
2826 static void DevicePresent(u_long aprom_addr)
2827 {
2828 int i;
2829 struct bus_type *lp = &bus;
2830
2831 if (lp->chipset == DC21040) {
2832 outl(0, aprom_addr);
2833 } else {
2834 short *p = (short *)&lp->srom;
2835 for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
2836 *p++ = srom_rd(aprom_addr, i);
2837 }
2838 de4x5_dbg_srom((struct de4x5_srom *)&lp->srom);
2839 }
2840
2841 return;
2842 }
2843
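/*
** Extract the station address from the DC21040 address PROM, the SROM
** image (with a workaround for the non-standard layouts detected by
** de4x5_bad_srom()), or the EISA address PROM, and verify the rotating
** checksum where the PROM provides one. Returns -1 on a checksum mismatch.
*/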
2844 static int get_hw_addr(struct device *dev)
2845 {
2846 u_long iobase = dev->base_addr;
2847 int broken, i, k, tmp, status = 0;
2848 u_short j,chksum;
2849 struct bus_type *lp = &bus;
2850
2851 broken = de4x5_bad_srom(lp);
2852 for (i=0,k=0,j=0;j<3;j++) {
2853 k <<= 1;
2854 if (k > 0xffff) k-=0xffff;
2855
2856 if (lp->bus == PCI) {
2857 if (lp->chipset == DC21040) {
2858 while ((tmp = inl(DE4X5_APROM)) < 0);
2859 k += (u_char) tmp;
2860 dev->dev_addr[i++] = (u_char) tmp;
2861 while ((tmp = inl(DE4X5_APROM)) < 0);
2862 k += (u_short) (tmp << 8);
2863 dev->dev_addr[i++] = (u_char) tmp;
2864 } else if (!broken) {
2865 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
2866 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
2867 } else if (broken == SMC) {
2868 dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
2869 dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
2870 }
2871 } else {
2872 k += (u_char) (tmp = inb(EISA_APROM));
2873 dev->dev_addr[i++] = (u_char) tmp;
2874 k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
2875 dev->dev_addr[i++] = (u_char) tmp;
2876 }
2877
2878 if (k > 0xffff) k-=0xffff;
2879 }
2880 if (k == 0xffff) k=0;
2881
2882 if (lp->bus == PCI) {
2883 if (lp->chipset == DC21040) {
2884 while ((tmp = inl(DE4X5_APROM)) < 0);
2885 chksum = (u_char) tmp;
2886 while ((tmp = inl(DE4X5_APROM)) < 0);
2887 chksum |= (u_short) (tmp << 8);
2888 if (k != chksum) status = -1;
2889 }
2890 } else {
2891 chksum = (u_char) inb(EISA_APROM);
2892 chksum |= (u_short) (inb(EISA_APROM) << 8);
2893 if (k != chksum) status = -1;
2894 }
2895
2896 return status;
2897 }
2898
2899
2900
2901
2902
2903 static int de4x5_bad_srom(struct bus_type *lp)
2904 {
2905 int i, status = 0;
2906
2907 for (i=0; i<sizeof(enet_det)/ETH_ALEN; i++) {
2908 if (!de4x5_strncmp((char *)&lp->srom, (char *)&enet_det[i], 3) &&
2909 !de4x5_strncmp((char *)&lp->srom+0x10, (char *)&enet_det[i], 3)) {
2910 status = SMC;
2911 break;
2912 }
2913 }
2914
2915 return status;
2916 }
2917
2918 static int de4x5_strncmp(char *a, char *b, int n)
2919 {
2920 int ret=0;
2921
2922 for (;n && !ret;n--) {
2923 ret = *a++ - *b++;
2924 }
2925
2926 return ret;
2927 }
2928
2929
2930
2931
2932 static short srom_rd(u_long addr, u_char offset)
2933 {
2934 sendto_srom(SROM_RD | SROM_SR, addr);
2935
2936 srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
2937 srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
2938 srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
2939
2940 return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
2941 }
2942
2943 static void srom_latch(u_int command, u_long addr)
2944 {
2945 sendto_srom(command, addr);
2946 sendto_srom(command | DT_CLK, addr);
2947 sendto_srom(command, addr);
2948
2949 return;
2950 }
2951
2952 static void srom_command(u_int command, u_long addr)
2953 {
2954 srom_latch(command, addr);
2955 srom_latch(command, addr);
2956 srom_latch((command & 0x0000ff00) | DT_CS, addr);
2957
2958 return;
2959 }
2960
2961 static void srom_address(u_int command, u_long addr, u_char offset)
2962 {
2963 int i;
2964 char a;
2965
2966 a = (char)(offset << 2);
2967 for (i=0; i<6; i++, a <<= 1) {
2968 srom_latch(command | ((a < 0) ? DT_IN : 0), addr);
2969 }
2970 de4x5_us_delay(1);
2971
2972 i = (getfrom_srom(addr) >> 3) & 0x01;
2973 if (i != 0) {
2974 printk("Bad SROM address phase.....\n");
2975 }
2976
2977 return;
2978 }
2979
2980 static short srom_data(u_int command, u_long addr)
2981 {
2982 int i;
2983 short word = 0;
2984 s32 tmp;
2985
2986 for (i=0; i<16; i++) {
2987 sendto_srom(command | DT_CLK, addr);
2988 tmp = getfrom_srom(addr);
2989 sendto_srom(command, addr);
2990
2991 word = (word << 1) | ((tmp >> 3) & 0x01);
2992 }
2993
2994 sendto_srom(command & 0x0000ff00, addr);
2995
2996 return word;
2997 }
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014 static void sendto_srom(u_int command, u_long addr)
3015 {
3016 outl(command, addr);
3017 udelay(1);
3018
3019 return;
3020 }
3021
3022 static int getfrom_srom(u_long addr)
3023 {
3024 s32 tmp;
3025
3026 tmp = inl(addr);
3027 udelay(1);
3028
3029 return tmp;
3030 }
3031
3032
3033
3034
3035
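/*
** MDIO management frame helpers: mii_rd()/mii_wr() bit-bang the preamble,
** start/opcode bits, PHY and register addresses, turnaround and 16 data
** bits, with sendto_mii()/getfrom_mii() toggling MDC around each data bit.
*/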
3036 static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
3037 {
3038 mii_wdata(MII_PREAMBLE, 2, ioaddr);
3039 mii_wdata(MII_PREAMBLE, 32, ioaddr);
3040 mii_wdata(MII_STRD, 4, ioaddr);
3041 mii_address(phyaddr, ioaddr);
3042 mii_address(phyreg, ioaddr);
3043 mii_ta(MII_STRD, ioaddr);
3044
3045 return mii_rdata(ioaddr);
3046 }
3047
3048 static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
3049 {
3050 mii_wdata(MII_PREAMBLE, 2, ioaddr);
3051 mii_wdata(MII_PREAMBLE, 32, ioaddr);
3052 mii_wdata(MII_STWR, 4, ioaddr);
3053 mii_address(phyaddr, ioaddr);
3054 mii_address(phyreg, ioaddr);
3055 mii_ta(MII_STWR, ioaddr);
3056 data = mii_swap(data, 16);
3057 mii_wdata(data, 16, ioaddr);
3058
3059 return;
3060 }
3061
3062 static int mii_rdata(u_long ioaddr)
3063 {
3064 int i;
3065 s32 tmp = 0;
3066
3067 for (i=0; i<16; i++) {
3068 tmp <<= 1;
3069 tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
3070 }
3071
3072 return tmp;
3073 }
3074
3075 static void mii_wdata(int data, int len, u_long ioaddr)
3076 {
3077 int i;
3078
3079 for (i=0; i<len; i++) {
3080 sendto_mii(MII_MWR | MII_WR, data, ioaddr);
3081 data >>= 1;
3082 }
3083
3084 return;
3085 }
3086
3087 static void mii_address(u_char addr, u_long ioaddr)
3088 {
3089 int i;
3090
3091 addr = mii_swap(addr, 5);
3092 for (i=0; i<5; i++) {
3093 sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
3094 addr >>= 1;
3095 }
3096
3097 return;
3098 }
3099
3100 static void mii_ta(u_long rw, u_long ioaddr)
3101 {
3102 if (rw == MII_STWR) {
3103 sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
3104 getfrom_mii(MII_MRD | MII_RD, ioaddr);
3105 } else {
3106 getfrom_mii(MII_MRD | MII_RD, ioaddr);
3107 }
3108
3109 return;
3110 }
3111
3112 static int mii_swap(int data, int len)
3113 {
3114 int i, tmp = 0;
3115
3116 for (i=0; i<len; i++) {
3117 tmp <<= 1;
3118 tmp |= (data & 1);
3119 data >>= 1;
3120 }
3121
3122 return tmp;
3123 }
3124
3125 static void sendto_mii(u32 command, int data, u_long ioaddr)
3126 {
3127 u32 j;
3128
3129 j = (data & 1) << 17;
3130 outl(command | j, ioaddr);
3131 udelay(1);
3132 outl(command | MII_MDC | j, ioaddr);
3133 udelay(1);
3134
3135 return;
3136 }
3137
3138 static int getfrom_mii(u32 command, u_long ioaddr)
3139 {
3140 outl(command, ioaddr);
3141 udelay(1);
3142 outl(command | MII_MDC, ioaddr);
3143 udelay(1);
3144
3145 return ((inl(ioaddr) >> 19) & 1);
3146 }
3147
3148
3149
3150
3151
3152 static int mii_get_oui(u_char phyaddr, u_long ioaddr)
3153 {
3154
3155
3156
3157
3158
3159
3160 int r2, r3;
3161
3162
3163 r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
3164 r3 = mii_rd(MII_ID1, phyaddr, ioaddr);
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192 return r2;
3193 }
3194
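/*
** Hardware reset the PHYs via the general purpose port, then scan the MII
** addresses for devices whose OUI matches an entry in phy_info[]. Each
** recognised PHY is recorded in lp->phy[] and reset; the count of
** recognised PHYs is returned.
*/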
3195 static int mii_get_phy(struct device *dev)
3196 {
3197 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3198 int iobase = dev->base_addr;
3199 int i, j, k, limit=sizeof(phy_info)/sizeof(struct phy_table);
3200 int id;
3201
3202
3203 outl(GEP_HRST, DE4X5_GEP);
3204 udelay(1000);
3205 outl(0x00, DE4X5_GEP);
3206 udelay(2000);
3207
3208
3209 lp->active = 0;
3210 for (lp->mii_cnt=0, i=1; i<DE4X5_MAX_MII; i++) {
3211 id = mii_get_oui(i, DE4X5_MII);
3212 if ((id == 0) || (id == -1)) continue;
3213 for (j=0; j<limit; j++) {
3214 if (id != phy_info[j].id) continue;
3215 for (k=0; (k < DE4X5_MAX_PHY) && lp->phy[k].id; k++);
3216 if (k < DE4X5_MAX_PHY) {
3217 memcpy((char *)&lp->phy[k],
3218 (char *)&phy_info[j], sizeof(struct phy_table));
3219 lp->phy[k].addr = i;
3220 lp->mii_cnt++;
3221 } else {
3222 i = DE4X5_MAX_MII;
3223 j = limit;
3224 }
3225 }
3226 }
3227 if (lp->phy[lp->active].id) {
3228 for (k=0; (k < DE4X5_MAX_PHY) && lp->phy[k].id; k++) {
3229 mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
3230 while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
3231
3232 de4x5_dbg_mii(dev, k);
3233 }
3234 }
3235
3236 return lp->mii_cnt;
3237 }
3238
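/*
** (Re)build the setup frame with this interface's own address. In hash
** (imperfect) filtering mode the address is placed in the physical address
** slot and what is presumably the broadcast hash bit is set; in perfect
** filtering mode the station address and the broadcast address fill the
** first two table entries, each address byte pair padded out to 32 bits.
*/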
3239 static char *build_setup_frame(struct device *dev, int mode)
3240 {
3241 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3242 int i;
3243 char *pa = lp->setup_frame;
3244
3245
3246 if (mode == ALL) {
3247 memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
3248 }
3249
3250 if (lp->setup_f == HASH_PERF) {
3251 for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
3252 *(pa + i) = dev->dev_addr[i];
3253 if (i & 0x01) pa += 2;
3254 }
3255 *(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80;
3256 } else {
3257 for (i=0; i<ETH_ALEN; i++) {
3258 *(pa + (i&1)) = dev->dev_addr[i];
3259 if (i & 0x01) pa += 4;
3260 }
3261 for (i=0; i<ETH_ALEN; i++) {
3262 *(pa + (i&1)) = (char) 0xff;
3263 if (i & 0x01) pa += 4;
3264 }
3265 }
3266
3267 return pa;
3268 }
3269
3270 static void enable_ast(struct device *dev, u32 time_out)
3271 {
3272 timeout(dev, (void *)&de4x5_ast, (u_long)dev, time_out);
3273
3274 return;
3275 }
3276
3277 static void disable_ast(struct device *dev)
3278 {
3279 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3280
3281 del_timer(&lp->timer);
3282
3283 return;
3284 }
3285
3286 static long de4x5_switch_to_mii(struct device *dev)
3287 {
3288 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3289 int iobase = dev->base_addr;
3290 long omr;
3291
3292
3293 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR));
3294 omr |= (OMR_PS | OMR_HBD);
3295 outl(omr, DE4X5_OMR);
3296
3297
3298 RESET_DE4X5;
3299
3300
3301 if (lp->chipset == DC21140) {
3302 outl(GEP_INIT, DE4X5_GEP);
3303 outl(0, DE4X5_GEP);
3304 }
3305
3306
3307 outl(omr, DE4X5_OMR);
3308
3309 return omr;
3310 }
3311
3312 static long de4x5_switch_to_srl(struct device *dev)
3313 {
3314 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3315 int iobase = dev->base_addr;
3316 long omr;
3317
3318
3319 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR));
3320 outl(omr | OMR_TTM, DE4X5_OMR);
3321 outl(omr, DE4X5_OMR);
3322
3323
3324 RESET_DE4X5;
3325
3326
3327 if (lp->chipset == DC21140) {
3328 outl(GEP_INIT, DE4X5_GEP);
3329 outl(0, DE4X5_GEP);
3330 }
3331
3332
3333 outl(omr, DE4X5_OMR);
3334
3335 return omr;
3336 }
3337
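/*
** (Re)arm the private timer to call 'fn(data)' in roughly 'msec'
** milliseconds (minimum one jiffy). enable_ast()/disable_ast() use this
** for the media autosense callback, de4x5_ast().
*/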
3338 static void timeout(struct device *dev, void (*fn)(u_long data), u_long data, u_long msec)
3339 {
3340 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3341 int dt;
3342
3343
3344 del_timer(&lp->timer);
3345
3346
3347 dt = (msec * HZ) / 1000;
3348 if (dt==0) dt=1;
3349
3350
3351 lp->timer.expires = jiffies + dt;
3352 lp->timer.function = fn;
3353 lp->timer.data = data;
3354 add_timer(&lp->timer);
3355
3356 return;
3357 }
3358
3359 static void de4x5_dbg_open(struct device *dev)
3360 {
3361 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3362 int i;
3363
3364 if (de4x5_debug > 1) {
3365 printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
3366 printk("\tphysical address: ");
3367 for (i=0;i<6;i++) {
3368 printk("%2.2x:",(short)dev->dev_addr[i]);
3369 }
3370 printk("\n");
3371 printk("Descriptor head addresses:\n");
3372 printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
3373 printk("Descriptor addresses:\nRX: ");
3374 for (i=0;i<lp->rxRingSize-1;i++){
3375 if (i < 3) {
3376 printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
3377 }
3378 }
3379 printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
3380 printk("TX: ");
3381 for (i=0;i<lp->txRingSize-1;i++){
3382 if (i < 3) {
3383 printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
3384 }
3385 }
3386 printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
3387 printk("Descriptor buffers:\nRX: ");
3388 for (i=0;i<lp->rxRingSize-1;i++){
3389 if (i < 3) {
3390 printk("0x%8.8x ",lp->rx_ring[i].buf);
3391 }
3392 }
3393 printk("...0x%8.8x\n",lp->rx_ring[i].buf);
3394 printk("TX: ");
3395 for (i=0;i<lp->txRingSize-1;i++){
3396 if (i < 3) {
3397 printk("0x%8.8x ", lp->tx_ring[i].buf);
3398 }
3399 }
3400 printk("...0x%8.8x\n", lp->tx_ring[i].buf);
3401 printk("Ring size: \nRX: %d\nTX: %d\n",
3402 (short)lp->rxRingSize,
3403 (short)lp->txRingSize);
3404 }
3405
3406 return;
3407 }
3408
3409 static void de4x5_dbg_mii(struct device *dev, int k)
3410 {
3411 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3412 int iobase = dev->base_addr;
3413
3414 if (de4x5_debug > 2) {
3415 printk("\nMII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII));
3416 printk("MII SR: %x\n",mii_rd(MII_SR,lp->phy[k].addr,DE4X5_MII));
3417 printk("MII ID0: %x\n",mii_rd(MII_ID0,lp->phy[k].addr,DE4X5_MII));
3418 printk("MII ID1: %x\n",mii_rd(MII_ID1,lp->phy[k].addr,DE4X5_MII));
3419 if (lp->phy[k].id != BROADCOM_T4) {
3420 printk("MII ANA: %x\n",mii_rd(0x04,lp->phy[k].addr,DE4X5_MII));
3421 printk("MII ANC: %x\n",mii_rd(0x05,lp->phy[k].addr,DE4X5_MII));
3422 }
3423 printk("MII 16: %x\n",mii_rd(0x10,lp->phy[k].addr,DE4X5_MII));
3424 if (lp->phy[k].id != BROADCOM_T4) {
3425 printk("MII 17: %x\n",mii_rd(0x11,lp->phy[k].addr,DE4X5_MII));
3426 printk("MII 18: %x\n",mii_rd(0x12,lp->phy[k].addr,DE4X5_MII));
3427 } else {
3428 printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
3429 }
3430 }
3431
3432 return;
3433 }
3434
3435 static void de4x5_dbg_media(struct device *dev)
3436 {
3437 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3438
3439 if (lp->media != lp->c_media) {
3440 if (de4x5_debug > 0) {
3441 if (lp->chipset != DC21140) {
3442 printk("%s: media is %s\n", dev->name,
3443 (lp->media == NC ? "unconnected!" :
3444 (lp->media == TP ? "TP." :
3445 (lp->media == ANS ? "TP/Nway." :
3446 (lp->media == BNC ? "BNC." :
3447 (lp->media == BNC_AUI ? "BNC/AUI." :
3448 (lp->media == EXT_SIA ? "EXT SIA." :
3449 "???."
3450 )))))));
3451 } else {
3452 printk("%s: mode is %s\n", dev->name,
3453 (lp->media == NC ? "link down or incompatible connection.":
3454 (lp->media == _100Mb ? "100Mb/s." :
3455 (lp->media == _10Mb ? "10Mb/s." :
3456 "\?\?\?"
3457 ))));
3458 }
3459 }
3460 lp->c_media = lp->media;
3461 }
3462
3463 return;
3464 }
3465
3466 static void de4x5_dbg_srom(struct de4x5_srom *p)
3467 {
3468 int i;
3469
3470 if (de4x5_debug > 1) {
3471 printk("Sub-system Vendor ID: %04x\n", (u_short)*(p->sub_vendor_id));
3472 printk("Sub-system ID: %04x\n", (u_short)*(p->sub_system_id));
3473 printk("ID Block CRC: %02x\n", (u_char)(p->id_block_crc));
3474
3475 printk("Hardware Address: ");
3476 for (i=0;i<ETH_ALEN-1;i++) {
3477 printk("%02x:", (u_char)*(p->ieee_addr+i));
3478 }
3479 printk("%02x\n", (u_char)*(p->ieee_addr+i));
3480 printk("CRC checksum: %04x\n", (u_short)(p->chksum));
3481 for (i=0; i<64; i++) {
3482 printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
3483 }
3484 }
3485
3486 return;
3487 }
3488
3489
3490
3491
3492
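/*
** Private ioctl interface: a struct de4x5_ioctl (cmd, len, data pointer)
** is passed via ifr_data. Commands cover reading/setting the hardware
** address, promiscuous and multicast control, driver statistics, the
** operating mode register and a full register/descriptor dump; commands
** that change state require superuser rights.
*/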
3493 static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd)
3494 {
3495 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3496 struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_data;
3497 u_long iobase = dev->base_addr;
3498 int i, j, status = 0;
3499 s32 omr;
3500 union {
3501 u8 addr[(HASH_TABLE_LEN * ETH_ALEN)];
3502 u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
3503 u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2];
3504 } tmp;
3505
3506 switch(ioc->cmd) {
3507 case DE4X5_GET_HWADDR:
3508 ioc->len = ETH_ALEN;
3509 status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
3510 if (status)
3511 break;
3512 for (i=0; i<ETH_ALEN; i++) {
3513 tmp.addr[i] = dev->dev_addr[i];
3514 }
3515 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
3516
3517 break;
3518 case DE4X5_SET_HWADDR:
3519 status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN);
3520 if (status)
3521 break;
3522 status = -EPERM;
3523 if (!suser())
3524 break;
3525 status = 0;
3526 memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN);
3527 for (i=0; i<ETH_ALEN; i++) {
3528 dev->dev_addr[i] = tmp.addr[i];
3529 }
3530 build_setup_frame(dev, PHYS_ADDR_ONLY);
3531
3532 while (set_bit(0, (void *)&dev->tbusy) != 0);
3533 if (lp->setup_f == HASH_PERF) {
3534 load_packet(dev, lp->setup_frame, TD_IC | HASH_F | TD_SET |
3535 SETUP_FRAME_LEN, NULL);
3536 } else {
3537 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
3538 SETUP_FRAME_LEN, NULL);
3539 }
3540 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
3541 outl(POLL_DEMAND, DE4X5_TPD);
3542 dev->tbusy = 0;
3543
3544 break;
3545 case DE4X5_SET_PROM:
3546 if (suser()) {
3547 omr = inl(DE4X5_OMR);
3548 omr |= OMR_PR;
3549 outl(omr, DE4X5_OMR);
3550 } else {
3551 status = -EPERM;
3552 }
3553
3554 break;
3555 case DE4X5_CLR_PROM:
3556 if (suser()) {
3557 omr = inl(DE4X5_OMR);
3558 omr &= ~OMR_PR;
3559 outl(omr, DE4X5_OMR);
3560 } else {
3561 status = -EPERM;
3562 }
3563
3564 break;
3565 case DE4X5_SAY_BOO:
3566 printk("%s: Boo!\n", dev->name);
3567
3568 break;
3569 case DE4X5_GET_MCA:
3570 ioc->len = (HASH_TABLE_LEN >> 3);
3571 status = verify_area(VERIFY_WRITE, ioc->data, ioc->len);
3572 if (!status) {
3573 memcpy_tofs(ioc->data, lp->setup_frame, ioc->len);
3574 }
3575
3576 break;
3577 case DE4X5_SET_MCA:
3578 if (suser()) {
3579
3580 if (ioc->len != HASH_TABLE_LEN) {
3581 if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN * ioc->len))) {
3582 memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN * ioc->len);
3583 set_multicast_list(dev);
3584 }
3585 } else {
3586 set_multicast_list(dev);
3587 }
3588 } else {
3589 status = -EPERM;
3590 }
3591
3592 break;
3593 case DE4X5_CLR_MCA:
3594 if (suser()) {
3595
3596 set_multicast_list(dev);
3597 } else {
3598 status = -EPERM;
3599 }
3600
3601 break;
3602 case DE4X5_MCA_EN:
3603 if (suser()) {
3604 omr = inl(DE4X5_OMR);
3605 omr |= OMR_PM;
3606 outl(omr, DE4X5_OMR);
3607 } else {
3608 status = -EPERM;
3609 }
3610
3611 break;
3612 case DE4X5_GET_STATS:
3613 ioc->len = sizeof(lp->pktStats);
3614 status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
3615 if (status)
3616 break;
3617
3618 cli();
3619 memcpy_tofs(ioc->data, &lp->pktStats, ioc->len);
3620 sti();
3621
3622 break;
3623 case DE4X5_CLR_STATS:
3624 if (suser()) {
3625 cli();
3626 memset(&lp->pktStats, 0, sizeof(lp->pktStats));
3627 sti();
3628 } else {
3629 status = -EPERM;
3630 }
3631
3632 break;
3633 case DE4X5_GET_OMR:
3634 tmp.addr[0] = inl(DE4X5_OMR);
3635 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, 1))) {
3636 memcpy_tofs(ioc->data, tmp.addr, 1);
3637 }
3638
3639 break;
3640 case DE4X5_SET_OMR:
3641 if (suser()) {
3642 if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, 1))) {
3643 memcpy_fromfs(tmp.addr, ioc->data, 1);
3644 outl(tmp.addr[0], DE4X5_OMR);
3645 }
3646 } else {
3647 status = -EPERM;
3648 }
3649
3650 break;
3651 case DE4X5_GET_REG:
3652 j = 0;
3653 tmp.lval[0] = inl(DE4X5_STS); j+=4;
3654 tmp.lval[1] = inl(DE4X5_BMR); j+=4;
3655 tmp.lval[2] = inl(DE4X5_IMR); j+=4;
3656 tmp.lval[3] = inl(DE4X5_OMR); j+=4;
3657 tmp.lval[4] = inl(DE4X5_SISR); j+=4;
3658 tmp.lval[5] = inl(DE4X5_SICR); j+=4;
3659 tmp.lval[6] = inl(DE4X5_STRR); j+=4;
3660 tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
3661 ioc->len = j;
3662 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
3663 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
3664 }
3665 break;
3666
3667 #define DE4X5_DUMP 0x0f
3668
3669 case DE4X5_DUMP:
3670 j = 0;
3671 tmp.addr[j++] = dev->irq;
3672 for (i=0; i<ETH_ALEN; i++) {
3673 tmp.addr[j++] = dev->dev_addr[i];
3674 }
3675 tmp.addr[j++] = lp->rxRingSize;
3676 tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
3677 tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
3678
3679 for (i=0;i<lp->rxRingSize-1;i++){
3680 if (i < 3) {
3681 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
3682 }
3683 }
3684 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
3685 for (i=0;i<lp->txRingSize-1;i++){
3686 if (i < 3) {
3687 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
3688 }
3689 }
3690 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
3691
3692 for (i=0;i<lp->rxRingSize-1;i++){
3693 if (i < 3) {
3694 tmp.lval[j>>2] = (s32)lp->rx_ring[i].buf; j+=4;
3695 }
3696 }
3697 tmp.lval[j>>2] = (s32)lp->rx_ring[i].buf; j+=4;
3698 for (i=0;i<lp->txRingSize-1;i++){
3699 if (i < 3) {
3700 tmp.lval[j>>2] = (s32)lp->tx_ring[i].buf; j+=4;
3701 }
3702 }
3703 tmp.lval[j>>2] = (s32)lp->tx_ring[i].buf; j+=4;
3704
3705 for (i=0;i<lp->rxRingSize;i++){
3706 tmp.lval[j>>2] = lp->rx_ring[i].status; j+=4;
3707 }
3708 for (i=0;i<lp->txRingSize;i++){
3709 tmp.lval[j>>2] = lp->tx_ring[i].status; j+=4;
3710 }
3711
3712 tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
3713 tmp.lval[j>>2] = inl(DE4X5_TPD); j+=4;
3714 tmp.lval[j>>2] = inl(DE4X5_RPD); j+=4;
3715 tmp.lval[j>>2] = inl(DE4X5_RRBA); j+=4;
3716 tmp.lval[j>>2] = inl(DE4X5_TRBA); j+=4;
3717 tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
3718 tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
3719 tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
3720 tmp.lval[j>>2] = lp->chipset; j+=4;
3721 if (lp->chipset == DC21140) {
3722 tmp.lval[j>>2] = inl(DE4X5_GEP); j+=4;
3723 } else {
3724 tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
3725 tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
3726 tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
3727 tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
3728 }
3729 tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4;
3730 if (lp->phy[lp->active].id) {
3731 tmp.lval[j>>2] = lp->active; j+=4;
3732 tmp.lval[j>>2]=mii_rd(MII_CR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
3733 tmp.lval[j>>2]=mii_rd(MII_SR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
3734 tmp.lval[j>>2]=mii_rd(MII_ID0,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
3735 tmp.lval[j>>2]=mii_rd(MII_ID1,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
3736 if (lp->phy[lp->active].id != BROADCOM_T4) {
3737 tmp.lval[j>>2]=mii_rd(MII_ANA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
3738 tmp.lval[j>>2]=mii_rd(MII_ANLPA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
3739 }
3740 tmp.lval[j>>2]=mii_rd(0x10,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
3741 if (lp->phy[lp->active].id != BROADCOM_T4) {
3742 tmp.lval[j>>2]=mii_rd(0x11,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
3743 tmp.lval[j>>2]=mii_rd(0x12,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
3744 } else {
3745 tmp.lval[j>>2]=mii_rd(0x14,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
3746 }
3747 }
3748
3749 tmp.addr[j++] = lp->txRingSize;
3750 tmp.addr[j++] = dev->tbusy;
3751
3752 ioc->len = j;
3753 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
3754 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
3755 }
3756
3757 break;
3758 default:
3759 status = -EOPNOTSUPP;
3760 }
3761
3762 return status;
3763 }
3764
3765 #ifdef MODULE
3766
3767
3768
3769
3770
3771
3772
3773
3774
3775
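/*
** Loadable module support: init_module() registers the template device
** below (de4x5_probe() presumably chains any further boards onto it), and
** cleanup_module() refuses to unload while an interface is still up,
** otherwise releasing each device's cache buffers, I/O region and private
** data.
*/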
3776 static char devicename[9] = { 0, };
3777 static struct device thisDE4X5 = {
3778 devicename,
3779 0, 0, 0, 0,
3780 0, 0,
3781 0, 0, 0, NULL, de4x5_probe };
3782
3783 static int io=0x0b;
3784
3785 int
3786 init_module(void)
3787 {
3788 struct device *p = (struct device *)&thisDE4X5;
3789
3790 thisDE4X5.base_addr = io;
3791 thisDE4X5.irq = 0;
3792
3793 for (; p!=NULL; p=p->next) {
3794 if (register_netdev(p) != 0)
3795 return -EIO;
3796 }
3797 io=0;
3798 return 0;
3799 }
3800
3801 void
3802 cleanup_module(void)
3803 {
3804 struct de4x5_private *lp = (struct de4x5_private *) thisDE4X5.priv;
3805 struct device *p = (struct device *)&thisDE4X5, *next;
3806 int keep_loaded = 0;
3807
3808 for (; p!=NULL; p=p->next) {
3809 keep_loaded += (p->flags & IFF_UP);
3810 }
3811
3812 if (keep_loaded) {
3813 printk("de4x5: Cannot unload modules - %d interface%s%s still active.\n",
3814 keep_loaded, (keep_loaded>1 ? "s ": " "),
3815 (keep_loaded>1 ? "are": "is"));
3816 return;
3817 }
3818
3819 for (p=thisDE4X5.next; p!=NULL; p=next) {
3820 if (p->priv) {
3821 struct de4x5_private *lp = (struct de4x5_private *)p->priv;
3822 if (lp->cache.buf) {
3823 kfree(lp->cache.buf);
3824 }
3825 release_region(p->base_addr, (lp->bus == PCI ?
3826 DE4X5_PCI_TOTAL_SIZE :
3827 DE4X5_EISA_TOTAL_SIZE));
3828 kfree(lp->cache.priv);
3829 }
3830 next = p->next; unregister_netdev(p);
3831 kfree(p);
3832 }
3833
3834 if (thisDE4X5.priv) {
3835 if (lp->cache.buf) {
3836 kfree(lp->cache.buf);
3837 }
3838 release_region(thisDE4X5.base_addr,
3839 (lp->bus == PCI ?
3840 DE4X5_PCI_TOTAL_SIZE :
3841 DE4X5_EISA_TOTAL_SIZE));
3842 kfree(lp->cache.priv);
3843 thisDE4X5.priv = NULL;
3844 }
3845 unregister_netdev(&thisDE4X5);
3846
3847 return;
3848 }
3849 #endif
3850
3851
3852
3853
3854
3855
3856
3857
3858