This source file includes the following definitions:
- de4x5_probe
- de4x5_hw_init
- de4x5_open
- de4x5_init
- de4x5_sw_reset
- de4x5_queue_pkt
- de4x5_interrupt
- de4x5_rx
- de4x5_tx
- de4x5_ast
- de4x5_txur
- de4x5_close
- de4x5_get_stats
- load_packet
- set_multicast_list
- SetMulticastFilter
- eisa_probe
- pci_probe
- alloc_device
- autoconf_media
- dc21040_autoconf
- dc21040_state
- de4x5_suspect_state
- dc21041_autoconf
- dc21140m_autoconf
- de4x5_init_connection
- de4x5_reset_phy
- test_media
- test_tp
- test_sym_link
- test_mii_reg
- is_spd_100
- is_100_up
- is_10_up
- is_anc_capable
- ping_media
- de4x5_alloc_rx_buff
- de4x5_free_rx_buffs
- de4x5_free_tx_buffs
- de4x5_save_skbs
- de4x5_restore_skbs
- de4x5_cache_state
- de4x5_put_cache
- de4x5_putb_cache
- de4x5_get_cache
- test_ans
- de4x5_setup_intr
- reset_init_sia
- create_packet
- de4x5_us_delay
- de4x5_ms_delay
- EISA_signature
- PCI_signature
- DevicePresent
- get_hw_addr
- de4x5_bad_srom
- de4x5_strncmp
- srom_rd
- srom_latch
- srom_command
- srom_address
- srom_data
- sendto_srom
- getfrom_srom
- mii_rd
- mii_wr
- mii_rdata
- mii_wdata
- mii_address
- mii_ta
- mii_swap
- sendto_mii
- getfrom_mii
- mii_get_oui
- mii_get_phy
- build_setup_frame
- enable_ast
- disable_ast
- de4x5_switch_to_mii
- de4x5_switch_to_srl
- timeout
- de4x5_dbg_open
- de4x5_dbg_mii
- de4x5_dbg_media
- de4x5_dbg_srom
- de4x5_dbg_rx
- de4x5_ioctl
- init_module
- cleanup_module
202 static const char *version = "de4x5.c:v0.42 96/4/26 davies@wanton.lkg.dec.com\n";
203
204 #include <linux/module.h>
205
206 #include <linux/kernel.h>
207 #include <linux/sched.h>
208 #include <linux/string.h>
209 #include <linux/interrupt.h>
210 #include <linux/ptrace.h>
211 #include <linux/errno.h>
212 #include <linux/ioport.h>
213 #include <linux/malloc.h>
214 #include <linux/bios32.h>
215 #include <linux/pci.h>
216 #include <linux/delay.h>
217 #include <asm/bitops.h>
218 #include <asm/io.h>
219 #include <asm/dma.h>
220 #include <asm/segment.h>
221
222 #include <linux/netdevice.h>
223 #include <linux/etherdevice.h>
224 #include <linux/skbuff.h>
225
226 #include <linux/time.h>
227 #include <linux/types.h>
228 #include <linux/unistd.h>
229
230 #include "de4x5.h"
231
232 #define c_char const char
233
234
235
236
237 struct phy_table {
238 int reset;
239 int id;
240 int ta;
241 struct {
242 int reg;
243 int mask;
244 int value;
245 } spd;
246 };
247
248 struct mii_phy {
249 int reset;
250 int id;
251 int ta;
252 struct {
253 int reg;
254 int mask;
255 int value;
256 } spd;
257 int addr;
258 };
259
260 #define DE4X5_MAX_PHY 8
261
262
263
264
265
266 static struct phy_table phy_info[] = {
267 {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}},
268 {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}},
269 {0, SEEQ_T4 , 1, {0x12, 0x10, 0x10}},
270 {0, CYPRESS_T4 , 1, {0x05, 0x20, 0x20}}
271 };
272
273
274
275
276 static c_char enet_det[][ETH_ALEN] = {
277 {0x00, 0x00, 0xc0, 0x00, 0x00, 0x00}
278 };
279
280 #define SMC 1
281
282
283 #ifdef DE4X5_DEBUG
284 static int de4x5_debug = DE4X5_DEBUG;
285 #else
286 static int de4x5_debug = 1;
287 #endif
288
289 #ifdef DE4X5_AUTOSENSE
290 static int de4x5_autosense = DE4X5_AUTOSENSE;
291 #else
292 static int de4x5_autosense = AUTO;
293 #endif
294 #define DE4X5_AUTOSENSE_MS 250
295
296 #ifdef DE4X5_FULL_DUPLEX
297 static s32 de4x5_full_duplex = 1;
298 #else
299 static s32 de4x5_full_duplex = 0;
300 #endif
301
302 #define DE4X5_NDA 0xffe0
303
304
305
306
307 #define PROBE_LENGTH 32
308 #define ETH_PROM_SIG 0xAA5500FFUL
309
310
311
312
313 #define PKT_BUF_SZ 1536
314 #define IEEE802_3_SZ 1518
315 #define MAX_PKT_SZ 1514
316 #define MAX_DAT_SZ 1500
317 #define MIN_DAT_SZ 1
318 #define PKT_HDR_LEN 14
319 #define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
320 #define QUEUE_PKT_TIMEOUT (3*HZ)
321
322
323 #define CRC_POLYNOMIAL_BE 0x04c11db7UL
324 #define CRC_POLYNOMIAL_LE 0xedb88320UL
325
326
327
328
329 #define DE4X5_EISA_IO_PORTS 0x0c00
330 #define DE4X5_EISA_TOTAL_SIZE 0x100
331
332 #define MAX_EISA_SLOTS 16
333 #define EISA_SLOT_INC 0x1000
334
335 #define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"}
336 #define DE4X5_NAME_LENGTH 8
337
338
339
340
341 #define PCI_MAX_BUS_NUM 8
342 #define DE4X5_PCI_TOTAL_SIZE 0x80
343 #define DE4X5_CLASS_CODE 0x00020000
344
345
346
347
348
349
350
351 #define ALIGN4 ((u_long)4 - 1)
352 #define ALIGN8 ((u_long)8 - 1)
353 #define ALIGN16 ((u_long)16 - 1)
354 #define ALIGN32 ((u_long)32 - 1)
355 #define ALIGN64 ((u_long)64 - 1)
356 #define ALIGN128 ((u_long)128 - 1)
357
358 #define ALIGN ALIGN32
359 #define CACHE_ALIGN CAL_16LONG
360 #define DESC_SKIP_LEN DSL_0
361
362 #define DESC_ALIGN
363
364 #ifndef DEC_ONLY
365 static int dec_only = 0;
366 #else
367 static int dec_only = 1;
368 #endif
369
370
371
372
373 #define ENABLE_IRQs { \
374 imr |= lp->irq_en;\
375 outl(imr, DE4X5_IMR); \
376 }
377
378 #define DISABLE_IRQs {\
379 imr = inl(DE4X5_IMR);\
380 imr &= ~lp->irq_en;\
381 outl(imr, DE4X5_IMR); \
382 }
383
384 #define UNMASK_IRQs {\
385 imr |= lp->irq_mask;\
386 outl(imr, DE4X5_IMR); \
387 }
388
389 #define MASK_IRQs {\
390 imr = inl(DE4X5_IMR);\
391 imr &= ~lp->irq_mask;\
392 outl(imr, DE4X5_IMR); \
393 }
394
395
396
397
398 #define START_DE4X5 {\
399 omr = inl(DE4X5_OMR);\
400 omr |= OMR_ST | OMR_SR;\
401 outl(omr, DE4X5_OMR); \
402 }
403
404 #define STOP_DE4X5 {\
405 omr = inl(DE4X5_OMR);\
406 omr &= ~(OMR_ST|OMR_SR);\
407 outl(omr, DE4X5_OMR); \
408 }
409
410
411
412
413 #define RESET_SIA outl(0, DE4X5_SICR);
414
415
416
417
418 #define DE4X5_AUTOSENSE_MS 250
419
420
421
422
423 struct de4x5_srom {
424 char sub_vendor_id[2];
425 char sub_system_id[2];
426 char reserved[12];
427 char id_block_crc;
428 char reserved2;
429 char version;
430 char num_adapters;
431 char ieee_addr[6];
432 char info[100];
433 short chksum;
434 };
435 #define SUB_VENDOR_ID 0x500a
436
437
438
439
440
441
442
443
444
445 #define NUM_RX_DESC 8
446 #define NUM_TX_DESC 32
447 #define RX_BUFF_SZ 1536
448
449
450 struct de4x5_desc {
451 volatile s32 status;
452 u32 des1;
453 u32 buf;
454 u32 next;
455 DESC_ALIGN
456 };
457
458
459
460
461 #define DE4X5_PKT_STAT_SZ 16
462 #define DE4X5_PKT_BIN_SZ 128
463
464
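/* Per-adapter private data: the RX/TX descriptor rings, the sk_buffs attached to them, statistics, the media/autosense state-machine variables and a small cache used to park buffers and CSR values while the link is being (re)negotiated. */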
465 struct de4x5_private {
466 char adapter_name[80];
467 struct de4x5_desc rx_ring[NUM_RX_DESC];
468 struct de4x5_desc tx_ring[NUM_TX_DESC];
469 struct sk_buff *tx_skb[NUM_TX_DESC];
470 struct sk_buff *rx_skb[NUM_RX_DESC];
471 int rx_new, rx_old;
472 int tx_new, tx_old;
473 char setup_frame[SETUP_FRAME_LEN];
474 char frame[64];
475 struct enet_statistics stats;
476 struct {
477 u_int bins[DE4X5_PKT_STAT_SZ];
478 u_int unicast;
479 u_int multicast;
480 u_int broadcast;
481 u_int excessive_collisions;
482 u_int tx_underruns;
483 u_int excessive_underruns;
484 u_int rx_runt_frames;
485 u_int rx_collision;
486 u_int rx_dribble;
487 u_int rx_overflow;
488 } pktStats;
489 char rxRingSize;
490 char txRingSize;
491 int bus;
492 int bus_num;
493 int state;
494 int chipset;
495 s32 irq_mask;
496 s32 irq_en;
497 int media;
498 int c_media;
499 int linkOK;
500 int autosense;
501 int tx_enable;
502 int lostMedia;
503 int setup_f;
504 int local_state;
505 struct mii_phy phy[DE4X5_MAX_PHY];
506 int active;
507 int mii_cnt;
508 int timeout;
509 struct timer_list timer;
510 int tmp;
511 struct {
512 void *priv;
513 void *buf;
514 s32 csr0;
515 s32 csr6;
516 s32 csr7;
517 s32 csr13;
518 s32 csr14;
519 s32 csr15;
520 int save_cnt;
521 struct sk_buff *skb;
522 } cache;
523 };
524
525
526
527
528
529
530 static struct bus_type {
531 int bus;
532 int bus_num;
533 int device;
534 int chipset;
535 struct de4x5_srom srom;
536 int autosense;
537 } bus;
538
539
540
541
542
543
544
545
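/* Free TX descriptors between tx_new and tx_old; one slot is always left unused so a completely full ring can be distinguished from an empty one. */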
546 #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
547 lp->tx_old+lp->txRingSize-lp->tx_new-1:\
548 lp->tx_old -lp->tx_new-1)
549
550 #define TX_PKT_PENDING (lp->tx_old != lp->tx_new)
551
552
553
554
555 static int de4x5_open(struct device *dev);
556 static int de4x5_queue_pkt(struct sk_buff *skb, struct device *dev);
557 static void de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs);
558 static int de4x5_close(struct device *dev);
559 static struct enet_statistics *de4x5_get_stats(struct device *dev);
560 static void set_multicast_list(struct device *dev);
561 static int de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd);
562
563
564
565
566 static int de4x5_hw_init(struct device *dev, u_long iobase);
567 static int de4x5_init(struct device *dev);
568 static int de4x5_sw_reset(struct device *dev);
569 static int de4x5_rx(struct device *dev);
570 static int de4x5_tx(struct device *dev);
571 static int de4x5_ast(struct device *dev);
572 static int de4x5_txur(struct device *dev);
573
574 static int autoconf_media(struct device *dev);
575 static void create_packet(struct device *dev, char *frame, int len);
576 static void de4x5_us_delay(u32 usec);
577 static void de4x5_ms_delay(u32 msec);
578 static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb);
579 static int dc21040_autoconf(struct device *dev);
580 static int dc21041_autoconf(struct device *dev);
581 static int dc21140m_autoconf(struct device *dev);
582 static int de4x5_suspect_state(struct device *dev, int timeout, int prev_state, int (*fn)(struct device *, int), int (*asfn)(struct device *));
583 static int dc21040_state(struct device *dev, int csr13, int csr14, int csr15, int timeout, int next_state, int suspect_state, int (*fn)(struct device *, int));
584 static int test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
585 static int test_sym_link(struct device *dev, int msec);
586 static int test_mii_reg(struct device *dev, int reg, int mask, int pol, long msec);
587 static int is_spd_100(struct device *dev);
588 static int is_100_up(struct device *dev);
589 static int is_10_up(struct device *dev);
590 static int is_anc_capable(struct device *dev);
591 static int ping_media(struct device *dev, int msec);
592 static struct sk_buff *de4x5_alloc_rx_buff(struct device *dev, int index, int len);
593 static void de4x5_free_rx_buffs(struct device *dev);
594 static void de4x5_free_tx_buffs(struct device *dev);
595 static void de4x5_save_skbs(struct device *dev);
596 static void de4x5_restore_skbs(struct device *dev);
597 static void de4x5_cache_state(struct device *dev, int flag);
598 static void de4x5_put_cache(struct device *dev, struct sk_buff *skb);
599 static void de4x5_putb_cache(struct device *dev, struct sk_buff *skb);
600 static struct sk_buff *de4x5_get_cache(struct device *dev);
601 static void de4x5_setup_intr(struct device *dev);
602 static void de4x5_init_connection(struct device *dev);
603 static int de4x5_reset_phy(struct device *dev);
604 static void reset_init_sia(struct device *dev, s32 sicr, s32 strr, s32 sigr);
605 static int test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec);
606 static int test_tp(struct device *dev, s32 msec);
607 static int EISA_signature(char *name, s32 eisa_id);
608 static int PCI_signature(char *name, struct bus_type *lp);
609 static void DevicePresent(u_long iobase);
610 static int de4x5_bad_srom(struct bus_type *lp);
611 static short srom_rd(u_long address, u_char offset);
612 static void srom_latch(u_int command, u_long address);
613 static void srom_command(u_int command, u_long address);
614 static void srom_address(u_int command, u_long address, u_char offset);
615 static short srom_data(u_int command, u_long address);
616
617 static void sendto_srom(u_int command, u_long addr);
618 static int getfrom_srom(u_long addr);
619 static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr);
620 static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr);
621 static int mii_rdata(u_long ioaddr);
622 static void mii_wdata(int data, int len, u_long ioaddr);
623 static void mii_ta(u_long rw, u_long ioaddr);
624 static int mii_swap(int data, int len);
625 static void mii_address(u_char addr, u_long ioaddr);
626 static void sendto_mii(u32 command, int data, u_long ioaddr);
627 static int getfrom_mii(u32 command, u_long ioaddr);
628 static int mii_get_oui(u_char phyaddr, u_long ioaddr);
629 static int mii_get_phy(struct device *dev);
630 static void SetMulticastFilter(struct device *dev);
631 static int get_hw_addr(struct device *dev);
632
633 static void eisa_probe(struct device *dev, u_long iobase);
634 static void pci_probe(struct device *dev, u_long iobase);
635 static struct device *alloc_device(struct device *dev, u_long iobase);
636 static char *build_setup_frame(struct device *dev, int mode);
637 static void disable_ast(struct device *dev);
638 static void enable_ast(struct device *dev, u32 time_out);
639 static long de4x5_switch_to_srl(struct device *dev);
640 static long de4x5_switch_to_mii(struct device *dev);
641 static void timeout(struct device *dev, void (*fn)(u_long data), u_long data, u_long msec);
642 static void de4x5_dbg_open(struct device *dev);
643 static void de4x5_dbg_mii(struct device *dev, int k);
644 static void de4x5_dbg_media(struct device *dev);
645 static void de4x5_dbg_srom(struct de4x5_srom *p);
646 static void de4x5_dbg_rx(struct sk_buff *skb, int len);
647 static int de4x5_strncmp(char *a, char *b, int n);
648
649 #ifdef MODULE
650 int init_module(void);
651 void cleanup_module(void);
652 static int autoprobed = 0, loading_module = 1;
653 #else
654 static int autoprobed = 0, loading_module = 0;
655 #endif
656
657 static char name[DE4X5_NAME_LENGTH + 1];
658 static int num_de4x5s = 0, num_eth = 0;
659
660
661
662
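/* Software-reset the DC21x4x: pulse BMR_SWR in the Bus Mode Register, restore the previous BMR value and allow the chip a few milliseconds to settle. */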
663 #define RESET_DE4X5 {\
664 int i;\
665 i=inl(DE4X5_BMR);\
666 de4x5_ms_delay(1);\
667 outl(i | BMR_SWR, DE4X5_BMR);\
668 de4x5_ms_delay(1);\
669 outl(i, DE4X5_BMR);\
670 de4x5_ms_delay(1);\
671 for (i=0;i<5;i++) {inl(DE4X5_BMR); de4x5_ms_delay(1);}\
672 de4x5_ms_delay(1);\
673 }
674
675
676
677
678
679
680
681 int
682 de4x5_probe(struct device *dev)
683 {
684 int tmp = num_de4x5s, status = -ENODEV;
685 u_long iobase = dev->base_addr;
686
687 eisa_probe(dev, iobase);
688 pci_probe(dev, iobase);
689
690 if ((tmp == num_de4x5s) && (iobase != 0) && loading_module) {
691 printk("%s: de4x5_probe() cannot find device at 0x%04lx.\n", dev->name,
692 iobase);
693 }
694
695
696
697
698
699 for (; (dev->priv == NULL) && (dev->next != NULL); dev = dev->next);
700
701 if (dev->priv) status = 0;
702 if (iobase == 0) autoprobed = 1;
703
704 return status;
705 }
706
707 static int
708 de4x5_hw_init(struct device *dev, u_long iobase)
709 {
710 struct bus_type *lp = &bus;
711 int tmpbus, tmpchs, status=0;
712 int i, media = *((char *)&(lp->srom) + *((char *)&(lp->srom) + 19) * 3);
713 char *tmp;
714
715
716 if (lp->chipset == DC21041) {
717 outl(0, PCI_CFDA);
718 de4x5_ms_delay(10);
719 }
720
721 RESET_DE4X5;
722
723 if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
724 return -ENXIO;
725 }
726
727
728
729
730 if (lp->bus == PCI) {
731 PCI_signature(name, lp);
732 } else {
733 EISA_signature(name, EISA_ID0);
734 }
735
736 if (*name == '\0') {
737 return -ENXIO;
738 }
739
740 dev->base_addr = iobase;
741 if (lp->bus == EISA) {
742 printk("%s: %s at %04lx (EISA slot %ld)",
743 dev->name, name, iobase, ((iobase>>12)&0x0f));
744 } else {
745 printk("%s: %s at %04lx (PCI bus %d, device %d)", dev->name, name,
746 iobase, lp->bus_num, lp->device);
747 }
748
749 printk(", h/w address ");
750 status = get_hw_addr(dev);
751 for (i = 0; i < ETH_ALEN - 1; i++) {
752 printk("%2.2x:", dev->dev_addr[i]);
753 }
754 printk("%2.2x,\n", dev->dev_addr[i]);
755
756 tmpbus = lp->bus;
757 tmpchs = lp->chipset;
758
759 if (status != 0) {
760 printk(" which has an Ethernet PROM CRC error.\n");
761 return -ENXIO;
762 } else {
763 struct de4x5_private *lp;
764
765
766
767
768
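/* Allocate the private area with ALIGN bytes of slack so it can be rounded up to a cache-line boundary; the original (unaligned) pointer is kept in cache.priv for the eventual kfree(). */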
769 dev->priv = (void *) kmalloc(sizeof(struct de4x5_private) + ALIGN,
770 GFP_KERNEL);
771 if (dev->priv == NULL) {
772 return -ENOMEM;
773 }
774
775
776
777
778 tmp = dev->priv;
779 dev->priv = (void *)(((u_long)dev->priv + ALIGN) & ~ALIGN);
780 lp = (struct de4x5_private *)dev->priv;
781 memset(dev->priv, 0, sizeof(struct de4x5_private));
782 lp->bus = tmpbus;
783 lp->chipset = tmpchs;
784 lp->cache.priv = tmp;
785
786
787
788
789 if (media & MEDIA_MII) {
790 if (!mii_get_phy(dev)) {
791 printk("%s: MII search failed, no device found when one was expected\n", dev->name);
792 return -ENXIO;
793 }
794 } else {
795 mii_get_phy(dev);
796 }
797
798
799
800
801 if (de4x5_autosense & AUTO) {
802 lp->autosense = AUTO;
803 } else {
804 if (lp->chipset != DC21140) {
805 if ((lp->chipset == DC21040) && (de4x5_autosense & TP_NW)) {
806 de4x5_autosense = TP;
807 }
808 if ((lp->chipset == DC21041) && (de4x5_autosense & BNC_AUI)) {
809 de4x5_autosense = BNC;
810 }
811 lp->autosense = de4x5_autosense & 0x001f;
812 } else {
813 lp->autosense = de4x5_autosense & 0x00c0;
814 }
815 }
816
817 sprintf(lp->adapter_name,"%s (%s)", name, dev->name);
818
819
820
821
822
823 #ifndef __alpha__
824 for (i=0; i<NUM_RX_DESC; i++) {
825 lp->rx_ring[i].status = 0;
826 lp->rx_ring[i].des1 = RX_BUFF_SZ;
827 lp->rx_ring[i].buf = 0;
828 lp->rx_ring[i].next = 0;
829 lp->rx_skb[i] = (struct sk_buff *) 1;
830 }
831
832 #else
833 if ((tmp = (void *)kmalloc(RX_BUFF_SZ * NUM_RX_DESC + ALIGN,
834 GFP_KERNEL)) == NULL) {
835 kfree(lp->cache.priv);
836 return -ENOMEM;
837 }
838
839 lp->cache.buf = tmp;
840 tmp = (char *)(((u_long) tmp + ALIGN) & ~ALIGN);
841 for (i=0; i<NUM_RX_DESC; i++) {
842 lp->rx_ring[i].status = 0;
843 lp->rx_ring[i].des1 = RX_BUFF_SZ;
844 lp->rx_ring[i].buf = virt_to_bus(tmp + i * RX_BUFF_SZ);
845 lp->rx_ring[i].next = 0;
846 lp->rx_skb[i] = (struct sk_buff *) 1;
847 }
848 #endif
849
850 barrier();
851
852 request_region(iobase, (lp->bus == PCI ? DE4X5_PCI_TOTAL_SIZE :
853 DE4X5_EISA_TOTAL_SIZE),
854 lp->adapter_name);
855
856 lp->rxRingSize = NUM_RX_DESC;
857 lp->txRingSize = NUM_TX_DESC;
858
859
860 lp->rx_ring[lp->rxRingSize - 1].des1 |= RD_RER;
861 lp->tx_ring[lp->txRingSize - 1].des1 |= TD_TER;
862
863
864 outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
865 outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
866
867
868 lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM;
869 lp->irq_en = IMR_NIM | IMR_AIM;
870
871
872 create_packet(dev, lp->frame, sizeof(lp->frame));
873
874
875 lp->state = CLOSED;
876
877 printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
878 ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
879
880 }
881
882 if (de4x5_debug > 0) {
883 printk(version);
884 }
885
886
887 dev->open = &de4x5_open;
888 dev->hard_start_xmit = &de4x5_queue_pkt;
889 dev->stop = &de4x5_close;
890 dev->get_stats = &de4x5_get_stats;
891 dev->set_multicast_list = &set_multicast_list;
892 dev->do_ioctl = &de4x5_ioctl;
893
894 dev->mem_start = 0;
895
896
897 ether_setup(dev);
898
899
900 if (lp->chipset == DC21041) {
901 outl(0, DE4X5_SICR);
902 outl(CFDA_PSM, PCI_CFDA);
903 }
904
905 return status;
906 }
907
908
909 static int
910 de4x5_open(struct device *dev)
911 {
912 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
913 u_long iobase = dev->base_addr;
914 int i, status = 0;
915 s32 omr;
916
917
918 for (i=0; i<lp->rxRingSize; i++) {
919 if (de4x5_alloc_rx_buff(dev, i, 0) == NULL) {
920 de4x5_free_rx_buffs(dev);
921 return -EAGAIN;
922 }
923 }
924
925
926
927
928 if (lp->chipset == DC21041) {
929 outl(0, PCI_CFDA);
930 de4x5_ms_delay(10);
931 }
932
933
934
935
936 status = de4x5_init(dev);
937
938 lp->state = OPEN;
939 de4x5_dbg_open(dev);
940
941 if (request_irq(dev->irq, (void *)de4x5_interrupt, SA_SHIRQ,
942 lp->adapter_name, dev)) {
943 printk("de4x5_open(): Requested IRQ%d is busy\n",dev->irq);
944 status = -EAGAIN;
945 } else {
946 dev->tbusy = 0;
947 dev->start = 1;
948 dev->interrupt = UNMASK_INTERRUPTS;
949 dev->trans_start = jiffies;
950
951 START_DE4X5;
952
953 de4x5_setup_intr(dev);
954 }
955
956 if (de4x5_debug > 1) {
957 printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
958 printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
959 printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
960 printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
961 printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
962 printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
963 printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
964 printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
965 }
966
967 MOD_INC_USE_COUNT;
968
969 return status;
970 }
971
972
973
974
975
976
977
978
979
980 static int
981 de4x5_init(struct device *dev)
982 {
983
984 set_bit(0, (void *)&dev->tbusy);
985
986 de4x5_sw_reset(dev);
987
988
989 autoconf_media(dev);
990
991 return 0;
992 }
993
994 static int
995 de4x5_sw_reset(struct device *dev)
996 {
997 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
998 u_long iobase = dev->base_addr;
999 int i, j, status = 0;
1000 s32 bmr, omr;
1001
1002
1003 if (lp->phy[lp->active].id == 0) {
1004 de4x5_switch_to_srl(dev);
1005 } else {
1006 de4x5_switch_to_mii(dev);
1007 }
1008
1009
1010
1011
1012
1013
1014 bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | CACHE_ALIGN;
1015 outl(bmr, DE4X5_BMR);
1016
1017 omr = inl(DE4X5_OMR) & ~OMR_PR;
1018 if (lp->chipset == DC21140) {
1019 omr |= (OMR_SDP | OMR_SB);
1020 }
1021 lp->setup_f = PERFECT;
1022 outl(virt_to_bus(lp->rx_ring), DE4X5_RRBA);
1023 outl(virt_to_bus(lp->tx_ring), DE4X5_TRBA);
1024
1025 lp->rx_new = lp->rx_old = 0;
1026 lp->tx_new = lp->tx_old = 0;
1027
1028 for (i = 0; i < lp->rxRingSize; i++) {
1029 lp->rx_ring[i].status = R_OWN;
1030 }
1031
1032 for (i = 0; i < lp->txRingSize; i++) {
1033 lp->tx_ring[i].status = 0;
1034 }
1035
1036 barrier();
1037
1038
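/* Load the perfect-filtering setup frame into the TX ring and poll its descriptor for up to 500 ms until the chip has accepted it. */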
1039 SetMulticastFilter(dev);
1040
1041 load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, NULL);
1042 outl(omr|OMR_ST, DE4X5_OMR);
1043
1044
1045 sti();
1046 for (j=0, i=0;(i<500) && (j==0);i++) {
1047 udelay(1000);
1048 if (lp->tx_ring[lp->tx_new].status >= 0) j=1;
1049 }
1050 outl(omr, DE4X5_OMR);
1051
1052 if (j == 0) {
1053 printk("%s: Setup frame timed out, status %08x\n", dev->name,
1054 inl(DE4X5_STS));
1055 status = -EIO;
1056 }
1057
1058 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
1059 lp->tx_old = lp->tx_new;
1060
1061 return status;
1062 }
1063
1064
1065
1066
1067 static int
1068 de4x5_queue_pkt(struct sk_buff *skb, struct device *dev)
1069 {
1070 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1071 u_long iobase = dev->base_addr;
1072 int status = 0;
1073
1074 if (skb == NULL) {
1075 dev_tint(dev);
1076 return 0;
1077 }
1078
1079 if (lp->tx_enable == NO) {
1080 return -1;
1081 }
1082
1083
1084
1085
1086
1087
1088 set_bit(0, (void*)&dev->tbusy);
1089 cli();
1090 de4x5_tx(dev);
1091 sti();
1092
1093
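/* If the transmitter is busy, or a stale skb is still attached to the next descriptor, park the packet in the driver cache; it will be re-queued later from the interrupt handler. */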
1094 if (dev->tbusy || lp->tx_skb[lp->tx_new]) {
1095 if (dev->interrupt) {
1096 de4x5_putb_cache(dev, skb);
1097 } else {
1098 de4x5_put_cache(dev, skb);
1099 }
1100 if (de4x5_debug > 1) {
1101 printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%ld\n lostMedia:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), dev->tbusy, lp->lostMedia, inl(DE4X5_IMR), inl(DE4X5_OMR), (lp->tx_skb[lp->tx_new] ? "YES" : "NO"));
1102 }
1103 } else if (skb->len > 0) {
1104
1105 if (lp->cache.skb && !dev->interrupt) {
1106 de4x5_put_cache(dev, skb);
1107 skb = de4x5_get_cache(dev);
1108 }
1109
1110 while (skb && !dev->tbusy && !lp->tx_skb[lp->tx_new]) {
1111 set_bit(0, (void*)&dev->tbusy);
1112 cli();
1113 if (TX_BUFFS_AVAIL) {
1114 load_packet(dev, skb->data,
1115 TD_IC | TD_LS | TD_FS | skb->len, skb);
1116 outl(POLL_DEMAND, DE4X5_TPD);
1117
1118 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
1119 dev->trans_start = jiffies;
1120
1121 if (TX_BUFFS_AVAIL) {
1122 dev->tbusy = 0;
1123 }
1124 skb = de4x5_get_cache(dev);
1125 }
1126 sti();
1127 }
1128 if (skb && (dev->tbusy || lp->tx_skb[lp->tx_new])) {
1129 de4x5_putb_cache(dev, skb);
1130 }
1131 }
1132
1133 return status;
1134 }
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147 static void
1148 de4x5_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1149 {
1150 struct device *dev = (struct device *)dev_id;
1151 struct de4x5_private *lp;
1152 s32 imr, omr, sts, limit;
1153 u_long iobase;
1154
1155 if (dev == NULL) {
1156 printk ("de4x5_interrupt(): irq %d for unknown device.\n", irq);
1157 return;
1158 }
1159 lp = (struct de4x5_private *)dev->priv;
1160 iobase = dev->base_addr;
1161
1162 if (dev->interrupt)
1163 printk("%s: Re-entering the interrupt handler.\n", dev->name);
1164
1165 DISABLE_IRQs;
1166 dev->interrupt = MASK_INTERRUPTS;
1167
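/* Service at most 8 rounds of interrupt causes per call, acknowledging (write-one-to-clear) each status before handling it. */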
1168 for (limit=0; limit<8; limit++) {
1169 sts = inl(DE4X5_STS);
1170 outl(sts, DE4X5_STS);
1171
1172 if (!(sts & lp->irq_mask)) break;
1173
1174 if (sts & (STS_RI | STS_RU))
1175 de4x5_rx(dev);
1176
1177 if (sts & (STS_TI | STS_TU))
1178 de4x5_tx(dev);
1179
1180 if (sts & STS_LNF) {
1181 lp->lostMedia = LOST_MEDIA_THRESHOLD + 1;
1182 lp->irq_mask &= ~IMR_LFM;
1183 }
1184
1185 if (sts & STS_UNF) {
1186 de4x5_txur(dev);
1187 }
1188
1189 if (sts & STS_SE) {
1190 STOP_DE4X5;
1191 printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
1192 dev->name, sts);
1193 return;
1194 }
1195 }
1196
1197
1198 while (lp->cache.skb && !dev->tbusy && lp->tx_enable) {
1199 de4x5_queue_pkt(de4x5_get_cache(dev), dev);
1200 }
1201
1202 dev->interrupt = UNMASK_INTERRUPTS;
1203 ENABLE_IRQs;
1204
1205 return;
1206 }
1207
1208 static int
1209 de4x5_rx(struct device *dev)
1210 {
1211 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1212 int i, entry;
1213 s32 status;
1214 char *buf;
1215
1216 for (entry=lp->rx_new; lp->rx_ring[entry].status>=0;entry=lp->rx_new) {
1217 status = lp->rx_ring[entry].status;
1218
1219 if (status & RD_FS) {
1220 lp->rx_old = entry;
1221 }
1222
1223 if (status & RD_LS) {
1224 lp->linkOK++;
1225 if (status & RD_ES) {
1226 lp->stats.rx_errors++;
1227 if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
1228 if (status & RD_CE) lp->stats.rx_crc_errors++;
1229 if (status & RD_OF) lp->stats.rx_fifo_errors++;
1230 if (status & RD_TL) lp->stats.rx_length_errors++;
1231 if (status & RD_RF) lp->pktStats.rx_runt_frames++;
1232 if (status & RD_CS) lp->pktStats.rx_collision++;
1233 if (status & RD_DB) lp->pktStats.rx_dribble++;
1234 if (status & RD_OF) lp->pktStats.rx_overflow++;
1235 } else {
1236 struct sk_buff *skb;
1237 short pkt_len = (short)(lp->rx_ring[entry].status >> 16) - 4;
1238
1239 if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
1240 printk("%s: Insufficient memory; nuking packet.\n",
1241 dev->name);
1242 lp->stats.rx_dropped++;
1243 break;
1244 }
1245 de4x5_dbg_rx(skb, pkt_len);
1246
1247
1248 skb->protocol=eth_type_trans(skb,dev);
1249 netif_rx(skb);
1250
1251
1252 lp->stats.rx_packets++;
1253 for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
1254 if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
1255 lp->pktStats.bins[i]++;
1256 i = DE4X5_PKT_STAT_SZ;
1257 }
1258 }
1259 buf = skb->data;
1260 if (buf[0] & 0x01) {
1261 if ((*(s32 *)&buf[0] == -1) && (*(s16 *)&buf[4] == -1)) {
1262 lp->pktStats.broadcast++;
1263 } else {
1264 lp->pktStats.multicast++;
1265 }
1266 } else if ((*(s32 *)&buf[0] == *(s32 *)&dev->dev_addr[0]) &&
1267 (*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) {
1268 lp->pktStats.unicast++;
1269 }
1270
1271 lp->pktStats.bins[0]++;
1272 if (lp->pktStats.bins[0] == 0) {
1273 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
1274 }
1275 }
1276
1277
1278 for (;lp->rx_old!=entry;lp->rx_old=(++lp->rx_old)%lp->rxRingSize) {
1279 lp->rx_ring[lp->rx_old].status = R_OWN;
1280 barrier();
1281 }
1282 lp->rx_ring[entry].status = R_OWN;
1283 barrier();
1284 }
1285
1286
1287
1288
1289 lp->rx_new = (++lp->rx_new) % lp->rxRingSize;
1290 }
1291
1292 return 0;
1293 }
1294
1295
1296
1297
1298 static int
1299 de4x5_tx(struct device *dev)
1300 {
1301 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1302 u_long iobase = dev->base_addr;
1303 int entry;
1304 s32 status;
1305
1306 for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
1307 status = lp->tx_ring[entry].status;
1308 if (status < 0) {
1309 break;
1310 } else if (status != 0x7fffffff) {
1311 if (status & TD_ES) {
1312 lp->stats.tx_errors++;
1313 if (status & TD_NC) lp->stats.tx_carrier_errors++;
1314 if (status & TD_LC) lp->stats.tx_window_errors++;
1315 if (status & TD_UF) lp->stats.tx_fifo_errors++;
1316 if (status & TD_LC) lp->stats.collisions++;
1317 if (status & TD_EC) lp->pktStats.excessive_collisions++;
1318 if (status & TD_DE) lp->stats.tx_aborted_errors++;
1319
1320 if (status & (TD_LO | TD_NC | TD_EC | TD_LF)) {
1321 lp->lostMedia++;
1322 }
1323 if (TX_PKT_PENDING) {
1324 outl(POLL_DEMAND, DE4X5_TPD);
1325 }
1326 } else {
1327 lp->stats.tx_packets++;
1328 lp->lostMedia = 0;
1329 lp->linkOK++;
1330 }
1331
1332 if (lp->tx_skb[entry] != NULL) {
1333 dev_kfree_skb(lp->tx_skb[entry], FREE_WRITE);
1334 lp->tx_skb[entry] = NULL;
1335 }
1336 }
1337
1338
1339 lp->tx_old = (++lp->tx_old) % lp->txRingSize;
1340 }
1341
1342 if (TX_BUFFS_AVAIL && dev->tbusy) {
1343 dev->tbusy = 0;
1344 if (dev->interrupt) mark_bh(NET_BH);
1345 }
1346
1347 return 0;
1348 }
1349
1350 static int
1351 de4x5_ast(struct device *dev)
1352 {
1353 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1354 int next_tick = DE4X5_AUTOSENSE_MS;
1355
1356 disable_ast(dev);
1357
1358 if (lp->chipset == DC21140) {
1359 next_tick = dc21140m_autoconf(dev);
1360 } else if (lp->chipset == DC21041) {
1361 next_tick = dc21041_autoconf(dev);
1362 } else if (lp->chipset == DC21040) {
1363 next_tick = dc21040_autoconf(dev);
1364 }
1365 lp->linkOK = 0;
1366 enable_ast(dev, next_tick);
1367
1368 return 0;
1369 }
1370
1371 static int
1372 de4x5_txur(struct device *dev)
1373 {
1374 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1375 u_long iobase = dev->base_addr;
1376 int omr;
1377
1378 omr = inl(DE4X5_OMR);
1379 if (!(omr & OMR_SF)) {
1380 omr &= ~(OMR_ST|OMR_SR);
1381 outl(omr, DE4X5_OMR);
1382 while (inl(DE4X5_STS) & STS_TS);
1383 if ((omr & OMR_TR) < OMR_TR) {
1384 omr += 0x4000;
1385 } else {
1386 omr |= OMR_SF;
1387 }
1388 outl(omr | OMR_ST | OMR_SR, DE4X5_OMR);
1389 }
1390
1391 return 0;
1392 }
1393
1394 static int
1395 de4x5_close(struct device *dev)
1396 {
1397 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1398 u_long iobase = dev->base_addr;
1399 s32 imr, omr;
1400
1401 disable_ast(dev);
1402 dev->start = 0;
1403 dev->tbusy = 1;
1404
1405 if (de4x5_debug > 1) {
1406 printk("%s: Shutting down ethercard, status was %8.8x.\n",
1407 dev->name, inl(DE4X5_STS));
1408 }
1409
1410
1411
1412
1413 DISABLE_IRQs;
1414 STOP_DE4X5;
1415
1416
1417 free_irq(dev->irq, dev);
1418 lp->state = CLOSED;
1419
1420
1421 de4x5_free_rx_buffs(dev);
1422 de4x5_free_tx_buffs(dev);
1423
1424 MOD_DEC_USE_COUNT;
1425
1426
1427 if (lp->chipset == DC21041) {
1428 outl(0, DE4X5_SICR);
1429 outl(CFDA_PSM, PCI_CFDA);
1430 }
1431
1432 return 0;
1433 }
1434
1435 static struct enet_statistics *
1436 de4x5_get_stats(struct device *dev)
1437 {
1438 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1439 u_long iobase = dev->base_addr;
1440
1441 lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
1442
1443 return &lp->stats;
1444 }
1445
1446 static void
1447 load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb)
1448 {
1449 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1450
1451 lp->tx_ring[lp->tx_new].buf = virt_to_bus(buf);
1452 lp->tx_ring[lp->tx_new].des1 &= TD_TER;
1453 lp->tx_ring[lp->tx_new].des1 |= flags;
1454 lp->tx_skb[lp->tx_new] = skb;
1455 barrier();
1456 lp->tx_ring[lp->tx_new].status = T_OWN;
1457 barrier();
1458
1459 return;
1460 }
1461
1462
1463
1464
1465 static void
1466 set_multicast_list(struct device *dev)
1467 {
1468 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1469 u_long iobase = dev->base_addr;
1470
1471
1472 if (lp->state == OPEN) {
1473 if (dev->flags & IFF_PROMISC) {
1474 u32 omr;
1475 omr = inl(DE4X5_OMR);
1476 omr |= OMR_PR;
1477 outl(omr, DE4X5_OMR);
1478 } else {
1479 SetMulticastFilter(dev);
1480 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
1481 SETUP_FRAME_LEN, NULL);
1482
1483 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
1484 outl(POLL_DEMAND, DE4X5_TPD);
1485 dev->trans_start = jiffies;
1486 }
1487 }
1488
1489 return;
1490 }
1491
1492
1493
1494
1495
1496
1497 static void
1498 SetMulticastFilter(struct device *dev)
1499 {
1500 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1501 struct dev_mc_list *dmi=dev->mc_list;
1502 u_long iobase = dev->base_addr;
1503 int i, j, bit, byte;
1504 u16 hashcode;
1505 u32 omr, crc, poly = CRC_POLYNOMIAL_LE;
1506 char *pa;
1507 unsigned char *addrs;
1508
1509 omr = inl(DE4X5_OMR);
1510 omr &= ~(OMR_PR | OMR_PM);
1511 pa = build_setup_frame(dev, ALL);
1512
1513 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 14)) {
1514 omr |= OMR_PM;
1515 } else if (lp->setup_f == HASH_PERF) {
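/* Imperfect (hash) filtering: compute the little-endian CRC-32 of each multicast address and set the matching bit in the hash table carried inside the setup frame. */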
1516 for (i=0;i<dev->mc_count;i++) {
1517 addrs=dmi->dmi_addr;
1518 dmi=dmi->next;
1519 if ((*addrs & 0x01) == 1) {
1520 crc = 0xffffffff;
1521 for (byte=0;byte<ETH_ALEN;byte++) {
1522
1523 for (bit = *addrs++,j=0;j<8;j++, bit>>=1) {
1524 crc = (crc >> 1) ^ (((crc ^ bit) & 0x01) ? poly : 0);
1525 }
1526 }
1527 hashcode = crc & HASH_BITS;
1528
1529 byte = hashcode >> 3;
1530 bit = 1 << (hashcode & 0x07);
1531
1532 byte <<= 1;
1533 if (byte & 0x02) {
1534 byte -= 1;
1535 }
1536 lp->setup_frame[byte] |= bit;
1537 }
1538 }
1539 } else {
1540 for (j=0; j<dev->mc_count; j++) {
1541 addrs=dmi->dmi_addr;
1542 dmi=dmi->next;
1543 for (i=0; i<ETH_ALEN; i++) {
1544 *(pa + (i&1)) = *addrs++;
1545 if (i & 0x01) pa += 4;
1546 }
1547 }
1548 }
1549 outl(omr, DE4X5_OMR);
1550
1551 return;
1552 }
1553
1554
1555
1556
1557
1558 static void
1559 eisa_probe(struct device *dev, u_long ioaddr)
1560 {
1561 int i, maxSlots, status;
1562 u_short vendor, device;
1563 s32 cfid;
1564 u_long iobase;
1565 struct bus_type *lp = &bus;
1566 char name[DE4X5_STRLEN];
1567
1568 if (!ioaddr && autoprobed) return;
1569
1570 lp->bus = EISA;
1571
1572 if (ioaddr == 0) {
1573 iobase = EISA_SLOT_INC;
1574 i = 1;
1575 maxSlots = MAX_EISA_SLOTS;
1576 } else {
1577 iobase = ioaddr;
1578 i = (ioaddr >> 12);
1579 maxSlots = i + 1;
1580 }
1581
1582 for (status = -ENODEV; (i<maxSlots) && (dev!=NULL); i++, iobase+=EISA_SLOT_INC) {
1583 if (EISA_signature(name, EISA_ID)) {
1584 cfid = inl(PCI_CFID);
1585 device = (u_short)(cfid >> 16);
1586 vendor = (u_short) cfid;
1587
1588 lp->chipset = device;
1589 DevicePresent(EISA_APROM);
1590
1591 outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
1592 outl(0x00004000, PCI_CFLT);
1593 outl(iobase, PCI_CBIO);
1594
1595 if (check_region(iobase, DE4X5_EISA_TOTAL_SIZE) == 0) {
1596 if ((dev = alloc_device(dev, iobase)) != NULL) {
1597 if ((status = de4x5_hw_init(dev, iobase)) == 0) {
1598 num_de4x5s++;
1599 }
1600 num_eth++;
1601 }
1602 } else if (autoprobed) {
1603 printk("%s: region already allocated at 0x%04lx.\n", dev->name,iobase);
1604 }
1605 }
1606 }
1607
1608 return;
1609 }
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623 #define PCI_DEVICE (dev_num << 3)
1624 #define PCI_LAST_DEV 32
1625
1626 static void
1627 pci_probe(struct device *dev, u_long ioaddr)
1628 {
1629 u_char irq;
1630 u_char pb, pbus, dev_num, dnum, dev_fn;
1631 u_short vendor, device, index, status;
1632 u_int class = DE4X5_CLASS_CODE;
1633 u_int iobase;
1634 struct bus_type *lp = &bus;
1635
1636 if ((!ioaddr || !loading_module) && autoprobed) return;
1637
1638 if (!pcibios_present()) return;
1639
1640 lp->bus = PCI;
1641
1642 if (ioaddr < 0x1000) {
1643 pbus = (u_short)(ioaddr >> 8);
1644 dnum = (u_short)(ioaddr & 0xff);
1645 } else {
1646 pbus = 0;
1647 dnum = 0;
1648 }
1649
1650 for (index=0;
1651 (pcibios_find_class(class, index, &pb, &dev_fn)!= PCIBIOS_DEVICE_NOT_FOUND);
1652 index++) {
1653 dev_num = PCI_SLOT(dev_fn);
1654
1655 if ((!pbus && !dnum) || ((pbus == pb) && (dnum == dev_num))) {
1656 pcibios_read_config_word(pb, PCI_DEVICE, PCI_VENDOR_ID, &vendor);
1657 pcibios_read_config_word(pb, PCI_DEVICE, PCI_DEVICE_ID, &device);
1658 if (!(is_DC21040 || is_DC21041 || is_DC21140)) continue;
1659
1660
1661 lp->device = dev_num;
1662 lp->bus_num = pb;
1663
1664
1665 lp->chipset = device;
1666
1667
1668 pcibios_read_config_dword(pb, PCI_DEVICE, PCI_BASE_ADDRESS_0, &iobase);
1669 iobase &= CBIO_MASK;
1670
1671
1672 pcibios_read_config_byte(pb, PCI_DEVICE, PCI_INTERRUPT_LINE, &irq);
1673 if ((irq == 0) || (irq == (u_char) 0xff)) continue;
1674
1675
1676 pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
1677 if (!(status & PCI_COMMAND_IO)) continue;
1678 if (!(status & PCI_COMMAND_MASTER)) {
1679 status |= PCI_COMMAND_MASTER;
1680 pcibios_write_config_word(pb, PCI_DEVICE, PCI_COMMAND, status);
1681 pcibios_read_config_word(pb, PCI_DEVICE, PCI_COMMAND, &status);
1682 }
1683 if (!(status & PCI_COMMAND_MASTER)) continue;
1684
1685 DevicePresent(DE4X5_APROM);
1686 if (check_region(iobase, DE4X5_PCI_TOTAL_SIZE) == 0) {
1687 if ((dev = alloc_device(dev, iobase)) != NULL) {
1688 dev->irq = irq;
1689 if ((status = de4x5_hw_init(dev, iobase)) == 0) {
1690 num_de4x5s++;
1691 }
1692 num_eth++;
1693 }
1694 } else if (autoprobed) {
1695 printk("%s: region already allocated at 0x%04x.\n", dev->name,
1696 (u_short)iobase);
1697 }
1698 }
1699 }
1700
1701 return;
1702 }
1703
1704
1705
1706
1707
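/* Find a free slot in the kernel device list (base_addr of 0 or DE4X5_NDA); if the list is exhausted, kmalloc a new struct device named ethN and chain it on. */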
1708 static struct device *
1709 alloc_device(struct device *dev, u_long iobase)
1710 {
1711 int addAutoProbe = 0;
1712 struct device *tmp = NULL, *ret;
1713 int (*init)(struct device *) = NULL;
1714
1715 if (loading_module) return dev;
1716
1717
1718
1719
1720 while (dev->next != NULL) {
1721 if ((dev->base_addr == DE4X5_NDA) || (dev->base_addr == 0)) break;
1722 dev = dev->next;
1723 num_eth++;
1724 }
1725
1726
1727
1728
1729
1730 if ((dev->base_addr == 0) && (num_de4x5s > 0)) {
1731 addAutoProbe++;
1732 tmp = dev->next;
1733 init = dev->init;
1734 }
1735
1736
1737
1738
1739
1740 if ((dev->next == NULL) &&
1741 !((dev->base_addr == DE4X5_NDA) || (dev->base_addr == 0))) {
1742 dev->next = (struct device *)kmalloc(sizeof(struct device)+8, GFP_KERNEL);
1743 dev = dev->next;
1744 if (dev == NULL) {
1745 printk("eth%d: Device not initialised, insufficient memory\n", num_eth);
1746 } else {
1747
1748
1749
1750
1751
1752 dev->name = (char *)(dev + 1);
1753 if (num_eth > 9999) {
1754 sprintf(dev->name,"eth????");
1755 } else {
1756 sprintf(dev->name,"eth%d", num_eth);
1757 }
1758 dev->base_addr = iobase;
1759 dev->next = NULL;
1760 dev->init = &de4x5_probe;
1761 num_de4x5s++;
1762 }
1763 }
1764 ret = dev;
1765
1766
1767
1768
1769
1770 if (ret != NULL) {
1771 if (addAutoProbe) {
1772 for (; (tmp->next!=NULL) && (tmp->base_addr!=DE4X5_NDA); tmp=tmp->next);
1773
1774
1775
1776
1777
1778 if ((tmp->next == NULL) && !(tmp->base_addr == DE4X5_NDA)) {
1779 tmp->next = (struct device *)kmalloc(sizeof(struct device) + 8,
1780 GFP_KERNEL);
1781 tmp = tmp->next;
1782 if (tmp == NULL) {
1783 printk("%s: Insufficient memory to extend the device list.\n",
1784 dev->name);
1785 } else {
1786
1787
1788
1789
1790
1791 tmp->name = (char *)(tmp + 1);
1792 if (num_eth > 9999) {
1793 sprintf(tmp->name,"eth????");
1794 } else {
1795 sprintf(tmp->name,"eth%d", num_eth);
1796 }
1797 tmp->base_addr = 0;
1798 tmp->next = NULL;
1799 tmp->init = init;
1800 }
1801 } else {
1802 tmp->base_addr = 0;
1803 }
1804 }
1805 }
1806
1807 return ret;
1808 }
1809
1810
1811
1812
1813
1814
1815
1816
1817 static int
1818 autoconf_media(struct device *dev)
1819 {
1820 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1821 u_long iobase = dev->base_addr;
1822 int next_tick = DE4X5_AUTOSENSE_MS;
1823
1824 lp->linkOK = 0;
1825 lp->c_media = AUTO;
1826 disable_ast(dev);
1827 inl(DE4X5_MFC);
1828 lp->media = INIT;
1829 if (lp->chipset == DC21040) {
1830 next_tick = dc21040_autoconf(dev);
1831 } else if (lp->chipset == DC21041) {
1832 next_tick = dc21041_autoconf(dev);
1833 } else if (lp->chipset == DC21140) {
1834 next_tick = dc21140m_autoconf(dev);
1835 }
1836 enable_ast(dev, next_tick);
1837
1838 return (lp->media);
1839 }
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
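/* DC21040 media selection state machine: try TP, then BNC/AUI, then the external SIA; an established link that goes quiet drops into the corresponding *_SUSPECT state for re-testing. */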
1853 static int
1854 dc21040_autoconf(struct device *dev)
1855 {
1856 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1857 u_long iobase = dev->base_addr;
1858 int next_tick = DE4X5_AUTOSENSE_MS;
1859 s32 imr;
1860
1861 switch (lp->media) {
1862 case INIT:
1863 DISABLE_IRQs;
1864 lp->tx_enable = NO;
1865 lp->timeout = -1;
1866 de4x5_save_skbs(dev);
1867 if ((lp->autosense == AUTO) || (lp->autosense == TP)) {
1868 lp->media = TP;
1869 } else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) {
1870 lp->media = BNC_AUI;
1871 } else if (lp->autosense == EXT_SIA) {
1872 lp->media = EXT_SIA;
1873 } else {
1874 lp->media = NC;
1875 }
1876 lp->local_state = 0;
1877 next_tick = dc21040_autoconf(dev);
1878 break;
1879
1880 case TP:
1881 next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
1882 TP_SUSPECT, test_tp);
1883 break;
1884
1885 case TP_SUSPECT:
1886 next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
1887 break;
1888
1889 case BNC:
1890 case AUI:
1891 case BNC_AUI:
1892 next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
1893 BNC_AUI_SUSPECT, ping_media);
1894 break;
1895
1896 case BNC_AUI_SUSPECT:
1897 next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
1898 break;
1899
1900 case EXT_SIA:
1901 next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
1902 NC, EXT_SIA_SUSPECT, ping_media);
1903 break;
1904
1905 case EXT_SIA_SUSPECT:
1906 next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
1907 break;
1908
1909 case NC:
1910 #ifndef __alpha__
1911 reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
1912 #else
1913
1914 reset_init_sia(dev, 0x8f09, 0x0705, 0x0006);
1915 #endif
1916 if (lp->media != lp->c_media) {
1917 de4x5_dbg_media(dev);
1918 lp->c_media = lp->media;
1919 }
1920 lp->media = INIT;
1921 lp->tx_enable = NO;
1922 break;
1923 }
1924
1925 return next_tick;
1926 }
1927
1928 static int
1929 dc21040_state(struct device *dev, int csr13, int csr14, int csr15, int timeout,
1930 int next_state, int suspect_state,
1931 int (*fn)(struct device *, int))
1932 {
1933 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1934 int next_tick = DE4X5_AUTOSENSE_MS;
1935 int linkBad;
1936
1937 switch (lp->local_state) {
1938 case 0:
1939 reset_init_sia(dev, csr13, csr14, csr15);
1940 lp->local_state++;
1941 next_tick = 500;
1942 break;
1943
1944 case 1:
1945 if (!lp->tx_enable) {
1946 linkBad = fn(dev, timeout);
1947 if (linkBad < 0) {
1948 next_tick = linkBad & ~TIMER_CB;
1949 } else {
1950 if (linkBad && (lp->autosense == AUTO)) {
1951 lp->local_state = 0;
1952 lp->media = next_state;
1953 } else {
1954 de4x5_init_connection(dev);
1955 }
1956 }
1957 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
1958 lp->media = suspect_state;
1959 next_tick = 3000;
1960 }
1961 break;
1962 }
1963
1964 return next_tick;
1965 }
1966
1967 static int
1968 de4x5_suspect_state(struct device *dev, int timeout, int prev_state,
1969 int (*fn)(struct device *, int),
1970 int (*asfn)(struct device *))
1971 {
1972 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
1973 int next_tick = DE4X5_AUTOSENSE_MS;
1974 int linkBad;
1975
1976 switch (lp->local_state) {
1977 case 1:
1978 if (lp->linkOK && !LOST_MEDIA) {
1979 lp->media = prev_state;
1980 } else {
1981 lp->local_state++;
1982 next_tick = asfn(dev);
1983 }
1984 break;
1985
1986 case 2:
1987 linkBad = fn(dev, timeout);
1988 if (linkBad < 0) {
1989 next_tick = linkBad & ~TIMER_CB;
1990 } else if (!linkBad) {
1991 lp->local_state--;
1992 lp->media = prev_state;
1993 } else {
1994 lp->media = INIT;
1995 }
1996 }
1997
1998 return next_tick;
1999 }
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
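/* DC21041 media selection state machine: probe 10BASE-T (including link-pulse autonegotiation), then AUI, then BNC, falling back to NC when nothing responds. */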
2010 static int
2011 dc21041_autoconf(struct device *dev)
2012 {
2013 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2014 u_long iobase = dev->base_addr;
2015 s32 sts, irqs, irq_mask, imr, omr;
2016 int next_tick = DE4X5_AUTOSENSE_MS;
2017
2018 switch (lp->media) {
2019 case INIT:
2020 DISABLE_IRQs;
2021 lp->tx_enable = NO;
2022 lp->timeout = -1;
2023 de4x5_save_skbs(dev);
2024 if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) {
2025 lp->media = TP;
2026 } else if (lp->autosense == TP) {
2027 lp->media = TP;
2028 } else if (lp->autosense == BNC) {
2029 lp->media = BNC;
2030 } else if (lp->autosense == AUI) {
2031 lp->media = AUI;
2032 } else {
2033 lp->media = NC;
2034 }
2035 lp->local_state = 0;
2036 next_tick = dc21041_autoconf(dev);
2037 break;
2038
2039 case TP_NW:
2040 if (lp->timeout < 0) {
2041 omr = inl(DE4X5_OMR);
2042 outl(omr | OMR_FD, DE4X5_OMR);
2043 }
2044 irqs = STS_LNF | STS_LNP;
2045 irq_mask = IMR_LFM | IMR_LPM;
2046 sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
2047 if (sts < 0) {
2048 next_tick = sts & ~TIMER_CB;
2049 } else {
2050 if (sts & STS_LNP) {
2051 lp->media = ANS;
2052 } else {
2053 lp->media = AUI;
2054 }
2055 next_tick = dc21041_autoconf(dev);
2056 }
2057 break;
2058
2059 case ANS:
2060 if (!lp->tx_enable) {
2061 irqs = STS_LNP;
2062 irq_mask = IMR_LPM;
2063 sts = test_ans(dev, irqs, irq_mask, 3000);
2064 if (sts < 0) {
2065 next_tick = sts & ~TIMER_CB;
2066 } else {
2067 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
2068 lp->media = TP;
2069 next_tick = dc21041_autoconf(dev);
2070 } else {
2071 lp->local_state = 1;
2072 de4x5_init_connection(dev);
2073 }
2074 }
2075 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2076 lp->media = ANS_SUSPECT;
2077 next_tick = 3000;
2078 }
2079 break;
2080
2081 case ANS_SUSPECT:
2082 next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
2083 break;
2084
2085 case TP:
2086 if (!lp->tx_enable) {
2087 if (lp->timeout < 0) {
2088 omr = inl(DE4X5_OMR);
2089 outl(omr & ~OMR_FD, DE4X5_OMR);
2090 }
2091 irqs = STS_LNF | STS_LNP;
2092 irq_mask = IMR_LFM | IMR_LPM;
2093 sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
2094 if (sts < 0) {
2095 next_tick = sts & ~TIMER_CB;
2096 } else {
2097 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
2098 if (inl(DE4X5_SISR) & SISR_NRA) {
2099 lp->media = AUI;
2100 } else {
2101 lp->media = BNC;
2102 }
2103 next_tick = dc21041_autoconf(dev);
2104 } else {
2105 lp->local_state = 1;
2106 de4x5_init_connection(dev);
2107 }
2108 }
2109 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2110 lp->media = TP_SUSPECT;
2111 next_tick = 3000;
2112 }
2113 break;
2114
2115 case TP_SUSPECT:
2116 next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
2117 break;
2118
2119 case AUI:
2120 if (!lp->tx_enable) {
2121 if (lp->timeout < 0) {
2122 omr = inl(DE4X5_OMR);
2123 outl(omr & ~OMR_FD, DE4X5_OMR);
2124 }
2125 irqs = 0;
2126 irq_mask = 0;
2127 sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x000e, 1000);
2128 if (sts < 0) {
2129 next_tick = sts & ~TIMER_CB;
2130 } else {
2131 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
2132 lp->media = BNC;
2133 next_tick = dc21041_autoconf(dev);
2134 } else {
2135 lp->local_state = 1;
2136 de4x5_init_connection(dev);
2137 }
2138 }
2139 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2140 lp->media = AUI_SUSPECT;
2141 next_tick = 3000;
2142 }
2143 break;
2144
2145 case AUI_SUSPECT:
2146 next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
2147 break;
2148
2149 case BNC:
2150 switch (lp->local_state) {
2151 case 0:
2152 if (lp->timeout < 0) {
2153 omr = inl(DE4X5_OMR);
2154 outl(omr & ~OMR_FD, DE4X5_OMR);
2155 }
2156 irqs = 0;
2157 irq_mask = 0;
2158 sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x0006, 1000);
2159 if (sts < 0) {
2160 next_tick = sts & ~TIMER_CB;
2161 } else {
2162 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
2163 lp->media = NC;
2164 } else {
2165 lp->local_state++;
2166 next_tick = dc21041_autoconf(dev);
2167 }
2168 }
2169 break;
2170
2171 case 1:
2172 if (!lp->tx_enable) {
2173 if ((sts = ping_media(dev, 3000)) < 0) {
2174 next_tick = sts & ~TIMER_CB;
2175 } else {
2176 if (sts) {
2177 lp->local_state = 0;
2178 lp->media = NC;
2179 } else {
2180 de4x5_init_connection(dev);
2181 }
2182 }
2183 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2184 lp->media = BNC_SUSPECT;
2185 next_tick = 3000;
2186 }
2187 break;
2188 }
2189 break;
2190
2191 case BNC_SUSPECT:
2192 next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
2193 break;
2194
2195 case NC:
2196 omr = inl(DE4X5_OMR);
2197 outl(omr | OMR_FD, DE4X5_OMR);
2198 reset_init_sia(dev, 0xef01, 0xffff, 0x0008);
2199 if (lp->media != lp->c_media) {
2200 de4x5_dbg_media(dev);
2201 lp->c_media = lp->media;
2202 }
2203 lp->media = INIT;
2204 lp->tx_enable = NO;
2205 break;
2206 }
2207
2208 return next_tick;
2209 }
2210
2211
2212
2213
2214
2215
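/* DC21140 (MII/SYM) media state machine: reset the PHY, then use autonegotiation or speed detection to settle on 100 Mb/s or 10 Mb/s operation. */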
2216 static int
2217 dc21140m_autoconf(struct device *dev)
2218 {
2219 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2220 int ana, anlpa, cap, cr, slnk, sr; u_long iobase = dev->base_addr;
2221 int next_tick = DE4X5_AUTOSENSE_MS;
2222 u_long imr, omr;
2223
2224 switch(lp->media) {
2225 case INIT:
2226 DISABLE_IRQs;
2227 lp->tx_enable = FALSE;
2228 lp->timeout = -1;
2229 if ((next_tick = de4x5_reset_phy(dev)) < 0) {
2230 next_tick &= ~TIMER_CB;
2231 } else {
2232 de4x5_save_skbs(dev);
2233 lp->tmp = MII_SR_ASSC;
2234 SET_10Mb;
2235 if (lp->autosense == _100Mb) {
2236 lp->media = _100Mb;
2237 } else if (lp->autosense == _10Mb) {
2238 lp->media = _10Mb;
2239 } else if ((lp->autosense == AUTO) &&
2240 ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
2241 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
2242 ana &= (de4x5_full_duplex ? ~0 : ~MII_ANA_FDAM);
2243 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2244 lp->media = ANS;
2245 } else if (lp->autosense == AUTO) {
2246 lp->media = SPD_DET;
2247 } else if (is_spd_100(dev) && is_100_up(dev)) {
2248 lp->media = _100Mb;
2249 } else {
2250 lp->media = NC;
2251 }
2252 lp->local_state = 0;
2253 next_tick = dc21140m_autoconf(dev);
2254 }
2255 break;
2256
2257 case ANS:
2258 switch (lp->local_state) {
2259 case 0:
2260 if (lp->timeout < 0) {
2261 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2262 }
2263 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, FALSE, 500);
2264 if (cr < 0) {
2265 next_tick = cr & ~TIMER_CB;
2266 } else {
2267 if (cr) {
2268 lp->local_state = 0;
2269 lp->media = SPD_DET;
2270 } else {
2271 lp->local_state++;
2272 }
2273 next_tick = dc21140m_autoconf(dev);
2274 }
2275 break;
2276
2277 case 1:
2278 if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) {
2279 next_tick = sr & ~TIMER_CB;
2280 } else {
2281 lp->media = SPD_DET;
2282 lp->local_state = 0;
2283 if (sr) {
2284 lp->tmp = MII_SR_ASSC;
2285 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
2286 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2287 if (!(anlpa & MII_ANLPA_RF) &&
2288 (cap = anlpa & MII_ANLPA_TAF & ana)) {
2289 if (cap & MII_ANA_100M) {
2290 de4x5_full_duplex = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE);
2291 lp->media = _100Mb;
2292 } else if (cap & MII_ANA_10M) {
2293 de4x5_full_duplex = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) ? TRUE : FALSE);
2294
2295 lp->media = _10Mb;
2296 }
2297 }
2298 }
2299 next_tick = dc21140m_autoconf(dev);
2300 }
2301 break;
2302 }
2303 break;
2304
2305 case SPD_DET:
2306 if (lp->timeout < 0) {
2307 lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
2308 (~inl(DE4X5_GEP) & GEP_LNP));
2309 SET_100Mb_PDET;
2310 }
2311 if ((slnk = test_sym_link(dev, 6200)) < 0) {
2312 next_tick = slnk & ~TIMER_CB;
2313 } else {
2314 if (is_spd_100(dev) && is_100_up(dev)) {
2315 lp->media = _100Mb;
2316 } else if ((!is_spd_100(dev) && (is_10_up(dev) & lp->tmp))) {
2317 lp->media = _10Mb;
2318 } else {
2319 lp->media = NC;
2320 }
2321 next_tick = dc21140m_autoconf(dev);
2322 }
2323 break;
2324
2325 case _100Mb:
2326 next_tick = 3000;
2327 if (!lp->tx_enable) {
2328 SET_100Mb;
2329 de4x5_init_connection(dev);
2330 } else {
2331 if (!lp->linkOK && (lp->autosense == AUTO)) {
2332 if (!(is_spd_100(dev) && is_100_up(dev))) {
2333 lp->media = INIT;
2334 next_tick = DE4X5_AUTOSENSE_MS;
2335 }
2336 }
2337 }
2338 break;
2339
2340 case _10Mb:
2341 next_tick = 3000;
2342 if (!lp->tx_enable) {
2343 SET_10Mb;
2344 de4x5_init_connection(dev);
2345 } else {
2346 if (!lp->linkOK && (lp->autosense == AUTO)) {
2347 if (!(!is_spd_100(dev) && is_10_up(dev))) {
2348 lp->media = INIT;
2349 next_tick = DE4X5_AUTOSENSE_MS;
2350 }
2351 }
2352 }
2353 break;
2354
2355 case NC:
2356 if (lp->media != lp->c_media) {
2357 de4x5_dbg_media(dev);
2358 lp->c_media = lp->media;
2359 }
2360 lp->media = INIT;
2361 lp->tx_enable = FALSE;
2362 break;
2363 }
2364
2365 return next_tick;
2366 }
2367
2368 static void
2369 de4x5_init_connection(struct device *dev)
2370 {
2371 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2372 u_long iobase = dev->base_addr;
2373
2374 if (lp->media != lp->c_media) {
2375 de4x5_dbg_media(dev);
2376 lp->c_media = lp->media;
2377 }
2378 de4x5_restore_skbs(dev);
2379 cli();
2380 de4x5_rx(dev);
2381 de4x5_setup_intr(dev);
2382 lp->lostMedia = 0;
2383 lp->tx_enable = YES;
2384 sti();
2385 outl(POLL_DEMAND, DE4X5_TPD);
2386
2387 return;
2388 }
2389
2390 static int
2391 de4x5_reset_phy(struct device *dev)
2392 {
2393 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2394 u_long iobase = dev->base_addr;
2395 int next_tick = 0;
2396
2397 if (lp->phy[lp->active].id) {
2398 if (lp->timeout < 0) {
2399 outl(GEP_HRST, DE4X5_GEP);
2400 udelay(1000);
2401 outl(0x00, DE4X5_GEP);
2402 udelay(2000);
2403 mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2404 }
2405 next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, FALSE, 500);
2406 }
2407
2408 return next_tick;
2409 }
2410
2411 static int
2412 test_media(struct device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
2413 {
2414 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2415 u_long iobase = dev->base_addr;
2416 s32 sts, csr12;
2417
2418 if (lp->timeout < 0) {
2419 lp->timeout = msec/100;
2420 reset_init_sia(dev, csr13, csr14, csr15);
2421
2422
2423 outl(irq_mask, DE4X5_IMR);
2424
2425
2426 sts = inl(DE4X5_STS);
2427 outl(sts, DE4X5_STS);
2428
2429
2430 if (lp->chipset == DC21041) {
2431 csr12 = inl(DE4X5_SISR);
2432 outl(csr12, DE4X5_SISR);
2433 }
2434 }
2435
2436 sts = inl(DE4X5_STS) & ~TIMER_CB;
2437
2438 if (!(sts & irqs) && --lp->timeout) {
2439 sts = 100 | TIMER_CB;
2440 } else {
2441 lp->timeout = -1;
2442 }
2443
2444 return sts;
2445 }
2446
2447 static int
2448 test_tp(struct device *dev, s32 msec)
2449 {
2450 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2451 u_long iobase = dev->base_addr;
2452 int sisr;
2453
2454 if (lp->timeout < 0) {
2455 lp->timeout = msec/100;
2456 }
2457
2458 sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);
2459
2460 if (sisr && --lp->timeout) {
2461 sisr = 100 | TIMER_CB;
2462 } else {
2463 lp->timeout = -1;
2464 }
2465
2466 return sisr;
2467 }
2468
2469 static int
2470 test_sym_link(struct device *dev, int msec)
2471 {
2472 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2473 u_long iobase = dev->base_addr;
2474 int gep = 0;
2475
2476 if (lp->timeout < 0) {
2477 lp->timeout = msec/100;
2478 }
2479
2480 if (lp->phy[lp->active].id) {
2481 gep = ((is_100_up(dev) && is_spd_100(dev)) ? GEP_SLNK : 0);
2482 } else {
2483 gep = (~inl(DE4X5_GEP) & (GEP_SLNK | GEP_LNP));
2484 }
2485 if (!(gep & GEP_SLNK) && --lp->timeout) {
2486 gep = 100 | TIMER_CB;
2487 } else {
2488 lp->timeout = -1;
2489 }
2490
2491 return gep;
2492 }
2493
2494
2495
2496
2497
2498 static int
2499 test_mii_reg(struct device *dev, int reg, int mask, int pol, long msec)
2500 {
2501 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2502 int test; u_long iobase = dev->base_addr;
2503
2504 if (lp->timeout < 0) {
2505 lp->timeout = msec/100;
2506 }
2507
2508 if (pol) pol = ~0;
2509 reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
2510 test = (reg ^ pol) & mask;
2511
2512 if (test && --lp->timeout) {
2513 reg = 100 | TIMER_CB;
2514 } else {
2515 lp->timeout = -1;
2516 }
2517
2518 return reg;
2519 }
2520
2521 static int
2522 is_spd_100(struct device *dev)
2523 {
2524 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2525 u_long iobase = dev->base_addr;
2526 int spd;
2527
2528 if (lp->phy[lp->active].id) {
2529 spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
2530 spd = ~(spd ^ lp->phy[lp->active].spd.value);
2531 spd &= lp->phy[lp->active].spd.mask;
2532 } else {
2533 spd = ((~inl(DE4X5_GEP)) & GEP_SLNK);
2534 }
2535
2536 return spd;
2537 }
2538
2539 static int
2540 is_100_up(struct device *dev)
2541 {
2542 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2543 u_long iobase = dev->base_addr;
2544
2545 if (lp->phy[lp->active].id) {
2546
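/* The link status bit is latched, so read it twice to get the current state */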
2547 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
2548 return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS);
2549 } else {
2550 return ((~inl(DE4X5_GEP)) & GEP_SLNK);
2551 }
2552 }
2553
2554 static int
2555 is_10_up(struct device *dev)
2556 {
2557 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2558 u_long iobase = dev->base_addr;
2559
2560 if (lp->phy[lp->active].id) {
2561
2562 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
2563 return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS);
2564 } else {
2565 return ((~inl(DE4X5_GEP)) & GEP_LNP);
2566 }
2567 }
2568
2569 static int
2570 is_anc_capable(struct device *dev)
2571 {
2572 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2573 u_long iobase = dev->base_addr;
2574
2575 if (lp->phy[lp->active].id) {
2576 return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII));
2577 } else {
2578 return 0;
2579 }
2580 }
2581
2582
2583
2584
2585
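/*
** Transmit the test frame held in lp->frame and use the SIA carrier
** status and the descriptor status to decide whether the medium responded.
*/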
2586 static int
2587 ping_media(struct device *dev, int msec)
2588 {
2589 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2590 u_long iobase = dev->base_addr;
2591 int sisr;
2592
2593 if (lp->timeout < 0) {
2594 lp->timeout = msec/100;
2595
2596 lp->tmp = lp->tx_new;
2597 load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), NULL);
2598 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
2599 outl(POLL_DEMAND, DE4X5_TPD);
2600 }
2601
2602 sisr = inl(DE4X5_SISR);
2603
2604 if ((!(sisr & SISR_NCR)) && (lp->tx_ring[lp->tmp].status < 0) && (--lp->timeout)) {
2605 sisr = 100 | TIMER_CB;
2606 } else {
2607 if ((!(sisr & SISR_NCR)) &&
2608 !(lp->tx_ring[lp->tmp].status & (T_OWN | TD_ES)) && lp->timeout) {
2609 sisr = 0;
2610 } else {
2611 sisr = 1;
2612 }
2613 lp->timeout = -1;
2614 }
2615
2616 return sisr;
2617 }
2618
2619
2620
2621
2622
2623
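/*
** Supply a new receive buffer: normally a fresh skb is substituted into
** the ring and the filled one handed back; on the Alpha the received
** data is instead copied out of the fixed ring buffers into a new skb.
*/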
2624 static struct sk_buff *
2625 de4x5_alloc_rx_buff(struct device *dev, int index, int len)
2626 {
2627 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2628 struct sk_buff *p;
2629
2630 #ifndef __alpha__
2631 struct sk_buff *ret;
2632 u_long i=0, tmp;
2633
2634 p = dev_alloc_skb(IEEE802_3_SZ + ALIGN + 2);
2635 if (!p) return NULL;
2636
2637 p->dev = dev;
2638 tmp = virt_to_bus(p->data);
2639 i = ((tmp + ALIGN) & ~ALIGN) - tmp;
2640 skb_reserve(p, i);
2641 lp->rx_ring[index].buf = tmp + i;
2642
2643 ret = lp->rx_skb[index];
2644 lp->rx_skb[index] = p;
2645 skb_put(ret, len);
2646
2647 return ret;
2648
2649 #else
2650 if (lp->state != OPEN) return (struct sk_buff *)1;
2651
2652 p = dev_alloc_skb(len + 2);
2653 if (!p) return NULL;
2654
2655 p->dev = dev;
2656 skb_reserve(p, 2);
2657 if (index < lp->rx_old) {
2658 short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
2659 memcpy(skb_put(p,tlen), bus_to_virt(lp->rx_ring[lp->rx_old].buf),tlen);
2660 memcpy(skb_put(p,len-tlen), bus_to_virt(lp->rx_ring[0].buf), len-tlen);
2661 } else {
2662 memcpy(skb_put(p,len), bus_to_virt(lp->rx_ring[lp->rx_old].buf),len);
2663 }
2664
2665 return p;
2666 #endif
2667 }
2668
2669 static void
2670 de4x5_free_rx_buffs(struct device *dev)
2671 {
2672 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2673 int i;
2674
2675 for (i=0; i<lp->rxRingSize; i++) {
2676 if (lp->rx_skb[i]) {
2677 dev_kfree_skb(lp->rx_skb[i], FREE_WRITE);
2678 }
2679 lp->rx_ring[i].status = 0;
2680 lp->rx_skb[i] = (struct sk_buff *)1;
2681 }
2682
2683 return;
2684 }
2685
2686 static void
2687 de4x5_free_tx_buffs(struct device *dev)
2688 {
2689 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2690 int i;
2691
2692 for (i=0; i<lp->txRingSize; i++) {
2693 if (lp->tx_skb[i]) {
2694 dev_kfree_skb(lp->tx_skb[i], FREE_WRITE);
2695 lp->tx_skb[i] = NULL;
2696 }
2697 lp->tx_ring[i].status = 0;
2698 }
2699
2700
2701 while (lp->cache.skb) {
2702 dev_kfree_skb(de4x5_get_cache(dev), FREE_WRITE);
2703 }
2704
2705 return;
2706 }
2707
2708
2709
2710
2711
2712
2713
2714
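/*
** Stop the device and save/restore its CSR state around a soft reset
** while the media is being changed.
*/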
2715 static void
2716 de4x5_save_skbs(struct device *dev)
2717 {
2718 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2719 u_long iobase = dev->base_addr;
2720 s32 omr;
2721
2722 if (!lp->cache.save_cnt) {
2723 STOP_DE4X5;
2724 de4x5_tx(dev);
2725 de4x5_free_tx_buffs(dev);
2726 de4x5_cache_state(dev, DE4X5_SAVE_STATE);
2727 de4x5_sw_reset(dev);
2728 de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
2729 dev->tbusy = 0;
2730 lp->cache.save_cnt++;
2731 START_DE4X5;
2732 }
2733
2734 return;
2735 }
2736
2737 static void
2738 de4x5_restore_skbs(struct device *dev)
2739 {
2740 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2741 u_long iobase = dev->base_addr;
2742 s32 omr;
2743
2744 if (lp->cache.save_cnt) {
2745 STOP_DE4X5;
2746 de4x5_cache_state(dev, DE4X5_SAVE_STATE);
2747 de4x5_sw_reset(dev);
2748 de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
2749 dev->tbusy = 0;
2750 lp->cache.save_cnt--;
2751 START_DE4X5;
2752 }
2753
2754 return;
2755 }
2756
2757 static void
2758 de4x5_cache_state(struct device *dev, int flag)
2759 {
2760 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2761 u_long iobase = dev->base_addr;
2762 s32 gep;
2763
2764 switch(flag) {
2765 case DE4X5_SAVE_STATE:
2766 lp->cache.csr0 = inl(DE4X5_BMR);
2767 lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR));
2768 lp->cache.csr7 = inl(DE4X5_IMR);
2769 if (lp->chipset != DC21140) {
2770 lp->cache.csr13 = inl(DE4X5_SICR);
2771 lp->cache.csr14 = inl(DE4X5_STRR);
2772 lp->cache.csr15 = inl(DE4X5_SIGR);
2773 }
2774 break;
2775
2776 case DE4X5_RESTORE_STATE:
2777 outl(lp->cache.csr0, DE4X5_BMR);
2778 outl(lp->cache.csr6, DE4X5_OMR);
2779 outl(lp->cache.csr7, DE4X5_IMR);
2780 if (lp->chipset == DC21140) {
2781 outl(GEP_INIT, DE4X5_GEP);
2782 gep = (lp->media == _100Mb ? GEP_MODE : 0);
2783 if (!lp->phy[lp->active].id && !de4x5_full_duplex) {
2784 gep |= GEP_FDXD;
2785 }
2786 outl(gep, DE4X5_GEP);
2787 } else {
2788 reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
2789 lp->cache.csr15);
2790 }
2791 break;
2792 }
2793
2794 return;
2795 }
2796
2797 static void
2798 de4x5_put_cache(struct device *dev, struct sk_buff *skb)
2799 {
2800 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2801 struct sk_buff *p;
2802
2803 if (lp->cache.skb) {
2804 for (p=lp->cache.skb; p->next; p=p->next);
2805 p->next = skb;
2806 } else {
2807 lp->cache.skb = skb;
2808 }
2809 skb->next = NULL;
2810
2811 return;
2812 }
2813
2814 static void
2815 de4x5_putb_cache(struct device *dev, struct sk_buff *skb)
2816 {
2817 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2818 struct sk_buff *p = lp->cache.skb;
2819
2820 lp->cache.skb = skb;
2821 skb->next = p;
2822
2823 return;
2824 }
2825
2826 static struct sk_buff *
2827 de4x5_get_cache(struct device *dev)
2828 {
2829 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2830 struct sk_buff *p = lp->cache.skb;
2831
2832 if (p) {
2833 lp->cache.skb = p->next;
2834 p->next = NULL;
2835 }
2836
2837 return p;
2838 }
2839
2840
2841
2842
2843
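/* Poll for the DC21041 Nway autonegotiation to complete */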
2844 static int
2845 test_ans(struct device *dev, s32 irqs, s32 irq_mask, s32 msec)
2846 {
2847 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2848 u_long iobase = dev->base_addr;
2849 s32 sts, ans;
2850
2851 if (lp->timeout < 0) {
2852 lp->timeout = msec/100;
2853 outl(irq_mask, DE4X5_IMR);
2854
2855
2856 sts = inl(DE4X5_STS);
2857 outl(sts, DE4X5_STS);
2858 }
2859
2860 ans = inl(DE4X5_SISR) & SISR_ANS;
2861 sts = inl(DE4X5_STS) & ~TIMER_CB;
2862
2863 if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
2864 sts = 100 | TIMER_CB;
2865 } else {
2866 lp->timeout = -1;
2867 }
2868
2869 return sts;
2870 }
2871
2872 static void
2873 de4x5_setup_intr(struct device *dev)
2874 {
2875 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2876 u_long iobase = dev->base_addr;
2877 s32 imr, sts;
2878
2879 if (inl(DE4X5_OMR) & OMR_SR) {
2880 imr = 0;
2881 UNMASK_IRQs;
2882 sts = inl(DE4X5_STS);
2883 outl(sts, DE4X5_STS);
2884 ENABLE_IRQs;
2885 }
2886
2887 return;
2888 }
2889
2890
2891
2892
2893 static void
2894 reset_init_sia(struct device *dev, s32 sicr, s32 strr, s32 sigr)
2895 {
2896 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
2897 u_long iobase = dev->base_addr;
2898
2899 RESET_SIA;
2900 outl(sigr, DE4X5_SIGR);
2901 outl(strr, DE4X5_STRR);
2902 outl(sicr, DE4X5_SICR);
2903
2904 return;
2905 }
2906
2907
2908
2909
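/* Build a minimal frame addressed from/to our own station address (used by ping_media) */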
2910 static void
2911 create_packet(struct device *dev, char *frame, int len)
2912 {
2913 int i;
2914 char *buf = frame;
2915
2916 for (i=0; i<ETH_ALEN; i++) {
2917 *buf++ = dev->dev_addr[i];
2918 }
2919 for (i=0; i<ETH_ALEN; i++) {
2920 *buf++ = dev->dev_addr[i];
2921 }
2922
2923 *buf++ = 0;
2924 *buf++ = 1;
2925
2926 return;
2927 }
2928
2929
2930
2931
2932 static void
2933 de4x5_us_delay(u32 usec)
2934 {
2935 udelay(usec);
2936
2937 return;
2938 }
2939
2940
2941
2942
2943 static void
2944 de4x5_ms_delay(u32 msec)
2945 {
2946 u_int i;
2947
2948 for (i=0; i<msec; i++) {
2949 de4x5_us_delay(1000);
2950 }
2951
2952 return;
2953 }
2954
2955
2956
2957
2958
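/*
** Decode the EISA ID register into its compressed manufacturer code and
** compare it against the known DE4X5 signatures.
*/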
2959 static int
2960 EISA_signature(char *name, s32 eisa_id)
2961 {
2962 c_char *signatures[] = DE4X5_SIGNATURE;
2963 char ManCode[DE4X5_STRLEN];
2964 union {
2965 s32 ID;
2966 char Id[4];
2967 } Eisa;
2968 int i, status = 0, siglen = sizeof(signatures)/sizeof(c_char *);
2969
2970 *name = '\0';
2971 Eisa.ID = inl(eisa_id);
2972
2973 ManCode[0]=(((Eisa.Id[0]>>2)&0x1f)+0x40);
2974 ManCode[1]=(((Eisa.Id[1]&0xe0)>>5)+((Eisa.Id[0]&0x03)<<3)+0x40);
2975 ManCode[2]=(((Eisa.Id[2]>>4)&0x0f)+0x30);
2976 ManCode[3]=((Eisa.Id[2]&0x0f)+0x30);
2977 ManCode[4]=(((Eisa.Id[3]>>4)&0x0f)+0x30);
2978 ManCode[5]='\0';
2979
2980 for (i=0;i<siglen;i++) {
2981 if (strstr(ManCode, signatures[i]) != NULL) {
2982 strcpy(name,ManCode);
2983 status = 1;
2984 break;
2985 }
2986 }
2987
2988 return status;
2989 }
2990
2991
2992
2993
2994 static int
2995 PCI_signature(char *name, struct bus_type *lp)
2996 {
2997 c_char *de4x5_signatures[] = DE4X5_SIGNATURE;
2998 int i, status = 0, siglen = sizeof(de4x5_signatures)/sizeof(c_char *);
2999
3000 if (lp->chipset == DC21040) {
3001 strcpy(name, "DE434/5");
3002 } else {
3003 int i = *((char *)&lp->srom + 19) * 3;
3004 if (lp->chipset == DC21041) {
3005 strncpy(name, (char *)&lp->srom + 26 + i, 8);
3006 } else if (lp->chipset == DC21140) {
3007 strncpy(name, (char *)&lp->srom + 26 + i, 8);
3008 }
3009 }
3010 name[8] = '\0';
3011 for (i=0; i<siglen; i++) {
3012 if (strstr(name,de4x5_signatures[i])!=NULL) break;
3013 }
3014 if (i == siglen) {
3015 if (dec_only) {
3016 *name = '\0';
3017 } else {
3018 strcpy(name, (((lp->chipset == DC21040) ? "DC21040" :
3019 ((lp->chipset == DC21041) ? "DC21041" :
3020 ((lp->chipset == DC21140) ? "DC21140" : "UNKNOWN"
3021 )))));
3022 }
3023 }
3024
3025 return status;
3026 }
3027
3028
3029
3030
3031
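/* Reset the DC21040 address ROM pointer, or read the whole SROM into lp->srom */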
3032 static void
3033 DevicePresent(u_long aprom_addr)
3034 {
3035 int i;
3036 struct bus_type *lp = &bus;
3037
3038 if (lp->chipset == DC21040) {
3039 outl(0, aprom_addr);
3040 } else {
3041 short *p = (short *)&lp->srom;
3042 for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
3043 *p++ = srom_rd(aprom_addr, i);
3044 }
3045 de4x5_dbg_srom((struct de4x5_srom *)&lp->srom);
3046 }
3047
3048 return;
3049 }
3050
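/*
** Extract the Ethernet address from the address ROM or the SROM (allowing
** for known-broken SROM layouts) and verify its checksum.
*/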
3051 static int
3052 get_hw_addr(struct device *dev)
3053 {
3054 u_long iobase = dev->base_addr;
3055 int broken, i, k, tmp, status = 0;
3056 u_short j,chksum;
3057 struct bus_type *lp = &bus;
3058
3059 broken = de4x5_bad_srom(lp);
3060 for (i=0,k=0,j=0;j<3;j++) {
3061 k <<= 1;
3062 if (k > 0xffff) k-=0xffff;
3063
3064 if (lp->bus == PCI) {
3065 if (lp->chipset == DC21040) {
3066 while ((tmp = inl(DE4X5_APROM)) < 0);
3067 k += (u_char) tmp;
3068 dev->dev_addr[i++] = (u_char) tmp;
3069 while ((tmp = inl(DE4X5_APROM)) < 0);
3070 k += (u_short) (tmp << 8);
3071 dev->dev_addr[i++] = (u_char) tmp;
3072 } else if (!broken) {
3073 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
3074 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
3075 } else if (broken == SMC) {
3076 dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
3077 dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
3078 }
3079 } else {
3080 k += (u_char) (tmp = inb(EISA_APROM));
3081 dev->dev_addr[i++] = (u_char) tmp;
3082 k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
3083 dev->dev_addr[i++] = (u_char) tmp;
3084 }
3085
3086 if (k > 0xffff) k-=0xffff;
3087 }
3088 if (k == 0xffff) k=0;
3089
3090 if (lp->bus == PCI) {
3091 if (lp->chipset == DC21040) {
3092 while ((tmp = inl(DE4X5_APROM)) < 0);
3093 chksum = (u_char) tmp;
3094 while ((tmp = inl(DE4X5_APROM)) < 0);
3095 chksum |= (u_short) (tmp << 8);
3096 if ((k != chksum) && (dec_only)) status = -1;
3097 }
3098 } else {
3099 chksum = (u_char) inb(EISA_APROM);
3100 chksum |= (u_short) (inb(EISA_APROM) << 8);
3101 if ((k != chksum) && (dec_only)) status = -1;
3102 }
3103
3104 return status;
3105 }
3106
3107
3108
3109
3110
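/* Detect SROMs that carry the address in a non-DEC (e.g. SMC) layout */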
3111 static int
3112 de4x5_bad_srom(struct bus_type *lp)
3113 {
3114 int i, status = 0;
3115
3116 for (i=0; i<sizeof(enet_det)/ETH_ALEN; i++) {
3117 if (!de4x5_strncmp((char *)&lp->srom, (char *)&enet_det[i], 3) &&
3118 !de4x5_strncmp((char *)&lp->srom+0x10, (char *)&enet_det[i], 3)) {
3119 status = SMC;
3120 break;
3121 }
3122 }
3123
3124 return status;
3125 }
3126
3127 static int
3128 de4x5_strncmp(char *a, char *b, int n)
3129 {
3130 int ret=0;
3131
3132 for (;n && !ret;n--) {
3133 ret = *a++ - *b++;
3134 }
3135
3136 return ret;
3137 }
3138
3139
3140
3141
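/*
** Bit-banged serial SROM (EEPROM) read: select the device, clock out the
** read command and address, then clock in 16 data bits.
*/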
3142 static short
3143 srom_rd(u_long addr, u_char offset)
3144 {
3145 sendto_srom(SROM_RD | SROM_SR, addr);
3146
3147 srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
3148 srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
3149 srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
3150
3151 return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
3152 }
3153
3154 static void
3155 srom_latch(u_int command, u_long addr)
3156 {
3157 sendto_srom(command, addr);
3158 sendto_srom(command | DT_CLK, addr);
3159 sendto_srom(command, addr);
3160
3161 return;
3162 }
3163
3164 static void
3165 srom_command(u_int command, u_long addr)
3166 {
3167 srom_latch(command, addr);
3168 srom_latch(command, addr);
3169 srom_latch((command & 0x0000ff00) | DT_CS, addr);
3170
3171 return;
3172 }
3173
3174 static void
3175 srom_address(u_int command, u_long addr, u_char offset)
3176 {
3177 int i;
3178 char a;
3179
3180 a = (char)(offset << 2);
3181 for (i=0; i<6; i++, a <<= 1) {
3182 srom_latch(command | ((a < 0) ? DT_IN : 0), addr);
3183 }
3184 de4x5_us_delay(1);
3185
3186 i = (getfrom_srom(addr) >> 3) & 0x01;
3187 if (i != 0) {
3188 printk("Bad SROM address phase.....\n");
3189 }
3190
3191 return;
3192 }
3193
3194 static short
3195 srom_data(u_int command, u_long addr)
3196 {
3197 int i;
3198 short word = 0;
3199 s32 tmp;
3200
3201 for (i=0; i<16; i++) {
3202 sendto_srom(command | DT_CLK, addr);
3203 tmp = getfrom_srom(addr);
3204 sendto_srom(command, addr);
3205
3206 word = (word << 1) | ((tmp >> 3) & 0x01);
3207 }
3208
3209 sendto_srom(command & 0x0000ff00, addr);
3210
3211 return word;
3212 }
3213
3214
3215
3216
3217
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230 static void
3231 sendto_srom(u_int command, u_long addr)
3232 {
3233 outl(command, addr);
3234 udelay(1);
3235
3236 return;
3237 }
3238
3239 static int
3240 getfrom_srom(u_long addr)
3241 {
3242 s32 tmp;
3243
3244 tmp = inl(addr);
3245 udelay(1);
3246
3247 return tmp;
3248 }
3249
3250
3251
3252
3253
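/*
** Bit-banged MII management frames: preamble, start/opcode, PHY and
** register address, turnaround, then 16 data bits.
*/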
3254 static int
3255 mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
3256 {
3257 mii_wdata(MII_PREAMBLE, 2, ioaddr);
3258 mii_wdata(MII_PREAMBLE, 32, ioaddr);
3259 mii_wdata(MII_STRD, 4, ioaddr);
3260 mii_address(phyaddr, ioaddr);
3261 mii_address(phyreg, ioaddr);
3262 mii_ta(MII_STRD, ioaddr);
3263
3264 return mii_rdata(ioaddr);
3265 }
3266
3267 static void
3268 mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
3269 {
3270 mii_wdata(MII_PREAMBLE, 2, ioaddr);
3271 mii_wdata(MII_PREAMBLE, 32, ioaddr);
3272 mii_wdata(MII_STWR, 4, ioaddr);
3273 mii_address(phyaddr, ioaddr);
3274 mii_address(phyreg, ioaddr);
3275 mii_ta(MII_STWR, ioaddr);
3276 data = mii_swap(data, 16);
3277 mii_wdata(data, 16, ioaddr);
3278
3279 return;
3280 }
3281
3282 static int
3283 mii_rdata(u_long ioaddr)
3284 {
3285 int i;
3286 s32 tmp = 0;
3287
3288 for (i=0; i<16; i++) {
3289 tmp <<= 1;
3290 tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
3291 }
3292
3293 return tmp;
3294 }
3295
3296 static void
3297 mii_wdata(int data, int len, u_long ioaddr)
3298 {
3299 int i;
3300
3301 for (i=0; i<len; i++) {
3302 sendto_mii(MII_MWR | MII_WR, data, ioaddr);
3303 data >>= 1;
3304 }
3305
3306 return;
3307 }
3308
3309 static void
3310 mii_address(u_char addr, u_long ioaddr)
3311 {
3312 int i;
3313
3314 addr = mii_swap(addr, 5);
3315 for (i=0; i<5; i++) {
3316 sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
3317 addr >>= 1;
3318 }
3319
3320 return;
3321 }
3322
3323 static void
3324 mii_ta(u_long rw, u_long ioaddr)
3325 {
3326 if (rw == MII_STWR) {
3327 sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
3328 sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
3329 } else {
3330 getfrom_mii(MII_MRD | MII_RD, ioaddr);
3331 }
3332
3333 return;
3334 }
3335
3336 static int
3337 mii_swap(int data, int len)
3338 {
3339 int i, tmp = 0;
3340
3341 for (i=0; i<len; i++) {
3342 tmp <<= 1;
3343 tmp |= (data & 1);
3344 data >>= 1;
3345 }
3346
3347 return tmp;
3348 }
3349
3350 static void
3351 sendto_mii(u32 command, int data, u_long ioaddr)
3352 {
3353 u32 j;
3354
3355 j = (data & 1) << 17;
3356 outl(command | j, ioaddr);
3357 udelay(1);
3358 outl(command | MII_MDC | j, ioaddr);
3359 udelay(1);
3360
3361 return;
3362 }
3363
3364 static int
3365 getfrom_mii(u32 command, u_long ioaddr)
3366 {
3367 outl(command, ioaddr);
3368 udelay(1);
3369 outl(command | MII_MDC, ioaddr);
3370 udelay(1);
3371
3372 return ((inl(ioaddr) >> 19) & 1);
3373 }
3374
3375
3376
3377
3378
3379 static int
3380 mii_get_oui(u_char phyaddr, u_long ioaddr)
3381 {
3382
3383
3384
3385
3386
3387
3388 int r2, r3;
3389
3390
3391 r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
3392 r3 = mii_rd(MII_ID1, phyaddr, ioaddr);
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419
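/* Only the raw MII_ID0 register value is returned; the OUI proper is never assembled here */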
3420 return r2;
3421 }
3422
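/*
** Hard reset the MII port, then search all PHY addresses for known
** devices, recording each match in lp->phy[] and soft resetting it.
*/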
3423 static int
3424 mii_get_phy(struct device *dev)
3425 {
3426 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3427 int iobase = dev->base_addr;
3428 int i, j, k, limit=sizeof(phy_info)/sizeof(struct phy_table);
3429 int id;
3430
3431
3432 outl(GEP_HRST, DE4X5_GEP);
3433 udelay(1000);
3434 outl(0x00, DE4X5_GEP);
3435 udelay(2000);
3436
3437
3438 lp->active = 0;
3439 for (lp->mii_cnt=0, i=1; i<DE4X5_MAX_MII; i++) {
3440 id = mii_get_oui(i, DE4X5_MII);
3441 if ((id == 0) || (id == -1)) continue;
3442 for (j=0; j<limit; j++) {
3443 if (id != phy_info[j].id) continue;
3444 for (k=0; (k < DE4X5_MAX_PHY) && lp->phy[k].id; k++);
3445 if (k < DE4X5_MAX_PHY) {
3446 memcpy((char *)&lp->phy[k],
3447 (char *)&phy_info[j], sizeof(struct phy_table));
3448 lp->phy[k].addr = i;
3449 lp->mii_cnt++;
3450 } else {
3451 i = DE4X5_MAX_MII;
3452 j = limit;
3453 }
3454 }
3455 }
3456 if (lp->phy[lp->active].id) {
3457 for (k=0; (k < DE4X5_MAX_PHY) && lp->phy[k].id; k++) {
3458 mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
3459 while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
3460
3461 de4x5_dbg_mii(dev, k);
3462 }
3463 }
3464
3465 return lp->mii_cnt;
3466 }
3467
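/*
** Build the setup frame: either a hash filtering frame with our station
** address in the imperfect filtering slot, or a perfect filtering frame
** containing our address and the broadcast address.
*/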
3468 static char *
3469 build_setup_frame(struct device *dev, int mode)
3470 {
3471 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3472 int i;
3473 char *pa = lp->setup_frame;
3474
3475
3476 if (mode == ALL) {
3477 memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
3478 }
3479
3480 if (lp->setup_f == HASH_PERF) {
3481 for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
3482 *(pa + i) = dev->dev_addr[i];
3483 if (i & 0x01) pa += 2;
3484 }
3485 *(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80;
3486 } else {
3487 for (i=0; i<ETH_ALEN; i++) {
3488 *(pa + (i&1)) = dev->dev_addr[i];
3489 if (i & 0x01) pa += 4;
3490 }
3491 for (i=0; i<ETH_ALEN; i++) {
3492 *(pa + (i&1)) = (char) 0xff;
3493 if (i & 0x01) pa += 4;
3494 }
3495 }
3496
3497 return pa;
3498 }
3499
3500 static void
3501 enable_ast(struct device *dev, u32 time_out)
3502 {
3503 timeout(dev, (void *)&de4x5_ast, (u_long)dev, time_out);
3504
3505 return;
3506 }
3507
3508 static void
3509 disable_ast(struct device *dev)
3510 {
3511 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3512
3513 del_timer(&lp->timer);
3514
3515 return;
3516 }
3517
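/*
** Switch the port selection between the MII (100Mb) and the 10Mb serial
** interface; a soft reset is required for the new OMR value to take
** effect, and the DC21140 general purpose port is re-initialised.
*/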
3518 static long
3519 de4x5_switch_to_mii(struct device *dev)
3520 {
3521 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3522 int iobase = dev->base_addr;
3523 long omr;
3524
3525
3526 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR));
3527 omr |= (OMR_PS | OMR_HBD);
3528 outl(omr, DE4X5_OMR);
3529
3530
3531 RESET_DE4X5;
3532
3533
3534 if (lp->chipset == DC21140) {
3535 outl(GEP_INIT, DE4X5_GEP);
3536 outl(0, DE4X5_GEP);
3537 }
3538
3539
3540 outl(omr, DE4X5_OMR);
3541
3542 return omr;
3543 }
3544
3545 static long
3546 de4x5_switch_to_srl(struct device *dev)
3547 {
3548 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3549 int iobase = dev->base_addr;
3550 long omr;
3551
3552
3553 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR));
3554 outl(omr, DE4X5_OMR);
3555
3556
3557 RESET_DE4X5;
3558
3559
3560 if (lp->chipset == DC21140) {
3561 outl(GEP_INIT, DE4X5_GEP);
3562 outl(0, DE4X5_GEP);
3563 }
3564
3565
3566 outl(omr, DE4X5_OMR);
3567
3568 return omr;
3569 }
3570
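/* (Re)arm the autosense timer to call 'fn(data)' after 'msec' ms (minimum one jiffy) */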
3571 static void
3572 timeout(struct device *dev, void (*fn)(u_long data), u_long data, u_long msec)
3573 {
3574 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3575 int dt;
3576
3577
3578 del_timer(&lp->timer);
3579
3580
3581 dt = (msec * HZ) / 1000;
3582 if (dt==0) dt=1;
3583
3584
3585 lp->timer.expires = jiffies + dt;
3586 lp->timer.function = fn;
3587 lp->timer.data = data;
3588 add_timer(&lp->timer);
3589
3590 return;
3591 }
3592
3593 static void
3594 de4x5_dbg_open(struct device *dev)
3595 {
3596 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3597 int i;
3598
3599 if (de4x5_debug > 1) {
3600 printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
3601 printk("\tphysical address: ");
3602 for (i=0;i<6;i++) {
3603 printk("%2.2x:",(short)dev->dev_addr[i]);
3604 }
3605 printk("\n");
3606 printk("Descriptor head addresses:\n");
3607 printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
3608 printk("Descriptor addresses:\nRX: ");
3609 for (i=0;i<lp->rxRingSize-1;i++){
3610 if (i < 3) {
3611 printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
3612 }
3613 }
3614 printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
3615 printk("TX: ");
3616 for (i=0;i<lp->txRingSize-1;i++){
3617 if (i < 3) {
3618 printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
3619 }
3620 }
3621 printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
3622 printk("Descriptor buffers:\nRX: ");
3623 for (i=0;i<lp->rxRingSize-1;i++){
3624 if (i < 3) {
3625 printk("0x%8.8x ",lp->rx_ring[i].buf);
3626 }
3627 }
3628 printk("...0x%8.8x\n",lp->rx_ring[i].buf);
3629 printk("TX: ");
3630 for (i=0;i<lp->txRingSize-1;i++){
3631 if (i < 3) {
3632 printk("0x%8.8x ", lp->tx_ring[i].buf);
3633 }
3634 }
3635 printk("...0x%8.8x\n", lp->tx_ring[i].buf);
3636 printk("Ring size: \nRX: %d\nTX: %d\n",
3637 (short)lp->rxRingSize,
3638 (short)lp->txRingSize);
3639 }
3640
3641 return;
3642 }
3643
3644 static void
3645 de4x5_dbg_mii(struct device *dev, int k)
3646 {
3647 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3648 int iobase = dev->base_addr;
3649
3650 if (de4x5_debug > 2) {
3651 printk("\nMII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII));
3652 printk("MII SR: %x\n",mii_rd(MII_SR,lp->phy[k].addr,DE4X5_MII));
3653 printk("MII ID0: %x\n",mii_rd(MII_ID0,lp->phy[k].addr,DE4X5_MII));
3654 printk("MII ID1: %x\n",mii_rd(MII_ID1,lp->phy[k].addr,DE4X5_MII));
3655 if (lp->phy[k].id != BROADCOM_T4) {
3656 printk("MII ANA: %x\n",mii_rd(0x04,lp->phy[k].addr,DE4X5_MII));
3657 printk("MII ANC: %x\n",mii_rd(0x05,lp->phy[k].addr,DE4X5_MII));
3658 }
3659 printk("MII 16: %x\n",mii_rd(0x10,lp->phy[k].addr,DE4X5_MII));
3660 if (lp->phy[k].id != BROADCOM_T4) {
3661 printk("MII 17: %x\n",mii_rd(0x11,lp->phy[k].addr,DE4X5_MII));
3662 printk("MII 18: %x\n",mii_rd(0x12,lp->phy[k].addr,DE4X5_MII));
3663 } else {
3664 printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
3665 }
3666 }
3667
3668 return;
3669 }
3670
3671 static void
3672 de4x5_dbg_media(struct device *dev)
3673 {
3674 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3675
3676 if (lp->media != lp->c_media) {
3677 if (de4x5_debug > 0) {
3678 if (lp->chipset != DC21140) {
3679 printk("%s: media is %s\n", dev->name,
3680 (lp->media == NC ? "unconnected!" :
3681 (lp->media == TP ? "TP." :
3682 (lp->media == ANS ? "TP/Nway." :
3683 (lp->media == BNC ? "BNC." :
3684 (lp->media == BNC_AUI ? "BNC/AUI." :
3685 (lp->media == EXT_SIA ? "EXT SIA." :
3686 "???."
3687 )))))));
3688 } else {
3689 printk("%s: mode is %s\n", dev->name,
3690 (lp->media == NC ? "link down or incompatible connection.":
3691 (lp->media == _100Mb ? "100Mb/s." :
3692 (lp->media == _10Mb ? "10Mb/s." :
3693 "\?\?\?"
3694 ))));
3695 }
3696 }
3697 lp->c_media = lp->media;
3698 }
3699
3700 return;
3701 }
3702
3703 static void
3704 de4x5_dbg_srom(struct de4x5_srom *p)
3705 {
3706 int i;
3707
3708 if (de4x5_debug > 1) {
3709 printk("Sub-system Vendor ID: %04x\n", (u_short)*(p->sub_vendor_id));
3710 printk("Sub-system ID: %04x\n", (u_short)*(p->sub_system_id));
3711 printk("ID Block CRC: %02x\n", (u_char)(p->id_block_crc));
3712
3713 printk("Hardware Address: ");
3714 for (i=0;i<ETH_ALEN-1;i++) {
3715 printk("%02x:", (u_char)*(p->ieee_addr+i));
3716 }
3717 printk("%02x\n", (u_char)*(p->ieee_addr+i));
3718 printk("CRC checksum: %04x\n", (u_short)(p->chksum));
3719 for (i=0; i<64; i++) {
3720 printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
3721 }
3722 }
3723
3724 return;
3725 }
3726
3727 static void
3728 de4x5_dbg_rx(struct sk_buff *skb, int len)
3729 {
3730 int i, j;
3731
3732 if (de4x5_debug > 2) {
3733 printk("R: %02x:%02x:%02x:%02x:%02x:%02x <- %02x:%02x:%02x:%02x:%02x:%02x len/SAP:%02x%02x [%d]\n",
3734 (u_char)skb->data[0],
3735 (u_char)skb->data[1],
3736 (u_char)skb->data[2],
3737 (u_char)skb->data[3],
3738 (u_char)skb->data[4],
3739 (u_char)skb->data[5],
3740 (u_char)skb->data[6],
3741 (u_char)skb->data[7],
3742 (u_char)skb->data[8],
3743 (u_char)skb->data[9],
3744 (u_char)skb->data[10],
3745 (u_char)skb->data[11],
3746 (u_char)skb->data[12],
3747 (u_char)skb->data[13],
3748 len);
3749 if (de4x5_debug > 3) {
3750 for (j=0; len>0;j+=16, len-=16) {
3751 printk(" %03x: ",j);
3752 for (i=0; i<16 && i<len; i++) {
3753 printk("%02x ",(u_char)skb->data[i+j]);
3754 }
3755 printk("\n");
3756 }
3757 }
3758 }
3759
3760 return;
3761 }
3762
3763
3764
3765
3766
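/*
** Private ioctl interface used by the de4x5 configuration utilities.
** A rough user space sketch -- an illustration only, assuming the
** struct de4x5_ioctl layout from de4x5.h, an already opened AF_INET
** socket 'sock' and an interface named "eth0" -- for reading the
** hardware address might be:
**
**     struct ifreq ifr;
**     struct de4x5_ioctl ioc;
**     u_char hwaddr[ETH_ALEN];
**
**     ioc.cmd  = DE4X5_GET_HWADDR;
**     ioc.len  = ETH_ALEN;
**     ioc.data = hwaddr;
**     strcpy(ifr.ifr_name, "eth0");
**     memcpy(&ifr.ifr_data, &ioc, sizeof(ioc));
**     ioctl(sock, SIOCDEVPRIVATE, &ifr);
*/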
3767 static int
3768 de4x5_ioctl(struct device *dev, struct ifreq *rq, int cmd)
3769 {
3770 struct de4x5_private *lp = (struct de4x5_private *)dev->priv;
3771 struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_data;
3772 u_long iobase = dev->base_addr;
3773 int i, j, status = 0;
3774 s32 omr;
3775 union {
3776 u8 addr[(HASH_TABLE_LEN * ETH_ALEN)];
3777 u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
3778 u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2];
3779 } tmp;
3780
3781 switch(ioc->cmd) {
3782 case DE4X5_GET_HWADDR:
3783 ioc->len = ETH_ALEN;
3784 status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
3785 if (status)
3786 break;
3787 for (i=0; i<ETH_ALEN; i++) {
3788 tmp.addr[i] = dev->dev_addr[i];
3789 }
3790 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
3791
3792 break;
3793 case DE4X5_SET_HWADDR:
3794 status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN);
3795 if (status)
3796 break;
3797 status = -EPERM;
3798 if (!suser())
3799 break;
3800 status = 0;
3801 memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN);
3802 for (i=0; i<ETH_ALEN; i++) {
3803 dev->dev_addr[i] = tmp.addr[i];
3804 }
3805 build_setup_frame(dev, PHYS_ADDR_ONLY);
3806
3807 while (set_bit(0, (void *)&dev->tbusy) != 0);
3808 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
3809 SETUP_FRAME_LEN, NULL);
3810 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
3811 outl(POLL_DEMAND, DE4X5_TPD);
3812 dev->tbusy = 0;
3813
3814 break;
3815 case DE4X5_SET_PROM:
3816 if (suser()) {
3817 omr = inl(DE4X5_OMR);
3818 omr |= OMR_PR;
3819 outl(omr, DE4X5_OMR);
3820 } else {
3821 status = -EPERM;
3822 }
3823
3824 break;
3825 case DE4X5_CLR_PROM:
3826 if (suser()) {
3827 omr = inl(DE4X5_OMR);
3828 omr &= ~OMR_PR;
3829 outl(omr, DE4X5_OMR);
3830 } else {
3831 status = -EPERM;
3832 }
3833
3834 break;
3835 case DE4X5_SAY_BOO:
3836 printk("%s: Boo!\n", dev->name);
3837
3838 break;
3839 case DE4X5_GET_MCA:
3840 ioc->len = (HASH_TABLE_LEN >> 3);
3841 status = verify_area(VERIFY_WRITE, ioc->data, ioc->len);
3842 if (!status) {
3843 memcpy_tofs(ioc->data, lp->setup_frame, ioc->len);
3844 }
3845
3846 break;
3847 case DE4X5_SET_MCA:
3848 if (suser()) {
3849
3850 if (ioc->len != HASH_TABLE_LEN) {
3851 if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN * ioc->len))) {
3852 memcpy_fromfs(tmp.addr, ioc->data, ETH_ALEN * ioc->len);
3853 set_multicast_list(dev);
3854 }
3855 } else {
3856 set_multicast_list(dev);
3857 }
3858 } else {
3859 status = -EPERM;
3860 }
3861
3862 break;
3863 case DE4X5_CLR_MCA:
3864 if (suser()) {
3865
3866 set_multicast_list(dev);
3867 } else {
3868 status = -EPERM;
3869 }
3870
3871 break;
3872 case DE4X5_MCA_EN:
3873 if (suser()) {
3874 omr = inl(DE4X5_OMR);
3875 omr |= OMR_PM;
3876 outl(omr, DE4X5_OMR);
3877 } else {
3878 status = -EPERM;
3879 }
3880
3881 break;
3882 case DE4X5_GET_STATS:
3883 ioc->len = sizeof(lp->pktStats);
3884 status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len);
3885 if (status)
3886 break;
3887
3888 cli();
3889 memcpy_tofs(ioc->data, &lp->pktStats, ioc->len);
3890 sti();
3891
3892 break;
3893 case DE4X5_CLR_STATS:
3894 if (suser()) {
3895 cli();
3896 memset(&lp->pktStats, 0, sizeof(lp->pktStats));
3897 sti();
3898 } else {
3899 status = -EPERM;
3900 }
3901
3902 break;
3903 case DE4X5_GET_OMR:
3904 tmp.addr[0] = inl(DE4X5_OMR);
3905 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, 1))) {
3906 memcpy_tofs(ioc->data, tmp.addr, 1);
3907 }
3908
3909 break;
3910 case DE4X5_SET_OMR:
3911 if (suser()) {
3912 if (!(status = verify_area(VERIFY_READ, (void *)ioc->data, 1))) {
3913 memcpy_fromfs(tmp.addr, ioc->data, 1);
3914 outl(tmp.addr[0], DE4X5_OMR);
3915 }
3916 } else {
3917 status = -EPERM;
3918 }
3919
3920 break;
3921 case DE4X5_GET_REG:
3922 j = 0;
3923 tmp.lval[0] = inl(DE4X5_STS); j+=4;
3924 tmp.lval[1] = inl(DE4X5_BMR); j+=4;
3925 tmp.lval[2] = inl(DE4X5_IMR); j+=4;
3926 tmp.lval[3] = inl(DE4X5_OMR); j+=4;
3927 tmp.lval[4] = inl(DE4X5_SISR); j+=4;
3928 tmp.lval[5] = inl(DE4X5_SICR); j+=4;
3929 tmp.lval[6] = inl(DE4X5_STRR); j+=4;
3930 tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
3931 ioc->len = j;
3932 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
3933 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
3934 }
3935 break;
3936
3937 #define DE4X5_DUMP 0x0f
3938
3939 case DE4X5_DUMP:
3940 j = 0;
3941 tmp.addr[j++] = dev->irq;
3942 for (i=0; i<ETH_ALEN; i++) {
3943 tmp.addr[j++] = dev->dev_addr[i];
3944 }
3945 tmp.addr[j++] = lp->rxRingSize;
3946 tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
3947 tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
3948
3949 for (i=0;i<lp->rxRingSize-1;i++){
3950 if (i < 3) {
3951 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
3952 }
3953 }
3954 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
3955 for (i=0;i<lp->txRingSize-1;i++){
3956 if (i < 3) {
3957 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
3958 }
3959 }
3960 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
3961
3962 for (i=0;i<lp->rxRingSize-1;i++){
3963 if (i < 3) {
3964 tmp.lval[j>>2] = (s32)lp->rx_ring[i].buf; j+=4;
3965 }
3966 }
3967 tmp.lval[j>>2] = (s32)lp->rx_ring[i].buf; j+=4;
3968 for (i=0;i<lp->txRingSize-1;i++){
3969 if (i < 3) {
3970 tmp.lval[j>>2] = (s32)lp->tx_ring[i].buf; j+=4;
3971 }
3972 }
3973 tmp.lval[j>>2] = (s32)lp->tx_ring[i].buf; j+=4;
3974
3975 for (i=0;i<lp->rxRingSize;i++){
3976 tmp.lval[j>>2] = lp->rx_ring[i].status; j+=4;
3977 }
3978 for (i=0;i<lp->txRingSize;i++){
3979 tmp.lval[j>>2] = lp->tx_ring[i].status; j+=4;
3980 }
3981
3982 tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
3983 tmp.lval[j>>2] = inl(DE4X5_TPD); j+=4;
3984 tmp.lval[j>>2] = inl(DE4X5_RPD); j+=4;
3985 tmp.lval[j>>2] = inl(DE4X5_RRBA); j+=4;
3986 tmp.lval[j>>2] = inl(DE4X5_TRBA); j+=4;
3987 tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
3988 tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
3989 tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
3990 tmp.lval[j>>2] = lp->chipset; j+=4;
3991 if (lp->chipset == DC21140) {
3992 tmp.lval[j>>2] = inl(DE4X5_GEP); j+=4;
3993 } else {
3994 tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
3995 tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
3996 tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
3997 tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
3998 }
3999 tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4;
4000 if (lp->phy[lp->active].id) {
4001 tmp.lval[j>>2] = lp->active; j+=4;
4002 tmp.lval[j>>2]=mii_rd(MII_CR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
4003 tmp.lval[j>>2]=mii_rd(MII_SR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
4004 tmp.lval[j>>2]=mii_rd(MII_ID0,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
4005 tmp.lval[j>>2]=mii_rd(MII_ID1,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
4006 if (lp->phy[lp->active].id != BROADCOM_T4) {
4007 tmp.lval[j>>2]=mii_rd(MII_ANA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
4008 tmp.lval[j>>2]=mii_rd(MII_ANLPA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
4009 }
4010 tmp.lval[j>>2]=mii_rd(0x10,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
4011 if (lp->phy[lp->active].id != BROADCOM_T4) {
4012 tmp.lval[j>>2]=mii_rd(0x11,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
4013 tmp.lval[j>>2]=mii_rd(0x12,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
4014 } else {
4015 tmp.lval[j>>2]=mii_rd(0x14,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
4016 }
4017 }
4018
4019 tmp.addr[j++] = lp->txRingSize;
4020 tmp.addr[j++] = dev->tbusy;
4021
4022 ioc->len = j;
4023 if (!(status = verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len))) {
4024 memcpy_tofs(ioc->data, tmp.addr, ioc->len);
4025 }
4026
4027 break;
4028 default:
4029 status = -EOPNOTSUPP;
4030 }
4031
4032 return status;
4033 }
4034
4035 #ifdef MODULE
4036
4037
4038
4039
4040
4041
4042
4043
4044
4045
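/*
** Loadable module support: a single device template is registered at
** insmod time; cleanup frees the cached buffers, releases the I/O
** region and unregisters the device(s), refusing to unload while any
** interface is still up.
*/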
4046 static char devicename[9] = { 0, };
4047 static struct device thisDE4X5 = {
4048 devicename,
4049 0, 0, 0, 0,
4050 0, 0,
4051 0, 0, 0, NULL, de4x5_probe };
4052
4053 static int io=0x0b;
4054
4055 int
4056 init_module(void)
4057 {
4058 struct device *p = (struct device *)&thisDE4X5;
4059
4060 thisDE4X5.base_addr = io;
4061 thisDE4X5.irq = 0;
4062
4063 for (; p!=NULL; p=p->next) {
4064 if (register_netdev(p) != 0)
4065 return -EIO;
4066 }
4067 io=0;
4068 return 0;
4069 }
4070
4071 void
4072 cleanup_module(void)
4073 {
4074 struct de4x5_private *lp = (struct de4x5_private *) thisDE4X5.priv;
4075 struct device *p = (struct device *)&thisDE4X5;
4076 int keep_loaded = 0;
4077
4078 for (; p!=NULL; p=p->next) {
4079 keep_loaded += (p->flags & IFF_UP);
4080 }
4081
4082 if (keep_loaded) {
4083 printk("de4x5: Cannot unload modules - %d interface%s%s still active.\n",
4084 keep_loaded, (keep_loaded>1 ? "s ": " "),
4085 (keep_loaded>1 ? "are": "is"));
4086 return;
4087 }
4088
4089 for (p=thisDE4X5.next; p!=NULL; p=p->next) {
4090 if (p->priv) {
4091 struct de4x5_private *lp = (struct de4x5_private *)p->priv;
4092 if (lp->cache.buf) {
4093 kfree(lp->cache.buf);
4094 }
4095 release_region(p->base_addr, (lp->bus == PCI ?
4096 DE4X5_PCI_TOTAL_SIZE :
4097 DE4X5_EISA_TOTAL_SIZE));
4098 kfree(lp->cache.priv);
4099 }
4100 unregister_netdev(p);
4101 kfree(p);
4102 }
4103
4104 if (thisDE4X5.priv) {
4105 if (lp->cache.buf) {
4106 kfree(lp->cache.buf);
4107 }
4108 release_region(thisDE4X5.base_addr,
4109 (lp->bus == PCI ?
4110 DE4X5_PCI_TOTAL_SIZE :
4111 DE4X5_EISA_TOTAL_SIZE));
4112 kfree(lp->cache.priv);
4113 thisDE4X5.priv = NULL;
4114 }
4115 unregister_netdev(&thisDE4X5);
4116
4117 return;
4118 }
4119 #endif
4120
4121
4122
4123
4124
4125
4126
4127
4128