This source file includes the following definitions:
- eepro_probe
- eepro_probe1
- eepro_grab_irq
- eepro_open
- eepro_send_packet
- eepro_interrupt
- eepro_close
- eepro_get_stats
- set_multicast_list
- read_eeprom
- hardware_send_packet
- eepro_rx
- eepro_transmit_interrupt
- init_module
- cleanup_module
static const char *version =
	"eepro.c: v0.08 4/8/96 Bao C. Ha (bao.ha@srs.gov)\n";

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/malloc.h>
#include <linux/string.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/errno.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
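/* I/O addresses to probe when no base address has been specified. */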
static unsigned int eepro_portlist[] =
	{ 0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0x360, 0};

#ifndef NET_DEBUG
#define NET_DEBUG 2
#endif
static unsigned int net_debug = NET_DEBUG;

#define EEPRO_IO_EXTENT 16

#define LAN595 0
#define LAN595TX 1
#define LAN595FX 2
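/* Information that needs to be kept for each board, hung off dev->priv. */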
struct eepro_local {
	struct enet_statistics stats;
	unsigned rx_start;
	unsigned tx_start;
	int tx_last;
	unsigned tx_end;
	int eepro;
	int version;
	int stepping;
};

#define SA_ADDR0 0x00
#define SA_ADDR1 0xaa
#define SA_ADDR2 0x00
extern int eepro_probe(struct device *dev);

static int eepro_probe1(struct device *dev, short ioaddr);
static int eepro_open(struct device *dev);
static int eepro_send_packet(struct sk_buff *skb, struct device *dev);
static void eepro_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static void eepro_rx(struct device *dev);
static void eepro_transmit_interrupt(struct device *dev);
static int eepro_close(struct device *dev);
static struct enet_statistics *eepro_get_stats(struct device *dev);
static void set_multicast_list(struct device *dev);

static int read_eeprom(int ioaddr, int location);
static void hardware_send_packet(struct device *dev, void *buf, short length);
static int eepro_grab_irq(struct device *dev);
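/*
 * The 82595 has RAM_SIZE (32KB) of on-board buffer memory.  The first
 * RCV_RAM (24KB) bytes are used as the receive area and the remaining
 * XMT_RAM (8KB) bytes as the transmit area.  The upper/lower limit
 * registers are programmed in 256-byte units, hence the >> 8 below.
 */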
#define RAM_SIZE 0x8000
#define RCV_HEADER 8
#define RCV_RAM 0x6000
#define RCV_LOWER_LIMIT 0x00
#define RCV_UPPER_LIMIT ((RCV_RAM - 2) >> 8)
#define XMT_RAM (RAM_SIZE - RCV_RAM)
#define XMT_LOWER_LIMIT (RCV_RAM >> 8)
#define XMT_UPPER_LIMIT ((RAM_SIZE - 2) >> 8)
#define XMT_HEADER 8

#define RCV_DONE 0x0008
#define RX_OK 0x2000
#define RX_ERROR 0x0d81

#define TX_DONE_BIT 0x0080
#define CHAIN_BIT 0x8000
#define XMT_STATUS 0x02
#define XMT_CHAIN 0x04
#define XMT_COUNT 0x06

#define BANK0_SELECT 0x00
#define BANK1_SELECT 0x40
#define BANK2_SELECT 0x80
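/* Bank 0 registers and command-register opcodes. */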
#define COMMAND_REG 0x00
#define MC_SETUP 0x03
#define XMT_CMD 0x04
#define DIAGNOSE_CMD 0x07
#define RCV_ENABLE_CMD 0x08
#define RCV_DISABLE_CMD 0x0a
#define STOP_RCV_CMD 0x0b
#define RESET_CMD 0x0e
#define POWER_DOWN_CMD 0x18
#define RESUME_XMT_CMD 0x1c
#define SEL_RESET_CMD 0x1e
#define STATUS_REG 0x01
#define RX_INT 0x02
#define TX_INT 0x04
#define EXEC_STATUS 0x30
#define ID_REG 0x02
#define R_ROBIN_BITS 0xc0
#define ID_REG_MASK 0x2c
#define ID_REG_SIG 0x24
#define AUTO_ENABLE 0x10
#define INT_MASK_REG 0x03
#define RX_STOP_MASK 0x01
#define RX_MASK 0x02
#define TX_MASK 0x04
#define EXEC_MASK 0x08
#define ALL_MASK 0x0f
#define IO_32_BIT 0x10
#define RCV_BAR 0x04
#define RCV_STOP 0x06
#define XMT_BAR 0x0a
#define HOST_ADDRESS_REG 0x0c
#define IO_PORT 0x0e
#define IO_PORT_32_BIT 0x0c
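/* Bank 1 registers (selected with BANK1_SELECT). */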
#define REG1 0x01
#define WORD_WIDTH 0x02
#define INT_ENABLE 0x80
#define INT_NO_REG 0x02
#define RCV_LOWER_LIMIT_REG 0x08
#define RCV_UPPER_LIMIT_REG 0x09
#define XMT_LOWER_LIMIT_REG 0x0a
#define XMT_UPPER_LIMIT_REG 0x0b
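/* Bank 2 registers (selected with BANK2_SELECT): configuration bits,
   the individual address registers and the EEPROM control bits. */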
#define XMT_Chain_Int 0x20
#define XMT_Chain_ErrStop 0x40
#define RCV_Discard_BadFrame 0x80
#define REG2 0x02
#define PRMSC_Mode 0x01
#define Multi_IA 0x20
#define REG3 0x03
#define TPE_BIT 0x04
#define BNC_BIT 0x20
#define REG13 0x0d
#define FDX 0x00
#define A_N_ENABLE 0x02

#define I_ADD_REG0 0x04
#define I_ADD_REG1 0x05
#define I_ADD_REG2 0x06
#define I_ADD_REG3 0x07
#define I_ADD_REG4 0x08
#define I_ADD_REG5 0x09

#define EEPROM_REG 0x0a
#define EESK 0x01
#define EECS 0x02
#define EEDI 0x04
#define EEDO 0x08
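/*
 * Check for a network adapter of this type.  If dev->base_addr gives a
 * specific port, probe only there; a nonzero value below 0x200 means
 * "don't probe at all"; otherwise scan the addresses in eepro_portlist.
 */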
#ifdef HAVE_DEVLIST
struct netdev_entry netcard_drv =
	{"eepro", eepro_probe1, EEPRO_IO_EXTENT, eepro_portlist};
#else
int
eepro_probe(struct device *dev)
{
	int i;
	int base_addr = dev ? dev->base_addr : 0;

	if (base_addr > 0x1ff)
		return eepro_probe1(dev, base_addr);
	else if (base_addr != 0)
		return ENXIO;

	for (i = 0; eepro_portlist[i]; i++) {
		int ioaddr = eepro_portlist[i];
		if (check_region(ioaddr, EEPRO_IO_EXTENT))
			continue;
		if (eepro_probe1(dev, ioaddr) == 0)
			return 0;
	}

	return ENODEV;
}
#endif
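/*
 * Probe one I/O address.  The 82595 is identified through its ID
 * register: the two R_ROBIN_BITS form a counter that advances by 0x40
 * on every read, so two consecutive reads that differ by exactly 0x40
 * indicate the chip.  The station address read from the EEPROM then
 * distinguishes a genuine EtherExpress Pro/10 from other 82595 boards.
 */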
int eepro_probe1(struct device *dev, short ioaddr)
{
	unsigned short station_addr[6], id, counter;
	int i;
	int eepro;

	const char *ifmap[] = {"AUI", "10Base2", "10BaseT"};
	enum iftype { AUI=0, BNC=1, TPE=2 };

	if (((id=inb(ioaddr + ID_REG)) & ID_REG_MASK) == ID_REG_SIG) {

		counter = (id & R_ROBIN_BITS);
		if (((id=inb(ioaddr+ID_REG)) & R_ROBIN_BITS) ==
			(counter + 0x40)) {

			station_addr[0] = read_eeprom(ioaddr, 2);
			station_addr[1] = read_eeprom(ioaddr, 3);
			station_addr[2] = read_eeprom(ioaddr, 4);

			if (station_addr[2] != 0x00aa || (station_addr[1] & 0xff00) != 0x0000) {
				eepro = 0;
				printk("%s: Intel 82595-based lan card at %#x,",
					dev->name, ioaddr);
			}
			else {
				eepro = 1;
				printk("%s: Intel EtherExpress Pro/10 at %#x,",
					dev->name, ioaddr);
			}

			dev->base_addr = ioaddr;

			for (i=0; i < 6; i++) {
				dev->dev_addr[i] = ((unsigned char *) station_addr)[5-i];
				printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
			}

			outb(BANK2_SELECT, ioaddr);
			id = inb(ioaddr + REG3);
			if (id & TPE_BIT)
				dev->if_port = TPE;
			else dev->if_port = BNC;

			if (dev->irq < 2 && eepro) {
				i = read_eeprom(ioaddr, 1);
				switch (i & 0x07) {
					case 0: dev->irq = 9; break;
					case 1: dev->irq = 3; break;
					case 2: dev->irq = 5; break;
					case 3: dev->irq = 10; break;
					case 4: dev->irq = 11; break;
					default:
						printk(" illegal interrupt vector stored in EEPROM.\n");
						return ENODEV;
				}
			}
			else if (dev->irq == 2)
				dev->irq = 9;

			if (dev->irq > 2) {
				printk(", IRQ %d, %s.\n", dev->irq,
					ifmap[dev->if_port]);
				if (request_irq(dev->irq, &eepro_interrupt, 0, "eepro", NULL)) {
					printk("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
					return -EAGAIN;
				}
			}
			else printk(", %s.\n", ifmap[dev->if_port]);

			if ((dev->mem_start & 0xf) > 0)
				net_debug = dev->mem_start & 7;

			if (net_debug > 3) {
				i = read_eeprom(ioaddr, 5);
				if (i & 0x2000)
					printk("%s: Concurrent Processing is enabled but not used!\n",
						dev->name);
			}

			if (net_debug)
				printk(version);

			request_region(ioaddr, EEPRO_IO_EXTENT, "eepro");

			dev->priv = kmalloc(sizeof(struct eepro_local), GFP_KERNEL);
			if (dev->priv == NULL)
				return -ENOMEM;
			memset(dev->priv, 0, sizeof(struct eepro_local));

			dev->open = eepro_open;
			dev->stop = eepro_close;
			dev->hard_start_xmit = eepro_send_packet;
			dev->get_stats = eepro_get_stats;
			dev->set_multicast_list = &set_multicast_list;

			ether_setup(dev);

			outb(RESET_CMD, ioaddr);

			return 0;
		}
		else return ENODEV;
	}
	else if (net_debug > 3)
		printk("EtherExpress Pro probe failed!\n");
	return ENODEV;
}
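/*
 * Try to find a usable IRQ for the board.  Each candidate IRQ is
 * programmed into the interrupt-number register, a DIAGNOSE command is
 * issued, and autoirq_report() is used to see whether that interrupt
 * actually fired.
 */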
static char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1};
static int eepro_grab_irq(struct device *dev)
{
	int irqlist[] = { 5, 9, 10, 11, 4, 3, 0};
	int *irqp = irqlist, temp_reg, ioaddr = dev->base_addr;

	outb(BANK1_SELECT, ioaddr);

	temp_reg = inb(ioaddr + REG1);
	outb(temp_reg | INT_ENABLE, ioaddr + REG1);

	outb(BANK0_SELECT, ioaddr);

	outb(ALL_MASK, ioaddr + STATUS_REG);

	outb(ALL_MASK & ~(EXEC_MASK), ioaddr + INT_MASK_REG);

	do {
		outb(BANK1_SELECT, ioaddr);

		temp_reg = inb(ioaddr + INT_NO_REG);
		outb((temp_reg & 0xf8) | irqrmap[*irqp], ioaddr + INT_NO_REG);

		outb(BANK0_SELECT, ioaddr);
		if (request_irq(*irqp, NULL, 0, "bogus", NULL) != -EBUSY) {
			autoirq_setup(0);

			outb(DIAGNOSE_CMD, ioaddr);

			if (*irqp == autoirq_report(2) &&
				(request_irq(dev->irq = *irqp, &eepro_interrupt, 0, "eepro", NULL) == 0))
				break;

			outb(ALL_MASK, ioaddr + STATUS_REG);
		}
	} while (*++irqp);

	outb(BANK1_SELECT, ioaddr);

	temp_reg = inb(ioaddr + REG1);
	outb(temp_reg & 0x7f, ioaddr + REG1);

	outb(BANK0_SELECT, ioaddr);

	outb(ALL_MASK, ioaddr + INT_MASK_REG);

	outb(ALL_MASK, ioaddr + STATUS_REG);

	return dev->irq;
}
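/*
 * Open/initialize the board: set the station address, program the
 * receive and transmit buffer limits, work out which 82595 variant is
 * present, and enable the receive unit.
 */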
static int
eepro_open(struct device *dev)
{
	unsigned short temp_reg, old8, old9;
	int i, ioaddr = dev->base_addr;
	struct eepro_local *lp = (struct eepro_local *)dev->priv;

	if (net_debug > 3)
		printk("eepro: entering eepro_open routine.\n");

	if (dev->dev_addr[0] == SA_ADDR0 &&
		dev->dev_addr[1] == SA_ADDR1 &&
		dev->dev_addr[2] == SA_ADDR2)
		lp->eepro = 1;
	else lp->eepro = 0;

	if (dev->irq < 2 && eepro_grab_irq(dev) == 0) {
		printk("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
		return -EAGAIN;
	}

	if (irq2dev_map[dev->irq] != 0
		|| (irq2dev_map[dev->irq] = dev) == 0)
		return -EAGAIN;

	outb(BANK2_SELECT, ioaddr);
	temp_reg = inb(ioaddr + EEPROM_REG);

	lp->stepping = temp_reg >> 5;

	if (net_debug > 3)
		printk("The stepping of the 82595 is %d\n", lp->stepping);

	if (temp_reg & 0x10)
		outb(temp_reg & 0xef, ioaddr + EEPROM_REG);
	for (i=0; i < 6; i++)
		outb(dev->dev_addr[i], ioaddr + I_ADD_REG0 + i);

	temp_reg = inb(ioaddr + REG1);
	outb(temp_reg | XMT_Chain_Int | XMT_Chain_ErrStop
		| RCV_Discard_BadFrame, ioaddr + REG1);

	temp_reg = inb(ioaddr + REG2);
	outb(temp_reg | 0x14, ioaddr + REG2);

	temp_reg = inb(ioaddr + REG3);
	outb(temp_reg & 0x3f, ioaddr + REG3);

	outb(BANK1_SELECT, ioaddr);

	temp_reg = inb(ioaddr + INT_NO_REG);
	outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG);

	outb(RCV_LOWER_LIMIT, ioaddr + RCV_LOWER_LIMIT_REG);
	outb(RCV_UPPER_LIMIT, ioaddr + RCV_UPPER_LIMIT_REG);
	outb(XMT_LOWER_LIMIT, ioaddr + XMT_LOWER_LIMIT_REG);
	outb(XMT_UPPER_LIMIT, ioaddr + XMT_UPPER_LIMIT_REG);

	temp_reg = inb(ioaddr + REG1);
	outb(temp_reg | INT_ENABLE, ioaddr + REG1);

	outb(BANK0_SELECT, ioaddr);

	outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);

	outb(ALL_MASK, ioaddr + STATUS_REG);

	outw(RCV_LOWER_LIMIT << 8, ioaddr + RCV_BAR);
	lp->rx_start = (RCV_LOWER_LIMIT << 8);
	outw((RCV_UPPER_LIMIT << 8) | 0xfe, ioaddr + RCV_STOP);

	outw(XMT_LOWER_LIMIT << 8, ioaddr + XMT_BAR);

	old8 = inb(ioaddr + 8);
	outb(~old8, ioaddr + 8);
	if ((temp_reg = inb(ioaddr + 8)) == old8) {
		if (net_debug > 3)
			printk("i82595 detected!\n");
		lp->version = LAN595;
	}
	else {
		lp->version = LAN595TX;
		outb(old8, ioaddr + 8);
		old9 = inb(ioaddr + 9);
		outb(~old9, ioaddr + 9);
		if ((temp_reg = inb(ioaddr + 9)) == (unsigned char)(~old9)) {
			enum iftype { AUI=0, BNC=1, TPE=2 };
			if (net_debug > 3)
				printk("i82595FX detected!\n");
			lp->version = LAN595FX;
			outb(old9, ioaddr + 9);
			if (dev->if_port != TPE) {

				outb(BANK2_SELECT, ioaddr);
				temp_reg = inb(ioaddr + REG13);
				outb(temp_reg & ~(FDX | A_N_ENABLE), ioaddr + REG13);
				outb(BANK0_SELECT, ioaddr);
			}
		}
		else if (net_debug > 3)
			printk("i82595TX detected!\n");
	}

	outb(SEL_RESET_CMD, ioaddr);

	SLOW_DOWN_IO;
	SLOW_DOWN_IO;

	lp->tx_start = lp->tx_end = XMT_LOWER_LIMIT << 8;
	lp->tx_last = 0;

	dev->tbusy = 0;
	dev->interrupt = 0;
	dev->start = 1;

	if (net_debug > 3)
		printk("eepro: exiting eepro_open routine.\n");

	outb(RCV_ENABLE_CMD, ioaddr);

	MOD_INC_USE_COUNT;
	return 0;
}
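/*
 * Queue a packet for transmission.  If the transmitter has been busy
 * for more than a few ticks, assume it is wedged, reset the 82595 and
 * restart.  The actual copy into the adapter's transmit RAM is done by
 * hardware_send_packet().
 */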
static int
eepro_send_packet(struct sk_buff *skb, struct device *dev)
{
	struct eepro_local *lp = (struct eepro_local *)dev->priv;
	int ioaddr = dev->base_addr;

	if (net_debug > 5)
		printk("eepro: entering eepro_send_packet routine.\n");

	if (dev->tbusy) {

		int tickssofar = jiffies - dev->trans_start;
		if (tickssofar < 5)
			return 1;
		if (net_debug > 1)
			printk("%s: transmit timed out, %s?\n", dev->name,
				"network cable problem");
		lp->stats.tx_errors++;

		outb(SEL_RESET_CMD, ioaddr);

		SLOW_DOWN_IO;
		SLOW_DOWN_IO;

		lp->tx_start = lp->tx_end = RCV_RAM;
		lp->tx_last = 0;

		dev->tbusy = 0;
		dev->trans_start = jiffies;

		outb(RCV_ENABLE_CMD, ioaddr);
	}

	if (skb == NULL) {
		dev_tint(dev);
		return 0;
	}

	if (set_bit(0, (void*)&dev->tbusy) != 0)
		printk("%s: Transmitter access conflict.\n", dev->name);
	else {
		short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
		unsigned char *buf = skb->data;

		hardware_send_packet(dev, buf, length);
		dev->trans_start = jiffies;
	}

	dev_kfree_skb(skb, FREE_WRITE);

	if (net_debug > 5)
		printk("eepro: exiting eepro_send_packet routine.\n");

	return 0;
}
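/*
 * The interrupt handler: read the status register and dispatch to
 * eepro_rx() and eepro_transmit_interrupt() as indicated, acknowledging
 * each interrupt source as it is handled.
 */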
static void
eepro_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct device *dev = (struct device *)(irq2dev_map[irq]);
	int ioaddr, status, boguscount = 0;

	if (net_debug > 5)
		printk("eepro: entering eepro_interrupt routine.\n");

	if (dev == NULL) {
		printk("eepro_interrupt(): irq %d for unknown device.\n", irq);
		return;
	}
	dev->interrupt = 1;

	ioaddr = dev->base_addr;

	do {
		status = inb(ioaddr + STATUS_REG);

		if (status & RX_INT) {
			if (net_debug > 4)
				printk("eepro: packet received interrupt.\n");

			outb(RX_INT, ioaddr + STATUS_REG);

			eepro_rx(dev);
		}
		else if (status & TX_INT) {
			if (net_debug > 4)
				printk("eepro: packet transmit interrupt.\n");

			outb(TX_INT, ioaddr + STATUS_REG);

			eepro_transmit_interrupt(dev);
			dev->tbusy = 0;
			mark_bh(NET_BH);
		}
	} while ((++boguscount < 10) && (status & 0x06));

	dev->interrupt = 0;
	if (net_debug > 5)
		printk("eepro: exiting eepro_interrupt routine.\n");

	return;
}
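/*
 * Shut down the interface: stop the receiver, mask all interrupts,
 * reset the 82595 and release the IRQ.
 */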
static int
eepro_close(struct device *dev)
{
	struct eepro_local *lp = (struct eepro_local *)dev->priv;
	int ioaddr = dev->base_addr;
	short temp_reg;

	dev->tbusy = 1;
	dev->start = 0;

	outb(BANK1_SELECT, ioaddr);

	temp_reg = inb(ioaddr + REG1);
	outb(temp_reg & 0x7f, ioaddr + REG1);

	outb(BANK0_SELECT, ioaddr);

	outb(STOP_RCV_CMD, ioaddr);
	lp->tx_start = lp->tx_end = RCV_RAM;
	lp->tx_last = 0;

	outb(ALL_MASK, ioaddr + INT_MASK_REG);

	outb(ALL_MASK, ioaddr + STATUS_REG);

	outb(RESET_CMD, ioaddr);

	free_irq(dev->irq, NULL);

	irq2dev_map[dev->irq] = 0;

	SLOW_DOWN_IO;
	SLOW_DOWN_IO;

	MOD_DEC_USE_COUNT;
	return 0;
}
static struct enet_statistics *
eepro_get_stats(struct device *dev)
{
	struct eepro_local *lp = (struct eepro_local *)dev->priv;

	return &lp->stats;
}
static void
set_multicast_list(struct device *dev)
{
	struct eepro_local *lp = (struct eepro_local *)dev->priv;
	short ioaddr = dev->base_addr;
	unsigned short mode;
	struct dev_mc_list *dmi = dev->mc_list;

	if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63)
	{
		dev->flags |= IFF_PROMISC;

		outb(BANK2_SELECT, ioaddr);
		mode = inb(ioaddr + REG2);
		outb(mode | PRMSC_Mode, ioaddr + REG2);
		mode = inb(ioaddr + REG3);
		outb(mode, ioaddr + REG3);
		outb(BANK0_SELECT, ioaddr);
		printk("%s: promiscuous mode enabled.\n", dev->name);
	}
	else if (dev->mc_count == 0)
	{
		outb(BANK2_SELECT, ioaddr);
		mode = inb(ioaddr + REG2);
		outb(mode & 0xd6, ioaddr + REG2);
		mode = inb(ioaddr + REG3);
		outb(mode, ioaddr + REG3);
		outb(BANK0_SELECT, ioaddr);
	}
	else
	{
		unsigned short status, *eaddrs;
		int i, boguscount = 0;

		outb(ALL_MASK, ioaddr + INT_MASK_REG);

		outb(BANK2_SELECT, ioaddr);
		mode = inb(ioaddr + REG2);
		outb(mode | Multi_IA, ioaddr + REG2);
		mode = inb(ioaddr + REG3);
		outb(mode, ioaddr + REG3);
		outb(BANK0_SELECT, ioaddr);
		outw(lp->tx_end, ioaddr + HOST_ADDRESS_REG);
		outw(MC_SETUP, ioaddr + IO_PORT);
		outw(0, ioaddr + IO_PORT);
		outw(0, ioaddr + IO_PORT);
		outw(6*(dev->mc_count + 1), ioaddr + IO_PORT);
		for (i = 0; i < dev->mc_count; i++)
		{
			eaddrs = (unsigned short *)dmi->dmi_addr;
			dmi = dmi->next;
			outw(*eaddrs++, ioaddr + IO_PORT);
			outw(*eaddrs++, ioaddr + IO_PORT);
			outw(*eaddrs++, ioaddr + IO_PORT);
		}
		eaddrs = (unsigned short *) dev->dev_addr;
		outw(eaddrs[0], ioaddr + IO_PORT);
		outw(eaddrs[1], ioaddr + IO_PORT);
		outw(eaddrs[2], ioaddr + IO_PORT);
		outw(lp->tx_end, ioaddr + XMT_BAR);
		outb(MC_SETUP, ioaddr);

		i = lp->tx_end + XMT_HEADER + 6*(dev->mc_count + 1);
		if (lp->tx_start != lp->tx_end)
		{
			outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
			outw(i, ioaddr + IO_PORT);
			outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
			status = inw(ioaddr + IO_PORT);
			outw(status | CHAIN_BIT, ioaddr + IO_PORT);
			lp->tx_end = i;
		}
		else lp->tx_start = lp->tx_end = i;

		do {
			SLOW_DOWN_IO;
			SLOW_DOWN_IO;
			if (inb(ioaddr + STATUS_REG) & 0x08)
			{
				i = inb(ioaddr);
				outb(0x08, ioaddr + STATUS_REG);
				if (i & 0x20) {
					printk("%s: multicast setup failed.\n",
						dev->name);
					break;
				} else if ((i & 0x0f) == 0x03) {
					printk("%s: set Rx mode to %d addresses.\n",
						dev->name, dev->mc_count);
					break;
				}
			}
		} while (++boguscount < 100);

		outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);
	}
	outb(RCV_ENABLE_CMD, ioaddr);
}
#define eeprom_delay() { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; }}
#define EE_READ_CMD (6 << 6)

int
read_eeprom(int ioaddr, int location)
{
	int i;
	unsigned short retval = 0;
	short ee_addr = ioaddr + EEPROM_REG;
	int read_cmd = location | EE_READ_CMD;
	short ctrl_val = EECS;

	outb(BANK2_SELECT, ioaddr);
	outb(ctrl_val, ee_addr);

	for (i = 8; i >= 0; i--) {
		short outval = (read_cmd & (1 << i)) ? ctrl_val | EEDI
			: ctrl_val;
		outb(outval, ee_addr);
		outb(outval | EESK, ee_addr);
		eeprom_delay();
		outb(outval, ee_addr);
		eeprom_delay();
	}
	outb(ctrl_val, ee_addr);

	for (i = 16; i > 0; i--) {
		outb(ctrl_val | EESK, ee_addr); eeprom_delay();
		retval = (retval << 1) | ((inb(ee_addr) & EEDO) ? 1 : 0);
		outb(ctrl_val, ee_addr); eeprom_delay();
	}

	ctrl_val &= ~EECS;
	outb(ctrl_val | EESK, ee_addr);
	eeprom_delay();
	outb(ctrl_val, ee_addr);
	eeprom_delay();
	outb(BANK0_SELECT, ioaddr);
	return retval;
}
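/*
 * Copy one frame into the transmit area of the adapter RAM, build the
 * XMT_CMD header in front of it, and either start transmission (if the
 * transmitter is idle) or chain the new buffer onto the previous one
 * and issue RESUME_XMT_CMD.
 */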
static void
hardware_send_packet(struct device *dev, void *buf, short length)
{
	struct eepro_local *lp = (struct eepro_local *)dev->priv;
	short ioaddr = dev->base_addr;
	unsigned status, tx_available, last, end, boguscount = 10;

	if (net_debug > 5)
		printk("eepro: entering hardware_send_packet routine.\n");

	while (boguscount-- > 0) {

		if (lp->tx_end > lp->tx_start)
			tx_available = XMT_RAM - (lp->tx_end - lp->tx_start);
		else if (lp->tx_end < lp->tx_start)
			tx_available = lp->tx_start - lp->tx_end;
		else tx_available = XMT_RAM;

		outb(ALL_MASK, ioaddr + INT_MASK_REG);

		if (((((length + 3) >> 1) << 1) + 2*XMT_HEADER)
			>= tx_available)
			continue;

		last = lp->tx_end;
		end = last + (((length + 3) >> 1) << 1) + XMT_HEADER;

		if (end >= RAM_SIZE) {
			if ((RAM_SIZE - last) <= XMT_HEADER) {

				last = RCV_RAM;
				end = last + (((length + 3) >> 1) << 1) + XMT_HEADER;
			}
			else end = RCV_RAM + (end - RAM_SIZE);
		}

		outw(last, ioaddr + HOST_ADDRESS_REG);
		outw(XMT_CMD, ioaddr + IO_PORT);
		outw(0, ioaddr + IO_PORT);
		outw(end, ioaddr + IO_PORT);
		outw(length, ioaddr + IO_PORT);

		if (lp->version == LAN595)
			outsw(ioaddr + IO_PORT, buf, (length + 3) >> 1);
		else {
			unsigned short temp = inb(ioaddr + INT_MASK_REG);
			outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG);
			outsl(ioaddr + IO_PORT_32_BIT, buf, (length + 3) >> 2);
			outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG);
		}

		if (lp->tx_start != lp->tx_end) {

			if (lp->tx_end != last) {
				outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
				outw(last, ioaddr + IO_PORT);
			}
			outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
			status = inw(ioaddr + IO_PORT);
			outw(status | CHAIN_BIT, ioaddr + IO_PORT);
		}

		status = inw(ioaddr + IO_PORT);

		outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);

		if (lp->tx_start == lp->tx_end) {
			outw(last, ioaddr + XMT_BAR);
			outb(XMT_CMD, ioaddr);
			lp->tx_start = last;
		}
		else outb(RESUME_XMT_CMD, ioaddr);

		lp->tx_last = last;
		lp->tx_end = end;

		if (dev->tbusy) {
			dev->tbusy = 0;
			mark_bh(NET_BH);
		}

		if (net_debug > 5)
			printk("eepro: exiting hardware_send_packet routine.\n");
		return;
	}
	dev->tbusy = 1;
	if (net_debug > 5)
		printk("eepro: exiting hardware_send_packet routine.\n");
}
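/*
 * Pull received frames out of the receive area of the adapter RAM:
 * walk the chain of frame headers starting at lp->rx_start, copy each
 * good frame into a fresh sk_buff and hand it to netif_rx(), then
 * advance the receive stop register behind the last frame processed.
 */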
static void
eepro_rx(struct device *dev)
{
	struct eepro_local *lp = (struct eepro_local *)dev->priv;
	short ioaddr = dev->base_addr;
	short boguscount = 20;
	short rcv_car = lp->rx_start;
	unsigned rcv_event, rcv_status, rcv_next_frame, rcv_size;

	if (net_debug > 5)
		printk("eepro: entering eepro_rx routine.\n");

	outw(rcv_car, ioaddr + HOST_ADDRESS_REG);
	rcv_event = inw(ioaddr + IO_PORT);

	while (rcv_event == RCV_DONE) {
		rcv_status = inw(ioaddr + IO_PORT);
		rcv_next_frame = inw(ioaddr + IO_PORT);
		rcv_size = inw(ioaddr + IO_PORT);

		if ((rcv_status & (RX_OK | RX_ERROR)) == RX_OK) {

			struct sk_buff *skb;

			rcv_size &= 0x3fff;
			skb = dev_alloc_skb(rcv_size+5);
			if (skb == NULL) {
				printk("%s: Memory squeeze, dropping packet.\n", dev->name);
				lp->stats.rx_dropped++;
				break;
			}
			skb->dev = dev;
			skb_reserve(skb,2);

			if (lp->version == LAN595)
				insw(ioaddr+IO_PORT, skb_put(skb,rcv_size), (rcv_size + 3) >> 1);
			else {
				unsigned short temp = inb(ioaddr + INT_MASK_REG);
				outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG);
				insl(ioaddr+IO_PORT_32_BIT, skb_put(skb,rcv_size), (rcv_size + 3) >> 2);
				outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG);
			}

			skb->protocol = eth_type_trans(skb,dev);
			netif_rx(skb);
			lp->stats.rx_packets++;
		}
		else {

			lp->stats.rx_errors++;
			if (rcv_status & 0x0100)
				lp->stats.rx_over_errors++;
			else if (rcv_status & 0x0400)
				lp->stats.rx_frame_errors++;
			else if (rcv_status & 0x0800)
				lp->stats.rx_crc_errors++;
			printk("%s: event = %#x, status = %#x, next = %#x, size = %#x\n",
				dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size);
		}
		if (rcv_status & 0x1000)
			lp->stats.rx_length_errors++;
		if (--boguscount == 0)
			break;

		rcv_car = lp->rx_start + RCV_HEADER + rcv_size;
		lp->rx_start = rcv_next_frame;
		outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG);
		rcv_event = inw(ioaddr + IO_PORT);

	}
	if (rcv_car == 0)
		rcv_car = (RCV_UPPER_LIMIT << 8) | 0xff;
	outw(rcv_car - 1, ioaddr + RCV_STOP);

	if (net_debug > 5)
		printk("eepro: exiting eepro_rx routine.\n");
}
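/*
 * Reap completed transmit buffers: follow the transmit chain from
 * lp->tx_start, updating the statistics from each transmit status word
 * and clearing dev->tbusy so the next packet can be queued, until a
 * buffer whose TX_DONE_BIT is not yet set is reached.
 */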
static void
eepro_transmit_interrupt(struct device *dev)
{
	struct eepro_local *lp = (struct eepro_local *)dev->priv;
	short ioaddr = dev->base_addr;
	short boguscount = 10;
	short xmt_status;

	while (lp->tx_start != lp->tx_end) {

		outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG);
		xmt_status = inw(ioaddr+IO_PORT);
		if ((xmt_status & TX_DONE_BIT) == 0) break;
		xmt_status = inw(ioaddr+IO_PORT);
		lp->tx_start = inw(ioaddr+IO_PORT);

		if (dev->tbusy) {
			dev->tbusy = 0;
			mark_bh(NET_BH);
		}

		if (xmt_status & 0x2000)
			lp->stats.tx_packets++;
		else {
			lp->stats.tx_errors++;
			if (xmt_status & 0x0400)
				lp->stats.tx_carrier_errors++;
			printk("%s: XMT status = %#x\n",
				dev->name, xmt_status);
		}
		if (xmt_status & 0x000f)
			lp->stats.collisions += (xmt_status & 0x000f);
		if ((xmt_status & 0x0040) == 0x0)
			lp->stats.tx_heartbeat_errors++;

		if (--boguscount == 0)
			break;
	}
}
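/*
 * Module entry points.  When built as a module, the I/O base and IRQ
 * are taken from the 'io' and 'irq' variables below (settable at load
 * time); auto-probing from insmod is discouraged.
 */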
#ifdef MODULE
static char devicename[9] = { 0, };
static struct device dev_eepro = {
	devicename,
	0, 0, 0, 0,
	0, 0,
	0, 0, 0, NULL, eepro_probe };

static int io = 0x200;
static int irq = 0;

int
init_module(void)
{
	if (io == 0)
		printk("eepro: You should not use auto-probing with insmod!\n");
	dev_eepro.base_addr = io;
	dev_eepro.irq = irq;

	if (register_netdev(&dev_eepro) != 0)
		return -EIO;
	return 0;
}

void
cleanup_module(void)
{
	unregister_netdev(&dev_eepro);
	kfree_s(dev_eepro.priv, sizeof(struct eepro_local));
	dev_eepro.priv = NULL;

	release_region(dev_eepro.base_addr, EEPRO_IO_EXTENT);
}
#endif