This source file includes the following definitions:
- eepro_probe
- eepro_probe1
- eepro_grab_irq
- eepro_open
- eepro_send_packet
- eepro_interrupt
- eepro_close
- eepro_get_stats
- set_multicast_list
- read_eeprom
- hardware_send_packet
- eepro_rx
- eepro_transmit_interrupt
- init_module
- cleanup_module
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
/* Driver version banner; printed once at probe time when net_debug
   is nonzero. */
static char *version =
	"eepro.c: v0.07a 6/5/95 Bao C. Ha (bao@saigon.async.com)\n";
56
57
58
59 #include <linux/config.h>
60
61 #ifdef MODULE
62 #include <linux/module.h>
63 #include <linux/version.h>
64 #endif
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87 #include <linux/kernel.h>
88 #include <linux/sched.h>
89 #include <linux/types.h>
90 #include <linux/fcntl.h>
91 #include <linux/interrupt.h>
92 #include <linux/ptrace.h>
93 #include <linux/ioport.h>
94 #include <linux/in.h>
95 #include <linux/malloc.h>
96 #include <linux/string.h>
97 #include <asm/system.h>
98 #include <asm/bitops.h>
99 #include <asm/io.h>
100 #include <asm/dma.h>
101 #include <linux/errno.h>
102
103 #include <linux/netdevice.h>
104 #include <linux/etherdevice.h>
105 #include <linux/skbuff.h>
106 extern struct device *init_etherdev(struct device *dev, int sizeof_private,
107 unsigned long *mem_startp);
108
109
110
/* The I/O ports the probe routine scans for an 82595-based board.
   A zero entry terminates the list. */
static unsigned int eepro_portlist[] =
   { 0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0x360, 0};

/* Default debug verbosity; may be overridden at probe time via the
   low nibble of dev->mem_start (see eepro_probe1). */
#ifndef NET_DEBUG
#define NET_DEBUG 2
#endif
static unsigned int net_debug = NET_DEBUG;

/* Number of consecutive I/O ports the board occupies. */
#define EEPRO_IO_EXTENT	16
122
123
/* Per-device private state, hung off dev->priv.  The rx_start/tx_*
   fields are byte offsets into the 32K of on-board buffer RAM. */
struct eepro_local {
	struct enet_statistics stats;	/* returned by eepro_get_stats() */
	unsigned rx_start;		/* offset of the next receive frame header */
	unsigned tx_start;		/* start of the pending transmit queue */
	int tx_last;			/* offset of the most recently queued tx frame */
	unsigned tx_end;		/* end of the pending transmit queue */
	int eepro;			/* 1 for a genuine EtherExpress Pro/10,
					   0 for other 82595-based cards */
};
133
134
/* First three octets of Intel's assigned Ethernet address block
   (00:aa:00); a matching station address identifies a genuine
   EtherExpress Pro/10. */
#define SA_ADDR0 0x00
#define SA_ADDR1 0xaa
#define SA_ADDR2 0x00

/* Index to functions, as function prototypes. */
extern int eepro_probe(struct device *dev);

static int	eepro_probe1(struct device *dev, short ioaddr);
static int	eepro_open(struct device *dev);
static int	eepro_send_packet(struct sk_buff *skb, struct device *dev);
static void	eepro_interrupt(int irq, struct pt_regs *regs);
static void 	eepro_rx(struct device *dev);
static void 	eepro_transmit_interrupt(struct device *dev);
static int	eepro_close(struct device *dev);
static struct enet_statistics *eepro_get_stats(struct device *dev);
static void     set_multicast_list(struct device *dev, int num_addrs, void *addrs);

static int read_eeprom(int ioaddr, int location);
static void hardware_send_packet(struct device *dev, void *buf, short length);
static int	eepro_grab_irq(struct device *dev);
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
/* Details of the Intel i82595. */

/* On-board RAM layout: 32K total, of which the lower 24K (RCV_RAM)
   is the receive ring and the remainder the transmit area.  The
   *_LIMIT values are the high bytes of the boundary addresses, as
   loaded into the bank 1 limit registers. */
#define	RAM_SIZE	0x8000
#define	RCV_HEADER	8
#define RCV_RAM 	0x6000
#define	RCV_LOWER_LIMIT	0x00
#define	RCV_UPPER_LIMIT	((RCV_RAM - 2) >> 8)
#define	XMT_RAM		(RAM_SIZE - RCV_RAM)
#define	XMT_LOWER_LIMIT	(RCV_RAM >> 8)
#define	XMT_UPPER_LIMIT	((RAM_SIZE - 2) >> 8)
#define	XMT_HEADER	8

/* Receive frame event/status bits. */
#define	RCV_DONE	0x0008
#define	RX_OK		0x2000
#define	RX_ERROR	0x0d81

/* Transmit status bit and offsets into the transmit command block. */
#define	TX_DONE_BIT	0x0080
#define	CHAIN_BIT	0x8000
#define	XMT_STATUS	0x02
#define	XMT_CHAIN	0x04
#define	XMT_COUNT	0x06

/* Register bank selectors, written to the command register. */
#define	BANK0_SELECT	0x00
#define	BANK1_SELECT	0x40
#define	BANK2_SELECT	0x80

/* Bank 0 registers and command opcodes. */
#define	COMMAND_REG	0x00
#define	MC_SETUP	0x03
#define	XMT_CMD		0x04
#define	DIAGNOSE_CMD	0x07
#define	RCV_ENABLE_CMD	0x08
#define	RCV_DISABLE_CMD	0x0a
#define	STOP_RCV_CMD	0x0b
#define	RESET_CMD	0x0e
#define	POWER_DOWN_CMD	0x18
#define	RESUME_XMT_CMD	0x1c
#define	SEL_RESET_CMD	0x1e
#define	STATUS_REG	0x01
#define	RX_INT		0x02
#define	TX_INT		0x04
#define	EXEC_STATUS	0x30
#define	ID_REG		0x02
#define	R_ROBIN_BITS	0xc0	/* round-robin counter in the ID register */
#define	ID_REG_MASK	0x2c
#define	ID_REG_SIG	0x24
#define	AUTO_ENABLE	0x10
#define	INT_MASK_REG	0x03
#define	RX_STOP_MASK	0x01
#define	RX_MASK		0x02
#define	TX_MASK		0x04
#define	EXEC_MASK	0x08
#define	ALL_MASK	0x0f
#define	RCV_BAR		0x04	/* The following are word (16-bit) registers */
#define	RCV_STOP	0x06
#define	XMT_BAR		0x0a
#define	HOST_ADDRESS_REG	0x0c
#define	IO_PORT		0x0e

/* Bank 1 registers. */
#define	REG1	0x01
#define	WORD_WIDTH	0x02
#define	INT_ENABLE	0x80
#define INT_NO_REG	0x02
#define	RCV_LOWER_LIMIT_REG	0x08
#define	RCV_UPPER_LIMIT_REG	0x09
#define	XMT_LOWER_LIMIT_REG	0x0a
#define	XMT_UPPER_LIMIT_REG	0x0b

/* Bank 2 registers. */
#define	XMT_Chain_Int	0x20	/* Interrupt at the end of the transmit chain */
#define	XMT_Chain_ErrStop	0x40	/* Interrupt at the end of the chain even if there are errors */
#define	RCV_Discard_BadFrame	0x80	/* Throw bad frames away, continue receiving others */
#define	REG2		0x02
#define	PRMSC_Mode	0x01
#define	Multi_IA	0x20
#define	REG3		0x03
#define	TPE_BIT		0x04
#define	BNC_BIT		0x20

/* Station address registers (bank 2). */
#define	I_ADD_REG0	0x04
#define	I_ADD_REG1	0x05
#define	I_ADD_REG2	0x06
#define	I_ADD_REG3	0x07
#define	I_ADD_REG4	0x08
#define	I_ADD_REG5	0x09

/* EEPROM interface register (bank 2) and its bit-bang control bits. */
#define EEPROM_REG	0x0a
#define EESK 0x01	/* serial clock */
#define EECS 0x02	/* chip select */
#define EEDI 0x04	/* data in (to EEPROM) */
#define EEDO 0x08	/* data out (from EEPROM) */
269
270
271
272
273
274
275
276
277 #ifdef HAVE_DEVLIST
278
279
280 struct netdev_entry netcard_drv =
281 {"eepro", eepro_probe1, EEPRO_IO_EXTENT, eepro_portlist};
282 #else
283 int
284 eepro_probe(struct device *dev)
285 {
286 int i;
287 int base_addr = dev ? dev->base_addr : 0;
288
289 if (base_addr > 0x1ff)
290 return eepro_probe1(dev, base_addr);
291 else if (base_addr != 0)
292 return ENXIO;
293
294 for (i = 0; eepro_portlist[i]; i++) {
295 int ioaddr = eepro_portlist[i];
296 if (check_region(ioaddr, EEPRO_IO_EXTENT))
297 continue;
298 if (eepro_probe1(dev, ioaddr) == 0)
299 return 0;
300 }
301
302 return ENODEV;
303 }
304 #endif
305
306
307
308
309
310 int eepro_probe1(struct device *dev, short ioaddr)
311 {
312 unsigned short station_addr[6], id, counter;
313 int i;
314 int eepro;
315
316 char *ifmap[] = {"AUI", "10Base2", "10BaseT"};
317 enum iftype { AUI=0, BNC=1, TPE=2 };
318
319
320
321
322 if (((id=inb(ioaddr + ID_REG)) & ID_REG_MASK) == ID_REG_SIG) {
323
324
325
326
327
328 counter = (id & R_ROBIN_BITS);
329 if (((id=inb(ioaddr+ID_REG)) & R_ROBIN_BITS) ==
330 (counter + 0x40)) {
331
332
333
334
335
336
337 station_addr[0] = read_eeprom(ioaddr, 2);
338 station_addr[1] = read_eeprom(ioaddr, 3);
339 station_addr[2] = read_eeprom(ioaddr, 4);
340
341
342
343 if (station_addr[2] != 0x00aa || (station_addr[1] & 0xff00) != 0x0000) {
344 eepro = 0;
345 printk("%s: Intel 82595-based lan card at %#x,",
346 dev->name, ioaddr);
347 }
348 else {
349 eepro = 1;
350 printk("%s: Intel EtherExpress Pro/10 at %#x,",
351 dev->name, ioaddr);
352 }
353
354
355 dev->base_addr = ioaddr;
356
357 for (i=0; i < 6; i++) {
358 dev->dev_addr[i] = ((unsigned char *) station_addr)[5-i];
359 printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
360 }
361
362 outb(BANK2_SELECT, ioaddr);
363 id = inb(ioaddr + REG3);
364 if (id & TPE_BIT)
365 dev->if_port = TPE;
366 else dev->if_port = BNC;
367
368 if (dev->irq < 2 && eepro) {
369 i = read_eeprom(ioaddr, 1);
370 switch (i & 0x07) {
371 case 0: dev->irq = 9; break;
372 case 1: dev->irq = 3; break;
373 case 2: dev->irq = 5; break;
374 case 3: dev->irq = 10; break;
375 case 4: dev->irq = 11; break;
376 default:
377 printk(" illegal interrupt vector stored in EEPROM.\n");
378 return ENODEV;
379 }
380 }
381 else if (dev->irq == 2)
382 dev->irq = 9;
383
384 if (dev->irq > 2) {
385 printk(", IRQ %d, %s.\n", dev->irq,
386 ifmap[dev->if_port]);
387 if (request_irq(dev->irq, &eepro_interrupt, 0, "eepro")) {
388 printk("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
389 return -EAGAIN;
390 }
391 }
392 else printk(", %s.\n", ifmap[dev->if_port]);
393
394 if ((dev->mem_start & 0xf) > 0)
395 net_debug = dev->mem_start & 7;
396
397 if (net_debug > 3) {
398 i = read_eeprom(ioaddr, 5);
399 if (i & 0x2000)
400 printk("%s: Concurrent Processing is enabled but not used!\n",
401 dev->name);
402 }
403
404 if (net_debug)
405 printk(version);
406
407
408 request_region(ioaddr, EEPRO_IO_EXTENT,"eepro");
409
410
411 if (dev->priv == NULL)
412 dev->priv = kmalloc(sizeof(struct eepro_local), GFP_KERNEL);
413 memset(dev->priv, 0, sizeof(struct eepro_local));
414
415 dev->open = eepro_open;
416 dev->stop = eepro_close;
417 dev->hard_start_xmit = eepro_send_packet;
418 dev->get_stats = eepro_get_stats;
419 dev->set_multicast_list = &set_multicast_list;
420
421
422
423
424 ether_setup(dev);
425
426 outb(RESET_CMD, ioaddr);
427
428 return 0;
429 }
430 else return ENODEV;
431 }
432 else if (net_debug > 3)
433 printk ("EtherExpress Pro probed failed!\n");
434 return ENODEV;
435 }
436
437
438
439
440
441
442
443
444
/* Maps an IRQ number (index) to the 3-bit encoding written to the
   bank 1 interrupt-number register; -1 marks lines the board cannot
   use.  Note IRQ 2 and 9 share encoding 0 (AT cascade). */
static char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1};
446 static int eepro_grab_irq(struct device *dev)
447 {
448 int irqlist[] = { 5, 9, 10, 11, 4, 3, 0};
449 int *irqp = irqlist, temp_reg, ioaddr = dev->base_addr;
450
451 outb(BANK1_SELECT, ioaddr);
452
453
454 temp_reg = inb(ioaddr + REG1);
455 outb(temp_reg | INT_ENABLE, ioaddr + REG1);
456
457 outb(BANK0_SELECT, ioaddr);
458
459
460 outb(ALL_MASK, ioaddr + STATUS_REG);
461
462 outb(ALL_MASK & ~(EXEC_MASK), ioaddr + INT_MASK_REG);
463
464 do {
465 outb(BANK1_SELECT, ioaddr);
466
467 temp_reg = inb(ioaddr + INT_NO_REG);
468 outb((temp_reg & 0xf8) | irqrmap[*irqp], ioaddr + INT_NO_REG);
469
470 outb(BANK0_SELECT, ioaddr);
471
472 if (request_irq (*irqp, NULL, 0, "bogus") != EBUSY) {
473
474 autoirq_setup(0);
475
476 outb(DIAGNOSE_CMD, ioaddr);
477
478 if (*irqp == autoirq_report(2) &&
479 (request_irq(dev->irq = *irqp, &eepro_interrupt, 0, "eepro") == 0))
480 break;
481
482
483 outb(ALL_MASK, ioaddr + STATUS_REG);
484 }
485 } while (*++irqp);
486
487 outb(BANK1_SELECT, ioaddr);
488
489
490 temp_reg = inb(ioaddr + REG1);
491 outb(temp_reg & 0x7f, ioaddr + REG1);
492
493 outb(BANK0_SELECT, ioaddr);
494
495
496 outb(ALL_MASK, ioaddr + INT_MASK_REG);
497
498
499 outb(ALL_MASK, ioaddr + STATUS_REG);
500
501 return dev->irq;
502 }
503
/* Open/initialize the board: program the station address, transmit
   chaining and receive modes, the RCV/XMT RAM boundaries and the IRQ
   encoding, then enable interrupts and start the receive unit.  The
   register writes below are bank-switched and order-sensitive.
   Returns 0 on success, -EAGAIN if no IRQ could be obtained. */
static int
eepro_open(struct device *dev)
{
	unsigned short temp_reg;
	int i, ioaddr = dev->base_addr;
	struct eepro_local *lp = (struct eepro_local *)dev->priv;

	if (net_debug > 3)
		printk("eepro: entering eepro_open routine.\n");

	/* Genuine Pro/10 boards carry Intel's 00:aa:00 address prefix. */
	if (dev->dev_addr[0] == SA_ADDR0 &&
			dev->dev_addr[1] == SA_ADDR1 &&
			dev->dev_addr[2] == SA_ADDR2)
		lp->eepro = 1;
	else lp->eepro = 0;

	/* Get the interrupt vector for the 82595. */
	if (dev->irq < 2 && eepro_grab_irq(dev) == 0) {
		printk("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
		return -EAGAIN;
	}

	if (irq2dev_map[dev->irq] != 0
		|| (irq2dev_map[dev->irq] = dev) == 0)
		return -EAGAIN;

	/* Initialize the 82595. */
	outb(BANK2_SELECT, ioaddr);	/* be CAREFUL, BANK 2 now */
	temp_reg = inb(ioaddr + EEPROM_REG);
	if (temp_reg & 0x10)	/* clear the Turn-Off-Enable bit if set */
		outb(temp_reg & 0xef, ioaddr + EEPROM_REG);
	/* Program the station address into the individual-address registers. */
	for (i=0; i < 6; i++)
		outb(dev->dev_addr[i] , ioaddr + I_ADD_REG0 + i);

	/* Set up transmit chaining and discard bad received frames. */
	temp_reg = inb(ioaddr + REG1);
	outb(temp_reg | XMT_Chain_Int | XMT_Chain_ErrStop
		| RCV_Discard_BadFrame, ioaddr + REG1);

	temp_reg = inb(ioaddr + REG2);
	outb(temp_reg | 0x14, ioaddr + REG2);	/* match broadcast */

	temp_reg = inb(ioaddr + REG3);
	outb(temp_reg & 0x3f, ioaddr + REG3);	/* clear test mode */

	/* Set the receiving mode. */
	outb(BANK1_SELECT, ioaddr);	/* be CAREFUL, BANK 1 now */

	/* Program the IRQ-line encoding. */
	temp_reg = inb(ioaddr + INT_NO_REG);
	outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG);

	/* Initialize the RCV and XMT upper and lower limits. */
	outb(RCV_LOWER_LIMIT, ioaddr + RCV_LOWER_LIMIT_REG);
	outb(RCV_UPPER_LIMIT, ioaddr + RCV_UPPER_LIMIT_REG);
	outb(XMT_LOWER_LIMIT, ioaddr + XMT_LOWER_LIMIT_REG);
	outb(XMT_UPPER_LIMIT, ioaddr + XMT_UPPER_LIMIT_REG);

	/* Enable the interrupt line. */
	temp_reg = inb(ioaddr + REG1);
	outb(temp_reg | INT_ENABLE, ioaddr + REG1);

	outb(BANK0_SELECT, ioaddr);	/* Switch back to Bank 0 */

	/* Let RX and TX events interrupt. */
	outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);
	/* clear all interrupts */
	outb(ALL_MASK, ioaddr + STATUS_REG);

	/* Initialize RCV ring pointers. */
	outw(RCV_LOWER_LIMIT << 8, ioaddr + RCV_BAR);
	lp->rx_start = (RCV_LOWER_LIMIT << 8) ;
	outw((RCV_UPPER_LIMIT << 8) | 0xfe, ioaddr + RCV_STOP);

	/* Initialize XMT. */
	outw(XMT_LOWER_LIMIT << 8, ioaddr + XMT_BAR);

	outb(SEL_RESET_CMD, ioaddr);
	/* We are supposed to wait for 2 us after a SEL_RESET. */
	SLOW_DOWN_IO;
	SLOW_DOWN_IO;

	lp->tx_start = lp->tx_end = XMT_LOWER_LIMIT << 8;	/* == RCV_RAM */
	lp->tx_last = 0;

	dev->tbusy = 0;
	dev->interrupt = 0;
	dev->start = 1;

	if (net_debug > 3)
		printk("eepro: exiting eepro_open routine.\n");

	outb(RCV_ENABLE_CMD, ioaddr);

#ifdef MODULE
	MOD_INC_USE_COUNT;
#endif
	return 0;
}
602
/* Queue a packet for transmission.  Also handles the transmit
   timeout: if tbusy has been set for >= 5 ticks the adaptor is
   reselected/reset and the transmit ring bookkeeping cleared.
   Returns 0 when the skb has been consumed, 1 to ask the stack to
   retry later. */
static int
eepro_send_packet(struct sk_buff *skb, struct device *dev)
{
	struct eepro_local *lp = (struct eepro_local *)dev->priv;
	int ioaddr = dev->base_addr;

	if (net_debug > 5)
		printk("eepro: entering eepro_send_packet routine.\n");

	if (dev->tbusy) {
		/* If we get here, some higher level has decided we are broken.
		   There should really be a "kick me" function call instead. */
		int tickssofar = jiffies - dev->trans_start;
		if (tickssofar < 5)
			return 1;
		if (net_debug > 1)
			printk("%s: transmit timed out, %s?\n", dev->name,
				"network cable problem");
		lp->stats.tx_errors++;

		/* Try to restart the adaptor. */
		outb(SEL_RESET_CMD, ioaddr);
		/* We are supposed to wait for 2 us after a SEL_RESET. */
		SLOW_DOWN_IO;
		SLOW_DOWN_IO;

		/* Do a full reset of the transmit ring bookkeeping
		   (the transmit area starts right after the receive ring). */
		lp->tx_start = lp->tx_end = RCV_RAM;
		lp->tx_last = 0;

		dev->tbusy=0;
		dev->trans_start = jiffies;

		outb(RCV_ENABLE_CMD, ioaddr);

	}

	/* If some higher layer thinks we've missed a tx-done interrupt we
	   are passed NULL.  Caution: dev_tint() handles the cli()/sti()
	   itself. */
	if (skb == NULL) {
		dev_tint(dev);
		return 0;
	}

	/* Block a timer-based transmit from overlapping.
	   (set_bit here is the era's atomic test-and-set.) */
	if (set_bit(0, (void*)&dev->tbusy) != 0)
		printk("%s: Transmitter access conflict.\n", dev->name);
	else {
		/* Pad short frames up to the Ethernet minimum. */
		short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
		unsigned char *buf = skb->data;

		hardware_send_packet(dev, buf, length);
		dev->trans_start = jiffies;
	}

	dev_kfree_skb (skb, FREE_WRITE);

	if (net_debug > 5)
		printk("eepro: exiting eepro_send_packet routine.\n");

	return 0;
}
668
669
670
671
/* The typical workload of the driver: handle the network interface
   interrupts.  Dispatches RX and TX events, acknowledging each in the
   status register, for at most 10 iterations per invocation. */
static void
eepro_interrupt(int irq, struct pt_regs * regs)
{
	struct device *dev = (struct device *)(irq2dev_map[irq]);
	int ioaddr, status, boguscount = 0;

	if (net_debug > 5)
		printk("eepro: entering eepro_interrupt routine.\n");

	if (dev == NULL) {
		printk ("eepro_interrupt(): irq %d for unknown device.\n", irq);
		return;
	}
	dev->interrupt = 1;

	ioaddr = dev->base_addr;

	do {
		status = inb(ioaddr + STATUS_REG);

		if (status & RX_INT) {
			if (net_debug > 4)
				printk("eepro: packet received interrupt.\n");

			/* Acknowledge the RX_INT. */
			outb(RX_INT, ioaddr + STATUS_REG);

			/* Get the received packets. */
			eepro_rx(dev);
		}
		else if (status & TX_INT) {
			if (net_debug > 4)
				printk("eepro: packet transmit interrupt.\n");

			/* Acknowledge the TX_INT. */
			outb(TX_INT, ioaddr + STATUS_REG);

			/* Process the status of transmitted packets. */
			eepro_transmit_interrupt(dev);
			dev->tbusy = 0;
			mark_bh(NET_BH);
		}
	} while ((++boguscount < 10) && (status & 0x06));	/* 0x06 == RX_INT | TX_INT */

	dev->interrupt = 0;
	if (net_debug > 5)
		printk("eepro: exiting eepro_interrupt routine.\n");

	return;
}
722
/* The inverse routine to eepro_open(): stop reception, mask and clear
   interrupts, reset the 82595 and release the IRQ and I/O region. */
static int
eepro_close(struct device *dev)
{
	struct eepro_local *lp = (struct eepro_local *)dev->priv;
	int ioaddr = dev->base_addr;
	short temp_reg;

	dev->tbusy = 1;
	dev->start = 0;

	outb(BANK1_SELECT, ioaddr);	/* Switch to Bank 1 */

	/* Disable the physical interrupt line. */
	temp_reg = inb(ioaddr + REG1);
	outb(temp_reg & 0x7f, ioaddr + REG1);

	outb(BANK0_SELECT, ioaddr);	/* Switch back to Bank 0 */

	/* Stop the receiver and flush the transmit bookkeeping. */
	outb(STOP_RCV_CMD, ioaddr);
	lp->tx_start = lp->tx_end = RCV_RAM ;
	lp->tx_last = 0;

	/* Mask all the interrupts. */
	outb(ALL_MASK, ioaddr + INT_MASK_REG);

	/* clear all interrupts */
	outb(ALL_MASK, ioaddr + STATUS_REG);

	/* Reset the 82595. */
	outb(RESET_CMD, ioaddr);

	/* release the interrupt */
	free_irq(dev->irq);

	irq2dev_map[dev->irq] = 0;

	release_region(ioaddr, 16);

	/* NOTE(review): no statistics are updated here; the board is
	   simply reset. */

	/* We are supposed to wait for 200 us after a RESET. */
	SLOW_DOWN_IO;
	SLOW_DOWN_IO;	/* May not be enough? */

#ifdef MODULE
	MOD_DEC_USE_COUNT;
#endif
	return 0;
}
774
775
776
777 static struct enet_statistics *
778 eepro_get_stats(struct device *dev)
779 {
780 struct eepro_local *lp = (struct eepro_local *)dev->priv;
781
782 return &lp->stats;
783 }
784
785
786
787
788
789
790
/* Set or clear the reception mode for this adaptor.
   num_addrs < -1 or > 63	promiscuous mode, receive everything
   num_addrs == 0		normal mode (own address + broadcast)
   num_addrs > 0		multicast mode: upload the address list
				to the 82595 with an MC_SETUP command.
   NOTE(review): num_addrs == -1 falls through to the MC_SETUP path
   with an empty list rather than enabling promiscuous mode —
   presumably intentional for this era's API, but worth confirming. */
static void
set_multicast_list(struct device *dev, int num_addrs, void *addrs)
{
	struct eepro_local *lp = (struct eepro_local *)dev->priv;
	short ioaddr = dev->base_addr;
	unsigned short mode;

	if (num_addrs < -1 || num_addrs > 63) {
		/* Enable promiscuous reception. */
		outb(BANK2_SELECT, ioaddr);	/* be CAREFUL, BANK 2 now */
		mode = inb(ioaddr + REG2);
		outb(mode | PRMSC_Mode, ioaddr + REG2);
		mode = inb(ioaddr + REG3);
		outb(mode, ioaddr + REG3);	/* writing reg. 3 latches the update */
		outb(BANK0_SELECT, ioaddr);	/* Return to BANK 0 now */
		printk("%s: promiscuous mode enabled.\n", dev->name);
	}
	else if (num_addrs == 0) {
		/* Normal mode: clear Multi-IA (0xd6 masks off bits 0x01|0x28
		   region, keeping broadcast match). */
		outb(BANK2_SELECT, ioaddr);	/* be CAREFUL, BANK 2 now */
		mode = inb(ioaddr + REG2);
		outb(mode & 0xd6, ioaddr + REG2);
		mode = inb(ioaddr + REG3);
		outb(mode, ioaddr + REG3);	/* writing reg. 3 latches the update */
		outb(BANK0_SELECT, ioaddr);	/* Return to BANK 0 now */
	}
	else {
		unsigned short status, *eaddrs = addrs;
		int i, boguscount = 0;

		/* Disable RX and TX interrupts.  Necessary to avoid
		   corruption of the HOST_ADDRESS_REG by interrupt
		   service routines. */
		outb(ALL_MASK, ioaddr + INT_MASK_REG);

		outb(BANK2_SELECT, ioaddr);	/* be CAREFUL, BANK 2 now */
		mode = inb(ioaddr + REG2);
		outb(mode | Multi_IA, ioaddr + REG2);
		mode = inb(ioaddr + REG3);
		outb(mode, ioaddr + REG3);	/* writing reg. 3 latches the update */
		outb(BANK0_SELECT, ioaddr);	/* Return to BANK 0 now */

		/* Build an MC_SETUP command block at tx_end in board RAM. */
		outw(lp->tx_end, ioaddr + HOST_ADDRESS_REG);
		outw(MC_SETUP, ioaddr + IO_PORT);
		outw(0, ioaddr + IO_PORT);
		outw(0, ioaddr + IO_PORT);
		outw(6*(num_addrs + 1), ioaddr + IO_PORT);	/* byte count incl. our own address */
		for (i = 0; i < num_addrs; i++) {
			outw(*eaddrs++, ioaddr + IO_PORT);
			outw(*eaddrs++, ioaddr + IO_PORT);
			outw(*eaddrs++, ioaddr + IO_PORT);
		}
		/* The list must also contain our own station address. */
		eaddrs = (unsigned short *) dev->dev_addr;
		outw(eaddrs[0], ioaddr + IO_PORT);
		outw(eaddrs[1], ioaddr + IO_PORT);
		outw(eaddrs[2], ioaddr + IO_PORT);
		outw(lp->tx_end, ioaddr + XMT_BAR);
		outb(MC_SETUP, ioaddr);

		/* Update the transmit queue to account for the command block. */
		i = lp->tx_end + XMT_HEADER + 6*(num_addrs + 1);
		if (lp->tx_start != lp->tx_end) {
			/* A transmit command is already queued: chain the
			   MC_SETUP block behind it. */
			outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
			outw(i, ioaddr + IO_PORT);
			outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
			status = inw(ioaddr + IO_PORT);
			outw(status | CHAIN_BIT, ioaddr + IO_PORT);
			lp->tx_end = i ;
		} else lp->tx_start = lp->tx_end = i ;

		/* Poll for completion of the MC setup (EXEC interrupt).
		   We should really be doing this in eepro_interrupt()! */
		do {
			SLOW_DOWN_IO;
			SLOW_DOWN_IO;
			if (inb(ioaddr + STATUS_REG) & 0x08) {	/* EXEC event pending */
				i = inb(ioaddr);
				outb(0x08, ioaddr + STATUS_REG);	/* acknowledge it */
				if (i & 0x20) {	/* command ABORTed */
					printk("%s: multicast setup failed.\n",
						dev->name);
					break;
				} else if ((i & 0x0f) == 0x03)	{	/* MC-Done */
					printk("%s: set Rx mode to %d addresses.\n",
						dev->name, num_addrs);
					break;
				}
			}
		} while (++boguscount < 100);

		/* Re-enable RX and TX interrupts. */
		outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);

	}
	outb(RCV_ENABLE_CMD, ioaddr);
}
885
886
887
888
889
/* The horrible routine to read a word from the serial EEPROM:
   bit-bangs the EECS/EESK/EEDI/EEDO lines in the bank 2 EEPROM
   register.  IMPORTANT: the 82595 is left in BANK 0 on return. */

/* The delay between EEPROM clock transitions. */
#define eeprom_delay()	{ int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; }}
#define EE_READ_CMD (6 << 6)	/* opcode 110b, placed above the 6 address bits */

int
read_eeprom(int ioaddr, int location)
{
	int i;
	unsigned short retval = 0;
	short ee_addr = ioaddr + EEPROM_REG;
	int read_cmd = location | EE_READ_CMD;
	short ctrl_val = EECS ;

	outb(BANK2_SELECT, ioaddr);
	outb(ctrl_val, ee_addr);

	/* Shift the read command bits out, MSB first. */
	for (i = 8; i >= 0; i--) {
		short outval = (read_cmd & (1 << i)) ? ctrl_val | EEDI
			: ctrl_val;
		outb(outval, ee_addr);
		outb(outval | EESK, ee_addr);	/* EEPROM clock tick. */
		eeprom_delay();
		outb(outval, ee_addr);	/* Finish EEPROM clock tick. */
		eeprom_delay();
	}
	outb(ctrl_val, ee_addr);

	/* Clock the 16 data bits in, MSB first. */
	for (i = 16; i > 0; i--) {
		outb(ctrl_val | EESK, ee_addr); eeprom_delay();
		retval = (retval << 1) | ((inb(ee_addr) & EEDO) ? 1 : 0);
		outb(ctrl_val, ee_addr); eeprom_delay();
	}

	/* Terminate the EEPROM access (drop chip select). */
	ctrl_val &= ~EECS;
	outb(ctrl_val | EESK, ee_addr);
	eeprom_delay();
	outb(ctrl_val, ee_addr);
	eeprom_delay();
	outb(BANK0_SELECT, ioaddr);
	return retval;
}
932
/* Copy a frame into the 82595's transmit RAM and start (or chain to)
   the transmission.  The transmit area is used as a ring between
   tx_start and tx_end; the I/O ordering below is strict because the
   HOST_ADDRESS_REG auto-increments on each IO_PORT access. */
static void
hardware_send_packet(struct device *dev, void *buf, short length)
{
	struct eepro_local *lp = (struct eepro_local *)dev->priv;
	short ioaddr = dev->base_addr;
	unsigned status, tx_available, last, end, boguscount = 10;

	if (net_debug > 5)
		printk("eepro: entering hardware_send_packet routine.\n");

	while (boguscount-- > 0) {

		/* Compute how much free transmit RAM remains. */
		if (lp->tx_end > lp->tx_start)
			tx_available = XMT_RAM - (lp->tx_end - lp->tx_start);
		else if (lp->tx_end < lp->tx_start)
			tx_available = lp->tx_start - lp->tx_end;
		else tx_available = XMT_RAM;

		/* Disable RX and TX interrupts.  Necessary to avoid
		   corruption of the HOST_ADDRESS_REG by interrupt
		   service routines. */
		outb(ALL_MASK, ioaddr + INT_MASK_REG);

		/* Word-aligned frame length plus header overhead must fit. */
		if (((((length + 1) >> 1) << 1) + 2*XMT_HEADER)
			>= tx_available)
			continue;	/* no room yet — keep polling */

		last = lp->tx_end;
		end = last + (((length + 1) >> 1) << 1) + XMT_HEADER;

		if (end >= RAM_SIZE) {	/* the transmit area wraps around */
			if ((RAM_SIZE - last) <= XMT_HEADER) {
				/* The transmit header must be kept contiguous:
				   wrap the whole frame to the start of the area. */
				last = RCV_RAM;
				end = last + (((length + 1) >> 1) << 1) + XMT_HEADER;
			}
			else end = RCV_RAM + (end - RAM_SIZE);
		}

		/* Write the transmit command block, then the frame data. */
		outw(last, ioaddr + HOST_ADDRESS_REG);
		outw(XMT_CMD, ioaddr + IO_PORT);
		outw(0, ioaddr + IO_PORT);
		outw(end, ioaddr + IO_PORT);
		outw(length, ioaddr + IO_PORT);
		outsw(ioaddr + IO_PORT, buf, (length + 1) >> 1);

		if (lp->tx_start != lp->tx_end) {
			/* A frame is already queued: chain this one onto it. */
			if (lp->tx_end != last) {
				outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
				outw(last, ioaddr + IO_PORT);
			}
			outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
			status = inw(ioaddr + IO_PORT);
			outw(status | CHAIN_BIT, ioaddr + IO_PORT);
		}

		/* A dummy read to flush the DRAM write pipeline. */
		status = inw(ioaddr + IO_PORT);

		/* Enable RX and TX interrupts again. */
		outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);

		if (lp->tx_start == lp->tx_end) {
			/* Queue was empty: kick off the transmit unit. */
			outw(last, ioaddr + XMT_BAR);
			outb(XMT_CMD, ioaddr);
			lp->tx_start = last;
		}
		else outb(RESUME_XMT_CMD, ioaddr);

		lp->tx_last = last;
		lp->tx_end = end;

		if (dev->tbusy) {
			dev->tbusy = 0;
			mark_bh(NET_BH);
		}

		if (net_debug > 5)
			printk("eepro: exiting hardware_send_packet routine.\n");
		return;
	}
	/* No room after 10 tries: leave tbusy set so the stack retries. */
	dev->tbusy = 1;
	if (net_debug > 5)
		printk("eepro: exiting hardware_send_packet routine.\n");
}
1022
/* Pull received frames out of the on-board receive ring and hand them
   to the network stack.  Called from the interrupt handler on RX_INT.
   Finishes by advancing the RCV_STOP register so the board may reuse
   the drained ring space. */
static void
eepro_rx(struct device *dev)
{
	struct eepro_local *lp = (struct eepro_local *)dev->priv;
	short ioaddr = dev->base_addr;
	short boguscount = 20;	/* process at most 20 frames per call */
	short rcv_car = lp->rx_start;
	unsigned rcv_event, rcv_status, rcv_next_frame, rcv_size;

	if (net_debug > 5)
		printk("eepro: entering eepro_rx routine.\n");

	/* Set the read pointer to the start of the receive ring. */
	outw(rcv_car, ioaddr + HOST_ADDRESS_REG);
	rcv_event = inw(ioaddr + IO_PORT);

	while (rcv_event == RCV_DONE) {	/* a complete frame header is present */
		rcv_status = inw(ioaddr + IO_PORT);
		rcv_next_frame = inw(ioaddr + IO_PORT);
		rcv_size = inw(ioaddr + IO_PORT);

		if ((rcv_status & (RX_OK | RX_ERROR)) == RX_OK) {
			/* Malloc up new buffer. */
			struct sk_buff *skb;

			rcv_size &= 0x3fff;	/* mask off status bits of the length word */
			skb = dev_alloc_skb(rcv_size+2);
			if (skb == NULL) {
				printk("%s: Memory squeeze, dropping packet.\n", dev->name);
				lp->stats.rx_dropped++;
				break;
			}
			skb->dev = dev;
			skb_reserve(skb,2);	/* align the IP header */

			insw(ioaddr+IO_PORT, skb_put(skb,rcv_size), (rcv_size + 1) >> 1);

			skb->protocol = eth_type_trans(skb,dev);
			netif_rx(skb);
			lp->stats.rx_packets++;
		}
		else {	/* Not sure we will ever reach here: the 595 is set to
			   discard bad received frames (RCV_Discard_BadFrame). */
			lp->stats.rx_errors++;
			if (rcv_status & 0x0100)
				lp->stats.rx_over_errors++;
			else if (rcv_status & 0x0400)
				lp->stats.rx_frame_errors++;
			else if (rcv_status & 0x0800)
				lp->stats.rx_crc_errors++;
			printk("%s: event = %#x, status = %#x, next = %#x, size = %#x\n",
				dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size);
		}
		if (rcv_status & 0x1000)
			lp->stats.rx_length_errors++;
		if (--boguscount == 0)
			break;

		/* Advance to the next frame header. */
		rcv_car = lp->rx_start + RCV_HEADER + rcv_size;
		lp->rx_start = rcv_next_frame;
		outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG);
		rcv_event = inw(ioaddr + IO_PORT);

	}
	if (rcv_car == 0)
		rcv_car = (RCV_UPPER_LIMIT << 8) | 0xff;
	/* Tell the board how far it may write new frames. */
	outw(rcv_car - 1, ioaddr + RCV_STOP);

	if (net_debug > 5)
		printk("eepro: exiting eepro_rx routine.\n");
}
1094
/* Reap completed transmit frames, update statistics and release the
   transmit ring space.  Called from the interrupt handler on TX_INT. */
static void
eepro_transmit_interrupt(struct device *dev)
{
	struct eepro_local *lp = (struct eepro_local *)dev->priv;
	short ioaddr = dev->base_addr;
	short boguscount = 10;	/* reap at most 10 frames per call */
	short xmt_status;

	while (lp->tx_start != lp->tx_end) {	/* frames still queued */

		outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG);
		xmt_status = inw(ioaddr+IO_PORT);
		if ((xmt_status & TX_DONE_BIT) == 0) break;	/* head frame not finished yet */
		xmt_status = inw(ioaddr+IO_PORT);
		lp->tx_start = inw(ioaddr+IO_PORT);	/* chain pointer -> next queued frame */

		if (dev->tbusy) {
			dev->tbusy = 0;
			mark_bh(NET_BH);
		}

		if (xmt_status & 0x2000)	/* transmitted OK */
			lp->stats.tx_packets++;
		else {
			lp->stats.tx_errors++;
			if (xmt_status & 0x0400)
				lp->stats.tx_carrier_errors++;
			printk("%s: XMT status = %#x\n",
				dev->name, xmt_status);
		}
		if (xmt_status & 0x000f)	/* low nibble = collision count */
			lp->stats.collisions += (xmt_status & 0x000f);
		if ((xmt_status & 0x0040) == 0x0)	/* heartbeat (SQE) missing */
			lp->stats.tx_heartbeat_errors++;

		if (--boguscount == 0)
			break;
	}
}
1134
#ifdef MODULE
char kernel_version[] = UTS_RELEASE;

/* Writable buffer for the interface name: register_netdev() writes
   "eth%d" into dev->name.
   BUG FIX: the original used the string literal " " here, which is
   both too small for "eth0" and (typically) read-only — either way an
   out-of-bounds/illegal write at module load. */
static char devicename[9] = { 0, };
static struct device dev_eepro = {
	devicename,	/* filled in by register_netdev() */
	0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, eepro_probe };

int
init_module(void)
{
	if (register_netdev(&dev_eepro) != 0)
		return -EIO;
	return 0;
}

void
cleanup_module(void)
{
	if (MOD_IN_USE)
		printk("eepro: device busy, remove delayed\n");
	else
	{
		unregister_netdev(&dev_eepro);
		/* Free the private data allocated in eepro_probe1(). */
		kfree_s(dev_eepro.priv,sizeof(struct eepro_local));
		dev_eepro.priv=NULL;
	}
}
#endif