This source file includes following definitions.
- eepro_probe
- eepro_probe1
- eepro_grab_irq
- eepro_open
- eepro_send_packet
- eepro_interrupt
- eepro_close
- eepro_get_stats
- set_multicast_list
- read_eeprom
- hardware_send_packet
- eepro_rx
- eepro_transmit_interrupt
- init_module
- cleanup_module
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
/* Driver identification string, printed once at probe time when net_debug
 * is non-zero. */
static const char *version =
	"eepro.c: v0.07a 6/5/95 Bao C. Ha (bao@saigon.async.com)\n";
56
57
58
59 #include <linux/config.h>
60
61 #ifdef MODULE
62 #include <linux/module.h>
63 #include <linux/version.h>
64 #endif
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87 #include <linux/kernel.h>
88 #include <linux/sched.h>
89 #include <linux/types.h>
90 #include <linux/fcntl.h>
91 #include <linux/interrupt.h>
92 #include <linux/ptrace.h>
93 #include <linux/ioport.h>
94 #include <linux/in.h>
95 #include <linux/malloc.h>
96 #include <linux/string.h>
97 #include <asm/system.h>
98 #include <asm/bitops.h>
99 #include <asm/io.h>
100 #include <asm/dma.h>
101 #include <linux/errno.h>
102
103 #include <linux/netdevice.h>
104 #include <linux/etherdevice.h>
105 #include <linux/skbuff.h>
106
107
108
109
/* I/O base addresses to autoprobe, in order; the list is 0-terminated. */
static unsigned int eepro_portlist[] =
   { 0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0x360, 0};

/* Debug verbosity (0..7); may be overridden at probe time from the low
 * bits of dev->mem_start. */
#ifndef NET_DEBUG
#define NET_DEBUG 2
#endif
static unsigned int net_debug = NET_DEBUG;

/* Number of consecutive I/O ports claimed by the board. */
#define EEPRO_IO_EXTENT	16
121
122
/* Per-device private state, hung off dev->priv. */
struct eepro_local {
	struct enet_statistics stats;	/* interface statistics */
	unsigned rx_start;		/* board-RAM offset of the next rx frame */
	unsigned tx_start;		/* start of the pending transmit chain */
	int tx_last;			/* offset of the last queued tx frame */
	unsigned tx_end;		/* end of the transmit queue in board RAM */
	int eepro;			/* 1 if a genuine EtherExpress Pro/10 */
};
132
133
/* First three octets of the station address on a genuine EtherExpress
 * Pro/10 (Intel OUI 00:aa:00), checked in eepro_open(). */
#define SA_ADDR0 0x00
#define SA_ADDR1 0xaa
#define SA_ADDR2 0x00

/* Index to functions, as function prototypes. */

extern int eepro_probe(struct device *dev);

static int eepro_probe1(struct device *dev, short ioaddr);
static int eepro_open(struct device *dev);
static int eepro_send_packet(struct sk_buff *skb, struct device *dev);
static void eepro_interrupt(int irq, struct pt_regs *regs);
static void eepro_rx(struct device *dev);
static void eepro_transmit_interrupt(struct device *dev);
static int eepro_close(struct device *dev);
static struct enet_statistics *eepro_get_stats(struct device *dev);
static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);

static int read_eeprom(int ioaddr, int location);
static void hardware_send_packet(struct device *dev, void *buf, short length);
static int eepro_grab_irq(struct device *dev);
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
/* Layout of the 32K on-board shared RAM: the low 24K holds the receive
 * ring, the top 8K the transmit area.  Limit registers hold 256-byte
 * page numbers, hence the ">> 8" shifts. */
#define	RAM_SIZE	0x8000
#define	RCV_HEADER	8
#define RCV_RAM 	0x6000
#define RCV_LOWER_LIMIT	0x00
#define RCV_UPPER_LIMIT	((RCV_RAM - 2) >> 8)
#define XMT_RAM		(RAM_SIZE - RCV_RAM)
#define XMT_LOWER_LIMIT	(RCV_RAM >> 8)
#define XMT_UPPER_LIMIT	((RAM_SIZE - 2) >> 8)
#define XMT_HEADER	8

/* Receive-frame status bits. */
#define RCV_DONE	0x0008
#define RX_OK		0x2000
#define RX_ERROR	0x0d81

/* Transmit-frame header fields and status bits. */
#define TX_DONE_BIT	0x0080
#define CHAIN_BIT	0x8000
#define XMT_STATUS	0x02
#define XMT_CHAIN	0x04
#define XMT_COUNT	0x06

/* The i82595 exposes three register banks selected through port 0. */
#define BANK0_SELECT	0x00
#define BANK1_SELECT	0x40
#define BANK2_SELECT	0x80

/* Bank 0 registers. */
#define COMMAND_REG	0x00
#define MC_SETUP	0x03
#define XMT_CMD		0x04
#define DIAGNOSE_CMD	0x07
#define RCV_ENABLE_CMD	0x08
#define RCV_DISABLE_CMD	0x0a
#define STOP_RCV_CMD	0x0b
#define RESET_CMD	0x0e
#define POWER_DOWN_CMD	0x18
#define RESUME_XMT_CMD	0x1c
#define SEL_RESET_CMD	0x1e
#define STATUS_REG	0x01
#define RX_INT		0x02
#define TX_INT		0x04
#define EXEC_STATUS	0x30
#define ID_REG		0x02
#define R_ROBIN_BITS	0xc0	/* 2-bit counter that advances on each read */
#define ID_REG_MASK	0x2c
#define ID_REG_SIG	0x24
#define AUTO_ENABLE	0x10
#define INT_MASK_REG	0x03
#define RX_STOP_MASK	0x01
#define RX_MASK		0x02
#define TX_MASK		0x04
#define EXEC_MASK	0x08
#define ALL_MASK	0x0f
#define RCV_BAR		0x04
#define RCV_STOP	0x06
#define XMT_BAR		0x0a
#define HOST_ADDRESS_REG	0x0c
#define IO_PORT		0x0e

/* Bank 1 registers. */
#define REG1	0x01
#define WORD_WIDTH	0x02
#define INT_ENABLE	0x80
#define INT_NO_REG	0x02
#define RCV_LOWER_LIMIT_REG	0x08
#define RCV_UPPER_LIMIT_REG	0x09
#define XMT_LOWER_LIMIT_REG	0x0a
#define XMT_UPPER_LIMIT_REG	0x0b

/* Bank 2 registers. */
#define XMT_Chain_Int	0x20
#define XMT_Chain_ErrStop	0x40
#define RCV_Discard_BadFrame	0x80
#define REG2		0x02
#define PRMSC_Mode	0x01
#define Multi_IA	0x20
#define REG3		0x03
#define TPE_BIT		0x04
#define BNC_BIT		0x20

#define I_ADD_REG0	0x04
#define I_ADD_REG1	0x05
#define I_ADD_REG2	0x06
#define I_ADD_REG3	0x07
#define I_ADD_REG4	0x08
#define I_ADD_REG5	0x09

/* Serial EEPROM control register and its bit-bang lines. */
#define EEPROM_REG	0x0a
#define EESK 0x01
#define EECS 0x02
#define EEDI 0x04
#define EEDO 0x08
268
269
270
271
272
273
274
275
276 #ifdef HAVE_DEVLIST
277
278
279 struct netdev_entry netcard_drv =
280 {"eepro", eepro_probe1, EEPRO_IO_EXTENT, eepro_portlist};
281 #else
282 int
283 eepro_probe(struct device *dev)
284 {
285 int i;
286 int base_addr = dev ? dev->base_addr : 0;
287
288 if (base_addr > 0x1ff)
289 return eepro_probe1(dev, base_addr);
290 else if (base_addr != 0)
291 return ENXIO;
292
293 for (i = 0; eepro_portlist[i]; i++) {
294 int ioaddr = eepro_portlist[i];
295 if (check_region(ioaddr, EEPRO_IO_EXTENT))
296 continue;
297 if (eepro_probe1(dev, ioaddr) == 0)
298 return 0;
299 }
300
301 return ENODEV;
302 }
303 #endif
304
305
306
307
308
309 int eepro_probe1(struct device *dev, short ioaddr)
310 {
311 unsigned short station_addr[6], id, counter;
312 int i;
313 int eepro;
314
315 const char *ifmap[] = {"AUI", "10Base2", "10BaseT"};
316 enum iftype { AUI=0, BNC=1, TPE=2 };
317
318
319
320
321 if (((id=inb(ioaddr + ID_REG)) & ID_REG_MASK) == ID_REG_SIG) {
322
323
324
325
326
327 counter = (id & R_ROBIN_BITS);
328 if (((id=inb(ioaddr+ID_REG)) & R_ROBIN_BITS) ==
329 (counter + 0x40)) {
330
331
332
333
334
335
336 station_addr[0] = read_eeprom(ioaddr, 2);
337 station_addr[1] = read_eeprom(ioaddr, 3);
338 station_addr[2] = read_eeprom(ioaddr, 4);
339
340
341
342 if (station_addr[2] != 0x00aa || (station_addr[1] & 0xff00) != 0x0000) {
343 eepro = 0;
344 printk("%s: Intel 82595-based lan card at %#x,",
345 dev->name, ioaddr);
346 }
347 else {
348 eepro = 1;
349 printk("%s: Intel EtherExpress Pro/10 at %#x,",
350 dev->name, ioaddr);
351 }
352
353
354 dev->base_addr = ioaddr;
355
356 for (i=0; i < 6; i++) {
357 dev->dev_addr[i] = ((unsigned char *) station_addr)[5-i];
358 printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
359 }
360
361 outb(BANK2_SELECT, ioaddr);
362 id = inb(ioaddr + REG3);
363 if (id & TPE_BIT)
364 dev->if_port = TPE;
365 else dev->if_port = BNC;
366
367 if (dev->irq < 2 && eepro) {
368 i = read_eeprom(ioaddr, 1);
369 switch (i & 0x07) {
370 case 0: dev->irq = 9; break;
371 case 1: dev->irq = 3; break;
372 case 2: dev->irq = 5; break;
373 case 3: dev->irq = 10; break;
374 case 4: dev->irq = 11; break;
375 default:
376 printk(" illegal interrupt vector stored in EEPROM.\n");
377 return ENODEV;
378 }
379 }
380 else if (dev->irq == 2)
381 dev->irq = 9;
382
383 if (dev->irq > 2) {
384 printk(", IRQ %d, %s.\n", dev->irq,
385 ifmap[dev->if_port]);
386 if (request_irq(dev->irq, &eepro_interrupt, 0, "eepro")) {
387 printk("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
388 return -EAGAIN;
389 }
390 }
391 else printk(", %s.\n", ifmap[dev->if_port]);
392
393 if ((dev->mem_start & 0xf) > 0)
394 net_debug = dev->mem_start & 7;
395
396 if (net_debug > 3) {
397 i = read_eeprom(ioaddr, 5);
398 if (i & 0x2000)
399 printk("%s: Concurrent Processing is enabled but not used!\n",
400 dev->name);
401 }
402
403 if (net_debug)
404 printk(version);
405
406
407 request_region(ioaddr, EEPRO_IO_EXTENT, "eepro");
408
409
410 if (dev->priv == NULL)
411 dev->priv = kmalloc(sizeof(struct eepro_local), GFP_KERNEL);
412 memset(dev->priv, 0, sizeof(struct eepro_local));
413
414 dev->open = eepro_open;
415 dev->stop = eepro_close;
416 dev->hard_start_xmit = eepro_send_packet;
417 dev->get_stats = eepro_get_stats;
418 dev->set_multicast_list = &set_multicast_list;
419
420
421
422
423 ether_setup(dev);
424
425 outb(RESET_CMD, ioaddr);
426
427 return 0;
428 }
429 else return ENODEV;
430 }
431 else if (net_debug > 3)
432 printk ("EtherExpress Pro probed failed!\n");
433 return ENODEV;
434 }
435
436
437
438
439
440
441
442
443
/* Map IRQ number -> 3-bit code programmed into the chip's INT_NO_REG;
 * -1 marks lines the i82595 cannot use. */
static char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1};

/* Autodetect a usable IRQ: program each candidate into the chip, issue
 * a DIAGNOSE command (which raises an EXEC interrupt) and see which
 * line fires.  Returns the IRQ installed into dev->irq, or 0. */
static int eepro_grab_irq(struct device *dev)
{
	int irqlist[] = { 5, 9, 10, 11, 4, 3, 0};	/* candidates, 0-terminated */
	int *irqp = irqlist, temp_reg, ioaddr = dev->base_addr;

	outb(BANK1_SELECT, ioaddr);	/* be CAREFUL, BANK 1 now */

	/* Enable the interrupt line driver. */
	temp_reg = inb(ioaddr + REG1);
	outb(temp_reg | INT_ENABLE, ioaddr + REG1);

	outb(BANK0_SELECT, ioaddr);	/* be CAREFUL, BANK 0 now */

	/* Clear all pending interrupt events. */
	outb(ALL_MASK, ioaddr + STATUS_REG);
	/* Let only the EXEC event raise an interrupt. */
	outb(ALL_MASK & ~(EXEC_MASK), ioaddr + INT_MASK_REG);

	do {
		outb(BANK1_SELECT, ioaddr);	/* be CAREFUL, BANK 1 now */

		/* Program the candidate IRQ code into the chip. */
		temp_reg = inb(ioaddr + INT_NO_REG);
		outb((temp_reg & 0xf8) | irqrmap[*irqp], ioaddr + INT_NO_REG);

		outb(BANK0_SELECT, ioaddr);	/* be CAREFUL, BANK 0 now */

		/* NOTE(review): request_irq() returns a negative errno, so
		 * comparing against positive EBUSY never matches and every
		 * candidate is probed — confirm this is intended. */
		if (request_irq (*irqp, NULL, 0, "bogus") != EBUSY) {

			autoirq_setup(0);

			/* Trigger an EXEC interrupt via DIAGNOSE. */
			outb(DIAGNOSE_CMD, ioaddr);

			/* Keep the candidate only if it is the line that
			 * actually fired and we can claim it for real. */
			if (*irqp == autoirq_report(2) &&
				(request_irq(dev->irq = *irqp, &eepro_interrupt, 0, "eepro") == 0))
					break;

			/* Clear the stale events and try the next IRQ. */
			outb(ALL_MASK, ioaddr + STATUS_REG);
		}
	} while (*++irqp);

	outb(BANK1_SELECT, ioaddr);	/* Switch back to Bank 1 */

	/* Disable the interrupt line driver again. */
	temp_reg = inb(ioaddr + REG1);
	outb(temp_reg & 0x7f, ioaddr + REG1);

	outb(BANK0_SELECT, ioaddr);	/* Switch back to Bank 0 */

	/* Mask all interrupts. */
	outb(ALL_MASK, ioaddr + INT_MASK_REG);

	/* Clear all pending events. */
	outb(ALL_MASK, ioaddr + STATUS_REG);

	return dev->irq;
}
502
/* Bring the interface up: claim the IRQ, program the station address,
 * buffer limits and interrupt masks, reset the chip and enable the
 * receiver.  The bank-select ordering below is significant; the code is
 * kept byte-identical and only annotated. */
static int
eepro_open(struct device *dev)
{
	unsigned short temp_reg;
	int i, ioaddr = dev->base_addr;
	struct eepro_local *lp = (struct eepro_local *)dev->priv;

	if (net_debug > 3)
		printk("eepro: entering eepro_open routine.\n");

	/* Remember whether this is a genuine Pro/10 (Intel 00:aa:00 OUI). */
	if (dev->dev_addr[0] == SA_ADDR0 &&
			dev->dev_addr[1] == SA_ADDR1 &&
			dev->dev_addr[2] == SA_ADDR2)
		lp->eepro = 1;
	else lp->eepro = 0;

	/* No IRQ known yet: try to autodetect one. */
	if (dev->irq < 2 && eepro_grab_irq(dev) == 0) {
		printk("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
		return -EAGAIN;
	}

	/* Register ourselves as the owner of that IRQ line. */
	if (irq2dev_map[dev->irq] != 0
		|| (irq2dev_map[dev->irq] = dev) == 0)
		return -EAGAIN;

	/* Bank 2: program the station address and operating modes. */
	outb(BANK2_SELECT, ioaddr);
	temp_reg = inb(ioaddr + EEPROM_REG);
	if (temp_reg & 0x10)	/* clear the on-board "trunk" bit if set */
		outb(temp_reg & 0xef, ioaddr + EEPROM_REG);
	for (i=0; i < 6; i++)
		outb(dev->dev_addr[i] , ioaddr + I_ADD_REG0 + i);

	/* Chain-interrupt on transmit, stop chain on error, drop bad rx. */
	temp_reg = inb(ioaddr + REG1);
	outb(temp_reg | XMT_Chain_Int | XMT_Chain_ErrStop
		| RCV_Discard_BadFrame, ioaddr + REG1);

	temp_reg = inb(ioaddr + REG2);
	outb(temp_reg | 0x14, ioaddr + REG2);

	temp_reg = inb(ioaddr + REG3);
	outb(temp_reg & 0x3f, ioaddr + REG3);

	/* Bank 1: interrupt line and RAM partitioning. */
	outb(BANK1_SELECT, ioaddr);

	temp_reg = inb(ioaddr + INT_NO_REG);
	outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG);

	/* Program the receive/transmit RAM limits (page numbers). */
	outb(RCV_LOWER_LIMIT, ioaddr + RCV_LOWER_LIMIT_REG);
	outb(RCV_UPPER_LIMIT, ioaddr + RCV_UPPER_LIMIT_REG);
	outb(XMT_LOWER_LIMIT, ioaddr + XMT_LOWER_LIMIT_REG);
	outb(XMT_UPPER_LIMIT, ioaddr + XMT_UPPER_LIMIT_REG);

	/* Enable the interrupt line driver. */
	temp_reg = inb(ioaddr + REG1);
	outb(temp_reg | INT_ENABLE, ioaddr + REG1);

	/* Bank 0: unmask rx/tx interrupts, clear pending events. */
	outb(BANK0_SELECT, ioaddr);

	outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);

	outb(ALL_MASK, ioaddr + STATUS_REG);

	/* Initialize the receive-area pointers. */
	outw(RCV_LOWER_LIMIT << 8, ioaddr + RCV_BAR);
	lp->rx_start = (RCV_LOWER_LIMIT << 8) ;
	outw((RCV_UPPER_LIMIT << 8) | 0xfe, ioaddr + RCV_STOP);

	/* Initialize the transmit-area pointer. */
	outw(XMT_LOWER_LIMIT << 8, ioaddr + XMT_BAR);

	outb(SEL_RESET_CMD, ioaddr);
	/* Give the chip time to come out of reset. */
	SLOW_DOWN_IO;
	SLOW_DOWN_IO;

	lp->tx_start = lp->tx_end = XMT_LOWER_LIMIT << 8;
	lp->tx_last = 0;

	dev->tbusy = 0;
	dev->interrupt = 0;
	dev->start = 1;

	if (net_debug > 3)
		printk("eepro: exiting eepro_open routine.\n");

	outb(RCV_ENABLE_CMD, ioaddr);

#ifdef MODULE
	MOD_INC_USE_COUNT;
#endif
	return 0;
}
601
/* Queue one packet for transmission.  Recovers from a stuck transmitter
 * by resetting the chip after ~5 ticks; returns 1 to ask the network
 * layer to retry while busy, 0 otherwise. */
static int
eepro_send_packet(struct sk_buff *skb, struct device *dev)
{
	struct eepro_local *lp = (struct eepro_local *)dev->priv;
	int ioaddr = dev->base_addr;

	if (net_debug > 5)
		printk("eepro: entering eepro_send_packet routine.\n");

	if (dev->tbusy) {
		/* Transmitter has been busy: if only briefly, let the
		 * caller retry; otherwise assume it wedged and reset. */
		int tickssofar = jiffies - dev->trans_start;
		if (tickssofar < 5)
			return 1;
		if (net_debug > 1)
			printk("%s: transmit timed out, %s?\n", dev->name,
				"network cable problem");
		lp->stats.tx_errors++;

		outb(SEL_RESET_CMD, ioaddr);
		/* Give the chip time to come out of reset. */
		SLOW_DOWN_IO;
		SLOW_DOWN_IO;

		/* Discard the pending transmit queue. */
		lp->tx_start = lp->tx_end = RCV_RAM;
		lp->tx_last = 0;

		dev->tbusy=0;
		dev->trans_start = jiffies;

		outb(RCV_ENABLE_CMD, ioaddr);

	}

	/* A NULL skb means "flush the transmit queue" in this era's
	 * network layer. */
	if (skb == NULL) {
		dev_tint(dev);
		return 0;
	}

	/* Atomically claim the transmitter; refuse concurrent entry. */
	if (set_bit(0, (void*)&dev->tbusy) != 0)
		printk("%s: Transmitter access conflict.\n", dev->name);
	else {
		/* Pad undersized frames to the ethernet minimum. */
		short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
		unsigned char *buf = skb->data;

		hardware_send_packet(dev, buf, length);
		dev->trans_start = jiffies;
	}

	dev_kfree_skb (skb, FREE_WRITE);

	if (net_debug > 5)
		printk("eepro: exiting eepro_send_packet routine.\n");

	return 0;
}
667
668
669
670
/* Interrupt handler: dispatch receive and transmit-complete events,
 * looping until no event is pending (bounded by boguscount). */
static void
eepro_interrupt(int irq, struct pt_regs * regs)
{
	/* Recover the device that owns this IRQ line. */
	struct device *dev = (struct device *)(irq2dev_map[irq]);
	int ioaddr, status, boguscount = 0;

	if (net_debug > 5)
		printk("eepro: entering eepro_interrupt routine.\n");

	if (dev == NULL) {
		printk ("eepro_interrupt(): irq %d for unknown device.\n", irq);
		return;
	}
	dev->interrupt = 1;	/* mark the handler as active */

	ioaddr = dev->base_addr;

	do {
		status = inb(ioaddr + STATUS_REG);

		if (status & RX_INT) {
			if (net_debug > 4)
				printk("eepro: packet received interrupt.\n");

			/* Acknowledge the receive event. */
			outb(RX_INT, ioaddr + STATUS_REG);

			eepro_rx(dev);
		}
		else if (status & TX_INT) {
			if (net_debug > 4)
				printk("eepro: packet transmit interrupt.\n");

			/* Acknowledge the transmit event. */
			outb(TX_INT, ioaddr + STATUS_REG);

			eepro_transmit_interrupt(dev);
			dev->tbusy = 0;
			mark_bh(NET_BH);	/* let the net layer refill */
		}
	} while ((++boguscount < 10) && (status & 0x06));

	dev->interrupt = 0;
	if (net_debug > 5)
		printk("eepro: exiting eepro_interrupt routine.\n");

	return;
}
721
/* Shut the interface down: mask interrupts, stop the receiver, reset
 * the chip and release the IRQ line. */
static int
eepro_close(struct device *dev)
{
	struct eepro_local *lp = (struct eepro_local *)dev->priv;
	int ioaddr = dev->base_addr;
	short temp_reg;

	dev->tbusy = 1;
	dev->start = 0;

	outb(BANK1_SELECT, ioaddr);	/* bank 1 for REG1 access */

	/* Disable the interrupt line driver. */
	temp_reg = inb(ioaddr + REG1);
	outb(temp_reg & 0x7f, ioaddr + REG1);

	outb(BANK0_SELECT, ioaddr);

	/* Stop the receiver and discard any queued transmits. */
	outb(STOP_RCV_CMD, ioaddr);
	lp->tx_start = lp->tx_end = RCV_RAM ;
	lp->tx_last = 0;

	/* Mask all interrupts. */
	outb(ALL_MASK, ioaddr + INT_MASK_REG);

	/* Clear any pending events. */
	outb(ALL_MASK, ioaddr + STATUS_REG);

	/* Put the chip back into reset. */
	outb(RESET_CMD, ioaddr);

	free_irq(dev->irq);

	irq2dev_map[dev->irq] = 0;

	/* Give the chip time to settle after the reset. */
	SLOW_DOWN_IO;
	SLOW_DOWN_IO;

#ifdef MODULE
	MOD_DEC_USE_COUNT;
#endif
	return 0;
}
770
771
772
773 static struct enet_statistics *
774 eepro_get_stats(struct device *dev)
775 {
776 struct eepro_local *lp = (struct eepro_local *)dev->priv;
777
778 return &lp->stats;
779 }
780
781
782
783
784
785
786
/* Set the receive filter mode.
 * num_addrs  < -1 or > 63 : promiscuous mode;
 * num_addrs == 0          : normal mode, no multicast;
 * otherwise               : upload the multicast list via MC_SETUP. */
static void
set_multicast_list(struct device *dev, int num_addrs, void *addrs)
{
	struct eepro_local *lp = (struct eepro_local *)dev->priv;
	short ioaddr = dev->base_addr;
	unsigned short mode;

	if (num_addrs < -1 || num_addrs > 63) {
		/* Promiscuous: set the PRMSC bit in bank 2, REG2. */
		outb(BANK2_SELECT, ioaddr);
		mode = inb(ioaddr + REG2);
		outb(mode | PRMSC_Mode, ioaddr + REG2);
		mode = inb(ioaddr + REG3);
		outb(mode, ioaddr + REG3);	/* writing to REG3 latches the mode */
		outb(BANK0_SELECT, ioaddr);
		printk("%s: promiscuous mode enabled.\n", dev->name);
	}
	else if (num_addrs == 0) {
		/* Normal mode: clear promiscuous and multicast bits. */
		outb(BANK2_SELECT, ioaddr);
		mode = inb(ioaddr + REG2);
		outb(mode & 0xd6, ioaddr + REG2);
		mode = inb(ioaddr + REG3);
		outb(mode, ioaddr + REG3);	/* writing to REG3 latches the mode */
		outb(BANK0_SELECT, ioaddr);
	}
	else {
		unsigned short status, *eaddrs = addrs;
		int i, boguscount = 0;

		/* Mask interrupts while the MC_SETUP command runs; its
		 * completion is polled below. */
		outb(ALL_MASK, ioaddr + INT_MASK_REG);

		outb(BANK2_SELECT, ioaddr);
		mode = inb(ioaddr + REG2);
		outb(mode | Multi_IA, ioaddr + REG2);
		mode = inb(ioaddr + REG3);
		outb(mode, ioaddr + REG3);	/* writing to REG3 latches the mode */
		outb(BANK0_SELECT, ioaddr);
		/* Build the MC_SETUP command block at the end of the
		 * transmit queue: header, byte count, then the address
		 * list plus our own station address. */
		outw(lp->tx_end, ioaddr + HOST_ADDRESS_REG);
		outw(MC_SETUP, ioaddr + IO_PORT);
		outw(0, ioaddr + IO_PORT);
		outw(0, ioaddr + IO_PORT);
		outw(6*(num_addrs + 1), ioaddr + IO_PORT);
		for (i = 0; i < num_addrs; i++) {
			outw(*eaddrs++, ioaddr + IO_PORT);
			outw(*eaddrs++, ioaddr + IO_PORT);
			outw(*eaddrs++, ioaddr + IO_PORT);
		}
		eaddrs = (unsigned short *) dev->dev_addr;
		outw(eaddrs[0], ioaddr + IO_PORT);
		outw(eaddrs[1], ioaddr + IO_PORT);
		outw(eaddrs[2], ioaddr + IO_PORT);
		outw(lp->tx_end, ioaddr + XMT_BAR);
		outb(MC_SETUP, ioaddr);

		/* Update the transmit queue past the setup frame, chaining
		 * it onto any frames already queued. */
		i = lp->tx_end + XMT_HEADER + 6*(num_addrs + 1);
		if (lp->tx_start != lp->tx_end) {
			outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
			outw(i, ioaddr + IO_PORT);
			outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
			status = inw(ioaddr + IO_PORT);
			outw(status | CHAIN_BIT, ioaddr + IO_PORT);
			lp->tx_end = i ;
		} else lp->tx_start = lp->tx_end = i ;

		/* Poll for MC_SETUP completion (EXEC event, bit 0x08). */
		do {
			SLOW_DOWN_IO;
			SLOW_DOWN_IO;
			if (inb(ioaddr + STATUS_REG) & 0x08) {
				i = inb(ioaddr);
				outb(0x08, ioaddr + STATUS_REG);	/* ack the event */
				if (i & 0x20) {		/* command failed */
					printk("%s: multicast setup failed.\n",
						dev->name);
					break;
				} else if ((i & 0x0f) == 0x03) {	/* MC_SETUP done */
					printk("%s: set Rx mode to %d addresses.\n",
						dev->name, num_addrs);
					break;
				}
			}
		} while (++boguscount < 100);

		/* Re-enable rx/tx interrupts. */
		outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);

	}
	outb(RCV_ENABLE_CMD, ioaddr);
}
881
882
883
884
885
/* Spin long enough for the serial EEPROM to register one clock edge. */
#define eeprom_delay() { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; }}
/* EEPROM READ opcode, placed above the 6 address bits. */
#define EE_READ_CMD (6 << 6)
888
889 int
890 read_eeprom(int ioaddr, int location)
891 {
892 int i;
893 unsigned short retval = 0;
894 short ee_addr = ioaddr + EEPROM_REG;
895 int read_cmd = location | EE_READ_CMD;
896 short ctrl_val = EECS ;
897
898 outb(BANK2_SELECT, ioaddr);
899 outb(ctrl_val, ee_addr);
900
901
902 for (i = 8; i >= 0; i--) {
903 short outval = (read_cmd & (1 << i)) ? ctrl_val | EEDI
904 : ctrl_val;
905 outb(outval, ee_addr);
906 outb(outval | EESK, ee_addr);
907 eeprom_delay();
908 outb(outval, ee_addr);
909 eeprom_delay();
910 }
911 outb(ctrl_val, ee_addr);
912
913 for (i = 16; i > 0; i--) {
914 outb(ctrl_val | EESK, ee_addr); eeprom_delay();
915 retval = (retval << 1) | ((inb(ee_addr) & EEDO) ? 1 : 0);
916 outb(ctrl_val, ee_addr); eeprom_delay();
917 }
918
919
920 ctrl_val &= ~EECS;
921 outb(ctrl_val | EESK, ee_addr);
922 eeprom_delay();
923 outb(ctrl_val, ee_addr);
924 eeprom_delay();
925 outb(BANK0_SELECT, ioaddr);
926 return retval;
927 }
928
/* Copy one frame into the board's transmit RAM and start (or chain
 * onto) a transmission.  If no room can be found after a few tries,
 * set tbusy and give up; the caller will retry or time out. */
static void
hardware_send_packet(struct device *dev, void *buf, short length)
{
	struct eepro_local *lp = (struct eepro_local *)dev->priv;
	short ioaddr = dev->base_addr;
	unsigned status, tx_available, last, end, boguscount = 10;

	if (net_debug > 5)
		printk("eepro: entering hardware_send_packet routine.\n");

	while (boguscount-- > 0) {

		/* How much transmit RAM is free, accounting for wrap. */
		if (lp->tx_end > lp->tx_start)
			tx_available = XMT_RAM - (lp->tx_end - lp->tx_start);
		else if (lp->tx_end < lp->tx_start)
			tx_available = lp->tx_start - lp->tx_end;
		else tx_available = XMT_RAM;

		/* Mask interrupts while we touch the buffer pointers. */
		outb(ALL_MASK, ioaddr + INT_MASK_REG);

		/* Not enough room for the frame (word-aligned) plus two
		 * headers: retry. */
		if (((((length + 1) >> 1) << 1) + 2*XMT_HEADER)
			>= tx_available)
			continue;

		last = lp->tx_end;
		end = last + (((length + 1) >> 1) << 1) + XMT_HEADER;

		if (end >= RAM_SIZE) {
			if ((RAM_SIZE - last) <= XMT_HEADER) {
				/* Arrrr!!!, must keep the xmt header together:
				 * wrap the whole frame to the start of the
				 * transmit area. */
				last = RCV_RAM;
				end = last + (((length + 1) >> 1) << 1) + XMT_HEADER;
			}
			else end = RCV_RAM + (end - RAM_SIZE);
		}

		/* Write the transmit header and frame data through the
		 * host address / I/O port window. */
		outw(last, ioaddr + HOST_ADDRESS_REG);
		outw(XMT_CMD, ioaddr + IO_PORT);
		outw(0, ioaddr + IO_PORT);
		outw(end, ioaddr + IO_PORT);
		outw(length, ioaddr + IO_PORT);
		outsw(ioaddr + IO_PORT, buf, (length + 1) >> 1);

		if (lp->tx_start != lp->tx_end) {
			/* A transmission is already queued: chain this
			 * frame onto the previous one. */
			if (lp->tx_end != last) {
				outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
				outw(last, ioaddr + IO_PORT);
			}
			outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
			status = inw(ioaddr + IO_PORT);
			outw(status | CHAIN_BIT, ioaddr + IO_PORT);
		}

		/* Acknowledge by reading the status word back. */
		status = inw(ioaddr + IO_PORT);

		/* Re-enable rx/tx interrupts. */
		outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);

		if (lp->tx_start == lp->tx_end) {
			/* Queue was empty: kick off a new transmission. */
			outw(last, ioaddr + XMT_BAR);
			outb(XMT_CMD, ioaddr);
			lp->tx_start = last;
		}
		else outb(RESUME_XMT_CMD, ioaddr);

		lp->tx_last = last;
		lp->tx_end = end;

		if (dev->tbusy) {
			dev->tbusy = 0;
			mark_bh(NET_BH);
		}

		if (net_debug > 5)
			printk("eepro: exiting hardware_send_packet routine.\n");
		return;
	}
	/* Could not find room: flag busy so the net layer backs off. */
	dev->tbusy = 1;
	if (net_debug > 5)
		printk("eepro: exiting hardware_send_packet routine.\n");
}
1018
/* Drain completed frames from the board's receive ring, handing good
 * ones to the network layer and counting errors (bounded by boguscount). */
static void
eepro_rx(struct device *dev)
{
	struct eepro_local *lp = (struct eepro_local *)dev->priv;
	short ioaddr = dev->base_addr;
	short boguscount = 20;
	short rcv_car = lp->rx_start;
	unsigned rcv_event, rcv_status, rcv_next_frame, rcv_size;

	if (net_debug > 5)
		printk("eepro: entering eepro_rx routine.\n");

	/* Point the host window at the current frame and read its event
	 * word; RCV_DONE means a complete frame is waiting. */
	outw(rcv_car, ioaddr + HOST_ADDRESS_REG);
	rcv_event = inw(ioaddr + IO_PORT);

	while (rcv_event == RCV_DONE) {
		rcv_status = inw(ioaddr + IO_PORT);
		rcv_next_frame = inw(ioaddr + IO_PORT);
		rcv_size = inw(ioaddr + IO_PORT);

		if ((rcv_status & (RX_OK | RX_ERROR)) == RX_OK) {
			/* Good frame: copy it into a fresh skb. */
			struct sk_buff *skb;

			rcv_size &= 0x3fff;
			skb = dev_alloc_skb(rcv_size+2);
			if (skb == NULL) {
				printk("%s: Memory squeeze, dropping packet.\n", dev->name);
				lp->stats.rx_dropped++;
				break;
			}
			skb->dev = dev;
			skb_reserve(skb,2);	/* align IP header on a longword */

			insw(ioaddr+IO_PORT, skb_put(skb,rcv_size), (rcv_size + 1) >> 1);

			skb->protocol = eth_type_trans(skb,dev);
			netif_rx(skb);
			lp->stats.rx_packets++;
		}
		else {
			/* Bad frame: classify the error bits. */
			lp->stats.rx_errors++;
			if (rcv_status & 0x0100)
				lp->stats.rx_over_errors++;
			else if (rcv_status & 0x0400)
				lp->stats.rx_frame_errors++;
			else if (rcv_status & 0x0800)
				lp->stats.rx_crc_errors++;
			printk("%s: event = %#x, status = %#x, next = %#x, size = %#x\n",
				dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size);
		}
		if (rcv_status & 0x1000)
			lp->stats.rx_length_errors++;
		if (--boguscount == 0)
			break;

		/* Advance to the next frame in the ring. */
		rcv_car = lp->rx_start + RCV_HEADER + rcv_size;
		lp->rx_start = rcv_next_frame;
		outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG);
		rcv_event = inw(ioaddr + IO_PORT);

	}
	/* Move the receive-stop pointer just behind the consumed data,
	 * wrapping to the top of the receive area at 0. */
	if (rcv_car == 0)
		rcv_car = (RCV_UPPER_LIMIT << 8) | 0xff;
	outw(rcv_car - 1, ioaddr + RCV_STOP);

	if (net_debug > 5)
		printk("eepro: exiting eepro_rx routine.\n");
}
1090
/* Reap completed transmit frames: walk the chain from tx_start, update
 * statistics from each frame's status word, and wake the queue. */
static void
eepro_transmit_interrupt(struct device *dev)
{
	struct eepro_local *lp = (struct eepro_local *)dev->priv;
	short ioaddr = dev->base_addr;
	short boguscount = 10;
	short xmt_status;

	while (lp->tx_start != lp->tx_end) {

		/* Read this frame's status; stop at the first frame the
		 * chip has not finished yet. */
		outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG);
		xmt_status = inw(ioaddr+IO_PORT);
		if ((xmt_status & TX_DONE_BIT) == 0) break;
		xmt_status = inw(ioaddr+IO_PORT);
		lp->tx_start = inw(ioaddr+IO_PORT);	/* follow the chain link */

		if (dev->tbusy) {
			dev->tbusy = 0;
			mark_bh(NET_BH);
		}

		if (xmt_status & 0x2000)	/* transmitted OK */
			lp->stats.tx_packets++;
		else {
			lp->stats.tx_errors++;
			if (xmt_status & 0x0400)
				lp->stats.tx_carrier_errors++;
			printk("%s: XMT status = %#x\n",
				dev->name, xmt_status);
		}
		/* Low nibble holds the collision count for this frame. */
		if (xmt_status & 0x000f)
			lp->stats.collisions += (xmt_status & 0x000f);
		if ((xmt_status & 0x0040) == 0x0)	/* no heartbeat seen */
			lp->stats.tx_heartbeat_errors++;

		if (--boguscount == 0)
			break;
	}
}
1130
1131 #ifdef MODULE
char kernel_version[] = UTS_RELEASE;	/* checked by insmod against the running kernel */
static char devicename[9] = { 0, };	/* name buffer, filled in at registration */
static struct device dev_eepro = {
	devicename,
	0, 0, 0, 0,
	0, 0,
	0, 0, 0, NULL, eepro_probe };

/* insmod-settable parameters: I/O base and IRQ (0 = from EEPROM). */
int io = 0x200;
int irq = 0;
1142
1143 int
1144 init_module(void)
1145 {
1146 if (io == 0)
1147 printk("eepro: You should not use auto-probing with insmod!\n");
1148 dev_eepro.base_addr = io;
1149 dev_eepro.irq = irq;
1150
1151 if (register_netdev(&dev_eepro) != 0)
1152 return -EIO;
1153 return 0;
1154 }
1155
/* Module unload: detach the device and release its resources.  If the
 * interface is still in use, warn and leave everything in place. */
void
cleanup_module(void)
{
	if (MOD_IN_USE)
		printk("eepro: device busy, remove delayed\n");
	else
	{
		unregister_netdev(&dev_eepro);
		kfree_s(dev_eepro.priv,sizeof(struct eepro_local));
		dev_eepro.priv=NULL;

		/* Release the I/O region so a later insmod can reclaim it. */
		release_region(dev_eepro.base_addr, EEPRO_IO_EXTENT);
	}
}
1171 #endif