This source file includes the following definitions:
- eepro_probe
- eepro_probe1
- eepro_grab_irq
- eepro_open
- eepro_send_packet
- eepro_interrupt
- eepro_close
- eepro_get_stats
- set_multicast_list
- read_eeprom
- hardware_send_packet
- eepro_rx
- eepro_transmit_interrupt
- init_module
- cleanup_module

static const char *version =
    "eepro.c: v0.07a 6/5/95 Bao C. Ha (bao@saigon.async.com)\n";

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/malloc.h>
#include <linux/string.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/errno.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
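
/* The I/O base addresses to probe when no base address has been specified. */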
static unsigned int eepro_portlist[] =
    { 0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0x360, 0};

#ifndef NET_DEBUG
#define NET_DEBUG 2
#endif
static unsigned int net_debug = NET_DEBUG;

#define EEPRO_IO_EXTENT 16
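
/* Per-device ("private") data: interface statistics and the pointers that
   track the transmit and receive rings in the 82595's on-board RAM. */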
struct eepro_local {
    struct enet_statistics stats;
    unsigned rx_start;
    unsigned tx_start;
    int tx_last;
    unsigned tx_end;
    int eepro;
};
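
/* A genuine EtherExpress Pro/10 has a station address starting with the
   Intel prefix 00:aa:00; eepro_open() checks for it with these values. */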
#define SA_ADDR0 0x00
#define SA_ADDR1 0xaa
#define SA_ADDR2 0x00

extern int eepro_probe(struct device *dev);

static int eepro_probe1(struct device *dev, short ioaddr);
static int eepro_open(struct device *dev);
static int eepro_send_packet(struct sk_buff *skb, struct device *dev);
static void eepro_interrupt(int irq, struct pt_regs *regs);
static void eepro_rx(struct device *dev);
static void eepro_transmit_interrupt(struct device *dev);
static int eepro_close(struct device *dev);
static struct enet_statistics *eepro_get_stats(struct device *dev);
static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);

static int read_eeprom(int ioaddr, int location);
static void hardware_send_packet(struct device *dev, void *buf, short length);
static int eepro_grab_irq(struct device *dev);
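
/* Layout of the 82595's 32KB on-board RAM: the lower 24KB (RCV_RAM) holds the
   receive ring and the remaining 8KB the transmit ring.  The limit registers
   take only the upper byte of an address, hence the >> 8 shifts below. */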
#define RAM_SIZE 0x8000
#define RCV_HEADER 8
#define RCV_RAM 0x6000
#define RCV_LOWER_LIMIT 0x00
#define RCV_UPPER_LIMIT ((RCV_RAM - 2) >> 8)
#define XMT_RAM (RAM_SIZE - RCV_RAM)
#define XMT_LOWER_LIMIT (RCV_RAM >> 8)
#define XMT_UPPER_LIMIT ((RAM_SIZE - 2) >> 8)
#define XMT_HEADER 8

#define RCV_DONE 0x0008
#define RX_OK 0x2000
#define RX_ERROR 0x0d81

#define TX_DONE_BIT 0x0080
#define CHAIN_BIT 0x8000
#define XMT_STATUS 0x02
#define XMT_CHAIN 0x04
#define XMT_COUNT 0x06
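
/* The 82595 exposes its registers in three banks; writing one of the
   BANKx_SELECT values to the command register (offset 0) switches banks. */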
#define BANK0_SELECT 0x00
#define BANK1_SELECT 0x40
#define BANK2_SELECT 0x80

#define COMMAND_REG 0x00
#define MC_SETUP 0x03
#define XMT_CMD 0x04
#define DIAGNOSE_CMD 0x07
#define RCV_ENABLE_CMD 0x08
#define RCV_DISABLE_CMD 0x0a
#define STOP_RCV_CMD 0x0b
#define RESET_CMD 0x0e
#define POWER_DOWN_CMD 0x18
#define RESUME_XMT_CMD 0x1c
#define SEL_RESET_CMD 0x1e
#define STATUS_REG 0x01
#define RX_INT 0x02
#define TX_INT 0x04
#define EXEC_STATUS 0x30
#define ID_REG 0x02
#define R_ROBIN_BITS 0xc0
#define ID_REG_MASK 0x2c
#define ID_REG_SIG 0x24
#define AUTO_ENABLE 0x10
#define INT_MASK_REG 0x03
#define RX_STOP_MASK 0x01
#define RX_MASK 0x02
#define TX_MASK 0x04
#define EXEC_MASK 0x08
#define ALL_MASK 0x0f
#define RCV_BAR 0x04
#define RCV_STOP 0x06
#define XMT_BAR 0x0a
#define HOST_ADDRESS_REG 0x0c
#define IO_PORT 0x0e

#define REG1 0x01
#define WORD_WIDTH 0x02
#define INT_ENABLE 0x80
#define INT_NO_REG 0x02
#define RCV_LOWER_LIMIT_REG 0x08
#define RCV_UPPER_LIMIT_REG 0x09
#define XMT_LOWER_LIMIT_REG 0x0a
#define XMT_UPPER_LIMIT_REG 0x0b

#define XMT_Chain_Int 0x20
#define XMT_Chain_ErrStop 0x40
#define RCV_Discard_BadFrame 0x80
#define REG2 0x02
#define PRMSC_Mode 0x01
#define Multi_IA 0x20
#define REG3 0x03
#define TPE_BIT 0x04
#define BNC_BIT 0x20

#define I_ADD_REG0 0x04
#define I_ADD_REG1 0x05
#define I_ADD_REG2 0x06
#define I_ADD_REG3 0x07
#define I_ADD_REG4 0x08
#define I_ADD_REG5 0x09

#define EEPROM_REG 0x0a
#define EESK 0x01
#define EECS 0x02
#define EEDI 0x04
#define EEDO 0x08
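
/* Check for a network adapter of this type.  Probe the given base address if
   one was supplied, otherwise walk eepro_portlist; a nonzero base address
   below 0x200 is rejected with ENXIO. */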
#ifdef HAVE_DEVLIST
struct netdev_entry netcard_drv =
    {"eepro", eepro_probe1, EEPRO_IO_EXTENT, eepro_portlist};
#else
int
eepro_probe(struct device *dev)
{
    int i;
    int base_addr = dev ? dev->base_addr : 0;

    if (base_addr > 0x1ff)
        return eepro_probe1(dev, base_addr);
    else if (base_addr != 0)
        return ENXIO;

    for (i = 0; eepro_portlist[i]; i++) {
        int ioaddr = eepro_portlist[i];
        if (check_region(ioaddr, EEPRO_IO_EXTENT))
            continue;
        if (eepro_probe1(dev, ioaddr) == 0)
            return 0;
    }

    return ENODEV;
}
#endif
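
/* Do the real probe work: verify the 82595 ID register signature, read the
   station address from the EEPROM, pick the interface type and IRQ, and fill
   in the device structure. */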
int eepro_probe1(struct device *dev, short ioaddr)
{
    unsigned short station_addr[6], id, counter;
    int i;
    int eepro;

    const char *ifmap[] = {"AUI", "10Base2", "10BaseT"};
    enum iftype { AUI=0, BNC=1, TPE=2 };

    if (((id=inb(ioaddr + ID_REG)) & ID_REG_MASK) == ID_REG_SIG) {

        counter = (id & R_ROBIN_BITS);
        if (((id=inb(ioaddr+ID_REG)) & R_ROBIN_BITS) ==
            (counter + 0x40)) {

            station_addr[0] = read_eeprom(ioaddr, 2);
            station_addr[1] = read_eeprom(ioaddr, 3);
            station_addr[2] = read_eeprom(ioaddr, 4);

            if (station_addr[2] != 0x00aa || (station_addr[1] & 0xff00) != 0x0000) {
                eepro = 0;
                printk("%s: Intel 82595-based lan card at %#x,",
                    dev->name, ioaddr);
            }
            else {
                eepro = 1;
                printk("%s: Intel EtherExpress Pro/10 at %#x,",
                    dev->name, ioaddr);
            }

            dev->base_addr = ioaddr;

            for (i=0; i < 6; i++) {
                dev->dev_addr[i] = ((unsigned char *) station_addr)[5-i];
                printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
            }

            outb(BANK2_SELECT, ioaddr);
            id = inb(ioaddr + REG3);
            if (id & TPE_BIT)
                dev->if_port = TPE;
            else dev->if_port = BNC;

            if (dev->irq < 2 && eepro) {
                i = read_eeprom(ioaddr, 1);
                switch (i & 0x07) {
                    case 0: dev->irq = 9; break;
                    case 1: dev->irq = 3; break;
                    case 2: dev->irq = 5; break;
                    case 3: dev->irq = 10; break;
                    case 4: dev->irq = 11; break;
                    default:
                        printk(" illegal interrupt vector stored in EEPROM.\n");
                        return ENODEV;
                }
            }
            else if (dev->irq == 2)
                dev->irq = 9;

            if (dev->irq > 2) {
                printk(", IRQ %d, %s.\n", dev->irq,
                    ifmap[dev->if_port]);
                if (request_irq(dev->irq, &eepro_interrupt, 0, "eepro")) {
                    printk("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
                    return -EAGAIN;
                }
            }
            else printk(", %s.\n", ifmap[dev->if_port]);

            if ((dev->mem_start & 0xf) > 0)
                net_debug = dev->mem_start & 7;

            if (net_debug > 3) {
                i = read_eeprom(ioaddr, 5);
                if (i & 0x2000)
                    printk("%s: Concurrent Processing is enabled but not used!\n",
                        dev->name);
            }

            if (net_debug)
                printk(version);

            request_region(ioaddr, EEPRO_IO_EXTENT, "eepro");

            dev->priv = kmalloc(sizeof(struct eepro_local), GFP_KERNEL);
            if (dev->priv == NULL)
                return -ENOMEM;
            memset(dev->priv, 0, sizeof(struct eepro_local));

            dev->open = eepro_open;
            dev->stop = eepro_close;
            dev->hard_start_xmit = eepro_send_packet;
            dev->get_stats = eepro_get_stats;
            dev->set_multicast_list = &set_multicast_list;

            ether_setup(dev);

            outb(RESET_CMD, ioaddr);

            return 0;
        }
        else return ENODEV;
    }
    else if (net_debug > 3)
        printk("EtherExpress Pro probe failed!\n");
    return ENODEV;
}
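
/* Interrupt-line auto-detection.  irqrmap[] translates an IRQ number into the
   3-bit code the 82595 wants in its interrupt-number register; the loop below
   programs each candidate IRQ, issues a DIAGNOSE command and uses
   autoirq_report() to see which line actually fired. */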
static char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1};
static int eepro_grab_irq(struct device *dev)
{
    int irqlist[] = { 5, 9, 10, 11, 4, 3, 0};
    int *irqp = irqlist, temp_reg, ioaddr = dev->base_addr;

    outb(BANK1_SELECT, ioaddr);

    temp_reg = inb(ioaddr + REG1);
    outb(temp_reg | INT_ENABLE, ioaddr + REG1);

    outb(BANK0_SELECT, ioaddr);

    outb(ALL_MASK, ioaddr + STATUS_REG);

    outb(ALL_MASK & ~(EXEC_MASK), ioaddr + INT_MASK_REG);

    do {
        outb(BANK1_SELECT, ioaddr);

        temp_reg = inb(ioaddr + INT_NO_REG);
        outb((temp_reg & 0xf8) | irqrmap[*irqp], ioaddr + INT_NO_REG);

        outb(BANK0_SELECT, ioaddr);

        if (request_irq(*irqp, NULL, 0, "bogus") != EBUSY) {

            autoirq_setup(0);

            outb(DIAGNOSE_CMD, ioaddr);

            if (*irqp == autoirq_report(2) &&
                (request_irq(dev->irq = *irqp, &eepro_interrupt, 0, "eepro") == 0))
                break;

            outb(ALL_MASK, ioaddr + STATUS_REG);
        }
    } while (*++irqp);

    outb(BANK1_SELECT, ioaddr);

    temp_reg = inb(ioaddr + REG1);
    outb(temp_reg & 0x7f, ioaddr + REG1);

    outb(BANK0_SELECT, ioaddr);

    outb(ALL_MASK, ioaddr + INT_MASK_REG);

    outb(ALL_MASK, ioaddr + STATUS_REG);

    return dev->irq;
}
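
/* Open/initialize the board: program the station address and buffer limits,
   hook the IRQ, unmask receive and transmit interrupts, and enable the
   receive unit. */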
static int
eepro_open(struct device *dev)
{
    unsigned short temp_reg;
    int i, ioaddr = dev->base_addr;
    struct eepro_local *lp = (struct eepro_local *)dev->priv;

    if (net_debug > 3)
        printk("eepro: entering eepro_open routine.\n");

    if (dev->dev_addr[0] == SA_ADDR0 &&
        dev->dev_addr[1] == SA_ADDR1 &&
        dev->dev_addr[2] == SA_ADDR2)
        lp->eepro = 1;
    else lp->eepro = 0;

    if (dev->irq < 2 && eepro_grab_irq(dev) == 0) {
        printk("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
        return -EAGAIN;
    }

    if (irq2dev_map[dev->irq] != 0
        || (irq2dev_map[dev->irq] = dev) == 0)
        return -EAGAIN;

    outb(BANK2_SELECT, ioaddr);
    temp_reg = inb(ioaddr + EEPROM_REG);
    if (temp_reg & 0x10)
        outb(temp_reg & 0xef, ioaddr + EEPROM_REG);
    for (i=0; i < 6; i++)
        outb(dev->dev_addr[i], ioaddr + I_ADD_REG0 + i);

    temp_reg = inb(ioaddr + REG1);
    outb(temp_reg | XMT_Chain_Int | XMT_Chain_ErrStop
        | RCV_Discard_BadFrame, ioaddr + REG1);

    temp_reg = inb(ioaddr + REG2);
    outb(temp_reg | 0x14, ioaddr + REG2);

    temp_reg = inb(ioaddr + REG3);
    outb(temp_reg & 0x3f, ioaddr + REG3);

    outb(BANK1_SELECT, ioaddr);

    temp_reg = inb(ioaddr + INT_NO_REG);
    outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG);

    outb(RCV_LOWER_LIMIT, ioaddr + RCV_LOWER_LIMIT_REG);
    outb(RCV_UPPER_LIMIT, ioaddr + RCV_UPPER_LIMIT_REG);
    outb(XMT_LOWER_LIMIT, ioaddr + XMT_LOWER_LIMIT_REG);
    outb(XMT_UPPER_LIMIT, ioaddr + XMT_UPPER_LIMIT_REG);

    temp_reg = inb(ioaddr + REG1);
    outb(temp_reg | INT_ENABLE, ioaddr + REG1);

    outb(BANK0_SELECT, ioaddr);

    outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);

    outb(ALL_MASK, ioaddr + STATUS_REG);

    outw(RCV_LOWER_LIMIT << 8, ioaddr + RCV_BAR);
    lp->rx_start = (RCV_LOWER_LIMIT << 8);
    outw((RCV_UPPER_LIMIT << 8) | 0xfe, ioaddr + RCV_STOP);

    outw(XMT_LOWER_LIMIT << 8, ioaddr + XMT_BAR);

    outb(SEL_RESET_CMD, ioaddr);

    SLOW_DOWN_IO;
    SLOW_DOWN_IO;

    lp->tx_start = lp->tx_end = XMT_LOWER_LIMIT << 8;
    lp->tx_last = 0;

    dev->tbusy = 0;
    dev->interrupt = 0;
    dev->start = 1;

    if (net_debug > 3)
        printk("eepro: exiting eepro_open routine.\n");

    outb(RCV_ENABLE_CMD, ioaddr);

    MOD_INC_USE_COUNT;
    return 0;
}
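
/* Queue a packet for transmission.  A transmit that has hung for more than a
   few ticks is treated as a timeout: reset the 82595, clear the transmit ring
   pointers and re-enable the receiver before sending the new frame. */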
static int
eepro_send_packet(struct sk_buff *skb, struct device *dev)
{
    struct eepro_local *lp = (struct eepro_local *)dev->priv;
    int ioaddr = dev->base_addr;

    if (net_debug > 5)
        printk("eepro: entering eepro_send_packet routine.\n");

    if (dev->tbusy) {

        int tickssofar = jiffies - dev->trans_start;
        if (tickssofar < 5)
            return 1;
        if (net_debug > 1)
            printk("%s: transmit timed out, %s?\n", dev->name,
                "network cable problem");
        lp->stats.tx_errors++;

        outb(SEL_RESET_CMD, ioaddr);

        SLOW_DOWN_IO;
        SLOW_DOWN_IO;

        lp->tx_start = lp->tx_end = RCV_RAM;
        lp->tx_last = 0;

        dev->tbusy = 0;
        dev->trans_start = jiffies;

        outb(RCV_ENABLE_CMD, ioaddr);
    }

    if (skb == NULL) {
        dev_tint(dev);
        return 0;
    }

    if (set_bit(0, (void*)&dev->tbusy) != 0)
        printk("%s: Transmitter access conflict.\n", dev->name);
    else {
        short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
        unsigned char *buf = skb->data;

        hardware_send_packet(dev, buf, length);
        dev->trans_start = jiffies;
    }

    dev_kfree_skb(skb, FREE_WRITE);

    if (net_debug > 5)
        printk("eepro: exiting eepro_send_packet routine.\n");

    return 0;
}
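
/* The interrupt handler: dispatch receive and transmit-complete events,
   looping until the status register shows no more pending work (bounded by a
   bogus-interrupt counter). */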
static void
eepro_interrupt(int irq, struct pt_regs *regs)
{
    struct device *dev = (struct device *)(irq2dev_map[irq]);
    int ioaddr, status, boguscount = 0;

    if (net_debug > 5)
        printk("eepro: entering eepro_interrupt routine.\n");

    if (dev == NULL) {
        printk("eepro_interrupt(): irq %d for unknown device.\n", irq);
        return;
    }
    dev->interrupt = 1;

    ioaddr = dev->base_addr;

    do {
        status = inb(ioaddr + STATUS_REG);

        if (status & RX_INT) {
            if (net_debug > 4)
                printk("eepro: packet received interrupt.\n");

            outb(RX_INT, ioaddr + STATUS_REG);

            eepro_rx(dev);
        }
        else if (status & TX_INT) {
            if (net_debug > 4)
                printk("eepro: packet transmit interrupt.\n");

            outb(TX_INT, ioaddr + STATUS_REG);

            eepro_transmit_interrupt(dev);
            dev->tbusy = 0;
            mark_bh(NET_BH);
        }
    } while ((++boguscount < 10) && (status & 0x06));

    dev->interrupt = 0;
    if (net_debug > 5)
        printk("eepro: exiting eepro_interrupt routine.\n");

    return;
}
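
/* Close the interface: stop the receiver, mask all interrupts, reset the
   82595 and release the IRQ. */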
static int
eepro_close(struct device *dev)
{
    struct eepro_local *lp = (struct eepro_local *)dev->priv;
    int ioaddr = dev->base_addr;
    short temp_reg;

    dev->tbusy = 1;
    dev->start = 0;

    outb(BANK1_SELECT, ioaddr);

    temp_reg = inb(ioaddr + REG1);
    outb(temp_reg & 0x7f, ioaddr + REG1);

    outb(BANK0_SELECT, ioaddr);

    outb(STOP_RCV_CMD, ioaddr);
    lp->tx_start = lp->tx_end = RCV_RAM;
    lp->tx_last = 0;

    outb(ALL_MASK, ioaddr + INT_MASK_REG);

    outb(ALL_MASK, ioaddr + STATUS_REG);

    outb(RESET_CMD, ioaddr);

    free_irq(dev->irq);

    irq2dev_map[dev->irq] = 0;

    SLOW_DOWN_IO;
    SLOW_DOWN_IO;

    MOD_DEC_USE_COUNT;
    return 0;
}

static struct enet_statistics *
eepro_get_stats(struct device *dev)
{
    struct eepro_local *lp = (struct eepro_local *)dev->priv;

    return &lp->stats;
}
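
/* Set or clear the multicast filter.  num_addrs < 0 (or more than 63
   addresses) enables promiscuous mode, num_addrs == 0 returns to normal
   reception, and a positive count loads a multicast address list into the
   82595 with an MC_SETUP command appended to the transmit chain. */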
static void
set_multicast_list(struct device *dev, int num_addrs, void *addrs)
{
    struct eepro_local *lp = (struct eepro_local *)dev->priv;
    short ioaddr = dev->base_addr;
    unsigned short mode;

    if (num_addrs <= -1 || num_addrs > 63) {

        dev->flags |= IFF_PROMISC;

        outb(BANK2_SELECT, ioaddr);
        mode = inb(ioaddr + REG2);
        outb(mode | PRMSC_Mode, ioaddr + REG2);
        mode = inb(ioaddr + REG3);
        outb(mode, ioaddr + REG3);
        outb(BANK0_SELECT, ioaddr);
        printk("%s: promiscuous mode enabled.\n", dev->name);
    }
    else if (num_addrs == 0) {
        outb(BANK2_SELECT, ioaddr);
        mode = inb(ioaddr + REG2);
        outb(mode & 0xd6, ioaddr + REG2);
        mode = inb(ioaddr + REG3);
        outb(mode, ioaddr + REG3);
        outb(BANK0_SELECT, ioaddr);
    }
    else {
        unsigned short status, *eaddrs = addrs;
        int i, boguscount = 0;

        outb(ALL_MASK, ioaddr + INT_MASK_REG);

        outb(BANK2_SELECT, ioaddr);
        mode = inb(ioaddr + REG2);
        outb(mode | Multi_IA, ioaddr + REG2);
        mode = inb(ioaddr + REG3);
        outb(mode, ioaddr + REG3);
        outb(BANK0_SELECT, ioaddr);
        outw(lp->tx_end, ioaddr + HOST_ADDRESS_REG);
        outw(MC_SETUP, ioaddr + IO_PORT);
        outw(0, ioaddr + IO_PORT);
        outw(0, ioaddr + IO_PORT);
        outw(6*(num_addrs + 1), ioaddr + IO_PORT);
        for (i = 0; i < num_addrs; i++) {
            outw(*eaddrs++, ioaddr + IO_PORT);
            outw(*eaddrs++, ioaddr + IO_PORT);
            outw(*eaddrs++, ioaddr + IO_PORT);
        }
        eaddrs = (unsigned short *) dev->dev_addr;
        outw(eaddrs[0], ioaddr + IO_PORT);
        outw(eaddrs[1], ioaddr + IO_PORT);
        outw(eaddrs[2], ioaddr + IO_PORT);
        outw(lp->tx_end, ioaddr + XMT_BAR);
        outb(MC_SETUP, ioaddr);

        i = lp->tx_end + XMT_HEADER + 6*(num_addrs + 1);
        if (lp->tx_start != lp->tx_end) {

            outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
            outw(i, ioaddr + IO_PORT);
            outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
            status = inw(ioaddr + IO_PORT);
            outw(status | CHAIN_BIT, ioaddr + IO_PORT);
            lp->tx_end = i;
        } else lp->tx_start = lp->tx_end = i;

        do {
            SLOW_DOWN_IO;
            SLOW_DOWN_IO;
            if (inb(ioaddr + STATUS_REG) & 0x08) {
                i = inb(ioaddr);
                outb(0x08, ioaddr + STATUS_REG);
                if (i & 0x20) {
                    printk("%s: multicast setup failed.\n",
                        dev->name);
                    break;
                } else if ((i & 0x0f) == 0x03) {
                    printk("%s: set Rx mode to %d addresses.\n",
                        dev->name, num_addrs);
                    break;
                }
            }
        } while (++boguscount < 100);

        outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);
    }
    outb(RCV_ENABLE_CMD, ioaddr);
}
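
/* Read a word from the serial EEPROM through the EEPROM control register in
   bank 2: clock the read command and address out on EEDI, then shift the 16
   data bits back in on EEDO. */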
#define eeprom_delay() { int _i = 40; while (--_i > 0) { __SLOW_DOWN_IO; }}
#define EE_READ_CMD (6 << 6)

int
read_eeprom(int ioaddr, int location)
{
    int i;
    unsigned short retval = 0;
    short ee_addr = ioaddr + EEPROM_REG;
    int read_cmd = location | EE_READ_CMD;
    short ctrl_val = EECS;

    outb(BANK2_SELECT, ioaddr);
    outb(ctrl_val, ee_addr);

    for (i = 8; i >= 0; i--) {
        short outval = (read_cmd & (1 << i)) ? ctrl_val | EEDI
            : ctrl_val;
        outb(outval, ee_addr);
        outb(outval | EESK, ee_addr);
        eeprom_delay();
        outb(outval, ee_addr);
        eeprom_delay();
    }
    outb(ctrl_val, ee_addr);

    for (i = 16; i > 0; i--) {
        outb(ctrl_val | EESK, ee_addr); eeprom_delay();
        retval = (retval << 1) | ((inb(ee_addr) & EEDO) ? 1 : 0);
        outb(ctrl_val, ee_addr); eeprom_delay();
    }

    ctrl_val &= ~EECS;
    outb(ctrl_val | EESK, ee_addr);
    eeprom_delay();
    outb(ctrl_val, ee_addr);
    eeprom_delay();
    outb(BANK0_SELECT, ioaddr);
    return retval;
}
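
/* Copy a frame into the transmit area of the on-board RAM, build the transmit
   command header, and either start a new transmission or chain the buffer
   onto the one already in progress. */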
static void
hardware_send_packet(struct device *dev, void *buf, short length)
{
    struct eepro_local *lp = (struct eepro_local *)dev->priv;
    short ioaddr = dev->base_addr;
    unsigned status, tx_available, last, end, boguscount = 10;

    if (net_debug > 5)
        printk("eepro: entering hardware_send_packet routine.\n");

    while (boguscount-- > 0) {

        if (lp->tx_end > lp->tx_start)
            tx_available = XMT_RAM - (lp->tx_end - lp->tx_start);
        else if (lp->tx_end < lp->tx_start)
            tx_available = lp->tx_start - lp->tx_end;
        else tx_available = XMT_RAM;

        outb(ALL_MASK, ioaddr + INT_MASK_REG);

        if (((((length + 1) >> 1) << 1) + 2*XMT_HEADER)
            >= tx_available)
            continue;

        last = lp->tx_end;
        end = last + (((length + 1) >> 1) << 1) + XMT_HEADER;

        if (end >= RAM_SIZE) {
            if ((RAM_SIZE - last) <= XMT_HEADER) {

                last = RCV_RAM;
                end = last + (((length + 1) >> 1) << 1) + XMT_HEADER;
            }
            else end = RCV_RAM + (end - RAM_SIZE);
        }

        outw(last, ioaddr + HOST_ADDRESS_REG);
        outw(XMT_CMD, ioaddr + IO_PORT);
        outw(0, ioaddr + IO_PORT);
        outw(end, ioaddr + IO_PORT);
        outw(length, ioaddr + IO_PORT);
        outsw(ioaddr + IO_PORT, buf, (length + 1) >> 1);

        if (lp->tx_start != lp->tx_end) {

            if (lp->tx_end != last) {
                outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
                outw(last, ioaddr + IO_PORT);
            }
            outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
            status = inw(ioaddr + IO_PORT);
            outw(status | CHAIN_BIT, ioaddr + IO_PORT);
        }

        status = inw(ioaddr + IO_PORT);

        outb(ALL_MASK & ~(RX_MASK | TX_MASK), ioaddr + INT_MASK_REG);

        if (lp->tx_start == lp->tx_end) {
            outw(last, ioaddr + XMT_BAR);
            outb(XMT_CMD, ioaddr);
            lp->tx_start = last;
        }
        else outb(RESUME_XMT_CMD, ioaddr);

        lp->tx_last = last;
        lp->tx_end = end;

        if (dev->tbusy) {
            dev->tbusy = 0;
            mark_bh(NET_BH);
        }

        if (net_debug > 5)
            printk("eepro: exiting hardware_send_packet routine.\n");
        return;
    }
    dev->tbusy = 1;
    if (net_debug > 5)
        printk("eepro: exiting hardware_send_packet routine.\n");
}
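
/* Pull received frames out of the on-board receive ring, hand good ones to
   the network stack and record errors, then advance the receive stop pointer
   behind the last frame processed. */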
static void
eepro_rx(struct device *dev)
{
    struct eepro_local *lp = (struct eepro_local *)dev->priv;
    short ioaddr = dev->base_addr;
    short boguscount = 20;
    short rcv_car = lp->rx_start;
    unsigned rcv_event, rcv_status, rcv_next_frame, rcv_size;

    if (net_debug > 5)
        printk("eepro: entering eepro_rx routine.\n");

    outw(rcv_car, ioaddr + HOST_ADDRESS_REG);
    rcv_event = inw(ioaddr + IO_PORT);

    while (rcv_event == RCV_DONE) {
        rcv_status = inw(ioaddr + IO_PORT);
        rcv_next_frame = inw(ioaddr + IO_PORT);
        rcv_size = inw(ioaddr + IO_PORT);

        if ((rcv_status & (RX_OK | RX_ERROR)) == RX_OK) {

            struct sk_buff *skb;

            rcv_size &= 0x3fff;
            skb = dev_alloc_skb(rcv_size+2);
            if (skb == NULL) {
                printk("%s: Memory squeeze, dropping packet.\n", dev->name);
                lp->stats.rx_dropped++;
                break;
            }
            skb->dev = dev;
            skb_reserve(skb,2);

            insw(ioaddr+IO_PORT, skb_put(skb,rcv_size), (rcv_size + 1) >> 1);

            skb->protocol = eth_type_trans(skb,dev);
            netif_rx(skb);
            lp->stats.rx_packets++;
        }
        else {

            lp->stats.rx_errors++;
            if (rcv_status & 0x0100)
                lp->stats.rx_over_errors++;
            else if (rcv_status & 0x0400)
                lp->stats.rx_frame_errors++;
            else if (rcv_status & 0x0800)
                lp->stats.rx_crc_errors++;
            printk("%s: event = %#x, status = %#x, next = %#x, size = %#x\n",
                dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size);
        }
        if (rcv_status & 0x1000)
            lp->stats.rx_length_errors++;
        if (--boguscount == 0)
            break;

        rcv_car = lp->rx_start + RCV_HEADER + rcv_size;
        lp->rx_start = rcv_next_frame;
        outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG);
        rcv_event = inw(ioaddr + IO_PORT);
    }
    if (rcv_car == 0)
        rcv_car = (RCV_UPPER_LIMIT << 8) | 0xff;
    outw(rcv_car - 1, ioaddr + RCV_STOP);

    if (net_debug > 5)
        printk("eepro: exiting eepro_rx routine.\n");
}
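
/* Walk the chain of completed transmit commands, update the statistics
   (collisions, carrier and heartbeat errors) and wake the transmit queue. */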
static void
eepro_transmit_interrupt(struct device *dev)
{
    struct eepro_local *lp = (struct eepro_local *)dev->priv;
    short ioaddr = dev->base_addr;
    short boguscount = 10;
    short xmt_status;

    while (lp->tx_start != lp->tx_end) {

        outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG);
        xmt_status = inw(ioaddr+IO_PORT);
        if ((xmt_status & TX_DONE_BIT) == 0) break;
        xmt_status = inw(ioaddr+IO_PORT);
        lp->tx_start = inw(ioaddr+IO_PORT);

        if (dev->tbusy) {
            dev->tbusy = 0;
            mark_bh(NET_BH);
        }

        if (xmt_status & 0x2000)
            lp->stats.tx_packets++;
        else {
            lp->stats.tx_errors++;
            if (xmt_status & 0x0400)
                lp->stats.tx_carrier_errors++;
            printk("%s: XMT status = %#x\n",
                dev->name, xmt_status);
        }
        if (xmt_status & 0x000f)
            lp->stats.collisions += (xmt_status & 0x000f);
        if ((xmt_status & 0x0040) == 0x0)
            lp->stats.tx_heartbeat_errors++;

        if (--boguscount == 0)
            break;
    }
}
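
/* Loadable-module support.  A sketch of typical usage, assuming an insmod
   that can set module variables on the command line:
       insmod eepro.o io=0x200 irq=10
   Auto-probing (io=0) is discouraged for modules. */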
#ifdef MODULE
static char devicename[9] = { 0, };
static struct device dev_eepro = {
    devicename,
    0, 0, 0, 0,
    0, 0,
    0, 0, 0, NULL, eepro_probe };

static int io = 0x200;
static int irq = 0;

int
init_module(void)
{
    if (io == 0)
        printk("eepro: You should not use auto-probing with insmod!\n");
    dev_eepro.base_addr = io;
    dev_eepro.irq = irq;

    if (register_netdev(&dev_eepro) != 0)
        return -EIO;
    return 0;
}

void
cleanup_module(void)
{
    unregister_netdev(&dev_eepro);
    kfree_s(dev_eepro.priv, sizeof(struct eepro_local));
    dev_eepro.priv = NULL;

    release_region(dev_eepro.base_addr, EEPRO_IO_EXTENT);
}
#endif