This source file includes the following definitions:
- lance_init
- lance_probe1
- lance_open
- lance_init_ring
- lance_start_xmit
- lance_interrupt
- lance_rx
- lance_close
- lance_get_stats
- set_multicast_list
static char *version = "lance.c:v1.07 1/18/95 becker@cesdis.gsfc.nasa.gov\n";

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/malloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/bios32.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

struct device *init_etherdev(struct device *dev, int sizeof_private,
                             unsigned long *mem_startp);
static unsigned int lance_portlist[] = {0x300, 0x320, 0x340, 0x360, 0};
unsigned long lance_probe1(int ioaddr, unsigned long mem_start);

#ifdef HAVE_DEVLIST
struct netdev_entry lance_drv =
    {"lance", lance_probe1, LANCE_TOTAL_SIZE, lance_portlist};
#endif

#ifdef LANCE_DEBUG
int lance_debug = LANCE_DEBUG;
#else
int lance_debug = 1;
#endif

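/*
 * Ring sizes: the Tx and Rx rings each hold 2^LANCE_LOG_*_BUFFERS entries
 * (16 by default).  The ring length is encoded in the upper three bits of
 * the init-block ring-pointer words, hence the << 29 in *_RING_LEN_BITS.
 */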
#ifndef LANCE_LOG_TX_BUFFERS
#define LANCE_LOG_TX_BUFFERS 4
#define LANCE_LOG_RX_BUFFERS 4
#endif

#define TX_RING_SIZE     (1 << (LANCE_LOG_TX_BUFFERS))
#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)

#define RX_RING_SIZE     (1 << (LANCE_LOG_RX_BUFFERS))
#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)

#define PKT_BUF_SZ 1544

#define LANCE_DATA 0x10
#define LANCE_ADDR 0x12
#define LANCE_RESET 0x14
#define LANCE_BUS_IF 0x16
#define LANCE_TOTAL_SIZE 0x18

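/*
 * Rx and Tx ring descriptors as the LANCE's DMA engine sees them in
 * memory: a 32-bit word combining the 24-bit buffer address with the
 * status/ownership bits in the top byte, followed by two 16-bit
 * length/flag fields.
 */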
struct lance_rx_head {
    int base;
    short buf_length;
    short msg_length;
};

struct lance_tx_head {
    int base;
    short length;
    short misc;
};

struct lance_init_block {
    unsigned short mode;
    unsigned char phys_addr[6];
    unsigned filter[2];

    unsigned rx_ring;
    unsigned tx_ring;
};

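/*
 * Per-device private state.  The descriptor rings and the init block are
 * embedded here; the receive and bounce buffers are carved out of the
 * same allocation in lance_probe1().
 */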
struct lance_private {
    char *name;
    void *pad;

    struct lance_rx_head rx_ring[RX_RING_SIZE];
    struct lance_tx_head tx_ring[TX_RING_SIZE];
    struct lance_init_block init_block;

    struct sk_buff *tx_skbuff[TX_RING_SIZE];
    long rx_buffs;

    char (*tx_bounce_buffs)[PKT_BUF_SZ];
    int cur_rx, cur_tx;
    int dirty_rx, dirty_tx;
    int dma;
    struct enet_statistics stats;
    char chip_version;
    char tx_full;
    char lock;
    int pad0, pad1;
};

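/* Known chip ID values (read from registers 88/89) and their printable names. */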
static struct lance_chip_type {
    int id_number;
    char *name;
    int flags;
} chip_table[] = {
    {0x0000, "LANCE 7990", 0},
    {0x0003, "PCnet/ISA 79C960", 0},
    {0x2260, "PCnet/ISA+ 79C961", 0},
    {0x2420, "PCnet/PCI 79C970", 0},
    {0x2430, "PCnet32", 0},
    {0x0, "PCnet (unknown)", 0},
};

enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, LANCE_UNKNOWN=5};

static unsigned char pci_irq_line = 0;

static int lance_open(struct device *dev);
static void lance_init_ring(struct device *dev);
static int lance_start_xmit(struct sk_buff *skb, struct device *dev);
static int lance_rx(struct device *dev);
static void lance_interrupt(int irq, struct pt_regs *regs);
static int lance_close(struct device *dev);
static struct enet_statistics *lance_get_stats(struct device *dev);
#ifdef HAVE_MULTICAST
static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);
#endif

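/*
 * lance_init() is called at boot to probe for LANCE boards.  It first
 * scans the PCI bus (if a PCI BIOS is present) for AMD 79C970 devices,
 * then falls back to probing the ISA ports in lance_portlist[] for the
 * 0x57 0x57 signature bytes.
 */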
unsigned long lance_init(unsigned long mem_start, unsigned long mem_end)
{
    int *port;

#if defined(CONFIG_PCI)
#define AMD_VENDOR_ID 0x1022
#define AMD_DEVICE_ID 0x2000
    if (pcibios_present()) {
        int pci_index;
        printk("lance.c: PCI bios is present, checking for devices...\n");
        for (pci_index = 0; pci_index < 8; pci_index++) {
            unsigned char pci_bus, pci_device_fn;
            unsigned long pci_ioaddr;
            unsigned short pci_command;

            if (pcibios_find_device(AMD_VENDOR_ID, AMD_DEVICE_ID, pci_index,
                                    &pci_bus, &pci_device_fn) != 0)
                break;
            pcibios_read_config_byte(pci_bus, pci_device_fn,
                                     PCI_INTERRUPT_LINE, &pci_irq_line);
            pcibios_read_config_dword(pci_bus, pci_device_fn,
                                      PCI_BASE_ADDRESS_0, &pci_ioaddr);

            pci_ioaddr &= ~3;

            pcibios_read_config_word(pci_bus, pci_device_fn,
                                     PCI_COMMAND, &pci_command);
            if (!(pci_command & PCI_COMMAND_MASTER)) {
                printk("PCI Master Bit has not been set. Setting...\n");
                pci_command |= PCI_COMMAND_MASTER;
                pcibios_write_config_word(pci_bus, pci_device_fn,
                                          PCI_COMMAND, pci_command);
            }
            printk("Found PCnet/PCI at %#lx, irq %d (mem_start is %#lx).\n",
                   pci_ioaddr, pci_irq_line, mem_start);
            mem_start = lance_probe1(pci_ioaddr, mem_start);
            pci_irq_line = 0;
        }
    }
#endif

    for (port = lance_portlist; *port; port++) {
        int ioaddr = *port;

        if (check_region(ioaddr, LANCE_TOTAL_SIZE) == 0
            && inb(ioaddr + 14) == 0x57
            && inb(ioaddr + 15) == 0x57) {
            mem_start = lance_probe1(ioaddr, mem_start);
        }
    }

    return mem_start;
}

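/*
 * lance_probe1() does the real board setup: it verifies that a LANCE is
 * present at ioaddr, identifies the chip variant, registers the device,
 * allocates the private structure and packet buffers, and works out the
 * IRQ and DMA channel (from PCI data, HP-specific registers, the PCnet
 * bus-interface register, or by auto-probing).
 */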
unsigned long lance_probe1(int ioaddr, unsigned long mem_start)
{
    struct device *dev;
    struct lance_private *lp;
    short dma_channels;
    int i, reset_val, lance_version;
    char *chipname;
    unsigned char hpJ2405A = 0;
    int hp_builtin = 0;
    static int did_version = 0;

    if (*((unsigned short *)0x000f0102) == 0x5048) {
        short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
        int hp_port = (*((unsigned char *)0x000f00f1) & 1) ? 0x499 : 0x99;

        if ((inb(hp_port) & 0xc0) == 0x80
            && ioaddr_table[inb(hp_port) & 3] == ioaddr)
            hp_builtin = hp_port;
    }

    hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00
                && inb(ioaddr+2) == 0x09);

    reset_val = inw(ioaddr+LANCE_RESET);

    if (!hpJ2405A)
        outw(reset_val, ioaddr+LANCE_RESET);

    outw(0x0000, ioaddr+LANCE_ADDR);
    if (inw(ioaddr+LANCE_DATA) != 0x0004)
        return mem_start;

    outw(88, ioaddr+LANCE_ADDR);
    if (inw(ioaddr+LANCE_ADDR) != 88) {
        lance_version = 0;
    } else {
        int chip_version = inw(ioaddr+LANCE_DATA);
        outw(89, ioaddr+LANCE_ADDR);
        chip_version |= inw(ioaddr+LANCE_DATA) << 16;
        if (lance_debug > 2)
            printk(" LANCE chip version is %#x.\n", chip_version);
        if ((chip_version & 0xfff) != 0x003)
            return mem_start;
        chip_version = (chip_version >> 12) & 0xffff;
        for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
            if (chip_table[lance_version].id_number == chip_version)
                break;
        }
    }

    dev = init_etherdev(0, sizeof(struct lance_private)
                        + PKT_BUF_SZ*(RX_RING_SIZE + TX_RING_SIZE),
                        &mem_start);

    chipname = chip_table[lance_version].name;
    printk("%s: %s at %#3x,", dev->name, chipname, ioaddr);

    for (i = 0; i < 6; i++)
        printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));

    dev->base_addr = ioaddr;
    request_region(ioaddr, LANCE_TOTAL_SIZE, chip_table[lance_version].name);

    dev->priv = (void *)(((int)dev->priv + 7) & ~7);
    lp = (struct lance_private *)dev->priv;
    lp->name = chipname;
    lp->rx_buffs = (long)dev->priv + sizeof(struct lance_private);
    lp->tx_bounce_buffs = (char (*)[PKT_BUF_SZ])
        (lp->rx_buffs + PKT_BUF_SZ*RX_RING_SIZE);

#ifndef final_version
    if ((int)(lp->rx_ring) & 0x07) {
        printk(" **ERROR** LANCE Rx and Tx rings not on even boundary.\n");
        return mem_start;
    }
#endif

    lp->chip_version = lance_version;

    lp->init_block.mode = 0x0003;
    for (i = 0; i < 6; i++)
        lp->init_block.phys_addr[i] = dev->dev_addr[i];
    lp->init_block.filter[0] = 0x00000000;
    lp->init_block.filter[1] = 0x00000000;
    lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
    lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;

    outw(0x0001, ioaddr+LANCE_ADDR);
    inw(ioaddr+LANCE_ADDR);
    outw((short)(int)&lp->init_block, ioaddr+LANCE_DATA);
    outw(0x0002, ioaddr+LANCE_ADDR);
    inw(ioaddr+LANCE_ADDR);
    outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);
    outw(0x0000, ioaddr+LANCE_ADDR);
    inw(ioaddr+LANCE_ADDR);

    if (pci_irq_line) {
        dev->dma = 4;
        dev->irq = pci_irq_line;
    } else if (hp_builtin) {
        char dma_tbl[4] = {3, 5, 6, 0};
        char irq_tbl[8] = {3, 4, 5, 9};
        unsigned char port_val = inb(hp_builtin);
        dev->dma = dma_tbl[(port_val >> 4) & 3];
        dev->irq = irq_tbl[(port_val >> 2) & 3];
        printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
    } else if (hpJ2405A) {
        char dma_tbl[4] = {3, 5, 6, 7};
        char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
        short reset_val = inw(ioaddr+LANCE_RESET);
        dev->dma = dma_tbl[(reset_val >> 2) & 3];
        dev->irq = irq_tbl[(reset_val >> 4) & 7];
        printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
    } else if (lance_version == PCNET_ISAP) {
        short bus_info;
        outw(8, ioaddr+LANCE_ADDR);
        bus_info = inw(ioaddr+LANCE_BUS_IF);
        dev->dma = bus_info & 0x07;
        dev->irq = (bus_info >> 4) & 0x0F;
    } else {
        if (dev->mem_start & 0x07)
            dev->dma = dev->mem_start & 0x07;
    }

    if (dev->dma == 0) {
        dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
                       (inb(DMA2_STAT_REG) & 0xf0);
    }
    if (dev->irq >= 2)
        printk(" assigned IRQ %d", dev->irq);
    else {
        autoirq_setup(0);

        outw(0x0041, ioaddr+LANCE_DATA);

        dev->irq = autoirq_report(1);
        if (dev->irq)
            printk(", probed IRQ %d", dev->irq);
        else {
            printk(", failed to detect IRQ line.\n");
            return mem_start;
        }

        if (inw(ioaddr+LANCE_DATA) & 0x0100)
            dev->dma = 4;
    }

    if (dev->dma == 4) {
        printk(", no DMA needed.\n");
    } else if (dev->dma) {
        if (request_dma(dev->dma, chipname)) {
            printk("DMA %d allocation failed.\n", dev->dma);
            return mem_start;
        } else
            printk(", assigned DMA %d.\n", dev->dma);
    } else {
        int dmas[] = { 5, 6, 7, 3 }, boguscnt;

        for (i = 0; i < 4; i++) {
            int dma = dmas[i];

            if (test_bit(dma, &dma_channels))
                continue;
            outw(0x7f04, ioaddr+LANCE_DATA);
            if (request_dma(dma, chipname))
                continue;
            set_dma_mode(dma, DMA_MODE_CASCADE);
            enable_dma(dma);

            outw(0x0001, ioaddr+LANCE_DATA);
            for (boguscnt = 100; boguscnt > 0; --boguscnt)
                if (inw(ioaddr+LANCE_DATA) & 0x0900)
                    break;
            if (inw(ioaddr+LANCE_DATA) & 0x0100) {
                dev->dma = dma;
                printk(", DMA %d.\n", dev->dma);
                break;
            } else {
                disable_dma(dma);
                free_dma(dma);
            }
        }
        if (i == 4) {
            printk("DMA detection failed.\n");
            return mem_start;
        }
    }

    if (lp->chip_version != OLD_LANCE) {
        outw(0x0002, ioaddr+LANCE_ADDR);
        outw(0x0002, ioaddr+LANCE_BUS_IF);
    }

    if (lance_debug > 0 && did_version++ == 0)
        printk(version);

    dev->open = &lance_open;
    dev->hard_start_xmit = &lance_start_xmit;
    dev->stop = &lance_close;
    dev->get_stats = &lance_get_stats;
    dev->set_multicast_list = &set_multicast_list;

    return mem_start;
}

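/*
 * lance_open(): bring the interface up.  Acquire the IRQ and (for ISA
 * cards) the cascaded DMA channel, load the init-block address into
 * registers 1 and 2, then start the chip: write INIT to CSR0 and, once
 * the chip reports initialization done (0x0100), write STRT|INEA.
 */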
static int
lance_open(struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    int ioaddr = dev->base_addr;
    int i;

    if (dev->irq == 0 ||
        request_irq(dev->irq, &lance_interrupt, 0, lp->name)) {
        return -EAGAIN;
    }

    irq2dev_map[dev->irq] = dev;

    inw(ioaddr+LANCE_RESET);

    if (dev->dma != 4) {
        enable_dma(dev->dma);
        set_dma_mode(dev->dma, DMA_MODE_CASCADE);
    }

    if (lp->chip_version == OLD_LANCE)
        outw(0, ioaddr+LANCE_RESET);

    if (lp->chip_version != OLD_LANCE) {
        outw(0x0002, ioaddr+LANCE_ADDR);
        outw(0x0002, ioaddr+LANCE_BUS_IF);
    }

    if (lance_debug > 1)
        printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
               dev->name, dev->irq, dev->dma, (int)lp->tx_ring, (int)lp->rx_ring,
               (int)&lp->init_block);

    lance_init_ring(dev);

    outw(0x0001, ioaddr+LANCE_ADDR);
    outw((short)(int)&lp->init_block, ioaddr+LANCE_DATA);
    outw(0x0002, ioaddr+LANCE_ADDR);
    outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);

    outw(0x0004, ioaddr+LANCE_ADDR);
    outw(0x0d15, ioaddr+LANCE_DATA);

    outw(0x0000, ioaddr+LANCE_ADDR);
    outw(0x0001, ioaddr+LANCE_DATA);

    dev->tbusy = 0;
    dev->interrupt = 0;
    dev->start = 1;
    i = 0;
    while (i++ < 100)
        if (inw(ioaddr+LANCE_DATA) & 0x0100)
            break;

    outw(0x0042, ioaddr+LANCE_DATA);

    if (lance_debug > 2)
        printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
               dev->name, i, (int)&lp->init_block, inw(ioaddr+LANCE_DATA));

    return 0;
}

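/*
 * Initialize (or reset) the Tx and Rx descriptor rings and the init
 * block.  Each Rx descriptor is given ownership of one PKT_BUF_SZ
 * buffer; the Tx descriptors start out empty.
 */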
static void
lance_init_ring(struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    int i;

    lp->lock = 0, lp->tx_full = 0;
    lp->cur_rx = lp->cur_tx = 0;
    lp->dirty_rx = lp->dirty_tx = 0;

    for (i = 0; i < RX_RING_SIZE; i++) {
        lp->rx_ring[i].base = (lp->rx_buffs + i*PKT_BUF_SZ) | 0x80000000;
        lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
    }

    for (i = 0; i < TX_RING_SIZE; i++) {
        lp->tx_ring[i].base = 0;
    }

    lp->init_block.mode = 0x0000;
    for (i = 0; i < 6; i++)
        lp->init_block.phys_addr[i] = dev->dev_addr[i];
    lp->init_block.filter[0] = 0x00000000;
    lp->init_block.filter[1] = 0x00000000;
    lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
    lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;
}

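/*
 * Queue a packet for transmission.  Handles the transmit-timeout reset,
 * forces a minimum descriptor length on the original LANCE, copies
 * packets that sit above the 16MB ISA DMA limit into a bounce buffer,
 * hands the descriptor to the chip, and triggers transmit demand
 * (CSR0 = 0x0048).
 */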
static int
lance_start_xmit(struct sk_buff *skb, struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    int ioaddr = dev->base_addr;
    int entry;

    if (dev->tbusy) {
        int tickssofar = jiffies - dev->trans_start;
        if (tickssofar < 20)
            return 1;
        outw(0, ioaddr+LANCE_ADDR);
        printk("%s: transmit timed out, status %4.4x, resetting.\n",
               dev->name, inw(ioaddr+LANCE_DATA));
        outw(0x0004, ioaddr+LANCE_DATA);
        lp->stats.tx_errors++;
#ifndef final_version
        {
            int i;
            printk(" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
                   lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
                   lp->cur_rx);
            for (i = 0; i < RX_RING_SIZE; i++)
                printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
                       lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
                       lp->rx_ring[i].msg_length);
            for (i = 0; i < TX_RING_SIZE; i++)
                printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
                       lp->tx_ring[i].base, -lp->tx_ring[i].length,
                       lp->tx_ring[i].misc);
            printk("\n");
        }
#endif
        lance_init_ring(dev);
        outw(0x0043, ioaddr+LANCE_DATA);

        dev->tbusy = 0;
        dev->trans_start = jiffies;

        return 0;
    }

    if (skb == NULL) {
        dev_tint(dev);
        return 0;
    }

    if (skb->len <= 0)
        return 0;

    if (lance_debug > 3) {
        outw(0x0000, ioaddr+LANCE_ADDR);
        printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
               inw(ioaddr+LANCE_DATA));
        outw(0x0000, ioaddr+LANCE_DATA);
    }

    if (set_bit(0, (void*)&dev->tbusy) != 0) {
        printk("%s: Transmitter access conflict.\n", dev->name);
        return 1;
    }

    if (set_bit(0, (void*)&lp->lock) != 0) {
        if (lance_debug > 0)
            printk("%s: tx queue lock!.\n", dev->name);
        return 1;
    }

    entry = lp->cur_tx & TX_RING_MOD_MASK;

    if (lp->chip_version == OLD_LANCE) {
        lp->tx_ring[entry].length =
            -(ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN);
    } else
        lp->tx_ring[entry].length = -skb->len;

    lp->tx_ring[entry].misc = 0x0000;

    if ((int)(skb->data) + skb->len > 0x01000000) {
        if (lance_debug > 5)
            printk("%s: bouncing a high-memory packet (%#x).\n",
                   dev->name, (int)(skb->data));
        memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len);
        lp->tx_ring[entry].base =
            (int)(lp->tx_bounce_buffs + entry) | 0x83000000;
        dev_kfree_skb(skb, FREE_WRITE);
    } else {
        lp->tx_skbuff[entry] = skb;
        lp->tx_ring[entry].base = (int)(skb->data) | 0x83000000;
    }
    lp->cur_tx++;

    outw(0x0000, ioaddr+LANCE_ADDR);
    outw(0x0048, ioaddr+LANCE_DATA);

    dev->trans_start = jiffies;

    cli();
    lp->lock = 0;
    if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base == 0)
        dev->tbusy = 0;
    else
        lp->tx_full = 1;
    sti();

    return 0;
}

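/*
 * Interrupt handler.  Looks up the device from irq2dev_map, then loops
 * (up to 10 times) acknowledging CSR0 events: received packets are
 * passed to lance_rx(), completed transmit descriptors are reaped and
 * their skbs freed, and error bits update the statistics.
 */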
static void
lance_interrupt(int irq, struct pt_regs *regs)
{
    struct device *dev = (struct device *)(irq2dev_map[irq]);
    struct lance_private *lp;
    int csr0, ioaddr, boguscnt = 10;

    if (dev == NULL) {
        printk("lance_interrupt(): irq %d for unknown device.\n", irq);
        return;
    }

    ioaddr = dev->base_addr;
    lp = (struct lance_private *)dev->priv;
    if (dev->interrupt)
        printk("%s: Re-entering the interrupt handler.\n", dev->name);

    dev->interrupt = 1;

    outw(0x00, dev->base_addr + LANCE_ADDR);
    while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600
           && --boguscnt >= 0) {
        outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);

        if (lance_debug > 5)
            printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
                   dev->name, csr0, inw(dev->base_addr + LANCE_DATA));

        if (csr0 & 0x0400)
            lance_rx(dev);

        if (csr0 & 0x0200) {
            int dirty_tx = lp->dirty_tx;

            while (dirty_tx < lp->cur_tx) {
                int entry = dirty_tx & TX_RING_MOD_MASK;
                int status = lp->tx_ring[entry].base;

                if (status < 0)
                    break;

                lp->tx_ring[entry].base = 0;

                if (status & 0x40000000) {
                    int err_status = lp->tx_ring[entry].misc;
                    lp->stats.tx_errors++;
                    if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
                    if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
                    if (err_status & 0x1000) lp->stats.tx_window_errors++;
                    if (err_status & 0x4000) {
                        lp->stats.tx_fifo_errors++;
                        printk("%s: Tx FIFO error! Status %4.4x.\n",
                               dev->name, csr0);
                        outw(0x0002, dev->base_addr + LANCE_DATA);
                    }
                } else {
                    if (status & 0x18000000)
                        lp->stats.collisions++;
                    lp->stats.tx_packets++;
                }

                if (lp->tx_skbuff[entry]) {
                    dev_kfree_skb(lp->tx_skbuff[entry], FREE_WRITE);
                    lp->tx_skbuff[entry] = 0;
                }
                dirty_tx++;
            }

#ifndef final_version
            if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
                printk("out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
                       dirty_tx, lp->cur_tx, lp->tx_full);
                dirty_tx += TX_RING_SIZE;
            }
#endif

            if (lp->tx_full && dev->tbusy
                && dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
                lp->tx_full = 0;
                dev->tbusy = 0;
                mark_bh(NET_BH);
            }

            lp->dirty_tx = dirty_tx;
        }

        if (csr0 & 0x4000) lp->stats.tx_errors++;
        if (csr0 & 0x1000) lp->stats.rx_errors++;
        if (csr0 & 0x0800) {
            printk("%s: Bus master arbitration failure, status %4.4x.\n",
                   dev->name, csr0);
            outw(0x0002, dev->base_addr + LANCE_DATA);
        }
    }

    outw(0x0000, dev->base_addr + LANCE_ADDR);
    outw(0x7940, dev->base_addr + LANCE_DATA);

    if (lance_debug > 4)
        printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
               dev->name, inw(ioaddr + LANCE_ADDR),
               inw(dev->base_addr + LANCE_DATA));

    dev->interrupt = 0;
    return;
}

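/*
 * Receive loop: walk the Rx ring while descriptors are owned by the
 * host, copy good frames into freshly allocated skbs and hand them to
 * netif_rx(), count error frames, then return each descriptor to the
 * chip.
 */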
static int
lance_rx(struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    int entry = lp->cur_rx & RX_RING_MOD_MASK;
    int i;

    while (lp->rx_ring[entry].base >= 0) {
        int status = lp->rx_ring[entry].base >> 24;

        if (status != 0x03) {
            if (status & 0x01)
                lp->stats.rx_errors++;
            if (status & 0x20) lp->stats.rx_frame_errors++;
            if (status & 0x10) lp->stats.rx_over_errors++;
            if (status & 0x08) lp->stats.rx_crc_errors++;
            if (status & 0x04) lp->stats.rx_fifo_errors++;
            lp->rx_ring[entry].base &= 0x03ffffff;
        } else {
            short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff) - 4;
            struct sk_buff *skb;

            skb = alloc_skb(pkt_len, GFP_ATOMIC);
            if (skb == NULL) {
                printk("%s: Memory squeeze, deferring packet.\n", dev->name);
                for (i = 0; i < RX_RING_SIZE; i++)
                    if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
                        break;

                if (i > RX_RING_SIZE - 2) {
                    lp->stats.rx_dropped++;
                    lp->rx_ring[entry].base |= 0x80000000;
                    lp->cur_rx++;
                }
                break;
            }
            skb->len = pkt_len;
            skb->dev = dev;
            memcpy(skb->data,
                   (unsigned char *)(lp->rx_ring[entry].base & 0x00ffffff),
                   pkt_len);
            netif_rx(skb);
            lp->stats.rx_packets++;
        }

        lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
        lp->rx_ring[entry].base |= 0x80000000;
        entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
    }

    return 0;
}

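/*
 * lance_close(): stop the chip (CSR0 = 0x0004), read the final
 * missed-frame count from register 112 on newer chips, release the DMA
 * channel and the IRQ, and clear the irq2dev_map entry.
 */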
static int
lance_close(struct device *dev)
{
    int ioaddr = dev->base_addr;
    struct lance_private *lp = (struct lance_private *)dev->priv;

    dev->start = 0;
    dev->tbusy = 1;

    if (lp->chip_version != OLD_LANCE) {
        outw(112, ioaddr+LANCE_ADDR);
        lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
    }
    outw(0, ioaddr+LANCE_ADDR);

    if (lance_debug > 1)
        printk("%s: Shutting down ethercard, status was %2.2x.\n",
               dev->name, inw(ioaddr+LANCE_DATA));

    outw(0x0004, ioaddr+LANCE_DATA);

    if (dev->dma != 4)
        disable_dma(dev->dma);

    free_irq(dev->irq);

    irq2dev_map[dev->irq] = 0;

    return 0;
}

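/*
 * Return the statistics block.  On chips newer than the original LANCE
 * the missed-frame counter is refreshed from register 112 first, with
 * interrupts disabled while the register-address port is borrowed.
 */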
static struct enet_statistics *
lance_get_stats(struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    short ioaddr = dev->base_addr;
    short saved_addr;

    if (lp->chip_version != OLD_LANCE) {
        cli();
        saved_addr = inw(ioaddr+LANCE_ADDR);
        outw(112, ioaddr+LANCE_ADDR);
        lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
        outw(saved_addr, ioaddr+LANCE_ADDR);
        sti();
    }

    return &lp->stats;
}

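/*
 * Set or clear the multicast filter.  A negative num_addrs selects
 * promiscuous mode; otherwise registers 8-11 (the logical-address
 * filter) are set to all-ones for num_addrs > 0 or all-zeros for
 * num_addrs == 0.  The chip is stopped and restarted around the change.
 */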
static void
set_multicast_list(struct device *dev, int num_addrs, void *addrs)
{
    short ioaddr = dev->base_addr;

    outw(0, ioaddr+LANCE_ADDR);
    outw(0x0004, ioaddr+LANCE_DATA);

    outw(15, ioaddr+LANCE_ADDR);
    if (num_addrs >= 0) {
        short multicast_table[4];
        int i;

        memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
        for (i = 0; i < 4; i++) {
            outw(8 + i, ioaddr+LANCE_ADDR);
            outw(multicast_table[i], ioaddr+LANCE_DATA);
        }
        outw(0x0000, ioaddr+LANCE_DATA);
    } else {
        printk("%s: Promiscuous mode enabled.\n", dev->name);
        outw(0x8000, ioaddr+LANCE_DATA);
    }

    outw(0, ioaddr+LANCE_ADDR);
    outw(0x0142, ioaddr+LANCE_DATA);
}