This source file includes the following definitions:
- lance_init
- lance_probe1
- lance_open
- lance_init_ring
- lance_start_xmit
- lance_interrupt
- lance_rx
- lance_close
- lance_get_stats
- set_multicast_list
/* lance.c: An AMD LANCE/PCnet Ethernet driver for Linux. */
static char *version = "lance.c:v1.06 11/29/94 becker@cesdis.gsfc.nasa.gov\n";

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/malloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/bios32.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
struct device *init_etherdev(struct device *dev, int sizeof_private,
							 unsigned long *mem_startp);
static unsigned int lance_portlist[] = {0x300, 0x320, 0x340, 0x360, 0};
unsigned long lance_probe1(int ioaddr, unsigned long mem_start);

#ifdef HAVE_DEVLIST
struct netdev_entry lance_drv =
{"lance", lance_probe1, LANCE_TOTAL_SIZE, lance_portlist};
#endif

#ifdef LANCE_DEBUG
int lance_debug = LANCE_DEBUG;
#else
int lance_debug = 1;
#endif
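/*
 * Overview (an editorial summary inferred from the code below, not original
 * documentation): the driver keeps statically sized Rx and Tx descriptor
 * rings and the chip init block inside struct lance_private, carves the
 * PKT_BUF_SZ receive buffers and the Tx bounce buffers out of the memory
 * allocated right behind that structure, and only ever hands the LANCE
 * 24-bit bus addresses, bouncing any Tx data that lives above 16MB.
 */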
#ifndef LANCE_LOG_TX_BUFFERS
#define LANCE_LOG_TX_BUFFERS 4
#define LANCE_LOG_RX_BUFFERS 4
#endif

#define TX_RING_SIZE		(1 << (LANCE_LOG_TX_BUFFERS))
#define TX_RING_MOD_MASK	(TX_RING_SIZE - 1)
#define TX_RING_LEN_BITS	((LANCE_LOG_TX_BUFFERS) << 29)

#define RX_RING_SIZE		(1 << (LANCE_LOG_RX_BUFFERS))
#define RX_RING_MOD_MASK	(RX_RING_SIZE - 1)
#define RX_RING_LEN_BITS	((LANCE_LOG_RX_BUFFERS) << 29)
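/* Editorial note: the LANCE init block apparently encodes each ring's length
 * in the top three bits of its ring-base word, hence the "<< 29" above; a
 * log2 value of 4 gives 16-entry rings. Inferred from how the *_RING_LEN_BITS
 * values are OR-ed into init_block.rx_ring and init_block.tx_ring below. */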
#define PKT_BUF_SZ		1544

#define LANCE_DATA		0x10
#define LANCE_ADDR		0x12
#define LANCE_RESET		0x14
#define LANCE_BUS_IF	0x16
#define LANCE_TOTAL_SIZE	0x18
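/* Register access convention used throughout this file: LANCE_ADDR is the
 * register-address (RAP) port and LANCE_DATA the register-data (RDP) port,
 * so a CSR is reached by writing its number to LANCE_ADDR and then reading
 * or writing LANCE_DATA. The RAP/RDP terminology is the conventional LANCE
 * naming and is an editorial gloss, not from the original source. */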
struct lance_rx_head {
	int base;
	short buf_length;
	short msg_length;
};

struct lance_tx_head {
	int base;
	short length;
	short misc;
};
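/* Descriptor layout, inferred from the code that manipulates these fields:
 * "base" packs the 24-bit buffer address in its low bytes and the
 * ownership/status bits in the top byte (OR-ing in 0x80000000 hands the
 * descriptor to the chip; the Rx status is read back as "base >> 24"),
 * while buf_length/length hold negative (two's-complement) byte counts,
 * e.g. "-PKT_BUF_SZ". */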
struct lance_init_block {
	unsigned short mode;
	unsigned char phys_addr[6];
	unsigned filter[2];

	unsigned rx_ring;
	unsigned tx_ring;
};
struct lance_private {
	char devname[8];

	struct lance_rx_head rx_ring[RX_RING_SIZE];
	struct lance_tx_head tx_ring[TX_RING_SIZE];
	struct lance_init_block init_block;

	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	long rx_buffs;

	char (*tx_bounce_buffs)[PKT_BUF_SZ];
	int cur_rx, cur_tx;
	int dirty_rx, dirty_tx;
	int dma;
	struct enet_statistics stats;
	char chip_version;
	char tx_full;
	char lock;
	int pad0, pad1;
};
static struct lance_chip_type {
	int id_number;
	char *name;
	int flags;
} chip_table[] = {
	{0x0000, "LANCE 7990", 0},
	{0x0003, "PCnet/ISA 79C960", 0},
	{0x2260, "PCnet/ISA+ 79C961", 0},
	{0x2420, "PCnet/PCI 79C970", 0},
	{0x2430, "PCnet32", 0},
	{0x0,    "PCnet (unknown)", 0},
};
enum {OLD_LANCE = 0, PCNET_ISA = 1, PCNET_ISAP = 2, PCNET_PCI = 3, PCNET_VLB = 4, LANCE_UNKNOWN = 5};

static unsigned char pci_irq_line = 0;
static int lance_open(struct device *dev);
static void lance_init_ring(struct device *dev);
static int lance_start_xmit(struct sk_buff *skb, struct device *dev);
static int lance_rx(struct device *dev);
static void lance_interrupt(int reg_ptr);
static int lance_close(struct device *dev);
static struct enet_statistics *lance_get_stats(struct device *dev);
#ifdef HAVE_MULTICAST
static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);
#endif
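/* lance_init() is the boot-time probe entry point: it first scans for a
 * PCnet/PCI device via the PCI BIOS (when CONFIG_PCI is set) and then walks
 * lance_portlist looking for the 0x57 0x57 signature bytes at offsets 14/15
 * of each candidate I/O region. This is an editorial summary of the code
 * below, not original documentation. */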
unsigned long lance_init(unsigned long mem_start, unsigned long mem_end)
{
	int *port;

#if defined(CONFIG_PCI)
#define AMD_VENDOR_ID 0x1022
#define AMD_DEVICE_ID 0x2000
	if (pcibios_present()) {
		int pci_index;
		printk("lance.c: PCI bios is present, checking for devices...\n");
		for (pci_index = 0; pci_index < 8; pci_index++) {
			unsigned char pci_bus, pci_device_fn;
			unsigned long pci_ioaddr;

			if (pcibios_find_device(AMD_VENDOR_ID, AMD_DEVICE_ID, pci_index,
									&pci_bus, &pci_device_fn) != 0)
				break;
			pcibios_read_config_byte(pci_bus, pci_device_fn,
									 PCI_INTERRUPT_LINE, &pci_irq_line);
			pcibios_read_config_dword(pci_bus, pci_device_fn,
									  PCI_BASE_ADDRESS_0, &pci_ioaddr);

			pci_ioaddr &= ~3;
			printk("Found PCnet/PCI at %#lx, irq %d (mem_start is %#lx).\n",
				   pci_ioaddr, pci_irq_line, mem_start);
			mem_start = lance_probe1(pci_ioaddr, mem_start);
			pci_irq_line = 0;
		}
	}
#endif

	for (port = lance_portlist; *port; port++) {
		int ioaddr = *port;

		if (check_region(ioaddr, LANCE_TOTAL_SIZE) == 0
			&& inb(ioaddr + 14) == 0x57
			&& inb(ioaddr + 15) == 0x57) {
			mem_start = lance_probe1(ioaddr, mem_start);
		}
	}

	return mem_start;
}
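/* lance_probe1() resets and identifies the chip at ioaddr, registers the
 * device with init_etherdev(), works out IRQ and DMA settings (PCI, the
 * HP Vectra and HP J2405A quirks, the PCnet/ISA+ bus-interface register, or
 * ISA autoprobing), and fills in the device methods. This summary is an
 * editorial gloss inferred from the code, not original documentation. */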
unsigned long lance_probe1(int ioaddr, unsigned long mem_start)
{
	struct device *dev;
	struct lance_private *lp;
	short dma_channels;
	int i, reset_val, lance_version;

	unsigned char hpJ2405A = 0;
	int hp_builtin = 0;
	static int did_version = 0;

	if (*((unsigned short *)0x000f0102) == 0x5048) {
		short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360 };
		int hp_port = (*((unsigned char *)0x000f00f1) & 1) ? 0x499 : 0x99;

		if ((inb(hp_port) & 0xc0) == 0x80
			&& ioaddr_table[inb(hp_port) & 3] == ioaddr)
			hp_builtin = hp_port;
	}
	hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00
				&& inb(ioaddr+2) == 0x09);

	reset_val = inw(ioaddr+LANCE_RESET);

	if (!hpJ2405A)
		outw(reset_val, ioaddr+LANCE_RESET);

	outw(0x0000, ioaddr+LANCE_ADDR);
	if (inw(ioaddr+LANCE_DATA) != 0x0004)
		return mem_start;
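	/* Editorial note: newer PCnet parts report a part number in CSR88/CSR89,
	 * presumably while an old LANCE will not hold the value 88 in its
	 * register-address port, which is what the read-back check below relies
	 * on. Inferred from the code, not from original comments. */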
	outw(88, ioaddr+LANCE_ADDR);
	if (inw(ioaddr+LANCE_ADDR) != 88) {
		lance_version = 0;
	} else {
		int chip_version = inw(ioaddr+LANCE_DATA);
		outw(89, ioaddr+LANCE_ADDR);
		chip_version |= inw(ioaddr+LANCE_DATA) << 16;
		if (lance_debug > 2)
			printk(" LANCE chip version is %#x.\n", chip_version);
		if ((chip_version & 0xfff) != 0x003)
			return mem_start;
		chip_version = (chip_version >> 12) & 0xffff;
		for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
			if (chip_table[lance_version].id_number == chip_version)
				break;
		}
	}
	dev = init_etherdev(0, sizeof(struct lance_private)
						+ PKT_BUF_SZ*(RX_RING_SIZE + TX_RING_SIZE),
						&mem_start);

	printk("%s: %s at %#3x,", dev->name, chip_table[lance_version].name, ioaddr);

	for (i = 0; i < 6; i++)
		printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));

	dev->base_addr = ioaddr;
	snarf_region(ioaddr, LANCE_TOTAL_SIZE);

	dev->priv = (void *)(((int)dev->priv + 7) & ~7);
	lp = (struct lance_private *)dev->priv;
	lp->rx_buffs = (long)dev->priv + sizeof(struct lance_private);
	lp->tx_bounce_buffs = (char (*)[PKT_BUF_SZ])
		(lp->rx_buffs + PKT_BUF_SZ*RX_RING_SIZE);

#ifndef final_version
	if ((int)(lp->rx_ring) & 0x07) {
		printk(" **ERROR** LANCE Rx and Tx rings not on even boundary.\n");
		return mem_start;
	}
#endif
	lp->chip_version = lance_version;

	lp->init_block.mode = 0x0003;
	for (i = 0; i < 6; i++)
		lp->init_block.phys_addr[i] = dev->dev_addr[i];
	lp->init_block.filter[0] = 0x00000000;
	lp->init_block.filter[1] = 0x00000000;
	lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
	lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;

	outw(0x0001, ioaddr+LANCE_ADDR);
	inw(ioaddr+LANCE_ADDR);
	outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
	outw(0x0002, ioaddr+LANCE_ADDR);
	inw(ioaddr+LANCE_ADDR);
	outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);
	outw(0x0000, ioaddr+LANCE_ADDR);
	inw(ioaddr+LANCE_ADDR);
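	/* Editorial note: CSR1 and CSR2 hold the low and high 16 bits of the
	 * init block's bus address, which is what the outw() pairs above set up;
	 * a mode word of 0x0003 appears to leave both receiver and transmitter
	 * disabled until lance_open() rebuilds the init block. Inferred from the
	 * code, not from original comments. */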
	if (pci_irq_line) {
		dev->dma = 4;
		dev->irq = pci_irq_line;
	} else if (hp_builtin) {
		char dma_tbl[4] = {3, 5, 6, 0};
		char irq_tbl[8] = {3, 4, 5, 9};
		unsigned char port_val = inb(hp_builtin);
		dev->dma = dma_tbl[(port_val >> 4) & 3];
		dev->irq = irq_tbl[(port_val >> 2) & 3];
		printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
	} else if (hpJ2405A) {
		char dma_tbl[4] = {3, 5, 6, 7};
		char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
		short reset_val = inw(ioaddr+LANCE_RESET);
		dev->dma = dma_tbl[(reset_val >> 2) & 3];
		dev->irq = irq_tbl[(reset_val >> 4) & 7];
		printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
	} else if (lance_version == PCNET_ISAP) {
		short bus_info;
		outw(8, ioaddr+LANCE_ADDR);
		bus_info = inw(ioaddr+LANCE_BUS_IF);
		dev->dma = bus_info & 0x07;
		dev->irq = (bus_info >> 4) & 0x0F;
	} else {
		if (dev->mem_start & 0x07)
			dev->dma = dev->mem_start & 0x07;
	}

	if (dev->dma == 0) {
		dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
			(inb(DMA2_STAT_REG) & 0xf0);
	}
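	/* Editorial note on the DMA handling below: dev->dma == 4 is used as a
	 * "no ISA DMA channel needed" marker (PCI and other bus-master setups),
	 * while dma_channels snapshots the 8237 status registers so that the
	 * autoprobe loop can skip channels that already look busy. Inferred from
	 * the code, not from original comments. */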
	if (dev->irq >= 2)
		printk(" assigned IRQ %d", dev->irq);
	else {
		autoirq_setup(0);

		outw(0x0041, ioaddr+LANCE_DATA);

		dev->irq = autoirq_report(1);
		if (dev->irq)
			printk(", probed IRQ %d", dev->irq);
		else {
			printk(", failed to detect IRQ line.\n");
			return mem_start;
		}

		if (inw(ioaddr+LANCE_DATA) & 0x0100)
			dev->dma = 4;
	}
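	/* Editorial note: the fallback below probes for a free DMA channel by
	 * putting each candidate channel into cascade mode, restarting the chip
	 * (CSR0 INIT, 0x0001) and watching whether initialization completes
	 * (IDON, 0x0100) -- which it only can if the chip was able to master the
	 * bus through that channel. Inferred from the code. */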
	if (dev->dma == 4) {
		printk(", no DMA needed.\n");
	} else if (dev->dma) {
		if (request_dma(dev->dma, "lance")) {
			printk("DMA %d allocation failed.\n", dev->dma);
			return mem_start;
		} else
			printk(", assigned DMA %d.\n", dev->dma);
	} else {
		int dmas[] = { 5, 6, 7, 3 }, boguscnt;

		for (i = 0; i < 4; i++) {
			int dma = dmas[i];

			if (test_bit(dma, &dma_channels))
				continue;
			outw(0x7f04, ioaddr+LANCE_DATA);
			if (request_dma(dma, "lance"))
				continue;
			set_dma_mode(dma, DMA_MODE_CASCADE);
			enable_dma(dma);

			outw(0x0001, ioaddr+LANCE_DATA);
			for (boguscnt = 100; boguscnt > 0; --boguscnt)
				if (inw(ioaddr+LANCE_DATA) & 0x0900)
					break;
			if (inw(ioaddr+LANCE_DATA) & 0x0100) {
				dev->dma = dma;
				printk(", DMA %d.\n", dev->dma);
				break;
			} else {
				disable_dma(dma);
				free_dma(dma);
			}
		}
		if (i == 4) {
			printk("DMA detection failed.\n");
			return mem_start;
		}
	}
	if (lp->chip_version != OLD_LANCE) {
		outw(0x0002, ioaddr+LANCE_ADDR);
		outw(0x0002, ioaddr+LANCE_BUS_IF);
	}

	if (lance_debug > 0 && did_version++ == 0)
		printk(version);

	dev->open = &lance_open;
	dev->hard_start_xmit = &lance_start_xmit;
	dev->stop = &lance_close;
	dev->get_stats = &lance_get_stats;
	dev->set_multicast_list = &set_multicast_list;

	return mem_start;
}
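/* lance_open(): grab the IRQ (and, for ISA cards, the cascade DMA channel),
 * reset the chip, rebuild the rings, point CSR1/CSR2 at the init block, then
 * issue INIT (CSR0 = 0x0001), poll for IDON (0x0100) and finally write
 * 0x0142 (IDON ack, interrupt enable, start). The CSR bit names are the
 * usual LANCE ones and are an editorial gloss; the sequence itself is read
 * straight from the code below. */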
static int
lance_open(struct device *dev)
{
	struct lance_private *lp = (struct lance_private *)dev->priv;
	int ioaddr = dev->base_addr;
	int i;

	if (dev->irq == 0 ||
		request_irq(dev->irq, &lance_interrupt, 0, "lance")) {
		return -EAGAIN;
	}

	irq2dev_map[dev->irq] = dev;

	inw(ioaddr+LANCE_RESET);

	if (dev->dma != 4) {
		enable_dma(dev->dma);
		set_dma_mode(dev->dma, DMA_MODE_CASCADE);
	}

	if (lp->chip_version == OLD_LANCE)
		outw(0, ioaddr+LANCE_RESET);

	if (lp->chip_version != OLD_LANCE) {
		outw(0x0002, ioaddr+LANCE_ADDR);
		outw(0x0002, ioaddr+LANCE_BUS_IF);
	}

	if (lance_debug > 1)
		printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
			   dev->name, dev->irq, dev->dma, (int) lp->tx_ring, (int) lp->rx_ring,
			   (int) &lp->init_block);

	lance_init_ring(dev);

	outw(0x0001, ioaddr+LANCE_ADDR);
	outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
	outw(0x0002, ioaddr+LANCE_ADDR);
	outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);

	outw(0x0004, ioaddr+LANCE_ADDR);
	outw(0x0d15, ioaddr+LANCE_DATA);

	outw(0x0000, ioaddr+LANCE_ADDR);
	outw(0x0001, ioaddr+LANCE_DATA);

	dev->tbusy = 0;
	dev->interrupt = 0;
	dev->start = 1;
	i = 0;
	while (i++ < 100)
		if (inw(ioaddr+LANCE_DATA) & 0x0100)
			break;
	outw(0x0142, ioaddr+LANCE_DATA);

	if (lance_debug > 2)
		printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
			   dev->name, i, (int) &lp->init_block, inw(ioaddr+LANCE_DATA));

	return 0;
}
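/* lance_init_ring(): hand every Rx buffer to the chip (OWN bit set, length
 * stored as -PKT_BUF_SZ), clear the Tx ring, and refresh the init block with
 * the station address and ring pointers. Editorial summary of the code
 * below. */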
static void
lance_init_ring(struct device *dev)
{
	struct lance_private *lp = (struct lance_private *)dev->priv;
	int i;

	lp->lock = 0;
	lp->tx_full = 0;
	lp->cur_rx = lp->cur_tx = 0;
	lp->dirty_rx = lp->dirty_tx = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		lp->rx_ring[i].base = (lp->rx_buffs + i*PKT_BUF_SZ) | 0x80000000;
		lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		lp->tx_ring[i].base = 0;
	}

	lp->init_block.mode = 0x0000;
	for (i = 0; i < 6; i++)
		lp->init_block.phys_addr[i] = dev->dev_addr[i];
	lp->init_block.filter[0] = 0x00000000;
	lp->init_block.filter[1] = 0x00000000;
	lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
	lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;
}
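/* lance_start_xmit(): after a 20-tick timeout check (with a full ring dump
 * and chip restart on timeout), queue the skb on the next Tx descriptor.
 * Buffers beyond the LANCE's 24-bit (16MB) DMA reach are copied into a
 * bounce buffer; the 0x83000000 OR sets the OWN, STP and ENP descriptor
 * bits. The bit names are the standard LANCE ones, added here as an
 * editorial gloss. */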
static int
lance_start_xmit(struct sk_buff *skb, struct device *dev)
{
	struct lance_private *lp = (struct lance_private *)dev->priv;
	int ioaddr = dev->base_addr;
	int entry;

	if (dev->tbusy) {
		int tickssofar = jiffies - dev->trans_start;
		if (tickssofar < 20)
			return 1;
		outw(0, ioaddr+LANCE_ADDR);
		printk("%s: transmit timed out, status %4.4x, resetting.\n",
			   dev->name, inw(ioaddr+LANCE_DATA));
		outw(0x0004, ioaddr+LANCE_DATA);
		lp->stats.tx_errors++;
#ifndef final_version
		{
			int i;
			printk(" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
				   lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
				   lp->cur_rx);
			for (i = 0; i < RX_RING_SIZE; i++)
				printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
					   lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
					   lp->rx_ring[i].msg_length);
			for (i = 0; i < TX_RING_SIZE; i++)
				printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
					   lp->tx_ring[i].base, -lp->tx_ring[i].length,
					   lp->tx_ring[i].misc);
			printk("\n");
		}
#endif
		lance_init_ring(dev);
		outw(0x0043, ioaddr+LANCE_DATA);

		dev->tbusy = 0;
		dev->trans_start = jiffies;

		return 0;
	}

	if (skb == NULL) {
		dev_tint(dev);
		return 0;
	}

	if (skb->len <= 0)
		return 0;

	if (lance_debug > 3) {
		outw(0x0000, ioaddr+LANCE_ADDR);
		printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
			   inw(ioaddr+LANCE_DATA));
		outw(0x0000, ioaddr+LANCE_DATA);
	}

	if (set_bit(0, (void*)&dev->tbusy) != 0) {
		printk("%s: Transmitter access conflict.\n", dev->name);
		return 1;
	}

	if (set_bit(0, (void*)&lp->lock) != 0) {
		if (lance_debug > 0)
			printk("%s: tx queue lock!.\n", dev->name);
		return 1;
	}

	entry = lp->cur_tx & TX_RING_MOD_MASK;

	if (lp->chip_version == OLD_LANCE) {
		lp->tx_ring[entry].length =
			-(ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN);
	} else
		lp->tx_ring[entry].length = -skb->len;

	lp->tx_ring[entry].misc = 0x0000;

	if ((int)(skb->data) + skb->len > 0x01000000) {
		if (lance_debug > 5)
			printk("%s: bouncing a high-memory packet (%#x).\n",
				   dev->name, (int)(skb->data));
		memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len);
		lp->tx_ring[entry].base =
			(int)(lp->tx_bounce_buffs + entry) | 0x83000000;
		dev_kfree_skb(skb, FREE_WRITE);
	} else {
		lp->tx_skbuff[entry] = skb;
		lp->tx_ring[entry].base = (int)(skb->data) | 0x83000000;
	}
	lp->cur_tx++;

	outw(0x0000, ioaddr+LANCE_ADDR);
	outw(0x0048, ioaddr+LANCE_DATA);

	dev->trans_start = jiffies;

	cli();
	lp->lock = 0;
	if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base == 0)
		dev->tbusy = 0;
	else
		lp->tx_full = 1;
	sti();

	return 0;
}
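/* lance_interrupt(): acknowledge the pending CSR0 status bits, hand received
 * frames to lance_rx(), reap completed Tx descriptors (counting errors from
 * the misc field), and re-enable interrupts on the way out. The 0x8600 loop
 * mask and the bit tests match the usual LANCE CSR0 and Tx-descriptor error
 * bits; this summary is an editorial gloss on the code below. */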
static void
lance_interrupt(int reg_ptr)
{
	int irq = -(((struct pt_regs *)reg_ptr)->orig_eax + 2);
	struct device *dev = (struct device *)(irq2dev_map[irq]);
	struct lance_private *lp;
	int csr0, ioaddr, boguscnt = 10;

	if (dev == NULL) {
		printk("lance_interrupt(): irq %d for unknown device.\n", irq);
		return;
	}

	ioaddr = dev->base_addr;
	lp = (struct lance_private *)dev->priv;
	if (dev->interrupt)
		printk("%s: Re-entering the interrupt handler.\n", dev->name);

	dev->interrupt = 1;

	outw(0x00, dev->base_addr + LANCE_ADDR);
	while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600
		   && --boguscnt >= 0) {
		outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);

		if (lance_debug > 5)
			printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
				   dev->name, csr0, inw(dev->base_addr + LANCE_DATA));

		if (csr0 & 0x0400)
			lance_rx(dev);

		if (csr0 & 0x0200) {
			int dirty_tx = lp->dirty_tx;

			while (dirty_tx < lp->cur_tx) {
				int entry = dirty_tx & TX_RING_MOD_MASK;
				int status = lp->tx_ring[entry].base;

				if (status < 0)
					break;

				lp->tx_ring[entry].base = 0;

				if (status & 0x40000000) {
					int err_status = lp->tx_ring[entry].misc;
					lp->stats.tx_errors++;
					if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
					if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
					if (err_status & 0x1000) lp->stats.tx_window_errors++;
					if (err_status & 0x4000) {
						lp->stats.tx_fifo_errors++;
						printk("%s: Tx FIFO error! Status %4.4x.\n",
							   dev->name, csr0);
						outw(0x0002, dev->base_addr + LANCE_DATA);
					}
				} else {
					if (status & 0x18000000)
						lp->stats.collisions++;
					lp->stats.tx_packets++;
				}

				if (lp->tx_skbuff[entry]) {
					dev_kfree_skb(lp->tx_skbuff[entry], FREE_WRITE);
					lp->tx_skbuff[entry] = 0;
				}
				dirty_tx++;
			}

#ifndef final_version
			if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
				printk("out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
					   dirty_tx, lp->cur_tx, lp->tx_full);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (lp->tx_full && dev->tbusy
				&& dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
				lp->tx_full = 0;
				dev->tbusy = 0;
				mark_bh(NET_BH);
			}

			lp->dirty_tx = dirty_tx;
		}

		if (csr0 & 0x4000) lp->stats.tx_errors++;
		if (csr0 & 0x1000) lp->stats.rx_errors++;
		if (csr0 & 0x0800) {
			printk("%s: Bus master arbitration failure, status %4.4x.\n",
				   dev->name, csr0);
			outw(0x0002, dev->base_addr + LANCE_DATA);
		}
	}

	outw(0x0000, dev->base_addr + LANCE_ADDR);
	outw(0x7940, dev->base_addr + LANCE_DATA);

	if (lance_debug > 4)
		printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
			   dev->name, inw(ioaddr + LANCE_ADDR),
			   inw(dev->base_addr + LANCE_DATA));

	dev->interrupt = 0;
	return;
}
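/* lance_rx(): walk the Rx ring while descriptors are owned by the host
 * (base >= 0, i.e. the OWN bit is clear), copy good frames into freshly
 * allocated skbs for netif_rx(), count the per-bit errors otherwise, and
 * give each descriptor back to the chip. Editorial summary of the code
 * below. */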
static int
lance_rx(struct device *dev)
{
	struct lance_private *lp = (struct lance_private *)dev->priv;
	int entry = lp->cur_rx & RX_RING_MOD_MASK;
	int i;

	while (lp->rx_ring[entry].base >= 0) {
		int status = lp->rx_ring[entry].base >> 24;

		if (status != 0x03) {
			if (status & 0x01)
				lp->stats.rx_errors++;
			if (status & 0x20) lp->stats.rx_frame_errors++;
			if (status & 0x10) lp->stats.rx_over_errors++;
			if (status & 0x08) lp->stats.rx_crc_errors++;
			if (status & 0x04) lp->stats.rx_fifo_errors++;
			lp->rx_ring[entry].base &= 0x03ffffff;
		} else {
			short pkt_len = lp->rx_ring[entry].msg_length;
			struct sk_buff *skb;

			skb = alloc_skb(pkt_len, GFP_ATOMIC);
			if (skb == NULL) {
				printk("%s: Memory squeeze, deferring packet.\n", dev->name);
				for (i = 0; i < RX_RING_SIZE; i++)
					if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
						break;

				if (i > RX_RING_SIZE - 2) {
					lp->stats.rx_dropped++;
					lp->rx_ring[entry].base |= 0x80000000;
					lp->cur_rx++;
				}
				break;
			}
			skb->len = pkt_len;
			skb->dev = dev;
			memcpy(skb->data,
				   (unsigned char *)(lp->rx_ring[entry].base & 0x00ffffff),
				   pkt_len);
			netif_rx(skb);
			lp->stats.rx_packets++;
		}

		lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
		lp->rx_ring[entry].base |= 0x80000000;
		entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
	}

	return 0;
}
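/* lance_close(): pick up the final missed-frame count from CSR112 (on
 * non-OLD_LANCE chips), stop the chip with CSR0 STOP (0x0004), and release
 * the DMA channel and IRQ. Editorial summary of the code below. */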
static int
lance_close(struct device *dev)
{
	int ioaddr = dev->base_addr;
	struct lance_private *lp = (struct lance_private *)dev->priv;

	dev->start = 0;
	dev->tbusy = 1;

	if (lp->chip_version != OLD_LANCE) {
		outw(112, ioaddr+LANCE_ADDR);
		lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
	}
	outw(0, ioaddr+LANCE_ADDR);

	if (lance_debug > 1)
		printk("%s: Shutting down ethercard, status was %2.2x.\n",
			   dev->name, inw(ioaddr+LANCE_DATA));

	outw(0x0004, ioaddr+LANCE_DATA);

	if (dev->dma != 4)
		disable_dma(dev->dma);

	free_irq(dev->irq);

	irq2dev_map[dev->irq] = 0;

	return 0;
}
static struct enet_statistics *
lance_get_stats(struct device *dev)
{
	struct lance_private *lp = (struct lance_private *)dev->priv;
	short ioaddr = dev->base_addr;
	short saved_addr;

	if (lp->chip_version != OLD_LANCE) {
		cli();
		saved_addr = inw(ioaddr+LANCE_ADDR);
		outw(112, ioaddr+LANCE_ADDR);
		lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
		outw(saved_addr, ioaddr+LANCE_ADDR);
		sti();
	}

	return &lp->stats;
}
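/* set_multicast_list(): the chip is stopped, then the multicast filter words
 * are written as all-zeros (no multicast) or all-ones (accept all multicast)
 * depending on num_addrs, or promiscuous mode (0x8000) is selected when
 * num_addrs is negative, and finally the transceiver is restarted with
 * CSR0 = 0x0142. Editorial summary of the code below. */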
static void
set_multicast_list(struct device *dev, int num_addrs, void *addrs)
{
	short ioaddr = dev->base_addr;

	outw(0, ioaddr+LANCE_ADDR);
	outw(0x0004, ioaddr+LANCE_DATA);

	outw(15, ioaddr+LANCE_ADDR);
	if (num_addrs >= 0) {
		short multicast_table[4];
		int i;

		memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
		for (i = 0; i < 4; i++) {
			outw(8 + i, ioaddr+LANCE_ADDR);
			outw(multicast_table[i], ioaddr+LANCE_DATA);
		}
		outw(0x0000, ioaddr+LANCE_DATA);
	} else {
		printk("%s: Promiscuous mode enabled.\n", dev->name);
		outw(0x8000, ioaddr+LANCE_DATA);
	}

	outw(0, ioaddr+LANCE_ADDR);
	outw(0x0142, ioaddr+LANCE_DATA);
}