This source file includes the following definitions:
- lance_init
- lance_probe1
- lance_open
- lance_purge_tx_ring
- lance_init_ring
- lance_restart
- lance_start_xmit
- lance_interrupt
- lance_rx
- lance_close
- lance_get_stats
- set_multicast_list
static const char *version = "lance.c:v1.08.01 Mar 6 1996 saw@shade.msu.ru\n";

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/malloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/bios32.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
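/* The ISA I/O addresses to probe for a LANCE board (the list is terminated by 0). */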
static unsigned int lance_portlist[] = {0x300, 0x320, 0x340, 0x360, 0};
void lance_probe1(int ioaddr);

#ifdef HAVE_DEVLIST
struct netdev_entry lance_drv =
{"lance", lance_probe1, LANCE_TOTAL_SIZE, lance_portlist};
#endif

#ifdef LANCE_DEBUG
int lance_debug = LANCE_DEBUG;
#else
int lance_debug = 1;
#endif
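/*
 * Allocate a buffer from the low, ISA-DMA-reachable zone and round the
 * returned pointer up to the next 8-byte boundary; the descriptor rings and
 * init block handed to the LANCE must live in aligned low memory.
 */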
#define LANCE_KMALLOC(x) \
    ((void *) (((unsigned long)kmalloc((x)+7, GFP_DMA | GFP_KERNEL)+7) & ~7))

#ifndef LANCE_LOG_TX_BUFFERS
#define LANCE_LOG_TX_BUFFERS 4
#define LANCE_LOG_RX_BUFFERS 4
#endif

#define TX_RING_SIZE      (1 << (LANCE_LOG_TX_BUFFERS))
#define TX_RING_MOD_MASK  (TX_RING_SIZE - 1)
#define TX_RING_LEN_BITS  ((LANCE_LOG_TX_BUFFERS) << 29)

#define RX_RING_SIZE      (1 << (LANCE_LOG_RX_BUFFERS))
#define RX_RING_MOD_MASK  (RX_RING_SIZE - 1)
#define RX_RING_LEN_BITS  ((LANCE_LOG_RX_BUFFERS) << 29)

#define PKT_BUF_SZ        1544
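/* Offsets of the LANCE registers from the board's base I/O address. */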
#define LANCE_DATA       0x10
#define LANCE_ADDR       0x12
#define LANCE_RESET      0x14
#define LANCE_BUS_IF     0x16
#define LANCE_TOTAL_SIZE 0x18
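/*
 * Rx and Tx ring descriptors, laid out exactly as the chip reads them in
 * memory.  Buffer lengths are stored as two's-complement negative values.
 */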
struct lance_rx_head {
    int base;
    short buf_length;
    short msg_length;
};

struct lance_tx_head {
    int base;
    short length;
    short misc;
};
struct lance_init_block {
    unsigned short mode;
    unsigned char phys_addr[6];
    unsigned filter[2];

    unsigned rx_ring;
    unsigned tx_ring;
};
struct lance_private {
    struct lance_rx_head rx_ring[RX_RING_SIZE];
    struct lance_tx_head tx_ring[TX_RING_SIZE];
    struct lance_init_block init_block;
    const char *name;

    struct sk_buff* tx_skbuff[TX_RING_SIZE];
    unsigned long rx_buffs;

    char (*tx_bounce_buffs)[PKT_BUF_SZ];
    int cur_rx, cur_tx;
    int dirty_rx, dirty_tx;
    int dma;
    struct enet_statistics stats;
    unsigned char chip_version;
    char tx_full;
    char lock;
};

#define LANCE_MUST_PAD          0x00000001
#define LANCE_ENABLE_AUTOSELECT 0x00000002
#define LANCE_MUST_REINIT_RING  0x00000004
#define LANCE_MUST_UNRESET      0x00000008
#define LANCE_HAS_MISSED_FRAME  0x00000010
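/*
 * The known chip variants.  lance_probe1() reads the part number from
 * CSR88/CSR89 and matches it against id_number; entry 0 covers the original
 * LANCE (no version register) and the last entry catches unrecognized PCnet parts.
 */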
static struct lance_chip_type {
    int id_number;
    const char *name;
    int flags;
} chip_table[] = {
    {0x0000, "LANCE 7990",
        LANCE_MUST_PAD + LANCE_MUST_UNRESET},
    {0x0003, "PCnet/ISA 79C960",
        LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
        LANCE_HAS_MISSED_FRAME},
    {0x2260, "PCnet/ISA+ 79C961",
        LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
        LANCE_HAS_MISSED_FRAME},
    {0x2420, "PCnet/PCI 79C970",
        LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
        LANCE_HAS_MISSED_FRAME},

    {0x2430, "PCnet32",
        LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
        LANCE_HAS_MISSED_FRAME},
    {0x0, "PCnet (unknown)",
        LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
        LANCE_HAS_MISSED_FRAME},
};

enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, LANCE_UNKNOWN=5};

static unsigned char pci_irq_line = 0;
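/*
 * Left set when the machine has memory above 16MB, so Tx packets may need to
 * be copied into low bounce buffers before the LANCE can reach them.
 */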
static unsigned char lance_need_isa_bounce_buffers = 1;

static int lance_open(struct device *dev);
static void lance_init_ring(struct device *dev);
static int lance_start_xmit(struct sk_buff *skb, struct device *dev);
static int lance_rx(struct device *dev);
static void lance_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static int lance_close(struct device *dev);
static struct enet_statistics *lance_get_stats(struct device *dev);
static void set_multicast_list(struct device *dev);
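/*
 * Probe for LANCE/PCnet boards: scan the PCI bus for AMD PCnet/PCI devices
 * first, then try the ISA addresses in lance_portlist.
 */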
int lance_init(void)
{
    int *port;

    if (high_memory <= 16*1024*1024)
        lance_need_isa_bounce_buffers = 0;

#if defined(CONFIG_PCI)
    if (pcibios_present()) {
        int pci_index;
        printk("lance.c: PCI bios is present, checking for devices...\n");
        for (pci_index = 0; pci_index < 8; pci_index++) {
            unsigned char pci_bus, pci_device_fn;
            unsigned int pci_ioaddr;
            unsigned short pci_command;

            if (pcibios_find_device (PCI_VENDOR_ID_AMD,
                        PCI_DEVICE_ID_AMD_LANCE, pci_index,
                        &pci_bus, &pci_device_fn) != 0)
                break;
            pcibios_read_config_byte(pci_bus, pci_device_fn,
                        PCI_INTERRUPT_LINE, &pci_irq_line);
            pcibios_read_config_dword(pci_bus, pci_device_fn,
                        PCI_BASE_ADDRESS_0, &pci_ioaddr);

            pci_ioaddr &= ~3;

            pcibios_read_config_word(pci_bus, pci_device_fn,
                        PCI_COMMAND, &pci_command);
            if ( ! (pci_command & PCI_COMMAND_MASTER)) {
                printk("PCI Master Bit has not been set. Setting...\n");
                pci_command |= PCI_COMMAND_MASTER;
                pcibios_write_config_word(pci_bus, pci_device_fn,
                            PCI_COMMAND, pci_command);
            }
            printk("Found PCnet/PCI at %#x, irq %d.\n",
                        pci_ioaddr, pci_irq_line);
            lance_probe1(pci_ioaddr);
            pci_irq_line = 0;
        }
    }
#endif
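    /*
     * Now scan the ISA port list.  Only probe addresses whose station
     * address PROM shows plausible signature bytes at offsets 14 and 15
     * (0x52/0x57 and 0x57/0x44).
     */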
    for (port = lance_portlist; *port; port++) {
        int ioaddr = *port;

        if ( check_region(ioaddr, LANCE_TOTAL_SIZE) == 0) {
            char offset15, offset14 = inb(ioaddr + 14);

            if ((offset14 == 0x52 || offset14 == 0x57) &&
                ((offset15 = inb(ioaddr + 15)) == 0x57 || offset15 == 0x44))
                lance_probe1(ioaddr);
        }
    }

    return 0;
}
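
/*
 * Probe one I/O address: identify the chip, read the station address from
 * the PROM, allocate the rings and init block in low memory, and determine
 * the IRQ and DMA channel to use.
 */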
void lance_probe1(int ioaddr)
{
    struct device *dev;
    struct lance_private *lp;
    short dma_channels;
    int i, reset_val, lance_version;
    const char *chipname;

    unsigned char hpJ2405A = 0;
    int hp_builtin = 0;
    static int did_version = 0;
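    /*
     * First check for an HP Vectra on-board LANCE: the BIOS at 0x000f0102
     * contains 'HP' on these machines, and a status port (0x99 or 0x499)
     * encodes the board's I/O address, IRQ and DMA settings.
     */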
    if ( *((unsigned short *) 0x000f0102) == 0x5048) {
        static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
        int hp_port = ( *((unsigned char *) 0x000f00f1) & 1) ? 0x499 : 0x99;

        if ((inb(hp_port) & 0xc0) == 0x80
            && ioaddr_table[inb(hp_port) & 3] == ioaddr)
            hp_builtin = hp_port;
    }
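    /* The HP J2405A carries the HP OUI 08:00:09 at the start of its address PROM. */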
    hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00
                && inb(ioaddr+2) == 0x09);
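    /*
     * Reading the reset port stops the chip; writing the value back un-resets
     * boards that need it (skipped on the HP J2405A).  CSR0 must then read
     * back as 0x0004 (STOP), otherwise there is no LANCE at this address.
     */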
    reset_val = inw(ioaddr+LANCE_RESET);

    if (!hpJ2405A)
        outw(reset_val, ioaddr+LANCE_RESET);

    outw(0x0000, ioaddr+LANCE_ADDR);
    if (inw(ioaddr+LANCE_DATA) != 0x0004)
        return;
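    /*
     * The newer PCnet chips report a part number in CSR88/CSR89; the address
     * (RAP) register does not read back as 88 on an original LANCE, which is
     * how the old chips are recognized (lance_version = 0).
     */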
    outw(88, ioaddr+LANCE_ADDR);
    if (inw(ioaddr+LANCE_ADDR) != 88) {
        lance_version = 0;
    } else {
        int chip_version = inw(ioaddr+LANCE_DATA);
        outw(89, ioaddr+LANCE_ADDR);
        chip_version |= inw(ioaddr+LANCE_DATA) << 16;
        if (lance_debug > 2)
            printk(" LANCE chip version is %#x.\n", chip_version);
        if ((chip_version & 0xfff) != 0x003)
            return;
        chip_version = (chip_version >> 12) & 0xffff;
        for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
            if (chip_table[lance_version].id_number == chip_version)
                break;
        }
    }

    dev = init_etherdev(0, 0);
    chipname = chip_table[lance_version].name;
    printk("%s: %s at %#3x,", dev->name, chipname, ioaddr);
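    /* The first six bytes of the station address PROM are the ethernet address. */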
    for (i = 0; i < 6; i++)
        printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));

    dev->base_addr = ioaddr;
    request_region(ioaddr, LANCE_TOTAL_SIZE, chip_table[lance_version].name);

    lp = (struct lance_private *) LANCE_KMALLOC(sizeof(*lp));
    if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
    memset(lp, 0, sizeof(*lp));
    dev->priv = lp;
    lp->name = chipname;

    lp->rx_buffs = (unsigned long) LANCE_KMALLOC(PKT_BUF_SZ*RX_RING_SIZE);
    lp->tx_bounce_buffs = NULL;
    if (lance_need_isa_bounce_buffers)
        lp->tx_bounce_buffs = LANCE_KMALLOC(PKT_BUF_SZ*TX_RING_SIZE);

    lp->chip_version = lance_version;

    lp->init_block.mode = 0x0003;
    for (i = 0; i < 6; i++)
        lp->init_block.phys_addr[i] = dev->dev_addr[i];
    lp->init_block.filter[0] = 0x00000000;
    lp->init_block.filter[1] = 0x00000000;
    lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
    lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;
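    /* Tell the chip where the init block is: its low 16 bits go to CSR1, the high bits to CSR2. */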
    outw(0x0001, ioaddr+LANCE_ADDR);
    inw(ioaddr+LANCE_ADDR);
    outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
    outw(0x0002, ioaddr+LANCE_ADDR);
    inw(ioaddr+LANCE_ADDR);
    outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);
    outw(0x0000, ioaddr+LANCE_ADDR);
    inw(ioaddr+LANCE_ADDR);
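    /*
     * Work out the IRQ and DMA channel.  PCI and the HP boards report them
     * directly, the PCnet/ISA+ keeps them in its bus-interface register,
     * and anything else is probed below.
     */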
    if (pci_irq_line) {
        dev->dma = 4;
        dev->irq = pci_irq_line;
    } else if (hp_builtin) {
        static const char dma_tbl[4] = {3, 5, 6, 0};
        static const char irq_tbl[4] = {3, 4, 5, 9};
        unsigned char port_val = inb(hp_builtin);
        dev->dma = dma_tbl[(port_val >> 4) & 3];
        dev->irq = irq_tbl[(port_val >> 2) & 3];
        printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
    } else if (hpJ2405A) {
        static const char dma_tbl[4] = {3, 5, 6, 7};
        static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
        short reset_val = inw(ioaddr+LANCE_RESET);
        dev->dma = dma_tbl[(reset_val >> 2) & 3];
        dev->irq = irq_tbl[(reset_val >> 4) & 7];
        printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
    } else if (lance_version == PCNET_ISAP) {
        short bus_info;
        outw(8, ioaddr+LANCE_ADDR);
        bus_info = inw(ioaddr+LANCE_BUS_IF);
        dev->dma = bus_info & 0x07;
        dev->irq = (bus_info >> 4) & 0x0F;
    } else {
        if (dev->mem_start & 0x07)
            dev->dma = dev->mem_start & 0x07;
    }

    if (dev->dma == 0) {
        dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
            (inb(DMA2_STAT_REG) & 0xf0);
    }
    if (dev->irq >= 2)
        printk(" assigned IRQ %d", dev->irq);
    else {
        autoirq_setup(0);

        outw(0x0041, ioaddr+LANCE_DATA);

        dev->irq = autoirq_report(1);
        if (dev->irq)
            printk(", probed IRQ %d", dev->irq);
        else {
            printk(", failed to detect IRQ line.\n");
            return;
        }

        if (inw(ioaddr+LANCE_DATA) & 0x0100)
            dev->dma = 4;
    }

    if (dev->dma == 4) {
        printk(", no DMA needed.\n");
    } else if (dev->dma) {
        if (request_dma(dev->dma, chipname)) {
            printk("DMA %d allocation failed.\n", dev->dma);
            return;
        } else
            printk(", assigned DMA %d.\n", dev->dma);
    } else {
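        /*
         * No DMA channel is known yet: try each free channel in cascade
         * mode and trigger an INIT; if IDON comes back in CSR0 the channel
         * works.
         */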
        for (i = 0; i < 4; i++) {
            static const char dmas[] = { 5, 6, 7, 3 };
            int dma = dmas[i];
            int boguscnt;

            if (test_bit(dma, &dma_channels))
                continue;
            outw(0x7f04, ioaddr+LANCE_DATA);
            if (request_dma(dma, chipname))
                continue;
            set_dma_mode(dma, DMA_MODE_CASCADE);
            enable_dma(dma);

            outw(0x0001, ioaddr+LANCE_DATA);
            for (boguscnt = 100; boguscnt > 0; --boguscnt)
                if (inw(ioaddr+LANCE_DATA) & 0x0900)
                    break;
            if (inw(ioaddr+LANCE_DATA) & 0x0100) {
                dev->dma = dma;
                printk(", DMA %d.\n", dev->dma);
                break;
            } else {
                disable_dma(dma);
                free_dma(dma);
            }
        }
        if (i == 4) {
            printk("DMA detection failed.\n");
            return;
        }
    }
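    /* Turn on auto-selection of the network medium in the bus-interface register on chips that support it. */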
    if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
        outw(0x0002, ioaddr+LANCE_ADDR);
        outw(0x0002, ioaddr+LANCE_BUS_IF);
    }

    if (lance_debug > 0 && did_version++ == 0)
        printk(version);

    dev->open = &lance_open;
    dev->hard_start_xmit = &lance_start_xmit;
    dev->stop = &lance_close;
    dev->get_stats = &lance_get_stats;
    dev->set_multicast_list = &set_multicast_list;

    return;
}
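
/*
 * Open the device: claim the IRQ and DMA channel, (re)build the rings, feed
 * the chip its init block and start it.
 */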
static int
lance_open(struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    int ioaddr = dev->base_addr;
    int i;

    if (dev->irq == 0 ||
        request_irq(dev->irq, &lance_interrupt, 0, lp->name, NULL)) {
        return -EAGAIN;
    }

    irq2dev_map[dev->irq] = dev;

    inw(ioaddr+LANCE_RESET);

    if (dev->dma != 4) {
        enable_dma(dev->dma);
        set_dma_mode(dev->dma, DMA_MODE_CASCADE);
    }

    if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
        outw(0, ioaddr+LANCE_RESET);

    if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
        outw(0x0002, ioaddr+LANCE_ADDR);
        outw(0x0002, ioaddr+LANCE_BUS_IF);
    }

    if (lance_debug > 1)
        printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
               dev->name, dev->irq, dev->dma, (int) lp->tx_ring, (int) lp->rx_ring,
               (int) &lp->init_block);

    lance_init_ring(dev);

    outw(0x0001, ioaddr+LANCE_ADDR);
    outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
    outw(0x0002, ioaddr+LANCE_ADDR);
    outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);

    outw(0x0004, ioaddr+LANCE_ADDR);
    outw(0x0915, ioaddr+LANCE_DATA);

    outw(0x0000, ioaddr+LANCE_ADDR);
    outw(0x0001, ioaddr+LANCE_DATA);

    dev->tbusy = 0;
    dev->interrupt = 0;
    dev->start = 1;
    i = 0;
    while (i++ < 100)
        if (inw(ioaddr+LANCE_DATA) & 0x0100)
            break;

    outw(0x0042, ioaddr+LANCE_DATA);

    if (lance_debug > 2)
        printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
               dev->name, i, (int) &lp->init_block, inw(ioaddr+LANCE_DATA));

    return 0;
}
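
/* Drop any sk_buffs still queued in the Tx ring, used before the ring is rebuilt. */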
static void
lance_purge_tx_ring(struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    int i;

    for (i = 0; i < TX_RING_SIZE; i++) {
        if (lp->tx_skbuff[i]) {
            dev_kfree_skb(lp->tx_skbuff[i],FREE_WRITE);
            lp->tx_skbuff[i] = NULL;
        }
    }
}
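
/*
 * Initialize the rings: give every Rx buffer to the chip (the high bit of a
 * descriptor base is the OWN bit), clear the Tx ring, and refresh the init block.
 */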
static void
lance_init_ring(struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    int i;

    lp->lock = 0, lp->tx_full = 0;
    lp->cur_rx = lp->cur_tx = 0;
    lp->dirty_rx = lp->dirty_tx = 0;

    for (i = 0; i < RX_RING_SIZE; i++) {
        lp->rx_ring[i].base = (lp->rx_buffs + i*PKT_BUF_SZ) | 0x80000000;
        lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
    }

    for (i = 0; i < TX_RING_SIZE; i++) {
        lp->tx_ring[i].base = 0;
    }

    lp->init_block.mode = 0x0000;
    for (i = 0; i < 6; i++)
        lp->init_block.phys_addr[i] = dev->dev_addr[i];
    lp->init_block.filter[0] = 0x00000000;
    lp->init_block.filter[1] = 0x00000000;
    lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
    lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;
}
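
/*
 * Stop the chip and bring it back up with the given CSR0 bits.  The PCnet
 * chips also need the rings rebuilt after a stop.
 */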
static void
lance_restart(struct device *dev, unsigned int csr0_bits, int must_reinit)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;

    if (must_reinit ||
        (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
        lance_purge_tx_ring(dev);
        lance_init_ring(dev);
    }
    outw(0x0000, dev->base_addr + LANCE_ADDR);
    outw(csr0_bits, dev->base_addr + LANCE_DATA);
}
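
/*
 * Queue a packet for transmission.  Handles transmit timeouts by resetting
 * the chip, and copies buffers above 16MB into low bounce buffers because
 * the LANCE only drives 24 address bits.
 */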
static int
lance_start_xmit(struct sk_buff *skb, struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    int ioaddr = dev->base_addr;
    int entry;
    unsigned long flags;

    if (dev->tbusy) {
        int tickssofar = jiffies - dev->trans_start;
        if (tickssofar < 20)
            return 1;
        outw(0, ioaddr+LANCE_ADDR);
        printk("%s: transmit timed out, status %4.4x, resetting.\n",
               dev->name, inw(ioaddr+LANCE_DATA));
        outw(0x0004, ioaddr+LANCE_DATA);
        lp->stats.tx_errors++;
#ifndef final_version
        {
            int i;
            printk(" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
                   lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
                   lp->cur_rx);
            for (i = 0 ; i < RX_RING_SIZE; i++)
                printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
                       lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
                       lp->rx_ring[i].msg_length);
            for (i = 0 ; i < TX_RING_SIZE; i++)
                printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
                       lp->tx_ring[i].base, -lp->tx_ring[i].length,
                       lp->tx_ring[i].misc);
            printk("\n");
        }
#endif
        lance_restart(dev, 0x0043, 1);

        dev->tbusy=0;
        dev->trans_start = jiffies;

        return 0;
    }

    if (skb == NULL) {
        dev_tint(dev);
        return 0;
    }

    if (skb->len <= 0)
        return 0;

    if (lance_debug > 3) {
        outw(0x0000, ioaddr+LANCE_ADDR);
        printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
               inw(ioaddr+LANCE_DATA));
        outw(0x0000, ioaddr+LANCE_DATA);
    }

    if (set_bit(0, (void*)&dev->tbusy) != 0) {
        printk("%s: Transmitter access conflict.\n", dev->name);
        return 1;
    }

    if (set_bit(0, (void*)&lp->lock) != 0) {
        if (lance_debug > 0)
            printk("%s: tx queue lock!.\n", dev->name);
        return 1;
    }

    entry = lp->cur_tx & TX_RING_MOD_MASK;

    if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
        lp->tx_ring[entry].length =
            -(ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN);
    } else
        lp->tx_ring[entry].length = -skb->len;

    lp->tx_ring[entry].misc = 0x0000;

    if ((int)(skb->data) + skb->len > 0x01000000) {
        if (lance_debug > 5)
            printk("%s: bouncing a high-memory packet (%#x).\n",
                   dev->name, (int)(skb->data));
        memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len);
        lp->tx_ring[entry].base =
            (int)(lp->tx_bounce_buffs + entry) | 0x83000000;
        dev_kfree_skb (skb, FREE_WRITE);
    } else {
        lp->tx_skbuff[entry] = skb;
        lp->tx_ring[entry].base = (int)(skb->data) | 0x83000000;
    }
    lp->cur_tx++;

    outw(0x0000, ioaddr+LANCE_ADDR);
    outw(0x0048, ioaddr+LANCE_DATA);

    dev->trans_start = jiffies;

    save_flags(flags);
    cli();
    lp->lock = 0;
    if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base == 0)
        dev->tbusy=0;
    else
        lp->tx_full = 1;
    restore_flags(flags);

    return 0;
}
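
/*
 * The LANCE interrupt handler: acknowledge the events reported in CSR0,
 * service receive and transmit completions, and restart the chip after a
 * fatal error.
 */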
static void
lance_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
    struct device *dev = (struct device *)(irq2dev_map[irq]);
    struct lance_private *lp;
    int csr0, ioaddr, boguscnt=10;
    int must_restart;

    if (dev == NULL) {
        printk ("lance_interrupt(): irq %d for unknown device.\n", irq);
        return;
    }

    ioaddr = dev->base_addr;
    lp = (struct lance_private *)dev->priv;
    if (dev->interrupt)
        printk("%s: Re-entering the interrupt handler.\n", dev->name);

    dev->interrupt = 1;

    outw(0x00, dev->base_addr + LANCE_ADDR);
    while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600
           && --boguscnt >= 0) {

        outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);

        must_restart = 0;

        if (lance_debug > 5)
            printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
                   dev->name, csr0, inw(dev->base_addr + LANCE_DATA));

        if (csr0 & 0x0400)
            lance_rx(dev);
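        /* Tx interrupt: reclaim the descriptors the chip has finished with. */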
        if (csr0 & 0x0200) {
            int dirty_tx = lp->dirty_tx;

            while (dirty_tx < lp->cur_tx) {
                int entry = dirty_tx & TX_RING_MOD_MASK;
                int status = lp->tx_ring[entry].base;

                if (status < 0)
                    break;

                lp->tx_ring[entry].base = 0;

                if (status & 0x40000000) {
                    int err_status = lp->tx_ring[entry].misc;
                    lp->stats.tx_errors++;
                    if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
                    if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
                    if (err_status & 0x1000) lp->stats.tx_window_errors++;
                    if (err_status & 0x4000) {
                        lp->stats.tx_fifo_errors++;
                        printk("%s: Tx FIFO error! Status %4.4x.\n",
                               dev->name, csr0);
                        must_restart = 1;
                    }
                } else {
                    if (status & 0x18000000)
                        lp->stats.collisions++;
                    lp->stats.tx_packets++;
                }

                if (lp->tx_skbuff[entry]) {
                    dev_kfree_skb(lp->tx_skbuff[entry],FREE_WRITE);
                    lp->tx_skbuff[entry] = 0;
                }
                dirty_tx++;
            }

#ifndef final_version
            if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
                printk("out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
                       dirty_tx, lp->cur_tx, lp->tx_full);
                dirty_tx += TX_RING_SIZE;
            }
#endif

            if (lp->tx_full && dev->tbusy
                && dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
                lp->tx_full = 0;
                dev->tbusy = 0;
                mark_bh(NET_BH);
            }

            lp->dirty_tx = dirty_tx;
        }

        if (csr0 & 0x4000) lp->stats.tx_errors++;
        if (csr0 & 0x1000) lp->stats.rx_errors++;
        if (csr0 & 0x0800) {
            printk("%s: Bus master arbitration failure, status %4.4x.\n",
                   dev->name, csr0);
            must_restart = 1;
        }

        if (must_restart) {
            outw(0x0000, dev->base_addr + LANCE_ADDR);
            outw(0x0004, dev->base_addr + LANCE_DATA);
            lance_restart(dev, 0x0002, 0);
        }
    }
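    /* Clear any remaining status bits and set the interrupt enable (INEA). */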
    outw(0x0000, dev->base_addr + LANCE_ADDR);
    outw(0x7940, dev->base_addr + LANCE_DATA);

    if (lance_debug > 4)
        printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
               dev->name, inw(ioaddr + LANCE_ADDR),
               inw(dev->base_addr + LANCE_DATA));

    dev->interrupt = 0;
    return;
}
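
/*
 * Walk the Rx ring while descriptors are owned by the host: copy good frames
 * into freshly allocated sk_buffs, count the errors, and hand the buffers
 * back to the chip.
 */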
static int
lance_rx(struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    int entry = lp->cur_rx & RX_RING_MOD_MASK;
    int i;

    while (lp->rx_ring[entry].base >= 0) {
        int status = lp->rx_ring[entry].base >> 24;

        if (status != 0x03) {
            if (status & 0x01)
                lp->stats.rx_errors++;
            if (status & 0x20) lp->stats.rx_frame_errors++;
            if (status & 0x10) lp->stats.rx_over_errors++;
            if (status & 0x08) lp->stats.rx_crc_errors++;
            if (status & 0x04) lp->stats.rx_fifo_errors++;
            lp->rx_ring[entry].base &= 0x03ffffff;
        }
        else
        {
            short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
            struct sk_buff *skb;

            if(pkt_len<60)
            {
                printk("%s: Runt packet!\n",dev->name);
                lp->stats.rx_errors++;
            }
            else
            {
                skb = dev_alloc_skb(pkt_len+2);
                if (skb == NULL)
                {
                    printk("%s: Memory squeeze, deferring packet.\n", dev->name);
                    for (i=0; i < RX_RING_SIZE; i++)
                        if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
                            break;

                    if (i > RX_RING_SIZE -2)
                    {
                        lp->stats.rx_dropped++;
                        lp->rx_ring[entry].base |= 0x80000000;
                        lp->cur_rx++;
                    }
                    break;
                }
                skb->dev = dev;
                skb_reserve(skb,2);
                skb_put(skb,pkt_len);
                eth_copy_and_sum(skb,
                    (unsigned char *)(lp->rx_ring[entry].base & 0x00ffffff),
                    pkt_len,0);
                skb->protocol=eth_type_trans(skb,dev);
                netif_rx(skb);
                lp->stats.rx_packets++;
            }
        }

        lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
        lp->rx_ring[entry].base |= 0x80000000;
        entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
    }

    return 0;
}
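
/*
 * Close the interface: pick up the final missed-frame count, stop the chip,
 * and release the DMA channel and IRQ.
 */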
static int
lance_close(struct device *dev)
{
    int ioaddr = dev->base_addr;
    struct lance_private *lp = (struct lance_private *)dev->priv;

    dev->start = 0;
    dev->tbusy = 1;

    if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
        outw(112, ioaddr+LANCE_ADDR);
        lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
    }
    outw(0, ioaddr+LANCE_ADDR);

    if (lance_debug > 1)
        printk("%s: Shutting down ethercard, status was %2.2x.\n",
               dev->name, inw(ioaddr+LANCE_DATA));

    outw(0x0004, ioaddr+LANCE_DATA);

    if (dev->dma != 4)
        disable_dma(dev->dma);

    free_irq(dev->irq, NULL);

    irq2dev_map[dev->irq] = 0;

    return 0;
}
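
/* Return the accumulated statistics, refreshing the missed-frame count from CSR112 on chips that keep one. */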
static struct enet_statistics *
lance_get_stats(struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    short ioaddr = dev->base_addr;
    short saved_addr;
    unsigned long flags;

    if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
        save_flags(flags);
        cli();
        saved_addr = inw(ioaddr+LANCE_ADDR);
        outw(112, ioaddr+LANCE_ADDR);
        lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
        outw(saved_addr, ioaddr+LANCE_ADDR);
        restore_flags(flags);
    }

    return &lp->stats;
}
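
/*
 * Set or clear promiscuous mode and the multicast filter.  When any multicast
 * addresses are requested the logical-address filter is simply set to all
 * ones (accept all multicast) rather than computing the real hash.
 */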
static void set_multicast_list(struct device *dev)
{
    short ioaddr = dev->base_addr;

    outw(0, ioaddr+LANCE_ADDR);
    outw(0x0004, ioaddr+LANCE_DATA);

    if (dev->flags&IFF_PROMISC) {
        printk("%s: Promiscuous mode enabled.\n", dev->name);
        outw(15, ioaddr+LANCE_ADDR);
        outw(0x8000, ioaddr+LANCE_DATA);
    } else {
        short multicast_table[4];
        int i;
        int num_addrs=dev->mc_count;
        if(dev->flags&IFF_ALLMULTI)
            num_addrs=1;

        memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
        for (i = 0; i < 4; i++) {
            outw(8 + i, ioaddr+LANCE_ADDR);
            outw(multicast_table[i], ioaddr+LANCE_DATA);
        }
        outw(15, ioaddr+LANCE_ADDR);
        outw(0x0000, ioaddr+LANCE_DATA);
    }

    lance_restart(dev, 0x0142, 0);
}