1 /* lance.c: An AMD LANCE ethernet driver for linux. */
2 /*
3 Written 1993,1994,1995 by Donald Becker.
4
5 Copyright 1993 United States Government as represented by the
6 Director, National Security Agency.
7 This software may be used and distributed according to the terms
8 of the GNU Public License, incorporated herein by reference.
9
10 This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
11 with most other LANCE-based bus-master (NE2100 clone) ethercards.
12
13 The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
14 Center of Excellence in Space Data and Information Sciences
15 Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
16
17
18 Fixing alignment problem with 1.3.* kernel and some minor changes
19 by Andrey V. Savochkin, 1996.
20
21 Problems or questions may be sent to Donald Becker (see above) or to
22 Andrey Savochkin -- saw@shade.msu.ru or
23 Laboratory of Computation Methods,
24 Department of Mathematics and Mechanics,
25 Moscow State University,
26 Leninskye Gory, Moscow 119899
27
28 But I should inform you that I'm not an expert on the LANCE card,
29 and it may happen that you receive no answer to your mail
30 to Donald Becker. I didn't receive any answer to any of my letters
31 to him. Who knows why... But maybe you will be luckier? ;-)
32 SAW
33 */
34
35 static const char *version = "lance.c:v1.08.01 Mar 6 1996 saw@shade.msu.ru\n";
36
37 #include <linux/config.h>
38 #include <linux/kernel.h>
39 #include <linux/sched.h>
40 #include <linux/string.h>
41 #include <linux/ptrace.h>
42 #include <linux/errno.h>
43 #include <linux/ioport.h>
44 #include <linux/malloc.h>
45 #include <linux/interrupt.h>
46 #include <linux/pci.h>
47 #include <linux/bios32.h>
48 #include <asm/bitops.h>
49 #include <asm/io.h>
50 #include <asm/dma.h>
51
52 #include <linux/netdevice.h>
53 #include <linux/etherdevice.h>
54 #include <linux/skbuff.h>
55
56 static unsigned int lance_portlist[] = {0x300, 0x320, 0x340, 0x360, 0};
57 void lance_probe1(int ioaddr);
58
59 #ifdef HAVE_DEVLIST
60 struct netdev_entry lance_drv =
61 {"lance", lance_probe1, LANCE_TOTAL_SIZE, lance_portlist};
62 #endif
63
64 #ifdef LANCE_DEBUG
65 int lance_debug = LANCE_DEBUG;
66 #else
67 int lance_debug = 1;
68 #endif
69
70 /*
71 Theory of Operation
72
73 I. Board Compatibility
74
75 This device driver is designed for the AMD 79C960, the "PCnet-ISA
76 single-chip ethernet controller for ISA". This chip is used in a wide
77 variety of boards from vendors such as Allied Telesis, HP, Kingston,
78 and Boca. This driver is also intended to work with older AMD 7990
79 designs, such as the NE1500 and NE2100, and newer 79C961. For convenience,
80 I use the name LANCE to refer to all of the AMD chips, even though it properly
81 refers only to the original 7990.
82
83 II. Board-specific settings
84
85 The driver is designed to work with boards that use the faster
86 bus-master mode, rather than shared memory mode. (Only older designs
87 have on-board buffer memory needed to support the slower shared memory mode.)
88
89 Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
90 channel. This driver probes the likely base addresses:
91 {0x300, 0x320, 0x340, 0x360}.
92 After the board is found it generates a DMA-timeout interrupt and uses
93 autoIRQ to find the IRQ line. The DMA channel can be set with the low bits
94 of the otherwise-unused dev->mem_start value (aka PARAM1). If unset it is
95 probed for by enabling each free DMA channel in turn and checking if
96 initialization succeeds.
97
98 The HP-J2405A board is an exception: with this board it's easy to read the
99 EEPROM-set values for the base, IRQ, and DMA. (Of course you must already
100 _know_ the base address -- that field is for writing the EEPROM.)
101
102 III. Driver operation
103
104 IIIa. Ring buffers
105 The LANCE uses ring buffers of Tx and Rx descriptors. Each entry describes
106 the base and length of the data buffer, along with status bits. The number
107 of ring entries is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
108 the ring size (rather than the ring size itself) for
109 implementation ease. The current values are 4 (Tx) and 4 (Rx), which leads to
110 ring sizes of 16 (Tx) and 16 (Rx). Increasing the number of ring entries
111 needlessly uses extra space and reduces the chance that an upper layer will
112 be able to reorder queued Tx packets based on priority. Decreasing the number
113 of entries makes it more difficult to achieve back-to-back packet transmission
114 and increases the chance that the Rx ring will overflow. (Consider the worst case
115 of receiving back-to-back minimum-sized packets.)
116
117 The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
118 statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
119 avoid the administrative overhead. For the Rx side this avoids dynamically
120 allocating full-sized buffers "just in case", at the expense of a
121 memory-to-memory data copy for each packet received. For most systems this
122 is a good tradeoff: the Rx buffer will always be in low memory, the copy
123 is inexpensive, and it primes the cache for later packet processing. For Tx
124 the buffers are only used when needed as low-memory bounce buffers.
125
126 IIIB. 16M memory limitations.
127 For the ISA bus master mode all structures used directly by the LANCE,
128 the initialization block, Rx and Tx rings, and data buffers, must be
129 accessible from the ISA bus, i.e. in the lower 16M of real memory.
130 This is a problem for current Linux kernels on >16M machines. The network
131 devices are initialized after memory initialization, and the kernel doles out
132 memory from the top of memory downward. The current solution is to have a
133 special network initialization routine that's called before memory
134 initialization; this will eventually be generalized for all network devices.
135 As mentioned before, low-memory "bounce-buffers" are used when needed.
136
137 IIIC. Synchronization
138 The driver runs as two independent, single-threaded flows of control. One
139 is the send-packet routine, which enforces single-threaded use by the
140 dev->tbusy flag. The other thread is the interrupt handler, which is single
141 threaded by the hardware and other software.
142
143 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
144 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
145 queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
146 the 'lp->tx_full' flag.
147
148 The interrupt handler has exclusive control over the Rx ring and records stats
149 from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
150 we can't avoid the interrupt overhead by having the Tx routine reap the Tx
151 stats.) After reaping the stats, it marks the queue entry as empty by setting
152 the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
153 tx_full and tbusy flags.
154
155 */
156
157 /* Memory accessed from LANCE card must be aligned on 8-byte boundaries.
158 But we can't assume that kmalloc()'ed memory satisfies it. -- SAW */
159 #define LANCE_KMALLOC(x) \
160 ((void *) (((unsigned long)kmalloc((x)+7, GFP_DMA | GFP_KERNEL)+7) & ~7))
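/* Example (illustration only): if kmalloc() returned 0x00c4019, adding 7
   gives 0x00c4020 and masking with ~7 keeps 0x00c4020 -- an 8-byte aligned
   pointer that still lies inside the (x)+7 bytes actually allocated.
   Note that the original kmalloc() pointer is lost, so these blocks are
   never kfree()'d; they live for the lifetime of the driver. */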
161
162 /* Set the number of Tx and Rx buffers, using Log_2(# buffers).
163 Reasonable default values are 16 Tx buffers and 16 Rx buffers.
164 That translates to 4 and 4 (16 == 2^4). */
165 #ifndef LANCE_LOG_TX_BUFFERS
166 #define LANCE_LOG_TX_BUFFERS 4
167 #define LANCE_LOG_RX_BUFFERS 4
168 #endif
169
170 #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
171 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
172 #define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
173
174 #define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
175 #define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
176 #define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
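/* Worked example with the defaults above: LANCE_LOG_TX_BUFFERS == 4 gives
   TX_RING_SIZE == 1 << 4 == 16, TX_RING_MOD_MASK == 0x0f, and
   TX_RING_LEN_BITS == 4 << 29 == 0x80000000 -- the ring-length code that is
   OR'ed into the upper bits of the init block ring pointers below. */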
177
178 #define PKT_BUF_SZ 1544
179
180 /* Offsets from base I/O address. */
181 #define LANCE_DATA 0x10
182 #define LANCE_ADDR 0x12
183 #define LANCE_RESET 0x14
184 #define LANCE_BUS_IF 0x16
185 #define LANCE_TOTAL_SIZE 0x18
186
187 /* The LANCE Rx and Tx ring descriptors. */
188 struct lance_rx_head {
189 int base;
190 short buf_length; /* This length is 2s complement (negative)! */
191 short msg_length; /* This length is "normal". */
192 };
193
194 struct lance_tx_head {
195 int base;
196 short length; /* Length is 2s complement (negative)! */
197 short misc;
198 };
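/* The "negative" lengths are the raw two's complement values the chip
   expects. For example, with PKT_BUF_SZ == 1544 the assignment
   buf_length = -PKT_BUF_SZ stores 0xf9f8 in the 16-bit field. */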
199
200 /* The LANCE initialization block, described in databook. */
201 struct lance_init_block {
202 unsigned short mode; /* Pre-set mode (reg. 15) */
203 unsigned char phys_addr[6]; /* Physical ethernet address */
204 unsigned filter[2]; /* Multicast filter (unused). */
205 /* Receive and transmit ring base, along with extra bits. */
206 unsigned rx_ring; /* Tx and Rx ring base pointers */
207 unsigned tx_ring;
208 };
209
210 struct lance_private {
211 /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
212 struct lance_rx_head rx_ring[RX_RING_SIZE];
213 struct lance_tx_head tx_ring[TX_RING_SIZE];
214 struct lance_init_block init_block;
215 const char *name;
216 /* The saved address of a sent-in-place packet/buffer, for dev_kfree_skb(). */
217 struct sk_buff* tx_skbuff[TX_RING_SIZE];
218 unsigned long rx_buffs; /* Address of Rx and Tx buffers. */
219 /* Tx low-memory "bounce buffer" address. */
220 char (*tx_bounce_buffs)[PKT_BUF_SZ];
221 int cur_rx, cur_tx; /* The next free ring entry */
222 int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
223 int dma;
224 struct enet_statistics stats;
225 unsigned char chip_version; /* See lance_chip_type. */
226 char tx_full;
227 char lock;
228 };
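/* Ring bookkeeping convention: cur_rx/cur_tx and dirty_rx/dirty_tx are
   free-running counters, masked only when used as an index, e.g.
       entry = lp->cur_tx & TX_RING_MOD_MASK;
   so (cur_tx - dirty_tx) is the number of Tx entries still in flight. */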
229
230 #define LANCE_MUST_PAD 0x00000001
231 #define LANCE_ENABLE_AUTOSELECT 0x00000002
232 #define LANCE_MUST_REINIT_RING 0x00000004
233 #define LANCE_MUST_UNRESET 0x00000008
234 #define LANCE_HAS_MISSED_FRAME 0x00000010
235
236 /* A mapping from the chip ID number to the part number and features.
237 These are from the datasheets -- in real life the '970 version
238 reportedly has the same ID as the '965. */
239 static struct lance_chip_type {
240 int id_number;
241 const char *name;
242 int flags;
243 } chip_table[] = {
244 {0x0000, "LANCE 7990", /* Ancient lance chip. */
245 LANCE_MUST_PAD + LANCE_MUST_UNRESET},
246 {0x0003, "PCnet/ISA 79C960", /* 79C960 PCnet/ISA. */
247 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
248 LANCE_HAS_MISSED_FRAME},
249 {0x2260, "PCnet/ISA+ 79C961", /* 79C961 PCnet/ISA+, Plug-n-Play. */
250 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
251 LANCE_HAS_MISSED_FRAME},
252 {0x2420, "PCnet/PCI 79C970", /* 79C970 or 79C974 PCnet-SCSI, PCI. */
253 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
254 LANCE_HAS_MISSED_FRAME},
255 /* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
256 it the PCnet32. */
257 {0x2430, "PCnet32", /* 79C965 PCnet for VL bus. */
258 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
259 LANCE_HAS_MISSED_FRAME},
260 {0x0, "PCnet (unknown)",
261 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
262 LANCE_HAS_MISSED_FRAME},
263 };
264
265 enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, LANCE_UNKNOWN=5};
266
267 /* Non-zero only if the current card is a PCI with BIOS-set IRQ. */
268 static unsigned char pci_irq_line = 0;
269
270 /* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
271 Assume yes until we know the memory size. */
272 static unsigned char lance_need_isa_bounce_buffers = 1;
273
274 static int lance_open(struct device *dev);
275 static void lance_init_ring(struct device *dev);
276 static int lance_start_xmit(struct sk_buff *skb, struct device *dev);
277 static int lance_rx(struct device *dev);
278 static void lance_interrupt(int irq, void *dev_id, struct pt_regs *regs);
279 static int lance_close(struct device *dev);
280 static struct enet_statistics *lance_get_stats(struct device *dev);
281 static void set_multicast_list(struct device *dev);
282
283
284
285 /* This lance probe is unlike the other board probes in 1.0.*. The LANCE may
286 have to allocate a contiguous low-memory region for bounce buffers.
287 This requirement is satisfied by having the lance initialization occur
288 before the memory management system is started, and thus well before the
289 other probes. */
290
291 int lance_init(void)
292 {
293 int *port;
294
295 if (high_memory <= 16*1024*1024)
296 lance_need_isa_bounce_buffers = 0;
297
298 #if defined(CONFIG_PCI)
299 if (pcibios_present()) {
300 int pci_index;
301 printk("lance.c: PCI bios is present, checking for devices...\n");
302 for (pci_index = 0; pci_index < 8; pci_index++) {
303 unsigned char pci_bus, pci_device_fn;
304 unsigned int pci_ioaddr;
305 unsigned short pci_command;
306
307 if (pcibios_find_device (PCI_VENDOR_ID_AMD,
308 PCI_DEVICE_ID_AMD_LANCE, pci_index,
309 &pci_bus, &pci_device_fn) != 0)
310 break;
311 pcibios_read_config_byte(pci_bus, pci_device_fn,
312 PCI_INTERRUPT_LINE, &pci_irq_line);
313 pcibios_read_config_dword(pci_bus, pci_device_fn,
314 PCI_BASE_ADDRESS_0, &pci_ioaddr);
315 /* Remove I/O space marker in bit 0. */
316 pci_ioaddr &= ~3;
317 /* PCI Spec 2.1 states that it is either the driver or PCI card's
318 * responsibility to set the PCI Master Enable Bit if needed.
319 * (From Mark Stockton <marks@schooner.sys.hou.compaq.com>)
320 */
321 pcibios_read_config_word(pci_bus, pci_device_fn,
322 PCI_COMMAND, &pci_command);
323 if ( ! (pci_command & PCI_COMMAND_MASTER)) {
324 printk("PCI Master Bit has not been set. Setting...\n");
325 pci_command |= PCI_COMMAND_MASTER;
326 pcibios_write_config_word(pci_bus, pci_device_fn,
327 PCI_COMMAND, pci_command);
328 }
329 printk("Found PCnet/PCI at %#x, irq %d.\n",
330 pci_ioaddr, pci_irq_line);
331 lance_probe1(pci_ioaddr);
332 pci_irq_line = 0;
333 }
334 }
335 #endif /* defined(CONFIG_PCI) */
336
337 for (port = lance_portlist; *port; port++) {
338 int ioaddr = *port;
339
340 if ( check_region(ioaddr, LANCE_TOTAL_SIZE) == 0) {
341 /* Detect "normal" 0x57 0x57 and the NI6510EB 0x52 0x44
342 signatures w/ minimal I/O reads */
343 char offset15, offset14 = inb(ioaddr + 14);
344
345 if ((offset14 == 0x52 || offset14 == 0x57) &&
346 ((offset15 = inb(ioaddr + 15)) == 0x57 || offset15 == 0x44))
347 lance_probe1(ioaddr);
348 }
349 }
350
351 return 0;
352 }
353
354 void lance_probe1(int ioaddr)
355 {
356 struct device *dev;
357 struct lance_private *lp;
358 short dma_channels; /* Mark spuriously-busy DMA channels */
359 int i, reset_val, lance_version;
360 const char *chipname;
361 /* Flags for specific chips or boards. */
362 unsigned char hpJ2405A = 0; /* HP ISA adaptor */
363 int hp_builtin = 0; /* HP on-board ethernet. */
364 static int did_version = 0; /* Already printed version info. */
365
366 /* First we look for special cases.
367 Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
368 There are two HP versions, check the BIOS for the configuration port.
369 This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
370 */
371 if ( *((unsigned short *) 0x000f0102) == 0x5048) {
372 static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
373 int hp_port = ( *((unsigned char *) 0x000f00f1) & 1) ? 0x499 : 0x99;
374 /* We can have boards other than the built-in! Verify this is on-board. */
375 if ((inb(hp_port) & 0xc0) == 0x80
376 && ioaddr_table[inb(hp_port) & 3] == ioaddr)
377 hp_builtin = hp_port;
378 }
379 /* We also recognize the HP Vectra on-board here, but check below. */
380 hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00
381 && inb(ioaddr+2) == 0x09);
382
383 /* Reset the LANCE. */
384 reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
385
386 /* The Un-Reset is only needed for the real NE2100, and will
387 confuse the HP board. */
388 if (!hpJ2405A)
389 outw(reset_val, ioaddr+LANCE_RESET);
390
391 outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
392 if (inw(ioaddr+LANCE_DATA) != 0x0004)
393 return;
394
395 /* Get the version of the chip. */
396 outw(88, ioaddr+LANCE_ADDR);
397 if (inw(ioaddr+LANCE_ADDR) != 88) {
398 lance_version = 0;
399 } else { /* Good, it's a newer chip. */
400 int chip_version = inw(ioaddr+LANCE_DATA);
401 outw(89, ioaddr+LANCE_ADDR);
402 chip_version |= inw(ioaddr+LANCE_DATA) << 16;
403 if (lance_debug > 2)
404 printk(" LANCE chip version is %#x.\n", chip_version);
405 if ((chip_version & 0xfff) != 0x003)
406 return;
407 chip_version = (chip_version >> 12) & 0xffff;
408 for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
409 if (chip_table[lance_version].id_number == chip_version)
410 break;
411 }
412 }
413
414 dev = init_etherdev(0, 0);
415 chipname = chip_table[lance_version].name;
416 printk("%s: %s at %#3x,", dev->name, chipname, ioaddr);
417
418 /* There is a 16 byte station address PROM at the base address.
419 The first six bytes are the station address. */
420 for (i = 0; i < 6; i++)
421 printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
422
423 dev->base_addr = ioaddr;
424 request_region(ioaddr, LANCE_TOTAL_SIZE, chip_table[lance_version].name);
425
426 /* Make certain the data structures used by the LANCE are aligned and DMAble. */
427 lp = (struct lance_private *) LANCE_KMALLOC(sizeof(*lp));
428 if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
429 memset(lp, 0, sizeof(*lp));
430 dev->priv = lp;
431 lp->name = chipname;
432 /* I'm not sure the buffers must also be aligned, but it's safer to do so. -- SAW */
433 lp->rx_buffs = (unsigned long) LANCE_KMALLOC(PKT_BUF_SZ*RX_RING_SIZE);
434 lp->tx_bounce_buffs = NULL;
435 if (lance_need_isa_bounce_buffers)
436 lp->tx_bounce_buffs = LANCE_KMALLOC(PKT_BUF_SZ*TX_RING_SIZE);
437
438 lp->chip_version = lance_version;
439
440 lp->init_block.mode = 0x0003; /* Disable Rx and Tx. */
441 for (i = 0; i < 6; i++)
442 lp->init_block.phys_addr[i] = dev->dev_addr[i];
443 lp->init_block.filter[0] = 0x00000000;
444 lp->init_block.filter[1] = 0x00000000;
445 lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
446 lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;
447
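/* Program the init block address into the chip. Each access writes a
   CSR number to the address port (LANCE_ADDR, the RAP) and then reads or
   writes the data port (LANCE_DATA, the RDP): CSR1 takes the low 16 bits
   of the init block address, CSR2 the high bits, and RAP is left pointing
   at CSR0. The dummy inw(ioaddr+LANCE_ADDR) reads appear to be there only
   to let the bus settle. */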
448 outw(0x0001, ioaddr+LANCE_ADDR);
449 inw(ioaddr+LANCE_ADDR);
450 outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
451 outw(0x0002, ioaddr+LANCE_ADDR);
452 inw(ioaddr+LANCE_ADDR);
453 outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);
454 outw(0x0000, ioaddr+LANCE_ADDR);
455 inw(ioaddr+LANCE_ADDR);
456
457 if (pci_irq_line) {
458 dev->dma = 4; /* Native bus-master, no DMA channel needed. */
459 dev->irq = pci_irq_line;
460 } else if (hp_builtin) {
461 static const char dma_tbl[4] = {3, 5, 6, 0};
462 static const char irq_tbl[4] = {3, 4, 5, 9};
463 unsigned char port_val = inb(hp_builtin);
464 dev->dma = dma_tbl[(port_val >> 4) & 3];
465 dev->irq = irq_tbl[(port_val >> 2) & 3];
466 printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
467 } else if (hpJ2405A) {
468 static const char dma_tbl[4] = {3, 5, 6, 7};
469 static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
470 short reset_val = inw(ioaddr+LANCE_RESET);
471 dev->dma = dma_tbl[(reset_val >> 2) & 3];
472 dev->irq = irq_tbl[(reset_val >> 4) & 7];
473 printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
474 } else if (lance_version == PCNET_ISAP) { /* The plug-n-play version. */
475 short bus_info;
476 outw(8, ioaddr+LANCE_ADDR);
477 bus_info = inw(ioaddr+LANCE_BUS_IF);
478 dev->dma = bus_info & 0x07;
479 dev->irq = (bus_info >> 4) & 0x0F;
480 } else {
481 /* The DMA channel may be passed in PARAM1. */
482 if (dev->mem_start & 0x07)
483 dev->dma = dev->mem_start & 0x07;
484 }
485
486 if (dev->dma == 0) {
487 /* Read the DMA channel status register, so that we can avoid
488 stuck DMA channels in the DMA detection below. */
489 dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
490 (inb(DMA2_STAT_REG) & 0xf0);
491 }
492 if (dev->irq >= 2)
493 printk(" assigned IRQ %d", dev->irq);
494 else {
495 /* To auto-IRQ we enable the initialization-done and DMA error
496 interrupts. For ISA boards we get a DMA error, but VLB and PCI
497 boards will work. */
498 autoirq_setup(0);
499
500 /* Trigger an initialization just for the interrupt. */
501 outw(0x0041, ioaddr+LANCE_DATA);
502
503 dev->irq = autoirq_report(1);
504 if (dev->irq)
505 printk(", probed IRQ %d", dev->irq);
506 else {
507 printk(", failed to detect IRQ line.\n");
508 return;
509 }
510
511 /* Check for the initialization done bit, 0x0100, which means
512 that we don't need a DMA channel. */
513 if (inw(ioaddr+LANCE_DATA) & 0x0100)
514 dev->dma = 4;
515 }
516
517 if (dev->dma == 4) {
518 printk(", no DMA needed.\n");
519 } else if (dev->dma) {
520 if (request_dma(dev->dma, chipname)) {
521 printk("DMA %d allocation failed.\n", dev->dma);
522 return;
523 } else
524 printk(", assigned DMA %d.\n", dev->dma);
525 } else { /* OK, we have to auto-DMA. */
526 for (i = 0; i < 4; i++) {
527 static const char dmas[] = { 5, 6, 7, 3 };
528 int dma = dmas[i];
529 int boguscnt;
530
531 /* Don't enable a permanently busy DMA channel, or the machine
532 will hang. */
533 if (test_bit(dma, &dma_channels))
534 continue;
535 outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
536 if (request_dma(dma, chipname))
537 continue;
538 set_dma_mode(dma, DMA_MODE_CASCADE);
539 enable_dma(dma);
540
541 /* Trigger an initialization. */
542 outw(0x0001, ioaddr+LANCE_DATA);
543 for (boguscnt = 100; boguscnt > 0; --boguscnt)
544 if (inw(ioaddr+LANCE_DATA) & 0x0900)
545 break;
546 if (inw(ioaddr+LANCE_DATA) & 0x0100) {
547 dev->dma = dma;
548 printk(", DMA %d.\n", dev->dma);
549 break;
550 } else {
551 disable_dma(dma);
552 free_dma(dma);
553 }
554 }
555 if (i == 4) { /* Failure: bail. */
556 printk("DMA detection failed.\n");
557 return;
558 }
559 }
560
561 if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
562 /* Turn on auto-select of media (10baseT or BNC) so that the user
563 can watch the LEDs even if the board isn't opened. */
564 outw(0x0002, ioaddr+LANCE_ADDR);
565 outw(0x0002, ioaddr+LANCE_BUS_IF);
566 }
567
568 if (lance_debug > 0 && did_version++ == 0)
569 printk(version);
570
571 /* The LANCE-specific entries in the device structure. */
572 dev->open = &lance_open;
573 dev->hard_start_xmit = &lance_start_xmit;
574 dev->stop = &lance_close;
575 dev->get_stats = &lance_get_stats;
576 dev->set_multicast_list = &set_multicast_list;
577
578 return;
579 }
580
581
582 static int
583 lance_open(struct device *dev)
584 {
585 struct lance_private *lp = (struct lance_private *)dev->priv;
586 int ioaddr = dev->base_addr;
587 int i;
588
589 if (dev->irq == 0 ||
590 request_irq(dev->irq, &lance_interrupt, 0, lp->name, NULL)) {
591 return -EAGAIN;
592 }
593
594 /* We used to allocate DMA here, but that was silly.
595 DMA lines can't be shared! We now permanently allocate them. */
596
597 irq2dev_map[dev->irq] = dev;
598
599 /* Reset the LANCE */
600 inw(ioaddr+LANCE_RESET);
601
602 /* The DMA controller is used as a no-operation slave, "cascade mode". */
603 if (dev->dma != 4) {
604 enable_dma(dev->dma);
605 set_dma_mode(dev->dma, DMA_MODE_CASCADE);
606 }
607
608 /* Un-Reset the LANCE, needed only for the NE2100. */
609 if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
610 outw(0, ioaddr+LANCE_RESET);
611
612 if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
613 /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
614 outw(0x0002, ioaddr+LANCE_ADDR);
615 outw(0x0002, ioaddr+LANCE_BUS_IF);
616 }
617
618 if (lance_debug > 1)
619 printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
620 dev->name, dev->irq, dev->dma, (int) lp->tx_ring, (int) lp->rx_ring,
621 (int) &lp->init_block);
622
623 lance_init_ring(dev);
624 /* Re-initialize the LANCE, and start it when done. */
625 outw(0x0001, ioaddr+LANCE_ADDR);
626 outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
627 outw(0x0002, ioaddr+LANCE_ADDR);
628 outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);
629
630 outw(0x0004, ioaddr+LANCE_ADDR);
631 outw(0x0915, ioaddr+LANCE_DATA);
632
633 outw(0x0000, ioaddr+LANCE_ADDR);
634 outw(0x0001, ioaddr+LANCE_DATA);
635
636 dev->tbusy = 0;
637 dev->interrupt = 0;
638 dev->start = 1;
639 i = 0;
640 while (i++ < 100)
641 if (inw(ioaddr+LANCE_DATA) & 0x0100)
642 break;
643 /*
644 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
645 * reports that doing so triggers a bug in the '974.
646 */
647 outw(0x0042, ioaddr+LANCE_DATA);
648
649 if (lance_debug > 2)
650 printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
651 dev->name, i, (int) &lp->init_block, inw(ioaddr+LANCE_DATA));
652
653 return 0; /* Always succeed */
654 }
655
656 /* The LANCE has been halted for one reason or another (busmaster memory
657 arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
658 etc.). Modern LANCE variants always reload their ring-buffer
659 configuration when restarted, so we must reinitialize our ring
660 context before restarting. As part of this reinitialization,
661 find all packets still on the Tx ring and pretend that they had been
662 sent (in effect, drop the packets on the floor) - the higher-level
663 protocols will time out and retransmit. It'd be better to shuffle
664 these skbs to a temp list and then actually re-Tx them after
665 restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
666 */
667
668 static void
669 lance_purge_tx_ring(struct device *dev)
670 {
671 struct lance_private *lp = (struct lance_private *)dev->priv;
672 int i;
673
674 for (i = 0; i < TX_RING_SIZE; i++) {
675 if (lp->tx_skbuff[i]) {
676 dev_kfree_skb(lp->tx_skbuff[i],FREE_WRITE);
677 lp->tx_skbuff[i] = NULL;
678 }
679 }
680 }
681
682
683 /* Initialize the LANCE Rx and Tx rings. */
684 static void
685 lance_init_ring(struct device *dev)
686 {
687 struct lance_private *lp = (struct lance_private *)dev->priv;
688 int i;
689
690 lp->lock = 0; lp->tx_full = 0;
691 lp->cur_rx = lp->cur_tx = 0;
692 lp->dirty_rx = lp->dirty_tx = 0;
693
694 for (i = 0; i < RX_RING_SIZE; i++) {
695 lp->rx_ring[i].base = (lp->rx_buffs + i*PKT_BUF_SZ) | 0x80000000;
696 lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
697 }
698 /* The Tx buffer address is filled in as needed, but we do need to clear
699 the upper ownership bit. */
700 for (i = 0; i < TX_RING_SIZE; i++) {
701 lp->tx_ring[i].base = 0;
702 }
703
704 lp->init_block.mode = 0x0000;
705 for (i = 0; i < 6; i++)
706 lp->init_block.phys_addr[i] = dev->dev_addr[i];
707 lp->init_block.filter[0] = 0x00000000;
708 lp->init_block.filter[1] = 0x00000000;
709 lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
710 lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;
711 }
712
713 static void
714 lance_restart(struct device *dev, unsigned int csr0_bits, int must_reinit)
715 {
716 struct lance_private *lp = (struct lance_private *)dev->priv;
717
718 if (must_reinit ||
719 (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
720 lance_purge_tx_ring(dev);
721 lance_init_ring(dev);
722 }
723 outw(0x0000, dev->base_addr + LANCE_ADDR);
724 outw(csr0_bits, dev->base_addr + LANCE_DATA);
725 }
726
727 static int
728 lance_start_xmit(struct sk_buff *skb, struct device *dev)
729 {
730 struct lance_private *lp = (struct lance_private *)dev->priv;
731 int ioaddr = dev->base_addr;
732 int entry;
733 unsigned long flags;
734
735 /* Transmitter timeout, serious problems. */
736 if (dev->tbusy) {
737 int tickssofar = jiffies - dev->trans_start;
738 if (tickssofar < 20)
739 return 1;
740 outw(0, ioaddr+LANCE_ADDR);
741 printk("%s: transmit timed out, status %4.4x, resetting.\n",
742 dev->name, inw(ioaddr+LANCE_DATA));
743 outw(0x0004, ioaddr+LANCE_DATA);
744 lp->stats.tx_errors++;
745 #ifndef final_version
746 {
747 int i;
748 printk(" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
749 lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
750 lp->cur_rx);
751 for (i = 0 ; i < RX_RING_SIZE; i++)
752 printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
753 lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
754 lp->rx_ring[i].msg_length);
755 for (i = 0 ; i < TX_RING_SIZE; i++)
756 printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
757 lp->tx_ring[i].base, -lp->tx_ring[i].length,
758 lp->tx_ring[i].misc);
759 printk("\n");
760 }
761 #endif
762 lance_restart(dev, 0x0043, 1);
763
764 dev->tbusy=0;
765 dev->trans_start = jiffies;
766
767 return 0;
768 }
769
770 if (skb == NULL) {
771 dev_tint(dev);
772 return 0;
773 }
774
775 if (skb->len <= 0)
776 return 0;
777
778 if (lance_debug > 3) {
779 outw(0x0000, ioaddr+LANCE_ADDR);
780 printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
781 inw(ioaddr+LANCE_DATA));
782 outw(0x0000, ioaddr+LANCE_DATA);
783 }
784
785 /* Block a timer-based transmit from overlapping. This could better be
786 done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
787 if (set_bit(0, (void*)&dev->tbusy) != 0) {
788 printk("%s: Transmitter access conflict.\n", dev->name);
789 return 1;
790 }
791
792 if (set_bit(0, (void*)&lp->lock) != 0) {
793 if (lance_debug > 0)
794 printk("%s: tx queue lock!.\n", dev->name);
795 /* don't clear dev->tbusy flag. */
796 return 1;
797 }
798
799 /* Fill in a Tx ring entry */
800
801 /* Mask to ring buffer boundary. */
802 entry = lp->cur_tx & TX_RING_MOD_MASK;
803
804 /* Caution: the write order is important here, set the base address
805 with the "ownership" bits last. */
806
807 /* The old LANCE chips don't automatically pad buffers to the minimum size. */
808 if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
809 lp->tx_ring[entry].length =
810 -(ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN);
811 } else
812 lp->tx_ring[entry].length = -skb->len;
813
814 lp->tx_ring[entry].misc = 0x0000;
815
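/* In both branches below, OR'ing 0x83000000 into 'base' sets the OWN,
   STP and ENP bits in the descriptor status byte: the LANCE owns the
   entry and the buffer holds a complete packet (start and end of packet),
   since this driver never chains Tx buffers. */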
816 /* If any part of this buffer is >16M we must copy it to a low-memory
817 buffer. */
818 if ((int)(skb->data) + skb->len > 0x01000000) {
819 if (lance_debug > 5)
820 printk("%s: bouncing a high-memory packet (%#x).\n",
821 dev->name, (int)(skb->data));
822 memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len);
823 lp->tx_ring[entry].base =
824 (int)(lp->tx_bounce_buffs + entry) | 0x83000000;
825 dev_kfree_skb (skb, FREE_WRITE);
826 } else {
827 lp->tx_skbuff[entry] = skb;
828 lp->tx_ring[entry].base = (int)(skb->data) | 0x83000000;
829 }
830 lp->cur_tx++;
831
832 /* Trigger an immediate send poll. */
833 outw(0x0000, ioaddr+LANCE_ADDR);
834 outw(0x0048, ioaddr+LANCE_DATA);
835
836 dev->trans_start = jiffies;
837
838 save_flags(flags);
839 cli();
840 lp->lock = 0;
841 if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base == 0)
842 dev->tbusy=0;
843 else
844 lp->tx_full = 1;
845 restore_flags(flags);
846
847 return 0;
848 }
849
850 /* The LANCE interrupt handler. */
851 static void
852 lance_interrupt(int irq, void *dev_id, struct pt_regs * regs)
853 {
854 struct device *dev = (struct device *)(irq2dev_map[irq]);
855 struct lance_private *lp;
856 int csr0, ioaddr, boguscnt=10;
857 int must_restart;
858
859 if (dev == NULL) {
860 printk ("lance_interrupt(): irq %d for unknown device.\n", irq);
861 return;
862 }
863
864 ioaddr = dev->base_addr;
865 lp = (struct lance_private *)dev->priv;
866 if (dev->interrupt)
867 printk("%s: Re-entering the interrupt handler.\n", dev->name);
868
869 dev->interrupt = 1;
870
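/* CSR0 bits used below (per the AMD databook): 0x8600 is ERR|RINT|TINT,
   i.e. some error, receive, or transmit event is pending. Writing
   csr0 & ~0x004f back acknowledges the pending status bits (they are
   write-one-to-clear) while masking off the control bits INEA, TDMD,
   STOP, STRT and INIT so they are not re-triggered by the write. */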
871 outw(0x00, dev->base_addr + LANCE_ADDR);
872 while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600
873 && --boguscnt >= 0) {
874 /* Acknowledge all of the current interrupt sources ASAP. */
875 outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
876
877 must_restart = 0;
878
879 if (lance_debug > 5)
880 printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
881 dev->name, csr0, inw(dev->base_addr + LANCE_DATA));
882
883 if (csr0 & 0x0400) /* Rx interrupt */
884 lance_rx(dev);
885
886 if (csr0 & 0x0200) { /* Tx-done interrupt */
887 int dirty_tx = lp->dirty_tx;
888
889 while (dirty_tx < lp->cur_tx) {
890 int entry = dirty_tx & TX_RING_MOD_MASK;
891 int status = lp->tx_ring[entry].base;
892
893 if (status < 0)
894 break; /* It still hasn't been Txed */
895
896 lp->tx_ring[entry].base = 0;
897
898 if (status & 0x40000000) {
899 /* There was a major error; log it. */
900 int err_status = lp->tx_ring[entry].misc;
901 lp->stats.tx_errors++;
902 if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
903 if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
904 if (err_status & 0x1000) lp->stats.tx_window_errors++;
905 if (err_status & 0x4000) {
906 /* Ackk! On FIFO errors the Tx unit is turned off! */
907 lp->stats.tx_fifo_errors++;
908 /* Remove this verbosity later! */
909 printk("%s: Tx FIFO error! Status %4.4x.\n",
910 dev->name, csr0);
911 /* Restart the chip. */
912 must_restart = 1;
913 }
914 } else {
915 if (status & 0x18000000)
916 lp->stats.collisions++;
917 lp->stats.tx_packets++;
918 }
919
920 /* We must free the original skb if it's not a data-only copy
921 in the bounce buffer. */
922 if (lp->tx_skbuff[entry]) {
923 dev_kfree_skb(lp->tx_skbuff[entry],FREE_WRITE);
924 lp->tx_skbuff[entry] = 0;
925 }
926 dirty_tx++;
927 }
928
929 #ifndef final_version
930 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
931 printk("out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
932 dirty_tx, lp->cur_tx, lp->tx_full);
933 dirty_tx += TX_RING_SIZE;
934 }
935 #endif
936
937 if (lp->tx_full && dev->tbusy
938 && dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
939 /* The ring is no longer full, clear tbusy. */
940 lp->tx_full = 0;
941 dev->tbusy = 0;
942 mark_bh(NET_BH);
943 }
944
945 lp->dirty_tx = dirty_tx;
946 }
947
948 /* Log misc errors. */
949 if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
950 if (csr0 & 0x1000) lp->stats.rx_errors++; /* Missed a Rx frame. */
951 if (csr0 & 0x0800) {
952 printk("%s: Bus master arbitration failure, status %4.4x.\n",
953 dev->name, csr0);
954 /* Restart the chip. */
955 must_restart = 1;
956 }
957
958 if (must_restart) {
959 /* stop the chip to clear the error condition, then restart */
960 outw(0x0000, dev->base_addr + LANCE_ADDR);
961 outw(0x0004, dev->base_addr + LANCE_DATA);
962 lance_restart(dev, 0x0002, 0);
963 }
964 }
965
966 /* Clear any other interrupt, and set interrupt enable. */
967 outw(0x0000, dev->base_addr + LANCE_ADDR);
968 outw(0x7940, dev->base_addr + LANCE_DATA);
969
970 if (lance_debug > 4)
971 printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
972 dev->name, inw(ioaddr + LANCE_ADDR),
973 inw(dev->base_addr + LANCE_DATA));
974
975 dev->interrupt = 0;
976 return;
977 }
978
979 static int
980 lance_rx(struct device *dev)
981 {
982 struct lance_private *lp = (struct lance_private *)dev->priv;
983 int entry = lp->cur_rx & RX_RING_MOD_MASK;
984 int i;
985
986 /* If we own the next entry, it's a new packet. Send it up. */
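/* The top byte of 'base' is the Rx status: while OWN (0x80) is set the
   value is negative and the LANCE still owns the entry. A status of
   exactly 0x03 (STP|ENP, no error bits) means a complete, error-free
   packet contained in a single buffer. */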
987 while (lp->rx_ring[entry].base >= 0) {
988 int status = lp->rx_ring[entry].base >> 24;
989
990 if (status != 0x03) { /* There was an error. */
991 /* There is a tricky error noted by John Murphy,
992 <murf@perftech.com> to Russ Nelson: Even with full-sized
993 buffers it's possible for a jabber packet to use two
994 buffers, with only the last correctly noting the error. */
995 if (status & 0x01) /* Only count a general error at the */
996 lp->stats.rx_errors++; /* end of a packet.*/
997 if (status & 0x20) lp->stats.rx_frame_errors++;
998 if (status & 0x10) lp->stats.rx_over_errors++;
999 if (status & 0x08) lp->stats.rx_crc_errors++;
1000 if (status & 0x04) lp->stats.rx_fifo_errors++;
1001 lp->rx_ring[entry].base &= 0x03ffffff;
1002 }
1003 else
1004 {
1005 /* Malloc up new buffer, compatible with net3. */
1006 short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
1007 struct sk_buff *skb;
1008
1009 if(pkt_len<60)
1010 {
1011 printk("%s: Runt packet!\n",dev->name);
1012 lp->stats.rx_errors++;
1013 }
1014 else
1015 {
1016 skb = dev_alloc_skb(pkt_len+2);
1017 if (skb == NULL)
1018 {
1019 printk("%s: Memory squeeze, deferring packet.\n", dev->name);
1020 for (i=0; i < RX_RING_SIZE; i++)
1021 if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
1022 break;
1023
1024 if (i > RX_RING_SIZE -2)
1025 {
1026 lp->stats.rx_dropped++;
1027 lp->rx_ring[entry].base |= 0x80000000;
1028 lp->cur_rx++;
1029 }
1030 break;
1031 }
1032 skb->dev = dev;
1033 skb_reserve(skb,2); /* 16 byte align */
1034 skb_put(skb,pkt_len); /* Make room */
1035 eth_copy_and_sum(skb,
1036 (unsigned char *)(lp->rx_ring[entry].base & 0x00ffffff),
1037 pkt_len,0);
1038 skb->protocol=eth_type_trans(skb,dev);
1039 netif_rx(skb);
1040 lp->stats.rx_packets++;
1041 }
1042 }
1043 /* The docs say that the buffer length isn't touched, but Andrew Boyd
1044 of QNX reports that some revs of the 79C965 clear it. */
1045 lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
1046 lp->rx_ring[entry].base |= 0x80000000;
1047 entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
1048 }
1049
1050 /* We should check that at least two ring entries are free. If not,
1051 we should free one and mark stats->rx_dropped++. */
1052
1053 return 0;
1054 }
1055
1056 static int
1057 lance_close(struct device *dev)
1058 {
1059 int ioaddr = dev->base_addr;
1060 struct lance_private *lp = (struct lance_private *)dev->priv;
1061
1062 dev->start = 0;
1063 dev->tbusy = 1;
1064
1065 if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1066 outw(112, ioaddr+LANCE_ADDR);
1067 lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1068 }
1069 outw(0, ioaddr+LANCE_ADDR);
1070
1071 if (lance_debug > 1)
1072 printk("%s: Shutting down ethercard, status was %2.2x.\n",
1073 dev->name, inw(ioaddr+LANCE_DATA));
1074
1075 /* We stop the LANCE here -- it occasionally polls
1076 memory if we don't. */
1077 outw(0x0004, ioaddr+LANCE_DATA);
1078
1079 if (dev->dma != 4)
1080 disable_dma(dev->dma);
1081
1082 free_irq(dev->irq, NULL);
1083
1084 irq2dev_map[dev->irq] = 0;
1085
1086 return 0;
1087 }
1088
1089 static struct enet_statistics *
1090 lance_get_stats(struct device *dev)
1091 {
1092 struct lance_private *lp = (struct lance_private *)dev->priv;
1093 short ioaddr = dev->base_addr;
1094 short saved_addr;
1095 unsigned long flags;
1096
1097 if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1098 save_flags(flags);
1099 cli();
1100 saved_addr = inw(ioaddr+LANCE_ADDR);
1101 outw(112, ioaddr+LANCE_ADDR);
1102 lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1103 outw(saved_addr, ioaddr+LANCE_ADDR);
1104 restore_flags(flags);
1105 }
1106
1107 return &lp->stats;
1108 }
1109
1110 /* Set or clear the multicast filter for this adaptor.
1111 */
1112
1113 static void set_multicast_list(struct device *dev)
1114 {
1115 short ioaddr = dev->base_addr;
1116
1117 outw(0, ioaddr+LANCE_ADDR);
1118 outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */
1119
1120 if (dev->flags&IFF_PROMISC) {
1121 /* Log any net taps. */
1122 printk("%s: Promiscuous mode enabled.\n", dev->name);
1123 outw(15, ioaddr+LANCE_ADDR);
1124 outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
1125 } else {
1126 short multicast_table[4];
1127 int i;
1128 int num_addrs=dev->mc_count;
1129 if(dev->flags&IFF_ALLMULTI)
1130 num_addrs=1;
1131 /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
1132 memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
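/* CSR8-CSR11 hold the 64-bit logical (multicast) address filter;
   writing all ones accepts every multicast frame, all zeroes rejects
   them all. */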
1133 for (i = 0; i < 4; i++) {
1134 outw(8 + i, ioaddr+LANCE_ADDR);
1135 outw(multicast_table[i], ioaddr+LANCE_DATA);
1136 }
1137 outw(15, ioaddr+LANCE_ADDR);
1138 outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
1139 }
1140
1141 lance_restart(dev, 0x0142, 0); /* Resume normal operation */
1142
1143 }
1144
1145
1146 /*
1147 * Local variables:
1148 * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c lance.c"
1149 * c-indent-level: 4
1150 * tab-width: 4
1151 * End:
1152 */