1 /* lance.c: An AMD LANCE ethernet driver for linux. */
2 /*
3 Written 1993,1994,1995 by Donald Becker.
4
5 Copyright 1993 United States Government as represented by the
6 Director, National Security Agency.
7 This software may be used and distributed according to the terms
8 of the GNU Public License, incorporated herein by reference.
9
10 This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
11 with most other LANCE-based bus-master (NE2100 clone) ethercards.
12
13 The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
14 Center of Excellence in Space Data and Information Sciences
15 Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
16 */
17
18 static const char *version = "lance.c:v1.08 4/10/95 dplatt@3do.com\n";
19
20 #include <linux/config.h>
21 #include <linux/kernel.h>
22 #include <linux/sched.h>
23 #include <linux/string.h>
24 #include <linux/ptrace.h>
25 #include <linux/errno.h>
26 #include <linux/ioport.h>
27 #include <linux/malloc.h>
28 #include <linux/interrupt.h>
29 #include <linux/pci.h>
30 #include <linux/bios32.h>
31 #include <asm/bitops.h>
32 #include <asm/io.h>
33 #include <asm/dma.h>
34
35 #include <linux/netdevice.h>
36 #include <linux/etherdevice.h>
37 #include <linux/skbuff.h>
38
39 static unsigned int lance_portlist[] = {0x300, 0x320, 0x340, 0x360, 0};
40 unsigned long lance_probe1(int ioaddr, unsigned long mem_start);
41
42 #ifdef HAVE_DEVLIST
43 struct netdev_entry lance_drv =
44 {"lance", lance_probe1, LANCE_TOTAL_SIZE, lance_portlist};
45 #endif
46
47 #ifdef LANCE_DEBUG
48 int lance_debug = LANCE_DEBUG;
49 #else
50 int lance_debug = 1;
51 #endif
52
53 /*
54 Theory of Operation
55
56 I. Board Compatibility
57
58 This device driver is designed for the AMD 79C960, the "PCnet-ISA
59 single-chip ethernet controller for ISA". This chip is used in a wide
60 variety of boards from vendors such as Allied Telesis, HP, Kingston,
61 and Boca. This driver is also intended to work with older AMD 7990
62 designs, such as the NE1500 and NE2100, and newer 79C961. For convenience,
63 I use the name LANCE to refer to all of the AMD chips, even though it properly
64 refers only to the original 7990.
65
66 II. Board-specific settings
67
68 The driver is designed to work the boards that use the faster
69 bus-master mode, rather than in shared memory mode. (Only older designs
70 have on-board buffer memory needed to support the slower shared memory mode.)
71
72 Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
73 channel. This driver probes the likely base addresses:
74 {0x300, 0x320, 0x340, 0x360}.
75 After the board is found it generates a DMA-timeout interrupt and uses
76 autoIRQ to find the IRQ line. The DMA channel can be set with the low bits
77 of the otherwise-unused dev->mem_start value (aka PARAM1). If unset it is
78 probed for by enabling each free DMA channel in turn and checking if
79 initialization succeeds.
80
81 The HP-J2405A board is an exception: with this board it's easy to read the
82 EEPROM-set values for the base, IRQ, and DMA. (Of course you must already
83 _know_ the base address -- that field is for writing the EEPROM.)
84
85 III. Driver operation
86
87 IIIa. Ring buffers
88 The LANCE uses ring buffers of Tx and Rx descriptors. Each entry describes
89 the base and length of the data buffer, along with status bits. The length
90 of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
91 the buffer length (rather than being directly the buffer length) for
92 implementation ease. The current values are 4 (Tx) and 4 (Rx), which leads to
93 ring sizes of 16 (Tx) and 16 (Rx). Increasing the number of ring entries
94 needlessly uses extra space and reduces the chance that an upper layer will
95 be able to reorder queued Tx packets based on priority. Decreasing the number
96 of entries makes it more difficult to achieve back-to-back packet transmission
97 and increases the chance that Rx ring will overflow. (Consider the worst case
98 of receiving back-to-back minimum-sized packets.)
99
100 The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
101 statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
102 avoid the administrative overhead. For the Rx side this avoids dynamically
103 allocating full-sized buffers "just in case", at the expense of a
104 memory-to-memory data copy for each packet received. For most systems this
105 is a good tradeoff: the Rx buffer will always be in low memory, the copy
106 is inexpensive, and it primes the cache for later packet processing. For Tx
107 the buffers are only used when needed as low-memory bounce buffers.
108
109 IIIB. 16M memory limitations.
110 For the ISA bus master mode all structures used directly by the LANCE,
111 the initialization block, Rx and Tx rings, and data buffers, must be
112 accessible from the ISA bus, i.e. in the lower 16M of real memory.
113 This is a problem for current Linux kernels on >16M machines. The network
114 devices are initialized after memory initialization, and the kernel doles out
115 memory from the top of memory downward. The current solution is to have a
116 special network initialization routine that's called before memory
117 initialization; this will eventually be generalized for all network devices.
118 As mentioned before, low-memory "bounce-buffers" are used when needed.
119
120 IIIC. Synchronization
121 The driver runs as two independent, single-threaded flows of control. One
122 is the send-packet routine, which enforces single-threaded use by the
123 dev->tbusy flag. The other thread is the interrupt handler, which is single
124 threaded by the hardware and other software.
125
126 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
127 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
128 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
129 the 'lp->tx_full' flag.
130
131 The interrupt handler has exclusive control over the Rx ring and records stats
132 from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
133 we can't avoid the interrupt overhead by having the Tx routine reap the Tx
134 stats.) After reaping the stats, it marks the queue entry as empty by setting
135 the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
136 tx_full and tbusy flags.
137
138 */
139
140 /* Set the number of Tx and Rx buffers, using Log_2(# buffers).
141 Reasonable default values are 16 Tx buffers, and 16 Rx buffers.
142 That translates to 4 for both rings (16 == 2^^4). */
143 #ifndef LANCE_LOG_TX_BUFFERS
144 #define LANCE_LOG_TX_BUFFERS 4
145 #define LANCE_LOG_RX_BUFFERS 4
146 #endif
147
148 #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
149 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
150 #define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
151
152 #define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
153 #define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
154 #define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
155
156 #define PKT_BUF_SZ 1544
157
158 /* Offsets from base I/O address. */
159 #define LANCE_DATA 0x10
160 #define LANCE_ADDR 0x12
161 #define LANCE_RESET 0x14
162 #define LANCE_BUS_IF 0x16
163 #define LANCE_TOTAL_SIZE 0x18
164
165 /* The LANCE Rx and Tx ring descriptors. */
/* One Rx descriptor.  'base' carries the buffer bus address in its low
   bytes plus ownership/status in the top byte: 0x80000000 is set when
   the buffer is handed to (owned by) the LANCE -- see lance_init_ring(). */
166 struct lance_rx_head {
167 int base;
168 short buf_length; /* This length is 2s complement (negative)! */
169 short msg_length; /* This length is "normal": actual received byte count. */
170 };
171
/* One Tx descriptor.  'base' carries the buffer bus address plus
   ownership/status bits (the driver queues a packet by writing
   address | 0x83000000; the chip clears ownership when done). */
172 struct lance_tx_head {
173 int base;
174 short length; /* Length is 2s complement (negative)! */
175 short misc; /* Error bits filled in by the chip on Tx completion. */
176 };
177
178 /* The LANCE initialization block, described in databook. */
179 struct lance_init_block {
180 unsigned short mode; /* Pre-set mode (reg. 15) */
181 unsigned char phys_addr[6]; /* Physical ethernet address */
182 unsigned filter[2]; /* Multicast filter (unused). */
183 /* Receive and transmit ring base, along with extra bits. */
/* The top three bits hold log2(ring length); see {RX,TX}_RING_LEN_BITS. */
184 unsigned rx_ring; /* Tx and Rx ring base pointers */
185 unsigned tx_ring;
186 };
187
/* Per-device driver state.  Also read directly by the chip via DMA, so
   the whole structure must live below 16M (see lance_probe1()). */
188 struct lance_private {
189 /* The Tx and Rx ring entries must be aligned on 8-byte boundaries.
190 This is accomplished by allocating 7 extra bytes for the struct
191 and adjusting the start of the struct to be 8-byte aligned. */
192 struct lance_rx_head rx_ring[RX_RING_SIZE];
193 struct lance_tx_head tx_ring[TX_RING_SIZE];
194 struct lance_init_block init_block;
195 const char *name;
196 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
197 struct sk_buff* tx_skbuff[TX_RING_SIZE];
198 long rx_buffs; /* Address of Rx and Tx buffers. */
199 /* Tx low-memory "bounce buffer" address. */
200 char (*tx_bounce_buffs)[PKT_BUF_SZ];
201 int cur_rx, cur_tx; /* The next free ring entry */
202 int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
/* ISA DMA channel (presumably mirrors dev->dma; set outside this view). */
203 int dma;
204 struct enet_statistics stats;
205 unsigned char chip_version; /* See lance_chip_type. */
/* Set when no free Tx slot remains; cleared with tbusy by the Tx-done path. */
206 char tx_full;
/* Software lock on the transmit path, taken via set_bit() in lance_start_xmit(). */
207 char lock;
208 };
209
210 #define LANCE_MUST_PAD 0x00000001
211 #define LANCE_ENABLE_AUTOSELECT 0x00000002
212 #define LANCE_MUST_REINIT_RING 0x00000004
213 #define LANCE_MUST_UNRESET 0x00000008
214 #define LANCE_HAS_MISSED_FRAME 0x00000010
215
216 /* A mapping from the chip ID number to the part number and features.
217 These are from the datasheets -- in real life the '970 version
218 reportedly has the same ID as the '965. */
219 static struct lance_chip_type {
220 int id_number;
221 const char *name;
222 int flags;
223 } chip_table[] = {
224 {0x0000, "LANCE 7990", /* Ancient lance chip. */
225 LANCE_MUST_PAD + LANCE_MUST_UNRESET},
226 {0x0003, "PCnet/ISA 79C960", /* 79C960 PCnet/ISA. */
227 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
228 LANCE_HAS_MISSED_FRAME},
229 {0x2260, "PCnet/ISA+ 79C961", /* 79C961 PCnet/ISA+, Plug-n-Play. */
230 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
231 LANCE_HAS_MISSED_FRAME},
232 {0x2420, "PCnet/PCI 79C970", /* 79C970 or 79C974 PCnet-SCSI, PCI. */
233 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
234 LANCE_HAS_MISSED_FRAME},
235 /* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
236 it the PCnet32. */
237 {0x2430, "PCnet32", /* 79C965 PCnet for VL bus. */
238 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
239 LANCE_HAS_MISSED_FRAME},
240 {0x0, "PCnet (unknown)",
241 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
242 LANCE_HAS_MISSED_FRAME},
243 };
244
245 enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, LANCE_UNKNOWN=5};
246
247 /* Non-zero only if the current card is a PCI with BIOS-set IRQ. */
248 static unsigned char pci_irq_line = 0;
249
250 /* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
251 Assume yes until we know the memory size. */
252 static unsigned char lance_need_isa_bounce_buffers = 1;
253
254 static int lance_open(struct device *dev);
255 static void lance_init_ring(struct device *dev);
256 static int lance_start_xmit(struct sk_buff *skb, struct device *dev);
257 static int lance_rx(struct device *dev);
258 static void lance_interrupt(int irq, struct pt_regs *regs);
259 static int lance_close(struct device *dev);
260 static struct enet_statistics *lance_get_stats(struct device *dev);
261 #ifdef HAVE_MULTICAST
262 static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);
263 #endif
264
265
266
267 /* This lance probe is unlike the other board probes in 1.0.*. The LANCE may
268 have to allocate a contiguous low-memory region for bounce buffers.
269 This requirement is satisfied by having the lance initialization occur
270 before the memory management system is started, and thus well before the
271 other probes. */
272
/* Probe for all LANCE boards: first any PCnet/PCI parts (when PCI BIOS
   support is compiled in), then the standard ISA I/O ports.  Runs before
   memory management is started so lance_probe1() can carve low (<16M)
   buffers out of mem_start.  Returns the updated mem_start. */
273 unsigned long lance_init(unsigned long mem_start, unsigned long mem_end)
274 {
275 int *port;
276
/* Machines with no memory above 16M never need ISA bounce buffers. */
277 if (mem_end <= 16*1024*1024)
278 lance_need_isa_bounce_buffers = 0;
279
280 #if defined(CONFIG_PCI)
281 if (pcibios_present()) {
282 int pci_index;
283 printk("lance.c: PCI bios is present, checking for devices...\n");
284 for (pci_index = 0; pci_index < 8; pci_index++) {
285 unsigned char pci_bus, pci_device_fn;
286 unsigned int pci_ioaddr;
287 unsigned short pci_command;
288
289 if (pcibios_find_device (PCI_VENDOR_ID_AMD,
290 PCI_DEVICE_ID_AMD_LANCE, pci_index,
291 &pci_bus, &pci_device_fn) != 0)
292 break;
293 pcibios_read_config_byte(pci_bus, pci_device_fn,
294 PCI_INTERRUPT_LINE, &pci_irq_line);
295 pcibios_read_config_dword(pci_bus, pci_device_fn,
296 PCI_BASE_ADDRESS_0, &pci_ioaddr);
297 /* Remove I/O space marker in bit 0. */
298 pci_ioaddr &= ~3;
299 /* PCI Spec 2.1 states that it is either the driver or PCI card's
300 * responsibility to set the PCI Master Enable Bit if needed.
301 * (From Mark Stockton <marks@schooner.sys.hou.compaq.com>)
302 */
303 pcibios_read_config_word(pci_bus, pci_device_fn,
304 PCI_COMMAND, &pci_command);
305 if ( ! (pci_command & PCI_COMMAND_MASTER)) {
306 printk("PCI Master Bit has not been set. Setting...\n");
307 pci_command |= PCI_COMMAND_MASTER;
308 pcibios_write_config_word(pci_bus, pci_device_fn,
309 PCI_COMMAND, pci_command);
310 }
311 printk("Found PCnet/PCI at %#x, irq %d (mem_start is %#lx).\n",
312 pci_ioaddr, pci_irq_line, mem_start);
313 mem_start = lance_probe1(pci_ioaddr, mem_start);
/* Clear the global so the ISA probes below don't inherit the PCI IRQ. */
314 pci_irq_line = 0;
315 }
316 }
317 #endif /* defined(CONFIG_PCI) */
318
/* Now scan the standard ISA probe addresses. */
319 for (port = lance_portlist; *port; port++) {
320 int ioaddr = *port;
321
322 if ( check_region(ioaddr, LANCE_TOTAL_SIZE) == 0) {
323 /* Detect "normal" 0x57 0x57 and the NI6510EB 0x52 0x44
324 signatures w/ minimal I/O reads */
325 char offset15, offset14 = inb(ioaddr + 14);
326
327 if ((offset14 == 0x52 || offset14 == 0x57) &&
328 ((offset15 = inb(ioaddr + 15)) == 0x57 || offset15 == 0x44))
329 mem_start = lance_probe1(ioaddr, mem_start);
330 }
331 }
332
333 return mem_start;
334 }
335
/* Probe one I/O address for a LANCE, register the ethernet device, and
   carve its private structure plus packet buffers out of mem_start.
   Determines the chip variant, station address, IRQ and DMA channel
   (board-specific EEPROM, PnP registers, or auto-detection).  Returns
   the (possibly advanced) mem_start; the value is returned unchanged
   when no chip is found or setup fails. */
336 unsigned long lance_probe1(int ioaddr, unsigned long mem_start)
337 {
338 struct device *dev;
339 struct lance_private *lp;
340 short dma_channels; /* Mark spuriously-busy DMA channels */
341 int i, reset_val, lance_version;
342 const char *chipname;
343 /* Flags for specific chips or boards. */
344 unsigned char hpJ2405A = 0; /* HP ISA adaptor */
345 int hp_builtin = 0; /* HP on-board ethernet. */
346 static int did_version = 0; /* Already printed version info. */
347
348 /* First we look for special cases.
349 Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
350 There are two HP versions, check the BIOS for the configuration port.
351 This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
352 */
353 if ( *((unsigned short *) 0x000f0102) == 0x5048) {
354 static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
355 int hp_port = ( *((unsigned char *) 0x000f00f1) & 1) ? 0x499 : 0x99;
356 /* We can have boards other than the built-in! Verify this is on-board. */
357 if ((inb(hp_port) & 0xc0) == 0x80
358 && ioaddr_table[inb(hp_port) & 3] == ioaddr)
359 hp_builtin = hp_port;
360 }
361 /* We also recognize the HP Vectra on-board here, but check below. */
362 hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00
363 && inb(ioaddr+2) == 0x09);
364
365 /* Reset the LANCE. */
366 reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
367
368 /* The Un-Reset needed is only needed for the real NE2100, and will
369 confuse the HP board. */
370 if (!hpJ2405A)
371 outw(reset_val, ioaddr+LANCE_RESET);
372
/* CSR0 must read back the stop-state value 0x0004 or there is no LANCE here. */
373 outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
374 if (inw(ioaddr+LANCE_DATA) != 0x0004)
375 return mem_start;
376
377 /* Get the version of the chip. */
/* Old 7990 parts have no CSR88; the address register won't hold the value. */
378 outw(88, ioaddr+LANCE_ADDR);
379 if (inw(ioaddr+LANCE_ADDR) != 88) {
380 lance_version = 0;
381 } else { /* Good, it's a newer chip. */
382 int chip_version = inw(ioaddr+LANCE_DATA);
383 outw(89, ioaddr+LANCE_ADDR);
384 chip_version |= inw(ioaddr+LANCE_DATA) << 16;
385 if (lance_debug > 2)
386 printk(" LANCE chip version is %#x.\n", chip_version);
387 if ((chip_version & 0xfff) != 0x003)
388 return mem_start;
389 chip_version = (chip_version >> 12) & 0xffff;
/* Unmatched IDs fall through to the terminating "PCnet (unknown)" entry. */
390 for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
391 if (chip_table[lance_version].id_number == chip_version)
392 break;
393 }
394 }
395
/* Allocate private state + Rx buffers (+ Tx bounce buffers when needed)
   out of mem_start, with 7 spare bytes for 8-byte alignment below. */
396 dev = init_etherdev(0, 7
397 + ((sizeof(struct lance_private) + 7) & ~7)
398 + PKT_BUF_SZ*RX_RING_SIZE
399 + (lance_need_isa_bounce_buffers
400 ? PKT_BUF_SZ*TX_RING_SIZE
401 : 0),
402 &mem_start);
403
404 chipname = chip_table[lance_version].name;
405 printk("%s: %s at %#3x,", dev->name, chipname, ioaddr);
406
407 /* There is a 16 byte station address PROM at the base address.
408 The first six bytes are the station address. */
409 for (i = 0; i < 6; i++)
410 printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
411
412 dev->base_addr = ioaddr;
413 request_region(ioaddr, LANCE_TOTAL_SIZE, chip_table[lance_version].name);
414
415 /* Make certain the data structures used by the LANCE are aligned. */
416 dev->priv = (void *)(((int)dev->priv + 7) & ~7);
417 lp = (struct lance_private *)dev->priv;
418 lp->name = chipname;
419 lp->rx_buffs = (long)lp + ((sizeof(struct lance_private) + 7) & ~7);
420 lp->tx_bounce_buffs = (char (*)[PKT_BUF_SZ])
421 (lp->rx_buffs + PKT_BUF_SZ*RX_RING_SIZE);
422
423 lp->chip_version = lance_version;
424
425 lp->init_block.mode = 0x0003; /* Disable Rx and Tx. */
426 for (i = 0; i < 6; i++)
427 lp->init_block.phys_addr[i] = dev->dev_addr[i];
428 lp->init_block.filter[0] = 0x00000000;
429 lp->init_block.filter[1] = 0x00000000;
430 lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
431 lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;
432
/* Program the init-block bus address into CSR1 (low) and CSR2 (high). */
433 outw(0x0001, ioaddr+LANCE_ADDR);
434 inw(ioaddr+LANCE_ADDR);
435 outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
436 outw(0x0002, ioaddr+LANCE_ADDR);
437 inw(ioaddr+LANCE_ADDR);
438 outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);
439 outw(0x0000, ioaddr+LANCE_ADDR);
440 inw(ioaddr+LANCE_ADDR);
441
/* Pick IRQ/DMA by board type: PCI BIOS, HP on-board, HP J2405A EEPROM,
   PnP registers, or the user-supplied PARAM1 / auto-detection below. */
442 if (pci_irq_line) {
443 dev->dma = 4; /* Native bus-master, no DMA channel needed. */
444 dev->irq = pci_irq_line;
445 } else if (hp_builtin) {
446 static const char dma_tbl[4] = {3, 5, 6, 0};
447 static const char irq_tbl[4] = {3, 4, 5, 9};
448 unsigned char port_val = inb(hp_builtin);
449 dev->dma = dma_tbl[(port_val >> 4) & 3];
450 dev->irq = irq_tbl[(port_val >> 2) & 3];
451 printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
452 } else if (hpJ2405A) {
453 static const char dma_tbl[4] = {3, 5, 6, 7};
454 static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
455 short reset_val = inw(ioaddr+LANCE_RESET);
456 dev->dma = dma_tbl[(reset_val >> 2) & 3];
457 dev->irq = irq_tbl[(reset_val >> 4) & 7];
458 printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
459 } else if (lance_version == PCNET_ISAP) { /* The plug-n-play version. */
460 short bus_info;
461 outw(8, ioaddr+LANCE_ADDR);
462 bus_info = inw(ioaddr+LANCE_BUS_IF);
463 dev->dma = bus_info & 0x07;
464 dev->irq = (bus_info >> 4) & 0x0F;
465 } else {
466 /* The DMA channel may be passed in PARAM1. */
467 if (dev->mem_start & 0x07)
468 dev->dma = dev->mem_start & 0x07;
469 }
470
471 if (dev->dma == 0) {
472 /* Read the DMA channel status register, so that we can avoid
473 stuck DMA channels in the DMA detection below. */
474 dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
475 (inb(DMA2_STAT_REG) & 0xf0);
476 }
477 if (dev->irq >= 2)
478 printk(" assigned IRQ %d", dev->irq);
479 else {
480 /* To auto-IRQ we enable the initialization-done and DMA error
481 interrupts. For ISA boards we get a DMA error, but VLB and PCI
482 boards will work. */
483 autoirq_setup(0);
484
485 /* Trigger an initialization just for the interrupt. */
486 outw(0x0041, ioaddr+LANCE_DATA);
487
488 dev->irq = autoirq_report(1);
489 if (dev->irq)
490 printk(", probed IRQ %d", dev->irq);
491 else {
492 printk(", failed to detect IRQ line.\n");
493 return mem_start;
494 }
495
496 /* Check for the initialization done bit, 0x0100, which means
497 that we don't need a DMA channel. */
498 if (inw(ioaddr+LANCE_DATA) & 0x0100)
499 dev->dma = 4;
500 }
501
502 if (dev->dma == 4) {
503 printk(", no DMA needed.\n");
504 } else if (dev->dma) {
505 if (request_dma(dev->dma, chipname)) {
506 printk("DMA %d allocation failed.\n", dev->dma);
507 return mem_start;
508 } else
509 printk(", assigned DMA %d.\n", dev->dma);
510 } else { /* OK, we have to auto-DMA. */
/* Try each candidate channel in cascade mode and see whether the chip
   manages to complete an initialization through it. */
511 for (i = 0; i < 4; i++) {
512 static const char dmas[] = { 5, 6, 7, 3 };
513 int dma = dmas[i];
514 int boguscnt;
515
516 /* Don't enable a permanently busy DMA channel, or the machine
517 will hang. */
518 if (test_bit(dma, &dma_channels))
519 continue;
520 outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
521 if (request_dma(dma, chipname))
522 continue;
523 set_dma_mode(dma, DMA_MODE_CASCADE);
524 enable_dma(dma);
525
526 /* Trigger an initialization. */
527 outw(0x0001, ioaddr+LANCE_DATA);
/* Poll for either init-done (0x0100) or an error (0x0800). */
528 for (boguscnt = 100; boguscnt > 0; --boguscnt)
529 if (inw(ioaddr+LANCE_DATA) & 0x0900)
530 break;
531 if (inw(ioaddr+LANCE_DATA) & 0x0100) {
532 dev->dma = dma;
533 printk(", DMA %d.\n", dev->dma);
534 break;
535 } else {
536 disable_dma(dma);
537 free_dma(dma);
538 }
539 }
540 if (i == 4) { /* Failure: bail. */
541 printk("DMA detection failed.\n");
542 return mem_start;
543 }
544 }
545
546 if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
547 /* Turn on auto-select of media (10baseT or BNC) so that the user
548 can watch the LEDs even if the board isn't opened. */
549 outw(0x0002, ioaddr+LANCE_ADDR);
550 outw(0x0002, ioaddr+LANCE_BUS_IF);
551 }
552
553 if (lance_debug > 0 && did_version++ == 0)
554 printk(version);
555
556 /* The LANCE-specific entries in the device structure. */
557 dev->open = &lance_open;
558 dev->hard_start_xmit = &lance_start_xmit;
559 dev->stop = &lance_close;
560 dev->get_stats = &lance_get_stats;
561 dev->set_multicast_list = &set_multicast_list;
562
563 return mem_start;
564 }
565
566
567 static int
568 lance_open(struct device *dev)
/* Bring the interface up: claim the IRQ, set up the (cascade-mode) DMA
   channel, reset and reinitialize the chip, and start it.  Returns 0 on
   success or -EAGAIN when no IRQ is configured / the IRQ is busy. */
569 {
570 struct lance_private *lp = (struct lance_private *)dev->priv;
571 int ioaddr = dev->base_addr;
572 int i;
573
574 if (dev->irq == 0 ||
575 request_irq(dev->irq, &lance_interrupt, 0, lp->name)) {
576 return -EAGAIN;
577 }
578
579 /* We used to allocate DMA here, but that was silly.
580 DMA lines can't be shared! We now permanently snarf them. */
581
582 irq2dev_map[dev->irq] = dev;
583
584 /* Reset the LANCE */
585 inw(ioaddr+LANCE_RESET);
586
587 /* The DMA controller is used as a no-operation slave, "cascade mode". */
/* dma == 4 marks a native bus-master board that needs no ISA DMA channel. */
588 if (dev->dma != 4) {
589 enable_dma(dev->dma);
590 set_dma_mode(dev->dma, DMA_MODE_CASCADE);
591 }
592
593 /* Un-Reset the LANCE, needed only for the NE2100. */
594 if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
595 outw(0, ioaddr+LANCE_RESET);
596
597 if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
598 /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
599 outw(0x0002, ioaddr+LANCE_ADDR);
600 outw(0x0002, ioaddr+LANCE_BUS_IF);
601 }
602
603 if (lance_debug > 1)
604 printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
605 dev->name, dev->irq, dev->dma, (int) lp->tx_ring, (int) lp->rx_ring,
606 (int) &lp->init_block);
607
608 lance_init_ring(dev);
609 /* Re-initialize the LANCE, and start it when done. */
/* CSR1/CSR2 hold the low/high halves of the init block bus address. */
610 outw(0x0001, ioaddr+LANCE_ADDR);
611 outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
612 outw(0x0002, ioaddr+LANCE_ADDR);
613 outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);
614
615 outw(0x0004, ioaddr+LANCE_ADDR);
616 outw(0x0915, ioaddr+LANCE_DATA);
617
/* CSR0: kick off chip initialization from the init block. */
618 outw(0x0000, ioaddr+LANCE_ADDR);
619 outw(0x0001, ioaddr+LANCE_DATA);
620
621 dev->tbusy = 0;
622 dev->interrupt = 0;
623 dev->start = 1;
624 i = 0;
/* Busy-wait (bounded) for the init-done bit, 0x0100, in CSR0. */
625 while (i++ < 100)
626 if (inw(ioaddr+LANCE_DATA) & 0x0100)
627 break;
628 /*
629 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
630 * reports that doing so triggers a bug in the '974.
631 */
/* Start the chip with interrupts enabled (0x0042 -- presumably
   STRT|INEA per the datasheet; confirm against CSR0 bit definitions). */
632 outw(0x0042, ioaddr+LANCE_DATA);
633
634 if (lance_debug > 2)
635 printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
636 dev->name, i, (int) &lp->init_block, inw(ioaddr+LANCE_DATA));
637
638 return 0; /* Always succeed */
639 }
640
641 /* The LANCE has been halted for one reason or another (busmaster memory
642 arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
643 etc.). Modern LANCE variants always reload their ring-buffer
644 configuration when restarted, so we must reinitialize our ring
645 context before restarting. As part of this reinitialization,
646 find all packets still on the Tx ring and pretend that they had been
647 sent (in effect, drop the packets on the floor) - the higher-level
648 protocols will time out and retransmit. It'd be better to shuffle
649 these skbs to a temp list and then actually re-Tx them after
650 restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
651 */
652
653 static void
654 lance_purge_tx_ring(struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
655 {
656 struct lance_private *lp = (struct lance_private *)dev->priv;
657 int i;
658
659 for (i = 0; i < TX_RING_SIZE; i++) {
660 if (lp->tx_skbuff[i]) {
661 dev_kfree_skb(lp->tx_skbuff[i],FREE_WRITE);
662 lp->tx_skbuff[i] = NULL;
663 }
664 }
665 }
666
667
668 /* Initialize the LANCE Rx and Tx rings. */
669 static void
670 lance_init_ring(struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
671 {
672 struct lance_private *lp = (struct lance_private *)dev->priv;
673 int i;
674
675 lp->lock = 0, lp->tx_full = 0;
676 lp->cur_rx = lp->cur_tx = 0;
677 lp->dirty_rx = lp->dirty_tx = 0;
678
679 for (i = 0; i < RX_RING_SIZE; i++) {
680 lp->rx_ring[i].base = (lp->rx_buffs + i*PKT_BUF_SZ) | 0x80000000;
681 lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
682 }
683 /* The Tx buffer address is filled in as needed, but we do need to clear
684 the upper ownership bit. */
685 for (i = 0; i < TX_RING_SIZE; i++) {
686 lp->tx_ring[i].base = 0;
687 }
688
689 lp->init_block.mode = 0x0000;
690 for (i = 0; i < 6; i++)
691 lp->init_block.phys_addr[i] = dev->dev_addr[i];
692 lp->init_block.filter[0] = 0x00000000;
693 lp->init_block.filter[1] = 0x00000000;
694 lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
695 lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;
696 }
697
698 static void
699 lance_restart(struct device *dev, unsigned int csr0_bits, int must_reinit)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
700 {
701 struct lance_private *lp = (struct lance_private *)dev->priv;
702
703 if (must_reinit ||
704 (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
705 lance_purge_tx_ring(dev);
706 lance_init_ring(dev);
707 }
708 outw(0x0000, dev->base_addr + LANCE_ADDR);
709 outw(csr0_bits, dev->base_addr + LANCE_DATA);
710 }
711
712 static int
713 lance_start_xmit(struct sk_buff *skb, struct device *dev)
/* Queue one packet for transmission.  Handles the transmit-timeout
   recovery path first (tbusy stuck for >= 20 ticks), then fills in a Tx
   ring slot, bouncing the data through a low-memory buffer when any of
   it lies above 16M.  Returns 0 on success, 1 to ask the caller to
   requeue (transmitter busy/locked). */
714 {
715 struct lance_private *lp = (struct lance_private *)dev->priv;
716 int ioaddr = dev->base_addr;
717 int entry;
718 unsigned long flags;
719
720 /* Transmitter timeout, serious problems. */
721 if (dev->tbusy) {
722 int tickssofar = jiffies - dev->trans_start;
723 if (tickssofar < 20)
724 return 1;
725 outw(0, ioaddr+LANCE_ADDR);
726 printk("%s: transmit timed out, status %4.4x, resetting.\n",
727 dev->name, inw(ioaddr+LANCE_DATA));
/* CSR0 stop bit: halt the chip before dumping state and restarting. */
728 outw(0x0004, ioaddr+LANCE_DATA);
729 lp->stats.tx_errors++;
730 #ifndef final_version
731 {
732 int i;
733 printk(" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
734 lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
735 lp->cur_rx);
736 for (i = 0 ; i < RX_RING_SIZE; i++)
737 printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
738 lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
739 lp->rx_ring[i].msg_length);
740 for (i = 0 ; i < TX_RING_SIZE; i++)
741 printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
742 lp->tx_ring[i].base, -lp->tx_ring[i].length,
743 lp->tx_ring[i].misc);
744 printk("\n");
745 }
746 #endif
747 lance_restart(dev, 0x0043, 1);
748
749 dev->tbusy=0;
750 dev->trans_start = jiffies;
751
752 return 0;
753 }
754
/* A NULL skb means "poll for queued transmissions" in this era's API. */
755 if (skb == NULL) {
756 dev_tint(dev);
757 return 0;
758 }
759
760 if (skb->len <= 0)
761 return 0;
762
763 if (lance_debug > 3) {
764 outw(0x0000, ioaddr+LANCE_ADDR);
765 printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
766 inw(ioaddr+LANCE_DATA));
767 outw(0x0000, ioaddr+LANCE_DATA);
768 }
769
770 /* Block a timer-based transmit from overlapping. This could better be
771 done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
772 if (set_bit(0, (void*)&dev->tbusy) != 0) {
773 printk("%s: Transmitter access conflict.\n", dev->name);
774 return 1;
775 }
776
/* Second-level software lock shared with the interrupt handler. */
777 if (set_bit(0, (void*)&lp->lock) != 0) {
778 if (lance_debug > 0)
779 printk("%s: tx queue lock!.\n", dev->name);
780 /* don't clear dev->tbusy flag. */
781 return 1;
782 }
783
784 /* Fill in a Tx ring entry */
785
786 /* Mask to ring buffer boundary. */
787 entry = lp->cur_tx & TX_RING_MOD_MASK;
788
789 /* Caution: the write order is important here, set the base address
790 with the "ownership" bits last. */
791
792 /* The old LANCE chips don't automatically pad buffers to min. size. */
793 if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
794 lp->tx_ring[entry].length =
795 -(ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN);
796 } else
797 lp->tx_ring[entry].length = -skb->len;
798
799 lp->tx_ring[entry].misc = 0x0000;
800
801 /* If any part of this buffer is >16M we must copy it to a low-memory
802 buffer. */
803 if ((int)(skb->data) + skb->len > 0x01000000) {
804 if (lance_debug > 5)
805 printk("%s: bouncing a high-memory packet (%#x).\n",
806 dev->name, (int)(skb->data));
807 memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len);
808 lp->tx_ring[entry].base =
809 (int)(lp->tx_bounce_buffs + entry) | 0x83000000;
/* The data was copied, so the skb can be freed immediately. */
810 dev_kfree_skb (skb, FREE_WRITE);
811 } else {
/* Transmit in place; remember the skb so the Tx-done path can free it. */
812 lp->tx_skbuff[entry] = skb;
813 lp->tx_ring[entry].base = (int)(skb->data) | 0x83000000;
814 }
815 lp->cur_tx++;
816
817 /* Trigger an immediate send poll. */
818 outw(0x0000, ioaddr+LANCE_ADDR);
819 outw(0x0048, ioaddr+LANCE_DATA);
820
821 dev->trans_start = jiffies;
822
/* Release the lock and decide tbusy/tx_full atomically w.r.t. the IRQ
   handler by disabling interrupts around the check. */
823 save_flags(flags);
824 cli();
825 lp->lock = 0;
826 if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base == 0)
827 dev->tbusy=0;
828 else
829 lp->tx_full = 1;
830 restore_flags(flags);
831
832 return 0;
833 }
834
835 /* The LANCE interrupt handler. */
836 static void
837 lance_interrupt(int irq, struct pt_regs * regs)
838 {
839 struct device *dev = (struct device *)(irq2dev_map[irq]);
840 struct lance_private *lp;
841 int csr0, ioaddr, boguscnt=10;
842 int must_restart;
843
844 if (dev == NULL) {
845 printk ("lance_interrupt(): irq %d for unknown device.\n", irq);
846 return;
847 }
848
849 ioaddr = dev->base_addr;
850 lp = (struct lance_private *)dev->priv;
851 if (dev->interrupt)
852 printk("%s: Re-entering the interrupt handler.\n", dev->name);
853
854 dev->interrupt = 1;
855
856 outw(0x00, dev->base_addr + LANCE_ADDR);
857 while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600
858 && --boguscnt >= 0) {
859 /* Acknowledge all of the current interrupt sources ASAP. */
860 outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
861
862 must_restart = 0;
863
864 if (lance_debug > 5)
865 printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
866 dev->name, csr0, inw(dev->base_addr + LANCE_DATA));
867
868 if (csr0 & 0x0400) /* Rx interrupt */
869 lance_rx(dev);
870
871 if (csr0 & 0x0200) { /* Tx-done interrupt */
872 int dirty_tx = lp->dirty_tx;
873
874 while (dirty_tx < lp->cur_tx) {
875 int entry = dirty_tx & TX_RING_MOD_MASK;
876 int status = lp->tx_ring[entry].base;
877
878 if (status < 0)
879 break; /* It still hasn't been Txed */
880
881 lp->tx_ring[entry].base = 0;
882
883 if (status & 0x40000000) {
884 /* There was an major error, log it. */
885 int err_status = lp->tx_ring[entry].misc;
886 lp->stats.tx_errors++;
887 if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
888 if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
889 if (err_status & 0x1000) lp->stats.tx_window_errors++;
890 if (err_status & 0x4000) {
891 /* Ackk! On FIFO errors the Tx unit is turned off! */
892 lp->stats.tx_fifo_errors++;
893 /* Remove this verbosity later! */
894 printk("%s: Tx FIFO error! Status %4.4x.\n",
895 dev->name, csr0);
896 /* Restart the chip. */
897 must_restart = 1;
898 }
899 } else {
900 if (status & 0x18000000)
901 lp->stats.collisions++;
902 lp->stats.tx_packets++;
903 }
904
905 /* We must free the original skb if it's not a data-only copy
906 in the bounce buffer. */
907 if (lp->tx_skbuff[entry]) {
908 dev_kfree_skb(lp->tx_skbuff[entry],FREE_WRITE);
909 lp->tx_skbuff[entry] = 0;
910 }
911 dirty_tx++;
912 }
913
914 #ifndef final_version
915 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
916 printk("out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
917 dirty_tx, lp->cur_tx, lp->tx_full);
918 dirty_tx += TX_RING_SIZE;
919 }
920 #endif
921
922 if (lp->tx_full && dev->tbusy
923 && dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
924 /* The ring is no longer full, clear tbusy. */
925 lp->tx_full = 0;
926 dev->tbusy = 0;
927 mark_bh(NET_BH);
928 }
929
930 lp->dirty_tx = dirty_tx;
931 }
932
933 /* Log misc errors. */
934 if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
935 if (csr0 & 0x1000) lp->stats.rx_errors++; /* Missed a Rx frame. */
936 if (csr0 & 0x0800) {
937 printk("%s: Bus master arbitration failure, status %4.4x.\n",
938 dev->name, csr0);
939 /* Restart the chip. */
940 must_restart = 1;
941 }
942
943 if (must_restart) {
944 /* stop the chip to clear the error condition, then restart */
945 outw(0x0000, dev->base_addr + LANCE_ADDR);
946 outw(0x0004, dev->base_addr + LANCE_DATA);
947 lance_restart(dev, 0x0002, 0);
948 }
949 }
950
951 /* Clear any other interrupt, and set interrupt enable. */
952 outw(0x0000, dev->base_addr + LANCE_ADDR);
953 outw(0x7940, dev->base_addr + LANCE_DATA);
954
955 if (lance_debug > 4)
956 printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
957 dev->name, inw(ioaddr + LANCE_ADDR),
958 inw(dev->base_addr + LANCE_DATA));
959
960 dev->interrupt = 0;
961 return;
962 }
963
964 static int
965 lance_rx(struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
966 {
967 struct lance_private *lp = (struct lance_private *)dev->priv;
968 int entry = lp->cur_rx & RX_RING_MOD_MASK;
969 int i;
970
971 /* If we own the next entry, it's a new packet. Send it up. */
972 while (lp->rx_ring[entry].base >= 0) {
973 int status = lp->rx_ring[entry].base >> 24;
974
975 if (status != 0x03) { /* There was an error. */
976 /* There is a tricky error noted by John Murphy,
977 <murf@perftech.com> to Russ Nelson: Even with full-sized
978 buffers it's possible for a jabber packet to use two
979 buffers, with only the last correctly noting the error. */
980 if (status & 0x01) /* Only count a general error at the */
981 lp->stats.rx_errors++; /* end of a packet.*/
982 if (status & 0x20) lp->stats.rx_frame_errors++;
983 if (status & 0x10) lp->stats.rx_over_errors++;
984 if (status & 0x08) lp->stats.rx_crc_errors++;
985 if (status & 0x04) lp->stats.rx_fifo_errors++;
986 lp->rx_ring[entry].base &= 0x03ffffff;
987 } else {
988 /* Malloc up new buffer, compatible with net-2e. */
989 short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
990 struct sk_buff *skb;
991
992 skb = dev_alloc_skb(pkt_len+2);
993 if (skb == NULL) {
994 printk("%s: Memory squeeze, deferring packet.\n", dev->name);
995 for (i=0; i < RX_RING_SIZE; i++)
996 if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
997 break;
998
999 if (i > RX_RING_SIZE -2) {
1000 lp->stats.rx_dropped++;
1001 lp->rx_ring[entry].base |= 0x80000000;
1002 lp->cur_rx++;
1003 }
1004 break;
1005 }
1006 skb->dev = dev;
1007 skb_reserve(skb,2); /* 16 byte align */
1008 skb_put(skb,pkt_len); /* Make room */
1009 eth_copy_and_sum(skb,
1010 (unsigned char *)(lp->rx_ring[entry].base & 0x00ffffff),
1011 pkt_len,0);
1012 skb->protocol=eth_type_trans(skb,dev);
1013 netif_rx(skb);
1014 lp->stats.rx_packets++;
1015 }
1016
1017 /* The docs say that the buffer length isn't touched, but Andrew Boyd
1018 of QNX reports that some revs of the 79C965 clear it. */
1019 lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
1020 lp->rx_ring[entry].base |= 0x80000000;
1021 entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
1022 }
1023
1024 /* We should check that at least two ring entries are free. If not,
1025 we should free one and mark stats->rx_dropped++. */
1026
1027 return 0;
1028 }
1029
1030 static int
1031 lance_close(struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
1032 {
1033 int ioaddr = dev->base_addr;
1034 struct lance_private *lp = (struct lance_private *)dev->priv;
1035
1036 dev->start = 0;
1037 dev->tbusy = 1;
1038
1039 if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1040 outw(112, ioaddr+LANCE_ADDR);
1041 lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1042 }
1043 outw(0, ioaddr+LANCE_ADDR);
1044
1045 if (lance_debug > 1)
1046 printk("%s: Shutting down ethercard, status was %2.2x.\n",
1047 dev->name, inw(ioaddr+LANCE_DATA));
1048
1049 /* We stop the LANCE here -- it occasionally polls
1050 memory if we don't. */
1051 outw(0x0004, ioaddr+LANCE_DATA);
1052
1053 if (dev->dma != 4)
1054 disable_dma(dev->dma);
1055
1056 free_irq(dev->irq);
1057
1058 irq2dev_map[dev->irq] = 0;
1059
1060 return 0;
1061 }
1062
1063 static struct enet_statistics *
1064 lance_get_stats(struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
1065 {
1066 struct lance_private *lp = (struct lance_private *)dev->priv;
1067 short ioaddr = dev->base_addr;
1068 short saved_addr;
1069 unsigned long flags;
1070
1071 if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1072 save_flags(flags);
1073 cli();
1074 saved_addr = inw(ioaddr+LANCE_ADDR);
1075 outw(112, ioaddr+LANCE_ADDR);
1076 lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1077 outw(saved_addr, ioaddr+LANCE_ADDR);
1078 restore_flags(flags);
1079 }
1080
1081 return &lp->stats;
1082 }
1083
/* Set or clear the multicast filter for this adaptor.
	num_addrs == -1		Promiscuous mode, receive all packets
	num_addrs == 0		Normal mode, clear multicast list
	num_addrs > 0		Multicast mode, receive normal and MC packets, and do
						best-effort filtering.
 */
1090 static void
1091 set_multicast_list(struct device *dev, int num_addrs, void *addrs)
/* ![[previous]](../icons/left.png)
![[next]](../icons/n_right.png)
![[first]](../icons/first.png)
![[last]](../icons/n_last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
1092 {
1093 short ioaddr = dev->base_addr;
1094
1095 outw(0, ioaddr+LANCE_ADDR);
1096 outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */
1097
1098 if (num_addrs >= 0) {
1099 short multicast_table[4];
1100 int i;
1101 /* We don't use the multicast table, but rely on upper-layer filtering. */
1102 memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
1103 for (i = 0; i < 4; i++) {
1104 outw(8 + i, ioaddr+LANCE_ADDR);
1105 outw(multicast_table[i], ioaddr+LANCE_DATA);
1106 }
1107 outw(15, ioaddr+LANCE_ADDR);
1108 outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
1109 } else {
1110 /* Log any net taps. */
1111 printk("%s: Promiscuous mode enabled.\n", dev->name);
1112 outw(15, ioaddr+LANCE_ADDR);
1113 outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
1114 }
1115
1116 lance_restart(dev, 0x0142, 0); /* Resume normal operation */
1117
1118 }
1119

/*
 * Local variables:
 *  compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c lance.c"
 *  c-indent-level: 4
 *  tab-width: 4
 * End:
 */