1 /* lance.c: An AMD LANCE ethernet driver for linux. */
2 /*
3 Written 1993,1994,1995 by Donald Becker.
4
5 Copyright 1993 United States Government as represented by the
6 Director, National Security Agency.
7 This software may be used and distributed according to the terms
8 of the GNU Public License, incorporated herein by reference.
9
10 This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
11 with most other LANCE-based bus-master (NE2100 clone) ethercards.
12
13 The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
14 Center of Excellence in Space Data and Information Sciences
15 Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
16 */
17
18 static const char *version = "lance.c:v1.08 4/10/95 dplatt@3do.com\n";
19
20 #include <linux/config.h>
21 #include <linux/kernel.h>
22 #include <linux/sched.h>
23 #include <linux/string.h>
24 #include <linux/ptrace.h>
25 #include <linux/errno.h>
26 #include <linux/ioport.h>
27 #include <linux/malloc.h>
28 #include <linux/interrupt.h>
29 #include <linux/pci.h>
30 #include <linux/bios32.h>
31 #include <asm/bitops.h>
32 #include <asm/io.h>
33 #include <asm/dma.h>
34
35 #include <linux/netdevice.h>
36 #include <linux/etherdevice.h>
37 #include <linux/skbuff.h>
38
39 static unsigned int lance_portlist[] = {0x300, 0x320, 0x340, 0x360, 0};
40 void lance_probe1(int ioaddr);
41
42 #ifdef HAVE_DEVLIST
43 struct netdev_entry lance_drv =
44 {"lance", lance_probe1, LANCE_TOTAL_SIZE, lance_portlist};
45 #endif
46
47 #ifdef LANCE_DEBUG
48 int lance_debug = LANCE_DEBUG;
49 #else
50 int lance_debug = 1;
51 #endif
52
53 /*
54 Theory of Operation
55
56 I. Board Compatibility
57
58 This device driver is designed for the AMD 79C960, the "PCnet-ISA
59 single-chip ethernet controller for ISA". This chip is used in a wide
60 variety of boards from vendors such as Allied Telesis, HP, Kingston,
61 and Boca. This driver is also intended to work with older AMD 7990
62 designs, such as the NE1500 and NE2100, and newer 79C961. For convenience,
63 I use the name LANCE to refer to all of the AMD chips, even though it properly
64 refers only to the original 7990.
65
66 II. Board-specific settings
67
68 The driver is designed to work the boards that use the faster
69 bus-master mode, rather than in shared memory mode. (Only older designs
70 have on-board buffer memory needed to support the slower shared memory mode.)
71
72 Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
73 channel. This driver probes the likely base addresses:
74 {0x300, 0x320, 0x340, 0x360}.
75 After the board is found it generates a DMA-timeout interrupt and uses
76 autoIRQ to find the IRQ line. The DMA channel can be set with the low bits
77 of the otherwise-unused dev->mem_start value (aka PARAM1). If unset it is
78 probed for by enabling each free DMA channel in turn and checking if
79 initialization succeeds.
80
81 The HP-J2405A board is an exception: with this board it's easy to read the
82 EEPROM-set values for the base, IRQ, and DMA. (Of course you must already
83 _know_ the base address -- that field is for writing the EEPROM.)
84
85 III. Driver operation
86
87 IIIa. Ring buffers
88 The LANCE uses ring buffers of Tx and Rx descriptors. Each entry describes
89 the base and length of the data buffer, along with status bits. The length
90 of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
91 the buffer length (rather than being directly the buffer length) for
92 implementation ease. The current values are 2 (Tx) and 4 (Rx), which leads to
93 ring sizes of 4 (Tx) and 16 (Rx). Increasing the number of ring entries
94 needlessly uses extra space and reduces the chance that an upper layer will
95 be able to reorder queued Tx packets based on priority. Decreasing the number
96 of entries makes it more difficult to achieve back-to-back packet transmission
97 and increases the chance that Rx ring will overflow. (Consider the worst case
98 of receiving back-to-back minimum-sized packets.)
99
100 The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
101 statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
102 avoid the administrative overhead. For the Rx side this avoids dynamically
103 allocating full-sized buffers "just in case", at the expense of a
104 memory-to-memory data copy for each packet received. For most systems this
105 is a good tradeoff: the Rx buffer will always be in low memory, the copy
106 is inexpensive, and it primes the cache for later packet processing. For Tx
107 the buffers are only used when needed as low-memory bounce buffers.
108
109 IIIB. 16M memory limitations.
110 For the ISA bus master mode all structures used directly by the LANCE,
111 the initialization block, Rx and Tx rings, and data buffers, must be
112 accessible from the ISA bus, i.e. in the lower 16M of real memory.
113 This is a problem for current Linux kernels on >16M machines. The network
114 devices are initialized after memory initialization, and the kernel doles out
115 memory from the top of memory downward. The current solution is to have a
116 special network initialization routine that's called before memory
117 initialization; this will eventually be generalized for all network devices.
118 As mentioned before, low-memory "bounce-buffers" are used when needed.
119
120 IIIC. Synchronization
121 The driver runs as two independent, single-threaded flows of control. One
122 is the send-packet routine, which enforces single-threaded use by the
123 dev->tbusy flag. The other thread is the interrupt handler, which is single
124 threaded by the hardware and other software.
125
126 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
127 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
128 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
129 the 'lp->tx_full' flag.
130
131 The interrupt handler has exclusive control over the Rx ring and records stats
132 from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
133 we can't avoid the interrupt overhead by having the Tx routine reap the Tx
134 stats.) After reaping the stats, it marks the queue entry as empty by setting
135 the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
136 tx_full and tbusy flags.
137
138 */
139
140 /* Set the number of Tx and Rx buffers, using Log_2(# buffers).
141 Reasonable default values are 4 Tx buffers, and 16 Rx buffers.
142 That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4). */
143 #ifndef LANCE_LOG_TX_BUFFERS
144 #define LANCE_LOG_TX_BUFFERS 4
145 #define LANCE_LOG_RX_BUFFERS 4
146 #endif
147
148 #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
149 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
150 #define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
151
152 #define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
153 #define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
154 #define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
155
156 #define PKT_BUF_SZ 1544
157
158 /* Offsets from base I/O address. */
159 #define LANCE_DATA 0x10
160 #define LANCE_ADDR 0x12
161 #define LANCE_RESET 0x14
162 #define LANCE_BUS_IF 0x16
163 #define LANCE_TOTAL_SIZE 0x18
164
165 /* The LANCE Rx and Tx ring descriptors. */
166 struct lance_rx_head {
167 int base;
168 short buf_length; /* This length is 2s complement (negative)! */
169 short msg_length; /* This length is "normal". */
170 };
171
172 struct lance_tx_head {
173 int base;
174 short length; /* Length is 2s complement (negative)! */
175 short misc;
176 };
177
178 /* The LANCE initialization block, described in databook. */
179 struct lance_init_block {
180 unsigned short mode; /* Pre-set mode (reg. 15) */
181 unsigned char phys_addr[6]; /* Physical ethernet address */
182 unsigned filter[2]; /* Multicast filter (unused). */
183 /* Receive and transmit ring base, along with extra bits. */
184 unsigned rx_ring; /* Tx and Rx ring base pointers */
185 unsigned tx_ring;
186 };
187
188 struct lance_private {
189 /* The Tx and Rx ring entries must be aligned on 8-byte boundaries.
190 This is always true for kmalloc'ed memory */
191 struct lance_rx_head rx_ring[RX_RING_SIZE];
192 struct lance_tx_head tx_ring[TX_RING_SIZE];
193 struct lance_init_block init_block;
194 const char *name;
195 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
196 struct sk_buff* tx_skbuff[TX_RING_SIZE];
197 long rx_buffs; /* Address of Rx and Tx buffers. */
198 /* Tx low-memory "bounce buffer" address. */
199 char (*tx_bounce_buffs)[PKT_BUF_SZ];
200 int cur_rx, cur_tx; /* The next free ring entry */
201 int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
202 int dma;
203 struct enet_statistics stats;
204 unsigned char chip_version; /* See lance_chip_type. */
205 char tx_full;
206 char lock;
207 };
208
209 #define LANCE_MUST_PAD 0x00000001
210 #define LANCE_ENABLE_AUTOSELECT 0x00000002
211 #define LANCE_MUST_REINIT_RING 0x00000004
212 #define LANCE_MUST_UNRESET 0x00000008
213 #define LANCE_HAS_MISSED_FRAME 0x00000010
214
215 /* A mapping from the chip ID number to the part number and features.
216 These are from the datasheets -- in real life the '970 version
217 reportedly has the same ID as the '965. */
218 static struct lance_chip_type {
219 int id_number;
220 const char *name;
221 int flags;
222 } chip_table[] = {
223 {0x0000, "LANCE 7990", /* Ancient lance chip. */
224 LANCE_MUST_PAD + LANCE_MUST_UNRESET},
225 {0x0003, "PCnet/ISA 79C960", /* 79C960 PCnet/ISA. */
226 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
227 LANCE_HAS_MISSED_FRAME},
228 {0x2260, "PCnet/ISA+ 79C961", /* 79C961 PCnet/ISA+, Plug-n-Play. */
229 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
230 LANCE_HAS_MISSED_FRAME},
231 {0x2420, "PCnet/PCI 79C970", /* 79C970 or 79C974 PCnet-SCSI, PCI. */
232 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
233 LANCE_HAS_MISSED_FRAME},
234 /* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
235 it the PCnet32. */
236 {0x2430, "PCnet32", /* 79C965 PCnet for VL bus. */
237 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
238 LANCE_HAS_MISSED_FRAME},
239 {0x0, "PCnet (unknown)",
240 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
241 LANCE_HAS_MISSED_FRAME},
242 };
243
244 enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, LANCE_UNKNOWN=5};
245
246 /* Non-zero only if the current card is a PCI with BIOS-set IRQ. */
247 static unsigned char pci_irq_line = 0;
248
249 /* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
250 Assume yes until we know the memory size. */
251 static unsigned char lance_need_isa_bounce_buffers = 1;
252
253 static int lance_open(struct device *dev);
254 static void lance_init_ring(struct device *dev);
255 static int lance_start_xmit(struct sk_buff *skb, struct device *dev);
256 static int lance_rx(struct device *dev);
257 static void lance_interrupt(int irq, struct pt_regs *regs);
258 static int lance_close(struct device *dev);
259 static struct enet_statistics *lance_get_stats(struct device *dev);
260 #ifdef HAVE_MULTICAST
261 static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);
262 #endif
263
264
265
266 /* This lance probe is unlike the other board probes in 1.0.*. The LANCE may
267 have to allocate a contiguous low-memory region for bounce buffers.
268 This requirement is satisfied by having the lance initialization occur
269 before the memory management system is started, and thus well before the
270 other probes. */
271
272 int lance_init(void)
/* ![[previous]](../icons/n_left.png)
![[next]](../icons/right.png)
![[first]](../icons/n_first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
273 {
274 int *port;
275
276 if (high_memory <= 16*1024*1024)
277 lance_need_isa_bounce_buffers = 0;
278
279 #if defined(CONFIG_PCI)
280 if (pcibios_present()) {
281 int pci_index;
282 printk("lance.c: PCI bios is present, checking for devices...\n");
283 for (pci_index = 0; pci_index < 8; pci_index++) {
284 unsigned char pci_bus, pci_device_fn;
285 unsigned int pci_ioaddr;
286 unsigned short pci_command;
287
288 if (pcibios_find_device (PCI_VENDOR_ID_AMD,
289 PCI_DEVICE_ID_AMD_LANCE, pci_index,
290 &pci_bus, &pci_device_fn) != 0)
291 break;
292 pcibios_read_config_byte(pci_bus, pci_device_fn,
293 PCI_INTERRUPT_LINE, &pci_irq_line);
294 pcibios_read_config_dword(pci_bus, pci_device_fn,
295 PCI_BASE_ADDRESS_0, &pci_ioaddr);
296 /* Remove I/O space marker in bit 0. */
297 pci_ioaddr &= ~3;
298 /* PCI Spec 2.1 states that it is either the driver or PCI card's
299 * responsibility to set the PCI Master Enable Bit if needed.
300 * (From Mark Stockton <marks@schooner.sys.hou.compaq.com>)
301 */
302 pcibios_read_config_word(pci_bus, pci_device_fn,
303 PCI_COMMAND, &pci_command);
304 if ( ! (pci_command & PCI_COMMAND_MASTER)) {
305 printk("PCI Master Bit has not been set. Setting...\n");
306 pci_command |= PCI_COMMAND_MASTER;
307 pcibios_write_config_word(pci_bus, pci_device_fn,
308 PCI_COMMAND, pci_command);
309 }
310 printk("Found PCnet/PCI at %#x, irq %d.\n",
311 pci_ioaddr, pci_irq_line);
312 lance_probe1(pci_ioaddr);
313 pci_irq_line = 0;
314 }
315 }
316 #endif /* defined(CONFIG_PCI) */
317
318 for (port = lance_portlist; *port; port++) {
319 int ioaddr = *port;
320
321 if ( check_region(ioaddr, LANCE_TOTAL_SIZE) == 0) {
322 /* Detect "normal" 0x57 0x57 and the NI6510EB 0x52 0x44
323 signatures w/ minimal I/O reads */
324 char offset15, offset14 = inb(ioaddr + 14);
325
326 if ((offset14 == 0x52 || offset14 == 0x57) &&
327 ((offset15 = inb(ioaddr + 15)) == 0x57 || offset15 == 0x44))
328 lance_probe1(ioaddr);
329 }
330 }
331
332 return 0;
333 }
334
335 void lance_probe1(int ioaddr)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
336 {
337 struct device *dev;
338 struct lance_private *lp;
339 short dma_channels; /* Mark spuriously-busy DMA channels */
340 int i, reset_val, lance_version;
341 const char *chipname;
342 /* Flags for specific chips or boards. */
343 unsigned char hpJ2405A = 0; /* HP ISA adaptor */
344 int hp_builtin = 0; /* HP on-board ethernet. */
345 static int did_version = 0; /* Already printed version info. */
346
347 /* First we look for special cases.
348 Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
349 There are two HP versions, check the BIOS for the configuration port.
350 This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
351 */
352 if ( *((unsigned short *) 0x000f0102) == 0x5048) {
353 static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
354 int hp_port = ( *((unsigned char *) 0x000f00f1) & 1) ? 0x499 : 0x99;
355 /* We can have boards other than the built-in! Verify this is on-board. */
356 if ((inb(hp_port) & 0xc0) == 0x80
357 && ioaddr_table[inb(hp_port) & 3] == ioaddr)
358 hp_builtin = hp_port;
359 }
360 /* We also recognize the HP Vectra on-board here, but check below. */
361 hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00
362 && inb(ioaddr+2) == 0x09);
363
364 /* Reset the LANCE. */
365 reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
366
367 /* The Un-Reset needed is only needed for the real NE2100, and will
368 confuse the HP board. */
369 if (!hpJ2405A)
370 outw(reset_val, ioaddr+LANCE_RESET);
371
372 outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
373 if (inw(ioaddr+LANCE_DATA) != 0x0004)
374 return;
375
376 /* Get the version of the chip. */
377 outw(88, ioaddr+LANCE_ADDR);
378 if (inw(ioaddr+LANCE_ADDR) != 88) {
379 lance_version = 0;
380 } else { /* Good, it's a newer chip. */
381 int chip_version = inw(ioaddr+LANCE_DATA);
382 outw(89, ioaddr+LANCE_ADDR);
383 chip_version |= inw(ioaddr+LANCE_DATA) << 16;
384 if (lance_debug > 2)
385 printk(" LANCE chip version is %#x.\n", chip_version);
386 if ((chip_version & 0xfff) != 0x003)
387 return;
388 chip_version = (chip_version >> 12) & 0xffff;
389 for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
390 if (chip_table[lance_version].id_number == chip_version)
391 break;
392 }
393 }
394
395 dev = init_etherdev(0, 0);
396 chipname = chip_table[lance_version].name;
397 printk("%s: %s at %#3x,", dev->name, chipname, ioaddr);
398
399 /* There is a 16 byte station address PROM at the base address.
400 The first six bytes are the station address. */
401 for (i = 0; i < 6; i++)
402 printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
403
404 dev->base_addr = ioaddr;
405 request_region(ioaddr, LANCE_TOTAL_SIZE, chip_table[lance_version].name);
406
407 /* Make certain the data structures used by the LANCE are aligned and DMAble. */
408 lp = (struct lance_private *) kmalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
409 dev->priv = lp;
410 lp->name = chipname;
411 lp->rx_buffs = (unsigned long) kmalloc(PKT_BUF_SZ*RX_RING_SIZE, GFP_DMA | GFP_KERNEL);
412 lp->tx_bounce_buffs = NULL;
413 if (lance_need_isa_bounce_buffers)
414 lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE, GFP_DMA | GFP_KERNEL);
415
416 lp->chip_version = lance_version;
417
418 lp->init_block.mode = 0x0003; /* Disable Rx and Tx. */
419 for (i = 0; i < 6; i++)
420 lp->init_block.phys_addr[i] = dev->dev_addr[i];
421 lp->init_block.filter[0] = 0x00000000;
422 lp->init_block.filter[1] = 0x00000000;
423 lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
424 lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;
425
426 outw(0x0001, ioaddr+LANCE_ADDR);
427 inw(ioaddr+LANCE_ADDR);
428 outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
429 outw(0x0002, ioaddr+LANCE_ADDR);
430 inw(ioaddr+LANCE_ADDR);
431 outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);
432 outw(0x0000, ioaddr+LANCE_ADDR);
433 inw(ioaddr+LANCE_ADDR);
434
435 if (pci_irq_line) {
436 dev->dma = 4; /* Native bus-master, no DMA channel needed. */
437 dev->irq = pci_irq_line;
438 } else if (hp_builtin) {
439 static const char dma_tbl[4] = {3, 5, 6, 0};
440 static const char irq_tbl[4] = {3, 4, 5, 9};
441 unsigned char port_val = inb(hp_builtin);
442 dev->dma = dma_tbl[(port_val >> 4) & 3];
443 dev->irq = irq_tbl[(port_val >> 2) & 3];
444 printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
445 } else if (hpJ2405A) {
446 static const char dma_tbl[4] = {3, 5, 6, 7};
447 static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
448 short reset_val = inw(ioaddr+LANCE_RESET);
449 dev->dma = dma_tbl[(reset_val >> 2) & 3];
450 dev->irq = irq_tbl[(reset_val >> 4) & 7];
451 printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
452 } else if (lance_version == PCNET_ISAP) { /* The plug-n-play version. */
453 short bus_info;
454 outw(8, ioaddr+LANCE_ADDR);
455 bus_info = inw(ioaddr+LANCE_BUS_IF);
456 dev->dma = bus_info & 0x07;
457 dev->irq = (bus_info >> 4) & 0x0F;
458 } else {
459 /* The DMA channel may be passed in PARAM1. */
460 if (dev->mem_start & 0x07)
461 dev->dma = dev->mem_start & 0x07;
462 }
463
464 if (dev->dma == 0) {
465 /* Read the DMA channel status register, so that we can avoid
466 stuck DMA channels in the DMA detection below. */
467 dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
468 (inb(DMA2_STAT_REG) & 0xf0);
469 }
470 if (dev->irq >= 2)
471 printk(" assigned IRQ %d", dev->irq);
472 else {
473 /* To auto-IRQ we enable the initialization-done and DMA error
474 interrupts. For ISA boards we get a DMA error, but VLB and PCI
475 boards will work. */
476 autoirq_setup(0);
477
478 /* Trigger an initialization just for the interrupt. */
479 outw(0x0041, ioaddr+LANCE_DATA);
480
481 dev->irq = autoirq_report(1);
482 if (dev->irq)
483 printk(", probed IRQ %d", dev->irq);
484 else {
485 printk(", failed to detect IRQ line.\n");
486 return;
487 }
488
489 /* Check for the initialization done bit, 0x0100, which means
490 that we don't need a DMA channel. */
491 if (inw(ioaddr+LANCE_DATA) & 0x0100)
492 dev->dma = 4;
493 }
494
495 if (dev->dma == 4) {
496 printk(", no DMA needed.\n");
497 } else if (dev->dma) {
498 if (request_dma(dev->dma, chipname)) {
499 printk("DMA %d allocation failed.\n", dev->dma);
500 return;
501 } else
502 printk(", assigned DMA %d.\n", dev->dma);
503 } else { /* OK, we have to auto-DMA. */
504 for (i = 0; i < 4; i++) {
505 static const char dmas[] = { 5, 6, 7, 3 };
506 int dma = dmas[i];
507 int boguscnt;
508
509 /* Don't enable a permanently busy DMA channel, or the machine
510 will hang. */
511 if (test_bit(dma, &dma_channels))
512 continue;
513 outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
514 if (request_dma(dma, chipname))
515 continue;
516 set_dma_mode(dma, DMA_MODE_CASCADE);
517 enable_dma(dma);
518
519 /* Trigger an initialization. */
520 outw(0x0001, ioaddr+LANCE_DATA);
521 for (boguscnt = 100; boguscnt > 0; --boguscnt)
522 if (inw(ioaddr+LANCE_DATA) & 0x0900)
523 break;
524 if (inw(ioaddr+LANCE_DATA) & 0x0100) {
525 dev->dma = dma;
526 printk(", DMA %d.\n", dev->dma);
527 break;
528 } else {
529 disable_dma(dma);
530 free_dma(dma);
531 }
532 }
533 if (i == 4) { /* Failure: bail. */
534 printk("DMA detection failed.\n");
535 return;
536 }
537 }
538
539 if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
540 /* Turn on auto-select of media (10baseT or BNC) so that the user
541 can watch the LEDs even if the board isn't opened. */
542 outw(0x0002, ioaddr+LANCE_ADDR);
543 outw(0x0002, ioaddr+LANCE_BUS_IF);
544 }
545
546 if (lance_debug > 0 && did_version++ == 0)
547 printk(version);
548
549 /* The LANCE-specific entries in the device structure. */
550 dev->open = &lance_open;
551 dev->hard_start_xmit = &lance_start_xmit;
552 dev->stop = &lance_close;
553 dev->get_stats = &lance_get_stats;
554 dev->set_multicast_list = &set_multicast_list;
555
556 return;
557 }
558
559
560 static int
561 lance_open(struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
562 {
563 struct lance_private *lp = (struct lance_private *)dev->priv;
564 int ioaddr = dev->base_addr;
565 int i;
566
567 if (dev->irq == 0 ||
568 request_irq(dev->irq, &lance_interrupt, 0, lp->name)) {
569 return -EAGAIN;
570 }
571
572 /* We used to allocate DMA here, but that was silly.
573 DMA lines can't be shared! We now permanently snarf them. */
574
575 irq2dev_map[dev->irq] = dev;
576
577 /* Reset the LANCE */
578 inw(ioaddr+LANCE_RESET);
579
580 /* The DMA controller is used as a no-operation slave, "cascade mode". */
581 if (dev->dma != 4) {
582 enable_dma(dev->dma);
583 set_dma_mode(dev->dma, DMA_MODE_CASCADE);
584 }
585
586 /* Un-Reset the LANCE, needed only for the NE2100. */
587 if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
588 outw(0, ioaddr+LANCE_RESET);
589
590 if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
591 /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
592 outw(0x0002, ioaddr+LANCE_ADDR);
593 outw(0x0002, ioaddr+LANCE_BUS_IF);
594 }
595
596 if (lance_debug > 1)
597 printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
598 dev->name, dev->irq, dev->dma, (int) lp->tx_ring, (int) lp->rx_ring,
599 (int) &lp->init_block);
600
601 lance_init_ring(dev);
602 /* Re-initialize the LANCE, and start it when done. */
603 outw(0x0001, ioaddr+LANCE_ADDR);
604 outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
605 outw(0x0002, ioaddr+LANCE_ADDR);
606 outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);
607
608 outw(0x0004, ioaddr+LANCE_ADDR);
609 outw(0x0915, ioaddr+LANCE_DATA);
610
611 outw(0x0000, ioaddr+LANCE_ADDR);
612 outw(0x0001, ioaddr+LANCE_DATA);
613
614 dev->tbusy = 0;
615 dev->interrupt = 0;
616 dev->start = 1;
617 i = 0;
618 while (i++ < 100)
619 if (inw(ioaddr+LANCE_DATA) & 0x0100)
620 break;
621 /*
622 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
623 * reports that doing so triggers a bug in the '974.
624 */
625 outw(0x0042, ioaddr+LANCE_DATA);
626
627 if (lance_debug > 2)
628 printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
629 dev->name, i, (int) &lp->init_block, inw(ioaddr+LANCE_DATA));
630
631 return 0; /* Always succeed */
632 }
633
634 /* The LANCE has been halted for one reason or another (busmaster memory
635 arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
636 etc.). Modern LANCE variants always reload their ring-buffer
637 configuration when restarted, so we must reinitialize our ring
638 context before restarting. As part of this reinitialization,
639 find all packets still on the Tx ring and pretend that they had been
640 sent (in effect, drop the packets on the floor) - the higher-level
641 protocols will time out and retransmit. It'd be better to shuffle
642 these skbs to a temp list and then actually re-Tx them after
643 restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
644 */
645
646 static void
647 lance_purge_tx_ring(struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
648 {
649 struct lance_private *lp = (struct lance_private *)dev->priv;
650 int i;
651
652 for (i = 0; i < TX_RING_SIZE; i++) {
653 if (lp->tx_skbuff[i]) {
654 dev_kfree_skb(lp->tx_skbuff[i],FREE_WRITE);
655 lp->tx_skbuff[i] = NULL;
656 }
657 }
658 }
659
660
661 /* Initialize the LANCE Rx and Tx rings. */
662 static void
663 lance_init_ring(struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
664 {
665 struct lance_private *lp = (struct lance_private *)dev->priv;
666 int i;
667
668 lp->lock = 0, lp->tx_full = 0;
669 lp->cur_rx = lp->cur_tx = 0;
670 lp->dirty_rx = lp->dirty_tx = 0;
671
672 for (i = 0; i < RX_RING_SIZE; i++) {
673 lp->rx_ring[i].base = (lp->rx_buffs + i*PKT_BUF_SZ) | 0x80000000;
674 lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
675 }
676 /* The Tx buffer address is filled in as needed, but we do need to clear
677 the upper ownership bit. */
678 for (i = 0; i < TX_RING_SIZE; i++) {
679 lp->tx_ring[i].base = 0;
680 }
681
682 lp->init_block.mode = 0x0000;
683 for (i = 0; i < 6; i++)
684 lp->init_block.phys_addr[i] = dev->dev_addr[i];
685 lp->init_block.filter[0] = 0x00000000;
686 lp->init_block.filter[1] = 0x00000000;
687 lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
688 lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;
689 }
690
691 static void
692 lance_restart(struct device *dev, unsigned int csr0_bits, int must_reinit)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
693 {
694 struct lance_private *lp = (struct lance_private *)dev->priv;
695
696 if (must_reinit ||
697 (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
698 lance_purge_tx_ring(dev);
699 lance_init_ring(dev);
700 }
701 outw(0x0000, dev->base_addr + LANCE_ADDR);
702 outw(csr0_bits, dev->base_addr + LANCE_DATA);
703 }
704
705 static int
706 lance_start_xmit(struct sk_buff *skb, struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
707 {
708 struct lance_private *lp = (struct lance_private *)dev->priv;
709 int ioaddr = dev->base_addr;
710 int entry;
711 unsigned long flags;
712
713 /* Transmitter timeout, serious problems. */
714 if (dev->tbusy) {
715 int tickssofar = jiffies - dev->trans_start;
716 if (tickssofar < 20)
717 return 1;
718 outw(0, ioaddr+LANCE_ADDR);
719 printk("%s: transmit timed out, status %4.4x, resetting.\n",
720 dev->name, inw(ioaddr+LANCE_DATA));
721 outw(0x0004, ioaddr+LANCE_DATA);
722 lp->stats.tx_errors++;
723 #ifndef final_version
724 {
725 int i;
726 printk(" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
727 lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
728 lp->cur_rx);
729 for (i = 0 ; i < RX_RING_SIZE; i++)
730 printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
731 lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
732 lp->rx_ring[i].msg_length);
733 for (i = 0 ; i < TX_RING_SIZE; i++)
734 printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
735 lp->tx_ring[i].base, -lp->tx_ring[i].length,
736 lp->tx_ring[i].misc);
737 printk("\n");
738 }
739 #endif
740 lance_restart(dev, 0x0043, 1);
741
742 dev->tbusy=0;
743 dev->trans_start = jiffies;
744
745 return 0;
746 }
747
748 if (skb == NULL) {
749 dev_tint(dev);
750 return 0;
751 }
752
753 if (skb->len <= 0)
754 return 0;
755
756 if (lance_debug > 3) {
757 outw(0x0000, ioaddr+LANCE_ADDR);
758 printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
759 inw(ioaddr+LANCE_DATA));
760 outw(0x0000, ioaddr+LANCE_DATA);
761 }
762
763 /* Block a timer-based transmit from overlapping. This could better be
764 done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
765 if (set_bit(0, (void*)&dev->tbusy) != 0) {
766 printk("%s: Transmitter access conflict.\n", dev->name);
767 return 1;
768 }
769
770 if (set_bit(0, (void*)&lp->lock) != 0) {
771 if (lance_debug > 0)
772 printk("%s: tx queue lock!.\n", dev->name);
773 /* don't clear dev->tbusy flag. */
774 return 1;
775 }
776
777 /* Fill in a Tx ring entry */
778
779 /* Mask to ring buffer boundary. */
780 entry = lp->cur_tx & TX_RING_MOD_MASK;
781
782 /* Caution: the write order is important here, set the base address
783 with the "ownership" bits last. */
784
785 /* The old LANCE chips doesn't automatically pad buffers to min. size. */
786 if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
787 lp->tx_ring[entry].length =
788 -(ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN);
789 } else
790 lp->tx_ring[entry].length = -skb->len;
791
792 lp->tx_ring[entry].misc = 0x0000;
793
794 /* If any part of this buffer is >16M we must copy it to a low-memory
795 buffer. */
796 if ((int)(skb->data) + skb->len > 0x01000000) {
797 if (lance_debug > 5)
798 printk("%s: bouncing a high-memory packet (%#x).\n",
799 dev->name, (int)(skb->data));
800 memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len);
801 lp->tx_ring[entry].base =
802 (int)(lp->tx_bounce_buffs + entry) | 0x83000000;
803 dev_kfree_skb (skb, FREE_WRITE);
804 } else {
805 lp->tx_skbuff[entry] = skb;
806 lp->tx_ring[entry].base = (int)(skb->data) | 0x83000000;
807 }
808 lp->cur_tx++;
809
810 /* Trigger an immediate send poll. */
811 outw(0x0000, ioaddr+LANCE_ADDR);
812 outw(0x0048, ioaddr+LANCE_DATA);
813
814 dev->trans_start = jiffies;
815
816 save_flags(flags);
817 cli();
818 lp->lock = 0;
819 if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base == 0)
820 dev->tbusy=0;
821 else
822 lp->tx_full = 1;
823 restore_flags(flags);
824
825 return 0;
826 }
827
828 /* The LANCE interrupt handler. */
829 static void
830 lance_interrupt(int irq, struct pt_regs * regs)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
831 {
832 struct device *dev = (struct device *)(irq2dev_map[irq]);
833 struct lance_private *lp;
834 int csr0, ioaddr, boguscnt=10;
835 int must_restart;
836
837 if (dev == NULL) {
838 printk ("lance_interrupt(): irq %d for unknown device.\n", irq);
839 return;
840 }
841
842 ioaddr = dev->base_addr;
843 lp = (struct lance_private *)dev->priv;
844 if (dev->interrupt)
845 printk("%s: Re-entering the interrupt handler.\n", dev->name);
846
847 dev->interrupt = 1;
848
849 outw(0x00, dev->base_addr + LANCE_ADDR);
850 while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600
851 && --boguscnt >= 0) {
852 /* Acknowledge all of the current interrupt sources ASAP. */
853 outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
854
855 must_restart = 0;
856
857 if (lance_debug > 5)
858 printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
859 dev->name, csr0, inw(dev->base_addr + LANCE_DATA));
860
861 if (csr0 & 0x0400) /* Rx interrupt */
862 lance_rx(dev);
863
864 if (csr0 & 0x0200) { /* Tx-done interrupt */
865 int dirty_tx = lp->dirty_tx;
866
867 while (dirty_tx < lp->cur_tx) {
868 int entry = dirty_tx & TX_RING_MOD_MASK;
869 int status = lp->tx_ring[entry].base;
870
871 if (status < 0)
872 break; /* It still hasn't been Txed */
873
874 lp->tx_ring[entry].base = 0;
875
876 if (status & 0x40000000) {
877 /* There was an major error, log it. */
878 int err_status = lp->tx_ring[entry].misc;
879 lp->stats.tx_errors++;
880 if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
881 if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
882 if (err_status & 0x1000) lp->stats.tx_window_errors++;
883 if (err_status & 0x4000) {
884 /* Ackk! On FIFO errors the Tx unit is turned off! */
885 lp->stats.tx_fifo_errors++;
886 /* Remove this verbosity later! */
887 printk("%s: Tx FIFO error! Status %4.4x.\n",
888 dev->name, csr0);
889 /* Restart the chip. */
890 must_restart = 1;
891 }
892 } else {
893 if (status & 0x18000000)
894 lp->stats.collisions++;
895 lp->stats.tx_packets++;
896 }
897
898 /* We must free the original skb if it's not a data-only copy
899 in the bounce buffer. */
900 if (lp->tx_skbuff[entry]) {
901 dev_kfree_skb(lp->tx_skbuff[entry],FREE_WRITE);
902 lp->tx_skbuff[entry] = 0;
903 }
904 dirty_tx++;
905 }
906
907 #ifndef final_version
908 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
909 printk("out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
910 dirty_tx, lp->cur_tx, lp->tx_full);
911 dirty_tx += TX_RING_SIZE;
912 }
913 #endif
914
915 if (lp->tx_full && dev->tbusy
916 && dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
917 /* The ring is no longer full, clear tbusy. */
918 lp->tx_full = 0;
919 dev->tbusy = 0;
920 mark_bh(NET_BH);
921 }
922
923 lp->dirty_tx = dirty_tx;
924 }
925
926 /* Log misc errors. */
927 if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
928 if (csr0 & 0x1000) lp->stats.rx_errors++; /* Missed a Rx frame. */
929 if (csr0 & 0x0800) {
930 printk("%s: Bus master arbitration failure, status %4.4x.\n",
931 dev->name, csr0);
932 /* Restart the chip. */
933 must_restart = 1;
934 }
935
936 if (must_restart) {
937 /* stop the chip to clear the error condition, then restart */
938 outw(0x0000, dev->base_addr + LANCE_ADDR);
939 outw(0x0004, dev->base_addr + LANCE_DATA);
940 lance_restart(dev, 0x0002, 0);
941 }
942 }
943
944 /* Clear any other interrupt, and set interrupt enable. */
945 outw(0x0000, dev->base_addr + LANCE_ADDR);
946 outw(0x7940, dev->base_addr + LANCE_DATA);
947
948 if (lance_debug > 4)
949 printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
950 dev->name, inw(ioaddr + LANCE_ADDR),
951 inw(dev->base_addr + LANCE_DATA));
952
953 dev->interrupt = 0;
954 return;
955 }
956
957 static int
958 lance_rx(struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
959 {
960 struct lance_private *lp = (struct lance_private *)dev->priv;
961 int entry = lp->cur_rx & RX_RING_MOD_MASK;
962 int i;
963
964 /* If we own the next entry, it's a new packet. Send it up. */
965 while (lp->rx_ring[entry].base >= 0) {
966 int status = lp->rx_ring[entry].base >> 24;
967
968 if (status != 0x03) { /* There was an error. */
969 /* There is a tricky error noted by John Murphy,
970 <murf@perftech.com> to Russ Nelson: Even with full-sized
971 buffers it's possible for a jabber packet to use two
972 buffers, with only the last correctly noting the error. */
973 if (status & 0x01) /* Only count a general error at the */
974 lp->stats.rx_errors++; /* end of a packet.*/
975 if (status & 0x20) lp->stats.rx_frame_errors++;
976 if (status & 0x10) lp->stats.rx_over_errors++;
977 if (status & 0x08) lp->stats.rx_crc_errors++;
978 if (status & 0x04) lp->stats.rx_fifo_errors++;
979 lp->rx_ring[entry].base &= 0x03ffffff;
980 } else {
981 /* Malloc up new buffer, compatible with net-2e. */
982 short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
983 struct sk_buff *skb;
984
985 skb = dev_alloc_skb(pkt_len+2);
986 if (skb == NULL) {
987 printk("%s: Memory squeeze, deferring packet.\n", dev->name);
988 for (i=0; i < RX_RING_SIZE; i++)
989 if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
990 break;
991
992 if (i > RX_RING_SIZE -2) {
993 lp->stats.rx_dropped++;
994 lp->rx_ring[entry].base |= 0x80000000;
995 lp->cur_rx++;
996 }
997 break;
998 }
999 skb->dev = dev;
1000 skb_reserve(skb,2); /* 16 byte align */
1001 skb_put(skb,pkt_len); /* Make room */
1002 eth_copy_and_sum(skb,
1003 (unsigned char *)(lp->rx_ring[entry].base & 0x00ffffff),
1004 pkt_len,0);
1005 skb->protocol=eth_type_trans(skb,dev);
1006 netif_rx(skb);
1007 lp->stats.rx_packets++;
1008 }
1009
1010 /* The docs say that the buffer length isn't touched, but Andrew Boyd
1011 of QNX reports that some revs of the 79C965 clear it. */
1012 lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
1013 lp->rx_ring[entry].base |= 0x80000000;
1014 entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
1015 }
1016
1017 /* We should check that at least two ring entries are free. If not,
1018 we should free one and mark stats->rx_dropped++. */
1019
1020 return 0;
1021 }
1022
1023 static int
1024 lance_close(struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
1025 {
1026 int ioaddr = dev->base_addr;
1027 struct lance_private *lp = (struct lance_private *)dev->priv;
1028
1029 dev->start = 0;
1030 dev->tbusy = 1;
1031
1032 if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1033 outw(112, ioaddr+LANCE_ADDR);
1034 lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1035 }
1036 outw(0, ioaddr+LANCE_ADDR);
1037
1038 if (lance_debug > 1)
1039 printk("%s: Shutting down ethercard, status was %2.2x.\n",
1040 dev->name, inw(ioaddr+LANCE_DATA));
1041
1042 /* We stop the LANCE here -- it occasionally polls
1043 memory if we don't. */
1044 outw(0x0004, ioaddr+LANCE_DATA);
1045
1046 if (dev->dma != 4)
1047 disable_dma(dev->dma);
1048
1049 free_irq(dev->irq);
1050
1051 irq2dev_map[dev->irq] = 0;
1052
1053 return 0;
1054 }
1055
1056 static struct enet_statistics *
1057 lance_get_stats(struct device *dev)
/* ![[previous]](../icons/left.png)
![[next]](../icons/right.png)
![[first]](../icons/first.png)
![[last]](../icons/last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
1058 {
1059 struct lance_private *lp = (struct lance_private *)dev->priv;
1060 short ioaddr = dev->base_addr;
1061 short saved_addr;
1062 unsigned long flags;
1063
1064 if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1065 save_flags(flags);
1066 cli();
1067 saved_addr = inw(ioaddr+LANCE_ADDR);
1068 outw(112, ioaddr+LANCE_ADDR);
1069 lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1070 outw(saved_addr, ioaddr+LANCE_ADDR);
1071 restore_flags(flags);
1072 }
1073
1074 return &lp->stats;
1075 }
1076
1077 /* Set or clear the multicast filter for this adaptor.
1078 num_addrs == -2 All multicasts
1079 num_addrs == -1 Promiscuous mode, receive all packets
1080 num_addrs == 0 Normal mode, clear multicast list
1081 num_addrs > 0 Multicast mode, receive normal and MC packets, and do
1082 best-effort filtering.
1083 */
1084 static void
1085 set_multicast_list(struct device *dev, int num_addrs, void *addrs)
/* ![[previous]](../icons/left.png)
![[next]](../icons/n_right.png)
![[first]](../icons/first.png)
![[last]](../icons/n_last.png)
![[top]](../icons/top.png)
![[bottom]](../icons/bottom.png)
![[index]](../icons/index.png)
*/
1086 {
1087 short ioaddr = dev->base_addr;
1088
1089 outw(0, ioaddr+LANCE_ADDR);
1090 outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */
1091
1092 if (num_addrs >= 0 || num_addrs==-2) {
1093 short multicast_table[4];
1094 int i;
1095 /* We don't use the multicast table, but rely on upper-layer filtering. */
1096 memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
1097 for (i = 0; i < 4; i++) {
1098 outw(8 + i, ioaddr+LANCE_ADDR);
1099 outw(multicast_table[i], ioaddr+LANCE_DATA);
1100 }
1101 outw(15, ioaddr+LANCE_ADDR);
1102 outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
1103 } else {
1104 /* Log any net taps. */
1105 printk("%s: Promiscuous mode enabled.\n", dev->name);
1106 outw(15, ioaddr+LANCE_ADDR);
1107 outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
1108 }
1109
1110 lance_restart(dev, 0x0142, 0); /* Resume normal operation */
1111
1112 }
1113
1114
1115 /*
1116 * Local variables:
1117 * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c lance.c"
1118 * c-indent-level: 4
1119 * tab-width: 4
1120 * End:
1121 */