1 /* lance.c: An AMD LANCE ethernet driver for linux. */
2 /*
3 Written 1993,1994,1995 by Donald Becker.
4
5 Copyright 1993 United States Government as represented by the
6 Director, National Security Agency.
7 This software may be used and distributed according to the terms
8 of the GNU Public License, incorporated herein by reference.
9
10 This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
11 with most other LANCE-based bus-master (NE2100 clone) ethercards.
12
13 The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
14 Center of Excellence in Space Data and Information Sciences
15 Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
16 */
17
18 static const char *version = "lance.c:v1.08 4/10/95 dplatt@3do.com\n";
19
20 #include <linux/config.h>
21 #include <linux/kernel.h>
22 #include <linux/sched.h>
23 #include <linux/string.h>
24 #include <linux/ptrace.h>
25 #include <linux/errno.h>
26 #include <linux/ioport.h>
27 #include <linux/malloc.h>
28 #include <linux/interrupt.h>
29 #include <linux/pci.h>
30 #include <linux/bios32.h>
31 #include <asm/bitops.h>
32 #include <asm/io.h>
33 #include <asm/dma.h>
34
35 #include <linux/netdevice.h>
36 #include <linux/etherdevice.h>
37 #include <linux/skbuff.h>
38
39 static unsigned int lance_portlist[] = {0x300, 0x320, 0x340, 0x360, 0};
40 void lance_probe1(int ioaddr);
41
42 #ifdef HAVE_DEVLIST
43 struct netdev_entry lance_drv =
44 {"lance", lance_probe1, LANCE_TOTAL_SIZE, lance_portlist};
45 #endif
46
47 #ifdef LANCE_DEBUG
48 int lance_debug = LANCE_DEBUG;
49 #else
50 int lance_debug = 1;
51 #endif
52
53 /*
54 Theory of Operation
55
56 I. Board Compatibility
57
58 This device driver is designed for the AMD 79C960, the "PCnet-ISA
59 single-chip ethernet controller for ISA". This chip is used in a wide
60 variety of boards from vendors such as Allied Telesis, HP, Kingston,
61 and Boca. This driver is also intended to work with older AMD 7990
62	designs, such as the NE1500 and NE2100, and the newer 79C961. For convenience,
63 I use the name LANCE to refer to all of the AMD chips, even though it properly
64 refers only to the original 7990.
65
66 II. Board-specific settings
67
68	The driver is designed to work with boards that use the faster
69	bus-master mode rather than the shared-memory mode. (Only older designs
70 have on-board buffer memory needed to support the slower shared memory mode.)
71
72 Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
73 channel. This driver probes the likely base addresses:
74 {0x300, 0x320, 0x340, 0x360}.
75 After the board is found it generates a DMA-timeout interrupt and uses
76 autoIRQ to find the IRQ line. The DMA channel can be set with the low bits
77 of the otherwise-unused dev->mem_start value (aka PARAM1). If unset it is
78 probed for by enabling each free DMA channel in turn and checking if
79 initialization succeeds.
80
81 The HP-J2405A board is an exception: with this board it's easy to read the
82 EEPROM-set values for the base, IRQ, and DMA. (Of course you must already
83 _know_ the base address -- that field is for writing the EEPROM.)
84
85 III. Driver operation
86
87 IIIa. Ring buffers
88 The LANCE uses ring buffers of Tx and Rx descriptors. Each entry describes
89	the base and length of the data buffer, along with status bits. The number
90	of entries in these rings is set by LANCE_LOG_{RX,TX}_BUFFERS, which is the
91	log_2() of the ring length (rather than the ring length itself) for
92	implementation ease. The values below are 4 for both, which leads to ring
93	sizes of 16 (Tx) and 16 (Rx). Increasing the number of ring entries
94 needlessly uses extra space and reduces the chance that an upper layer will
95 be able to reorder queued Tx packets based on priority. Decreasing the number
96 of entries makes it more difficult to achieve back-to-back packet transmission
97	and increases the chance that the Rx ring will overflow. (Consider the worst case
98 of receiving back-to-back minimum-sized packets.)
99
100 The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
101 statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
102 avoid the administrative overhead. For the Rx side this avoids dynamically
103 allocating full-sized buffers "just in case", at the expense of a
104 memory-to-memory data copy for each packet received. For most systems this
105 is a good tradeoff: the Rx buffer will always be in low memory, the copy
106 is inexpensive, and it primes the cache for later packet processing. For Tx
107 the buffers are only used when needed as low-memory bounce buffers.
108
109	IIIb. 16M memory limitations.
110 For the ISA bus master mode all structures used directly by the LANCE,
111 the initialization block, Rx and Tx rings, and data buffers, must be
112 accessible from the ISA bus, i.e. in the lower 16M of real memory.
113 This is a problem for current Linux kernels on >16M machines. The network
114 devices are initialized after memory initialization, and the kernel doles out
115 memory from the top of memory downward. The current solution is to have a
116 special network initialization routine that's called before memory
117 initialization; this will eventually be generalized for all network devices.
118 As mentioned before, low-memory "bounce-buffers" are used when needed.
119
120	IIIc. Synchronization
121 The driver runs as two independent, single-threaded flows of control. One
122 is the send-packet routine, which enforces single-threaded use by the
123 dev->tbusy flag. The other thread is the interrupt handler, which is single
124 threaded by the hardware and other software.
125
126 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
127 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
128	queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
129 the 'lp->tx_full' flag.
130
131 The interrupt handler has exclusive control over the Rx ring and records stats
132 from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
133 we can't avoid the interrupt overhead by having the Tx routine reap the Tx
134 stats.) After reaping the stats, it marks the queue entry as empty by setting
135 the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
136 tx_full and tbusy flags.
137
138 */
139
140 /* Set the number of Tx and Rx buffers, using Log_2(# buffers).
141	Reasonable default values are 16 Tx buffers and 16 Rx buffers.
142	That translates to 4 (16 == 2^4) for both. */
143 #ifndef LANCE_LOG_TX_BUFFERS
144 #define LANCE_LOG_TX_BUFFERS 4
145 #define LANCE_LOG_RX_BUFFERS 4
146 #endif
147
148 #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
149 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
150 #define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
151
152 #define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
153 #define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
154 #define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
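
/* Per the AMD databook, each ring pointer in the init block holds a 24-bit
   ring address in the low bits and log_2(ring length) in the top three bits,
   which is why the LEN_BITS values above are shifted left by 29 before being
   OR'ed into the init block below. */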
155
156 #define PKT_BUF_SZ 1544
157
158 /* Offsets from base I/O address. */
159 #define LANCE_DATA 0x10
160 #define LANCE_ADDR 0x12
161 #define LANCE_RESET 0x14
162 #define LANCE_BUS_IF 0x16
163 #define LANCE_TOTAL_SIZE 0x18
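
/* The LANCE is programmed through a register-address port (LANCE_ADDR, the
   RAP) and a register-data port (LANCE_DATA, the RDP): write a CSR number to
   LANCE_ADDR, then read or write that CSR through LANCE_DATA. Reading the
   LANCE_RESET port resets the chip. The CSR0 bits used throughout this
   driver are, per the AMD databook: 0x0001 INIT, 0x0002 STRT, 0x0004 STOP,
   0x0008 TDMD, 0x0040 INEA, 0x0100 IDON, 0x0200 TINT, 0x0400 RINT,
   0x0800 MERR, 0x1000 MISS, 0x4000 BABL, 0x8000 ERR; the hexadecimal
   constants below (0x0041, 0x0042, 0x0048, 0x0142, ...) are OR-combinations
   of these. */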
164
165 /* The LANCE Rx and Tx ring descriptors. */
166 struct lance_rx_head {
167 int base;
168 short buf_length; /* This length is 2s complement (negative)! */
169 short msg_length; /* This length is "normal". */
170 };
171
172 struct lance_tx_head {
173 int base;
174 short length; /* Length is 2s complement (negative)! */
175 short misc;
176 };
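
/* In both descriptor types the top byte of 'base' doubles as the status
   byte, and its MSB is the OWN bit (0x80), so the sign of 'base' tells
   whether the chip (negative) or the driver (non-negative) owns the entry.
   The Tx value 0x83000000 written later is OWN | STP | ENP in that byte. */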
177
178 /* The LANCE initialization block, described in databook. */
179 struct lance_init_block {
180 unsigned short mode; /* Pre-set mode (reg. 15) */
181 unsigned char phys_addr[6]; /* Physical ethernet address */
182 unsigned filter[2]; /* Multicast filter (unused). */
183 /* Receive and transmit ring base, along with extra bits. */
184 unsigned rx_ring; /* Tx and Rx ring base pointers */
185 unsigned tx_ring;
186 };
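
/* The chip bus-masters this block out of memory when INIT is set in CSR0:
   'mode' is loaded into CSR15, and the ring words combine a 24-bit ring
   address with the length bits described above. The block's bus address is
   handed to the chip through CSR1 (low 16 bits) and CSR2 (high bits). */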
187
188 struct lance_private {
189 /* The Tx and Rx ring entries must be aligned on 8-byte boundaries.
190 This is always true for kmalloc'ed memory */
191 struct lance_rx_head rx_ring[RX_RING_SIZE];
192 struct lance_tx_head tx_ring[TX_RING_SIZE];
193 struct lance_init_block init_block;
194 const char *name;
195 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
196 struct sk_buff* tx_skbuff[TX_RING_SIZE];
197 long rx_buffs; /* Address of Rx and Tx buffers. */
198 /* Tx low-memory "bounce buffer" address. */
199 char (*tx_bounce_buffs)[PKT_BUF_SZ];
200 int cur_rx, cur_tx; /* The next free ring entry */
201 int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
202 int dma;
203 struct enet_statistics stats;
204 unsigned char chip_version; /* See lance_chip_type. */
205 char tx_full;
206 char lock;
207 };
208
209 #define LANCE_MUST_PAD 0x00000001
210 #define LANCE_ENABLE_AUTOSELECT 0x00000002
211 #define LANCE_MUST_REINIT_RING 0x00000004
212 #define LANCE_MUST_UNRESET 0x00000008
213 #define LANCE_HAS_MISSED_FRAME 0x00000010
214
215 /* A mapping from the chip ID number to the part number and features.
216 These are from the datasheets -- in real life the '970 version
217 reportedly has the same ID as the '965. */
218 static struct lance_chip_type {
219 int id_number;
220 const char *name;
221 int flags;
222 } chip_table[] = {
223 {0x0000, "LANCE 7990", /* Ancient lance chip. */
224 LANCE_MUST_PAD + LANCE_MUST_UNRESET},
225 {0x0003, "PCnet/ISA 79C960", /* 79C960 PCnet/ISA. */
226 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
227 LANCE_HAS_MISSED_FRAME},
228 {0x2260, "PCnet/ISA+ 79C961", /* 79C961 PCnet/ISA+, Plug-n-Play. */
229 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
230 LANCE_HAS_MISSED_FRAME},
231 {0x2420, "PCnet/PCI 79C970", /* 79C970 or 79C974 PCnet-SCSI, PCI. */
232 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
233 LANCE_HAS_MISSED_FRAME},
234 /* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
235 it the PCnet32. */
236 {0x2430, "PCnet32", /* 79C965 PCnet for VL bus. */
237 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
238 LANCE_HAS_MISSED_FRAME},
239 {0x0, "PCnet (unknown)",
240 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
241 LANCE_HAS_MISSED_FRAME},
242 };
243
244 enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, LANCE_UNKNOWN=5};
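
/* These indices must stay in step with the chip_table[] order above;
   e.g. PCNET_ISAP is compared against lance_version in lance_probe1(). */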
245
246 /* Non-zero only if the current card is a PCI with BIOS-set IRQ. */
247 static unsigned char pci_irq_line = 0;
248
249 /* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
250 Assume yes until we know the memory size. */
251 static unsigned char lance_need_isa_bounce_buffers = 1;
252
253 static int lance_open(struct device *dev);
254 static void lance_init_ring(struct device *dev);
255 static int lance_start_xmit(struct sk_buff *skb, struct device *dev);
256 static int lance_rx(struct device *dev);
257 static void lance_interrupt(int irq, struct pt_regs *regs);
258 static int lance_close(struct device *dev);
259 static struct enet_statistics *lance_get_stats(struct device *dev);
260 static void set_multicast_list(struct device *dev);
261
262
263
264 /* This lance probe is unlike the other board probes in 1.0.*. The LANCE may
265 have to allocate a contiguous low-memory region for bounce buffers.
266 This requirement is satisfied by having the lance initialization occur
267 before the memory management system is started, and thus well before the
268 other probes. */
269
270 int lance_init(void)
271 {
272 int *port;
273
274 if (high_memory <= 16*1024*1024)
275 lance_need_isa_bounce_buffers = 0;
276
277 #if defined(CONFIG_PCI)
278 if (pcibios_present()) {
279 int pci_index;
280 printk("lance.c: PCI bios is present, checking for devices...\n");
281 for (pci_index = 0; pci_index < 8; pci_index++) {
282 unsigned char pci_bus, pci_device_fn;
283 unsigned int pci_ioaddr;
284 unsigned short pci_command;
285
286 if (pcibios_find_device (PCI_VENDOR_ID_AMD,
287 PCI_DEVICE_ID_AMD_LANCE, pci_index,
288 &pci_bus, &pci_device_fn) != 0)
289 break;
290 pcibios_read_config_byte(pci_bus, pci_device_fn,
291 PCI_INTERRUPT_LINE, &pci_irq_line);
292 pcibios_read_config_dword(pci_bus, pci_device_fn,
293 PCI_BASE_ADDRESS_0, &pci_ioaddr);
294	/* Strip the low I/O-space flag bits to get the port base address. */
295 pci_ioaddr &= ~3;
296 /* PCI Spec 2.1 states that it is either the driver or PCI card's
297 * responsibility to set the PCI Master Enable Bit if needed.
298 * (From Mark Stockton <marks@schooner.sys.hou.compaq.com>)
299 */
300 pcibios_read_config_word(pci_bus, pci_device_fn,
301 PCI_COMMAND, &pci_command);
302 if ( ! (pci_command & PCI_COMMAND_MASTER)) {
303 printk("PCI Master Bit has not been set. Setting...\n");
304 pci_command |= PCI_COMMAND_MASTER;
305 pcibios_write_config_word(pci_bus, pci_device_fn,
306 PCI_COMMAND, pci_command);
307 }
308 printk("Found PCnet/PCI at %#x, irq %d.\n",
309 pci_ioaddr, pci_irq_line);
310 lance_probe1(pci_ioaddr);
311 pci_irq_line = 0;
312 }
313 }
314 #endif /* defined(CONFIG_PCI) */
315
316 for (port = lance_portlist; *port; port++) {
317 int ioaddr = *port;
318
319 if ( check_region(ioaddr, LANCE_TOTAL_SIZE) == 0) {
320 /* Detect "normal" 0x57 0x57 and the NI6510EB 0x52 0x44
321 signatures w/ minimal I/O reads */
322 char offset15, offset14 = inb(ioaddr + 14);
323
324 if ((offset14 == 0x52 || offset14 == 0x57) &&
325 ((offset15 = inb(ioaddr + 15)) == 0x57 || offset15 == 0x44))
326 lance_probe1(ioaddr);
327 }
328 }
329
330 return 0;
331 }
332
333 void lance_probe1(int ioaddr)
334 {
335 struct device *dev;
336 struct lance_private *lp;
337 short dma_channels; /* Mark spuriously-busy DMA channels */
338 int i, reset_val, lance_version;
339 const char *chipname;
340 /* Flags for specific chips or boards. */
341 unsigned char hpJ2405A = 0; /* HP ISA adaptor */
342 int hp_builtin = 0; /* HP on-board ethernet. */
343 static int did_version = 0; /* Already printed version info. */
344
345 /* First we look for special cases.
346 Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
347 There are two HP versions, check the BIOS for the configuration port.
348 This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
349 */
350 if ( *((unsigned short *) 0x000f0102) == 0x5048) {
351 static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
352 int hp_port = ( *((unsigned char *) 0x000f00f1) & 1) ? 0x499 : 0x99;
353 /* We can have boards other than the built-in! Verify this is on-board. */
354 if ((inb(hp_port) & 0xc0) == 0x80
355 && ioaddr_table[inb(hp_port) & 3] == ioaddr)
356 hp_builtin = hp_port;
357 }
358 /* We also recognize the HP Vectra on-board here, but check below. */
359 hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00
360 && inb(ioaddr+2) == 0x09);
361
362 /* Reset the LANCE. */
363 reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
364
365	/* The Un-Reset is only needed for the real NE2100, and will
366 confuse the HP board. */
367 if (!hpJ2405A)
368 outw(reset_val, ioaddr+LANCE_RESET);
369
370	outw(0x0000, ioaddr+LANCE_ADDR); /* Select CSR0. */
371 if (inw(ioaddr+LANCE_DATA) != 0x0004)
372 return;
373
374 /* Get the version of the chip. */
375 outw(88, ioaddr+LANCE_ADDR);
376 if (inw(ioaddr+LANCE_ADDR) != 88) {
377 lance_version = 0;
378 } else { /* Good, it's a newer chip. */
379 int chip_version = inw(ioaddr+LANCE_DATA);
380 outw(89, ioaddr+LANCE_ADDR);
381 chip_version |= inw(ioaddr+LANCE_DATA) << 16;
382 if (lance_debug > 2)
383 printk(" LANCE chip version is %#x.\n", chip_version);
384 if ((chip_version & 0xfff) != 0x003)
385 return;
386 chip_version = (chip_version >> 12) & 0xffff;
387 for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
388 if (chip_table[lance_version].id_number == chip_version)
389 break;
390 }
391 }
392
393 dev = init_etherdev(0, 0);
394 chipname = chip_table[lance_version].name;
395 printk("%s: %s at %#3x,", dev->name, chipname, ioaddr);
396
397 /* There is a 16 byte station address PROM at the base address.
398 The first six bytes are the station address. */
399 for (i = 0; i < 6; i++)
400 printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
401
402 dev->base_addr = ioaddr;
403 request_region(ioaddr, LANCE_TOTAL_SIZE, chip_table[lance_version].name);
404
405 /* Make certain the data structures used by the LANCE are aligned and DMAble. */
406 lp = (struct lance_private *) kmalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
407 memset(lp, 0, sizeof(*lp));
408 dev->priv = lp;
409 lp->name = chipname;
410 lp->rx_buffs = (unsigned long) kmalloc(PKT_BUF_SZ*RX_RING_SIZE, GFP_DMA | GFP_KERNEL);
411 lp->tx_bounce_buffs = NULL;
412 if (lance_need_isa_bounce_buffers)
413 lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE, GFP_DMA | GFP_KERNEL);
414
415 lp->chip_version = lance_version;
416
417 lp->init_block.mode = 0x0003; /* Disable Rx and Tx. */
418 for (i = 0; i < 6; i++)
419 lp->init_block.phys_addr[i] = dev->dev_addr[i];
420 lp->init_block.filter[0] = 0x00000000;
421 lp->init_block.filter[1] = 0x00000000;
422 lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
423 lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;
424
425 outw(0x0001, ioaddr+LANCE_ADDR);
426 inw(ioaddr+LANCE_ADDR);
427 outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
428 outw(0x0002, ioaddr+LANCE_ADDR);
429 inw(ioaddr+LANCE_ADDR);
430 outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);
431 outw(0x0000, ioaddr+LANCE_ADDR);
432 inw(ioaddr+LANCE_ADDR);
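/* CSR1 and CSR2 now hold the low and high halves of the init block's bus
   address, and CSR0 is selected again for the code below; the interleaved
   reads of LANCE_ADDR appear to be simple settling accesses. */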
433
434 if (pci_irq_line) {
435 dev->dma = 4; /* Native bus-master, no DMA channel needed. */
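/* Channel 4 is the 8237 cascade channel and never usable by a device, so
   it serves throughout this driver as the "no ISA DMA needed" marker. */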
436 dev->irq = pci_irq_line;
437 } else if (hp_builtin) {
438 static const char dma_tbl[4] = {3, 5, 6, 0};
439 static const char irq_tbl[4] = {3, 4, 5, 9};
440 unsigned char port_val = inb(hp_builtin);
441 dev->dma = dma_tbl[(port_val >> 4) & 3];
442 dev->irq = irq_tbl[(port_val >> 2) & 3];
443 printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
444 } else if (hpJ2405A) {
445 static const char dma_tbl[4] = {3, 5, 6, 7};
446 static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
447 short reset_val = inw(ioaddr+LANCE_RESET);
448 dev->dma = dma_tbl[(reset_val >> 2) & 3];
449 dev->irq = irq_tbl[(reset_val >> 4) & 7];
450 printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
451 } else if (lance_version == PCNET_ISAP) { /* The plug-n-play version. */
452 short bus_info;
453 outw(8, ioaddr+LANCE_ADDR);
454 bus_info = inw(ioaddr+LANCE_BUS_IF);
455 dev->dma = bus_info & 0x07;
456 dev->irq = (bus_info >> 4) & 0x0F;
457 } else {
458 /* The DMA channel may be passed in PARAM1. */
459 if (dev->mem_start & 0x07)
460 dev->dma = dev->mem_start & 0x07;
461 }
462
463 if (dev->dma == 0) {
464 /* Read the DMA channel status register, so that we can avoid
465 stuck DMA channels in the DMA detection below. */
466 dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
467 (inb(DMA2_STAT_REG) & 0xf0);
468 }
469 if (dev->irq >= 2)
470 printk(" assigned IRQ %d", dev->irq);
471 else {
472 /* To auto-IRQ we enable the initialization-done and DMA error
473 interrupts. For ISA boards we get a DMA error, but VLB and PCI
474 boards will work. */
475 autoirq_setup(0);
476
477 /* Trigger an initialization just for the interrupt. */
478 outw(0x0041, ioaddr+LANCE_DATA);
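/* 0x0041 = INIT | INEA: start chip initialization with interrupts enabled,
   so either IDON (bus-master boards) or a DMA/memory error (ISA boards with
   no DMA channel enabled yet) raises the IRQ line we are probing for. */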
479
480 dev->irq = autoirq_report(1);
481 if (dev->irq)
482 printk(", probed IRQ %d", dev->irq);
483 else {
484 printk(", failed to detect IRQ line.\n");
485 return;
486 }
487
488 /* Check for the initialization done bit, 0x0100, which means
489 that we don't need a DMA channel. */
490 if (inw(ioaddr+LANCE_DATA) & 0x0100)
491 dev->dma = 4;
492 }
493
494 if (dev->dma == 4) {
495 printk(", no DMA needed.\n");
496 } else if (dev->dma) {
497 if (request_dma(dev->dma, chipname)) {
498 printk("DMA %d allocation failed.\n", dev->dma);
499 return;
500 } else
501 printk(", assigned DMA %d.\n", dev->dma);
502 } else { /* OK, we have to auto-DMA. */
503 for (i = 0; i < 4; i++) {
504 static const char dmas[] = { 5, 6, 7, 3 };
505 int dma = dmas[i];
506 int boguscnt;
507
508 /* Don't enable a permanently busy DMA channel, or the machine
509 will hang. */
510 if (test_bit(dma, &dma_channels))
511 continue;
512 outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
513 if (request_dma(dma, chipname))
514 continue;
515 set_dma_mode(dma, DMA_MODE_CASCADE);
516 enable_dma(dma);
517
518 /* Trigger an initialization. */
519 outw(0x0001, ioaddr+LANCE_DATA);
520 for (boguscnt = 100; boguscnt > 0; --boguscnt)
521 if (inw(ioaddr+LANCE_DATA) & 0x0900)
522 break;
523 if (inw(ioaddr+LANCE_DATA) & 0x0100) {
524 dev->dma = dma;
525 printk(", DMA %d.\n", dev->dma);
526 break;
527 } else {
528 disable_dma(dma);
529 free_dma(dma);
530 }
531 }
532 if (i == 4) { /* Failure: bail. */
533 printk("DMA detection failed.\n");
534 return;
535 }
536 }
537
538 if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
539 /* Turn on auto-select of media (10baseT or BNC) so that the user
540 can watch the LEDs even if the board isn't opened. */
541 outw(0x0002, ioaddr+LANCE_ADDR);
542 outw(0x0002, ioaddr+LANCE_BUS_IF);
543 }
544
545 if (lance_debug > 0 && did_version++ == 0)
546 printk(version);
547
548 /* The LANCE-specific entries in the device structure. */
549 dev->open = &lance_open;
550 dev->hard_start_xmit = &lance_start_xmit;
551 dev->stop = &lance_close;
552 dev->get_stats = &lance_get_stats;
553 dev->set_multicast_list = &set_multicast_list;
554
555 return;
556 }
557
558
559 static int
560 lance_open(struct device *dev)
561 {
562 struct lance_private *lp = (struct lance_private *)dev->priv;
563 int ioaddr = dev->base_addr;
564 int i;
565
566 if (dev->irq == 0 ||
567 request_irq(dev->irq, &lance_interrupt, 0, lp->name)) {
568 return -EAGAIN;
569 }
570
571 /* We used to allocate DMA here, but that was silly.
572 DMA lines can't be shared! We now permanently allocate them. */
573
574 irq2dev_map[dev->irq] = dev;
575
576 /* Reset the LANCE */
577 inw(ioaddr+LANCE_RESET);
578
579 /* The DMA controller is used as a no-operation slave, "cascade mode". */
580 if (dev->dma != 4) {
581 enable_dma(dev->dma);
582 set_dma_mode(dev->dma, DMA_MODE_CASCADE);
583 }
584
585 /* Un-Reset the LANCE, needed only for the NE2100. */
586 if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
587 outw(0, ioaddr+LANCE_RESET);
588
589 if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
590 /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
591 outw(0x0002, ioaddr+LANCE_ADDR);
592 outw(0x0002, ioaddr+LANCE_BUS_IF);
593 }
594
595 if (lance_debug > 1)
596 printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
597 dev->name, dev->irq, dev->dma, (int) lp->tx_ring, (int) lp->rx_ring,
598 (int) &lp->init_block);
599
600 lance_init_ring(dev);
601 /* Re-initialize the LANCE, and start it when done. */
602 outw(0x0001, ioaddr+LANCE_ADDR);
603 outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
604 outw(0x0002, ioaddr+LANCE_ADDR);
605 outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);
606
607 outw(0x0004, ioaddr+LANCE_ADDR);
608 outw(0x0915, ioaddr+LANCE_DATA);
609
610 outw(0x0000, ioaddr+LANCE_ADDR);
611 outw(0x0001, ioaddr+LANCE_DATA);
612
613 dev->tbusy = 0;
614 dev->interrupt = 0;
615 dev->start = 1;
616 i = 0;
617 while (i++ < 100)
618 if (inw(ioaddr+LANCE_DATA) & 0x0100)
619 break;
620 /*
621 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
622 * reports that doing so triggers a bug in the '974.
623 */
624 outw(0x0042, ioaddr+LANCE_DATA);
625
626 if (lance_debug > 2)
627 printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
628 dev->name, i, (int) &lp->init_block, inw(ioaddr+LANCE_DATA));
629
630 return 0; /* Always succeed */
631 }
632
633 /* The LANCE has been halted for one reason or another (busmaster memory
634 arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
635 etc.). Modern LANCE variants always reload their ring-buffer
636 configuration when restarted, so we must reinitialize our ring
637 context before restarting. As part of this reinitialization,
638 find all packets still on the Tx ring and pretend that they had been
639 sent (in effect, drop the packets on the floor) - the higher-level
640 protocols will time out and retransmit. It'd be better to shuffle
641 these skbs to a temp list and then actually re-Tx them after
642 restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
643 */
644
645 static void
646 lance_purge_tx_ring(struct device *dev)
647 {
648 struct lance_private *lp = (struct lance_private *)dev->priv;
649 int i;
650
651 for (i = 0; i < TX_RING_SIZE; i++) {
652 if (lp->tx_skbuff[i]) {
653 dev_kfree_skb(lp->tx_skbuff[i],FREE_WRITE);
654 lp->tx_skbuff[i] = NULL;
655 }
656 }
657 }
658
659
660 /* Initialize the LANCE Rx and Tx rings. */
661 static void
662 lance_init_ring(struct device *dev)
663 {
664 struct lance_private *lp = (struct lance_private *)dev->priv;
665 int i;
666
667	lp->lock = 0; lp->tx_full = 0;
668 lp->cur_rx = lp->cur_tx = 0;
669 lp->dirty_rx = lp->dirty_tx = 0;
670
671 for (i = 0; i < RX_RING_SIZE; i++) {
672 lp->rx_ring[i].base = (lp->rx_buffs + i*PKT_BUF_SZ) | 0x80000000;
673 lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
674 }
675 /* The Tx buffer address is filled in as needed, but we do need to clear
676 the upper ownership bit. */
677 for (i = 0; i < TX_RING_SIZE; i++) {
678 lp->tx_ring[i].base = 0;
679 }
680
681 lp->init_block.mode = 0x0000;
682 for (i = 0; i < 6; i++)
683 lp->init_block.phys_addr[i] = dev->dev_addr[i];
684 lp->init_block.filter[0] = 0x00000000;
685 lp->init_block.filter[1] = 0x00000000;
686 lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
687 lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;
688 }
689
690 static void
691 lance_restart(struct device *dev, unsigned int csr0_bits, int must_reinit)
692 {
693 struct lance_private *lp = (struct lance_private *)dev->priv;
694
695 if (must_reinit ||
696 (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
697 lance_purge_tx_ring(dev);
698 lance_init_ring(dev);
699 }
700 outw(0x0000, dev->base_addr + LANCE_ADDR);
701 outw(csr0_bits, dev->base_addr + LANCE_DATA);
702 }
703
704 static int
705 lance_start_xmit(struct sk_buff *skb, struct device *dev)
706 {
707 struct lance_private *lp = (struct lance_private *)dev->priv;
708 int ioaddr = dev->base_addr;
709 int entry;
710 unsigned long flags;
711
712 /* Transmitter timeout, serious problems. */
713 if (dev->tbusy) {
714 int tickssofar = jiffies - dev->trans_start;
715 if (tickssofar < 20)
716 return 1;
717 outw(0, ioaddr+LANCE_ADDR);
718 printk("%s: transmit timed out, status %4.4x, resetting.\n",
719 dev->name, inw(ioaddr+LANCE_DATA));
720 outw(0x0004, ioaddr+LANCE_DATA);
721 lp->stats.tx_errors++;
722 #ifndef final_version
723 {
724 int i;
725 printk(" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
726 lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
727 lp->cur_rx);
728 for (i = 0 ; i < RX_RING_SIZE; i++)
729 printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
730 lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
731 lp->rx_ring[i].msg_length);
732 for (i = 0 ; i < TX_RING_SIZE; i++)
733 printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
734 lp->tx_ring[i].base, -lp->tx_ring[i].length,
735 lp->tx_ring[i].misc);
736 printk("\n");
737 }
738 #endif
739 lance_restart(dev, 0x0043, 1);
740
741 dev->tbusy=0;
742 dev->trans_start = jiffies;
743
744 return 0;
745 }
746
747 if (skb == NULL) {
748 dev_tint(dev);
749 return 0;
750 }
751
752 if (skb->len <= 0)
753 return 0;
754
755 if (lance_debug > 3) {
756 outw(0x0000, ioaddr+LANCE_ADDR);
757 printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
758 inw(ioaddr+LANCE_DATA));
759 outw(0x0000, ioaddr+LANCE_DATA);
760 }
761
762 /* Block a timer-based transmit from overlapping. This could better be
763 done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
764 if (set_bit(0, (void*)&dev->tbusy) != 0) {
765 printk("%s: Transmitter access conflict.\n", dev->name);
766 return 1;
767 }
768
769 if (set_bit(0, (void*)&lp->lock) != 0) {
770 if (lance_debug > 0)
771 printk("%s: tx queue lock!.\n", dev->name);
772 /* don't clear dev->tbusy flag. */
773 return 1;
774 }
775
776 /* Fill in a Tx ring entry */
777
778 /* Mask to ring buffer boundary. */
779 entry = lp->cur_tx & TX_RING_MOD_MASK;
780
781	/* Caution: the write order is important here; set the base address
782 with the "ownership" bits last. */
783
784	/* The old LANCE chips don't automatically pad buffers to the minimum size. */
785 if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
786 lp->tx_ring[entry].length =
787 -(ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN);
788 } else
789 lp->tx_ring[entry].length = -skb->len;
790
791 lp->tx_ring[entry].misc = 0x0000;
792
793 /* If any part of this buffer is >16M we must copy it to a low-memory
794 buffer. */
795 if ((int)(skb->data) + skb->len > 0x01000000) {
796 if (lance_debug > 5)
797 printk("%s: bouncing a high-memory packet (%#x).\n",
798 dev->name, (int)(skb->data));
799 memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len);
800 lp->tx_ring[entry].base =
801 (int)(lp->tx_bounce_buffs + entry) | 0x83000000;
802 dev_kfree_skb (skb, FREE_WRITE);
803 } else {
804 lp->tx_skbuff[entry] = skb;
805 lp->tx_ring[entry].base = (int)(skb->data) | 0x83000000;
806 }
807 lp->cur_tx++;
808
809 /* Trigger an immediate send poll. */
810 outw(0x0000, ioaddr+LANCE_ADDR);
811 outw(0x0048, ioaddr+LANCE_DATA);
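/* 0x0048 = INEA | TDMD: leave interrupts enabled and demand an immediate
   transmit poll of the ring. */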
812
813 dev->trans_start = jiffies;
814
815 save_flags(flags);
816 cli();
817 lp->lock = 0;
818 if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base == 0)
819 dev->tbusy=0;
820 else
821 lp->tx_full = 1;
822 restore_flags(flags);
823
824 return 0;
825 }
826
827 /* The LANCE interrupt handler. */
828 static void
829 lance_interrupt(int irq, struct pt_regs * regs)
830 {
831 struct device *dev = (struct device *)(irq2dev_map[irq]);
832 struct lance_private *lp;
833 int csr0, ioaddr, boguscnt=10;
834 int must_restart;
835
836 if (dev == NULL) {
837 printk ("lance_interrupt(): irq %d for unknown device.\n", irq);
838 return;
839 }
840
841 ioaddr = dev->base_addr;
842 lp = (struct lance_private *)dev->priv;
843 if (dev->interrupt)
844 printk("%s: Re-entering the interrupt handler.\n", dev->name);
845
846 dev->interrupt = 1;
847
848 outw(0x00, dev->base_addr + LANCE_ADDR);
849 while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600
850 && --boguscnt >= 0) {
851 /* Acknowledge all of the current interrupt sources ASAP. */
852 outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
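/* Per the databook, CSR0 cause bits are cleared by writing them back as
   ones; masking off 0x004f keeps the control bits (INEA, TDMD, STOP, STRT,
   INIT) from being re-issued by accident. This also drops INEA for the
   duration of the handler; it is set again by the 0x7940 write on exit. */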
853
854 must_restart = 0;
855
856 if (lance_debug > 5)
857 printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
858 dev->name, csr0, inw(dev->base_addr + LANCE_DATA));
859
860 if (csr0 & 0x0400) /* Rx interrupt */
861 lance_rx(dev);
862
863 if (csr0 & 0x0200) { /* Tx-done interrupt */
864 int dirty_tx = lp->dirty_tx;
865
866 while (dirty_tx < lp->cur_tx) {
867 int entry = dirty_tx & TX_RING_MOD_MASK;
868 int status = lp->tx_ring[entry].base;
869
870 if (status < 0)
871 break; /* It still hasn't been Txed */
872
873 lp->tx_ring[entry].base = 0;
874
875 if (status & 0x40000000) {
876	/* There was a major error; log it. */
877 int err_status = lp->tx_ring[entry].misc;
878 lp->stats.tx_errors++;
879 if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
880 if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
881 if (err_status & 0x1000) lp->stats.tx_window_errors++;
882 if (err_status & 0x4000) {
883 /* Ackk! On FIFO errors the Tx unit is turned off! */
884 lp->stats.tx_fifo_errors++;
885 /* Remove this verbosity later! */
886 printk("%s: Tx FIFO error! Status %4.4x.\n",
887 dev->name, csr0);
888 /* Restart the chip. */
889 must_restart = 1;
890 }
891 } else {
892 if (status & 0x18000000)
893 lp->stats.collisions++;
894 lp->stats.tx_packets++;
895 }
896
897 /* We must free the original skb if it's not a data-only copy
898 in the bounce buffer. */
899 if (lp->tx_skbuff[entry]) {
900 dev_kfree_skb(lp->tx_skbuff[entry],FREE_WRITE);
901 lp->tx_skbuff[entry] = 0;
902 }
903 dirty_tx++;
904 }
905
906 #ifndef final_version
907 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
908 printk("out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
909 dirty_tx, lp->cur_tx, lp->tx_full);
910 dirty_tx += TX_RING_SIZE;
911 }
912 #endif
913
914 if (lp->tx_full && dev->tbusy
915 && dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
916 /* The ring is no longer full, clear tbusy. */
917 lp->tx_full = 0;
918 dev->tbusy = 0;
919 mark_bh(NET_BH);
920 }
921
922 lp->dirty_tx = dirty_tx;
923 }
924
925 /* Log misc errors. */
926 if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
927 if (csr0 & 0x1000) lp->stats.rx_errors++; /* Missed a Rx frame. */
928 if (csr0 & 0x0800) {
929 printk("%s: Bus master arbitration failure, status %4.4x.\n",
930 dev->name, csr0);
931 /* Restart the chip. */
932 must_restart = 1;
933 }
934
935 if (must_restart) {
936 /* stop the chip to clear the error condition, then restart */
937 outw(0x0000, dev->base_addr + LANCE_ADDR);
938 outw(0x0004, dev->base_addr + LANCE_DATA);
939 lance_restart(dev, 0x0002, 0);
940 }
941 }
942
943 /* Clear any other interrupt, and set interrupt enable. */
944 outw(0x0000, dev->base_addr + LANCE_ADDR);
945 outw(0x7940, dev->base_addr + LANCE_DATA);
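/* 0x7940 = BABL | CERR | MISS | MERR | IDON | INEA: acknowledge any
   remaining error/init-done conditions and re-enable interrupts. */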
946
947 if (lance_debug > 4)
948 printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
949 dev->name, inw(ioaddr + LANCE_ADDR),
950 inw(dev->base_addr + LANCE_DATA));
951
952 dev->interrupt = 0;
953 return;
954 }
955
956 static int
957 lance_rx(struct device *dev)
958 {
959 struct lance_private *lp = (struct lance_private *)dev->priv;
960 int entry = lp->cur_rx & RX_RING_MOD_MASK;
961 int i;
962
963 /* If we own the next entry, it's a new packet. Send it up. */
964 while (lp->rx_ring[entry].base >= 0) {
965 int status = lp->rx_ring[entry].base >> 24;
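/* Rx status bits in the top byte of 'base' (databook): 0x01 ENP, 0x02 STP,
   0x04 BUF, 0x08 CRC, 0x10 OFLO, 0x20 FRAM, 0x40 ERR, 0x80 OWN. A clean
   frame contained in a single buffer is exactly STP | ENP == 0x03. */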
966
967 if (status != 0x03) { /* There was an error. */
968 /* There is a tricky error noted by John Murphy,
969 <murf@perftech.com> to Russ Nelson: Even with full-sized
970 buffers it's possible for a jabber packet to use two
971 buffers, with only the last correctly noting the error. */
972 if (status & 0x01) /* Only count a general error at the */
973 lp->stats.rx_errors++; /* end of a packet.*/
974 if (status & 0x20) lp->stats.rx_frame_errors++;
975 if (status & 0x10) lp->stats.rx_over_errors++;
976 if (status & 0x08) lp->stats.rx_crc_errors++;
977 if (status & 0x04) lp->stats.rx_fifo_errors++;
978 lp->rx_ring[entry].base &= 0x03ffffff;
979 }
980 else
981 {
982 /* Malloc up new buffer, compatible with net-2e. */
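/* The message length reported by the chip includes the 4-byte FCS, which
   is not passed up the stack, hence the -4 below. */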
983 short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
984 struct sk_buff *skb;
985
986 if(pkt_len<60)
987 {
988 printk("%s: Runt packet!\n",dev->name);
989 lp->stats.rx_errors++;
990 }
991 else
992 {
993 skb = dev_alloc_skb(pkt_len+2);
994 if (skb == NULL)
995 {
996 printk("%s: Memory squeeze, deferring packet.\n", dev->name);
997 for (i=0; i < RX_RING_SIZE; i++)
998 if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
999 break;
1000
1001 if (i > RX_RING_SIZE -2)
1002 {
1003 lp->stats.rx_dropped++;
1004 lp->rx_ring[entry].base |= 0x80000000;
1005 lp->cur_rx++;
1006 }
1007 break;
1008 }
1009 skb->dev = dev;
1010 skb_reserve(skb,2); /* 16 byte align */
1011 skb_put(skb,pkt_len); /* Make room */
1012 eth_copy_and_sum(skb,
1013 (unsigned char *)(lp->rx_ring[entry].base & 0x00ffffff),
1014 pkt_len,0);
1015 skb->protocol=eth_type_trans(skb,dev);
1016 netif_rx(skb);
1017 lp->stats.rx_packets++;
1018 }
1019 }
1020 /* The docs say that the buffer length isn't touched, but Andrew Boyd
1021 of QNX reports that some revs of the 79C965 clear it. */
1022 lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
1023 lp->rx_ring[entry].base |= 0x80000000;
1024 entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
1025 }
1026
1027 /* We should check that at least two ring entries are free. If not,
1028 we should free one and mark stats->rx_dropped++. */
1029
1030 return 0;
1031 }
1032
1033 static int
1034 lance_close(struct device *dev)
1035 {
1036 int ioaddr = dev->base_addr;
1037 struct lance_private *lp = (struct lance_private *)dev->priv;
1038
1039 dev->start = 0;
1040 dev->tbusy = 1;
1041
1042 if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1043 outw(112, ioaddr+LANCE_ADDR);
1044 lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1045 }
1046 outw(0, ioaddr+LANCE_ADDR);
1047
1048 if (lance_debug > 1)
1049 printk("%s: Shutting down ethercard, status was %2.2x.\n",
1050 dev->name, inw(ioaddr+LANCE_DATA));
1051
1052 /* We stop the LANCE here -- it occasionally polls
1053 memory if we don't. */
1054 outw(0x0004, ioaddr+LANCE_DATA);
1055
1056 if (dev->dma != 4)
1057 disable_dma(dev->dma);
1058
1059 free_irq(dev->irq);
1060
1061 irq2dev_map[dev->irq] = 0;
1062
1063 return 0;
1064 }
1065
1066 static struct enet_statistics *
1067 lance_get_stats(struct device *dev)
1068 {
1069 struct lance_private *lp = (struct lance_private *)dev->priv;
1070 short ioaddr = dev->base_addr;
1071 short saved_addr;
1072 unsigned long flags;
1073
1074 if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1075 save_flags(flags);
1076 cli();
1077 saved_addr = inw(ioaddr+LANCE_ADDR);
1078 outw(112, ioaddr+LANCE_ADDR);
1079 lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1080 outw(saved_addr, ioaddr+LANCE_ADDR);
1081 restore_flags(flags);
1082 }
1083
1084 return &lp->stats;
1085 }
1086
1087 /* Set or clear the multicast filter for this adaptor.
1088 */
1089
1090 static void set_multicast_list(struct device *dev)
1091 {
1092 short ioaddr = dev->base_addr;
1093
1094 outw(0, ioaddr+LANCE_ADDR);
1095 outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */
1096
1097 if (dev->flags&IFF_PROMISC) {
1098 /* Log any net taps. */
1099 printk("%s: Promiscuous mode enabled.\n", dev->name);
1100 outw(15, ioaddr+LANCE_ADDR);
1101 outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
1102 } else {
1103 short multicast_table[4];
1104 int i;
1105 int num_addrs=dev->mc_count;
1106 if(dev->flags&IFF_ALLMULTI)
1107 num_addrs=1;
1108 /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
1109 memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
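/* CSR8-CSR11 hold the 64-bit logical (multicast hash) address filter;
   all-ones accepts every multicast frame, all-zeroes rejects them all. */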
1110 for (i = 0; i < 4; i++) {
1111 outw(8 + i, ioaddr+LANCE_ADDR);
1112 outw(multicast_table[i], ioaddr+LANCE_DATA);
1113 }
1114 outw(15, ioaddr+LANCE_ADDR);
1115 outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
1116 }
1117
1118 lance_restart(dev, 0x0142, 0); /* Resume normal operation */
1119
1120 }
1121
1122
1123 /*
1124 * Local variables:
1125 * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c lance.c"
1126 * c-indent-level: 4
1127 * tab-width: 4
1128 * End:
1129 */