/* lance.c: An AMD LANCE ethernet driver for linux. */
/*
    Written 1993-94 by Donald Becker.

    Copyright 1993 United States Government as represented by the
    Director, National Security Agency.
    This software may be used and distributed according to the terms
    of the GNU Public License, incorporated herein by reference.

    This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
    with most other LANCE-based bus-master (NE2100 clone) ethercards.

    The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
    Center of Excellence in Space Data and Information Sciences
    Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
*/

static char *version = "lance.c:v1.05 9/23/94 becker@cesdis.gsfc.nasa.gov\n";

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/malloc.h>
#include <linux/interrupt.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

struct device *init_etherdev(struct device *dev, int sizeof_private,
                             unsigned long *mem_startp);
static unsigned int lance_portlist[] = {0x300, 0x320, 0x340, 0x360, 0};
unsigned long lance_probe1(short ioaddr, unsigned long mem_start);

#ifdef HAVE_DEVLIST
struct netdev_entry lance_drv =
    {"lance", lance_probe1, LANCE_TOTAL_SIZE, lance_portlist};
#endif

#ifdef LANCE_DEBUG
int lance_debug = LANCE_DEBUG;
#else
int lance_debug = 1;
#endif

/*
                Theory of Operation

I. Board Compatibility

This device driver is designed for the AMD 79C960, the "PCnet-ISA
single-chip ethernet controller for ISA".  This chip is used in a wide
variety of boards from vendors such as Allied Telesis, HP, Kingston,
and Boca.  This driver is also intended to work with older AMD 7990
designs, such as the NE1500 and NE2100, and newer 79C961.  For convenience,
I use the name LANCE to refer to all of the AMD chips, even though it properly
refers only to the original 7990.

II. Board-specific settings

The driver is designed to work with boards that use the faster
bus-master mode, rather than in shared memory mode.  (Only older designs
have on-board buffer memory needed to support the slower shared memory mode.)

Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
channel.  This driver probes the likely base addresses:
{0x300, 0x320, 0x340, 0x360}.
After the board is found it generates a DMA-timeout interrupt and uses
autoIRQ to find the IRQ line.  The DMA channel can be set with the low bits
of the otherwise-unused dev->mem_start value (aka PARAM1).  If unset it is
probed for by enabling each free DMA channel in turn and checking if
initialization succeeds.

The HP-J2405A board is an exception: with this board it's easy to read the
EEPROM-set values for the base, IRQ, and DMA.  (Of course you must already
_know_ the base address -- that field is for writing the EEPROM.)

III. Driver operation

IIIa. Ring buffers
The LANCE uses ring buffers of Tx and Rx descriptors.  Each entry describes
the base and length of the data buffer, along with status bits.  The number
of entries in each ring is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2()
of the ring size (rather than the size itself) for implementation ease.  The
current values are 4 (Tx) and 4 (Rx), which lead to ring sizes of 16 (Tx) and
16 (Rx).  Increasing the number of ring entries needlessly uses extra space
and reduces the chance that an upper layer will be able to reorder queued Tx
packets based on priority.  Decreasing the number of entries makes it more
difficult to achieve back-to-back packet transmission and increases the
chance that the Rx ring will overflow.  (Consider the worst case of receiving
back-to-back minimum-sized packets.)

The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
avoid the administrative overhead.  For the Rx side this avoids dynamically
allocating full-sized buffers "just in case", at the expense of a
memory-to-memory data copy for each packet received.  For most systems this
is a good tradeoff: the Rx buffer will always be in low memory, the copy
is inexpensive, and it primes the cache for later packet processing.  For Tx
the buffers are only used when needed as low-memory bounce buffers.

IIIb. 16M memory limitations.
For the ISA bus master mode all structures used directly by the LANCE,
the initialization block, Rx and Tx rings, and data buffers, must be
accessible from the ISA bus, i.e. in the lower 16M of real memory.
This is a problem for current Linux kernels on >16M machines.  The network
devices are initialized after memory initialization, and the kernel doles out
memory from the top of memory downward.  The current solution is to have a
special network initialization routine that's called before memory
initialization; this will eventually be generalized for all network devices.
As mentioned before, low-memory "bounce-buffers" are used when needed.

IIIc. Synchronization
The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and other software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  (The Tx-done interrupt can't be selectively turned off, so
we can't avoid the interrupt overhead by having the Tx routine reap the Tx
stats.)  After reaping the stats, it marks the queue entry as empty by setting
the 'base' to zero.  Iff the 'lp->tx_full' flag is set, it clears both the
tx_full and tbusy flags.

*/

/* Set the number of Tx and Rx buffers, using Log_2(# buffers).
   The default values are 16 Tx buffers and 16 Rx buffers, i.e.
   log2 values of 4 (16 == 2^4) for both. */
#ifndef LANCE_LOG_TX_BUFFERS
#define LANCE_LOG_TX_BUFFERS 4
#define LANCE_LOG_RX_BUFFERS 4
#endif

#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)

#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
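/* In the init block the ring length is encoded as log2(#entries) in the top
   three bits of the 32-bit ring-base word, hence the shift by 29 above. */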

#define PKT_BUF_SZ 1544
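/* PKT_BUF_SZ comfortably holds a maximum-size Ethernet frame (1518 bytes). */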

/* Offsets from base I/O address. */
#define LANCE_DATA 0x10
#define LANCE_ADDR 0x12
#define LANCE_RESET 0x14
#define LANCE_BUS_IF 0x16
#define LANCE_TOTAL_SIZE 0x18

/* The LANCE Rx and Tx ring descriptors. */
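/* In both descriptors the low 24 bits of 'base' hold the buffer bus address;
   the high byte holds the ownership/status bits (OWN, STP, ENP and the error
   flags), so e.g. 0x80000000 gives the entry to the chip. */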
struct lance_rx_head {
    int base;
    short buf_length;           /* This length is 2s complement (negative)! */
    short msg_length;           /* This length is "normal". */
};

struct lance_tx_head {
    int base;
    short length;               /* Length is 2s complement (negative)! */
    short misc;
};

/* The LANCE initialization block, described in databook. */
struct lance_init_block {
    unsigned short mode;        /* Pre-set mode (reg. 15) */
    unsigned char phys_addr[6]; /* Physical ethernet address */
    unsigned filter[2];         /* Multicast filter (unused). */
    /* Receive and transmit ring base, along with extra bits. */
    unsigned rx_ring;           /* Tx and Rx ring base pointers */
    unsigned tx_ring;
};

struct lance_private {
    char devname[8];
    /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
    struct lance_rx_head rx_ring[RX_RING_SIZE];
    struct lance_tx_head tx_ring[TX_RING_SIZE];
    struct lance_init_block init_block;
    /* The saved address of a sent-in-place packet/buffer, for dev_kfree_skb(). */
    struct sk_buff *tx_skbuff[TX_RING_SIZE];
    long rx_buffs;              /* Address of Rx and Tx buffers. */
    /* Tx low-memory "bounce buffer" address. */
    char (*tx_bounce_buffs)[PKT_BUF_SZ];
    int cur_rx, cur_tx;         /* The next free ring entry */
    int dirty_rx, dirty_tx;     /* The ring entries to be free()ed. */
    int dma;
    struct enet_statistics stats;
    char chip_version;          /* See lance_chip_type. */
    char tx_full;
    char lock;
    int pad0, pad1;             /* Used for 8-byte alignment */
};

/* A mapping from the chip ID number to the part number and features.
   These are from the datasheets -- in real life the '970 version
   reportedly has the same ID as the '965. */
static struct lance_chip_type {
    int id_number;
    char *name;
    int flags;
} chip_table[] = {
    {0x0000, "LANCE 7990", 0},          /* Ancient lance chip. */
    {0x0003, "PCnet/ISA 79C960", 0},    /* 79C960 PCnet/ISA. */
    {0x2260, "PCnet/ISA+ 79C961", 0},   /* 79C961 PCnet/ISA+, Plug-n-Play. */
    {0x2420, "PCnet/PCI 79C970", 0},    /* 79C970 or 79C974 PCnet-SCSI, PCI. */
    {0x2430, "PCnet/VLB 79C965", 0},    /* 79C965 PCnet for VL bus. */
    {0x0, "PCnet (unknown)", 0},
};

enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, LANCE_UNKNOWN=5};

static int lance_open(struct device *dev);
static void lance_init_ring(struct device *dev);
static int lance_start_xmit(struct sk_buff *skb, struct device *dev);
static int lance_rx(struct device *dev);
static void lance_interrupt(int reg_ptr);
static int lance_close(struct device *dev);
static struct enet_statistics *lance_get_stats(struct device *dev);
#ifdef HAVE_MULTICAST
static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);
#endif



/* This lance probe is unlike the other board probes in 1.0.*.  The LANCE may
   have to allocate a contiguous low-memory region for bounce buffers.
   This requirement is satisfied by having the lance initialization occur before the
   memory management system is started, and thus well before the other probes. */
unsigned long lance_init(unsigned long mem_start, unsigned long mem_end)
{
    unsigned int *port;

    for (port = lance_portlist; *port; port++) {
        int ioaddr = *port;

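        /* Probe further only if the I/O region is free and the station-address
           PROM shows the 0x57, 0x57 ('W', 'W') signature bytes at offsets 14 and 15. */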
        if (check_region(ioaddr, LANCE_TOTAL_SIZE) == 0
            && inb(ioaddr + 14) == 0x57
            && inb(ioaddr + 15) == 0x57) {
            mem_start = lance_probe1(ioaddr, mem_start);
        }
    }

    return mem_start;
}

unsigned long lance_probe1(short ioaddr, unsigned long mem_start)
{
    struct device *dev;
    struct lance_private *lp;
    short dma_channels;                 /* Mark spuriously-busy DMA channels */
    int i, reset_val, lance_version;
    /* Flags for specific chips or boards. */
    unsigned char hpJ2405A = 0;         /* HP ISA adaptor */
    int hp_builtin = 0;                 /* HP on-board ethernet. */
    static int did_version = 0;         /* Already printed version info. */

    /* First we look for special cases.
       Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
       There are two HP versions, check the BIOS for the configuration port.
       This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
    */
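    /* 0x5048 is the ASCII string "HP" read as a little-endian short. */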
    if (*((unsigned short *) 0x000f0102) == 0x5048) {
        short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
        int hp_port = (*((unsigned char *) 0x000f00f1) & 1) ? 0x499 : 0x99;
        /* We can have boards other than the built-in!  Verify this is on-board. */
        if ((inb(hp_port) & 0xc0) == 0x80
            && ioaddr_table[inb(hp_port) & 3] == ioaddr)
            hp_builtin = hp_port;
    }
    /* We also recognize the HP Vectra on-board here, but check below. */
    hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00
                && inb(ioaddr+2) == 0x09);

    /* Reset the LANCE. */
    reset_val = inw(ioaddr+LANCE_RESET);

    /* The Un-Reset is only needed for the real NE2100, and will
       confuse the HP board. */
    if (!hpJ2405A)
        outw(reset_val, ioaddr+LANCE_RESET);

    outw(0x0000, ioaddr+LANCE_ADDR);    /* Select CSR0. */
    if (inw(ioaddr+LANCE_DATA) != 0x0004)
        return mem_start;

    /* Get the version of the chip. */
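    /* The newer 79C96x parts implement chip ID registers at CSR88/89; on the
       original 7990 the RAP apparently will not hold the value 88, which is
       what the write-back check below detects. */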
    outw(88, ioaddr+LANCE_ADDR);
    if (inw(ioaddr+LANCE_ADDR) != 88) {
        lance_version = 0;
    } else {                            /* Good, it's a newer chip. */
        int chip_version = inw(ioaddr+LANCE_DATA);
        outw(89, ioaddr+LANCE_ADDR);
        chip_version |= inw(ioaddr+LANCE_DATA) << 16;
        if (lance_debug > 2)
            printk(" LANCE chip version is %#x.\n", chip_version);
        if ((chip_version & 0xfff) != 0x003)
            return mem_start;
        chip_version = (chip_version >> 12) & 0xffff;
        for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
            if (chip_table[lance_version].id_number == chip_version)
                break;
        }
    }

    dev = init_etherdev(0, sizeof(struct lance_private)
                        + PKT_BUF_SZ*(RX_RING_SIZE + TX_RING_SIZE),
                        &mem_start);

    printk("%s: %s at %#3x,", dev->name, chip_table[lance_version].name, ioaddr);

    /* There is a 16 byte station address PROM at the base address.
       The first six bytes are the station address. */
    for (i = 0; i < 6; i++)
        printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));

    dev->base_addr = ioaddr;
    snarf_region(ioaddr, LANCE_TOTAL_SIZE);

    /* Make certain the data structures used by the LANCE are aligned. */
    dev->priv = (void *)(((int)dev->priv + 7) & ~7);
    lp = (struct lance_private *)dev->priv;
    lp->rx_buffs = (long)dev->priv + sizeof(struct lance_private);
    lp->tx_bounce_buffs = (char (*)[PKT_BUF_SZ])
                          (lp->rx_buffs + PKT_BUF_SZ*RX_RING_SIZE);

#ifndef final_version
    /* This should never happen. */
    if ((int)(lp->rx_ring) & 0x07) {
        printk(" **ERROR** LANCE Rx and Tx rings not on even boundary.\n");
        return mem_start;
    }
#endif

    lp->chip_version = lance_version;

    lp->init_block.mode = 0x0003;       /* Disable Rx and Tx. */
    for (i = 0; i < 6; i++)
        lp->init_block.phys_addr[i] = dev->dev_addr[i];
    lp->init_block.filter[0] = 0x00000000;
    lp->init_block.filter[1] = 0x00000000;
    lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
    lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;

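    /* Point the LANCE at the init block: CSR1 takes the low 16 bits of its
       bus address, CSR2 the high bits. */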
    outw(0x0001, ioaddr+LANCE_ADDR);
    inw(ioaddr+LANCE_ADDR);
    outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
    outw(0x0002, ioaddr+LANCE_ADDR);
    inw(ioaddr+LANCE_ADDR);
    outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);
    outw(0x0000, ioaddr+LANCE_ADDR);
    inw(ioaddr+LANCE_ADDR);

    if (hp_builtin) {
        char dma_tbl[4] = {3, 5, 6, 0};
        char irq_tbl[8] = {3, 4, 5, 9};
        unsigned char port_val = inb(hp_builtin);
        dev->dma = dma_tbl[(port_val >> 4) & 3];
        dev->irq = irq_tbl[(port_val >> 2) & 3];
        printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
    } else if (hpJ2405A) {
        char dma_tbl[4] = {3, 5, 6, 7};
        char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
        short reset_val = inw(ioaddr+LANCE_RESET);
        dev->dma = dma_tbl[(reset_val >> 2) & 3];
        dev->irq = irq_tbl[(reset_val >> 4) & 7];
        printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
    } else if (lance_version == PCNET_ISAP) {   /* The plug-n-play version. */
        short bus_info;
        outw(8, ioaddr+LANCE_ADDR);
        bus_info = inw(ioaddr+LANCE_BUS_IF);
        dev->dma = bus_info & 0x07;
        dev->irq = (bus_info >> 4) & 0x0F;
    } else {
        /* The DMA channel may be passed in PARAM1. */
        if (dev->mem_start & 0x07)
            dev->dma = dev->mem_start & 0x07;
    }

    if (dev->dma == 0) {
        /* Read the DMA channel status register, so that we can avoid
           stuck DMA channels in the DMA detection below. */
        dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
                       (inb(DMA2_STAT_REG) & 0xf0);
    }
    if (dev->irq >= 2)
        printk(" assigned IRQ %d", dev->irq);
    else {
        /* To auto-IRQ we enable the initialization-done and DMA error
           interrupts.  For ISA boards we get a DMA error, but VLB and PCI
           boards will work. */
        autoirq_setup(0);

        /* Trigger an initialization just for the interrupt. */
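        /* 0x0041 = INEA | INIT: enable interrupts and start initialization. */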
        outw(0x0041, ioaddr+LANCE_DATA);

        dev->irq = autoirq_report(1);
        if (dev->irq)
            printk(", probed IRQ %d", dev->irq);
        else {
            printk(", failed to detect IRQ line.\n");
            return mem_start;
        }

        /* Check for the initialization done bit, 0x0100, which means
           that we don't need a DMA channel. */
        if (inw(ioaddr+LANCE_DATA) & 0x0100)
            dev->dma = 4;
    }

    if (dev->dma == 4) {
        printk(", no DMA needed.\n");
    } else if (dev->dma) {
        if (request_dma(dev->dma, "lance")) {
            printk("DMA %d allocation failed.\n", dev->dma);
            return mem_start;
        } else
            printk(", assigned DMA %d.\n", dev->dma);
    } else {                            /* OK, we have to auto-DMA. */
        int dmas[] = { 5, 6, 7, 3 }, boguscnt;

        for (i = 0; i < 4; i++) {
            int dma = dmas[i];

            /* Don't enable a permanently busy DMA channel, or the machine
               will hang. */
            if (test_bit(dma, &dma_channels))
                continue;
            outw(0x7f04, ioaddr+LANCE_DATA);    /* Clear the memory error bits. */
            if (request_dma(dma, "lance"))
                continue;
            set_dma_mode(dma, DMA_MODE_CASCADE);
            enable_dma(dma);

            /* Trigger an initialization. */
            outw(0x0001, ioaddr+LANCE_DATA);
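            /* Wait for IDON (0x0100) or a memory error (MERR, 0x0800); only a
               clean IDON means this DMA channel really works. */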
            for (boguscnt = 100; boguscnt > 0; --boguscnt)
                if (inw(ioaddr+LANCE_DATA) & 0x0900)
                    break;
            if (inw(ioaddr+LANCE_DATA) & 0x0100) {
                dev->dma = dma;
                printk(", DMA %d.\n", dev->dma);
                break;
            } else {
                disable_dma(dma);
                free_dma(dma);
            }
        }
        if (i == 4) {                   /* Failure: bail. */
            printk("DMA detection failed.\n");
            return mem_start;
        }
    }

    if (lp->chip_version != OLD_LANCE) {
        /* Turn on auto-select of media (10baseT or BNC) so that the user
           can watch the LEDs even if the board isn't opened. */
        outw(0x0002, ioaddr+LANCE_ADDR);
        outw(0x0002, ioaddr+LANCE_BUS_IF);
    }

    if (lance_debug > 0 && did_version++ == 0)
        printk(version);

    /* The LANCE-specific entries in the device structure. */
    dev->open = &lance_open;
    dev->hard_start_xmit = &lance_start_xmit;
    dev->stop = &lance_close;
    dev->get_stats = &lance_get_stats;
    dev->set_multicast_list = &set_multicast_list;

    return mem_start;
}


static int
lance_open(struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    int ioaddr = dev->base_addr;
    int i;

    if (request_irq(dev->irq, &lance_interrupt, 0, "lance")) {
        return -EAGAIN;
    }

    /* We used to allocate DMA here, but that was silly.
       DMA lines can't be shared!  We now permanently snarf them. */

    irq2dev_map[dev->irq] = dev;

    /* Reset the LANCE */
    inw(ioaddr+LANCE_RESET);

    /* The DMA controller is used as a no-operation slave, "cascade mode". */
    if (dev->dma != 4) {
        enable_dma(dev->dma);
        set_dma_mode(dev->dma, DMA_MODE_CASCADE);
    }

    /* Un-Reset the LANCE, needed only for the NE2100. */
    if (lp->chip_version == OLD_LANCE)
        outw(0, ioaddr+LANCE_RESET);

    if (lp->chip_version != OLD_LANCE) {
        /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
        outw(0x0002, ioaddr+LANCE_ADDR);
        outw(0x0002, ioaddr+LANCE_BUS_IF);
    }

    if (lance_debug > 1)
        printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
               dev->name, dev->irq, dev->dma, (int) lp->tx_ring, (int) lp->rx_ring,
               (int) &lp->init_block);

    lance_init_ring(dev);
    /* Re-initialize the LANCE, and start it when done. */
    outw(0x0001, ioaddr+LANCE_ADDR);
    outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
    outw(0x0002, ioaddr+LANCE_ADDR);
    outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);

    outw(0x0004, ioaddr+LANCE_ADDR);
    outw(0x0d15, ioaddr+LANCE_DATA);

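    /* Select CSR0 and set INIT so the chip reads the init block. */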
    outw(0x0000, ioaddr+LANCE_ADDR);
    outw(0x0001, ioaddr+LANCE_DATA);

    dev->tbusy = 0;
    dev->interrupt = 0;
    dev->start = 1;
    i = 0;
    while (i++ < 100)
        if (inw(ioaddr+LANCE_DATA) & 0x0100)
            break;
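    /* Acknowledge IDON (0x0100) and set STRT | INEA (0x0042) to start the chip
       with interrupts enabled. */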
    outw(0x0142, ioaddr+LANCE_DATA);

    if (lance_debug > 2)
        printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
               dev->name, i, (int) &lp->init_block, inw(ioaddr+LANCE_DATA));

    return 0;                   /* Always succeed */
}

/* Initialize the LANCE Rx and Tx rings. */
static void
lance_init_ring(struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    int i;

    lp->lock = 0, lp->tx_full = 0;
    lp->cur_rx = lp->cur_tx = 0;
    lp->dirty_rx = lp->dirty_tx = 0;

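    /* Hand every Rx buffer to the chip: the low 24 bits of 'base' are the
       buffer address, the 0x80 in the top byte is the OWN bit. */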
    for (i = 0; i < RX_RING_SIZE; i++) {
        lp->rx_ring[i].base = (lp->rx_buffs + i*PKT_BUF_SZ) | 0x80000000;
        lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
    }
    /* The Tx buffer address is filled in as needed, but we do need to clear
       the upper ownership bit. */
    for (i = 0; i < TX_RING_SIZE; i++) {
        lp->tx_ring[i].base = 0;
    }

    lp->init_block.mode = 0x0000;
    for (i = 0; i < 6; i++)
        lp->init_block.phys_addr[i] = dev->dev_addr[i];
    lp->init_block.filter[0] = 0x00000000;
    lp->init_block.filter[1] = 0x00000000;
    lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
    lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;
}

static int
lance_start_xmit(struct sk_buff *skb, struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    int ioaddr = dev->base_addr;
    int entry;

    /* Transmitter timeout, serious problems. */
    if (dev->tbusy) {
        int tickssofar = jiffies - dev->trans_start;
        if (tickssofar < 20)
            return 1;
        outw(0, ioaddr+LANCE_ADDR);
        printk("%s: transmit timed out, status %4.4x, resetting.\n",
               dev->name, inw(ioaddr+LANCE_DATA));
        outw(0x0004, ioaddr+LANCE_DATA);
        lp->stats.tx_errors++;
#ifndef final_version
        {
            int i;
            printk(" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
                   lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
                   lp->cur_rx);
            for (i = 0; i < RX_RING_SIZE; i++)
                printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
                       lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
                       lp->rx_ring[i].msg_length);
            for (i = 0; i < TX_RING_SIZE; i++)
                printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
                       lp->tx_ring[i].base, -lp->tx_ring[i].length,
                       lp->tx_ring[i].misc);
            printk("\n");
        }
#endif
        lance_init_ring(dev);
        outw(0x0043, ioaddr+LANCE_DATA);

        dev->tbusy = 0;
        dev->trans_start = jiffies;

        return 0;
    }

    if (skb == NULL) {
        dev_tint(dev);
        return 0;
    }

    if (skb->len <= 0)
        return 0;

    if (lance_debug > 3) {
        outw(0x0000, ioaddr+LANCE_ADDR);
        printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
               inw(ioaddr+LANCE_DATA));
        outw(0x0000, ioaddr+LANCE_DATA);
    }

    /* Block a timer-based transmit from overlapping.  This could better be
       done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
    if (set_bit(0, (void*)&dev->tbusy) != 0) {
        printk("%s: Transmitter access conflict.\n", dev->name);
        return 1;
    }

    if (set_bit(0, (void*)&lp->lock) != 0) {
        if (lance_debug > 0)
            printk("%s: tx queue lock!\n", dev->name);
        /* don't clear dev->tbusy flag. */
        return 1;
    }

    /* Fill in a Tx ring entry. */

    /* Mask to ring buffer boundary. */
    entry = lp->cur_tx & TX_RING_MOD_MASK;

    /* Caution: the write order is important here; set the base address
       with the "ownership" bits last. */

    /* The old LANCE chips don't automatically pad buffers to the minimum size. */
    if (lp->chip_version == OLD_LANCE) {
        lp->tx_ring[entry].length =
            -(ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN);
    } else
        lp->tx_ring[entry].length = -skb->len;

    lp->tx_ring[entry].misc = 0x0000;

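    /* 0x83000000 in the high byte of 'base' sets OWN | STP | ENP, handing the
       whole frame to the chip in a single buffer. */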
    /* If any part of this buffer is >16M we must copy it to a low-memory
       buffer. */
    if ((int)(skb->data) + skb->len > 0x01000000) {
        if (lance_debug > 5)
            printk("%s: bouncing a high-memory packet (%#x).\n",
                   dev->name, (int)(skb->data));
        memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len);
        lp->tx_ring[entry].base =
            (int)(lp->tx_bounce_buffs + entry) | 0x83000000;
        dev_kfree_skb(skb, FREE_WRITE);
    } else {
        lp->tx_skbuff[entry] = skb;
        lp->tx_ring[entry].base = (int)(skb->data) | 0x83000000;
    }
    lp->cur_tx++;

    /* Trigger an immediate send poll. */
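    /* 0x0048 = INEA | TDMD: keep interrupts enabled and demand an immediate
       transmit poll. */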
    outw(0x0000, ioaddr+LANCE_ADDR);
    outw(0x0048, ioaddr+LANCE_DATA);

    dev->trans_start = jiffies;

    cli();
    lp->lock = 0;
    if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base == 0)
        dev->tbusy = 0;
    else
        lp->tx_full = 1;
    sti();

    return 0;
}

/* The LANCE interrupt handler. */
static void
lance_interrupt(int reg_ptr)
{
    int irq = -(((struct pt_regs *)reg_ptr)->orig_eax+2);
    struct device *dev = (struct device *)(irq2dev_map[irq]);
    struct lance_private *lp;
    int csr0, ioaddr, boguscnt = 10;

    if (dev == NULL) {
        printk("lance_interrupt(): irq %d for unknown device.\n", irq);
        return;
    }

    ioaddr = dev->base_addr;
    lp = (struct lance_private *)dev->priv;
    if (dev->interrupt)
        printk("%s: Re-entering the interrupt handler.\n", dev->name);

    dev->interrupt = 1;

    outw(0x00, dev->base_addr + LANCE_ADDR);
    while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600
           && --boguscnt >= 0) {
        /* Acknowledge all of the current interrupt sources ASAP. */
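        /* Writing the set status bits back to CSR0 clears them; the ~0x004f
           mask keeps the write from setting any of the command bits
           (INEA, TDMD, STOP, STRT, INIT). */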
        outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);

        if (lance_debug > 5)
            printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
                   dev->name, csr0, inw(dev->base_addr + LANCE_DATA));

        if (csr0 & 0x0400)              /* Rx interrupt */
            lance_rx(dev);

        if (csr0 & 0x0200) {            /* Tx-done interrupt */
            int dirty_tx = lp->dirty_tx;

            while (dirty_tx < lp->cur_tx) {
                int entry = dirty_tx & TX_RING_MOD_MASK;
                int status = lp->tx_ring[entry].base;

                if (status < 0)
                    break;              /* It still hasn't been Txed */

                lp->tx_ring[entry].base = 0;

                if (status & 0x40000000) {
                    /* There was a major error, log it. */
                    int err_status = lp->tx_ring[entry].misc;
                    lp->stats.tx_errors++;
                    if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
                    if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
                    if (err_status & 0x1000) lp->stats.tx_window_errors++;
                    if (err_status & 0x4000) {
                        /* Ackk! On FIFO errors the Tx unit is turned off! */
                        lp->stats.tx_fifo_errors++;
                        /* Remove this verbosity later! */
                        printk("%s: Tx FIFO error! Status %4.4x.\n",
                               dev->name, csr0);
                        /* Restart the chip. */
                        outw(0x0002, dev->base_addr + LANCE_DATA);
                    }
                } else {
                    if (status & 0x18000000)
                        lp->stats.collisions++;
                    lp->stats.tx_packets++;
                }

                /* We must free the original skb if it's not a data-only copy
                   in the bounce buffer. */
                if (lp->tx_skbuff[entry]) {
                    dev_kfree_skb(lp->tx_skbuff[entry], FREE_WRITE);
                    lp->tx_skbuff[entry] = 0;
                }
                dirty_tx++;
            }

#ifndef final_version
            if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
                printk("out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
                       dirty_tx, lp->cur_tx, lp->tx_full);
                dirty_tx += TX_RING_SIZE;
            }
#endif

            if (lp->tx_full && dev->tbusy
                && dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
                /* The ring is no longer full, clear tbusy. */
                lp->tx_full = 0;
                dev->tbusy = 0;
                mark_bh(NET_BH);
            }

            lp->dirty_tx = dirty_tx;
        }

        /* Log misc errors. */
        if (csr0 & 0x4000) lp->stats.tx_errors++;   /* Tx babble. */
        if (csr0 & 0x1000) lp->stats.rx_errors++;   /* Missed a Rx frame. */
        if (csr0 & 0x0800) {
            printk("%s: Bus master arbitration failure, status %4.4x.\n",
                   dev->name, csr0);
            /* Restart the chip. */
            outw(0x0002, dev->base_addr + LANCE_DATA);
        }
    }

    /* Clear any other interrupt, and set interrupt enable. */
    outw(0x0000, dev->base_addr + LANCE_ADDR);
    outw(0x7940, dev->base_addr + LANCE_DATA);

    if (lance_debug > 4)
        printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
               dev->name, inw(ioaddr + LANCE_ADDR),
               inw(dev->base_addr + LANCE_DATA));

    dev->interrupt = 0;
    return;
}

static int
lance_rx(struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    int entry = lp->cur_rx & RX_RING_MOD_MASK;
    int i;

    /* If we own the next entry, it's a new packet. Send it up. */
    while (lp->rx_ring[entry].base >= 0) {
        int status = lp->rx_ring[entry].base >> 24;

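        /* In the status byte 0x03 is STP | ENP with no error bits: a complete
           packet contained in a single buffer. */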
        if (status != 0x03) {           /* There was an error. */
            /* There is a tricky error noted by John Murphy,
               <murf@perftech.com> to Russ Nelson: Even with full-sized
               buffers it's possible for a jabber packet to use two
               buffers, with only the last correctly noting the error. */
            if (status & 0x01)          /* Only count a general error at the */
                lp->stats.rx_errors++;  /* end of a packet. */
            if (status & 0x20) lp->stats.rx_frame_errors++;
            if (status & 0x10) lp->stats.rx_over_errors++;
            if (status & 0x08) lp->stats.rx_crc_errors++;
            if (status & 0x04) lp->stats.rx_fifo_errors++;
            lp->rx_ring[entry].base &= 0x03ffffff;
        } else {
            /* Malloc up new buffer, compatible with net-2e. */
            short pkt_len = lp->rx_ring[entry].msg_length;
            struct sk_buff *skb;

            skb = alloc_skb(pkt_len, GFP_ATOMIC);
            if (skb == NULL) {
                printk("%s: Memory squeeze, deferring packet.\n", dev->name);
                for (i = 0; i < RX_RING_SIZE; i++)
                    if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
                        break;

                if (i > RX_RING_SIZE - 2) {
                    lp->stats.rx_dropped++;
                    lp->rx_ring[entry].base |= 0x80000000;
                    lp->cur_rx++;
                }
                break;
            }
            skb->len = pkt_len;
            skb->dev = dev;
            memcpy(skb->data,
                   (unsigned char *)(lp->rx_ring[entry].base & 0x00ffffff),
                   pkt_len);
            netif_rx(skb);
            lp->stats.rx_packets++;
        }

        /* The docs say that the buffer length isn't touched, but Andrew Boyd
           of QNX reports that some revs of the 79C965 clear it. */
        lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
        lp->rx_ring[entry].base |= 0x80000000;
        entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
    }

    /* We should check that at least two ring entries are free.  If not,
       we should free one and mark stats->rx_dropped++. */

    return 0;
}

static int
lance_close(struct device *dev)
{
    int ioaddr = dev->base_addr;
    struct lance_private *lp = (struct lance_private *)dev->priv;

    dev->start = 0;
    dev->tbusy = 1;

    if (lp->chip_version != OLD_LANCE) {
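        /* CSR112 is the missed-frame count on the 79C96x parts. */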
        outw(112, ioaddr+LANCE_ADDR);
        lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
    }
    outw(0, ioaddr+LANCE_ADDR);

    if (lance_debug > 1)
        printk("%s: Shutting down ethercard, status was %2.2x.\n",
               dev->name, inw(ioaddr+LANCE_DATA));

    /* We stop the LANCE here -- it occasionally polls
       memory if we don't. */
    outw(0x0004, ioaddr+LANCE_DATA);

    if (dev->dma != 4)
        disable_dma(dev->dma);

    free_irq(dev->irq);

    irq2dev_map[dev->irq] = 0;

    return 0;
}

static struct enet_statistics *
lance_get_stats(struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    short ioaddr = dev->base_addr;
    short saved_addr;

    if (lp->chip_version != OLD_LANCE) {
        cli();
        saved_addr = inw(ioaddr+LANCE_ADDR);
        outw(112, ioaddr+LANCE_ADDR);
        lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
        outw(saved_addr, ioaddr+LANCE_ADDR);
        sti();
    }

    return &lp->stats;
}

/* Set or clear the multicast filter for this adaptor.
   num_addrs == -1      Promiscuous mode, receive all packets
   num_addrs == 0       Normal mode, clear multicast list
   num_addrs > 0        Multicast mode, receive normal and MC packets, and do
                        best-effort filtering.
 */
static void
set_multicast_list(struct device *dev, int num_addrs, void *addrs)
{
    short ioaddr = dev->base_addr;

    /* We take the simple way out: for any multicast list we just set the
       logical-address filter to all ones and rely on upper-layer filtering,
       and only use true promiscuous mode when it is asked for. */
    outw(0, ioaddr+LANCE_ADDR);
    outw(0x0004, ioaddr+LANCE_DATA);    /* Temporarily stop the lance. */

    outw(15, ioaddr+LANCE_ADDR);
    if (num_addrs >= 0) {
        short multicast_table[4];
        int i;
        /* We don't use the multicast table, but rely on upper-layer filtering. */
        memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
        for (i = 0; i < 4; i++) {
            outw(8 + i, ioaddr+LANCE_ADDR);
            outw(multicast_table[i], ioaddr+LANCE_DATA);
        }
        outw(15, ioaddr+LANCE_ADDR);        /* Re-select CSR15. */
        outw(0x0000, ioaddr+LANCE_DATA);    /* Unset promiscuous mode */
    } else {
        outw(0x8000, ioaddr+LANCE_DATA);    /* Set promiscuous mode */
    }

    outw(0, ioaddr+LANCE_ADDR);
    outw(0x0142, ioaddr+LANCE_DATA);        /* Resume normal operation. */
}


/*
 * Local variables:
 *  compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c lance.c"
 *  c-indent-level: 4
 *  tab-width: 4
 * End:
 */