root/drivers/net/lance.c


DEFINITIONS

This source file includes the following definitions.
  1. lance_init
  2. lance_probe1
  3. lance_open
  4. lance_init_ring
  5. lance_start_xmit
  6. lance_interrupt
  7. lance_rx
  8. lance_close
  9. lance_get_stats
  10. set_multicast_list

   1 /* lance.c: An AMD LANCE ethernet driver for linux. */
   2 /*
   3     Written 1993 by Donald Becker.
   4 
   5     Copyright 1993 United States Government as represented by the
   6     Director, National Security Agency.  This software may be used and
   7     distributed according to the terms of the GNU Public License,
   8     incorporated herein by reference.
   9 
  10     This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
  11     with most other LANCE-based bus-master (NE2100 clone) ethercards.
  12 
  13     The author may be reached as becker@super.org or
  14     C/O Supercomputing Research Ctr., 17100 Science Dr., Bowie MD 20715
  15 */
  16 
  17 static char *version = "lance.c:v0.14g 12/21/93 becker@super.org\n";
  18 
  19 #include <linux/config.h>
  20 #include <linux/kernel.h>
  21 #include <linux/sched.h>
  22 #include <linux/string.h>
  23 #include <linux/ptrace.h>
  24 #include <linux/errno.h>
  25 #include <linux/ioport.h>
  26 #include <linux/malloc.h>
  27 #include <linux/interrupt.h>
  28 #include <asm/bitops.h>
  29 #include <asm/io.h>
  30 #include <asm/dma.h>
  31 
  32 #include <linux/netdevice.h>
  33 #include <linux/etherdevice.h>
  34 #include <linux/skbuff.h>
  35 
  36 #ifndef HAVE_PORTRESERVE
  37 #define check_region(addr, size)        0
  38 #define snarf_region(addr, size)        do ; while(0)
  39 #endif
  40 
  41 struct device *init_etherdev(struct device *dev, int sizeof_private,
  42                              unsigned long *mem_startp);
  43 
  44 #ifdef LANCE_DEBUG
  45 int lance_debug = LANCE_DEBUG;
  46 #else
  47 int lance_debug = 1;
  48 #endif
  49 
  50 #ifndef LANCE_DMA
  51 #define LANCE_DMA       5
  52 #endif
  53 
  54 /*
  55                 Theory of Operation
  56 
  57 I. Board Compatibility
  58 
  59 This device driver is designed for the AMD 79C960, the "PCnet-ISA
  60 single-chip ethernet controller for ISA".  This chip is used in a wide
  61 variety of boards from vendors such as Allied Telesis, HP, Kingston,
  62 and Boca.  This driver is also intended to work with older AMD 7990
  63 designs, such as the NE1500 and NE2100.  For convenience, I use the name
  64 LANCE to refer to either AMD chip.
  65 
  66 II. Board-specific settings
  67 
  68 The driver is designed to work with boards that use the faster
  69 bus-master mode, rather than the shared memory mode.  (Only older designs
  70 have on-board buffer memory needed to support the slower shared memory mode.)
  71 
  72 Most boards have jumpered settings for the I/O base, IRQ line, and DMA channel.
  73 This driver probes the likely base addresses, {0x300, 0x320, 0x340, 0x360}.
  74 After the board is found it generates a DMA-timeout interrupt and uses
  75 autoIRQ to find the IRQ line.  The DMA channel defaults to LANCE_DMA, or it
  76 can be set with the low bits of the otherwise-unused dev->mem_start value.
  77 
  78 The HP-J2405A board is an exception: with this board it's easy to read the
  79 EEPROM-set values for the base, IRQ, and DMA.  Of course you must already
  80 _know_ the base address, but that entry is for changing the EEPROM.
  81 
  82 III. Driver operation
  83 
  84 IIIa. Ring buffers
  85 The LANCE uses ring buffers of Tx and Rx descriptors.  Each entry describes
  86 the base and length of the data buffer, along with status bits.  The number of
  87 ring entries is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of the ring
  88 size (rather than the ring size itself) for implementation ease.  The current
  89 values are 4 (Tx) and 4 (Rx), which leads to ring sizes of 16 (Tx) and 16 (Rx);
  90 see the illustrative sketch after the ring-size macros below.  Increasing the
  91 number of ring entries needlessly uses extra space and reduces the chance that
  92 an upper layer will be able to reorder queued Tx packets based on priority.
  93 Decreasing the number of entries makes it more difficult to achieve back-to-back
  94 packet transmission and increases the chance that the Rx ring will overflow.
  95 (Consider the worst case of receiving back-to-back minimum-sized packets.)
  96 
  97 The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
  98 statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
  99 avoid the administrative overhead. For the Rx side this avoids dynamically
 100 allocating full-sized buffers "just in case", at the expense of a
 101 memory-to-memory data copy for each packet received.  For most systems this
 102 is a good tradeoff: the Rx buffer will always be in low memory, the copy
 103 is inexpensive, and it primes the cache for later packet processing.  For Tx
 104 the buffers are only used when needed as low-memory bounce buffers.
 105 
 106 IIIB. 16M memory limitations.
 107 For the ISA bus master mode all structures used directly by the LANCE,
 108 the initialization block, Rx and Tx rings, and data buffers, must be
 109 accessible from the ISA bus, i.e. in the lower 16M of real memory.
 110 This is a problem for current Linux kernels on >16M machines. The network
 111 devices are initialized after memory initialization, and the kernel doles out
 112 memory from the top of memory downward.  The current solution is to have a
 113 special network initialization routine that's called before memory
 114 initialization; this will eventually be generalized for all network devices.
 115 As mentioned before, low-memory "bounce-buffers" are used when needed; see the sketch after this comment.
 116 
 117 IIIC. Synchronization
 118 The driver runs as two independent, single-threaded flows of control.  One
 119 is the send-packet routine, which enforces single-threaded use by the
 120 dev->tbusy flag.  The other thread is the interrupt handler, which is single
 121 threaded by the hardware and other software.
 122 
 123 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
 124 flag.  It sets the tbusy flag whenever it is queuing a Tx packet.  If the next
 125 queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
 126 the 'lp->tx_full' flag.
 127 
 128 The interrupt handler has exclusive control over the Rx ring and records stats
 129 from the Tx ring.  (The Tx-done interrupt can't be selectively turned off, so
 130 we can't avoid the interrupt overhead by having the Tx routine reap the Tx
 131 stats.)  After reaping the stats, it marks the queue entry as empty by setting
 132 the 'base' to zero.  Iff the 'lp->tx_full' flag is set, it clears both the
 133 tx_full and tbusy flags.
 134 
 135 */
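
/* A minimal illustrative sketch (an assumption, not part of the original
   driver) of the 16M check described in section IIIB above: ISA bus-master
   DMA can only reach the first 16M of physical memory, so a buffer that
   extends past 0x01000000 must first be copied into a low-memory bounce
   buffer.  The real test lives in lance_start_xmit() below. */
#if 0
static int example_needs_bounce_buffer(char *data, int len)
{
    /* True when any byte of the buffer lies above the 16M ISA DMA limit. */
    return (unsigned long)data + len > 0x01000000;
}
#endif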
 136 
 137 /* Set the number of Tx and Rx buffers, using Log_2(# buffers).
 138    Reasonable default values are 16 Tx buffers and 16 Rx buffers.
 139    That translates to a Log_2 value of 4 for each (16 == 2^4). */
 140 #ifndef LANCE_LOG_TX_BUFFERS
 141 #define LANCE_LOG_TX_BUFFERS 4
 142 #define LANCE_LOG_RX_BUFFERS 4
 143 #endif
 144 
 145 #define TX_RING_SIZE            (1 << (LANCE_LOG_TX_BUFFERS))
 146 #define TX_RING_MOD_MASK        (TX_RING_SIZE - 1)
 147 #define TX_RING_LEN_BITS        ((LANCE_LOG_TX_BUFFERS) << 29)
 148 
 149 #define RX_RING_SIZE            (1 << (LANCE_LOG_RX_BUFFERS))
 150 #define RX_RING_MOD_MASK        (RX_RING_SIZE - 1)
 151 #define RX_RING_LEN_BITS        ((LANCE_LOG_RX_BUFFERS) << 29)
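
/* A minimal sketch (an illustrative assumption, not used by the driver) of
   the ring-index arithmetic described in section IIIa: the ring sizes are
   powers of two, so masking a free-running counter with *_RING_MOD_MASK is
   the same as taking it modulo the ring size, and the counters themselves
   never need to wrap explicitly. */
#if 0
static int example_next_rx_entry(int cur_rx)
{
    return (cur_rx + 1) & RX_RING_MOD_MASK;     /* Wraps at RX_RING_SIZE. */
}
#endif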
 152 
 153 #define PKT_BUF_SZ      1544
 154 
 155 /* Offsets from base I/O address. */
 156 #define LANCE_DATA 0x10
 157 #define LANCE_ADDR 0x12
 158 #define LANCE_RESET 0x14
 159 #define LANCE_BUS_IF 0x16
 160 #define LANCE_TOTAL_SIZE 0x18
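
/* A short illustrative sketch (an assumption, mirroring the access pattern
   used throughout this file rather than a helper from the original source):
   the LANCE is programmed through two I/O ports -- a register-address port
   (LANCE_ADDR) that selects a CSR, and a data port (LANCE_DATA) that then
   reads or writes the selected CSR. */
#if 0
static unsigned short example_read_csr(int ioaddr, int csr_no)
{
    outw(csr_no, ioaddr+LANCE_ADDR);    /* Select the CSR. */
    return inw(ioaddr+LANCE_DATA);      /* Read it through the data port. */
}

static void example_write_csr(int ioaddr, int csr_no, unsigned short value)
{
    outw(csr_no, ioaddr+LANCE_ADDR);
    outw(value, ioaddr+LANCE_DATA);
}
#endif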
 161 
 162 /* The LANCE Rx and Tx ring descriptors. */
 163 struct lance_rx_head {
 164     int base;
 165     short buf_length;           /* This length is 2's complement (negative)! */
 166     short msg_length;           /* This length is "normal". */
 167 };
 168 
 169 struct lance_tx_head {
 170     int   base;
 171     short length;               /* Length is 2's complement (negative)! */
 172     short misc;
 173 };
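
/* An illustrative sketch (not from the original source) of the negative
   length convention noted in the descriptors above: the chip expects the
   buffer size as a two's complement (negative) count, so a PKT_BUF_SZ
   (1544-byte) buffer is stored as -1544, i.e. 0xf9f8 as a 16-bit value. */
#if 0
static void example_set_rx_buf_length(struct lance_rx_head *rd)
{
    rd->buf_length = -PKT_BUF_SZ;       /* Two's complement buffer size. */
}
#endif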
 174 
 175 /* The LANCE initialization block, described in databook. */
 176 struct lance_init_block {
 177     unsigned short mode;        /* Pre-set mode (reg. 15) */
 178     unsigned char phys_addr[6]; /* Physical ethernet address */
 179     unsigned filter[2];         /* Multicast filter (unused). */
 180     /* Receive and transmit ring base, along with extra bits. */
 181     unsigned rx_ring;           /* Tx and Rx ring base pointers */
 182     unsigned tx_ring;
 183 };
 184 
 185 struct lance_private {
 186     char devname[8];
 187     /* These must be aligned on 8-byte boundaries. */
 188     struct lance_rx_head rx_ring[RX_RING_SIZE];
 189     struct lance_tx_head tx_ring[TX_RING_SIZE];
 190     struct lance_init_block     init_block;
 191     long rx_buffs;              /* Address of Rx and Tx buffers. */
 192     /* Tx low-memory "bounce buffer" address. */
 193     char (*tx_bounce_buffs)[PKT_BUF_SZ];
 194     int cur_rx, cur_tx;         /* The next free ring entry */
 195     int dirty_rx, dirty_tx;     /* The ring entries to be free()ed. */
 196     int dma;
 197     struct enet_statistics stats;
 198     char old_lance;
 199     int pad0, pad1;             /* Used for alignment */
 200 };
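
/* A sketch (an assumption, mirroring what lance_probe1() below actually
   does) of how the single allocation behind this structure is carved up:
   the private area is followed immediately by RX_RING_SIZE full-sized Rx
   buffers and then by TX_RING_SIZE Tx bounce buffers. */
#if 0
static void example_carve_buffers(struct lance_private *lp)
{
    lp->rx_buffs = (long)lp + sizeof(struct lance_private);
    lp->tx_bounce_buffs = (char (*)[PKT_BUF_SZ])
                          (lp->rx_buffs + PKT_BUF_SZ*RX_RING_SIZE);
}
#endif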
 201 
 202 unsigned long lance_probe1(short ioaddr, unsigned long mem_start);
 203 static int lance_open(struct device *dev);
 204 static void lance_init_ring(struct device *dev);
 205 static int lance_start_xmit(struct sk_buff *skb, struct device *dev);
 206 static int lance_rx(struct device *dev);
 207 static void lance_interrupt(int reg_ptr);
 208 static int lance_close(struct device *dev);
 209 static struct enet_statistics *lance_get_stats(struct device *dev);
 210 #ifdef HAVE_MULTICAST
 211 static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);
 212 #endif
 213 
 214 
 215 
 216 unsigned long lance_init(unsigned long mem_start, unsigned long mem_end)
 217 {
 218     int *port, ports[] = {0x300, 0x320, 0x340, 0x360, 0};
 219 
 220     for (port = &ports[0]; *port; port++) {
 221         int ioaddr = *port;
 222 
 223         if (   check_region(ioaddr, LANCE_TOTAL_SIZE) == 0
 224             && inb(ioaddr + 14) == 0x57
 225             && inb(ioaddr + 15) == 0x57) {
 226             mem_start = lance_probe1(ioaddr, mem_start);
 227         }
 228     }
 229 
 230     return mem_start;
 231 }
 232 
 233 unsigned long lance_probe1(short ioaddr, unsigned long mem_start)
 234 {
 235     struct device *dev;
 236     struct lance_private *lp;
 237     int hpJ2405A = 0;
 238     int i, reset_val;
 239 
 240     hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00
 241                 && inb(ioaddr+2) == 0x09);
 242 
 243     /* Reset the LANCE.  */
 244     reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
 245 
 246     /* The un-reset is only needed for the real NE2100, and will
 247        confuse the HP board. */
 248     if (!hpJ2405A)
 249         outw(reset_val, ioaddr+LANCE_RESET);
 250 
 251     outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
 252     if (inw(ioaddr+LANCE_DATA) != 0x0004)
 253         return mem_start;
 254 
 255     dev = init_etherdev(0, sizeof(struct lance_private)
 256                         + PKT_BUF_SZ*(RX_RING_SIZE + TX_RING_SIZE),
 257                         &mem_start);
 258 
 259     printk("%s: LANCE at %#3x,", dev->name, ioaddr);
 260 
 261     /* There is a 16 byte station address PROM at the base address.
 262        The first six bytes are the station address. */
 263     for (i = 0; i < 6; i++)
 264         printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
 265 
 266     dev->base_addr = ioaddr;
 267     snarf_region(ioaddr, LANCE_TOTAL_SIZE);
 268 
 269     /* Make certain the data structures used by the LANCE are aligned. */
 270     dev->priv = (void *)(((int)dev->priv + 7) & ~7);
 271     lp = (struct lance_private *)dev->priv;
 272     lp->rx_buffs = (long)dev->priv + sizeof(struct lance_private);
 273     lp->tx_bounce_buffs = (char (*)[PKT_BUF_SZ])
 274                            (lp->rx_buffs + PKT_BUF_SZ*RX_RING_SIZE);
 275 
 276 #ifndef final_version
 277     /* This should never happen. */
 278     if ((int)(lp->rx_ring) & 0x07) {
 279         printk(" **ERROR** LANCE Rx and Tx rings not on even boundary.\n");
 280         return mem_start;
 281     }
 282 #endif
 283 
 284     outw(88, ioaddr+LANCE_ADDR);
 285     lp->old_lance = (inw(ioaddr+LANCE_DATA) != 0x3003);
 286 
 287 #if defined(notdef)
 288     printk(lp->old_lance ? " original LANCE (%04x)" : " PCnet-ISA LANCE (%04x)",
 289            inw(ioaddr+LANCE_DATA));
 290 #endif
 291 
 292     lp->init_block.mode = 0x0003;       /* Disable Rx and Tx. */
 293     for (i = 0; i < 6; i++)
 294         lp->init_block.phys_addr[i] = dev->dev_addr[i];
 295     lp->init_block.filter[0] = 0x00000000;
 296     lp->init_block.filter[1] = 0x00000000;
 297     lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
 298     lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;
 299 
 300     outw(0x0001, ioaddr+LANCE_ADDR);
 301     outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
 302     outw(0x0002, ioaddr+LANCE_ADDR);
 303     outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);
 304     outw(0x0000, ioaddr+LANCE_ADDR);
 305 
 306     if (hpJ2405A) {
 307         char dma_tbl[4] = {3, 5, 6, 7};
 308         char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
 309         short reset_val = inw(ioaddr+LANCE_RESET);
 310         dev->dma = dma_tbl[(reset_val >> 2) & 3];
 311         dev->irq = irq_tbl[(reset_val >> 4) & 7];
 312         printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
 313     } else {
 314         /* The DMA channel may be passed in on this parameter. */
 315         if (dev->mem_start & 0x07)
 316             dev->dma = dev->mem_start & 0x07;
 317         else if (dev->dma == 0)
 318             dev->dma = LANCE_DMA;
 319 
 320         /* To auto-IRQ we enable the initialization-done and DMA error
 321            interrupts.  For now we will always get a DMA error. */
 322         if (dev->irq < 2) {
 323 
 324             autoirq_setup(0);
 325 
 326             /* Trigger an initialization just for the interrupt. */
 327             outw(0x0041, ioaddr+LANCE_DATA);
 328 
 329             dev->irq = autoirq_report(1);
 330             if (dev->irq)
 331                 printk(", probed IRQ %d, fixed at DMA %d.\n",
 332                        dev->irq, dev->dma);
 333             else {
 334                 printk(", failed to detect IRQ line.\n");
 335                 return mem_start;
 336             }
 337         } else
 338             printk(" assigned IRQ %d DMA %d.\n", dev->irq, dev->dma);
 339     }
 340 
 341     if (! lp->old_lance) {
 342         /* Turn on auto-select of media (10baseT or BNC) so that the user
 343            can watch the LEDs even if the board isn't opened. */
 344         outw(0x0002, ioaddr+LANCE_ADDR);
 345         outw(0x0002, ioaddr+LANCE_BUS_IF);
 346     }
 347 
 348     if (lance_debug > 0)
 349         printk(version);
 350 
 351     /* The LANCE-specific entries in the device structure. */
 352     dev->open = &lance_open;
 353     dev->hard_start_xmit = &lance_start_xmit;
 354     dev->stop = &lance_close;
 355     dev->get_stats = &lance_get_stats;
 356     dev->set_multicast_list = &set_multicast_list;
 357 
 358     return mem_start;
 359 }
 360 
 361 
 362 static int
 363 lance_open(struct device *dev)
 364 {
 365     struct lance_private *lp = (struct lance_private *)dev->priv;
 366     int ioaddr = dev->base_addr;
 367     int i;
 368 
 369     if (request_irq(dev->irq, &lance_interrupt)) {
 370         return -EAGAIN;
 371     }
 372 
 373     if (request_dma(dev->dma)) {
 374         free_irq(dev->irq);
 375         return -EAGAIN;
 376     }
 377     irq2dev_map[dev->irq] = dev;
 378 
 379     /* Reset the LANCE */
 380     inw(ioaddr+LANCE_RESET);
 381 
 382     /* The DMA controller is used as a no-operation slave, "cascade mode". */
 383     enable_dma(dev->dma);
 384     set_dma_mode(dev->dma, DMA_MODE_CASCADE);
 385 
 386     /* Un-Reset the LANCE, needed only for the NE2100. */
 387     if (lp->old_lance)
 388         outw(0, ioaddr+LANCE_RESET);
 389 
 390     if (! lp->old_lance) {
 391         /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
 392         outw(0x0002, ioaddr+LANCE_ADDR);
 393         outw(0x0002, ioaddr+LANCE_BUS_IF);
 394     }
 395 
 396     if (lance_debug > 1)
 397         printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
 398                dev->name, dev->irq, dev->dma, (int) lp->tx_ring, (int) lp->rx_ring,
 399                (int) &lp->init_block);
 400 
 401     lance_init_ring(dev);
 402     /* Re-initialize the LANCE, and start it when done. */
 403     outw(0x0001, ioaddr+LANCE_ADDR);
 404     outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
 405     outw(0x0002, ioaddr+LANCE_ADDR);
 406     outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);
 407 
 408     outw(0x0004, ioaddr+LANCE_ADDR);
 409     outw(0x0d15, ioaddr+LANCE_DATA);
 410 
 411     outw(0x0000, ioaddr+LANCE_ADDR);
 412     outw(0x0001, ioaddr+LANCE_DATA);
 413 
 414     dev->tbusy = 0;
 415     dev->interrupt = 0;
 416     dev->start = 1;
 417     i = 0;
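         /* Poll CSR0 until the init-done bit IDON (0x0100) is set, or give up after 100 reads. */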
 418     while (i++ < 100)
 419         if (inw(ioaddr+LANCE_DATA) & 0x0100)
 420             break;
 421     outw(0x0142, ioaddr+LANCE_DATA);
 422 
 423     if (lance_debug > 2)
 424         printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
 425                dev->name, i, (int) &lp->init_block, inw(ioaddr+LANCE_DATA));
 426 
 427     return 0;                   /* Always succeed */
 428 }
 429 
 430 /* Initialize the LANCE Rx and Tx rings. */
 431 static void
 432 lance_init_ring(struct device *dev)
 433 {
 434     struct lance_private *lp = (struct lance_private *)dev->priv;
 435     int i;
 436 
 437     lp->cur_rx = lp->cur_tx = 0;
 438     lp->dirty_rx = lp->dirty_tx = 0;
 439 
 440     for (i = 0; i < RX_RING_SIZE; i++) {
 441         lp->rx_ring[i].base = (lp->rx_buffs + i*PKT_BUF_SZ) | 0x80000000;
 442         lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
 443     }
 444     /* The Tx buffer address is filled in as needed, but we do need to clear
 445        the upper ownership bit. */
 446     for (i = 0; i < TX_RING_SIZE; i++) {
 447         lp->tx_ring[i].base = 0;
 448     }
 449 
 450     lp->init_block.mode = 0x0000;
 451     for (i = 0; i < 6; i++)
 452         lp->init_block.phys_addr[i] = dev->dev_addr[i];
 453     lp->init_block.filter[0] = 0x00000000;
 454     lp->init_block.filter[1] = 0x00000000;
 455     lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
 456     lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;
 457 }
 458 
 459 static int
 460 lance_start_xmit(struct sk_buff *skb, struct device *dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 461 {
 462     struct lance_private *lp = (struct lance_private *)dev->priv;
 463     int ioaddr = dev->base_addr;
 464     int entry;
 465 
 466     /* Transmitter timeout, serious problems. */
 467     if (dev->tbusy) {
 468         int tickssofar = jiffies - dev->trans_start;
 469         if (tickssofar < 10)
 470             return 1;
 471         outw(0, ioaddr+LANCE_ADDR);
 472         printk("%s: transmit timed out, status %4.4x, resetting.\n",
 473                dev->name, inw(ioaddr+LANCE_DATA));
 474         outw(0x0001, ioaddr+LANCE_DATA);
 475         lp->stats.tx_errors++;
 476 #ifndef final_version
 477         {
 478             int i;
 479             printk(" Ring data dump: dirty_tx %d cur_tx %d cur_rx %d.",
 480                    lp->dirty_tx, lp->cur_tx, lp->cur_rx);
 481             for (i = 0 ; i < RX_RING_SIZE; i++)
 482                 printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
 483                        lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
 484                        lp->rx_ring[i].msg_length);
 485             for (i = 0 ; i < TX_RING_SIZE; i++)
 486                 printk(" %s%08x %04x %04x", i & 0x3 ? "" : "\n ",
 487                        lp->tx_ring[i].base, -lp->tx_ring[i].length,
 488                        lp->tx_ring[i].misc);
 489             printk("\n");
 490         }
 491 #endif
 492         lance_init_ring(dev);
 493         outw(0x0043, ioaddr+LANCE_DATA);
 494 
 495         dev->tbusy=0;
 496         dev->trans_start = jiffies;
 497 
 498         return 0;
 499     }
 500 
 501     if (skb == NULL) {
 502         dev_tint(dev);
 503         return 0;
 504     }
 505 
 506     if (skb->len <= 0)
 507         return 0;
 508 
 509     if (lance_debug > 3) {
 510         outw(0x0000, ioaddr+LANCE_ADDR);
 511         printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
 512                inw(ioaddr+LANCE_DATA));
 513         outw(0x0000, ioaddr+LANCE_DATA);
 514     }
 515 
 516     /* Block a timer-based transmit from overlapping.  This could better be
 517        done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
 518     if (set_bit(0, (void*)&dev->tbusy) != 0)
 519         printk("%s: Transmitter access conflict.\n", dev->name);
 520 
 521     /* Fill in a Tx ring entry */
 522 
 523     /* Mask to ring buffer boundary. */
 524     entry = lp->cur_tx & TX_RING_MOD_MASK;
 525 
 526     /* Caution: the write order is important here; set the base address
 527        with the "ownership" bits last. */
 528 
 529     /* The old LANCE chips don't automatically pad buffers to the minimum size. */
 530     if (lp->old_lance) {
 531         lp->tx_ring[entry].length =
 532             -(ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN);
 533     } else
 534         lp->tx_ring[entry].length = -skb->len;
 535 
 536     lp->tx_ring[entry].misc = 0x0000;
 537 
 538     /* If any part of this buffer is >16M we must copy it to a low-memory
 539        buffer. */
 540     if ((int)(skb->data) + skb->len > 0x01000000) {
 541         if (lance_debug > 5)
 542             printk("%s: bouncing a high-memory packet (%#x).\n",
 543                    dev->name, (int)(skb->data));
 544         memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len);
 545         lp->tx_ring[entry].base =
 546             (int)(lp->tx_bounce_buffs + entry) | 0x83000000;
 547         if (skb->free)
 548             kfree_skb (skb, FREE_WRITE);
 549     } else {
 550         /* We can't free the packet yet, so we inform the memory management
 551            code that we are still using it. */
 552         if(skb->free==0)
 553                 skb_kept_by_device(skb);
 554         lp->tx_ring[entry].base = (int)(skb->data) | 0x83000000;
 555     }
 556     lp->cur_tx++;
 557 
 558     /* Trigger an immediate send poll. */
 559     outw(0x0000, ioaddr+LANCE_ADDR);
 560     outw(0x0048, ioaddr+LANCE_DATA);
 561 
 562     dev->trans_start = jiffies;
 563 
 564     if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base == 0)
 565         dev->tbusy=0;
 566 
 567     return 0;
 568 }
 569 
 570 /* The LANCE interrupt handler. */
 571 static void
 572 lance_interrupt(int reg_ptr)
 573 {
 574     int irq = -(((struct pt_regs *)reg_ptr)->orig_eax+2);
 575     struct device *dev = (struct device *)(irq2dev_map[irq]);
 576     struct lance_private *lp;
 577     int csr0, ioaddr;
 578 
 579     if (dev == NULL) {
 580         printk ("lance_interrupt(): irq %d for unknown device.\n", irq);
 581         return;
 582     }
 583 
 584     ioaddr = dev->base_addr;
 585     lp = (struct lance_private *)dev->priv;
 586     if (dev->interrupt)
 587         printk("%s: Re-entering the interrupt handler.\n", dev->name);
 588 
 589     dev->interrupt = 1;
 590 
 591     outw(0x00, dev->base_addr + LANCE_ADDR);
 592     csr0 = inw(dev->base_addr + LANCE_DATA);
 593 
 594     /* Acknowledge all of the current interrupt sources ASAP. */
 595     outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
 596 
 597     if (lance_debug > 5)
 598         printk("%s: interrupt  csr0=%#2.2x new csr=%#2.2x.\n",
 599                dev->name, csr0, inw(dev->base_addr + LANCE_DATA));
 600 
 601     if (csr0 & 0x0400)          /* Rx interrupt */
 602         lance_rx(dev);
 603 
 604     if (csr0 & 0x0200) {        /* Tx-done interrupt */
 605         int dirty_tx = lp->dirty_tx;
 606 
 607         while (dirty_tx < lp->cur_tx) {
 608             int entry = dirty_tx & TX_RING_MOD_MASK;
 609             int status = lp->tx_ring[entry].base;
 610             void *databuff;
 611             
 612             if (status < 0)
 613                 break;          /* It still hasn't been Txed */
 614 
 615             lp->tx_ring[entry].base = 0;
 616             databuff = (void*)(status & 0x00ffffff);
 617 
 618             if (status & 0x40000000) { /* There was a major error; log it. */
 619                 int err_status = lp->tx_ring[entry].misc;
 620                 lp->stats.tx_errors++;
 621                 if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
 622                 if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
 623                 if (err_status & 0x1000) lp->stats.tx_window_errors++;
 624                 if (err_status & 0x4000) lp->stats.tx_fifo_errors++;
 625                 /* Perhaps we should re-init() after the FIFO error. */
 626             } else {
 627                 if (status & 0x18000000)
 628                     lp->stats.collisions++;
 629                 lp->stats.tx_packets++;
 630             }
 631 
 632             /* We don't free the skb if it's a data-only copy in the bounce
 633                buffer.  The address checks here are ordered -- the first test
 634                should always work.  */
 635             if (databuff >= (void*)(&lp->tx_bounce_buffs[TX_RING_SIZE])
 636                 || databuff < (void*)(lp->tx_bounce_buffs)) {
 637                 struct sk_buff *skb = ((struct sk_buff *)databuff) - 1;
 638                 if (skb->free)
 639                     kfree_skb(skb, FREE_WRITE);
 640                 else
 641                     skb_device_release(skb,FREE_WRITE);
 642                 /* Warning: skb may well vanish at the point you call
 643                    device_release! */
 644             }
 645             dirty_tx++;
 646         }
 647 
 648 #ifndef final_version
 649         if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
 650             printk("out-of-sync dirty pointer, %d vs. %d.\n",
 651                    dirty_tx, lp->cur_tx);
 652             dirty_tx += TX_RING_SIZE;
 653         }
 654 #endif
 655 
 656         if (dev->tbusy  &&  dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
 657             /* The ring is no longer full, clear tbusy. */
 658             dev->tbusy = 0;
 659             mark_bh(INET_BH);
 660         }
 661 
 662         lp->dirty_tx = dirty_tx;
 663     }
 664 
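         /* CSR0 error summary: 0x4000 is a babble (Tx) error, 0x1000 a missed-frame (Rx) error. */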
 665     if (csr0 & 0x8000) {
 666         if (csr0 & 0x4000) lp->stats.tx_errors++;
 667         if (csr0 & 0x1000) lp->stats.rx_errors++;
 668     }
 669 
 670     /* Clear the interrupts we've handled. */
 671     outw(0x0000, dev->base_addr + LANCE_ADDR);
 672     outw(0x7f40, dev->base_addr + LANCE_DATA);
 673 
 674     if (lance_debug > 4)
 675         printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
 676                dev->name, inw(ioaddr + LANCE_ADDR),
 677                inw(dev->base_addr + LANCE_DATA));
 678 
 679     dev->interrupt = 0;
 680     return;
 681 }
 682 
 683 static int
 684 lance_rx(struct device *dev)
 685 {
 686     struct lance_private *lp = (struct lance_private *)dev->priv;
 687     int entry = lp->cur_rx & RX_RING_MOD_MASK;
 688         
 689     /* If we own the next entry, it's a new packet. Send it up. */
 690     while (lp->rx_ring[entry].base >= 0) {
 691         int status = lp->rx_ring[entry].base >> 24;
 692 
 693         if (status != 0x03) {           /* There was an error. */
 694             /* There is a tricky error noted by John Murphy,
 695                <murf@perftech.com> to Russ Nelson: Even with full-sized
 696                buffers it's possible for a jabber packet to use two
 697                buffers, with only the last correctly noting the error. */
 698             if (status & 0x01)  /* Only count a general error at the */
 699                 lp->stats.rx_errors++; /* end of a packet.*/
 700             if (status & 0x20) lp->stats.rx_frame_errors++;
 701             if (status & 0x10) lp->stats.rx_over_errors++;
 702             if (status & 0x08) lp->stats.rx_crc_errors++;
 703             if (status & 0x04) lp->stats.rx_fifo_errors++;
 704         } else {
 705             /* Malloc up new buffer, compatible with net-2e. */
 706             short pkt_len = lp->rx_ring[entry].msg_length;
 707             struct sk_buff *skb;
 708 
 709             skb = alloc_skb(pkt_len, GFP_ATOMIC);
 710             if (skb == NULL) {
 711                 printk("%s: Memory squeeze, deferring packet.\n", dev->name);
 712                 lp->stats.rx_dropped++; /* Really, deferred. */
 713                 break;
 714             }
 715             skb->len = pkt_len;
 716             skb->dev = dev;
 717             memcpy(skb->data,
 718                    (unsigned char *)(lp->rx_ring[entry].base & 0x00ffffff),
 719                    pkt_len);
 720             netif_rx(skb);
 721             lp->stats.rx_packets++;
 722         }
 723 
 724         lp->rx_ring[entry].base |= 0x80000000;
 725         entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
 726     }
 727 
 728     /* We should check that at least two ring entries are free.  If not,
 729        we should free one and mark stats->rx_dropped++. */
 730 
 731     return 0;
 732 }
 733 
 734 static int
 735 lance_close(struct device *dev)
 736 {
 737     int ioaddr = dev->base_addr;
 738     struct lance_private *lp = (struct lance_private *)dev->priv;
 739 
 740     dev->start = 0;
 741     dev->tbusy = 1;
 742 
 743     outw(112, ioaddr+LANCE_ADDR);
 744     lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
 745 
 746     outw(0, ioaddr+LANCE_ADDR);
 747 
 748     if (lance_debug > 1)
 749         printk("%s: Shutting down ethercard, status was %2.2x.\n",
 750                dev->name, inw(ioaddr+LANCE_DATA));
 751 
 752     /* We stop the LANCE here -- it occasionally polls
 753        memory if we don't. */
 754     outw(0x0004, ioaddr+LANCE_DATA);
 755 
 756     disable_dma(dev->dma);
 757 
 758     free_irq(dev->irq);
 759     free_dma(dev->dma);
 760 
 761     irq2dev_map[dev->irq] = 0;
 762 
 763     return 0;
 764 }
 765 
 766 static struct enet_statistics *
 767 lance_get_stats(struct device *dev)
 768 {
 769     struct lance_private *lp = (struct lance_private *)dev->priv;
 770     short ioaddr = dev->base_addr;
 771     short saved_addr;
 772 
 773     cli();
 774     saved_addr = inw(ioaddr+LANCE_ADDR);
 775     outw(112, ioaddr+LANCE_ADDR);
 776     lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
 777     outw(saved_addr, ioaddr+LANCE_ADDR);
 778     sti();
 779 
 780     return &lp->stats;
 781 }
 782 
 783 /* Set or clear the multicast filter for this adaptor.
 784    num_addrs == -1      Promiscuous mode, receive all packets
 785    num_addrs == 0       Normal mode, clear multicast list
 786    num_addrs > 0        Multicast mode, receive normal and MC packets, and do
 787                         best-effort filtering.
 788  */
 789 static void
 790 set_multicast_list(struct device *dev, int num_addrs, void *addrs)
 791 {
 792     short ioaddr = dev->base_addr;
 793 
 794     /* We take the simple way out: accept all multicast packets, or go promiscuous on request. */
 795     outw(0, ioaddr+LANCE_ADDR);
 796     outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance.  */
 797 
 798     outw(15, ioaddr+LANCE_ADDR);
 799     if (num_addrs >= 0) {
 800         short multicast_table[4];
 801         int i;
 802         /* We don't use the multicast table, but rely on upper-layer filtering. */
 803         memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
 804         for (i = 0; i < 4; i++) {
 805             outw(8 + i, ioaddr+LANCE_ADDR);
 806             outw(multicast_table[i], ioaddr+LANCE_DATA);
 807         }
 808         outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
 809     } else {
 810         outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
 811     }
 812 
 813     outw(0, ioaddr+LANCE_ADDR);
 814     outw(0x0142, ioaddr+LANCE_DATA); /* Resume normal operation. */
 815 }
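
/* A hypothetical usage sketch (not from the original source): upper layers
   reach this routine through the dev->set_multicast_list hook installed in
   lance_probe1(), along the lines of the calls below. */
#if 0
static void example_toggle_promiscuous(struct device *dev)
{
    dev->set_multicast_list(dev, -1, NULL);     /* All packets (promiscuous). */
    dev->set_multicast_list(dev, 0, NULL);      /* Back to normal filtering. */
}
#endif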
 816 
 817 #ifdef HAVE_DEVLIST
 818 static unsigned int lance_portlist[] = {0x300, 0x320, 0x340, 0x360, 0};
 819 struct netdev_entry lance_drv =
 820 {"lance", lance_probe1, LANCE_TOTAL_SIZE, lance_portlist};
 821 #endif
 822 
 823 /*
 824  * Local variables:
 825  *  compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c lance.c"
 826  * End:
 827  */
