root/drivers/net/lance.c


DEFINITIONS

This source file includes the following definitions.
  1. lance_init
  2. lance_probe1
  3. lance_open
  4. lance_init_ring
  5. lance_start_xmit
  6. lance_interrupt
  7. lance_rx
  8. lance_close
  9. lance_get_stats
  10. set_multicast_list

/* lance.c: An AMD LANCE ethernet driver for linux. */
/*
    Written 1993 by Donald Becker.

    Copyright 1993 United States Government as represented by the
    Director, National Security Agency.  This software may be used and
    distributed according to the terms of the GNU Public License,
    incorporated herein by reference.

    This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
    with most other LANCE-based bus-master (NE2100 clone) ethercards.

    The author may be reached as becker@super.org or
    C/O Supercomputing Research Ctr., 17100 Science Dr., Bowie MD 20715
*/

static char *version = "lance.c:v0.14g 12/21/93 becker@super.org\n";

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/malloc.h>
#include <linux/interrupt.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#ifndef HAVE_PORTRESERVE
#define check_region(addr, size)        0
#define snarf_region(addr, size)        do ; while(0)
#endif

struct device *init_etherdev(struct device *dev, int sizeof_private,
                             unsigned long *mem_startp);

#ifdef LANCE_DEBUG
int lance_debug = LANCE_DEBUG;
#else
int lance_debug = 1;
#endif

#ifndef LANCE_DMA
#define LANCE_DMA       5
#endif

/*
                Theory of Operation

I. Board Compatibility

This device driver is designed for the AMD 79C960, the "PCnet-ISA
single-chip ethernet controller for ISA".  This chip is used in a wide
variety of boards from vendors such as Allied Telesis, HP, Kingston,
and Boca.  This driver is also intended to work with older AMD 7990
designs, such as the NE1500 and NE2100.  For convenience, I use the name
LANCE to refer to either AMD chip.

II. Board-specific settings

The driver is designed to work with boards that use the faster
bus-master mode, rather than shared memory mode.  (Only older designs
have on-board buffer memory needed to support the slower shared memory mode.)

Most boards have jumpered settings for the I/O base, IRQ line, and DMA channel.
This driver probes the likely base addresses, {0x300, 0x320, 0x340, 0x360}.
After the board is found it generates a DMA-timeout interrupt and uses
autoIRQ to find the IRQ line.  The DMA channel defaults to LANCE_DMA, or it
can be set with the low bits of the otherwise-unused dev->mem_start value.

The HP-J2405A board is an exception: with this board it's easy to read the
EEPROM-set values for the base, IRQ, and DMA.  Of course you must already
_know_ the base address, but that entry is for changing the EEPROM.

III. Driver operation

IIIa. Ring buffers
The LANCE uses ring buffers of Tx and Rx descriptors.  Each entry describes
the base and length of the data buffer, along with status bits.  The size
of these rings is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
the number of ring entries (rather than being directly the entry count) for
implementation ease.  The current values are 4 for both, which leads to
ring sizes of 16 (Tx) and 16 (Rx).  Increasing the number of ring entries
needlessly uses extra space and reduces the chance that an upper layer will
be able to reorder queued Tx packets based on priority.  Decreasing the number
of entries makes it more difficult to achieve back-to-back packet transmission
and increases the chance that the Rx ring will overflow.  (Consider the worst
case of receiving back-to-back minimum-sized packets.)

The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
avoid the administrative overhead. For the Rx side this avoids dynamically
allocating full-sized buffers "just in case", at the expense of a
memory-to-memory data copy for each packet received.  For most systems this
is a good tradeoff: the Rx buffer will always be in low memory, the copy
is inexpensive, and it primes the cache for later packet processing.  For Tx
the buffers are only used when needed as low-memory bounce buffers.

IIIb. 16M memory limitations.
For the ISA bus master mode all structures used directly by the LANCE,
the initialization block, Rx and Tx rings, and data buffers, must be
accessible from the ISA bus, i.e. in the lower 16M of real memory.
This is a problem for current Linux kernels on >16M machines. The network
devices are initialized after memory initialization, and the kernel doles out
memory from the top of memory downward.  The current solution is to have a
special network initialization routine that's called before memory
initialization; this will eventually be generalized for all network devices.
As mentioned before, low-memory "bounce-buffers" are used when needed.

IIIc. Synchronization
The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and other software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  (The Tx-done interrupt can't be selectively turned off, so
we can't avoid the interrupt overhead by having the Tx routine reap the Tx
stats.)  After reaping the stats, it marks the queue entry as empty by setting
the 'base' to zero.  Iff the 'lp->tx_full' flag is set, it clears both the
tx_full and tbusy flags.

*/

/* Set the number of Tx and Rx buffers, using Log_2(# buffers).
   The defaults below give 16 Tx and 16 Rx buffers, i.e. a log value
   of 4 (16 == 2^4) for each. */
#ifndef LANCE_LOG_TX_BUFFERS
#define LANCE_LOG_TX_BUFFERS 4
#define LANCE_LOG_RX_BUFFERS 4
#endif

#define TX_RING_SIZE            (1 << (LANCE_LOG_TX_BUFFERS))
#define TX_RING_MOD_MASK        (TX_RING_SIZE - 1)
#define TX_RING_LEN_BITS        ((LANCE_LOG_TX_BUFFERS) << 29)

#define RX_RING_SIZE            (1 << (LANCE_LOG_RX_BUFFERS))
#define RX_RING_MOD_MASK        (RX_RING_SIZE - 1)
#define RX_RING_LEN_BITS        ((LANCE_LOG_RX_BUFFERS) << 29)
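
/* Note: the init block stores each ring's size as its log2 in the upper three
   bits of the 32-bit ring-pointer word (per the LANCE databook), hence the
   << 29 above.  For example, with LANCE_LOG_RX_BUFFERS == 4 this gives
   RX_RING_SIZE == 16, RX_RING_MOD_MASK == 0x0f and
   RX_RING_LEN_BITS == 0x80000000. */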

#define PKT_BUF_SZ      1544

/* Offsets from base I/O address. */
#define LANCE_DATA 0x10
#define LANCE_ADDR 0x12
#define LANCE_RESET 0x14
#define LANCE_BUS_IF 0x16
#define LANCE_TOTAL_SIZE 0x18
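
/* The LANCE's control/status registers are reached through a two-register
   window: write the CSR number to LANCE_ADDR (the register address port),
   then read or write its value through LANCE_DATA (the register data port).
   Every outw()/inw() pair on these two ports below follows this
   select-then-access pattern. */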

/* The LANCE Rx and Tx ring descriptors. */
struct lance_rx_head {
    int base;
    short buf_length;           /* This length is 2's complement (negative)! */
    short msg_length;           /* This length is "normal". */
};

struct lance_tx_head {
    int   base;
    short length;               /* Length is 2's complement (negative)! */
    short misc;
};

/* The LANCE initialization block, described in the databook. */
struct lance_init_block {
    unsigned short mode;        /* Pre-set mode (reg. 15) */
    unsigned char phys_addr[6]; /* Physical ethernet address */
    unsigned filter[2];         /* Multicast filter (unused). */
    /* Receive and transmit ring base, along with extra bits. */
    unsigned rx_ring;           /* Rx and Tx ring base pointers */
    unsigned tx_ring;
};

struct lance_private {
    char devname[8];
    /* These must be aligned on 8-byte boundaries. */
    struct lance_rx_head rx_ring[RX_RING_SIZE];
    struct lance_tx_head tx_ring[TX_RING_SIZE];
    struct lance_init_block     init_block;
    long rx_buffs;              /* Address of Rx and Tx buffers. */
    /* Tx low-memory "bounce buffer" address. */
    char (*tx_bounce_buffs)[PKT_BUF_SZ];
    int cur_rx, cur_tx;         /* The next free ring entry */
    int dirty_rx, dirty_tx;     /* The ring entries to be free()ed. */
    int dma;
    struct enet_statistics stats;
    char old_lance;
    char lock;
    int pad0, pad1;             /* Used for alignment */
};

unsigned long lance_probe1(short ioaddr, unsigned long mem_start);
static int lance_open(struct device *dev);
static void lance_init_ring(struct device *dev);
static int lance_start_xmit(struct sk_buff *skb, struct device *dev);
static int lance_rx(struct device *dev);
static void lance_interrupt(int reg_ptr);
static int lance_close(struct device *dev);
static struct enet_statistics *lance_get_stats(struct device *dev);
#ifdef HAVE_MULTICAST
static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);
#endif



unsigned long lance_init(unsigned long mem_start, unsigned long mem_end)
{
    int *port, ports[] = {0x300, 0x320, 0x340, 0x360, 0};

    for (port = &ports[0]; *port; port++) {
        int ioaddr = *port;

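        /* A candidate board must have a free I/O region and must answer with
           the 0x57,0x57 signature in bytes 14 and 15 of its station-address
           PROM before it is probed further. */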
        if (   check_region(ioaddr, LANCE_TOTAL_SIZE) == 0
            && inb(ioaddr + 14) == 0x57
            && inb(ioaddr + 15) == 0x57) {
            mem_start = lance_probe1(ioaddr, mem_start);
        }
    }

    return mem_start;
}

unsigned long lance_probe1(short ioaddr, unsigned long mem_start)
{
    struct device *dev;
    struct lance_private *lp;
    int hpJ2405A = 0;
    int i, reset_val;

    hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00
                && inb(ioaddr+2) == 0x09);

    /* Reset the LANCE.  */
    reset_val = inw(ioaddr+LANCE_RESET);

    /* The un-reset is only needed for the real NE2100, and will
       confuse the HP board. */
    if (!hpJ2405A)
        outw(reset_val, ioaddr+LANCE_RESET);

    outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
    if (inw(ioaddr+LANCE_DATA) != 0x0004)
        return mem_start;

    dev = init_etherdev(0, sizeof(struct lance_private)
                        + PKT_BUF_SZ*(RX_RING_SIZE + TX_RING_SIZE),
                        &mem_start);

    printk("%s: LANCE at %#3x,", dev->name, ioaddr);

    /* There is a 16 byte station address PROM at the base address.
       The first six bytes are the station address. */
    for (i = 0; i < 6; i++)
        printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));

    dev->base_addr = ioaddr;
    snarf_region(ioaddr, LANCE_TOTAL_SIZE);

    /* Make certain the data structures used by the LANCE are aligned. */
    dev->priv = (void *)(((int)dev->priv + 7) & ~7);
    lp = (struct lance_private *)dev->priv;
    lp->rx_buffs = (long)dev->priv + sizeof(struct lance_private);
    lp->tx_bounce_buffs = (char (*)[PKT_BUF_SZ])
                           (lp->rx_buffs + PKT_BUF_SZ*RX_RING_SIZE);

#ifndef final_version
    /* This should never happen. */
    if ((int)(lp->rx_ring) & 0x07) {
        printk(" **ERROR** LANCE Rx and Tx rings not on even boundary.\n");
        return mem_start;
    }
#endif

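    /* Distinguish the newer PCnet-ISA (79C960) from the original LANCE by
       reading CSR88, the chip-version register on the PCnet parts; an old
       LANCE has no such register and won't return the expected 0x3003. */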
    outw(88, ioaddr+LANCE_ADDR);
    lp->old_lance = (inw(ioaddr+LANCE_DATA) != 0x3003);

#if defined(notdef)
    printk(lp->old_lance ? " original LANCE (%04x)" : " PCnet-ISA LANCE (%04x)",
           inw(ioaddr+LANCE_DATA));
#endif

    lp->init_block.mode = 0x0003;       /* Disable Rx and Tx. */
    for (i = 0; i < 6; i++)
        lp->init_block.phys_addr[i] = dev->dev_addr[i];
    lp->init_block.filter[0] = 0x00000000;
    lp->init_block.filter[1] = 0x00000000;
    lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
    lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;

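    /* CSR1 and CSR2 hold the low and high halves of the init block's bus
       address (the LANCE generates only 24-bit addresses).  The same pair is
       reloaded in lance_open(). */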
    outw(0x0001, ioaddr+LANCE_ADDR);
    outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
    outw(0x0002, ioaddr+LANCE_ADDR);
    outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);
    outw(0x0000, ioaddr+LANCE_ADDR);

    if (hpJ2405A) {
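        /* On the J2405A the register at LANCE_RESET reads back the EEPROM
           configuration: bits 2-3 index the DMA table, bits 4-6 the IRQ
           table. */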
        char dma_tbl[4] = {3, 5, 6, 7};
        char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
        short reset_val = inw(ioaddr+LANCE_RESET);
        dev->dma = dma_tbl[(reset_val >> 2) & 3];
        dev->irq = irq_tbl[(reset_val >> 4) & 7];
        printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
    } else {
        /* The DMA channel may be passed in on this parameter. */
        if (dev->mem_start & 0x07)
            dev->dma = dev->mem_start & 0x07;
        else if (dev->dma == 0)
            dev->dma = LANCE_DMA;

        /* To auto-IRQ we enable the initialization-done and DMA-error
           interrupts.  For now we will always get a DMA error. */
        if (dev->irq < 2) {

            autoirq_setup(0);

            /* Trigger an initialization just for the interrupt. */
            outw(0x0041, ioaddr+LANCE_DATA);

            dev->irq = autoirq_report(1);
            if (dev->irq)
                printk(", probed IRQ %d, fixed at DMA %d.\n",
                       dev->irq, dev->dma);
            else {
                printk(", failed to detect IRQ line.\n");
                return mem_start;
            }
        } else
            printk(" assigned IRQ %d DMA %d.\n", dev->irq, dev->dma);
    }

    if (! lp->old_lance) {
        /* Turn on auto-select of media (10baseT or BNC) so that the user
           can watch the LEDs even if the board isn't opened. */
        outw(0x0002, ioaddr+LANCE_ADDR);
        outw(0x0002, ioaddr+LANCE_BUS_IF);
    }

    if (lance_debug > 0)
        printk(version);

    /* The LANCE-specific entries in the device structure. */
    dev->open = &lance_open;
    dev->hard_start_xmit = &lance_start_xmit;
    dev->stop = &lance_close;
    dev->get_stats = &lance_get_stats;
    dev->set_multicast_list = &set_multicast_list;

    return mem_start;
}


static int
lance_open(struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    int ioaddr = dev->base_addr;
    int i;

    if (request_irq(dev->irq, &lance_interrupt, 0, "lance")) {
        return -EAGAIN;
    }

    if (request_dma(dev->dma)) {
        free_irq(dev->irq);
        return -EAGAIN;
    }
    irq2dev_map[dev->irq] = dev;

    /* Reset the LANCE */
    inw(ioaddr+LANCE_RESET);

    /* The DMA controller is used as a no-operation slave, "cascade mode". */
    enable_dma(dev->dma);
    set_dma_mode(dev->dma, DMA_MODE_CASCADE);

    /* Un-Reset the LANCE, needed only for the NE2100. */
    if (lp->old_lance)
        outw(0, ioaddr+LANCE_RESET);

    if (! lp->old_lance) {
        /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
        outw(0x0002, ioaddr+LANCE_ADDR);
        outw(0x0002, ioaddr+LANCE_BUS_IF);
    }

    if (lance_debug > 1)
        printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
               dev->name, dev->irq, dev->dma, (int) lp->tx_ring, (int) lp->rx_ring,
               (int) &lp->init_block);

    lance_init_ring(dev);
    /* Re-initialize the LANCE, and start it when done. */
    outw(0x0001, ioaddr+LANCE_ADDR);
    outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
    outw(0x0002, ioaddr+LANCE_ADDR);
    outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);

    outw(0x0004, ioaddr+LANCE_ADDR);
    outw(0x0d15, ioaddr+LANCE_DATA);

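    /* Select CSR0 and set INIT (0x0001); the chip signals completion with
       IDON (0x0100), polled for below, after which writing 0x0142 acknowledges
       IDON and sets INEA|STRT to enable interrupts and start the chip. */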
    outw(0x0000, ioaddr+LANCE_ADDR);
    outw(0x0001, ioaddr+LANCE_DATA);

    dev->tbusy = 0;
    dev->interrupt = 0;
    dev->start = 1;
    i = 0;
    while (i++ < 100)
        if (inw(ioaddr+LANCE_DATA) & 0x0100)
            break;
    outw(0x0142, ioaddr+LANCE_DATA);

    if (lance_debug > 2)
        printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
               dev->name, i, (int) &lp->init_block, inw(ioaddr+LANCE_DATA));

    return 0;                   /* Always succeed */
}

/* Initialize the LANCE Rx and Tx rings. */
static void
lance_init_ring(struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    int i;

    lp->lock = 0;
    lp->cur_rx = lp->cur_tx = 0;
    lp->dirty_rx = lp->dirty_tx = 0;

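    /* Hand every Rx buffer to the LANCE: bit 31 of 'base' is the OWN bit, and
       the buffer length is stored as a two's-complement negative value, as the
       descriptor format requires. */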
    for (i = 0; i < RX_RING_SIZE; i++) {
        lp->rx_ring[i].base = (lp->rx_buffs + i*PKT_BUF_SZ) | 0x80000000;
        lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
    }
    /* The Tx buffer address is filled in as needed, but we do need to clear
       the upper ownership bit. */
    for (i = 0; i < TX_RING_SIZE; i++) {
        lp->tx_ring[i].base = 0;
    }

    lp->init_block.mode = 0x0000;
    for (i = 0; i < 6; i++)
        lp->init_block.phys_addr[i] = dev->dev_addr[i];
    lp->init_block.filter[0] = 0x00000000;
    lp->init_block.filter[1] = 0x00000000;
    lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
    lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;
}

static int
lance_start_xmit(struct sk_buff *skb, struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    int ioaddr = dev->base_addr;
    int entry;

    /* Transmitter timeout, serious problems. */
    if (dev->tbusy) {
        int tickssofar = jiffies - dev->trans_start;
        if (tickssofar < 10)
            return 1;
        outw(0, ioaddr+LANCE_ADDR);
        printk("%s: transmit timed out, status %4.4x, resetting.\n",
               dev->name, inw(ioaddr+LANCE_DATA));
        outw(0x0001, ioaddr+LANCE_DATA);
        lp->stats.tx_errors++;
#ifndef final_version
        {
            int i;
            printk(" Ring data dump: dirty_tx %d cur_tx %d cur_rx %d.",
                   lp->dirty_tx, lp->cur_tx, lp->cur_rx);
            for (i = 0 ; i < RX_RING_SIZE; i++)
                printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
                       lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
                       lp->rx_ring[i].msg_length);
            for (i = 0 ; i < TX_RING_SIZE; i++)
                printk(" %s%08x %04x %04x", i & 0x3 ? "" : "\n ",
                       lp->tx_ring[i].base, -lp->tx_ring[i].length,
                       lp->tx_ring[i].misc);
            printk("\n");
        }
#endif
        lance_init_ring(dev);
        outw(0x0043, ioaddr+LANCE_DATA);

        dev->tbusy=0;
        dev->trans_start = jiffies;

        return 0;
    }

    if (skb == NULL) {
        dev_tint(dev);
        return 0;
    }

    if (skb->len <= 0)
        return 0;

    if (lance_debug > 3) {
        outw(0x0000, ioaddr+LANCE_ADDR);
        printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
               inw(ioaddr+LANCE_DATA));
        outw(0x0000, ioaddr+LANCE_DATA);
    }

    /* Block a timer-based transmit from overlapping.  This could better be
       done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
    if (set_bit(0, (void*)&dev->tbusy) != 0) {
        printk("%s: Transmitter access conflict.\n", dev->name);
        return 1;
    }

    if (set_bit(0, (void*)&lp->lock) != 0) {
        if (lance_debug > 2)
            printk("%s: tx queue lock!\n", dev->name);
        /* don't clear dev->tbusy flag. */
        return 1;
    }

    /* Fill in a Tx ring entry */

    /* Mask to ring buffer boundary. */
    entry = lp->cur_tx & TX_RING_MOD_MASK;

    /* Caution: the write order is important here, set the base address
       with the "ownership" bits last. */

    /* The old LANCE chips don't automatically pad buffers to the minimum size. */
    if (lp->old_lance) {
        lp->tx_ring[entry].length =
            -(ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN);
    } else
        lp->tx_ring[entry].length = -skb->len;

    lp->tx_ring[entry].misc = 0x0000;

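    /* In both branches below, 'base' packs the 24-bit buffer address with
       OWN|STP|ENP in the top byte (0x83), handing the frame to the LANCE as a
       single-descriptor packet. */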
    /* If any part of this buffer is >16M we must copy it to a low-memory
       buffer. */
    if ((int)(skb->data) + skb->len > 0x01000000) {
        if (lance_debug > 5)
            printk("%s: bouncing a high-memory packet (%#x).\n",
                   dev->name, (int)(skb->data));
        memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len);
        lp->tx_ring[entry].base =
            (int)(lp->tx_bounce_buffs + entry) | 0x83000000;
        dev_kfree_skb (skb, FREE_WRITE);
    } else {
        lp->tx_ring[entry].base = (int)(skb->data) | 0x83000000;
    }
    lp->cur_tx++;

    /* Trigger an immediate send poll. */
    outw(0x0000, ioaddr+LANCE_ADDR);
    outw(0x0048, ioaddr+LANCE_DATA);

    dev->trans_start = jiffies;

    cli();
    lp->lock = 0;
    if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base == 0)
        dev->tbusy=0;
    sti();

    return 0;
}

/* The LANCE interrupt handler. */
static void
lance_interrupt(int reg_ptr)
{
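    /* In this kernel generation the handler receives a pointer to the saved
       registers; the IRQ number is recovered from orig_eax, which the
       low-level interrupt stubs set to -(irq + 2). */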
    int irq = -(((struct pt_regs *)reg_ptr)->orig_eax+2);
    struct device *dev = (struct device *)(irq2dev_map[irq]);
    struct lance_private *lp;
    int csr0, ioaddr;

    if (dev == NULL) {
        printk ("lance_interrupt(): irq %d for unknown device.\n", irq);
        return;
    }

    ioaddr = dev->base_addr;
    lp = (struct lance_private *)dev->priv;
    if (dev->interrupt)
        printk("%s: Re-entering the interrupt handler.\n", dev->name);

    dev->interrupt = 1;

    outw(0x00, dev->base_addr + LANCE_ADDR);
    csr0 = inw(dev->base_addr + LANCE_DATA);

    /* Acknowledge all of the current interrupt sources ASAP. */
    outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);

    if (lance_debug > 5)
        printk("%s: interrupt  csr0=%#2.2x new csr=%#2.2x.\n",
               dev->name, csr0, inw(dev->base_addr + LANCE_DATA));

    if (csr0 & 0x0400)          /* Rx interrupt */
        lance_rx(dev);

    if (csr0 & 0x0200) {        /* Tx-done interrupt */
        int dirty_tx = lp->dirty_tx;

        while (dirty_tx < lp->cur_tx) {
            int entry = dirty_tx & TX_RING_MOD_MASK;
            int status = lp->tx_ring[entry].base;
            void *databuff;

            if (status < 0)
                break;          /* It still hasn't been Txed */

            lp->tx_ring[entry].base = 0;
            databuff = (void*)(status & 0x00ffffff);

            if (status & 0x40000000) { /* There was a major error; log it. */
                int err_status = lp->tx_ring[entry].misc;
                lp->stats.tx_errors++;
                if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
                if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
                if (err_status & 0x1000) lp->stats.tx_window_errors++;
                if (err_status & 0x4000) lp->stats.tx_fifo_errors++;
                /* Perhaps we should re-init() after the FIFO error. */
            } else {
                if (status & 0x18000000)
                    lp->stats.collisions++;
                lp->stats.tx_packets++;
            }

            /* We don't free the skb if it's a data-only copy in the bounce
               buffer.  The address checks here are sorted -- the first test
               should always work.  */
            if (databuff >= (void*)(&lp->tx_bounce_buffs[TX_RING_SIZE])
                || databuff < (void*)(lp->tx_bounce_buffs)) {
                struct sk_buff *skb = ((struct sk_buff *)databuff) - 1;
                dev_kfree_skb(skb,FREE_WRITE);

                /* Warning: skb may well vanish at the point you call
                   device_release! */
            }
            dirty_tx++;
        }

#ifndef final_version
        if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
            printk("out-of-sync dirty pointer, %d vs. %d.\n",
                   dirty_tx, lp->cur_tx);
            dirty_tx += TX_RING_SIZE;
        }
#endif

        if (dev->tbusy  &&  dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
            /* The ring is no longer full, clear tbusy. */
            dev->tbusy = 0;
            mark_bh(NET_BH);
        }

        lp->dirty_tx = dirty_tx;
    }

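    /* CSR0 bit 15 (ERR) summarizes the error bits; of those, count babble
       (0x4000) as a transmit error and a missed frame (0x1000) as a receive
       error. */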
    if (csr0 & 0x8000) {
        if (csr0 & 0x4000) lp->stats.tx_errors++;
        if (csr0 & 0x1000) lp->stats.rx_errors++;
    }

    /* Clear the interrupts we've handled. */
    outw(0x0000, dev->base_addr + LANCE_ADDR);
    outw(0x7f40, dev->base_addr + LANCE_DATA);

    if (lance_debug > 4)
        printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
               dev->name, inw(ioaddr + LANCE_ADDR),
               inw(dev->base_addr + LANCE_DATA));

    dev->interrupt = 0;
    return;
}

static int
lance_rx(struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    int entry = lp->cur_rx & RX_RING_MOD_MASK;
    int i;

    /* If we own the next entry, it's a new packet. Send it up. */
    while (lp->rx_ring[entry].base >= 0) {
        int status = lp->rx_ring[entry].base >> 24;

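        /* The high byte of 'base' is the descriptor's flag byte: 0x03 is
           STP|ENP, i.e. a complete packet held in a single buffer; anything
           else carries error bits. */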
        if (status != 0x03) {           /* There was an error. */
            /* There is a tricky error noted by John Murphy,
               <murf@perftech.com> to Russ Nelson: Even with full-sized
               buffers it's possible for a jabber packet to use two
               buffers, with only the last correctly noting the error. */
            if (status & 0x01)  /* Only count a general error at the */
                lp->stats.rx_errors++; /* end of a packet.*/
            if (status & 0x20) lp->stats.rx_frame_errors++;
            if (status & 0x10) lp->stats.rx_over_errors++;
            if (status & 0x08) lp->stats.rx_crc_errors++;
            if (status & 0x04) lp->stats.rx_fifo_errors++;
            lp->rx_ring[entry].base &= 0x03ffffff;
        } else {
            /* Malloc up new buffer, compatible with net-2e. */
            short pkt_len = lp->rx_ring[entry].msg_length;
            struct sk_buff *skb;

            skb = alloc_skb(pkt_len, GFP_ATOMIC);
            if (skb == NULL) {
                printk("%s: Memory squeeze, deferring packet.\n", dev->name);
                for (i=0; i < RX_RING_SIZE; i++)
                  if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
                    break;

                if (i > RX_RING_SIZE -2) {
                  lp->stats.rx_dropped++;
                  lp->rx_ring[entry].base |= 0x80000000;
                  lp->cur_rx++;
                }
                break;
            }
            skb->len = pkt_len;
            skb->dev = dev;
            memcpy(skb->data,
                   (unsigned char *)(lp->rx_ring[entry].base & 0x00ffffff),
                   pkt_len);
            netif_rx(skb);
            lp->stats.rx_packets++;
        }

        lp->rx_ring[entry].base |= 0x80000000;
        entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
    }

    /* We should check that at least two ring entries are free.  If not,
       we should free one and mark stats->rx_dropped++. */

    return 0;
}

static int
lance_close(struct device *dev)
{
    int ioaddr = dev->base_addr;
    struct lance_private *lp = (struct lance_private *)dev->priv;

    dev->start = 0;
    dev->tbusy = 1;

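    /* CSR112 holds the missed-frame count on the newer PCnet-ISA parts (the
       same register lance_get_stats() reads); snapshot it before stopping the
       chip. */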
    outw(112, ioaddr+LANCE_ADDR);
    lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);

    outw(0, ioaddr+LANCE_ADDR);

    if (lance_debug > 1)
        printk("%s: Shutting down ethercard, status was %2.2x.\n",
               dev->name, inw(ioaddr+LANCE_DATA));

    /* We stop the LANCE here -- it occasionally polls
       memory if we don't. */
    outw(0x0004, ioaddr+LANCE_DATA);

    disable_dma(dev->dma);

    free_irq(dev->irq);
    free_dma(dev->dma);

    irq2dev_map[dev->irq] = 0;

    return 0;
}

static struct enet_statistics *
lance_get_stats(struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    short ioaddr = dev->base_addr;
    short saved_addr;

    cli();
    saved_addr = inw(ioaddr+LANCE_ADDR);
    outw(112, ioaddr+LANCE_ADDR);
    lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
    outw(saved_addr, ioaddr+LANCE_ADDR);
    sti();

    return &lp->stats;
}

/* Set or clear the multicast filter for this adaptor.
   num_addrs == -1      Promiscuous mode, receive all packets
   num_addrs == 0       Normal mode, clear multicast list
   num_addrs > 0        Multicast mode, receive normal and MC packets, and do
                        best-effort filtering.
 */
static void
set_multicast_list(struct device *dev, int num_addrs, void *addrs)
{
    short ioaddr = dev->base_addr;

    /* Promiscuous mode is used only when explicitly requested; otherwise the
       multicast filter is set to accept-all (num_addrs > 0) or cleared. */
    outw(0, ioaddr+LANCE_ADDR);
    outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance.  */

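    /* CSR15 is the mode register: bit 15 (0x8000) is the promiscuous bit.
       CSRs 8-11 hold the 64-bit multicast (logical address) filter. */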
    outw(15, ioaddr+LANCE_ADDR);
    if (num_addrs >= 0) {
        short multicast_table[4];
        int i;
        /* We don't use the multicast table, but rely on upper-layer filtering. */
        memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
        for (i = 0; i < 4; i++) {
            outw(8 + i, ioaddr+LANCE_ADDR);
            outw(multicast_table[i], ioaddr+LANCE_DATA);
        }
        outw(15, ioaddr+LANCE_ADDR);     /* Point back at the mode register. */
        outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
    } else {
        outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
    }

    outw(0, ioaddr+LANCE_ADDR);
    outw(0x0142, ioaddr+LANCE_DATA); /* Resume normal operation. */
}

#ifdef HAVE_DEVLIST
static unsigned int lance_portlist[] = {0x300, 0x320, 0x340, 0x360, 0};
struct netdev_entry lance_drv =
{"lance", lance_probe1, LANCE_TOTAL_SIZE, lance_portlist};
#endif

/*
 * Local variables:
 *  compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c lance.c"
 * End:
 */
