root/drivers/net/lance.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. lance_init
  2. lance_probe1
  3. lance_open
  4. lance_init_ring
  5. lance_start_xmit
  6. lance_interrupt
  7. lance_rx
  8. lance_close
  9. lance_get_stats
  10. set_multicast_list

   1 /* lance.c: An AMD LANCE ethernet driver for linux. */
   2 /*
   3     Written 1993 by Donald Becker.
   4 
   5     Copyright 1993 United States Government as represented by the
   6     Director, National Security Agency.  This software may be used and
   7     distributed according to the terms of the GNU Public License,
   8     incorporated herein by reference.
   9 
  10     This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
  11     with most other LANCE-based bus-master (NE2100 clone) ethercards.
  12 
  13     The author may be reached as becker@super.org or
  14     C/O Supercomputing Research Ctr., 17100 Science Dr., Bowie MD 20715
  15 */
  16 
  17 static char *version = "lance.c:v0.13s 11/15/93 becker@super.org\n";
  18 
  19 #include <linux/config.h>
  20 #include <linux/kernel.h>
  21 #include <linux/sched.h>
  22 #include <linux/string.h>
  23 #include <linux/ptrace.h>
  24 #include <linux/errno.h>
  25 #include <linux/ioport.h>
  26 #include <linux/malloc.h>
  27 #include <linux/interrupt.h>
  28 #include <asm/bitops.h>
  29 #include <asm/io.h>
  30 #include <asm/dma.h>
  31 
  32 #include "dev.h"
  33 #include "iow.h"
  34 #include "eth.h"
  35 #include "skbuff.h"
  36 #include "arp.h"
  37 
  38 #ifndef HAVE_PORTRESERVE
  39 #define check_region(addr, size)        0
  40 #define snarf_region(addr, size)        do ; while(0)
  41 #endif
  42 
  43 #ifndef HAVE_ALLOC_SKB
  44 #define alloc_skb(size, priority) (struct sk_buff *) kmalloc(size,priority)
  45 #define kfree_skbmem(buff, size) kfree_s(buff,size)
  46 #endif
  47 
  48 struct device *init_etherdev(struct device *dev, int sizeof_private,
  49                              unsigned long *mem_startp);
  50 
  51 #ifdef LANCE_DEBUG
  52 int lance_debug = LANCE_DEBUG;
  53 #else
  54 int lance_debug = 1;
  55 #endif
  56 
  57 #ifndef LANCE_DMA
  58 #define LANCE_DMA       5
  59 #endif
  60 
  61 /*
  62                 Theory of Operation
  63 
  64 I. Board Compatibility
  65 
  66 This device driver is designed for the AMD 79C960, the "PCnet-ISA
  67 single-chip ethernet controller for ISA".  This chip is used in a wide
  68 variety of boards from vendors such as Allied Telesis, HP, Kingston,
  69 and Boca.  This driver is also intended to work with older AMD 7990
  70 designs, such as the NE1500 and NE2100.  For convenience, I use the name
  71 LANCE to refer to either AMD chip.
  72 
  73 II. Board-specific settings
  74 
   75 The driver is designed to work with the boards that use the faster
  76 bus-master mode, rather than in shared memory mode.  (Only older designs
  77 have on-board buffer memory needed to support the slower shared memory mode.)
  78 
  79 Most boards have jumpered settings for the I/O base, IRQ line, and DMA channel.
  80 This driver probes the likely base addresses, {0x300, 0x320, 0x340, 0x360}.
   81 After the board is found it generates a DMA-timeout interrupt and uses
  82 autoIRQ to find the IRQ line.  The DMA channel defaults to LANCE_DMA, or it
  83 can be set with the low bits of the otherwise-unused dev->mem_start value.
  84 
  85 The HP-J2405A board is an exception: with this board it's easy to read the
  86 EEPROM-set values for the base, IRQ, and DMA.  Of course you must already
  87 _know_ the base address, but that entry is for changing the EEPROM.
  88 
  89 III. Driver operation
  90 
  91 IIIa. Ring buffers
  92 The LANCE uses ring buffers of Tx and Rx descriptors.  Each entry describes
  93 the base and length of the data buffer, along with status bits.  The length
  94 of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
  95 the buffer length (rather than being directly the buffer length) for
  96 implementation ease.  The current values are 2 (Tx) and 4 (Rx), which leads to
  97 ring sizes of 4 (Tx) and 16 (Rx).  Increasing the number of ring entries
  98 needlessly uses extra space and reduces the chance that an upper layer will
  99 be able to reorder queued Tx packets based on priority.  Decreasing the number
 100 of entries makes it more difficult to achieve back-to-back packet transmission
 101 and increases the chance that Rx ring will overflow.  (Consider the worst case
 102 of receiving back-to-back minimum-sized packets.)
 103 
 104 The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
 105 statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
 106 avoid the administrative overhead. For the Rx side this avoids dynamically
 107 allocating full-sized buffers "just in case", at the expense of a
 108 memory-to-memory data copy for each packet received.  For most systems this
  109 is a good tradeoff: the Rx buffer will always be in low memory, the copy
 110 is inexpensive, and it primes the cache for later packet processing.  For Tx
 111 the buffers are only used when needed as low-memory bounce buffers.
 112 
 113 IIIB. 16M memory limitations.
 114 For the ISA bus master mode all structures used directly by the LANCE,
 115 the initialization block, Rx and Tx rings, and data buffers, must be
  116 accessible from the ISA bus, i.e. in the lower 16M of real memory.
 117 This is a problem for current Linux kernels on >16M machines. The network
 118 devices are initialized after memory initialization, and the kernel doles out
 119 memory from the top of memory downward.  The current solution is to have a
 120 special network initialization routine that's called before memory
 121 initialization; this will eventually be generalized for all network devices.
 122 As mentioned before, low-memory "bounce-buffers" are used when needed.
 123 
 124 IIIC. Synchronization
 125 The driver runs as two independent, single-threaded flows of control.  One
 126 is the send-packet routine, which enforces single-threaded use by the
 127 dev->tbusy flag.  The other thread is the interrupt handler, which is single
 128 threaded by the hardware and other software.
 129 
 130 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
 131 flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
 132 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
 133 the 'lp->tx_full' flag.
 134 
 135 The interrupt handler has exclusive control over the Rx ring and records stats
 136 from the Tx ring.  (The Tx-done interrupt can't be selectively turned off, so
 137 we can't avoid the interrupt overhead by having the Tx routine reap the Tx
 138 stats.)  After reaping the stats, it marks the queue entry as empty by setting
 139 the 'base' to zero.  Iff the 'lp->tx_full' flag is set, it clears both the
 140 tx_full and tbusy flags.
 141 
 142 */
 143 
 144 /* Set the number of Tx and Rx buffers, using Log_2(# buffers).
 145    Reasonable default values are 4 Tx buffers, and 16 Rx buffers.
 146    That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4). */
 147 #ifndef LANCE_LOG_TX_BUFFERS
 148 #define LANCE_LOG_TX_BUFFERS 2
 149 #define LANCE_LOG_RX_BUFFERS 4
 150 #endif
 151 
 152 #define TX_RING_SIZE            (1 << (LANCE_LOG_TX_BUFFERS))
 153 #define TX_RING_MOD_MASK        (TX_RING_SIZE - 1)
 154 #define TX_RING_LEN_BITS        ((LANCE_LOG_TX_BUFFERS) << 29)
 155 
 156 #define RX_RING_SIZE            (1 << (LANCE_LOG_RX_BUFFERS))
 157 #define RX_RING_MOD_MASK        (RX_RING_SIZE - 1)
 158 #define RX_RING_LEN_BITS        ((LANCE_LOG_RX_BUFFERS) << 29)
 159 
 160 #define PKT_BUF_SZ      1544
 161 
 162 /* Offsets from base I/O address. */
 163 #define LANCE_DATA 0x10
 164 #define LANCE_ADDR 0x12
 165 #define LANCE_RESET 0x14
 166 #define LANCE_BUS_IF 0x16
 167 #define LANCE_TOTAL_SIZE 0x18
 168 
 169 /* The LANCE Rx and Tx ring descriptors. */
 170 struct lance_rx_head {
 171     int base;
 172     short buf_length;           /* This length is 2's complement (negative)! */
 173     short msg_length;           /* This length is "normal". */
 174 };
 175 
 176 struct lance_tx_head {
 177     int   base;
 178     short length;               /* Length is 2's complement (negative)! */
 179     short misc;
 180 };
 181 
 182 /* The LANCE initialization block, described in databook. */
 183 struct lance_init_block {
 184     unsigned short mode;        /* Pre-set mode (reg. 15) */
 185     unsigned char phys_addr[6]; /* Physical ethernet address */
 186     unsigned filter[2];         /* Multicast filter (unused). */
 187     /* Receive and transmit ring base, along with extra bits. */
 188     unsigned rx_ring;           /* Tx and Rx ring base pointers */
 189     unsigned tx_ring;
 190 };
 191 
 192 struct lance_private {
 193     char devname[8];
 194     /* These must aligned on 8-byte boundaries. */
 195     struct lance_rx_head rx_ring[RX_RING_SIZE];
 196     struct lance_tx_head tx_ring[TX_RING_SIZE];
 197     struct lance_init_block     init_block;
 198     long rx_buffs;              /* Address of Rx and Tx buffers. */
 199     /* Tx low-memory "bounce buffer" address. */
 200     char (*tx_bounce_buffs)[PKT_BUF_SZ];
 201     int cur_rx, cur_tx;         /* The next free ring entry */
 202     int dirty_rx, dirty_tx;     /* The ring entries to be free()ed. */
 203     int dma;
 204     struct enet_statistics stats;
 205     char old_lance;
 206     int pad0, pad1;             /* Used for alignment */
 207 };
 208 
 209 static unsigned long lance_probe1(short ioaddr, unsigned long mem_start);
 210 static int lance_open(struct device *dev);
 211 static void lance_init_ring(struct device *dev);
 212 static int lance_start_xmit(struct sk_buff *skb, struct device *dev);
 213 static int lance_rx(struct device *dev);
 214 static void lance_interrupt(int reg_ptr);
 215 static int lance_close(struct device *dev);
 216 static struct enet_statistics *lance_get_stats(struct device *dev);
 217 #ifdef HAVE_MULTICAST
 218 static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);
 219 #endif
 220 
 221 
 222 
 223 unsigned long lance_init(unsigned long mem_start, unsigned long mem_end)
     /* [previous][next][first][last][top][bottom][index][help] */
 224 {
 225     int *port, ports[] = {0x300, 0x320, 0x340, 0x360, 0};
 226 
 227     printk("lance_init(%#x, %#x).\n", mem_start, mem_end);
 228     for (port = &ports[0]; *port; port++) {
 229         int ioaddr = *port;
 230 
 231         if (   check_region(ioaddr, LANCE_TOTAL_SIZE) == 0
 232             && inb(ioaddr + 14) == 0x57
 233             && inb(ioaddr + 15) == 0x57) {
 234             mem_start = lance_probe1(ioaddr, mem_start);
 235         }
 236     }
 237 
 238     return mem_start;
 239 }
 240 
/* Probe a single I/O address for a LANCE, and if found set up the device:
   allocate the private area and DMA-safe buffers from mem_start, read the
   station address, determine IRQ/DMA (from EEPROM on the HP J2405A,
   otherwise from parameters or auto-probe), and install the driver entry
   points.  Returns the updated mem_start; on failure returns it unchanged. */
static unsigned long lance_probe1(short ioaddr, unsigned long mem_start)
{
    struct device *dev;
    struct lance_private *lp;
    int hpJ2405A = 0;
    int i, reset_val;

    /* The HP J2405A's station address PROM starts with the HP OUI
       08:00:09 -- use that to detect the board variant. */
    hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00
                && inb(ioaddr+2) == 0x09);

    /* Reset the LANCE.  */
    reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */

    /* The Un-Reset needed is only needed for the real NE2100, and will
       confuse the HP board. */
    if (!hpJ2405A)
        outw(reset_val, ioaddr+LANCE_RESET);

    outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
    /* Immediately after reset CSR0 must read 0x0004 (the STOP bit);
       anything else means there is no LANCE at this address. */
    if (inw(ioaddr+LANCE_DATA) != 0x0004)
        return mem_start;

    /* Allocate the device structure plus the private area and all Rx/Tx
       packet buffers in one chunk carved from mem_start, so everything
       the chip touches is in ISA-DMA-reachable low memory. */
    dev = init_etherdev(0, sizeof(struct lance_private)
                        + PKT_BUF_SZ*(RX_RING_SIZE + TX_RING_SIZE),
                        &mem_start);

    printk("%s: LANCE at %#3x,", dev->name, ioaddr);

    /* There is a 16 byte station address PROM at the base address.
       The first six bytes are the station address. */
    for (i = 0; i < 6; i++)
        printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));

    dev->base_addr = ioaddr;
    snarf_region(ioaddr, LANCE_TOTAL_SIZE);

    /* Make certain the data structures used by the LANCE are aligned. */
    dev->priv = (void *)(((int)dev->priv + 7) & ~7);
    lp = (struct lance_private *)dev->priv;
    /* Layout: private struct, then RX_RING_SIZE full-size Rx buffers,
       then TX_RING_SIZE bounce buffers. */
    lp->rx_buffs = (long)dev->priv + sizeof(struct lance_private);
    lp->tx_bounce_buffs = (char (*)[PKT_BUF_SZ])
                           (lp->rx_buffs + PKT_BUF_SZ*RX_RING_SIZE);

#ifndef final_version
    /* This should never happen. */
    if ((int)(lp->rx_ring) & 0x07) {
        printk(" **ERROR** LANCE Rx and Tx rings not on even boundary.\n");
        return mem_start;
    }
#endif

    /* CSR88 holds the chip version on the 79C960; an original LANCE
       (Am7990) doesn't implement it and won't read back 0x3003. */
    outw(88, ioaddr+LANCE_ADDR);
    lp->old_lance = (inw(ioaddr+LANCE_DATA) != 0x3003);

#if defined(notdef)
    printk(lp->old_lance ? " original LANCE (%04x)" : " PCnet-ISA LANCE (%04x)",
           inw(ioaddr+LANCE_DATA));
#endif

    lp->init_block.mode = 0x0003;       /* Disable Rx and Tx. */
    for (i = 0; i < 6; i++)
        lp->init_block.phys_addr[i] = dev->dev_addr[i];
    lp->init_block.filter[0] = 0x00000000;
    lp->init_block.filter[1] = 0x00000000;
    lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
    lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;

    /* CSR1/CSR2 hold the low/high halves of the init block address. */
    outw(0x0001, ioaddr+LANCE_ADDR);
    outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
    outw(0x0002, ioaddr+LANCE_ADDR);
    outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);
    outw(0x0000, ioaddr+LANCE_ADDR);

    if (hpJ2405A) {
        /* The HP board encodes its EEPROM-set DMA and IRQ in the value
           read back from the reset register. */
        char dma_tbl[4] = {3, 5, 6, 7};
        char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
        short reset_val = inw(ioaddr+LANCE_RESET);
        dev->dma = dma_tbl[(reset_val >> 2) & 3];
        dev->irq = irq_tbl[(reset_val >> 4) & 7];
        printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
    } else {
        /* The DMA channel may be passed in on this parameter. */
        if (dev->mem_start & 0x07)
            dev->dma = dev->mem_start & 0x07;
        else if (dev->dma == 0)
            dev->dma = LANCE_DMA;

        /* To auto-IRQ we enable the initialization-done and DMA err,
           interrupts. For now we will always get a DMA error. */
        if (dev->irq < 2) {

            autoirq_setup(0);

            /* Trigger an initialization just for the interrupt. */
            outw(0x0041, ioaddr+LANCE_DATA);

            dev->irq = autoirq_report(1);
            if (dev->irq)
                printk(", probed IRQ %d, fixed at DMA %d.\n", dev->irq, dev->dma);
            else {
                printk(", failed to detect IRQ line.\n");
                return mem_start;
            }
        } else
            printk(" assigned IRQ %d DMA %d.\n", dev->irq, dev->dma);
    }

    if (! lp->old_lance) {
        /* Turn on auto-select of media (10baseT or BNC) so that the user
           can watch the LEDs even if the board isn't opened. */
        outw(0x0002, ioaddr+LANCE_ADDR);
        outw(0x0002, ioaddr+LANCE_BUS_IF);
    }

    if (lance_debug > 0)
        printk(version);

    /* The LANCE-specific entries in the device structure. */
    dev->open = &lance_open;
    dev->hard_start_xmit = &lance_start_xmit;
    dev->stop = &lance_close;
    dev->get_stats = &lance_get_stats;
#ifdef HAVE_MULTICAST
    dev->set_multicast_list = &set_multicast_list;
#endif

    return mem_start;
}
 369 
 370 
/* Open the device: claim IRQ and DMA channel, reset and re-program the
   LANCE from the init block, and start it.  Returns 0 on success or
   -EAGAIN if the IRQ or DMA channel cannot be acquired. */
static int
lance_open(struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    int ioaddr = dev->base_addr;
    int i;

    if (request_irq(dev->irq, &lance_interrupt)) {
        return -EAGAIN;
    }

    if (request_dma(dev->dma)) {
        free_irq(dev->irq);
        return -EAGAIN;
    }
    irq2dev_map[dev->irq] = dev;

    /* Reset the LANCE (a read of the reset port resets the chip). */
    inw(ioaddr+LANCE_RESET);

    /* The DMA controller is used as a no-operation slave, "cascade mode". */
    enable_dma(dev->dma);
    set_dma_mode(dev->dma, DMA_MODE_CASCADE);

    /* Un-Reset the LANCE, needed only for the NE2100. */
    if (lp->old_lance)
        outw(0, ioaddr+LANCE_RESET);

    if (! lp->old_lance) {
        /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
        outw(0x0002, ioaddr+LANCE_ADDR);
        outw(0x0002, ioaddr+LANCE_BUS_IF);
    }

    if (lance_debug > 1)
        printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
               dev->name, dev->irq, dev->dma, (int) lp->tx_ring, (int) lp->rx_ring,
               (int) &lp->init_block);

    lance_init_ring(dev);
    /* Re-initialize the LANCE, and start it when done.
       CSR1/CSR2 take the low/high halves of the init block address. */
    outw(0x0001, ioaddr+LANCE_ADDR);
    outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
    outw(0x0002, ioaddr+LANCE_ADDR);
    outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);

    /* CSR4 <- 0x0d15: 79C960-specific feature bits (see the databook). */
    outw(0x0004, ioaddr+LANCE_ADDR);
    outw(0x0d15, ioaddr+LANCE_DATA);

    /* CSR0 <- INIT: tell the chip to fetch the init block. */
    outw(0x0000, ioaddr+LANCE_ADDR);
    outw(0x0001, ioaddr+LANCE_DATA);

    dev->tbusy = 0;
    dev->interrupt = 0;
    dev->start = 1;
    /* Busy-wait (bounded) for IDON (0x0100) in CSR0. */
    i = 0;
    while (i++ < 100)
        if (inw(ioaddr+LANCE_DATA) & 0x0100)
            break;
    /* CSR0 <- 0x0142: ack IDON, enable interrupts, and start Rx/Tx. */
    outw(0x0142, ioaddr+LANCE_DATA);

    if (lance_debug > 2)
        printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
               dev->name, i, (int) &lp->init_block, inw(ioaddr+LANCE_DATA));

    return 0;                   /* Always succeed */
}
 438 
 439 /* Initialize the LANCE Rx and Tx rings. */
 440 static void
 441 lance_init_ring(struct device *dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 442 {
 443     struct lance_private *lp = (struct lance_private *)dev->priv;
 444     int i;
 445 
 446     lp->cur_rx = lp->cur_tx = 0;
 447     lp->dirty_rx = lp->dirty_tx = 0;
 448 
 449     for (i = 0; i < RX_RING_SIZE; i++) {
 450         lp->rx_ring[i].base = (lp->rx_buffs + i*PKT_BUF_SZ) | 0x80000000;
 451         lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
 452     }
 453     /* The Tx buffer address is filled in as needed, but we do need to clear
 454        the upper ownership bit. */
 455     for (i = 0; i < TX_RING_SIZE; i++) {
 456         lp->tx_ring[i].base = 0;
 457     }
 458 
 459     lp->init_block.mode = 0x0000;
 460     for (i = 0; i < 6; i++)
 461         lp->init_block.phys_addr[i] = dev->dev_addr[i];
 462     lp->init_block.filter[0] = 0x00000000;
 463     lp->init_block.filter[1] = 0x00000000;
 464     lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
 465     lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;
 466 }
 467 
/* Queue a packet for transmission.  Handles transmit-timeout recovery,
   fills in the next Tx ring entry (bouncing the data through a low-memory
   buffer if it lives above 16M), and pokes the chip to send immediately.
   Returns 0 when the packet was consumed, 1 to ask for a retry. */
static int
lance_start_xmit(struct sk_buff *skb, struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    int ioaddr = dev->base_addr;
    int entry;

    /* Transmitter timeout, serious problems. */
    if (dev->tbusy) {
        int tickssofar = jiffies - dev->trans_start;
        if (tickssofar < 10)
            return 1;
        outw(0, ioaddr+LANCE_ADDR);
        printk("%s: transmit timed out, status %4.4x, resetting.\n",
               dev->name, inw(ioaddr+LANCE_DATA));
        /* CSR0 <- INIT to restart the chip from the init block. */
        outw(0x0001, ioaddr+LANCE_DATA);
        lp->stats.tx_errors++;
#ifndef final_version
        {
            int i;
            printk(" Ring data dump: dirty_tx %d cur_tx %d cur_rx %d.",
                   lp->dirty_tx, lp->cur_tx, lp->cur_rx);
            for (i = 0 ; i < RX_RING_SIZE; i++)
                printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
                       lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
                       lp->rx_ring[i].msg_length);
            for (i = 0 ; i < TX_RING_SIZE; i++)
                printk(" %s%08x %04x %04x", i & 0x3 ? "" : "\n ",
                       lp->tx_ring[i].base, -lp->tx_ring[i].length,
                       lp->tx_ring[i].misc);
            printk("\n");
        }
#endif
        lance_init_ring(dev);
        /* CSR0 <- 0x0043: ack, re-init and restart. */
        outw(0x0043, ioaddr+LANCE_DATA);

        dev->tbusy=0;
        dev->trans_start = jiffies;

        return 0;
    }

    /* A NULL skb means "transmit queued protocol packets" in this era. */
    if (skb == NULL) {
        dev_tint(dev);
        return 0;
    }

    /* Fill in the ethernet header. */
    if (!skb->arp  &&  dev->rebuild_header(skb+1, dev)) {
        /* Destination not yet resolved: hand the packet to ARP. */
        skb->dev = dev;
        arp_queue (skb);
        return 0;
    }
    skb->arp=1;

    if (skb->len <= 0)
        return 0;

    if (lance_debug > 3) {
        outw(0x0000, ioaddr+LANCE_ADDR);
        printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
               inw(ioaddr+LANCE_DATA));
        outw(0x0000, ioaddr+LANCE_DATA);
    }

    /* Block a timer-based transmit from overlapping.  This could better be
       done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
    if (set_bit(0, (void*)&dev->tbusy) != 0)
        printk("%s: Transmitter access conflict.\n", dev->name);

    /* Fill in a Tx ring entry */

    /* Mask to ring buffer boundary. */
    entry = lp->cur_tx & TX_RING_MOD_MASK;

    /* Caution: the write order is important here, set the base address
       with the "ownership" bits last. */

    /* Old LANCE chips don't automatically pad buffers to min. size. */
    if (lp->old_lance) {
        lp->tx_ring[entry].length =
            -(ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN);
    } else
        lp->tx_ring[entry].length = -skb->len;

    lp->tx_ring[entry].misc = 0x0000;

    /* If any part of this buffer is >16M we must copy it to a low-memory
       buffer. */
    if ((int)(skb+1) + skb->len > 0x01000000) {
        if (lance_debug > 5)
            printk("%s: bouncing a high-memory packet (%#x).\n",
                   dev->name, (int)(skb+1));
        memcpy(&lp->tx_bounce_buffs[entry], skb+1, skb->len);
        /* 0x83000000 = ownership bit plus start/end-of-packet bits. */
        lp->tx_ring[entry].base =
            (int)(lp->tx_bounce_buffs + entry) | 0x83000000;
        /* Data already copied, so the skb can be freed right away. */
        if (skb->free)
            kfree_skb (skb, FREE_WRITE);
    } else
        /* Packet data follows the sk_buff header (skb+1) in this era. */
        lp->tx_ring[entry].base = (int)(skb+1) | 0x83000000;

    lp->cur_tx++;

    /* Trigger an immediate send poll (CSR0 <- TDMD|INEA). */
    outw(0x0000, ioaddr+LANCE_ADDR);
    outw(0x0048, ioaddr+LANCE_DATA);

    dev->trans_start = jiffies;

    /* Leave tbusy set only if the next slot is still occupied (ring full). */
    if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base == 0)
        dev->tbusy=0;

    return 0;
}
 582 
/* The LANCE interrupt handler.  Acknowledges the interrupt causes,
   dispatches Rx work to lance_rx(), reaps finished Tx entries (recording
   stats and freeing skbs), and re-enables chip interrupts on exit. */
static void
lance_interrupt(int reg_ptr)
{
    /* Recover the IRQ number from the saved orig_eax (the era's
       convention: the IRQ is encoded as -(irq+2)). */
    int irq = -(((struct pt_regs *)reg_ptr)->orig_eax+2);
    struct device *dev = (struct device *)(irq2dev_map[irq]);
    struct lance_private *lp;
    int csr0, ioaddr;

    if (dev == NULL) {
        printk ("lance_interrupt(): irq %d for unknown device.\n", irq);
        return;
    }

    ioaddr = dev->base_addr;
    lp = (struct lance_private *)dev->priv;
    if (dev->interrupt)
        printk("%s: Re-entering the interrupt handler.\n", dev->name);

    dev->interrupt = 1;

    outw(0x00, dev->base_addr + LANCE_ADDR);
    csr0 = inw(dev->base_addr + LANCE_DATA);

    /* Acknowledge all of the current interrupt sources ASAP.
       CSR0 cause bits are write-one-to-clear; masking with ~0x004f keeps
       the enable/control bits from being written back. */
    outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);

    if (lance_debug > 5)
        printk("%s: interrupt  csr0=%#2.2x new csr=%#2.2x.\n",
               dev->name, csr0, inw(dev->base_addr + LANCE_DATA));

    if (csr0 & 0x0400)          /* Rx interrupt */
        lance_rx(dev);

    if (csr0 & 0x0200) {        /* Tx-done interrupt */
        int dirty_tx = lp->dirty_tx;

        if (dirty_tx == lp->cur_tx - TX_RING_SIZE
            && dev->tbusy) {
            /* The ring is full, clear tbusy. */
            dev->tbusy = 0;
            mark_bh(INET_BH);
        }

        while (dirty_tx < lp->cur_tx) {
            int entry = dirty_tx & TX_RING_MOD_MASK;
            int status = lp->tx_ring[entry].base;
            void *databuff;

            /* A negative base means the ownership bit is still set. */
            if (status < 0)
                break;          /* It still hasn't been Txed */

            lp->tx_ring[entry].base = 0;
            databuff = (void*)(status & 0x00ffffff);

            if (status & 0x40000000) { /* There was a major error, log it. */
                int err_status = lp->tx_ring[entry].misc;
                lp->stats.tx_errors++;
                if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
                if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
                if (err_status & 0x1000) lp->stats.tx_window_errors++;
                if (err_status & 0x4000) lp->stats.tx_fifo_errors++;
                /* We should re-init() after the FIFO error. */
            } else if (status & 0x18000000)
                lp->stats.collisions++;
            else
                lp->stats.tx_packets++;

            /* We don't free the skb if it's a data-only copy in the bounce
               buffer.  The address checks here are sorted -- the first test
               should always work.  */
            if (databuff >= (void*)(&lp->tx_bounce_buffs[TX_RING_SIZE])
                || databuff < (void*)(lp->tx_bounce_buffs)) {
                /* Not a bounce buffer: the data sits just past the
                   sk_buff header, so step back to recover the skb. */
                struct sk_buff *skb = ((struct sk_buff *)databuff) - 1;
                if (skb->free)
                    kfree_skb(skb, FREE_WRITE);
            }
            dirty_tx++;
        }

        lp->dirty_tx = dirty_tx;

#ifndef final_version
        if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
            printk("out-of-sync dirty pointer, %d vs. %d.\n",
                   dirty_tx, lp->cur_tx);
            lp->dirty_tx += TX_RING_SIZE;
        }
#endif
    }

    if (csr0 & 0x8000) {        /* Error summary bit. */
        if (csr0 & 0x4000) lp->stats.tx_errors++;   /* Babble. */
        if (csr0 & 0x1000) lp->stats.rx_errors++;   /* Missed packet. */
    }

    /* Clear the interrupts we've handled and set interrupt-enable. */
    outw(0x0000, dev->base_addr + LANCE_ADDR);
    outw(0x7f40, dev->base_addr + LANCE_DATA);

    if (lance_debug > 4)
        printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
               dev->name, inw(ioaddr + LANCE_ADDR),
               inw(dev->base_addr + LANCE_DATA));

    dev->interrupt = 0;
    return;
}
 691 
/* Drain the Rx ring: for every descriptor the host owns, either record
   the error stats or copy the packet into a fresh sk_buff and pass it up,
   then return the buffer to the LANCE.  Always returns 0. */
static int
lance_rx(struct device *dev)
{
    struct lance_private *lp = (struct lance_private *)dev->priv;
    int entry = lp->cur_rx & RX_RING_MOD_MASK;

    /* If we own the next entry, it's a new packet. Send it up.
       (A non-negative base means the ownership bit is clear.) */
    while (lp->rx_ring[entry].base >= 0) {
        int status = lp->rx_ring[entry].base >> 24;  /* Top status byte. */

        if (status & 0x40) {    /* There was an error. */
            lp->stats.rx_errors++;
            if (status & 0x20) lp->stats.rx_frame_errors++;
            if (status & 0x10) lp->stats.rx_over_errors++;
            if (status & 0x08) lp->stats.rx_crc_errors++;
            if (status & 0x04) lp->stats.rx_fifo_errors++;
        } else {
            /* Malloc up new buffer, compatible with net-2e. */
            short pkt_len = lp->rx_ring[entry].msg_length;
            int sksize = sizeof(struct sk_buff) + pkt_len;
            struct sk_buff *skb;

            skb = alloc_skb(sksize, GFP_ATOMIC);
            if (skb == NULL) {
                /* Leave the packet in the ring; we may get it next time. */
                printk("%s: Memory squeeze, deferring packet.\n", dev->name);
                lp->stats.rx_dropped++; /* Really, deferred. */
                break;
            }
            skb->mem_len = sksize;
            skb->mem_addr = skb;
            skb->len = pkt_len;
            skb->dev = dev;
            /* Copy out of the ring buffer; low 24 bits are the address. */
            memcpy((unsigned char *) (skb + 1),
                   (unsigned char *)(lp->rx_ring[entry].base & 0x00ffffff),
                   pkt_len);
#ifdef HAVE_NETIF_RX
            netif_rx(skb);
#else
            skb->lock = 0;
            if (dev_rint((unsigned char*)skb, pkt_len, IN_SKBUFF, dev) != 0) {
                kfree_skbmem(skb, sksize);
                lp->stats.rx_dropped++;
                break;
            }
#endif
            lp->stats.rx_packets++;
        }

        /* Return the buffer to the LANCE by setting the ownership bit. */
        lp->rx_ring[entry].base |= 0x80000000;
        entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
    }

    /* We should check that at least two ring entries are free.  If not,
       we should free one and mark stats->rx_dropped++. */

    return 0;
}
 749 
/* Shut the device down: harvest the missed-packet count, stop the chip,
   and release the DMA channel and IRQ.  Always returns 0. */
static int
lance_close(struct device *dev)
{
    int ioaddr = dev->base_addr;
    struct lance_private *lp = (struct lance_private *)dev->priv;

    dev->start = 0;
    dev->tbusy = 1;

    /* CSR112 holds the missed-frame count. */
    outw(112, ioaddr+LANCE_ADDR);
    lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);

    outw(0, ioaddr+LANCE_ADDR);

    if (lance_debug > 1)
        printk("%s: Shutting down ethercard, status was %2.2x.\n",
               dev->name, inw(ioaddr+LANCE_DATA));

    /* We stop the LANCE here -- it occasionally polls
       memory if we don't. (CSR0 <- STOP.) */
    outw(0x0004, ioaddr+LANCE_DATA);

    disable_dma(dev->dma);

    free_irq(dev->irq);
    free_dma(dev->dma);

    irq2dev_map[dev->irq] = 0;

    return 0;
}
 781 
 782 static struct enet_statistics *
 783 lance_get_stats(struct device *dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 784 {
 785     struct lance_private *lp = (struct lance_private *)dev->priv;
 786     short ioaddr = dev->base_addr;
 787     short saved_addr;
 788 
 789     cli();
 790     saved_addr = inw(ioaddr+LANCE_ADDR);
 791     outw(112, ioaddr+LANCE_ADDR);
 792     lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
 793     outw(saved_addr, ioaddr+LANCE_ADDR);
 794     sti();
 795 
 796     return &lp->stats;
 797 }
 798 
 799 #ifdef HAVE_MULTICAST
 800 /* Set or clear the multicast filter for this adaptor.
 801    num_addrs == -1      Promiscuous mode, receive all packets
 802    num_addrs == 0       Normal mode, clear multicast list
 803    num_addrs > 0        Multicast mode, receive normal and MC packets, and do
 804                         best-effort filtering.
 805  */
 806 static void
 807 set_multicast_list(struct device *dev, int num_addrs, void *addrs)
     /* [previous][next][first][last][top][bottom][index][help] */
 808 {
 809     short ioaddr = dev->base_addr;
 810 
 811     /* We take the simple way out and always enable promiscuous mode. */
 812     outw(0, ioaddr+LANCE_ADDR);
 813     outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance.  */
 814 
 815     outw(15, ioaddr+LANCE_ADDR);
 816     if (num_addrs >= 0) {
 817         short multicast_table[4];
 818         int i;
 819         /* We don't use the multicast table, but rely on upper-layer filtering. */
 820         memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
 821         for (i = 0; i < 4; i++) {
 822             outw(8 + i, ioaddr+LANCE_ADDR);
 823             outw(multicast_table[i], ioaddr+LANCE_DATA);
 824         }
 825         outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
 826     } else {
 827         outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
 828     }
 829 
 830     outw(0, ioaddr+LANCE_ADDR);
 831     outw(0x0142, ioaddr+LANCE_DATA); /* Resume normal operation. */
 832 }
 833 #endif
 834 
 835 /*
 836  * Local variables:
 837  *  compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c lance.c"
 838  * End:
 839  */

/* [previous][next][first][last][top][bottom][index][help] */