root/drivers/net/lance.c


DEFINITIONS

This source file includes the following definitions.
  1. lance_init
  2. lance_probe1
  3. lance_open
  4. lance_init_ring
  5. lance_start_xmit
  6. lance_interrupt
  7. lance_rx
  8. lance_close
  9. lance_get_stats
  10. set_multicast_list

   1 /* lance.c: An AMD LANCE ethernet driver for linux. */
   2 /*
   3     Written 1993 by Donald Becker.
   4 
   5     Copyright 1993 United States Government as represented by the
   6     Director, National Security Agency.  This software may be used and
   7     distributed according to the terms of the GNU Public License,
   8     incorporated herein by reference.
   9 
  10     This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
  11     with most other LANCE-based bus-master (NE2100 clone) ethercards.
  12 
  13     The author may be reached as becker@super.org or
  14     C/O Supercomputing Research Ctr., 17100 Science Dr., Bowie MD 20715
  15 */
  16 
  17 static char *version = "lance.c:v0.14g 12/21/93 becker@super.org\n";
  18 
  19 #include <linux/config.h>
  20 #include <linux/kernel.h>
  21 #include <linux/sched.h>
  22 #include <linux/string.h>
  23 #include <linux/ptrace.h>
  24 #include <linux/errno.h>
  25 #include <linux/ioport.h>
  26 #include <linux/malloc.h>
  27 #include <linux/interrupt.h>
  28 #include <asm/bitops.h>
  29 #include <asm/io.h>
  30 #include <asm/dma.h>
  31 
  32 #include "dev.h"
  33 #include "eth.h"
  34 #include "skbuff.h"
  35 #include "arp.h"
  36 
  37 #ifndef HAVE_PORTRESERVE
  38 #define check_region(addr, size)        0
  39 #define snarf_region(addr, size)        do ; while(0)
  40 #endif
  41 
  42 #ifndef HAVE_ALLOC_SKB
  43 #define alloc_skb(size, priority) (struct sk_buff *) kmalloc(size,priority)
  44 #define kfree_skbmem(buff, size) kfree_s(buff,size)
  45 #endif
  46 
  47 struct device *init_etherdev(struct device *dev, int sizeof_private,
  48                              unsigned long *mem_startp);
  49 
  50 #ifdef LANCE_DEBUG
  51 int lance_debug = LANCE_DEBUG;
  52 #else
  53 int lance_debug = 1;
  54 #endif
  55 
  56 #ifndef LANCE_DMA
  57 #define LANCE_DMA       5
  58 #endif
  59 
  60 /*
  61                 Theory of Operation
  62 
  63 I. Board Compatibility
  64 
  65 This device driver is designed for the AMD 79C960, the "PCnet-ISA
  66 single-chip ethernet controller for ISA".  This chip is used in a wide
  67 variety of boards from vendors such as Allied Telesis, HP, Kingston,
  68 and Boca.  This driver is also intended to work with older AMD 7990
  69 designs, such as the NE1500 and NE2100.  For convenience, I use the name
  70 LANCE to refer to either AMD chip.
  71 
  72 II. Board-specific settings
  73 
   74 The driver is designed to work with boards that use the faster
   75 bus-master mode, rather than the shared memory mode.  (Only older designs
  76 have on-board buffer memory needed to support the slower shared memory mode.)
  77 
  78 Most boards have jumpered settings for the I/O base, IRQ line, and DMA channel.
  79 This driver probes the likely base addresses, {0x300, 0x320, 0x340, 0x360}.
   80 After the board is found it generates a DMA-timeout interrupt and uses
  81 autoIRQ to find the IRQ line.  The DMA channel defaults to LANCE_DMA, or it
  82 can be set with the low bits of the otherwise-unused dev->mem_start value.
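      For example, a dev->mem_start value of 5 selects DMA channel 5, while a value
      with the low three bits clear leaves the default in place (dev->dma if it was
      preset, else LANCE_DMA).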
  83 
  84 The HP-J2405A board is an exception: with this board it's easy to read the
  85 EEPROM-set values for the base, IRQ, and DMA.  Of course you must already
  86 _know_ the base address, but that entry is for changing the EEPROM.
  87 
  88 III. Driver operation
  89 
  90 IIIa. Ring buffers
  91 The LANCE uses ring buffers of Tx and Rx descriptors.  Each entry describes
  92 the base and length of the data buffer, along with status bits.  The length
  93 of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
  94 the buffer length (rather than being directly the buffer length) for
   95 implementation ease.  The current values are 4 (Tx) and 4 (Rx), which leads to
   96 ring sizes of 16 (Tx) and 16 (Rx).  Increasing the number of ring entries
  97 needlessly uses extra space and reduces the chance that an upper layer will
  98 be able to reorder queued Tx packets based on priority.  Decreasing the number
  99 of entries makes it more difficult to achieve back-to-back packet transmission
 100 and increases the chance that Rx ring will overflow.  (Consider the worst case
 101 of receiving back-to-back minimum-sized packets.)
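
      As a worked example with the current settings, LANCE_LOG_RX_BUFFERS == 4 gives

          RX_RING_SIZE     == 1 << 4  == 16
          RX_RING_MOD_MASK == 16 - 1  == 0x0f
          RX_RING_LEN_BITS == 4 << 29 == 0x80000000

      and lance_init_ring() places that length code in the top three bits of the
      ring-pointer words it writes into the init block (ring base | RX_RING_LEN_BITS).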
 102 
 103 The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
 104 statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
 105 avoid the administrative overhead. For the Rx side this avoids dynamically
 106 allocating full-sized buffers "just in case", at the expense of a
 107 memory-to-memory data copy for each packet received.  For most systems this
  108 is a good tradeoff: the Rx buffer will always be in low memory, the copy
 109 is inexpensive, and it primes the cache for later packet processing.  For Tx
 110 the buffers are only used when needed as low-memory bounce buffers.
 111 
  112 IIIb. 16M memory limitations.
  113 For the ISA bus-master mode, all structures used directly by the LANCE (the
  114 initialization block, Rx and Tx rings, and data buffers) must be
  115 accessible from the ISA bus, i.e. in the lower 16M of real memory.
 116 This is a problem for current Linux kernels on >16M machines. The network
 117 devices are initialized after memory initialization, and the kernel doles out
 118 memory from the top of memory downward.  The current solution is to have a
 119 special network initialization routine that's called before memory
 120 initialization; this will eventually be generalized for all network devices.
 121 As mentioned before, low-memory "bounce-buffers" are used when needed.
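
      As a concrete illustration, the transmit path below bounces a packet whenever
      its data would reach past the 16M line, roughly:

          if ((int)(skb+1) + skb->len > 0x01000000)
              memcpy(&lp->tx_bounce_buffs[entry], skb+1, skb->len);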
 122 
  123 IIIc. Synchronization
 124 The driver runs as two independent, single-threaded flows of control.  One
 125 is the send-packet routine, which enforces single-threaded use by the
 126 dev->tbusy flag.  The other thread is the interrupt handler, which is single
 127 threaded by the hardware and other software.
 128 
 129 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
 130 flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
  131 queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
 132 the 'lp->tx_full' flag.
 133 
 134 The interrupt handler has exclusive control over the Rx ring and records stats
 135 from the Tx ring.  (The Tx-done interrupt can't be selectively turned off, so
 136 we can't avoid the interrupt overhead by having the Tx routine reap the Tx
 137 stats.)  After reaping the stats, it marks the queue entry as empty by setting
 138 the 'base' to zero.  Iff the 'lp->tx_full' flag is set, it clears both the
 139 tx_full and tbusy flags.
 140 
 141 */
 142 
 143 /* Set the number of Tx and Rx buffers, using Log_2(# buffers).
  144    Reasonable default values are 16 Tx buffers and 16 Rx buffers.
  145    That translates to 4 for both rings (16 == 2^^4). */
 146 #ifndef LANCE_LOG_TX_BUFFERS
 147 #define LANCE_LOG_TX_BUFFERS 4
 148 #define LANCE_LOG_RX_BUFFERS 4
 149 #endif
 150 
 151 #define TX_RING_SIZE            (1 << (LANCE_LOG_TX_BUFFERS))
 152 #define TX_RING_MOD_MASK        (TX_RING_SIZE - 1)
 153 #define TX_RING_LEN_BITS        ((LANCE_LOG_TX_BUFFERS) << 29)
 154 
 155 #define RX_RING_SIZE            (1 << (LANCE_LOG_RX_BUFFERS))
 156 #define RX_RING_MOD_MASK        (RX_RING_SIZE - 1)
 157 #define RX_RING_LEN_BITS        ((LANCE_LOG_RX_BUFFERS) << 29)
 158 
 159 #define PKT_BUF_SZ      1544
 160 
 161 /* Offsets from base I/O address. */
 162 #define LANCE_DATA 0x10
 163 #define LANCE_ADDR 0x12
 164 #define LANCE_RESET 0x14
 165 #define LANCE_BUS_IF 0x16
 166 #define LANCE_TOTAL_SIZE 0x18
 167 
 168 /* The LANCE Rx and Tx ring descriptors. */
 169 struct lance_rx_head {
 170     int base;
 171     short buf_length;           /* This length is 2's complement (negative)! */
 172     short msg_length;           /* This length is "normal". */
 173 };
 174 
 175 struct lance_tx_head {
 176     int   base;
 177     short length;               /* Length is 2's complement (negative)! */
 178     short misc;
 179 };
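
      /* A worked example of the negative lengths above: with PKT_BUF_SZ == 1544,
         each Rx descriptor is loaded with buf_length = -1544, which is 0xf9f8 when
         read back as an unsigned 16-bit word. */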
 180 
 181 /* The LANCE initialization block, described in databook. */
 182 struct lance_init_block {
 183     unsigned short mode;        /* Pre-set mode (reg. 15) */
 184     unsigned char phys_addr[6]; /* Physical ethernet address */
 185     unsigned filter[2];         /* Multicast filter (unused). */
 186     /* Receive and transmit ring base, along with extra bits. */
 187     unsigned rx_ring;           /* Tx and Rx ring base pointers */
 188     unsigned tx_ring;
 189 };
 190 
 191 struct lance_private {
 192     char devname[8];
  193     /* These must be aligned on 8-byte boundaries. */
 194     struct lance_rx_head rx_ring[RX_RING_SIZE];
 195     struct lance_tx_head tx_ring[TX_RING_SIZE];
 196     struct lance_init_block     init_block;
 197     long rx_buffs;              /* Address of Rx and Tx buffers. */
 198     /* Tx low-memory "bounce buffer" address. */
 199     char (*tx_bounce_buffs)[PKT_BUF_SZ];
 200     int cur_rx, cur_tx;         /* The next free ring entry */
 201     int dirty_rx, dirty_tx;     /* The ring entries to be free()ed. */
 202     int dma;
 203     struct enet_statistics stats;
 204     char old_lance;
 205     int pad0, pad1;             /* Used for alignment */
 206 };
 207 
 208 static unsigned long lance_probe1(short ioaddr, unsigned long mem_start);
 209 static int lance_open(struct device *dev);
 210 static void lance_init_ring(struct device *dev);
 211 static int lance_start_xmit(struct sk_buff *skb, struct device *dev);
 212 static int lance_rx(struct device *dev);
 213 static void lance_interrupt(int reg_ptr);
 214 static int lance_close(struct device *dev);
 215 static struct enet_statistics *lance_get_stats(struct device *dev);
 216 #ifdef HAVE_MULTICAST
 217 static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);
 218 #endif
 219 
 220 
 221 
 222 unsigned long lance_init(unsigned long mem_start, unsigned long mem_end)
 223 {
 224     int *port, ports[] = {0x300, 0x320, 0x340, 0x360, 0};
 225 
 226     for (port = &ports[0]; *port; port++) {
 227         int ioaddr = *port;
 228 
 229         if (   check_region(ioaddr, LANCE_TOTAL_SIZE) == 0
 230             && inb(ioaddr + 14) == 0x57
 231             && inb(ioaddr + 15) == 0x57) {
 232             mem_start = lance_probe1(ioaddr, mem_start);
 233         }
 234     }
 235 
 236     return mem_start;
 237 }
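
      /* The 0x57 0x57 check above reads the last two bytes of the 16-byte station
         address PROM; presumably this is the 'W','W' signature carried by
         NE2100-class boards, and it keeps lance_probe1() from being run against
         random ports. */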
 238 
 239 static unsigned long lance_probe1(short ioaddr, unsigned long mem_start)
 240 {
 241     struct device *dev;
 242     struct lance_private *lp;
 243     int hpJ2405A = 0;
 244     int i, reset_val;
 245 
 246     hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00
 247                 && inb(ioaddr+2) == 0x09);
 248 
 249     /* Reset the LANCE.  */
 250     reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
 251 
  252     /* The Un-Reset is only needed for the real NE2100, and will
 253        confuse the HP board. */
 254     if (!hpJ2405A)
 255         outw(reset_val, ioaddr+LANCE_RESET);
 256 
 257     outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
 258     if (inw(ioaddr+LANCE_DATA) != 0x0004)
 259         return mem_start;
 260 
 261     dev = init_etherdev(0, sizeof(struct lance_private)
 262                         + PKT_BUF_SZ*(RX_RING_SIZE + TX_RING_SIZE),
 263                         &mem_start);
 264 
 265     printk("%s: LANCE at %#3x,", dev->name, ioaddr);
 266 
 267     /* There is a 16 byte station address PROM at the base address.
 268        The first six bytes are the station address. */
 269     for (i = 0; i < 6; i++)
 270         printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
 271 
 272     dev->base_addr = ioaddr;
 273     snarf_region(ioaddr, LANCE_TOTAL_SIZE);
 274 
 275     /* Make certain the data structures used by the LANCE are aligned. */
 276     dev->priv = (void *)(((int)dev->priv + 7) & ~7);
 277     lp = (struct lance_private *)dev->priv;
 278     lp->rx_buffs = (long)dev->priv + sizeof(struct lance_private);
 279     lp->tx_bounce_buffs = (char (*)[PKT_BUF_SZ])
 280                            (lp->rx_buffs + PKT_BUF_SZ*RX_RING_SIZE);
 281 
 282 #ifndef final_version
 283     /* This should never happen. */
 284     if ((int)(lp->rx_ring) & 0x07) {
 285         printk(" **ERROR** LANCE Rx and Tx rings not on even boundary.\n");
 286         return mem_start;
 287     }
 288 #endif
 289 
  290     outw(88, ioaddr+LANCE_ADDR);        /* Distinguish the original LANCE from the PCnet-ISA. */
 291     lp->old_lance = (inw(ioaddr+LANCE_DATA) != 0x3003);
 292 
 293 #if defined(notdef)
 294     printk(lp->old_lance ? " original LANCE (%04x)" : " PCnet-ISA LANCE (%04x)",
 295            inw(ioaddr+LANCE_DATA));
 296 #endif
 297 
 298     lp->init_block.mode = 0x0003;       /* Disable Rx and Tx. */
 299     for (i = 0; i < 6; i++)
 300         lp->init_block.phys_addr[i] = dev->dev_addr[i];
 301     lp->init_block.filter[0] = 0x00000000;
 302     lp->init_block.filter[1] = 0x00000000;
 303     lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
 304     lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;
 305 
 306     outw(0x0001, ioaddr+LANCE_ADDR);
 307     outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
 308     outw(0x0002, ioaddr+LANCE_ADDR);
 309     outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);
 310     outw(0x0000, ioaddr+LANCE_ADDR);
 311 
 312     if (hpJ2405A) {
 313         char dma_tbl[4] = {3, 5, 6, 7};
 314         char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
 315         short reset_val = inw(ioaddr+LANCE_RESET);
 316         dev->dma = dma_tbl[(reset_val >> 2) & 3];
 317         dev->irq = irq_tbl[(reset_val >> 4) & 7];
 318         printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
 319     } else {
 320         /* The DMA channel may be passed in on this parameter. */
 321         if (dev->mem_start & 0x07)
 322             dev->dma = dev->mem_start & 0x07;
 323         else if (dev->dma == 0)
 324             dev->dma = LANCE_DMA;
 325 
  326         /* To auto-IRQ we enable the initialization-done and DMA-error
  327            interrupts.  For now we will always get a DMA error. */
 328         if (dev->irq < 2) {
 329 
 330             autoirq_setup(0);
 331 
 332             /* Trigger an initialization just for the interrupt. */
 333             outw(0x0041, ioaddr+LANCE_DATA);
 334 
 335             dev->irq = autoirq_report(1);
 336             if (dev->irq)
 337                 printk(", probed IRQ %d, fixed at DMA %d.\n",
 338                        dev->irq, dev->dma);
 339             else {
 340                 printk(", failed to detect IRQ line.\n");
 341                 return mem_start;
 342             }
 343         } else
 344             printk(" assigned IRQ %d DMA %d.\n", dev->irq, dev->dma);
 345     }
 346 
 347     if (! lp->old_lance) {
 348         /* Turn on auto-select of media (10baseT or BNC) so that the user
 349            can watch the LEDs even if the board isn't opened. */
 350         outw(0x0002, ioaddr+LANCE_ADDR);
 351         outw(0x0002, ioaddr+LANCE_BUS_IF);
 352     }
 353 
 354     if (lance_debug > 0)
 355         printk(version);
 356 
 357     /* The LANCE-specific entries in the device structure. */
 358     dev->open = &lance_open;
 359     dev->hard_start_xmit = &lance_start_xmit;
 360     dev->stop = &lance_close;
 361     dev->get_stats = &lance_get_stats;
 362 #ifdef HAVE_MULTICAST
 363     dev->set_multicast_list = &set_multicast_list;
 364 #endif
 365 
 366     return mem_start;
 367 }
 368 
 369 
 370 static int
 371 lance_open(struct device *dev)
 372 {
 373     struct lance_private *lp = (struct lance_private *)dev->priv;
 374     int ioaddr = dev->base_addr;
 375     int i;
 376 
 377     if (request_irq(dev->irq, &lance_interrupt)) {
 378         return -EAGAIN;
 379     }
 380 
 381     if (request_dma(dev->dma)) {
 382         free_irq(dev->irq);
 383         return -EAGAIN;
 384     }
 385     irq2dev_map[dev->irq] = dev;
 386 
 387     /* Reset the LANCE */
 388     inw(ioaddr+LANCE_RESET);
 389 
 390     /* The DMA controller is used as a no-operation slave, "cascade mode". */
 391     enable_dma(dev->dma);
 392     set_dma_mode(dev->dma, DMA_MODE_CASCADE);
 393 
 394     /* Un-Reset the LANCE, needed only for the NE2100. */
 395     if (lp->old_lance)
 396         outw(0, ioaddr+LANCE_RESET);
 397 
 398     if (! lp->old_lance) {
 399         /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
 400         outw(0x0002, ioaddr+LANCE_ADDR);
 401         outw(0x0002, ioaddr+LANCE_BUS_IF);
 402     }
 403 
 404     if (lance_debug > 1)
 405         printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
 406                dev->name, dev->irq, dev->dma, (int) lp->tx_ring, (int) lp->rx_ring,
 407                (int) &lp->init_block);
 408 
 409     lance_init_ring(dev);
 410     /* Re-initialize the LANCE, and start it when done. */
 411     outw(0x0001, ioaddr+LANCE_ADDR);
 412     outw((short) (int) &lp->init_block, ioaddr+LANCE_DATA);
 413     outw(0x0002, ioaddr+LANCE_ADDR);
 414     outw(((int)&lp->init_block) >> 16, ioaddr+LANCE_DATA);
 415 
 416     outw(0x0004, ioaddr+LANCE_ADDR);
 417     outw(0x0d15, ioaddr+LANCE_DATA);
 418 
 419     outw(0x0000, ioaddr+LANCE_ADDR);
 420     outw(0x0001, ioaddr+LANCE_DATA);
 421 
 422     dev->tbusy = 0;
 423     dev->interrupt = 0;
 424     dev->start = 1;
 425     i = 0;
 426     while (i++ < 100)
 427         if (inw(ioaddr+LANCE_DATA) & 0x0100)
 428             break;
 429     outw(0x0142, ioaddr+LANCE_DATA);
 430 
 431     if (lance_debug > 2)
 432         printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
 433                dev->name, i, (int) &lp->init_block, inw(ioaddr+LANCE_DATA));
 434 
 435     return 0;                   /* Always succeed */
 436 }
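
      /* A sketch of the register usage during open, assuming the usual LANCE bit
         names (an assumption beyond what this file states): registers 1 and 2 take
         the low and high 16 bits of the init block's bus address, writing 0x0001
         (INIT) to CSR0 starts the chip reading that block, the wait loop polls for
         0x0100 (IDON, initialization done), and 0x0142 == IDON | INEA | STRT
         acknowledges IDON, enables interrupts, and starts the transmitter and
         receiver. */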
 437 
 438 /* Initialize the LANCE Rx and Tx rings. */
 439 static void
 440 lance_init_ring(struct device *dev)
 441 {
 442     struct lance_private *lp = (struct lance_private *)dev->priv;
 443     int i;
 444 
 445     lp->cur_rx = lp->cur_tx = 0;
 446     lp->dirty_rx = lp->dirty_tx = 0;
 447 
 448     for (i = 0; i < RX_RING_SIZE; i++) {
 449         lp->rx_ring[i].base = (lp->rx_buffs + i*PKT_BUF_SZ) | 0x80000000;
 450         lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
 451     }
 452     /* The Tx buffer address is filled in as needed, but we do need to clear
 453        the upper ownership bit. */
 454     for (i = 0; i < TX_RING_SIZE; i++) {
 455         lp->tx_ring[i].base = 0;
 456     }
 457 
 458     lp->init_block.mode = 0x0000;
 459     for (i = 0; i < 6; i++)
 460         lp->init_block.phys_addr[i] = dev->dev_addr[i];
 461     lp->init_block.filter[0] = 0x00000000;
 462     lp->init_block.filter[1] = 0x00000000;
 463     lp->init_block.rx_ring = (int)lp->rx_ring | RX_RING_LEN_BITS;
 464     lp->init_block.tx_ring = (int)lp->tx_ring | TX_RING_LEN_BITS;
 465 }
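
      /* A note on descriptor ownership: OR'ing 0x80000000 into a descriptor's
         'base' hands that entry to the LANCE, and the sign bit doubles as the
         ownership flag.  That is why lance_rx() below can poll with

             while (lp->rx_ring[entry].base >= 0)

         to pick up entries the chip has handed back, and why the transmit path
         writes the base address with the ownership bits last. */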
 466 
 467 static int
 468 lance_start_xmit(struct sk_buff *skb, struct device *dev)
 469 {
 470     struct lance_private *lp = (struct lance_private *)dev->priv;
 471     int ioaddr = dev->base_addr;
 472     int entry;
 473 
 474     /* Transmitter timeout, serious problems. */
 475     if (dev->tbusy) {
 476         int tickssofar = jiffies - dev->trans_start;
 477         if (tickssofar < 10)
 478             return 1;
 479         outw(0, ioaddr+LANCE_ADDR);
 480         printk("%s: transmit timed out, status %4.4x, resetting.\n",
 481                dev->name, inw(ioaddr+LANCE_DATA));
 482         outw(0x0001, ioaddr+LANCE_DATA);
 483         lp->stats.tx_errors++;
 484 #ifndef final_version
 485         {
 486             int i;
 487             printk(" Ring data dump: dirty_tx %d cur_tx %d cur_rx %d.",
 488                    lp->dirty_tx, lp->cur_tx, lp->cur_rx);
 489             for (i = 0 ; i < RX_RING_SIZE; i++)
 490                 printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
 491                        lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
 492                        lp->rx_ring[i].msg_length);
 493             for (i = 0 ; i < TX_RING_SIZE; i++)
 494                 printk(" %s%08x %04x %04x", i & 0x3 ? "" : "\n ",
 495                        lp->tx_ring[i].base, -lp->tx_ring[i].length,
 496                        lp->tx_ring[i].misc);
 497             printk("\n");
 498         }
 499 #endif
 500         lance_init_ring(dev);
 501         outw(0x0043, ioaddr+LANCE_DATA);
 502 
 503         dev->tbusy=0;
 504         dev->trans_start = jiffies;
 505 
 506         return 0;
 507     }
 508 
 509     if (skb == NULL) {
 510         dev_tint(dev);
 511         return 0;
 512     }
 513 
 514     /* Fill in the ethernet header. */
 515     if (!skb->arp  &&  dev->rebuild_header(skb+1, dev)) {
 516         skb->dev = dev;
 517         arp_queue (skb);
 518         return 0;
 519     }
 520     skb->arp=1;
 521 
 522     if (skb->len <= 0)
 523         return 0;
 524 
 525     if (lance_debug > 3) {
 526         outw(0x0000, ioaddr+LANCE_ADDR);
 527         printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
 528                inw(ioaddr+LANCE_DATA));
 529         outw(0x0000, ioaddr+LANCE_DATA);
 530     }
 531 
 532     /* Block a timer-based transmit from overlapping.  This could better be
 533        done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
 534     if (set_bit(0, (void*)&dev->tbusy) != 0)
 535         printk("%s: Transmitter access conflict.\n", dev->name);
 536 
 537     /* Fill in a Tx ring entry */
 538 
 539     /* Mask to ring buffer boundary. */
 540     entry = lp->cur_tx & TX_RING_MOD_MASK;
 541 
  542     /* Caution: the write order is important here; set the base address
 543        with the "ownership" bits last. */
 544 
  545     /* The old LANCE chips don't automatically pad buffers to min. size. */
 546     if (lp->old_lance) {
 547         lp->tx_ring[entry].length =
 548             -(ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN);
 549     } else
 550         lp->tx_ring[entry].length = -skb->len;
 551 
 552     lp->tx_ring[entry].misc = 0x0000;
 553 
 554     /* If any part of this buffer is >16M we must copy it to a low-memory
 555        buffer. */
 556     if ((int)(skb+1) + skb->len > 0x01000000) {
 557         if (lance_debug > 5)
 558             printk("%s: bouncing a high-memory packet (%#x).\n",
 559                    dev->name, (int)(skb+1));
 560         memcpy(&lp->tx_bounce_buffs[entry], skb+1, skb->len);
 561         lp->tx_ring[entry].base =
 562             (int)(lp->tx_bounce_buffs + entry) | 0x83000000;
 563         if (skb->free)
 564             kfree_skb (skb, FREE_WRITE);
 565     } else
 566     {
 567         /* Gimme!!! */
 568         if(skb->free==0)
 569                 skb_kept_by_device(skb);
 570         lp->tx_ring[entry].base = (int)(skb+1) | 0x83000000;
 571     }
 572     lp->cur_tx++;
 573 
 574     /* Trigger an immediate send poll. */
 575     outw(0x0000, ioaddr+LANCE_ADDR);
 576     outw(0x0048, ioaddr+LANCE_DATA);
 577 
 578     dev->trans_start = jiffies;
 579 
 580     if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base == 0)
 581         dev->tbusy=0;
 582 
 583     return 0;
 584 }
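
      /* The 0x83000000 OR'ed into the Tx 'base' above, assuming the usual LANCE
         Tx-descriptor bit layout (the bit names are an assumption, not something
         this file spells out), breaks down as

             0x80000000   OWN  - the LANCE now owns the descriptor
             0x02000000   STP  - this buffer starts the packet
             0x01000000   ENP  - this buffer also ends the packet

         so each packet is sent from one unchained buffer, matching the "no buffer
         chaining" policy described in the Theory of Operation. */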
 585 
 586 /* The LANCE interrupt handler. */
 587 static void
 588 lance_interrupt(int reg_ptr)
 589 {
 590     int irq = -(((struct pt_regs *)reg_ptr)->orig_eax+2);
 591     struct device *dev = (struct device *)(irq2dev_map[irq]);
 592     struct lance_private *lp;
 593     int csr0, ioaddr;
 594 
 595     if (dev == NULL) {
 596         printk ("lance_interrupt(): irq %d for unknown device.\n", irq);
 597         return;
 598     }
 599 
 600     ioaddr = dev->base_addr;
 601     lp = (struct lance_private *)dev->priv;
 602     if (dev->interrupt)
 603         printk("%s: Re-entering the interrupt handler.\n", dev->name);
 604 
 605     dev->interrupt = 1;
 606 
 607     outw(0x00, dev->base_addr + LANCE_ADDR);
 608     csr0 = inw(dev->base_addr + LANCE_DATA);
 609 
 610     /* Acknowledge all of the current interrupt sources ASAP. */
 611     outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
 612 
 613     if (lance_debug > 5)
 614         printk("%s: interrupt  csr0=%#2.2x new csr=%#2.2x.\n",
 615                dev->name, csr0, inw(dev->base_addr + LANCE_DATA));
 616 
 617     if (csr0 & 0x0400)          /* Rx interrupt */
 618         lance_rx(dev);
 619 
 620     if (csr0 & 0x0200) {        /* Tx-done interrupt */
 621         int dirty_tx = lp->dirty_tx;
 622 
 623         while (dirty_tx < lp->cur_tx) {
 624             int entry = dirty_tx & TX_RING_MOD_MASK;
 625             int status = lp->tx_ring[entry].base;
 626             void *databuff;
 627             
 628             if (status < 0)
 629                 break;          /* It still hasn't been Txed */
 630 
 631             lp->tx_ring[entry].base = 0;
 632             databuff = (void*)(status & 0x00ffffff);
 633 
  634             if (status & 0x40000000) { /* There was a major error, log it. */
 635                 int err_status = lp->tx_ring[entry].misc;
 636                 lp->stats.tx_errors++;
 637                 if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
 638                 if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
 639                 if (err_status & 0x1000) lp->stats.tx_window_errors++;
 640                 if (err_status & 0x4000) lp->stats.tx_fifo_errors++;
 641                 /* We should re-init() after the FIFO error. */
 642             } else if (status & 0x18000000)
 643                 lp->stats.collisions++;
 644             else
 645                 lp->stats.tx_packets++;
 646 
 647             /* We don't free the skb if it's a data-only copy in the bounce
 648                buffer.  The address checks here are sorted -- the first test
 649                should always work.  */
 650             if (databuff >= (void*)(&lp->tx_bounce_buffs[TX_RING_SIZE])
 651                 || databuff < (void*)(lp->tx_bounce_buffs)) {
 652                 struct sk_buff *skb = ((struct sk_buff *)databuff) - 1;
 653                 if (skb->free)
 654                     kfree_skb(skb, FREE_WRITE);
 655                 else
 656                     skb_device_release(skb,FREE_WRITE);
 657                 /* Warning: skb may well vanish at the point you call device_release! */
 658             }
 659             dirty_tx++;
 660         }
 661 
 662 #ifndef final_version
 663         if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
 664             printk("out-of-sync dirty pointer, %d vs. %d.\n",
 665                    dirty_tx, lp->cur_tx);
 666             dirty_tx += TX_RING_SIZE;
 667         }
 668 #endif
 669 
 670         if (dev->tbusy  &&  dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
 671             /* The ring is no longer full, clear tbusy. */
 672             dev->tbusy = 0;
 673             mark_bh(INET_BH);
 674         }
 675 
 676         lp->dirty_tx = dirty_tx;
 677     }
 678 
 679     if (csr0 & 0x8000) {
 680         if (csr0 & 0x4000) lp->stats.tx_errors++;
 681         if (csr0 & 0x1000) lp->stats.rx_errors++;
 682     }
 683 
 684     /* Clear the interrupts we've handled. */
 685     outw(0x0000, dev->base_addr + LANCE_ADDR);
 686     outw(0x7f40, dev->base_addr + LANCE_DATA);
 687 
 688     if (lance_debug > 4)
 689         printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
 690                dev->name, inw(ioaddr + LANCE_ADDR),
 691                inw(dev->base_addr + LANCE_DATA));
 692 
 693     dev->interrupt = 0;
 694     return;
 695 }
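
      /* A sketch of the CSR0 bits the handler above tests, assuming the usual
         LANCE CSR0 layout (the bit names are an assumption, not from this file):

             0x8000  ERR   summary error                   (csr0 & 0x8000 branch)
             0x4000  BABL  transmitter babble              -> tx_errors
             0x1000  MISS  missed (dropped) receive frame  -> rx_errors
             0x0400  RINT  receive interrupt               -> lance_rx()
             0x0200  TINT  transmit-done interrupt         -> reap the Tx ring
             0x0040  INEA  interrupt enable

         Cause bits are cleared by writing them back as ones, so the early
         "csr0 & ~0x004f" acknowledge masks out INEA and the low control bits
         (INIT, STRT, STOP, TDMD) to avoid restarting the chip as a side effect. */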
 696 
 697 static int
 698 lance_rx(struct device *dev)
 699 {
 700     struct lance_private *lp = (struct lance_private *)dev->priv;
 701     int entry = lp->cur_rx & RX_RING_MOD_MASK;
 702         
 703     /* If we own the next entry, it's a new packet. Send it up. */
 704     while (lp->rx_ring[entry].base >= 0) {
 705         int status = lp->rx_ring[entry].base >> 24;
 706 
 707         if (status & 0x40) {    /* There was an error. */
 708             lp->stats.rx_errors++;
 709             if (status & 0x20) lp->stats.rx_frame_errors++;
 710             if (status & 0x10) lp->stats.rx_over_errors++;
 711             if (status & 0x08) lp->stats.rx_crc_errors++;
 712             if (status & 0x04) lp->stats.rx_fifo_errors++;
 713         } else {
 714             /* Malloc up new buffer, compatible with net-2e. */
 715             short pkt_len = lp->rx_ring[entry].msg_length;
 716             int sksize = sizeof(struct sk_buff) + pkt_len;
 717             struct sk_buff *skb;
 718 
 719             skb = alloc_skb(sksize, GFP_ATOMIC);
 720             if (skb == NULL) {
 721                 printk("%s: Memory squeeze, deferring packet.\n", dev->name);
 722                 lp->stats.rx_dropped++; /* Really, deferred. */
 723                 break;
 724             }
 725             skb->mem_len = sksize;
 726             skb->mem_addr = skb;
 727             skb->len = pkt_len;
 728             skb->dev = dev;
 729             memcpy((unsigned char *) (skb + 1),
 730                    (unsigned char *)(lp->rx_ring[entry].base & 0x00ffffff),
 731                    pkt_len);
 732 #ifdef HAVE_NETIF_RX
 733             netif_rx(skb);
 734 #else
 735             skb->lock = 0;
 736             if (dev_rint((unsigned char*)skb, pkt_len, IN_SKBUFF, dev) != 0) {
 737                 kfree_skbmem(skb, sksize);
 738                 lp->stats.rx_dropped++;
 739                 break;
 740             }
 741 #endif
 742             lp->stats.rx_packets++;
 743         }
 744 
 745         lp->rx_ring[entry].base |= 0x80000000;
 746         entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
 747     }
 748 
 749     /* We should check that at least two ring entries are free.  If not,
 750        we should free one and mark stats->rx_dropped++. */
 751 
 752     return 0;
 753 }
 754 
 755 static int
 756 lance_close(struct device *dev)
 757 {
 758     int ioaddr = dev->base_addr;
 759     struct lance_private *lp = (struct lance_private *)dev->priv;
 760 
 761     dev->start = 0;
 762     dev->tbusy = 1;
 763 
 764     outw(112, ioaddr+LANCE_ADDR);
 765     lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
 766 
 767     outw(0, ioaddr+LANCE_ADDR);
 768 
 769     if (lance_debug > 1)
 770         printk("%s: Shutting down ethercard, status was %2.2x.\n",
 771                dev->name, inw(ioaddr+LANCE_DATA));
 772 
 773     /* We stop the LANCE here -- it occasionally polls
 774        memory if we don't. */
 775     outw(0x0004, ioaddr+LANCE_DATA);
 776 
 777     disable_dma(dev->dma);
 778 
 779     free_irq(dev->irq);
 780     free_dma(dev->dma);
 781 
 782     irq2dev_map[dev->irq] = 0;
 783 
 784     return 0;
 785 }
 786 
 787 static struct enet_statistics *
 788 lance_get_stats(struct device *dev)
 789 {
 790     struct lance_private *lp = (struct lance_private *)dev->priv;
 791     short ioaddr = dev->base_addr;
 792     short saved_addr;
 793 
 794     cli();
 795     saved_addr = inw(ioaddr+LANCE_ADDR);
 796     outw(112, ioaddr+LANCE_ADDR);
 797     lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
 798     outw(saved_addr, ioaddr+LANCE_ADDR);
 799     sti();
 800 
 801     return &lp->stats;
 802 }
 803 
 804 #ifdef HAVE_MULTICAST
 805 /* Set or clear the multicast filter for this adaptor.
 806    num_addrs == -1      Promiscuous mode, receive all packets
 807    num_addrs == 0       Normal mode, clear multicast list
 808    num_addrs > 0        Multicast mode, receive normal and MC packets, and do
 809                         best-effort filtering.
 810  */
 811 static void
 812 set_multicast_list(struct device *dev, int num_addrs, void *addrs)
 813 {
 814     short ioaddr = dev->base_addr;
 815 
  816     /* Promiscuous mode is enabled only when requested; the multicast filter is simply opened or cleared. */
 817     outw(0, ioaddr+LANCE_ADDR);
 818     outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance.  */
 819 
 820     outw(15, ioaddr+LANCE_ADDR);
 821     if (num_addrs >= 0) {
 822         short multicast_table[4];
 823         int i;
 824         /* We don't use the multicast table, but rely on upper-layer filtering. */
 825         memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
 826         for (i = 0; i < 4; i++) {
 827             outw(8 + i, ioaddr+LANCE_ADDR);
 828             outw(multicast_table[i], ioaddr+LANCE_DATA);
 829         }
  830         outw(15, ioaddr+LANCE_ADDR);  outw(0x0000, ioaddr+LANCE_DATA); /* Re-select reg 15 (the loop left the address port at 11) and unset promiscuous mode. */
 831     } else {
 832         outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
 833     }
 834 
 835     outw(0, ioaddr+LANCE_ADDR);
 836     outw(0x0142, ioaddr+LANCE_DATA); /* Resume normal operation. */
 837 }
 838 #endif
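
      /* A note on the register usage in set_multicast_list(), assuming the usual
         LANCE register numbering (an assumption, not stated here): register 15 is
         the mode register, whose 0x8000 bit enables promiscuous reception, and
         registers 8-11 hold the 64-bit logical-address (multicast) filter,
         written 16 bits at a time. */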
 839 
 840 /*
 841  * Local variables:
 842  *  compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c lance.c"
 843  * End:
 844  */
