This source file includes the following definitions:
- ni65_open
- ni65_close
- ni65_probe
- ni65_probe1
- am7990_reinit
- ni65_interrupt
- xmit_intr
- recv_intr
- ni65_send_packet
- ni65_get_stats
- set_multicast_list
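/*
 * ni6510 (AM7990 "LANCE" based) ISA Ethernet driver.
 */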
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/malloc.h>
#include <linux/interrupt.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include "ni65.h"

#ifndef HAVE_PORTRESERVE
#define check_region(ioaddr, size) 0
#define snarf_region(ioaddr, size) do ; while (0)
#endif

#ifndef NET_DEBUG
#define NET_DEBUG 2
#endif

#define NI65_TOTAL_SIZE 16

#define SA_ADDR0 0x02
#define SA_ADDR1 0x07
#define SA_ADDR2 0x01
#define CARD_ID0 0x00
#define CARD_ID1 0x55

#define PORT dev->base_addr

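/*
 * Receive/transmit descriptor ring sizes.  The *MASK values carry the ring
 * length (as a power of two) in bits 13-15, ready to be OR'ed into the high
 * word of the ring pointers in the LANCE init block (see am7990_reinit()).
 */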
#define RMDNUM 8
#define RMDNUMMASK 0x6000
#define TMDNUM 4
#define TMDNUMMASK 0x4000

#define R_BUF_SIZE 1518
#define T_BUF_SIZE 1518

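/* 8 spare bytes so ni65_probe1() can align the ring area on an 8-byte boundary */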
#define MEMSIZE (8+RMDNUM*8+TMDNUM*8)

#define L_DATAREG 0x00
#define L_ADDRREG 0x02

#define L_RESET 0x04
#define L_CONFIG 0x05
#define L_EBASE 0x08

#define CSR0 0x00
#define CSR1 0x01
#define CSR2 0x02
#define CSR3 0x03

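/*
 * Define NO_STATIC to DMA transmit data straight out of the socket buffers;
 * leave it undefined (as here) to copy each frame into one of the statically
 * allocated bounce buffers in priv->tmdbufs[] instead.
 */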
#undef NO_STATIC

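/*
 * Register access helpers: select a CSR via the address port, then read or
 * write the data port.  The dummy inw() after each outw() is presumably a
 * settling read for the slow ISA register interface.
 */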
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
                           outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
                      inw(PORT+L_DATAREG))
#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}

static int ni65_probe1(struct device *dev,int);
static void ni65_interrupt(int reg_ptr);
static void recv_intr(struct device *dev);
static void xmit_intr(struct device *dev);
static int ni65_open(struct device *dev);
static int am7990_reinit(struct device *dev);
static int ni65_send_packet(struct sk_buff *skb, struct device *dev);
static int ni65_close(struct device *dev);
static struct enet_statistics *ni65_get_stats(struct device *);

static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);

extern void *irq2dev_map[16];

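/*
 * Per-device private data: the LANCE init block, the DMA-able descriptor
 * memory, transmit/receive ring bookkeeping and the interface statistics.
 */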
struct priv
{
  struct init_block ib;
  void *memptr;
  struct rmd *rmdhead;
  struct tmd *tmdhead;
  int rmdnum;
  int tmdnum,tmdlast;
  struct sk_buff *recv_skb[RMDNUM];
  void *tmdbufs[TMDNUM];
  int lock,xmit_queued;
  struct enet_statistics stats;
};

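/* IRQ and DMA channel as selected by the two fields of the board's config register */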
int irqtab[] = { 9,12,15,5 };
int dmatab[] = { 0,3,5,6 };

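/* Open the interface: (re)initialize the LANCE and mark the device as running. */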
static int ni65_open(struct device *dev)
{
  if(am7990_reinit(dev))
  {
    dev->tbusy = 0;
    dev->interrupt = 0;
    dev->start = 1;
    return 0;
  }
  else
  {
    dev->start = 0;
    return -EAGAIN;
  }
}

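/* Close the interface: stop the chip via the reset port and mark the device down. */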
static int ni65_close(struct device *dev)
{
  outw(0,PORT+L_RESET);
  dev->tbusy = 1;
  dev->start = 0;
  return 0;
}

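/*
 * Probe for a card: try the user-supplied base address if given, otherwise
 * scan the usual I/O ports and check the two card-ID bytes at L_EBASE+6/+7.
 */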
int ni65_probe(struct device *dev)
{
  int *port, ports[] = {0x300,0x320,0x340,0x360, 0};
  int base_addr = dev->base_addr;

  if (base_addr > 0x1ff)
    return ni65_probe1(dev, base_addr);
  else if (base_addr > 0)
    return ENXIO;

  for (port = ports; *port; port++)
  {
    int ioaddr = *port;
    if (check_region(ioaddr, NI65_TOTAL_SIZE))
      continue;
    if( !(inb(ioaddr+L_EBASE+6) == CARD_ID0) ||
        !(inb(ioaddr+L_EBASE+7) == CARD_ID1) )
      continue;
    dev->base_addr = ioaddr;
    if (ni65_probe1(dev, ioaddr) == 0)
      return 0;
  }

  dev->base_addr = base_addr;
  return ENODEV;
}

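/*
 * Initialize one probed card: read and sanity-check the station address,
 * derive IRQ/DMA from the config register, claim both, and allocate descriptor
 * and buffer memory that ISA DMA can reach (below 16 MB).
 */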
static int ni65_probe1(struct device *dev,int ioaddr)
{
  int i;
  unsigned char station_addr[6];
  struct priv *p;

  for(i=0;i<6;i++)
    station_addr[i] = dev->dev_addr[i] = inb(PORT+L_EBASE+i);

  if(station_addr[0] != SA_ADDR0 || station_addr[1] != SA_ADDR1)
  {
    printk("%s: wrong hardware address\n",dev->name);
    return ENODEV;
  }

  if(dev->irq == 0)
    dev->irq = irqtab[(inw(PORT+L_CONFIG)>>2)&3];
  if(dev->dma == 0)
    dev->dma = dmatab[inw(PORT+L_CONFIG)&3];

  printk("%s: %s found at %#3x, IRQ %d DMA %d.\n", dev->name,
         "network card", dev->base_addr, dev->irq,dev->dma);

  {
    int irqval = request_irq(dev->irq, &ni65_interrupt,0,"ni65");
    if (irqval) {
      printk ("%s: unable to get IRQ %d (irqval=%d).\n",
              dev->name,dev->irq, irqval);
      return EAGAIN;
    }
    if(request_dma(dev->dma, "ni65") != 0)
    {
      printk("%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
      free_irq(dev->irq);
      return EAGAIN;
    }
  }
  irq2dev_map[dev->irq] = dev;

  snarf_region(ioaddr,NI65_TOTAL_SIZE);

  p = dev->priv = (void *) kmalloc(sizeof(struct priv),GFP_KERNEL);
  if(p == NULL)
    return EAGAIN;
  memset((char *) dev->priv,0,sizeof(struct priv));

  dev->open = ni65_open;
  dev->stop = ni65_close;
  dev->hard_start_xmit = ni65_send_packet;
  dev->get_stats = ni65_get_stats;
  dev->set_multicast_list = set_multicast_list;

  ether_setup(dev);

  dev->interrupt = 0;
  dev->tbusy = 0;
  dev->start = 0;

  if( (p->memptr = kmalloc(MEMSIZE,GFP_KERNEL)) == NULL) {
    printk("%s: Can't alloc TMD/RMD-buffer.\n",dev->name);
    return EAGAIN;
  }
  if( (unsigned long) (p->memptr + MEMSIZE) & 0xff000000) {
    printk("%s: Can't alloc TMD/RMD buffer in lower 16MB!\n",dev->name);
    return EAGAIN;
  }
  p->tmdhead = (struct tmd *) ((( (unsigned long)p->memptr ) + 8) & 0xfffffff8);
  p->rmdhead = (struct rmd *) (p->tmdhead + TMDNUM);

#ifndef NO_STATIC
  for(i=0;i<TMDNUM;i++)
  {
    if( (p->tmdbufs[i] = kmalloc(T_BUF_SIZE,GFP_ATOMIC)) == NULL) {
      printk("%s: Can't alloc Xmit-Mem.\n",dev->name);
      return EAGAIN;
    }
    if( (unsigned long) (p->tmdbufs[i]+T_BUF_SIZE) & 0xff000000) {
      printk("%s: Can't alloc Xmit-Mem in lower 16MB!\n",dev->name);
      return EAGAIN;
    }
  }
#endif

  for(i=0;i<RMDNUM;i++)
  {
    if( (p->recv_skb[i] = (struct sk_buff *) alloc_skb(R_BUF_SIZE,GFP_ATOMIC)) == NULL) {
      printk("%s: unable to alloc recv-mem\n",dev->name);
      return EAGAIN;
    }
    if( (unsigned long) (p->recv_skb[i]->data + R_BUF_SIZE) & 0xff000000) {
      printk("%s: unable to alloc receive-memory in lower 16MB!\n",dev->name);
      return EAGAIN;
    }
  }

  return 0;
}

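/*
 * (Re)initialize the AM7990: put the DMA channel into cascade mode, reset the
 * chip, rebuild the transmit/receive rings and the init block, then start the
 * LANCE and wait for init-done (IDON).  Returns 1 on success, 0 on failure.
 */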
static int am7990_reinit(struct device *dev)
{
  int i,j;
  struct tmd *tmdp;
  struct rmd *rmdp;
  struct priv *p = (struct priv *) dev->priv;

  p->lock = 0;
  p->xmit_queued = 0;

  disable_dma(dev->dma);
  set_dma_mode(dev->dma,DMA_MODE_CASCADE);
  enable_dma(dev->dma);

  outw(0,PORT+L_RESET);
  if(inw(PORT+L_DATAREG) != 0x4)
  {
    printk("%s: can't RESET ni6510 card: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
    disable_dma(dev->dma);
    free_dma(dev->dma);
    free_irq(dev->irq);
    return 0;
  }

  memset(p->memptr,0,MEMSIZE);

  p->tmdnum = 0; p->tmdlast = 0;
  for(i=0;i<TMDNUM;i++)
  {
    tmdp = p->tmdhead + i;
#ifndef NO_STATIC
    tmdp->u.buffer = (unsigned long) p->tmdbufs[i];
#endif
    tmdp->u.s.status = XMIT_START | XMIT_END;
  }

  p->rmdnum = 0;
  for(i=0;i<RMDNUM;i++)
  {
    rmdp = p->rmdhead + i;
    rmdp->u.buffer = (unsigned long) p->recv_skb[i]->data;
    rmdp->u.s.status = RCV_OWN;
    rmdp->blen = -R_BUF_SIZE;
    rmdp->mlen = 0;
  }

  for(i=0;i<6;i++)
  {
    p->ib.eaddr[i] = dev->dev_addr[i];
  }
  p->ib.mode = 0;
  for(i=0;i<8;i++)
    p->ib.filter[i] = 0;
  p->ib.trplow = (unsigned short) (( (unsigned long) p->tmdhead ) & 0xffff);
  p->ib.trphigh = (unsigned short) ((( (unsigned long) p->tmdhead )>>16) & 0x00ff) | TMDNUMMASK;
  p->ib.rrplow = (unsigned short) (( (unsigned long) p->rmdhead ) & 0xffff);
  p->ib.rrphigh = (unsigned short) ((( (unsigned long) p->rmdhead )>>16) & 0x00ff) | RMDNUMMASK;

  writereg(0,CSR3);
  writereg((unsigned short) (((unsigned long) &(p->ib)) & 0xffff),CSR1);
  writereg((unsigned short) (((unsigned long) &(p->ib))>>16),CSR2);

  writereg(CSR0_INIT,CSR0);

  for(i=0;i<5;i++)
  {
    for(j=0;j<2000000;j++);
    if(inw(PORT+L_DATAREG) & CSR0_IDON) break;
  }
  if(i == 5)
  {
    printk("%s: can't init am7990, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
    disable_dma(dev->dma);
    free_dma(dev->dma);
    free_irq(dev->irq);
    return 0;
  }

  writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);

  return 1;
}

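/*
 * Interrupt handler: read and acknowledge CSR0, count babble/missed-frame
 * errors, then service the receive and transmit rings as indicated.
 */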
static void ni65_interrupt(int reg_ptr)
{
  int irq = -(((struct pt_regs *)reg_ptr)->orig_eax+2);
  int csr0;
  struct device *dev = (struct device *) irq2dev_map[irq];

  if (dev == NULL) {
    printk ("net_interrupt(): irq %d for unknown device.\n", irq);
    return;
  }

  csr0 = inw(PORT+L_DATAREG);
  writedatareg(csr0 & CSR0_CLRALL);

  dev->interrupt = 1;

  if(csr0 & CSR0_ERR)
  {
    struct priv *p = (struct priv *) dev->priv;

    if(csr0 & CSR0_BABL)
      p->stats.tx_errors++;
    if(csr0 & CSR0_MISS)
      p->stats.rx_errors++;
  }

  if(csr0 & CSR0_RINT)
  {
    recv_intr(dev);
  }
  if(csr0 & CSR0_TINT)
  {
    xmit_intr(dev);
  }

  writedatareg(CSR0_INEA);
  dev->interrupt = 0;

  return;
}

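/*
 * Transmit-complete handling: walk the ring from tmdlast until a descriptor is
 * still owned by the LANCE, update the statistics (and free the skb when
 * NO_STATIC transmission is used), then let the queue run again.
 */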
static void xmit_intr(struct device *dev)
{
  int tmdstat;
  struct tmd *tmdp;
  struct priv *p = (struct priv *) dev->priv;

#ifdef NO_STATIC
  struct sk_buff *skb;
#endif

  while(p->xmit_queued)
  {
    tmdp = p->tmdhead + p->tmdlast;
    tmdstat = tmdp->u.s.status;
    if(tmdstat & XMIT_OWN)
      break;
#ifdef NO_STATIC
    skb = (struct sk_buff *) p->tmdbufs[p->tmdlast];
    dev_kfree_skb(skb,FREE_WRITE);
#endif

    if(tmdstat & XMIT_ERR)
    {
      printk("%s: xmit-error: %04x %04x\n",dev->name,(int) tmdstat,(int) tmdp->status2);
      if(tmdp->status2 & XMIT_TDRMASK)
        printk("%s: tdr-problems (e.g. no resistor)\n",dev->name);

      if(tmdp->status2 & XMIT_RTRY)
        p->stats.tx_aborted_errors++;
      if(tmdp->status2 & XMIT_LCAR)
        p->stats.tx_carrier_errors++;
      p->stats.tx_errors++;
      tmdp->status2 = 0;
    }
    else
      p->stats.tx_packets++;

    p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
    if(p->tmdlast == p->tmdnum)
      p->xmit_queued = 0;
  }

  dev->tbusy = 0;
  mark_bh(NET_BH);
}

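/*
 * Receive handling: for each descriptor we own, either record the error or
 * pass the frame to netif_rx().  A freshly allocated skb replaces the one
 * handed up; if the new buffer is not DMA-able (above 16 MB) the data is
 * copied instead.
 */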
static void recv_intr(struct device *dev)
{
  struct rmd *rmdp;
  int rmdstat,len;
  struct sk_buff *skb,*skb1;
  struct priv *p = (struct priv *) dev->priv;

  rmdp = p->rmdhead + p->rmdnum;
  while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
  {
    if( (rmdstat & (RCV_START | RCV_END)) != (RCV_START | RCV_END) )
    {
      if(rmdstat & RCV_START)
      {
        p->stats.rx_errors++;
        p->stats.rx_length_errors++;
        printk("%s: packet too long\n",dev->name);
      }
      rmdp->u.s.status = RCV_OWN;
    }
    else if(rmdstat & RCV_ERR)
    {
      printk("%s: receive-error: %04x\n",dev->name,(int) rmdstat );
      p->stats.rx_errors++;
      if(rmdstat & RCV_FRAM) p->stats.rx_frame_errors++;
      if(rmdstat & RCV_OFLO) p->stats.rx_over_errors++;
      if(rmdstat & RCV_CRC) p->stats.rx_crc_errors++;
      rmdp->u.s.status = RCV_OWN;
      printk("%s: lance-status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
    }
    else
    {
      len = (rmdp->mlen & 0x0fff) - 4;
      skb = alloc_skb(R_BUF_SIZE,GFP_ATOMIC);
      if(skb != NULL)
      {
        if( (unsigned long) (skb->data + R_BUF_SIZE) & 0xff000000) {
          memcpy(skb->data,p->recv_skb[p->rmdnum]->data,len);
          skb1 = skb;
        }
        else {
          skb1 = p->recv_skb[p->rmdnum];
          p->recv_skb[p->rmdnum] = skb;
          rmdp->u.buffer = (unsigned long) (skb->data);
        }
        rmdp->u.s.status = RCV_OWN;
        rmdp->mlen = 0;
        skb1->len = len;
        skb1->dev = dev;
        p->stats.rx_packets++;
        netif_rx(skb1);
      }
      else
      {
        rmdp->u.s.status = RCV_OWN;
        printk("%s: can't alloc new sk_buff\n",dev->name);
        p->stats.rx_dropped++;
      }
    }
    p->rmdnum++; p->rmdnum &= RMDNUM-1;
    rmdp = p->rmdhead + p->rmdnum;
  }
}

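/*
 * Queue a packet for transmission: restart the chip if the transmitter appears
 * stuck, fill the next transmit descriptor (either pointing it at the skb data
 * or copying into a static bounce buffer), then kick the LANCE with TDMD.
 */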
static int ni65_send_packet(struct sk_buff *skb, struct device *dev)
{
  struct priv *p = (struct priv *) dev->priv;
  struct tmd *tmdp;

  if(dev->tbusy)
  {
    int tickssofar = jiffies - dev->trans_start;
    if (tickssofar < 25)
      return 1;

    printk("%s: xmitter timed out, try to restart!\n",dev->name);
    am7990_reinit(dev);
    dev->tbusy=0;
    dev->trans_start = jiffies;
  }

  if(skb == NULL)
  {
    dev_tint(dev);
    return 0;
  }

  if (skb->len <= 0)
    return 0;

  if (set_bit(0, (void*)&dev->tbusy) != 0)
  {
    printk("%s: Transmitter access conflict.\n", dev->name);
    return 1;
  }
  if(set_bit(0,(void*) &p->lock) != 0)
  {
    printk("%s: Queue was locked!\n",dev->name);
    return 1;
  }

  {
    short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;

    tmdp = p->tmdhead + p->tmdnum;

#ifdef NO_STATIC
    tmdp->u.buffer = (unsigned long) (skb->data);
    p->tmdbufs[p->tmdnum] = skb;
#else
    memcpy((char *) (tmdp->u.buffer & 0x00ffffff),(char *)skb->data,skb->len);
    dev_kfree_skb (skb, FREE_WRITE);
#endif
    tmdp->blen = -len;
    tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;

    cli();
    p->xmit_queued = 1;
    writedatareg(CSR0_TDMD | CSR0_INEA);
    p->tmdnum++; p->tmdnum &= TMDNUM-1;

    if( !((p->tmdhead + p->tmdnum)->u.s.status & XMIT_OWN) )
      dev->tbusy = 0;
    p->lock = 0;
    sti();

    dev->trans_start = jiffies;
  }

  return 0;
}

static struct enet_statistics *ni65_get_stats(struct device *dev)
{
  return &((struct priv *) dev->priv)->stats;
}

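/* Multicast filtering is not implemented; the logical-address filter stays all zero. */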
static void set_multicast_list(struct device *dev, int num_addrs, void *addrs)
{
}