This source file includes the following definitions.
- ni65_open
- ni65_close
- ni65_probe
- ni65_probe1
- am7990_reinit
- ni65_interrupt
- xmit_intr
- recv_intr
- ni65_send_packet
- ni65_get_stats
- set_multicast_list
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/string.h>
38 #include <linux/ptrace.h>
39 #include <linux/errno.h>
40 #include <linux/ioport.h>
41 #include <linux/malloc.h>
42 #include <linux/interrupt.h>
43 #include <asm/bitops.h>
44 #include <asm/io.h>
45 #include <asm/dma.h>
46
47 #include <linux/netdevice.h>
48 #include <linux/etherdevice.h>
49 #include <linux/skbuff.h>
50
51 #include "ni65.h"
52
53
54
55
56
#ifndef HAVE_PORTRESERVE
#define check_region(ioaddr, size) 0
#define request_region(ioaddr, size,name) do ; while (0)
#endif

#ifndef NET_DEBUG
#define NET_DEBUG 2
#endif

/* number of I/O ports occupied by the board */
#define NI65_TOTAL_SIZE 16

/* first bytes of a valid station address in the PROM */
#define SA_ADDR0 0x02
#define SA_ADDR1 0x07
#define SA_ADDR2 0x01
/* card identification bytes (read behind the address PROM) */
#define CARD_ID0 0x00
#define CARD_ID1 0x55

/* I/O base of the board; expects a `struct device *dev` in scope */
#define PORT dev->base_addr

/* receive/transmit ring sizes (must stay powers of two: the code masks
   indices with RMDNUM-1 / TMDNUM-1) and the matching ring-length codes
   or'ed into the high ring-pointer words of the init block */
#define RMDNUM 8
#define RMDNUMMASK 0x6000
#define TMDNUM 4
#define TMDNUMMASK 0x4000

#define R_BUF_SIZE 1518
#define T_BUF_SIZE 1518

/* size of the descriptor area: 8 bytes alignment slack plus 8 bytes per
   descriptor.  Parenthesized so the macro expands safely inside larger
   expressions (the unparenthesized form silently mis-expands in
   e.g. `2*MEMSIZE`). */
#define MEMSIZE (8+RMDNUM*8+TMDNUM*8)

/* board register offsets */
#define L_DATAREG 0x00
#define L_ADDRREG 0x02

#define L_RESET 0x04
#define L_CONFIG 0x05
#define L_EBASE 0x08

/* LANCE control/status register numbers */
#define CSR0 0x00
#define CSR1 0x01
#define CSR2 0x02
#define CSR3 0x03

#undef NO_STATIC

/* register access: select the CSR via the address port, then read/write
   the data port; the extra inw()s flush the ISA write.  Wrapped in
   do { } while(0) so the macros behave as single statements after
   `if (...)` without braces. */
#define writereg(val,reg) do {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
                              outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);} while(0)
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
                      inw(PORT+L_DATAREG))
#define writedatareg(val) do {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);} while(0)
115
/* forward declarations */
static int ni65_probe1(struct device *dev,int);
static void ni65_interrupt(int irq, struct pt_regs *regs);
static void recv_intr(struct device *dev);
static void xmit_intr(struct device *dev);
static int ni65_open(struct device *dev);
static int am7990_reinit(struct device *dev);
static int ni65_send_packet(struct sk_buff *skb, struct device *dev);
static int ni65_close(struct device *dev);
static struct enet_statistics *ni65_get_stats(struct device *);

static void set_multicast_list(struct device *dev);

/*
 * Per-device private state, hung off dev->priv.
 */
struct priv
{
  struct init_block ib;             /* LANCE initialization block handed to the chip via CSR1/CSR2 */
  void *memptr;                     /* raw kmalloc'd area holding both descriptor rings */
  struct rmd *rmdhead;              /* receive descriptor ring, RMDNUM entries (8-byte aligned in memptr) */
  struct tmd *tmdhead;              /* transmit descriptor ring, TMDNUM entries */
  int rmdnum;                       /* next receive descriptor to examine */
  int tmdnum,tmdlast;               /* next free / oldest outstanding transmit descriptor */
  struct sk_buff *recv_skb[RMDNUM]; /* skbs whose data backs the receive ring */
  void *tmdbufs[TMDNUM];            /* static transmit buffers (queued skbs instead when NO_STATIC is defined) */
  int lock,xmit_queued;             /* xmit-path lock flag / "transmits outstanding" flag */
  struct enet_statistics stats;     /* counters returned by ni65_get_stats */
};

/* IRQ / DMA channel lookup tables, indexed by the two corresponding
   bit fields of the board's config register (see ni65_probe1) */
int irqtab[] = { 9,12,15,5 };
int dmatab[] = { 0,3,5,6 };
144
145
146
147
148
149 static int ni65_open(struct device *dev)
150 {
151 if(am7990_reinit(dev))
152 {
153 dev->tbusy = 0;
154 dev->interrupt = 0;
155 dev->start = 1;
156 return 0;
157 }
158 else
159 {
160 dev->start = 0;
161 return -EAGAIN;
162 }
163 }
164
165 static int ni65_close(struct device *dev)
166 {
167 outw(0,PORT+L_RESET);
168 dev->tbusy = 1;
169 dev->start = 0;
170 return 0;
171 }
172
173
174
175
176
177
178 int ni65_probe(struct device *dev)
179 {
180 int *port, ports[] = {0x300,0x320,0x340,0x360, 0};
181 int base_addr = dev->base_addr;
182
183 if (base_addr > 0x1ff)
184 return ni65_probe1(dev, base_addr);
185 else if (base_addr > 0)
186 return ENXIO;
187
188 for (port = ports; *port; port++)
189 {
190 int ioaddr = *port;
191 if (check_region(ioaddr, NI65_TOTAL_SIZE))
192 continue;
193 if( !(inb(ioaddr+L_EBASE+6) == CARD_ID0) ||
194 !(inb(ioaddr+L_EBASE+7) == CARD_ID1) )
195 continue;
196 dev->base_addr = ioaddr;
197 if (ni65_probe1(dev, ioaddr) == 0)
198 return 0;
199 }
200
201 dev->base_addr = base_addr;
202 return ENODEV;
203 }
204
205
206 static int ni65_probe1(struct device *dev,int ioaddr)
207 {
208 int i;
209 unsigned char station_addr[6];
210 struct priv *p;
211
212 for(i=0;i<6;i++)
213 station_addr[i] = dev->dev_addr[i] = inb(PORT+L_EBASE+i);
214
215 if(station_addr[0] != SA_ADDR0 || station_addr[1] != SA_ADDR1)
216 {
217 printk("%s: wrong Hardaddress \n",dev->name);
218 return ENODEV;
219 }
220
221 if(dev->irq == 0)
222 dev->irq = irqtab[(inw(PORT+L_CONFIG)>>2)&3];
223 if(dev->dma == 0)
224 dev->dma = dmatab[inw(PORT+L_CONFIG)&3];
225
226 printk("%s: %s found at %#3lx, IRQ %d DMA %d.\n", dev->name,
227 "network card", dev->base_addr, dev->irq,dev->dma);
228
229 {
230 int irqval = request_irq(dev->irq, &ni65_interrupt,0,"ni65");
231 if (irqval) {
232 printk ("%s: unable to get IRQ %d (irqval=%d).\n",
233 dev->name,dev->irq, irqval);
234 return EAGAIN;
235 }
236 if(request_dma(dev->dma, "ni65") != 0)
237 {
238 printk("%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
239 free_irq(dev->irq);
240 return EAGAIN;
241 }
242 }
243 irq2dev_map[dev->irq] = dev;
244
245
246 request_region(ioaddr,NI65_TOTAL_SIZE,"ni65");
247
248 p = dev->priv = (void *) kmalloc(sizeof(struct priv),GFP_KERNEL);
249 if (p == NULL)
250 return -ENOMEM;
251 memset((char *) dev->priv,0,sizeof(struct priv));
252
253 dev->open = ni65_open;
254 dev->stop = ni65_close;
255 dev->hard_start_xmit = ni65_send_packet;
256 dev->get_stats = ni65_get_stats;
257 dev->set_multicast_list = set_multicast_list;
258
259 ether_setup(dev);
260
261 dev->flags &= ~IFF_MULTICAST;
262 dev->interrupt = 0;
263 dev->tbusy = 0;
264 dev->start = 0;
265
266 if( (p->memptr = kmalloc(MEMSIZE,GFP_KERNEL)) == NULL) {
267 printk("%s: Can't alloc TMD/RMD-buffer.\n",dev->name);
268 return EAGAIN;
269 }
270 if( (unsigned long) (p->memptr + MEMSIZE) & 0xff000000) {
271 printk("%s: Can't alloc TMD/RMD buffer in lower 16MB!\n",dev->name);
272 return EAGAIN;
273 }
274 p->tmdhead = (struct tmd *) ((( (unsigned long)p->memptr ) + 8) & 0xfffffff8);
275 p->rmdhead = (struct rmd *) (p->tmdhead + TMDNUM);
276
277 #ifndef NO_STATIC
278 for(i=0;i<TMDNUM;i++)
279 {
280 if( (p->tmdbufs[i] = kmalloc(T_BUF_SIZE,GFP_ATOMIC)) == NULL) {
281 printk("%s: Can't alloc Xmit-Mem.\n",dev->name);
282 return EAGAIN;
283 }
284 if( (unsigned long) (p->tmdbufs[i]+T_BUF_SIZE) & 0xff000000) {
285 printk("%s: Can't alloc Xmit-Mem in lower 16MB!\n",dev->name);
286 return EAGAIN;
287 }
288 }
289 #endif
290
291 for(i=0;i<RMDNUM;i++)
292 {
293 if( (p->recv_skb[i] = dev_alloc_skb(R_BUF_SIZE)) == NULL) {
294 printk("%s: unable to alloc recv-mem\n",dev->name);
295 return EAGAIN;
296 }
297 if( (unsigned long) (p->recv_skb[i]->data + R_BUF_SIZE) & 0xff000000) {
298 printk("%s: unable to alloc receive-memory in lower 16MB!\n",dev->name);
299 return EAGAIN;
300 }
301 }
302
303 return 0;
304 }
305
306
307
308
309
/*
 * Reset the board and (re)initialize the am7990 LANCE: program the DMA
 * channel, fill in the descriptor rings and the init block, point the
 * chip at the init block via CSR1/CSR2, start initialization and wait
 * for IDON, then start the chip with interrupts enabled.
 *
 * Returns 1 on success.  Returns 0 on failure, after releasing the DMA
 * channel and the IRQ.
 */
static int am7990_reinit(struct device *dev)
{
  int i,j;
  struct tmd *tmdp;
  struct rmd *rmdp;
  struct priv *p = (struct priv *) dev->priv;

  p->lock = 0;
  p->xmit_queued = 0;

  /* put the DMA channel into cascade mode: the LANCE is a bus master
     and drives the transfers itself */
  disable_dma(dev->dma);
  set_dma_mode(dev->dma,DMA_MODE_CASCADE);
  enable_dma(dev->dma);

  /* reset the board; after reset the data port must read back 0x4
     (the chip's stopped state), otherwise the card is broken/absent */
  outw(0,PORT+L_RESET);
  if(inw(PORT+L_DATAREG) != 0x4)
  {
    printk("%s: can't RESET ni6510 card: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
    disable_dma(dev->dma);
    free_dma(dev->dma);
    free_irq(dev->irq);
    return 0;
  }

  /* clear the whole descriptor area */
  memset(p->memptr,0,MEMSIZE);

  /* transmit ring: all descriptors owned by the host, pre-wired to the
     static bounce buffers unless NO_STATIC is defined */
  p->tmdnum = 0; p->tmdlast = 0;
  for(i=0;i<TMDNUM;i++)
  {
    tmdp = p->tmdhead + i;
#ifndef NO_STATIC
    tmdp->u.buffer = (unsigned long) p->tmdbufs[i];
#endif
    tmdp->u.s.status = XMIT_START | XMIT_END;
  }

  /* receive ring: hand every descriptor (backed by a receive skb) to
     the chip; blen is the negated buffer size as the chip expects */
  p->rmdnum = 0;
  for(i=0;i<RMDNUM;i++)
  {
    rmdp = p->rmdhead + i;
    rmdp->u.buffer = (unsigned long) p->recv_skb[i]->data;
    rmdp->u.s.status = RCV_OWN;
    rmdp->blen = -R_BUF_SIZE;
    rmdp->mlen = 0;
  }

  /* init block: station address, no mode bits, empty multicast filter */
  for(i=0;i<6;i++)
  {
    p->ib.eaddr[i] = dev->dev_addr[i];
  }
  p->ib.mode = 0;
  for(i=0;i<8;i++)
    p->ib.filter[i] = 0;
  /* ring pointers: low 16 bits plus high 8 bits of the physical address,
     with the ring-length code or'ed into the high word */
  p->ib.trplow = (unsigned short) (( (unsigned long) p->tmdhead ) & 0xffff);
  p->ib.trphigh = (unsigned short) ((( (unsigned long) p->tmdhead )>>16) & 0x00ff) | TMDNUMMASK;
  p->ib.rrplow = (unsigned short) (( (unsigned long) p->rmdhead ) & 0xffff);
  p->ib.rrphigh = (unsigned short) ((( (unsigned long) p->rmdhead )>>16) & 0x00ff) | RMDNUMMASK;

  /* tell the chip where the init block lives and start initialization */
  writereg(0,CSR3);
  writereg((unsigned short) (((unsigned long) &(p->ib)) & 0xffff),CSR1);
  writereg((unsigned short) (((unsigned long) &(p->ib))>>16),CSR2);

  writereg(CSR0_INIT,CSR0);

  /* wait for init-done; the inner loop is a crude busy-wait delay
     (NOTE(review): an optimizing compiler may elide an empty loop) */
  for(i=0;i<5;i++)
  {
    for(j=0;j<2000000;j++);
    if(inw(PORT+L_DATAREG) & CSR0_IDON) break;
  }
  if(i == 5)
  {
    printk("%s: can't init am7990, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
    disable_dma(dev->dma);
    free_dma(dev->dma);
    free_irq(dev->irq);
    return 0;
  }

  /* clear pending status, enable interrupts and start the chip */
  writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);

  return 1;
}
398
399
400
401
402
/*
 * Interrupt handler: look up the device for this IRQ, read CSR0,
 * acknowledge all set interrupt bits, account error conditions, and
 * dispatch to the receive/transmit handlers as indicated.
 */
static void ni65_interrupt(int irq, struct pt_regs * regs)
{
  int csr0;
  struct device *dev = (struct device *) irq2dev_map[irq];

  if (dev == NULL) {
    printk ("net_interrupt(): irq %d for unknown device.\n", irq);
    return;
  }

  /* CSR0 is the currently selected register here (the driver leaves
     CSR0 selected after am7990_reinit), so a plain data-port read
     returns it; writing the set bits back acknowledges them */
  csr0 = inw(PORT+L_DATAREG);
  writedatareg(csr0 & CSR0_CLRALL);

  dev->interrupt = 1;

  if(csr0 & CSR0_ERR)
  {
    struct priv *p = (struct priv *) dev->priv;

    if(csr0 & CSR0_BABL)     /* transmitter error condition */
      p->stats.tx_errors++;
    if(csr0 & CSR0_MISS)     /* frame missed (no free receive descriptor) */
      p->stats.rx_errors++;
  }

  if(csr0 & CSR0_RINT)       /* receive interrupt */
  {
    recv_intr(dev);
  }
  if(csr0 & CSR0_TINT)       /* transmit interrupt */
  {
    xmit_intr(dev);
  }

  /* re-enable chip interrupts */
  writedatareg(CSR0_INEA);
  dev->interrupt = 0;

  return;
}
442
443
444
445
446
447
/*
 * Transmit-complete handler: walk the transmit ring from the oldest
 * outstanding descriptor (tmdlast), reclaiming every descriptor the
 * chip has given back, accounting errors, and finally clearing tbusy
 * so the stack may queue more packets.
 */
static void xmit_intr(struct device *dev)
{
  int tmdstat;
  struct tmd *tmdp;
  struct priv *p = (struct priv *) dev->priv;

#ifdef NO_STATIC
  struct sk_buff *skb;
#endif

  while(p->xmit_queued)
  {
    tmdp = p->tmdhead + p->tmdlast;
    tmdstat = tmdp->u.s.status;
    if(tmdstat & XMIT_OWN)       /* chip still owns it: stop here */
      break;
#ifdef NO_STATIC
    /* without static buffers the descriptor held the queued skb itself */
    skb = (struct sk_buff *) p->tmdbufs[p->tmdlast];
    dev_kfree_skb(skb,FREE_WRITE);
#endif

    if(tmdstat & XMIT_ERR)
    {
      printk("%s: xmit-error: %04x %04x\n",dev->name,(int) tmdstat,(int) tmdp->status2);
      if(tmdp->status2 & XMIT_TDRMASK)     /* non-zero time-domain reflectometry value */
        printk("%s: tdr-problems (e.g. no resistor)\n",dev->name);

      if(tmdp->status2 & XMIT_RTRY)        /* retries exhausted */
        p->stats.tx_aborted_errors++;
      if(tmdp->status2 & XMIT_LCAR)        /* lost carrier */
        p->stats.tx_carrier_errors++;
      p->stats.tx_errors++;
      tmdp->status2 = 0;                   /* clear for the next use of this descriptor */
    }
    else
      p->stats.tx_packets++;

    /* advance; ring is empty once tmdlast catches up with tmdnum */
    p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
    if(p->tmdlast == p->tmdnum)
      p->xmit_queued = 0;
  }

  /* at least one descriptor is free again: let the stack transmit */
  dev->tbusy = 0;
  mark_bh(NET_BH);
}
494
495
496
497
498
/*
 * Receive handler: walk the receive ring from rmdnum, processing every
 * descriptor the chip has handed back.  Good frames are passed up via
 * netif_rx(); each descriptor is then returned to the chip (RCV_OWN).
 */
static void recv_intr(struct device *dev)
{
  struct rmd *rmdp;
  int rmdstat,len;
  struct sk_buff *skb,*skb1;
  struct priv *p = (struct priv *) dev->priv;

  rmdp = p->rmdhead + p->rmdnum;
  while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
  {
    if( (rmdstat & (RCV_START | RCV_END)) != (RCV_START | RCV_END) )
    {
      /* frame did not fit in one descriptor: only count the error on
         the descriptor carrying RCV_START, drop all its fragments */
      if(rmdstat & RCV_START)
      {
        p->stats.rx_errors++;
        p->stats.rx_length_errors++;
        printk("%s: packet too long\n",dev->name);
      }
      rmdp->u.s.status = RCV_OWN;          /* give it back to the chip */
    }
    else if(rmdstat & RCV_ERR)
    {
      printk("%s: receive-error: %04x\n",dev->name,(int) rmdstat );
      p->stats.rx_errors++;
      if(rmdstat & RCV_FRAM) p->stats.rx_frame_errors++;
      if(rmdstat & RCV_OFLO) p->stats.rx_over_errors++;
      if(rmdstat & RCV_CRC)  p->stats.rx_crc_errors++;
      rmdp->u.s.status = RCV_OWN;
      printk("%s: lance-status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
    }
    else
    {
      /* good frame; mlen minus 4 — presumably strips the trailing FCS */
      len = (rmdp->mlen & 0x0fff) - 4;
      skb = dev_alloc_skb(R_BUF_SIZE);
      if(skb != NULL)
      {
        if( (unsigned long) (skb->data + R_BUF_SIZE) & 0xff000000) {
          /* new skb lies above 16MB and can't serve as a DMA buffer:
             copy the data out and keep the old skb in the ring */
          memcpy(skb_put(skb,len),p->recv_skb[p->rmdnum]->data,len);
          skb1 = skb;
        }
        else {
          /* swap: pass the filled skb up, put the fresh one in the ring */
          skb1 = p->recv_skb[p->rmdnum];
          p->recv_skb[p->rmdnum] = skb;
          rmdp->u.buffer = (unsigned long) skb_put(skb1,len);
        }
        rmdp->u.s.status = RCV_OWN;
        rmdp->mlen = 0;
        skb1->dev = dev;
        p->stats.rx_packets++;
        skb1->protocol=eth_type_trans(skb1,dev);
        netif_rx(skb1);
      }
      else
      {
        /* no memory for a replacement: drop the frame, recycle the buffer */
        rmdp->u.s.status = RCV_OWN;
        printk("%s: can't alloc new sk_buff\n",dev->name);
        p->stats.rx_dropped++;
      }
    }
    /* advance around the ring (RMDNUM is a power of two) */
    p->rmdnum++; p->rmdnum &= RMDNUM-1;
    rmdp = p->rmdhead + p->rmdnum;
  }
}
562
563
564
565
566
/*
 * hard_start_xmit: queue one packet on the transmit ring and kick the
 * chip (old pre-2.2 driver API: return 0 = accepted, 1 = try again).
 * Handles the watchdog-style transmit timeout and the skb == NULL
 * "retransmit queued packets" convention of that API.
 */
static int ni65_send_packet(struct sk_buff *skb, struct device *dev)
{
  struct priv *p = (struct priv *) dev->priv;
  struct tmd *tmdp;

  /* transmit timeout: if tbusy has been set for >= 25 jiffies, assume
     the chip wedged and reinitialize it */
  if(dev->tbusy)
  {
    int tickssofar = jiffies - dev->trans_start;
    if (tickssofar < 25)
      return 1;

    printk("%s: xmitter timed out, try to restart!\n",dev->name);
    am7990_reinit(dev);
    dev->tbusy=0;
    dev->trans_start = jiffies;
  }

  /* NULL skb means "transmit whatever is queued" in the old net API */
  if(skb == NULL)
  {
    dev_tint(dev);
    return 0;
  }

  if (skb->len <= 0)
    return 0;

  /* NOTE(review): relies on the old set_bit() semantics of returning
     the previous bit value (test-and-set), used here as a lock */
  if (set_bit(0, (void*)&dev->tbusy) != 0)
  {
    printk("%s: Transmitter access conflict.\n", dev->name);
    return 1;
  }
  if(set_bit(0,(void*) &p->lock) != 0)
  {
    printk("%s: Queue was locked!\n",dev->name);
    return 1;
  }

  {
    /* pad short frames up to the ethernet minimum */
    short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;

    tmdp = p->tmdhead + p->tmdnum;

#ifdef NO_STATIC
    /* zero-copy: point the descriptor at the skb data, keep the skb
       until xmit_intr reclaims the descriptor */
    tmdp->u.buffer = (unsigned long) (skb->data);
    p->tmdbufs[p->tmdnum] = skb;
#else
    /* copy into the static bounce buffer (descriptor holds its 24-bit
       address) and free the skb immediately */
    memcpy((char *) (tmdp->u.buffer & 0x00ffffff),(char *)skb->data,skb->len);
    dev_kfree_skb (skb, FREE_WRITE);
#endif
    tmdp->blen = -len;                     /* chip expects the negated length */
    /* hand the descriptor to the chip last, after buffer and length */
    tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;

    /* critical section against the interrupt handler */
    cli();
    p->xmit_queued = 1;
    writedatareg(CSR0_TDMD | CSR0_INEA);   /* demand transmission */
    p->tmdnum++; p->tmdnum &= TMDNUM-1;

    /* if the next descriptor is free, accept more packets right away */
    if( !((p->tmdhead + p->tmdnum)->u.s.status & XMIT_OWN) )
      dev->tbusy = 0;
    p->lock = 0;
    sti();

    dev->trans_start = jiffies;

  }

  return 0;
}
635
636 static struct enet_statistics *ni65_get_stats(struct device *dev)
637 {
638 return &((struct priv *) dev->priv)->stats;
639 }
640
/*
 * Multicast filter setup: intentionally empty.  The driver clears
 * IFF_MULTICAST at probe time, so no multicast configuration is needed.
 */
static void set_multicast_list(struct device *dev)
{
}
644
645
646
647
648