This source file includes the following definitions.
- ni65_open
- ni65_close
- ni65_probe
- ni65_probe1
- am7990_reinit
- ni65_interrupt
- xmit_intr
- recv_intr
- ni65_send_packet
- ni65_get_stats
- set_multicast_list

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/malloc.h>
#include <linux/interrupt.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include "ni65.h"

#ifndef HAVE_PORTRESERVE
#define check_region(ioaddr, size)		0
#define request_region(ioaddr, size, name)	do ; while (0)
#endif

#ifndef NET_DEBUG
#define NET_DEBUG 2
#endif

#define NI65_TOTAL_SIZE 16

#define SA_ADDR0 0x02
#define SA_ADDR1 0x07
#define SA_ADDR2 0x01
#define CARD_ID0 0x00
#define CARD_ID1 0x55

#define PORT dev->base_addr

#define RMDNUM 8
#define RMDNUMMASK 0x6000 /* log2(RMDNUM) << 13 */
#define TMDNUM 4
#define TMDNUMMASK 0x4000 /* log2(TMDNUM) << 13 */

#define R_BUF_SIZE 1518
#define T_BUF_SIZE 1518
#define MEMSIZE (8+RMDNUM*8+TMDNUM*8)

#define L_DATAREG 0x00
#define L_ADDRREG 0x02

#define L_RESET   0x04
#define L_CONFIG  0x05
#define L_EBASE   0x08

#define CSR0 0x00
#define CSR1 0x01
#define CSR2 0x02
#define CSR3 0x03

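/*
 * NO_STATIC selects between two transmit strategies: with NO_STATIC
 * defined, packets are sent straight out of their sk_buff and the skb is
 * freed from xmit_intr(); with it undefined (as here), each TMD slot gets
 * a static bounce buffer below 16 MB and the data is copied on every send.
 */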
#undef NO_STATIC

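/*
 * Register access: the board exposes the LANCE through an address port
 * (L_ADDRREG) and a data port (L_DATAREG).  A CSR is selected by writing
 * its number to the address port and is then read or written through the
 * data port.  The extra inw() after each outw() is presumably just an
 * ISA-bus settling delay.
 */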
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
                           outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
                      inw(PORT+L_DATAREG))
#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}

static int ni65_probe1(struct device *dev,int ioaddr);
static void ni65_interrupt(int irq, struct pt_regs *regs);
static void recv_intr(struct device *dev);
static void xmit_intr(struct device *dev);
static int ni65_open(struct device *dev);
static int am7990_reinit(struct device *dev);
static int ni65_send_packet(struct sk_buff *skb, struct device *dev);
static int ni65_close(struct device *dev);
static struct enet_statistics *ni65_get_stats(struct device *);
static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);

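/*
 * Per-device private data: the am7990 init block, the descriptor memory
 * with its TMD/RMD ring heads, the current ring indices, one preallocated
 * sk_buff per receive slot, one bounce buffer per transmit slot (in
 * static-buffer mode), plus lock flags and the statistics.
 */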
struct priv
{
  struct init_block ib;
  void *memptr;
  struct rmd *rmdhead;
  struct tmd *tmdhead;
  int rmdnum;
  int tmdnum,tmdlast;
  struct sk_buff *recv_skb[RMDNUM];
  void *tmdbufs[TMDNUM];
  int lock,xmit_queued;
  struct enet_statistics stats;
};

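/*
 * The board's config register encodes the selected IRQ and DMA channel in
 * two 2-bit fields; ni65_probe1() uses these tables to translate them.
 */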
static int irqtab[] = { 9,12,15,5 };
static int dmatab[] = { 0,3,5,6 };

static int ni65_open(struct device *dev)
{
  if(am7990_reinit(dev))
  {
    dev->tbusy = 0;
    dev->interrupt = 0;
    dev->start = 1;
    return 0;
  }
  else
  {
    dev->start = 0;
    return -EAGAIN;
  }
}

static int ni65_close(struct device *dev)
{
  outw(0,PORT+L_RESET);
  dev->tbusy = 1;
  dev->start = 0;
  return 0;
}

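/*
 * Probe for the card, following the usual convention of this era:
 *   dev->base_addr > 0x1ff   -> check only that location,
 *   0 < base_addr <= 0x1ff   -> don't probe at all,
 *   base_addr == 0           -> autoprobe the addresses in ports[].
 * A board is recognised by the two ID bytes behind the ethernet address.
 */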
int ni65_probe(struct device *dev)
{
  int *port, ports[] = {0x300,0x320,0x340,0x360, 0};
  int base_addr = dev->base_addr;

  if (base_addr > 0x1ff)          /* check a single specified location */
    return ni65_probe1(dev, base_addr);
  else if (base_addr > 0)         /* don't probe at all */
    return ENXIO;

  for (port = ports; *port; port++)
  {
    int ioaddr = *port;
    if (check_region(ioaddr, NI65_TOTAL_SIZE))
      continue;
    if( inb(ioaddr+L_EBASE+6) != CARD_ID0 ||
        inb(ioaddr+L_EBASE+7) != CARD_ID1 )
      continue;
    dev->base_addr = ioaddr;
    if (ni65_probe1(dev, ioaddr) == 0)
      return 0;
  }

  dev->base_addr = base_addr;
  return ENODEV;
}

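/*
 * Second-stage probe: read the station address from the board, check the
 * vendor prefix, derive IRQ/DMA from the config register if they were not
 * given, grab IRQ, DMA channel and I/O region, and allocate all descriptor
 * and buffer memory (which must live below 16 MB).
 */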
static int ni65_probe1(struct device *dev,int ioaddr)
{
  int i;
  unsigned char station_addr[6];
  struct priv *p;

  for(i=0;i<6;i++)
    station_addr[i] = dev->dev_addr[i] = inb(PORT+L_EBASE+i);

  if(station_addr[0] != SA_ADDR0 || station_addr[1] != SA_ADDR1)
  {
    printk("%s: wrong hardware address\n",dev->name);
    return ENODEV;
  }

  if(dev->irq == 0)
    dev->irq = irqtab[(inw(PORT+L_CONFIG)>>2)&3];
  if(dev->dma == 0)
    dev->dma = dmatab[inw(PORT+L_CONFIG)&3];

  printk("%s: %s found at %#3lx, IRQ %d DMA %d.\n", dev->name,
         "network card", dev->base_addr, dev->irq,dev->dma);

  {
    int irqval = request_irq(dev->irq, &ni65_interrupt,0,"ni65");
    if (irqval) {
      printk ("%s: unable to get IRQ %d (irqval=%d).\n",
              dev->name,dev->irq, irqval);
      return EAGAIN;
    }
    if(request_dma(dev->dma, "ni65") != 0)
    {
      printk("%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
      free_irq(dev->irq);
      return EAGAIN;
    }
  }
  irq2dev_map[dev->irq] = dev;

  request_region(ioaddr,NI65_TOTAL_SIZE,"ni65");

  p = dev->priv = (void *) kmalloc(sizeof(struct priv),GFP_KERNEL);
  if(p == NULL)
    return ENOMEM;
  memset((char *) dev->priv,0,sizeof(struct priv));

  dev->open = ni65_open;
  dev->stop = ni65_close;
  dev->hard_start_xmit = ni65_send_packet;
  dev->get_stats = ni65_get_stats;
  dev->set_multicast_list = set_multicast_list;

  ether_setup(dev);

  dev->interrupt = 0;
  dev->tbusy = 0;
  dev->start = 0;

  /*
   * the descriptor memory (and, with static buffers, every transmit and
   * receive buffer) has to live below 16 MB, since the LANCE can only
   * address 24 bits
   */
  if( (p->memptr = kmalloc(MEMSIZE,GFP_KERNEL)) == NULL) {
    printk("%s: Can't alloc TMD/RMD-buffer.\n",dev->name);
    return EAGAIN;
  }
  if( (unsigned long) (p->memptr + MEMSIZE) & 0xff000000) {
    printk("%s: Can't alloc TMD/RMD buffer in lower 16MB!\n",dev->name);
    return EAGAIN;
  }
  p->tmdhead = (struct tmd *) ((( (unsigned long)p->memptr ) + 8) & 0xfffffff8);
  p->rmdhead = (struct rmd *) (p->tmdhead + TMDNUM);

#ifndef NO_STATIC
  for(i=0;i<TMDNUM;i++)
  {
    if( (p->tmdbufs[i] = kmalloc(T_BUF_SIZE,GFP_ATOMIC)) == NULL) {
      printk("%s: Can't alloc Xmit-Mem.\n",dev->name);
      return EAGAIN;
    }
    if( (unsigned long) (p->tmdbufs[i]+T_BUF_SIZE) & 0xff000000) {
      printk("%s: Can't alloc Xmit-Mem in lower 16MB!\n",dev->name);
      return EAGAIN;
    }
  }
#endif

  for(i=0;i<RMDNUM;i++)
  {
    if( (p->recv_skb[i] = dev_alloc_skb(R_BUF_SIZE)) == NULL) {
      printk("%s: unable to alloc recv-mem\n",dev->name);
      return EAGAIN;
    }
    if( (unsigned long) (p->recv_skb[i]->data + R_BUF_SIZE) & 0xff000000) {
      printk("%s: unable to alloc receive-memory in lower 16MB!\n",dev->name);
      return EAGAIN;
    }
  }

  return 0;
}

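/*
 * (Re)initialise the am7990: put the DMA channel into cascade mode, reset
 * the chip, rebuild the TMD/RMD rings and the init block, point CSR1/CSR2
 * at the init block, issue INIT, wait for IDON and finally START the chip.
 * Returns 1 on success, 0 on failure.
 */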
static int am7990_reinit(struct device *dev)
{
  int i,j;
  struct tmd *tmdp;
  struct rmd *rmdp;
  struct priv *p = (struct priv *) dev->priv;

  p->lock = 0;
  p->xmit_queued = 0;

  disable_dma(dev->dma);
  set_dma_mode(dev->dma,DMA_MODE_CASCADE);
  enable_dma(dev->dma);

  outw(0,PORT+L_RESET); /* reset the lance */
  if(inw(PORT+L_DATAREG) != 0x4) /* after a reset CSR0 should read just the STOP bit */
  {
    printk("%s: can't RESET ni6510 card: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
    disable_dma(dev->dma);
    free_dma(dev->dma);
    free_irq(dev->irq);
    return 0;
  }

  memset(p->memptr,0,MEMSIZE);

  p->tmdnum = 0; p->tmdlast = 0;
  for(i=0;i<TMDNUM;i++)
  {
    tmdp = p->tmdhead + i;
#ifndef NO_STATIC
    tmdp->u.buffer = (unsigned long) p->tmdbufs[i];
#endif
    tmdp->u.s.status = XMIT_START | XMIT_END;
  }

  p->rmdnum = 0;
  for(i=0;i<RMDNUM;i++)
  {
    rmdp = p->rmdhead + i;
    rmdp->u.buffer = (unsigned long) p->recv_skb[i]->data;
    rmdp->u.s.status = RCV_OWN;
    rmdp->blen = -R_BUF_SIZE;
    rmdp->mlen = 0;
  }

  for(i=0;i<6;i++)
  {
    p->ib.eaddr[i] = dev->dev_addr[i];
  }
  p->ib.mode = 0;
  for(i=0;i<8;i++)
    p->ib.filter[i] = 0;
  p->ib.trplow  = (unsigned short) (( (unsigned long) p->tmdhead ) & 0xffff);
  p->ib.trphigh = (unsigned short) ((( (unsigned long) p->tmdhead )>>16) & 0x00ff) | TMDNUMMASK;
  p->ib.rrplow  = (unsigned short) (( (unsigned long) p->rmdhead ) & 0xffff);
  p->ib.rrphigh = (unsigned short) ((( (unsigned long) p->rmdhead )>>16) & 0x00ff) | RMDNUMMASK;

  writereg(0,CSR3); /* bus/control options: use the defaults */
  writereg((unsigned short) (((unsigned long) &(p->ib)) & 0xffff),CSR1);
  writereg((unsigned short) (((unsigned long) &(p->ib))>>16),CSR2);

  writereg(CSR0_INIT,CSR0); /* this also leaves the address register on CSR0 */

  /*
   * from now on the address register stays on CSR0; all further accesses
   * go straight through the data register
   */
  for(i=0;i<5;i++)
  {
    for(j=0;j<2000000;j++); /* wait a while */
    if(inw(PORT+L_DATAREG) & CSR0_IDON) break; /* init done? */
  }
  if(i == 5)
  {
    printk("%s: can't init am7990, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
    disable_dma(dev->dma);
    free_dma(dev->dma);
    free_irq(dev->irq);
    return 0;
  }

  writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT); /* start lance, enable interrupts */

  return 1;
}

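/*
 * Interrupt handler: read CSR0, acknowledge all pending causes, account
 * babble/missed-frame errors, dispatch to recv_intr()/xmit_intr(), then
 * re-enable chip interrupts with INEA.
 */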
static void ni65_interrupt(int irq, struct pt_regs * regs)
{
  int csr0;
  struct device *dev = (struct device *) irq2dev_map[irq];

  if (dev == NULL) {
    printk ("ni65_interrupt(): irq %d for unknown device.\n", irq);
    return;
  }

  csr0 = inw(PORT+L_DATAREG);
  writedatareg(csr0 & CSR0_CLRALL); /* acknowledge the interrupt causes */

  dev->interrupt = 1;

  if(csr0 & CSR0_ERR)
  {
    struct priv *p = (struct priv *) dev->priv;

    if(csr0 & CSR0_BABL)
      p->stats.tx_errors++;
    if(csr0 & CSR0_MISS)
      p->stats.rx_errors++;
  }

  if(csr0 & CSR0_RINT) /* receive interrupt */
  {
    recv_intr(dev);
  }
  if(csr0 & CSR0_TINT) /* transmit interrupt */
  {
    xmit_intr(dev);
  }

  writedatareg(CSR0_INEA); /* reenable interrupts */
  dev->interrupt = 0;

  return;
}

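/*
 * Transmit interrupt: walk the TMD ring from tmdlast until a descriptor is
 * still owned by the chip, update the statistics (TDR, retry, carrier) and
 * clear tbusy so the queue gets kicked again via mark_bh(NET_BH).
 */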
static void xmit_intr(struct device *dev)
{
  int tmdstat;
  struct tmd *tmdp;
  struct priv *p = (struct priv *) dev->priv;

#ifdef NO_STATIC
  struct sk_buff *skb;
#endif

  while(p->xmit_queued)
  {
    tmdp = p->tmdhead + p->tmdlast;
    tmdstat = tmdp->u.s.status;
    if(tmdstat & XMIT_OWN) /* the chip still owns this descriptor */
      break;
#ifdef NO_STATIC
    skb = (struct sk_buff *) p->tmdbufs[p->tmdlast];
    dev_kfree_skb(skb,FREE_WRITE);
#endif

    if(tmdstat & XMIT_ERR)
    {
      printk("%s: xmit-error: %04x %04x\n",dev->name,(int) tmdstat,(int) tmdp->status2);
      if(tmdp->status2 & XMIT_TDRMASK)
        printk("%s: tdr-problems (e.g. no resistor)\n",dev->name);

      if(tmdp->status2 & XMIT_RTRY)
        p->stats.tx_aborted_errors++;
      if(tmdp->status2 & XMIT_LCAR)
        p->stats.tx_carrier_errors++;
      p->stats.tx_errors++;
      tmdp->status2 = 0;
    }
    else
      p->stats.tx_packets++;

    p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
    if(p->tmdlast == p->tmdnum)
      p->xmit_queued = 0;
  }

  dev->tbusy = 0;
  mark_bh(NET_BH);
}

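/*
 * Receive interrupt: process every RMD we own.  For a good frame a fresh
 * sk_buff is allocated; if it also lies below 16 MB the buffers are simply
 * swapped (no copy), otherwise the data is copied into the new skb and the
 * old DMA-safe buffer stays in the ring.
 */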
static void recv_intr(struct device *dev)
{
  struct rmd *rmdp;
  int rmdstat,len;
  struct sk_buff *skb,*skb1;
  struct priv *p = (struct priv *) dev->priv;

  rmdp = p->rmdhead + p->rmdnum;
  while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
  {
    if( (rmdstat & (RCV_START | RCV_END)) != (RCV_START | RCV_END) )
    {
      if(rmdstat & RCV_START) /* the frame didn't fit into one descriptor */
      {
        p->stats.rx_errors++;
        p->stats.rx_length_errors++;
        printk("%s: packet too long\n",dev->name);
      }
      rmdp->u.s.status = RCV_OWN; /* give the descriptor back to the chip */
    }
    else if(rmdstat & RCV_ERR)
    {
      printk("%s: receive-error: %04x\n",dev->name,(int) rmdstat );
      p->stats.rx_errors++;
      if(rmdstat & RCV_FRAM) p->stats.rx_frame_errors++;
      if(rmdstat & RCV_OFLO) p->stats.rx_over_errors++;
      if(rmdstat & RCV_CRC)  p->stats.rx_crc_errors++;
      rmdp->u.s.status = RCV_OWN;
      printk("%s: lance-status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
    }
    else
    {
      len = (rmdp->mlen & 0x0fff) - 4; /* message length without the FCS */
      skb = dev_alloc_skb(R_BUF_SIZE);
      if(skb != NULL)
      {
        if( (unsigned long) (skb->data + R_BUF_SIZE) & 0xff000000) {
          /* new buffer is above 16 MB: keep the old one for DMA and copy */
          memcpy(skb_put(skb,len),p->recv_skb[p->rmdnum]->data,len);
          skb1 = skb;
        }
        else {
          /* swap buffers: hand the filled skb upstream, DMA into the new one */
          skb1 = p->recv_skb[p->rmdnum];
          p->recv_skb[p->rmdnum] = skb;
          rmdp->u.buffer = (unsigned long) skb->data;
          skb_put(skb1,len);
        }
        rmdp->u.s.status = RCV_OWN;
        rmdp->mlen = 0;
        skb1->dev = dev;
        p->stats.rx_packets++;
        skb1->protocol=eth_type_trans(skb1,dev);
        netif_rx(skb1);
      }
      else
      {
        rmdp->u.s.status = RCV_OWN;
        printk("%s: can't alloc new sk_buff\n",dev->name);
        p->stats.rx_dropped++;
      }
    }
    p->rmdnum++; p->rmdnum &= RMDNUM-1;
    rmdp = p->rmdhead + p->rmdnum;
  }
}

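/*
 * Transmit a packet: restart the chip if the transmitter appears hung,
 * place the data in the next TMD slot (either by pointer or by copying
 * into the static bounce buffer), hand the descriptor to the chip and
 * trigger it with CSR0_TDMD.
 */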
static int ni65_send_packet(struct sk_buff *skb, struct device *dev)
{
  struct priv *p = (struct priv *) dev->priv;
  struct tmd *tmdp;

  if(dev->tbusy)
  {
    int tickssofar = jiffies - dev->trans_start;
    if (tickssofar < 25)
      return 1;

    printk("%s: xmitter timed out, try to restart!\n",dev->name);
    am7990_reinit(dev);
    dev->tbusy = 0;
    dev->trans_start = jiffies;
  }

  if(skb == NULL)
  {
    dev_tint(dev);
    return 0;
  }

  if (skb->len <= 0)
    return 0;

  /* block a timer-based transmit from overlapping */
  if (set_bit(0, (void*)&dev->tbusy) != 0)
  {
    printk("%s: Transmitter access conflict.\n", dev->name);
    return 1;
  }
  if(set_bit(0,(void*) &p->lock) != 0)
  {
    printk("%s: Queue was locked!\n",dev->name);
    return 1;
  }

  {
    short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;

    tmdp = p->tmdhead + p->tmdnum;

#ifdef NO_STATIC
    tmdp->u.buffer = (unsigned long) (skb->data);
    p->tmdbufs[p->tmdnum] = skb;
#else
    /* copy into the static bounce buffer; the mask strips the status byte
       that shares the descriptor's high byte with the buffer address */
    memcpy((char *) (tmdp->u.buffer & 0x00ffffff),(char *)skb->data,skb->len);
    dev_kfree_skb (skb, FREE_WRITE);
#endif
    tmdp->blen = -len;
    tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;

    cli();
    p->xmit_queued = 1;
    writedatareg(CSR0_TDMD | CSR0_INEA); /* transmit demand */
    p->tmdnum++; p->tmdnum &= TMDNUM-1;

    if( !((p->tmdhead + p->tmdnum)->u.s.status & XMIT_OWN) )
      dev->tbusy = 0;
    p->lock = 0;
    sti();

    dev->trans_start = jiffies;
  }

  return 0;
}

static struct enet_statistics *ni65_get_stats(struct device *dev)
{
  return &((struct priv *) dev->priv)->stats;
}

static void set_multicast_list(struct device *dev, int num_addrs, void *addrs)
{
}