This source file includes the following definitions:
- ni65_open
- ni65_close
- ni65_probe
- ni65_probe1
- am7990_reinit
- ni65_interrupt
- xmit_intr
- recv_intr
- ni65_send_packet
- ni65_get_stats
- set_multicast_list
/*
 * ni65.c: driver for the ni6510 ethernet card (AMD am7990 "lance" chip).
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/malloc.h>
#include <linux/interrupt.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include "ni65.h"

#ifndef HAVE_PORTRESERVE
#define check_region(ioaddr, size) 0
#define request_region(ioaddr, size, name) do { } while (0)
#endif

#ifndef NET_DEBUG
#define NET_DEBUG 2
#endif

#define NI65_TOTAL_SIZE 16	/* size of the i/o region */

/* expected first bytes of the station address, and the card id bytes */
#define SA_ADDR0 0x02
#define SA_ADDR1 0x07
#define SA_ADDR2 0x01
#define CARD_ID0 0x00
#define CARD_ID1 0x55

#define PORT dev->base_addr

/* ring sizes (must be powers of two); the masks hold log2(size)<<13
   for the init block's ring-pointer words */
#define RMDNUM 8
#define RMDNUMMASK 0x6000
#define TMDNUM 4
#define TMDNUMMASK 0x4000

#define R_BUF_SIZE 1518
#define T_BUF_SIZE 1518

/* 8 bytes alignment slack plus 8 bytes per descriptor */
#define MEMSIZE (8+RMDNUM*8+TMDNUM*8)

/* offsets of the board's i/o ports */
#define L_DATAREG 0x00		/* lance data port */
#define L_ADDRREG 0x02		/* lance register-select port */

#define L_RESET 0x04		/* writing here resets the lance */
#define L_CONFIG 0x05		/* irq/dma configuration */
#define L_EBASE 0x08		/* start of the ethernet-address bytes */

/* the lance's control/status registers, selected via L_ADDRREG */
#define CSR0 0x00
#define CSR1 0x01
#define CSR2 0x02
#define CSR3 0x03

#undef NO_STATIC

/*
 * all CSR accesses go through two i/o ports: write the CSR number to
 * the address port, then read or write the data port. The extra inw()
 * after each outw() acts as a small i/o delay.
 */
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
                           outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
                      inw(PORT+L_DATAREG))
#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}

static int ni65_probe1(struct device *dev,int);
static void ni65_interrupt(int irq, struct pt_regs *regs);
static void recv_intr(struct device *dev);
static void xmit_intr(struct device *dev);
static int ni65_open(struct device *dev);
static int am7990_reinit(struct device *dev);
static int ni65_send_packet(struct sk_buff *skb, struct device *dev);
static int ni65_close(struct device *dev);
static struct enet_statistics *ni65_get_stats(struct device *);
static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);

extern void *irq2dev_map[16];

struct priv
{
  struct init_block ib;
  void *memptr;			/* unaligned ring memory */
  struct rmd *rmdhead;		/* receive descriptor ring */
  struct tmd *tmdhead;		/* transmit descriptor ring */
  int rmdnum;			/* next receive descriptor to look at */
  int tmdnum,tmdlast;		/* next free / oldest pending xmit descriptor */
  struct sk_buff *recv_skb[RMDNUM];
  void *tmdbufs[TMDNUM];	/* static bounce buffers (or queued skbs) */
  int lock,xmit_queued;
  struct enet_statistics stats;
};

static int irqtab[] = { 9,12,15,5 };	/* irq for each 2-bit config value */
static int dmatab[] = { 0,3,5,6 };	/* dma channel for each 2-bit config value */

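/*
 * open/close: every open fully re-initializes the lance; closing just
 * resets it.
 */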
static int ni65_open(struct device *dev)
{
  if(am7990_reinit(dev))
  {
    dev->tbusy = 0;
    dev->interrupt = 0;
    dev->start = 1;
    return 0;
  }
  else
  {
    dev->start = 0;
    return -EAGAIN;
  }
}

static int ni65_close(struct device *dev)
{
  outw(0,PORT+L_RESET);	/* stop the lance */
  dev->tbusy = 1;
  dev->start = 0;
  return 0;
}
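
/*
 * probe: try a caller-supplied base address, or scan the known i/o
 * locations and check the two card-id bytes.
 */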
int ni65_probe(struct device *dev)
{
  int *port, ports[] = {0x300,0x320,0x340,0x360, 0};
  int base_addr = dev->base_addr;

  if (base_addr > 0x1ff)		/* check a single specified location */
    return ni65_probe1(dev, base_addr);
  else if (base_addr > 0)		/* don't probe at all */
    return ENXIO;

  for (port = ports; *port; port++)
  {
    int ioaddr = *port;
    if (check_region(ioaddr, NI65_TOTAL_SIZE))
      continue;
    if( inb(ioaddr+L_EBASE+6) != CARD_ID0 ||
        inb(ioaddr+L_EBASE+7) != CARD_ID1 )
      continue;
    dev->base_addr = ioaddr;
    if (ni65_probe1(dev, ioaddr) == 0)
      return 0;
  }

  dev->base_addr = base_addr;
  return ENODEV;
}
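
/*
 * lower-level probe: read the station address from the board, check
 * its first two bytes, take irq/dma from the config register, grab
 * the resources and allocate all buffers.
 */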
static int ni65_probe1(struct device *dev,int ioaddr)
{
  int i;
  unsigned char station_addr[6];
  struct priv *p;

  for(i=0;i<6;i++)
    station_addr[i] = dev->dev_addr[i] = inb(PORT+L_EBASE+i);

  if(station_addr[0] != SA_ADDR0 || station_addr[1] != SA_ADDR1)
  {
    printk("%s: wrong hardware address\n",dev->name);
    return ENODEV;
  }

  if(dev->irq == 0)
    dev->irq = irqtab[(inw(PORT+L_CONFIG)>>2)&3];
  if(dev->dma == 0)
    dev->dma = dmatab[inw(PORT+L_CONFIG)&3];

  printk("%s: %s found at %#3x, IRQ %d DMA %d.\n", dev->name,
         "network card", dev->base_addr, dev->irq,dev->dma);

  {
    int irqval = request_irq(dev->irq, &ni65_interrupt,0,"ni65");
    if (irqval) {
      printk ("%s: unable to get IRQ %d (irqval=%d).\n",
              dev->name,dev->irq, irqval);
      return EAGAIN;
    }
    if(request_dma(dev->dma, "ni65") != 0)
    {
      printk("%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
      free_irq(dev->irq);
      return EAGAIN;
    }
  }
  irq2dev_map[dev->irq] = dev;

  request_region(ioaddr,NI65_TOTAL_SIZE,"ni65");

  p = dev->priv = (void *) kmalloc(sizeof(struct priv),GFP_KERNEL);
  if(p == NULL)
    return EAGAIN;
  memset((char *) dev->priv,0,sizeof(struct priv));

  dev->open = ni65_open;
  dev->stop = ni65_close;
  dev->hard_start_xmit = ni65_send_packet;
  dev->get_stats = ni65_get_stats;
  dev->set_multicast_list = set_multicast_list;

  ether_setup(dev);

  dev->interrupt = 0;
  dev->tbusy = 0;
  dev->start = 0;

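  /* all buffers the lance touches must lie below 16MB: it can only
     generate 24-bit bus addresses */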
  if( (p->memptr = kmalloc(MEMSIZE,GFP_KERNEL)) == NULL) {
    printk("%s: Can't alloc TMD/RMD-buffer.\n",dev->name);
    return EAGAIN;
  }
  if( (unsigned long) (p->memptr + MEMSIZE) & 0xff000000) {
    printk("%s: Can't alloc TMD/RMD buffer in lower 16MB!\n",dev->name);
    return EAGAIN;
  }
  p->tmdhead = (struct tmd *) ((( (unsigned long)p->memptr ) + 8) & 0xfffffff8);	/* 8-byte aligned */
  p->rmdhead = (struct rmd *) (p->tmdhead + TMDNUM);

#ifndef NO_STATIC
  for(i=0;i<TMDNUM;i++)
  {
    if( (p->tmdbufs[i] = kmalloc(T_BUF_SIZE,GFP_ATOMIC)) == NULL) {
      printk("%s: Can't alloc Xmit-Mem.\n",dev->name);
      return EAGAIN;
    }
    if( (unsigned long) (p->tmdbufs[i]+T_BUF_SIZE) & 0xff000000) {
      printk("%s: Can't alloc Xmit-Mem in lower 16MB!\n",dev->name);
      return EAGAIN;
    }
  }
#endif

  for(i=0;i<RMDNUM;i++)
  {
    if( (p->recv_skb[i] = dev_alloc_skb(R_BUF_SIZE)) == NULL) {
      printk("%s: unable to alloc recv-mem\n",dev->name);
      return EAGAIN;
    }
    if( (unsigned long) (p->recv_skb[i]->data + R_BUF_SIZE) & 0xff000000) {
      printk("%s: unable to alloc receive-memory in lower 16MB!\n",dev->name);
      return EAGAIN;
    }
  }

  return 0;
}
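
/*
 * bring the lance into a running state: stop it, set up the descriptor
 * rings and the init block, load the init block address into CSR1/CSR2,
 * start initialization and wait for IDON. Returns 1 on success.
 */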
static int am7990_reinit(struct device *dev)
{
  int i,j;
  struct tmd *tmdp;
  struct rmd *rmdp;
  struct priv *p = (struct priv *) dev->priv;

  p->lock = 0;
  p->xmit_queued = 0;

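  /* the lance is a busmaster: the PC dma channel is only switched to
     cascade mode so the card can take over the bus */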
  disable_dma(dev->dma);
  set_dma_mode(dev->dma,DMA_MODE_CASCADE);
  enable_dma(dev->dma);

  outw(0,PORT+L_RESET);	/* stop the lance */
  if(inw(PORT+L_DATAREG) != 0x4)	/* CSR0 must now read exactly STOP */
  {
    printk("%s: can't RESET ni6510 card: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
    disable_dma(dev->dma);
    free_dma(dev->dma);
    free_irq(dev->irq);
    return 0;
  }

  memset(p->memptr,0,MEMSIZE);

  p->tmdnum = 0; p->tmdlast = 0;
  for(i=0;i<TMDNUM;i++)
  {
    tmdp = p->tmdhead + i;
#ifndef NO_STATIC
    tmdp->u.buffer = (unsigned long) p->tmdbufs[i];
#endif
    tmdp->u.s.status = XMIT_START | XMIT_END;
  }

  p->rmdnum = 0;
  for(i=0;i<RMDNUM;i++)
  {
    rmdp = p->rmdhead + i;
    rmdp->u.buffer = (unsigned long) p->recv_skb[i]->data;
    rmdp->u.s.status = RCV_OWN;	/* descriptor belongs to the lance */
    rmdp->blen = -R_BUF_SIZE;	/* buffer lengths are stored in two's complement */
    rmdp->mlen = 0;
  }

  for(i=0;i<6;i++)
  {
    p->ib.eaddr[i] = dev->dev_addr[i];
  }
  p->ib.mode = 0;
  for(i=0;i<8;i++)
    p->ib.filter[i] = 0;	/* no multicast */
  /* ring pointers: 24-bit address plus the ring-length code in the top bits */
  p->ib.trplow = (unsigned short) (( (unsigned long) p->tmdhead ) & 0xffff);
  p->ib.trphigh = (unsigned short) ((( (unsigned long) p->tmdhead )>>16) & 0x00ff) | TMDNUMMASK;
  p->ib.rrplow = (unsigned short) (( (unsigned long) p->rmdhead ) & 0xffff);
  p->ib.rrphigh = (unsigned short) ((( (unsigned long) p->rmdhead )>>16) & 0x00ff) | RMDNUMMASK;

  writereg(0,CSR3);
  /* CSR1/CSR2 hold the 24-bit address of the init block */
  writereg((unsigned short) (((unsigned long) &(p->ib)) & 0xffff),CSR1);
  writereg((unsigned short) (((unsigned long) &(p->ib))>>16),CSR2);

  writereg(CSR0_INIT,CSR0);	/* the lance fetches the init block via DMA */

  /* busy-wait for initialization-done */
  for(i=0;i<5;i++)
  {
    for(j=0;j<2000000;j++);	/* crude delay */
    if(inw(PORT+L_DATAREG) & CSR0_IDON) break;
  }
  if(i == 5)
  {
    printk("%s: can't init am7990, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
    disable_dma(dev->dma);
    free_dma(dev->dma);
    free_irq(dev->irq);
    return 0;
  }

  /* clear pending status bits, enable interrupts and start the chip */
  writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);

  return 1;
}
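
/*
 * interrupt handler: acknowledge what CSR0 reports, count babble and
 * missed-frame errors, then service the receive and transmit rings.
 */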
static void ni65_interrupt(int irq, struct pt_regs * regs)
{
  int csr0;
  struct device *dev = (struct device *) irq2dev_map[irq];

  if (dev == NULL) {
    printk ("net_interrupt(): irq %d for unknown device.\n", irq);
    return;
  }

  csr0 = inw(PORT+L_DATAREG);
  writedatareg(csr0 & CSR0_CLRALL);	/* acknowledge the interrupt causes */

  dev->interrupt = 1;

  if(csr0 & CSR0_ERR)
  {
    struct priv *p = (struct priv *) dev->priv;

    if(csr0 & CSR0_BABL)
      p->stats.tx_errors++;
    if(csr0 & CSR0_MISS)
      p->stats.rx_errors++;
  }

  if(csr0 & CSR0_RINT)
    recv_intr(dev);
  if(csr0 & CSR0_TINT)
    xmit_intr(dev);

  writedatareg(CSR0_INEA);	/* re-enable interrupts */
  dev->interrupt = 0;
}
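
/*
 * transmit-complete: walk the ring from the oldest pending descriptor
 * until we reach one the lance still owns, collecting statistics.
 */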
static void xmit_intr(struct device *dev)
{
  int tmdstat;
  struct tmd *tmdp;
  struct priv *p = (struct priv *) dev->priv;

#ifdef NO_STATIC
  struct sk_buff *skb;
#endif

  while(p->xmit_queued)
  {
    tmdp = p->tmdhead + p->tmdlast;
    tmdstat = tmdp->u.s.status;
    if(tmdstat & XMIT_OWN)	/* the lance hasn't sent this one yet */
      break;
#ifdef NO_STATIC
    skb = (struct sk_buff *) p->tmdbufs[p->tmdlast];
    dev_kfree_skb(skb,FREE_WRITE);
#endif

    if(tmdstat & XMIT_ERR)
    {
      printk("%s: xmit-error: %04x %04x\n",dev->name,(int) tmdstat,(int) tmdp->status2);
      if(tmdp->status2 & XMIT_TDRMASK)
        printk("%s: tdr-problems (e.g. no resistor)\n",dev->name);

      if(tmdp->status2 & XMIT_RTRY)
        p->stats.tx_aborted_errors++;
      if(tmdp->status2 & XMIT_LCAR)
        p->stats.tx_carrier_errors++;
      p->stats.tx_errors++;
      tmdp->status2 = 0;
    }
    else
      p->stats.tx_packets++;

    p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
    if(p->tmdlast == p->tmdnum)
      p->xmit_queued = 0;
  }

  dev->tbusy = 0;
  mark_bh(NET_BH);
}
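
/*
 * receive: for each descriptor we own, allocate a fresh skb. If the
 * new skb is DMA-capable (below 16MB) it is swapped into the ring and
 * the filled one goes up the stack; otherwise the data is copied out
 * and the old ring buffer stays in place.
 */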
static void recv_intr(struct device *dev)
{
  struct rmd *rmdp;
  int rmdstat,len;
  struct sk_buff *skb,*skb1;
  struct priv *p = (struct priv *) dev->priv;

  rmdp = p->rmdhead + p->rmdnum;
  while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
  {
    if( (rmdstat & (RCV_START | RCV_END)) != (RCV_START | RCV_END) )
    {
      /* the packet spilled over into more than one descriptor */
      if(rmdstat & RCV_START)
      {
        p->stats.rx_errors++;
        p->stats.rx_length_errors++;
        printk("%s: packet too long\n",dev->name);
      }
      rmdp->u.s.status = RCV_OWN;	/* give the descriptor back */
    }
    else if(rmdstat & RCV_ERR)
    {
      printk("%s: receive-error: %04x\n",dev->name,(int) rmdstat );
      p->stats.rx_errors++;
      if(rmdstat & RCV_FRAM) p->stats.rx_frame_errors++;
      if(rmdstat & RCV_OFLO) p->stats.rx_over_errors++;
      if(rmdstat & RCV_CRC) p->stats.rx_crc_errors++;
      rmdp->u.s.status = RCV_OWN;
      printk("%s: lance-status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
    }
    else
    {
      len = (rmdp->mlen & 0x0fff) - 4;	/* strip the trailing CRC */
      skb = dev_alloc_skb(R_BUF_SIZE);
      if(skb != NULL)
      {
        if( (unsigned long) (skb->data + R_BUF_SIZE) & 0xff000000) {
          /* new skb is above 16MB: copy out, keep the old ring buffer */
          memcpy(skb_put(skb,len),p->recv_skb[p->rmdnum]->data,len);
          skb1 = skb;
        }
        else {
          /* swap: pass the filled skb up, give the fresh one to the ring */
          skb1 = p->recv_skb[p->rmdnum];
          p->recv_skb[p->rmdnum] = skb;
          rmdp->u.buffer = (unsigned long) skb->data;
          skb_put(skb1,len);
        }
        rmdp->u.s.status = RCV_OWN;
        rmdp->mlen = 0;
        skb1->dev = dev;
        p->stats.rx_packets++;
        skb1->protocol=eth_type_trans(skb1,dev);
        netif_rx(skb1);
      }
      else
      {
        rmdp->u.s.status = RCV_OWN;
        printk("%s: can't alloc new sk_buff\n",dev->name);
        p->stats.rx_dropped++;
      }
    }
    p->rmdnum++; p->rmdnum &= RMDNUM-1;
    rmdp = p->rmdhead + p->rmdnum;
  }
}
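
/*
 * transmit: take the next free descriptor. With static buffers
 * (NO_STATIC undefined) the packet is copied into a pre-allocated
 * bounce buffer below 16MB; otherwise the skb itself is handed to the
 * lance and freed later in xmit_intr().
 */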
static int ni65_send_packet(struct sk_buff *skb, struct device *dev)
{
  struct priv *p = (struct priv *) dev->priv;
  struct tmd *tmdp;

  if(dev->tbusy)
  {
    int tickssofar = jiffies - dev->trans_start;
    if (tickssofar < 25)
      return 1;

    printk("%s: transmitter timed out, trying to restart!\n",dev->name);
    am7990_reinit(dev);
    dev->tbusy=0;
    dev->trans_start = jiffies;
  }

  if(skb == NULL)
  {
    dev_tint(dev);
    return 0;
  }

  if (skb->len <= 0)
    return 0;

  if (set_bit(0, (void*)&dev->tbusy) != 0)
  {
    printk("%s: Transmitter access conflict.\n", dev->name);
    return 1;
  }
  if(set_bit(0,(void*) &p->lock) != 0)
  {
    printk("%s: Queue was locked!\n",dev->name);
    return 1;
  }

  {
    short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;	/* pad to the minimum frame size */

    tmdp = p->tmdhead + p->tmdnum;

#ifdef NO_STATIC
    tmdp->u.buffer = (unsigned long) (skb->data);
    p->tmdbufs[p->tmdnum] = skb;	/* remember the skb so xmit_intr() can free it */
#else
    memcpy((char *) (tmdp->u.buffer & 0x00ffffff),(char *)skb->data,skb->len);
    dev_kfree_skb (skb, FREE_WRITE);
#endif
    tmdp->blen = -len;			/* length in two's complement */
    tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;

    cli();
    p->xmit_queued = 1;
    writedatareg(CSR0_TDMD | CSR0_INEA);	/* demand an immediate transmit poll */
    p->tmdnum++; p->tmdnum &= TMDNUM-1;

    if( !((p->tmdhead + p->tmdnum)->u.s.status & XMIT_OWN) )
      dev->tbusy = 0;			/* next descriptor is still free */
    p->lock = 0;
    sti();

    dev->trans_start = jiffies;
  }

  return 0;
}

static struct enet_statistics *ni65_get_stats(struct device *dev)
{
  return &((struct priv *) dev->priv)->stats;
}
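
/*
 * multicast filtering is not implemented: the logical address filter
 * in the init block stays all-zero, so multicast frames are ignored.
 */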
static void set_multicast_list(struct device *dev, int num_addrs, void *addrs)
{
}