This source file includes the following definitions.
- ni65_open
- ni65_close
- ni65_probe
- ni65_probe1
- am7990_reinit
- ni65_interrupt
- xmit_intr
- recv_intr
- ni65_send_packet
- ni65_get_stats
- set_multicast_list
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/string.h>
36 #include <linux/ptrace.h>
37 #include <linux/errno.h>
38 #include <linux/ioport.h>
39 #include <linux/malloc.h>
40 #include <linux/interrupt.h>
41 #include <asm/bitops.h>
42 #include <asm/io.h>
43 #include <asm/dma.h>
44
45 #include <linux/netdevice.h>
46 #include <linux/etherdevice.h>
47 #include <linux/skbuff.h>
48
49 #include "ni65.h"
50
51
52
53
54
/*
 * On kernels without port-reservation support, stub out the region
 * bookkeeping: check_region() always reports the region as free and
 * register_iomem() does nothing.
 */
#ifndef HAVE_PORTRESERVE
#define check_region(ioaddr, size) 0
/* FIX: the original definition was
 *   #define register_iomem(ioaddr, size,name); do ; while (0)
 * i.e. the replacement text began with a stray ';', which breaks any
 * use inside an unbraced if/else.  Use the standard do-while(0) stub. */
#define register_iomem(ioaddr, size, name) do { } while (0)
#endif
59
#ifndef NET_DEBUG
#define NET_DEBUG 2		/* default debug verbosity */
#endif

#define NI65_TOTAL_SIZE 16	/* size of the card's I/O region */

/* first three bytes of the station address (vendor-prefix check) */
#define SA_ADDR0 0x02
#define SA_ADDR1 0x07
#define SA_ADDR2 0x01
/* ID bytes used by the probe to detect the board */
#define CARD_ID0 0x00
#define CARD_ID1 0x55

#define PORT dev->base_addr	/* shorthand for the board's base I/O port */

/* receive/transmit ring sizes (powers of two) and the matching LANCE
 * ring-length codes OR'ed into the init block's high words
 * (log2(count) << 13: 8 -> 0x6000, 4 -> 0x4000) */
#define RMDNUM 8
#define RMDNUMMASK 0x6000
#define TMDNUM 4
#define TMDNUMMASK 0x4000

/* one maximum-size ethernet frame per buffer */
#define R_BUF_SIZE 1518
#define T_BUF_SIZE 1518

/* descriptor memory: 8 bytes of alignment slack + both rings
 * (NOTE(review): deliberately left unparenthesized; only ever used as
 * a stand-alone argument, never inside a larger expression) */
#define MEMSIZE 8+RMDNUM*8+TMDNUM*8

/* I/O port offsets within the card's region */
#define L_DATAREG 0x00		/* LANCE data port */
#define L_ADDRREG 0x02		/* LANCE register-address port */

#define L_RESET 0x04		/* writing here resets the board */
#define L_CONFIG 0x05		/* jumper configuration (IRQ/DMA encoding) */
#define L_EBASE 0x08		/* station address / ID byte window */

/* LANCE control/status register numbers */
#define CSR0 0x00
#define CSR1 0x01
#define CSR2 0x02
#define CSR3 0x03

/* NO_STATIC undefined -> use static (bounce-buffer) transmit mode */
#undef NO_STATIC

/* Indirect LANCE register access: select the register through the
 * address port, then access the data port.  The extra inw()s after
 * each outw() apparently serve as I/O recovery delays -- TODO confirm. */
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
                           outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
                       inw(PORT+L_DATAREG))
#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}

/* forward declarations */
static int ni65_probe1(struct device *dev,int);
static void ni65_interrupt(int reg_ptr);
static void recv_intr(struct device *dev);
static void xmit_intr(struct device *dev);
static int ni65_open(struct device *dev);
static int am7990_reinit(struct device *dev);
static int ni65_send_packet(struct sk_buff *skb, struct device *dev);
static int ni65_close(struct device *dev);
static struct enet_statistics *ni65_get_stats(struct device *);

static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);

extern void *irq2dev_map[16];	/* kernel's irq -> device lookup table */

/* per-device private state */
struct priv
{
	struct init_block ib;		/* LANCE initialization block */
	void *memptr;			/* raw (unaligned) descriptor memory */
	struct rmd *rmdhead;		/* aligned receive descriptor ring */
	struct tmd *tmdhead;		/* aligned transmit descriptor ring */
	int rmdnum;			/* next receive slot to inspect */
	int tmdnum,tmdlast;		/* next free / oldest pending xmit slot */
	struct sk_buff *recv_skb[RMDNUM];	/* skbs backing the rx ring */
	void *tmdbufs[TMDNUM];		/* static xmit buffers (or queued skbs) */
	int lock,xmit_queued;		/* xmit-path lock / "xmits pending" flag */
	struct enet_statistics stats;	/* standard interface statistics */
};

/* IRQ and DMA channel as encoded by the 2-bit jumper fields in the
 * config register (see ni65_probe1) */
int irqtab[] = { 9,12,15,5 };
int dmatab[] = { 0,3,5,6 };
144
145
146
147
148
149 static int ni65_open(struct device *dev)
150 {
151 if(am7990_reinit(dev))
152 {
153 dev->tbusy = 0;
154 dev->interrupt = 0;
155 dev->start = 1;
156 return 0;
157 }
158 else
159 {
160 dev->start = 0;
161 return -EAGAIN;
162 }
163 }
164
165 static int ni65_close(struct device *dev)
166 {
167 outw(0,PORT+L_RESET);
168 dev->tbusy = 1;
169 dev->start = 0;
170 return 0;
171 }
172
173
174
175
176
177
178 int ni65_probe(struct device *dev)
179 {
180 int *port, ports[] = {0x300,0x320,0x340,0x360, 0};
181 int base_addr = dev->base_addr;
182
183 if (base_addr > 0x1ff)
184 return ni65_probe1(dev, base_addr);
185 else if (base_addr > 0)
186 return ENXIO;
187
188 for (port = ports; *port; port++)
189 {
190 int ioaddr = *port;
191 if (check_region(ioaddr, NI65_TOTAL_SIZE))
192 continue;
193 if( !(inb(ioaddr+L_EBASE+6) == CARD_ID0) ||
194 !(inb(ioaddr+L_EBASE+7) == CARD_ID1) )
195 continue;
196 dev->base_addr = ioaddr;
197 if (ni65_probe1(dev, ioaddr) == 0)
198 return 0;
199 }
200
201 dev->base_addr = base_addr;
202 return ENODEV;
203 }
204
205
206 static int ni65_probe1(struct device *dev,int ioaddr)
207 {
208 int i;
209 unsigned char station_addr[6];
210 struct priv *p;
211
212 for(i=0;i<6;i++)
213 station_addr[i] = dev->dev_addr[i] = inb(PORT+L_EBASE+i);
214
215 if(station_addr[0] != SA_ADDR0 || station_addr[1] != SA_ADDR1)
216 {
217 printk("%s: wrong Hardaddress \n",dev->name);
218 return ENODEV;
219 }
220
221 if(dev->irq == 0)
222 dev->irq = irqtab[(inw(PORT+L_CONFIG)>>2)&3];
223 if(dev->dma == 0)
224 dev->dma = dmatab[inw(PORT+L_CONFIG)&3];
225
226 printk("%s: %s found at %#3x, IRQ %d DMA %d.\n", dev->name,
227 "network card", dev->base_addr, dev->irq,dev->dma);
228
229 {
230 int irqval = request_irq(dev->irq, &ni65_interrupt,0,"ni65");
231 if (irqval) {
232 printk ("%s: unable to get IRQ %d (irqval=%d).\n",
233 dev->name,dev->irq, irqval);
234 return EAGAIN;
235 }
236 if(request_dma(dev->dma, "ni65") != 0)
237 {
238 printk("%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
239 free_irq(dev->irq);
240 return EAGAIN;
241 }
242 }
243 irq2dev_map[dev->irq] = dev;
244
245
246 register_iomem(ioaddr,NI65_TOTAL_SIZE,"ni65");
247
248 p = dev->priv = (void *) kmalloc(sizeof(struct priv),GFP_KERNEL);
249 memset((char *) dev->priv,0,sizeof(struct priv));
250
251 dev->open = ni65_open;
252 dev->stop = ni65_close;
253 dev->hard_start_xmit = ni65_send_packet;
254 dev->get_stats = ni65_get_stats;
255 dev->set_multicast_list = set_multicast_list;
256
257 ether_setup(dev);
258
259 dev->interrupt = 0;
260 dev->tbusy = 0;
261 dev->start = 0;
262
263 if( (p->memptr = kmalloc(MEMSIZE,GFP_KERNEL)) == NULL) {
264 printk("%s: Can't alloc TMD/RMD-buffer.\n",dev->name);
265 return EAGAIN;
266 }
267 if( (unsigned long) (p->memptr + MEMSIZE) & 0xff000000) {
268 printk("%s: Can't alloc TMD/RMD buffer in lower 16MB!\n",dev->name);
269 return EAGAIN;
270 }
271 p->tmdhead = (struct tmd *) ((( (unsigned long)p->memptr ) + 8) & 0xfffffff8);
272 p->rmdhead = (struct rmd *) (p->tmdhead + TMDNUM);
273
274 #ifndef NO_STATIC
275 for(i=0;i<TMDNUM;i++)
276 {
277 if( (p->tmdbufs[i] = kmalloc(T_BUF_SIZE,GFP_ATOMIC)) == NULL) {
278 printk("%s: Can't alloc Xmit-Mem.\n",dev->name);
279 return EAGAIN;
280 }
281 if( (unsigned long) (p->tmdbufs[i]+T_BUF_SIZE) & 0xff000000) {
282 printk("%s: Can't alloc Xmit-Mem in lower 16MB!\n",dev->name);
283 return EAGAIN;
284 }
285 }
286 #endif
287
288 for(i=0;i<RMDNUM;i++)
289 {
290 if( (p->recv_skb[i] = (struct sk_buff *) alloc_skb(R_BUF_SIZE,GFP_ATOMIC)) == NULL) {
291 printk("%s: unable to alloc recv-mem\n",dev->name);
292 return EAGAIN;
293 }
294 if( (unsigned long) (p->recv_skb[i]->data + R_BUF_SIZE) & 0xff000000) {
295 printk("%s: unable to alloc receive-memory in lower 16MB!\n",dev->name);
296 return EAGAIN;
297 }
298 }
299
300 return 0;
301 }
302
303
304
305
306
/*
 * (Re)initialize the Am7990 LANCE: stop the chip, rebuild both
 * descriptor rings and the init block, feed the init block's address
 * to the chip and wait for initialization-done.
 * Returns 1 on success, 0 on failure (after releasing IRQ and DMA).
 */
static int am7990_reinit(struct device *dev)
{
	int i,j;
	struct tmd *tmdp;
	struct rmd *rmdp;
	struct priv *p = (struct priv *) dev->priv;

	p->lock = 0;
	p->xmit_queued = 0;

	/* cascade mode: the LANCE bus-masters through this DMA channel */
	disable_dma(dev->dma);
	set_dma_mode(dev->dma,DMA_MODE_CASCADE);
	enable_dma(dev->dma);

	/* reset the board; CSR0 must then read 0x4 (presumably the STOP
	 * bit -- TODO confirm against the CSR0 bit definitions) */
	outw(0,PORT+L_RESET);
	if(inw(PORT+L_DATAREG) != 0x4)
	{
		printk("%s: can't RESET ni6510 card: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
		disable_dma(dev->dma);
		free_dma(dev->dma);
		free_irq(dev->irq);
		return 0;	/* caller treats 0 as failure */
	}

	memset(p->memptr,0,MEMSIZE);

	/* transmit ring: mark each slot start+end of frame, not chip-owned */
	p->tmdnum = 0; p->tmdlast = 0;
	for(i=0;i<TMDNUM;i++)
	{
		tmdp = p->tmdhead + i;
#ifndef NO_STATIC
		tmdp->u.buffer = (unsigned long) p->tmdbufs[i];
#endif
		tmdp->u.s.status = XMIT_START | XMIT_END;
	}

	/* receive ring: point each slot at its skb and give it to the chip */
	p->rmdnum = 0;
	for(i=0;i<RMDNUM;i++)
	{
		rmdp = p->rmdhead + i;
		rmdp->u.buffer = (unsigned long) p->recv_skb[i]->data;
		rmdp->u.s.status = RCV_OWN;	/* owned by the chip */
		rmdp->blen = -R_BUF_SIZE;	/* length in two's complement form */
		rmdp->mlen = 0;
	}

	/* build the init block: station address, no multicast filtering,
	 * and both ring addresses (low 16 bits + high byte, with the
	 * ring-length code OR'ed into the high word) */
	for(i=0;i<6;i++)
	{
		p->ib.eaddr[i] = dev->dev_addr[i];
	}
	p->ib.mode = 0;
	for(i=0;i<8;i++)
		p->ib.filter[i] = 0;
	p->ib.trplow = (unsigned short) (( (unsigned long) p->tmdhead ) & 0xffff);
	p->ib.trphigh = (unsigned short) ((( (unsigned long) p->tmdhead )>>16) & 0x00ff) | TMDNUMMASK;
	p->ib.rrplow = (unsigned short) (( (unsigned long) p->rmdhead ) & 0xffff);
	p->ib.rrphigh = (unsigned short) ((( (unsigned long) p->rmdhead )>>16) & 0x00ff) | RMDNUMMASK;

	/* hand the init block's address to the chip and start init */
	writereg(0,CSR3);
	writereg((unsigned short) (((unsigned long) &(p->ib)) & 0xffff),CSR1);
	writereg((unsigned short) (((unsigned long) &(p->ib))>>16),CSR2);

	writereg(CSR0_INIT,CSR0);

	/* busy-wait (crude delay loops) for the IDON "init done" bit */
	for(i=0;i<5;i++)
	{
		for(j=0;j<2000000;j++);
		if(inw(PORT+L_DATAREG) & CSR0_IDON) break;
	}
	if(i == 5)
	{
		printk("%s: can't init am7990, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
		disable_dma(dev->dma);
		free_dma(dev->dma);
		free_irq(dev->irq);
		return 0;
	}

	/* clear pending status, enable interrupts, start the chip */
	writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);

	return 1;	/* success */
}
395
396
397
398
399
/*
 * Main interrupt handler: read CSR0, acknowledge all pending status
 * bits, account error conditions, dispatch to the receive/transmit
 * handlers, then re-enable chip interrupts.
 */
static void ni65_interrupt(int reg_ptr)
{
	/* pre-1.3 IRQ calling convention: the saved orig_eax holds -(irq+2) */
	int irq = -(((struct pt_regs *)reg_ptr)->orig_eax+2);
	int csr0;
	struct device *dev = (struct device *) irq2dev_map[irq];

	if (dev == NULL) {
		printk ("net_interrupt(): irq %d for unknown device.\n", irq);
		return;
	}

	csr0 = inw(PORT+L_DATAREG);
	writedatareg(csr0 & CSR0_CLRALL);	/* acknowledge everything that is set */

	dev->interrupt = 1;

	if(csr0 & CSR0_ERR)
	{
		struct priv *p = (struct priv *) dev->priv;

		if(csr0 & CSR0_BABL)	/* transmitter error */
			p->stats.tx_errors++;
		if(csr0 & CSR0_MISS)	/* missed incoming frame */
			p->stats.rx_errors++;
	}

	if(csr0 & CSR0_RINT)	/* receive interrupt */
	{
		recv_intr(dev);
	}
	if(csr0 & CSR0_TINT)	/* transmit interrupt */
	{
		xmit_intr(dev);
	}

	writedatareg(CSR0_INEA);	/* re-enable interrupts on the chip */
	dev->interrupt = 0;

	return;
}
440
441
442
443
444
445
/*
 * Transmit-done handling: walk the transmit ring from the oldest
 * pending slot (tmdlast), reclaiming every descriptor the chip has
 * given back, accounting errors and (in dynamic-buffer mode) freeing
 * the queued skbs.  Finally clears tbusy so queued output can resume.
 */
static void xmit_intr(struct device *dev)
{
	int tmdstat;
	struct tmd *tmdp;
	struct priv *p = (struct priv *) dev->priv;

#ifdef NO_STATIC
	struct sk_buff *skb;
#endif

	while(p->xmit_queued)
	{
		tmdp = p->tmdhead + p->tmdlast;
		tmdstat = tmdp->u.s.status;
		if(tmdstat & XMIT_OWN)	/* still owned by the chip -> not done */
			break;
#ifdef NO_STATIC
		/* dynamic mode: the descriptor pointed directly into this skb */
		skb = (struct sk_buff *) p->tmdbufs[p->tmdlast];
		dev_kfree_skb(skb,FREE_WRITE);
#endif

		if(tmdstat & XMIT_ERR)
		{
			printk("%s: xmit-error: %04x %04x\n",dev->name,(int) tmdstat,(int) tmdp->status2);
			if(tmdp->status2 & XMIT_TDRMASK)
				printk("%s: tdr-problems (e.g. no resistor)\n",dev->name);

			if(tmdp->status2 & XMIT_RTRY)	/* retry failure */
				p->stats.tx_aborted_errors++;
			if(tmdp->status2 & XMIT_LCAR)	/* lost carrier */
				p->stats.tx_carrier_errors++;
			p->stats.tx_errors++;
			tmdp->status2 = 0;	/* clear for the slot's reuse */
		}
		else
			p->stats.tx_packets++;

		/* ring size is a power of two -> mask advances the index */
		p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
		if(p->tmdlast == p->tmdnum)	/* caught up with the producer */
			p->xmit_queued = 0;
	}

	/* at least one slot is free again -> kick the queue */
	dev->tbusy = 0;
	mark_bh(NET_BH);
}
492
493
494
495
496
/*
 * Receive handling: process every descriptor the chip has handed back.
 * Good frames are passed to the network layer; when a replacement skb
 * can be allocated below 16MB it is swapped into the ring so the data
 * need not be copied, otherwise the frame is copied out.
 */
static void recv_intr(struct device *dev)
{
	struct rmd *rmdp;
	int rmdstat,len;
	struct sk_buff *skb,*skb1;
	struct priv *p = (struct priv *) dev->priv;

	rmdp = p->rmdhead + p->rmdnum;
	while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))	/* returned to us */
	{
		if( (rmdstat & (RCV_START | RCV_END)) != (RCV_START | RCV_END) )
		{
			/* frame did not fit in one descriptor -> drop it */
			if(rmdstat & RCV_START)
			{
				p->stats.rx_errors++;
				p->stats.rx_length_errors++;
				printk("%s: packet too long\n",dev->name);
			}
			rmdp->u.s.status = RCV_OWN;	/* recycle the buffer */
		}
		else if(rmdstat & RCV_ERR)
		{
			printk("%s: receive-error: %04x\n",dev->name,(int) rmdstat );
			p->stats.rx_errors++;
			if(rmdstat & RCV_FRAM) p->stats.rx_frame_errors++;
			if(rmdstat & RCV_OFLO) p->stats.rx_over_errors++;
			if(rmdstat & RCV_CRC) p->stats.rx_crc_errors++;
			rmdp->u.s.status = RCV_OWN;
			printk("%s: lance-status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
		}
		else
		{
			len = (rmdp->mlen & 0x0fff) - 4;	/* strip trailing CRC */
			skb = alloc_skb(R_BUF_SIZE,GFP_ATOMIC);
			if(skb != NULL)
			{
				if( (unsigned long) (skb->data + R_BUF_SIZE) & 0xff000000) {
					/* new skb not DMA-reachable: copy instead of swap */
					memcpy(skb->data,p->recv_skb[p->rmdnum]->data,len);
					skb1 = skb;
				}
				else {
					/* swap: fresh skb into the ring, full one goes up */
					skb1 = p->recv_skb[p->rmdnum];
					p->recv_skb[p->rmdnum] = skb;
					rmdp->u.buffer = (unsigned long) (skb->data);
				}
				rmdp->u.s.status = RCV_OWN;	/* re-arm the slot */
				rmdp->mlen = 0;
				skb1->len = len;
				skb1->dev = dev;
				p->stats.rx_packets++;
				netif_rx(skb1);
			}
			else
			{
				/* out of memory: recycle the buffer, drop the frame */
				rmdp->u.s.status = RCV_OWN;
				printk("%s: can't alloc new sk_buff\n",dev->name);
				p->stats.rx_dropped++;
			}
		}
		p->rmdnum++; p->rmdnum &= RMDNUM-1;	/* advance around the ring */
		rmdp = p->rmdhead + p->rmdnum;
	}
}
560
561
562
563
564
/*
 * hard_start_xmit entry point: queue one packet for transmission.
 * Returns 0 when the packet was accepted, 1 when the caller should
 * retry later.  Includes the old-style watchdog: if the transmitter
 * has been busy for ~25 jiffies the chip is reinitialized.
 */
static int ni65_send_packet(struct sk_buff *skb, struct device *dev)
{
	struct priv *p = (struct priv *) dev->priv;
	struct tmd *tmdp;

	if(dev->tbusy)
	{
		int tickssofar = jiffies - dev->trans_start;
		if (tickssofar < 25)	/* not stuck long enough yet */
			return 1;

		printk("%s: xmitter timed out, try to restart!\n",dev->name);
		am7990_reinit(dev);
		dev->tbusy=0;
		dev->trans_start = jiffies;
	}

	/* a NULL skb means "kick the transmitter" in the old net API */
	if(skb == NULL)
	{
		dev_tint(dev);
		return 0;
	}

	if (skb->len <= 0)
		return 0;

	/* set_bit returns the old value -> atomic test-and-set */
	if (set_bit(0, (void*)&dev->tbusy) != 0)
	{
		printk("%s: Transmitter access conflict.\n", dev->name);
		return 1;
	}
	if(set_bit(0,(void*) &p->lock) != 0)
	{
		printk("%s: Queue was locked!\n",dev->name);
		return 1;
	}

	{
		/* pad short frames up to the ethernet minimum length */
		short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;

		tmdp = p->tmdhead + p->tmdnum;

#ifdef NO_STATIC
		/* dynamic mode: descriptor points straight into the skb */
		tmdp->u.buffer = (unsigned long) (skb->data);
		p->tmdbufs[p->tmdnum] = skb;
#else
		/* static mode: copy into the pre-allocated bounce buffer
		 * (masked to the LANCE's 24-bit address) and free the skb */
		memcpy((char *) (tmdp->u.buffer & 0x00ffffff),(char *)skb->data,skb->len);
		dev_kfree_skb (skb, FREE_WRITE);
#endif
		tmdp->blen = -len;	/* length in two's complement form */
		/* hand the descriptor to the chip */
		tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;

		cli();
		p->xmit_queued = 1;
		writedatareg(CSR0_TDMD | CSR0_INEA);	/* demand transmission */
		p->tmdnum++; p->tmdnum &= TMDNUM-1;

		/* keep accepting packets while the next slot is still free */
		if( !((p->tmdhead + p->tmdnum)->u.s.status & XMIT_OWN) )
			dev->tbusy = 0;
		p->lock = 0;
		sti();

		dev->trans_start = jiffies;

	}

	return 0;
}
633
634 static struct enet_statistics *ni65_get_stats(struct device *dev)
635 {
636 return &((struct priv *) dev->priv)->stats;
637 }
638
/*
 * Multicast filtering is not implemented for this driver: the LANCE
 * logical-address filter is simply left cleared (see am7990_reinit),
 * so this callback is an intentional no-op.
 */
static void set_multicast_list(struct device *dev, int num_addrs, void *addrs)
{
}
642
643
644
645
646