This source file includes the following definitions:
- ni65_open
- ni65_close
- ni65_probe
- ni65_init
- ni65_probe1
- ni65_am7990_reinit
- ni65_interrupt
- ni65_xmit_intr
- ni65_recv_intr
- ni65_send_packet
- ni65_get_stats
- set_multicast_list
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/malloc.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include "ni65.h"

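/*
 * Buffer handling options: with RCV_VIA_SKB / XMT_VIA_SKB defined, packet
 * data is passed directly in sk_buffs whenever the buffer lies below the
 * 16 MB ISA DMA limit; otherwise bounce buffers are used.  RCV_PARANOIA_CHECK
 * enables an extra receive-ring consistency check in the interrupt handler.
 */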
#define RCV_VIA_SKB
#undef RCV_PARANOIA_CHECK
#define XMT_VIA_SKB

#define NI65_TOTAL_SIZE 16
#define NI65_ADDR0 0x02
#define NI65_ADDR1 0x07
#define NI65_ADDR2 0x01
#define NI65_ID0 0x00
#define NI65_ID1 0x55

#define PORT dev->base_addr

#define RMDNUM 8
#define RMDNUMMASK 0x60000000
#define TMDNUM 4
#define TMDNUMMASK 0x40000000

#define R_BUF_SIZE 1536
#define T_BUF_SIZE 1536

#define L_DATAREG 0x00
#define L_ADDRREG 0x02

#define L_RESET 0x04
#define L_CONFIG 0x05
#define L_EBASE 0x08

#define CSR0 0x00
#define CSR1 0x01
#define CSR2 0x02
#define CSR3 0x03
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
                           outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
                      inw(PORT+L_DATAREG))
#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}

static int ni65_probe1(struct device **dev,int);
static void ni65_interrupt(int irq, void * dev_id, struct pt_regs *regs);
static void ni65_recv_intr(struct device *dev,int);
static void ni65_xmit_intr(struct device *dev,int);
static int ni65_open(struct device *dev);
static int ni65_am7990_reinit(struct device *dev);
static int ni65_send_packet(struct sk_buff *skb, struct device *dev);
static int ni65_close(struct device *dev);
static struct enet_statistics *ni65_get_stats(struct device *);
static void set_multicast_list(struct device *dev);

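/*
 * Per-device private data: the receive/transmit descriptor rings, the LANCE
 * initialisation block, the current ring positions and the buffers backing
 * the descriptors.  This structure is DMA'd by the chip and therefore has to
 * stay below the 16 MB ISA limit.
 */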
struct priv
{
    struct rmd rmdhead[RMDNUM];
    struct tmd tmdhead[TMDNUM];
    struct init_block ib;
    int rmdnum;
    int tmdnum,tmdlast;
#ifdef RCV_VIA_SKB
    struct sk_buff *recv_skb[RMDNUM];
#else
    void *recvbounce[RMDNUM];
#endif
#ifdef XMT_VIA_SKB
    struct sk_buff *tmd_skb[TMDNUM];
#endif
    void *tmdbounce[TMDNUM];
    int lock,xmit_queued;
    struct enet_statistics stats;
};

static int irqtab[] = { 9,12,15,5 };
static int dmatab[] = { 0,3,5,6 };
static int debuglevel = 0;

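/*
 * Open the interface: (re)initialise the LANCE and, on success, mark the
 * device as running so the network layer may start queueing packets.
 */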
static int ni65_open(struct device *dev)
{
    if(ni65_am7990_reinit(dev))
    {
        dev->tbusy = 0;
        dev->interrupt = 0;
        dev->start = 1;
        return 0;
    }
    else
    {
        dev->start = 0;
        return -EAGAIN;
    }
}

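/*
 * Close the interface: reset the board, which stops the LANCE, and mark the
 * device as no longer running.
 */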
static int ni65_close(struct device *dev)
{
    outw(0,PORT+L_RESET);
    dev->tbusy = 1;
    dev->start = 0;
    return 0;
}

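/*
 * Probe for an ni6510 board.  If the caller supplied a fixed base address it
 * is used directly; otherwise the known I/O ports are scanned and a board is
 * recognised by the two ID bytes that follow the station address.
 */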
int ni65_probe(struct device *dev)
{
    int *port;
    static int ports[] = {0x300,0x320,0x340,0x360, 0};

    if(dev) {
        int base_addr = dev->base_addr;
        if (base_addr > 0x1ff)
            return ni65_probe1(&dev, base_addr);
        else if (base_addr > 0)
            return -ENXIO;
        dev->base_addr = base_addr;
    }

    for (port = ports; *port; port++)
    {
        int ioaddr = *port;

        if (check_region(ioaddr, NI65_TOTAL_SIZE))
            continue;
        if( !(inb(ioaddr+L_EBASE+6) == NI65_ID0) ||
            !(inb(ioaddr+L_EBASE+7) == NI65_ID1) )
            continue;
        if (ni65_probe1(&dev, ioaddr) == 0)
            return 0;
    }

    return -ENODEV;
}

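/* Driver initialisation entry point: run the autoprobe. */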
int ni65_init(void)
{
    ni65_probe(NULL);
    return 0;
}

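/*
 * Set up one board at the given I/O address: verify the hardware address
 * prefix, read the station address, IRQ and DMA channel from the board,
 * claim the resources and allocate the private data and all DMA buffers.
 */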
static int ni65_probe1(struct device **dev1,int ioaddr)
{
    int i;
    unsigned char *ptr;
    struct priv *p;
    struct device *dev = *dev1;

    if(inb(ioaddr+L_EBASE+0) != NI65_ADDR0 || inb(ioaddr+L_EBASE+1) != NI65_ADDR1
        || inb(ioaddr+L_EBASE+2) != NI65_ADDR2)
    {
        printk("%s: wrong hardware address\n",dev ? dev->name : "ni6510" );
        return -ENODEV;
    }

    if(!dev) {
        dev = init_etherdev(0,0);
        *dev1 = dev;
    }
    dev->base_addr = ioaddr;

    for(i=0;i<6;i++)
        dev->dev_addr[i] = inb(PORT+L_EBASE+i);

    if(dev->irq == 0)
        dev->irq = irqtab[(inw(PORT+L_CONFIG)>>2)&3];
    if(dev->dma == 0)
        dev->dma = dmatab[inw(PORT+L_CONFIG)&3];

    printk("%s: %s found at %#3lx, IRQ %d DMA %d.\n", dev->name,
           "ni6510", dev->base_addr, dev->irq,dev->dma);

    {
        int irqval = request_irq(dev->irq, &ni65_interrupt,0,"ni6510",NULL);
        if (irqval) {
            printk ("%s: unable to get IRQ %d (irqval=%d).\n",
                    dev->name,dev->irq, irqval);
            return -EAGAIN;
        }
        if(request_dma(dev->dma, "ni6510") != 0)
        {
            printk("%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
            free_irq(dev->irq,NULL);
            return -EAGAIN;
        }
    }
    irq2dev_map[dev->irq] = dev;

    request_region(ioaddr,NI65_TOTAL_SIZE,"ni6510");

    dev->open = ni65_open;
    dev->stop = ni65_close;
    dev->hard_start_xmit = ni65_send_packet;
    dev->get_stats = ni65_get_stats;
    dev->set_multicast_list = set_multicast_list;

    ether_setup(dev);

    dev->interrupt = 0;
    dev->tbusy = 0;
    dev->start = 0;

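    /*
     * Allocate the private area and the transmit/receive buffers.  Everything
     * the LANCE reaches by DMA has to live below 16 MB, hence GFP_DMA and the
     * explicit address checks.
     */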
    ptr = kmalloc(sizeof(struct priv)+8,GFP_KERNEL|GFP_DMA);
    if(!ptr)
        return -ENOMEM;
    ptr = (unsigned char *) (((unsigned long) ptr + 7) & ~0x7);
    if( (unsigned long) ptr + sizeof(struct priv) > 0x1000000) {
        printk("%s: Can't alloc buffer in lower 16MB!\n",dev->name);
        return -EAGAIN;
    }
    p = dev->priv = (struct priv *) ptr;
    memset((char *) dev->priv,0,sizeof(struct priv));

    for(i=0;i<TMDNUM;i++)
    {
        if( (ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA )) == NULL) {
            printk("%s: Can't alloc Xmit-Mem.\n",dev->name);
            return -ENOMEM;
        }
        if( (unsigned long) (ptr+T_BUF_SIZE) > 0x1000000) {
            printk("%s: Can't alloc Xmit-Mem in lower 16MB!\n",dev->name);
            return -EAGAIN;
        }
        p->tmdbounce[i] = ptr;
#ifdef XMT_VIA_SKB
        p->tmd_skb[i] = NULL;
#endif
    }

#ifdef RCV_VIA_SKB
    for(i=0;i<RMDNUM;i++)
    {
        struct sk_buff *skb;
        if( !(skb = dev_alloc_skb(R_BUF_SIZE+2)) ) {
            printk("%s: unable to alloc recv-mem\n",dev->name);
            return -ENOMEM;
        }
        skb->dev = dev;
        skb_reserve(skb,2);
        skb_put(skb,R_BUF_SIZE);
        if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000 ) {
            printk("%s: unable to alloc receive-memory in lower 16MB!\n",dev->name);
            return -EAGAIN;
        }
        p->recv_skb[i] = skb;
    }
#else
    for(i=0;i<RMDNUM;i++)
    {
        if( !(p->recvbounce[i] = kmalloc(R_BUF_SIZE,GFP_KERNEL | GFP_DMA )) ) {
            printk("%s: unable to alloc recv-mem\n",dev->name);
            return -ENOMEM;
        }
        if( (unsigned long) p->recvbounce[i] + R_BUF_SIZE > 0x1000000 ) {
            printk("%s: unable to alloc receive-memory in lower 16MB!\n",dev->name);
            return -EAGAIN;
        }
    }
#endif

    return 0;
}

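/*
 * (Re)initialise the Am7990/LANCE: reset the board, rebuild both descriptor
 * rings, fill in the initialisation block (station address, multicast filter,
 * ring pointers), start the chip and wait for the "initialisation done" bit.
 * Returns 1 on success, 0 on failure.
 */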
static int ni65_am7990_reinit(struct device *dev)
{
    int i;
    struct priv *p = (struct priv *) dev->priv;

    p->lock = 0;
    p->xmit_queued = 0;

    disable_dma(dev->dma);
    set_dma_mode(dev->dma,DMA_MODE_CASCADE);
    enable_dma(dev->dma);

    outw(0,PORT+L_RESET);
    if(inw(PORT+L_DATAREG) != 0x4)
    {
        printk(KERN_ERR "%s: can't RESET ni6510 card: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
        disable_dma(dev->dma);
        free_dma(dev->dma);
        free_irq(dev->irq, NULL);
        return 0;
    }

    p->tmdnum = 0; p->tmdlast = 0;
    for(i=0;i<TMDNUM;i++)
    {
        struct tmd *tmdp = p->tmdhead + i;
#ifdef XMT_VIA_SKB
        if(p->tmd_skb[i]) {
            dev_kfree_skb(p->tmd_skb[i],FREE_WRITE);
            p->tmd_skb[i] = NULL;
        }
#endif
        tmdp->u.buffer = 0x0;
        tmdp->u.s.status = XMIT_START | XMIT_END;
        tmdp->blen = tmdp->status2 = 0;
    }

    p->rmdnum = 0;
    for(i=0;i<RMDNUM;i++)
    {
        struct rmd *rmdp = p->rmdhead + i;
#ifdef RCV_VIA_SKB
        rmdp->u.buffer = (unsigned long) p->recv_skb[i]->data;
#else
        rmdp->u.buffer = (unsigned long) p->recvbounce[i];
#endif
        rmdp->blen = -(R_BUF_SIZE-8);
        rmdp->mlen = 0;
        rmdp->u.s.status = RCV_OWN;
    }

    for(i=0;i<6;i++)
        p->ib.eaddr[i] = dev->dev_addr[i];

    for(i=0;i<8;i++)
        p->ib.filter[i] = 0x0;
    p->ib.mode = 0x0;

    if(dev->flags & IFF_PROMISC) {
        p->ib.mode = M_PROM;
    }
    else if(dev->mc_count || dev->flags & IFF_ALLMULTI) {
        for(i=0;i<8;i++)
            p->ib.filter[i] = 0xff;
    }

    p->ib.trp = (unsigned long) p->tmdhead | TMDNUMMASK;
    p->ib.rrp = (unsigned long) p->rmdhead | RMDNUMMASK;

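    /*
     * Point the chip at the initialisation block: CSR1 takes the low 16 bits
     * of its physical address, CSR2 the high bits, then CSR0_INIT makes the
     * LANCE fetch it.
     */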
    writereg(0,CSR3);
    writereg((unsigned short) (((unsigned long) &(p->ib)) & 0xffff),CSR1);
    writereg((unsigned short) (((unsigned long) &(p->ib))>>16),CSR2);

    writereg(CSR0_INIT,CSR0);

    for(i=0;i<32;i++)
    {
        __delay((loops_per_sec>>8));
        if(inw(PORT+L_DATAREG) & CSR0_IDON)
            break;
    }
    if(i == 32)
    {
        printk(KERN_ERR "%s: can't init am7990/lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
        disable_dma(dev->dma);
        free_dma(dev->dma);
        free_irq(dev->irq, NULL);
        return 0;
    }

    writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);

    return 1;
}

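/*
 * Interrupt handler: acknowledge and dispatch LANCE events (receive,
 * transmit, errors) until CSR0 shows nothing more to do, then re-enable
 * interrupts.  The bounded loop guards against a stuck interrupt line.
 */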
static void ni65_interrupt(int irq, void * dev_id, struct pt_regs * regs)
{
    int csr0;
    struct device *dev = (struct device *) irq2dev_map[irq];
    int bcnt = 32;

    if (dev == NULL) {
        printk (KERN_ERR "ni65_interrupt(): irq %d for unknown device.\n", irq);
        return;
    }

    dev->interrupt = 1;

    while(--bcnt) {

        csr0 = inw(PORT+L_DATAREG);
        writedatareg(csr0 & CSR0_CLRALL);

        if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT)))
            break;

        if(csr0 & CSR0_ERR)
        {
            struct priv *p = (struct priv *) dev->priv;

            if(csr0 & CSR0_BABL)
                p->stats.tx_errors++;
            if(csr0 & CSR0_MISS)
                p->stats.rx_errors++;
            if(csr0 & CSR0_MERR) {
                writedatareg(CSR0_STOP);
                writedatareg(CSR0_STRT);
            }
        }
        if(csr0 & CSR0_RINT)
            ni65_recv_intr(dev,csr0);
        if(csr0 & CSR0_TINT)
            ni65_xmit_intr(dev,csr0);
    }

#ifdef RCV_PARANOIA_CHECK
    {
        struct priv *p = (struct priv *) dev->priv;
        int i,f=0;
        for(i=0;i<RMDNUM;i++) {
            struct rmd *rmdp = p->rmdhead + ((p->rmdnum - i - 1) & (RMDNUM-1));
            if(! (rmdp->u.s.status & RCV_OWN) )
                f = 1;
            else if(f)
                break;
        }

        if(i < RMDNUM) {
            p->rmdnum = (p->rmdnum + 8 - i) & (RMDNUM - 1);
            printk(KERN_ERR "%s: Oops, receive ring corrupted\n",dev->name);

            ni65_recv_intr(dev,csr0);
        }
    }
#endif

    if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) {
        writedatareg(CSR0_STOP);
        writedatareg(CSR0_STRT | CSR0_INEA);
    }
    else
        writedatareg(CSR0_INEA);
    dev->interrupt = 0;

    return;
}

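/*
 * Transmit interrupt: walk the transmit ring from tmdlast, reclaim buffers
 * the chip has given back (XMIT_OWN cleared), update the error counters and
 * wake the network layer.
 */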
static void ni65_xmit_intr(struct device *dev,int csr0)
{
    struct priv *p = (struct priv *) dev->priv;

    while(p->xmit_queued)
    {
        struct tmd *tmdp = p->tmdhead + p->tmdlast;
        int tmdstat = tmdp->u.s.status;

        if(tmdstat & XMIT_OWN)
            break;

#ifdef XMT_VIA_SKB
        if(p->tmd_skb[p->tmdlast]) {
            dev_kfree_skb(p->tmd_skb[p->tmdlast],FREE_WRITE);
            p->tmd_skb[p->tmdlast] = NULL;
        }
#endif

        if(tmdstat & XMIT_ERR)
        {
#if 0
            if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3)
                printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name);
#endif

            if(tmdp->status2 & XMIT_RTRY)
                p->stats.tx_aborted_errors++;
            if(tmdp->status2 & XMIT_LCAR)
                p->stats.tx_carrier_errors++;
            if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
                p->stats.tx_fifo_errors++;
                writedatareg(CSR0_STOP);
                writedatareg(CSR0_STRT);
                if(debuglevel > 1)
                    printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
            }
            if(debuglevel > 2)
                printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
            p->stats.tx_errors++;
            tmdp->status2 = 0;
        }
        else
            p->stats.tx_packets++;

        p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
        if(p->tmdlast == p->tmdnum)
            p->xmit_queued = 0;
    }
    dev->tbusy = 0;
    mark_bh(NET_BH);
}

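/*
 * Receive interrupt: walk the receive ring from rmdnum, hand complete,
 * error-free frames of legal length to the network layer and return every
 * descriptor to the chip by setting RCV_OWN again.
 */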
static void ni65_recv_intr(struct device *dev,int csr0)
{
    struct rmd *rmdp;
    int rmdstat,len;
    struct priv *p = (struct priv *) dev->priv;

    rmdp = p->rmdhead + p->rmdnum;
    while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
    {
        if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) )
        {
            if(!(rmdstat & RCV_ERR)) {
                if(rmdstat & RCV_START)
                {
                    p->stats.rx_length_errors++;
                    printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
                }
            }
            else {
                printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
                        dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
                if(rmdstat & RCV_FRAM)
                    p->stats.rx_frame_errors++;
                if(rmdstat & RCV_OFLO)
                    p->stats.rx_over_errors++;
                if(rmdstat & (RCV_OFLO | RCV_BUF_ERR) ) {
                    writedatareg(CSR0_STOP);
                    writedatareg(CSR0_STRT);
                    if(debuglevel > 1)
                        printk(KERN_ERR "%s: Rcv FIFO/BUFF error.\n",dev->name);
                }
                if(rmdstat & RCV_CRC) p->stats.rx_crc_errors++;
            }
            rmdp->u.s.status = RCV_OWN;
            p->stats.rx_errors++;
        }
        else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
        {
#ifdef RCV_VIA_SKB
            struct sk_buff *skb = dev_alloc_skb(R_BUF_SIZE+2);
#else
            struct sk_buff *skb = dev_alloc_skb(len+2);
#endif
            if(skb)
            {
                skb_reserve(skb,2);
                skb->dev = dev;
#ifdef RCV_VIA_SKB
                if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
                    skb_put(skb,len);
                    eth_copy_and_sum(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len,0);
                }
                else {
                    struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
                    skb_put(skb,R_BUF_SIZE);
                    p->recv_skb[p->rmdnum] = skb;
                    rmdp->u.buffer = (unsigned long) skb->data;
                    skb = skb1;
                    skb_trim(skb,len);
                }
#else
                skb_put(skb,len);
                eth_copy_and_sum(skb, (unsigned char *) p->recvbounce[p->rmdnum],len,0);
#endif
                rmdp->u.s.status = RCV_OWN;
                p->stats.rx_packets++;
                skb->protocol=eth_type_trans(skb,dev);
                netif_rx(skb);
            }
            else
            {
                rmdp->u.s.status = RCV_OWN;
                printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
                p->stats.rx_dropped++;
            }
        }
        else {
            rmdp->u.s.status = RCV_OWN;
            printk(KERN_INFO "%s: received runt packet\n",dev->name);
            p->stats.rx_errors++;
        }
        p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1);
        rmdp = p->rmdhead + p->rmdnum;
    }
}

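/*
 * Queue a packet for transmission.  Handles transmitter timeouts, copies the
 * data into a bounce buffer when it lies above the 16 MB limit (or always,
 * without XMT_VIA_SKB), fills in the next transmit descriptor and kicks the
 * chip with a transmit demand.
 */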
static int ni65_send_packet(struct sk_buff *skb, struct device *dev)
{
    struct priv *p = (struct priv *) dev->priv;

    if(dev->tbusy)
    {
        int tickssofar = jiffies - dev->trans_start;
        if (tickssofar < 5)
            return 1;

        printk(KERN_ERR "%s: transmitter timed out, trying to restart!\n",dev->name);
        ni65_am7990_reinit(dev);
        dev->tbusy=0;
        dev->trans_start = jiffies;
    }

    if(skb == NULL) {
        dev_tint(dev);
        return 0;
    }

    if (skb->len <= 0)
        return 0;

    if (set_bit(0, (void*)&dev->tbusy) != 0) {
        printk(KERN_ERR "%s: Transmitter access conflict.\n", dev->name);
        return 1;
    }
    if (set_bit(0, (void*)&p->lock)) {
        printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
        return 1;
    }

    {
        short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
        struct tmd *tmdp = p->tmdhead + p->tmdnum;
        long flags;

#ifdef XMT_VIA_SKB
        if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
#endif
            tmdp->u.buffer = (unsigned long ) p->tmdbounce[p->tmdnum];
            memcpy((char *) tmdp->u.buffer,(char *)skb->data,
                   (skb->len > T_BUF_SIZE) ? T_BUF_SIZE : skb->len);
            dev_kfree_skb (skb, FREE_WRITE);
#ifdef XMT_VIA_SKB
        }
        else {
            tmdp->u.buffer = (unsigned long) skb->data;
            p->tmd_skb[p->tmdnum] = skb;
        }
#endif
        tmdp->blen = -len;

        save_flags(flags);
        cli();

        tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;
        writedatareg(CSR0_TDMD | CSR0_INEA);

        p->xmit_queued = 1;
        p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);

        dev->tbusy = (p->tmdnum == p->tmdlast) ? 1 : 0;
        p->lock = 0;
        dev->trans_start = jiffies;

        restore_flags(flags);
    }

    return 0;
}

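/* Return the statistics kept in the private area. */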
static struct enet_statistics *ni65_get_stats(struct device *dev)
{
    return &((struct priv *) dev->priv)->stats;
}

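/*
 * Multicast/promiscuous mode changes are handled by simply reinitialising
 * the chip: ni65_am7990_reinit() rebuilds the init block from dev->flags and
 * dev->mc_count.
 */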
static void set_multicast_list(struct device *dev)
{
    if(!ni65_am7990_reinit(dev))
        printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name);
    dev->tbusy = 0;
}