This source file includes the following definitions:
- atp_init
- atp_probe1
- get_node_ID
- eeprom_op
- net_open
- hardware_init
- trigger_send
- write_packet
- net_send_packet
- net_interrupt
- net_rx
- read_block
- net_close
- net_get_stats
- set_multicast_list
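
/* atp.c: Driver for a pocket (parallel-port) ethernet adapter. */
/*
 * The adapter sits on a PC printer port and is reached through the
 * port's data and control registers (PAR_DATA, PAR_CONTROL).  It is
 * probed at the usual parallel-port base addresses 0x378, 0x278 and
 * 0x3bc, and uses IRQ 7 for the port at 0x378 and IRQ 5 otherwise.
 * The summary comments before each routine below describe what the
 * code does as written.
 */
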
static char *version =
    "atp.c:v0.04 2/25/94 Donald Becker (becker@super.org)\n";

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/malloc.h>
#include <linux/string.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <errno.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include "atp.h"

#ifndef HAVE_AUTOIRQ
extern void autoirq_setup(int waittime);
extern int autoirq_report(int waittime);
extern struct device *irq2dev_map[16];
#endif

#ifndef HAVE_ALLOC_SKB
#define alloc_skb(size, priority) (struct sk_buff *) kmalloc(size,priority)
#define kfree_skbmem(addr, size) kfree_s(addr,size);
#endif

#ifndef HAVE_PORTRESERVE
#define check_region(ioaddr, size) 0
#define snarf_region(ioaddr, size) do { } while (0)
#endif

#ifndef NET_DEBUG
#define NET_DEBUG 4
#endif
static unsigned int net_debug = NET_DEBUG;

#define ETHERCARD_TOTAL_SIZE 3

extern int atp_probe(struct device *dev);
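
/* Index to functions, as function prototypes. */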
static int atp_probe1(struct device *dev, short ioaddr);
static void get_node_ID(struct device *dev);
static unsigned short eeprom_op(short ioaddr, unsigned int cmd);
static int net_open(struct device *dev);
static void hardware_init(struct device *dev);
static void write_packet(short ioaddr, int length, unsigned char *packet, int mode);
static void trigger_send(short ioaddr, int length);
static int net_send_packet(struct sk_buff *skb, struct device *dev);
static void net_interrupt(int reg_ptr);
static void net_rx(struct device *dev);
static void read_block(short ioaddr, int length, unsigned char *buffer, int data_mode);
static int net_close(struct device *dev);
static struct enet_statistics *net_get_stats(struct device *dev);
static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);
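
/*
 * Probe for the pocket adapter.  If dev->base_addr is above 0x1ff only
 * that port is checked; a base_addr of 1 means "do not probe at all".
 * Otherwise the usual printer-port addresses are tried in turn, and a
 * port is considered only if a scribble test (writing 0x57 to the data
 * register and reading it back) succeeds.
 */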
int
atp_init(struct device *dev)
{
    int *port, ports[] = {0x378, 0x278, 0x3bc, 0};
    int base_addr = dev->base_addr;

    if (base_addr > 0x1ff)
        return atp_probe1(dev, base_addr);
    else if (base_addr == 1)
        return ENXIO;

    for (port = ports; *port; port++) {
        int ioaddr = *port;
        outb(0x57, ioaddr + PAR_DATA);
        if (inb(ioaddr + PAR_DATA) != 0x57)
            continue;
        if (atp_probe1(dev, ioaddr) == 0)
            return 0;
    }

    return ENODEV;
}
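
/*
 * Check a single port for the adapter: pulse a reset through CMR1,
 * verify the expected status nibbles in CMR1 and CMR2, read the
 * station address from the EEPROM, report the result, and fill in the
 * device structure (IRQ, private data, and the driver entry points).
 * The low bits of dev->mem_start select the interface transfer mode
 * and the low bits of dev->mem_end override net_debug.
 */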
static int atp_probe1(struct device *dev, short ioaddr)
{
    int saved_ctrl_reg, status;

    outb(0xff, ioaddr + PAR_DATA);

    saved_ctrl_reg = inb(ioaddr + PAR_CONTROL);

    outb(0x04, ioaddr + PAR_CONTROL);
    write_reg_high(ioaddr, CMR1, CMR1h_RESET);
    eeprom_delay(2048);
    status = read_nibble(ioaddr, CMR1);

    if ((status & 0x78) != 0x08) {
        outb(saved_ctrl_reg, ioaddr + PAR_CONTROL);
        return 1;
    }
    status = read_nibble(ioaddr, CMR2_h);
    if ((status & 0x78) != 0x10) {
        outb(saved_ctrl_reg, ioaddr + PAR_CONTROL);
        return 1;
    }

    write_reg_byte(ioaddr, CMR2, 0x01);
    write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);

    if (ioaddr == 0x378)
        dev->irq = 7;
    else
        dev->irq = 5;
    write_reg_high(ioaddr, CMR1, CMR1h_TxRxOFF);
    write_reg(ioaddr, CMR2, CMR2_NULL);

    dev->base_addr = ioaddr;

    get_node_ID(dev);

    printk("%s: Pocket adaptor found at %#3x, IRQ %d, SAPROM "
           "%02X:%02X:%02X:%02X:%02X:%02X.\n", dev->name, dev->base_addr,
           dev->irq, dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
           dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

    write_reg_high(ioaddr, CMR1, CMR1h_RESET);

    if (net_debug)
        printk(version);

    ether_setup(dev);
    dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
    memset(dev->priv, 0, sizeof(struct net_local));

    {
        struct net_local *lp = (struct net_local *)dev->priv;
        lp->addr_mode = CMR2h_Normal;
    }

    dev->if_port = (dev->mem_start & 0xf) ? dev->mem_start & 0x7 : 4;
    if (dev->mem_end & 0xf)
        net_debug = dev->mem_end & 7;

    dev->open = net_open;
    dev->stop = net_close;
    dev->hard_start_xmit = net_send_packet;
    dev->get_stats = net_get_stats;
    dev->set_multicast_list = &set_multicast_list;

    return 0;
}
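
/*
 * Read the six-byte station address from the adapter's serial EEPROM.
 * If word 0 of the EEPROM reads back as 0xffff, the address is taken
 * from offset 15 instead of offset 0.
 */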
static void get_node_ID(struct device *dev)
{
    short ioaddr = dev->base_addr;
    int sa_offset = 0;
    int i;

    write_reg(ioaddr, CMR2, CMR2_EEPROM);

    if (eeprom_op(ioaddr, EE_READ(0)) == 0xffff)
        sa_offset = 15;

    for (i = 0; i < 3; i++)
        ((unsigned short *)dev->dev_addr)[i] =
            ntohs(eeprom_op(ioaddr, EE_READ(sa_offset + i)));

    write_reg(ioaddr, CMR2, CMR2_NULL);
}
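
/*
 * Perform one command/response cycle on the serial EEPROM: the command
 * bits are clocked out most-significant-bit first through PROM_CMD
 * while the reply is shifted in from PROM_DATA, and the assembled data
 * word is returned.
 */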
static unsigned short eeprom_op(short ioaddr, unsigned int cmd)
{
    unsigned eedata_out = 0;
    int num_bits = EE_CMD_SIZE;

    while (--num_bits >= 0) {
        char outval = test_bit(num_bits, &cmd) ? EE_DATA_WRITE : 0;
        write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_LOW);
        eeprom_delay(5);
        write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_HIGH);
        eedata_out <<= 1;
        if (read_nibble(ioaddr, PROM_DATA) & EE_DATA_READ)
            eedata_out++;
        eeprom_delay(5);
    }
    write_reg_high(ioaddr, PROM_CMD, EE_CLK_LOW & ~EE_CS);
    return eedata_out;
}
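
/*
 * Open/initialize the board.  The IRQ is claimed here (and the device
 * recorded in irq2dev_map so the handler can find it), then the
 * hardware is programmed from scratch by hardware_init().
 */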
static int net_open(struct device *dev)
{
    if (irq2dev_map[dev->irq] != 0
        || (irq2dev_map[dev->irq] = dev) == 0
        || request_irq(dev->irq, &net_interrupt, 0, "atp")) {
        return -EAGAIN;
    }

    hardware_init(dev);
    dev->start = 1;
    return 0;
}
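
/*
 * Set up the adapter from a cold start: reset the chip, load the
 * station address into PAR0..PAR5, select the receive mode, enable the
 * transmitter, receiver and printer-port interrupt, and unmask the
 * Rx/Tx interrupt sources.  Also clears the driver's transmit state.
 */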
static void hardware_init(struct device *dev)
{
    struct net_local *lp = (struct net_local *)dev->priv;
    int ioaddr = dev->base_addr;
    int i;

    write_reg_high(ioaddr, CMR1, CMR1h_RESET);

    for (i = 0; i < 6; i++)
        write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);

    write_reg_high(ioaddr, CMR2, lp->addr_mode);

    if (net_debug > 2) {
        printk("%s: Reset: current Rx mode %d.\n", dev->name,
               (read_nibble(ioaddr, CMR2_h) >> 3) & 0x0f);
    }

    write_reg(ioaddr, CMR2, CMR2_IRQOUT);
    write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);

    outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);

    write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
    write_reg_high(ioaddr, IMR, ISRh_RxErr);

    lp->tx_unit_busy = 0;
    lp->pac_cnt_in_tx_buf = 0;
    lp->saved_tx_size = 0;

    dev->tbusy = 0;
    dev->interrupt = 0;
}
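
/* Load the transmit byte count into TxCNT0/TxCNT1 and start the transmitter. */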
static void trigger_send(short ioaddr, int length)
{
    write_reg_byte(ioaddr, TxCNT0, length & 0xff);
    write_reg(ioaddr, TxCNT1, length >> 8);
    write_reg(ioaddr, CMR1, CMR1_Xmit);
}
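
/*
 * Copy a packet (padded to an even length) into the adapter's transmit
 * buffer through the printer port.  Depending on the interface mode the
 * data is written byte-wide (write_byte_mode0) or nibble-wide
 * (write_byte_mode1); in the nibble-wide case the first byte is pushed
 * out explicitly before the port is switched to high-nibble writes.
 */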
static void write_packet(short ioaddr, int length, unsigned char *packet, int data_mode)
{
    length = (length + 1) & ~1;
    outb(EOC+MAR, ioaddr + PAR_DATA);
    if ((data_mode & 1) == 0) {
        outb(WrAddr+MAR, ioaddr + PAR_DATA);
        do {
            write_byte_mode0(ioaddr, *packet++);
        } while (--length > 0);
    } else {
        unsigned char outbyte = *packet++;

        outb(Ctrl_LNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
        outb(WrAddr+MAR, ioaddr + PAR_DATA);

        outb((outbyte & 0x0f)|0x40, ioaddr + PAR_DATA);
        outb(outbyte & 0x0f, ioaddr + PAR_DATA);
        outbyte >>= 4;
        outb(outbyte & 0x0f, ioaddr + PAR_DATA);
        outb(Ctrl_HNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
        while (--length > 0)
            write_byte_mode1(ioaddr, *packet++);
    }

    outb(0xff, ioaddr + PAR_DATA);
    outb(Ctrl_HNibWrite | Ctrl_SelData | Ctrl_IRQEN, ioaddr + PAR_CONTROL);
}
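
/*
 * Queue a packet for transmission.  A transmitter that has been busy
 * for more than five jiffies is assumed to be wedged and the hardware
 * is reinitialized.  The board interrupt sources are masked while the
 * packet is copied out; if the transmit unit is idle the send is
 * triggered at once, otherwise the length is saved so the interrupt
 * handler can trigger it when the current transmission completes.
 */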
static int
net_send_packet(struct sk_buff *skb, struct device *dev)
{
    struct net_local *lp = (struct net_local *)dev->priv;
    int ioaddr = dev->base_addr;

    if (dev->tbusy) {
        int tickssofar = jiffies - dev->trans_start;
        if (tickssofar < 5)
            return 1;
        printk("%s: transmit timed out, %s?\n", dev->name,
               inb(ioaddr + PAR_CONTROL) & 0x10 ? "network cable problem"
               : "IRQ conflict");
        lp->stats.tx_errors++;
        hardware_init(dev);
        dev->tbusy = 0;
        dev->trans_start = jiffies;
    }

    if (skb == NULL) {
        dev_tint(dev);
        return 0;
    }

    if (set_bit(0, (void*)&dev->tbusy) != 0)
        printk("%s: Transmitter access conflict.\n", dev->name);
    else {
        short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
        unsigned char *buf = skb->data;
        int flags;

        save_flags(flags);
        cli();
        write_reg(ioaddr, IMR, 0);
        write_reg_high(ioaddr, IMR, 0);
        restore_flags(flags);

        write_packet(ioaddr, length, buf, dev->if_port);

        lp->pac_cnt_in_tx_buf++;
        if (lp->tx_unit_busy == 0) {
            trigger_send(ioaddr, length);
            lp->saved_tx_size = 0;
            lp->re_tx = 0;
            lp->tx_unit_busy = 1;
        } else
            lp->saved_tx_size = length;

        dev->trans_start = jiffies;

        write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
        write_reg_high(ioaddr, IMR, ISRh_RxErr);
    }

    dev_kfree_skb(skb, FREE_WRITE);

    return 0;
}
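
/*
 * The typical interrupt-handler workhorse.  The IRQ is mapped back to
 * its device, the chip's interrupt output and interrupt sources are
 * masked, and the status nibble is then polled in a bounded loop to
 * handle received packets, receive overruns, and transmit completions
 * or errors (with up to 15 retransmit attempts on error).  If many
 * transmits go by with no receive activity the hardware is
 * reinitialized on the suspicion of a missed packet.  On the way out
 * the station address registers are reloaded, apparently as a
 * precaution, before interrupts are re-enabled.
 */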
static void
net_interrupt(int reg_ptr)
{
    int irq = -(((struct pt_regs *)reg_ptr)->orig_eax+2);
    struct device *dev = (struct device *)(irq2dev_map[irq]);
    struct net_local *lp;
    int ioaddr, status, boguscount = 20;
    static int num_tx_since_rx = 0;

    if (dev == NULL) {
        printk("ATP_interrupt(): irq %d for unknown device.\n", irq);
        return;
    }
    dev->interrupt = 1;

    ioaddr = dev->base_addr;
    lp = (struct net_local *)dev->priv;

    outb(Ctrl_SelData, ioaddr + PAR_CONTROL);

    write_reg(ioaddr, CMR2, CMR2_NULL);
    write_reg(ioaddr, IMR, 0);

    if (net_debug > 5) printk("%s: In interrupt ", dev->name);
    while (--boguscount > 0) {
        status = read_nibble(ioaddr, ISR);
        if (net_debug > 5) printk("loop status %02x..", status);

        if (status & (ISR_RxOK<<3)) {
            write_reg(ioaddr, ISR, ISR_RxOK);
            do {
                int read_status = read_nibble(ioaddr, CMR1);
                if (net_debug > 6)
                    printk("handling Rx packet %02x..", read_status);

                if (read_status & (CMR1_IRQ << 3)) {
                    lp->stats.rx_over_errors++;
                    write_reg_high(ioaddr, CMR2, CMR2h_OFF);
                    net_rx(dev);
                    write_reg_high(ioaddr, ISR, ISRh_RxErr);
                    write_reg_high(ioaddr, CMR2, lp->addr_mode);
                } else if ((read_status & (CMR1_BufEnb << 3)) == 0) {
                    net_rx(dev);
                    dev->last_rx = jiffies;
                    num_tx_since_rx = 0;
                } else
                    break;
            } while (--boguscount > 0);
        } else if (status & ((ISR_TxErr + ISR_TxOK)<<3)) {
            if (net_debug > 6) printk("handling Tx done..");

            write_reg(ioaddr, ISR, ISR_TxErr + ISR_TxOK);
            if (status & (ISR_TxErr<<3)) {
                lp->stats.collisions++;
                if (++lp->re_tx > 15) {
                    lp->stats.tx_aborted_errors++;
                    hardware_init(dev);
                    break;
                }
                if (net_debug > 6) printk("attempting to ReTx");
                write_reg(ioaddr, CMR1, CMR1_ReXmit + CMR1_Xmit);
            } else {
                lp->stats.tx_packets++;
                lp->pac_cnt_in_tx_buf--;
                if (lp->saved_tx_size) {
                    trigger_send(ioaddr, lp->saved_tx_size);
                    lp->saved_tx_size = 0;
                    lp->re_tx = 0;
                } else
                    lp->tx_unit_busy = 0;
                dev->tbusy = 0;
                mark_bh(NET_BH);
            }
            num_tx_since_rx++;
        } else if (num_tx_since_rx > 8
                   && jiffies > dev->last_rx + 100) {
            if (net_debug > 2)
                printk("%s: Missed packet? No Rx after %d Tx and %ld jiffies"
                       " status %02x CMR1 %02x.\n", dev->name,
                       num_tx_since_rx, jiffies - dev->last_rx, status,
                       (read_nibble(ioaddr, CMR1) >> 3) & 15);
            lp->stats.rx_missed_errors++;
            hardware_init(dev);
            num_tx_since_rx = 0;
            break;
        } else
            break;
    }

    {
        int i;
        for (i = 0; i < 6; i++)
            write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
    }

    write_reg(ioaddr, CMR2, CMR2_IRQOUT);

    outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);

    write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
    write_reg_high(ioaddr, IMR, ISRh_RxErr);

    if (net_debug > 5) printk("exiting interrupt.\n");

    dev->interrupt = 0;

    return;
}
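
/*
 * Pull one received frame off the adapter: read the four-word
 * rx_header, check its status, allocate an sk_buff, copy the packet in
 * with read_block(), and hand it to netif_rx().  On a bad status the
 * hardware is reinitialized instead; otherwise the receiver is
 * advanced to the next packet with CMR1_NextPkt.
 */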
static void net_rx(struct device *dev)
{
    struct net_local *lp = (struct net_local *)dev->priv;
    int ioaddr = dev->base_addr;
#ifdef notdef
    ushort header[4];
#else
    struct rx_header rx_head;
#endif

    outb(EOC+MAR, ioaddr + PAR_DATA);
    read_block(ioaddr, 8, (unsigned char*)&rx_head, dev->if_port);
    if (net_debug > 5)
        printk(" rx_count %04x %04x %04x %04x..", rx_head.pad,
               rx_head.rx_count, rx_head.rx_status, rx_head.cur_addr);
    if ((rx_head.rx_status & 0x77) != 0x01) {
        lp->stats.rx_errors++;
        if (net_debug > 3) printk("%s: Unknown ATP Rx error %04x.\n",
                                  dev->name, rx_head.rx_status);
        hardware_init(dev);
        return;
    } else {
        int pkt_len = (rx_head.rx_count & 0x7ff) - 4;
        struct sk_buff *skb;

        skb = alloc_skb(pkt_len, GFP_ATOMIC);
        if (skb == NULL) {
            printk("%s: Memory squeeze, dropping packet.\n", dev->name);
            lp->stats.rx_dropped++;
            goto done;
        }
        skb->len = pkt_len;
        skb->dev = dev;

        read_block(ioaddr, pkt_len, skb->data, dev->if_port);

        if (net_debug > 6) {
            unsigned char *data = skb->data;
            printk(" data %02x%02x%02x %02x%02x%02x %02x%02x%02x"
                   "%02x%02x%02x %02x%02x..",
                   data[0], data[1], data[2], data[3], data[4], data[5],
                   data[6], data[7], data[8], data[9], data[10], data[11],
                   data[12], data[13]);
        }

        netif_rx(skb);
        lp->stats.rx_packets++;
    }
done:
    write_reg(ioaddr, CMR1, CMR1_NextPkt);
    return;
}
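
/*
 * Read a block of data from the adapter through the printer port.  The
 * interface mode selects the transfer routine: modes 0-1 use
 * read_byte_mode0, modes 2-3 read_byte_mode2, modes 4-5
 * read_byte_mode4, and higher modes read_byte_mode6.
 */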
static void read_block(short ioaddr, int length, unsigned char *p, int data_mode)
{
    if (data_mode <= 3) {
        outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
        outb(length == 8 ? RdAddr | HNib | MAR : RdAddr | MAR,
             ioaddr + PAR_DATA);
        if (data_mode <= 1) {
            do *p++ = read_byte_mode0(ioaddr); while (--length > 0);
        } else
            do *p++ = read_byte_mode2(ioaddr); while (--length > 0);
    } else if (data_mode <= 5)
        do *p++ = read_byte_mode4(ioaddr); while (--length > 0);
    else
        do *p++ = read_byte_mode6(ioaddr); while (--length > 0);

    outb(EOC+HNib+MAR, ioaddr + PAR_DATA);
    outb(Ctrl_SelData, ioaddr + PAR_CONTROL);
}
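
/*
 * The inverse of net_open(): mark the device stopped, shut the
 * receiver off, disable the printer-port interrupt, release the IRQ
 * and the irq2dev_map slot, and leave the chip in its reset state.
 */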
static int
net_close(struct device *dev)
{
    struct net_local *lp = (struct net_local *)dev->priv;
    int ioaddr = dev->base_addr;

    dev->tbusy = 1;
    dev->start = 0;

    lp->addr_mode = CMR2h_OFF;
    write_reg_high(ioaddr, CMR2, CMR2h_OFF);

    outb(0x00, ioaddr + PAR_CONTROL);
    free_irq(dev->irq);
    irq2dev_map[dev->irq] = 0;

    write_reg_high(ioaddr, CMR1, CMR1h_RESET);

    return 0;
}
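
/* Return the current statistics, kept in the driver's private data. */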
static struct enet_statistics *
net_get_stats(struct device *dev)
{
    struct net_local *lp = (struct net_local *)dev->priv;
    return &lp->stats;
}
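
/*
 * Set the receive mode: any non-zero num_addrs simply selects
 * promiscuous mode (CMR2h_PROMISC), otherwise normal address filtering
 * (CMR2h_Normal) is restored.
 */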
static void
set_multicast_list(struct device *dev, int num_addrs, void *addrs)
{
    struct net_local *lp = (struct net_local *)dev->priv;
    short ioaddr = dev->base_addr;
    lp->addr_mode = num_addrs ? CMR2h_PROMISC : CMR2h_Normal;
    write_reg_high(ioaddr, CMR2, lp->addr_mode);
}