This source file includes the following definitions:
- atp_init
- atp_probe1
- get_node_ID
- eeprom_op
- net_open
- hardware_init
- trigger_send
- write_packet
- net_send_packet
- net_interrupt
- net_rx
- read_block
- net_close
- net_get_stats
- set_multicast_list
1
2
3
4
5
6
7
8
9
10
11
12
13
/* Driver version banner; printed once from atp_probe1() when net_debug is set. */
static char *version =
"atp.c:v0.04 2/25/94 Donald Becker (becker@super.org)\n";
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77 #include <linux/kernel.h>
78 #include <linux/sched.h>
79 #include <linux/types.h>
80 #include <linux/fcntl.h>
81 #include <linux/interrupt.h>
82 #include <linux/ptrace.h>
83 #include <linux/ioport.h>
84 #include <linux/in.h>
85 #include <linux/malloc.h>
86 #include <linux/string.h>
87 #include <asm/system.h>
88 #include <asm/bitops.h>
89 #include <asm/io.h>
90 #include <asm/dma.h>
91 #include <linux/errno.h>
92
93 #include <linux/netdevice.h>
94 #include <linux/etherdevice.h>
95 #include <linux/skbuff.h>
96
97 #include "atp.h"
98
99
100 #ifndef HAVE_AUTOIRQ
101
102 extern void autoirq_setup(int waittime);
103 extern int autoirq_report(int waittime);
104
105
106 extern struct device *irq2dev_map[16];
107 #endif
108
109 #ifndef HAVE_ALLOC_SKB
110 #define alloc_skb(size, priority) (struct sk_buff *) kmalloc(size,priority)
111 #define kfree_skbmem(addr, size) kfree_s(addr,size);
112 #endif
113
114 #ifndef HAVE_PORTRESERVE
115 #define check_region(ioaddr, size) 0
116 #define request_region(ioaddr, size,name) do ; while (0)
117 #endif
118
119
120 #ifndef NET_DEBUG
121 #define NET_DEBUG 4
122 #endif
123 static unsigned int net_debug = NET_DEBUG;
124
125
126 #define ETHERCARD_TOTAL_SIZE 3
127
128
129
130 extern int atp_probe(struct device *dev);
131
132 static int atp_probe1(struct device *dev, short ioaddr);
133 static void get_node_ID(struct device *dev);
134 static unsigned short eeprom_op(short ioaddr, unsigned int cmd);
135 static int net_open(struct device *dev);
136 static void hardware_init(struct device *dev);
137 static void write_packet(short ioaddr, int length, unsigned char *packet, int mode);
138 static void trigger_send(short ioaddr, int length);
139 static int net_send_packet(struct sk_buff *skb, struct device *dev);
140 static void net_interrupt(int reg_ptr);
141 static void net_rx(struct device *dev);
142 static void read_block(short ioaddr, int length, unsigned char *buffer, int data_mode);
143 static int net_close(struct device *dev);
144 static struct enet_statistics *net_get_stats(struct device *dev);
145 static void set_multicast_list(struct device *dev, int num_addrs, void *addrs);
146
147
148
149
150
151
152
153
154 int
155 atp_init(struct device *dev)
156 {
157 int *port, ports[] = {0x378, 0x278, 0x3bc, 0};
158 int base_addr = dev->base_addr;
159
160 if (base_addr > 0x1ff)
161 return atp_probe1(dev, base_addr);
162 else if (base_addr == 1)
163 return ENXIO;
164
165 for (port = ports; *port; port++) {
166 int ioaddr = *port;
167 outb(0x57, ioaddr + PAR_DATA);
168 if (inb(ioaddr + PAR_DATA) != 0x57)
169 continue;
170 if (atp_probe1(dev, ioaddr) == 0)
171 return 0;
172 }
173
174 return ENODEV;
175 }
176
177 static int atp_probe1(struct device *dev, short ioaddr)
178 {
179 int saved_ctrl_reg, status;
180
181 outb(0xff, ioaddr + PAR_DATA);
182
183
184 saved_ctrl_reg = inb(ioaddr + PAR_CONTROL);
185
186 outb(0x04, ioaddr + PAR_CONTROL);
187 write_reg_high(ioaddr, CMR1, CMR1h_RESET);
188 eeprom_delay(2048);
189 status = read_nibble(ioaddr, CMR1);
190
191 if ((status & 0x78) != 0x08) {
192
193 outb(saved_ctrl_reg, ioaddr + PAR_CONTROL);
194 return 1;
195 }
196 status = read_nibble(ioaddr, CMR2_h);
197 if ((status & 0x78) != 0x10) {
198 outb(saved_ctrl_reg, ioaddr + PAR_CONTROL);
199 return 1;
200 }
201
202 write_reg_byte(ioaddr, CMR2, 0x01);
203 write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);
204
205
206 if (ioaddr == 0x378)
207 dev->irq = 7;
208 else
209 dev->irq = 5;
210 write_reg_high(ioaddr, CMR1, CMR1h_TxRxOFF);
211 write_reg(ioaddr, CMR2, CMR2_NULL);
212
213 dev->base_addr = ioaddr;
214
215
216 get_node_ID(dev);
217
218 printk("%s: Pocket adaptor found at %#3x, IRQ %d, SAPROM "
219 "%02X:%02X:%02X:%02X:%02X:%02X.\n", dev->name, dev->base_addr,
220 dev->irq, dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
221 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
222
223
224 write_reg_high(ioaddr, CMR1, CMR1h_RESET);
225
226 if (net_debug)
227 printk(version);
228
229
230 ether_setup(dev);
231 dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
232 memset(dev->priv, 0, sizeof(struct net_local));
233
234
235 {
236 struct net_local *lp = (struct net_local *)dev->priv;
237 lp->addr_mode = CMR2h_Normal;
238 }
239
240
241 dev->if_port = (dev->mem_start & 0xf) ? dev->mem_start & 0x7 : 4;
242 if (dev->mem_end & 0xf)
243 net_debug = dev->mem_end & 7;
244
245 dev->open = net_open;
246 dev->stop = net_close;
247 dev->hard_start_xmit = net_send_packet;
248 dev->get_stats = net_get_stats;
249 dev->set_multicast_list = &set_multicast_list;
250
251 return 0;
252 }
253
254
255 static void get_node_ID(struct device *dev)
256 {
257 short ioaddr = dev->base_addr;
258 int sa_offset = 0;
259 int i;
260
261 write_reg(ioaddr, CMR2, CMR2_EEPROM);
262
263
264
265 if (eeprom_op(ioaddr, EE_READ(0)) == 0xffff)
266 sa_offset = 15;
267
268 for (i = 0; i < 3; i++)
269 ((unsigned short *)dev->dev_addr)[i] =
270 ntohs(eeprom_op(ioaddr, EE_READ(sa_offset + i)));
271
272 write_reg(ioaddr, CMR2, CMR2_NULL);
273 }
274
275
276
277
278
279
280
281
282
283
284
285
286
/* Clock a command out to the EEPROM one bit at a time (MSB first) and
   clock the response bits back in, returning the accumulated read data.
   The bit-bang sequence (data bit with clock low, delay, clock high,
   sample) is order-critical -- do not reorder these register writes. */
static unsigned short eeprom_op(short ioaddr, unsigned int cmd)
{
	unsigned eedata_out = 0;
	int num_bits = EE_CMD_SIZE;

	while (--num_bits >= 0) {
		/* Present the next command bit with the clock low... */
		char outval = test_bit(num_bits, &cmd) ? EE_DATA_WRITE : 0;
		write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_LOW);
		eeprom_delay(5);
		/* ...then raise the clock and sample the data-out pin. */
		write_reg_high(ioaddr, PROM_CMD, outval | EE_CLK_HIGH);
		eedata_out <<= 1;
		if (read_nibble(ioaddr, PROM_DATA) & EE_DATA_READ)
			eedata_out++;
		eeprom_delay(5);
	}
	/* Drop the clock and deselect the chip. */
	write_reg_high(ioaddr, PROM_CMD, EE_CLK_LOW & ~EE_CS);
	return eedata_out;
}
305
306
307
308
309
310
311
312
313
314
315
316
317 static int net_open(struct device *dev)
318 {
319
320
321
322
323 if (irq2dev_map[dev->irq] != 0
324 || (irq2dev_map[dev->irq] = dev) == 0
325 || request_irq(dev->irq, &net_interrupt, 0, "atp")) {
326 return -EAGAIN;
327 }
328
329 hardware_init(dev);
330 dev->start = 1;
331 return 0;
332 }
333
334
335
/* Reset the adaptor and reload all per-open state: station address,
   receive mode, interrupt routing/masks, and the software transmit
   bookkeeping.  Called from open and from the error-recovery paths
   (timeouts, Rx errors).  The register write order matters. */
static void hardware_init(struct device *dev)
{
	struct net_local *lp = (struct net_local *)dev->priv;
	int ioaddr = dev->base_addr;
	int i;

	/* Reset the chip before programming it. */
	write_reg_high(ioaddr, CMR1, CMR1h_RESET);

	/* Load the station address into the physical-address registers. */
	for (i = 0; i < 6; i++)
		write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);

	/* Restore the most recently selected receive mode
	   (normal/promiscuous/off, cached in lp->addr_mode). */
	write_reg_high(ioaddr, CMR2, lp->addr_mode);

	if (net_debug > 2) {
		printk("%s: Reset: current Rx mode %d.\n", dev->name,
			   (read_nibble(ioaddr, CMR2_h) >> 3) & 0x0f);
	}

	/* Switch the adaptor's output line to IRQ mode and enable Tx/Rx. */
	write_reg(ioaddr, CMR2, CMR2_IRQOUT);
	write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE);

	/* Enable the interrupt line at the parallel port itself. */
	outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);

	/* Unmask the events we handle: Rx done, Tx done/error, Rx error. */
	write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
	write_reg_high(ioaddr, IMR, ISRh_RxErr);

	/* Reset the software transmit state. */
	lp->tx_unit_busy = 0;
	lp->pac_cnt_in_tx_buf = 0;
	lp->saved_tx_size = 0;

	dev->tbusy = 0;
	dev->interrupt = 0;
}
371
/* Start a transmit: load the packet length into the transmit-count
   registers (low byte, then high bits) and only then issue the Xmit
   command -- the count must be written before CMR1_Xmit. */
static void trigger_send(short ioaddr, int length)
{
	write_reg_byte(ioaddr, TxCNT0, length & 0xff);
	write_reg(ioaddr, TxCNT1, length >> 8);
	write_reg(ioaddr, CMR1, CMR1_Xmit);
}
378
/* Copy a packet into the adaptor's transmit buffer.  The low bit of
   data_mode selects the transfer width: even modes use byte-wide
   (mode-0) writes, odd modes use nibble (mode-1) writes.  The exact
   outb() sequence is part of the hardware protocol -- do not reorder. */
static void write_packet(short ioaddr, int length, unsigned char *packet, int data_mode)
{
	length = (length + 1) & ~1;		/* Round up to an even byte count. */
	outb(EOC+MAR, ioaddr + PAR_DATA);
	if ((data_mode & 1) == 0) {
		/* Write the packet out using byte-wide (mode 0) transfers. */
		outb(WrAddr+MAR, ioaddr + PAR_DATA);
		do {
			write_byte_mode0(ioaddr, *packet++);
		} while (--length > 0) ;
	} else {
		/* Nibble mode: the first byte must be clocked out by hand.
		   NOTE(review): the 0x40 bit on the first nibble appears to
		   prime the write address latch -- confirm against the
		   adaptor datasheet. */
		unsigned char outbyte = *packet++;

		outb(Ctrl_LNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
		outb(WrAddr+MAR, ioaddr + PAR_DATA);

		outb((outbyte & 0x0f)|0x40, ioaddr + PAR_DATA);
		outb(outbyte & 0x0f, ioaddr + PAR_DATA);
		outbyte >>= 4;
		outb(outbyte & 0x0f, ioaddr + PAR_DATA);
		outb(Ctrl_HNibWrite + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
		while (--length > 0)
			write_byte_mode1(ioaddr, *packet++);
	}
	/* Terminate the transfer and return the port to its idle state. */
	outb(0xff, ioaddr + PAR_DATA);
	outb(Ctrl_HNibWrite | Ctrl_SelData | Ctrl_IRQEN, ioaddr + PAR_CONTROL);
}
408
/* Queue a packet for transmission.  Returns 0 when the packet has been
   accepted (and the skb freed), 1 to ask the caller to requeue while
   the transmitter is busy but not yet timed out. */
static int
net_send_packet(struct sk_buff *skb, struct device *dev)
{
	struct net_local *lp = (struct net_local *)dev->priv;
	int ioaddr = dev->base_addr;

	if (dev->tbusy) {
		/* A transmit is already pending; if it has been stuck for 5
		   jiffies or more, assume a lockup and reinitialize. */
		int tickssofar = jiffies - dev->trans_start;
		if (tickssofar < 5)
			return 1;	/* Not timed out yet: let the caller retry. */
		printk("%s: transmit timed out, %s?\n", dev->name,
			   inb(ioaddr + PAR_CONTROL) & 0x10 ? "network cable problem"
			   : "IRQ conflict");
		lp->stats.tx_errors++;
		/* Try to restart the adaptor, then fall through and send. */
		hardware_init(dev);
		dev->tbusy=0;
		dev->trans_start = jiffies;
	}

	/* A NULL skb means a higher layer thinks we missed a tx-done
	   interrupt; let dev_tint() run the queue. */
	if (skb == NULL) {
		dev_tint(dev);
		return 0;
	}

	/* Atomically claim the transmitter; a nonzero return means another
	   path got in first (conflict is only reported, not retried). */
	if (set_bit(0, (void*)&dev->tbusy) != 0)
		printk("%s: Transmitter access conflict.\n", dev->name);
	else {
		/* Pad short frames up to the ethernet minimum. */
		short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
		unsigned char *buf = skb->data;
		int flags;

		/* Mask the adaptor's interrupts while the packet is copied to
		   the hardware; the two IMR writes must not be interrupted. */
		save_flags(flags);
		cli();
		write_reg(ioaddr, IMR, 0);
		write_reg_high(ioaddr, IMR, 0);
		restore_flags(flags);

		write_packet(ioaddr, length, buf, dev->if_port);

		lp->pac_cnt_in_tx_buf++;
		if (lp->tx_unit_busy == 0) {
			/* Transmitter idle: start this packet immediately. */
			trigger_send(ioaddr, length);
			lp->saved_tx_size = 0;
			lp->re_tx = 0;
			lp->tx_unit_busy = 1;
		} else
			/* A transmit is in flight: the interrupt handler will
			   trigger this one from saved_tx_size when it completes. */
			lp->saved_tx_size = length;

		dev->trans_start = jiffies;

		/* Re-enable the interrupt sources. */
		write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
		write_reg_high(ioaddr, IMR, ISRh_RxErr);
	}

	/* The packet data has been copied to the adaptor (or dropped on
	   conflict), so the skb is always freed here. */
	dev_kfree_skb (skb, FREE_WRITE);

	return 0;
}
477
478
479
/* The interrupt handler: services Rx-ready, Tx-done/Tx-error and a
   missed-packet heuristic in a bounded loop, then re-arms the adaptor's
   interrupt output.  The register sequencing here is order-critical. */
static void
net_interrupt(int reg_ptr)
{
	/* Recover the IRQ number from the saved registers (old-kernel
	   convention: orig_eax holds -(irq+2)). */
	int irq = -(((struct pt_regs *)reg_ptr)->orig_eax+2);
	struct device *dev = (struct device *)(irq2dev_map[irq]);
	struct net_local *lp;
	int ioaddr, status, boguscount = 20;
	static int num_tx_since_rx = 0;

	if (dev == NULL) {
		printk ("ATP_interrupt(): irq %d for unknown device.\n", irq);
		return;
	}
	dev->interrupt = 1;

	ioaddr = dev->base_addr;
	lp = (struct net_local *)dev->priv;

	/* Disable further interrupts from the port while we work. */
	outb(Ctrl_SelData, ioaddr + PAR_CONTROL);

	/* Switch the adaptor's output line from IRQ back to data, and mask
	   its interrupt sources. */
	write_reg(ioaddr, CMR2, CMR2_NULL);
	write_reg(ioaddr, IMR, 0);

	if (net_debug > 5) printk("%s: In interrupt ", dev->name);
	while (--boguscount > 0) {		/* Bounded event-service loop. */
		status = read_nibble(ioaddr, ISR);
		if (net_debug > 5) printk("loop status %02x..", status);

		if (status & (ISR_RxOK<<3)) {
			write_reg(ioaddr, ISR, ISR_RxOK);	/* Acknowledge Rx. */
			/* Drain all queued receive packets. */
			do {
				int read_status = read_nibble(ioaddr, CMR1);
				if (net_debug > 6)
					printk("handling Rx packet %02x..", read_status);
				/* An IRQ flag while draining indicates an Rx overrun:
				   empty the buffer with the receiver off, then restore
				   the normal receive mode. */
				if (read_status & (CMR1_IRQ << 3)) {
					lp->stats.rx_over_errors++;
					write_reg_high(ioaddr, CMR2, CMR2h_OFF);
					net_rx(dev);
					write_reg_high(ioaddr, ISR, ISRh_RxErr);
					write_reg_high(ioaddr, CMR2, lp->addr_mode);
				} else if ((read_status & (CMR1_BufEnb << 3)) == 0) {
					/* A complete packet is available. */
					net_rx(dev);
					dev->last_rx = jiffies;
					num_tx_since_rx = 0;
				} else
					break;
			} while (--boguscount > 0);
		} else if (status & ((ISR_TxErr + ISR_TxOK)<<3)) {
			if (net_debug > 6) printk("handling Tx done..");
			/* Acknowledge the transmit-complete/-error events. */
			write_reg(ioaddr, ISR, ISR_TxErr + ISR_TxOK);
			if (status & (ISR_TxErr<<3)) {
				lp->stats.collisions++;
				if (++lp->re_tx > 15) {
					/* Too many retries: give up and reset the board. */
					lp->stats.tx_aborted_errors++;
					hardware_init(dev);
					break;
				}
				/* Attempt a retransmit of the same packet. */
				if (net_debug > 6) printk("attempting to ReTx");
				write_reg(ioaddr, CMR1, CMR1_ReXmit + CMR1_Xmit);
			} else {
				/* Clean transmit completion. */
				lp->stats.tx_packets++;
				lp->pac_cnt_in_tx_buf--;
				if ( lp->saved_tx_size) {
					/* A deferred packet is waiting: start it now. */
					trigger_send(ioaddr, lp->saved_tx_size);
					lp->saved_tx_size = 0;
					lp->re_tx = 0;
				} else
					lp->tx_unit_busy = 0;
				dev->tbusy = 0;
				mark_bh(NET_BH);	/* Kick the network bottom half. */
			}
			num_tx_since_rx++;
		} else if (num_tx_since_rx > 8
				   && jiffies > dev->last_rx + 100) {
			/* Many transmits with no receives for ~1 second: assume
			   the receiver has wedged and reinitialize. */
			if (net_debug > 2)
				printk("%s: Missed packet? No Rx after %d Tx and %ld jiffies"
					   " status %02x CMR1 %02x.\n", dev->name,
					   num_tx_since_rx, jiffies - dev->last_rx, status,
					   (read_nibble(ioaddr, CMR1) >> 3) & 15);
			lp->stats.rx_missed_errors++;
			hardware_init(dev);
			num_tx_since_rx = 0;
			break;
		} else
			break;
	}

	/* Defensively rewrite the station address on every interrupt;
	   apparently a workaround for the adaptor occasionally losing it
	   (NOTE(review): confirm against driver history). */
	{
		int i;
		for (i = 0; i < 6; i++)
			write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]);
	}

	/* Switch the adaptor's output line back to IRQ mode... */
	write_reg(ioaddr, CMR2, CMR2_IRQOUT);
	/* ...re-enable the port interrupt line... */
	outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL);
	/* ...and finally unmask the interrupt sources. */
	write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
	write_reg_high(ioaddr, IMR, ISRh_RxErr);

	if (net_debug > 5) printk("exiting interrupt.\n");

	dev->interrupt = 0;

	return;
}
599
600
/* Pull one received packet out of the adaptor's buffer and hand it to
   the network stack.  On any unexpected Rx status the board is simply
   reinitialized rather than resynchronized. */
static void net_rx(struct device *dev)
{
	struct net_local *lp = (struct net_local *)dev->priv;
	int ioaddr = dev->base_addr;
#ifdef notdef
	ushort header[4];
#else
	struct rx_header rx_head;
#endif

	/* Fetch the 8-byte receive header first. */
	outb(EOC+MAR, ioaddr + PAR_DATA);
	read_block(ioaddr, 8, (unsigned char*)&rx_head, dev->if_port);
	if (net_debug > 5)
		printk(" rx_count %04x %04x %04x %04x..", rx_head.pad,
			   rx_head.rx_count, rx_head.rx_status, rx_head.cur_addr);
	if ((rx_head.rx_status & 0x77) != 0x01) {
		/* Anything but a clean single-packet status counts as an error;
		   reset the adaptor to recover. */
		lp->stats.rx_errors++;
		if (net_debug > 3) printk("%s: Unknown ATP Rx error %04x.\n",
				  dev->name, rx_head.rx_status);
		hardware_init(dev);
		return;
	} else {
		/* Good packet.  rx_count includes a 4-byte trailer that is
		   stripped here -- NOTE(review): presumably the FCS; confirm. */
		int pkt_len = (rx_head.rx_count & 0x7ff) - 4;
		struct sk_buff *skb;

		skb = alloc_skb(pkt_len, GFP_ATOMIC);
		if (skb == NULL) {
			printk("%s: Memory squeeze, dropping packet.\n", dev->name);
			lp->stats.rx_dropped++;
			goto done;	/* Still advance past the packet below. */
		}
		skb->len = pkt_len;
		skb->dev = dev;

		/* Copy the packet body out of the adaptor. */
		read_block(ioaddr, pkt_len, skb->data, dev->if_port);

		if (net_debug > 6) {
			unsigned char *data = skb->data;
			printk(" data %02x%02x%02x %02x%02x%02x %02x%02x%02x"
				   "%02x%02x%02x %02x%02x..",
				   data[0], data[1], data[2], data[3], data[4], data[5],
				   data[6], data[7], data[8], data[9], data[10], data[11],
				   data[12], data[13]);
		}

		netif_rx(skb);		/* Hand the packet to the network stack. */
		lp->stats.rx_packets++;
	}
done:
	/* Tell the adaptor to advance to the next packet slot. */
	write_reg(ioaddr, CMR1, CMR1_NextPkt);
	return;
}
657
/* Read 'length' bytes from the adaptor into p, using the transfer width
   selected by data_mode (<=3: explicit read-address setup with mode-0 or
   mode-2 reads; 4-5 and 6-7 use their own read primitives).  Leaves the
   port back in data-select state with EOC set. */
static void read_block(short ioaddr, int length, unsigned char *p, int data_mode)
{
	if (data_mode <= 3) {	/* Modes 0-3 need the explicit read setup. */
		outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
		/* A length of 8 is the Rx header fetch (see net_rx), which uses
		   the high-nibble read-address variant. */
		outb(length == 8 ? RdAddr | HNib | MAR : RdAddr | MAR,
			 ioaddr + PAR_DATA);
		if (data_mode <= 1) {	/* Modes 0-1. */
			do *p++ = read_byte_mode0(ioaddr); while (--length > 0);
		} else			/* Modes 2-3. */
			do *p++ = read_byte_mode2(ioaddr); while (--length > 0);
	} else if (data_mode <= 5)	/* Modes 4-5. */
		do *p++ = read_byte_mode4(ioaddr); while (--length > 0);
	else				/* Modes 6-7. */
		do *p++ = read_byte_mode6(ioaddr); while (--length > 0);

	outb(EOC+HNib+MAR, ioaddr + PAR_DATA);
	outb(Ctrl_SelData, ioaddr + PAR_CONTROL);
}
677
678
/* The inverse of net_open(): stop the receiver, release the IRQ and its
   irq2dev_map claim, and leave the adaptor reset.  The receiver is
   disabled before the IRQ is released. */
static int
net_close(struct device *dev)
{
	struct net_local *lp = (struct net_local *)dev->priv;
	int ioaddr = dev->base_addr;

	dev->tbusy = 1;
	dev->start = 0;

	/* Turn the receiver off, and cache that mode so a later
	   hardware_init() keeps it off. */
	lp->addr_mode = CMR2h_OFF;
	write_reg_high(ioaddr, CMR2, CMR2h_OFF);

	/* Quiesce the port, then release the IRQ and the map entry. */
	outb(0x00, ioaddr + PAR_CONTROL);
	free_irq(dev->irq);
	irq2dev_map[dev->irq] = 0;

	/* Leave the hardware in a reset state. */
	write_reg_high(ioaddr, CMR1, CMR1h_RESET);

	return 0;
}
702
703
704
705 static struct enet_statistics *
706 net_get_stats(struct device *dev)
707 {
708 struct net_local *lp = (struct net_local *)dev->priv;
709 return &lp->stats;
710 }
711
712
713
714
715
716
717
718 static void
719 set_multicast_list(struct device *dev, int num_addrs, void *addrs)
720 {
721 struct net_local *lp = (struct net_local *)dev->priv;
722 short ioaddr = dev->base_addr;
723 lp->addr_mode = num_addrs ? CMR2h_PROMISC : CMR2h_Normal;
724 write_reg_high(ioaddr, CMR2, lp->addr_mode);
725 }
726
727
728
729
730
731
732
733
734