This source file includes following definitions.
- sd_open
- sd_release
- sd_geninit
- rw_intr
- do_sd_request
- requeue_sd_request
- check_scsidisk_media_change
- sd_init_done
- sd_init_onedisk
- sd_init
- sd_init1
- sd_attach
- revalidate_scsidisk
1
2
3
4
5
6
7
8
9
10
11
12
13 #include <linux/fs.h>
14 #include <linux/kernel.h>
15 #include <linux/sched.h>
16 #include <linux/string.h>
17 #include <linux/errno.h>
18 #include <asm/system.h>
19
20 #define MAJOR_NR SCSI_DISK_MAJOR
21 #include "../block/blk.h"
22 #include "scsi.h"
23 #include "hosts.h"
24 #include "sd.h"
25 #include "scsi_ioctl.h"
26 #include "constants.h"
27
28 #include <linux/genhd.h>
29
30
31
32
33
34 #define MAX_RETRIES 5
35
36
37
38
39
40 #define SD_TIMEOUT 300
41 #define SD_MOD_TIMEOUT 750
42
43 #define CLUSTERABLE_DEVICE(SC) (SC->host->sg_tablesize < 64 && \
44 scsi_devices[SC->index].type != TYPE_MOD)
45
46 struct hd_struct * sd;
47
48 int NR_SD=0;
49 int MAX_SD=0;
50 Scsi_Disk * rscsi_disks;
51 static int * sd_sizes;
52 static int * sd_blocksizes;
53
54 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
55
56 static sd_init_onedisk(int);
57
58 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
59
60 static int sd_open(struct inode * inode, struct file * filp)
61 {
62 int target;
63 target = DEVICE_NR(MINOR(inode->i_rdev));
64
65 if(target >= NR_SD || !rscsi_disks[target].device)
66 return -ENXIO;
67
68
69
70
71 while (rscsi_disks[target].device->busy);
72
73 if(rscsi_disks[target].device->removable) {
74 check_disk_change(inode->i_rdev);
75
76 if(!rscsi_disks[target].device->access_count)
77 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
78 };
79 rscsi_disks[target].device->access_count++;
80 return 0;
81 }
82
83 static void sd_release(struct inode * inode, struct file * file)
84 {
85 int target;
86 sync_dev(inode->i_rdev);
87
88 target = DEVICE_NR(MINOR(inode->i_rdev));
89
90 rscsi_disks[target].device->access_count--;
91
92 if(rscsi_disks[target].device->removable) {
93 if(!rscsi_disks[target].device->access_count)
94 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
95 };
96 }
97
static void sd_geninit(void);

/*
 * File operations for the SCSI disk block device (positional
 * initializer — this predates designated initializers).
 */
static struct file_operations sd_fops = {
	NULL,			/* lseek - default */
	block_read,		/* read - generic block-layer read */
	block_write,		/* write - generic block-layer write */
	NULL,			/* readdir */
	NULL,			/* select */
	sd_ioctl,		/* ioctl */
	NULL,			/* mmap */
	sd_open,		/* open */
	sd_release,		/* release */
	block_fsync		/* fsync */
};

/*
 * Generic-disk descriptor: minor_shift of 4 gives 16 minors per unit
 * (whole disk + up to 15 partitions).  part/sizes/max_nr/real_devices
 * are filled in by sd_init(); nr_real by sd_geninit().
 */
static struct gendisk sd_gendisk = {
	MAJOR_NR,		/* major number */
	"sd",			/* device-name prefix */
	4,			/* minor_shift: log2(minors per unit) */
	1 << 4,			/* max_p: partitions per unit */
	0,			/* max_nr: set in sd_init() */
	sd_geninit,		/* init: called by genhd scan */
	NULL,			/* part: set in sd_init() */
	NULL,			/* sizes: set in sd_init() */
	0,			/* nr_real: set in sd_geninit() */
	NULL,			/* real_devices: set in sd_init() */
	NULL			/* next gendisk in chain */
};
126
127 static void sd_geninit (void)
128 {
129 int i;
130
131 for (i = 0; i < NR_SD; ++i)
132 sd[i << 4].nr_sects = rscsi_disks[i].capacity;
133 sd_gendisk.nr_real = NR_SD;
134 }
135
136
137
138
139
140
141
142 static void rw_intr (Scsi_Cmnd *SCpnt)
143 {
144 int result = SCpnt->result;
145 int this_count = SCpnt->bufflen >> 9;
146
147 #ifdef DEBUG
148 printk("sd%d : rw_intr(%d, %d)\n", MINOR(SCpnt->request.dev), SCpnt->host->host_no, result);
149 #endif
150
151
152
153
154
155
156
157 if (!result) {
158
159 #ifdef DEBUG
160 printk("sd%d : %d sectors remain.\n", MINOR(SCpnt->request.dev), SCpnt->request.nr_sectors);
161 printk("use_sg is %d\n ",SCpnt->use_sg);
162 #endif
163 if (SCpnt->use_sg) {
164 struct scatterlist * sgpnt;
165 int i;
166 sgpnt = (struct scatterlist *) SCpnt->buffer;
167 for(i=0; i<SCpnt->use_sg; i++) {
168 #ifdef DEBUG
169 printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
170 #endif
171 if (sgpnt[i].alt_address) {
172 if (SCpnt->request.cmd == READ)
173 memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
174 scsi_free(sgpnt[i].address, sgpnt[i].length);
175 };
176 };
177 scsi_free(SCpnt->buffer, SCpnt->sglist_len);
178 } else {
179 if (SCpnt->buffer != SCpnt->request.buffer) {
180 #ifdef DEBUG
181 printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
182 SCpnt->bufflen);
183 #endif
184 if (SCpnt->request.cmd == READ)
185 memcpy(SCpnt->request.buffer, SCpnt->buffer,
186 SCpnt->bufflen);
187 scsi_free(SCpnt->buffer, SCpnt->bufflen);
188 };
189 };
190
191
192
193
194
195 if (SCpnt->request.nr_sectors > this_count)
196 {
197 SCpnt->request.errors = 0;
198
199 if (!SCpnt->request.bh)
200 {
201 #ifdef DEBUG
202 printk("sd%d : handling page request, no buffer\n",
203 MINOR(SCpnt->request.dev));
204 #endif
205
206
207
208
209 panic("sd.c: linked page request (%lx %x)",
210 SCpnt->request.sector, this_count);
211 }
212 }
213 end_scsi_request(SCpnt, 1, this_count);
214 requeue_sd_request(SCpnt);
215 return;
216 }
217
218
219 if (SCpnt->use_sg) {
220 struct scatterlist * sgpnt;
221 int i;
222 sgpnt = (struct scatterlist *) SCpnt->buffer;
223 for(i=0; i<SCpnt->use_sg; i++) {
224 #ifdef DEBUG
225 printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
226 SCpnt->bufflen);
227 #endif
228 if (sgpnt[i].alt_address) {
229 scsi_free(sgpnt[i].address, sgpnt[i].length);
230 };
231 };
232 scsi_free(SCpnt->buffer, SCpnt->sglist_len);
233 } else {
234 #ifdef DEBUG
235 printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
236 SCpnt->bufflen);
237 #endif
238 if (SCpnt->buffer != SCpnt->request.buffer)
239 scsi_free(SCpnt->buffer, SCpnt->bufflen);
240 };
241
242
243
244
245
246
247
248 if (driver_byte(result) != 0) {
249 if (sugestion(result) == SUGGEST_REMAP) {
250 #ifdef REMAP
251
252
253
254
255 if rscsi_disks[DEVICE_NR(SCpnt->request.dev)].remap
256 {
257 result = 0;
258 }
259 else
260
261 #endif
262 }
263
264 if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
265 if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
266 if(rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->removable) {
267
268
269
270 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->changed = 1;
271 end_scsi_request(SCpnt, 0, this_count);
272 requeue_sd_request(SCpnt);
273 return;
274 }
275 }
276 }
277
278
279
280
281
282
283
284
285
286 if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
287 if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten) {
288 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten = 0;
289 requeue_sd_request(SCpnt);
290 result = 0;
291 } else {
292 }
293 }
294 }
295 if (result) {
296 printk("SCSI disk error : host %d id %d lun %d return code = %x\n",
297 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->host->host_no,
298 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->id,
299 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->lun, result);
300
301 if (driver_byte(result) & DRIVER_SENSE)
302 print_sense("sd", SCpnt);
303 end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
304 requeue_sd_request(SCpnt);
305 return;
306 }
307 }
308
309
310
311
312
313
314
/*
 * Top-level request function for the SCSI disk major.  Pulls requests
 * off the queue and hands each to requeue_sd_request() with an
 * allocated Scsi_Cmnd.  Runs with interrupts disabled (cli) while
 * inspecting or splicing the request queue; sti() before any blocking
 * allocation or dispatch.
 */
static void do_sd_request (void)
{
	Scsi_Cmnd * SCpnt = NULL;
	struct request * req = NULL;
	int flag = 0;
	while (1==1){
		cli();
		/* dev == -1 marks a request already being serviced. */
		if (CURRENT != NULL && CURRENT->dev == -1) {
			sti();
			return;
		};

		INIT_SCSI_REQUEST;

		/* First pass may block waiting for a command block on the
		 * head-of-queue device; later passes must not block, so a
		 * NULL forces the scan below. */
		if (flag++ == 0)
			SCpnt = allocate_device(&CURRENT,
						rscsi_disks[DEVICE_NR(MINOR(CURRENT->dev))].device->index, 0);
		else SCpnt = NULL;
		sti();

		/* Head-of-queue device had no free command block: with more
		 * than one disk, scan the queue for any request whose device
		 * can accept a command now, and splice it out if claimed. */
		if (!SCpnt && NR_SD > 1){
			struct request *req1;
			req1 = NULL;
			cli();
			req = CURRENT;
			while(req){
				SCpnt = request_queueable(req,
							  rscsi_disks[DEVICE_NR(MINOR(req->dev))].device->index);
				if(SCpnt) break;
				req1 = req;	/* remember predecessor for unlink */
				req = req->next;
			};
			if (SCpnt && req->dev == -1) {
				if (req == CURRENT)
					CURRENT = CURRENT->next;
				else
					req1->next = req->next;
			};
			sti();
		};

		if (!SCpnt) return;

		/* Build and issue the SCSI command for this request. */
		requeue_sd_request(SCpnt);
	};
}
381
/*
 * Translate the block-layer request attached to SCpnt into a SCSI
 * READ/WRITE command and issue it via scsi_do_cmd(), with rw_intr() as
 * the completion handler.  Handles:
 *   - partition offset and bounds checking against sd[],
 *   - choosing contiguous vs. scatter-gather transfer,
 *   - bounce buffers for hosts limited to ISA DMA (< ISA_DMA_THRESHOLD),
 *   - 6-byte vs. 10-byte CDB selection and 256/512/1024-byte sectors.
 * "repeat:" is taken after failing a request so the next one is tried.
 */
static void requeue_sd_request (Scsi_Cmnd * SCpnt)
{
	int dev, block, this_count;
	unsigned char cmd[10];
	int bounce_size, contiguous;
	int max_sg;
	struct buffer_head * bh, *bhp;
	char * buff, *bounce_buffer;

repeat:

	/* No (or already-completed) request attached: go pick a new one. */
	if(SCpnt->request.dev <= 0) {
		do_sd_request();
		return;
	}

	dev =  MINOR(SCpnt->request.dev);
	block = SCpnt->request.sector;
	this_count = 0;

#ifdef DEBUG
	printk("Doing sd request, dev = %d, block = %d\n", dev, block);
#endif

	/* Reject minors beyond the attached disks and transfers running
	 * past the end of the partition. */
	if (dev >= (NR_SD << 4) || block + SCpnt->request.nr_sectors > sd[dev].nr_sects)
	{
		end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
		goto repeat;
	}

	block += sd[dev].start_sect;	/* partition-relative -> absolute */
	dev = DEVICE_NR(dev);		/* minor -> disk index */

	/* Media changed under us: fail until the device is revalidated. */
	if (rscsi_disks[dev].device->changed)
	{
		end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
		goto repeat;
	}

#ifdef DEBUG
	printk("sd%d : real dev = /dev/sd%d, block = %d\n", MINOR(SCpnt->request.dev), dev, block);
#endif

	switch (SCpnt->request.cmd)
	{
	case WRITE :
		if (!rscsi_disks[dev].device->writeable)
		{
			end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
			goto repeat;
		}
		cmd[0] = WRITE_6;
		break;
	case READ :
		cmd[0] = READ_6;
		break;
	default :
		panic ("Unknown sd command %d\n", SCpnt->request.cmd);
	}

	SCpnt->this_count = 0;

	/* Try a single contiguous transfer if the device allows clustering. */
	contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
	bounce_buffer = NULL;
	bounce_size = (SCpnt->request.nr_sectors << 9);

	/* Transfer would cross/exceed the ISA DMA limit on a host that
	 * needs low memory: try to get one big bounce buffer. */
	if (contiguous && SCpnt->request.bh &&
	    ((int) SCpnt->request.bh->b_data) + (SCpnt->request.nr_sectors << 9) - 1 >
	    ISA_DMA_THRESHOLD && SCpnt->host->unchecked_isa_dma) {
		if(((int) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
			bounce_buffer = scsi_malloc(bounce_size);
		if(!bounce_buffer) contiguous = 0;
	};

	/* Contiguity only holds if every chained buffer_head is adjacent
	 * in memory to its predecessor. */
	if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
		for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp,
		    bhp = bhp->b_reqnext) {
			if(!CONTIGUOUS_BUFFERS(bh,bhp)) {
				if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
				contiguous = 0;
				break;
			}
		};
	if (!SCpnt->request.bh || contiguous) {

		/* Whole request in one flat transfer. */
		this_count = SCpnt->request.nr_sectors;
		buff = SCpnt->request.buffer;
		SCpnt->use_sg = 0;

	} else if (SCpnt->host->sg_tablesize == 0 ||
		   (need_isa_buffer &&
		    dma_free_sectors <= 10)) {

		/* Host cannot scatter-gather, or DMA pool is nearly
		 * exhausted: transfer just the first buffer. */
		if (SCpnt->host->sg_tablesize != 0 &&
		    need_isa_buffer &&
		    dma_free_sectors <= 10)
			printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n");

		this_count = SCpnt->request.current_nr_sectors;
		buff = SCpnt->request.buffer;
		SCpnt->use_sg = 0;

	} else {

		/* Build a scatter-gather list. */
		struct scatterlist * sgpnt;
		int count, this_count_max;
		int counted;

		bh = SCpnt->request.bh;
		this_count = 0;
		/* CDB transfer-length field limit: 16 bits (10-byte) vs 8. */
		this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
		count = 0;
		bhp = NULL;
		/* Count segments: a new segment starts whenever buffers are
		 * not adjacent, clustering is off, or the ISA boundary is hit. */
		while(bh) {
			if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
			if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
			   !CLUSTERABLE_DEVICE(SCpnt) ||
			   (SCpnt->host->unchecked_isa_dma &&
			    ((unsigned int) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
				if (count < SCpnt->host->sg_tablesize) count++;
				else break;
			};
			this_count += (bh->b_size >> 9);
			bhp = bh;
			bh = bh->b_reqnext;
		};
#if 0
		if(SCpnt->host->unchecked_isa_dma &&
		   ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
#endif
		SCpnt->use_sg = count;
		/* scsi_malloc granularity: round list size up to a power of
		 * two, minimum 512. */
		count = 512;
		while( count < (SCpnt->use_sg * sizeof(struct scatterlist)))
			count = count << 1;
		SCpnt->sglist_len = count;
		max_sg = count / sizeof(struct scatterlist);
		if(SCpnt->host->sg_tablesize < max_sg) max_sg = SCpnt->host->sg_tablesize;
		sgpnt = (struct scatterlist * ) scsi_malloc(count);
		/* NOTE(review): memset runs BEFORE the NULL check below — if
		 * scsi_malloc fails this dereferences NULL; the check should
		 * precede the memset.  Left as-is (doc-only pass). */
		memset(sgpnt, 0, count);
		if (!sgpnt) {
			printk("Warning - running *really* short on DMA buffers\n");
			SCpnt->use_sg = 0;
			this_count = SCpnt->request.current_nr_sectors;
			buff = SCpnt->request.buffer;
		} else {
			buff = (char *) sgpnt;
			counted = 0;
			/* Fill the list, merging adjacent buffers into one
			 * segment and bouncing segments above the ISA limit. */
			for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
			    count < SCpnt->use_sg && bh;
			    count++, bh = bhp) {

				bhp = bh->b_reqnext;

				if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
				sgpnt[count].length += bh->b_size;
				counted += bh->b_size >> 9;

				/* Segment ends above ISA limit on an ISA-only
				 * host: allocate a low bounce buffer for it. */
				if (((int) sgpnt[count].address) + sgpnt[count].length - 1 >
				    ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
				    !sgpnt[count].alt_address) {
					sgpnt[count].alt_address = sgpnt[count].address;

					/* Keep a reserve of DMA sectors. */
					if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
						sgpnt[count].address = NULL;
					} else {
						sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
					};

					/* No bounce buffer available: truncate
					 * the transfer at this segment. */
					if(sgpnt[count].address == NULL){
#if 0
						printk("Warning: Running low on SCSI DMA buffers");

						while(--count >= 0){
							if(sgpnt[count].alt_address)
								scsi_free(sgpnt[count].address, sgpnt[count].length);
						};
						this_count = SCpnt->request.current_nr_sectors;
						buff = SCpnt->request.buffer;
						SCpnt->use_sg = 0;
						scsi_free(sgpnt, SCpnt->sglist_len);
#endif
						SCpnt->use_sg = count;
						this_count = counted -= bh->b_size >> 9;
						break;
					};

				};

				/* Next buffer is contiguous: try to merge it
				 * into the current segment. */
				if(bhp && CONTIGUOUS_BUFFERS(bh,bhp) && CLUSTERABLE_DEVICE(SCpnt)) {
					char * tmp;

					/* Merging would push an unbounced segment
					 * over the ISA limit: start a new one. */
					if (((int) sgpnt[count].address) + sgpnt[count].length +
					    bhp->b_size - 1 > ISA_DMA_THRESHOLD &&
					    (SCpnt->host->unchecked_isa_dma) &&
					    !sgpnt[count].alt_address) continue;

					if(!sgpnt[count].alt_address) {count--; continue; }
					if(dma_free_sectors > 10)
						tmp = scsi_malloc(sgpnt[count].length + bhp->b_size);
					else {
						tmp = NULL;
						max_sg = SCpnt->use_sg;
					};
					if(tmp){
						scsi_free(sgpnt[count].address, sgpnt[count].length);
						sgpnt[count].address = tmp;
						count--;
						continue;
					};

					/* Could not grow the bounce buffer:
					 * allow one extra segment if room. */
					if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
				};
			};

			this_count = counted;

			/* Sanity check: list construction must agree with the
			 * earlier segment count. */
			if(count < SCpnt->use_sg || SCpnt->use_sg > SCpnt->host->sg_tablesize){
				bh = SCpnt->request.bh;
				printk("Use sg, count %d %x %d\n", SCpnt->use_sg, count, dma_free_sectors);
				printk("maxsg = %x, counted = %d this_count = %d\n", max_sg, counted, this_count);
				while(bh){
					printk("[%p %lx] ", bh->b_data, bh->b_size);
					bh = bh->b_reqnext;
				};
				if(SCpnt->use_sg < 16)
					for(count=0; count<SCpnt->use_sg; count++)
						printk("{%d:%p %p %d} ", count,
						       sgpnt[count].address,
						       sgpnt[count].alt_address,
						       sgpnt[count].length);
				panic("Ooops");
			};

			/* WRITE: copy data down into the bounce buffers. */
			if (SCpnt->request.cmd == WRITE)
				for(count=0; count<SCpnt->use_sg; count++)
					if(sgpnt[count].alt_address)
						memcpy(sgpnt[count].address, sgpnt[count].alt_address,
						       sgpnt[count].length);
		};
	};

	/* Flat transfer above the ISA limit: route through a bounce buffer. */
	if(SCpnt->use_sg == 0){
		if (((int) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
		    (SCpnt->host->unchecked_isa_dma)) {
			if(bounce_buffer)
				buff = bounce_buffer;
			else
				buff = (char *) scsi_malloc(this_count << 9);
			if(buff == NULL) {
				/* Fall back to just the first buffer. */
				this_count = SCpnt->request.current_nr_sectors;
				buff = (char *) scsi_malloc(this_count << 9);
				if(!buff) panic("Ran out of DMA buffers.");
			};
			if (SCpnt->request.cmd == WRITE)
				memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
		};
	};
#ifdef DEBUG
	printk("sd%d : %s %d/%d 512 byte blocks.\n", MINOR(SCpnt->request.dev),
	       (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
	       this_count, SCpnt->request.nr_sectors);
#endif

	cmd[1] = (SCpnt->lun << 5) & 0xe0;

	/* Convert 512-byte block counts into device-sector units. */
	if (rscsi_disks[dev].sector_size == 1024){
		if(block & 1) panic("sd.c:Bad block number requested");
		if(this_count & 1) panic("sd.c:Bad block number requested");
		block = block >> 1;
		this_count = this_count >> 1;
	};

	if (rscsi_disks[dev].sector_size == 256){
		block = block << 1;
		this_count = this_count << 1;
	};

	/* 10-byte CDB when count/LBA exceed 6-byte limits and the drive
	 * supports it; otherwise clamp to the 6-byte limits. */
	if (((this_count > 0xff) ||  (block > 0x1fffff)) && rscsi_disks[dev].ten)
	{
		if (this_count > 0xffff)
			this_count = 0xffff;

		cmd[0] += READ_10 - READ_6 ;
		cmd[2] = (unsigned char) (block >> 24) & 0xff;
		cmd[3] = (unsigned char) (block >> 16) & 0xff;
		cmd[4] = (unsigned char) (block >> 8) & 0xff;
		cmd[5] = (unsigned char) block & 0xff;
		cmd[6] = cmd[9] = 0;
		cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
		cmd[8] = (unsigned char) this_count & 0xff;
	}
	else
	{
		if (this_count > 0xff)
			this_count = 0xff;

		cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
		cmd[2] = (unsigned char) ((block >> 8) & 0xff);
		cmd[3] = (unsigned char) block & 0xff;
		cmd[4] = (unsigned char) this_count;
		cmd[5] = 0;
	}

	SCpnt->transfersize = rscsi_disks[dev].sector_size;
	SCpnt->underflow = this_count << 9;
	scsi_do_cmd (SCpnt, (void *) cmd, buff,
		     this_count * rscsi_disks[dev].sector_size,
		     rw_intr,
		     (scsi_devices[SCpnt->index].type == TYPE_DISK ?
		      SD_TIMEOUT : SD_MOD_TIMEOUT),
		     MAX_RETRIES);
}
734
735 int check_scsidisk_media_change(int full_dev, int flag){
736 int retval;
737 int target;
738 struct inode inode;
739
740 target = DEVICE_NR(MINOR(full_dev));
741
742 if (target >= NR_SD) {
743 printk("SCSI disk request error: invalid device.\n");
744 return 0;
745 };
746
747 if(!rscsi_disks[target].device->removable) return 0;
748
749 inode.i_rdev = full_dev;
750 retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
751
752 if(retval){
753
754
755
756
757 rscsi_disks[target].device->changed = 1;
758 return 1;
759
760 };
761
762 retval = rscsi_disks[target].device->changed;
763 if(!flag) rscsi_disks[target].device->changed = 0;
764 return retval;
765 }
766
767 static void sd_init_done (Scsi_Cmnd * SCpnt)
768 {
769 struct request * req;
770 struct task_struct * p;
771
772 req = &SCpnt->request;
773 req->dev = 0xfffe;
774
775 if ((p = req->waiting) != NULL) {
776 req->waiting = NULL;
777 p->state = TASK_RUNNING;
778 if (p->counter > current->counter)
779 need_resched = 1;
780 }
781 }
782
783 static int sd_init_onedisk(int i)
784 {
785 int j = 0;
786 unsigned char cmd[10];
787 unsigned char *buffer;
788 char spintime;
789 int the_result, retries;
790 Scsi_Cmnd * SCpnt;
791
792
793
794
795
796 SCpnt = allocate_device(NULL, rscsi_disks[i].device->index, 1);
797 buffer = (unsigned char *) scsi_malloc(512);
798
799 spintime = 0;
800
801
802 if (current == task[0]){
803 do{
804 cmd[0] = TEST_UNIT_READY;
805 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
806 memset ((void *) &cmd[2], 0, 8);
807 SCpnt->request.dev = 0xffff;
808 SCpnt->sense_buffer[0] = 0;
809 SCpnt->sense_buffer[2] = 0;
810
811 scsi_do_cmd (SCpnt,
812 (void *) cmd, (void *) buffer,
813 512, sd_init_done, SD_TIMEOUT,
814 MAX_RETRIES);
815
816 while(SCpnt->request.dev != 0xfffe);
817
818 the_result = SCpnt->result;
819
820
821
822 if(the_result && !rscsi_disks[i].device->removable &&
823 SCpnt->sense_buffer[2] == NOT_READY) {
824 int time1;
825 if(!spintime){
826 printk( "sd%d: Spinning up disk...", i );
827 cmd[0] = START_STOP;
828 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
829 cmd[1] |= 1;
830 memset ((void *) &cmd[2], 0, 8);
831 cmd[4] = 1;
832 SCpnt->request.dev = 0xffff;
833 SCpnt->sense_buffer[0] = 0;
834 SCpnt->sense_buffer[2] = 0;
835
836 scsi_do_cmd (SCpnt,
837 (void *) cmd, (void *) buffer,
838 512, sd_init_done, SD_TIMEOUT,
839 MAX_RETRIES);
840
841 while(SCpnt->request.dev != 0xfffe);
842
843 spintime = jiffies;
844 };
845
846 time1 = jiffies;
847 while(jiffies < time1 + 100);
848 printk( "." );
849 };
850 } while(the_result && spintime && spintime+5000 > jiffies);
851 if (spintime) {
852 if (the_result)
853 printk( "not responding...\n" );
854 else
855 printk( "ready\n" );
856 }
857 };
858
859
860 retries = 3;
861 do {
862 cmd[0] = READ_CAPACITY;
863 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
864 memset ((void *) &cmd[2], 0, 8);
865 memset ((void *) buffer, 0, 8);
866 SCpnt->request.dev = 0xffff;
867 SCpnt->sense_buffer[0] = 0;
868 SCpnt->sense_buffer[2] = 0;
869
870 scsi_do_cmd (SCpnt,
871 (void *) cmd, (void *) buffer,
872 8, sd_init_done, SD_TIMEOUT,
873 MAX_RETRIES);
874
875 if (current == task[0])
876 while(SCpnt->request.dev != 0xfffe);
877 else
878 if (SCpnt->request.dev != 0xfffe){
879 SCpnt->request.waiting = current;
880 current->state = TASK_UNINTERRUPTIBLE;
881 while (SCpnt->request.dev != 0xfffe) schedule();
882 };
883
884 the_result = SCpnt->result;
885 retries--;
886
887 } while(the_result && retries);
888
889 SCpnt->request.dev = -1;
890
891 wake_up(&scsi_devices[SCpnt->index].device_wait);
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908 if (the_result)
909 {
910 printk ("sd%d : READ CAPACITY failed.\n"
911 "sd%d : status = %x, message = %02x, host = %d, driver = %02x \n",
912 i,i,
913 status_byte(the_result),
914 msg_byte(the_result),
915 host_byte(the_result),
916 driver_byte(the_result)
917 );
918 if (driver_byte(the_result) & DRIVER_SENSE)
919 printk("sd%d : extended sense code = %1x \n", i, SCpnt->sense_buffer[2] & 0xf);
920 else
921 printk("sd%d : sense not available. \n", i);
922
923 printk("sd%d : block size assumed to be 512 bytes, disk size 1GB. \n", i);
924 rscsi_disks[i].capacity = 0x1fffff;
925 rscsi_disks[i].sector_size = 512;
926
927
928
929 if(rscsi_disks[i].device->removable &&
930 SCpnt->sense_buffer[2] == NOT_READY)
931 rscsi_disks[i].device->changed = 1;
932
933 }
934 else
935 {
936 rscsi_disks[i].capacity = (buffer[0] << 24) |
937 (buffer[1] << 16) |
938 (buffer[2] << 8) |
939 buffer[3];
940
941 rscsi_disks[i].sector_size = (buffer[4] << 24) |
942 (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
943
944 if (rscsi_disks[i].sector_size != 512 &&
945 rscsi_disks[i].sector_size != 1024 &&
946 rscsi_disks[i].sector_size != 256)
947 {
948 printk ("sd%d : unsupported sector size %d.\n",
949 i, rscsi_disks[i].sector_size);
950 if(rscsi_disks[i].device->removable){
951 rscsi_disks[i].capacity = 0;
952 } else {
953 printk ("scsi : deleting disk entry.\n");
954 for (j=i; j < NR_SD - 1;)
955 rscsi_disks[j] = rscsi_disks[++j];
956 --i;
957 --NR_SD;
958 scsi_free(buffer, 512);
959 return i;
960 };
961 }
962 if(rscsi_disks[i].sector_size == 1024)
963 rscsi_disks[i].capacity <<= 1;
964 if(rscsi_disks[i].sector_size == 256)
965 rscsi_disks[i].capacity >>= 1;
966 }
967
968 rscsi_disks[i].ten = 1;
969 rscsi_disks[i].remap = 1;
970 scsi_free(buffer, 512);
971 return i;
972 }
973
974
975
976
977
978
979
/*
 * Driver initialization: register the block device, carve the size,
 * blocksize and partition tables out of the boot-time memory pool
 * (memory_start..memory_end), probe each attached disk, and link our
 * gendisk into the global chain.  Returns the advanced memory_start.
 */
unsigned long sd_init(unsigned long memory_start, unsigned long memory_end)
{
	int i;

	if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
		printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
		return memory_start;
	}
	if (MAX_SD == 0) return memory_start;

	/* One entry per minor: MAX_SD disks x 16 minors each. */
	sd_sizes = (int *) memory_start;
	memory_start += (MAX_SD << 4) * sizeof(int);
	memset(sd_sizes, 0, (MAX_SD << 4) * sizeof(int));

	sd_blocksizes = (int *) memory_start;
	memory_start += (MAX_SD << 4) * sizeof(int);
	for(i=0;i<(MAX_SD << 4);i++) sd_blocksizes[i] = 1024;
	blksize_size[MAJOR_NR] = sd_blocksizes;

	sd = (struct hd_struct *) memory_start;
	memory_start += (MAX_SD << 4) * sizeof(struct hd_struct);

	sd_gendisk.max_nr = MAX_SD;
	sd_gendisk.part = sd;
	sd_gendisk.sizes = sd_sizes;
	sd_gendisk.real_devices = (void *) rscsi_disks;

	/* sd_init_onedisk() may delete an entry and return i-1 so the
	 * replacement disk at slot i is probed on the next iteration. */
	for (i = 0; i < NR_SD; ++i)
		i = sd_init_onedisk(i);

	blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;

	/* Aggressive read-ahead only when the host can scatter-gather.
	 * NOTE(review): dereferences rscsi_disks[0].device — looks unsafe
	 * if MAX_SD > 0 but NR_SD == 0; confirm against callers. */
	if(rscsi_disks[0].device->host->sg_tablesize)
		read_ahead[MAJOR_NR] = 120;

	else
		read_ahead[MAJOR_NR] = 4;

	sd_gendisk.next = gendisk_head;
	gendisk_head = &sd_gendisk;
	return memory_start;
}
1025
1026 unsigned long sd_init1(unsigned long mem_start, unsigned long mem_end){
1027 rscsi_disks = (Scsi_Disk *) mem_start;
1028 mem_start += MAX_SD * sizeof(Scsi_Disk);
1029 return mem_start;
1030 };
1031
1032 void sd_attach(Scsi_Device * SDp){
1033 SDp->scsi_request_fn = do_sd_request;
1034 rscsi_disks[NR_SD++].device = SDp;
1035 if(NR_SD > MAX_SD) panic ("scsi_devices corrupt (sd)");
1036 };
1037
/* Helpers for revalidate_scsidisk(); "target" is the disk index in
 * scope at each use site. */
#define DEVICE_BUSY rscsi_disks[target].device->busy
#define USAGE rscsi_disks[target].device->access_count
#define CAPACITY rscsi_disks[target].capacity
#define MAYBE_REINIT sd_init_onedisk(target)   /* re-probe after media change */
#define GENDISK_STRUCT sd_gendisk
1043
1044
1045
1046
1047
1048
1049
1050
/*
 * Re-read size/partition information after a media change.  Fails with
 * -EBUSY if the device is already being revalidated or has more than
 * "maxusage" openers.  Flushes and invalidates every partition minor,
 * re-probes the disk, and rebuilds the partition table.
 */
int revalidate_scsidisk(int dev, int maxusage){
	int target, major;
	struct gendisk * gdev;
	int max_p;
	int start;
	int i;

	target = DEVICE_NR(MINOR(dev));
	gdev = &GENDISK_STRUCT;

	/* cli/sti guard the busy-flag test-and-set against interrupts. */
	cli();
	if (DEVICE_BUSY || USAGE > maxusage) {
		sti();
		printk("Device busy for revalidation (usage=%d)\n", USAGE);
		return -EBUSY;
	};
	DEVICE_BUSY = 1;	/* sd_open() spins on this flag */
	sti();

	max_p = gdev->max_p;
	start = target << gdev->minor_shift;	/* first minor of this disk */
	major = MAJOR_NR << 8;			/* device number high byte */

	/* Drop every cached view of each partition and zero its extent. */
	for (i=max_p - 1; i >=0 ; i--) {
		sync_dev(major | start | i);
		invalidate_inodes(major | start | i);
		invalidate_buffers(major | start | i);
		gdev->part[start+i].start_sect = 0;
		gdev->part[start+i].nr_sects = 0;
	};

#ifdef MAYBE_REINIT
	MAYBE_REINIT;	/* re-read capacity and sector size */
#endif

	gdev->part[start].nr_sects = CAPACITY;
	resetup_one_dev(gdev, target);	/* re-scan the partition table */

	DEVICE_BUSY = 0;
	return 0;
}
1092