This source file includes the following definitions.
- sd_open
- sd_release
- sd_geninit
- rw_intr
- do_sd_request
- requeue_sd_request
- check_scsidisk_media_change
- sd_init_done
- sd_init_onedisk
- sd_init
- sd_init1
- sd_attach
- revalidate_scsidisk
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/fs.h>
15 #include <linux/kernel.h>
16 #include <linux/sched.h>
17 #include <linux/string.h>
18 #include <linux/errno.h>
19 #include <asm/system.h>
20
21 #define MAJOR_NR SCSI_DISK_MAJOR
22 #include "../block/blk.h"
23 #include "scsi.h"
24 #include "hosts.h"
25 #include "sd.h"
26 #include "scsi_ioctl.h"
27 #include "constants.h"
28
29 #include <linux/genhd.h>
30
31
32
33
34
35 #define MAX_RETRIES 5
36
37
38
39
40
41 #define SD_TIMEOUT 300
42 #define SD_MOD_TIMEOUT 750
43
44 #define CLUSTERABLE_DEVICE(SC) (SC->host->hostt->use_clustering && \
45 scsi_devices[SC->index].type != TYPE_MOD)
46
47 struct hd_struct * sd;
48
49 int NR_SD=0;
50 int MAX_SD=0;
51 Scsi_Disk * rscsi_disks;
52 static int * sd_sizes;
53 static int * sd_blocksizes;
54
55 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
56
57 static sd_init_onedisk(int);
58
59 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
60
/*
 * sd_open() - block-device open entry point for /dev/sd*.
 *
 * Validates the target, waits for any revalidation in progress, and for
 * removable media checks for a disk change and locks the door on first
 * open.  Returns 0 on success, -ENXIO for a nonexistent device.
 */
static int sd_open(struct inode * inode, struct file * filp)
{
	int target;
	target = DEVICE_NR(MINOR(inode->i_rdev));

	if(target >= NR_SD || !rscsi_disks[target].device)
		return -ENXIO;	/* no such device attached */

	/* Busy-wait until revalidate_scsidisk() (which sets device->busy)
	 * finishes.  NOTE(review): this spins without sleeping — presumably
	 * acceptable because revalidation is short and rare; confirm. */
	while (rscsi_disks[target].device->busy);

	if(rscsi_disks[target].device->removable) {
		check_disk_change(inode->i_rdev);

		/* First opener: lock the drive door so the medium cannot be
		 * swapped while the device is in use. */
		if(!rscsi_disks[target].device->access_count)
			sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
	};
	rscsi_disks[target].device->access_count++;
	return 0;
}
83
84 static void sd_release(struct inode * inode, struct file * file)
85 {
86 int target;
87 sync_dev(inode->i_rdev);
88
89 target = DEVICE_NR(MINOR(inode->i_rdev));
90
91 rscsi_disks[target].device->access_count--;
92
93 if(rscsi_disks[target].device->removable) {
94 if(!rscsi_disks[target].device->access_count)
95 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
96 };
97 }
98
static void sd_geninit(void);

/*
 * File operations for the SCSI disk major.  Positional initializer —
 * slot names below follow the 1.x struct file_operations layout
 * (lseek, read, write, readdir, select, ioctl, mmap, open, release,
 * fsync); confirm against <linux/fs.h>.
 */
static struct file_operations sd_fops = {
	NULL,			/* lseek - default */
	block_read,		/* read - generic block-device read */
	block_write,		/* write - generic block-device write */
	NULL,			/* readdir - not a directory */
	NULL,			/* select */
	sd_ioctl,		/* ioctl */
	NULL,			/* mmap */
	sd_open,		/* open */
	sd_release,		/* release */
	block_fsync		/* fsync */
};
113
/*
 * Generic-disk descriptor for partition handling.  Positional
 * initializer; field meanings are grounded by uses elsewhere in this
 * file (max_nr/part/sizes/real_devices/next are filled in by sd_init(),
 * nr_real by sd_geninit(), minor_shift/max_p read by
 * revalidate_scsidisk()) — confirm order against <linux/genhd.h>.
 */
static struct gendisk sd_gendisk = {
	MAJOR_NR,	/* major number */
	"sd",		/* device-name prefix */
	4,		/* minor_shift: 16 minors per unit */
	1 << 4,		/* max_p: partitions per unit */
	0,		/* max_nr - set by sd_init() */
	sd_geninit,	/* init callback */
	NULL,		/* part - set by sd_init() */
	NULL,		/* sizes - set by sd_init() */
	0,		/* nr_real - set by sd_geninit() */
	NULL,		/* real_devices - set by sd_init() */
	NULL		/* next - linked in by sd_init() */
};
127
128 static void sd_geninit (void)
129 {
130 int i;
131
132 for (i = 0; i < NR_SD; ++i)
133 sd[i << 4].nr_sects = rscsi_disks[i].capacity;
134 sd_gendisk.nr_real = NR_SD;
135 }
136
137
138
139
140
141
142
/*
 * rw_intr() - completion handler for READ/WRITE commands issued by
 * requeue_sd_request().
 *
 * On success it copies bounce-buffer contents back to the caller's
 * buffers (reads only), releases bounce buffers and the scatterlist,
 * completes the request, and starts the next one.  On failure it frees
 * the same resources, decodes the sense data (media change, fall back
 * from 10-byte to 6-byte commands), and either retries or fails the
 * request.  Runs in interrupt context.
 */
static void rw_intr (Scsi_Cmnd *SCpnt)
{
	int result = SCpnt->result;
	int this_count = SCpnt->bufflen >> 9;	/* sectors in this transfer */

#ifdef DEBUG
	printk("sd%d : rw_intr(%d, %d)\n", MINOR(SCpnt->request.dev), SCpnt->host->host_no, result);
#endif

	if (!result) {
		/* Command succeeded. */

#ifdef DEBUG
		printk("sd%d : %d sectors remain.\n", MINOR(SCpnt->request.dev), SCpnt->request.nr_sectors);
		printk("use_sg is %d\n ",SCpnt->use_sg);
#endif
		if (SCpnt->use_sg) {
			struct scatterlist * sgpnt;
			int i;
			sgpnt = (struct scatterlist *) SCpnt->buffer;
			for(i=0; i<SCpnt->use_sg; i++) {
#ifdef DEBUG
				printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
#endif
				/* alt_address != NULL means this segment was bounced
				 * below the ISA DMA limit: copy back for reads and
				 * free the bounce segment. */
				if (sgpnt[i].alt_address) {
					if (SCpnt->request.cmd == READ)
						memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
					scsi_free(sgpnt[i].address, sgpnt[i].length);
				};
			};
			/* Free the scatterlist array itself. */
			scsi_free(SCpnt->buffer, SCpnt->sglist_len);
		} else {
			/* Non-SG path: a single bounce buffer may have been used. */
			if (SCpnt->buffer != SCpnt->request.buffer) {
#ifdef DEBUG
				printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
				       SCpnt->bufflen);
#endif
				if (SCpnt->request.cmd == READ)
					memcpy(SCpnt->request.buffer, SCpnt->buffer,
					       SCpnt->bufflen);
				scsi_free(SCpnt->buffer, SCpnt->bufflen);
			};
		};

		/* A partially-satisfied request must have buffer heads to carry
		 * the remainder; raw page requests cannot be split. */
		if (SCpnt->request.nr_sectors > this_count)
		{
			SCpnt->request.errors = 0;

			if (!SCpnt->request.bh)
			{
#ifdef DEBUG
				printk("sd%d : handling page request, no buffer\n",
				       MINOR(SCpnt->request.dev));
#endif
				panic("sd.c: linked page request (%lx %x)",
				      SCpnt->request.sector, this_count);
			}
		}
		end_scsi_request(SCpnt, 1, this_count);
		requeue_sd_request(SCpnt);	/* kick off the next transfer */
		return;
	}

	/* Error path: release bounce buffers / scatterlist first (no data
	 * copy-back — the transfer failed). */
	if (SCpnt->use_sg) {
		struct scatterlist * sgpnt;
		int i;
		sgpnt = (struct scatterlist *) SCpnt->buffer;
		for(i=0; i<SCpnt->use_sg; i++) {
#ifdef DEBUG
			printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
			       SCpnt->bufflen);
#endif
			if (sgpnt[i].alt_address) {
				scsi_free(sgpnt[i].address, sgpnt[i].length);
			};
		};
		scsi_free(SCpnt->buffer, SCpnt->sglist_len);
	} else {
#ifdef DEBUG
		printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
		       SCpnt->bufflen);
#endif
		if (SCpnt->buffer != SCpnt->request.buffer)
			scsi_free(SCpnt->buffer, SCpnt->bufflen);
	};

	/* Decode driver-level errors.  "sugestion" is the (misspelled)
	 * macro name from scsi.h of this vintage — do not "correct" it. */
	if (driver_byte(result) != 0) {
		if (sugestion(result) == SUGGEST_REMAP) {
#ifdef REMAP
			/* NOTE(review): this REMAP branch does not compile if
			 * REMAP is ever defined (missing parentheses, dangling
			 * else) — it is dead scaffolding. */
			if rscsi_disks[DEVICE_NR(SCpnt->request.dev)].remap
			{
				result = 0;
			}
			else
#endif
		}

		if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
			if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
				if(rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->removable) {
					/* Unit attention on removable media: flag a media
					 * change and fail this request so the caller
					 * revalidates. */
					rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->changed = 1;
					end_scsi_request(SCpnt, 0, this_count);
					requeue_sd_request(SCpnt);
					return;
				}
			}
		}

		/* ILLEGAL REQUEST with 10-byte commands enabled: the drive
		 * probably only speaks 6-byte READ/WRITE — drop to 6-byte
		 * mode and retry the request. */
		if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
			if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten) {
				rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten = 0;
				requeue_sd_request(SCpnt);
				result = 0;
			} else {
			}
		}
	}
	if (result) {
		/* Unrecovered error: log it, fail the current chunk, move on. */
		printk("SCSI disk error : host %d id %d lun %d return code = %x\n",
		       rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->host->host_no,
		       rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->id,
		       rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->lun, result);

		if (driver_byte(result) & DRIVER_SENSE)
			print_sense("sd", SCpnt);
		end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
		requeue_sd_request(SCpnt);
		return;
	}
}
309
310
311
312
313
314
315
/*
 * do_sd_request() - block-layer request function for the SCSI disk major.
 *
 * Pulls requests off the queue and hands each to requeue_sd_request()
 * wrapped in a Scsi_Cmnd.  Interrupts are disabled (cli) while the
 * request list is examined/modified, since rw_intr() completes requests
 * from interrupt context.
 */
static void do_sd_request (void)
{
	Scsi_Cmnd * SCpnt = NULL;
	struct request * req = NULL;
	int flag = 0;
	while (1==1){
		cli();
		/* dev == -1 marks a request already in flight; nothing to do. */
		if (CURRENT != NULL && CURRENT->dev == -1) {
			sti();
			return;
		};

		INIT_SCSI_REQUEST;	/* validates CURRENT; may return */

		/* Only the first pass may sleep in allocate_device(); later
		 * passes must not block, so they skip straight to the
		 * non-blocking scan below. */
		if (flag++ == 0)
			SCpnt = allocate_device(&CURRENT,
						rscsi_disks[DEVICE_NR(MINOR(CURRENT->dev))].device->index, 0);
		else SCpnt = NULL;
		sti();

		/* Head-of-queue device busy: with several disks, scan the rest
		 * of the queue for any request whose device can take a command
		 * now, and splice it out of the list if one is found. */
		if (!SCpnt && NR_SD > 1){
			struct request *req1;
			req1 = NULL;	/* trails req for unlinking */
			cli();
			req = CURRENT;
			while(req){
				SCpnt = request_queueable(req,
							  rscsi_disks[DEVICE_NR(MINOR(req->dev))].device->index);
				if(SCpnt) break;
				req1 = req;
				req = req->next;
			};
			/* request_queueable() marks the request taken (dev == -1);
			 * unlink it from the queue. */
			if (SCpnt && req->dev == -1) {
				if (req == CURRENT)
					CURRENT = CURRENT->next;
				else
					req1->next = req->next;
			};
			sti();
		};

		if (!SCpnt) return;	/* no device free — wait for an interrupt */

		/* Build and issue the SCSI command for this request. */
		requeue_sd_request(SCpnt);
	};
}
382
383 static void requeue_sd_request (Scsi_Cmnd * SCpnt)
384 {
385 int dev, block, this_count;
386 unsigned char cmd[10];
387 int bounce_size, contiguous;
388 int max_sg;
389 struct buffer_head * bh, *bhp;
390 char * buff, *bounce_buffer;
391
392 repeat:
393
394 if(SCpnt->request.dev <= 0) {
395 do_sd_request();
396 return;
397 }
398
399 dev = MINOR(SCpnt->request.dev);
400 block = SCpnt->request.sector;
401 this_count = 0;
402
403 #ifdef DEBUG
404 printk("Doing sd request, dev = %d, block = %d\n", dev, block);
405 #endif
406
407 if (dev >= (NR_SD << 4) || block + SCpnt->request.nr_sectors > sd[dev].nr_sects)
408 {
409 end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
410 goto repeat;
411 }
412
413 block += sd[dev].start_sect;
414 dev = DEVICE_NR(dev);
415
416 if (rscsi_disks[dev].device->changed)
417 {
418
419
420
421
422 end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
423 goto repeat;
424 }
425
426 #ifdef DEBUG
427 printk("sd%d : real dev = /dev/sd%d, block = %d\n", MINOR(SCpnt->request.dev), dev, block);
428 #endif
429
430 switch (SCpnt->request.cmd)
431 {
432 case WRITE :
433 if (!rscsi_disks[dev].device->writeable)
434 {
435 end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
436 goto repeat;
437 }
438 cmd[0] = WRITE_6;
439 break;
440 case READ :
441 cmd[0] = READ_6;
442 break;
443 default :
444 panic ("Unknown sd command %d\n", SCpnt->request.cmd);
445 }
446
447 SCpnt->this_count = 0;
448
449
450
451 contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
452 bounce_buffer = NULL;
453 bounce_size = (SCpnt->request.nr_sectors << 9);
454
455
456
457
458
459
460 if (contiguous && SCpnt->request.bh &&
461 ((int) SCpnt->request.bh->b_data) + (SCpnt->request.nr_sectors << 9) - 1 >
462 ISA_DMA_THRESHOLD && SCpnt->host->unchecked_isa_dma) {
463 if(((int) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
464 bounce_buffer = (char *) scsi_malloc(bounce_size);
465 if(!bounce_buffer) contiguous = 0;
466 };
467
468 if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
469 for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp,
470 bhp = bhp->b_reqnext) {
471 if(!CONTIGUOUS_BUFFERS(bh,bhp)) {
472 if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
473 contiguous = 0;
474 break;
475 }
476 };
477 if (!SCpnt->request.bh || contiguous) {
478
479
480 this_count = SCpnt->request.nr_sectors;
481 buff = SCpnt->request.buffer;
482 SCpnt->use_sg = 0;
483
484 } else if (SCpnt->host->sg_tablesize == 0 ||
485 (need_isa_buffer &&
486 dma_free_sectors <= 10)) {
487
488
489
490
491
492
493
494
495 if (SCpnt->host->sg_tablesize != 0 &&
496 need_isa_buffer &&
497 dma_free_sectors <= 10)
498 printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n");
499
500 this_count = SCpnt->request.current_nr_sectors;
501 buff = SCpnt->request.buffer;
502 SCpnt->use_sg = 0;
503
504 } else {
505
506
507 struct scatterlist * sgpnt;
508 int count, this_count_max;
509 int counted;
510
511 bh = SCpnt->request.bh;
512 this_count = 0;
513 this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
514 count = 0;
515 bhp = NULL;
516 while(bh) {
517 if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
518 if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
519 !CLUSTERABLE_DEVICE(SCpnt) ||
520 (SCpnt->host->unchecked_isa_dma &&
521 ((unsigned int) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
522 if (count < SCpnt->host->sg_tablesize) count++;
523 else break;
524 };
525 this_count += (bh->b_size >> 9);
526 bhp = bh;
527 bh = bh->b_reqnext;
528 };
529 #if 0
530 if(SCpnt->host->unchecked_isa_dma &&
531 ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
532 #endif
533 SCpnt->use_sg = count;
534 count = 512;
535 while( count < (SCpnt->use_sg * sizeof(struct scatterlist)))
536 count = count << 1;
537 SCpnt->sglist_len = count;
538 max_sg = count / sizeof(struct scatterlist);
539 if(SCpnt->host->sg_tablesize < max_sg) max_sg = SCpnt->host->sg_tablesize;
540 sgpnt = (struct scatterlist * ) scsi_malloc(count);
541 memset(sgpnt, 0, count);
542 if (!sgpnt) {
543 printk("Warning - running *really* short on DMA buffers\n");
544 SCpnt->use_sg = 0;
545 this_count = SCpnt->request.current_nr_sectors;
546 buff = SCpnt->request.buffer;
547 } else {
548 buff = (char *) sgpnt;
549 counted = 0;
550 for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
551 count < SCpnt->use_sg && bh;
552 count++, bh = bhp) {
553
554 bhp = bh->b_reqnext;
555
556 if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
557 sgpnt[count].length += bh->b_size;
558 counted += bh->b_size >> 9;
559
560 if (((int) sgpnt[count].address) + sgpnt[count].length - 1 >
561 ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
562 !sgpnt[count].alt_address) {
563 sgpnt[count].alt_address = sgpnt[count].address;
564
565
566
567 if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
568 sgpnt[count].address = NULL;
569 } else {
570 sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
571 };
572
573
574
575
576 if(sgpnt[count].address == NULL){
577 #if 0
578 printk("Warning: Running low on SCSI DMA buffers");
579
580 while(--count >= 0){
581 if(sgpnt[count].alt_address)
582 scsi_free(sgpnt[count].address, sgpnt[count].length);
583 };
584 this_count = SCpnt->request.current_nr_sectors;
585 buff = SCpnt->request.buffer;
586 SCpnt->use_sg = 0;
587 scsi_free(sgpnt, SCpnt->sglist_len);
588 #endif
589 SCpnt->use_sg = count;
590 this_count = counted -= bh->b_size >> 9;
591 break;
592 };
593
594 };
595
596
597
598
599
600 if(bhp && CONTIGUOUS_BUFFERS(bh,bhp) && CLUSTERABLE_DEVICE(SCpnt)) {
601 char * tmp;
602
603 if (((int) sgpnt[count].address) + sgpnt[count].length +
604 bhp->b_size - 1 > ISA_DMA_THRESHOLD &&
605 (SCpnt->host->unchecked_isa_dma) &&
606 !sgpnt[count].alt_address) continue;
607
608 if(!sgpnt[count].alt_address) {count--; continue; }
609 if(dma_free_sectors > 10)
610 tmp = (char *) scsi_malloc(sgpnt[count].length + bhp->b_size);
611 else {
612 tmp = NULL;
613 max_sg = SCpnt->use_sg;
614 };
615 if(tmp){
616 scsi_free(sgpnt[count].address, sgpnt[count].length);
617 sgpnt[count].address = tmp;
618 count--;
619 continue;
620 };
621
622
623
624
625 if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
626 };
627 };
628
629 this_count = counted;
630
631 if(count < SCpnt->use_sg || SCpnt->use_sg > SCpnt->host->sg_tablesize){
632 bh = SCpnt->request.bh;
633 printk("Use sg, count %d %x %d\n", SCpnt->use_sg, count, dma_free_sectors);
634 printk("maxsg = %x, counted = %d this_count = %d\n", max_sg, counted, this_count);
635 while(bh){
636 printk("[%p %lx] ", bh->b_data, bh->b_size);
637 bh = bh->b_reqnext;
638 };
639 if(SCpnt->use_sg < 16)
640 for(count=0; count<SCpnt->use_sg; count++)
641 printk("{%d:%p %p %d} ", count,
642 sgpnt[count].address,
643 sgpnt[count].alt_address,
644 sgpnt[count].length);
645 panic("Ooops");
646 };
647
648 if (SCpnt->request.cmd == WRITE)
649 for(count=0; count<SCpnt->use_sg; count++)
650 if(sgpnt[count].alt_address)
651 memcpy(sgpnt[count].address, sgpnt[count].alt_address,
652 sgpnt[count].length);
653 };
654 };
655
656
657
658 if(SCpnt->use_sg == 0){
659 if (((int) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
660 (SCpnt->host->unchecked_isa_dma)) {
661 if(bounce_buffer)
662 buff = bounce_buffer;
663 else
664 buff = (char *) scsi_malloc(this_count << 9);
665 if(buff == NULL) {
666 this_count = SCpnt->request.current_nr_sectors;
667 buff = (char *) scsi_malloc(this_count << 9);
668 if(!buff) panic("Ran out of DMA buffers.");
669 };
670 if (SCpnt->request.cmd == WRITE)
671 memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
672 };
673 };
674 #ifdef DEBUG
675 printk("sd%d : %s %d/%d 512 byte blocks.\n", MINOR(SCpnt->request.dev),
676 (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
677 this_count, SCpnt->request.nr_sectors);
678 #endif
679
680 cmd[1] = (SCpnt->lun << 5) & 0xe0;
681
682 if (rscsi_disks[dev].sector_size == 1024){
683 if(block & 1) panic("sd.c:Bad block number requested");
684 if(this_count & 1) panic("sd.c:Bad block number requested");
685 block = block >> 1;
686 this_count = this_count >> 1;
687 };
688
689 if (rscsi_disks[dev].sector_size == 256){
690 block = block << 1;
691 this_count = this_count << 1;
692 };
693
694 if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten)
695 {
696 if (this_count > 0xffff)
697 this_count = 0xffff;
698
699 cmd[0] += READ_10 - READ_6 ;
700 cmd[2] = (unsigned char) (block >> 24) & 0xff;
701 cmd[3] = (unsigned char) (block >> 16) & 0xff;
702 cmd[4] = (unsigned char) (block >> 8) & 0xff;
703 cmd[5] = (unsigned char) block & 0xff;
704 cmd[6] = cmd[9] = 0;
705 cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
706 cmd[8] = (unsigned char) this_count & 0xff;
707 }
708 else
709 {
710 if (this_count > 0xff)
711 this_count = 0xff;
712
713 cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
714 cmd[2] = (unsigned char) ((block >> 8) & 0xff);
715 cmd[3] = (unsigned char) block & 0xff;
716 cmd[4] = (unsigned char) this_count;
717 cmd[5] = 0;
718 }
719
720
721
722
723
724
725
726 SCpnt->transfersize = rscsi_disks[dev].sector_size;
727 SCpnt->underflow = this_count << 9;
728 scsi_do_cmd (SCpnt, (void *) cmd, buff,
729 this_count * rscsi_disks[dev].sector_size,
730 rw_intr,
731 (scsi_devices[SCpnt->index].type == TYPE_DISK ?
732 SD_TIMEOUT : SD_MOD_TIMEOUT),
733 MAX_RETRIES);
734 }
735
736 int check_scsidisk_media_change(int full_dev, int flag){
737 int retval;
738 int target;
739 struct inode inode;
740
741 target = DEVICE_NR(MINOR(full_dev));
742
743 if (target >= NR_SD) {
744 printk("SCSI disk request error: invalid device.\n");
745 return 0;
746 };
747
748 if(!rscsi_disks[target].device->removable) return 0;
749
750 inode.i_rdev = full_dev;
751 retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
752
753 if(retval){
754
755
756
757
758 rscsi_disks[target].device->changed = 1;
759 return 1;
760
761 };
762
763 retval = rscsi_disks[target].device->changed;
764 if(!flag) rscsi_disks[target].device->changed = 0;
765 return retval;
766 }
767
768 static void sd_init_done (Scsi_Cmnd * SCpnt)
769 {
770 struct request * req;
771 struct task_struct * p;
772
773 req = &SCpnt->request;
774 req->dev = 0xfffe;
775
776 if ((p = req->waiting) != NULL) {
777 req->waiting = NULL;
778 p->state = TASK_RUNNING;
779 if (p->counter > current->counter)
780 need_resched = 1;
781 }
782 }
783
784 static int sd_init_onedisk(int i)
785 {
786 int j = 0;
787 unsigned char cmd[10];
788 unsigned char *buffer;
789 char spintime;
790 int the_result, retries;
791 Scsi_Cmnd * SCpnt;
792
793
794
795
796
797 SCpnt = allocate_device(NULL, rscsi_disks[i].device->index, 1);
798 buffer = (unsigned char *) scsi_malloc(512);
799
800 spintime = 0;
801
802
803 if (current == task[0]){
804 do{
805 cmd[0] = TEST_UNIT_READY;
806 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
807 memset ((void *) &cmd[2], 0, 8);
808 SCpnt->request.dev = 0xffff;
809 SCpnt->sense_buffer[0] = 0;
810 SCpnt->sense_buffer[2] = 0;
811
812 scsi_do_cmd (SCpnt,
813 (void *) cmd, (void *) buffer,
814 512, sd_init_done, SD_TIMEOUT,
815 MAX_RETRIES);
816
817 while(SCpnt->request.dev != 0xfffe);
818
819 the_result = SCpnt->result;
820
821
822
823 if(the_result && !rscsi_disks[i].device->removable &&
824 SCpnt->sense_buffer[2] == NOT_READY) {
825 int time1;
826 if(!spintime){
827 printk( "sd%d: Spinning up disk...", i );
828 cmd[0] = START_STOP;
829 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
830 cmd[1] |= 1;
831 memset ((void *) &cmd[2], 0, 8);
832 cmd[4] = 1;
833 SCpnt->request.dev = 0xffff;
834 SCpnt->sense_buffer[0] = 0;
835 SCpnt->sense_buffer[2] = 0;
836
837 scsi_do_cmd (SCpnt,
838 (void *) cmd, (void *) buffer,
839 512, sd_init_done, SD_TIMEOUT,
840 MAX_RETRIES);
841
842 while(SCpnt->request.dev != 0xfffe);
843
844 spintime = jiffies;
845 };
846
847 time1 = jiffies;
848 while(jiffies < time1 + 100);
849 printk( "." );
850 };
851 } while(the_result && spintime && spintime+5000 > jiffies);
852 if (spintime) {
853 if (the_result)
854 printk( "not responding...\n" );
855 else
856 printk( "ready\n" );
857 }
858 };
859
860
861 retries = 3;
862 do {
863 cmd[0] = READ_CAPACITY;
864 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
865 memset ((void *) &cmd[2], 0, 8);
866 memset ((void *) buffer, 0, 8);
867 SCpnt->request.dev = 0xffff;
868 SCpnt->sense_buffer[0] = 0;
869 SCpnt->sense_buffer[2] = 0;
870
871 scsi_do_cmd (SCpnt,
872 (void *) cmd, (void *) buffer,
873 8, sd_init_done, SD_TIMEOUT,
874 MAX_RETRIES);
875
876 if (current == task[0])
877 while(SCpnt->request.dev != 0xfffe);
878 else
879 if (SCpnt->request.dev != 0xfffe){
880 SCpnt->request.waiting = current;
881 current->state = TASK_UNINTERRUPTIBLE;
882 while (SCpnt->request.dev != 0xfffe) schedule();
883 };
884
885 the_result = SCpnt->result;
886 retries--;
887
888 } while(the_result && retries);
889
890 SCpnt->request.dev = -1;
891
892 wake_up(&scsi_devices[SCpnt->index].device_wait);
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909 if (the_result)
910 {
911 printk ("sd%d : READ CAPACITY failed.\n"
912 "sd%d : status = %x, message = %02x, host = %d, driver = %02x \n",
913 i,i,
914 status_byte(the_result),
915 msg_byte(the_result),
916 host_byte(the_result),
917 driver_byte(the_result)
918 );
919 if (driver_byte(the_result) & DRIVER_SENSE)
920 printk("sd%d : extended sense code = %1x \n", i, SCpnt->sense_buffer[2] & 0xf);
921 else
922 printk("sd%d : sense not available. \n", i);
923
924 printk("sd%d : block size assumed to be 512 bytes, disk size 1GB. \n", i);
925 rscsi_disks[i].capacity = 0x1fffff;
926 rscsi_disks[i].sector_size = 512;
927
928
929
930 if(rscsi_disks[i].device->removable &&
931 SCpnt->sense_buffer[2] == NOT_READY)
932 rscsi_disks[i].device->changed = 1;
933
934 }
935 else
936 {
937 rscsi_disks[i].capacity = (buffer[0] << 24) |
938 (buffer[1] << 16) |
939 (buffer[2] << 8) |
940 buffer[3];
941
942 rscsi_disks[i].sector_size = (buffer[4] << 24) |
943 (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
944
945 if (rscsi_disks[i].sector_size != 512 &&
946 rscsi_disks[i].sector_size != 1024 &&
947 rscsi_disks[i].sector_size != 256)
948 {
949 printk ("sd%d : unsupported sector size %d.\n",
950 i, rscsi_disks[i].sector_size);
951 if(rscsi_disks[i].device->removable){
952 rscsi_disks[i].capacity = 0;
953 } else {
954 printk ("scsi : deleting disk entry.\n");
955 for (j=i; j < NR_SD - 1;)
956 rscsi_disks[j] = rscsi_disks[++j];
957 --i;
958 --NR_SD;
959 scsi_free(buffer, 512);
960 return i;
961 };
962 }
963 if(rscsi_disks[i].sector_size == 1024)
964 rscsi_disks[i].capacity <<= 1;
965 if(rscsi_disks[i].sector_size == 256)
966 rscsi_disks[i].capacity >>= 1;
967 }
968
969 rscsi_disks[i].ten = 1;
970 rscsi_disks[i].remap = 1;
971 scsi_free(buffer, 512);
972 return i;
973 }
974
975
976
977
978
979
980
981 unsigned long sd_init(unsigned long memory_start, unsigned long memory_end)
982 {
983 int i;
984
985 if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
986 printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
987 return memory_start;
988 }
989 if (MAX_SD == 0) return memory_start;
990
991 sd_sizes = (int *) memory_start;
992 memory_start += (MAX_SD << 4) * sizeof(int);
993 memset(sd_sizes, 0, (MAX_SD << 4) * sizeof(int));
994
995 sd_blocksizes = (int *) memory_start;
996 memory_start += (MAX_SD << 4) * sizeof(int);
997 for(i=0;i<(MAX_SD << 4);i++) sd_blocksizes[i] = 1024;
998 blksize_size[MAJOR_NR] = sd_blocksizes;
999
1000 sd = (struct hd_struct *) memory_start;
1001 memory_start += (MAX_SD << 4) * sizeof(struct hd_struct);
1002
1003 sd_gendisk.max_nr = MAX_SD;
1004 sd_gendisk.part = sd;
1005 sd_gendisk.sizes = sd_sizes;
1006 sd_gendisk.real_devices = (void *) rscsi_disks;
1007
1008 for (i = 0; i < NR_SD; ++i)
1009 i = sd_init_onedisk(i);
1010
1011 blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
1012
1013
1014
1015
1016 if(rscsi_disks[0].device->host->sg_tablesize)
1017 read_ahead[MAJOR_NR] = 120;
1018
1019 else
1020 read_ahead[MAJOR_NR] = 4;
1021
1022 sd_gendisk.next = gendisk_head;
1023 gendisk_head = &sd_gendisk;
1024 return memory_start;
1025 }
1026
1027 unsigned long sd_init1(unsigned long mem_start, unsigned long mem_end){
1028 rscsi_disks = (Scsi_Disk *) mem_start;
1029 mem_start += MAX_SD * sizeof(Scsi_Disk);
1030 return mem_start;
1031 };
1032
1033 void sd_attach(Scsi_Device * SDp){
1034 SDp->scsi_request_fn = do_sd_request;
1035 rscsi_disks[NR_SD++].device = SDp;
1036 if(NR_SD > MAX_SD) panic ("scsi_devices corrupt (sd)");
1037 };
1038
1039 #define DEVICE_BUSY rscsi_disks[target].device->busy
1040 #define USAGE rscsi_disks[target].device->access_count
1041 #define CAPACITY rscsi_disks[target].capacity
1042 #define MAYBE_REINIT sd_init_onedisk(target)
1043 #define GENDISK_STRUCT sd_gendisk
1044
1045
1046
1047
1048
1049
1050
1051
/*
 * revalidate_scsidisk() - re-read size and partition table after a
 * media change.
 *
 * Marks the device busy (sd_open spins on this), flushes and
 * invalidates every partition's buffers/inodes, clears the partition
 * entries, re-probes the disk via MAYBE_REINIT (sd_init_onedisk), and
 * rebuilds the partition table.  Returns 0 on success or -EBUSY if the
 * device is in use beyond 'maxusage' open references.
 */
int revalidate_scsidisk(int dev, int maxusage){
	int target, major;
	struct gendisk * gdev;
	int max_p;
	int start;
	int i;

	target = DEVICE_NR(MINOR(dev));
	gdev = &GENDISK_STRUCT;

	/* cli/sti: the busy/usage test-and-set must not race rw_intr(). */
	cli();
	if (DEVICE_BUSY || USAGE > maxusage) {
		sti();
		printk("Device busy for revalidation (usage=%d)\n", USAGE);
		return -EBUSY;
	};
	DEVICE_BUSY = 1;
	sti();

	max_p = gdev->max_p;			/* partitions per unit */
	start = target << gdev->minor_shift;	/* first minor of this unit */
	major = MAJOR_NR << 8;

	/* Drop every cached view of every partition of this unit. */
	for (i=max_p - 1; i >=0 ; i--) {
		sync_dev(major | start | i);
		invalidate_inodes(major | start | i);
		invalidate_buffers(major | start | i);
		gdev->part[start+i].start_sect = 0;
		gdev->part[start+i].nr_sects = 0;
	};

#ifdef MAYBE_REINIT
	MAYBE_REINIT;	/* expands to sd_init_onedisk(target) */
#endif

	/* Whole-disk entry, then re-read the partition table. */
	gdev->part[start].nr_sects = CAPACITY;
	resetup_one_dev(gdev, target);

	DEVICE_BUSY = 0;
	return 0;
}
1093