This source file includes following definitions.
- sd_open
- sd_release
- sd_geninit
- rw_intr
- do_sd_request
- requeue_sd_request
- check_scsidisk_media_change
- sd_init_done
- sd_init_onedisk
- sd_init
- sd_init1
- sd_attach
- revalidate_scsidisk
- fop_revalidate_scsidisk
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/fs.h>
15 #include <linux/kernel.h>
16 #include <linux/sched.h>
17 #include <linux/string.h>
18 #include <linux/errno.h>
19 #include <asm/system.h>
20
21 #define MAJOR_NR SCSI_DISK_MAJOR
22 #include "../block/blk.h"
23 #include "scsi.h"
24 #include "hosts.h"
25 #include "sd.h"
26 #include "scsi_ioctl.h"
27 #include "constants.h"
28
29 #include <linux/genhd.h>
30
31
32
33
34
/* Maximum number of times the mid-level retries a failed command. */
#define MAX_RETRIES 5

/*
 * Command timeouts, in jiffies.  Magneto-optical (TYPE_MOD) devices are
 * slower to respond, so they get a longer timeout.
 */
#define SD_TIMEOUT 600
#define SD_MOD_TIMEOUT 750

/*
 * True when requests for this command's device may be clustered into
 * one contiguous transfer: the host adapter must support clustering,
 * and magneto-optical devices are excluded.
 */
#define CLUSTERABLE_DEVICE(SC) (SC->host->hostt->use_clustering && \
				SC->device->type != TYPE_MOD)
46
/* Partition table for all SCSI disks: 16 minors (partitions) per disk. */
struct hd_struct * sd;

int NR_SD=0;			/* number of disks actually attached */
int MAX_SD=0;			/* number of disk slots allocated */
Scsi_Disk * rscsi_disks;	/* per-disk state, indexed by disk number */
static int * sd_sizes;		/* device size per minor, in 1K blocks */
static int * sd_blocksizes;	/* block size per minor, in bytes */

extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);

static int check_scsidisk_media_change(dev_t);
static int fop_revalidate_scsidisk(dev_t);

/* NOTE(review): implicit int return type (pre-ANSI declaration style). */
static sd_init_onedisk(int);

static void requeue_sd_request (Scsi_Cmnd * SCpnt);
63
64 static int sd_open(struct inode * inode, struct file * filp)
65 {
66 int target;
67 target = DEVICE_NR(MINOR(inode->i_rdev));
68
69 if(target >= NR_SD || !rscsi_disks[target].device)
70 return -ENXIO;
71
72
73
74
75 while (rscsi_disks[target].device->busy);
76
77 if(rscsi_disks[target].device->removable) {
78 check_disk_change(inode->i_rdev);
79
80 if(!rscsi_disks[target].device->access_count)
81 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
82 };
83 rscsi_disks[target].device->access_count++;
84 return 0;
85 }
86
87 static void sd_release(struct inode * inode, struct file * file)
88 {
89 int target;
90 sync_dev(inode->i_rdev);
91
92 target = DEVICE_NR(MINOR(inode->i_rdev));
93
94 rscsi_disks[target].device->access_count--;
95
96 if(rscsi_disks[target].device->removable) {
97 if(!rscsi_disks[target].device->access_count)
98 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
99 };
100 }
101
static void sd_geninit(void);

/*
 * File operations for SCSI disk block devices.  Read/write/fsync go
 * through the generic block-buffer layer; open/release/ioctl and the
 * removable-media hooks are driver specific.
 */
static struct file_operations sd_fops = {
	NULL,			/* lseek - default */
	block_read,		/* read - generic block-device read */
	block_write,		/* write - generic block-device write */
	NULL,			/* readdir - bad */
	NULL,			/* select */
	sd_ioctl,		/* ioctl */
	NULL,			/* mmap */
	sd_open,		/* open */
	sd_release,		/* release */
	block_fsync,		/* fsync */
	NULL,			/* fasync */
	check_scsidisk_media_change,	/* check media change */
	fop_revalidate_scsidisk		/* revalidate */
};
119
/*
 * Generic-disk descriptor for the "sd" major.  minor_shift of 4 gives
 * 16 minors (partitions) per disk.  max_nr, part, sizes and
 * real_devices are filled in later by sd_init().
 */
static struct gendisk sd_gendisk = {
	MAJOR_NR,		/* major number */
	"sd",			/* device name prefix */
	4,			/* minor_shift: bits reserved for partitions */
	1 << 4,			/* max_p: partitions per disk */
	0,			/* max_nr: set by sd_init() */
	sd_geninit,		/* init function: fills in capacities */
	NULL,			/* part: hd_struct array, set by sd_init() */
	NULL,			/* sizes: set by sd_init() */
	0,			/* nr_real: set by sd_geninit() */
	NULL,			/* real_devices: set by sd_init() */
	NULL			/* next gendisk in the chain */
};
133
134 static void sd_geninit (void)
135 {
136 int i;
137
138 for (i = 0; i < NR_SD; ++i)
139 sd[i << 4].nr_sects = rscsi_disks[i].capacity;
140 sd_gendisk.nr_real = NR_SD;
141 }
142
143
144
145
146
147
148
/*
 * Completion handler for disk READ/WRITE commands.
 *
 * On success it releases any scatter-gather list and ISA-DMA bounce
 * buffers (copying bounce data back to the caller's buffers for READs),
 * finishes the request, and requeues further work.  On error it frees
 * the same resources without copying, then handles the special cases:
 * UNIT ATTENTION on removable media (mark media changed), and ILLEGAL
 * REQUEST when 10-byte commands were in use (fall back to 6-byte
 * commands and retry).  Remaining errors are reported and the current
 * chunk of the request is failed.
 */
static void rw_intr (Scsi_Cmnd *SCpnt)
{
	int result = SCpnt->result;
	int this_count = SCpnt->bufflen >> 9;	/* sectors in this transfer */

#ifdef DEBUG
	printk("sd%d : rw_intr(%d, %d)\n", MINOR(SCpnt->request.dev), SCpnt->host->host_no, result);
#endif

	if (!result) {
		/* Success: free any DMA indirection buffers we allocated. */
#ifdef DEBUG
		printk("sd%d : %d sectors remain.\n", MINOR(SCpnt->request.dev), SCpnt->request.nr_sectors);
		printk("use_sg is %d\n ",SCpnt->use_sg);
#endif
		if (SCpnt->use_sg) {
			struct scatterlist * sgpnt;
			int i;
			sgpnt = (struct scatterlist *) SCpnt->buffer;
			for(i=0; i<SCpnt->use_sg; i++) {
#ifdef DEBUG
				printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
#endif
				/* alt_address set => this segment was bounced. */
				if (sgpnt[i].alt_address) {
					if (SCpnt->request.cmd == READ)
						memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
					scsi_free(sgpnt[i].address, sgpnt[i].length);
				};
			};
			scsi_free(SCpnt->buffer, SCpnt->sglist_len);
		} else {
			/* Single bounce buffer for the whole transfer. */
			if (SCpnt->buffer != SCpnt->request.buffer) {
#ifdef DEBUG
				printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
				       SCpnt->bufflen);
#endif
				if (SCpnt->request.cmd == READ)
					memcpy(SCpnt->request.buffer, SCpnt->buffer,
					       SCpnt->bufflen);
				scsi_free(SCpnt->buffer, SCpnt->bufflen);
			};
		};

		/* Only part of a multi-sector request completed. */
		if (SCpnt->request.nr_sectors > this_count)
		{
			SCpnt->request.errors = 0;

			if (!SCpnt->request.bh)
			{
#ifdef DEBUG
				printk("sd%d : handling page request, no buffer\n",
				       MINOR(SCpnt->request.dev));
#endif
				/* Paging requests have no buffer-head chain and
				   must be satisfied in a single transfer. */
				panic("sd.c: linked page request (%lx %x)",
				      SCpnt->request.sector, this_count);
			}
		}
		end_scsi_request(SCpnt, 1, this_count);
		requeue_sd_request(SCpnt);
		return;
	}

	/* Error path: free bounce buffers without copying data back. */
	if (SCpnt->use_sg) {
		struct scatterlist * sgpnt;
		int i;
		sgpnt = (struct scatterlist *) SCpnt->buffer;
		for(i=0; i<SCpnt->use_sg; i++) {
#ifdef DEBUG
			printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
			       SCpnt->bufflen);
#endif
			if (sgpnt[i].alt_address) {
				scsi_free(sgpnt[i].address, sgpnt[i].length);
			};
		};
		scsi_free(SCpnt->buffer, SCpnt->sglist_len);
	} else {
#ifdef DEBUG
		printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
		       SCpnt->bufflen);
#endif
		if (SCpnt->buffer != SCpnt->request.buffer)
			scsi_free(SCpnt->buffer, SCpnt->bufflen);
	};

	/* Inspect driver-level status for recoverable conditions. */
	if (driver_byte(result) != 0) {
		if (sugestion(result) == SUGGEST_REMAP) {
#ifdef REMAP
			/* Bad-block remapping: never compiled (REMAP is not
			   defined); left exactly as found. */
			if rscsi_disks[DEVICE_NR(SCpnt->request.dev)].remap
			{
				result = 0;
			}
			else
#endif
		}

		if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
			if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
				if(rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->removable) {
					/* Removable media was changed: flag the
					   device and fail this chunk so upper
					   layers revalidate. */
					rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->changed = 1;
					end_scsi_request(SCpnt, 0, this_count);
					requeue_sd_request(SCpnt);
					return;
				}
			}
		}

		/* Device rejected a 10-byte CDB: drop to 6-byte commands
		   permanently for this disk and retry the request. */
		if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
			if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten) {
				rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten = 0;
				requeue_sd_request(SCpnt);
				result = 0;
			} else {
			}
		}
	}
	if (result) {
		/* Unrecoverable: report and fail the current chunk. */
		printk("SCSI disk error : host %d id %d lun %d return code = %x\n",
		       rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->host->host_no,
		       rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->id,
		       rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->lun, result);

		if (driver_byte(result) & DRIVER_SENSE)
			print_sense("sd", SCpnt);
		end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
		requeue_sd_request(SCpnt);
		return;
	}
}
315
316
317
318
319
320
321
/*
 * Top-level request function for the SCSI disk major.  Repeatedly takes
 * a request off the queue, attaches a free Scsi_Cmnd to it, and hands
 * it to requeue_sd_request(), which builds and issues the command.
 * Only the first pass may sleep waiting for a command slot; if the head
 * of the queue cannot get one, the rest of the queue is scanned so a
 * different (idle) disk can be kept busy.  Interrupts are disabled
 * (cli/sti) around all queue manipulation.
 */
static void do_sd_request (void)
{
	Scsi_Cmnd * SCpnt = NULL;
	struct request * req = NULL;
	int flag = 0;	/* nonzero after the first (blocking) allocation */
	while (1==1){
		cli();
		/* dev == -1 marks a request already claimed by a command. */
		if (CURRENT != NULL && CURRENT->dev == -1) {
			sti();
			return;
		};

		INIT_SCSI_REQUEST;

		/* First iteration may block in allocate_device; later
		   iterations must not sleep, so they skip allocation and
		   rely on the queue scan below. */
		if (flag++ == 0)
			SCpnt = allocate_device(&CURRENT,
						rscsi_disks[DEVICE_NR(MINOR(CURRENT->dev))].device, 0);
		else SCpnt = NULL;
		sti();

		/* Head-of-queue device is busy: look further down the queue
		   for a request whose device can take a command right now. */
		if (!SCpnt && NR_SD > 1){
			struct request *req1;
			req1 = NULL;	/* trails req for unlinking */
			cli();
			req = CURRENT;
			while(req){
				SCpnt = request_queueable(req,
							  rscsi_disks[DEVICE_NR(MINOR(req->dev))].device);
				if(SCpnt) break;
				req1 = req;
				req = req->next;
			};
			/* Unlink the claimed request from the queue. */
			if (SCpnt && req->dev == -1) {
				if (req == CURRENT)
					CURRENT = CURRENT->next;
				else
					req1->next = req->next;
			};
			sti();
		};

		if (!SCpnt) return;	/* nothing we can start right now */

		/* Issue the command, then loop for more work. */
		requeue_sd_request(SCpnt);
	};
}
388
/*
 * Build and issue the SCSI READ/WRITE command for the request attached
 * to SCpnt.  Steps:
 *   1. validate the request against the partition table and map the
 *      partition-relative sector to an absolute block;
 *   2. decide the transfer shape: one contiguous run, a single buffer
 *      head (no scatter-gather), or a scatter-gather list built from
 *      the buffer-head chain, allocating ISA-DMA bounce buffers for
 *      segments above ISA_DMA_THRESHOLD on unchecked-DMA hosts;
 *   3. convert 512-byte sector counts for 256/1024-byte-sector disks;
 *   4. build a 6- or 10-byte CDB and call scsi_do_cmd() with rw_intr()
 *      as the completion routine.
 * Failed validation ends the request and restarts from `repeat`.
 */
static void requeue_sd_request (Scsi_Cmnd * SCpnt)
{
	int dev, block, this_count;
	unsigned char cmd[10];
	int bounce_size, contiguous;
	int max_sg;
	struct buffer_head * bh, *bhp;
	char * buff, *bounce_buffer;

repeat:

	/* No live request attached: go look for more work. */
	if(SCpnt->request.dev <= 0) {
		do_sd_request();
		return;
	}

	dev = MINOR(SCpnt->request.dev);
	block = SCpnt->request.sector;	/* partition-relative, 512-byte units */
	this_count = 0;

#ifdef DEBUG
	printk("Doing sd request, dev = %d, block = %d\n", dev, block);
#endif

	/* Reject nonexistent minors and transfers past the partition end. */
	if (dev >= (NR_SD << 4) || block + SCpnt->request.nr_sectors > sd[dev].nr_sects)
	{
		end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
		goto repeat;
	}

	block += sd[dev].start_sect;	/* -> absolute block on the disk */
	dev = DEVICE_NR(dev);		/* minor -> disk index */

	if (rscsi_disks[dev].device->changed)
	{
		/* Media changed under us: fail everything until the device
		   is revalidated. */
		end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
		goto repeat;
	}

#ifdef DEBUG
	printk("sd%d : real dev = /dev/sd%d, block = %d\n", MINOR(SCpnt->request.dev), dev, block);
#endif

	switch (SCpnt->request.cmd)
	{
	case WRITE :
		if (!rscsi_disks[dev].device->writeable)
		{
			end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
			goto repeat;
		}
		cmd[0] = WRITE_6;
		break;
	case READ :
		cmd[0] = READ_6;
		break;
	default :
		panic ("Unknown sd command %d\n", SCpnt->request.cmd);
	}

	SCpnt->this_count = 0;

	/* Can the buffer-head chain be treated as one contiguous run? */
	contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
	bounce_buffer = NULL;
	bounce_size = (SCpnt->request.nr_sectors << 9);

	/* Contiguous transfer would cross the ISA DMA limit on an
	   unchecked-DMA host: try one bounce buffer for the whole run. */
	if (contiguous && SCpnt->request.bh &&
	    ((int) SCpnt->request.bh->b_data) + (SCpnt->request.nr_sectors << 9) - 1 >
	    ISA_DMA_THRESHOLD && SCpnt->host->unchecked_isa_dma) {
		if(((int) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
			bounce_buffer = (char *) scsi_malloc(bounce_size);
		if(!bounce_buffer) contiguous = 0;
	};

	/* Contiguity only holds if every buffer head abuts the next. */
	if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
		for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp,
		    bhp = bhp->b_reqnext) {
			if(!CONTIGUOUS_BUFFERS(bh,bhp)) {
				if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
				contiguous = 0;
				break;
			}
		};
	if (!SCpnt->request.bh || contiguous) {

		/* Case 1: one run — no scatter-gather needed. */
		this_count = SCpnt->request.nr_sectors;
		buff = SCpnt->request.buffer;
		SCpnt->use_sg = 0;

	} else if (SCpnt->host->sg_tablesize == 0 ||
		   (need_isa_buffer &&
		    dma_free_sectors <= 10)) {

		/* Case 2: host cannot scatter-gather, or the DMA pool is
		   nearly exhausted — transfer one buffer head at a time. */
		if (SCpnt->host->sg_tablesize != 0 &&
		    need_isa_buffer &&
		    dma_free_sectors <= 10)
			printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n");

		this_count = SCpnt->request.current_nr_sectors;
		buff = SCpnt->request.buffer;
		SCpnt->use_sg = 0;

	} else {

		/* Case 3: build a scatter-gather list from the chain. */
		struct scatterlist * sgpnt;
		int count, this_count_max;
		int counted;

		bh = SCpnt->request.bh;
		this_count = 0;
		this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);	/* CDB count limit */
		count = 0;
		bhp = NULL;
		/* First pass: count the segments and sectors we can take. */
		while(bh) {
			if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
			if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
			   !CLUSTERABLE_DEVICE(SCpnt) ||
			   (SCpnt->host->unchecked_isa_dma &&
			    ((unsigned int) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
				if (count < SCpnt->host->sg_tablesize) count++;
				else break;
			};
			this_count += (bh->b_size >> 9);
			bhp = bh;
			bh = bh->b_reqnext;
		};
#if 0
		if(SCpnt->host->unchecked_isa_dma &&
		   ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
#endif
		SCpnt->use_sg = count;
		/* Round the list allocation up to a power of two, as
		   required by scsi_malloc. */
		count = 512;
		while( count < (SCpnt->use_sg * sizeof(struct scatterlist)))
			count = count << 1;
		SCpnt->sglist_len = count;
		max_sg = count / sizeof(struct scatterlist);
		if(SCpnt->host->sg_tablesize < max_sg) max_sg = SCpnt->host->sg_tablesize;
		sgpnt = (struct scatterlist * ) scsi_malloc(count);
		/* NOTE(review): sgpnt is passed to memset before the NULL
		   check below — pre-existing ordering issue left untouched. */
		memset(sgpnt, 0, count);
		if (!sgpnt) {
			printk("Warning - running *really* short on DMA buffers\n");
			SCpnt->use_sg = 0;
			this_count = SCpnt->request.current_nr_sectors;
			buff = SCpnt->request.buffer;
		} else {
			buff = (char *) sgpnt;
			counted = 0;
			/* Second pass: fill the list, merging contiguous heads
			   and bouncing segments above the ISA DMA limit. */
			for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
			    count < SCpnt->use_sg && bh;
			    count++, bh = bhp) {

				bhp = bh->b_reqnext;

				if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
				sgpnt[count].length += bh->b_size;
				counted += bh->b_size >> 9;

				/* Segment lies above the ISA DMA limit: bounce. */
				if (((int) sgpnt[count].address) + sgpnt[count].length - 1 >
				    ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
				    !sgpnt[count].alt_address) {
					sgpnt[count].alt_address = sgpnt[count].address;

					/* Keep some headroom in the DMA pool. */
					if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
						sgpnt[count].address = NULL;
					} else {
						sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
					};

					/* Bounce allocation failed: truncate the
					   command to the segments built so far. */
					if(sgpnt[count].address == NULL){
#if 0
						printk("Warning: Running low on SCSI DMA buffers");

						while(--count >= 0){
							if(sgpnt[count].alt_address)
								scsi_free(sgpnt[count].address, sgpnt[count].length);
						};
						this_count = SCpnt->request.current_nr_sectors;
						buff = SCpnt->request.buffer;
						SCpnt->use_sg = 0;
						scsi_free(sgpnt, SCpnt->sglist_len);
#endif
						SCpnt->use_sg = count;
						this_count = counted -= bh->b_size >> 9;
						break;
					};

				};

				/* Next head is contiguous with this segment: try
				   to merge instead of starting a new segment. */
				if(bhp && CONTIGUOUS_BUFFERS(bh,bhp) && CLUSTERABLE_DEVICE(SCpnt)) {
					char * tmp;

					/* Merging would push a non-bounced segment
					   past the ISA limit: start a new one. */
					if (((int) sgpnt[count].address) + sgpnt[count].length +
					    bhp->b_size - 1 > ISA_DMA_THRESHOLD &&
					    (SCpnt->host->unchecked_isa_dma) &&
					    !sgpnt[count].alt_address) continue;

					if(!sgpnt[count].alt_address) {count--; continue; }
					if(dma_free_sectors > 10)
						tmp = (char *) scsi_malloc(sgpnt[count].length + bhp->b_size);
					else {
						tmp = NULL;
						max_sg = SCpnt->use_sg;
					};
					if(tmp){
						/* Grew the bounce buffer: reuse the
						   same segment for the next head. */
						scsi_free(sgpnt[count].address, sgpnt[count].length);
						sgpnt[count].address = tmp;
						count--;
						continue;
					};

					/* Could not grow it: allow one extra
					   segment if the table has room. */
					if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
				};
			};

			this_count = counted;

			/* Sanity check: the built list must fit the table. */
			if(count < SCpnt->use_sg || SCpnt->use_sg > SCpnt->host->sg_tablesize){
				bh = SCpnt->request.bh;
				printk("Use sg, count %d %x %d\n", SCpnt->use_sg, count, dma_free_sectors);
				printk("maxsg = %x, counted = %d this_count = %d\n", max_sg, counted, this_count);
				while(bh){
					printk("[%p %lx] ", bh->b_data, bh->b_size);
					bh = bh->b_reqnext;
				};
				if(SCpnt->use_sg < 16)
					for(count=0; count<SCpnt->use_sg; count++)
						printk("{%d:%p %p %d} ", count,
						       sgpnt[count].address,
						       sgpnt[count].alt_address,
						       sgpnt[count].length);
				panic("Ooops");
			};

			/* Writes: stage the data into bounce buffers before
			   the command is issued. */
			if (SCpnt->request.cmd == WRITE)
				for(count=0; count<SCpnt->use_sg; count++)
					if(sgpnt[count].alt_address)
						memcpy(sgpnt[count].address, sgpnt[count].alt_address,
						       sgpnt[count].length);
		};
	};

	/* Non-SG transfer above the ISA DMA limit: use a bounce buffer. */
	if(SCpnt->use_sg == 0){
		if (((int) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
		    (SCpnt->host->unchecked_isa_dma)) {
			if(bounce_buffer)
				buff = bounce_buffer;
			else
				buff = (char *) scsi_malloc(this_count << 9);
			if(buff == NULL) {
				/* Shrink to the first buffer head and retry. */
				this_count = SCpnt->request.current_nr_sectors;
				buff = (char *) scsi_malloc(this_count << 9);
				if(!buff) panic("Ran out of DMA buffers.");
			};
			if (SCpnt->request.cmd == WRITE)
				memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
		};
	};
#ifdef DEBUG
	printk("sd%d : %s %d/%d 512 byte blocks.\n", MINOR(SCpnt->request.dev),
	       (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
	       this_count, SCpnt->request.nr_sectors);
#endif

	cmd[1] = (SCpnt->lun << 5) & 0xe0;

	/* Convert 512-byte counts to the device's native sector size. */
	if (rscsi_disks[dev].sector_size == 1024){
		if(block & 1) panic("sd.c:Bad block number requested");
		if(this_count & 1) panic("sd.c:Bad block number requested");
		block = block >> 1;
		this_count = this_count >> 1;
	};

	if (rscsi_disks[dev].sector_size == 256){
		block = block << 1;
		this_count = this_count << 1;
	};

	/* 10-byte CDB when count or LBA exceeds the 6-byte limits. */
	if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten)
	{
		if (this_count > 0xffff)
			this_count = 0xffff;

		cmd[0] += READ_10 - READ_6 ;	/* same offset for WRITE_10 */
		cmd[2] = (unsigned char) (block >> 24) & 0xff;
		cmd[3] = (unsigned char) (block >> 16) & 0xff;
		cmd[4] = (unsigned char) (block >> 8) & 0xff;
		cmd[5] = (unsigned char) block & 0xff;
		cmd[6] = cmd[9] = 0;
		cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
		cmd[8] = (unsigned char) this_count & 0xff;
	}
	else
	{
		if (this_count > 0xff)
			this_count = 0xff;

		cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
		cmd[2] = (unsigned char) ((block >> 8) & 0xff);
		cmd[3] = (unsigned char) block & 0xff;
		cmd[4] = (unsigned char) this_count;
		cmd[5] = 0;
	}

	/* transfersize/underflow let the low-level driver detect short
	   transfers. */
	SCpnt->transfersize = rscsi_disks[dev].sector_size;
	SCpnt->underflow = this_count << 9;
	scsi_do_cmd (SCpnt, (void *) cmd, buff,
		     this_count * rscsi_disks[dev].sector_size,
		     rw_intr,
		     (SCpnt->device->type == TYPE_DISK ?
		      SD_TIMEOUT : SD_MOD_TIMEOUT),
		     MAX_RETRIES);
}
741
742 static int check_scsidisk_media_change(dev_t full_dev){
743 int retval;
744 int target;
745 struct inode inode;
746 int flag = 0;
747
748 target = DEVICE_NR(MINOR(full_dev));
749
750 if (target >= NR_SD) {
751 printk("SCSI disk request error: invalid device.\n");
752 return 0;
753 };
754
755 if(!rscsi_disks[target].device->removable) return 0;
756
757 inode.i_rdev = full_dev;
758 retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
759
760 if(retval){
761
762
763
764
765 rscsi_disks[target].device->changed = 1;
766 return 1;
767
768 };
769
770 retval = rscsi_disks[target].device->changed;
771 if(!flag) rscsi_disks[target].device->changed = 0;
772 return retval;
773 }
774
775 static void sd_init_done (Scsi_Cmnd * SCpnt)
776 {
777 struct request * req;
778
779 req = &SCpnt->request;
780 req->dev = 0xfffe;
781
782 if (req->sem != NULL) {
783 up(req->sem);
784 }
785 }
786
787 static int sd_init_onedisk(int i)
788 {
789 int j = 0;
790 unsigned char cmd[10];
791 unsigned char *buffer;
792 char spintime;
793 int the_result, retries;
794 Scsi_Cmnd * SCpnt;
795
796
797
798
799
800 SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
801 buffer = (unsigned char *) scsi_malloc(512);
802
803 spintime = 0;
804
805
806 if (current == task[0]){
807 do{
808 cmd[0] = TEST_UNIT_READY;
809 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
810 memset ((void *) &cmd[2], 0, 8);
811 SCpnt->request.dev = 0xffff;
812 SCpnt->sense_buffer[0] = 0;
813 SCpnt->sense_buffer[2] = 0;
814
815 scsi_do_cmd (SCpnt,
816 (void *) cmd, (void *) buffer,
817 512, sd_init_done, SD_TIMEOUT,
818 MAX_RETRIES);
819
820 while(SCpnt->request.dev != 0xfffe);
821
822 the_result = SCpnt->result;
823
824
825
826 if(the_result && !rscsi_disks[i].device->removable &&
827 SCpnt->sense_buffer[2] == NOT_READY) {
828 int time1;
829 if(!spintime){
830 printk( "sd%d: Spinning up disk...", i );
831 cmd[0] = START_STOP;
832 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
833 cmd[1] |= 1;
834 memset ((void *) &cmd[2], 0, 8);
835 cmd[4] = 1;
836 SCpnt->request.dev = 0xffff;
837 SCpnt->sense_buffer[0] = 0;
838 SCpnt->sense_buffer[2] = 0;
839
840 scsi_do_cmd (SCpnt,
841 (void *) cmd, (void *) buffer,
842 512, sd_init_done, SD_TIMEOUT,
843 MAX_RETRIES);
844
845 while(SCpnt->request.dev != 0xfffe);
846
847 spintime = jiffies;
848 };
849
850 time1 = jiffies;
851 while(jiffies < time1 + HZ);
852 printk( "." );
853 };
854 } while(the_result && spintime && spintime+5000 > jiffies);
855 if (spintime) {
856 if (the_result)
857 printk( "not responding...\n" );
858 else
859 printk( "ready\n" );
860 }
861 };
862
863
864 retries = 3;
865 do {
866 cmd[0] = READ_CAPACITY;
867 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
868 memset ((void *) &cmd[2], 0, 8);
869 memset ((void *) buffer, 0, 8);
870 SCpnt->request.dev = 0xffff;
871 SCpnt->sense_buffer[0] = 0;
872 SCpnt->sense_buffer[2] = 0;
873
874 scsi_do_cmd (SCpnt,
875 (void *) cmd, (void *) buffer,
876 8, sd_init_done, SD_TIMEOUT,
877 MAX_RETRIES);
878
879 if (current == task[0])
880 while(SCpnt->request.dev != 0xfffe);
881 else
882 if (SCpnt->request.dev != 0xfffe){
883 struct semaphore sem = MUTEX_LOCKED;
884 SCpnt->request.sem = &sem;
885 down(&sem);
886
887 while (SCpnt->request.dev != 0xfffe) schedule();
888 };
889
890 the_result = SCpnt->result;
891 retries--;
892
893 } while(the_result && retries);
894
895 SCpnt->request.dev = -1;
896
897 wake_up(&SCpnt->device->device_wait);
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914 if (the_result)
915 {
916 printk ("sd%d : READ CAPACITY failed.\n"
917 "sd%d : status = %x, message = %02x, host = %d, driver = %02x \n",
918 i,i,
919 status_byte(the_result),
920 msg_byte(the_result),
921 host_byte(the_result),
922 driver_byte(the_result)
923 );
924 if (driver_byte(the_result) & DRIVER_SENSE)
925 printk("sd%d : extended sense code = %1x \n", i, SCpnt->sense_buffer[2] & 0xf);
926 else
927 printk("sd%d : sense not available. \n", i);
928
929 printk("sd%d : block size assumed to be 512 bytes, disk size 1GB. \n", i);
930 rscsi_disks[i].capacity = 0x1fffff;
931 rscsi_disks[i].sector_size = 512;
932
933
934
935 if(rscsi_disks[i].device->removable &&
936 SCpnt->sense_buffer[2] == NOT_READY)
937 rscsi_disks[i].device->changed = 1;
938
939 }
940 else
941 {
942 rscsi_disks[i].capacity = (buffer[0] << 24) |
943 (buffer[1] << 16) |
944 (buffer[2] << 8) |
945 buffer[3];
946
947 rscsi_disks[i].sector_size = (buffer[4] << 24) |
948 (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
949
950 if (rscsi_disks[i].sector_size != 512 &&
951 rscsi_disks[i].sector_size != 1024 &&
952 rscsi_disks[i].sector_size != 256)
953 {
954 printk ("sd%d : unsupported sector size %d.\n",
955 i, rscsi_disks[i].sector_size);
956 if(rscsi_disks[i].device->removable){
957 rscsi_disks[i].capacity = 0;
958 } else {
959 printk ("scsi : deleting disk entry.\n");
960 for (j=i; j < NR_SD - 1;)
961 rscsi_disks[j] = rscsi_disks[++j];
962 --i;
963 --NR_SD;
964 scsi_free(buffer, 512);
965 return i;
966 };
967 }
968 if(rscsi_disks[i].sector_size == 1024)
969 rscsi_disks[i].capacity <<= 1;
970 if(rscsi_disks[i].sector_size == 256)
971 rscsi_disks[i].capacity >>= 1;
972 }
973
974 rscsi_disks[i].ten = 1;
975 rscsi_disks[i].remap = 1;
976 scsi_free(buffer, 512);
977 return i;
978 }
979
980
981
982
983
984
985
/*
 * Late initialization: register the block device, carve the size,
 * blocksize and partition arrays out of the boot memory pool, probe
 * every attached disk, install the request function, choose a
 * read-ahead value, and link our gendisk into the global chain.
 * Returns the updated memory_start.
 */
unsigned long sd_init(unsigned long memory_start, unsigned long memory_end)
{
	int i;

	if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
		printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
		return memory_start;
	}
	if (MAX_SD == 0) return memory_start;

	/* One entry per minor: MAX_SD disks x 16 partitions each. */
	sd_sizes = (int *) memory_start;
	memory_start += (MAX_SD << 4) * sizeof(int);
	memset(sd_sizes, 0, (MAX_SD << 4) * sizeof(int));

	sd_blocksizes = (int *) memory_start;
	memory_start += (MAX_SD << 4) * sizeof(int);
	for(i=0;i<(MAX_SD << 4);i++) sd_blocksizes[i] = 1024;
	blksize_size[MAJOR_NR] = sd_blocksizes;

	sd = (struct hd_struct *) memory_start;
	memory_start += (MAX_SD << 4) * sizeof(struct hd_struct);

	sd_gendisk.max_nr = MAX_SD;
	sd_gendisk.part = sd;
	sd_gendisk.sizes = sd_sizes;
	sd_gendisk.real_devices = (void *) rscsi_disks;

	/* sd_init_onedisk may delete entries; it returns the index from
	   which the loop should continue. */
	for (i = 0; i < NR_SD; ++i)
		i = sd_init_onedisk(i);

	blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;

	/* More aggressive read-ahead when the host can scatter-gather. */
	if(rscsi_disks[0].device->host->sg_tablesize)
		read_ahead[MAJOR_NR] = 120;

	else
		read_ahead[MAJOR_NR] = 4;

	sd_gendisk.next = gendisk_head;
	gendisk_head = &sd_gendisk;
	return memory_start;
}
1031
1032 void sd_init1(){
1033 rscsi_disks = (Scsi_Disk *) scsi_init_malloc(MAX_SD * sizeof(Scsi_Disk));
1034 };
1035
1036 void sd_attach(Scsi_Device * SDp){
1037 SDp->scsi_request_fn = do_sd_request;
1038 rscsi_disks[NR_SD++].device = SDp;
1039 if(NR_SD > MAX_SD) panic ("scsi_devices corrupt (sd)");
1040 };
1041
/*
 * Glue for the generic revalidation code in revalidate_scsidisk()
 * below; `target` is a local variable there.
 */
#define DEVICE_BUSY rscsi_disks[target].device->busy
#define USAGE rscsi_disks[target].device->access_count
#define CAPACITY rscsi_disks[target].capacity
#define MAYBE_REINIT sd_init_onedisk(target)
#define GENDISK_STRUCT sd_gendisk
1047
1048
1049
1050
1051
1052
1053
1054
/*
 * Re-read the partition table of one disk (e.g. after a media change).
 * Fails with -EBUSY if the device is busy or opened more than maxusage
 * times.  All partitions are flushed and invalidated, the disk is
 * re-probed (MAYBE_REINIT -> sd_init_onedisk) and the partition table
 * rebuilt via resetup_one_dev().  Returns 0 on success.
 */
int revalidate_scsidisk(int dev, int maxusage){
	int target, major;
	struct gendisk * gdev;
	int max_p;
	int start;
	int i;

	target = DEVICE_NR(MINOR(dev));
	gdev = &GENDISK_STRUCT;

	/* Atomically claim exclusive use of the device (busy flag). */
	cli();
	if (DEVICE_BUSY || USAGE > maxusage) {
		sti();
		printk("Device busy for revalidation (usage=%d)\n", USAGE);
		return -EBUSY;
	};
	DEVICE_BUSY = 1;
	sti();

	max_p = gdev->max_p;
	start = target << gdev->minor_shift;	/* first minor of this disk */
	major = MAJOR_NR << 8;

	/* Flush and forget every partition of the disk. */
	for (i=max_p - 1; i >=0 ; i--) {
		sync_dev(major | start | i);
		invalidate_inodes(major | start | i);
		invalidate_buffers(major | start | i);
		gdev->part[start+i].start_sect = 0;
		gdev->part[start+i].nr_sects = 0;
	};

#ifdef MAYBE_REINIT
	MAYBE_REINIT;
#endif

	gdev->part[start].nr_sects = CAPACITY;
	resetup_one_dev(gdev, target);

	DEVICE_BUSY = 0;
	return 0;
}
1096
1097 static int fop_revalidate_scsidisk(dev_t dev){
1098 return revalidate_scsidisk(dev, 0);
1099 }
1100