This source file includes following definitions.
- sd_open
- sd_release
- sd_geninit
- rw_intr
- do_sd_request
- requeue_sd_request
- check_scsidisk_media_change
- sd_init_done
- sd_init_onedisk
- sd_init
- sd_finish
- sd_detect
- sd_attach
- revalidate_scsidisk
- fop_revalidate_scsidisk
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/fs.h>
15 #include <linux/kernel.h>
16 #include <linux/sched.h>
17 #include <linux/string.h>
18 #include <linux/errno.h>
19 #include <asm/system.h>
20
21 #define MAJOR_NR SCSI_DISK_MAJOR
22 #include "../block/blk.h"
23 #include "scsi.h"
24 #include "hosts.h"
25 #include "sd.h"
26 #include "scsi_ioctl.h"
27 #include "constants.h"
28
29 #include <linux/genhd.h>
30
31
32
33
34
35 #define MAX_RETRIES 5
36
37
38
39
40
41 #define SD_TIMEOUT 600
42 #define SD_MOD_TIMEOUT 750
43
44 #define CLUSTERABLE_DEVICE(SC) (SC->host->hostt->use_clustering && \
45 SC->device->type != TYPE_MOD)
46
47 struct hd_struct * sd;
48
49 Scsi_Disk * rscsi_disks;
50 static int * sd_sizes;
51 static int * sd_blocksizes;
52
53 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
54
55 static int check_scsidisk_media_change(dev_t);
56 static int fop_revalidate_scsidisk(dev_t);
57
58 static sd_init_onedisk(int);
59
60 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
61
62 static void sd_init(void);
63 static void sd_finish(void);
64 static void sd_attach(Scsi_Device *);
65 static int sd_detect(Scsi_Device *);
66
/*
 * Hook table registered with the SCSI mid-layer: device-type tag strings
 * ("disk"/"sd"), the device type and major number, counters, and the four
 * driver entry points (sd_detect, sd_init, sd_finish, sd_attach) defined
 * in this file.
 */
struct Scsi_Device_Template sd_template = {NULL, "disk", "sd", TYPE_DISK,
                                           SCSI_DISK_MAJOR, 0, 0, 0, 1,
                                           sd_detect, sd_init,
                                           sd_finish, sd_attach, NULL};
71
72 static int sd_open(struct inode * inode, struct file * filp)
73 {
74 int target;
75 target = DEVICE_NR(MINOR(inode->i_rdev));
76
77 if(target >= sd_template.dev_max || !rscsi_disks[target].device)
78 return -ENXIO;
79
80
81
82
83 while (rscsi_disks[target].device->busy);
84
85 if(rscsi_disks[target].device->removable) {
86 check_disk_change(inode->i_rdev);
87
88 if(!rscsi_disks[target].device->access_count)
89 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
90 };
91 rscsi_disks[target].device->access_count++;
92 return 0;
93 }
94
95 static void sd_release(struct inode * inode, struct file * file)
96 {
97 int target;
98 sync_dev(inode->i_rdev);
99
100 target = DEVICE_NR(MINOR(inode->i_rdev));
101
102 rscsi_disks[target].device->access_count--;
103
104 if(rscsi_disks[target].device->removable) {
105 if(!rscsi_disks[target].device->access_count)
106 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
107 };
108 }
109
110 static void sd_geninit(void);
111
/* File operations for SCSI disk block devices (positional initializer). */
static struct file_operations sd_fops = {
    NULL,                        /* lseek - default */
    block_read,                  /* read - generic block-layer read */
    block_write,                 /* write - generic block-layer write */
    NULL,                        /* readdir - bad */
    NULL,                        /* select */
    sd_ioctl,                    /* ioctl */
    NULL,                        /* mmap */
    sd_open,                     /* open */
    sd_release,                  /* release */
    block_fsync,                 /* fsync */
    NULL,                        /* fasync */
    check_scsidisk_media_change, /* media change detection */
    fop_revalidate_scsidisk      /* revalidate after media change */
};
127
/* Generic-disk descriptor: 4 minor bits per disk => 16 partitions each. */
static struct gendisk sd_gendisk = {
    MAJOR_NR,           /* major number */
    "sd",               /* device name prefix */
    4,                  /* minor_shift: bits to shift to get real device */
    1 << 4,             /* max_p: partitions per real disk */
    0,                  /* max_nr: filled in by sd_init() */
    sd_geninit,         /* init function */
    NULL,               /* part: hd_struct array, filled in by sd_init() */
    NULL,               /* sizes: filled in by sd_init() */
    0,                  /* nr_real: filled in by sd_geninit() */
    NULL,               /* real_devices: internal use */
    NULL                /* next in gendisk chain */
};
141
142 static void sd_geninit (void)
143 {
144 int i;
145
146 for (i = 0; i < sd_template.dev_max; ++i)
147 if(rscsi_disks[i].device)
148 sd[i << 4].nr_sects = rscsi_disks[i].capacity;
149 sd_gendisk.nr_real = sd_template.dev_max;
150 }
151
152
153
154
155
156
157
/*
 * rw_intr() - completion routine for the READ/WRITE commands built by
 * requeue_sd_request().  Called by the SCSI mid-layer when a command
 * finishes.  On success it copies bounce-buffer data back for reads,
 * frees any scatter-gather list and DMA bounce buffers, completes the
 * transferred sectors and restarts the queue.  On failure it frees the
 * buffers, inspects the sense data and either retries (media change,
 * 10-byte-command fallback) or fails the request.
 */
static void rw_intr (Scsi_Cmnd *SCpnt)
{
    int result = SCpnt->result;
    int this_count = SCpnt->bufflen >> 9;   /* sectors moved by this command */

#ifdef DEBUG
    printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.dev), SCpnt->host->host_no, result);
#endif

    /*
     * First case: the command succeeded.  Either the request is finished,
     * or more sectors remain than one command could cover, in which case
     * end_scsi_request() advances the request and requeue_sd_request()
     * issues the remainder.
     */
    if (!result) {

#ifdef DEBUG
        printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.dev), SCpnt->request.nr_sectors);
        printk("use_sg is %d\n ",SCpnt->use_sg);
#endif
        if (SCpnt->use_sg) {
            struct scatterlist * sgpnt;
            int i;
            sgpnt = (struct scatterlist *) SCpnt->buffer;
            for(i=0; i<SCpnt->use_sg; i++) {
#ifdef DEBUG
                printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
#endif
                /* alt_address set => segment was bounced below the ISA DMA
                 * limit: copy read data back, then free the bounce buffer. */
                if (sgpnt[i].alt_address) {
                    if (SCpnt->request.cmd == READ)
                        memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
                    scsi_free(sgpnt[i].address, sgpnt[i].length);
                };
            };
            scsi_free(SCpnt->buffer, SCpnt->sglist_len);  /* free the sg table itself */
        } else {
            /* Single-buffer case: a differing pointer means a bounce
             * buffer was in use. */
            if (SCpnt->buffer != SCpnt->request.buffer) {
#ifdef DEBUG
                printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
                       SCpnt->bufflen);
#endif
                if (SCpnt->request.cmd == READ)
                    memcpy(SCpnt->request.buffer, SCpnt->buffer,
                           SCpnt->bufflen);
                scsi_free(SCpnt->buffer, SCpnt->bufflen);
            };
        };

        /* If more sectors are outstanding than this command transferred,
         * the request continues with another command. */
        if (SCpnt->request.nr_sectors > this_count)
        {
            SCpnt->request.errors = 0;

            if (!SCpnt->request.bh)
            {
#ifdef DEBUG
                printk("sd%c : handling page request, no buffer\n",
                       'a' + MINOR(SCpnt->request.dev));
#endif
                /* A paging (bh-less) request must be satisfied in a single
                 * command; a partial transfer here is unrecoverable. */
                panic("sd.c: linked page request (%lx %x)",
                      SCpnt->request.sector, this_count);
            }
        }
        SCpnt = end_scsi_request(SCpnt, 1, this_count);
        requeue_sd_request(SCpnt);
        return;
    }

    /* Error path: release the sg list / bounce buffers without copying
     * anything back to the caller's buffers. */
    if (SCpnt->use_sg) {
        struct scatterlist * sgpnt;
        int i;
        sgpnt = (struct scatterlist *) SCpnt->buffer;
        for(i=0; i<SCpnt->use_sg; i++) {
#ifdef DEBUG
            printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
                   SCpnt->bufflen);
#endif
            if (sgpnt[i].alt_address) {
                scsi_free(sgpnt[i].address, sgpnt[i].length);
            };
        };
        scsi_free(SCpnt->buffer, SCpnt->sglist_len);
    } else {
#ifdef DEBUG
        printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
               SCpnt->bufflen);
#endif
        if (SCpnt->buffer != SCpnt->request.buffer)
            scsi_free(SCpnt->buffer, SCpnt->bufflen);
    };

    /*
     * Decode the failure.  driver_byte() is nonzero when the low-level
     * driver supplied status/sense information.
     */
    if (driver_byte(result) != 0) {
        if (suggestion(result) == SUGGEST_REMAP) {
#ifdef REMAP
            /* Remapping is not implemented.
             * NOTE(review): this block does not compile if REMAP is ever
             * defined - the `if` lacks parentheses and the trailing `else`
             * has no statement.  Left untouched since REMAP is not defined
             * anywhere visible here. */
            if rscsi_disks[DEVICE_NR(SCpnt->request.dev)].remap
            {
                result = 0;
            }
            else

#endif
        }

        /* 0x70: current-error sense format. */
        if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
            if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
                if(rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->removable) {
                    /* Media was changed under us: flag the device so the
                     * upper layers revalidate, and fail this request. */
                    rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->changed = 1;
                    SCpnt = end_scsi_request(SCpnt, 0, this_count);
                    requeue_sd_request(SCpnt);
                    return;
                }
            }
        }

        /*
         * ILLEGAL REQUEST while 10-byte commands are enabled: the target
         * may not support READ(10)/WRITE(10).  Drop to 6-byte commands
         * and reissue the request; clearing `result` suppresses the error
         * report below.
         */
        if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
            if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten) {
                rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten = 0;
                requeue_sd_request(SCpnt);
                result = 0;
            } else {
            }
        }
    }
    if (result) {
        /* Unrecovered error: log it, print sense if available, fail the
         * current chunk and move on. */
        printk("SCSI disk error : host %d id %d lun %d return code = %x\n",
               rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->host->host_no,
               rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->id,
               rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->lun, result);

        if (driver_byte(result) & DRIVER_SENSE)
            print_sense("sd", SCpnt);
        SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
        requeue_sd_request(SCpnt);
        return;
    }
}
324
325
326
327
328
329
330
/*
 * do_sd_request() - block-layer request function for the SCSI disk major.
 * Loops pulling requests off the queue: first tries to allocate a command
 * for the head request (blocking allowed once via `flag`), then - if more
 * than one disk exists - walks the whole queue looking for any request
 * whose device has a free command slot, unlinking it if a command is
 * found.  Each command obtained is handed to requeue_sd_request().
 * cli()/sti() bracket all examination/modification of the request list.
 */
static void do_sd_request (void)
{
    Scsi_Cmnd * SCpnt = NULL;
    struct request * req = NULL;
    int flag = 0;               /* ensures allocate_device() is tried only once */
    while (1==1){
        cli();                  /* protect CURRENT while we inspect it */
        if (CURRENT != NULL && CURRENT->dev == -1) {
            sti();
            return;
        };

        INIT_SCSI_REQUEST;

        /* Only the first pass may take the head request; later passes
         * fall through to the queue scan below. */
        if (flag++ == 0)
            SCpnt = allocate_device(&CURRENT,
                                    rscsi_disks[DEVICE_NR(MINOR(CURRENT->dev))].device, 0);
        else SCpnt = NULL;
        sti();

        /*
         * The head request's device had no free command block.  With
         * multiple disks, scan the rest of the queue for any request
         * whose device can be serviced now, and unlink it from the list.
         */
        if (!SCpnt && sd_template.nr_dev > 1){
            struct request *req1;
            req1 = NULL;        /* trails one node behind `req` for unlinking */
            cli();
            req = CURRENT;
            while(req){
                SCpnt = request_queueable(req,
                                          rscsi_disks[DEVICE_NR(MINOR(req->dev))].device);
                if(SCpnt) break;
                req1 = req;
                req = req->next;
            };
            /* req->dev == -1 means request_queueable() consumed the
             * request; remove the emptied node from the queue. */
            if (SCpnt && req->dev == -1) {
                if (req == CURRENT)
                    CURRENT = CURRENT->next;
                else
                    req1->next = req->next;
            };
            sti();
        };

        if (!SCpnt) return;     /* nothing serviceable right now */

        /* Queue the command for this request. */
        requeue_sd_request(SCpnt);
    };
}
397
398 static void requeue_sd_request (Scsi_Cmnd * SCpnt)
399 {
400 int dev, block, this_count;
401 unsigned char cmd[10];
402 int bounce_size, contiguous;
403 int max_sg;
404 struct buffer_head * bh, *bhp;
405 char * buff, *bounce_buffer;
406
407 repeat:
408
409 if(!SCpnt || SCpnt->request.dev <= 0) {
410 do_sd_request();
411 return;
412 }
413
414 dev = MINOR(SCpnt->request.dev);
415 block = SCpnt->request.sector;
416 this_count = 0;
417
418 #ifdef DEBUG
419 printk("Doing sd request, dev = %d, block = %d\n", dev, block);
420 #endif
421
422 if (dev >= (sd_template.dev_max << 4) ||
423 !rscsi_disks[DEVICE_NR(dev)].device ||
424 block + SCpnt->request.nr_sectors > sd[dev].nr_sects)
425 {
426 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
427 goto repeat;
428 }
429
430 block += sd[dev].start_sect;
431 dev = DEVICE_NR(dev);
432
433 if (rscsi_disks[dev].device->changed)
434 {
435
436
437
438
439 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
440 goto repeat;
441 }
442
443 #ifdef DEBUG
444 printk("sd%c : real dev = /dev/sd%c, block = %d\n", 'a' + MINOR(SCpnt->request.dev), dev, block);
445 #endif
446
447
448
449
450
451
452
453
454
455
456
457
458 if (rscsi_disks[dev].sector_size == 1024)
459 if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
460 printk("sd.c:Bad block number requested");
461 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
462 goto repeat;
463 }
464
465 switch (SCpnt->request.cmd)
466 {
467 case WRITE :
468 if (!rscsi_disks[dev].device->writeable)
469 {
470 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
471 goto repeat;
472 }
473 cmd[0] = WRITE_6;
474 break;
475 case READ :
476 cmd[0] = READ_6;
477 break;
478 default :
479 panic ("Unknown sd command %d\n", SCpnt->request.cmd);
480 }
481
482 SCpnt->this_count = 0;
483
484
485
486 contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
487 bounce_buffer = NULL;
488 bounce_size = (SCpnt->request.nr_sectors << 9);
489
490
491
492
493
494
495 if (contiguous && SCpnt->request.bh &&
496 ((int) SCpnt->request.bh->b_data) + (SCpnt->request.nr_sectors << 9) - 1 >
497 ISA_DMA_THRESHOLD && SCpnt->host->unchecked_isa_dma) {
498 if(((int) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
499 bounce_buffer = (char *) scsi_malloc(bounce_size);
500 if(!bounce_buffer) contiguous = 0;
501 };
502
503 if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
504 for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp,
505 bhp = bhp->b_reqnext) {
506 if(!CONTIGUOUS_BUFFERS(bh,bhp)) {
507 if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
508 contiguous = 0;
509 break;
510 }
511 };
512 if (!SCpnt->request.bh || contiguous) {
513
514
515 this_count = SCpnt->request.nr_sectors;
516 buff = SCpnt->request.buffer;
517 SCpnt->use_sg = 0;
518
519 } else if (SCpnt->host->sg_tablesize == 0 ||
520 (need_isa_buffer &&
521 dma_free_sectors <= 10)) {
522
523
524
525
526
527
528
529
530 if (SCpnt->host->sg_tablesize != 0 &&
531 need_isa_buffer &&
532 dma_free_sectors <= 10)
533 printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n");
534
535 this_count = SCpnt->request.current_nr_sectors;
536 buff = SCpnt->request.buffer;
537 SCpnt->use_sg = 0;
538
539 } else {
540
541
542 struct scatterlist * sgpnt;
543 int count, this_count_max;
544 int counted;
545
546 bh = SCpnt->request.bh;
547 this_count = 0;
548 this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
549 count = 0;
550 bhp = NULL;
551 while(bh) {
552 if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
553 if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
554 !CLUSTERABLE_DEVICE(SCpnt) ||
555 (SCpnt->host->unchecked_isa_dma &&
556 ((unsigned int) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
557 if (count < SCpnt->host->sg_tablesize) count++;
558 else break;
559 };
560 this_count += (bh->b_size >> 9);
561 bhp = bh;
562 bh = bh->b_reqnext;
563 };
564 #if 0
565 if(SCpnt->host->unchecked_isa_dma &&
566 ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
567 #endif
568 SCpnt->use_sg = count;
569 count = 512;
570 while( count < (SCpnt->use_sg * sizeof(struct scatterlist)))
571 count = count << 1;
572 SCpnt->sglist_len = count;
573 max_sg = count / sizeof(struct scatterlist);
574 if(SCpnt->host->sg_tablesize < max_sg) max_sg = SCpnt->host->sg_tablesize;
575 sgpnt = (struct scatterlist * ) scsi_malloc(count);
576 memset(sgpnt, 0, count);
577 if (!sgpnt) {
578 printk("Warning - running *really* short on DMA buffers\n");
579 SCpnt->use_sg = 0;
580 this_count = SCpnt->request.current_nr_sectors;
581 buff = SCpnt->request.buffer;
582 } else {
583 buff = (char *) sgpnt;
584 counted = 0;
585 for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
586 count < SCpnt->use_sg && bh;
587 count++, bh = bhp) {
588
589 bhp = bh->b_reqnext;
590
591 if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
592 sgpnt[count].length += bh->b_size;
593 counted += bh->b_size >> 9;
594
595 if (((int) sgpnt[count].address) + sgpnt[count].length - 1 >
596 ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
597 !sgpnt[count].alt_address) {
598 sgpnt[count].alt_address = sgpnt[count].address;
599
600
601
602 if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
603 sgpnt[count].address = NULL;
604 } else {
605 sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
606 };
607
608
609
610
611 if(sgpnt[count].address == NULL){
612 #if 0
613 printk("Warning: Running low on SCSI DMA buffers");
614
615 while(--count >= 0){
616 if(sgpnt[count].alt_address)
617 scsi_free(sgpnt[count].address, sgpnt[count].length);
618 };
619 this_count = SCpnt->request.current_nr_sectors;
620 buff = SCpnt->request.buffer;
621 SCpnt->use_sg = 0;
622 scsi_free(sgpnt, SCpnt->sglist_len);
623 #endif
624 SCpnt->use_sg = count;
625 this_count = counted -= bh->b_size >> 9;
626 break;
627 };
628
629 };
630
631
632
633
634
635 if(bhp && CONTIGUOUS_BUFFERS(bh,bhp) && CLUSTERABLE_DEVICE(SCpnt)) {
636 char * tmp;
637
638 if (((int) sgpnt[count].address) + sgpnt[count].length +
639 bhp->b_size - 1 > ISA_DMA_THRESHOLD &&
640 (SCpnt->host->unchecked_isa_dma) &&
641 !sgpnt[count].alt_address) continue;
642
643 if(!sgpnt[count].alt_address) {count--; continue; }
644 if(dma_free_sectors > 10)
645 tmp = (char *) scsi_malloc(sgpnt[count].length + bhp->b_size);
646 else {
647 tmp = NULL;
648 max_sg = SCpnt->use_sg;
649 };
650 if(tmp){
651 scsi_free(sgpnt[count].address, sgpnt[count].length);
652 sgpnt[count].address = tmp;
653 count--;
654 continue;
655 };
656
657
658
659
660 if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
661 };
662 };
663
664 this_count = counted;
665
666 if(count < SCpnt->use_sg || SCpnt->use_sg > SCpnt->host->sg_tablesize){
667 bh = SCpnt->request.bh;
668 printk("Use sg, count %d %x %d\n", SCpnt->use_sg, count, dma_free_sectors);
669 printk("maxsg = %x, counted = %d this_count = %d\n", max_sg, counted, this_count);
670 while(bh){
671 printk("[%p %lx] ", bh->b_data, bh->b_size);
672 bh = bh->b_reqnext;
673 };
674 if(SCpnt->use_sg < 16)
675 for(count=0; count<SCpnt->use_sg; count++)
676 printk("{%d:%p %p %d} ", count,
677 sgpnt[count].address,
678 sgpnt[count].alt_address,
679 sgpnt[count].length);
680 panic("Ooops");
681 };
682
683 if (SCpnt->request.cmd == WRITE)
684 for(count=0; count<SCpnt->use_sg; count++)
685 if(sgpnt[count].alt_address)
686 memcpy(sgpnt[count].address, sgpnt[count].alt_address,
687 sgpnt[count].length);
688 };
689 };
690
691
692
693 if(SCpnt->use_sg == 0){
694 if (((int) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
695 (SCpnt->host->unchecked_isa_dma)) {
696 if(bounce_buffer)
697 buff = bounce_buffer;
698 else
699 buff = (char *) scsi_malloc(this_count << 9);
700 if(buff == NULL) {
701 this_count = SCpnt->request.current_nr_sectors;
702 buff = (char *) scsi_malloc(this_count << 9);
703 if(!buff) panic("Ran out of DMA buffers.");
704 };
705 if (SCpnt->request.cmd == WRITE)
706 memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
707 };
708 };
709 #ifdef DEBUG
710 printk("sd%c : %s %d/%d 512 byte blocks.\n", 'a' + MINOR(SCpnt->request.dev),
711 (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
712 this_count, SCpnt->request.nr_sectors);
713 #endif
714
715 cmd[1] = (SCpnt->lun << 5) & 0xe0;
716
717 if (rscsi_disks[dev].sector_size == 1024){
718 if(block & 1) panic("sd.c:Bad block number requested");
719 if(this_count & 1) panic("sd.c:Bad block number requested");
720 block = block >> 1;
721 this_count = this_count >> 1;
722 };
723
724 if (rscsi_disks[dev].sector_size == 256){
725 block = block << 1;
726 this_count = this_count << 1;
727 };
728
729 if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten)
730 {
731 if (this_count > 0xffff)
732 this_count = 0xffff;
733
734 cmd[0] += READ_10 - READ_6 ;
735 cmd[2] = (unsigned char) (block >> 24) & 0xff;
736 cmd[3] = (unsigned char) (block >> 16) & 0xff;
737 cmd[4] = (unsigned char) (block >> 8) & 0xff;
738 cmd[5] = (unsigned char) block & 0xff;
739 cmd[6] = cmd[9] = 0;
740 cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
741 cmd[8] = (unsigned char) this_count & 0xff;
742 }
743 else
744 {
745 if (this_count > 0xff)
746 this_count = 0xff;
747
748 cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
749 cmd[2] = (unsigned char) ((block >> 8) & 0xff);
750 cmd[3] = (unsigned char) block & 0xff;
751 cmd[4] = (unsigned char) this_count;
752 cmd[5] = 0;
753 }
754
755
756
757
758
759
760
761 SCpnt->transfersize = rscsi_disks[dev].sector_size;
762 SCpnt->underflow = this_count << 9;
763 scsi_do_cmd (SCpnt, (void *) cmd, buff,
764 this_count * rscsi_disks[dev].sector_size,
765 rw_intr,
766 (SCpnt->device->type == TYPE_DISK ?
767 SD_TIMEOUT : SD_MOD_TIMEOUT),
768 MAX_RETRIES);
769 }
770
771 static int check_scsidisk_media_change(dev_t full_dev){
772 int retval;
773 int target;
774 struct inode inode;
775 int flag = 0;
776
777 target = DEVICE_NR(MINOR(full_dev));
778
779 if (target >= sd_template.dev_max ||
780 !rscsi_disks[target].device) {
781 printk("SCSI disk request error: invalid device.\n");
782 return 0;
783 };
784
785 if(!rscsi_disks[target].device->removable) return 0;
786
787 inode.i_rdev = full_dev;
788 retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
789
790 if(retval){
791
792
793
794
795 rscsi_disks[target].device->changed = 1;
796 return 1;
797
798 };
799
800 retval = rscsi_disks[target].device->changed;
801 if(!flag) rscsi_disks[target].device->changed = 0;
802 return retval;
803 }
804
805 static void sd_init_done (Scsi_Cmnd * SCpnt)
806 {
807 struct request * req;
808
809 req = &SCpnt->request;
810 req->dev = 0xfffe;
811
812 if (req->sem != NULL) {
813 up(req->sem);
814 }
815 }
816
/*
 * sd_init_onedisk() - probe one attached disk: optionally spin it up
 * (boot-time only), then READ CAPACITY to learn its size and sector
 * size.  On READ CAPACITY failure, assumes 512-byte sectors and a fake
 * large capacity; on an unsupported sector size the non-removable disk
 * entry is deleted.  Returns the (possibly same) disk index `i`.
 */
static int sd_init_onedisk(int i)
{
    unsigned char cmd[10];
    unsigned char *buffer;
    char spintime;      /* NOTE(review): char, but assigned jiffies below - truncates; verify */
    int the_result, retries;
    Scsi_Cmnd * SCpnt;

    /* Grab a command block (blocking allowed) and a DMA-safe buffer.
     * NOTE(review): the scsi_malloc() result is not checked for NULL. */
    SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
    buffer = (unsigned char *) scsi_malloc(512);

    spintime = 0;

    /* Boot-time path only (task[0]): test readiness and spin up if needed. */
    if (current == task[0]){
        do{
            cmd[0] = TEST_UNIT_READY;
            cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
            memset ((void *) &cmd[2], 0, 8);
            SCpnt->request.dev = 0xffff;        /* mark busy; 0xfffe signals done */
            SCpnt->cmd_len = 0;
            SCpnt->sense_buffer[0] = 0;
            SCpnt->sense_buffer[2] = 0;

            scsi_do_cmd (SCpnt,
                         (void *) cmd, (void *) buffer,
                         512, sd_init_done, SD_TIMEOUT,
                         MAX_RETRIES);

            /* Busy-wait for sd_init_done() to flip the sentinel. */
            while(SCpnt->request.dev != 0xfffe);

            the_result = SCpnt->result;

            /* Fixed disk reporting NOT READY: issue START UNIT once,
             * then poll with one-second pauses. */
            if(the_result && !rscsi_disks[i].device->removable &&
               SCpnt->sense_buffer[2] == NOT_READY) {
                int time1;
                if(!spintime){
                    printk( "sd%c: Spinning up disk...", 'a' + i );
                    cmd[0] = START_STOP;
                    cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
                    cmd[1] |= 1;        /* return immediately */
                    memset ((void *) &cmd[2], 0, 8);
                    cmd[4] = 1;         /* start the spin cycle */
                    SCpnt->request.dev = 0xffff;
                    SCpnt->cmd_len = 0;
                    SCpnt->sense_buffer[0] = 0;
                    SCpnt->sense_buffer[2] = 0;

                    scsi_do_cmd (SCpnt,
                                 (void *) cmd, (void *) buffer,
                                 512, sd_init_done, SD_TIMEOUT,
                                 MAX_RETRIES);

                    while(SCpnt->request.dev != 0xfffe);

                    spintime = jiffies;     /* see NOTE(review) on spintime's type */
                };

                time1 = jiffies;
                while(jiffies < time1 + HZ);    /* wait ~1 second between polls */
                printk( "." );
            };
        } while(the_result && spintime && spintime+5000 > jiffies);
        if (spintime) {
            if (the_result)
                printk( "not responding...\n" );
            else
                printk( "ready\n" );
        }
    };

    /* READ CAPACITY, with a few retries. */
    retries = 3;
    do {
        cmd[0] = READ_CAPACITY;
        cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
        memset ((void *) &cmd[2], 0, 8);
        memset ((void *) buffer, 0, 8);
        SCpnt->request.dev = 0xffff;    /* mark busy again */
        SCpnt->cmd_len = 0;
        SCpnt->sense_buffer[0] = 0;
        SCpnt->sense_buffer[2] = 0;

        scsi_do_cmd (SCpnt,
                     (void *) cmd, (void *) buffer,
                     8, sd_init_done, SD_TIMEOUT,
                     MAX_RETRIES);

        /* Boot path busy-waits; process context sleeps on a semaphore.
         * NOTE(review): the semaphore is attached after the command was
         * issued - if completion raced ahead of the check this relies on
         * the trailing schedule() loop; confirm against sd_init_done(). */
        if (current == task[0])
            while(SCpnt->request.dev != 0xfffe);
        else
            if (SCpnt->request.dev != 0xfffe){
                struct semaphore sem = MUTEX_LOCKED;
                SCpnt->request.sem = &sem;
                down(&sem);
                /* Belt and braces: ensure the sentinel really flipped. */
                while (SCpnt->request.dev != 0xfffe) schedule();
            };

        the_result = SCpnt->result;
        retries--;

    } while(the_result && retries);

    SCpnt->request.dev = -1;    /* free the command block */

    wake_up(&SCpnt->device->device_wait);

    /*
     * READ CAPACITY failed: fall back to conservative defaults so the
     * disk remains usable (512-byte sectors, pretend 1GB).
     */
    if (the_result)
    {
        printk ("sd%c : READ CAPACITY failed.\n"
                "sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
                'a' + i, 'a' + i,
                status_byte(the_result),
                msg_byte(the_result),
                host_byte(the_result),
                driver_byte(the_result)
                );
        if (driver_byte(the_result) & DRIVER_SENSE)
            printk("sd%c : extended sense code = %1x \n", 'a' + i, SCpnt->sense_buffer[2] & 0xf);
        else
            printk("sd%c : sense not available. \n", 'a' + i);

        printk("sd%c : block size assumed to be 512 bytes, disk size 1GB. \n", 'a' + i);
        rscsi_disks[i].capacity = 0x1fffff;
        rscsi_disks[i].sector_size = 512;

        /* NOT READY on a removable device: remember to revalidate. */
        if(rscsi_disks[i].device->removable &&
           SCpnt->sense_buffer[2] == NOT_READY)
            rscsi_disks[i].device->changed = 1;

    }
    else
    {
        /* READ CAPACITY data: bytes 0-3 last LBA, bytes 4-7 block size
         * (both big-endian). */
        rscsi_disks[i].capacity = (buffer[0] << 24) |
            (buffer[1] << 16) |
            (buffer[2] << 8) |
            buffer[3];

        rscsi_disks[i].sector_size = (buffer[4] << 24) |
            (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];

        if (rscsi_disks[i].sector_size != 512 &&
            rscsi_disks[i].sector_size != 1024 &&
            rscsi_disks[i].sector_size != 256)
        {
            printk ("sd%c : unsupported sector size %d.\n",
                    'a' + i, rscsi_disks[i].sector_size);
            if(rscsi_disks[i].device->removable){
                rscsi_disks[i].capacity = 0;    /* keep the slot; media may change */
            } else {
                printk ("scsi : deleting disk entry.\n");
                rscsi_disks[i].device = NULL;
                sd_template.nr_dev--;
                return i;
            };
        }
        /* Normalize capacity to 512-byte sectors, the driver's unit. */
        if(rscsi_disks[i].sector_size == 1024)
            rscsi_disks[i].capacity <<= 1;
        if(rscsi_disks[i].sector_size == 256)
            rscsi_disks[i].capacity >>= 1;
    }

    rscsi_disks[i].ten = 1;     /* assume 10-byte commands work until proven otherwise */
    rscsi_disks[i].remap = 1;
    scsi_free(buffer, 512);
    return i;
}
1008
1009
1010
1011
1012
1013
1014
1015 static void sd_init()
1016 {
1017 int i;
1018 static int sd_registered = 0;
1019
1020 if (sd_template.dev_noticed == 0) return;
1021
1022 if(!sd_registered) {
1023 if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
1024 printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
1025 return;
1026 }
1027 sd_registered++;
1028 }
1029
1030
1031 if(scsi_loadable_module_flag) return;
1032
1033 sd_template.dev_max = sd_template.dev_noticed;
1034
1035 rscsi_disks = (Scsi_Disk *)
1036 scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk));
1037 memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));
1038
1039 sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1040 sizeof(int));
1041 memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));
1042
1043 sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1044 sizeof(int));
1045 for(i=0;i<(sd_template.dev_max << 4);i++) sd_blocksizes[i] = 1024;
1046 blksize_size[MAJOR_NR] = sd_blocksizes;
1047
1048 sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
1049 sizeof(struct hd_struct));
1050
1051
1052 sd_gendisk.max_nr = sd_template.dev_max;
1053 sd_gendisk.part = sd;
1054 sd_gendisk.sizes = sd_sizes;
1055 sd_gendisk.real_devices = (void *) rscsi_disks;
1056
1057 }
1058
/*
 * sd_finish() - second-stage initialization, run after all hosts are
 * attached: probes each attached disk, installs the request function,
 * sets the read-ahead size and links the gendisk into the global chain.
 */
static void sd_finish()
{
    int i;

    /* sd_init_onedisk() returns its argument, so the reassignment of
     * `i` is effectively a no-op here. */
    for (i = 0; i < sd_template.dev_max; ++i)
        if (rscsi_disks[i].device) i = sd_init_onedisk(i);

    blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;

    /* Larger read-ahead for hosts with scatter-gather support.
     * NOTE(review): dereferences rscsi_disks[0].device without a NULL
     * check - assumes disk 0 was attached; confirm against callers. */
    if(rscsi_disks[0].device->host->sg_tablesize)
        read_ahead[MAJOR_NR] = 120;     /* 120 sector read-ahead */

    else
        read_ahead[MAJOR_NR] = 4;       /* 4 sector read-ahead */

    sd_gendisk.next = gendisk_head;
    gendisk_head = &sd_gendisk;
    return;
}
1081
1082 static int sd_detect(Scsi_Device * SDp){
1083
1084 if(scsi_loadable_module_flag) return 0;
1085 if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1086
1087 printk("Detected scsi disk sd%c at scsi%d, id %d, lun %d\n",
1088 'a'+ (sd_template.dev_noticed++),
1089 SDp->host->host_no , SDp->id, SDp->lun);
1090
1091 return 1;
1092
1093 }
1094
1095 static void sd_attach(Scsi_Device * SDp){
1096 Scsi_Disk * dpnt;
1097 int i;
1098
1099
1100 if(scsi_loadable_module_flag) return;
1101 if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return;
1102
1103 if(sd_template.nr_dev >= sd_template.dev_max)
1104 panic ("scsi_devices corrupt (sd)");
1105
1106 for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
1107 if(!dpnt->device) break;
1108
1109 if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)");
1110
1111 SDp->scsi_request_fn = do_sd_request;
1112 rscsi_disks[i].device = SDp;
1113 sd_template.nr_dev++;
1114 };
1115
/* Shorthand for revalidate_scsidisk() below; `target' is the disk index
 * in scope at each use. */
#define DEVICE_BUSY rscsi_disks[target].device->busy
#define USAGE rscsi_disks[target].device->access_count
#define CAPACITY rscsi_disks[target].capacity
#define MAYBE_REINIT sd_init_onedisk(target)
#define GENDISK_STRUCT sd_gendisk

/*
 * revalidate_scsidisk() - re-read a disk's size and partition table,
 * typically after a media change.  Refuses with -EBUSY if the device is
 * already being revalidated or is opened more than `maxusage` times.
 * Flushes and invalidates every partition minor, re-probes the disk,
 * then rebuilds the partition table.
 */
int revalidate_scsidisk(int dev, int maxusage){
    int target, major;
    struct gendisk * gdev;
    int max_p;
    int start;
    int i;

    target = DEVICE_NR(MINOR(dev));
    gdev = &GENDISK_STRUCT;

    cli();      /* make the busy/usage test-and-set atomic */
    if (DEVICE_BUSY || USAGE > maxusage) {
        sti();
        printk("Device busy for revalidation (usage=%d)\n", USAGE);
        return -EBUSY;
    };
    DEVICE_BUSY = 1;    /* sd_open() spins on this flag */
    sti();

    max_p = gdev->max_p;
    start = target << gdev->minor_shift;
    major = MAJOR_NR << 8;

    /* Flush, invalidate and clear every partition minor of this disk. */
    for (i=max_p - 1; i >=0 ; i--) {
        sync_dev(major | start | i);
        invalidate_inodes(major | start | i);
        invalidate_buffers(major | start | i);
        gdev->part[start+i].start_sect = 0;
        gdev->part[start+i].nr_sects = 0;
    };

#ifdef MAYBE_REINIT
    MAYBE_REINIT;       /* re-probe capacity and sector size */
#endif

    gdev->part[start].nr_sects = CAPACITY;
    resetup_one_dev(gdev, target);      /* re-read the partition table */

    DEVICE_BUSY = 0;
    return 0;
}
1170
1171 static int fop_revalidate_scsidisk(dev_t dev){
1172 return revalidate_scsidisk(dev, 0);
1173 }
1174