This source file includes following definitions.
- sd_open
- sd_release
- sd_geninit
- rw_intr
- do_sd_request
- requeue_sd_request
- check_scsidisk_media_change
- sd_init_done
- sd_init_onedisk
- sd_init
- sd_init1
- sd_attach
- revalidate_scsidisk
- fop_revalidate_scsidisk
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/fs.h>
15 #include <linux/kernel.h>
16 #include <linux/sched.h>
17 #include <linux/string.h>
18 #include <linux/errno.h>
19 #include <asm/system.h>
20
21 #define MAJOR_NR SCSI_DISK_MAJOR
22 #include "../block/blk.h"
23 #include "scsi.h"
24 #include "hosts.h"
25 #include "sd.h"
26 #include "scsi_ioctl.h"
27 #include "constants.h"
28
29 #include <linux/genhd.h>
30
31
32
33
34
35 #define MAX_RETRIES 5
36
37
38
39
40
41 #define SD_TIMEOUT 300
42 #define SD_MOD_TIMEOUT 750
43
44 #define CLUSTERABLE_DEVICE(SC) (SC->host->hostt->use_clustering && \
45 SC->device->type != TYPE_MOD)
46
47 struct hd_struct * sd;
48
49 int NR_SD=0;
50 int MAX_SD=0;
51 Scsi_Disk * rscsi_disks;
52 static int * sd_sizes;
53 static int * sd_blocksizes;
54
55 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
56
57 static int check_scsidisk_media_change(dev_t);
58 static int fop_revalidate_scsidisk(dev_t);
59
60 static sd_init_onedisk(int);
61
62 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
63
/*
 * Open handler for a SCSI disk device node.
 *
 * Validates the target, waits out any in-progress revalidation, handles
 * removable-media bookkeeping, and bumps the per-device open count.
 * Returns 0 on success or -ENXIO if the disk does not exist.
 */
static int sd_open(struct inode * inode, struct file * filp)
{
	int target;
	target = DEVICE_NR(MINOR(inode->i_rdev));

	if(target >= NR_SD || !rscsi_disks[target].device)
		return -ENXIO;   /* No such device */

	/*
	 * Busy-wait until the device is no longer marked busy (set by
	 * revalidate_scsidisk below).  NOTE(review): this spins with
	 * interrupts enabled and no schedule() -- presumably revalidation
	 * is brief; confirm against the era's locking conventions.
	 */
	while (rscsi_disks[target].device->busy);

	if(rscsi_disks[target].device->removable) {
		/* Removable media: detect a media change first ... */
		check_disk_change(inode->i_rdev);

		/* ... then lock the drive door on the FIRST open so the
		 * user cannot eject while the device is in use. */
		if(!rscsi_disks[target].device->access_count)
			sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
	};
	rscsi_disks[target].device->access_count++;
	return 0;
}
86
/*
 * Release (close) handler for a SCSI disk device node.
 *
 * Flushes dirty buffers for the device, drops the open count, and on the
 * LAST close of a removable device unlocks the drive door so the medium
 * can be ejected again.
 */
static void sd_release(struct inode * inode, struct file * file)
{
	int target;
	sync_dev(inode->i_rdev);	/* flush pending writes before close */

	target = DEVICE_NR(MINOR(inode->i_rdev));

	rscsi_disks[target].device->access_count--;

	if(rscsi_disks[target].device->removable) {
		/* Last closer unlocks the door (mirrors the lock in sd_open). */
		if(!rscsi_disks[target].device->access_count)
			sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
	};
}
101
102 static void sd_geninit(void);
103
/*
 * File operations for SCSI disk block devices.  Positional initializer
 * (pre-designated-initializer kernel style); the slot order follows the
 * struct file_operations layout of this kernel generation.
 */
static struct file_operations sd_fops = {
	NULL,			/* lseek  - default */
	block_read,		/* read   - generic block-layer read */
	block_write,		/* write  - generic block-layer write */
	NULL,			/* readdir - bad for block devices */
	NULL,			/* select */
	sd_ioctl,		/* ioctl */
	NULL,			/* mmap */
	sd_open,		/* open */
	sd_release,		/* release */
	block_fsync,		/* fsync - generic block-layer fsync */
	NULL,			/* fasync */
	check_scsidisk_media_change,	/* media change detect */
	fop_revalidate_scsidisk		/* revalidate after media change */
};
119
/*
 * Generic-disk descriptor for the "sd" driver.  Positional initializer;
 * the 4-bit minor shift gives 16 minors (1 whole-disk + 15 partitions)
 * per physical disk.
 */
static struct gendisk sd_gendisk = {
	MAJOR_NR,		/* major number */
	"sd",			/* device name prefix */
	4,			/* minor_shift: bits of minor used for partitions */
	1 << 4,			/* max_p: maximum number of partitions per disk */
	0,			/* max_nr: filled in by sd_init() */
	sd_geninit,		/* init: called to fill in disk sizes */
	NULL,			/* part: partition table, filled in by sd_init() */
	NULL,			/* sizes: filled in by sd_init() */
	0,			/* nr_real: number of real devices, set by sd_geninit() */
	NULL,			/* real_devices: internal use, set by sd_init() */
	NULL			/* next gendisk in chain, linked by sd_init() */
};
133
134 static void sd_geninit (void)
135 {
136 int i;
137
138 for (i = 0; i < NR_SD; ++i)
139 sd[i << 4].nr_sects = rscsi_disks[i].capacity;
140 sd_gendisk.nr_real = NR_SD;
141 }
142
143
144
145
146
147
148
/*
 * rw_intr() -- completion handler for disk READ/WRITE commands, invoked
 * by the mid-level SCSI code when a command finishes (interrupt context).
 *
 * On success it copies bounce-buffer data back for reads, frees any DMA
 * bounce buffers and the scatterlist, completes the request, and requeues
 * the command structure for further work.  On failure it frees buffers,
 * inspects sense data (media change, ILLEGAL REQUEST fallback from
 * 10-byte to 6-byte commands), and fails the request if unrecoverable.
 */
static void rw_intr (Scsi_Cmnd *SCpnt)
{
	int result = SCpnt->result;
	int this_count = SCpnt->bufflen >> 9;	/* sectors moved by this command */

#ifdef DEBUG
	printk("sd%d : rw_intr(%d, %d)\n", MINOR(SCpnt->request.dev), SCpnt->host->host_no, result);
#endif

	/* ---- Success path ---- */
	if (!result) {

#ifdef DEBUG
		printk("sd%d : %d sectors remain.\n", MINOR(SCpnt->request.dev), SCpnt->request.nr_sectors);
		printk("use_sg is %d\n ",SCpnt->use_sg);
#endif
		if (SCpnt->use_sg) {
			struct scatterlist * sgpnt;
			int i;
			sgpnt = (struct scatterlist *) SCpnt->buffer;
			for(i=0; i<SCpnt->use_sg; i++) {
#ifdef DEBUG
				printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
#endif
				/* alt_address != NULL means this entry used a DMA
				 * bounce buffer: copy the data back to the real
				 * buffer for reads, then free the bounce buffer. */
				if (sgpnt[i].alt_address) {
					if (SCpnt->request.cmd == READ)
						memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
					scsi_free(sgpnt[i].address, sgpnt[i].length);
				};
			};
			scsi_free(SCpnt->buffer, SCpnt->sglist_len);	/* free the scatterlist itself */
		} else {
			/* Non-SG path: a differing buffer pointer means a single
			 * bounce buffer was used. */
			if (SCpnt->buffer != SCpnt->request.buffer) {
#ifdef DEBUG
				printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
				       SCpnt->bufflen);
#endif
				if (SCpnt->request.cmd == READ)
					memcpy(SCpnt->request.buffer, SCpnt->buffer,
					       SCpnt->bufflen);
				scsi_free(SCpnt->buffer, SCpnt->bufflen);
			};
		};

		/* Partial transfer: more sectors remain in the request than this
		 * command moved; clear the error count so the remainder retries
		 * cleanly.  A partial transfer with no buffer-head chain cannot
		 * be continued -- that is a paging request spanning commands. */
		if (SCpnt->request.nr_sectors > this_count)
		{
			SCpnt->request.errors = 0;

			if (!SCpnt->request.bh)
			{
#ifdef DEBUG
				printk("sd%d : handling page request, no buffer\n",
				       MINOR(SCpnt->request.dev));
#endif
				panic("sd.c: linked page request (%lx %x)",
				      SCpnt->request.sector, this_count);
			}
		}
		end_scsi_request(SCpnt, 1, this_count);
		requeue_sd_request(SCpnt);
		return;
	}

	/* ---- Error path: free bounce buffers (no copy-back on error) ---- */
	if (SCpnt->use_sg) {
		struct scatterlist * sgpnt;
		int i;
		sgpnt = (struct scatterlist *) SCpnt->buffer;
		for(i=0; i<SCpnt->use_sg; i++) {
#ifdef DEBUG
			printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
			       SCpnt->bufflen);
#endif
			if (sgpnt[i].alt_address) {
				scsi_free(sgpnt[i].address, sgpnt[i].length);
			};
		};
		scsi_free(SCpnt->buffer, SCpnt->sglist_len);
	} else {
#ifdef DEBUG
		printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
		       SCpnt->bufflen);
#endif
		if (SCpnt->buffer != SCpnt->request.buffer)
			scsi_free(SCpnt->buffer, SCpnt->bufflen);
	};

	/* Examine the driver-level status for recoverable conditions.
	 * ("sugestion" is the historical spelling of this macro in scsi.h.) */
	if (driver_byte(result) != 0) {
		if (sugestion(result) == SUGGEST_REMAP) {
#ifdef REMAP
			/* NOTE(review): this block does not compile if REMAP is
			 * ever defined -- the `if` lacks parentheses and the
			 * `else` dangles with no statement.  Left as-is since
			 * REMAP is not defined anywhere visible here. */
			if rscsi_disks[DEVICE_NR(SCpnt->request.dev)].remap
			{
				result = 0;
			}
			else

#endif
		}

		/* Valid extended sense (0x70)? */
		if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
			if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
				if(rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->removable) {
					/* Media changed under us: flag it, fail this
					 * request, and move on. */
					rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->changed = 1;
					end_scsi_request(SCpnt, 0, this_count);
					requeue_sd_request(SCpnt);
					return;
				}
			}
		}

		/* ILLEGAL REQUEST on a device we were driving with 10-byte
		 * commands: fall back to 6-byte commands and retry. */
		if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
			if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten) {
				rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten = 0;
				requeue_sd_request(SCpnt);
				result = 0;
			} else {
			}
		}
	}
	/* Unrecoverable: log, dump sense if available, and fail the request. */
	if (result) {
		printk("SCSI disk error : host %d id %d lun %d return code = %x\n",
		       rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->host->host_no,
		       rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->id,
		       rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->lun, result);

		if (driver_byte(result) & DRIVER_SENSE)
			print_sense("sd", SCpnt);
		end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
		requeue_sd_request(SCpnt);
		return;
	}
}
315
316
317
318
319
320
321
/*
 * do_sd_request() -- block-layer request function for the sd major.
 *
 * Pulls requests off the CURRENT queue (a blk.h macro for this major's
 * queue head) and hands each one a command structure.  The first pass may
 * block waiting for a free command (allocate_device); if none is free and
 * multiple disks exist, it scans the queue for ANY request that can be
 * started without blocking (request_queueable) so one slow disk does not
 * stall the others.  Runs with interrupts disabled around queue surgery.
 */
static void do_sd_request (void)
{
	Scsi_Cmnd * SCpnt = NULL;
	struct request * req = NULL;
	int flag = 0;
	while (1==1){
		cli();
		/* dev == -1 marks a request already taken by a command below;
		 * nothing more to start right now. */
		if (CURRENT != NULL && CURRENT->dev == -1) {
			sti();
			return;
		};

		INIT_SCSI_REQUEST;	/* validate CURRENT; returns if queue empty */

		/* First iteration only: possibly-blocking allocation of a
		 * command for the head request.  Later iterations must not
		 * block, so force the non-blocking scan below instead. */
		if (flag++ == 0)
			SCpnt = allocate_device(&CURRENT,
						rscsi_disks[DEVICE_NR(MINOR(CURRENT->dev))].device, 0);
		else SCpnt = NULL;
		sti();

		/* No command for the head request: with several disks, walk the
		 * whole queue looking for any request whose device has a free
		 * command slot. */
		if (!SCpnt && NR_SD > 1){
			struct request *req1;
			req1 = NULL;	/* trails req: predecessor for unlinking */
			cli();
			req = CURRENT;
			while(req){
				SCpnt = request_queueable(req,
							  rscsi_disks[DEVICE_NR(MINOR(req->dev))].device);
				if(SCpnt) break;
				req1 = req;
				req = req->next;
			};
			/* NOTE(review): req->dev == -1 here appears to mean the
			 * request was fully absorbed into the command, so it is
			 * unlinked from the queue -- confirm against
			 * request_queueable()'s contract. */
			if (SCpnt && req->dev == -1) {
				if (req == CURRENT)
					CURRENT = CURRENT->next;
				else
					req1->next = req->next;
			};
			sti();
		};

		if (!SCpnt) return;	/* nothing startable; completions will re-enter */

		/* Build and issue the SCSI command for this request. */
		requeue_sd_request(SCpnt);
	};
}
388
389 static void requeue_sd_request (Scsi_Cmnd * SCpnt)
390 {
391 int dev, block, this_count;
392 unsigned char cmd[10];
393 int bounce_size, contiguous;
394 int max_sg;
395 struct buffer_head * bh, *bhp;
396 char * buff, *bounce_buffer;
397
398 repeat:
399
400 if(SCpnt->request.dev <= 0) {
401 do_sd_request();
402 return;
403 }
404
405 dev = MINOR(SCpnt->request.dev);
406 block = SCpnt->request.sector;
407 this_count = 0;
408
409 #ifdef DEBUG
410 printk("Doing sd request, dev = %d, block = %d\n", dev, block);
411 #endif
412
413 if (dev >= (NR_SD << 4) || block + SCpnt->request.nr_sectors > sd[dev].nr_sects)
414 {
415 end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
416 goto repeat;
417 }
418
419 block += sd[dev].start_sect;
420 dev = DEVICE_NR(dev);
421
422 if (rscsi_disks[dev].device->changed)
423 {
424
425
426
427
428 end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
429 goto repeat;
430 }
431
432 #ifdef DEBUG
433 printk("sd%d : real dev = /dev/sd%d, block = %d\n", MINOR(SCpnt->request.dev), dev, block);
434 #endif
435
436 switch (SCpnt->request.cmd)
437 {
438 case WRITE :
439 if (!rscsi_disks[dev].device->writeable)
440 {
441 end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
442 goto repeat;
443 }
444 cmd[0] = WRITE_6;
445 break;
446 case READ :
447 cmd[0] = READ_6;
448 break;
449 default :
450 panic ("Unknown sd command %d\n", SCpnt->request.cmd);
451 }
452
453 SCpnt->this_count = 0;
454
455
456
457 contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
458 bounce_buffer = NULL;
459 bounce_size = (SCpnt->request.nr_sectors << 9);
460
461
462
463
464
465
466 if (contiguous && SCpnt->request.bh &&
467 ((int) SCpnt->request.bh->b_data) + (SCpnt->request.nr_sectors << 9) - 1 >
468 ISA_DMA_THRESHOLD && SCpnt->host->unchecked_isa_dma) {
469 if(((int) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
470 bounce_buffer = (char *) scsi_malloc(bounce_size);
471 if(!bounce_buffer) contiguous = 0;
472 };
473
474 if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
475 for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp,
476 bhp = bhp->b_reqnext) {
477 if(!CONTIGUOUS_BUFFERS(bh,bhp)) {
478 if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
479 contiguous = 0;
480 break;
481 }
482 };
483 if (!SCpnt->request.bh || contiguous) {
484
485
486 this_count = SCpnt->request.nr_sectors;
487 buff = SCpnt->request.buffer;
488 SCpnt->use_sg = 0;
489
490 } else if (SCpnt->host->sg_tablesize == 0 ||
491 (need_isa_buffer &&
492 dma_free_sectors <= 10)) {
493
494
495
496
497
498
499
500
501 if (SCpnt->host->sg_tablesize != 0 &&
502 need_isa_buffer &&
503 dma_free_sectors <= 10)
504 printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n");
505
506 this_count = SCpnt->request.current_nr_sectors;
507 buff = SCpnt->request.buffer;
508 SCpnt->use_sg = 0;
509
510 } else {
511
512
513 struct scatterlist * sgpnt;
514 int count, this_count_max;
515 int counted;
516
517 bh = SCpnt->request.bh;
518 this_count = 0;
519 this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
520 count = 0;
521 bhp = NULL;
522 while(bh) {
523 if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
524 if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
525 !CLUSTERABLE_DEVICE(SCpnt) ||
526 (SCpnt->host->unchecked_isa_dma &&
527 ((unsigned int) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
528 if (count < SCpnt->host->sg_tablesize) count++;
529 else break;
530 };
531 this_count += (bh->b_size >> 9);
532 bhp = bh;
533 bh = bh->b_reqnext;
534 };
535 #if 0
536 if(SCpnt->host->unchecked_isa_dma &&
537 ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
538 #endif
539 SCpnt->use_sg = count;
540 count = 512;
541 while( count < (SCpnt->use_sg * sizeof(struct scatterlist)))
542 count = count << 1;
543 SCpnt->sglist_len = count;
544 max_sg = count / sizeof(struct scatterlist);
545 if(SCpnt->host->sg_tablesize < max_sg) max_sg = SCpnt->host->sg_tablesize;
546 sgpnt = (struct scatterlist * ) scsi_malloc(count);
547 memset(sgpnt, 0, count);
548 if (!sgpnt) {
549 printk("Warning - running *really* short on DMA buffers\n");
550 SCpnt->use_sg = 0;
551 this_count = SCpnt->request.current_nr_sectors;
552 buff = SCpnt->request.buffer;
553 } else {
554 buff = (char *) sgpnt;
555 counted = 0;
556 for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
557 count < SCpnt->use_sg && bh;
558 count++, bh = bhp) {
559
560 bhp = bh->b_reqnext;
561
562 if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
563 sgpnt[count].length += bh->b_size;
564 counted += bh->b_size >> 9;
565
566 if (((int) sgpnt[count].address) + sgpnt[count].length - 1 >
567 ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
568 !sgpnt[count].alt_address) {
569 sgpnt[count].alt_address = sgpnt[count].address;
570
571
572
573 if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
574 sgpnt[count].address = NULL;
575 } else {
576 sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
577 };
578
579
580
581
582 if(sgpnt[count].address == NULL){
583 #if 0
584 printk("Warning: Running low on SCSI DMA buffers");
585
586 while(--count >= 0){
587 if(sgpnt[count].alt_address)
588 scsi_free(sgpnt[count].address, sgpnt[count].length);
589 };
590 this_count = SCpnt->request.current_nr_sectors;
591 buff = SCpnt->request.buffer;
592 SCpnt->use_sg = 0;
593 scsi_free(sgpnt, SCpnt->sglist_len);
594 #endif
595 SCpnt->use_sg = count;
596 this_count = counted -= bh->b_size >> 9;
597 break;
598 };
599
600 };
601
602
603
604
605
606 if(bhp && CONTIGUOUS_BUFFERS(bh,bhp) && CLUSTERABLE_DEVICE(SCpnt)) {
607 char * tmp;
608
609 if (((int) sgpnt[count].address) + sgpnt[count].length +
610 bhp->b_size - 1 > ISA_DMA_THRESHOLD &&
611 (SCpnt->host->unchecked_isa_dma) &&
612 !sgpnt[count].alt_address) continue;
613
614 if(!sgpnt[count].alt_address) {count--; continue; }
615 if(dma_free_sectors > 10)
616 tmp = (char *) scsi_malloc(sgpnt[count].length + bhp->b_size);
617 else {
618 tmp = NULL;
619 max_sg = SCpnt->use_sg;
620 };
621 if(tmp){
622 scsi_free(sgpnt[count].address, sgpnt[count].length);
623 sgpnt[count].address = tmp;
624 count--;
625 continue;
626 };
627
628
629
630
631 if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
632 };
633 };
634
635 this_count = counted;
636
637 if(count < SCpnt->use_sg || SCpnt->use_sg > SCpnt->host->sg_tablesize){
638 bh = SCpnt->request.bh;
639 printk("Use sg, count %d %x %d\n", SCpnt->use_sg, count, dma_free_sectors);
640 printk("maxsg = %x, counted = %d this_count = %d\n", max_sg, counted, this_count);
641 while(bh){
642 printk("[%p %lx] ", bh->b_data, bh->b_size);
643 bh = bh->b_reqnext;
644 };
645 if(SCpnt->use_sg < 16)
646 for(count=0; count<SCpnt->use_sg; count++)
647 printk("{%d:%p %p %d} ", count,
648 sgpnt[count].address,
649 sgpnt[count].alt_address,
650 sgpnt[count].length);
651 panic("Ooops");
652 };
653
654 if (SCpnt->request.cmd == WRITE)
655 for(count=0; count<SCpnt->use_sg; count++)
656 if(sgpnt[count].alt_address)
657 memcpy(sgpnt[count].address, sgpnt[count].alt_address,
658 sgpnt[count].length);
659 };
660 };
661
662
663
664 if(SCpnt->use_sg == 0){
665 if (((int) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
666 (SCpnt->host->unchecked_isa_dma)) {
667 if(bounce_buffer)
668 buff = bounce_buffer;
669 else
670 buff = (char *) scsi_malloc(this_count << 9);
671 if(buff == NULL) {
672 this_count = SCpnt->request.current_nr_sectors;
673 buff = (char *) scsi_malloc(this_count << 9);
674 if(!buff) panic("Ran out of DMA buffers.");
675 };
676 if (SCpnt->request.cmd == WRITE)
677 memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
678 };
679 };
680 #ifdef DEBUG
681 printk("sd%d : %s %d/%d 512 byte blocks.\n", MINOR(SCpnt->request.dev),
682 (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
683 this_count, SCpnt->request.nr_sectors);
684 #endif
685
686 cmd[1] = (SCpnt->lun << 5) & 0xe0;
687
688 if (rscsi_disks[dev].sector_size == 1024){
689 if(block & 1) panic("sd.c:Bad block number requested");
690 if(this_count & 1) panic("sd.c:Bad block number requested");
691 block = block >> 1;
692 this_count = this_count >> 1;
693 };
694
695 if (rscsi_disks[dev].sector_size == 256){
696 block = block << 1;
697 this_count = this_count << 1;
698 };
699
700 if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten)
701 {
702 if (this_count > 0xffff)
703 this_count = 0xffff;
704
705 cmd[0] += READ_10 - READ_6 ;
706 cmd[2] = (unsigned char) (block >> 24) & 0xff;
707 cmd[3] = (unsigned char) (block >> 16) & 0xff;
708 cmd[4] = (unsigned char) (block >> 8) & 0xff;
709 cmd[5] = (unsigned char) block & 0xff;
710 cmd[6] = cmd[9] = 0;
711 cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
712 cmd[8] = (unsigned char) this_count & 0xff;
713 }
714 else
715 {
716 if (this_count > 0xff)
717 this_count = 0xff;
718
719 cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
720 cmd[2] = (unsigned char) ((block >> 8) & 0xff);
721 cmd[3] = (unsigned char) block & 0xff;
722 cmd[4] = (unsigned char) this_count;
723 cmd[5] = 0;
724 }
725
726
727
728
729
730
731
732 SCpnt->transfersize = rscsi_disks[dev].sector_size;
733 SCpnt->underflow = this_count << 9;
734 scsi_do_cmd (SCpnt, (void *) cmd, buff,
735 this_count * rscsi_disks[dev].sector_size,
736 rw_intr,
737 (SCpnt->device->type == TYPE_DISK ?
738 SD_TIMEOUT : SD_MOD_TIMEOUT),
739 MAX_RETRIES);
740 }
741
/*
 * Media-change check for removable SCSI disks (fops hook).
 *
 * Issues TEST UNIT READY through sd_ioctl with a fake on-stack inode;
 * a failure is taken to mean the medium was changed (or removed).
 * Returns 1 if the medium changed, 0 otherwise (including for fixed
 * disks and invalid devices).
 */
static int check_scsidisk_media_change(dev_t full_dev){
	int retval;
	int target;
	struct inode inode;	/* fake inode: only i_rdev is consumed by sd_ioctl */
	int flag = 0;		/* NOTE(review): never set non-zero -- the !flag
				 * test below is always true; dead variable */

	target = DEVICE_NR(MINOR(full_dev));

	if (target >= NR_SD) {
		printk("SCSI disk request error: invalid device.\n");
		return 0;
	};

	if(!rscsi_disks[target].device->removable) return 0;

	inode.i_rdev = full_dev;	/* sd_ioctl only looks at i_rdev */
	retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);

	if(retval){
		/* Unit not ready: assume the medium was changed or is gone.
		 * Flag the device so in-flight requests are failed. */
		rscsi_disks[target].device->changed = 1;
		return 1;

	};

	/* Report (and clear) any change latched earlier by rw_intr(). */
	retval = rscsi_disks[target].device->changed;
	if(!flag) rscsi_disks[target].device->changed = 0;
	return retval;
}
774
/*
 * Completion callback used by sd_init_onedisk()'s synchronous commands.
 *
 * Marks the request done by setting dev to the 0xfffe sentinel (the
 * issuer busy-waits or sleeps on this value), and wakes any sleeping
 * issuer, requesting a reschedule if it deserves the CPU.
 */
static void sd_init_done (Scsi_Cmnd * SCpnt)
{
	struct request * req;
	struct task_struct * p;

	req = &SCpnt->request;
	req->dev = 0xfffe;	/* sentinel: command complete */

	if ((p = req->waiting) != NULL) {
		req->waiting = NULL;
		p->state = TASK_RUNNING;
		/* Pre-empt in favour of the waiter if it has more quantum left. */
		if (p->counter > current->counter)
			need_resched = 1;
	}
}
790
791 static int sd_init_onedisk(int i)
792 {
793 int j = 0;
794 unsigned char cmd[10];
795 unsigned char *buffer;
796 char spintime;
797 int the_result, retries;
798 Scsi_Cmnd * SCpnt;
799
800
801
802
803
804 SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
805 buffer = (unsigned char *) scsi_malloc(512);
806
807 spintime = 0;
808
809
810 if (current == task[0]){
811 do{
812 cmd[0] = TEST_UNIT_READY;
813 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
814 memset ((void *) &cmd[2], 0, 8);
815 SCpnt->request.dev = 0xffff;
816 SCpnt->sense_buffer[0] = 0;
817 SCpnt->sense_buffer[2] = 0;
818
819 scsi_do_cmd (SCpnt,
820 (void *) cmd, (void *) buffer,
821 512, sd_init_done, SD_TIMEOUT,
822 MAX_RETRIES);
823
824 while(SCpnt->request.dev != 0xfffe);
825
826 the_result = SCpnt->result;
827
828
829
830 if(the_result && !rscsi_disks[i].device->removable &&
831 SCpnt->sense_buffer[2] == NOT_READY) {
832 int time1;
833 if(!spintime){
834 printk( "sd%d: Spinning up disk...", i );
835 cmd[0] = START_STOP;
836 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
837 cmd[1] |= 1;
838 memset ((void *) &cmd[2], 0, 8);
839 cmd[4] = 1;
840 SCpnt->request.dev = 0xffff;
841 SCpnt->sense_buffer[0] = 0;
842 SCpnt->sense_buffer[2] = 0;
843
844 scsi_do_cmd (SCpnt,
845 (void *) cmd, (void *) buffer,
846 512, sd_init_done, SD_TIMEOUT,
847 MAX_RETRIES);
848
849 while(SCpnt->request.dev != 0xfffe);
850
851 spintime = jiffies;
852 };
853
854 time1 = jiffies;
855 while(jiffies < time1 + HZ);
856 printk( "." );
857 };
858 } while(the_result && spintime && spintime+5000 > jiffies);
859 if (spintime) {
860 if (the_result)
861 printk( "not responding...\n" );
862 else
863 printk( "ready\n" );
864 }
865 };
866
867
868 retries = 3;
869 do {
870 cmd[0] = READ_CAPACITY;
871 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
872 memset ((void *) &cmd[2], 0, 8);
873 memset ((void *) buffer, 0, 8);
874 SCpnt->request.dev = 0xffff;
875 SCpnt->sense_buffer[0] = 0;
876 SCpnt->sense_buffer[2] = 0;
877
878 scsi_do_cmd (SCpnt,
879 (void *) cmd, (void *) buffer,
880 8, sd_init_done, SD_TIMEOUT,
881 MAX_RETRIES);
882
883 if (current == task[0])
884 while(SCpnt->request.dev != 0xfffe);
885 else
886 if (SCpnt->request.dev != 0xfffe){
887 SCpnt->request.waiting = current;
888 current->state = TASK_UNINTERRUPTIBLE;
889 while (SCpnt->request.dev != 0xfffe) schedule();
890 };
891
892 the_result = SCpnt->result;
893 retries--;
894
895 } while(the_result && retries);
896
897 SCpnt->request.dev = -1;
898
899 wake_up(&SCpnt->device->device_wait);
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916 if (the_result)
917 {
918 printk ("sd%d : READ CAPACITY failed.\n"
919 "sd%d : status = %x, message = %02x, host = %d, driver = %02x \n",
920 i,i,
921 status_byte(the_result),
922 msg_byte(the_result),
923 host_byte(the_result),
924 driver_byte(the_result)
925 );
926 if (driver_byte(the_result) & DRIVER_SENSE)
927 printk("sd%d : extended sense code = %1x \n", i, SCpnt->sense_buffer[2] & 0xf);
928 else
929 printk("sd%d : sense not available. \n", i);
930
931 printk("sd%d : block size assumed to be 512 bytes, disk size 1GB. \n", i);
932 rscsi_disks[i].capacity = 0x1fffff;
933 rscsi_disks[i].sector_size = 512;
934
935
936
937 if(rscsi_disks[i].device->removable &&
938 SCpnt->sense_buffer[2] == NOT_READY)
939 rscsi_disks[i].device->changed = 1;
940
941 }
942 else
943 {
944 rscsi_disks[i].capacity = (buffer[0] << 24) |
945 (buffer[1] << 16) |
946 (buffer[2] << 8) |
947 buffer[3];
948
949 rscsi_disks[i].sector_size = (buffer[4] << 24) |
950 (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
951
952 if (rscsi_disks[i].sector_size != 512 &&
953 rscsi_disks[i].sector_size != 1024 &&
954 rscsi_disks[i].sector_size != 256)
955 {
956 printk ("sd%d : unsupported sector size %d.\n",
957 i, rscsi_disks[i].sector_size);
958 if(rscsi_disks[i].device->removable){
959 rscsi_disks[i].capacity = 0;
960 } else {
961 printk ("scsi : deleting disk entry.\n");
962 for (j=i; j < NR_SD - 1;)
963 rscsi_disks[j] = rscsi_disks[++j];
964 --i;
965 --NR_SD;
966 scsi_free(buffer, 512);
967 return i;
968 };
969 }
970 if(rscsi_disks[i].sector_size == 1024)
971 rscsi_disks[i].capacity <<= 1;
972 if(rscsi_disks[i].sector_size == 256)
973 rscsi_disks[i].capacity >>= 1;
974 }
975
976 rscsi_disks[i].ten = 1;
977 rscsi_disks[i].remap = 1;
978 scsi_free(buffer, 512);
979 return i;
980 }
981
982
983
984
985
986
987
/*
 * sd_init() -- boot-time initialization for the SCSI disk driver.
 *
 * Registers the block device, carves the size/blocksize/partition tables
 * out of boot memory (memory_start..memory_end bump allocator), probes
 * every detected disk, installs the request function, and links the
 * gendisk into the global chain.  Returns the advanced memory_start.
 */
unsigned long sd_init(unsigned long memory_start, unsigned long memory_end)
{
	int i;

	if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
		printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
		return memory_start;
	}
	if (MAX_SD == 0) return memory_start;	/* no disks detected */

	/* 16 minors (disk + partitions) per possible disk. */
	sd_sizes = (int *) memory_start;
	memory_start += (MAX_SD << 4) * sizeof(int);
	memset(sd_sizes, 0, (MAX_SD << 4) * sizeof(int));

	sd_blocksizes = (int *) memory_start;
	memory_start += (MAX_SD << 4) * sizeof(int);
	for(i=0;i<(MAX_SD << 4);i++) sd_blocksizes[i] = 1024;
	blksize_size[MAJOR_NR] = sd_blocksizes;

	sd = (struct hd_struct *) memory_start;
	memory_start += (MAX_SD << 4) * sizeof(struct hd_struct);

	sd_gendisk.max_nr = MAX_SD;
	sd_gendisk.part = sd;
	sd_gendisk.sizes = sd_sizes;
	sd_gendisk.real_devices = (void *) rscsi_disks;

	/* Probe each disk; sd_init_onedisk may delete an entry and return
	 * i-1 so the shifted entry is re-probed. */
	for (i = 0; i < NR_SD; ++i)
		i = sd_init_onedisk(i);

	blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;

	/* Bigger read-ahead when the host can scatter-gather.
	 * NOTE(review): dereferences rscsi_disks[0].device -- presumably
	 * NR_SD > 0 whenever MAX_SD > 0 here; confirm for the case where
	 * every probe deleted its entry. */
	if(rscsi_disks[0].device->host->sg_tablesize)
		read_ahead[MAJOR_NR] = 120;	/* 60 KB */

	else
		read_ahead[MAJOR_NR] = 4;	/* 2 KB */

	sd_gendisk.next = gendisk_head;
	gendisk_head = &sd_gendisk;
	return memory_start;
}
1033
1034 void sd_init1(){
1035 rscsi_disks = (Scsi_Disk *) scsi_init_malloc(MAX_SD * sizeof(Scsi_Disk));
1036 };
1037
1038 void sd_attach(Scsi_Device * SDp){
1039 SDp->scsi_request_fn = do_sd_request;
1040 rscsi_disks[NR_SD++].device = SDp;
1041 if(NR_SD > MAX_SD) panic ("scsi_devices corrupt (sd)");
1042 };
1043
1044 #define DEVICE_BUSY rscsi_disks[target].device->busy
1045 #define USAGE rscsi_disks[target].device->access_count
1046 #define CAPACITY rscsi_disks[target].capacity
1047 #define MAYBE_REINIT sd_init_onedisk(target)
1048 #define GENDISK_STRUCT sd_gendisk
1049
1050
1051
1052
1053
1054
1055
1056
/*
 * revalidate_scsidisk() -- re-read capacity and partition table after a
 * media change.  Marks the device busy (sd_open spins on this), flushes
 * and invalidates every partition's buffers/inodes, re-probes the disk
 * (MAYBE_REINIT -> sd_init_onedisk), and rebuilds the partition table.
 *
 * Returns 0 on success, -EBUSY if the device is in use beyond maxusage.
 */
int revalidate_scsidisk(int dev, int maxusage){
	int target, major;
	struct gendisk * gdev;
	int max_p;
	int start;
	int i;

	target = DEVICE_NR(MINOR(dev));
	gdev = &GENDISK_STRUCT;

	/* Atomically test-and-set the busy flag against concurrent opens. */
	cli();
	if (DEVICE_BUSY || USAGE > maxusage) {
		sti();
		printk("Device busy for revalidation (usage=%d)\n", USAGE);
		return -EBUSY;
	};
	DEVICE_BUSY = 1;
	sti();

	max_p = gdev->max_p;
	start = target << gdev->minor_shift;	/* first minor of this disk */
	major = MAJOR_NR << 8;			/* major in device-number position */

	/* Flush and forget every partition of this disk, then clear its
	 * partition table entries. */
	for (i=max_p - 1; i >=0 ; i--) {
		sync_dev(major | start | i);
		invalidate_inodes(major | start | i);
		invalidate_buffers(major | start | i);
		gdev->part[start+i].start_sect = 0;
		gdev->part[start+i].nr_sects = 0;
	};

#ifdef MAYBE_REINIT
	MAYBE_REINIT;	/* expands to sd_init_onedisk(target): re-read capacity */
#endif

	/* Whole-disk entry gets the new capacity; re-scan partitions. */
	gdev->part[start].nr_sects = CAPACITY;
	resetup_one_dev(gdev, target);

	DEVICE_BUSY = 0;
	return 0;
}
1098
1099 static int fop_revalidate_scsidisk(dev_t dev){
1100 return revalidate_scsidisk(dev, 0);
1101 }
1102