This source file includes the following definitions.
- sd_open
- sd_release
- sd_geninit
- rw_intr
- do_sd_request
- requeue_sd_request
- check_scsidisk_media_change
- sd_init_done
- sd_init_onedisk
- sd_init
- sd_finish
- sd_detect
- sd_attach
- revalidate_scsidisk
- fop_revalidate_scsidisk
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/fs.h>
15 #include <linux/kernel.h>
16 #include <linux/sched.h>
17 #include <linux/string.h>
18 #include <linux/errno.h>
19 #include <asm/system.h>
20
21 #define MAJOR_NR SCSI_DISK_MAJOR
22 #include "../block/blk.h"
23 #include "scsi.h"
24 #include "hosts.h"
25 #include "sd.h"
26 #include "scsi_ioctl.h"
27 #include "constants.h"
28
29 #include <linux/genhd.h>
30
31
32
33
34
35 #define MAX_RETRIES 5
36
37
38
39
40
41 #define SD_TIMEOUT 600
42 #define SD_MOD_TIMEOUT 750
43
44 #define CLUSTERABLE_DEVICE(SC) (SC->host->hostt->use_clustering && \
45 SC->device->type != TYPE_MOD)
46
47 struct hd_struct * sd;
48
49 Scsi_Disk * rscsi_disks;
50 static int * sd_sizes;
51 static int * sd_blocksizes;
52 static int * sd_hardsizes;
53
54 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
55
56 static int check_scsidisk_media_change(dev_t);
57 static int fop_revalidate_scsidisk(dev_t);
58
59 static sd_init_onedisk(int);
60
61 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
62
63 static void sd_init(void);
64 static void sd_finish(void);
65 static void sd_attach(Scsi_Device *);
66 static int sd_detect(Scsi_Device *);
67
68 struct Scsi_Device_Template sd_template = {NULL, "disk", "sd", TYPE_DISK,
69 SCSI_DISK_MAJOR, 0, 0, 0, 1,
70 sd_detect, sd_init,
71 sd_finish, sd_attach, NULL};
72
73 static int sd_open(struct inode * inode, struct file * filp)
74 {
75 int target;
76 target = DEVICE_NR(MINOR(inode->i_rdev));
77
78 if(target >= sd_template.dev_max || !rscsi_disks[target].device)
79 return -ENXIO;
80
81
82
83
84 while (rscsi_disks[target].device->busy);
85
86 if(rscsi_disks[target].device->removable) {
87 check_disk_change(inode->i_rdev);
88
89 if(!rscsi_disks[target].device->access_count)
90 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
91 };
92 rscsi_disks[target].device->access_count++;
93 return 0;
94 }
95
96 static void sd_release(struct inode * inode, struct file * file)
97 {
98 int target;
99 sync_dev(inode->i_rdev);
100
101 target = DEVICE_NR(MINOR(inode->i_rdev));
102
103 rscsi_disks[target].device->access_count--;
104
105 if(rscsi_disks[target].device->removable) {
106 if(!rscsi_disks[target].device->access_count)
107 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
108 };
109 }
110
/* Forward declaration: fills in whole-disk sizes once capacities are known. */
static void sd_geninit(void);

/* File operations for sd device nodes (positional initializer). */
static struct file_operations sd_fops = {
    NULL,                         /* lseek - default */
    block_read,                   /* read - generic block-device read */
    block_write,                  /* write - generic block-device write */
    NULL,                         /* readdir */
    NULL,                         /* select */
    sd_ioctl,                     /* ioctl */
    NULL,                         /* mmap */
    sd_open,                      /* open */
    sd_release,                   /* release */
    block_fsync,                  /* fsync */
    NULL,                         /* fasync */
    check_scsidisk_media_change,  /* media change detection */
    fop_revalidate_scsidisk       /* revalidate after media change */
};
128
/* Generic-disk descriptor: 4 minor bits per disk => 16 minors/disk. */
static struct gendisk sd_gendisk = {
    MAJOR_NR,     /* major number */
    "sd",         /* device name prefix */
    4,            /* minor_shift: minor bits used for partitions */
    1 << 4,       /* max_p: partitions per disk */
    0,            /* max_nr: filled in by sd_init() */
    sd_geninit,   /* init hook */
    NULL,         /* part: hd_struct array, set in sd_init() */
    NULL,         /* sizes: set in sd_init() */
    0,            /* nr_real: set in sd_geninit() */
    NULL,         /* real_devices */
    NULL          /* next gendisk in the chain */
};
142
143 static void sd_geninit (void)
144 {
145 int i;
146
147 for (i = 0; i < sd_template.dev_max; ++i)
148 if(rscsi_disks[i].device)
149 sd[i << 4].nr_sects = rscsi_disks[i].capacity;
150 sd_gendisk.nr_real = sd_template.dev_max;
151 }
152
153
154
155
156
157
158
/*
 * rw_intr() - completion routine for disk READ/WRITE commands.
 *
 * Frees any scatter-gather list and ISA DMA bounce buffers (copying
 * bounce data back to the caller's buffers on successful reads), then
 * either completes the request or, on recoverable errors (unit
 * attention on removable media, 10-byte command rejected), retries via
 * requeue_sd_request().
 */
static void rw_intr (Scsi_Cmnd *SCpnt)
{
    int result = SCpnt->result;
    int this_count = SCpnt->bufflen >> 9;   /* bytes -> 512-byte sectors */

#ifdef DEBUG
    printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.dev), SCpnt->host->host_no, result);
#endif

    /* Success path: release buffers, copying bounce data back on READ. */
    if (!result) {

#ifdef DEBUG
        printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.dev), SCpnt->request.nr_sectors);
        printk("use_sg is %d\n ",SCpnt->use_sg);
#endif
        if (SCpnt->use_sg) {
            struct scatterlist * sgpnt;
            int i;
            sgpnt = (struct scatterlist *) SCpnt->buffer;
            for(i=0; i<SCpnt->use_sg; i++) {
#ifdef DEBUG
                printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
#endif
                /* alt_address set => this element used a DMA bounce buffer */
                if (sgpnt[i].alt_address) {
                    if (SCpnt->request.cmd == READ)
                        memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
                    scsi_free(sgpnt[i].address, sgpnt[i].length);
                };
            };
            scsi_free(SCpnt->buffer, SCpnt->sglist_len);
        } else {
            /* single contiguous bounce buffer case */
            if (SCpnt->buffer != SCpnt->request.buffer) {
#ifdef DEBUG
                printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
                       SCpnt->bufflen);
#endif
                if (SCpnt->request.cmd == READ)
                    memcpy(SCpnt->request.buffer, SCpnt->buffer,
                           SCpnt->bufflen);
                scsi_free(SCpnt->buffer, SCpnt->bufflen);
            };
        };

        /* Partial transfer of a larger request. */
        if (SCpnt->request.nr_sectors > this_count)
        {
            SCpnt->request.errors = 0;

            /* A request without buffer heads cannot be continued. */
            if (!SCpnt->request.bh)
            {
#ifdef DEBUG
                printk("sd%c : handling page request, no buffer\n",
                       'a' + MINOR(SCpnt->request.dev));
#endif
                panic("sd.c: linked page request (%lx %x)",
                      SCpnt->request.sector, this_count);
            }
        }
        SCpnt = end_scsi_request(SCpnt, 1, this_count);
        requeue_sd_request(SCpnt);
        return;
    }

    /* Error path: free buffers without copying any data back. */
    if (SCpnt->use_sg) {
        struct scatterlist * sgpnt;
        int i;
        sgpnt = (struct scatterlist *) SCpnt->buffer;
        for(i=0; i<SCpnt->use_sg; i++) {
#ifdef DEBUG
            printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
                   SCpnt->bufflen);
#endif
            if (sgpnt[i].alt_address) {
                scsi_free(sgpnt[i].address, sgpnt[i].length);
            };
        };
        scsi_free(SCpnt->buffer, SCpnt->sglist_len);
    } else {
#ifdef DEBUG
        printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
               SCpnt->bufflen);
#endif
        if (SCpnt->buffer != SCpnt->request.buffer)
            scsi_free(SCpnt->buffer, SCpnt->bufflen);
    };

    /* Examine driver-level error information for recoverable cases. */
    if (driver_byte(result) != 0) {
        if (suggestion(result) == SUGGEST_REMAP) {
#ifdef REMAP
            /*
             * NOTE(review): the condition below is missing parentheses
             * ("if rscsi_disks[...]") and the trailing "else" dangles at
             * the #endif — this block does not compile if REMAP is
             * defined. Left untouched; fix when enabling REMAP.
             */
            if rscsi_disks[DEVICE_NR(SCpnt->request.dev)].remap
            {
                result = 0;
            }
            else

#endif
        }

        /* Valid extended sense data (0x70 format)? */
        if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
            if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
                if(rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->removable) {
                    /* Media may have been changed: flag it and fail the request. */
                    rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->changed = 1;
                    SCpnt = end_scsi_request(SCpnt, 0, this_count);
                    requeue_sd_request(SCpnt);
                    return;
                }
            }
        }

        /*
         * ILLEGAL REQUEST: the target may not support 10-byte
         * READ/WRITE — fall back to 6-byte commands and retry.
         */
        if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
            if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten) {
                rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten = 0;
                requeue_sd_request(SCpnt);
                result = 0;
            } else {
            }
        }
    }
    if (result) {
        /* Unrecoverable: report, fail the current chunk, move on. */
        printk("SCSI disk error : host %d id %d lun %d return code = %x\n",
               rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->host->host_no,
               rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->id,
               rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->lun, result);

        if (driver_byte(result) & DRIVER_SENSE)
            print_sense("sd", SCpnt);
        SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
        requeue_sd_request(SCpnt);
        return;
    }
}
325
326
327
328
329
330
331
/*
 * do_sd_request() - block-layer request function for SCSI disks.
 *
 * Pulls requests off the queue, pairs each with a free Scsi_Cmnd, and
 * hands them to requeue_sd_request() for command construction. Interrupts
 * are disabled (cli) while the request queue is inspected/modified.
 */
static void do_sd_request (void)
{
    Scsi_Cmnd * SCpnt = NULL;
    struct request * req = NULL;
    unsigned long flags;
    int flag = 0;

    while (1==1){
        save_flags(flags);
        cli();
        /* dev == -1 marks a request already being serviced */
        if (CURRENT != NULL && CURRENT->dev == -1) {
            restore_flags(flags);
            return;
        };

        INIT_SCSI_REQUEST;

        /*
         * Only the first pass may block in allocate_device(); later
         * iterations must not sleep inside the request loop.
         */
        if (flag++ == 0)
            SCpnt = allocate_device(&CURRENT,
                                    rscsi_disks[DEVICE_NR(MINOR(CURRENT->dev))].device, 0);
        else SCpnt = NULL;
        restore_flags(flags);

        /*
         * Head of queue had no free command: with several disks, scan
         * further down the queue for a request whose device has a free
         * command slot.
         */
        if (!SCpnt && sd_template.nr_dev > 1){
            struct request *req1;
            req1 = NULL;
            save_flags(flags);
            cli();
            req = CURRENT;
            while(req){
                SCpnt = request_queueable(req,
                                          rscsi_disks[DEVICE_NR(MINOR(req->dev))].device);
                if(SCpnt) break;
                req1 = req;
                req = req->next;
            };
            /* request_queueable() accepted it (dev set to -1): unlink it */
            if (SCpnt && req->dev == -1) {
                if (req == CURRENT)
                    CURRENT = CURRENT->next;
                else
                    req1->next = req->next;
            };
            restore_flags(flags);
        };

        if (!SCpnt) return;

        /* Build and issue the SCSI command for this request. */
        requeue_sd_request(SCpnt);
    };
}
402
403 static void requeue_sd_request (Scsi_Cmnd * SCpnt)
404 {
405 int dev, block, this_count;
406 unsigned char cmd[10];
407 int bounce_size, contiguous;
408 int max_sg;
409 struct buffer_head * bh, *bhp;
410 char * buff, *bounce_buffer;
411
412 repeat:
413
414 if(!SCpnt || SCpnt->request.dev <= 0) {
415 do_sd_request();
416 return;
417 }
418
419 dev = MINOR(SCpnt->request.dev);
420 block = SCpnt->request.sector;
421 this_count = 0;
422
423 #ifdef DEBUG
424 printk("Doing sd request, dev = %d, block = %d\n", dev, block);
425 #endif
426
427 if (dev >= (sd_template.dev_max << 4) ||
428 !rscsi_disks[DEVICE_NR(dev)].device ||
429 block + SCpnt->request.nr_sectors > sd[dev].nr_sects)
430 {
431 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
432 goto repeat;
433 }
434
435 block += sd[dev].start_sect;
436 dev = DEVICE_NR(dev);
437
438 if (rscsi_disks[dev].device->changed)
439 {
440
441
442
443
444 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
445 goto repeat;
446 }
447
448 #ifdef DEBUG
449 printk("sd%c : real dev = /dev/sd%c, block = %d\n", 'a' + MINOR(SCpnt->request.dev), dev, block);
450 #endif
451
452
453
454
455
456
457
458
459
460
461
462
463 if (rscsi_disks[dev].sector_size == 1024)
464 if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
465 printk("sd.c:Bad block number requested");
466 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
467 goto repeat;
468 }
469
470 switch (SCpnt->request.cmd)
471 {
472 case WRITE :
473 if (!rscsi_disks[dev].device->writeable)
474 {
475 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
476 goto repeat;
477 }
478 cmd[0] = WRITE_6;
479 break;
480 case READ :
481 cmd[0] = READ_6;
482 break;
483 default :
484 panic ("Unknown sd command %d\n", SCpnt->request.cmd);
485 }
486
487 SCpnt->this_count = 0;
488
489
490
491 contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
492 bounce_buffer = NULL;
493 bounce_size = (SCpnt->request.nr_sectors << 9);
494
495
496
497
498
499
500 if (contiguous && SCpnt->request.bh &&
501 ((long) SCpnt->request.bh->b_data) + (SCpnt->request.nr_sectors << 9) - 1 >
502 ISA_DMA_THRESHOLD && SCpnt->host->unchecked_isa_dma) {
503 if(((long) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
504 bounce_buffer = (char *) scsi_malloc(bounce_size);
505 if(!bounce_buffer) contiguous = 0;
506 };
507
508 if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
509 for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp,
510 bhp = bhp->b_reqnext) {
511 if(!CONTIGUOUS_BUFFERS(bh,bhp)) {
512 if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
513 contiguous = 0;
514 break;
515 }
516 };
517 if (!SCpnt->request.bh || contiguous) {
518
519
520 this_count = SCpnt->request.nr_sectors;
521 buff = SCpnt->request.buffer;
522 SCpnt->use_sg = 0;
523
524 } else if (SCpnt->host->sg_tablesize == 0 ||
525 (need_isa_buffer &&
526 dma_free_sectors <= 10)) {
527
528
529
530
531
532
533
534
535 if (SCpnt->host->sg_tablesize != 0 &&
536 need_isa_buffer &&
537 dma_free_sectors <= 10)
538 printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n");
539
540 this_count = SCpnt->request.current_nr_sectors;
541 buff = SCpnt->request.buffer;
542 SCpnt->use_sg = 0;
543
544 } else {
545
546
547 struct scatterlist * sgpnt;
548 int count, this_count_max;
549 int counted;
550
551 bh = SCpnt->request.bh;
552 this_count = 0;
553 this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
554 count = 0;
555 bhp = NULL;
556 while(bh) {
557 if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
558 if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
559 !CLUSTERABLE_DEVICE(SCpnt) ||
560 (SCpnt->host->unchecked_isa_dma &&
561 ((unsigned long) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
562 if (count < SCpnt->host->sg_tablesize) count++;
563 else break;
564 };
565 this_count += (bh->b_size >> 9);
566 bhp = bh;
567 bh = bh->b_reqnext;
568 };
569 #if 0
570 if(SCpnt->host->unchecked_isa_dma &&
571 ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
572 #endif
573 SCpnt->use_sg = count;
574 count = 512;
575 while( count < (SCpnt->use_sg * sizeof(struct scatterlist)))
576 count = count << 1;
577 SCpnt->sglist_len = count;
578 max_sg = count / sizeof(struct scatterlist);
579 if(SCpnt->host->sg_tablesize < max_sg) max_sg = SCpnt->host->sg_tablesize;
580 sgpnt = (struct scatterlist * ) scsi_malloc(count);
581 memset(sgpnt, 0, count);
582 if (!sgpnt) {
583 printk("Warning - running *really* short on DMA buffers\n");
584 SCpnt->use_sg = 0;
585 this_count = SCpnt->request.current_nr_sectors;
586 buff = SCpnt->request.buffer;
587 } else {
588 buff = (char *) sgpnt;
589 counted = 0;
590 for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
591 count < SCpnt->use_sg && bh;
592 count++, bh = bhp) {
593
594 bhp = bh->b_reqnext;
595
596 if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
597 sgpnt[count].length += bh->b_size;
598 counted += bh->b_size >> 9;
599
600 if (((long) sgpnt[count].address) + sgpnt[count].length - 1 >
601 ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
602 !sgpnt[count].alt_address) {
603 sgpnt[count].alt_address = sgpnt[count].address;
604
605
606
607 if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
608 sgpnt[count].address = NULL;
609 } else {
610 sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
611 };
612
613
614
615
616 if(sgpnt[count].address == NULL){
617 #if 0
618 printk("Warning: Running low on SCSI DMA buffers");
619
620 while(--count >= 0){
621 if(sgpnt[count].alt_address)
622 scsi_free(sgpnt[count].address, sgpnt[count].length);
623 };
624 this_count = SCpnt->request.current_nr_sectors;
625 buff = SCpnt->request.buffer;
626 SCpnt->use_sg = 0;
627 scsi_free(sgpnt, SCpnt->sglist_len);
628 #endif
629 SCpnt->use_sg = count;
630 this_count = counted -= bh->b_size >> 9;
631 break;
632 };
633
634 };
635
636
637
638
639
640 if(bhp && CONTIGUOUS_BUFFERS(bh,bhp) && CLUSTERABLE_DEVICE(SCpnt)) {
641 char * tmp;
642
643 if (((long) sgpnt[count].address) + sgpnt[count].length +
644 bhp->b_size - 1 > ISA_DMA_THRESHOLD &&
645 (SCpnt->host->unchecked_isa_dma) &&
646 !sgpnt[count].alt_address) continue;
647
648 if(!sgpnt[count].alt_address) {count--; continue; }
649 if(dma_free_sectors > 10)
650 tmp = (char *) scsi_malloc(sgpnt[count].length + bhp->b_size);
651 else {
652 tmp = NULL;
653 max_sg = SCpnt->use_sg;
654 };
655 if(tmp){
656 scsi_free(sgpnt[count].address, sgpnt[count].length);
657 sgpnt[count].address = tmp;
658 count--;
659 continue;
660 };
661
662
663
664
665 if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
666 };
667 };
668
669 this_count = counted;
670
671 if(count < SCpnt->use_sg || SCpnt->use_sg > SCpnt->host->sg_tablesize){
672 bh = SCpnt->request.bh;
673 printk("Use sg, count %d %x %d\n", SCpnt->use_sg, count, dma_free_sectors);
674 printk("maxsg = %x, counted = %d this_count = %d\n", max_sg, counted, this_count);
675 while(bh){
676 printk("[%p %lx] ", bh->b_data, bh->b_size);
677 bh = bh->b_reqnext;
678 };
679 if(SCpnt->use_sg < 16)
680 for(count=0; count<SCpnt->use_sg; count++)
681 printk("{%d:%p %p %d} ", count,
682 sgpnt[count].address,
683 sgpnt[count].alt_address,
684 sgpnt[count].length);
685 panic("Ooops");
686 };
687
688 if (SCpnt->request.cmd == WRITE)
689 for(count=0; count<SCpnt->use_sg; count++)
690 if(sgpnt[count].alt_address)
691 memcpy(sgpnt[count].address, sgpnt[count].alt_address,
692 sgpnt[count].length);
693 };
694 };
695
696
697
698 if(SCpnt->use_sg == 0){
699 if (((long) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
700 (SCpnt->host->unchecked_isa_dma)) {
701 if(bounce_buffer)
702 buff = bounce_buffer;
703 else
704 buff = (char *) scsi_malloc(this_count << 9);
705 if(buff == NULL) {
706 this_count = SCpnt->request.current_nr_sectors;
707 buff = (char *) scsi_malloc(this_count << 9);
708 if(!buff) panic("Ran out of DMA buffers.");
709 };
710 if (SCpnt->request.cmd == WRITE)
711 memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
712 };
713 };
714 #ifdef DEBUG
715 printk("sd%c : %s %d/%d 512 byte blocks.\n", 'a' + MINOR(SCpnt->request.dev),
716 (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
717 this_count, SCpnt->request.nr_sectors);
718 #endif
719
720 cmd[1] = (SCpnt->lun << 5) & 0xe0;
721
722 if (rscsi_disks[dev].sector_size == 1024){
723 if(block & 1) panic("sd.c:Bad block number requested");
724 if(this_count & 1) panic("sd.c:Bad block number requested");
725 block = block >> 1;
726 this_count = this_count >> 1;
727 };
728
729 if (rscsi_disks[dev].sector_size == 256){
730 block = block << 1;
731 this_count = this_count << 1;
732 };
733
734 if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten)
735 {
736 if (this_count > 0xffff)
737 this_count = 0xffff;
738
739 cmd[0] += READ_10 - READ_6 ;
740 cmd[2] = (unsigned char) (block >> 24) & 0xff;
741 cmd[3] = (unsigned char) (block >> 16) & 0xff;
742 cmd[4] = (unsigned char) (block >> 8) & 0xff;
743 cmd[5] = (unsigned char) block & 0xff;
744 cmd[6] = cmd[9] = 0;
745 cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
746 cmd[8] = (unsigned char) this_count & 0xff;
747 }
748 else
749 {
750 if (this_count > 0xff)
751 this_count = 0xff;
752
753 cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
754 cmd[2] = (unsigned char) ((block >> 8) & 0xff);
755 cmd[3] = (unsigned char) block & 0xff;
756 cmd[4] = (unsigned char) this_count;
757 cmd[5] = 0;
758 }
759
760
761
762
763
764
765
766 SCpnt->transfersize = rscsi_disks[dev].sector_size;
767 SCpnt->underflow = this_count << 9;
768 scsi_do_cmd (SCpnt, (void *) cmd, buff,
769 this_count * rscsi_disks[dev].sector_size,
770 rw_intr,
771 (SCpnt->device->type == TYPE_DISK ?
772 SD_TIMEOUT : SD_MOD_TIMEOUT),
773 MAX_RETRIES);
774 }
775
776 static int check_scsidisk_media_change(dev_t full_dev){
777 int retval;
778 int target;
779 struct inode inode;
780 int flag = 0;
781
782 target = DEVICE_NR(MINOR(full_dev));
783
784 if (target >= sd_template.dev_max ||
785 !rscsi_disks[target].device) {
786 printk("SCSI disk request error: invalid device.\n");
787 return 0;
788 };
789
790 if(!rscsi_disks[target].device->removable) return 0;
791
792 inode.i_rdev = full_dev;
793 retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
794
795 if(retval){
796
797
798
799
800 rscsi_disks[target].device->changed = 1;
801 return 1;
802
803 };
804
805 retval = rscsi_disks[target].device->changed;
806 if(!flag) rscsi_disks[target].device->changed = 0;
807 return retval;
808 }
809
810 static void sd_init_done (Scsi_Cmnd * SCpnt)
811 {
812 struct request * req;
813
814 req = &SCpnt->request;
815 req->dev = 0xfffe;
816
817 if (req->sem != NULL) {
818 up(req->sem);
819 }
820 }
821
/*
 * Interrogate one disk at init time: wait for the unit to become ready
 * (spinning it up when necessary), then issue READ CAPACITY to learn
 * capacity and hardware sector size. Falls back to 512-byte/1GB
 * defaults if READ CAPACITY fails. Returns the disk index.
 */
static int sd_init_onedisk(int i)
{
    unsigned char cmd[10];
    unsigned char *buffer;
    char spintime;
    int the_result, retries;
    Scsi_Cmnd * SCpnt;

    /* NOTE(review): scsi_malloc() result is not NULL-checked here. */
    SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
    buffer = (unsigned char *) scsi_malloc(512);

    spintime = 0;

    /* Only the init task (no scheduler yet) takes this busy-wait path. */
    if (current == task[0]){
        do{
            cmd[0] = TEST_UNIT_READY;
            cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
            memset ((void *) &cmd[2], 0, 8);
            SCpnt->request.dev = 0xffff;   /* marker: command in flight */
            SCpnt->cmd_len = 0;
            SCpnt->sense_buffer[0] = 0;
            SCpnt->sense_buffer[2] = 0;

            scsi_do_cmd (SCpnt,
                         (void *) cmd, (void *) buffer,
                         512, sd_init_done, SD_TIMEOUT,
                         MAX_RETRIES);

            /* sd_init_done() sets request.dev to 0xfffe on completion. */
            while(SCpnt->request.dev != 0xfffe);

            the_result = SCpnt->result;

            /* Fixed disk reporting NOT READY: try to spin it up once. */
            if(the_result && !rscsi_disks[i].device->removable &&
               SCpnt->sense_buffer[2] == NOT_READY) {
                int time1;
                if(!spintime){
                    printk( "sd%c: Spinning up disk...", 'a' + i );
                    cmd[0] = START_STOP;
                    cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
                    cmd[1] |= 1;   /* immediate bit */
                    memset ((void *) &cmd[2], 0, 8);
                    cmd[4] = 1;    /* START the unit */
                    SCpnt->request.dev = 0xffff;
                    SCpnt->cmd_len = 0;
                    SCpnt->sense_buffer[0] = 0;
                    SCpnt->sense_buffer[2] = 0;

                    scsi_do_cmd (SCpnt,
                                 (void *) cmd, (void *) buffer,
                                 512, sd_init_done, SD_TIMEOUT,
                                 MAX_RETRIES);

                    while(SCpnt->request.dev != 0xfffe);

                    /* NOTE(review): spintime is a char, so storing jiffies
                     * here truncates — confirm intended on this kernel. */
                    spintime = jiffies;
                };

                /* busy-wait roughly one second between retries */
                time1 = jiffies;
                while(jiffies < time1 + HZ);
                printk( "." );
            };
        } while(the_result && spintime && spintime+5000 > jiffies);
        if (spintime) {
            if (the_result)
                printk( "not responding...\n" );
            else
                printk( "ready\n" );
        }
    };

    /* READ CAPACITY, up to 3 attempts. */
    retries = 3;
    do {
        cmd[0] = READ_CAPACITY;
        cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
        memset ((void *) &cmd[2], 0, 8);
        memset ((void *) buffer, 0, 8);
        SCpnt->request.dev = 0xffff;   /* marker: command in flight */
        SCpnt->cmd_len = 0;
        SCpnt->sense_buffer[0] = 0;
        SCpnt->sense_buffer[2] = 0;

        scsi_do_cmd (SCpnt,
                     (void *) cmd, (void *) buffer,
                     8, sd_init_done, SD_TIMEOUT,
                     MAX_RETRIES);

        /* Init task polls; every other context sleeps on a semaphore. */
        if (current == task[0])
            while(SCpnt->request.dev != 0xfffe);
        else
            if (SCpnt->request.dev != 0xfffe){
                struct semaphore sem = MUTEX_LOCKED;
                SCpnt->request.sem = &sem;
                down(&sem);

                /* belt and braces: ensure completion really happened */
                while (SCpnt->request.dev != 0xfffe) schedule();
            };

        the_result = SCpnt->result;
        retries--;

    } while(the_result && retries);

    SCpnt->request.dev = -1;   /* release the command slot */

    wake_up(&SCpnt->device->device_wait);

    if (the_result)
    {
        /* READ CAPACITY failed: report and fall back to safe defaults. */
        printk ("sd%c : READ CAPACITY failed.\n"
                "sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
                'a' + i, 'a' + i,
                status_byte(the_result),
                msg_byte(the_result),
                host_byte(the_result),
                driver_byte(the_result)
                );
        if (driver_byte(the_result) & DRIVER_SENSE)
            printk("sd%c : extended sense code = %1x \n", 'a' + i, SCpnt->sense_buffer[2] & 0xf);
        else
            printk("sd%c : sense not available. \n", 'a' + i);

        printk("sd%c : block size assumed to be 512 bytes, disk size 1GB. \n", 'a' + i);
        rscsi_disks[i].capacity = 0x1fffff;
        rscsi_disks[i].sector_size = 512;

        /* A not-ready removable drive likely just has no medium. */
        if(rscsi_disks[i].device->removable &&
           SCpnt->sense_buffer[2] == NOT_READY)
            rscsi_disks[i].device->changed = 1;

    }
    else
    {
        /* READ CAPACITY data: big-endian last LBA, then block length. */
        rscsi_disks[i].capacity = (buffer[0] << 24) |
            (buffer[1] << 16) |
            (buffer[2] << 8) |
            buffer[3];

        rscsi_disks[i].sector_size = (buffer[4] << 24) |
            (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];

        if (rscsi_disks[i].sector_size != 512 &&
            rscsi_disks[i].sector_size != 1024 &&
            rscsi_disks[i].sector_size != 256)
        {
            printk ("sd%c : unsupported sector size %d.\n",
                    'a' + i, rscsi_disks[i].sector_size);
            if(rscsi_disks[i].device->removable){
                rscsi_disks[i].capacity = 0;
            } else {
                printk ("scsi : deleting disk entry.\n");
                rscsi_disks[i].device = NULL;
                sd_template.nr_dev--;
                return i;
            };
        }
        {
            /* Publish the hardware sector size for all 16 minors. */
            int m;
            int hard_sector = rscsi_disks[i].sector_size;

            for (m=i<<4; m<((i+1)<<4); m++){
                sd_hardsizes[m] = hard_sector;
            }
            printk ("SCSI Hardware sector size is %d bytes on device sd%c\n"
                    ,hard_sector,i+'a');
        }
        /* Internally, capacity is kept in 512-byte sectors. */
        if(rscsi_disks[i].sector_size == 1024)
            rscsi_disks[i].capacity <<= 1;
        if(rscsi_disks[i].sector_size == 256)
            rscsi_disks[i].capacity >>= 1;
    }

    rscsi_disks[i].ten = 1;    /* try 10-byte READ/WRITE commands */
    rscsi_disks[i].remap = 1;
    scsi_free(buffer, 512);
    return i;
}
1028
1029
1030
1031
1032
1033
1034
1035 static void sd_init()
1036 {
1037 int i;
1038 static int sd_registered = 0;
1039
1040 if (sd_template.dev_noticed == 0) return;
1041
1042 if(!sd_registered) {
1043 if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
1044 printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
1045 return;
1046 }
1047 sd_registered++;
1048 }
1049
1050
1051 if(scsi_loadable_module_flag) return;
1052
1053 sd_template.dev_max = sd_template.dev_noticed;
1054
1055 rscsi_disks = (Scsi_Disk *)
1056 scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk));
1057 memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));
1058
1059 sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1060 sizeof(int));
1061 memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));
1062
1063 sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1064 sizeof(int));
1065 sd_hardsizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1066 sizeof(int));
1067 for(i=0;i<(sd_template.dev_max << 4);i++){
1068 sd_blocksizes[i] = 1024;
1069 sd_hardsizes[i] = 512;
1070 }
1071 blksize_size[MAJOR_NR] = sd_blocksizes;
1072 hardsect_size[MAJOR_NR] = sd_hardsizes;
1073 sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
1074 sizeof(struct hd_struct));
1075
1076
1077 sd_gendisk.max_nr = sd_template.dev_max;
1078 sd_gendisk.part = sd;
1079 sd_gendisk.sizes = sd_sizes;
1080 sd_gendisk.real_devices = (void *) rscsi_disks;
1081
1082 }
1083
1084 static void sd_finish()
1085 {
1086 int i;
1087
1088 for (i = 0; i < sd_template.dev_max; ++i)
1089 if (rscsi_disks[i].device) i = sd_init_onedisk(i);
1090
1091 blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
1092
1093
1094
1095
1096 if(rscsi_disks[0].device->host->sg_tablesize)
1097 read_ahead[MAJOR_NR] = 120;
1098
1099 else
1100 read_ahead[MAJOR_NR] = 4;
1101
1102 sd_gendisk.next = gendisk_head;
1103 gendisk_head = &sd_gendisk;
1104 return;
1105 }
1106
1107 static int sd_detect(Scsi_Device * SDp){
1108
1109 if(scsi_loadable_module_flag) return 0;
1110 if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1111
1112 printk("Detected scsi disk sd%c at scsi%d, id %d, lun %d\n",
1113 'a'+ (sd_template.dev_noticed++),
1114 SDp->host->host_no , SDp->id, SDp->lun);
1115
1116 return 1;
1117
1118 }
1119
1120 static void sd_attach(Scsi_Device * SDp){
1121 Scsi_Disk * dpnt;
1122 int i;
1123
1124
1125 if(scsi_loadable_module_flag) return;
1126 if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return;
1127
1128 if(sd_template.nr_dev >= sd_template.dev_max)
1129 panic ("scsi_devices corrupt (sd)");
1130
1131 for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
1132 if(!dpnt->device) break;
1133
1134 if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)");
1135
1136 SDp->scsi_request_fn = do_sd_request;
1137 rscsi_disks[i].device = SDp;
1138 sd_template.nr_dev++;
1139 };
1140
/* Helper macros for the generic revalidation logic below; they all
 * expand in terms of the local variable "target". */
#define DEVICE_BUSY rscsi_disks[target].device->busy
#define USAGE rscsi_disks[target].device->access_count
#define CAPACITY rscsi_disks[target].capacity
#define MAYBE_REINIT sd_init_onedisk(target)
#define GENDISK_STRUCT sd_gendisk
1146
1147
1148
1149
1150
1151
1152
1153
/*
 * Re-validate a disk after a (possible) media change: flush and
 * invalidate every partition, re-probe the drive, and rebuild the
 * partition table. Returns 0 on success, -EBUSY if the device is in
 * use by more than `maxusage` openers.
 */
int revalidate_scsidisk(int dev, int maxusage){
    int target, major;
    struct gendisk * gdev;
    unsigned long flags;
    int max_p;
    int start;
    int i;

    target = DEVICE_NR(MINOR(dev));
    gdev = &GENDISK_STRUCT;

    /* Claim the device atomically; bail out if it is in use. */
    save_flags(flags);
    cli();
    if (DEVICE_BUSY || USAGE > maxusage) {
        restore_flags(flags);
        printk("Device busy for revalidation (usage=%d)\n", USAGE);
        return -EBUSY;
    };
    DEVICE_BUSY = 1;
    restore_flags(flags);

    max_p = gdev->max_p;
    start = target << gdev->minor_shift;
    major = MAJOR_NR << 8;   /* combined with minor bits into a dev_t */

    /* Drop cached state and zero out every partition entry. */
    for (i=max_p - 1; i >=0 ; i--) {
        sync_dev(major | start | i);
        invalidate_inodes(major | start | i);
        invalidate_buffers(major | start | i);
        gdev->part[start+i].start_sect = 0;
        gdev->part[start+i].nr_sects = 0;
    };

#ifdef MAYBE_REINIT
    /* Re-interrogate the drive (capacity, sector size). */
    MAYBE_REINIT;
#endif

    gdev->part[start].nr_sects = CAPACITY;
    resetup_one_dev(gdev, target);   /* rescan the partition table */

    DEVICE_BUSY = 0;
    return 0;
}
1197
/* file_operations revalidate hook: allow no other users (maxusage 0). */
static int fop_revalidate_scsidisk(dev_t dev){
    return revalidate_scsidisk(dev, 0);
}
1201