This source file includes the following definitions:
- sd_open
- sd_release
- sd_geninit
- rw_intr
- do_sd_request
- requeue_sd_request
- check_scsidisk_media_change
- sd_init_done
- sd_init_onedisk
- sd_init
- sd_finish
- sd_detect
- sd_attach
- revalidate_scsidisk
- fop_revalidate_scsidisk
- sd_detach
- init_module
- cleanup_module
1 /*
2  * sd.c -- top-level SCSI disk driver.
3  *
4  * Handles TYPE_DISK and TYPE_MOD (magneto-optical) devices: the block
5  * device entry points, request queueing and scatter-gather setup,
6  * READ/WRITE command construction and completion handling, capacity
7  * probing, and partition revalidation.
8  */
9
10
11
12
13
14
15
16
17
18
19 #include <linux/module.h>
20 #ifdef MODULE
21
22
23
24
25
26 #define MODULE_FLAG 1
27 #else
28 #define MODULE_FLAG scsi_loadable_module_flag
29 #endif
30
31 #include <linux/fs.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/mm.h>
35 #include <linux/string.h>
36 #include <linux/errno.h>
37 #include <asm/system.h>
38
39 #define MAJOR_NR SCSI_DISK_MAJOR
40 #include <linux/blk.h>
41 #include "scsi.h"
42 #include "hosts.h"
43 #include "sd.h"
44 #include "scsi_ioctl.h"
45 #include "constants.h"
46
47 #include <linux/genhd.h>
48
49
50
51
52
53 #define MAX_RETRIES 5
54
55
56
57
58
59 #define SD_TIMEOUT (7 * HZ)
60 #define SD_MOD_TIMEOUT (8 * HZ)
61
62 #define CLUSTERABLE_DEVICE(SC) (SC->host->use_clustering && \
63 SC->device->type != TYPE_MOD)
64
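/*
 * Global per-disk state.  sd[] holds one hd_struct (start sector and
 * length) per minor number; rscsi_disks[] is the driver's own per-disk
 * record (capacity, sector size, ready and write-protect flags).  The
 * sd_sizes, sd_blocksizes and sd_hardsizes arrays are handed to the
 * block layer in sd_init() below.
 */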
65 struct hd_struct * sd;
66
67 Scsi_Disk * rscsi_disks = NULL;
68 static int * sd_sizes;
69 static int * sd_blocksizes;
70 static int * sd_hardsizes;
71
72 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
73
74 static int check_scsidisk_media_change(kdev_t);
75 static int fop_revalidate_scsidisk(kdev_t);
76
77 static int sd_init_onedisk(int);
78
79 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
80
81 static int sd_init(void);
82 static void sd_finish(void);
83 static int sd_attach(Scsi_Device *);
84 static int sd_detect(Scsi_Device *);
85 static void sd_detach(Scsi_Device *);
86
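/*
 * Driver template registered with the SCSI mid-layer.  The initializers
 * appear to map to: list link, name ("disk"), tag ("sd"), usage-count
 * pointer, SCSI device type, block major, the nr_dev/dev_noticed/dev_max
 * counters, a flag marking this as a block-device driver, and the
 * detect/init/finish/attach/detach entry points.
 */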
87 struct Scsi_Device_Template sd_template =
88 { NULL, "disk", "sd", NULL, TYPE_DISK,
89 SCSI_DISK_MAJOR, 0, 0, 0, 1,
90 sd_detect, sd_init,
91 sd_finish, sd_attach, sd_detach
92 };
93
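/*
 * Open entry point: spin until any pending revalidation releases the
 * device, handle removable media (media-change check, door lock on the
 * first open, -EROFS for write opens of write-protected media), refuse
 * zero-sized partitions, and bump the device, host and template usage
 * counts so that a loaded module cannot be removed while the disk is
 * open.
 */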
94 static int sd_open(struct inode * inode, struct file * filp)
95 {
96 int target;
97 target = DEVICE_NR(inode->i_rdev);
98
99 if(target >= sd_template.dev_max || !rscsi_disks[target].device)
100 return -ENXIO;
101
102
103
104
105
106
107
108 while (rscsi_disks[target].device->busy)
109 barrier();
110 if(rscsi_disks[target].device->removable) {
111 check_disk_change(inode->i_rdev);
112
113
114
115
116 if ( !rscsi_disks[target].ready ) {
117 return -ENXIO;
118 }
119
120
121
122
123
124
125 if ( (rscsi_disks[target].write_prot) && (filp->f_mode & 2) ) {
126 return -EROFS;
127 }
128
129 if(!rscsi_disks[target].device->access_count)
130 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
131 };
132
133
134
135
136
137 if(sd_sizes[MINOR(inode->i_rdev)] == 0)
138 return -ENXIO;
139
140 rscsi_disks[target].device->access_count++;
141 if (rscsi_disks[target].device->host->hostt->usage_count)
142 (*rscsi_disks[target].device->host->hostt->usage_count)++;
143 if(sd_template.usage_count) (*sd_template.usage_count)++;
144 return 0;
145 }
146
147 static void sd_release(struct inode * inode, struct file * file)
148 {
149 int target;
150 sync_dev(inode->i_rdev);
151
152 target = DEVICE_NR(inode->i_rdev);
153
154 rscsi_disks[target].device->access_count--;
155 if (rscsi_disks[target].device->host->hostt->usage_count)
156 (*rscsi_disks[target].device->host->hostt->usage_count)--;
157 if(sd_template.usage_count) (*sd_template.usage_count)--;
158
159 if(rscsi_disks[target].device->removable) {
160 if(!rscsi_disks[target].device->access_count)
161 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
162 }
163 }
164
165 static void sd_geninit(struct gendisk *);
166
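/*
 * Block device file operations.  The NULL slots presumably correspond
 * to the file_operations members this driver does not implement
 * (lseek, readdir, select, mmap, fasync); reads and writes go through
 * the generic block_read/block_write paths, and the last two entries
 * hook media-change detection and partition revalidation.
 */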
167 static struct file_operations sd_fops = {
168 NULL,
169 block_read,
170 block_write,
171 NULL,
172 NULL,
173 sd_ioctl,
174 NULL,
175 sd_open,
176 sd_release,
177 block_fsync,
178 NULL,
179 check_scsidisk_media_change,
180 fop_revalidate_scsidisk
181 };
182
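/*
 * Generic disk description.  A minor_shift of 4 gives 1 << 4 = 16
 * minors per disk (the whole device plus 15 partitions); the part,
 * sizes and real-device pointers are filled in by sd_init(), and
 * nr_real is bumped as disks are attached.
 */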
183 static struct gendisk sd_gendisk = {
184 MAJOR_NR,
185 "sd",
186 4,
187 1 << 4,
188 0,
189 sd_geninit,
190 NULL,
191 NULL,
192 0,
193 NULL,
194 NULL
195 };
196
197 static void sd_geninit (struct gendisk *ignored)
198 {
199 int i;
200
201 for (i = 0; i < sd_template.dev_max; ++i)
202 if(rscsi_disks[i].device)
203 sd[i << 4].nr_sects = rscsi_disks[i].capacity;
204 #if 0
205
206 sd_gendisk.nr_real = sd_template.dev_max;
207 #endif
208 }
209
210
211
212
213
214
215
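/*
 * Completion handler for READ/WRITE commands.  On success, data read
 * into bounce buffers is copied back to the original buffers, any
 * scatter-gather list and DMA buffers are freed, and the request is
 * completed (or the remainder requeued).  On failure, the sense data
 * is examined: UNIT ATTENTION on removable media marks a media change,
 * and ILLEGAL REQUEST makes the driver fall back from 10-byte to
 * 6-byte READ/WRITE commands before retrying.
 */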
216 static void rw_intr (Scsi_Cmnd *SCpnt)
217 {
218 int result = SCpnt->result;
219 int this_count = SCpnt->bufflen >> 9;
220
221 #ifdef DEBUG
222 printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.rq_dev),
223 SCpnt->host->host_no, result);
224 #endif
225
226
227
228
229
230
231
232 if (!result) {
233
234 #ifdef DEBUG
235 printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.rq_dev),
236 SCpnt->request.nr_sectors);
237 printk("use_sg is %d\n ",SCpnt->use_sg);
238 #endif
239 if (SCpnt->use_sg) {
240 struct scatterlist * sgpnt;
241 int i;
242 sgpnt = (struct scatterlist *) SCpnt->buffer;
243 for(i=0; i<SCpnt->use_sg; i++) {
244 #ifdef DEBUG
245 printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address,
246 sgpnt[i].length);
247 #endif
248 if (sgpnt[i].alt_address) {
249 if (SCpnt->request.cmd == READ)
250 memcpy(sgpnt[i].alt_address, sgpnt[i].address,
251 sgpnt[i].length);
252 scsi_free(sgpnt[i].address, sgpnt[i].length);
253 };
254 };
255
256
257 scsi_free(SCpnt->buffer, SCpnt->sglist_len);
258 } else {
259 if (SCpnt->buffer != SCpnt->request.buffer) {
260 #ifdef DEBUG
261 printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
262 SCpnt->bufflen);
263 #endif
264 if (SCpnt->request.cmd == READ)
265 memcpy(SCpnt->request.buffer, SCpnt->buffer,
266 SCpnt->bufflen);
267 scsi_free(SCpnt->buffer, SCpnt->bufflen);
268 };
269 };
270
271
272
273
274
275 if (SCpnt->request.nr_sectors > this_count)
276 {
277 SCpnt->request.errors = 0;
278
279 if (!SCpnt->request.bh)
280 {
281 #ifdef DEBUG
282 printk("sd%c : handling page request, no buffer\n",
283 'a' + MINOR(SCpnt->request.rq_dev));
284 #endif
285
286
287
288
289 panic("sd.c: linked page request (%lx %x)",
290 SCpnt->request.sector, this_count);
291 }
292 }
293 SCpnt = end_scsi_request(SCpnt, 1, this_count);
294 requeue_sd_request(SCpnt);
295 return;
296 }
297
298
299 if (SCpnt->use_sg) {
300 struct scatterlist * sgpnt;
301 int i;
302 sgpnt = (struct scatterlist *) SCpnt->buffer;
303 for(i=0; i<SCpnt->use_sg; i++) {
304 #ifdef DEBUG
305 printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
306 SCpnt->bufflen);
307 #endif
308 if (sgpnt[i].alt_address) {
309 scsi_free(sgpnt[i].address, sgpnt[i].length);
310 };
311 };
312 scsi_free(SCpnt->buffer, SCpnt->sglist_len);
313 } else {
314 #ifdef DEBUG
315 printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
316 SCpnt->bufflen);
317 #endif
318 if (SCpnt->buffer != SCpnt->request.buffer)
319 scsi_free(SCpnt->buffer, SCpnt->bufflen);
320 };
321
322
323
324
325
326
327
328 if (driver_byte(result) != 0) {
329 if (suggestion(result) == SUGGEST_REMAP) {
330 #ifdef REMAP
331 /*
332  * Remapping is not implemented: a read would still fail after being
333  * remapped, and a write would simply call the strategy routine again.
334  */
335 if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].remap)
336 {
337 result = 0;
338 }
339
340 #endif
341 }
342
343 if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
344 if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
345 if(rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->removable) {
346
347
348
349 rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->changed = 1;
350 SCpnt = end_scsi_request(SCpnt, 0, this_count);
351 requeue_sd_request(SCpnt);
352 return;
353 }
354 else
355 {
356
357
358
359
360
361 requeue_sd_request(SCpnt);
362 return;
363 }
364 }
365 }
366
367
368
369
370
371
372
373
374
375 if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
376 if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten) {
377 rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten = 0;
378 requeue_sd_request(SCpnt);
379 result = 0;
380 } else {
381
382 }
383 }
384 }
385 if (result) {
386 printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n",
387 rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->host->host_no,
388 rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->channel,
389 rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->id,
390 rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->lun, result);
391
392 if (driver_byte(result) & DRIVER_SENSE)
393 print_sense("sd", SCpnt);
394 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
395 requeue_sd_request(SCpnt);
396 return;
397 }
398 }
399
400
401
402
403
404
405
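/*
 * Request function for the sd major.  Pulls requests off the queue,
 * allocates a Scsi_Cmnd for the target device (re-locking the door of
 * removable media after a host reset), and hands the command to
 * requeue_sd_request().  If no command structure is free for the head
 * of the queue, the other queued requests are scanned so a different
 * disk can be kept busy.
 */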
406 static void do_sd_request (void)
407 {
408 Scsi_Cmnd * SCpnt = NULL;
409 Scsi_Device * SDev;
410 struct request * req = NULL;
411 unsigned long flags;
412 int flag = 0;
413
414 save_flags(flags);
415 while (1==1){
416 cli();
417 if (CURRENT != NULL && CURRENT->rq_status == RQ_INACTIVE) {
418 restore_flags(flags);
419 return;
420 };
421
422 INIT_SCSI_REQUEST;
423 SDev = rscsi_disks[DEVICE_NR(CURRENT->rq_dev)].device;
424
425
426
427
428
429
430 if( SDev->was_reset )
431 {
432
433
434
435
436
437
438 if( SDev->removable && !intr_count )
439 {
440 scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0);
441 }
442 SDev->was_reset = 0;
443 }
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458 if (flag++ == 0)
459 SCpnt = allocate_device(&CURRENT,
460 rscsi_disks[DEVICE_NR(CURRENT->rq_dev)].device, 0);
461 else SCpnt = NULL;
462
463
464
465
466
467
468 restore_flags(flags);
469
470
471
472
473
474
475
476
477
478
479 if (!SCpnt && sd_template.nr_dev > 1){
480 struct request *req1;
481 req1 = NULL;
482 cli();
483 req = CURRENT;
484 while(req){
485 SCpnt = request_queueable(req, rscsi_disks[DEVICE_NR(req->rq_dev)].device);
486 if(SCpnt) break;
487 req1 = req;
488 req = req->next;
489 };
490 if (SCpnt && req->rq_status == RQ_INACTIVE) {
491 if (req == CURRENT)
492 CURRENT = CURRENT->next;
493 else
494 req1->next = req->next;
495 };
496 restore_flags(flags);
497 };
498
499 if (!SCpnt) return;
500
501
502 requeue_sd_request(SCpnt);
503 };
504 }
505
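/*
 * Build and issue the actual SCSI READ/WRITE for one request: validate
 * the block range against the partition table and any media change,
 * assemble either a contiguous transfer, a bounce buffer (for hosts
 * limited to ISA DMA addresses) or a scatter-gather list, convert the
 * block number for 256- and 1024-byte sectors, choose a 6-byte or
 * 10-byte CDB depending on block number and transfer length, and send
 * it with scsi_do_cmd(), completing through rw_intr().
 */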
506 static void requeue_sd_request (Scsi_Cmnd * SCpnt)
507 {
508 int dev, devm, block, this_count;
509 unsigned char cmd[10];
510 int bounce_size, contiguous;
511 int max_sg;
512 struct buffer_head * bh, *bhp;
513 char * buff, *bounce_buffer;
514
515 repeat:
516
517 if(!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) {
518 do_sd_request();
519 return;
520 }
521
522 devm = MINOR(SCpnt->request.rq_dev);
523 dev = DEVICE_NR(SCpnt->request.rq_dev);
524
525 block = SCpnt->request.sector;
526 this_count = 0;
527
528 #ifdef DEBUG
529 printk("Doing sd request, dev = %d, block = %d\n", devm, block);
530 #endif
531
532 if (devm >= (sd_template.dev_max << 4) ||
533 !rscsi_disks[dev].device ||
534 block + SCpnt->request.nr_sectors > sd[devm].nr_sects)
535 {
536 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
537 goto repeat;
538 }
539
540 block += sd[devm].start_sect;
541
542 if (rscsi_disks[dev].device->changed)
543 {
544
545
546
547
548
549 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
550 goto repeat;
551 }
552
553 #ifdef DEBUG
554 printk("sd%c : real dev = /dev/sd%c, block = %d\n",
555 'a' + devm, 'a' + dev, block);
556 #endif
557
558
559
560
561
562
563
564
565
566
567
568
569 if (rscsi_disks[dev].sector_size == 1024)
570 if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
571 printk("sd.c:Bad block number requested");
572 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
573 goto repeat;
574 }
575
576 switch (SCpnt->request.cmd)
577 {
578 case WRITE :
579 if (!rscsi_disks[dev].device->writeable)
580 {
581 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
582 goto repeat;
583 }
584 cmd[0] = WRITE_6;
585 break;
586 case READ :
587 cmd[0] = READ_6;
588 break;
589 default :
590 panic ("Unknown sd command %d\n", SCpnt->request.cmd);
591 }
592
593 SCpnt->this_count = 0;
594
595
596
597
598 contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
599 bounce_buffer = NULL;
600 bounce_size = (SCpnt->request.nr_sectors << 9);
601
602
603
604
605
606 if (contiguous && SCpnt->request.bh &&
607 ((long) SCpnt->request.bh->b_data)
608 + (SCpnt->request.nr_sectors << 9) - 1 > ISA_DMA_THRESHOLD
609 && SCpnt->host->unchecked_isa_dma) {
610 if(((long) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
611 bounce_buffer = (char *) scsi_malloc(bounce_size);
612 if(!bounce_buffer) contiguous = 0;
613 };
614
615 if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
616 for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp,
617 bhp = bhp->b_reqnext) {
618 if(!CONTIGUOUS_BUFFERS(bh,bhp)) {
619 if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
620 contiguous = 0;
621 break;
622 }
623 };
624 if (!SCpnt->request.bh || contiguous) {
625
626
627 this_count = SCpnt->request.nr_sectors;
628 buff = SCpnt->request.buffer;
629 SCpnt->use_sg = 0;
630
631 } else if (SCpnt->host->sg_tablesize == 0 ||
632 (need_isa_buffer && dma_free_sectors <= 10)) {
633
634
635
636
637
638
639
640
641 if (SCpnt->host->sg_tablesize != 0 &&
642 need_isa_buffer &&
643 dma_free_sectors <= 10)
644 printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n");
645
646 this_count = SCpnt->request.current_nr_sectors;
647 buff = SCpnt->request.buffer;
648 SCpnt->use_sg = 0;
649
650 } else {
651
652
653 struct scatterlist * sgpnt;
654 int count, this_count_max;
655 int counted;
656
657 bh = SCpnt->request.bh;
658 this_count = 0;
659 this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
660 count = 0;
661 bhp = NULL;
662 while(bh) {
663 if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
664 if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
665 !CLUSTERABLE_DEVICE(SCpnt) ||
666 (SCpnt->host->unchecked_isa_dma &&
667 ((unsigned long) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
668 if (count < SCpnt->host->sg_tablesize) count++;
669 else break;
670 };
671 this_count += (bh->b_size >> 9);
672 bhp = bh;
673 bh = bh->b_reqnext;
674 };
675 #if 0
676 if(SCpnt->host->unchecked_isa_dma &&
677 ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
678 #endif
679 SCpnt->use_sg = count;
680 count = 512;
681 while( count < (SCpnt->use_sg * sizeof(struct scatterlist)))
682 count = count << 1;
683 SCpnt->sglist_len = count;
684 max_sg = count / sizeof(struct scatterlist);
685 if(SCpnt->host->sg_tablesize < max_sg)
686 max_sg = SCpnt->host->sg_tablesize;
687 sgpnt = (struct scatterlist * ) scsi_malloc(count);
688 if (!sgpnt) {
689 printk("Warning - running *really* short on DMA buffers\n");
690 SCpnt->use_sg = 0;
691 this_count = SCpnt->request.current_nr_sectors;
692 buff = SCpnt->request.buffer;
693 } else {
694 memset(sgpnt, 0, count);
695
696
697 buff = (char *) sgpnt;
698 counted = 0;
699 for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
700 count < SCpnt->use_sg && bh;
701 count++, bh = bhp) {
702
703 bhp = bh->b_reqnext;
704
705 if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
706 sgpnt[count].length += bh->b_size;
707 counted += bh->b_size >> 9;
708
709 if (((long) sgpnt[count].address) + sgpnt[count].length - 1 >
710 ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
711 !sgpnt[count].alt_address) {
712 sgpnt[count].alt_address = sgpnt[count].address;
713
714
715
716
717 if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
718 sgpnt[count].address = NULL;
719 } else {
720 sgpnt[count].address =
721 (char *) scsi_malloc(sgpnt[count].length);
722 };
723
724
725
726
727
728 if(sgpnt[count].address == NULL){
729 #if 0
730 printk("Warning: Running low on SCSI DMA buffers");
731
732 while(--count >= 0){
733 if(sgpnt[count].alt_address)
734 scsi_free(sgpnt[count].address,
735 sgpnt[count].length);
736 };
737 this_count = SCpnt->request.current_nr_sectors;
738 buff = SCpnt->request.buffer;
739 SCpnt->use_sg = 0;
740 scsi_free(sgpnt, SCpnt->sglist_len);
741 #endif
742 SCpnt->use_sg = count;
743 this_count = counted -= bh->b_size >> 9;
744 break;
745 };
746
747 };
748
749
750
751
752
753 if(bhp && CONTIGUOUS_BUFFERS(bh,bhp)
754 && CLUSTERABLE_DEVICE(SCpnt)) {
755 char * tmp;
756
757 if (((long) sgpnt[count].address) + sgpnt[count].length +
758 bhp->b_size - 1 > ISA_DMA_THRESHOLD &&
759 (SCpnt->host->unchecked_isa_dma) &&
760 !sgpnt[count].alt_address) continue;
761
762 if(!sgpnt[count].alt_address) {count--; continue; }
763 if(dma_free_sectors > 10)
764 tmp = (char *) scsi_malloc(sgpnt[count].length
765 + bhp->b_size);
766 else {
767 tmp = NULL;
768 max_sg = SCpnt->use_sg;
769 };
770 if(tmp){
771 scsi_free(sgpnt[count].address, sgpnt[count].length);
772 sgpnt[count].address = tmp;
773 count--;
774 continue;
775 };
776
777
778
779
780
781 if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
782 };
783 };
784
785
786 this_count = counted;
787
788 if(count < SCpnt->use_sg || SCpnt->use_sg
789 > SCpnt->host->sg_tablesize){
790 bh = SCpnt->request.bh;
791 printk("Use sg, count %d %x %d\n",
792 SCpnt->use_sg, count, dma_free_sectors);
793 printk("maxsg = %x, counted = %d this_count = %d\n",
794 max_sg, counted, this_count);
795 while(bh){
796 printk("[%p %lx] ", bh->b_data, bh->b_size);
797 bh = bh->b_reqnext;
798 };
799 if(SCpnt->use_sg < 16)
800 for(count=0; count<SCpnt->use_sg; count++)
801 printk("{%d:%p %p %d} ", count,
802 sgpnt[count].address,
803 sgpnt[count].alt_address,
804 sgpnt[count].length);
805 panic("Ooops");
806 };
807
808 if (SCpnt->request.cmd == WRITE)
809 for(count=0; count<SCpnt->use_sg; count++)
810 if(sgpnt[count].alt_address)
811 memcpy(sgpnt[count].address, sgpnt[count].alt_address,
812 sgpnt[count].length);
813 };
814 };
815
816
817
818 if(SCpnt->use_sg == 0){
819 if (((long) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
820 (SCpnt->host->unchecked_isa_dma)) {
821 if(bounce_buffer)
822 buff = bounce_buffer;
823 else
824 buff = (char *) scsi_malloc(this_count << 9);
825 if(buff == NULL) {
826 this_count = SCpnt->request.current_nr_sectors;
827 buff = (char *) scsi_malloc(this_count << 9);
828 if(!buff) panic("Ran out of DMA buffers.");
829 };
830 if (SCpnt->request.cmd == WRITE)
831 memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
832 };
833 };
834 #ifdef DEBUG
835 printk("sd%c : %s %d/%d 512 byte blocks.\n",
836 'a' + devm,
837 (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
838 this_count, SCpnt->request.nr_sectors);
839 #endif
840
841 cmd[1] = (SCpnt->lun << 5) & 0xe0;
842
843 if (rscsi_disks[dev].sector_size == 1024){
844 if(block & 1) panic("sd.c:Bad block number requested");
845 if(this_count & 1) panic("sd.c:Bad block number requested");
846 block = block >> 1;
847 this_count = this_count >> 1;
848 };
849
850 if (rscsi_disks[dev].sector_size == 256){
851 block = block << 1;
852 this_count = this_count << 1;
853 };
854
855 if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten)
856 {
857 if (this_count > 0xffff)
858 this_count = 0xffff;
859
860 cmd[0] += READ_10 - READ_6 ;
861 cmd[2] = (unsigned char) (block >> 24) & 0xff;
862 cmd[3] = (unsigned char) (block >> 16) & 0xff;
863 cmd[4] = (unsigned char) (block >> 8) & 0xff;
864 cmd[5] = (unsigned char) block & 0xff;
865 cmd[6] = cmd[9] = 0;
866 cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
867 cmd[8] = (unsigned char) this_count & 0xff;
868 }
869 else
870 {
871 if (this_count > 0xff)
872 this_count = 0xff;
873
874 cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
875 cmd[2] = (unsigned char) ((block >> 8) & 0xff);
876 cmd[3] = (unsigned char) block & 0xff;
877 cmd[4] = (unsigned char) this_count;
878 cmd[5] = 0;
879 }
880
881
882
883
884
885
886
887 SCpnt->transfersize = rscsi_disks[dev].sector_size;
888 SCpnt->underflow = this_count << 9;
889 scsi_do_cmd (SCpnt, (void *) cmd, buff,
890 this_count * rscsi_disks[dev].sector_size,
891 rw_intr,
892 (SCpnt->device->type == TYPE_DISK ?
893 SD_TIMEOUT : SD_MOD_TIMEOUT),
894 MAX_RETRIES);
895 }
896
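/*
 * Media-change check used by the VFS.  A TEST UNIT READY is issued via
 * sd_ioctl() on a temporary inode; if it fails, the unit is marked not
 * ready and changed.  Otherwise the device's changed flag is returned
 * and cleared.
 */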
897 static int check_scsidisk_media_change(kdev_t full_dev){
898 int retval;
899 int target;
900 struct inode inode;
901 int flag = 0;
902
903 target = DEVICE_NR(full_dev);
904
905 if (target >= sd_template.dev_max ||
906 !rscsi_disks[target].device) {
907 printk("SCSI disk request error: invalid device.\n");
908 return 0;
909 };
910
911 if(!rscsi_disks[target].device->removable) return 0;
912
913 inode.i_rdev = full_dev;
914 retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
915
916 if(retval){
917
918
919
920
921 rscsi_disks[target].ready = 0;
922 rscsi_disks[target].device->changed = 1;
923 return 1;
924
925 };
926
927
928
929
930
931
932
933 rscsi_disks[target].ready = 1;
934
935 retval = rscsi_disks[target].device->changed;
936 if(!flag) rscsi_disks[target].device->changed = 0;
937 return retval;
938 }
939
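/* Completion callback for the internal commands issued below: mark the
 * request done and wake up the sleeping caller. */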
940 static void sd_init_done (Scsi_Cmnd * SCpnt)
941 {
942 struct request * req;
943
944 req = &SCpnt->request;
945 req->rq_status = RQ_SCSI_DONE;
946
947 if (req->sem != NULL) {
948 up(req->sem);
949 }
950 }
951
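/*
 * Probe one disk: when built into the kernel, wait for the unit to
 * become ready (issuing START/STOP to spin it up if necessary), then
 * READ CAPACITY to get the size and sector size.  On failure a
 * 512-byte sector size and roughly 1 GB capacity are assumed.
 * Capacities for 256- and 1024-byte sectors are converted to 512-byte
 * units, and for removable media a MODE SENSE decides the
 * write-protect flag.
 */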
952 static int sd_init_onedisk(int i)
953 {
954 unsigned char cmd[10];
955 unsigned char *buffer;
956 unsigned long spintime;
957 int the_result, retries;
958 Scsi_Cmnd * SCpnt;
959
960
961
962
963
964
965 SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
966 buffer = (unsigned char *) scsi_malloc(512);
967
968 spintime = 0;
969
970
971 if (!MODULE_FLAG){
972 do{
973 retries = 0;
974 while(retries < 3)
975 {
976 cmd[0] = TEST_UNIT_READY;
977 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
978 memset ((void *) &cmd[2], 0, 8);
979 SCpnt->cmd_len = 0;
980 SCpnt->sense_buffer[0] = 0;
981 SCpnt->sense_buffer[2] = 0;
982
983 {
984 struct semaphore sem = MUTEX_LOCKED;
985
986 SCpnt->request.rq_status = RQ_SCSI_BUSY;
987 SCpnt->request.sem = &sem;
988 scsi_do_cmd (SCpnt,
989 (void *) cmd, (void *) buffer,
990 512, sd_init_done, SD_TIMEOUT,
991 MAX_RETRIES);
992 down(&sem);
993 }
994
995 the_result = SCpnt->result;
996 retries++;
997 if( the_result == 0
998 || SCpnt->sense_buffer[2] != UNIT_ATTENTION)
999 break;
1000 }
1001
1002
1003
1004 if(the_result && !rscsi_disks[i].device->removable &&
1005 SCpnt->sense_buffer[2] == NOT_READY) {
1006 int time1;
1007 if(!spintime){
1008 printk( "sd%c: Spinning up disk...", 'a' + i );
1009 cmd[0] = START_STOP;
1010 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
1011 cmd[1] |= 1;
1012 memset ((void *) &cmd[2], 0, 8);
1013 cmd[4] = 1;
1014 SCpnt->cmd_len = 0;
1015 SCpnt->sense_buffer[0] = 0;
1016 SCpnt->sense_buffer[2] = 0;
1017
1018 {
1019 struct semaphore sem = MUTEX_LOCKED;
1020
1021 SCpnt->request.rq_status = RQ_SCSI_BUSY;
1022 SCpnt->request.sem = &sem;
1023 scsi_do_cmd (SCpnt,
1024 (void *) cmd, (void *) buffer,
1025 512, sd_init_done, SD_TIMEOUT,
1026 MAX_RETRIES);
1027 down(&sem);
1028 }
1029
1030 spintime = jiffies;
1031 }
1032
1033 time1 = jiffies;
1034 while(jiffies < time1 + HZ); /* crude one-second delay between spin-up polls */
1035 printk( "." );
1036 };
1037 } while(the_result && spintime && spintime+100*HZ > jiffies);
1038 if (spintime) {
1039 if (the_result)
1040 printk( "not responding...\n" );
1041 else
1042 printk( "ready\n" );
1043 }
1044 };
1045
1046
1047 retries = 3;
1048 do {
1049 cmd[0] = READ_CAPACITY;
1050 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
1051 memset ((void *) &cmd[2], 0, 8);
1052 memset ((void *) buffer, 0, 8);
1053 SCpnt->cmd_len = 0;
1054 SCpnt->sense_buffer[0] = 0;
1055 SCpnt->sense_buffer[2] = 0;
1056
1057 {
1058 struct semaphore sem = MUTEX_LOCKED;
1059
1060 SCpnt->request.rq_status = RQ_SCSI_BUSY;
1061 SCpnt->request.sem = &sem;
1062 scsi_do_cmd (SCpnt,
1063 (void *) cmd, (void *) buffer,
1064 8, sd_init_done, SD_TIMEOUT,
1065 MAX_RETRIES);
1066 down(&sem);
1067 }
1068
1069 the_result = SCpnt->result;
1070 retries--;
1071
1072 } while(the_result && retries);
1073
1074 SCpnt->request.rq_status = RQ_INACTIVE;
1075
1076 wake_up(&SCpnt->device->device_wait);
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094 if (the_result)
1095 {
1096 printk ("sd%c : READ CAPACITY failed.\n"
1097 "sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
1098 'a' + i, 'a' + i,
1099 status_byte(the_result),
1100 msg_byte(the_result),
1101 host_byte(the_result),
1102 driver_byte(the_result)
1103 );
1104 if (driver_byte(the_result) & DRIVER_SENSE)
1105 printk("sd%c : extended sense code = %1x \n",
1106 'a' + i, SCpnt->sense_buffer[2] & 0xf);
1107 else
1108 printk("sd%c : sense not available. \n", 'a' + i);
1109
1110 printk("sd%c : block size assumed to be 512 bytes, disk size 1GB. \n",
1111 'a' + i);
1112 rscsi_disks[i].capacity = 0x1fffff;
1113 rscsi_disks[i].sector_size = 512;
1114
1115
1116
1117 if(rscsi_disks[i].device->removable &&
1118 SCpnt->sense_buffer[2] == NOT_READY)
1119 rscsi_disks[i].device->changed = 1;
1120
1121 }
1122 else
1123 {
1124
1125
1126
1127 rscsi_disks[i].ready = 1;
1128
1129 rscsi_disks[i].capacity = (buffer[0] << 24) |
1130 (buffer[1] << 16) |
1131 (buffer[2] << 8) |
1132 buffer[3];
1133
1134 rscsi_disks[i].sector_size = (buffer[4] << 24) |
1135 (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
1136
1137 if (rscsi_disks[i].sector_size == 0) {
1138 rscsi_disks[i].sector_size = 512;
1139 printk("sd%c : sector size 0 reported, assuming 512.\n", 'a' + i);
1140 }
1141
1142
1143 if (rscsi_disks[i].sector_size != 512 &&
1144 rscsi_disks[i].sector_size != 1024 &&
1145 rscsi_disks[i].sector_size != 256)
1146 {
1147 printk ("sd%c : unsupported sector size %d.\n",
1148 'a' + i, rscsi_disks[i].sector_size);
1149 if(rscsi_disks[i].device->removable){
1150 rscsi_disks[i].capacity = 0;
1151 } else {
1152 printk ("scsi : deleting disk entry.\n");
1153 rscsi_disks[i].device = NULL;
1154 sd_template.nr_dev--;
1155 return i;
1156 };
1157 }
1158 {
1159
1160
1161
1162
1163
1164 int m;
1165 int hard_sector = rscsi_disks[i].sector_size;
1166
1167 for (m=i<<4; m<((i+1)<<4); m++){
1168 sd_hardsizes[m] = hard_sector;
1169 }
1170 printk ("SCSI Hardware sector size is %d bytes on device sd%c\n",
1171 hard_sector,i+'a');
1172 }
1173 if(rscsi_disks[i].sector_size == 1024)
1174 rscsi_disks[i].capacity <<= 1;
1175 if(rscsi_disks[i].sector_size == 256)
1176 rscsi_disks[i].capacity >>= 1;
1177 }
1178
1179
1180
1181
1182
1183 rscsi_disks[i].write_prot = 0;
1184 if ( rscsi_disks[i].device->removable && rscsi_disks[i].ready ) {
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194 memset ((void *) &cmd[0], 0, 8);
1195 cmd[0] = MODE_SENSE;
1196 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
1197 cmd[2] = 1;
1198 cmd[4] = 12;
1199 SCpnt->cmd_len = 0;
1200 SCpnt->sense_buffer[0] = 0;
1201 SCpnt->sense_buffer[2] = 0;
1202
1203
1204 {
1205 struct semaphore sem = MUTEX_LOCKED;
1206 SCpnt->request.rq_status = RQ_SCSI_BUSY;
1207 SCpnt->request.sem = &sem;
1208 scsi_do_cmd (SCpnt,
1209 (void *) cmd, (void *) buffer,
1210 512, sd_init_done, SD_TIMEOUT,
1211 MAX_RETRIES);
1212 down(&sem);
1213 }
1214
1215 the_result = SCpnt->result;
1216 SCpnt->request.rq_status = RQ_INACTIVE;
1217 wake_up(&SCpnt->device->device_wait);
1218
1219 if ( the_result ) {
1220 printk ("sd%c: test WP failed, assume Write Protected\n",i+'a');
1221 rscsi_disks[i].write_prot = 1;
1222 } else {
1223 rscsi_disks[i].write_prot = ((buffer[2] & 0x80) != 0);
1224 printk ("sd%c: Write Protect is %s\n",i+'a',
1225 rscsi_disks[i].write_prot ? "on" : "off");
1226 }
1227
1228 }
1229
1230 rscsi_disks[i].ten = 1;
1231 rscsi_disks[i].remap = 1;
1232 scsi_free(buffer, 512);
1233 return i;
1234 }
1235
1236
1237
1238
1239
1240
1241 static int sd_registered = 0;
1242
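/*
 * One-time initialization: register the block major, then allocate the
 * per-disk arrays (rscsi_disks, sd_sizes, sd_blocksizes, sd_hardsizes
 * and the hd_struct partition table) sized for dev_noticed plus a few
 * spare slots, and point the gendisk structure at them.
 */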
1243 static int sd_init()
1244 {
1245 int i;
1246
1247 if (sd_template.dev_noticed == 0) return 0;
1248
1249 if(!sd_registered) {
1250 if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
1251 printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
1252 return 1;
1253 }
1254 sd_registered++;
1255 }
1256
1257
1258 if(rscsi_disks) return 0;
1259
1260 sd_template.dev_max = sd_template.dev_noticed + SD_EXTRA_DEVS;
1261
1262 rscsi_disks = (Scsi_Disk *)
1263 scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC);
1264 memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));
1265
1266 sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1267 sizeof(int), GFP_ATOMIC);
1268 memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));
1269
1270 sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1271 sizeof(int), GFP_ATOMIC);
1272
1273 sd_hardsizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1274 sizeof(int), GFP_ATOMIC);
1275
1276 for(i=0;i<(sd_template.dev_max << 4);i++){
1277 sd_blocksizes[i] = 1024;
1278 sd_hardsizes[i] = 512;
1279 }
1280 blksize_size[MAJOR_NR] = sd_blocksizes;
1281 hardsect_size[MAJOR_NR] = sd_hardsizes;
1282 sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
1283 sizeof(struct hd_struct),
1284 GFP_ATOMIC);
1285
1286
1287 sd_gendisk.max_nr = sd_template.dev_max;
1288 sd_gendisk.part = sd;
1289 sd_gendisk.sizes = sd_sizes;
1290 sd_gendisk.real_devices = (void *) rscsi_disks;
1291 return 0;
1292 }
1293
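/*
 * Called after all devices are attached: hook the request function,
 * link sd_gendisk into the gendisk chain, size each newly found disk
 * (revalidating instead when a module load finds a disk without a
 * partition table), and pick a read-ahead value based on whether the
 * first host supports scatter-gather.
 */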
1294 static void sd_finish()
1295 {
1296 int i;
1297
1298 blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
1299
1300 sd_gendisk.next = gendisk_head;
1301 gendisk_head = &sd_gendisk;
1302
1303 for (i = 0; i < sd_template.dev_max; ++i)
1304 if (!rscsi_disks[i].capacity &&
1305 rscsi_disks[i].device)
1306 {
1307 if (MODULE_FLAG
1308 && !rscsi_disks[i].has_part_table) {
1309 sd_sizes[i << 4] = rscsi_disks[i].capacity;
1310
1311 revalidate_scsidisk(MKDEV(MAJOR_NR, i << 4), 0);
1312 }
1313 else
1314 i=sd_init_onedisk(i);
1315 rscsi_disks[i].has_part_table = 1;
1316 }
1317
1318
1319
1320
1321
1322 if(rscsi_disks[0].device && rscsi_disks[0].device->host->sg_tablesize)
1323 read_ahead[MAJOR_NR] = 120;
1324 else
1325 read_ahead[MAJOR_NR] = 4;
1326
1327 return;
1328 }
1329
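/*
 * sd_detect() merely announces and counts a newly found disk or MO
 * device; sd_attach() claims a free rscsi_disks slot for it and points
 * the device's request function at do_sd_request().
 */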
1330 static int sd_detect(Scsi_Device * SDp){
1331 if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1332
1333 printk("Detected scsi disk sd%c at scsi%d, channel %d, id %d, lun %d\n",
1334 'a'+ (sd_template.dev_noticed++),
1335 SDp->host->host_no, SDp->channel, SDp->id, SDp->lun);
1336
1337 return 1;
1338 }
1339
1340 static int sd_attach(Scsi_Device * SDp){
1341 Scsi_Disk * dpnt;
1342 int i;
1343
1344 if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1345
1346 if(sd_template.nr_dev >= sd_template.dev_max) {
1347 SDp->attached--;
1348 return 1;
1349 }
1350
1351 for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
1352 if(!dpnt->device) break;
1353
1354 if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)");
1355
1356 SDp->scsi_request_fn = do_sd_request;
1357 rscsi_disks[i].device = SDp;
1358 rscsi_disks[i].has_part_table = 0;
1359 sd_template.nr_dev++;
1360 sd_gendisk.nr_real++;
1361 return 0;
1362 }
1363
1364 #define DEVICE_BUSY rscsi_disks[target].device->busy
1365 #define USAGE rscsi_disks[target].device->access_count
1366 #define CAPACITY rscsi_disks[target].capacity
1367 #define MAYBE_REINIT sd_init_onedisk(target)
1368 #define GENDISK_STRUCT sd_gendisk
1369
1370
1371
1372
1373
1374
1375
1376
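/*
 * Re-read the partition table of one disk (used after a media change).
 * Fails with -EBUSY if the device is busy or opened more than maxusage
 * times; otherwise every partition is flushed and invalidated, the
 * capacity is re-probed via sd_init_onedisk() (MAYBE_REINIT), and
 * resetup_one_dev() re-reads the partition table.
 */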
1377 int revalidate_scsidisk(kdev_t dev, int maxusage){
1378 int target;
1379 struct gendisk * gdev;
1380 unsigned long flags;
1381 int max_p;
1382 int start;
1383 int i;
1384
1385 target = DEVICE_NR(dev);
1386 gdev = &GENDISK_STRUCT;
1387
1388 save_flags(flags);
1389 cli();
1390 if (DEVICE_BUSY || USAGE > maxusage) {
1391 restore_flags(flags);
1392 printk("Device busy for revalidation (usage=%d)\n", USAGE);
1393 return -EBUSY;
1394 };
1395 DEVICE_BUSY = 1;
1396 restore_flags(flags);
1397
1398 max_p = gdev->max_p;
1399 start = target << gdev->minor_shift;
1400
1401 for (i=max_p - 1; i >=0 ; i--) {
1402 int minor = start+i;
1403 kdev_t devi = MKDEV(MAJOR_NR, minor);
1404 sync_dev(devi);
1405 invalidate_inodes(devi);
1406 invalidate_buffers(devi);
1407 gdev->part[minor].start_sect = 0;
1408 gdev->part[minor].nr_sects = 0;
1409
1410
1411
1412
1413 blksize_size[MAJOR_NR][minor] = 1024;
1414 };
1415
1416 #ifdef MAYBE_REINIT
1417 MAYBE_REINIT;
1418 #endif
1419
1420 gdev->part[start].nr_sects = CAPACITY;
1421 resetup_one_dev(gdev, target);
1422
1423 DEVICE_BUSY = 0;
1424 return 0;
1425 }
1426
1427 static int fop_revalidate_scsidisk(kdev_t dev){
1428 return revalidate_scsidisk(dev, 0);
1429 }
1430
1431
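/*
 * Detach a device: flush and invalidate all of its partitions, clear
 * its rscsi_disks slot, and drop the attach/notice counters so the
 * slot can be reused.
 */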
1432 static void sd_detach(Scsi_Device * SDp)
1433 {
1434 Scsi_Disk * dpnt;
1435 int i;
1436 int max_p;
1437 int start;
1438
1439 for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
1440 if(dpnt->device == SDp) {
1441
1442
1443
1444 max_p = sd_gendisk.max_p;
1445 start = i << sd_gendisk.minor_shift;
1446
1447 for (i=max_p - 1; i >=0 ; i--) {
1448 int minor = start+i;
1449 kdev_t devi = MKDEV(MAJOR_NR, minor);
1450 sync_dev(devi);
1451 invalidate_inodes(devi);
1452 invalidate_buffers(devi);
1453 sd_gendisk.part[minor].start_sect = 0;
1454 sd_gendisk.part[minor].nr_sects = 0;
1455 sd_sizes[minor] = 0;
1456 };
1457
1458 dpnt->has_part_table = 0;
1459 dpnt->device = NULL;
1460 dpnt->capacity = 0;
1461 SDp->attached--;
1462 sd_template.dev_noticed--;
1463 sd_template.nr_dev--;
1464 sd_gendisk.nr_real--;
1465 return;
1466 }
1467 return;
1468 }
1469
1470 #ifdef MODULE
1471
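/*
 * Loadable module entry points: init_module() registers the template
 * with the SCSI mid-layer (which calls sd_detect/sd_init/sd_attach for
 * existing devices); cleanup_module() unregisters it, frees the arrays
 * allocated in sd_init(), and unlinks sd_gendisk from the gendisk chain.
 */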
1472 int init_module(void) {
1473 sd_template.usage_count = &mod_use_count_;
1474 return scsi_register_module(MODULE_SCSI_DEV, &sd_template);
1475 }
1476
1477 void cleanup_module( void)
1478 {
1479 struct gendisk * prev_sdgd;
1480 struct gendisk * sdgd;
1481
1482 scsi_unregister_module(MODULE_SCSI_DEV, &sd_template);
1483 unregister_blkdev(SCSI_DISK_MAJOR, "sd");
1484 sd_registered--;
1485 if( rscsi_disks != NULL )
1486 {
1487 scsi_init_free((char *) rscsi_disks,
1488 sd_template.dev_max
1489 * sizeof(Scsi_Disk));
1490
1491 scsi_init_free((char *) sd_sizes, (sd_template.dev_max << 4) * sizeof(int));
1492 scsi_init_free((char *) sd_blocksizes, (sd_template.dev_max << 4) * sizeof(int));
1493 scsi_init_free((char *) sd_hardsizes, (sd_template.dev_max << 4) * sizeof(int));
1494 scsi_init_free((char *) sd,
1495 (sd_template.dev_max << 4) * sizeof(struct hd_struct));
1496
1497
1498
1499 sdgd = gendisk_head;
1500 prev_sdgd = NULL;
1501 while(sdgd != &sd_gendisk && sdgd != NULL)
1502 {
1503 prev_sdgd = sdgd;
1504 sdgd = sdgd->next;
1505 }
1506
1507 if(sdgd != &sd_gendisk)
1508 printk("sd_gendisk not in disk chain.\n");
1509 else {
1510 if(prev_sdgd != NULL)
1511 prev_sdgd->next = sdgd->next;
1512 else
1513 gendisk_head = sdgd->next;
1514 }
1515 }
1516
1517 blksize_size[MAJOR_NR] = NULL;
1518 blk_dev[MAJOR_NR].request_fn = NULL;
1519 blk_size[MAJOR_NR] = NULL;
1520 hardsect_size[MAJOR_NR] = NULL;
1521 read_ahead[MAJOR_NR] = 0;
1522 sd_template.dev_max = 0;
1523 }
1524 #endif