This source file includes the following definitions:
- sd_open
- sd_release
- sd_geninit
- rw_intr
- do_sd_request
- requeue_sd_request
- check_scsidisk_media_change
- sd_init_done
- sd_init_onedisk
- sd_init
- sd_finish
- sd_detect
- sd_attach
- revalidate_scsidisk
- fop_revalidate_scsidisk
- sd_detach
- init_module
- cleanup_module
/*
 * sd.c -- SCSI disk (direct-access device) driver
 */
#include <linux/module.h>
#ifdef MODULE
#define MODULE_FLAG 1
#else
#define MODULE_FLAG scsi_loadable_module_flag
#endif

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <asm/system.h>

#define MAJOR_NR SCSI_DISK_MAJOR
#include <linux/blk.h>
#include "scsi.h"
#include "hosts.h"
#include "sd.h"
#include <scsi/scsi_ioctl.h>
#include "constants.h"

#include <linux/genhd.h>

#define MAX_RETRIES 5

/* Command timeouts (in jiffies) for disks and magneto-optical drives. */
#define SD_TIMEOUT (15 * HZ)
#define SD_MOD_TIMEOUT (15 * HZ)

/* Buffer clustering is only attempted for hosts that support it, and never
 * for magneto-optical devices. */
#define CLUSTERABLE_DEVICE(SC) (SC->host->use_clustering && \
                                SC->device->type != TYPE_MOD)

struct hd_struct * sd;

Scsi_Disk * rscsi_disks = NULL;
static int * sd_sizes;
static int * sd_blocksizes;
static int * sd_hardsizes;

extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);

static int check_scsidisk_media_change(kdev_t);
static int fop_revalidate_scsidisk(kdev_t);

static int sd_init_onedisk(int);

static void requeue_sd_request (Scsi_Cmnd * SCpnt);

static int sd_init(void);
static void sd_finish(void);
static int sd_attach(Scsi_Device *);
static int sd_detect(Scsi_Device *);
static void sd_detach(Scsi_Device *);

struct Scsi_Device_Template sd_template =
{ NULL, "disk", "sd", NULL, TYPE_DISK,
  SCSI_DISK_MAJOR, 0, 0, 0, 1,
  sd_detect, sd_init,
  sd_finish, sd_attach, sd_detach
};
static int sd_open(struct inode * inode, struct file * filp)
{
    int target;
    target = DEVICE_NR(inode->i_rdev);

    if(target >= sd_template.dev_max || !rscsi_disks[target].device)
        return -ENXIO;   /* No such device */

    /* Wait until the device is no longer busy (e.g. being revalidated). */
    while (rscsi_disks[target].device->busy)
        barrier();

    if(rscsi_disks[target].device->removable) {
        check_disk_change(inode->i_rdev);

        /* Removable media must be present and ready. */
        if ( !rscsi_disks[target].ready ) {
            return -ENXIO;
        }

        /* Refuse a read-write open of write-protected media. */
        if ( (rscsi_disks[target].write_prot) && (filp->f_mode & 2) ) {
            return -EROFS;
        }

        /* Lock the door on the first open of a removable device. */
        if(!rscsi_disks[target].device->access_count)
            sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
    }

    /* Refuse the open if the selected partition has zero size. */
    if(sd_sizes[MINOR(inode->i_rdev)] == 0)
        return -ENXIO;

    rscsi_disks[target].device->access_count++;
    if (rscsi_disks[target].device->host->hostt->usage_count)
        (*rscsi_disks[target].device->host->hostt->usage_count)++;
    if(sd_template.usage_count) (*sd_template.usage_count)++;
    return 0;
}

static void sd_release(struct inode * inode, struct file * file)
{
    int target;
    fsync_dev(inode->i_rdev);

    target = DEVICE_NR(inode->i_rdev);

    rscsi_disks[target].device->access_count--;
    if (rscsi_disks[target].device->host->hostt->usage_count)
        (*rscsi_disks[target].device->host->hostt->usage_count)--;
    if(sd_template.usage_count) (*sd_template.usage_count)--;

    /* Unlock the door on the last close of a removable device. */
    if(rscsi_disks[target].device->removable) {
        if(!rscsi_disks[target].device->access_count)
            sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
    }
}

static void sd_geninit(struct gendisk *);

static struct file_operations sd_fops = {
    NULL,                         /* lseek - default */
    block_read,                   /* read - general block-dev read */
    block_write,                  /* write - general block-dev write */
    NULL,                         /* readdir - bad */
    NULL,                         /* select */
    sd_ioctl,                     /* ioctl */
    NULL,                         /* mmap */
    sd_open,                      /* open */
    sd_release,                   /* release */
    block_fsync,                  /* fsync */
    NULL,                         /* fasync */
    check_scsidisk_media_change,  /* media change */
    fop_revalidate_scsidisk       /* revalidate */
};

static struct gendisk sd_gendisk = {
    MAJOR_NR,                     /* major number */
    "sd",                         /* major name */
    4,                            /* minor shift: 16 minors per unit */
    1 << 4,                       /* maximum partitions per unit */
    0,                            /* maximum number of real units */
    sd_geninit,                   /* init function */
    NULL,                         /* hd struct */
    NULL,                         /* block sizes */
    0,                            /* number of real devices */
    NULL,                         /* real devices (internal use) */
    NULL                          /* next gendisk */
};

static void sd_geninit (struct gendisk *ignored)
{
    int i;

    for (i = 0; i < sd_template.dev_max; ++i)
        if(rscsi_disks[i].device)
            sd[i << 4].nr_sects = rscsi_disks[i].capacity;
#if 0
    sd_gendisk.nr_real = sd_template.dev_max;
#endif
}

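/*
 * rw_intr() is the completion routine handed to scsi_do_cmd() for disk
 * reads and writes.  On success it copies bounce-buffer data back for
 * reads, frees any scatter-gather list and DMA buffers, completes the
 * request and calls requeue_sd_request() to start the next one.  On
 * failure it examines the sense data: a UNIT ATTENTION on removable media
 * marks the disk as changed, and an ILLEGAL REQUEST makes the driver drop
 * back from 10-byte to 6-byte READ/WRITE commands.
 */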
static void rw_intr (Scsi_Cmnd *SCpnt)
{
    int result = SCpnt->result;
    int this_count = SCpnt->bufflen >> 9;

#ifdef DEBUG
    printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.rq_dev),
           SCpnt->host->host_no, result);
#endif

    /*
     * First case: the command completed successfully.  Copy any bounce
     * buffers back (for reads), free the DMA memory, and complete the
     * request.
     */
    if (!result) {

#ifdef DEBUG
        printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.rq_dev),
               SCpnt->request.nr_sectors);
        printk("use_sg is %d\n ",SCpnt->use_sg);
#endif
        if (SCpnt->use_sg) {
            struct scatterlist * sgpnt;
            int i;
            sgpnt = (struct scatterlist *) SCpnt->buffer;
            for(i=0; i<SCpnt->use_sg; i++) {
#ifdef DEBUG
                printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address,
                       sgpnt[i].length);
#endif
                if (sgpnt[i].alt_address) {
                    if (SCpnt->request.cmd == READ)
                        memcpy(sgpnt[i].alt_address, sgpnt[i].address,
                               sgpnt[i].length);
                    scsi_free(sgpnt[i].address, sgpnt[i].length);
                }
            }

            /* Free the list of scatter-gather pointers. */
            scsi_free(SCpnt->buffer, SCpnt->sglist_len);
        } else {
            if (SCpnt->buffer != SCpnt->request.buffer) {
#ifdef DEBUG
                printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
                       SCpnt->bufflen);
#endif
                if (SCpnt->request.cmd == READ)
                    memcpy(SCpnt->request.buffer, SCpnt->buffer,
                           SCpnt->bufflen);
                scsi_free(SCpnt->buffer, SCpnt->bufflen);
            }
        }

        /*
         * If sectors are still outstanding, this was a multi-buffer request
         * and another command will be issued for the remainder.
         */
        if (SCpnt->request.nr_sectors > this_count)
        {
            SCpnt->request.errors = 0;

            if (!SCpnt->request.bh)
            {
#ifdef DEBUG
                printk("sd%c : handling page request, no buffer\n",
                       'a' + MINOR(SCpnt->request.rq_dev));
#endif
                /* A page (raw) request should never span commands. */
                panic("sd.c: linked page request (%lx %x)",
                      SCpnt->request.sector, this_count);
            }
        }
        SCpnt = end_scsi_request(SCpnt, 1, this_count);
        requeue_sd_request(SCpnt);
        return;
    }

    /* The command failed: free any scatter-gather list and DMA bounce
     * buffers without copying data back. */
    if (SCpnt->use_sg) {
        struct scatterlist * sgpnt;
        int i;
        sgpnt = (struct scatterlist *) SCpnt->buffer;
        for(i=0; i<SCpnt->use_sg; i++) {
#ifdef DEBUG
            printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
                   SCpnt->bufflen);
#endif
            if (sgpnt[i].alt_address) {
                scsi_free(sgpnt[i].address, sgpnt[i].length);
            }
        }
        scsi_free(SCpnt->buffer, SCpnt->sglist_len);
    } else {
#ifdef DEBUG
        printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
               SCpnt->bufflen);
#endif
        if (SCpnt->buffer != SCpnt->request.buffer)
            scsi_free(SCpnt->buffer, SCpnt->bufflen);
    }

    /* Examine the sense data to decide how to handle the failure. */
    if (driver_byte(result) != 0) {
        if (suggestion(result) == SUGGEST_REMAP) {
#ifdef REMAP
            /* Remapping support is compiled out by default; when enabled,
             * a drive that can remap simply has the error cleared here. */
            if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].remap)
            {
                result = 0;
            }
            else
#endif
        }

        if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
            if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
                if(rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->removable) {
                    /* A media change was detected: set the changed bit and
                     * quietly refuse further access until revalidation. */
                    rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->changed = 1;
                    SCpnt = end_scsi_request(SCpnt, 0, this_count);
                    requeue_sd_request(SCpnt);
                    return;
                }
                else
                {
                    /* Not removable, so this was a bus reset or a power
                     * glitch rather than a media change; just retry. */
                    requeue_sd_request(SCpnt);
                    return;
                }
            }
        }

        /*
         * ILLEGAL REQUEST usually means a 10-byte READ/WRITE was sent to a
         * drive that only supports the 6-byte forms; drop back to the
         * 6-byte commands and retry.
         */
        if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
            if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten) {
                rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten = 0;
                requeue_sd_request(SCpnt);
                result = 0;
            } else {
            }
        }
    }
    if (result) {
        printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n",
               rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->host->host_no,
               rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->channel,
               rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->id,
               rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->lun, result);

        if (driver_byte(result) & DRIVER_SENSE)
            print_sense("sd", SCpnt);
        SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
        requeue_sd_request(SCpnt);
        return;
    }
}

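/*
 * do_sd_request() is the request_fn for the SCSI disk major.  It loops
 * pulling requests off the block device queue, relocks the door of
 * removable drives after a host reset, and allocates a Scsi_Cmnd for the
 * request (falling back to any other queueable request when the current
 * device has no free command blocks).  Each command is then handed to
 * requeue_sd_request() to be built and issued.
 */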
static void do_sd_request (void)
{
    Scsi_Cmnd * SCpnt = NULL;
    Scsi_Device * SDev;
    struct request * req = NULL;
    unsigned long flags;
    int flag = 0;

    save_flags(flags);
    while (1==1){
        cli();
        if (CURRENT != NULL && CURRENT->rq_status == RQ_INACTIVE) {
            restore_flags(flags);
            return;
        }

        INIT_SCSI_REQUEST;
        SDev = rscsi_disks[DEVICE_NR(CURRENT->rq_dev)].device;

        /*
         * After a host reset, relock the door of removable drives (only
         * from process context) and clear the reset flag.
         */
        if( SDev->was_reset )
        {
            if( SDev->removable && !intr_count )
            {
                scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0);
            }
            SDev->was_reset = 0;
        }

        /*
         * On the first pass try to allocate a command block for the head
         * of the queue; on later passes (when that failed) fall through to
         * the scan below.
         */
        if (flag++ == 0)
            SCpnt = allocate_device(&CURRENT,
                                    rscsi_disks[DEVICE_NR(CURRENT->rq_dev)].device, 0);
        else SCpnt = NULL;

        restore_flags(flags);

        /*
         * If no command block was available for the current request, scan
         * the queue for a request whose device does have a free command
         * block, and pull that request off the queue instead.
         */
        if (!SCpnt && sd_template.nr_dev > 1){
            struct request *req1;
            req1 = NULL;
            cli();
            req = CURRENT;
            while(req){
                SCpnt = request_queueable(req, rscsi_disks[DEVICE_NR(req->rq_dev)].device);
                if(SCpnt) break;
                req1 = req;
                req = req->next;
            }
            if (SCpnt && req->rq_status == RQ_INACTIVE) {
                if (req == CURRENT)
                    CURRENT = CURRENT->next;
                else
                    req1->next = req->next;
            }
            restore_flags(flags);
        }

        if (!SCpnt) return;

        /* Queue the command. */
        requeue_sd_request(SCpnt);
    }
}

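/*
 * requeue_sd_request() turns a block request into a SCSI READ/WRITE
 * command.  It validates the request against the partition table, builds
 * either a single contiguous transfer or a scatter-gather list (allocating
 * ISA DMA bounce buffers below the 16MB threshold where needed), chooses a
 * 6-byte or 10-byte CDB depending on block number and transfer length, and
 * finally issues the command with rw_intr() as the completion routine.
 */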
static void requeue_sd_request (Scsi_Cmnd * SCpnt)
{
    int dev, devm, block, this_count;
    unsigned char cmd[10];
    int bounce_size, contiguous;
    int max_sg;
    struct buffer_head * bh, *bhp;
    char * buff, *bounce_buffer;

repeat:

    if(!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) {
        do_sd_request();
        return;
    }

    devm = MINOR(SCpnt->request.rq_dev);
    dev = DEVICE_NR(SCpnt->request.rq_dev);

    block = SCpnt->request.sector;
    this_count = 0;

#ifdef DEBUG
    printk("Doing sd request, dev = %d, block = %d\n", devm, block);
#endif

    if (devm >= (sd_template.dev_max << 4) ||
        !rscsi_disks[dev].device ||
        block + SCpnt->request.nr_sectors > sd[devm].nr_sects)
    {
        SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
        goto repeat;
    }

    block += sd[devm].start_sect;

    if (rscsi_disks[dev].device->changed)
    {
        /* Quietly refuse further I/O to a changed disc until the changed
         * bit has been reset by revalidation. */
        SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
        goto repeat;
    }

#ifdef DEBUG
    printk("sd%c : real dev = /dev/sd%c, block = %d\n",
           'a' + devm, dev, block);
#endif

    /* With a 1K hardware sector size, refuse access to odd 512-byte
     * sectors: such a request cannot be mapped onto whole sectors. */
    if (rscsi_disks[dev].sector_size == 1024)
        if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
            printk("sd.c:Bad block number requested");
            SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
            goto repeat;
        }

    switch (SCpnt->request.cmd)
    {
    case WRITE :
        if (!rscsi_disks[dev].device->writeable)
        {
            SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
            goto repeat;
        }
        cmd[0] = WRITE_6;
        break;
    case READ :
        cmd[0] = READ_6;
        break;
    default :
        panic ("Unknown sd command %d\n", SCpnt->request.cmd);
    }

    SCpnt->this_count = 0;

    contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
    bounce_buffer = NULL;
    bounce_size = (SCpnt->request.nr_sectors << 9);

    /* For ISA DMA hosts, a request whose buffers reach above the 16MB
     * threshold needs one large bounce buffer if it is to stay contiguous. */
    if (contiguous && SCpnt->request.bh &&
        ((long) SCpnt->request.bh->b_data)
        + (SCpnt->request.nr_sectors << 9) - 1 > ISA_DMA_THRESHOLD
        && SCpnt->host->unchecked_isa_dma) {
        if(((long) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
            bounce_buffer = (char *) scsi_malloc(bounce_size);
        if(!bounce_buffer) contiguous = 0;
    }

    /* The transfer is only contiguous if every buffer head follows the
     * previous one in physical memory. */
    if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
        for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp,
            bhp = bhp->b_reqnext) {
            if(!CONTIGUOUS_BUFFERS(bh,bhp)) {
                if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
                contiguous = 0;
                break;
            }
        }
    if (!SCpnt->request.bh || contiguous) {

        /* Case of page request (i.e. raw device), or contiguous buffers. */
        this_count = SCpnt->request.nr_sectors;
        buff = SCpnt->request.buffer;
        SCpnt->use_sg = 0;

    } else if (SCpnt->host->sg_tablesize == 0 ||
               (need_isa_buffer && dma_free_sectors <= 10)) {

        /* The host cannot scatter-gather, or DMA buffer space is running
         * low, so transfer a single buffer at a time. */
        if (SCpnt->host->sg_tablesize != 0 &&
            need_isa_buffer &&
            dma_free_sectors <= 10)
            printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n");

        this_count = SCpnt->request.current_nr_sectors;
        buff = SCpnt->request.buffer;
        SCpnt->use_sg = 0;

    } else {

        /* General case: build a scatter-gather list. */
        struct scatterlist * sgpnt;
        int count, this_count_max;
        int counted;

        /* Count how many segments (and sectors) this command can cover. */
        bh = SCpnt->request.bh;
        this_count = 0;
        this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
        count = 0;
        bhp = NULL;
        while(bh) {
            if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
            if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
               !CLUSTERABLE_DEVICE(SCpnt) ||
               (SCpnt->host->unchecked_isa_dma &&
                ((unsigned long) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
                if (count < SCpnt->host->sg_tablesize) count++;
                else break;
            }
            this_count += (bh->b_size >> 9);
            bhp = bh;
            bh = bh->b_reqnext;
        }
#if 0
        if(SCpnt->host->unchecked_isa_dma &&
           ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
#endif
        SCpnt->use_sg = count;

        /* scsi_malloc hands out memory in 512-byte multiples, so round the
         * scatterlist allocation up to a power of two. */
        count = 512;
        while( count < (SCpnt->use_sg * sizeof(struct scatterlist)))
            count = count << 1;
        SCpnt->sglist_len = count;
        max_sg = count / sizeof(struct scatterlist);
        if(SCpnt->host->sg_tablesize < max_sg)
            max_sg = SCpnt->host->sg_tablesize;
        sgpnt = (struct scatterlist * ) scsi_malloc(count);
        if (!sgpnt) {
            printk("Warning - running *really* short on DMA buffers\n");
            SCpnt->use_sg = 0;
            this_count = SCpnt->request.current_nr_sectors;
            buff = SCpnt->request.buffer;
        } else {
            memset(sgpnt, 0, count);

            /* Fill the scatterlist, merging physically contiguous buffer
             * heads where the host allows clustering. */
            buff = (char *) sgpnt;
            counted = 0;
            for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
                count < SCpnt->use_sg && bh;
                count++, bh = bhp) {

                bhp = bh->b_reqnext;

                if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
                sgpnt[count].length += bh->b_size;
                counted += bh->b_size >> 9;

                /* Segments above the ISA DMA limit get a bounce buffer. */
                if (((long) sgpnt[count].address) + sgpnt[count].length - 1 >
                    ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
                    !sgpnt[count].alt_address) {
                    sgpnt[count].alt_address = sgpnt[count].address;

                    /* Leave some slack in the DMA pool for other users. */
                    if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
                        sgpnt[count].address = NULL;
                    } else {
                        sgpnt[count].address =
                            (char *) scsi_malloc(sgpnt[count].length);
                    }

                    /* If no bounce buffer could be had, truncate the
                     * scatter-gather list at this point. */
                    if(sgpnt[count].address == NULL){
#if 0
                        printk("Warning: Running low on SCSI DMA buffers");
                        while(--count >= 0){
                            if(sgpnt[count].alt_address)
                                scsi_free(sgpnt[count].address,
                                          sgpnt[count].length);
                        }
                        this_count = SCpnt->request.current_nr_sectors;
                        buff = SCpnt->request.buffer;
                        SCpnt->use_sg = 0;
                        scsi_free(sgpnt, SCpnt->sglist_len);
#endif
                        SCpnt->use_sg = count;
                        this_count = counted -= bh->b_size >> 9;
                        break;
                    }
                }

                /* Only cluster buffers if we can also supply DMA bounce
                 * buffers large enough for the merged segment. */
                if(bhp && CONTIGUOUS_BUFFERS(bh,bhp)
                   && CLUSTERABLE_DEVICE(SCpnt)) {
                    char * tmp;

                    if (((long) sgpnt[count].address) + sgpnt[count].length +
                        bhp->b_size - 1 > ISA_DMA_THRESHOLD &&
                        (SCpnt->host->unchecked_isa_dma) &&
                        !sgpnt[count].alt_address) continue;

                    if(!sgpnt[count].alt_address) {count--; continue; }
                    if(dma_free_sectors > 10)
                        tmp = (char *) scsi_malloc(sgpnt[count].length
                                                   + bhp->b_size);
                    else {
                        tmp = NULL;
                        max_sg = SCpnt->use_sg;
                    }
                    if(tmp){
                        scsi_free(sgpnt[count].address, sgpnt[count].length);
                        sgpnt[count].address = tmp;
                        count--;
                        continue;
                    }

                    /* If another sg slot is allowed, claim it; otherwise
                     * the transfer will simply be truncated. */
                    if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
                }
            }

            /* This is how many sectors will actually be transferred. */
            this_count = counted;

            if(count < SCpnt->use_sg || SCpnt->use_sg
               > SCpnt->host->sg_tablesize){
                bh = SCpnt->request.bh;
                printk("Use sg, count %d %x %d\n",
                       SCpnt->use_sg, count, dma_free_sectors);
                printk("maxsg = %x, counted = %d this_count = %d\n",
                       max_sg, counted, this_count);
                while(bh){
                    printk("[%p %lx] ", bh->b_data, bh->b_size);
                    bh = bh->b_reqnext;
                }
                if(SCpnt->use_sg < 16)
                    for(count=0; count<SCpnt->use_sg; count++)
                        printk("{%d:%p %p %d} ", count,
                               sgpnt[count].address,
                               sgpnt[count].alt_address,
                               sgpnt[count].length);
                panic("Ooops");
            }

            if (SCpnt->request.cmd == WRITE)
                for(count=0; count<SCpnt->use_sg; count++)
                    if(sgpnt[count].alt_address)
                        memcpy(sgpnt[count].address, sgpnt[count].alt_address,
                               sgpnt[count].length);
        }
    }

    /* For non scatter-gather transfers, handle DMA above the ISA limit
     * with a bounce buffer. */
    if(SCpnt->use_sg == 0){
        if (((long) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
            (SCpnt->host->unchecked_isa_dma)) {
            if(bounce_buffer)
                buff = bounce_buffer;
            else
                buff = (char *) scsi_malloc(this_count << 9);
            if(buff == NULL) {
                /* Back off to a single buffer and try again. */
                this_count = SCpnt->request.current_nr_sectors;
                buff = (char *) scsi_malloc(this_count << 9);
                if(!buff) panic("Ran out of DMA buffers.");
            }
            if (SCpnt->request.cmd == WRITE)
                memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
        }
    }
#ifdef DEBUG
    printk("sd%c : %s %d/%d 512 byte blocks.\n",
           'a' + devm,
           (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
           this_count, SCpnt->request.nr_sectors);
#endif

    cmd[1] = (SCpnt->lun << 5) & 0xe0;

    /* Convert the 512-byte block number and count into device sectors. */
    if (rscsi_disks[dev].sector_size == 1024){
        if(block & 1) panic("sd.c:Bad block number requested");
        if(this_count & 1) panic("sd.c:Bad block number requested");
        block = block >> 1;
        this_count = this_count >> 1;
    }

    if (rscsi_disks[dev].sector_size == 256){
        block = block << 1;
        this_count = this_count << 1;
    }

    /* Use the 10-byte READ/WRITE when the block number or count does not
     * fit in the 6-byte form and the drive supports it. */
    if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten)
    {
        if (this_count > 0xffff)
            this_count = 0xffff;

        cmd[0] += READ_10 - READ_6 ;
        cmd[2] = (unsigned char) (block >> 24) & 0xff;
        cmd[3] = (unsigned char) (block >> 16) & 0xff;
        cmd[4] = (unsigned char) (block >> 8) & 0xff;
        cmd[5] = (unsigned char) block & 0xff;
        cmd[6] = cmd[9] = 0;
        cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
        cmd[8] = (unsigned char) this_count & 0xff;
    }
    else
    {
        if (this_count > 0xff)
            this_count = 0xff;

        cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
        cmd[2] = (unsigned char) ((block >> 8) & 0xff);
        cmd[3] = (unsigned char) block & 0xff;
        cmd[4] = (unsigned char) this_count;
        cmd[5] = 0;
    }

    /* The drive should not disconnect in the middle of a sector, so a dumb
     * host adapter can count on transferring at least one sector between
     * connects; tell the midlevel about it. */
    SCpnt->transfersize = rscsi_disks[dev].sector_size;
    SCpnt->underflow = this_count << 9;
    scsi_do_cmd (SCpnt, (void *) cmd, buff,
                 this_count * rscsi_disks[dev].sector_size,
                 rw_intr,
                 (SCpnt->device->type == TYPE_DISK ?
                  SD_TIMEOUT : SD_MOD_TIMEOUT),
                 MAX_RETRIES);
}

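/*
 * check_scsidisk_media_change() backs the media-change entry in sd_fops.
 * It issues TEST UNIT READY through sd_ioctl(); if the unit is not ready,
 * the disk is marked changed and not ready, otherwise the saved "changed"
 * flag is reported and cleared.
 */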
static int check_scsidisk_media_change(kdev_t full_dev){
    int retval;
    int target;
    struct inode inode;
    int flag = 0;

    target = DEVICE_NR(full_dev);

    if (target >= sd_template.dev_max ||
        !rscsi_disks[target].device) {
        printk("SCSI disk request error: invalid device.\n");
        return 0;
    }

    if(!rscsi_disks[target].device->removable) return 0;

    inode.i_rdev = full_dev;  /* only the device number is needed here */
    retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);

    if(retval){
        /*
         * The unit is not ready, which usually means there is no medium in
         * the drive.  Mark it changed; returning 1 forces a flush from
         * check_disk_change().
         */
        rscsi_disks[target].ready = 0;
        rscsi_disks[target].device->changed = 1;
        return 1;
    }

    /* The unit answered, so a medium is present; remember that for sd_open(). */
    rscsi_disks[target].ready = 1;

    retval = rscsi_disks[target].device->changed;
    if(!flag) rscsi_disks[target].device->changed = 0;
    return retval;
}

static void sd_init_done (Scsi_Cmnd * SCpnt)
{
    struct request * req;

    req = &SCpnt->request;
    req->rq_status = RQ_SCSI_DONE;

    if (req->sem != NULL) {
        up(req->sem);
    }
}

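/*
 * sd_init_onedisk() probes a single drive: it optionally spins the unit up
 * with START/STOP (polling for up to 100 seconds), reads the capacity and
 * hardware sector size with READ CAPACITY, and checks the write-protect
 * bit of removable media with MODE SENSE.  Capacities are converted to
 * 512-byte sectors before being stored in rscsi_disks[].
 */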
static int sd_init_onedisk(int i)
{
    unsigned char cmd[10];
    unsigned char *buffer;
    unsigned long spintime;
    int the_result, retries;
    Scsi_Cmnd * SCpnt;

    /* Get a command block and a 512-byte buffer for the probe commands. */
    SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
    buffer = (unsigned char *) scsi_malloc(512);

    spintime = 0;

    /* Spin up the drive if necessary (skipped when loaded as a module). */
    if (!MODULE_FLAG){
        do{
            retries = 0;
            while(retries < 3)
            {
                cmd[0] = TEST_UNIT_READY;
                cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
                memset ((void *) &cmd[2], 0, 8);
                SCpnt->cmd_len = 0;
                SCpnt->sense_buffer[0] = 0;
                SCpnt->sense_buffer[2] = 0;

                {
                    struct semaphore sem = MUTEX_LOCKED;

                    SCpnt->request.rq_status = RQ_SCSI_BUSY;
                    SCpnt->request.sem = &sem;
                    scsi_do_cmd (SCpnt,
                                 (void *) cmd, (void *) buffer,
                                 512, sd_init_done, SD_TIMEOUT,
                                 MAX_RETRIES);
                    down(&sem);
                }

                the_result = SCpnt->result;
                retries++;
                if( the_result == 0
                    || SCpnt->sense_buffer[2] != UNIT_ATTENTION)
                    break;
            }

            /*
             * A non-removable drive reporting NOT READY probably just needs
             * to be started; issue START/STOP UNIT and poll once a second.
             */
            if(the_result && !rscsi_disks[i].device->removable &&
               SCpnt->sense_buffer[2] == NOT_READY) {
                int time1;
                if(!spintime){
                    printk( "sd%c: Spinning up disk...", 'a' + i );
                    cmd[0] = START_STOP;
                    cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
                    cmd[1] |= 1;  /* IMMED: return before the spin-up finishes */
                    memset ((void *) &cmd[2], 0, 8);
                    cmd[4] = 1;   /* START */
                    SCpnt->cmd_len = 0;
                    SCpnt->sense_buffer[0] = 0;
                    SCpnt->sense_buffer[2] = 0;

                    {
                        struct semaphore sem = MUTEX_LOCKED;

                        SCpnt->request.rq_status = RQ_SCSI_BUSY;
                        SCpnt->request.sem = &sem;
                        scsi_do_cmd (SCpnt,
                                     (void *) cmd, (void *) buffer,
                                     512, sd_init_done, SD_TIMEOUT,
                                     MAX_RETRIES);
                        down(&sem);
                    }

                    spintime = jiffies;
                }

                time1 = jiffies;
                while(jiffies < time1 + HZ); /* wait a second before retrying */
                printk( "." );
            }
        } while(the_result && spintime && spintime+100*HZ > jiffies);
        if (spintime) {
            if (the_result)
                printk( "not responding...\n" );
            else
                printk( "ready\n" );
        }
    }

    retries = 3;
    do {
        cmd[0] = READ_CAPACITY;
        cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
        memset ((void *) &cmd[2], 0, 8);
        memset ((void *) buffer, 0, 8);
        SCpnt->cmd_len = 0;
        SCpnt->sense_buffer[0] = 0;
        SCpnt->sense_buffer[2] = 0;

        {
            struct semaphore sem = MUTEX_LOCKED;

            SCpnt->request.rq_status = RQ_SCSI_BUSY;
            SCpnt->request.sem = &sem;
            scsi_do_cmd (SCpnt,
                         (void *) cmd, (void *) buffer,
                         8, sd_init_done, SD_TIMEOUT,
                         MAX_RETRIES);
            down(&sem);
        }

        the_result = SCpnt->result;
        retries--;

    } while(the_result && retries);

    SCpnt->request.rq_status = RQ_INACTIVE;  /* done with the command block */

    wake_up(&SCpnt->device->device_wait);

    /*
     * If READ CAPACITY failed, fall back to conservative defaults
     * (512-byte sectors, roughly 1 GB) so the rest of the driver still has
     * something to work with.
     */
    if (the_result)
    {
        printk ("sd%c : READ CAPACITY failed.\n"
                "sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
                'a' + i, 'a' + i,
                status_byte(the_result),
                msg_byte(the_result),
                host_byte(the_result),
                driver_byte(the_result)
                );
        if (driver_byte(the_result) & DRIVER_SENSE)
            printk("sd%c : extended sense code = %1x \n",
                   'a' + i, SCpnt->sense_buffer[2] & 0xf);
        else
            printk("sd%c : sense not available. \n", 'a' + i);

        printk("sd%c : block size assumed to be 512 bytes, disk size 1GB. \n",
               'a' + i);
        rscsi_disks[i].capacity = 0x1fffff;
        rscsi_disks[i].sector_size = 512;

        /* A removable drive that reports NOT READY has no medium; mark it
         * changed so the next open rechecks it. */
        if(rscsi_disks[i].device->removable &&
           SCpnt->sense_buffer[2] == NOT_READY)
            rscsi_disks[i].device->changed = 1;

    }
    else
    {
        /* The capacity was read, so a medium must be present. */
        rscsi_disks[i].ready = 1;

        rscsi_disks[i].capacity = 1 + ((buffer[0] << 24) |
                                       (buffer[1] << 16) |
                                       (buffer[2] << 8) |
                                       buffer[3]);

        rscsi_disks[i].sector_size = (buffer[4] << 24) |
            (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];

        if (rscsi_disks[i].sector_size == 0) {
            rscsi_disks[i].sector_size = 512;
            printk("sd%c : sector size 0 reported, assuming 512.\n", 'a' + i);
        }

        if (rscsi_disks[i].sector_size != 512 &&
            rscsi_disks[i].sector_size != 1024 &&
            rscsi_disks[i].sector_size != 256)
        {
            printk ("sd%c : unsupported sector size %d.\n",
                    'a' + i, rscsi_disks[i].sector_size);
            if(rscsi_disks[i].device->removable){
                rscsi_disks[i].capacity = 0;
            } else {
                printk ("scsi : deleting disk entry.\n");
                rscsi_disks[i].device = NULL;
                sd_template.nr_dev--;
                return i;
            }
        }
        {
            /*
             * Record the hardware sector size for every minor of this unit
             * and print the capacity in MB and (rounded) GB.
             */
            int m, mb;
            int sz_quot, sz_rem;
            int hard_sector = rscsi_disks[i].sector_size;

            for (m=i<<4; m<((i+1)<<4); m++){
                sd_hardsizes[m] = hard_sector;
            }
            mb = rscsi_disks[i].capacity / 1024 * hard_sector / 1024;

            m = (mb + 50) / 100;
            sz_quot = m / 10;
            sz_rem = m - (10 * sz_quot);
            printk ("SCSI device sd%c: hdwr sector= %d bytes."
                    " Sectors= %d [%d MB] [%d.%1d GB]\n",
                    i+'a', hard_sector, rscsi_disks[i].capacity,
                    mb, sz_quot, sz_rem);
        }
        /* The driver works in 512-byte sectors internally; rescale. */
        if(rscsi_disks[i].sector_size == 1024)
            rscsi_disks[i].capacity <<= 1;
        if(rscsi_disks[i].sector_size == 256)
            rscsi_disks[i].capacity >>= 1;
    }

    /*
     * For removable media, check the write-protect tab with a short MODE
     * SENSE and look at the WP bit in the device-specific parameter byte.
     */
    rscsi_disks[i].write_prot = 0;
    if ( rscsi_disks[i].device->removable && rscsi_disks[i].ready ) {
        memset ((void *) &cmd[0], 0, 8);
        cmd[0] = MODE_SENSE;
        cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
        cmd[2] = 1;   /* page code 1 */
        cmd[4] = 12;  /* allocation length */
        SCpnt->cmd_len = 0;
        SCpnt->sense_buffer[0] = 0;
        SCpnt->sense_buffer[2] = 0;

        {
            struct semaphore sem = MUTEX_LOCKED;
            SCpnt->request.rq_status = RQ_SCSI_BUSY;
            SCpnt->request.sem = &sem;
            scsi_do_cmd (SCpnt,
                         (void *) cmd, (void *) buffer,
                         512, sd_init_done, SD_TIMEOUT,
                         MAX_RETRIES);
            down(&sem);
        }

        the_result = SCpnt->result;
        SCpnt->request.rq_status = RQ_INACTIVE;
        wake_up(&SCpnt->device->device_wait);

        if ( the_result ) {
            printk ("sd%c: test WP failed, assume Write Protected\n",i+'a');
            rscsi_disks[i].write_prot = 1;
        } else {
            rscsi_disks[i].write_prot = ((buffer[2] & 0x80) != 0);
            printk ("sd%c: Write Protect is %s\n",i+'a',
                    rscsi_disks[i].write_prot ? "on" : "off");
        }

    }

    rscsi_disks[i].ten = 1;
    rscsi_disks[i].remap = 1;
    scsi_free(buffer, 512);
    return i;
}

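/*
 * sd_init() registers the block device and allocates the per-unit and
 * per-minor arrays (sizes, block sizes, hardware sector sizes, partition
 * table).  sd_finish() then installs the request function, links
 * sd_gendisk into the gendisk chain, probes any disks that do not yet have
 * a capacity, and sets the default read-ahead.
 */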
static int sd_registered = 0;

static int sd_init()
{
    int i;

    if (sd_template.dev_noticed == 0) return 0;

    if(!sd_registered) {
        if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
            printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
            return 1;
        }
        sd_registered++;
    }

    if(rscsi_disks) return 0;

    sd_template.dev_max = sd_template.dev_noticed + SD_EXTRA_DEVS;

    rscsi_disks = (Scsi_Disk *)
        scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC);
    memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));

    sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
                                        sizeof(int), GFP_ATOMIC);
    memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));

    sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
                                             sizeof(int), GFP_ATOMIC);

    sd_hardsizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
                                            sizeof(int), GFP_ATOMIC);

    for(i=0;i<(sd_template.dev_max << 4);i++){
        sd_blocksizes[i] = 1024;
        sd_hardsizes[i] = 512;
    }
    blksize_size[MAJOR_NR] = sd_blocksizes;
    hardsect_size[MAJOR_NR] = sd_hardsizes;
    sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
                                               sizeof(struct hd_struct),
                                               GFP_ATOMIC);

    sd_gendisk.max_nr = sd_template.dev_max;
    sd_gendisk.part = sd;
    sd_gendisk.sizes = sd_sizes;
    sd_gendisk.real_devices = (void *) rscsi_disks;
    return 0;
}

static void sd_finish()
{
    int i;

    blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;

    sd_gendisk.next = gendisk_head;
    gendisk_head = &sd_gendisk;

    for (i = 0; i < sd_template.dev_max; ++i)
        if (!rscsi_disks[i].capacity &&
            rscsi_disks[i].device)
        {
            if (MODULE_FLAG
                && !rscsi_disks[i].has_part_table) {
                sd_sizes[i << 4] = rscsi_disks[i].capacity;
                /* revalidate does sd_init_onedisk via MAYBE_REINIT */
                revalidate_scsidisk(MKDEV(MAJOR_NR, i << 4), 0);
            }
            else
                i=sd_init_onedisk(i);
            rscsi_disks[i].has_part_table = 1;
        }

    /*
     * If the first host adapter is capable of scatter-gather, use a large
     * read-ahead; otherwise stay with a small one.
     */
    if(rscsi_disks[0].device && rscsi_disks[0].device->host->sg_tablesize)
        read_ahead[MAJOR_NR] = 120;  /* 120 sector read-ahead */
    else
        read_ahead[MAJOR_NR] = 4;    /* 4 sector read-ahead */

    return;
}

static int sd_detect(Scsi_Device * SDp){
    if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;

    printk("Detected scsi disk sd%c at scsi%d, channel %d, id %d, lun %d\n",
           'a'+ (sd_template.dev_noticed++),
           SDp->host->host_no, SDp->channel, SDp->id, SDp->lun);

    return 1;
}

static int sd_attach(Scsi_Device * SDp){
    Scsi_Disk * dpnt;
    int i;

    if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;

    if(sd_template.nr_dev >= sd_template.dev_max) {
        SDp->attached--;
        return 1;
    }

    for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
        if(!dpnt->device) break;

    if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)");

    SDp->scsi_request_fn = do_sd_request;
    rscsi_disks[i].device = SDp;
    rscsi_disks[i].has_part_table = 0;
    sd_template.nr_dev++;
    sd_gendisk.nr_real++;
    return 0;
}

#define DEVICE_BUSY rscsi_disks[target].device->busy
#define USAGE rscsi_disks[target].device->access_count
#define CAPACITY rscsi_disks[target].capacity
#define MAYBE_REINIT sd_init_onedisk(target)
#define GENDISK_STRUCT sd_gendisk

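/*
 * revalidate_scsidisk() flushes and invalidates every partition of the
 * unit, clears the partition table entries, re-probes the drive via
 * MAYBE_REINIT (sd_init_onedisk) and re-reads the partition table with
 * resetup_one_dev().  It refuses to run while the device is open more
 * than maxusage times.
 */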
int revalidate_scsidisk(kdev_t dev, int maxusage){
    int target;
    struct gendisk * gdev;
    unsigned long flags;
    int max_p;
    int start;
    int i;

    target = DEVICE_NR(dev);
    gdev = &GENDISK_STRUCT;

    save_flags(flags);
    cli();
    if (DEVICE_BUSY || USAGE > maxusage) {
        restore_flags(flags);
        printk("Device busy for revalidation (usage=%d)\n", USAGE);
        return -EBUSY;
    }
    DEVICE_BUSY = 1;
    restore_flags(flags);

    max_p = gdev->max_p;
    start = target << gdev->minor_shift;

    for (i=max_p - 1; i >=0 ; i--) {
        int minor = start+i;
        kdev_t devi = MKDEV(MAJOR_NR, minor);
        sync_dev(devi);
        invalidate_inodes(devi);
        invalidate_buffers(devi);
        gdev->part[minor].start_sect = 0;
        gdev->part[minor].nr_sects = 0;

        /* Reset the block size so the partition table can be re-read. */
        blksize_size[MAJOR_NR][minor] = 1024;
    }

#ifdef MAYBE_REINIT
    MAYBE_REINIT;
#endif

    gdev->part[start].nr_sects = CAPACITY;
    resetup_one_dev(gdev, target);

    DEVICE_BUSY = 0;
    return 0;
}

static int fop_revalidate_scsidisk(kdev_t dev){
    return revalidate_scsidisk(dev, 0);
}

static void sd_detach(Scsi_Device * SDp)
{
    Scsi_Disk * dpnt;
    int i;
    int max_p;
    int start;

    for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
        if(dpnt->device == SDp) {

            /* Found the unit: sync and invalidate all of its partitions.
             * (The inner loop reuses i, but the function returns right
             * after it, so the outer scan is never resumed.) */
            max_p = sd_gendisk.max_p;
            start = i << sd_gendisk.minor_shift;

            for (i=max_p - 1; i >=0 ; i--) {
                int minor = start+i;
                kdev_t devi = MKDEV(MAJOR_NR, minor);
                sync_dev(devi);
                invalidate_inodes(devi);
                invalidate_buffers(devi);
                sd_gendisk.part[minor].start_sect = 0;
                sd_gendisk.part[minor].nr_sects = 0;
                sd_sizes[minor] = 0;
            }

            dpnt->has_part_table = 0;
            dpnt->device = NULL;
            dpnt->capacity = 0;
            SDp->attached--;
            sd_template.dev_noticed--;
            sd_template.nr_dev--;
            sd_gendisk.nr_real--;
            return;
        }
    return;
}

#ifdef MODULE

int init_module(void) {
    sd_template.usage_count = &mod_use_count_;
    return scsi_register_module(MODULE_SCSI_DEV, &sd_template);
}

void cleanup_module( void)
{
    struct gendisk * prev_sdgd;
    struct gendisk * sdgd;

    scsi_unregister_module(MODULE_SCSI_DEV, &sd_template);
    unregister_blkdev(SCSI_DISK_MAJOR, "sd");
    sd_registered--;
    if( rscsi_disks != NULL )
    {
        scsi_init_free((char *) rscsi_disks,
                       (sd_template.dev_noticed + SD_EXTRA_DEVS)
                       * sizeof(Scsi_Disk));

        /* These arrays were allocated with (dev_max << 4) entries in
         * sd_init(), so free them with the matching size. */
        scsi_init_free((char *) sd_sizes,
                       (sd_template.dev_max << 4) * sizeof(int));
        scsi_init_free((char *) sd_blocksizes,
                       (sd_template.dev_max << 4) * sizeof(int));
        scsi_init_free((char *) sd_hardsizes,
                       (sd_template.dev_max << 4) * sizeof(int));
        scsi_init_free((char *) sd,
                       (sd_template.dev_max << 4) * sizeof(struct hd_struct));

        /* Remove sd_gendisk from the gendisk chain. */
        sdgd = gendisk_head;
        prev_sdgd = NULL;
        while(sdgd && sdgd != &sd_gendisk)
        {
            prev_sdgd = sdgd;
            sdgd = sdgd->next;
        }

        if(sdgd != &sd_gendisk)
            printk("sd_gendisk not in disk chain.\n");
        else {
            if(prev_sdgd != NULL)
                prev_sdgd->next = sdgd->next;
            else
                gendisk_head = sdgd->next;
        }
    }

    blksize_size[MAJOR_NR] = NULL;
    blk_dev[MAJOR_NR].request_fn = NULL;
    blk_size[MAJOR_NR] = NULL;
    hardsect_size[MAJOR_NR] = NULL;
    read_ahead[MAJOR_NR] = 0;
    sd_template.dev_max = 0;
}
#endif