This source file includes following definitions.
- sd_open
- sd_release
- sd_geninit
- rw_intr
- do_sd_request
- requeue_sd_request
- check_scsidisk_media_change
- sd_init_done
- sd_init_onedisk
- sd_init
- sd_finish
- sd_detect
- sd_attach
- revalidate_scsidisk
- fop_revalidate_scsidisk
- sd_detach
- init_module
- cleanup_module
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 #ifdef MODULE
20 #include <linux/autoconf.h>
21 #include <linux/module.h>
22 #include <linux/version.h>
23
24
25
26
27
28 #define MODULE_FLAG 1
29 #else
30 #define MODULE_FLAG scsi_loadable_module_flag
31 #endif
32
33 #include <linux/fs.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/mm.h>
37 #include <linux/string.h>
38 #include <linux/errno.h>
39 #include <asm/system.h>
40
41 #define MAJOR_NR SCSI_DISK_MAJOR
42 #include "../block/blk.h"
43 #include "scsi.h"
44 #include "hosts.h"
45 #include "sd.h"
46 #include "scsi_ioctl.h"
47 #include "constants.h"
48
49 #include <linux/genhd.h>
50
51
52
53
54
55 #define MAX_RETRIES 5
56
57
58
59
60
61 #define SD_TIMEOUT (7 * HZ)
62 #define SD_MOD_TIMEOUT (8 * HZ)
63
64 #define CLUSTERABLE_DEVICE(SC) (SC->host->use_clustering && \
65 SC->device->type != TYPE_MOD)
66
67 struct hd_struct * sd;
68
69 Scsi_Disk * rscsi_disks = NULL;
70 static int * sd_sizes;
71 static int * sd_blocksizes;
72 static int * sd_hardsizes;
73
74 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
75
76 static int check_scsidisk_media_change(kdev_t);
77 static int fop_revalidate_scsidisk(kdev_t);
78
79 static sd_init_onedisk(int);
80
81 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
82
83 static int sd_init(void);
84 static void sd_finish(void);
85 static int sd_attach(Scsi_Device *);
86 static int sd_detect(Scsi_Device *);
87 static void sd_detach(Scsi_Device *);
88
89 struct Scsi_Device_Template sd_template =
90 { NULL, "disk", "sd", NULL, TYPE_DISK,
91 SCSI_DISK_MAJOR, 0, 0, 0, 1,
92 sd_detect, sd_init,
93 sd_finish, sd_attach, sd_detach
94 };
95
96 static int sd_open(struct inode * inode, struct file * filp)
97 {
98 int target;
99 target = DEVICE_NR(inode->i_rdev);
100
101 if(target >= sd_template.dev_max || !rscsi_disks[target].device)
102 return -ENXIO;
103
104
105
106
107
108
109
110 while (rscsi_disks[target].device->busy)
111 barrier();
112 if(rscsi_disks[target].device->removable) {
113 check_disk_change(inode->i_rdev);
114
115 if(!rscsi_disks[target].device->access_count)
116 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
117 };
118
119
120
121
122
123 if(sd_sizes[MINOR(inode->i_rdev)] == 0)
124 return -ENXIO;
125
126 rscsi_disks[target].device->access_count++;
127 if (rscsi_disks[target].device->host->hostt->usage_count)
128 (*rscsi_disks[target].device->host->hostt->usage_count)++;
129 if(sd_template.usage_count) (*sd_template.usage_count)++;
130 return 0;
131 }
132
133 static void sd_release(struct inode * inode, struct file * file)
134 {
135 int target;
136 sync_dev(inode->i_rdev);
137
138 target = DEVICE_NR(inode->i_rdev);
139
140 rscsi_disks[target].device->access_count--;
141 if (rscsi_disks[target].device->host->hostt->usage_count)
142 (*rscsi_disks[target].device->host->hostt->usage_count)--;
143 if(sd_template.usage_count) (*sd_template.usage_count)--;
144
145 if(rscsi_disks[target].device->removable) {
146 if(!rscsi_disks[target].device->access_count)
147 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
148 }
149 }
150
static void sd_geninit(struct gendisk *);

/* File operations for the SCSI disk block device. */
static struct file_operations sd_fops = {
NULL,                         /* lseek - default */
block_read,                   /* read - generic block-layer read */
block_write,                  /* write - generic block-layer write */
NULL,                         /* readdir - bad */
NULL,                         /* select */
sd_ioctl,                     /* ioctl */
NULL,                         /* mmap */
sd_open,                      /* open */
sd_release,                   /* release (no fsync on close beyond sync_dev) */
block_fsync,                  /* fsync */
NULL,                         /* fasync */
check_scsidisk_media_change,  /* media change detection */
fop_revalidate_scsidisk       /* revalidate after media change */
};
168
/* Generic-disk descriptor for the sd driver.  The NULL/0 slots are
 * filled in by sd_init() (max_nr, part, sizes, real_devices) and
 * maintained by sd_attach()/sd_detach() (nr_real). */
static struct gendisk sd_gendisk = {
MAJOR_NR,      /* major number */
"sd",          /* device name prefix */
4,             /* minor shift: 16 minors (partitions) per disk */
1 << 4,        /* max partitions per real disk */
0,             /* max real disks - set in sd_init() */
sd_geninit,    /* init hook called by the genhd layer */
NULL,          /* partition table - set to 'sd' in sd_init() */
NULL,          /* sizes - set to sd_sizes in sd_init() */
0,             /* number of real devices - updated in sd_attach/sd_detach */
NULL,          /* real_devices - set to rscsi_disks in sd_init() */
NULL           /* next gendisk in the global chain */
};
182
183 static void sd_geninit (struct gendisk *ignored)
184 {
185 int i;
186
187 for (i = 0; i < sd_template.dev_max; ++i)
188 if(rscsi_disks[i].device)
189 sd[i << 4].nr_sects = rscsi_disks[i].capacity;
190 #if 0
191
192 sd_gendisk.nr_real = sd_template.dev_max;
193 #endif
194 }
195
196
197
198
199
200
201
/*
 * rw_intr() - completion routine for READ/WRITE commands issued by
 * requeue_sd_request().
 *
 * On success it releases any scatter-gather list and bounce buffers
 * (copying bounced data back to the caller's buffers for READs),
 * completes the request, and requeues further work.  On failure it
 * frees the buffers without copying, then decodes the result/sense
 * data: UNIT ATTENTION on removable media marks the medium changed,
 * ILLEGAL REQUEST downgrades from 10-byte to 6-byte commands and
 * retries, anything else fails the current chunk of the request.
 */
static void rw_intr (Scsi_Cmnd *SCpnt)
{
    int result = SCpnt->result;
    int this_count = SCpnt->bufflen >> 9;  /* sectors (512-byte units) transferred */

#ifdef DEBUG
    printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.rq_dev),
    SCpnt->host->host_no, result);
#endif

    /* First: the command completed cleanly. */
    if (!result) {

#ifdef DEBUG
        printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.rq_dev),
        SCpnt->request.nr_sectors);
        printk("use_sg is %d\n ",SCpnt->use_sg);
#endif
        if (SCpnt->use_sg) {
            struct scatterlist * sgpnt;
            int i;
            sgpnt = (struct scatterlist *) SCpnt->buffer;
            for(i=0; i<SCpnt->use_sg; i++) {
#ifdef DEBUG
                printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address,
                sgpnt[i].length);
#endif
                /* alt_address set => this segment was bounced below the
                 * ISA DMA limit; copy data back for READs, then free. */
                if (sgpnt[i].alt_address) {
                    if (SCpnt->request.cmd == READ)
                        memcpy(sgpnt[i].alt_address, sgpnt[i].address,
                        sgpnt[i].length);
                    scsi_free(sgpnt[i].address, sgpnt[i].length);
                };
            };

            /* Release the scatter-gather list itself. */
            scsi_free(SCpnt->buffer, SCpnt->sglist_len);
        } else {
            /* Non-SG path: a differing buffer pointer means a single
             * bounce buffer was used. */
            if (SCpnt->buffer != SCpnt->request.buffer) {
#ifdef DEBUG
                printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
                SCpnt->bufflen);
#endif
                if (SCpnt->request.cmd == READ)
                    memcpy(SCpnt->request.buffer, SCpnt->buffer,
                    SCpnt->bufflen);
                scsi_free(SCpnt->buffer, SCpnt->bufflen);
            };
        };

        /* Partial transfer: more sectors remain on this request. */
        if (SCpnt->request.nr_sectors > this_count)
        {
            SCpnt->request.errors = 0;

            /* A request without buffer heads (paging I/O) must be
             * satisfied in one command - anything else is fatal. */
            if (!SCpnt->request.bh)
            {
#ifdef DEBUG
                printk("sd%c : handling page request, no buffer\n",
                'a' + MINOR(SCpnt->request.rq_dev));
#endif
                panic("sd.c: linked page request (%lx %x)",
                SCpnt->request.sector, this_count);
            }
        }
        SCpnt = end_scsi_request(SCpnt, 1, this_count);
        requeue_sd_request(SCpnt);
        return;
    }

    /* Error path: free buffers without copying data back. */
    if (SCpnt->use_sg) {
        struct scatterlist * sgpnt;
        int i;
        sgpnt = (struct scatterlist *) SCpnt->buffer;
        for(i=0; i<SCpnt->use_sg; i++) {
#ifdef DEBUG
            printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
            SCpnt->bufflen);
#endif
            if (sgpnt[i].alt_address) {
                scsi_free(sgpnt[i].address, sgpnt[i].length);
            };
        };
        scsi_free(SCpnt->buffer, SCpnt->sglist_len);
    } else {
#ifdef DEBUG
        printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
        SCpnt->bufflen);
#endif
        if (SCpnt->buffer != SCpnt->request.buffer)
            scsi_free(SCpnt->buffer, SCpnt->bufflen);
    };

    /* Decode the driver-level status and sense data. */
    if (driver_byte(result) != 0) {
        if (suggestion(result) == SUGGEST_REMAP) {
#ifdef REMAP
            /* NOTE(review): this region does not compile if REMAP is
             * defined - the condition lacks parentheses and the trailing
             * 'else' has no statement.  Left untouched (REMAP is never
             * defined here); verify before enabling. */
            if rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].remap
            {
                result = 0;
            }
            else
#endif
        }

        /* 0x70: current-error sense data in standard format. */
        if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
            if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
                if(rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->removable) {
                    /* Removable medium may have been changed: flag it and
                     * fail this chunk so upper layers revalidate. */
                    rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->changed = 1;
                    SCpnt = end_scsi_request(SCpnt, 0, this_count);
                    requeue_sd_request(SCpnt);
                    return;
                }
                else
                {
                    /* Non-removable disk (e.g. after a bus reset):
                     * just retry the command. */
                    requeue_sd_request(SCpnt);
                    return;
                }
            }
        }

        /* ILLEGAL REQUEST: the target may not grok 10-byte READ/WRITE;
         * fall back to 6-byte commands once and retry. */
        if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
            if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten) {
                rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten = 0;
                requeue_sd_request(SCpnt);
                result = 0;
            } else {
                /* Genuine illegal request - fall through to the
                 * generic error handling below. */
            }
        }
    }
    if (result) {
        printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n",
        rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->host->host_no,
        rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->channel,
        rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->id,
        rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->lun, result);

        if (driver_byte(result) & DRIVER_SENSE)
            print_sense("sd", SCpnt);
        SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
        requeue_sd_request(SCpnt);
        return;
    }
}
385
386
387
388
389
390
391
/*
 * do_sd_request() - top-level request function for the sd major.
 * Pulls requests off the queue, pairs each with a free Scsi_Cmnd, and
 * hands the pair to requeue_sd_request() which builds and issues the
 * actual SCSI command.  Runs with interrupts manipulated via
 * save_flags/cli/restore_flags; may be called from interrupt context.
 */
static void do_sd_request (void)
{
    Scsi_Cmnd * SCpnt = NULL;
    Scsi_Device * SDev;
    struct request * req = NULL;
    unsigned long flags;
    int flag = 0;

    save_flags(flags);
    while (1==1){
        cli();
        /* An inactive request at the head means nothing to do. */
        if (CURRENT != NULL && CURRENT->rq_status == RQ_INACTIVE) {
            restore_flags(flags);
            return;
        };

        INIT_SCSI_REQUEST;
        SDev = rscsi_disks[DEVICE_NR(CURRENT->rq_dev)].device;

        /* After a bus/device reset, re-lock the door of a removable
         * disk (the reset unlocks it).  scsi_ioctl sleeps, so only do
         * this outside interrupt context. */
        if( SDev->was_reset )
        {
            if( SDev->removable && !intr_count )
            {
                scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0);
            }
            SDev->was_reset = 0;
        }

        /* Only the first pass may use allocate_device() (which can
         * dequeue CURRENT); later passes fall through to the scan
         * below so we never block here. */
        if (flag++ == 0)
            SCpnt = allocate_device(&CURRENT,
            rscsi_disks[DEVICE_NR(CURRENT->rq_dev)].device, 0);
        else SCpnt = NULL;

        restore_flags(flags);

        /* Head-of-queue device is busy: with multiple disks, scan the
         * rest of the queue for a request whose device has a free
         * command slot, and unlink it if found. */
        if (!SCpnt && sd_template.nr_dev > 1){
            struct request *req1;
            req1 = NULL;
            cli();
            req = CURRENT;
            while(req){
                SCpnt = request_queueable(req, rscsi_disks[DEVICE_NR(req->rq_dev)].device);
                if(SCpnt) break;
                req1 = req;
                req = req->next;
            };
            if (SCpnt && req->rq_status == RQ_INACTIVE) {
                if (req == CURRENT)
                    CURRENT = CURRENT->next;
                else
                    req1->next = req->next;
            };
            restore_flags(flags);
        };

        /* No command structure available anywhere: give up for now;
         * a completion will re-enter this function. */
        if (!SCpnt) return;

        /* Build and issue the SCSI command for this request. */
        requeue_sd_request(SCpnt);
    };
}
491
/*
 * requeue_sd_request() - turn the request attached to SCpnt into a SCSI
 * READ/WRITE command and issue it via scsi_do_cmd() (completion:
 * rw_intr).
 *
 * The bulk of the work is deciding how to present the buffer-head chain
 * to the host adapter:
 *   - contiguous chain (or no bh at all): single flat transfer;
 *   - no scatter-gather support or DMA pool nearly empty: transfer just
 *     the first chunk;
 *   - otherwise: build a scatterlist, allocating ISA bounce buffers for
 *     segments above ISA_DMA_THRESHOLD and merging adjacent segments
 *     where possible.
 * Finally the 6- or 10-byte CDB is assembled (with block/count scaled
 * for 256- and 1024-byte-sector devices) and the command is fired.
 */
static void requeue_sd_request (Scsi_Cmnd * SCpnt)
{
    int dev, devm, block, this_count;
    unsigned char cmd[10];
    int bounce_size, contiguous;
    int max_sg;
    struct buffer_head * bh, *bhp;
    char * buff, *bounce_buffer;

repeat:

    /* No live request on this command: go look for more work. */
    if(!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) {
        do_sd_request();
        return;
    }

    devm = MINOR(SCpnt->request.rq_dev);   /* minor: disk + partition */
    dev = DEVICE_NR(SCpnt->request.rq_dev); /* disk index */

    block = SCpnt->request.sector;
    this_count = 0;

#ifdef DEBUG
    printk("Doing sd request, dev = %d, block = %d\n", devm, block);
#endif

    /* Reject requests to nonexistent devices or beyond the partition. */
    if (devm >= (sd_template.dev_max << 4) ||
        !rscsi_disks[dev].device ||
        block + SCpnt->request.nr_sectors > sd[devm].nr_sects)
    {
        SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
        goto repeat;
    }

    /* Translate partition-relative sector to absolute disk sector. */
    block += sd[devm].start_sect;

    if (rscsi_disks[dev].device->changed)
    {
        /* Medium changed since last open: fail until revalidated. */
        SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
        goto repeat;
    }

#ifdef DEBUG
    printk("sd%c : real dev = /dev/sd%c, block = %d\n",
    'a' + devm, dev, block);
#endif

    /* 1024-byte-sector devices can only address even 512-byte sectors. */
    if (rscsi_disks[dev].sector_size == 1024)
        if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
            printk("sd.c:Bad block number requested");
            SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
            goto repeat;
        }

    switch (SCpnt->request.cmd)
    {
    case WRITE :
        if (!rscsi_disks[dev].device->writeable)
        {
            SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
            goto repeat;
        }
        cmd[0] = WRITE_6;
        break;
    case READ :
        cmd[0] = READ_6;
        break;
    default :
        panic ("Unknown sd command %d\n", SCpnt->request.cmd);
    }

    SCpnt->this_count = 0;

    /* Clustering: treat a physically contiguous bh chain as one flat
     * buffer (never for TYPE_MOD devices - see CLUSTERABLE_DEVICE). */
    contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
    bounce_buffer = NULL;
    bounce_size = (SCpnt->request.nr_sectors << 9);

    /* If a contiguous transfer would cross the ISA DMA limit on an
     * unchecked-ISA-DMA host, try to set up one big bounce buffer;
     * failing that, fall back to non-contiguous handling. */
    if (contiguous && SCpnt->request.bh &&
        ((long) SCpnt->request.bh->b_data)
        + (SCpnt->request.nr_sectors << 9) - 1 > ISA_DMA_THRESHOLD
        && SCpnt->host->unchecked_isa_dma) {
        if(((long) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
            bounce_buffer = (char *) scsi_malloc(bounce_size);
        if(!bounce_buffer) contiguous = 0;
    };

    /* Verify the chain really is physically contiguous. */
    if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
        for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp,
            bhp = bhp->b_reqnext) {
            if(!CONTIGUOUS_BUFFERS(bh,bhp)) {
                if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
                contiguous = 0;
                break;
            }
        };
    if (!SCpnt->request.bh || contiguous) {

        /* Case 1: single flat transfer covering the whole request. */
        this_count = SCpnt->request.nr_sectors;
        buff = SCpnt->request.buffer;
        SCpnt->use_sg = 0;

    } else if (SCpnt->host->sg_tablesize == 0 ||
        (need_isa_buffer && dma_free_sectors <= 10)) {

        /* Case 2: no SG support, or the DMA pool is nearly exhausted -
         * transfer only the first chunk of the request. */
        if (SCpnt->host->sg_tablesize != 0 &&
            need_isa_buffer &&
            dma_free_sectors <= 10)
            printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n");

        this_count = SCpnt->request.current_nr_sectors;
        buff = SCpnt->request.buffer;
        SCpnt->use_sg = 0;

    } else {

        /* Case 3: build a scatter-gather list. */
        struct scatterlist * sgpnt;
        int count, this_count_max;
        int counted;

        bh = SCpnt->request.bh;
        this_count = 0;
        /* 10-byte commands carry a 16-bit count, 6-byte only 8 bits. */
        this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
        count = 0;
        bhp = NULL;
        /* First pass: count segments (merging contiguous bhs) and
         * total sectors, bounded by the CDB and SG-table limits. */
        while(bh) {
            if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
            if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
               !CLUSTERABLE_DEVICE(SCpnt) ||
               (SCpnt->host->unchecked_isa_dma &&
                ((unsigned long) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
                if (count < SCpnt->host->sg_tablesize) count++;
                else break;
            };
            this_count += (bh->b_size >> 9);
            bhp = bh;
            bh = bh->b_reqnext;
        };
#if 0
        if(SCpnt->host->unchecked_isa_dma &&
           ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
#endif
        SCpnt->use_sg = count;
        /* scsi_malloc() works in power-of-two chunks from 512 up. */
        count = 512;
        while( count < (SCpnt->use_sg * sizeof(struct scatterlist)))
            count = count << 1;
        SCpnt->sglist_len = count;
        max_sg = count / sizeof(struct scatterlist);
        if(SCpnt->host->sg_tablesize < max_sg)
            max_sg = SCpnt->host->sg_tablesize;
        sgpnt = (struct scatterlist * ) scsi_malloc(count);
        if (!sgpnt) {
            /* Couldn't even get the SG list: degrade to one chunk. */
            printk("Warning - running *really* short on DMA buffers\n");
            SCpnt->use_sg = 0;
            this_count = SCpnt->request.current_nr_sectors;
            buff = SCpnt->request.buffer;
        } else {
            memset(sgpnt, 0, count);
            /* Second pass: fill the scatterlist, bounce-buffering any
             * segment above the ISA DMA limit and merging adjacent
             * segments where profitable. */
            buff = (char *) sgpnt;
            counted = 0;
            for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
                count < SCpnt->use_sg && bh;
                count++, bh = bhp) {

                bhp = bh->b_reqnext;

                if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
                sgpnt[count].length += bh->b_size;
                counted += bh->b_size >> 9;

                if (((long) sgpnt[count].address) + sgpnt[count].length - 1 >
                    ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
                    !sgpnt[count].alt_address) {
                    sgpnt[count].alt_address = sgpnt[count].address;
                    /* Keep a small reserve in the DMA pool. */
                    if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
                        sgpnt[count].address = NULL;
                    } else {
                        sgpnt[count].address =
                            (char *) scsi_malloc(sgpnt[count].length);
                    };

                    /* Bounce allocation failed: truncate the transfer
                     * to the segments filled so far. */
                    if(sgpnt[count].address == NULL){
#if 0
                        printk("Warning: Running low on SCSI DMA buffers");

                        while(--count >= 0){
                            if(sgpnt[count].alt_address)
                                scsi_free(sgpnt[count].address,
                                sgpnt[count].length);
                        };
                        this_count = SCpnt->request.current_nr_sectors;
                        buff = SCpnt->request.buffer;
                        SCpnt->use_sg = 0;
                        scsi_free(sgpnt, SCpnt->sglist_len);
#endif
                        SCpnt->use_sg = count;
                        this_count = counted -= bh->b_size >> 9;
                        break;
                    };

                };

                /* Try to merge the next bh into the current segment
                 * when the two are physically contiguous. */
                if(bhp && CONTIGUOUS_BUFFERS(bh,bhp)
                   && CLUSTERABLE_DEVICE(SCpnt)) {
                    char * tmp;

                    if (((long) sgpnt[count].address) + sgpnt[count].length +
                        bhp->b_size - 1 > ISA_DMA_THRESHOLD &&
                        (SCpnt->host->unchecked_isa_dma) &&
                        !sgpnt[count].alt_address) continue;

                    if(!sgpnt[count].alt_address) {count--; continue; }
                    if(dma_free_sectors > 10)
                        tmp = (char *) scsi_malloc(sgpnt[count].length
                                                   + bhp->b_size);
                    else {
                        tmp = NULL;
                        max_sg = SCpnt->use_sg;
                    };
                    if(tmp){
                        scsi_free(sgpnt[count].address, sgpnt[count].length);
                        sgpnt[count].address = tmp;
                        count--;
                        continue;
                    };

                    /* Merge failed: we need one more segment than the
                     * first pass predicted. */
                    if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
                };
            };

            /* Total sectors actually mapped into the scatterlist. */
            this_count = counted;

            /* Sanity check: the list must be internally consistent. */
            if(count < SCpnt->use_sg || SCpnt->use_sg
               > SCpnt->host->sg_tablesize){
                bh = SCpnt->request.bh;
                printk("Use sg, count %d %x %d\n",
                SCpnt->use_sg, count, dma_free_sectors);
                printk("maxsg = %x, counted = %d this_count = %d\n",
                max_sg, counted, this_count);
                while(bh){
                    printk("[%p %lx] ", bh->b_data, bh->b_size);
                    bh = bh->b_reqnext;
                };
                if(SCpnt->use_sg < 16)
                    for(count=0; count<SCpnt->use_sg; count++)
                        printk("{%d:%p %p %d} ", count,
                        sgpnt[count].address,
                        sgpnt[count].alt_address,
                        sgpnt[count].length);
                panic("Ooops");
            };

            /* WRITE: preload bounce buffers from the original data. */
            if (SCpnt->request.cmd == WRITE)
                for(count=0; count<SCpnt->use_sg; count++)
                    if(sgpnt[count].alt_address)
                        memcpy(sgpnt[count].address, sgpnt[count].alt_address,
                        sgpnt[count].length);
        };
    };

    /* Flat (non-SG) transfer above the ISA limit: use the prepared
     * bounce buffer, or allocate one now (shrinking to the first chunk
     * if memory is tight). */
    if(SCpnt->use_sg == 0){
        if (((long) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
            (SCpnt->host->unchecked_isa_dma)) {
            if(bounce_buffer)
                buff = bounce_buffer;
            else
                buff = (char *) scsi_malloc(this_count << 9);
            if(buff == NULL) {
                this_count = SCpnt->request.current_nr_sectors;
                buff = (char *) scsi_malloc(this_count << 9);
                if(!buff) panic("Ran out of DMA buffers.");
            };
            if (SCpnt->request.cmd == WRITE)
                memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
        };
    };
#ifdef DEBUG
    printk("sd%c : %s %d/%d 512 byte blocks.\n",
    'a' + devm,
    (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
    this_count, SCpnt->request.nr_sectors);
#endif

    cmd[1] = (SCpnt->lun << 5) & 0xe0;

    /* Scale sector address/count to the device's native sector size. */
    if (rscsi_disks[dev].sector_size == 1024){
        if(block & 1) panic("sd.c:Bad block number requested");
        if(this_count & 1) panic("sd.c:Bad block number requested");
        block = block >> 1;
        this_count = this_count >> 1;
    };

    if (rscsi_disks[dev].sector_size == 256){
        block = block << 1;
        this_count = this_count << 1;
    };

    /* Use a 10-byte CDB when the 6-byte limits (8-bit count, 21-bit
     * LBA) are exceeded and the device supports it. */
    if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten)
    {
        if (this_count > 0xffff)
            this_count = 0xffff;

        cmd[0] += READ_10 - READ_6 ;
        cmd[2] = (unsigned char) (block >> 24) & 0xff;
        cmd[3] = (unsigned char) (block >> 16) & 0xff;
        cmd[4] = (unsigned char) (block >> 8) & 0xff;
        cmd[5] = (unsigned char) block & 0xff;
        cmd[6] = cmd[9] = 0;
        cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
        cmd[8] = (unsigned char) this_count & 0xff;
    }
    else
    {
        if (this_count > 0xff)
            this_count = 0xff;

        cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
        cmd[2] = (unsigned char) ((block >> 8) & 0xff);
        cmd[3] = (unsigned char) block & 0xff;
        cmd[4] = (unsigned char) this_count;
        cmd[5] = 0;
    }

    /* Tell the midlayer the device's transfer granularity so residues
     * can be detected (underflow = minimum acceptable byte count). */
    SCpnt->transfersize = rscsi_disks[dev].sector_size;
    SCpnt->underflow = this_count << 9;
    scsi_do_cmd (SCpnt, (void *) cmd, buff,
                 this_count * rscsi_disks[dev].sector_size,
                 rw_intr,
                 (SCpnt->device->type == TYPE_DISK ?
                  SD_TIMEOUT : SD_MOD_TIMEOUT),
                 MAX_RETRIES);
}
882
/*
 * Media-change check for the file_operations table.
 * Probes the unit with TEST UNIT READY (via sd_ioctl); a failing probe
 * is taken to mean the medium was changed or removed.
 * Returns 1 if a change was detected, 0 otherwise.
 */
static int check_scsidisk_media_change(kdev_t full_dev){
    int retval;
    int target;
    struct inode inode;   /* stack inode: only i_rdev is set before use */
    int flag = 0;         /* always 0 here, so 'changed' is always cleared below */

    target = DEVICE_NR(full_dev);

    if (target >= sd_template.dev_max ||
        !rscsi_disks[target].device) {
        printk("SCSI disk request error: invalid device.\n");
        return 0;
    };

    /* Fixed disks never report a media change. */
    if(!rscsi_disks[target].device->removable) return 0;

    inode.i_rdev = full_dev;
    retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);

    if(retval){
        /* Unit not ready: assume the medium changed (or is gone) and
         * remember that until revalidation. */
        rscsi_disks[target].device->changed = 1;
        return 1;

    };

    /* Report - and then clear - any change flagged by the interrupt
     * handler (rw_intr sets 'changed' on UNIT ATTENTION). */
    retval = rscsi_disks[target].device->changed;
    if(!flag) rscsi_disks[target].device->changed = 0;
    return retval;
}
916
917 static void sd_init_done (Scsi_Cmnd * SCpnt)
918 {
919 struct request * req;
920
921 req = &SCpnt->request;
922 req->rq_status = RQ_SCSI_DONE;
923
924 if (req->sem != NULL) {
925 up(req->sem);
926 }
927 }
928
/*
 * sd_init_onedisk() - probe and size disk 'i'.
 * Spins up a not-ready fixed disk if necessary, then issues READ
 * CAPACITY to determine capacity and sector size, filling in
 * rscsi_disks[i].  Capacity is normalized to 512-byte sectors.
 * Returns the disk index i (the entry may have been deleted for an
 * unsupported sector size on a fixed disk).
 */
static int sd_init_onedisk(int i)
{
    unsigned char cmd[10];
    unsigned char *buffer;
    unsigned long spintime;
    int the_result, retries;
    Scsi_Cmnd * SCpnt;

    /* Grab the device exclusively (blocking) for the probe sequence. */
    SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
    buffer = (unsigned char *) scsi_malloc(512);

    spintime = 0;

    /* pid 0 => boot-time probe: the scheduler is not usable, so all
     * waiting below is done by busy-waiting on rq_status. */
    if (current->pid == 0){
        do{
            /* TEST UNIT READY, retried through UNIT ATTENTION. */
            retries = 0;
            while(retries < 3)
            {
                cmd[0] = TEST_UNIT_READY;
                cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
                memset ((void *) &cmd[2], 0, 8);
                SCpnt->request.rq_status = RQ_SCSI_BUSY;
                SCpnt->cmd_len = 0;
                SCpnt->sense_buffer[0] = 0;
                SCpnt->sense_buffer[2] = 0;

                scsi_do_cmd (SCpnt,
                             (void *) cmd, (void *) buffer,
                             512, sd_init_done, SD_TIMEOUT,
                             MAX_RETRIES);

                while(SCpnt->request.rq_status != RQ_SCSI_DONE) barrier();

                the_result = SCpnt->result;
                retries++;
                if( the_result == 0
                    || SCpnt->sense_buffer[2] != UNIT_ATTENTION)
                    break;
            }

            /* NOT READY on a fixed disk: issue START STOP UNIT to spin
             * it up, then poll roughly once a second. */
            if(the_result && !rscsi_disks[i].device->removable &&
               SCpnt->sense_buffer[2] == NOT_READY) {
                int time1;
                if(!spintime){
                    printk( "sd%c: Spinning up disk...", 'a' + i );
                    cmd[0] = START_STOP;
                    cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
                    cmd[1] |= 1;   /* IMMED bit: return before spin-up completes */
                    memset ((void *) &cmd[2], 0, 8);
                    cmd[4] = 1;    /* START bit */

                    SCpnt->request.rq_status = RQ_SCSI_BUSY;
                    SCpnt->cmd_len = 0;
                    SCpnt->sense_buffer[0] = 0;
                    SCpnt->sense_buffer[2] = 0;

                    scsi_do_cmd (SCpnt,
                                 (void *) cmd, (void *) buffer,
                                 512, sd_init_done, SD_TIMEOUT,
                                 MAX_RETRIES);

                    while(SCpnt->request.rq_status != RQ_SCSI_DONE)
                        barrier();

                    spintime = jiffies;
                };

                /* Busy-wait about one second between polls. */
                time1 = jiffies;
                while(jiffies < time1 + HZ);
                printk( "." );
            };
        } while(the_result && spintime && spintime+100*HZ > jiffies);
        if (spintime) {
            if (the_result)
                printk( "not responding...\n" );
            else
                printk( "ready\n" );
        }
    };

    /* READ CAPACITY: up to 3 attempts. */
    retries = 3;
    do {
        cmd[0] = READ_CAPACITY;
        cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
        memset ((void *) &cmd[2], 0, 8);
        memset ((void *) buffer, 0, 8);
        SCpnt->request.rq_status = RQ_SCSI_BUSY;
        SCpnt->cmd_len = 0;
        SCpnt->sense_buffer[0] = 0;
        SCpnt->sense_buffer[2] = 0;

        scsi_do_cmd (SCpnt,
                     (void *) cmd, (void *) buffer,
                     8, sd_init_done, SD_TIMEOUT,
                     MAX_RETRIES);

        if (current->pid == 0) {
            /* Boot time: busy-wait for completion. */
            while(SCpnt->request.rq_status != RQ_SCSI_DONE)
                barrier();
        } else {
            /* Normal context: sleep on a semaphore that sd_init_done
             * will up(); the trailing loop guards against spurious
             * wakeups before the status flip. */
            if (SCpnt->request.rq_status != RQ_SCSI_DONE){
                struct semaphore sem = MUTEX_LOCKED;
                SCpnt->request.sem = &sem;
                down(&sem);

                while (SCpnt->request.rq_status != RQ_SCSI_DONE)
                    schedule();
            }
        }

        the_result = SCpnt->result;
        retries--;

    } while(the_result && retries);

    SCpnt->request.rq_status = RQ_INACTIVE;

    wake_up(&SCpnt->device->device_wait);

    if (the_result)
    {
        /* READ CAPACITY failed: assume 512-byte sectors and a 1GB-ish
         * capacity so the device stays usable for raw access. */
        printk ("sd%c : READ CAPACITY failed.\n"
        "sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
        'a' + i, 'a' + i,
        status_byte(the_result),
        msg_byte(the_result),
        host_byte(the_result),
        driver_byte(the_result)
        );
        if (driver_byte(the_result) & DRIVER_SENSE)
            printk("sd%c : extended sense code = %1x \n",
            'a' + i, SCpnt->sense_buffer[2] & 0xf);
        else
            printk("sd%c : sense not available. \n", 'a' + i);

        printk("sd%c : block size assumed to be 512 bytes, disk size 1GB. \n",
        'a' + i);
        rscsi_disks[i].capacity = 0x1fffff;
        rscsi_disks[i].sector_size = 512;

        /* A NOT READY removable drive probably has no medium loaded. */
        if(rscsi_disks[i].device->removable &&
           SCpnt->sense_buffer[2] == NOT_READY)
            rscsi_disks[i].device->changed = 1;

    }
    else
    {
        /* READ CAPACITY data is big-endian: bytes 0-3 last LBA,
         * bytes 4-7 block length. */
        rscsi_disks[i].capacity = (buffer[0] << 24) |
            (buffer[1] << 16) |
            (buffer[2] << 8) |
            buffer[3];

        rscsi_disks[i].sector_size = (buffer[4] << 24) |
            (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];

        if (rscsi_disks[i].sector_size != 512 &&
            rscsi_disks[i].sector_size != 1024 &&
            rscsi_disks[i].sector_size != 256)
        {
            printk ("sd%c : unsupported sector size %d.\n",
            'a' + i, rscsi_disks[i].sector_size);
            if(rscsi_disks[i].device->removable){
                rscsi_disks[i].capacity = 0;
            } else {
                printk ("scsi : deleting disk entry.\n");
                rscsi_disks[i].device = NULL;
                sd_template.nr_dev--;
                return i;
            };
        }
        {
            /* Publish the hardware sector size for all 16 minors of
             * this disk. */
            int m;
            int hard_sector = rscsi_disks[i].sector_size;

            for (m=i<<4; m<((i+1)<<4); m++){
                sd_hardsizes[m] = hard_sector;
            }
            printk ("SCSI Hardware sector size is %d bytes on device sd%c\n",
            hard_sector,i+'a');
        }
        /* Normalize capacity to 512-byte sectors. */
        if(rscsi_disks[i].sector_size == 1024)
            rscsi_disks[i].capacity <<= 1;
        if(rscsi_disks[i].sector_size == 256)
            rscsi_disks[i].capacity >>= 1;
    }

    rscsi_disks[i].ten = 1;    /* optimistically assume 10-byte commands work */
    rscsi_disks[i].remap = 1;
    scsi_free(buffer, 512);
    return i;
}
1152
1153
1154
1155
1156
1157
/* Nonzero once register_blkdev() has succeeded; prevents double registration. */
static int sd_registered = 0;

/*
 * sd_init() - one-time driver setup: register the block major and
 * allocate the per-disk and per-minor tables (16 minors per disk).
 * Returns 0 on success, 1 if the major number could not be obtained.
 */
static int sd_init()
{
    int i;

    if (sd_template.dev_noticed == 0) return 0;

    if(!sd_registered) {
        if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
            printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
            return 1;
        }
        sd_registered++;
    }

    /* Tables already allocated on a previous call: nothing more to do. */
    if(rscsi_disks) return 0;

    /* Leave headroom for disks attached after boot. */
    sd_template.dev_max = sd_template.dev_noticed + SD_EXTRA_DEVS;

    rscsi_disks = (Scsi_Disk *)
        scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC);
    memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));

    /* Per-minor arrays are dev_max << 4 entries (16 minors per disk). */
    sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
                                        sizeof(int), GFP_ATOMIC);
    memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));

    sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
                                             sizeof(int), GFP_ATOMIC);

    sd_hardsizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
                                            sizeof(int), GFP_ATOMIC);

    /* Defaults: 1K soft blocksize, 512-byte hardware sectors (updated
     * per disk by sd_init_onedisk). */
    for(i=0;i<(sd_template.dev_max << 4);i++){
        sd_blocksizes[i] = 1024;
        sd_hardsizes[i] = 512;
    }
    blksize_size[MAJOR_NR] = sd_blocksizes;
    hardsect_size[MAJOR_NR] = sd_hardsizes;
    sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
                                               sizeof(struct hd_struct),
                                               GFP_ATOMIC);

    /* Hook the freshly allocated tables into the gendisk descriptor. */
    sd_gendisk.max_nr = sd_template.dev_max;
    sd_gendisk.part = sd;
    sd_gendisk.sizes = sd_sizes;
    sd_gendisk.real_devices = (void *) rscsi_disks;
    return 0;
}
1210
/*
 * sd_finish() - called once all devices have been attached.
 * Installs the request function, links sd_gendisk into the global disk
 * chain, sizes every disk that has not been probed yet, and sets the
 * read-ahead default.
 */
static void sd_finish()
{
    int i;

    blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;

    /* Put our gendisk at the head of the global chain. */
    sd_gendisk.next = gendisk_head;
    gendisk_head = &sd_gendisk;

    for (i = 0; i < sd_template.dev_max; ++i)
        if (!rscsi_disks[i].capacity &&
            rscsi_disks[i].device)
        {
            i = sd_init_onedisk(i);
            /* For modules (loaded after boot), read the partition
             * table now; at boot the genhd pass will do it. */
            if (MODULE_FLAG
                && !rscsi_disks[i].has_part_table) {
                sd_sizes[i << 4] = rscsi_disks[i].capacity;
                revalidate_scsidisk(MKDEV(MAJOR_NR, i << 4), 0);
            }
            rscsi_disks[i].has_part_table = 1;
        }

    /* Read-ahead heuristic: generous for scatter-gather capable hosts,
     * conservative otherwise. */
    if(rscsi_disks[0].device && rscsi_disks[0].device->host->sg_tablesize)
        read_ahead[MAJOR_NR] = 120;
    else
        read_ahead[MAJOR_NR] = 4;

    return;
}
1244
1245 static int sd_detect(Scsi_Device * SDp){
1246 if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1247
1248 printk("Detected scsi disk sd%c at scsi%d, channel %d, id %d, lun %d\n",
1249 'a'+ (sd_template.dev_noticed++),
1250 SDp->host->host_no, SDp->channel, SDp->id, SDp->lun);
1251
1252 return 1;
1253 }
1254
1255 static int sd_attach(Scsi_Device * SDp){
1256 Scsi_Disk * dpnt;
1257 int i;
1258
1259 if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1260
1261 if(sd_template.nr_dev >= sd_template.dev_max) {
1262 SDp->attached--;
1263 return 1;
1264 }
1265
1266 for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
1267 if(!dpnt->device) break;
1268
1269 if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)");
1270
1271 SDp->scsi_request_fn = do_sd_request;
1272 rscsi_disks[i].device = SDp;
1273 rscsi_disks[i].has_part_table = 0;
1274 sd_template.nr_dev++;
1275 sd_gendisk.nr_real++;
1276 return 0;
1277 }
1278
/* Shorthand macros for revalidate_scsidisk() below; they expand in a
 * scope where 'target' is the disk index derived from the device number. */
#define DEVICE_BUSY rscsi_disks[target].device->busy
#define USAGE rscsi_disks[target].device->access_count
#define CAPACITY rscsi_disks[target].capacity
#define MAYBE_REINIT sd_init_onedisk(target)
#define GENDISK_STRUCT sd_gendisk
1284
1285
1286
1287
1288
1289
1290
1291
1292 int revalidate_scsidisk(kdev_t dev, int maxusage){
1293 int target;
1294 struct gendisk * gdev;
1295 unsigned long flags;
1296 int max_p;
1297 int start;
1298 int i;
1299
1300 target = DEVICE_NR(dev);
1301 gdev = &GENDISK_STRUCT;
1302
1303 save_flags(flags);
1304 cli();
1305 if (DEVICE_BUSY || USAGE > maxusage) {
1306 restore_flags(flags);
1307 printk("Device busy for revalidation (usage=%d)\n", USAGE);
1308 return -EBUSY;
1309 };
1310 DEVICE_BUSY = 1;
1311 restore_flags(flags);
1312
1313 max_p = gdev->max_p;
1314 start = target << gdev->minor_shift;
1315
1316 for (i=max_p - 1; i >=0 ; i--) {
1317 int minor = start+i;
1318 kdev_t devi = MKDEV(MAJOR_NR, minor);
1319 sync_dev(devi);
1320 invalidate_inodes(devi);
1321 invalidate_buffers(devi);
1322 gdev->part[minor].start_sect = 0;
1323 gdev->part[minor].nr_sects = 0;
1324
1325
1326
1327
1328 blksize_size[MAJOR_NR][i] = 1024;
1329 };
1330
1331 #ifdef MAYBE_REINIT
1332 MAYBE_REINIT;
1333 #endif
1334
1335 gdev->part[start].nr_sects = CAPACITY;
1336 resetup_one_dev(gdev, target);
1337
1338 DEVICE_BUSY = 0;
1339 return 0;
1340 }
1341
1342 static int fop_revalidate_scsidisk(kdev_t dev){
1343 return revalidate_scsidisk(dev, 0);
1344 }
1345
1346
/*
 * Detach callback: find the slot bound to SDp, flush and invalidate all
 * of its minors, clear the slot, and update the device counts.
 */
static void sd_detach(Scsi_Device * SDp)
{
    Scsi_Disk * dpnt;
    int i;
    int max_p;
    int start;

    for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
        if(dpnt->device == SDp) {

            /* NOTE: 'i' is reused as the partition index below; this
             * is only safe because we return before the outer loop
             * would advance again. */
            max_p = sd_gendisk.max_p;
            start = i << sd_gendisk.minor_shift;

            for (i=max_p - 1; i >=0 ; i--) {
                int minor = start+i;
                kdev_t devi = MKDEV(MAJOR_NR, minor);
                sync_dev(devi);
                invalidate_inodes(devi);
                invalidate_buffers(devi);
                sd_gendisk.part[minor].start_sect = 0;
                sd_gendisk.part[minor].nr_sects = 0;
                sd_sizes[minor] = 0;
            };

            dpnt->has_part_table = 0;
            dpnt->device = NULL;
            dpnt->capacity = 0;
            SDp->attached--;
            sd_template.dev_noticed--;
            sd_template.nr_dev--;
            sd_gendisk.nr_real--;
            return;
        }
    return;
}
1384
1385 #ifdef MODULE
1386 #include <linux/module.h>
1387 #include <linux/version.h>
1388
1389 char kernel_version[] = UTS_RELEASE;
1390
1391 int init_module(void) {
1392 sd_template.usage_count = &mod_use_count_;
1393 return scsi_register_module(MODULE_SCSI_DEV, &sd_template);
1394 }
1395
1396 void cleanup_module( void)
1397 {
1398 struct gendisk * prev_sdgd;
1399 struct gendisk * sdgd;
1400
1401 if (MOD_IN_USE) {
1402 printk(KERN_INFO __FILE__ ": module is in use, remove rejected\n");
1403 return;
1404 }
1405 scsi_unregister_module(MODULE_SCSI_DEV, &sd_template);
1406 unregister_blkdev(SCSI_DISK_MAJOR, "sd");
1407 sd_registered--;
1408 if( rscsi_disks != NULL )
1409 {
1410 scsi_init_free((char *) rscsi_disks,
1411 (sd_template.dev_noticed + SD_EXTRA_DEVS)
1412 * sizeof(Scsi_Disk));
1413
1414 scsi_init_free((char *) sd_sizes, sd_template.dev_max * sizeof(int));
1415 scsi_init_free((char *) sd_blocksizes, sd_template.dev_max * sizeof(int));
1416 scsi_init_free((char *) sd_hardsizes, sd_template.dev_max * sizeof(int));
1417 scsi_init_free((char *) sd,
1418 (sd_template.dev_max << 4) * sizeof(struct hd_struct));
1419
1420
1421
1422 sdgd = gendisk_head;
1423 prev_sdgd = NULL;
1424 while(sdgd != &sd_gendisk)
1425 {
1426 prev_sdgd = sdgd;
1427 sdgd = sdgd->next;
1428 }
1429
1430 if(sdgd != &sd_gendisk)
1431 printk("sd_gendisk not in disk chain.\n");
1432 else {
1433 if(prev_sdgd != NULL)
1434 prev_sdgd->next = sdgd->next;
1435 else
1436 gendisk_head = sdgd->next;
1437 }
1438 }
1439
1440 blksize_size[MAJOR_NR] = NULL;
1441 blk_dev[MAJOR_NR].request_fn = NULL;
1442 blk_size[MAJOR_NR] = NULL;
1443 hardsect_size[MAJOR_NR] = NULL;
1444 read_ahead[MAJOR_NR] = 0;
1445 sd_template.dev_max = 0;
1446 }
1447 #endif
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466