This source file includes the following definitions:
- sd_open
- sd_release
- sd_geninit
- rw_intr
- do_sd_request
- requeue_sd_request
- check_scsidisk_media_change
- sd_init_done
- sd_init_onedisk
- sd_init
- sd_finish
- sd_detect
- sd_attach
- revalidate_scsidisk
- fop_revalidate_scsidisk
- sd_detach
- init_module
- cleanup_module
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 #ifdef MODULE
20 #include <linux/autoconf.h>
21 #include <linux/module.h>
22 #include <linux/version.h>
23
24
25
26
27
28 #define MODULE_FLAG 1
29 #else
30 #define MODULE_FLAG scsi_loadable_module_flag
31 #endif
32
33 #include <linux/fs.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/mm.h>
37 #include <linux/string.h>
38 #include <linux/errno.h>
39 #include <asm/system.h>
40
41 #define MAJOR_NR SCSI_DISK_MAJOR
42 #include "../block/blk.h"
43 #include "scsi.h"
44 #include "hosts.h"
45 #include "sd.h"
46 #include "scsi_ioctl.h"
47 #include "constants.h"
48
49 #include <linux/genhd.h>
50
51
52
53
54
55 #define MAX_RETRIES 5
56
57
58
59
60
61 #define SD_TIMEOUT (7 * HZ)
62 #define SD_MOD_TIMEOUT (8 * HZ)
63
64 #define CLUSTERABLE_DEVICE(SC) (SC->host->use_clustering && \
65 SC->device->type != TYPE_MOD)
66
67 struct hd_struct * sd;
68 int revalidate_scsidisk(int dev, int maxusage);
69
70 Scsi_Disk * rscsi_disks = NULL;
71 static int * sd_sizes;
72 static int * sd_blocksizes;
73 static int * sd_hardsizes;
74
75 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
76
77 static int check_scsidisk_media_change(dev_t);
78 static int fop_revalidate_scsidisk(dev_t);
79
80 static int sd_init_onedisk(int);
81
82 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
83
84 static int sd_init(void);
85 static void sd_finish(void);
86 static int sd_attach(Scsi_Device *);
87 static int sd_detect(Scsi_Device *);
88 static void sd_detach(Scsi_Device *);
89
90 struct Scsi_Device_Template sd_template =
91 { NULL, "disk", "sd", NULL, TYPE_DISK,
92 SCSI_DISK_MAJOR, 0, 0, 0, 1,
93 sd_detect, sd_init,
94 sd_finish, sd_attach, sd_detach
95 };
96
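/*
 * sd_open: make sure the target exists, wait for any revalidation in
 * progress to finish, handle removable media (media-change check and
 * door lock) and refuse to open a partition whose size is zero.
 */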
97 static int sd_open(struct inode * inode, struct file * filp)
98 {
99 int target;
100 target = DEVICE_NR(MINOR(inode->i_rdev));
101
102 if(target >= sd_template.dev_max || !rscsi_disks[target].device)
103 return -ENXIO;
104
105
106
107
108
109
110
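/*
 * device->busy is set while the partition table is being re-read
 * (see revalidate_scsidisk()); spin until that has finished.
 */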
111 while (rscsi_disks[target].device->busy)
112 barrier();
113 if(rscsi_disks[target].device->removable) {
114 check_disk_change(inode->i_rdev);
115
116 if(!rscsi_disks[target].device->access_count)
117 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
118 };
119
120
121
122
123
124 if(sd_sizes[MINOR(inode->i_rdev)] == 0)
125 return -ENXIO;
126
127 rscsi_disks[target].device->access_count++;
128 if (rscsi_disks[target].device->host->hostt->usage_count)
129 (*rscsi_disks[target].device->host->hostt->usage_count)++;
130 if(sd_template.usage_count) (*sd_template.usage_count)++;
131 return 0;
132 }
133
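/*
 * sd_release: flush the device, drop the usage counts and, once a
 * removable disk is no longer open by anyone, unlock its door again.
 */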
134 static void sd_release(struct inode * inode, struct file * file)
135 {
136 int target;
137 sync_dev(inode->i_rdev);
138
139 target = DEVICE_NR(MINOR(inode->i_rdev));
140
141 rscsi_disks[target].device->access_count--;
142 if (rscsi_disks[target].device->host->hostt->usage_count)
143 (*rscsi_disks[target].device->host->hostt->usage_count)--;
144 if(sd_template.usage_count) (*sd_template.usage_count)--;
145
146 if(rscsi_disks[target].device->removable) {
147 if(!rscsi_disks[target].device->access_count)
148 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
149 }
150 }
151
152 static void sd_geninit(struct gendisk *);
153
154 static struct file_operations sd_fops = {
155 NULL,
156 block_read,
157 block_write,
158 NULL,
159 NULL,
160 sd_ioctl,
161 NULL,
162 sd_open,
163 sd_release,
164 block_fsync,
165 NULL,
166 check_scsidisk_media_change,
167 fop_revalidate_scsidisk
168 };
169
170 static struct gendisk sd_gendisk = {
171 MAJOR_NR,
172 "sd",
173 4,
174 1 << 4,
175 0,
176 sd_geninit,
177 NULL,
178 NULL,
179 0,
180 NULL,
181 NULL
182 };
183
184 static void sd_geninit (struct gendisk *ignored)
185 {
186 int i;
187
188 for (i = 0; i < sd_template.dev_max; ++i)
189 if(rscsi_disks[i].device)
190 sd[i << 4].nr_sects = rscsi_disks[i].capacity;
191 #if 0
192
193 sd_gendisk.nr_real = sd_template.dev_max;
194 #endif
195 }
196
197
198
199
200
201
202
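/*
 * rw_intr() is the completion routine for the READ/WRITE commands built
 * by requeue_sd_request().  It copies bounce buffers back and frees the
 * scatter-gather list, then either completes the request or decodes the
 * sense data before failing it.  In every path it ends by starting the
 * next request.
 */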
203 static void rw_intr (Scsi_Cmnd *SCpnt)
204 {
205 int result = SCpnt->result;
206 int this_count = SCpnt->bufflen >> 9;
207
208 #ifdef DEBUG
209 printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.dev),
210 SCpnt->host->host_no, result);
211 #endif
212
213
214
215
216
217
218
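/*
 * Success: for reads that went through bounce buffers, copy the data
 * back to the original buffers, release the DMA memory, complete this
 * part of the request and look for more work.
 */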
219 if (!result) {
220
221 #ifdef DEBUG
222 printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.dev),
223 SCpnt->request.nr_sectors);
224 printk("use_sg is %d\n ",SCpnt->use_sg);
225 #endif
226 if (SCpnt->use_sg) {
227 struct scatterlist * sgpnt;
228 int i;
229 sgpnt = (struct scatterlist *) SCpnt->buffer;
230 for(i=0; i<SCpnt->use_sg; i++) {
231 #ifdef DEBUG
232 printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address,
233 sgpnt[i].length);
234 #endif
235 if (sgpnt[i].alt_address) {
236 if (SCpnt->request.cmd == READ)
237 memcpy(sgpnt[i].alt_address, sgpnt[i].address,
238 sgpnt[i].length);
239 scsi_free(sgpnt[i].address, sgpnt[i].length);
240 };
241 };
242
243
244 scsi_free(SCpnt->buffer, SCpnt->sglist_len);
245 } else {
246 if (SCpnt->buffer != SCpnt->request.buffer) {
247 #ifdef DEBUG
248 printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
249 SCpnt->bufflen);
250 #endif
251 if (SCpnt->request.cmd == READ)
252 memcpy(SCpnt->request.buffer, SCpnt->buffer,
253 SCpnt->bufflen);
254 scsi_free(SCpnt->buffer, SCpnt->bufflen);
255 };
256 };
257
258
259
260
261
262 if (SCpnt->request.nr_sectors > this_count)
263 {
264 SCpnt->request.errors = 0;
265
266 if (!SCpnt->request.bh)
267 {
268 #ifdef DEBUG
269 printk("sd%c : handling page request, no buffer\n",
270 'a' + MINOR(SCpnt->request.dev));
271 #endif
272
273
274
275
276 panic("sd.c: linked page request (%lx %x)",
277 SCpnt->request.sector, this_count);
278 }
279 }
280 SCpnt = end_scsi_request(SCpnt, 1, this_count);
281 requeue_sd_request(SCpnt);
282 return;
283 }
284
285
286 if (SCpnt->use_sg) {
287 struct scatterlist * sgpnt;
288 int i;
289 sgpnt = (struct scatterlist *) SCpnt->buffer;
290 for(i=0; i<SCpnt->use_sg; i++) {
291 #ifdef DEBUG
292 printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
293 SCpnt->bufflen);
294 #endif
295 if (sgpnt[i].alt_address) {
296 scsi_free(sgpnt[i].address, sgpnt[i].length);
297 };
298 };
299 scsi_free(SCpnt->buffer, SCpnt->sglist_len);
300 } else {
301 #ifdef DEBUG
302 printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
303 SCpnt->bufflen);
304 #endif
305 if (SCpnt->buffer != SCpnt->request.buffer)
306 scsi_free(SCpnt->buffer, SCpnt->bufflen);
307 };
308
309
310
311
312
313
314
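/*
 * The command failed; decode the sense data.  A UNIT ATTENTION on
 * removable media means the medium was changed, so the request is
 * failed and the device marked changed.  An ILLEGAL REQUEST while we
 * are using 10-byte commands makes us drop back to 6-byte commands and
 * retry.  Anything else is reported and the current chunk is failed.
 */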
315 if (driver_byte(result) != 0) {
316 if (suggestion(result) == SUGGEST_REMAP) {
317 #ifdef REMAP
318
319
320
321
322 if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].remap)
323 {
324 result = 0;
325 }
326 else
327 #endif
328 }
329
330 if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
331 if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
332 if(rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->removable) {
333
334
335
336 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->changed = 1;
337 SCpnt = end_scsi_request(SCpnt, 0, this_count);
338 requeue_sd_request(SCpnt);
339 return;
340 }
341 else
342 {
343
344
345
346
347
348 requeue_sd_request(SCpnt);
349 return;
350 }
351 }
352 }
353
354
355
356
357
358
359
360
361
362 if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
363 if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten) {
364 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten = 0;
365 requeue_sd_request(SCpnt);
366 result = 0;
367 } else {
368
369 }
370 }
371 }
372 if (result) {
373 printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n",
374 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->host->host_no,
375 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->channel,
376 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->id,
377 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->lun, result);
378
379 if (driver_byte(result) & DRIVER_SENSE)
380 print_sense("sd", SCpnt);
381 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
382 requeue_sd_request(SCpnt);
383 return;
384 }
385 }
386
387
388
389
390
391
392
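/*
 * do_sd_request() is the block-device request function.  It walks the
 * request queue, attaches a free Scsi_Cmnd to a runnable request and
 * passes it to requeue_sd_request(), which builds and issues the
 * actual SCSI command.
 */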
393 static void do_sd_request (void)
394 {
395 Scsi_Cmnd * SCpnt = NULL;
396 Scsi_Device * SDev;
397 struct request * req = NULL;
398 unsigned long flags;
399 int flag = 0;
400
401 save_flags(flags);
402 while (1) {
403 cli();
404 if (CURRENT != NULL && CURRENT->dev == -1) {
405 restore_flags(flags);
406 return;
407 };
408
409 INIT_SCSI_REQUEST;
410 SDev = rscsi_disks[DEVICE_NR(MINOR(CURRENT->dev))].device;
411
412
413
414
415
416
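/*
 * A reset clears the medium-removal lock on the drive, so if this
 * device was reset, re-lock the door of removable media before doing
 * anything else -- but never from interrupt context.
 */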
417 if( SDev->was_reset )
418 {
419
420
421
422
423
424
425 if( SDev->removable && !intr_count )
426 {
427 scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0);
428 }
429 SDev->was_reset = 0;
430 }
431
432
433
434
435
436
437
438
439
440
441
442
443
444
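/*
 * Try (once per pass, hence the flag) to attach a free command block
 * to the request at the head of the queue; the third argument of 0
 * makes allocate_device() return NULL instead of sleeping.
 */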
445 if (flag++ == 0)
446 SCpnt = allocate_device(&CURRENT,
447 rscsi_disks[DEVICE_NR(MINOR(CURRENT->dev))].device, 0);
448 else SCpnt = NULL;
449
450
451
452
453
454
455 restore_flags(flags);
456
457
458
459
460
461
462
463
464
465
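/*
 * The head of the queue could not be serviced.  If more than one disk
 * is present, scan the rest of the queue for a request aimed at some
 * other, idle drive and unlink it so it can be started instead.
 */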
466 if (!SCpnt && sd_template.nr_dev > 1){
467 struct request *req1;
468 req1 = NULL;
469 cli();
470 req = CURRENT;
471 while(req){
472 SCpnt = request_queueable(req, rscsi_disks[DEVICE_NR(MINOR(req->dev))].device);
473 if(SCpnt) break;
474 req1 = req;
475 req = req->next;
476 };
477 if (SCpnt && req->dev == -1) {
478 if (req == CURRENT)
479 CURRENT = CURRENT->next;
480 else
481 req1->next = req->next;
482 };
483 restore_flags(flags);
484 };
485
486 if (!SCpnt) return;
487
488
489 requeue_sd_request(SCpnt);
490 };
491 }
492
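/*
 * requeue_sd_request() turns one block request into a READ_6/WRITE_6
 * or READ_10/WRITE_10 command: it validates the request against the
 * partition table, builds a scatter-gather list (with ISA bounce
 * buffers where necessary), converts the block number and count to
 * device sectors and finally calls scsi_do_cmd() with rw_intr() as the
 * completion routine.
 */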
493 static void requeue_sd_request (Scsi_Cmnd * SCpnt)
494 {
495 int dev, block, this_count;
496 unsigned char cmd[10];
497 int bounce_size, contiguous;
498 int max_sg;
499 struct buffer_head * bh, *bhp;
500 char * buff, *bounce_buffer;
501
502 repeat:
503
504 if(!SCpnt || SCpnt->request.dev <= 0) {
505 do_sd_request();
506 return;
507 }
508
509 dev = MINOR(SCpnt->request.dev);
510 block = SCpnt->request.sector;
511 this_count = 0;
512
513 #ifdef DEBUG
514 printk("Doing sd request, dev = %d, block = %d\n", dev, block);
515 #endif
516
517 if (dev >= (sd_template.dev_max << 4) ||
518 !rscsi_disks[DEVICE_NR(dev)].device ||
519 block + SCpnt->request.nr_sectors > sd[dev].nr_sects)
520 {
521 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
522 goto repeat;
523 }
524
525 block += sd[dev].start_sect;
526 dev = DEVICE_NR(dev);
527
528 if (rscsi_disks[dev].device->changed)
529 {
530
531
532
533
534
535 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
536 goto repeat;
537 }
538
539 #ifdef DEBUG
540 printk("sd%c : real dev = /dev/sd%c, block = %d\n",
541 'a' + MINOR(SCpnt->request.dev), 'a' + dev, block);
542 #endif
543
544
545
546
547
548
549
550
551
552
553
554
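/*
 * The block layer counts in 512-byte sectors, so on a disk with
 * 1024-byte hardware sectors both the starting block and the length
 * must be even or the request cannot be mapped onto device blocks.
 */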
555 if (rscsi_disks[dev].sector_size == 1024)
556 if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
557 printk("sd.c:Bad block number requested");
558 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
559 goto repeat;
560 }
561
562 switch (SCpnt->request.cmd)
563 {
564 case WRITE :
565 if (!rscsi_disks[dev].device->writeable)
566 {
567 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
568 goto repeat;
569 }
570 cmd[0] = WRITE_6;
571 break;
572 case READ :
573 cmd[0] = READ_6;
574 break;
575 default :
576 panic ("Unknown sd command %d\n", SCpnt->request.cmd);
577 }
578
579 SCpnt->this_count = 0;
580
581
582
583
584 contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
585 bounce_buffer = NULL;
586 bounce_size = (SCpnt->request.nr_sectors << 9);
587
588
589
590
591
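/*
 * If the whole transfer is contiguous but ends above the ISA DMA
 * limit on a host that cannot reach high memory, try to allocate one
 * bounce buffer big enough for the lot; if that is not possible the
 * request is handled buffer by buffer instead.
 */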
592 if (contiguous && SCpnt->request.bh &&
593 ((long) SCpnt->request.bh->b_data)
594 + (SCpnt->request.nr_sectors << 9) - 1 > ISA_DMA_THRESHOLD
595 && SCpnt->host->unchecked_isa_dma) {
596 if(((long) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
597 bounce_buffer = (char *) scsi_malloc(bounce_size);
598 if(!bounce_buffer) contiguous = 0;
599 };
600
601 if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
602 for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp,
603 bhp = bhp->b_reqnext) {
604 if(!CONTIGUOUS_BUFFERS(bh,bhp)) {
605 if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
606 contiguous = 0;
607 break;
608 }
609 };
610 if (!SCpnt->request.bh || contiguous) {
611
612
613 this_count = SCpnt->request.nr_sectors;
614 buff = SCpnt->request.buffer;
615 SCpnt->use_sg = 0;
616
617 } else if (SCpnt->host->sg_tablesize == 0 ||
618 (need_isa_buffer && dma_free_sectors <= 10)) {
619
620
621
622
623
624
625
626
627 if (SCpnt->host->sg_tablesize != 0 &&
628 need_isa_buffer &&
629 dma_free_sectors <= 10)
630 printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n");
631
632 this_count = SCpnt->request.current_nr_sectors;
633 buff = SCpnt->request.buffer;
634 SCpnt->use_sg = 0;
635
636 } else {
637
638
639 struct scatterlist * sgpnt;
640 int count, this_count_max;
641 int counted;
642
643 bh = SCpnt->request.bh;
644 this_count = 0;
645 this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
646 count = 0;
647 bhp = NULL;
648 while(bh) {
649 if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
650 if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
651 !CLUSTERABLE_DEVICE(SCpnt) ||
652 (SCpnt->host->unchecked_isa_dma &&
653 ((unsigned long) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
654 if (count < SCpnt->host->sg_tablesize) count++;
655 else break;
656 };
657 this_count += (bh->b_size >> 9);
658 bhp = bh;
659 bh = bh->b_reqnext;
660 };
661 #if 0
662 if(SCpnt->host->unchecked_isa_dma &&
663 ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
664 #endif
665 SCpnt->use_sg = count;
666 count = 512;
667 while( count < (SCpnt->use_sg * sizeof(struct scatterlist)))
668 count = count << 1;
669 SCpnt->sglist_len = count;
670 max_sg = count / sizeof(struct scatterlist);
671 if(SCpnt->host->sg_tablesize < max_sg)
672 max_sg = SCpnt->host->sg_tablesize;
673 sgpnt = (struct scatterlist * ) scsi_malloc(count);
674 if (!sgpnt) {
675 printk("Warning - running *really* short on DMA buffers\n");
676 SCpnt->use_sg = 0;
677 this_count = SCpnt->request.current_nr_sectors;
678 buff = SCpnt->request.buffer;
679 } else {
680 memset(sgpnt, 0, count);
681
682
683 buff = (char *) sgpnt;
684 counted = 0;
685 for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
686 count < SCpnt->use_sg && bh;
687 count++, bh = bhp) {
688
689 bhp = bh->b_reqnext;
690
691 if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
692 sgpnt[count].length += bh->b_size;
693 counted += bh->b_size >> 9;
694
695 if (((long) sgpnt[count].address) + sgpnt[count].length - 1 >
696 ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
697 !sgpnt[count].alt_address) {
698 sgpnt[count].alt_address = sgpnt[count].address;
699
700
701
702
703 if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
704 sgpnt[count].address = NULL;
705 } else {
706 sgpnt[count].address =
707 (char *) scsi_malloc(sgpnt[count].length);
708 };
709
710
711
712
713
714 if(sgpnt[count].address == NULL){
715 #if 0
716 printk("Warning: Running low on SCSI DMA buffers");
717
718 while(--count >= 0){
719 if(sgpnt[count].alt_address)
720 scsi_free(sgpnt[count].address,
721 sgpnt[count].length);
722 };
723 this_count = SCpnt->request.current_nr_sectors;
724 buff = SCpnt->request.buffer;
725 SCpnt->use_sg = 0;
726 scsi_free(sgpnt, SCpnt->sglist_len);
727 #endif
728 SCpnt->use_sg = count;
729 this_count = counted -= bh->b_size >> 9;
730 break;
731 };
732
733 };
734
735
736
737
738
739 if(bhp && CONTIGUOUS_BUFFERS(bh,bhp)
740 && CLUSTERABLE_DEVICE(SCpnt)) {
741 char * tmp;
742
743 if (((long) sgpnt[count].address) + sgpnt[count].length +
744 bhp->b_size - 1 > ISA_DMA_THRESHOLD &&
745 (SCpnt->host->unchecked_isa_dma) &&
746 !sgpnt[count].alt_address) continue;
747
748 if(!sgpnt[count].alt_address) {count--; continue; }
749 if(dma_free_sectors > 10)
750 tmp = (char *) scsi_malloc(sgpnt[count].length
751 + bhp->b_size);
752 else {
753 tmp = NULL;
754 max_sg = SCpnt->use_sg;
755 };
756 if(tmp){
757 scsi_free(sgpnt[count].address, sgpnt[count].length);
758 sgpnt[count].address = tmp;
759 count--;
760 continue;
761 };
762
763
764
765
766
767 if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
768 };
769 };
770
771
772 this_count = counted;
773
774 if(count < SCpnt->use_sg || SCpnt->use_sg
775 > SCpnt->host->sg_tablesize){
776 bh = SCpnt->request.bh;
777 printk("Use sg, count %d %x %d\n",
778 SCpnt->use_sg, count, dma_free_sectors);
779 printk("maxsg = %x, counted = %d this_count = %d\n",
780 max_sg, counted, this_count);
781 while(bh){
782 printk("[%p %lx] ", bh->b_data, bh->b_size);
783 bh = bh->b_reqnext;
784 };
785 if(SCpnt->use_sg < 16)
786 for(count=0; count<SCpnt->use_sg; count++)
787 printk("{%d:%p %p %d} ", count,
788 sgpnt[count].address,
789 sgpnt[count].alt_address,
790 sgpnt[count].length);
791 panic("Ooops");
792 };
793
794 if (SCpnt->request.cmd == WRITE)
795 for(count=0; count<SCpnt->use_sg; count++)
796 if(sgpnt[count].alt_address)
797 memcpy(sgpnt[count].address, sgpnt[count].alt_address,
798 sgpnt[count].length);
799 };
800 };
801
802
803
804 if(SCpnt->use_sg == 0){
805 if (((long) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
806 (SCpnt->host->unchecked_isa_dma)) {
807 if(bounce_buffer)
808 buff = bounce_buffer;
809 else
810 buff = (char *) scsi_malloc(this_count << 9);
811 if(buff == NULL) {
812 this_count = SCpnt->request.current_nr_sectors;
813 buff = (char *) scsi_malloc(this_count << 9);
814 if(!buff) panic("Ran out of DMA buffers.");
815 };
816 if (SCpnt->request.cmd == WRITE)
817 memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
818 };
819 };
820 #ifdef DEBUG
821 printk("sd%c : %s %d/%d 512 byte blocks.\n",
822 'a' + MINOR(SCpnt->request.dev),
823 (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
824 this_count, SCpnt->request.nr_sectors);
825 #endif
826
827 cmd[1] = (SCpnt->lun << 5) & 0xe0;
828
829 if (rscsi_disks[dev].sector_size == 1024){
830 if(block & 1) panic("sd.c:Bad block number requested");
831 if(this_count & 1) panic("sd.c:Bad block number requested");
832 block = block >> 1;
833 this_count = this_count >> 1;
834 };
835
836 if (rscsi_disks[dev].sector_size == 256){
837 block = block << 1;
838 this_count = this_count << 1;
839 };
840
841 if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten)
842 {
843 if (this_count > 0xffff)
844 this_count = 0xffff;
845
846 cmd[0] += READ_10 - READ_6 ;
847 cmd[2] = (unsigned char) (block >> 24) & 0xff;
848 cmd[3] = (unsigned char) (block >> 16) & 0xff;
849 cmd[4] = (unsigned char) (block >> 8) & 0xff;
850 cmd[5] = (unsigned char) block & 0xff;
851 cmd[6] = cmd[9] = 0;
852 cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
853 cmd[8] = (unsigned char) this_count & 0xff;
854 }
855 else
856 {
857 if (this_count > 0xff)
858 this_count = 0xff;
859
860 cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
861 cmd[2] = (unsigned char) ((block >> 8) & 0xff);
862 cmd[3] = (unsigned char) block & 0xff;
863 cmd[4] = (unsigned char) this_count;
864 cmd[5] = 0;
865 }
866
867
868
869
870
871
872
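/*
 * Tell the mid-level code the hardware sector size (transfersize) and
 * the minimum number of bytes we expect to move (underflow), then
 * queue the command with rw_intr() as the done routine.  TYPE_MOD
 * devices get the slightly longer SD_MOD_TIMEOUT.
 */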
873 SCpnt->transfersize = rscsi_disks[dev].sector_size;
874 SCpnt->underflow = this_count << 9;
875 scsi_do_cmd (SCpnt, (void *) cmd, buff,
876 this_count * rscsi_disks[dev].sector_size,
877 rw_intr,
878 (SCpnt->device->type == TYPE_DISK ?
879 SD_TIMEOUT : SD_MOD_TIMEOUT),
880 MAX_RETRIES);
881 }
882
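/*
 * check_scsidisk_media_change() issues a TEST UNIT READY through
 * sd_ioctl(); a failure on a removable drive is treated as a media
 * change.  The cached 'changed' flag is returned and then cleared.
 */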
883 static int check_scsidisk_media_change(dev_t full_dev){
884 int retval;
885 int target;
886 struct inode inode;
887 int flag = 0;
888
889 target = DEVICE_NR(MINOR(full_dev));
890
891 if (target >= sd_template.dev_max ||
892 !rscsi_disks[target].device) {
893 printk("SCSI disk request error: invalid device.\n");
894 return 0;
895 };
896
897 if(!rscsi_disks[target].device->removable) return 0;
898
899 inode.i_rdev = full_dev;
900 retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
901
902 if(retval){
903
904
905
906
907 rscsi_disks[target].device->changed = 1;
908 return 1;
909
910 };
911
912 retval = rscsi_disks[target].device->changed;
913 if(!flag) rscsi_disks[target].device->changed = 0;
914 return retval;
915 }
916
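/*
 * sd_init_done() is the completion routine used while sizing disks at
 * initialisation time: it marks the command as finished (dev = 0xfffe)
 * and wakes up anyone sleeping on the request's semaphore.
 */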
917 static void sd_init_done (Scsi_Cmnd * SCpnt)
918 {
919 struct request * req;
920
921 req = &SCpnt->request;
922 req->dev = 0xfffe;
923
924 if (req->sem != NULL) {
925 up(req->sem);
926 }
927 }
928
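/*
 * sd_init_onedisk() spins up a non-removable disk that reports NOT
 * READY, then issues READ CAPACITY to learn its size and hardware
 * sector size.  If READ CAPACITY fails it falls back to 512-byte
 * sectors and a 0x1fffff-block capacity; sector sizes other than 256,
 * 512 or 1024 bytes cause the disk to be dropped (or, for removable
 * media, given a capacity of zero).
 */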
929 static int sd_init_onedisk(int i)
930 {
931 unsigned char cmd[10];
932 unsigned char *buffer;
933 unsigned long spintime;
934 int the_result, retries;
935 Scsi_Cmnd * SCpnt;
936
937
938
939
940
941
942 SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
943 buffer = (unsigned char *) scsi_malloc(512);
944
945 spintime = 0;
946
947
948 if (current->pid == 0){
949 do{
950 retries = 0;
951 while(retries < 3)
952 {
953 cmd[0] = TEST_UNIT_READY;
954 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
955 memset ((void *) &cmd[2], 0, 8);
956 SCpnt->request.dev = 0xffff;
957 SCpnt->cmd_len = 0;
958 SCpnt->sense_buffer[0] = 0;
959 SCpnt->sense_buffer[2] = 0;
960
961 scsi_do_cmd (SCpnt,
962 (void *) cmd, (void *) buffer,
963 512, sd_init_done, SD_TIMEOUT,
964 MAX_RETRIES);
965
966 while(SCpnt->request.dev != 0xfffe) barrier();
967
968 the_result = SCpnt->result;
969 retries++;
970 if( the_result == 0
971 || SCpnt->sense_buffer[2] != UNIT_ATTENTION)
972 break;
973 }
974
975
976
977 if(the_result && !rscsi_disks[i].device->removable &&
978 SCpnt->sense_buffer[2] == NOT_READY) {
979 int time1;
980 if(!spintime){
981 printk( "sd%c: Spinning up disk...", 'a' + i );
982 cmd[0] = START_STOP;
983 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
984 cmd[1] |= 1;
985 memset ((void *) &cmd[2], 0, 8);
986 cmd[4] = 1;
987
988 SCpnt->request.dev = 0xffff;
989 SCpnt->cmd_len = 0;
990 SCpnt->sense_buffer[0] = 0;
991 SCpnt->sense_buffer[2] = 0;
992
993 scsi_do_cmd (SCpnt,
994 (void *) cmd, (void *) buffer,
995 512, sd_init_done, SD_TIMEOUT,
996 MAX_RETRIES);
997
998 while(SCpnt->request.dev != 0xfffe)
999 barrier();
1000
1001 spintime = jiffies;
1002 };
1003
1004 time1 = jiffies;
1005 while(jiffies < time1 + HZ);
1006 printk( "." );
1007 };
1008 } while(the_result && spintime && spintime+100*HZ > jiffies);
1009 if (spintime) {
1010 if (the_result)
1011 printk( "not responding...\n" );
1012 else
1013 printk( "ready\n" );
1014 }
1015 };
1016
1017
1018 retries = 3;
1019 do {
1020 cmd[0] = READ_CAPACITY;
1021 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
1022 memset ((void *) &cmd[2], 0, 8);
1023 memset ((void *) buffer, 0, 8);
1024 SCpnt->request.dev = 0xffff;
1025 SCpnt->cmd_len = 0;
1026 SCpnt->sense_buffer[0] = 0;
1027 SCpnt->sense_buffer[2] = 0;
1028
1029 scsi_do_cmd (SCpnt,
1030 (void *) cmd, (void *) buffer,
1031 8, sd_init_done, SD_TIMEOUT,
1032 MAX_RETRIES);
1033
1034 if (current->pid == 0)
1035 while(SCpnt->request.dev != 0xfffe)
1036 barrier();
1037 else
1038 if (SCpnt->request.dev != 0xfffe){
1039 struct semaphore sem = MUTEX_LOCKED;
1040 SCpnt->request.sem = &sem;
1041 down(&sem);
1042
1043 while (SCpnt->request.dev != 0xfffe)
1044 schedule();
1045 };
1046
1047 the_result = SCpnt->result;
1048 retries--;
1049
1050 } while(the_result && retries);
1051
1052 SCpnt->request.dev = -1;
1053
1054 wake_up(&SCpnt->device->device_wait);
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072 if (the_result)
1073 {
1074 printk ("sd%c : READ CAPACITY failed.\n"
1075 "sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
1076 'a' + i, 'a' + i,
1077 status_byte(the_result),
1078 msg_byte(the_result),
1079 host_byte(the_result),
1080 driver_byte(the_result)
1081 );
1082 if (driver_byte(the_result) & DRIVER_SENSE)
1083 printk("sd%c : extended sense code = %1x \n",
1084 'a' + i, SCpnt->sense_buffer[2] & 0xf);
1085 else
1086 printk("sd%c : sense not available. \n", 'a' + i);
1087
1088 printk("sd%c : block size assumed to be 512 bytes, disk size 1GB. \n",
1089 'a' + i);
1090 rscsi_disks[i].capacity = 0x1fffff;
1091 rscsi_disks[i].sector_size = 512;
1092
1093
1094
1095 if(rscsi_disks[i].device->removable &&
1096 SCpnt->sense_buffer[2] == NOT_READY)
1097 rscsi_disks[i].device->changed = 1;
1098
1099 }
1100 else
1101 {
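/*
 * Bytes 0-3 of the READ CAPACITY data hold the logical block address
 * of the last block and bytes 4-7 the block length in bytes, both
 * big-endian.
 */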
1102 rscsi_disks[i].capacity = (buffer[0] << 24) |
1103 (buffer[1] << 16) |
1104 (buffer[2] << 8) |
1105 buffer[3];
1106
1107 rscsi_disks[i].sector_size = (buffer[4] << 24) |
1108 (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
1109
1110 if (rscsi_disks[i].sector_size != 512 &&
1111 rscsi_disks[i].sector_size != 1024 &&
1112 rscsi_disks[i].sector_size != 256)
1113 {
1114 printk ("sd%c : unsupported sector size %d.\n",
1115 'a' + i, rscsi_disks[i].sector_size);
1116 if(rscsi_disks[i].device->removable){
1117 rscsi_disks[i].capacity = 0;
1118 } else {
1119 printk ("scsi : deleting disk entry.\n");
1120 rscsi_disks[i].device = NULL;
1121 sd_template.nr_dev--;
1122 return i;
1123 };
1124 }
1125 {
1126
1127
1128
1129
1130
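/*
 * Record the hardware sector size for all sixteen minors of this disk
 * so it is visible through hardsect_size[].
 */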
1131 int m;
1132 int hard_sector = rscsi_disks[i].sector_size;
1133
1134 for (m=i<<4; m<((i+1)<<4); m++){
1135 sd_hardsizes[m] = hard_sector;
1136 }
1137 printk ("SCSI Hardware sector size is %d bytes on device sd%c\n",
1138 hard_sector,i+'a');
1139 }
1140 if(rscsi_disks[i].sector_size == 1024)
1141 rscsi_disks[i].capacity <<= 1;
1142 if(rscsi_disks[i].sector_size == 256)
1143 rscsi_disks[i].capacity >>= 1;
1144 }
1145
1146 rscsi_disks[i].ten = 1;
1147 rscsi_disks[i].remap = 1;
1148 scsi_free(buffer, 512);
1149 return i;
1150 }
1151
1152
1153
1154
1155
1156
1157 static int sd_registered = 0;
1158
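/*
 * sd_init() registers the block device major (once) and allocates the
 * per-disk tables: rscsi_disks, sd_sizes, sd_blocksizes, sd_hardsizes
 * and the hd_struct partition array, all sized for dev_max disks with
 * sixteen minors each.
 */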
1159 static int sd_init(void)
1160 {
1161 int i;
1162
1163 if (sd_template.dev_noticed == 0) return 0;
1164
1165 if(!sd_registered) {
1166 if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
1167 printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
1168 return 1;
1169 }
1170 sd_registered++;
1171 }
1172
1173
1174 if(rscsi_disks) return 0;
1175
1176 sd_template.dev_max = sd_template.dev_noticed + SD_EXTRA_DEVS;
1177
1178 rscsi_disks = (Scsi_Disk *)
1179 scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC);
1180 memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));
1181
1182 sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1183 sizeof(int), GFP_ATOMIC);
1184 memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));
1185
1186 sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1187 sizeof(int), GFP_ATOMIC);
1188
1189 sd_hardsizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1190 sizeof(int), GFP_ATOMIC);
1191
1192 for(i=0;i<(sd_template.dev_max << 4);i++){
1193 sd_blocksizes[i] = 1024;
1194 sd_hardsizes[i] = 512;
1195 }
1196 blksize_size[MAJOR_NR] = sd_blocksizes;
1197 hardsect_size[MAJOR_NR] = sd_hardsizes;
1198 sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
1199 sizeof(struct hd_struct),
1200 GFP_ATOMIC);
1201
1202
1203 sd_gendisk.max_nr = sd_template.dev_max;
1204 sd_gendisk.part = sd;
1205 sd_gendisk.sizes = sd_sizes;
1206 sd_gendisk.real_devices = (void *) rscsi_disks;
1207 return 0;
1208 }
1209
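/*
 * sd_finish() installs the request function, links sd_gendisk into the
 * global gendisk list, sizes every attached disk with sd_init_onedisk()
 * and reads its partition table, and finally picks a read-ahead value
 * based on whether the host of the first disk supports scatter-gather.
 */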
1210 static void sd_finish(void)
1211 {
1212 int i;
1213
1214 blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
1215
1216 sd_gendisk.next = gendisk_head;
1217 gendisk_head = &sd_gendisk;
1218
1219 for (i = 0; i < sd_template.dev_max; ++i)
1220 if (!rscsi_disks[i].capacity &&
1221 rscsi_disks[i].device)
1222 {
1223 i = sd_init_onedisk(i);
1224 if (MODULE_FLAG
1225 && !rscsi_disks[i].has_part_table) {
1226 sd_sizes[i << 4] = rscsi_disks[i].capacity;
1227 revalidate_scsidisk(i << 4, 0);
1228 }
1229 rscsi_disks[i].has_part_table = 1;
1230 }
1231
1232
1233
1234
1235
1236 if(rscsi_disks[0].device && rscsi_disks[0].device->host->sg_tablesize)
1237 read_ahead[MAJOR_NR] = 120;
1238 else
1239 read_ahead[MAJOR_NR] = 4;
1240
1241 return;
1242 }
1243
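/*
 * sd_detect() just announces a newly found disk (TYPE_DISK or TYPE_MOD)
 * and counts it; the real setup happens later in sd_attach().
 */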
1244 static int sd_detect(Scsi_Device * SDp){
1245 if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1246
1247 printk("Detected scsi disk sd%c at scsi%d, channel %d, id %d, lun %d\n",
1248 'a'+ (sd_template.dev_noticed++),
1249 SDp->host->host_no, SDp->channel, SDp->id, SDp->lun);
1250
1251 return 1;
1252 }
1253
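/*
 * sd_attach() claims a free rscsi_disks slot for a newly detected
 * device and points its request function at do_sd_request().
 */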
1254 static int sd_attach(Scsi_Device * SDp){
1255 Scsi_Disk * dpnt;
1256 int i;
1257
1258 if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1259
1260 if(sd_template.nr_dev >= sd_template.dev_max) {
1261 SDp->attached--;
1262 return 1;
1263 }
1264
1265 for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
1266 if(!dpnt->device) break;
1267
1268 if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)");
1269
1270 SDp->scsi_request_fn = do_sd_request;
1271 rscsi_disks[i].device = SDp;
1272 rscsi_disks[i].has_part_table = 0;
1273 sd_template.nr_dev++;
1274 sd_gendisk.nr_real++;
1275 return 0;
1276 }
1277
1278 #define DEVICE_BUSY rscsi_disks[target].device->busy
1279 #define USAGE rscsi_disks[target].device->access_count
1280 #define CAPACITY rscsi_disks[target].capacity
1281 #define MAYBE_REINIT sd_init_onedisk(target)
1282 #define GENDISK_STRUCT sd_gendisk
1283
1284
1285
1286
1287
1288
1289
1290
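/*
 * revalidate_scsidisk() is called after a media change: it locks out
 * other users via device->busy, flushes and invalidates every partition
 * of the drive, re-reads the capacity (MAYBE_REINIT, i.e.
 * sd_init_onedisk()) and then re-reads the partition table with
 * resetup_one_dev().  maxusage bounds how many opens may be outstanding
 * while this is allowed.
 */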
1291 int revalidate_scsidisk(int dev, int maxusage){
1292 int target, major;
1293 struct gendisk * gdev;
1294 unsigned long flags;
1295 int max_p;
1296 int start;
1297 int i;
1298
1299 target = DEVICE_NR(MINOR(dev));
1300 gdev = &GENDISK_STRUCT;
1301
1302 save_flags(flags);
1303 cli();
1304 if (DEVICE_BUSY || USAGE > maxusage) {
1305 restore_flags(flags);
1306 printk("Device busy for revalidation (usage=%d)\n", USAGE);
1307 return -EBUSY;
1308 };
1309 DEVICE_BUSY = 1;
1310 restore_flags(flags);
1311
1312 max_p = gdev->max_p;
1313 start = target << gdev->minor_shift;
1314 major = MAJOR_NR << 8;
1315
1316 for (i=max_p - 1; i >=0 ; i--) {
1317 sync_dev(major | start | i);
1318 invalidate_inodes(major | start | i);
1319 invalidate_buffers(major | start | i);
1320 gdev->part[start+i].start_sect = 0;
1321 gdev->part[start+i].nr_sects = 0;
1322
1323
1324
1325
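/*
 * Reset the block size on each of this drive's minors so the partition
 * table can be re-read with 1024-byte blocks.
 */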
1326 blksize_size[MAJOR_NR][start+i] = 1024;
1327 };
1328
1329 #ifdef MAYBE_REINIT
1330 MAYBE_REINIT;
1331 #endif
1332
1333 gdev->part[start].nr_sects = CAPACITY;
1334 resetup_one_dev(gdev, target);
1335
1336 DEVICE_BUSY = 0;
1337 return 0;
1338 }
1339
1340 static int fop_revalidate_scsidisk(dev_t dev){
1341 return revalidate_scsidisk(dev, 0);
1342 }
1343
1344
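/*
 * sd_detach() undoes sd_attach(): it flushes and clears every partition
 * of the departing disk, forgets its Scsi_Device and drops the
 * attach/notice counts.
 */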
1345 static void sd_detach(Scsi_Device * SDp)
1346 {
1347 Scsi_Disk * dpnt;
1348 int i;
1349 int max_p;
1350 int major;
1351 int start;
1352
1353 for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
1354 if(dpnt->device == SDp) {
1355
1356
1357
1358 max_p = sd_gendisk.max_p;
1359 start = i << sd_gendisk.minor_shift;
1360 major = MAJOR_NR << 8;
1361
1362 for (i=max_p - 1; i >=0 ; i--) {
1363 sync_dev(major | start | i);
1364 invalidate_inodes(major | start | i);
1365 invalidate_buffers(major | start | i);
1366 sd_gendisk.part[start+i].start_sect = 0;
1367 sd_gendisk.part[start+i].nr_sects = 0;
1368 sd_sizes[start+i] = 0;
1369 };
1370
1371 dpnt->has_part_table = 0;
1372 dpnt->device = NULL;
1373 dpnt->capacity = 0;
1374 SDp->attached--;
1375 sd_template.dev_noticed--;
1376 sd_template.nr_dev--;
1377 sd_gendisk.nr_real--;
1378 return;
1379 }
1380 return;
1381 }
1382
1383 #ifdef MODULE
1384 #include <linux/module.h>
1385 #include <linux/version.h>
1386
1387 char kernel_version[] = UTS_RELEASE;
1388
1389 int init_module(void) {
1390 sd_template.usage_count = &mod_use_count_;
1391 return scsi_register_module(MODULE_SCSI_DEV, &sd_template);
1392 }
1393
1394 void cleanup_module( void)
1395 {
1396 struct gendisk * prev_sdgd;
1397 struct gendisk * sdgd;
1398
1399 if (MOD_IN_USE) {
1400 printk(KERN_INFO __FILE__ ": module is in use, remove rejected\n");
1401 return;
1402 }
1403 scsi_unregister_module(MODULE_SCSI_DEV, &sd_template);
1404 unregister_blkdev(SCSI_DISK_MAJOR, "sd");
1405 sd_registered--;
1406 if( rscsi_disks != NULL )
1407 {
1408 scsi_init_free((char *) rscsi_disks,
1409 (sd_template.dev_noticed + SD_EXTRA_DEVS)
1410 * sizeof(Scsi_Disk));
1411
1412 scsi_init_free((char *) sd_sizes, (sd_template.dev_max << 4) * sizeof(int));
1413 scsi_init_free((char *) sd_blocksizes, (sd_template.dev_max << 4) * sizeof(int));
1414 scsi_init_free((char *) sd_hardsizes, (sd_template.dev_max << 4) * sizeof(int));
1415 scsi_init_free((char *) sd,
1416 (sd_template.dev_max << 4) * sizeof(struct hd_struct));
1417
1418
1419
1420 sdgd = gendisk_head;
1421 prev_sdgd = NULL;
1422 while(sdgd && sdgd != &sd_gendisk)
1423 {
1424 prev_sdgd = sdgd;
1425 sdgd = sdgd->next;
1426 }
1427
1428 if(sdgd != &sd_gendisk)
1429 printk("sd_gendisk not in disk chain.\n");
1430 else {
1431 if(prev_sdgd != NULL)
1432 prev_sdgd->next = sdgd->next;
1433 else
1434 gendisk_head = sdgd->next;
1435 }
1436 }
1437
1438 blksize_size[MAJOR_NR] = NULL;
1439 blk_dev[MAJOR_NR].request_fn = NULL;
1440 blk_size[MAJOR_NR] = NULL;
1441 hardsect_size[MAJOR_NR] = NULL;
1442 read_ahead[MAJOR_NR] = 0;
1443 sd_template.dev_max = 0;
1444 }
1445 #endif
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464