This source file includes the following definitions:
- sd_open
- sd_release
- sd_geninit
- rw_intr
- do_sd_request
- requeue_sd_request
- check_scsidisk_media_change
- sd_init_done
- sd_init_onedisk
- sd_init
- sd_finish
- sd_detect
- sd_attach
- revalidate_scsidisk
- fop_revalidate_scsidisk
- sd_detach
- init_module
- cleanup_module
1 /*
2  *      sd.c -- Linux SCSI disk (sd) driver.
3  *
4  *      Top-level block driver for SCSI direct-access disks and
5  *      magneto-optical (TYPE_MOD) devices.  It handles open/release,
6  *      request queueing and SCSI command construction, completion and
7  *      error handling, media-change detection, and attach/detach of
8  *      devices found by the SCSI mid-layer.
9  */
10
11
12
13
14
15
16
17
18
19 #ifdef MODULE
20 #include <linux/autoconf.h>
21 #include <linux/module.h>
22 #include <linux/version.h>
23 /*
24  * MODULE_FLAG mirrors scsi_loadable_module_flag from scsi.c: it is
25  * non-zero once we are running as (or on behalf of) a loadable module
26  * rather than as code configured in at boot time.
27  */
28 #define MODULE_FLAG 1
29 #else
30 #define MODULE_FLAG scsi_loadable_module_flag
31 #endif
32
33 #include <linux/fs.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/mm.h>
37 #include <linux/string.h>
38 #include <linux/errno.h>
39 #include <asm/system.h>
40
41 #define MAJOR_NR SCSI_DISK_MAJOR
42 #include "../block/blk.h"
43 #include "scsi.h"
44 #include "hosts.h"
45 #include "sd.h"
46 #include "scsi_ioctl.h"
47 #include "constants.h"
48
49 #include <linux/genhd.h>
50
51
52 /* Number of times a failed READ/WRITE command is retried before the
53  * request is finally reported back as an error.
54  */
55 #define MAX_RETRIES 5
56
57 /* Command time-outs, in jiffies.  Magneto-optical (TYPE_MOD) devices
58  * are slower than plain disks, so they get a slightly longer timeout.
59  */
60
61 #define SD_TIMEOUT (7 * HZ)
62 #define SD_MOD_TIMEOUT (8 * HZ)
63
64 #define CLUSTERABLE_DEVICE(SC) (SC->host->use_clustering && \
65 SC->device->type != TYPE_MOD)
66
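/*
 * For reference, the sd minor numbers carry four partition bits per
 * disk, matching the minor_shift of 4 and the "1 << 4" max_p used in
 * sd_gendisk below.  Roughly:
 *
 *     minor 0  = sda (whole disk), minor 1  = sda1, ... minor 15 = sda15
 *     minor 16 = sdb (whole disk), minor 17 = sdb1, ...
 *
 * DEVICE_NR(minor) recovers the disk index by dropping the low four
 * bits, and the per-minor arrays below (sd_sizes, sd_blocksizes,
 * sd_hardsizes) are therefore sized at dev_max << 4 entries.
 */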
67 struct hd_struct * sd;
68 int revalidate_scsidisk(int dev, int maxusage);
69
70 Scsi_Disk * rscsi_disks = NULL;
71 static int * sd_sizes;
72 static int * sd_blocksizes;
73 static int * sd_hardsizes;
74
75 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
76
77 static int check_scsidisk_media_change(dev_t);
78 static int fop_revalidate_scsidisk(dev_t);
79
80 static int sd_init_onedisk(int);
81
82 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
83
84 static void sd_init(void);
85 static void sd_finish(void);
86 static int sd_attach(Scsi_Device *);
87 static int sd_detect(Scsi_Device *);
88 static void sd_detach(Scsi_Device *);
89
90 struct Scsi_Device_Template sd_template =
91 { NULL, "disk", "sd", NULL, TYPE_DISK,
92 SCSI_DISK_MAJOR, 0, 0, 0, 1,
93 sd_detect, sd_init,
94 sd_finish, sd_attach, sd_detach
95 };
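/*
 * The initializer above registers this driver with the SCSI mid-layer:
 * a TYPE_DISK template on block major SCSI_DISK_MAJOR whose
 * detect/init/finish/attach/detach hooks are the sd_* routines defined
 * in this file.  Broadly, the mid-layer calls sd_detect() for every
 * disk-like device found during the bus scan, sd_init() once to size
 * the arrays, sd_attach() per device, and sd_finish() to read the
 * capacities and partition tables.
 */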
96
97 static int sd_open(struct inode * inode, struct file * filp)
98 {
99 int target;
100 target = DEVICE_NR(MINOR(inode->i_rdev));
101
102 if(target >= sd_template.dev_max || !rscsi_disks[target].device)
103 return -ENXIO;
104
105 /*
106  * The device may still be busy (for instance a revalidation after a
107  * media change is in progress).  Wait for it to become idle before
108  * examining or changing its state.
109  */
110
111 while (rscsi_disks[target].device->busy)
112 barrier();
113 if(rscsi_disks[target].device->removable) {
114 check_disk_change(inode->i_rdev);
115
116 if(!rscsi_disks[target].device->access_count)
117 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
118 };
119
120 /* Refuse to open a non-existent (zero-length) partition.  This check
121  * must follow the disk-change handling above, which may have updated
122  * sd_sizes[].
123  */
124 if(sd_sizes[MINOR(inode->i_rdev)] == 0)
125 return -ENXIO;
126
127 rscsi_disks[target].device->access_count++;
128 if (rscsi_disks[target].device->host->hostt->usage_count)
129 (*rscsi_disks[target].device->host->hostt->usage_count)++;
130 if(sd_template.usage_count) (*sd_template.usage_count)++;
131 return 0;
132 }
133
134 static void sd_release(struct inode * inode, struct file * file)
135 {
136 int target;
137 sync_dev(inode->i_rdev);
138
139 target = DEVICE_NR(MINOR(inode->i_rdev));
140
141 rscsi_disks[target].device->access_count--;
142 if (rscsi_disks[target].device->host->hostt->usage_count)
143 (*rscsi_disks[target].device->host->hostt->usage_count)--;
144 if(sd_template.usage_count) (*sd_template.usage_count)--;
145
146 if(rscsi_disks[target].device->removable) {
147 if(!rscsi_disks[target].device->access_count)
148 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
149 }
150 }
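/*
 * Note on removable media: the first open of a removable device locks
 * the door (SCSI_IOCTL_DOORLOCK, which the SCSI ioctl code implements
 * with the PREVENT/ALLOW MEDIUM REMOVAL command) and the last release
 * unlocks it again, so the medium cannot be swapped while any
 * partition is still in use.
 */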
151
152 static void sd_geninit(void);
153
154 static struct file_operations sd_fops = {
155 NULL,
156 block_read,
157 block_write,
158 NULL,
159 NULL,
160 sd_ioctl,
161 NULL,
162 sd_open,
163 sd_release,
164 block_fsync,
165 NULL,
166 check_scsidisk_media_change,
167 fop_revalidate_scsidisk
168 };
169
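/*
 * Rough map of the positional gendisk initializer below: major number,
 * major name ("sd"), minor_shift (4 partition bits per disk), max_p
 * (1 << 4 = 16 minors per disk), max_nr, the init routine
 * (sd_geninit), then the part/sizes arrays, nr_real and real_devices,
 * and the next pointer, all of which are filled in later by sd_init()
 * and sd_attach().
 */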
170 static struct gendisk sd_gendisk = {
171 MAJOR_NR,
172 "sd",
173 4,
174 1 << 4,
175 0,
176 sd_geninit,
177 NULL,
178 NULL,
179 0,
180 NULL,
181 NULL
182 };
183
184 static void sd_geninit (void)
185 {
186 int i;
187
188 for (i = 0; i < sd_template.dev_max; ++i)
189 if(rscsi_disks[i].device)
190 sd[i << 4].nr_sects = rscsi_disks[i].capacity;
191 #if 0
192
193 sd_gendisk.nr_real = sd_template.dev_max;
194 #endif
195 }
196
197
198 /*
199  * rw_intr() is the completion routine for READ/WRITE commands.  It
200  * releases any scatter-gather and bounce buffers, finishes as much of
201  * the request as was actually transferred, and restarts the queue.
202  */
203 static void rw_intr (Scsi_Cmnd *SCpnt)
204 {
205 int result = SCpnt->result;
206 int this_count = SCpnt->bufflen >> 9;
207
208 #ifdef DEBUG
209 printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.dev),
210 SCpnt->host->host_no, result);
211 #endif
212
213 /*
214  * First case: the command completed successfully.  Either the whole
215  * request is now done, or only part of it was transferred and the
216  * remainder will be reissued when the command is requeued below.
217  */
218
219 if (!result) {
220
221 #ifdef DEBUG
222 printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.dev),
223 SCpnt->request.nr_sectors);
224 printk("use_sg is %d\n ",SCpnt->use_sg);
225 #endif
226 if (SCpnt->use_sg) {
227 struct scatterlist * sgpnt;
228 int i;
229 sgpnt = (struct scatterlist *) SCpnt->buffer;
230 for(i=0; i<SCpnt->use_sg; i++) {
231 #ifdef DEBUG
232 printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address,
233 sgpnt[i].length);
234 #endif
235 if (sgpnt[i].alt_address) {
236 if (SCpnt->request.cmd == READ)
237 memcpy(sgpnt[i].alt_address, sgpnt[i].address,
238 sgpnt[i].length);
239 scsi_free(sgpnt[i].address, sgpnt[i].length);
240 };
241 };
242
243
244 scsi_free(SCpnt->buffer, SCpnt->sglist_len);
245 } else {
246 if (SCpnt->buffer != SCpnt->request.buffer) {
247 #ifdef DEBUG
248 printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
249 SCpnt->bufflen);
250 #endif
251 if (SCpnt->request.cmd == READ)
252 memcpy(SCpnt->request.buffer, SCpnt->buffer,
253 SCpnt->bufflen);
254 scsi_free(SCpnt->buffer, SCpnt->bufflen);
255 };
256 };
257
258 /* If fewer sectors were transferred than the request asked for, this
259  * is a multi-buffer request: complete the finished portion here and
260  * let the rest be reissued.
261  */
262 if (SCpnt->request.nr_sectors > this_count)
263 {
264 SCpnt->request.errors = 0;
265
266 if (!SCpnt->request.bh)
267 {
268 #ifdef DEBUG
269 printk("sd%c : handling page request, no buffer\n",
270 'a' + MINOR(SCpnt->request.dev));
271 #endif
272
273
274
275
276 panic("sd.c: linked page request (%lx %x)",
277 SCpnt->request.sector, this_count);
278 }
279 }
280 SCpnt = end_scsi_request(SCpnt, 1, this_count);
281 requeue_sd_request(SCpnt);
282 return;
283 }
284
285
286 if (SCpnt->use_sg) {
287 struct scatterlist * sgpnt;
288 int i;
289 sgpnt = (struct scatterlist *) SCpnt->buffer;
290 for(i=0; i<SCpnt->use_sg; i++) {
291 #ifdef DEBUG
292 printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
293 SCpnt->bufflen);
294 #endif
295 if (sgpnt[i].alt_address) {
296 scsi_free(sgpnt[i].address, sgpnt[i].length);
297 };
298 };
299 scsi_free(SCpnt->buffer, SCpnt->sglist_len);
300 } else {
301 #ifdef DEBUG
302 printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
303 SCpnt->bufflen);
304 #endif
305 if (SCpnt->buffer != SCpnt->request.buffer)
306 scsi_free(SCpnt->buffer, SCpnt->bufflen);
307 };
308
309
310 /*
311  * The command failed.  If the host left us request-sense data we can
312  * inspect it and decide how to recover: remap a bad block, note a
313  * media change, or fall back from ten- to six-byte commands.
314  */
315 if (driver_byte(result) != 0) {
316 if (suggestion(result) == SUGGEST_REMAP) {
317 #ifdef REMAP
318 /*
319  * Bad-block remapping is not implemented; REMAP is never defined, so
320  * this block is normally compiled out.
321  */
322 if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].remap)
323 {
324 result = 0;
325 }
326 else
327 #endif
328 }
329
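/*
 * Sense data layout used below: a response code of 0x70 in byte 0
 * (masked with 0x7f to ignore the "valid" bit) marks current-error
 * sense data, and the low nibble of byte 2 holds the sense key
 * (NOT_READY, UNIT_ATTENTION, ILLEGAL_REQUEST, ...).
 */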
330 if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
331 if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
332 if(rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->removable) {
333 /* A removable device reported UNIT ATTENTION: the medium has most
334  * likely been changed.  Flag the change and fail this request.
335  */
336 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->changed = 1;
337 SCpnt = end_scsi_request(SCpnt, 0, this_count);
338 requeue_sd_request(SCpnt);
339 return;
340 }
341 }
342 }
343
344 /*
345  * ILLEGAL REQUEST normally means we sent a command the drive does not
346  * support.  The only optional command we issue is the ten-byte
347  * READ/WRITE, so switch this disk back to six-byte commands and let
348  * the request be retried.  (It can also mean we read past the end of
349  * a disk whose READ CAPACITY had failed.)
350  */
351
352 if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
353 if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten) {
354 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten = 0;
355 requeue_sd_request(SCpnt);
356 result = 0;
357 } else {
358
359 }
360 }
361 }
362 if (result) {
363 printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n",
364 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->host->host_no,
365 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->channel,
366 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->id,
367 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->lun, result);
368
369 if (driver_byte(result) & DRIVER_SENSE)
370 print_sense("sd", SCpnt);
371 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
372 requeue_sd_request(SCpnt);
373 return;
374 }
375 }
376
377 /*
378  * do_sd_request() is the block-device request function for the sd
379  * driver.  It pulls requests off the queue, pairs each with a free
380  * Scsi_Cmnd, and passes them to requeue_sd_request() to be turned
381  * into SCSI commands.
382  */
383 static void do_sd_request (void)
384 {
385 Scsi_Cmnd * SCpnt = NULL;
386 struct request * req = NULL;
387 unsigned long flags;
388 int flag = 0;
389
390 save_flags(flags);
391 while (1) {
392 cli();
393 if (CURRENT != NULL && CURRENT->dev == -1) {
394 restore_flags(flags);
395 return;
396 };
397
398 INIT_SCSI_REQUEST;
399
400
401 /*
402  * Be careful here: allocate_device() hands back a free command block,
403  * but there is no guarantee that the host can actually queue it.  We
404  * therefore call it only once per invocation (flag == 0), so that at
405  * least one request is always pending for this device; on later
406  * passes we rely on request_queueable() below, which checks that the
407  * host can accept another command before returning one.
408  */
409
410
411
412
413 if (flag++ == 0)
414 SCpnt = allocate_device(&CURRENT,
415 rscsi_disks[DEVICE_NR(MINOR(CURRENT->dev))].device, 0);
416 else SCpnt = NULL;
417
418
419
420
421
422
423 restore_flags(flags);
424
425
426 /*
427  * Performance enhancement: if the request at the head of the queue
428  * could not be started, walk down the list looking for one whose
429  * device is idle and which the host will accept, and start that one
430  * instead.  Interrupts stay disabled while we manipulate the list so
431  * that nothing can slip a new request in underneath us.
432  */
433
434 if (!SCpnt && sd_template.nr_dev > 1){
435 struct request *req1;
436 req1 = NULL;
437 cli();
438 req = CURRENT;
439 while(req){
440 SCpnt = request_queueable(req, rscsi_disks[DEVICE_NR(MINOR(req->dev))].device);
441 if(SCpnt) break;
442 req1 = req;
443 req = req->next;
444 };
445 if (SCpnt && req->dev == -1) {
446 if (req == CURRENT)
447 CURRENT = CURRENT->next;
448 else
449 req1->next = req->next;
450 };
451 restore_flags(flags);
452 };
453
454 if (!SCpnt) return;
455
456
457 requeue_sd_request(SCpnt);
458 };
459 }
460
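/*
 * requeue_sd_request() translates one block-layer request into a SCSI
 * READ/WRITE.  Broadly: validate the request against the partition
 * table, build either a single contiguous transfer (possibly through a
 * bounce buffer on ISA-DMA-only hosts) or a scatter-gather list from
 * the buffer-head chain, convert the sector count for 256- or
 * 1024-byte hardware sectors, pick a six- or ten-byte CDB, and hand
 * the command to scsi_do_cmd() with rw_intr() as its completion
 * routine.
 */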
461 static void requeue_sd_request (Scsi_Cmnd * SCpnt)
462 {
463 int dev, block, this_count;
464 unsigned char cmd[10];
465 int bounce_size, contiguous;
466 int max_sg;
467 struct buffer_head * bh, *bhp;
468 char * buff, *bounce_buffer;
469
470 repeat:
471
472 if(!SCpnt || SCpnt->request.dev <= 0) {
473 do_sd_request();
474 return;
475 }
476
477 dev = MINOR(SCpnt->request.dev);
478 block = SCpnt->request.sector;
479 this_count = 0;
480
481 #ifdef DEBUG
482 printk("Doing sd request, dev = %d, block = %d\n", dev, block);
483 #endif
484
485 if (dev >= (sd_template.dev_max << 4) ||
486 !rscsi_disks[DEVICE_NR(dev)].device ||
487 block + SCpnt->request.nr_sectors > sd[dev].nr_sects)
488 {
489 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
490 goto repeat;
491 }
492
493 block += sd[dev].start_sect;
494 dev = DEVICE_NR(dev);
495
496 if (rscsi_disks[dev].device->changed)
497 {
498
499 /*
500  * Quietly refuse to touch a disc whose medium has changed until the
501  * changed flag is cleared again by a revalidation.
502  */
503 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
504 goto repeat;
505 }
506
507 #ifdef DEBUG
508 printk("sd%c : real dev = /dev/sd%c, block = %d\n",
509 'a' + MINOR(SCpnt->request.dev), 'a' + dev, block);
510 #endif
511
512
513 /*
514  * With 1024-byte hardware sectors we cannot address a single 512-byte
515  * block, so reject any request that is not aligned to, and a multiple
516  * of, two 512-byte sectors.
517  */
518
519
520
521
522
523 if (rscsi_disks[dev].sector_size == 1024)
524 if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
525 printk("sd.c:Bad block number requested");
526 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
527 goto repeat;
528 }
529
530 switch (SCpnt->request.cmd)
531 {
532 case WRITE :
533 if (!rscsi_disks[dev].device->writeable)
534 {
535 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
536 goto repeat;
537 }
538 cmd[0] = WRITE_6;
539 break;
540 case READ :
541 cmd[0] = READ_6;
542 break;
543 default :
544 panic ("Unknown sd command %d\n", SCpnt->request.cmd);
545 }
546
547 SCpnt->this_count = 0;
548
549
550
551
552 contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
553 bounce_buffer = NULL;
554 bounce_size = (SCpnt->request.nr_sectors << 9);
555
556 /* Hosts restricted to ISA DMA (unchecked_isa_dma) cannot reach memory
557  * above ISA_DMA_THRESHOLD; if a contiguous request ends above that
558  * limit, try to grab one large bounce buffer for the whole transfer.
559  */
560 if (contiguous && SCpnt->request.bh &&
561 ((long) SCpnt->request.bh->b_data)
562 + (SCpnt->request.nr_sectors << 9) - 1 > ISA_DMA_THRESHOLD
563 && SCpnt->host->unchecked_isa_dma) {
564 if(((long) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
565 bounce_buffer = (char *) scsi_malloc(bounce_size);
566 if(!bounce_buffer) contiguous = 0;
567 };
568
569 if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
570 for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp,
571 bhp = bhp->b_reqnext) {
572 if(!CONTIGUOUS_BUFFERS(bh,bhp)) {
573 if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
574 contiguous = 0;
575 break;
576 }
577 };
578 if (!SCpnt->request.bh || contiguous) {
579
580
581 this_count = SCpnt->request.nr_sectors;
582 buff = SCpnt->request.buffer;
583 SCpnt->use_sg = 0;
584
585 } else if (SCpnt->host->sg_tablesize == 0 ||
586 (need_isa_buffer && dma_free_sectors <= 10)) {
587
588 /*
589  * Either the host cannot do scatter-gather at all, or the DMA buffer
590  * pool is nearly exhausted.  Fall back to transferring one buffer at
591  * a time, straight from the request.
592  */
593
594
595 if (SCpnt->host->sg_tablesize != 0 &&
596 need_isa_buffer &&
597 dma_free_sectors <= 10)
598 printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n");
599
600 this_count = SCpnt->request.current_nr_sectors;
601 buff = SCpnt->request.buffer;
602 SCpnt->use_sg = 0;
603
604 } else {
605
606
607 struct scatterlist * sgpnt;
608 int count, this_count_max;
609 int counted;
610
611 bh = SCpnt->request.bh;
612 this_count = 0;
613 this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
614 count = 0;
615 bhp = NULL;
616 while(bh) {
617 if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
618 if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
619 !CLUSTERABLE_DEVICE(SCpnt) ||
620 (SCpnt->host->unchecked_isa_dma &&
621 ((unsigned long) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
622 if (count < SCpnt->host->sg_tablesize) count++;
623 else break;
624 };
625 this_count += (bh->b_size >> 9);
626 bhp = bh;
627 bh = bh->b_reqnext;
628 };
629 #if 0
630 if(SCpnt->host->unchecked_isa_dma &&
631 ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
632 #endif
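/*
 * scsi_malloc() hands out DMA-safe memory in 512-byte units, so the
 * scatter-gather table is rounded up to the smallest power of two of
 * at least 512 bytes that holds use_sg entries; max_sg is then the
 * number of entries that rounded size can actually hold.
 */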
633 SCpnt->use_sg = count;
634 count = 512;
635 while( count < (SCpnt->use_sg * sizeof(struct scatterlist)))
636 count = count << 1;
637 SCpnt->sglist_len = count;
638 max_sg = count / sizeof(struct scatterlist);
639 if(SCpnt->host->sg_tablesize < max_sg)
640 max_sg = SCpnt->host->sg_tablesize;
641 sgpnt = (struct scatterlist * ) scsi_malloc(count);
642 if (!sgpnt) {
643 printk("Warning - running *really* short on DMA buffers\n");
644 SCpnt->use_sg = 0;
645 this_count = SCpnt->request.current_nr_sectors;
646 buff = SCpnt->request.buffer;
647 } else {
648 memset(sgpnt, 0, count);
649
650
651 buff = (char *) sgpnt;
652 counted = 0;
653 for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
654 count < SCpnt->use_sg && bh;
655 count++, bh = bhp) {
656
657 bhp = bh->b_reqnext;
658
659 if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
660 sgpnt[count].length += bh->b_size;
661 counted += bh->b_size >> 9;
662
663 if (((long) sgpnt[count].address) + sgpnt[count].length - 1 >
664 ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
665 !sgpnt[count].alt_address) {
666 sgpnt[count].alt_address = sgpnt[count].address;
667
668
669
670
671 if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
672 sgpnt[count].address = NULL;
673 } else {
674 sgpnt[count].address =
675 (char *) scsi_malloc(sgpnt[count].length);
676 };
677
678
679
680
681
682 if(sgpnt[count].address == NULL){
683 #if 0
684 printk("Warning: Running low on SCSI DMA buffers");
685
686 while(--count >= 0){
687 if(sgpnt[count].alt_address)
688 scsi_free(sgpnt[count].address,
689 sgpnt[count].length);
690 };
691 this_count = SCpnt->request.current_nr_sectors;
692 buff = SCpnt->request.buffer;
693 SCpnt->use_sg = 0;
694 scsi_free(sgpnt, SCpnt->sglist_len);
695 #endif
696 SCpnt->use_sg = count;
697 this_count = counted -= bh->b_size >> 9;
698 break;
699 };
700
701 };
702
703
704
705
706
707 if(bhp && CONTIGUOUS_BUFFERS(bh,bhp)
708 && CLUSTERABLE_DEVICE(SCpnt)) {
709 char * tmp;
710
711 if (((long) sgpnt[count].address) + sgpnt[count].length +
712 bhp->b_size - 1 > ISA_DMA_THRESHOLD &&
713 (SCpnt->host->unchecked_isa_dma) &&
714 !sgpnt[count].alt_address) continue;
715
716 if(!sgpnt[count].alt_address) {count--; continue; }
717 if(dma_free_sectors > 10)
718 tmp = (char *) scsi_malloc(sgpnt[count].length
719 + bhp->b_size);
720 else {
721 tmp = NULL;
722 max_sg = SCpnt->use_sg;
723 };
724 if(tmp){
725 scsi_free(sgpnt[count].address, sgpnt[count].length);
726 sgpnt[count].address = tmp;
727 count--;
728 continue;
729 };
730
731
732
733
734
735 if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
736 };
737 };
738
739
740 this_count = counted;
741
742 if(count < SCpnt->use_sg || SCpnt->use_sg
743 > SCpnt->host->sg_tablesize){
744 bh = SCpnt->request.bh;
745 printk("Use sg, count %d %x %d\n",
746 SCpnt->use_sg, count, dma_free_sectors);
747 printk("maxsg = %x, counted = %d this_count = %d\n",
748 max_sg, counted, this_count);
749 while(bh){
750 printk("[%p %lx] ", bh->b_data, bh->b_size);
751 bh = bh->b_reqnext;
752 };
753 if(SCpnt->use_sg < 16)
754 for(count=0; count<SCpnt->use_sg; count++)
755 printk("{%d:%p %p %d} ", count,
756 sgpnt[count].address,
757 sgpnt[count].alt_address,
758 sgpnt[count].length);
759 panic("Ooops");
760 };
761
762 if (SCpnt->request.cmd == WRITE)
763 for(count=0; count<SCpnt->use_sg; count++)
764 if(sgpnt[count].alt_address)
765 memcpy(sgpnt[count].address, sgpnt[count].alt_address,
766 sgpnt[count].length);
767 };
768 };
769
770 /* Non scatter-gather case: handle DMA to addresses above the ISA
771  * limit by copying through a bounce buffer. */
772 if(SCpnt->use_sg == 0){
773 if (((long) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
774 (SCpnt->host->unchecked_isa_dma)) {
775 if(bounce_buffer)
776 buff = bounce_buffer;
777 else
778 buff = (char *) scsi_malloc(this_count << 9);
779 if(buff == NULL) {
780 this_count = SCpnt->request.current_nr_sectors;
781 buff = (char *) scsi_malloc(this_count << 9);
782 if(!buff) panic("Ran out of DMA buffers.");
783 };
784 if (SCpnt->request.cmd == WRITE)
785 memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
786 };
787 };
788 #ifdef DEBUG
789 printk("sd%c : %s %d/%d 512 byte blocks.\n",
790 'a' + MINOR(SCpnt->request.dev),
791 (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
792 this_count, SCpnt->request.nr_sectors);
793 #endif
794
795 cmd[1] = (SCpnt->lun << 5) & 0xe0;
796
797 if (rscsi_disks[dev].sector_size == 1024){
798 if(block & 1) panic("sd.c:Bad block number requested");
799 if(this_count & 1) panic("sd.c:Bad block number requested");
800 block = block >> 1;
801 this_count = this_count >> 1;
802 };
803
804 if (rscsi_disks[dev].sector_size == 256){
805 block = block << 1;
806 this_count = this_count << 1;
807 };
808
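/*
 * Command block layout, for reference.  The six-byte READ/WRITE CDB
 * carries a 21-bit block address (low 5 bits of byte 1, then bytes 2-3)
 * and an 8-bit transfer length in byte 4, so it is only usable below
 * block 0x1fffff and for at most 0xff blocks.  The ten-byte form
 * (READ_10/WRITE_10) carries a 32-bit big-endian address in bytes 2-5
 * and a 16-bit length in bytes 7-8; bits 7-5 of byte 1 hold the LUN in
 * both forms.
 */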
809 if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten)
810 {
811 if (this_count > 0xffff)
812 this_count = 0xffff;
813
814 cmd[0] += READ_10 - READ_6 ;
815 cmd[2] = (unsigned char) (block >> 24) & 0xff;
816 cmd[3] = (unsigned char) (block >> 16) & 0xff;
817 cmd[4] = (unsigned char) (block >> 8) & 0xff;
818 cmd[5] = (unsigned char) block & 0xff;
819 cmd[6] = cmd[9] = 0;
820 cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
821 cmd[8] = (unsigned char) this_count & 0xff;
822 }
823 else
824 {
825 if (this_count > 0xff)
826 this_count = 0xff;
827
828 cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
829 cmd[2] = (unsigned char) ((block >> 8) & 0xff);
830 cmd[3] = (unsigned char) block & 0xff;
831 cmd[4] = (unsigned char) this_count;
832 cmd[5] = 0;
833 }
834
835 /*
836  * We should not disconnect in the middle of a sector, so tell the
837  * host the device's transfer size; underflow is the minimum number of
838  * bytes that must arrive before the transfer counts as complete.
839  */
840
841 SCpnt->transfersize = rscsi_disks[dev].sector_size;
842 SCpnt->underflow = this_count << 9;
843 scsi_do_cmd (SCpnt, (void *) cmd, buff,
844 this_count * rscsi_disks[dev].sector_size,
845 rw_intr,
846 (SCpnt->device->type == TYPE_DISK ?
847 SD_TIMEOUT : SD_MOD_TIMEOUT),
848 MAX_RETRIES);
849 }
850
851 static int check_scsidisk_media_change(dev_t full_dev){
852 int retval;
853 int target;
854 struct inode inode;
855 int flag = 0;
856
857 target = DEVICE_NR(MINOR(full_dev));
858
859 if (target >= sd_template.dev_max ||
860 !rscsi_disks[target].device) {
861 printk("SCSI disk request error: invalid device.\n");
862 return 0;
863 };
864
865 if(!rscsi_disks[target].device->removable) return 0;
866
867 inode.i_rdev = full_dev;
868 retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
869
870 if(retval){
871 /* TEST UNIT READY failed: the unit is probably not ready, usually
872  * because there is no medium in the drive.  Mark it changed; we will
873  * sort it out once the drive is available again.
874  */
875 rscsi_disks[target].device->changed = 1;
876 return 1;
877
878 };
879
880 retval = rscsi_disks[target].device->changed;
881 if(!flag) rscsi_disks[target].device->changed = 0;
882 return retval;
883 }
884
885 static void sd_init_done (Scsi_Cmnd * SCpnt)
886 {
887 struct request * req;
888
889 req = &SCpnt->request;
890 req->dev = 0xfffe;
891
892 if (req->sem != NULL) {
893 up(req->sem);
894 }
895 }
896
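/*
 * sd_init_onedisk() probes one disk: at boot time it first issues
 * TEST UNIT READY and, if the unit reports NOT READY, a START/STOP
 * command with the start bit set to spin the drive up; it then reads
 * the capacity with READ CAPACITY and works out the hardware sector
 * size.  Completions are caught by sd_init_done(), with request.dev
 * used as a simple done flag (0xffff = in flight, 0xfffe = finished).
 */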
897 static int sd_init_onedisk(int i)
898 {
899 unsigned char cmd[10];
900 unsigned char *buffer;
901 unsigned long spintime;
902 int the_result, retries;
903 Scsi_Cmnd * SCpnt;
904
905
906
907
908
909
910 SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
911 buffer = (unsigned char *) scsi_malloc(512);
912
913 spintime = 0;
914
915
916 if (current->pid == 0){
917 do{
918 retries = 0;
919 while(retries < 3)
920 {
921 cmd[0] = TEST_UNIT_READY;
922 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
923 memset ((void *) &cmd[2], 0, 8);
924 SCpnt->request.dev = 0xffff;
925 SCpnt->cmd_len = 0;
926 SCpnt->sense_buffer[0] = 0;
927 SCpnt->sense_buffer[2] = 0;
928
929 scsi_do_cmd (SCpnt,
930 (void *) cmd, (void *) buffer,
931 512, sd_init_done, SD_TIMEOUT,
932 MAX_RETRIES);
933
934 while(SCpnt->request.dev != 0xfffe) barrier();
935
936 the_result = SCpnt->result;
937 retries++;
938 if( the_result == 0
939 || SCpnt->sense_buffer[2] != UNIT_ATTENTION)
940 break;
941 }
942
943
944
945 if(the_result && !rscsi_disks[i].device->removable &&
946 SCpnt->sense_buffer[2] == NOT_READY) {
947 int time1;
948 if(!spintime){
949 printk( "sd%c: Spinning up disk...", 'a' + i );
950 cmd[0] = START_STOP;
951 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
952 cmd[1] |= 1;
953 memset ((void *) &cmd[2], 0, 8);
954 cmd[4] = 1;
955
956 SCpnt->request.dev = 0xffff;
957 SCpnt->cmd_len = 0;
958 SCpnt->sense_buffer[0] = 0;
959 SCpnt->sense_buffer[2] = 0;
960
961 scsi_do_cmd (SCpnt,
962 (void *) cmd, (void *) buffer,
963 512, sd_init_done, SD_TIMEOUT,
964 MAX_RETRIES);
965
966 while(SCpnt->request.dev != 0xfffe)
967 barrier();
968
969 spintime = jiffies;
970 };
971
972 time1 = jiffies;
973 while(jiffies < time1 + HZ);
974 printk( "." );
975 };
976 } while(the_result && spintime && spintime+100*HZ > jiffies);
977 if (spintime) {
978 if (the_result)
979 printk( "not responding...\n" );
980 else
981 printk( "ready\n" );
982 }
983 };
984
985
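/*
 * READ CAPACITY returns eight bytes: the big-endian logical block
 * address of the last block in bytes 0-3, and the block length, in
 * bytes, in bytes 4-7.  That is what the buffer[0..7] arithmetic
 * further down unpacks into rscsi_disks[i].capacity and .sector_size.
 */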
986 retries = 3;
987 do {
988 cmd[0] = READ_CAPACITY;
989 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
990 memset ((void *) &cmd[2], 0, 8);
991 memset ((void *) buffer, 0, 8);
992 SCpnt->request.dev = 0xffff;
993 SCpnt->cmd_len = 0;
994 SCpnt->sense_buffer[0] = 0;
995 SCpnt->sense_buffer[2] = 0;
996
997 scsi_do_cmd (SCpnt,
998 (void *) cmd, (void *) buffer,
999 8, sd_init_done, SD_TIMEOUT,
1000 MAX_RETRIES);
1001
1002 if (current->pid == 0)
1003 while(SCpnt->request.dev != 0xfffe)
1004 barrier();
1005 else
1006 if (SCpnt->request.dev != 0xfffe){
1007 struct semaphore sem = MUTEX_LOCKED;
1008 SCpnt->request.sem = &sem;
1009 down(&sem);
1010
1011 while (SCpnt->request.dev != 0xfffe)
1012 schedule();
1013 };
1014
1015 the_result = SCpnt->result;
1016 retries--;
1017
1018 } while(the_result && retries);
1019
1020 SCpnt->request.dev = -1;
1021
1022 wake_up(&SCpnt->device->device_wait);
1023
1024
1025 /*
1026  * Some devices fail READ CAPACITY, and we no longer give up when that
1027  * happens; we just carry on.  The side effects are:
1028  *
1029  *  1.  We cannot know the block size for certain, so 512 bytes is
1030  *      assumed, as that is the most common value.
1031  *
1032  *  2.  Attempts to read a sector that does not really exist will not
1033  *      be handled gracefully.
1034  */
1035
1036
1037
1038
1039
1040 if (the_result)
1041 {
1042 printk ("sd%c : READ CAPACITY failed.\n"
1043 "sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
1044 'a' + i, 'a' + i,
1045 status_byte(the_result),
1046 msg_byte(the_result),
1047 host_byte(the_result),
1048 driver_byte(the_result)
1049 );
1050 if (driver_byte(the_result) & DRIVER_SENSE)
1051 printk("sd%c : extended sense code = %1x \n",
1052 'a' + i, SCpnt->sense_buffer[2] & 0xf);
1053 else
1054 printk("sd%c : sense not available. \n", 'a' + i);
1055
1056 printk("sd%c : block size assumed to be 512 bytes, disk size 1GB. \n",
1057 'a' + i);
1058 rscsi_disks[i].capacity = 0x1fffff;
1059 rscsi_disks[i].sector_size = 512;
1060
1061
1062
1063 if(rscsi_disks[i].device->removable &&
1064 SCpnt->sense_buffer[2] == NOT_READY)
1065 rscsi_disks[i].device->changed = 1;
1066
1067 }
1068 else
1069 {
1070 rscsi_disks[i].capacity = (buffer[0] << 24) |
1071 (buffer[1] << 16) |
1072 (buffer[2] << 8) |
1073 buffer[3];
1074
1075 rscsi_disks[i].sector_size = (buffer[4] << 24) |
1076 (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
1077
1078 if (rscsi_disks[i].sector_size != 512 &&
1079 rscsi_disks[i].sector_size != 1024 &&
1080 rscsi_disks[i].sector_size != 256)
1081 {
1082 printk ("sd%c : unsupported sector size %d.\n",
1083 'a' + i, rscsi_disks[i].sector_size);
1084 if(rscsi_disks[i].device->removable){
1085 rscsi_disks[i].capacity = 0;
1086 } else {
1087 printk ("scsi : deleting disk entry.\n");
1088 rscsi_disks[i].device = NULL;
1089 sd_template.nr_dev--;
1090 return i;
1091 };
1092 }
1093 {
1094
1095 /* Record the hardware sector size for every minor of this disk so
1096  * that the block layer (and filesystems that care, such as msdos)
1097  * can look it up in hardsect_size[].
1098  */
1099 int m;
1100 int hard_sector = rscsi_disks[i].sector_size;
1101
1102 for (m=i<<4; m<((i+1)<<4); m++){
1103 sd_hardsizes[m] = hard_sector;
1104 }
1105 printk ("SCSI Hardware sector size is %d bytes on device sd%c\n",
1106 hard_sector,i+'a');
1107 }
1108 if(rscsi_disks[i].sector_size == 1024)
1109 rscsi_disks[i].capacity <<= 1;
1110 if(rscsi_disks[i].sector_size == 256)
1111 rscsi_disks[i].capacity >>= 1;
1112 }
1113
1114 rscsi_disks[i].ten = 1;
1115 rscsi_disks[i].remap = 1;
1116 scsi_free(buffer, 512);
1117 return i;
1118 }
1119
1120
1121 /*
1122  * sd_init() registers the block device and sizes the per-device and
1123  * per-partition arrays based on the number of disks the mid-layer
1124  * reported.
1125  */
1126 static void sd_init()
1127 {
1128 int i;
1129 static int sd_registered = 0;
1130
1131 if (sd_template.dev_noticed == 0) return;
1132
1133 if(!sd_registered) {
1134 if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
1135 printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
1136 return;
1137 }
1138 sd_registered++;
1139 }
1140
1141
1142 if(rscsi_disks) return;
1143
1144 sd_template.dev_max = sd_template.dev_noticed + SD_EXTRA_DEVS;
1145
1146 rscsi_disks = (Scsi_Disk *)
1147 scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC);
1148 memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));
1149
1150 sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1151 sizeof(int), GFP_ATOMIC);
1152 memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));
1153
1154 sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1155 sizeof(int), GFP_ATOMIC);
1156
1157 sd_hardsizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1158 sizeof(int), GFP_ATOMIC);
1159
1160 for(i=0;i<(sd_template.dev_max << 4);i++){
1161 sd_blocksizes[i] = 1024;
1162 sd_hardsizes[i] = 512;
1163 }
1164 blksize_size[MAJOR_NR] = sd_blocksizes;
1165 hardsect_size[MAJOR_NR] = sd_hardsizes;
1166 sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
1167 sizeof(struct hd_struct),
1168 GFP_ATOMIC);
1169
1170
1171 sd_gendisk.max_nr = sd_template.dev_max;
1172 sd_gendisk.part = sd;
1173 sd_gendisk.sizes = sd_sizes;
1174 sd_gendisk.real_devices = (void *) rscsi_disks;
1175
1176 }
1177
1178 static void sd_finish()
1179 {
1180 int i;
1181
1182 blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
1183
1184 sd_gendisk.next = gendisk_head;
1185 gendisk_head = &sd_gendisk;
1186
1187 for (i = 0; i < sd_template.dev_max; ++i)
1188 if (!rscsi_disks[i].capacity &&
1189 rscsi_disks[i].device)
1190 {
1191 i = sd_init_onedisk(i);
1192 if (MODULE_FLAG
1193 && !rscsi_disks[i].has_part_table) {
1194 sd_sizes[i << 4] = rscsi_disks[i].capacity;
1195 revalidate_scsidisk(i << 4, 0);
1196 }
1197 rscsi_disks[i].has_part_table = 1;
1198 }
1199
1200 /* If the host adapter can do scatter-gather, use a generous
1201  * read-ahead; otherwise keep it small.
1202  */
1203
1204 if(rscsi_disks[0].device && rscsi_disks[0].device->host->sg_tablesize)
1205 read_ahead[MAJOR_NR] = 120;
1206 else
1207 read_ahead[MAJOR_NR] = 4;
1208
1209 return;
1210 }
1211
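/*
 * The routines below are the Scsi_Device_Template callbacks.
 * sd_detect() only counts and announces disks during the bus scan;
 * sd_attach() claims a slot in rscsi_disks[] and points the device's
 * request function at do_sd_request(); sd_detach() undoes that and
 * invalidates any partitions when a device goes away.
 */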
1212 static int sd_detect(Scsi_Device * SDp){
1213 if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1214
1215 printk("Detected scsi disk sd%c at scsi%d, channel %d, id %d, lun %d\n",
1216 'a'+ (sd_template.dev_noticed++),
1217 SDp->host->host_no, SDp->channel, SDp->id, SDp->lun);
1218
1219 return 1;
1220 }
1221
1222 static int sd_attach(Scsi_Device * SDp){
1223 Scsi_Disk * dpnt;
1224 int i;
1225
1226 if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1227
1228 if(sd_template.nr_dev >= sd_template.dev_max) {
1229 SDp->attached--;
1230 return 1;
1231 }
1232
1233 for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
1234 if(!dpnt->device) break;
1235
1236 if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)");
1237
1238 SDp->scsi_request_fn = do_sd_request;
1239 rscsi_disks[i].device = SDp;
1240 rscsi_disks[i].has_part_table = 0;
1241 sd_template.nr_dev++;
1242 sd_gendisk.nr_real++;
1243 return 0;
1244 }
1245
1246 #define DEVICE_BUSY rscsi_disks[target].device->busy
1247 #define USAGE rscsi_disks[target].device->access_count
1248 #define CAPACITY rscsi_disks[target].capacity
1249 #define MAYBE_REINIT sd_init_onedisk(target)
1250 #define GENDISK_STRUCT sd_gendisk
1251
1252
1253 /*
1254  * Flush all partitions and partition tables of a changed SCSI disk
1255  * and re-read the new partition table.  When revalidating because of
1256  * a media change we enter with maxusage == 0; an ioctl caller
1257  * necessarily has the device open, so it passes maxusage == 1.
1258  */
1259 int revalidate_scsidisk(int dev, int maxusage){
1260 int target, major;
1261 struct gendisk * gdev;
1262 unsigned long flags;
1263 int max_p;
1264 int start;
1265 int i;
1266
1267 target = DEVICE_NR(MINOR(dev));
1268 gdev = &GENDISK_STRUCT;
1269
1270 save_flags(flags);
1271 cli();
1272 if (DEVICE_BUSY || USAGE > maxusage) {
1273 restore_flags(flags);
1274 printk("Device busy for revalidation (usage=%d)\n", USAGE);
1275 return -EBUSY;
1276 };
1277 DEVICE_BUSY = 1;
1278 restore_flags(flags);
1279
1280 max_p = gdev->max_p;
1281 start = target << gdev->minor_shift;
1282 major = MAJOR_NR << 8;
1283
1284 for (i=max_p - 1; i >=0 ; i--) {
1285 sync_dev(major | start | i);
1286 invalidate_inodes(major | start | i);
1287 invalidate_buffers(major | start | i);
1288 gdev->part[start+i].start_sect = 0;
1289 gdev->part[start+i].nr_sects = 0;
1290 };
1291
1292 #ifdef MAYBE_REINIT
1293 MAYBE_REINIT;
1294 #endif
1295
1296 gdev->part[start].nr_sects = CAPACITY;
1297 resetup_one_dev(gdev, target);
1298
1299 DEVICE_BUSY = 0;
1300 return 0;
1301 }
1302
1303 static int fop_revalidate_scsidisk(dev_t dev){
1304 return revalidate_scsidisk(dev, 0);
1305 }
1306
1307
1308 static void sd_detach(Scsi_Device * SDp)
1309 {
1310 Scsi_Disk * dpnt;
1311 int i;
1312 int max_p;
1313 int major;
1314 int start;
1315
1316 for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
1317 if(dpnt->device == SDp) {
1318
1319
1320
1321 max_p = sd_gendisk.max_p;
1322 start = i << sd_gendisk.minor_shift;
1323 major = MAJOR_NR << 8;
1324
1325 for (i=max_p - 1; i >=0 ; i--) {
1326 sync_dev(major | start | i);
1327 invalidate_inodes(major | start | i);
1328 invalidate_buffers(major | start | i);
1329 sd_gendisk.part[start+i].start_sect = 0;
1330 sd_gendisk.part[start+i].nr_sects = 0;
1331 sd_sizes[start+i] = 0;
1332 };
1333
1334 dpnt->has_part_table = 0;
1335 dpnt->device = NULL;
1336 dpnt->capacity = 0;
1337 SDp->attached--;
1338 sd_template.dev_noticed--;
1339 sd_template.nr_dev--;
1340 sd_gendisk.nr_real--;
1341 return;
1342 }
1343 return;
1344 }
1345
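/*
 * When built as a module, init_module() simply registers sd_template
 * with the SCSI mid-layer, which then runs the normal
 * detect/init/attach/finish sequence for any disks already known;
 * cleanup_module() unregisters, releases the arrays allocated by
 * sd_init(), and unlinks sd_gendisk from the gendisk chain.
 */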
1346 #ifdef MODULE
1347 #include <linux/module.h>
1348 #include <linux/version.h>
1349
1350 char kernel_version[] = UTS_RELEASE;
1351
1352 int init_module(void) {
1353 sd_template.usage_count = &mod_use_count_;
1354 return scsi_register_module(MODULE_SCSI_DEV, &sd_template);
1355 }
1356
1357 void cleanup_module( void)
1358 {
1359 struct gendisk * prev_sdgd;
1360 struct gendisk * sdgd;
1361
1362 if (MOD_IN_USE) {
1363 printk(KERN_INFO __FILE__ ": module is in use, remove rejected\n");
1364 return;
1365 }
1366 scsi_unregister_module(MODULE_SCSI_DEV, &sd_template);
1367 unregister_blkdev(MAJOR_NR, "sd");
1368 if( rscsi_disks != NULL )
1369 {
1370 scsi_init_free((char *) rscsi_disks,
1371 (sd_template.dev_noticed + SD_EXTRA_DEVS)
1372 * sizeof(Scsi_Disk));
1373
1374 scsi_init_free((char *) sd_sizes, (sd_template.dev_max << 4) * sizeof(int));
1375 scsi_init_free((char *) sd_blocksizes, (sd_template.dev_max << 4) * sizeof(int));
1376 scsi_init_free((char *) sd_hardsizes, (sd_template.dev_max << 4) * sizeof(int));
1377 scsi_init_free((char *) sd,
1378 (sd_template.dev_max << 4) * sizeof(struct hd_struct));
1379
1380
1381
1382 sdgd = gendisk_head;
1383 prev_sdgd = NULL;
1384 while(sdgd != NULL && sdgd != &sd_gendisk)
1385 {
1386 prev_sdgd = sdgd;
1387 sdgd = sdgd->next;
1388 }
1389
1390 if(sdgd != &sd_gendisk)
1391 printk("sd_gendisk not in disk chain.\n");
1392 else {
1393 if(prev_sdgd != NULL)
1394 prev_sdgd->next = sdgd->next;
1395 else
1396 gendisk_head = sdgd->next;
1397 }
1398 }
1399
1400 blksize_size[MAJOR_NR] = NULL;
1401 blk_dev[MAJOR_NR].request_fn = NULL;
1402 blk_size[MAJOR_NR] = NULL;
1403 hardsect_size[MAJOR_NR] = NULL;
1404 read_ahead[MAJOR_NR] = 0;
1405 sd_template.dev_max = 0;
1406 }
1407 #endif