This source file includes the following definitions.
- sd_open
- sd_release
- sd_geninit
- rw_intr
- do_sd_request
- requeue_sd_request
- check_scsidisk_media_change
- sd_init_done
- sd_init_onedisk
- sd_init
- sd_finish
- sd_detect
- sd_attach
- revalidate_scsidisk
- fop_revalidate_scsidisk
- sd_detach
- init_module
- cleanup_module
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 #include <linux/fs.h>
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/mm.h>
23 #include <linux/string.h>
24 #include <linux/errno.h>
25 #include <asm/system.h>
26
27 #define MAJOR_NR SCSI_DISK_MAJOR
28 #include "../block/blk.h"
29 #include "scsi.h"
30 #include "hosts.h"
31 #include "sd.h"
32 #include "scsi_ioctl.h"
33 #include "constants.h"
34
35 #include <linux/genhd.h>
36
37
38
39
40
41 #define MAX_RETRIES 5
42
43
44
45
46
47 #define SD_TIMEOUT 700
48 #define SD_MOD_TIMEOUT 750
49
50 #define CLUSTERABLE_DEVICE(SC) (SC->host->use_clustering && \
51 SC->device->type != TYPE_MOD)
52
53 struct hd_struct * sd;
54 int revalidate_scsidisk(int dev, int maxusage);
55
56 Scsi_Disk * rscsi_disks = NULL;
57 static int * sd_sizes;
58 static int * sd_blocksizes;
59 static int * sd_hardsizes;
60
61 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
62
63 static int check_scsidisk_media_change(dev_t);
64 static int fop_revalidate_scsidisk(dev_t);
65
66 static sd_init_onedisk(int);
67
68 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
69
70 static void sd_init(void);
71 static void sd_finish(void);
72 static int sd_attach(Scsi_Device *);
73 static int sd_detect(Scsi_Device *);
74 static void sd_detach(Scsi_Device *);
75
76 struct Scsi_Device_Template sd_template =
77 { NULL, "disk", "sd", NULL, TYPE_DISK,
78 SCSI_DISK_MAJOR, 0, 0, 0, 1,
79 sd_detect, sd_init,
80 sd_finish, sd_attach, sd_detach
81 };
82
/*
 * Open a SCSI disk device node.
 *
 * Waits for any in-progress revalidation to finish, handles removable
 * media (change detection plus door locking on first open), and bumps
 * the usage counts consulted by the module code.
 *
 * Returns 0 on success, -ENXIO if the target does not exist or the
 * minor currently has zero size (e.g. no medium present).
 */
static int sd_open(struct inode * inode, struct file * filp)
{
    int target;
    target = DEVICE_NR(MINOR(inode->i_rdev));

    if(target >= sd_template.dev_max || !rscsi_disks[target].device)
        return -ENXIO;   /* No such device */

    /*
     * Busy-wait until the device is free (e.g. a revalidation is in
     * progress); barrier() forces the flag to be re-read each pass.
     */
    while (rscsi_disks[target].device->busy)
        barrier();
    if(rscsi_disks[target].device->removable) {
        /* May trigger a revalidate, which refreshes sd_sizes[]. */
        check_disk_change(inode->i_rdev);

        /* First open of a removable device: lock the door so the
         * medium cannot be swapped while it is in use. */
        if(!rscsi_disks[target].device->access_count)
            sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
    };

    /*
     * A zero-size minor cannot be opened.
     * NOTE(review): if the door was just locked above, this error path
     * returns with it still locked -- confirm whether an unlock is
     * wanted here.
     */
    if(sd_sizes[MINOR(inode->i_rdev)] == 0)
        return -ENXIO;

    rscsi_disks[target].device->access_count++;
    if (rscsi_disks[target].device->host->hostt->usage_count)
        (*rscsi_disks[target].device->host->hostt->usage_count)++;
    if(sd_template.usage_count) (*sd_template.usage_count)++;
    return 0;
}
119
120 static void sd_release(struct inode * inode, struct file * file)
121 {
122 int target;
123 sync_dev(inode->i_rdev);
124
125 target = DEVICE_NR(MINOR(inode->i_rdev));
126
127 rscsi_disks[target].device->access_count--;
128 if (rscsi_disks[target].device->host->hostt->usage_count)
129 (*rscsi_disks[target].device->host->hostt->usage_count)--;
130 if(sd_template.usage_count) (*sd_template.usage_count)--;
131
132 if(rscsi_disks[target].device->removable) {
133 if(!rscsi_disks[target].device->access_count)
134 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
135 }
136 }
137
static void sd_geninit(void);

/* File operations for the sd major; NULL slots take the defaults.
 * Slot labels follow the era's struct file_operations layout --
 * confirm against <linux/fs.h>. */
static struct file_operations sd_fops = {
    NULL,                        /* lseek - default */
    block_read,                  /* read - generic block-device read */
    block_write,                 /* write - generic block-device write */
    NULL,                        /* readdir - bad */
    NULL,                        /* select */
    sd_ioctl,                    /* ioctl */
    NULL,                        /* mmap */
    sd_open,                     /* open */
    sd_release,                  /* release */
    block_fsync,                 /* fsync */
    NULL,                        /* fasync */
    check_scsidisk_media_change, /* media change */
    fop_revalidate_scsidisk      /* revalidate */
};

/* Generic-disk descriptor: 4 partition bits, i.e. 16 minors per disk.
 * The NULL/0 slots (part, sizes, real_devices, max_nr) are filled in by
 * sd_init(); nr_real is maintained by sd_attach()/sd_detach().  Slot
 * order per <linux/genhd.h> -- confirm. */
static struct gendisk sd_gendisk = {
    MAJOR_NR,                    /* major number */
    "sd",                        /* major name */
    4,                           /* bits to shift to get real from partition */
    1 << 4,                      /* number of partitions per real */
    0,                           /* maximum number of real (set by sd_init) */
    sd_geninit,                  /* init function */
    NULL,                        /* hd struct (set by sd_init) */
    NULL,                        /* block sizes (set by sd_init) */
    0,                           /* number of real devices */
    NULL,                        /* real_devices (set by sd_init) */
    NULL                         /* next gendisk in chain */
};
169
170 static void sd_geninit (void)
171 {
172 int i;
173
174 for (i = 0; i < sd_template.dev_max; ++i)
175 if(rscsi_disks[i].device)
176 sd[i << 4].nr_sects = rscsi_disks[i].capacity;
177 #if 0
178
179 sd_gendisk.nr_real = sd_template.dev_max;
180 #endif
181 }
182
183
184
185
186
187
188
189 static void rw_intr (Scsi_Cmnd *SCpnt)
190 {
191 int result = SCpnt->result;
192 int this_count = SCpnt->bufflen >> 9;
193
194 #ifdef DEBUG
195 printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.dev),
196 SCpnt->host->host_no, result);
197 #endif
198
199
200
201
202
203
204
205 if (!result) {
206
207 #ifdef DEBUG
208 printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.dev),
209 SCpnt->request.nr_sectors);
210 printk("use_sg is %d\n ",SCpnt->use_sg);
211 #endif
212 if (SCpnt->use_sg) {
213 struct scatterlist * sgpnt;
214 int i;
215 sgpnt = (struct scatterlist *) SCpnt->buffer;
216 for(i=0; i<SCpnt->use_sg; i++) {
217 #ifdef DEBUG
218 printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address,
219 sgpnt[i].length);
220 #endif
221 if (sgpnt[i].alt_address) {
222 if (SCpnt->request.cmd == READ)
223 memcpy(sgpnt[i].alt_address, sgpnt[i].address,
224 sgpnt[i].length);
225 scsi_free(sgpnt[i].address, sgpnt[i].length);
226 };
227 };
228
229
230 scsi_free(SCpnt->buffer, SCpnt->sglist_len);
231 } else {
232 if (SCpnt->buffer != SCpnt->request.buffer) {
233 #ifdef DEBUG
234 printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
235 SCpnt->bufflen);
236 #endif
237 if (SCpnt->request.cmd == READ)
238 memcpy(SCpnt->request.buffer, SCpnt->buffer,
239 SCpnt->bufflen);
240 scsi_free(SCpnt->buffer, SCpnt->bufflen);
241 };
242 };
243
244
245
246
247
248 if (SCpnt->request.nr_sectors > this_count)
249 {
250 SCpnt->request.errors = 0;
251
252 if (!SCpnt->request.bh)
253 {
254 #ifdef DEBUG
255 printk("sd%c : handling page request, no buffer\n",
256 'a' + MINOR(SCpnt->request.dev));
257 #endif
258
259
260
261
262 panic("sd.c: linked page request (%lx %x)",
263 SCpnt->request.sector, this_count);
264 }
265 }
266 SCpnt = end_scsi_request(SCpnt, 1, this_count);
267 requeue_sd_request(SCpnt);
268 return;
269 }
270
271
272 if (SCpnt->use_sg) {
273 struct scatterlist * sgpnt;
274 int i;
275 sgpnt = (struct scatterlist *) SCpnt->buffer;
276 for(i=0; i<SCpnt->use_sg; i++) {
277 #ifdef DEBUG
278 printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
279 SCpnt->bufflen);
280 #endif
281 if (sgpnt[i].alt_address) {
282 scsi_free(sgpnt[i].address, sgpnt[i].length);
283 };
284 };
285 scsi_free(SCpnt->buffer, SCpnt->sglist_len);
286 } else {
287 #ifdef DEBUG
288 printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
289 SCpnt->bufflen);
290 #endif
291 if (SCpnt->buffer != SCpnt->request.buffer)
292 scsi_free(SCpnt->buffer, SCpnt->bufflen);
293 };
294
295
296
297
298
299
300
301 if (driver_byte(result) != 0) {
302 if (suggestion(result) == SUGGEST_REMAP) {
303 #ifdef REMAP
304
305
306
307
308 if rscsi_disks[DEVICE_NR(SCpnt->request.dev)].remap
309 {
310 result = 0;
311 }
312 else
313 #endif
314 }
315
316 if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
317 if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
318 if(rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->removable) {
319
320
321
322 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->changed = 1;
323 SCpnt = end_scsi_request(SCpnt, 0, this_count);
324 requeue_sd_request(SCpnt);
325 return;
326 }
327 }
328 }
329
330
331
332
333
334
335
336
337
338 if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
339 if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten) {
340 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten = 0;
341 requeue_sd_request(SCpnt);
342 result = 0;
343 } else {
344
345 }
346 }
347 }
348 if (result) {
349 printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n",
350 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->host->host_no,
351 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->channel,
352 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->id,
353 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->lun, result);
354
355 if (driver_byte(result) & DRIVER_SENSE)
356 print_sense("sd", SCpnt);
357 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
358 requeue_sd_request(SCpnt);
359 return;
360 }
361 }
362
363
364
365
366
367
368
/*
 * do_sd_request() is the request function for the sd major: it takes
 * requests off the block queue, allocates a Scsi_Cmnd for each, and
 * hands them to requeue_sd_request() which builds and issues the SCSI
 * command.  Loops until the queue is empty or no command can be started.
 */
static void do_sd_request (void)
{
    Scsi_Cmnd * SCpnt = NULL;
    struct request * req = NULL;
    unsigned long flags;
    int flag = 0;

    save_flags(flags);
    while (1==1){
        cli();  /* queue manipulation must not race with interrupts */

        /* dev == -1 marks the head request as already claimed. */
        if (CURRENT != NULL && CURRENT->dev == -1) {
            restore_flags(flags);
            return;
        };

        INIT_SCSI_REQUEST;  /* validates CURRENT; bails out when empty */

        /*
         * Only try a non-blocking allocate_device() for the head
         * request on the first pass; later iterations go straight to
         * the queue scan below.
         */
        if (flag++ == 0)
            SCpnt = allocate_device(&CURRENT,
                                    rscsi_disks[DEVICE_NR(MINOR(CURRENT->dev))].device, 0);
        else SCpnt = NULL;

        restore_flags(flags);

        /*
         * With more than one disk, the head request's device may be
         * busy while another queued request's device is idle.  Scan
         * the queue for any request whose device can take a command
         * now, and unlink it from the list once claimed (dev == -1).
         */
        if (!SCpnt && sd_template.nr_dev > 1){
            struct request *req1;
            req1 = NULL;
            cli();
            req = CURRENT;
            while(req){
                SCpnt = request_queueable(req, rscsi_disks[DEVICE_NR(MINOR(req->dev))].device);
                if(SCpnt) break;
                req1 = req;
                req = req->next;
            };
            if (SCpnt && req->dev == -1) {
                if (req == CURRENT)
                    CURRENT = CURRENT->next;
                else
                    req1->next = req->next;
            };
            restore_flags(flags);
        };

        if (!SCpnt) return;  /* nothing can be started right now */

        /* Build and issue the command for this request. */
        requeue_sd_request(SCpnt);
    };
}
446
/*
 * requeue_sd_request() turns the block-layer request attached to SCpnt
 * into an actual READ(6)/WRITE(6) or READ(10)/WRITE(10) command and
 * issues it via scsi_do_cmd(), with rw_intr() as completion routine.
 *
 * Most of the complexity is deciding how the data will be transferred:
 *  - a single contiguous transfer (possibly through one ISA bounce
 *    buffer) when the buffers allow it,
 *  - a scatter-gather list, with per-segment bounce buffers for
 *    segments above ISA_DMA_THRESHOLD on unchecked-ISA-DMA hosts,
 *  - or a degraded single-buffer_head transfer when DMA memory is low.
 *
 * Invalid or stale requests are completed with an error and the next
 * request is fetched (the `repeat:` loop).
 */
static void requeue_sd_request (Scsi_Cmnd * SCpnt)
{
    int dev, block, this_count;
    unsigned char cmd[10];
    int bounce_size, contiguous;
    int max_sg;
    struct buffer_head * bh, *bhp;
    char * buff, *bounce_buffer;

repeat:

    /* No live request attached: go fetch one from the queue. */
    if(!SCpnt || SCpnt->request.dev <= 0) {
        do_sd_request();
        return;
    }

    dev = MINOR(SCpnt->request.dev);
    block = SCpnt->request.sector;
    this_count = 0;

#ifdef DEBUG
    printk("Doing sd request, dev = %d, block = %d\n", dev, block);
#endif

    /* Reject requests for nonexistent devices or past end of partition. */
    if (dev >= (sd_template.dev_max << 4) ||
        !rscsi_disks[DEVICE_NR(dev)].device ||
        block + SCpnt->request.nr_sectors > sd[dev].nr_sects)
    {
        SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
        goto repeat;
    }

    /* Translate partition-relative block to absolute, and minor to disk. */
    block += sd[dev].start_sect;
    dev = DEVICE_NR(dev);

    if (rscsi_disks[dev].device->changed)
    {
        /* Medium was changed: quietly fail everything until the disk
         * is revalidated. */
        SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
        goto repeat;
    }

#ifdef DEBUG
    printk("sd%c : real dev = /dev/sd%c, block = %d\n",
           'a' + MINOR(SCpnt->request.dev), dev, block);
#endif

    /* 1K-sector devices can only address even 512-byte sector counts. */
    if (rscsi_disks[dev].sector_size == 1024)
        if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
            printk("sd.c:Bad block number requested");
            SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
            goto repeat;
        }

    switch (SCpnt->request.cmd)
    {
    case WRITE :
        if (!rscsi_disks[dev].device->writeable)
        {
            SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
            goto repeat;
        }
        cmd[0] = WRITE_6;
        break;
    case READ :
        cmd[0] = READ_6;
        break;
    default :
        panic ("Unknown sd command %d\n", SCpnt->request.cmd);
    }

    SCpnt->this_count = 0;

    /* Try a single contiguous transfer when the host can cluster. */
    contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
    bounce_buffer = NULL;
    bounce_size = (SCpnt->request.nr_sectors << 9);

    /* The whole transfer would cross ISA_DMA_THRESHOLD on an
     * unchecked-ISA-DMA host: try one big bounce buffer. */
    if (contiguous && SCpnt->request.bh &&
        ((long) SCpnt->request.bh->b_data)
        + (SCpnt->request.nr_sectors << 9) - 1 > ISA_DMA_THRESHOLD
        && SCpnt->host->unchecked_isa_dma) {
        if(((long) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
            bounce_buffer = (char *) scsi_malloc(bounce_size);
        if(!bounce_buffer) contiguous = 0;
    };

    /* Contiguity only holds if every buffer head in the chain is
     * physically adjacent to the next. */
    if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
        for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp,
            bhp = bhp->b_reqnext) {
            if(!CONTIGUOUS_BUFFERS(bh,bhp)) {
                if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
                contiguous = 0;
                break;
            }
        };
    if (!SCpnt->request.bh || contiguous) {

        /* Case 1: one contiguous transfer covering the request. */
        this_count = SCpnt->request.nr_sectors;
        buff = SCpnt->request.buffer;
        SCpnt->use_sg = 0;

    } else if (SCpnt->host->sg_tablesize == 0 ||
               (need_isa_buffer && dma_free_sectors <= 10)) {

        /* Case 2: no scatter-gather available (or DMA pool nearly
         * exhausted): transfer just the first buffer head. */
        if (SCpnt->host->sg_tablesize != 0 &&
            need_isa_buffer &&
            dma_free_sectors <= 10)
            printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n");

        this_count = SCpnt->request.current_nr_sectors;
        buff = SCpnt->request.buffer;
        SCpnt->use_sg = 0;

    } else {

        /* Case 3: build a scatter-gather list. */
        struct scatterlist * sgpnt;
        int count, this_count_max;
        int counted;

        bh = SCpnt->request.bh;
        this_count = 0;
        /* Sector count limit of the command form we may use. */
        this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
        count = 0;
        bhp = NULL;
        /* First pass: count segments (adjacent clusterable buffers
         * share a segment) and total sectors. */
        while(bh) {
            if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
            if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
               !CLUSTERABLE_DEVICE(SCpnt) ||
               (SCpnt->host->unchecked_isa_dma &&
                ((unsigned long) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
                if (count < SCpnt->host->sg_tablesize) count++;
                else break;
            };
            this_count += (bh->b_size >> 9);
            bhp = bh;
            bh = bh->b_reqnext;
        };
#if 0
        if(SCpnt->host->unchecked_isa_dma &&
           ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
#endif
        SCpnt->use_sg = count;
        /* scsi_malloc sizes are powers of two, minimum 512. */
        count = 512;
        while( count < (SCpnt->use_sg * sizeof(struct scatterlist)))
            count = count << 1;
        SCpnt->sglist_len = count;
        max_sg = count / sizeof(struct scatterlist);
        if(SCpnt->host->sg_tablesize < max_sg)
            max_sg = SCpnt->host->sg_tablesize;
        sgpnt = (struct scatterlist * ) scsi_malloc(count);
        if (!sgpnt) {
            /* Could not even get the SG list: degrade to a single
             * buffer-head transfer. */
            printk("Warning - running *really* short on DMA buffers\n");
            SCpnt->use_sg = 0;
            this_count = SCpnt->request.current_nr_sectors;
            buff = SCpnt->request.buffer;
        } else {
            memset(sgpnt, 0, count);
            /* Second pass: fill in the segments, merging contiguous
             * buffer heads and allocating bounce buffers where a
             * segment lies above ISA_DMA_THRESHOLD. */
            buff = (char *) sgpnt;
            counted = 0;
            for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
                count < SCpnt->use_sg && bh;
                count++, bh = bhp) {

                bhp = bh->b_reqnext;

                if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
                sgpnt[count].length += bh->b_size;
                counted += bh->b_size >> 9;

                if (((long) sgpnt[count].address) + sgpnt[count].length - 1 >
                    ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
                    !sgpnt[count].alt_address) {
                    /* Remember the real buffer and substitute a low
                     * bounce buffer for the DMA transfer. */
                    sgpnt[count].alt_address = sgpnt[count].address;

                    /* Leave some slack in the DMA pool. */
                    if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
                        sgpnt[count].address = NULL;
                    } else {
                        sgpnt[count].address =
                            (char *) scsi_malloc(sgpnt[count].length);
                    };

                    /* Bounce allocation failed: truncate the transfer
                     * at the previous segment. */
                    if(sgpnt[count].address == NULL){
#if 0
                        printk("Warning: Running low on SCSI DMA buffers");
                        /* Try switching back to a non scatter-gather operation. */
                        while(--count >= 0){
                            if(sgpnt[count].alt_address)
                                scsi_free(sgpnt[count].address,
                                          sgpnt[count].length);
                        };
                        this_count = SCpnt->request.current_nr_sectors;
                        buff = SCpnt->request.buffer;
                        SCpnt->use_sg = 0;
                        scsi_free(sgpnt, SCpnt->sglist_len);
#endif
                        SCpnt->use_sg = count;
                        this_count = counted -= bh->b_size >> 9;
                        break;
                    };

                };

                /* The next buffer head is contiguous with this one:
                 * try to extend the current segment instead of
                 * starting a new one. */
                if(bhp && CONTIGUOUS_BUFFERS(bh,bhp)
                   && CLUSTERABLE_DEVICE(SCpnt)) {
                    char * tmp;

                    /* Extension would not cross the DMA threshold:
                     * just keep accumulating into this segment. */
                    if (((long) sgpnt[count].address) + sgpnt[count].length +
                        bhp->b_size - 1 > ISA_DMA_THRESHOLD &&
                        (SCpnt->host->unchecked_isa_dma) &&
                        !sgpnt[count].alt_address) continue;

                    if(!sgpnt[count].alt_address) {count--; continue; }
                    /* Segment is bounced: grow the bounce buffer to
                     * absorb the next buffer head too. */
                    if(dma_free_sectors > 10)
                        tmp = (char *) scsi_malloc(sgpnt[count].length
                                                   + bhp->b_size);
                    else {
                        tmp = NULL;
                        max_sg = SCpnt->use_sg;
                    };
                    if(tmp){
                        scsi_free(sgpnt[count].address, sgpnt[count].length);
                        sgpnt[count].address = tmp;
                        count--;
                        continue;
                    };

                    /* Could not merge: this will consume an extra
                     * segment beyond the first-pass estimate. */
                    if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
                };
            };

            /* Actual number of sectors covered by the SG list. */
            this_count = counted;

            /* Sanity check the constructed list before issuing. */
            if(count < SCpnt->use_sg || SCpnt->use_sg
               > SCpnt->host->sg_tablesize){
                bh = SCpnt->request.bh;
                printk("Use sg, count %d %x %d\n",
                       SCpnt->use_sg, count, dma_free_sectors);
                printk("maxsg = %x, counted = %d this_count = %d\n",
                       max_sg, counted, this_count);
                while(bh){
                    printk("[%p %lx] ", bh->b_data, bh->b_size);
                    bh = bh->b_reqnext;
                };
                if(SCpnt->use_sg < 16)
                    for(count=0; count<SCpnt->use_sg; count++)
                        printk("{%d:%p %p %d} ", count,
                               sgpnt[count].address,
                               sgpnt[count].alt_address,
                               sgpnt[count].length);
                panic("Ooops");
            };

            /* Writes must stage the data into the bounce buffers. */
            if (SCpnt->request.cmd == WRITE)
                for(count=0; count<SCpnt->use_sg; count++)
                    if(sgpnt[count].alt_address)
                        memcpy(sgpnt[count].address, sgpnt[count].alt_address,
                               sgpnt[count].length);
        };
    };

    /* Non-SG transfer that lands above the ISA DMA limit: route it
     * through a single bounce buffer. */
    if(SCpnt->use_sg == 0){
        if (((long) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
            (SCpnt->host->unchecked_isa_dma)) {
            if(bounce_buffer)
                buff = bounce_buffer;
            else
                buff = (char *) scsi_malloc(this_count << 9);
            if(buff == NULL) {
                /* Fall back to just the first buffer head. */
                this_count = SCpnt->request.current_nr_sectors;
                buff = (char *) scsi_malloc(this_count << 9);
                if(!buff) panic("Ran out of DMA buffers.");
            };
            if (SCpnt->request.cmd == WRITE)
                memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
        };
    };
#ifdef DEBUG
    printk("sd%c : %s %d/%d 512 byte blocks.\n",
           'a' + MINOR(SCpnt->request.dev),
           (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
           this_count, SCpnt->request.nr_sectors);
#endif

    cmd[1] = (SCpnt->lun << 5) & 0xe0;

    /* Convert 512-byte units to the device's native sector size. */
    if (rscsi_disks[dev].sector_size == 1024){
        if(block & 1) panic("sd.c:Bad block number requested");
        if(this_count & 1) panic("sd.c:Bad block number requested");
        block = block >> 1;
        this_count = this_count >> 1;
    };

    if (rscsi_disks[dev].sector_size == 256){
        block = block << 1;
        this_count = this_count << 1;
    };

    /* Use the 10-byte command form when the count or LBA exceeds the
     * 6-byte limits and the device supports it. */
    if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten)
    {
        if (this_count > 0xffff)
            this_count = 0xffff;

        cmd[0] += READ_10 - READ_6 ;
        cmd[2] = (unsigned char) (block >> 24) & 0xff;
        cmd[3] = (unsigned char) (block >> 16) & 0xff;
        cmd[4] = (unsigned char) (block >> 8) & 0xff;
        cmd[5] = (unsigned char) block & 0xff;
        cmd[6] = cmd[9] = 0;
        cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
        cmd[8] = (unsigned char) this_count & 0xff;
    }
    else
    {
        /* 6-byte form: 8-bit count, 21-bit LBA. */
        if (this_count > 0xff)
            this_count = 0xff;

        cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
        cmd[2] = (unsigned char) ((block >> 8) & 0xff);
        cmd[3] = (unsigned char) block & 0xff;
        cmd[4] = (unsigned char) this_count;
        cmd[5] = 0;
    }

    /* Tell the mid-layer the device's transfer granularity and the
     * minimum acceptable transfer length. */
    SCpnt->transfersize = rscsi_disks[dev].sector_size;
    SCpnt->underflow = this_count << 9;
    scsi_do_cmd (SCpnt, (void *) cmd, buff,
                 this_count * rscsi_disks[dev].sector_size,
                 rw_intr,
                 (SCpnt->device->type == TYPE_DISK ?
                  SD_TIMEOUT : SD_MOD_TIMEOUT),
                 MAX_RETRIES);
}
836
837 static int check_scsidisk_media_change(dev_t full_dev){
838 int retval;
839 int target;
840 struct inode inode;
841 int flag = 0;
842
843 target = DEVICE_NR(MINOR(full_dev));
844
845 if (target >= sd_template.dev_max ||
846 !rscsi_disks[target].device) {
847 printk("SCSI disk request error: invalid device.\n");
848 return 0;
849 };
850
851 if(!rscsi_disks[target].device->removable) return 0;
852
853 inode.i_rdev = full_dev;
854 retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
855
856 if(retval){
857
858
859
860
861 rscsi_disks[target].device->changed = 1;
862 return 1;
863
864 };
865
866 retval = rscsi_disks[target].device->changed;
867 if(!flag) rscsi_disks[target].device->changed = 0;
868 return retval;
869 }
870
871 static void sd_init_done (Scsi_Cmnd * SCpnt)
872 {
873 struct request * req;
874
875 req = &SCpnt->request;
876 req->dev = 0xfffe;
877
878 if (req->sem != NULL) {
879 up(req->sem);
880 }
881 }
882
/*
 * Probe one disk: wait for it to become ready (spinning it up if
 * necessary during boot), read its capacity and sector size with
 * READ CAPACITY, and fill in rscsi_disks[i] and sd_hardsizes[].
 *
 * Command completion is signalled by sd_init_done() setting
 * request.dev to 0xfffe; at boot (pid 0) we busy-wait on that,
 * otherwise we sleep on a semaphore.
 *
 * Returns the (possibly unchanged) disk index i.
 */
static int sd_init_onedisk(int i)
{
    unsigned char cmd[10];
    unsigned char *buffer;
    unsigned long spintime;
    int the_result, retries;
    Scsi_Cmnd * SCpnt;

    /* Claim the device (blocking) and a 512-byte DMA-able buffer. */
    SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
    buffer = (unsigned char *) scsi_malloc(512);

    spintime = 0;

    /* Boot-time path only (pid 0): wait for the unit to come ready. */
    if (current->pid == 0){
        do{
            /* TEST UNIT READY; retry through UNIT ATTENTION, which
             * just reports a reset/medium event. */
            retries = 0;
            while(retries < 3)
            {
                cmd[0] = TEST_UNIT_READY;
                cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
                memset ((void *) &cmd[2], 0, 8);
                SCpnt->request.dev = 0xffff;  /* mark busy */
                SCpnt->cmd_len = 0;
                SCpnt->sense_buffer[0] = 0;
                SCpnt->sense_buffer[2] = 0;

                scsi_do_cmd (SCpnt,
                             (void *) cmd, (void *) buffer,
                             512, sd_init_done, SD_TIMEOUT,
                             MAX_RETRIES);

                /* Busy-wait for sd_init_done()'s sentinel. */
                while(SCpnt->request.dev != 0xfffe) barrier();

                the_result = SCpnt->result;
                retries++;
                if( the_result == 0
                   || SCpnt->sense_buffer[2] != UNIT_ATTENTION)
                    break;
            }

            /* Fixed disk reports NOT READY: issue START UNIT once,
             * then poll with one-second pauses. */
            if(the_result && !rscsi_disks[i].device->removable &&
               SCpnt->sense_buffer[2] == NOT_READY) {
                int time1;
                if(!spintime){
                    printk( "sd%c: Spinning up disk...", 'a' + i );
                    cmd[0] = START_STOP;
                    cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
                    cmd[1] |= 1;  /* IMMED: return before spin-up completes */
                    memset ((void *) &cmd[2], 0, 8);
                    cmd[4] = 1;   /* START */

                    SCpnt->request.dev = 0xffff;
                    SCpnt->cmd_len = 0;
                    SCpnt->sense_buffer[0] = 0;
                    SCpnt->sense_buffer[2] = 0;

                    scsi_do_cmd (SCpnt,
                                 (void *) cmd, (void *) buffer,
                                 512, sd_init_done, SD_TIMEOUT,
                                 MAX_RETRIES);

                    while(SCpnt->request.dev != 0xfffe)
                        barrier();

                    spintime = jiffies;  /* start the 100 s clock */
                };

                /* Busy-wait one second between polls. */
                time1 = jiffies;
                while(jiffies < time1 + HZ);
                printk( "." );
            };
        } while(the_result && spintime && spintime+100*HZ > jiffies);
        if (spintime) {
            if (the_result)
                printk( "not responding...\n" );
            else
                printk( "ready\n" );
        }
    };

    /* Read the capacity and sector size; up to 3 attempts. */
    retries = 3;
    do {
        cmd[0] = READ_CAPACITY;
        cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
        memset ((void *) &cmd[2], 0, 8);
        memset ((void *) buffer, 0, 8);
        SCpnt->request.dev = 0xffff;  /* mark busy */
        SCpnt->cmd_len = 0;
        SCpnt->sense_buffer[0] = 0;
        SCpnt->sense_buffer[2] = 0;

        scsi_do_cmd (SCpnt,
                     (void *) cmd, (void *) buffer,
                     8, sd_init_done, SD_TIMEOUT,
                     MAX_RETRIES);

        /* Boot: busy-wait.  Otherwise sleep until sd_init_done()
         * ups the semaphore. */
        if (current->pid == 0)
            while(SCpnt->request.dev != 0xfffe)
                barrier();
        else
            if (SCpnt->request.dev != 0xfffe){
                struct semaphore sem = MUTEX_LOCKED;
                SCpnt->request.sem = &sem;
                down(&sem);

                /* Belt and braces: the sentinel is authoritative. */
                while (SCpnt->request.dev != 0xfffe)
                    schedule();
            };

        the_result = SCpnt->result;
        retries--;

    } while(the_result && retries);

    /* Release the command slot and wake anyone waiting on the device. */
    SCpnt->request.dev = -1;

    wake_up(&SCpnt->device->device_wait);

    if (the_result)
    {
        /* READ CAPACITY failed: log it and fall back to conservative
         * defaults (512-byte sectors, 1 GB) so the device stays usable. */
        printk ("sd%c : READ CAPACITY failed.\n"
                "sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
                'a' + i, 'a' + i,
                status_byte(the_result),
                msg_byte(the_result),
                host_byte(the_result),
                driver_byte(the_result)
                );
        if (driver_byte(the_result) & DRIVER_SENSE)
            printk("sd%c : extended sense code = %1x \n",
                   'a' + i, SCpnt->sense_buffer[2] & 0xf);
        else
            printk("sd%c : sense not available. \n", 'a' + i);

        printk("sd%c : block size assumed to be 512 bytes, disk size 1GB. \n",
               'a' + i);
        rscsi_disks[i].capacity = 0x1fffff;
        rscsi_disks[i].sector_size = 512;

        /* NOT READY on a removable device most likely means no medium:
         * flag a change so the next open revalidates. */
        if(rscsi_disks[i].device->removable &&
           SCpnt->sense_buffer[2] == NOT_READY)
            rscsi_disks[i].device->changed = 1;

    }
    else
    {
        /* READ CAPACITY returns big-endian last-LBA and block length. */
        rscsi_disks[i].capacity = (buffer[0] << 24) |
            (buffer[1] << 16) |
            (buffer[2] << 8) |
            buffer[3];

        rscsi_disks[i].sector_size = (buffer[4] << 24) |
            (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];

        if (rscsi_disks[i].sector_size != 512 &&
            rscsi_disks[i].sector_size != 1024 &&
            rscsi_disks[i].sector_size != 256)
        {
            printk ("sd%c : unsupported sector size %d.\n",
                    'a' + i, rscsi_disks[i].sector_size);
            if(rscsi_disks[i].device->removable){
                rscsi_disks[i].capacity = 0;  /* unusable until medium change */
            } else {
                printk ("scsi : deleting disk entry.\n");
                rscsi_disks[i].device = NULL;
                sd_template.nr_dev--;
                return i;
            };
        }
        {
            /* Publish the hardware sector size for all 16 minors of
             * this disk. */
            int m;
            int hard_sector = rscsi_disks[i].sector_size;

            for (m=i<<4; m<((i+1)<<4); m++){
                sd_hardsizes[m] = hard_sector;
            }
            printk ("SCSI Hardware sector size is %d bytes on device sd%c\n",
                    hard_sector,i+'a');
        }
        /* Normalize capacity to 512-byte units. */
        if(rscsi_disks[i].sector_size == 1024)
            rscsi_disks[i].capacity <<= 1;
        if(rscsi_disks[i].sector_size == 256)
            rscsi_disks[i].capacity >>= 1;
    }

    rscsi_disks[i].ten = 1;    /* assume 10-byte commands work; rw_intr clears on ILLEGAL REQUEST */
    rscsi_disks[i].remap = 1;
    scsi_free(buffer, 512);
    return i;
}
1104
1105
1106
1107
1108
1109
1110
/*
 * One-time driver initialization: register the block major and allocate
 * the per-disk and per-minor tables.  Table sizes are based on the
 * number of disks noticed at detect time plus SD_EXTRA_DEVS spare
 * slots for devices attached later.
 */
static void sd_init()
{
    int i;
    static int sd_registered = 0;

    if (sd_template.dev_noticed == 0) return;

    if(!sd_registered) {
        if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
            printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
            return;
        }
        sd_registered++;
    }

    /* Tables already allocated on a previous call: nothing to do. */
    if(rscsi_disks) return;

    sd_template.dev_max = sd_template.dev_noticed + SD_EXTRA_DEVS;

    rscsi_disks = (Scsi_Disk *)
        scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC);
    memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));

    /* The remaining tables are indexed by minor: 16 minors per disk. */
    sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
                                        sizeof(int), GFP_ATOMIC);
    memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));

    sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
                                             sizeof(int), GFP_ATOMIC);

    sd_hardsizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
                                            sizeof(int), GFP_ATOMIC);

    /* Defaults: 1K soft blocks on 512-byte hardware sectors; the hard
     * size is corrected per disk by sd_init_onedisk(). */
    for(i=0;i<(sd_template.dev_max << 4);i++){
        sd_blocksizes[i] = 1024;
        sd_hardsizes[i] = 512;
    }
    blksize_size[MAJOR_NR] = sd_blocksizes;
    hardsect_size[MAJOR_NR] = sd_hardsizes;
    sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
                                               sizeof(struct hd_struct),
                                               GFP_ATOMIC);

    /* Wire the allocated tables into the gendisk descriptor. */
    sd_gendisk.max_nr = sd_template.dev_max;
    sd_gendisk.part = sd;
    sd_gendisk.sizes = sd_sizes;
    sd_gendisk.real_devices = (void *) rscsi_disks;

}
1162
/*
 * Called once all devices are attached: install the request function,
 * link sd_gendisk into the global gendisk chain, size every attached
 * disk via sd_init_onedisk(), and pick a read-ahead value.
 */
static void sd_finish()
{
    int i;

    blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;

    sd_gendisk.next = gendisk_head;
    gendisk_head = &sd_gendisk;

    for (i = 0; i < sd_template.dev_max; ++i)
        if (!rscsi_disks[i].capacity &&
            rscsi_disks[i].device)
        {
            i = sd_init_onedisk(i);
            /* Loaded as a module: the boot-time partition scan never
             * ran, so scan disks without a partition table now. */
            if (scsi_loadable_module_flag
                && !rscsi_disks[i].has_part_table) {
                sd_sizes[i << 4] = rscsi_disks[i].capacity;
                revalidate_scsidisk(i << 4, 0);
            }
            rscsi_disks[i].has_part_table = 1;
        }

    /* Larger read-ahead when the adapter can scatter-gather.
     * NOTE(review): only disk 0's host adapter is consulted here --
     * confirm this is intended with mixed adapters. */
    if(rscsi_disks[0].device && rscsi_disks[0].device->host->sg_tablesize)
        read_ahead[MAJOR_NR] = 120;  /* 120 sector read-ahead */

    else
        read_ahead[MAJOR_NR] = 4;    /* 4 sector read-ahead */

    return;
}
1197
1198 static int sd_detect(Scsi_Device * SDp){
1199 if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1200
1201 printk("Detected scsi disk sd%c at scsi%d, channel %d, id %d, lun %d\n",
1202 'a'+ (sd_template.dev_noticed++),
1203 SDp->host->host_no, SDp->channel, SDp->id, SDp->lun);
1204
1205 return 1;
1206 }
1207
1208 static int sd_attach(Scsi_Device * SDp){
1209 Scsi_Disk * dpnt;
1210 int i;
1211
1212 if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1213
1214 if(sd_template.nr_dev >= sd_template.dev_max) {
1215 SDp->attached--;
1216 return 1;
1217 }
1218
1219 for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
1220 if(!dpnt->device) break;
1221
1222 if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)");
1223
1224 SDp->scsi_request_fn = do_sd_request;
1225 rscsi_disks[i].device = SDp;
1226 rscsi_disks[i].has_part_table = 0;
1227 sd_template.nr_dev++;
1228 sd_gendisk.nr_real++;
1229 return 0;
1230 }
1231
/* Helper macros so revalidate_scsidisk() reads generically. */
#define DEVICE_BUSY rscsi_disks[target].device->busy
#define USAGE rscsi_disks[target].device->access_count
#define CAPACITY rscsi_disks[target].capacity
#define MAYBE_REINIT sd_init_onedisk(target)
#define GENDISK_STRUCT sd_gendisk

/*
 * Re-read the capacity and partition table of one disk, e.g. after a
 * medium change.  Refuses with -EBUSY if the disk is busy or opened
 * more than maxusage times.  All 16 partition minors are synced,
 * invalidated, and zeroed before the rescan.
 */
int revalidate_scsidisk(int dev, int maxusage){
    int target, major;
    struct gendisk * gdev;
    unsigned long flags;
    int max_p;
    int start;
    int i;

    target = DEVICE_NR(MINOR(dev));
    gdev = &GENDISK_STRUCT;

    /* Atomically test-and-set the device busy flag. */
    save_flags(flags);
    cli();
    if (DEVICE_BUSY || USAGE > maxusage) {
        restore_flags(flags);
        printk("Device busy for revalidation (usage=%d)\n", USAGE);
        return -EBUSY;
    };
    DEVICE_BUSY = 1;
    restore_flags(flags);

    max_p = gdev->max_p;
    start = target << gdev->minor_shift;
    major = MAJOR_NR << 8;  /* dev_t with major in the high byte */

    for (i=max_p - 1; i >=0 ; i--) {
        sync_dev(major | start | i);
        invalidate_inodes(major | start | i);
        invalidate_buffers(major | start | i);
        gdev->part[start+i].start_sect = 0;
        gdev->part[start+i].nr_sects = 0;
    };

#ifdef MAYBE_REINIT
    MAYBE_REINIT;  /* re-read capacity and sector size */
#endif

    gdev->part[start].nr_sects = CAPACITY;
    resetup_one_dev(gdev, target);  /* rescan the partition table */

    DEVICE_BUSY = 0;
    return 0;
}
1288
/* file_operations revalidate hook: unconditional revalidate (maxusage 0). */
static int fop_revalidate_scsidisk(dev_t dev){
    return revalidate_scsidisk(dev, 0);
}
1292
1293
/*
 * Detach callback: find the Scsi_Disk slot bound to SDp, sync and
 * invalidate all of its partition minors, and release the slot.
 */
static void sd_detach(Scsi_Device * SDp)
{
    Scsi_Disk * dpnt;
    int i;
    int max_p;
    int major;
    int start;

    for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
        if(dpnt->device == SDp) {

            /* Flush and zero every minor of this disk.
             * Note: the inner loop below reuses (clobbers) i; this is
             * safe only because we return before the outer loop could
             * continue. */
            max_p = sd_gendisk.max_p;
            start = i << sd_gendisk.minor_shift;
            major = MAJOR_NR << 8;

            for (i=max_p - 1; i >=0 ; i--) {
                sync_dev(major | start | i);
                invalidate_inodes(major | start | i);
                invalidate_buffers(major | start | i);
                sd_gendisk.part[start+i].start_sect = 0;
                sd_gendisk.part[start+i].nr_sects = 0;
                sd_sizes[start+i] = 0;
            };

            dpnt->has_part_table = 0;
            dpnt->device = NULL;
            dpnt->capacity = 0;
            SDp->attached--;
            sd_template.dev_noticed--;
            sd_template.nr_dev--;
            sd_gendisk.nr_real--;
            return;
        }
    return;
}
1331
1332 #ifdef MODULE
1333 #include <linux/module.h>
1334 #include <linux/version.h>
1335
1336 char kernel_version[] = UTS_RELEASE;
1337
/* Module entry point: register this driver with the SCSI mid-layer. */
int init_module(void) {
    sd_template.usage_count = &mod_use_count_;
    return scsi_register_module(MODULE_SCSI_DEV, &sd_template);
}
1342
1343 void cleanup_module( void)
1344 {
1345 struct gendisk * prev_sdgd;
1346 struct gendisk * sdgd;
1347
1348 if (MOD_IN_USE) {
1349 printk(KERN_INFO __FILE__ ": module is in use, remove rejected\n");
1350 return;
1351 }
1352 scsi_unregister_module(MODULE_SCSI_DEV, &sd_template);
1353 unregister_blkdev(SCSI_GENERIC_MAJOR, "sd");
1354 if( rscsi_disks != NULL )
1355 {
1356 scsi_init_free((char *) rscsi_disks,
1357 (sd_template.dev_noticed + SD_EXTRA_DEVS)
1358 * sizeof(Scsi_Disk));
1359
1360 scsi_init_free((char *) sd_sizes, sd_template.dev_max * sizeof(int));
1361 scsi_init_free((char *) sd_blocksizes, sd_template.dev_max * sizeof(int));
1362 scsi_init_free((char *) sd_hardsizes, sd_template.dev_max * sizeof(int));
1363 scsi_init_free((char *) sd,
1364 (sd_template.dev_max << 4) * sizeof(struct hd_struct));
1365
1366
1367
1368 sdgd = gendisk_head;
1369 prev_sdgd = NULL;
1370 while(sdgd != &sd_gendisk)
1371 {
1372 prev_sdgd = sdgd;
1373 sdgd = sdgd->next;
1374 }
1375
1376 if(sdgd != &sd_gendisk)
1377 printk("sd_gendisk not in disk chain.\n");
1378 else {
1379 if(prev_sdgd != NULL)
1380 prev_sdgd->next = sdgd->next;
1381 else
1382 gendisk_head = sdgd->next;
1383 }
1384 }
1385
1386 blksize_size[MAJOR_NR] = NULL;
1387 blk_dev[MAJOR_NR].request_fn = NULL;
1388 blk_size[MAJOR_NR] = NULL;
1389 hardsect_size[MAJOR_NR] = NULL;
1390 read_ahead[MAJOR_NR] = 0;
1391 sd_template.dev_max = 0;
1392 }
1393 #endif
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412