This source file includes the following definitions:
- sd_open
- sd_release
- sd_geninit
- rw_intr
- do_sd_request
- requeue_sd_request
- check_scsidisk_media_change
- sd_init_done
- sd_init_onedisk
- sd_init
- sd_finish
- sd_detect
- sd_attach
- revalidate_scsidisk
- fop_revalidate_scsidisk
- sd_detach
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 #include <linux/fs.h>
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/mm.h>
23 #include <linux/string.h>
24 #include <linux/errno.h>
25 #include <asm/system.h>
26
27 #define MAJOR_NR SCSI_DISK_MAJOR
28 #include "../block/blk.h"
29 #include "scsi.h"
30 #include "hosts.h"
31 #include "sd.h"
32 #include "scsi_ioctl.h"
33 #include "constants.h"
34
35 #include <linux/genhd.h>
36
37
38
39
40
41 #define MAX_RETRIES 5
42
43
44
45
46
47 #define SD_TIMEOUT 600
48 #define SD_MOD_TIMEOUT 750
49
50 #define CLUSTERABLE_DEVICE(SC) (SC->host->hostt->use_clustering && \
51 SC->device->type != TYPE_MOD)
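/*
 * Requests may only be clustered (merged into one command spanning several
 * buffer heads) when the host adapter supports clustering and the device is
 * not a magneto-optical (TYPE_MOD) unit.
 */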
52
53 struct hd_struct * sd;
54 int revalidate_scsidisk(int dev, int maxusage);
55
56 Scsi_Disk * rscsi_disks = NULL;
57 static int * sd_sizes;
58 static int * sd_blocksizes;
59 static int * sd_hardsizes;
60
61 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
62
63 static int check_scsidisk_media_change(dev_t);
64 static int fop_revalidate_scsidisk(dev_t);
65
66 static int sd_init_onedisk(int);
67
68 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
69
70 static void sd_init(void);
71 static void sd_finish(void);
72 static int sd_attach(Scsi_Device *);
73 static int sd_detect(Scsi_Device *);
74 static void sd_detach(Scsi_Device *);
75
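/*
 * Driver template registered with the SCSI mid-layer: sd_detect() is called
 * for each SCSI disk found during the bus scan, sd_init()/sd_finish() set up
 * the per-driver tables and hook the request function, and
 * sd_attach()/sd_detach() bind and unbind individual disks.
 */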
76 struct Scsi_Device_Template sd_template = {NULL, "disk", "sd", TYPE_DISK,
77 SCSI_DISK_MAJOR, 0, 0, 0, 1,
78 sd_detect, sd_init,
79 sd_finish, sd_attach, sd_detach};
80
81 static int sd_open(struct inode * inode, struct file * filp)
82 {
83 int target;
84 target = DEVICE_NR(MINOR(inode->i_rdev));
85
86 if(target >= sd_template.dev_max || !rscsi_disks[target].device)
87 return -ENXIO;
88
89
90
91
92 while (rscsi_disks[target].device->busy);
93
94 if(rscsi_disks[target].device->removable) {
95 check_disk_change(inode->i_rdev);
96
97 if(!rscsi_disks[target].device->access_count)
98 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
99 };
100
101
102
103
104 if(sd_sizes[MINOR(inode->i_rdev)] == 0)
105 return -ENXIO;
106
107 rscsi_disks[target].device->access_count++;
108 if (rscsi_disks[target].device->host->hostt->usage_count)
109 (*rscsi_disks[target].device->host->hostt->usage_count)++;
110 return 0;
111 }
112
113 static void sd_release(struct inode * inode, struct file * file)
114 {
115 int target;
116 sync_dev(inode->i_rdev);
117
118 target = DEVICE_NR(MINOR(inode->i_rdev));
119
120 rscsi_disks[target].device->access_count--;
121 if (rscsi_disks[target].device->host->hostt->usage_count)
122 (*rscsi_disks[target].device->host->hostt->usage_count)--;
123
124 if(rscsi_disks[target].device->removable) {
125 if(!rscsi_disks[target].device->access_count)
126 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
127 };
128 }
129
130 static void sd_geninit(void);
131
132 static struct file_operations sd_fops = {
133 NULL,
134 block_read,
135 block_write,
136 NULL,
137 NULL,
138 sd_ioctl,
139 NULL,
140 sd_open,
141 sd_release,
142 block_fsync,
143 NULL,
144 check_scsidisk_media_change,
145 fop_revalidate_scsidisk
146 };
147
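/*
 * Generic disk descriptor: four minor bits per drive, giving sixteen minors
 * (the whole disk plus up to fifteen partitions) per unit.  The max_nr, part
 * and sizes fields are filled in later by sd_init().
 */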
148 static struct gendisk sd_gendisk = {
149 MAJOR_NR,
150 "sd",
151 4,
152 1 << 4,
153 0,
154 sd_geninit,
155 NULL,
156 NULL,
157 0,
158 NULL,
159 NULL
160 };
161
162 static void sd_geninit (void)
163 {
164 int i;
165
166 for (i = 0; i < sd_template.dev_max; ++i)
167 if(rscsi_disks[i].device)
168 sd[i << 4].nr_sects = rscsi_disks[i].capacity;
169 #if 0
170
171 sd_gendisk.nr_real = sd_template.dev_max;
172 #endif
173 }
174
175
176
177
178
179
180
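/*
 * rw_intr() is the completion routine for read/write commands.  On success
 * it copies data back from any bounce buffers, frees them, completes the
 * sectors transferred and requeues the command.  On failure it examines the
 * sense data: UNIT ATTENTION on removable media marks the disk as changed,
 * and ILLEGAL REQUEST makes the driver fall back from 10-byte to 6-byte
 * commands before retrying.
 */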
181 static void rw_intr (Scsi_Cmnd *SCpnt)
182 {
183 int result = SCpnt->result;
184 int this_count = SCpnt->bufflen >> 9;
185
186 #ifdef DEBUG
187 printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.dev), SCpnt->host->host_no, result);
188 #endif
189
190
191
192
193
194
195
196 if (!result) {
197
198 #ifdef DEBUG
199 printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.dev), SCpnt->request.nr_sectors);
200 printk("use_sg is %d\n ",SCpnt->use_sg);
201 #endif
202 if (SCpnt->use_sg) {
203 struct scatterlist * sgpnt;
204 int i;
205 sgpnt = (struct scatterlist *) SCpnt->buffer;
206 for(i=0; i<SCpnt->use_sg; i++) {
207 #ifdef DEBUG
208 printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
209 #endif
210 if (sgpnt[i].alt_address) {
211 if (SCpnt->request.cmd == READ)
212 memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
213 scsi_free(sgpnt[i].address, sgpnt[i].length);
214 };
215 };
216 scsi_free(SCpnt->buffer, SCpnt->sglist_len);
217 } else {
218 if (SCpnt->buffer != SCpnt->request.buffer) {
219 #ifdef DEBUG
220 printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
221 SCpnt->bufflen);
222 #endif
223 if (SCpnt->request.cmd == READ)
224 memcpy(SCpnt->request.buffer, SCpnt->buffer,
225 SCpnt->bufflen);
226 scsi_free(SCpnt->buffer, SCpnt->bufflen);
227 };
228 };
229
230
231
232
233
234 if (SCpnt->request.nr_sectors > this_count)
235 {
236 SCpnt->request.errors = 0;
237
238 if (!SCpnt->request.bh)
239 {
240 #ifdef DEBUG
241 printk("sd%c : handling page request, no buffer\n",
242 'a' + MINOR(SCpnt->request.dev));
243 #endif
244
245
246
247
248 panic("sd.c: linked page request (%lx %x)",
249 SCpnt->request.sector, this_count);
250 }
251 }
252 SCpnt = end_scsi_request(SCpnt, 1, this_count);
253 requeue_sd_request(SCpnt);
254 return;
255 }
256
257
258 if (SCpnt->use_sg) {
259 struct scatterlist * sgpnt;
260 int i;
261 sgpnt = (struct scatterlist *) SCpnt->buffer;
262 for(i=0; i<SCpnt->use_sg; i++) {
263 #ifdef DEBUG
264 printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
265 SCpnt->bufflen);
266 #endif
267 if (sgpnt[i].alt_address) {
268 scsi_free(sgpnt[i].address, sgpnt[i].length);
269 };
270 };
271 scsi_free(SCpnt->buffer, SCpnt->sglist_len);
272 } else {
273 #ifdef DEBUG
274 printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
275 SCpnt->bufflen);
276 #endif
277 if (SCpnt->buffer != SCpnt->request.buffer)
278 scsi_free(SCpnt->buffer, SCpnt->bufflen);
279 };
280
281
282
283
284
285
286
287 if (driver_byte(result) != 0) {
288 if (suggestion(result) == SUGGEST_REMAP) {
289 #ifdef REMAP
290
291
292
293
294 if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].remap)
295 {
296 result = 0;
297 }
298 else
299
300 #endif
301 }
302
303 if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
304 if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
305 if(rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->removable) {
306
307
308
309 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->changed = 1;
310 SCpnt = end_scsi_request(SCpnt, 0, this_count);
311 requeue_sd_request(SCpnt);
312 return;
313 }
314 }
315 }
316
317
318
319
320
321
322
323
324
325 if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
326 if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten) {
327 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten = 0;
328 requeue_sd_request(SCpnt);
329 result = 0;
330 } else {
331 }
332 }
333 }
334 if (result) {
335 printk("SCSI disk error : host %d id %d lun %d return code = %x\n",
336 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->host->host_no,
337 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->id,
338 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->lun, result);
339
340 if (driver_byte(result) & DRIVER_SENSE)
341 print_sense("sd", SCpnt);
342 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
343 requeue_sd_request(SCpnt);
344 return;
345 }
346 }
347
348
349
350
351
352
353
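/*
 * do_sd_request() is the block-layer request function.  It tries to allocate
 * a command for the request at the head of the queue; if that device is busy
 * and more than one disk is configured, it scans the rest of the queue for a
 * request whose device can accept a command, then hands the command to
 * requeue_sd_request() to be built and issued.
 */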
354 static void do_sd_request (void)
355 {
356 Scsi_Cmnd * SCpnt = NULL;
357 struct request * req = NULL;
358 unsigned long flags;
359 int flag = 0;
360
361 while (1==1){
362 save_flags(flags);
363 cli();
364 if (CURRENT != NULL && CURRENT->dev == -1) {
365 restore_flags(flags);
366 return;
367 };
368
369 INIT_SCSI_REQUEST;
370
371
372
373
374
375
376
377
378
379
380
381
382
383 if (flag++ == 0)
384 SCpnt = allocate_device(&CURRENT,
385 rscsi_disks[DEVICE_NR(MINOR(CURRENT->dev))].device, 0);
386 else SCpnt = NULL;
387
388
389
390
391 #if 0
392 restore_flags(flags);
393 #else
394 sti();
395 #endif
396
397
398
399
400
401
402
403
404 if (!SCpnt && sd_template.nr_dev > 1){
405 struct request *req1;
406 req1 = NULL;
407 save_flags(flags);
408 cli();
409 req = CURRENT;
410 while(req){
411 SCpnt = request_queueable(req,
412 rscsi_disks[DEVICE_NR(MINOR(req->dev))].device);
413 if(SCpnt) break;
414 req1 = req;
415 req = req->next;
416 };
417 if (SCpnt && req->dev == -1) {
418 if (req == CURRENT)
419 CURRENT = CURRENT->next;
420 else
421 req1->next = req->next;
422 };
423 restore_flags(flags);
424 };
425
426 if (!SCpnt) return;
427
428
429 requeue_sd_request(SCpnt);
430 };
431 }
432
433 static void requeue_sd_request (Scsi_Cmnd * SCpnt)
434 {
435 int dev, block, this_count;
436 unsigned char cmd[10];
437 int bounce_size, contiguous;
438 int max_sg;
439 struct buffer_head * bh, *bhp;
440 char * buff, *bounce_buffer;
441
442 repeat:
443
444 if(!SCpnt || SCpnt->request.dev <= 0) {
445 do_sd_request();
446 return;
447 }
448
449 dev = MINOR(SCpnt->request.dev);
450 block = SCpnt->request.sector;
451 this_count = 0;
452
453 #ifdef DEBUG
454 printk("Doing sd request, dev = %d, block = %d\n", dev, block);
455 #endif
456
457 if (dev >= (sd_template.dev_max << 4) ||
458 !rscsi_disks[DEVICE_NR(dev)].device ||
459 block + SCpnt->request.nr_sectors > sd[dev].nr_sects)
460 {
461 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
462 goto repeat;
463 }
464
465 block += sd[dev].start_sect;
466 dev = DEVICE_NR(dev);
467
468 if (rscsi_disks[dev].device->changed)
469 {
470
471
472
473
474 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
475 goto repeat;
476 }
477
478 #ifdef DEBUG
479 printk("sd%c : real dev = /dev/sd%c, block = %d\n", 'a' + MINOR(SCpnt->request.dev), dev, block);
480 #endif
481
482
483
484
485
486
487
488
489
490
491
492
493 if (rscsi_disks[dev].sector_size == 1024)
494 if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
495 printk("sd.c:Bad block number requested\n");
496 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
497 goto repeat;
498 }
499
500 switch (SCpnt->request.cmd)
501 {
502 case WRITE :
503 if (!rscsi_disks[dev].device->writeable)
504 {
505 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
506 goto repeat;
507 }
508 cmd[0] = WRITE_6;
509 break;
510 case READ :
511 cmd[0] = READ_6;
512 break;
513 default :
514 panic ("Unknown sd command %d\n", SCpnt->request.cmd);
515 }
516
517 SCpnt->this_count = 0;
518
519
520
521 contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
522 bounce_buffer = NULL;
523 bounce_size = (SCpnt->request.nr_sectors << 9);
524
525
526
527
528
529
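/*
 * Hosts that can only DMA below ISA_DMA_THRESHOLD get a low "bounce" buffer
 * when the request's data lies above that boundary; if the bounce buffer
 * cannot be allocated, or the buffer heads turn out not to be physically
 * contiguous, the request is no longer treated as one contiguous transfer.
 */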
530 if (contiguous && SCpnt->request.bh &&
531 ((long) SCpnt->request.bh->b_data) + (SCpnt->request.nr_sectors << 9) - 1 >
532 ISA_DMA_THRESHOLD && SCpnt->host->unchecked_isa_dma) {
533 if(((long) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
534 bounce_buffer = (char *) scsi_malloc(bounce_size);
535 if(!bounce_buffer) contiguous = 0;
536 };
537
538 if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
539 for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp,
540 bhp = bhp->b_reqnext) {
541 if(!CONTIGUOUS_BUFFERS(bh,bhp)) {
542 if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
543 contiguous = 0;
544 break;
545 }
546 };
547 if (!SCpnt->request.bh || contiguous) {
548
549
550 this_count = SCpnt->request.nr_sectors;
551 buff = SCpnt->request.buffer;
552 SCpnt->use_sg = 0;
553
554 } else if (SCpnt->host->sg_tablesize == 0 ||
555 (need_isa_buffer &&
556 dma_free_sectors <= 10)) {
557
558
559
560
561
562
563
564
565 if (SCpnt->host->sg_tablesize != 0 &&
566 need_isa_buffer &&
567 dma_free_sectors <= 10)
568 printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n");
569
570 this_count = SCpnt->request.current_nr_sectors;
571 buff = SCpnt->request.buffer;
572 SCpnt->use_sg = 0;
573
574 } else {
575
576
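/*
 * Build a scatter-gather list: walk the buffer_head chain, counting a new
 * segment whenever two buffers are not physically contiguous or the device
 * cannot cluster, and stop at the host's sg_tablesize or the largest transfer
 * the command format allows (0xffff blocks with 10-byte commands, 0xff with
 * 6-byte ones).
 */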
577 struct scatterlist * sgpnt;
578 int count, this_count_max;
579 int counted;
580
581 bh = SCpnt->request.bh;
582 this_count = 0;
583 this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
584 count = 0;
585 bhp = NULL;
586 while(bh) {
587 if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
588 if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
589 !CLUSTERABLE_DEVICE(SCpnt) ||
590 (SCpnt->host->unchecked_isa_dma &&
591 ((unsigned long) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
592 if (count < SCpnt->host->sg_tablesize) count++;
593 else break;
594 };
595 this_count += (bh->b_size >> 9);
596 bhp = bh;
597 bh = bh->b_reqnext;
598 };
599 #if 0
600 if(SCpnt->host->unchecked_isa_dma &&
601 ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
602 #endif
603 SCpnt->use_sg = count;
604 count = 512;
605 while( count < (SCpnt->use_sg * sizeof(struct scatterlist)))
606 count = count << 1;
607 SCpnt->sglist_len = count;
608 max_sg = count / sizeof(struct scatterlist);
609 if(SCpnt->host->sg_tablesize < max_sg) max_sg = SCpnt->host->sg_tablesize;
610 sgpnt = (struct scatterlist * ) scsi_malloc(count);
611 if (sgpnt) memset(sgpnt, 0, count);
612 if (!sgpnt) {
613 printk("Warning - running *really* short on DMA buffers\n");
614 SCpnt->use_sg = 0;
615 this_count = SCpnt->request.current_nr_sectors;
616 buff = SCpnt->request.buffer;
617 } else {
618 buff = (char *) sgpnt;
619 counted = 0;
620 for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
621 count < SCpnt->use_sg && bh;
622 count++, bh = bhp) {
623
624 bhp = bh->b_reqnext;
625
626 if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
627 sgpnt[count].length += bh->b_size;
628 counted += bh->b_size >> 9;
629
630 if (((long) sgpnt[count].address) + sgpnt[count].length - 1 >
631 ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
632 !sgpnt[count].alt_address) {
633 sgpnt[count].alt_address = sgpnt[count].address;
634
635
636
637 if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
638 sgpnt[count].address = NULL;
639 } else {
640 sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
641 };
642
643
644
645
646 if(sgpnt[count].address == NULL){
647 #if 0
648 printk("Warning: Running low on SCSI DMA buffers");
649
650 while(--count >= 0){
651 if(sgpnt[count].alt_address)
652 scsi_free(sgpnt[count].address, sgpnt[count].length);
653 };
654 this_count = SCpnt->request.current_nr_sectors;
655 buff = SCpnt->request.buffer;
656 SCpnt->use_sg = 0;
657 scsi_free(sgpnt, SCpnt->sglist_len);
658 #endif
659 SCpnt->use_sg = count;
660 this_count = counted -= bh->b_size >> 9;
661 break;
662 };
663
664 };
665
666
667
668
669
670 if(bhp && CONTIGUOUS_BUFFERS(bh,bhp) && CLUSTERABLE_DEVICE(SCpnt)) {
671 char * tmp;
672
673 if (((long) sgpnt[count].address) + sgpnt[count].length +
674 bhp->b_size - 1 > ISA_DMA_THRESHOLD &&
675 (SCpnt->host->unchecked_isa_dma) &&
676 !sgpnt[count].alt_address) continue;
677
678 if(!sgpnt[count].alt_address) {count--; continue; }
679 if(dma_free_sectors > 10)
680 tmp = (char *) scsi_malloc(sgpnt[count].length + bhp->b_size);
681 else {
682 tmp = NULL;
683 max_sg = SCpnt->use_sg;
684 };
685 if(tmp){
686 scsi_free(sgpnt[count].address, sgpnt[count].length);
687 sgpnt[count].address = tmp;
688 count--;
689 continue;
690 };
691
692
693
694
695 if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
696 };
697 };
698
699 this_count = counted;
700
701 if(count < SCpnt->use_sg || SCpnt->use_sg > SCpnt->host->sg_tablesize){
702 bh = SCpnt->request.bh;
703 printk("Use sg, count %d %x %d\n", SCpnt->use_sg, count, dma_free_sectors);
704 printk("maxsg = %x, counted = %d this_count = %d\n", max_sg, counted, this_count);
705 while(bh){
706 printk("[%p %lx] ", bh->b_data, bh->b_size);
707 bh = bh->b_reqnext;
708 };
709 if(SCpnt->use_sg < 16)
710 for(count=0; count<SCpnt->use_sg; count++)
711 printk("{%d:%p %p %d} ", count,
712 sgpnt[count].address,
713 sgpnt[count].alt_address,
714 sgpnt[count].length);
715 panic("Ooops");
716 };
717
718 if (SCpnt->request.cmd == WRITE)
719 for(count=0; count<SCpnt->use_sg; count++)
720 if(sgpnt[count].alt_address)
721 memcpy(sgpnt[count].address, sgpnt[count].alt_address,
722 sgpnt[count].length);
723 };
724 };
725
726
727
728 if(SCpnt->use_sg == 0){
729 if (((long) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
730 (SCpnt->host->unchecked_isa_dma)) {
731 if(bounce_buffer)
732 buff = bounce_buffer;
733 else
734 buff = (char *) scsi_malloc(this_count << 9);
735 if(buff == NULL) {
736 this_count = SCpnt->request.current_nr_sectors;
737 buff = (char *) scsi_malloc(this_count << 9);
738 if(!buff) panic("Ran out of DMA buffers.");
739 };
740 if (SCpnt->request.cmd == WRITE)
741 memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
742 };
743 };
744 #ifdef DEBUG
745 printk("sd%c : %s %d/%d 512 byte blocks.\n", 'a' + MINOR(SCpnt->request.dev),
746 (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
747 this_count, SCpnt->request.nr_sectors);
748 #endif
749
750 cmd[1] = (SCpnt->lun << 5) & 0xe0;
751
752 if (rscsi_disks[dev].sector_size == 1024){
753 if(block & 1) panic("sd.c:Bad block number requested");
754 if(this_count & 1) panic("sd.c:Bad block number requested");
755 block = block >> 1;
756 this_count = this_count >> 1;
757 };
758
759 if (rscsi_disks[dev].sector_size == 256){
760 block = block << 1;
761 this_count = this_count << 1;
762 };
763
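/*
 * 6-byte READ/WRITE commands carry a 21-bit block address and an 8-bit
 * transfer length; anything larger is sent as the 10-byte form
 * (READ_10/WRITE_10) with a 32-bit address and a 16-bit length, provided the
 * drive has accepted 10-byte commands (rscsi_disks[dev].ten).
 */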
764 if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten)
765 {
766 if (this_count > 0xffff)
767 this_count = 0xffff;
768
769 cmd[0] += READ_10 - READ_6 ;
770 cmd[2] = (unsigned char) (block >> 24) & 0xff;
771 cmd[3] = (unsigned char) (block >> 16) & 0xff;
772 cmd[4] = (unsigned char) (block >> 8) & 0xff;
773 cmd[5] = (unsigned char) block & 0xff;
774 cmd[6] = cmd[9] = 0;
775 cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
776 cmd[8] = (unsigned char) this_count & 0xff;
777 }
778 else
779 {
780 if (this_count > 0xff)
781 this_count = 0xff;
782
783 cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
784 cmd[2] = (unsigned char) ((block >> 8) & 0xff);
785 cmd[3] = (unsigned char) block & 0xff;
786 cmd[4] = (unsigned char) this_count;
787 cmd[5] = 0;
788 }
789
790
791
792
793
794
795
796 SCpnt->transfersize = rscsi_disks[dev].sector_size;
797 SCpnt->underflow = this_count << 9;
798 scsi_do_cmd (SCpnt, (void *) cmd, buff,
799 this_count * rscsi_disks[dev].sector_size,
800 rw_intr,
801 (SCpnt->device->type == TYPE_DISK ?
802 SD_TIMEOUT : SD_MOD_TIMEOUT),
803 MAX_RETRIES);
804 }
805
806 static int check_scsidisk_media_change(dev_t full_dev){
807 int retval;
808 int target;
809 struct inode inode;
810 int flag = 0;
811
812 target = DEVICE_NR(MINOR(full_dev));
813
814 if (target >= sd_template.dev_max ||
815 !rscsi_disks[target].device) {
816 printk("SCSI disk request error: invalid device.\n");
817 return 0;
818 };
819
820 if(!rscsi_disks[target].device->removable) return 0;
821
822 inode.i_rdev = full_dev;
823 retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
824
825 if(retval){
826
827
828
829
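/*
 * The unit could not be tested, most likely because there is no medium in
 * the drive; flag it as changed so that it is rechecked once the drive
 * becomes ready again.
 */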
830 rscsi_disks[target].device->changed = 1;
831 return 1;
832
833 };
834
835 retval = rscsi_disks[target].device->changed;
836 if(!flag) rscsi_disks[target].device->changed = 0;
837 return retval;
838 }
839
840 static void sd_init_done (Scsi_Cmnd * SCpnt)
841 {
842 struct request * req;
843
844 req = &SCpnt->request;
845 req->dev = 0xfffe;
846
847 if (req->sem != NULL) {
848 up(req->sem);
849 }
850 }
851
852 static int sd_init_onedisk(int i)
853 {
854 unsigned char cmd[10];
855 unsigned char *buffer;
856 unsigned long spintime;
857 int the_result, retries;
858 Scsi_Cmnd * SCpnt;
859
860
861
862
863
864 SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
865 buffer = (unsigned char *) scsi_malloc(512);
866
867 spintime = 0;
868
869
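/*
 * When run from the init task at boot (current == task[0]), the drive is
 * polled with TEST UNIT READY; if it reports NOT READY, a START STOP UNIT
 * command with the start bit set is issued to spin it up, and the driver
 * busy-waits, printing a dot each second, for up to 5000 jiffies (about
 * 50 seconds at HZ=100).
 */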
870 if (current == task[0]){
871 do{
872 cmd[0] = TEST_UNIT_READY;
873 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
874 memset ((void *) &cmd[2], 0, 8);
875 SCpnt->request.dev = 0xffff;
876 SCpnt->cmd_len = 0;
877 SCpnt->sense_buffer[0] = 0;
878 SCpnt->sense_buffer[2] = 0;
879
880 scsi_do_cmd (SCpnt,
881 (void *) cmd, (void *) buffer,
882 512, sd_init_done, SD_TIMEOUT,
883 MAX_RETRIES);
884
885 while(SCpnt->request.dev != 0xfffe);
886
887 the_result = SCpnt->result;
888
889
890
891 if(the_result && !rscsi_disks[i].device->removable &&
892 SCpnt->sense_buffer[2] == NOT_READY) {
893 int time1;
894 if(!spintime){
895 printk( "sd%c: Spinning up disk...", 'a' + i );
896 cmd[0] = START_STOP;
897 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
898 cmd[1] |= 1;
899 memset ((void *) &cmd[2], 0, 8);
900 cmd[4] = 1;
901 SCpnt->request.dev = 0xffff;
902 SCpnt->cmd_len = 0;
903 SCpnt->sense_buffer[0] = 0;
904 SCpnt->sense_buffer[2] = 0;
905
906 scsi_do_cmd (SCpnt,
907 (void *) cmd, (void *) buffer,
908 512, sd_init_done, SD_TIMEOUT,
909 MAX_RETRIES);
910
911 while(SCpnt->request.dev != 0xfffe);
912
913 spintime = jiffies;
914 };
915
916 time1 = jiffies;
917 while(jiffies < time1 + HZ);
918 printk( "." );
919 };
920 } while(the_result && spintime && spintime+5000 > jiffies);
921 if (spintime) {
922 if (the_result)
923 printk( "not responding...\n" );
924 else
925 printk( "ready\n" );
926 }
927 };
928
929
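/*
 * Read the disk capacity.  READ CAPACITY returns two 32-bit big-endian
 * values: the address of the last logical block and the block length in
 * bytes, decoded below from buffer[0..3] and buffer[4..7] respectively.
 */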
930 retries = 3;
931 do {
932 cmd[0] = READ_CAPACITY;
933 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
934 memset ((void *) &cmd[2], 0, 8);
935 memset ((void *) buffer, 0, 8);
936 SCpnt->request.dev = 0xffff;
937 SCpnt->cmd_len = 0;
938 SCpnt->sense_buffer[0] = 0;
939 SCpnt->sense_buffer[2] = 0;
940
941 scsi_do_cmd (SCpnt,
942 (void *) cmd, (void *) buffer,
943 8, sd_init_done, SD_TIMEOUT,
944 MAX_RETRIES);
945
946 if (current == task[0])
947 while(SCpnt->request.dev != 0xfffe);
948 else
949 if (SCpnt->request.dev != 0xfffe){
950 struct semaphore sem = MUTEX_LOCKED;
951 SCpnt->request.sem = &sem;
952 down(&sem);
953
954 while (SCpnt->request.dev != 0xfffe) schedule();
955 };
956
957 the_result = SCpnt->result;
958 retries--;
959
960 } while(the_result && retries);
961
962 SCpnt->request.dev = -1;
963
964 wake_up(&SCpnt->device->device_wait);
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981 if (the_result)
982 {
983 printk ("sd%c : READ CAPACITY failed.\n"
984 "sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
985 'a' + i, 'a' + i,
986 status_byte(the_result),
987 msg_byte(the_result),
988 host_byte(the_result),
989 driver_byte(the_result)
990 );
991 if (driver_byte(the_result) & DRIVER_SENSE)
992 printk("sd%c : extended sense code = %1x \n", 'a' + i, SCpnt->sense_buffer[2] & 0xf);
993 else
994 printk("sd%c : sense not available. \n", 'a' + i);
995
996 printk("sd%c : block size assumed to be 512 bytes, disk size 1GB. \n", 'a' + i);
997 rscsi_disks[i].capacity = 0x1fffff;
998 rscsi_disks[i].sector_size = 512;
999
1000
1001
1002 if(rscsi_disks[i].device->removable &&
1003 SCpnt->sense_buffer[2] == NOT_READY)
1004 rscsi_disks[i].device->changed = 1;
1005
1006 }
1007 else
1008 {
1009 rscsi_disks[i].capacity = (buffer[0] << 24) |
1010 (buffer[1] << 16) |
1011 (buffer[2] << 8) |
1012 buffer[3];
1013
1014 rscsi_disks[i].sector_size = (buffer[4] << 24) |
1015 (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
1016
1017 if (rscsi_disks[i].sector_size != 512 &&
1018 rscsi_disks[i].sector_size != 1024 &&
1019 rscsi_disks[i].sector_size != 256)
1020 {
1021 printk ("sd%c : unsupported sector size %d.\n",
1022 'a' + i, rscsi_disks[i].sector_size);
1023 if(rscsi_disks[i].device->removable){
1024 rscsi_disks[i].capacity = 0;
1025 } else {
1026 printk ("scsi : deleting disk entry.\n");
1027 rscsi_disks[i].device = NULL;
1028 sd_template.nr_dev--;
1029 return i;
1030 };
1031 }
1032 {
1033
1034
1035
1036
1037
1038 int m;
1039 int hard_sector = rscsi_disks[i].sector_size;
1040
1041 for (m=i<<4; m<((i+1)<<4); m++){
1042 sd_hardsizes[m] = hard_sector;
1043 }
1044 printk ("SCSI Hardware sector size is %d bytes on device sd%c\n"
1045 ,hard_sector,i+'a');
1046 }
1047 if(rscsi_disks[i].sector_size == 1024)
1048 rscsi_disks[i].capacity <<= 1;
1049 if(rscsi_disks[i].sector_size == 256)
1050 rscsi_disks[i].capacity >>= 1;
1051 }
1052
1053 rscsi_disks[i].ten = 1;
1054 rscsi_disks[i].remap = 1;
1055 scsi_free(buffer, 512);
1056 return i;
1057 }
1058
1059
1060
1061
1062
1063
1064
1065 static void sd_init()
1066 {
1067 int i;
1068 static int sd_registered = 0;
1069
1070 if (sd_template.dev_noticed == 0) return;
1071
1072 if(!sd_registered) {
1073 if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
1074 printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
1075 return;
1076 }
1077 sd_registered++;
1078 }
1079
1080
1081 if(rscsi_disks) return;
1082
1083 sd_template.dev_max = sd_template.dev_noticed + SD_EXTRA_DEVS;
1084
1085 rscsi_disks = (Scsi_Disk *)
1086 scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC);
1087 memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));
1088
1089 sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1090 sizeof(int), GFP_ATOMIC);
1091 memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));
1092
1093 sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1094 sizeof(int), GFP_ATOMIC);
1095
1096 sd_hardsizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1097 sizeof(int), GFP_ATOMIC);
1098
1099 for(i=0;i<(sd_template.dev_max << 4);i++){
1100 sd_blocksizes[i] = 1024;
1101 sd_hardsizes[i] = 512;
1102 }
1103 blksize_size[MAJOR_NR] = sd_blocksizes;
1104 hardsect_size[MAJOR_NR] = sd_hardsizes;
1105 sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
1106 sizeof(struct hd_struct),
1107 GFP_ATOMIC);
1108
1109
1110 sd_gendisk.max_nr = sd_template.dev_max;
1111 sd_gendisk.part = sd;
1112 sd_gendisk.sizes = sd_sizes;
1113 sd_gendisk.real_devices = (void *) rscsi_disks;
1114
1115 }
1116
1117 static void sd_finish()
1118 {
1119 int i;
1120
1121 blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
1122
1123 sd_gendisk.next = gendisk_head;
1124 gendisk_head = &sd_gendisk;
1125
1126 for (i = 0; i < sd_template.dev_max; ++i)
1127 if (!rscsi_disks[i].capacity &&
1128 rscsi_disks[i].device)
1129 {
1130 i = sd_init_onedisk(i);
1131 if (scsi_loadable_module_flag
1132 && !rscsi_disks[i].has_part_table) {
1133 sd_sizes[i << 4] = rscsi_disks[i].capacity;
1134 revalidate_scsidisk(i << 4, 0);
1135 }
1136 rscsi_disks[i].has_part_table = 1;
1137 }
1138
1139
1140
1141
1142 if(rscsi_disks[0].device && rscsi_disks[0].device->host->sg_tablesize)
1143 read_ahead[MAJOR_NR] = 120;
1144
1145 else
1146 read_ahead[MAJOR_NR] = 4;
1147
1148 return;
1149 }
1150
1151 static int sd_detect(Scsi_Device * SDp){
1152 if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1153
1154 printk("Detected scsi disk sd%c at scsi%d, id %d, lun %d\n",
1155 'a'+ (sd_template.dev_noticed++),
1156 SDp->host->host_no , SDp->id, SDp->lun);
1157
1158 return 1;
1159
1160 }
1161
1162 static int sd_attach(Scsi_Device * SDp){
1163 Scsi_Disk * dpnt;
1164 int i;
1165
1166 if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1167
1168 if(sd_template.nr_dev >= sd_template.dev_max) {
1169 SDp->attached--;
1170 return 1;
1171 }
1172
1173 for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
1174 if(!dpnt->device) break;
1175
1176 if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)");
1177
1178 SDp->scsi_request_fn = do_sd_request;
1179 rscsi_disks[i].device = SDp;
1180 rscsi_disks[i].has_part_table = 0;
1181 sd_template.nr_dev++;
1182 sd_gendisk.nr_real++;
1183 return 0;
1184 }
1185
1186 #define DEVICE_BUSY rscsi_disks[target].device->busy
1187 #define USAGE rscsi_disks[target].device->access_count
1188 #define CAPACITY rscsi_disks[target].capacity
1189 #define MAYBE_REINIT sd_init_onedisk(target)
1190 #define GENDISK_STRUCT sd_gendisk
1191
1192
1193
1194
1195
1196
1197
1198
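/*
 * Force a re-read of the partition table: flush and invalidate every
 * partition of the drive, optionally re-probe the disk itself
 * (MAYBE_REINIT), and let resetup_one_dev() rebuild the partition entries.
 */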
1199 int revalidate_scsidisk(int dev, int maxusage){
1200 int target, major;
1201 struct gendisk * gdev;
1202 unsigned long flags;
1203 int max_p;
1204 int start;
1205 int i;
1206
1207 target = DEVICE_NR(MINOR(dev));
1208 gdev = &GENDISK_STRUCT;
1209
1210 save_flags(flags);
1211 cli();
1212 if (DEVICE_BUSY || USAGE > maxusage) {
1213 restore_flags(flags);
1214 printk("Device busy for revalidation (usage=%d)\n", USAGE);
1215 return -EBUSY;
1216 };
1217 DEVICE_BUSY = 1;
1218 restore_flags(flags);
1219
1220 max_p = gdev->max_p;
1221 start = target << gdev->minor_shift;
1222 major = MAJOR_NR << 8;
1223
1224 for (i=max_p - 1; i >=0 ; i--) {
1225 sync_dev(major | start | i);
1226 invalidate_inodes(major | start | i);
1227 invalidate_buffers(major | start | i);
1228 gdev->part[start+i].start_sect = 0;
1229 gdev->part[start+i].nr_sects = 0;
1230 };
1231
1232 #ifdef MAYBE_REINIT
1233 MAYBE_REINIT;
1234 #endif
1235
1236 gdev->part[start].nr_sects = CAPACITY;
1237 resetup_one_dev(gdev, target);
1238
1239 DEVICE_BUSY = 0;
1240 return 0;
1241 }
1242
1243 static int fop_revalidate_scsidisk(dev_t dev){
1244 return revalidate_scsidisk(dev, 0);
1245 }
1246
1247
1248 static void sd_detach(Scsi_Device * SDp)
1249 {
1250 Scsi_Disk * dpnt;
1251 int i;
1252 int max_p;
1253 int major;
1254 int start;
1255
1256 for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
1257 if(dpnt->device == SDp) {
1258
1259
1260 max_p = sd_gendisk.max_p;
1261 start = i << sd_gendisk.minor_shift;
1262 major = MAJOR_NR << 8;
1263
1264 for (i=max_p - 1; i >=0 ; i--) {
1265 sync_dev(major | start | i);
1266 invalidate_inodes(major | start | i);
1267 invalidate_buffers(major | start | i);
1268 sd_gendisk.part[start+i].start_sect = 0;
1269 sd_gendisk.part[start+i].nr_sects = 0;
1270 sd_sizes[start+i] = 0;
1271 };
1272
1273 dpnt->has_part_table = 0;
1274 dpnt->device = NULL;
1275 dpnt->capacity = 0;
1276 SDp->attached--;
1277 sd_template.dev_noticed--;
1278 sd_template.nr_dev--;
1279 sd_gendisk.nr_real--;
1280 return;
1281 }
1282 return;
1283 }
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300