This source file includes the following definitions.
- sd_open
- sd_release
- sd_geninit
- rw_intr
- do_sd_request
- requeue_sd_request
- check_scsidisk_media_change
- sd_init_done
- sd_init_onedisk
- sd_init
- sd_finish
- sd_detect
- sd_attach
- revalidate_scsidisk
- fop_revalidate_scsidisk
- sd_detach
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 #include <linux/fs.h>
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/mm.h>
23 #include <linux/string.h>
24 #include <linux/errno.h>
25 #include <asm/system.h>
26
27 #define MAJOR_NR SCSI_DISK_MAJOR
28 #include "../block/blk.h"
29 #include "scsi.h"
30 #include "hosts.h"
31 #include "sd.h"
32 #include "scsi_ioctl.h"
33 #include "constants.h"
34
35 #include <linux/genhd.h>
36
37
38
39
40
41 #define MAX_RETRIES 5
42
43
44
45
46
47 #define SD_TIMEOUT 600
48 #define SD_MOD_TIMEOUT 750
49
50 #define CLUSTERABLE_DEVICE(SC) (SC->host->hostt->use_clustering && \
51 SC->device->type != TYPE_MOD)
52
53 struct hd_struct * sd;
54 int revalidate_scsidisk(int dev, int maxusage);
55
56 Scsi_Disk * rscsi_disks = NULL;
57 static int * sd_sizes;
58 static int * sd_blocksizes;
59 static int * sd_hardsizes;
60
61 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
62
63 static int check_scsidisk_media_change(dev_t);
64 static int fop_revalidate_scsidisk(dev_t);
65
66 static sd_init_onedisk(int);
67
68 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
69
70 static void sd_init(void);
71 static void sd_finish(void);
72 static int sd_attach(Scsi_Device *);
73 static int sd_detect(Scsi_Device *);
74 static void sd_detach(Scsi_Device *);
75
76 struct Scsi_Device_Template sd_template = {NULL, "disk", "sd", TYPE_DISK,
77 SCSI_DISK_MAJOR, 0, 0, 0, 1,
78 sd_detect, sd_init,
79 sd_finish, sd_attach, sd_detach};
80
81 static int sd_open(struct inode * inode, struct file * filp)
82 {
83 int target;
84 target = DEVICE_NR(MINOR(inode->i_rdev));
85
86 if(target >= sd_template.dev_max || !rscsi_disks[target].device)
87 return -ENXIO;
88
89
90
91
92 while (rscsi_disks[target].device->busy);
93
94 if(rscsi_disks[target].device->removable) {
95 check_disk_change(inode->i_rdev);
96
97 if(!rscsi_disks[target].device->access_count)
98 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
99 };
100
101
102
103
104 if(sd_sizes[MINOR(inode->i_rdev)] == 0)
105 return -ENXIO;
106
107 rscsi_disks[target].device->access_count++;
108 if (rscsi_disks[target].device->host->hostt->usage_count)
109 (*rscsi_disks[target].device->host->hostt->usage_count)++;
110 return 0;
111 }
112
113 static void sd_release(struct inode * inode, struct file * file)
114 {
115 int target;
116 sync_dev(inode->i_rdev);
117
118 target = DEVICE_NR(MINOR(inode->i_rdev));
119
120 rscsi_disks[target].device->access_count--;
121 if (rscsi_disks[target].device->host->hostt->usage_count)
122 (*rscsi_disks[target].device->host->hostt->usage_count)--;
123
124 if(rscsi_disks[target].device->removable) {
125 if(!rscsi_disks[target].device->access_count)
126 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
127 };
128 }
129
130 static void sd_geninit(void);
131
static struct file_operations sd_fops = {
    NULL,			/* lseek - default */
    block_read,			/* read - generic block-device read */
    block_write,		/* write - generic block-device write */
    NULL,			/* readdir - bad */
    NULL,			/* select */
    sd_ioctl,			/* ioctl */
    NULL,			/* mmap */
    sd_open,			/* open */
    sd_release,			/* release */
    block_fsync,		/* fsync */
    NULL,			/* fasync */
    check_scsidisk_media_change,  /* media change */
    fop_revalidate_scsidisk	/* revalidate */
};
147
static struct gendisk sd_gendisk = {
    MAJOR_NR,			/* major number */
    "sd",			/* major name */
    4,				/* minor bits to shift to get real device */
    1 << 4,			/* number of partitions per real device */
    0,				/* maximum number of real devices */
    sd_geninit,			/* init function */
    NULL,			/* hd_struct array (set in sd_init) */
    NULL,			/* block sizes (set in sd_init) */
    0,				/* number of real devices attached */
    NULL,			/* internal use */
    NULL			/* next gendisk in the chain */
};
161
162 static void sd_geninit (void)
163 {
164 int i;
165
166 for (i = 0; i < sd_template.dev_max; ++i)
167 if(rscsi_disks[i].device)
168 sd[i << 4].nr_sects = rscsi_disks[i].capacity;
169 #if 0
170
171 sd_gendisk.nr_real = sd_template.dev_max;
172 #endif
173 }
174
175
176
177
178
179
180
181 static void rw_intr (Scsi_Cmnd *SCpnt)
182 {
183 int result = SCpnt->result;
184 int this_count = SCpnt->bufflen >> 9;
185
186 #ifdef DEBUG
187 printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.dev), SCpnt->host->host_no, result);
188 #endif
189
190
191
192
193
194
195
196 if (!result) {
197
198 #ifdef DEBUG
199 printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.dev), SCpnt->request.nr_sectors);
200 printk("use_sg is %d\n ",SCpnt->use_sg);
201 #endif
202 if (SCpnt->use_sg) {
203 struct scatterlist * sgpnt;
204 int i;
205 sgpnt = (struct scatterlist *) SCpnt->buffer;
206 for(i=0; i<SCpnt->use_sg; i++) {
207 #ifdef DEBUG
208 printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
209 #endif
210 if (sgpnt[i].alt_address) {
211 if (SCpnt->request.cmd == READ)
212 memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
213 scsi_free(sgpnt[i].address, sgpnt[i].length);
214 };
215 };
216 scsi_free(SCpnt->buffer, SCpnt->sglist_len);
217 } else {
218 if (SCpnt->buffer != SCpnt->request.buffer) {
219 #ifdef DEBUG
220 printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
221 SCpnt->bufflen);
222 #endif
223 if (SCpnt->request.cmd == READ)
224 memcpy(SCpnt->request.buffer, SCpnt->buffer,
225 SCpnt->bufflen);
226 scsi_free(SCpnt->buffer, SCpnt->bufflen);
227 };
228 };
229
230
231
232
233
234 if (SCpnt->request.nr_sectors > this_count)
235 {
236 SCpnt->request.errors = 0;
237
238 if (!SCpnt->request.bh)
239 {
240 #ifdef DEBUG
241 printk("sd%c : handling page request, no buffer\n",
242 'a' + MINOR(SCpnt->request.dev));
243 #endif
244
245
246
247
248 panic("sd.c: linked page request (%lx %x)",
249 SCpnt->request.sector, this_count);
250 }
251 }
252 SCpnt = end_scsi_request(SCpnt, 1, this_count);
253 requeue_sd_request(SCpnt);
254 return;
255 }
256
257
258 if (SCpnt->use_sg) {
259 struct scatterlist * sgpnt;
260 int i;
261 sgpnt = (struct scatterlist *) SCpnt->buffer;
262 for(i=0; i<SCpnt->use_sg; i++) {
263 #ifdef DEBUG
264 printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
265 SCpnt->bufflen);
266 #endif
267 if (sgpnt[i].alt_address) {
268 scsi_free(sgpnt[i].address, sgpnt[i].length);
269 };
270 };
271 scsi_free(SCpnt->buffer, SCpnt->sglist_len);
272 } else {
273 #ifdef DEBUG
274 printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
275 SCpnt->bufflen);
276 #endif
277 if (SCpnt->buffer != SCpnt->request.buffer)
278 scsi_free(SCpnt->buffer, SCpnt->bufflen);
279 };
280
281
282
283
284
285
286
287 if (driver_byte(result) != 0) {
288 if (suggestion(result) == SUGGEST_REMAP) {
289 #ifdef REMAP
290
291
292
293
294 if rscsi_disks[DEVICE_NR(SCpnt->request.dev)].remap
295 {
296 result = 0;
297 }
298 else
299
300 #endif
301 }
302
303 if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
304 if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
305 if(rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->removable) {
306
307
308
309 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->changed = 1;
310 SCpnt = end_scsi_request(SCpnt, 0, this_count);
311 requeue_sd_request(SCpnt);
312 return;
313 }
314 }
315 }
316
317
318
319
320
321
322
323
324
325 if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
326 if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten) {
327 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten = 0;
328 requeue_sd_request(SCpnt);
329 result = 0;
330 } else {
331 }
332 }
333 }
334 if (result) {
335 printk("SCSI disk error : host %d id %d lun %d return code = %x\n",
336 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->host->host_no,
337 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->id,
338 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->lun, result);
339
340 if (driver_byte(result) & DRIVER_SENSE)
341 print_sense("sd", SCpnt);
342 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
343 requeue_sd_request(SCpnt);
344 return;
345 }
346 }
347
348
349
350
351
352
353
/*
 * Top-level request function for the sd major, called by the block
 * layer (and re-entered via requeue_sd_request from rw_intr).  Pulls
 * requests off the queue and attaches SCSI command blocks to them.
 * Interrupts are disabled (cli) around all queue manipulation.
 */
static void do_sd_request (void)
{
    Scsi_Cmnd * SCpnt = NULL;
    struct request * req = NULL;
    unsigned long flags;
    int flag = 0;

    save_flags(flags);
    while (1==1){
	cli();
	/* dev == -1 marks a request that is already being serviced. */
	if (CURRENT != NULL && CURRENT->dev == -1) {
	    restore_flags(flags);
	    return;
	};

	INIT_SCSI_REQUEST;

	/* Only attempt a (non-blocking) allocation for the queue head
	 * on the first pass; later passes rely on the queue scan
	 * below so we never spin here. */
	if (flag++ == 0)
	    SCpnt = allocate_device(&CURRENT,
				    rscsi_disks[DEVICE_NR(MINOR(CURRENT->dev))].device, 0);
	else SCpnt = NULL;

	restore_flags(flags);

	/* The head request may target a busy device while another
	 * disk sits idle: scan the whole queue for any serviceable
	 * request, and unlink it if a command could be attached. */
	if (!SCpnt && sd_template.nr_dev > 1){
	    struct request *req1;
	    req1 = NULL;		/* trails one node behind req */
	    cli();
	    req = CURRENT;
	    while(req){
		SCpnt = request_queueable(req,
					  rscsi_disks[DEVICE_NR(MINOR(req->dev))].device);
		if(SCpnt) break;
		req1 = req;
		req = req->next;
	    };
	    /* request_queueable marks a claimed request dev == -1;
	     * take it out of the queue. */
	    if (SCpnt && req->dev == -1) {
		if (req == CURRENT)
		    CURRENT = CURRENT->next;
		else
		    req1->next = req->next;
	    };
	    restore_flags(flags);
	};

	if (!SCpnt) return;		/* nothing serviceable right now */

	/* Build and issue the SCSI command for this request. */
	requeue_sd_request(SCpnt);
    };
}
429
430 static void requeue_sd_request (Scsi_Cmnd * SCpnt)
431 {
432 int dev, block, this_count;
433 unsigned char cmd[10];
434 int bounce_size, contiguous;
435 int max_sg;
436 struct buffer_head * bh, *bhp;
437 char * buff, *bounce_buffer;
438
439 repeat:
440
441 if(!SCpnt || SCpnt->request.dev <= 0) {
442 do_sd_request();
443 return;
444 }
445
446 dev = MINOR(SCpnt->request.dev);
447 block = SCpnt->request.sector;
448 this_count = 0;
449
450 #ifdef DEBUG
451 printk("Doing sd request, dev = %d, block = %d\n", dev, block);
452 #endif
453
454 if (dev >= (sd_template.dev_max << 4) ||
455 !rscsi_disks[DEVICE_NR(dev)].device ||
456 block + SCpnt->request.nr_sectors > sd[dev].nr_sects)
457 {
458 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
459 goto repeat;
460 }
461
462 block += sd[dev].start_sect;
463 dev = DEVICE_NR(dev);
464
465 if (rscsi_disks[dev].device->changed)
466 {
467
468
469
470
471 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
472 goto repeat;
473 }
474
475 #ifdef DEBUG
476 printk("sd%c : real dev = /dev/sd%c, block = %d\n", 'a' + MINOR(SCpnt->request.dev), dev, block);
477 #endif
478
479
480
481
482
483
484
485
486
487
488
489
490 if (rscsi_disks[dev].sector_size == 1024)
491 if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
492 printk("sd.c:Bad block number requested");
493 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
494 goto repeat;
495 }
496
497 switch (SCpnt->request.cmd)
498 {
499 case WRITE :
500 if (!rscsi_disks[dev].device->writeable)
501 {
502 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
503 goto repeat;
504 }
505 cmd[0] = WRITE_6;
506 break;
507 case READ :
508 cmd[0] = READ_6;
509 break;
510 default :
511 panic ("Unknown sd command %d\n", SCpnt->request.cmd);
512 }
513
514 SCpnt->this_count = 0;
515
516
517
518 contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
519 bounce_buffer = NULL;
520 bounce_size = (SCpnt->request.nr_sectors << 9);
521
522
523
524
525
526
527 if (contiguous && SCpnt->request.bh &&
528 ((long) SCpnt->request.bh->b_data) + (SCpnt->request.nr_sectors << 9) - 1 >
529 ISA_DMA_THRESHOLD && SCpnt->host->unchecked_isa_dma) {
530 if(((long) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
531 bounce_buffer = (char *) scsi_malloc(bounce_size);
532 if(!bounce_buffer) contiguous = 0;
533 };
534
535 if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
536 for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp,
537 bhp = bhp->b_reqnext) {
538 if(!CONTIGUOUS_BUFFERS(bh,bhp)) {
539 if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
540 contiguous = 0;
541 break;
542 }
543 };
544 if (!SCpnt->request.bh || contiguous) {
545
546
547 this_count = SCpnt->request.nr_sectors;
548 buff = SCpnt->request.buffer;
549 SCpnt->use_sg = 0;
550
551 } else if (SCpnt->host->sg_tablesize == 0 ||
552 (need_isa_buffer &&
553 dma_free_sectors <= 10)) {
554
555
556
557
558
559
560
561
562 if (SCpnt->host->sg_tablesize != 0 &&
563 need_isa_buffer &&
564 dma_free_sectors <= 10)
565 printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n");
566
567 this_count = SCpnt->request.current_nr_sectors;
568 buff = SCpnt->request.buffer;
569 SCpnt->use_sg = 0;
570
571 } else {
572
573
574 struct scatterlist * sgpnt;
575 int count, this_count_max;
576 int counted;
577
578 bh = SCpnt->request.bh;
579 this_count = 0;
580 this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
581 count = 0;
582 bhp = NULL;
583 while(bh) {
584 if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
585 if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
586 !CLUSTERABLE_DEVICE(SCpnt) ||
587 (SCpnt->host->unchecked_isa_dma &&
588 ((unsigned long) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
589 if (count < SCpnt->host->sg_tablesize) count++;
590 else break;
591 };
592 this_count += (bh->b_size >> 9);
593 bhp = bh;
594 bh = bh->b_reqnext;
595 };
596 #if 0
597 if(SCpnt->host->unchecked_isa_dma &&
598 ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
599 #endif
600 SCpnt->use_sg = count;
601 count = 512;
602 while( count < (SCpnt->use_sg * sizeof(struct scatterlist)))
603 count = count << 1;
604 SCpnt->sglist_len = count;
605 max_sg = count / sizeof(struct scatterlist);
606 if(SCpnt->host->sg_tablesize < max_sg) max_sg = SCpnt->host->sg_tablesize;
607 sgpnt = (struct scatterlist * ) scsi_malloc(count);
608 memset(sgpnt, 0, count);
609 if (!sgpnt) {
610 printk("Warning - running *really* short on DMA buffers\n");
611 SCpnt->use_sg = 0;
612 this_count = SCpnt->request.current_nr_sectors;
613 buff = SCpnt->request.buffer;
614 } else {
615 buff = (char *) sgpnt;
616 counted = 0;
617 for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
618 count < SCpnt->use_sg && bh;
619 count++, bh = bhp) {
620
621 bhp = bh->b_reqnext;
622
623 if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
624 sgpnt[count].length += bh->b_size;
625 counted += bh->b_size >> 9;
626
627 if (((long) sgpnt[count].address) + sgpnt[count].length - 1 >
628 ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
629 !sgpnt[count].alt_address) {
630 sgpnt[count].alt_address = sgpnt[count].address;
631
632
633
634 if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
635 sgpnt[count].address = NULL;
636 } else {
637 sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
638 };
639
640
641
642
643 if(sgpnt[count].address == NULL){
644 #if 0
645 printk("Warning: Running low on SCSI DMA buffers");
646
647 while(--count >= 0){
648 if(sgpnt[count].alt_address)
649 scsi_free(sgpnt[count].address, sgpnt[count].length);
650 };
651 this_count = SCpnt->request.current_nr_sectors;
652 buff = SCpnt->request.buffer;
653 SCpnt->use_sg = 0;
654 scsi_free(sgpnt, SCpnt->sglist_len);
655 #endif
656 SCpnt->use_sg = count;
657 this_count = counted -= bh->b_size >> 9;
658 break;
659 };
660
661 };
662
663
664
665
666
667 if(bhp && CONTIGUOUS_BUFFERS(bh,bhp) && CLUSTERABLE_DEVICE(SCpnt)) {
668 char * tmp;
669
670 if (((long) sgpnt[count].address) + sgpnt[count].length +
671 bhp->b_size - 1 > ISA_DMA_THRESHOLD &&
672 (SCpnt->host->unchecked_isa_dma) &&
673 !sgpnt[count].alt_address) continue;
674
675 if(!sgpnt[count].alt_address) {count--; continue; }
676 if(dma_free_sectors > 10)
677 tmp = (char *) scsi_malloc(sgpnt[count].length + bhp->b_size);
678 else {
679 tmp = NULL;
680 max_sg = SCpnt->use_sg;
681 };
682 if(tmp){
683 scsi_free(sgpnt[count].address, sgpnt[count].length);
684 sgpnt[count].address = tmp;
685 count--;
686 continue;
687 };
688
689
690
691
692 if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
693 };
694 };
695
696 this_count = counted;
697
698 if(count < SCpnt->use_sg || SCpnt->use_sg > SCpnt->host->sg_tablesize){
699 bh = SCpnt->request.bh;
700 printk("Use sg, count %d %x %d\n", SCpnt->use_sg, count, dma_free_sectors);
701 printk("maxsg = %x, counted = %d this_count = %d\n", max_sg, counted, this_count);
702 while(bh){
703 printk("[%p %lx] ", bh->b_data, bh->b_size);
704 bh = bh->b_reqnext;
705 };
706 if(SCpnt->use_sg < 16)
707 for(count=0; count<SCpnt->use_sg; count++)
708 printk("{%d:%p %p %d} ", count,
709 sgpnt[count].address,
710 sgpnt[count].alt_address,
711 sgpnt[count].length);
712 panic("Ooops");
713 };
714
715 if (SCpnt->request.cmd == WRITE)
716 for(count=0; count<SCpnt->use_sg; count++)
717 if(sgpnt[count].alt_address)
718 memcpy(sgpnt[count].address, sgpnt[count].alt_address,
719 sgpnt[count].length);
720 };
721 };
722
723
724
725 if(SCpnt->use_sg == 0){
726 if (((long) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
727 (SCpnt->host->unchecked_isa_dma)) {
728 if(bounce_buffer)
729 buff = bounce_buffer;
730 else
731 buff = (char *) scsi_malloc(this_count << 9);
732 if(buff == NULL) {
733 this_count = SCpnt->request.current_nr_sectors;
734 buff = (char *) scsi_malloc(this_count << 9);
735 if(!buff) panic("Ran out of DMA buffers.");
736 };
737 if (SCpnt->request.cmd == WRITE)
738 memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
739 };
740 };
741 #ifdef DEBUG
742 printk("sd%c : %s %d/%d 512 byte blocks.\n", 'a' + MINOR(SCpnt->request.dev),
743 (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
744 this_count, SCpnt->request.nr_sectors);
745 #endif
746
747 cmd[1] = (SCpnt->lun << 5) & 0xe0;
748
749 if (rscsi_disks[dev].sector_size == 1024){
750 if(block & 1) panic("sd.c:Bad block number requested");
751 if(this_count & 1) panic("sd.c:Bad block number requested");
752 block = block >> 1;
753 this_count = this_count >> 1;
754 };
755
756 if (rscsi_disks[dev].sector_size == 256){
757 block = block << 1;
758 this_count = this_count << 1;
759 };
760
761 if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten)
762 {
763 if (this_count > 0xffff)
764 this_count = 0xffff;
765
766 cmd[0] += READ_10 - READ_6 ;
767 cmd[2] = (unsigned char) (block >> 24) & 0xff;
768 cmd[3] = (unsigned char) (block >> 16) & 0xff;
769 cmd[4] = (unsigned char) (block >> 8) & 0xff;
770 cmd[5] = (unsigned char) block & 0xff;
771 cmd[6] = cmd[9] = 0;
772 cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
773 cmd[8] = (unsigned char) this_count & 0xff;
774 }
775 else
776 {
777 if (this_count > 0xff)
778 this_count = 0xff;
779
780 cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
781 cmd[2] = (unsigned char) ((block >> 8) & 0xff);
782 cmd[3] = (unsigned char) block & 0xff;
783 cmd[4] = (unsigned char) this_count;
784 cmd[5] = 0;
785 }
786
787
788
789
790
791
792
793 SCpnt->transfersize = rscsi_disks[dev].sector_size;
794 SCpnt->underflow = this_count << 9;
795 scsi_do_cmd (SCpnt, (void *) cmd, buff,
796 this_count * rscsi_disks[dev].sector_size,
797 rw_intr,
798 (SCpnt->device->type == TYPE_DISK ?
799 SD_TIMEOUT : SD_MOD_TIMEOUT),
800 MAX_RETRIES);
801 }
802
803 static int check_scsidisk_media_change(dev_t full_dev){
804 int retval;
805 int target;
806 struct inode inode;
807 int flag = 0;
808
809 target = DEVICE_NR(MINOR(full_dev));
810
811 if (target >= sd_template.dev_max ||
812 !rscsi_disks[target].device) {
813 printk("SCSI disk request error: invalid device.\n");
814 return 0;
815 };
816
817 if(!rscsi_disks[target].device->removable) return 0;
818
819 inode.i_rdev = full_dev;
820 retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
821
822 if(retval){
823
824
825
826
827 rscsi_disks[target].device->changed = 1;
828 return 1;
829
830 };
831
832 retval = rscsi_disks[target].device->changed;
833 if(!flag) rscsi_disks[target].device->changed = 0;
834 return retval;
835 }
836
837 static void sd_init_done (Scsi_Cmnd * SCpnt)
838 {
839 struct request * req;
840
841 req = &SCpnt->request;
842 req->dev = 0xfffe;
843
844 if (req->sem != NULL) {
845 up(req->sem);
846 }
847 }
848
849 static int sd_init_onedisk(int i)
850 {
851 unsigned char cmd[10];
852 unsigned char *buffer;
853 char spintime;
854 int the_result, retries;
855 Scsi_Cmnd * SCpnt;
856
857
858
859
860
861 SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
862 buffer = (unsigned char *) scsi_malloc(512);
863
864 spintime = 0;
865
866
867 if (current == task[0]){
868 do{
869 cmd[0] = TEST_UNIT_READY;
870 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
871 memset ((void *) &cmd[2], 0, 8);
872 SCpnt->request.dev = 0xffff;
873 SCpnt->cmd_len = 0;
874 SCpnt->sense_buffer[0] = 0;
875 SCpnt->sense_buffer[2] = 0;
876
877 scsi_do_cmd (SCpnt,
878 (void *) cmd, (void *) buffer,
879 512, sd_init_done, SD_TIMEOUT,
880 MAX_RETRIES);
881
882 while(SCpnt->request.dev != 0xfffe);
883
884 the_result = SCpnt->result;
885
886
887
888 if(the_result && !rscsi_disks[i].device->removable &&
889 SCpnt->sense_buffer[2] == NOT_READY) {
890 int time1;
891 if(!spintime){
892 printk( "sd%c: Spinning up disk...", 'a' + i );
893 cmd[0] = START_STOP;
894 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
895 cmd[1] |= 1;
896 memset ((void *) &cmd[2], 0, 8);
897 cmd[4] = 1;
898 SCpnt->request.dev = 0xffff;
899 SCpnt->cmd_len = 0;
900 SCpnt->sense_buffer[0] = 0;
901 SCpnt->sense_buffer[2] = 0;
902
903 scsi_do_cmd (SCpnt,
904 (void *) cmd, (void *) buffer,
905 512, sd_init_done, SD_TIMEOUT,
906 MAX_RETRIES);
907
908 while(SCpnt->request.dev != 0xfffe);
909
910 spintime = jiffies;
911 };
912
913 time1 = jiffies;
914 while(jiffies < time1 + HZ);
915 printk( "." );
916 };
917 } while(the_result && spintime && spintime+5000 > jiffies);
918 if (spintime) {
919 if (the_result)
920 printk( "not responding...\n" );
921 else
922 printk( "ready\n" );
923 }
924 };
925
926
927 retries = 3;
928 do {
929 cmd[0] = READ_CAPACITY;
930 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
931 memset ((void *) &cmd[2], 0, 8);
932 memset ((void *) buffer, 0, 8);
933 SCpnt->request.dev = 0xffff;
934 SCpnt->cmd_len = 0;
935 SCpnt->sense_buffer[0] = 0;
936 SCpnt->sense_buffer[2] = 0;
937
938 scsi_do_cmd (SCpnt,
939 (void *) cmd, (void *) buffer,
940 8, sd_init_done, SD_TIMEOUT,
941 MAX_RETRIES);
942
943 if (current == task[0])
944 while(SCpnt->request.dev != 0xfffe);
945 else
946 if (SCpnt->request.dev != 0xfffe){
947 struct semaphore sem = MUTEX_LOCKED;
948 SCpnt->request.sem = &sem;
949 down(&sem);
950
951 while (SCpnt->request.dev != 0xfffe) schedule();
952 };
953
954 the_result = SCpnt->result;
955 retries--;
956
957 } while(the_result && retries);
958
959 SCpnt->request.dev = -1;
960
961 wake_up(&SCpnt->device->device_wait);
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978 if (the_result)
979 {
980 printk ("sd%c : READ CAPACITY failed.\n"
981 "sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
982 'a' + i, 'a' + i,
983 status_byte(the_result),
984 msg_byte(the_result),
985 host_byte(the_result),
986 driver_byte(the_result)
987 );
988 if (driver_byte(the_result) & DRIVER_SENSE)
989 printk("sd%c : extended sense code = %1x \n", 'a' + i, SCpnt->sense_buffer[2] & 0xf);
990 else
991 printk("sd%c : sense not available. \n", 'a' + i);
992
993 printk("sd%c : block size assumed to be 512 bytes, disk size 1GB. \n", 'a' + i);
994 rscsi_disks[i].capacity = 0x1fffff;
995 rscsi_disks[i].sector_size = 512;
996
997
998
999 if(rscsi_disks[i].device->removable &&
1000 SCpnt->sense_buffer[2] == NOT_READY)
1001 rscsi_disks[i].device->changed = 1;
1002
1003 }
1004 else
1005 {
1006 rscsi_disks[i].capacity = (buffer[0] << 24) |
1007 (buffer[1] << 16) |
1008 (buffer[2] << 8) |
1009 buffer[3];
1010
1011 rscsi_disks[i].sector_size = (buffer[4] << 24) |
1012 (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
1013
1014 if (rscsi_disks[i].sector_size != 512 &&
1015 rscsi_disks[i].sector_size != 1024 &&
1016 rscsi_disks[i].sector_size != 256)
1017 {
1018 printk ("sd%c : unsupported sector size %d.\n",
1019 'a' + i, rscsi_disks[i].sector_size);
1020 if(rscsi_disks[i].device->removable){
1021 rscsi_disks[i].capacity = 0;
1022 } else {
1023 printk ("scsi : deleting disk entry.\n");
1024 rscsi_disks[i].device = NULL;
1025 sd_template.nr_dev--;
1026 return i;
1027 };
1028 }
1029 {
1030
1031
1032
1033
1034
1035 int m;
1036 int hard_sector = rscsi_disks[i].sector_size;
1037
1038 for (m=i<<4; m<((i+1)<<4); m++){
1039 sd_hardsizes[m] = hard_sector;
1040 }
1041 printk ("SCSI Hardware sector size is %d bytes on device sd%c\n"
1042 ,hard_sector,i+'a');
1043 }
1044 if(rscsi_disks[i].sector_size == 1024)
1045 rscsi_disks[i].capacity <<= 1;
1046 if(rscsi_disks[i].sector_size == 256)
1047 rscsi_disks[i].capacity >>= 1;
1048 }
1049
1050 rscsi_disks[i].ten = 1;
1051 rscsi_disks[i].remap = 1;
1052 scsi_free(buffer, 512);
1053 return i;
1054 }
1055
1056
1057
1058
1059
1060
1061
1062 static void sd_init()
1063 {
1064 int i;
1065 static int sd_registered = 0;
1066
1067 if (sd_template.dev_noticed == 0) return;
1068
1069 if(!sd_registered) {
1070 if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
1071 printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
1072 return;
1073 }
1074 sd_registered++;
1075 }
1076
1077
1078 if(rscsi_disks) return;
1079
1080 sd_template.dev_max = sd_template.dev_noticed + SD_EXTRA_DEVS;
1081
1082 rscsi_disks = (Scsi_Disk *)
1083 scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC);
1084 memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));
1085
1086 sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1087 sizeof(int), GFP_ATOMIC);
1088 memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));
1089
1090 sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1091 sizeof(int), GFP_ATOMIC);
1092
1093 sd_hardsizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1094 sizeof(struct hd_struct), GFP_ATOMIC);
1095
1096 for(i=0;i<(sd_template.dev_max << 4);i++){
1097 sd_blocksizes[i] = 1024;
1098 sd_hardsizes[i] = 512;
1099 }
1100 blksize_size[MAJOR_NR] = sd_blocksizes;
1101 hardsect_size[MAJOR_NR] = sd_hardsizes;
1102 sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
1103 sizeof(struct hd_struct),
1104 GFP_ATOMIC);
1105
1106
1107 sd_gendisk.max_nr = sd_template.dev_max;
1108 sd_gendisk.part = sd;
1109 sd_gendisk.sizes = sd_sizes;
1110 sd_gendisk.real_devices = (void *) rscsi_disks;
1111
1112 }
1113
/*
 * Called by the mid-layer once all devices are attached: install the
 * request handler, link our gendisk into the global chain, size each
 * attached disk, and (on a module load) read its partition table.
 */
static void sd_finish()
{
    int i;

    blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;

    /* NOTE(review): no guard against being called twice - a second
     * call would link sd_gendisk into gendisk_head again. */
    sd_gendisk.next = gendisk_head;
    gendisk_head = &sd_gendisk;

    for (i = 0; i < sd_template.dev_max; ++i)
	/* Only probe units that were attached but not yet sized. */
	if (!rscsi_disks[i].capacity &&
	    rscsi_disks[i].device)
	{
	    /* sd_init_onedisk returns the unit number it probed. */
	    i = sd_init_onedisk(i);
	    if (scsi_loadable_module_flag
		&& !rscsi_disks[i].has_part_table) {
		sd_sizes[i << 4] = rscsi_disks[i].capacity;
		revalidate_scsidisk(i << 4, 0);
	    }
	    rscsi_disks[i].has_part_table = 1;
	}

    /* Large read-ahead pays off on hosts that can scatter-gather;
     * keep it small otherwise. */
    if(rscsi_disks[0].device && rscsi_disks[0].device->host->sg_tablesize)
	read_ahead[MAJOR_NR] = 120;

    else
	read_ahead[MAJOR_NR] = 4;

    return;
}
1147
1148 static int sd_detect(Scsi_Device * SDp){
1149 if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1150
1151 printk("Detected scsi disk sd%c at scsi%d, id %d, lun %d\n",
1152 'a'+ (sd_template.dev_noticed++),
1153 SDp->host->host_no , SDp->id, SDp->lun);
1154
1155 return 1;
1156
1157 }
1158
1159 static int sd_attach(Scsi_Device * SDp){
1160 Scsi_Disk * dpnt;
1161 int i;
1162
1163 if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1164
1165 if(sd_template.nr_dev >= sd_template.dev_max) {
1166 SDp->attached--;
1167 return 1;
1168 }
1169
1170 for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
1171 if(!dpnt->device) break;
1172
1173 if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)");
1174
1175 SDp->scsi_request_fn = do_sd_request;
1176 rscsi_disks[i].device = SDp;
1177 rscsi_disks[i].has_part_table = 0;
1178 sd_template.nr_dev++;
1179 sd_gendisk.nr_real++;
1180 return 0;
1181 }
1182
/* Shorthands for the revalidation code below ('target' is the unit). */
#define DEVICE_BUSY rscsi_disks[target].device->busy
#define USAGE rscsi_disks[target].device->access_count
#define CAPACITY rscsi_disks[target].capacity
#define MAYBE_REINIT sd_init_onedisk(target)
#define GENDISK_STRUCT sd_gendisk

/*
 * Re-read the partition table of one disk (e.g. after a media change).
 * 'dev' is a minor number on this major; 'maxusage' is the open count
 * above which revalidation is refused (0 for a normal revalidate).
 * Returns 0 on success, -EBUSY if the disk is in use.
 */
int revalidate_scsidisk(int dev, int maxusage){
    int target, major;
    struct gendisk * gdev;
    unsigned long flags;
    int max_p;
    int start;
    int i;

    target = DEVICE_NR(MINOR(dev));
    gdev = &GENDISK_STRUCT;

    /* Atomically claim the device so no new I/O starts while the
     * partition table is being rebuilt. */
    save_flags(flags);
    cli();
    if (DEVICE_BUSY || USAGE > maxusage) {
	restore_flags(flags);
	printk("Device busy for revalidation (usage=%d)\n", USAGE);
	return -EBUSY;
    };
    DEVICE_BUSY = 1;
    restore_flags(flags);

    max_p = gdev->max_p;
    start = target << gdev->minor_shift;
    major = MAJOR_NR << 8;

    /* Flush and invalidate every partition minor of this unit, then
     * clear its partition entry. */
    for (i=max_p - 1; i >=0 ; i--) {
	sync_dev(major | start | i);
	invalidate_inodes(major | start | i);
	invalidate_buffers(major | start | i);
	gdev->part[start+i].start_sect = 0;
	gdev->part[start+i].nr_sects = 0;
    };

    /* Re-probe capacity and sector size. */
#ifdef MAYBE_REINIT
    MAYBE_REINIT;
#endif

    gdev->part[start].nr_sects = CAPACITY;
    resetup_one_dev(gdev, target);	/* re-read the partition table */

    DEVICE_BUSY = 0;
    return 0;
}
1239
/* file_operations revalidate hook: full revalidation, no opens allowed. */
static int fop_revalidate_scsidisk(dev_t dev){
    return revalidate_scsidisk(dev, 0);
}
1243
1244
1245 static void sd_detach(Scsi_Device * SDp)
1246 {
1247 Scsi_Disk * dpnt;
1248 int i;
1249 int max_p;
1250 int major;
1251 int start;
1252
1253 for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
1254 if(dpnt->device == SDp) {
1255
1256
1257 max_p = sd_gendisk.max_p;
1258 start = i << sd_gendisk.minor_shift;
1259 major = MAJOR_NR << 8;
1260
1261 for (i=max_p - 1; i >=0 ; i--) {
1262 sync_dev(major | start | i);
1263 invalidate_inodes(major | start | i);
1264 invalidate_buffers(major | start | i);
1265 sd_gendisk.part[start+i].start_sect = 0;
1266 sd_gendisk.part[start+i].nr_sects = 0;
1267 sd_sizes[start+i] = 0;
1268 };
1269
1270 dpnt->has_part_table = 0;
1271 dpnt->device = NULL;
1272 dpnt->capacity = 0;
1273 SDp->attached--;
1274 sd_template.dev_noticed--;
1275 sd_template.nr_dev--;
1276 sd_gendisk.nr_real--;
1277 return;
1278 }
1279 return;
1280 }
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297