This source file includes following definitions.
- sd_open
- sd_release
- sd_geninit
- rw_intr
- do_sd_request
- requeue_sd_request
- check_scsidisk_media_change
- sd_init_done
- sd_init_onedisk
- sd_init
- sd_finish
- sd_detect
- sd_attach
- revalidate_scsidisk
- fop_revalidate_scsidisk
- sd_detach
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 #include <linux/fs.h>
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/string.h>
23 #include <linux/errno.h>
24 #include <asm/system.h>
25
26 #define MAJOR_NR SCSI_DISK_MAJOR
27 #include "../block/blk.h"
28 #include "scsi.h"
29 #include "hosts.h"
30 #include "sd.h"
31 #include "scsi_ioctl.h"
32 #include "constants.h"
33
34 #include <linux/genhd.h>
35
36
37
38
39
40 #define MAX_RETRIES 5
41
42
43
44
45
46 #define SD_TIMEOUT 600
47 #define SD_MOD_TIMEOUT 750
48
49 #define CLUSTERABLE_DEVICE(SC) (SC->host->hostt->use_clustering && \
50 SC->device->type != TYPE_MOD)
51
52 struct hd_struct * sd;
53 int revalidate_scsidisk(int dev, int maxusage);
54
55 Scsi_Disk * rscsi_disks = NULL;
56 static int * sd_sizes;
57 static int * sd_blocksizes;
58 static int * sd_hardsizes;
59
60 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
61
62 static int check_scsidisk_media_change(dev_t);
63 static int fop_revalidate_scsidisk(dev_t);
64
65 static sd_init_onedisk(int);
66
67 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
68
69 static void sd_init(void);
70 static void sd_finish(void);
71 static int sd_attach(Scsi_Device *);
72 static int sd_detect(Scsi_Device *);
73 static void sd_detach(Scsi_Device *);
74
75 struct Scsi_Device_Template sd_template = {NULL, "disk", "sd", TYPE_DISK,
76 SCSI_DISK_MAJOR, 0, 0, 0, 1,
77 sd_detect, sd_init,
78 sd_finish, sd_attach, sd_detach};
79
80 static int sd_open(struct inode * inode, struct file * filp)
81 {
82 int target;
83 target = DEVICE_NR(MINOR(inode->i_rdev));
84
85 if(target >= sd_template.dev_max || !rscsi_disks[target].device)
86 return -ENXIO;
87
88
89
90
91 if(sd_sizes[MINOR(inode->i_rdev)] == 0)
92 return -ENXIO;
93
94
95
96
97 while (rscsi_disks[target].device->busy);
98
99 if(rscsi_disks[target].device->removable) {
100 check_disk_change(inode->i_rdev);
101
102 if(!rscsi_disks[target].device->access_count)
103 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
104 };
105 rscsi_disks[target].device->access_count++;
106 if (rscsi_disks[target].device->host->hostt->usage_count)
107 (*rscsi_disks[target].device->host->hostt->usage_count)++;
108 return 0;
109 }
110
111 static void sd_release(struct inode * inode, struct file * file)
112 {
113 int target;
114 sync_dev(inode->i_rdev);
115
116 target = DEVICE_NR(MINOR(inode->i_rdev));
117
118 rscsi_disks[target].device->access_count--;
119 if (rscsi_disks[target].device->host->hostt->usage_count)
120 (*rscsi_disks[target].device->host->hostt->usage_count)--;
121
122 if(rscsi_disks[target].device->removable) {
123 if(!rscsi_disks[target].device->access_count)
124 sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
125 };
126 }
127
static void sd_geninit(void);

/* file_operations for /dev/sd* — slot order follows the era's struct
 * file_operations layout (NOTE(review): labels inferred from that layout;
 * confirm against the kernel headers this file builds with). */
static struct file_operations sd_fops = {
	NULL,			/* lseek - default */
	block_read,		/* read - generic block-layer read */
	block_write,		/* write - generic block-layer write */
	NULL,			/* readdir - bad */
	NULL,			/* select */
	sd_ioctl,		/* ioctl */
	NULL,			/* mmap */
	sd_open,		/* open */
	sd_release,		/* release */
	block_fsync,		/* fsync */
	NULL,			/* fasync */
	check_scsidisk_media_change,	/* media change */
	fop_revalidate_scsidisk		/* revalidate */
};

/* gendisk entry for SCSI disks; part/sizes/max_nr/real_devices are filled
 * in by sd_init(), nr_real is bumped in sd_attach(). */
static struct gendisk sd_gendisk = {
	MAJOR_NR,		/* major number */
	"sd",			/* device name prefix */
	4,			/* minor_shift: 16 minors per unit */
	1 << 4,			/* max_p: partitions per unit */
	0,			/* max_nr: set in sd_init() */
	sd_geninit,		/* init */
	NULL,			/* part: hd_struct array, set in sd_init() */
	NULL,			/* sizes: set in sd_init() */
	0,			/* nr_real: incremented in sd_attach() */
	NULL,			/* real_devices: set in sd_init() */
	NULL			/* next */
};
159
160 static void sd_geninit (void)
161 {
162 int i;
163
164 for (i = 0; i < sd_template.dev_max; ++i)
165 if(rscsi_disks[i].device)
166 sd[i << 4].nr_sects = rscsi_disks[i].capacity;
167 #if 0
168
169 sd_gendisk.nr_real = sd_template.dev_max;
170 #endif
171 }
172
173
174
175
176
177
178
179 static void rw_intr (Scsi_Cmnd *SCpnt)
180 {
181 int result = SCpnt->result;
182 int this_count = SCpnt->bufflen >> 9;
183
184 #ifdef DEBUG
185 printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.dev), SCpnt->host->host_no, result);
186 #endif
187
188
189
190
191
192
193
194 if (!result) {
195
196 #ifdef DEBUG
197 printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.dev), SCpnt->request.nr_sectors);
198 printk("use_sg is %d\n ",SCpnt->use_sg);
199 #endif
200 if (SCpnt->use_sg) {
201 struct scatterlist * sgpnt;
202 int i;
203 sgpnt = (struct scatterlist *) SCpnt->buffer;
204 for(i=0; i<SCpnt->use_sg; i++) {
205 #ifdef DEBUG
206 printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
207 #endif
208 if (sgpnt[i].alt_address) {
209 if (SCpnt->request.cmd == READ)
210 memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
211 scsi_free(sgpnt[i].address, sgpnt[i].length);
212 };
213 };
214 scsi_free(SCpnt->buffer, SCpnt->sglist_len);
215 } else {
216 if (SCpnt->buffer != SCpnt->request.buffer) {
217 #ifdef DEBUG
218 printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
219 SCpnt->bufflen);
220 #endif
221 if (SCpnt->request.cmd == READ)
222 memcpy(SCpnt->request.buffer, SCpnt->buffer,
223 SCpnt->bufflen);
224 scsi_free(SCpnt->buffer, SCpnt->bufflen);
225 };
226 };
227
228
229
230
231
232 if (SCpnt->request.nr_sectors > this_count)
233 {
234 SCpnt->request.errors = 0;
235
236 if (!SCpnt->request.bh)
237 {
238 #ifdef DEBUG
239 printk("sd%c : handling page request, no buffer\n",
240 'a' + MINOR(SCpnt->request.dev));
241 #endif
242
243
244
245
246 panic("sd.c: linked page request (%lx %x)",
247 SCpnt->request.sector, this_count);
248 }
249 }
250 SCpnt = end_scsi_request(SCpnt, 1, this_count);
251 requeue_sd_request(SCpnt);
252 return;
253 }
254
255
256 if (SCpnt->use_sg) {
257 struct scatterlist * sgpnt;
258 int i;
259 sgpnt = (struct scatterlist *) SCpnt->buffer;
260 for(i=0; i<SCpnt->use_sg; i++) {
261 #ifdef DEBUG
262 printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
263 SCpnt->bufflen);
264 #endif
265 if (sgpnt[i].alt_address) {
266 scsi_free(sgpnt[i].address, sgpnt[i].length);
267 };
268 };
269 scsi_free(SCpnt->buffer, SCpnt->sglist_len);
270 } else {
271 #ifdef DEBUG
272 printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
273 SCpnt->bufflen);
274 #endif
275 if (SCpnt->buffer != SCpnt->request.buffer)
276 scsi_free(SCpnt->buffer, SCpnt->bufflen);
277 };
278
279
280
281
282
283
284
285 if (driver_byte(result) != 0) {
286 if (suggestion(result) == SUGGEST_REMAP) {
287 #ifdef REMAP
288
289
290
291
292 if rscsi_disks[DEVICE_NR(SCpnt->request.dev)].remap
293 {
294 result = 0;
295 }
296 else
297
298 #endif
299 }
300
301 if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
302 if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
303 if(rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->removable) {
304
305
306
307 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->changed = 1;
308 SCpnt = end_scsi_request(SCpnt, 0, this_count);
309 requeue_sd_request(SCpnt);
310 return;
311 }
312 }
313 }
314
315
316
317
318
319
320
321
322
323 if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
324 if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten) {
325 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten = 0;
326 requeue_sd_request(SCpnt);
327 result = 0;
328 } else {
329 }
330 }
331 }
332 if (result) {
333 printk("SCSI disk error : host %d id %d lun %d return code = %x\n",
334 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->host->host_no,
335 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->id,
336 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->lun, result);
337
338 if (driver_byte(result) & DRIVER_SENSE)
339 print_sense("sd", SCpnt);
340 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
341 requeue_sd_request(SCpnt);
342 return;
343 }
344 }
345
346
347
348
349
350
351
/*
 * Top-level request function for the SCSI disk major.
 * Pulls requests off the block-layer queue and hands each one, wrapped in a
 * Scsi_Cmnd, to requeue_sd_request().  Interrupts are masked (cli) around
 * every queue inspection because rw_intr() completes requests from interrupt
 * context; the exact save_flags/cli/restore_flags placement is load-bearing.
 */
static void do_sd_request (void)
{
	Scsi_Cmnd * SCpnt = NULL;
	struct request * req = NULL;
	unsigned long flags;
	int flag = 0;	/* allocate_device() is only tried on the first pass */

	while (1==1){
		save_flags(flags);
		cli();
		/* dev == -1 marks a request already claimed by a command. */
		if (CURRENT != NULL && CURRENT->dev == -1) {
			restore_flags(flags);
			return;
		};

		INIT_SCSI_REQUEST;

		/* First iteration: try to claim a command slot for the head
		 * request (non-blocking).  Later iterations skip this so the
		 * multi-device scan below runs instead. */
		if (flag++ == 0)
			SCpnt = allocate_device(&CURRENT,
						rscsi_disks[DEVICE_NR(MINOR(CURRENT->dev))].device, 0);
		else SCpnt = NULL;
		restore_flags(flags);

		/* Head request's device is saturated: with several disks on
		 * the queue, scan for any request whose device can take a
		 * command now, and splice it out of the list if claimed. */
		if (!SCpnt && sd_template.nr_dev > 1){
			struct request *req1;
			req1 = NULL;	/* trails req: predecessor for unlink */
			save_flags(flags);
			cli();
			req = CURRENT;
			while(req){
				SCpnt = request_queueable(req,
							  rscsi_disks[DEVICE_NR(MINOR(req->dev))].device);
				if(SCpnt) break;
				req1 = req;
				req = req->next;
			};
			/* dev == -1 here means request_queueable() claimed the
			 * whole request, so unlink it from the queue. */
			if (SCpnt && req->dev == -1) {
				if (req == CURRENT)
					CURRENT = CURRENT->next;
				else
					req1->next = req->next;
			};
			restore_flags(flags);
		};

		/* Nothing claimable right now; rw_intr will restart us. */
		if (!SCpnt) return;

		/* Build and issue the SCSI command for this request. */
		requeue_sd_request(SCpnt);
	};
}
422
423 static void requeue_sd_request (Scsi_Cmnd * SCpnt)
424 {
425 int dev, block, this_count;
426 unsigned char cmd[10];
427 int bounce_size, contiguous;
428 int max_sg;
429 struct buffer_head * bh, *bhp;
430 char * buff, *bounce_buffer;
431
432 repeat:
433
434 if(!SCpnt || SCpnt->request.dev <= 0) {
435 do_sd_request();
436 return;
437 }
438
439 dev = MINOR(SCpnt->request.dev);
440 block = SCpnt->request.sector;
441 this_count = 0;
442
443 #ifdef DEBUG
444 printk("Doing sd request, dev = %d, block = %d\n", dev, block);
445 #endif
446
447 if (dev >= (sd_template.dev_max << 4) ||
448 !rscsi_disks[DEVICE_NR(dev)].device ||
449 block + SCpnt->request.nr_sectors > sd[dev].nr_sects)
450 {
451 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
452 goto repeat;
453 }
454
455 block += sd[dev].start_sect;
456 dev = DEVICE_NR(dev);
457
458 if (rscsi_disks[dev].device->changed)
459 {
460
461
462
463
464 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
465 goto repeat;
466 }
467
468 #ifdef DEBUG
469 printk("sd%c : real dev = /dev/sd%c, block = %d\n", 'a' + MINOR(SCpnt->request.dev), dev, block);
470 #endif
471
472
473
474
475
476
477
478
479
480
481
482
483 if (rscsi_disks[dev].sector_size == 1024)
484 if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
485 printk("sd.c:Bad block number requested");
486 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
487 goto repeat;
488 }
489
490 switch (SCpnt->request.cmd)
491 {
492 case WRITE :
493 if (!rscsi_disks[dev].device->writeable)
494 {
495 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
496 goto repeat;
497 }
498 cmd[0] = WRITE_6;
499 break;
500 case READ :
501 cmd[0] = READ_6;
502 break;
503 default :
504 panic ("Unknown sd command %d\n", SCpnt->request.cmd);
505 }
506
507 SCpnt->this_count = 0;
508
509
510
511 contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
512 bounce_buffer = NULL;
513 bounce_size = (SCpnt->request.nr_sectors << 9);
514
515
516
517
518
519
520 if (contiguous && SCpnt->request.bh &&
521 ((long) SCpnt->request.bh->b_data) + (SCpnt->request.nr_sectors << 9) - 1 >
522 ISA_DMA_THRESHOLD && SCpnt->host->unchecked_isa_dma) {
523 if(((long) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
524 bounce_buffer = (char *) scsi_malloc(bounce_size);
525 if(!bounce_buffer) contiguous = 0;
526 };
527
528 if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
529 for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp,
530 bhp = bhp->b_reqnext) {
531 if(!CONTIGUOUS_BUFFERS(bh,bhp)) {
532 if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
533 contiguous = 0;
534 break;
535 }
536 };
537 if (!SCpnt->request.bh || contiguous) {
538
539
540 this_count = SCpnt->request.nr_sectors;
541 buff = SCpnt->request.buffer;
542 SCpnt->use_sg = 0;
543
544 } else if (SCpnt->host->sg_tablesize == 0 ||
545 (need_isa_buffer &&
546 dma_free_sectors <= 10)) {
547
548
549
550
551
552
553
554
555 if (SCpnt->host->sg_tablesize != 0 &&
556 need_isa_buffer &&
557 dma_free_sectors <= 10)
558 printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n");
559
560 this_count = SCpnt->request.current_nr_sectors;
561 buff = SCpnt->request.buffer;
562 SCpnt->use_sg = 0;
563
564 } else {
565
566
567 struct scatterlist * sgpnt;
568 int count, this_count_max;
569 int counted;
570
571 bh = SCpnt->request.bh;
572 this_count = 0;
573 this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
574 count = 0;
575 bhp = NULL;
576 while(bh) {
577 if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
578 if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
579 !CLUSTERABLE_DEVICE(SCpnt) ||
580 (SCpnt->host->unchecked_isa_dma &&
581 ((unsigned long) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
582 if (count < SCpnt->host->sg_tablesize) count++;
583 else break;
584 };
585 this_count += (bh->b_size >> 9);
586 bhp = bh;
587 bh = bh->b_reqnext;
588 };
589 #if 0
590 if(SCpnt->host->unchecked_isa_dma &&
591 ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
592 #endif
593 SCpnt->use_sg = count;
594 count = 512;
595 while( count < (SCpnt->use_sg * sizeof(struct scatterlist)))
596 count = count << 1;
597 SCpnt->sglist_len = count;
598 max_sg = count / sizeof(struct scatterlist);
599 if(SCpnt->host->sg_tablesize < max_sg) max_sg = SCpnt->host->sg_tablesize;
600 sgpnt = (struct scatterlist * ) scsi_malloc(count);
601 memset(sgpnt, 0, count);
602 if (!sgpnt) {
603 printk("Warning - running *really* short on DMA buffers\n");
604 SCpnt->use_sg = 0;
605 this_count = SCpnt->request.current_nr_sectors;
606 buff = SCpnt->request.buffer;
607 } else {
608 buff = (char *) sgpnt;
609 counted = 0;
610 for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
611 count < SCpnt->use_sg && bh;
612 count++, bh = bhp) {
613
614 bhp = bh->b_reqnext;
615
616 if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
617 sgpnt[count].length += bh->b_size;
618 counted += bh->b_size >> 9;
619
620 if (((long) sgpnt[count].address) + sgpnt[count].length - 1 >
621 ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
622 !sgpnt[count].alt_address) {
623 sgpnt[count].alt_address = sgpnt[count].address;
624
625
626
627 if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
628 sgpnt[count].address = NULL;
629 } else {
630 sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
631 };
632
633
634
635
636 if(sgpnt[count].address == NULL){
637 #if 0
638 printk("Warning: Running low on SCSI DMA buffers");
639
640 while(--count >= 0){
641 if(sgpnt[count].alt_address)
642 scsi_free(sgpnt[count].address, sgpnt[count].length);
643 };
644 this_count = SCpnt->request.current_nr_sectors;
645 buff = SCpnt->request.buffer;
646 SCpnt->use_sg = 0;
647 scsi_free(sgpnt, SCpnt->sglist_len);
648 #endif
649 SCpnt->use_sg = count;
650 this_count = counted -= bh->b_size >> 9;
651 break;
652 };
653
654 };
655
656
657
658
659
660 if(bhp && CONTIGUOUS_BUFFERS(bh,bhp) && CLUSTERABLE_DEVICE(SCpnt)) {
661 char * tmp;
662
663 if (((long) sgpnt[count].address) + sgpnt[count].length +
664 bhp->b_size - 1 > ISA_DMA_THRESHOLD &&
665 (SCpnt->host->unchecked_isa_dma) &&
666 !sgpnt[count].alt_address) continue;
667
668 if(!sgpnt[count].alt_address) {count--; continue; }
669 if(dma_free_sectors > 10)
670 tmp = (char *) scsi_malloc(sgpnt[count].length + bhp->b_size);
671 else {
672 tmp = NULL;
673 max_sg = SCpnt->use_sg;
674 };
675 if(tmp){
676 scsi_free(sgpnt[count].address, sgpnt[count].length);
677 sgpnt[count].address = tmp;
678 count--;
679 continue;
680 };
681
682
683
684
685 if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
686 };
687 };
688
689 this_count = counted;
690
691 if(count < SCpnt->use_sg || SCpnt->use_sg > SCpnt->host->sg_tablesize){
692 bh = SCpnt->request.bh;
693 printk("Use sg, count %d %x %d\n", SCpnt->use_sg, count, dma_free_sectors);
694 printk("maxsg = %x, counted = %d this_count = %d\n", max_sg, counted, this_count);
695 while(bh){
696 printk("[%p %lx] ", bh->b_data, bh->b_size);
697 bh = bh->b_reqnext;
698 };
699 if(SCpnt->use_sg < 16)
700 for(count=0; count<SCpnt->use_sg; count++)
701 printk("{%d:%p %p %d} ", count,
702 sgpnt[count].address,
703 sgpnt[count].alt_address,
704 sgpnt[count].length);
705 panic("Ooops");
706 };
707
708 if (SCpnt->request.cmd == WRITE)
709 for(count=0; count<SCpnt->use_sg; count++)
710 if(sgpnt[count].alt_address)
711 memcpy(sgpnt[count].address, sgpnt[count].alt_address,
712 sgpnt[count].length);
713 };
714 };
715
716
717
718 if(SCpnt->use_sg == 0){
719 if (((long) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD &&
720 (SCpnt->host->unchecked_isa_dma)) {
721 if(bounce_buffer)
722 buff = bounce_buffer;
723 else
724 buff = (char *) scsi_malloc(this_count << 9);
725 if(buff == NULL) {
726 this_count = SCpnt->request.current_nr_sectors;
727 buff = (char *) scsi_malloc(this_count << 9);
728 if(!buff) panic("Ran out of DMA buffers.");
729 };
730 if (SCpnt->request.cmd == WRITE)
731 memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
732 };
733 };
734 #ifdef DEBUG
735 printk("sd%c : %s %d/%d 512 byte blocks.\n", 'a' + MINOR(SCpnt->request.dev),
736 (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
737 this_count, SCpnt->request.nr_sectors);
738 #endif
739
740 cmd[1] = (SCpnt->lun << 5) & 0xe0;
741
742 if (rscsi_disks[dev].sector_size == 1024){
743 if(block & 1) panic("sd.c:Bad block number requested");
744 if(this_count & 1) panic("sd.c:Bad block number requested");
745 block = block >> 1;
746 this_count = this_count >> 1;
747 };
748
749 if (rscsi_disks[dev].sector_size == 256){
750 block = block << 1;
751 this_count = this_count << 1;
752 };
753
754 if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten)
755 {
756 if (this_count > 0xffff)
757 this_count = 0xffff;
758
759 cmd[0] += READ_10 - READ_6 ;
760 cmd[2] = (unsigned char) (block >> 24) & 0xff;
761 cmd[3] = (unsigned char) (block >> 16) & 0xff;
762 cmd[4] = (unsigned char) (block >> 8) & 0xff;
763 cmd[5] = (unsigned char) block & 0xff;
764 cmd[6] = cmd[9] = 0;
765 cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
766 cmd[8] = (unsigned char) this_count & 0xff;
767 }
768 else
769 {
770 if (this_count > 0xff)
771 this_count = 0xff;
772
773 cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
774 cmd[2] = (unsigned char) ((block >> 8) & 0xff);
775 cmd[3] = (unsigned char) block & 0xff;
776 cmd[4] = (unsigned char) this_count;
777 cmd[5] = 0;
778 }
779
780
781
782
783
784
785
786 SCpnt->transfersize = rscsi_disks[dev].sector_size;
787 SCpnt->underflow = this_count << 9;
788 scsi_do_cmd (SCpnt, (void *) cmd, buff,
789 this_count * rscsi_disks[dev].sector_size,
790 rw_intr,
791 (SCpnt->device->type == TYPE_DISK ?
792 SD_TIMEOUT : SD_MOD_TIMEOUT),
793 MAX_RETRIES);
794 }
795
796 static int check_scsidisk_media_change(dev_t full_dev){
797 int retval;
798 int target;
799 struct inode inode;
800 int flag = 0;
801
802 target = DEVICE_NR(MINOR(full_dev));
803
804 if (target >= sd_template.dev_max ||
805 !rscsi_disks[target].device) {
806 printk("SCSI disk request error: invalid device.\n");
807 return 0;
808 };
809
810 if(!rscsi_disks[target].device->removable) return 0;
811
812 inode.i_rdev = full_dev;
813 retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
814
815 if(retval){
816
817
818
819
820 rscsi_disks[target].device->changed = 1;
821 return 1;
822
823 };
824
825 retval = rscsi_disks[target].device->changed;
826 if(!flag) rscsi_disks[target].device->changed = 0;
827 return retval;
828 }
829
830 static void sd_init_done (Scsi_Cmnd * SCpnt)
831 {
832 struct request * req;
833
834 req = &SCpnt->request;
835 req->dev = 0xfffe;
836
837 if (req->sem != NULL) {
838 up(req->sem);
839 }
840 }
841
842 static int sd_init_onedisk(int i)
843 {
844 unsigned char cmd[10];
845 unsigned char *buffer;
846 char spintime;
847 int the_result, retries;
848 Scsi_Cmnd * SCpnt;
849
850
851
852
853
854 SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
855 buffer = (unsigned char *) scsi_malloc(512);
856
857 spintime = 0;
858
859
860 if (current == task[0]){
861 do{
862 cmd[0] = TEST_UNIT_READY;
863 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
864 memset ((void *) &cmd[2], 0, 8);
865 SCpnt->request.dev = 0xffff;
866 SCpnt->cmd_len = 0;
867 SCpnt->sense_buffer[0] = 0;
868 SCpnt->sense_buffer[2] = 0;
869
870 scsi_do_cmd (SCpnt,
871 (void *) cmd, (void *) buffer,
872 512, sd_init_done, SD_TIMEOUT,
873 MAX_RETRIES);
874
875 while(SCpnt->request.dev != 0xfffe);
876
877 the_result = SCpnt->result;
878
879
880
881 if(the_result && !rscsi_disks[i].device->removable &&
882 SCpnt->sense_buffer[2] == NOT_READY) {
883 int time1;
884 if(!spintime){
885 printk( "sd%c: Spinning up disk...", 'a' + i );
886 cmd[0] = START_STOP;
887 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
888 cmd[1] |= 1;
889 memset ((void *) &cmd[2], 0, 8);
890 cmd[4] = 1;
891 SCpnt->request.dev = 0xffff;
892 SCpnt->cmd_len = 0;
893 SCpnt->sense_buffer[0] = 0;
894 SCpnt->sense_buffer[2] = 0;
895
896 scsi_do_cmd (SCpnt,
897 (void *) cmd, (void *) buffer,
898 512, sd_init_done, SD_TIMEOUT,
899 MAX_RETRIES);
900
901 while(SCpnt->request.dev != 0xfffe);
902
903 spintime = jiffies;
904 };
905
906 time1 = jiffies;
907 while(jiffies < time1 + HZ);
908 printk( "." );
909 };
910 } while(the_result && spintime && spintime+5000 > jiffies);
911 if (spintime) {
912 if (the_result)
913 printk( "not responding...\n" );
914 else
915 printk( "ready\n" );
916 }
917 };
918
919
920 retries = 3;
921 do {
922 cmd[0] = READ_CAPACITY;
923 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
924 memset ((void *) &cmd[2], 0, 8);
925 memset ((void *) buffer, 0, 8);
926 SCpnt->request.dev = 0xffff;
927 SCpnt->cmd_len = 0;
928 SCpnt->sense_buffer[0] = 0;
929 SCpnt->sense_buffer[2] = 0;
930
931 scsi_do_cmd (SCpnt,
932 (void *) cmd, (void *) buffer,
933 8, sd_init_done, SD_TIMEOUT,
934 MAX_RETRIES);
935
936 if (current == task[0])
937 while(SCpnt->request.dev != 0xfffe);
938 else
939 if (SCpnt->request.dev != 0xfffe){
940 struct semaphore sem = MUTEX_LOCKED;
941 SCpnt->request.sem = &sem;
942 down(&sem);
943
944 while (SCpnt->request.dev != 0xfffe) schedule();
945 };
946
947 the_result = SCpnt->result;
948 retries--;
949
950 } while(the_result && retries);
951
952 SCpnt->request.dev = -1;
953
954 wake_up(&SCpnt->device->device_wait);
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971 if (the_result)
972 {
973 printk ("sd%c : READ CAPACITY failed.\n"
974 "sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
975 'a' + i, 'a' + i,
976 status_byte(the_result),
977 msg_byte(the_result),
978 host_byte(the_result),
979 driver_byte(the_result)
980 );
981 if (driver_byte(the_result) & DRIVER_SENSE)
982 printk("sd%c : extended sense code = %1x \n", 'a' + i, SCpnt->sense_buffer[2] & 0xf);
983 else
984 printk("sd%c : sense not available. \n", 'a' + i);
985
986 printk("sd%c : block size assumed to be 512 bytes, disk size 1GB. \n", 'a' + i);
987 rscsi_disks[i].capacity = 0x1fffff;
988 rscsi_disks[i].sector_size = 512;
989
990
991
992 if(rscsi_disks[i].device->removable &&
993 SCpnt->sense_buffer[2] == NOT_READY)
994 rscsi_disks[i].device->changed = 1;
995
996 }
997 else
998 {
999 rscsi_disks[i].capacity = (buffer[0] << 24) |
1000 (buffer[1] << 16) |
1001 (buffer[2] << 8) |
1002 buffer[3];
1003
1004 rscsi_disks[i].sector_size = (buffer[4] << 24) |
1005 (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
1006
1007 if (rscsi_disks[i].sector_size != 512 &&
1008 rscsi_disks[i].sector_size != 1024 &&
1009 rscsi_disks[i].sector_size != 256)
1010 {
1011 printk ("sd%c : unsupported sector size %d.\n",
1012 'a' + i, rscsi_disks[i].sector_size);
1013 if(rscsi_disks[i].device->removable){
1014 rscsi_disks[i].capacity = 0;
1015 } else {
1016 printk ("scsi : deleting disk entry.\n");
1017 rscsi_disks[i].device = NULL;
1018 sd_template.nr_dev--;
1019 return i;
1020 };
1021 }
1022 {
1023
1024
1025
1026
1027
1028 int m;
1029 int hard_sector = rscsi_disks[i].sector_size;
1030
1031 for (m=i<<4; m<((i+1)<<4); m++){
1032 sd_hardsizes[m] = hard_sector;
1033 }
1034 printk ("SCSI Hardware sector size is %d bytes on device sd%c\n"
1035 ,hard_sector,i+'a');
1036 }
1037 if(rscsi_disks[i].sector_size == 1024)
1038 rscsi_disks[i].capacity <<= 1;
1039 if(rscsi_disks[i].sector_size == 256)
1040 rscsi_disks[i].capacity >>= 1;
1041 }
1042
1043 rscsi_disks[i].ten = 1;
1044 rscsi_disks[i].remap = 1;
1045 scsi_free(buffer, 512);
1046 return i;
1047 }
1048
1049
1050
1051
1052
1053
1054
1055 static void sd_init()
1056 {
1057 int i;
1058 static int sd_registered = 0;
1059
1060 if (sd_template.dev_noticed == 0) return;
1061
1062 if(!sd_registered) {
1063 if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
1064 printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
1065 return;
1066 }
1067 sd_registered++;
1068 }
1069
1070
1071 if(rscsi_disks) return;
1072
1073 sd_template.dev_max = sd_template.dev_noticed + SD_EXTRA_DEVS;
1074
1075 rscsi_disks = (Scsi_Disk *)
1076 scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC);
1077 memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));
1078
1079 sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1080 sizeof(int), GFP_ATOMIC);
1081 memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));
1082
1083 sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1084 sizeof(int), GFP_ATOMIC);
1085
1086 sd_hardsizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
1087 sizeof(struct hd_struct), GFP_ATOMIC);
1088
1089 for(i=0;i<(sd_template.dev_max << 4);i++){
1090 sd_blocksizes[i] = 1024;
1091 sd_hardsizes[i] = 512;
1092 }
1093 blksize_size[MAJOR_NR] = sd_blocksizes;
1094 hardsect_size[MAJOR_NR] = sd_hardsizes;
1095 sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
1096 sizeof(struct hd_struct),
1097 GFP_ATOMIC);
1098
1099
1100 sd_gendisk.max_nr = sd_template.dev_max;
1101 sd_gendisk.part = sd;
1102 sd_gendisk.sizes = sd_sizes;
1103 sd_gendisk.real_devices = (void *) rscsi_disks;
1104
1105 }
1106
1107 static void sd_finish()
1108 {
1109 int i;
1110
1111 blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
1112
1113 sd_gendisk.next = gendisk_head;
1114 gendisk_head = &sd_gendisk;
1115
1116 for (i = 0; i < sd_template.dev_max; ++i)
1117 if (!rscsi_disks[i].capacity &&
1118 rscsi_disks[i].device)
1119 {
1120 i = sd_init_onedisk(i);
1121 if (scsi_loadable_module_flag
1122 && !rscsi_disks[i].has_part_table) {
1123 sd_sizes[i << 4] = rscsi_disks[i].capacity;
1124 revalidate_scsidisk(i << 4, 0);
1125 }
1126 rscsi_disks[i].has_part_table = 1;
1127 }
1128
1129
1130
1131
1132 if(rscsi_disks[0].device && rscsi_disks[0].device->host->sg_tablesize)
1133 read_ahead[MAJOR_NR] = 120;
1134
1135 else
1136 read_ahead[MAJOR_NR] = 4;
1137
1138 return;
1139 }
1140
1141 static int sd_detect(Scsi_Device * SDp){
1142 if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1143
1144 printk("Detected scsi disk sd%c at scsi%d, id %d, lun %d\n",
1145 'a'+ (sd_template.dev_noticed++),
1146 SDp->host->host_no , SDp->id, SDp->lun);
1147
1148 return 1;
1149
1150 }
1151
1152 static int sd_attach(Scsi_Device * SDp){
1153 Scsi_Disk * dpnt;
1154 int i;
1155
1156 if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1157
1158 if(sd_template.nr_dev >= sd_template.dev_max) {
1159 SDp->attached--;
1160 return 1;
1161 }
1162
1163 for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
1164 if(!dpnt->device) break;
1165
1166 if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)");
1167
1168 SDp->scsi_request_fn = do_sd_request;
1169 rscsi_disks[i].device = SDp;
1170 rscsi_disks[i].has_part_table = 0;
1171 sd_template.nr_dev++;
1172 sd_gendisk.nr_real++;
1173 return 0;
1174 }
1175
/* Shorthand macros used by revalidate_scsidisk() below; each expands in
 * terms of the local variable `target` defined in that function. */
#define DEVICE_BUSY rscsi_disks[target].device->busy
#define USAGE rscsi_disks[target].device->access_count
#define CAPACITY rscsi_disks[target].capacity
#define MAYBE_REINIT sd_init_onedisk(target)
#define GENDISK_STRUCT sd_gendisk
1181
1182
1183
1184
1185
1186
1187
1188
/*
 * Re-read a disk's size and partition table (e.g. after a media change).
 * Fails with -EBUSY if the device is busy or opened by more than `maxusage`
 * users; otherwise flushes and invalidates every partition minor, re-probes
 * the disk, and rebuilds the partition table.
 */
int revalidate_scsidisk(int dev, int maxusage){
	int target, major;
	struct gendisk * gdev;
	unsigned long flags;
	int max_p;
	int start;
	int i;

	target = DEVICE_NR(MINOR(dev));
	gdev = &GENDISK_STRUCT;

	/* Interrupts masked while testing and claiming the busy flag, since
	 * completion handlers run from interrupt context. */
	save_flags(flags);
	cli();
	if (DEVICE_BUSY || USAGE > maxusage) {
		restore_flags(flags);
		printk("Device busy for revalidation (usage=%d)\n", USAGE);
		return -EBUSY;
	};
	DEVICE_BUSY = 1;
	restore_flags(flags);

	max_p = gdev->max_p;
	start = target << gdev->minor_shift;
	major = MAJOR_NR << 8;	/* major in the high byte of the dev number */

	/* Flush and wipe every partition minor of this unit. */
	for (i=max_p - 1; i >=0 ; i--) {
		sync_dev(major | start | i);
		invalidate_inodes(major | start | i);
		invalidate_buffers(major | start | i);
		gdev->part[start+i].start_sect = 0;
		gdev->part[start+i].nr_sects = 0;
	};

#ifdef MAYBE_REINIT
	MAYBE_REINIT;	/* re-probe capacity / sector size */
#endif

	/* Restore the whole-disk minor and re-read the partition table. */
	gdev->part[start].nr_sects = CAPACITY;
	resetup_one_dev(gdev, target);

	DEVICE_BUSY = 0;
	return 0;
}
1232
/* file_operations revalidate hook: full revalidate with zero allowed users. */
static int fop_revalidate_scsidisk(dev_t dev){
	return revalidate_scsidisk(dev, 0);
}
1236
1237
/*
 * Template detach hook: unbind a device from its rscsi_disks slot, flushing
 * and invalidating all of its partition minors first.
 * Note: the inner partition loop reuses `i`, clobbering the outer disk
 * index — harmless only because the function returns from inside the match.
 */
static void sd_detach(Scsi_Device * SDp)
{
	Scsi_Disk * dpnt;
	int i;
	int max_p;
	int major;
	int start;

	for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++)
		if(dpnt->device == SDp) {

			/* Flush and wipe every partition minor of this unit. */
			max_p = sd_gendisk.max_p;
			start = i << sd_gendisk.minor_shift;
			major = MAJOR_NR << 8;

			for (i=max_p - 1; i >=0 ; i--) {
				sync_dev(major | start | i);
				invalidate_inodes(major | start | i);
				invalidate_buffers(major | start | i);
				sd_gendisk.part[start+i].start_sect = 0;
				sd_gendisk.part[start+i].nr_sects = 0;
				sd_sizes[start+i] = 0;
			};

			/* Release the slot and drop all counters. */
			dpnt->has_part_table = 0;
			dpnt->device = NULL;
			dpnt->capacity = 0;
			SDp->attached--;
			sd_template.dev_noticed--;
			sd_template.nr_dev--;
			sd_gendisk.nr_real--;
			return;
		}
	return;
}