root/drivers/scsi/sd.c


DEFINITIONS

This source file includes the following definitions.
  1. sd_open
  2. sd_release
  3. sd_geninit
  4. rw_intr
  5. do_sd_request
  6. requeue_sd_request
  7. check_scsidisk_media_change
  8. sd_init_done
  9. sd_init_onedisk
  10. sd_init
  11. sd_finish
  12. sd_detect
  13. sd_attach
  14. revalidate_scsidisk
  15. fop_revalidate_scsidisk
  16. sd_detach
  17. init_module
  18. cleanup_module

   1 /*
   2  *      sd.c Copyright (C) 1992 Drew Eckhardt 
   3  *           Copyright (C) 1993, 1994, 1995 Eric Youngdale
   4  *
   5  *      Linux scsi disk driver
   6  *              Initial versions: Drew Eckhardt 
   7  *              Subsequent revisions: Eric Youngdale
   8  *
   9  *      <drew@colorado.edu>
  10  *
  11  *       Modified by Eric Youngdale ericy@cais.com to
  12  *       add scatter-gather, multiple outstanding request, and other
  13  *       enhancements.
  14  *
  15  *       Modified by Eric Youngdale eric@aib.com to support loadable
  16  *       low-level scsi drivers.
  17  */
  18 
  19 #include <linux/module.h>
  20 #ifdef MODULE
  21 /*
  22  * This is a variable in scsi.c that is set when we are processing something
  23  * after boot time.  By definition, this is true when we are a loadable module
  24  * ourselves.
  25  */
  26 #define MODULE_FLAG 1
  27 #else
  28 #define MODULE_FLAG scsi_loadable_module_flag
  29 #endif /* MODULE */
  30 
  31 #include <linux/fs.h>
  32 #include <linux/kernel.h>
  33 #include <linux/sched.h>
  34 #include <linux/mm.h>
  35 #include <linux/string.h>
  36 #include <linux/errno.h>
  37 #include <asm/system.h>
  38 
  39 #define MAJOR_NR SCSI_DISK_MAJOR
  40 #include <linux/blk.h>
  41 #include "scsi.h"
  42 #include "hosts.h"
  43 #include "sd.h"
  44 #include "scsi_ioctl.h"
  45 #include "constants.h"
  46 
  47 #include <linux/genhd.h>
  48 
  49 /*
  50  *  static const char RCSid[] = "$Header:";
  51  */
  52 
  53 #define MAX_RETRIES 5
  54 
  55 /*
   56  *  Time out (in jiffies) for disks and Magneto-opticals (which are slower).
  57  */
  58 
  59 #define SD_TIMEOUT (7 * HZ)
  60 #define SD_MOD_TIMEOUT (8 * HZ)
  61 
  62 #define CLUSTERABLE_DEVICE(SC) (SC->host->use_clustering && \
  63                                 SC->device->type != TYPE_MOD)
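      /*
       * A request is "clusterable" (adjacent buffer heads may be merged into a
       * single scatter-gather segment) only when the host adapter asks for
       * clustering and the device is not a magneto-optical unit.
       */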
  64 
  65 struct hd_struct * sd;
  66 
  67 Scsi_Disk * rscsi_disks = NULL;
  68 static int * sd_sizes;
  69 static int * sd_blocksizes;
  70 static int * sd_hardsizes;              /* Hardware sector size */
  71 
  72 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
  73 
  74 static int check_scsidisk_media_change(kdev_t);
  75 static int fop_revalidate_scsidisk(kdev_t);
  76 
   77 static int sd_init_onedisk(int);
  78 
  79 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
  80 
  81 static int sd_init(void);
  82 static void sd_finish(void);
  83 static int sd_attach(Scsi_Device *);
  84 static int sd_detect(Scsi_Device *);
  85 static void sd_detach(Scsi_Device *);
  86 
  87 struct Scsi_Device_Template sd_template = 
  88 { NULL, "disk", "sd", NULL, TYPE_DISK, 
  89       SCSI_DISK_MAJOR, 0, 0, 0, 1,
  90       sd_detect, sd_init,
  91       sd_finish, sd_attach, sd_detach
  92 };
  93 
  94 static int sd_open(struct inode * inode, struct file * filp)
  95 {
  96     int target;
  97     target =  DEVICE_NR(inode->i_rdev);
  98     
  99     if(target >= sd_template.dev_max || !rscsi_disks[target].device)
 100         return -ENXIO;   /* No such device */
 101     
 102     /* 
  103      * Make sure that only one process can do a check_disk_change at one time.
 104      * This is also used to lock out further access when the partition table 
 105      * is being re-read. 
 106      */
 107     
 108     while (rscsi_disks[target].device->busy)
 109     barrier();   
 110     if(rscsi_disks[target].device->removable) {
 111         check_disk_change(inode->i_rdev);
 112         
 113         /*
 114          * If the drive is empty, just let the open fail.
 115          */
 116         if ( !rscsi_disks[target].ready ) {
 117             return -ENXIO;
 118         }
 119 
 120         /*
  121          * Similarly, if the device has the write protect tab set,
 122          * have the open fail if the user expects to be able to write
 123          * to the thing.
 124          */
 125         if ( (rscsi_disks[target].write_prot) && (filp->f_mode & 2) ) { 
 126             return -EROFS;
 127         }
 128 
 129         if(!rscsi_disks[target].device->access_count)
 130             sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
 131     };
 132 
 133     /*
 134      * See if we are requesting a non-existent partition.  Do this
 135      * after checking for disk change.
 136      */
 137     if(sd_sizes[MINOR(inode->i_rdev)] == 0)
 138         return -ENXIO;
 139     
 140     rscsi_disks[target].device->access_count++;
 141     if (rscsi_disks[target].device->host->hostt->usage_count)
 142         (*rscsi_disks[target].device->host->hostt->usage_count)++;
 143     if(sd_template.usage_count) (*sd_template.usage_count)++;
 144     return 0;
 145 }
 146 
 147 static void sd_release(struct inode * inode, struct file * file)
 148 {
 149     int target;
 150     sync_dev(inode->i_rdev);
 151     
 152     target =  DEVICE_NR(inode->i_rdev);
 153     
 154     rscsi_disks[target].device->access_count--;
 155     if (rscsi_disks[target].device->host->hostt->usage_count)
 156         (*rscsi_disks[target].device->host->hostt->usage_count)--;
 157     if(sd_template.usage_count) (*sd_template.usage_count)--;
 158     
 159     if(rscsi_disks[target].device->removable) {
 160         if(!rscsi_disks[target].device->access_count)
 161             sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
 162     }
 163 }
 164 
 165 static void sd_geninit(struct gendisk *);
 166 
 167 static struct file_operations sd_fops = {
 168     NULL,                        /* lseek - default */
 169     block_read,                  /* read - general block-dev read */
 170     block_write,                 /* write - general block-dev write */
 171     NULL,                        /* readdir - bad */
 172     NULL,                        /* select */
 173     sd_ioctl,                    /* ioctl */
 174     NULL,                        /* mmap */
 175     sd_open,                     /* open code */
 176     sd_release,                  /* release */
 177     block_fsync,                 /* fsync */
 178     NULL,                        /* fasync */
 179     check_scsidisk_media_change, /* Disk change */
 180     fop_revalidate_scsidisk      /* revalidate */
 181 };
 182 
 183 static struct gendisk sd_gendisk = {
 184     MAJOR_NR,                    /* Major number */
 185     "sd",                        /* Major name */
 186     4,                           /* Bits to shift to get real from partition */
 187     1 << 4,                      /* Number of partitions per real */
 188     0,                           /* maximum number of real */
 189     sd_geninit,                  /* init function */
 190     NULL,                        /* hd struct */
 191     NULL,                        /* block sizes */
 192     0,                           /* number */
 193     NULL,                        /* internal */
 194     NULL                         /* next */
 195 };
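      /*
       * With a minor_shift of 4 there are sixteen minors per disk: minor N maps
       * to drive (N >> 4) and partition (N & 0x0f), so minor 0 is the whole
       * first disk and minor 17 is the first partition of the second disk.
       */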
 196 
 197 static void sd_geninit (struct gendisk *ignored)
 198 {
 199     int i;
 200     
 201     for (i = 0; i < sd_template.dev_max; ++i)
 202         if(rscsi_disks[i].device) 
 203             sd[i << 4].nr_sects = rscsi_disks[i].capacity;
 204 #if 0
 205     /* No longer needed - we keep track of this as we attach/detach */
 206     sd_gendisk.nr_real = sd_template.dev_max;
 207 #endif
 208 }
 209 
 210 /*
 211  * rw_intr is the interrupt routine for the device driver.  It will
 212  * be notified on the end of a SCSI read / write, and
  213  * will take one of several actions based on success or failure.
 214  */
 215 
 216 static void rw_intr (Scsi_Cmnd *SCpnt)
 217 {
 218     int result = SCpnt->result;
 219     int this_count = SCpnt->bufflen >> 9;
 220     
 221 #ifdef DEBUG
 222     printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.rq_dev), 
 223            SCpnt->host->host_no, result);
 224 #endif
 225     
 226     /*
 227      * First case : we assume that the command succeeded.  One of two things 
 228      * will happen here.  Either we will be finished, or there will be more
 229      * sectors that we were unable to read last time.
 230      */
 231 
 232     if (!result) {
 233         
 234 #ifdef DEBUG
 235         printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.rq_dev),
 236                SCpnt->request.nr_sectors);
 237         printk("use_sg is %d\n ",SCpnt->use_sg);
 238 #endif
 239         if (SCpnt->use_sg) {
 240             struct scatterlist * sgpnt;
 241             int i;
 242             sgpnt = (struct scatterlist *) SCpnt->buffer;
 243             for(i=0; i<SCpnt->use_sg; i++) {
 244 #ifdef DEBUG
 245                 printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address, 
 246                        sgpnt[i].length);
 247 #endif
 248                 if (sgpnt[i].alt_address) {
 249                     if (SCpnt->request.cmd == READ)
 250                         memcpy(sgpnt[i].alt_address, sgpnt[i].address, 
 251                                sgpnt[i].length);
 252                     scsi_free(sgpnt[i].address, sgpnt[i].length);
 253                 };
 254             };
 255 
 256             /* Free list of scatter-gather pointers */
 257             scsi_free(SCpnt->buffer, SCpnt->sglist_len);  
 258         } else {
 259             if (SCpnt->buffer != SCpnt->request.buffer) {
 260 #ifdef DEBUG
 261                 printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 262                        SCpnt->bufflen);
 263 #endif  
 264                 if (SCpnt->request.cmd == READ)
 265                     memcpy(SCpnt->request.buffer, SCpnt->buffer,
 266                            SCpnt->bufflen);
 267                 scsi_free(SCpnt->buffer, SCpnt->bufflen);
 268             };
 269         };
 270         /*
 271          * If multiple sectors are requested in one buffer, then
 272          * they will have been finished off by the first command.
 273          * If not, then we have a multi-buffer command.
 274          */
 275         if (SCpnt->request.nr_sectors > this_count)
 276         {
 277             SCpnt->request.errors = 0;
 278             
 279             if (!SCpnt->request.bh)
 280             {
 281 #ifdef DEBUG
 282                 printk("sd%c : handling page request, no buffer\n",
 283                        'a' + MINOR(SCpnt->request.rq_dev));
 284 #endif
 285                 /*
 286                  * The SCpnt->request.nr_sectors field is always done in 
 287                  * 512 byte sectors, even if this really isn't the case.
 288                  */
 289                 panic("sd.c: linked page request (%lx %x)",
 290                       SCpnt->request.sector, this_count);
 291             }
 292         }
 293         SCpnt = end_scsi_request(SCpnt, 1, this_count);
 294         requeue_sd_request(SCpnt);
 295         return;
 296     }
 297     
 298     /* Free up any indirection buffers we allocated for DMA purposes. */
 299     if (SCpnt->use_sg) {
 300         struct scatterlist * sgpnt;
 301         int i;
 302         sgpnt = (struct scatterlist *) SCpnt->buffer;
 303         for(i=0; i<SCpnt->use_sg; i++) {
 304 #ifdef DEBUG
 305             printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 306                    SCpnt->bufflen);
 307 #endif
 308             if (sgpnt[i].alt_address) {
 309                 scsi_free(sgpnt[i].address, sgpnt[i].length);
 310             };
 311         };
 312         scsi_free(SCpnt->buffer, SCpnt->sglist_len);  /* Free list of scatter-gather pointers */
 313     } else {
 314 #ifdef DEBUG
 315         printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 316                SCpnt->bufflen);
 317 #endif
 318         if (SCpnt->buffer != SCpnt->request.buffer)
 319             scsi_free(SCpnt->buffer, SCpnt->bufflen);
 320     };
 321     
 322     /*
 323      * Now, if we were good little boys and girls, Santa left us a request
 324      * sense buffer.  We can extract information from this, so we
 325      * can choose a block to remap, etc.
 326      */
 327 
 328     if (driver_byte(result) != 0) {
 329         if (suggestion(result) == SUGGEST_REMAP) {
 330 #ifdef REMAP
 331             /*
 332              * Not yet implemented.  A read will fail after being remapped,
 333              * a write will call the strategy routine again.
 334              */
  335             if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].remap)
 336             {
 337                 result = 0;
 338             }
 339             else
 340 #endif
 341         }
 342         
 343         if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
 344             if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
 345                 if(rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->removable) {
 346                     /* detected disc change.  set a bit and quietly refuse
 347                      * further access.
 348                      */  
 349                     rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->changed = 1;
 350                     SCpnt = end_scsi_request(SCpnt, 0, this_count);
 351                     requeue_sd_request(SCpnt);
 352                     return;
 353                 }
 354                 else
 355                 {
 356                     /*
 357                      * Must have been a power glitch, or a bus reset.
 358                      * Could not have been a media change, so we just retry
 359                      * the request and see what happens.
 360                      */
 361                     requeue_sd_request(SCpnt);
 362                     return;
 363                 }
 364             }
 365         }
 366         
 367         
 368         /* If we had an ILLEGAL REQUEST returned, then we may have
  369          * performed an unsupported command.  The only case where this should
  370          * happen is a ten byte read on a device where only a six byte read is
  371          * supported.  Also, on a system where READ CAPACITY failed, we may
  372          * have read past the end of the disk.
 373          */
 374 
 375         if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
 376             if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten) {
 377                 rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten = 0;
 378                 requeue_sd_request(SCpnt);
 379                 result = 0;
 380             } else {
 381                 /* ???? */
 382             }
 383         }
 384     }  /* driver byte != 0 */
 385     if (result) {
 386         printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n",
 387                rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->host->host_no,
 388                rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->channel,
 389            rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->id,
 390              rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->lun, result);
 391         
 392         if (driver_byte(result) & DRIVER_SENSE)
 393             print_sense("sd", SCpnt);
 394         SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
 395         requeue_sd_request(SCpnt);
 396         return;
 397     }
 398 }
 399 
 400 /*
 401  * requeue_sd_request() is the request handler function for the sd driver.
 402  * Its function in life is to take block device requests, and translate
 403  * them to SCSI commands.
 404  */
 405 
 406 static void do_sd_request (void)
 407 {
 408     Scsi_Cmnd * SCpnt = NULL;
 409     Scsi_Device * SDev;
 410     struct request * req = NULL;
 411     unsigned long flags;
 412     int flag = 0;
 413     
 414     save_flags(flags);
 415     while (1==1){
 416         cli();
 417         if (CURRENT != NULL && CURRENT->rq_status == RQ_INACTIVE) {
 418             restore_flags(flags);
 419             return;
 420         };
 421         
 422         INIT_SCSI_REQUEST;
 423         SDev = rscsi_disks[DEVICE_NR(CURRENT->rq_dev)].device;
 424         
 425         /*
 426          * I am not sure where the best place to do this is.  We need
 427          * to hook in a place where we are likely to come if in user
 428          * space.
 429          */
 430         if( SDev->was_reset )
 431         {
 432             /*
 433              * We need to relock the door, but we might
 434              * be in an interrupt handler.  Only do this
 435              * from user space, since we do not want to
 436              * sleep from an interrupt.
 437              */
 438             if( SDev->removable && !intr_count )
 439             {
 440                 scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0);
 441             }
 442             SDev->was_reset = 0;
 443         }
 444                 
 445         /* We have to be careful here. allocate_device will get a free pointer,
 446          * but there is no guarantee that it is queueable.  In normal usage, 
 447          * we want to call this, because other types of devices may have the 
 448          * host all tied up, and we want to make sure that we have at least 
 449          * one request pending for this type of device. We can also come 
 450          * through here while servicing an interrupt, because of the need to 
 451          * start another command. If we call allocate_device more than once, 
 452          * then the system can wedge if the command is not queueable. The 
 453          * request_queueable function is safe because it checks to make sure 
 454          * that the host is able to take another command before it returns
 455          * a pointer.  
 456          */
 457 
 458         if (flag++ == 0)
 459             SCpnt = allocate_device(&CURRENT,
 460                                     rscsi_disks[DEVICE_NR(CURRENT->rq_dev)].device, 0); 
 461         else SCpnt = NULL;
 462         
 463         /*
 464          * The following restore_flags leads to latency problems.  FIXME.
 465          * Using a "sti()" gets rid of the latency problems but causes
 466          * race conditions and crashes.
 467          */
 468         restore_flags(flags);
 469 
 470         /* This is a performance enhancement. We dig down into the request 
 471          * list and try and find a queueable request (i.e. device not busy, 
  472          * and host able to accept another command).  If we find one, then we 
 473          * queue it. This can make a big difference on systems with more than 
 474          * one disk drive.  We want to have the interrupts off when monkeying 
 475          * with the request list, because otherwise the kernel might try and 
 476          * slip in a request in between somewhere. 
 477          */
 478 
 479         if (!SCpnt && sd_template.nr_dev > 1){
 480             struct request *req1;
 481             req1 = NULL;
 482             cli();
 483             req = CURRENT;
 484             while(req){
 485                 SCpnt = request_queueable(req, rscsi_disks[DEVICE_NR(req->rq_dev)].device);
 486                 if(SCpnt) break;
 487                 req1 = req;
 488                 req = req->next;
 489             };
 490             if (SCpnt && req->rq_status == RQ_INACTIVE) {
 491                 if (req == CURRENT) 
 492                     CURRENT = CURRENT->next;
 493                 else
 494                     req1->next = req->next;
 495             };
 496             restore_flags(flags);
 497         };
 498         
 499         if (!SCpnt) return; /* Could not find anything to do */
 500         
 501         /* Queue command */
 502         requeue_sd_request(SCpnt);
 503     };  /* While */
 504 }    
 505 
 506 static void requeue_sd_request (Scsi_Cmnd * SCpnt)
 507 {
 508     int dev, devm, block, this_count;
 509     unsigned char cmd[10];
 510     int bounce_size, contiguous;
 511     int max_sg;
 512     struct buffer_head * bh, *bhp;
 513     char * buff, *bounce_buffer;
 514     
 515  repeat:
 516     
 517     if(!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) {
 518         do_sd_request();
 519         return;
 520     }
 521     
 522     devm =  MINOR(SCpnt->request.rq_dev);
 523     dev = DEVICE_NR(SCpnt->request.rq_dev);
 524 
 525     block = SCpnt->request.sector;
 526     this_count = 0;
 527 
 528 #ifdef DEBUG
 529     printk("Doing sd request, dev = %d, block = %d\n", devm, block);
 530 #endif
 531     
 532     if (devm >= (sd_template.dev_max << 4) || 
 533         !rscsi_disks[dev].device ||
 534         block + SCpnt->request.nr_sectors > sd[devm].nr_sects)
 535     {
 536         SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 537         goto repeat;
 538     }
 539     
 540     block += sd[devm].start_sect;
 541     
 542     if (rscsi_disks[dev].device->changed)
 543     {
 544         /*
 545          * quietly refuse to do anything to a changed disc until the changed 
 546          * bit has been reset
 547          */
 548         /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
 549         SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 550         goto repeat;
 551     }
 552     
 553 #ifdef DEBUG
 554     printk("sd%c : real dev = /dev/sd%c, block = %d\n", 
 555            'a' + devm, dev, block);
 556 #endif
 557     
 558     /*
 559      * If we have a 1K hardware sectorsize, prevent access to single
 560      * 512 byte sectors.  In theory we could handle this - in fact
 561      * the scsi cdrom driver must be able to handle this because
 562      * we typically use 1K blocksizes, and cdroms typically have
 563      * 2K hardware sectorsizes.  Of course, things are simpler
 564      * with the cdrom, since it is read-only.  For performance
 565      * reasons, the filesystems should be able to handle this
 566      * and not force the scsi disk driver to use bounce buffers
 567      * for this.
 568      */
 569     if (rscsi_disks[dev].sector_size == 1024)
 570         if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
 571             printk("sd.c:Bad block number requested");
 572             SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 573             goto repeat;
 574         }
 575     
 576     switch (SCpnt->request.cmd)
 577     {
 578     case WRITE :
 579         if (!rscsi_disks[dev].device->writeable)
 580         {
 581             SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 582             goto repeat;
 583         }
 584         cmd[0] = WRITE_6;
 585         break;
 586     case READ :
 587         cmd[0] = READ_6;
 588         break;
 589     default :
 590         panic ("Unknown sd command %d\n", SCpnt->request.cmd);
 591     }
 592     
 593     SCpnt->this_count = 0;
 594     
 595     /* If the host adapter can deal with very large scatter-gather
 596      * requests, it is a waste of time to cluster 
 597      */
 598     contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
 599     bounce_buffer = NULL;
 600     bounce_size = (SCpnt->request.nr_sectors << 9);
 601     
 602     /* First see if we need a bounce buffer for this request. If we do, make 
 603      * sure that we can allocate a buffer. Do not waste space by allocating 
 604      * a bounce buffer if we are straddling the 16Mb line 
 605      */ 
 606     if (contiguous && SCpnt->request.bh &&
 607         ((long) SCpnt->request.bh->b_data) 
 608         + (SCpnt->request.nr_sectors << 9) - 1 > ISA_DMA_THRESHOLD 
 609         && SCpnt->host->unchecked_isa_dma) {
 610         if(((long) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
 611             bounce_buffer = (char *) scsi_malloc(bounce_size);
 612         if(!bounce_buffer) contiguous = 0;
 613     };
 614     
 615     if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
 616         for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp, 
 617             bhp = bhp->b_reqnext) {
 618             if(!CONTIGUOUS_BUFFERS(bh,bhp)) { 
 619                 if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
 620                 contiguous = 0;
 621                 break;
 622             } 
 623         };
 624     if (!SCpnt->request.bh || contiguous) {
 625         
 626         /* case of page request (i.e. raw device), or unlinked buffer */
 627         this_count = SCpnt->request.nr_sectors;
 628         buff = SCpnt->request.buffer;
 629         SCpnt->use_sg = 0;
 630         
 631     } else if (SCpnt->host->sg_tablesize == 0 ||
 632                (need_isa_buffer && dma_free_sectors <= 10)) {
 633         
 634         /* Case of host adapter that cannot scatter-gather.  We also
 635          * come here if we are running low on DMA buffer memory.  We set
 636          * a threshold higher than that we would need for this request so
 637          * we leave room for other requests.  Even though we would not need
 638          * it all, we need to be conservative, because if we run low enough
 639          * we have no choice but to panic. 
 640          */
 641         if (SCpnt->host->sg_tablesize != 0 &&
 642             need_isa_buffer && 
 643             dma_free_sectors <= 10)
 644             printk("Warning: SCSI DMA buffer space running low.  Using non scatter-gather I/O.\n");
 645         
 646         this_count = SCpnt->request.current_nr_sectors;
 647         buff = SCpnt->request.buffer;
 648         SCpnt->use_sg = 0;
 649         
 650     } else {
 651         
 652         /* Scatter-gather capable host adapter */
 653         struct scatterlist * sgpnt;
 654         int count, this_count_max;
 655         int counted;
 656         
 657         bh = SCpnt->request.bh;
 658         this_count = 0;
 659         this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
 660         count = 0;
 661         bhp = NULL;
 662         while(bh) {
 663             if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
 664             if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
 665                !CLUSTERABLE_DEVICE(SCpnt) ||
 666                (SCpnt->host->unchecked_isa_dma &&
 667                 ((unsigned long) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
 668                 if (count < SCpnt->host->sg_tablesize) count++;
 669                 else break;
 670             };
 671             this_count += (bh->b_size >> 9);
 672             bhp = bh;
 673             bh = bh->b_reqnext;
 674         };
 675 #if 0
 676         if(SCpnt->host->unchecked_isa_dma &&
 677            ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
 678 #endif
 679         SCpnt->use_sg = count;  /* Number of chains */
 680         count = 512;/* scsi_malloc can only allocate in chunks of 512 bytes */
 681         while( count < (SCpnt->use_sg * sizeof(struct scatterlist))) 
 682             count = count << 1;
 683         SCpnt->sglist_len = count;
 684         max_sg = count / sizeof(struct scatterlist);
 685         if(SCpnt->host->sg_tablesize < max_sg) 
 686             max_sg = SCpnt->host->sg_tablesize;
 687         sgpnt = (struct scatterlist * ) scsi_malloc(count);
 688         if (!sgpnt) {
 689             printk("Warning - running *really* short on DMA buffers\n");
 690             SCpnt->use_sg = 0;    /* No memory left - bail out */
 691             this_count = SCpnt->request.current_nr_sectors;
 692             buff = SCpnt->request.buffer;
 693         } else {
 694             memset(sgpnt, 0, count);  /* Zero so it is easy to fill, but only
 695                                        * if memory is available 
 696                                        */
 697             buff = (char *) sgpnt;
 698             counted = 0;
 699             for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
 700                 count < SCpnt->use_sg && bh; 
 701                 count++, bh = bhp) {
 702                 
 703                 bhp = bh->b_reqnext;
 704                 
 705                 if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
 706                 sgpnt[count].length += bh->b_size;
 707                 counted += bh->b_size >> 9;
 708                 
 709                 if (((long) sgpnt[count].address) + sgpnt[count].length - 1 > 
 710                     ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
 711                     !sgpnt[count].alt_address) {
 712                     sgpnt[count].alt_address = sgpnt[count].address;
 713                     /* We try and avoid exhausting the DMA pool, since it is 
 714                      * easier to control usage here. In other places we might 
 715                      * have a more pressing need, and we would be screwed if 
 716                      * we ran out */
 717                     if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
 718                         sgpnt[count].address = NULL;
 719                     } else {
 720                         sgpnt[count].address = 
 721                             (char *) scsi_malloc(sgpnt[count].length);
 722                     };
 723                     /* If we start running low on DMA buffers, we abort the 
 724                      * scatter-gather operation, and free all of the memory 
 725                      * we have allocated.  We want to ensure that all scsi 
 726                      * operations are able to do at least a non-scatter/gather
 727                      * operation */
 728                     if(sgpnt[count].address == NULL){ /* Out of dma memory */
 729 #if 0
 730                         printk("Warning: Running low on SCSI DMA buffers");
 731                         /* Try switching back to a non s-g operation. */
 732                         while(--count >= 0){
 733                             if(sgpnt[count].alt_address) 
 734                                 scsi_free(sgpnt[count].address, 
 735                                           sgpnt[count].length);
 736                         };
 737                         this_count = SCpnt->request.current_nr_sectors;
 738                         buff = SCpnt->request.buffer;
 739                         SCpnt->use_sg = 0;
 740                         scsi_free(sgpnt, SCpnt->sglist_len);
 741 #endif
 742                         SCpnt->use_sg = count;
 743                         this_count = counted -= bh->b_size >> 9;
 744                         break;
 745                     };
 746                     
 747                 };
 748                 
 749                 /* Only cluster buffers if we know that we can supply DMA 
 750                  * buffers large enough to satisfy the request. Do not cluster
 751                  * a new request if this would mean that we suddenly need to 
 752                  * start using DMA bounce buffers */
 753                 if(bhp && CONTIGUOUS_BUFFERS(bh,bhp) 
 754                    && CLUSTERABLE_DEVICE(SCpnt)) {
 755                     char * tmp;
 756                     
 757                     if (((long) sgpnt[count].address) + sgpnt[count].length +
 758                         bhp->b_size - 1 > ISA_DMA_THRESHOLD && 
 759                         (SCpnt->host->unchecked_isa_dma) &&
 760                         !sgpnt[count].alt_address) continue;
 761                     
 762                     if(!sgpnt[count].alt_address) {count--; continue; }
 763                     if(dma_free_sectors > 10)
 764                         tmp = (char *) scsi_malloc(sgpnt[count].length 
 765                                                    + bhp->b_size);
 766                     else {
 767                         tmp = NULL;
 768                         max_sg = SCpnt->use_sg;
 769                     };
 770                     if(tmp){
 771                         scsi_free(sgpnt[count].address, sgpnt[count].length);
 772                         sgpnt[count].address = tmp;
 773                         count--;
 774                         continue;
 775                     };
 776                     
 777                     /* If we are allowed another sg chain, then increment 
 778                      * counter so we can insert it.  Otherwise we will end 
  779                      * up truncating */
 780                     
 781                     if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
 782                 };  /* contiguous buffers */
 783             }; /* for loop */
 784             
 785             /* This is actually how many we are going to transfer */
 786             this_count = counted; 
 787             
 788             if(count < SCpnt->use_sg || SCpnt->use_sg 
 789                > SCpnt->host->sg_tablesize){
 790                 bh = SCpnt->request.bh;
 791                 printk("Use sg, count %d %x %d\n", 
 792                        SCpnt->use_sg, count, dma_free_sectors);
 793                 printk("maxsg = %x, counted = %d this_count = %d\n", 
 794                        max_sg, counted, this_count);
 795                 while(bh){
 796                     printk("[%p %lx] ", bh->b_data, bh->b_size);
 797                     bh = bh->b_reqnext;
 798                 };
 799                 if(SCpnt->use_sg < 16)
 800                     for(count=0; count<SCpnt->use_sg; count++)
 801                         printk("{%d:%p %p %d}  ", count,
 802                                sgpnt[count].address,
 803                                sgpnt[count].alt_address,
 804                                sgpnt[count].length);
 805                 panic("Ooops");
 806             };
 807             
 808             if (SCpnt->request.cmd == WRITE)
 809                 for(count=0; count<SCpnt->use_sg; count++)
 810                     if(sgpnt[count].alt_address)
 811                         memcpy(sgpnt[count].address, sgpnt[count].alt_address, 
 812                                sgpnt[count].length);
 813         };  /* Able to malloc sgpnt */
 814     };  /* Host adapter capable of scatter-gather */
 815     
 816     /* Now handle the possibility of DMA to addresses > 16Mb */
 817     
 818     if(SCpnt->use_sg == 0){
 819         if (((long) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD && 
 820             (SCpnt->host->unchecked_isa_dma)) {
 821             if(bounce_buffer)
 822                 buff = bounce_buffer;
 823             else
 824                 buff = (char *) scsi_malloc(this_count << 9);
 825             if(buff == NULL) {  /* Try backing off a bit if we are low on mem*/
 826                 this_count = SCpnt->request.current_nr_sectors;
 827                 buff = (char *) scsi_malloc(this_count << 9);
 828                 if(!buff) panic("Ran out of DMA buffers.");
 829             };
 830             if (SCpnt->request.cmd == WRITE)
 831                 memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
 832         };
 833     };
 834 #ifdef DEBUG
 835     printk("sd%c : %s %d/%d 512 byte blocks.\n", 
 836            'a' + devm,
 837            (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
 838            this_count, SCpnt->request.nr_sectors);
 839 #endif
 840     
 841     cmd[1] = (SCpnt->lun << 5) & 0xe0;
 842     
 843     if (rscsi_disks[dev].sector_size == 1024){
 844         if(block & 1) panic("sd.c:Bad block number requested");
 845         if(this_count & 1) panic("sd.c:Bad block number requested");
 846         block = block >> 1;
 847         this_count = this_count >> 1;
 848     };
 849     
 850     if (rscsi_disks[dev].sector_size == 256){
 851         block = block << 1;
 852         this_count = this_count << 1;
 853     };
 854     
 855     if (((this_count > 0xff) ||  (block > 0x1fffff)) && rscsi_disks[dev].ten)
 856     {
 857         if (this_count > 0xffff)
 858             this_count = 0xffff;
 859         
 860         cmd[0] += READ_10 - READ_6 ;
 861         cmd[2] = (unsigned char) (block >> 24) & 0xff;
 862         cmd[3] = (unsigned char) (block >> 16) & 0xff;
 863         cmd[4] = (unsigned char) (block >> 8) & 0xff;
 864         cmd[5] = (unsigned char) block & 0xff;
 865         cmd[6] = cmd[9] = 0;
 866         cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
 867         cmd[8] = (unsigned char) this_count & 0xff;
 868     }
 869     else
 870     {
 871         if (this_count > 0xff)
 872             this_count = 0xff;
 873         
 874         cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
 875         cmd[2] = (unsigned char) ((block >> 8) & 0xff);
 876         cmd[3] = (unsigned char) block & 0xff;
 877         cmd[4] = (unsigned char) this_count;
 878         cmd[5] = 0;
 879     }
 880     
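          /*
           * For reference, the command block built above follows the standard
           * SCSI READ/WRITE formats.  The six byte form only has room for a
           * 21 bit block address and an 8 bit transfer length:
           *
           *    byte 0     opcode (READ_6 / WRITE_6)
           *    byte 1     LUN in bits 7-5, block address bits 20-16 in bits 4-0
           *    bytes 2-3  block address bits 15-0
           *    byte 4     transfer length (capped at 0xff above)
           *    byte 5     control
           *
           * The ten byte form carries a 32 bit block address in bytes 2-5 and a
           * 16 bit transfer length in bytes 7-8, which is why we switch to it
           * when block > 0x1fffff or the count exceeds 0xff.
           */
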
 881     /*
 882      * We shouldn't disconnect in the middle of a sector, so with a dumb 
 883      * host adapter, it's safe to assume that we can at least transfer 
 884      * this many bytes between each connect / disconnect.  
 885      */
 886     
 887     SCpnt->transfersize = rscsi_disks[dev].sector_size;
 888     SCpnt->underflow = this_count << 9; 
 889     scsi_do_cmd (SCpnt, (void *) cmd, buff, 
 890                  this_count * rscsi_disks[dev].sector_size,
 891                  rw_intr, 
 892                  (SCpnt->device->type == TYPE_DISK ? 
 893                   SD_TIMEOUT : SD_MOD_TIMEOUT),
 894                  MAX_RETRIES);
 895 }
 896 
 897 static int check_scsidisk_media_change(kdev_t full_dev){
 898     int retval;
 899     int target;
 900     struct inode inode;
 901     int flag = 0;
 902     
 903     target =  DEVICE_NR(full_dev);
 904     
 905     if (target >= sd_template.dev_max ||
 906         !rscsi_disks[target].device) {
 907         printk("SCSI disk request error: invalid device.\n");
 908         return 0;
 909     };
 910     
 911     if(!rscsi_disks[target].device->removable) return 0;
 912     
 913     inode.i_rdev = full_dev;  /* This is all we really need here */
 914     retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
 915     
 916     if(retval){ /* Unable to test, unit probably not ready.  This usually
 917                  * means there is no disc in the drive.  Mark as changed,
 918                  * and we will figure it out later once the drive is
 919                  * available again.  */
 920         
 921         rscsi_disks[target].ready = 0;
 922         rscsi_disks[target].device->changed = 1;
 923         return 1; /* This will force a flush, if called from
 924                    * check_disk_change */
 925     };
 926     
 927     /* 
  928      * For a removable scsi disk ( FLOPTICAL ) we have to recognise the
  929      * presence of a disk in the drive. This is kept in the Scsi_Disk
  930      * struct and tested at open!  Daniel Roche ( dan@lectra.fr )
 931      */
 932     
 933     rscsi_disks[target].ready = 1;      /* FLOPTICAL */
 934 
 935     retval = rscsi_disks[target].device->changed;
 936     if(!flag) rscsi_disks[target].device->changed = 0;
 937     return retval;
 938 }
 939 
 940 static void sd_init_done (Scsi_Cmnd * SCpnt)
 941 {
 942     struct request * req;
 943     
 944     req = &SCpnt->request;
 945     req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
 946     
 947     if (req->sem != NULL) {
 948         up(req->sem);
 949     }
 950 }
 951 
 952 static int sd_init_onedisk(int i)
 953 {
 954     unsigned char cmd[10];
 955     unsigned char *buffer;
 956     unsigned long spintime;
 957     int the_result, retries;
 958     Scsi_Cmnd * SCpnt;
 959     
 960     /* We need to retry the READ_CAPACITY because a UNIT_ATTENTION is 
 961      * considered a fatal error, and many devices report such an error 
 962      * just after a scsi bus reset. 
 963      */
 964     
 965     SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
 966     buffer = (unsigned char *) scsi_malloc(512);
 967     
 968     spintime = 0;
 969     
 970     /* Spin up drives, as required.  Only do this at boot time */
 971     if (!MODULE_FLAG){
 972         do{
 973             retries = 0;
 974             while(retries < 3)
 975             {
 976                 cmd[0] = TEST_UNIT_READY;
 977                 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
 978                 memset ((void *) &cmd[2], 0, 8);
 979                 SCpnt->cmd_len = 0;
 980                 SCpnt->sense_buffer[0] = 0;
 981                 SCpnt->sense_buffer[2] = 0;
 982 
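                      /*
                       * Issue the command synchronously: sd_init_done() does an
                       * up() on the semaphore when the command completes, so the
                       * down() below sleeps until TEST UNIT READY has finished.
                       */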
 983                 {
 984                     struct semaphore sem = MUTEX_LOCKED;
 985                     /* Mark as really busy again */
 986                     SCpnt->request.rq_status = RQ_SCSI_BUSY;
 987                     SCpnt->request.sem = &sem;
 988                     scsi_do_cmd (SCpnt,
 989                                  (void *) cmd, (void *) buffer,
 990                                  512, sd_init_done,  SD_TIMEOUT,
 991                                  MAX_RETRIES);
 992                     down(&sem);
 993                 }
 994 
 995                 the_result = SCpnt->result;
 996                 retries++;
 997                 if(   the_result == 0
 998                    || SCpnt->sense_buffer[2] != UNIT_ATTENTION)
 999                     break;
1000             }
1001             
1002             /* Look for non-removable devices that return NOT_READY.  
1003              * Issue command to spin up drive for these cases. */
1004             if(the_result && !rscsi_disks[i].device->removable && 
1005                SCpnt->sense_buffer[2] == NOT_READY) {
1006                 int time1;
1007                 if(!spintime){
1008                     printk( "sd%c: Spinning up disk...", 'a' + i );
1009                     cmd[0] = START_STOP;
1010                     cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
1011                     cmd[1] |= 1;  /* Return immediately */
1012                     memset ((void *) &cmd[2], 0, 8);
1013                     cmd[4] = 1; /* Start spin cycle */
1014                     SCpnt->cmd_len = 0;
1015                     SCpnt->sense_buffer[0] = 0;
1016                     SCpnt->sense_buffer[2] = 0;
1017                     
1018                     {
1019                         struct semaphore sem = MUTEX_LOCKED;
1020                         /* Mark as really busy again */
1021                         SCpnt->request.rq_status = RQ_SCSI_BUSY; 
1022                         SCpnt->request.sem = &sem;
1023                         scsi_do_cmd (SCpnt,
1024                                      (void *) cmd, (void *) buffer,
1025                                      512, sd_init_done,  SD_TIMEOUT,
1026                                      MAX_RETRIES);
1027                         down(&sem);
1028                     }
1029                     
1030                     spintime = jiffies;
1031                 }
1032                 
1033                 time1 = jiffies;
1034                 while(jiffies < time1 + HZ); /* Wait 1 second for next try */
1035                 printk( "." );
1036             };
1037         } while(the_result && spintime && spintime+100*HZ > jiffies);
1038         if (spintime) {
1039             if (the_result)
1040                 printk( "not responding...\n" );
1041             else
1042                 printk( "ready\n" );
1043         }
1044     };  /* !MODULE_FLAG */
1045     
1046     
1047     retries = 3;
1048     do {
1049         cmd[0] = READ_CAPACITY;
1050         cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
1051         memset ((void *) &cmd[2], 0, 8);
1052         memset ((void *) buffer, 0, 8);
1053         SCpnt->cmd_len = 0;
1054         SCpnt->sense_buffer[0] = 0;
1055         SCpnt->sense_buffer[2] = 0;
1056 
1057         {
1058             struct semaphore sem = MUTEX_LOCKED;
1059             /* Mark as really busy again */
1060             SCpnt->request.rq_status = RQ_SCSI_BUSY;
1061             SCpnt->request.sem = &sem;
1062             scsi_do_cmd (SCpnt,
1063                          (void *) cmd, (void *) buffer,
1064                          8, sd_init_done,  SD_TIMEOUT,
1065                          MAX_RETRIES);
1066             down(&sem); /* sleep until it is ready */
1067         }
1068         
1069         the_result = SCpnt->result;
1070         retries--;
1071         
1072     } while(the_result && retries);
1073     
1074     SCpnt->request.rq_status = RQ_INACTIVE;  /* Mark as not busy */
1075     
1076     wake_up(&SCpnt->device->device_wait); 
1077     
1078     /* Wake up a process waiting for device */
1079     
1080     /*
1081      * The SCSI standard says: 
1082      * "READ CAPACITY is necessary for self configuring software"
1083      *  While not mandatory, support of READ CAPACITY is strongly encouraged.
1084      *  We used to die if we couldn't successfully do a READ CAPACITY.
1085      *  But, now we go on about our way.  The side effects of this are
1086      *
1087      *  1. We can't know block size with certainty. I have said "512 bytes 
1088      *     is it" as this is most common.
1089      *
1090      *  2. Recovery from when someone attempts to read past the end of the 
1091      *     raw device will be slower.
1092      */
1093     
1094     if (the_result)
1095     {
1096         printk ("sd%c : READ CAPACITY failed.\n"
1097                 "sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
1098                 'a' + i, 'a' + i,
1099                 status_byte(the_result),
1100                 msg_byte(the_result),
1101                 host_byte(the_result),
1102                 driver_byte(the_result)
1103                 );
1104         if (driver_byte(the_result)  & DRIVER_SENSE)
1105             printk("sd%c : extended sense code = %1x \n", 
1106                    'a' + i, SCpnt->sense_buffer[2] & 0xf);
1107         else
1108             printk("sd%c : sense not available. \n", 'a' + i);
1109         
1110         printk("sd%c : block size assumed to be 512 bytes, disk size 1GB.  \n",
1111                'a' + i);
1112         rscsi_disks[i].capacity = 0x1fffff;
1113         rscsi_disks[i].sector_size = 512;
1114         
1115         /* Set dirty bit for removable devices if not ready - sometimes drives
1116          * will not report this properly. */
1117         if(rscsi_disks[i].device->removable && 
1118            SCpnt->sense_buffer[2] == NOT_READY)
1119             rscsi_disks[i].device->changed = 1;
1120         
1121     }
1122     else
1123     {
1124         /*
1125          * FLOPTICAL , if read_capa is ok , drive is assumed to be ready 
1126          */
1127         rscsi_disks[i].ready = 1;
1128 
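             /*
              * READ CAPACITY returns eight bytes of big-endian data: the address
              * of the last logical block in bytes 0-3 and the block length (in
              * bytes) in bytes 4-7, assembled by hand below.
              */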
1129         rscsi_disks[i].capacity = (buffer[0] << 24) |
1130             (buffer[1] << 16) |
1131                 (buffer[2] << 8) |
1132                     buffer[3];
1133         
1134         rscsi_disks[i].sector_size = (buffer[4] << 24) |
1135             (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
1136 
1137         if (rscsi_disks[i].sector_size == 0) {
1138           rscsi_disks[i].sector_size = 512;
1139           printk("sd%c : sector size 0 reported, assuming 512.\n", 'a' + i);
1140         }
1141  
1142         
1143         if (rscsi_disks[i].sector_size != 512 &&
1144             rscsi_disks[i].sector_size != 1024 &&
1145             rscsi_disks[i].sector_size != 256)
1146         {
1147             printk ("sd%c : unsupported sector size %d.\n",
1148                     'a' + i, rscsi_disks[i].sector_size);
1149             if(rscsi_disks[i].device->removable){
1150                 rscsi_disks[i].capacity = 0;
1151             } else {
1152                 printk ("scsi : deleting disk entry.\n");
1153                 rscsi_disks[i].device = NULL;
1154                 sd_template.nr_dev--;
1155                 return i;
1156             };
1157         }
1158     {
1159         /*
1160          * The msdos fs needs to know the hardware sector size,
1161          * so I have created this table. See ll_rw_blk.c
1162          * Jacques Gelinas (Jacques@solucorp.qc.ca)
1163          */
1164         int m;
1165         int hard_sector = rscsi_disks[i].sector_size;
1166         /* There are 16 minors allocated for each device */
1167         for (m=i<<4; m<((i+1)<<4); m++){
1168             sd_hardsizes[m] = hard_sector;
1169         }
1170         printk ("SCSI Hardware sector size is %d bytes on device sd%c\n",
1171                 hard_sector,i+'a');
1172     }
1173         if(rscsi_disks[i].sector_size == 1024)
1174             rscsi_disks[i].capacity <<= 1;  /* Change into 512 byte sectors */
1175         if(rscsi_disks[i].sector_size == 256)
1176             rscsi_disks[i].capacity >>= 1;  /* Change into 512 byte sectors */
1177     }
1178     
1179 
1180     /*
1181      * Unless otherwise specified, this is not write protected.
1182      */
1183     rscsi_disks[i].write_prot = 0;
1184     if ( rscsi_disks[i].device->removable && rscsi_disks[i].ready ) {
1185         /* FLOPTICAL */
1186 
1187         /* 
1188          *      For a removable scsi disk ( FLOPTICAL ) we have to recognise
1189          * the Write Protect Flag. This flag is kept in the Scsi_Disk struct
1190          * and tested at open!
1191          * Daniel Roche ( dan@lectra.fr )
1192          */
1193         
1194         memset ((void *) &cmd[0], 0, 8);
1195         cmd[0] = MODE_SENSE;
1196         cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
1197         cmd[2] = 1;      /* page code 1 ?? */
1198         cmd[4] = 12;
1199         SCpnt->cmd_len = 0;
1200         SCpnt->sense_buffer[0] = 0;
1201         SCpnt->sense_buffer[2] = 0;
1202 
1203         /* same code as READCAPA !! */
1204         {
1205             struct semaphore sem = MUTEX_LOCKED;
1206             SCpnt->request.rq_status = RQ_SCSI_BUSY;  /* Mark as really busy again */
1207             SCpnt->request.sem = &sem;
1208             scsi_do_cmd (SCpnt,
1209                          (void *) cmd, (void *) buffer,
1210                          512, sd_init_done,  SD_TIMEOUT,
1211                          MAX_RETRIES);
1212             down(&sem);
1213         }
1214         
1215         the_result = SCpnt->result;
1216         SCpnt->request.rq_status = RQ_INACTIVE;  /* Mark as not busy */
1217         wake_up(&SCpnt->device->device_wait); 
1218         
1219         if ( the_result ) {
1220             printk ("sd%c: test WP failed, assume Write Protected\n",i+'a');
1221             rscsi_disks[i].write_prot = 1;
1222         } else {
1223             rscsi_disks[i].write_prot = ((buffer[2] & 0x80) != 0);
1224             printk ("sd%c: Write Protect is %s\n",i+'a',
1225                     rscsi_disks[i].write_prot ? "on" : "off");
1226         }
1227         
1228     }   /* check for write protect */
1229  
1230     rscsi_disks[i].ten = 1;
1231     rscsi_disks[i].remap = 1;
1232     scsi_free(buffer, 512);
1233     return i;
1234 }
1235 
1236 /*
1237  * The sd_init() function looks at all SCSI drives present, determines
1238  * their size, and reads partition table entries for them.
1239  */
1240 
1241 static int sd_registered = 0;
1242 
1243 static int sd_init()
1244 {
1245     int i;
1246     
1247     if (sd_template.dev_noticed == 0) return 0;
1248     
1249     if(!sd_registered) {
1250           if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
1251               printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
1252               return 1;
1253           }
1254           sd_registered++;
1255       }
1256     
1257     /* We do not support attaching loadable devices yet. */
1258     if(rscsi_disks) return 0;
1259     
1260     sd_template.dev_max = sd_template.dev_noticed + SD_EXTRA_DEVS;
1261     
1262     rscsi_disks = (Scsi_Disk *) 
1263         scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC);
1264     memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));
1265     
1266     sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) * 
1267                                         sizeof(int), GFP_ATOMIC);
1268     memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));
1269     
1270     sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) * 
1271                                              sizeof(int), GFP_ATOMIC);
1272     
1273     sd_hardsizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) * 
1274                                             sizeof(int), GFP_ATOMIC);
1275     
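         /*
          * Default every minor to a 1K soft block size and a 512 byte hardware
          * sector size; sd_init_onedisk() rewrites sd_hardsizes once READ
          * CAPACITY has reported the real sector size of each drive.
          */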
1276     for(i=0;i<(sd_template.dev_max << 4);i++){
1277         sd_blocksizes[i] = 1024;
1278         sd_hardsizes[i] = 512;
1279     }
1280     blksize_size[MAJOR_NR] = sd_blocksizes;
1281     hardsect_size[MAJOR_NR] = sd_hardsizes;
1282     sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
1283                                                sizeof(struct hd_struct),
1284                                                GFP_ATOMIC);
1285     
1286     
1287     sd_gendisk.max_nr = sd_template.dev_max;
1288     sd_gendisk.part = sd;
1289     sd_gendisk.sizes = sd_sizes;
1290     sd_gendisk.real_devices = (void *) rscsi_disks;
1291     return 0;
1292 }
1293 
1294 static void sd_finish()
1295 {
1296     int i;
1297 
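         /*
          * DEVICE_REQUEST comes from <linux/blk.h>; with MAJOR_NR defined as
          * SCSI_DISK_MAJOR it should resolve to do_sd_request above (an
          * assumption about the blk.h of this kernel generation).
          */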
1298     blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
1299     
1300     sd_gendisk.next = gendisk_head;
1301     gendisk_head = &sd_gendisk;
1302     
1303     for (i = 0; i < sd_template.dev_max; ++i)
1304         if (!rscsi_disks[i].capacity && 
1305             rscsi_disks[i].device)
1306         {
1307             if (MODULE_FLAG
1308                 && !rscsi_disks[i].has_part_table) {
1309                 sd_sizes[i << 4] = rscsi_disks[i].capacity;
1310                 /* revalidate does sd_init_onedisk via MAYBE_REINIT*/
1311                 revalidate_scsidisk(MKDEV(MAJOR_NR, i << 4), 0);
1312             }
1313             else
1314                 i=sd_init_onedisk(i);
1315             rscsi_disks[i].has_part_table = 1;
1316         }
1317     
1318     /* If our host adapter is capable of scatter-gather, then we increase
1319      * the read-ahead to 120 sectors.  If not, we use
1320      * a two block (4 sector) read ahead. 
1321      */
1322     if(rscsi_disks[0].device && rscsi_disks[0].device->host->sg_tablesize)
1323         read_ahead[MAJOR_NR] = 120;  /* 120 sector read-ahead */
1324     else
1325         read_ahead[MAJOR_NR] = 4;  /* 4 sector read-ahead */
1326 
1327     return;
1328 }
1329 
1330 static int sd_detect(Scsi_Device * SDp){
1331     if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1332     
1333     printk("Detected scsi disk sd%c at scsi%d, channel %d, id %d, lun %d\n", 
1334            'a'+ (sd_template.dev_noticed++),
1335            SDp->host->host_no, SDp->channel, SDp->id, SDp->lun); 
1336     
1337     return 1;
1338 }
1339 
1340 static int sd_attach(Scsi_Device * SDp){
1341     Scsi_Disk * dpnt;
1342     int i;
1343     
1344     if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1345     
1346     if(sd_template.nr_dev >= sd_template.dev_max) {
1347         SDp->attached--;
1348         return 1;
1349     }
1350     
1351     for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++) 
1352         if(!dpnt->device) break;
1353     
1354     if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)");
1355     
1356     SDp->scsi_request_fn = do_sd_request;
1357     rscsi_disks[i].device = SDp;
1358     rscsi_disks[i].has_part_table = 0;
1359     sd_template.nr_dev++;
1360     sd_gendisk.nr_real++;
1361     return 0;
1362 }
1363 
1364 #define DEVICE_BUSY rscsi_disks[target].device->busy
1365 #define USAGE rscsi_disks[target].device->access_count
1366 #define CAPACITY rscsi_disks[target].capacity
1367 #define MAYBE_REINIT  sd_init_onedisk(target)
1368 #define GENDISK_STRUCT sd_gendisk
1369 
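     /*
      * The macros above parameterise revalidate_scsidisk() below: they name the
      * busy flag, open count, capacity, re-probe hook and gendisk structure it
      * should operate on, so the function body itself stays generic.
      */
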
1370 /* This routine is called to flush all partitions and partition tables
1371  * for a changed scsi disk, and then re-read the new partition table.
1372  * If we are revalidating a disk because of a media change, then we
1373  * enter with usage == 0.  If we are using an ioctl, we automatically have
1374  * usage == 1 (we need an open channel to use an ioctl :-), so this
1375  * is our limit.
1376  */
1377 int revalidate_scsidisk(kdev_t dev, int maxusage){
1378     int target;
1379     struct gendisk * gdev;
1380     unsigned long flags;
1381     int max_p;
1382     int start;
1383     int i;
1384     
1385     target =  DEVICE_NR(dev);
1386     gdev = &GENDISK_STRUCT;
1387     
1388     save_flags(flags);
1389     cli();
1390     if (DEVICE_BUSY || USAGE > maxusage) {
1391         restore_flags(flags);
1392         printk("Device busy for revalidation (usage=%d)\n", USAGE);
1393         return -EBUSY;
1394     };
1395     DEVICE_BUSY = 1;
1396     restore_flags(flags);
1397     
1398     max_p = gdev->max_p;
1399     start = target << gdev->minor_shift;
1400     
1401     for (i=max_p - 1; i >=0 ; i--) {
1402         int minor = start+i;
1403         kdev_t devi = MKDEV(MAJOR_NR, minor);
1404         sync_dev(devi);
1405         invalidate_inodes(devi);
1406         invalidate_buffers(devi);
1407         gdev->part[minor].start_sect = 0;
1408         gdev->part[minor].nr_sects = 0;
1409         /*
1410          * Reset the blocksize for everything so that we can read
1411          * the partition table.
1412          */
1413         blksize_size[MAJOR_NR][minor] = 1024;
1414     };
1415     
1416 #ifdef MAYBE_REINIT
1417     MAYBE_REINIT;
1418 #endif
1419     
1420     gdev->part[start].nr_sects = CAPACITY;
1421     resetup_one_dev(gdev, target);
1422     
1423     DEVICE_BUSY = 0;
1424     return 0;
1425 }
1426 
1427 static int fop_revalidate_scsidisk(kdev_t dev){
1428     return revalidate_scsidisk(dev, 0);
1429 }
1430 
1431 
1432 static void sd_detach(Scsi_Device * SDp)
1433 {
1434     Scsi_Disk * dpnt;
1435     int i;
1436     int max_p;
1437     int start;
1438     
1439     for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++) 
1440         if(dpnt->device == SDp) {
1441             
1442             /* If we are disconnecting a disk driver, sync and invalidate 
1443              * everything */
1444             max_p = sd_gendisk.max_p;
1445             start = i << sd_gendisk.minor_shift;
1446             
1447             for (i=max_p - 1; i >=0 ; i--) {
1448                 int minor = start+i;
1449                 kdev_t devi = MKDEV(MAJOR_NR, minor);
1450                 sync_dev(devi);
1451                 invalidate_inodes(devi);
1452                 invalidate_buffers(devi);
1453                 sd_gendisk.part[minor].start_sect = 0;
1454                 sd_gendisk.part[minor].nr_sects = 0;
1455                 sd_sizes[minor] = 0;
1456             };
1457             
1458             dpnt->has_part_table = 0;
1459             dpnt->device = NULL;
1460             dpnt->capacity = 0;
1461             SDp->attached--;
1462             sd_template.dev_noticed--;
1463             sd_template.nr_dev--;
1464             sd_gendisk.nr_real--;
1465             return;
1466         }
1467     return;
1468 }
1469 
1470 #ifdef MODULE
1471 
1472 int init_module(void) {
1473     sd_template.usage_count = &mod_use_count_;
1474     return scsi_register_module(MODULE_SCSI_DEV, &sd_template);
1475 }
1476 
1477 void cleanup_module( void) 
1478 {
1479     struct gendisk * prev_sdgd;
1480     struct gendisk * sdgd;
1481     
1482     scsi_unregister_module(MODULE_SCSI_DEV, &sd_template);
1483     unregister_blkdev(SCSI_DISK_MAJOR, "sd");
1484     sd_registered--;
1485     if( rscsi_disks != NULL )
1486     {
1487         scsi_init_free((char *) rscsi_disks,
1488                        (sd_template.dev_noticed + SD_EXTRA_DEVS) 
1489                        * sizeof(Scsi_Disk));
1490         
1491         scsi_init_free((char *) sd_sizes, (sd_template.dev_max << 4) * sizeof(int));
1492         scsi_init_free((char *) sd_blocksizes, (sd_template.dev_max << 4) * sizeof(int));
1493         scsi_init_free((char *) sd_hardsizes, (sd_template.dev_max << 4) * sizeof(int));
1494         scsi_init_free((char *) sd, 
1495                        (sd_template.dev_max << 4) * sizeof(struct hd_struct));
1496         /*
1497          * Now remove sd_gendisk from the linked list
1498          */
1499         sdgd = gendisk_head;
1500         prev_sdgd = NULL;
1501         while(sdgd != NULL && sdgd != &sd_gendisk)
1502         {
1503             prev_sdgd = sdgd;
1504             sdgd = sdgd->next;
1505         }
1506         
1507         if(sdgd != &sd_gendisk)
1508             printk("sd_gendisk not in disk chain.\n");
1509         else {
1510             if(prev_sdgd != NULL)
1511                 prev_sdgd->next = sdgd->next;
1512             else
1513                 gendisk_head = sdgd->next;
1514         }
1515     }
1516     
1517     blksize_size[MAJOR_NR] = NULL;
1518     blk_dev[MAJOR_NR].request_fn = NULL;
1519     blk_size[MAJOR_NR] = NULL;  
1520     hardsect_size[MAJOR_NR] = NULL;
1521     read_ahead[MAJOR_NR] = 0;
1522     sd_template.dev_max = 0;
1523 }
1524 #endif /* MODULE */
1525 
1526 /*
1527  * Overrides for Emacs so that we almost follow Linus's tabbing style.
1528  * Emacs will notice this stuff at the end of the file and automatically
1529  * adjust the settings for this buffer only.  This must remain at the end
1530  * of the file.
1531  * ---------------------------------------------------------------------------
1532  * Local variables:
1533  * c-indent-level: 4
1534  * c-brace-imaginary-offset: 0
1535  * c-brace-offset: -4
1536  * c-argdecl-indent: 4
1537  * c-label-offset: -4
1538  * c-continued-statement-offset: 4
1539  * c-continued-brace-offset: 0
1540  * indent-tabs-mode: nil
1541  * tab-width: 8
1542  * End:
1543  */
