drivers/scsi/sd.c


DEFINITIONS

This source file includes the following definitions:
  1. sd_open
  2. sd_release
  3. sd_geninit
  4. rw_intr
  5. do_sd_request
  6. requeue_sd_request
  7. check_scsidisk_media_change
  8. sd_init_done
  9. sd_init_onedisk
  10. sd_init
  11. sd_finish
  12. sd_detect
  13. sd_attach
  14. revalidate_scsidisk
  15. fop_revalidate_scsidisk
  16. sd_detach
  17. init_module
  18. cleanup_module

   1 /*
   2  *      sd.c Copyright (C) 1992 Drew Eckhardt 
   3  *           Copyright (C) 1993, 1994, 1995 Eric Youngdale
   4  *
   5  *      Linux scsi disk driver
   6  *              Initial versions: Drew Eckhardt 
   7  *              Subsequent revisions: Eric Youngdale
   8  *
   9  *      <drew@colorado.edu>
  10  *
  11  *       Modified by Eric Youngdale ericy@cais.com to
  12  *       add scatter-gather, multiple outstanding request, and other
  13  *       enhancements.
  14  *
  15  *       Modified by Eric Youngdale eric@aib.com to support loadable
  16  *       low-level scsi drivers.
  17  */
  18 
  19 #ifdef MODULE
  20 #include <linux/autoconf.h>
  21 #include <linux/module.h>
  22 #include <linux/version.h>
  23 /*
  24  * This is a variable in scsi.c that is set when we are processing something
  25  * after boot time.  By definition, this is true when we are a loadable module
  26  * ourselves.
  27  */
  28 #define MODULE_FLAG 1
  29 #else
  30 #define MODULE_FLAG scsi_loadable_module_flag
  31 #endif /* MODULE */
  32 
  33 #include <linux/fs.h>
  34 #include <linux/kernel.h>
  35 #include <linux/sched.h>
  36 #include <linux/mm.h>
  37 #include <linux/string.h>
  38 #include <linux/errno.h>
  39 #include <asm/system.h>
  40 
  41 #define MAJOR_NR SCSI_DISK_MAJOR
  42 #include <linux/blk.h>
  43 #include "scsi.h"
  44 #include "hosts.h"
  45 #include "sd.h"
  46 #include "scsi_ioctl.h"
  47 #include "constants.h"
  48 
  49 #include <linux/genhd.h>
  50 
  51 /*
  52  *  static const char RCSid[] = "$Header:";
  53  */
  54 
  55 #define MAX_RETRIES 5
  56 
  57 /*
  58  *  Time out in seconds for disks and Magneto-opticals (which are slower).
  59  */
  60 
  61 #define SD_TIMEOUT (7 * HZ)
  62 #define SD_MOD_TIMEOUT (8 * HZ)
  63 
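      /*
       * Clustering merges physically contiguous buffer heads into one larger
       * transfer (or scatter-gather segment).  It is only attempted when the
       * host adapter asks for it, and never for magneto-optical devices.
       */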
  64 #define CLUSTERABLE_DEVICE(SC) (SC->host->use_clustering && \
  65                                 SC->device->type != TYPE_MOD)
  66 
  67 struct hd_struct * sd;
  68 
  69 Scsi_Disk * rscsi_disks = NULL;
  70 static int * sd_sizes;
  71 static int * sd_blocksizes;
  72 static int * sd_hardsizes;              /* Hardware sector size */
  73 
  74 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
  75 
  76 static int check_scsidisk_media_change(kdev_t);
  77 static int fop_revalidate_scsidisk(kdev_t);
  78 
  79 static int sd_init_onedisk(int);
  80 
  81 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
  82 
  83 static int sd_init(void);
  84 static void sd_finish(void);
  85 static int sd_attach(Scsi_Device *);
  86 static int sd_detect(Scsi_Device *);
  87 static void sd_detach(Scsi_Device *);
  88 
  89 struct Scsi_Device_Template sd_template = 
  90 { NULL, "disk", "sd", NULL, TYPE_DISK, 
  91       SCSI_DISK_MAJOR, 0, 0, 0, 1,
  92       sd_detect, sd_init,
  93       sd_finish, sd_attach, sd_detach
  94 };
  95 
  96 static int sd_open(struct inode * inode, struct file * filp)
  97 {
  98     int target;
  99     target =  DEVICE_NR(inode->i_rdev);
 100     
 101     if(target >= sd_template.dev_max || !rscsi_disks[target].device)
 102         return -ENXIO;   /* No such device */
 103     
 104     /* 
  105      * Make sure that only one process can do a check_disk_change at one time.
 106      * This is also used to lock out further access when the partition table 
 107      * is being re-read. 
 108      */
 109     
 110     while (rscsi_disks[target].device->busy)
 111     barrier();   
 112     if(rscsi_disks[target].device->removable) {
 113         check_disk_change(inode->i_rdev);
 114         
 115         /*
 116          * If the drive is empty, just let the open fail.
 117          */
 118         if ( !rscsi_disks[target].ready ) {
 119             return -ENXIO;
 120         }
 121 
 122         /*
  123          * Similarly, if the device has the write-protect tab set,
 124          * have the open fail if the user expects to be able to write
 125          * to the thing.
 126          */
 127         if ( (rscsi_disks[target].write_prot) && (filp->f_mode & 2) ) { 
 128             return -EROFS;
 129         }
 130 
 131         if(!rscsi_disks[target].device->access_count)
 132             sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
 133     };
 134 
 135     /*
 136      * See if we are requesting a non-existent partition.  Do this
 137      * after checking for disk change.
 138      */
 139     if(sd_sizes[MINOR(inode->i_rdev)] == 0)
 140         return -ENXIO;
 141     
 142     rscsi_disks[target].device->access_count++;
 143     if (rscsi_disks[target].device->host->hostt->usage_count)
 144         (*rscsi_disks[target].device->host->hostt->usage_count)++;
 145     if(sd_template.usage_count) (*sd_template.usage_count)++;
 146     return 0;
 147 }
 148 
 149 static void sd_release(struct inode * inode, struct file * file)
 150 {
 151     int target;
 152     sync_dev(inode->i_rdev);
 153     
 154     target =  DEVICE_NR(inode->i_rdev);
 155     
 156     rscsi_disks[target].device->access_count--;
 157     if (rscsi_disks[target].device->host->hostt->usage_count)
 158         (*rscsi_disks[target].device->host->hostt->usage_count)--;
 159     if(sd_template.usage_count) (*sd_template.usage_count)--;
 160     
 161     if(rscsi_disks[target].device->removable) {
 162         if(!rscsi_disks[target].device->access_count)
 163             sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
 164     }
 165 }
 166 
 167 static void sd_geninit(struct gendisk *);
 168 
 169 static struct file_operations sd_fops = {
 170     NULL,                        /* lseek - default */
 171     block_read,                  /* read - general block-dev read */
 172     block_write,                 /* write - general block-dev write */
 173     NULL,                        /* readdir - bad */
 174     NULL,                        /* select */
 175     sd_ioctl,                    /* ioctl */
 176     NULL,                        /* mmap */
 177     sd_open,                     /* open code */
 178     sd_release,                  /* release */
 179     block_fsync,                 /* fsync */
 180     NULL,                        /* fasync */
 181     check_scsidisk_media_change, /* Disk change */
 182     fop_revalidate_scsidisk      /* revalidate */
 183 };
 184 
 185 static struct gendisk sd_gendisk = {
 186     MAJOR_NR,                    /* Major number */
 187     "sd",                        /* Major name */
 188     4,                           /* Bits to shift to get real from partition */
 189     1 << 4,                      /* Number of partitions per real */
 190     0,                           /* maximum number of real */
 191     sd_geninit,                  /* init function */
 192     NULL,                        /* hd struct */
 193     NULL,                        /* block sizes */
 194     0,                           /* number */
 195     NULL,                        /* internal */
 196     NULL                         /* next */
 197 };
 198 
 199 static void sd_geninit (struct gendisk *ignored)
 200 {
 201     int i;
 202     
 203     for (i = 0; i < sd_template.dev_max; ++i)
 204         if(rscsi_disks[i].device) 
 205             sd[i << 4].nr_sects = rscsi_disks[i].capacity;
 206 #if 0
 207     /* No longer needed - we keep track of this as we attach/detach */
 208     sd_gendisk.nr_real = sd_template.dev_max;
 209 #endif
 210 }
 211 
 212 /*
 213  * rw_intr is the interrupt routine for the device driver.  It will
 214  * be notified on the end of a SCSI read / write, and
  215  * will take one of several actions based on success or failure.
 216  */
 217 
 218 static void rw_intr (Scsi_Cmnd *SCpnt)
 219 {
 220     int result = SCpnt->result;
 221     int this_count = SCpnt->bufflen >> 9;
 222     
 223 #ifdef DEBUG
 224     printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.rq_dev), 
 225            SCpnt->host->host_no, result);
 226 #endif
 227     
 228     /*
 229      * First case : we assume that the command succeeded.  One of two things 
 230      * will happen here.  Either we will be finished, or there will be more
 231      * sectors that we were unable to read last time.
 232      */
 233 
 234     if (!result) {
 235         
 236 #ifdef DEBUG
 237         printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.rq_dev),
 238                SCpnt->request.nr_sectors);
 239         printk("use_sg is %d\n ",SCpnt->use_sg);
 240 #endif
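              /*
               * The command succeeded.  For READs, copy data back from any
               * bounce buffers (alt_address) into the caller's buffers, then
               * release the bounce buffers and the scatter-gather list.
               */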
 241         if (SCpnt->use_sg) {
 242             struct scatterlist * sgpnt;
 243             int i;
 244             sgpnt = (struct scatterlist *) SCpnt->buffer;
 245             for(i=0; i<SCpnt->use_sg; i++) {
 246 #ifdef DEBUG
 247                 printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address, 
 248                        sgpnt[i].length);
 249 #endif
 250                 if (sgpnt[i].alt_address) {
 251                     if (SCpnt->request.cmd == READ)
 252                         memcpy(sgpnt[i].alt_address, sgpnt[i].address, 
 253                                sgpnt[i].length);
 254                     scsi_free(sgpnt[i].address, sgpnt[i].length);
 255                 };
 256             };
 257 
 258             /* Free list of scatter-gather pointers */
 259             scsi_free(SCpnt->buffer, SCpnt->sglist_len);  
 260         } else {
 261             if (SCpnt->buffer != SCpnt->request.buffer) {
 262 #ifdef DEBUG
 263                 printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 264                        SCpnt->bufflen);
 265 #endif  
 266                 if (SCpnt->request.cmd == READ)
 267                     memcpy(SCpnt->request.buffer, SCpnt->buffer,
 268                            SCpnt->bufflen);
 269                 scsi_free(SCpnt->buffer, SCpnt->bufflen);
 270             };
 271         };
 272         /*
 273          * If multiple sectors are requested in one buffer, then
 274          * they will have been finished off by the first command.
 275          * If not, then we have a multi-buffer command.
 276          */
 277         if (SCpnt->request.nr_sectors > this_count)
 278         {
 279             SCpnt->request.errors = 0;
 280             
 281             if (!SCpnt->request.bh)
 282             {
 283 #ifdef DEBUG
 284                 printk("sd%c : handling page request, no buffer\n",
 285                        'a' + MINOR(SCpnt->request.rq_dev));
 286 #endif
 287                 /*
 288                  * The SCpnt->request.nr_sectors field is always done in 
 289                  * 512 byte sectors, even if this really isn't the case.
 290                  */
 291                 panic("sd.c: linked page request (%lx %x)",
 292                       SCpnt->request.sector, this_count);
 293             }
 294         }
 295         SCpnt = end_scsi_request(SCpnt, 1, this_count);
 296         requeue_sd_request(SCpnt);
 297         return;
 298     }
 299     
 300     /* Free up any indirection buffers we allocated for DMA purposes. */
 301     if (SCpnt->use_sg) {
 302         struct scatterlist * sgpnt;
 303         int i;
 304         sgpnt = (struct scatterlist *) SCpnt->buffer;
 305         for(i=0; i<SCpnt->use_sg; i++) {
 306 #ifdef DEBUG
 307             printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 308                    SCpnt->bufflen);
 309 #endif
 310             if (sgpnt[i].alt_address) {
 311                 scsi_free(sgpnt[i].address, sgpnt[i].length);
 312             };
 313         };
 314         scsi_free(SCpnt->buffer, SCpnt->sglist_len);  /* Free list of scatter-gather pointers */
 315     } else {
 316 #ifdef DEBUG
 317         printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 318                SCpnt->bufflen);
 319 #endif
 320         if (SCpnt->buffer != SCpnt->request.buffer)
 321             scsi_free(SCpnt->buffer, SCpnt->bufflen);
 322     };
 323     
 324     /*
 325      * Now, if we were good little boys and girls, Santa left us a request
 326      * sense buffer.  We can extract information from this, so we
 327      * can choose a block to remap, etc.
 328      */
 329 
 330     if (driver_byte(result) != 0) {
 331         if (suggestion(result) == SUGGEST_REMAP) {
 332 #ifdef REMAP
 333             /*
 334              * Not yet implemented.  A read will fail after being remapped,
 335              * a write will call the strategy routine again.
 336              */
  337             if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].remap)
 338             {
 339                 result = 0;
 340             }
  341             /* else: fall through and report the error below */
 342 #endif
 343         }
 344         
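              /*
               * Sense-data check: a response code of 0x70 means fixed-format
               * "current error" sense data, and the low nibble of byte 2 is
               * the sense key.  A UNIT ATTENTION here indicates a possible
               * media change or a reset.
               */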
 345         if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
 346             if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
 347                 if(rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->removable) {
 348                     /* detected disc change.  set a bit and quietly refuse
 349                      * further access.
 350                      */  
 351                     rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->changed = 1;
 352                     SCpnt = end_scsi_request(SCpnt, 0, this_count);
 353                     requeue_sd_request(SCpnt);
 354                     return;
 355                 }
 356                 else
 357                 {
 358                     /*
 359                      * Must have been a power glitch, or a bus reset.
 360                      * Could not have been a media change, so we just retry
 361                      * the request and see what happens.
 362                      */
 363                     requeue_sd_request(SCpnt);
 364                     return;
 365                 }
 366             }
 367         }
 368         
 369         
 370         /* If we had an ILLEGAL REQUEST returned, then we may have
  371          * performed an unsupported command.  The most likely cause is a
  372          * ten byte read where only a six byte read is supported.
  373          * Also, on a system where READ CAPACITY failed, we may have read
  374          * past the end of the disk.
 375          */
 376 
 377         if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
 378             if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten) {
 379                 rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten = 0;
 380                 requeue_sd_request(SCpnt);
 381                 result = 0;
 382             } else {
 383                 /* ???? */
 384             }
 385         }
 386     }  /* driver byte != 0 */
 387     if (result) {
 388         printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n",
 389                rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->host->host_no,
 390                rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->channel,
  391                rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->id,
  392                rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->lun, result);
 393         
 394         if (driver_byte(result) & DRIVER_SENSE)
 395             print_sense("sd", SCpnt);
 396         SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
 397         requeue_sd_request(SCpnt);
 398         return;
 399     }
 400 }
 401 
 402 /*
  403  * do_sd_request() is the request handler function for the sd driver.
 404  * Its function in life is to take block device requests, and translate
 405  * them to SCSI commands.
 406  */
 407 
 408 static void do_sd_request (void)
 409 {
 410     Scsi_Cmnd * SCpnt = NULL;
 411     Scsi_Device * SDev;
 412     struct request * req = NULL;
 413     unsigned long flags;
 414     int flag = 0;
 415     
 416     save_flags(flags);
 417     while (1==1){
 418         cli();
 419         if (CURRENT != NULL && CURRENT->rq_status == RQ_INACTIVE) {
 420             restore_flags(flags);
 421             return;
 422         };
 423         
 424         INIT_SCSI_REQUEST;
 425         SDev = rscsi_disks[DEVICE_NR(CURRENT->rq_dev)].device;
 426         
 427         /*
 428          * I am not sure where the best place to do this is.  We need
 429          * to hook in a place where we are likely to come if in user
 430          * space.
 431          */
 432         if( SDev->was_reset )
 433         {
 434             /*
 435              * We need to relock the door, but we might
 436              * be in an interrupt handler.  Only do this
 437              * from user space, since we do not want to
 438              * sleep from an interrupt.
 439              */
 440             if( SDev->removable && !intr_count )
 441             {
 442                 scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0);
 443             }
 444             SDev->was_reset = 0;
 445         }
 446                 
 447         /* We have to be careful here. allocate_device will get a free pointer,
 448          * but there is no guarantee that it is queueable.  In normal usage, 
 449          * we want to call this, because other types of devices may have the 
 450          * host all tied up, and we want to make sure that we have at least 
 451          * one request pending for this type of device. We can also come 
 452          * through here while servicing an interrupt, because of the need to 
 453          * start another command. If we call allocate_device more than once, 
 454          * then the system can wedge if the command is not queueable. The 
 455          * request_queueable function is safe because it checks to make sure 
 456          * that the host is able to take another command before it returns
 457          * a pointer.  
 458          */
 459 
 460         if (flag++ == 0)
 461             SCpnt = allocate_device(&CURRENT,
 462                                     rscsi_disks[DEVICE_NR(CURRENT->rq_dev)].device, 0); 
 463         else SCpnt = NULL;
 464         
 465         /*
 466          * The following restore_flags leads to latency problems.  FIXME.
 467          * Using a "sti()" gets rid of the latency problems but causes
 468          * race conditions and crashes.
 469          */
 470         restore_flags(flags);
 471 
 472         /* This is a performance enhancement. We dig down into the request 
 473          * list and try and find a queueable request (i.e. device not busy, 
  474          * and host able to accept another command). If we find one, then we 
 475          * queue it. This can make a big difference on systems with more than 
 476          * one disk drive.  We want to have the interrupts off when monkeying 
 477          * with the request list, because otherwise the kernel might try and 
 478          * slip in a request in between somewhere. 
 479          */
 480 
 481         if (!SCpnt && sd_template.nr_dev > 1){
 482             struct request *req1;
 483             req1 = NULL;
 484             cli();
 485             req = CURRENT;
 486             while(req){
 487                 SCpnt = request_queueable(req, rscsi_disks[DEVICE_NR(req->rq_dev)].device);
 488                 if(SCpnt) break;
 489                 req1 = req;
 490                 req = req->next;
 491             };
 492             if (SCpnt && req->rq_status == RQ_INACTIVE) {
 493                 if (req == CURRENT) 
 494                     CURRENT = CURRENT->next;
 495                 else
 496                     req1->next = req->next;
 497             };
 498             restore_flags(flags);
 499         };
 500         
 501         if (!SCpnt) return; /* Could not find anything to do */
 502         
 503         /* Queue command */
 504         requeue_sd_request(SCpnt);
 505     };  /* While */
 506 }    
 507 
 508 static void requeue_sd_request (Scsi_Cmnd * SCpnt)
 509 {
 510     int dev, devm, block, this_count;
 511     unsigned char cmd[10];
 512     int bounce_size, contiguous;
 513     int max_sg;
 514     struct buffer_head * bh, *bhp;
 515     char * buff, *bounce_buffer;
 516     
 517  repeat:
 518     
 519     if(!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) {
 520         do_sd_request();
 521         return;
 522     }
 523     
 524     devm =  MINOR(SCpnt->request.rq_dev);
 525     dev = DEVICE_NR(SCpnt->request.rq_dev);
 526 
 527     block = SCpnt->request.sector;
 528     this_count = 0;
 529 
 530 #ifdef DEBUG
 531     printk("Doing sd request, dev = %d, block = %d\n", devm, block);
 532 #endif
 533     
 534     if (devm >= (sd_template.dev_max << 4) || 
 535         !rscsi_disks[dev].device ||
 536         block + SCpnt->request.nr_sectors > sd[devm].nr_sects)
 537     {
 538         SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 539         goto repeat;
 540     }
 541     
 542     block += sd[devm].start_sect;
 543     
 544     if (rscsi_disks[dev].device->changed)
 545     {
 546         /*
 547          * quietly refuse to do anything to a changed disc until the changed 
 548          * bit has been reset
 549          */
 550         /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
 551         SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 552         goto repeat;
 553     }
 554     
 555 #ifdef DEBUG
 556     printk("sd%c : real dev = /dev/sd%c, block = %d\n", 
 557            'a' + devm, dev, block);
 558 #endif
 559     
 560     /*
 561      * If we have a 1K hardware sectorsize, prevent access to single
 562      * 512 byte sectors.  In theory we could handle this - in fact
 563      * the scsi cdrom driver must be able to handle this because
 564      * we typically use 1K blocksizes, and cdroms typically have
 565      * 2K hardware sectorsizes.  Of course, things are simpler
 566      * with the cdrom, since it is read-only.  For performance
 567      * reasons, the filesystems should be able to handle this
 568      * and not force the scsi disk driver to use bounce buffers
 569      * for this.
 570      */
 571     if (rscsi_disks[dev].sector_size == 1024)
 572         if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
 573             printk("sd.c:Bad block number requested");
 574             SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 575             goto repeat;
 576         }
 577     
 578     switch (SCpnt->request.cmd)
 579     {
 580     case WRITE :
 581         if (!rscsi_disks[dev].device->writeable)
 582         {
 583             SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 584             goto repeat;
 585         }
 586         cmd[0] = WRITE_6;
 587         break;
 588     case READ :
 589         cmd[0] = READ_6;
 590         break;
 591     default :
 592         panic ("Unknown sd command %d\n", SCpnt->request.cmd);
 593     }
 594     
 595     SCpnt->this_count = 0;
 596     
 597     /* If the host adapter can deal with very large scatter-gather
 598      * requests, it is a waste of time to cluster 
 599      */
 600     contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
 601     bounce_buffer = NULL;
 602     bounce_size = (SCpnt->request.nr_sectors << 9);
 603     
 604     /* First see if we need a bounce buffer for this request. If we do, make 
 605      * sure that we can allocate a buffer. Do not waste space by allocating 
 606      * a bounce buffer if we are straddling the 16Mb line 
 607      */ 
 608     if (contiguous && SCpnt->request.bh &&
 609         ((long) SCpnt->request.bh->b_data) 
 610         + (SCpnt->request.nr_sectors << 9) - 1 > ISA_DMA_THRESHOLD 
 611         && SCpnt->host->unchecked_isa_dma) {
 612         if(((long) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
 613             bounce_buffer = (char *) scsi_malloc(bounce_size);
 614         if(!bounce_buffer) contiguous = 0;
 615     };
 616     
 617     if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
 618         for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp, 
 619             bhp = bhp->b_reqnext) {
 620             if(!CONTIGUOUS_BUFFERS(bh,bhp)) { 
 621                 if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
 622                 contiguous = 0;
 623                 break;
 624             } 
 625         };
 626     if (!SCpnt->request.bh || contiguous) {
 627         
 628         /* case of page request (i.e. raw device), or unlinked buffer */
 629         this_count = SCpnt->request.nr_sectors;
 630         buff = SCpnt->request.buffer;
 631         SCpnt->use_sg = 0;
 632         
 633     } else if (SCpnt->host->sg_tablesize == 0 ||
 634                (need_isa_buffer && dma_free_sectors <= 10)) {
 635         
 636         /* Case of host adapter that cannot scatter-gather.  We also
 637          * come here if we are running low on DMA buffer memory.  We set
  638          * a threshold higher than we would actually need for this request so
 639          * we leave room for other requests.  Even though we would not need
 640          * it all, we need to be conservative, because if we run low enough
 641          * we have no choice but to panic. 
 642          */
 643         if (SCpnt->host->sg_tablesize != 0 &&
 644             need_isa_buffer && 
 645             dma_free_sectors <= 10)
 646             printk("Warning: SCSI DMA buffer space running low.  Using non scatter-gather I/O.\n");
 647         
 648         this_count = SCpnt->request.current_nr_sectors;
 649         buff = SCpnt->request.buffer;
 650         SCpnt->use_sg = 0;
 651         
 652     } else {
 653         
 654         /* Scatter-gather capable host adapter */
 655         struct scatterlist * sgpnt;
 656         int count, this_count_max;
 657         int counted;
 658         
 659         bh = SCpnt->request.bh;
 660         this_count = 0;
 661         this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
 662         count = 0;
 663         bhp = NULL;
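              /*
               * Walk the buffer_head chain, adding up 512-byte sectors and
               * counting the scatter-gather segments needed.  A new segment
               * starts whenever two buffers are not physically contiguous,
               * clustering is disabled, or a buffer starts right at the ISA
               * DMA limit; the walk stops at the host's sg_tablesize or at
               * the command's maximum block count.
               */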
 664         while(bh) {
 665             if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
 666             if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
 667                !CLUSTERABLE_DEVICE(SCpnt) ||
 668                (SCpnt->host->unchecked_isa_dma &&
 669                 ((unsigned long) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
 670                 if (count < SCpnt->host->sg_tablesize) count++;
 671                 else break;
 672             };
 673             this_count += (bh->b_size >> 9);
 674             bhp = bh;
 675             bh = bh->b_reqnext;
 676         };
 677 #if 0
 678         if(SCpnt->host->unchecked_isa_dma &&
 679            ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
 680 #endif
 681         SCpnt->use_sg = count;  /* Number of chains */
 682         count = 512;/* scsi_malloc can only allocate in chunks of 512 bytes */
 683         while( count < (SCpnt->use_sg * sizeof(struct scatterlist))) 
 684             count = count << 1;
 685         SCpnt->sglist_len = count;
 686         max_sg = count / sizeof(struct scatterlist);
 687         if(SCpnt->host->sg_tablesize < max_sg) 
 688             max_sg = SCpnt->host->sg_tablesize;
 689         sgpnt = (struct scatterlist * ) scsi_malloc(count);
 690         if (!sgpnt) {
 691             printk("Warning - running *really* short on DMA buffers\n");
 692             SCpnt->use_sg = 0;    /* No memory left - bail out */
 693             this_count = SCpnt->request.current_nr_sectors;
 694             buff = SCpnt->request.buffer;
 695         } else {
 696             memset(sgpnt, 0, count);  /* Zero so it is easy to fill, but only
 697                                        * if memory is available 
 698                                        */
 699             buff = (char *) sgpnt;
 700             counted = 0;
 701             for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
 702                 count < SCpnt->use_sg && bh; 
 703                 count++, bh = bhp) {
 704                 
 705                 bhp = bh->b_reqnext;
 706                 
 707                 if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
 708                 sgpnt[count].length += bh->b_size;
 709                 counted += bh->b_size >> 9;
 710                 
 711                 if (((long) sgpnt[count].address) + sgpnt[count].length - 1 > 
 712                     ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
 713                     !sgpnt[count].alt_address) {
 714                     sgpnt[count].alt_address = sgpnt[count].address;
 715                     /* We try and avoid exhausting the DMA pool, since it is 
 716                      * easier to control usage here. In other places we might 
 717                      * have a more pressing need, and we would be screwed if 
 718                      * we ran out */
 719                     if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
 720                         sgpnt[count].address = NULL;
 721                     } else {
 722                         sgpnt[count].address = 
 723                             (char *) scsi_malloc(sgpnt[count].length);
 724                     };
 725                     /* If we start running low on DMA buffers, we abort the 
 726                      * scatter-gather operation, and free all of the memory 
 727                      * we have allocated.  We want to ensure that all scsi 
 728                      * operations are able to do at least a non-scatter/gather
 729                      * operation */
 730                     if(sgpnt[count].address == NULL){ /* Out of dma memory */
 731 #if 0
 732                         printk("Warning: Running low on SCSI DMA buffers");
 733                         /* Try switching back to a non s-g operation. */
 734                         while(--count >= 0){
 735                             if(sgpnt[count].alt_address) 
 736                                 scsi_free(sgpnt[count].address, 
 737                                           sgpnt[count].length);
 738                         };
 739                         this_count = SCpnt->request.current_nr_sectors;
 740                         buff = SCpnt->request.buffer;
 741                         SCpnt->use_sg = 0;
 742                         scsi_free(sgpnt, SCpnt->sglist_len);
 743 #endif
 744                         SCpnt->use_sg = count;
 745                         this_count = counted -= bh->b_size >> 9;
 746                         break;
 747                     };
 748                     
 749                 };
 750                 
 751                 /* Only cluster buffers if we know that we can supply DMA 
 752                  * buffers large enough to satisfy the request. Do not cluster
 753                  * a new request if this would mean that we suddenly need to 
 754                  * start using DMA bounce buffers */
 755                 if(bhp && CONTIGUOUS_BUFFERS(bh,bhp) 
 756                    && CLUSTERABLE_DEVICE(SCpnt)) {
 757                     char * tmp;
 758                     
 759                     if (((long) sgpnt[count].address) + sgpnt[count].length +
 760                         bhp->b_size - 1 > ISA_DMA_THRESHOLD && 
 761                         (SCpnt->host->unchecked_isa_dma) &&
 762                         !sgpnt[count].alt_address) continue;
 763                     
 764                     if(!sgpnt[count].alt_address) {count--; continue; }
 765                     if(dma_free_sectors > 10)
 766                         tmp = (char *) scsi_malloc(sgpnt[count].length 
 767                                                    + bhp->b_size);
 768                     else {
 769                         tmp = NULL;
 770                         max_sg = SCpnt->use_sg;
 771                     };
 772                     if(tmp){
 773                         scsi_free(sgpnt[count].address, sgpnt[count].length);
 774                         sgpnt[count].address = tmp;
 775                         count--;
 776                         continue;
 777                     };
 778                     
 779                     /* If we are allowed another sg chain, then increment 
 780                      * counter so we can insert it.  Otherwise we will end 
  781                      * up truncating. */
 782                     
 783                     if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
 784                 };  /* contiguous buffers */
 785             }; /* for loop */
 786             
 787             /* This is actually how many we are going to transfer */
 788             this_count = counted; 
 789             
 790             if(count < SCpnt->use_sg || SCpnt->use_sg 
 791                > SCpnt->host->sg_tablesize){
 792                 bh = SCpnt->request.bh;
 793                 printk("Use sg, count %d %x %d\n", 
 794                        SCpnt->use_sg, count, dma_free_sectors);
 795                 printk("maxsg = %x, counted = %d this_count = %d\n", 
 796                        max_sg, counted, this_count);
 797                 while(bh){
 798                     printk("[%p %lx] ", bh->b_data, bh->b_size);
 799                     bh = bh->b_reqnext;
 800                 };
 801                 if(SCpnt->use_sg < 16)
 802                     for(count=0; count<SCpnt->use_sg; count++)
 803                         printk("{%d:%p %p %d}  ", count,
 804                                sgpnt[count].address,
 805                                sgpnt[count].alt_address,
 806                                sgpnt[count].length);
 807                 panic("Ooops");
 808             };
 809             
 810             if (SCpnt->request.cmd == WRITE)
 811                 for(count=0; count<SCpnt->use_sg; count++)
 812                     if(sgpnt[count].alt_address)
 813                         memcpy(sgpnt[count].address, sgpnt[count].alt_address, 
 814                                sgpnt[count].length);
 815         };  /* Able to malloc sgpnt */
 816     };  /* Host adapter capable of scatter-gather */
 817     
 818     /* Now handle the possibility of DMA to addresses > 16Mb */
 819     
 820     if(SCpnt->use_sg == 0){
 821         if (((long) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD && 
 822             (SCpnt->host->unchecked_isa_dma)) {
 823             if(bounce_buffer)
 824                 buff = bounce_buffer;
 825             else
 826                 buff = (char *) scsi_malloc(this_count << 9);
 827             if(buff == NULL) {  /* Try backing off a bit if we are low on mem*/
 828                 this_count = SCpnt->request.current_nr_sectors;
 829                 buff = (char *) scsi_malloc(this_count << 9);
 830                 if(!buff) panic("Ran out of DMA buffers.");
 831             };
 832             if (SCpnt->request.cmd == WRITE)
 833                 memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
 834         };
 835     };
 836 #ifdef DEBUG
 837     printk("sd%c : %s %d/%d 512 byte blocks.\n", 
 838            'a' + devm,
 839            (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
 840            this_count, SCpnt->request.nr_sectors);
 841 #endif
 842     
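          /* SCSI-2 places the logical unit number in bits 5-7 of CDB byte 1. */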
 843     cmd[1] = (SCpnt->lun << 5) & 0xe0;
 844     
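          /*
           * The block layer counts in 512-byte sectors; convert the block
           * number and count into device sectors for 1024- and 256-byte
           * hardware sector sizes.
           */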
 845     if (rscsi_disks[dev].sector_size == 1024){
 846         if(block & 1) panic("sd.c:Bad block number requested");
 847         if(this_count & 1) panic("sd.c:Bad block number requested");
 848         block = block >> 1;
 849         this_count = this_count >> 1;
 850     };
 851     
 852     if (rscsi_disks[dev].sector_size == 256){
 853         block = block << 1;
 854         this_count = this_count << 1;
 855     };
 856     
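          /*
           * A 6-byte READ/WRITE carries only a 21-bit block address and an
           * 8-bit transfer length, so fall back to the 10-byte form (32-bit
           * address, 16-bit length) for larger values, provided the device
           * accepts 10-byte commands (the "ten" flag).
           */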
 857     if (((this_count > 0xff) ||  (block > 0x1fffff)) && rscsi_disks[dev].ten)
 858     {
 859         if (this_count > 0xffff)
 860             this_count = 0xffff;
 861         
 862         cmd[0] += READ_10 - READ_6 ;
 863         cmd[2] = (unsigned char) (block >> 24) & 0xff;
 864         cmd[3] = (unsigned char) (block >> 16) & 0xff;
 865         cmd[4] = (unsigned char) (block >> 8) & 0xff;
 866         cmd[5] = (unsigned char) block & 0xff;
 867         cmd[6] = cmd[9] = 0;
 868         cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
 869         cmd[8] = (unsigned char) this_count & 0xff;
 870     }
 871     else
 872     {
 873         if (this_count > 0xff)
 874             this_count = 0xff;
 875         
 876         cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
 877         cmd[2] = (unsigned char) ((block >> 8) & 0xff);
 878         cmd[3] = (unsigned char) block & 0xff;
 879         cmd[4] = (unsigned char) this_count;
 880         cmd[5] = 0;
 881     }
 882     
 883     /*
 884      * We shouldn't disconnect in the middle of a sector, so with a dumb 
 885      * host adapter, it's safe to assume that we can at least transfer 
 886      * this many bytes between each connect / disconnect.  
 887      */
 888     
 889     SCpnt->transfersize = rscsi_disks[dev].sector_size;
 890     SCpnt->underflow = this_count << 9; 
 891     scsi_do_cmd (SCpnt, (void *) cmd, buff, 
 892                  this_count * rscsi_disks[dev].sector_size,
 893                  rw_intr, 
 894                  (SCpnt->device->type == TYPE_DISK ? 
 895                   SD_TIMEOUT : SD_MOD_TIMEOUT),
 896                  MAX_RETRIES);
 897 }
 898 
 899 static int check_scsidisk_media_change(kdev_t full_dev){
 900     int retval;
 901     int target;
 902     struct inode inode;
 903     int flag = 0;
 904     
 905     target =  DEVICE_NR(full_dev);
 906     
 907     if (target >= sd_template.dev_max ||
 908         !rscsi_disks[target].device) {
 909         printk("SCSI disk request error: invalid device.\n");
 910         return 0;
 911     };
 912     
 913     if(!rscsi_disks[target].device->removable) return 0;
 914     
 915     inode.i_rdev = full_dev;  /* This is all we really need here */
 916     retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
 917     
 918     if(retval){ /* Unable to test, unit probably not ready.  This usually
 919                  * means there is no disc in the drive.  Mark as changed,
 920                  * and we will figure it out later once the drive is
 921                  * available again.  */
 922         
 923         rscsi_disks[target].ready = 0;
 924         rscsi_disks[target].device->changed = 1;
 925         return 1; /* This will force a flush, if called from
 926                    * check_disk_change */
 927     };
 928     
 929     /* 
  930      * For removable scsi disks (FLOPTICAL) we have to recognise the
  931      * presence of a disk in the drive.  This is kept in the Scsi_Disk
  932      * struct and tested at open!  Daniel Roche ( dan@lectra.fr )
 933      */
 934     
 935     rscsi_disks[target].ready = 1;      /* FLOPTICAL */
 936 
 937     retval = rscsi_disks[target].device->changed;
 938     if(!flag) rscsi_disks[target].device->changed = 0;
 939     return retval;
 940 }
 941 
 942 static void sd_init_done (Scsi_Cmnd * SCpnt)
 943 {
 944     struct request * req;
 945     
 946     req = &SCpnt->request;
 947     req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
 948     
 949     if (req->sem != NULL) {
 950         up(req->sem);
 951     }
 952 }
 953 
 954 static int sd_init_onedisk(int i)
 955 {
 956     unsigned char cmd[10];
 957     unsigned char *buffer;
 958     unsigned long spintime;
 959     int the_result, retries;
 960     Scsi_Cmnd * SCpnt;
 961     
 962     /* We need to retry the READ_CAPACITY because a UNIT_ATTENTION is 
 963      * considered a fatal error, and many devices report such an error 
 964      * just after a scsi bus reset. 
 965      */
 966     
 967     SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
 968     buffer = (unsigned char *) scsi_malloc(512);
 969     
 970     spintime = 0;
 971     
 972     /* Spin up drives, as required.  Only do this at boot time */
 973     if (!MODULE_FLAG){
 974         do{
 975             retries = 0;
 976             while(retries < 3)
 977             {
 978                 cmd[0] = TEST_UNIT_READY;
 979                 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
 980                 memset ((void *) &cmd[2], 0, 8);
 981                 SCpnt->cmd_len = 0;
 982                 SCpnt->sense_buffer[0] = 0;
 983                 SCpnt->sense_buffer[2] = 0;
 984 
 985                 {
 986                     struct semaphore sem = MUTEX_LOCKED;
 987                     /* Mark as really busy again */
 988                     SCpnt->request.rq_status = RQ_SCSI_BUSY;
 989                     SCpnt->request.sem = &sem;
 990                     scsi_do_cmd (SCpnt,
 991                                  (void *) cmd, (void *) buffer,
 992                                  512, sd_init_done,  SD_TIMEOUT,
 993                                  MAX_RETRIES);
 994                     down(&sem);
 995                 }
 996 
 997                 the_result = SCpnt->result;
 998                 retries++;
 999                 if(   the_result == 0
1000                    || SCpnt->sense_buffer[2] != UNIT_ATTENTION)
1001                     break;
1002             }
1003             
1004             /* Look for non-removable devices that return NOT_READY.  
1005              * Issue command to spin up drive for these cases. */
1006             if(the_result && !rscsi_disks[i].device->removable && 
1007                SCpnt->sense_buffer[2] == NOT_READY) {
1008                 int time1;
1009                 if(!spintime){
1010                     printk( "sd%c: Spinning up disk...", 'a' + i );
1011                     cmd[0] = START_STOP;
1012                     cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
1013                     cmd[1] |= 1;  /* Return immediately */
1014                     memset ((void *) &cmd[2], 0, 8);
1015                     cmd[4] = 1; /* Start spin cycle */
1016                     SCpnt->cmd_len = 0;
1017                     SCpnt->sense_buffer[0] = 0;
1018                     SCpnt->sense_buffer[2] = 0;
1019                     
1020                     {
1021                         struct semaphore sem = MUTEX_LOCKED;
1022                         /* Mark as really busy again */
1023                         SCpnt->request.rq_status = RQ_SCSI_BUSY; 
1024                         SCpnt->request.sem = &sem;
1025                         scsi_do_cmd (SCpnt,
1026                                      (void *) cmd, (void *) buffer,
1027                                      512, sd_init_done,  SD_TIMEOUT,
1028                                      MAX_RETRIES);
1029                         down(&sem);
1030                     }
1031                     
1032                     spintime = jiffies;
1033                 }
1034                 
1035                 time1 = jiffies;
1036                 while(jiffies < time1 + HZ); /* Wait 1 second for next try */
1037                 printk( "." );
1038             };
1039         } while(the_result && spintime && spintime+100*HZ > jiffies);
1040         if (spintime) {
1041             if (the_result)
1042                 printk( "not responding...\n" );
1043             else
1044                 printk( "ready\n" );
1045         }
1046     };  /* !MODULE_FLAG */
1047     
1048     
1049     retries = 3;
1050     do {
1051         cmd[0] = READ_CAPACITY;
1052         cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
1053         memset ((void *) &cmd[2], 0, 8);
1054         memset ((void *) buffer, 0, 8);
1055         SCpnt->cmd_len = 0;
1056         SCpnt->sense_buffer[0] = 0;
1057         SCpnt->sense_buffer[2] = 0;
1058 
1059         {
1060             struct semaphore sem = MUTEX_LOCKED;
1061             /* Mark as really busy again */
1062             SCpnt->request.rq_status = RQ_SCSI_BUSY;
1063             SCpnt->request.sem = &sem;
1064             scsi_do_cmd (SCpnt,
1065                          (void *) cmd, (void *) buffer,
1066                          8, sd_init_done,  SD_TIMEOUT,
1067                          MAX_RETRIES);
1068             down(&sem); /* sleep until it is ready */
1069         }
1070         
1071         the_result = SCpnt->result;
1072         retries--;
1073         
1074     } while(the_result && retries);
1075     
1076     SCpnt->request.rq_status = RQ_INACTIVE;  /* Mark as not busy */
1077     
1078     wake_up(&SCpnt->device->device_wait); 
1079     
1080     /* Wake up a process waiting for device */
1081     
1082     /*
1083      * The SCSI standard says: 
1084      * "READ CAPACITY is necessary for self configuring software"
1085      *  While not mandatory, support of READ CAPACITY is strongly encouraged.
1086      *  We used to die if we couldn't successfully do a READ CAPACITY.
1087      *  But, now we go on about our way.  The side effects of this are
1088      *
1089      *  1. We can't know block size with certainty. I have said "512 bytes 
1090      *     is it" as this is most common.
1091      *
 1092      *  2. Recovery when someone attempts to read past the end of the
1093      *     raw device will be slower.
1094      */
1095     
1096     if (the_result)
1097     {
1098         printk ("sd%c : READ CAPACITY failed.\n"
1099                 "sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
1100                 'a' + i, 'a' + i,
1101                 status_byte(the_result),
1102                 msg_byte(the_result),
1103                 host_byte(the_result),
1104                 driver_byte(the_result)
1105                 );
1106         if (driver_byte(the_result)  & DRIVER_SENSE)
1107             printk("sd%c : extended sense code = %1x \n", 
1108                    'a' + i, SCpnt->sense_buffer[2] & 0xf);
1109         else
1110             printk("sd%c : sense not available. \n", 'a' + i);
1111         
1112         printk("sd%c : block size assumed to be 512 bytes, disk size 1GB.  \n",
1113                'a' + i);
1114         rscsi_disks[i].capacity = 0x1fffff;
1115         rscsi_disks[i].sector_size = 512;
1116         
 1117         /* Set the changed flag for removable devices if not ready - sometimes
 1118          * drives will not report this properly. */
1119         if(rscsi_disks[i].device->removable && 
1120            SCpnt->sense_buffer[2] == NOT_READY)
1121             rscsi_disks[i].device->changed = 1;
1122         
1123     }
1124     else
1125     {
1126         /*
 1127          * FLOPTICAL: if READ CAPACITY succeeds, the drive is assumed to be ready
1128          */
1129         rscsi_disks[i].ready = 1;
1130 
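              /*
               * READ CAPACITY returns eight bytes: a big-endian 32-bit block
               * address followed by a big-endian 32-bit block length in bytes.
               */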
1131         rscsi_disks[i].capacity = (buffer[0] << 24) |
1132             (buffer[1] << 16) |
1133                 (buffer[2] << 8) |
1134                     buffer[3];
1135         
1136         rscsi_disks[i].sector_size = (buffer[4] << 24) |
1137             (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
1138         
1139         if (rscsi_disks[i].sector_size != 512 &&
1140             rscsi_disks[i].sector_size != 1024 &&
1141             rscsi_disks[i].sector_size != 256)
1142         {
1143             printk ("sd%c : unsupported sector size %d.\n",
1144                     'a' + i, rscsi_disks[i].sector_size);
1145             if(rscsi_disks[i].device->removable){
1146                 rscsi_disks[i].capacity = 0;
1147             } else {
1148                 printk ("scsi : deleting disk entry.\n");
1149                 rscsi_disks[i].device = NULL;
1150                 sd_template.nr_dev--;
1151                 return i;
1152             };
1153         }
1154     {
1155         /*
 1156          * The msdos fs needs to know the hardware sector size,
 1157          * so I have created this table.  See ll_rw_blk.c
1158          * Jacques Gelinas (Jacques@solucorp.qc.ca)
1159          */
1160         int m;
1161         int hard_sector = rscsi_disks[i].sector_size;
 1162         /* There are 16 minors allocated for each device */
1163         for (m=i<<4; m<((i+1)<<4); m++){
1164             sd_hardsizes[m] = hard_sector;
1165         }
1166         printk ("SCSI Hardware sector size is %d bytes on device sd%c\n",
1167                 hard_sector,i+'a');
1168     }
1169         if(rscsi_disks[i].sector_size == 1024)
1170             rscsi_disks[i].capacity <<= 1;  /* Change into 512 byte sectors */
1171         if(rscsi_disks[i].sector_size == 256)
1172             rscsi_disks[i].capacity >>= 1;  /* Change into 512 byte sectors */
1173     }
1174     
1175 
1176     /*
1177      * Unless otherwise specified, this is not write protected.
1178      */
1179     rscsi_disks[i].write_prot = 0;
1180     if ( rscsi_disks[i].device->removable && rscsi_disks[i].ready ) {
1181         /* FLOPTICAL */
1182 
1183         /* 
 1184          *      For removable scsi disks (FLOPTICAL) we have to recognise
 1185          * the Write Protect flag.  This flag is kept in the Scsi_Disk struct
 1186          * and tested at open!
1187          * Daniel Roche ( dan@lectra.fr )
1188          */
1189         
1190         memset ((void *) &cmd[0], 0, 8);
1191         cmd[0] = MODE_SENSE;
1192         cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
1193         cmd[2] = 1;      /* page code 1 ?? */
1194         cmd[4] = 12;
1195         SCpnt->cmd_len = 0;
1196         SCpnt->sense_buffer[0] = 0;
1197         SCpnt->sense_buffer[2] = 0;
1198 
1199         /* same code as READCAPA !! */
1200         {
1201             struct semaphore sem = MUTEX_LOCKED;
1202             SCpnt->request.rq_status = RQ_SCSI_BUSY;  /* Mark as really busy again */
1203             SCpnt->request.sem = &sem;
1204             scsi_do_cmd (SCpnt,
1205                          (void *) cmd, (void *) buffer,
1206                          512, sd_init_done,  SD_TIMEOUT,
1207                          MAX_RETRIES);
1208             down(&sem);
1209         }
1210         
1211         the_result = SCpnt->result;
1212         SCpnt->request.rq_status = RQ_INACTIVE;  /* Mark as not busy */
1213         wake_up(&SCpnt->device->device_wait); 
1214         
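              /*
               * Byte 2 of the returned MODE SENSE data is the device-specific
               * parameter from the mode parameter header; for direct-access
               * devices its top bit is the write-protect flag tested below.
               */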
1215         if ( the_result ) {
1216             printk ("sd%c: test WP failed, assume Write Protected\n",i+'a');
1217             rscsi_disks[i].write_prot = 1;
1218         } else {
1219             rscsi_disks[i].write_prot = ((buffer[2] & 0x80) != 0);
1220             printk ("sd%c: Write Protect is %s\n",i+'a',
1221                     rscsi_disks[i].write_prot ? "on" : "off");
1222         }
1223         
1224     }   /* check for write protect */
1225  
1226     rscsi_disks[i].ten = 1;
1227     rscsi_disks[i].remap = 1;
1228     scsi_free(buffer, 512);
1229     return i;
1230 }
1231 
1232 /*
1233  * The sd_init() function looks at all SCSI drives present, determines
1234  * their size, and reads partition table entries for them.
1235  */
1236 
1237 static int sd_registered = 0;
1238 
1239 static int sd_init()
1240 {
1241     int i;
1242     
1243     if (sd_template.dev_noticed == 0) return 0;
1244     
1245     if(!sd_registered) {
1246           if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
1247               printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
1248               return 1;
1249           }
1250           sd_registered++;
1251       }
1252     
1253     /* We do not support attaching loadable devices yet. */
1254     if(rscsi_disks) return 0;
1255     
1256     sd_template.dev_max = sd_template.dev_noticed + SD_EXTRA_DEVS;
1257     
1258     rscsi_disks = (Scsi_Disk *) 
1259         scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC);
1260     memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));
1261     
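          /*
           * Each disk owns 16 minor numbers (the whole-disk node plus up to
           * 15 partitions), hence the "<< 4" when sizing the per-minor
           * arrays below.
           */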
1262     sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) * 
1263                                         sizeof(int), GFP_ATOMIC);
1264     memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));
1265     
1266     sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) * 
1267                                              sizeof(int), GFP_ATOMIC);
1268     
1269     sd_hardsizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) * 
1270                                             sizeof(int), GFP_ATOMIC);
1271     
1272     for(i=0;i<(sd_template.dev_max << 4);i++){
1273         sd_blocksizes[i] = 1024;
1274         sd_hardsizes[i] = 512;
1275     }
1276     blksize_size[MAJOR_NR] = sd_blocksizes;
1277     hardsect_size[MAJOR_NR] = sd_hardsizes;
1278     sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
1279                                                sizeof(struct hd_struct),
1280                                                GFP_ATOMIC);
1281     
1282     
1283     sd_gendisk.max_nr = sd_template.dev_max;
1284     sd_gendisk.part = sd;
1285     sd_gendisk.sizes = sd_sizes;
1286     sd_gendisk.real_devices = (void *) rscsi_disks;
1287     return 0;
1288 }
1289 
1290 static void sd_finish()
1291 {
1292     int i;
1293 
1294     blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
1295     
1296     sd_gendisk.next = gendisk_head;
1297     gendisk_head = &sd_gendisk;
1298     
1299     for (i = 0; i < sd_template.dev_max; ++i)
1300         if (!rscsi_disks[i].capacity && 
1301             rscsi_disks[i].device)
1302         {
1303             i = sd_init_onedisk(i);
1304             if (MODULE_FLAG
1305                 && !rscsi_disks[i].has_part_table) {
1306                 sd_sizes[i << 4] = rscsi_disks[i].capacity;
1307                 revalidate_scsidisk(MKDEV(MAJOR_NR, i << 4), 0);
1308             }
1309             rscsi_disks[i].has_part_table = 1;
1310         }
1311     
1312     /* If our host adapter is capable of scatter-gather, then we increase
 1313      * the read-ahead to 120 sectors.  If not, we use
 1314      * a 4 sector read-ahead. 
1315      */
1316     if(rscsi_disks[0].device && rscsi_disks[0].device->host->sg_tablesize)
1317         read_ahead[MAJOR_NR] = 120;  /* 120 sector read-ahead */
1318     else
1319         read_ahead[MAJOR_NR] = 4;  /* 4 sector read-ahead */
1320 
1321     return;
1322 }
1323 
1324 static int sd_detect(Scsi_Device * SDp){
1325     if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1326     
1327     printk("Detected scsi disk sd%c at scsi%d, channel %d, id %d, lun %d\n", 
1328            'a'+ (sd_template.dev_noticed++),
1329            SDp->host->host_no, SDp->channel, SDp->id, SDp->lun); 
1330     
1331     return 1;
1332 }
1333 
1334 static int sd_attach(Scsi_Device * SDp){
1335     Scsi_Disk * dpnt;
1336     int i;
1337     
1338     if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1339     
1340     if(sd_template.nr_dev >= sd_template.dev_max) {
1341         SDp->attached--;
1342         return 1;
1343     }
1344     
1345     for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++) 
1346         if(!dpnt->device) break;
1347     
1348     if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)");
1349     
1350     SDp->scsi_request_fn = do_sd_request;
1351     rscsi_disks[i].device = SDp;
1352     rscsi_disks[i].has_part_table = 0;
1353     sd_template.nr_dev++;
1354     sd_gendisk.nr_real++;
1355     return 0;
1356 }
1357 
1358 #define DEVICE_BUSY rscsi_disks[target].device->busy
1359 #define USAGE rscsi_disks[target].device->access_count
1360 #define CAPACITY rscsi_disks[target].capacity
1361 #define MAYBE_REINIT  sd_init_onedisk(target)
1362 #define GENDISK_STRUCT sd_gendisk
1363 
1364 /* This routine is called to flush all partitions and partition tables
1365  * for a changed scsi disk, and then re-read the new partition table.
1366  * If we are revalidating a disk because of a media change, then we
1367  * enter with usage == 0.  If we are using an ioctl, we automatically have
1368  * usage == 1 (we need an open channel to use an ioctl :-), so this
1369  * is our limit.
1370  */
1371 int revalidate_scsidisk(kdev_t dev, int maxusage){
1372     int target;
1373     struct gendisk * gdev;
1374     unsigned long flags;
1375     int max_p;
1376     int start;
1377     int i;
1378     
1379     target =  DEVICE_NR(dev);
1380     gdev = &GENDISK_STRUCT;
1381     
1382     save_flags(flags);
1383     cli();
1384     if (DEVICE_BUSY || USAGE > maxusage) {
1385         restore_flags(flags);
1386         printk("Device busy for revalidation (usage=%d)\n", USAGE);
1387         return -EBUSY;
1388     };
1389     DEVICE_BUSY = 1;
1390     restore_flags(flags);
1391     
1392     max_p = gdev->max_p;
1393     start = target << gdev->minor_shift;
1394     
1395     for (i=max_p - 1; i >=0 ; i--) {
1396         int minor = start+i;
1397         kdev_t devi = MKDEV(MAJOR_NR, minor);
1398         sync_dev(devi);
1399         invalidate_inodes(devi);
1400         invalidate_buffers(devi);
1401         gdev->part[minor].start_sect = 0;
1402         gdev->part[minor].nr_sects = 0;
1403         /*
1404          * Reset the blocksize for everything so that we can read
1405          * the partition table.
1406          */
1407         blksize_size[MAJOR_NR][minor] = 1024;
1408     };
1409     
1410 #ifdef MAYBE_REINIT
1411     MAYBE_REINIT;
1412 #endif
1413     
1414     gdev->part[start].nr_sects = CAPACITY;
1415     resetup_one_dev(gdev, target);
1416     
1417     DEVICE_BUSY = 0;
1418     return 0;
1419 }
1420 
1421 static int fop_revalidate_scsidisk(kdev_t dev){
1422     return revalidate_scsidisk(dev, 0);
1423 }
1424 
1425 
1426 static void sd_detach(Scsi_Device * SDp)
1427 {
1428     Scsi_Disk * dpnt;
1429     int i;
1430     int max_p;
1431     int start;
1432     
1433     for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++) 
1434         if(dpnt->device == SDp) {
1435             
1436             /* If we are disconnecting a disk driver, sync and invalidate 
1437              * everything */
1438             max_p = sd_gendisk.max_p;
1439             start = i << sd_gendisk.minor_shift;
1440             
1441             for (i=max_p - 1; i >=0 ; i--) {
1442                 int minor = start+i;
1443                 kdev_t devi = MKDEV(MAJOR_NR, minor);
1444                 sync_dev(devi);
1445                 invalidate_inodes(devi);
1446                 invalidate_buffers(devi);
1447                 sd_gendisk.part[minor].start_sect = 0;
1448                 sd_gendisk.part[minor].nr_sects = 0;
1449                 sd_sizes[minor] = 0;
1450             };
1451             
1452             dpnt->has_part_table = 0;
1453             dpnt->device = NULL;
1454             dpnt->capacity = 0;
1455             SDp->attached--;
1456             sd_template.dev_noticed--;
1457             sd_template.nr_dev--;
1458             sd_gendisk.nr_real--;
1459             return;
1460         }
1461     return;
1462 }
1463 
1464 #ifdef MODULE
1465 #include <linux/module.h>
1466 #include <linux/version.h>
1467 
1468 char kernel_version[] = UTS_RELEASE;
1469 
1470 int init_module(void) {
1471     sd_template.usage_count = &mod_use_count_;
1472     return scsi_register_module(MODULE_SCSI_DEV, &sd_template);
1473 }
1474 
1475 void cleanup_module( void) 
1476 {
1477     struct gendisk * prev_sdgd;
1478     struct gendisk * sdgd;
1479     
1480     if (MOD_IN_USE) {
1481         printk(KERN_INFO __FILE__ ": module is in use, remove rejected\n");
1482         return;
1483     }
1484     scsi_unregister_module(MODULE_SCSI_DEV, &sd_template);
1485     unregister_blkdev(SCSI_DISK_MAJOR, "sd");
1486     sd_registered--;
1487     if( rscsi_disks != NULL )
1488     {
1489         scsi_init_free((char *) rscsi_disks,
1490                        (sd_template.dev_noticed + SD_EXTRA_DEVS) 
1491                        * sizeof(Scsi_Disk));
1492         
 1493         scsi_init_free((char *) sd_sizes, (sd_template.dev_max << 4) * sizeof(int));
 1494         scsi_init_free((char *) sd_blocksizes, (sd_template.dev_max << 4) * sizeof(int));
 1495         scsi_init_free((char *) sd_hardsizes, (sd_template.dev_max << 4) * sizeof(int));
1496         scsi_init_free((char *) sd, 
1497                        (sd_template.dev_max << 4) * sizeof(struct hd_struct));
1498         /*
1499          * Now remove sd_gendisk from the linked list
1500          */
1501         sdgd = gendisk_head;
1502         prev_sdgd = NULL;
1503         while(sdgd != &sd_gendisk)
1504         {
1505             prev_sdgd = sdgd;
1506             sdgd = sdgd->next;
1507         }
1508         
1509         if(sdgd != &sd_gendisk)
1510             printk("sd_gendisk not in disk chain.\n");
1511         else {
1512             if(prev_sdgd != NULL)
1513                 prev_sdgd->next = sdgd->next;
1514             else
1515                 gendisk_head = sdgd->next;
1516         }
1517     }
1518     
1519     blksize_size[MAJOR_NR] = NULL;
1520     blk_dev[MAJOR_NR].request_fn = NULL;
1521     blk_size[MAJOR_NR] = NULL;  
1522     hardsect_size[MAJOR_NR] = NULL;
1523     read_ahead[MAJOR_NR] = 0;
1524     sd_template.dev_max = 0;
1525 }
1526 #endif /* MODULE */
1527 
1528 /*
1529  * Overrides for Emacs so that we almost follow Linus's tabbing style.
1530  * Emacs will notice this stuff at the end of the file and automatically
1531  * adjust the settings for this buffer only.  This must remain at the end
1532  * of the file.
1533  * ---------------------------------------------------------------------------
1534  * Local variables:
1535  * c-indent-level: 4
1536  * c-brace-imaginary-offset: 0
1537  * c-brace-offset: -4
1538  * c-argdecl-indent: 4
1539  * c-label-offset: -4
1540  * c-continued-statement-offset: 4
1541  * c-continued-brace-offset: 0
1542  * indent-tabs-mode: nil
1543  * tab-width: 8
1544  * End:
1545  */
