drivers/scsi/sd.c


DEFINITIONS

This source file includes the following definitions.
  1. sd_open
  2. sd_release
  3. sd_geninit
  4. rw_intr
  5. do_sd_request
  6. requeue_sd_request
  7. check_scsidisk_media_change
  8. sd_init_done
  9. sd_init_onedisk
  10. sd_init
  11. sd_finish
  12. sd_detect
  13. sd_attach
  14. revalidate_scsidisk
  15. fop_revalidate_scsidisk
  16. sd_detach
  17. init_module
  18. cleanup_module

   1 /*
   2  *      sd.c Copyright (C) 1992 Drew Eckhardt 
   3  *           Copyright (C) 1993, 1994, 1995 Eric Youngdale
   4  *
   5  *      Linux scsi disk driver
   6  *              Initial versions: Drew Eckhardt 
   7  *              Subsequent revisions: Eric Youngdale
   8  *
   9  *      <drew@colorado.edu>
  10  *
  11  *       Modified by Eric Youngdale ericy@cais.com to
  12  *       add scatter-gather, multiple outstanding request, and other
  13  *       enhancements.
  14  *
  15  *       Modified by Eric Youngdale eric@aib.com to support loadable
  16  *       low-level scsi drivers.
  17  */
  18 
  19 #ifdef MODULE
  20 #include <linux/autoconf.h>
  21 #include <linux/module.h>
  22 #include <linux/version.h>
  23 /*
  24  * This is a variable in scsi.c that is set when we are processing something
  25  * after boot time.  By definition, this is true when we are a loadable module
  26  * ourselves.
  27  */
  28 #define MODULE_FLAG 1
  29 #else
  30 #define MODULE_FLAG scsi_loadable_module_flag
  31 #endif /* MODULE */
  32 
  33 #include <linux/fs.h>
  34 #include <linux/kernel.h>
  35 #include <linux/sched.h>
  36 #include <linux/mm.h>
  37 #include <linux/string.h>
  38 #include <linux/errno.h>
  39 #include <asm/system.h>
  40 
  41 #define MAJOR_NR SCSI_DISK_MAJOR
  42 #include "../block/blk.h"
  43 #include "scsi.h"
  44 #include "hosts.h"
  45 #include "sd.h"
  46 #include "scsi_ioctl.h"
  47 #include "constants.h"
  48 
  49 #include <linux/genhd.h>
  50 
  51 /*
  52  *  static const char RCSid[] = "$Header:";
  53  */
  54 
  55 #define MAX_RETRIES 5
  56 
  57 /*
  58  *  Time out in seconds for disks and Magneto-opticals (which are slower).
  59  */
  60 
  61 #define SD_TIMEOUT (7 * HZ)
  62 #define SD_MOD_TIMEOUT (8 * HZ)
  63 
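     /*
      * A command may only be "clustered" (built from several contiguous
      * buffer heads) when the host adapter asks for clustering and the
      * device is not a magneto-optical, which is handled more conservatively.
      */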
  64 #define CLUSTERABLE_DEVICE(SC) (SC->host->use_clustering && \
  65                                 SC->device->type != TYPE_MOD)
  66 
  67 struct hd_struct * sd;
  68 
  69 Scsi_Disk * rscsi_disks = NULL;
  70 static int * sd_sizes;
  71 static int * sd_blocksizes;
  72 static int * sd_hardsizes;              /* Hardware sector size */
  73 
  74 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
  75 
  76 static int check_scsidisk_media_change(kdev_t);
  77 static int fop_revalidate_scsidisk(kdev_t);
  78 
  79 static int sd_init_onedisk(int);
  80 
  81 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
  82 
  83 static int sd_init(void);
  84 static void sd_finish(void);
  85 static int sd_attach(Scsi_Device *);
  86 static int sd_detect(Scsi_Device *);
  87 static void sd_detach(Scsi_Device *);
  88 
  89 struct Scsi_Device_Template sd_template = 
  90 { NULL, "disk", "sd", NULL, TYPE_DISK, 
  91       SCSI_DISK_MAJOR, 0, 0, 0, 1,
  92       sd_detect, sd_init,
  93       sd_finish, sd_attach, sd_detach
  94 };
  95 
  96 static int sd_open(struct inode * inode, struct file * filp)
  97 {
  98     int target;
  99     target =  DEVICE_NR(inode->i_rdev);
 100     
 101     if(target >= sd_template.dev_max || !rscsi_disks[target].device)
 102         return -ENXIO;   /* No such device */
 103     
 104     /* 
 105      * Make sure that only one process can do a check_disk_change at one time.
 106      * This is also used to lock out further access when the partition table 
 107      * is being re-read. 
 108      */
 109     
 110     while (rscsi_disks[target].device->busy)
 111     barrier();   
 112     if(rscsi_disks[target].device->removable) {
 113         check_disk_change(inode->i_rdev);
 114         
 115         if(!rscsi_disks[target].device->access_count)
 116             sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
 117     };
 118 
 119     /*
 120      * See if we are requesting a non-existent partition.  Do this
 121      * after checking for disk change.
 122      */
 123     if(sd_sizes[MINOR(inode->i_rdev)] == 0)
 124         return -ENXIO;
 125     
 126     rscsi_disks[target].device->access_count++;
 127     if (rscsi_disks[target].device->host->hostt->usage_count)
 128         (*rscsi_disks[target].device->host->hostt->usage_count)++;
 129     if(sd_template.usage_count) (*sd_template.usage_count)++;
 130     return 0;
 131 }
 132 
 133 static void sd_release(struct inode * inode, struct file * file)
 134 {
 135     int target;
 136     sync_dev(inode->i_rdev);
 137     
 138     target =  DEVICE_NR(inode->i_rdev);
 139     
 140     rscsi_disks[target].device->access_count--;
 141     if (rscsi_disks[target].device->host->hostt->usage_count)
 142         (*rscsi_disks[target].device->host->hostt->usage_count)--;
 143     if(sd_template.usage_count) (*sd_template.usage_count)--;
 144     
 145     if(rscsi_disks[target].device->removable) {
 146         if(!rscsi_disks[target].device->access_count)
 147             sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
 148     }
 149 }
 150 
 151 static void sd_geninit(struct gendisk *);
 152 
 153 static struct file_operations sd_fops = {
 154     NULL,                        /* lseek - default */
 155     block_read,                  /* read - general block-dev read */
 156     block_write,                 /* write - general block-dev write */
 157     NULL,                        /* readdir - bad */
 158     NULL,                        /* select */
 159     sd_ioctl,                    /* ioctl */
 160     NULL,                        /* mmap */
 161     sd_open,                     /* open code */
 162     sd_release,                  /* release */
 163     block_fsync,                 /* fsync */
 164     NULL,                        /* fasync */
 165     check_scsidisk_media_change, /* Disk change */
 166     fop_revalidate_scsidisk      /* revalidate */
 167 };
 168 
 169 static struct gendisk sd_gendisk = {
 170     MAJOR_NR,                    /* Major number */
 171     "sd",                        /* Major name */
 172     4,                           /* Bits to shift to get real from partition */
 173     1 << 4,                      /* Number of partitions per real */
 174     0,                           /* maximum number of real */
 175     sd_geninit,                  /* init function */
 176     NULL,                        /* hd struct */
 177     NULL,                        /* block sizes */
 178     0,                           /* number */
 179     NULL,                        /* internal */
 180     NULL                         /* next */
 181 };
 182 
 183 static void sd_geninit (struct gendisk *ignored)
 184 {
 185     int i;
 186     
 187     for (i = 0; i < sd_template.dev_max; ++i)
 188         if(rscsi_disks[i].device) 
 189             sd[i << 4].nr_sects = rscsi_disks[i].capacity;
 190 #if 0
 191     /* No longer needed - we keep track of this as we attach/detach */
 192     sd_gendisk.nr_real = sd_template.dev_max;
 193 #endif
 194 }
 195 
 196 /*
 197  * rw_intr is the interrupt routine for the device driver.  It will
 198  * be notified on the end of a SCSI read / write, and
 199  * will take one of several actions based on success or failure.
 200  */
 201 
 202 static void rw_intr (Scsi_Cmnd *SCpnt)
 203 {
 204     int result = SCpnt->result;
 205     int this_count = SCpnt->bufflen >> 9;
 206     
 207 #ifdef DEBUG
 208     printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.rq_dev), 
 209            SCpnt->host->host_no, result);
 210 #endif
 211     
 212     /*
 213      * First case : we assume that the command succeeded.  One of two things 
 214      * will happen here.  Either we will be finished, or there will be more
 215      * sectors that we were unable to read last time.
 216      */
 217 
 218     if (!result) {
 219         
 220 #ifdef DEBUG
 221         printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.rq_dev),
 222                SCpnt->request.nr_sectors);
 223         printk("use_sg is %d\n ",SCpnt->use_sg);
 224 #endif
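             /*
              * Copy any DMA bounce buffers back to the caller's buffers on a
              * READ and return them to the DMA pool; in the scatter-gather
              * case a bounced segment is marked by a non-NULL alt_address.
              */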
 225         if (SCpnt->use_sg) {
 226             struct scatterlist * sgpnt;
 227             int i;
 228             sgpnt = (struct scatterlist *) SCpnt->buffer;
 229             for(i=0; i<SCpnt->use_sg; i++) {
 230 #ifdef DEBUG
 231                 printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address, 
 232                        sgpnt[i].length);
 233 #endif
 234                 if (sgpnt[i].alt_address) {
 235                     if (SCpnt->request.cmd == READ)
 236                         memcpy(sgpnt[i].alt_address, sgpnt[i].address, 
 237                                sgpnt[i].length);
 238                     scsi_free(sgpnt[i].address, sgpnt[i].length);
 239                 };
 240             };
 241 
 242             /* Free list of scatter-gather pointers */
 243             scsi_free(SCpnt->buffer, SCpnt->sglist_len);  
 244         } else {
 245             if (SCpnt->buffer != SCpnt->request.buffer) {
 246 #ifdef DEBUG
 247                 printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 248                        SCpnt->bufflen);
 249 #endif  
 250                 if (SCpnt->request.cmd == READ)
 251                     memcpy(SCpnt->request.buffer, SCpnt->buffer,
 252                            SCpnt->bufflen);
 253                 scsi_free(SCpnt->buffer, SCpnt->bufflen);
 254             };
 255         };
 256         /*
 257          * If multiple sectors are requested in one buffer, then
 258          * they will have been finished off by the first command.
 259          * If not, then we have a multi-buffer command.
 260          */
 261         if (SCpnt->request.nr_sectors > this_count)
 262         {
 263             SCpnt->request.errors = 0;
 264             
 265             if (!SCpnt->request.bh)
 266             {
 267 #ifdef DEBUG
 268                 printk("sd%c : handling page request, no buffer\n",
 269                        'a' + MINOR(SCpnt->request.rq_dev));
 270 #endif
 271                 /*
 272                  * The SCpnt->request.nr_sectors field is always done in 
 273                  * 512 byte sectors, even if this really isn't the case.
 274                  */
 275                 panic("sd.c: linked page request (%lx %x)",
 276                       SCpnt->request.sector, this_count);
 277             }
 278         }
 279         SCpnt = end_scsi_request(SCpnt, 1, this_count);
 280         requeue_sd_request(SCpnt);
 281         return;
 282     }
 283     
 284     /* Free up any indirection buffers we allocated for DMA purposes. */
 285     if (SCpnt->use_sg) {
 286         struct scatterlist * sgpnt;
 287         int i;
 288         sgpnt = (struct scatterlist *) SCpnt->buffer;
 289         for(i=0; i<SCpnt->use_sg; i++) {
 290 #ifdef DEBUG
 291             printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 292                    SCpnt->bufflen);
 293 #endif
 294             if (sgpnt[i].alt_address) {
 295                 scsi_free(sgpnt[i].address, sgpnt[i].length);
 296             };
 297         };
 298         scsi_free(SCpnt->buffer, SCpnt->sglist_len);  /* Free list of scatter-gather pointers */
 299     } else {
 300 #ifdef DEBUG
 301         printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 302                SCpnt->bufflen);
 303 #endif
 304         if (SCpnt->buffer != SCpnt->request.buffer)
 305             scsi_free(SCpnt->buffer, SCpnt->bufflen);
 306     };
 307     
 308     /*
 309      * Now, if we were good little boys and girls, Santa left us a request
 310      * sense buffer.  We can extract information from this, so we
 311      * can choose a block to remap, etc.
 312      */
 313 
 314     if (driver_byte(result) != 0) {
 315         if (suggestion(result) == SUGGEST_REMAP) {
 316 #ifdef REMAP
 317             /*
 318              * Not yet implemented.  A read will fail after being remapped,
 319              * a write will call the strategy routine again.
 320              */
 321             if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].remap)
 322             {
 323                 result = 0;
 324             }
 325             else
 326 #endif
 327         }
 328         
 329         if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
 330             if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
 331                 if(rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->removable) {
 332                     /* detected disc change.  set a bit and quietly refuse
 333                      * further access.
 334                      */  
 335                     rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->changed = 1;
 336                     SCpnt = end_scsi_request(SCpnt, 0, this_count);
 337                     requeue_sd_request(SCpnt);
 338                     return;
 339                 }
 340                 else
 341                 {
 342                     /*
 343                      * Must have been a power glitch, or a bus reset.
 344                      * Could not have been a media change, so we just retry
 345                      * the request and see what happens.
 346                      */
 347                     requeue_sd_request(SCpnt);
 348                     return;
 349                 }
 350             }
 351         }
 352         
 353         
 354         /* If we had an ILLEGAL REQUEST returned, then we may have
 355          * performed an unsupported command.  The only case where this should
 356          * happen is a ten byte read where only a six byte read was supported.
 357          * Also, on a system where READ CAPACITY failed, we may have read 
 358          * past the end of the disk. 
 359          */
 360 
 361         if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
 362             if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten) {
 363                 rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].ten = 0;
 364                 requeue_sd_request(SCpnt);
 365                 result = 0;
 366             } else {
 367                 /* ???? */
 368             }
 369         }
 370     }  /* driver byte != 0 */
 371     if (result) {
 372         printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n",
 373                rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->host->host_no,
 374                rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->channel,
 375            rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->id,
 376              rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)].device->lun, result);
 377         
 378         if (driver_byte(result) & DRIVER_SENSE)
 379             print_sense("sd", SCpnt);
 380         SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
 381         requeue_sd_request(SCpnt);
 382         return;
 383     }
 384 }
 385 
 386 /*
 387  * do_sd_request() is the request handler function for the sd driver.
 388  * Its function in life is to take block device requests and hand them to
 389  * requeue_sd_request(), which translates them into SCSI commands.
 390  */
 391 
 392 static void do_sd_request (void)
 393 {
 394     Scsi_Cmnd * SCpnt = NULL;
 395     Scsi_Device * SDev;
 396     struct request * req = NULL;
 397     unsigned long flags;
 398     int flag = 0;
 399     
 400     save_flags(flags);
 401     while (1==1){
 402         cli();
 403         if (CURRENT != NULL && CURRENT->rq_status == RQ_INACTIVE) {
 404             restore_flags(flags);
 405             return;
 406         };
 407         
 408         INIT_SCSI_REQUEST;
 409         SDev = rscsi_disks[DEVICE_NR(CURRENT->rq_dev)].device;
 410         
 411         /*
 412          * I am not sure where the best place to do this is.  We need
 413          * to hook in a place where we are likely to come if in user
 414          * space.
 415          */
 416         if( SDev->was_reset )
 417         {
 418             /*
 419              * We need to relock the door, but we might
 420              * be in an interrupt handler.  Only do this
 421              * from user space, since we do not want to
 422              * sleep from an interrupt.
 423              */
 424             if( SDev->removable && !intr_count )
 425             {
 426                 scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK, 0);
 427             }
 428             SDev->was_reset = 0;
 429         }
 430                 
 431         /* We have to be careful here. allocate_device will get a free pointer,
 432          * but there is no guarantee that it is queueable.  In normal usage, 
 433          * we want to call this, because other types of devices may have the 
 434          * host all tied up, and we want to make sure that we have at least 
 435          * one request pending for this type of device. We can also come 
 436          * through here while servicing an interrupt, because of the need to 
 437          * start another command. If we call allocate_device more than once, 
 438          * then the system can wedge if the command is not queueable. The 
 439          * request_queueable function is safe because it checks to make sure 
 440          * that the host is able to take another command before it returns
 441          * a pointer.  
 442          */
 443 
 444         if (flag++ == 0)
 445             SCpnt = allocate_device(&CURRENT,
 446                                     rscsi_disks[DEVICE_NR(CURRENT->rq_dev)].device, 0); 
 447         else SCpnt = NULL;
 448         
 449         /*
 450          * The following restore_flags leads to latency problems.  FIXME.
 451          * Using a "sti()" gets rid of the latency problems but causes
 452          * race conditions and crashes.
 453          */
 454         restore_flags(flags);
 455 
 456         /* This is a performance enhancement. We dig down into the request 
 457          * list and try and find a queueable request (i.e. device not busy, 
 458          * and host able to accept another command). If we find one, then we 
 459          * queue it. This can make a big difference on systems with more than 
 460          * one disk drive.  We want to have the interrupts off when monkeying 
 461          * with the request list, because otherwise the kernel might try and 
 462          * slip in a request in between somewhere. 
 463          */
 464 
 465         if (!SCpnt && sd_template.nr_dev > 1){
 466             struct request *req1;
 467             req1 = NULL;
 468             cli();
 469             req = CURRENT;
 470             while(req){
 471                 SCpnt = request_queueable(req, rscsi_disks[DEVICE_NR(req->rq_dev)].device);
 472                 if(SCpnt) break;
 473                 req1 = req;
 474                 req = req->next;
 475             };
 476             if (SCpnt && req->rq_status == RQ_INACTIVE) {
 477                 if (req == CURRENT) 
 478                     CURRENT = CURRENT->next;
 479                 else
 480                     req1->next = req->next;
 481             };
 482             restore_flags(flags);
 483         };
 484         
 485         if (!SCpnt) return; /* Could not find anything to do */
 486         
 487         /* Queue command */
 488         requeue_sd_request(SCpnt);
 489     };  /* While */
 490 }    
 491 
 492 static void requeue_sd_request (Scsi_Cmnd * SCpnt)
 493 {
 494     int dev, devm, block, this_count;
 495     unsigned char cmd[10];
 496     int bounce_size, contiguous;
 497     int max_sg;
 498     struct buffer_head * bh, *bhp;
 499     char * buff, *bounce_buffer;
 500     
 501  repeat:
 502     
 503     if(!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE) {
 504         do_sd_request();
 505         return;
 506     }
 507     
 508     devm =  MINOR(SCpnt->request.rq_dev);
 509     dev = DEVICE_NR(SCpnt->request.rq_dev);
 510 
 511     block = SCpnt->request.sector;
 512     this_count = 0;
 513 
 514 #ifdef DEBUG
 515     printk("Doing sd request, dev = %d, block = %d\n", devm, block);
 516 #endif
 517     
 518     if (devm >= (sd_template.dev_max << 4) || 
 519         !rscsi_disks[dev].device ||
 520         block + SCpnt->request.nr_sectors > sd[devm].nr_sects)
 521     {
 522         SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 523         goto repeat;
 524     }
 525     
 526     block += sd[devm].start_sect;
 527     
 528     if (rscsi_disks[dev].device->changed)
 529     {
 530         /*
 531          * quietly refuse to do anything to a changed disc until the changed 
 532          * bit has been reset
 533          */
 534         /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */
 535         SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 536         goto repeat;
 537     }
 538     
 539 #ifdef DEBUG
 540     printk("sd%c : real dev = /dev/sd%c, block = %d\n", 
 541            'a' + devm, 'a' + dev, block);
 542 #endif
 543     
 544     /*
 545      * If we have a 1K hardware sectorsize, prevent access to single
 546      * 512 byte sectors.  In theory we could handle this - in fact
 547      * the scsi cdrom driver must be able to handle this because
 548      * we typically use 1K blocksizes, and cdroms typically have
 549      * 2K hardware sectorsizes.  Of course, things are simpler
 550      * with the cdrom, since it is read-only.  For performance
 551      * reasons, the filesystems should be able to handle this
 552      * and not force the scsi disk driver to use bounce buffers
 553      * for this.
 554      */
 555     if (rscsi_disks[dev].sector_size == 1024)
 556         if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
 557             printk("sd.c:Bad block number requested");
 558             SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 559             goto repeat;
 560         }
 561     
 562     switch (SCpnt->request.cmd)
 563     {
 564     case WRITE :
 565         if (!rscsi_disks[dev].device->writeable)
 566         {
 567             SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 568             goto repeat;
 569         }
 570         cmd[0] = WRITE_6;
 571         break;
 572     case READ :
 573         cmd[0] = READ_6;
 574         break;
 575     default :
 576         panic ("Unknown sd command %d\n", SCpnt->request.cmd);
 577     }
 578     
 579     SCpnt->this_count = 0;
 580     
 581     /* If the host adapter can deal with very large scatter-gather
 582      * requests, it is a waste of time to cluster 
 583      */
 584     contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
 585     bounce_buffer = NULL;
 586     bounce_size = (SCpnt->request.nr_sectors << 9);
 587     
 588     /* First see if we need a bounce buffer for this request. If we do, make 
 589      * sure that we can allocate a buffer. Do not waste space by allocating 
 590      * a bounce buffer if we are straddling the 16Mb line 
 591      */ 
 592     if (contiguous && SCpnt->request.bh &&
 593         ((long) SCpnt->request.bh->b_data) 
 594         + (SCpnt->request.nr_sectors << 9) - 1 > ISA_DMA_THRESHOLD 
 595         && SCpnt->host->unchecked_isa_dma) {
 596         if(((long) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
 597             bounce_buffer = (char *) scsi_malloc(bounce_size);
 598         if(!bounce_buffer) contiguous = 0;
 599     };
 600     
 601     if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
 602         for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp, 
 603             bhp = bhp->b_reqnext) {
 604             if(!CONTIGUOUS_BUFFERS(bh,bhp)) { 
 605                 if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
 606                 contiguous = 0;
 607                 break;
 608             } 
 609         };
 610     if (!SCpnt->request.bh || contiguous) {
 611         
 612         /* case of page request (i.e. raw device), or unlinked buffer */
 613         this_count = SCpnt->request.nr_sectors;
 614         buff = SCpnt->request.buffer;
 615         SCpnt->use_sg = 0;
 616         
 617     } else if (SCpnt->host->sg_tablesize == 0 ||
 618                (need_isa_buffer && dma_free_sectors <= 10)) {
 619         
 620         /* Case of host adapter that cannot scatter-gather.  We also
 621          * come here if we are running low on DMA buffer memory.  We set
 622          * a threshold higher than what we would need for this request so
 623          * we leave room for other requests.  Even though we would not need
 624          * it all, we need to be conservative, because if we run low enough
 625          * we have no choice but to panic. 
 626          */
 627         if (SCpnt->host->sg_tablesize != 0 &&
 628             need_isa_buffer && 
 629             dma_free_sectors <= 10)
 630             printk("Warning: SCSI DMA buffer space running low.  Using non scatter-gather I/O.\n");
 631         
 632         this_count = SCpnt->request.current_nr_sectors;
 633         buff = SCpnt->request.buffer;
 634         SCpnt->use_sg = 0;
 635         
 636     } else {
 637         
 638         /* Scatter-gather capable host adapter */
 639         struct scatterlist * sgpnt;
 640         int count, this_count_max;
 641         int counted;
 642         
 643         bh = SCpnt->request.bh;
 644         this_count = 0;
 645         this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
 646         count = 0;
 647         bhp = NULL;
 648         while(bh) {
 649             if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
 650             if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
 651                !CLUSTERABLE_DEVICE(SCpnt) ||
 652                (SCpnt->host->unchecked_isa_dma &&
 653                 ((unsigned long) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
 654                 if (count < SCpnt->host->sg_tablesize) count++;
 655                 else break;
 656             };
 657             this_count += (bh->b_size >> 9);
 658             bhp = bh;
 659             bh = bh->b_reqnext;
 660         };
 661 #if 0
 662         if(SCpnt->host->unchecked_isa_dma &&
 663            ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
 664 #endif
 665         SCpnt->use_sg = count;  /* Number of chains */
 666         count = 512;/* scsi_malloc can only allocate in chunks of 512 bytes */
 667         while( count < (SCpnt->use_sg * sizeof(struct scatterlist))) 
 668             count = count << 1;
 669         SCpnt->sglist_len = count;
 670         max_sg = count / sizeof(struct scatterlist);
 671         if(SCpnt->host->sg_tablesize < max_sg) 
 672             max_sg = SCpnt->host->sg_tablesize;
 673         sgpnt = (struct scatterlist * ) scsi_malloc(count);
 674         if (!sgpnt) {
 675             printk("Warning - running *really* short on DMA buffers\n");
 676             SCpnt->use_sg = 0;    /* No memory left - bail out */
 677             this_count = SCpnt->request.current_nr_sectors;
 678             buff = SCpnt->request.buffer;
 679         } else {
 680             memset(sgpnt, 0, count);  /* Zero so it is easy to fill, but only
 681                                        * if memory is available 
 682                                        */
 683             buff = (char *) sgpnt;
 684             counted = 0;
 685             for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
 686                 count < SCpnt->use_sg && bh; 
 687                 count++, bh = bhp) {
 688                 
 689                 bhp = bh->b_reqnext;
 690                 
 691                 if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
 692                 sgpnt[count].length += bh->b_size;
 693                 counted += bh->b_size >> 9;
 694                 
 695                 if (((long) sgpnt[count].address) + sgpnt[count].length - 1 > 
 696                     ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
 697                     !sgpnt[count].alt_address) {
 698                     sgpnt[count].alt_address = sgpnt[count].address;
 699                     /* We try and avoid exhausting the DMA pool, since it is 
 700                      * easier to control usage here. In other places we might 
 701                      * have a more pressing need, and we would be screwed if 
 702                      * we ran out */
 703                     if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
 704                         sgpnt[count].address = NULL;
 705                     } else {
 706                         sgpnt[count].address = 
 707                             (char *) scsi_malloc(sgpnt[count].length);
 708                     };
 709                     /* If we start running low on DMA buffers, we abort the 
 710                      * scatter-gather operation, and free all of the memory 
 711                      * we have allocated.  We want to ensure that all scsi 
 712                      * operations are able to do at least a non-scatter/gather
 713                      * operation */
 714                     if(sgpnt[count].address == NULL){ /* Out of dma memory */
 715 #if 0
 716                         printk("Warning: Running low on SCSI DMA buffers");
 717                         /* Try switching back to a non s-g operation. */
 718                         while(--count >= 0){
 719                             if(sgpnt[count].alt_address) 
 720                                 scsi_free(sgpnt[count].address, 
 721                                           sgpnt[count].length);
 722                         };
 723                         this_count = SCpnt->request.current_nr_sectors;
 724                         buff = SCpnt->request.buffer;
 725                         SCpnt->use_sg = 0;
 726                         scsi_free(sgpnt, SCpnt->sglist_len);
 727 #endif
 728                         SCpnt->use_sg = count;
 729                         this_count = counted -= bh->b_size >> 9;
 730                         break;
 731                     };
 732                     
 733                 };
 734                 
 735                 /* Only cluster buffers if we know that we can supply DMA 
 736                  * buffers large enough to satisfy the request. Do not cluster
 737                  * a new request if this would mean that we suddenly need to 
 738                  * start using DMA bounce buffers */
 739                 if(bhp && CONTIGUOUS_BUFFERS(bh,bhp) 
 740                    && CLUSTERABLE_DEVICE(SCpnt)) {
 741                     char * tmp;
 742                     
 743                     if (((long) sgpnt[count].address) + sgpnt[count].length +
 744                         bhp->b_size - 1 > ISA_DMA_THRESHOLD && 
 745                         (SCpnt->host->unchecked_isa_dma) &&
 746                         !sgpnt[count].alt_address) continue;
 747                     
 748                     if(!sgpnt[count].alt_address) {count--; continue; }
 749                     if(dma_free_sectors > 10)
 750                         tmp = (char *) scsi_malloc(sgpnt[count].length 
 751                                                    + bhp->b_size);
 752                     else {
 753                         tmp = NULL;
 754                         max_sg = SCpnt->use_sg;
 755                     };
 756                     if(tmp){
 757                         scsi_free(sgpnt[count].address, sgpnt[count].length);
 758                         sgpnt[count].address = tmp;
 759                         count--;
 760                         continue;
 761                     };
 762                     
 763                     /* If we are allowed another sg chain, then increment 
 764                      * counter so we can insert it.  Otherwise we will end 
 765                      * up truncating */
 766                     
 767                     if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
 768                 };  /* contiguous buffers */
 769             }; /* for loop */
 770             
 771             /* This is actually how many we are going to transfer */
 772             this_count = counted; 
 773             
 774             if(count < SCpnt->use_sg || SCpnt->use_sg 
 775                > SCpnt->host->sg_tablesize){
 776                 bh = SCpnt->request.bh;
 777                 printk("Use sg, count %d %x %d\n", 
 778                        SCpnt->use_sg, count, dma_free_sectors);
 779                 printk("maxsg = %x, counted = %d this_count = %d\n", 
 780                        max_sg, counted, this_count);
 781                 while(bh){
 782                     printk("[%p %lx] ", bh->b_data, bh->b_size);
 783                     bh = bh->b_reqnext;
 784                 };
 785                 if(SCpnt->use_sg < 16)
 786                     for(count=0; count<SCpnt->use_sg; count++)
 787                         printk("{%d:%p %p %d}  ", count,
 788                                sgpnt[count].address,
 789                                sgpnt[count].alt_address,
 790                                sgpnt[count].length);
 791                 panic("Ooops");
 792             };
 793             
 794             if (SCpnt->request.cmd == WRITE)
 795                 for(count=0; count<SCpnt->use_sg; count++)
 796                     if(sgpnt[count].alt_address)
 797                         memcpy(sgpnt[count].address, sgpnt[count].alt_address, 
 798                                sgpnt[count].length);
 799         };  /* Able to malloc sgpnt */
 800     };  /* Host adapter capable of scatter-gather */
 801     
 802     /* Now handle the possibility of DMA to addresses > 16Mb */
 803     
 804     if(SCpnt->use_sg == 0){
 805         if (((long) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD && 
 806             (SCpnt->host->unchecked_isa_dma)) {
 807             if(bounce_buffer)
 808                 buff = bounce_buffer;
 809             else
 810                 buff = (char *) scsi_malloc(this_count << 9);
 811             if(buff == NULL) {  /* Try backing off a bit if we are low on mem*/
 812                 this_count = SCpnt->request.current_nr_sectors;
 813                 buff = (char *) scsi_malloc(this_count << 9);
 814                 if(!buff) panic("Ran out of DMA buffers.");
 815             };
 816             if (SCpnt->request.cmd == WRITE)
 817                 memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
 818         };
 819     };
 820 #ifdef DEBUG
 821     printk("sd%c : %s %d/%d 512 byte blocks.\n", 
 822            'a' + devm,
 823            (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
 824            this_count, SCpnt->request.nr_sectors);
 825 #endif
 826     
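         /*
          * SCSI-1/SCSI-2 style addressing: the target LUN is placed in the
          * top three bits of CDB byte 1.
          */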
 827     cmd[1] = (SCpnt->lun << 5) & 0xe0;
 828     
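         /*
          * The block layer counts in 512 byte sectors.  Convert the starting
          * block and count to the device's native sector size: halve them
          * for 1024 byte sectors, double them for 256 byte sectors.
          */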
 829     if (rscsi_disks[dev].sector_size == 1024){
 830         if(block & 1) panic("sd.c:Bad block number requested");
 831         if(this_count & 1) panic("sd.c:Bad block number requested");
 832         block = block >> 1;
 833         this_count = this_count >> 1;
 834     };
 835     
 836     if (rscsi_disks[dev].sector_size == 256){
 837         block = block << 1;
 838         this_count = this_count << 1;
 839     };
 840     
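         /*
          * A 6-byte READ/WRITE CDB only carries a 21-bit block address and an
          * 8-bit transfer length, so switch to the 10-byte form (32-bit
          * address, 16-bit length) for anything larger, provided the device
          * accepts 10-byte commands (the "ten" flag).
          */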
 841     if (((this_count > 0xff) ||  (block > 0x1fffff)) && rscsi_disks[dev].ten)
 842     {
 843         if (this_count > 0xffff)
 844             this_count = 0xffff;
 845         
 846         cmd[0] += READ_10 - READ_6 ;
 847         cmd[2] = (unsigned char) (block >> 24) & 0xff;
 848         cmd[3] = (unsigned char) (block >> 16) & 0xff;
 849         cmd[4] = (unsigned char) (block >> 8) & 0xff;
 850         cmd[5] = (unsigned char) block & 0xff;
 851         cmd[6] = cmd[9] = 0;
 852         cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
 853         cmd[8] = (unsigned char) this_count & 0xff;
 854     }
 855     else
 856     {
 857         if (this_count > 0xff)
 858             this_count = 0xff;
 859         
 860         cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
 861         cmd[2] = (unsigned char) ((block >> 8) & 0xff);
 862         cmd[3] = (unsigned char) block & 0xff;
 863         cmd[4] = (unsigned char) this_count;
 864         cmd[5] = 0;
 865     }
 866     
 867     /*
 868      * We shouldn't disconnect in the middle of a sector, so with a dumb 
 869      * host adapter, it's safe to assume that we can at least transfer 
 870      * this many bytes between each connect / disconnect.  
 871      */
 872     
 873     SCpnt->transfersize = rscsi_disks[dev].sector_size;
 874     SCpnt->underflow = this_count << 9; 
 875     scsi_do_cmd (SCpnt, (void *) cmd, buff, 
 876                  this_count * rscsi_disks[dev].sector_size,
 877                  rw_intr, 
 878                  (SCpnt->device->type == TYPE_DISK ? 
 879                   SD_TIMEOUT : SD_MOD_TIMEOUT),
 880                  MAX_RETRIES);
 881 }
 882 
 883 static int check_scsidisk_media_change(kdev_t full_dev){
 884     int retval;
 885     int target;
 886     struct inode inode;
 887     int flag = 0;
 888     
 889     target =  DEVICE_NR(full_dev);
 890     
 891     if (target >= sd_template.dev_max ||
 892         !rscsi_disks[target].device) {
 893         printk("SCSI disk request error: invalid device.\n");
 894         return 0;
 895     };
 896     
 897     if(!rscsi_disks[target].device->removable) return 0;
 898     
 899     inode.i_rdev = full_dev;  /* This is all we really need here */
 900     retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
 901     
 902     if(retval){ /* Unable to test, unit probably not ready.  This usually
 903                  * means there is no disc in the drive.  Mark as changed,
 904                  * and we will figure it out later once the drive is
 905                  * available again.  */
 906         
 907         rscsi_disks[target].device->changed = 1;
 908         return 1; /* This will force a flush, if called from
 909                    * check_disk_change */
 910     };
 911     
 912     retval = rscsi_disks[target].device->changed;
 913     if(!flag) rscsi_disks[target].device->changed = 0;
 914     return retval;
 915 }
 916 
 917 static void sd_init_done (Scsi_Cmnd * SCpnt)
 918 {
 919     struct request * req;
 920     
 921     req = &SCpnt->request;
 922     req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
 923     
 924     if (req->sem != NULL) {
 925         up(req->sem);
 926     }
 927 }
 928 
 929 static int sd_init_onedisk(int i)
 930 {
 931     unsigned char cmd[10];
 932     unsigned char *buffer;
 933     unsigned long spintime;
 934     int the_result, retries;
 935     Scsi_Cmnd * SCpnt;
 936     
 937     /* We need to retry the READ_CAPACITY because a UNIT_ATTENTION is 
 938      * considered a fatal error, and many devices report such an error 
 939      * just after a scsi bus reset. 
 940      */
 941     
 942     SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
 943     buffer = (unsigned char *) scsi_malloc(512);
 944     
 945     spintime = 0;
 946     
 947     /* Spin up drives, as required.  Only do this at boot time */
 948     if (current->pid == 0){
 949         do{
 950             retries = 0;
 951             while(retries < 3)
 952             {
 953                 cmd[0] = TEST_UNIT_READY;
 954                 cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
 955                 memset ((void *) &cmd[2], 0, 8);
 956                 SCpnt->request.rq_status = RQ_SCSI_BUSY;  /* Mark as really busy again */
 957                 SCpnt->cmd_len = 0;
 958                 SCpnt->sense_buffer[0] = 0;
 959                 SCpnt->sense_buffer[2] = 0;
 960                 
 961                 scsi_do_cmd (SCpnt,
 962                              (void *) cmd, (void *) buffer,
 963                              512, sd_init_done,  SD_TIMEOUT,
 964                              MAX_RETRIES);
 965                 
 966                 while(SCpnt->request.rq_status != RQ_SCSI_DONE) barrier();
 967                 
 968                 the_result = SCpnt->result;
 969                 retries++;
 970                 if(   the_result == 0
 971                    || SCpnt->sense_buffer[2] != UNIT_ATTENTION)
 972                     break;
 973             }
 974             
 975             /* Look for non-removable devices that return NOT_READY.  
 976              * Issue command to spin up drive for these cases. */
 977             if(the_result && !rscsi_disks[i].device->removable && 
 978                SCpnt->sense_buffer[2] == NOT_READY) {
 979                 int time1;
 980                 if(!spintime){
 981                     printk( "sd%c: Spinning up disk...", 'a' + i );
 982                     cmd[0] = START_STOP;
 983                     cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
 984                     cmd[1] |= 1;  /* Return immediately */
 985                     memset ((void *) &cmd[2], 0, 8);
 986                     cmd[4] = 1; /* Start spin cycle */
 987                     /* Mark as really busy again */
 988                     SCpnt->request.rq_status = RQ_SCSI_BUSY; 
 989                     SCpnt->cmd_len = 0;
 990                     SCpnt->sense_buffer[0] = 0;
 991                     SCpnt->sense_buffer[2] = 0;
 992                     
 993                     scsi_do_cmd (SCpnt,
 994                                  (void *) cmd, (void *) buffer,
 995                                  512, sd_init_done,  SD_TIMEOUT,
 996                                  MAX_RETRIES);
 997                     
 998                     while(SCpnt->request.rq_status != RQ_SCSI_DONE)
 999                       barrier();
1000                     
1001                     spintime = jiffies;
1002                 };
1003                 
1004                 time1 = jiffies;
1005                 while(jiffies < time1 + HZ); /* Wait 1 second for next try */
1006                 printk( "." );
1007             };
1008         } while(the_result && spintime && spintime+100*HZ > jiffies);
1009         if (spintime) {
1010             if (the_result)
1011                 printk( "not responding...\n" );
1012             else
1013                 printk( "ready\n" );
1014         }
1015     };  /* current->pid == 0 */
1016     
1017     
1018     retries = 3;
1019     do {
1020         cmd[0] = READ_CAPACITY;
1021         cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
1022         memset ((void *) &cmd[2], 0, 8);
1023         memset ((void *) buffer, 0, 8);
1024         SCpnt->request.rq_status = RQ_SCSI_BUSY;  /* Mark as really busy again */
1025         SCpnt->cmd_len = 0;
1026         SCpnt->sense_buffer[0] = 0;
1027         SCpnt->sense_buffer[2] = 0;
1028         
1029         scsi_do_cmd (SCpnt,
1030                      (void *) cmd, (void *) buffer,
1031                      8, sd_init_done,  SD_TIMEOUT,
1032                      MAX_RETRIES);
1033         
1034         if (current->pid == 0) {
1035             while(SCpnt->request.rq_status != RQ_SCSI_DONE)
1036               barrier();
1037         } else {
1038             if (SCpnt->request.rq_status != RQ_SCSI_DONE){
1039                 struct semaphore sem = MUTEX_LOCKED;
1040                 SCpnt->request.sem = &sem;
1041                 down(&sem);
1042                 /* Hmm.. Have to ask about this one.. */
1043                 while (SCpnt->request.rq_status != RQ_SCSI_DONE) 
1044                   schedule();
1045             }
1046         }
1047         
1048         the_result = SCpnt->result;
1049         retries--;
1050         
1051     } while(the_result && retries);
1052     
1053     SCpnt->request.rq_status = RQ_INACTIVE;  /* Mark as not busy */
1054     
1055     wake_up(&SCpnt->device->device_wait); 
1056     
1057     /* Wake up a process waiting for device */
1058     
1059     /*
1060      * The SCSI standard says: 
1061      * "READ CAPACITY is necessary for self configuring software"
1062      *  While not mandatory, support of READ CAPACITY is strongly encouraged.
1063      *  We used to die if we couldn't successfully do a READ CAPACITY.
1064      *  But, now we go on about our way.  The side effects of this are
1065      *
1066      *  1. We can't know block size with certainty. I have said "512 bytes 
1067      *     is it" as this is most common.
1068      *
1069      *  2. Recovery from when someone attempts to read past the end of the 
1070      *     raw device will be slower.
1071      */
1072     
1073     if (the_result)
1074     {
1075         printk ("sd%c : READ CAPACITY failed.\n"
1076                 "sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
1077                 'a' + i, 'a' + i,
1078                 status_byte(the_result),
1079                 msg_byte(the_result),
1080                 host_byte(the_result),
1081                 driver_byte(the_result)
1082                 );
1083         if (driver_byte(the_result)  & DRIVER_SENSE)
1084             printk("sd%c : extended sense code = %1x \n", 
1085                    'a' + i, SCpnt->sense_buffer[2] & 0xf);
1086         else
1087             printk("sd%c : sense not available. \n", 'a' + i);
1088         
1089         printk("sd%c : block size assumed to be 512 bytes, disk size 1GB.  \n",
1090                'a' + i);
1091         rscsi_disks[i].capacity = 0x1fffff;
1092         rscsi_disks[i].sector_size = 512;
1093         
1094         /* Set the changed flag for removable devices if not ready - sometimes drives
1095          * will not report this properly. */
1096         if(rscsi_disks[i].device->removable && 
1097            SCpnt->sense_buffer[2] == NOT_READY)
1098             rscsi_disks[i].device->changed = 1;
1099         
1100     }
1101     else
1102     {
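             /*
              * READ CAPACITY returns the address of the last logical block
              * and the block length in bytes, both as big-endian 32-bit
              * values.
              */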
1103         rscsi_disks[i].capacity = (buffer[0] << 24) |
1104             (buffer[1] << 16) |
1105                 (buffer[2] << 8) |
1106                     buffer[3];
1107         
1108         rscsi_disks[i].sector_size = (buffer[4] << 24) |
1109             (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
1110         
1111         if (rscsi_disks[i].sector_size != 512 &&
1112             rscsi_disks[i].sector_size != 1024 &&
1113             rscsi_disks[i].sector_size != 256)
1114         {
1115             printk ("sd%c : unsupported sector size %d.\n",
1116                     'a' + i, rscsi_disks[i].sector_size);
1117             if(rscsi_disks[i].device->removable){
1118                 rscsi_disks[i].capacity = 0;
1119             } else {
1120                 printk ("scsi : deleting disk entry.\n");
1121                 rscsi_disks[i].device = NULL;
1122                 sd_template.nr_dev--;
1123                 return i;
1124             };
1125         }
1126         {
1127             /*
1128              * The msdos fs needs to know the hardware sector size,
1129              * so I have created this table.  See ll_rw_blk.c
1130              * Jacques Gelinas (Jacques@solucorp.qc.ca)
1131              */
1132             int m;
1133             int hard_sector = rscsi_disks[i].sector_size;
1134             /* There are 16 minors allocated for each device */
1135             for (m=i<<4; m<((i+1)<<4); m++){
1136                 sd_hardsizes[m] = hard_sector;
1137             }
1138             printk ("SCSI Hardware sector size is %d bytes on device sd%c\n",
1139                     hard_sector, i+'a');
1140         }
1141         if(rscsi_disks[i].sector_size == 1024)
1142             rscsi_disks[i].capacity <<= 1;  /* Change into 512 byte sectors */
1143         if(rscsi_disks[i].sector_size == 256)
1144             rscsi_disks[i].capacity >>= 1;  /* Change into 512 byte sectors */
1145     }
1146     
1147     rscsi_disks[i].ten = 1;
1148     rscsi_disks[i].remap = 1;
1149     scsi_free(buffer, 512);
1150     return i;
1151 }
1152 
1153 /*
1154  * The sd_init() function looks at all SCSI drives present, determines
1155  * their size, and reads partition table entries for them.
1156  */
1157 
1158 static int sd_registered = 0;
1159 
1160 static int sd_init()
1161 {
1162     int i;
1163     
1164     if (sd_template.dev_noticed == 0) return 0;
1165     
1166     if(!sd_registered) {
1167           if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
1168               printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
1169               return 1;
1170           }
1171           sd_registered++;
1172       }
1173     
1174     /* We do not support attaching loadable devices yet. */
1175     if(rscsi_disks) return 0;
1176     
1177     sd_template.dev_max = sd_template.dev_noticed + SD_EXTRA_DEVS;
1178     
1179     rscsi_disks = (Scsi_Disk *) 
1180         scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC);
1181     memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));
1182     
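         /*
          * Each disk gets 16 minors (the whole disk plus 15 partitions; see
          * the minor_shift of 4 in sd_gendisk), hence the "<< 4" when sizing
          * the per-minor tables below.
          */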
1183     sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) * 
1184                                         sizeof(int), GFP_ATOMIC);
1185     memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));
1186     
1187     sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) * 
1188                                              sizeof(int), GFP_ATOMIC);
1189     
1190     sd_hardsizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) * 
1191                                             sizeof(int), GFP_ATOMIC);
1192     
1193     for(i=0;i<(sd_template.dev_max << 4);i++){
1194         sd_blocksizes[i] = 1024;
1195         sd_hardsizes[i] = 512;
1196     }
1197     blksize_size[MAJOR_NR] = sd_blocksizes;
1198     hardsect_size[MAJOR_NR] = sd_hardsizes;
1199     sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
1200                                                sizeof(struct hd_struct),
1201                                                GFP_ATOMIC);
1202     
1203     
1204     sd_gendisk.max_nr = sd_template.dev_max;
1205     sd_gendisk.part = sd;
1206     sd_gendisk.sizes = sd_sizes;
1207     sd_gendisk.real_devices = (void *) rscsi_disks;
1208     return 0;
1209 }
1210 
1211 static void sd_finish()
1212 {
1213     int i;
1214 
1215     blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
1216     
1217     sd_gendisk.next = gendisk_head;
1218     gendisk_head = &sd_gendisk;
1219     
1220     for (i = 0; i < sd_template.dev_max; ++i)
1221         if (!rscsi_disks[i].capacity && 
1222             rscsi_disks[i].device)
1223         {
1224             i = sd_init_onedisk(i);
1225             if (MODULE_FLAG
1226                 && !rscsi_disks[i].has_part_table) {
1227                 sd_sizes[i << 4] = rscsi_disks[i].capacity;
1228                 revalidate_scsidisk(MKDEV(MAJOR_NR, i << 4), 0);
1229             }
1230             rscsi_disks[i].has_part_table = 1;
1231         }
1232     
1233     /* If our host adapter is capable of scatter-gather, then we increase
1234      * the read-ahead to 120 sectors (60 1K blocks).  If not, we use
1235      * a two block (4 sector) read ahead. 
1236      */
1237     if(rscsi_disks[0].device && rscsi_disks[0].device->host->sg_tablesize)
1238         read_ahead[MAJOR_NR] = 120;  /* 120 sector read-ahead */
1239     else
1240         read_ahead[MAJOR_NR] = 4;  /* 4 sector read-ahead */
1241 
1242     return;
1243 }
1244 
1245 static int sd_detect(Scsi_Device * SDp){
1246     if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1247     
1248     printk("Detected scsi disk sd%c at scsi%d, channel %d, id %d, lun %d\n", 
1249            'a'+ (sd_template.dev_noticed++),
1250            SDp->host->host_no, SDp->channel, SDp->id, SDp->lun); 
1251     
1252     return 1;
1253 }
1254 
1255 static int sd_attach(Scsi_Device * SDp){
1256     Scsi_Disk * dpnt;
1257     int i;
1258     
1259     if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1260     
1261     if(sd_template.nr_dev >= sd_template.dev_max) {
1262         SDp->attached--;
1263         return 1;
1264     }
1265     
1266     for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++) 
1267         if(!dpnt->device) break;
1268     
1269     if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)");
1270     
1271     SDp->scsi_request_fn = do_sd_request;
1272     rscsi_disks[i].device = SDp;
1273     rscsi_disks[i].has_part_table = 0;
1274     sd_template.nr_dev++;
1275     sd_gendisk.nr_real++;
1276     return 0;
1277 }
1278 
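     /*
      * These macros parameterize revalidate_scsidisk() below so that the
      * generic revalidation logic does not refer to the sd data structures
      * directly.
      */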
1279 #define DEVICE_BUSY rscsi_disks[target].device->busy
1280 #define USAGE rscsi_disks[target].device->access_count
1281 #define CAPACITY rscsi_disks[target].capacity
1282 #define MAYBE_REINIT  sd_init_onedisk(target)
1283 #define GENDISK_STRUCT sd_gendisk
1284 
1285 /* This routine is called to flush all partitions and partition tables
1286  * for a changed scsi disk, and then re-read the new partition table.
1287  * If we are revalidating a disk because of a media change, then we
1288  * enter with usage == 0.  If we are using an ioctl, we automatically have
1289  * usage == 1 (we need an open channel to use an ioctl :-), so this
1290  * is our limit.
1291  */
1292 int revalidate_scsidisk(kdev_t dev, int maxusage){
1293     int target;
1294     struct gendisk * gdev;
1295     unsigned long flags;
1296     int max_p;
1297     int start;
1298     int i;
1299     
1300     target =  DEVICE_NR(dev);
1301     gdev = &GENDISK_STRUCT;
1302     
1303     save_flags(flags);
1304     cli();
1305     if (DEVICE_BUSY || USAGE > maxusage) {
1306         restore_flags(flags);
1307         printk("Device busy for revalidation (usage=%d)\n", USAGE);
1308         return -EBUSY;
1309     };
1310     DEVICE_BUSY = 1;
1311     restore_flags(flags);
1312     
1313     max_p = gdev->max_p;
1314     start = target << gdev->minor_shift;
1315     
1316     for (i=max_p - 1; i >=0 ; i--) {
1317         int minor = start+i;
1318         kdev_t devi = MKDEV(MAJOR_NR, minor);
1319         sync_dev(devi);
1320         invalidate_inodes(devi);
1321         invalidate_buffers(devi);
1322         gdev->part[minor].start_sect = 0;
1323         gdev->part[minor].nr_sects = 0;
1324         /*
1325          * Reset the blocksize for everything so that we can read
1326          * the partition table.
1327          */
1328         blksize_size[MAJOR_NR][minor] = 1024;
1329     };
1330     
1331 #ifdef MAYBE_REINIT
1332     MAYBE_REINIT;
1333 #endif
1334     
1335     gdev->part[start].nr_sects = CAPACITY;
1336     resetup_one_dev(gdev, target);
1337     
1338     DEVICE_BUSY = 0;
1339     return 0;
1340 }
1341 
1342 static int fop_revalidate_scsidisk(kdev_t dev){
1343     return revalidate_scsidisk(dev, 0);
1344 }
1345 
1346 
1347 static void sd_detach(Scsi_Device * SDp)
1348 {
1349     Scsi_Disk * dpnt;
1350     int i;
1351     int max_p;
1352     int start;
1353     
1354     for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++) 
1355         if(dpnt->device == SDp) {
1356             
1357             /* If we are disconnecting a disk driver, sync and invalidate 
1358              * everything */
1359             max_p = sd_gendisk.max_p;
1360             start = i << sd_gendisk.minor_shift;
1361             
1362             for (i=max_p - 1; i >=0 ; i--) {
1363                 int minor = start+i;
1364                 kdev_t devi = MKDEV(MAJOR_NR, minor);
1365                 sync_dev(devi);
1366                 invalidate_inodes(devi);
1367                 invalidate_buffers(devi);
1368                 sd_gendisk.part[minor].start_sect = 0;
1369                 sd_gendisk.part[minor].nr_sects = 0;
1370                 sd_sizes[minor] = 0;
1371             };
1372             
1373             dpnt->has_part_table = 0;
1374             dpnt->device = NULL;
1375             dpnt->capacity = 0;
1376             SDp->attached--;
1377             sd_template.dev_noticed--;
1378             sd_template.nr_dev--;
1379             sd_gendisk.nr_real--;
1380             return;
1381         }
1382     return;
1383 }
1384 
1385 #ifdef MODULE
1386 #include <linux/module.h>
1387 #include <linux/version.h>
1388 
1389 char kernel_version[] = UTS_RELEASE;
1390 
1391 int init_module(void) {
1392     sd_template.usage_count = &mod_use_count_;
1393     return scsi_register_module(MODULE_SCSI_DEV, &sd_template);
1394 }
1395 
1396 void cleanup_module( void) 
1397 {
1398     struct gendisk * prev_sdgd;
1399     struct gendisk * sdgd;
1400     
1401     if (MOD_IN_USE) {
1402         printk(KERN_INFO __FILE__ ": module is in use, remove rejected\n");
1403         return;
1404     }
1405     scsi_unregister_module(MODULE_SCSI_DEV, &sd_template);
1406     unregister_blkdev(SCSI_DISK_MAJOR, "sd");
1407     sd_registered--;
1408     if( rscsi_disks != NULL )
1409     {
1410         scsi_init_free((char *) rscsi_disks,
1411                        sd_template.dev_max 
1412                        * sizeof(Scsi_Disk));
1413         
1414         scsi_init_free((char *) sd_sizes, (sd_template.dev_max << 4) * sizeof(int));
1415         scsi_init_free((char *) sd_blocksizes, (sd_template.dev_max << 4) * sizeof(int));
1416         scsi_init_free((char *) sd_hardsizes, (sd_template.dev_max << 4) * sizeof(int));
1417         scsi_init_free((char *) sd, 
1418                        (sd_template.dev_max << 4) * sizeof(struct hd_struct));
1419         /*
1420          * Now remove sd_gendisk from the linked list
1421          */
1422         sdgd = gendisk_head;
1423         prev_sdgd = NULL;
1424         while(sdgd != &sd_gendisk)
1425         {
1426             prev_sdgd = sdgd;
1427             sdgd = sdgd->next;
1428         }
1429         
1430         if(sdgd != &sd_gendisk)
1431             printk("sd_gendisk not in disk chain.\n");
1432         else {
1433             if(prev_sdgd != NULL)
1434                 prev_sdgd->next = sdgd->next;
1435             else
1436                 gendisk_head = sdgd->next;
1437         }
1438     }
1439     
1440     blksize_size[MAJOR_NR] = NULL;
1441     blk_dev[MAJOR_NR].request_fn = NULL;
1442     blk_size[MAJOR_NR] = NULL;  
1443     hardsect_size[MAJOR_NR] = NULL;
1444     read_ahead[MAJOR_NR] = 0;
1445     sd_template.dev_max = 0;
1446 }
1447 #endif /* MODULE */
1448 
1449 /*
1450  * Overrides for Emacs so that we almost follow Linus's tabbing style.
1451  * Emacs will notice this stuff at the end of the file and automatically
1452  * adjust the settings for this buffer only.  This must remain at the end
1453  * of the file.
1454  * ---------------------------------------------------------------------------
1455  * Local variables:
1456  * c-indent-level: 4
1457  * c-brace-imaginary-offset: 0
1458  * c-brace-offset: -4
1459  * c-argdecl-indent: 4
1460  * c-label-offset: -4
1461  * c-continued-statement-offset: 4
1462  * c-continued-brace-offset: 0
1463  * indent-tabs-mode: nil
1464  * tab-width: 8
1465  * End:
1466  */
