root/drivers/scsi/sd.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. sd_open
  2. sd_release
  3. sd_geninit
  4. rw_intr
  5. do_sd_request
  6. requeue_sd_request
  7. check_scsidisk_media_change
  8. sd_init_done
  9. sd_init_onedisk
  10. sd_init
  11. sd_finish
  12. sd_detect
  13. sd_attach
  14. revalidate_scsidisk
  15. fop_revalidate_scsidisk

   1 /*
   2  *      sd.c Copyright (C) 1992 Drew Eckhardt 
   3  *           Copyright (C) 1993, 1994 Eric Youngdale
   4  *      Linux scsi disk driver by
   5  *              Drew Eckhardt 
   6  *
   7  *      <drew@colorado.edu>
   8  *
   9  *       Modified by Eric Youngdale ericy@cais.com to
  10  *       add scatter-gather, multiple outstanding request, and other
  11  *       enhancements.
  12  */
  13 
  14 #include <linux/fs.h>
  15 #include <linux/kernel.h>
  16 #include <linux/sched.h>
  17 #include <linux/string.h>
  18 #include <linux/errno.h>
  19 #include <asm/system.h>
  20 
  21 #define MAJOR_NR SCSI_DISK_MAJOR
  22 #include "../block/blk.h"
  23 #include "scsi.h"
  24 #include "hosts.h"
  25 #include "sd.h"
  26 #include "scsi_ioctl.h"
  27 #include "constants.h"
  28 
  29 #include <linux/genhd.h>
  30 
  31 /*
  32 static const char RCSid[] = "$Header:";
  33 */
  34 
  35 #define MAX_RETRIES 5
  36 
  37 /*
  38  *      Time out in seconds for disks and Magneto-opticals (which are slower).
  39  */
  40 
  41 #define SD_TIMEOUT 600
  42 #define SD_MOD_TIMEOUT 750
  43 
  44 #define CLUSTERABLE_DEVICE(SC) (SC->host->hostt->use_clustering && \
  45                             SC->device->type != TYPE_MOD)
  46 
  47 struct hd_struct * sd;
  48 
  49 Scsi_Disk * rscsi_disks;
  50 static int * sd_sizes;
  51 static int * sd_blocksizes;
  52 
  53 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
  54 
  55 static int check_scsidisk_media_change(dev_t);
  56 static int fop_revalidate_scsidisk(dev_t);
  57 
  58 static sd_init_onedisk(int);
  59 
  60 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
  61 
  62 static void sd_init(void);
  63 static void sd_finish(void);
  64 static void sd_attach(Scsi_Device *);
  65 static int sd_detect(Scsi_Device *);
  66 
  67 struct Scsi_Device_Template sd_template = {NULL, "disk", "sd", TYPE_DISK, 
  68                                              SCSI_DISK_MAJOR, 0, 0, 0, 1,
  69                                              sd_detect, sd_init,
  70                                              sd_finish, sd_attach, NULL};
  71 
  72 static int sd_open(struct inode * inode, struct file * filp)
     /* [previous][next][first][last][top][bottom][index][help] */
  73 {
  74         int target;
  75         target =  DEVICE_NR(MINOR(inode->i_rdev));
  76 
  77         if(target >= sd_template.dev_max || !rscsi_disks[target].device)
  78           return -ENXIO;   /* No such device */
  79         
  80 /* Make sure that only one process can do a check_change_disk at one time.
  81  This is also used to lock out further access when the partition table is being re-read. */
  82 
  83         while (rscsi_disks[target].device->busy);
  84 
  85         if(rscsi_disks[target].device->removable) {
  86           check_disk_change(inode->i_rdev);
  87 
  88           if(!rscsi_disks[target].device->access_count)
  89             sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
  90         };
  91         rscsi_disks[target].device->access_count++;
  92         return 0;
  93 }
  94 
  95 static void sd_release(struct inode * inode, struct file * file)
     /* [previous][next][first][last][top][bottom][index][help] */
  96 {
  97         int target;
  98         sync_dev(inode->i_rdev);
  99 
 100         target =  DEVICE_NR(MINOR(inode->i_rdev));
 101 
 102         rscsi_disks[target].device->access_count--;
 103 
 104         if(rscsi_disks[target].device->removable) {
 105           if(!rscsi_disks[target].device->access_count)
 106             sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
 107         };
 108 }
 109 
static void sd_geninit(void);

/* Block-device file operations for /dev/sd*.  Reads and writes go
   through the generic buffer-cache helpers; media-change detection and
   revalidation are wired to the sd-specific routines below. */
static struct file_operations sd_fops = {
        NULL,                   /* lseek - default */
        block_read,             /* read - general block-dev read */
        block_write,            /* write - general block-dev write */
        NULL,                   /* readdir - bad */
        NULL,                   /* select */
        sd_ioctl,               /* ioctl */
        NULL,                   /* mmap */
        sd_open,                /* open code */
        sd_release,             /* release */
        block_fsync,            /* fsync */
        NULL,                   /* fasync */
        check_scsidisk_media_change,  /* Disk change */
        fop_revalidate_scsidisk     /* revalidate */
};

/* Generic-disk descriptor: 4 partition bits give 16 minors per physical
   disk.  nr_real starts at 0 and is filled in by sd_geninit() once the
   bus scan knows how many units exist. */
static struct gendisk sd_gendisk = {
        MAJOR_NR,               /* Major number */
        "sd",           /* Major name */
        4,              /* Bits to shift to get real from partition */
        1 << 4,         /* Number of partitions per real */
        0,              /* maximum number of real */
        sd_geninit,     /* init function */
        NULL,           /* hd struct */
        NULL,   /* block sizes */
        0,              /* number */
        NULL,   /* internal */
        NULL            /* next */
};
 141 
 142 static void sd_geninit (void)
     /* [previous][next][first][last][top][bottom][index][help] */
 143 {
 144         int i;
 145 
 146         for (i = 0; i < sd_template.dev_max; ++i)
 147           if(rscsi_disks[i].device) 
 148             sd[i << 4].nr_sects = rscsi_disks[i].capacity;
 149         sd_gendisk.nr_real = sd_template.dev_max;
 150 }
 151 
 152 /*
 153         rw_intr is the interrupt routine for the device driver.  It will
 154         be notified on the end of a SCSI read / write, and
 155         will take on of several actions based on success or failure.
 156 */
 157 
 158 static void rw_intr (Scsi_Cmnd *SCpnt)
     /* [previous][next][first][last][top][bottom][index][help] */
 159 {
 160   int result = SCpnt->result;
 161   int this_count = SCpnt->bufflen >> 9;
 162 
 163 #ifdef DEBUG
 164   printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.dev), SCpnt->host->host_no, result);
 165 #endif
 166 
 167 /*
 168   First case : we assume that the command succeeded.  One of two things will
 169   happen here.  Either we will be finished, or there will be more
 170   sectors that we were unable to read last time.
 171 */
 172 
 173   if (!result) {
 174 
 175 #ifdef DEBUG
 176     printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.dev), SCpnt->request.nr_sectors);
 177     printk("use_sg is %d\n ",SCpnt->use_sg);
 178 #endif
 179     if (SCpnt->use_sg) {
 180       struct scatterlist * sgpnt;
 181       int i;
 182       sgpnt = (struct scatterlist *) SCpnt->buffer;
 183       for(i=0; i<SCpnt->use_sg; i++) {
 184 #ifdef DEBUG
 185         printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
 186 #endif
 187         if (sgpnt[i].alt_address) {
 188           if (SCpnt->request.cmd == READ)
 189             memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
 190           scsi_free(sgpnt[i].address, sgpnt[i].length);
 191         };
 192       };
 193       scsi_free(SCpnt->buffer, SCpnt->sglist_len);  /* Free list of scatter-gather pointers */
 194     } else {
 195       if (SCpnt->buffer != SCpnt->request.buffer) {
 196 #ifdef DEBUG
 197         printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 198                    SCpnt->bufflen);
 199 #endif  
 200           if (SCpnt->request.cmd == READ)
 201             memcpy(SCpnt->request.buffer, SCpnt->buffer,
 202                    SCpnt->bufflen);
 203           scsi_free(SCpnt->buffer, SCpnt->bufflen);
 204       };
 205     };
 206 /*
 207  *      If multiple sectors are requested in one buffer, then
 208  *      they will have been finished off by the first command.  If
 209  *      not, then we have a multi-buffer command.
 210  */
 211     if (SCpnt->request.nr_sectors > this_count)
 212       {
 213         SCpnt->request.errors = 0;
 214         
 215         if (!SCpnt->request.bh)
 216           {
 217 #ifdef DEBUG
 218             printk("sd%c : handling page request, no buffer\n",
 219                    'a' + MINOR(SCpnt->request.dev));
 220 #endif
 221 /*
 222   The SCpnt->request.nr_sectors field is always done in 512 byte sectors,
 223   even if this really isn't the case.
 224 */
 225             panic("sd.c: linked page request (%lx %x)",
 226                   SCpnt->request.sector, this_count);
 227           }
 228       }
 229     SCpnt = end_scsi_request(SCpnt, 1, this_count);
 230     requeue_sd_request(SCpnt);
 231     return;
 232   }
 233 
 234 /* Free up any indirection buffers we allocated for DMA purposes. */
 235     if (SCpnt->use_sg) {
 236       struct scatterlist * sgpnt;
 237       int i;
 238       sgpnt = (struct scatterlist *) SCpnt->buffer;
 239       for(i=0; i<SCpnt->use_sg; i++) {
 240 #ifdef DEBUG
 241         printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 242                    SCpnt->bufflen);
 243 #endif
 244         if (sgpnt[i].alt_address) {
 245           scsi_free(sgpnt[i].address, sgpnt[i].length);
 246         };
 247       };
 248       scsi_free(SCpnt->buffer, SCpnt->sglist_len);  /* Free list of scatter-gather pointers */
 249     } else {
 250 #ifdef DEBUG
 251       printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 252                    SCpnt->bufflen);
 253 #endif
 254       if (SCpnt->buffer != SCpnt->request.buffer)
 255         scsi_free(SCpnt->buffer, SCpnt->bufflen);
 256     };
 257 
 258 /*
 259         Now, if we were good little boys and girls, Santa left us a request
 260         sense buffer.  We can extract information from this, so we
 261         can choose a block to remap, etc.
 262 */
 263 
 264         if (driver_byte(result) != 0) {
 265           if (suggestion(result) == SUGGEST_REMAP) {
 266 #ifdef REMAP
 267 /*
 268         Not yet implemented.  A read will fail after being remapped,
 269         a write will call the strategy routine again.
 270 */
 271             if rscsi_disks[DEVICE_NR(SCpnt->request.dev)].remap
 272               {
 273                 result = 0;
 274               }
 275             else
 276               
 277 #endif
 278             }
 279 
 280           if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
 281             if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
 282               if(rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->removable) {
 283               /* detected disc change.  set a bit and quietly refuse    */
 284               /* further access.                                        */
 285               
 286                 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->changed = 1;
 287                 SCpnt = end_scsi_request(SCpnt, 0, this_count);
 288                 requeue_sd_request(SCpnt);
 289                 return;
 290               }
 291             }
 292           }
 293           
 294 
 295 /*      If we had an ILLEGAL REQUEST returned, then we may have
 296 performed an unsupported command.  The only thing this should be would
 297 be a ten byte read where only a six byte read was supported.  Also,
 298 on a system where READ CAPACITY failed, we have have read past the end
 299 of the  disk. 
 300 */
 301 
 302           if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
 303             if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten) {
 304               rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten = 0;
 305               requeue_sd_request(SCpnt);
 306               result = 0;
 307             } else {
 308             }
 309           }
 310         }  /* driver byte != 0 */
 311         if (result) {
 312                 printk("SCSI disk error : host %d id %d lun %d return code = %x\n",
 313                        rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->host->host_no,
 314                        rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->id,
 315                        rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->lun, result);
 316 
 317                 if (driver_byte(result) & DRIVER_SENSE)
 318                         print_sense("sd", SCpnt);
 319                 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
 320                 requeue_sd_request(SCpnt);
 321                 return;
 322         }
 323 }
 324 
 325 /*
 326         requeue_sd_request() is the request handler function for the sd driver.
 327         Its function in life is to take block device requests, and translate
 328         them to SCSI commands.
 329 */
 330 
 331 static void do_sd_request (void)
     /* [previous][next][first][last][top][bottom][index][help] */
 332 {
 333   Scsi_Cmnd * SCpnt = NULL;
 334   struct request * req = NULL;
 335   int flag = 0;
 336   while (1==1){
 337     cli();
 338     if (CURRENT != NULL && CURRENT->dev == -1) {
 339       sti();
 340       return;
 341     };
 342 
 343     INIT_SCSI_REQUEST;
 344 
 345 
 346 /* We have to be careful here.  allocate_device will get a free pointer, but
 347    there is no guarantee that it is queueable.  In normal usage, we want to
 348    call this, because other types of devices may have the host all tied up,
 349    and we want to make sure that we have at least one request pending for this
 350    type of device.   We can also come through here while servicing an
 351    interrupt, because of the need to start another command.  If we call
 352    allocate_device more than once, then the system can wedge if the command
 353    is not queueable.  The request_queueable function is safe because it checks
 354    to make sure that the host is able to take another command before it returns
 355    a pointer.  */
 356 
 357     if (flag++ == 0)
 358       SCpnt = allocate_device(&CURRENT,
 359                               rscsi_disks[DEVICE_NR(MINOR(CURRENT->dev))].device, 0); 
 360     else SCpnt = NULL;
 361     sti();
 362 
 363 /* This is a performance enhancement.  We dig down into the request list and
 364    try and find a queueable request (i.e. device not busy, and host able to
 365    accept another command.  If we find one, then we queue it. This can
 366    make a big difference on systems with more than one disk drive.  We want
 367    to have the interrupts off when monkeying with the request list, because
 368    otherwise the kernel might try and slip in a request in between somewhere. */
 369 
 370     if (!SCpnt && sd_template.nr_dev > 1){
 371       struct request *req1;
 372       req1 = NULL;
 373       cli();
 374       req = CURRENT;
 375       while(req){
 376         SCpnt = request_queueable(req,
 377                                   rscsi_disks[DEVICE_NR(MINOR(req->dev))].device);
 378         if(SCpnt) break;
 379         req1 = req;
 380         req = req->next;
 381       };
 382       if (SCpnt && req->dev == -1) {
 383         if (req == CURRENT) 
 384           CURRENT = CURRENT->next;
 385         else
 386           req1->next = req->next;
 387       };
 388       sti();
 389     };
 390     
 391     if (!SCpnt) return; /* Could not find anything to do */
 392         
 393     /* Queue command */
 394     requeue_sd_request(SCpnt);
 395   };  /* While */
 396 }    
 397 
 398 static void requeue_sd_request (Scsi_Cmnd * SCpnt)
     /* [previous][next][first][last][top][bottom][index][help] */
 399 {
 400         int dev, block, this_count;
 401         unsigned char cmd[10];
 402         int bounce_size, contiguous;
 403         int max_sg;
 404         struct buffer_head * bh, *bhp;
 405         char * buff, *bounce_buffer;
 406 
 407 repeat:
 408 
 409         if(!SCpnt || SCpnt->request.dev <= 0) {
 410           do_sd_request();
 411           return;
 412         }
 413 
 414         dev =  MINOR(SCpnt->request.dev);
 415         block = SCpnt->request.sector;
 416         this_count = 0;
 417 
 418 #ifdef DEBUG
 419         printk("Doing sd request, dev = %d, block = %d\n", dev, block);
 420 #endif
 421 
 422         if (dev >= (sd_template.dev_max << 4) || 
 423             !rscsi_disks[DEVICE_NR(dev)].device ||
 424             block + SCpnt->request.nr_sectors > sd[dev].nr_sects)
 425                 {
 426                 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 427                 goto repeat;
 428                 }
 429 
 430         block += sd[dev].start_sect;
 431         dev = DEVICE_NR(dev);
 432 
 433         if (rscsi_disks[dev].device->changed)
 434                 {
 435 /*
 436  * quietly refuse to do anything to a changed disc until the changed bit has been reset
 437  */
 438                 /* printk("SCSI disk has been changed.  Prohibiting further I/O.\n");   */
 439                 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 440                 goto repeat;
 441                 }
 442 
 443 #ifdef DEBUG
 444         printk("sd%c : real dev = /dev/sd%c, block = %d\n", 'a' + MINOR(SCpnt->request.dev), dev, block);
 445 #endif
 446 
 447         /*
 448          * If we have a 1K hardware sectorsize, prevent access to single
 449          * 512 byte sectors.  In theory we could handle this - in fact
 450          * the scsi cdrom driver must be able to handle this because
 451          * we typically use 1K blocksizes, and cdroms typically have
 452          * 2K hardware sectorsizes.  Of course, things are simpler
 453          * with the cdrom, since it is read-only.  For performance
 454          * reasons, the filesystems should be able to handle this
 455          * and not force the scsi disk driver to use bounce buffers
 456          * for this.
 457          */
 458         if (rscsi_disks[dev].sector_size == 1024)
 459           if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
 460                 printk("sd.c:Bad block number requested");
 461                 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 462                 goto repeat;
 463         }
 464         
 465         switch (SCpnt->request.cmd)
 466                 {
 467                 case WRITE :
 468                         if (!rscsi_disks[dev].device->writeable)
 469                                 {
 470                                 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 471                                 goto repeat;
 472                                 }
 473                         cmd[0] = WRITE_6;
 474                         break;
 475                 case READ :
 476                         cmd[0] = READ_6;
 477                         break;
 478                 default :
 479                         panic ("Unknown sd command %d\n", SCpnt->request.cmd);
 480                       }
 481 
 482         SCpnt->this_count = 0;
 483 
 484         /* If the host adapter can deal with very large scatter-gather
 485            requests, it is a waste of time to cluster */
 486         contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
 487         bounce_buffer = NULL;
 488         bounce_size = (SCpnt->request.nr_sectors << 9);
 489 
 490         /* First see if we need a bounce buffer for this request.  If we do, make sure
 491            that we can allocate a buffer.  Do not waste space by allocating a bounce
 492            buffer if we are straddling the 16Mb line */
 493 
 494         
 495         if (contiguous && SCpnt->request.bh &&
 496             ((int) SCpnt->request.bh->b_data) + (SCpnt->request.nr_sectors << 9) - 1 > 
 497             ISA_DMA_THRESHOLD && SCpnt->host->unchecked_isa_dma) {
 498           if(((int) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
 499             bounce_buffer = (char *) scsi_malloc(bounce_size);
 500           if(!bounce_buffer) contiguous = 0;
 501         };
 502 
 503         if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
 504           for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp, 
 505               bhp = bhp->b_reqnext) {
 506             if(!CONTIGUOUS_BUFFERS(bh,bhp)) { 
 507               if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
 508               contiguous = 0;
 509               break;
 510             } 
 511           };
 512         if (!SCpnt->request.bh || contiguous) {
 513 
 514           /* case of page request (i.e. raw device), or unlinked buffer */
 515           this_count = SCpnt->request.nr_sectors;
 516           buff = SCpnt->request.buffer;
 517           SCpnt->use_sg = 0;
 518 
 519         } else if (SCpnt->host->sg_tablesize == 0 ||
 520                    (need_isa_buffer && 
 521                     dma_free_sectors <= 10)) {
 522 
 523           /* Case of host adapter that cannot scatter-gather.  We also
 524            come here if we are running low on DMA buffer memory.  We set
 525            a threshold higher than that we would need for this request so
 526            we leave room for other requests.  Even though we would not need
 527            it all, we need to be conservative, because if we run low enough
 528            we have no choice but to panic. */
 529 
 530           if (SCpnt->host->sg_tablesize != 0 &&
 531               need_isa_buffer && 
 532               dma_free_sectors <= 10)
 533             printk("Warning: SCSI DMA buffer space running low.  Using non scatter-gather I/O.\n");
 534 
 535           this_count = SCpnt->request.current_nr_sectors;
 536           buff = SCpnt->request.buffer;
 537           SCpnt->use_sg = 0;
 538 
 539         } else {
 540 
 541           /* Scatter-gather capable host adapter */
 542           struct scatterlist * sgpnt;
 543           int count, this_count_max;
 544           int counted;
 545 
 546           bh = SCpnt->request.bh;
 547           this_count = 0;
 548           this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
 549           count = 0;
 550           bhp = NULL;
 551           while(bh) {
 552             if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
 553             if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
 554                !CLUSTERABLE_DEVICE(SCpnt) ||
 555                (SCpnt->host->unchecked_isa_dma &&
 556                ((unsigned int) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
 557               if (count < SCpnt->host->sg_tablesize) count++;
 558               else break;
 559             };
 560             this_count += (bh->b_size >> 9);
 561             bhp = bh;
 562             bh = bh->b_reqnext;
 563           };
 564 #if 0
 565           if(SCpnt->host->unchecked_isa_dma &&
 566              ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
 567 #endif
 568           SCpnt->use_sg = count;  /* Number of chains */
 569           count = 512;/* scsi_malloc can only allocate in chunks of 512 bytes*/
 570           while( count < (SCpnt->use_sg * sizeof(struct scatterlist))) 
 571             count = count << 1;
 572           SCpnt->sglist_len = count;
 573           max_sg = count / sizeof(struct scatterlist);
 574           if(SCpnt->host->sg_tablesize < max_sg) max_sg = SCpnt->host->sg_tablesize;
 575           sgpnt = (struct scatterlist * ) scsi_malloc(count);
 576           memset(sgpnt, 0, count);  /* Zero so it is easy to fill */
 577           if (!sgpnt) {
 578             printk("Warning - running *really* short on DMA buffers\n");
 579             SCpnt->use_sg = 0;  /* No memory left - bail out */
 580             this_count = SCpnt->request.current_nr_sectors;
 581             buff = SCpnt->request.buffer;
 582           } else {
 583             buff = (char *) sgpnt;
 584             counted = 0;
 585             for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
 586                 count < SCpnt->use_sg && bh; 
 587                 count++, bh = bhp) {
 588 
 589               bhp = bh->b_reqnext;
 590 
 591               if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
 592               sgpnt[count].length += bh->b_size;
 593               counted += bh->b_size >> 9;
 594 
 595               if (((int) sgpnt[count].address) + sgpnt[count].length - 1 > 
 596                   ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
 597                   !sgpnt[count].alt_address) {
 598                 sgpnt[count].alt_address = sgpnt[count].address;
 599                 /* We try and avoid exhausting the DMA pool, since it is easier
 600                    to control usage here.  In other places we might have a more
 601                    pressing need, and we would be screwed if we ran out */
 602                 if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
 603                   sgpnt[count].address = NULL;
 604                 } else {
 605                   sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
 606                 };
 607 /* If we start running low on DMA buffers, we abort the scatter-gather
 608    operation, and free all of the memory we have allocated.  We want to
 609    ensure that all scsi operations are able to do at least a non-scatter/gather
 610    operation */
 611                 if(sgpnt[count].address == NULL){ /* Out of dma memory */
 612 #if 0
 613                   printk("Warning: Running low on SCSI DMA buffers");
 614                   /* Try switching back to a non scatter-gather operation. */
 615                   while(--count >= 0){
 616                     if(sgpnt[count].alt_address) 
 617                       scsi_free(sgpnt[count].address, sgpnt[count].length);
 618                   };
 619                   this_count = SCpnt->request.current_nr_sectors;
 620                   buff = SCpnt->request.buffer;
 621                   SCpnt->use_sg = 0;
 622                   scsi_free(sgpnt, SCpnt->sglist_len);
 623 #endif
 624                   SCpnt->use_sg = count;
 625                   this_count = counted -= bh->b_size >> 9;
 626                   break;
 627                 };
 628 
 629               };
 630 
 631               /* Only cluster buffers if we know that we can supply DMA buffers
 632                  large enough to satisfy the request.  Do not cluster a new
 633                  request if this would mean that we suddenly need to start
 634                  using DMA bounce buffers */
 635               if(bhp && CONTIGUOUS_BUFFERS(bh,bhp) && CLUSTERABLE_DEVICE(SCpnt)) {
 636                 char * tmp;
 637 
 638                 if (((int) sgpnt[count].address) + sgpnt[count].length +
 639                     bhp->b_size - 1 > ISA_DMA_THRESHOLD && 
 640                     (SCpnt->host->unchecked_isa_dma) &&
 641                     !sgpnt[count].alt_address) continue;
 642 
 643                 if(!sgpnt[count].alt_address) {count--; continue; }
 644                 if(dma_free_sectors > 10)
 645                   tmp = (char *) scsi_malloc(sgpnt[count].length + bhp->b_size);
 646                 else {
 647                   tmp = NULL;
 648                   max_sg = SCpnt->use_sg;
 649                 };
 650                 if(tmp){
 651                   scsi_free(sgpnt[count].address, sgpnt[count].length);
 652                   sgpnt[count].address = tmp;
 653                   count--;
 654                   continue;
 655                 };
 656 
 657                 /* If we are allowed another sg chain, then increment counter so we
 658                    can insert it.  Otherwise we will end up truncating */
 659 
 660                 if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
 661               };  /* contiguous buffers */
 662             }; /* for loop */
 663 
 664             this_count = counted; /* This is actually how many we are going to transfer */
 665 
 666             if(count < SCpnt->use_sg || SCpnt->use_sg > SCpnt->host->sg_tablesize){
 667               bh = SCpnt->request.bh;
 668               printk("Use sg, count %d %x %d\n", SCpnt->use_sg, count, dma_free_sectors);
 669               printk("maxsg = %x, counted = %d this_count = %d\n", max_sg, counted, this_count);
 670               while(bh){
 671                 printk("[%p %lx] ", bh->b_data, bh->b_size);
 672                 bh = bh->b_reqnext;
 673               };
 674               if(SCpnt->use_sg < 16)
 675                 for(count=0; count<SCpnt->use_sg; count++)
 676                   printk("{%d:%p %p %d}  ", count,
 677                          sgpnt[count].address,
 678                          sgpnt[count].alt_address,
 679                          sgpnt[count].length);
 680               panic("Ooops");
 681             };
 682 
 683             if (SCpnt->request.cmd == WRITE)
 684               for(count=0; count<SCpnt->use_sg; count++)
 685                 if(sgpnt[count].alt_address)
 686                   memcpy(sgpnt[count].address, sgpnt[count].alt_address, 
 687                          sgpnt[count].length);
 688           };  /* Able to malloc sgpnt */
 689         };  /* Host adapter capable of scatter-gather */
 690 
 691 /* Now handle the possibility of DMA to addresses > 16Mb */
 692 
 693         if(SCpnt->use_sg == 0){
 694           if (((int) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD && 
 695             (SCpnt->host->unchecked_isa_dma)) {
 696             if(bounce_buffer)
 697               buff = bounce_buffer;
 698             else
 699               buff = (char *) scsi_malloc(this_count << 9);
 700             if(buff == NULL) {  /* Try backing off a bit if we are low on mem*/
 701               this_count = SCpnt->request.current_nr_sectors;
 702               buff = (char *) scsi_malloc(this_count << 9);
 703               if(!buff) panic("Ran out of DMA buffers.");
 704             };
 705             if (SCpnt->request.cmd == WRITE)
 706               memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
 707           };
 708         };
 709 #ifdef DEBUG
 710         printk("sd%c : %s %d/%d 512 byte blocks.\n", 'a' + MINOR(SCpnt->request.dev),
 711                 (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
 712                 this_count, SCpnt->request.nr_sectors);
 713 #endif
 714 
 715         cmd[1] = (SCpnt->lun << 5) & 0xe0;
 716 
 717         if (rscsi_disks[dev].sector_size == 1024){
 718           if(block & 1) panic("sd.c:Bad block number requested");
 719           if(this_count & 1) panic("sd.c:Bad block number requested");
 720           block = block >> 1;
 721           this_count = this_count >> 1;
 722         };
 723 
 724         if (rscsi_disks[dev].sector_size == 256){
 725           block = block << 1;
 726           this_count = this_count << 1;
 727         };
 728 
 729         if (((this_count > 0xff) ||  (block > 0x1fffff)) && rscsi_disks[dev].ten)
 730                 {
 731                 if (this_count > 0xffff)
 732                         this_count = 0xffff;
 733 
 734                 cmd[0] += READ_10 - READ_6 ;
 735                 cmd[2] = (unsigned char) (block >> 24) & 0xff;
 736                 cmd[3] = (unsigned char) (block >> 16) & 0xff;
 737                 cmd[4] = (unsigned char) (block >> 8) & 0xff;
 738                 cmd[5] = (unsigned char) block & 0xff;
 739                 cmd[6] = cmd[9] = 0;
 740                 cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
 741                 cmd[8] = (unsigned char) this_count & 0xff;
 742                 }
 743         else
 744                 {
 745                 if (this_count > 0xff)
 746                         this_count = 0xff;
 747 
 748                 cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
 749                 cmd[2] = (unsigned char) ((block >> 8) & 0xff);
 750                 cmd[3] = (unsigned char) block & 0xff;
 751                 cmd[4] = (unsigned char) this_count;
 752                 cmd[5] = 0;
 753                 }
 754 
 755 /*
 756  * We shouldn't disconnect in the middle of a sector, so with a dumb 
 757  * host adapter, it's safe to assume that we can at least transfer 
 758  * this many bytes between each connect / disconnect.  
 759  */
 760 
 761         SCpnt->transfersize = rscsi_disks[dev].sector_size;
 762         SCpnt->underflow = this_count << 9; 
 763         scsi_do_cmd (SCpnt, (void *) cmd, buff, 
 764                      this_count * rscsi_disks[dev].sector_size,
 765                      rw_intr, 
 766                      (SCpnt->device->type == TYPE_DISK ? 
 767                                      SD_TIMEOUT : SD_MOD_TIMEOUT),
 768                      MAX_RETRIES);
 769 }
 770 
 771 static int check_scsidisk_media_change(dev_t full_dev){
     /* [previous][next][first][last][top][bottom][index][help] */
 772         int retval;
 773         int target;
 774         struct inode inode;
 775         int flag = 0;
 776 
 777         target =  DEVICE_NR(MINOR(full_dev));
 778 
 779         if (target >= sd_template.dev_max ||
 780             !rscsi_disks[target].device) {
 781                 printk("SCSI disk request error: invalid device.\n");
 782                 return 0;
 783         };
 784 
 785         if(!rscsi_disks[target].device->removable) return 0;
 786 
 787         inode.i_rdev = full_dev;  /* This is all we really need here */
 788         retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
 789 
 790         if(retval){ /* Unable to test, unit probably not ready.  This usually
 791                      means there is no disc in the drive.  Mark as changed,
 792                      and we will figure it out later once the drive is
 793                      available again.  */
 794 
 795           rscsi_disks[target].device->changed = 1;
 796           return 1; /* This will force a flush, if called from
 797                        check_disk_change */
 798         };
 799 
 800         retval = rscsi_disks[target].device->changed;
 801         if(!flag) rscsi_disks[target].device->changed = 0;
 802         return retval;
 803 }
 804 
 805 static void sd_init_done (Scsi_Cmnd * SCpnt)
     /* [previous][next][first][last][top][bottom][index][help] */
 806 {
 807   struct request * req;
 808   
 809   req = &SCpnt->request;
 810   req->dev = 0xfffe; /* Busy, but indicate request done */
 811   
 812   if (req->sem != NULL) {
 813     up(req->sem);
 814   }
 815 }
 816 
 817 static int sd_init_onedisk(int i)
     /* [previous][next][first][last][top][bottom][index][help] */
 818 {
 819   unsigned char cmd[10];
 820   unsigned char *buffer;
 821   char spintime;
 822   int the_result, retries;
 823   Scsi_Cmnd * SCpnt;
 824 
 825   /* We need to retry the READ_CAPACITY because a UNIT_ATTENTION is considered
 826      a fatal error, and many devices report such an error just after a scsi
 827      bus reset. */
 828 
 829   SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
 830   buffer = (unsigned char *) scsi_malloc(512);
 831 
 832   spintime = 0;
 833 
 834   /* Spin up drives, as required.  Only do this at boot time */
 835   if (current == task[0]){
 836     do{
 837       cmd[0] = TEST_UNIT_READY;
 838       cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
 839       memset ((void *) &cmd[2], 0, 8);
 840       SCpnt->request.dev = 0xffff;  /* Mark as really busy again */
 841       SCpnt->cmd_len = 0;
 842       SCpnt->sense_buffer[0] = 0;
 843       SCpnt->sense_buffer[2] = 0;
 844       
 845       scsi_do_cmd (SCpnt,
 846                    (void *) cmd, (void *) buffer,
 847                    512, sd_init_done,  SD_TIMEOUT,
 848                    MAX_RETRIES);
 849       
 850       while(SCpnt->request.dev != 0xfffe);
 851       
 852       the_result = SCpnt->result;
 853       
 854       /* Look for non-removable devices that return NOT_READY.  Issue command
 855          to spin up drive for these cases. */
 856       if(the_result && !rscsi_disks[i].device->removable && 
 857          SCpnt->sense_buffer[2] == NOT_READY) {
 858         int time1;
 859         if(!spintime){
 860           printk( "sd%c: Spinning up disk...", 'a' + i );
 861           cmd[0] = START_STOP;
 862           cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
 863           cmd[1] |= 1;  /* Return immediately */
 864           memset ((void *) &cmd[2], 0, 8);
 865           cmd[4] = 1; /* Start spin cycle */
 866           SCpnt->request.dev = 0xffff;  /* Mark as really busy again */
 867           SCpnt->cmd_len = 0;
 868           SCpnt->sense_buffer[0] = 0;
 869           SCpnt->sense_buffer[2] = 0;
 870           
 871           scsi_do_cmd (SCpnt,
 872                        (void *) cmd, (void *) buffer,
 873                        512, sd_init_done,  SD_TIMEOUT,
 874                        MAX_RETRIES);
 875           
 876           while(SCpnt->request.dev != 0xfffe);
 877 
 878           spintime = jiffies;
 879         };
 880 
 881         time1 = jiffies;
 882         while(jiffies < time1 + HZ); /* Wait 1 second for next try */
 883         printk( "." );
 884       };
 885     } while(the_result && spintime && spintime+5000 > jiffies);
 886     if (spintime) {
 887        if (the_result)
 888            printk( "not responding...\n" );
 889        else
 890            printk( "ready\n" );
 891     }
 892   };  /* current == task[0] */
 893 
 894 
 895   retries = 3;
 896   do {
 897     cmd[0] = READ_CAPACITY;
 898     cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
 899     memset ((void *) &cmd[2], 0, 8);
 900     memset ((void *) buffer, 0, 8);
 901     SCpnt->request.dev = 0xffff;  /* Mark as really busy again */
 902     SCpnt->cmd_len = 0;
 903     SCpnt->sense_buffer[0] = 0;
 904     SCpnt->sense_buffer[2] = 0;
 905     
 906     scsi_do_cmd (SCpnt,
 907                  (void *) cmd, (void *) buffer,
 908                  8, sd_init_done,  SD_TIMEOUT,
 909                  MAX_RETRIES);
 910     
 911     if (current == task[0])
 912       while(SCpnt->request.dev != 0xfffe);
 913     else
 914       if (SCpnt->request.dev != 0xfffe){
 915         struct semaphore sem = MUTEX_LOCKED;
 916         SCpnt->request.sem = &sem;
 917         down(&sem);
 918         /* Hmm.. Have to ask about this one.. */
 919         while (SCpnt->request.dev != 0xfffe) schedule();
 920       };
 921     
 922     the_result = SCpnt->result;
 923     retries--;
 924 
 925   } while(the_result && retries);
 926 
 927   SCpnt->request.dev = -1;  /* Mark as not busy */
 928 
 929   wake_up(&SCpnt->device->device_wait); 
 930 
 931   /* Wake up a process waiting for device*/
 932 
 933   /*
 934    *    The SCSI standard says "READ CAPACITY is necessary for self configuring software"
 935    *    While not mandatory, support of READ CAPACITY is strongly encouraged.
 936    *    We used to die if we couldn't successfully do a READ CAPACITY.
 937    *    But, now we go on about our way.  The side effects of this are
 938    *
 939    *    1.  We can't know block size with certainty.  I have said "512 bytes is it"
 940    *            as this is most common.
 941    *
 942    *    2.  Recovery from when some one attempts to read past the end of the raw device will
 943    *        be slower.
 944    */
 945 
 946   if (the_result)
 947     {
 948       printk ("sd%c : READ CAPACITY failed.\n"
 949               "sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
 950               'a' + i, 'a' + i,
 951               status_byte(the_result),
 952               msg_byte(the_result),
 953               host_byte(the_result),
 954               driver_byte(the_result)
 955               );
 956       if (driver_byte(the_result)  & DRIVER_SENSE)
 957         printk("sd%c : extended sense code = %1x \n", 'a' + i, SCpnt->sense_buffer[2] & 0xf);
 958       else
 959         printk("sd%c : sense not available. \n", 'a' + i);
 960 
 961       printk("sd%c : block size assumed to be 512 bytes, disk size 1GB.  \n", 'a' + i);
 962       rscsi_disks[i].capacity = 0x1fffff;
 963       rscsi_disks[i].sector_size = 512;
 964 
 965       /* Set dirty bit for removable devices if not ready - sometimes drives
 966          will not report this properly. */
 967       if(rscsi_disks[i].device->removable && 
 968          SCpnt->sense_buffer[2] == NOT_READY)
 969         rscsi_disks[i].device->changed = 1;
 970 
 971     }
 972   else
 973     {
 974       rscsi_disks[i].capacity = (buffer[0] << 24) |
 975         (buffer[1] << 16) |
 976           (buffer[2] << 8) |
 977             buffer[3];
 978 
 979       rscsi_disks[i].sector_size = (buffer[4] << 24) |
 980         (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
 981 
 982       if (rscsi_disks[i].sector_size != 512 &&
 983           rscsi_disks[i].sector_size != 1024 &&
 984           rscsi_disks[i].sector_size != 256)
 985         {
 986           printk ("sd%c : unsupported sector size %d.\n",
 987                   'a' + i, rscsi_disks[i].sector_size);
 988           if(rscsi_disks[i].device->removable){
 989             rscsi_disks[i].capacity = 0;
 990           } else {
 991             printk ("scsi : deleting disk entry.\n");
 992             rscsi_disks[i].device = NULL;
 993             sd_template.nr_dev--;
 994             return i;
 995           };
 996         }
 997       if(rscsi_disks[i].sector_size == 1024)
 998         rscsi_disks[i].capacity <<= 1;  /* Change this into 512 byte sectors */
 999       if(rscsi_disks[i].sector_size == 256)
1000         rscsi_disks[i].capacity >>= 1;  /* Change this into 512 byte sectors */
1001     }
1002 
1003   rscsi_disks[i].ten = 1;
1004   rscsi_disks[i].remap = 1;
1005   scsi_free(buffer, 512);
1006   return i;
1007 }
1008 
/*
        The sd_init() function registers the SCSI disk block device and
        allocates the per-disk tables (Scsi_Disk entries, size and
        blocksize arrays, partition structures) used by the gendisk code.
        The actual disk sizing happens later, in sd_finish().
*/
1013 
1014 
1015 static void sd_init()
     /* [previous][next][first][last][top][bottom][index][help] */
1016 {
1017         int i;
1018         static int sd_registered = 0;
1019 
1020         if (sd_template.dev_noticed == 0) return;
1021 
1022         if(!sd_registered) {
1023           if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
1024             printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
1025             return;
1026           }
1027           sd_registered++;
1028         }
1029 
1030         /* We do not support attaching loadable devices yet. */
1031         if(scsi_loadable_module_flag) return;
1032 
1033         sd_template.dev_max = sd_template.dev_noticed;
1034 
1035         rscsi_disks = (Scsi_Disk *) 
1036           scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk));
1037         memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));
1038 
1039         sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) * 
1040                                             sizeof(int));
1041         memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));
1042 
1043         sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) * 
1044                                                  sizeof(int));
1045         for(i=0;i<(sd_template.dev_max << 4);i++) sd_blocksizes[i] = 1024;
1046         blksize_size[MAJOR_NR] = sd_blocksizes;
1047 
1048         sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
1049                                                    sizeof(struct hd_struct));
1050 
1051 
1052         sd_gendisk.max_nr = sd_template.dev_max;
1053         sd_gendisk.part = sd;
1054         sd_gendisk.sizes = sd_sizes;
1055         sd_gendisk.real_devices = (void *) rscsi_disks;
1056 
1057 }
1058 
/* Second-stage initialization: size every attached disk, install the
   request function, choose a read-ahead, and register our gendisk. */
static void sd_finish()
{
        int i;

        /* sd_init_onedisk() returns its argument i on every path, so the
           assignment back into i is effectively a no-op; kept as-is. */
        for (i = 0; i < sd_template.dev_max; ++i)
          if (rscsi_disks[i].device) i = sd_init_onedisk(i);

        blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;

        /* Scatter-gather capable hosts get a large read-ahead (120),
           others a small 4-sector one.
           NOTE(review): this dereferences rscsi_disks[0].device without a
           check -- presumably slot 0 is always populated when dev_max > 0;
           confirm against sd_attach(). */
        if(rscsi_disks[0].device->host->sg_tablesize)
          read_ahead[MAJOR_NR] = 120;
        /* 64 sector read-ahead */
        else
          read_ahead[MAJOR_NR] = 4;  /* 4 sector read-ahead */
        
        /* Push our gendisk onto the global list. */
        sd_gendisk.next = gendisk_head;
        gendisk_head = &sd_gendisk;
        return;
}
1081 
1082 static int sd_detect(Scsi_Device * SDp){
     /* [previous][next][first][last][top][bottom][index][help] */
1083   /* We do not support attaching loadable devices yet. */
1084   if(scsi_loadable_module_flag) return 0;
1085   if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1086 
1087   printk("Detected scsi disk sd%c at scsi%d, id %d, lun %d\n", 
1088          'a'+ (sd_template.dev_noticed++),
1089          SDp->host->host_no , SDp->id, SDp->lun); 
1090 
1091          return 1;
1092 
1093 }
1094 
1095 static void sd_attach(Scsi_Device * SDp){
     /* [previous][next][first][last][top][bottom][index][help] */
1096    Scsi_Disk * dpnt;
1097    int i;
1098 
1099    /* We do not support attaching loadable devices yet. */
1100    if(scsi_loadable_module_flag) return;
1101    if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return;
1102 
1103    if(sd_template.nr_dev >= sd_template.dev_max) 
1104      panic ("scsi_devices corrupt (sd)");
1105 
1106    for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++) 
1107      if(!dpnt->device) break;
1108 
1109    if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)");
1110 
1111    SDp->scsi_request_fn = do_sd_request;
1112    rscsi_disks[i].device = SDp;
1113    sd_template.nr_dev++;
1114 };
1115 
/* Per-target shorthands used only by revalidate_scsidisk() below. */
#define DEVICE_BUSY rscsi_disks[target].device->busy
#define USAGE rscsi_disks[target].device->access_count
#define CAPACITY rscsi_disks[target].capacity
#define MAYBE_REINIT  sd_init_onedisk(target)
#define GENDISK_STRUCT sd_gendisk

/* This routine is called to flush all partitions and partition tables
   for a changed scsi disk, and then re-read the new partition table.
   If we are revalidating a disk because of a media change, then we
   enter with usage == 0.  If we are using an ioctl, we automatically have
   usage == 1 (we need an open channel to use an ioctl :-), so this
   is our limit.
 */
int revalidate_scsidisk(int dev, int maxusage){
          int target, major;
          struct gendisk * gdev;
          int max_p;
          int start;
          int i;

          target =  DEVICE_NR(MINOR(dev));
          gdev = &GENDISK_STRUCT;

          /* Test-and-set the busy flag with interrupts disabled so the
             check and the assignment cannot be interleaved with another
             revalidation attempt. */
          cli();
          if (DEVICE_BUSY || USAGE > maxusage) {
            sti();
            printk("Device busy for revalidation (usage=%d)\n", USAGE);
            return -EBUSY;
          };
          DEVICE_BUSY = 1;
          sti();

          max_p = gdev->max_p;
          start = target << gdev->minor_shift;  /* first minor of this disk */
          major = MAJOR_NR << 8;

          /* Flush and invalidate every partition minor of this disk, and
             clear its partition-table entry. */
          for (i=max_p - 1; i >=0 ; i--) {
            sync_dev(major | start | i);
            invalidate_inodes(major | start | i);
            invalidate_buffers(major | start | i);
            gdev->part[start+i].start_sect = 0;
            gdev->part[start+i].nr_sects = 0;
          };

          /* Re-probe the disk (capacity, sector size, spin-up). */
#ifdef MAYBE_REINIT
          MAYBE_REINIT;
#endif

          /* Whole-disk minor spans the new capacity; re-read the
             partition table on top of it. */
          gdev->part[start].nr_sects = CAPACITY;
          resetup_one_dev(gdev, target);

          DEVICE_BUSY = 0;
          return 0;
}
1170 
/* File-operations wrapper: revalidate with maxusage == 0, i.e. only
   succeed when no one has the device open (the media-change path). */
static int fop_revalidate_scsidisk(dev_t dev){
  return revalidate_scsidisk(dev, 0);
}
1174 

/* [previous][next][first][last][top][bottom][index][help] */