root/drivers/scsi/sd.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. sd_open
  2. sd_release
  3. sd_geninit
  4. rw_intr
  5. do_sd_request
  6. requeue_sd_request
  7. check_scsidisk_media_change
  8. sd_init_done
  9. sd_init_onedisk
  10. sd_init
  11. sd_finish
  12. sd_detect
  13. sd_attach
  14. revalidate_scsidisk
  15. fop_revalidate_scsidisk

   1 /*
   2  *      sd.c Copyright (C) 1992 Drew Eckhardt 
   3  *           Copyright (C) 1993, 1994 Eric Youngdale
   4  *      Linux scsi disk driver by
   5  *              Drew Eckhardt 
   6  *
   7  *      <drew@colorado.edu>
   8  *
   9  *       Modified by Eric Youngdale ericy@cais.com to
  10  *       add scatter-gather, multiple outstanding request, and other
  11  *       enhancements.
  12  */
  13 
  14 #include <linux/fs.h>
  15 #include <linux/kernel.h>
  16 #include <linux/sched.h>
  17 #include <linux/string.h>
  18 #include <linux/errno.h>
  19 #include <asm/system.h>
  20 
  21 #define MAJOR_NR SCSI_DISK_MAJOR
  22 #include "../block/blk.h"
  23 #include "scsi.h"
  24 #include "hosts.h"
  25 #include "sd.h"
  26 #include "scsi_ioctl.h"
  27 #include "constants.h"
  28 
  29 #include <linux/genhd.h>
  30 
  31 /*
  32 static const char RCSid[] = "$Header:";
  33 */
  34 
  35 #define MAX_RETRIES 5
  36 
  37 /*
  38  *      Time out in seconds for disks and Magneto-opticals (which are slower).
  39  */
  40 
  41 #define SD_TIMEOUT 600
  42 #define SD_MOD_TIMEOUT 750
  43 
  44 #define CLUSTERABLE_DEVICE(SC) (SC->host->hostt->use_clustering && \
  45                             SC->device->type != TYPE_MOD)
  46 
  47 struct hd_struct * sd;
  48 
  49 Scsi_Disk * rscsi_disks;
  50 static int * sd_sizes;
  51 static int * sd_blocksizes;
  52 
  53 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
  54 
  55 static int check_scsidisk_media_change(dev_t);
  56 static int fop_revalidate_scsidisk(dev_t);
  57 
  58 static sd_init_onedisk(int);
  59 
  60 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
  61 
  62 static void sd_init(void);
  63 static void sd_finish(void);
  64 static void sd_attach(Scsi_Device *);
  65 static int sd_detect(Scsi_Device *);
  66 
  67 struct Scsi_Device_Template sd_template = {NULL, "disk", "sd", TYPE_DISK, 
  68                                              SCSI_DISK_MAJOR, 0, 0, 0, 1,
  69                                              sd_detect, sd_init,
  70                                              sd_finish, sd_attach, NULL};
  71 
  72 static int sd_open(struct inode * inode, struct file * filp)
     /* [previous][next][first][last][top][bottom][index][help] */
  73 {
  74         int target;
  75         target =  DEVICE_NR(MINOR(inode->i_rdev));
  76 
  77         if(target >= sd_template.dev_max || !rscsi_disks[target].device)
  78           return -ENXIO;   /* No such device */
  79         
  80 /* Make sure that only one process can do a check_change_disk at one time.
  81  This is also used to lock out further access when the partition table is being re-read. */
  82 
  83         while (rscsi_disks[target].device->busy);
  84 
  85         if(rscsi_disks[target].device->removable) {
  86           check_disk_change(inode->i_rdev);
  87 
  88           if(!rscsi_disks[target].device->access_count)
  89             sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
  90         };
  91         rscsi_disks[target].device->access_count++;
  92         return 0;
  93 }
  94 
  95 static void sd_release(struct inode * inode, struct file * file)
     /* [previous][next][first][last][top][bottom][index][help] */
  96 {
  97         int target;
  98         sync_dev(inode->i_rdev);
  99 
 100         target =  DEVICE_NR(MINOR(inode->i_rdev));
 101 
 102         rscsi_disks[target].device->access_count--;
 103 
 104         if(rscsi_disks[target].device->removable) {
 105           if(!rscsi_disks[target].device->access_count)
 106             sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
 107         };
 108 }
 109 
 110 static void sd_geninit(void);
 111 
 112 static struct file_operations sd_fops = {
 113         NULL,                   /* lseek - default */
 114         block_read,             /* read - general block-dev read */
 115         block_write,            /* write - general block-dev write */
 116         NULL,                   /* readdir - bad */
 117         NULL,                   /* select */
 118         sd_ioctl,               /* ioctl */
 119         NULL,                   /* mmap */
 120         sd_open,                /* open code */
 121         sd_release,             /* release */
 122         block_fsync,            /* fsync */
 123         NULL,                   /* fasync */
 124         check_scsidisk_media_change,  /* Disk change */
 125         fop_revalidate_scsidisk     /* revalidate */
 126 };
 127 
 128 static struct gendisk sd_gendisk = {
 129         MAJOR_NR,               /* Major number */
 130         "sd",           /* Major name */
 131         4,              /* Bits to shift to get real from partition */
 132         1 << 4,         /* Number of partitions per real */
 133         0,              /* maximum number of real */
 134         sd_geninit,     /* init function */
 135         NULL,           /* hd struct */
 136         NULL,   /* block sizes */
 137         0,              /* number */
 138         NULL,   /* internal */
 139         NULL            /* next */
 140 };
 141 
 142 static void sd_geninit (void)
     /* [previous][next][first][last][top][bottom][index][help] */
 143 {
 144         int i;
 145 
 146         for (i = 0; i < sd_template.dev_max; ++i)
 147           if(rscsi_disks[i].device) 
 148             sd[i << 4].nr_sects = rscsi_disks[i].capacity;
 149         sd_gendisk.nr_real = sd_template.dev_max;
 150 }
 151 
/*
        rw_intr is the completion routine for the device driver.  It is
        called at the end of a SCSI read / write, and takes one of
        several actions based on success or failure.
*/
 157 
  158 static void rw_intr (Scsi_Cmnd *SCpnt)
  159 {
  160   int result = SCpnt->result;
  161   int this_count = SCpnt->bufflen >> 9;
        /* this_count: number of 512-byte sectors covered by the command
           that just completed (bufflen is in bytes). */
  162 
  163 #ifdef DEBUG
  164   printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.dev), SCpnt->host->host_no, result);
  165 #endif
  166 
  167 /*
  168   First case : we assume that the command succeeded.  One of two things will
  169   happen here.  Either we will be finished, or there will be more
  170   sectors that we were unable to read last time.
  171 */
  172 
  173   if (!result) {
  174 
  175 #ifdef DEBUG
  176     printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.dev), SCpnt->request.nr_sectors);
  177     printk("use_sg is %d\n ",SCpnt->use_sg);
  178 #endif
      /* Success path: for reads, copy any DMA bounce buffers (alt_address
         set) back to the real destination, then free all bounce memory. */
  179     if (SCpnt->use_sg) {
  180       struct scatterlist * sgpnt;
  181       int i;
  182       sgpnt = (struct scatterlist *) SCpnt->buffer;
  183       for(i=0; i<SCpnt->use_sg; i++) {
  184 #ifdef DEBUG
  185         printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
  186 #endif
  187         if (sgpnt[i].alt_address) {
  188           if (SCpnt->request.cmd == READ)
  189             memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
  190           scsi_free(sgpnt[i].address, sgpnt[i].length);
  191         };
  192       };
  193       scsi_free(SCpnt->buffer, SCpnt->sglist_len);  /* Free list of scatter-gather pointers */
  194     } else {
  195       if (SCpnt->buffer != SCpnt->request.buffer) {
  196 #ifdef DEBUG
  197         printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
  198                    SCpnt->bufflen);
  199 #endif  
  200           if (SCpnt->request.cmd == READ)
  201             memcpy(SCpnt->request.buffer, SCpnt->buffer,
  202                    SCpnt->bufflen);
  203           scsi_free(SCpnt->buffer, SCpnt->bufflen);
  204       };
  205     };
  206 /*
  207  *      If multiple sectors are requested in one buffer, then
  208  *      they will have been finished off by the first command.  If
  209  *      not, then we have a multi-buffer command.
  210  */
  211     if (SCpnt->request.nr_sectors > this_count)
  212       {
  213         SCpnt->request.errors = 0;
  214         
  215         if (!SCpnt->request.bh)
  216           {
  217 #ifdef DEBUG
  218             printk("sd%c : handling page request, no buffer\n",
  219                    'a' + MINOR(SCpnt->request.dev));
  220 #endif
  221 /*
  222   The SCpnt->request.nr_sectors field is always done in 512 byte sectors,
  223   even if this really isn't the case.
  224 */
  225             panic("sd.c: linked page request (%lx %x)",
  226                   SCpnt->request.sector, this_count);
  227           }
  228       }
      /* Mark this_count sectors done; requeue_sd_request() issues the next
         chunk of the request (or pulls a new request) as needed. */
  229     SCpnt = end_scsi_request(SCpnt, 1, this_count);
  230     requeue_sd_request(SCpnt);
  231     return;
  232   }
  233 
  234 /* Free up any indirection buffers we allocated for DMA purposes. */
  235     if (SCpnt->use_sg) {
  236       struct scatterlist * sgpnt;
  237       int i;
  238       sgpnt = (struct scatterlist *) SCpnt->buffer;
  239       for(i=0; i<SCpnt->use_sg; i++) {
  240 #ifdef DEBUG
  241         printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
  242                    SCpnt->bufflen);
  243 #endif
  244         if (sgpnt[i].alt_address) {
  245           scsi_free(sgpnt[i].address, sgpnt[i].length);
  246         };
  247       };
  248       scsi_free(SCpnt->buffer, SCpnt->sglist_len);  /* Free list of scatter-gather pointers */
  249     } else {
  250 #ifdef DEBUG
  251       printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
  252                    SCpnt->bufflen);
  253 #endif
  254       if (SCpnt->buffer != SCpnt->request.buffer)
  255         scsi_free(SCpnt->buffer, SCpnt->bufflen);
  256     };
  257 
  258 /*
  259         Now, if we were good little boys and girls, Santa left us a request
  260         sense buffer.  We can extract information from this, so we
  261         can choose a block to remap, etc.
  262 */
  263 
  264         if (driver_byte(result) != 0) {
  265           if (suggestion(result) == SUGGEST_REMAP) {
/* NOTE(review): the REMAP branch below is dead code (REMAP is not defined
   in this file) and would not compile if it were enabled -- the "if" lacks
   parentheses and the "else" arm is empty.  Left untouched here. */
  266 #ifdef REMAP
  267 /*
  268         Not yet implemented.  A read will fail after being remapped,
  269         a write will call the strategy routine again.
  270 */
  271             if rscsi_disks[DEVICE_NR(SCpnt->request.dev)].remap
  272               {
  273                 result = 0;
  274               }
  275             else
  276               
  277 #endif
  278             }
  279 
  280           if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
  281             if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
  282               if(rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->removable) {
  283               /* detected disc change.  set a bit and quietly refuse    */
  284               /* further access.                                        */
  285               
  286                 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->changed = 1;
  287                 SCpnt = end_scsi_request(SCpnt, 0, this_count);
  288                 requeue_sd_request(SCpnt);
  289                 return;
  290               }
  291             }
  292           }
  293           
  294 
  295 /*      If we had an ILLEGAL REQUEST returned, then we may have
  296 performed an unsupported command.  The only thing this should be would
  297 be a ten byte read where only a six byte read was supported.  Also,
  298 on a system where READ CAPACITY failed, we may have read past the end
  299 of the  disk. 
  300 */
  301 
  302           if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
  303             if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten) {
      /* Fall back from 10-byte to 6-byte READ/WRITE and retry the
         request; result is cleared so the error path below is skipped. */
  304               rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten = 0;
  305               requeue_sd_request(SCpnt);
  306               result = 0;
  307             } else {
  308             }
  309           }
  310         }  /* driver byte != 0 */
  311         if (result) {
  312                 printk("SCSI disk error : host %d id %d lun %d return code = %x\n",
  313                        rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->host->host_no,
  314                        rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->id,
  315                        rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->lun, result);
  316 
  317                 if (driver_byte(result) & DRIVER_SENSE)
  318                         print_sense("sd", SCpnt);
  319                 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
  320                 requeue_sd_request(SCpnt);
  321                 return;
  322         }
  323 }
 324 
/*
        do_sd_request() is the request handler entry point for the sd
        driver.  Together with requeue_sd_request() below, it takes block
        device requests and translates them into SCSI commands.
*/
 330 
  331 static void do_sd_request (void)
  332 {
    /* Top-level request loop: find a queueable request, attach it to a
       command slot, and dispatch it via requeue_sd_request(). */
  333   Scsi_Cmnd * SCpnt = NULL;
  334   struct request * req = NULL;
  335   int flag = 0;
        /* flag guarantees allocate_device() is tried at most once per call;
           see the comment before its use below. */
  336   while (1==1){
  337     cli();
        /* A request with dev == -1 has already been claimed -- presumably
           marked by request_queueable()/allocate_device(); TODO confirm. */
  338     if (CURRENT != NULL && CURRENT->dev == -1) {
  339       sti();
  340       return;
  341     };
  342 
  343     INIT_SCSI_REQUEST;
  344 
  345 
  346 /* We have to be careful here.  allocate_device will get a free pointer, but
  347    there is no guarantee that it is queueable.  In normal usage, we want to
  348    call this, because other types of devices may have the host all tied up,
  349    and we want to make sure that we have at least one request pending for this
  350    type of device.   We can also come through here while servicing an
  351    interrupt, because of the need to start another command.  If we call
  352    allocate_device more than once, then the system can wedge if the command
  353    is not queueable.  The request_queueable function is safe because it checks
  354    to make sure that the host is able to take another command before it returns
  355    a pointer.  */
  356 
  357     if (flag++ == 0)
  358       SCpnt = allocate_device(&CURRENT,
  359                               rscsi_disks[DEVICE_NR(MINOR(CURRENT->dev))].device, 0); 
  360     else SCpnt = NULL;
  361     sti();
  362 
  363 /* This is a performance enhancement.  We dig down into the request list and
  364    try and find a queueable request (i.e. device not busy, and host able to
  365    accept another command.  If we find one, then we queue it. This can
  366    make a big difference on systems with more than one disk drive.  We want
  367    to have the interrupts off when monkeying with the request list, because
  368    otherwise the kernel might try and slip in a request in between somewhere. */
  369 
  370     if (!SCpnt && sd_template.nr_dev > 1){
  371       struct request *req1;
  372       req1 = NULL;
  373       cli();
  374       req = CURRENT;
  375       while(req){
  376         SCpnt = request_queueable(req,
  377                                   rscsi_disks[DEVICE_NR(MINOR(req->dev))].device);
  378         if(SCpnt) break;
  379         req1 = req;
  380         req = req->next;
  381       };
        /* Unlink the claimed request (dev == -1) from the list; req1 is
           its predecessor, or NULL when it was the head. */
  382       if (SCpnt && req->dev == -1) {
  383         if (req == CURRENT) 
  384           CURRENT = CURRENT->next;
  385         else
  386           req1->next = req->next;
  387       };
  388       sti();
  389     };
  390     
  391     if (!SCpnt) return; /* Could not find anything to do */
  392         
  393     /* Queue command */
  394     requeue_sd_request(SCpnt);
  395   };  /* While */
  396 }    
 397 
 398 static void requeue_sd_request (Scsi_Cmnd * SCpnt)
     /* [previous][next][first][last][top][bottom][index][help] */
 399 {
 400         int dev, block, this_count;
 401         unsigned char cmd[10];
 402         int bounce_size, contiguous;
 403         int max_sg;
 404         struct buffer_head * bh, *bhp;
 405         char * buff, *bounce_buffer;
 406 
 407 repeat:
 408 
 409         if(!SCpnt || SCpnt->request.dev <= 0) {
 410           do_sd_request();
 411           return;
 412         }
 413 
 414         dev =  MINOR(SCpnt->request.dev);
 415         block = SCpnt->request.sector;
 416         this_count = 0;
 417 
 418 #ifdef DEBUG
 419         printk("Doing sd request, dev = %d, block = %d\n", dev, block);
 420 #endif
 421 
 422         if (dev >= (sd_template.dev_max << 4) || 
 423             !rscsi_disks[DEVICE_NR(dev)].device ||
 424             block + SCpnt->request.nr_sectors > sd[dev].nr_sects)
 425                 {
 426                 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 427                 goto repeat;
 428                 }
 429 
 430         block += sd[dev].start_sect;
 431         dev = DEVICE_NR(dev);
 432 
 433         if (rscsi_disks[dev].device->changed)
 434                 {
 435 /*
 436  * quietly refuse to do anything to a changed disc until the changed bit has been reset
 437  */
 438                 /* printk("SCSI disk has been changed.  Prohibiting further I/O.\n");   */
 439                 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 440                 goto repeat;
 441                 }
 442 
 443 #ifdef DEBUG
 444         printk("sd%c : real dev = /dev/sd%c, block = %d\n", 'a' + MINOR(SCpnt->request.dev), dev, block);
 445 #endif
 446 
 447         /*
 448          * If we have a 1K hardware sectorsize, prevent access to single
 449          * 512 byte sectors.  In theory we could handle this - in fact
 450          * the scsi cdrom driver must be able to handle this because
 451          * we typically use 1K blocksizes, and cdroms typically have
 452          * 2K hardware sectorsizes.  Of course, things are simpler
 453          * with the cdrom, since it is read-only.  For performance
 454          * reasons, the filesystems should be able to handle this
 455          * and not force the scsi disk driver to use bounce buffers
 456          * for this.
 457          */
 458         if (rscsi_disks[dev].sector_size == 1024)
 459           if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
 460                 printk("sd.c:Bad block number requested");
 461                 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 462                 goto repeat;
 463         }
 464         
 465         switch (SCpnt->request.cmd)
 466                 {
 467                 case WRITE :
 468                         if (!rscsi_disks[dev].device->writeable)
 469                                 {
 470                                 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 471                                 goto repeat;
 472                                 }
 473                         cmd[0] = WRITE_6;
 474                         break;
 475                 case READ :
 476                         cmd[0] = READ_6;
 477                         break;
 478                 default :
 479                         panic ("Unknown sd command %d\n", SCpnt->request.cmd);
 480                       }
 481 
 482         SCpnt->this_count = 0;
 483 
 484         /* If the host adapter can deal with very large scatter-gather
 485            requests, it is a waste of time to cluster */
 486         contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
 487         bounce_buffer = NULL;
 488         bounce_size = (SCpnt->request.nr_sectors << 9);
 489 
 490         /* First see if we need a bounce buffer for this request.  If we do, make sure
 491            that we can allocate a buffer.  Do not waste space by allocating a bounce
 492            buffer if we are straddling the 16Mb line */
 493 
 494         
 495         if (contiguous && SCpnt->request.bh &&
 496             ((int) SCpnt->request.bh->b_data) + (SCpnt->request.nr_sectors << 9) - 1 > 
 497             ISA_DMA_THRESHOLD && SCpnt->host->unchecked_isa_dma) {
 498           if(((int) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
 499             bounce_buffer = (char *) scsi_malloc(bounce_size);
 500           if(!bounce_buffer) contiguous = 0;
 501         };
 502 
 503         if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
 504           for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp, 
 505               bhp = bhp->b_reqnext) {
 506             if(!CONTIGUOUS_BUFFERS(bh,bhp)) { 
 507               if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
 508               contiguous = 0;
 509               break;
 510             } 
 511           };
 512         if (!SCpnt->request.bh || contiguous) {
 513 
 514           /* case of page request (i.e. raw device), or unlinked buffer */
 515           this_count = SCpnt->request.nr_sectors;
 516           buff = SCpnt->request.buffer;
 517           SCpnt->use_sg = 0;
 518 
 519         } else if (SCpnt->host->sg_tablesize == 0 ||
 520                    (need_isa_buffer && 
 521                     dma_free_sectors <= 10)) {
 522 
 523           /* Case of host adapter that cannot scatter-gather.  We also
 524            come here if we are running low on DMA buffer memory.  We set
 525            a threshold higher than that we would need for this request so
 526            we leave room for other requests.  Even though we would not need
 527            it all, we need to be conservative, because if we run low enough
 528            we have no choice but to panic. */
 529 
 530           if (SCpnt->host->sg_tablesize != 0 &&
 531               need_isa_buffer && 
 532               dma_free_sectors <= 10)
 533             printk("Warning: SCSI DMA buffer space running low.  Using non scatter-gather I/O.\n");
 534 
 535           this_count = SCpnt->request.current_nr_sectors;
 536           buff = SCpnt->request.buffer;
 537           SCpnt->use_sg = 0;
 538 
 539         } else {
 540 
 541           /* Scatter-gather capable host adapter */
 542           struct scatterlist * sgpnt;
 543           int count, this_count_max;
 544           int counted;
 545 
 546           bh = SCpnt->request.bh;
 547           this_count = 0;
 548           this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
 549           count = 0;
 550           bhp = NULL;
 551           while(bh) {
 552             if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
 553             if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
 554                !CLUSTERABLE_DEVICE(SCpnt) ||
 555                (SCpnt->host->unchecked_isa_dma &&
 556                ((unsigned int) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
 557               if (count < SCpnt->host->sg_tablesize) count++;
 558               else break;
 559             };
 560             this_count += (bh->b_size >> 9);
 561             bhp = bh;
 562             bh = bh->b_reqnext;
 563           };
 564 #if 0
 565           if(SCpnt->host->unchecked_isa_dma &&
 566              ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
 567 #endif
 568           SCpnt->use_sg = count;  /* Number of chains */
 569           count = 512;/* scsi_malloc can only allocate in chunks of 512 bytes*/
 570           while( count < (SCpnt->use_sg * sizeof(struct scatterlist))) 
 571             count = count << 1;
 572           SCpnt->sglist_len = count;
 573           max_sg = count / sizeof(struct scatterlist);
 574           if(SCpnt->host->sg_tablesize < max_sg) max_sg = SCpnt->host->sg_tablesize;
 575           sgpnt = (struct scatterlist * ) scsi_malloc(count);
 576           memset(sgpnt, 0, count);  /* Zero so it is easy to fill */
 577           if (!sgpnt) {
 578             printk("Warning - running *really* short on DMA buffers\n");
 579             SCpnt->use_sg = 0;  /* No memory left - bail out */
 580             this_count = SCpnt->request.current_nr_sectors;
 581             buff = SCpnt->request.buffer;
 582           } else {
 583             buff = (char *) sgpnt;
 584             counted = 0;
 585             for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
 586                 count < SCpnt->use_sg && bh; 
 587                 count++, bh = bhp) {
 588 
 589               bhp = bh->b_reqnext;
 590 
 591               if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
 592               sgpnt[count].length += bh->b_size;
 593               counted += bh->b_size >> 9;
 594 
 595               if (((int) sgpnt[count].address) + sgpnt[count].length - 1 > 
 596                   ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
 597                   !sgpnt[count].alt_address) {
 598                 sgpnt[count].alt_address = sgpnt[count].address;
 599                 /* We try and avoid exhausting the DMA pool, since it is easier
 600                    to control usage here.  In other places we might have a more
 601                    pressing need, and we would be screwed if we ran out */
 602                 if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
 603                   sgpnt[count].address = NULL;
 604                 } else {
 605                   sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
 606                 };
 607 /* If we start running low on DMA buffers, we abort the scatter-gather
 608    operation, and free all of the memory we have allocated.  We want to
 609    ensure that all scsi operations are able to do at least a non-scatter/gather
 610    operation */
 611                 if(sgpnt[count].address == NULL){ /* Out of dma memory */
 612 #if 0
 613                   printk("Warning: Running low on SCSI DMA buffers");
 614                   /* Try switching back to a non scatter-gather operation. */
 615                   while(--count >= 0){
 616                     if(sgpnt[count].alt_address) 
 617                       scsi_free(sgpnt[count].address, sgpnt[count].length);
 618                   };
 619                   this_count = SCpnt->request.current_nr_sectors;
 620                   buff = SCpnt->request.buffer;
 621                   SCpnt->use_sg = 0;
 622                   scsi_free(sgpnt, SCpnt->sglist_len);
 623 #endif
 624                   SCpnt->use_sg = count;
 625                   this_count = counted -= bh->b_size >> 9;
 626                   break;
 627                 };
 628 
 629               };
 630 
 631               /* Only cluster buffers if we know that we can supply DMA buffers
 632                  large enough to satisfy the request.  Do not cluster a new
 633                  request if this would mean that we suddenly need to start
 634                  using DMA bounce buffers */
 635               if(bhp && CONTIGUOUS_BUFFERS(bh,bhp) && CLUSTERABLE_DEVICE(SCpnt)) {
 636                 char * tmp;
 637 
 638                 if (((int) sgpnt[count].address) + sgpnt[count].length +
 639                     bhp->b_size - 1 > ISA_DMA_THRESHOLD && 
 640                     (SCpnt->host->unchecked_isa_dma) &&
 641                     !sgpnt[count].alt_address) continue;
 642 
 643                 if(!sgpnt[count].alt_address) {count--; continue; }
 644                 if(dma_free_sectors > 10)
 645                   tmp = (char *) scsi_malloc(sgpnt[count].length + bhp->b_size);
 646                 else {
 647                   tmp = NULL;
 648                   max_sg = SCpnt->use_sg;
 649                 };
 650                 if(tmp){
 651                   scsi_free(sgpnt[count].address, sgpnt[count].length);
 652                   sgpnt[count].address = tmp;
 653                   count--;
 654                   continue;
 655                 };
 656 
 657                 /* If we are allowed another sg chain, then increment counter so we
 658                    can insert it.  Otherwise we will end up truncating */
 659 
 660                 if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
 661               };  /* contiguous buffers */
 662             }; /* for loop */
 663 
 664             this_count = counted; /* This is actually how many we are going to transfer */
 665 
 666             if(count < SCpnt->use_sg || SCpnt->use_sg > SCpnt->host->sg_tablesize){
 667               bh = SCpnt->request.bh;
 668               printk("Use sg, count %d %x %d\n", SCpnt->use_sg, count, dma_free_sectors);
 669               printk("maxsg = %x, counted = %d this_count = %d\n", max_sg, counted, this_count);
 670               while(bh){
 671                 printk("[%p %lx] ", bh->b_data, bh->b_size);
 672                 bh = bh->b_reqnext;
 673               };
 674               if(SCpnt->use_sg < 16)
 675                 for(count=0; count<SCpnt->use_sg; count++)
 676                   printk("{%d:%p %p %d}  ", count,
 677                          sgpnt[count].address,
 678                          sgpnt[count].alt_address,
 679                          sgpnt[count].length);
 680               panic("Ooops");
 681             };
 682 
 683             if (SCpnt->request.cmd == WRITE)
 684               for(count=0; count<SCpnt->use_sg; count++)
 685                 if(sgpnt[count].alt_address)
 686                   memcpy(sgpnt[count].address, sgpnt[count].alt_address, 
 687                          sgpnt[count].length);
 688           };  /* Able to malloc sgpnt */
 689         };  /* Host adapter capable of scatter-gather */
 690 
 691 /* Now handle the possibility of DMA to addresses > 16Mb */
 692 
 693         if(SCpnt->use_sg == 0){
 694           if (((int) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD && 
 695             (SCpnt->host->unchecked_isa_dma)) {
 696             if(bounce_buffer)
 697               buff = bounce_buffer;
 698             else
 699               buff = (char *) scsi_malloc(this_count << 9);
 700             if(buff == NULL) {  /* Try backing off a bit if we are low on mem*/
 701               this_count = SCpnt->request.current_nr_sectors;
 702               buff = (char *) scsi_malloc(this_count << 9);
 703               if(!buff) panic("Ran out of DMA buffers.");
 704             };
 705             if (SCpnt->request.cmd == WRITE)
 706               memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
 707           };
 708         };
 709 #ifdef DEBUG
 710         printk("sd%c : %s %d/%d 512 byte blocks.\n", 'a' + MINOR(SCpnt->request.dev),
 711                 (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
 712                 this_count, SCpnt->request.nr_sectors);
 713 #endif
 714 
 715         cmd[1] = (SCpnt->lun << 5) & 0xe0;
 716 
 717         if (rscsi_disks[dev].sector_size == 1024){
 718           if(block & 1) panic("sd.c:Bad block number requested");
 719           if(this_count & 1) panic("sd.c:Bad block number requested");
 720           block = block >> 1;
 721           this_count = this_count >> 1;
 722         };
 723 
 724         if (rscsi_disks[dev].sector_size == 256){
 725           block = block << 1;
 726           this_count = this_count << 1;
 727         };
 728 
 729         if (((this_count > 0xff) ||  (block > 0x1fffff)) && rscsi_disks[dev].ten)
 730                 {
 731                 if (this_count > 0xffff)
 732                         this_count = 0xffff;
 733 
 734                 cmd[0] += READ_10 - READ_6 ;
 735                 cmd[2] = (unsigned char) (block >> 24) & 0xff;
 736                 cmd[3] = (unsigned char) (block >> 16) & 0xff;
 737                 cmd[4] = (unsigned char) (block >> 8) & 0xff;
 738                 cmd[5] = (unsigned char) block & 0xff;
 739                 cmd[6] = cmd[9] = 0;
 740                 cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
 741                 cmd[8] = (unsigned char) this_count & 0xff;
 742                 }
 743         else
 744                 {
 745                 if (this_count > 0xff)
 746                         this_count = 0xff;
 747 
 748                 cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
 749                 cmd[2] = (unsigned char) ((block >> 8) & 0xff);
 750                 cmd[3] = (unsigned char) block & 0xff;
 751                 cmd[4] = (unsigned char) this_count;
 752                 cmd[5] = 0;
 753                 }
 754 
 755 /*
 756  * We shouldn't disconnect in the middle of a sector, so with a dumb 
 757  * host adapter, it's safe to assume that we can at least transfer 
 758  * this many bytes between each connect / disconnect.  
 759  */
 760 
 761         SCpnt->transfersize = rscsi_disks[dev].sector_size;
 762         SCpnt->underflow = this_count << 9; 
 763         scsi_do_cmd (SCpnt, (void *) cmd, buff, 
 764                      this_count * rscsi_disks[dev].sector_size,
 765                      rw_intr, 
 766                      (SCpnt->device->type == TYPE_DISK ? 
 767                                      SD_TIMEOUT : SD_MOD_TIMEOUT),
 768                      MAX_RETRIES);
 769 }
 770 
 771 static int check_scsidisk_media_change(dev_t full_dev){
     /* [previous][next][first][last][top][bottom][index][help] */
 772         int retval;
 773         int target;
 774         struct inode inode;
 775         int flag = 0;
 776 
 777         target =  DEVICE_NR(MINOR(full_dev));
 778 
 779         if (target >= sd_template.dev_max ||
 780             !rscsi_disks[target].device) {
 781                 printk("SCSI disk request error: invalid device.\n");
 782                 return 0;
 783         };
 784 
 785         if(!rscsi_disks[target].device->removable) return 0;
 786 
 787         inode.i_rdev = full_dev;  /* This is all we really need here */
 788         retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
 789 
 790         if(retval){ /* Unable to test, unit probably not ready.  This usually
 791                      means there is no disc in the drive.  Mark as changed,
 792                      and we will figure it out later once the drive is
 793                      available again.  */
 794 
 795           rscsi_disks[target].device->changed = 1;
 796           return 1; /* This will force a flush, if called from
 797                        check_disk_change */
 798         };
 799 
 800         retval = rscsi_disks[target].device->changed;
 801         if(!flag) rscsi_disks[target].device->changed = 0;
 802         return retval;
 803 }
 804 
 805 static void sd_init_done (Scsi_Cmnd * SCpnt)
     /* [previous][next][first][last][top][bottom][index][help] */
 806 {
 807   struct request * req;
 808   
 809   req = &SCpnt->request;
 810   req->dev = 0xfffe; /* Busy, but indicate request done */
 811   
 812   if (req->sem != NULL) {
 813     up(req->sem);
 814   }
 815 }
 816 
/*
 * Probe one attached SCSI disk: spin it up if necessary (boot time only),
 * then READ CAPACITY to learn its size and sector size.  On an unsupported
 * sector size the non-removable device entry is deleted.  Returns the disk
 * index i (unchanged), so callers can write `i = sd_init_onedisk(i)`.
 */
static int sd_init_onedisk(int i)
{
  unsigned char cmd[10];
  unsigned char *buffer;
  char spintime;
  int the_result, retries;
  Scsi_Cmnd * SCpnt;

  /* We need to retry the READ_CAPACITY because a UNIT_ATTENTION is considered
     a fatal error, and many devices report such an error just after a scsi
     bus reset. */

  SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
  buffer = (unsigned char *) scsi_malloc(512);

  spintime = 0;

  /* Spin up drives, as required.  Only do this at boot time */
  if (current == task[0]){
    do{
      /* Issue TEST UNIT READY to see whether the drive responds yet. */
      cmd[0] = TEST_UNIT_READY;
      cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
      memset ((void *) &cmd[2], 0, 8);
      SCpnt->request.dev = 0xffff;  /* Mark as really busy again */
      SCpnt->sense_buffer[0] = 0;
      SCpnt->sense_buffer[2] = 0;
      
      scsi_do_cmd (SCpnt,
                   (void *) cmd, (void *) buffer,
                   512, sd_init_done,  SD_TIMEOUT,
                   MAX_RETRIES);
      
      /* Busy-wait for sd_init_done to flag completion (dev == 0xfffe).
         Safe only because task[0] cannot sleep at boot. */
      while(SCpnt->request.dev != 0xfffe);
      
      the_result = SCpnt->result;
      
      /* Look for non-removable devices that return NOT_READY.  Issue command
         to spin up drive for these cases. */
      if(the_result && !rscsi_disks[i].device->removable && 
         SCpnt->sense_buffer[2] == NOT_READY) {
        int time1;
        if(!spintime){
          printk( "sd%c: Spinning up disk...", 'a' + i );
          /* START STOP UNIT with the "immediate" bit: kick off the spin
             cycle without waiting for it to finish. */
          cmd[0] = START_STOP;
          cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
          cmd[1] |= 1;  /* Return immediately */
          memset ((void *) &cmd[2], 0, 8);
          cmd[4] = 1; /* Start spin cycle */
          SCpnt->request.dev = 0xffff;  /* Mark as really busy again */
          SCpnt->sense_buffer[0] = 0;
          SCpnt->sense_buffer[2] = 0;
          
          scsi_do_cmd (SCpnt,
                       (void *) cmd, (void *) buffer,
                       512, sd_init_done,  SD_TIMEOUT,
                       MAX_RETRIES);
          
          while(SCpnt->request.dev != 0xfffe);

          /* NOTE(review): spintime is a char, so storing jiffies here
             truncates; also doubles as the "spin-up started" flag below.
             Looks like a latent bug — confirm against later kernels. */
          spintime = jiffies;
        };

        /* Busy-wait one second before polling the drive again. */
        time1 = jiffies;
        while(jiffies < time1 + HZ); /* Wait 1 second for next try */
        printk( "." );
      };
      /* Keep polling while the drive still errors, a spin-up was started,
         and the (truncated) deadline of spintime+5000 jiffies holds. */
    } while(the_result && spintime && spintime+5000 > jiffies);
    if (spintime) {
       if (the_result)
           printk( "not responding...\n" );
       else
           printk( "ready\n" );
    }
  };  /* current == task[0] */


  /* Now read the capacity; retry a few times to ride out the
     UNIT ATTENTION mentioned above. */
  retries = 3;
  do {
    cmd[0] = READ_CAPACITY;
    cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
    memset ((void *) &cmd[2], 0, 8);
    memset ((void *) buffer, 0, 8);
    SCpnt->request.dev = 0xffff;  /* Mark as really busy again */
    SCpnt->sense_buffer[0] = 0;
    SCpnt->sense_buffer[2] = 0;
    
    scsi_do_cmd (SCpnt,
                 (void *) cmd, (void *) buffer,
                 8, sd_init_done,  SD_TIMEOUT,
                 MAX_RETRIES);
    
    /* At boot (task[0]) we must busy-wait; otherwise sleep on the
       request's completion semaphore. */
    if (current == task[0])
      while(SCpnt->request.dev != 0xfffe);
    else
      if (SCpnt->request.dev != 0xfffe){
        struct semaphore sem = MUTEX_LOCKED;
        SCpnt->request.sem = &sem;
        down(&sem);
        /* Hmm.. Have to ask about this one.. */
        while (SCpnt->request.dev != 0xfffe) schedule();
      };
    
    the_result = SCpnt->result;
    retries--;

  } while(the_result && retries);

  SCpnt->request.dev = -1;  /* Mark as not busy */

  wake_up(&SCpnt->device->device_wait); 

  /* Wake up a process waiting for device*/

  /*
   *    The SCSI standard says "READ CAPACITY is necessary for self configuring software"
   *    While not mandatory, support of READ CAPACITY is strongly encouraged.
   *    We used to die if we couldn't successfully do a READ CAPACITY.
   *    But, now we go on about our way.  The side effects of this are
   *
   *    1.  We can't know block size with certainty.  I have said "512 bytes is it"
   *            as this is most common.
   *
   *    2.  Recovery from when some one attempts to read past the end of the raw device will
   *        be slower.
   */

  if (the_result)
    {
      /* READ CAPACITY never succeeded: log the failure, then assume a
         512-byte sector, 1GB disk so the device remains usable. */
      printk ("sd%c : READ CAPACITY failed.\n"
              "sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
              'a' + i, 'a' + i,
              status_byte(the_result),
              msg_byte(the_result),
              host_byte(the_result),
              driver_byte(the_result)
              );
      if (driver_byte(the_result)  & DRIVER_SENSE)
        printk("sd%c : extended sense code = %1x \n", 'a' + i, SCpnt->sense_buffer[2] & 0xf);
      else
        printk("sd%c : sense not available. \n", 'a' + i);

      printk("sd%c : block size assumed to be 512 bytes, disk size 1GB.  \n", 'a' + i);
      rscsi_disks[i].capacity = 0x1fffff;
      rscsi_disks[i].sector_size = 512;

      /* Set dirty bit for removable devices if not ready - sometimes drives
         will not report this properly. */
      if(rscsi_disks[i].device->removable && 
         SCpnt->sense_buffer[2] == NOT_READY)
        rscsi_disks[i].device->changed = 1;

    }
  else
    {
      /* READ CAPACITY returns last LBA and sector size, both big-endian. */
      rscsi_disks[i].capacity = (buffer[0] << 24) |
        (buffer[1] << 16) |
          (buffer[2] << 8) |
            buffer[3];

      rscsi_disks[i].sector_size = (buffer[4] << 24) |
        (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];

      if (rscsi_disks[i].sector_size != 512 &&
          rscsi_disks[i].sector_size != 1024 &&
          rscsi_disks[i].sector_size != 256)
        {
          printk ("sd%c : unsupported sector size %d.\n",
                  'a' + i, rscsi_disks[i].sector_size);
          if(rscsi_disks[i].device->removable){
            /* Media may be swapped for a sane one; keep the slot. */
            rscsi_disks[i].capacity = 0;
          } else {
            /* Unsupported fixed disk: drop it from the table entirely. */
            printk ("scsi : deleting disk entry.\n");
            rscsi_disks[i].device = NULL;
            sd_template.nr_dev--;
            return i;
          };
        }
      /* Normalize capacity to 512-byte sectors, the unit the block
         layer works in. */
      if(rscsi_disks[i].sector_size == 1024)
        rscsi_disks[i].capacity <<= 1;  /* Change this into 512 byte sectors */
      if(rscsi_disks[i].sector_size == 256)
        rscsi_disks[i].capacity >>= 1;  /* Change this into 512 byte sectors */
    }

  /* Allow 10-byte commands and bad-block remapping by default. */
  rscsi_disks[i].ten = 1;
  rscsi_disks[i].remap = 1;
  scsi_free(buffer, 512);
  return i;
}
1005 
1006 /*
1007         The sd_init() function looks at all SCSI drives present, determines
1008         their size, and reads partition table entries for them.
1009 */
1010 
1011 
1012 static void sd_init()
     /* [previous][next][first][last][top][bottom][index][help] */
1013 {
1014         int i;
1015         static int sd_registered = 0;
1016 
1017         if (sd_template.dev_noticed == 0) return;
1018 
1019         if(!sd_registered) {
1020           if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
1021             printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
1022             return;
1023           }
1024           sd_registered++;
1025         }
1026 
1027         /* We do not support attaching loadable devices yet. */
1028         if(scsi_loadable_module_flag) return;
1029 
1030         sd_template.dev_max = sd_template.dev_noticed;
1031 
1032         rscsi_disks = (Scsi_Disk *) 
1033           scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk));
1034         memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));
1035 
1036         sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) * 
1037                                             sizeof(int));
1038         memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));
1039 
1040         sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) * 
1041                                                  sizeof(int));
1042         for(i=0;i<(sd_template.dev_max << 4);i++) sd_blocksizes[i] = 1024;
1043         blksize_size[MAJOR_NR] = sd_blocksizes;
1044 
1045         sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
1046                                                    sizeof(struct hd_struct));
1047 
1048 
1049         sd_gendisk.max_nr = sd_template.dev_max;
1050         sd_gendisk.part = sd;
1051         sd_gendisk.sizes = sd_sizes;
1052         sd_gendisk.real_devices = (void *) rscsi_disks;
1053 
1054 }
1055 
1056 static void sd_finish()
     /* [previous][next][first][last][top][bottom][index][help] */
1057 {
1058         int i;
1059 
1060         for (i = 0; i < sd_template.dev_max; ++i)
1061           if (rscsi_disks[i].device) i = sd_init_onedisk(i);
1062 
1063         blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
1064 
1065         /* If our host adapter is capable of scatter-gather, then we increase
1066            the read-ahead to 16 blocks (32 sectors).  If not, we use
1067            a two block (4 sector) read ahead. */
1068         if(rscsi_disks[0].device->host->sg_tablesize)
1069           read_ahead[MAJOR_NR] = 120;
1070         /* 64 sector read-ahead */
1071         else
1072           read_ahead[MAJOR_NR] = 4;  /* 4 sector read-ahead */
1073         
1074         sd_gendisk.next = gendisk_head;
1075         gendisk_head = &sd_gendisk;
1076         return;
1077 }
1078 
1079 static int sd_detect(Scsi_Device * SDp){
     /* [previous][next][first][last][top][bottom][index][help] */
1080   /* We do not support attaching loadable devices yet. */
1081   if(scsi_loadable_module_flag) return 0;
1082   if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1083 
1084   printk("Detected scsi disk sd%c at scsi%d, id %d, lun %d\n", 
1085          'a'+ (sd_template.dev_noticed++),
1086          SDp->host->host_no , SDp->id, SDp->lun); 
1087 
1088          return 1;
1089 
1090 }
1091 
1092 static void sd_attach(Scsi_Device * SDp){
     /* [previous][next][first][last][top][bottom][index][help] */
1093    Scsi_Disk * dpnt;
1094    int i;
1095 
1096    /* We do not support attaching loadable devices yet. */
1097    if(scsi_loadable_module_flag) return;
1098    if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return;
1099 
1100    if(sd_template.nr_dev >= sd_template.dev_max) 
1101      panic ("scsi_devices corrupt (sd)");
1102 
1103    for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++) 
1104      if(!dpnt->device) break;
1105 
1106    if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)");
1107 
1108    SDp->scsi_request_fn = do_sd_request;
1109    rscsi_disks[i].device = SDp;
1110    sd_template.nr_dev++;
1111 };
1112 
1113 #define DEVICE_BUSY rscsi_disks[target].device->busy
1114 #define USAGE rscsi_disks[target].device->access_count
1115 #define CAPACITY rscsi_disks[target].capacity
1116 #define MAYBE_REINIT  sd_init_onedisk(target)
1117 #define GENDISK_STRUCT sd_gendisk
1118 
1119 /* This routine is called to flush all partitions and partition tables
1120    for a changed scsi disk, and then re-read the new partition table.
1121    If we are revalidating a disk because of a media change, then we
1122    enter with usage == 0.  If we are using an ioctl, we automatically have
1123    usage == 1 (we need an open channel to use an ioctl :-), so this
1124    is our limit.
1125  */
/* This routine is called to flush all partitions and partition tables
   for a changed scsi disk, and then re-read the new partition table.
   If we are revalidating a disk because of a media change, then we
   enter with usage == 0.  If we are using an ioctl, we automatically have
   usage == 1 (we need an open channel to use an ioctl :-), so this
   is our limit.
 */
int revalidate_scsidisk(int dev, int maxusage){
          int target, major;
          struct gendisk * gdev;
          int max_p;
          int start;
          int i;

          target =  DEVICE_NR(MINOR(dev));
          gdev = &GENDISK_STRUCT;

          /* Test-and-set the per-device busy flag with interrupts off so
             no one else starts using the device mid-revalidation. */
          cli();
          if (DEVICE_BUSY || USAGE > maxusage) {
            sti();
            printk("Device busy for revalidation (usage=%d)\n", USAGE);
            return -EBUSY;
          };
          DEVICE_BUSY = 1;
          sti();

          max_p = gdev->max_p;
          start = target << gdev->minor_shift;
          major = MAJOR_NR << 8;

          /* Flush and invalidate every partition minor of this drive,
             then forget its partition boundaries. */
          for (i=max_p - 1; i >=0 ; i--) {
            sync_dev(major | start | i);
            invalidate_inodes(major | start | i);
            invalidate_buffers(major | start | i);
            gdev->part[start+i].start_sect = 0;
            gdev->part[start+i].nr_sects = 0;
          };

          /* Re-probe the drive itself (capacity, sector size). */
#ifdef MAYBE_REINIT
          MAYBE_REINIT;
#endif

          /* Whole-disk minor spans the new capacity; re-read the
             partition table. */
          gdev->part[start].nr_sects = CAPACITY;
          resetup_one_dev(gdev, target);

          DEVICE_BUSY = 0;
          return 0;
}
1167 
/* File-operations hook: revalidate after a media change, requiring the
   device to be completely unused (maxusage == 0). */
static int fop_revalidate_scsidisk(dev_t dev){
  return revalidate_scsidisk(dev, 0);
}
1171 

/* [previous][next][first][last][top][bottom][index][help] */