root/drivers/scsi/sd.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. sd_open
  2. sd_release
  3. sd_geninit
  4. rw_intr
  5. do_sd_request
  6. requeue_sd_request
  7. check_scsidisk_media_change
  8. sd_init_done
  9. sd_init_onedisk
  10. sd_init
  11. sd_finish
  12. sd_detect
  13. sd_attach
  14. revalidate_scsidisk
  15. fop_revalidate_scsidisk

   1 /*
   2  *      sd.c Copyright (C) 1992 Drew Eckhardt 
   3  *           Copyright (C) 1993, 1994 Eric Youngdale
   4  *      Linux scsi disk driver by
   5  *              Drew Eckhardt 
   6  *
   7  *      <drew@colorado.edu>
   8  *
   9  *       Modified by Eric Youngdale ericy@cais.com to
  10  *       add scatter-gather, multiple outstanding request, and other
  11  *       enhancements.
  12  */
  13 
  14 #include <linux/fs.h>
  15 #include <linux/kernel.h>
  16 #include <linux/sched.h>
  17 #include <linux/string.h>
  18 #include <linux/errno.h>
  19 #include <asm/system.h>
  20 
  21 #define MAJOR_NR SCSI_DISK_MAJOR
  22 #include "../block/blk.h"
  23 #include "scsi.h"
  24 #include "hosts.h"
  25 #include "sd.h"
  26 #include "scsi_ioctl.h"
  27 #include "constants.h"
  28 
  29 #include <linux/genhd.h>
  30 
  31 /*
  32 static const char RCSid[] = "$Header:";
  33 */
  34 
  35 #define MAX_RETRIES 5
  36 
  37 /*
  38  *      Time out in seconds for disks and Magneto-opticals (which are slower).
  39  */
  40 
  41 #define SD_TIMEOUT 600
  42 #define SD_MOD_TIMEOUT 750
  43 
  44 #define CLUSTERABLE_DEVICE(SC) (SC->host->hostt->use_clustering && \
  45                             SC->device->type != TYPE_MOD)
  46 
  47 struct hd_struct * sd;
  48 
  49 Scsi_Disk * rscsi_disks;
  50 static int * sd_sizes;
  51 static int * sd_blocksizes;
  52 static int * sd_hardsizes;              /* Hardware sector size */
  53 
  54 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
  55 
  56 static int check_scsidisk_media_change(dev_t);
  57 static int fop_revalidate_scsidisk(dev_t);
  58 
  59 static sd_init_onedisk(int);
  60 
  61 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
  62 
  63 static void sd_init(void);
  64 static void sd_finish(void);
  65 static void sd_attach(Scsi_Device *);
  66 static int sd_detect(Scsi_Device *);
  67 
  68 struct Scsi_Device_Template sd_template = {NULL, "disk", "sd", TYPE_DISK, 
  69                                              SCSI_DISK_MAJOR, 0, 0, 0, 1,
  70                                              sd_detect, sd_init,
  71                                              sd_finish, sd_attach, NULL};
  72 
  73 static int sd_open(struct inode * inode, struct file * filp)
     /* [previous][next][first][last][top][bottom][index][help] */
  74 {
  75         int target;
  76         target =  DEVICE_NR(MINOR(inode->i_rdev));
  77 
  78         if(target >= sd_template.dev_max || !rscsi_disks[target].device)
  79           return -ENXIO;   /* No such device */
  80         
  81 /* Make sure that only one process can do a check_change_disk at one time.
  82  This is also used to lock out further access when the partition table is being re-read. */
  83 
  84         while (rscsi_disks[target].device->busy);
  85 
  86         if(rscsi_disks[target].device->removable) {
  87           check_disk_change(inode->i_rdev);
  88 
  89           if(!rscsi_disks[target].device->access_count)
  90             sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
  91         };
  92         rscsi_disks[target].device->access_count++;
  93         return 0;
  94 }
  95 
  96 static void sd_release(struct inode * inode, struct file * file)
     /* [previous][next][first][last][top][bottom][index][help] */
  97 {
  98         int target;
  99         sync_dev(inode->i_rdev);
 100 
 101         target =  DEVICE_NR(MINOR(inode->i_rdev));
 102 
 103         rscsi_disks[target].device->access_count--;
 104 
 105         if(rscsi_disks[target].device->removable) {
 106           if(!rscsi_disks[target].device->access_count)
 107             sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
 108         };
 109 }
 110 
 111 static void sd_geninit(void);
 112 
 113 static struct file_operations sd_fops = {
 114         NULL,                   /* lseek - default */
 115         block_read,             /* read - general block-dev read */
 116         block_write,            /* write - general block-dev write */
 117         NULL,                   /* readdir - bad */
 118         NULL,                   /* select */
 119         sd_ioctl,               /* ioctl */
 120         NULL,                   /* mmap */
 121         sd_open,                /* open code */
 122         sd_release,             /* release */
 123         block_fsync,            /* fsync */
 124         NULL,                   /* fasync */
 125         check_scsidisk_media_change,  /* Disk change */
 126         fop_revalidate_scsidisk     /* revalidate */
 127 };
 128 
 129 static struct gendisk sd_gendisk = {
 130         MAJOR_NR,               /* Major number */
 131         "sd",           /* Major name */
 132         4,              /* Bits to shift to get real from partition */
 133         1 << 4,         /* Number of partitions per real */
 134         0,              /* maximum number of real */
 135         sd_geninit,     /* init function */
 136         NULL,           /* hd struct */
 137         NULL,   /* block sizes */
 138         0,              /* number */
 139         NULL,   /* internal */
 140         NULL            /* next */
 141 };
 142 
 143 static void sd_geninit (void)
     /* [previous][next][first][last][top][bottom][index][help] */
 144 {
 145         int i;
 146 
 147         for (i = 0; i < sd_template.dev_max; ++i)
 148           if(rscsi_disks[i].device) 
 149             sd[i << 4].nr_sects = rscsi_disks[i].capacity;
 150         sd_gendisk.nr_real = sd_template.dev_max;
 151 }
 152 
/*
        rw_intr is the interrupt routine for the device driver.  It will
        be notified on the end of a SCSI read / write, and
        will take one of several actions based on success or failure.
*/
 158 
 159 static void rw_intr (Scsi_Cmnd *SCpnt)
     /* [previous][next][first][last][top][bottom][index][help] */
 160 {
 161   int result = SCpnt->result;
 162   int this_count = SCpnt->bufflen >> 9;
 163 
 164 #ifdef DEBUG
 165   printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.dev), SCpnt->host->host_no, result);
 166 #endif
 167 
 168 /*
 169   First case : we assume that the command succeeded.  One of two things will
 170   happen here.  Either we will be finished, or there will be more
 171   sectors that we were unable to read last time.
 172 */
 173 
 174   if (!result) {
 175 
 176 #ifdef DEBUG
 177     printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.dev), SCpnt->request.nr_sectors);
 178     printk("use_sg is %d\n ",SCpnt->use_sg);
 179 #endif
 180     if (SCpnt->use_sg) {
 181       struct scatterlist * sgpnt;
 182       int i;
 183       sgpnt = (struct scatterlist *) SCpnt->buffer;
 184       for(i=0; i<SCpnt->use_sg; i++) {
 185 #ifdef DEBUG
 186         printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
 187 #endif
 188         if (sgpnt[i].alt_address) {
 189           if (SCpnt->request.cmd == READ)
 190             memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
 191           scsi_free(sgpnt[i].address, sgpnt[i].length);
 192         };
 193       };
 194       scsi_free(SCpnt->buffer, SCpnt->sglist_len);  /* Free list of scatter-gather pointers */
 195     } else {
 196       if (SCpnt->buffer != SCpnt->request.buffer) {
 197 #ifdef DEBUG
 198         printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 199                    SCpnt->bufflen);
 200 #endif  
 201           if (SCpnt->request.cmd == READ)
 202             memcpy(SCpnt->request.buffer, SCpnt->buffer,
 203                    SCpnt->bufflen);
 204           scsi_free(SCpnt->buffer, SCpnt->bufflen);
 205       };
 206     };
 207 /*
 208  *      If multiple sectors are requested in one buffer, then
 209  *      they will have been finished off by the first command.  If
 210  *      not, then we have a multi-buffer command.
 211  */
 212     if (SCpnt->request.nr_sectors > this_count)
 213       {
 214         SCpnt->request.errors = 0;
 215         
 216         if (!SCpnt->request.bh)
 217           {
 218 #ifdef DEBUG
 219             printk("sd%c : handling page request, no buffer\n",
 220                    'a' + MINOR(SCpnt->request.dev));
 221 #endif
 222 /*
 223   The SCpnt->request.nr_sectors field is always done in 512 byte sectors,
 224   even if this really isn't the case.
 225 */
 226             panic("sd.c: linked page request (%lx %x)",
 227                   SCpnt->request.sector, this_count);
 228           }
 229       }
 230     SCpnt = end_scsi_request(SCpnt, 1, this_count);
 231     requeue_sd_request(SCpnt);
 232     return;
 233   }
 234 
 235 /* Free up any indirection buffers we allocated for DMA purposes. */
 236     if (SCpnt->use_sg) {
 237       struct scatterlist * sgpnt;
 238       int i;
 239       sgpnt = (struct scatterlist *) SCpnt->buffer;
 240       for(i=0; i<SCpnt->use_sg; i++) {
 241 #ifdef DEBUG
 242         printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 243                    SCpnt->bufflen);
 244 #endif
 245         if (sgpnt[i].alt_address) {
 246           scsi_free(sgpnt[i].address, sgpnt[i].length);
 247         };
 248       };
 249       scsi_free(SCpnt->buffer, SCpnt->sglist_len);  /* Free list of scatter-gather pointers */
 250     } else {
 251 #ifdef DEBUG
 252       printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 253                    SCpnt->bufflen);
 254 #endif
 255       if (SCpnt->buffer != SCpnt->request.buffer)
 256         scsi_free(SCpnt->buffer, SCpnt->bufflen);
 257     };
 258 
 259 /*
 260         Now, if we were good little boys and girls, Santa left us a request
 261         sense buffer.  We can extract information from this, so we
 262         can choose a block to remap, etc.
 263 */
 264 
 265         if (driver_byte(result) != 0) {
 266           if (suggestion(result) == SUGGEST_REMAP) {
 267 #ifdef REMAP
 268 /*
 269         Not yet implemented.  A read will fail after being remapped,
 270         a write will call the strategy routine again.
 271 */
 272             if rscsi_disks[DEVICE_NR(SCpnt->request.dev)].remap
 273               {
 274                 result = 0;
 275               }
 276             else
 277               
 278 #endif
 279             }
 280 
 281           if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
 282             if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
 283               if(rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->removable) {
 284               /* detected disc change.  set a bit and quietly refuse    */
 285               /* further access.                                        */
 286               
 287                 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->changed = 1;
 288                 SCpnt = end_scsi_request(SCpnt, 0, this_count);
 289                 requeue_sd_request(SCpnt);
 290                 return;
 291               }
 292             }
 293           }
 294           
 295 
 296 /*      If we had an ILLEGAL REQUEST returned, then we may have
 297 performed an unsupported command.  The only thing this should be would
 298 be a ten byte read where only a six byte read was supported.  Also,
 299 on a system where READ CAPACITY failed, we have have read past the end
 300 of the  disk. 
 301 */
 302 
 303           if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
 304             if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten) {
 305               rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten = 0;
 306               requeue_sd_request(SCpnt);
 307               result = 0;
 308             } else {
 309             }
 310           }
 311         }  /* driver byte != 0 */
 312         if (result) {
 313                 printk("SCSI disk error : host %d id %d lun %d return code = %x\n",
 314                        rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->host->host_no,
 315                        rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->id,
 316                        rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->lun, result);
 317 
 318                 if (driver_byte(result) & DRIVER_SENSE)
 319                         print_sense("sd", SCpnt);
 320                 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
 321                 requeue_sd_request(SCpnt);
 322                 return;
 323         }
 324 }
 325 
/*
        do_sd_request() is the request handler function for the sd driver.
        Together with requeue_sd_request() (which does the actual
        translation), it takes block device requests and turns
        them into SCSI commands.
*/
 331 
 332 static void do_sd_request (void)
     /* [previous][next][first][last][top][bottom][index][help] */
 333 {
 334   Scsi_Cmnd * SCpnt = NULL;
 335   struct request * req = NULL;
 336   unsigned long flags;
 337   int flag = 0;
 338 
 339   while (1==1){
 340     save_flags(flags);
 341     cli();
 342     if (CURRENT != NULL && CURRENT->dev == -1) {
 343       restore_flags(flags);
 344       return;
 345     };
 346 
 347     INIT_SCSI_REQUEST;
 348 
 349 
 350 /* We have to be careful here.  allocate_device will get a free pointer, but
 351    there is no guarantee that it is queueable.  In normal usage, we want to
 352    call this, because other types of devices may have the host all tied up,
 353    and we want to make sure that we have at least one request pending for this
 354    type of device.   We can also come through here while servicing an
 355    interrupt, because of the need to start another command.  If we call
 356    allocate_device more than once, then the system can wedge if the command
 357    is not queueable.  The request_queueable function is safe because it checks
 358    to make sure that the host is able to take another command before it returns
 359    a pointer.  */
 360 
 361     if (flag++ == 0)
 362       SCpnt = allocate_device(&CURRENT,
 363                               rscsi_disks[DEVICE_NR(MINOR(CURRENT->dev))].device, 0); 
 364     else SCpnt = NULL;
 365     restore_flags(flags);
 366 
 367 /* This is a performance enhancement.  We dig down into the request list and
 368    try and find a queueable request (i.e. device not busy, and host able to
 369    accept another command.  If we find one, then we queue it. This can
 370    make a big difference on systems with more than one disk drive.  We want
 371    to have the interrupts off when monkeying with the request list, because
 372    otherwise the kernel might try and slip in a request in between somewhere. */
 373 
 374     if (!SCpnt && sd_template.nr_dev > 1){
 375       struct request *req1;
 376       req1 = NULL;
 377       save_flags(flags);
 378       cli();
 379       req = CURRENT;
 380       while(req){
 381         SCpnt = request_queueable(req,
 382                                   rscsi_disks[DEVICE_NR(MINOR(req->dev))].device);
 383         if(SCpnt) break;
 384         req1 = req;
 385         req = req->next;
 386       };
 387       if (SCpnt && req->dev == -1) {
 388         if (req == CURRENT) 
 389           CURRENT = CURRENT->next;
 390         else
 391           req1->next = req->next;
 392       };
 393       restore_flags(flags);
 394     };
 395     
 396     if (!SCpnt) return; /* Could not find anything to do */
 397         
 398     /* Queue command */
 399     requeue_sd_request(SCpnt);
 400   };  /* While */
 401 }    
 402 
 403 static void requeue_sd_request (Scsi_Cmnd * SCpnt)
     /* [previous][next][first][last][top][bottom][index][help] */
 404 {
 405         int dev, block, this_count;
 406         unsigned char cmd[10];
 407         int bounce_size, contiguous;
 408         int max_sg;
 409         struct buffer_head * bh, *bhp;
 410         char * buff, *bounce_buffer;
 411 
 412 repeat:
 413 
 414         if(!SCpnt || SCpnt->request.dev <= 0) {
 415           do_sd_request();
 416           return;
 417         }
 418 
 419         dev =  MINOR(SCpnt->request.dev);
 420         block = SCpnt->request.sector;
 421         this_count = 0;
 422 
 423 #ifdef DEBUG
 424         printk("Doing sd request, dev = %d, block = %d\n", dev, block);
 425 #endif
 426 
 427         if (dev >= (sd_template.dev_max << 4) || 
 428             !rscsi_disks[DEVICE_NR(dev)].device ||
 429             block + SCpnt->request.nr_sectors > sd[dev].nr_sects)
 430                 {
 431                 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 432                 goto repeat;
 433                 }
 434 
 435         block += sd[dev].start_sect;
 436         dev = DEVICE_NR(dev);
 437 
 438         if (rscsi_disks[dev].device->changed)
 439                 {
 440 /*
 441  * quietly refuse to do anything to a changed disc until the changed bit has been reset
 442  */
 443                 /* printk("SCSI disk has been changed.  Prohibiting further I/O.\n");   */
 444                 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 445                 goto repeat;
 446                 }
 447 
 448 #ifdef DEBUG
 449         printk("sd%c : real dev = /dev/sd%c, block = %d\n", 'a' + MINOR(SCpnt->request.dev), dev, block);
 450 #endif
 451 
 452         /*
 453          * If we have a 1K hardware sectorsize, prevent access to single
 454          * 512 byte sectors.  In theory we could handle this - in fact
 455          * the scsi cdrom driver must be able to handle this because
 456          * we typically use 1K blocksizes, and cdroms typically have
 457          * 2K hardware sectorsizes.  Of course, things are simpler
 458          * with the cdrom, since it is read-only.  For performance
 459          * reasons, the filesystems should be able to handle this
 460          * and not force the scsi disk driver to use bounce buffers
 461          * for this.
 462          */
 463         if (rscsi_disks[dev].sector_size == 1024)
 464           if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
 465                 printk("sd.c:Bad block number requested");
 466                 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 467                 goto repeat;
 468         }
 469         
 470         switch (SCpnt->request.cmd)
 471                 {
 472                 case WRITE :
 473                         if (!rscsi_disks[dev].device->writeable)
 474                                 {
 475                                 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 476                                 goto repeat;
 477                                 }
 478                         cmd[0] = WRITE_6;
 479                         break;
 480                 case READ :
 481                         cmd[0] = READ_6;
 482                         break;
 483                 default :
 484                         panic ("Unknown sd command %d\n", SCpnt->request.cmd);
 485                       }
 486 
 487         SCpnt->this_count = 0;
 488 
 489         /* If the host adapter can deal with very large scatter-gather
 490            requests, it is a waste of time to cluster */
 491         contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
 492         bounce_buffer = NULL;
 493         bounce_size = (SCpnt->request.nr_sectors << 9);
 494 
 495         /* First see if we need a bounce buffer for this request.  If we do, make sure
 496            that we can allocate a buffer.  Do not waste space by allocating a bounce
 497            buffer if we are straddling the 16Mb line */
 498 
 499         
 500         if (contiguous && SCpnt->request.bh &&
 501             ((long) SCpnt->request.bh->b_data) + (SCpnt->request.nr_sectors << 9) - 1 > 
 502             ISA_DMA_THRESHOLD && SCpnt->host->unchecked_isa_dma) {
 503           if(((long) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
 504             bounce_buffer = (char *) scsi_malloc(bounce_size);
 505           if(!bounce_buffer) contiguous = 0;
 506         };
 507 
 508         if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
 509           for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp, 
 510               bhp = bhp->b_reqnext) {
 511             if(!CONTIGUOUS_BUFFERS(bh,bhp)) { 
 512               if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
 513               contiguous = 0;
 514               break;
 515             } 
 516           };
 517         if (!SCpnt->request.bh || contiguous) {
 518 
 519           /* case of page request (i.e. raw device), or unlinked buffer */
 520           this_count = SCpnt->request.nr_sectors;
 521           buff = SCpnt->request.buffer;
 522           SCpnt->use_sg = 0;
 523 
 524         } else if (SCpnt->host->sg_tablesize == 0 ||
 525                    (need_isa_buffer && 
 526                     dma_free_sectors <= 10)) {
 527 
 528           /* Case of host adapter that cannot scatter-gather.  We also
 529            come here if we are running low on DMA buffer memory.  We set
 530            a threshold higher than that we would need for this request so
 531            we leave room for other requests.  Even though we would not need
 532            it all, we need to be conservative, because if we run low enough
 533            we have no choice but to panic. */
 534 
 535           if (SCpnt->host->sg_tablesize != 0 &&
 536               need_isa_buffer && 
 537               dma_free_sectors <= 10)
 538             printk("Warning: SCSI DMA buffer space running low.  Using non scatter-gather I/O.\n");
 539 
 540           this_count = SCpnt->request.current_nr_sectors;
 541           buff = SCpnt->request.buffer;
 542           SCpnt->use_sg = 0;
 543 
 544         } else {
 545 
 546           /* Scatter-gather capable host adapter */
 547           struct scatterlist * sgpnt;
 548           int count, this_count_max;
 549           int counted;
 550 
 551           bh = SCpnt->request.bh;
 552           this_count = 0;
 553           this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
 554           count = 0;
 555           bhp = NULL;
 556           while(bh) {
 557             if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
 558             if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
 559                !CLUSTERABLE_DEVICE(SCpnt) ||
 560                (SCpnt->host->unchecked_isa_dma &&
 561                ((unsigned long) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
 562               if (count < SCpnt->host->sg_tablesize) count++;
 563               else break;
 564             };
 565             this_count += (bh->b_size >> 9);
 566             bhp = bh;
 567             bh = bh->b_reqnext;
 568           };
 569 #if 0
 570           if(SCpnt->host->unchecked_isa_dma &&
 571              ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
 572 #endif
 573           SCpnt->use_sg = count;  /* Number of chains */
 574           count = 512;/* scsi_malloc can only allocate in chunks of 512 bytes*/
 575           while( count < (SCpnt->use_sg * sizeof(struct scatterlist))) 
 576             count = count << 1;
 577           SCpnt->sglist_len = count;
 578           max_sg = count / sizeof(struct scatterlist);
 579           if(SCpnt->host->sg_tablesize < max_sg) max_sg = SCpnt->host->sg_tablesize;
 580           sgpnt = (struct scatterlist * ) scsi_malloc(count);
 581           memset(sgpnt, 0, count);  /* Zero so it is easy to fill */
 582           if (!sgpnt) {
 583             printk("Warning - running *really* short on DMA buffers\n");
 584             SCpnt->use_sg = 0;  /* No memory left - bail out */
 585             this_count = SCpnt->request.current_nr_sectors;
 586             buff = SCpnt->request.buffer;
 587           } else {
 588             buff = (char *) sgpnt;
 589             counted = 0;
 590             for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
 591                 count < SCpnt->use_sg && bh; 
 592                 count++, bh = bhp) {
 593 
 594               bhp = bh->b_reqnext;
 595 
 596               if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
 597               sgpnt[count].length += bh->b_size;
 598               counted += bh->b_size >> 9;
 599 
 600               if (((long) sgpnt[count].address) + sgpnt[count].length - 1 > 
 601                   ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
 602                   !sgpnt[count].alt_address) {
 603                 sgpnt[count].alt_address = sgpnt[count].address;
 604                 /* We try and avoid exhausting the DMA pool, since it is easier
 605                    to control usage here.  In other places we might have a more
 606                    pressing need, and we would be screwed if we ran out */
 607                 if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
 608                   sgpnt[count].address = NULL;
 609                 } else {
 610                   sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
 611                 };
 612 /* If we start running low on DMA buffers, we abort the scatter-gather
 613    operation, and free all of the memory we have allocated.  We want to
 614    ensure that all scsi operations are able to do at least a non-scatter/gather
 615    operation */
 616                 if(sgpnt[count].address == NULL){ /* Out of dma memory */
 617 #if 0
 618                   printk("Warning: Running low on SCSI DMA buffers");
 619                   /* Try switching back to a non scatter-gather operation. */
 620                   while(--count >= 0){
 621                     if(sgpnt[count].alt_address) 
 622                       scsi_free(sgpnt[count].address, sgpnt[count].length);
 623                   };
 624                   this_count = SCpnt->request.current_nr_sectors;
 625                   buff = SCpnt->request.buffer;
 626                   SCpnt->use_sg = 0;
 627                   scsi_free(sgpnt, SCpnt->sglist_len);
 628 #endif
 629                   SCpnt->use_sg = count;
 630                   this_count = counted -= bh->b_size >> 9;
 631                   break;
 632                 };
 633 
 634               };
 635 
 636               /* Only cluster buffers if we know that we can supply DMA buffers
 637                  large enough to satisfy the request.  Do not cluster a new
 638                  request if this would mean that we suddenly need to start
 639                  using DMA bounce buffers */
 640               if(bhp && CONTIGUOUS_BUFFERS(bh,bhp) && CLUSTERABLE_DEVICE(SCpnt)) {
 641                 char * tmp;
 642 
 643                 if (((long) sgpnt[count].address) + sgpnt[count].length +
 644                     bhp->b_size - 1 > ISA_DMA_THRESHOLD && 
 645                     (SCpnt->host->unchecked_isa_dma) &&
 646                     !sgpnt[count].alt_address) continue;
 647 
 648                 if(!sgpnt[count].alt_address) {count--; continue; }
 649                 if(dma_free_sectors > 10)
 650                   tmp = (char *) scsi_malloc(sgpnt[count].length + bhp->b_size);
 651                 else {
 652                   tmp = NULL;
 653                   max_sg = SCpnt->use_sg;
 654                 };
 655                 if(tmp){
 656                   scsi_free(sgpnt[count].address, sgpnt[count].length);
 657                   sgpnt[count].address = tmp;
 658                   count--;
 659                   continue;
 660                 };
 661 
 662                 /* If we are allowed another sg chain, then increment counter so we
 663                    can insert it.  Otherwise we will end up truncating */
 664 
 665                 if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
 666               };  /* contiguous buffers */
 667             }; /* for loop */
 668 
 669             this_count = counted; /* This is actually how many we are going to transfer */
 670 
 671             if(count < SCpnt->use_sg || SCpnt->use_sg > SCpnt->host->sg_tablesize){
 672               bh = SCpnt->request.bh;
 673               printk("Use sg, count %d %x %d\n", SCpnt->use_sg, count, dma_free_sectors);
 674               printk("maxsg = %x, counted = %d this_count = %d\n", max_sg, counted, this_count);
 675               while(bh){
 676                 printk("[%p %lx] ", bh->b_data, bh->b_size);
 677                 bh = bh->b_reqnext;
 678               };
 679               if(SCpnt->use_sg < 16)
 680                 for(count=0; count<SCpnt->use_sg; count++)
 681                   printk("{%d:%p %p %d}  ", count,
 682                          sgpnt[count].address,
 683                          sgpnt[count].alt_address,
 684                          sgpnt[count].length);
 685               panic("Ooops");
 686             };
 687 
 688             if (SCpnt->request.cmd == WRITE)
 689               for(count=0; count<SCpnt->use_sg; count++)
 690                 if(sgpnt[count].alt_address)
 691                   memcpy(sgpnt[count].address, sgpnt[count].alt_address, 
 692                          sgpnt[count].length);
 693           };  /* Able to malloc sgpnt */
 694         };  /* Host adapter capable of scatter-gather */
 695 
 696 /* Now handle the possibility of DMA to addresses > 16Mb */
 697 
 698         if(SCpnt->use_sg == 0){
 699           if (((long) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD && 
 700             (SCpnt->host->unchecked_isa_dma)) {
 701             if(bounce_buffer)
 702               buff = bounce_buffer;
 703             else
 704               buff = (char *) scsi_malloc(this_count << 9);
 705             if(buff == NULL) {  /* Try backing off a bit if we are low on mem*/
 706               this_count = SCpnt->request.current_nr_sectors;
 707               buff = (char *) scsi_malloc(this_count << 9);
 708               if(!buff) panic("Ran out of DMA buffers.");
 709             };
 710             if (SCpnt->request.cmd == WRITE)
 711               memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
 712           };
 713         };
 714 #ifdef DEBUG
 715         printk("sd%c : %s %d/%d 512 byte blocks.\n", 'a' + MINOR(SCpnt->request.dev),
 716                 (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
 717                 this_count, SCpnt->request.nr_sectors);
 718 #endif
 719 
 720         cmd[1] = (SCpnt->lun << 5) & 0xe0;
 721 
 722         if (rscsi_disks[dev].sector_size == 1024){
 723           if(block & 1) panic("sd.c:Bad block number requested");
 724           if(this_count & 1) panic("sd.c:Bad block number requested");
 725           block = block >> 1;
 726           this_count = this_count >> 1;
 727         };
 728 
 729         if (rscsi_disks[dev].sector_size == 256){
 730           block = block << 1;
 731           this_count = this_count << 1;
 732         };
 733 
 734         if (((this_count > 0xff) ||  (block > 0x1fffff)) && rscsi_disks[dev].ten)
 735                 {
 736                 if (this_count > 0xffff)
 737                         this_count = 0xffff;
 738 
 739                 cmd[0] += READ_10 - READ_6 ;
 740                 cmd[2] = (unsigned char) (block >> 24) & 0xff;
 741                 cmd[3] = (unsigned char) (block >> 16) & 0xff;
 742                 cmd[4] = (unsigned char) (block >> 8) & 0xff;
 743                 cmd[5] = (unsigned char) block & 0xff;
 744                 cmd[6] = cmd[9] = 0;
 745                 cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
 746                 cmd[8] = (unsigned char) this_count & 0xff;
 747                 }
 748         else
 749                 {
 750                 if (this_count > 0xff)
 751                         this_count = 0xff;
 752 
 753                 cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
 754                 cmd[2] = (unsigned char) ((block >> 8) & 0xff);
 755                 cmd[3] = (unsigned char) block & 0xff;
 756                 cmd[4] = (unsigned char) this_count;
 757                 cmd[5] = 0;
 758                 }
 759 
 760 /*
 761  * We shouldn't disconnect in the middle of a sector, so with a dumb 
 762  * host adapter, it's safe to assume that we can at least transfer 
 763  * this many bytes between each connect / disconnect.  
 764  */
 765 
 766         SCpnt->transfersize = rscsi_disks[dev].sector_size;
 767         SCpnt->underflow = this_count << 9; 
 768         scsi_do_cmd (SCpnt, (void *) cmd, buff, 
 769                      this_count * rscsi_disks[dev].sector_size,
 770                      rw_intr, 
 771                      (SCpnt->device->type == TYPE_DISK ? 
 772                                      SD_TIMEOUT : SD_MOD_TIMEOUT),
 773                      MAX_RETRIES);
 774 }
 775 
 776 static int check_scsidisk_media_change(dev_t full_dev){
     /* [previous][next][first][last][top][bottom][index][help] */
 777         int retval;
 778         int target;
 779         struct inode inode;
 780         int flag = 0;
 781 
 782         target =  DEVICE_NR(MINOR(full_dev));
 783 
 784         if (target >= sd_template.dev_max ||
 785             !rscsi_disks[target].device) {
 786                 printk("SCSI disk request error: invalid device.\n");
 787                 return 0;
 788         };
 789 
 790         if(!rscsi_disks[target].device->removable) return 0;
 791 
 792         inode.i_rdev = full_dev;  /* This is all we really need here */
 793         retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
 794 
 795         if(retval){ /* Unable to test, unit probably not ready.  This usually
 796                      means there is no disc in the drive.  Mark as changed,
 797                      and we will figure it out later once the drive is
 798                      available again.  */
 799 
 800           rscsi_disks[target].device->changed = 1;
 801           return 1; /* This will force a flush, if called from
 802                        check_disk_change */
 803         };
 804 
 805         retval = rscsi_disks[target].device->changed;
 806         if(!flag) rscsi_disks[target].device->changed = 0;
 807         return retval;
 808 }
 809 
 810 static void sd_init_done (Scsi_Cmnd * SCpnt)
     /* [previous][next][first][last][top][bottom][index][help] */
 811 {
 812   struct request * req;
 813   
 814   req = &SCpnt->request;
 815   req->dev = 0xfffe; /* Busy, but indicate request done */
 816   
 817   if (req->sem != NULL) {
 818     up(req->sem);
 819   }
 820 }
 821 
/*
 * Probe one attached SCSI disk: spin it up if necessary (boot time only),
 * issue READ CAPACITY to learn its size and sector size, and fill in
 * rscsi_disks[i] plus the per-minor hardware sector size table.
 *
 * Returns the (possibly unchanged) disk index i; on an unsupported sector
 * size for a fixed disk the entry is deleted (device set to NULL) before
 * returning.  Also used from revalidate_scsidisk() via MAYBE_REINIT.
 */
static int sd_init_onedisk(int i)
{
  unsigned char cmd[10];
  unsigned char *buffer;
  char spintime;
  int the_result, retries;
  Scsi_Cmnd * SCpnt;

  /* We need to retry the READ_CAPACITY because a UNIT_ATTENTION is considered
     a fatal error, and many devices report such an error just after a scsi
     bus reset. */

  SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
  buffer = (unsigned char *) scsi_malloc(512);

  spintime = 0;

  /* Spin up drives, as required.  Only do this at boot time */
  /* (current == task[0] means we are running from the init task, before
     scheduling is generally available, so completion is busy-waited.) */
  if (current == task[0]){
    do{
      cmd[0] = TEST_UNIT_READY;
      cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
      memset ((void *) &cmd[2], 0, 8);
      SCpnt->request.dev = 0xffff;  /* Mark as really busy again */
      SCpnt->cmd_len = 0;
      SCpnt->sense_buffer[0] = 0;
      SCpnt->sense_buffer[2] = 0;
      
      scsi_do_cmd (SCpnt,
                   (void *) cmd, (void *) buffer,
                   512, sd_init_done,  SD_TIMEOUT,
                   MAX_RETRIES);
      
      /* Busy-wait for sd_init_done to flag completion (0xfffe). */
      while(SCpnt->request.dev != 0xfffe);
      
      the_result = SCpnt->result;
      
      /* Look for non-removable devices that return NOT_READY.  Issue command
         to spin up drive for these cases. */
      if(the_result && !rscsi_disks[i].device->removable && 
         SCpnt->sense_buffer[2] == NOT_READY) {
        int time1;
        if(!spintime){
          printk( "sd%c: Spinning up disk...", 'a' + i );
          cmd[0] = START_STOP;
          cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
          cmd[1] |= 1;  /* Return immediately */
          memset ((void *) &cmd[2], 0, 8);
          cmd[4] = 1; /* Start spin cycle */
          SCpnt->request.dev = 0xffff;  /* Mark as really busy again */
          SCpnt->cmd_len = 0;
          SCpnt->sense_buffer[0] = 0;
          SCpnt->sense_buffer[2] = 0;
          
          scsi_do_cmd (SCpnt,
                       (void *) cmd, (void *) buffer,
                       512, sd_init_done,  SD_TIMEOUT,
                       MAX_RETRIES);
          
          while(SCpnt->request.dev != 0xfffe);

          /* Remember when we started spinning; used for the 50 s
             (5000 jiffies at HZ=100) give-up timeout below. */
          spintime = jiffies;
        };

        time1 = jiffies;
        while(jiffies < time1 + HZ); /* Wait 1 second for next try */
        printk( "." );
      };
    } while(the_result && spintime && spintime+5000 > jiffies);
    if (spintime) {
       if (the_result)
           printk( "not responding...\n" );
       else
           printk( "ready\n" );
    }
  };  /* current == task[0] */


  /* Read the capacity; retry a few times since UNIT ATTENTION right
     after a bus reset is common and otherwise fatal. */
  retries = 3;
  do {
    cmd[0] = READ_CAPACITY;
    cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
    memset ((void *) &cmd[2], 0, 8);
    memset ((void *) buffer, 0, 8);
    SCpnt->request.dev = 0xffff;  /* Mark as really busy again */
    SCpnt->cmd_len = 0;
    SCpnt->sense_buffer[0] = 0;
    SCpnt->sense_buffer[2] = 0;
    
    scsi_do_cmd (SCpnt,
                 (void *) cmd, (void *) buffer,
                 8, sd_init_done,  SD_TIMEOUT,
                 MAX_RETRIES);
    
    /* At boot, busy-wait; otherwise sleep on the request semaphore. */
    if (current == task[0])
      while(SCpnt->request.dev != 0xfffe);
    else
      if (SCpnt->request.dev != 0xfffe){
        struct semaphore sem = MUTEX_LOCKED;
        SCpnt->request.sem = &sem;
        down(&sem);
        /* Hmm.. Have to ask about this one.. */
        while (SCpnt->request.dev != 0xfffe) schedule();
      };
    
    the_result = SCpnt->result;
    retries--;

  } while(the_result && retries);

  SCpnt->request.dev = -1;  /* Mark as not busy */

  wake_up(&SCpnt->device->device_wait); 

  /* Wake up a process waiting for device*/

  /*
   *    The SCSI standard says "READ CAPACITY is necessary for self configuring software"
   *    While not mandatory, support of READ CAPACITY is strongly encouraged.
   *    We used to die if we couldn't successfully do a READ CAPACITY.
   *    But, now we go on about our way.  The side effects of this are
   *
   *    1.  We can't know block size with certainty.  I have said "512 bytes is it"
   *            as this is most common.
   *
   *    2.  Recovery from when some one attempts to read past the end of the raw device will
   *        be slower.
   */

  if (the_result)
    {
      printk ("sd%c : READ CAPACITY failed.\n"
              "sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
              'a' + i, 'a' + i,
              status_byte(the_result),
              msg_byte(the_result),
              host_byte(the_result),
              driver_byte(the_result)
              );
      if (driver_byte(the_result)  & DRIVER_SENSE)
        printk("sd%c : extended sense code = %1x \n", 'a' + i, SCpnt->sense_buffer[2] & 0xf);
      else
        printk("sd%c : sense not available. \n", 'a' + i);

      printk("sd%c : block size assumed to be 512 bytes, disk size 1GB.  \n", 'a' + i);
      rscsi_disks[i].capacity = 0x1fffff;
      rscsi_disks[i].sector_size = 512;

      /* Set dirty bit for removable devices if not ready - sometimes drives
         will not report this properly. */
      if(rscsi_disks[i].device->removable && 
         SCpnt->sense_buffer[2] == NOT_READY)
        rscsi_disks[i].device->changed = 1;

    }
  else
    {
      /* READ CAPACITY data: bytes 0-3 = last block number (big-endian),
         bytes 4-7 = block length in bytes (big-endian). */
      rscsi_disks[i].capacity = (buffer[0] << 24) |
        (buffer[1] << 16) |
          (buffer[2] << 8) |
            buffer[3];

      rscsi_disks[i].sector_size = (buffer[4] << 24) |
        (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];

      if (rscsi_disks[i].sector_size != 512 &&
          rscsi_disks[i].sector_size != 1024 &&
          rscsi_disks[i].sector_size != 256)
        {
          printk ("sd%c : unsupported sector size %d.\n",
                  'a' + i, rscsi_disks[i].sector_size);
          if(rscsi_disks[i].device->removable){
            rscsi_disks[i].capacity = 0;
          } else {
            printk ("scsi : deleting disk entry.\n");
            rscsi_disks[i].device = NULL;
            sd_template.nr_dev--;
            return i;
          };
        }
    {
       /*
          The msdos fs need to know the hardware sector size
          So I have created this table. See ll_rw_blk.c
          Jacques Gelinas (Jacques@solucorp.qc.ca)
       */
       int m;
       int hard_sector = rscsi_disks[i].sector_size;
       /* There is 16 minor allocated for each devices */
       for (m=i<<4; m<((i+1)<<4); m++){
         sd_hardsizes[m] = hard_sector;
       }
       printk ("SCSI Hardware sector size is %d bytes on device sd%c\n"
         ,hard_sector,i+'a');
    }
      /* Internally the driver works in 512-byte sectors; rescale. */
      if(rscsi_disks[i].sector_size == 1024)
        rscsi_disks[i].capacity <<= 1;  /* Change this into 512 byte sectors */
      if(rscsi_disks[i].sector_size == 256)
        rscsi_disks[i].capacity >>= 1;  /* Change this into 512 byte sectors */
    }

  rscsi_disks[i].ten = 1;   /* Assume 10-byte READ/WRITE commands work */
  rscsi_disks[i].remap = 1;
  scsi_free(buffer, 512);
  return i;
}
1028 
1029 /*
1030         The sd_init() function looks at all SCSI drives present, determines
1031         their size, and reads partition table entries for them.
1032 */
1033 
1034 
1035 static void sd_init()
     /* [previous][next][first][last][top][bottom][index][help] */
1036 {
1037         int i;
1038         static int sd_registered = 0;
1039 
1040         if (sd_template.dev_noticed == 0) return;
1041 
1042         if(!sd_registered) {
1043           if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
1044             printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
1045             return;
1046           }
1047           sd_registered++;
1048         }
1049 
1050         /* We do not support attaching loadable devices yet. */
1051         if(scsi_loadable_module_flag) return;
1052 
1053         sd_template.dev_max = sd_template.dev_noticed;
1054 
1055         rscsi_disks = (Scsi_Disk *) 
1056           scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk));
1057         memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));
1058 
1059         sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) * 
1060                                             sizeof(int));
1061         memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));
1062 
1063         sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) * 
1064                                                  sizeof(int));
1065         sd_hardsizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) * 
1066                                                  sizeof(int));
1067         for(i=0;i<(sd_template.dev_max << 4);i++){
1068                 sd_blocksizes[i] = 1024;
1069                 sd_hardsizes[i] = 512;
1070         }
1071         blksize_size[MAJOR_NR] = sd_blocksizes;
1072         hardsect_size[MAJOR_NR] = sd_hardsizes;
1073         sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
1074                                                    sizeof(struct hd_struct));
1075 
1076 
1077         sd_gendisk.max_nr = sd_template.dev_max;
1078         sd_gendisk.part = sd;
1079         sd_gendisk.sizes = sd_sizes;
1080         sd_gendisk.real_devices = (void *) rscsi_disks;
1081 
1082 }
1083 
1084 static void sd_finish()
     /* [previous][next][first][last][top][bottom][index][help] */
1085 {
1086         int i;
1087 
1088         for (i = 0; i < sd_template.dev_max; ++i)
1089           if (rscsi_disks[i].device) i = sd_init_onedisk(i);
1090 
1091         blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
1092 
1093         /* If our host adapter is capable of scatter-gather, then we increase
1094            the read-ahead to 16 blocks (32 sectors).  If not, we use
1095            a two block (4 sector) read ahead. */
1096         if(rscsi_disks[0].device->host->sg_tablesize)
1097           read_ahead[MAJOR_NR] = 120;
1098         /* 64 sector read-ahead */
1099         else
1100           read_ahead[MAJOR_NR] = 4;  /* 4 sector read-ahead */
1101         
1102         sd_gendisk.next = gendisk_head;
1103         gendisk_head = &sd_gendisk;
1104         return;
1105 }
1106 
1107 static int sd_detect(Scsi_Device * SDp){
     /* [previous][next][first][last][top][bottom][index][help] */
1108   /* We do not support attaching loadable devices yet. */
1109   if(scsi_loadable_module_flag) return 0;
1110   if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1111 
1112   printk("Detected scsi disk sd%c at scsi%d, id %d, lun %d\n", 
1113          'a'+ (sd_template.dev_noticed++),
1114          SDp->host->host_no , SDp->id, SDp->lun); 
1115 
1116          return 1;
1117 
1118 }
1119 
1120 static void sd_attach(Scsi_Device * SDp){
     /* [previous][next][first][last][top][bottom][index][help] */
1121    Scsi_Disk * dpnt;
1122    int i;
1123 
1124    /* We do not support attaching loadable devices yet. */
1125    if(scsi_loadable_module_flag) return;
1126    if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return;
1127 
1128    if(sd_template.nr_dev >= sd_template.dev_max) 
1129      panic ("scsi_devices corrupt (sd)");
1130 
1131    for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++) 
1132      if(!dpnt->device) break;
1133 
1134    if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)");
1135 
1136    SDp->scsi_request_fn = do_sd_request;
1137    rscsi_disks[i].device = SDp;
1138    sd_template.nr_dev++;
1139 };
1140 
1141 #define DEVICE_BUSY rscsi_disks[target].device->busy
1142 #define USAGE rscsi_disks[target].device->access_count
1143 #define CAPACITY rscsi_disks[target].capacity
1144 #define MAYBE_REINIT  sd_init_onedisk(target)
1145 #define GENDISK_STRUCT sd_gendisk
1146 
1147 /* This routine is called to flush all partitions and partition tables
1148    for a changed scsi disk, and then re-read the new partition table.
1149    If we are revalidating a disk because of a media change, then we
1150    enter with usage == 0.  If we are using an ioctl, we automatically have
1151    usage == 1 (we need an open channel to use an ioctl :-), so this
1152    is our limit.
1153  */
int revalidate_scsidisk(int dev, int maxusage){
          int target, major;
          struct gendisk * gdev;
          unsigned long flags;
          int max_p;
          int start;
          int i;

          target =  DEVICE_NR(MINOR(dev));
          gdev = &GENDISK_STRUCT;

          /* Atomically claim the device: fail if it is mid-request or has
             more users than the caller allows (0 from a media change,
             1 from an ioctl -- see the comment above this function). */
          save_flags(flags);
          cli();
          if (DEVICE_BUSY || USAGE > maxusage) {
            restore_flags(flags);
            printk("Device busy for revalidation (usage=%d)\n", USAGE);
            return -EBUSY;
          };
          DEVICE_BUSY = 1;
          restore_flags(flags);

          max_p = gdev->max_p;
          start = target << gdev->minor_shift;
          major = MAJOR_NR << 8;  /* major in high byte of the dev_t */

          /* Flush and invalidate every partition minor of this disk, and
             clear its partition table entries. */
          for (i=max_p - 1; i >=0 ; i--) {
            sync_dev(major | start | i);
            invalidate_inodes(major | start | i);
            invalidate_buffers(major | start | i);
            gdev->part[start+i].start_sect = 0;
            gdev->part[start+i].nr_sects = 0;
          };

          /* Re-probe the disk (sd_init_onedisk) to pick up the new medium's
             capacity and sector size. */
#ifdef MAYBE_REINIT
          MAYBE_REINIT;
#endif

          /* Whole-disk minor covers the new capacity; re-read the
             partition table. */
          gdev->part[start].nr_sects = CAPACITY;
          resetup_one_dev(gdev, target);

          DEVICE_BUSY = 0;
          return 0;
}
1197 
1198 static int fop_revalidate_scsidisk(dev_t dev){
     /* [previous][next][first][last][top][bottom][index][help] */
1199   return revalidate_scsidisk(dev, 0);
1200 }
1201 

/* [previous][next][first][last][top][bottom][index][help] */