root/drivers/scsi/sd.c


DEFINITIONS

This source file includes the following definitions.
  1. sd_open
  2. sd_release
  3. sd_geninit
  4. rw_intr
  5. do_sd_request
  6. requeue_sd_request
  7. check_scsidisk_media_change
  8. sd_init_done
  9. sd_init_onedisk
  10. sd_init
  11. sd_init1
  12. sd_attach
  13. revalidate_scsidisk
  14. fop_revalidate_scsidisk

   1 /*
   2  *      sd.c Copyright (C) 1992 Drew Eckhardt 
   3  *           Copyright (C) 1993, 1994 Eric Youngdale
   4  *      Linux scsi disk driver by
   5  *              Drew Eckhardt 
   6  *
   7  *      <drew@colorado.edu>
   8  *
   9  *       Modified by Eric Youngdale ericy@cais.com to
   10  *       add scatter-gather, multiple outstanding requests, and other
  11  *       enhancements.
  12  */
  13 
  14 #include <linux/fs.h>
  15 #include <linux/kernel.h>
  16 #include <linux/sched.h>
  17 #include <linux/string.h>
  18 #include <linux/errno.h>
  19 #include <asm/system.h>
  20 
  21 #define MAJOR_NR SCSI_DISK_MAJOR
  22 #include "../block/blk.h"
  23 #include "scsi.h"
  24 #include "hosts.h"
  25 #include "sd.h"
  26 #include "scsi_ioctl.h"
  27 #include "constants.h"
  28 
  29 #include <linux/genhd.h>
  30 
  31 /*
  32 static const char RCSid[] = "$Header:";
  33 */
  34 
  35 #define MAX_RETRIES 5
  36 
  37 /*
  38  *      Time out in seconds for disks and Magneto-opticals (which are slower).
  39  */
  40 
  41 #define SD_TIMEOUT 300
  42 #define SD_MOD_TIMEOUT 750
  43 
  44 #define CLUSTERABLE_DEVICE(SC) (SC->host->hostt->use_clustering && \
  45                             SC->device->type != TYPE_MOD)
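/*
 *      A device counts as "clusterable" when its host driver sets
 *      use_clustering; clustering merges physically adjacent buffer heads
 *      into a single scatter-gather segment so that fewer segments are
 *      needed per command.  Magneto-optical drives (TYPE_MOD) are excluded
 *      here; the code does not say why, but the much larger timeout above
 *      suggests they are simply handled more conservatively.
 */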
  46 
  47 struct hd_struct * sd;
  48 
  49 int NR_SD=0;
  50 int MAX_SD=0;
  51 Scsi_Disk * rscsi_disks;
  52 static int * sd_sizes;
  53 static int * sd_blocksizes;
  54 
  55 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
  56 
  57 static int check_scsidisk_media_change(dev_t);
  58 static int fop_revalidate_scsidisk(dev_t);
  59 
   60 static int sd_init_onedisk(int);
  61 
  62 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
  63 
  64 static int sd_open(struct inode * inode, struct file * filp)
  65 {
  66         int target;
  67         target =  DEVICE_NR(MINOR(inode->i_rdev));
  68 
  69         if(target >= NR_SD || !rscsi_disks[target].device)
  70           return -ENXIO;   /* No such device */
  71         
   72 /* Make sure that only one process can do a check_disk_change at one time.
   73  This is also used to lock out further access while the partition table is being re-read. */
  74 
  75         while (rscsi_disks[target].device->busy);
  76 
  77         if(rscsi_disks[target].device->removable) {
  78           check_disk_change(inode->i_rdev);
  79 
  80           if(!rscsi_disks[target].device->access_count)
  81             sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
  82         };
  83         rscsi_disks[target].device->access_count++;
  84         return 0;
  85 }
  86 
  87 static void sd_release(struct inode * inode, struct file * file)
  88 {
  89         int target;
  90         sync_dev(inode->i_rdev);
  91 
  92         target =  DEVICE_NR(MINOR(inode->i_rdev));
  93 
  94         rscsi_disks[target].device->access_count--;
  95 
  96         if(rscsi_disks[target].device->removable) {
  97           if(!rscsi_disks[target].device->access_count)
  98             sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
  99         };
 100 }
 101 
 102 static void sd_geninit(void);
 103 
 104 static struct file_operations sd_fops = {
 105         NULL,                   /* lseek - default */
 106         block_read,             /* read - general block-dev read */
 107         block_write,            /* write - general block-dev write */
 108         NULL,                   /* readdir - bad */
 109         NULL,                   /* select */
 110         sd_ioctl,               /* ioctl */
 111         NULL,                   /* mmap */
 112         sd_open,                /* open code */
 113         sd_release,             /* release */
 114         block_fsync,            /* fsync */
 115         NULL,                   /* fasync */
 116         check_scsidisk_media_change,  /* Disk change */
 117         fop_revalidate_scsidisk     /* revalidate */
 118 };
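/*
 *      Note that the initializer above is positional (old-style C): the
 *      entries must appear in exactly the order that struct file_operations
 *      declares them in <linux/fs.h>, with NULL standing in for any hook
 *      this driver does not provide; the inline comments track which slot
 *      is which.
 */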
 119 
 120 static struct gendisk sd_gendisk = {
 121         MAJOR_NR,               /* Major number */
 122         "sd",           /* Major name */
 123         4,              /* Bits to shift to get real from partition */
 124         1 << 4,         /* Number of partitions per real */
 125         0,              /* maximum number of real */
 126         sd_geninit,     /* init function */
 127         NULL,           /* hd struct */
 128         NULL,   /* block sizes */
 129         0,              /* number */
 130         NULL,   /* internal */
 131         NULL            /* next */
 132 };
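/*
 *      With minor_shift = 4 and 1 << 4 partitions per disk, a minor number
 *      encodes both the drive and the partition, roughly:
 *
 *              minor     = (drive << 4) | partition;    partition 0 = whole disk
 *              drive     = minor >> 4;                  (what DEVICE_NR extracts)
 *              partition = minor & 0x0f;
 *
 *      so the first disk uses minors 0-15, the second 16-31, and so on.
 */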
 133 
 134 static void sd_geninit (void)
 135 {
 136         int i;
 137 
 138         for (i = 0; i < NR_SD; ++i)
 139                 sd[i << 4].nr_sects = rscsi_disks[i].capacity;
 140         sd_gendisk.nr_real = NR_SD;
 141 }
 142 
  143 /*
  144         rw_intr is the interrupt (completion) routine for the device driver.
  145         It is notified at the end of a SCSI read / write, and will take one
  146         of several actions based on success or failure.
  147 */
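/*
        A note on the buffer handling below: when request data sits above the
        ISA DMA limit on an adapter that cannot reach it, requeue_sd_request()
        substitutes a low "bounce" buffer -- either as SCpnt->buffer itself, or
        per scatter-gather segment with the caller's original pointer stashed
        in alt_address.  On completion of a READ, rw_intr() copies the data
        back out of the bounce buffers and frees them; on errors only the
        freeing is done.
*/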
 148 
 149 static void rw_intr (Scsi_Cmnd *SCpnt)
 150 {
 151   int result = SCpnt->result;
 152   int this_count = SCpnt->bufflen >> 9;
 153 
 154 #ifdef DEBUG
 155   printk("sd%d : rw_intr(%d, %d)\n", MINOR(SCpnt->request.dev), SCpnt->host->host_no, result);
 156 #endif
 157 
 158 /*
 159   First case : we assume that the command succeeded.  One of two things will
 160   happen here.  Either we will be finished, or there will be more
 161   sectors that we were unable to read last time.
 162 */
 163 
 164   if (!result) {
 165 
 166 #ifdef DEBUG
 167     printk("sd%d : %d sectors remain.\n", MINOR(SCpnt->request.dev), SCpnt->request.nr_sectors);
 168     printk("use_sg is %d\n ",SCpnt->use_sg);
 169 #endif
 170     if (SCpnt->use_sg) {
 171       struct scatterlist * sgpnt;
 172       int i;
 173       sgpnt = (struct scatterlist *) SCpnt->buffer;
 174       for(i=0; i<SCpnt->use_sg; i++) {
 175 #ifdef DEBUG
 176         printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
 177 #endif
 178         if (sgpnt[i].alt_address) {
 179           if (SCpnt->request.cmd == READ)
 180             memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
 181           scsi_free(sgpnt[i].address, sgpnt[i].length);
 182         };
 183       };
 184       scsi_free(SCpnt->buffer, SCpnt->sglist_len);  /* Free list of scatter-gather pointers */
 185     } else {
 186       if (SCpnt->buffer != SCpnt->request.buffer) {
 187 #ifdef DEBUG
 188         printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 189                    SCpnt->bufflen);
 190 #endif  
 191           if (SCpnt->request.cmd == READ)
 192             memcpy(SCpnt->request.buffer, SCpnt->buffer,
 193                    SCpnt->bufflen);
 194           scsi_free(SCpnt->buffer, SCpnt->bufflen);
 195       };
 196     };
 197 /*
 198  *      If multiple sectors are requested in one buffer, then
 199  *      they will have been finished off by the first command.  If
 200  *      not, then we have a multi-buffer command.
 201  */
 202     if (SCpnt->request.nr_sectors > this_count)
 203       {
 204         SCpnt->request.errors = 0;
 205         
 206         if (!SCpnt->request.bh)
 207           {
 208 #ifdef DEBUG
 209             printk("sd%d : handling page request, no buffer\n",
 210                    MINOR(SCpnt->request.dev));
 211 #endif
  212 /*
  213   The SCpnt->request.nr_sectors field is always counted in 512 byte sectors,
  214   even if the device's native sector size is different.
  215 */
 216             panic("sd.c: linked page request (%lx %x)",
 217                   SCpnt->request.sector, this_count);
 218           }
 219       }
 220     end_scsi_request(SCpnt, 1, this_count);
 221     requeue_sd_request(SCpnt);
 222     return;
 223   }
 224 
 225 /* Free up any indirection buffers we allocated for DMA purposes. */
 226     if (SCpnt->use_sg) {
 227       struct scatterlist * sgpnt;
 228       int i;
 229       sgpnt = (struct scatterlist *) SCpnt->buffer;
 230       for(i=0; i<SCpnt->use_sg; i++) {
 231 #ifdef DEBUG
 232         printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 233                    SCpnt->bufflen);
 234 #endif
 235         if (sgpnt[i].alt_address) {
 236           scsi_free(sgpnt[i].address, sgpnt[i].length);
 237         };
 238       };
 239       scsi_free(SCpnt->buffer, SCpnt->sglist_len);  /* Free list of scatter-gather pointers */
 240     } else {
 241 #ifdef DEBUG
 242       printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 243                    SCpnt->bufflen);
 244 #endif
 245       if (SCpnt->buffer != SCpnt->request.buffer)
 246         scsi_free(SCpnt->buffer, SCpnt->bufflen);
 247     };
 248 
 249 /*
 250         Now, if we were good little boys and girls, Santa left us a request
 251         sense buffer.  We can extract information from this, so we
 252         can choose a block to remap, etc.
 253 */
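/*
        For reference, the tests below look at standard SCSI request-sense
        data (only the fields used here are shown):

                sense_buffer[0] & 0x7f   0x70  current-error response code
                sense_buffer[2] & 0x0f         sense key, e.g.
                                         0x02  NOT READY
                                         0x05  ILLEGAL REQUEST
                                         0x06  UNIT ATTENTION (media change / reset)
*/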
 254 
 255         if (driver_byte(result) != 0) {
 256           if (sugestion(result) == SUGGEST_REMAP) {
 257 #ifdef REMAP
 258 /*
 259         Not yet implemented.  A read will fail after being remapped,
 260         a write will call the strategy routine again.
 261 */
  262             if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].remap)
 263               {
 264                 result = 0;
 265               }
 266             else
 267               
 268 #endif
 269             }
 270 
 271           if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
 272             if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
 273               if(rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->removable) {
 274               /* detected disc change.  set a bit and quietly refuse    */
 275               /* further access.                                        */
 276               
 277                 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->changed = 1;
 278                 end_scsi_request(SCpnt, 0, this_count);
 279                 requeue_sd_request(SCpnt);
 280                 return;
 281               }
 282             }
 283           }
 284           
 285 
  286 /*      If we had an ILLEGAL REQUEST returned, then we may have
  287 performed an unsupported command.  The most likely cause is a ten byte
  288 read issued to a device that only supports six byte reads.  Also, on a
  289 system where READ CAPACITY failed, we may have read past the end
  290 of the disk.
  291 */
 292 
 293           if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
 294             if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten) {
 295               rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten = 0;
 296               requeue_sd_request(SCpnt);
 297               result = 0;
 298             } else {
 299             }
 300           }
 301         }  /* driver byte != 0 */
 302         if (result) {
 303                 printk("SCSI disk error : host %d id %d lun %d return code = %x\n",
 304                        rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->host->host_no,
 305                        rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->id,
 306                        rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->lun, result);
 307 
 308                 if (driver_byte(result) & DRIVER_SENSE)
 309                         print_sense("sd", SCpnt);
 310                 end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
 311                 requeue_sd_request(SCpnt);
 312                 return;
 313         }
 314 }
 315 
  316 /*
  317         do_sd_request() and requeue_sd_request() form the request handler for
  318         the sd driver.  Their job in life is to take block device requests and
  319         translate them into SCSI commands.
  320 */
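/*
        In outline: the block layer calls do_sd_request(), which picks a
        request off the queue and obtains a Scsi_Cmnd via allocate_device()
        or request_queueable(); requeue_sd_request() then builds the CDB,
        sets up any scatter-gather or bounce buffers, and hands the command
        to scsi_do_cmd() with rw_intr() as the completion routine; rw_intr()
        finishes the request and calls requeue_sd_request() again to keep
        the queue moving.
*/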
 321 
 322 static void do_sd_request (void)
 323 {
 324   Scsi_Cmnd * SCpnt = NULL;
 325   struct request * req = NULL;
 326   int flag = 0;
 327   while (1==1){
 328     cli();
 329     if (CURRENT != NULL && CURRENT->dev == -1) {
 330       sti();
 331       return;
 332     };
 333 
 334     INIT_SCSI_REQUEST;
 335 
 336 
 337 /* We have to be careful here.  allocate_device will get a free pointer, but
 338    there is no guarantee that it is queueable.  In normal usage, we want to
 339    call this, because other types of devices may have the host all tied up,
 340    and we want to make sure that we have at least one request pending for this
 341    type of device.   We can also come through here while servicing an
 342    interrupt, because of the need to start another command.  If we call
 343    allocate_device more than once, then the system can wedge if the command
 344    is not queueable.  The request_queueable function is safe because it checks
 345    to make sure that the host is able to take another command before it returns
 346    a pointer.  */
 347 
 348     if (flag++ == 0)
 349       SCpnt = allocate_device(&CURRENT,
 350                               rscsi_disks[DEVICE_NR(MINOR(CURRENT->dev))].device, 0); 
 351     else SCpnt = NULL;
 352     sti();
 353 
  354 /* This is a performance enhancement.  We dig down into the request list and
  355    try to find a queueable request (i.e. device not busy, and host able to
  356    accept another command).  If we find one, then we queue it.  This can
  357    make a big difference on systems with more than one disk drive.  We want
  358    to have the interrupts off when monkeying with the request list, because
  359    otherwise the kernel might try to slip in a request in between somewhere. */
 360 
 361     if (!SCpnt && NR_SD > 1){
 362       struct request *req1;
 363       req1 = NULL;
 364       cli();
 365       req = CURRENT;
 366       while(req){
 367         SCpnt = request_queueable(req,
 368                                   rscsi_disks[DEVICE_NR(MINOR(req->dev))].device);
 369         if(SCpnt) break;
 370         req1 = req;
 371         req = req->next;
 372       };
 373       if (SCpnt && req->dev == -1) {
 374         if (req == CURRENT) 
 375           CURRENT = CURRENT->next;
 376         else
 377           req1->next = req->next;
 378       };
 379       sti();
 380     };
 381     
 382     if (!SCpnt) return; /* Could not find anything to do */
 383         
 384     /* Queue command */
 385     requeue_sd_request(SCpnt);
 386   };  /* While */
 387 }    
 388 
 389 static void requeue_sd_request (Scsi_Cmnd * SCpnt)
 390 {
 391         int dev, block, this_count;
 392         unsigned char cmd[10];
 393         int bounce_size, contiguous;
 394         int max_sg;
 395         struct buffer_head * bh, *bhp;
 396         char * buff, *bounce_buffer;
 397 
 398 repeat:
 399 
 400         if(SCpnt->request.dev <= 0) {
 401           do_sd_request();
 402           return;
 403         }
 404 
 405         dev =  MINOR(SCpnt->request.dev);
 406         block = SCpnt->request.sector;
 407         this_count = 0;
 408 
 409 #ifdef DEBUG
 410         printk("Doing sd request, dev = %d, block = %d\n", dev, block);
 411 #endif
 412 
 413         if (dev >= (NR_SD << 4) || block + SCpnt->request.nr_sectors > sd[dev].nr_sects)
 414                 {
 415                 end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 416                 goto repeat;
 417                 }
 418 
 419         block += sd[dev].start_sect;
 420         dev = DEVICE_NR(dev);
 421 
 422         if (rscsi_disks[dev].device->changed)
 423                 {
 424 /*
 425  * quietly refuse to do anything to a changed disc until the changed bit has been reset
 426  */
 427                 /* printk("SCSI disk has been changed.  Prohibiting further I/O.\n");   */
 428                 end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 429                 goto repeat;
 430                 }
 431 
 432 #ifdef DEBUG
 433         printk("sd%d : real dev = /dev/sd%d, block = %d\n", MINOR(SCpnt->request.dev), dev, block);
 434 #endif
 435 
 436         switch (SCpnt->request.cmd)
 437                 {
 438                 case WRITE :
 439                         if (!rscsi_disks[dev].device->writeable)
 440                                 {
 441                                 end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 442                                 goto repeat;
 443                                 }
 444                         cmd[0] = WRITE_6;
 445                         break;
 446                 case READ :
 447                         cmd[0] = READ_6;
 448                         break;
 449                 default :
 450                         panic ("Unknown sd command %d\n", SCpnt->request.cmd);
 451                       }
 452 
 453         SCpnt->this_count = 0;
 454 
 455         /* If the host adapter can deal with very large scatter-gather
 456            requests, it is a waste of time to cluster */
 457         contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
 458         bounce_buffer = NULL;
 459         bounce_size = (SCpnt->request.nr_sectors << 9);
 460 
 461         /* First see if we need a bounce buffer for this request.  If we do, make sure
 462            that we can allocate a buffer.  Do not waste space by allocating a bounce
 463            buffer if we are straddling the 16Mb line */
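        /* ISA_DMA_THRESHOLD is the 16Mb boundary that ISA bus-master adapters
           (host->unchecked_isa_dma) cannot address; scsi_malloc() hands out
           memory from a pool those adapters can reach, which is what makes it
           usable for bounce buffers here. */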
 464 
 465         
 466         if (contiguous && SCpnt->request.bh &&
 467             ((int) SCpnt->request.bh->b_data) + (SCpnt->request.nr_sectors << 9) - 1 > 
 468             ISA_DMA_THRESHOLD && SCpnt->host->unchecked_isa_dma) {
 469           if(((int) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
 470             bounce_buffer = (char *) scsi_malloc(bounce_size);
 471           if(!bounce_buffer) contiguous = 0;
 472         };
 473 
 474         if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
 475           for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp, 
 476               bhp = bhp->b_reqnext) {
 477             if(!CONTIGUOUS_BUFFERS(bh,bhp)) { 
 478               if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
 479               contiguous = 0;
 480               break;
 481             } 
 482           };
 483         if (!SCpnt->request.bh || contiguous) {
 484 
 485           /* case of page request (i.e. raw device), or unlinked buffer */
 486           this_count = SCpnt->request.nr_sectors;
 487           buff = SCpnt->request.buffer;
 488           SCpnt->use_sg = 0;
 489 
 490         } else if (SCpnt->host->sg_tablesize == 0 ||
 491                    (need_isa_buffer && 
 492                     dma_free_sectors <= 10)) {
 493 
  494           /* Case of a host adapter that cannot scatter-gather.  We also
  495            come here if we are running low on DMA buffer memory.  We use
  496            a threshold higher than what this request would need, so that
  497            we leave room for other requests.  Even though we would not need
  498            it all, we need to be conservative, because if we run low enough
  499            we have no choice but to panic. */
 500 
 501           if (SCpnt->host->sg_tablesize != 0 &&
 502               need_isa_buffer && 
 503               dma_free_sectors <= 10)
 504             printk("Warning: SCSI DMA buffer space running low.  Using non scatter-gather I/O.\n");
 505 
 506           this_count = SCpnt->request.current_nr_sectors;
 507           buff = SCpnt->request.buffer;
 508           SCpnt->use_sg = 0;
 509 
 510         } else {
 511 
 512           /* Scatter-gather capable host adapter */
 513           struct scatterlist * sgpnt;
 514           int count, this_count_max;
 515           int counted;
 516 
 517           bh = SCpnt->request.bh;
 518           this_count = 0;
 519           this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
 520           count = 0;
 521           bhp = NULL;
 522           while(bh) {
 523             if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
 524             if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
 525                !CLUSTERABLE_DEVICE(SCpnt) ||
 526                (SCpnt->host->unchecked_isa_dma &&
 527                ((unsigned int) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
 528               if (count < SCpnt->host->sg_tablesize) count++;
 529               else break;
 530             };
 531             this_count += (bh->b_size >> 9);
 532             bhp = bh;
 533             bh = bh->b_reqnext;
 534           };
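          /* At this point "count" is the number of scatter-gather segments the
             request should need: a new segment is counted whenever the next
             buffer is not physically contiguous with the previous one (or
             clustering is off), capped at the host's sg_tablesize, and the
             scan stops once this_count_max (the largest transfer a 6- or
             10-byte command can express) would be exceeded. */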
 535 #if 0
 536           if(SCpnt->host->unchecked_isa_dma &&
 537              ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
 538 #endif
 539           SCpnt->use_sg = count;  /* Number of chains */
 540           count = 512;/* scsi_malloc can only allocate in chunks of 512 bytes*/
 541           while( count < (SCpnt->use_sg * sizeof(struct scatterlist))) 
 542             count = count << 1;
 543           SCpnt->sglist_len = count;
 544           max_sg = count / sizeof(struct scatterlist);
 545           if(SCpnt->host->sg_tablesize < max_sg) max_sg = SCpnt->host->sg_tablesize;
 546           sgpnt = (struct scatterlist * ) scsi_malloc(count);
  547           if (sgpnt) memset(sgpnt, 0, count);  /* Zero so it is easy to fill */
 548           if (!sgpnt) {
 549             printk("Warning - running *really* short on DMA buffers\n");
 550             SCpnt->use_sg = 0;  /* No memory left - bail out */
 551             this_count = SCpnt->request.current_nr_sectors;
 552             buff = SCpnt->request.buffer;
 553           } else {
 554             buff = (char *) sgpnt;
 555             counted = 0;
 556             for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
 557                 count < SCpnt->use_sg && bh; 
 558                 count++, bh = bhp) {
 559 
 560               bhp = bh->b_reqnext;
 561 
 562               if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
 563               sgpnt[count].length += bh->b_size;
 564               counted += bh->b_size >> 9;
 565 
 566               if (((int) sgpnt[count].address) + sgpnt[count].length - 1 > 
 567                   ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
 568                   !sgpnt[count].alt_address) {
 569                 sgpnt[count].alt_address = sgpnt[count].address;
  570                 /* We try to avoid exhausting the DMA pool, since it is easier
 571                    to control usage here.  In other places we might have a more
 572                    pressing need, and we would be screwed if we ran out */
 573                 if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
 574                   sgpnt[count].address = NULL;
 575                 } else {
 576                   sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
 577                 };
 578 /* If we start running low on DMA buffers, we abort the scatter-gather
 579    operation, and free all of the memory we have allocated.  We want to
 580    ensure that all scsi operations are able to do at least a non-scatter/gather
 581    operation */
 582                 if(sgpnt[count].address == NULL){ /* Out of dma memory */
 583 #if 0
 584                   printk("Warning: Running low on SCSI DMA buffers");
 585                   /* Try switching back to a non scatter-gather operation. */
 586                   while(--count >= 0){
 587                     if(sgpnt[count].alt_address) 
 588                       scsi_free(sgpnt[count].address, sgpnt[count].length);
 589                   };
 590                   this_count = SCpnt->request.current_nr_sectors;
 591                   buff = SCpnt->request.buffer;
 592                   SCpnt->use_sg = 0;
 593                   scsi_free(sgpnt, SCpnt->sglist_len);
 594 #endif
 595                   SCpnt->use_sg = count;
 596                   this_count = counted -= bh->b_size >> 9;
 597                   break;
 598                 };
 599 
 600               };
 601 
 602               /* Only cluster buffers if we know that we can supply DMA buffers
 603                  large enough to satisfy the request.  Do not cluster a new
 604                  request if this would mean that we suddenly need to start
 605                  using DMA bounce buffers */
 606               if(bhp && CONTIGUOUS_BUFFERS(bh,bhp) && CLUSTERABLE_DEVICE(SCpnt)) {
 607                 char * tmp;
 608 
 609                 if (((int) sgpnt[count].address) + sgpnt[count].length +
 610                     bhp->b_size - 1 > ISA_DMA_THRESHOLD && 
 611                     (SCpnt->host->unchecked_isa_dma) &&
 612                     !sgpnt[count].alt_address) continue;
 613 
 614                 if(!sgpnt[count].alt_address) {count--; continue; }
 615                 if(dma_free_sectors > 10)
 616                   tmp = (char *) scsi_malloc(sgpnt[count].length + bhp->b_size);
 617                 else {
 618                   tmp = NULL;
 619                   max_sg = SCpnt->use_sg;
 620                 };
 621                 if(tmp){
 622                   scsi_free(sgpnt[count].address, sgpnt[count].length);
 623                   sgpnt[count].address = tmp;
 624                   count--;
 625                   continue;
 626                 };
 627 
 628                 /* If we are allowed another sg chain, then increment counter so we
 629                    can insert it.  Otherwise we will end up truncating */
 630 
 631                 if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
 632               };  /* contiguous buffers */
 633             }; /* for loop */
 634 
 635             this_count = counted; /* This is actually how many we are going to transfer */
 636 
 637             if(count < SCpnt->use_sg || SCpnt->use_sg > SCpnt->host->sg_tablesize){
 638               bh = SCpnt->request.bh;
 639               printk("Use sg, count %d %x %d\n", SCpnt->use_sg, count, dma_free_sectors);
 640               printk("maxsg = %x, counted = %d this_count = %d\n", max_sg, counted, this_count);
 641               while(bh){
 642                 printk("[%p %lx] ", bh->b_data, bh->b_size);
 643                 bh = bh->b_reqnext;
 644               };
 645               if(SCpnt->use_sg < 16)
 646                 for(count=0; count<SCpnt->use_sg; count++)
 647                   printk("{%d:%p %p %d}  ", count,
 648                          sgpnt[count].address,
 649                          sgpnt[count].alt_address,
 650                          sgpnt[count].length);
 651               panic("Ooops");
 652             };
 653 
 654             if (SCpnt->request.cmd == WRITE)
 655               for(count=0; count<SCpnt->use_sg; count++)
 656                 if(sgpnt[count].alt_address)
 657                   memcpy(sgpnt[count].address, sgpnt[count].alt_address, 
 658                          sgpnt[count].length);
 659           };  /* Able to malloc sgpnt */
 660         };  /* Host adapter capable of scatter-gather */
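/*
   To summarise the cases above: when SCpnt->use_sg is zero, buff points at a
   single contiguous area -- the caller's buffer for a page request or a
   contiguous run of buffer heads, or just the current buffer head on a host
   without scatter-gather -- and may still be swapped for a low-memory bounce
   buffer just below.  Otherwise buff points at the scatterlist array and
   use_sg holds the segment count.  Either way, this_count is the number of
   512-byte sectors that will actually be transferred by this command.
*/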
 661 
 662 /* Now handle the possibility of DMA to addresses > 16Mb */
 663 
 664         if(SCpnt->use_sg == 0){
 665           if (((int) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD && 
 666             (SCpnt->host->unchecked_isa_dma)) {
 667             if(bounce_buffer)
 668               buff = bounce_buffer;
 669             else
 670               buff = (char *) scsi_malloc(this_count << 9);
 671             if(buff == NULL) {  /* Try backing off a bit if we are low on mem*/
 672               this_count = SCpnt->request.current_nr_sectors;
 673               buff = (char *) scsi_malloc(this_count << 9);
 674               if(!buff) panic("Ran out of DMA buffers.");
 675             };
 676             if (SCpnt->request.cmd == WRITE)
 677               memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
 678           };
 679         };
 680 #ifdef DEBUG
 681         printk("sd%d : %s %d/%d 512 byte blocks.\n", MINOR(SCpnt->request.dev),
 682                 (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
 683                 this_count, SCpnt->request.nr_sectors);
 684 #endif
 685 
 686         cmd[1] = (SCpnt->lun << 5) & 0xe0;
 687 
 688         if (rscsi_disks[dev].sector_size == 1024){
 689           if(block & 1) panic("sd.c:Bad block number requested");
 690           if(this_count & 1) panic("sd.c:Bad block number requested");
 691           block = block >> 1;
 692           this_count = this_count >> 1;
 693         };
 694 
 695         if (rscsi_disks[dev].sector_size == 256){
 696           block = block << 1;
 697           this_count = this_count << 1;
 698         };
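/*
 * The block layer always counts in 512-byte sectors, so the two conversions
 * above rescale to the drive's native block size.  For example, with
 * 1024-byte sectors a request for 512-byte sectors 8..15 becomes native
 * blocks 4..7 (block >>= 1, this_count >>= 1), and the panics catch requests
 * that are not aligned to a native block.
 */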
 699 
 700         if (((this_count > 0xff) ||  (block > 0x1fffff)) && rscsi_disks[dev].ten)
 701                 {
 702                 if (this_count > 0xffff)
 703                         this_count = 0xffff;
 704 
 705                 cmd[0] += READ_10 - READ_6 ;
 706                 cmd[2] = (unsigned char) (block >> 24) & 0xff;
 707                 cmd[3] = (unsigned char) (block >> 16) & 0xff;
 708                 cmd[4] = (unsigned char) (block >> 8) & 0xff;
 709                 cmd[5] = (unsigned char) block & 0xff;
 710                 cmd[6] = cmd[9] = 0;
 711                 cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
 712                 cmd[8] = (unsigned char) this_count & 0xff;
 713                 }
 714         else
 715                 {
 716                 if (this_count > 0xff)
 717                         this_count = 0xff;
 718 
 719                 cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
 720                 cmd[2] = (unsigned char) ((block >> 8) & 0xff);
 721                 cmd[3] = (unsigned char) block & 0xff;
 722                 cmd[4] = (unsigned char) this_count;
 723                 cmd[5] = 0;
 724                 }
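/*
 * For reference, the two command forms just built look like this (per SCSI-2):
 *
 *      6-byte READ/WRITE                       10-byte READ/WRITE
 *      cmd[0]  opcode (0x08 / 0x0a)            cmd[0]  opcode (0x28 / 0x2a)
 *      cmd[1]  LUN<<5 | LBA bits 20..16        cmd[1]  LUN<<5
 *      cmd[2]  LBA bits 15..8                  cmd[2..5]  LBA, MSB first
 *      cmd[3]  LBA bits 7..0                   cmd[6]  reserved
 *      cmd[4]  transfer length (max 0xff)      cmd[7..8]  transfer length
 *      cmd[5]  control                         cmd[9]  control
 *
 * which is why the 10-byte form is required once block exceeds 0x1fffff or
 * this_count exceeds 0xff.
 */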
 725 
 726 /*
 727  * We shouldn't disconnect in the middle of a sector, so with a dumb 
 728  * host adapter, it's safe to assume that we can at least transfer 
 729  * this many bytes between each connect / disconnect.  
 730  */
 731 
 732         SCpnt->transfersize = rscsi_disks[dev].sector_size;
 733         SCpnt->underflow = this_count << 9; 
 734         scsi_do_cmd (SCpnt, (void *) cmd, buff, 
 735                      this_count * rscsi_disks[dev].sector_size,
 736                      rw_intr, 
 737                      (SCpnt->device->type == TYPE_DISK ? 
 738                                      SD_TIMEOUT : SD_MOD_TIMEOUT),
 739                      MAX_RETRIES);
 740 }
 741 
 742 static int check_scsidisk_media_change(dev_t full_dev){
 743         int retval;
 744         int target;
 745         struct inode inode;
 746         int flag = 0;
 747 
 748         target =  DEVICE_NR(MINOR(full_dev));
 749 
 750         if (target >= NR_SD) {
 751                 printk("SCSI disk request error: invalid device.\n");
 752                 return 0;
 753         };
 754 
 755         if(!rscsi_disks[target].device->removable) return 0;
 756 
 757         inode.i_rdev = full_dev;  /* This is all we really need here */
 758         retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
 759 
 760         if(retval){ /* Unable to test, unit probably not ready.  This usually
 761                      means there is no disc in the drive.  Mark as changed,
 762                      and we will figure it out later once the drive is
 763                      available again.  */
 764 
 765           rscsi_disks[target].device->changed = 1;
 766           return 1; /* This will force a flush, if called from
 767                        check_disk_change */
 768         };
 769 
 770         retval = rscsi_disks[target].device->changed;
 771         if(!flag) rscsi_disks[target].device->changed = 0;
 772         return retval;
 773 }
 774 
 775 static void sd_init_done (Scsi_Cmnd * SCpnt)
 776 {
 777   struct request * req;
 778   struct task_struct * p;
 779   
 780   req = &SCpnt->request;
 781   req->dev = 0xfffe; /* Busy, but indicate request done */
 782   
 783   if ((p = req->waiting) != NULL) {
 784     req->waiting = NULL;
 785     p->state = TASK_RUNNING;
 786     if (p->counter > current->counter)
 787       need_resched = 1;
 788   }
 789 }
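/*
 * During initialisation the request.dev field of the command block doubles as
 * a small state flag: it is set to 0xffff while a command is outstanding,
 * sd_init_done() above flips it to 0xfffe when the command completes, and it
 * is set back to -1 when the command block is released.  The wait loops in
 * sd_init_onedisk() below poll for the 0xfffe value.
 */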
 790 
 791 static int sd_init_onedisk(int i)
 792 {
 793   int j = 0;
 794   unsigned char cmd[10];
 795   unsigned char *buffer;
  796   unsigned long spintime;
 797   int the_result, retries;
 798   Scsi_Cmnd * SCpnt;
 799 
 800   /* We need to retry the READ_CAPACITY because a UNIT_ATTENTION is considered
 801      a fatal error, and many devices report such an error just after a scsi
 802      bus reset. */
 803 
 804   SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
 805   buffer = (unsigned char *) scsi_malloc(512);
 806 
 807   spintime = 0;
 808 
 809   /* Spin up drives, as required.  Only do this at boot time */
 810   if (current == task[0]){
 811     do{
 812       cmd[0] = TEST_UNIT_READY;
 813       cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
 814       memset ((void *) &cmd[2], 0, 8);
 815       SCpnt->request.dev = 0xffff;  /* Mark as really busy again */
 816       SCpnt->sense_buffer[0] = 0;
 817       SCpnt->sense_buffer[2] = 0;
 818       
 819       scsi_do_cmd (SCpnt,
 820                    (void *) cmd, (void *) buffer,
 821                    512, sd_init_done,  SD_TIMEOUT,
 822                    MAX_RETRIES);
 823       
 824       while(SCpnt->request.dev != 0xfffe);
 825       
 826       the_result = SCpnt->result;
 827       
 828       /* Look for non-removable devices that return NOT_READY.  Issue command
 829          to spin up drive for these cases. */
 830       if(the_result && !rscsi_disks[i].device->removable && 
 831          SCpnt->sense_buffer[2] == NOT_READY) {
 832         int time1;
 833         if(!spintime){
 834           printk( "sd%d: Spinning up disk...", i );
 835           cmd[0] = START_STOP;
 836           cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
 837           cmd[1] |= 1;  /* Return immediately */
 838           memset ((void *) &cmd[2], 0, 8);
 839           cmd[4] = 1; /* Start spin cycle */
 840           SCpnt->request.dev = 0xffff;  /* Mark as really busy again */
 841           SCpnt->sense_buffer[0] = 0;
 842           SCpnt->sense_buffer[2] = 0;
 843           
 844           scsi_do_cmd (SCpnt,
 845                        (void *) cmd, (void *) buffer,
 846                        512, sd_init_done,  SD_TIMEOUT,
 847                        MAX_RETRIES);
 848           
 849           while(SCpnt->request.dev != 0xfffe);
 850 
 851           spintime = jiffies;
 852         };
 853 
 854         time1 = jiffies;
 855         while(jiffies < time1 + HZ); /* Wait 1 second for next try */
 856         printk( "." );
 857       };
 858     } while(the_result && spintime && spintime+5000 > jiffies);
 859     if (spintime) {
 860        if (the_result)
 861            printk( "not responding...\n" );
 862        else
 863            printk( "ready\n" );
 864     }
 865   };  /* current == task[0] */
 866 
 867 
 868   retries = 3;
 869   do {
 870     cmd[0] = READ_CAPACITY;
 871     cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
 872     memset ((void *) &cmd[2], 0, 8);
 873     memset ((void *) buffer, 0, 8);
 874     SCpnt->request.dev = 0xffff;  /* Mark as really busy again */
 875     SCpnt->sense_buffer[0] = 0;
 876     SCpnt->sense_buffer[2] = 0;
 877     
 878     scsi_do_cmd (SCpnt,
 879                  (void *) cmd, (void *) buffer,
 880                  8, sd_init_done,  SD_TIMEOUT,
 881                  MAX_RETRIES);
 882     
 883     if (current == task[0])
 884       while(SCpnt->request.dev != 0xfffe);
 885     else
 886       if (SCpnt->request.dev != 0xfffe){
 887         SCpnt->request.waiting = current;
 888         current->state = TASK_UNINTERRUPTIBLE;
 889         while (SCpnt->request.dev != 0xfffe) schedule();
 890       };
 891     
 892     the_result = SCpnt->result;
 893     retries--;
 894 
 895   } while(the_result && retries);
 896 
 897   SCpnt->request.dev = -1;  /* Mark as not busy */
 898 
 899   wake_up(&SCpnt->device->device_wait); 
 900 
 901   /* Wake up a process waiting for device*/
 902 
  903   /*
  904    *    The SCSI standard says "READ CAPACITY is necessary for self configuring software".
  905    *    While not mandatory, support of READ CAPACITY is strongly encouraged.
  906    *    We used to die if we couldn't successfully do a READ CAPACITY.
  907    *    But now we go on about our way.  The side effects of this are:
  908    *
  909    *    1.  We can't know the block size with certainty.  I have said "512 bytes is it"
  910    *            as this is most common.
  911    *
  912    *    2.  Recovery from when someone attempts to read past the end of the raw device will
  913    *        be slower.
  914    */
 915 
 916   if (the_result)
 917     {
 918       printk ("sd%d : READ CAPACITY failed.\n"
 919               "sd%d : status = %x, message = %02x, host = %d, driver = %02x \n",
 920               i,i,
 921               status_byte(the_result),
 922               msg_byte(the_result),
 923               host_byte(the_result),
 924               driver_byte(the_result)
 925               );
 926       if (driver_byte(the_result)  & DRIVER_SENSE)
 927         printk("sd%d : extended sense code = %1x \n", i, SCpnt->sense_buffer[2] & 0xf);
 928       else
 929         printk("sd%d : sense not available. \n", i);
 930 
 931       printk("sd%d : block size assumed to be 512 bytes, disk size 1GB.  \n", i);
 932       rscsi_disks[i].capacity = 0x1fffff;
 933       rscsi_disks[i].sector_size = 512;
 934 
 935       /* Set dirty bit for removable devices if not ready - sometimes drives
 936          will not report this properly. */
 937       if(rscsi_disks[i].device->removable && 
 938          SCpnt->sense_buffer[2] == NOT_READY)
 939         rscsi_disks[i].device->changed = 1;
 940 
 941     }
 942   else
 943     {
 944       rscsi_disks[i].capacity = (buffer[0] << 24) |
 945         (buffer[1] << 16) |
 946           (buffer[2] << 8) |
 947             buffer[3];
 948 
 949       rscsi_disks[i].sector_size = (buffer[4] << 24) |
 950         (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
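      /* The 8-byte READ CAPACITY response parsed above holds the logical
         block address of the last block in bytes 0-3 and the block length in
         bytes in bytes 4-7, both big-endian, hence the byte-at-a-time
         assembly.  (Strictly speaking, "capacity" here is the last LBA rather
         than the block count.) */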
 951 
 952       if (rscsi_disks[i].sector_size != 512 &&
 953           rscsi_disks[i].sector_size != 1024 &&
 954           rscsi_disks[i].sector_size != 256)
 955         {
 956           printk ("sd%d : unsupported sector size %d.\n",
 957                   i, rscsi_disks[i].sector_size);
 958           if(rscsi_disks[i].device->removable){
 959             rscsi_disks[i].capacity = 0;
 960           } else {
 961             printk ("scsi : deleting disk entry.\n");
  962             for  (j=i;  j < NR_SD - 1; j++)
  963               rscsi_disks[j] = rscsi_disks[j + 1];
 964             --i;
 965             --NR_SD;
 966             scsi_free(buffer, 512);
 967             return i;
 968           };
 969         }
 970       if(rscsi_disks[i].sector_size == 1024)
 971         rscsi_disks[i].capacity <<= 1;  /* Change this into 512 byte sectors */
 972       if(rscsi_disks[i].sector_size == 256)
 973         rscsi_disks[i].capacity >>= 1;  /* Change this into 512 byte sectors */
 974     }
 975 
 976   rscsi_disks[i].ten = 1;
 977   rscsi_disks[i].remap = 1;
 978   scsi_free(buffer, 512);
 979   return i;
 980 }
 981 
 982 /*
 983         The sd_init() function looks at all SCSI drives present, determines
 984         their size, and reads partition table entries for them.
 985 */
 986 
 987 
 988 unsigned long sd_init(unsigned long memory_start, unsigned long memory_end)
 989 {
 990         int i;
 991 
 992         if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
 993                 printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
 994                 return memory_start;
 995         }
 996         if (MAX_SD == 0) return memory_start;
 997 
 998         sd_sizes = (int *) memory_start;
 999         memory_start += (MAX_SD << 4) * sizeof(int);
1000         memset(sd_sizes, 0, (MAX_SD << 4) * sizeof(int));
1001 
1002         sd_blocksizes = (int *) memory_start;
1003         memory_start += (MAX_SD << 4) * sizeof(int);
1004         for(i=0;i<(MAX_SD << 4);i++) sd_blocksizes[i] = 1024;
1005         blksize_size[MAJOR_NR] = sd_blocksizes;
1006 
1007         sd = (struct hd_struct *) memory_start;
1008         memory_start += (MAX_SD << 4) * sizeof(struct hd_struct);
1009 
1010         sd_gendisk.max_nr = MAX_SD;
1011         sd_gendisk.part = sd;
1012         sd_gendisk.sizes = sd_sizes;
1013         sd_gendisk.real_devices = (void *) rscsi_disks;
1014 
1015         for (i = 0; i < NR_SD; ++i)
1016           i = sd_init_onedisk(i);
1017 
1018         blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
1019 
1020         /* If our host adapter is capable of scatter-gather, then we increase
 1021            the read-ahead to 120 sectors.  If not, we use
1022            a two block (4 sector) read ahead. */
1023         if(rscsi_disks[0].device->host->sg_tablesize)
1024           read_ahead[MAJOR_NR] = 120;
 1025         /* 120 sector read-ahead */
1026         else
1027           read_ahead[MAJOR_NR] = 4;  /* 4 sector read-ahead */
1028         
1029         sd_gendisk.next = gendisk_head;
1030         gendisk_head = &sd_gendisk;
1031         return memory_start;
1032 }
1033 
1034 void sd_init1(){
1035   rscsi_disks = (Scsi_Disk *) scsi_init_malloc(MAX_SD * sizeof(Scsi_Disk));
1036 };
1037 
1038 void sd_attach(Scsi_Device * SDp){
1039   SDp->scsi_request_fn = do_sd_request;
1040   rscsi_disks[NR_SD++].device = SDp;
1041   if(NR_SD > MAX_SD) panic ("scsi_devices corrupt (sd)");
1042 };
1043 
1044 #define DEVICE_BUSY rscsi_disks[target].device->busy
1045 #define USAGE rscsi_disks[target].device->access_count
1046 #define CAPACITY rscsi_disks[target].capacity
1047 #define MAYBE_REINIT  sd_init_onedisk(target)
1048 #define GENDISK_STRUCT sd_gendisk
1049 
1050 /* This routine is called to flush all partitions and partition tables
1051    for a changed scsi disk, and then re-read the new partition table.
1052    If we are revalidating a disk because of a media change, then we
1053    enter with usage == 0.  If we are using an ioctl, we automatically have
1054    usage == 1 (we need an open channel to use an ioctl :-), so this
1055    is our limit.
1056  */
1057 int revalidate_scsidisk(int dev, int maxusage){
1058           int target, major;
1059           struct gendisk * gdev;
1060           int max_p;
1061           int start;
1062           int i;
1063 
1064           target =  DEVICE_NR(MINOR(dev));
1065           gdev = &GENDISK_STRUCT;
1066 
1067           cli();
1068           if (DEVICE_BUSY || USAGE > maxusage) {
1069             sti();
1070             printk("Device busy for revalidation (usage=%d)\n", USAGE);
1071             return -EBUSY;
1072           };
1073           DEVICE_BUSY = 1;
1074           sti();
1075 
1076           max_p = gdev->max_p;
1077           start = target << gdev->minor_shift;
1078           major = MAJOR_NR << 8;
1079 
1080           for (i=max_p - 1; i >=0 ; i--) {
1081             sync_dev(major | start | i);
1082             invalidate_inodes(major | start | i);
1083             invalidate_buffers(major | start | i);
1084             gdev->part[start+i].start_sect = 0;
1085             gdev->part[start+i].nr_sects = 0;
1086           };
1087 
1088 #ifdef MAYBE_REINIT
1089           MAYBE_REINIT;
1090 #endif
1091 
1092           gdev->part[start].nr_sects = CAPACITY;
1093           resetup_one_dev(gdev, target);
1094 
1095           DEVICE_BUSY = 0;
1096           return 0;
1097 }
1098 
1099 static int fop_revalidate_scsidisk(dev_t dev){
1100   return revalidate_scsidisk(dev, 0);
1101 }
1102 
