root/drivers/scsi/sd.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. sd_open
  2. sd_release
  3. sd_geninit
  4. rw_intr
  5. do_sd_request
  6. requeue_sd_request
  7. check_scsidisk_media_change
  8. sd_init_done
  9. sd_init_onedisk
  10. sd_init
  11. sd_init1
  12. sd_attach
  13. revalidate_scsidisk

   1 /*
   2  *      sd.c Copyright (C) 1992 Drew Eckhardt 
   3  *           Copyright (C) 1993, 1994 Eric Youngdale
   4  *      Linux scsi disk driver by
   5  *              Drew Eckhardt 
   6  *
   7  *      <drew@colorado.edu>
   8  *
   9  *       Modified by Eric Youngdale ericy@cais.com to
  10  *       add scatter-gather, multiple outstanding request, and other
  11  *       enhancements.
  12  */
  13 
  14 #include <linux/fs.h>
  15 #include <linux/kernel.h>
  16 #include <linux/sched.h>
  17 #include <linux/string.h>
  18 #include <linux/errno.h>
  19 #include <asm/system.h>
  20 
  21 #define MAJOR_NR SCSI_DISK_MAJOR
  22 #include "../block/blk.h"
  23 #include "scsi.h"
  24 #include "hosts.h"
  25 #include "sd.h"
  26 #include "scsi_ioctl.h"
  27 #include "constants.h"
  28 
  29 #include <linux/genhd.h>
  30 
  31 /*
  32 static const char RCSid[] = "$Header:";
  33 */
  34 
/* How many times a failed READ/WRITE is retried by the mid-layer before
   the error is reported back up through rw_intr(). */
   35 #define MAX_RETRIES 5
   36 
   37 /*
   38  *      Time out in seconds for disks and Magneto-opticals (which are slower).
   39  */
/* NOTE(review): these values are passed straight to scsi_do_cmd(); confirm
   whether that interface takes seconds or jiffies before changing them. */
   41 #define SD_TIMEOUT 300
   42 #define SD_MOD_TIMEOUT 750
   43 
/* True when adjacent buffer heads may be merged into one transfer: the host
   adapter has asked for clustering and the device is not a magneto-optical
   (TYPE_MOD), which is handled one buffer at a time. */
   44 #define CLUSTERABLE_DEVICE(SC) (SC->host->hostt->use_clustering && \
   45                             scsi_devices[SC->index].type != TYPE_MOD)
  46 
  47 struct hd_struct * sd;
  48 
  49 int NR_SD=0;
  50 int MAX_SD=0;
  51 Scsi_Disk * rscsi_disks;
  52 static int * sd_sizes;
  53 static int * sd_blocksizes;
  54 
  55 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
  56 
  57 static sd_init_onedisk(int);
  58 
  59 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
  60 
  61 static int sd_open(struct inode * inode, struct file * filp)
     /* [previous][next][first][last][top][bottom][index][help] */
  62 {
  63         int target;
  64         target =  DEVICE_NR(MINOR(inode->i_rdev));
  65 
  66         if(target >= NR_SD || !rscsi_disks[target].device)
  67           return -ENXIO;   /* No such device */
  68         
  69 /* Make sure that only one process can do a check_change_disk at one time.
  70  This is also used to lock out further access when the partition table is being re-read. */
  71 
  72         while (rscsi_disks[target].device->busy);
  73 
  74         if(rscsi_disks[target].device->removable) {
  75           check_disk_change(inode->i_rdev);
  76 
  77           if(!rscsi_disks[target].device->access_count)
  78             sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
  79         };
  80         rscsi_disks[target].device->access_count++;
  81         return 0;
  82 }
  83 
  84 static void sd_release(struct inode * inode, struct file * file)
     /* [previous][next][first][last][top][bottom][index][help] */
  85 {
  86         int target;
  87         sync_dev(inode->i_rdev);
  88 
  89         target =  DEVICE_NR(MINOR(inode->i_rdev));
  90 
  91         rscsi_disks[target].device->access_count--;
  92 
  93         if(rscsi_disks[target].device->removable) {
  94           if(!rscsi_disks[target].device->access_count)
  95             sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
  96         };
  97 }
  98 
   99 static void sd_geninit(void);
  100 
/* File operations for /dev/sd*: generic block-device read/write paths
   plus the driver-specific open/release/ioctl above. */
  101 static struct file_operations sd_fops = {
  102         NULL,                   /* lseek - default */
  103         block_read,             /* read - general block-dev read */
  104         block_write,            /* write - general block-dev write */
  105         NULL,                   /* readdir - bad */
  106         NULL,                   /* select */
  107         sd_ioctl,               /* ioctl */
  108         NULL,                   /* mmap */
  109         sd_open,                /* open code */
  110         sd_release,             /* release */
  111         block_fsync             /* fsync */
  112 };
  113 
/* Generic-disk descriptor: 16 minors per disk (4 partition bits), so
   minor (i << 4) is whole disk i.  nr_real is filled in by sd_geninit. */
  114 static struct gendisk sd_gendisk = {
  115         MAJOR_NR,               /* Major number */
  116         "sd",           /* Major name */
  117         4,              /* Bits to shift to get real from partition */
  118         1 << 4,         /* Number of partitions per real */
  119         0,              /* maximum number of real */
  120         sd_geninit,     /* init function */
  121         NULL,           /* hd struct */
  122         NULL,   /* block sizes */
  123         0,              /* number */
  124         NULL,   /* internal */
  125         NULL            /* next */
  126 };
 127 
 128 static void sd_geninit (void)
     /* [previous][next][first][last][top][bottom][index][help] */
 129 {
 130         int i;
 131 
 132         for (i = 0; i < NR_SD; ++i)
 133                 sd[i << 4].nr_sects = rscsi_disks[i].capacity;
 134         sd_gendisk.nr_real = NR_SD;
 135 }
 136 
  137 /*
  138         rw_intr is the interrupt routine for the device driver.  It will
  139         be notified on the end of a SCSI read / write, and
  140         will take one of several actions based on success or failure:
        complete the request, flag a media change, retry with a 6-byte
        CDB, or report the error.  In every path it must also release any
        DMA bounce/scatter-gather memory the request borrowed.
  141 */
  142 
  143 static void rw_intr (Scsi_Cmnd *SCpnt)
     /* [previous][next][first][last][top][bottom][index][help] */
  144 {
  145   int result = SCpnt->result;
  146   int this_count = SCpnt->bufflen >> 9;
  147 
  148 #ifdef DEBUG
  149   printk("sd%d : rw_intr(%d, %d)\n", MINOR(SCpnt->request.dev), SCpnt->host->host_no, result);
  150 #endif
  151 
  152 /*
  153   First case : we assume that the command succeeded.  One of two things will
  154   happen here.  Either we will be finished, or there will be more
  155   sectors that we were unable to read last time.
  156 */
  157 
  158   if (!result) {
  159 
  160 #ifdef DEBUG
  161     printk("sd%d : %d sectors remain.\n", MINOR(SCpnt->request.dev), SCpnt->request.nr_sectors);
  162     printk("use_sg is %d\n ",SCpnt->use_sg);
  163 #endif
/* Scatter-gather case: copy bounce (alt_address) segments back for reads,
   free each bounce segment, then free the scatterlist itself. */
  164     if (SCpnt->use_sg) {
  165       struct scatterlist * sgpnt;
  166       int i;
  167       sgpnt = (struct scatterlist *) SCpnt->buffer;
  168       for(i=0; i<SCpnt->use_sg; i++) {
  169 #ifdef DEBUG
  170         printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
  171 #endif
  172         if (sgpnt[i].alt_address) {
  173           if (SCpnt->request.cmd == READ)
  174             memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
  175           scsi_free(sgpnt[i].address, sgpnt[i].length);
  176         };
  177       };
  178       scsi_free(SCpnt->buffer, SCpnt->sglist_len);  /* Free list of scatter-gather pointers */
  179     } else {
/* Single-buffer case: a differing buffer pointer means a bounce buffer was
   used; copy back for reads and release it. */
  180       if (SCpnt->buffer != SCpnt->request.buffer) {
  181 #ifdef DEBUG
  182         printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
  183                    SCpnt->bufflen);
  184 #endif  
  185           if (SCpnt->request.cmd == READ)
  186             memcpy(SCpnt->request.buffer, SCpnt->buffer,
  187                    SCpnt->bufflen);
  188           scsi_free(SCpnt->buffer, SCpnt->bufflen);
  189       };
  190     };
  191 /*
  192  *      If multiple sectors are requested in one buffer, then
  193  *      they will have been finished off by the first command.  If
  194  *      not, then we have a multi-buffer command.
  195  */
  196     if (SCpnt->request.nr_sectors > this_count)
  197       {
  198         SCpnt->request.errors = 0;
  199         
  200         if (!SCpnt->request.bh)
  201           {
  202 #ifdef DEBUG
  203             printk("sd%d : handling page request, no buffer\n",
  204                    MINOR(SCpnt->request.dev));
  205 #endif
  206 /*
  207   The SCpnt->request.nr_sectors field is always done in 512 byte sectors,
  208   even if this really isn't the case.
  209 */
  210             panic("sd.c: linked page request (%lx %x)",
  211                   SCpnt->request.sector, this_count);
  212           }
  213       }
/* Success: complete this_count sectors and go look for more work. */
  214     end_scsi_request(SCpnt, 1, this_count);
  215     requeue_sd_request(SCpnt);
  216     return;
  217   }
  218 
  219 /* Free up any indirection buffers we allocated for DMA purposes. */
  220     if (SCpnt->use_sg) {
  221       struct scatterlist * sgpnt;
  222       int i;
  223       sgpnt = (struct scatterlist *) SCpnt->buffer;
  224       for(i=0; i<SCpnt->use_sg; i++) {
  225 #ifdef DEBUG
  226         printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
  227                    SCpnt->bufflen);
  228 #endif
  229         if (sgpnt[i].alt_address) {
  230           scsi_free(sgpnt[i].address, sgpnt[i].length);
  231         };
  232       };
  233       scsi_free(SCpnt->buffer, SCpnt->sglist_len);  /* Free list of scatter-gather pointers */
  234     } else {
  235 #ifdef DEBUG
  236       printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
  237                    SCpnt->bufflen);
  238 #endif
  239       if (SCpnt->buffer != SCpnt->request.buffer)
  240         scsi_free(SCpnt->buffer, SCpnt->bufflen);
  241     };
  242 
  243 /*
  244         Now, if we were good little boys and girls, Santa left us a request
  245         sense buffer.  We can extract information from this, so we
  246         can choose a block to remap, etc.
  247 */
  248 
  249         if (driver_byte(result) != 0) {
/* NOTE(review): the REMAP branch below is unfinished -- the `if` is missing
   parentheses and the `else` has no statement, so this would not compile if
   REMAP were ever defined.  Left untouched because it is conditioned out. */
  250           if (sugestion(result) == SUGGEST_REMAP) {
  251 #ifdef REMAP
  252 /*
  253         Not yet implemented.  A read will fail after being remapped,
  254         a write will call the strategy routine again.
  255 */
  256             if rscsi_disks[DEVICE_NR(SCpnt->request.dev)].remap
  257               {
  258                 result = 0;
  259               }
  260             else
  261               
  262 #endif
  263             }
  264 
  265           if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
  266             if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
  267               if(rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->removable) {
  268               /* detected disc change.  set a bit and quietly refuse    */
  269               /* further access.                                        */
  270               
  271                 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->changed = 1;
  272                 end_scsi_request(SCpnt, 0, this_count);
  273                 requeue_sd_request(SCpnt);
  274                 return;
  275               }
  276             }
  277           }
  278           
  279 
  280 /*      If we had an ILLEGAL REQUEST returned, then we may have
  281 performed an unsupported command.  The only thing this should be would
  282 be a ten byte read where only a six byte read was supported.  Also,
  283 on a system where READ CAPACITY failed, we may have read past the end
  284 of the disk. 
  285 */
  286 
/* Fall back permanently to 6-byte CDBs for this disk and resubmit. */
  287           if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
  288             if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten) {
  289               rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten = 0;
  290               requeue_sd_request(SCpnt);
  291               result = 0;
  292             } else {
  293             }
  294           }
  295         }  /* driver byte != 0 */
/* Anything still flagged as an error at this point is reported and the
   request is failed one chunk (current_nr_sectors) at a time. */
  296         if (result) {
  297                 printk("SCSI disk error : host %d id %d lun %d return code = %x\n",
  298                        rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->host->host_no,
  299                        rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->id,
  300                        rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->lun, result);
  301 
  302                 if (driver_byte(result) & DRIVER_SENSE)
  303                         print_sense("sd", SCpnt);
  304                 end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
  305                 requeue_sd_request(SCpnt);
  306                 return;
  307         }
  308 }
 309 
  310 /*
  311         do_sd_request() is the request handler function for the sd driver.
  312         It pulls block device requests off the queue and hands each one to
  313         requeue_sd_request(), which translates it to a SCSI command.
  314 */
  315 
  316 static void do_sd_request (void)
     /* [previous][next][first][last][top][bottom][index][help] */
  317 {
  318   Scsi_Cmnd * SCpnt = NULL;
  319   struct request * req = NULL;
/* flag ensures allocate_device() is called at most once per invocation;
   see the comment below for why calling it again can wedge the system. */
  320   int flag = 0;
  321   while (1==1){
/* Interrupts must be off while we examine/modify the request queue. */
  322     cli();
  323     if (CURRENT != NULL && CURRENT->dev == -1) {
  324       sti();
  325       return;
  326     };
  327 
  328     INIT_SCSI_REQUEST;
  329 
  330 
  331 /* We have to be careful here.  allocate_device will get a free pointer, but
  332    there is no guarantee that it is queueable.  In normal usage, we want to
  333    call this, because other types of devices may have the host all tied up,
  334    and we want to make sure that we have at least one request pending for this
  335    type of device.   We can also come through here while servicing an
  336    interrupt, because of the need to start another command.  If we call
  337    allocate_device more than once, then the system can wedge if the command
  338    is not queueable.  The request_queueable function is safe because it checks
  339    to make sure that the host is able to take another command before it returns
  340    a pointer.  */
  341 
  342     if (flag++ == 0)
  343       SCpnt = allocate_device(&CURRENT,
  344                               rscsi_disks[DEVICE_NR(MINOR(CURRENT->dev))].device->index, 0); 
  345     else SCpnt = NULL;
  346     sti();
  347 
  348 /* This is a performance enhancement.  We dig down into the request list and
  349    try and find a queueable request (i.e. device not busy, and host able to
  350    accept another command).  If we find one, then we queue it. This can
  351    make a big difference on systems with more than one disk drive.  We want
  352    to have the interrupts off when monkeying with the request list, because
  353    otherwise the kernel might try and slip in a request in between somewhere. */
  354 
  355     if (!SCpnt && NR_SD > 1){
  356       struct request *req1;
  357       req1 = NULL;
  358       cli();
  359       req = CURRENT;
  360       while(req){
  361         SCpnt = request_queueable(req,
  362                                   rscsi_disks[DEVICE_NR(MINOR(req->dev))].device->index);
  363         if(SCpnt) break;
  364         req1 = req;
  365         req = req->next;
  366       };
/* The chosen request is now owned by SCpnt; unlink it from the queue
   (req1 is its predecessor, or it was the head). */
  367       if (SCpnt && req->dev == -1) {
  368         if (req == CURRENT) 
  369           CURRENT = CURRENT->next;
  370         else
  371           req1->next = req->next;
  372       };
  373       sti();
  374     };
  375     
  376     if (!SCpnt) return; /* Could not find anything to do */
  377         
  378     /* Queue command */
  379     requeue_sd_request(SCpnt);
  380   };  /* While */
  381 }    
 382 
/*
 * requeue_sd_request() translates the block request attached to SCpnt into
 * a SCSI READ/WRITE and issues it via scsi_do_cmd(), with rw_intr() as the
 * completion routine.  Along the way it validates the request against the
 * partition table, builds a scatter-gather list when the host supports it,
 * allocates ISA-DMA bounce buffers for memory above ISA_DMA_THRESHOLD, and
 * chooses between 6-byte and 10-byte CDBs depending on block number, count,
 * and whether the device accepts 10-byte commands (rscsi_disks[dev].ten).
 */
  383 static void requeue_sd_request (Scsi_Cmnd * SCpnt)
     /* [previous][next][first][last][top][bottom][index][help] */
  384 {
  385         int dev, block, this_count;
  386         unsigned char cmd[10];
  387         int bounce_size, contiguous;
  388         int max_sg;
  389         struct buffer_head * bh, *bhp;
  390         char * buff, *bounce_buffer;
  391 
  392 repeat:
  393 
/* dev <= 0 means no request is attached; go look for new work instead. */
  394         if(SCpnt->request.dev <= 0) {
  395           do_sd_request();
  396           return;
  397         }
  398 
  399         dev =  MINOR(SCpnt->request.dev);
  400         block = SCpnt->request.sector;
  401         this_count = 0;
  402 
  403 #ifdef DEBUG
  404         printk("Doing sd request, dev = %d, block = %d\n", dev, block);
  405 #endif
  406 
/* Reject requests for nonexistent minors or past the end of the partition. */
  407         if (dev >= (NR_SD << 4) || block + SCpnt->request.nr_sectors > sd[dev].nr_sects)
  408                 {
  409                 end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
  410                 goto repeat;
  411                 }
  412 
/* Translate partition-relative sector to absolute, and minor to disk index. */
  413         block += sd[dev].start_sect;
  414         dev = DEVICE_NR(dev);
  415 
  416         if (rscsi_disks[dev].device->changed)
  417                 {
  418 /*
  419  * quietly refuse to do anything to a changed disc until the changed bit has been reset
  420  */
  421                 /* printk("SCSI disk has been changed.  Prohibiting further I/O.\n");   */
  422                 end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
  423                 goto repeat;
  424                 }
  425 
  426 #ifdef DEBUG
  427         printk("sd%d : real dev = /dev/sd%d, block = %d\n", MINOR(SCpnt->request.dev), dev, block);
  428 #endif
  429 
  430         switch (SCpnt->request.cmd)
  431                 {
  432                 case WRITE :
  433                         if (!rscsi_disks[dev].device->writeable)
  434                                 {
  435                                 end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
  436                                 goto repeat;
  437                                 }
  438                         cmd[0] = WRITE_6;
  439                         break;
  440                 case READ :
  441                         cmd[0] = READ_6;
  442                         break;
  443                 default :
  444                         panic ("Unknown sd command %d\n", SCpnt->request.cmd);
  445                       }
  446 
  447         SCpnt->this_count = 0;
  448 
  449         /* If the host adapter can deal with very large scatter-gather
  450            requests, it is a waste of time to cluster */
  451         contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
  452         bounce_buffer = NULL;
  453         bounce_size = (SCpnt->request.nr_sectors << 9);
  454 
  455         /* First see if we need a bounce buffer for this request.  If we do, make sure
  456            that we can allocate a buffer.  Do not waste space by allocating a bounce
  457            buffer if we are straddling the 16Mb line */
  458 
  459         
  460         if (contiguous && SCpnt->request.bh &&
  461             ((int) SCpnt->request.bh->b_data) + (SCpnt->request.nr_sectors << 9) - 1 > 
  462             ISA_DMA_THRESHOLD && SCpnt->host->unchecked_isa_dma) {
  463           if(((int) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
  464             bounce_buffer = (char *) scsi_malloc(bounce_size);
  465           if(!bounce_buffer) contiguous = 0;
  466         };
  467 
/* A single contiguous transfer is only possible if every buffer head in
   the chain is physically adjacent to its successor. */
  468         if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
  469           for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp, 
  470               bhp = bhp->b_reqnext) {
  471             if(!CONTIGUOUS_BUFFERS(bh,bhp)) { 
  472               if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
  473               contiguous = 0;
  474               break;
  475             } 
  476           };
  477         if (!SCpnt->request.bh || contiguous) {
  478 
  479           /* case of page request (i.e. raw device), or unlinked buffer */
  480           this_count = SCpnt->request.nr_sectors;
  481           buff = SCpnt->request.buffer;
  482           SCpnt->use_sg = 0;
  483 
  484         } else if (SCpnt->host->sg_tablesize == 0 ||
  485                    (need_isa_buffer && 
  486                     dma_free_sectors <= 10)) {
  487 
  488           /* Case of host adapter that cannot scatter-gather.  We also
  489            come here if we are running low on DMA buffer memory.  We set
  490            a threshold higher than that we would need for this request so
  491            we leave room for other requests.  Even though we would not need
  492            it all, we need to be conservative, because if we run low enough
  493            we have no choice but to panic. */
  494 
  495           if (SCpnt->host->sg_tablesize != 0 &&
  496               need_isa_buffer && 
  497               dma_free_sectors <= 10)
  498             printk("Warning: SCSI DMA buffer space running low.  Using non scatter-gather I/O.\n");
  499 
  500           this_count = SCpnt->request.current_nr_sectors;
  501           buff = SCpnt->request.buffer;
  502           SCpnt->use_sg = 0;
  503 
  504         } else {
  505 
  506           /* Scatter-gather capable host adapter */
  507           struct scatterlist * sgpnt;
  508           int count, this_count_max;
  509           int counted;
  510 
/* First pass: walk the buffer chain counting how many sg entries we need
   (adjacent clusterable buffers share an entry) and how many sectors fit
   under the CDB's transfer-length limit (16-bit for 10-byte, 8-bit for
   6-byte commands). */
  511           bh = SCpnt->request.bh;
  512           this_count = 0;
  513           this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
  514           count = 0;
  515           bhp = NULL;
  516           while(bh) {
  517             if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
  518             if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
  519                !CLUSTERABLE_DEVICE(SCpnt) ||
  520                (SCpnt->host->unchecked_isa_dma &&
  521                ((unsigned int) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
  522               if (count < SCpnt->host->sg_tablesize) count++;
  523               else break;
  524             };
  525             this_count += (bh->b_size >> 9);
  526             bhp = bh;
  527             bh = bh->b_reqnext;
  528           };
  529 #if 0
  530           if(SCpnt->host->unchecked_isa_dma &&
  531              ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
  532 #endif
  533           SCpnt->use_sg = count;  /* Number of chains */
  534           count = 512;/* scsi_malloc can only allocate in chunks of 512 bytes*/
  535           while( count < (SCpnt->use_sg * sizeof(struct scatterlist))) 
  536             count = count << 1;
  537           SCpnt->sglist_len = count;
  538           max_sg = count / sizeof(struct scatterlist);
  539           if(SCpnt->host->sg_tablesize < max_sg) max_sg = SCpnt->host->sg_tablesize;
  540           sgpnt = (struct scatterlist * ) scsi_malloc(count);
/* BUG(review): this memset runs BEFORE the NULL check on the next line --
   if scsi_malloc() failed we dereference NULL here and never reach the
   fallback below.  The check should precede the memset. */
  541           memset(sgpnt, 0, count);  /* Zero so it is easy to fill */
  542           if (!sgpnt) {
  543             printk("Warning - running *really* short on DMA buffers\n");
  544             SCpnt->use_sg = 0;  /* No memory left - bail out */
  545             this_count = SCpnt->request.current_nr_sectors;
  546             buff = SCpnt->request.buffer;
  547           } else {
/* Second pass: fill the scatterlist, allocating DMA bounce segments for
   entries above ISA_DMA_THRESHOLD and merging contiguous buffers. */
  548             buff = (char *) sgpnt;
  549             counted = 0;
  550             for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
  551                 count < SCpnt->use_sg && bh; 
  552                 count++, bh = bhp) {
  553 
  554               bhp = bh->b_reqnext;
  555 
  556               if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
  557               sgpnt[count].length += bh->b_size;
  558               counted += bh->b_size >> 9;
  559 
  560               if (((int) sgpnt[count].address) + sgpnt[count].length - 1 > 
  561                   ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
  562                   !sgpnt[count].alt_address) {
  563                 sgpnt[count].alt_address = sgpnt[count].address;
  564                 /* We try and avoid exhausting the DMA pool, since it is easier
  565                    to control usage here.  In other places we might have a more
  566                    pressing need, and we would be screwed if we ran out */
  567                 if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
  568                   sgpnt[count].address = NULL;
  569                 } else {
  570                   sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
  571                 };
  572 /* If we start running low on DMA buffers, we abort the scatter-gather
  573    operation, and free all of the memory we have allocated.  We want to
  574    ensure that all scsi operations are able to do at least a non-scatter/gather
  575    operation */
  576                 if(sgpnt[count].address == NULL){ /* Out of dma memory */
  577 #if 0
  578                   printk("Warning: Running low on SCSI DMA buffers");
  579                   /* Try switching back to a non scatter-gather operation. */
  580                   while(--count >= 0){
  581                     if(sgpnt[count].alt_address) 
  582                       scsi_free(sgpnt[count].address, sgpnt[count].length);
  583                   };
  584                   this_count = SCpnt->request.current_nr_sectors;
  585                   buff = SCpnt->request.buffer;
  586                   SCpnt->use_sg = 0;
  587                   scsi_free(sgpnt, SCpnt->sglist_len);
  588 #endif
/* Truncate the transfer to what we managed to map so far. */
  589                   SCpnt->use_sg = count;
  590                   this_count = counted -= bh->b_size >> 9;
  591                   break;
  592                 };
  593 
  594               };
  595 
  596               /* Only cluster buffers if we know that we can supply DMA buffers
  597                  large enough to satisfy the request.  Do not cluster a new
  598                  request if this would mean that we suddenly need to start
  599                  using DMA bounce buffers */
  600               if(bhp && CONTIGUOUS_BUFFERS(bh,bhp) && CLUSTERABLE_DEVICE(SCpnt)) {
  601                 char * tmp;
  602 
  603                 if (((int) sgpnt[count].address) + sgpnt[count].length +
  604                     bhp->b_size - 1 > ISA_DMA_THRESHOLD && 
  605                     (SCpnt->host->unchecked_isa_dma) &&
  606                     !sgpnt[count].alt_address) continue;
  607 
/* count-- makes the next iteration extend this same sg entry. */
  608                 if(!sgpnt[count].alt_address) {count--; continue; }
  609                 if(dma_free_sectors > 10)
  610                   tmp = (char *) scsi_malloc(sgpnt[count].length + bhp->b_size);
  611                 else {
  612                   tmp = NULL;
  613                   max_sg = SCpnt->use_sg;
  614                 };
  615                 if(tmp){
  616                   scsi_free(sgpnt[count].address, sgpnt[count].length);
  617                   sgpnt[count].address = tmp;
  618                   count--;
  619                   continue;
  620                 };
  621 
  622                 /* If we are allowed another sg chain, then increment counter so we
  623                    can insert it.  Otherwise we will end up truncating */
  624 
  625                 if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
  626               };  /* contiguous buffers */
  627             }; /* for loop */
  628 
  629             this_count = counted; /* This is actually how many we are going to transfer */
  630 
/* Sanity check: the sg list must be internally consistent or we panic. */
  631             if(count < SCpnt->use_sg || SCpnt->use_sg > SCpnt->host->sg_tablesize){
  632               bh = SCpnt->request.bh;
  633               printk("Use sg, count %d %x %d\n", SCpnt->use_sg, count, dma_free_sectors);
  634               printk("maxsg = %x, counted = %d this_count = %d\n", max_sg, counted, this_count);
  635               while(bh){
  636                 printk("[%p %lx] ", bh->b_data, bh->b_size);
  637                 bh = bh->b_reqnext;
  638               };
  639               if(SCpnt->use_sg < 16)
  640                 for(count=0; count<SCpnt->use_sg; count++)
  641                   printk("{%d:%p %p %d}  ", count,
  642                          sgpnt[count].address,
  643                          sgpnt[count].alt_address,
  644                          sgpnt[count].length);
  645               panic("Ooops");
  646             };
  647 
/* For writes, pre-fill the bounce segments from the caller's buffers. */
  648             if (SCpnt->request.cmd == WRITE)
  649               for(count=0; count<SCpnt->use_sg; count++)
  650                 if(sgpnt[count].alt_address)
  651                   memcpy(sgpnt[count].address, sgpnt[count].alt_address, 
  652                          sgpnt[count].length);
  653           };  /* Able to malloc sgpnt */
  654         };  /* Host adapter capable of scatter-gather */
  655 
  656 /* Now handle the possibility of DMA to addresses > 16Mb */
  657 
  658         if(SCpnt->use_sg == 0){
  659           if (((int) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD && 
  660             (SCpnt->host->unchecked_isa_dma)) {
  661             if(bounce_buffer)
  662               buff = bounce_buffer;
  663             else
  664               buff = (char *) scsi_malloc(this_count << 9);
  665             if(buff == NULL) {  /* Try backing off a bit if we are low on mem*/
  666               this_count = SCpnt->request.current_nr_sectors;
  667               buff = (char *) scsi_malloc(this_count << 9);
  668               if(!buff) panic("Ran out of DMA buffers.");
  669             };
  670             if (SCpnt->request.cmd == WRITE)
  671               memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
  672           };
  673         };
  674 #ifdef DEBUG
  675         printk("sd%d : %s %d/%d 512 byte blocks.\n", MINOR(SCpnt->request.dev),
  676                 (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
  677                 this_count, SCpnt->request.nr_sectors);
  678 #endif
  679 
/* Build the CDB.  Byte 1 carries the LUN in its top three bits. */
  680         cmd[1] = (SCpnt->lun << 5) & 0xe0;
  681 
/* Convert the 512-byte sector count/address into device-native blocks. */
  682         if (rscsi_disks[dev].sector_size == 1024){
  683           if(block & 1) panic("sd.c:Bad block number requested");
  684           if(this_count & 1) panic("sd.c:Bad block number requested");
  685           block = block >> 1;
  686           this_count = this_count >> 1;
  687         };
  688 
  689         if (rscsi_disks[dev].sector_size == 256){
  690           block = block << 1;
  691           this_count = this_count << 1;
  692         };
  693 
/* The 6-byte CDB holds a 21-bit LBA and 8-bit count; switch to the
   10-byte form beyond that, if the device supports it. */
  694         if (((this_count > 0xff) ||  (block > 0x1fffff)) && rscsi_disks[dev].ten)
  695                 {
  696                 if (this_count > 0xffff)
  697                         this_count = 0xffff;
  698 
  699                 cmd[0] += READ_10 - READ_6 ;
  700                 cmd[2] = (unsigned char) (block >> 24) & 0xff;
  701                 cmd[3] = (unsigned char) (block >> 16) & 0xff;
  702                 cmd[4] = (unsigned char) (block >> 8) & 0xff;
  703                 cmd[5] = (unsigned char) block & 0xff;
  704                 cmd[6] = cmd[9] = 0;
  705                 cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
  706                 cmd[8] = (unsigned char) this_count & 0xff;
  707                 }
  708         else
  709                 {
  710                 if (this_count > 0xff)
  711                         this_count = 0xff;
  712 
  713                 cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
  714                 cmd[2] = (unsigned char) ((block >> 8) & 0xff);
  715                 cmd[3] = (unsigned char) block & 0xff;
  716                 cmd[4] = (unsigned char) this_count;
  717                 cmd[5] = 0;
  718                 }
  719 
  720 /*
  721  * We shouldn't disconnect in the middle of a sector, so with a dumb 
  722  * host adapter, it's safe to assume that we can at least transfer 
  723  * this many bytes between each connect / disconnect.  
  724  */
  725 
  726         SCpnt->transfersize = rscsi_disks[dev].sector_size;
  727         SCpnt->underflow = this_count << 9; 
  728         scsi_do_cmd (SCpnt, (void *) cmd, buff, 
  729                      this_count * rscsi_disks[dev].sector_size,
  730                      rw_intr, 
  731                      (scsi_devices[SCpnt->index].type == TYPE_DISK ? 
  732                                      SD_TIMEOUT : SD_MOD_TIMEOUT),
  733                      MAX_RETRIES);
  734 }
 735 
 736 int check_scsidisk_media_change(int full_dev, int flag){
     /* [previous][next][first][last][top][bottom][index][help] */
 737         int retval;
 738         int target;
 739         struct inode inode;
 740 
 741         target =  DEVICE_NR(MINOR(full_dev));
 742 
 743         if (target >= NR_SD) {
 744                 printk("SCSI disk request error: invalid device.\n");
 745                 return 0;
 746         };
 747 
 748         if(!rscsi_disks[target].device->removable) return 0;
 749 
 750         inode.i_rdev = full_dev;  /* This is all we really need here */
 751         retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
 752 
 753         if(retval){ /* Unable to test, unit probably not ready.  This usually
 754                      means there is no disc in the drive.  Mark as changed,
 755                      and we will figure it out later once the drive is
 756                      available again.  */
 757 
 758           rscsi_disks[target].device->changed = 1;
 759           return 1; /* This will force a flush, if called from
 760                        check_disk_change */
 761         };
 762 
 763         retval = rscsi_disks[target].device->changed;
 764         if(!flag) rscsi_disks[target].device->changed = 0;
 765         return retval;
 766 }
 767 
 768 static void sd_init_done (Scsi_Cmnd * SCpnt)
     /* [previous][next][first][last][top][bottom][index][help] */
 769 {
 770   struct request * req;
 771   struct task_struct * p;
 772   
 773   req = &SCpnt->request;
 774   req->dev = 0xfffe; /* Busy, but indicate request done */
 775   
 776   if ((p = req->waiting) != NULL) {
 777     req->waiting = NULL;
 778     p->state = TASK_RUNNING;
 779     if (p->counter > current->counter)
 780       need_resched = 1;
 781   }
 782 }
 783 
 784 static int sd_init_onedisk(int i)
     /* [previous][next][first][last][top][bottom][index][help] */
 785 {
 786   int j = 0;
 787   unsigned char cmd[10];
 788   unsigned char *buffer;
 789   char spintime;
 790   int the_result, retries;
 791   Scsi_Cmnd * SCpnt;
 792 
 793   /* We need to retry the READ_CAPACITY because a UNIT_ATTENTION is considered
 794      a fatal error, and many devices report such an error just after a scsi
 795      bus reset. */
 796 
 797   SCpnt = allocate_device(NULL, rscsi_disks[i].device->index, 1);
 798   buffer = (unsigned char *) scsi_malloc(512);
 799 
 800   spintime = 0;
 801 
 802   /* Spin up drives, as required.  Only do this at boot time */
 803   if (current == task[0]){
 804     do{
 805       cmd[0] = TEST_UNIT_READY;
 806       cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
 807       memset ((void *) &cmd[2], 0, 8);
 808       SCpnt->request.dev = 0xffff;  /* Mark as really busy again */
 809       SCpnt->sense_buffer[0] = 0;
 810       SCpnt->sense_buffer[2] = 0;
 811       
 812       scsi_do_cmd (SCpnt,
 813                    (void *) cmd, (void *) buffer,
 814                    512, sd_init_done,  SD_TIMEOUT,
 815                    MAX_RETRIES);
 816       
 817       while(SCpnt->request.dev != 0xfffe);
 818       
 819       the_result = SCpnt->result;
 820       
 821       /* Look for non-removable devices that return NOT_READY.  Issue command
 822          to spin up drive for these cases. */
 823       if(the_result && !rscsi_disks[i].device->removable && 
 824          SCpnt->sense_buffer[2] == NOT_READY) {
 825         int time1;
 826         if(!spintime){
 827           printk( "sd%d: Spinning up disk...", i );
 828           cmd[0] = START_STOP;
 829           cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
 830           cmd[1] |= 1;  /* Return immediately */
 831           memset ((void *) &cmd[2], 0, 8);
 832           cmd[4] = 1; /* Start spin cycle */
 833           SCpnt->request.dev = 0xffff;  /* Mark as really busy again */
 834           SCpnt->sense_buffer[0] = 0;
 835           SCpnt->sense_buffer[2] = 0;
 836           
 837           scsi_do_cmd (SCpnt,
 838                        (void *) cmd, (void *) buffer,
 839                        512, sd_init_done,  SD_TIMEOUT,
 840                        MAX_RETRIES);
 841           
 842           while(SCpnt->request.dev != 0xfffe);
 843 
 844           spintime = jiffies;
 845         };
 846 
 847         time1 = jiffies;
 848         while(jiffies < time1 + 100); /* Wait 1 second for next try */
 849         printk( "." );
 850       };
 851     } while(the_result && spintime && spintime+5000 > jiffies);
 852     if (spintime) {
 853        if (the_result)
 854            printk( "not responding...\n" );
 855        else
 856            printk( "ready\n" );
 857     }
 858   };  /* current == task[0] */
 859 
 860 
 861   retries = 3;
 862   do {
 863     cmd[0] = READ_CAPACITY;
 864     cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
 865     memset ((void *) &cmd[2], 0, 8);
 866     memset ((void *) buffer, 0, 8);
 867     SCpnt->request.dev = 0xffff;  /* Mark as really busy again */
 868     SCpnt->sense_buffer[0] = 0;
 869     SCpnt->sense_buffer[2] = 0;
 870     
 871     scsi_do_cmd (SCpnt,
 872                  (void *) cmd, (void *) buffer,
 873                  8, sd_init_done,  SD_TIMEOUT,
 874                  MAX_RETRIES);
 875     
 876     if (current == task[0])
 877       while(SCpnt->request.dev != 0xfffe);
 878     else
 879       if (SCpnt->request.dev != 0xfffe){
 880         SCpnt->request.waiting = current;
 881         current->state = TASK_UNINTERRUPTIBLE;
 882         while (SCpnt->request.dev != 0xfffe) schedule();
 883       };
 884     
 885     the_result = SCpnt->result;
 886     retries--;
 887 
 888   } while(the_result && retries);
 889 
 890   SCpnt->request.dev = -1;  /* Mark as not busy */
 891 
 892   wake_up(&scsi_devices[SCpnt->index].device_wait); 
 893 
 894   /* Wake up a process waiting for device*/
 895 
 896   /*
 897    *    The SCSI standard says "READ CAPACITY is necessary for self confuring software"
 898    *    While not mandatory, support of READ CAPACITY is strongly encouraged.
 899    *    We used to die if we couldn't successfully do a READ CAPACITY.
 900    *    But, now we go on about our way.  The side effects of this are
 901    *
 902    *    1.  We can't know block size with certainty.  I have said "512 bytes is it"
 903    *            as this is most common.
 904    *
 905    *    2.  Recovery from when some one attempts to read past the end of the raw device will
 906    *        be slower.
 907    */
 908 
 909   if (the_result)
 910     {
 911       printk ("sd%d : READ CAPACITY failed.\n"
 912               "sd%d : status = %x, message = %02x, host = %d, driver = %02x \n",
 913               i,i,
 914               status_byte(the_result),
 915               msg_byte(the_result),
 916               host_byte(the_result),
 917               driver_byte(the_result)
 918               );
 919       if (driver_byte(the_result)  & DRIVER_SENSE)
 920         printk("sd%d : extended sense code = %1x \n", i, SCpnt->sense_buffer[2] & 0xf);
 921       else
 922         printk("sd%d : sense not available. \n", i);
 923 
 924       printk("sd%d : block size assumed to be 512 bytes, disk size 1GB.  \n", i);
 925       rscsi_disks[i].capacity = 0x1fffff;
 926       rscsi_disks[i].sector_size = 512;
 927 
 928       /* Set dirty bit for removable devices if not ready - sometimes drives
 929          will not report this properly. */
 930       if(rscsi_disks[i].device->removable && 
 931          SCpnt->sense_buffer[2] == NOT_READY)
 932         rscsi_disks[i].device->changed = 1;
 933 
 934     }
 935   else
 936     {
 937       rscsi_disks[i].capacity = (buffer[0] << 24) |
 938         (buffer[1] << 16) |
 939           (buffer[2] << 8) |
 940             buffer[3];
 941 
 942       rscsi_disks[i].sector_size = (buffer[4] << 24) |
 943         (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
 944 
 945       if (rscsi_disks[i].sector_size != 512 &&
 946           rscsi_disks[i].sector_size != 1024 &&
 947           rscsi_disks[i].sector_size != 256)
 948         {
 949           printk ("sd%d : unsupported sector size %d.\n",
 950                   i, rscsi_disks[i].sector_size);
 951           if(rscsi_disks[i].device->removable){
 952             rscsi_disks[i].capacity = 0;
 953           } else {
 954             printk ("scsi : deleting disk entry.\n");
 955             for  (j=i;  j < NR_SD - 1;)
 956               rscsi_disks[j] = rscsi_disks[++j];
 957             --i;
 958             --NR_SD;
 959             scsi_free(buffer, 512);
 960             return i;
 961           };
 962         }
 963       if(rscsi_disks[i].sector_size == 1024)
 964         rscsi_disks[i].capacity <<= 1;  /* Change this into 512 byte sectors */
 965       if(rscsi_disks[i].sector_size == 256)
 966         rscsi_disks[i].capacity >>= 1;  /* Change this into 512 byte sectors */
 967     }
 968 
 969   rscsi_disks[i].ten = 1;
 970   rscsi_disks[i].remap = 1;
 971   scsi_free(buffer, 512);
 972   return i;
 973 }
 974 
 975 /*
 976         The sd_init() function looks at all SCSI drives present, determines
 977         their size, and reads partition table entries for them.
 978 */
 979 
 980 
/*
 * Boot-time driver initialization: registers the block device, carves
 * the per-minor tables out of the boot memory pool (memory_start is a
 * bump allocator; the advanced value is returned), sizes every attached
 * disk via sd_init_onedisk(), and links sd_gendisk into gendisk_head.
 */
unsigned long sd_init(unsigned long memory_start, unsigned long memory_end)
{
        int i;

        if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
                printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
                return memory_start;
        }
        if (MAX_SD == 0) return memory_start;

        /* Each disk owns 16 minors (hence MAX_SD << 4 table entries). */
        sd_sizes = (int *) memory_start;
        memory_start += (MAX_SD << 4) * sizeof(int);
        memset(sd_sizes, 0, (MAX_SD << 4) * sizeof(int));

        sd_blocksizes = (int *) memory_start;
        memory_start += (MAX_SD << 4) * sizeof(int);
        for(i=0;i<(MAX_SD << 4);i++) sd_blocksizes[i] = 1024;
        blksize_size[MAJOR_NR] = sd_blocksizes;

        sd = (struct hd_struct *) memory_start;
        memory_start += (MAX_SD << 4) * sizeof(struct hd_struct);

        sd_gendisk.max_nr = MAX_SD;
        sd_gendisk.part = sd;
        sd_gendisk.sizes = sd_sizes;
        sd_gendisk.real_devices = (void *) rscsi_disks;

        /* sd_init_onedisk() may delete an entry (unsupported sector size)
           and returns the adjusted index, so it is fed back into i. */
        for (i = 0; i < NR_SD; ++i)
          i = sd_init_onedisk(i);

        blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;

        /* If our host adapter is capable of scatter-gather, then we use
           a large (120 sector) read-ahead.  If not, we use
           a small (4 sector) read-ahead. */
        if(rscsi_disks[0].device->host->sg_tablesize)
          read_ahead[MAJOR_NR] = 120;
        /* 120 sector read-ahead */
        else
          read_ahead[MAJOR_NR] = 4;  /* 4 sector read-ahead */
        
        sd_gendisk.next = gendisk_head;
        gendisk_head = &sd_gendisk;
        return memory_start;
}
1026 
1027 unsigned long sd_init1(unsigned long mem_start, unsigned long mem_end){
     /* [previous][next][first][last][top][bottom][index][help] */
1028   rscsi_disks = (Scsi_Disk *) mem_start;
1029   mem_start += MAX_SD * sizeof(Scsi_Disk);
1030   return mem_start;
1031 };
1032 
1033 void sd_attach(Scsi_Device * SDp){
     /* [previous][next][first][last][top][bottom][index][help] */
1034   SDp->scsi_request_fn = do_sd_request;
1035   rscsi_disks[NR_SD++].device = SDp;
1036   if(NR_SD > MAX_SD) panic ("scsi_devices corrupt (sd)");
1037 };
1038 
1039 #define DEVICE_BUSY rscsi_disks[target].device->busy
1040 #define USAGE rscsi_disks[target].device->access_count
1041 #define CAPACITY rscsi_disks[target].capacity
1042 #define MAYBE_REINIT  sd_init_onedisk(target)
1043 #define GENDISK_STRUCT sd_gendisk
1044 
1045 /* This routine is called to flush all partitions and partition tables
1046    for a changed scsi disk, and then re-read the new partition table.
1047    If we are revalidating a disk because of a media change, then we
1048    enter with usage == 0.  If we are using an ioctl, we automatically have
1049    usage == 1 (we need an open channel to use an ioctl :-), so this
1050    is our limit.
1051  */
int revalidate_scsidisk(int dev, int maxusage){
          int target, major;
          struct gendisk * gdev;
          int max_p;
          int start;
          int i;

          target =  DEVICE_NR(MINOR(dev));
          gdev = &GENDISK_STRUCT;

          /* Claim exclusive use of the device: the busy flag and the
             usage count must be tested and set atomically, so interrupts
             are disabled across the check. */
          cli();
          if (DEVICE_BUSY || USAGE > maxusage) {
            sti();
            printk("Device busy for revalidation (usage=%d)\n", USAGE);
            return -EBUSY;
          };
          DEVICE_BUSY = 1;
          sti();

          max_p = gdev->max_p;
          start = target << gdev->minor_shift;
          major = MAJOR_NR << 8;  /* device number: major in the high byte */

          /* Flush and invalidate every partition of this disk, then clear
             its partition-table entries before they are re-read below. */
          for (i=max_p - 1; i >=0 ; i--) {
            sync_dev(major | start | i);
            invalidate_inodes(major | start | i);
            invalidate_buffers(major | start | i);
            gdev->part[start+i].start_sect = 0;
            gdev->part[start+i].nr_sects = 0;
          };

          /* Re-interrogate the drive (MAYBE_REINIT expands to
             sd_init_onedisk(target) here). */
#ifdef MAYBE_REINIT
          MAYBE_REINIT;
#endif

          /* The whole-disk minor covers the (possibly new) full capacity;
             resetup_one_dev() re-reads the partition table. */
          gdev->part[start].nr_sects = CAPACITY;
          resetup_one_dev(gdev, target);

          DEVICE_BUSY = 0;
          return 0;
}
1093 

/* [previous][next][first][last][top][bottom][index][help] */