root/drivers/scsi/sd.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. sd_open
  2. sd_release
  3. sd_geninit
  4. rw_intr
  5. do_sd_request
  6. requeue_sd_request
  7. check_scsidisk_media_change
  8. sd_init_done
  9. sd_init_onedisk
  10. sd_init
  11. sd_init1
  12. sd_attach
  13. revalidate_scsidisk

   1 /*
   2  *      sd.c Copyright (C) 1992 Drew Eckhardt 
   3  *      Linux scsi disk driver by
   4  *              Drew Eckhardt 
   5  *
   6  *      <drew@colorado.edu>
   7  *
   8  *       Modified by Eric Youngdale eric@tantalus.nrl.navy.mil to
   9  *       add scatter-gather, multiple outstanding request, and other
  10  *       enhancements.
  11  */
  12 
  13 #include <linux/fs.h>
  14 #include <linux/kernel.h>
  15 #include <linux/sched.h>
  16 #include <linux/string.h>
  17 #include <linux/errno.h>
  18 #include <asm/system.h>
  19 
  20 #define MAJOR_NR SCSI_DISK_MAJOR
  21 #include "../block/blk.h"
  22 #include "scsi.h"
  23 #include "hosts.h"
  24 #include "sd.h"
  25 #include "scsi_ioctl.h"
  26 #include "constants.h"
  27 
  28 #include <linux/genhd.h>
  29 
  30 /*
  31 static const char RCSid[] = "$Header:";
  32 */
  33 
  34 #define MAX_RETRIES 5
  35 
  36 /*
  37  *      Time out in seconds for disks and Magneto-opticals (which are slower).
  38  */
  39 
  40 #define SD_TIMEOUT 300
  41 #define SD_MOD_TIMEOUT 750
  42 
  43 #define CLUSTERABLE_DEVICE(SC) (SC->host->sg_tablesize < 64 && \
  44                             scsi_devices[SC->index].type != TYPE_MOD)
  45 
  46 struct hd_struct * sd;
  47 
  48 int NR_SD=0;
  49 int MAX_SD=0;
  50 Scsi_Disk * rscsi_disks;
  51 static int * sd_sizes;
  52 static int * sd_blocksizes;
  53 
  54 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
  55 
  56 static sd_init_onedisk(int);
  57 
  58 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
  59 
  60 static int sd_open(struct inode * inode, struct file * filp)
     /* [previous][next][first][last][top][bottom][index][help] */
  61 {
  62         int target;
  63         target =  DEVICE_NR(MINOR(inode->i_rdev));
  64 
  65         if(target >= NR_SD || !rscsi_disks[target].device)
  66           return -ENXIO;   /* No such device */
  67         
  68 /* Make sure that only one process can do a check_change_disk at one time.
  69  This is also used to lock out further access when the partition table is being re-read. */
  70 
  71         while (rscsi_disks[target].device->busy);
  72 
  73         if(rscsi_disks[target].device->removable) {
  74           check_disk_change(inode->i_rdev);
  75 
  76           if(!rscsi_disks[target].device->access_count)
  77             sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
  78         };
  79         rscsi_disks[target].device->access_count++;
  80         return 0;
  81 }
  82 
  83 static void sd_release(struct inode * inode, struct file * file)
     /* [previous][next][first][last][top][bottom][index][help] */
  84 {
  85         int target;
  86         sync_dev(inode->i_rdev);
  87 
  88         target =  DEVICE_NR(MINOR(inode->i_rdev));
  89 
  90         rscsi_disks[target].device->access_count--;
  91 
  92         if(rscsi_disks[target].device->removable) {
  93           if(!rscsi_disks[target].device->access_count)
  94             sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
  95         };
  96 }
  97 
  98 static void sd_geninit(void);
  99 
/* File operations for the SCSI disk block device nodes (/dev/sd*). */
static struct file_operations sd_fops = {
        NULL,                   /* lseek - default */
        block_read,             /* read - general block-dev read */
        block_write,            /* write - general block-dev write */
        NULL,                   /* readdir - bad */
        NULL,                   /* select */
        sd_ioctl,               /* ioctl */
        NULL,                   /* mmap */
        sd_open,                /* open code */
        sd_release,             /* release */
        block_fsync             /* fsync */
};
 112 
/* Generic-disk chain entry for the sd major.  nr_real, the hd_struct and
   size tables are filled in later (nr_real is set in sd_geninit below;
   the tables presumably in the init code further down -- not shown here). */
static struct gendisk sd_gendisk = {
        MAJOR_NR,               /* Major number */
        "sd",           /* Major name */
        4,              /* Bits to shift to get real from partition */
        1 << 4,         /* Number of partitions per real */
        0,              /* maximum number of real */
        sd_geninit,     /* init function */
        NULL,           /* hd struct */
        NULL,   /* block sizes */
        0,              /* number */
        NULL,   /* internal */
        NULL            /* next */
};
 126 
 127 static void sd_geninit (void)
     /* [previous][next][first][last][top][bottom][index][help] */
 128 {
 129         int i;
 130 
 131         for (i = 0; i < NR_SD; ++i)
 132                 sd[i << 4].nr_sects = rscsi_disks[i].capacity;
 133         sd_gendisk.nr_real = NR_SD;
 134 }
 135 
 136 /*
 137         rw_intr is the interrupt routine for the device driver.  It will
 138         be notified on the end of a SCSI read / write, and
 139         will take on of several actions based on success or failure.
 140 */
 141 
 142 static void rw_intr (Scsi_Cmnd *SCpnt)
     /* [previous][next][first][last][top][bottom][index][help] */
 143 {
 144   int result = SCpnt->result;
 145   int this_count = SCpnt->bufflen >> 9;
 146 
 147 #ifdef DEBUG
 148   printk("sd%d : rw_intr(%d, %d)\n", MINOR(SCpnt->request.dev), SCpnt->host->host_no, result);
 149 #endif
 150 
 151 /*
 152   First case : we assume that the command succeeded.  One of two things will
 153   happen here.  Either we will be finished, or there will be more
 154   sectors that we were unable to read last time.
 155 */
 156 
 157   if (!result) {
 158 
 159 #ifdef DEBUG
 160     printk("sd%d : %d sectors remain.\n", MINOR(SCpnt->request.dev), SCpnt->request.nr_sectors);
 161     printk("use_sg is %d\n ",SCpnt->use_sg);
 162 #endif
 163     if (SCpnt->use_sg) {
 164       struct scatterlist * sgpnt;
 165       int i;
 166       sgpnt = (struct scatterlist *) SCpnt->buffer;
 167       for(i=0; i<SCpnt->use_sg; i++) {
 168 #ifdef DEBUG
 169         printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
 170 #endif
 171         if (sgpnt[i].alt_address) {
 172           if (SCpnt->request.cmd == READ)
 173             memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
 174           scsi_free(sgpnt[i].address, sgpnt[i].length);
 175         };
 176       };
 177       scsi_free(SCpnt->buffer, SCpnt->sglist_len);  /* Free list of scatter-gather pointers */
 178     } else {
 179       if (SCpnt->buffer != SCpnt->request.buffer) {
 180 #ifdef DEBUG
 181         printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 182                    SCpnt->bufflen);
 183 #endif  
 184           if (SCpnt->request.cmd == READ)
 185             memcpy(SCpnt->request.buffer, SCpnt->buffer,
 186                    SCpnt->bufflen);
 187           scsi_free(SCpnt->buffer, SCpnt->bufflen);
 188       };
 189     };
 190 /*
 191  *      If multiple sectors are requested in one buffer, then
 192  *      they will have been finished off by the first command.  If
 193  *      not, then we have a multi-buffer command.
 194  */
 195     if (SCpnt->request.nr_sectors > this_count)
 196       {
 197         SCpnt->request.errors = 0;
 198         
 199         if (!SCpnt->request.bh)
 200           {
 201 #ifdef DEBUG
 202             printk("sd%d : handling page request, no buffer\n",
 203                    MINOR(SCpnt->request.dev));
 204 #endif
 205 /*
 206   The SCpnt->request.nr_sectors field is always done in 512 byte sectors,
 207   even if this really isn't the case.
 208 */
 209             panic("sd.c: linked page request (%lx %x)",
 210                   SCpnt->request.sector, this_count);
 211           }
 212       }
 213     end_scsi_request(SCpnt, 1, this_count);
 214     requeue_sd_request(SCpnt);
 215     return;
 216   }
 217 
 218 /* Free up any indirection buffers we allocated for DMA purposes. */
 219     if (SCpnt->use_sg) {
 220       struct scatterlist * sgpnt;
 221       int i;
 222       sgpnt = (struct scatterlist *) SCpnt->buffer;
 223       for(i=0; i<SCpnt->use_sg; i++) {
 224 #ifdef DEBUG
 225         printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 226                    SCpnt->bufflen);
 227 #endif
 228         if (sgpnt[i].alt_address) {
 229           scsi_free(sgpnt[i].address, sgpnt[i].length);
 230         };
 231       };
 232       scsi_free(SCpnt->buffer, SCpnt->sglist_len);  /* Free list of scatter-gather pointers */
 233     } else {
 234 #ifdef DEBUG
 235       printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 236                    SCpnt->bufflen);
 237 #endif
 238       if (SCpnt->buffer != SCpnt->request.buffer)
 239         scsi_free(SCpnt->buffer, SCpnt->bufflen);
 240     };
 241 
 242 /*
 243         Now, if we were good little boys and girls, Santa left us a request
 244         sense buffer.  We can extract information from this, so we
 245         can choose a block to remap, etc.
 246 */
 247 
 248         if (driver_byte(result) != 0) {
 249           if (sugestion(result) == SUGGEST_REMAP) {
 250 #ifdef REMAP
 251 /*
 252         Not yet implemented.  A read will fail after being remapped,
 253         a write will call the strategy routine again.
 254 */
 255             if rscsi_disks[DEVICE_NR(SCpnt->request.dev)].remap
 256               {
 257                 result = 0;
 258               }
 259             else
 260               
 261 #endif
 262             }
 263 
 264           if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
 265             if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
 266               if(rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->removable) {
 267               /* detected disc change.  set a bit and quietly refuse    */
 268               /* further access.                                        */
 269               
 270                 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->changed = 1;
 271                 end_scsi_request(SCpnt, 0, this_count);
 272                 requeue_sd_request(SCpnt);
 273                 return;
 274               }
 275             }
 276           }
 277           
 278 
 279 /*      If we had an ILLEGAL REQUEST returned, then we may have
 280 performed an unsupported command.  The only thing this should be would
 281 be a ten byte read where only a six byte read was supportted.  Also,
 282 on a system where READ CAPACITY failed, we mave have read past the end
 283 of the  disk. 
 284 */
 285 
 286           if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
 287             if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten) {
 288               rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten = 0;
 289               requeue_sd_request(SCpnt);
 290               result = 0;
 291             } else {
 292             }
 293           }
 294         }  /* driver byte != 0 */
 295         if (result) {
 296                 printk("SCSI disk error : host %d id %d lun %d return code = %x\n",
 297                        rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->host->host_no,
 298                        rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->id,
 299                        rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->lun, result);
 300 
 301                 if (driver_byte(result) & DRIVER_SENSE)
 302                         print_sense("sd", SCpnt);
 303                 end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
 304                 requeue_sd_request(SCpnt);
 305                 return;
 306         }
 307 }
 308 
 309 /*
 310         requeue_sd_request() is the request handler function for the sd driver.
 311         Its function in life is to take block device requests, and translate
 312         them to SCSI commands.
 313 */
 314 
static void do_sd_request (void)
{
  Scsi_Cmnd * SCpnt = NULL;
  struct request * req = NULL;
  int flag = 0;   /* ensures allocate_device is called at most once (see below) */
  while (1==1){
    cli();
    /* dev == -1 marks a request already claimed; nothing more to start. */
    if (CURRENT != NULL && CURRENT->dev == -1) {
      sti();
      return;
    };

    INIT_SCSI_REQUEST;


/* We have to be careful here.  allocate_device will get a free pointer, but
   there is no guarantee that it is queueable.  In normal usage, we want to
   call this, because other types of devices may have the host all tied up,
   and we want to make sure that we have at least one request pending for this
   type of device.   We can also come through here while servicing an
   interrupt, because of the need to start another command.  If we call
   allocate_device more than once, then the system can wedge if the command
   is not queueable.  The request_queueable function is safe because it checks
   to make sure that the host is able to take another command before it returns
   a pointer.  */

    if (flag++ == 0)
      SCpnt = allocate_device(&CURRENT,
                              rscsi_disks[DEVICE_NR(MINOR(CURRENT->dev))].device->index, 0); 
    else SCpnt = NULL;
    sti();

/* This is a performance enhancement.  We dig down into the request list and
   try and find a queueable request (i.e. device not busy, and host able to
   accept another command.  If we find one, then we queue it. This can
   make a big difference on systems with more than one disk drive.  We want
   to have the interrupts off when monkeying with the request list, because
   otherwise the kernel might try and slip in a request inbetween somewhere. */

    if (!SCpnt && NR_SD > 1){
      struct request *req1;
      req1 = NULL;    /* trails one entry behind req for list unlinking */
      cli();
      req = CURRENT;
      while(req){
        SCpnt = request_queueable(req,
                                  rscsi_disks[DEVICE_NR(MINOR(req->dev))].device->index);
        if(SCpnt) break;
        req1 = req;
        req = req->next;
      };
      /* Unlink the claimed request (dev == -1 presumably set by
         request_queueable -- confirm against its definition). */
      if (SCpnt && req->dev == -1) {
        if (req == CURRENT) 
          CURRENT = CURRENT->next;
        else
          req1->next = req->next;
      };
      sti();
    };
    
    if (!SCpnt) return; /* Could not find anything to do */
        
    /* Queue command */
    requeue_sd_request(SCpnt);
  };  /* While */
}    
 381 
 382 static void requeue_sd_request (Scsi_Cmnd * SCpnt)
     /* [previous][next][first][last][top][bottom][index][help] */
 383 {
 384         int dev, block, this_count;
 385         unsigned char cmd[10];
 386         int bounce_size, contiguous;
 387         int max_sg;
 388         struct buffer_head * bh, *bhp;
 389         char * buff, *bounce_buffer;
 390 
 391 repeat:
 392 
 393         if(SCpnt->request.dev <= 0) {
 394           do_sd_request();
 395           return;
 396         }
 397 
 398         dev =  MINOR(SCpnt->request.dev);
 399         block = SCpnt->request.sector;
 400         this_count = 0;
 401 
 402 #ifdef DEBUG
 403         printk("Doing sd request, dev = %d, block = %d\n", dev, block);
 404 #endif
 405 
 406         if (dev >= (NR_SD << 4) || block + SCpnt->request.nr_sectors > sd[dev].nr_sects)
 407                 {
 408                 end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 409                 goto repeat;
 410                 }
 411 
 412         block += sd[dev].start_sect;
 413         dev = DEVICE_NR(dev);
 414 
 415         if (rscsi_disks[dev].device->changed)
 416                 {
 417 /*
 418  * quietly refuse to do anything to a changed disc until the changed bit has been reset
 419  */
 420                 /* printk("SCSI disk has been changed.  Prohibiting further I/O.\n");   */
 421                 end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 422                 goto repeat;
 423                 }
 424 
 425 #ifdef DEBUG
 426         printk("sd%d : real dev = /dev/sd%d, block = %d\n", MINOR(SCpnt->request.dev), dev, block);
 427 #endif
 428 
 429         switch (SCpnt->request.cmd)
 430                 {
 431                 case WRITE :
 432                         if (!rscsi_disks[dev].device->writeable)
 433                                 {
 434                                 end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 435                                 goto repeat;
 436                                 }
 437                         cmd[0] = WRITE_6;
 438                         break;
 439                 case READ :
 440                         cmd[0] = READ_6;
 441                         break;
 442                 default :
 443                         panic ("Unknown sd command %d\n", SCpnt->request.cmd);
 444                       }
 445 
 446         SCpnt->this_count = 0;
 447 
 448         /* If the host adapter can deal with very large scatter-gather
 449            requests, it is a waste of time to cluster */
 450         contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
 451         bounce_buffer = NULL;
 452         bounce_size = (SCpnt->request.nr_sectors << 9);
 453 
 454         /* First see if we need a bounce buffer for this request.  If we do, make sure
 455            that we can allocate a buffer.  Do not waste space by allocating a bounce
 456            buffer if we are straddling the 16Mb line */
 457 
 458         
 459         if (contiguous && SCpnt->request.bh &&
 460             ((int) SCpnt->request.bh->b_data) + (SCpnt->request.nr_sectors << 9) - 1 > 
 461             ISA_DMA_THRESHOLD && SCpnt->host->unchecked_isa_dma) {
 462           if(((int) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
 463             bounce_buffer = scsi_malloc(bounce_size);
 464           if(!bounce_buffer) contiguous = 0;
 465         };
 466 
 467         if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
 468           for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp, 
 469               bhp = bhp->b_reqnext) {
 470             if(!CONTIGUOUS_BUFFERS(bh,bhp)) { 
 471               if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
 472               contiguous = 0;
 473               break;
 474             } 
 475           };
 476         if (!SCpnt->request.bh || contiguous) {
 477 
 478           /* case of page request (i.e. raw device), or unlinked buffer */
 479           this_count = SCpnt->request.nr_sectors;
 480           buff = SCpnt->request.buffer;
 481           SCpnt->use_sg = 0;
 482 
 483         } else if (SCpnt->host->sg_tablesize == 0 ||
 484                    (need_isa_buffer && 
 485                     dma_free_sectors <= 10)) {
 486 
 487           /* Case of host adapter that cannot scatter-gather.  We also
 488            come here if we are running low on DMA buffer memory.  We set
 489            a threshold higher than that we would need for this request so
 490            we leave room for other requests.  Even though we would not need
 491            it all, we need to be conservative, because if we run low enough
 492            we have no choice but to panic. */
 493 
 494           if (SCpnt->host->sg_tablesize != 0 &&
 495               need_isa_buffer && 
 496               dma_free_sectors <= 10)
 497             printk("Warning: SCSI DMA buffer space running low.  Using non scatter-gather I/O.\n");
 498 
 499           this_count = SCpnt->request.current_nr_sectors;
 500           buff = SCpnt->request.buffer;
 501           SCpnt->use_sg = 0;
 502 
 503         } else {
 504 
 505           /* Scatter-gather capable host adapter */
 506           struct scatterlist * sgpnt;
 507           int count, this_count_max;
 508           int counted;
 509 
 510           bh = SCpnt->request.bh;
 511           this_count = 0;
 512           this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
 513           count = 0;
 514           bhp = NULL;
 515           while(bh) {
 516             if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
 517             if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
 518                !CLUSTERABLE_DEVICE(SCpnt) ||
 519                (SCpnt->host->unchecked_isa_dma &&
 520                ((unsigned int) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
 521               if (count < SCpnt->host->sg_tablesize) count++;
 522               else break;
 523             };
 524             this_count += (bh->b_size >> 9);
 525             bhp = bh;
 526             bh = bh->b_reqnext;
 527           };
 528 #if 0
 529           if(SCpnt->host->unchecked_isa_dma &&
 530              ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
 531 #endif
 532           SCpnt->use_sg = count;  /* Number of chains */
 533           count = 512;/* scsi_malloc can only allocate in chunks of 512 bytes*/
 534           while( count < (SCpnt->use_sg * sizeof(struct scatterlist))) 
 535             count = count << 1;
 536           SCpnt->sglist_len = count;
 537           max_sg = count / sizeof(struct scatterlist);
 538           if(SCpnt->host->sg_tablesize < max_sg) max_sg = SCpnt->host->sg_tablesize;
 539           sgpnt = (struct scatterlist * ) scsi_malloc(count);
 540           memset(sgpnt, 0, count);  /* Zero so it is easy to fill */
 541           if (!sgpnt) {
 542             printk("Warning - running *really* short on DMA buffers\n");
 543             SCpnt->use_sg = 0;  /* No memory left - bail out */
 544             this_count = SCpnt->request.current_nr_sectors;
 545             buff = SCpnt->request.buffer;
 546           } else {
 547             buff = (char *) sgpnt;
 548             counted = 0;
 549             for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
 550                 count < SCpnt->use_sg && bh; 
 551                 count++, bh = bhp) {
 552 
 553               bhp = bh->b_reqnext;
 554 
 555               if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
 556               sgpnt[count].length += bh->b_size;
 557               counted += bh->b_size >> 9;
 558 
 559               if (((int) sgpnt[count].address) + sgpnt[count].length - 1 > 
 560                   ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
 561                   !sgpnt[count].alt_address) {
 562                 sgpnt[count].alt_address = sgpnt[count].address;
 563                 /* We try and avoid exhausting the DMA pool, since it is easier
 564                    to control usage here.  In other places we might have a more
 565                    pressing need, and we would be screwed if we ran out */
 566                 if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
 567                   sgpnt[count].address = NULL;
 568                 } else {
 569                   sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
 570                 };
 571 /* If we start running low on DMA buffers, we abort the scatter-gather
 572    operation, and free all of the memory we have allocated.  We want to
 573    ensure that all scsi operations are able to do at least a non-scatter/gather
 574    operation */
 575                 if(sgpnt[count].address == NULL){ /* Out of dma memory */
 576 #if 0
 577                   printk("Warning: Running low on SCSI DMA buffers");
 578                   /* Try switching back to a non scatter-gather operation. */
 579                   while(--count >= 0){
 580                     if(sgpnt[count].alt_address) 
 581                       scsi_free(sgpnt[count].address, sgpnt[count].length);
 582                   };
 583                   this_count = SCpnt->request.current_nr_sectors;
 584                   buff = SCpnt->request.buffer;
 585                   SCpnt->use_sg = 0;
 586                   scsi_free(sgpnt, SCpnt->sglist_len);
 587 #endif
 588                   SCpnt->use_sg = count;
 589                   this_count = counted -= bh->b_size >> 9;
 590                   break;
 591                 };
 592 
 593               };
 594 
 595               /* Only cluster buffers if we know that we can supply DMA buffers
 596                  large enough to satisfy the request.  Do not cluster a new
 597                  request if this would mean that we suddenly need to start
 598                  using DMA bounce buffers */
 599               if(bhp && CONTIGUOUS_BUFFERS(bh,bhp) && CLUSTERABLE_DEVICE(SCpnt)) {
 600                 char * tmp;
 601 
 602                 if (((int) sgpnt[count].address) + sgpnt[count].length +
 603                     bhp->b_size - 1 > ISA_DMA_THRESHOLD && 
 604                     (SCpnt->host->unchecked_isa_dma) &&
 605                     !sgpnt[count].alt_address) continue;
 606 
 607                 if(!sgpnt[count].alt_address) {count--; continue; }
 608                 if(dma_free_sectors > 10)
 609                   tmp = scsi_malloc(sgpnt[count].length + bhp->b_size);
 610                 else {
 611                   tmp = NULL;
 612                   max_sg = SCpnt->use_sg;
 613                 };
 614                 if(tmp){
 615                   scsi_free(sgpnt[count].address, sgpnt[count].length);
 616                   sgpnt[count].address = tmp;
 617                   count--;
 618                   continue;
 619                 };
 620 
 621                 /* If we are allowed another sg chain, then increment counter so we
 622                    can insert it.  Otherwise we will end up truncating */
 623 
 624                 if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
 625               };  /* contiguous buffers */
 626             }; /* for loop */
 627 
 628             this_count = counted; /* This is actually how many we are going to transfer */
 629 
 630             if(count < SCpnt->use_sg || SCpnt->use_sg > SCpnt->host->sg_tablesize){
 631               bh = SCpnt->request.bh;
 632               printk("Use sg, count %d %x %d\n", SCpnt->use_sg, count, dma_free_sectors);
 633               printk("maxsg = %x, counted = %d this_count = %d\n", max_sg, counted, this_count);
 634               while(bh){
 635                 printk("[%p %lx] ", bh->b_data, bh->b_size);
 636                 bh = bh->b_reqnext;
 637               };
 638               if(SCpnt->use_sg < 16)
 639                 for(count=0; count<SCpnt->use_sg; count++)
 640                   printk("{%d:%p %p %d}  ", count,
 641                          sgpnt[count].address,
 642                          sgpnt[count].alt_address,
 643                          sgpnt[count].length);
 644               panic("Ooops");
 645             };
 646 
 647             if (SCpnt->request.cmd == WRITE)
 648               for(count=0; count<SCpnt->use_sg; count++)
 649                 if(sgpnt[count].alt_address)
 650                   memcpy(sgpnt[count].address, sgpnt[count].alt_address, 
 651                          sgpnt[count].length);
 652           };  /* Able to malloc sgpnt */
 653         };  /* Host adapter capable of scatter-gather */
 654 
 655 /* Now handle the possibility of DMA to addresses > 16Mb */
 656 
 657         if(SCpnt->use_sg == 0){
 658           if (((int) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD && 
 659             (SCpnt->host->unchecked_isa_dma)) {
 660             if(bounce_buffer)
 661               buff = bounce_buffer;
 662             else
 663               buff = (char *) scsi_malloc(this_count << 9);
 664             if(buff == NULL) {  /* Try backing off a bit if we are low on mem*/
 665               this_count = SCpnt->request.current_nr_sectors;
 666               buff = (char *) scsi_malloc(this_count << 9);
 667               if(!buff) panic("Ran out of DMA buffers.");
 668             };
 669             if (SCpnt->request.cmd == WRITE)
 670               memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
 671           };
 672         };
 673 #ifdef DEBUG
 674         printk("sd%d : %s %d/%d 512 byte blocks.\n", MINOR(SCpnt->request.dev),
 675                 (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
 676                 this_count, SCpnt->request.nr_sectors);
 677 #endif
 678 
 679         cmd[1] = (SCpnt->lun << 5) & 0xe0;
 680 
 681         if (rscsi_disks[dev].sector_size == 1024){
 682           if(block & 1) panic("sd.c:Bad block number requested");
 683           if(this_count & 1) panic("sd.c:Bad block number requested");
 684           block = block >> 1;
 685           this_count = this_count >> 1;
 686         };
 687 
 688         if (rscsi_disks[dev].sector_size == 256){
 689           block = block << 1;
 690           this_count = this_count << 1;
 691         };
 692 
 693         if (((this_count > 0xff) ||  (block > 0x1fffff)) && rscsi_disks[dev].ten)
 694                 {
 695                 if (this_count > 0xffff)
 696                         this_count = 0xffff;
 697 
 698                 cmd[0] += READ_10 - READ_6 ;
 699                 cmd[2] = (unsigned char) (block >> 24) & 0xff;
 700                 cmd[3] = (unsigned char) (block >> 16) & 0xff;
 701                 cmd[4] = (unsigned char) (block >> 8) & 0xff;
 702                 cmd[5] = (unsigned char) block & 0xff;
 703                 cmd[6] = cmd[9] = 0;
 704                 cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
 705                 cmd[8] = (unsigned char) this_count & 0xff;
 706                 }
 707         else
 708                 {
 709                 if (this_count > 0xff)
 710                         this_count = 0xff;
 711 
 712                 cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
 713                 cmd[2] = (unsigned char) ((block >> 8) & 0xff);
 714                 cmd[3] = (unsigned char) block & 0xff;
 715                 cmd[4] = (unsigned char) this_count;
 716                 cmd[5] = 0;
 717                 }
 718 
 719 /*
 720  * We shouldn't disconnect in the middle of a sector, so with a dumb 
 721  * host adapter, it's safe to assume that we can at least transfer 
 722  * this many bytes between each connect / disconnect.  
 723  */
 724 
 725         SCpnt->transfersize = rscsi_disks[dev].sector_size;
 726         SCpnt->underflow = this_count << 9; 
 727         scsi_do_cmd (SCpnt, (void *) cmd, buff, 
 728                      this_count * rscsi_disks[dev].sector_size,
 729                      rw_intr, 
 730                      (scsi_devices[SCpnt->index].type == TYPE_DISK ? 
 731                                      SD_TIMEOUT : SD_MOD_TIMEOUT),
 732                      MAX_RETRIES);
 733 }
 734 
 735 int check_scsidisk_media_change(int full_dev, int flag){
     /* [previous][next][first][last][top][bottom][index][help] */
 736         int retval;
 737         int target;
 738         struct inode inode;
 739 
 740         target =  DEVICE_NR(MINOR(full_dev));
 741 
 742         if (target >= NR_SD) {
 743                 printk("SCSI disk request error: invalid device.\n");
 744                 return 0;
 745         };
 746 
 747         if(!rscsi_disks[target].device->removable) return 0;
 748 
 749         inode.i_rdev = full_dev;  /* This is all we really need here */
 750         retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
 751 
 752         if(retval){ /* Unable to test, unit probably not ready.  This usually
 753                      means there is no disc in the drive.  Mark as changed,
 754                      and we will figure it out later once the drive is
 755                      available again.  */
 756 
 757           rscsi_disks[target].device->changed = 1;
 758           return 1; /* This will force a flush, if called from
 759                        check_disk_change */
 760         };
 761 
 762         retval = rscsi_disks[target].device->changed;
 763         if(!flag) rscsi_disks[target].device->changed = 0;
 764         return retval;
 765 }
 766 
 767 static void sd_init_done (Scsi_Cmnd * SCpnt)
     /* [previous][next][first][last][top][bottom][index][help] */
 768 {
 769   struct request * req;
 770   struct task_struct * p;
 771   
 772   req = &SCpnt->request;
 773   req->dev = 0xfffe; /* Busy, but indicate request done */
 774   
 775   if ((p = req->waiting) != NULL) {
 776     req->waiting = NULL;
 777     p->state = TASK_RUNNING;
 778     if (p->counter > current->counter)
 779       need_resched = 1;
 780   }
 781 }
 782 
 783 static int sd_init_onedisk(int i)
     /* [previous][next][first][last][top][bottom][index][help] */
 784 {
 785   int j = 0;
 786   unsigned char cmd[10];
 787   unsigned char *buffer;
 788   char spintime;
 789   int the_result, retries;
 790   Scsi_Cmnd * SCpnt;
 791 
 792   /* We need to retry the READ_CAPACITY because a UNIT_ATTENTION is considered
 793      a fatal error, and many devices report such an error just after a scsi
 794      bus reset. */
 795 
 796   SCpnt = allocate_device(NULL, rscsi_disks[i].device->index, 1);
 797   buffer = (unsigned char *) scsi_malloc(512);
 798 
 799   spintime = 0;
 800 
 801   /* Spin up drives, as required.  Only do this at boot time */
 802   if (current == task[0]){
 803     do{
 804       cmd[0] = TEST_UNIT_READY;
 805       cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
 806       memset ((void *) &cmd[2], 0, 8);
 807       SCpnt->request.dev = 0xffff;  /* Mark as really busy again */
 808       SCpnt->sense_buffer[0] = 0;
 809       SCpnt->sense_buffer[2] = 0;
 810       
 811       scsi_do_cmd (SCpnt,
 812                    (void *) cmd, (void *) buffer,
 813                    512, sd_init_done,  SD_TIMEOUT,
 814                    MAX_RETRIES);
 815       
 816       while(SCpnt->request.dev != 0xfffe);
 817       
 818       the_result = SCpnt->result;
 819       
 820       /* Look for non-removable devices that return NOT_READY.  Issue command
 821          to spin up drive for these cases. */
 822       if(the_result && !rscsi_disks[i].device->removable && 
 823          SCpnt->sense_buffer[2] == NOT_READY) {
 824         int time1;
 825         if(!spintime){
 826           printk( "sd%d: Spinning up disk...", i );
 827           cmd[0] = START_STOP;
 828           cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
 829           cmd[1] |= 1;  /* Return immediately */
 830           memset ((void *) &cmd[2], 0, 8);
 831           cmd[4] = 1; /* Start spin cycle */
 832           SCpnt->request.dev = 0xffff;  /* Mark as really busy again */
 833           SCpnt->sense_buffer[0] = 0;
 834           SCpnt->sense_buffer[2] = 0;
 835           
 836           scsi_do_cmd (SCpnt,
 837                        (void *) cmd, (void *) buffer,
 838                        512, sd_init_done,  SD_TIMEOUT,
 839                        MAX_RETRIES);
 840           
 841           while(SCpnt->request.dev != 0xfffe);
 842 
 843           spintime = jiffies;
 844         };
 845 
 846         time1 = jiffies;
 847         while(jiffies < time1 + 100); /* Wait 1 second for next try */
 848         printk( "." );
 849       };
 850     } while(the_result && spintime && spintime+5000 > jiffies);
 851     if (spintime) {
 852        if (the_result)
 853            printk( "not responding...\n" );
 854        else
 855            printk( "ready\n" );
 856     }
 857   };  /* current == task[0] */
 858 
 859 
 860   retries = 3;
 861   do {
 862     cmd[0] = READ_CAPACITY;
 863     cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
 864     memset ((void *) &cmd[2], 0, 8);
 865     memset ((void *) buffer, 0, 8);
 866     SCpnt->request.dev = 0xffff;  /* Mark as really busy again */
 867     SCpnt->sense_buffer[0] = 0;
 868     SCpnt->sense_buffer[2] = 0;
 869     
 870     scsi_do_cmd (SCpnt,
 871                  (void *) cmd, (void *) buffer,
 872                  8, sd_init_done,  SD_TIMEOUT,
 873                  MAX_RETRIES);
 874     
 875     if (current == task[0])
 876       while(SCpnt->request.dev != 0xfffe);
 877     else
 878       if (SCpnt->request.dev != 0xfffe){
 879         SCpnt->request.waiting = current;
 880         current->state = TASK_UNINTERRUPTIBLE;
 881         while (SCpnt->request.dev != 0xfffe) schedule();
 882       };
 883     
 884     the_result = SCpnt->result;
 885     retries--;
 886 
 887   } while(the_result && retries);
 888 
 889   SCpnt->request.dev = -1;  /* Mark as not busy */
 890 
 891   wake_up(&scsi_devices[SCpnt->index].device_wait); 
 892 
 893   /* Wake up a process waiting for device*/
 894 
 895   /*
 896    *    The SCSI standard says "READ CAPACITY is necessary for self confuring software"
 897    *    While not mandatory, support of READ CAPACITY is strongly encouraged.
 898    *    We used to die if we couldn't successfully do a READ CAPACITY.
 899    *    But, now we go on about our way.  The side effects of this are
 900    *
 901    *    1.  We can't know block size with certainty.  I have said "512 bytes is it"
 902    *            as this is most common.
 903    *
 904    *    2.  Recovery from when some one attempts to read past the end of the raw device will
 905    *        be slower.
 906    */
 907 
 908   if (the_result)
 909     {
 910       printk ("sd%d : READ CAPACITY failed.\n"
 911               "sd%d : status = %x, message = %02x, host = %d, driver = %02x \n",
 912               i,i,
 913               status_byte(the_result),
 914               msg_byte(the_result),
 915               host_byte(the_result),
 916               driver_byte(the_result)
 917               );
 918       if (driver_byte(the_result)  & DRIVER_SENSE)
 919         printk("sd%d : extended sense code = %1x \n", i, SCpnt->sense_buffer[2] & 0xf);
 920       else
 921         printk("sd%d : sense not available. \n", i);
 922 
 923       printk("sd%d : block size assumed to be 512 bytes, disk size 1GB.  \n", i);
 924       rscsi_disks[i].capacity = 0x1fffff;
 925       rscsi_disks[i].sector_size = 512;
 926 
 927       /* Set dirty bit for removable devices if not ready - sometimes drives
 928          will not report this properly. */
 929       if(rscsi_disks[i].device->removable && 
 930          SCpnt->sense_buffer[2] == NOT_READY)
 931         rscsi_disks[i].device->changed = 1;
 932 
 933     }
 934   else
 935     {
 936       rscsi_disks[i].capacity = (buffer[0] << 24) |
 937         (buffer[1] << 16) |
 938           (buffer[2] << 8) |
 939             buffer[3];
 940 
 941       rscsi_disks[i].sector_size = (buffer[4] << 24) |
 942         (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
 943 
 944       if (rscsi_disks[i].sector_size != 512 &&
 945           rscsi_disks[i].sector_size != 1024 &&
 946           rscsi_disks[i].sector_size != 256)
 947         {
 948           printk ("sd%d : unsupported sector size %d.\n",
 949                   i, rscsi_disks[i].sector_size);
 950           if(rscsi_disks[i].device->removable){
 951             rscsi_disks[i].capacity = 0;
 952           } else {
 953             printk ("scsi : deleting disk entry.\n");
 954             for  (j=i;  j < NR_SD - 1;)
 955               rscsi_disks[j] = rscsi_disks[++j];
 956             --i;
 957             --NR_SD;
 958             scsi_free(buffer, 512);
 959             return i;
 960           };
 961         }
 962       if(rscsi_disks[i].sector_size == 1024)
 963         rscsi_disks[i].capacity <<= 1;  /* Change this into 512 byte sectors */
 964       if(rscsi_disks[i].sector_size == 256)
 965         rscsi_disks[i].capacity >>= 1;  /* Change this into 512 byte sectors */
 966     }
 967 
 968   rscsi_disks[i].ten = 1;
 969   rscsi_disks[i].remap = 1;
 970   scsi_free(buffer, 512);
 971   return i;
 972 }
 973 
 974 /*
 975         The sd_init() function looks at all SCSI drives present, determines
 976         their size, and reads partition table entries for them.
 977 */
 978 
 979 
 980 unsigned long sd_init(unsigned long memory_start, unsigned long memory_end)
     /* [previous][next][first][last][top][bottom][index][help] */
 981 {
 982         int i;
 983 
 984         if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
 985                 printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
 986                 return memory_start;
 987         }
 988         if (MAX_SD == 0) return memory_start;
 989 
 990         sd_sizes = (int *) memory_start;
 991         memory_start += (MAX_SD << 4) * sizeof(int);
 992         memset(sd_sizes, 0, (MAX_SD << 4) * sizeof(int));
 993 
 994         sd_blocksizes = (int *) memory_start;
 995         memory_start += (MAX_SD << 4) * sizeof(int);
 996         for(i=0;i<(MAX_SD << 4);i++) sd_blocksizes[i] = 1024;
 997         blksize_size[MAJOR_NR] = sd_blocksizes;
 998 
 999         sd = (struct hd_struct *) memory_start;
1000         memory_start += (MAX_SD << 4) * sizeof(struct hd_struct);
1001 
1002         sd_gendisk.max_nr = MAX_SD;
1003         sd_gendisk.part = sd;
1004         sd_gendisk.sizes = sd_sizes;
1005         sd_gendisk.real_devices = (void *) rscsi_disks;
1006 
1007         for (i = 0; i < NR_SD; ++i)
1008           i = sd_init_onedisk(i);
1009 
1010         blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
1011 
1012         /* If our host adapter is capable of scatter-gather, then we increase
1013            the read-ahead to 16 blocks (32 sectors).  If not, we use
1014            a two block (4 sector) read ahead. */
1015         if(rscsi_disks[0].device->host->sg_tablesize)
1016           read_ahead[MAJOR_NR] = 120;
1017         /* 64 sector read-ahead */
1018         else
1019           read_ahead[MAJOR_NR] = 4;  /* 4 sector read-ahead */
1020         
1021         sd_gendisk.next = gendisk_head;
1022         gendisk_head = &sd_gendisk;
1023         return memory_start;
1024 }
1025 
1026 unsigned long sd_init1(unsigned long mem_start, unsigned long mem_end){
     /* [previous][next][first][last][top][bottom][index][help] */
1027   rscsi_disks = (Scsi_Disk *) mem_start;
1028   mem_start += MAX_SD * sizeof(Scsi_Disk);
1029   return mem_start;
1030 };
1031 
1032 void sd_attach(Scsi_Device * SDp){
     /* [previous][next][first][last][top][bottom][index][help] */
1033   SDp->scsi_request_fn = do_sd_request;
1034   rscsi_disks[NR_SD++].device = SDp;
1035   if(NR_SD > MAX_SD) panic ("scsi_devices corrupt (sd)");
1036 };
1037 
1038 #define DEVICE_BUSY rscsi_disks[target].device->busy
1039 #define USAGE rscsi_disks[target].device->access_count
1040 #define CAPACITY rscsi_disks[target].capacity
1041 #define MAYBE_REINIT  sd_init_onedisk(target)
1042 #define GENDISK_STRUCT sd_gendisk
1043 
1044 /* This routine is called to flush all partitions and partition tables
1045    for a changed scsi disk, and then re-read the new partition table.
1046    If we are revalidating a disk because of a media change, then we
1047    enter with usage == 0.  If we are using an ioctl, we automatically have
1048    usage == 1 (we need an open channel to use an ioctl :-), so this
1049    is our limit.
1050  */
1051 int revalidate_scsidisk(int dev, int maxusage){
     /* [previous][next][first][last][top][bottom][index][help] */
1052           int target, major;
1053           struct gendisk * gdev;
1054           int max_p;
1055           int start;
1056           int i;
1057 
1058           target =  DEVICE_NR(MINOR(dev));
1059           gdev = &GENDISK_STRUCT;
1060 
1061           cli();
1062           if (DEVICE_BUSY || USAGE > maxusage) {
1063             sti();
1064             printk("Device busy for revalidation (usage=%d)\n", USAGE);
1065             return -EBUSY;
1066           };
1067           DEVICE_BUSY = 1;
1068           sti();
1069 
1070           max_p = gdev->max_p;
1071           start = target << gdev->minor_shift;
1072           major = MAJOR_NR << 8;
1073 
1074           for (i=max_p - 1; i >=0 ; i--) {
1075             sync_dev(major | start | i);
1076             invalidate_inodes(major | start | i);
1077             invalidate_buffers(major | start | i);
1078             gdev->part[start+i].start_sect = 0;
1079             gdev->part[start+i].nr_sects = 0;
1080           };
1081 
1082 #ifdef MAYBE_REINIT
1083           MAYBE_REINIT;
1084 #endif
1085 
1086           gdev->part[start].nr_sects = CAPACITY;
1087           resetup_one_dev(gdev, target);
1088 
1089           DEVICE_BUSY = 0;
1090           return 0;
1091 }
1092 

/* [previous][next][first][last][top][bottom][index][help] */