root/drivers/scsi/sd.c


DEFINITIONS

This source file includes the following definitions.
  1. sd_open
  2. sd_release
  3. sd_geninit
  4. rw_intr
  5. do_sd_request
  6. requeue_sd_request
  7. check_scsidisk_media_change
  8. sd_init_done
  9. sd_init_onedisk
  10. sd_init
  11. sd_finish
  12. sd_detect
  13. sd_attach
  14. revalidate_scsidisk
  15. fop_revalidate_scsidisk
  16. sd_detach

   1 /*
   2  *      sd.c Copyright (C) 1992 Drew Eckhardt 
   3  *           Copyright (C) 1993, 1994, 1995 Eric Youngdale
   4  *
   5  *      Linux scsi disk driver
   6  *              Initial versions: Drew Eckhardt 
   7  *              Subsequent revisions: Eric Youngdale
   8  *
   9  *      <drew@colorado.edu>
  10  *
  11  *       Modified by Eric Youngdale ericy@cais.com to
  12  *       add scatter-gather, multiple outstanding request, and other
  13  *       enhancements.
  14  *
  15  *       Modified by Eric Youngdale eric@aib.com to support loadable
  16  *       low-level scsi drivers.
  17  */
  18 
  19 #include <linux/fs.h>
  20 #include <linux/kernel.h>
  21 #include <linux/sched.h>
  22 #include <linux/mm.h>
  23 #include <linux/string.h>
  24 #include <linux/errno.h>
  25 #include <asm/system.h>
  26 
  27 #define MAJOR_NR SCSI_DISK_MAJOR
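      /* MAJOR_NR must be defined before blk.h is pulled in below, so that the
         per-major macros in ../block/blk.h (CURRENT, DEVICE_NR, DEVICE_REQUEST
         and friends) are expanded for the SCSI disk major rather than for some
         other block driver. */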
  28 #include "../block/blk.h"
  29 #include "scsi.h"
  30 #include "hosts.h"
  31 #include "sd.h"
  32 #include "scsi_ioctl.h"
  33 #include "constants.h"
  34 
  35 #include <linux/genhd.h>
  36 
  37 /*
  38 static const char RCSid[] = "$Header:";
  39 */
  40 
  41 #define MAX_RETRIES 5
  42 
  43 /*
  44  *      Time out in seconds for disks and Magneto-opticals (which are slower).
  45  */
  46 
  47 #define SD_TIMEOUT 600
  48 #define SD_MOD_TIMEOUT 750
  49 
  50 #define CLUSTERABLE_DEVICE(SC) (SC->host->hostt->use_clustering && \
  51                             SC->device->type != TYPE_MOD)
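      /* "Clustering" here means merging adjacent buffer heads into a single
         SCSI command in requeue_sd_request() below; it is only attempted when
         the low-level driver asks for it and the device is not a
         magneto-optical (TYPE_MOD). */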
  52 
  53 struct hd_struct * sd;
  54 int revalidate_scsidisk(int dev, int maxusage);
  55 
  56 Scsi_Disk * rscsi_disks = NULL;
  57 static int * sd_sizes;
  58 static int * sd_blocksizes;
  59 static int * sd_hardsizes;              /* Hardware sector size */
  60 
  61 extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
  62 
  63 static int check_scsidisk_media_change(dev_t);
  64 static int fop_revalidate_scsidisk(dev_t);
  65 
   66 static int sd_init_onedisk(int);
  67 
  68 static void requeue_sd_request (Scsi_Cmnd * SCpnt);
  69 
  70 static void sd_init(void);
  71 static void sd_finish(void);
  72 static int sd_attach(Scsi_Device *);
  73 static int sd_detect(Scsi_Device *);
  74 static void sd_detach(Scsi_Device *);
  75 
  76 struct Scsi_Device_Template sd_template = {NULL, "disk", "sd", TYPE_DISK, 
  77                                              SCSI_DISK_MAJOR, 0, 0, 0, 1,
  78                                              sd_detect, sd_init,
  79                                              sd_finish, sd_attach, sd_detach};
  80 
  81 static int sd_open(struct inode * inode, struct file * filp)
  82 {
  83         int target;
  84         target =  DEVICE_NR(MINOR(inode->i_rdev));
  85 
  86         if(target >= sd_template.dev_max || !rscsi_disks[target].device)
  87           return -ENXIO;   /* No such device */
  88         
   89 /* Make sure that only one process can do a check_disk_change at one time.
   90    This is also used to lock out further access when the partition table is being re-read. */
  91 
  92         while (rscsi_disks[target].device->busy)
  93           barrier();
  94 
  95         if(rscsi_disks[target].device->removable) {
  96           check_disk_change(inode->i_rdev);
  97 
  98           if(!rscsi_disks[target].device->access_count)
  99             sd_ioctl(inode, NULL, SCSI_IOCTL_DOORLOCK, 0);
 100         };
 101         /*
 102          * See if we are requesting a non-existent partition.  Do this
 103          * after checking for disk change.
 104          */
 105         if(sd_sizes[MINOR(inode->i_rdev)] == 0)
 106           return -ENXIO;
 107 
 108         rscsi_disks[target].device->access_count++;
 109         if (rscsi_disks[target].device->host->hostt->usage_count)
 110           (*rscsi_disks[target].device->host->hostt->usage_count)++;
 111         return 0;
 112 }
 113 
 114 static void sd_release(struct inode * inode, struct file * file)
 115 {
 116         int target;
 117         sync_dev(inode->i_rdev);
 118 
 119         target =  DEVICE_NR(MINOR(inode->i_rdev));
 120 
 121         rscsi_disks[target].device->access_count--;
 122         if (rscsi_disks[target].device->host->hostt->usage_count)
 123           (*rscsi_disks[target].device->host->hostt->usage_count)--;
 124 
 125         if(rscsi_disks[target].device->removable) {
 126           if(!rscsi_disks[target].device->access_count)
 127             sd_ioctl(inode, NULL, SCSI_IOCTL_DOORUNLOCK, 0);
 128         };
 129 }
 130 
 131 static void sd_geninit(void);
 132 
 133 static struct file_operations sd_fops = {
 134         NULL,                   /* lseek - default */
 135         block_read,             /* read - general block-dev read */
 136         block_write,            /* write - general block-dev write */
 137         NULL,                   /* readdir - bad */
 138         NULL,                   /* select */
 139         sd_ioctl,               /* ioctl */
 140         NULL,                   /* mmap */
 141         sd_open,                /* open code */
 142         sd_release,             /* release */
 143         block_fsync,            /* fsync */
 144         NULL,                   /* fasync */
 145         check_scsidisk_media_change,  /* Disk change */
 146         fop_revalidate_scsidisk     /* revalidate */
 147 };
 148 
 149 static struct gendisk sd_gendisk = {
 150         MAJOR_NR,               /* Major number */
 151         "sd",           /* Major name */
 152         4,              /* Bits to shift to get real from partition */
 153         1 << 4,         /* Number of partitions per real */
 154         0,              /* maximum number of real */
 155         sd_geninit,     /* init function */
 156         NULL,           /* hd struct */
 157         NULL,   /* block sizes */
 158         0,              /* number */
 159         NULL,   /* internal */
 160         NULL            /* next */
 161 };
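      /* With minor_shift == 4 the minor number encodes both drive and
         partition: drive = minor >> 4, partition = minor & 0x0f, i.e. sixteen
         minors per drive (minor 0 of each group being the whole disk). */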
 162 
 163 static void sd_geninit (void)
 164 {
 165         int i;
 166 
 167         for (i = 0; i < sd_template.dev_max; ++i)
 168           if(rscsi_disks[i].device) 
 169             sd[i << 4].nr_sects = rscsi_disks[i].capacity;
 170 #if 0
 171         /* No longer needed - we keep track of this as we attach/detach */
 172         sd_gendisk.nr_real = sd_template.dev_max;
 173 #endif
 174 }
 175 
 176 /*
 177         rw_intr is the interrupt routine for the device driver.  It will
  178         be notified at the end of a SCSI read / write, and
  179         will take one of several actions based on success or failure.
 180 */
 181 
 182 static void rw_intr (Scsi_Cmnd *SCpnt)
 183 {
 184   int result = SCpnt->result;
 185   int this_count = SCpnt->bufflen >> 9;
 186 
 187 #ifdef DEBUG
 188   printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.dev), SCpnt->host->host_no, result);
 189 #endif
 190 
 191 /*
 192   First case : we assume that the command succeeded.  One of two things will
 193   happen here.  Either we will be finished, or there will be more
 194   sectors that we were unable to read last time.
 195 */
 196 
 197   if (!result) {
 198 
 199 #ifdef DEBUG
 200     printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.dev), SCpnt->request.nr_sectors);
 201     printk("use_sg is %d\n ",SCpnt->use_sg);
 202 #endif
 203     if (SCpnt->use_sg) {
 204       struct scatterlist * sgpnt;
 205       int i;
 206       sgpnt = (struct scatterlist *) SCpnt->buffer;
 207       for(i=0; i<SCpnt->use_sg; i++) {
 208 #ifdef DEBUG
 209         printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
 210 #endif
 211         if (sgpnt[i].alt_address) {
 212           if (SCpnt->request.cmd == READ)
 213             memcpy(sgpnt[i].alt_address, sgpnt[i].address, sgpnt[i].length);
 214           scsi_free(sgpnt[i].address, sgpnt[i].length);
 215         };
 216       };
 217       scsi_free(SCpnt->buffer, SCpnt->sglist_len);  /* Free list of scatter-gather pointers */
 218     } else {
 219       if (SCpnt->buffer != SCpnt->request.buffer) {
 220 #ifdef DEBUG
 221         printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 222                    SCpnt->bufflen);
 223 #endif  
 224           if (SCpnt->request.cmd == READ)
 225             memcpy(SCpnt->request.buffer, SCpnt->buffer,
 226                    SCpnt->bufflen);
 227           scsi_free(SCpnt->buffer, SCpnt->bufflen);
 228       };
 229     };
 230 /*
 231  *      If multiple sectors are requested in one buffer, then
 232  *      they will have been finished off by the first command.  If
 233  *      not, then we have a multi-buffer command.
 234  */
 235     if (SCpnt->request.nr_sectors > this_count)
 236       {
 237         SCpnt->request.errors = 0;
 238         
 239         if (!SCpnt->request.bh)
 240           {
 241 #ifdef DEBUG
 242             printk("sd%c : handling page request, no buffer\n",
 243                    'a' + MINOR(SCpnt->request.dev));
 244 #endif
 245 /*
 246   The SCpnt->request.nr_sectors field is always done in 512 byte sectors,
 247   even if this really isn't the case.
 248 */
 249             panic("sd.c: linked page request (%lx %x)",
 250                   SCpnt->request.sector, this_count);
 251           }
 252       }
 253     SCpnt = end_scsi_request(SCpnt, 1, this_count);
 254     requeue_sd_request(SCpnt);
 255     return;
 256   }
 257 
 258 /* Free up any indirection buffers we allocated for DMA purposes. */
 259     if (SCpnt->use_sg) {
 260       struct scatterlist * sgpnt;
 261       int i;
 262       sgpnt = (struct scatterlist *) SCpnt->buffer;
 263       for(i=0; i<SCpnt->use_sg; i++) {
 264 #ifdef DEBUG
 265         printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 266                    SCpnt->bufflen);
 267 #endif
 268         if (sgpnt[i].alt_address) {
 269           scsi_free(sgpnt[i].address, sgpnt[i].length);
 270         };
 271       };
 272       scsi_free(SCpnt->buffer, SCpnt->sglist_len);  /* Free list of scatter-gather pointers */
 273     } else {
 274 #ifdef DEBUG
 275       printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer,
 276                    SCpnt->bufflen);
 277 #endif
 278       if (SCpnt->buffer != SCpnt->request.buffer)
 279         scsi_free(SCpnt->buffer, SCpnt->bufflen);
 280     };
 281 
 282 /*
 283         Now, if we were good little boys and girls, Santa left us a request
 284         sense buffer.  We can extract information from this, so we
 285         can choose a block to remap, etc.
 286 */
 287 
 288         if (driver_byte(result) != 0) {
 289           if (suggestion(result) == SUGGEST_REMAP) {
 290 #ifdef REMAP
 291 /*
 292         Not yet implemented.  A read will fail after being remapped,
 293         a write will call the strategy routine again.
 294 */
  295             if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].remap)
  296               {
  297                 result = 0;
  298               }
  299 
  300 
 301 #endif
 302             }
 303 
 304           if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) {
 305             if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
 306               if(rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->removable) {
 307               /* detected disc change.  set a bit and quietly refuse    */
 308               /* further access.                                        */
 309               
 310                 rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->changed = 1;
 311                 SCpnt = end_scsi_request(SCpnt, 0, this_count);
 312                 requeue_sd_request(SCpnt);
 313                 return;
 314               }
 315             }
 316           }
 317           
 318 
 319 /*      If we had an ILLEGAL REQUEST returned, then we may have
  320 performed an unsupported command.  The most likely cause is a ten byte
  321 read where only a six byte read is supported.  Also, on a system where
  322 READ CAPACITY failed, we may have read past the end
  323 of the disk.
 324 */
 325 
 326           if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST) {
 327             if (rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten) {
 328               rscsi_disks[DEVICE_NR(SCpnt->request.dev)].ten = 0;
 329               requeue_sd_request(SCpnt);
 330               result = 0;
 331             } else {
 332             }
 333           }
 334         }  /* driver byte != 0 */
 335         if (result) {
 336                 printk("SCSI disk error : host %d id %d lun %d return code = %x\n",
 337                        rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->host->host_no,
 338                        rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->id,
 339                        rscsi_disks[DEVICE_NR(SCpnt->request.dev)].device->lun, result);
 340 
 341                 if (driver_byte(result) & DRIVER_SENSE)
 342                         print_sense("sd", SCpnt);
 343                 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors);
 344                 requeue_sd_request(SCpnt);
 345                 return;
 346         }
 347 }
 348 
 349 /*
 350         requeue_sd_request() is the request handler function for the sd driver.
 351         Its function in life is to take block device requests, and translate
 352         them to SCSI commands.
 353 */
 354 
 355 static void do_sd_request (void)
 356 {
 357   Scsi_Cmnd * SCpnt = NULL;
 358   struct request * req = NULL;
 359   unsigned long flags;
 360   int flag = 0;
 361 
 362   save_flags(flags);
 363   while (1==1){
 364     cli();
 365     if (CURRENT != NULL && CURRENT->dev == -1) {
 366       restore_flags(flags);
 367       return;
 368     };
 369 
 370     INIT_SCSI_REQUEST;
 371 
 372 
 373 /* We have to be careful here.  allocate_device will get a free pointer, but
 374    there is no guarantee that it is queueable.  In normal usage, we want to
 375    call this, because other types of devices may have the host all tied up,
 376    and we want to make sure that we have at least one request pending for this
 377    type of device.   We can also come through here while servicing an
 378    interrupt, because of the need to start another command.  If we call
 379    allocate_device more than once, then the system can wedge if the command
 380    is not queueable.  The request_queueable function is safe because it checks
 381    to make sure that the host is able to take another command before it returns
 382    a pointer.  */
 383 
 384     if (flag++ == 0)
 385       SCpnt = allocate_device(&CURRENT,
 386                               rscsi_disks[DEVICE_NR(MINOR(CURRENT->dev))].device, 0); 
 387     else SCpnt = NULL;
 388 
 389     /*
 390      * The following restore_flags leads to latency problems.  FIXME.
 391      * Using a "sti()" gets rid of the latency problems but causes
 392      * race conditions and crashes.
 393      */
 394     restore_flags(flags);
 395 
 396 /* This is a performance enhancement.  We dig down into the request list and
 397    try and find a queueable request (i.e. device not busy, and host able to
  398    accept another command).  If we find one, then we queue it.  This can
 399    make a big difference on systems with more than one disk drive.  We want
 400    to have the interrupts off when monkeying with the request list, because
 401    otherwise the kernel might try and slip in a request in between somewhere. */
 402 
 403     if (!SCpnt && sd_template.nr_dev > 1){
 404       struct request *req1;
 405       req1 = NULL;
 406       cli();
 407       req = CURRENT;
 408       while(req){
 409         SCpnt = request_queueable(req,
 410                                   rscsi_disks[DEVICE_NR(MINOR(req->dev))].device);
 411         if(SCpnt) break;
 412         req1 = req;
 413         req = req->next;
 414       };
 415       if (SCpnt && req->dev == -1) {
 416         if (req == CURRENT) 
 417           CURRENT = CURRENT->next;
 418         else
 419           req1->next = req->next;
 420       };
 421       restore_flags(flags);
 422     };
 423     
 424     if (!SCpnt) return; /* Could not find anything to do */
 425         
 426     /* Queue command */
 427     requeue_sd_request(SCpnt);
 428   };  /* While */
 429 }    
 430 
 431 static void requeue_sd_request (Scsi_Cmnd * SCpnt)
 432 {
 433         int dev, block, this_count;
 434         unsigned char cmd[10];
 435         int bounce_size, contiguous;
 436         int max_sg;
 437         struct buffer_head * bh, *bhp;
 438         char * buff, *bounce_buffer;
 439 
 440 repeat:
 441 
 442         if(!SCpnt || SCpnt->request.dev <= 0) {
 443           do_sd_request();
 444           return;
 445         }
 446 
 447         dev =  MINOR(SCpnt->request.dev);
 448         block = SCpnt->request.sector;
 449         this_count = 0;
 450 
 451 #ifdef DEBUG
 452         printk("Doing sd request, dev = %d, block = %d\n", dev, block);
 453 #endif
 454 
 455         if (dev >= (sd_template.dev_max << 4) || 
 456             !rscsi_disks[DEVICE_NR(dev)].device ||
 457             block + SCpnt->request.nr_sectors > sd[dev].nr_sects)
 458                 {
 459                 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 460                 goto repeat;
 461                 }
 462 
 463         block += sd[dev].start_sect;
 464         dev = DEVICE_NR(dev);
 465 
 466         if (rscsi_disks[dev].device->changed)
 467                 {
 468 /*
 469  * quietly refuse to do anything to a changed disc until the changed bit has been reset
 470  */
 471                 /* printk("SCSI disk has been changed.  Prohibiting further I/O.\n");   */
 472                 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 473                 goto repeat;
 474                 }
 475 
 476 #ifdef DEBUG
 477         printk("sd%c : real dev = /dev/sd%c, block = %d\n", 'a' + MINOR(SCpnt->request.dev), dev, block);
 478 #endif
 479 
 480         /*
 481          * If we have a 1K hardware sectorsize, prevent access to single
 482          * 512 byte sectors.  In theory we could handle this - in fact
 483          * the scsi cdrom driver must be able to handle this because
 484          * we typically use 1K blocksizes, and cdroms typically have
 485          * 2K hardware sectorsizes.  Of course, things are simpler
 486          * with the cdrom, since it is read-only.  For performance
 487          * reasons, the filesystems should be able to handle this
 488          * and not force the scsi disk driver to use bounce buffers
 489          * for this.
 490          */
 491         if (rscsi_disks[dev].sector_size == 1024)
 492           if((block & 1) || (SCpnt->request.nr_sectors & 1)) {
 493                 printk("sd.c:Bad block number requested");
 494                 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 495                 goto repeat;
 496         }
 497         
 498         switch (SCpnt->request.cmd)
 499                 {
 500                 case WRITE :
 501                         if (!rscsi_disks[dev].device->writeable)
 502                                 {
 503                                 SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors);
 504                                 goto repeat;
 505                                 }
 506                         cmd[0] = WRITE_6;
 507                         break;
 508                 case READ :
 509                         cmd[0] = READ_6;
 510                         break;
 511                 default :
 512                         panic ("Unknown sd command %d\n", SCpnt->request.cmd);
 513                       }
 514 
 515         SCpnt->this_count = 0;
 516 
 517         /* If the host adapter can deal with very large scatter-gather
 518            requests, it is a waste of time to cluster */
 519         contiguous = (!CLUSTERABLE_DEVICE(SCpnt) ? 0 :1);
 520         bounce_buffer = NULL;
 521         bounce_size = (SCpnt->request.nr_sectors << 9);
 522 
 523         /* First see if we need a bounce buffer for this request.  If we do, make sure
 524            that we can allocate a buffer.  Do not waste space by allocating a bounce
 525            buffer if we are straddling the 16Mb line */
 526 
 527         
 528         if (contiguous && SCpnt->request.bh &&
 529             ((long) SCpnt->request.bh->b_data) + (SCpnt->request.nr_sectors << 9) - 1 > 
 530             ISA_DMA_THRESHOLD && SCpnt->host->unchecked_isa_dma) {
 531           if(((long) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD)
 532             bounce_buffer = (char *) scsi_malloc(bounce_size);
 533           if(!bounce_buffer) contiguous = 0;
 534         };
 535 
 536         if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext)
 537           for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp, 
 538               bhp = bhp->b_reqnext) {
 539             if(!CONTIGUOUS_BUFFERS(bh,bhp)) { 
 540               if(bounce_buffer) scsi_free(bounce_buffer, bounce_size);
 541               contiguous = 0;
 542               break;
 543             } 
 544           };
 545         if (!SCpnt->request.bh || contiguous) {
 546 
 547           /* case of page request (i.e. raw device), or unlinked buffer */
 548           this_count = SCpnt->request.nr_sectors;
 549           buff = SCpnt->request.buffer;
 550           SCpnt->use_sg = 0;
 551 
 552         } else if (SCpnt->host->sg_tablesize == 0 ||
 553                    (need_isa_buffer && 
 554                     dma_free_sectors <= 10)) {
 555 
 556           /* Case of host adapter that cannot scatter-gather.  We also
 557            come here if we are running low on DMA buffer memory.  We set
  558           a threshold higher than what we would need for this request, so
 559            we leave room for other requests.  Even though we would not need
 560            it all, we need to be conservative, because if we run low enough
 561            we have no choice but to panic. */
 562 
 563           if (SCpnt->host->sg_tablesize != 0 &&
 564               need_isa_buffer && 
 565               dma_free_sectors <= 10)
 566             printk("Warning: SCSI DMA buffer space running low.  Using non scatter-gather I/O.\n");
 567 
 568           this_count = SCpnt->request.current_nr_sectors;
 569           buff = SCpnt->request.buffer;
 570           SCpnt->use_sg = 0;
 571 
 572         } else {
 573 
 574           /* Scatter-gather capable host adapter */
 575           struct scatterlist * sgpnt;
 576           int count, this_count_max;
 577           int counted;
 578 
 579           bh = SCpnt->request.bh;
 580           this_count = 0;
 581           this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff);
 582           count = 0;
 583           bhp = NULL;
 584           while(bh) {
 585             if ((this_count + (bh->b_size >> 9)) > this_count_max) break;
 586             if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh) ||
 587                !CLUSTERABLE_DEVICE(SCpnt) ||
 588                (SCpnt->host->unchecked_isa_dma &&
 589                ((unsigned long) bh->b_data-1) == ISA_DMA_THRESHOLD)) {
 590               if (count < SCpnt->host->sg_tablesize) count++;
 591               else break;
 592             };
 593             this_count += (bh->b_size >> 9);
 594             bhp = bh;
 595             bh = bh->b_reqnext;
 596           };
 597 #if 0
 598           if(SCpnt->host->unchecked_isa_dma &&
 599              ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD) count--;
 600 #endif
 601           SCpnt->use_sg = count;  /* Number of chains */
 602           count = 512;/* scsi_malloc can only allocate in chunks of 512 bytes*/
 603           while( count < (SCpnt->use_sg * sizeof(struct scatterlist))) 
 604             count = count << 1;
 605           SCpnt->sglist_len = count;
 606           max_sg = count / sizeof(struct scatterlist);
 607           if(SCpnt->host->sg_tablesize < max_sg) max_sg = SCpnt->host->sg_tablesize;
 608           sgpnt = (struct scatterlist * ) scsi_malloc(count);
  609           if (sgpnt) memset(sgpnt, 0, count);  /* Zero so it is easy to fill */
 610           if (!sgpnt) {
 611             printk("Warning - running *really* short on DMA buffers\n");
 612             SCpnt->use_sg = 0;  /* No memory left - bail out */
 613             this_count = SCpnt->request.current_nr_sectors;
 614             buff = SCpnt->request.buffer;
 615           } else {
 616             buff = (char *) sgpnt;
 617             counted = 0;
 618             for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext;
 619                 count < SCpnt->use_sg && bh; 
 620                 count++, bh = bhp) {
 621 
 622               bhp = bh->b_reqnext;
 623 
 624               if(!sgpnt[count].address) sgpnt[count].address = bh->b_data;
 625               sgpnt[count].length += bh->b_size;
 626               counted += bh->b_size >> 9;
 627 
 628               if (((long) sgpnt[count].address) + sgpnt[count].length - 1 > 
 629                   ISA_DMA_THRESHOLD && (SCpnt->host->unchecked_isa_dma) &&
 630                   !sgpnt[count].alt_address) {
 631                 sgpnt[count].alt_address = sgpnt[count].address;
 632                 /* We try and avoid exhausting the DMA pool, since it is easier
 633                    to control usage here.  In other places we might have a more
 634                    pressing need, and we would be screwed if we ran out */
 635                 if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) {
 636                   sgpnt[count].address = NULL;
 637                 } else {
 638                   sgpnt[count].address = (char *) scsi_malloc(sgpnt[count].length);
 639                 };
 640 /* If we start running low on DMA buffers, we abort the scatter-gather
 641    operation, and free all of the memory we have allocated.  We want to
 642    ensure that all scsi operations are able to do at least a non-scatter/gather
 643    operation */
 644                 if(sgpnt[count].address == NULL){ /* Out of dma memory */
 645 #if 0
 646                   printk("Warning: Running low on SCSI DMA buffers");
 647                   /* Try switching back to a non scatter-gather operation. */
 648                   while(--count >= 0){
 649                     if(sgpnt[count].alt_address) 
 650                       scsi_free(sgpnt[count].address, sgpnt[count].length);
 651                   };
 652                   this_count = SCpnt->request.current_nr_sectors;
 653                   buff = SCpnt->request.buffer;
 654                   SCpnt->use_sg = 0;
 655                   scsi_free(sgpnt, SCpnt->sglist_len);
 656 #endif
 657                   SCpnt->use_sg = count;
 658                   this_count = counted -= bh->b_size >> 9;
 659                   break;
 660                 };
 661 
 662               };
 663 
 664               /* Only cluster buffers if we know that we can supply DMA buffers
 665                  large enough to satisfy the request.  Do not cluster a new
 666                  request if this would mean that we suddenly need to start
 667                  using DMA bounce buffers */
 668               if(bhp && CONTIGUOUS_BUFFERS(bh,bhp) && CLUSTERABLE_DEVICE(SCpnt)) {
 669                 char * tmp;
 670 
 671                 if (((long) sgpnt[count].address) + sgpnt[count].length +
 672                     bhp->b_size - 1 > ISA_DMA_THRESHOLD && 
 673                     (SCpnt->host->unchecked_isa_dma) &&
 674                     !sgpnt[count].alt_address) continue;
 675 
 676                 if(!sgpnt[count].alt_address) {count--; continue; }
 677                 if(dma_free_sectors > 10)
 678                   tmp = (char *) scsi_malloc(sgpnt[count].length + bhp->b_size);
 679                 else {
 680                   tmp = NULL;
 681                   max_sg = SCpnt->use_sg;
 682                 };
 683                 if(tmp){
 684                   scsi_free(sgpnt[count].address, sgpnt[count].length);
 685                   sgpnt[count].address = tmp;
 686                   count--;
 687                   continue;
 688                 };
 689 
 690                 /* If we are allowed another sg chain, then increment counter so we
 691                    can insert it.  Otherwise we will end up truncating */
 692 
 693                 if (SCpnt->use_sg < max_sg) SCpnt->use_sg++;
 694               };  /* contiguous buffers */
 695             }; /* for loop */
 696 
 697             this_count = counted; /* This is actually how many we are going to transfer */
 698 
 699             if(count < SCpnt->use_sg || SCpnt->use_sg > SCpnt->host->sg_tablesize){
 700               bh = SCpnt->request.bh;
 701               printk("Use sg, count %d %x %d\n", SCpnt->use_sg, count, dma_free_sectors);
 702               printk("maxsg = %x, counted = %d this_count = %d\n", max_sg, counted, this_count);
 703               while(bh){
 704                 printk("[%p %lx] ", bh->b_data, bh->b_size);
 705                 bh = bh->b_reqnext;
 706               };
 707               if(SCpnt->use_sg < 16)
 708                 for(count=0; count<SCpnt->use_sg; count++)
 709                   printk("{%d:%p %p %d}  ", count,
 710                          sgpnt[count].address,
 711                          sgpnt[count].alt_address,
 712                          sgpnt[count].length);
 713               panic("Ooops");
 714             };
 715 
 716             if (SCpnt->request.cmd == WRITE)
 717               for(count=0; count<SCpnt->use_sg; count++)
 718                 if(sgpnt[count].alt_address)
 719                   memcpy(sgpnt[count].address, sgpnt[count].alt_address, 
 720                          sgpnt[count].length);
 721           };  /* Able to malloc sgpnt */
 722         };  /* Host adapter capable of scatter-gather */
 723 
 724 /* Now handle the possibility of DMA to addresses > 16Mb */
 725 
 726         if(SCpnt->use_sg == 0){
 727           if (((long) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD && 
 728             (SCpnt->host->unchecked_isa_dma)) {
 729             if(bounce_buffer)
 730               buff = bounce_buffer;
 731             else
 732               buff = (char *) scsi_malloc(this_count << 9);
 733             if(buff == NULL) {  /* Try backing off a bit if we are low on mem*/
 734               this_count = SCpnt->request.current_nr_sectors;
 735               buff = (char *) scsi_malloc(this_count << 9);
 736               if(!buff) panic("Ran out of DMA buffers.");
 737             };
 738             if (SCpnt->request.cmd == WRITE)
 739               memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9);
 740           };
 741         };
 742 #ifdef DEBUG
 743         printk("sd%c : %s %d/%d 512 byte blocks.\n", 'a' + MINOR(SCpnt->request.dev),
 744                 (SCpnt->request.cmd == WRITE) ? "writing" : "reading",
 745                 this_count, SCpnt->request.nr_sectors);
 746 #endif
 747 
 748         cmd[1] = (SCpnt->lun << 5) & 0xe0;
 749 
 750         if (rscsi_disks[dev].sector_size == 1024){
 751           if(block & 1) panic("sd.c:Bad block number requested");
 752           if(this_count & 1) panic("sd.c:Bad block number requested");
 753           block = block >> 1;
 754           this_count = this_count >> 1;
 755         };
 756 
 757         if (rscsi_disks[dev].sector_size == 256){
 758           block = block << 1;
 759           this_count = this_count << 1;
 760         };
 761 
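        /* Build the CDB.  A six-byte READ/WRITE carries a 21-bit logical block
           address and an eight-bit transfer length, hence the 0x1fffff and 0xff
           limits below; the ten-byte form extends these to 32 bits and 16 bits
           respectively. */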
 762         if (((this_count > 0xff) ||  (block > 0x1fffff)) && rscsi_disks[dev].ten)
 763                 {
 764                 if (this_count > 0xffff)
 765                         this_count = 0xffff;
 766 
 767                 cmd[0] += READ_10 - READ_6 ;
 768                 cmd[2] = (unsigned char) (block >> 24) & 0xff;
 769                 cmd[3] = (unsigned char) (block >> 16) & 0xff;
 770                 cmd[4] = (unsigned char) (block >> 8) & 0xff;
 771                 cmd[5] = (unsigned char) block & 0xff;
 772                 cmd[6] = cmd[9] = 0;
 773                 cmd[7] = (unsigned char) (this_count >> 8) & 0xff;
 774                 cmd[8] = (unsigned char) this_count & 0xff;
 775                 }
 776         else
 777                 {
 778                 if (this_count > 0xff)
 779                         this_count = 0xff;
 780 
 781                 cmd[1] |= (unsigned char) ((block >> 16) & 0x1f);
 782                 cmd[2] = (unsigned char) ((block >> 8) & 0xff);
 783                 cmd[3] = (unsigned char) block & 0xff;
 784                 cmd[4] = (unsigned char) this_count;
 785                 cmd[5] = 0;
 786                 }
 787 
 788 /*
 789  * We shouldn't disconnect in the middle of a sector, so with a dumb 
 790  * host adapter, it's safe to assume that we can at least transfer 
 791  * this many bytes between each connect / disconnect.  
 792  */
 793 
 794         SCpnt->transfersize = rscsi_disks[dev].sector_size;
 795         SCpnt->underflow = this_count << 9; 
 796         scsi_do_cmd (SCpnt, (void *) cmd, buff, 
 797                      this_count * rscsi_disks[dev].sector_size,
 798                      rw_intr, 
 799                      (SCpnt->device->type == TYPE_DISK ? 
 800                                      SD_TIMEOUT : SD_MOD_TIMEOUT),
 801                      MAX_RETRIES);
 802 }
 803 
 804 static int check_scsidisk_media_change(dev_t full_dev){
 805         int retval;
 806         int target;
 807         struct inode inode;
 808         int flag = 0;
 809 
 810         target =  DEVICE_NR(MINOR(full_dev));
 811 
 812         if (target >= sd_template.dev_max ||
 813             !rscsi_disks[target].device) {
 814                 printk("SCSI disk request error: invalid device.\n");
 815                 return 0;
 816         };
 817 
 818         if(!rscsi_disks[target].device->removable) return 0;
 819 
 820         inode.i_rdev = full_dev;  /* This is all we really need here */
 821         retval = sd_ioctl(&inode, NULL, SCSI_IOCTL_TEST_UNIT_READY, 0);
 822 
 823         if(retval){ /* Unable to test, unit probably not ready.  This usually
 824                      means there is no disc in the drive.  Mark as changed,
 825                      and we will figure it out later once the drive is
 826                      available again.  */
 827 
 828           rscsi_disks[target].device->changed = 1;
 829           return 1; /* This will force a flush, if called from
 830                        check_disk_change */
 831         };
 832 
 833         retval = rscsi_disks[target].device->changed;
 834         if(!flag) rscsi_disks[target].device->changed = 0;
 835         return retval;
 836 }
 837 
 838 static void sd_init_done (Scsi_Cmnd * SCpnt)
 839 {
 840   struct request * req;
 841   
 842   req = &SCpnt->request;
 843   req->dev = 0xfffe; /* Busy, but indicate request done */
 844   
 845   if (req->sem != NULL) {
 846     up(req->sem);
 847   }
 848 }
 849 
 850 static int sd_init_onedisk(int i)
 851 {
 852   unsigned char cmd[10];
 853   unsigned char *buffer;
 854   unsigned long spintime;
 855   int the_result, retries;
 856   Scsi_Cmnd * SCpnt;
 857 
 858   /* We need to retry the READ_CAPACITY because a UNIT_ATTENTION is considered
 859      a fatal error, and many devices report such an error just after a scsi
 860      bus reset. */
 861 
 862   SCpnt = allocate_device(NULL, rscsi_disks[i].device, 1);
 863   buffer = (unsigned char *) scsi_malloc(512);
 864 
 865   spintime = 0;
 866 
 867   /* Spin up drives, as required.  Only do this at boot time */
 868   if (current == task[0]){
 869     do{
 870       cmd[0] = TEST_UNIT_READY;
 871       cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
 872       memset ((void *) &cmd[2], 0, 8);
 873       SCpnt->request.dev = 0xffff;  /* Mark as really busy again */
 874       SCpnt->cmd_len = 0;
 875       SCpnt->sense_buffer[0] = 0;
 876       SCpnt->sense_buffer[2] = 0;
 877       
 878       scsi_do_cmd (SCpnt,
 879                    (void *) cmd, (void *) buffer,
 880                    512, sd_init_done,  SD_TIMEOUT,
 881                    MAX_RETRIES);
 882       
 883       while(SCpnt->request.dev != 0xfffe) barrier();
 884       
 885       the_result = SCpnt->result;
 886       
 887       /* Look for non-removable devices that return NOT_READY.  Issue command
 888          to spin up drive for these cases. */
 889       if(the_result && !rscsi_disks[i].device->removable && 
 890          SCpnt->sense_buffer[2] == NOT_READY) {
 891         int time1;
 892         if(!spintime){
 893           printk( "sd%c: Spinning up disk...", 'a' + i );
 894           cmd[0] = START_STOP;
 895           cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
 896           cmd[1] |= 1;  /* Return immediately */
 897           memset ((void *) &cmd[2], 0, 8);
 898           cmd[4] = 1; /* Start spin cycle */
 899           SCpnt->request.dev = 0xffff;  /* Mark as really busy again */
 900           SCpnt->cmd_len = 0;
 901           SCpnt->sense_buffer[0] = 0;
 902           SCpnt->sense_buffer[2] = 0;
 903           
 904           scsi_do_cmd (SCpnt,
 905                        (void *) cmd, (void *) buffer,
 906                        512, sd_init_done,  SD_TIMEOUT,
 907                        MAX_RETRIES);
 908           
 909           while(SCpnt->request.dev != 0xfffe) barrier();
 910 
 911           spintime = jiffies;
 912         };
 913 
 914         time1 = jiffies;
 915         while(jiffies < time1 + HZ); /* Wait 1 second for next try */
 916         printk( "." );
 917       };
 918     } while(the_result && spintime && spintime+100*HZ > jiffies);
 919     if (spintime) {
 920        if (the_result)
 921            printk( "not responding...\n" );
 922        else
 923            printk( "ready\n" );
 924     }
 925   };  /* current == task[0] */
 926 
 927 
 928   retries = 3;
 929   do {
 930     cmd[0] = READ_CAPACITY;
 931     cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
 932     memset ((void *) &cmd[2], 0, 8);
 933     memset ((void *) buffer, 0, 8);
 934     SCpnt->request.dev = 0xffff;  /* Mark as really busy again */
 935     SCpnt->cmd_len = 0;
 936     SCpnt->sense_buffer[0] = 0;
 937     SCpnt->sense_buffer[2] = 0;
 938     
 939     scsi_do_cmd (SCpnt,
 940                  (void *) cmd, (void *) buffer,
 941                  8, sd_init_done,  SD_TIMEOUT,
 942                  MAX_RETRIES);
 943     
 944     if (current == task[0])
 945       while(SCpnt->request.dev != 0xfffe) barrier();
 946     else
 947       if (SCpnt->request.dev != 0xfffe){
 948         struct semaphore sem = MUTEX_LOCKED;
 949         SCpnt->request.sem = &sem;
 950         down(&sem);
 951         /* Hmm.. Have to ask about this one.. */
 952         while (SCpnt->request.dev != 0xfffe) schedule();
 953       };
 954     
 955     the_result = SCpnt->result;
 956     retries--;
 957 
 958   } while(the_result && retries);
 959 
 960   SCpnt->request.dev = -1;  /* Mark as not busy */
 961 
 962   wake_up(&SCpnt->device->device_wait); 
 963 
 964   /* Wake up a process waiting for device*/
 965 
 966   /*
  967    *    The SCSI standard says "READ CAPACITY is necessary for self configuring software".
 968    *    While not mandatory, support of READ CAPACITY is strongly encouraged.
 969    *    We used to die if we couldn't successfully do a READ CAPACITY.
 970    *    But, now we go on about our way.  The side effects of this are
 971    *
 972    *    1.  We can't know block size with certainty.  I have said "512 bytes is it"
 973    *            as this is most common.
 974    *
  975    *    2.  Recovery from when someone attempts to read past the end of the raw device will
 976    *        be slower.
 977    */
 978 
 979   if (the_result)
 980     {
 981       printk ("sd%c : READ CAPACITY failed.\n"
 982               "sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
 983               'a' + i, 'a' + i,
 984               status_byte(the_result),
 985               msg_byte(the_result),
 986               host_byte(the_result),
 987               driver_byte(the_result)
 988               );
 989       if (driver_byte(the_result)  & DRIVER_SENSE)
 990         printk("sd%c : extended sense code = %1x \n", 'a' + i, SCpnt->sense_buffer[2] & 0xf);
 991       else
 992         printk("sd%c : sense not available. \n", 'a' + i);
 993 
 994       printk("sd%c : block size assumed to be 512 bytes, disk size 1GB.  \n", 'a' + i);
 995       rscsi_disks[i].capacity = 0x1fffff;
 996       rscsi_disks[i].sector_size = 512;
 997 
 998       /* Set dirty bit for removable devices if not ready - sometimes drives
 999          will not report this properly. */
1000       if(rscsi_disks[i].device->removable && 
1001          SCpnt->sense_buffer[2] == NOT_READY)
1002         rscsi_disks[i].device->changed = 1;
1003 
1004     }
1005   else
1006     {
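      /* READ CAPACITY returned eight bytes of data: a big-endian 32-bit
         address of the last logical block followed by a big-endian 32-bit
         block length in bytes.  The shifts below simply reassemble the two
         values from the buffer. */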
1007       rscsi_disks[i].capacity = (buffer[0] << 24) |
1008         (buffer[1] << 16) |
1009           (buffer[2] << 8) |
1010             buffer[3];
1011 
1012       rscsi_disks[i].sector_size = (buffer[4] << 24) |
1013         (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
1014 
1015       if (rscsi_disks[i].sector_size != 512 &&
1016           rscsi_disks[i].sector_size != 1024 &&
1017           rscsi_disks[i].sector_size != 256)
1018         {
1019           printk ("sd%c : unsupported sector size %d.\n",
1020                   'a' + i, rscsi_disks[i].sector_size);
1021           if(rscsi_disks[i].device->removable){
1022             rscsi_disks[i].capacity = 0;
1023           } else {
1024             printk ("scsi : deleting disk entry.\n");
1025             rscsi_disks[i].device = NULL;
1026             sd_template.nr_dev--;
1027             return i;
1028           };
1029         }
1030     {
1031        /*
 1032           The msdos fs needs to know the hardware sector size,
 1033           so I have created this table.  See ll_rw_blk.c.
1034           Jacques Gelinas (Jacques@solucorp.qc.ca)
1035        */
1036        int m;
1037        int hard_sector = rscsi_disks[i].sector_size;
 1038        /* There are 16 minors allocated for each device */
1039        for (m=i<<4; m<((i+1)<<4); m++){
1040          sd_hardsizes[m] = hard_sector;
1041        }
1042        printk ("SCSI Hardware sector size is %d bytes on device sd%c\n"
1043          ,hard_sector,i+'a');
1044     }
1045       if(rscsi_disks[i].sector_size == 1024)
1046         rscsi_disks[i].capacity <<= 1;  /* Change this into 512 byte sectors */
1047       if(rscsi_disks[i].sector_size == 256)
1048         rscsi_disks[i].capacity >>= 1;  /* Change this into 512 byte sectors */
1049     }
1050 
1051   rscsi_disks[i].ten = 1;
1052   rscsi_disks[i].remap = 1;
1053   scsi_free(buffer, 512);
1054   return i;
1055 }
1056 
1057 /*
1058         The sd_init() function looks at all SCSI drives present, determines
1059         their size, and reads partition table entries for them.
1060 */
1061 
1062 
1063 static void sd_init()
1064 {
1065         int i;
1066         static int sd_registered = 0;
1067 
1068         if (sd_template.dev_noticed == 0) return;
1069 
1070         if(!sd_registered) {
1071           if (register_blkdev(MAJOR_NR,"sd",&sd_fops)) {
1072             printk("Unable to get major %d for SCSI disk\n",MAJOR_NR);
1073             return;
1074           }
1075           sd_registered++;
1076         }
1077 
1078         /* We do not support attaching loadable devices yet. */
1079         if(rscsi_disks) return;
1080 
1081         sd_template.dev_max = sd_template.dev_noticed + SD_EXTRA_DEVS;
1082 
1083         rscsi_disks = (Scsi_Disk *) 
1084           scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC);
1085         memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk));
1086 
1087         sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) * 
1088                                             sizeof(int), GFP_ATOMIC);
1089         memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int));
1090 
1091         sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) * 
1092                                                  sizeof(int), GFP_ATOMIC);
1093 
1094         sd_hardsizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) * 
 1095                                                    sizeof(int), GFP_ATOMIC);
1096 
1097         for(i=0;i<(sd_template.dev_max << 4);i++){
1098                 sd_blocksizes[i] = 1024;
1099                 sd_hardsizes[i] = 512;
1100         }
1101         blksize_size[MAJOR_NR] = sd_blocksizes;
1102         hardsect_size[MAJOR_NR] = sd_hardsizes;
1103         sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
1104                                                    sizeof(struct hd_struct),
1105                                                    GFP_ATOMIC);
1106 
1107 
1108         sd_gendisk.max_nr = sd_template.dev_max;
1109         sd_gendisk.part = sd;
1110         sd_gendisk.sizes = sd_sizes;
1111         sd_gendisk.real_devices = (void *) rscsi_disks;
1112 
1113 }
1114 
1115 static void sd_finish()
1116 {
1117         int i;
1118 
1119         blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
1120 
1121         sd_gendisk.next = gendisk_head;
1122         gendisk_head = &sd_gendisk;
1123 
1124         for (i = 0; i < sd_template.dev_max; ++i)
1125             if (!rscsi_disks[i].capacity && 
1126                   rscsi_disks[i].device)
1127               {
1128                 i = sd_init_onedisk(i);
1129                 if (scsi_loadable_module_flag 
1130                     && !rscsi_disks[i].has_part_table) {
1131                   sd_sizes[i << 4] = rscsi_disks[i].capacity;
1132                   revalidate_scsidisk(i << 4, 0);
1133                 }
1134                 rscsi_disks[i].has_part_table = 1;
1135               }
1136 
 1137         /* If our host adapter is capable of scatter-gather, then we increase
 1138            the read-ahead to 60 blocks (120 sectors).  If not, we use
 1139            a two block (4 sector) read ahead. */
 1140         if(rscsi_disks[0].device && rscsi_disks[0].device->host->sg_tablesize)
 1141           read_ahead[MAJOR_NR] = 120;
 1142         /* 120 sector read-ahead */
1143         else
1144           read_ahead[MAJOR_NR] = 4;  /* 4 sector read-ahead */
1145         
1146         return;
1147 }
1148 
1149 static int sd_detect(Scsi_Device * SDp){
1150   if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1151 
1152   printk("Detected scsi disk sd%c at scsi%d, id %d, lun %d\n", 
1153          'a'+ (sd_template.dev_noticed++),
1154          SDp->host->host_no , SDp->id, SDp->lun); 
1155 
 1156   return 1;
1157 
1158 }
1159 
1160 static int sd_attach(Scsi_Device * SDp){
1161    Scsi_Disk * dpnt;
1162    int i;
1163 
1164    if(SDp->type != TYPE_DISK && SDp->type != TYPE_MOD) return 0;
1165 
1166    if(sd_template.nr_dev >= sd_template.dev_max) {
1167         SDp->attached--;
1168         return 1;
1169    }
1170    
1171    for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++) 
1172      if(!dpnt->device) break;
1173 
1174    if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)");
1175 
1176    SDp->scsi_request_fn = do_sd_request;
1177    rscsi_disks[i].device = SDp;
1178    rscsi_disks[i].has_part_table = 0;
1179    sd_template.nr_dev++;
1180    sd_gendisk.nr_real++;
1181    return 0;
1182 }
1183 
1184 #define DEVICE_BUSY rscsi_disks[target].device->busy
1185 #define USAGE rscsi_disks[target].device->access_count
1186 #define CAPACITY rscsi_disks[target].capacity
1187 #define MAYBE_REINIT  sd_init_onedisk(target)
1188 #define GENDISK_STRUCT sd_gendisk
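      /* These macros parameterize the revalidation code below, so that the
         flush / invalidate / re-read-partition-table sequence is written in
         terms of the particular disk being revalidated. */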
1189 
1190 /* This routine is called to flush all partitions and partition tables
1191    for a changed scsi disk, and then re-read the new partition table.
1192    If we are revalidating a disk because of a media change, then we
1193    enter with usage == 0.  If we are using an ioctl, we automatically have
1194    usage == 1 (we need an open channel to use an ioctl :-), so this
1195    is our limit.
1196  */
1197 int revalidate_scsidisk(int dev, int maxusage){
1198           int target, major;
1199           struct gendisk * gdev;
1200           unsigned long flags;
1201           int max_p;
1202           int start;
1203           int i;
1204 
1205           target =  DEVICE_NR(MINOR(dev));
1206           gdev = &GENDISK_STRUCT;
1207 
1208           save_flags(flags);
1209           cli();
1210           if (DEVICE_BUSY || USAGE > maxusage) {
1211             restore_flags(flags);
1212             printk("Device busy for revalidation (usage=%d)\n", USAGE);
1213             return -EBUSY;
1214           };
1215           DEVICE_BUSY = 1;
1216           restore_flags(flags);
1217 
1218           max_p = gdev->max_p;
1219           start = target << gdev->minor_shift;
1220           major = MAJOR_NR << 8;
1221 
1222           for (i=max_p - 1; i >=0 ; i--) {
1223             sync_dev(major | start | i);
1224             invalidate_inodes(major | start | i);
1225             invalidate_buffers(major | start | i);
1226             gdev->part[start+i].start_sect = 0;
1227             gdev->part[start+i].nr_sects = 0;
1228           };
1229 
1230 #ifdef MAYBE_REINIT
1231           MAYBE_REINIT;
1232 #endif
1233 
1234           gdev->part[start].nr_sects = CAPACITY;
1235           resetup_one_dev(gdev, target);
1236 
1237           DEVICE_BUSY = 0;
1238           return 0;
1239 }
1240 
1241 static int fop_revalidate_scsidisk(dev_t dev){
1242   return revalidate_scsidisk(dev, 0);
1243 }
1244 
1245 
1246 static void sd_detach(Scsi_Device * SDp)
1247 {
1248   Scsi_Disk * dpnt;
1249   int i;
1250   int max_p;
1251   int major;
1252   int start;
1253   
1254   for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++) 
1255     if(dpnt->device == SDp) {
1256 
1257       /* If we are disconnecting a disk driver, sync and invalidate everything */
1258       max_p = sd_gendisk.max_p;
1259       start = i << sd_gendisk.minor_shift;
1260       major = MAJOR_NR << 8;
1261 
1262       for (i=max_p - 1; i >=0 ; i--) {
1263         sync_dev(major | start | i);
1264         invalidate_inodes(major | start | i);
1265         invalidate_buffers(major | start | i);
1266         sd_gendisk.part[start+i].start_sect = 0;
1267         sd_gendisk.part[start+i].nr_sects = 0;
1268         sd_sizes[start+i] = 0;
1269       };
1270       
1271       dpnt->has_part_table = 0;
1272       dpnt->device = NULL;
1273       dpnt->capacity = 0;
1274       SDp->attached--;
1275       sd_template.dev_noticed--;
1276       sd_template.nr_dev--;
1277       sd_gendisk.nr_real--;
1278       return;
1279     }
1280   return;
1281 }
1282 
1283 /*
1284  * Overrides for Emacs so that we follow Linus's tabbing style.
1285  * Emacs will notice this stuff at the end of the file and automatically
1286  * adjust the settings for this buffer only.  This must remain at the end
1287  * of the file.
1288  * ---------------------------------------------------------------------------
1289  * Local variables:
1290  * c-indent-level: 8
1291  * c-brace-imaginary-offset: 0
1292  * c-brace-offset: -8
1293  * c-argdecl-indent: 8
1294  * c-label-offset: -8
1295  * c-continued-statement-offset: 8
1296  * c-continued-brace-offset: 0
1297  * End:
1298  */
