root/drivers/block/ll_rw_blk.c


DEFINITIONS

This source file includes the following definitions:
  1. plug_device
  2. unplug_device
  3. get_request
  4. __get_request_wait
  5. get_request_wait
  6. is_read_only
  7. set_device_ro
  8. drive_stat_acct
  9. add_request
  10. make_request
  11. get_md_request
  12. ll_rw_page
  13. ll_rw_block
  14. ll_rw_swap_file
  15. blk_dev_init

/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>

/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory
 */
static struct request all_requests[NR_REQUEST];

/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request = NULL;

/* This specifies how many sectors to read ahead on the disk.  */

int read_ahead[MAX_BLKDEV] = {0, };

/* blk_dev_struct is:
 *      *request_fn
 *      *current_request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV]; /* initialized by blk_dev_init() */

/*
 * blk_size contains the size of all block-devices in units of
 * 1024-byte blocks:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * hardsect_size contains the hardware sector size of a device.
 *
 * hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR])
 *              then 512 bytes is assumed.
 * else
 *              sector_size is hardsect_size[MAJOR][MINOR]
 * This is currently set by some SCSI devices and read by the msdos fs
 * driver. It might find other uses later.
 */
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
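
A driver makes these tables live by pointing them at its own per-minor
arrays. A minimal registration sketch, assuming a hypothetical "foo" driver
(FOO_MAJOR, FOO_MINORS and the foo_* arrays are illustrative names, not part
of this file):

        static int foo_sizes[FOO_MINORS];       /* device sizes, in 1kB units */
        static int foo_blksizes[FOO_MINORS];    /* software block size, bytes */

        /* somewhere in foo_init(): */
        blk_size[FOO_MAJOR] = foo_sizes;
        blksize_size[FOO_MAJOR] = foo_blksizes;
        read_ahead[FOO_MAJOR] = 8;              /* read ahead 8 sectors */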

/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 */
static inline void plug_device(struct blk_dev_struct * dev, struct request * plug)
{
        unsigned long flags;

        plug->rq_status = RQ_INACTIVE;
        plug->cmd = -1;
        plug->next = NULL;
        save_flags(flags);
        cli();
        if (!dev->current_request)
                dev->current_request = plug;
        restore_flags(flags);
}

/*
 * remove the plug and let it rip..
 */
static inline void unplug_device(struct blk_dev_struct * dev)
{
        struct request * req;
        unsigned long flags;

        save_flags(flags);
        cli();
        req = dev->current_request;
        if (req && req->rq_status == RQ_INACTIVE && req->cmd == -1) {
                dev->current_request = req->next;
                (dev->request_fn)();
        }
        restore_flags(flags);
}
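
ll_rw_block() below is the main user of this pair. A minimal sketch of the
plug/unplug pattern, assuming a caller holding several buffer heads for one
device (dev, major, rw, bh[] and nr stand in for the caller's state):

        struct request plug;

        plug_device(dev, &plug);        /* park an inactive request at the head */
        for (i = 0; i < nr; i++)
                make_request(major, rw, bh[i]); /* queue up behind the plug */
        unplug_device(dev);             /* pop the plug, start request_fn */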

/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 */
static inline struct request * get_request(int n, kdev_t dev)
{
        static struct request *prev_found = NULL, *prev_limit = NULL;
        register struct request *req, *limit;

        if (n <= 0)
                panic("get_request(%d): impossible!\n", n);

        limit = all_requests + n;
        if (limit != prev_limit) {
                prev_limit = limit;
                prev_found = all_requests;
        }
        req = prev_found;
        for (;;) {
                /* scan downwards from where the last search succeeded,
                 * wrapping from all_requests[0] back to limit-1; if we
                 * come full circle to prev_found, every entry is busy.
                 */
                req = ((req > all_requests) ? req : limit) - 1;
                if (req->rq_status == RQ_INACTIVE)
                        break;
                if (req == prev_found)
                        return NULL;
        }
        prev_found = req;
        req->rq_status = RQ_ACTIVE;
        req->rq_dev = dev;
        return req;
}

/*
 * wait until a free request in the first N entries is available.
 */
static struct request * __get_request_wait(int n, kdev_t dev)
{
        register struct request *req;
        struct wait_queue wait = { current, NULL };

        add_wait_queue(&wait_for_request, &wait);
        for (;;) {
                unplug_device(MAJOR(dev)+blk_dev);
                current->state = TASK_UNINTERRUPTIBLE;
                cli();
                req = get_request(n, dev);
                sti();
                if (req)
                        break;
                schedule();
        }
        remove_wait_queue(&wait_for_request, &wait);
        current->state = TASK_RUNNING;
        return req;
}

static inline struct request * get_request_wait(int n, kdev_t dev)
{
        register struct request *req;

        cli();
        req = get_request(n, dev);
        sti();
        if (req)
                return req;
        return __get_request_wait(n, dev);
}

/* RO fail safe mechanism */

static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return 0;
        return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev,int flag)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return;
        if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
        else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
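
Each major gets eight longs of ro_bits, one bit per minor (256 minors with
32-bit longs). A worked example of the indexing, for a minor of 70:

        /* word: 70 >> 5 = 2,  bit: 70 & 31 = 6                      */
        set_device_ro(dev, 1);  /* ro_bits[major][2] |= 1 << 6       */
        is_read_only(dev);      /* tests the same bit; non-zero = RO */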

static inline void drive_stat_acct(int cmd, unsigned long nr_sectors, short disk_index)
{
        kstat.dk_drive[disk_index]++;
        if (cmd == READ) {
                kstat.dk_drive_rio[disk_index]++;
                kstat.dk_drive_rblk[disk_index] += nr_sectors;
        }
        else if (cmd == WRITE) {
                kstat.dk_drive_wio[disk_index]++;
                kstat.dk_drive_wblk[disk_index] += nr_sectors;
        } else
                printk("drive_stat_acct: cmd not R/W?\n");
}

/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 *
 * By this point, req->cmd is always either READ/WRITE, never READA/WRITEA,
 * which is important for drive_stat_acct() above.
 */

struct semaphore request_lock = MUTEX;

void add_request(struct blk_dev_struct * dev, struct request * req)
{
        struct request * tmp;
        short            disk_index;

        down(&request_lock);
        switch (MAJOR(req->rq_dev)) {
                case SCSI_DISK_MAJOR:
                        /* drive number lives in bits 4-6 of the minor */
                        disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
                        if (disk_index < 4)
                                drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                case IDE0_MAJOR:        /* same as HD_MAJOR */
                case XT_DISK_MAJOR:
                        disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
                        drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                case IDE1_MAJOR:
                        disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
                        drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        /* fall through */
                default:
                        break;
        }

        req->next = NULL;
        cli();
        if (req->bh && req->bh->b_dev == req->bh->b_rdev)
                mark_buffer_clean(req->bh);
        if (!(tmp = dev->current_request)) {
                dev->current_request = req;
                up(&request_lock);
                (dev->request_fn)();
                sti();
                return;
        }
        /* elevator insertion: walk the queue and drop the request at the
         * first point that keeps (or restores) IN_ORDER sorting.
         */
        for ( ; tmp->next ; tmp = tmp->next) {
                if ((IN_ORDER(tmp,req) ||
                    !IN_ORDER(tmp,tmp->next)) &&
                    IN_ORDER(req,tmp->next))
                        break;
        }
        req->next = tmp->next;
        tmp->next = req;

        up(&request_lock);
/* for SCSI devices, call request_fn unconditionally */
        if (scsi_major(MAJOR(req->rq_dev)) && MAJOR(req->rq_dev)!=MD_MAJOR)
                (dev->request_fn)();

        sti();
}
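
A small illustration of that insertion, assuming IN_ORDER() (from
linux/blk.h) orders requests by ascending sector on the same device (a
hypothetical queue; the actual macro also considers device and command):

        /* queue: sector 10 -> 40 -> 90, new request at sector 60:    */
        /* IN_ORDER(40,60) and IN_ORDER(60,90) hold, insert after 40: */
        /* queue: sector 10 -> 40 -> 60 -> 90                         */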

static void make_request(int major,int rw, struct buffer_head * bh)
{
        unsigned int sector, count;
        struct request * req;
        int rw_ahead, max_req;

        /* count is in 512-byte sectors: e.g. a 1024-byte buffer at
         * block 10 gives count = 2, sector = 20.
         */
        count = bh->b_size >> 9;
        sector = bh->b_blocknr * count;
        if (blk_size[major])
                if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
                        bh->b_state = 0;
                        printk("attempt to access beyond end of device\n");
                        printk("%s: rw=%d, want=%d, limit=%d\n", kdevname(bh->b_dev),
                         rw, (sector + count)>>1, blk_size[major][MINOR(bh->b_dev)]);
                        return;
                }
        /* Uhhuh.. Nasty dead-lock possible here.. */
        if (buffer_locked(bh))
                return;
        /* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */
        lock_buffer(bh);

        rw_ahead = 0;   /* normal case; gets changed below for READA/WRITEA */
        switch (rw) {
                case READA:
                        rw_ahead = 1;
                        rw = READ;      /* drop into READ */
                case READ:
                        if (buffer_uptodate(bh)) {
                                unlock_buffer(bh); /* Hmmph! Already have it */
                                return;
                        }
                        kstat.pgpgin++;
                        max_req = NR_REQUEST;   /* reads take precedence */
                        break;
                case WRITEA:
                        rw_ahead = 1;
                        rw = WRITE;     /* drop into WRITE */
                case WRITE:
                        if (!buffer_dirty(bh)) {
                                unlock_buffer(bh); /* Hmmph! Nothing to write */
                                return;
                        }
                        /* We don't allow the write-requests to fill up the
                         * queue completely:  we want some room for reads,
                         * as they take precedence. The last third of the
                         * requests are only for reads.
                         */
                        kstat.pgpgout++;
                        max_req = (NR_REQUEST * 2) / 3;
                        break;
                default:
                        printk("make_request: bad block dev cmd, must be R/W/RA/WA\n");
                        unlock_buffer(bh);
                        return;
        }

/* look for a free request. */
        cli();
        down(&request_lock);

/* The scsi disk and cdrom drivers completely remove the request
 * from the queue when they start processing an entry.  For this reason
 * it is safe to continue to add links to the top entry for those devices.
 */
        if ((   major == IDE0_MAJOR     /* same as HD_MAJOR */
             || major == IDE1_MAJOR
             || major == MD_MAJOR
             || major == FLOPPY_MAJOR
             || major == SCSI_DISK_MAJOR
             || major == SCSI_CDROM_MAJOR
             || major == IDE2_MAJOR
             || major == IDE3_MAJOR)
            && (req = blk_dev[major].current_request))
        {
                if (major != SCSI_DISK_MAJOR && major != SCSI_CDROM_MAJOR)
                        req = req->next;

                while (req) {
                        /* back merge: the buffer starts right after an
                         * existing request; append it to the tail.
                         */
                        if (req->rq_dev == bh->b_dev &&
                            !req->sem &&
                            req->cmd == rw &&
                            req->sector + req->nr_sectors == sector &&
                            req->nr_sectors < 244)
                        {
                                req->bhtail->b_reqnext = bh;
                                req->bhtail = bh;
                                req->nr_sectors += count;
                                mark_buffer_clean(bh);
                                up(&request_lock);
                                sti();
                                return;
                        }

                        /* front merge: the buffer ends right where an
                         * existing request starts; prepend it.
                         */
                        if (req->rq_dev == bh->b_dev &&
                            !req->sem &&
                            req->cmd == rw &&
                            req->sector - count == sector &&
                            req->nr_sectors < 244)
                        {
                                req->nr_sectors += count;
                                bh->b_reqnext = req->bh;
                                req->buffer = bh->b_data;
                                req->current_nr_sectors = count;
                                req->sector = sector;
                                mark_buffer_clean(bh);
                                req->bh = bh;
                                up(&request_lock);
                                sti();
                                return;
                        }

                        req = req->next;
                }
        }
        up(&request_lock);

/* find an unused request. */
        req = get_request(max_req, bh->b_dev);
        sti();

/* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
        if (!req) {
                if (rw_ahead) {
                        unlock_buffer(bh);
                        return;
                }
                req = __get_request_wait(max_req, bh->b_dev);
        }

/* fill up the request-info, and add it to the queue */
        req->cmd = rw;
        req->errors = 0;
        req->sector = sector;
        req->nr_sectors = count;
        req->current_nr_sectors = count;
        req->buffer = bh->b_data;
        req->sem = NULL;
        req->bh = bh;
        req->bhtail = bh;
        req->next = NULL;
        add_request(major+blk_dev,req);
}
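
add_request() hands the queue to the driver through (dev->request_fn)(). As
a rough sketch of the other side, this is the shape of a request function
for a hypothetical FOO_MAJOR driver, using the CURRENT/INIT_REQUEST/
end_request helpers from linux/blk.h (illustrative only, not this file's
code):

        static void do_foo_request(void)  /* blk_dev[FOO_MAJOR].request_fn */
        {
        repeat:
                INIT_REQUEST;   /* sanity-checks CURRENT, returns if queue empty */
                /* ... program the hardware from CURRENT->sector,
                 *     CURRENT->current_nr_sectors and CURRENT->buffer ...
                 */
                end_request(1); /* 1 = success: complete buffers, advance queue */
                goto repeat;
        }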

#ifdef CONFIG_BLK_DEV_MD

struct request *get_md_request (int max_req, kdev_t dev)
{
        return get_request_wait(max_req, dev);
}

#endif

/*
 * Swap partitions are now read via brw_page.  ll_rw_page is an
 * asynchronous function now --- we must call wait_on_page afterwards
 * if synchronous IO is required.
 */
void ll_rw_page(int rw, kdev_t dev, unsigned long page, char * buffer)
{
        unsigned int major = MAJOR(dev);
        int block = page;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("Trying to read nonexistent block-device %s (%ld)\n",
                       kdevname(dev), page);
                return;
        }
        switch (rw) {
                case READ:
                        break;
                case WRITE:
                        if (is_read_only(dev)) {
                                printk("Can't page to read-only device %s\n",
                                        kdevname(dev));
                                return;
                        }
                        break;
                default:
                        panic("ll_rw_page: bad block dev cmd, must be R/W");
        }
        if (mem_map[MAP_NR(buffer)].locked)
                panic ("ll_rw_page: page already locked");
        mem_map[MAP_NR(buffer)].locked = 1;
        brw_page(rw, (unsigned long) buffer, dev, &block, PAGE_SIZE, 0);
}
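
Per the comment above, a caller that needs synchronous behaviour waits on
the page after queueing the I/O. A minimal sketch (swap_dev, page and buffer
stand in for the caller's values):

        ll_rw_page(READ, swap_dev, page, buffer);  /* queues I/O, locks the page */
        wait_on_page(mem_map + MAP_NR(buffer));    /* sleep until completion unlocks it */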

/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong
   to the same device.  */

void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
        unsigned int major;
        struct request plug;
        int correct_size;
        struct blk_dev_struct * dev;
        int i;

        /* Make sure that the first block contains something reasonable */
        while (!*bh) {
                bh++;
                if (--nr <= 0)
                        return;
        }

        dev = NULL;
        if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
                dev = blk_dev + major;
        if (!dev || !dev->request_fn) {
                printk(
        "ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
                kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
                goto sorry;
        }

        /* Determine correct block size for this device.  */
        correct_size = BLOCK_SIZE;
        if (blksize_size[major]) {
                i = blksize_size[major][MINOR(bh[0]->b_dev)];
                if (i)
                        correct_size = i;
        }

        /* Verify requested block sizes.  */
        for (i = 0; i < nr; i++) {
                if (bh[i] && bh[i]->b_size != correct_size) {
                        printk("ll_rw_block: device %s: "
                               "only %d-char blocks implemented (%lu)\n",
                               kdevname(bh[0]->b_dev),
                               correct_size, bh[i]->b_size);
                        goto sorry;
                }
        }

        if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
                printk("Can't write to read-only device %s\n",
                       kdevname(bh[0]->b_dev));
                goto sorry;
        }

        /* If there are no pending requests for this device, then we insert
           a dummy request for that device.  This will prevent the request
           from starting until we have shoved all of the blocks into the
           queue, and then we let it rip.  */

        if (nr > 1)
                plug_device(dev, &plug);
        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        set_bit(BH_Req, &bh[i]->b_state);

                        /* Md needs this for error recovery */
                        bh[i]->b_rdev = bh[i]->b_dev;

                        make_request(major, rw, bh[i]);
                }
        }
        unplug_device(dev);
        return;

      sorry:
        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        clear_bit(BH_Dirty, &bh[i]->b_state);
                        clear_bit(BH_Uptodate, &bh[i]->b_state);
                }
        }
        return;
}
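
ll_rw_block() is the buffer cache's entry point. A minimal synchronous read,
essentially the shape of bread() in fs/buffer.c (dev, block and size stand
in for the caller's values):

        struct buffer_head * bh = getblk(dev, block, size);

        if (!buffer_uptodate(bh)) {
                ll_rw_block(READ, 1, &bh);      /* queue the read */
                wait_on_buffer(bh);             /* sleep until the I/O completes */
        }
        /* buffer_uptodate(bh) now says whether the read succeeded */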

void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
{
        int i, j;
        int buffersize;
        struct request * req[8];
        unsigned int major = MAJOR(dev);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
                return;
        }
        switch (rw) {
                case READ:
                        break;
                case WRITE:
                        if (is_read_only(dev)) {
                                printk("Can't swap to read-only device %s\n",
                                        kdevname(dev));
                                return;
                        }
                        break;
                default:
                        panic("ll_rw_swap: bad block dev cmd, must be R/W");
        }
        buffersize = PAGE_SIZE / nb;

        /* issue the requests in batches of at most 8, then sleep on the
         * semaphore once per issued request: the driver up()s req->sem
         * as each request completes.
         */
        for (j=0, i=0; i<nb;)
        {
                for (; j < 8 && i < nb; j++, i++, buf += buffersize)
                {
                        if (j == 0) {
                                req[j] = get_request_wait(NR_REQUEST, dev);
                        } else {
                                cli();
                                req[j] = get_request(NR_REQUEST, dev);
                                sti();
                                if (req[j] == NULL)
                                        break;
                        }
                        req[j]->cmd = rw;
                        req[j]->errors = 0;
                        req[j]->sector = (b[i] * buffersize) >> 9;
                        req[j]->nr_sectors = buffersize >> 9;
                        req[j]->current_nr_sectors = buffersize >> 9;
                        req[j]->buffer = buf;
                        req[j]->sem = &sem;
                        req[j]->bh = NULL;
                        req[j]->next = NULL;
                        add_request(major+blk_dev,req[j]);
                }
                while (j > 0) {
                        j--;
                        down(&sem);
                }
        }
}

int blk_dev_init(void)
{
        struct request * req;
        struct blk_dev_struct *dev;

        for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
                dev->request_fn      = NULL;
                dev->current_request = NULL;
        }

        req = all_requests + NR_REQUEST;
        while (--req >= all_requests) {
                req->rq_status = RQ_INACTIVE;
                req->next = NULL;
        }
        memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_RAM
        rd_init();
#endif
#ifdef CONFIG_BLK_DEV_LOOP
        loop_init();
#endif
#ifdef CONFIG_BLK_DEV_IDE
        ide_init();             /* this MUST precede hd_init */
#endif
#ifdef CONFIG_BLK_DEV_HD
        hd_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
        xd_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
        floppy_init();
#else
        outb_p(0xc, 0x3f2);     /* no floppy driver: quiet the floppy controller */
#endif
#ifdef CONFIG_CDI_INIT
        cdi_init();
#endif /* CONFIG_CDI_INIT */
#ifdef CONFIG_CDU31A
        cdu31a_init();
#endif /* CONFIG_CDU31A */
#ifdef CONFIG_MCD
        mcd_init();
#endif /* CONFIG_MCD */
#ifdef CONFIG_MCDX
        mcdx_init();
#endif /* CONFIG_MCDX */
#ifdef CONFIG_SBPCD
        sbpcd_init();
#endif /* CONFIG_SBPCD */
#ifdef CONFIG_AZTCD
        aztcd_init();
#endif /* CONFIG_AZTCD */
#ifdef CONFIG_CDU535
        sony535_init();
#endif /* CONFIG_CDU535 */
#ifdef CONFIG_GSCD
        gscd_init();
#endif /* CONFIG_GSCD */
#ifdef CONFIG_CM206
        cm206_init();
#endif
#ifdef CONFIG_OPTCD
        optcd_init();
#endif /* CONFIG_OPTCD */
#ifdef CONFIG_SJCD
        sjcd_init();
#endif /* CONFIG_SJCD */
#ifdef CONFIG_BLK_DEV_MD
        md_init();
#endif /* CONFIG_BLK_DEV_MD */
        return 0;
}
