linux/drivers/block/ll_rw_blk.c


DEFINITIONS

This source file includes the following definitions:
  1. unplug_device
  2. plug_device
  3. get_request
  4. __get_request_wait
  5. get_request_wait
  6. is_read_only
  7. set_device_ro
  8. drive_stat_acct
  9. add_request
  10. make_request
  11. get_md_request
  12. ll_rw_page
  13. ll_rw_block
  14. ll_rw_swap_file
  15. blk_dev_init

   1 /*
   2  *  linux/drivers/block/ll_rw_blk.c
   3  *
   4  * Copyright (C) 1991, 1992 Linus Torvalds
   5  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
   6  */
   7 
   8 /*
   9  * This handles all read/write requests to block devices
  10  */
  11 #include <linux/sched.h>
  12 #include <linux/kernel.h>
  13 #include <linux/kernel_stat.h>
  14 #include <linux/errno.h>
  15 #include <linux/string.h>
  16 #include <linux/config.h>
  17 #include <linux/locks.h>
  18 #include <linux/mm.h>
  19 
  20 #include <asm/system.h>
  21 #include <asm/io.h>
  22 #include <linux/blk.h>
  23 
  24 /*
  25  * The request-struct contains all necessary data
   26  * to load a number of sectors into memory
  27  */
  28 static struct request all_requests[NR_REQUEST];
  29 
  30 /*
  31  * The "disk" task queue is used to start the actual requests
  32  * after a plug
  33  */
  34 DECLARE_TASK_QUEUE(tq_disk);
  35 
  36 /*
  37  * used to wait on when there are no free requests
  38  */
  39 struct wait_queue * wait_for_request = NULL;
  40 
  41 /* This specifies how many sectors to read ahead on the disk.  */
  42 
  43 int read_ahead[MAX_BLKDEV] = {0, };
  44 
  45 /* blk_dev_struct is:
  46  *      *request_fn
  47  *      *current_request
  48  */
  49 struct blk_dev_struct blk_dev[MAX_BLKDEV]; /* initialized by blk_dev_init() */
  50 
  51 /*
   52  * blk_size contains the size of all block-devices in units of
   53  * 1024-byte blocks (i.e. kilobytes):
  54  *
  55  * blk_size[MAJOR][MINOR]
  56  *
  57  * if (!blk_size[MAJOR]) then no minor size checking is done.
  58  */
  59 int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
  60 
  61 /*
  62  * blksize_size contains the size of all block-devices:
  63  *
  64  * blksize_size[MAJOR][MINOR]
  65  *
  66  * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
  67  */
  68 int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
  69 
  70 /*
  71  * hardsect_size contains the size of the hardware sector of a device.
  72  *
  73  * hardsect_size[MAJOR][MINOR]
  74  *
  75  * if (!hardsect_size[MAJOR])
  76  *              then 512 bytes is assumed.
  77  * else
  78  *              sector_size is hardsect_size[MAJOR][MINOR]
   79  * This is currently set by some SCSI devices and read by the msdos fs
   80  * driver.  It might find other uses later.
  81  */
  82 int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
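
/*
 * Editor's sketch (not in the original source): how a block driver
 * typically fills the tables above at init time.  MYDEV_MAJOR and the
 * mydev_* arrays are hypothetical names:
 *
 *      static int mydev_sizes[MYDEV_MINORS];     -- per-minor size in KB
 *      static int mydev_blksizes[MYDEV_MINORS];  -- soft block size, bytes
 *
 *      blk_size[MYDEV_MAJOR]     = mydev_sizes;
 *      blksize_size[MYDEV_MAJOR] = mydev_blksizes;
 *      read_ahead[MYDEV_MAJOR]   = 8;            -- read-ahead, in sectors
 */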
  83 
  84 /*
  85  * remove the plug and let it rip..
  86  */
  87 static void unplug_device(void * data)
  88 {
  89         struct blk_dev_struct * dev = (struct blk_dev_struct *) data;
  90         unsigned long flags;
  91 
  92         save_flags(flags);
  93         cli();
  94         dev->current_request = dev->plug.next;
  95         dev->plug.next = NULL;
  96         (dev->request_fn)();
  97         restore_flags(flags);
  98 }
  99 
 100 /*
 101  * "plug" the device if there are no outstanding requests: this will
 102  * force the transfer to start only after we have put all the requests
 103  * on the list.
 104  *
 105  * This is called with interrupts off and no requests on the queue.
 106  */
 107 static inline void plug_device(struct blk_dev_struct * dev)
 108 {
 109         dev->current_request = &dev->plug;
 110         queue_task_irq_off(&dev->plug_tq, &tq_disk);
 111 }
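
/*
 * Editor's sketch (not in the original source): the plug life cycle as
 * seen from this file.  make_request() below plugs an idle queue before
 * chaining requests onto it, and whoever wants the I/O actually started
 * runs the disk task queue, which fires unplug_device() above:
 *
 *      cli();
 *      if (!blk_dev[major].current_request)
 *              plug_device(blk_dev + major);   -- queue idle: plug it
 *      sti();
 *      ...queue one or more requests via add_request()...
 *      run_task_queue(&tq_disk);               -- pull the plug, start I/O
 */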
 112 
 113 /*
 114  * look for a free request in the first N entries.
 115  * NOTE: interrupts must be disabled on the way in, and will still
 116  *       be disabled on the way out.
 117  */
 118 static inline struct request * get_request(int n, kdev_t dev)
 119 {
 120         static struct request *prev_found = NULL, *prev_limit = NULL;
 121         register struct request *req, *limit;
 122 
 123         if (n <= 0)
 124                 panic("get_request(%d): impossible!\n", n);
 125 
 126         limit = all_requests + n;
 127         if (limit != prev_limit) {
 128                 prev_limit = limit;
 129                 prev_found = all_requests;
 130         }
 131         req = prev_found;
 132         for (;;) {
 133                 req = ((req > all_requests) ? req : limit) - 1;
 134                 if (req->rq_status == RQ_INACTIVE)
 135                         break;
 136                 if (req == prev_found)
 137                         return NULL;
 138         }
 139         prev_found = req;
 140         req->rq_status = RQ_ACTIVE;
 141         req->rq_dev = dev;
 142         return req;
 143 }
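
/*
 * Editor's note: get_request() must be entered with interrupts disabled.
 * A non-blocking attempt therefore looks like the callers further down:
 *
 *      cli();
 *      req = get_request(NR_REQUEST, dev);
 *      sti();
 *      if (!req)
 *              ...give up (read-ahead) or fall back to __get_request_wait()...
 */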
 144 
 145 /*
 146  * wait until a free request in the first N entries is available.
 147  */
 148 static struct request * __get_request_wait(int n, kdev_t dev)
 149 {
 150         register struct request *req;
 151         struct wait_queue wait = { current, NULL };
 152 
 153         add_wait_queue(&wait_for_request, &wait);
 154         for (;;) {
 155                 current->state = TASK_UNINTERRUPTIBLE;
 156                 cli();
 157                 req = get_request(n, dev);
 158                 sti();
 159                 if (req)
 160                         break;
 161                 run_task_queue(&tq_disk);
 162                 schedule();
 163         }
 164         remove_wait_queue(&wait_for_request, &wait);
 165         current->state = TASK_RUNNING;
 166         return req;
 167 }
 168 
 169 static inline struct request * get_request_wait(int n, kdev_t dev)
 170 {
 171         register struct request *req;
 172 
 173         cli();
 174         req = get_request(n, dev);
 175         sti();
 176         if (req)
 177                 return req;
 178         return __get_request_wait(n, dev);
 179 }
 180 
 181 /* RO fail safe mechanism */
 182 
 183 static long ro_bits[MAX_BLKDEV][8];
 184 
 185 int is_read_only(kdev_t dev)
 186 {
 187         int minor,major;
 188 
 189         major = MAJOR(dev);
 190         minor = MINOR(dev);
 191         if (major < 0 || major >= MAX_BLKDEV) return 0;
 192         return ro_bits[major][minor >> 5] & (1 << (minor & 31));
 193 }
 194 
 195 void set_device_ro(kdev_t dev,int flag)
 196 {
 197         int minor,major;
 198 
 199         major = MAJOR(dev);
 200         minor = MINOR(dev);
 201         if (major < 0 || major >= MAX_BLKDEV) return;
 202         if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
 203         else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
 204 }
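
/*
 * Editor's worked example: ro_bits packs one read-only flag per minor,
 * 32 minors per long -- minor >> 5 selects the long, minor & 31 the bit.
 * Marking major 3, minor 2 read-only and testing it:
 *
 *      set_device_ro(MKDEV(3, 2), 1);
 *      if (is_read_only(MKDEV(3, 2)))
 *              printk("read-only\n");
 */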
 205 
 206 static inline void drive_stat_acct(int cmd, unsigned long nr_sectors, short disk_index)
 207 {
 208         kstat.dk_drive[disk_index]++;
 209         if (cmd == READ) {
 210                 kstat.dk_drive_rio[disk_index]++;
 211                 kstat.dk_drive_rblk[disk_index] += nr_sectors;
 212         }
 213         else if (cmd == WRITE) {
 214                 kstat.dk_drive_wio[disk_index]++;
 215                 kstat.dk_drive_wblk[disk_index] += nr_sectors;
 216         } else
 217                 printk("drive_stat_acct: cmd not R/W?\n");
 218 }
 219 
 220 /*
 221  * add-request adds a request to the linked list.
 222  * It disables interrupts so that it can muck with the
 223  * request-lists in peace.
 224  *
 225  * By this point, req->cmd is always either READ/WRITE, never READA/WRITEA,
 226  * which is important for drive_stat_acct() above.
 227  */
 228 
 229 struct semaphore request_lock = MUTEX;
 230 
 231 void add_request(struct blk_dev_struct * dev, struct request * req)
 232 {
 233         struct request * tmp;
 234         short            disk_index;
 235 
 236         down (&request_lock);
 237         switch (MAJOR(req->rq_dev)) {
 238                 case SCSI_DISK_MAJOR:
 239                         disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
 240                         if (disk_index < 4)
 241                                 drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
 242                         break;
 243                 case IDE0_MAJOR:        /* same as HD_MAJOR */
 244                 case XT_DISK_MAJOR:
 245                         disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
 246                         drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
 247                         break;
 248                 case IDE1_MAJOR:
 249                         disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
 250                         drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
 251                 default:
 252                         break;
 253         }
 254 
 255         req->next = NULL;
 256         cli();
 257         if (req->bh && req->bh->b_dev==req->bh->b_rdev)
 258                 mark_buffer_clean(req->bh);
 259         if (!(tmp = dev->current_request)) {
 260                 dev->current_request = req;
 261                 up (&request_lock);
 262                 (dev->request_fn)();
 263                 sti();
 264                 return;
 265         }
 266         for ( ; tmp->next ; tmp = tmp->next) {
 267                 if ((IN_ORDER(tmp,req) ||
 268                     !IN_ORDER(tmp,tmp->next)) &&
 269                     IN_ORDER(req,tmp->next))
 270                         break;
 271         }
 272         req->next = tmp->next;
 273         tmp->next = req;
 274 
 275         up (&request_lock);
 276 /* for SCSI devices, call request_fn unconditionally */
 277         if (scsi_major(MAJOR(req->rq_dev)) && MAJOR(req->rq_dev)!=MD_MAJOR)
 278                 (dev->request_fn)();
 279 
 280         sti();
 281 }
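
/*
 * Editor's worked example for the insertion loop above: IN_ORDER()
 * (defined in <linux/blk.h>) orders by command, then device, then
 * sector.  With READs for sectors 10, 50 and 90 of one disk already
 * queued, a new READ for sector 60 is linked in between 50 and 90,
 * while a READ for sector 5 goes behind the point where the existing
 * list itself breaks order -- i.e. after the elevator's turnaround.
 */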
 282 
 283 static void make_request(int major,int rw, struct buffer_head * bh)
 284 {
 285         unsigned int sector, count;
 286         struct request * req;
 287         int rw_ahead, max_req;
 288 
 289         count = bh->b_size >> 9;
 290         sector = bh->b_blocknr * count;
 291         if (blk_size[major])
 292                 if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
 293                         bh->b_state = 0;
 294                         printk("attempt to access beyond end of device\n");
 295                         printk("%s: rw=%d, want=%d, limit=%d\n", kdevname(bh->b_dev),
 296                          rw, (sector + count)>>1, blk_size[major][MINOR(bh->b_dev)]);
 297                         return;
 298                 }
 299         /* Uhhuh.. Nasty dead-lock possible here.. */
 300         if (buffer_locked(bh))
 301                 return;
 302         /* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */
 303         lock_buffer(bh);
 304 
 305         rw_ahead = 0;   /* normal case; gets changed below for READA/WRITEA */
 306         switch (rw) {
 307                 case READA:
 308                         rw_ahead = 1;
 309                         rw = READ;      /* drop into READ */
 310                 case READ:
 311                         if (buffer_uptodate(bh)) {
 312                                 unlock_buffer(bh); /* Hmmph! Already have it */
 313                                 return;
 314                         }
 315                         kstat.pgpgin++;
 316                         max_req = (major == MD_MAJOR) ? NR_REQUEST/2 : NR_REQUEST;      /* reads take precedence */
 317                         break;
 318                 case WRITEA:
 319                         rw_ahead = 1;
 320                         rw = WRITE;     /* drop into WRITE */
 321                 case WRITE:
 322                         if (!buffer_dirty(bh)) {
 323                                 unlock_buffer(bh); /* Hmmph! Nothing to write */
 324                                 return;
 325                         }
 326                         /* We don't allow the write-requests to fill up the
 327                          * queue completely:  we want some room for reads,
 328                          * as they take precedence. The last third of the
 329                          * requests are only for reads.
 330                          */
 331                         kstat.pgpgout++;
 332                         max_req =  (major == MD_MAJOR) ? NR_REQUEST/3 : (NR_REQUEST * 2) / 3;
 333                         break;
 334                 default:
 335                         printk("make_request: bad block dev cmd, must be R/W/RA/WA\n");
 336                         unlock_buffer(bh);
 337                         return;
 338         }
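        /*
         * Editor's note: with NR_REQUEST = 64 (its value in blk.h), writes
         * may claim at most (64 * 2) / 3 = 42 of the request slots, leaving
         * the last third free for reads, which take precedence.
         */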
 339 
 340 /* look for a free request. */
 341         down (&request_lock);
 342 
 343         /*
 344          * Try to coalesce the new request with old requests
 345          */
 346         cli();
 347         req = blk_dev[major].current_request;
 348         if (!req) {
 349                 plug_device(blk_dev + major);
 350         } else switch (major) {
 351              case IDE0_MAJOR:   /* same as HD_MAJOR */
 352              case IDE1_MAJOR:
 353              case FLOPPY_MAJOR:
 354              case IDE2_MAJOR:
 355              case IDE3_MAJOR:
 356                 /*
 357                  * The scsi disk and cdrom drivers completely remove the request
 358                  * from the queue when they start processing an entry.  For this
 359                  * reason it is safe to continue to add links to the top entry for
 360                  * those devices.
 361                  *
 362                  * All other drivers need to jump over the first entry, as that
 363                  * entry may be busy being processed and we thus can't change it.
 364                  */
 365                 req = req->next;
 366                 if (!req)
 367                         break;
 368                 /* fall through */
 369 
 370              case SCSI_DISK_MAJOR:
 371              case SCSI_CDROM_MAJOR:
 372              case MD_MAJOR:
 373 
 374                 do {
 375                         if (req->sem)
 376                                 continue;
 377                         if (req->cmd != rw)
 378                                 continue;
 379                         if (req->nr_sectors >= 244)
 380                                 continue;
 381                         if (req->rq_dev != bh->b_dev)
 382                                 continue;
 383                         /* Can we add it to the end of this request? */
 384                         if (req->sector + req->nr_sectors == sector) {
 385                                 req->bhtail->b_reqnext = bh;
 386                                 req->bhtail = bh;
 387                         /* or to the beginning? */
 388                         } else if (req->sector - count == sector) {
 389                                 bh->b_reqnext = req->bh;
 390                                 req->bh = bh;
 391                                 req->buffer = bh->b_data;
 392                                 req->current_nr_sectors = count;
 393                                 req->sector = sector;
 394                         } else
 395                                 continue;
 396 
 397                         req->nr_sectors += count;
 398                         mark_buffer_clean(bh);
 399                         up (&request_lock);
 400                         sti();
 401                         return;
 402                 } while ((req = req->next) != NULL);
 403         }
 404 
 405         up (&request_lock);
 406         
 407 /* find an unused request. */
 408         req = get_request(max_req, bh->b_dev);
 409         sti();
 410 
 411 /* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
 412         if (!req) {
 413                 if (rw_ahead) {
 414                         unlock_buffer(bh);
 415                         return;
 416                 }
 417                 req = __get_request_wait(max_req, bh->b_dev);
 418         }
 419 
 420 /* fill up the request-info, and add it to the queue */
 421         req->cmd = rw;
 422         req->errors = 0;
 423         req->sector = sector;
 424         req->nr_sectors = count;
 425         req->current_nr_sectors = count;
 426         req->buffer = bh->b_data;
 427         req->sem = NULL;
 428         req->bh = bh;
 429         req->bhtail = bh;
 430         req->next = NULL;
 431         add_request(major+blk_dev,req);
 432 }
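
/*
 * Editor's worked example for the coalescing loop above: three 1K
 * buffers for consecutive blocks 100..102 (count = 2 sectors each, so
 * sectors 200..205) submitted while the queue is plugged collapse into
 * one request:
 *
 *      cmd = READ, sector = 200, nr_sectors = 6,
 *      bh: block 100 -> 101 -> 102     -- chained through b_reqnext
 */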
 433 
 434 #ifdef CONFIG_BLK_DEV_MD
 435 
 436 struct request *get_md_request (int max_req, kdev_t dev)
 437 {
 438   return (get_request_wait (max_req, dev));
 439 }
 440 
 441 #endif
 442 
 443 /*
 444  * Swap partitions are now read via brw_page.  ll_rw_page is an
 445  * asynchronous function now --- we must call wait_on_page afterwards
 446  * if synchronous IO is required.  
 447  */
 448 void ll_rw_page(int rw, kdev_t dev, unsigned long page, char * buffer)
 449 {
 450         unsigned int major = MAJOR(dev);
 451         int block = page;
 452         
 453         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 454                 printk("Trying to read nonexistent block-device %s (%ld)\n",
 455                        kdevname(dev), page);
 456                 return;
 457         }
 458         switch (rw) {
 459                 case READ:
 460                         break;
 461                 case WRITE:
 462                         if (is_read_only(dev)) {
 463                                 printk("Can't page to read-only device %s\n",
 464                                         kdevname(dev));
 465                                 return;
 466                         }
 467                         break;
 468                 default:
 469                         panic("ll_rw_page: bad block dev cmd, must be R/W");
 470         }
 471         if (set_bit(PG_locked, &mem_map[MAP_NR(buffer)].flags))
 472                 panic ("ll_rw_page: page already locked");
 473         brw_page(rw, (unsigned long) buffer, dev, &block, PAGE_SIZE, 0);
 474 }
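
/*
 * Editor's sketch (hypothetical caller; "swap_dev", "offset" and "addr"
 * are made-up names): since ll_rw_page() is asynchronous, a synchronous
 * user waits on the page itself; for a page-sized kernel buffer:
 *
 *      ll_rw_page(READ, swap_dev, offset, (char *) addr);
 *      wait_on_page(mem_map + MAP_NR(addr));
 */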
 475 
 476 /* This function can be used to request a number of buffers from a block
 477    device. Currently the only restriction is that all buffers must belong to
 478    the same device */
 479 
 480 void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
 481 {
 482         unsigned int major;
 483         int correct_size;
 484         struct blk_dev_struct * dev;
 485         int i;
 486 
 487         /* Make sure that the first block contains something reasonable */
 488         while (!*bh) {
 489                 bh++;
 490                 if (--nr <= 0)
 491                         return;
  492         }
 493 
 494         dev = NULL;
 495         if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
 496                 dev = blk_dev + major;
 497         if (!dev || !dev->request_fn) {
 498                 printk(
 499         "ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
 500                 kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
 501                 goto sorry;
 502         }
 503 
 504         /* Determine correct block size for this device.  */
 505         correct_size = BLOCK_SIZE;
 506         if (blksize_size[major]) {
 507                 i = blksize_size[major][MINOR(bh[0]->b_dev)];
 508                 if (i)
 509                         correct_size = i;
 510         }
 511 
 512         /* Verify requested block sizes.  */
 513         for (i = 0; i < nr; i++) {
 514                 if (bh[i] && bh[i]->b_size != correct_size) {
 515                         printk("ll_rw_block: device %s: "
 516                                "only %d-char blocks implemented (%lu)\n",
 517                                kdevname(bh[0]->b_dev),
 518                                correct_size, bh[i]->b_size);
 519                         goto sorry;
 520                 }
 521         }
 522 
 523         if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
 524                 printk("Can't write to read-only device %s\n",
 525                        kdevname(bh[0]->b_dev));
 526                 goto sorry;
 527         }
 528 
 529         for (i = 0; i < nr; i++) {
 530                 if (bh[i]) {
 531                         set_bit(BH_Req, &bh[i]->b_state);
 532 
 533                         /* Md needs this for error recovery */
 534                         bh[i]->b_rdev = bh[i]->b_dev;
 535 
 536                         make_request(major, rw, bh[i]);
 537                 }
 538         }
 539         return;
 540 
 541       sorry:
 542         for (i = 0; i < nr; i++) {
 543                 if (bh[i]) {
 544                         clear_bit(BH_Dirty, &bh[i]->b_state);
 545                         clear_bit(BH_Uptodate, &bh[i]->b_state);
 546                 }
 547         }
 548         return;
 549 }
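
/*
 * Editor's sketch: the classic synchronous read built on ll_rw_block()
 * -- essentially what bread() in fs/buffer.c does:
 *
 *      struct buffer_head * bh = getblk(dev, block, BLOCK_SIZE);
 *      if (!buffer_uptodate(bh)) {
 *              ll_rw_block(READ, 1, &bh);
 *              wait_on_buffer(bh);
 *      }
 *      ...use bh->b_data, then brelse(bh)...
 */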
 550 
 551 void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
 552 {
 553         int i, j;
 554         int buffersize;
 555         struct request * req[8];
 556         unsigned int major = MAJOR(dev);
 557         struct semaphore sem = MUTEX_LOCKED;
 558 
 559         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 560                 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
 561                 return;
 562         }
 563         switch (rw) {
 564                 case READ:
 565                         break;
 566                 case WRITE:
 567                         if (is_read_only(dev)) {
 568                                 printk("Can't swap to read-only device %s\n",
 569                                         kdevname(dev));
 570                                 return;
 571                         }
 572                         break;
 573                 default:
 574                         panic("ll_rw_swap: bad block dev cmd, must be R/W");
 575         }
 576         buffersize = PAGE_SIZE / nb;
 577 
 578         for (j=0, i=0; i<nb;)
 579         {
 580                 for (; j < 8 && i < nb; j++, i++, buf += buffersize)
 581                 {
 582                         if (j == 0) {
 583                                 req[j] = get_request_wait(NR_REQUEST, dev);
 584                         } else {
 585                                 cli();
 586                                 req[j] = get_request(NR_REQUEST, dev);
 587                                 sti();
 588                                 if (req[j] == NULL)
 589                                         break;
 590                         }
 591                         req[j]->cmd = rw;
 592                         req[j]->errors = 0;
 593                         req[j]->sector = (b[i] * buffersize) >> 9;
 594                         req[j]->nr_sectors = buffersize >> 9;
 595                         req[j]->current_nr_sectors = buffersize >> 9;
 596                         req[j]->buffer = buf;
 597                         req[j]->sem = &sem;
 598                         req[j]->bh = NULL;
 599                         req[j]->next = NULL;
 600                         add_request(major+blk_dev,req[j]);
 601                 }
 602                 run_task_queue(&tq_disk);
 603                 while (j > 0) {
 604                         j--;
 605                         down(&sem);
 606                 }
 607         }
 608 }
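
/*
 * Editor's worked example for the batching above: with PAGE_SIZE 4096
 * (i386) and nb = 4, buffersize is 1024 bytes, so each request covers
 * buffersize >> 9 = 2 sectors starting at sector (b[i] * 1024) >> 9.
 * Completion is signalled through the shared semaphore -- end_request()
 * in <linux/blk.h> does up(req->sem) when a request finishes -- and the
 * "while (j > 0)" loop downs it once per request before the next batch
 * of up to eight is issued.
 */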
 609 
 610 int blk_dev_init(void)
 611 {
 612         struct request * req;
 613         struct blk_dev_struct *dev;
 614 
 615         for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
 616                 dev->request_fn      = NULL;
 617                 dev->current_request = NULL;
 618                 dev->plug.rq_status  = RQ_INACTIVE;
 619                 dev->plug.cmd        = -1;
 620                 dev->plug.next       = NULL;
 621                 dev->plug_tq.routine = &unplug_device;
 622                 dev->plug_tq.data    = dev;
 623         }
 624 
 625         req = all_requests + NR_REQUEST;
 626         while (--req >= all_requests) {
 627                 req->rq_status = RQ_INACTIVE;
 628                 req->next = NULL;
 629         }
 630         memset(ro_bits,0,sizeof(ro_bits));
 631 #ifdef CONFIG_BLK_DEV_RAM
 632         rd_init();
 633 #endif
 634 #ifdef CONFIG_BLK_DEV_LOOP
 635         loop_init();
 636 #endif
 637 #ifdef CONFIG_BLK_DEV_IDE
 638         ide_init();             /* this MUST precede hd_init */
 639 #endif
 640 #ifdef CONFIG_BLK_DEV_HD
 641         hd_init();
 642 #endif
 643 #ifdef CONFIG_BLK_DEV_XD
 644         xd_init();
 645 #endif
 646 #ifdef CONFIG_BLK_DEV_FD
 647         floppy_init();
 648 #else
 649         outb_p(0xc, 0x3f2);
 650 #endif
  651 #ifdef CONFIG_CDI_INIT
  652         cdi_init();
  653 #endif /* CONFIG_CDI_INIT */
  654 #ifdef CONFIG_CDU31A
  655         cdu31a_init();
  656 #endif /* CONFIG_CDU31A */
  657 #ifdef CONFIG_MCD
  658         mcd_init();
  659 #endif /* CONFIG_MCD */
  660 #ifdef CONFIG_MCDX
  661         mcdx_init();
  662 #endif /* CONFIG_MCDX */
  663 #ifdef CONFIG_SBPCD
  664         sbpcd_init();
  665 #endif /* CONFIG_SBPCD */
  666 #ifdef CONFIG_AZTCD
  667         aztcd_init();
  668 #endif /* CONFIG_AZTCD */
  669 #ifdef CONFIG_CDU535
  670         sony535_init();
  671 #endif /* CONFIG_CDU535 */
  672 #ifdef CONFIG_GSCD
  673         gscd_init();
  674 #endif /* CONFIG_GSCD */
  675 #ifdef CONFIG_CM206
  676         cm206_init();
  677 #endif
  678 #ifdef CONFIG_OPTCD
  679         optcd_init();
  680 #endif /* CONFIG_OPTCD */
  681 #ifdef CONFIG_SJCD
  682         sjcd_init();
  683 #endif /* CONFIG_SJCD */
  684 #ifdef CONFIG_BLK_DEV_MD
  685         md_init();
  686 #endif /* CONFIG_BLK_DEV_MD */
 687         return 0;
 688 }
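
/*
 * Editor's sketch (hypothetical driver): blk_dev_init() only clears the
 * table; each driver's own init routine then hooks itself in, in the
 * style used throughout drivers/block:
 *
 *      blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;  -- from blk.h
 *      read_ahead[MAJOR_NR] = 8;
 */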
