root/drivers/block/ll_rw_blk.c


DEFINITIONS

This source file includes the following definitions.
  1. unplug_device
  2. plug_device
  3. get_request
  4. __get_request_wait
  5. get_request_wait
  6. is_read_only
  7. set_device_ro
  8. drive_stat_acct
  9. add_request
  10. make_request
  11. get_md_request
  12. ll_rw_page
  13. ll_rw_block
  14. ll_rw_swap_file
  15. blk_dev_init

   1 /*
   2  *  linux/drivers/block/ll_rw_blk.c
   3  *
   4  * Copyright (C) 1991, 1992 Linus Torvalds
   5  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
   6  */
   7 
   8 /*
   9  * This handles all read/write requests to block devices
  10  */
  11 #include <linux/sched.h>
  12 #include <linux/kernel.h>
  13 #include <linux/kernel_stat.h>
  14 #include <linux/errno.h>
  15 #include <linux/string.h>
  16 #include <linux/config.h>
  17 #include <linux/locks.h>
  18 #include <linux/mm.h>
  19 
  20 #include <asm/system.h>
  21 #include <asm/io.h>
  22 #include <linux/blk.h>
  23 
  24 /*
  25  * The request-struct contains all necessary data
   26  * to load a number of sectors into memory
  27  */
  28 static struct request all_requests[NR_REQUEST];
  29 
  30 /*
  31  * The "disk" task queue is used to start the actual requests
  32  * after a plug
  33  */
  34 DECLARE_TASK_QUEUE(tq_disk);
  35 
  36 /*
  37  * used to wait on when there are no free requests
  38  */
  39 struct wait_queue * wait_for_request = NULL;
  40 
  41 /* This specifies how many sectors to read ahead on the disk.  */
  42 
  43 int read_ahead[MAX_BLKDEV] = {0, };
  44 
  45 /* blk_dev_struct is:
  46  *      *request_fn
  47  *      *current_request
  48  */
  49 struct blk_dev_struct blk_dev[MAX_BLKDEV]; /* initialized by blk_dev_init() */
  50 
  51 /*
   52  * blk_size contains the size of all block-devices in units of
   53  * 1024-byte blocks:
  54  *
  55  * blk_size[MAJOR][MINOR]
  56  *
  57  * if (!blk_size[MAJOR]) then no minor size checking is done.
  58  */
  59 int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
  60 
  61 /*
   62  * blksize_size contains the block size used on each block device:
  63  *
  64  * blksize_size[MAJOR][MINOR]
  65  *
  66  * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
  67  */
  68 int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
  69 
  70 /*
  71  * hardsect_size contains the size of the hardware sector of a device.
  72  *
  73  * hardsect_size[MAJOR][MINOR]
  74  *
  75  * if (!hardsect_size[MAJOR])
  76  *              then 512 bytes is assumed.
  77  * else
  78  *              sector_size is hardsect_size[MAJOR][MINOR]
  79  * This is currently set by some scsi device and read by the msdos fs driver
   80  * This is currently set by some SCSI devices and read by the msdos fs
   81  * driver.  It might see other uses later.
  82 int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
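
For context, a driver of this era publishes its entries in these three
tables at initialization time.  A minimal sketch; the mydev_* names and
MYDEV_* constants are hypothetical, not from this file:

        static int mydev_sizes[MYDEV_MINORS];       /* device size, 1kB units */
        static int mydev_blocksizes[MYDEV_MINORS];  /* soft block size, bytes */

        int mydev_init(void)
        {
                int i;

                for (i = 0; i < MYDEV_MINORS; i++) {
                        mydev_sizes[i] = 720;       /* e.g. a 720kB device */
                        mydev_blocksizes[i] = 1024;
                }
                blk_size[MYDEV_MAJOR] = mydev_sizes;
                blksize_size[MYDEV_MAJOR] = mydev_blocksizes;
                /* hardsect_size[MYDEV_MAJOR] stays NULL: 512-byte sectors */
                return 0;
        }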
  83 
  84 /*
  85  * remove the plug and let it rip..
  86  */
  87 static void unplug_device(void * data)
  88 {
  89         struct blk_dev_struct * dev = (struct blk_dev_struct *) data;
  90         unsigned long flags;
  91 
  92         save_flags(flags);
  93         cli();
  94         dev->current_request = dev->plug.next;
  95         dev->plug.next = NULL;
  96         (dev->request_fn)();
  97         restore_flags(flags);
  98 }
  99 
 100 /*
 101  * "plug" the device if there are no outstanding requests: this will
 102  * force the transfer to start only after we have put all the requests
 103  * on the list.
 104  *
 105  * This is called with interrupts off and no requests on the queue.
 106  */
 107 static inline void plug_device(struct blk_dev_struct * dev)
 108 {
 109         dev->current_request = &dev->plug;
 110         queue_task_irq_off(&dev->plug_tq, &tq_disk);
 111 }
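
Putting the two halves together: plug_device() makes an empty queue look
busy by installing the dummy "plug" request, add_request() then queues real
requests behind it, and unplug_device(), queued on tq_disk, later splices
them onto current_request and starts the driver.  A condensed, illustrative
restatement of what make_request() and __get_request_wait() below actually
do:

        cli();
        if (!blk_dev[major].current_request)
                plug_device(blk_dev + major);   /* queue now looks busy */
        sti();
        /* ... add_request() links requests in behind the plug ... */
        run_task_queue(&tq_disk);       /* unplug_device() fires here and
                                           calls the driver's request_fn */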
 112 
 113 /*
 114  * look for a free request in the first N entries.
 115  * NOTE: interrupts must be disabled on the way in, and will still
 116  *       be disabled on the way out.
 117  */
 118 static inline struct request * get_request(int n, kdev_t dev)
 119 {
 120         static struct request *prev_found = NULL, *prev_limit = NULL;
 121         register struct request *req, *limit;
 122 
 123         if (n <= 0)
 124                 panic("get_request(%d): impossible!\n", n);
 125 
 126         limit = all_requests + n;
 127         if (limit != prev_limit) {
 128                 prev_limit = limit;
 129                 prev_found = all_requests;
 130         }
 131         req = prev_found;
 132         for (;;) {
 133                 req = ((req > all_requests) ? req : limit) - 1;
 134                 if (req->rq_status == RQ_INACTIVE)
 135                         break;
 136                 if (req == prev_found)
 137                         return NULL;
 138         }
 139         prev_found = req;
 140         req->rq_status = RQ_ACTIVE;
 141         req->rq_dev = dev;
 142         return req;
 143 }
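
The scan above is a circular sweep: it starts just below the slot found on
the previous call, walks downward, wraps from the bottom of the table back
to limit - 1, and gives up after one full cycle.  The same loop in
standalone (userspace) form, over an assumed four-slot table with slot 2
free:

        #include <stdio.h>

        #define NSLOTS 4

        int main(void)
        {
                int status[NSLOTS] = { 1, 1, 0, 1 };    /* 0 == inactive */
                int *first = status, *limit = status + NSLOTS;
                int *prev_found = status + 1;   /* where the last search ended */
                int *p = prev_found;

                for (;;) {
                        /* step down one slot, wrapping at the bottom */
                        p = ((p > first) ? p : limit) - 1;
                        if (*p == 0) {
                                printf("free slot %d\n", (int)(p - first));
                                return 0;
                        }
                        if (p == prev_found) {  /* full cycle: none free */
                                printf("no free slot\n");
                                return 1;
                        }
                }
        }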
 144 
 145 /*
 146  * wait until a free request in the first N entries is available.
 147  */
 148 static struct request * __get_request_wait(int n, kdev_t dev)
 149 {
 150         register struct request *req;
 151         struct wait_queue wait = { current, NULL };
 152 
 153         add_wait_queue(&wait_for_request, &wait);
 154         for (;;) {
 155                 current->state = TASK_UNINTERRUPTIBLE;
 156                 cli();
 157                 req = get_request(n, dev);
 158                 sti();
 159                 if (req)
 160                         break;
 161                 run_task_queue(&tq_disk);
 162                 schedule();
 163         }
 164         remove_wait_queue(&wait_for_request, &wait);
 165         current->state = TASK_RUNNING;
 166         return req;
 167 }
 168 
 169 static inline struct request * get_request_wait(int n, kdev_t dev)
 170 {
 171         register struct request *req;
 172 
 173         cli();
 174         req = get_request(n, dev);
 175         sti();
 176         if (req)
 177                 return req;
 178         return __get_request_wait(n, dev);
 179 }
 180 
 181 /* RO fail safe mechanism */
 182 
 183 static long ro_bits[MAX_BLKDEV][8];
 184 
 185 int is_read_only(kdev_t dev)
 186 {
 187         int minor,major;
 188 
 189         major = MAJOR(dev);
 190         minor = MINOR(dev);
 191         if (major < 0 || major >= MAX_BLKDEV) return 0;
 192         return ro_bits[major][minor >> 5] & (1 << (minor & 31));
 193 }
 194 
 195 void set_device_ro(kdev_t dev,int flag)
 196 {
 197         int minor,major;
 198 
 199         major = MAJOR(dev);
 200         minor = MINOR(dev);
 201         if (major < 0 || major >= MAX_BLKDEV) return;
 202         if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
 203         else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
 204 }
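
The two routines above keep one read-only bit per minor, packed into eight
longs per major (8 * 32 = 256 minors).  A standalone check of the index
arithmetic for an assumed minor number of 70:

        #include <stdio.h>

        int main(void)
        {
                long bits[8] = { 0 };
                int minor = 70;

                bits[minor >> 5] |= 1 << (minor & 31);  /* set_device_ro(dev, 1) */
                printf("word %d, bit %d, ro=%d\n", minor >> 5, minor & 31,
                       (bits[minor >> 5] & (1 << (minor & 31))) != 0);
                return 0;       /* prints: word 2, bit 6, ro=1 */
        }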
 205 
 206 static inline void drive_stat_acct(int cmd, unsigned long nr_sectors, short disk_index)
 207 {
 208         kstat.dk_drive[disk_index]++;
 209         if (cmd == READ) {
 210                 kstat.dk_drive_rio[disk_index]++;
 211                 kstat.dk_drive_rblk[disk_index] += nr_sectors;
 212         }
 213         else if (cmd == WRITE) {
 214                 kstat.dk_drive_wio[disk_index]++;
 215                 kstat.dk_drive_wblk[disk_index] += nr_sectors;
 216         } else
 217                 printk("drive_stat_acct: cmd not R/W?\n");
 218 }
 219 
 220 /*
 221  * add-request adds a request to the linked list.
 222  * It disables interrupts so that it can muck with the
 223  * request-lists in peace.
 224  *
 225  * By this point, req->cmd is always either READ/WRITE, never READA/WRITEA,
 226  * which is important for drive_stat_acct() above.
 227  */
 228 
 229 struct semaphore request_lock = MUTEX;
 230 
 231 void add_request(struct blk_dev_struct * dev, struct request * req)
 232 {
 233         struct request * tmp;
 234         short            disk_index;
 235 
 236         down (&request_lock);
 237         switch (MAJOR(req->rq_dev)) {
 238                 case SCSI_DISK_MAJOR:
 239                         disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
 240                         if (disk_index < 4)
 241                                 drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
 242                         break;
 243                 case IDE0_MAJOR:        /* same as HD_MAJOR */
 244                 case XT_DISK_MAJOR:
 245                         disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
 246                         drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
 247                         break;
 248                 case IDE1_MAJOR:
 249                         disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
 250                         drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
 251                 default:
 252                         break;
 253         }
 254 
 255         req->next = NULL;
 256         cli();
 257         if (req->bh && req->bh->b_dev==req->bh->b_rdev)
 258                 mark_buffer_clean(req->bh);
 259         if (!(tmp = dev->current_request)) {
 260                 dev->current_request = req;
 261                 up (&request_lock);
 262                 (dev->request_fn)();
 263                 sti();
 264                 return;
 265         }
 266         for ( ; tmp->next ; tmp = tmp->next) {
 267                 if ((IN_ORDER(tmp,req) ||
 268                     !IN_ORDER(tmp,tmp->next)) &&
 269                     IN_ORDER(req,tmp->next))
 270                         break;
 271         }
 272         req->next = tmp->next;
 273         tmp->next = req;
 274 
 275         up (&request_lock);
 276 /* for SCSI devices, call request_fn unconditionally */
 277         if (scsi_major(MAJOR(req->rq_dev)) && MAJOR(req->rq_dev)!=MD_MAJOR)
 278                 (dev->request_fn)();
 279 
 280         sti();
 281 }
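
The insertion loop above is the classic elevator: the new request goes in
after the first entry tmp where either the existing order already breaks
(!IN_ORDER(tmp, tmp->next)) or req fits between tmp and tmp->next; the head
entry is never displaced, since the driver may be working on it.  A
standalone sketch with IN_ORDER reduced to a plain sector comparison (the
real macro also orders by command and device before sector):

        #include <stdio.h>

        struct req { int sector; struct req *next; };
        #define IN_ORDER(s1, s2) ((s1)->sector <= (s2)->sector)

        static void add_sorted(struct req **head, struct req *req)
        {
                struct req *tmp = *head;

                req->next = NULL;
                if (!tmp) {
                        *head = req;
                        return;
                }
                for (; tmp->next; tmp = tmp->next)
                        if ((IN_ORDER(tmp, req) ||
                             !IN_ORDER(tmp, tmp->next)) &&
                            IN_ORDER(req, tmp->next))
                                break;
                req->next = tmp->next;
                tmp->next = req;
        }

        int main(void)
        {
                struct req pool[4] = { {40}, {10}, {30}, {20} };
                struct req *head = NULL, *p;
                int i;

                for (i = 0; i < 4; i++)
                        add_sorted(&head, &pool[i]);
                for (p = head; p; p = p->next)
                        printf("%d ", p->sector);
                printf("\n");   /* "40 10 20 30": the head never moves */
                return 0;
        }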
 282 
 283 static void make_request(int major,int rw, struct buffer_head * bh)
 284 {
 285         unsigned int sector, count;
 286         struct request * req;
 287         int rw_ahead, max_req;
 288 
 289         count = bh->b_size >> 9;
 290         sector = bh->b_blocknr * count;
 291         if (blk_size[major])
 292                 if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
 293                         bh->b_state = 0;
 294                         printk("attempt to access beyond end of device\n");
 295                         printk("%s: rw=%d, want=%d, limit=%d\n", kdevname(bh->b_dev),
 296                          rw, (sector + count)>>1, blk_size[major][MINOR(bh->b_dev)]);
 297                         return;
 298                 }
 299         /* Uhhuh.. Nasty dead-lock possible here.. */
 300         if (buffer_locked(bh))
 301                 return;
 302         /* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */
 303         lock_buffer(bh);
 304 
 305         rw_ahead = 0;   /* normal case; gets changed below for READA/WRITEA */
 306         switch (rw) {
 307                 case READA:
 308                         rw_ahead = 1;
 309                         rw = READ;      /* drop into READ */
 310                 case READ:
 311                         if (buffer_uptodate(bh)) {
 312                                 unlock_buffer(bh); /* Hmmph! Already have it */
 313                                 return;
 314                         }
 315                         kstat.pgpgin++;
 316                         max_req = (major == MD_MAJOR) ? NR_REQUEST/2 : NR_REQUEST;      /* reads take precedence */
 317                         break;
 318                 case WRITEA:
 319                         rw_ahead = 1;
 320                         rw = WRITE;     /* drop into WRITE */
 321                 case WRITE:
 322                         if (!buffer_dirty(bh)) {
 323                                 unlock_buffer(bh); /* Hmmph! Nothing to write */
 324                                 return;
 325                         }
 326                         /* We don't allow the write-requests to fill up the
 327                          * queue completely:  we want some room for reads,
 328                          * as they take precedence. The last third of the
 329                          * requests are only for reads.
 330                          */
 331                         kstat.pgpgout++;
 332                         max_req =  (major == MD_MAJOR) ? NR_REQUEST/3 : (NR_REQUEST * 2) / 3;
 333                         break;
 334                 default:
 335                         printk("make_request: bad block dev cmd, must be R/W/RA/WA\n");
 336                         unlock_buffer(bh);
 337                         return;
 338         }
 339 
 340 /* look for a free request. */
 341         down (&request_lock);
 342 
 343         /*
 344          * Try to coalesce the new request with old requests
 345          */
 346         cli();
 347         req = blk_dev[major].current_request;
 348         if (!req) {
 349                 plug_device(blk_dev + major);
 350         } else switch (major) {
 351              case IDE0_MAJOR:   /* same as HD_MAJOR */
 352              case IDE1_MAJOR:
 353              case FLOPPY_MAJOR:
 354              case IDE2_MAJOR:
 355              case IDE3_MAJOR:
 356                 /*
 357                  * The scsi disk and cdrom drivers completely remove the request
 358                  * from the queue when they start processing an entry.  For this
 359                  * reason it is safe to continue to add links to the top entry for
 360                  * those devices.
 361                  *
 362                  * All other drivers need to jump over the first entry, as that
 363                  * entry may be busy being processed and we thus can't change it.
 364                  */
 365                 req = req->next;
 366                 if (!req)
 367                         break;
 368                 /* fall through */
 369 
 370              case SCSI_DISK_MAJOR:
 371              case SCSI_CDROM_MAJOR:
 372              case MD_MAJOR:
 373 
 374                 do {
 375                         if (req->sem)
 376                                 continue;
 377                         if (req->cmd != rw)
 378                                 continue;
 379                         if (req->nr_sectors >= 244)
 380                                 continue;
 381                         if (req->rq_dev != bh->b_dev)
 382                                 continue;
 383                         /* Can we add it to the end of this request? */
 384                         if (req->sector + req->nr_sectors == sector) {
 385                                 req->bhtail->b_reqnext = bh;
 386                                 req->bhtail = bh;
 387                         /* or to the beginning? */
 388                         } else if (req->sector - count == sector) {
 389                                 bh->b_reqnext = req->bh;
 390                                 req->bh = bh;
 391                                 req->buffer = bh->b_data;
 392                                 req->current_nr_sectors = count;
 393                                 req->sector = sector;
 394                         } else
 395                                 continue;
 396 
 397                         req->nr_sectors += count;
 398                         mark_buffer_clean(bh);
 399                         up (&request_lock);
 400                         sti();
 401                         return;
 402                 } while ((req = req->next) != NULL);
 403         }
 404 
 405         up (&request_lock);
 406         
 407 /* find an unused request. */
 408         req = get_request(max_req, bh->b_dev);
 409         sti();
 410 
 411 /* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
 412         if (!req) {
 413                 if (rw_ahead) {
 414                         unlock_buffer(bh);
 415                         return;
 416                 }
 417                 req = __get_request_wait(max_req, bh->b_dev);
 418         }
 419 
 420 /* fill up the request-info, and add it to the queue */
 421         req->cmd = rw;
 422         req->errors = 0;
 423         req->sector = sector;
 424         req->nr_sectors = count;
 425         req->current_nr_sectors = count;
 426         req->buffer = bh->b_data;
 427         req->sem = NULL;
 428         req->bh = bh;
 429         req->bhtail = bh;
 430         req->next = NULL;
 431         add_request(major+blk_dev,req);
 432 }
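
To make the two coalescing tests concrete: with 1024-byte buffers, count is
1024 >> 9 == 2, so a pending request covering sectors [100,108) absorbs a
buffer_head at sector 108 at its tail, or one at sector 98 at its head.  In
standalone form:

        #include <stdio.h>

        int main(void)
        {
                unsigned int req_sector = 100, req_nr = 8;  /* [100,108) */
                unsigned int count = 1024 >> 9;             /* b_size >> 9 */
                unsigned int sector;

                sector = 108;
                if (req_sector + req_nr == sector)          /* back merge  */
                        printf("tail: now [100,%u)\n", sector + count);

                sector = 98;
                if (req_sector - count == sector)           /* front merge */
                        printf("head: now [%u,108)\n", sector);
                return 0;
        }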
 433 
 434 #ifdef CONFIG_BLK_DEV_MD
 435 
 436 struct request *get_md_request (int max_req, kdev_t dev)
 437 {
 438   return (get_request_wait (max_req, dev));
 439 }
 440 
 441 #endif
 442 
 443 /*
 444  * Swap partitions are now read via brw_page.  ll_rw_page is an
 445  * asynchronous function now --- we must call wait_on_page afterwards
 446  * if synchronous IO is required.  
 447  */
 448 void ll_rw_page(int rw, kdev_t dev, unsigned long page, char * buffer)
 449 {
 450         unsigned int major = MAJOR(dev);
 451         int block = page;
 452         
 453         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 454                 printk("Trying to read nonexistent block-device %s (%ld)\n",
 455                        kdevname(dev), page);
 456                 return;
 457         }
 458         switch (rw) {
 459                 case READ:
 460                         break;
 461                 case WRITE:
 462                         if (is_read_only(dev)) {
 463                                 printk("Can't page to read-only device %s\n",
 464                                         kdevname(dev));
 465                                 return;
 466                         }
 467                         break;
 468                 default:
 469                         panic("ll_rw_page: bad block dev cmd, must be R/W");
 470         }
 471         if (mem_map[MAP_NR(buffer)].locked)
 472                 panic ("ll_rw_page: page already locked");
 473         mem_map[MAP_NR(buffer)].locked = 1;
 474         brw_page(rw, (unsigned long) buffer, dev, &block, PAGE_SIZE, 0);
 475 }
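
Per the comment above, a caller that needs synchronous behaviour must wait
on the page itself afterwards.  A hedged sketch, assuming the wait_on_page()
helper the comment refers to takes the buffer's struct page:

        ll_rw_page(READ, dev, pagenr, buffer);
        wait_on_page(mem_map + MAP_NR(buffer)); /* sleep until I/O unlocks it */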
 476 
 477 /* This function can be used to request a number of buffers from a block
 478    device. Currently the only restriction is that all buffers must belong to
  479    the same device. */
 480 
 481 void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
 482 {
 483         unsigned int major;
 484         int correct_size;
 485         struct blk_dev_struct * dev;
 486         int i;
 487 
 488         /* Make sure that the first block contains something reasonable */
 489         while (!*bh) {
 490                 bh++;
 491                 if (--nr <= 0)
 492                         return;
  493         }
 494 
 495         dev = NULL;
 496         if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
 497                 dev = blk_dev + major;
 498         if (!dev || !dev->request_fn) {
 499                 printk(
 500         "ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
 501                 kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
 502                 goto sorry;
 503         }
 504 
 505         /* Determine correct block size for this device.  */
 506         correct_size = BLOCK_SIZE;
 507         if (blksize_size[major]) {
 508                 i = blksize_size[major][MINOR(bh[0]->b_dev)];
 509                 if (i)
 510                         correct_size = i;
 511         }
 512 
 513         /* Verify requested block sizes.  */
 514         for (i = 0; i < nr; i++) {
 515                 if (bh[i] && bh[i]->b_size != correct_size) {
 516                         printk("ll_rw_block: device %s: "
 517                                "only %d-char blocks implemented (%lu)\n",
 518                                kdevname(bh[0]->b_dev),
 519                                correct_size, bh[i]->b_size);
 520                         goto sorry;
 521                 }
 522         }
 523 
 524         if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
 525                 printk("Can't write to read-only device %s\n",
 526                        kdevname(bh[0]->b_dev));
 527                 goto sorry;
 528         }
 529 
 530         for (i = 0; i < nr; i++) {
 531                 if (bh[i]) {
 532                         set_bit(BH_Req, &bh[i]->b_state);
 533 
 534                         /* Md needs this for error recovery */
 535                         bh[i]->b_rdev = bh[i]->b_dev;
 536 
 537                         make_request(major, rw, bh[i]);
 538                 }
 539         }
 540         return;
 541 
 542       sorry:
 543         for (i = 0; i < nr; i++) {
 544                 if (bh[i]) {
 545                         clear_bit(BH_Dirty, &bh[i]->b_state);
 546                         clear_bit(BH_Uptodate, &bh[i]->b_state);
 547                 }
 548         }
 549         return;
 550 }
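
The canonical caller of ll_rw_block() is the buffer cache's bread(); the
synchronous read-one-block pattern, condensed from fs/buffer.c of the same
era, looks like this:

        struct buffer_head *bh = getblk(dev, block, size);

        if (!buffer_uptodate(bh)) {
                ll_rw_block(READ, 1, &bh);
                wait_on_buffer(bh);     /* completion unlocks the buffer */
        }
        if (!buffer_uptodate(bh)) {     /* the I/O failed */
                brelse(bh);
                bh = NULL;
        }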
 551 
 552 void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
 553 {
 554         int i, j;
 555         int buffersize;
 556         struct request * req[8];
 557         unsigned int major = MAJOR(dev);
 558         struct semaphore sem = MUTEX_LOCKED;
 559 
 560         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 561                 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
 562                 return;
 563         }
 564         switch (rw) {
 565                 case READ:
 566                         break;
 567                 case WRITE:
 568                         if (is_read_only(dev)) {
 569                                 printk("Can't swap to read-only device %s\n",
 570                                         kdevname(dev));
 571                                 return;
 572                         }
 573                         break;
 574                 default:
 575                         panic("ll_rw_swap: bad block dev cmd, must be R/W");
 576         }
 577         buffersize = PAGE_SIZE / nb;
 578 
 579         for (j=0, i=0; i<nb;)
 580         {
 581                 for (; j < 8 && i < nb; j++, i++, buf += buffersize)
 582                 {
 583                         if (j == 0) {
 584                                 req[j] = get_request_wait(NR_REQUEST, dev);
 585                         } else {
 586                                 cli();
 587                                 req[j] = get_request(NR_REQUEST, dev);
 588                                 sti();
 589                                 if (req[j] == NULL)
 590                                         break;
 591                         }
 592                         req[j]->cmd = rw;
 593                         req[j]->errors = 0;
 594                         req[j]->sector = (b[i] * buffersize) >> 9;
 595                         req[j]->nr_sectors = buffersize >> 9;
 596                         req[j]->current_nr_sectors = buffersize >> 9;
 597                         req[j]->buffer = buf;
 598                         req[j]->sem = &sem;
 599                         req[j]->bh = NULL;
 600                         req[j]->next = NULL;
 601                         add_request(major+blk_dev,req[j]);
 602                 }
 603                 run_task_queue(&tq_disk);
 604                 while (j > 0) {
 605                         j--;
 606                         down(&sem);
 607                 }
 608         }
 609 }
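
A worked example of the request arithmetic above, assuming PAGE_SIZE is 4096
and nb is 4 (so buffersize is 1024, i.e. two 512-byte sectors): swap block
b[i] == 7 produces

        sector             = (7 * 1024) >> 9  ==  14
        nr_sectors         = 1024 >> 9        ==   2
        current_nr_sectors = 1024 >> 9        ==   2

so each of the nb blocks becomes one two-sector request, issued in batches
of at most eight, with a shared semaphore downed once per request to wait
for completion.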
 610 
 611 int blk_dev_init(void)
 612 {
 613         struct request * req;
 614         struct blk_dev_struct *dev;
 615 
 616         for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
 617                 dev->request_fn      = NULL;
 618                 dev->current_request = NULL;
 619                 dev->plug.rq_status  = RQ_INACTIVE;
 620                 dev->plug.cmd        = -1;
 621                 dev->plug.next       = NULL;
 622                 dev->plug_tq.routine = &unplug_device;
 623                 dev->plug_tq.data    = dev;
 624         }
 625 
 626         req = all_requests + NR_REQUEST;
 627         while (--req >= all_requests) {
 628                 req->rq_status = RQ_INACTIVE;
 629                 req->next = NULL;
 630         }
 631         memset(ro_bits,0,sizeof(ro_bits));
 632 #ifdef CONFIG_BLK_DEV_RAM
 633         rd_init();
 634 #endif
 635 #ifdef CONFIG_BLK_DEV_LOOP
 636         loop_init();
 637 #endif
 638 #ifdef CONFIG_BLK_DEV_IDE
  639         ide_init();             /* this MUST precede hd_init */
 640 #endif
 641 #ifdef CONFIG_BLK_DEV_HD
 642         hd_init();
 643 #endif
 644 #ifdef CONFIG_BLK_DEV_XD
 645         xd_init();
 646 #endif
 647 #ifdef CONFIG_BLK_DEV_FD
 648         floppy_init();
 649 #else
 650         outb_p(0xc, 0x3f2);
 651 #endif
  652 #ifdef CONFIG_CDI_INIT
  653         cdi_init();
  654 #endif /* CONFIG_CDI_INIT */
  655 #ifdef CONFIG_CDU31A
  656         cdu31a_init();
  657 #endif /* CONFIG_CDU31A */
  658 #ifdef CONFIG_MCD
  659         mcd_init();
  660 #endif /* CONFIG_MCD */
  661 #ifdef CONFIG_MCDX
  662         mcdx_init();
  663 #endif /* CONFIG_MCDX */
  664 #ifdef CONFIG_SBPCD
  665         sbpcd_init();
  666 #endif /* CONFIG_SBPCD */
  667 #ifdef CONFIG_AZTCD
  668         aztcd_init();
  669 #endif /* CONFIG_AZTCD */
  670 #ifdef CONFIG_CDU535
  671         sony535_init();
  672 #endif /* CONFIG_CDU535 */
  673 #ifdef CONFIG_GSCD
  674         gscd_init();
  675 #endif /* CONFIG_GSCD */
  676 #ifdef CONFIG_CM206
  677         cm206_init();
  678 #endif
  679 #ifdef CONFIG_OPTCD
  680         optcd_init();
  681 #endif /* CONFIG_OPTCD */
  682 #ifdef CONFIG_SJCD
  683         sjcd_init();
  684 #endif /* CONFIG_SJCD */
  685 #ifdef CONFIG_BLK_DEV_MD
  686         md_init();
  687 #endif /* CONFIG_BLK_DEV_MD */
 688         return 0;
 689 }
