root/drivers/block/ll_rw_blk.c


DEFINITIONS

This source file includes the following definitions:
  1. plug_device
  2. unplug_device
  3. get_request
  4. __get_request_wait
  5. get_request_wait
  6. is_read_only
  7. set_device_ro
  8. drive_stat_acct
  9. add_request
  10. make_request
  11. get_md_request
  12. ll_rw_page
  13. ll_rw_block
  14. ll_rw_swap_file
  15. blk_dev_init

   1 /*
   2  *  linux/drivers/block/ll_rw_blk.c
   3  *
   4  * Copyright (C) 1991, 1992 Linus Torvalds
   5  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
   6  */
   7 
   8 /*
   9  * This handles all read/write requests to block devices
  10  */
  11 #include <linux/sched.h>
  12 #include <linux/kernel.h>
  13 #include <linux/kernel_stat.h>
  14 #include <linux/errno.h>
  15 #include <linux/string.h>
  16 #include <linux/config.h>
  17 #include <linux/locks.h>
  18 #include <linux/mm.h>
  19 
  20 #include <asm/system.h>
  21 #include <asm/io.h>
  22 #include <linux/blk.h>
  23 
  24 /*
  25  * The request-struct contains all necessary data
  26  * to load a nr of sectors into memory
  27  */
  28 static struct request all_requests[NR_REQUEST];
  29 
  30 /*
  31  * used to wait on when there are no free requests
  32  */
  33 struct wait_queue * wait_for_request = NULL;
  34 
  35 /* This specifies how many sectors to read ahead on the disk.  */
  36 
  37 int read_ahead[MAX_BLKDEV] = {0, };
  38 
  39 /* blk_dev_struct is:
  40  *      *request_fn
  41  *      *current_request
  42  */
  43 struct blk_dev_struct blk_dev[MAX_BLKDEV]; /* initialized by blk_dev_init() */
  44 
  45 /*
   46  * blk_size contains the size of all block-devices in units of
   47  * 1024 bytes (i.e. 1k blocks):
  48  *
  49  * blk_size[MAJOR][MINOR]
  50  *
  51  * if (!blk_size[MAJOR]) then no minor size checking is done.
  52  */
  53 int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
  54 
  55 /*
  56  * blksize_size contains the size of all block-devices:
  57  *
  58  * blksize_size[MAJOR][MINOR]
  59  *
  60  * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
  61  */
  62 int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
  63 
  64 /*
  65  * hardsect_size contains the size of the hardware sector of a device.
  66  *
  67  * hardsect_size[MAJOR][MINOR]
  68  *
  69  * if (!hardsect_size[MAJOR])
  70  *              then 512 bytes is assumed.
  71  * else
  72  *              sector_size is hardsect_size[MAJOR][MINOR]
   73  * This is currently set by some scsi devices and read by the msdos fs driver.
   74  * It might find other uses later.
  75  */
  76 int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
  77 
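As a rough illustration of how these three tables get used, a block driver's
initialization code typically fills in per-minor arrays and hangs them off its
major number.  The sketch below is not part of this file; HYPO_MAJOR, the
minor count and the sizes are all hypothetical.

        static int hypo_sizes[4];       /* device size for each minor, in 1024-byte units */
        static int hypo_blksizes[4];    /* software block size for each minor, in bytes */

        static void hypo_geometry_init(void)
        {
                int i;

                for (i = 0; i < 4; i++) {
                        hypo_sizes[i] = 20480;          /* 20 MB per unit */
                        hypo_blksizes[i] = 1024;        /* 1k blocks */
                }
                blk_size[HYPO_MAJOR] = hypo_sizes;
                blksize_size[HYPO_MAJOR] = hypo_blksizes;
                /* hardsect_size[HYPO_MAJOR] left NULL: 512-byte hardware sectors assumed */
        }
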
  78 /*
  79  * "plug" the device if there are no outstanding requests: this will
  80  * force the transfer to start only after we have put all the requests
  81  * on the list.
  82  */
  83 static inline void plug_device(struct blk_dev_struct * dev, struct request * plug)
  84 {
  85         unsigned long flags;
  86 
  87         plug->rq_status = RQ_INACTIVE;
  88         plug->cmd = -1;
  89         plug->next = NULL;
  90         save_flags(flags);
  91         cli();
  92         if (!dev->current_request)
  93                 dev->current_request = plug;
  94         restore_flags(flags);
  95 }
  96 
  97 /*
  98  * remove the plug and let it rip..
  99  */
 100 static inline void unplug_device(struct blk_dev_struct * dev)
 101 {
 102         struct request * req;
 103         unsigned long flags;
 104 
 105         save_flags(flags);
 106         cli();
 107         req = dev->current_request;
 108         if (req && req->rq_status == RQ_INACTIVE && req->cmd == -1) {
 109                 dev->current_request = req->next;
 110                 (dev->request_fn)();
 111         }
 112         restore_flags(flags);
 113 }
 114 
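The intended calling pattern (ll_rw_block() below uses exactly this) is to
queue a stack-allocated dummy request to hold the driver off, add the real
requests behind it, then unplug.  A compressed sketch, with HYPO_MAJOR
standing in for a real major number:

        struct request plug;
        struct blk_dev_struct *dev = blk_dev + HYPO_MAJOR;

        plug_device(dev, &plug);        /* request_fn() will not run while the plug is queued */
        /* ... make_request()/add_request() calls queue the real work behind the plug ... */
        unplug_device(dev);             /* pop the plug and kick request_fn() */
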
 115 /*
 116  * look for a free request in the first N entries.
 117  * NOTE: interrupts must be disabled on the way in, and will still
 118  *       be disabled on the way out.
 119  */
 120 static inline struct request * get_request(int n, kdev_t dev)
 121 {
 122         static struct request *prev_found = NULL, *prev_limit = NULL;
 123         register struct request *req, *limit;
 124 
 125         if (n <= 0)
 126                 panic("get_request(%d): impossible!\n", n);
 127 
 128         limit = all_requests + n;
 129         if (limit != prev_limit) {
 130                 prev_limit = limit;
 131                 prev_found = all_requests;
 132         }
 133         req = prev_found;
 134         for (;;) {
 135                 req = ((req > all_requests) ? req : limit) - 1;
 136                 if (req->rq_status == RQ_INACTIVE)
 137                         break;
 138                 if (req == prev_found)
 139                         return NULL;
 140         }
 141         prev_found = req;
 142         req->rq_status = RQ_ACTIVE;
 143         req->rq_dev = dev;
 144         return req;
 145 }
 146 
 147 /*
 148  * wait until a free request in the first N entries is available.
 149  */
 150 static struct request * __get_request_wait(int n, kdev_t dev)
 151 {
 152         register struct request *req;
 153         struct wait_queue wait = { current, NULL };
 154 
 155         add_wait_queue(&wait_for_request, &wait);
 156         for (;;) {
 157                 unplug_device(MAJOR(dev)+blk_dev);
 158                 current->state = TASK_UNINTERRUPTIBLE;
 159                 cli();
 160                 req = get_request(n, dev);
 161                 sti();
 162                 if (req)
 163                         break;
 164                 schedule();
 165         }
 166         remove_wait_queue(&wait_for_request, &wait);
 167         current->state = TASK_RUNNING;
 168         return req;
 169 }
 170 
 171 static inline struct request * get_request_wait(int n, kdev_t dev)
 172 {
 173         register struct request *req;
 174 
 175         cli();
 176         req = get_request(n, dev);
 177         sti();
 178         if (req)
 179                 return req;
 180         return __get_request_wait(n, dev);
 181 }
 182 
 183 /* RO fail safe mechanism */
 184 
 185 static long ro_bits[MAX_BLKDEV][8];
 186 
 187 int is_read_only(kdev_t dev)
 188 {
 189         int minor,major;
 190 
 191         major = MAJOR(dev);
 192         minor = MINOR(dev);
 193         if (major < 0 || major >= MAX_BLKDEV) return 0;
 194         return ro_bits[major][minor >> 5] & (1 << (minor & 31));
 195 }
 196 
 197 void set_device_ro(kdev_t dev,int flag)
 198 {
 199         int minor,major;
 200 
 201         major = MAJOR(dev);
 202         minor = MINOR(dev);
 203         if (major < 0 || major >= MAX_BLKDEV) return;
 204         if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
 205         else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
 206 }
 207 
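The read-only state is one bit per minor: minor >> 5 selects the long inside
ro_bits[major] and minor & 31 selects the bit within it.  A small usage
sketch, with a made-up device number:

        kdev_t dev = MKDEV(SCSI_DISK_MAJOR, 37);        /* hypothetical minor 37 */

        set_device_ro(dev, 1);  /* 37 >> 5 == 1, 37 & 31 == 5: sets bit 5 of ro_bits[major][1] */
        if (is_read_only(dev))
                printk("minor 37 is read-only\n");
        set_device_ro(dev, 0);  /* clears the same bit again */
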
 208 static inline void drive_stat_acct(int cmd, unsigned long nr_sectors, short disk_index)
 209 {
 210         kstat.dk_drive[disk_index]++;
 211         if (cmd == READ) {
 212                 kstat.dk_drive_rio[disk_index]++;
 213                 kstat.dk_drive_rblk[disk_index] += nr_sectors;
 214         }
 215         else if (cmd == WRITE) {
 216                 kstat.dk_drive_wio[disk_index]++;
 217                 kstat.dk_drive_wblk[disk_index] += nr_sectors;
 218         } else
 219                 printk("drive_stat_acct: cmd not R/W?\n");
 220 }
 221 
 222 /*
 223  * add-request adds a request to the linked list.
 224  * It disables interrupts so that it can muck with the
 225  * request-lists in peace.
 226  *
 227  * By this point, req->cmd is always either READ/WRITE, never READA/WRITEA,
 228  * which is important for drive_stat_acct() above.
 229  */
 230 
 231 struct semaphore request_lock = MUTEX;
 232 
 233 void add_request(struct blk_dev_struct * dev, struct request * req)
 234 {
 235         struct request * tmp;
 236         short            disk_index;
 237 
 238         down (&request_lock);
 239         switch (MAJOR(req->rq_dev)) {
 240                 case SCSI_DISK_MAJOR:
 241                         disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
 242                         if (disk_index < 4)
 243                                 drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
 244                         break;
 245                 case IDE0_MAJOR:        /* same as HD_MAJOR */
 246                 case XT_DISK_MAJOR:
 247                         disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
 248                         drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
 249                         break;
 250                 case IDE1_MAJOR:
 251                         disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
 252                         drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
 253                 default:
 254                         break;
 255         }
 256 
 257         req->next = NULL;
 258         cli();
 259         if (req->bh && req->bh->b_dev==req->bh->b_rdev)
 260                 mark_buffer_clean(req->bh);
 261         if (!(tmp = dev->current_request)) {
 262                 dev->current_request = req;
 263                 up (&request_lock);
 264                 (dev->request_fn)();
 265                 sti();
 266                 return;
 267         }
 268         for ( ; tmp->next ; tmp = tmp->next) {
 269                 if ((IN_ORDER(tmp,req) ||
 270                     !IN_ORDER(tmp,tmp->next)) &&
 271                     IN_ORDER(req,tmp->next))
 272                         break;
 273         }
 274         req->next = tmp->next;
 275         tmp->next = req;
 276 
 277         up (&request_lock);
 278 /* for SCSI devices, call request_fn unconditionally */
 279         if (scsi_major(MAJOR(req->rq_dev)) && MAJOR(req->rq_dev)!=MD_MAJOR)
 280                 (dev->request_fn)();
 281 
 282         sti();
 283 }
 284 
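The insertion loop above walks the queue until the new request sorts between
two neighbours according to IN_ORDER(), which is defined in <linux/blk.h>.
The sketch below is a simplified stand-in for the usual ordering (command
first, then device, then ascending sector); it illustrates the idea and is
not the macro's actual text.

        /* simplified stand-in for IN_ORDER(), for illustration only */
        static inline int in_order_sketch(struct request *a, struct request *b)
        {
                if (a->cmd != b->cmd)
                        return a->cmd < b->cmd;
                if (a->rq_dev != b->rq_dev)
                        return a->rq_dev < b->rq_dev;
                return a->sector < b->sector;
        }
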
 285 static void make_request(int major,int rw, struct buffer_head * bh)
 286 {
 287         unsigned int sector, count;
 288         struct request * req;
 289         int rw_ahead, max_req;
 290 
 291         count = bh->b_size >> 9;
 292         sector = bh->b_blocknr * count;
 293         if (blk_size[major])
 294                 if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
 295                         bh->b_state = 0;
 296                         printk("attempt to access beyond end of device\n");
 297                         return;
 298                 }
 299         /* Uhhuh.. Nasty dead-lock possible here.. */
 300         if (buffer_locked(bh))
 301                 return;
 302         /* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */
 303         lock_buffer(bh);
 304 
 305         rw_ahead = 0;   /* normal case; gets changed below for READA/WRITEA */
 306         switch (rw) {
 307                 case READA:
 308                         rw_ahead = 1;
 309                         rw = READ;      /* drop into READ */
 310                 case READ:
 311                         if (buffer_uptodate(bh)) {
 312                                 unlock_buffer(bh); /* Hmmph! Already have it */
 313                                 return;
 314                         }
 315                         kstat.pgpgin++;
 316                         max_req = NR_REQUEST;   /* reads take precedence */
 317                         break;
 318                 case WRITEA:
 319                         rw_ahead = 1;
 320                         rw = WRITE;     /* drop into WRITE */
 321                 case WRITE:
 322                         if (!buffer_dirty(bh)) {
 323                                 unlock_buffer(bh); /* Hmmph! Nothing to write */
 324                                 return;
 325                         }
 326                         /* We don't allow the write-requests to fill up the
 327                          * queue completely:  we want some room for reads,
 328                          * as they take precedence. The last third of the
 329                          * requests are only for reads.
 330                          */
 331                         kstat.pgpgout++;
 332                         max_req = (NR_REQUEST * 2) / 3;
 333                         break;
 334                 default:
 335                         printk("make_request: bad block dev cmd, must be R/W/RA/WA\n");
 336                         unlock_buffer(bh);
 337                         return;
 338         }
 339 
 340 /* look for a free request. */
 341         cli();
 342         down (&request_lock);
 343 
 344 /* The scsi disk and cdrom drivers completely remove the request
 345  * from the queue when they start processing an entry.  For this reason
 346  * it is safe to continue to add links to the top entry for those devices.
 347  */
 348         if ((   major == IDE0_MAJOR     /* same as HD_MAJOR */
 349              || major == IDE1_MAJOR
 350              || major == MD_MAJOR
 351              || major == FLOPPY_MAJOR
 352              || major == SCSI_DISK_MAJOR
 353              || major == SCSI_CDROM_MAJOR
 354              || major == IDE2_MAJOR
 355              || major == IDE3_MAJOR)
 356             && (req = blk_dev[major].current_request))
 357         {
 358                 if (major != SCSI_DISK_MAJOR && major != SCSI_CDROM_MAJOR)
 359                         req = req->next;
 360 
 361                 while (req) {
 362                         if (req->rq_dev == bh->b_dev &&
 363                             !req->sem &&
 364                             req->cmd == rw &&
 365                             req->sector + req->nr_sectors == sector &&
 366                             req->nr_sectors < 244)
 367                         {
 368                                 req->bhtail->b_reqnext = bh;
 369                                 req->bhtail = bh;
 370                                 req->nr_sectors += count;
 371                                 mark_buffer_clean(bh);
 372                                 up (&request_lock);
 373                                 sti();
 374                                 return;
 375                         }
 376 
 377                         if (req->rq_dev == bh->b_dev &&
 378                             !req->sem &&
 379                             req->cmd == rw &&
 380                             req->sector - count == sector &&
 381                             req->nr_sectors < 244)
 382                         {
 383                                 req->nr_sectors += count;
 384                                 bh->b_reqnext = req->bh;
 385                                 req->buffer = bh->b_data;
 386                                 req->current_nr_sectors = count;
 387                                 req->sector = sector;
 388                                 mark_buffer_clean(bh);
 389                                 req->bh = bh;
 390                                 up (&request_lock);
 391                                 sti();
 392                                 return;
 393                         }    
 394 
 395                         req = req->next;
 396                 }
 397         }
 398 
 399         up (&request_lock);
 400         
 401 /* find an unused request. */
 402         req = get_request(max_req, bh->b_dev);
 403         sti();
 404 
 405 /* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
 406         if (!req) {
 407                 if (rw_ahead) {
 408                         unlock_buffer(bh);
 409                         return;
 410                 }
 411                 req = __get_request_wait(max_req, bh->b_dev);
 412         }
 413 
 414 /* fill up the request-info, and add it to the queue */
 415         req->cmd = rw;
 416         req->errors = 0;
 417         req->sector = sector;
 418         req->nr_sectors = count;
 419         req->current_nr_sectors = count;
 420         req->buffer = bh->b_data;
 421         req->sem = NULL;
 422         req->bh = bh;
 423         req->bhtail = bh;
 424         req->next = NULL;
 425         add_request(major+blk_dev,req);
 426 }
 427 
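A worked example of the arithmetic at the top of make_request(), using
made-up numbers: a 1024-byte buffer spans two 512-byte sectors, so block 100
starts at sector 200, and the end-of-device test converts the request's end
back into the 1024-byte units that blk_size[][] is kept in.

        /* hypothetical values, for illustration only */
        unsigned int b_size    = 1024, b_blocknr = 100;
        unsigned int count     = b_size >> 9;            /* 2 sectors per block */
        unsigned int sector    = b_blocknr * count;      /* first sector: 200 */
        unsigned int end_in_kb = (sector + count) >> 1;  /* request end in 1k units: 101 */
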
 428 #ifdef CONFIG_BLK_DEV_MD
 429 
 430 struct request *get_md_request (int max_req, kdev_t dev)
 431 {
 432   return (get_request_wait (max_req, dev));
 433 }
 434 
 435 #endif
 436 
 437 /*
 438  * Swap partitions are now read via brw_page.  ll_rw_page is an
 439  * asynchronous function now --- we must call wait_on_page afterwards
 440  * if synchronous IO is required.  
 441  */
 442 void ll_rw_page(int rw, kdev_t dev, unsigned long page, char * buffer)
 443 {
 444         unsigned int major = MAJOR(dev);
 445         int block = page;
 446         
 447         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 448                 printk("Trying to read nonexistent block-device %s (%ld)\n",
 449                        kdevname(dev), page);
 450                 return;
 451         }
 452         switch (rw) {
 453                 case READ:
 454                         break;
 455                 case WRITE:
 456                         if (is_read_only(dev)) {
 457                                 printk("Can't page to read-only device %s\n",
 458                                         kdevname(dev));
 459                                 return;
 460                         }
 461                         break;
 462                 default:
 463                         panic("ll_rw_page: bad block dev cmd, must be R/W");
 464         }
 465         if (mem_map[MAP_NR(buffer)].locked)
 466                 panic ("ll_rw_page: page already locked");
 467         mem_map[MAP_NR(buffer)].locked = 1;
 468         brw_page(rw, (unsigned long) buffer, dev, &block, PAGE_SIZE, 0);
 469 }
 470 
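Because ll_rw_page() only queues the transfer, a caller that needs the data
before it continues has to wait on the page afterwards, as the comment above
says.  A minimal usage sketch, assuming swap_dev, swap_page and a
page-aligned page_buf have already been set up by the caller:

        ll_rw_page(READ, swap_dev, swap_page, page_buf);
        wait_on_page(mem_map + MAP_NR(page_buf));       /* sleep until brw_page() completes */
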
 471 /* This function can be used to request a number of buffers from a block
 472    device. Currently the only restriction is that all buffers must belong to
 473    the same device */
 474 
 475 void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
 476 {
 477         unsigned int major;
 478         struct request plug;
 479         int correct_size;
 480         struct blk_dev_struct * dev;
 481         int i;
 482 
 483         /* Make sure that the first block contains something reasonable */
 484         while (!*bh) {
 485                 bh++;
 486                 if (--nr <= 0)
 487                         return;
 488         };
 489 
 490         dev = NULL;
 491         if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
 492                 dev = blk_dev + major;
 493         if (!dev || !dev->request_fn) {
 494                 printk(
 495         "ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
 496                 kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
 497                 goto sorry;
 498         }
 499 
 500         /* Determine correct block size for this device.  */
 501         correct_size = BLOCK_SIZE;
 502         if (blksize_size[major]) {
 503                 i = blksize_size[major][MINOR(bh[0]->b_dev)];
 504                 if (i)
 505                         correct_size = i;
 506         }
 507 
 508         /* Verify requested block sizes.  */
 509         for (i = 0; i < nr; i++) {
 510                 if (bh[i] && bh[i]->b_size != correct_size) {
 511                         printk("ll_rw_block: device %s: "
 512                                "only %d-char blocks implemented (%lu)\n",
 513                                kdevname(bh[0]->b_dev),
 514                                correct_size, bh[i]->b_size);
 515                         goto sorry;
 516                 }
 517         }
 518 
 519         if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
 520                 printk("Can't write to read-only device %s\n",
 521                        kdevname(bh[0]->b_dev));
 522                 goto sorry;
 523         }
 524 
 525         /* If there are no pending requests for this device, then we insert
 526            a dummy request for that device.  This will prevent the request
 527            from starting until we have shoved all of the blocks into the
 528            queue, and then we let it rip.  */
 529 
 530         if (nr > 1)
 531                 plug_device(dev, &plug);
 532         for (i = 0; i < nr; i++) {
 533                 if (bh[i]) {
 534                         set_bit(BH_Req, &bh[i]->b_state);
 535 
 536                         /* Md needs this for error recovery */
 537                         bh[i]->b_rdev = bh[i]->b_dev;
 538 
 539                         make_request(major, rw, bh[i]);
 540                 }
 541         }
 542         unplug_device(dev);
 543         return;
 544 
 545       sorry:
 546         for (i = 0; i < nr; i++) {
 547                 if (bh[i]) {
 548                         clear_bit(BH_Dirty, &bh[i]->b_state);
 549                         clear_bit(BH_Uptodate, &bh[i]->b_state);
 550                 }
 551         }
 552         return;
 553 }
 554 
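A typical caller (bread() in fs/buffer.c works roughly this way) grabs the
buffer, queues the read and sleeps until the driver unlocks it.  The dev,
block and size values below are made up:

        struct buffer_head *bh = getblk(dev, block, 1024);

        if (!buffer_uptodate(bh)) {
                ll_rw_block(READ, 1, &bh);      /* queue the read */
                wait_on_buffer(bh);             /* sleep until the I/O completes */
        }
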
 555 void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
 556 {
 557         int i, j;
 558         int buffersize;
 559         struct request * req[8];
 560         unsigned int major = MAJOR(dev);
 561         struct semaphore sem = MUTEX_LOCKED;
 562 
 563         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 564                 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
 565                 return;
 566         }
 567         switch (rw) {
 568                 case READ:
 569                         break;
 570                 case WRITE:
 571                         if (is_read_only(dev)) {
 572                                 printk("Can't swap to read-only device %s\n",
 573                                         kdevname(dev));
 574                                 return;
 575                         }
 576                         break;
 577                 default:
 578                         panic("ll_rw_swap: bad block dev cmd, must be R/W");
 579         }
 580         buffersize = PAGE_SIZE / nb;
 581 
 582         for (j=0, i=0; i<nb;)
 583         {
 584                 for (; j < 8 && i < nb; j++, i++, buf += buffersize)
 585                 {
 586                         if (j == 0) {
 587                                 req[j] = get_request_wait(NR_REQUEST, dev);
 588                         } else {
 589                                 cli();
 590                                 req[j] = get_request(NR_REQUEST, dev);
 591                                 sti();
 592                                 if (req[j] == NULL)
 593                                         break;
 594                         }
 595                         req[j]->cmd = rw;
 596                         req[j]->errors = 0;
 597                         req[j]->sector = (b[i] * buffersize) >> 9;
 598                         req[j]->nr_sectors = buffersize >> 9;
 599                         req[j]->current_nr_sectors = buffersize >> 9;
 600                         req[j]->buffer = buf;
 601                         req[j]->sem = &sem;
 602                         req[j]->bh = NULL;
 603                         req[j]->next = NULL;
 604                         add_request(major+blk_dev,req[j]);
 605                 }
 606                 while (j > 0) {
 607                         j--;
 608                         down(&sem);
 609                 }
 610         }
 611 }
 612 
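Worked example of the request set-up above, with made-up numbers: a
4096-byte page split into nb = 4 pieces gives buffersize = 1024, so swap
block b[i] = 7 starts at sector (7 * 1024) >> 9 = 14 and each of the up to
eight in-flight requests covers 1024 >> 9 = 2 sectors.
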
 613 int blk_dev_init(void)
 614 {
 615         struct request * req;
 616         struct blk_dev_struct *dev;
 617 
 618         for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
 619                 dev->request_fn      = NULL;
 620                 dev->current_request = NULL;
 621         }
 622 
 623         req = all_requests + NR_REQUEST;
 624         while (--req >= all_requests) {
 625                 req->rq_status = RQ_INACTIVE;
 626                 req->next = NULL;
 627         }
 628         memset(ro_bits,0,sizeof(ro_bits));
 629 #ifdef CONFIG_BLK_DEV_RAM
 630         rd_init();
 631 #endif
 632 #ifdef CONFIG_BLK_DEV_LOOP
 633         loop_init();
 634 #endif
 635 #ifdef CONFIG_BLK_DEV_IDE
  636         ide_init();             /* this MUST precede hd_init */
 637 #endif
 638 #ifdef CONFIG_BLK_DEV_HD
 639         hd_init();
 640 #endif
 641 #ifdef CONFIG_BLK_DEV_XD
 642         xd_init();
 643 #endif
 644 #ifdef CONFIG_BLK_DEV_FD
 645         floppy_init();
 646 #else
 647         outb_p(0xc, 0x3f2);
 648 #endif
 649 #ifdef CONFIG_CDI_INIT
 650         cdi_init();
  651 #endif /* CONFIG_CDI_INIT */
 652 #ifdef CONFIG_CDU31A
 653         cdu31a_init();
  654 #endif /* CONFIG_CDU31A */
 655 #ifdef CONFIG_MCD
 656         mcd_init();
  657 #endif /* CONFIG_MCD */
 658 #ifdef CONFIG_MCDX
 659         mcdx_init();
  660 #endif /* CONFIG_MCDX */
 661 #ifdef CONFIG_SBPCD
 662         sbpcd_init();
  663 #endif /* CONFIG_SBPCD */
 664 #ifdef CONFIG_AZTCD
 665         aztcd_init();
  666 #endif /* CONFIG_AZTCD */
 667 #ifdef CONFIG_CDU535
 668         sony535_init();
  669 #endif /* CONFIG_CDU535 */
 670 #ifdef CONFIG_GSCD
 671         gscd_init();
  672 #endif /* CONFIG_GSCD */
 673 #ifdef CONFIG_CM206
 674         cm206_init();
 675 #endif
 676 #ifdef CONFIG_OPTCD
 677         optcd_init();
  678 #endif /* CONFIG_OPTCD */
 679 #ifdef CONFIG_SJCD
 680         sjcd_init();
  681 #endif /* CONFIG_SJCD */
 682 #ifdef CONFIG_BLK_DEV_MD
 683         md_init();
  684 #endif /* CONFIG_BLK_DEV_MD */
 685         return 0;
 686 }
