root/drivers/block/ll_rw_blk.c


DEFINITIONS

This source file includes the following definitions.
  1. unplug_device
  2. plug_device
  3. get_request
  4. __get_request_wait
  5. get_request_wait
  6. is_read_only
  7. set_device_ro
  8. drive_stat_acct
  9. add_request
  10. make_request
  11. get_md_request
  12. ll_rw_page
  13. ll_rw_block
  14. ll_rw_swap_file
  15. blk_dev_init

   1 /*
   2  *  linux/drivers/block/ll_rw_blk.c
   3  *
   4  * Copyright (C) 1991, 1992 Linus Torvalds
   5  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
   6  */
   7 
   8 /*
   9  * This handles all read/write requests to block devices
  10  */
  11 #include <linux/sched.h>
  12 #include <linux/kernel.h>
  13 #include <linux/kernel_stat.h>
  14 #include <linux/errno.h>
  15 #include <linux/string.h>
  16 #include <linux/config.h>
  17 #include <linux/locks.h>
  18 #include <linux/mm.h>
  19 
  20 #include <asm/system.h>
  21 #include <asm/io.h>
  22 #include <linux/blk.h>
  23 
  24 /*
  25  * The request-struct contains all necessary data
   26  * to load a number of sectors into memory
  27  */
  28 static struct request all_requests[NR_REQUEST];
  29 
  30 /*
  31  * used to wait on when there are no free requests
  32  */
  33 struct wait_queue * wait_for_request = NULL;
  34 
  35 /* This specifies how many sectors to read ahead on the disk.  */
  36 
  37 int read_ahead[MAX_BLKDEV] = {0, };
  38 
  39 /* blk_dev_struct is:
  40  *      *request_fn
  41  *      *current_request
  42  */
  43 struct blk_dev_struct blk_dev[MAX_BLKDEV]; /* initialized by blk_dev_init() */
  44 
  45 /*
   46  * blk_size contains the size of all block-devices in units of 1024
   47  * bytes (i.e. 1k blocks):
  48  *
  49  * blk_size[MAJOR][MINOR]
  50  *
  51  * if (!blk_size[MAJOR]) then no minor size checking is done.
  52  */
  53 int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
  54 
  55 /*
  56  * blksize_size contains the size of all block-devices:
  57  *
  58  * blksize_size[MAJOR][MINOR]
  59  *
  60  * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
  61  */
  62 int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
  63 
  64 /*
  65  * hardsect_size contains the size of the hardware sector of a device.
  66  *
  67  * hardsect_size[MAJOR][MINOR]
  68  *
  69  * if (!hardsect_size[MAJOR])
  70  *              then 512 bytes is assumed.
  71  * else
  72  *              sector_size is hardsect_size[MAJOR][MINOR]
   73  * This is currently set by some SCSI devices and read by the msdos fs
   74  * driver.  It might see more uses later.
  75  */
  76 int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
  77 
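/*
 * A minimal sketch of how a block driver might fill in the tables above
 * from its init code.  EX_MAJOR and the ex_* names below are hypothetical
 * and used for illustration only; they are not defined in the kernel.
 */
#if 0
static int ex_sizes[4];         /* per-minor device size, in 1024-byte units */
static int ex_blksizes[4];      /* per-minor soft block size, in bytes       */
static int ex_hardsects[4];     /* per-minor hardware sector size, in bytes  */

static void ex_geninit(void)
{
        int i;

        for (i = 0; i < 4; i++) {
                ex_sizes[i]     = 20480;        /* 20 MB */
                ex_blksizes[i]  = 1024;
                ex_hardsects[i] = 512;
        }
        blk_size[EX_MAJOR]      = ex_sizes;
        blksize_size[EX_MAJOR]  = ex_blksizes;
        hardsect_size[EX_MAJOR] = ex_hardsects;
        read_ahead[EX_MAJOR]    = 8;            /* read-ahead, in sectors */
}
#endif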
  78 /*
  79  * remove the plug and let it rip..
  80  */
  81 static void unplug_device(void * data)
  82 {
  83         struct blk_dev_struct * dev = (struct blk_dev_struct *) data;
  84         unsigned long flags;
  85 
  86         save_flags(flags);
  87         cli();
  88         if (dev->current_request)
  89                 (dev->request_fn)();
  90         restore_flags(flags);
  91 }
  92 
  93 /*
  94  * "plug" the device if there are no outstanding requests: this will
  95  * force the transfer to start only after we have put all the requests
  96  * on the list.
  97  */
  98 static inline void plug_device(struct blk_dev_struct * dev)
  99 {
 100         if (!dev->current_request && !IS_PLUGGED(dev)) {
 101                 queue_task_irq_off(&dev->plug_tq, &tq_scheduler);
 102         }
 103 }
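
/*
 * The usual calling pattern (see ll_rw_block() below): plug the device,
 * queue all the buffers with make_request()/add_request(), and return.
 * The transfer then starts when tq_scheduler runs unplug_device().
 */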
 104 
 105 /*
 106  * look for a free request in the first N entries.
 107  * NOTE: interrupts must be disabled on the way in, and will still
 108  *       be disabled on the way out.
 109  */
 110 static inline struct request * get_request(int n, kdev_t dev)
 111 {
 112         static struct request *prev_found = NULL, *prev_limit = NULL;
 113         register struct request *req, *limit;
 114 
 115         if (n <= 0)
 116                 panic("get_request(%d): impossible!\n", n);
 117 
 118         limit = all_requests + n;
 119         if (limit != prev_limit) {
 120                 prev_limit = limit;
 121                 prev_found = all_requests;
 122         }
 123         req = prev_found;
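        /* Scan backwards, circularly, starting just below the previously
         * found slot; give up after one full pass. */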
 124         for (;;) {
 125                 req = ((req > all_requests) ? req : limit) - 1;
 126                 if (req->rq_status == RQ_INACTIVE)
 127                         break;
 128                 if (req == prev_found)
 129                         return NULL;
 130         }
 131         prev_found = req;
 132         req->rq_status = RQ_ACTIVE;
 133         req->rq_dev = dev;
 134         return req;
 135 }
 136 
 137 /*
 138  * wait until a free request in the first N entries is available.
 139  */
 140 static struct request * __get_request_wait(int n, kdev_t dev)
 141 {
 142         register struct request *req;
 143         struct wait_queue wait = { current, NULL };
 144 
 145         add_wait_queue(&wait_for_request, &wait);
 146         for (;;) {
 147                 current->state = TASK_UNINTERRUPTIBLE;
 148                 cli();
 149                 req = get_request(n, dev);
 150                 sti();
 151                 if (req)
 152                         break;
 153                 schedule();
 154         }
 155         remove_wait_queue(&wait_for_request, &wait);
 156         current->state = TASK_RUNNING;
 157         return req;
 158 }
 159 
 160 static inline struct request * get_request_wait(int n, kdev_t dev)
 161 {
 162         register struct request *req;
 163 
 164         cli();
 165         req = get_request(n, dev);
 166         sti();
 167         if (req)
 168                 return req;
 169         return __get_request_wait(n, dev);
 170 }
 171 
 172 /* RO fail safe mechanism */
 173 
 174 static long ro_bits[MAX_BLKDEV][8];
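/* One bit per minor: long index = minor >> 5, bit = minor & 31, so the
 * eight 32-bit groups cover all 256 possible minors of a major. */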
 175 
 176 int is_read_only(kdev_t dev)
 177 {
 178         int minor,major;
 179 
 180         major = MAJOR(dev);
 181         minor = MINOR(dev);
 182         if (major < 0 || major >= MAX_BLKDEV) return 0;
 183         return ro_bits[major][minor >> 5] & (1 << (minor & 31));
 184 }
 185 
 186 void set_device_ro(kdev_t dev,int flag)
 187 {
 188         int minor,major;
 189 
 190         major = MAJOR(dev);
 191         minor = MINOR(dev);
 192         if (major < 0 || major >= MAX_BLKDEV) return;
 193         if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
 194         else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
 195 }
 196 
 197 static inline void drive_stat_acct(int cmd, unsigned long nr_sectors, short disk_index)
 198 {
 199         kstat.dk_drive[disk_index]++;
 200         if (cmd == READ) {
 201                 kstat.dk_drive_rio[disk_index]++;
 202                 kstat.dk_drive_rblk[disk_index] += nr_sectors;
 203         }
 204         else if (cmd == WRITE) {
 205                 kstat.dk_drive_wio[disk_index]++;
 206                 kstat.dk_drive_wblk[disk_index] += nr_sectors;
 207         } else
 208                 printk("drive_stat_acct: cmd not R/W?\n");
 209 }
 210 
 211 /*
 212  * add-request adds a request to the linked list.
 213  * It disables interrupts so that it can muck with the
 214  * request-lists in peace.
 215  *
 216  * By this point, req->cmd is always either READ/WRITE, never READA/WRITEA,
 217  * which is important for drive_stat_acct() above.
 218  */
 219 
 220 struct semaphore request_lock = MUTEX;
 221 
 222 void add_request(struct blk_dev_struct * dev, struct request * req)
 223 {
 224         struct request * tmp;
 225         short            disk_index;
 226 
 227         down (&request_lock);
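        /* Map the device onto one of the four kernel_stat disk slots:
         * IDE0/XT drives use slots 0-1, IDE1 drives slots 2-3, and the
         * first four SCSI disks use slots 0-3. */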
 228         switch (MAJOR(req->rq_dev)) {
 229                 case SCSI_DISK_MAJOR:
 230                         disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
 231                         if (disk_index < 4)
 232                                 drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
 233                         break;
 234                 case IDE0_MAJOR:        /* same as HD_MAJOR */
 235                 case XT_DISK_MAJOR:
 236                         disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
 237                         drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
 238                         break;
 239                 case IDE1_MAJOR:
 240                         disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
 241                         drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
 242                 default:
 243                         break;
 244         }
 245 
 246         req->next = NULL;
 247         cli();
 248         if (req->bh && req->bh->b_dev==req->bh->b_rdev)
 249                 mark_buffer_clean(req->bh);
 250         if (!(tmp = dev->current_request)) {
 251                 dev->current_request = req;
 252                 up (&request_lock);
 253                 if (!IS_PLUGGED(dev))
 254                         (dev->request_fn)();
 255                 sti();
 256                 return;
 257         }
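        /* Elevator-style insertion: scan the queue and insert the new request
         * at the first point where IN_ORDER sorting is preserved (or where
         * the existing list itself is already out of order). */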
 258         for ( ; tmp->next ; tmp = tmp->next) {
 259                 if ((IN_ORDER(tmp,req) ||
 260                     !IN_ORDER(tmp,tmp->next)) &&
 261                     IN_ORDER(req,tmp->next))
 262                         break;
 263         }
 264         req->next = tmp->next;
 265         tmp->next = req;
 266 
 267         up (&request_lock);
 268 /* for SCSI devices, call request_fn unconditionally */
 269         if (!IS_PLUGGED(dev) && scsi_major(MAJOR(req->rq_dev)) && MAJOR(req->rq_dev)!=MD_MAJOR)
 270                 (dev->request_fn)();
 271 
 272         sti();
 273 }
 274 
 275 static void make_request(int major,int rw, struct buffer_head * bh)
 276 {
 277         unsigned int sector, count;
 278         struct request * req;
 279         int rw_ahead, max_req;
 280 
 281         count = bh->b_size >> 9;
 282         sector = bh->b_blocknr * count;
 283         if (blk_size[major])
 284                 if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
 285                         bh->b_state = 0;
 286                         printk("attempt to access beyond end of device\n");
 287                         printk("%s: rw=%d, want=%d, limit=%d\n", kdevname(bh->b_dev),
 288                          rw, (sector + count)>>1, blk_size[major][MINOR(bh->b_dev)]);
 289                         return;
 290                 }
 291         /* Uhhuh.. Nasty dead-lock possible here.. */
 292         if (buffer_locked(bh))
 293                 return;
 294         /* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */
 295         lock_buffer(bh);
 296 
 297         rw_ahead = 0;   /* normal case; gets changed below for READA/WRITEA */
 298         switch (rw) {
 299                 case READA:
 300                         rw_ahead = 1;
 301                         rw = READ;      /* drop into READ */
 302                 case READ:
 303                         if (buffer_uptodate(bh)) {
 304                                 unlock_buffer(bh); /* Hmmph! Already have it */
 305                                 return;
 306                         }
 307                         kstat.pgpgin++;
 308                         max_req = NR_REQUEST;   /* reads take precedence */
 309                         break;
 310                 case WRITEA:
 311                         rw_ahead = 1;
 312                         rw = WRITE;     /* drop into WRITE */
 313                 case WRITE:
 314                         if (!buffer_dirty(bh)) {
 315                                 unlock_buffer(bh); /* Hmmph! Nothing to write */
 316                                 return;
 317                         }
 318                         /* We don't allow the write-requests to fill up the
 319                          * queue completely:  we want some room for reads,
 320                          * as they take precedence. The last third of the
 321                          * requests are only for reads.
 322                          */
 323                         kstat.pgpgout++;
 324                         max_req = (NR_REQUEST * 2) / 3;
 325                         break;
 326                 default:
 327                         printk("make_request: bad block dev cmd, must be R/W/RA/WA\n");
 328                         unlock_buffer(bh);
 329                         return;
 330         }
 331 
 332 /* look for a free request. */
 333         down (&request_lock);
 334         cli();
 335 
 336 /* The scsi disk and cdrom drivers completely remove the request
 337  * from the queue when they start processing an entry.  For this reason
 338  * it is safe to continue to add links to the top entry for those devices.
 339  */
 340         if ((   major == IDE0_MAJOR     /* same as HD_MAJOR */
 341              || major == IDE1_MAJOR
 342              || major == MD_MAJOR
 343              || major == FLOPPY_MAJOR
 344              || major == SCSI_DISK_MAJOR
 345              || major == SCSI_CDROM_MAJOR
 346              || major == IDE2_MAJOR
 347              || major == IDE3_MAJOR)
 348             && (req = blk_dev[major].current_request))
 349         {
 350                 if (major != SCSI_DISK_MAJOR &&
 351                     major != SCSI_CDROM_MAJOR &&
 352                     major != MD_MAJOR)
 353                         req = req->next;
 354 
 355                 while (req) {
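                        /* Back merge: the new buffer starts exactly where
                         * this request ends, so append it at the tail. */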
 356                         if (req->rq_dev == bh->b_dev &&
 357                             !req->sem &&
 358                             req->cmd == rw &&
 359                             req->sector + req->nr_sectors == sector &&
 360                             req->nr_sectors < 244)
 361                         {
 362                                 req->bhtail->b_reqnext = bh;
 363                                 req->bhtail = bh;
 364                                 req->nr_sectors += count;
 365                                 mark_buffer_clean(bh);
 366                                 up (&request_lock);
 367                                 sti();
 368                                 return;
 369                         }
 370 
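                        /* Front merge: the new buffer ends exactly where this
                         * request starts, so put it at the head and move the
                         * request's start sector back. */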
 371                         if (req->rq_dev == bh->b_dev &&
 372                             !req->sem &&
 373                             req->cmd == rw &&
 374                             req->sector - count == sector &&
 375                             req->nr_sectors < 244)
 376                         {
 377                                 req->nr_sectors += count;
 378                                 bh->b_reqnext = req->bh;
 379                                 req->buffer = bh->b_data;
 380                                 req->current_nr_sectors = count;
 381                                 req->sector = sector;
 382                                 mark_buffer_clean(bh);
 383                                 req->bh = bh;
 384                                 up (&request_lock);
 385                                 sti();
 386                                 return;
 387                         }    
 388 
 389                         req = req->next;
 390                 }
 391         }
 392 
 393         up (&request_lock);
 394         
 395 /* find an unused request. */
 396         req = get_request(max_req, bh->b_dev);
 397         sti();
 398 
 399 /* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
 400         if (!req) {
 401                 if (rw_ahead) {
 402                         unlock_buffer(bh);
 403                         return;
 404                 }
 405                 req = __get_request_wait(max_req, bh->b_dev);
 406         }
 407 
 408 /* fill up the request-info, and add it to the queue */
 409         req->cmd = rw;
 410         req->errors = 0;
 411         req->sector = sector;
 412         req->nr_sectors = count;
 413         req->current_nr_sectors = count;
 414         req->buffer = bh->b_data;
 415         req->sem = NULL;
 416         req->bh = bh;
 417         req->bhtail = bh;
 418         req->next = NULL;
 419         add_request(major+blk_dev,req);
 420 }
 421 
 422 #ifdef CONFIG_BLK_DEV_MD
 423 
 424 struct request *get_md_request (int max_req, kdev_t dev)
 425 {
 426   return (get_request_wait (max_req, dev));
 427 }
 428 
 429 #endif
 430 
 431 /*
 432  * Swap partitions are now read via brw_page.  ll_rw_page is an
 433  * asynchronous function now --- we must call wait_on_page afterwards
 434  * if synchronous IO is required.  
 435  */
 436 void ll_rw_page(int rw, kdev_t dev, unsigned long page, char * buffer)
 437 {
 438         unsigned int major = MAJOR(dev);
 439         int block = page;
 440         
 441         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 442                 printk("Trying to read nonexistent block-device %s (%ld)\n",
 443                        kdevname(dev), page);
 444                 return;
 445         }
 446         switch (rw) {
 447                 case READ:
 448                         break;
 449                 case WRITE:
 450                         if (is_read_only(dev)) {
 451                                 printk("Can't page to read-only device %s\n",
 452                                         kdevname(dev));
 453                                 return;
 454                         }
 455                         break;
 456                 default:
 457                         panic("ll_rw_page: bad block dev cmd, must be R/W");
 458         }
 459         if (mem_map[MAP_NR(buffer)].locked)
 460                 panic ("ll_rw_page: page already locked");
 461         mem_map[MAP_NR(buffer)].locked = 1;
 462         brw_page(rw, (unsigned long) buffer, dev, &block, PAGE_SIZE, 0);
 463 }
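
/*
 * A minimal sketch of the synchronous use of ll_rw_page() described above:
 * start the page I/O, then sleep on the page's mem_map entry.  The wrapper
 * name ex_rw_page_sync is hypothetical, and wait_on_page() is assumed to
 * take the mem_map entry for the buffer.
 */
#if 0
static void ex_rw_page_sync(int rw, kdev_t dev, unsigned long page, char * buffer)
{
        ll_rw_page(rw, dev, page, buffer);              /* asynchronous */
        wait_on_page(mem_map + MAP_NR(buffer));         /* wait for I/O */
}
#endif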
 464 
 465 /* This function can be used to request a number of buffers from a block
 466    device. Currently the only restriction is that all buffers must belong to
 467    the same device */
 468 
 469 void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
 470 {
 471         unsigned int major;
 472         int correct_size;
 473         struct blk_dev_struct * dev;
 474         int i;
 475 
 476         /* Make sure that the first block contains something reasonable */
 477         while (!*bh) {
 478                 bh++;
 479                 if (--nr <= 0)
 480                         return;
 481         };
 482 
 483         dev = NULL;
 484         if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
 485                 dev = blk_dev + major;
 486         if (!dev || !dev->request_fn) {
 487                 printk(
 488         "ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
 489                 kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
 490                 goto sorry;
 491         }
 492 
 493         /* Determine correct block size for this device.  */
 494         correct_size = BLOCK_SIZE;
 495         if (blksize_size[major]) {
 496                 i = blksize_size[major][MINOR(bh[0]->b_dev)];
 497                 if (i)
 498                         correct_size = i;
 499         }
 500 
 501         /* Verify requested block sizes.  */
 502         for (i = 0; i < nr; i++) {
 503                 if (bh[i] && bh[i]->b_size != correct_size) {
 504                         printk("ll_rw_block: device %s: "
 505                                "only %d-char blocks implemented (%lu)\n",
 506                                kdevname(bh[0]->b_dev),
 507                                correct_size, bh[i]->b_size);
 508                         goto sorry;
 509                 }
 510         }
 511 
 512         if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
 513                 printk("Can't write to read-only device %s\n",
 514                        kdevname(bh[0]->b_dev));
 515                 goto sorry;
 516         }
 517 
 518         /* If there are no pending requests for this device, then we insert
 519            a dummy request for that device.  This will prevent the request
 520            from starting until we have shoved all of the blocks into the
 521            queue, and then we let it rip.  */
 522 
 523         plug_device(dev);
 524         for (i = 0; i < nr; i++) {
 525                 if (bh[i]) {
 526                         set_bit(BH_Req, &bh[i]->b_state);
 527 
 528                         /* Md needs this for error recovery */
 529                         bh[i]->b_rdev = bh[i]->b_dev;
 530 
 531                         make_request(major, rw, bh[i]);
 532                 }
 533         }
 534         return;
 535 
 536       sorry:
 537         for (i = 0; i < nr; i++) {
 538                 if (bh[i]) {
 539                         clear_bit(BH_Dirty, &bh[i]->b_state);
 540                         clear_bit(BH_Uptodate, &bh[i]->b_state);
 541                 }
 542         }
 543         return;
 544 }
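
/*
 * A minimal usage sketch for ll_rw_block(): a synchronous, single-block
 * read along the lines of bread() in fs/buffer.c.  The wrapper shown here
 * (ex_read_block) is hypothetical and only illustrates the calling sequence.
 */
#if 0
static struct buffer_head * ex_read_block(kdev_t dev, int block, int size)
{
        struct buffer_head * bh = getblk(dev, block, size);

        if (!bh)
                return NULL;
        if (buffer_uptodate(bh))
                return bh;              /* already in the buffer cache */
        ll_rw_block(READ, 1, &bh);      /* queue the request           */
        wait_on_buffer(bh);             /* sleep until the I/O is done */
        if (buffer_uptodate(bh))
                return bh;
        brelse(bh);                     /* I/O error                   */
        return NULL;
}
#endif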
 545 
 546 void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
 547 {
 548         int i, j;
 549         int buffersize;
 550         struct request * req[8];
 551         unsigned int major = MAJOR(dev);
 552         struct semaphore sem = MUTEX_LOCKED;
 553 
 554         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 555                 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
 556                 return;
 557         }
 558         switch (rw) {
 559                 case READ:
 560                         break;
 561                 case WRITE:
 562                         if (is_read_only(dev)) {
 563                                 printk("Can't swap to read-only device %s\n",
 564                                         kdevname(dev));
 565                                 return;
 566                         }
 567                         break;
 568                 default:
 569                         panic("ll_rw_swap: bad block dev cmd, must be R/W");
 570         }
 571         buffersize = PAGE_SIZE / nb;
 572 
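        /* Queue up to eight requests at a time; each completed request does
         * an up() on `sem', so the down() loop below waits for the whole
         * batch before building the next one. */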
 573         for (j=0, i=0; i<nb;)
 574         {
 575                 for (; j < 8 && i < nb; j++, i++, buf += buffersize)
 576                 {
 577                         if (j == 0) {
 578                                 req[j] = get_request_wait(NR_REQUEST, dev);
 579                         } else {
 580                                 cli();
 581                                 req[j] = get_request(NR_REQUEST, dev);
 582                                 sti();
 583                                 if (req[j] == NULL)
 584                                         break;
 585                         }
 586                         req[j]->cmd = rw;
 587                         req[j]->errors = 0;
 588                         req[j]->sector = (b[i] * buffersize) >> 9;
 589                         req[j]->nr_sectors = buffersize >> 9;
 590                         req[j]->current_nr_sectors = buffersize >> 9;
 591                         req[j]->buffer = buf;
 592                         req[j]->sem = &sem;
 593                         req[j]->bh = NULL;
 594                         req[j]->next = NULL;
 595                         add_request(major+blk_dev,req[j]);
 596                 }
 597                 while (j > 0) {
 598                         j--;
 599                         down(&sem);
 600                 }
 601         }
 602 }
 603 
 604 int blk_dev_init(void)
 605 {
 606         struct request * req;
 607         struct blk_dev_struct *dev;
 608 
 609         for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
 610                 dev->request_fn      = NULL;
 611                 dev->current_request = NULL;
 612                 dev->plug_tq.routine = &unplug_device;
 613                 dev->plug_tq.data    = dev;
 614         }
 615 
 616         req = all_requests + NR_REQUEST;
 617         while (--req >= all_requests) {
 618                 req->rq_status = RQ_INACTIVE;
 619                 req->next = NULL;
 620         }
 621         memset(ro_bits,0,sizeof(ro_bits));
 622 #ifdef CONFIG_BLK_DEV_RAM
 623         rd_init();
 624 #endif
 625 #ifdef CONFIG_BLK_DEV_LOOP
 626         loop_init();
 627 #endif
 628 #ifdef CONFIG_BLK_DEV_IDE
  629         ide_init();             /* this MUST precede hd_init */
 630 #endif
 631 #ifdef CONFIG_BLK_DEV_HD
 632         hd_init();
 633 #endif
 634 #ifdef CONFIG_BLK_DEV_XD
 635         xd_init();
 636 #endif
 637 #ifdef CONFIG_BLK_DEV_FD
 638         floppy_init();
 639 #else
 640         outb_p(0xc, 0x3f2);
 641 #endif
 642 #ifdef CONFIG_CDI_INIT
 643         cdi_init();
  644 #endif /* CONFIG_CDI_INIT */
  645 #ifdef CONFIG_CDU31A
  646         cdu31a_init();
  647 #endif /* CONFIG_CDU31A */
  648 #ifdef CONFIG_MCD
  649         mcd_init();
  650 #endif /* CONFIG_MCD */
  651 #ifdef CONFIG_MCDX
  652         mcdx_init();
  653 #endif /* CONFIG_MCDX */
  654 #ifdef CONFIG_SBPCD
  655         sbpcd_init();
  656 #endif /* CONFIG_SBPCD */
  657 #ifdef CONFIG_AZTCD
  658         aztcd_init();
  659 #endif /* CONFIG_AZTCD */
  660 #ifdef CONFIG_CDU535
  661         sony535_init();
  662 #endif /* CONFIG_CDU535 */
  663 #ifdef CONFIG_GSCD
  664         gscd_init();
  665 #endif /* CONFIG_GSCD */
  666 #ifdef CONFIG_CM206
  667         cm206_init();
  668 #endif
  669 #ifdef CONFIG_OPTCD
  670         optcd_init();
  671 #endif /* CONFIG_OPTCD */
  672 #ifdef CONFIG_SJCD
  673         sjcd_init();
  674 #endif /* CONFIG_SJCD */
  675 #ifdef CONFIG_BLK_DEV_MD
  676         md_init();
  677 #endif /* CONFIG_BLK_DEV_MD */
 678         return 0;
 679 }
