root/drivers/block/ll_rw_blk.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. plug_device
  2. unplug_device
  3. get_request
  4. __get_request_wait
  5. get_request_wait
  6. is_read_only
  7. set_device_ro
  8. drive_stat_acct
  9. add_request
  10. make_request
  11. ll_rw_page
  12. ll_rw_block
  13. ll_rw_swap_file
  14. blk_dev_init

   1 /*
   2  *  linux/drivers/block/ll_rw_blk.c
   3  *
   4  * Copyright (C) 1991, 1992 Linus Torvalds
   5  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
   6  */
   7 
   8 /*
   9  * This handles all read/write requests to block devices
  10  */
  11 #include <linux/sched.h>
  12 #include <linux/kernel.h>
  13 #include <linux/kernel_stat.h>
  14 #include <linux/errno.h>
  15 #include <linux/string.h>
  16 #include <linux/config.h>
  17 #include <linux/locks.h>
  18 #include <linux/mm.h>
  19 
  20 #include <asm/system.h>
  21 #include <asm/io.h>
  22 #include <linux/blk.h>
  23 
  24 /*
  25  * The request-struct contains all necessary data
  26  * to load a nr of sectors into memory
  27  */
  28 static struct request all_requests[NR_REQUEST];
  29 
  30 /*
  31  * used to wait on when there are no free requests
  32  */
  33 struct wait_queue * wait_for_request = NULL;
  34 
  35 /* This specifies how many sectors to read ahead on the disk.  */
  36 
  37 int read_ahead[MAX_BLKDEV] = {0, };
  38 
  39 /* blk_dev_struct is:
  40  *      do_request-address
  41  *      next-request
  42  */
  43 struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
  44         { NULL, NULL },         /* 0 no_dev */
  45         { NULL, NULL },         /* 1 dev mem */
  46         { NULL, NULL },         /* 2 dev fd */
  47         { NULL, NULL },         /* 3 dev ide0 or hd */
  48         { NULL, NULL },         /* 4 dev ttyx */
  49         { NULL, NULL },         /* 5 dev tty */
  50         { NULL, NULL },         /* 6 dev lp */
  51         { NULL, NULL },         /* 7 dev pipes */
  52         { NULL, NULL },         /* 8 dev sd */
  53         { NULL, NULL },         /* 9 dev st */
  54         { NULL, NULL },         /* 10 */
  55         { NULL, NULL },         /* 11 */
  56         { NULL, NULL },         /* 12 */
  57         { NULL, NULL },         /* 13 */
  58         { NULL, NULL },         /* 14 */
  59         { NULL, NULL },         /* 15 */
  60         { NULL, NULL },         /* 16 */
  61         { NULL, NULL },         /* 17 */
  62         { NULL, NULL },         /* 18 */
  63         { NULL, NULL },         /* 19 */
  64         { NULL, NULL },         /* 20 */
  65         { NULL, NULL },         /* 21 */
  66         { NULL, NULL }          /* 22 dev ide1 */
  67 };
  68 
  69 /*
  70  * blk_size contains the size of all block-devices in units of 1024 byte
  71  * sectors:
  72  *
  73  * blk_size[MAJOR][MINOR]
  74  *
  75  * if (!blk_size[MAJOR]) then no minor size checking is done.
  76  */
  77 int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
  78 
  79 /*
  80  * blksize_size contains the size of all block-devices:
  81  *
  82  * blksize_size[MAJOR][MINOR]
  83  *
  84  * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
  85  */
  86 int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
  87 
  88 /*
  89  * hardsect_size contains the size of the hardware sector of a device.
  90  *
  91  * hardsect_size[MAJOR][MINOR]
  92  *
  93  * if (!hardsect_size[MAJOR])
  94  *              then 512 bytes is assumed.
  95  * else
  96  *              sector_size is hardsect_size[MAJOR][MINOR]
  97  * This is currently set by some scsi device and read by the msdos fs driver
  98  * This might be a some uses later.
  99  */
 100 int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
 101 
 102 /*
 103  * "plug" the device if there are no outstanding requests: this will
 104  * force the transfer to start only after we have put all the requests
 105  * on the list.
 106  */
 107 static inline void plug_device(struct blk_dev_struct * dev, struct request * plug)
     /* [previous][next][first][last][top][bottom][index][help] */
 108 {
 109         unsigned long flags;
 110 
 111         plug->rq_status = RQ_INACTIVE;
 112         plug->cmd = -1;
 113         plug->next = NULL;
 114         save_flags(flags);
 115         cli();
 116         if (!dev->current_request)
 117                 dev->current_request = plug;
 118         restore_flags(flags);
 119 }
 120 
 121 /*
 122  * remove the plug and let it rip..
 123  */
 124 static inline void unplug_device(struct blk_dev_struct * dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 125 {
 126         struct request * req;
 127         unsigned long flags;
 128 
 129         save_flags(flags);
 130         cli();
 131         req = dev->current_request;
 132         if (req && req->rq_status == RQ_INACTIVE && req->cmd == -1) {
 133                 dev->current_request = req->next;
 134                 (dev->request_fn)();
 135         }
 136         restore_flags(flags);
 137 }
 138 
 139 /*
 140  * look for a free request in the first N entries.
 141  * NOTE: interrupts must be disabled on the way in, and will still
 142  *       be disabled on the way out.
 143  */
 144 static inline struct request * get_request(int n, kdev_t dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 145 {
 146         static struct request *prev_found = NULL, *prev_limit = NULL;
 147         register struct request *req, *limit;
 148 
 149         if (n <= 0)
 150                 panic("get_request(%d): impossible!\n", n);
 151 
 152         limit = all_requests + n;
 153         if (limit != prev_limit) {
 154                 prev_limit = limit;
 155                 prev_found = all_requests;
 156         }
 157         req = prev_found;
 158         for (;;) {
 159                 req = ((req > all_requests) ? req : limit) - 1;
 160                 if (req->rq_status == RQ_INACTIVE)
 161                         break;
 162                 if (req == prev_found)
 163                         return NULL;
 164         }
 165         prev_found = req;
 166         req->rq_status = RQ_ACTIVE;
 167         req->rq_dev = dev;
 168         return req;
 169 }
 170 
 171 /*
 172  * wait until a free request in the first N entries is available.
 173  */
 174 static struct request * __get_request_wait(int n, kdev_t dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 175 {
 176         register struct request *req;
 177         struct wait_queue wait = { current, NULL };
 178 
 179         add_wait_queue(&wait_for_request, &wait);
 180         for (;;) {
 181                 unplug_device(MAJOR(dev)+blk_dev);
 182                 current->state = TASK_UNINTERRUPTIBLE;
 183                 cli();
 184                 req = get_request(n, dev);
 185                 sti();
 186                 if (req)
 187                         break;
 188                 schedule();
 189         }
 190         remove_wait_queue(&wait_for_request, &wait);
 191         current->state = TASK_RUNNING;
 192         return req;
 193 }
 194 
 195 static inline struct request * get_request_wait(int n, kdev_t dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 196 {
 197         register struct request *req;
 198 
 199         cli();
 200         req = get_request(n, dev);
 201         sti();
 202         if (req)
 203                 return req;
 204         return __get_request_wait(n, dev);
 205 }
 206 
 207 /* RO fail safe mechanism */
 208 
 209 static long ro_bits[MAX_BLKDEV][8];
 210 
 211 int is_read_only(kdev_t dev)
     /* [previous][next][first][last][top][bottom][index][help] */
 212 {
 213         int minor,major;
 214 
 215         major = MAJOR(dev);
 216         minor = MINOR(dev);
 217         if (major < 0 || major >= MAX_BLKDEV) return 0;
 218         return ro_bits[major][minor >> 5] & (1 << (minor & 31));
 219 }
 220 
 221 void set_device_ro(kdev_t dev,int flag)
     /* [previous][next][first][last][top][bottom][index][help] */
 222 {
 223         int minor,major;
 224 
 225         major = MAJOR(dev);
 226         minor = MINOR(dev);
 227         if (major < 0 || major >= MAX_BLKDEV) return;
 228         if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
 229         else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
 230 }
 231 
 232 static inline void drive_stat_acct(int cmd, unsigned long nr_sectors, short disk_index)
     /* [previous][next][first][last][top][bottom][index][help] */
 233 {
 234         kstat.dk_drive[disk_index]++;
 235         if (cmd == READ) {
 236                 kstat.dk_drive_rio[disk_index]++;
 237                 kstat.dk_drive_rblk[disk_index] += nr_sectors;
 238         }
 239         else if (cmd == WRITE) {
 240                 kstat.dk_drive_wio[disk_index]++;
 241                 kstat.dk_drive_wblk[disk_index] += nr_sectors;
 242         } else
 243                 printk("drive_stat_acct: cmd not R/W?\n");
 244 }
 245 
 246 /*
 247  * add-request adds a request to the linked list.
 248  * It disables interrupts so that it can muck with the
 249  * request-lists in peace.
 250  *
 251  * By this point, req->cmd is always either READ/WRITE, never READA/WRITEA,
 252  * which is important for drive_stat_acct() above.
 253  */
 254 static void add_request(struct blk_dev_struct * dev, struct request * req)
     /* [previous][next][first][last][top][bottom][index][help] */
 255 {
 256         struct request * tmp;
 257         short            disk_index;
 258 
 259         switch (MAJOR(req->rq_dev)) {
 260                 case SCSI_DISK_MAJOR:
 261                         disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
 262                         if (disk_index < 4)
 263                                 drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
 264                         break;
 265                 case IDE0_MAJOR:        /* same as HD_MAJOR */
 266                 case XT_DISK_MAJOR:
 267                         disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
 268                         drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
 269                         break;
 270                 case IDE1_MAJOR:
 271                         disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
 272                         drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
 273                 default:
 274                         break;
 275         }
 276 
 277         req->next = NULL;
 278         cli();
 279         if (req->bh)
 280                 mark_buffer_clean(req->bh);
 281         if (!(tmp = dev->current_request)) {
 282                 dev->current_request = req;
 283                 (dev->request_fn)();
 284                 sti();
 285                 return;
 286         }
 287         for ( ; tmp->next ; tmp = tmp->next) {
 288                 if ((IN_ORDER(tmp,req) ||
 289                     !IN_ORDER(tmp,tmp->next)) &&
 290                     IN_ORDER(req,tmp->next))
 291                         break;
 292         }
 293         req->next = tmp->next;
 294         tmp->next = req;
 295 
 296 /* for SCSI devices, call request_fn unconditionally */
 297         if (scsi_major(MAJOR(req->rq_dev)))
 298                 (dev->request_fn)();
 299 
 300         sti();
 301 }
 302 
 303 static void make_request(int major,int rw, struct buffer_head * bh)
     /* [previous][next][first][last][top][bottom][index][help] */
 304 {
 305         unsigned int sector, count;
 306         struct request * req;
 307         int rw_ahead, max_req;
 308 
 309         count = bh->b_size >> 9;
 310         sector = bh->b_blocknr * count;
 311         if (blk_size[major])
 312                 if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
 313                         bh->b_state = 0;
 314                         printk("attempt to access beyond end of device\n");
 315                         return;
 316                 }
 317         /* Uhhuh.. Nasty dead-lock possible here.. */
 318         if (buffer_locked(bh))
 319                 return;
 320         /* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */
 321         lock_buffer(bh);
 322 
 323         rw_ahead = 0;   /* normal case; gets changed below for READA/WRITEA */
 324         switch (rw) {
 325                 case READA:
 326                         rw_ahead = 1;
 327                         rw = READ;      /* drop into READ */
 328                 case READ:
 329                         if (buffer_uptodate(bh)) {
 330                                 unlock_buffer(bh); /* Hmmph! Already have it */
 331                                 return;
 332                         }
 333                         kstat.pgpgin++;
 334                         max_req = NR_REQUEST;   /* reads take precedence */
 335                         break;
 336                 case WRITEA:
 337                         rw_ahead = 1;
 338                         rw = WRITE;     /* drop into WRITE */
 339                 case WRITE:
 340                         if (!buffer_dirty(bh)) {
 341                                 unlock_buffer(bh); /* Hmmph! Nothing to write */
 342                                 return;
 343                         }
 344                         /* We don't allow the write-requests to fill up the
 345                          * queue completely:  we want some room for reads,
 346                          * as they take precedence. The last third of the
 347                          * requests are only for reads.
 348                          */
 349                         kstat.pgpgout++;
 350                         max_req = (NR_REQUEST * 2) / 3;
 351                         break;
 352                 default:
 353                         printk("make_request: bad block dev cmd, must be R/W/RA/WA\n");
 354                         unlock_buffer(bh);
 355                         return;
 356         }
 357 
 358 /* look for a free request. */
 359         cli();
 360 
 361 /* The scsi disk and cdrom drivers completely remove the request
 362  * from the queue when they start processing an entry.  For this reason
 363  * it is safe to continue to add links to the top entry for those devices.
 364  */
 365         if ((   major == IDE0_MAJOR     /* same as HD_MAJOR */
 366              || major == IDE1_MAJOR
 367              || major == FLOPPY_MAJOR
 368              || major == SCSI_DISK_MAJOR
 369              || major == SCSI_CDROM_MAJOR
 370              || major == IDE2_MAJOR
 371              || major == IDE3_MAJOR)
 372             && (req = blk_dev[major].current_request))
 373         {
 374                 if (major != SCSI_DISK_MAJOR && major != SCSI_CDROM_MAJOR)
 375                         req = req->next;
 376                 while (req) {
 377                         if (req->rq_dev == bh->b_dev &&
 378                             !req->sem &&
 379                             req->cmd == rw &&
 380                             req->sector + req->nr_sectors == sector &&
 381                             req->nr_sectors < 244)
 382                         {
 383                                 req->bhtail->b_reqnext = bh;
 384                                 req->bhtail = bh;
 385                                 req->nr_sectors += count;
 386                                 mark_buffer_clean(bh);
 387                                 sti();
 388                                 return;
 389                         }
 390 
 391                         if (req->rq_dev == bh->b_dev &&
 392                             !req->sem &&
 393                             req->cmd == rw &&
 394                             req->sector - count == sector &&
 395                             req->nr_sectors < 244)
 396                         {
 397                                 req->nr_sectors += count;
 398                                 bh->b_reqnext = req->bh;
 399                                 req->buffer = bh->b_data;
 400                                 req->current_nr_sectors = count;
 401                                 req->sector = sector;
 402                                 mark_buffer_clean(bh);
 403                                 req->bh = bh;
 404                                 sti();
 405                                 return;
 406                         }    
 407 
 408                         req = req->next;
 409                 }
 410         }
 411 
 412 /* find an unused request. */
 413         req = get_request(max_req, bh->b_dev);
 414         sti();
 415 
 416 /* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
 417         if (!req) {
 418                 if (rw_ahead) {
 419                         unlock_buffer(bh);
 420                         return;
 421                 }
 422                 req = __get_request_wait(max_req, bh->b_dev);
 423         }
 424 
 425 /* fill up the request-info, and add it to the queue */
 426         req->cmd = rw;
 427         req->errors = 0;
 428         req->sector = sector;
 429         req->nr_sectors = count;
 430         req->current_nr_sectors = count;
 431         req->buffer = bh->b_data;
 432         req->sem = NULL;
 433         req->bh = bh;
 434         req->bhtail = bh;
 435         req->next = NULL;
 436         add_request(major+blk_dev,req);
 437 }
 438 
 439 /*
 440  * Swap partitions are now read via brw_page.  ll_rw_page is an
 441  * asynchronous function now --- we must call wait_on_page afterwards
 442  * if synchronous IO is required.  
 443  */
 444 void ll_rw_page(int rw, kdev_t dev, unsigned long page, char * buffer)
     /* [previous][next][first][last][top][bottom][index][help] */
 445 {
 446         unsigned int major = MAJOR(dev);
 447         int block = page;
 448         
 449         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 450                 printk("Trying to read nonexistent block-device %s (%ld)\n",
 451                        kdevname(dev), page);
 452                 return;
 453         }
 454         switch (rw) {
 455                 case READ:
 456                         break;
 457                 case WRITE:
 458                         if (is_read_only(dev)) {
 459                                 printk("Can't page to read-only device %s\n",
 460                                         kdevname(dev));
 461                                 return;
 462                         }
 463                         break;
 464                 default:
 465                         panic("ll_rw_page: bad block dev cmd, must be R/W");
 466         }
 467         if (mem_map[MAP_NR(buffer)].locked)
 468                 panic ("ll_rw_page: page already locked");
 469         mem_map[MAP_NR(buffer)].locked = 1;
 470         brw_page(rw, (unsigned long) buffer, dev, &block, PAGE_SIZE, 0);
 471 }
 472 
 473 /* This function can be used to request a number of buffers from a block
 474    device. Currently the only restriction is that all buffers must belong to
 475    the same device */
 476 
 477 void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
     /* [previous][next][first][last][top][bottom][index][help] */
 478 {
 479         unsigned int major;
 480         struct request plug;
 481         int correct_size;
 482         struct blk_dev_struct * dev;
 483         int i;
 484 
 485         /* Make sure that the first block contains something reasonable */
 486         while (!*bh) {
 487                 bh++;
 488                 if (--nr <= 0)
 489                         return;
 490         };
 491 
 492         dev = NULL;
 493         if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
 494                 dev = blk_dev + major;
 495         if (!dev || !dev->request_fn) {
 496                 printk(
 497         "ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
 498                 kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
 499                 goto sorry;
 500         }
 501 
 502         /* Determine correct block size for this device.  */
 503         correct_size = BLOCK_SIZE;
 504         if (blksize_size[major]) {
 505                 i = blksize_size[major][MINOR(bh[0]->b_dev)];
 506                 if (i)
 507                         correct_size = i;
 508         }
 509 
 510         /* Verify requested block sizes.  */
 511         for (i = 0; i < nr; i++) {
 512                 if (bh[i] && bh[i]->b_size != correct_size) {
 513                         printk(
 514                         "ll_rw_block: only %d-char blocks implemented (%lu)\n",
 515                                correct_size, bh[i]->b_size);
 516                         goto sorry;
 517                 }
 518         }
 519 
 520         if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
 521                 printk("Can't write to read-only device %s\n",
 522                        kdevname(bh[0]->b_dev));
 523                 goto sorry;
 524         }
 525 
 526         /* If there are no pending requests for this device, then we insert
 527            a dummy request for that device.  This will prevent the request
 528            from starting until we have shoved all of the blocks into the
 529            queue, and then we let it rip.  */
 530 
 531         if (nr > 1)
 532                 plug_device(dev, &plug);
 533         for (i = 0; i < nr; i++) {
 534                 if (bh[i]) {
 535                         set_bit(BH_Req, &bh[i]->b_state);
 536                         make_request(major, rw, bh[i]);
 537                 }
 538         }
 539         unplug_device(dev);
 540         return;
 541 
 542       sorry:
 543         for (i = 0; i < nr; i++) {
 544                 if (bh[i]) {
 545                         clear_bit(BH_Dirty, &bh[i]->b_state);
 546                         clear_bit(BH_Uptodate, &bh[i]->b_state);
 547                 }
 548         }
 549         return;
 550 }
 551 
 552 void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
     /* [previous][next][first][last][top][bottom][index][help] */
 553 {
 554         int i, j;
 555         int buffersize;
 556         struct request * req[8];
 557         unsigned int major = MAJOR(dev);
 558         struct semaphore sem = MUTEX_LOCKED;
 559 
 560         if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
 561                 printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
 562                 return;
 563         }
 564         switch (rw) {
 565                 case READ:
 566                         break;
 567                 case WRITE:
 568                         if (is_read_only(dev)) {
 569                                 printk("Can't swap to read-only device %s\n",
 570                                         kdevname(dev));
 571                                 return;
 572                         }
 573                         break;
 574                 default:
 575                         panic("ll_rw_swap: bad block dev cmd, must be R/W");
 576         }
 577         buffersize = PAGE_SIZE / nb;
 578 
 579         for (j=0, i=0; i<nb;)
 580         {
 581                 for (; j < 8 && i < nb; j++, i++, buf += buffersize)
 582                 {
 583                         if (j == 0) {
 584                                 req[j] = get_request_wait(NR_REQUEST, dev);
 585                         } else {
 586                                 cli();
 587                                 req[j] = get_request(NR_REQUEST, dev);
 588                                 sti();
 589                                 if (req[j] == NULL)
 590                                         break;
 591                         }
 592                         req[j]->cmd = rw;
 593                         req[j]->errors = 0;
 594                         req[j]->sector = (b[i] * buffersize) >> 9;
 595                         req[j]->nr_sectors = buffersize >> 9;
 596                         req[j]->current_nr_sectors = buffersize >> 9;
 597                         req[j]->buffer = buf;
 598                         req[j]->sem = &sem;
 599                         req[j]->bh = NULL;
 600                         req[j]->next = NULL;
 601                         add_request(major+blk_dev,req[j]);
 602                 }
 603                 while (j > 0) {
 604                         j--;
 605                         down(&sem);
 606                 }
 607         }
 608 }
 609 
 610 int blk_dev_init(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 611 {
 612         struct request * req;
 613 
 614         req = all_requests + NR_REQUEST;
 615         while (--req >= all_requests) {
 616                 req->rq_status = RQ_INACTIVE;
 617                 req->next = NULL;
 618         }
 619         memset(ro_bits,0,sizeof(ro_bits));
 620 #ifdef CONFIG_BLK_DEV_RAM
 621         rd_init();
 622 #endif
 623 #ifdef CONFIG_BLK_DEV_IDE
 624         ide_init();             /* this MUST preceed hd_init */
 625 #endif
 626 #ifdef CONFIG_BLK_DEV_HD
 627         hd_init();
 628 #endif
 629 #ifdef CONFIG_BLK_DEV_XD
 630         xd_init();
 631 #endif
 632 #ifdef CONFIG_BLK_DEV_FD
 633         floppy_init();
 634 #else
 635         outb_p(0xc, 0x3f2);
 636 #endif
 637 #ifdef CONFIG_CDU31A
 638         cdu31a_init();
 639 #endif CONFIG_CDU31A
 640 #ifdef CONFIG_MCD
 641         mcd_init();
 642 #endif CONFIG_MCD
 643 #ifdef CONFIG_MCDX
 644         mcdx_init();
 645 #endif CONFIG_MCDX
 646 #ifdef CONFIG_SBPCD
 647         sbpcd_init();
 648 #endif CONFIG_SBPCD
 649 #ifdef CONFIG_AZTCD
 650         aztcd_init();
 651 #endif CONFIG_AZTCD
 652 #ifdef CONFIG_CDU535
 653         sony535_init();
 654 #endif CONFIG_CDU535
 655 #ifdef CONFIG_GSCD
 656         gscd_init();
 657 #endif CONFIG_GSCD
 658 #ifdef CONFIG_CM206
 659         cm206_init();
 660 #endif
 661 #ifdef CONFIG_OPTCD
 662         optcd_init();
 663 #endif CONFIG_OPTCD
 664 #ifdef CONFIG_SJCD
 665         sjcd_init();
 666 #endif CONFIG_SJCD
 667         return 0;
 668 }

/* [previous][next][first][last][top][bottom][index][help] */