root/drivers/block/ll_rw_blk.c


DEFINITIONS

This source file includes the following definitions:
  1. plug_device
  2. unplug_device
  3. get_request
  4. __get_request_wait
  5. get_request_wait
  6. is_read_only
  7. set_device_ro
  8. drive_stat_acct
  9. add_request
  10. make_request
  11. ll_rw_page
  12. ll_rw_block
  13. ll_rw_swap_file
  14. blk_dev_init

/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>

/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory
 */
static struct request all_requests[NR_REQUEST];

/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request = NULL;

/* This specifies how many sectors to read ahead on the disk.  */

int read_ahead[MAX_BLKDEV] = {0, };

/* blk_dev_struct is:
 *      do_request-address
 *      next-request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
        { NULL, NULL },         /* 0 no_dev */
        { NULL, NULL },         /* 1 dev mem */
        { NULL, NULL },         /* 2 dev fd */
        { NULL, NULL },         /* 3 dev ide0 or hd */
        { NULL, NULL },         /* 4 dev ttyx */
        { NULL, NULL },         /* 5 dev tty */
        { NULL, NULL },         /* 6 dev lp */
        { NULL, NULL },         /* 7 dev pipes */
        { NULL, NULL },         /* 8 dev sd */
        { NULL, NULL },         /* 9 dev st */
        { NULL, NULL },         /* 10 */
        { NULL, NULL },         /* 11 */
        { NULL, NULL },         /* 12 */
        { NULL, NULL },         /* 13 */
        { NULL, NULL },         /* 14 */
        { NULL, NULL },         /* 15 */
        { NULL, NULL },         /* 16 */
        { NULL, NULL },         /* 17 */
        { NULL, NULL },         /* 18 */
        { NULL, NULL },         /* 19 */
        { NULL, NULL },         /* 20 */
        { NULL, NULL },         /* 21 */
        { NULL, NULL }          /* 22 dev ide1 */
};
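
/*
 * Drivers claim a slot at init time by installing their strategy
 * routine; sketch of the usual pattern (MAJOR_NR and DEVICE_REQUEST
 * are per-driver macros defined before <linux/blk.h> is included,
 * see hd.c and friends):
 *
 *      blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
 */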

/*
 * blk_size contains the size of all block-devices in units of
 * 1024-byte blocks:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size contains the current block size (in bytes) of each
 * block device:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * hardsect_size contains the size of the hardware sector of a device.
 *
 * hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR])
 *              then 512 bytes is assumed.
 * else
 *              sector_size is hardsect_size[MAJOR][MINOR]
 *
 * This is currently set by some SCSI devices and read by the msdos
 * filesystem driver; it might find other uses later.
 */
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };

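/*
 * A driver typically publishes its geometry into these tables at
 * init time.  Sketch only -- MYDEV_MAJOR, MYDEV_MINORS and the two
 * arrays are hypothetical:
 *
 *      static int mydev_sizes[MYDEV_MINORS];      (units: 1k blocks)
 *      static int mydev_blksizes[MYDEV_MINORS];   (units: bytes)
 *
 *      blk_size[MYDEV_MAJOR] = mydev_sizes;
 *      blksize_size[MYDEV_MAJOR] = mydev_blksizes;
 *      hardsect_size[MYDEV_MAJOR] = NULL;         (512-byte sectors)
 */
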
/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 */
static inline void plug_device(struct blk_dev_struct * dev, struct request * plug)
{
        unsigned long flags;

        plug->rq_status = RQ_INACTIVE;
        plug->cmd = -1;
        plug->next = NULL;
        save_flags(flags);
        cli();
        if (!dev->current_request)
                dev->current_request = plug;
        restore_flags(flags);
}

/*
 * remove the plug and let it rip..
 */
static inline void unplug_device(struct blk_dev_struct * dev)
{
        struct request * req;
        unsigned long flags;

        save_flags(flags);
        cli();
        req = dev->current_request;
        if (req && req->rq_status == RQ_INACTIVE && req->cmd == -1) {
                dev->current_request = req->next;
                (dev->request_fn)();
        }
        restore_flags(flags);
}
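
/*
 * The intended usage pattern (it is exactly what ll_rw_block() below
 * does) is, roughly:
 *
 *      struct request plug;
 *
 *      plug_device(dev, &plug);        (queue stays stalled...)
 *      make_request(major, rw, bh1);
 *      make_request(major, rw, bh2);
 *      ...
 *      unplug_device(dev);             (...now the driver starts)
 *
 * so that request_fn first runs once the whole batch is sorted in.
 */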

/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 */
static inline struct request * get_request(int n, kdev_t dev)
{
        static struct request *prev_found = NULL, *prev_limit = NULL;
        register struct request *req, *limit;

        if (n <= 0)
                panic("get_request(%d): impossible!\n", n);

        limit = all_requests + n;
        if (limit != prev_limit) {
                prev_limit = limit;
                prev_found = all_requests;
        }
        req = prev_found;
        for (;;) {
                /* scan backwards from the last hit, wrapping around
                   from the bottom of the table to limit-1; one full
                   lap without a free entry means failure */
                req = ((req > all_requests) ? req : limit) - 1;
                if (req->rq_status == RQ_INACTIVE)
                        break;
                if (req == prev_found)
                        return NULL;
        }
        prev_found = req;
        req->rq_status = RQ_ACTIVE;
        req->rq_dev = dev;
        return req;
}

/*
 * wait until a free request in the first N entries is available.
 */
static struct request * __get_request_wait(int n, kdev_t dev)
{
        register struct request *req;
        struct wait_queue wait = { current, NULL };

        add_wait_queue(&wait_for_request, &wait);
        for (;;) {
                /* kick the queue: a plugged device would otherwise
                   never complete requests and free any entries */
                unplug_device(MAJOR(dev)+blk_dev);
                current->state = TASK_UNINTERRUPTIBLE;
                cli();
                req = get_request(n, dev);
                sti();
                if (req)
                        break;
                schedule();
        }
        remove_wait_queue(&wait_for_request, &wait);
        current->state = TASK_RUNNING;
        return req;
}

static inline struct request * get_request_wait(int n, kdev_t dev)
{
        register struct request *req;

        cli();
        req = get_request(n, dev);
        sti();
        if (req)
                return req;
        return __get_request_wait(n, dev);
}

/* RO fail safe mechanism */

static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return 0;
        return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev,int flag)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return;
        if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
        else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
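
/*
 * Worked example: for MINOR(dev) == 37, minor >> 5 == 1 and
 * minor & 31 == 5, so the read-only flag lives in bit 5 of
 * ro_bits[major][1].  Eight long words, used 32 bits apiece,
 * cover all 256 possible minors of a major.
 */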

static inline void drive_stat_acct(int cmd, unsigned long nr_sectors, short disk_index)
{
        kstat.dk_drive[disk_index]++;
        if (cmd == READ) {
                kstat.dk_drive_rio[disk_index]++;
                kstat.dk_drive_rblk[disk_index] += nr_sectors;
        }
        else if (cmd == WRITE) {
                kstat.dk_drive_wio[disk_index]++;
                kstat.dk_drive_wblk[disk_index] += nr_sectors;
        } else
                printk("drive_stat_acct: cmd not R/W?\n");
}
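
/*
 * These per-drive counters (kstat comes from <linux/kernel_stat.h>)
 * are what the disk statistics reported through /proc/stat are built
 * from in kernels of this vintage; the exact /proc format varies
 * between versions.
 */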

/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 *
 * By this point, req->cmd is always either READ/WRITE, never READA/WRITEA,
 * which is important for drive_stat_acct() above.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
        struct request * tmp;
        short            disk_index;

        switch (MAJOR(req->rq_dev)) {
                case SCSI_DISK_MAJOR:
                        disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
                        if (disk_index < 4)
                                drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                case IDE0_MAJOR:        /* same as HD_MAJOR */
                case XT_DISK_MAJOR:
                        disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
                        drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                case IDE1_MAJOR:
                        disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
                        drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                default:
                        break;
        }

        req->next = NULL;
        cli();
        if (req->bh)
                mark_buffer_clean(req->bh);
        if (!(tmp = dev->current_request)) {
                dev->current_request = req;
                (dev->request_fn)();
                sti();
                return;
        }
        /* elevator: walk the queue and insert req at the first point
           that keeps the list sorted by IN_ORDER(), restarting the
           sweep wherever the existing ordering already breaks */
        for ( ; tmp->next ; tmp = tmp->next) {
                if ((IN_ORDER(tmp,req) ||
                    !IN_ORDER(tmp,tmp->next)) &&
                    IN_ORDER(req,tmp->next))
                        break;
        }
        req->next = tmp->next;
        tmp->next = req;

/* for SCSI devices, call request_fn unconditionally */
        if (scsi_major(MAJOR(req->rq_dev)))
                (dev->request_fn)();

        sti();
}
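
/*
 * For reference, IN_ORDER() comes from <linux/blk.h>.  In trees of
 * this vintage it reads roughly as follows (sketch; consult your own
 * blk.h, as details vary between versions):
 *
 *      #define IN_ORDER(s1,s2) \
 *      ((s1)->cmd < (s2)->cmd || ((s1)->cmd == (s2)->cmd && \
 *      ((s1)->rq_dev < (s2)->rq_dev || (((s1)->rq_dev == (s2)->rq_dev && \
 *      (s1)->sector < (s2)->sector)))))
 *
 * i.e. reads sort before writes, then by device, then by ascending
 * sector -- which is what makes the insertion loop above an elevator.
 */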

static void make_request(int major,int rw, struct buffer_head * bh)
{
        unsigned int sector, count;
        struct request * req;
        int rw_ahead, max_req;

        count = bh->b_size >> 9;
        sector = bh->b_blocknr * count;
        if (blk_size[major])
                if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
                        bh->b_state = 0;
                        printk("attempt to access beyond end of device\n");
                        return;
                }
        /* Uhhuh.. Nasty dead-lock possible here.. */
        if (buffer_locked(bh))
                return;
        /* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */
        lock_buffer(bh);

        rw_ahead = 0;   /* normal case; gets changed below for READA/WRITEA */
        switch (rw) {
                case READA:
                        rw_ahead = 1;
                        rw = READ;      /* drop into READ */
                case READ:
                        if (buffer_uptodate(bh)) {
                                unlock_buffer(bh); /* Hmmph! Already have it */
                                return;
                        }
                        kstat.pgpgin++;
                        max_req = NR_REQUEST;   /* reads take precedence */
                        break;
                case WRITEA:
                        rw_ahead = 1;
                        rw = WRITE;     /* drop into WRITE */
                case WRITE:
                        if (!buffer_dirty(bh)) {
                                unlock_buffer(bh); /* Hmmph! Nothing to write */
                                return;
                        }
                        /* We don't allow the write-requests to fill up the
                         * queue completely:  we want some room for reads,
                         * as they take precedence. The last third of the
                         * requests are only for reads.
                         */
                        kstat.pgpgout++;
                        max_req = (NR_REQUEST * 2) / 3;
                        break;
                default:
                        printk("make_request: bad block dev cmd, must be R/W/RA/WA\n");
                        unlock_buffer(bh);
                        return;
        }

/* look for a free request. */
        cli();

/* The scsi disk and cdrom drivers completely remove the request
 * from the queue when they start processing an entry.  For this reason
 * it is safe to continue to add links to the top entry for those devices.
 */
        if ((   major == IDE0_MAJOR     /* same as HD_MAJOR */
             || major == IDE1_MAJOR
             || major == FLOPPY_MAJOR
             || major == SCSI_DISK_MAJOR
             || major == SCSI_CDROM_MAJOR
             || major == IDE2_MAJOR
             || major == IDE3_MAJOR)
            && (req = blk_dev[major].current_request))
        {
                if (major != SCSI_DISK_MAJOR && major != SCSI_CDROM_MAJOR)
                        req = req->next;
                while (req) {
                        /* back-merge: bh starts right where req ends,
                           so append it to the request's buffer chain */
                        if (req->rq_dev == bh->b_dev &&
                            !req->sem &&
                            req->cmd == rw &&
                            req->sector + req->nr_sectors == sector &&
                            req->nr_sectors < 244)
                        {
                                req->bhtail->b_reqnext = bh;
                                req->bhtail = bh;
                                req->nr_sectors += count;
                                mark_buffer_clean(bh);
                                sti();
                                return;
                        }

                        /* front-merge: bh ends right where req starts,
                           so prepend it and move the request's start */
                        if (req->rq_dev == bh->b_dev &&
                            !req->sem &&
                            req->cmd == rw &&
                            req->sector - count == sector &&
                            req->nr_sectors < 244)
                        {
                                req->nr_sectors += count;
                                bh->b_reqnext = req->bh;
                                req->buffer = bh->b_data;
                                req->current_nr_sectors = count;
                                req->sector = sector;
                                mark_buffer_clean(bh);
                                req->bh = bh;
                                sti();
                                return;
                        }

                        req = req->next;
                }
        }

/* find an unused request. */
        req = get_request(max_req, bh->b_dev);
        sti();

/* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
        if (!req) {
                if (rw_ahead) {
                        unlock_buffer(bh);
                        return;
                }
                req = __get_request_wait(max_req, bh->b_dev);
        }

/* fill up the request-info, and add it to the queue */
        req->cmd = rw;
        req->errors = 0;
        req->sector = sector;
        req->nr_sectors = count;
        req->current_nr_sectors = count;
        req->buffer = bh->b_data;
        req->sem = NULL;
        req->bh = bh;
        req->bhtail = bh;
        req->next = NULL;
        add_request(major+blk_dev,req);
}
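
/*
 * The consuming side: a driver's request_fn drains the queue that
 * make_request/add_request build.  Minimal sketch only (real drivers
 * define MAJOR_NR and the DEVICE_* macros before including
 * <linux/blk.h>, which then provides CURRENT, INIT_REQUEST and
 * end_request()):
 *
 *      static void do_mydev_request(void)
 *      {
 *      repeat:
 *              INIT_REQUEST;                   (validity checks)
 *              ... move CURRENT->current_nr_sectors sectors between
 *                  the hardware and CURRENT->buffer, starting at
 *                  CURRENT->sector ...
 *              end_request(1);                 (1 = success)
 *              goto repeat;
 *      }
 */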

void ll_rw_page(int rw, kdev_t dev, unsigned long page, char * buffer)
{
        struct request * req;
        unsigned int major = MAJOR(dev);
        unsigned long sector = page * (PAGE_SIZE / 512);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("Trying to read nonexistent block-device %s (%ld)\n",
                       kdevname(dev), sector);
                return;
        }
        switch (rw) {
                case READ:
                        break;
                case WRITE:
                        if (is_read_only(dev)) {
                                printk("Can't page to read-only device %s\n",
                                        kdevname(dev));
                                return;
                        }
                        break;
                default:
                        panic("ll_rw_page: bad block dev cmd, must be R/W");
        }
        req = get_request_wait(NR_REQUEST, dev);
/* fill up the request-info, and add it to the queue */
        req->cmd = rw;
        req->errors = 0;
        req->sector = sector;
        req->nr_sectors = PAGE_SIZE / 512;
        req->current_nr_sectors = PAGE_SIZE / 512;
        req->buffer = buffer;
        req->sem = &sem;
        req->bh = NULL;
        req->next = NULL;
        add_request(major+blk_dev,req);
        down(&sem);     /* sleep until the driver ups req->sem on completion */
}
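
/*
 * Example caller (sketch only; 'swap_dev', 'offset' and 'buf' are
 * hypothetical): the swapper reads one page synchronously like this,
 * where offset is a page index on the swap device and buf points at
 * a page-sized buffer:
 *
 *      ll_rw_page(READ, swap_dev, offset, buf);
 *
 * The call only returns once the transfer has completed.
 */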

/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong
   to the same device.  */

void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
        unsigned int major;
        struct request plug;
        int correct_size;
        struct blk_dev_struct * dev;
        int i;

        /* Make sure that the first block contains something reasonable */
        while (!*bh) {
                bh++;
                if (--nr <= 0)
                        return;
        }

        dev = NULL;
        if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
                dev = blk_dev + major;
        if (!dev || !dev->request_fn) {
                printk(
        "ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
                kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
                goto sorry;
        }

        /* Determine correct block size for this device.  */
        correct_size = BLOCK_SIZE;
        if (blksize_size[major]) {
                i = blksize_size[major][MINOR(bh[0]->b_dev)];
                if (i)
                        correct_size = i;
        }

        /* Verify requested block sizes.  */
        for (i = 0; i < nr; i++) {
                if (bh[i] && bh[i]->b_size != correct_size) {
                        printk(
                        "ll_rw_block: only %d-char blocks implemented (%lu)\n",
                               correct_size, bh[i]->b_size);
                        goto sorry;
                }
        }

        if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
                printk("Can't write to read-only device %s\n",
                       kdevname(bh[0]->b_dev));
                goto sorry;
        }

        /* If there are no pending requests for this device, then we insert
           a dummy request for that device.  This will prevent the request
           from starting until we have shoved all of the blocks into the
           queue, and then we let it rip.  */

        if (nr > 1)
                plug_device(dev, &plug);
        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        set_bit(BH_Req, &bh[i]->b_state);
                        make_request(major, rw, bh[i]);
                }
        }
        unplug_device(dev);
        return;

      sorry:
        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        clear_bit(BH_Dirty, &bh[i]->b_state);
                        clear_bit(BH_Uptodate, &bh[i]->b_state);
                }
        }
        return;
}
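
/*
 * Typical caller (sketch): this is essentially what the buffer cache
 * does to read one block synchronously, bread()-style:
 *
 *      struct buffer_head *bh = getblk(dev, block, size);
 *
 *      if (!buffer_uptodate(bh)) {
 *              ll_rw_block(READ, 1, &bh);
 *              wait_on_buffer(bh);
 *      }
 */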

void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
{
        int i, j;
        int buffersize;
        struct request * req[8];
        unsigned int major = MAJOR(dev);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
                return;
        }
        switch (rw) {
                case READ:
                        break;
                case WRITE:
                        if (is_read_only(dev)) {
                                printk("Can't swap to read-only device %s\n",
                                        kdevname(dev));
                                return;
                        }
                        break;
                default:
                        panic("ll_rw_swap: bad block dev cmd, must be R/W");
        }
        buffersize = PAGE_SIZE / nb;

        /* queue up to 8 requests at a time, then wait for that batch
           to complete before starting the next one */
        for (j=0, i=0; i<nb;)
        {
                for (; j < 8 && i < nb; j++, i++, buf += buffersize)
                {
                        if (j == 0) {
                                req[j] = get_request_wait(NR_REQUEST, dev);
                        } else {
                                cli();
                                req[j] = get_request(NR_REQUEST, dev);
                                sti();
                                if (req[j] == NULL)
                                        break;
                        }
                        req[j]->cmd = rw;
                        req[j]->errors = 0;
                        req[j]->sector = (b[i] * buffersize) >> 9;
                        req[j]->nr_sectors = buffersize >> 9;
                        req[j]->current_nr_sectors = buffersize >> 9;
                        req[j]->buffer = buf;
                        req[j]->sem = &sem;
                        req[j]->bh = NULL;
                        req[j]->next = NULL;
                        add_request(major+blk_dev,req[j]);
                }
                while (j > 0) {
                        j--;
                        down(&sem);     /* one down() per queued request */
                }
        }
}
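
/*
 * Here b[] is the caller's block map: b[i] is the device block backing
 * the i'th chunk of the page, and nb chunks of PAGE_SIZE/nb bytes make
 * up one page.  Sketch of a call for a swap file made of 1k blocks
 * ('zones' and 'buf' hypothetical):
 *
 *      ll_rw_swap_file(WRITE, swapf->i_dev, zones, PAGE_SIZE/1024, buf);
 */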

int blk_dev_init(void)
{
        struct request * req;

        req = all_requests + NR_REQUEST;
        while (--req >= all_requests) {
                req->rq_status = RQ_INACTIVE;
                req->next = NULL;
        }
        memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_RAM
        rd_init();
#endif
#ifdef CONFIG_BLK_DEV_IDE
        ide_init();             /* this MUST precede hd_init */
#endif
#ifdef CONFIG_BLK_DEV_HD
        hd_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
        xd_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
        floppy_init();
#else
        outb_p(0xc, 0x3f2);     /* no floppy driver: switch floppy motors off */
#endif
#ifdef CONFIG_CDU31A
        cdu31a_init();
#endif /* CONFIG_CDU31A */
#ifdef CONFIG_MCD
        mcd_init();
#endif /* CONFIG_MCD */
#ifdef CONFIG_MCDX
        mcdx_init();
#endif /* CONFIG_MCDX */
#ifdef CONFIG_SBPCD
        sbpcd_init();
#endif /* CONFIG_SBPCD */
#ifdef CONFIG_AZTCD
        aztcd_init();
#endif /* CONFIG_AZTCD */
#ifdef CONFIG_CDU535
        sony535_init();
#endif /* CONFIG_CDU535 */
#ifdef CONFIG_GSCD
        gscd_init();
#endif /* CONFIG_GSCD */
#ifdef CONFIG_CM206
        cm206_init();
#endif
#ifdef CONFIG_OPTCD
        optcd_init();
#endif /* CONFIG_OPTCD */
#ifdef CONFIG_SJCD
        sjcd_init();
#endif /* CONFIG_SJCD */
        return 0;
}
