root/drivers/block/ll_rw_blk.c


DEFINITIONS

This source file includes the following definitions:
  1. plug_device
  2. unplug_device
  3. get_request
  4. __get_request_wait
  5. get_request_wait
  6. is_read_only
  7. set_device_ro
  8. drive_stat_acct
  9. add_request
  10. make_request
  11. ll_rw_page
  12. ll_rw_block
  13. ll_rw_swap_file
  14. blk_dev_init

/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>

/*
 * The request-struct contains all necessary data
 * to load a number of sectors into memory
 */
static struct request all_requests[NR_REQUEST];

/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request = NULL;

/* This specifies how many sectors to read ahead on the disk.  */

int read_ahead[MAX_BLKDEV] = {0, };

/* blk_dev_struct is:
 *      do_request-address
 *      next-request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
        { NULL, NULL },         /* 0 no_dev */
        { NULL, NULL },         /* 1 dev mem */
        { NULL, NULL },         /* 2 dev fd */
        { NULL, NULL },         /* 3 dev ide0 or hd */
        { NULL, NULL },         /* 4 dev ttyx */
        { NULL, NULL },         /* 5 dev tty */
        { NULL, NULL },         /* 6 dev lp */
        { NULL, NULL },         /* 7 dev pipes */
        { NULL, NULL },         /* 8 dev sd */
        { NULL, NULL },         /* 9 dev st */
        { NULL, NULL },         /* 10 */
        { NULL, NULL },         /* 11 */
        { NULL, NULL },         /* 12 */
        { NULL, NULL },         /* 13 */
        { NULL, NULL },         /* 14 */
        { NULL, NULL },         /* 15 */
        { NULL, NULL },         /* 16 */
        { NULL, NULL },         /* 17 */
        { NULL, NULL },         /* 18 */
        { NULL, NULL },         /* 19 */
        { NULL, NULL },         /* 20 */
        { NULL, NULL },         /* 21 */
        { NULL, NULL }          /* 22 dev ide1 */
};

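/*
 * Illustrative sketch (not part of the original file): a block driver
 * claims its slot in blk_dev[] by installing its request handler at
 * init time; request_fn takes no arguments and picks work off
 * blk_dev[major].current_request.  EXAMPLE_MAJOR and the function
 * names below are hypothetical.
 */
#if 0
#define EXAMPLE_MAJOR 42

static void do_example_request(void)
{
        /* dequeue and service blk_dev[EXAMPLE_MAJOR].current_request */
}

static void example_driver_init(void)
{
        blk_dev[EXAMPLE_MAJOR].request_fn = do_example_request;
        read_ahead[EXAMPLE_MAJOR] = 8;  /* read ahead 8 sectors */
}
#endif
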
/*
 * blk_size contains the size of all block-devices in units of
 * 1024 bytes:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * hardsect_size contains the size of the hardware sector of a device.
 *
 * hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR])
 *              then 512 bytes is assumed.
 * else
 *              sector_size is hardsect_size[MAJOR][MINOR]
 *
 * This is currently set by some SCSI devices and read by the msdos
 * filesystem driver.  It might find other uses later.
 */
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };

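/*
 * Illustrative sketch (not part of the original file): how the three
 * tables above are consulted, with the documented defaults applied
 * when a major number has registered no table.  example_query_sizes()
 * is a hypothetical helper.
 */
#if 0
static void example_query_sizes(kdev_t dev)
{
        int nblocks = -1;               /* -1: no size known, no checking */
        int blksize = BLOCK_SIZE;       /* documented default: 1024 bytes */
        int sectsize = 512;             /* documented default: 512 bytes */

        if (blk_size[MAJOR(dev)])
                nblocks = blk_size[MAJOR(dev)][MINOR(dev)];
        if (blksize_size[MAJOR(dev)] && blksize_size[MAJOR(dev)][MINOR(dev)])
                blksize = blksize_size[MAJOR(dev)][MINOR(dev)];
        if (hardsect_size[MAJOR(dev)])
                sectsize = hardsect_size[MAJOR(dev)][MINOR(dev)];
        printk("%s: %d 1k-blocks, %d-byte blocks, %d-byte sectors\n",
               kdevname(dev), nblocks, blksize, sectsize);
}
#endif
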
/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 */
static inline void plug_device(struct blk_dev_struct * dev, struct request * plug)
{
        unsigned long flags;

        plug->rq_status = RQ_INACTIVE;
        plug->cmd = -1;
        plug->next = NULL;
        save_flags(flags);
        cli();
        if (!dev->current_request)
                dev->current_request = plug;
        restore_flags(flags);
}

/*
 * remove the plug and let it rip..
 */
static inline void unplug_device(struct blk_dev_struct * dev)
{
        struct request * req;
        unsigned long flags;

        save_flags(flags);
        cli();
        req = dev->current_request;
        if (req && req->rq_status == RQ_INACTIVE && req->cmd == -1) {
                dev->current_request = req->next;
                (dev->request_fn)();
        }
        restore_flags(flags);
}

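/*
 * Illustrative sketch (not part of the original file): the intended
 * plug/unplug pattern, condensed from ll_rw_block() below.  The plug
 * is a dummy inactive request that keeps request_fn from running while
 * a batch of real requests is queued and elevator-sorted behind it.
 */
#if 0
static void example_plugged_batch(struct blk_dev_struct * dev, int major,
                                  int rw, struct buffer_head * bh[], int nr)
{
        struct request plug;
        int i;

        plug_device(dev, &plug);        /* driver now sees only the plug */
        for (i = 0; i < nr; i++)
                make_request(major, rw, bh[i]);
        unplug_device(dev);             /* pop the plug, start request_fn */
}
#endif
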
/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 */
static inline struct request * get_request(int n, kdev_t dev)
{
        static struct request *prev_found = NULL, *prev_limit = NULL;
        register struct request *req, *limit;

        if (n <= 0)
                panic("get_request(%d): impossible!\n", n);

        limit = all_requests + n;
        if (limit != prev_limit) {
                prev_limit = limit;
                prev_found = all_requests;
        }
        req = prev_found;
        for (;;) {
                req = ((req > all_requests) ? req : limit) - 1;
                if (req->rq_status == RQ_INACTIVE)
                        break;
                if (req == prev_found)
                        return NULL;
        }
        prev_found = req;
        req->rq_status = RQ_ACTIVE;
        req->rq_dev = dev;
        return req;
}

/*
 * wait until a free request in the first N entries is available.
 */
static struct request * __get_request_wait(int n, kdev_t dev)
{
        register struct request *req;
        struct wait_queue wait = { current, NULL };

        add_wait_queue(&wait_for_request, &wait);
        for (;;) {
                unplug_device(MAJOR(dev)+blk_dev);
                current->state = TASK_UNINTERRUPTIBLE;
                cli();
                req = get_request(n, dev);
                sti();
                if (req)
                        break;
                schedule();
        }
        remove_wait_queue(&wait_for_request, &wait);
        current->state = TASK_RUNNING;
        return req;
}

static inline struct request * get_request_wait(int n, kdev_t dev)
{
        register struct request *req;

        cli();
        req = get_request(n, dev);
        sti();
        if (req)
                return req;
        return __get_request_wait(n, dev);
}

/* RO fail safe mechanism */

static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return 0;
        return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev,int flag)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return;
        if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
        else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}

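/*
 * Illustrative sketch (not part of the original file): write protecting
 * the first SCSI disk, roughly what an ioctl such as BLKROSET does.
 */
#if 0
static void example_write_protect(void)
{
        kdev_t dev = MKDEV(SCSI_DISK_MAJOR, 0);

        set_device_ro(dev, 1);
        if (is_read_only(dev))
                printk("%s is now write protected\n", kdevname(dev));
}
#endif
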
static inline void drive_stat_acct(int cmd, unsigned long nr_sectors, short disk_index)
{
        kstat.dk_drive[disk_index]++;
        if (cmd == READ || cmd == READA) {
                kstat.dk_drive_rio[disk_index]++;
                kstat.dk_drive_rblk[disk_index] += nr_sectors;
        }
        else if (cmd == WRITE || cmd == WRITEA) {
                kstat.dk_drive_wio[disk_index]++;
                kstat.dk_drive_wblk[disk_index] += nr_sectors;
        }
}

/*
 * add_request() adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
        struct request * tmp;
        short            disk_index;

        switch (MAJOR(req->rq_dev)) {
                case SCSI_DISK_MAJOR:
                        disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
                        if (disk_index < 4)
                                drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                case IDE0_MAJOR:        /* same as HD_MAJOR */
                case XT_DISK_MAJOR:
                        disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
                        drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                case IDE1_MAJOR:
                        disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
                        drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                default:
                        break;
        }

        req->next = NULL;
        cli();
        if (req->bh)
                mark_buffer_clean(req->bh);
        if (!(tmp = dev->current_request)) {
                dev->current_request = req;
                (dev->request_fn)();
                sti();
                return;
        }
        for ( ; tmp->next ; tmp = tmp->next) {
                if ((IN_ORDER(tmp,req) ||
                    !IN_ORDER(tmp,tmp->next)) &&
                    IN_ORDER(req,tmp->next))
                        break;
        }
        req->next = tmp->next;
        tmp->next = req;

/* for SCSI devices, call request_fn unconditionally */
        if (scsi_major(MAJOR(req->rq_dev)))
                (dev->request_fn)();

        sti();
}

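/*
 * Illustrative sketch (not part of the original file): the elevator
 * ordering used above comes from the IN_ORDER macro in <linux/blk.h>.
 * The definition below is an assumption of its approximate shape -
 * sort by command, then device, then ascending sector - not a verbatim
 * copy.  It is what makes add_request() insert each request at the
 * first point that keeps the list sorted in one sweep direction.
 */
#if 0
#define IN_ORDER(s1,s2) \
((s1)->cmd < (s2)->cmd || ((s1)->cmd == (s2)->cmd && \
 ((s1)->rq_dev < (s2)->rq_dev || ((s1)->rq_dev == (s2)->rq_dev && \
  (s1)->sector < (s2)->sector))))
#endif
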
static void make_request(int major,int rw, struct buffer_head * bh)
{
        unsigned int sector, count;
        struct request * req;
        int rw_ahead, max_req;

/* WRITEA/READA is special case - it is not really needed, so if the */
/* buffer is locked, we just forget about it, else it's a normal read */
        rw_ahead = (rw == READA || rw == WRITEA);
        if (rw_ahead) {
                if (buffer_locked(bh))
                        return;
                if (rw == READA)
                        rw = READ;
                else
                        rw = WRITE;
        }
        if (rw!=READ && rw!=WRITE) {
                printk("Bad block dev command, must be R/W/RA/WA\n");
                return;
        }
        count = bh->b_size >> 9;
        sector = bh->b_blocknr * count;
        if (blk_size[major])
                if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
                        bh->b_state = 0;
                        printk("attempt to access beyond end of device\n");
                        return;
                }
        /* Uhhuh.. Nasty dead-lock possible here.. */
        if (buffer_locked(bh))
                return;
        /* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */
        lock_buffer(bh);
        if ((rw == WRITE && !buffer_dirty(bh)) || (rw == READ && buffer_uptodate(bh))) {
                unlock_buffer(bh);
                return;
        }

/* we don't allow the write-requests to fill up the queue completely:
 * we want some room for reads: they take precedence. The last third
 * of the requests are only for reads.
 */
        max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);

/* look for a free request. */
        cli();

/* The scsi disk drivers and the IDE driver completely remove the request
 * from the queue when they start processing an entry.  For this reason
 * it is safe to continue to add links to the top entry for those devices.
 */
        if ((   major == IDE0_MAJOR     /* same as HD_MAJOR */
             || major == IDE1_MAJOR
             || major == FLOPPY_MAJOR
             || major == SCSI_DISK_MAJOR
             || major == SCSI_CDROM_MAJOR
             || major == IDE2_MAJOR
             || major == IDE3_MAJOR)
            && (req = blk_dev[major].current_request))
        {
#ifdef CONFIG_BLK_DEV_HD
                if (major == HD_MAJOR || major == FLOPPY_MAJOR)
#else
                if (major == FLOPPY_MAJOR)
#endif /* CONFIG_BLK_DEV_HD */
                        req = req->next;
                while (req) {
                        if (req->rq_dev == bh->b_dev &&
                            !req->sem &&
                            req->cmd == rw &&
                            req->sector + req->nr_sectors == sector &&
                            req->nr_sectors < 244)
                        {
                                req->bhtail->b_reqnext = bh;
                                req->bhtail = bh;
                                req->nr_sectors += count;
                                mark_buffer_clean(bh);
                                sti();
                                return;
                        }

                        if (req->rq_dev == bh->b_dev &&
                            !req->sem &&
                            req->cmd == rw &&
                            req->sector - count == sector &&
                            req->nr_sectors < 244)
                        {
                                req->nr_sectors += count;
                                bh->b_reqnext = req->bh;
                                req->buffer = bh->b_data;
                                req->current_nr_sectors = count;
                                req->sector = sector;
                                mark_buffer_clean(bh);
                                req->bh = bh;
                                sti();
                                return;
                        }

                        req = req->next;
                }
        }

/* find an unused request. */
        req = get_request(max_req, bh->b_dev);
        sti();

/* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
        if (!req) {
                if (rw_ahead) {
                        unlock_buffer(bh);
                        return;
                }
                req = __get_request_wait(max_req, bh->b_dev);
        }

/* fill up the request-info, and add it to the queue */
        req->cmd = rw;
        req->errors = 0;
        req->sector = sector;
        req->nr_sectors = count;
        req->current_nr_sectors = count;
        req->buffer = bh->b_data;
        req->sem = NULL;
        req->bh = bh;
        req->bhtail = bh;
        req->next = NULL;
        add_request(major+blk_dev,req);
}

void ll_rw_page(int rw, kdev_t dev, unsigned long page, char * buffer)
{
        struct request * req;
        unsigned int major = MAJOR(dev);
        unsigned long sector = page * (PAGE_SIZE / 512);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("Trying to read nonexistent block-device %s (%ld)\n",
                       kdevname(dev), sector);
                return;
        }
        if (rw!=READ && rw!=WRITE)
                panic("Bad block dev command, must be R/W");
        if (rw == WRITE && is_read_only(dev)) {
                printk("Can't page to read-only device %s\n",
                       kdevname(dev));
                return;
        }
        req = get_request_wait(NR_REQUEST, dev);
/* fill up the request-info, and add it to the queue */
        req->cmd = rw;
        req->errors = 0;
        req->sector = sector;
        req->nr_sectors = PAGE_SIZE / 512;
        req->current_nr_sectors = PAGE_SIZE / 512;
        req->buffer = buffer;
        req->sem = &sem;
        req->bh = NULL;
        req->next = NULL;
        add_request(major+blk_dev,req);
        down(&sem);
}

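/*
 * Illustrative sketch (not part of the original file): a synchronous
 * page read, roughly how the swapping code uses ll_rw_page().  The
 * call only returns once down(&sem) is woken by I/O completion, so no
 * buffer head is involved.  swap_dev and offset are hypothetical.
 */
#if 0
static void example_page_in(kdev_t swap_dev, unsigned long offset)
{
        char * buf = (char *) __get_free_page(GFP_KERNEL);

        if (!buf)
                return;
        ll_rw_page(READ, swap_dev, offset, buf);
        /* buf now holds page "offset" of swap_dev */
}
#endif
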
/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong
   to the same device. */

void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
        unsigned int major;
        struct request plug;
        int correct_size;
        struct blk_dev_struct * dev;
        int i;

        /* Make sure that the first block contains something reasonable */
        while (!*bh) {
                bh++;
                if (--nr <= 0)
                        return;
        }

        dev = NULL;
        if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
                dev = blk_dev + major;
        if (!dev || !dev->request_fn) {
                printk(
        "ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
                kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
                goto sorry;
        }

        /* Determine correct block size for this device.  */
        correct_size = BLOCK_SIZE;
        if (blksize_size[major]) {
                i = blksize_size[major][MINOR(bh[0]->b_dev)];
                if (i)
                        correct_size = i;
        }

        /* Verify requested block sizes.  */
        for (i = 0; i < nr; i++) {
                if (bh[i] && bh[i]->b_size != correct_size) {
                        printk(
                        "ll_rw_block: only %d-char blocks implemented (%lu)\n",
                               correct_size, bh[i]->b_size);
                        goto sorry;
                }
        }

        if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
                printk("Can't write to read-only device %s\n",
                       kdevname(bh[0]->b_dev));
                goto sorry;
        }

        /* If there are no pending requests for this device, then we insert
           a dummy request for that device.  This will prevent the request
           from starting until we have shoved all of the blocks into the
           queue, and then we let it rip.  */

        if (nr > 1)
                plug_device(dev, &plug);
        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        set_bit(BH_Req, &bh[i]->b_state);
                        make_request(major, rw, bh[i]);
                        if (rw == READ || rw == READA)
                                kstat.pgpgin++;
                        else
                                kstat.pgpgout++;
                }
        }
        unplug_device(dev);
        return;

      sorry:
        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        clear_bit(BH_Dirty, &bh[i]->b_state);
                        clear_bit(BH_Uptodate, &bh[i]->b_state);
                }
        }
        return;
}

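/*
 * Illustrative sketch (not part of the original file): the classic
 * bread()-style caller of ll_rw_block().  getblk(), wait_on_buffer()
 * and brelse() belong to the buffer cache, not to this file.
 */
#if 0
static struct buffer_head * example_bread(kdev_t dev, int block, int size)
{
        struct buffer_head * bh = getblk(dev, block, size);

        if (!buffer_uptodate(bh)) {
                ll_rw_block(READ, 1, &bh);
                wait_on_buffer(bh);     /* sleep until the I/O completes */
        }
        return bh;      /* caller re-checks buffer_uptodate(), then brelse() */
}
#endif
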
void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
{
        int i, j;
        int buffersize;
        struct request * req[8];
        unsigned int major = MAJOR(dev);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
                return;
        }

        if (rw != READ && rw != WRITE) {
                printk("ll_rw_swap: bad block dev command, must be R/W\n");
                return;
        }
        if (rw == WRITE && is_read_only(dev)) {
                printk("Can't swap to read-only device %s\n",
                       kdevname(dev));
                return;
        }

        buffersize = PAGE_SIZE / nb;

        for (j=0, i=0; i<nb;)
        {
                for (; j < 8 && i < nb; j++, i++, buf += buffersize)
                {
                        if (j == 0) {
                                req[j] = get_request_wait(NR_REQUEST, dev);
                        } else {
                                cli();
                                req[j] = get_request(NR_REQUEST, dev);
                                sti();
                                if (req[j] == NULL)
                                        break;
                        }
                        req[j]->cmd = rw;
                        req[j]->errors = 0;
                        req[j]->sector = (b[i] * buffersize) >> 9;
                        req[j]->nr_sectors = buffersize >> 9;
                        req[j]->current_nr_sectors = buffersize >> 9;
                        req[j]->buffer = buf;
                        req[j]->sem = &sem;
                        req[j]->bh = NULL;
                        req[j]->next = NULL;
                        add_request(major+blk_dev,req[j]);
                }
                while (j > 0) {
                        j--;
                        down(&sem);
                }
        }
}

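/*
 * Illustrative sketch (not part of the original file): one page of a
 * swap file is scattered over nb file blocks of PAGE_SIZE/nb bytes
 * each, so a caller hands in the block numbers backing that page
 * (e.g. gathered via bmap()).  All names here are hypothetical.
 */
#if 0
static void example_swap_page_out(kdev_t swap_file_dev, unsigned int * zones,
                                  int nb, char * page_buf)
{
        /* zones[0..nb-1]: file blocks backing one page of the swap file */
        ll_rw_swap_file(WRITE, swap_file_dev, zones, nb, page_buf);
}
#endif
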
int blk_dev_init(void)
{
        struct request * req;

        req = all_requests + NR_REQUEST;
        while (--req >= all_requests) {
                req->rq_status = RQ_INACTIVE;
                req->next = NULL;
        }
        memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_IDE
        ide_init();             /* this MUST precede hd_init */
#endif
#ifdef CONFIG_BLK_DEV_HD
        hd_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
        xd_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
        floppy_init();
#else
        outb_p(0xc, 0x3f2);     /* no floppy driver: motors off via the FDC
                                   digital output register */
#endif
#ifdef CONFIG_CDU31A
        cdu31a_init();
#endif /* CONFIG_CDU31A */
#ifdef CONFIG_MCD
        mcd_init();
#endif /* CONFIG_MCD */
#ifdef CONFIG_MCDX
        mcdx_init();
#endif /* CONFIG_MCDX */
#ifdef CONFIG_SBPCD
        sbpcd_init();
#endif /* CONFIG_SBPCD */
#ifdef CONFIG_AZTCD
        aztcd_init();
#endif /* CONFIG_AZTCD */
#ifdef CONFIG_CDU535
        sony535_init();
#endif /* CONFIG_CDU535 */
#ifdef CONFIG_GSCD
        gscd_init();
#endif /* CONFIG_GSCD */
#ifdef CONFIG_CM206
        cm206_init();
#endif
#ifdef CONFIG_OPTCD
        optcd_init();
#endif /* CONFIG_OPTCD */
#ifdef CONFIG_SJCD
        sjcd_init();
#endif /* CONFIG_SJCD */
        return 0;
}
