root/drivers/block/ll_rw_blk.c

DEFINITIONS

This source file includes the following definitions:
  1. plug_device
  2. unplug_device
  3. get_request
  4. __get_request_wait
  5. get_request_wait
  6. is_read_only
  7. set_device_ro
  8. drive_stat_acct
  9. add_request
  10. make_request
  11. ll_rw_page
  12. ll_rw_block
  13. ll_rw_swap_file
  14. blk_dev_init

/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>

/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory
 */
static struct request all_requests[NR_REQUEST];

/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request = NULL;

/* This specifies how many sectors to read ahead on the disk.  */

int read_ahead[MAX_BLKDEV] = {0, };

/* blk_dev_struct is:
 *      do_request-address
 *      next-request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV] = {
        { NULL, NULL },         /* 0 no_dev */
        { NULL, NULL },         /* 1 dev mem */
        { NULL, NULL },         /* 2 dev fd */
        { NULL, NULL },         /* 3 dev ide0 or hd */
        { NULL, NULL },         /* 4 dev ttyx */
        { NULL, NULL },         /* 5 dev tty */
        { NULL, NULL },         /* 6 dev lp */
        { NULL, NULL },         /* 7 dev pipes */
        { NULL, NULL },         /* 8 dev sd */
        { NULL, NULL },         /* 9 dev st */
        { NULL, NULL },         /* 10 */
        { NULL, NULL },         /* 11 */
        { NULL, NULL },         /* 12 */
        { NULL, NULL },         /* 13 */
        { NULL, NULL },         /* 14 */
        { NULL, NULL },         /* 15 */
        { NULL, NULL },         /* 16 */
        { NULL, NULL },         /* 17 */
        { NULL, NULL },         /* 18 */
        { NULL, NULL },         /* 19 */
        { NULL, NULL },         /* 20 */
        { NULL, NULL },         /* 21 */
        { NULL, NULL }          /* 22 dev ide1 */
};

/*
 * blk_size contains the size of all block-devices in units of 1024 bytes
 * (i.e. 1k blocks):
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size contains the software block size (in bytes) of each
 * block device:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * hardsect_size contains the size of the hardware sector of a device.
 *
 * hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR])
 *              then 512 bytes is assumed.
 * else
 *              sector_size is hardsect_size[MAJOR][MINOR]
 * This is currently set by some SCSI devices and read by the msdos fs
 * driver.  It may find more uses later.
 */
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };

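/*
 * Editor's sketch (not part of the original file): how a block driver of
 * this era would typically publish its geometry through the arrays above
 * at init time.  EXAMPLE_MAJOR, example_request(), example_sizes[] and
 * example_blksizes[] are hypothetical names; real drivers (e.g. hd.c,
 * sd.c) keep equivalent per-minor tables of their own.
 */
#if 0
static void example_request(void);              /* hypothetical request_fn */

static int example_sizes[256];          /* device size per minor, in 1k blocks */
static int example_blksizes[256];       /* software block size per minor, bytes */

static void example_geninit(void)
{
        int i;

        for (i = 0; i < 256; i++) {
                example_sizes[i] = 0;           /* filled in after probing */
                example_blksizes[i] = 1024;
        }
        blk_size[EXAMPLE_MAJOR] = example_sizes;
        blksize_size[EXAMPLE_MAJOR] = example_blksizes;
        read_ahead[EXAMPLE_MAJOR] = 8;          /* read ahead 8 sectors (4kB) */
        blk_dev[EXAMPLE_MAJOR].request_fn = example_request;
}
#endif
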
/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 */
static inline void plug_device(struct blk_dev_struct * dev, struct request * plug)
{
        unsigned long flags;

        plug->rq_status = RQ_INACTIVE;
        plug->cmd = -1;
        plug->next = NULL;
        save_flags(flags);
        cli();
        if (!dev->current_request)
                dev->current_request = plug;
        restore_flags(flags);
}

/*
 * remove the plug and let it rip..
 */
static inline void unplug_device(struct blk_dev_struct * dev)
{
        struct request * req;
        unsigned long flags;

        save_flags(flags);
        cli();
        req = dev->current_request;
        if (req && req->rq_status == RQ_INACTIVE && req->cmd == -1) {
                dev->current_request = req->next;
                (dev->request_fn)();
        }
        restore_flags(flags);
}

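/*
 * Editor's sketch (not part of the original file): the intended pairing
 * of the two helpers above, mirroring what ll_rw_block() below does.  A
 * dummy request is plugged in before queueing several buffers, so the
 * driver does not start seeking after the first one; unplug_device()
 * then kicks off the whole sorted batch.
 */
#if 0
static void example_batched_io(int major, int rw, int nr, struct buffer_head * bh[])
{
        struct blk_dev_struct * dev = blk_dev + major;
        struct request plug;
        int i;

        plug_device(dev, &plug);        /* hold the driver back */
        for (i = 0; i < nr; i++)
                make_request(major, rw, bh[i]);
        unplug_device(dev);             /* let it rip */
}
#endif
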
/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 */
static inline struct request * get_request(int n, kdev_t dev)
{
        static struct request *prev_found = NULL, *prev_limit = NULL;
        register struct request *req, *limit;

        if (n <= 0)
                panic("get_request(%d): impossible!\n", n);

        limit = all_requests + n;
        if (limit != prev_limit) {
                prev_limit = limit;
                prev_found = all_requests;
        }
        req = prev_found;
        for (;;) {
                req = ((req > all_requests) ? req : limit) - 1;
                if (req->rq_status == RQ_INACTIVE)
                        break;
                if (req == prev_found)
                        return NULL;
        }
        prev_found = req;
        req->rq_status = RQ_ACTIVE;
        req->rq_dev = dev;
        return req;
}

/*
 * wait until a free request in the first N entries is available.
 */
static struct request * __get_request_wait(int n, kdev_t dev)
{
        register struct request *req;
        struct wait_queue wait = { current, NULL };

        add_wait_queue(&wait_for_request, &wait);
        for (;;) {
                unplug_device(MAJOR(dev)+blk_dev);
                current->state = TASK_UNINTERRUPTIBLE;
                cli();
                req = get_request(n, dev);
                sti();
                if (req)
                        break;
                schedule();
        }
        remove_wait_queue(&wait_for_request, &wait);
        current->state = TASK_RUNNING;
        return req;
}

static inline struct request * get_request_wait(int n, kdev_t dev)
{
        register struct request *req;

        cli();
        req = get_request(n, dev);
        sti();
        if (req)
                return req;
        return __get_request_wait(n, dev);
}

/* RO fail safe mechanism */

static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return 0;
        return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev,int flag)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return;
        if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
        else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}

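/*
 * Editor's sketch (not part of the original file): these bits are
 * flipped from the per-driver ioctl handlers.  A typical
 * BLKROSET/BLKROGET pair would look roughly like this; the handler is
 * hypothetical and verify_area() checking of the user pointer is
 * omitted for brevity.
 */
#if 0
static int example_ioctl(struct inode * inode, struct file * file,
                         unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
                case BLKROSET:
                        if (!suser())
                                return -EACCES;
                        set_device_ro(inode->i_rdev, get_user((int *) arg));
                        return 0;
                case BLKROGET:
                        put_user(is_read_only(inode->i_rdev) != 0, (int *) arg);
                        return 0;
                default:
                        return -EINVAL;
        }
}
#endif
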
static inline void drive_stat_acct(int cmd, unsigned long nr_sectors, short disk_index)
{
        kstat.dk_drive[disk_index]++;
        if (cmd == READ || cmd == READA) {
                kstat.dk_drive_rio[disk_index]++;
                kstat.dk_drive_rblk[disk_index] += nr_sectors;
        }
        else if (cmd == WRITE || cmd == WRITEA) {
                kstat.dk_drive_wio[disk_index]++;
                kstat.dk_drive_wblk[disk_index] += nr_sectors;
        }
}

/*
 * add_request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 */
static void add_request(struct blk_dev_struct * dev, struct request * req)
{
        struct request * tmp;
        short            disk_index;

        switch (MAJOR(req->rq_dev)) {
                case SCSI_DISK_MAJOR:
                        disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
                        if (disk_index < 4)
                                drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                case IDE0_MAJOR:        /* same as HD_MAJOR */
                case XT_DISK_MAJOR:
                        disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
                        drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                case IDE1_MAJOR:
                        disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
                        drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                default:
                        break;
        }

        req->next = NULL;
        cli();
        if (req->bh)
                mark_buffer_clean(req->bh);
        if (!(tmp = dev->current_request)) {
                dev->current_request = req;
                (dev->request_fn)();
                sti();
                return;
        }
        for ( ; tmp->next ; tmp = tmp->next) {
                if ((IN_ORDER(tmp,req) ||
                    !IN_ORDER(tmp,tmp->next)) &&
                    IN_ORDER(req,tmp->next))
                        break;
        }
        req->next = tmp->next;
        tmp->next = req;

/* for SCSI devices, call request_fn unconditionally */
        if (scsi_major(MAJOR(req->rq_dev)))
                (dev->request_fn)();

        sti();
}

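/*
 * Editor's note: IN_ORDER() above comes from <linux/blk.h>.  In this
 * kernel generation it implements the elevator ordering by comparing
 * the command first (reads sort before writes), then the device, then
 * the sector number, roughly:
 *
 *      #define IN_ORDER(s1,s2) \
 *      ((s1)->cmd < (s2)->cmd || ((s1)->cmd == (s2)->cmd && \
 *      ((s1)->rq_dev < (s2)->rq_dev || ((s1)->rq_dev == (s2)->rq_dev && \
 *      (s1)->sector < (s2)->sector))))
 */
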
static void make_request(int major,int rw, struct buffer_head * bh)
{
        unsigned int sector, count;
        struct request * req;
        int rw_ahead, max_req;

/* WRITEA/READA are a special case - they are not really needed, so if the */
/* buffer is locked, we just forget about it, else it's a normal read/write */
        rw_ahead = (rw == READA || rw == WRITEA);
        if (rw_ahead) {
                if (bh->b_lock)
                        return;
                if (rw == READA)
                        rw = READ;
                else
                        rw = WRITE;
        }
        if (rw!=READ && rw!=WRITE) {
                printk("Bad block dev command, must be R/W/RA/WA\n");
                return;
        }
        count = bh->b_size >> 9;
        sector = bh->b_blocknr * count;
        if (blk_size[major])
                if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
                        bh->b_dirt = bh->b_uptodate = 0;
                        bh->b_req = 0;
                        printk("attempt to access beyond end of device\n");
                        return;
                }
        /* Uhhuh.. Nasty dead-lock possible here.. */
        if (bh->b_lock)
                return;
        /* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */
        lock_buffer(bh);
        if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
                unlock_buffer(bh);
                return;
        }

/* we don't allow the write-requests to fill up the queue completely:
 * we want some room for reads: they take precedence. The last third
 * of the requests are only for reads.
 */
        max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);

/* look for a free request. */
        cli();

/* The scsi disk drivers and the IDE driver completely remove the request
 * from the queue when they start processing an entry.  For this reason
 * it is safe to continue to add links to the top entry for those devices.
 */
        if ((   major == IDE0_MAJOR     /* same as HD_MAJOR */
             || major == IDE1_MAJOR
             || major == FLOPPY_MAJOR
             || major == SCSI_DISK_MAJOR
             || major == SCSI_CDROM_MAJOR
             || major == IDE2_MAJOR
             || major == IDE3_MAJOR)
            && (req = blk_dev[major].current_request))
        {
#ifdef CONFIG_BLK_DEV_HD
                if (major == HD_MAJOR || major == FLOPPY_MAJOR)
#else
                if (major == FLOPPY_MAJOR)
#endif /* CONFIG_BLK_DEV_HD */
                        req = req->next;
                while (req) {
                        /* can we append this buffer to the tail of an
                         * existing request on the same device? */
                        if (req->rq_dev == bh->b_dev &&
                            !req->sem &&
                            req->cmd == rw &&
                            req->sector + req->nr_sectors == sector &&
                            req->nr_sectors < 244)
                        {
                                req->bhtail->b_reqnext = bh;
                                req->bhtail = bh;
                                req->nr_sectors += count;
                                mark_buffer_clean(bh);
                                sti();
                                return;
                        }

                        /* or prepend it at the head of one? */
                        if (req->rq_dev == bh->b_dev &&
                            !req->sem &&
                            req->cmd == rw &&
                            req->sector - count == sector &&
                            req->nr_sectors < 244)
                        {
                                req->nr_sectors += count;
                                bh->b_reqnext = req->bh;
                                req->buffer = bh->b_data;
                                req->current_nr_sectors = count;
                                req->sector = sector;
                                mark_buffer_clean(bh);
                                req->bh = bh;
                                sti();
                                return;
                        }

                        req = req->next;
                }
        }

/* find an unused request. */
        req = get_request(max_req, bh->b_dev);
        sti();

/* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
        if (!req) {
                if (rw_ahead) {
                        unlock_buffer(bh);
                        return;
                }
                req = __get_request_wait(max_req, bh->b_dev);
        }

/* fill up the request-info, and add it to the queue */
        req->cmd = rw;
        req->errors = 0;
        req->sector = sector;
        req->nr_sectors = count;
        req->current_nr_sectors = count;
        req->buffer = bh->b_data;
        req->sem = NULL;
        req->bh = bh;
        req->bhtail = bh;
        req->next = NULL;
        add_request(major+blk_dev,req);
}

void ll_rw_page(int rw, kdev_t dev, unsigned long page, char * buffer)
{
        struct request * req;
        unsigned int major = MAJOR(dev);
        unsigned long sector = page * (PAGE_SIZE / 512);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("Trying to read nonexistent block-device %s (%ld)\n",
                       kdevname(dev), sector);
                return;
        }
        if (rw!=READ && rw!=WRITE)
                panic("Bad block dev command, must be R/W");
        if (rw == WRITE && is_read_only(dev)) {
                printk("Can't page to read-only device %s\n",
                       kdevname(dev));
                return;
        }
        req = get_request_wait(NR_REQUEST, dev);
/* fill up the request-info, and add it to the queue */
        req->cmd = rw;
        req->errors = 0;
        req->sector = sector;
        req->nr_sectors = PAGE_SIZE / 512;
        req->current_nr_sectors = PAGE_SIZE / 512;
        req->buffer = buffer;
        req->sem = &sem;
        req->bh = NULL;
        req->next = NULL;
        add_request(major+blk_dev,req);
        down(&sem);
}

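/*
 * Editor's sketch (not part of the original file): the swapper is the
 * main customer of ll_rw_page().  Paging one page out to a swap
 * partition comes down to something like this; swap_device, page_nr and
 * buffer are hypothetical stand-ins for the caller's values.
 */
#if 0
static void example_swap_out(kdev_t swap_device, unsigned long page_nr, char * buffer)
{
        /* down() on the request's semaphore inside ll_rw_page() makes
         * this block until the page has reached the disk */
        ll_rw_page(WRITE, swap_device, page_nr, buffer);
}
#endif
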
/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong to
   the same device */

void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
        unsigned int major;
        struct request plug;
        int correct_size;
        struct blk_dev_struct * dev;
        int i;

        /* Make sure that the first block contains something reasonable */
        while (!*bh) {
                bh++;
                if (--nr <= 0)
                        return;
        }

        dev = NULL;
        if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
                dev = blk_dev + major;
        if (!dev || !dev->request_fn) {
                printk(
        "ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
                kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
                goto sorry;
        }

        /* Determine correct block size for this device.  */
        correct_size = BLOCK_SIZE;
        if (blksize_size[major]) {
                i = blksize_size[major][MINOR(bh[0]->b_dev)];
                if (i)
                        correct_size = i;
        }

        /* Verify requested block sizes.  */
        for (i = 0; i < nr; i++) {
                if (bh[i] && bh[i]->b_size != correct_size) {
                        printk(
                        "ll_rw_block: only %d-char blocks implemented (%lu)\n",
                               correct_size, bh[i]->b_size);
                        goto sorry;
                }
        }

        if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
                printk("Can't write to read-only device %s\n",
                       kdevname(bh[0]->b_dev));
                goto sorry;
        }

        /* If there are no pending requests for this device, then we insert
           a dummy request for that device.  This will prevent the request
           from starting until we have shoved all of the blocks into the
           queue, and then we let it rip.  */

        if (nr > 1)
                plug_device(dev, &plug);
        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        bh[i]->b_req = 1;
                        make_request(major, rw, bh[i]);
                        if (rw == READ || rw == READA)
                                kstat.pgpgin++;
                        else
                                kstat.pgpgout++;
                }
        }
        unplug_device(dev);
        return;

      sorry:
        for (i = 0; i < nr; i++) {
                if (bh[i])
                        bh[i]->b_dirt = bh[i]->b_uptodate = 0;
        }
        return;
}

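/*
 * Editor's sketch (not part of the original file): the classic consumer
 * of ll_rw_block() is the buffer cache.  A bread()-style synchronous
 * read looks roughly like this (cf. fs/buffer.c).
 */
#if 0
static struct buffer_head * example_bread(kdev_t dev, int block, int size)
{
        struct buffer_head * bh = getblk(dev, block, size);

        if (!bh)
                return NULL;
        if (bh->b_uptodate)
                return bh;
        ll_rw_block(READ, 1, &bh);      /* queue it... */
        wait_on_buffer(bh);             /* ...and sleep until it completes */
        if (bh->b_uptodate)
                return bh;
        brelse(bh);                     /* I/O error */
        return NULL;
}
#endif
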
void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
{
        int i, j;
        int buffersize;
        struct request * req[8];
        unsigned int major = MAJOR(dev);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
                return;
        }

        if (rw != READ && rw != WRITE) {
                printk("ll_rw_swap_file: bad block dev command, must be R/W\n");
                return;
        }
        if (rw == WRITE && is_read_only(dev)) {
                printk("Can't swap to read-only device %s\n",
                       kdevname(dev));
                return;
        }

        buffersize = PAGE_SIZE / nb;

        for (j=0, i=0; i<nb;)
        {
                for (; j < 8 && i < nb; j++, i++, buf += buffersize)
                {
                        if (j == 0) {
                                req[j] = get_request_wait(NR_REQUEST, dev);
                        } else {
                                cli();
                                req[j] = get_request(NR_REQUEST, dev);
                                sti();
                                if (req[j] == NULL)
                                        break;
                        }
                        req[j]->cmd = rw;
                        req[j]->errors = 0;
                        req[j]->sector = (b[i] * buffersize) >> 9;
                        req[j]->nr_sectors = buffersize >> 9;
                        req[j]->current_nr_sectors = buffersize >> 9;
                        req[j]->buffer = buf;
                        req[j]->sem = &sem;
                        req[j]->bh = NULL;
                        req[j]->next = NULL;
                        add_request(major+blk_dev,req[j]);
                }
                while (j > 0) {
                        j--;
                        down(&sem);
                }
        }
}

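/*
 * Editor's sketch (not part of the original file): ll_rw_swap_file() is
 * meant for swap files, where one page is backed by several filesystem
 * blocks that need not be contiguous on the device.  A hypothetical
 * caller writing out one page backed by nb blocks:
 */
#if 0
static void example_swap_file_write(kdev_t dev, unsigned long page_address, int nb)
{
        unsigned int zones[PAGE_SIZE / 512];    /* device block numbers backing the page */

        /* ... fill zones[0..nb-1] from the filesystem's bmap() ... */
        ll_rw_swap_file(WRITE, dev, zones, nb, (char *) page_address);
}
#endif
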
int blk_dev_init(void)
{
        struct request * req;

        req = all_requests + NR_REQUEST;
        while (--req >= all_requests) {
                req->rq_status = RQ_INACTIVE;
                req->next = NULL;
        }
        memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_IDE
        ide_init();             /* this MUST precede hd_init */
#endif
#ifdef CONFIG_BLK_DEV_HD
        hd_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
        xd_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
        floppy_init();
#else
        outb_p(0xc, 0x3f2);     /* no floppy driver: leave the FDC enabled, all motors off */
#endif
#ifdef CONFIG_CDU31A
        cdu31a_init();
#endif /* CONFIG_CDU31A */
#ifdef CONFIG_MCD
        mcd_init();
#endif /* CONFIG_MCD */
#ifdef CONFIG_MCDX
        mcdx_init();
#endif /* CONFIG_MCDX */
#ifdef CONFIG_SBPCD
        sbpcd_init();
#endif /* CONFIG_SBPCD */
#ifdef CONFIG_AZTCD
        aztcd_init();
#endif /* CONFIG_AZTCD */
#ifdef CONFIG_CDU535
        sony535_init();
#endif /* CONFIG_CDU535 */
#ifdef CONFIG_GSCD
        gscd_init();
#endif /* CONFIG_GSCD */
#ifdef CONFIG_CM206
        cm206_init();
#endif
#ifdef CONFIG_OPTCD
        optcd_init();
#endif /* CONFIG_OPTCD */
#ifdef CONFIG_SJCD
        sjcd_init();
#endif /* CONFIG_SJCD */
        return 0;
}
