root/drivers/block/ll_rw_blk.c


DEFINITIONS

This source file includes the following definitions:
  1. unplug_device
  2. plug_device
  3. get_request
  4. __get_request_wait
  5. get_request_wait
  6. is_read_only
  7. set_device_ro
  8. drive_stat_acct
  9. add_request
  10. make_request
  11. ll_rw_block
  12. ll_rw_swap_file
  13. blk_dev_init

/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>

/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory
 */
static struct request all_requests[NR_REQUEST];

/*
 * The "disk" task queue is used to start the actual requests
 * after a plug
 */
DECLARE_TASK_QUEUE(tq_disk);

/*
 * used to wait on when there are no free requests
 */
struct wait_queue * wait_for_request = NULL;

/* This specifies how many sectors to read ahead on the disk.  */

int read_ahead[MAX_BLKDEV] = {0, };

/* blk_dev_struct is:
 *      *request_fn
 *      *current_request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV]; /* initialized by blk_dev_init() */
/*
 * blk_size contains the size of all block-devices in units of
 * 1024 bytes:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };

/*
 * hardsect_size contains the size of the hardware sector of a device.
 *
 * hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR])
 *              then 512 bytes is assumed.
 * else
 *              sector_size is hardsect_size[MAJOR][MINOR]
 * This is currently set by some SCSI devices and read by the msdos fs
 * driver.  It might be used for other purposes later.
 */
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
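
/*
 * A minimal sketch of how these three tables are meant to be read,
 * using the defaults documented above (1024-byte blocks when a major
 * has no blksize_size table or a zero entry, 512-byte hardware
 * sectors when it has no hardsect_size table).  These helper names
 * are illustrative only and not part of the kernel proper.
 */
static inline int example_blksize(kdev_t dev)
{
        int *sizes = blksize_size[MAJOR(dev)];
        return (sizes && sizes[MINOR(dev)]) ? sizes[MINOR(dev)] : BLOCK_SIZE;
}

static inline int example_hardsect(kdev_t dev)
{
        int *sizes = hardsect_size[MAJOR(dev)];
        return (sizes && sizes[MINOR(dev)]) ? sizes[MINOR(dev)] : 512;
}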

/*
 * remove the plug and let it rip..
 */
void unplug_device(void * data)
{
        struct blk_dev_struct * dev = (struct blk_dev_struct *) data;
        unsigned long flags;

        save_flags(flags);
        cli();
        if (dev->current_request == &dev->plug) {
                dev->current_request = dev->plug.next;
                dev->plug.next = NULL;
                if (dev->current_request)
                        (dev->request_fn)();
        }
        restore_flags(flags);
}

/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 *
 * This is called with interrupts off and no requests on the queue.
 */
static inline void plug_device(struct blk_dev_struct * dev)
{
        dev->current_request = &dev->plug;
        queue_task_irq_off(&dev->plug_tq, &tq_disk);
}
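
/*
 * Whoever needs queued I/O to make progress must eventually run the
 * disk task queue, or a plugged device just sits on its requests.
 * A minimal sketch of the waiting side (this mirrors what
 * __get_request_wait() below already does); the helper name is
 * illustrative only:
 */
static void example_kick_queues(void)
{
        run_task_queue(&tq_disk);       /* unplug_device() runs for each plugged device */
        schedule();                     /* give the drivers time to complete requests */
}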

/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in, and will still
 *       be disabled on the way out.
 */
static inline struct request * get_request(int n, kdev_t dev)
{
        static struct request *prev_found = NULL, *prev_limit = NULL;
        register struct request *req, *limit;

        if (n <= 0)
                panic("get_request(%d): impossible!\n", n);

        limit = all_requests + n;
        if (limit != prev_limit) {
                prev_limit = limit;
                prev_found = all_requests;
        }
        req = prev_found;
        for (;;) {
                req = ((req > all_requests) ? req : limit) - 1;
                if (req->rq_status == RQ_INACTIVE)
                        break;
                if (req == prev_found)
                        return NULL;
        }
        prev_found = req;
        req->rq_status = RQ_ACTIVE;
        req->rq_dev = dev;
        return req;
}
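
/*
 * Note on the search above: it walks backwards from prev_found and
 * wraps from the front of all_requests[] to limit-1, so a full lap
 * ending back at prev_found means every slot is busy.  Caching
 * prev_found lets successive allocations resume where the last
 * search left off instead of rescanning the same slots each time.
 */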

/*
 * wait until a free request in the first N entries is available.
 */
static struct request * __get_request_wait(int n, kdev_t dev)
{
        register struct request *req;
        struct wait_queue wait = { current, NULL };

        add_wait_queue(&wait_for_request, &wait);
        for (;;) {
                current->state = TASK_UNINTERRUPTIBLE;
                cli();
                req = get_request(n, dev);
                sti();
                if (req)
                        break;
                run_task_queue(&tq_disk);
                schedule();
        }
        remove_wait_queue(&wait_for_request, &wait);
        current->state = TASK_RUNNING;
        return req;
}

static inline struct request * get_request_wait(int n, kdev_t dev)
{
        register struct request *req;

        cli();
        req = get_request(n, dev);
        sti();
        if (req)
                return req;
        return __get_request_wait(n, dev);
}

/* RO fail safe mechanism */

static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return 0;
        return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev,int flag)
{
        int minor,major;

        major = MAJOR(dev);
        minor = MINOR(dev);
        if (major < 0 || major >= MAX_BLKDEV) return;
        if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
        else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
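
/*
 * ro_bits packs one read-only flag per minor: eight 32-bit words
 * cover the 256 possible minors of a major.  A worked example:
 * minor 70 lands in word 70 >> 5 == 2, at bit 70 & 31 == 6, so
 * is_read_only(MKDEV(major, 70)) tests ro_bits[major][2] & (1 << 6).
 */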

static inline void drive_stat_acct(int cmd, unsigned long nr_sectors, short disk_index)
{
        kstat.dk_drive[disk_index]++;
        if (cmd == READ) {
                kstat.dk_drive_rio[disk_index]++;
                kstat.dk_drive_rblk[disk_index] += nr_sectors;
        }
        else if (cmd == WRITE) {
                kstat.dk_drive_wio[disk_index]++;
                kstat.dk_drive_wblk[disk_index] += nr_sectors;
        } else
                printk("drive_stat_acct: cmd not R/W?\n");
}

/*
 * add-request adds a request to the linked list.
 * It disables interrupts so that it can muck with the
 * request-lists in peace.
 *
 * By this point, req->cmd is always either READ/WRITE, never READA/WRITEA,
 * which is important for drive_stat_acct() above.
 */

void add_request(struct blk_dev_struct * dev, struct request * req)
{
        struct request * tmp;
        short            disk_index;

        switch (MAJOR(req->rq_dev)) {
                case SCSI_DISK_MAJOR:
                        disk_index = (MINOR(req->rq_dev) & 0x0070) >> 4;
                        if (disk_index < 4)
                                drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                case IDE0_MAJOR:        /* same as HD_MAJOR */
                case XT_DISK_MAJOR:
                        disk_index = (MINOR(req->rq_dev) & 0x0040) >> 6;
                        drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                        break;
                case IDE1_MAJOR:
                        disk_index = ((MINOR(req->rq_dev) & 0x0040) >> 6) + 2;
                        drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
                default:
                        break;
        }

        req->next = NULL;
        cli();
        if (req->bh)
                mark_buffer_clean(req->bh);
        if (!(tmp = dev->current_request)) {
                dev->current_request = req;
                (dev->request_fn)();
                sti();
                return;
        }
        for ( ; tmp->next ; tmp = tmp->next) {
                if ((IN_ORDER(tmp,req) ||
                    !IN_ORDER(tmp,tmp->next)) &&
                    IN_ORDER(req,tmp->next))
                        break;
        }
        req->next = tmp->next;
        tmp->next = req;

/* for SCSI devices, call request_fn unconditionally */
        if (scsi_major(MAJOR(req->rq_dev)))
                (dev->request_fn)();

        sti();
}
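
/*
 * IN_ORDER() comes from <linux/blk.h>; in kernels of this vintage it
 * sorts requests by command, then device, then sector, roughly:
 *
 *      #define IN_ORDER(s1,s2) \
 *      ((s1)->cmd < (s2)->cmd || ((s1)->cmd == (s2)->cmd && \
 *      ((s1)->rq_dev < (s2)->rq_dev || ((s1)->rq_dev == (s2)->rq_dev && \
 *      (s1)->sector < (s2)->sector))))
 *
 * so the insertion loop in add_request() is a classic one-way elevator:
 * the new request goes in at the first point where the list's own
 * ordering breaks, or where it sorts between its two neighbours.
 */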

static void make_request(int major,int rw, struct buffer_head * bh)
{
        unsigned int sector, count;
        struct request * req;
        int rw_ahead, max_req;

        count = bh->b_size >> 9;
        sector = bh->b_rsector;
        if (blk_size[major])
                if (blk_size[major][MINOR(bh->b_rdev)] < (sector + count)>>1) {
                        bh->b_state = 0;
                        printk("attempt to access beyond end of device\n");
                        printk("%s: rw=%d, want=%d, limit=%d\n", kdevname(bh->b_rdev),
                         rw, (sector + count)>>1, blk_size[major][MINOR(bh->b_rdev)]);
                        return;
                }
        /* Uhhuh.. Nasty dead-lock possible here.. */
        if (buffer_locked(bh))
                return;
        /* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */
        lock_buffer(bh);

        rw_ahead = 0;   /* normal case; gets changed below for READA/WRITEA */
        switch (rw) {
                case READA:
                        rw_ahead = 1;
                        rw = READ;      /* drop into READ */
                case READ:
                        if (buffer_uptodate(bh)) {
                                unlock_buffer(bh); /* Hmmph! Already have it */
                                return;
                        }
                        kstat.pgpgin++;
                        max_req = NR_REQUEST;   /* reads take precedence */
                        break;
                case WRITEA:
                        rw_ahead = 1;
                        rw = WRITE;     /* drop into WRITE */
                case WRITE:
                        if (!buffer_dirty(bh)) {
                                unlock_buffer(bh); /* Hmmph! Nothing to write */
                                return;
                        }
                        /* We don't allow the write-requests to fill up the
                         * queue completely:  we want some room for reads,
                         * as they take precedence. The last third of the
                         * requests are only for reads.
                         */
                        kstat.pgpgout++;
                        max_req = (NR_REQUEST * 2) / 3;
                        break;
                default:
                        printk("make_request: bad block dev cmd, must be R/W/RA/WA\n");
                        unlock_buffer(bh);
                        return;
        }

/* look for a free request. */

        /*
         * Try to coalesce the new request with old requests
         */
        cli();
        req = blk_dev[major].current_request;
        if (!req) {
                /* MD and loop can't handle plugging without deadlocking */
                if (major != MD_MAJOR && major != LOOP_MAJOR)
                        plug_device(blk_dev + major);
        } else switch (major) {
             case IDE0_MAJOR:   /* same as HD_MAJOR */
             case IDE1_MAJOR:
             case FLOPPY_MAJOR:
             case IDE2_MAJOR:
             case IDE3_MAJOR:
                /*
                 * The scsi disk and cdrom drivers completely remove the request
                 * from the queue when they start processing an entry.  For this
                 * reason it is safe to continue to add links to the top entry for
                 * those devices.
                 *
                 * All other drivers need to jump over the first entry, as that
                 * entry may be busy being processed and we thus can't change it.
                 */
                req = req->next;
                if (!req)
                        break;
                /* fall through */

             case SCSI_DISK_MAJOR:
             case SCSI_CDROM_MAJOR:

                do {
                        if (req->sem)
                                continue;
                        if (req->cmd != rw)
                                continue;
                        if (req->nr_sectors >= 244)
                                continue;
                        if (req->rq_dev != bh->b_rdev)
                                continue;
                        /* Can we add it to the end of this request? */
                        if (req->sector + req->nr_sectors == sector) {
                                req->bhtail->b_reqnext = bh;
                                req->bhtail = bh;
                        /* or to the beginning? */
                        } else if (req->sector - count == sector) {
                                bh->b_reqnext = req->bh;
                                req->bh = bh;
                                req->buffer = bh->b_data;
                                req->current_nr_sectors = count;
                                req->sector = sector;
                        } else
                                continue;

                        req->nr_sectors += count;
                        mark_buffer_clean(bh);
                        sti();
                        return;
                } while ((req = req->next) != NULL);
        }

/* find an unused request. */
        req = get_request(max_req, bh->b_rdev);
        sti();

/* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
        if (!req) {
                if (rw_ahead) {
                        unlock_buffer(bh);
                        return;
                }
                req = __get_request_wait(max_req, bh->b_rdev);
        }

/* fill up the request-info, and add it to the queue */
        req->cmd = rw;
        req->errors = 0;
        req->sector = sector;
        req->nr_sectors = count;
        req->current_nr_sectors = count;
        req->buffer = bh->b_data;
        req->sem = NULL;
        req->bh = bh;
        req->bhtail = bh;
        req->next = NULL;
        add_request(major+blk_dev,req);
}
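
/*
 * The merge test in the do/while loop above, pulled out as a
 * predicate for readability.  This helper is illustrative only;
 * the original open-codes it in the scan.
 */
static inline int example_can_merge(struct request *req, int rw,
                                    kdev_t rdev, unsigned int sector,
                                    unsigned int count)
{
        if (req->sem)                   /* someone is sleeping on this request */
                return 0;
        if (req->cmd != rw || req->rq_dev != rdev)
                return 0;
        if (req->nr_sectors >= 244)     /* keep requests a driver-friendly size */
                return 0;
        return req->sector + req->nr_sectors == sector  /* back merge */
            || req->sector - count == sector;           /* front merge */
}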

/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong to
   the same device */

void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
        unsigned int major;
        int correct_size;
        struct blk_dev_struct * dev;
        int i;

        /* Make sure that the first block contains something reasonable */
        while (!*bh) {
                bh++;
                if (--nr <= 0)
                        return;
        }

        dev = NULL;
        if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
                dev = blk_dev + major;
        if (!dev || !dev->request_fn) {
                printk(
        "ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
                kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
                goto sorry;
        }

        /* Determine correct block size for this device.  */
        correct_size = BLOCK_SIZE;
        if (blksize_size[major]) {
                i = blksize_size[major][MINOR(bh[0]->b_dev)];
                if (i)
                        correct_size = i;
        }

        /* Verify requested block sizes.  */
        for (i = 0; i < nr; i++) {
                if (bh[i] && bh[i]->b_size != correct_size) {
                        printk("ll_rw_block: device %s: "
                               "only %d-char blocks implemented (%lu)\n",
                               kdevname(bh[0]->b_dev),
                               correct_size, bh[i]->b_size);
                        goto sorry;
                }

                /* Md remaps blocks now */
                bh[i]->b_rdev = bh[i]->b_dev;
                bh[i]->b_rsector=bh[i]->b_blocknr*(bh[i]->b_size >> 9);
#ifdef CONFIG_BLK_DEV_MD
                if (major==MD_MAJOR &&
                    md_map (MINOR(bh[i]->b_dev), &bh[i]->b_rdev,
                            &bh[i]->b_rsector, bh[i]->b_size >> 9))
                        goto sorry;
#endif
        }

        if ((rw == WRITE || rw == WRITEA) && is_read_only(bh[0]->b_dev)) {
                printk("Can't write to read-only device %s\n",
                       kdevname(bh[0]->b_dev));
                goto sorry;
        }

        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        set_bit(BH_Req, &bh[i]->b_state);

                        make_request(MAJOR(bh[i]->b_rdev), rw, bh[i]);
                }
        }
        return;

      sorry:
        for (i = 0; i < nr; i++) {
                if (bh[i]) {
                        clear_bit(BH_Dirty, &bh[i]->b_state);
                        clear_bit(BH_Uptodate, &bh[i]->b_state);
                }
        }
        return;
}
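
/*
 * A typical caller, sketched: submit a single read and sleep until it
 * completes.  wait_on_buffer() and buffer_uptodate() come from the
 * buffer cache, not from this file; the helper name is illustrative.
 */
static int example_read_block(struct buffer_head *bh)
{
        ll_rw_block(READ, 1, &bh);              /* queue (and maybe merge) the request */
        wait_on_buffer(bh);                     /* sleeps until the driver unlocks bh */
        return buffer_uptodate(bh) ? 0 : -EIO;  /* I/O error if still not uptodate */
}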

void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buf)
{
        int i, j;
        int buffersize;
        unsigned long rsector;
        kdev_t rdev;
        struct request * req[8];
        unsigned int major = MAJOR(dev);
        struct semaphore sem = MUTEX_LOCKED;

        if (major >= MAX_BLKDEV || !(blk_dev[major].request_fn)) {
                printk("ll_rw_swap_file: trying to swap nonexistent block-device\n");
                return;
        }
        switch (rw) {
                case READ:
                        break;
                case WRITE:
                        if (is_read_only(dev)) {
                                printk("Can't swap to read-only device %s\n",
                                        kdevname(dev));
                                return;
                        }
                        break;
                default:
                        panic("ll_rw_swap: bad block dev cmd, must be R/W");
        }
        buffersize = PAGE_SIZE / nb;

        for (j=0, i=0; i<nb;)
        {
                for (; j < 8 && i < nb; j++, i++, buf += buffersize)
                {
                        rdev = dev;
                        rsector = (b[i] * buffersize) >> 9;
#ifdef CONFIG_BLK_DEV_MD
                        if (major==MD_MAJOR &&
                            md_map (MINOR(dev), &rdev,
                                    &rsector, buffersize >> 9)) {
                                printk ("Bad md_map in ll_rw_swap_file\n");
                                return;
                        }
#endif

                        if (j == 0) {
                                req[j] = get_request_wait(NR_REQUEST, rdev);
                        } else {
                                cli();
                                req[j] = get_request(NR_REQUEST, rdev);
                                sti();
                                if (req[j] == NULL)
                                        break;
                        }
                        req[j]->cmd = rw;
                        req[j]->errors = 0;
                        req[j]->sector = rsector;
                        req[j]->nr_sectors = buffersize >> 9;
                        req[j]->current_nr_sectors = buffersize >> 9;
                        req[j]->buffer = buf;
                        req[j]->sem = &sem;
                        req[j]->bh = NULL;
                        req[j]->next = NULL;
                        add_request(MAJOR(rdev)+blk_dev,req[j]);
                }
                run_task_queue(&tq_disk);
                while (j > 0) {
                        j--;
                        down(&sem);
                }
        }
}
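
/*
 * Completion protocol for the above: each request carries req->sem,
 * and the driver's end-of-request handling does an up() on it when
 * the request finishes, so one down(&sem) per submitted request
 * waits for the whole batch of up to 8 before the next batch is
 * built.
 */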

int blk_dev_init(void)
{
        struct request * req;
        struct blk_dev_struct *dev;

        for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
                dev->request_fn      = NULL;
                dev->current_request = NULL;
                dev->plug.rq_status  = RQ_INACTIVE;
                dev->plug.cmd        = -1;
                dev->plug.next       = NULL;
                dev->plug_tq.routine = &unplug_device;
                dev->plug_tq.data    = dev;
        }

        req = all_requests + NR_REQUEST;
        while (--req >= all_requests) {
                req->rq_status = RQ_INACTIVE;
                req->next = NULL;
        }
        memset(ro_bits,0,sizeof(ro_bits));
#ifdef CONFIG_BLK_DEV_RAM
        rd_init();
#endif
#ifdef CONFIG_BLK_DEV_LOOP
        loop_init();
#endif
#ifdef CONFIG_BLK_DEV_IDE
        ide_init();             /* this MUST precede hd_init */
#endif
#ifdef CONFIG_BLK_DEV_HD
        hd_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
        xd_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
        floppy_init();
#else
        outb_p(0xc, 0x3f2);
#endif
#ifdef CONFIG_CDI_INIT
        cdi_init();
#endif /* CONFIG_CDI_INIT */
#ifdef CONFIG_CDU31A
        cdu31a_init();
#endif /* CONFIG_CDU31A */
#ifdef CONFIG_MCD
        mcd_init();
#endif /* CONFIG_MCD */
#ifdef CONFIG_MCDX
        mcdx_init();
#endif /* CONFIG_MCDX */
#ifdef CONFIG_SBPCD
        sbpcd_init();
#endif /* CONFIG_SBPCD */
#ifdef CONFIG_AZTCD
        aztcd_init();
#endif /* CONFIG_AZTCD */
#ifdef CONFIG_CDU535
        sony535_init();
#endif /* CONFIG_CDU535 */
#ifdef CONFIG_GSCD
        gscd_init();
#endif /* CONFIG_GSCD */
#ifdef CONFIG_CM206
        cm206_init();
#endif
#ifdef CONFIG_OPTCD
        optcd_init();
#endif /* CONFIG_OPTCD */
#ifdef CONFIG_SJCD
        sjcd_init();
#endif /* CONFIG_SJCD */
#ifdef CONFIG_BLK_DEV_MD
        md_init();
#endif /* CONFIG_BLK_DEV_MD */
        return 0;
}
