root/mm/page_io.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. rw_swap_page
  2. swap_after_unlock_page
  3. ll_rw_page

   1 /*
   2  *  linux/mm/page_io.c
   3  *
   4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   5  *
   6  *  Swap reorganised 29.12.95, 
   7  *  Asynchronous swapping added 30.12.95. Stephen Tweedie
   8  *  Removed race in async swapping. 14.4.1996. Bruno Haible
   9  */
  10 
  11 #include <linux/mm.h>
  12 #include <linux/sched.h>
  13 #include <linux/head.h>
  14 #include <linux/kernel.h>
  15 #include <linux/kernel_stat.h>
  16 #include <linux/errno.h>
  17 #include <linux/string.h>
  18 #include <linux/stat.h>
  19 #include <linux/swap.h>
  20 #include <linux/fs.h>
  21 #include <linux/locks.h>
  22 #include <linux/swapctl.h>
  23 
  24 #include <asm/dma.h>
  25 #include <asm/system.h> /* for cli()/sti() */
  26 #include <asm/segment.h> /* for memcpy_to/fromfs */
  27 #include <asm/bitops.h>
  28 #include <asm/pgtable.h>
  29 
  30 static struct wait_queue * lock_queue = NULL;
  31 
  32 /*
  33  * Reads or writes a swap page.
  34  * wait=1: start I/O and wait for completion. wait=0: start asynchronous I/O.
  35  *
  36  * Important prevention of race condition: The first thing we do is set a lock
  37  * on this swap page, which lasts until I/O completes. This way a
  38  * write_swap_page(entry) immediately followed by a read_swap_page(entry)
  39  * on the same entry will first complete the write_swap_page(). Fortunately,
  40  * not more than one write_swap_page() request can be pending per entry. So
  41  * all races the caller must catch are: multiple read_swap_page() requests
  42  * on the same entry.
  43  */
  44 void rw_swap_page(int rw, unsigned long entry, char * buf, int wait)
     /* [previous][next][first][last][top][bottom][index][help] */
  45 {
  46         unsigned long type, offset;
  47         struct swap_info_struct * p;
  48         struct page *page;
  49         
  50         type = SWP_TYPE(entry);
  51         if (type >= nr_swapfiles) {
  52                 printk("Internal error: bad swap-device\n");
  53                 return;
  54         }
  55         p = &swap_info[type];
  56         offset = SWP_OFFSET(entry);
  57         if (offset >= p->max) {
  58                 printk("rw_swap_page: weirdness\n");
  59                 return;
  60         }
  61         if (p->swap_map && !p->swap_map[offset]) {
  62                 printk("Hmm.. Trying to use unallocated swap (%08lx)\n", entry);
  63                 return;
  64         }
  65         if (!(p->flags & SWP_USED)) {
  66                 printk("Trying to swap to unused swap-device\n");
  67                 return;
  68         }
  69         /* Make sure we are the only process doing I/O with this swap page. */
  70         while (set_bit(offset,p->swap_lockmap)) {
  71                 run_task_queue(&tq_disk);
  72                 sleep_on(&lock_queue);
  73         }
  74         if (rw == READ)
  75                 kstat.pswpin++;
  76         else
  77                 kstat.pswpout++;
  78         page = mem_map + MAP_NR(buf);
  79         wait_on_page(page);
  80         if (p->swap_device) {
  81                 if (!wait) {
  82                         page->count++;
  83                         set_bit(PG_free_after, &page->flags);
  84                         set_bit(PG_decr_after, &page->flags);
  85                         set_bit(PG_swap_unlock_after, &page->flags);
  86                         page->swap_unlock_entry = entry;
  87                         nr_async_pages++;
  88                 }
  89                 ll_rw_page(rw,p->swap_device,offset,buf);
  90                 if (!wait)
  91                         return;
  92                 wait_on_page(page);
  93         } else if (p->swap_file) {
  94                 struct inode *swapf = p->swap_file;
  95                 unsigned int zones[PAGE_SIZE/512];
  96                 int i;
  97                 if (swapf->i_op->bmap == NULL
  98                         && swapf->i_op->smap != NULL){
  99                         /*
 100                                 With MsDOS, we use msdos_smap which return
 101                                 a sector number (not a cluster or block number).
 102                                 It is a patch to enable the UMSDOS project.
 103                                 Other people are working on better solution.
 104 
 105                                 It sounds like ll_rw_swap_file defined
 106                                 it operation size (sector size) based on
 107                                 PAGE_SIZE and the number of block to read.
 108                                 So using bmap or smap should work even if
 109                                 smap will require more blocks.
 110                         */
 111                         int j;
 112                         unsigned int block = offset << 3;
 113 
 114                         for (i=0, j=0; j< PAGE_SIZE ; i++, j += 512){
 115                                 if (!(zones[i] = swapf->i_op->smap(swapf,block++))) {
 116                                         printk("rw_swap_page: bad swap file\n");
 117                                         return;
 118                                 }
 119                         }
 120                 }else{
 121                         int j;
 122                         unsigned int block = offset
 123                                 << (PAGE_SHIFT - swapf->i_sb->s_blocksize_bits);
 124 
 125                         for (i=0, j=0; j< PAGE_SIZE ; i++, j +=swapf->i_sb->s_blocksize)
 126                                 if (!(zones[i] = bmap(swapf,block++))) {
 127                                         printk("rw_swap_page: bad swap file\n");
 128                                 }
 129                 }
 130                 ll_rw_swap_file(rw,swapf->i_dev, zones, i,buf);
 131         } else
 132                 printk("rw_swap_page: no swap file or device\n");
 133         if (offset && !clear_bit(offset,p->swap_lockmap))
 134                 printk("rw_swap_page: lock already cleared\n");
 135         wake_up(&lock_queue);
 136 }
 137 
 138 /* This is run when asynchronous page I/O has completed. */
 139 void swap_after_unlock_page (unsigned long entry)
     /* [previous][next][first][last][top][bottom][index][help] */
 140 {
 141         unsigned long type, offset;
 142         struct swap_info_struct * p;
 143 
 144         type = SWP_TYPE(entry);
 145         if (type >= nr_swapfiles) {
 146                 printk("swap_after_unlock_page: bad swap-device\n");
 147                 return;
 148         }
 149         p = &swap_info[type];
 150         offset = SWP_OFFSET(entry);
 151         if (offset >= p->max) {
 152                 printk("swap_after_unlock_page: weirdness\n");
 153                 return;
 154         }
 155         if (!clear_bit(offset,p->swap_lockmap))
 156                 printk("swap_after_unlock_page: lock already cleared\n");
 157         wake_up(&lock_queue);
 158 }
 159 
 160 /*
 161  * Swap partitions are now read via brw_page.  ll_rw_page is an
 162  * asynchronous function now --- we must call wait_on_page afterwards
 163  * if synchronous IO is required.  
 164  */
 165 void ll_rw_page(int rw, kdev_t dev, unsigned long page, char * buffer)
     /* [previous][next][first][last][top][bottom][index][help] */
 166 {
 167         int block = page;
 168 
 169         switch (rw) {
 170                 case READ:
 171                         break;
 172                 case WRITE:
 173                         if (is_read_only(dev)) {
 174                                 printk("Can't page to read-only device %s\n",
 175                                         kdevname(dev));
 176                                 return;
 177                         }
 178                         break;
 179                 default:
 180                         panic("ll_rw_page: bad block dev cmd, must be R/W");
 181         }
 182         if (set_bit(PG_locked, &mem_map[MAP_NR(buffer)].flags))
 183                 panic ("ll_rw_page: page already locked");
 184         brw_page(rw, (unsigned long) buffer, dev, &block, PAGE_SIZE, 0);
 185 }

/* [previous][next][first][last][top][bottom][index][help] */