root/mm/filemap.c


DEFINITIONS

This source file includes the following definitions.
  1. multi_bmap
  2. filemap_nopage
  3. filemap_sync_page
  4. filemap_swapout
  5. filemap_swapin
  6. filemap_sync_pte
  7. filemap_sync_pte_range
  8. filemap_sync_pmd_range
  9. filemap_sync
  10. filemap_unmap
  11. filemap_close
  12. generic_mmap

/*
 *      linux/mm/filemap.c
 *
 * Copyright (C) 1994 Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem does this differently, for example)
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/pgtable.h>
/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 */

/*
 * Fill in a page's worth of block numbers for the given inode,
 * starting at the file block containing the byte offset "block".
 */
static inline void multi_bmap(struct inode * inode, unsigned int block, unsigned int * nr, int shift)
{
        int i = PAGE_SIZE >> shift;
        block >>= shift;
        do {
                *nr = bmap(inode, block);
                i--;
                block++;
                nr++;
        } while (i > 0);
}
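/*
 * Worked example: with PAGE_SIZE 4096 and a 1 KiB blocksize
 * (s_blocksize_bits == 10), one page needs four block numbers.
 * A standalone userspace sketch of the same loop; bmap_stub() is a
 * made-up stand-in for the filesystem's real bmap():
 */
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096

static unsigned int bmap_stub(unsigned int file_block)
{
        /* pretend the file sits contiguously from disk block 100 */
        return 100 + file_block;
}

int main(void)
{
        unsigned int nr[SKETCH_PAGE_SIZE / 512];
        unsigned int byte_offset = 8192;        /* page-aligned offset into the file */
        int shift = 10;                         /* log2 of the 1 KiB blocksize */
        int i = SKETCH_PAGE_SIZE >> shift;      /* 4 blocks cover one page */
        unsigned int block = byte_offset >> shift;
        unsigned int *p = nr;

        do {
                *p++ = bmap_stub(block++);
                i--;
        } while (i > 0);

        for (i = 0; i < SKETCH_PAGE_SIZE >> shift; i++)
                printf("nr[%d] = %u\n", i, nr[i]);      /* 108, 109, 110, 111 */
        return 0;
}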
  42 
static unsigned long filemap_nopage(struct vm_area_struct * area, unsigned long address,
        unsigned long page, int no_share)
{
        struct inode * inode = area->vm_inode;
        unsigned int nr[PAGE_SIZE/512];

        multi_bmap(inode, (address & PAGE_MASK) - area->vm_start + area->vm_offset, nr,
                inode->i_sb->s_blocksize_bits);
        return bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, no_share);
}
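/*
 * The file position handed to multi_bmap() above is computed from the
 * faulting address: page-align it, subtract the start of the mapping,
 * and add the file offset where the mapping begins. A quick standalone
 * sketch with made-up numbers:
 */
#include <stdio.h>

#define SKETCH_PAGE_MASK (~4095UL)

int main(void)
{
        unsigned long vm_start = 0x40000000;    /* where the mapping was placed */
        unsigned long vm_offset = 0x2000;       /* mapping starts 8 KiB into the file */
        unsigned long address = 0x40003123;     /* faulting address */
        unsigned long file_pos;

        file_pos = (address & SKETCH_PAGE_MASK) - vm_start + vm_offset;
        printf("file position to read: %#lx\n", file_pos);     /* prints 0x5000 */
        return 0;
}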
  53 
/*
 * NOTE! mmap sync doesn't really work yet. This is mainly a stub for it,
 * which only works if the buffers and the page were already sharing the
 * same physical page (that's actually pretty common, especially if the
 * file has been mmap'ed before being read the normal way).
 *
 * Todo:
 * - non-shared pages also need to be synced with the buffers.
 * - the "swapout()" function needs to swap out the page to
 *   the shared file instead of using the swap device.
 */
static void filemap_sync_page(struct vm_area_struct * vma,
        unsigned long offset,
        unsigned long page)
{
        struct inode * inode;
        unsigned int nr[PAGE_SIZE/512];
        struct buffer_head * bh;

        bh = buffer_pages[MAP_NR(page)];
        if (bh) {
                /* whee.. just mark the buffer heads dirty */
                struct buffer_head * tmp = bh;
                do {
                        mark_buffer_dirty(tmp, 0);
                        tmp = tmp->b_this_page;
                } while (tmp != bh);
                return;
        }
        inode = vma->vm_inode;
        offset += vma->vm_offset;
        multi_bmap(inode, offset, nr, inode->i_sb->s_blocksize_bits);
        bwrite_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize);
}
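/*
 * The buffer heads backing one page are linked in a circular list
 * through b_this_page, so the dirty-marking loop above terminates
 * when it wraps back around to the head. A standalone sketch of that
 * traversal pattern (struct and names here are illustrative, not the
 * kernel's buffer_head):
 */
#include <stdio.h>

struct buf {
        int dirty;
        struct buf *next;       /* plays the role of b_this_page */
};

static void mark_page_buffers_dirty(struct buf *bh)
{
        struct buf *tmp = bh;

        do {                    /* walk the ring exactly once */
                tmp->dirty = 1;
                tmp = tmp->next;
        } while (tmp != bh);
}

int main(void)
{
        /* four 1 KiB buffers covering one 4 KiB page, linked in a ring */
        struct buf b[4];
        int i;

        for (i = 0; i < 4; i++) {
                b[i].dirty = 0;
                b[i].next = &b[(i + 1) % 4];
        }
        mark_page_buffers_dirty(&b[0]);
        for (i = 0; i < 4; i++)
                printf("buffer %d dirty = %d\n", i, b[i].dirty);
        return 0;
}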
  88 
/*
 * Swapping to a shared file: while we're busy writing out the page
 * (and the page still exists in memory), we save the page information
 * in the page table, so that "filemap_swapin()" can re-use the page
 * immediately if it is called while we're busy swapping it out..
 *
 * Once we've written it all out, we mark the page entry "empty", which
 * will result in a normal page-in (instead of a swap-in) from the now
 * up-to-date shared file mapping.
 */
void filemap_swapout(struct vm_area_struct * vma,
        unsigned long offset,
        pte_t *page_table)
{
        unsigned long page = pte_page(*page_table);
        unsigned long entry = SWP_ENTRY(SHM_SWP_TYPE, MAP_NR(page));

        pte_val(*page_table) = entry;
        invalidate();
        filemap_sync_page(vma, offset, page);
        if (pte_val(*page_table) == entry)
                pte_clear(page_table);
}
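/*
 * The swap entry stashed in the not-present pte packs a type and an
 * offset (here the page's mem_map index) into one word. A standalone
 * sketch of such an encoding, assuming a hypothetical layout with the
 * type in bits 1-7 above a cleared present bit; the real bit layout is
 * architecture-specific, and 21 below merely stands in for SHM_SWP_TYPE:
 */
#include <stdio.h>

#define SKETCH_SWP_ENTRY(type, offset)  (((type) << 1) | ((offset) << 8))
#define SKETCH_SWP_TYPE(entry)          (((entry) >> 1) & 0x7f)
#define SKETCH_SWP_OFFSET(entry)        ((entry) >> 8)

int main(void)
{
        unsigned long map_nr = 0x1234;                  /* page's mem_map index */
        unsigned long entry = SKETCH_SWP_ENTRY(21, map_nr);

        printf("entry  = %#lx\n", entry);
        printf("type   = %lu\n", SKETCH_SWP_TYPE(entry));       /* 21 */
        printf("offset = %#lx\n", SKETCH_SWP_OFFSET(entry));    /* 0x1234 */
        return 0;
}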
 112 
/*
 * filemap_swapin() is called only if we have something in the page
 * tables that is non-zero (but not present), which we know to be the
 * page index of a page that is busy being swapped out (see above).
 * So we just use it directly..
 */
static pte_t filemap_swapin(struct vm_area_struct * vma,
        unsigned long offset,
        unsigned long entry)
{
        unsigned long page = SWP_OFFSET(entry);

        mem_map[page]++;
        page = (page << PAGE_SHIFT) + PAGE_OFFSET;
        return pte_mkdirty(mk_pte(page, vma->vm_page_prot));
}
 129 
static inline void filemap_sync_pte(pte_t * pte, struct vm_area_struct *vma,
        unsigned long address, unsigned int flags)
{
        pte_t page = *pte;

        if (!pte_present(page))
                return;
        if (!pte_dirty(page))
                return;
        if (flags & MS_INVALIDATE) {
                pte_clear(pte);
        } else {
                /* grab an extra reference so the page survives the write-out */
                mem_map[MAP_NR(pte_page(page))]++;
                *pte = pte_mkclean(page);
        }
        filemap_sync_page(vma, address - vma->vm_start, pte_page(page));
        free_page(pte_page(page));
}
 148 
static inline void filemap_sync_pte_range(pmd_t * pmd,
        unsigned long address, unsigned long size,
        struct vm_area_struct *vma, unsigned long offset, unsigned int flags)
{
        pte_t * pte;
        unsigned long end;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                printk("filemap_sync_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
                pmd_clear(pmd);
                return;
        }
        pte = pte_offset(pmd, address);
        offset += address & PMD_MASK;
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                filemap_sync_pte(pte, vma, address + offset, flags);
                address += PAGE_SIZE;
                pte++;
        } while (address < end);
}
 175 
static inline void filemap_sync_pmd_range(pgd_t * pgd,
        unsigned long address, unsigned long size,
        struct vm_area_struct *vma, unsigned int flags)
{
        pmd_t * pmd;
        unsigned long offset, end;

        if (pgd_none(*pgd))
                return;
        if (pgd_bad(*pgd)) {
                printk("filemap_sync_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd));
                pgd_clear(pgd);
                return;
        }
        pmd = pmd_offset(pgd, address);
        /* reduce the address relative to this pgd entry; "offset" keeps the
           base so the full address can be reconstructed further down */
        offset = address & PGDIR_MASK;
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
}
 202 
static void filemap_sync(struct vm_area_struct * vma, unsigned long address,
        size_t size, unsigned int flags)
{
        pgd_t * dir;
        unsigned long end = address + size;

        dir = pgd_offset(current, address);
        while (address < end) {
                filemap_sync_pmd_range(dir, address, end - address, vma, flags);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        invalidate();
}
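/*
 * The sync path above is a standard three-level page-table walk: each
 * level clamps its end to the boundary of the entry above it and
 * advances by its own step size. A standalone sketch of that
 * clamp-and-advance pattern, with toy sizes standing in for
 * PGDIR_SIZE/PMD_SIZE/PAGE_SIZE (the kernel version additionally
 * carries per-level relative offsets, omitted here for brevity):
 */
#include <stdio.h>

#define TOY_PGDIR_SIZE  64UL
#define TOY_PMD_SIZE    16UL
#define TOY_PAGE_SIZE   4UL

static unsigned long clamp(unsigned long boundary, unsigned long end)
{
        return boundary < end ? boundary : end;
}

static void walk_ptes(unsigned long address, unsigned long end)
{
        while (address < end) {
                printf("    pte covers [%lu, %lu)\n", address, address + TOY_PAGE_SIZE);
                address += TOY_PAGE_SIZE;
        }
}

static void walk_pmds(unsigned long address, unsigned long end)
{
        do {
                unsigned long boundary = (address + TOY_PMD_SIZE) & ~(TOY_PMD_SIZE - 1);
                walk_ptes(address, clamp(boundary, end));
                address = boundary;
        } while (address < end);
}

int main(void)
{
        unsigned long address = 4, end = 40;    /* sync the range [4, 40) */

        do {
                unsigned long boundary = (address + TOY_PGDIR_SIZE) & ~(TOY_PGDIR_SIZE - 1);
                walk_pmds(address, clamp(boundary, end));
                address = boundary;
        } while (address < end);
        return 0;
}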
 218 
/*
 * This handles area unmaps..
 */
static void filemap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
{
        filemap_sync(vma, start, len, MS_ASYNC);
}

/*
 * This handles complete area closes..
 */
static void filemap_close(struct vm_area_struct * vma)
{
        filemap_sync(vma, vma->vm_start, vma->vm_end - vma->vm_start, MS_ASYNC);
}
 234 
/*
 * Shared mappings need to be able to do the right thing at
 * close/unmap/sync. They will also use the private file as
 * backing-store for swapping..
 */
static struct vm_operations_struct file_shared_mmap = {
        NULL,                   /* open */
        filemap_close,          /* close */
        filemap_unmap,          /* unmap */
        NULL,                   /* protect */
        filemap_sync,           /* sync */
        NULL,                   /* advise */
        filemap_nopage,         /* nopage */
        NULL,                   /* wppage */
        filemap_swapout,        /* swapout */
        filemap_swapin,         /* swapin */
};

/*
 * Private mappings just need to be able to load in the map
 *
 * (this is actually used for shared mappings as well, if we
 * know they can't ever get write permissions..)
 */
static struct vm_operations_struct file_private_mmap = {
        NULL,                   /* open */
        NULL,                   /* close */
        NULL,                   /* unmap */
        NULL,                   /* protect */
        NULL,                   /* sync */
        NULL,                   /* advise */
        filemap_nopage,         /* nopage */
        NULL,                   /* wppage */
        NULL,                   /* swapout */
        NULL,                   /* swapin */
};
 271 
/* This is used for a general mmap of a disk file */
int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
{
        struct vm_operations_struct * ops;

        /* check i_sb before dereferencing it for the blocksize test */
        if (!inode->i_sb || !S_ISREG(inode->i_mode))
                return -EACCES;
        if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
                return -EINVAL;
        if (!inode->i_op || !inode->i_op->bmap)
                return -ENOEXEC;
        ops = &file_private_mmap;
        if (vma->vm_flags & VM_SHARED) {
                if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
                        ops = &file_shared_mmap;
        }
        if (!IS_RDONLY(inode)) {
                inode->i_atime = CURRENT_TIME;
                inode->i_dirt = 1;
        }
        vma->vm_inode = inode;
        inode->i_count++;
        vma->vm_ops = ops;
        return 0;
}
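/*
 * What this looks like from user space: a shared writable mapping of a
 * regular file goes through generic_mmap() and picks up the
 * file_shared_mmap operations; the first touch faults in through
 * filemap_nopage(), and msync() reaches filemap_sync(). A small
 * self-contained POSIX example (error handling abbreviated):
 */
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("testfile", O_RDWR | O_CREAT, 0644);
        char *p;

        if (fd < 0 || ftruncate(fd, 4096) < 0)
                return 1;

        /* shared writable mapping: selects file_shared_mmap above */
        p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
                return 1;

        strcpy(p, "hello");             /* first touch pages in via nopage */
        msync(p, 4096, MS_ASYNC);       /* schedules write-back via the sync op */

        munmap(p, 4096);
        close(fd);
        return 0;
}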
