root/mm/filemap.c


DEFINITIONS

This source file includes the following definitions:
  1. multi_bmap
  2. filemap_nopage
  3. filemap_sync_page
  4. filemap_swapout
  5. filemap_swapin
  6. filemap_sync_pte
  7. filemap_sync_pte_range
  8. filemap_sync_pmd_range
  9. filemap_sync
  10. filemap_unmap
  11. filemap_close
  12. generic_mmap

/*
 *      linux/mm/filemap.c
 *
 * Copyright (C) 1994 Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem does this differently, for example)
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/pgtable.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 */

static inline void multi_bmap(struct inode * inode, unsigned int block, unsigned int * nr, int shift)
{
        int i = PAGE_SIZE >> shift;
        block >>= shift;
        do {
                *nr = bmap(inode, block);
                i--;
                block++;
                nr++;
        } while (i > 0);
}
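multi_bmap() resolves one page's worth of file data into device block
numbers: one bmap() lookup per blocksize-sized chunk of the page. A minimal
userspace sketch of the same loop, with a made-up mock_bmap() standing in
for the filesystem's bmap operation (nothing below is kernel code):

#include <stdio.h>

#define PAGE_SIZE 4096

/* Stand-in for the kernel's bmap(): pretend the file's logical
 * blocks sit contiguously starting at device block 100. */
static unsigned int mock_bmap(unsigned int block)
{
        return 100 + block;
}

/* Same loop shape as multi_bmap(): one lookup per chunk of the page. */
static void mock_multi_bmap(unsigned int byte_offset, unsigned int *nr, int shift)
{
        int i = PAGE_SIZE >> shift;
        unsigned int block = byte_offset >> shift;

        do {
                *nr++ = mock_bmap(block++);
        } while (--i > 0);
}

int main(void)
{
        unsigned int nr[PAGE_SIZE / 512];
        int i, n = PAGE_SIZE >> 10;

        /* 1024-byte blocks (shift == 10): a 4096-byte page needs 4 lookups */
        mock_multi_bmap(8192, nr, 10);
        for (i = 0; i < n; i++)
                printf("nr[%d] = %u\n", i, nr[i]);
        return 0;
}

For byte offset 8192 (logical block 8) this prints 108, 109, 110, 111:
the four device blocks backing that page.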

static unsigned long filemap_nopage(struct vm_area_struct * area, unsigned long address,
        unsigned long page, int no_share)
{
        struct inode * inode = area->vm_inode;
        int nr[PAGE_SIZE/512];

        multi_bmap(inode, (address & PAGE_MASK) - area->vm_start + area->vm_offset, nr,
                inode->i_sb->s_blocksize_bits);
        return bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, no_share);
}
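The expression handed to multi_bmap() converts the faulting virtual address
into a byte offset within the file: page-align the address, subtract where
the mapping starts, add the file offset the mapping was established at. A
quick userspace check, with made-up values (all numbers are illustrative):

#include <stdio.h>

#define PAGE_MASK (~(unsigned long)0xfff)       /* 4 KB pages assumed */

int main(void)
{
        /* Hypothetical mapping: file offset 0x2000 mapped at 0x40000000 */
        unsigned long vm_start = 0x40000000UL;
        unsigned long vm_offset = 0x2000UL;
        unsigned long address = 0x40003456UL;   /* faulting address */

        /* Same arithmetic as filemap_nopage(): page-align the fault
         * address, then rebase it from the mapping to the file. */
        unsigned long file_off = (address & PAGE_MASK) - vm_start + vm_offset;

        printf("file offset = 0x%lx\n", file_off);      /* prints 0x5000 */
        return 0;
}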

/*
 * NOTE! mmap sync doesn't really work yet. This is mainly a stub for it,
 * which only works if the buffers and the page were already sharing the
 * same physical page (that's actually pretty common, especially if the
 * file has been mmap'ed before being read the normal way).
 *
 * Todo:
 * - non-shared pages also need to be synced with the buffers.
 * - the "swapout()" function needs to swap out the page to
 *   the shared file instead of using the swap device.
 */
static void filemap_sync_page(struct vm_area_struct * vma,
        unsigned long offset,
        unsigned long page)
{
        struct inode * inode;
        int nr[PAGE_SIZE/512];
        struct buffer_head * bh;

        bh = buffer_pages[MAP_NR(page)];
        if (bh) {
                /* whee.. just mark the buffer heads dirty */
                struct buffer_head * tmp = bh;
                do {
                        mark_buffer_dirty(tmp, 0);
                        tmp = tmp->b_this_page;
                } while (tmp != bh);
                return;
        }
        inode = vma->vm_inode;
        multi_bmap(inode, offset, nr, inode->i_sb->s_blocksize_bits);
        bwrite_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize);
}

/*
 * Swapping to a shared file: while we're busy writing out the page
 * (and the page still exists in memory), we save the page information
 * in the page table, so that "filemap_swapin()" can re-use the page
 * immediately if it is called while we're busy swapping it out..
 *
 * Once we've written it all out, we mark the page entry "empty", which
 * will result in a normal page-in (instead of a swap-in) from the now
 * up-to-date shared file mapping.
 */
void filemap_swapout(struct vm_area_struct * vma,
        unsigned long offset,
        pte_t *page_table)
{
        unsigned long page = pte_page(*page_table);
        unsigned long entry = SWP_ENTRY(SHM_SWP_TYPE, MAP_NR(page));

        pte_val(*page_table) = entry;
        invalidate();
        filemap_sync_page(vma, offset, page);
        if (pte_val(*page_table) == entry)
                pte_clear(page_table);
}
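The "page information" parked in the page table is a fake swap entry: the
type field carries SHM_SWP_TYPE and the offset field carries MAP_NR(page),
the physical page's index into mem_map, which filemap_swapin() below
recovers with SWP_OFFSET(). A userspace sketch of the round trip, assuming
an i386-style bit layout of this era (the macro definitions and the
SHM_SWP_TYPE value here are illustrative assumptions, not taken from the
real headers):

#include <stdio.h>

/* Assumed layout: bit 0 is the "present" bit (left clear), the
 * swap type lives in bits 1..7, the offset in the bits above. */
#define SWP_ENTRY(type, offset) (((type) << 1) | ((offset) << 8))
#define SWP_TYPE(entry)         (((entry) >> 1) & 0x7f)
#define SWP_OFFSET(entry)       ((entry) >> 8)

#define SHM_SWP_TYPE 0x41       /* placeholder value for the demo */

int main(void)
{
        unsigned long map_nr = 0x1234;  /* MAP_NR(page): mem_map index */
        unsigned long entry = SWP_ENTRY(SHM_SWP_TYPE, map_nr);

        /* The entry is non-zero but not "present", so the fault path
         * treats it as a swap entry; swapin just decodes the offset. */
        printf("type   = %#lx\n", (unsigned long)SWP_TYPE(entry));
        printf("offset = %#lx\n", (unsigned long)SWP_OFFSET(entry));
        return 0;
}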

/*
 * filemap_swapin() is called only if we have something in the page
 * tables that is non-zero (but not present), which we know to be the
 * page index of a page that is busy being swapped out (see above).
 * So we just use it directly..
 */
static pte_t filemap_swapin(struct vm_area_struct * vma,
        unsigned long offset,
        unsigned long entry)
{
        unsigned long page = SWP_OFFSET(entry);

        mem_map[page]++;
        page = (page << PAGE_SHIFT) + PAGE_OFFSET;
        return pte_mkdirty(mk_pte(page,vma->vm_page_prot));
}

static inline void filemap_sync_pte(pte_t * pte, struct vm_area_struct *vma,
        unsigned long address, unsigned int flags)
{
        pte_t page = *pte;

        if (!pte_present(page))
                return;
        if (!pte_dirty(page))
                return;
        if (flags & MS_INVALIDATE) {
                pte_clear(pte);
        } else {
                mem_map[MAP_NR(pte_page(page))]++;
                *pte = pte_mkclean(page);
        }
        filemap_sync_page(vma, address - vma->vm_start, pte_page(page));
        free_page(pte_page(page));
}

static inline void filemap_sync_pte_range(pmd_t * pmd,
        unsigned long address, unsigned long size,
        struct vm_area_struct *vma, unsigned long offset, unsigned int flags)
{
        pte_t * pte;
        unsigned long end;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                printk("filemap_sync_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
                pmd_clear(pmd);
                return;
        }
        pte = pte_offset(pmd, address);
        offset += address & PMD_MASK;
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                filemap_sync_pte(pte, vma, address + offset, flags);
                address += PAGE_SIZE;
                pte++;
        } while (address < end);
}

static inline void filemap_sync_pmd_range(pgd_t * pgd,
        unsigned long address, unsigned long size,
        struct vm_area_struct *vma, unsigned int flags)
{
        pmd_t * pmd;
        unsigned long offset, end;

        if (pgd_none(*pgd))
                return;
        if (pgd_bad(*pgd)) {
                printk("filemap_sync_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd));
                pgd_clear(pgd);
                return;
        }
        pmd = pmd_offset(pgd, address);
        offset = address & PMD_MASK;
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
}
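Both range walkers advance with the same idiom: handle whatever remains up
to the next translation-table boundary, then snap the address forward to
that boundary with (address + SIZE) & MASK. A userspace illustration of the
stepping at the page-directory level, assuming the 4 MB i386 granularity of
this era (the constants and addresses are illustrative):

#include <stdio.h>

#define PGDIR_SHIFT     22
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE - 1))

int main(void)
{
        unsigned long address = 0x403ff000UL;           /* arbitrary start */
        unsigned long end = address + 0x800000UL;       /* 8 MB range */
        unsigned long len;

        /* Same stepping as filemap_sync(): handle the tail of the
         * current directory entry, then jump to the next boundary. */
        while (address < end) {
                len = PGDIR_SIZE - (address & ~PGDIR_MASK);
                if (len > end - address)
                        len = end - address;
                printf("chunk at %#lx, len %#lx\n", address, len);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
        }
        return 0;
}

This prints a short first chunk up to the 4 MB boundary, one full 4 MB
chunk, and a final partial chunk: exactly how the walk splits a range.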

static void filemap_sync(struct vm_area_struct * vma, unsigned long address,
        size_t size, unsigned int flags)
{
        pgd_t * dir;
        unsigned long end = address + size;

        dir = pgd_offset(current, address);
        while (address < end) {
                filemap_sync_pmd_range(dir, address, end - address, vma, flags);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        invalidate();
        return;
}
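From userspace, this path is reached through msync() on a writable
MAP_SHARED file mapping: the kernel dispatches to the vma's sync operation,
which for these mappings is filemap_sync(). A minimal (hypothetical)
demonstration program:

#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("testfile", O_RDWR);
        char *map;

        if (fd < 0)
                return 1;
        map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (map == MAP_FAILED)
                return 1;
        memcpy(map, "hello", 5);        /* dirties the page */
        msync(map, 4096, MS_ASYNC);     /* schedules writeback to the file */
        munmap(map, 4096);
        close(fd);
        return 0;
}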

/*
 * This handles area unmaps..
 */
static void filemap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
{
        filemap_sync(vma, start, len, MS_ASYNC);
}

/*
 * This handles complete area closes..
 */
static void filemap_close(struct vm_area_struct * vma)
{
        filemap_sync(vma, vma->vm_start, vma->vm_end - vma->vm_start, MS_ASYNC);
}

/*
 * Shared mappings need to be able to do the right thing at
 * close/unmap/sync. They will also use the private file as
 * backing-store for swapping..
 */
static struct vm_operations_struct file_shared_mmap = {
        NULL,                   /* open */
        filemap_close,          /* close */
        filemap_unmap,          /* unmap */
        NULL,                   /* protect */
        filemap_sync,           /* sync */
        NULL,                   /* advise */
        filemap_nopage,         /* nopage */
        NULL,                   /* wppage */
        filemap_swapout,        /* swapout */
        filemap_swapin,         /* swapin */
};

/*
 * Private mappings just need to be able to load in the map
 *
 * (this is actually used for shared mappings as well, if we
 * know they can't ever get write permissions..)
 */
static struct vm_operations_struct file_private_mmap = {
        NULL,                   /* open */
        NULL,                   /* close */
        NULL,                   /* unmap */
        NULL,                   /* protect */
        NULL,                   /* sync */
        NULL,                   /* advise */
        filemap_nopage,         /* nopage */
        NULL,                   /* wppage */
        NULL,                   /* swapout */
        NULL,                   /* swapin */
};

/* This is used for a general mmap of a disk file */
int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
{
        struct vm_operations_struct * ops;

        /* check i_sb before dereferencing it for the blocksize test */
        if (!inode->i_sb || !S_ISREG(inode->i_mode))
                return -EACCES;
        if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
                return -EINVAL;
        if (!inode->i_op || !inode->i_op->bmap)
                return -ENOEXEC;
        ops = &file_private_mmap;
        if (vma->vm_flags & VM_SHARED) {
                if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
                        ops = &file_shared_mmap;
        }
        if (!IS_RDONLY(inode)) {
                inode->i_atime = CURRENT_TIME;
                inode->i_dirt = 1;
        }
        vma->vm_inode = inode;
        inode->i_count++;
        vma->vm_ops = ops;
        return 0;
}
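
A filesystem opts into all of the above by pointing the mmap slot of its
file_operations at generic_mmap(), the way the on-disk filesystems of this
generation do. A sketch with positional initializers matching the struct
file_operations layout of this kernel generation (the example_* names and
declarations are hypothetical, assumed to be defined elsewhere in the fs):

/* hypothetical handlers for the demo */
static int example_file_read(struct inode *, struct file *, char *, int);
static int example_file_write(struct inode *, struct file *, char *, int);
static int example_sync_file(struct inode *, struct file *);

static struct file_operations example_file_operations = {
        NULL,                   /* lseek - default */
        example_file_read,      /* read */
        example_file_write,     /* write */
        NULL,                   /* readdir - bad */
        NULL,                   /* select - default */
        NULL,                   /* ioctl */
        generic_mmap,           /* mmap: generic file mmap semantics */
        NULL,                   /* open */
        NULL,                   /* release */
        example_sync_file,      /* fsync */
};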
