root/mm/filemap.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions:
  1. filemap_nopage
  2. filemap_sync_page
  3. filemap_sync_pte
  4. filemap_sync_pte_range
  5. filemap_sync_pmd_range
  6. filemap_sync
  7. filemap_unmap
  8. filemap_close
  9. filemap_swapout
  10. generic_mmap

   1 #define THREE_LEVEL
   2 /*
   3  *      linux/mm/filemap.c
   4  *
   5  * Copyright (C) 1994 Linus Torvalds
   6  */
   7 
   8 /*
   9  * This file handles the generic file mmap semantics used by
  10  * most "normal" filesystems (but you don't /have/ to use this:
  11  * the NFS filesystem does this differently, for example)
  12  */
  13 #include <linux/stat.h>
  14 #include <linux/sched.h>
  15 #include <linux/kernel.h>
  16 #include <linux/mm.h>
  17 #include <linux/shm.h>
  18 #include <linux/errno.h>
  19 #include <linux/mman.h>
  20 #include <linux/string.h>
  21 #include <linux/malloc.h>
  22 
  23 #include <asm/segment.h>
  24 #include <asm/system.h>
  25 #include <asm/pgtable.h>
  26 
  27 /*
  28  * Shared mappings implemented 30.11.1994. It's not fully working yet,
  29  * though.
  30  */
  31 
  32 static unsigned long filemap_nopage(struct vm_area_struct * area, unsigned long address,
     /* [previous][next][first][last][top][bottom][index][help] */
  33         unsigned long page, int no_share)
  34 {
  35         struct inode * inode = area->vm_inode;
  36         unsigned int block;
  37         int nr[8];
  38         int i, *p;
  39 
  40         address &= PAGE_MASK;
  41         block = address - area->vm_start + area->vm_offset;
  42         block >>= inode->i_sb->s_blocksize_bits;
  43         i = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
  44         p = nr;
  45         do {
  46                 *p = bmap(inode,block);
  47                 i--;
  48                 block++;
  49                 p++;
  50         } while (i > 0);
  51         return bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, no_share);
  52 }
  53 
  54 /*
  55  * NOTE! mmap sync doesn't really work yet. This is mainly a stub for it,
  56  * which only works if the buffers and the page were already sharing the
  57  * same physical page (that's actually pretty common, especially if the
  58  * file has been mmap'ed before being read the normal way).
  59  *
  60  * Todo:
  61  * - non-shared pages also need to be synced with the buffers.
  62  * - the "swapout()" function needs to swap out the page to
  63  *   the shared file instead of using the swap device.
  64  */
  65 static void filemap_sync_page(struct vm_area_struct * vma,
     /* [previous][next][first][last][top][bottom][index][help] */
  66         unsigned long offset,
  67         unsigned long page)
  68 {
  69         struct buffer_head * bh;
  70 
  71         printk("msync: %ld: [%08lx]\n", offset, page);
  72         bh = buffer_pages[MAP_NR(page)];
  73         if (bh) {
  74                 /* whee.. just mark the buffer heads dirty */
  75                 struct buffer_head * tmp = bh;
  76                 do {
  77                         mark_buffer_dirty(tmp, 0);
  78                         tmp = tmp->b_this_page;
  79                 } while (tmp != bh);
  80                 return;
  81         }
  82         /* we'll need to go fetch the buffer heads etc.. RSN */
  83         printk("Can't handle non-shared page yet\n");
  84         return;
  85 }
  86 
  87 static inline void filemap_sync_pte(pte_t * pte, struct vm_area_struct *vma,
     /* [previous][next][first][last][top][bottom][index][help] */
  88         unsigned long address, unsigned int flags)
  89 {
  90         pte_t page = *pte;
  91 
  92         if (!pte_present(page))
  93                 return;
  94         if (!pte_dirty(page))
  95                 return;
  96         if (flags & MS_INVALIDATE) {
  97                 pte_clear(pte);
  98         } else {
  99                 mem_map[MAP_NR(pte_page(page))]++;
 100                 *pte = pte_mkclean(page);
 101         }
 102         filemap_sync_page(vma, address - vma->vm_start, pte_page(page));
 103         free_page(pte_page(page));
 104 }
 105 
 106 static inline void filemap_sync_pte_range(pmd_t * pmd,
     /* [previous][next][first][last][top][bottom][index][help] */
 107         unsigned long address, unsigned long size, 
 108         struct vm_area_struct *vma, unsigned long offset, unsigned int flags)
 109 {
 110         pte_t * pte;
 111         unsigned long end;
 112 
 113         if (pmd_none(*pmd))
 114                 return;
 115         if (pmd_bad(*pmd)) {
 116                 printk("filemap_sync_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
 117                 pmd_clear(pmd);
 118                 return;
 119         }
 120         pte = pte_offset(pmd, address);
 121         offset += address & PMD_MASK;
 122         address &= ~PMD_MASK;
 123         end = address + size;
 124         if (end > PMD_SIZE)
 125                 end = PMD_SIZE;
 126         do {
 127                 filemap_sync_pte(pte, vma, address + offset, flags);
 128                 address += PAGE_SIZE;
 129                 pte++;
 130         } while (address < end);
 131 }
 132 
 133 static inline void filemap_sync_pmd_range(pgd_t * pgd,
     /* [previous][next][first][last][top][bottom][index][help] */
 134         unsigned long address, unsigned long size, 
 135         struct vm_area_struct *vma, unsigned int flags)
 136 {
 137         pmd_t * pmd;
 138         unsigned long offset, end;
 139 
 140         if (pgd_none(*pgd))
 141                 return;
 142         if (pgd_bad(*pgd)) {
 143                 printk("filemap_sync_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd));
 144                 pgd_clear(pgd);
 145                 return;
 146         }
 147         pmd = pmd_offset(pgd, address);
 148         offset = address & PMD_MASK;
 149         address &= ~PMD_MASK;
 150         end = address + size;
 151         if (end > PGDIR_SIZE)
 152                 end = PGDIR_SIZE;
 153         do {
 154                 filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags);
 155                 address = (address + PMD_SIZE) & PMD_MASK;
 156                 pmd++;
 157         } while (address < end);
 158 }
 159 
 160 static void filemap_sync(struct vm_area_struct * vma, unsigned long address,
     /* [previous][next][first][last][top][bottom][index][help] */
 161         size_t size, unsigned int flags)
 162 {
 163         pgd_t * dir;
 164         unsigned long end = address + size;
 165 
 166         dir = pgd_offset(current, address);
 167         while (address < end) {
 168                 filemap_sync_pmd_range(dir, address, end - address, vma, flags);
 169                 address = (address + PGDIR_SIZE) & PGDIR_MASK;
 170                 dir++;
 171         }
 172         invalidate();
 173         return;
 174 }
 175 
 176 /*
 177  * This handles area unmaps..
 178  */
 179 static void filemap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
     /* [previous][next][first][last][top][bottom][index][help] */
 180 {
 181         filemap_sync(vma, start, len, MS_ASYNC);
 182 }
 183 
 184 /*
 185  * This handles complete area closes..
 186  */
 187 static void filemap_close(struct vm_area_struct * vma)
     /* [previous][next][first][last][top][bottom][index][help] */
 188 {
 189         filemap_sync(vma, vma->vm_start, vma->vm_end - vma->vm_start, MS_ASYNC);
 190 }
 191 
 192 /*
 193  * This isn't implemented yet: you'll get a warning and incorrect behaviour.
 194  *
 195  * Note that the page is free'd by the higher-level after return,
 196  * so we have to either write it out or just forget it. We currently
 197  * forget it..
 198  */
 199 void filemap_swapout(struct vm_area_struct * vma,
     /* [previous][next][first][last][top][bottom][index][help] */
 200         unsigned long offset,
 201         pte_t *page_table)
 202 {
 203         printk("swapout not implemented on shared files..\n");
 204         pte_clear(page_table);
 205 }
 206 
 207 /*
 208  * Shared mappings need to be able to do the right thing at
 209  * close/unmap/sync. They will also use the private file as
 210  * backing-store for swapping..
 211  */
 212 static struct vm_operations_struct file_shared_mmap = {
 213         NULL,                   /* open */
 214         filemap_close,          /* close */
 215         filemap_unmap,          /* unmap */
 216         NULL,                   /* protect */
 217         filemap_sync,           /* sync */
 218         NULL,                   /* advise */
 219         filemap_nopage,         /* nopage */
 220         NULL,                   /* wppage */
 221         filemap_swapout,        /* swapout */
 222         NULL,                   /* swapin */
 223 };
 224 
 225 /*
 226  * Private mappings just need to be able to load in the map
 227  *
 228  * (this is actually used for shared mappings as well, if we
 229  * know they can't ever get write permissions..)
 230  */
 231 static struct vm_operations_struct file_private_mmap = {
 232         NULL,                   /* open */
 233         NULL,                   /* close */
 234         NULL,                   /* unmap */
 235         NULL,                   /* protect */
 236         NULL,                   /* sync */
 237         NULL,                   /* advise */
 238         filemap_nopage,         /* nopage */
 239         NULL,                   /* wppage */
 240         NULL,                   /* swapout */
 241         NULL,                   /* swapin */
 242 };
 243 
 244 /* This is used for a general mmap of a disk file */
 245 int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
     /* [previous][next][first][last][top][bottom][index][help] */
 246 {
 247         struct vm_operations_struct * ops;
 248 
 249         if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
 250                 return -EINVAL;
 251         if (!inode->i_sb || !S_ISREG(inode->i_mode))
 252                 return -EACCES;
 253         if (!inode->i_op || !inode->i_op->bmap)
 254                 return -ENOEXEC;
 255         ops = &file_private_mmap;
 256         if (vma->vm_flags & VM_SHARED) {
 257                 if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) {
 258                         static int nr = 0;
 259                         ops = &file_shared_mmap;
 260 #ifndef SHARED_MMAP_REALLY_WORKS /* it doesn't, yet */
 261                         if (nr++ < 5)
 262                                 printk("%s tried to do a shared writeable mapping\n", current->comm);
 263                         return -EINVAL;
 264 #endif
 265                 }
 266         }
 267         if (!IS_RDONLY(inode)) {
 268                 inode->i_atime = CURRENT_TIME;
 269                 inode->i_dirt = 1;
 270         }
 271         vma->vm_inode = inode;
 272         inode->i_count++;
 273         vma->vm_ops = ops;
 274         return 0;
 275 }

/* [previous][next][first][last][top][bottom][index][help] */