root/mm/filemap.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions:
  1. file_mmap_nopage
  2. file_mmap_sync_page
  3. file_mmap_sync
  4. file_mmap_unmap
  5. file_mmap_close
  6. file_mmap_swapout
  7. generic_mmap

   1 /*
   2  *      linux/mm/filemap.c
   3  *
   4  * Copyright (C) 1994 Linus Torvalds
   5  */
   6 
   7 /*
   8  * This file handles the generic file mmap semantics used by
   9  * most "normal" filesystems (but you don't /have/ to use this:
  10  * the NFS filesystem does this differently, for example)
  11  */
  12 #include <linux/stat.h>
  13 #include <linux/sched.h>
  14 #include <linux/kernel.h>
  15 #include <linux/mm.h>
  16 #include <linux/shm.h>
  17 #include <linux/errno.h>
  18 #include <linux/mman.h>
  19 #include <linux/string.h>
  20 #include <linux/malloc.h>
  21 
  22 #include <asm/segment.h>
  23 #include <asm/system.h>
  24 
  25 /*
  26  * Shared mappings implemented 30.11.1994. It's not fully working yet,
  27  * though.
  28  */
  29 
  30 static unsigned long file_mmap_nopage(struct vm_area_struct * area, unsigned long address,
     /* [previous][next][first][last][top][bottom][index][help] */
  31         unsigned long page, int no_share)
  32 {
  33         struct inode * inode = area->vm_inode;
  34         unsigned int block;
  35         int nr[8];
  36         int i, *p;
  37 
  38         address &= PAGE_MASK;
  39         block = address - area->vm_start + area->vm_offset;
  40         block >>= inode->i_sb->s_blocksize_bits;
  41         i = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
  42         p = nr;
  43         do {
  44                 *p = bmap(inode,block);
  45                 i--;
  46                 block++;
  47                 p++;
  48         } while (i > 0);
  49         return bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, no_share);
  50 }
  51 
  52 /*
  53  * NOTE! mmap sync doesn't really work yet. This is mainly a stub for it,
  54  * which only works if the buffers and the page were already sharing the
  55  * same physical page (that's actually pretty common, especially if the
  56  * file has been mmap'ed before being read the normal way).
  57  *
  58  * Todo:
  59  * - non-shared pages also need to be synced with the buffers.
  60  * - the "swapout()" function needs to swap out the page to
  61  *   the shared file instead of using the swap device.
  62  */
  63 static inline void file_mmap_sync_page(struct vm_area_struct * vma,
     /* [previous][next][first][last][top][bottom][index][help] */
  64         unsigned long offset,
  65         unsigned long page)
  66 {
  67         struct buffer_head * bh;
  68 
  69         bh = buffer_pages[MAP_NR(page)];
  70         if (bh) {
  71                 /* whee.. just mark the buffer heads dirty */
  72                 struct buffer_head * tmp = bh;
  73                 do {
  74                         mark_buffer_dirty(tmp, 0);
  75                         tmp = tmp->b_this_page;
  76                 } while (tmp != bh);
  77                 return;
  78         }
  79         /* we'll need to go fetch the buffer heads etc.. RSN */
  80         printk("msync: %ld: [%08lx]\n", offset, page);
  81         printk("Can't handle non-shared page yet\n");
  82         return;
  83 }
  84 
/*
 * Sync the pages in [start, start+size) of a shared file mapping back
 * to the underlying file: every present+dirty pte in the range is
 * handed to file_mmap_sync_page().  With MS_INVALIDATE the pte is
 * also torn down entirely; otherwise it is just marked clean again.
 * (Two-level walk: one pgd level, then the pte tables.)
 */
static void file_mmap_sync(struct vm_area_struct * vma, unsigned long start,
	size_t size, unsigned int flags)
{
	pgd_t * dir;
	unsigned long poff, pcnt;

	size = size >> PAGE_SHIFT;	/* bytes -> pages */
	dir = PAGE_DIR_OFFSET(current,start);
	poff = (start >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);	/* pte index in the first table */
	/* From here on 'start' is the offset from vm_start -- the 'offset'
	   argument that file_mmap_sync_page() expects. */
	start -= vma->vm_start;
	/* pcnt = number of ptes to scan in the current page table. */
	pcnt = PTRS_PER_PAGE - poff;
	if (pcnt > size)
		pcnt = size;

	for ( ; size > 0; ++dir, size -= pcnt, pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size)) {
		pte_t *page_table;
		unsigned long pc;

		if (pgd_none(*dir)) {
			/* Nothing mapped here: skip a whole table's worth. */
			poff = 0;
			start += pcnt*PAGE_SIZE;
			continue;
		}
		if (pgd_bad(*dir)) {
			printk("file_mmap_sync: bad page directory entry %08lx.\n", pgd_val(*dir));
			pgd_clear(dir);
			poff = 0;
			start += pcnt*PAGE_SIZE;
			continue;
		}
		page_table = poff + (pte_t *) pgd_page(*dir);
		poff = 0;	/* only the first table starts mid-way */
		for (pc = pcnt; pc--; page_table++, start += PAGE_SIZE) {
			pte_t pte;

			pte = *page_table;
			if (!pte_present(pte))
				continue;
			if (!pte_dirty(pte))
				continue;
			if (flags & MS_INVALIDATE) {
				/* Drop the mapping entirely; free_page() below
				   releases the pte's reference to the page. */
				pte_clear(page_table);
			} else {
				/* Keep the page mapped but clean: take an extra
				   reference so free_page() below is balanced. */
				mem_map[MAP_NR(pte_page(pte))]++;
				*page_table = pte_mkclean(pte);
			}
			file_mmap_sync_page(vma, start, pte_page(pte));
			free_page(pte_page(pte));
		}
	}
	invalidate();	/* flush the TLB after changing ptes */
	return;
}
 138 
/*
 * This handles area unmaps: write any dirty pages in the range being
 * unmapped back to the file before the mapping disappears.
 */
static void file_mmap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
{
	file_mmap_sync(vma, start, len, MS_ASYNC);
}
 146 
/*
 * This handles complete area closes: sync the whole mapped range
 * back to the file before the vma goes away.
 */
static void file_mmap_close(struct vm_area_struct * vma)
{
	file_mmap_sync(vma, vma->vm_start, vma->vm_end - vma->vm_start, MS_ASYNC);
}
 154 
/*
 * This isn't implemented yet: you'll get a warning and incorrect behaviour.
 *
 * Note that the page is free'd by the higher-level after return,
 * so we have to either write it out or just forget it. We currently
 * forget it: the pte is cleared, so the page's contents are simply
 * dropped instead of being written to the backing file.
 */
void file_mmap_swapout(struct vm_area_struct * vma,
	unsigned long offset,
	pte_t *page_table)
{
	printk("swapout not implemented on shared files..\n");
	pte_clear(page_table);
}
 169 
/*
 * Shared mappings need to be able to do the right thing at
 * close/unmap/sync. They will also use the private file as
 * backing-store for swapping..
 *
 * (Positional initializer: the slot order must match
 * struct vm_operations_struct exactly.)
 */
static struct vm_operations_struct file_shared_mmap = {
	NULL,			/* open */
	file_mmap_close,	/* close */
	file_mmap_unmap,	/* unmap */
	NULL,			/* protect */
	file_mmap_sync,		/* sync */
	NULL,			/* advise */
	file_mmap_nopage,	/* nopage */
	NULL,			/* wppage */
	file_mmap_swapout,	/* swapout */
	NULL,			/* swapin */
};
 187 
/*
 * Private mappings just need to be able to load in the map.
 *
 * (this is actually used for shared mappings as well, if we
 * know they can't ever get write permissions..)
 *
 * Only nopage is needed: no dirty data can ever reach the file,
 * so close/unmap/sync have nothing to do.
 */
static struct vm_operations_struct file_private_mmap = {
	NULL,			/* open */
	NULL,			/* close */
	NULL,			/* unmap */
	NULL,			/* protect */
	NULL,			/* sync */
	NULL,			/* advise */
	file_mmap_nopage,	/* nopage */
	NULL,			/* wppage */
	NULL,			/* swapout */
	NULL,			/* swapin */
};
 206 
 207 /* This is used for a general mmap of a disk file */
 208 int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
     /* [previous][next][first][last][top][bottom][index][help] */
 209 {
 210         struct vm_operations_struct * ops;
 211 
 212         if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
 213                 return -EINVAL;
 214         if (!inode->i_sb || !S_ISREG(inode->i_mode))
 215                 return -EACCES;
 216         if (!inode->i_op || !inode->i_op->bmap)
 217                 return -ENOEXEC;
 218         ops = &file_private_mmap;
 219         if (vma->vm_flags & VM_SHARED) {
 220                 if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) {
 221                         static int nr = 0;
 222                         ops = &file_shared_mmap;
 223                         if (nr++ < 5)
 224                                 printk("%s tried to do a shared writeable mapping\n", current->comm);
 225                         return -EINVAL;
 226                 }
 227         }
 228         if (!IS_RDONLY(inode)) {
 229                 inode->i_atime = CURRENT_TIME;
 230                 inode->i_dirt = 1;
 231         }
 232         vma->vm_inode = inode;
 233         inode->i_count++;
 234         vma->vm_ops = ops;
 235         return 0;
 236 }

/* [previous][next][first][last][top][bottom][index][help] */