root/mm/filemap.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions:
  1. file_mmap_nopage
  2. file_mmap_sync_page
  3. file_mmap_sync
  4. file_mmap_unmap
  5. file_mmap_close
  6. file_mmap_swapout
  7. generic_mmap

   1 /*
   2  *      linux/mm/filemap.c
   3  *
   4  * Copyright (C) 1994 Linus Torvalds
   5  */
   6 
   7 /*
   8  * This file handles the generic file mmap semantics used by
   9  * most "normal" filesystems (but you don't /have/ to use this:
  10  * the NFS filesystem does this differently, for example)
  11  */
  12 #include <linux/stat.h>
  13 #include <linux/sched.h>
  14 #include <linux/kernel.h>
  15 #include <linux/mm.h>
  16 #include <linux/shm.h>
  17 #include <linux/errno.h>
  18 #include <linux/mman.h>
  19 #include <linux/string.h>
  20 #include <linux/malloc.h>
  21 
  22 #include <asm/segment.h>
  23 #include <asm/system.h>
  24 #include <asm/pgtable.h>
  25 
  26 /*
  27  * Shared mappings implemented 30.11.1994. It's not fully working yet,
  28  * though.
  29  */
  30 
  31 static unsigned long file_mmap_nopage(struct vm_area_struct * area, unsigned long address,
     /* [previous][next][first][last][top][bottom][index][help] */
  32         unsigned long page, int no_share)
  33 {
  34         struct inode * inode = area->vm_inode;
  35         unsigned int block;
  36         int nr[8];
  37         int i, *p;
  38 
  39         address &= PAGE_MASK;
  40         block = address - area->vm_start + area->vm_offset;
  41         block >>= inode->i_sb->s_blocksize_bits;
  42         i = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
  43         p = nr;
  44         do {
  45                 *p = bmap(inode,block);
  46                 i--;
  47                 block++;
  48                 p++;
  49         } while (i > 0);
  50         return bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, no_share);
  51 }
  52 
  53 /*
  54  * NOTE! mmap sync doesn't really work yet. This is mainly a stub for it,
  55  * which only works if the buffers and the page were already sharing the
  56  * same physical page (that's actually pretty common, especially if the
  57  * file has been mmap'ed before being read the normal way).
  58  *
  59  * Todo:
  60  * - non-shared pages also need to be synced with the buffers.
  61  * - the "swapout()" function needs to swap out the page to
  62  *   the shared file instead of using the swap device.
  63  */
  64 static inline void file_mmap_sync_page(struct vm_area_struct * vma,
     /* [previous][next][first][last][top][bottom][index][help] */
  65         unsigned long offset,
  66         unsigned long page)
  67 {
  68         struct buffer_head * bh;
  69 
  70         bh = buffer_pages[MAP_NR(page)];
  71         if (bh) {
  72                 /* whee.. just mark the buffer heads dirty */
  73                 struct buffer_head * tmp = bh;
  74                 do {
  75                         mark_buffer_dirty(tmp, 0);
  76                         tmp = tmp->b_this_page;
  77                 } while (tmp != bh);
  78                 return;
  79         }
  80         /* we'll need to go fetch the buffer heads etc.. RSN */
  81         printk("msync: %ld: [%08lx]\n", offset, page);
  82         printk("Can't handle non-shared page yet\n");
  83         return;
  84 }
  85 
/*
 * Write a range of a file mapping back towards the file: walk the page
 * tables covering [start, start+size) and hand every present, dirty
 * page to file_mmap_sync_page().  With MS_INVALIDATE the page is also
 * unmapped from the address space.
 */
static void file_mmap_sync(struct vm_area_struct * vma, unsigned long start,
	size_t size, unsigned int flags)
{
	pgd_t * dir;
	unsigned long poff, pcnt;

	/* Convert the byte count into a page count. */
	size = size >> PAGE_SHIFT;
	dir = PAGE_DIR_OFFSET(current,start);
	/* Index of the first pte inside its page table. */
	poff = (start >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
	/* From here on "start" is the offset within the mapping - this is
	   what file_mmap_sync_page() receives as its "offset" argument. */
	start -= vma->vm_start;
	/* Pages handled by the first (possibly partial) page table. */
	pcnt = PTRS_PER_PAGE - poff;
	if (pcnt > size)
		pcnt = size;

	for ( ; size > 0; ++dir, size -= pcnt, pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size)) {
		pte_t *page_table;
		unsigned long pc;

		/* Nothing mapped by this page directory entry: skip it. */
		if (pgd_none(*dir)) {
			poff = 0;
			start += pcnt*PAGE_SIZE;
			continue;
		}
		if (pgd_bad(*dir)) {
			printk("file_mmap_sync: bad page directory entry %08lx.\n", pgd_val(*dir));
			pgd_clear(dir);
			poff = 0;
			start += pcnt*PAGE_SIZE;
			continue;
		}
		page_table = poff + (pte_t *) pgd_page(*dir);
		poff = 0;	/* only the first table starts mid-way */
		for (pc = pcnt; pc--; page_table++, start += PAGE_SIZE) {
			pte_t pte;

			pte = *page_table;
			if (!pte_present(pte))
				continue;
			if (!pte_dirty(pte))
				continue;
			if (flags & MS_INVALIDATE) {
				/* Drop the mapping; free_page() below
				   releases the mapping's reference. */
				pte_clear(page_table);
			} else {
				/* Take an extra reference so the page stays
				   around for the sync; free_page() below
				   releases it again. */
				mem_map[MAP_NR(pte_page(pte))]++;
				*page_table = pte_mkclean(pte);
			}
			file_mmap_sync_page(vma, start, pte_page(pte));
			free_page(pte_page(pte));
		}
	}
	/* Flush the TLB: ptes may have been cleared or cleaned above. */
	invalidate();
	return;
}
 139 
/*
 * This handles area unmaps..
 *
 * The range being removed is synced back (asynchronously) so that
 * dirty pages reach the file's buffers before the mapping goes away.
 */
static void file_mmap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
{
	file_mmap_sync(vma, start, len, MS_ASYNC);
}
 147 
 148 /*
 149  * This handles complete area closes..
 150  */
 151 static void file_mmap_close(struct vm_area_struct * vma)
     /* [previous][next][first][last][top][bottom][index][help] */
 152 {
 153         file_mmap_sync(vma, vma->vm_start, vma->vm_end - vma->vm_start, MS_ASYNC);
 154 }
 155 
/*
 * This isn't implemented yet: you'll get a warning and incorrect behaviour.
 *
 * Note that the page is free'd by the higher-level after return,
 * so we have to either write it out or just forget it. We currently
 * forget it..
 */
void file_mmap_swapout(struct vm_area_struct * vma,
	unsigned long offset,
	pte_t *page_table)
{
	/* Clear the pte: the page's contents are simply discarded. */
	printk("swapout not implemented on shared files..\n");
	pte_clear(page_table);
}
 170 
/*
 * Shared mappings need to be able to do the right thing at
 * close/unmap/sync. They will also use the private file as
 * backing-store for swapping..
 *
 * NULL slots are operations this mapping type does not provide.
 */
static struct vm_operations_struct file_shared_mmap = {
	NULL,			/* open */
	file_mmap_close,	/* close */
	file_mmap_unmap,	/* unmap */
	NULL,			/* protect */
	file_mmap_sync,		/* sync */
	NULL,			/* advise */
	file_mmap_nopage,	/* nopage */
	NULL,			/* wppage */
	file_mmap_swapout,	/* swapout */
	NULL,			/* swapin */
};
 188 
/*
 * Private mappings just need to be able to load in the map
 *
 * (this is actually used for shared mappings as well, if we
 * know they can't ever get write permissions..)
 *
 * Only nopage is provided: there is never anything to write back.
 */
static struct vm_operations_struct file_private_mmap = {
	NULL,			/* open */
	NULL,			/* close */
	NULL,			/* unmap */
	NULL,			/* protect */
	NULL,			/* sync */
	NULL,			/* advise */
	file_mmap_nopage,	/* nopage */
	NULL,			/* wppage */
	NULL,			/* swapout */
	NULL,			/* swapin */
};
 207 
 208 /* This is used for a general mmap of a disk file */
 209 int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
     /* [previous][next][first][last][top][bottom][index][help] */
 210 {
 211         struct vm_operations_struct * ops;
 212 
 213         if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
 214                 return -EINVAL;
 215         if (!inode->i_sb || !S_ISREG(inode->i_mode))
 216                 return -EACCES;
 217         if (!inode->i_op || !inode->i_op->bmap)
 218                 return -ENOEXEC;
 219         ops = &file_private_mmap;
 220         if (vma->vm_flags & VM_SHARED) {
 221                 if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) {
 222                         static int nr = 0;
 223                         ops = &file_shared_mmap;
 224                         if (nr++ < 5)
 225                                 printk("%s tried to do a shared writeable mapping\n", current->comm);
 226                         return -EINVAL;
 227                 }
 228         }
 229         if (!IS_RDONLY(inode)) {
 230                 inode->i_atime = CURRENT_TIME;
 231                 inode->i_dirt = 1;
 232         }
 233         vma->vm_inode = inode;
 234         inode->i_count++;
 235         vma->vm_ops = ops;
 236         return 0;
 237 }

/* [previous][next][first][last][top][bottom][index][help] */