root/mm/filemap.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. file_mmap_nopage
  2. file_mmap_sync_page
  3. file_mmap_sync
  4. file_mmap_unmap
  5. file_mmap_close
  6. file_mmap_swapout
  7. generic_mmap

   1 /*
   2  *      linux/mm/filemap.c
   3  *
   4  * Copyright (C) 1994 Linus Torvalds
   5  */
   6 
   7 /*
   8  * This file handles the generic file mmap semantics used by
   9  * most "normal" filesystems (but you don't /have/ to use this:
  10  * the NFS filesystem does this differently, for example)
  11  */
  12 #include <linux/stat.h>
  13 #include <linux/sched.h>
  14 #include <linux/kernel.h>
  15 #include <linux/mm.h>
  16 #include <linux/shm.h>
  17 #include <linux/errno.h>
  18 #include <linux/mman.h>
  19 #include <linux/string.h>
  20 #include <linux/malloc.h>
  21 
  22 #include <asm/segment.h>
  23 #include <asm/system.h>
  24 
  25 /*
  26  * Shared mappings implemented 30.11.1994. It's not fully working yet,
  27  * though.
  28  */
  29 
  30 static unsigned long file_mmap_nopage(struct vm_area_struct * area, unsigned long address,
     /* [previous][next][first][last][top][bottom][index][help] */
  31         unsigned long page, int no_share)
  32 {
  33         struct inode * inode = area->vm_inode;
  34         unsigned int block;
  35         int nr[8];
  36         int i, *p;
  37 
  38         address &= PAGE_MASK;
  39         block = address - area->vm_start + area->vm_offset;
  40         block >>= inode->i_sb->s_blocksize_bits;
  41         i = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
  42         p = nr;
  43         do {
  44                 *p = bmap(inode,block);
  45                 i--;
  46                 block++;
  47                 p++;
  48         } while (i > 0);
  49         return bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, no_share);
  50 }
  51 
  52 /*
  53  * NOTE! mmap sync doesn't really work yet. This is mainly a stub for it,
  54  * which only works if the buffers and the page were already sharing the
  55  * same physical page (that's actually pretty common, especially if the
  56  * file has been mmap'ed before being read the normal way).
  57  *
  58  * Todo:
  59  * - non-shared pages also need to be synced with the buffers.
  60  * - the "swapout()" function needs to swap out the page to
  61  *   the shared file instead of using the swap device.
  62  */
  63 static inline void file_mmap_sync_page(struct vm_area_struct * vma,
     /* [previous][next][first][last][top][bottom][index][help] */
  64         unsigned long offset,
  65         unsigned long page)
  66 {
  67         struct buffer_head * bh;
  68 
  69         bh = buffer_pages[MAP_NR(page)];
  70         if (bh) {
  71                 /* whee.. just mark the buffer heads dirty */
  72                 struct buffer_head * tmp = bh;
  73                 do {
  74                         mark_buffer_dirty(tmp, 0);
  75                         tmp = tmp->b_this_page;
  76                 } while (tmp != bh);
  77                 return;
  78         }
  79         /* we'll need to go fetch the buffer heads etc.. RSN */
  80         printk("msync: %ld: [%08lx]\n", offset, page);
  81         printk("Can't handle non-shared page yet\n");
  82         return;
  83 }
  84 
/*
 * Walk the page tables covering [start, start+size) of a mapped file
 * area and push every present, dirty page back to its backing buffers
 * via file_mmap_sync_page().  With MS_INVALIDATE the page is also
 * unmapped from the page table.
 *
 * 'start' comes in as a user virtual address inside 'vma'; it is
 * rebased below to an offset within the mapping before being handed
 * to file_mmap_sync_page().
 */
static void file_mmap_sync(struct vm_area_struct * vma, unsigned long start,
	size_t size, unsigned int flags)
{
	unsigned long page_dir;
	unsigned long *page_table, *dir;
	unsigned long poff, pcnt, pc;

	/* from here on, 'size' is a page count, not a byte count */
	size = size >> PAGE_SHIFT;
	dir = PAGE_DIR_OFFSET(current->tss.cr3,start);
	/* index of the first pte within the first page table */
	poff = (start >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
	start -= vma->vm_start;		/* now an offset into the mapping */
	/* number of pages handled by the first (possibly partial) table */
	if ((pcnt = PTRS_PER_PAGE - poff) > size)
		pcnt = size;

	/* outer loop: one page table (page directory entry) at a time */
	for ( ; size > 0; ++dir, size -= pcnt,
	     pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size)) {
		if (!(PAGE_PRESENT & (page_dir = *dir))) {
			/* a zero entry just means "nothing mapped here";
			 * any other non-present value is corruption */
			if (page_dir)
				printk("file_mmap_sync: bad page directory.\n");
			poff = 0;
			start += pcnt*PAGE_SIZE;
			continue;
		}
		page_table = (unsigned long *)(PAGE_MASK & page_dir);
		if (poff) {
			/* only the first table can start mid-way through */
			page_table += poff;
			poff = 0;
		}
		for (pc = pcnt; pc--; page_table++, start += PAGE_SIZE) {
			unsigned long page = *page_table;
			if (!(page & PAGE_PRESENT))
				continue;
			if (!(page & PAGE_DIRTY))
				continue;
			/* take an extra reference so the page can't go away
			 * while file_mmap_sync_page() works on it */
			mem_map[MAP_NR(page)]++;
			if (flags & MS_INVALIDATE) {
				/* unmap the page; this free_page() drops the
				 * reference that the pte used to hold */
				*page_table = 0;
				free_page(page);
			} else
				*page_table = page & ~PAGE_DIRTY;
			file_mmap_sync_page(vma, start, page);
			free_page(page);	/* drop our extra reference */
		}
	}
	invalidate();	/* flush the TLB after editing the page tables */
	return;
}
 132 
 133 /*
 134  * This handles area unmaps..
 135  */
 136 static void file_mmap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
     /* [previous][next][first][last][top][bottom][index][help] */
 137 {
 138         if (vma->vm_page_prot & PAGE_RW)
 139                 file_mmap_sync(vma, start, len, MS_ASYNC);
 140 }
 141 
 142 /*
 143  * This handles complete area closes..
 144  */
 145 static void file_mmap_close(struct vm_area_struct * vma)
     /* [previous][next][first][last][top][bottom][index][help] */
 146 {
 147         if (vma->vm_page_prot & PAGE_RW)
 148                 file_mmap_sync(vma, vma->vm_start, vma->vm_end - vma->vm_start, MS_ASYNC);
 149 }
 150 
 151 /*
 152  * This isn't implemented yet: you'll get a warning and incorrect behaviour.
 153  *
 154  * Note that the page is free'd by the higher-level after return,
 155  * so we have to either write it out or just forget it. We currently
 156  * forget it..
 157  */
/*
 * Swap-out hook for shared file mappings.  Writing the page back to
 * the file is not implemented yet, so we warn and simply forget the
 * page: the caller frees it after we return (see the comment above),
 * and clearing the pte means its contents are silently lost.
 */
void file_mmap_swapout(struct vm_area_struct * vma,
	unsigned long offset,
	unsigned long *pte)
{
	printk("swapout not implemented on shared files..\n");
	*pte = 0;	/* drop the mapping; the page data is discarded */
}
 165 
 166 /*
 167  * Shared mappings need to be able to do the right thing at
 168  * close/unmap/sync. They will also use the private file as
 169  * backing-store for swapping..
 170  */
/* Only the hooks needed for write-back (close/unmap/sync/swapout) and
 * fault-in (nopage) are set; NULL entries fall back to the kernel's
 * default behaviour. */
static struct vm_operations_struct file_shared_mmap = {
	NULL,			/* open */
	file_mmap_close,	/* close */
	file_mmap_unmap,	/* unmap */
	NULL,			/* protect */
	file_mmap_sync,		/* sync */
	NULL,			/* advise */
	file_mmap_nopage,	/* nopage */
	NULL,			/* wppage */
	file_mmap_swapout,	/* swapout */
	NULL,			/* swapin */
};
 183 
 184 /*
 185  * Private mappings just need to be able to load in the map
 186  *
 187  * (this is actually used for shared mappings as well, if we
 188  * know they can't ever get write permissions..)
 189  */
/* Private (and never-writable shared) mappings only need fault-in:
 * nothing is ever written back to the file, so every hook except
 * nopage stays at the NULL default. */
static struct vm_operations_struct file_private_mmap = {
	NULL,			/* open */
	NULL,			/* close */
	NULL,			/* unmap */
	NULL,			/* protect */
	NULL,			/* sync */
	NULL,			/* advise */
	file_mmap_nopage,	/* nopage */
	NULL,			/* wppage */
	NULL,			/* swapout */
	NULL,			/* swapin */
};
 202 
 203 /* This is used for a general mmap of a disk file */
 204 int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
     /* [previous][next][first][last][top][bottom][index][help] */
 205 {
 206         struct vm_operations_struct * ops;
 207 
 208         if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
 209                 return -EINVAL;
 210         if (!inode->i_sb || !S_ISREG(inode->i_mode))
 211                 return -EACCES;
 212         if (!inode->i_op || !inode->i_op->bmap)
 213                 return -ENOEXEC;
 214         ops = &file_private_mmap;
 215         if (vma->vm_flags & VM_SHARED) {
 216                 if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) {
 217                         static int nr = 0;
 218                         ops = &file_shared_mmap;
 219                         if (nr++ < 5)
 220                                 printk("%s tried to do a shared writeable mapping\n", current->comm);
 221                         return -EINVAL;
 222                 }
 223         }
 224         if (!IS_RDONLY(inode)) {
 225                 inode->i_atime = CURRENT_TIME;
 226                 inode->i_dirt = 1;
 227         }
 228         vma->vm_inode = inode;
 229         inode->i_count++;
 230         vma->vm_ops = ops;
 231         return 0;
 232 }

/* [previous][next][first][last][top][bottom][index][help] */