/*
 * This source file includes the following definitions:
 *  - file_mmap_nopage
 *  - file_mmap_sync_page
 *  - file_mmap_sync
 *  - file_mmap_unmap
 *  - file_mmap_close
 *  - file_mmap_swapout
 *  - generic_mmap
 */
12 #include <linux/stat.h>
13 #include <linux/sched.h>
14 #include <linux/kernel.h>
15 #include <linux/mm.h>
16 #include <linux/shm.h>
17 #include <linux/errno.h>
18 #include <linux/mman.h>
19 #include <linux/string.h>
20 #include <linux/malloc.h>
21
22 #include <asm/segment.h>
23 #include <asm/system.h>
24 #include <asm/pgtable.h>
25
26
27
28
29
30
31 static unsigned long file_mmap_nopage(struct vm_area_struct * area, unsigned long address,
32 unsigned long page, int no_share)
33 {
34 struct inode * inode = area->vm_inode;
35 unsigned int block;
36 int nr[8];
37 int i, *p;
38
39 address &= PAGE_MASK;
40 block = address - area->vm_start + area->vm_offset;
41 block >>= inode->i_sb->s_blocksize_bits;
42 i = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
43 p = nr;
44 do {
45 *p = bmap(inode,block);
46 i--;
47 block++;
48 p++;
49 } while (i > 0);
50 return bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, no_share);
51 }
52
53
54
55
56
57
58
59
60
61
62
63
64 static inline void file_mmap_sync_page(struct vm_area_struct * vma,
65 unsigned long offset,
66 unsigned long page)
67 {
68 struct buffer_head * bh;
69
70 bh = buffer_pages[MAP_NR(page)];
71 if (bh) {
72
73 struct buffer_head * tmp = bh;
74 do {
75 mark_buffer_dirty(tmp, 0);
76 tmp = tmp->b_this_page;
77 } while (tmp != bh);
78 return;
79 }
80
81 printk("msync: %ld: [%08lx]\n", offset, page);
82 printk("Can't handle non-shared page yet\n");
83 return;
84 }
85
/*
 * Sync (and with MS_INVALIDATE, also unmap) the dirty pages of a
 * shared file mapping in the range [start, start+size).
 *
 * Walks the page directory/page tables of 'current' for the range and
 * hands every present, dirty page to file_mmap_sync_page().
 */
static void file_mmap_sync(struct vm_area_struct * vma, unsigned long start,
	size_t size, unsigned int flags)
{
	pgd_t * dir;
	unsigned long poff, pcnt;

	/* 'size' is in pages from here on. */
	size = size >> PAGE_SHIFT;
	dir = PAGE_DIR_OFFSET(current,start);
	/* poff: index of the first pte within its page table. */
	poff = (start >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
	/* From here on 'start' is the offset into the vma — this is what
	   gets passed to file_mmap_sync_page() below. */
	start -= vma->vm_start;
	/* pcnt: number of ptes to handle in the first page table
	   (clamped to the total). */
	pcnt = PTRS_PER_PAGE - poff;
	if (pcnt > size)
		pcnt = size;

	/* One iteration per page table; after the first, poff is 0 and
	   pcnt is a full table (or whatever remains). */
	for ( ; size > 0; ++dir, size -= pcnt, pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size)) {
		pte_t *page_table;
		unsigned long pc;

		if (pgd_none(*dir)) {
			/* No page table here: skip the whole range it
			   would have covered. */
			poff = 0;
			start += pcnt*PAGE_SIZE;
			continue;
		}
		if (pgd_bad(*dir)) {
			printk("file_mmap_sync: bad page directory entry %08lx.\n", pgd_val(*dir));
			pgd_clear(dir);
			poff = 0;
			start += pcnt*PAGE_SIZE;
			continue;
		}
		page_table = poff + (pte_t *) pgd_page(*dir);
		poff = 0;
		for (pc = pcnt; pc--; page_table++, start += PAGE_SIZE) {
			pte_t pte;

			pte = *page_table;
			/* Only present, dirty pages need writing back. */
			if (!pte_present(pte))
				continue;
			if (!pte_dirty(pte))
				continue;
			if (flags & MS_INVALIDATE) {
				/* Invalidate: drop the mapping; the pte's
				   page reference is released by the
				   free_page() below. */
				pte_clear(page_table);
			} else {
				/* Plain sync: take an extra reference so
				   the page stays around while it is being
				   written, and mark the pte clean; the
				   free_page() below drops the extra
				   reference again. */
				mem_map[MAP_NR(pte_page(pte))]++;
				*page_table = pte_mkclean(pte);
			}
			file_mmap_sync_page(vma, start, pte_page(pte));
			free_page(pte_page(pte));
		}
	}
	/* The ptes changed: flush the TLB. */
	invalidate();
	return;
}
139
140
141
142
143 static void file_mmap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
144 {
145 file_mmap_sync(vma, start, len, MS_ASYNC);
146 }
147
148
149
150
151 static void file_mmap_close(struct vm_area_struct * vma)
152 {
153 file_mmap_sync(vma, vma->vm_start, vma->vm_end - vma->vm_start, MS_ASYNC);
154 }
155
156
157
158
159
160
161
162
163 void file_mmap_swapout(struct vm_area_struct * vma,
164 unsigned long offset,
165 pte_t *page_table)
166 {
167 printk("swapout not implemented on shared files..\n");
168 pte_clear(page_table);
169 }
170
171
172
173
174
175
/*
 * Operations for shared file mappings: dirty pages must reach the
 * file, so close/unmap/sync all write back via file_mmap_sync().
 * Slots are positional (struct vm_operations_struct); the NULL slots
 * are handlers this mapping type does not provide.
 */
static struct vm_operations_struct file_shared_mmap = {
	NULL,
	file_mmap_close,	/* close */
	file_mmap_unmap,	/* unmap */
	NULL,
	file_mmap_sync,		/* sync */
	NULL,
	file_mmap_nopage,	/* nopage */
	NULL,
	file_mmap_swapout,	/* swapout */
	NULL,
};
188
189
190
191
192
193
194
/*
 * Operations for private file mappings: pages are copied on write and
 * never written back, so only the fault-in handler is needed.  Slots
 * are positional (struct vm_operations_struct).
 */
static struct vm_operations_struct file_private_mmap = {
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	file_mmap_nopage,	/* nopage */
	NULL,
	NULL,
	NULL,
};
207
208
209 int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
210 {
211 struct vm_operations_struct * ops;
212
213 if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
214 return -EINVAL;
215 if (!inode->i_sb || !S_ISREG(inode->i_mode))
216 return -EACCES;
217 if (!inode->i_op || !inode->i_op->bmap)
218 return -ENOEXEC;
219 ops = &file_private_mmap;
220 if (vma->vm_flags & VM_SHARED) {
221 if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) {
222 static int nr = 0;
223 ops = &file_shared_mmap;
224 if (nr++ < 5)
225 printk("%s tried to do a shared writeable mapping\n", current->comm);
226 return -EINVAL;
227 }
228 }
229 if (!IS_RDONLY(inode)) {
230 inode->i_atime = CURRENT_TIME;
231 inode->i_dirt = 1;
232 }
233 vma->vm_inode = inode;
234 inode->i_count++;
235 vma->vm_ops = ops;
236 return 0;
237 }