This source file includes the following definitions:
- multi_bmap
- filemap_nopage
- filemap_write_page
- filemap_swapout
- filemap_swapin
- filemap_sync_pte
- filemap_sync_pte_range
- filemap_sync_pmd_range
- filemap_sync
- filemap_unmap
- filemap_close
- generic_mmap
- msync_interval
- sys_msync
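
For orientation, here is a minimal userspace sketch of how the code below is
exercised: mapping a file MAP_SHARED pulls pages in through filemap_nopage(),
and msync() enters sys_msync(), which reaches filemap_sync() through the vma's
sync operation. This program is illustrative only and not part of this file;
the file name "data" and the one-page length are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        /* Assumption: "data" exists and is at least one page long. */
        int fd = open("data", O_RDWR);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* MAP_SHARED + PROT_WRITE selects file_shared_mmap below. */
        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        memcpy(p, "hello", 5);                  /* dirties the page */
        if (msync(p, 4096, MS_SYNC) < 0)        /* sys_msync() -> filemap_sync() */
                perror("msync");
        munmap(p, 4096);
        close(fd);
        return 0;
}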
/*
 *      linux/mm/filemap.c
 *
 * Copyright (C) 1994, 1995  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem does this differently, for example)
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>
#include <linux/fs.h>
#include <linux/locks.h>

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/pgtable.h>
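
/*
 * Fill the "nr" array with the device block numbers that back one
 * page of the file, starting at file block "block".  "shift" is the
 * filesystem block-size shift (inode->i_sb->s_blocksize_bits).
 */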
static inline void multi_bmap(struct inode * inode, unsigned long block, unsigned int * nr, int shift)
{
        int i = PAGE_SIZE >> shift;
        block >>= shift;
        do {
                *nr = bmap(inode, block);
                i--;
                block++;
                nr++;
        } while (i > 0);
}
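
/*
 * Fault in a missing page: map the faulting address back to a file
 * offset, look up the backing blocks and read them into "page".
 */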
static unsigned long filemap_nopage(struct vm_area_struct * area, unsigned long address,
        unsigned long page, int no_share)
{
        struct inode * inode = area->vm_inode;
        int nr[PAGE_SIZE/512];

        multi_bmap(inode, (address & PAGE_MASK) - area->vm_start + area->vm_offset, nr,
                inode->i_sb->s_blocksize_bits);
        return bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, no_share);
}
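
/*
 * Write a mapped page back to the file.  If the page is still in the
 * buffer cache we only need to mark its buffers dirty; otherwise we
 * write it out through the file's normal write() operation.
 */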
static int filemap_write_page(struct vm_area_struct * vma,
        unsigned long offset,
        unsigned long page)
{
        int old_fs;
        unsigned long size, result;
        struct file file;
        struct inode * inode;
        struct buffer_head * bh;

        bh = buffer_pages[MAP_NR(page)];
        if (bh) {
                /* whee.. just mark the buffer heads dirty */
                struct buffer_head * tmp = bh;
                do {
                        mark_buffer_dirty(tmp, 0);
                        tmp = tmp->b_this_page;
                } while (tmp != bh);
                return 0;
        }

        inode = vma->vm_inode;
        file.f_op = inode->i_op->default_file_ops;
        if (!file.f_op->write)
                return -EIO;
        size = offset + PAGE_SIZE;
        /* refuse to extend file size.. */
        if (S_ISREG(inode->i_mode)) {
                if (size > inode->i_size)
                        size = inode->i_size;
                /* page lies entirely past end of file: nothing to write */
                if (size < offset)
                        return -EIO;
        }
        size -= offset;
        file.f_mode = 3;
        file.f_flags = 0;
        file.f_count = 1;
        file.f_inode = inode;
        file.f_pos = offset;
        file.f_reada = 0;
        old_fs = get_fs();
        set_fs(KERNEL_DS);
        result = file.f_op->write(inode, &file, (const char *) page, size);
        set_fs(old_fs);
        if (result != size)
                return -EIO;
        return 0;
}
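
/*
 * Swap out a page from a shared file mapping.  The page table entry
 * is temporarily replaced by a shared-memory swap entry while the
 * page is written back, so filemap_swapin() can reuse the page if it
 * is faulted in again before the write completes.
 */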
int filemap_swapout(struct vm_area_struct * vma,
        unsigned long offset,
        pte_t *page_table)
{
        int error;
        unsigned long page = pte_page(*page_table);
        unsigned long entry = SWP_ENTRY(SHM_SWP_TYPE, MAP_NR(page));

        set_pte(page_table, __pte(entry));
        invalidate();
        error = filemap_write_page(vma, offset, page);
        if (pte_val(*page_table) == entry)
                pte_clear(page_table);
        return error;
}
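
/*
 * Swap in a page for a shared file mapping: the swap entry encodes
 * the physical page number, so we just pick up the page again and
 * bump its reference count.
 */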
static pte_t filemap_swapin(struct vm_area_struct * vma,
        unsigned long offset,
        unsigned long entry)
{
        unsigned long page = SWP_OFFSET(entry);

        mem_map[page]++;
        page = (page << PAGE_SHIFT) + PAGE_OFFSET;
        return mk_pte(page,vma->vm_page_prot);
}
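
/*
 * Sync back one page table entry: write the page out if it is dirty,
 * and additionally drop it from the mapping if MS_INVALIDATE is set.
 */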
static inline int filemap_sync_pte(pte_t * ptep, struct vm_area_struct *vma,
        unsigned long address, unsigned int flags)
{
        pte_t pte = *ptep;
        unsigned long page;
        int error;

        if (!(flags & MS_INVALIDATE)) {
                if (!pte_present(pte))
                        return 0;
                if (!pte_dirty(pte))
                        return 0;
                /* clean the entry, but hold an extra reference so the
                 * page can't go away while we write it out */
                set_pte(ptep, pte_mkclean(pte));
                page = pte_page(pte);
                mem_map[MAP_NR(page)]++;
        } else {
                if (pte_none(pte))
                        return 0;
                pte_clear(ptep);
                invalidate();
                if (!pte_present(pte)) {
                        swap_free(pte_val(pte));
                        return 0;
                }
                page = pte_page(pte);
                /* pure invalidation, or a clean page: no write-back needed */
                if (!pte_dirty(pte) || flags == MS_INVALIDATE) {
                        free_page(page);
                        return 0;
                }
        }
        error = filemap_write_page(vma, address - vma->vm_start + vma->vm_offset, page);
        free_page(page);
        return error;
}
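
/*
 * Walk and sync all page table entries covered by one pmd entry.
 */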
static inline int filemap_sync_pte_range(pmd_t * pmd,
        unsigned long address, unsigned long size,
        struct vm_area_struct *vma, unsigned long offset, unsigned int flags)
{
        pte_t * pte;
        unsigned long end;
        int error;

        if (pmd_none(*pmd))
                return 0;
        if (pmd_bad(*pmd)) {
                printk("filemap_sync_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
                pmd_clear(pmd);
                return 0;
        }
        pte = pte_offset(pmd, address);
        offset += address & PMD_MASK;
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        error = 0;
        do {
                error |= filemap_sync_pte(pte, vma, address + offset, flags);
                address += PAGE_SIZE;
                pte++;
        } while (address < end);
        return error;
}
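
/*
 * Walk and sync all pmd entries covered by one page directory entry.
 */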
static inline int filemap_sync_pmd_range(pgd_t * pgd,
        unsigned long address, unsigned long size,
        struct vm_area_struct *vma, unsigned int flags)
{
        pmd_t * pmd;
        unsigned long offset, end;
        int error;

        if (pgd_none(*pgd))
                return 0;
        if (pgd_bad(*pgd)) {
                printk("filemap_sync_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd));
                pgd_clear(pgd);
                return 0;
        }
        pmd = pmd_offset(pgd, address);
        offset = address & PMD_MASK;
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        error = 0;
        do {
                error |= filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
        return error;
}
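
/*
 * Sync (and optionally invalidate) a memory area back to its file.
 */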
static int filemap_sync(struct vm_area_struct * vma, unsigned long address,
        size_t size, unsigned int flags)
{
        pgd_t * dir;
        unsigned long end = address + size;
        int error = 0;

        dir = pgd_offset(current->mm, address);
        while (address < end) {
                error |= filemap_sync_pmd_range(dir, address, end - address, vma, flags);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        invalidate();
        return error;
}
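
/*
 * This handles (potentially partial) area unmaps..
 */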
static void filemap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
{
        filemap_sync(vma, start, len, MS_ASYNC);
}
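
/*
 * This handles complete area closes..
 */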
static void filemap_close(struct vm_area_struct * vma)
{
        filemap_sync(vma, vma->vm_start, vma->vm_end - vma->vm_start, MS_ASYNC);
}
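
/*
 * Shared mappings need to be able to do the right thing at
 * close/unmap/sync, and they use the file as backing store
 * for swapping.
 */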
static struct vm_operations_struct file_shared_mmap = {
        NULL,                   /* open */
        filemap_close,          /* close */
        filemap_unmap,          /* unmap */
        NULL,                   /* protect */
        filemap_sync,           /* sync */
        NULL,                   /* advise */
        filemap_nopage,         /* nopage */
        NULL,                   /* wppage */
        filemap_swapout,        /* swapout */
        filemap_swapin,         /* swapin */
};
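
/*
 * Private mappings just need to be able to load in the map.
 *
 * (This is actually used for shared mappings as well, if we
 * know they can't ever get write permissions..)
 */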
static struct vm_operations_struct file_private_mmap = {
        NULL,                   /* open */
        NULL,                   /* close */
        NULL,                   /* unmap */
        NULL,                   /* protect */
        NULL,                   /* sync */
        NULL,                   /* advise */
        filemap_nopage,         /* nopage */
        NULL,                   /* wppage */
        NULL,                   /* swapout */
        NULL,                   /* swapin */
};
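
/*
 * Generic mmap entry point for filesystems: check that the mapping
 * request is sane, pick shared or private vm_operations, and attach
 * the inode to the vma.
 */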
int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
{
        struct vm_operations_struct * ops;

        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
                ops = &file_shared_mmap;
                /* share_page() can only guarantee proper page sharing if
                 * the offsets are all page aligned. */
                if (vma->vm_offset & (PAGE_SIZE - 1))
                        return -EINVAL;
        } else {
                ops = &file_private_mmap;
                if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
                        return -EINVAL;
        }
        if (!inode->i_sb || !S_ISREG(inode->i_mode))
                return -EACCES;
        if (!inode->i_op || !inode->i_op->bmap)
                return -ENOEXEC;
        if (!IS_RDONLY(inode)) {
                inode->i_atime = CURRENT_TIME;
                inode->i_dirt = 1;
        }
        vma->vm_inode = inode;
        inode->i_count++;
        vma->vm_ops = ops;
        return 0;
}
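
/*
 * Sync one (part of a) vma back to its file, waiting for the writes
 * to complete if MS_SYNC was given.
 */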
static int msync_interval(struct vm_area_struct * vma,
        unsigned long start, unsigned long end, int flags)
{
        if (!vma->vm_inode)
                return 0;
        if (vma->vm_ops->sync) {
                int error;
                error = vma->vm_ops->sync(vma, start, end-start, flags);
                if (error)
                        return error;
                if (flags & MS_SYNC)
                        return file_fsync(vma->vm_inode, NULL);
                return 0;
        }
        return 0;
}
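
/*
 * The msync() system call: sync all vmas in the [start, start+len)
 * range back to their files.
 */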
asmlinkage int sys_msync(unsigned long start, size_t len, int flags)
{
        unsigned long end;
        struct vm_area_struct * vma;
        int unmapped_error, error;

        if (start & ~PAGE_MASK)
                return -EINVAL;
        len = (len + ~PAGE_MASK) & PAGE_MASK;
        end = start + len;
        if (end < start)
                return -EINVAL;
        if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
                return -EINVAL;
        if (end == start)
                return 0;
        /*
         * If the interval [start,end) covers some unmapped address ranges,
         * just ignore them, but return -EFAULT at the end.
         */
        vma = find_vma(current, start);
        unmapped_error = 0;
        for (;;) {
                /* Still start < end. */
                if (!vma)
                        return -EFAULT;
                /* Here start < vma->vm_end. */
                if (start < vma->vm_start) {
                        unmapped_error = -EFAULT;
                        start = vma->vm_start;
                }
                /* Here vma->vm_start <= start < vma->vm_end. */
                if (end <= vma->vm_end) {
                        if (start < end) {
                                error = msync_interval(vma, start, end, flags);
                                if (error)
                                        return error;
                        }
                        return unmapped_error;
                }
                /* Here vma->vm_start <= start < vma->vm_end < end. */
                error = msync_interval(vma, start, vma->vm_end, flags);
                if (error)
                        return error;
                start = vma->vm_end;
                vma = vma->vm_next;
        }
}