This source file includes the following definitions:
- multi_bmap
- filemap_nopage
- filemap_write_page
- filemap_swapout
- filemap_swapin
- filemap_sync_pte
- filemap_sync_pte_range
- filemap_sync_pmd_range
- filemap_sync
- filemap_unmap
- filemap_close
- generic_mmap
- msync_interval
- sys_msync
/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994, 1995  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem does this differently, for example)
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>
#include <linux/fs.h>
#include <linux/locks.h>

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/pgtable.h>
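/*
 * Fill the array "nr" with the device block numbers backing one page
 * of the file, one bmap() call per block.  "shift" is the filesystem
 * blocksize in bits, so PAGE_SIZE >> shift blocks cover a page: with
 * 4096-byte pages over 1024-byte blocks that is four bmap() lookups.
 */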
static inline void multi_bmap(struct inode * inode, unsigned long block, unsigned int * nr, int shift)
{
	int i = PAGE_SIZE >> shift;
	block >>= shift;
	do {
		*nr = bmap(inode, block);
		i--;
		block++;
		nr++;
	} while (i > 0);
}
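/*
 * The "nopage" operation for file mappings, called at page-fault time:
 * map the faulting address back to a file offset, look up the backing
 * block numbers and read them into the page.  Touching a shared
 * mapping beyond the end of the file raises SIGBUS (the vm_mm check
 * makes sure only a process faulting on its own mapping is signalled).
 */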
static unsigned long filemap_nopage(struct vm_area_struct * area, unsigned long address,
	unsigned long page, int no_share)
{
	struct inode * inode = area->vm_inode;
	int nr[PAGE_SIZE/512];

	address = (address & PAGE_MASK) - area->vm_start + area->vm_offset;
	if (address >= inode->i_size && (area->vm_flags & VM_SHARED) && area->vm_mm == current->mm)
		send_sig(SIGBUS, current, 1);
	multi_bmap(inode, address, nr, inode->i_sb->s_blocksize_bits);
	return bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, no_share);
}
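/*
 * Write one page of a shared mapping back to the file.  If the page
 * is still known to the buffer cache it is enough to mark its buffer
 * heads dirty; otherwise a temporary "struct file" is set up on the
 * stack and the filesystem's own write() is called with get_fs/set_fs
 * switched to KERNEL_DS, since the source page lives in kernel space.
 */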
static int filemap_write_page(struct vm_area_struct * vma,
	unsigned long offset,
	unsigned long page)
{
	int old_fs;
	unsigned long size, result;
	struct file file;
	struct inode * inode;
	struct buffer_head * bh;

	bh = buffer_pages[MAP_NR(page)];
	if (bh) {
		/* the page is still hashed as a buffer page: just mark
		   the buffer heads dirty and let bdflush do the IO */
		struct buffer_head * tmp = bh;
		do {
			mark_buffer_dirty(tmp, 0);
			tmp = tmp->b_this_page;
		} while (tmp != bh);
		return 0;
	}

	inode = vma->vm_inode;
	file.f_op = inode->i_op->default_file_ops;
	if (!file.f_op->write)
		return -EIO;
	size = offset + PAGE_SIZE;
	/* never write past the end of a regular file */
	if (S_ISREG(inode->i_mode)) {
		if (size > inode->i_size)
			size = inode->i_size;
		/* the page lies entirely beyond EOF: treat as an IO error */
		if (size < offset)
			return -EIO;
	}
	size -= offset;
	file.f_mode = 3;	/* read/write */
	file.f_flags = 0;
	file.f_count = 1;
	file.f_inode = inode;
	file.f_pos = offset;
	file.f_reada = 0;
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	result = file.f_op->write(inode, &file, (const char *) page, size);
	set_fs(old_fs);
	if (result != size)
		return -EIO;
	return 0;
}
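/*
 * Swap out a page from a shared file mapping.  The data goes back into
 * the file rather than to the swap device, and while the write is in
 * progress the pte holds a fake SHM_SWP_TYPE swap entry encoding the
 * page number, so the page can still be found by a racing fault.  Only
 * if the pte is untouched after the write do we actually clear it.
 */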
int filemap_swapout(struct vm_area_struct * vma,
	unsigned long offset,
	pte_t *page_table)
{
	int error;
	unsigned long page = pte_page(*page_table);
	unsigned long entry = SWP_ENTRY(SHM_SWP_TYPE, MAP_NR(page));

	set_pte(page_table, __pte(entry));
	invalidate();
	error = filemap_write_page(vma, offset, page);
	if (pte_val(*page_table) == entry)
		pte_clear(page_table);
	return error;
}
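/*
 * The matching swapin: the fake entry left by filemap_swapout() still
 * encodes the page frame number, so no IO is needed - just take
 * another reference to the frame and rebuild a pte for it.
 */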
static pte_t filemap_swapin(struct vm_area_struct * vma,
	unsigned long offset,
	unsigned long entry)
{
	unsigned long page = SWP_OFFSET(entry);

	mem_map[page].count++;
	page = (page << PAGE_SHIFT) + PAGE_OFFSET;
	return mk_pte(page, vma->vm_page_prot);
}
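/*
 * Sync a single pte.  Without MS_INVALIDATE only present and dirty
 * ptes matter: the pte is marked clean and the page written back (an
 * extra reference keeps the page alive across the write).  With
 * MS_INVALIDATE the pte is removed as well: swap entries are released,
 * clean pages simply freed, and a dirty page is written back first
 * unless invalidation was the only thing asked for.
 */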
static inline int filemap_sync_pte(pte_t * ptep, struct vm_area_struct *vma,
	unsigned long address, unsigned int flags)
{
	pte_t pte = *ptep;
	unsigned long page;
	int error;

	if (!(flags & MS_INVALIDATE)) {
		if (!pte_present(pte))
			return 0;
		if (!pte_dirty(pte))
			return 0;
		set_pte(ptep, pte_mkclean(pte));
		page = pte_page(pte);
		mem_map[MAP_NR(page)].count++;
	} else {
		if (pte_none(pte))
			return 0;
		pte_clear(ptep);
		invalidate();
		if (!pte_present(pte)) {
			swap_free(pte_val(pte));
			return 0;
		}
		page = pte_page(pte);
		if (!pte_dirty(pte) || flags == MS_INVALIDATE) {
			free_page(page);
			return 0;
		}
	}
	error = filemap_write_page(vma, address - vma->vm_start + vma->vm_offset, page);
	free_page(page);
	return error;
}
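/*
 * Sync all ptes under one pmd entry that fall inside
 * [address, address+size).  "offset" carries the address bits masked
 * off higher up in the walk, so address + offset is again the full
 * virtual address that filemap_sync_pte() expects.
 */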
static inline int filemap_sync_pte_range(pmd_t * pmd,
	unsigned long address, unsigned long size,
	struct vm_area_struct *vma, unsigned long offset, unsigned int flags)
{
	pte_t * pte;
	unsigned long end;
	int error;

	if (pmd_none(*pmd))
		return 0;
	if (pmd_bad(*pmd)) {
		printk("filemap_sync_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
		pmd_clear(pmd);
		return 0;
	}
	pte = pte_offset(pmd, address);
	offset += address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	error = 0;
	do {
		error |= filemap_sync_pte(pte, vma, address + offset, flags);
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
	return error;
}
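/*
 * Sync all pmd entries under one pgd entry, clamping the range to
 * PGDIR_SIZE and passing the masked-off address bits down as "offset".
 */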
static inline int filemap_sync_pmd_range(pgd_t * pgd,
	unsigned long address, unsigned long size,
	struct vm_area_struct *vma, unsigned int flags)
{
	pmd_t * pmd;
	unsigned long offset, end;
	int error;

	if (pgd_none(*pgd))
		return 0;
	if (pgd_bad(*pgd)) {
		printk("filemap_sync_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd));
		pgd_clear(pgd);
		return 0;
	}
	pmd = pmd_offset(pgd, address);
	/* split off the pgd-relative part of the address: the clamp
	   below is PGDIR_SIZE, so PGDIR_MASK (not PMD_MASK) is the
	   right mask here on three-level page tables */
	offset = address & PGDIR_MASK;
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	error = 0;
	do {
		error |= filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return error;
}
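/*
 * Walk the current process' page tables over [address, address+size)
 * and sync every pte that maps the area, invalidating the TLB once at
 * the end.  This is the vm_ops->sync operation and also the backend
 * for filemap_unmap() and filemap_close() below.
 */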
static int filemap_sync(struct vm_area_struct * vma, unsigned long address,
	size_t size, unsigned int flags)
{
	pgd_t * dir;
	unsigned long end = address + size;
	int error = 0;

	dir = pgd_offset(current->mm, address);
	while (address < end) {
		error |= filemap_sync_pmd_range(dir, address, end - address, vma, flags);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	invalidate();
	return error;
}
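/*
 * Handle (potentially partial) unmapping of a shared file mapping by
 * writing back any dirty pages in the vanishing range.
 */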
static void filemap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
{
	filemap_sync(vma, start, len, MS_ASYNC);
}
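/*
 * Called when a shared file mapping goes away: sync the whole area.
 */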
static void filemap_close(struct vm_area_struct * vma)
{
	filemap_sync(vma, vma->vm_start, vma->vm_end - vma->vm_start, MS_ASYNC);
}
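/*
 * Shared mappings must write their pages back at unmap/close/sync
 * time, and they swap out to the file rather than to swap space.
 */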
static struct vm_operations_struct file_shared_mmap = {
	NULL,			/* open */
	filemap_close,		/* close */
	filemap_unmap,		/* unmap */
	NULL,			/* protect */
	filemap_sync,		/* sync */
	NULL,			/* advise */
	filemap_nopage,		/* nopage */
	NULL,			/* wppage */
	filemap_swapout,	/* swapout */
	filemap_swapin,		/* swapin */
};
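/*
 * Private (copy-on-write) mappings only ever need to read pages in:
 * dirty copies behave like anonymous memory and go to swap, so nopage
 * is the only operation required.
 */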
static struct vm_operations_struct file_private_mmap = {
	NULL,			/* open */
	NULL,			/* close */
	NULL,			/* unmap */
	NULL,			/* protect */
	NULL,			/* sync */
	NULL,			/* advise */
	filemap_nopage,		/* nopage */
	NULL,			/* wppage */
	NULL,			/* swapout */
	NULL,			/* swapin */
};
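/*
 * Pick the vma operations for a file-backed mapping: a writable shared
 * mapping gets the full file_shared_mmap set and must be page aligned,
 * anything else only ever needs nopage and merely has to be aligned to
 * the filesystem block size.  A filesystem of this generation would
 * typically plug this function into the mmap slot of its
 * file_operations - an illustrative sketch, not part of this file:
 *
 *	static struct file_operations ext2_file_operations = {
 *		...
 *		generic_mmap,		(the mmap slot)
 *		...
 *	};
 */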
int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
{
	struct vm_operations_struct * ops;

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
		ops = &file_shared_mmap;
		/* page sharing only works with page-aligned offsets */
		if (vma->vm_offset & (PAGE_SIZE - 1))
			return -EINVAL;
	} else {
		ops = &file_private_mmap;
		if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
			return -EINVAL;
	}
	if (!inode->i_sb || !S_ISREG(inode->i_mode))
		return -EACCES;
	if (!inode->i_op || !inode->i_op->bmap)
		return -ENOEXEC;
	if (!IS_RDONLY(inode)) {
		inode->i_atime = CURRENT_TIME;
		inode->i_dirt = 1;
	}
	vma->vm_inode = inode;
	inode->i_count++;
	vma->vm_ops = ops;
	return 0;
}
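/*
 * Sync the part of [start, end) that lies inside one vma.  Anonymous
 * areas (no inode) have nothing to write back; for MS_SYNC the inode
 * itself is flushed to disk as well once the page walk is done.
 */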
static int msync_interval(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, int flags)
{
	if (!vma->vm_inode)
		return 0;
	if (vma->vm_ops->sync) {
		int error;
		error = vma->vm_ops->sync(vma, start, end-start, flags);
		if (error)
			return error;
		if (flags & MS_SYNC)
			return file_fsync(vma->vm_inode, NULL);
		return 0;
	}
	return 0;
}
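/*
 * The msync() system call: write back (and with MS_INVALIDATE, tear
 * down) all pages mapping the file in [start, start+len).  The range
 * may span several vmas; holes are tolerated during the walk but make
 * the call return -EFAULT once all mapped parts have been synced.
 */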
asmlinkage int sys_msync(unsigned long start, size_t len, int flags)
{
	unsigned long end;
	struct vm_area_struct * vma;
	int unmapped_error, error;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = (len + ~PAGE_MASK) & PAGE_MASK;
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
		return -EINVAL;
	if (end == start)
		return 0;
	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -EFAULT at the end.
	 */
	vma = find_vma(current, start);
	unmapped_error = 0;
	for (;;) {
		/* Still start < end. */
		if (!vma)
			return -EFAULT;
		/* Here start < vma->vm_end. */
		if (start < vma->vm_start) {
			unmapped_error = -EFAULT;
			start = vma->vm_start;
		}
		/* Here vma->vm_start <= start < vma->vm_end. */
		if (end <= vma->vm_end) {
			if (start < end) {
				error = msync_interval(vma, start, end, flags);
				if (error)
					return error;
			}
			return unmapped_error;
		}
		/* Here vma->vm_start <= start < vma->vm_end < end. */
		error = msync_interval(vma, start, vma->vm_end, flags);
		if (error)
			return error;
		start = vma->vm_end;
		vma = vma->vm_next;
	}
}