This source file includes the following definitions:
- filemap_nopage
- filemap_sync_page
- filemap_sync_pte
- filemap_sync_pte_range
- filemap_sync_pmd_range
- filemap_sync
- filemap_unmap
- filemap_close
- filemap_swapout
- generic_mmap
1
2
3
4
5
6
7
8
9
10
11
12 #include <linux/stat.h>
13 #include <linux/sched.h>
14 #include <linux/kernel.h>
15 #include <linux/mm.h>
16 #include <linux/shm.h>
17 #include <linux/errno.h>
18 #include <linux/mman.h>
19 #include <linux/string.h>
20 #include <linux/malloc.h>
21
22 #include <asm/segment.h>
23 #include <asm/system.h>
24 #include <asm/pgtable.h>
25
26
27
28
29
30
31 static unsigned long filemap_nopage(struct vm_area_struct * area, unsigned long address,
32 unsigned long page, int no_share)
33 {
34 struct inode * inode = area->vm_inode;
35 unsigned int block;
36 int nr[8];
37 int i, *p;
38
39 address &= PAGE_MASK;
40 block = address - area->vm_start + area->vm_offset;
41 block >>= inode->i_sb->s_blocksize_bits;
42 i = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
43 p = nr;
44 do {
45 *p = bmap(inode,block);
46 i--;
47 block++;
48 p++;
49 } while (i > 0);
50 return bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, no_share);
51 }
52
53
54
55
56
57
58
59
60
61
62
63
/*
 * Write back one page of a shared file mapping during msync().
 *
 * If the page still has buffers attached (buffer_pages[] maps the
 * page's mem_map index to its buffer-head ring), every buffer covering
 * the page is marked dirty; the normal buffer writeback will flush
 * them later.  Pages without attached buffers cannot be handled yet.
 */
static void filemap_sync_page(struct vm_area_struct * vma,
	unsigned long offset,
	unsigned long page)
{
	struct buffer_head * bh;

	printk("msync: %ld: [%08lx]\n", offset, page);	/* debug trace */
	bh = buffer_pages[MAP_NR(page)];
	if (bh) {
		/* just mark every buffer head in the page's circular list dirty */
		struct buffer_head * tmp = bh;
		do {
			mark_buffer_dirty(tmp, 0);
			tmp = tmp->b_this_page;	/* b_this_page links the page's buffers in a ring */
		} while (tmp != bh);
		return;
	}
	/* no buffer heads attached: writeback path not implemented */
	printk("Can't handle non-shared page yet\n");
	return;
}
85
/*
 * Sync (and optionally invalidate) a single pte of a shared file
 * mapping.  Only present, dirty ptes need any work.
 *
 * For MS_INVALIDATE the pte is cleared outright and the final
 * free_page() drops the reference the page table held.  Otherwise an
 * extra reference is taken first so the page cannot vanish while it
 * is being written back, the pte is marked clean in place, and
 * free_page() afterwards drops only that temporary reference.
 */
static inline void filemap_sync_pte(pte_t * pte, struct vm_area_struct *vma,
	unsigned long address, unsigned int flags)
{
	pte_t page = *pte;

	if (!pte_present(page))
		return;
	if (!pte_dirty(page))
		return;
	if (flags & MS_INVALIDATE) {
		pte_clear(pte);
	} else {
		/* hold an extra reference while the page is written out */
		mem_map[MAP_NR(pte_page(page))]++;
		*pte = pte_mkclean(page);
	}
	filemap_sync_page(vma, address - vma->vm_start, pte_page(page));
	free_page(pte_page(page));
}
104
/*
 * Walk all ptes of one pmd entry that fall inside [address,
 * address+size) and sync each of them.
 *
 * "offset" accumulates the high address bits stripped off at each
 * level of the walk, so filemap_sync_pte() can reconstruct the full
 * virtual address from the pmd-relative one.
 */
static inline void filemap_sync_pte_range(pmd_t * pmd,
	unsigned long address, unsigned long size,
	struct vm_area_struct *vma, unsigned long offset, unsigned int flags)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		printk("filemap_sync_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
		pmd_clear(pmd);	/* don't trip over the bad entry again */
		return;
	}
	pte = pte_offset(pmd, address);
	offset += address & PMD_MASK;	/* remember the bits we strip below */
	address &= ~PMD_MASK;		/* address is now pmd-relative */
	end = address + size;
	if (end > PMD_SIZE)		/* clamp to the end of this pmd */
		end = PMD_SIZE;
	do {
		filemap_sync_pte(pte, vma, address + offset, flags);
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
}
131
/*
 * Walk all pmd entries of one pgd slot that intersect [address,
 * address+size) and sync their pte ranges.
 *
 * "offset" records the address bits stripped off here so the lower
 * levels can reconstruct the full virtual address.
 */
static inline void filemap_sync_pmd_range(pgd_t * pgd,
	unsigned long address, unsigned long size,
	struct vm_area_struct *vma, unsigned int flags)
{
	pmd_t * pmd;
	unsigned long offset, end;

	if (pgd_none(*pgd))
		return;
	if (pgd_bad(*pgd)) {
		printk("filemap_sync_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd));
		pgd_clear(pgd);	/* don't trip over the bad entry again */
		return;
	}
	pmd = pmd_offset(pgd, address);
	offset = address & PMD_MASK;	/* bits stripped below, passed to the pte walker */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)		/* clamp to the end of this pgd slot */
		end = PGDIR_SIZE;
	do {
		filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags);
		address = (address + PMD_SIZE) & PMD_MASK;	/* advance to the next pmd boundary */
		pmd++;
	} while (address < end);
}
158
159 static void filemap_sync(struct vm_area_struct * vma, unsigned long address,
160 size_t size, unsigned int flags)
161 {
162 pgd_t * dir;
163 unsigned long end = address + size;
164
165 dir = pgd_offset(current, address);
166 while (address < end) {
167 filemap_sync_pmd_range(dir, address, end - address, vma, flags);
168 address = (address + PGDIR_SIZE) & PGDIR_MASK;
169 dir++;
170 }
171 invalidate();
172 return;
173 }
174
175
176
177
/*
 * Called when (part of) a shared file mapping is unmapped: push any
 * dirty pages in the range toward the file with an async sync.
 */
static void filemap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
{
	filemap_sync(vma, start, len, MS_ASYNC);
}
182
183
184
185
/*
 * Called when a shared file mapping is closed: sync the whole vma
 * range asynchronously so dirty pages reach the file.
 */
static void filemap_close(struct vm_area_struct * vma)
{
	filemap_sync(vma, vma->vm_start, vma->vm_end - vma->vm_start, MS_ASYNC);
}
190
191
192
193
194
195
196
197
/*
 * Swap-out hook for shared file mappings.
 *
 * Real writeback is not implemented yet (hence the warning printk):
 * the pte is simply cleared so the page can be reclaimed.  NOTE(review):
 * this appears to discard dirty data in the page — confirm against the
 * callers' expectations before relying on it.
 */
void filemap_swapout(struct vm_area_struct * vma,
	unsigned long offset,
	pte_t *page_table)
{
	printk("swapout not implemented on shared files..\n");
	pte_clear(page_table);
}
205
206
207
208
209
210
/*
 * Operations for shared file mappings: modifications must eventually
 * reach the file, so close/unmap/sync/swapout all funnel into the
 * filemap_sync() machinery above.
 *
 * NOTE(review): the slot names below are inferred from the handlers'
 * signatures and the vm_operations_struct layout — confirm against
 * <linux/mm.h>.
 */
static struct vm_operations_struct file_shared_mmap = {
	NULL,			/* open */
	filemap_close,		/* close */
	filemap_unmap,		/* unmap */
	NULL,			/* protect */
	filemap_sync,		/* sync */
	NULL,			/* advise */
	filemap_nopage,		/* nopage */
	NULL,			/* wppage */
	filemap_swapout,	/* swapout */
	NULL,			/* swapin */
};
223
224
225
226
227
228
229
/*
 * Operations for private (copy-on-write) file mappings: nothing ever
 * needs to be written back to the file, so only demand paging via
 * filemap_nopage is provided.
 *
 * NOTE(review): slot names inferred from the vm_operations_struct
 * layout — confirm against <linux/mm.h>.
 */
static struct vm_operations_struct file_private_mmap = {
	NULL,			/* open */
	NULL,			/* close */
	NULL,			/* unmap */
	NULL,			/* protect */
	NULL,			/* sync */
	NULL,			/* advise */
	filemap_nopage,		/* nopage */
	NULL,			/* wppage */
	NULL,			/* swapout */
	NULL,			/* swapin */
};
242
243
244 int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
245 {
246 struct vm_operations_struct * ops;
247
248 if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
249 return -EINVAL;
250 if (!inode->i_sb || !S_ISREG(inode->i_mode))
251 return -EACCES;
252 if (!inode->i_op || !inode->i_op->bmap)
253 return -ENOEXEC;
254 ops = &file_private_mmap;
255 if (vma->vm_flags & VM_SHARED) {
256 if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) {
257 static int nr = 0;
258 ops = &file_shared_mmap;
259 #ifndef SHARED_MMAP_REALLY_WORKS
260 if (nr++ < 5)
261 printk("%s tried to do a shared writeable mapping\n", current->comm);
262 return -EINVAL;
263 #endif
264 }
265 }
266 if (!IS_RDONLY(inode)) {
267 inode->i_atime = CURRENT_TIME;
268 inode->i_dirt = 1;
269 }
270 vma->vm_inode = inode;
271 inode->i_count++;
272 vma->vm_ops = ops;
273 return 0;
274 }