This source file includes the following definitions:
- multi_bmap
- filemap_nopage
- filemap_sync_page
- filemap_swapout
- filemap_swapin
- filemap_sync_pte
- filemap_sync_pte_range
- filemap_sync_pmd_range
- filemap_sync
- filemap_unmap
- filemap_close
- generic_mmap
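
/*
 * Generic file mmap support: demand-paging file data into memory
 * with bmap()/bread_page(), and writing dirty pages of shared
 * mappings back to the file at sync/unmap/close/swapout time.
 */
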
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/pgtable.h>

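/*
 * Find the device blocks that back one page of the file: one
 * bmap() call per filesystem block.  "block" is a byte offset
 * into the file on entry, "shift" is the blocksize in bits.
 */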
static inline void multi_bmap(struct inode * inode, unsigned int block, unsigned int * nr, int shift)
{
	int i = PAGE_SIZE >> shift;
	block >>= shift;
	do {
		*nr = bmap(inode, block);
		i--;
		block++;
		nr++;
	} while (i > 0);
}

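/*
 * Fill in one page of a file mapping on a page fault: look up
 * the backing blocks with multi_bmap() and read them into the
 * page with bread_page().
 */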
static unsigned long filemap_nopage(struct vm_area_struct * area, unsigned long address,
	unsigned long page, int no_share)
{
	struct inode * inode = area->vm_inode;
	int nr[PAGE_SIZE/512];

	multi_bmap(inode, (address & PAGE_MASK) - area->vm_start + area->vm_offset, nr,
		inode->i_sb->s_blocksize_bits);
	return bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, no_share);
}

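/*
 * Write one page of a shared mapping back to the file.  If the
 * page still doubles as a buffer-cache page, marking the buffer
 * heads dirty is enough; otherwise the backing blocks are looked
 * up again and written out directly with bwrite_page().
 */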
static void filemap_sync_page(struct vm_area_struct * vma,
	unsigned long offset,
	unsigned long page)
{
	struct inode * inode;
	int nr[PAGE_SIZE/512];
	struct buffer_head * bh;

	bh = buffer_pages[MAP_NR(page)];
	if (bh) {
		/* the page is still in the buffer cache: mark its buffer heads dirty */
		struct buffer_head * tmp = bh;
		do {
			mark_buffer_dirty(tmp, 0);
			tmp = tmp->b_this_page;
		} while (tmp != bh);
		return;
	}
	inode = vma->vm_inode;
	multi_bmap(inode, offset, nr, inode->i_sb->s_blocksize_bits);
	bwrite_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize);
}

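/*
 * "Swap out" a page of a shared file mapping by writing it back
 * to the file instead of to swap.  While the write is in flight,
 * a swap entry encoding the physical page number is kept in the
 * page table so that filemap_swapin() can re-use the page; the
 * entry is cleared only if it is still unchanged afterwards.
 */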
void filemap_swapout(struct vm_area_struct * vma,
	unsigned long offset,
	pte_t *page_table)
{
	unsigned long page = pte_page(*page_table);
	unsigned long entry = SWP_ENTRY(SHM_SWP_TYPE, MAP_NR(page));

	pte_val(*page_table) = entry;
	invalidate();
	filemap_sync_page(vma, offset, page);
	if (pte_val(*page_table) == entry)
		pte_clear(page_table);
}

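/*
 * Fault a page back in while (or after) filemap_swapout() writes
 * it out: the swap entry encodes the physical page number, so we
 * just take another reference and map the page back in dirty.
 */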
static pte_t filemap_swapin(struct vm_area_struct * vma,
	unsigned long offset,
	unsigned long entry)
{
	unsigned long page = SWP_OFFSET(entry);

	mem_map[page]++;
	page = (page << PAGE_SHIFT) + PAGE_OFFSET;
	return pte_mkdirty(mk_pte(page,vma->vm_page_prot));
}

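/*
 * Write back a single pte if it is present and dirty.  With
 * MS_INVALIDATE the mapping is removed outright; otherwise the
 * pte is just cleaned and an extra page reference is taken for
 * the duration of the write, dropped again by free_page().
 */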
static inline void filemap_sync_pte(pte_t * pte, struct vm_area_struct *vma,
	unsigned long address, unsigned int flags)
{
	pte_t page = *pte;

	if (!pte_present(page))
		return;
	if (!pte_dirty(page))
		return;
	if (flags & MS_INVALIDATE) {
		pte_clear(pte);
	} else {
		mem_map[MAP_NR(pte_page(page))]++;
		*pte = pte_mkclean(page);
	}
	filemap_sync_page(vma, address - vma->vm_start, pte_page(page));
	free_page(pte_page(page));
}

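/*
 * The two range walkers below apply filemap_sync_pte() to every
 * pte covered by one pmd entry, and to every pmd covered by one
 * page directory entry, respectively.
 */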
static inline void filemap_sync_pte_range(pmd_t * pmd,
	unsigned long address, unsigned long size,
	struct vm_area_struct *vma, unsigned long offset, unsigned int flags)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		printk("filemap_sync_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	offset += address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		filemap_sync_pte(pte, vma, address + offset, flags);
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
}

static inline void filemap_sync_pmd_range(pgd_t * pgd,
	unsigned long address, unsigned long size,
	struct vm_area_struct *vma, unsigned int flags)
{
	pmd_t * pmd;
	unsigned long offset, end;

	if (pgd_none(*pgd))
		return;
	if (pgd_bad(*pgd)) {
		printk("filemap_sync_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd));
		pgd_clear(pgd);
		return;
	}
	pmd = pmd_offset(pgd, address);
	offset = address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}

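/*
 * Write back (or, with MS_INVALIDATE, throw away) all dirty pages
 * in the given range of the mapping, then flush the TLB.
 */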
static void filemap_sync(struct vm_area_struct * vma, unsigned long address,
	size_t size, unsigned int flags)
{
	pgd_t * dir;
	unsigned long end = address + size;

	dir = pgd_offset(current, address);
	while (address < end) {
		filemap_sync_pmd_range(dir, address, end - address, vma, flags);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	invalidate();
	return;
}

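/*
 * Sync any dirty pages in the range before it is unmapped.
 */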
static void filemap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
{
	filemap_sync(vma, start, len, MS_ASYNC);
}

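/*
 * Sync the whole mapping back to the file when it is closed.
 */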
static void filemap_close(struct vm_area_struct * vma)
{
	filemap_sync(vma, vma->vm_start, vma->vm_end - vma->vm_start, MS_ASYNC);
}

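/*
 * Shared mappings need to be able to do the right thing at
 * close/unmap/sync time, and to swap to the backing file.
 */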
static struct vm_operations_struct file_shared_mmap = {
	NULL,			/* open */
	filemap_close,		/* close */
	filemap_unmap,		/* unmap */
	NULL,			/* protect */
	filemap_sync,		/* sync */
	NULL,			/* advise */
	filemap_nopage,		/* nopage */
	NULL,			/* wppage */
	filemap_swapout,	/* swapout */
	filemap_swapin,		/* swapin */
};

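/*
 * Private mappings just need to be able to load in the map.
 * (This vector also serves shared mappings that can never gain
 * write permission.)
 */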
static struct vm_operations_struct file_private_mmap = {
	NULL,			/* open */
	NULL,			/* close */
	NULL,			/* unmap */
	NULL,			/* protect */
	NULL,			/* sync */
	NULL,			/* advise */
	filemap_nopage,		/* nopage */
	NULL,			/* wppage */
	NULL,			/* swapout */
	NULL,			/* swapin */
};

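/*
 * Generic mmap entry point for regular files on filesystems that
 * support bmap(): validate the request, pick the shared or the
 * private operations vector, and attach the inode to the vma.
 */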
int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
{
	struct vm_operations_struct * ops;

	/* check i_sb before dereferencing it for the alignment test */
	if (!inode->i_sb || !S_ISREG(inode->i_mode))
		return -EACCES;
	if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
		return -EINVAL;
	if (!inode->i_op || !inode->i_op->bmap)
		return -ENOEXEC;
	ops = &file_private_mmap;
	if (vma->vm_flags & VM_SHARED) {
		if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
			ops = &file_shared_mmap;
	}
	if (!IS_RDONLY(inode)) {
		inode->i_atime = CURRENT_TIME;
		inode->i_dirt = 1;
	}
	vma->vm_inode = inode;
	inode->i_count++;
	vma->vm_ops = ops;
	return 0;
}