This source file includes following definitions.
- multi_bmap
- filemap_nopage
- filemap_sync_page
- filemap_swapout
- filemap_swapin
- filemap_sync_pte
- filemap_sync_pte_range
- filemap_sync_pmd_range
- filemap_sync
- filemap_unmap
- filemap_close
- generic_mmap
1
2
3
4
5
6
7
8
9
10
11
12 #include <linux/stat.h>
13 #include <linux/sched.h>
14 #include <linux/kernel.h>
15 #include <linux/mm.h>
16 #include <linux/shm.h>
17 #include <linux/errno.h>
18 #include <linux/mman.h>
19 #include <linux/string.h>
20 #include <linux/malloc.h>
21
22 #include <asm/segment.h>
23 #include <asm/system.h>
24 #include <asm/pgtable.h>
25
26
27
28
29
30
31 static inline void multi_bmap(struct inode * inode, unsigned int block, unsigned int * nr, int shift)
32 {
33 int i = PAGE_SIZE >> shift;
34 block >>= shift;
35 do {
36 *nr = bmap(inode, block);
37 i--;
38 block++;
39 nr++;
40 } while (i > 0);
41 }
42
43 static unsigned long filemap_nopage(struct vm_area_struct * area, unsigned long address,
44 unsigned long page, int no_share)
45 {
46 struct inode * inode = area->vm_inode;
47 int nr[PAGE_SIZE/512];
48
49 multi_bmap(inode, (address & PAGE_MASK) - area->vm_start + area->vm_offset, nr,
50 inode->i_sb->s_blocksize_bits);
51 return bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, no_share);
52 }
53
54
55
56
57
58
59
60
61
62
63
64
65 static void filemap_sync_page(struct vm_area_struct * vma,
66 unsigned long offset,
67 unsigned long page)
68 {
69 struct inode * inode;
70 int nr[PAGE_SIZE/512];
71 struct buffer_head * bh;
72
73 bh = buffer_pages[MAP_NR(page)];
74 if (bh) {
75
76 struct buffer_head * tmp = bh;
77 do {
78 mark_buffer_dirty(tmp, 0);
79 tmp = tmp->b_this_page;
80 } while (tmp != bh);
81 return;
82 }
83 inode = vma->vm_inode;
84 multi_bmap(inode, offset, nr, inode->i_sb->s_blocksize_bits);
85 bwrite_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize);
86 }
87
88
89
90
91
92
93
94
95
96
97
/*
 * Swap out one page of a shared file mapping: write the page back to
 * the file via filemap_sync_page(), then clear the pte — but only if
 * nothing changed it while we were writing.
 */
void filemap_swapout(struct vm_area_struct * vma,
	unsigned long offset,
	pte_t *page_table)
{
	unsigned long page = pte_page(*page_table);
	/* Encode the page's map number as a swap entry of type SHM_SWP_TYPE. */
	unsigned long entry = SWP_ENTRY(SHM_SWP_TYPE, MAP_NR(page));

	/* Install the swap entry BEFORE writing the page out, so a change
	 * to the pte during the write is detectable below. */
	pte_val(*page_table) = entry;
	filemap_sync_page(vma, offset, page);
	/* Only clear the pte if it still holds our entry; if it differs,
	 * the page was presumably faulted back in meanwhile — leave it. */
	if (pte_val(*page_table) == entry)
		pte_clear(page_table);
}
110
111
112
113
114
115
116
117 static pte_t filemap_swapin(struct vm_area_struct * vma,
118 unsigned long offset,
119 unsigned long entry)
120 {
121 unsigned long page = SWP_OFFSET(entry);
122
123 mem_map[page]++;
124 page = (page << PAGE_SHIFT) + PAGE_OFFSET;
125 return pte_mkdirty(mk_pte(page,vma->vm_page_prot));
126 }
127
/*
 * Sync one pte of a file mapping: present, dirty pages are written
 * back to the file.  With MS_INVALIDATE the pte is torn down as well;
 * otherwise the pte is merely marked clean.
 */
static inline void filemap_sync_pte(pte_t * pte, struct vm_area_struct *vma,
	unsigned long address, unsigned int flags)
{
	pte_t page = *pte;

	if (!pte_present(page))
		return;
	if (!pte_dirty(page))
		return;
	if (flags & MS_INVALIDATE) {
		/* Drop the mapping; the pte's page reference is consumed by
		 * the free_page() below. */
		pte_clear(pte);
	} else {
		/* Keep the mapping: take an extra reference so the page
		 * survives the write, balancing the free_page() below. */
		mem_map[MAP_NR(pte_page(page))]++;
		*pte = pte_mkclean(page);
	}
	/* address - vma->vm_start is the offset within the mapping. */
	filemap_sync_page(vma, address - vma->vm_start, pte_page(page));
	free_page(pte_page(page));
}
146
/*
 * Sync every pte covered by one pmd entry over [address, address+size).
 * 'offset' accumulates the mapping-relative base so filemap_sync_pte()
 * receives the full virtual address of each page.
 */
static inline void filemap_sync_pte_range(pmd_t * pmd,
	unsigned long address, unsigned long size,
	struct vm_area_struct *vma, unsigned long offset, unsigned int flags)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		printk("filemap_sync_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	/* Split the address at the pmd boundary: 'offset' absorbs the
	 * pmd-aligned base, 'address' keeps only the in-pmd remainder so
	 * it can be bounded by PMD_SIZE below. */
	offset += address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;	/* don't run past this pmd entry */
	do {
		filemap_sync_pte(pte, vma, address + offset, flags);
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
}
173
174 static inline void filemap_sync_pmd_range(pgd_t * pgd,
175 unsigned long address, unsigned long size,
176 struct vm_area_struct *vma, unsigned int flags)
177 {
178 pmd_t * pmd;
179 unsigned long offset, end;
180
181 if (pgd_none(*pgd))
182 return;
183 if (pgd_bad(*pgd)) {
184 printk("filemap_sync_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd));
185 pgd_clear(pgd);
186 return;
187 }
188 pmd = pmd_offset(pgd, address);
189 offset = address & PMD_MASK;
190 address &= ~PMD_MASK;
191 end = address + size;
192 if (end > PGDIR_SIZE)
193 end = PGDIR_SIZE;
194 do {
195 filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags);
196 address = (address + PMD_SIZE) & PMD_MASK;
197 pmd++;
198 } while (address < end);
199 }
200
201 static void filemap_sync(struct vm_area_struct * vma, unsigned long address,
202 size_t size, unsigned int flags)
203 {
204 pgd_t * dir;
205 unsigned long end = address + size;
206
207 dir = pgd_offset(current, address);
208 while (address < end) {
209 filemap_sync_pmd_range(dir, address, end - address, vma, flags);
210 address = (address + PGDIR_SIZE) & PGDIR_MASK;
211 dir++;
212 }
213 invalidate();
214 return;
215 }
216
217
218
219
/*
 * unmap hook for shared file mappings: push any dirty pages in
 * [start, start+len) back to the file (asynchronously) before the
 * range goes away.
 */
static void filemap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
{
	filemap_sync(vma, start, len, MS_ASYNC);
}
224
225
226
227
/*
 * close hook for shared file mappings: write back the whole mapping
 * (asynchronously) when the vma is closed.
 */
static void filemap_close(struct vm_area_struct * vma)
{
	filemap_sync(vma, vma->vm_start, vma->vm_end - vma->vm_start, MS_ASYNC);
}
232
233
234
235
236
237
/*
 * Operations for shared (writable) file mappings: dirty pages must be
 * written back to the file, so close/unmap/sync/swapout/swapin are all
 * hooked to the filemap_* helpers above.
 *
 * NOTE(review): slot comments assume the vm_operations_struct field
 * order of this kernel era (open, close, unmap, protect, sync, advise,
 * nopage, wppage, swapout, swapin) — confirm against <linux/mm.h>.
 */
static struct vm_operations_struct file_shared_mmap = {
	NULL,			/* open */
	filemap_close,		/* close */
	filemap_unmap,		/* unmap */
	NULL,			/* protect */
	filemap_sync,		/* sync */
	NULL,			/* advise */
	filemap_nopage,		/* nopage */
	NULL,			/* wppage */
	filemap_swapout,	/* swapout */
	filemap_swapin,		/* swapin */
};
250
251
252
253
254
255
256
/*
 * Operations for private (or read-only shared) file mappings: pages
 * are only ever read from the file, so just the nopage fault handler
 * is needed — modifications stay in anonymous copies and are never
 * written back.
 *
 * NOTE(review): slot order assumed to match <linux/mm.h> — confirm.
 */
static struct vm_operations_struct file_private_mmap = {
	NULL,			/* open */
	NULL,			/* close */
	NULL,			/* unmap */
	NULL,			/* protect */
	NULL,			/* sync */
	NULL,			/* advise */
	filemap_nopage,		/* nopage */
	NULL,			/* wppage */
	NULL,			/* swapout */
	NULL,			/* swapin */
};
269
270
271 int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
272 {
273 struct vm_operations_struct * ops;
274
275 if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
276 return -EINVAL;
277 if (!inode->i_sb || !S_ISREG(inode->i_mode))
278 return -EACCES;
279 if (!inode->i_op || !inode->i_op->bmap)
280 return -ENOEXEC;
281 ops = &file_private_mmap;
282 if (vma->vm_flags & VM_SHARED) {
283 if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
284 ops = &file_shared_mmap;
285 }
286 if (!IS_RDONLY(inode)) {
287 inode->i_atime = CURRENT_TIME;
288 inode->i_dirt = 1;
289 }
290 vma->vm_inode = inode;
291 inode->i_count++;
292 vma->vm_ops = ops;
293 return 0;
294 }