This source file includes following definitions.
- multi_bmap
- filemap_nopage
- filemap_sync_page
- filemap_swapout
- filemap_swapin
- filemap_sync_pte
- filemap_sync_pte_range
- filemap_sync_pmd_range
- filemap_sync
- filemap_unmap
- filemap_close
- generic_mmap
1
2
3
4
5
6
7
8
9
10
11
12 #include <linux/stat.h>
13 #include <linux/sched.h>
14 #include <linux/kernel.h>
15 #include <linux/mm.h>
16 #include <linux/shm.h>
17 #include <linux/errno.h>
18 #include <linux/mman.h>
19 #include <linux/string.h>
20 #include <linux/malloc.h>
21
22 #include <asm/segment.h>
23 #include <asm/system.h>
24 #include <asm/pgtable.h>
25
26
27
28
29
30
31 static inline void multi_bmap(struct inode * inode, unsigned int block, unsigned int * nr, int shift)
32 {
33 int i = PAGE_SIZE >> shift;
34 block >>= shift;
35 do {
36 *nr = bmap(inode, block);
37 i--;
38 block++;
39 nr++;
40 } while (i > 0);
41 }
42
43 static unsigned long filemap_nopage(struct vm_area_struct * area, unsigned long address,
44 unsigned long page, int no_share)
45 {
46 struct inode * inode = area->vm_inode;
47 int nr[PAGE_SIZE/512];
48
49 multi_bmap(inode, (address & PAGE_MASK) - area->vm_start + area->vm_offset, nr,
50 inode->i_sb->s_blocksize_bits);
51 return bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, no_share);
52 }
53
54
55
56
57
58
59
60
61
62
63
64
65 static void filemap_sync_page(struct vm_area_struct * vma,
66 unsigned long offset,
67 unsigned long page)
68 {
69 struct inode * inode;
70 int nr[PAGE_SIZE/512];
71 struct buffer_head * bh;
72
73 bh = buffer_pages[MAP_NR(page)];
74 if (bh) {
75
76 struct buffer_head * tmp = bh;
77 do {
78 mark_buffer_dirty(tmp, 0);
79 tmp = tmp->b_this_page;
80 } while (tmp != bh);
81 return;
82 }
83 inode = vma->vm_inode;
84 offset += vma->vm_offset;
85 multi_bmap(inode, offset, nr, inode->i_sb->s_blocksize_bits);
86 bwrite_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize);
87 }
88
89
90
91
92
93
94
95
96
97
98
/*
 * Page-out for file-backed shared mappings: the page already has
 * backing store in the file, so write it there instead of to swap.
 *
 * The pte is first replaced with an SHM-style swap entry and the TLB
 * flushed, then the page is written back.  If the pte still holds the
 * same entry afterwards (nothing faulted it back in during the write,
 * which may block), the entry is cleared for good so a later fault
 * re-reads the page from the file.
 */
void filemap_swapout(struct vm_area_struct * vma,
	unsigned long offset,
	pte_t *page_table)
{
	unsigned long page = pte_page(*page_table);
	unsigned long entry = SWP_ENTRY(SHM_SWP_TYPE, MAP_NR(page));

	pte_val(*page_table) = entry;
	invalidate();	/* flush TLBs before the mapping changes take effect */
	filemap_sync_page(vma, offset, page);
	if (pte_val(*page_table) == entry)
		pte_clear(page_table);
}
112
113
114
115
116
117
118
119 static pte_t filemap_swapin(struct vm_area_struct * vma,
120 unsigned long offset,
121 unsigned long entry)
122 {
123 unsigned long page = SWP_OFFSET(entry);
124
125 mem_map[page]++;
126 page = (page << PAGE_SHIFT) + PAGE_OFFSET;
127 return pte_mkdirty(mk_pte(page,vma->vm_page_prot));
128 }
129
/*
 * Sync a single pte: if it maps a present, dirty page, write that page
 * back to the file.
 *
 * MS_INVALIDATE tears the pte down entirely; the free_page() below
 * then consumes the mapping's own reference.  Otherwise the pte is
 * only cleaned: an extra reference is taken first so the page survives
 * the write-back, and free_page() merely drops that temporary
 * reference again.
 */
static inline void filemap_sync_pte(pte_t * pte, struct vm_area_struct *vma,
	unsigned long address, unsigned int flags)
{
	pte_t page = *pte;

	if (!pte_present(page))
		return;
	if (!pte_dirty(page))
		return;
	if (flags & MS_INVALIDATE) {
		pte_clear(pte);
	} else {
		/* keep the page alive across the (possibly blocking) write */
		mem_map[MAP_NR(pte_page(page))]++;
		*pte = pte_mkclean(page);
	}
	/* address is vma-relative here; filemap_sync_page adds vm_offset */
	filemap_sync_page(vma, address - vma->vm_start, pte_page(page));
	free_page(pte_page(page));
}
148
/*
 * Sync every pte in one pmd that falls inside [address, address+size).
 * "offset" carries the pmd-aligned base of the range so that
 * address+offset handed to filemap_sync_pte() is again the full
 * vma-relative address after "address" is reduced to a within-pmd
 * offset below.
 */
static inline void filemap_sync_pte_range(pmd_t * pmd,
	unsigned long address, unsigned long size,
	struct vm_area_struct *vma, unsigned long offset, unsigned int flags)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		printk("filemap_sync_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	offset += address & PMD_MASK;	/* remember the pmd-aligned base */
	address &= ~PMD_MASK;		/* ...and walk relative to it */
	end = address + size;
	if (end > PMD_SIZE)		/* never cross into the next pmd */
		end = PMD_SIZE;
	do {
		filemap_sync_pte(pte, vma, address + offset, flags);
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
}
175
/*
 * Sync every pmd in one pgd that falls inside [address, address+size).
 * Mirrors filemap_sync_pte_range() one level up: "offset" preserves
 * the aligned base so the lower levels can reconstruct the absolute
 * address.
 *
 * NOTE(review): the masks here use PMD_MASK while the size clamp uses
 * PGDIR_SIZE — equivalent on folded two-level page tables where
 * PMD_SIZE == PGDIR_SIZE; confirm against the arch's pgtable.h for
 * true three-level configurations.
 */
static inline void filemap_sync_pmd_range(pgd_t * pgd,
	unsigned long address, unsigned long size,
	struct vm_area_struct *vma, unsigned int flags)
{
	pmd_t * pmd;
	unsigned long offset, end;

	if (pgd_none(*pgd))
		return;
	if (pgd_bad(*pgd)) {
		printk("filemap_sync_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd));
		pgd_clear(pgd);
		return;
	}
	pmd = pmd_offset(pgd, address);
	offset = address & PMD_MASK;	/* aligned base of this range */
	address &= ~PMD_MASK;		/* walk relative to that base */
	end = address + size;
	if (end > PGDIR_SIZE)		/* never cross into the next pgd */
		end = PGDIR_SIZE;
	do {
		filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags);
		address = (address + PMD_SIZE) & PMD_MASK;	/* next pmd boundary */
		pmd++;
	} while (address < end);
}
202
203 static void filemap_sync(struct vm_area_struct * vma, unsigned long address,
204 size_t size, unsigned int flags)
205 {
206 pgd_t * dir;
207 unsigned long end = address + size;
208
209 dir = pgd_offset(current, address);
210 while (address < end) {
211 filemap_sync_pmd_range(dir, address, end - address, vma, flags);
212 address = (address + PGDIR_SIZE) & PGDIR_MASK;
213 dir++;
214 }
215 invalidate();
216 return;
217 }
218
219
220
221
/*
 * Called when (part of) a shared file mapping is unmapped: push any
 * dirty pages in the range back to the file, asynchronously.
 */
static void filemap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
{
	filemap_sync(vma, start, len, MS_ASYNC);
}
226
227
228
229
/*
 * Called when a shared file mapping goes away entirely: write back
 * every dirty page in the whole vma, asynchronously.
 */
static void filemap_close(struct vm_area_struct * vma)
{
	filemap_sync(vma, vma->vm_start, vma->vm_end - vma->vm_start, MS_ASYNC);
}
234
235
236
237
238
239
/*
 * Operations for writable shared file mappings: dirty pages must be
 * written back to the file on sync/unmap/close, and the file itself
 * serves as the backing store for swapout/swapin.
 *
 * NOTE(review): slot comments assume the conventional field order of
 * struct vm_operations_struct — confirm against <linux/mm.h>.
 */
static struct vm_operations_struct file_shared_mmap = {
	NULL,			/* open */
	filemap_close,		/* close */
	filemap_unmap,		/* unmap */
	NULL,			/* protect */
	filemap_sync,		/* sync */
	NULL,			/* advise */
	filemap_nopage,		/* nopage */
	NULL,			/* wppage */
	filemap_swapout,	/* swapout */
	filemap_swapin,		/* swapin */
};
252
253
254
255
256
257
258
/*
 * Operations for private (and read-only shared) file mappings: pages
 * are only ever read in on fault, never written back, so nopage is the
 * only hook needed.
 *
 * NOTE(review): slot comments assume the conventional field order of
 * struct vm_operations_struct — confirm against <linux/mm.h>.
 */
static struct vm_operations_struct file_private_mmap = {
	NULL,			/* open */
	NULL,			/* close */
	NULL,			/* unmap */
	NULL,			/* protect */
	NULL,			/* sync */
	NULL,			/* advise */
	filemap_nopage,		/* nopage */
	NULL,			/* wppage */
	NULL,			/* swapout */
	NULL,			/* swapin */
};
271
272
273 int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
274 {
275 struct vm_operations_struct * ops;
276
277 if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
278 return -EINVAL;
279 if (!inode->i_sb || !S_ISREG(inode->i_mode))
280 return -EACCES;
281 if (!inode->i_op || !inode->i_op->bmap)
282 return -ENOEXEC;
283 ops = &file_private_mmap;
284 if (vma->vm_flags & VM_SHARED) {
285 if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
286 ops = &file_shared_mmap;
287 }
288 if (!IS_RDONLY(inode)) {
289 inode->i_atime = CURRENT_TIME;
290 inode->i_dirt = 1;
291 }
292 vma->vm_inode = inode;
293 inode->i_count++;
294 vma->vm_ops = ops;
295 return 0;
296 }