This source file includes the following definitions:
- filemap_nopage
- filemap_sync_page
- filemap_sync_pte
- filemap_sync_pte_range
- filemap_sync_pmd_range
- filemap_sync
- filemap_unmap
- filemap_close
- filemap_swapout
- generic_mmap
#define THREE_LEVEL
/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994 Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't have to use this:
 * the NFS filesystem does this differently, for example)
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/pgtable.h>
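/*
 * Fill in a missing page for a file mapping: work out which disk
 * blocks back the faulting page, then read them all in at once.
 */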
static unsigned long filemap_nopage(struct vm_area_struct * area, unsigned long address,
	unsigned long page, int no_share)
{
	struct inode * inode = area->vm_inode;
	unsigned int block;
	int nr[8];
	int i, *p;

	/* Turn the fault address into the first file block backing this page */
	address &= PAGE_MASK;
	block = address - area->vm_start + area->vm_offset;
	block >>= inode->i_sb->s_blocksize_bits;

	/* Collect the device block number of every block in the page */
	i = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
	p = nr;
	do {
		*p = bmap(inode,block);
		i--;
		block++;
		p++;
	} while (i > 0);

	/* Read all the blocks into the page through the buffer cache */
	return bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, no_share);
}
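/*
 * Write a mapped page back to the file: if the page is still in the
 * buffer cache, marking its buffers dirty is enough and the normal
 * buffer writeback will do the actual IO later.
 */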
static void filemap_sync_page(struct vm_area_struct * vma,
	unsigned long offset,
	unsigned long page)
{
	struct buffer_head * bh;

	printk("msync: %ld: [%08lx]\n", offset, page);
	bh = buffer_pages[MAP_NR(page)];
	if (bh) {
		/* The page is in the buffer cache: just mark every buffer
		   in it dirty and let writeback get to them later. */
		struct buffer_head * tmp = bh;
		do {
			mark_buffer_dirty(tmp, 0);
			tmp = tmp->b_this_page;
		} while (tmp != bh);
		return;
	}
	/* A page without buffers (a non-shared copy) can't be synced yet */
	printk("Can't handle non-shared page yet\n");
	return;
}
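/*
 * Sync one pte: only dirty, present pages need writing back, via
 * filemap_sync_page(). MS_INVALIDATE additionally drops the mapping.
 */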
static inline void filemap_sync_pte(pte_t * pte, struct vm_area_struct *vma,
	unsigned long address, unsigned int flags)
{
	pte_t page = *pte;

	if (!pte_present(page))
		return;
	if (!pte_dirty(page))
		return;
	if (flags & MS_INVALIDATE) {
		/* Invalidate: drop the mapping; the free_page() below then
		   releases the reference the page table held. */
		pte_clear(pte);
	} else {
		/* Keep the mapping: take an extra reference and mark the
		   pte clean, so the free_page() below is a net no-op. */
		mem_map[MAP_NR(pte_page(page))]++;
		*pte = pte_mkclean(page);
	}
	filemap_sync_page(vma, address - vma->vm_start, pte_page(page));
	free_page(pte_page(page));
}
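/*
 * Sync the ptes of one pmd entry that fall inside [address, address+size).
 */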
static inline void filemap_sync_pte_range(pmd_t * pmd,
	unsigned long address, unsigned long size,
	struct vm_area_struct *vma, unsigned long offset, unsigned int flags)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		printk("filemap_sync_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	offset += address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		filemap_sync_pte(pte, vma, address + offset, flags);
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
}
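/*
 * Sync the pmd entries under one page directory entry, range-limited
 * in the same way as above.
 */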
static inline void filemap_sync_pmd_range(pgd_t * pgd,
	unsigned long address, unsigned long size,
	struct vm_area_struct *vma, unsigned int flags)
{
	pmd_t * pmd;
	unsigned long offset, end;

	if (pgd_none(*pgd))
		return;
	if (pgd_bad(*pgd)) {
		printk("filemap_sync_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd));
		pgd_clear(pgd);
		return;
	}
	pmd = pmd_offset(pgd, address);
	/* Split the address at the pgd boundary, not the pmd boundary:
	   the loop below must stay inside this one directory entry. */
	offset = address & PGDIR_MASK;
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}
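/*
 * Sync a whole range of a shared file mapping by walking the page
 * tables, one page directory entry at a time.
 */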
static void filemap_sync(struct vm_area_struct * vma, unsigned long address,
	size_t size, unsigned int flags)
{
	pgd_t * dir;
	unsigned long end = address + size;

	dir = pgd_offset(current, address);
	while (address < end) {
		filemap_sync_pmd_range(dir, address, end - address, vma, flags);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	/* The walk may have changed ptes: flush the TLB */
	invalidate();
	return;
}
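/*
 * Handle (potentially partial) area unmaps: write the affected range
 * back before the mapping goes away.
 */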
static void filemap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
{
	filemap_sync(vma, start, len, MS_ASYNC);
}
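/*
 * Handle complete area closes: sync the whole mapping.
 */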
static void filemap_close(struct vm_area_struct * vma)
{
	filemap_sync(vma, vma->vm_start, vma->vm_end - vma->vm_start, MS_ASYNC);
}
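/*
 * Swapping out a dirty shared page would require writing it back to
 * the file; that isn't implemented, so the entry is simply dropped.
 */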
void filemap_swapout(struct vm_area_struct * vma,
	unsigned long offset,
	pte_t *page_table)
{
	printk("swapout not implemented on shared files..\n");
	pte_clear(page_table);
}
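/*
 * Shared mappings need to do the right thing at close, unmap and
 * sync time, and need the nopage and swapout hooks as well.
 */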
static struct vm_operations_struct file_shared_mmap = {
	NULL,			/* open */
	filemap_close,		/* close */
	filemap_unmap,		/* unmap */
	NULL,			/* protect */
	filemap_sync,		/* sync */
	NULL,			/* advise */
	filemap_nopage,		/* nopage */
	NULL,			/* wppage */
	filemap_swapout,	/* swapout */
	NULL,			/* swapin */
};
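/*
 * Private mappings only need to be able to load pages in: any dirty
 * pages are private copies, so there is nothing to sync back.
 */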
static struct vm_operations_struct file_private_mmap = {
	NULL,			/* open */
	NULL,			/* close */
	NULL,			/* unmap */
	NULL,			/* protect */
	NULL,			/* sync */
	NULL,			/* advise */
	filemap_nopage,		/* nopage */
	NULL,			/* wppage */
	NULL,			/* swapout */
	NULL,			/* swapin */
};
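/*
 * generic_mmap() is the mmap method for any filesystem that can go
 * through the buffer cache via bmap(): filesystems install it in the
 * mmap slot of their file_operations. It only validates the request
 * and installs the right vm_operations; the actual IO happens lazily
 * at fault time through filemap_nopage().
 */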
int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
{
	struct vm_operations_struct * ops;

	if (!inode->i_sb || !S_ISREG(inode->i_mode))
		return -EACCES;
	/* The mapping must start on a block boundary in the file */
	if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
		return -EINVAL;
	if (!inode->i_op || !inode->i_op->bmap)
		return -ENOEXEC;
	ops = &file_private_mmap;
	if (vma->vm_flags & VM_SHARED) {
		if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) {
			static int nr = 0;
			ops = &file_shared_mmap;
#ifndef SHARED_MMAP_REALLY_WORKS
			/* Shared writable mappings don't work yet: complain
			   (at most five times) and refuse the mapping. */
			if (nr++ < 5)
				printk("%s tried to do a shared writeable mapping\n", current->comm);
			return -EINVAL;
#endif
		}
	}
	if (!IS_RDONLY(inode)) {
		inode->i_atime = CURRENT_TIME;
		inode->i_dirt = 1;
	}
	vma->vm_inode = inode;
	inode->i_count++;
	vma->vm_ops = ops;
	return 0;
}