This source file includes following definitions.
- file_mmap_nopage
- file_mmap_sync_page
- file_mmap_sync
- file_mmap_unmap
- file_mmap_close
- file_mmap_swapout
- generic_mmap
1
2
3
4
5
6
7
8
9
10
11
12 #include <linux/stat.h>
13 #include <linux/sched.h>
14 #include <linux/kernel.h>
15 #include <linux/mm.h>
16 #include <linux/shm.h>
17 #include <linux/errno.h>
18 #include <linux/mman.h>
19 #include <linux/string.h>
20 #include <linux/malloc.h>
21
22 #include <asm/segment.h>
23 #include <asm/system.h>
24
25
26
27
28
29
30 static unsigned long file_mmap_nopage(struct vm_area_struct * area, unsigned long address,
31 unsigned long page, int no_share)
32 {
33 struct inode * inode = area->vm_inode;
34 unsigned int block;
35 int nr[8];
36 int i, *p;
37
38 address &= PAGE_MASK;
39 block = address - area->vm_start + area->vm_offset;
40 block >>= inode->i_sb->s_blocksize_bits;
41 i = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
42 p = nr;
43 do {
44 *p = bmap(inode,block);
45 i--;
46 block++;
47 p++;
48 } while (i > 0);
49 return bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, no_share);
50 }
51
52
53
54
55
56
57
58
59
60
61
62
/*
 * Write back one mapped page.  If the page is backed by the buffer
 * cache (it has an entry in buffer_pages[]), mark every buffer in the
 * page's circular buffer ring dirty so it gets written out later.
 * Pages without buffers (private copies) cannot be synced yet and only
 * produce a diagnostic.
 */
static inline void file_mmap_sync_page(struct vm_area_struct * vma,
	unsigned long offset,
	unsigned long page)
{
	struct buffer_head * bh;

	bh = buffer_pages[MAP_NR(page)];
	if (bh) {
		/* Walk the circular b_this_page list covering this page;
		 * the do/while guarantees the head buffer is marked too. */
		struct buffer_head * tmp = bh;
		do {
			mark_buffer_dirty(tmp, 0);
			tmp = tmp->b_this_page;
		} while (tmp != bh);
		return;
	}
	/* NOTE(review): presumably a non-shared (copied) page — msync
	 * support for these is not implemented. */
	printk("msync: %ld: [%08lx]\n", offset, page);
	printk("Can't handle non-shared page yet\n");
	return;
}
84
/*
 * Sync a range of a shared file mapping back to the file.
 *
 * Walks the page directory/page table entries covering [start,
 * start+size) in the current process and hands every present, dirty
 * page to file_mmap_sync_page().  With MS_INVALIDATE the pte is
 * cleared (dropping the mapping); otherwise the pte is merely marked
 * clean and an extra page reference is taken so the page survives the
 * free_page() after the sync.
 *
 * Note: 'start' is rebased to a vma-relative offset before the loop,
 * so the value passed to file_mmap_sync_page() is an offset within
 * the mapping, not a virtual address.
 */
static void file_mmap_sync(struct vm_area_struct * vma, unsigned long start,
	size_t size, unsigned int flags)
{
	pgd_t * dir;
	unsigned long poff, pcnt;

	size = size >> PAGE_SHIFT;	/* byte count -> page count */
	dir = PAGE_DIR_OFFSET(current,start);
	/* Offset of the first pte within its page table. */
	poff = (start >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
	start -= vma->vm_start;		/* rebase to vma-relative offset */
	/* Pages handled in the first (possibly partial) page table. */
	pcnt = PTRS_PER_PAGE - poff;
	if (pcnt > size)
		pcnt = size;

	/* One iteration per page table; pcnt is recomputed for the
	 * full tables that follow the first partial one. */
	for ( ; size > 0; ++dir, size -= pcnt, pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size)) {
		pte_t *page_table;
		unsigned long pc;

		if (pgd_none(*dir)) {
			/* No page table here: skip the whole span. */
			poff = 0;
			start += pcnt*PAGE_SIZE;
			continue;
		}
		if (pgd_bad(*dir)) {
			printk("file_mmap_sync: bad page directory entry %08lx.\n", pgd_val(*dir));
			pgd_clear(dir);
			poff = 0;
			start += pcnt*PAGE_SIZE;
			continue;
		}
		page_table = poff + (pte_t *) pgd_page(*dir);
		poff = 0;	/* only the first table starts mid-way */
		for (pc = pcnt; pc--; page_table++, start += PAGE_SIZE) {
			pte_t pte;

			pte = *page_table;
			if (!pte_present(pte))
				continue;
			if (!pte_dirty(pte))
				continue;
			if (flags & MS_INVALIDATE) {
				/* Drop the mapping; the free_page() below
				 * releases the mapping's reference. */
				pte_clear(page_table);
			} else {
				/* Keep the mapping: take an extra reference
				 * so free_page() below doesn't release the
				 * page, and clear the dirty bit. */
				mem_map[MAP_NR(pte_page(pte))]++;
				*page_table = pte_mkclean(pte);
			}
			file_mmap_sync_page(vma, start, pte_page(pte));
			free_page(pte_page(pte));
		}
	}
	invalidate();	/* flush the TLB after the pte changes */
	return;
}
138
139
140
141
/*
 * Unmap callback for shared file mappings: flush the dirty pages in
 * the range back to the file before the mapping is torn down.
 */
static void file_mmap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
{
	file_mmap_sync(vma, start, len, MS_ASYNC);
}
146
147
148
149
/*
 * Close callback for shared file mappings: sync the entire mapping
 * back to the file when the vma is being destroyed.
 */
static void file_mmap_close(struct vm_area_struct * vma)
{
	file_mmap_sync(vma, vma->vm_start, vma->vm_end - vma->vm_start, MS_ASYNC);
}
154
155
156
157
158
159
160
161
/*
 * Swapout callback for shared file mappings.  Swapping these pages is
 * not implemented: complain and simply clear the page table entry.
 */
void file_mmap_swapout(struct vm_area_struct * vma,
	unsigned long offset,
	pte_t *page_table)
{
	printk("swapout not implemented on shared files..\n");
	pte_clear(page_table);
}
169
170
171
172
173
174
/*
 * Operations for shared (writable-capable) file mappings: dirty pages
 * must be written back on close/unmap/sync.
 *
 * NOTE(review): positional initializer — slot labels below are inferred
 * from the handlers installed; confirm against the declaration of
 * struct vm_operations_struct.
 */
static struct vm_operations_struct file_shared_mmap = {
	NULL,
	file_mmap_close,	/* close */
	file_mmap_unmap,	/* unmap */
	NULL,
	file_mmap_sync,		/* sync */
	NULL,
	file_mmap_nopage,	/* nopage */
	NULL,
	file_mmap_swapout,	/* swapout */
	NULL,
};
187
188
189
190
191
192
193
/*
 * Operations for private file mappings: pages are demand-loaded via
 * nopage but never written back, so no close/unmap/sync handlers.
 *
 * NOTE(review): positional initializer — the labeled slot is inferred
 * from the handler installed; confirm against the declaration of
 * struct vm_operations_struct.
 */
static struct vm_operations_struct file_private_mmap = {
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	file_mmap_nopage,	/* nopage */
	NULL,
	NULL,
	NULL,
};
206
207
208 int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
209 {
210 struct vm_operations_struct * ops;
211
212 if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
213 return -EINVAL;
214 if (!inode->i_sb || !S_ISREG(inode->i_mode))
215 return -EACCES;
216 if (!inode->i_op || !inode->i_op->bmap)
217 return -ENOEXEC;
218 ops = &file_private_mmap;
219 if (vma->vm_flags & VM_SHARED) {
220 if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) {
221 static int nr = 0;
222 ops = &file_shared_mmap;
223 if (nr++ < 5)
224 printk("%s tried to do a shared writeable mapping\n", current->comm);
225 return -EINVAL;
226 }
227 }
228 if (!IS_RDONLY(inode)) {
229 inode->i_atime = CURRENT_TIME;
230 inode->i_dirt = 1;
231 }
232 vma->vm_inode = inode;
233 inode->i_count++;
234 vma->vm_ops = ops;
235 return 0;
236 }