This source file includes the following definitions.
- do_mmap
- sys_mmap
- unmap_fixup
- sys_munmap
- do_munmap
- generic_mmap
- insert_vm_struct
- merge_segments
- anon_map
1
2
3
4
5
6 #include <linux/stat.h>
7 #include <linux/sched.h>
8 #include <linux/kernel.h>
9 #include <linux/mm.h>
10 #include <linux/shm.h>
11 #include <linux/errno.h>
12 #include <linux/mman.h>
13 #include <linux/string.h>
14 #include <linux/malloc.h>
15
16 #include <asm/segment.h>
17 #include <asm/system.h>
18
19 static int anon_map(struct inode *, struct file *, struct vm_area_struct *);
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
/*
 * Map a chunk of a file (or anonymous zero-fill memory when 'file' is
 * NULL) into the current process' address space.
 *
 * file:  backing file, or NULL for an anonymous mapping
 * addr:  requested address (only honored when MAP_FIXED is set)
 * len:   length in bytes; rounded up to whole pages
 * prot:  PROT_READ / PROT_WRITE / PROT_EXEC bits
 * flags: MAP_SHARED or MAP_PRIVATE, plus MAP_FIXED etc.
 * off:   byte offset into the file
 *
 * Returns the mapping address on success, a negative errno for argument
 * errors, or -1 (with current->errno set) when the low-level map fails.
 */
int do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	int mask, error;
	struct vm_area_struct * vma;

	/* a zero-length mapping is a successful no-op */
	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/* reject offset+length wrap-around */
	if (off + len < off)
		return -EINVAL;

	/*
	 * Check the file's open mode against the requested mapping type.
	 * Note the deliberate fall-through: a MAP_SHARED mapping must also
	 * satisfy the MAP_PRIVATE read check.
	 */
	if (file != NULL) {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			/* writable shared map needs the file open for write */
			if ((prot & PROT_WRITE) && !(file->f_mode & 2))
				return -EACCES;
			/* fall through */
		case MAP_PRIVATE:
			/* file must be open for reading (f_mode bit 0) */
			if (!(file->f_mode & 1))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}
	} else if ((flags & MAP_TYPE) == MAP_SHARED)
		return -EINVAL;	/* anonymous shared mappings not supported */

	/*
	 * Pick the address: either the caller fixed it (must be page
	 * aligned and in range), or we first-fit scan the window
	 * [SHM_RANGE_START, SHM_RANGE_END) for a gap of 'len' bytes.
	 */
	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		if (len > TASK_SIZE || addr > TASK_SIZE - len)
			return -EINVAL;
	} else {
		struct vm_area_struct * vmm;

		/* rescan the whole vma list each time 'addr' moves */
		addr = SHM_RANGE_START;
		while (addr+len < SHM_RANGE_END) {
			for (vmm = current->mm->mmap ; vmm ; vmm = vmm->vm_next) {
				if (addr >= vmm->vm_end)
					continue;
				if (addr + len <= vmm->vm_start)
					continue;
				/* overlap: skip past this vma and retry */
				addr = PAGE_ALIGN(vmm->vm_end);
				break;
			}
			if (!vmm)	/* no vma overlapped: gap found */
				break;
		}
		if (addr+len >= SHM_RANGE_END)
			return -ENOMEM;
	}

	/*
	 * A file mapping needs an mmap operation.  Build the hardware
	 * page protection mask from the PROT_* bits.
	 */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;
	mask = PAGE_PRESENT;
	if (prot & (PROT_READ | PROT_EXEC))
		mask |= PAGE_READONLY;
	if (prot & PROT_WRITE)
		if ((flags & MAP_TYPE) == MAP_PRIVATE)
			mask |= PAGE_COPY;	/* private writes are copy-on-write */
		else
			mask |= PAGE_SHARED;

	vma = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct),
		GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	vma->vm_task = current;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_page_prot = mask;
	/* PROT_* and VM_* bit values coincide here */
	vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
	vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE);

	if (file) {
		if (file->f_mode & 1)
			vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (flags & MAP_SHARED) {
			vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
			/* read-only file: mapping may never become writable */
			if (!(file->f_mode & 2))
				vma->vm_flags &= ~VM_MAYWRITE;
		}
	} else
		vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	vma->vm_ops = NULL;
	vma->vm_offset = off;
	vma->vm_inode = NULL;
	vma->vm_pte = 0;

	/* clear any old mappings in the target range first */
	do_munmap(addr, len);

	if (file)
		error = file->f_op->mmap(file->f_inode, file, vma);
	else
		error = anon_map(NULL, NULL, vma);

	if (error) {
		kfree(vma);
		/* historical convention: failure reported via current->errno */
		if (!current->errno)
			current->errno = -error;
		return -1;
	}
	insert_vm_struct(current, vma);
	merge_segments(current->mm->mmap);
	return addr;
}
168
169 asmlinkage int sys_mmap(unsigned long *buffer)
170 {
171 int error;
172 unsigned long flags;
173 struct file * file = NULL;
174
175 error = verify_area(VERIFY_READ, buffer, 6*4);
176 if (error)
177 return error;
178 flags = get_fs_long(buffer+3);
179 if (!(flags & MAP_ANONYMOUS)) {
180 unsigned long fd = get_fs_long(buffer+4);
181 if (fd >= NR_OPEN || !(file = current->files->fd[fd]))
182 return -EBADF;
183 }
184 return do_mmap(file, get_fs_long(buffer), get_fs_long(buffer+1),
185 get_fs_long(buffer+2), flags, get_fs_long(buffer+5));
186 }
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
/*
 * Trim vm_area 'area' so that it no longer covers [addr, addr+len).
 *
 * The caller (do_munmap) has already unlinked 'area' from the task's
 * vma list and will kfree() it afterwards, so any surviving piece is
 * copied into a freshly allocated structure and re-inserted here.
 *
 * Four cases:
 *   1. the whole area dies: release its resources and return;
 *   2. hole at the end:     shrink vm_end;
 *   3. hole at the start:   advance vm_start (and vm_offset);
 *   4. hole in the middle:  split into head + tail pieces.
 */
void unmap_fixup(struct vm_area_struct *area,
	unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt;
	unsigned long end = addr + len;

	/* sanity: the unmapped range must lie entirely inside 'area' */
	if (addr < area->vm_start || addr >= area->vm_end ||
	    end <= area->vm_start || end > area->vm_end ||
	    end < addr)
	{
		printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
			area->vm_start, area->vm_end, addr, end);
		return;
	}

	/* case 1: whole area unmapped -- close it and let the caller free */
	if (addr == area->vm_start && end == area->vm_end) {
		if (area->vm_ops && area->vm_ops->close)
			area->vm_ops->close(area);
		if (area->vm_inode)
			iput(area->vm_inode);
		return;
	}

	/* case 2: hole at the end -- just shrink the area */
	if (addr >= area->vm_start && end == area->vm_end)
		area->vm_end = addr;
	/* case 3: hole at the start -- advance start and file offset */
	if (addr == area->vm_start && end <= area->vm_end) {
		area->vm_offset += (end - area->vm_start);
		area->vm_start = end;
	}

	/* case 4: hole in the middle -- split off the tail as a new area */
	if (addr > area->vm_start && end < area->vm_end)
	{
		mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);

		if (!mpnt)
			return;
		*mpnt = *area;
		mpnt->vm_offset += (end - area->vm_start);
		mpnt->vm_start = end;
		/* the tail needs its own reference on the inode */
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count++;
		area->vm_end = addr;	/* 'area' becomes the head piece */
		insert_vm_struct(current, mpnt);
	}

	/* re-insert a copy of the surviving (trimmed) area; the caller
	   frees the original structure after we return */
	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	if (!mpnt)
		return;
	*mpnt = *area;
	insert_vm_struct(current, mpnt);
}
266
267 asmlinkage int sys_munmap(unsigned long addr, size_t len)
268 {
269 return do_munmap(addr, len);
270 }
271
272
273
274
275
276
277
278 int do_munmap(unsigned long addr, size_t len)
279 {
280 struct vm_area_struct *mpnt, **npp, *free;
281
282 if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
283 return -EINVAL;
284
285 if ((len = PAGE_ALIGN(len)) == 0)
286 return 0;
287
288
289
290
291
292
293
294 npp = ¤t->mm->mmap;
295 free = NULL;
296 for (mpnt = *npp; mpnt != NULL; mpnt = *npp) {
297 unsigned long end = addr+len;
298
299 if ((addr < mpnt->vm_start && end <= mpnt->vm_start) ||
300 (addr >= mpnt->vm_end && end > mpnt->vm_end))
301 {
302 npp = &mpnt->vm_next;
303 continue;
304 }
305
306 *npp = mpnt->vm_next;
307 mpnt->vm_next = free;
308 free = mpnt;
309 }
310
311 if (free == NULL)
312 return 0;
313
314
315
316
317
318
319
320 while (free) {
321 unsigned long st, end;
322
323 mpnt = free;
324 free = free->vm_next;
325
326 st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
327 end = addr+len;
328 end = end > mpnt->vm_end ? mpnt->vm_end : end;
329
330 if (mpnt->vm_ops && mpnt->vm_ops->unmap)
331 mpnt->vm_ops->unmap(mpnt, st, end-st);
332 else
333 unmap_fixup(mpnt, st, end-st);
334
335 kfree(mpnt);
336 }
337
338 unmap_page_range(addr, len);
339 return 0;
340 }
341
342
343 int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
344 {
345 extern struct vm_operations_struct file_mmap;
346
347 if (vma->vm_page_prot & PAGE_RW)
348 return -EINVAL;
349 if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
350 return -EINVAL;
351 if (!inode->i_sb || !S_ISREG(inode->i_mode))
352 return -EACCES;
353 if (!inode->i_op || !inode->i_op->bmap)
354 return -ENOEXEC;
355 if (!IS_RDONLY(inode)) {
356 inode->i_atime = CURRENT_TIME;
357 inode->i_dirt = 1;
358 }
359 vma->vm_inode = inode;
360 inode->i_count++;
361 vma->vm_ops = &file_mmap;
362 return 0;
363 }
364
365
366
367
368 void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
369 {
370 struct vm_area_struct **p, *mpnt;
371
372 p = &t->mm->mmap;
373 while ((mpnt = *p) != NULL) {
374 if (mpnt->vm_start > vmp->vm_start)
375 break;
376 if (mpnt->vm_end > vmp->vm_start)
377 printk("insert_vm_struct: overlapping memory areas\n");
378 p = &mpnt->vm_next;
379 }
380 vmp->vm_next = mpnt;
381 *p = vmp;
382 }
383
384
385
386
387
388
/*
 * Walk the (sorted) vma list and coalesce adjacent areas that are
 * compatible: same inode, pte, ops, protection and flags, physically
 * adjacent in the address space, and (for file maps) contiguous in
 * the file.  The absorbed (second) area is closed, its inode
 * reference dropped, and its structure freed.
 */
void merge_segments(struct vm_area_struct *mpnt)
{
	struct vm_area_struct *prev, *next;

	if (mpnt == NULL)
		return;

	for(prev = mpnt, mpnt = mpnt->vm_next;
	    mpnt != NULL;
	    prev = mpnt, mpnt = next)
	{
		next = mpnt->vm_next;

		/* every attribute must match for the pair to merge... */
		if (mpnt->vm_inode != prev->vm_inode)
			continue;
		if (mpnt->vm_pte != prev->vm_pte)
			continue;
		if (mpnt->vm_ops != prev->vm_ops)
			continue;
		if (mpnt->vm_page_prot != prev->vm_page_prot ||
		    mpnt->vm_flags != prev->vm_flags)
			continue;
		/* ...and the areas must abut in the address space */
		if (prev->vm_end != mpnt->vm_start)
			continue;

		/* file mappings must also be contiguous in the file */
		if (mpnt->vm_inode != NULL) {
			if (prev->vm_offset + prev->vm_end - prev->vm_start != mpnt->vm_offset)
				continue;
		}

		/* merge: extend prev over mpnt and unlink mpnt */
		prev->vm_end = mpnt->vm_end;
		prev->vm_next = mpnt->vm_next;
		if (mpnt->vm_ops && mpnt->vm_ops->close) {
			/* shrink mpnt to zero length before closing, so the
			   close op releases nothing that prev now owns */
			mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start;
			mpnt->vm_start = mpnt->vm_end;
			mpnt->vm_ops->close(mpnt);
		}
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count--;
		kfree_s(mpnt, sizeof(*mpnt));
		/* stay on prev so the loop update re-checks the enlarged
		   area against the next list entry */
		mpnt = prev;
	}
}
442
443
444
445
446
447 static int anon_map(struct inode *ino, struct file * file, struct vm_area_struct * vma)
448 {
449 if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
450 return -ENOMEM;
451 return 0;
452 }