This source file includes the following definitions.
- do_mmap
- sys_mmap
- unmap_fixup
- sys_munmap
- do_munmap
- generic_mmap
- insert_vm_struct
- merge_segments
- anon_map
1
2
3
4
5
6 #include <linux/stat.h>
7 #include <linux/sched.h>
8 #include <linux/kernel.h>
9 #include <linux/mm.h>
10 #include <linux/shm.h>
11 #include <linux/errno.h>
12 #include <linux/mman.h>
13 #include <linux/string.h>
14 #include <linux/malloc.h>
15
16 #include <asm/segment.h>
17 #include <asm/system.h>
18
19 static int anon_map(struct inode *, struct file *, struct vm_area_struct *);
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
/*
 * Map a file (or anonymous zero-fill memory) into the current task's
 * address space.
 *
 * file  - open file backing the mapping, or NULL for anonymous memory
 * addr  - requested start address (only honoured with MAP_FIXED)
 * len   - length in bytes (rounded up to a whole number of pages)
 * prot  - PROT_* protection bits requested by the caller
 * flags - MAP_* flags (mapping type, MAP_FIXED, ...)
 * off   - byte offset into the file where the mapping starts
 *
 * Returns the chosen address on success or a negative errno for most
 * failures.  NOTE(review): when the low-level mmap handler fails, this
 * instead sets current->errno and returns -1 -- callers must cope with
 * both reporting styles.
 */
int do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	int mask, error;
	struct vm_area_struct * vma;

	/* A zero-length request is a successful no-op. */
	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/* offset + length must not wrap around */
	if (off + len < off)
		return -EINVAL;

	/*
	 * Do simple permission checking up front: the open mode of the
	 * file must back the requested protection.  A shared writable
	 * mapping needs a file opened for writing, and any file mapping
	 * needs a file opened for reading.
	 */
	if (file != NULL) {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot & PROT_WRITE) && !(file->f_mode & 2))
				return -EACCES;
			/* fall through: shared mappings need read access too */
		case MAP_PRIVATE:
			if (!(file->f_mode & 1))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}
	} else if ((flags & MAP_TYPE) == MAP_SHARED)
		return -EINVAL;

	/*
	 * Obtain the address to map to.  MAP_FIXED requests must be page
	 * aligned and fit below TASK_SIZE; otherwise do a first-fit scan
	 * of the SHM arena for a hole large enough.
	 */
	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		if (len > TASK_SIZE || addr > TASK_SIZE - len)
			return -EINVAL;
	} else {
		struct vm_area_struct * vmm;

		/* bump addr past any vma that [addr, addr+len) collides with */
		addr = SHM_RANGE_START;
		while (addr+len < SHM_RANGE_END) {
			for (vmm = current->mm->mmap ; vmm ; vmm = vmm->vm_next) {
				if (addr >= vmm->vm_end)
					continue;
				if (addr + len <= vmm->vm_start)
					continue;
				addr = PAGE_ALIGN(vmm->vm_end);
				break;
			}
			if (!vmm)	/* scanned the whole list with no collision */
				break;
		}
		if (addr+len >= SHM_RANGE_END)
			return -ENOMEM;
	}

	/*
	 * Determine the object being mapped and build the page-protection
	 * mask.  Private writable mappings get copy-on-write pages.
	 */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;
	mask = PAGE_PRESENT;
	if (prot & (PROT_READ | PROT_EXEC))
		mask |= PAGE_READONLY;
	if (prot & PROT_WRITE)
		if ((flags & MAP_TYPE) == MAP_PRIVATE)
			mask |= PAGE_COPY;
		else
			mask |= PAGE_SHARED;

	vma = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct),
		GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	vma->vm_task = current;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_page_prot = mask;
	vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
	vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE);

	if (file) {
		if (file->f_mode & 1)
			vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (flags & MAP_SHARED) {
			vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
			/* file opened read-only: the shared mapping may never gain write */
			if (!(file->f_mode & 2))
				vma->vm_flags &= ~VM_MAYWRITE;
		}
	} else
		vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	vma->vm_ops = NULL;
	vma->vm_offset = off;
	vma->vm_inode = NULL;
	vma->vm_pte = 0;

	/* clear any old mappings in the range before installing this one */
	do_munmap(addr, len);

	if (file)
		error = file->f_op->mmap(file->f_inode, file, vma);
	else
		error = anon_map(NULL, NULL, vma);

	if (error) {
		kfree(vma);
		/* historical quirk: failure reported via errno/-1, not a return code */
		if (!current->errno)
			current->errno = -error;
		return -1;
	}
	insert_vm_struct(current, vma);
	merge_segments(current->mm->mmap);
	return addr;
}
168
169 asmlinkage int sys_mmap(unsigned long *buffer)
170 {
171 int error;
172 unsigned long flags;
173 struct file * file = NULL;
174
175 error = verify_area(VERIFY_READ, buffer, 6*4);
176 if (error)
177 return error;
178 flags = get_fs_long(buffer+3);
179 if (!(flags & MAP_ANONYMOUS)) {
180 unsigned long fd = get_fs_long(buffer+4);
181 if (fd >= NR_OPEN || !(file = current->files->fd[fd]))
182 return -EBADF;
183 }
184 return do_mmap(file, get_fs_long(buffer), get_fs_long(buffer+1),
185 get_fs_long(buffer+2), flags, get_fs_long(buffer+5));
186 }
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210 void unmap_fixup(struct vm_area_struct *area,
211 unsigned long addr, size_t len)
212 {
213 struct vm_area_struct *mpnt;
214 unsigned long end = addr + len;
215
216 if (addr < area->vm_start || addr >= area->vm_end ||
217 end <= area->vm_start || end > area->vm_end ||
218 end < addr)
219 {
220 printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
221 area->vm_start, area->vm_end, addr, end);
222 return;
223 }
224
225
226 if (addr == area->vm_start && end == area->vm_end) {
227 if (area->vm_ops && area->vm_ops->close)
228 area->vm_ops->close(area);
229 if (area->vm_inode)
230 iput(area->vm_inode);
231 return;
232 }
233
234
235 if (addr >= area->vm_start && end == area->vm_end)
236 area->vm_end = addr;
237 if (addr == area->vm_start && end <= area->vm_end) {
238 area->vm_offset += (end - area->vm_start);
239 area->vm_start = end;
240 }
241
242
243 if (addr > area->vm_start && end < area->vm_end)
244 {
245
246 mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
247
248 *mpnt = *area;
249 mpnt->vm_offset += (end - area->vm_start);
250 mpnt->vm_start = end;
251 if (mpnt->vm_inode)
252 mpnt->vm_inode->i_count++;
253 area->vm_end = addr;
254 insert_vm_struct(current, mpnt);
255 }
256
257
258 mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
259 *mpnt = *area;
260 insert_vm_struct(current, mpnt);
261 }
262
263 asmlinkage int sys_munmap(unsigned long addr, size_t len)
264 {
265 return do_munmap(addr, len);
266 }
267
268
269
270
271
272
273
274 int do_munmap(unsigned long addr, size_t len)
275 {
276 struct vm_area_struct *mpnt, **npp, *free;
277
278 if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
279 return -EINVAL;
280
281 if ((len = PAGE_ALIGN(len)) == 0)
282 return 0;
283
284
285
286
287
288
289
290 npp = ¤t->mm->mmap;
291 free = NULL;
292 for (mpnt = *npp; mpnt != NULL; mpnt = *npp) {
293 unsigned long end = addr+len;
294
295 if ((addr < mpnt->vm_start && end <= mpnt->vm_start) ||
296 (addr >= mpnt->vm_end && end > mpnt->vm_end))
297 {
298 npp = &mpnt->vm_next;
299 continue;
300 }
301
302 *npp = mpnt->vm_next;
303 mpnt->vm_next = free;
304 free = mpnt;
305 }
306
307 if (free == NULL)
308 return 0;
309
310
311
312
313
314
315
316 while (free) {
317 unsigned long st, end;
318
319 mpnt = free;
320 free = free->vm_next;
321
322 st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
323 end = addr+len;
324 end = end > mpnt->vm_end ? mpnt->vm_end : end;
325
326 if (mpnt->vm_ops && mpnt->vm_ops->unmap)
327 mpnt->vm_ops->unmap(mpnt, st, end-st);
328 else
329 unmap_fixup(mpnt, st, end-st);
330
331 kfree(mpnt);
332 }
333
334 unmap_page_range(addr, len);
335 return 0;
336 }
337
338
339 int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
340 {
341 extern struct vm_operations_struct file_mmap;
342
343 if (vma->vm_page_prot & PAGE_RW)
344 return -EINVAL;
345 if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
346 return -EINVAL;
347 if (!inode->i_sb || !S_ISREG(inode->i_mode))
348 return -EACCES;
349 if (!inode->i_op || !inode->i_op->bmap)
350 return -ENOEXEC;
351 if (!IS_RDONLY(inode)) {
352 inode->i_atime = CURRENT_TIME;
353 inode->i_dirt = 1;
354 }
355 vma->vm_inode = inode;
356 inode->i_count++;
357 vma->vm_ops = &file_mmap;
358 return 0;
359 }
360
361
362
363
364 void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
365 {
366 struct vm_area_struct **p, *mpnt;
367
368 p = &t->mm->mmap;
369 while ((mpnt = *p) != NULL) {
370 if (mpnt->vm_start > vmp->vm_start)
371 break;
372 if (mpnt->vm_end > vmp->vm_start)
373 printk("insert_vm_struct: overlapping memory areas\n");
374 p = &mpnt->vm_next;
375 }
376 vmp->vm_next = mpnt;
377 *p = vmp;
378 }
379
380
381
382
383
384
/*
 * Coalesce adjacent vm_area_structs in the list that describe one
 * contiguous mapping with identical attributes, freeing the absorbed
 * descriptors.  'mpnt' is the head of the (vm_start-sorted) list.
 */
void merge_segments(struct vm_area_struct *mpnt)
{
	struct vm_area_struct *prev, *next;

	if (mpnt == NULL)
		return;

	for(prev = mpnt, mpnt = mpnt->vm_next;
	    mpnt != NULL;
	    prev = mpnt, mpnt = next)
	{
		next = mpnt->vm_next;

		/*
		 * The two areas must be identical in everything except
		 * their range: same backing inode, shared pte, ops,
		 * protection and flags, and exactly abutting.
		 */
		if (mpnt->vm_inode != prev->vm_inode)
			continue;
		if (mpnt->vm_pte != prev->vm_pte)
			continue;
		if (mpnt->vm_ops != prev->vm_ops)
			continue;
		if (mpnt->vm_page_prot != prev->vm_page_prot ||
		    mpnt->vm_flags != prev->vm_flags)
			continue;
		if (prev->vm_end != mpnt->vm_start)
			continue;

		/* for file mappings the file offsets must line up as well */
		if (mpnt->vm_inode != NULL) {
			if (prev->vm_offset + prev->vm_end - prev->vm_start != mpnt->vm_offset)
				continue;
		}

		/*
		 * Merge mpnt into prev and free it.  Setting mpnt = prev
		 * makes the for-loop increment keep 'prev' as the merge
		 * candidate, so a whole chain of mergeable areas can
		 * collapse into one on successive iterations.
		 */
		prev->vm_end = mpnt->vm_end;
		prev->vm_next = mpnt->vm_next;
		kfree_s(mpnt, sizeof(*mpnt));
		mpnt = prev;
	}
}
431
432
433
434
435
436 static int anon_map(struct inode *ino, struct file * file, struct vm_area_struct * vma)
437 {
438 if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
439 return -ENOMEM;
440 return 0;
441 }