This source file includes following definitions.
- do_mmap
- sys_mmap
- unmap_fixup
- sys_munmap
- do_munmap
- generic_mmap
- insert_vm_struct
- merge_segments
- anon_map
1
2
3
4
5
6 #include <linux/stat.h>
7 #include <linux/sched.h>
8 #include <linux/kernel.h>
9 #include <linux/mm.h>
10 #include <linux/shm.h>
11 #include <linux/errno.h>
12 #include <linux/mman.h>
13 #include <linux/string.h>
14 #include <linux/malloc.h>
15
16 #include <asm/segment.h>
17 #include <asm/system.h>
18
19 static int anon_map(struct inode *, struct file *, struct vm_area_struct *);
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
/*
 * Map a region of 'len' bytes at file offset 'off' into the current
 * task's address space, with protection 'prot' and MAP_* 'flags'.
 * 'file' is NULL for anonymous mappings; 'addr' is a hint unless
 * MAP_FIXED is given.
 *
 * Returns the mapped address on success, or a negative errno for
 * argument/permission errors.  NOTE(review): when the mapper itself
 * fails, this returns -1 and stashes the code in current->errno
 * instead -- confirm callers expect this older convention.
 */
int do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	int mask, error;
	struct vm_area_struct * vma;

	/* Round length up to a whole number of pages; zero length is a no-op. */
	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/* Reject file offsets that would wrap past the end of the mapping. */
	if (off + len < off)
		return -EINVAL;

	/*
	 * Check the file's open mode against the requested mapping type:
	 * a writable MAP_SHARED needs write permission, and every file
	 * mapping needs read permission.
	 */
	if (file != NULL) {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot & PROT_WRITE) && !(file->f_mode & 2))
				return -EACCES;
			/* fall through: shared mappings need read access too */
		case MAP_PRIVATE:
			if (!(file->f_mode & 1))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}
	} else if ((flags & MAP_TYPE) == MAP_SHARED)
		return -EINVAL;

	/*
	 * Pick the address: either the caller fixed it (must be
	 * page-aligned and in range), or we first-fit scan the shared
	 * memory range for a gap of at least 'len' bytes.
	 */
	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		if (len > TASK_SIZE || addr > TASK_SIZE - len)
			return -EINVAL;
	} else {
		struct vm_area_struct * vmm;

		/* Restart the scan just past any vma that overlaps the candidate. */
		addr = SHM_RANGE_START;
		while (addr+len < SHM_RANGE_END) {
			for (vmm = current->mm->mmap ; vmm ; vmm = vmm->vm_next) {
				if (addr >= vmm->vm_end)
					continue;
				if (addr + len <= vmm->vm_start)
					continue;
				addr = PAGE_ALIGN(vmm->vm_end);
				break;
			}
			if (!vmm)	/* walked the whole list: gap found */
				break;
		}
		if (addr+len >= SHM_RANGE_END)
			return -ENOMEM;
	}

	/*
	 * A file mapping needs a device/fs mmap operation.  Then turn
	 * the PROT_* bits into page-table protection bits: private
	 * writable pages are copy-on-write, shared ones truly shared.
	 */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;
	mask = PAGE_PRESENT;
	if (prot & (PROT_READ | PROT_EXEC))
		mask |= PAGE_READONLY;
	if (prot & PROT_WRITE)
		if ((flags & MAP_TYPE) == MAP_PRIVATE)
			mask |= PAGE_COPY;
		else
			mask |= PAGE_SHARED;

	vma = kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	/* Fill in the new vma descriptor before touching the address space. */
	vma->vm_task = current;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_page_prot = mask;
	vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
	vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE);

	if (file) {
		if (file->f_mode & 1)
			vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (flags & MAP_SHARED) {
			vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
			/* read-only file: shared mapping may never become writable */
			if (!(file->f_mode & 2))
				vma->vm_flags &= ~VM_MAYWRITE;
		}
	} else
		vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	vma->vm_ops = NULL;
	vma->vm_offset = off;
	vma->vm_inode = NULL;
	vma->vm_pte = 0;

	/* Clear out any old mappings in the target range first. */
	do_munmap(addr, len);

	if (file)
		error = file->f_op->mmap(file->f_inode, file, vma);
	else
		error = anon_map(NULL, NULL, vma);

	if (error) {
		kfree(vma);
		if (!current->errno)
			current->errno = -error;
		return -1;
	}
	/* Link the new vma into the sorted list and coalesce neighbours. */
	insert_vm_struct(current, vma);
	merge_segments(current->mm->mmap);
	return addr;
}
167
168 asmlinkage int sys_mmap(unsigned long *buffer)
169 {
170 int error;
171 unsigned long flags;
172 struct file * file = NULL;
173
174 error = verify_area(VERIFY_READ, buffer, 6*4);
175 if (error)
176 return error;
177 flags = get_fs_long(buffer+3);
178 if (!(flags & MAP_ANONYMOUS)) {
179 unsigned long fd = get_fs_long(buffer+4);
180 if (fd >= NR_OPEN || !(file = current->files->fd[fd]))
181 return -EBADF;
182 }
183 return do_mmap(file, get_fs_long(buffer), get_fs_long(buffer+1),
184 get_fs_long(buffer+2), flags, get_fs_long(buffer+5));
185 }
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
/*
 * Called by do_munmap() after 'area' has already been unlinked from
 * the vma list.  [addr, addr+len) is the portion being unmapped,
 * clamped to lie within the area; any surviving head and/or tail
 * piece is re-inserted into the list as a fresh descriptor (the
 * caller kfree()s the one passed in).
 */
void unmap_fixup(struct vm_area_struct *area,
	unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt;
	unsigned long end = addr + len;

	/* Sanity check: the unmap range must lie wholly inside the area. */
	if (addr < area->vm_start || addr >= area->vm_end ||
	    end <= area->vm_start || end > area->vm_end ||
	    end < addr)
	{
		printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
			area->vm_start, area->vm_end, addr, end);
		return;
	}

	/* Whole area unmapped: close it, drop its inode ref, re-insert nothing. */
	if (addr == area->vm_start && end == area->vm_end) {
		if (area->vm_ops && area->vm_ops->close)
			area->vm_ops->close(area);
		if (area->vm_inode)
			iput(area->vm_inode);
		return;
	}

	/* Tail unmapped: pull vm_end back... */
	if (addr >= area->vm_start && end == area->vm_end)
		area->vm_end = addr;
	/* ...or head unmapped: advance vm_start and the file offset with it. */
	if (addr == area->vm_start && end <= area->vm_end) {
		area->vm_offset += (end - area->vm_start);
		area->vm_start = end;
	}

	/*
	 * Hole punched in the middle: clone the area for the tail piece
	 * and trim the original down to the head piece.
	 * NOTE(review): kmalloc() results here and below are not checked;
	 * an allocation failure would dereference NULL -- confirm this is
	 * acceptable in this context.
	 */
	if (addr > area->vm_start && end < area->vm_end)
	{

		mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);

		*mpnt = *area;
		mpnt->vm_offset += (end - area->vm_start);
		mpnt->vm_start = end;
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count++;	/* tail piece keeps its own inode ref */
		area->vm_end = addr;	/* original becomes the head piece */
		insert_vm_struct(current, mpnt);
	}

	/* Re-insert the trimmed original as a fresh copy, since the caller
	 * will free the descriptor it handed us. */
	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	*mpnt = *area;
	insert_vm_struct(current, mpnt);
}
261
/*
 * The munmap() system call: arguments arrive directly, all the real
 * work is done by do_munmap().
 */
asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
	return do_munmap(addr, len);
}
266
267
268
269
270
271
272
273 int do_munmap(unsigned long addr, size_t len)
274 {
275 struct vm_area_struct *mpnt, **npp, *free;
276
277 if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
278 return -EINVAL;
279
280 if ((len = PAGE_ALIGN(len)) == 0)
281 return 0;
282
283
284
285
286
287
288
289 npp = ¤t->mm->mmap;
290 free = NULL;
291 for (mpnt = *npp; mpnt != NULL; mpnt = *npp) {
292 unsigned long end = addr+len;
293
294 if ((addr < mpnt->vm_start && end <= mpnt->vm_start) ||
295 (addr >= mpnt->vm_end && end > mpnt->vm_end))
296 {
297 npp = &mpnt->vm_next;
298 continue;
299 }
300
301 *npp = mpnt->vm_next;
302 mpnt->vm_next = free;
303 free = mpnt;
304 }
305
306 if (free == NULL)
307 return 0;
308
309
310
311
312
313
314
315 while (free) {
316 unsigned long st, end;
317
318 mpnt = free;
319 free = free->vm_next;
320
321 st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
322 end = addr+len;
323 end = end > mpnt->vm_end ? mpnt->vm_end : end;
324
325 if (mpnt->vm_ops && mpnt->vm_ops->unmap)
326 mpnt->vm_ops->unmap(mpnt, st, end-st);
327 else
328 unmap_fixup(mpnt, st, end-st);
329
330 kfree(mpnt);
331 }
332
333 unmap_page_range(addr, len);
334 return 0;
335 }
336
337
338 int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
339 {
340 extern struct vm_operations_struct file_mmap;
341
342 if (vma->vm_page_prot & PAGE_RW)
343 return -EINVAL;
344 if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
345 return -EINVAL;
346 if (!inode->i_sb || !S_ISREG(inode->i_mode))
347 return -EACCES;
348 if (!inode->i_op || !inode->i_op->bmap)
349 return -ENOEXEC;
350 if (!IS_RDONLY(inode)) {
351 inode->i_atime = CURRENT_TIME;
352 inode->i_dirt = 1;
353 }
354 vma->vm_inode = inode;
355 inode->i_count++;
356 vma->vm_ops = &file_mmap;
357 return 0;
358 }
359
360
361
362
363 void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
364 {
365 struct vm_area_struct **p, *mpnt;
366
367 p = &t->mm->mmap;
368 while ((mpnt = *p) != NULL) {
369 if (mpnt->vm_start > vmp->vm_start)
370 break;
371 if (mpnt->vm_end > vmp->vm_start)
372 printk("insert_vm_struct: overlapping memory areas\n");
373 p = &mpnt->vm_next;
374 }
375 vmp->vm_next = mpnt;
376 *p = vmp;
377 }
378
379
380
381
382
383
384 void merge_segments(struct vm_area_struct *mpnt)
385 {
386 struct vm_area_struct *prev, *next;
387
388 if (mpnt == NULL)
389 return;
390
391 for(prev = mpnt, mpnt = mpnt->vm_next;
392 mpnt != NULL;
393 prev = mpnt, mpnt = next)
394 {
395 next = mpnt->vm_next;
396
397
398
399
400 if (mpnt->vm_inode != prev->vm_inode)
401 continue;
402 if (mpnt->vm_pte != prev->vm_pte)
403 continue;
404 if (mpnt->vm_ops != prev->vm_ops)
405 continue;
406 if (mpnt->vm_page_prot != prev->vm_page_prot ||
407 mpnt->vm_flags != prev->vm_flags)
408 continue;
409 if (prev->vm_end != mpnt->vm_start)
410 continue;
411
412
413
414 if (mpnt->vm_inode != NULL) {
415 if (prev->vm_offset + prev->vm_end - prev->vm_start != mpnt->vm_offset)
416 continue;
417 }
418
419
420
421
422
423
424 prev->vm_end = mpnt->vm_end;
425 prev->vm_next = mpnt->vm_next;
426 kfree_s(mpnt, sizeof(*mpnt));
427 mpnt = prev;
428 }
429 }
430
431
432
433
434
435 static int anon_map(struct inode *ino, struct file * file, struct vm_area_struct * vma)
436 {
437 if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
438 return -ENOMEM;
439 return 0;
440 }