This source file includes following definitions.
- do_mmap
- sys_mmap
- unmap_fixup
- sys_mprotect
- sys_munmap
- do_munmap
- generic_mmap
- insert_vm_struct
- merge_segments
- anon_map
- ignoff_mergep
1
2
3
4
5
6 #include <linux/stat.h>
7 #include <linux/sched.h>
8 #include <linux/kernel.h>
9 #include <linux/mm.h>
10 #include <linux/shm.h>
11 #include <linux/errno.h>
12 #include <linux/mman.h>
13 #include <linux/string.h>
14 #include <linux/malloc.h>
15
16 #include <asm/segment.h>
17 #include <asm/system.h>
18
19 static int anon_map(struct inode *, struct file *,
20 unsigned long, size_t, int,
21 unsigned long);
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39 #define CODE_SPACE(addr) \
40 (PAGE_ALIGN(addr) < current->start_code + current->end_code)
41
/*
 * do_mmap() - establish a new memory mapping for the current task.
 *
 * file:  backing file, or NULL for an anonymous mapping
 * addr:  requested user address (a hint unless MAP_FIXED is set)
 * len:   length in bytes (rounded up to a whole number of pages)
 * prot:  PROT_READ / PROT_WRITE / PROT_EXEC protection bits
 * flags: MAP_SHARED or MAP_PRIVATE, plus MAP_FIXED etc.
 * off:   byte offset into the file
 *
 * Returns the mapped address on success, a negative errno for
 * argument errors, or -1 with current->errno set when the
 * underlying mmap operation itself fails.
 */
int do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	int mask, error;

	/* A zero-length mapping is trivially "successful". */
	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	/* The whole range must lie inside the user address space. */
	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/*
	 * File-backed mappings must agree with how the file was opened:
	 * a writable MAP_SHARED mapping needs the file open for writing
	 * (f_mode bit 2), and every mapping needs it open for reading
	 * (f_mode bit 1).
	 */
	if (file != NULL)
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot & PROT_WRITE) && !(file->f_mode & 2))
				return -EACCES;
			/* fall through: shared mappings need read access too */
		case MAP_PRIVATE:
			if (!(file->f_mode & 1))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}

	/*
	 * MAP_FIXED takes the caller's address verbatim (it must be
	 * page aligned and in range).  Otherwise do a first-fit scan
	 * of the SHM range for a gap big enough for the mapping.
	 */
	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		if (len > TASK_SIZE || addr > TASK_SIZE - len)
			return -EINVAL;
	} else {
		struct vm_area_struct * vmm;

		/* Bump addr past every vma that overlaps [addr, addr+len). */
		addr = SHM_RANGE_START;
		while (addr+len < SHM_RANGE_END) {
			for (vmm = current->mm->mmap ; vmm ; vmm = vmm->vm_next) {
				if (addr >= vmm->vm_end)
					continue;
				if (addr + len <= vmm->vm_start)
					continue;
				addr = PAGE_ALIGN(vmm->vm_end);
				break;
			}
			if (!vmm)	/* whole list scanned with no clash */
				break;
		}
		if (addr+len >= SHM_RANGE_END)
			return -ENOMEM;
	}

	/* A file mapping requires the file to provide an mmap method. */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;
	/* Translate the PROT_* bits into hardware page-protection bits. */
	mask = 0;
	if (prot & (PROT_READ | PROT_EXEC))
		mask |= PAGE_READONLY;
	if (prot & PROT_WRITE)
		if ((flags & MAP_TYPE) == MAP_PRIVATE)
			mask |= PAGE_COPY;	/* private writes are copy-on-write */
		else
			mask |= PAGE_SHARED;
	if (!mask)	/* PROT_NONE mappings are not supported */
		return -EINVAL;

	/* Remove any old mappings that overlap the new range. */
	do_munmap(addr, len);

	if (file)
		error = file->f_op->mmap(file->f_inode, file, addr, len, mask, off);
	else
		error = anon_map(NULL, NULL, addr, len, mask, off);

	if (!error)
		return addr;

	/* NOTE(review): late failures are reported as -1 with the errno
	   stashed in the task struct, not as a negative return value. */
	if (!current->errno)
		current->errno = -error;
	return -1;
}
136
137 asmlinkage int sys_mmap(unsigned long *buffer)
138 {
139 int error;
140 unsigned long flags;
141 struct file * file = NULL;
142
143 error = verify_area(VERIFY_READ, buffer, 6*4);
144 if (error)
145 return error;
146 flags = get_fs_long(buffer+3);
147 if (!(flags & MAP_ANONYMOUS)) {
148 unsigned long fd = get_fs_long(buffer+4);
149 if (fd >= NR_OPEN || !(file = current->files->fd[fd]))
150 return -EBADF;
151 }
152 return do_mmap(file, get_fs_long(buffer), get_fs_long(buffer+1),
153 get_fs_long(buffer+2), flags, get_fs_long(buffer+5));
154 }
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
/*
 * unmap_fixup() - fix up a vm_area_struct after part (or all) of it
 * has been unmapped.
 *
 * The caller (do_munmap) has already unlinked 'area' from the task's
 * vma list and will kfree() it afterwards; this routine re-inserts
 * whatever portion of the area survives the removal of
 * [addr, addr+len).  Four cases: whole area gone, trimmed at the
 * head, trimmed at the tail, or a hole punched in the middle (which
 * requires a second vm_area_struct for the tail).
 */
void unmap_fixup(struct vm_area_struct *area,
		 unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt;
	unsigned long end = addr + len;

	/* Sanity check: the range must lie entirely inside the area. */
	if (addr < area->vm_start || addr >= area->vm_end ||
	    end <= area->vm_start || end > area->vm_end ||
	    end < addr)
	{
		printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
		       area->vm_start, area->vm_end, addr, end);
		return;
	}

	/* Unmapping the whole area: close it, drop the inode ref, done. */
	if (addr == area->vm_start && end == area->vm_end) {
		if (area->vm_ops && area->vm_ops->close)
			area->vm_ops->close(area);
		if (area->vm_inode)
			iput(area->vm_inode);
		return;
	}

	/* Unmapping from one end: shrink the area in place.  A head
	   trim must also advance vm_offset to keep file pages aligned. */
	if (addr >= area->vm_start && end == area->vm_end)
		area->vm_end = addr;
	if (addr == area->vm_start && end <= area->vm_end) {
		area->vm_offset += (end - area->vm_start);
		area->vm_start = end;
	}

	/* Punching a hole in the middle: the tail becomes a new area. */
	if (addr > area->vm_start && end < area->vm_end)
	{
		/* NOTE(review): kmalloc() results are not checked here or
		   below; an allocation failure would dereference NULL. */
		mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);

		*mpnt = *area;
		mpnt->vm_offset += (end - area->vm_start);
		mpnt->vm_start = end;
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count++;	/* tail keeps its own reference */
		area->vm_end = addr;	/* truncate original down to the head */
		insert_vm_struct(current, mpnt);
	}

	/* Re-insert the surviving (shrunk) area as a fresh copy; the
	   caller frees the original structure unconditionally. */
	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	*mpnt = *area;
	insert_vm_struct(current, mpnt);
}
230
231
/*
 * mprotect() is not implemented yet: changing the protection of an
 * existing mapping always fails with -EINVAL.
 */
asmlinkage int sys_mprotect(unsigned long addr, size_t len, unsigned long prot)
{
	return -EINVAL;
}
236
/*
 * munmap() system call entry point: thin wrapper around do_munmap(),
 * which performs all validation and the actual unmapping.
 */
asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
	return do_munmap(addr, len);
}
241
242
243
244
245
246
247
248 int do_munmap(unsigned long addr, size_t len)
249 {
250 struct vm_area_struct *mpnt, **npp, *free;
251
252 if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
253 return -EINVAL;
254
255 if ((len = PAGE_ALIGN(len)) == 0)
256 return 0;
257
258
259
260
261
262
263
264 npp = ¤t->mm->mmap;
265 free = NULL;
266 for (mpnt = *npp; mpnt != NULL; mpnt = *npp) {
267 unsigned long end = addr+len;
268
269 if ((addr < mpnt->vm_start && end <= mpnt->vm_start) ||
270 (addr >= mpnt->vm_end && end > mpnt->vm_end))
271 {
272 npp = &mpnt->vm_next;
273 continue;
274 }
275
276 *npp = mpnt->vm_next;
277 mpnt->vm_next = free;
278 free = mpnt;
279 }
280
281 if (free == NULL)
282 return 0;
283
284
285
286
287
288
289
290 while (free) {
291 unsigned long st, end;
292
293 mpnt = free;
294 free = free->vm_next;
295
296 st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
297 end = addr+len;
298 end = end > mpnt->vm_end ? mpnt->vm_end : end;
299
300 if (mpnt->vm_ops && mpnt->vm_ops->unmap)
301 mpnt->vm_ops->unmap(mpnt, st, end-st);
302 else
303 unmap_fixup(mpnt, st, end-st);
304
305 kfree(mpnt);
306 }
307
308 unmap_page_range(addr, len);
309 return 0;
310 }
311
312
313 int generic_mmap(struct inode * inode, struct file * file,
314 unsigned long addr, size_t len, int prot, unsigned long off)
315 {
316 struct vm_area_struct * mpnt;
317 extern struct vm_operations_struct file_mmap;
318 struct buffer_head * bh;
319
320 if (prot & PAGE_RW)
321 return -EINVAL;
322 if (off & (inode->i_sb->s_blocksize - 1))
323 return -EINVAL;
324 if (!inode->i_sb || !S_ISREG(inode->i_mode))
325 return -EACCES;
326 if (!inode->i_op || !inode->i_op->bmap)
327 return -ENOEXEC;
328 if (!(bh = bread(inode->i_dev,bmap(inode,0),inode->i_sb->s_blocksize)))
329 return -EACCES;
330 if (!IS_RDONLY(inode)) {
331 inode->i_atime = CURRENT_TIME;
332 inode->i_dirt = 1;
333 }
334 brelse(bh);
335
336 mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
337 if (!mpnt)
338 return -ENOMEM;
339
340 unmap_page_range(addr, len);
341 mpnt->vm_task = current;
342 mpnt->vm_start = addr;
343 mpnt->vm_end = addr + len;
344 mpnt->vm_page_prot = prot;
345 mpnt->vm_flags = 0;
346 mpnt->vm_share = NULL;
347 mpnt->vm_inode = inode;
348 inode->i_count++;
349 mpnt->vm_offset = off;
350 mpnt->vm_ops = &file_mmap;
351 insert_vm_struct(current, mpnt);
352 merge_segments(current->mm->mmap, NULL, NULL);
353
354 return 0;
355 }
356
357
358
359
360
361
362
363 void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
364 {
365 struct vm_area_struct **nxtpp, *mpnt;
366
367 nxtpp = &t->mm->mmap;
368
369 for(mpnt = t->mm->mmap; mpnt != NULL; mpnt = mpnt->vm_next)
370 {
371 if (mpnt->vm_start > vmp->vm_start)
372 break;
373 nxtpp = &mpnt->vm_next;
374
375 if ((vmp->vm_start >= mpnt->vm_start &&
376 vmp->vm_start < mpnt->vm_end) ||
377 (vmp->vm_end >= mpnt->vm_start &&
378 vmp->vm_end < mpnt->vm_end))
379 printk("insert_vm_struct: ins area %lx-%lx in area %lx-%lx\n",
380 vmp->vm_start, vmp->vm_end,
381 mpnt->vm_start, vmp->vm_end);
382 }
383
384 vmp->vm_next = mpnt;
385
386 *nxtpp = vmp;
387 }
388
389
390
391
392
393
/*
 * merge_segments() - coalesce adjacent, compatible vm areas in a
 * (sorted) vma list.
 *
 * mpnt:   head of the vma list to scan
 * mergep: optional predicate deciding whether two neighbours may
 *         merge; when NULL, the default test requires the second
 *         area's file offset to continue exactly where the first
 *         area's range ends.
 * mpd:    opaque data passed through to the predicate.
 */
void merge_segments(struct vm_area_struct *mpnt,
		    map_mergep_fnp mergep, void *mpd)
{
	struct vm_area_struct *prev, *next;

	if (mpnt == NULL)
		return;

	/* 'next' is captured before any merge so the walk survives
	   mpnt being freed; after a merge, mpnt is reset to prev so
	   the merged area can merge again with the following one. */
	for(prev = mpnt, mpnt = mpnt->vm_next;
	    mpnt != NULL;
	    prev = mpnt, mpnt = next)
	{
		int mp;

		next = mpnt->vm_next;

		if (mergep == NULL)
		{
			/* Default: file offsets must be contiguous. */
			unsigned long psz = prev->vm_end - prev->vm_start;
			mp = prev->vm_offset + psz == mpnt->vm_offset;
		}
		else
			mp = (*mergep)(prev, mpnt, mpd);

		/* The two areas must agree on every attribute, be
		   physically adjacent (prev ends where mpnt starts),
		   pass the predicate, and actually be list neighbours. */
		if (prev->vm_ops != mpnt->vm_ops ||
		    prev->vm_page_prot != mpnt->vm_page_prot ||
		    prev->vm_inode != mpnt->vm_inode ||
		    prev->vm_end != mpnt->vm_start ||
		    !mp ||
		    prev->vm_flags != mpnt->vm_flags ||
		    prev->vm_share != mpnt->vm_share ||
		    prev->vm_next != mpnt)
			continue;

		/* Merge: extend prev over mpnt, unlink and free mpnt.
		   NOTE(review): the inode reference mpnt may hold is not
		   released here — presumably both areas share the same
		   reference-counted inode; verify against callers. */
		prev->vm_end = mpnt->vm_end;
		prev->vm_next = mpnt->vm_next;
		kfree_s(mpnt, sizeof(*mpnt));
		mpnt = prev;
	}
}
444
445
446
447
448
/*
 * anon_map() - back an anonymous (MAP_ANONYMOUS) mapping with
 * zero-filled pages and record it in the task's vma list.
 *
 * ino and file are unused (do_mmap passes NULL for both); addr/len
 * define the page-aligned range, mask holds the hardware protection
 * bits, and off is ignored for anonymous memory.  Returns 0 on
 * success or -ENOMEM.
 */
static int anon_map(struct inode *ino, struct file * file,
		    unsigned long addr, size_t len, int mask,
		    unsigned long off)
{
	struct vm_area_struct * mpnt;

	/* Map the whole range to the zero page up front. */
	if (zeromap_page_range(addr, len, mask))
		return -ENOMEM;

	mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!mpnt)
		return -ENOMEM;

	mpnt->vm_task = current;
	mpnt->vm_start = addr;
	mpnt->vm_end = addr + len;
	mpnt->vm_page_prot = mask;
	mpnt->vm_flags = 0;
	mpnt->vm_share = NULL;
	mpnt->vm_inode = NULL;	/* anonymous: no backing inode */
	mpnt->vm_offset = 0;	/* off is meaningless without a file */
	mpnt->vm_ops = NULL;
	insert_vm_struct(current, mpnt);
	/* Offsets are irrelevant for anonymous areas, so merge with the
	   offset-ignoring predicate. */
	merge_segments(current->mm->mmap, ignoff_mergep, NULL);

	return 0;
}
476
477
478 int ignoff_mergep(const struct vm_area_struct *m1,
479 const struct vm_area_struct *m2,
480 void *data)
481 {
482 if (m1->vm_inode != m2->vm_inode)
483 return 0;
484
485 return (struct inode *)data == m1->vm_inode;
486 }