This source file includes the following definitions:
- do_mmap
- sys_mmap
- unmap_fixup
- sys_mprotect
- sys_munmap
- do_munmap
- generic_mmap
- insert_vm_struct
- merge_segments
- anon_map
- ignoff_mergep
/*
 *	linux/mm/mmap.c
 *
 * Written by obz.
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/system.h>

static int anon_map(struct inode *, struct file *,
		    unsigned long, size_t, int,
		    unsigned long);

/*
 * Description of the effects of mapping type and prot in the current
 * implementation (limited by the x86 page protection hardware):
 * PROT_READ and PROT_EXEC both select read-only pages, while
 * PROT_WRITE selects copy-on-write pages for MAP_PRIVATE mappings
 * and shared writable pages for MAP_SHARED mappings.
 */
#define CODE_SPACE(addr) \
	(PAGE_ALIGN(addr) < current->start_code + current->end_code)

int do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	int mask, error;

	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/*
	 * do simple checking here so the lower-level routines won't have
	 * to.  we assume access permissions have been handled by the open
	 * of the memory object, so we don't do any here.
	 */
	if (file != NULL)
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot & PROT_WRITE) && !(file->f_mode & 2))
				return -EACCES;
			/* fall through: shared mappings need read access too */
		case MAP_PRIVATE:
			if (!(file->f_mode & 1))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}

	/*
	 * obtain the address to map to.  we verify (or select) it and
	 * ensure that it represents a valid section of the address space.
	 */
	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		if (len > TASK_SIZE || addr > TASK_SIZE - len)
			return -EINVAL;
	} else {
		struct vm_area_struct * vmm;

		/* linear search through the map list for a free gap */
		addr = SHM_RANGE_START;
		while (addr+len < SHM_RANGE_END) {
			for (vmm = current->mmap ; vmm ; vmm = vmm->vm_next) {
				if (addr >= vmm->vm_end)
					continue;
				if (addr + len <= vmm->vm_start)
					continue;
				addr = PAGE_ALIGN(vmm->vm_end);
				break;
			}
			if (!vmm)
				break;
		}
		if (addr+len >= SHM_RANGE_END)
			return -ENOMEM;
	}

	/*
	 * determine the object being mapped and call the appropriate
	 * specific mapper.  the address has already been validated, but
	 * not unmapped.
	 */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;
	mask = 0;
	if (prot & (PROT_READ | PROT_EXEC))
		mask |= PAGE_READONLY;
	if (prot & PROT_WRITE) {
		if ((flags & MAP_TYPE) == MAP_PRIVATE)
			mask |= PAGE_COPY;
		else
			mask |= PAGE_SHARED;
	}
	if (!mask)
		return -EINVAL;

	do_munmap(addr, len);	/* clear old maps in the range */

	if (file)
		error = file->f_op->mmap(file->f_inode, file, addr, len, mask, off);
	else
		error = anon_map(NULL, NULL, addr, len, mask, off);

	if (!error)
		return addr;

	if (!current->errno)
		current->errno = -error;
	return -1;
}
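
/*
 * Worked examples of the protection-mask selection above (derived
 * directly from the mask computation in do_mmap; illustrative only):
 *
 *	prot			MAP_TYPE	resulting mask
 *	PROT_READ		any		PAGE_READONLY
 *	PROT_READ|PROT_WRITE	MAP_PRIVATE	PAGE_READONLY|PAGE_COPY
 *	PROT_READ|PROT_WRITE	MAP_SHARED	PAGE_READONLY|PAGE_SHARED
 *	PROT_NONE		any		mask == 0, so -EINVAL
 */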

asmlinkage int sys_mmap(unsigned long *buffer)
{
	int error;
	unsigned long flags;
	struct file * file = NULL;

	error = verify_area(VERIFY_READ, buffer, 6*4);
	if (error)
		return error;
	flags = get_fs_long(buffer+3);
	if (!(flags & MAP_ANONYMOUS)) {
		unsigned long fd = get_fs_long(buffer+4);
		if (fd >= NR_OPEN || !(file = current->filp[fd]))
			return -EBADF;
	}
	return do_mmap(file, get_fs_long(buffer), get_fs_long(buffer+1),
		get_fs_long(buffer+2), flags, get_fs_long(buffer+5));
}
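
/*
 * Illustrative sketch (not part of this file): how a user-space
 * wrapper might pack the six mmap arguments into the single buffer
 * that sys_mmap() above reads with get_fs_long().  The wrapper name
 * and the syscall() invocation are assumptions for illustration.
 */
#if 0
static int old_mmap_call(unsigned long addr, unsigned long len,
			 unsigned long prot, unsigned long flags,
			 unsigned long fd, unsigned long off)
{
	unsigned long buffer[6];

	buffer[0] = addr;	/* read by get_fs_long(buffer)   */
	buffer[1] = len;	/* read by get_fs_long(buffer+1) */
	buffer[2] = prot;	/* read by get_fs_long(buffer+2) */
	buffer[3] = flags;	/* read by get_fs_long(buffer+3) */
	buffer[4] = fd;		/* read by get_fs_long(buffer+4), skipped for MAP_ANONYMOUS */
	buffer[5] = off;	/* read by get_fs_long(buffer+5) */
	return syscall(SYS_mmap, buffer);	/* hypothetical invocation */
}
#endif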

/*
 * Normal function to fix up a mapping.  This is the default for when
 * an area has no specific unmap function: it works out what part of
 * the area is affected and adjusts the mapping information.  By the
 * time it is called, the area struct has been removed from the
 * process mapping list, so it needs to be reinserted if necessary.
 *
 * The four main cases are:
 *	unmapping the whole area,
 *	unmapping from the start of the area to a point in it,
 *	unmapping from an intermediate point to the end, and
 *	unmapping between two intermediate points, making a hole.
 *
 * The hole case creates an extra area for the part past the hole.
 */
void unmap_fixup(struct vm_area_struct *area,
	unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt;
	unsigned long end = addr + len;

	if (addr < area->vm_start || addr >= area->vm_end ||
	    end <= area->vm_start || end > area->vm_end ||
	    end < addr)
	{
		printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
			area->vm_start, area->vm_end, addr, end);
		return;
	}

	/* Unmapping the whole area */
	if (addr == area->vm_start && end == area->vm_end) {
		if (area->vm_ops && area->vm_ops->close)
			area->vm_ops->close(area);
		return;
	}

	/* Work out to one of the ends */
	if (addr >= area->vm_start && end == area->vm_end)
		area->vm_end = addr;
	if (addr == area->vm_start && end <= area->vm_end) {
		area->vm_offset += (end - area->vm_start);
		area->vm_start = end;
	}

	/* Unmapping a hole */
	if (addr > area->vm_start && end < area->vm_end)
	{
		/* Add an end mapping -- the beginning is handled below */
		mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
		if (!mpnt)	/* guard against kmalloc failure */
			return;
		*mpnt = *area;
		mpnt->vm_offset += (end - area->vm_start);
		mpnt->vm_start = end;
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count++;
		insert_vm_struct(current, mpnt);
		area->vm_end = addr;	/* truncate the original area */
	}

	/* construct whatever mapping is needed, and reinsert it */
	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	if (!mpnt)	/* guard against kmalloc failure */
		return;
	*mpnt = *area;
	insert_vm_struct(current, mpnt);
}
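
/*
 * Illustrative example of the "hole" case above: unmapping
 * [0x2000, 0x3000) from an area covering [0x1000, 0x4000) inserts a
 * copy for [0x3000, 0x4000) with vm_offset advanced by 0x2000, then
 * truncates the original to [0x1000, 0x2000), which the final
 * kmalloc/insert_vm_struct pair puts back on the process list.
 */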

asmlinkage int sys_mprotect(unsigned long addr, size_t len, unsigned long prot)
{
	return -EINVAL;	/* not implemented yet */
}

asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
	return do_munmap(addr, len);
}

/*
 * Munmap is split into two main parts: this part, which finds what
 * needs doing by walking the process's vm_area list, and
 * unmap_fixup(), which does the per-area work.  Partial unmappings
 * are handled.
 */
int do_munmap(unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt, **npp, *free;

	if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
		return -EINVAL;

	if ((len = PAGE_ALIGN(len)) == 0)
		return 0;

	/*
	 * Check if each memory area is affected - put it on the temporary
	 * free list if so.  The checks are simple: every area overlapping
	 * the range in any way is unlinked and put on the list; if nothing
	 * is put on, nothing is affected.
	 */
	npp = &current->mmap;
	free = NULL;
	for (mpnt = *npp; mpnt != NULL; mpnt = *npp) {
		unsigned long end = addr+len;

		if ((addr < mpnt->vm_start && end <= mpnt->vm_start) ||
		    (addr >= mpnt->vm_end && end > mpnt->vm_end))
		{
			npp = &mpnt->vm_next;
			continue;
		}

		/* unlink the area and put it on the free list */
		*npp = mpnt->vm_next;
		mpnt->vm_next = free;
		free = mpnt;
	}

	if (free == NULL)
		return 0;

	/*
	 * We have the affected areas on the 'free' list, so release them,
	 * and unmap the page range.  If one of the areas is only being
	 * partially unmapped, unmap_fixup() puts new vm_area_struct(s)
	 * back into the address space.
	 */
	while (free) {
		unsigned long st, end;

		mpnt = free;
		free = free->vm_next;

		/* clamp the unmap range to this area */
		st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
		end = addr+len;
		end = end > mpnt->vm_end ? mpnt->vm_end : end;

		if (mpnt->vm_ops && mpnt->vm_ops->unmap)
			mpnt->vm_ops->unmap(mpnt, st, end-st);
		else
			unmap_fixup(mpnt, st, end-st);

		kfree(mpnt);
	}

	unmap_page_range(addr, len);
	return 0;
}
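
/*
 * Illustrative example: with areas A = [0x1000, 0x3000) and
 * B = [0x3000, 0x5000), do_munmap(0x2000, 0x2000) unlinks both onto
 * the free list; A is fixed up to [0x1000, 0x2000), B to
 * [0x4000, 0x5000), and unmap_page_range() then drops the page
 * tables for [0x2000, 0x4000).
 */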

/* This is used for a general mmap of a disk file */
int generic_mmap(struct inode * inode, struct file * file,
	unsigned long addr, size_t len, int prot, unsigned long off)
{
	struct vm_area_struct * mpnt;
	extern struct vm_operations_struct file_mmap;
	struct buffer_head * bh;

	if (prot & PAGE_RW)	/* only read-only mappings for now */
		return -EINVAL;
	if (!inode->i_sb || !S_ISREG(inode->i_mode))
		return -EACCES;
	if (off & (inode->i_sb->s_blocksize - 1))
		return -EINVAL;
	if (!inode->i_op || !inode->i_op->bmap)
		return -ENOEXEC;
	if (!(bh = bread(inode->i_dev,bmap(inode,0),inode->i_sb->s_blocksize)))
		return -EACCES;
	if (!IS_RDONLY(inode)) {
		inode->i_atime = CURRENT_TIME;
		inode->i_dirt = 1;
	}
	brelse(bh);

	mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!mpnt)
		return -ENOMEM;

	unmap_page_range(addr, len);
	mpnt->vm_task = current;
	mpnt->vm_start = addr;
	mpnt->vm_end = addr + len;
	mpnt->vm_page_prot = prot;
	mpnt->vm_share = NULL;
	mpnt->vm_inode = inode;
	inode->i_count++;
	mpnt->vm_offset = off;
	mpnt->vm_ops = &file_mmap;
	insert_vm_struct(current, mpnt);
	merge_segments(current->mmap, NULL, NULL);

	return 0;
}
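
/*
 * Illustrative use (an assumption about callers, not shown in this
 * file): a filesystem makes generic_mmap the mmap entry in its
 * file_operations so that do_mmap() reaches it through
 * file->f_op->mmap, e.g.
 *
 *	struct file_operations some_fs_file_operations = {
 *		...
 *		generic_mmap,	-- mmap entry
 *		...
 *	};
 *
 * where some_fs_file_operations is a hypothetical name.
 */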

/*
 * Insert vm structure into the process list, keeping the list sorted
 * by start address and doing some simple overlap checking.
 */
void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
{
	struct vm_area_struct **nxtpp, *mpnt;

	nxtpp = &t->mmap;

	for (mpnt = t->mmap; mpnt != NULL; mpnt = mpnt->vm_next)
	{
		if (mpnt->vm_start > vmp->vm_start)
			break;
		nxtpp = &mpnt->vm_next;

		if ((vmp->vm_start >= mpnt->vm_start &&
		     vmp->vm_start < mpnt->vm_end) ||
		    (vmp->vm_end >= mpnt->vm_start &&
		     vmp->vm_end < mpnt->vm_end))
			printk("insert_vm_struct: ins area %lx-%lx in area %lx-%lx\n",
			       vmp->vm_start, vmp->vm_end,
			       mpnt->vm_start, mpnt->vm_end);
	}

	vmp->vm_next = mpnt;

	*nxtpp = vmp;
}
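
/*
 * Illustrative example: inserting [0x3000, 0x4000) into the list
 * [0x1000, 0x2000) -> [0x5000, 0x6000) walks past the first area
 * (vm_start 0x1000 is not above 0x3000), stops at the second
 * (vm_start 0x5000 is above 0x3000), and links the new area between
 * the two.
 */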

/*
 * Merge a list of memory segments if possible.  Redundant
 * vm_area_structs are freed.  This assumes the list is ordered by
 * address.
 */
void merge_segments(struct vm_area_struct *mpnt,
	map_mergep_fnp mergep, void *mpd)
{
	struct vm_area_struct *prev, *next;

	if (mpnt == NULL)
		return;

	for (prev = mpnt, mpnt = mpnt->vm_next;
	     mpnt != NULL;
	     prev = mpnt, mpnt = next)
	{
		int mp;

		next = mpnt->vm_next;

		if (mergep == NULL)
		{
			/* default predicate: file offsets must be contiguous */
			unsigned long psz = prev->vm_end - prev->vm_start;
			mp = prev->vm_offset + psz == mpnt->vm_offset;
		}
		else
			mp = (*mergep)(prev, mpnt, mpd);

		/*
		 * Check that the areas are compatible: same operations,
		 * protection, inode and share pointer, adjacent in the
		 * address space, and accepted by the merge predicate.
		 */
		if (prev->vm_ops != mpnt->vm_ops ||
		    prev->vm_page_prot != mpnt->vm_page_prot ||
		    prev->vm_inode != mpnt->vm_inode ||
		    prev->vm_end != mpnt->vm_start ||
		    !mp ||
		    prev->vm_share != mpnt->vm_share ||
		    prev->vm_next != mpnt)
			continue;

		/*
		 * Merge prev with mpnt and set up pointers so the new
		 * big segment can possibly merge with the next one.
		 * The old unused mpnt is freed.
		 */
		prev->vm_end = mpnt->vm_end;
		prev->vm_next = mpnt->vm_next;
		kfree_s(mpnt, sizeof(*mpnt));
		mpnt = prev;
	}
}
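
/*
 * Illustrative example of the default predicate: file-backed areas
 * [0x1000, 0x2000) at vm_offset 0 and [0x2000, 0x3000) at vm_offset
 * 0x1000 with identical vm_ops, vm_page_prot, vm_inode and vm_share
 * satisfy prev->vm_offset + psz == mpnt->vm_offset and collapse into
 * a single area [0x1000, 0x3000) at vm_offset 0.
 */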

/*
 * Map memory not associated with any file into a process address
 * space.  Adjacent memory is merged.
 */
static int anon_map(struct inode *ino, struct file * file,
	unsigned long addr, size_t len, int mask,
	unsigned long off)
{
	struct vm_area_struct * mpnt;

	if (zeromap_page_range(addr, len, mask))
		return -ENOMEM;

	mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!mpnt)
		return -ENOMEM;

	mpnt->vm_task = current;
	mpnt->vm_start = addr;
	mpnt->vm_end = addr + len;
	mpnt->vm_page_prot = mask;
	mpnt->vm_share = NULL;
	mpnt->vm_inode = NULL;
	mpnt->vm_offset = 0;
	mpnt->vm_ops = NULL;
	insert_vm_struct(current, mpnt);
	merge_segments(current->mmap, ignoff_mergep, NULL);

	return 0;
}

/*
 * Merge predicate used by anon_map(): vm_offset is ignored; areas
 * merge only when both map the inode passed through 'data'.
 */
int ignoff_mergep(const struct vm_area_struct *m1,
	const struct vm_area_struct *m2,
	void *data)
{
	if (m1->vm_inode != m2->vm_inode)
		return 0;

	return (struct inode *)data == m1->vm_inode;
}
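
/*
 * Note on the predicate above: anon_map() passes data == NULL, so
 * two areas merge only when both vm_inodes are NULL.  Anonymous
 * areas therefore merge regardless of vm_offset, while file-backed
 * areas never do under this predicate.
 */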