This source file includes the following definitions:
- do_mmap
- sys_mmap
- unmap_fixup
- sys_mprotect
- sys_munmap
- do_munmap
- generic_mmap
- insert_vm_struct
- merge_segments
- anon_map
- ignoff_mergep
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/system.h>

static int anon_map(struct inode *, struct file *,
		    unsigned long, size_t, int,
		    unsigned long);
/* does the page-aligned address still lie within the current
 * process's code space? */
#define CODE_SPACE(addr) \
	(PAGE_ALIGN(addr) < current->start_code + current->end_code)

int do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off)
{
	int mask, error;

	if ((len = PAGE_ALIGN(len)) == 0)
		return addr;

	if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
		return -EINVAL;

	/* Do simple checking here so the lower-level routines won't have
	 * to.  Access permissions are assumed to have been handled by
	 * the open of the memory object. */
	if (file != NULL)
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot & PROT_WRITE) && !(file->f_mode & 2))
				return -EACCES;
			/* fall through */
		case MAP_PRIVATE:
			if (!(file->f_mode & 1))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}

	/* Obtain the address to map to: verify a fixed address, or pick
	 * an unused stretch of the shared-memory range. */
	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		if (len > TASK_SIZE || addr > TASK_SIZE - len)
			return -EINVAL;
	} else {
		struct vm_area_struct * vmm;

		/* first-fit scan for a gap no existing area covers */
		addr = SHM_RANGE_START;
		while (addr+len < SHM_RANGE_END) {
			for (vmm = current->mmap ; vmm ; vmm = vmm->vm_next) {
				if (addr >= vmm->vm_end)
					continue;
				if (addr + len <= vmm->vm_start)
					continue;
				addr = PAGE_ALIGN(vmm->vm_end);
				break;
			}
			if (!vmm)
				break;
		}
		if (addr+len >= SHM_RANGE_END)
			return -ENOMEM;
	}

	/* Determine the object being mapped and call the appropriate
	 * specific mapper, after translating prot/flags into a page
	 * protection mask. */
	if (file && (!file->f_op || !file->f_op->mmap))
		return -ENODEV;
	mask = 0;
	if (prot & (PROT_READ | PROT_EXEC))
		mask |= PAGE_READONLY;
	if (prot & PROT_WRITE)
		if ((flags & MAP_TYPE) == MAP_PRIVATE)
			mask |= PAGE_COW;
		else
			mask |= PAGE_RW;
	if (!mask)
		return -EINVAL;

	do_munmap(addr, len);	/* clear old maps in the range */

	if (file)
		error = file->f_op->mmap(file->f_inode, file, addr, len, mask, off);
	else
		error = anon_map(NULL, NULL, addr, len, mask, off);

	if (!error)
		return addr;

	if (!current->errno)
		current->errno = -error;
	return -1;
}
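
As a concrete instance of the protection translation above: PROT_READ|PROT_WRITE on a MAP_PRIVATE mapping yields PAGE_COW (copy-on-write), while the same protections on a MAP_SHARED mapping yield PAGE_RW. A standalone sketch of that step, for illustration only (every X_* constant below is a placeholder, not the kernel's real definition):

/* Sketch of do_mmap()'s prot/flags-to-mask step; placeholder constants. */
#define X_PROT_READ      0x1
#define X_PROT_WRITE     0x2
#define X_PROT_EXEC      0x4
#define X_MAP_TYPE       0x0f
#define X_MAP_PRIVATE    0x02
#define X_PAGE_READONLY  0x1
#define X_PAGE_RW        0x2
#define X_PAGE_COW       0x4

static int prot_to_mask(unsigned long prot, unsigned long flags)
{
	int mask = 0;

	if (prot & (X_PROT_READ | X_PROT_EXEC))
		mask |= X_PAGE_READONLY;
	if (prot & X_PROT_WRITE)
		mask |= ((flags & X_MAP_TYPE) == X_MAP_PRIVATE)
				? X_PAGE_COW	/* private write: copy-on-write */
				: X_PAGE_RW;	/* shared write: plain read-write */
	return mask;	/* zero means do_mmap() rejects with -EINVAL */
}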

asmlinkage int sys_mmap(unsigned long *buffer)
{
	int error;
	unsigned long fd;
	struct file * file;

	error = verify_area(VERIFY_READ, buffer, 6*4);
	if (error)
		return error;
	fd = get_fs_long(buffer+4);
	if (fd >= NR_OPEN || !(file = current->filp[fd]))
		return -EBADF;
	return do_mmap(file, get_fs_long(buffer), get_fs_long(buffer+1),
		get_fs_long(buffer+2), get_fs_long(buffer+3), get_fs_long(buffer+5));
}
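
Note that sys_mmap() takes a single pointer to a block of six longwords rather than six separate arguments. A hedged sketch of the parameter block a caller would build, in the order the get_fs_long() reads above imply (fd is a hypothetical open descriptor; PROT_/MAP_ constants as in <sys/mman.h>):

unsigned long buffer[6];

buffer[0] = 0;			/* addr  - 0 lets the kernel pick one */
buffer[1] = 2 * 4096;		/* len   - two pages                  */
buffer[2] = PROT_READ;		/* prot                               */
buffer[3] = MAP_PRIVATE;	/* flags                              */
buffer[4] = fd;			/* fd    - open file descriptor       */
buffer[5] = 0;			/* off   - byte offset into the file  */
/* the mmap system call is then invoked with buffer as its only
 * argument, e.g. through the C library's wrapper */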

/*
 * Fix up a mapping after part of it has been unmapped.  By the time
 * this runs, the area has already been unlinked from the process
 * mapping list, so whatever survives must be re-inserted.  Four cases:
 *   1. the whole area is unmapped;
 *   2. the unmap runs from the start of the area to a point inside it;
 *   3. the unmap runs from a point inside the area to its end;
 *   4. the unmap punches a hole, leaving an area on each side.
 */
void unmap_fixup(struct vm_area_struct *area,
		 unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt;
	unsigned long end = addr + len;

	if (addr < area->vm_start || addr >= area->vm_end ||
	    end <= area->vm_start || end > area->vm_end ||
	    end < addr)
	{
		printk("unmap_fixup: area=%lx-%lx, unmap %lx-%lx!!\n",
		       area->vm_start, area->vm_end, addr, end);
		return;
	}

	/* Unmapping the whole area: nothing survives. */
	if (addr == area->vm_start && end == area->vm_end) {
		if (area->vm_ops && area->vm_ops->close)
			area->vm_ops->close(area);
		return;
	}

	/* Unmapping one of the two ends: shrink the area in place. */
	if (addr >= area->vm_start && end == area->vm_end)
		area->vm_end = addr;
	if (addr == area->vm_start && end <= area->vm_end) {
		area->vm_offset += (end - area->vm_start);
		area->vm_start = end;
	}

	/* Unmapping a hole: clone the tail, truncate the head. */
	if (addr > area->vm_start && end < area->vm_end)
	{
		/* Add the end mapping -- the beginning is handled below.
		 * Note: the allocation is not checked for failure here. */
		mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);

		*mpnt = *area;
		mpnt->vm_offset += (end - area->vm_start);
		mpnt->vm_start = end;
		if (mpnt->vm_inode)
			mpnt->vm_inode->i_count++;
		insert_vm_struct(current, mpnt);
		area->vm_end = addr;	/* truncate to the head */
	}

	/* Re-insert whatever part of the area survived. */
	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	*mpnt = *area;
	insert_vm_struct(current, mpnt);
}
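
To make the hole case concrete, here is a trace with hypothetical addresses (an 8-page area losing two pages from its middle):

/* area spans 0x10000-0x18000, file offset 0; unmap 0x12000-0x14000:
 *
 *   hole branch:  the clone gets vm_start = 0x14000, vm_end = 0x18000,
 *                 vm_offset = 0x4000, and is inserted (the tail);
 *                 area->vm_end is then truncated to 0x12000.
 *   final clone:  re-inserts the head, 0x10000-0x12000, offset 0.
 *
 * The caller (do_munmap) has already unlinked 'area' and kfree()s it
 * afterwards, which is why the surviving pieces must be fresh copies. */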

/* mprotect() is not implemented yet. */
asmlinkage int sys_mprotect(unsigned long addr, size_t len, unsigned long prot)
{
	return -EINVAL;
}

asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
	return do_munmap(addr, len);
}

/*
 * Unmapping is split in two parts: this routine finds the areas
 * affected and unlinks them; each area's own unmap operation (or the
 * default unmap_fixup()) then rebuilds whatever survives.  Partial
 * unmappings are handled.
 */
int do_munmap(unsigned long addr, size_t len)
{
	struct vm_area_struct *mpnt, **npp, *free;

	if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
		return -EINVAL;

	if ((len = PAGE_ALIGN(len)) == 0)
		return 0;

	/* Walk the list and move every area that overlaps the range
	 * [addr, addr+len) onto a temporary 'free' list.  If nothing
	 * overlaps, nothing is affected. */
	npp = &current->mmap;
	free = NULL;
	for (mpnt = *npp; mpnt != NULL; mpnt = *npp) {
		unsigned long end = addr+len;

		if ((addr < mpnt->vm_start && end <= mpnt->vm_start) ||
		    (addr >= mpnt->vm_end && end > mpnt->vm_end))
		{
			npp = &mpnt->vm_next;
			continue;
		}

		*npp = mpnt->vm_next;
		mpnt->vm_next = free;
		free = mpnt;
	}

	if (free == NULL)
		return 0;

	/* Release the areas on the 'free' list.  A partially unmapped
	 * area puts new vm_area_struct(s) back into the address space
	 * via unmap_fixup(). */
	while (free) {
		unsigned long st, end;

		mpnt = free;
		free = free->vm_next;

		st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
		end = addr+len;
		end = end > mpnt->vm_end ? mpnt->vm_end : end;

		if (mpnt->vm_ops && mpnt->vm_ops->unmap)
			mpnt->vm_ops->unmap(mpnt, st, end-st);
		else
			unmap_fixup(mpnt, st, end-st);

		kfree(mpnt);
	}

	unmap_page_range(addr, len);
	return 0;
}
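
The keep/discard test in the first loop is written as its negation, which is easy to misread. An equivalent positive formulation of the overlap check (a sketch, not the kernel's code; len is already page-aligned and non-zero at this point):

/* does [addr, addr+len) intersect [vm_start, vm_end) ? */
static int area_overlaps(unsigned long addr, unsigned long len,
			 unsigned long vm_start, unsigned long vm_end)
{
	unsigned long end = addr + len;

	return addr < vm_end && end > vm_start;
}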

int generic_mmap(struct inode * inode, struct file * file,
	unsigned long addr, size_t len, int prot, unsigned long off)
{
	struct vm_area_struct * mpnt;
	extern struct vm_operations_struct file_mmap;
	struct buffer_head * bh;

	if (prot & PAGE_RW)	/* only read-only and copy-on-write supported */
		return -EINVAL;
	if (!inode->i_sb || !S_ISREG(inode->i_mode))	/* check i_sb before using it */
		return -EACCES;
	if (off & (inode->i_sb->s_blocksize - 1))	/* offset must be block-aligned */
		return -EINVAL;
	if (!inode->i_op || !inode->i_op->bmap)
		return -ENOEXEC;
	if (!(bh = bread(inode->i_dev,bmap(inode,0),inode->i_sb->s_blocksize)))
		return -EACCES;
	if (!IS_RDONLY(inode)) {
		inode->i_atime = CURRENT_TIME;
		inode->i_dirt = 1;
	}
	brelse(bh);

	mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!mpnt)
		return -ENOMEM;

	unmap_page_range(addr, len);
	mpnt->vm_task = current;
	mpnt->vm_start = addr;
	mpnt->vm_end = addr + len;
	mpnt->vm_page_prot = prot;
	mpnt->vm_share = NULL;
	mpnt->vm_inode = inode;
	inode->i_count++;
	mpnt->vm_offset = off;
	mpnt->vm_ops = &file_mmap;
	insert_vm_struct(current, mpnt);
	merge_segments(current->mmap, NULL, NULL);

	return 0;
}
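
A filesystem opts into this default by pointing the mmap member of its file_operations at generic_mmap(). A hedged sketch of such a hookup (the struct layout and the other members vary by kernel version; designated initializers are used here for readability only):

static struct file_operations example_file_ops = {
	/* ...lseek, read, write, readdir, select, ioctl, ... */
	.mmap = generic_mmap,	/* use the generic file-backed mapping */
};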

/*
 * Insert a vm structure into a process's mapping list, keeping the
 * list sorted by start address and warning if the new area overlaps
 * an existing one.
 */
void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
{
	struct vm_area_struct **nxtpp, *mpnt;

	nxtpp = &t->mmap;

	for(mpnt = t->mmap; mpnt != NULL; mpnt = mpnt->vm_next)
	{
		if (mpnt->vm_start > vmp->vm_start)
			break;
		nxtpp = &mpnt->vm_next;

		if ((vmp->vm_start >= mpnt->vm_start &&
		     vmp->vm_start < mpnt->vm_end) ||
		    (vmp->vm_end >= mpnt->vm_start &&
		     vmp->vm_end < mpnt->vm_end))
			printk("insert_vm_struct: ins area %lx-%lx in area %lx-%lx\n",
			       vmp->vm_start, vmp->vm_end,
			       mpnt->vm_start, mpnt->vm_end);
	}

	vmp->vm_next = mpnt;

	*nxtpp = vmp;
}

/*
 * Merge adjacent, compatible areas of a sorted mapping list, freeing
 * the redundant vm_area_structs.  'mergep' is an optional predicate
 * deciding whether two areas' offsets permit a merge; without one,
 * file offsets must be contiguous.
 */
void merge_segments(struct vm_area_struct *mpnt,
		    map_mergep_fnp mergep, void *mpd)
{
	struct vm_area_struct *prev, *next;

	if (mpnt == NULL)
		return;

	for(prev = mpnt, mpnt = mpnt->vm_next;
	    mpnt != NULL;
	    prev = mpnt, mpnt = next)
	{
		int mp;

		next = mpnt->vm_next;

		if (mergep == NULL)
		{
			unsigned long psz = prev->vm_end - prev->vm_start;
			mp = prev->vm_offset + psz == mpnt->vm_offset;
		}
		else
			mp = (*mergep)(prev, mpnt, mpd);

		/* To merge, the two areas must be adjacent in both the
		 * address space and the list, agree on operations,
		 * protection, inode and share pointer, and satisfy the
		 * offset predicate above. */
		if (prev->vm_ops != mpnt->vm_ops ||
		    prev->vm_page_prot != mpnt->vm_page_prot ||
		    prev->vm_inode != mpnt->vm_inode ||
		    prev->vm_end != mpnt->vm_start ||
		    !mp ||
		    prev->vm_share != mpnt->vm_share ||
		    prev->vm_next != mpnt)
			continue;

		/* Merge mpnt into prev and fix the links so the enlarged
		 * segment can in turn merge with the next one; the now
		 * unused mpnt is freed. */
		prev->vm_end = mpnt->vm_end;
		prev->vm_next = mpnt->vm_next;
		kfree_s(mpnt, sizeof(*mpnt));
		mpnt = prev;
	}
}
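
Any function matching map_mergep_fnp can replace the default offset-contiguity test. A hypothetical predicate (illustration only, not part of this file) that vetoes every merge, turning merge_segments() into a pure walk:

static int never_mergep(const struct vm_area_struct *m1,
			const struct vm_area_struct *m2,
			void *data)
{
	return 0;	/* never allow adjacent areas to coalesce */
}

/* merge_segments(current->mmap, never_mergep, NULL); */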

/*
 * Map memory not associated with any file into a process address
 * space; adjacent anonymous areas are merged.
 */
static int anon_map(struct inode *ino, struct file * file,
		    unsigned long addr, size_t len, int mask,
		    unsigned long off)
{
	struct vm_area_struct * mpnt;

	if (zeromap_page_range(addr, len, mask))
		return -ENOMEM;

	mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!mpnt)
		return -ENOMEM;

	mpnt->vm_task = current;
	mpnt->vm_start = addr;
	mpnt->vm_end = addr + len;
	mpnt->vm_page_prot = mask;
	mpnt->vm_share = NULL;
	mpnt->vm_inode = NULL;
	mpnt->vm_offset = 0;
	mpnt->vm_ops = NULL;
	insert_vm_struct(current, mpnt);
	merge_segments(current->mmap, ignoff_mergep, NULL);

	return 0;
}

/* Merge predicate that ignores vm_offset: allow a merge only when both
 * areas refer to the inode passed in through 'data'. */
int ignoff_mergep(const struct vm_area_struct *m1,
		  const struct vm_area_struct *m2,
		  void *data)
{
	if (m1->vm_inode != m2->vm_inode)
		return 0;

	return (struct inode *)data == m1->vm_inode;
}
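
As called from anon_map() above, the data argument is NULL, so the predicate succeeds exactly when both areas are anonymous (vm_inode == NULL): adjacent anonymous mappings coalesce regardless of their vm_offset values.

	merge_segments(current->mmap, ignoff_mergep, NULL);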