This source file includes the following definitions.
- do_mmap
- sys_mmap
- sys_munmap
- generic_mmap
1
2
3
4
5
6 #include <linux/stat.h>
7 #include <linux/sched.h>
8 #include <linux/kernel.h>
9 #include <linux/mm.h>
10 #include <linux/shm.h>
11 #include <linux/errno.h>
12 #include <linux/mman.h>
13 #include <linux/string.h>
14
15 #include <asm/segment.h>
16 #include <asm/system.h>
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
/*
 * CODE_SPACE(addr): true if the page containing 'addr' lies below the
 * end of the current task's code segment.
 * NOTE(review): not referenced anywhere in this file — presumably used
 * by other mm code or left over; confirm before removing.
 */
#define CODE_SPACE(addr) \
	(PAGE_ALIGN(addr) < current->start_code + current->end_code)
37
38 int do_mmap(struct file * file, unsigned long addr, unsigned long len,
39 unsigned long prot, unsigned long flags, unsigned long off)
40 {
41 int mask, error;
42
43 if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
44 return -EINVAL;
45
46
47
48
49
50
51
52 switch (flags & MAP_TYPE) {
53 case MAP_SHARED:
54 if ((prot & PROT_WRITE) && !(file->f_mode & 2))
55 return -EINVAL;
56
57 case MAP_PRIVATE:
58 if (!(file->f_mode & 1))
59 return -EINVAL;
60 break;
61
62 default:
63 return -EINVAL;
64 }
65
66
67
68
69
70
71 if (flags & MAP_FIXED) {
72 if (addr & ~PAGE_MASK)
73 return -EINVAL;
74 if (len > TASK_SIZE || addr > TASK_SIZE - len)
75 return -ENOMEM;
76 } else {
77 struct vm_area_struct * vmm;
78
79
80 addr = SHM_RANGE_START;
81 while (addr+len < SHM_RANGE_END) {
82 for (vmm = current->mmap ; vmm ; vmm = vmm->vm_next) {
83 if (addr >= vmm->vm_end)
84 continue;
85 if (addr + len <= vmm->vm_start)
86 continue;
87 addr = PAGE_ALIGN(vmm->vm_end);
88 break;
89 }
90 if (!vmm)
91 break;
92 }
93 if (addr+len >= SHM_RANGE_END)
94 return -ENOMEM;
95 }
96
97
98
99
100
101
102 if (!file->f_op || !file->f_op->mmap)
103 return -ENODEV;
104 mask = 0;
105 if (prot & (PROT_READ | PROT_EXEC))
106 mask |= PAGE_READONLY;
107 if (prot & PROT_WRITE)
108 if ((flags & MAP_TYPE) == MAP_PRIVATE)
109 mask |= PAGE_COW;
110 else
111 mask |= PAGE_RW;
112 if (!mask)
113 return -EINVAL;
114
115 error = file->f_op->mmap(file->f_inode, file, addr, len, mask, off);
116 if (!error)
117 return addr;
118
119 if (!current->errno)
120 current->errno = -error;
121 return -1;
122 }
123
124 extern "C" int sys_mmap(unsigned long *buffer)
125 {
126 unsigned long fd;
127 struct file * file;
128
129 fd = get_fs_long(buffer+4);
130 if (fd >= NR_OPEN || !(file = current->filp[fd]))
131 return -EBADF;
132 return do_mmap(file, get_fs_long(buffer), get_fs_long(buffer+1),
133 get_fs_long(buffer+2), get_fs_long(buffer+3), get_fs_long(buffer+5));
134 }
135
136 extern "C" int sys_munmap(unsigned long addr, size_t len)
137 {
138 struct vm_area_struct *mpnt, **p, *free;
139
140 if ((addr & ~PAGE_MASK) || addr > LONG_MAX || addr == 0 || addr + len > TASK_SIZE)
141 return -EINVAL;
142
143
144
145
146 p = ¤t->mmap;
147 free = NULL;
148
149
150
151
152 while ((mpnt = *p) != NULL) {
153 if (addr > mpnt->vm_start && addr < mpnt->vm_end)
154 goto bad_munmap;
155 if (addr+len > mpnt->vm_start && addr + len < mpnt->vm_end)
156 goto bad_munmap;
157 if (addr <= mpnt->vm_start && addr + len >= mpnt->vm_end) {
158 *p = mpnt->vm_next;
159 mpnt->vm_next = free;
160 free = mpnt;
161 continue;
162 }
163 p = &mpnt->vm_next;
164 }
165
166
167
168
169 while (free) {
170 mpnt = free;
171 free = free->vm_next;
172 if (mpnt->vm_ops->close)
173 mpnt->vm_ops->close(mpnt);
174 kfree(mpnt);
175 }
176
177 unmap_page_range(addr, len);
178 return 0;
179 bad_munmap:
180
181
182
183 while (free) {
184 mpnt = free;
185 free = free->vm_next;
186 mpnt->vm_next = current->mmap;
187 current->mmap = mpnt;
188 }
189 return -EINVAL;
190 }
191
192
193 int generic_mmap(struct inode * inode, struct file * file,
194 unsigned long addr, size_t len, int prot, unsigned long off)
195 {
196 struct vm_area_struct * mpnt;
197 extern struct vm_operations_struct file_mmap;
198 struct buffer_head * bh;
199
200 if (off & (inode->i_sb->s_blocksize - 1))
201 return -EINVAL;
202 if (len > high_memory || off > high_memory - len)
203 return -ENXIO;
204 if (get_limit(USER_DS) != TASK_SIZE)
205 return -EINVAL;
206 if (!inode->i_sb || !S_ISREG(inode->i_mode))
207 return -EACCES;
208 if (!inode->i_op || !inode->i_op->bmap)
209 return -ENOEXEC;
210 if (!(bh = bread(inode->i_dev,bmap(inode,0),inode->i_sb->s_blocksize)))
211 return -EACCES;
212 if (!IS_RDONLY(inode)) {
213 inode->i_atime = CURRENT_TIME;
214 inode->i_dirt = 1;
215 }
216 brelse(bh);
217
218 mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
219 if (!mpnt)
220 return -ENOMEM;
221
222 unmap_page_range(addr, len);
223 mpnt->vm_task = current;
224 mpnt->vm_start = addr;
225 mpnt->vm_end = addr + len;
226 mpnt->vm_page_prot = prot;
227 mpnt->vm_share = NULL;
228 mpnt->vm_inode = inode;
229 inode->i_count++;
230 mpnt->vm_offset = off;
231 mpnt->vm_ops = &file_mmap;
232 mpnt->vm_next = current->mmap;
233 current->mmap = mpnt;
234 #if 0
235 printk("VFS: Loaded mmap at %08x - %08x\n",
236 mpnt->vm_start, mpnt->vm_end);
237 #endif
238 return 0;
239 }
240