This source file includes the following definitions:
- change_protection
- mprotect_fixup_all
- mprotect_fixup_start
- mprotect_fixup_end
- mprotect_fixup_middle
- mprotect_fixup
- sys_mprotect

/*
 *	linux/mm/mprotect.c
 *
 *	(C) Copyright 1994 Linus Torvalds
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/system.h>

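/*
 * Page-table bits that must survive a protection change: the page frame
 * address plus the accessed, dirty, write-through and cache-disable bits.
 */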
#define CHG_MASK (PAGE_MASK | PAGE_ACCESSED | PAGE_DIRTY | PAGE_PWT | PAGE_PCD)

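/*
 * Walk the page tables for the range [start, end) and give every present
 * page the new protection, preserving only the bits in CHG_MASK.  A page
 * directory entry that is not present skips a whole page table at once.
 */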
static void change_protection(unsigned long start, unsigned long end, int prot)
{
	unsigned long *page_table, *dir;
	unsigned long page, offset;
	int nr;

	dir = PAGE_DIR_OFFSET(current->tss.cr3, start);
	offset = (start >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
	nr = (end - start) >> PAGE_SHIFT;
	while (nr > 0) {
		page = *dir;
		dir++;
		if (!(page & PAGE_PRESENT)) {
			nr = nr - PTRS_PER_PAGE + offset;
			offset = 0;
			continue;
		}
		page_table = offset + (unsigned long *) (page & PAGE_MASK);
		offset = PTRS_PER_PAGE - offset;
		if (offset > nr)
			offset = nr;
		nr = nr - offset;
		do {
			page = *page_table;
			if (page & PAGE_PRESENT)
				*page_table = (page & CHG_MASK) | prot;
			++page_table;
		} while (--offset);
	}
	return;
}

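/*
 * Protection change covers the whole vma: just update the flags and the
 * page protection in place, no split needed.
 */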
static inline int mprotect_fixup_all(struct vm_area_struct * vma,
	int newflags, int prot)
{
	vma->vm_flags = newflags;
	vma->vm_page_prot = prot;
	return 0;
}

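/*
 * Protection change covers the head of the vma: allocate a new vma 'n'
 * for the changed part [vm_start, end) and shrink the original to begin
 * at 'end', adjusting its file offset to match the new start.
 */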
static inline int mprotect_fixup_start(struct vm_area_struct * vma,
	unsigned long end,
	int newflags, int prot)
{
	struct vm_area_struct * n;

	n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!n)
		return -ENOMEM;
	*n = *vma;
	vma->vm_start = end;
	n->vm_end = end;
	vma->vm_offset += vma->vm_start - n->vm_start;
	n->vm_flags = newflags;
	n->vm_page_prot = prot;
	if (n->vm_inode)
		n->vm_inode->i_count++;
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	insert_vm_struct(current, n);
	return 0;
}

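/*
 * Protection change covers the tail of the vma: truncate the original
 * at 'start' and give the changed part [start, vm_end) to a new vma 'n'
 * with its file offset adjusted accordingly.
 */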
static inline int mprotect_fixup_end(struct vm_area_struct * vma,
	unsigned long start,
	int newflags, int prot)
{
	struct vm_area_struct * n;

	n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!n)
		return -ENOMEM;
	*n = *vma;
	vma->vm_end = start;
	n->vm_start = start;
	n->vm_offset += n->vm_start - vma->vm_start;
	n->vm_flags = newflags;
	n->vm_page_prot = prot;
	if (n->vm_inode)
		n->vm_inode->i_count++;
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	insert_vm_struct(current, n);
	return 0;
}

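/*
 * Protection change lies strictly inside the vma, which must be split
 * in three: the original becomes the middle part and takes the new
 * flags, while 'left' and 'right' keep the old protection.  The inode
 * gains two references, one for each new vma.
 */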
static inline int mprotect_fixup_middle(struct vm_area_struct * vma,
	unsigned long start, unsigned long end,
	int newflags, int prot)
{
	struct vm_area_struct * left, * right;

	left = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!left)
		return -ENOMEM;
	right = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!right) {
		kfree(left);
		return -ENOMEM;
	}
	*left = *vma;
	*right = *vma;
	left->vm_end = start;
	vma->vm_start = start;
	vma->vm_end = end;
	right->vm_start = end;
	vma->vm_offset += vma->vm_start - left->vm_start;
	right->vm_offset += right->vm_start - left->vm_start;
	vma->vm_flags = newflags;
	vma->vm_page_prot = prot;
	if (vma->vm_inode)
		vma->vm_inode->i_count += 2;
	if (vma->vm_ops && vma->vm_ops->open) {
		vma->vm_ops->open(left);
		vma->vm_ops->open(right);
	}
	insert_vm_struct(current, left);
	insert_vm_struct(current, right);
	return 0;
}

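/*
 * Turn the VM_* flags into hardware page protection bits, then dispatch
 * to the fixup routine matching how [start, end) overlaps the vma and
 * rewrite the affected page-table entries.
 */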
static int mprotect_fixup(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	int prot, error;

	if (newflags == vma->vm_flags)
		return 0;
	prot = PAGE_PRESENT;
	if (newflags & (VM_READ | VM_EXEC))
		prot |= PAGE_READONLY;
	if (newflags & VM_WRITE)
		if (newflags & VM_SHARED)
			prot |= PAGE_SHARED;
		else
			prot |= PAGE_COPY;

	if (start == vma->vm_start)
		if (end == vma->vm_end)
			error = mprotect_fixup_all(vma, newflags, prot);
		else
			error = mprotect_fixup_start(vma, end, newflags, prot);
	else if (end == vma->vm_end)
		error = mprotect_fixup_end(vma, start, newflags, prot);
	else
		error = mprotect_fixup_middle(vma, start, end, newflags, prot);

	if (error)
		return error;

	change_protection(start, end, prot);
	return 0;
}

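/*
 * The mprotect() system call: change the protection of [start, start+len).
 * The range must be page-aligned and fully mapped without holes, and each
 * requested protection must be permitted by the vma's VM_MAY* bits.  Any
 * adjacent vmas left with identical flags afterwards are merged again.
 */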
asmlinkage int sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long end, tmp;
	struct vm_area_struct * vma, * next;
	int error;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = (len + ~PAGE_MASK) & PAGE_MASK;
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
		return -EINVAL;
	if (end == start)
		return 0;
	for (vma = current->mm->mmap ; ; vma = vma->vm_next) {
		if (!vma)
			return -EFAULT;
		if (vma->vm_end > start)
			break;
	}
	if (vma->vm_start > start)
		return -EFAULT;

	for ( ; ; ) {
		unsigned int newflags;

		/* Here we know that  vma->vm_start <= start < vma->vm_end. */

		newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
		/* each requested PROT bit must be backed by the matching VM_MAY* bit */
		if ((newflags & ~(newflags >> 4)) & 0xf) {
			error = -EACCES;
			break;
		}

		if (vma->vm_end >= end) {
			error = mprotect_fixup(vma, start, end, newflags);
			break;
		}

		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mprotect_fixup(vma, start, tmp, newflags);
		if (error)
			break;
		start = tmp;
		vma = next;
		if (!vma || vma->vm_start != start) {
			error = -EFAULT;
			break;
		}
	}
	merge_segments(current->mm->mmap);
	return error;
}
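
/*
 * Illustration only - a minimal userspace sketch (not part of this kernel
 * file) of how sys_mprotect is reached through the mprotect(2) library
 * wrapper: map an anonymous page read-write, then drop the write bit.
 * It assumes a libc providing mmap/mprotect and MAP_ANONYMOUS, and is
 * wrapped in #if 0 so it never affects this file.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, psize, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "hello");			/* page is writable here */
	if (mprotect(p, psize, PROT_READ))	/* drop PROT_WRITE */
		return 1;
	printf("%s\n", p);			/* reading is still fine */
	/* *p = 'x'; would now fault with SIGSEGV */
	munmap(p, psize);
	return 0;
}
#endif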