This source file includes the following definitions:
- change_pte_range
- change_pmd_range
- change_protection
- mprotect_fixup_all
- mprotect_fixup_start
- mprotect_fixup_end
- mprotect_fixup_middle
- mprotect_fixup
- sys_mprotect
/*
 *	linux/mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/pgtable.h>

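/*
 * Rewrite the protection bits of every present pte in the range that
 * falls under this pmd entry. The range is clamped to PMD_SIZE so the
 * walk never leaves this page table; ptes that are not present are
 * skipped, and pick up the new protection from vma->vm_page_prot when
 * they are next faulted in.
 */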
static inline void change_pte_range(pmd_t * pmd, unsigned long address,
	unsigned long size, pgprot_t newprot)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		printk("change_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t entry = *pte;
		if (pte_present(entry))
			set_pte(pte, pte_modify(entry, newprot));
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
}

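/*
 * Walk the pmd entries under one pgd entry, handing each page table in
 * the range to change_pte_range(). Bad entries are reported and
 * cleared, as above.
 */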
static inline void change_pmd_range(pgd_t * pgd, unsigned long address,
	unsigned long size, pgprot_t newprot)
{
	pmd_t * pmd;
	unsigned long end;

	if (pgd_none(*pgd))
		return;
	if (pgd_bad(*pgd)) {
		printk("change_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd));
		pgd_clear(pgd);
		return;
	}
	pmd = pmd_offset(pgd, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		change_pte_range(pmd, address, end - address, newprot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}

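/*
 * Top level of the page-table walk: give every present page in
 * [start, end) of the current process the new protection. Caches are
 * flushed before the ptes change and the TLB afterwards, so no stale
 * translation with the old protection survives.
 */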
static void change_protection(unsigned long start, unsigned long end, pgprot_t newprot)
{
	pgd_t *dir;
	unsigned long beg = start;

	dir = pgd_offset(current->mm, start);
	flush_cache_range(current->mm, beg, end);
	while (start < end) {
		change_pmd_range(dir, start, end - start, newprot);
		start = (start + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	flush_tlb_range(current->mm, beg, end);
	return;
}

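/*
 * The new protection covers the whole vma, so no split is needed:
 * update the flags and page protection in place.
 */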
static inline int mprotect_fixup_all(struct vm_area_struct * vma,
	int newflags, pgprot_t prot)
{
	vma->vm_flags = newflags;
	vma->vm_page_prot = prot;
	return 0;
}

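/*
 * The new protection covers the head of the vma: a freshly allocated
 * vma takes over [vm_start, end) with the new flags, and the original
 * vma is trimmed to start at 'end', with vm_offset adjusted to match.
 */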
static inline int mprotect_fixup_start(struct vm_area_struct * vma,
	unsigned long end,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * n;

	n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!n)
		return -ENOMEM;
	*n = *vma;
	vma->vm_start = end;
	n->vm_end = end;
	vma->vm_offset += vma->vm_start - n->vm_start;
	n->vm_flags = newflags;
	n->vm_page_prot = prot;
	if (n->vm_inode)
		n->vm_inode->i_count++;
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	insert_vm_struct(current, n);
	return 0;
}

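/*
 * The new protection covers the tail of the vma: trim the original to
 * end at 'start' and insert a new vma for [start, vm_end) that carries
 * the new flags.
 */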
static inline int mprotect_fixup_end(struct vm_area_struct * vma,
	unsigned long start,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * n;

	n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!n)
		return -ENOMEM;
	*n = *vma;
	vma->vm_end = start;
	n->vm_start = start;
	n->vm_offset += n->vm_start - vma->vm_start;
	n->vm_flags = newflags;
	n->vm_page_prot = prot;
	if (n->vm_inode)
		n->vm_inode->i_count++;
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	insert_vm_struct(current, n);
	return 0;
}

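/*
 * The new protection lies strictly inside the vma, so it is split in
 * three: 'left' and 'right' keep the old flags, while the original vma
 * is narrowed to the middle and takes the new ones. The inode gains
 * two references, one for each new mapping.
 */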
static inline int mprotect_fixup_middle(struct vm_area_struct * vma,
	unsigned long start, unsigned long end,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * left, * right;

	left = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!left)
		return -ENOMEM;
	right = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!right) {
		kfree(left);
		return -ENOMEM;
	}
	*left = *vma;
	*right = *vma;
	left->vm_end = start;
	vma->vm_start = start;
	vma->vm_end = end;
	right->vm_start = end;
	vma->vm_offset += vma->vm_start - left->vm_start;
	right->vm_offset += right->vm_start - left->vm_start;
	vma->vm_flags = newflags;
	vma->vm_page_prot = prot;
	if (vma->vm_inode)
		vma->vm_inode->i_count += 2;
	if (vma->vm_ops && vma->vm_ops->open) {
		vma->vm_ops->open(left);
		vma->vm_ops->open(right);
	}
	insert_vm_struct(current, left);
	insert_vm_struct(current, right);
	return 0;
}

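/*
 * Dispatch on how [start, end) lines up with the vma boundaries, then
 * rewrite the affected ptes to the protection derived from the new
 * flags.
 */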
static int mprotect_fixup(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	pgprot_t newprot;
	int error;

	if (newflags == vma->vm_flags)
		return 0;
	newprot = protection_map[newflags & 0xf];
	if (start == vma->vm_start)
		if (end == vma->vm_end)
			error = mprotect_fixup_all(vma, newflags, newprot);
		else
			error = mprotect_fixup_start(vma, end, newflags, newprot);
	else if (end == vma->vm_end)
		error = mprotect_fixup_end(vma, start, newflags, newprot);
	else
		error = mprotect_fixup_middle(vma, start, end, newflags, newprot);

	if (error)
		return error;

	change_protection(start, end, newprot);
	return 0;
}

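/*
 * The mprotect(2) entry point. After validating the arguments, walk
 * the vmas covering [start, end): the range must be fully mapped with
 * no holes (-EFAULT otherwise), and each requested protection bit must
 * be allowed by the corresponding VM_MAY* bit in the vma (-EACCES
 * otherwise). Each piece is adjusted with mprotect_fixup(), and
 * merge_segments() then coalesces neighbouring vmas that have become
 * identical.
 */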
asmlinkage int sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * next;
	int error;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = (len + ~PAGE_MASK) & PAGE_MASK;
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
		if ((newflags & ~(newflags >> 4)) & 0xf) {
			error = -EACCES;
			break;
		}

		if (vma->vm_end >= end) {
			error = mprotect_fixup(vma, nstart, end, newflags);
			break;
		}

		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mprotect_fixup(vma, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		vma = next;
		if (!vma || vma->vm_start != nstart) {
			error = -EFAULT;
			break;
		}
	}
	merge_segments(current, start, end);
	return error;
}
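
For context, here is a user-space view of the interface this syscall implements. This is a minimal sketch, not part of the kernel source: it assumes a Linux/glibc environment (for MAP_ANONYMOUS), and the variable names are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page = (size_t) sysconf(_SC_PAGESIZE);

	/* mmap returns a page-aligned region, as mprotect requires. */
	char *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}
	strcpy(buf, "hello");

	/* Drop the write bit: in the kernel this splits or updates the
	   vma and rewrites the present ptes for the page. */
	if (mprotect(buf, page, PROT_READ) < 0) {
		perror("mprotect");
		return EXIT_FAILURE;
	}
	printf("%s\n", buf);	/* reads still succeed */
	/* buf[0] = 'H'; */	/* a store would now raise SIGSEGV */
	munmap(buf, page);
	return EXIT_SUCCESS;
}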