This source file includes following definitions.
- change_protection
- mprotect_fixup_all
- mprotect_fixup_start
- mprotect_fixup_end
- mprotect_fixup_middle
- mprotect_fixup
- sys_mprotect
1
2
3
4
5
6 #include <linux/stat.h>
7 #include <linux/sched.h>
8 #include <linux/kernel.h>
9 #include <linux/mm.h>
10 #include <linux/shm.h>
11 #include <linux/errno.h>
12 #include <linux/mman.h>
13 #include <linux/string.h>
14 #include <linux/malloc.h>
15
16 #include <asm/segment.h>
17 #include <asm/system.h>
18
19 static void change_protection(unsigned long start, unsigned long end, pgprot_t newprot)
20 {
21 pgd_t *dir;
22 pte_t *page_table, entry;
23 unsigned long offset;
24 int nr;
25
26 dir = PAGE_DIR_OFFSET(current, start);
27 offset = (start >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
28 nr = (end - start) >> PAGE_SHIFT;
29 while (nr > 0) {
30 if (pgd_none(*dir)) {
31 dir++;
32 nr = nr - PTRS_PER_PAGE + offset;
33 offset = 0;
34 continue;
35 }
36 if (pgd_bad(*dir)) {
37 printk("Bad page dir entry %08lx\n", pgd_val(*dir));
38 pgd_clear(dir);
39 dir++;
40 nr = nr - PTRS_PER_PAGE + offset;
41 offset = 0;
42 continue;
43 }
44 page_table = offset + (pte_t *) pgd_page(*dir);
45 offset = PTRS_PER_PAGE - offset;
46 if (offset > nr)
47 offset = nr;
48 nr = nr - offset;
49 do {
50 entry = *page_table;
51 if (pte_present(entry))
52 *page_table = pte_modify(entry, newprot);
53 ++page_table;
54 } while (--offset);
55 }
56 return;
57 }
58
59 static inline int mprotect_fixup_all(struct vm_area_struct * vma,
60 int newflags, pgprot_t prot)
61 {
62 vma->vm_flags = newflags;
63 vma->vm_page_prot = prot;
64 return 0;
65 }
66
67 static inline int mprotect_fixup_start(struct vm_area_struct * vma,
68 unsigned long end,
69 int newflags, pgprot_t prot)
70 {
71 struct vm_area_struct * n;
72
73 n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
74 if (!n)
75 return -ENOMEM;
76 *n = *vma;
77 vma->vm_start = end;
78 n->vm_end = end;
79 vma->vm_offset += vma->vm_start - n->vm_start;
80 n->vm_flags = newflags;
81 n->vm_page_prot = prot;
82 if (n->vm_inode)
83 n->vm_inode->i_count++;
84 if (n->vm_ops && n->vm_ops->open)
85 n->vm_ops->open(n);
86 insert_vm_struct(current, n);
87 return 0;
88 }
89
90 static inline int mprotect_fixup_end(struct vm_area_struct * vma,
91 unsigned long start,
92 int newflags, pgprot_t prot)
93 {
94 struct vm_area_struct * n;
95
96 n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
97 if (!n)
98 return -ENOMEM;
99 *n = *vma;
100 vma->vm_end = start;
101 n->vm_start = start;
102 n->vm_offset += n->vm_start - vma->vm_start;
103 n->vm_flags = newflags;
104 n->vm_page_prot = prot;
105 if (n->vm_inode)
106 n->vm_inode->i_count++;
107 if (n->vm_ops && n->vm_ops->open)
108 n->vm_ops->open(n);
109 insert_vm_struct(current, n);
110 return 0;
111 }
112
113 static inline int mprotect_fixup_middle(struct vm_area_struct * vma,
114 unsigned long start, unsigned long end,
115 int newflags, pgprot_t prot)
116 {
117 struct vm_area_struct * left, * right;
118
119 left = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
120 if (!left)
121 return -ENOMEM;
122 right = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
123 if (!right) {
124 kfree(left);
125 return -ENOMEM;
126 }
127 *left = *vma;
128 *right = *vma;
129 left->vm_end = start;
130 vma->vm_start = start;
131 vma->vm_end = end;
132 right->vm_start = end;
133 vma->vm_offset += vma->vm_start - left->vm_start;
134 right->vm_offset += right->vm_start - left->vm_start;
135 vma->vm_flags = newflags;
136 vma->vm_page_prot = prot;
137 if (vma->vm_inode)
138 vma->vm_inode->i_count += 2;
139 if (vma->vm_ops && vma->vm_ops->open) {
140 vma->vm_ops->open(left);
141 vma->vm_ops->open(right);
142 }
143 insert_vm_struct(current, left);
144 insert_vm_struct(current, right);
145 return 0;
146 }
147
148 static int mprotect_fixup(struct vm_area_struct * vma,
149 unsigned long start, unsigned long end, unsigned int newflags)
150 {
151 pgprot_t newprot;
152 int error;
153
154 if (newflags == vma->vm_flags)
155 return 0;
156 newprot = protection_map[vma->vm_flags & 0xf];
157 if (start == vma->vm_start)
158 if (end == vma->vm_end)
159 error = mprotect_fixup_all(vma, newflags, newprot);
160 else
161 error = mprotect_fixup_start(vma, end, newflags, newprot);
162 else if (end == vma->vm_end)
163 error = mprotect_fixup_end(vma, start, newflags, newprot);
164 else
165 error = mprotect_fixup_middle(vma, start, end, newflags, newprot);
166
167 if (error)
168 return error;
169
170 change_protection(start, end, newprot);
171 return 0;
172 }
173
/*
 * mprotect(2): change the protection of [start, start+len) to `prot`.
 *
 * start must be page aligned; len is rounded up to a whole number of
 * pages.  The range may span several vmas, but it must be fully mapped
 * with no holes, or -EFAULT is returned (changes made to earlier vmas
 * before the hole was found are NOT rolled back).  Returns 0 on
 * success, else -EINVAL / -EFAULT / -EACCES / -ENOMEM.
 */
asmlinkage int sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * next;
	int error;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	/* round len up to a page multiple */
	len = (len + ~PAGE_MASK) & PAGE_MASK;
	end = start + len;
	if (end < start)	/* wrapped around the address space */
		return -EINVAL;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
		/*
		 * Bits 4-7 of vm_flags are the VM_MAY* limits for bits 0-3;
		 * requesting a protection the mapping may not have is -EACCES.
		 */
		if ((newflags & ~(newflags >> 4)) & 0xf) {
			error = -EACCES;
			break;
		}

		/* last vma in the range: fix it up and stop */
		if (vma->vm_end >= end) {
			error = mprotect_fixup(vma, nstart, end, newflags);
			break;
		}

		/* fix up this vma entirely, then step to the next one */
		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mprotect_fixup(vma, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		vma = next;
		/* a gap in the mapping is -EFAULT (earlier fixups stand) */
		if (!vma || vma->vm_start != nstart) {
			error = -EFAULT;
			break;
		}
	}
	/* re-merge any now-identical adjacent vmas created by the splits */
	merge_segments(current, start, end);
	return error;
}