This source file includes the following definitions.
- change_protection
- mprotect_fixup_all
- mprotect_fixup_start
- mprotect_fixup_end
- mprotect_fixup_middle
- mprotect_fixup
- sys_mprotect
1
2
3
4
5
6 #include <linux/stat.h>
7 #include <linux/sched.h>
8 #include <linux/kernel.h>
9 #include <linux/mm.h>
10 #include <linux/shm.h>
11 #include <linux/errno.h>
12 #include <linux/mman.h>
13 #include <linux/string.h>
14 #include <linux/malloc.h>
15
16 #include <asm/segment.h>
17 #include <asm/system.h>
18 #include <asm/pgtable.h>
19
20 static void change_protection(unsigned long start, unsigned long end, pgprot_t newprot)
21 {
22 pgd_t *dir;
23 pte_t *page_table, entry;
24 unsigned long offset;
25 int nr;
26
27 dir = PAGE_DIR_OFFSET(current, start);
28 offset = (start >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
29 nr = (end - start) >> PAGE_SHIFT;
30 while (nr > 0) {
31 if (pgd_none(*dir)) {
32 dir++;
33 nr = nr - PTRS_PER_PAGE + offset;
34 offset = 0;
35 continue;
36 }
37 if (pgd_bad(*dir)) {
38 printk("Bad page dir entry %08lx\n", pgd_val(*dir));
39 pgd_clear(dir);
40 dir++;
41 nr = nr - PTRS_PER_PAGE + offset;
42 offset = 0;
43 continue;
44 }
45 page_table = offset + (pte_t *) pgd_page(*dir);
46 offset = PTRS_PER_PAGE - offset;
47 if (offset > nr)
48 offset = nr;
49 nr = nr - offset;
50 do {
51 entry = *page_table;
52 if (pte_present(entry))
53 *page_table = pte_modify(entry, newprot);
54 ++page_table;
55 } while (--offset);
56 }
57 return;
58 }
59
60 static inline int mprotect_fixup_all(struct vm_area_struct * vma,
61 int newflags, pgprot_t prot)
62 {
63 vma->vm_flags = newflags;
64 vma->vm_page_prot = prot;
65 return 0;
66 }
67
68 static inline int mprotect_fixup_start(struct vm_area_struct * vma,
69 unsigned long end,
70 int newflags, pgprot_t prot)
71 {
72 struct vm_area_struct * n;
73
74 n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
75 if (!n)
76 return -ENOMEM;
77 *n = *vma;
78 vma->vm_start = end;
79 n->vm_end = end;
80 vma->vm_offset += vma->vm_start - n->vm_start;
81 n->vm_flags = newflags;
82 n->vm_page_prot = prot;
83 if (n->vm_inode)
84 n->vm_inode->i_count++;
85 if (n->vm_ops && n->vm_ops->open)
86 n->vm_ops->open(n);
87 insert_vm_struct(current, n);
88 return 0;
89 }
90
91 static inline int mprotect_fixup_end(struct vm_area_struct * vma,
92 unsigned long start,
93 int newflags, pgprot_t prot)
94 {
95 struct vm_area_struct * n;
96
97 n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
98 if (!n)
99 return -ENOMEM;
100 *n = *vma;
101 vma->vm_end = start;
102 n->vm_start = start;
103 n->vm_offset += n->vm_start - vma->vm_start;
104 n->vm_flags = newflags;
105 n->vm_page_prot = prot;
106 if (n->vm_inode)
107 n->vm_inode->i_count++;
108 if (n->vm_ops && n->vm_ops->open)
109 n->vm_ops->open(n);
110 insert_vm_struct(current, n);
111 return 0;
112 }
113
114 static inline int mprotect_fixup_middle(struct vm_area_struct * vma,
115 unsigned long start, unsigned long end,
116 int newflags, pgprot_t prot)
117 {
118 struct vm_area_struct * left, * right;
119
120 left = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
121 if (!left)
122 return -ENOMEM;
123 right = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
124 if (!right) {
125 kfree(left);
126 return -ENOMEM;
127 }
128 *left = *vma;
129 *right = *vma;
130 left->vm_end = start;
131 vma->vm_start = start;
132 vma->vm_end = end;
133 right->vm_start = end;
134 vma->vm_offset += vma->vm_start - left->vm_start;
135 right->vm_offset += right->vm_start - left->vm_start;
136 vma->vm_flags = newflags;
137 vma->vm_page_prot = prot;
138 if (vma->vm_inode)
139 vma->vm_inode->i_count += 2;
140 if (vma->vm_ops && vma->vm_ops->open) {
141 vma->vm_ops->open(left);
142 vma->vm_ops->open(right);
143 }
144 insert_vm_struct(current, left);
145 insert_vm_struct(current, right);
146 return 0;
147 }
148
149 static int mprotect_fixup(struct vm_area_struct * vma,
150 unsigned long start, unsigned long end, unsigned int newflags)
151 {
152 pgprot_t newprot;
153 int error;
154
155 if (newflags == vma->vm_flags)
156 return 0;
157 newprot = protection_map[newflags & 0xf];
158 if (start == vma->vm_start)
159 if (end == vma->vm_end)
160 error = mprotect_fixup_all(vma, newflags, newprot);
161 else
162 error = mprotect_fixup_start(vma, end, newflags, newprot);
163 else if (end == vma->vm_end)
164 error = mprotect_fixup_end(vma, start, newflags, newprot);
165 else
166 error = mprotect_fixup_middle(vma, start, end, newflags, newprot);
167
168 if (error)
169 return error;
170
171 change_protection(start, end, newprot);
172 return 0;
173 }
174
175 asmlinkage int sys_mprotect(unsigned long start, size_t len, unsigned long prot)
176 {
177 unsigned long nstart, end, tmp;
178 struct vm_area_struct * vma, * next;
179 int error;
180
181 if (start & ~PAGE_MASK)
182 return -EINVAL;
183 len = (len + ~PAGE_MASK) & PAGE_MASK;
184 end = start + len;
185 if (end < start)
186 return -EINVAL;
187 if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
188 return -EINVAL;
189 if (end == start)
190 return 0;
191 vma = find_vma(current, start);
192 if (!vma || vma->vm_start > start)
193 return -EFAULT;
194
195 for (nstart = start ; ; ) {
196 unsigned int newflags;
197
198
199
200 newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
201 if ((newflags & ~(newflags >> 4)) & 0xf) {
202 error = -EACCES;
203 break;
204 }
205
206 if (vma->vm_end >= end) {
207 error = mprotect_fixup(vma, nstart, end, newflags);
208 break;
209 }
210
211 tmp = vma->vm_end;
212 next = vma->vm_next;
213 error = mprotect_fixup(vma, nstart, tmp, newflags);
214 if (error)
215 break;
216 nstart = tmp;
217 vma = next;
218 if (!vma || vma->vm_start != nstart) {
219 error = -EFAULT;
220 break;
221 }
222 }
223 merge_segments(current, start, end);
224 return error;
225 }