This source file includes the following definitions:
- change_protection
- mprotect_fixup_all
- mprotect_fixup_start
- mprotect_fixup_end
- mprotect_fixup_middle
- mprotect_fixup
- sys_mprotect
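For context, this is roughly how user space reaches sys_mprotect below, via the mprotect(2) system call. A minimal sketch, assuming a POSIX-style libc; the mapping parameters are illustrative and not taken from this file:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = getpagesize();
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "hello");			/* page is writable here */
	if (mprotect(p, len, PROT_READ))	/* drop write permission */
		return 1;
	printf("%s\n", p);			/* reads still succeed */
	munmap(p, len);
	return 0;
}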
/*
 *	linux/mm/mprotect.c
 *
 *	(C) 1994  Linus Torvalds
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/system.h>

/*
 * Bits preserved when a PTE's protection is rewritten: the page frame
 * address plus the accessed, dirty and cache-control (PWT/PCD) bits.
 */
#define CHG_MASK (PAGE_MASK | PAGE_ACCESSED | PAGE_DIRTY | PAGE_PWT | PAGE_PCD)

/*
 * Walk the current process's page tables from start to end and give
 * every present page the new protection, keeping the CHG_MASK bits.
 */
static void change_protection(unsigned long start, unsigned long end, int prot)
{
	unsigned long *page_table, *dir;
	unsigned long page, offset;
	int nr;

	dir = PAGE_DIR_OFFSET(current->tss.cr3, start);
	offset = (start >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
	nr = (end - start) >> PAGE_SHIFT;
	while (nr > 0) {
		page = *dir;
		dir++;
		if (!(page & PAGE_PRESENT)) {
			/* no page table here: skip the rest of its range */
			nr = nr - PTRS_PER_PAGE + offset;
			offset = 0;
			continue;
		}
		page_table = offset + (unsigned long *) (page & PAGE_MASK);
		/* offset now becomes the number of entries to do in this table */
		offset = PTRS_PER_PAGE - offset;
		if (offset > nr)
			offset = nr;
		nr = nr - offset;
		do {
			page = *page_table;
			if (page & PAGE_PRESENT)
				*page_table = (page & CHG_MASK) | prot;
			++page_table;
		} while (--offset);
	}
	invalidate();	/* flush the TLB, or stale entries could keep the old protection */
	return;
}
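The offset/nr bookkeeping above is the delicate part: the first page table may be entered part-way through, every later one from entry 0. A standalone sketch of just that arithmetic; PTRS_PER_PAGE and the sample figures are assumptions for illustration (1024 entries matches i386 with 4 kB pages):

#include <stdio.h>

#define PTRS_PER_PAGE 1024

int main(void)
{
	int nr = 3000;		/* pages left to visit */
	int offset = 900;	/* index of the first entry in its table */

	while (nr > 0) {
		int chunk = PTRS_PER_PAGE - offset;	/* room left in this table */
		if (chunk > nr)
			chunk = nr;
		printf("rewrite %d entries in this table\n", chunk);
		nr -= chunk;
		offset = 0;	/* later tables start at entry 0 */
	}
	return 0;
}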

/* The whole area changes: update the flags in place, no split needed. */
static inline int mprotect_fixup_all(struct vm_area_struct * vma,
	int newflags, int prot)
{
	vma->vm_flags = newflags;
	vma->vm_page_prot = prot;
	merge_segments(current->mm->mmap);
	return 0;
}

/*
 * Only the head of the area changes: a new area n takes [vm_start, end)
 * with the new protection, while vma itself keeps [end, vm_end).
 */
static inline int mprotect_fixup_start(struct vm_area_struct * vma,
	unsigned long end,
	int newflags, int prot)
{
	struct vm_area_struct * n;

	n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!n)
		return -ENOMEM;
	*n = *vma;
	vma->vm_start = end;
	n->vm_end = end;
	vma->vm_offset += vma->vm_start - n->vm_start;	/* vma's start moved up */
	n->vm_flags = newflags;
	n->vm_page_prot = prot;
	if (n->vm_inode)
		n->vm_inode->i_count++;	/* one more area now references the inode */
	insert_vm_struct(current, n);
	merge_segments(current->mm->mmap);
	return 0;
}

/*
 * Only the tail changes: a new area n takes [start, vm_end) with the
 * new protection, while vma itself keeps [vm_start, start).
 */
static inline int mprotect_fixup_end(struct vm_area_struct * vma,
	unsigned long start,
	int newflags, int prot)
{
	struct vm_area_struct * n;

	n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!n)
		return -ENOMEM;
	*n = *vma;
	vma->vm_end = start;
	n->vm_start = start;
	n->vm_offset += n->vm_start - vma->vm_start;	/* n starts deeper into the object */
	n->vm_flags = newflags;
	n->vm_page_prot = prot;
	if (n->vm_inode)
		n->vm_inode->i_count++;
	insert_vm_struct(current, n);
	merge_segments(current->mm->mmap);
	return 0;
}

/*
 * The middle changes: the area is split in three.  left and right keep
 * the old protection, vma shrinks to [start, end) and takes the new one.
 */
static inline int mprotect_fixup_middle(struct vm_area_struct * vma,
	unsigned long start, unsigned long end,
	int newflags, int prot)
{
	struct vm_area_struct * left, * right;

	left = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!left)
		return -ENOMEM;
	right = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!right) {
		kfree(left);
		return -ENOMEM;
	}
	*left = *vma;
	*right = *vma;
	left->vm_end = start;
	vma->vm_start = start;
	vma->vm_end = end;
	right->vm_start = end;
	vma->vm_offset += vma->vm_start - left->vm_start;
	right->vm_offset += right->vm_start - left->vm_start;
	vma->vm_flags = newflags;
	vma->vm_page_prot = prot;
	if (vma->vm_inode)
		vma->vm_inode->i_count += 2;	/* two extra areas reference the inode */
	insert_vm_struct(current, left);
	insert_vm_struct(current, right);
	merge_segments(current->mm->mmap);
	return 0;
}
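The three helpers above cover every way the requested range can overlap an area. The middle case splits one area into three; only the center takes the new protection:

	vm_start          start               end            vm_end
	    |---- left ----|---- vma (new) ----|---- right ----|

left and right keep the old flags, so vm_offset only needs adjusting where an area's start moved relative to the start of the mapped object.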

/*
 * Translate the VM_* flags into i386 page protection bits, reshape the
 * area with one of the helpers above, then rewrite the page tables.
 */
static int mprotect_fixup(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	int prot, error;

	if (newflags == vma->vm_flags)
		return 0;
	prot = PAGE_PRESENT;
	if (newflags & (VM_READ | VM_EXEC))
		prot |= PAGE_READONLY;
	if (newflags & VM_WRITE) {
		if (newflags & VM_SHARED)
			prot |= PAGE_SHARED;
		else
			prot |= PAGE_COPY;	/* private writable stays copy-on-write */
	}

	if (start == vma->vm_start) {
		if (end == vma->vm_end)
			error = mprotect_fixup_all(vma, newflags, prot);
		else
			error = mprotect_fixup_start(vma, end, newflags, prot);
	} else if (end == vma->vm_end)
		error = mprotect_fixup_end(vma, start, newflags, prot);
	else
		error = mprotect_fixup_middle(vma, start, end, newflags, prot);

	if (error)
		return error;

	change_protection(start, end, prot);
	return 0;
}

asmlinkage int sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long end, tmp;
	struct vm_area_struct * vma;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = (len + ~PAGE_MASK) & PAGE_MASK;	/* round len up to whole pages */
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
		return -EINVAL;
	if (end == start)
		return 0;
	/* find the first area that ends beyond start ... */
	for (vma = current->mm->mmap ; ; vma = vma->vm_next) {
		if (!vma)
			return -EFAULT;
		if (vma->vm_end > start)
			break;
	}
	/* ... and check that it really contains start (no hole below) */
	if (vma->vm_start > start)
		return -EFAULT;

	for ( ; ; ) {
		int error;
		unsigned int newflags;

		/* PROT_* and the low VM_* bits share the same values */
		newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
		/* every requested bit needs its VM_MAY* counterpart four bits up */
		if ((newflags & ~(newflags >> 4)) & 0xf)
			return -EACCES;

		if (vma->vm_end >= end)
			return mprotect_fixup(vma, start, end, newflags);

		/* the range spans several areas: fix this one up to its end */
		tmp = vma->vm_end;
		error = mprotect_fixup(vma, start, tmp, newflags);
		if (error)
			return error;
		start = tmp;
		/* merging may have extended vma; otherwise move to the next area */
		if (vma->vm_end <= start) {
			vma = vma->vm_next;
			if (!vma || vma->vm_start != start)
				return -EFAULT;	/* hole in the middle of the range */
		}
	}
}
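The -EACCES test in the loop above depends on the layout of the VM_* flags: VM_READ, VM_WRITE, VM_EXEC and VM_SHARED occupy the low four bits, and each VM_MAY* counterpart sits exactly four bits higher. A small demonstration of the bit trick; the flag values mirror the historical <linux/mm.h> definitions:

#include <stdio.h>

#define VM_READ		0x0001
#define VM_WRITE	0x0002
#define VM_MAYREAD	0x0010
#define VM_MAYWRITE	0x0020

/* non-zero iff some requested bit lacks its VM_MAY* counterpart */
static int refused(unsigned int newflags)
{
	return (newflags & ~(newflags >> 4)) & 0xf;
}

int main(void)
{
	printf("%d\n", refused(VM_READ | VM_MAYREAD));	/* 0: read is allowed */
	printf("%d\n", refused(VM_WRITE | VM_MAYREAD));	/* 2: write was never allowed */
	return 0;
}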