This source file includes following definitions.
- change_protection
- mprotect_fixup_all
- mprotect_fixup_start
- mprotect_fixup_end
- mprotect_fixup_middle
- mprotect_fixup
- sys_mprotect
1
2
3
4
5
6 #include <linux/stat.h>
7 #include <linux/sched.h>
8 #include <linux/kernel.h>
9 #include <linux/mm.h>
10 #include <linux/shm.h>
11 #include <linux/errno.h>
12 #include <linux/mman.h>
13 #include <linux/string.h>
14 #include <linux/malloc.h>
15
16 #include <asm/segment.h>
17 #include <asm/system.h>
18
19 #define CHG_MASK (PAGE_MASK | PAGE_ACCESSED | PAGE_DIRTY | PAGE_PWT | PAGE_PCD)
20
21 static void change_protection(unsigned long start, unsigned long end, int prot)
22 {
23 unsigned long *page_table, *dir;
24 unsigned long page, offset;
25 int nr;
26
27 dir = PAGE_DIR_OFFSET(current->tss.cr3, start);
28 offset = (start >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
29 nr = (end - start) >> PAGE_SHIFT;
30 while (nr > 0) {
31 page = *dir;
32 dir++;
33 if (!(page & PAGE_PRESENT)) {
34 nr = nr - PTRS_PER_PAGE + offset;
35 offset = 0;
36 continue;
37 }
38 page_table = offset + (unsigned long *) (page & PAGE_MASK);
39 offset = PTRS_PER_PAGE - offset;
40 if (offset > nr)
41 offset = nr;
42 nr = nr - offset;
43 do {
44 page = *page_table;
45 if (page & PAGE_PRESENT)
46 *page_table = (page & CHG_MASK) | prot;
47 ++page_table;
48 } while (--offset);
49 }
50 return;
51 }
52
53 static inline int mprotect_fixup_all(struct vm_area_struct * vma,
54 int newflags, int prot)
55 {
56 vma->vm_flags = newflags;
57 vma->vm_page_prot = prot;
58 merge_segments(current->mm->mmap);
59 return 0;
60 }
61
62 static inline int mprotect_fixup_start(struct vm_area_struct * vma,
63 unsigned long end,
64 int newflags, int prot)
65 {
66 struct vm_area_struct * new;
67
68 new = kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
69 if (!new)
70 return -ENOMEM;
71 *new = *vma;
72 vma->vm_start = end;
73 new->vm_end = end;
74 vma->vm_offset += vma->vm_start - new->vm_start;
75 new->vm_flags = newflags;
76 new->vm_page_prot = prot;
77 if (new->vm_inode)
78 new->vm_inode->i_count++;
79 insert_vm_struct(current, new);
80 merge_segments(current->mm->mmap);
81 return 0;
82 }
83
84 static inline int mprotect_fixup_end(struct vm_area_struct * vma,
85 unsigned long start,
86 int newflags, int prot)
87 {
88 struct vm_area_struct * new;
89
90 new = kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
91 if (!new)
92 return -ENOMEM;
93 *new = *vma;
94 vma->vm_end = start;
95 new->vm_start = start;
96 new->vm_offset += new->vm_start - vma->vm_start;
97 new->vm_flags = newflags;
98 new->vm_page_prot = prot;
99 if (new->vm_inode)
100 new->vm_inode->i_count++;
101 insert_vm_struct(current, new);
102 merge_segments(current->mm->mmap);
103 return 0;
104 }
105
106 static inline int mprotect_fixup_middle(struct vm_area_struct * vma,
107 unsigned long start, unsigned long end,
108 int newflags, int prot)
109 {
110 int error;
111 unsigned long tmpflags, tmpprot;
112
113 tmpflags = vma->vm_flags;
114 tmpprot = vma->vm_page_prot;
115 vma->vm_flags = newflags;
116 vma->vm_page_prot = prot;
117 error = mprotect_fixup_end(vma, end, tmpflags, tmpprot);
118 if (!error)
119 error = mprotect_fixup_start(vma, start, tmpflags, tmpprot);
120 return error;
121 }
122
123 static int mprotect_fixup(struct vm_area_struct * vma,
124 unsigned long start, unsigned long end, unsigned int newflags)
125 {
126 int prot, error;
127
128 if (newflags == vma->vm_flags)
129 return 0;
130 prot = PAGE_PRESENT;
131 if (newflags & (VM_READ | VM_EXEC))
132 prot |= PAGE_READONLY;
133 if (newflags & VM_WRITE)
134 if (newflags & VM_SHARED)
135 prot |= PAGE_SHARED;
136 else
137 prot |= PAGE_COPY;
138
139 if (start == vma->vm_start)
140 if (end == vma->vm_end)
141 error = mprotect_fixup_all(vma, newflags, prot);
142 else
143 error = mprotect_fixup_start(vma, end, newflags, prot);
144 else if (end == vma->vm_end)
145 error = mprotect_fixup_end(vma, start, newflags, prot);
146 else
147 error = mprotect_fixup_middle(vma, start, end, newflags, prot);
148
149 if (error)
150 return error;
151
152 change_protection(start, end, prot);
153 return 0;
154 }
155
/*
 * sys_mprotect: change the protection of [start, start+len) to "prot".
 *
 * start must be page aligned; len is rounded up to a whole number of
 * pages.  The range must be fully covered by a contiguous run of
 * vmas, otherwise -EFAULT is returned.  Each vma in the range is
 * fixed up in turn via mprotect_fixup().
 */
asmlinkage int sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long end;
	struct vm_area_struct * vma;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	/* round len up to a page multiple */
	len = (len + ~PAGE_MASK) & PAGE_MASK;
	end = start + len;
	if (end < start)	/* wrapped past the top of the address space */
		return -EINVAL;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
		return -EINVAL;
	if (end == start)
		return 0;
	/* find the first vma that ends beyond "start" */
	for (vma = current->mm->mmap ; ; vma = vma->vm_next) {
		if (!vma)
			return -EFAULT;
		if (vma->vm_end > start)
			break;
	}
	if (vma->vm_start > start)	/* hole at the start of the range */
		return -EFAULT;

	for ( ; ; ) {
		int error;
		unsigned int newflags;

		/* replace the low protection bits of vm_flags with the
		 * requested ones, keeping all higher bits untouched
		 * (relies on VM_READ/WRITE/EXEC == PROT_READ/WRITE/EXEC) */
		newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
		/* every requested bit in the low nibble must also be set
		 * four bits up — presumably the VM_MAY* permission bits;
		 * otherwise the change is not allowed */
		if ((newflags & ~(newflags >> 4)) & 0xf)
			return -EACCES;

		if (vma->vm_end >= end)
			return mprotect_fixup(vma, start, end, newflags);

		/* range extends past this vma: fix up to its end, then
		 * step to the next vma, which must be exactly adjacent */
		error = mprotect_fixup(vma, start, vma->vm_end, newflags);
		if (error)
			return error;
		start = vma->vm_end;
		vma = vma->vm_next;
		if (!vma || vma->vm_start != start)
			return -EFAULT;
	}
}