This source file includes the following definitions:
- get_one_pte
- alloc_one_pte
- copy_one_pte
- move_one_page
- move_page_tables
- move_vma
- sys_mremap
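
For orientation, the sketch below shows how user space typically reaches this system call through the C library wrapper. It is an illustrative, hypothetical caller rather than part of this file; it assumes the standard mmap()/mremap() wrappers and that the MREMAP_MAYMOVE flag corresponds to the may_move argument accepted by sys_mremap below.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t old_len = 4096, new_len = 8192;

        /* Create an anonymous mapping to resize. */
        void *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;

        /* Grow it; the kernel may relocate the mapping if needed. */
        void *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
        if (q == MAP_FAILED) {
                perror("mremap");
                return 1;
        }

        printf("mapping now at %p (%zu bytes)\n", q, new_len);
        munmap(q, new_len);
        return 0;
}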
/*
 *      linux/mm/mremap.c
 */

#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>
#include <linux/swap.h>

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/pgtable.h>

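/*
 * get_one_pte() walks the page tables of "mm" for the source address
 * and returns a pointer to the page table entry, or NULL if nothing
 * is mapped there. Bad pgd/pmd entries are reported and cleared.
 */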
static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t * pgd;
        pmd_t * pmd;
        pte_t * pte = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                goto end;
        if (pgd_bad(*pgd)) {
                printk("move_one_page: bad source pgd (%08lx)\n", pgd_val(*pgd));
                pgd_clear(pgd);
                goto end;
        }

        pmd = pmd_offset(pgd, addr);
        if (pmd_none(*pmd))
                goto end;
        if (pmd_bad(*pmd)) {
                printk("move_one_page: bad source pmd (%08lx)\n", pmd_val(*pmd));
                pmd_clear(pmd);
                goto end;
        }

        pte = pte_offset(pmd, addr);
        if (pte_none(*pte))
                pte = NULL;
end:
        return pte;
}

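/*
 * alloc_one_pte() makes sure a page table exists for the destination
 * address, allocating a pmd and a pte page as needed, and returns a
 * pointer to the entry (NULL on allocation failure).
 */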
static inline pte_t *alloc_one_pte(struct mm_struct *mm, unsigned long addr)
{
        pmd_t * pmd;
        pte_t * pte = NULL;

        pmd = pmd_alloc(pgd_offset(mm, addr), addr);
        if (pmd)
                pte = pte_alloc(pmd, addr);
        return pte;
}

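/*
 * copy_one_pte() moves the entry from the source page table into the
 * destination. It returns non-zero only if the source entry was
 * present but could not be moved because no destination entry was
 * available; in that case the source entry is left untouched.
 */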
static inline int copy_one_pte(pte_t * src, pte_t * dst)
{
        int error = 0;
        pte_t pte = *src;

        if (!pte_none(pte)) {
                error++;
                if (dst) {
                        pte_clear(src);
                        set_pte(dst, pte);
                        error--;
                }
        }
        return error;
}

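/*
 * move_one_page() relocates a single page table entry from old_addr to
 * new_addr. Returns 0 on success (or if nothing was mapped at the old
 * address), non-zero if the destination page table could not be set up.
 */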
static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr)
{
        int error = 0;
        pte_t * src;

        src = get_one_pte(mm, old_addr);
        if (src)
                error = copy_one_pte(src, alloc_one_pte(mm, new_addr));
        return error;
}

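/*
 * move_page_tables() moves len bytes worth of page table entries from
 * old_addr to new_addr, one page at a time. On failure, the entries
 * that were already moved are moved back again.
 */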
static int move_page_tables(struct mm_struct * mm,
        unsigned long new_addr, unsigned long old_addr, unsigned long len)
{
        unsigned long offset = len;

        invalidate_range(mm, old_addr, old_addr + len);

        /*
         * This is not the cleverest way to do it, but a simple
         * page-at-a-time loop is good enough on the assumption that
         * most remappings are only a few pages, and it keeps the
         * error recovery below straightforward.
         */
        while (offset) {
                offset -= PAGE_SIZE;
                if (move_one_page(mm, old_addr + offset, new_addr + offset))
                        goto oops_we_failed;
        }
        return 0;

        /*
         * The move failed, most likely because a page table for the
         * new range could not be allocated. Move everything that was
         * already copied back to the old range (this works because
         * the old page tables still exist), then clean up the new
         * range.
         */
oops_we_failed:
        while ((offset += PAGE_SIZE) < len)
                move_one_page(mm, new_addr + offset, old_addr + offset);
        invalidate_range(mm, new_addr, new_addr + len);
        zap_page_range(mm, new_addr, new_addr + len);
        return -1;
}

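/*
 * move_vma() allocates a new vm_area_struct for the relocated mapping,
 * moves the page tables across, inserts the new area and unmaps the
 * old one. Returns the new address, or -ENOMEM on failure.
 */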
static inline unsigned long move_vma(struct vm_area_struct * vma,
        unsigned long addr, unsigned long old_len, unsigned long new_len)
{
        struct vm_area_struct * new_vma;

        new_vma = (struct vm_area_struct *)
                kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
        if (new_vma) {
                unsigned long new_addr = get_unmapped_area(addr, new_len);

                if (new_addr && !move_page_tables(current->mm, new_addr, addr, old_len)) {
                        *new_vma = *vma;
                        new_vma->vm_start = new_addr;
                        new_vma->vm_end = new_addr+new_len;
                        new_vma->vm_offset = vma->vm_offset + (addr - vma->vm_start);
                        if (new_vma->vm_inode)
                                new_vma->vm_inode->i_count++;
                        if (new_vma->vm_ops && new_vma->vm_ops->open)
                                new_vma->vm_ops->open(new_vma);
                        insert_vm_struct(current, new_vma);
                        merge_segments(current, new_vma->vm_start, new_vma->vm_end);
                        do_munmap(addr, old_len);
                        return new_addr;
                }
                kfree(new_vma);
        }
        return -ENOMEM;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at
 * the same time (controlled by the may_move argument).
 */
asmlinkage unsigned long sys_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        int may_move)
{
        struct vm_area_struct *vma;

        if (addr & ~PAGE_MASK)
                return -EINVAL;
        old_len = PAGE_ALIGN(old_len);
        new_len = PAGE_ALIGN(new_len);
        if (old_len == new_len)
                return addr;

        /*
         * Always allow a shrinking remap: that just unmaps
         * the unnecessary pages.
         */
        if (old_len > new_len) {
                do_munmap(addr+new_len, old_len - new_len);
                return addr;
        }

        /*
         * Ok, the mapping needs to grow.
         */
        vma = find_vma(current, addr);
        if (!vma || vma->vm_start > addr)
                return -EFAULT;

        if (old_len > vma->vm_end - addr)
                return -EFAULT;
        if (vma->vm_flags & VM_LOCKED) {
                unsigned long locked = current->mm->locked_vm << PAGE_SHIFT;
                locked += new_len - old_len;
                if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
                        return -EAGAIN;
        }

        /* old_len reaches exactly to the end of the area.. */
        if (old_len == vma->vm_end - addr) {
                unsigned long max_addr = TASK_SIZE;
                if (vma->vm_next)
                        max_addr = vma->vm_next->vm_start;

                /* can the current mapping simply be expanded in place? */
                if (max_addr - addr >= new_len) {
                        int pages = (new_len - old_len) >> PAGE_SHIFT;
                        vma->vm_end = addr + new_len;
                        current->mm->total_vm += pages;
                        if (vma->vm_flags & VM_LOCKED)
                                current->mm->locked_vm += pages;
                        return addr;
                }
        }

        /*
         * The area could not be grown in place: a new area has to be
         * created and the mapping moved there, if the caller allows it.
         */
        if (!may_move)
                return -ENOMEM;
        return move_vma(vma, addr, old_len, new_len);
}