This source file includes the following definitions:
- set_pgdir
- free_area_pte
- free_area_pmd
- free_area_pages
- alloc_area_pte
- alloc_area_pmd
- alloc_area_pages
- remap_area_pte
- remap_area_pmd
- remap_area_pages
- get_vm_area
- vfree
- vmalloc
- vremap
- vread
#include <asm/system.h>

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/malloc.h>
#include <linux/mm.h>

#include <asm/segment.h>
#include <asm/pgtable.h>

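/*
 * Each vmalloc()/vremap() area is described by one of these,
 * kept on the address-sorted, singly linked vmlist.
 */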
struct vm_struct {
        unsigned long flags;
        void * addr;
        unsigned long size;
        struct vm_struct * next;
};

static struct vm_struct * vmlist = NULL;

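/*
 * Propagate a kernel page-directory entry into the page directory
 * of every task, so all processes see the new kernel mapping.
 */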
static inline void set_pgdir(unsigned long address, pgd_t entry)
{
        struct task_struct * p;

        for_each_task(p)
                *pgd_offset(p,address) = entry;
}

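/*
 * Clear the page-table entries covering [address, address+size)
 * within one pmd, freeing any pages that are still present.
 */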
static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
{
        pte_t * pte;
        unsigned long end;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                printk("free_area_pte: bad pmd (%08lx)\n", pmd_val(*pmd));
                pmd_clear(pmd);
                return;
        }
        pte = pte_offset(pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        while (address < end) {
                pte_t page = *pte;
                pte_clear(pte);
                address += PAGE_SIZE;
                pte++;
                if (pte_none(page))
                        continue;
                if (pte_present(page)) {
                        free_page(pte_page(page));
                        continue;
                }
                printk("Whee.. Swapped out page in kernel page table\n");
        }
}

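/*
 * Walk the pmd entries under one page-directory entry, freeing
 * the mapped pages for the given range via free_area_pte().
 */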
static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
{
        pmd_t * pmd;
        unsigned long end;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                printk("free_area_pmd: bad pgd (%08lx)\n", pgd_val(*dir));
                pgd_clear(dir);
                return;
        }
        pmd = pmd_offset(dir, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        while (address < end) {
                free_area_pte(pmd, address, end - address);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        }
}

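/*
 * Unmap and free every page backing the kernel virtual range
 * [address, address+size), then flush the TLB.
 */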
static void free_area_pages(unsigned long address, unsigned long size)
{
        pgd_t * dir;
        unsigned long end = address + size;

        dir = pgd_offset(&init_task, address);
        while (address < end) {
                free_area_pmd(dir, address, end - address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        invalidate();
}

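/*
 * Fill the page-table entries within one pmd with freshly
 * allocated physical pages, mapped with PAGE_KERNEL protection.
 */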
static inline int alloc_area_pte(pte_t * pte, unsigned long address, unsigned long size)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        while (address < end) {
                unsigned long page;
                if (!pte_none(*pte))
                        printk("alloc_area_pte: page already exists\n");
                page = __get_free_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                *pte = mk_pte(page, PAGE_KERNEL);
                address += PAGE_SIZE;
                pte++;
        }
        return 0;
}

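/*
 * Allocate kernel page tables as needed under one page-directory
 * entry and populate them via alloc_area_pte().
 */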
static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        while (address < end) {
                pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                if (alloc_area_pte(pte, address, end - address))
                        return -ENOMEM;
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        }
        return 0;
}

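/*
 * Back the kernel virtual range [address, address+size) with newly
 * allocated pages, propagating any new page-directory entries to
 * all tasks and flushing the TLB when done.
 */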
static int alloc_area_pages(unsigned long address, unsigned long size)
{
        pgd_t * dir;
        unsigned long end = address + size;

        dir = pgd_offset(&init_task, address);
        while (address < end) {
                pmd_t *pmd = pmd_alloc_kernel(dir, address);
                if (!pmd)
                        return -ENOMEM;
                if (alloc_area_pmd(pmd, address, end - address))
                        return -ENOMEM;
                set_pgdir(address, *dir);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        invalidate();
        return 0;
}

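/*
 * Like alloc_area_pte(), but instead of allocating new pages, map
 * the entries onto the physically contiguous range starting at offset.
 */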
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
        unsigned long offset)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                if (!pte_none(*pte))
                        printk("remap_area_pte: page already exists\n");
                *pte = mk_pte(offset, PAGE_KERNEL);
                address += PAGE_SIZE;
                offset += PAGE_SIZE;
                pte++;
        } while (address < end);
}

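/*
 * Allocate kernel page tables under one page-directory entry and
 * map them onto the physical range via remap_area_pte().
 */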
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
        unsigned long offset)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        offset -= address;
        do {
                pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + offset);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
        return 0;
}

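/*
 * Map the physical range starting at offset into the kernel
 * virtual range [address, address+size), then flush the TLB.
 */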
static int remap_area_pages(unsigned long address, unsigned long offset, unsigned long size)
{
        pgd_t * dir;
        unsigned long end = address + size;

        offset -= address;
        dir = pgd_offset(&init_task, address);
        while (address < end) {
                pmd_t *pmd = pmd_alloc_kernel(dir, address);
                if (!pmd)
                        return -ENOMEM;
                if (remap_area_pmd(pmd, address, end - address, offset + address))
                        return -ENOMEM;
                set_pgdir(address, *dir);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        invalidate();
        return 0;
}

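/*
 * Find a hole in the vmalloc address range large enough for size
 * bytes plus one unmapped guard page, and insert a new vm_struct
 * for it into the address-sorted vmlist.
 */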
static struct vm_struct * get_vm_area(unsigned long size)
{
        void *addr;
        struct vm_struct **p, *tmp, *area;

        area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
        if (!area)
                return NULL;
        addr = (void *) VMALLOC_START;
        area->size = size + PAGE_SIZE;
        area->next = NULL;
        for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
                if (size + (unsigned long) addr < (unsigned long) tmp->addr)
                        break;
                addr = (void *) (tmp->size + (unsigned long) tmp->addr);
        }
        area->addr = addr;
        area->next = *p;
        *p = area;
        return area;
}

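/*
 * Release an area obtained from vmalloc() or vremap(): unmap and
 * free its pages and remove it from vmlist.  Complains about
 * unaligned or unknown addresses.
 */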
void vfree(void * addr)
{
        struct vm_struct **p, *tmp;

        if (!addr)
                return;
        if ((PAGE_SIZE-1) & (unsigned long) addr) {
                printk("Trying to vfree() bad address (%p)\n", addr);
                return;
        }
        for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
                if (tmp->addr == addr) {
                        *p = tmp->next;
                        free_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
                        kfree(tmp);
                        return;
                }
        }
        printk("Trying to vfree() nonexistent vm area (%p)\n", addr);
}

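/*
 * Allocate size bytes (rounded up to whole pages) of virtually
 * contiguous kernel memory, backed by individually allocated
 * physical pages.  Returns NULL on failure.
 */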
void * vmalloc(unsigned long size)
{
        void * addr;
        struct vm_struct *area;

        size = PAGE_ALIGN(size);
        if (!size || size > high_memory)
                return NULL;
        area = get_vm_area(size);
        if (!area)
                return NULL;
        addr = area->addr;
        if (alloc_area_pages(VMALLOC_VMADDR(addr), size)) {
                vfree(addr);
                return NULL;
        }
        return addr;
}

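/*
 * Map a page-aligned, physically contiguous range above high_memory
 * (e.g. device memory) into kernel virtual address space.
 */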
void * vremap(unsigned long offset, unsigned long size)
{
        void * addr;
        struct vm_struct * area;

        if (offset < high_memory)
                return NULL;
        if (offset & ~PAGE_MASK)
                return NULL;
        size = PAGE_ALIGN(size);
        if (!size || size > offset + size)
                return NULL;
        area = get_vm_area(size);
        if (!area)
                return NULL;
        addr = area->addr;
        if (remap_area_pages(VMALLOC_VMADDR(addr), offset, size)) {
                vfree(addr);
                return NULL;
        }
        return addr;
}

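/*
 * Copy up to count bytes of the vmalloc address range starting at
 * addr into a user buffer, zero-filling the gaps between areas.
 * Returns the number of bytes stored in buf.
 */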
int vread(char *buf, char *addr, int count)
{
        struct vm_struct **p, *tmp;
        char *vaddr, *buf_start = buf;
        int n;

        for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
                vaddr = (char *) tmp->addr;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        put_user('\0', buf++), addr++, count--;
                }
                n = tmp->size - PAGE_SIZE;
                if (addr > vaddr)
                        n -= addr - vaddr;
                while (--n >= 0) {
                        if (count == 0)
                                goto finished;
                        put_user(*addr++, buf++), count--;
                }
        }
finished:
        return buf - buf_start;
}
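
A minimal usage sketch of the interface defined above (not part of this file); the caller, the included headers, and the buffer size are illustrative assumptions:

#include <linux/errno.h>
#include <linux/mm.h>		/* assumed to declare vmalloc()/vfree() in this kernel version */

/* Hypothetical caller: allocate, touch, and release a 64 KB buffer. */
static int example_use_vmalloc(void)
{
        char *buf = (char *) vmalloc(64 * 1024);	/* rounded up to whole pages */
        if (!buf)
                return -ENOMEM;
        buf[0] = 1;		/* the pages are mapped and writable here */
        vfree(buf);		/* unmaps and frees the backing pages */
        return 0;
}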