This source file includes the following definitions.
- set_pgdir
- free_area_pte
- free_area_pmd
- free_area_pages
- alloc_area_pte
- alloc_area_pmd
- alloc_area_pages
- vfree
- vmalloc
- vread
1
2
3
4
5
6
7 #include <asm/system.h>
8
9 #include <linux/signal.h>
10 #include <linux/sched.h>
11 #include <linux/head.h>
12 #include <linux/kernel.h>
13 #include <linux/errno.h>
14 #include <linux/types.h>
15 #include <linux/malloc.h>
16 #include <linux/mm.h>
17
18 #include <asm/segment.h>
19 #include <asm/pgtable.h>
20
/*
 * Descriptor for one vmalloc'ed region.  The list headed by vmlist is
 * kept sorted by address (see the insertion scan in vmalloc()).
 */
struct vm_struct {
	unsigned long flags;		/* not referenced in this file */
	void * addr;			/* start address of the region */
	unsigned long size;		/* usable size plus one guard page
					 * (vmalloc() stores size + PAGE_SIZE;
					 * vread() subtracts it back) */
	struct vm_struct * next;	/* next region, ascending by address */
};

/* Head of the sorted list of allocated vmalloc regions. */
static struct vm_struct * vmlist = NULL;
29
/*
 * Install a kernel page-directory entry into the page directory of
 * every task, so the new kernel mapping is visible in all address
 * spaces.
 */
static inline void set_pgdir(unsigned long address, pgd_t entry)
{
	struct task_struct * p;

	/* NOTE(review): iterates the whole task list with no visible
	 * locking here — presumably the caller context makes that safe;
	 * confirm against the task-list rules of this kernel version. */
	for_each_task(p)
		*pgd_offset(p,address) = entry;
}
37
38 static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
39 {
40 pte_t * pte;
41 unsigned long end;
42
43 if (pmd_none(*pmd))
44 return;
45 if (pmd_bad(*pmd)) {
46 printk("free_area_pte: bad pmd (%08lx)\n", pmd_val(*pmd));
47 pmd_clear(pmd);
48 return;
49 }
50 pte = pte_offset(pmd, address);
51 address &= ~PMD_MASK;
52 end = address + size;
53 if (end > PMD_SIZE)
54 end = PMD_SIZE;
55 while (address < end) {
56 pte_t page = *pte;
57 pte_clear(pte);
58 address += PAGE_SIZE;
59 pte++;
60 if (pte_none(page))
61 continue;
62 if (pte_present(page)) {
63 free_page(pte_page(page));
64 continue;
65 }
66 printk("Whee.. Swapped out page in kernel page table\n");
67 }
68 }
69
70 static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
71 {
72 pmd_t * pmd;
73 unsigned long end;
74
75 if (pgd_none(*dir))
76 return;
77 if (pgd_bad(*dir)) {
78 printk("free_area_pmd: bad pgd (%08lx)\n", pgd_val(*dir));
79 pgd_clear(dir);
80 return;
81 }
82 pmd = pmd_offset(dir, address);
83 address &= ~PGDIR_MASK;
84 end = address + size;
85 if (end > PGDIR_SIZE)
86 end = PGDIR_SIZE;
87 while (address < end) {
88 free_area_pte(pmd, address, end - address);
89 address = (address + PMD_SIZE) & PMD_MASK;
90 pmd++;
91 }
92 }
93
94 static void free_area_pages(unsigned long address, unsigned long size)
95 {
96 pgd_t * dir;
97 unsigned long end = address + size;
98
99 dir = pgd_offset(&init_task, address);
100 while (address < end) {
101 free_area_pmd(dir, address, end - address);
102 address = (address + PGDIR_SIZE) & PGDIR_MASK;
103 dir++;
104 }
105 invalidate();
106 }
107
108 static inline int alloc_area_pte(pte_t * pte, unsigned long address, unsigned long size)
109 {
110 unsigned long end;
111
112 address &= ~PMD_MASK;
113 end = address + size;
114 if (end > PMD_SIZE)
115 end = PMD_SIZE;
116 while (address < end) {
117 unsigned long page;
118 if (!pte_none(*pte))
119 printk("alloc_area_pte: page already exists\n");
120 page = __get_free_page(GFP_KERNEL);
121 if (!page)
122 return -ENOMEM;
123 *pte = mk_pte(page, PAGE_KERNEL);
124 address += PAGE_SIZE;
125 pte++;
126 }
127 return 0;
128 }
129
130 static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size)
131 {
132 unsigned long end;
133
134 address &= ~PGDIR_MASK;
135 end = address + size;
136 if (end > PGDIR_SIZE)
137 end = PGDIR_SIZE;
138 while (address < end) {
139 pte_t * pte = pte_alloc_kernel(pmd, address);
140 if (!pte)
141 return -ENOMEM;
142 if (alloc_area_pte(pte, address, end - address))
143 return -ENOMEM;
144 address = (address + PMD_SIZE) & PMD_MASK;
145 pmd++;
146 }
147 return 0;
148 }
149
150 static int alloc_area_pages(unsigned long address, unsigned long size)
151 {
152 pgd_t * dir;
153 unsigned long end = address + size;
154
155 dir = pgd_offset(&init_task, address);
156 while (address < end) {
157 pmd_t *pmd = pmd_alloc_kernel(dir, address);
158 if (!pmd)
159 return -ENOMEM;
160 if (alloc_area_pmd(pmd, address, end - address))
161 return -ENOMEM;
162 set_pgdir(address, *dir);
163 address = (address + PGDIR_SIZE) & PGDIR_MASK;
164 dir++;
165 }
166 invalidate();
167 return 0;
168 }
169
170 void vfree(void * addr)
171 {
172 struct vm_struct **p, *tmp;
173
174 if (!addr)
175 return;
176 if ((PAGE_SIZE-1) & (unsigned long) addr) {
177 printk("Trying to vfree() bad address (%p)\n", addr);
178 return;
179 }
180 for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
181 if (tmp->addr == addr) {
182 *p = tmp->next;
183 free_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
184 kfree(tmp);
185 return;
186 }
187 }
188 printk("Trying to vfree() nonexistent vm area (%p)\n", addr);
189 }
190
191 void * vmalloc(unsigned long size)
192 {
193 void * addr;
194 struct vm_struct **p, *tmp, *area;
195
196 size = PAGE_ALIGN(size);
197 if (!size || size > high_memory)
198 return NULL;
199 area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
200 if (!area)
201 return NULL;
202 addr = (void *) VMALLOC_START;
203 area->size = size + PAGE_SIZE;
204 area->next = NULL;
205 for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
206 if (size + (unsigned long) addr < (unsigned long) tmp->addr)
207 break;
208 addr = (void *) (tmp->size + (unsigned long) tmp->addr);
209 }
210 area->addr = addr;
211 area->next = *p;
212 *p = area;
213 if (alloc_area_pages(VMALLOC_VMADDR(addr), size)) {
214 vfree(addr);
215 return NULL;
216 }
217 return addr;
218 }
219
220 int vread(char *buf, char *addr, int count)
221 {
222 struct vm_struct **p, *tmp;
223 char *vaddr, *buf_start = buf;
224 int n;
225
226 for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
227 vaddr = (char *) tmp->addr;
228 while (addr < vaddr) {
229 if (count == 0)
230 goto finished;
231 put_user('\0', buf++), addr++, count--;
232 }
233 n = tmp->size - PAGE_SIZE;
234 if (addr > vaddr)
235 n -= addr - vaddr;
236 while (--n >= 0) {
237 if (count == 0)
238 goto finished;
239 put_user(*addr++, buf++), count--;
240 }
241 }
242 finished:
243 return buf - buf_start;
244 }