This source file includes following definitions.
- set_pgdir
- free_area_pte
- free_area_pmd
- free_area_pages
- alloc_area_pte
- alloc_area_pmd
- alloc_area_pages
- vfree
- vmalloc
- vread
1 #define THREE_LEVEL
2
3
4
5
6
7
8 #include <asm/system.h>
9
10 #include <linux/signal.h>
11 #include <linux/sched.h>
12 #include <linux/head.h>
13 #include <linux/kernel.h>
14 #include <linux/errno.h>
15 #include <linux/types.h>
16 #include <linux/malloc.h>
17 #include <linux/mm.h>
18
19 #include <asm/segment.h>
20 #include <asm/pgtable.h>
21
/*
 * Descriptor for one virtually contiguous region handed out by vmalloc().
 * Regions are kept on the singly linked 'vmlist', sorted by address
 * (vmalloc() inserts in address order; vread() relies on that order).
 */
struct vm_struct {
	unsigned long flags;		/* region flags; not referenced in this file */
	void * addr;			/* start address of the region */
	unsigned long size;		/* byte size, including one trailing guard page */
	struct vm_struct * next;	/* next region in address order */
};

/* Head of the address-sorted list of all live vmalloc regions. */
static struct vm_struct * vmlist = NULL;
30
31 static inline void set_pgdir(unsigned long address, pgd_t entry)
32 {
33 struct task_struct * p;
34
35 for_each_task(p)
36 *pgd_offset(p,address) = entry;
37 }
38
39 static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
40 {
41 pte_t * pte;
42 unsigned long end;
43
44 if (pmd_none(*pmd))
45 return;
46 if (pmd_bad(*pmd)) {
47 printk("free_area_pte: bad pmd (%08lx)\n", pmd_val(*pmd));
48 pmd_clear(pmd);
49 return;
50 }
51 pte = pte_offset(pmd, address);
52 address &= ~PMD_MASK;
53 end = address + size;
54 if (end > PMD_SIZE)
55 end = PMD_SIZE;
56 while (address < end) {
57 pte_t page = *pte;
58 pte_clear(pte);
59 address += PAGE_SIZE;
60 pte++;
61 if (pte_none(page))
62 continue;
63 if (pte_present(page)) {
64 free_page(pte_page(page));
65 continue;
66 }
67 printk("Whee.. Swapped out page in kernel page table\n");
68 }
69 }
70
71 static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
72 {
73 pmd_t * pmd;
74 unsigned long end;
75
76 if (pgd_none(*dir))
77 return;
78 if (pgd_bad(*dir)) {
79 printk("free_area_pmd: bad pgd (%08lx)\n", pgd_val(*dir));
80 pgd_clear(dir);
81 return;
82 }
83 pmd = pmd_offset(dir, address);
84 address &= ~PGDIR_MASK;
85 end = address + size;
86 if (end > PGDIR_SIZE)
87 end = PGDIR_SIZE;
88 while (address < end) {
89 free_area_pte(pmd, address, end - address);
90 address = (address + PMD_SIZE) & PMD_MASK;
91 pmd++;
92 }
93 }
94
95 static void free_area_pages(unsigned long address, unsigned long size)
96 {
97 pgd_t * dir;
98 unsigned long end = address + size;
99
100 dir = pgd_offset(&init_task, address);
101 while (address < end) {
102 free_area_pmd(dir, address, end - address);
103 address = (address + PGDIR_SIZE) & PGDIR_MASK;
104 dir++;
105 }
106 invalidate();
107 }
108
109 static inline int alloc_area_pte(pte_t * pte, unsigned long address, unsigned long size)
110 {
111 unsigned long end;
112
113 address &= ~PMD_MASK;
114 end = address + size;
115 if (end > PMD_SIZE)
116 end = PMD_SIZE;
117 while (address < end) {
118 unsigned long page;
119 if (!pte_none(*pte))
120 printk("alloc_area_pte: page already exists\n");
121 page = __get_free_page(GFP_KERNEL);
122 if (!page)
123 return -ENOMEM;
124 *pte = mk_pte(page, PAGE_KERNEL);
125 address += PAGE_SIZE;
126 pte++;
127 }
128 return 0;
129 }
130
131 static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size)
132 {
133 unsigned long end;
134
135 address &= ~PGDIR_MASK;
136 end = address + size;
137 if (end > PGDIR_SIZE)
138 end = PGDIR_SIZE;
139 while (address < end) {
140 pte_t * pte = pte_alloc_kernel(pmd, address);
141 if (!pte)
142 return -ENOMEM;
143 if (alloc_area_pte(pte, address, end - address))
144 return -ENOMEM;
145 address = (address + PMD_SIZE) & PMD_MASK;
146 pmd++;
147 }
148 return 0;
149 }
150
151 static int alloc_area_pages(unsigned long address, unsigned long size)
152 {
153 pgd_t * dir;
154 unsigned long end = address + size;
155
156 dir = pgd_offset(&init_task, address);
157 while (address < end) {
158 pmd_t *pmd = pmd_alloc_kernel(dir, address);
159 if (!pmd)
160 return -ENOMEM;
161 if (alloc_area_pmd(pmd, address, end - address))
162 return -ENOMEM;
163 set_pgdir(address, *dir);
164 address = (address + PGDIR_SIZE) & PGDIR_MASK;
165 dir++;
166 }
167 invalidate();
168 return 0;
169 }
170
171 void vfree(void * addr)
172 {
173 struct vm_struct **p, *tmp;
174
175 if (!addr)
176 return;
177 if ((PAGE_SIZE-1) & (unsigned long) addr) {
178 printk("Trying to vfree() bad address (%p)\n", addr);
179 return;
180 }
181 for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
182 if (tmp->addr == addr) {
183 *p = tmp->next;
184 free_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
185 kfree(tmp);
186 return;
187 }
188 }
189 printk("Trying to vfree() nonexistent vm area (%p)\n", addr);
190 }
191
192 void * vmalloc(unsigned long size)
193 {
194 void * addr;
195 struct vm_struct **p, *tmp, *area;
196
197 size = PAGE_ALIGN(size);
198 if (!size || size > high_memory)
199 return NULL;
200 area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
201 if (!area)
202 return NULL;
203 addr = (void *) VMALLOC_START;
204 area->size = size + PAGE_SIZE;
205 area->next = NULL;
206 for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
207 if (size + (unsigned long) addr < (unsigned long) tmp->addr)
208 break;
209 addr = (void *) (tmp->size + (unsigned long) tmp->addr);
210 }
211 area->addr = addr;
212 area->next = *p;
213 *p = area;
214 if (alloc_area_pages(VMALLOC_VMADDR(addr), size)) {
215 vfree(addr);
216 return NULL;
217 }
218 return addr;
219 }
220
221 int vread(char *buf, char *addr, int count)
222 {
223 struct vm_struct **p, *tmp;
224 char *vaddr, *buf_start = buf;
225 int n;
226
227 for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
228 vaddr = (char *) tmp->addr;
229 while (addr < vaddr) {
230 if (count == 0)
231 goto finished;
232 put_fs_byte('\0', buf++), addr++, count--;
233 }
234 n = tmp->size - PAGE_SIZE;
235 if (addr > vaddr)
236 n -= addr - vaddr;
237 while (--n >= 0) {
238 if (count == 0)
239 goto finished;
240 put_fs_byte(*addr++, buf++), count--;
241 }
242 }
243 finished:
244 return buf - buf_start;
245 }