This source file includes the following definitions.
- set_pgdir
- clear_pgdir
- free_area_pages
- alloc_area_pages
- do_area
- vfree
- vmalloc
- vread
1
2
3
4
5
6
7 #include <asm/system.h>
8
9 #include <linux/signal.h>
10 #include <linux/sched.h>
11 #include <linux/head.h>
12 #include <linux/kernel.h>
13 #include <linux/errno.h>
14 #include <linux/types.h>
15 #include <linux/malloc.h>
16 #include <linux/mm.h>
17
18 #include <asm/segment.h>
19
/*
 * One node in the singly-linked list of active vmalloc() areas.
 * The list (vmlist) is kept sorted by ascending address; vmalloc()
 * inserts in order and vread() relies on walking it front to back.
 */
struct vm_struct {
	unsigned long flags;		/* NOTE(review): never written in this file (kmalloc does not zero) -- confirm users */
	void * addr;			/* start address of the mapped region */
	unsigned long size;		/* mapped size plus one unmapped guard page (see vmalloc) */
	struct vm_struct * next;	/* next area, in ascending address order */
};

/* Head of the address-sorted list of allocated vmalloc areas. */
static struct vm_struct * vmlist = NULL;
29 static inline void set_pgdir(unsigned long dindex, pte_t * page_table)
30 {
31 struct task_struct * p;
32
33 p = &init_task;
34 do {
35 pgd_set(PAGE_DIR_OFFSET(p,0) + dindex, page_table);
36 p = p->next_task;
37 } while (p != &init_task);
38 }
39
40 static inline void clear_pgdir(unsigned long dindex)
41 {
42 struct task_struct * p;
43
44 p = &init_task;
45 do {
46 pgd_clear(PAGE_DIR_OFFSET(p,0) + dindex);
47 p = p->next_task;
48 } while (p != &init_task);
49 }
50
/*
 * Unmap and free 'nr' pages starting at slot 'index' of the page table
 * referenced by kernel page-directory slot 'dindex'.  If that page table
 * ends up completely empty it is released too and the directory entry is
 * cleared in every task's page directory.
 * Always returns 0 (this is the do_area() callback contract).
 */
static int free_area_pages(unsigned long dindex, unsigned long index, unsigned long nr)
{
	pgd_t * dir;
	pte_t * page_table;
	unsigned long page;

	dir = swapper_pg_dir + dindex;
	if (pgd_none(*dir))
		return 0;	/* nothing was ever mapped here */
	if (pgd_bad(*dir)) {
		printk("bad page directory entry in free_area_pages: %08lx\n", pgd_val(*dir));
		pgd_clear(dir);
		return 0;
	}
	page = pgd_page(*dir);
	page_table = index + (pte_t *) page;
	/* Clear each entry first, then free the backing page if one was present. */
	do {
		pte_t pte = *page_table;
		pte_clear(page_table);
		if (pte_present(pte))
			free_page(pte_page(pte));
		page_table++;
	} while (--nr);
	/* If any entry in this page table is still in use, keep the table. */
	page_table = (pte_t *) page;
	for (nr = 0 ; nr < PTRS_PER_PAGE ; nr++, page_table++)
		if (!pte_none(*page_table))
			return 0;
	/* Table is empty: unhook it from all directories and free it. */
	clear_pgdir(dindex);
	/* Undo the MAP_PAGE_RESERVED marking set in alloc_area_pages so
	 * free_page() will actually release the page-table page. */
	mem_map[MAP_NR(page)] = 1;
	free_page(page);
	invalidate();	/* kernel mappings changed: flush the TLB */
	return 0;
}
84
/*
 * Allocate and map 'nr' fresh pages starting at slot 'index' of the page
 * table for kernel page-directory slot 'dindex', creating the page table
 * itself on demand.  Returns 0 on success or -ENOMEM; on failure, pages
 * mapped so far are left in place for the caller's cleanup path
 * (vmalloc() calls vfree() which walks the whole range).
 */
static int alloc_area_pages(unsigned long dindex, unsigned long index, unsigned long nr)
{
	pgd_t *dir;
	pte_t *page_table;

	dir = swapper_pg_dir + dindex;
	if (pgd_none(*dir)) {
		/* No page table yet for this directory slot: allocate one. */
		unsigned long page = get_free_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		/* get_free_page may sleep: re-check that nobody raced us in. */
		if (!pgd_none(*dir)) {
			free_page(page);
		} else {
			/* Mark the page-table page reserved so an ordinary
			 * free_page() elsewhere can't release it by accident. */
			mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
			set_pgdir(dindex, (pte_t *) page);
		}
	}
	if (pgd_bad(*dir)) {
		printk("Bad page dir entry in alloc_area_pages (%08lx)\n", pgd_val(*dir));
		return -ENOMEM;
	}
	page_table = index + (pte_t *) pgd_page(*dir);
	/* NOTE(review): this pre-stores BAD_PAGE in the first slot before the
	 * allocation loop; on success the loop overwrites it immediately, so
	 * it only matters if the very first get_free_page() below fails --
	 * presumably to leave a trappable "bad" mapping rather than stale
	 * state.  A stripped comment above this line likely explained it;
	 * confirm against the original source. */
	*page_table = BAD_PAGE;
	do {
		unsigned long pg = get_free_page(GFP_KERNEL);

		if (!pg)
			return -ENOMEM;
		*page_table = mk_pte(pg, PAGE_KERNEL);
		page_table++;
	} while (--nr);
	invalidate();	/* kernel mappings changed: flush the TLB */
	return 0;
}
124
125 static int do_area(void * addr, unsigned long size,
126 int (*area_fn)(unsigned long,unsigned long,unsigned long))
127 {
128 unsigned long nr, dindex, index;
129
130 nr = size >> PAGE_SHIFT;
131 dindex = VMALLOC_VMADDR(addr);
132 index = (dindex >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
133 dindex = (dindex >> PGDIR_SHIFT) & (PTRS_PER_PAGE-1);
134 while (nr > 0) {
135 unsigned long i = PTRS_PER_PAGE - index;
136
137 if (i > nr)
138 i = nr;
139 nr -= i;
140 if (area_fn(dindex, index, i))
141 return -1;
142 index = 0;
143 dindex++;
144 }
145 return 0;
146 }
147
148 void vfree(void * addr)
149 {
150 struct vm_struct **p, *tmp;
151
152 if (!addr)
153 return;
154 if ((PAGE_SIZE-1) & (unsigned long) addr) {
155 printk("Trying to vfree() bad address (%p)\n", addr);
156 return;
157 }
158 for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
159 if (tmp->addr == addr) {
160 *p = tmp->next;
161 do_area(tmp->addr, tmp->size, free_area_pages);
162 kfree(tmp);
163 return;
164 }
165 }
166 printk("Trying to vfree() nonexistent vm area (%p)\n", addr);
167 }
168
/*
 * Allocate 'size' bytes of page-aligned, virtually contiguous kernel
 * memory backed by individually allocated physical pages.
 * Returns the virtual address, or NULL on failure.
 *
 * area->size is the request plus one extra PAGE_SIZE; only 'size' bytes
 * are actually mapped (do_area is called with 'size'), so the trailing
 * page acts as an unmapped guard/gap between consecutive areas.
 */
void * vmalloc(unsigned long size)
{
	void * addr;
	struct vm_struct **p, *tmp, *area;

	size = PAGE_ALIGN(size);
	/* NOTE(review): 'size > high_memory' is a coarse sanity bound, not a
	 * check against the actual vmalloc address range -- confirm intent. */
	if (!size || size > high_memory)
		return NULL;
	area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;
	/* First-fit search: walk the address-sorted list for a hole big
	 * enough to hold 'size' starting at 'addr'. */
	addr = (void *) VMALLOC_START;
	area->size = size + PAGE_SIZE;
	area->next = NULL;
	for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
		if (size + (unsigned long) addr < (unsigned long) tmp->addr)
			break;
		addr = (void *) (tmp->size + (unsigned long) tmp->addr);
	}
	/* Splice the new area in at *p, keeping the list sorted. */
	area->addr = addr;
	area->next = *p;
	*p = area;
	if (do_area(addr, size, alloc_area_pages)) {
		/* Partial mapping: vfree finds the area we just linked in,
		 * tears down whatever was mapped, and frees the descriptor. */
		vfree(addr);
		return NULL;
	}
	return addr;
}
197
/*
 * Copy up to 'count' bytes of vmalloc'ed kernel memory starting at
 * kernel virtual address 'addr' into the user buffer 'buf' (via
 * put_fs_byte).  Gaps between areas -- including everything before the
 * first area -- are filled with '\0' bytes.  Relies on vmlist being
 * sorted by ascending address (vmalloc inserts in order).
 * Returns the number of bytes stored in 'buf'.
 */
int vread(char *buf, char *addr, int count)
{
	struct vm_struct **p, *tmp;
	char *vaddr, *buf_start = buf;
	int n;

	for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
		vaddr = (char *) tmp->addr;
		/* Zero-fill the unmapped gap up to the start of this area. */
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			put_fs_byte('\0', buf++), addr++, count--;
		}
		/* Mapped length of this area: size minus the guard page;
		 * if 'addr' starts inside the area, skip the bytes before it. */
		n = tmp->size - PAGE_SIZE;
		if (addr > vaddr)
			n -= addr - vaddr;
		while (--n >= 0) {
			if (count == 0)
				goto finished;
			put_fs_byte(*addr++, buf++), count--;
		}
	}
finished:
	return buf - buf_start;
}