This source file includes the following definitions:
- init_mem_queue
- add_mem_queue
- remove_mem_queue
- free_pages_ok
- check_free_buffers
- free_pages
- __get_free_pages
- show_free_areas
- free_area_init
- swap_in
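
/*
 * Physical page allocation.
 *
 * This file implements a buddy-system ("free area") allocator for
 * physical pages -- free_pages(), __get_free_pages() and the
 * free_area_init() bootstrap code -- plus swap_in(), which reads a
 * page back in from swap on a page fault.
 */
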
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/swap.h>
#include <linux/fs.h>
#include <linux/swapctl.h>

#include <asm/dma.h>
#include <asm/system.h>
#include <asm/segment.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>

int nr_swap_pages = 0;
int nr_free_pages = 0;

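/*
 * Free area management.
 *
 * free_area[] holds one queue of free blocks per block size
 * (order 0..NR_MEM_LISTS-1, i.e. 1, 2, 4, ... 32 pages), plus a
 * bitmap with one bit per buddy pair that records whether exactly
 * one of the two buddies is currently free.
 */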
#define NR_MEM_LISTS 6

struct free_area_struct {
        struct page list;
        unsigned int * map;
};

static struct free_area_struct free_area[NR_MEM_LISTS];

static inline void init_mem_queue(struct page * head)
{
        head->next = head;
        head->prev = head;
}

static inline void add_mem_queue(struct page * head, struct page * entry)
{
        struct page * next = head->next;

        entry->prev = head;
        entry->next = next;
        next->prev = entry;
        head->next = entry;
}

static inline void remove_mem_queue(struct page * head, struct page * entry)
{
        struct page * next = entry->next;
        struct page * prev = entry->prev;
        next->prev = prev;
        prev->next = next;
}

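/*
 * free_pages_ok() puts a block of 2^order pages back on the free
 * lists and merges it with its buddy whenever that buddy is free,
 * repeating at the next order until no further merge is possible.
 *
 * The buddy of a block is found by flipping the bit of the page
 * number that corresponds to the block size: for example, at order 1
 * (a 2-page block) the buddy of the block starting at map_nr 4 starts
 * at map_nr 6, since 1+~mask == 1 << order == 2.
 */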
static inline void free_pages_ok(unsigned long map_nr, unsigned long order)
{
        unsigned long index = map_nr >> (1 + order);
        unsigned long mask = (~0UL) << order;

#define list(x) (mem_map+(x))

        map_nr &= mask;
        nr_free_pages += 1 << order;
        while (order < NR_MEM_LISTS-1) {
                if (!change_bit(index, free_area[order].map))
                        break;
                remove_mem_queue(&free_area[order].list, list(map_nr ^ (1+~mask)));
                mask <<= 1;
                order++;
                index >>= 1;
                map_nr &= mask;
        }
        add_mem_queue(&free_area[order].list, list(map_nr));
#undef list
}

static inline void check_free_buffers(mem_map_t * map)
{
        struct buffer_head * bh;

        bh = map->buffers;
        if (bh) {
                struct buffer_head *tmp = bh;
                do {
                        if (tmp->b_list == BUF_SHARED
                            && tmp->b_dev != B_FREE)
                                refile_buffer(tmp);
                        tmp = tmp->b_this_page;
                } while (tmp != bh);
        }
}

void free_pages(unsigned long addr, unsigned long order)
{
        unsigned long map_nr = MAP_NR(addr);

        if (map_nr < MAP_NR(high_memory)) {
                mem_map_t * map = mem_map + map_nr;
                if (map->reserved)
                        return;
                if (map->count) {
                        unsigned long flag;
                        save_flags(flag);
                        cli();
                        if (!--map->count) {
                                free_pages_ok(map_nr, order);
                                delete_from_swap_cache(map_nr);
                        }
                        restore_flags(flag);
                        if (map->count == 1)
                                check_free_buffers(map);
                        return;
                }
                printk("Trying to free free memory (%08lx): memory probably corrupted\n",addr);
                printk("PC = %p\n", __builtin_return_address(0));
                return;
        }
}

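/*
 * Allocation path: RMQUEUE() scans the free lists from the requested
 * order upwards until it finds a block (optionally one that can be
 * used for DMA), unlinks it, and EXPAND() hands the unused halves of
 * a larger block back to the lower-order lists before the page is
 * returned.
 */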
#define MARK_USED(index, order, area) \
        change_bit((index) >> (1+(order)), (area)->map)
#define CAN_DMA(x) ((x)->dma)
#define ADDRESS(x) (PAGE_OFFSET + ((x) << PAGE_SHIFT))
#define RMQUEUE(order, dma) \
do { struct free_area_struct * area = free_area+order; \
     unsigned long new_order = order; \
        do { struct page *prev = &area->list, *ret; \
                while (&area->list != (ret = prev->next)) { \
                        if (!dma || CAN_DMA(ret)) { \
                                unsigned long map_nr = ret - mem_map; \
                                (prev->next = ret->next)->prev = prev; \
                                MARK_USED(map_nr, new_order, area); \
                                nr_free_pages -= 1 << order; \
                                EXPAND(ret, map_nr, order, new_order, area); \
                                restore_flags(flags); \
                                return ADDRESS(map_nr); \
                        } \
                        prev = ret; \
                } \
                new_order++; area++; \
        } while (new_order < NR_MEM_LISTS); \
} while (0)

#define EXPAND(map,index,low,high,area) \
do { unsigned long size = 1 << high; \
        while (high > low) { \
                area--; high--; size >>= 1; \
                add_mem_queue(&area->list, map); \
                MARK_USED(index, high, area); \
                index += size; \
                map += size; \
        } \
        map->count = 1; \
        map->age = PAGE_INITIAL_AGE; \
} while (0)

unsigned long __get_free_pages(int priority, unsigned long order, int dma)
{
        unsigned long flags;
        int reserved_pages;

        if (order >= NR_MEM_LISTS)
                return 0;
        if (intr_count && priority != GFP_ATOMIC) {
                static int count = 0;
                if (++count < 5) {
                        printk("gfp called nonatomically from interrupt %p\n",
                                __builtin_return_address(0));
                        priority = GFP_ATOMIC;
                }
        }
        reserved_pages = 5;
        if (priority != GFP_NFS)
                reserved_pages = min_free_pages;
        save_flags(flags);
repeat:
        cli();
        if ((priority==GFP_ATOMIC) || nr_free_pages > reserved_pages) {
                RMQUEUE(order, dma);
                restore_flags(flags);
                return 0;
        }
        restore_flags(flags);
        if (priority != GFP_BUFFER && try_to_free_page(priority, dma, 1))
                goto repeat;
        return 0;
}
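
/*
 * Typical use (an illustrative sketch, not part of this file): a
 * caller asks for a block of 2^order pages and later releases it
 * with the matching order, e.g.
 *
 *      unsigned long buf = __get_free_pages(GFP_KERNEL, 2, 0);
 *      if (buf)
 *              free_pages(buf, 2);
 *
 * A return value of 0 means the request could not be satisfied.
 */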
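
/*
 * show_free_areas() prints the amount of free memory, broken down by
 * block size, and (with SWAP_CACHE_INFO) the swap cache statistics.
 */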
void show_free_areas(void)
{
        unsigned long order, flags;
        unsigned long total = 0;

        printk("Free pages: %6dkB\n ( ",nr_free_pages<<(PAGE_SHIFT-10));
        save_flags(flags);
        cli();
        for (order=0 ; order < NR_MEM_LISTS; order++) {
                struct page * tmp;
                unsigned long nr = 0;
                for (tmp = free_area[order].list.next ; tmp != &free_area[order].list ; tmp = tmp->next) {
                        nr ++;
                }
                total += nr * ((PAGE_SIZE>>10) << order);
                printk("%lu*%lukB ", nr, (PAGE_SIZE>>10) << order);
        }
        restore_flags(flags);
        printk("= %lukB)\n", total);
#ifdef SWAP_CACHE_INFO
        show_swap_cache_info();
#endif
}

#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

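/*
 * free_area_init() is called once at boot to set up the allocator:
 * it places the swap cache, the mem_map[] page array and one
 * free-list bitmap per order in the memory starting at start_mem,
 * marks every page reserved and DMA-capable for now, and returns the
 * first address left unused.
 */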
unsigned long free_area_init(unsigned long start_mem, unsigned long end_mem)
{
        mem_map_t * p;
        unsigned long mask = PAGE_MASK;
        int i;

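        /*
         * Choose how many pages to try to keep free for atomic
         * allocations: roughly 1/128th of memory, but at least 16
         * pages.
         */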
        i = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT+7);
        if (i < 16)
                i = 16;
        min_free_pages = i;
        free_pages_low = i + (i>>1);
        free_pages_high = i + i;
        start_mem = init_swap_cache(start_mem, end_mem);
        mem_map = (mem_map_t *) start_mem;
        p = mem_map + MAP_NR(end_mem);
        start_mem = LONG_ALIGN((unsigned long) p);
        memset(mem_map, 0, start_mem - (unsigned long) mem_map);
        do {
                --p;
                p->dma = 1;
                p->reserved = 1;
        } while (p > mem_map);

        for (i = 0 ; i < NR_MEM_LISTS ; i++) {
                unsigned long bitmap_size;
                init_mem_queue(&free_area[i].list);
                mask += mask;
                end_mem = (end_mem + ~mask) & mask;
                bitmap_size = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT + i);
                bitmap_size = (bitmap_size + 7) >> 3;
                bitmap_size = LONG_ALIGN(bitmap_size);
                free_area[i].map = (unsigned int *) start_mem;
                memset((void *) start_mem, 0, bitmap_size);
                start_mem += bitmap_size;
        }
        return start_mem;
}

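/*
 * swap_in() handles a fault on a page that was written out to swap:
 * it allocates a fresh page, reads the swap entry into it, and maps
 * it at *page_table. Read-only faults leave the page in the swap
 * cache so the on-disk copy stays valid; write faults make the page
 * dirty and writable and release the swap entry.
 */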
void swap_in(struct task_struct * tsk, struct vm_area_struct * vma,
        pte_t * page_table, unsigned long entry, int write_access)
{
        unsigned long page = __get_free_page(GFP_KERNEL);

        if (pte_val(*page_table) != entry) {
                free_page(page);
                return;
        }
        if (!page) {
                set_pte(page_table, BAD_PAGE);
                swap_free(entry);
                oom(tsk);
                return;
        }
        read_swap_page(entry, (char *) page);
        if (pte_val(*page_table) != entry) {
                free_page(page);
                return;
        }
        vma->vm_mm->rss++;
        tsk->maj_flt++;
        if (!write_access && add_to_swap_cache(MAP_NR(page), entry)) {
                set_pte(page_table, mk_pte(page, vma->vm_page_prot));
                return;
        }
        set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))));
        swap_free(entry);
        return;
}