This source file includes the following definitions:
- add_mem_queue
- remove_mem_queue
- free_pages_ok
- check_free_buffers
- free_pages
- mark_used
- __get_free_pages
- show_free_areas
- free_area_init
- swap_in
/*
 *  linux/mm/page_alloc.c
 */

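/*
 * This file implements the kernel's physical page allocator using a
 * binary buddy scheme: free memory is kept on NR_MEM_LISTS circular
 * doubly-linked lists, where list `order' holds blocks of 2^order
 * contiguous pages, and a per-order bitmap tracks buddy pairs so
 * that freed blocks can be coalesced with their neighbours.
 */
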
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/swap.h>
#include <linux/fs.h>
#include <linux/swapctl.h>

#include <asm/dma.h>
#include <asm/system.h>
#include <asm/segment.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>

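/*
 * add_mem_queue() and remove_mem_queue() are the primitive operations
 * on the free-area lists.  Each free block of 2^order pages has a
 * struct mem_list embedded at its start, linked into the circular
 * doubly-linked list headed by free_area_list[order].
 */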
static inline void add_mem_queue(struct mem_list * head, struct mem_list * entry)
{
        entry->prev = head;
        (entry->next = head->next)->prev = entry;
        head->next = entry;
}

static inline void remove_mem_queue(struct mem_list * head, struct mem_list * entry)
{
        struct mem_list * next = entry->next;
        (next->prev = entry->prev)->next = next;
}

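/*
 * free_pages_ok() returns a block of 2^order pages to the free lists,
 * merging buddies as it goes.  free_area_map[order] holds one bit per
 * buddy pair; change_bit() toggles it and returns the old value, so a
 * zero result means the buddy is not free and merging stops.  The
 * buddy's address is addr ^ -mask, written here as addr ^ (1+~mask).
 *
 * A worked example (assuming 4 kB pages, i.e. PAGE_SHIFT == 12, which
 * is an assumption, not something this file fixes): freeing an
 * order-0 page at offset 0x5000 gives mask == PAGE_MASK, and the
 * buddy is 0x5000 ^ 0x1000 == 0x4000.  If that page is free too, the
 * pair merges into one order-1 block at 0x4000 and the loop retries
 * one order higher.  The top order (NR_MEM_LISTS-1) never merges.
 */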
static inline void free_pages_ok(unsigned long addr, unsigned long order)
{
        unsigned long index = MAP_NR(addr) >> (1 + order);
        unsigned long mask = PAGE_MASK << order;

        addr &= mask;
        nr_free_pages += 1 << order;
        while (order < NR_MEM_LISTS-1) {
                if (!change_bit(index, free_area_map[order]))
                        break;
                remove_mem_queue(free_area_list+order, (struct mem_list *) (addr ^ (1+~mask)));
                order++;
                index >>= 1;
                mask <<= 1;
                addr &= mask;
        }
        add_mem_queue(free_area_list+order, (struct mem_list *) addr);
}

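/*
 * check_free_buffers() walks the ring of buffer heads attached to a
 * page (b_this_page links them in a cycle) and refiles any buffer
 * still sitting on the BUF_SHARED list, since the page has just
 * stopped being shared.
 */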
static inline void check_free_buffers(unsigned long addr)
{
        struct buffer_head * bh;

        bh = buffer_pages[MAP_NR(addr)];
        if (bh) {
                struct buffer_head *tmp = bh;
                do {
                        if (tmp->b_list == BUF_SHARED
                            && tmp->b_dev != B_FREE)
                                refile_buffer(tmp);
                        tmp = tmp->b_this_page;
                } while (tmp != bh);
        }
}

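/*
 * free_pages() is the public entry point.  It drops one reference to
 * the block; only when the count reaches zero is the block actually
 * handed to free_pages_ok() and removed from the swap cache.
 * Reserved pages and out-of-range addresses are silently ignored,
 * and freeing an already-free page is reported rather than acted on.
 * A count that lands on exactly one means the last sharer went away,
 * so the page's buffers may need refiling.
 */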
void free_pages(unsigned long addr, unsigned long order)
{
        if (MAP_NR(addr) < MAP_NR(high_memory)) {
                unsigned long flag;
                mem_map_t * map = mem_map + MAP_NR(addr);
                if (map->reserved)
                        return;
                if (map->count) {
                        save_flags(flag);
                        cli();
                        if (!--map->count) {
                                free_pages_ok(addr, order);
                                delete_from_swap_cache(addr);
                        }
                        restore_flags(flag);
                        if (map->count == 1)
                                check_free_buffers(addr);
                        return;
                }
                printk("Trying to free free memory (%08lx): memory probably corrupted\n",addr);
                printk("PC = %p\n", __builtin_return_address(0));
                return;
        }
}

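/*
 * RMQUEUE scans the free lists from the requested order upwards and
 * takes the first block whose address lies below `limit' (used to
 * satisfy DMA-constrained allocations).  With interrupts still off it
 * unlinks the block, toggles its bitmap bit and debits nr_free_pages
 * by the *requested* size; EXPAND then gives back any excess.  On
 * success it returns straight out of __get_free_pages(); falling out
 * of the macro means nothing suitable was found.
 */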
#define RMQUEUE(order, limit) \
do { struct mem_list * queue = free_area_list+order; \
     unsigned long new_order = order; \
        do { struct mem_list *prev = queue, *ret; \
                while (queue != (ret = prev->next)) { \
                        if ((unsigned long) ret < (limit)) { \
                                (prev->next = ret->next)->prev = prev; \
                                mark_used((unsigned long) ret, new_order); \
                                nr_free_pages -= 1 << order; \
                                restore_flags(flags); \
                                EXPAND(ret, order, new_order); \
                                return (unsigned long) ret; \
                        } \
                        prev = ret; \
                } \
                new_order++; queue++; \
        } while (new_order < NR_MEM_LISTS); \
} while (0)

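/*
 * mark_used() toggles the bitmap bit covering the buddy pair that
 * contains addr at the given order, returning the old value.  The
 * invariant: the bit is set while exactly one buddy of the pair is
 * free, which is how free_pages_ok() spots a mergeable neighbour.
 */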
static inline int mark_used(unsigned long addr, unsigned long order)
{
        return change_bit(MAP_NR(addr) >> (1+order), free_area_map[order]);
}

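/*
 * EXPAND trims a 2^high block down to the 2^low pages actually
 * requested: each step puts the lower half back on the next-lower
 * free list (briefly taking cli() again) and keeps the upper half.
 * The surviving fragment gets a reference count of 1 and an initial
 * page age.
 */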
#define EXPAND(addr,low,high) \
do { unsigned long size = PAGE_SIZE << high; \
        while (high > low) { \
                high--; size >>= 1; cli(); \
                add_mem_queue(free_area_list+high, addr); \
                mark_used((unsigned long) addr, high); \
                restore_flags(flags); \
                addr = (struct mem_list *) (size + (unsigned long) addr); \
        } mem_map[MAP_NR((unsigned long) addr)].count = 1; \
        mem_map[MAP_NR((unsigned long) addr)].age = PAGE_INITIAL_AGE; \
} while (0)

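/*
 * __get_free_pages() is the allocator proper.  GFP_ATOMIC callers
 * may dig into the reserved pool and never sleep; all other callers
 * only allocate while more than `reserved_pages' pages remain free,
 * and otherwise call try_to_free_page() to reclaim memory before
 * retrying (GFP_BUFFER gives up instead).  Only GFP_NFS requests use
 * the smaller 5-page reserve.  Calling non-atomically from an
 * interrupt is a bug; the first few occurrences are logged and
 * demoted to GFP_ATOMIC.
 */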
unsigned long __get_free_pages(int priority, unsigned long order, unsigned long limit)
{
        unsigned long flags;
        int reserved_pages;

        if (order >= NR_MEM_LISTS)
                return 0;
        if (intr_count && priority != GFP_ATOMIC) {
                static int count = 0;
                if (++count < 5) {
                        printk("gfp called nonatomically from interrupt %p\n",
                                __builtin_return_address(0));
                        priority = GFP_ATOMIC;
                }
        }
        reserved_pages = 5;
        if (priority != GFP_NFS)
                reserved_pages = min_free_pages;
        save_flags(flags);
repeat:
        cli();
        if ((priority==GFP_ATOMIC) || nr_free_pages > reserved_pages) {
                RMQUEUE(order, limit);
                restore_flags(flags);
                return 0;
        }
        restore_flags(flags);
        if (priority != GFP_BUFFER && try_to_free_page(priority, limit))
                goto repeat;
        return 0;
}

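/*
 * show_free_areas() prints, with interrupts off so the lists hold
 * still, one "count*size" pair per order, e.g. "3*4kB 1*8kB ...",
 * followed by the grand total of free memory in kB.
 */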
void show_free_areas(void)
{
        unsigned long order, flags;
        unsigned long total = 0;

        printk("Free pages: %6dkB\n ( ",nr_free_pages<<(PAGE_SHIFT-10));
        save_flags(flags);
        cli();
        for (order=0 ; order < NR_MEM_LISTS; order++) {
                struct mem_list * tmp;
                unsigned long nr = 0;
                for (tmp = free_area_list[order].next ; tmp != free_area_list + order ; tmp = tmp->next) {
                        nr++;
                }
                total += nr * ((PAGE_SIZE>>10) << order);
                printk("%lu*%lukB ", nr, (PAGE_SIZE>>10) << order);
        }
        restore_flags(flags);
        printk("= %lukB)\n", total);
#ifdef SWAP_CACHE_INFO
        show_swap_cache_info();
#endif
}

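/*
 * LONG_ALIGN rounds x up to the next multiple of sizeof(long):
 * e.g. LONG_ALIGN(13) == 16 where sizeof(long) == 4.
 */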
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

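/*
 * free_area_init() sets up the allocator at boot time, carving its
 * data structures out of [start_mem, end_mem): the swap cache, the
 * mem_map array (every page starts out reserved; whoever finishes
 * memory initialisation is expected to release the usable ones), the
 * empty free lists, and one zeroed bitmap per order.  Returns the
 * new start of free memory.
 */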
unsigned long free_area_init(unsigned long start_mem, unsigned long end_mem)
{
        mem_map_t * p;
        unsigned long mask = PAGE_MASK;
        int i;

        /*
         * Select the number of pages to keep free for atomic
         * allocations: roughly 1/128th of memory, with a minimum of
         * 16 pages.  free_pages_low/high are the thresholds the
         * swapping code uses to decide how hard to reclaim.
         */
        i = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT+7);
        if (i < 16)
                i = 16;
        min_free_pages = i;
        free_pages_low = i + (i>>1);
        free_pages_high = i + i;
        start_mem = init_swap_cache(start_mem, end_mem);
        mem_map = (mem_map_t *) start_mem;
        p = mem_map + MAP_NR(end_mem);
        start_mem = LONG_ALIGN((unsigned long) p);
        memset(mem_map, 0, start_mem - (unsigned long) mem_map);
        do {
                --p;
                p->reserved = 1;
        } while (p > mem_map);

        for (i = 0 ; i < NR_MEM_LISTS ; i++) {
                unsigned long bitmap_size;
                free_area_list[i].prev = free_area_list[i].next = &free_area_list[i];
                mask += mask;
                end_mem = (end_mem + ~mask) & mask;
                bitmap_size = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT + i);
                bitmap_size = (bitmap_size + 7) >> 3;
                bitmap_size = LONG_ALIGN(bitmap_size);
                free_area_map[i] = (unsigned int *) start_mem;
                memset((void *) start_mem, 0, bitmap_size);
                start_mem += bitmap_size;
        }
        return start_mem;
}

266
267
268
269
270
271
272
273
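/*
 * swap_in() services a fault on a swapped-out page.  The pte is
 * re-checked against `entry' after every operation that can sleep
 * (the page allocation and the swap read), since another thread of
 * control sharing the page table may have completed the swap-in
 * first.  A read-only fault leaves the page clean and registered in
 * the swap cache, so it can be dropped again cheaply; a write fault
 * maps it dirty and writable and releases the swap slot at once.
 */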
void swap_in(struct task_struct * tsk, struct vm_area_struct * vma,
        pte_t * page_table, unsigned long entry, int write_access)
{
        unsigned long page = __get_free_page(GFP_KERNEL);

        if (pte_val(*page_table) != entry) {
                free_page(page);
                return;
        }
        if (!page) {
                set_pte(page_table, BAD_PAGE);
                swap_free(entry);
                oom(tsk);
                return;
        }
        read_swap_page(entry, (char *) page);
        if (pte_val(*page_table) != entry) {
                free_page(page);
                return;
        }
        vma->vm_mm->rss++;
        tsk->maj_flt++;
        if (!write_access && add_to_swap_cache(page, entry)) {
                set_pte(page_table, mk_pte(page, vma->vm_page_prot));
                return;
        }
        set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))));
        swap_free(entry);
        return;
}