This source file includes the following definitions:
- get_free_page
- expand_stack
- find_vma
- find_vma_intersection
- in_swap_cache
- find_in_swap_cache
- delete_from_swap_cache
1 #ifndef _LINUX_MM_H
2 #define _LINUX_MM_H
3
4 #include <linux/sched.h>
5 #include <linux/errno.h>
6 #include <linux/kernel.h>
7 #include <linux/string.h>
8
9 extern unsigned long high_memory;
10
11 #include <asm/page.h>
12
13 #ifdef __KERNEL__
14
15 #define VERIFY_READ 0
16 #define VERIFY_WRITE 1
17
18 extern int verify_area(int, const void *, unsigned long);
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
/*
 * Describes one contiguous virtual memory area of a task: its address
 * range [vm_start, vm_end), protection, flags, and the operations and
 * backing object (inode/offset) used to service faults in it.
 */
struct vm_area_struct {
	struct mm_struct * vm_mm;	/* owning address space */
	unsigned long vm_start;		/* first address of the area */
	unsigned long vm_end;		/* first address past the area */
	pgprot_t vm_page_prot;		/* hardware page protection for this area */
	unsigned short vm_flags;	/* VM_* flags, see definitions below */

	/* AVL-tree linkage: areas of a task keyed by address (see find_vma). */
	short vm_avl_height;
	struct vm_area_struct * vm_avl_left;
	struct vm_area_struct * vm_avl_right;

	/* singly-linked list of the task's areas, sorted by address */
	struct vm_area_struct * vm_next;

	/* circular list of areas sharing the same backing object
	 * (presumably linked via the inode for file mappings — see
	 * remove_shared_vm_struct/merge_segments users) */
	struct vm_area_struct * vm_next_share;
	struct vm_area_struct * vm_prev_share;

	struct vm_operations_struct * vm_ops;	/* fault/teardown callbacks, may be NULL */
	unsigned long vm_offset;	/* offset of vm_start within the backing object */
	struct inode * vm_inode;	/* backing file's inode, NULL for anonymous memory */
	unsigned long vm_pte;		/* shared-memory cookie (see SHM_SWP_TYPE below) */
};
58
59
60
61
62 #define VM_READ 0x0001
63 #define VM_WRITE 0x0002
64 #define VM_EXEC 0x0004
65 #define VM_SHARED 0x0008
66
67 #define VM_MAYREAD 0x0010
68 #define VM_MAYWRITE 0x0020
69 #define VM_MAYEXEC 0x0040
70 #define VM_MAYSHARE 0x0080
71
72 #define VM_GROWSDOWN 0x0100
73 #define VM_GROWSUP 0x0200
74 #define VM_SHM 0x0400
75 #define VM_DENYWRITE 0x0800
76
77 #define VM_EXECUTABLE 0x1000
78 #define VM_LOCKED 0x2000
79
80 #define VM_STACK_FLAGS 0x0177
81
82
83
84
85
86 extern pgprot_t protection_map[16];
87
88
89
90
91
92
93
/*
 * Per-area callback table: opening/closing an area, unmapping and
 * protecting parts of it, and the handlers invoked on no-page and
 * write-protect faults and on swap-out/swap-in of its pages.
 * Any pointer may be NULL, in which case the generic code handles it.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);		/* area created/duplicated */
	void (*close)(struct vm_area_struct * area);		/* area being destroyed */
	void (*unmap)(struct vm_area_struct *area, unsigned long, size_t);
	void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot);
	int (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags);
	void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise);
	/* supply the page for a fault at 'address'; returns the page address */
	unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access);
	/* resolve a write-protect fault on 'page' at 'address' */
	unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
		unsigned long page);
	int (*swapout)(struct vm_area_struct *, unsigned long, pte_t *);
	pte_t (*swapin)(struct vm_area_struct *, unsigned long, unsigned long);
};
107
/*
 * One descriptor per physical page frame; the array of these is
 * mem_map (below), indexed by page frame number.
 */
typedef struct page {
	unsigned int count;		/* usage/reference count of the frame */
	unsigned dirty:16,		/* bitfields packed into one word: */
		 age:8,			/*   page age, for replacement decisions */
		 uptodate:1,		/*   contents are valid */
		 error:1,		/*   I/O error while reading the page */
		 unused:5,
		 reserved:1;		/*   frame not available for allocation */
	unsigned long offset;		/* offset of this page in its inode */
	struct inode *inode;		/* inode the page belongs to, if any */
	struct wait_queue *wait;	/* tasks waiting on this page (e.g. for I/O) */
	struct page *write_list;
	struct page *next, *prev;	/* general page-list linkage */
	struct page *next_hash, *prev_hash;	/* hash-chain linkage for page lookup */
} mem_map_t;
123
124 extern mem_map_t * mem_map;
125
126
127
128
129
130 extern int nr_swap_pages;
131 extern int nr_free_pages;
132 extern int min_free_pages;
133
134 #define NR_MEM_LISTS 6
135
/*
 * Doubly-linked list node embedded in free pages; free_area_list[]
 * below keeps one such list per allocation order (NR_MEM_LISTS orders).
 */
struct mem_list {
	struct mem_list * next;
	struct mem_list * prev;
};
140
141 extern struct mem_list free_area_list[NR_MEM_LISTS];
142 extern unsigned int * free_area_map[NR_MEM_LISTS];
143
144
145
146
147
148
149 #define __get_free_page(priority) __get_free_pages((priority),0,~0UL)
150 #define __get_dma_pages(priority, order) __get_free_pages((priority),(order),MAX_DMA_ADDRESS)
151 extern unsigned long __get_free_pages(int priority, unsigned long gfporder, unsigned long max_addr);
152
153 extern inline unsigned long get_free_page(int priority)
154 {
155 unsigned long page;
156
157 page = __get_free_page(priority);
158 if (page)
159 memset((void *) page, 0, PAGE_SIZE);
160 return page;
161 }
162
163
164
165 #define free_page(addr) free_pages((addr),0)
166 extern void free_pages(unsigned long addr, unsigned long order);
167
168 extern void show_free_areas(void);
169 extern unsigned long put_dirty_page(struct task_struct * tsk,unsigned long page,
170 unsigned long address);
171
172 extern void free_page_tables(struct task_struct * tsk);
173 extern void clear_page_tables(struct task_struct * tsk);
174 extern int new_page_tables(struct task_struct * tsk);
175 extern int copy_page_tables(struct task_struct * to);
176
177 extern int zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
178 extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
179 extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
180 extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);
181
182 extern void vmtruncate(struct inode * inode, unsigned long offset);
183 extern void handle_mm_fault(struct vm_area_struct *vma, unsigned long address, int write_access);
184 extern void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
185 extern void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
186
187 extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem);
188 extern void mem_init(unsigned long start_mem, unsigned long end_mem);
189 extern void show_mem(void);
190 extern void oom(struct task_struct * tsk);
191 extern void si_meminfo(struct sysinfo * val);
192
193
194
195 extern void * vmalloc(unsigned long size);
196 extern void * vremap(unsigned long offset, unsigned long size);
197 extern void vfree(void * addr);
198 extern int vread(char *buf, char *addr, int count);
199
200
201
202 extern void swap_free(unsigned long);
203 extern void swap_duplicate(unsigned long);
204 extern void swap_in(struct task_struct *, struct vm_area_struct *, pte_t *, unsigned long id, int write_access);
205
206 extern void si_swapinfo(struct sysinfo * val);
207 extern void rw_swap_page(int rw, unsigned long nr, char * buf);
208
209
210 extern unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
211 unsigned long prot, unsigned long flags, unsigned long off);
212 extern void merge_segments(struct task_struct *, unsigned long, unsigned long);
213 extern void insert_vm_struct(struct task_struct *, struct vm_area_struct *);
214 extern void remove_shared_vm_struct(struct vm_area_struct *);
215 extern void build_mmap_avl(struct mm_struct *);
216 extern void exit_mmap(struct mm_struct *);
217 extern int do_munmap(unsigned long, size_t);
218 extern unsigned long get_unmapped_area(unsigned long, unsigned long);
219
220
221 extern unsigned long page_unuse(unsigned long);
222 extern int shrink_mmap(int, unsigned long);
223
224 #define read_swap_page(nr,buf) \
225 rw_swap_page(READ,(nr),(buf))
226 #define write_swap_page(nr,buf) \
227 rw_swap_page(WRITE,(nr),(buf))
228
229 #define GFP_BUFFER 0x00
230 #define GFP_ATOMIC 0x01
231 #define GFP_USER 0x02
232 #define GFP_KERNEL 0x03
233 #define GFP_NOBUFFER 0x04
234 #define GFP_NFS 0x05
235
236
237
238
239 #define GFP_DMA 0x80
240
241 #define GFP_LEVEL_MASK 0xf
242
243 #define avl_empty (struct vm_area_struct *) NULL
244
245 static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
246 {
247 unsigned long grow;
248
249 address &= PAGE_MASK;
250 if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
251 return -ENOMEM;
252 grow = vma->vm_start - address;
253 vma->vm_start = address;
254 vma->vm_offset -= grow;
255 vma->vm_mm->total_vm += grow >> PAGE_SHIFT;
256 if (vma->vm_flags & VM_LOCKED)
257 vma->vm_mm->locked_vm += grow >> PAGE_SHIFT;
258 return 0;
259 }
260
261
262 static inline struct vm_area_struct * find_vma (struct task_struct * task, unsigned long addr)
263 {
264 struct vm_area_struct * result = NULL;
265 struct vm_area_struct * tree;
266
267 if (!task->mm)
268 return NULL;
269 for (tree = task->mm->mmap_avl ; ; ) {
270 if (tree == avl_empty)
271 return result;
272 if (tree->vm_end > addr) {
273 if (tree->vm_start <= addr)
274 return tree;
275 result = tree;
276 tree = tree->vm_avl_left;
277 } else
278 tree = tree->vm_avl_right;
279 }
280 }
281
282
283
284 static inline struct vm_area_struct * find_vma_intersection (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
285 {
286 struct vm_area_struct * vma;
287
288 vma = find_vma(task,start_addr);
289 if (!vma || end_addr <= vma->vm_start)
290 return NULL;
291 return vma;
292 }
293
294
295
296
297
298
299 #define SHM_SWP_TYPE 0x40
300
301 extern void shm_no_page (ulong *);
302
303
304
305
306 #define SWAP_CACHE_INFO
307
308 extern unsigned long * swap_cache;
309
310 #ifdef SWAP_CACHE_INFO
311 extern unsigned long swap_cache_add_total;
312 extern unsigned long swap_cache_add_success;
313 extern unsigned long swap_cache_del_total;
314 extern unsigned long swap_cache_del_success;
315 extern unsigned long swap_cache_find_total;
316 extern unsigned long swap_cache_find_success;
317 #endif
318
319 extern inline unsigned long in_swap_cache(unsigned long addr)
320 {
321 return swap_cache[MAP_NR(addr)];
322 }
323
324 extern inline long find_in_swap_cache (unsigned long addr)
325 {
326 unsigned long entry;
327
328 #ifdef SWAP_CACHE_INFO
329 swap_cache_find_total++;
330 #endif
331 entry = xchg(swap_cache + MAP_NR(addr), 0);
332 #ifdef SWAP_CACHE_INFO
333 if (entry)
334 swap_cache_find_success++;
335 #endif
336 return entry;
337 }
338
339 extern inline int delete_from_swap_cache(unsigned long addr)
340 {
341 unsigned long entry;
342
343 #ifdef SWAP_CACHE_INFO
344 swap_cache_del_total++;
345 #endif
346 entry= xchg(swap_cache + MAP_NR(addr), 0);
347 if (entry) {
348 #ifdef SWAP_CACHE_INFO
349 swap_cache_del_success++;
350 #endif
351 swap_free(entry);
352 return 1;
353 }
354 return 0;
355 }
356
357 #endif
358
359 #endif