This source file includes the following definitions:
- get_free_page
- expand_stack
- find_vma
- find_vma_intersection
- in_swap_cache
- find_in_swap_cache
- delete_from_swap_cache
#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>

extern unsigned long high_memory;

#include <asm/page.h>

#ifdef __KERNEL__

#define VERIFY_READ 0
#define VERIFY_WRITE 1

extern int verify_area(int, const void *, unsigned long);
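
/*
 * A vm_area_struct describes one virtual memory area of a process: any
 * part of the address space with its own rule for the page-fault
 * handlers (the executable image, a shared library, an mmap()ed file
 * and so on).  There is one of these per VM area, per task.
 */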
struct vm_area_struct {
	struct mm_struct * vm_mm;
	unsigned long vm_start;
	unsigned long vm_end;
	pgprot_t vm_page_prot;
	unsigned short vm_flags;
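	/* AVL tree of VM areas per task, sorted by address */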
	short vm_avl_height;
	struct vm_area_struct * vm_avl_left;
	struct vm_area_struct * vm_avl_right;
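	/* linked list of VM areas per task, sorted by address */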
	struct vm_area_struct * vm_next;
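	/* circular list of areas that map the same inode (inode->i_mmap)
	 * or, for shm areas, the list of attaches; otherwise unused */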
	struct vm_area_struct * vm_next_share;
	struct vm_area_struct * vm_prev_share;

	struct vm_operations_struct * vm_ops;
	unsigned long vm_offset;
	struct inode * vm_inode;
	unsigned long vm_pte;
};
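
/*
 * vm_flags: the low four bits are the currently active protections,
 * the VM_MAY* bits limit what mprotect() may turn on, and the
 * remaining bits describe general properties of the area.
 */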
#define VM_READ		0x0001
#define VM_WRITE	0x0002
#define VM_EXEC		0x0004
#define VM_SHARED	0x0008

#define VM_MAYREAD	0x0010
#define VM_MAYWRITE	0x0020
#define VM_MAYEXEC	0x0040
#define VM_MAYSHARE	0x0080

#define VM_GROWSDOWN	0x0100
#define VM_GROWSUP	0x0200
#define VM_SHM		0x0400
#define VM_DENYWRITE	0x0800

#define VM_EXECUTABLE	0x1000
#define VM_LOCKED	0x2000

#define VM_STACK_FLAGS	0x0177
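
/*
 * Mapping from the currently active vm_flags protection bits (the low
 * four bits) to a page protection mask.
 */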
extern pgprot_t protection_map[16];
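
/*
 * These are the virtual MM functions: opening of an area, closing and
 * unmapping it (needed to keep files on disk up to date etc), and the
 * handlers called when a no-page or a wp-page fault occurs.
 */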
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	void (*unmap)(struct vm_area_struct *area, unsigned long, size_t);
	void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot);
	int (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags);
	void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise);
	unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address,
		unsigned long page, int write_access);
	unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
		unsigned long page);
	int (*swapout)(struct vm_area_struct *, unsigned long, pte_t *);
	pte_t (*swapin)(struct vm_area_struct *, unsigned long, unsigned long);
};
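
/*
 * One mem_map_t per physical page of memory: a 24-bit use count plus
 * age, dirty and reserved bits.
 */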
typedef struct {
	unsigned count:24,
		 age:6,
		 dirty:1,
		 reserved:1;
} mem_map_t;

extern mem_map_t * mem_map;
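
/*
 * Free memory bookkeeping: the buddy allocator keeps NR_MEM_LISTS
 * doubly linked free lists (one per allocation order) together with
 * one bitmap per order.
 */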
extern int nr_swap_pages;
extern int nr_free_pages;
extern int min_free_pages;

#define NR_MEM_LISTS 6

struct mem_list {
	struct mem_list * next;
	struct mem_list * prev;
};

extern struct mem_list free_area_list[NR_MEM_LISTS];
extern unsigned char * free_area_map[NR_MEM_LISTS];
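
/*
 * Page allocation: __get_free_pages() returns 2^gfporder physically
 * contiguous pages below max_addr.  get_free_page() below returns a
 * single page cleared to zero; __get_free_page() does not clear it.
 */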
#define __get_free_page(priority) __get_free_pages((priority),0,~0UL)
#define __get_dma_pages(priority, order) __get_free_pages((priority),(order),MAX_DMA_ADDRESS)
extern unsigned long __get_free_pages(int priority, unsigned long gfporder, unsigned long max_addr);

extern inline unsigned long get_free_page(int priority)
{
	unsigned long page;

	page = __get_free_page(priority);
	if (page)
		memset((void *) page, 0, PAGE_SIZE);
	return page;
}

#define free_page(addr) free_pages((addr),0)
extern void free_pages(unsigned long addr, unsigned long order);

extern void show_free_areas(void);
extern unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page,
	unsigned long address);

extern void free_page_tables(struct task_struct * tsk);
extern void clear_page_tables(struct task_struct * tsk);
extern int new_page_tables(struct task_struct * tsk);
extern int copy_page_tables(struct task_struct * to);

extern int zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);

extern void vmtruncate(struct inode * inode, unsigned long offset);
extern void handle_mm_fault(struct vm_area_struct *vma, unsigned long address, int write_access);
extern void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
extern void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);

extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem);
extern void mem_init(unsigned long start_mem, unsigned long end_mem);
extern void show_mem(void);
extern void oom(struct task_struct * tsk);
extern void si_meminfo(struct sysinfo * val);
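
/* vmalloc() allocates virtually (not necessarily physically) contiguous
 * kernel memory; vremap() maps a physical address range into kernel
 * virtual space. */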
extern void * vmalloc(unsigned long size);
extern void * vremap(unsigned long offset, unsigned long size);
extern void vfree(void * addr);
extern int vread(char *buf, char *addr, int count);

extern void swap_free(unsigned long);
extern void swap_duplicate(unsigned long);
extern void swap_in(struct task_struct *, struct vm_area_struct *, pte_t *, unsigned long id, int write_access);

extern void si_swapinfo(struct sysinfo * val);
extern void rw_swap_page(int rw, unsigned long nr, char * buf);
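
/* mmap() support and maintenance of the per-task VMA list and AVL tree */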
extern unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long off);
extern void merge_segments(struct task_struct *, unsigned long, unsigned long);
extern void insert_vm_struct(struct task_struct *, struct vm_area_struct *);
extern void remove_shared_vm_struct(struct vm_area_struct *);
extern void build_mmap_avl(struct mm_struct *);
extern void exit_mmap(struct mm_struct *);
extern int do_munmap(unsigned long, size_t);
extern unsigned long get_unmapped_area(unsigned long, unsigned long);

#define read_swap_page(nr,buf) \
	rw_swap_page(READ,(nr),(buf))
#define write_swap_page(nr,buf) \
	rw_swap_page(WRITE,(nr),(buf))
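
/*
 * Allocation priorities for __get_free_pages(): the low four bits
 * (GFP_LEVEL_MASK) select how hard to try; GFP_DMA marks requests for
 * DMA-capable memory (cf. __get_dma_pages(), which allocates below
 * MAX_DMA_ADDRESS).
 */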
#define GFP_BUFFER	0x00
#define GFP_ATOMIC	0x01
#define GFP_USER	0x02
#define GFP_KERNEL	0x03
#define GFP_NOBUFFER	0x04
#define GFP_NFS		0x05

#define GFP_DMA		0x80

#define GFP_LEVEL_MASK	0xf

#define avl_empty	(struct vm_area_struct *) NULL
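
/*
 * vma is the first area with address < vma->vm_end, but address also
 * lies below vma->vm_start: grow the stack area downwards so that it
 * covers address, charging the new pages to the mm counters.
 */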
static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
{
	unsigned long grow;

	address &= PAGE_MASK;
	if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
		return -ENOMEM;
	grow = vma->vm_start - address;
	vma->vm_start = address;
	vma->vm_offset -= grow;
	vma->vm_mm->total_vm += grow >> PAGE_SHIFT;
	if (vma->vm_flags & VM_LOCKED)
		vma->vm_mm->locked_vm += grow >> PAGE_SHIFT;
	return 0;
}
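
/* Look up the first VMA which satisfies addr < vm_end; NULL if none. */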
static inline struct vm_area_struct * find_vma(struct task_struct * task, unsigned long addr)
{
	struct vm_area_struct * result = NULL;
	struct vm_area_struct * tree;

	if (!task->mm)
		return NULL;
	for (tree = task->mm->mmap_avl ; ; ) {
		if (tree == avl_empty)
			return result;
		if (tree->vm_end > addr) {
			if (tree->vm_start <= addr)
				return tree;
			result = tree;
			tree = tree->vm_avl_left;
		} else
			tree = tree->vm_avl_right;
	}
}
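
/* Look up the first VMA that overlaps the interval [start_addr, end_addr); NULL if none. */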
static inline struct vm_area_struct * find_vma_intersection(struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma;

	vma = find_vma(task, start_addr);
	if (!vma || end_addr <= vma->vm_start)
		return NULL;
	return vma;
}
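
/* System V shared memory: swap-entry type and no-page handler used for
 * paged-out shm pages. */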
#define SHM_SWP_TYPE 0x40

extern void shm_no_page(ulong *);
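
/*
 * The swap cache records, for each physical page, the swap entry (if
 * any) that still holds an identical copy of the page on the swap
 * device, so a clean page can be dropped without being written again.
 */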
#define SWAP_CACHE_INFO

extern unsigned long * swap_cache;

#ifdef SWAP_CACHE_INFO
extern unsigned long swap_cache_add_total;
extern unsigned long swap_cache_add_success;
extern unsigned long swap_cache_del_total;
extern unsigned long swap_cache_del_success;
extern unsigned long swap_cache_find_total;
extern unsigned long swap_cache_find_success;
#endif
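
/* Return the swap entry cached for the page at addr (0 if none); the
 * entry is left in place. */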
extern inline unsigned long in_swap_cache(unsigned long addr)
{
	return swap_cache[MAP_NR(addr)];
}
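
/* Atomically remove and return the swap entry cached for the page at
 * addr (0 if there was none). */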
extern inline long find_in_swap_cache(unsigned long addr)
{
	unsigned long entry;

#ifdef SWAP_CACHE_INFO
	swap_cache_find_total++;
#endif
	entry = xchg(swap_cache + MAP_NR(addr), 0);
#ifdef SWAP_CACHE_INFO
	if (entry)
		swap_cache_find_success++;
#endif
	return entry;
}
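
/* Remove the page at addr from the swap cache and free its swap entry;
 * returns 1 if an entry was present, 0 otherwise. */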
extern inline int delete_from_swap_cache(unsigned long addr)
{
	unsigned long entry;

#ifdef SWAP_CACHE_INFO
	swap_cache_del_total++;
#endif
	entry = xchg(swap_cache + MAP_NR(addr), 0);
	if (entry) {
#ifdef SWAP_CACHE_INFO
		swap_cache_del_success++;
#endif
		swap_free(entry);
		return 1;
	}
	return 0;
}

#endif

#endif