This source file includes the following definitions:
- get_free_page
- expand_stack
- find_vma
- find_vma_intersection
1 #ifndef _LINUX_MM_H
2 #define _LINUX_MM_H
3
4 #include <linux/sched.h>
5 #include <linux/errno.h>
6 #include <linux/kernel.h>
7 #include <linux/string.h>
8
9 extern unsigned long high_memory;
10
11 #include <asm/page.h>
12 #include <asm/atomic.h>
13
14 #ifdef __KERNEL__
15
16 #define VERIFY_READ 0
17 #define VERIFY_WRITE 1
18
19 extern int verify_area(int, const void *, unsigned long);
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
/*
 * This struct describes one virtual memory area of a task: a contiguous
 * range of addresses [vm_start, vm_end) sharing the same protection and
 * backing object.  Areas are kept both in a per-mm linked list and in a
 * per-mm AVL tree (see find_vma() below).
 */
struct vm_area_struct {
	struct mm_struct * vm_mm;	/* address space this area belongs to */
	unsigned long vm_start;		/* first address of the area (inclusive) */
	unsigned long vm_end;		/* first address past the area (exclusive) */
	pgprot_t vm_page_prot;		/* page protection for ptes in this area */
	unsigned short vm_flags;	/* VM_* bits, defined below */

	/* AVL-tree links; find_vma() walks these ordered by address */
	short vm_avl_height;
	struct vm_area_struct * vm_avl_left;
	struct vm_area_struct * vm_avl_right;

	/* per-mm singly linked list of areas */
	struct vm_area_struct * vm_next;

	/* links to other areas mapping the same object
	 * (NOTE(review): presumably chained off the shared inode —
	 * confirm against remove_shared_vm_struct() in mm/mmap.c) */
	struct vm_area_struct * vm_next_share;
	struct vm_area_struct * vm_prev_share;

	struct vm_operations_struct * vm_ops;	/* per-area operation table */
	unsigned long vm_offset;	/* offset of vm_start in the backing object;
					 * kept in sync when vm_start moves (see expand_stack) */
	struct inode * vm_inode;	/* backing inode; assumed NULL for anonymous maps — confirm */
	unsigned long vm_pte;		/* NOTE(review): meaning not visible here (shared-mem
					 * entry?) — confirm against ipc/shm usage */
};
59
60
61
62
/* vm_flags bits: current access rights */
#define VM_READ 0x0001
#define VM_WRITE 0x0002
#define VM_EXEC 0x0004
#define VM_SHARED 0x0008

/* rights that mprotect() may still grant */
#define VM_MAYREAD 0x0010
#define VM_MAYWRITE 0x0020
#define VM_MAYEXEC 0x0040
#define VM_MAYSHARE 0x0080

/* growth direction and special-area markers */
#define VM_GROWSDOWN 0x0100
#define VM_GROWSUP 0x0200
#define VM_SHM 0x0400		/* NOTE(review): presumably marks SysV shared memory — confirm */
#define VM_DENYWRITE 0x0800

#define VM_EXECUTABLE 0x1000
#define VM_LOCKED 0x2000	/* pages counted into mm->locked_vm (see expand_stack) */

/* = VM_READ|VM_WRITE|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|VM_GROWSDOWN */
#define VM_STACK_FLAGS 0x0177




/* maps the low four VM_* permission bits to a pgprot_t; defined in arch code */
extern pgprot_t protection_map[16];
88
89
90
91
92
93
94
/*
 * Per-area operation table (vma->vm_ops).  Entries may be NULL, in
 * which case the VM layer presumably falls back to default handling —
 * confirm against mm/memory.c before relying on it.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);	/* area created or duplicated */
	void (*close)(struct vm_area_struct * area);	/* area being torn down */
	void (*unmap)(struct vm_area_struct *area, unsigned long, size_t);	/* (start, len) range unmapped */
	void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot);
	int (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags);
	void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise);
	/* demand-fault handlers; both return the physical page address —
	 * TODO confirm return convention against do_no_page()/do_wp_page() */
	unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access);
	unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
		unsigned long page);
	/* swap a pte out / back in */
	int (*swapout)(struct vm_area_struct *, unsigned long, pte_t *);
	pte_t (*swapin)(struct vm_area_struct *, unsigned long, unsigned long);
};
108
109
110
111
112
113
114
115
116
117
/*
 * One of these exists for every physical page frame; the global array
 * is mem_map (declared below).  The flags word holds the atomic PG_*
 * bits defined after this struct.
 */
typedef struct page {
	atomic_t count;			/* reference count */
	unsigned flags;			/* PG_* bit flags, tested via test_bit() */
	struct wait_queue *wait;	/* tasks waiting on this page (e.g. while PG_locked) */
	struct page *next;

	/* page-cache links; hash chain presumably keyed on (inode, offset) — confirm */
	struct page *next_hash;
	unsigned long offset;		/* byte offset of this page within its inode */
	struct inode *inode;		/* inode whose data this page caches, or NULL */
	struct page *write_list;

	struct page *prev;
	struct page *prev_hash;
	struct buffer_head * buffers;	/* buffer heads attached to this page, if any */
	unsigned dirty:16,		/* NOTE(review): semantics not visible in this header — confirm */
		age:8;			/* page age, presumably for swap-out selection — confirm */
} mem_map_t;
135
136
/*
 * Bit numbers within page->flags.  Bits 0-5 and 31 were already
 * assigned; PG_dirty is placed on the free bit 6.
 */
#define PG_locked 0
#define PG_error 1
#define PG_referenced 2
#define PG_uptodate 3
#define PG_freeafter 4
#define PG_DMA 5
#define PG_dirty 6	/* was used by PageDirty() below but never defined — fix */
#define PG_reserved 31

/* predicates over page->flags; all are atomic single-bit tests */
#define PageLocked(page) (test_bit(PG_locked, &(page)->flags))
#define PageError(page) (test_bit(PG_error, &(page)->flags))
#define PageReferenced(page) (test_bit(PG_referenced, &(page)->flags))
#define PageDirty(page) (test_bit(PG_dirty, &(page)->flags))
#define PageUptodate(page) (test_bit(PG_uptodate, &(page)->flags))
#define PageFreeafter(page) (test_bit(PG_freeafter, &(page)->flags))
#define PageDMA(page) (test_bit(PG_DMA, &(page)->flags))
#define PageReserved(page) (test_bit(PG_reserved, &(page)->flags))
154
155 extern mem_map_t * mem_map;
156
157
158
159
160
161
162 #define __get_free_page(priority) __get_free_pages((priority),0,0)
163 #define __get_dma_pages(priority, order) __get_free_pages((priority),(order),1)
164 extern unsigned long __get_free_pages(int priority, unsigned long gfporder, int dma);
165
166 extern inline unsigned long get_free_page(int priority)
167 {
168 unsigned long page;
169
170 page = __get_free_page(priority);
171 if (page)
172 memset((void *) page, 0, PAGE_SIZE);
173 return page;
174 }
175
176
177
178 #define free_page(addr) free_pages((addr),0)
179 extern void free_pages(unsigned long addr, unsigned long order);
180
181 extern void show_free_areas(void);
182 extern unsigned long put_dirty_page(struct task_struct * tsk,unsigned long page,
183 unsigned long address);
184
185 extern void free_page_tables(struct task_struct * tsk);
186 extern void clear_page_tables(struct task_struct * tsk);
187 extern int new_page_tables(struct task_struct * tsk);
188 extern int copy_page_tables(struct task_struct * to);
189
190 extern int zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
191 extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
192 extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
193 extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);
194
195 extern void vmtruncate(struct inode * inode, unsigned long offset);
196 extern void handle_mm_fault(struct vm_area_struct *vma, unsigned long address, int write_access);
197 extern void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
198 extern void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
199
200 extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem);
201 extern void mem_init(unsigned long start_mem, unsigned long end_mem);
202 extern void show_mem(void);
203 extern void oom(struct task_struct * tsk);
204 extern void si_meminfo(struct sysinfo * val);
205
206
207
208 extern void * vmalloc(unsigned long size);
209 extern void * vremap(unsigned long offset, unsigned long size);
210 extern void vfree(void * addr);
211 extern int vread(char *buf, char *addr, int count);
212
213
214 extern unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
215 unsigned long prot, unsigned long flags, unsigned long off);
216 extern void merge_segments(struct task_struct *, unsigned long, unsigned long);
217 extern void insert_vm_struct(struct task_struct *, struct vm_area_struct *);
218 extern void remove_shared_vm_struct(struct vm_area_struct *);
219 extern void build_mmap_avl(struct mm_struct *);
220 extern void exit_mmap(struct mm_struct *);
221 extern int do_munmap(unsigned long, size_t);
222 extern unsigned long get_unmapped_area(unsigned long, unsigned long);
223
224
225 extern unsigned long page_unuse(unsigned long);
226 extern int shrink_mmap(int, int);
227 extern void truncate_inode_pages(struct inode *, unsigned long);
228
229 #define GFP_BUFFER 0x00
230 #define GFP_ATOMIC 0x01
231 #define GFP_USER 0x02
232 #define GFP_KERNEL 0x03
233 #define GFP_NOBUFFER 0x04
234 #define GFP_NFS 0x05
235
236
237
238
239 #define GFP_DMA 0x80
240
241 #define GFP_LEVEL_MASK 0xf
242
243
244
245 static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
246 {
247 unsigned long grow;
248
249 address &= PAGE_MASK;
250 if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
251 return -ENOMEM;
252 grow = vma->vm_start - address;
253 vma->vm_start = address;
254 vma->vm_offset -= grow;
255 vma->vm_mm->total_vm += grow >> PAGE_SHIFT;
256 if (vma->vm_flags & VM_LOCKED)
257 vma->vm_mm->locked_vm += grow >> PAGE_SHIFT;
258 return 0;
259 }
260
261 #define avl_empty (struct vm_area_struct *) NULL
262
263
264 static inline struct vm_area_struct * find_vma (struct task_struct * task, unsigned long addr)
265 {
266 struct vm_area_struct * result = NULL;
267
268 if (task->mm) {
269 struct vm_area_struct * tree = task->mm->mmap_avl;
270 for (;;) {
271 if (tree == avl_empty)
272 break;
273 if (tree->vm_end > addr) {
274 result = tree;
275 if (tree->vm_start <= addr)
276 break;
277 tree = tree->vm_avl_left;
278 } else
279 tree = tree->vm_avl_right;
280 }
281 }
282 return result;
283 }
284
285
286
287 static inline struct vm_area_struct * find_vma_intersection (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
288 {
289 struct vm_area_struct * vma;
290
291 vma = find_vma(task,start_addr);
292 if (!vma || end_addr <= vma->vm_start)
293 return NULL;
294 return vma;
295 }
296
297 #endif
298
299 #endif