root/include/linux/mm.h


DEFINITIONS

This source file includes the following definitions.
  1. get_free_page
  2. find_vma
  3. find_vma_intersection
  4. in_swap_cache
  5. find_in_swap_cache
  6. delete_from_swap_cache

   1 #ifndef _LINUX_MM_H
   2 #define _LINUX_MM_H
   3 
   4 #include <linux/sched.h>
   5 #include <linux/errno.h>
   6 #include <linux/kernel.h>
   7 #include <linux/string.h>
   8 
   9 extern unsigned long high_memory;
  10 
  11 #include <asm/page.h>
  12 
  13 #ifdef __KERNEL__
  14 
  15 #define VERIFY_READ 0
  16 #define VERIFY_WRITE 1
  17 
  18 extern int verify_area(int, const void *, unsigned long);
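
A minimal sketch (the helper name and the elided copy are illustrative, not part of this header): how a system call typically uses verify_area() to check a user-space buffer before writing to it.

/* check that the caller may receive "count" bytes at user_buf */
static int example_fill_user_buffer(char * user_buf, unsigned long count)
{
        int error;

        error = verify_area(VERIFY_WRITE, user_buf, count);
        if (error)
                return error;           /* usually -EFAULT */
        /* ... the actual copy to user_buf would follow the check ... */
        return 0;
}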
  19 
  20 /*
  21  * Linux kernel virtual memory manager primitives.
  22  * The idea being to have a "virtual" mm in the same way
  23  * we have a virtual fs - giving a cleaner interface to the
  24  * mm details, and allowing different kinds of memory mappings
  25  * (from shared memory to executable loading to arbitrary
  26  * mmap() functions).
  27  */
  28 
  29 /*
   30  * This struct defines a VM memory area. There is one of these
   31  * per VM-area/task.  A VM area is any part of the process virtual memory
   32  * space that has a special rule for the page-fault handlers (i.e. a shared
  33  * library, the executable area etc).
  34  */
  35 struct vm_area_struct {
  36         struct mm_struct * vm_mm;       /* VM area parameters */
  37         unsigned long vm_start;
  38         unsigned long vm_end;
  39         pgprot_t vm_page_prot;
  40         unsigned short vm_flags;
  41 /* AVL tree of VM areas per task, sorted by address */
  42         short vm_avl_height;
  43         struct vm_area_struct * vm_avl_left;
  44         struct vm_area_struct * vm_avl_right;
  45 /* linked list of VM areas per task, sorted by address */
  46         struct vm_area_struct * vm_next;
  47 /* for areas with inode, the circular list inode->i_mmap */
  48 /* for shm areas, the circular list of attaches */
  49 /* otherwise unused */
  50         struct vm_area_struct * vm_next_share;
  51         struct vm_area_struct * vm_prev_share;
  52 /* more */
  53         struct vm_operations_struct * vm_ops;
  54         unsigned long vm_offset;
  55         struct inode * vm_inode;
  56         unsigned long vm_pte;                   /* shared mem */
  57 };
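
A small sketch of how the per-task list is normally walked (it assumes the usual "mmap" list head in mm_struct, declared in <linux/sched.h>; the helper name is made up):

/* dump every VM area of a task, in address order via vm_next */
static void example_dump_vmas(struct task_struct * tsk)
{
        struct vm_area_struct * vma;

        if (!tsk->mm)
                return;
        for (vma = tsk->mm->mmap; vma; vma = vma->vm_next)
                printk("vma %08lx-%08lx flags %04x\n",
                        vma->vm_start, vma->vm_end, vma->vm_flags);
}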
  58 
  59 /*
  60  * vm_flags..
  61  */
  62 #define VM_READ         0x0001  /* currently active flags */
  63 #define VM_WRITE        0x0002
  64 #define VM_EXEC         0x0004
  65 #define VM_SHARED       0x0008
  66 
  67 #define VM_MAYREAD      0x0010  /* limits for mprotect() etc */
  68 #define VM_MAYWRITE     0x0020
  69 #define VM_MAYEXEC      0x0040
  70 #define VM_MAYSHARE     0x0080
  71 
  72 #define VM_GROWSDOWN    0x0100  /* general info on the segment */
  73 #define VM_GROWSUP      0x0200
  74 #define VM_SHM          0x0400
  75 #define VM_DENYWRITE    0x0800  /* ETXTBSY on write attempts.. */
  76 
  77 #define VM_EXECUTABLE   0x1000
  78 #define VM_DONTSWAP     0x2000  /* Some vm types have their own
  79                                  * hard-coded swap mechanism */
  80 
  81 #define VM_STACK_FLAGS  0x0177
  82 
  83 /*
  84  * mapping from the currently active vm_flags protection bits (the
  85  * low four bits) to a page protection mask..
  86  */
  87 extern pgprot_t protection_map[16];
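
A sketch of the intended use (hypothetical helper; in practice the result is cached in vma->vm_page_prot when the area is set up):

/* translate the active VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits */
static inline pgprot_t example_vm_prot(struct vm_area_struct * vma)
{
        return protection_map[vma->vm_flags & 0x0f];
}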
  88 
  89 
  90 /*
  91  * These are the virtual MM functions - opening of an area, closing and
  92  * unmapping it (needed to keep files on disk up-to-date etc), pointer
  93  * to the functions called when a no-page or a wp-page exception occurs. 
  94  */
  95 struct vm_operations_struct {
  96         void (*open)(struct vm_area_struct * area);
  97         void (*close)(struct vm_area_struct * area);
  98         void (*unmap)(struct vm_area_struct *area, unsigned long, size_t);
  99         void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot);
 100         int (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags);
 101         void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise);
 102         unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address,
 103                 unsigned long page, int write_access);
 104         unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
 105                 unsigned long page);
 106         int (*swapout)(struct vm_area_struct *,  unsigned long, pte_t *);
 107         pte_t (*swapin)(struct vm_area_struct *, unsigned long, unsigned long);
 108 };
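
A sketch of how a driver might fill this in (names are made up; the convention assumed here is that nopage receives a pre-allocated free page it may fill and return). Unused operations are left NULL so the generic code falls back to its defaults:

static unsigned long exdev_nopage(struct vm_area_struct * area,
        unsigned long address, unsigned long page, int write_access)
{
        /* fill the page with whatever backs this address; here: zeros */
        memset((void *) page, 0, PAGE_SIZE);
        return page;
}

static struct vm_operations_struct exdev_vm_ops = {
        NULL,                   /* open */
        NULL,                   /* close */
        NULL,                   /* unmap */
        NULL,                   /* protect */
        NULL,                   /* sync */
        NULL,                   /* advise */
        exdev_nopage,           /* nopage */
        NULL,                   /* wppage */
        NULL,                   /* swapout */
        NULL                    /* swapin */
};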
 109 
 110 extern mem_map_t * mem_map;
 111 extern unsigned char *age_map;
 112 
 113 /* planning stage.. */
 114 #define P_DIRTY         0x0001
 115 #define P_LOCKED        0x0002
 116 #define P_UPTODATE      0x0004
 117 #define P_RESERVED      0x8000
 118 
 119 struct page_info {
 120         unsigned short flags;
 121         unsigned short count;
 122         struct inode * inode;
 123         unsigned long offset;
 124         struct page_info * next_same_inode;
 125         struct page_info * prev_same_inode;
 126         struct page_info * next_hash;
 127         struct page_info * prev_hash;
 128         struct wait_queue *wait;
 129 };
 130 /* end of planning stage */
 131 
 132 /*
 133  * Free area management
 134  */
 135 
 136 extern int nr_swap_pages;
 137 extern int nr_free_pages;
 138 extern int min_free_pages;
 139 
 140 #define NR_MEM_LISTS 6
 141 
 142 struct mem_list {
 143         struct mem_list * next;
 144         struct mem_list * prev;
 145 };
 146 
 147 extern struct mem_list free_area_list[NR_MEM_LISTS];
 148 extern unsigned char * free_area_map[NR_MEM_LISTS];
 149 
 150 /*
 151  * This is timing-critical - most of the time in getting a new page
 152  * goes to clearing the page. If you want a page without the clearing
 153  * overhead, just use __get_free_page() directly..
 154  */
 155 #define __get_free_page(priority) __get_free_pages((priority),0,~0UL)
 156 #define __get_dma_pages(priority, order) __get_free_pages((priority),(order),MAX_DMA_ADDRESS)
 157 extern unsigned long __get_free_pages(int priority, unsigned long gfporder, unsigned long max_addr);
 158 
 159 extern inline unsigned long get_free_page(int priority)
 160 {
 161         unsigned long page;
 162 
 163         page = __get_free_page(priority);
 164         if (page)
 165                 memset((void *) page, 0, PAGE_SIZE);
 166         return page;
 167 }
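
A sketch of the practical difference (caller-side code, not part of the header; GFP_KERNEL is defined further down in this file):

static unsigned long example_alloc(void)
{
        unsigned long cleared, raw;

        cleared = get_free_page(GFP_KERNEL);    /* zero-filled page */
        raw = __get_free_page(GFP_KERNEL);      /* contents undefined */
        if (raw)
                free_pages(raw, 0);             /* same as free_page(raw) */
        return cleared;                         /* caller frees this one later */
}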
 168 
  169 /* memory.c & swap.c */
 170 
 171 #define free_page(addr) free_pages((addr),0)
 172 extern void free_pages(unsigned long addr, unsigned long order);
 173 
 174 extern void show_free_areas(void);
 175 extern unsigned long put_dirty_page(struct task_struct * tsk,unsigned long page,
 176         unsigned long address);
 177 
 178 extern void free_page_tables(struct task_struct * tsk);
 179 extern void clear_page_tables(struct task_struct * tsk);
 180 extern int new_page_tables(struct task_struct * tsk);
 181 extern int copy_page_tables(struct task_struct * to);
 182 
 183 extern int zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
 184 extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
 185 extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
 186 extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);
 187 
 188 extern void vmtruncate(struct inode * inode, unsigned long offset);
 189 extern void handle_mm_fault(struct vm_area_struct *vma, unsigned long address, int write_access);
 190 extern void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
 191 extern void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
 192 
 193 extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem);
 194 extern void mem_init(unsigned long start_mem, unsigned long end_mem);
 195 extern void show_mem(void);
 196 extern void oom(struct task_struct * tsk);
 197 extern void si_meminfo(struct sysinfo * val);
 198 
 199 /* vmalloc.c */
 200 
 201 extern void * vmalloc(unsigned long size);
 202 extern void * vremap(unsigned long offset, unsigned long size);
 203 extern void vfree(void * addr);
 204 extern int vread(char *buf, char *addr, int count);
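
A sketch of typical use (hypothetical helper): vmalloc() returns memory that is virtually but not necessarily physically contiguous, so it suits large kernel buffers that are only touched through this mapping.

static void * example_big_buffer(unsigned long size)
{
        void * buf = vmalloc(size);

        if (buf)
                memset(buf, 0, size);
        return buf;             /* release with vfree(buf) when done */
}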
 205 
 206 /* swap.c */
 207 
 208 extern void swap_free(unsigned long);
 209 extern void swap_duplicate(unsigned long);
 210 extern void swap_in(struct task_struct *, struct vm_area_struct *, pte_t *, unsigned long id, int write_access);
 211 
 212 extern void si_swapinfo(struct sysinfo * val);
 213 extern void rw_swap_page(int rw, unsigned long nr, char * buf);
 214 
 215 /* mmap.c */
 216 extern unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
 217         unsigned long prot, unsigned long flags, unsigned long off);
 218 extern void merge_segments(struct task_struct *, unsigned long, unsigned long);
 219 extern void insert_vm_struct(struct task_struct *, struct vm_area_struct *);
 220 extern void remove_shared_vm_struct(struct vm_area_struct *);
 221 extern void build_mmap_avl(struct mm_struct *);
 222 extern void exit_mmap(struct mm_struct *);
 223 extern int do_munmap(unsigned long, size_t);
 224 extern unsigned long get_unmapped_area(unsigned long, unsigned long);
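
A sketch of a caller (hypothetical; the PROT_* and MAP_* constants are assumed to come from <linux/mman.h>): roughly how a binary loader maps a fixed, private, read/execute region of a file. do_mmap() returns the mapped address or a small negative error value.

static unsigned long example_map_text(struct file * file, unsigned long addr,
        unsigned long len, unsigned long off)
{
        return do_mmap(file, addr, len,
                PROT_READ | PROT_EXEC,
                MAP_FIXED | MAP_PRIVATE,
                off);
}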
 225 
 226 #define read_swap_page(nr,buf) \
 227         rw_swap_page(READ,(nr),(buf))
 228 #define write_swap_page(nr,buf) \
 229         rw_swap_page(WRITE,(nr),(buf))
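
These macros just fix the direction argument of rw_swap_page(); a sketch of their use (hypothetical helper; the READ and WRITE constants are assumed to come from <linux/fs.h>, and "entry" is a swap entry as stored in a swapped-out pte):

static void example_swap_roundtrip(unsigned long entry, unsigned long page)
{
        write_swap_page(entry, (char *) page);  /* page out */
        read_swap_page(entry, (char *) page);   /* and back in */
}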
 230 
 231 #define GFP_BUFFER      0x00
 232 #define GFP_ATOMIC      0x01
 233 #define GFP_USER        0x02
 234 #define GFP_KERNEL      0x03
 235 #define GFP_NOBUFFER    0x04
 236 #define GFP_NFS         0x05
 237 
 238 /* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
 239    platforms, used as appropriate on others */
 240 
 241 #define GFP_DMA         0x80
 242 
 243 #define GFP_LEVEL_MASK 0xf
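
A sketch of allocating DMA-capable memory (hypothetical helper): GFP_ATOMIC must never sleep, GFP_KERNEL may, and __get_dma_pages() constrains the result to addresses a DMA-limited device can reach.

static unsigned long example_dma_page(void)
{
        /* one page (order 0) below MAX_DMA_ADDRESS; may sleep */
        return __get_dma_pages(GFP_KERNEL, 0);
}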
 244 
 245 #define avl_empty       (struct vm_area_struct *) NULL
 246 
 247 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
 248 static inline struct vm_area_struct * find_vma (struct task_struct * task, unsigned long addr)
 249 {
 250         struct vm_area_struct * result = NULL;
 251         struct vm_area_struct * tree;
 252 
 253         if (!task->mm)
 254                 return NULL;
 255         for (tree = task->mm->mmap_avl ; ; ) {
 256                 if (tree == avl_empty)
 257                         return result;
 258                 if (tree->vm_end > addr) {
 259                         if (tree->vm_start <= addr)
 260                                 return tree;
 261                         result = tree;
 262                         tree = tree->vm_avl_left;
 263                 } else
 264                         tree = tree->vm_avl_right;
 265         }
 266 }
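
A sketch of typical fault-handler style use (hypothetical helper): because find_vma() returns the first area ending above addr, the caller must still check that addr lies inside it, or that the area may grow down to it.

static int example_addr_is_mapped(struct task_struct * tsk, unsigned long addr)
{
        struct vm_area_struct * vma = find_vma(tsk, addr);

        if (!vma)
                return 0;                       /* above every mapping */
        if (vma->vm_start <= addr)
                return 1;                       /* inside this area */
        return (vma->vm_flags & VM_GROWSDOWN) != 0;     /* expandable stack */
}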
 267 
 268 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
 269    NULL if none.  Assume start_addr < end_addr. */
 270 static inline struct vm_area_struct * find_vma_intersection (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
 271 {
 272         struct vm_area_struct * vma;
 273 
 274         vma = find_vma(task,start_addr);
 275         if (!vma || end_addr <= vma->vm_start)
 276                 return NULL;
 277         return vma;
 278 }
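
A sketch of a typical caller (hypothetical helper), e.g. checking that a fixed-address mapping request does not collide with an existing area:

static int example_range_is_free(struct task_struct * tsk,
        unsigned long addr, unsigned long len)
{
        return find_vma_intersection(tsk, addr, addr + len) == NULL;
}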
 279 
 280 /*
  281  * vm_ops "not present" page codes for shared memory.
 282  *
 283  * Will go away eventually..
 284  */
 285 #define SHM_SWP_TYPE 0x40
 286 
 287 extern void shm_no_page (ulong *);
 288 
 289 /*
 290  * swap cache stuff (in swap.c)
 291  */
 292 #define SWAP_CACHE_INFO
 293 
 294 extern unsigned long * swap_cache;
 295 
 296 #ifdef SWAP_CACHE_INFO
 297 extern unsigned long swap_cache_add_total;
 298 extern unsigned long swap_cache_add_success;
 299 extern unsigned long swap_cache_del_total;
 300 extern unsigned long swap_cache_del_success;
 301 extern unsigned long swap_cache_find_total;
 302 extern unsigned long swap_cache_find_success;
 303 #endif
 304 
 305 extern inline unsigned long in_swap_cache(unsigned long addr)
 306 {
 307         return swap_cache[MAP_NR(addr)]; 
 308 }
 309 
 310 extern inline long find_in_swap_cache (unsigned long addr)
 311 {
 312         unsigned long entry;
 313 
 314 #ifdef SWAP_CACHE_INFO
 315         swap_cache_find_total++;
 316 #endif
 317         entry = xchg(swap_cache + MAP_NR(addr), 0);
 318 #ifdef SWAP_CACHE_INFO
 319         if (entry)
 320                 swap_cache_find_success++;
 321 #endif  
 322         return entry;
 323 }
 324 
 325 extern inline int delete_from_swap_cache(unsigned long addr)
 326 {
 327         unsigned long entry;
 328         
 329 #ifdef SWAP_CACHE_INFO
 330         swap_cache_del_total++;
 331 #endif  
  332         entry = xchg(swap_cache + MAP_NR(addr), 0);
 333         if (entry)  {
 334 #ifdef SWAP_CACHE_INFO
 335                 swap_cache_del_success++;
 336 #endif
 337                 swap_free(entry);
 338                 return 1;
 339         }
 340         return 0;
 341 }
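
A sketch tying the helpers together (hypothetical function): a page still in the swap cache has a clean copy on the swap device, so a caller invalidating that copy must also release the swap entry, which is exactly what delete_from_swap_cache() does.

static inline int example_drop_cached_copy(unsigned long page_addr)
{
        if (!in_swap_cache(page_addr))
                return 0;               /* no on-disk copy to worry about */
        /* removes the cache entry and swap_free()s it; returns 1 if found */
        return delete_from_swap_cache(page_addr);
}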
 342 
 343 #endif /* __KERNEL__ */
 344 
 345 #endif
