root/include/linux/mm.h


DEFINITIONS

This source file includes the following definitions:
  1. get_free_page
  2. expand_stack
  3. find_vma
  4. find_vma_intersection
  5. in_swap_cache
  6. find_in_swap_cache
  7. delete_from_swap_cache

#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>

extern unsigned long high_memory;

#include <asm/page.h>

#ifdef __KERNEL__

#define VERIFY_READ 0
#define VERIFY_WRITE 1

extern int verify_area(int, const void *, unsigned long);

/*
 * Linux kernel virtual memory manager primitives.
 * The idea is to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

/*
 * This struct defines a VMM memory area.  There is one of these
 * per VM area per task.  A VM area is any part of the process virtual
 * memory space that has a special rule for the page-fault handlers
 * (ie a shared library, the executable area etc).
 */
struct vm_area_struct {
        struct mm_struct * vm_mm;       /* VM area parameters */
        unsigned long vm_start;
        unsigned long vm_end;
        pgprot_t vm_page_prot;
        unsigned short vm_flags;
/* AVL tree of VM areas per task, sorted by address */
        short vm_avl_height;
        struct vm_area_struct * vm_avl_left;
        struct vm_area_struct * vm_avl_right;
/* linked list of VM areas per task, sorted by address */
        struct vm_area_struct * vm_next;
/* for areas with an inode, the circular list inode->i_mmap */
/* for shm areas, the circular list of attaches */
/* otherwise unused */
        struct vm_area_struct * vm_next_share;
        struct vm_area_struct * vm_prev_share;
/* more */
        struct vm_operations_struct * vm_ops;
        unsigned long vm_offset;
        struct inode * vm_inode;
        unsigned long vm_pte;                   /* shared mem */
};
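
/*
 * Illustrative sketch (not part of the original header): walking a
 * task's VMA list through vm_next, which is kept sorted by address.
 * Assumes mm->mmap heads the list (see linux/sched.h); the helper name
 * example_mapped_bytes() is hypothetical.
 */
static inline unsigned long example_mapped_bytes(struct task_struct * tsk)
{
        struct vm_area_struct * vma;
        unsigned long bytes = 0;

        if (!tsk->mm)
                return 0;
        for (vma = tsk->mm->mmap ; vma ; vma = vma->vm_next)
                bytes += vma->vm_end - vma->vm_start;
        return bytes;
}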

/*
 * vm_flags..
 */
#define VM_READ         0x0001  /* currently active flags */
#define VM_WRITE        0x0002
#define VM_EXEC         0x0004
#define VM_SHARED       0x0008

#define VM_MAYREAD      0x0010  /* limits for mprotect() etc */
#define VM_MAYWRITE     0x0020
#define VM_MAYEXEC      0x0040
#define VM_MAYSHARE     0x0080

#define VM_GROWSDOWN    0x0100  /* general info on the segment */
#define VM_GROWSUP      0x0200
#define VM_SHM          0x0400  /* shared memory area, don't swap out */
#define VM_DENYWRITE    0x0800  /* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE   0x1000
#define VM_LOCKED       0x2000

#define VM_STACK_FLAGS  0x0177
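/* i.e. VM_READ | VM_WRITE | VM_EXEC | VM_MAYREAD | VM_MAYWRITE |
   VM_MAYEXEC | VM_GROWSDOWN */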

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];
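
/*
 * Illustrative sketch (not part of the original header): the active
 * protection bits select a pgprot_t by indexing protection_map with
 * the low four vm_flags bits (VM_READ/VM_WRITE/VM_EXEC/VM_SHARED).
 * The helper name vma_page_prot() is hypothetical.
 */
static inline pgprot_t vma_page_prot(struct vm_area_struct * vma)
{
        return protection_map[vma->vm_flags & 0x0f];
}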


/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), and
 * pointers to the functions called when a no-page or a wp-page
 * exception occurs.
 */
struct vm_operations_struct {
        void (*open)(struct vm_area_struct * area);
        void (*close)(struct vm_area_struct * area);
        void (*unmap)(struct vm_area_struct * area, unsigned long, size_t);
        void (*protect)(struct vm_area_struct * area, unsigned long, size_t, unsigned int newprot);
        int (*sync)(struct vm_area_struct * area, unsigned long, size_t, unsigned int flags);
        void (*advise)(struct vm_area_struct * area, unsigned long, size_t, unsigned int advise);
        unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access);
        unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
                unsigned long page);
        int (*swapout)(struct vm_area_struct *, unsigned long, pte_t *);
        pte_t (*swapin)(struct vm_area_struct *, unsigned long, unsigned long);
};
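
/*
 * Illustrative sketch (not part of the original header): a mapping that
 * only needs fault handling fills in nopage and leaves everything else
 * NULL, so the generic code uses its defaults.  example_nopage() and
 * example_vm_ops are hypothetical names.
 */
static unsigned long example_nopage(struct vm_area_struct * area,
        unsigned long address, int write_access)
{
        /* a real handler returns the address of a page satisfying the
           fault; returning 0 conventionally signals failure */
        return 0;
}

static struct vm_operations_struct example_vm_ops = {
        NULL,                   /* open */
        NULL,                   /* close */
        NULL,                   /* unmap */
        NULL,                   /* protect */
        NULL,                   /* sync */
        NULL,                   /* advise */
        example_nopage,         /* nopage */
        NULL,                   /* wppage */
        NULL,                   /* swapout */
        NULL,                   /* swapin */
};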

typedef struct page {
        unsigned int count;
        unsigned dirty:16,
                 age:8,
                 uptodate:1,
                 error:1,
                 unused:5,
                 reserved:1;
        unsigned long offset;
        struct inode *inode;
        struct wait_queue *wait;
        struct page *write_list;
        struct page *next, *prev;
        struct page *next_hash, *prev_hash;
} mem_map_t;

extern mem_map_t * mem_map;
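
/*
 * Illustrative sketch (not part of the original header): mem_map is
 * indexed by physical page number, so an address maps to its page
 * descriptor via MAP_NR() from <asm/page.h>.  The helper name
 * example_page_descriptor() is hypothetical.
 */
static inline mem_map_t * example_page_descriptor(unsigned long addr)
{
        return mem_map + MAP_NR(addr);
}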

/*
 * Free area management
 */

extern int nr_swap_pages;
extern int nr_free_pages;
extern int min_free_pages;

#define NR_MEM_LISTS 6

struct mem_list {
        struct mem_list * next;
        struct mem_list * prev;
};

extern struct mem_list free_area_list[NR_MEM_LISTS];
extern unsigned int * free_area_map[NR_MEM_LISTS];

/*
 * This is timing-critical - most of the time in getting a new page
 * goes to clearing the page. If you want a page without the clearing
 * overhead, just use __get_free_page() directly..
 */
#define __get_free_page(priority) __get_free_pages((priority),0,~0UL)
#define __get_dma_pages(priority, order) __get_free_pages((priority),(order),MAX_DMA_ADDRESS)
extern unsigned long __get_free_pages(int priority, unsigned long gfporder, unsigned long max_addr);

extern inline unsigned long get_free_page(int priority)
{
        unsigned long page;

        page = __get_free_page(priority);
        if (page)
                memset((void *) page, 0, PAGE_SIZE);
        return page;
}

/* memory.c & swap.c */

#define free_page(addr) free_pages((addr),0)
extern void free_pages(unsigned long addr, unsigned long order);

extern void show_free_areas(void);
extern unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page,
        unsigned long address);

extern void free_page_tables(struct task_struct * tsk);
extern void clear_page_tables(struct task_struct * tsk);
extern int new_page_tables(struct task_struct * tsk);
extern int copy_page_tables(struct task_struct * to);

extern int zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);

extern void vmtruncate(struct inode * inode, unsigned long offset);
extern void handle_mm_fault(struct vm_area_struct *vma, unsigned long address, int write_access);
extern void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
extern void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);

extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem);
extern void mem_init(unsigned long start_mem, unsigned long end_mem);
extern void show_mem(void);
extern void oom(struct task_struct * tsk);
extern void si_meminfo(struct sysinfo * val);

/* vmalloc.c */

extern void * vmalloc(unsigned long size);
extern void * vremap(unsigned long offset, unsigned long size);
extern void vfree(void * addr);
extern int vread(char *buf, char *addr, int count);

/* swap.c */

extern void swap_free(unsigned long);
extern void swap_duplicate(unsigned long);
extern void swap_in(struct task_struct *, struct vm_area_struct *, pte_t *, unsigned long id, int write_access);

extern void si_swapinfo(struct sysinfo * val);
extern void rw_swap_page(int rw, unsigned long nr, char * buf);

/* mmap.c */
extern unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags, unsigned long off);
extern void merge_segments(struct task_struct *, unsigned long, unsigned long);
extern void insert_vm_struct(struct task_struct *, struct vm_area_struct *);
extern void remove_shared_vm_struct(struct vm_area_struct *);
extern void build_mmap_avl(struct mm_struct *);
extern void exit_mmap(struct mm_struct *);
extern int do_munmap(unsigned long, size_t);
extern unsigned long get_unmapped_area(unsigned long, unsigned long);

/* filemap.c */
extern unsigned long page_unuse(unsigned long);
extern int shrink_mmap(int, unsigned long);

#define read_swap_page(nr,buf) \
        rw_swap_page(READ,(nr),(buf))
#define write_swap_page(nr,buf) \
        rw_swap_page(WRITE,(nr),(buf))

#define GFP_BUFFER      0x00
#define GFP_ATOMIC      0x01
#define GFP_USER        0x02
#define GFP_KERNEL      0x03
#define GFP_NOBUFFER    0x04
#define GFP_NFS         0x05

/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA         0x80

#define GFP_LEVEL_MASK 0xf

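/*
 * Illustrative sketch (not part of the original header): the usual
 * pairing of get_free_page() with free_page().  GFP_KERNEL is the
 * normal priority for callers that may sleep; atomic contexts pass
 * GFP_ATOMIC instead.  example_scratch_page() is a hypothetical name.
 */
static inline int example_scratch_page(void)
{
        unsigned long page = get_free_page(GFP_KERNEL);

        if (!page)
                return -ENOMEM;
        /* ... use the zeroed page ... */
        free_page(page);
        return 0;
}
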
#define avl_empty       (struct vm_area_struct *) NULL

static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
{
        unsigned long grow;

        address &= PAGE_MASK;
        if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
                return -ENOMEM;
        grow = vma->vm_start - address;
        vma->vm_start = address;
        vma->vm_offset -= grow;
        vma->vm_mm->total_vm += grow >> PAGE_SHIFT;
        if (vma->vm_flags & VM_LOCKED)
                vma->vm_mm->locked_vm += grow >> PAGE_SHIFT;
        return 0;
}

/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
static inline struct vm_area_struct * find_vma (struct task_struct * task, unsigned long addr)
{
        struct vm_area_struct * result = NULL;
        struct vm_area_struct * tree;

        if (!task->mm)
                return NULL;
        for (tree = task->mm->mmap_avl ; ; ) {
                if (tree == avl_empty)
                        return result;
                if (tree->vm_end > addr) {
                        if (tree->vm_start <= addr)
                                return tree;
                        result = tree;
                        tree = tree->vm_avl_left;
                } else
                        tree = tree->vm_avl_right;
        }
}
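
/*
 * Illustrative sketch (not part of the original header): how a page-fault
 * path might combine find_vma() with expand_stack().  A hit that already
 * covers the address is an ordinary fault; a VM_GROWSDOWN vma just above
 * the address can be grown downward to cover it.  example_fault_vma() is
 * a hypothetical name.
 */
static inline struct vm_area_struct * example_fault_vma(struct task_struct * tsk,
        unsigned long address)
{
        struct vm_area_struct * vma = find_vma(tsk, address);

        if (!vma)
                return NULL;
        if (vma->vm_start <= address)
                return vma;             /* address already inside the vma */
        if (!(vma->vm_flags & VM_GROWSDOWN))
                return NULL;
        if (expand_stack(vma, address))
                return NULL;            /* would exceed RLIMIT_STACK */
        return vma;
}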

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none.  Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
{
        struct vm_area_struct * vma;

        vma = find_vma(task, start_addr);
        if (!vma || end_addr <= vma->vm_start)
                return NULL;
        return vma;
}
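
/*
 * Illustrative sketch (not part of the original header): checking that a
 * fixed-address mapping request does not collide with an existing vma,
 * as MAP_FIXED handling must.  example_range_is_free() is a hypothetical
 * name.
 */
static inline int example_range_is_free(struct task_struct * tsk,
        unsigned long addr, unsigned long len)
{
        return find_vma_intersection(tsk, addr, addr + len) == NULL;
}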

/*
 * Not-present page codes for shared memory, used when no vm_ops
 * nopage handler applies.
 *
 * Will go away eventually..
 */
#define SHM_SWP_TYPE 0x40

extern void shm_no_page (ulong *);

/*
 * swap cache stuff (in swap.c)
 */
#define SWAP_CACHE_INFO

extern unsigned long * swap_cache;

#ifdef SWAP_CACHE_INFO
extern unsigned long swap_cache_add_total;
extern unsigned long swap_cache_add_success;
extern unsigned long swap_cache_del_total;
extern unsigned long swap_cache_del_success;
extern unsigned long swap_cache_find_total;
extern unsigned long swap_cache_find_success;
#endif

extern inline unsigned long in_swap_cache(unsigned long addr)
{
        return swap_cache[MAP_NR(addr)];
}

extern inline long find_in_swap_cache (unsigned long addr)
{
        unsigned long entry;

#ifdef SWAP_CACHE_INFO
        swap_cache_find_total++;
#endif
        entry = xchg(swap_cache + MAP_NR(addr), 0);
#ifdef SWAP_CACHE_INFO
        if (entry)
                swap_cache_find_success++;
#endif
        return entry;
}

extern inline int delete_from_swap_cache(unsigned long addr)
{
        unsigned long entry;

#ifdef SWAP_CACHE_INFO
        swap_cache_del_total++;
#endif
        entry = xchg(swap_cache + MAP_NR(addr), 0);
        if (entry) {
#ifdef SWAP_CACHE_INFO
                swap_cache_del_success++;
#endif
                swap_free(entry);
                return 1;
        }
        return 0;
}
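
/*
 * Illustrative sketch (not part of the original header): both helpers
 * above clear the swap-cache slot as a side effect, since xchg()
 * atomically reads and zeroes it.  delete_from_swap_cache() additionally
 * drops the swap reference via swap_free(), while find_in_swap_cache()
 * hands the entry to the caller.  example_take_swap_entry() is a
 * hypothetical name.
 */
extern inline int example_take_swap_entry(unsigned long addr, unsigned long * result)
{
        long entry = find_in_swap_cache(addr);

        if (!entry)
                return 0;
        *result = entry;        /* caller now holds the swap reference */
        return 1;
}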

#endif /* __KERNEL__ */

#endif /* _LINUX_MM_H */
