root/include/linux/mm.h


DEFINITIONS

This source file includes the following definitions.
  1. get_free_page
  2. in_swap_cache
  3. find_in_swap_cache
  4. delete_from_swap_cache

#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>

extern unsigned long high_memory;

#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

extern int verify_area(int, const void *, unsigned long);

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

/*
 * This struct defines a VMM memory area. There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (i.e. a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
        struct task_struct * vm_task;           /* VM area parameters */
        unsigned long vm_start;
        unsigned long vm_end;
        pgprot_t vm_page_prot;
        unsigned short vm_flags;
/* AVL tree of VM areas per task, sorted by address */
        short vm_avl_height;
        struct vm_area_struct * vm_avl_left;
        struct vm_area_struct * vm_avl_right;
/* linked list of VM areas per task, sorted by address */
        struct vm_area_struct * vm_next;
/* for areas with an inode, the circular list inode->i_mmap */
/* for shm areas, the circular list of attaches */
/* otherwise unused */
        struct vm_area_struct * vm_next_share;
        struct vm_area_struct * vm_prev_share;
/* more */
        struct vm_operations_struct * vm_ops;
        unsigned long vm_offset;
        struct inode * vm_inode;
        unsigned long vm_pte;                   /* shared mem */
};
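
/*
 * Illustrative sketch (not part of the original source): because the
 * per-task list is sorted by address, finding the area that might
 * contain an address is a simple walk.  "first_vma" is a hypothetical
 * stand-in for however the caller obtains the head of a task's
 * VM-area list:
 *
 *      struct vm_area_struct * vma;
 *
 *      for (vma = first_vma; vma; vma = vma->vm_next)
 *              if (addr < vma->vm_end)
 *                      break;
 *
 * On exit, vma is the first area with vm_end > addr, or NULL; the
 * caller still checks vma->vm_start <= addr to know whether addr
 * actually lies inside the area.
 */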

/*
 * vm_flags..
 */
#define VM_READ         0x0001  /* currently active flags */
#define VM_WRITE        0x0002
#define VM_EXEC         0x0004
#define VM_SHARED       0x0008

#define VM_MAYREAD      0x0010  /* limits for mprotect() etc */
#define VM_MAYWRITE     0x0020
#define VM_MAYEXEC      0x0040
#define VM_MAYSHARE     0x0080

#define VM_GROWSDOWN    0x0100  /* general info on the segment */
#define VM_GROWSUP      0x0200
#define VM_SHM          0x0400
#define VM_DENYWRITE    0x0800  /* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE   0x1000

#define VM_STACK_FLAGS  0x0177
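
/*
 * Added remark: 0x0177 is the OR of VM_READ | VM_WRITE | VM_EXEC |
 * VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | VM_GROWSDOWN, i.e. a stack
 * is readable, writable and executable, may remain so under
 * mprotect(), and grows downwards.
 */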

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];
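
/*
 * Usage sketch (an assumption based on the comment above, not code
 * from this header): since the active protection bits are the low
 * four vm_flags bits, an area's page protection can be looked up as
 *
 *      pgprot_t prot = protection_map[vma->vm_flags & 0x0f];
 */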

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), and
 * pointers to the functions called when a no-page or a wp-page
 * exception occurs.
 */
struct vm_operations_struct {
        void (*open)(struct vm_area_struct * area);
        void (*close)(struct vm_area_struct * area);
        void (*unmap)(struct vm_area_struct *area, unsigned long, size_t);
        void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot);
        void (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags);
        void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise);
        unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address,
                unsigned long page, int write_access);
        unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
                unsigned long page);
        void (*swapout)(struct vm_area_struct *,  unsigned long, pte_t *);
        pte_t (*swapin)(struct vm_area_struct *, unsigned long, unsigned long);
};
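
/*
 * Illustrative sketch (not from the original source): an operations
 * vector normally fills in only the entries it needs and leaves the
 * rest NULL for the fault handlers to test before calling.  A
 * hypothetical mapping that only supplies missing pages via a
 * function example_nopage() might use:
 *
 *      static struct vm_operations_struct example_vm_ops = {
 *              NULL, NULL, NULL, NULL, NULL, NULL,
 *              example_nopage,
 *              NULL, NULL, NULL
 *      };
 *
 * where the six leading NULLs cover open through advise and the
 * trailing three cover wppage, swapout and swapin.
 */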

extern mem_map_t * mem_map;

/* planning stage.. */
#define P_DIRTY         0x0001
#define P_LOCKED        0x0002
#define P_UPTODATE      0x0004
#define P_RESERVED      0x8000

struct page_info {
        unsigned short flags;
        unsigned short count;
        struct inode * inode;
        unsigned long offset;
        struct page_info * next_same_inode;
        struct page_info * prev_same_inode;
        struct page_info * next_hash;
        struct page_info * prev_hash;
        struct wait_queue *wait;
};
/* end of planning stage */

#ifdef __KERNEL__

/*
 * Free area management
 */

extern int nr_swap_pages;
extern int nr_free_pages;
extern int min_free_pages;

#define NR_MEM_LISTS 6

struct mem_list {
        struct mem_list * next;
        struct mem_list * prev;
};

extern struct mem_list free_area_list[NR_MEM_LISTS];
extern unsigned char * free_area_map[NR_MEM_LISTS];

/*
 * This is timing-critical - most of the time in getting a new page
 * goes to clearing the page. If you want a page without the clearing
 * overhead, just use __get_free_page() directly..
 */
#define __get_free_page(priority) __get_free_pages((priority),0)
extern unsigned long __get_free_pages(int priority, unsigned long gfporder);
extern unsigned long __get_dma_pages(int priority, unsigned long gfporder);
extern inline unsigned long get_free_page(int priority)
{
        unsigned long page;

        page = __get_free_page(priority);
        if (page)
                memset((void *) page, 0, PAGE_SIZE);
        return page;
}
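
/*
 * Usage sketch (illustrative, using the GFP codes defined below):
 * grab one zeroed page for kernel use and release it when done.
 * GFP_KERNEL allows the allocator to sleep; use GFP_ATOMIC when
 * sleeping is not allowed, e.g. in interrupt context:
 *
 *      unsigned long page;
 *
 *      page = get_free_page(GFP_KERNEL);
 *      if (!page)
 *              return -ENOMEM;
 *      ...
 *      free_page(page);
 */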

/* memory.c & swap.c */

#define free_page(addr) free_pages((addr),0)
extern void free_pages(unsigned long addr, unsigned long order);

extern void show_free_areas(void);
extern unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page,
        unsigned long address);

extern void free_page_tables(struct task_struct * tsk);
extern void clear_page_tables(struct task_struct * tsk);
extern int copy_page_tables(struct task_struct * to);
extern int clone_page_tables(struct task_struct * to);
extern int unmap_page_range(unsigned long from, unsigned long size);
extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);

extern void do_wp_page(struct vm_area_struct * vma, unsigned long address, int write_access);
extern void do_no_page(struct vm_area_struct * vma, unsigned long address, int write_access);

extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem);
extern void mem_init(unsigned long start_mem, unsigned long end_mem);
extern void show_mem(void);
extern void oom(struct task_struct * task);
extern void si_meminfo(struct sysinfo * val);

/* vmalloc.c */

extern void * vmalloc(unsigned long size);
extern void vfree(void * addr);
extern int vread(char *buf, char *addr, int count);

/* swap.c */

extern void swap_free(unsigned long);
extern void swap_duplicate(unsigned long);
extern void swap_in(struct vm_area_struct *, pte_t *, unsigned long id, int write_access);

extern void si_swapinfo(struct sysinfo * val);
extern void rw_swap_page(int rw, unsigned long nr, char * buf);

/* mmap.c */
extern int do_mmap(struct file * file, unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags, unsigned long off);
extern struct vm_area_struct * find_vma (struct task_struct *, unsigned long);
extern struct vm_area_struct * find_vma_intersection (struct task_struct *, unsigned long, unsigned long);
extern void merge_segments(struct task_struct *, unsigned long, unsigned long);
extern void insert_vm_struct(struct task_struct *, struct vm_area_struct *);
extern void remove_shared_vm_struct(struct vm_area_struct *);
extern void build_mmap_avl(struct task_struct *);
extern void exit_mmap(struct task_struct *);
extern int do_munmap(unsigned long, size_t);
extern unsigned long get_unmapped_area(unsigned long);
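
/*
 * Usage sketch (semantics assumed from the conventional contract,
 * which the declarations above do not spell out): find_vma() returns
 * the first area with vm_end > addr, so callers check vm_start
 * themselves; find_vma_intersection() reports whether a range hits an
 * existing mapping:
 *
 *      struct vm_area_struct * vma = find_vma(tsk, addr);
 *      if (vma && vma->vm_start <= addr)
 *              ...             addr lies inside [vm_start, vm_end)
 *
 *      if (find_vma_intersection(tsk, start, end))
 *              ...             [start, end) collides with a VM area
 */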

#define read_swap_page(nr,buf) \
        rw_swap_page(READ,(nr),(buf))
#define write_swap_page(nr,buf) \
        rw_swap_page(WRITE,(nr),(buf))

#define GFP_BUFFER      0x00
#define GFP_ATOMIC      0x01
#define GFP_USER        0x02
#define GFP_KERNEL      0x03
#define GFP_NOBUFFER    0x04
#define GFP_NFS         0x05

/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA         0x80
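
/*
 * Usage sketch (illustrative): a driver that needs two physically
 * contiguous, DMA-capable pages can use the helper declared earlier;
 * gfporder is a power-of-two order, so order 1 means 1<<1 = 2 pages:
 *
 *      unsigned long buf = __get_dma_pages(GFP_KERNEL, 1);
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      free_pages(buf, 1);
 */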

/*
 * vm_ops not present page codes for shared memory.
 *
 * Will go away eventually..
 */
#define SHM_SWP_TYPE 0x41
extern void shm_no_page (ulong *);

/*
 * swap cache stuff (in swap.c)
 */
#define SWAP_CACHE_INFO

extern unsigned long * swap_cache;

#ifdef SWAP_CACHE_INFO
extern unsigned long swap_cache_add_total;
extern unsigned long swap_cache_add_success;
extern unsigned long swap_cache_del_total;
extern unsigned long swap_cache_del_success;
extern unsigned long swap_cache_find_total;
extern unsigned long swap_cache_find_success;
#endif

extern inline unsigned long in_swap_cache(unsigned long addr)
{
        return swap_cache[MAP_NR(addr)];
}

extern inline long find_in_swap_cache (unsigned long addr)
{
        unsigned long entry;

#ifdef SWAP_CACHE_INFO
        swap_cache_find_total++;
#endif
        entry = (unsigned long) xchg_ptr(swap_cache + MAP_NR(addr), NULL);
#ifdef SWAP_CACHE_INFO
        if (entry)
                swap_cache_find_success++;
#endif
        return entry;
}

extern inline int delete_from_swap_cache(unsigned long addr)
{
        unsigned long entry;

#ifdef SWAP_CACHE_INFO
        swap_cache_del_total++;
#endif
        entry = (unsigned long) xchg_ptr(swap_cache + MAP_NR(addr), NULL);
        if (entry) {
#ifdef SWAP_CACHE_INFO
                swap_cache_del_success++;
#endif
                swap_free(entry);
                return 1;
        }
        return 0;
}
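
/*
 * Usage sketch (an assumption about typical callers, not code from
 * this header): when a physical page is released, any swap-cache
 * entry for it should be dropped first, so the cached swap slot is
 * freed along with the page:
 *
 *      delete_from_swap_cache(addr);
 *      free_page(addr);
 */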

#endif /* __KERNEL__ */

#endif /* _LINUX_MM_H */
