root/include/linux/mm.h

/* [previous][next][first][last][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes the following definitions.
  1. get_free_page
  2. in_swap_cache
  3. find_in_swap_cache
  4. delete_from_swap_cache

   1 #ifndef _LINUX_MM_H
   2 #define _LINUX_MM_H
   3 
   4 #include <asm/page.h>
   5 
   6 #include <linux/sched.h>
   7 #include <linux/errno.h>
   8 #include <linux/kernel.h>
   9 #include <linux/string.h>
  10 
/* Access-check modes for verify_area(). */
#define VERIFY_READ 0
#define VERIFY_WRITE 1

/* Check whether [addr, addr+size) may be accessed from user space in the
   given mode (VERIFY_READ or VERIFY_WRITE).  NOTE(review): presumably
   returns 0 on success and a negative errno on failure — confirm against
   the architecture's implementation. */
extern int verify_area(int, const void *, unsigned long);
  15 
  16 /*
  17  * Linux kernel virtual memory manager primitives.
  18  * The idea being to have a "virtual" mm in the same way
  19  * we have a virtual fs - giving a cleaner interface to the
  20  * mm details, and allowing different kinds of memory mappings
  21  * (from shared memory to executable loading to arbitrary
  22  * mmap() functions).
  23  */
  24 
  25 /*
  26  * This struct defines a memory VMM memory area. There is one of these
  27  * per VM-area/task.  A VM area is any part of the process virtual memory
  28  * space that has a special rule for the page-fault handlers (ie a shared
  29  * library, the executable area etc).
  30  */
struct vm_area_struct {
        struct task_struct * vm_task;           /* VM area parameters: owning task */
        unsigned long vm_start;                 /* start address of the area */
        unsigned long vm_end;                   /* end address (presumably exclusive — confirm in mm code) */
        unsigned short vm_page_prot;            /* page protection bits for this area */
        unsigned short vm_flags;                /* VM_* flags defined below */
/* linked list of VM areas per task, sorted by address */
        struct vm_area_struct * vm_next;
/* for areas with inode, the circular list inode->i_mmap */
/* for shm areas, the linked list of attaches */
/* otherwise unused */
        struct vm_area_struct * vm_next_share;
        struct vm_area_struct * vm_prev_share;
/* more */
        struct vm_operations_struct * vm_ops;   /* handlers for faults/unmap/etc. */
        unsigned long vm_offset;                /* offset into vm_inode of the area start */
        struct inode * vm_inode;                /* backing inode, if any */
        unsigned long vm_pte;                   /* shared mem */
};
  50 
/*
 * vm_flags..
 */
#define VM_READ         0x0001  /* currently active flags */
#define VM_WRITE        0x0002
#define VM_EXEC         0x0004
#define VM_SHARED       0x0008

#define VM_MAYREAD      0x0010  /* limits for mprotect() etc */
#define VM_MAYWRITE     0x0020
#define VM_MAYEXEC      0x0040
#define VM_MAYSHARE     0x0080

#define VM_GROWSDOWN    0x0100  /* general info on the segment */
#define VM_GROWSUP      0x0200
#define VM_SHM          0x0400
#define VM_DENYWRITE    0x0800  /* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE   0x1000

/* 0x0177 == VM_READ|VM_WRITE|VM_EXEC |
 *           VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC | VM_GROWSDOWN */
#define VM_STACK_FLAGS  0x0177
  72 
  73 /*
  74  * These are the virtual MM functions - opening of an area, closing and
  75  * unmapping it (needed to keep files on disk up-to-date etc), pointer
  76  * to the functions called when a no-page or a wp-page exception occurs. 
  77  */
struct vm_operations_struct {
        /* area is being mapped / duplicated */
        void (*open)(struct vm_area_struct * area);
        /* area is being removed */
        void (*close)(struct vm_area_struct * area);
        /* part of the area is being unmapped (address, length) */
        void (*unmap)(struct vm_area_struct *area, unsigned long, size_t);
        /* change protection on a range (address, length, new protection) */
        void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot);
        /* write back a range to backing store (address, length, flags) */
        void (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags);
        /* usage advice for a range (address, length, advice code) */
        void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise);
        /* handle a fault on a not-present page; returns the page to map */
        unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address,
                unsigned long page, int write_access);
        /* handle a write fault on a write-protected page */
        unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
                unsigned long page);
        /* page-out and page-in hooks for the swapper */
        void (*swapout)(struct vm_area_struct *,  unsigned long, unsigned long *);
        unsigned long (*swapin)(struct vm_area_struct *, unsigned long, unsigned long);
};
  92 
/* Array with one entry per physical page frame. */
extern mem_map_t * mem_map;

/* planning stage.. */
#define P_DIRTY         0x0001
#define P_LOCKED        0x0002
#define P_UPTODATE      0x0004
#define P_RESERVED      0x8000

/* Planned per-page bookkeeping: flags, reference count, backing inode
   and offset, per-inode and hash-chain links, and a wait queue for
   threads sleeping on the page. */
struct page_info {
        unsigned short flags;                   /* P_* bits above */
        unsigned short count;                   /* reference count */
        struct inode * inode;                   /* backing inode */
        unsigned long offset;                   /* offset within inode */
        struct page_info * next_same_inode;
        struct page_info * prev_same_inode;
        struct page_info * next_hash;
        struct page_info * prev_hash;
        struct wait_queue *wait;                /* sleepers waiting on this page */
};
/* end of planning stage */
 113 
 114 #ifdef __KERNEL__
 115 
 116 /*
 117  * Free area management
 118  */
 119 
extern int nr_swap_pages;       /* pages currently available on swap */
extern int nr_free_pages;       /* pages currently on the free lists */
extern int min_free_pages;      /* threshold below which reclaim kicks in */

/* Number of buddy-style order lists (orders 0 .. NR_MEM_LISTS-1). */
#define NR_MEM_LISTS 6

/* Doubly-linked free-list node, embedded at the start of a free block. */
struct mem_list {
        struct mem_list * next;
        struct mem_list * prev;
};

extern struct mem_list free_area_list[NR_MEM_LISTS];
extern unsigned char * free_area_map[NR_MEM_LISTS];

/*
 * This is timing-critical - most of the time in getting a new page
 * goes to clearing the page. If you want a page without the clearing
 * overhead, just use __get_free_page() directly..
 */
#define __get_free_page(priority) __get_free_pages((priority),0)
extern unsigned long __get_free_pages(int priority, unsigned long gfporder);
extern unsigned long __get_dma_pages(int priority, unsigned long gfporder);
 142 extern inline unsigned long get_free_page(int priority)
     /* [previous][next][first][last][top][bottom][index][help] */
 143 {
 144         unsigned long page;
 145 
 146         page = __get_free_page(priority);
 147         if (page)
 148                 memset((void *) page, 0, PAGE_SIZE);
 149         return page;
 150 }
 151 
/* memory.c & swap.c*/

/* Free a single page (order-0 shorthand for free_pages()). */
#define free_page(addr) free_pages((addr),0)
extern void free_pages(unsigned long addr, unsigned long order);

extern void show_free_areas(void);
extern unsigned long put_dirty_page(struct task_struct * tsk,unsigned long page,
        unsigned long address);

/* Page-table setup/teardown and copying for task creation and exit. */
extern void free_page_tables(struct task_struct * tsk);
extern void clear_page_tables(struct task_struct * tsk);
extern int copy_page_tables(struct task_struct * to);
extern int clone_page_tables(struct task_struct * to);
extern int unmap_page_range(unsigned long from, unsigned long size);
extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, int mask);
extern int zeromap_page_range(unsigned long from, unsigned long size, int mask);

/* Page-fault entry points: write-protect fault and not-present fault. */
extern void do_wp_page(struct vm_area_struct * vma, unsigned long address, int write_access);
extern void do_no_page(struct vm_area_struct * vma, unsigned long address, int write_access);

extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem);
extern void mem_init(unsigned long start_mem, unsigned long end_mem);
extern void show_mem(void);
extern void oom(struct task_struct * task);
extern void si_meminfo(struct sysinfo * val);

/* vmalloc.c */

extern void * vmalloc(unsigned long size);
extern void vfree(void * addr);
extern int vread(char *buf, char *addr, int count);

/* swap.c */

extern void swap_free(unsigned long page_nr);
extern unsigned long swap_duplicate(unsigned long page_nr);
extern unsigned long swap_in(unsigned long entry);
extern void si_swapinfo(struct sysinfo * val);
extern void rw_swap_page(int rw, unsigned long nr, char * buf);

/* mmap.c */
extern int do_mmap(struct file * file, unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags, unsigned long off);
extern void merge_segments(struct vm_area_struct *);
extern void insert_vm_struct(struct task_struct *, struct vm_area_struct *);
extern void remove_shared_vm_struct(struct vm_area_struct *);
extern int do_munmap(unsigned long, size_t);
extern unsigned long get_unmapped_area(unsigned long);

/* Direction-specific wrappers around rw_swap_page(). */
#define read_swap_page(nr,buf) \
        rw_swap_page(READ,(nr),(buf))
#define write_swap_page(nr,buf) \
        rw_swap_page(WRITE,(nr),(buf))

/* Highest usable physical memory address, set at boot. */
extern unsigned long high_memory;

/* Allocation priority levels passed to __get_free_pages().
   NOTE(review): exact sleeping/reclaim semantics per level are defined
   in the allocator, not here — verify before relying on them. */
#define GFP_BUFFER      0x00
#define GFP_ATOMIC      0x01
#define GFP_USER        0x02
#define GFP_KERNEL      0x03
#define GFP_NOBUFFER    0x04
#define GFP_NFS         0x05

/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA         0x80
 219 
 220 /*
 221  * vm_ops not present page codes for shared memory.
 222  *
 223  * Will go away eventually..
 224  */
/* Page-table "not present" code marking a shared-memory page. */
#define SHM_SWP_TYPE 0x41
/* Fault handler for shared-memory pages; takes a pointer to the pte. */
extern void shm_no_page (ulong *);

/*
 * swap cache stuff (in swap.c)
 */
/* Define to compile in the hit/miss statistics counters below. */
#define SWAP_CACHE_INFO

/* One entry per physical page (indexed by MAP_NR): the swap entry the
   page is cached under, or 0 if none. */
extern unsigned long * swap_cache;

#ifdef SWAP_CACHE_INFO
extern unsigned long swap_cache_add_total;
extern unsigned long swap_cache_add_success;
extern unsigned long swap_cache_del_total;
extern unsigned long swap_cache_del_success;
extern unsigned long swap_cache_find_total;
extern unsigned long swap_cache_find_success;
#endif
 243 
 244 extern inline unsigned long in_swap_cache(unsigned long addr)
     /* [previous][next][first][last][top][bottom][index][help] */
 245 {
 246         return swap_cache[MAP_NR(addr)]; 
 247 }
 248 
 249 extern inline long find_in_swap_cache (unsigned long addr)
     /* [previous][next][first][last][top][bottom][index][help] */
 250 {
 251         unsigned long entry;
 252 
 253 #ifdef SWAP_CACHE_INFO
 254         swap_cache_find_total++;
 255 #endif
 256         entry = (unsigned long) xchg_ptr(swap_cache + MAP_NR(addr), NULL);
 257 #ifdef SWAP_CACHE_INFO
 258         if (entry)
 259                 swap_cache_find_success++;
 260 #endif  
 261         return entry;
 262 }
 263 
 264 extern inline int delete_from_swap_cache(unsigned long addr)
     /* [previous][next][first][last][top][bottom][index][help] */
 265 {
 266         unsigned long entry;
 267         
 268 #ifdef SWAP_CACHE_INFO
 269         swap_cache_del_total++;
 270 #endif  
 271         entry = (unsigned long) xchg_ptr(swap_cache + MAP_NR(addr), NULL);
 272         if (entry)  {
 273 #ifdef SWAP_CACHE_INFO
 274                 swap_cache_del_success++;
 275 #endif
 276                 swap_free(entry);
 277                 return 1;
 278         }
 279         return 0;
 280 }
 281 
 282 #endif /* __KERNEL__ */
 283 
 284 #endif

/* [previous][next][first][last][top][bottom][index][help] */