root/include/linux/mm.h

/* [previous][next][first][last][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes the following definitions.
  1. get_free_page
  2. in_swap_cache
  3. find_in_swap_cache
  4. delete_from_swap_cache

   1 #ifndef _LINUX_MM_H
   2 #define _LINUX_MM_H
   3 
   4 #include <asm/page.h>
   5 
   6 #include <linux/sched.h>
   7 #include <linux/errno.h>
   8 #include <linux/kernel.h>
   9 
  10 #define VERIFY_READ 0
  11 #define VERIFY_WRITE 1
  12 
  13 extern int verify_area(int, const void *, unsigned long);
  14 
  15 /*
  16  * Linux kernel virtual memory manager primitives.
  17  * The idea being to have a "virtual" mm in the same way
  18  * we have a virtual fs - giving a cleaner interface to the
  19  * mm details, and allowing different kinds of memory mappings
  20  * (from shared memory to executable loading to arbitrary
  21  * mmap() functions).
  22  */
  23 
  24 /*
  25  * This struct defines a VMM memory area. There is one of these
  26  * per VM-area/task.  A VM area is any part of the process virtual memory
  27  * space that has a special rule for the page-fault handlers (ie a shared
  28  * library, the executable area etc).
  29  */
  30 struct vm_area_struct {
  31         struct task_struct * vm_task;           /* VM area parameters */
  32         unsigned long vm_start;
  33         unsigned long vm_end;
  34         unsigned short vm_page_prot;
  35         unsigned short vm_flags;
  36 /* linked list of VM areas per task, sorted by address */
  37         struct vm_area_struct * vm_next;
  38 /* for areas with inode, the circular list inode->i_mmap */
  39 /* for shm areas, the linked list of attaches */
  40 /* otherwise unused */
  41         struct vm_area_struct * vm_next_share;
  42         struct vm_area_struct * vm_prev_share;
  43 /* more */
  44         struct vm_operations_struct * vm_ops;
  45         unsigned long vm_offset;
  46         struct inode * vm_inode;
  47         unsigned long vm_pte;                   /* shared mem */
  48 };
  49 
  50 /*
  51  * vm_flags..
  52  */
  53 #define VM_READ         0x0001  /* currently active flags */
  54 #define VM_WRITE        0x0002
  55 #define VM_EXEC         0x0004
  56 #define VM_SHARED       0x0008
  57 
  58 #define VM_MAYREAD      0x0010  /* limits for mprotect() etc */
  59 #define VM_MAYWRITE     0x0020
  60 #define VM_MAYEXEC      0x0040
  61 #define VM_MAYSHARE     0x0080
  62 
  63 #define VM_GROWSDOWN    0x0100  /* general info on the segment */
  64 #define VM_GROWSUP      0x0200
  65 #define VM_SHM          0x0400
  66 #define VM_DENYWRITE    0x0800  /* ETXTBSY on write attempts.. */
  67 
  68 #define VM_EXECUTABLE   0x1000
  69 
  70 #define VM_STACK_FLAGS  0x0177
  71 
  72 /*
  73  * These are the virtual MM functions - opening of an area, closing and
  74  * unmapping it (needed to keep files on disk up-to-date etc), pointer
  75  * to the functions called when a no-page or a wp-page exception occurs. 
  76  */
  77 struct vm_operations_struct {
  78         void (*open)(struct vm_area_struct * area);
  79         void (*close)(struct vm_area_struct * area);
  80         void (*unmap)(struct vm_area_struct *area, unsigned long, size_t);
  81         void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot);
  82         void (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags);
  83         void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise);
  84         unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address,
  85                 unsigned long page, int error_code);
  86         unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
  87                 unsigned long page);
  88         void (*swapout)(struct vm_area_struct *,  unsigned long, unsigned long *);
  89         unsigned long (*swapin)(struct vm_area_struct *,  unsigned long);
  90 };
  91 
  92 extern unsigned long __bad_page(void);
  93 extern unsigned long __bad_pagetable(void);
  94 extern unsigned long __zero_page(void);
  95 
  96 #define BAD_PAGETABLE __bad_pagetable()
  97 #define BAD_PAGE __bad_page()
  98 #define ZERO_PAGE __zero_page()
  99 
 100 /* planning stage.. */
 101 #define P_DIRTY         0x0001
 102 #define P_LOCKED        0x0002
 103 #define P_UPTODATE      0x0004
 104 #define P_RESERVED      0x8000
 105 
 106 struct page_info {
 107         unsigned short flags;
 108         unsigned short count;
 109         struct inode * inode;
 110         unsigned long offset;
 111         struct page_info * next_same_inode;
 112         struct page_info * prev_same_inode;
 113         struct page_info * next_hash;
 114         struct page_info * prev_hash;
 115         struct wait_queue *wait;
 116 };
 117 /* end of planning stage */
 118 
 119 #ifdef __KERNEL__
 120 
 121 /*
 122  * Free area management
 123  */
 124 
 125 extern int nr_swap_pages;
 126 extern int nr_free_pages;
 127 extern int min_free_pages;
 128 
 129 #define NR_MEM_LISTS 6
 130 
 131 struct mem_list {
 132         struct mem_list * next;
 133         struct mem_list * prev;
 134 };
 135 
 136 extern struct mem_list free_area_list[NR_MEM_LISTS];
 137 extern unsigned char * free_area_map[NR_MEM_LISTS];
 138 
 139 /*
 140  * This is timing-critical - most of the time in getting a new page
 141  * goes to clearing the page. If you want a page without the clearing
 142  * overhead, just use __get_free_page() directly..
 143  */
 144 #define __get_free_page(priority) __get_free_pages((priority),0)
 145 extern unsigned long __get_free_pages(int priority, unsigned long gfporder);
 146 extern unsigned long __get_dma_pages(int priority, unsigned long gfporder);
 147 extern inline unsigned long get_free_page(int priority)
     /* [previous][next][first][last][top][bottom][index][help] */
 148 {
 149         unsigned long page;
 150 
 151         page = __get_free_page(priority);
 152         if (page)
 153                 __asm__ __volatile__("rep ; stosl"
 154                         : /* no outputs */ \
 155                         :"a" (0),"c" (1024),"D" (page)
 156                         :"di","cx");
 157         return page;
 158 }
 159 
  160 /* memory.c & swap.c */
 161 
 162 #define free_page(addr) free_pages((addr),0)
 163 extern void free_pages(unsigned long addr, unsigned long order);
 164 
 165 extern void show_free_areas(void);
 166 extern unsigned long put_dirty_page(struct task_struct * tsk,unsigned long page,
 167         unsigned long address);
 168 
 169 extern void free_page_tables(struct task_struct * tsk);
 170 extern void clear_page_tables(struct task_struct * tsk);
 171 extern int copy_page_tables(struct task_struct * to);
 172 extern int clone_page_tables(struct task_struct * to);
 173 extern int unmap_page_range(unsigned long from, unsigned long size);
 174 extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, int mask);
 175 extern int zeromap_page_range(unsigned long from, unsigned long size, int mask);
 176 
 177 extern void do_wp_page(struct vm_area_struct * vma, unsigned long address,
 178         unsigned long error_code);
 179 extern void do_no_page(struct vm_area_struct * vma, unsigned long address,
 180         unsigned long error_code);
 181 
 182 extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem);
 183 extern void mem_init(unsigned long low_start_mem,
 184                      unsigned long start_mem, unsigned long end_mem);
 185 extern void show_mem(void);
 186 extern void oom(struct task_struct * task);
 187 extern void si_meminfo(struct sysinfo * val);
 188 
 189 /* vmalloc.c */
 190 
 191 extern void * vmalloc(unsigned long size);
 192 extern void vfree(void * addr);
 193 extern int vread(char *buf, char *addr, int count);
 194 
 195 /* swap.c */
 196 
 197 extern void swap_free(unsigned long page_nr);
 198 extern unsigned long swap_duplicate(unsigned long page_nr);
 199 extern unsigned long swap_in(unsigned long entry);
 200 extern void si_swapinfo(struct sysinfo * val);
 201 extern void rw_swap_page(int rw, unsigned long nr, char * buf);
 202 
 203 /* mmap.c */
 204 extern int do_mmap(struct file * file, unsigned long addr, unsigned long len,
 205         unsigned long prot, unsigned long flags, unsigned long off);
 206 extern void merge_segments(struct vm_area_struct *);
 207 extern void insert_vm_struct(struct task_struct *, struct vm_area_struct *);
 208 extern void remove_shared_vm_struct(struct vm_area_struct *);
 209 extern int do_munmap(unsigned long, size_t);
 210 extern unsigned long get_unmapped_area(unsigned long);
 211 
 212 #define read_swap_page(nr,buf) \
 213         rw_swap_page(READ,(nr),(buf))
 214 #define write_swap_page(nr,buf) \
 215         rw_swap_page(WRITE,(nr),(buf))
 216 
 217 extern unsigned long high_memory;
 218 
 219 #define MAP_NR(addr) ((addr) >> PAGE_SHIFT)
 220 #define MAP_PAGE_RESERVED (1<<15)
 221 
 222 extern unsigned short * mem_map;
 223 
 224 #define PAGE_PRESENT    0x001
 225 #define PAGE_RW         0x002
 226 #define PAGE_USER       0x004
 227 #define PAGE_PWT        0x008   /* 486 only - not used currently */
 228 #define PAGE_PCD        0x010   /* 486 only - not used currently */
 229 #define PAGE_ACCESSED   0x020
 230 #define PAGE_DIRTY      0x040
 231 #define PAGE_COW        0x200   /* implemented in software (one of the AVL bits) */
 232 
 233 #define PAGE_PRIVATE    (PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_ACCESSED | PAGE_COW)
 234 #define PAGE_SHARED     (PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_ACCESSED)
 235 #define PAGE_COPY       (PAGE_PRESENT | PAGE_USER | PAGE_ACCESSED | PAGE_COW)
 236 #define PAGE_READONLY   (PAGE_PRESENT | PAGE_USER | PAGE_ACCESSED)
 237 #define PAGE_TABLE      (PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_ACCESSED)
 238 
 239 #define GFP_BUFFER      0x00
 240 #define GFP_ATOMIC      0x01
 241 #define GFP_USER        0x02
 242 #define GFP_KERNEL      0x03
 243 #define GFP_NOBUFFER    0x04
 244 #define GFP_NFS         0x05
 245 
 246 /* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
 247    platforms, used as appropriate on others */
 248 
 249 #define GFP_DMA         0x80
 250 
 251 /*
 252  * vm_ops not present page codes for shared memory.
 253  *
 254  * Will go away eventually..
 255  */
 256 #define SHM_SWP_TYPE 0x41
 257 extern void shm_no_page (ulong *);
 258 
 259 /*
 260  * swap cache stuff (in swap.c)
 261  */
 262 #define SWAP_CACHE_INFO
 263 
 264 extern unsigned long * swap_cache;
 265 
 266 #ifdef SWAP_CACHE_INFO
 267 extern unsigned long swap_cache_add_total;
 268 extern unsigned long swap_cache_add_success;
 269 extern unsigned long swap_cache_del_total;
 270 extern unsigned long swap_cache_del_success;
 271 extern unsigned long swap_cache_find_total;
 272 extern unsigned long swap_cache_find_success;
 273 #endif
 274 
 275 extern inline unsigned long in_swap_cache(unsigned long addr)
     /* [previous][next][first][last][top][bottom][index][help] */
 276 {
 277         return swap_cache[addr >> PAGE_SHIFT]; 
 278 }
 279 
 280 extern inline long find_in_swap_cache (unsigned long addr)
     /* [previous][next][first][last][top][bottom][index][help] */
 281 {
 282         unsigned long entry;
 283 
 284 #ifdef SWAP_CACHE_INFO
 285         swap_cache_find_total++;
 286 #endif
 287         __asm__ __volatile__("xchgl %0,%1"
 288                 :"=m" (swap_cache[addr >> PAGE_SHIFT]),
 289                  "=r" (entry)
 290                 :"0" (swap_cache[addr >> PAGE_SHIFT]),
 291                  "1" (0));
 292 #ifdef SWAP_CACHE_INFO
 293         if (entry)
 294                 swap_cache_find_success++;
 295 #endif  
 296         return entry;
 297 }
 298 
 299 extern inline int delete_from_swap_cache(unsigned long addr)
     /* [previous][next][first][last][top][bottom][index][help] */
 300 {
 301         unsigned long entry;
 302         
 303 #ifdef SWAP_CACHE_INFO
 304         swap_cache_del_total++;
 305 #endif  
 306         __asm__ __volatile__("xchgl %0,%1"
 307                 :"=m" (swap_cache[addr >> PAGE_SHIFT]),
 308                  "=r" (entry)
 309                 :"0" (swap_cache[addr >> PAGE_SHIFT]),
 310                  "1" (0));
 311         if (entry)  {
 312 #ifdef SWAP_CACHE_INFO
 313                 swap_cache_del_success++;
 314 #endif
 315                 swap_free(entry);
 316                 return 1;
 317         }
 318         return 0;
 319 }
 320 
 321 #endif /* __KERNEL__ */
 322 
 323 #endif

/* [previous][next][first][last][top][bottom][index][help] */