root/include/linux/mm.h


DEFINITIONS

This source file includes the following definitions:
  1. get_free_page
  2. expand_stack
  3. find_vma
  4. find_vma_intersection

#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>

extern unsigned long high_memory;

#include <asm/page.h>
#include <asm/atomic.h>

#ifdef __KERNEL__

#define VERIFY_READ 0
#define VERIFY_WRITE 1

extern int verify_area(int, const void *, unsigned long);

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

/*
 * This struct defines a VMM memory area. There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
        struct mm_struct * vm_mm;       /* VM area parameters */
        unsigned long vm_start;
        unsigned long vm_end;
        pgprot_t vm_page_prot;
        unsigned short vm_flags;
/* AVL tree of VM areas per task, sorted by address */
        short vm_avl_height;
        struct vm_area_struct * vm_avl_left;
        struct vm_area_struct * vm_avl_right;
/* linked list of VM areas per task, sorted by address */
        struct vm_area_struct * vm_next;
/* for areas with inode, the circular list inode->i_mmap */
/* for shm areas, the circular list of attaches */
/* otherwise unused */
        struct vm_area_struct * vm_next_share;
        struct vm_area_struct * vm_prev_share;
/* more */
        struct vm_operations_struct * vm_ops;
        unsigned long vm_offset;
        struct inode * vm_inode;
        unsigned long vm_pte;                   /* shared mem */
};
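
/*
 * Illustrative sketch (not part of the original header): walking a
 * task's VM areas through the per-task vm_next list, which is kept
 * sorted by address.  It assumes the list head is tsk->mm->mmap, the
 * usual field name in kernels of this generation.
 */
#if 0
static inline int count_vmas(struct task_struct * tsk)
{
        struct vm_area_struct * vma;
        int n = 0;

        if (!tsk->mm)
                return 0;
        for (vma = tsk->mm->mmap; vma; vma = vma->vm_next)
                n++;
        return n;
}
#endif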

/*
 * vm_flags..
 */
#define VM_READ         0x0001  /* currently active flags */
#define VM_WRITE        0x0002
#define VM_EXEC         0x0004
#define VM_SHARED       0x0008

#define VM_MAYREAD      0x0010  /* limits for mprotect() etc */
#define VM_MAYWRITE     0x0020
#define VM_MAYEXEC      0x0040
#define VM_MAYSHARE     0x0080

#define VM_GROWSDOWN    0x0100  /* general info on the segment */
#define VM_GROWSUP      0x0200
#define VM_SHM          0x0400  /* shared memory area, don't swap out */
#define VM_DENYWRITE    0x0800  /* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE   0x1000
#define VM_LOCKED       0x2000

#define VM_STACK_FLAGS  0x0177

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];

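/*
 * Illustrative sketch (not part of the original header): the low four
 * vm_flags bits (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED) index
 * protection_map[] to obtain the pgprot_t that pages in the area are
 * mapped with.
 */
#if 0
static inline pgprot_t example_vm_prot(unsigned short vm_flags)
{
        return protection_map[vm_flags & 0x0f];
}
#endif
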
/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), and
 * pointers to the functions called when a no-page or a wp-page
 * exception occurs.
 */
struct vm_operations_struct {
        void (*open)(struct vm_area_struct * area);
        void (*close)(struct vm_area_struct * area);
        void (*unmap)(struct vm_area_struct *area, unsigned long, size_t);
        void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot);
        int (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags);
        void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise);
        unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access);
        unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
                unsigned long page);
        int (*swapout)(struct vm_area_struct *,  unsigned long, pte_t *);
        pte_t (*swapin)(struct vm_area_struct *, unsigned long, unsigned long);
};

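/*
 * Illustrative sketch (not part of the original header): a minimal
 * vm_operations_struct such as a driver might hand out from its mmap()
 * routine, providing only a nopage handler and leaving every other hook
 * NULL so the generic code falls back on its defaults.  The handler name
 * and its use of get_free_page()/GFP_KERNEL (both declared further down
 * in this file) are assumptions made for the example.
 */
#if 0
static unsigned long example_nopage(struct vm_area_struct * area,
        unsigned long address, int write_access)
{
        /* return the address of a fresh zeroed page, or 0 on failure */
        return get_free_page(GFP_KERNEL);
}

static struct vm_operations_struct example_vm_ops = {
        NULL,                   /* open */
        NULL,                   /* close */
        NULL,                   /* unmap */
        NULL,                   /* protect */
        NULL,                   /* sync */
        NULL,                   /* advise */
        example_nopage,         /* nopage */
        NULL,                   /* wppage */
        NULL,                   /* swapout */
        NULL,                   /* swapin */
};
#endif
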
/*
 * Try to keep the most commonly accessed fields in single cache lines
 * here (16 bytes or greater).  This ordering should be particularly
 * beneficial on 32-bit processors.
 *
 * The first line is data used in linear searches (eg. clock algorithm
 * scans).  The second line is data used in page searches through the
 * page-cache.  -- sct
 */
typedef struct page {
        atomic_t count;
        unsigned flags; /* atomic flags, some possibly updated asynchronously */
        struct wait_queue *wait;
        struct page *next;

        struct page *next_hash;
        unsigned long offset;
        struct inode *inode;
        struct page *write_list;

        struct page *prev;
        struct page *prev_hash;
        struct buffer_head * buffers;
        unsigned dirty:16,
                 age:8;
} mem_map_t;

/* Page flag bit values */
#define PG_locked        0
#define PG_error         1
#define PG_referenced    2
#define PG_uptodate      3
#define PG_freeafter     4
#define PG_DMA           5
#define PG_reserved     31

/* Make it prettier to test the above... */
#define PageLocked(page)        (test_bit(PG_locked, &(page)->flags))
#define PageError(page)         (test_bit(PG_error, &(page)->flags))
#define PageReferenced(page)    (test_bit(PG_referenced, &(page)->flags))
#define PageDirty(page)         (test_bit(PG_dirty, &(page)->flags))
#define PageUptodate(page)      (test_bit(PG_uptodate, &(page)->flags))
#define PageFreeafter(page)     (test_bit(PG_freeafter, &(page)->flags))
#define PageDMA(page)           (test_bit(PG_DMA, &(page)->flags))
#define PageReserved(page)      (test_bit(PG_reserved, &(page)->flags))

extern mem_map_t * mem_map;

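/*
 * Illustrative sketch (not part of the original header): scanning the
 * global mem_map[] array and testing per-page bits with the macros
 * above.  The loop bound high_memory >> PAGE_SHIFT (number of physical
 * page frames) is the idiom used elsewhere in this kernel generation
 * and is assumed here.
 */
#if 0
static inline unsigned long count_reserved_pages(void)
{
        unsigned long nr, reserved = 0;

        for (nr = 0; nr < (high_memory >> PAGE_SHIFT); nr++)
                if (PageReserved(mem_map + nr))
                        reserved++;
        return reserved;
}
#endif
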
/*
 * This is timing-critical - most of the time in getting a new page
 * goes to clearing the page. If you want a page without the clearing
 * overhead, just use __get_free_page() directly..
 */
#define __get_free_page(priority) __get_free_pages((priority),0,0)
#define __get_dma_pages(priority, order) __get_free_pages((priority),(order),1)
extern unsigned long __get_free_pages(int priority, unsigned long gfporder, int dma);

extern inline unsigned long get_free_page(int priority)
{
        unsigned long page;

        page = __get_free_page(priority);
        if (page)
                memset((void *) page, 0, PAGE_SIZE);
        return page;
}

/* memory.c & swap.c */

#define free_page(addr) free_pages((addr),0)
extern void free_pages(unsigned long addr, unsigned long order);

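/*
 * Illustrative sketch (not part of the original header): the two
 * allocation flavours in use - get_free_page() when the caller needs a
 * cleared page, __get_free_page() when every byte will be overwritten
 * anyway and the memset() would be wasted work.  GFP_KERNEL is defined
 * further down in this file.
 */
#if 0
static inline int example_page_alloc(void)
{
        unsigned long page;

        /* cleared page, e.g. for data that is handed to user space */
        page = get_free_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;
        free_page(page);

        /* raw page; the caller fills it completely itself */
        page = __get_free_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;
        memset((void *) page, 0xff, PAGE_SIZE);
        free_page(page);
        return 0;
}
#endif
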
extern void show_free_areas(void);
extern unsigned long put_dirty_page(struct task_struct * tsk,unsigned long page,
        unsigned long address);

extern void free_page_tables(struct task_struct * tsk);
extern void clear_page_tables(struct task_struct * tsk);
extern int new_page_tables(struct task_struct * tsk);
extern int copy_page_tables(struct task_struct * to);

extern int zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);

extern void vmtruncate(struct inode * inode, unsigned long offset);
extern void handle_mm_fault(struct vm_area_struct *vma, unsigned long address, int write_access);
extern void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
extern void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);

extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem);
extern void mem_init(unsigned long start_mem, unsigned long end_mem);
extern void show_mem(void);
extern void oom(struct task_struct * tsk);
extern void si_meminfo(struct sysinfo * val);

/* vmalloc.c */

extern void * vmalloc(unsigned long size);
extern void * vremap(unsigned long offset, unsigned long size);
extern void vfree(void * addr);
extern int vread(char *buf, char *addr, int count);

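/*
 * Illustrative sketch (not part of the original header): vmalloc() for a
 * buffer that only needs to be virtually contiguous, released again with
 * vfree().  The 64 KB size is arbitrary.
 */
#if 0
static inline void * example_vmalloc_buffer(void)
{
        void * buf = vmalloc(64 * 1024);

        if (buf)
                memset(buf, 0, 64 * 1024);
        return buf;             /* the caller releases it with vfree(buf) */
}
#endif
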
/* mmap.c */
extern unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags, unsigned long off);
extern void merge_segments(struct task_struct *, unsigned long, unsigned long);
extern void insert_vm_struct(struct task_struct *, struct vm_area_struct *);
extern void remove_shared_vm_struct(struct vm_area_struct *);
extern void build_mmap_avl(struct mm_struct *);
extern void exit_mmap(struct mm_struct *);
extern int do_munmap(unsigned long, size_t);
extern unsigned long get_unmapped_area(unsigned long, unsigned long);

/* filemap.c */
extern unsigned long page_unuse(unsigned long);
extern int shrink_mmap(int, int);
extern void truncate_inode_pages(struct inode *, unsigned long);

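/*
 * Illustrative sketch (not part of the original header): how a binary
 * loader might map a file-backed segment with do_mmap() and undo it with
 * do_munmap().  PROT_* and MAP_* come from <linux/mman.h>, which this
 * header does not include; the flag choice and the MAP_FIXED-style error
 * check are assumptions made for the example.
 */
#if 0
#include <linux/mman.h>

static inline int example_map_segment(struct file * file, unsigned long addr,
        unsigned long len, unsigned long off)
{
        unsigned long mapped;

        mapped = do_mmap(file, addr, len, PROT_READ | PROT_EXEC,
                         MAP_FIXED | MAP_PRIVATE, off);
        if (mapped != addr)
                return -EINVAL;         /* do_mmap() returned an error code */
        return do_munmap(addr, len);    /* tear the mapping down again */
}
#endif
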
#define GFP_BUFFER      0x00
#define GFP_ATOMIC      0x01
#define GFP_USER        0x02
#define GFP_KERNEL      0x03
#define GFP_NOBUFFER    0x04
#define GFP_NFS         0x05

/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA         0x80

#define GFP_LEVEL_MASK 0xf

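/*
 * Illustrative sketch (not part of the original header): picking an
 * allocation priority.  GFP_ATOMIC is for contexts that must not sleep
 * (interrupts), GFP_KERNEL for ordinary kernel code; __get_dma_pages()
 * asks for pages that ISA DMA can reach.  The order value 1 (two
 * contiguous pages) is arbitrary.
 */
#if 0
static inline unsigned long example_priority(int atomic_context)
{
        if (atomic_context)
                return __get_free_page(GFP_ATOMIC);
        return __get_dma_pages(GFP_KERNEL, 1);
}
#endif
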
/* vma is the first one with  address < vma->vm_end,
 * and even  address < vma->vm_start. Have to extend vma. */
static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
{
        unsigned long grow;

        address &= PAGE_MASK;
        if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
                return -ENOMEM;
        grow = vma->vm_start - address;
        vma->vm_start = address;
        vma->vm_offset -= grow;
        vma->vm_mm->total_vm += grow >> PAGE_SHIFT;
        if (vma->vm_flags & VM_LOCKED)
                vma->vm_mm->locked_vm += grow >> PAGE_SHIFT;
        return 0;
}

#define avl_empty       (struct vm_area_struct *) NULL

/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
static inline struct vm_area_struct * find_vma (struct task_struct * task, unsigned long addr)
{
        struct vm_area_struct * result = NULL;

        if (task->mm) {
                struct vm_area_struct * tree = task->mm->mmap_avl;
                for (;;) {
                        if (tree == avl_empty)
                                break;
                        if (tree->vm_end > addr) {
                                result = tree;
                                if (tree->vm_start <= addr)
                                        break;
                                tree = tree->vm_avl_left;
                        } else
                                tree = tree->vm_avl_right;
                }
        }
        return result;
}

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none.  Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
{
        struct vm_area_struct * vma;

        vma = find_vma(task,start_addr);
        if (!vma || end_addr <= vma->vm_start)
                return NULL;
        return vma;
}

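/*
 * Illustrative sketch (not part of the original header): the usual
 * page-fault pattern built from find_vma() and expand_stack() - find the
 * first area ending above the faulting address, and if the address falls
 * in the gap just below a VM_GROWSDOWN area, grow the stack to cover it.
 * find_vma_intersection() is shown rejecting an overlapping mapping.
 * Error handling is simplified.
 */
#if 0
static inline int example_fault_check(struct task_struct * tsk, unsigned long address)
{
        struct vm_area_struct * vma = find_vma(tsk, address);

        if (!vma)
                return -EFAULT;                 /* nothing at or above the address */
        if (vma->vm_start <= address)
                return 0;                       /* already inside an existing area */
        if (!(vma->vm_flags & VM_GROWSDOWN))
                return -EFAULT;                 /* a hole, but not below a stack area */
        return expand_stack(vma, address);      /* 0 on success, -ENOMEM otherwise */
}

static inline int example_check_overlap(struct task_struct * tsk,
        unsigned long addr, unsigned long len)
{
        /* refuse a new fixed mapping that would overlap an existing area */
        return find_vma_intersection(tsk, addr, addr + len) ? -EINVAL : 0;
}
#endif
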
#endif /* __KERNEL__ */

#endif
