root/include/linux/mm.h


DEFINITIONS

This source file includes the following definitions.
  1. get_free_page

   1 #ifndef _LINUX_MM_H
   2 #define _LINUX_MM_H
   3 
   4 #include <linux/page.h>
   5 #include <linux/sched.h>
   6 #include <linux/errno.h>
   7 #include <linux/kernel.h>
   8 
   9 #define VERIFY_READ 0
  10 #define VERIFY_WRITE 1
  11 
  12 extern int verify_area(int, const void *, unsigned long);
  13 
  14 /*
  15  * Linux kernel virtual memory manager primitives.
  16  * The idea being to have a "virtual" mm in the same way
  17  * we have a virtual fs - giving a cleaner interface to the
  18  * mm details, and allowing different kinds of memory mappings
  19  * (from shared memory to executable loading to arbitrary
  20  * mmap() functions).
  21  */
  22 
  23 /*
  24  * This struct defines a VMM memory area. There is one of these
  25  * per VM-area/task.  A VM area is any part of the process virtual memory
  26  * space that has a special rule for the page-fault handlers (ie a shared
  27  * library, the executable area etc).
  28  */
  29 struct vm_area_struct {
  30         struct task_struct * vm_task;           /* VM area parameters */
  31         unsigned long vm_start;
  32         unsigned long vm_end;
  33         unsigned short vm_page_prot;
  34         unsigned short vm_flags;
  35         struct vm_area_struct * vm_next;        /* linked list */
  36         struct vm_area_struct * vm_share;       /* linked list */
  37         struct inode * vm_inode;
  38         unsigned long vm_offset;
  39         struct vm_operations_struct * vm_ops;
  40 };
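
For illustration only (this sketch is not part of mm.h, and the list-head argument is a stand-in for however a caller reaches a task's first area): the VM areas of a task form a singly linked list through vm_next, so the area covering a given address can be found with a plain walk.

static inline struct vm_area_struct * find_vma_example(
        struct vm_area_struct * head, unsigned long addr)
{
        struct vm_area_struct * vma;

        /* walk the per-task list until an area covers the address */
        for (vma = head ; vma ; vma = vma->vm_next)
                if (addr >= vma->vm_start && addr < vma->vm_end)
                        return vma;
        return NULL;    /* no mapping covers this address */
}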
  41 
  42 /*
  43  * vm_flags..
  44  */
  45 #define VM_GROWSDOWN    0x01
  46 #define VM_GROWSUP      0x02
  47 #define VM_SHM          0x04
  48 
  49 /*
  50  * These are the virtual MM functions - opening of an area, closing it (needed to
  51  * keep files on disk up-to-date etc), pointers to the functions called when a
  52  * no-page or a wp-page exception occurs, and the function which decides on sharing
  53  * of pages between different processes.
  54  */
  55 struct vm_operations_struct {
  56         void (*open)(struct vm_area_struct * area);
  57         void (*close)(struct vm_area_struct * area);
  58         void (*nopage)(int error_code,
  59                        struct vm_area_struct * area, unsigned long address);
  60         void (*wppage)(struct vm_area_struct * area, unsigned long address);
  61         int (*share)(struct vm_area_struct * from, struct vm_area_struct * to, unsigned long address);
  62         int (*unmap)(struct vm_area_struct *area, unsigned long, size_t);
  63 };
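
As a hedged sketch (the handler names and bodies below are invented for illustration, not taken from any driver), a filesystem or driver fills in a vm_operations_struct with the callbacks it needs, leaves the rest NULL, and points vm_ops of its areas at the static table:

static void example_open(struct vm_area_struct * area)
{
        /* e.g. take a reference on area->vm_inode */
}

static void example_close(struct vm_area_struct * area)
{
        /* e.g. release whatever open acquired */
}

static struct vm_operations_struct example_vm_ops = {
        example_open,   /* open */
        example_close,  /* close */
        NULL,           /* nopage - fall back to default handling */
        NULL,           /* wppage */
        NULL,           /* share */
        NULL,           /* unmap */
};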
  64 
  65 extern unsigned long __bad_page(void);
  66 extern unsigned long __bad_pagetable(void);
  67 extern unsigned long __zero_page(void);
  68 
  69 #define BAD_PAGETABLE __bad_pagetable()
  70 #define BAD_PAGE __bad_page()
  71 #define ZERO_PAGE __zero_page()
  72 
  73 /* planning stage.. */
  74 #define P_DIRTY         0x0001
  75 #define P_LOCKED        0x0002
  76 #define P_UPTODATE      0x0004
  77 #define P_RESERVED      0x8000
  78 
  79 struct page_info {
  80         unsigned short flags;
  81         unsigned short count;
  82         struct inode * inode;
  83         unsigned long offset;
  84         struct page_info * next_same_inode;
  85         struct page_info * prev_same_inode;
  86         struct page_info * next_hash;
  87         struct page_info * prev_hash;
  88         struct wait_queue *wait;
  89 };
  90 /* end of planning stage */
  91 
  92 /*
  93  * Free area management
  94  */
  95 
  96 extern int nr_swap_pages;
  97 extern int nr_free_pages;
  98 
  99 #define MAX_SECONDARY_PAGES 20
 100 #define NR_MEM_LISTS 6
 101 
 102 struct mem_list {
 103         struct mem_list * next;
 104         struct mem_list * prev;
 105 };
 106 
 107 extern struct mem_list free_area_list[NR_MEM_LISTS];
 108 extern unsigned char * free_area_map[NR_MEM_LISTS];
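
Reading this as a buddy-style allocator is an interpretation, not something this header states: free_area_list[k] is assumed to chain free blocks of 2^k contiguous pages, giving six block sizes from one page upwards.

/* Sketch under that assumption; PAGE_SIZE comes from <linux/page.h>. */
#define EXAMPLE_BLOCK_BYTES(order)      (PAGE_SIZE << (order))
/* order 0 -> one page, order NR_MEM_LISTS-1 -> 32 pages when NR_MEM_LISTS is 6 */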
 109 
 110 /*
 111  * This is timing-critical - most of the time in getting a new page
 112  * goes to clearing the page. If you want a page without the clearing
 113  * overhead, just use __get_free_page() directly..
 114  */
 115 #define __get_free_page(priority) __get_free_pages((priority),0)
 116 extern unsigned long __get_free_pages(int priority, unsigned long gfporder);
 117 extern inline unsigned long get_free_page(int priority)
 118 {
 119         unsigned long page;
 120 
 121         page = __get_free_page(priority);
 122         if (page)
 123                 __asm__ __volatile__("rep ; stosl"
 124                         : /* no outputs */ \
 125                         :"a" (0),"c" (1024),"D" (page)
 126                         :"di","cx");
 127         return page;
 128 }
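
A usage sketch (GFP_KERNEL and free_page() are declared further down in this header, ENOMEM in the headers included above): callers that want a zeroed page call get_free_page(), check for failure, and release the page with free_page().

static int example_use_page(void)
{
        unsigned long page;

        page = get_free_page(GFP_KERNEL);       /* cleared page, or 0 on failure */
        if (!page)
                return -ENOMEM;
        /* ... use the page ... */
        free_page(page);
        return 0;
}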
 129 
 130 /* memory.c & swap.c */
 131 
 132 #define free_page(addr) free_pages((addr),0)
 133 extern void free_pages(unsigned long addr, unsigned long order);
 134 
 135 extern void show_free_areas(void);
 136 extern unsigned long put_dirty_page(struct task_struct * tsk,unsigned long page,
 137         unsigned long address);
 138 extern void free_page_tables(struct task_struct * tsk);
 139 extern void clear_page_tables(struct task_struct * tsk);
 140 extern int copy_page_tables(struct task_struct * to);
 141 extern int clone_page_tables(struct task_struct * to);
 142 extern int unmap_page_range(unsigned long from, unsigned long size);
 143 extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, int mask);
 144 extern int zeromap_page_range(unsigned long from, unsigned long size, int mask);
 145 
 146 extern void do_wp_page(unsigned long error_code, unsigned long address,
 147         struct task_struct *tsk, unsigned long user_esp);
 148 extern void do_no_page(unsigned long error_code, unsigned long address,
 149         struct task_struct *tsk, unsigned long user_esp);
 150 
 151 extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem);
 152 extern void mem_init(unsigned long low_start_mem,
 153                      unsigned long start_mem, unsigned long end_mem);
 154 extern void show_mem(void);
 155 extern void oom(struct task_struct * task);
 156 extern void si_meminfo(struct sysinfo * val);
 157 
 158 /* vmalloc.c */
 159 
 160 extern void * vmalloc(unsigned long size);
 161 extern void vfree(void * addr);
 162 extern int vread(char *buf, char *addr, int count);
 163 
 164 /* swap.c */
 165 
 166 extern void swap_free(unsigned long page_nr);
 167 extern unsigned long swap_duplicate(unsigned long page_nr);
 168 extern void swap_in(unsigned long *table_ptr);
 169 extern void si_swapinfo(struct sysinfo * val);
 170 extern void rw_swap_page(int rw, unsigned long nr, char * buf);
 171 
 172 /* mmap.c */
 173 extern int do_mmap(struct file * file, unsigned long addr, unsigned long len,
 174         unsigned long prot, unsigned long flags, unsigned long off);
 175 typedef int (*map_mergep_fnp)(const struct vm_area_struct *,
 176                               const struct vm_area_struct *, void *);
 177 extern void merge_segments(struct vm_area_struct *, map_mergep_fnp, void *);
 178 extern void insert_vm_struct(struct task_struct *, struct vm_area_struct *);
 179 extern int ignoff_mergep(const struct vm_area_struct *,
 180                          const struct vm_area_struct *, void *);
 181 extern int do_munmap(unsigned long, size_t);
 182 
 183 #define read_swap_page(nr,buf) \
 184         rw_swap_page(READ,(nr),(buf))
 185 #define write_swap_page(nr,buf) \
 186         rw_swap_page(WRITE,(nr),(buf))
 187 
 188 #define invalidate() \
 189 __asm__ __volatile__("movl %%cr3,%%eax\n\tmovl %%eax,%%cr3": : :"ax")
 190 
 191 extern unsigned long high_memory;
 192 
 193 #define MAP_NR(addr) ((addr) >> PAGE_SHIFT)
 194 #define MAP_PAGE_RESERVED (1<<15)
 195 
 196 extern unsigned short * mem_map;
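
An illustration, assuming mem_map[] holds a per-page use count with MAP_PAGE_RESERVED as its top bit: MAP_NR turns a physical address into the index of that page's entry.

/* Sketch: mark the page containing a physical address as reserved. */
static inline void example_reserve_page(unsigned long addr)
{
        mem_map[MAP_NR(addr)] |= MAP_PAGE_RESERVED;
}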
 197 
 198 #define PAGE_PRESENT    0x001
 199 #define PAGE_RW         0x002
 200 #define PAGE_USER       0x004
 201 #define PAGE_PWT        0x008   /* 486 only - not used currently */
 202 #define PAGE_PCD        0x010   /* 486 only - not used currently */
 203 #define PAGE_ACCESSED   0x020
 204 #define PAGE_DIRTY      0x040
 205 #define PAGE_COW        0x200   /* implemented in software (one of the AVL bits) */
 206 
 207 #define PAGE_PRIVATE    (PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_ACCESSED | PAGE_COW)
 208 #define PAGE_SHARED     (PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_ACCESSED)
 209 #define PAGE_COPY       (PAGE_PRESENT | PAGE_USER | PAGE_ACCESSED | PAGE_COW)
 210 #define PAGE_READONLY   (PAGE_PRESENT | PAGE_USER | PAGE_ACCESSED)
 211 #define PAGE_TABLE      (PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_ACCESSED)
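
As a sketch (the pte pointer is hypothetical), a page-table entry is the page's physical address OR'ed with one of the protection combinations above; after changing an entry, the TLB is flushed with invalidate(), defined earlier in this header.

static inline void example_set_pte(unsigned long * pte, unsigned long page)
{
        *pte = page | PAGE_PRIVATE;     /* present, writable, user, accessed, COW bit */
        invalidate();                   /* reload %cr3 so stale translations are dropped */
}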
 212 
 213 #define GFP_BUFFER      0x00
 214 #define GFP_ATOMIC      0x01
 215 #define GFP_USER        0x02
 216 #define GFP_KERNEL      0x03
 217 #define GFP_NOBUFFER    0x04
 218 
 219 
 220 /* vm_ops not present page codes */
 221 #define SHM_SWP_TYPE 0x41
 222 extern void shm_no_page (ulong *);
 223 
 224 #endif
