root/include/linux/mm.h

/* [previous][next][first][last][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes the following definitions.
  1. get_free_page

   1 #ifndef _LINUX_MM_H
   2 #define _LINUX_MM_H
   3 
   4 #include <linux/page.h>
   5 #include <linux/fs.h>
   6 #include <linux/kernel.h>
   7 
   8 /*
   9  * Linux kernel virtual memory manager primitives.
  10  * The idea being to have a "virtual" mm in the same way
  11  * we have a virtual fs - giving a cleaner interface to the
  12  * mm details, and allowing different kinds of memory mappings
  13  * (from shared memory to executable loading to arbitrary
  14  * mmap() functions).
  15  */
  16 
  17 /*
  18  * This struct defines a memory VMM memory area. There is one of these
  19  * per VM-area/task.  A VM area is any part of the process virtual memory
  20  * space that has a special rule for the page-fault handlers (ie a shared
  21  * library, the executable area etc).
  22  */
  23 struct vm_area_struct {
  24         struct task_struct * vm_task;           /* VM area parameters */
  25         unsigned long vm_start;
  26         unsigned long vm_end;
  27         unsigned short vm_page_prot;
  28         struct vm_area_struct * vm_next;        /* linked list */
  29         struct vm_area_struct * vm_share;       /* linked list */
  30         struct inode * vm_inode;
  31         unsigned long vm_offset;
  32         struct vm_operations_struct * vm_ops;
  33 };
  34 
  35 /*
  36  * These are the virtual MM functions - opening of an area, closing it (needed to
  37  * keep files on disk up-to-date etc), pointer to the functions called when a
  38  * no-page or a wp-page exception occurs, and the function which decides on sharing
  39  * of pages between different processes.
  40  */
  41 struct vm_operations_struct {
  42         void (*open)(struct vm_area_struct * area);
  43         void (*close)(struct vm_area_struct * area);
  44         void (*nopage)(int error_code,
  45                        struct vm_area_struct * area, unsigned long address);
  46         void (*wppage)(struct vm_area_struct * area, unsigned long address);
  47         int (*share)(struct vm_area_struct * from, struct vm_area_struct * to, unsigned long address);
  48         int (*unmap)(struct vm_area_struct *area, unsigned long, size_t);
  49 };
  50 
  51 extern unsigned long __bad_page(void);
  52 extern unsigned long __bad_pagetable(void);
  53 extern unsigned long __zero_page(void);
  54 
  55 #define BAD_PAGETABLE __bad_pagetable()
  56 #define BAD_PAGE __bad_page()
  57 #define ZERO_PAGE __zero_page()
  58 
  59 extern volatile short free_page_ptr; /* used by malloc and tcp/ip. */
  60 
  61 extern int nr_swap_pages;
  62 extern int nr_free_pages;
  63 extern unsigned long free_page_list;
  64 extern int nr_secondary_pages;
  65 extern unsigned long secondary_page_list;
  66 
  67 #define MAX_SECONDARY_PAGES 10
  68 
  69 /*
  70  * This is timing-critical - most of the time in getting a new page
  71  * goes to clearing the page. If you want a page without the clearing
  72  * overhead, just use __get_free_page() directly..
  73  */
  74 extern unsigned long __get_free_page(int priority);
  75 extern inline unsigned long get_free_page(int priority)
     /* [previous][next][first][last][top][bottom][index][help] */
  76 {
  77         unsigned long page;
  78 
  79         page = __get_free_page(priority);
  80         if (page)
  81                 __asm__ __volatile__("rep ; stosl"
  82                         : /* no outputs */ \
  83                         :"a" (0),"c" (1024),"D" (page)
  84                         :"di","cx");
  85         return page;
  86 }
  87 
  88 /* memory.c */
  89 
  90 extern void free_page(unsigned long addr);
  91 extern unsigned long put_dirty_page(struct task_struct * tsk,unsigned long page,
  92         unsigned long address);
  93 extern void free_page_tables(struct task_struct * tsk);
  94 extern void clear_page_tables(struct task_struct * tsk);
  95 extern int copy_page_tables(struct task_struct * to);
  96 extern int clone_page_tables(struct task_struct * to);
  97 extern int unmap_page_range(unsigned long from, unsigned long size);
  98 extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, int mask);
  99 extern int zeromap_page_range(unsigned long from, unsigned long size, int mask);
 100 
 101 extern void do_wp_page(unsigned long error_code, unsigned long address,
 102         struct task_struct *tsk, unsigned long user_esp);
 103 extern void do_no_page(unsigned long error_code, unsigned long address,
 104         struct task_struct *tsk, unsigned long user_esp);
 105 
 106 extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem);
 107 extern void mem_init(unsigned long low_start_mem,
 108                      unsigned long start_mem, unsigned long end_mem);
 109 extern void show_mem(void);
 110 extern void oom(struct task_struct * task);
 111 extern void si_meminfo(struct sysinfo * val);
 112 
 113 /* vmalloc.c */
 114 
 115 extern void * vmalloc(unsigned long size);
 116 extern void vfree(void * addr);
 117 extern int vread(char *buf, char *addr, int count);
 118 
 119 /* swap.c */
 120 
 121 extern void swap_free(unsigned long page_nr);
 122 extern unsigned long swap_duplicate(unsigned long page_nr);
 123 extern void swap_in(unsigned long *table_ptr);
 124 extern void si_swapinfo(struct sysinfo * val);
 125 extern void rw_swap_page(int rw, unsigned long nr, char * buf);
 126 
 127 /* mmap.c */
 128 extern int do_mmap(struct file * file, unsigned long addr, unsigned long len,
 129         unsigned long prot, unsigned long flags, unsigned long off);
 130 typedef int (*map_mergep_fnp)(const struct vm_area_struct *,
 131                               const struct vm_area_struct *, void *);
 132 extern void merge_segments(struct vm_area_struct *, map_mergep_fnp, void *);
 133 extern void insert_vm_struct(struct task_struct *, struct vm_area_struct *);
 134 extern int ignoff_mergep(const struct vm_area_struct *,
 135                          const struct vm_area_struct *, void *);
 136 extern int do_munmap(unsigned long, size_t);
 137 
 138 #define read_swap_page(nr,buf) \
 139         rw_swap_page(READ,(nr),(buf))
 140 #define write_swap_page(nr,buf) \
 141         rw_swap_page(WRITE,(nr),(buf))
 142 
 143 #define invalidate() \
 144 __asm__ __volatile__("movl %%cr3,%%eax\n\tmovl %%eax,%%cr3": : :"ax")
 145 
 146 extern unsigned long high_memory;
 147 
 148 #define MAP_NR(addr) ((addr) >> PAGE_SHIFT)
 149 #define MAP_PAGE_RESERVED (1<<15)
 150 
 151 extern unsigned short * mem_map;
 152 
 153 #define PAGE_PRESENT    0x001
 154 #define PAGE_RW         0x002
 155 #define PAGE_USER       0x004
 156 #define PAGE_PWT        0x008   /* 486 only - not used currently */
 157 #define PAGE_PCD        0x010   /* 486 only - not used currently */
 158 #define PAGE_ACCESSED   0x020
 159 #define PAGE_DIRTY      0x040
 160 #define PAGE_COW        0x200   /* implemented in software (one of the AVL bits) */
 161 
 162 #define PAGE_PRIVATE    (PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_ACCESSED | PAGE_COW)
 163 #define PAGE_SHARED     (PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_ACCESSED)
 164 #define PAGE_COPY       (PAGE_PRESENT | PAGE_USER | PAGE_ACCESSED | PAGE_COW)
 165 #define PAGE_READONLY   (PAGE_PRESENT | PAGE_USER | PAGE_ACCESSED)
 166 #define PAGE_TABLE      (PAGE_PRESENT | PAGE_RW | PAGE_USER | PAGE_ACCESSED)
 167 
 168 #define GFP_BUFFER      0x00
 169 #define GFP_ATOMIC      0x01
 170 #define GFP_USER        0x02
 171 #define GFP_KERNEL      0x03
 172 
 173 
 174 /* vm_ops not present page codes */
 175 #define SHM_SWP_TYPE 0x41        
 176 extern void shm_no_page (ulong *);
 177 
 178 #endif

/* [previous][next][first][last][top][bottom][index][help] */