root/mm/page_alloc.c


DEFINITIONS

This source file includes the following definitions:
  1. init_mem_queue
  2. add_mem_queue
  3. remove_mem_queue
  4. free_pages_ok
  5. free_pages
  6. __get_free_pages
  7. show_free_areas
  8. free_area_init
  9. swap_in

/*
 *  linux/mm/page_alloc.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/swap.h>
#include <linux/fs.h>
#include <linux/swapctl.h>

#include <asm/dma.h>
#include <asm/system.h> /* for cli()/sti() */
#include <asm/segment.h> /* for memcpy_to/fromfs */
#include <asm/bitops.h>
#include <asm/pgtable.h>

int nr_swap_pages = 0;
int nr_free_pages = 0;

/*
 * Free area management
 *
 * The free_area array contains the queue heads of the free areas
 * of different sizes
 */

#define NR_MEM_LISTS 6

struct free_area_struct {
        struct page list;
        unsigned int * map;
};

static struct free_area_struct free_area[NR_MEM_LISTS];
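
/*
 * free_area[order] anchors a circular, doubly linked list of free
 * blocks of 2^order pages (order 0..NR_MEM_LISTS-1, i.e. 1..32 pages),
 * and free_area[order].map is a bitmap with one bit per *pair* of
 * buddies of that size.  The bit is toggled whenever either buddy is
 * allocated or freed, so a set bit means exactly one of the two
 * buddies is currently free; free_pages_ok() and RMQUEUE()/EXPAND()
 * below rely on this to decide when to coalesce or split blocks.
 */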

static inline void init_mem_queue(struct page * head)
{
        head->next = head;
        head->prev = head;
}

static inline void add_mem_queue(struct page * head, struct page * entry)
{
        struct page * next = head->next;

        entry->prev = head;
        entry->next = next;
        next->prev = entry;
        head->next = entry;
}

static inline void remove_mem_queue(struct page * head, struct page * entry)
{
        struct page * next = entry->next;
        struct page * prev = entry->prev;
        next->prev = prev;
        prev->next = next;
}
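
/*
 * The three helpers above are a minimal circular, doubly linked list:
 * an empty list has head->next == head->prev == head, new entries are
 * pushed right after the head, and removal only needs the entry
 * itself (the head argument of remove_mem_queue() is not used).
 */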

/*
 * Free_page() adds the page to the free lists. This is optimized for
 * fast normal cases (no error jumps taken normally).
 *
 * The way to optimize jumps for gcc-2.2.2 is to:
 *  - select the "normal" case and put it inside the if () { XXX }
 *  - no else-statements if you can avoid them
 *
 * With the above two rules, you get a straight-line execution path
 * for the normal case, giving better asm-code.
 *
 * free_page() may sleep since the page being freed may be a buffer
 * page or present in the swap cache. It will not sleep, however,
 * for a freshly allocated page (get_free_page()).
 */

/*
 * Buddy system. Hairy. You really aren't expected to understand this
 */
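
/*
 * A worked example of the coalescing loop below (page numbers picked
 * arbitrarily): freeing an order-0 page with map_nr 5 gives
 * index = 5 >> 1 = 2 and mask = ~0UL.  change_bit(2, free_area[0].map)
 * toggles the bit covering the buddy pair {4,5}:
 *
 *  - if the bit was 0, page 4 is still in use, so we stop and queue
 *    page 5 on free_area[0].list;
 *  - if it was 1, page 4 was free: it is unlinked (its map_nr is
 *    5 ^ (1+~mask) = 5 ^ 1 = 4) and we retry one order up with
 *    map_nr 4, trying to merge block 4-5 with its buddy 6-7, and so
 *    on until a buddy is found busy or NR_MEM_LISTS-1 is reached.
 *
 * The merged block is finally queued on free_area[order].list.
 */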
static inline void free_pages_ok(unsigned long map_nr, unsigned long order)
{
        unsigned long index = map_nr >> (1 + order);
        unsigned long mask = (~0UL) << order;
        unsigned long flags;

        save_flags(flags);
        cli();

#define list(x) (mem_map+(x))

        map_nr &= mask;
        nr_free_pages += 1 << order;
        while (order < NR_MEM_LISTS-1) {
                if (!change_bit(index, free_area[order].map))
                        break;
                remove_mem_queue(&free_area[order].list, list(map_nr ^ (1+~mask)));
                mask <<= 1;
                order++;
                index >>= 1;
                map_nr &= mask;
        }
        add_mem_queue(&free_area[order].list, list(map_nr));

#undef list

        restore_flags(flags);
}

void free_pages(unsigned long addr, unsigned long order)
{
        unsigned long map_nr = MAP_NR(addr);

        if (map_nr < MAP_NR(high_memory)) {
                mem_map_t * map = mem_map + map_nr;
                if (PageReserved(map))
                        return;
                if (atomic_dec_and_test(&map->count)) {
                        delete_from_swap_cache(map_nr);
                        free_pages_ok(map_nr, order);
                        return;
                }
        }
}
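
/*
 * Note that free_pages() only hands the block back to the buddy lists
 * once the use count drops to zero; PageReserved pages are never
 * freed, a page still in the swap cache is removed from it first, and
 * addresses beyond high_memory are silently ignored.
 */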

/*
 * Some ugly macros to speed up __get_free_pages()..
 */
#define MARK_USED(index, order, area) \
        change_bit((index) >> (1+(order)), (area)->map)
#define CAN_DMA(x) (PageDMA(x))
#define ADDRESS(x) (PAGE_OFFSET + ((x) << PAGE_SHIFT))
#define RMQUEUE(order, dma) \
do { struct free_area_struct * area = free_area+order; \
     unsigned long new_order = order; \
        do { struct page *prev = &area->list, *ret; \
                while (&area->list != (ret = prev->next)) { \
                        if (!dma || CAN_DMA(ret)) { \
                                unsigned long map_nr = ret->map_nr; \
                                (prev->next = ret->next)->prev = prev; \
                                MARK_USED(map_nr, new_order, area); \
                                nr_free_pages -= 1 << order; \
                                EXPAND(ret, map_nr, order, new_order, area); \
                                restore_flags(flags); \
                                return ADDRESS(map_nr); \
                        } \
                        prev = ret; \
                } \
                new_order++; area++; \
        } while (new_order < NR_MEM_LISTS); \
} while (0)

#define EXPAND(map,index,low,high,area) \
do { unsigned long size = 1 << high; \
        while (high > low) { \
                area--; high--; size >>= 1; \
                add_mem_queue(&area->list, map); \
                MARK_USED(index, high, area); \
                index += size; \
                map += size; \
        } \
        map->count = 1; \
        map->age = PAGE_INITIAL_AGE; \
} while (0)
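
/*
 * RMQUEUE(order, dma) scans free_area[order], free_area[order+1], ...
 * for the first free block that fits (any block, or one marked PG_DMA
 * when dma is set).  The block is unlinked and its buddy bit toggled,
 * then EXPAND() trims it down to the requested size: while the found
 * order ("high") exceeds the wanted order ("low"), the block is
 * halved, the lower half is queued on the next smaller free list and
 * splitting continues in the upper half.  The piece that survives
 * gets count = 1 and a fresh age; since the index argument is the
 * caller's map_nr and is advanced along with the split, RMQUEUE ends
 * up returning the address of exactly that piece.
 */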

unsigned long __get_free_pages(int priority, unsigned long order, int dma)
{
        unsigned long flags;
        int reserved_pages;

        if (order >= NR_MEM_LISTS)
                return 0;
        if (intr_count && priority != GFP_ATOMIC) {
                static int count = 0;
                if (++count < 5) {
                        printk("gfp called nonatomically from interrupt %p\n",
                                __builtin_return_address(0));
                        priority = GFP_ATOMIC;
                }
        }
        reserved_pages = 5;
        if (priority != GFP_NFS)
                reserved_pages = min_free_pages;
        save_flags(flags);
repeat:
        cli();
        if ((priority==GFP_ATOMIC) || nr_free_pages > reserved_pages) {
                RMQUEUE(order, dma);
                restore_flags(flags);
                return 0;
        }
        restore_flags(flags);
        if (priority != GFP_BUFFER && try_to_free_page(priority, dma, 1))
                goto repeat;
        return 0;
}
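
/*
 * Allocation policy above: GFP_ATOMIC requests are served from
 * whatever is free and never sleep; all other priorities must leave
 * the reserve untouched (min_free_pages, or only 5 pages for GFP_NFS)
 * and, when that fails, call try_to_free_page() and retry.  GFP_BUFFER
 * is the exception and gives up immediately rather than re-entering
 * memory reclaim.  A typical caller (a sketch, not taken from this
 * file) looks like:
 *
 *      unsigned long buf = __get_free_pages(GFP_KERNEL, 1, 0);
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      free_pages(buf, 1);
 *
 * with __get_free_page()/get_free_page() in <linux/mm.h> being the
 * usual order-0 wrappers.
 */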

/*
 * Show free area list (used inside shift_scroll-lock stuff).
 * We walk every free list and print how many blocks of each order are
 * free, which gives a rough picture of memory fragmentation.
 */
void show_free_areas(void)
{
        unsigned long order, flags;
        unsigned long total = 0;

        printk("Free pages:      %6dkB\n ( ",nr_free_pages<<(PAGE_SHIFT-10));
        save_flags(flags);
        cli();
        for (order=0 ; order < NR_MEM_LISTS; order++) {
                struct page * tmp;
                unsigned long nr = 0;
                for (tmp = free_area[order].list.next ; tmp != &free_area[order].list ; tmp = tmp->next) {
                        nr ++;
                }
                total += nr * ((PAGE_SIZE>>10) << order);
                printk("%lu*%lukB ", nr, (PAGE_SIZE>>10) << order);
        }
        restore_flags(flags);
        printk("= %lukB)\n", total);
#ifdef SWAP_CACHE_INFO
        show_swap_cache_info();
#endif
}

#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

/*
 * set up the free-area data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 */
unsigned long free_area_init(unsigned long start_mem, unsigned long end_mem)
{
        mem_map_t * p;
        unsigned long mask = PAGE_MASK;
        int i;

        /*
         * select nr of pages we try to keep free for important stuff
         * with a minimum of 16 pages. This is totally arbitrary
         */
        i = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT+7);
        if (i < 16)
                i = 16;
        min_free_pages = i;
        free_pages_low = i + (i>>1);
        free_pages_high = i + i;
        start_mem = init_swap_cache(start_mem, end_mem);
        mem_map = (mem_map_t *) start_mem;
        p = mem_map + MAP_NR(end_mem);
        start_mem = LONG_ALIGN((unsigned long) p);
        memset(mem_map, 0, start_mem - (unsigned long) mem_map);
        do {
                --p;
                p->flags = (1 << PG_DMA) | (1 << PG_reserved);
                p->map_nr = p - mem_map;
        } while (p > mem_map);

        for (i = 0 ; i < NR_MEM_LISTS ; i++) {
                unsigned long bitmap_size;
                init_mem_queue(&free_area[i].list);
                mask += mask;
                end_mem = (end_mem + ~mask) & mask;
                bitmap_size = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT + i);
                bitmap_size = (bitmap_size + 7) >> 3;
                bitmap_size = LONG_ALIGN(bitmap_size);
                free_area[i].map = (unsigned int *) start_mem;
                memset((void *) start_mem, 0, bitmap_size);
                start_mem += bitmap_size;
        }
        return start_mem;
}
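
/*
 * A worked sizing example (a sketch, assuming 16MB of RAM mapped at
 * PAGE_OFFSET and 4kB pages, i.e. 4096 page frames): min_free_pages
 * becomes memory/512kB = 32 pages (clamped to at least 16), with
 * free_pages_low/free_pages_high at 1.5x and 2x that.  After the
 * mem_map array itself, the per-order bitmaps carved out of start_mem
 * are 4096 bits (512 bytes) for order 0, 2048 bits for order 1, down
 * to 128 bits for order 5, each rounded up to whole longs.  Every
 * frame starts out PG_reserved here; the usable ones are released
 * later by the architecture's mem_init().
 */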

/*
 * The tests may look silly, but they essentially make sure that
 * no other process did a swap-in on us just as we were waiting.
 *
 * Also, don't bother to add to the swap cache if this page-in
 * was due to a write access.
 */
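/*
 * Both __get_free_page(GFP_KERNEL) and read_swap_page() may block, so
 * the page table entry is re-checked after each of them: if it no
 * longer matches the swap entry we were asked to bring in, another
 * process already serviced the fault and our freshly read page is
 * simply dropped.
 */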
void swap_in(struct task_struct * tsk, struct vm_area_struct * vma,
        pte_t * page_table, unsigned long entry, int write_access)
{
        unsigned long page = __get_free_page(GFP_KERNEL);

        if (pte_val(*page_table) != entry) {
                free_page(page);
                return;
        }
        if (!page) {
                set_pte(page_table, BAD_PAGE);
                swap_free(entry);
                oom(tsk);
                return;
        }
        read_swap_page(entry, (char *) page);
        if (pte_val(*page_table) != entry) {
                free_page(page);
                return;
        }
        vma->vm_mm->rss++;
        tsk->maj_flt++;
        if (!write_access && add_to_swap_cache(MAP_NR(page), entry)) {
                /* keep swap page allocated for the moment (swap cache) */
                set_pte(page_table, mk_pte(page, vma->vm_page_prot));
                return;
        }
        set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))));
        swap_free(entry);
        return;
}

