root/mm/swap.c


DEFINITIONS

This source file includes the following definitions:
  1. show_swap_cache_info
  2. add_to_swap_cache
  3. init_swap_cache
  4. rw_swap_page
  5. get_swap_page
  6. swap_duplicate
  7. swap_free
  8. swap_in
  9. try_to_swap_out
  10. swap_out_pmd
  11. swap_out_pgd
  12. swap_out_vma
  13. swap_out_process
  14. swap_out
  15. try_to_free_page
  16. add_mem_queue
  17. remove_mem_queue
  18. free_pages_ok
  19. check_free_buffers
  20. free_pages
  21. mark_used
  22. __get_free_pages
  23. __get_dma_pages
  24. show_free_areas
  25. unuse_pte
  26. unuse_pmd
  27. unuse_pgd
  28. unuse_vma
  29. unuse_process
  30. try_to_unuse
  31. sys_swapoff
  32. sys_swapon
  33. si_swapinfo
  34. free_area_init

   1 #define THREE_LEVEL
   2 /*
   3  *  linux/mm/swap.c
   4  *
   5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   6  */
   7 
   8 /*
    9  * This file should contain most of the code doing the swapping from/to disk.
  10  * Started 18.12.91
  11  */
  12 
  13 #include <linux/mm.h>
  14 #include <linux/sched.h>
  15 #include <linux/head.h>
  16 #include <linux/kernel.h>
  17 #include <linux/kernel_stat.h>
  18 #include <linux/errno.h>
  19 #include <linux/string.h>
  20 #include <linux/stat.h>
  21 #include <linux/fs.h>
  22 
  23 #include <asm/dma.h>
  24 #include <asm/system.h> /* for cli()/sti() */
  25 #include <asm/bitops.h>
  26 #include <asm/pgtable.h>
  27 
  28 #define MAX_SWAPFILES 8
  29 
  30 #define SWP_USED        1
  31 #define SWP_WRITEOK     3
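/*
 * SWP_WRITEOK includes the SWP_USED bit, so testing
 * (flags & SWP_WRITEOK) == SWP_WRITEOK accepts only areas that are both
 * in use and fully enabled; swapoff drops an area back to plain SWP_USED
 * while it drains.
 */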
  32 
  33 #define SWP_TYPE(entry) (((entry) >> 1) & 0x7f)
  34 #define SWP_OFFSET(entry) ((entry) >> 12)
  35 #define SWP_ENTRY(type,offset) (((type) << 1) | ((offset) << 12))
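/*
 * A swap entry keeps bit 0 clear (so it can never look like a present pte),
 * packs the swap area index ("type") into bits 1-7 and the page offset
 * within that area into bits 12 and up.  For example, type 2 at offset 5
 * encodes as (2 << 1) | (5 << 12) = 0x5004.
 */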
  36 
  37 int min_free_pages = 20;
  38 
  39 static int nr_swapfiles = 0;
  40 static struct wait_queue * lock_queue = NULL;
  41 
  42 static struct swap_info_struct {
  43         unsigned long flags;
  44         struct inode * swap_file;
  45         unsigned int swap_device;
  46         unsigned char * swap_map;
  47         unsigned char * swap_lockmap;
  48         int pages;
  49         int lowest_bit;
  50         int highest_bit;
  51         unsigned long max;
  52 } swap_info[MAX_SWAPFILES];
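/*
 * swap_map has one byte per page of the swap area: 0 means free, 0x80
 * marks a page that must not be used (the signature page and any bad
 * blocks), anything else is a use count.  swap_lockmap is a bitmap used
 * to serialize page I/O on the area (see rw_swap_page and swap_free).
 */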
  53 
  54 extern int shm_swap (int);
  55 
  56 unsigned long *swap_cache;
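/*
 * swap_cache is indexed by physical page number (MAP_NR) and records, for
 * a page that still matches its on-disk copy, the swap entry holding that
 * copy.  try_to_swap_out can then drop such a page without writing it back.
 */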
  57 
  58 #ifdef SWAP_CACHE_INFO
  59 unsigned long swap_cache_add_total = 0;
  60 unsigned long swap_cache_add_success = 0;
  61 unsigned long swap_cache_del_total = 0;
  62 unsigned long swap_cache_del_success = 0;
  63 unsigned long swap_cache_find_total = 0;
  64 unsigned long swap_cache_find_success = 0;
  65 
  66 extern inline void show_swap_cache_info(void)
  67 {
  68         printk("Swap cache: add %ld/%ld, delete %ld/%ld, find %ld/%ld\n",
  69                 swap_cache_add_total, swap_cache_add_success, 
  70                 swap_cache_del_total, swap_cache_del_success,
  71                 swap_cache_find_total, swap_cache_find_success);
  72 }
  73 #endif
  74 
  75 static int add_to_swap_cache(unsigned long addr, unsigned long entry)
  76 {
  77         struct swap_info_struct * p = &swap_info[SWP_TYPE(entry)];
  78 
  79 #ifdef SWAP_CACHE_INFO
  80         swap_cache_add_total++;
  81 #endif
  82         if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
  83                 entry = (unsigned long) xchg_ptr(swap_cache + MAP_NR(addr), (void *) entry);
  84                 if (entry)  {
  85                         printk("swap_cache: replacing non-NULL entry\n");
  86                 }
  87 #ifdef SWAP_CACHE_INFO
  88                 swap_cache_add_success++;
  89 #endif
  90                 return 1;
  91         }
  92         return 0;
  93 }
  94 
  95 static unsigned long init_swap_cache(unsigned long mem_start,
  96         unsigned long mem_end)
  97 {
  98         unsigned long swap_cache_size;
  99 
 100         mem_start = (mem_start + 15) & ~15;
 101         swap_cache = (unsigned long *) mem_start;
 102         swap_cache_size = MAP_NR(mem_end);
 103         memset(swap_cache, 0, swap_cache_size * sizeof (unsigned long));
 104         return (unsigned long) (swap_cache + swap_cache_size);
 105 }
 106 
 107 void rw_swap_page(int rw, unsigned long entry, char * buf)
 108 {
 109         unsigned long type, offset;
 110         struct swap_info_struct * p;
 111 
 112         type = SWP_TYPE(entry);
 113         if (type >= nr_swapfiles) {
 114                 printk("Internal error: bad swap-device\n");
 115                 return;
 116         }
 117         p = &swap_info[type];
 118         offset = SWP_OFFSET(entry);
 119         if (offset >= p->max) {
 120                 printk("rw_swap_page: weirdness\n");
 121                 return;
 122         }
 123         if (p->swap_map && !p->swap_map[offset]) {
 124                 printk("Hmm.. Trying to use unallocated swap (%08lx)\n", entry);
 125                 return;
 126         }
 127         if (!(p->flags & SWP_USED)) {
 128                 printk("Trying to swap to unused swap-device\n");
 129                 return;
 130         }
 131         while (set_bit(offset,p->swap_lockmap))
 132                 sleep_on(&lock_queue);
 133         if (rw == READ)
 134                 kstat.pswpin++;
 135         else
 136                 kstat.pswpout++;
 137         if (p->swap_device) {
 138                 ll_rw_page(rw,p->swap_device,offset,buf);
 139         } else if (p->swap_file) {
 140                 struct inode *swapf = p->swap_file;
 141                 unsigned int zones[8];
 142                 int i;
 143                 if (swapf->i_op->bmap == NULL
 144                         && swapf->i_op->smap != NULL){
 145                         /*
  146                                 With MS-DOS, we use msdos_smap, which returns
  147                                 a sector number (not a cluster or block number).
  148                                 It is a patch to enable the UMSDOS project.
  149                                 Other people are working on a better solution.
  150 
  151                                 ll_rw_swap_file sizes its operations (sector
  152                                 size) based on PAGE_SIZE and the number of
  153                                 blocks to read, so using either bmap or smap
  154                                 should work, even though smap will require
  155                                 more blocks.
 156                         */
 157                         int j;
 158                         unsigned int block = offset << 3;
 159 
 160                         for (i=0, j=0; j< PAGE_SIZE ; i++, j += 512){
 161                                 if (!(zones[i] = swapf->i_op->smap(swapf,block++))) {
 162                                         printk("rw_swap_page: bad swap file\n");
 163                                         return;
 164                                 }
 165                         }
 166                 }else{
 167                         int j;
 168                         unsigned int block = offset
 169                                 << (12 - swapf->i_sb->s_blocksize_bits);
 170 
 171                         for (i=0, j=0; j< PAGE_SIZE ; i++, j +=swapf->i_sb->s_blocksize)
 172                                 if (!(zones[i] = bmap(swapf,block++))) {
 173                                         printk("rw_swap_page: bad swap file\n");
 174                                         return;
 175                                 }
 176                 }
 177                 ll_rw_swap_file(rw,swapf->i_dev, zones, i,buf);
 178         } else
  179                 printk("rw_swap_page: no swap file or device\n");
 180         if (offset && !clear_bit(offset,p->swap_lockmap))
 181                 printk("rw_swap_page: lock already cleared\n");
 182         wake_up(&lock_queue);
 183 }
 184 
 185 unsigned int get_swap_page(void)
 186 {
 187         struct swap_info_struct * p;
 188         unsigned int offset, type;
 189 
 190         p = swap_info;
 191         for (type = 0 ; type < nr_swapfiles ; type++,p++) {
 192                 if ((p->flags & SWP_WRITEOK) != SWP_WRITEOK)
 193                         continue;
 194                 for (offset = p->lowest_bit; offset <= p->highest_bit ; offset++) {
 195                         if (p->swap_map[offset])
 196                                 continue;
 197                         p->swap_map[offset] = 1;
 198                         nr_swap_pages--;
 199                         if (offset == p->highest_bit)
 200                                 p->highest_bit--;
 201                         p->lowest_bit = offset;
 202                         return SWP_ENTRY(type,offset);
 203                 }
 204         }
 205         return 0;
 206 }
 207 
 208 void swap_duplicate(unsigned long entry)
 209 {
 210         struct swap_info_struct * p;
 211         unsigned long offset, type;
 212 
 213         if (!entry)
 214                 return;
 215         offset = SWP_OFFSET(entry);
 216         type = SWP_TYPE(entry);
 217         if (type == SHM_SWP_TYPE)
 218                 return;
 219         if (type >= nr_swapfiles) {
 220                 printk("Trying to duplicate nonexistent swap-page\n");
 221                 return;
 222         }
 223         p = type + swap_info;
 224         if (offset >= p->max) {
 225                 printk("swap_duplicate: weirdness\n");
 226                 return;
 227         }
 228         if (!p->swap_map[offset]) {
 229                 printk("swap_duplicate: trying to duplicate unused page\n");
 230                 return;
 231         }
 232         p->swap_map[offset]++;
 233         return;
 234 }
 235 
 236 void swap_free(unsigned long entry)
 237 {
 238         struct swap_info_struct * p;
 239         unsigned long offset, type;
 240 
 241         if (!entry)
 242                 return;
 243         type = SWP_TYPE(entry);
 244         if (type == SHM_SWP_TYPE)
 245                 return;
 246         if (type >= nr_swapfiles) {
 247                 printk("Trying to free nonexistent swap-page\n");
 248                 return;
 249         }
 250         p = & swap_info[type];
 251         offset = SWP_OFFSET(entry);
 252         if (offset >= p->max) {
 253                 printk("swap_free: weirdness\n");
 254                 return;
 255         }
 256         if (!(p->flags & SWP_USED)) {
 257                 printk("Trying to free swap from unused swap-device\n");
 258                 return;
 259         }
 260         while (set_bit(offset,p->swap_lockmap))
 261                 sleep_on(&lock_queue);
 262         if (offset < p->lowest_bit)
 263                 p->lowest_bit = offset;
 264         if (offset > p->highest_bit)
 265                 p->highest_bit = offset;
 266         if (!p->swap_map[offset])
 267                 printk("swap_free: swap-space map bad (entry %08lx)\n",entry);
 268         else
 269                 if (!--p->swap_map[offset])
 270                         nr_swap_pages++;
 271         if (!clear_bit(offset,p->swap_lockmap))
 272                 printk("swap_free: lock already cleared\n");
 273         wake_up(&lock_queue);
 274 }
 275 
 276 /*
  277  * The tests may look silly, but they essentially make sure that
 278  * no other process did a swap-in on us just as we were waiting.
 279  *
 280  * Also, don't bother to add to the swap cache if this page-in
 281  * was due to a write access.
 282  */
 283 void swap_in(struct vm_area_struct * vma, pte_t * page_table,
 284         unsigned long entry, int write_access)
 285 {
 286         unsigned long page = get_free_page(GFP_KERNEL);
 287 
 288         if (pte_val(*page_table) != entry) {
 289                 free_page(page);
 290                 return;
 291         }
 292         if (!page) {
 293                 *page_table = BAD_PAGE;
 294                 swap_free(entry);
 295                 oom(current);
 296                 return;
 297         }
 298         read_swap_page(entry, (char *) page);
 299         if (pte_val(*page_table) != entry) {
 300                 free_page(page);
 301                 return;
 302         }
 303         vma->vm_task->mm->rss++;
 304         vma->vm_task->mm->maj_flt++;
 305         if (!write_access && add_to_swap_cache(page, entry)) {
 306                 *page_table = mk_pte(page, vma->vm_page_prot);
 307                 return;
 308         }
 309         *page_table = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
 310         swap_free(entry);
 311         return;
 312 }
 313 
 314 static inline int try_to_swap_out(struct vm_area_struct* vma, unsigned offset, pte_t * page_table)
 315 {
 316         pte_t pte;
 317         unsigned long entry;
 318         unsigned long page;
 319 
 320         pte = *page_table;
 321         if (!pte_present(pte))
 322                 return 0;
 323         page = pte_page(pte);
 324         if (page >= high_memory)
 325                 return 0;
 326         if (mem_map[MAP_NR(page)] & MAP_PAGE_RESERVED)
 327                 return 0;
 328         if ((pte_dirty(pte) && delete_from_swap_cache(page)) || pte_young(pte))  {
 329                 *page_table = pte_mkold(pte);
 330                 return 0;
 331         }       
 332         if (pte_dirty(pte)) {
 333                 if (mem_map[MAP_NR(page)] != 1)
 334                         return 0;
 335                 if (vma->vm_ops && vma->vm_ops->swapout)
 336                         vma->vm_ops->swapout(vma, offset, page_table);
 337                 else {
 338                         if (!(entry = get_swap_page()))
 339                                 return 0;
 340                         pte_val(*page_table) = entry;
 341                         invalidate();
 342                         write_swap_page(entry, (char *) page);
 343                 }
 344                 free_page(page);
 345                 return 1 + mem_map[MAP_NR(page)];
 346         }
 347         if ((entry = find_in_swap_cache(page)))  {
 348                 if (mem_map[MAP_NR(page)] != 1) {
 349                         *page_table = pte_mkdirty(pte);
 350                         printk("Aiee.. duplicated cached swap-cache entry\n");
 351                         return 0;
 352                 }
 353                 pte_val(*page_table) = entry;
 354                 invalidate();
 355                 free_page(page);
 356                 return 1;
 357         } 
 358         pte_clear(page_table);
 359         invalidate();
 360         free_page(page);
 361         return 1 + mem_map[MAP_NR(page)];
 362 }
 363 
 364 /*
  365  * A new implementation of swap_out().  We do not swap out complete
  366  * processes, but only a small number of blocks, before we continue with
  367  * the next process.  The number of blocks actually swapped is determined
  368  * by the number of page faults this process has had recently, so we
  369  * won't swap heavily used processes all the time ...
  370  *
  371  * Note: the priority argument is a hint on how much CPU to spend on the
  372  *       swap block search, not a hint of how many blocks to swap from
  373  *       each process.
 374  *
 375  * (C) 1993 Kai Petzke, wpp@marie.physik.tu-berlin.de
 376  */
 377 
 378 /*
 379  * These are the minimum and maximum number of pages to swap from one process,
 380  * before proceeding to the next:
 381  */
 382 #define SWAP_MIN        4
 383 #define SWAP_MAX        32
 384 
 385 /*
 386  * The actual number of pages to swap is determined as:
 387  * SWAP_RATIO / (number of recent major page faults)
 388  */
 389 #define SWAP_RATIO      128
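/*
 * For example, a process whose decaying fault count comes out at 16 gets
 * SWAP_RATIO / 16 = 8 pages swapped before we move on; counts of 32 or
 * more are clamped to SWAP_MIN pages, counts of 4 or less to SWAP_MAX.
 * See the swap_cnt calculation in swap_out() below.
 */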
 390 
 391 static inline int swap_out_pmd(struct vm_area_struct * vma, pmd_t *dir,
 392         unsigned long address, unsigned long size, unsigned long offset)
 393 {
 394         pte_t * pte;
 395         unsigned long end;
 396 
 397         if (pmd_none(*dir))
 398                 return 0;
 399         if (pmd_bad(*dir)) {
 400                 printk("swap_out_pmd: bad pmd (%08lx)\n", pmd_val(*dir));
 401                 pmd_clear(dir);
 402                 return 0;
 403         }
 404         pte = pte_offset(dir, address);
 405         offset += address & PMD_MASK;
 406         address &= ~PMD_MASK;
 407         end = address + size;
 408         if (end > PMD_SIZE)
 409                 end = PMD_SIZE;
 410         do {
 411                 switch (try_to_swap_out(vma, offset+address-vma->vm_start, pte)) {
 412                         case 0:
 413                                 break;
 414 
 415                         case 1:
 416                                 vma->vm_task->mm->rss--;
 417                                 /* continue with the following page the next time */
 418                                 vma->vm_task->mm->swap_address = address + offset + PAGE_SIZE;
 419                                 return 1;
 420 
 421                         default:
 422                                 vma->vm_task->mm->rss--;
 423                                 break;
 424                 }
 425                 address += PAGE_SIZE;
 426                 pte++;
 427         } while (address < end);
 428         return 0;
 429 }
 430 
 431 static inline int swap_out_pgd(struct vm_area_struct * vma, pgd_t *dir,
 432         unsigned long address, unsigned long size)
 433 {
 434         pmd_t * pmd;
 435         unsigned long offset, end;
 436 
 437         if (pgd_none(*dir))
 438                 return 0;
 439         if (pgd_bad(*dir)) {
 440                 printk("swap_out_pgd: bad pgd (%08lx)\n", pgd_val(*dir));
 441                 pgd_clear(dir);
 442                 return 0;
 443         }
 444         pmd = pmd_offset(dir, address);
 445         offset = address & PGDIR_MASK;
 446         address &= ~PGDIR_MASK;
 447         end = address + size;
 448         if (end > PGDIR_SIZE)
 449                 end = PGDIR_SIZE;
 450         do {
 451                 if (swap_out_pmd(vma, pmd, address, end - address, offset))
 452                         return 1;
 453                 address = (address + PMD_SIZE) & PMD_MASK;
 454                 pmd++;
 455         } while (address < end);
 456         return 0;
 457 }
 458 
 459 static int swap_out_vma(struct vm_area_struct * vma, pgd_t *pgdir,
 460         unsigned long start, unsigned long end)
 461 {
 462         while (start < end) {
 463                 if (swap_out_pgd(vma, pgdir, start, end - start))
 464                         return 1;
 465                 start = (start + PGDIR_SIZE) & PGDIR_MASK;
 466                 pgdir++;
 467         }
 468         return 0;
 469 }
 470 
 471 static int swap_out_process(struct task_struct * p)
 472 {
 473         unsigned long address;
 474         struct vm_area_struct* vma;
 475 
 476         /*
 477          * Go through process' page directory.
 478          */
 479         address = p->mm->swap_address;
 480         p->mm->swap_address = 0;
 481 
 482         /*
 483          * Find the proper vm-area
 484          */
 485         vma = find_vma(p, address);
 486         if (!vma)
 487                 return 0;
 488         if (address < vma->vm_start)
 489                 address = vma->vm_start;
 490 
 491         for (;;) {
 492                 if (swap_out_vma(vma, pgd_offset(p, address), address, vma->vm_end))
 493                         return 1;
 494                 vma = vma->vm_next;
 495                 if (!vma)
 496                         return 0;
 497                 address = vma->vm_start;
 498         }
 499 }
 500 
 501 static int swap_out(unsigned int priority)
 502 {
 503         static int swap_task;
 504         int loop;
 505         int counter = NR_TASKS * 2 >> priority;
 506         struct task_struct *p;
 507 
 508         counter = NR_TASKS * 2 >> priority;
 509         for(; counter >= 0; counter--, swap_task++) {
 510                 /*
 511                  * Check that swap_task is suitable for swapping.  If not, look for
 512                  * the next suitable process.
 513                  */
 514                 loop = 0;
 515                 while(1) {
 516                         if (swap_task >= NR_TASKS) {
 517                                 swap_task = 1;
 518                                 if (loop)
 519                                         /* all processes are unswappable or already swapped out */
 520                                         return 0;
 521                                 loop = 1;
 522                         }
 523 
 524                         p = task[swap_task];
 525                         if (p && p->mm->swappable && p->mm->rss)
 526                                 break;
 527 
 528                         swap_task++;
 529                 }
 530 
 531                 /*
 532                  * Determine the number of pages to swap from this process.
 533                  */
 534                 if (!p->mm->swap_cnt) {
 535                         p->mm->dec_flt = (p->mm->dec_flt * 3) / 4 + p->mm->maj_flt - p->mm->old_maj_flt;
 536                         p->mm->old_maj_flt = p->mm->maj_flt;
 537 
 538                         if (p->mm->dec_flt >= SWAP_RATIO / SWAP_MIN) {
 539                                 p->mm->dec_flt = SWAP_RATIO / SWAP_MIN;
 540                                 p->mm->swap_cnt = SWAP_MIN;
 541                         } else if (p->mm->dec_flt <= SWAP_RATIO / SWAP_MAX)
 542                                 p->mm->swap_cnt = SWAP_MAX;
 543                         else
 544                                 p->mm->swap_cnt = SWAP_RATIO / p->mm->dec_flt;
 545                 }
 546                 if (swap_out_process(p)) {
 547                         if ((--p->mm->swap_cnt) == 0)
 548                                 swap_task++;
 549                         return 1;
 550                 }
 551         }
 552         return 0;
 553 }
 554 
 555 static int try_to_free_page(int priority)
 556 {
 557         int i=6;
 558 
 559         while (i--) {
 560                 if (priority != GFP_NOBUFFER && shrink_buffers(i))
 561                         return 1;
 562                 if (shm_swap(i))
 563                         return 1;
 564                 if (swap_out(i))
 565                         return 1;
 566         }
 567         return 0;
 568 }
 569 
 570 static inline void add_mem_queue(struct mem_list * head, struct mem_list * entry)
 571 {
 572         entry->prev = head;
 573         (entry->next = head->next)->prev = entry;
 574         head->next = entry;
 575 }
 576 
 577 static inline void remove_mem_queue(struct mem_list * head, struct mem_list * entry)
 578 {
 579         entry->next->prev = entry->prev;
 580         entry->prev->next = entry->next;
 581 }
 582 
 583 /*
 584  * Free_page() adds the page to the free lists. This is optimized for
 585  * fast normal cases (no error jumps taken normally).
 586  *
 587  * The way to optimize jumps for gcc-2.2.2 is to:
 588  *  - select the "normal" case and put it inside the if () { XXX }
 589  *  - no else-statements if you can avoid them
 590  *
 591  * With the above two rules, you get a straight-line execution path
 592  * for the normal case, giving better asm-code.
 593  *
 594  * free_page() may sleep since the page being freed may be a buffer
 595  * page or present in the swap cache. It will not sleep, however,
 596  * for a freshly allocated page (get_free_page()).
 597  */
 598 
 599 /*
 600  * Buddy system. Hairy. You really aren't expected to understand this
 601  */
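/*
 * Roughly: "mask" is PAGE_MASK shifted left by the order, so (1+~mask) is
 * the block size in bytes and addr ^ (1+~mask) is the buddy's address.
 * free_area_map keeps one bit per buddy pair; while the buddy is also
 * free, the pair is merged and the loop retries one order higher.
 */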
 602 static inline void free_pages_ok(unsigned long addr, unsigned long order)
 603 {
 604         unsigned long index = MAP_NR(addr) >> (1 + order);
 605         unsigned long mask = PAGE_MASK << order;
 606 
 607         addr &= mask;
 608         nr_free_pages += 1 << order;
 609         while (order < NR_MEM_LISTS-1) {
 610                 if (!change_bit(index, free_area_map[order]))
 611                         break;
 612                 remove_mem_queue(free_area_list+order, (struct mem_list *) (addr ^ (1+~mask)));
 613                 order++;
 614                 index >>= 1;
 615                 mask <<= 1;
 616                 addr &= mask;
 617         }
 618         add_mem_queue(free_area_list+order, (struct mem_list *) addr);
 619 }
 620 
 621 static inline void check_free_buffers(unsigned long addr)
 622 {
 623         struct buffer_head * bh;
 624 
 625         bh = buffer_pages[MAP_NR(addr)];
 626         if (bh) {
 627                 struct buffer_head *tmp = bh;
 628                 do {
 629                         if (tmp->b_list == BUF_SHARED && tmp->b_dev != 0xffff)
 630                                 refile_buffer(tmp);
 631                         tmp = tmp->b_this_page;
 632                 } while (tmp != bh);
 633         }
 634 }
 635 
 636 void free_pages(unsigned long addr, unsigned long order)
 637 {
 638         if (addr < high_memory) {
 639                 unsigned long flag;
 640                 mem_map_t * map = mem_map + MAP_NR(addr);
 641                 if (*map) {
 642                         if (!(*map & MAP_PAGE_RESERVED)) {
 643                                 save_flags(flag);
 644                                 cli();
 645                                 if (!--*map)  {
 646                                         free_pages_ok(addr, order);
 647                                         delete_from_swap_cache(addr);
 648                                 }
 649                                 restore_flags(flag);
 650                                 if (*map == 1)
 651                                         check_free_buffers(addr);
 652                         }
 653                         return;
 654                 }
 655                 printk("Trying to free free memory (%08lx): memory probably corrupted\n",addr);
 656                 printk("PC = %p\n", __builtin_return_address(0));
 657                 return;
 658         }
 659 }
 660 
 661 /*
 662  * Some ugly macros to speed up __get_free_pages()..
 663  */
 664 #define RMQUEUE(order) \
 665 do { struct mem_list * queue = free_area_list+order; \
 666      unsigned long new_order = order; \
 667         do { struct mem_list *next = queue->next; \
 668                 if (queue != next) { \
 669                         (queue->next = next->next)->prev = queue; \
 670                         mark_used((unsigned long) next, new_order); \
 671                         nr_free_pages -= 1 << order; \
 672                         restore_flags(flags); \
 673                         EXPAND(next, order, new_order); \
 674                         return (unsigned long) next; \
 675                 } new_order++; queue++; \
 676         } while (new_order < NR_MEM_LISTS); \
 677 } while (0)
 678 
 679 static inline int mark_used(unsigned long addr, unsigned long order)
 680 {
 681         return change_bit(MAP_NR(addr) >> (1+order), free_area_map[order]);
 682 }
 683 
 684 #define EXPAND(addr,low,high) \
 685 do { unsigned long size = PAGE_SIZE << high; \
 686         while (high > low) { \
 687                 high--; size >>= 1; cli(); \
 688                 add_mem_queue(free_area_list+high, addr); \
 689                 mark_used((unsigned long) addr, high); \
 690                 restore_flags(flags); \
 691                 addr = (struct mem_list *) (size + (unsigned long) addr); \
 692         } mem_map[MAP_NR((unsigned long) addr)] = 1; \
 693 } while (0)
 694 
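/*
 * Allocation mirrors the freeing path: RMQUEUE() walks the free lists
 * from the requested order upwards until one is non-empty, and EXPAND()
 * hands the unused halves of an oversized block back to the lower-order
 * lists before returning the remainder with a use count of 1.
 */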
 695 unsigned long __get_free_pages(int priority, unsigned long order)
 696 {
 697         unsigned long flags;
 698         int reserved_pages;
 699 
 700         if (intr_count && priority != GFP_ATOMIC) {
 701                 static int count = 0;
 702                 if (++count < 5) {
 703                         printk("gfp called nonatomically from interrupt %p\n",
 704                                 __builtin_return_address(0));
 705                         priority = GFP_ATOMIC;
 706                 }
 707         }
 708         reserved_pages = 5;
 709         if (priority != GFP_NFS)
 710                 reserved_pages = min_free_pages;
 711         save_flags(flags);
 712 repeat:
 713         cli();
 714         if ((priority==GFP_ATOMIC) || nr_free_pages > reserved_pages) {
 715                 RMQUEUE(order);
 716                 restore_flags(flags);
 717                 return 0;
 718         }
 719         restore_flags(flags);
 720         if (priority != GFP_BUFFER && try_to_free_page(priority))
 721                 goto repeat;
 722         return 0;
 723 }
 724 
 725 /*
 726  * Yes, I know this is ugly. Don't tell me.
 727  */
 728 unsigned long __get_dma_pages(int priority, unsigned long order)
 729 {
 730         unsigned long list = 0;
 731         unsigned long result;
 732         unsigned long limit = MAX_DMA_ADDRESS;
 733 
 734         /* if (EISA_bus) limit = ~0UL; */
 735         if (priority != GFP_ATOMIC)
 736                 priority = GFP_BUFFER;
 737         for (;;) {
 738                 result = __get_free_pages(priority, order);
 739                 if (result < limit) /* covers failure as well */
 740                         break;
 741                 *(unsigned long *) result = list;
 742                 list = result;
 743         }
 744         while (list) {
 745                 unsigned long tmp = list;
 746                 list = *(unsigned long *) list;
 747                 free_pages(tmp, order);
 748         }
 749         return result;
 750 }
 751 
 752 /*
 753  * Show free area list (used inside shift_scroll-lock stuff)
 754  * We also calculate the percentage fragmentation. We do this by counting the
 755  * memory on each free list with the exception of the first item on the list.
 756  */
 757 void show_free_areas(void)
 758 {
 759         unsigned long order, flags;
 760         unsigned long total = 0;
 761 
 762         printk("Free pages:      %6dkB\n ( ",nr_free_pages<<(PAGE_SHIFT-10));
 763         save_flags(flags);
 764         cli();
 765         for (order=0 ; order < NR_MEM_LISTS; order++) {
 766                 struct mem_list * tmp;
 767                 unsigned long nr = 0;
 768                 for (tmp = free_area_list[order].next ; tmp != free_area_list + order ; tmp = tmp->next) {
 769                         nr ++;
 770                 }
 771                 total += nr * (4 << order);
 772                 printk("%lu*%ukB ", nr, 4 << order);
 773         }
 774         restore_flags(flags);
 775         printk("= %lukB)\n", total);
 776 #ifdef SWAP_CACHE_INFO
 777         show_swap_cache_info();
 778 #endif  
 779 }
 780 
 781 /*
 782  * Trying to stop swapping from a file is fraught with races, so
 783  * we repeat quite a bit here when we have to pause. swapoff()
 784  * isn't exactly timing-critical, so who cares (but this is /really/
 785  * inefficient, ugh).
 786  *
  787  * We return 1 after having slept, which makes the caller start over
  788  * from the beginning for this process.
 789  */
 790 static inline int unuse_pte(struct vm_area_struct * vma, unsigned long address,
 791         pte_t *dir, unsigned int type, unsigned long page)
 792 {
 793         pte_t pte = *dir;
 794 
 795         if (pte_none(pte))
 796                 return 0;
 797         if (pte_present(pte)) {
 798                 unsigned long page = pte_page(pte);
 799                 if (page >= high_memory)
 800                         return 0;
 801                 if (!in_swap_cache(page))
 802                         return 0;
 803                 if (SWP_TYPE(in_swap_cache(page)) != type)
 804                         return 0;
 805                 delete_from_swap_cache(page);
 806                 *dir = pte_mkdirty(pte);
 807                 return 0;
 808         }
 809         if (SWP_TYPE(pte_val(pte)) != type)
 810                 return 0;
 811         read_swap_page(pte_val(pte), (char *) page);
 812         if (pte_val(*dir) != pte_val(pte)) {
 813                 free_page(page);
 814                 return 1;
 815         }
 816         *dir = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
 817         ++vma->vm_task->mm->rss;
 818         swap_free(pte_val(pte));
 819         return 1;
 820 }
 821 
 822 static inline int unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
 823         unsigned long address, unsigned long size, unsigned long offset,
 824         unsigned int type, unsigned long page)
 825 {
 826         pte_t * pte;
 827         unsigned long end;
 828 
 829         if (pmd_none(*dir))
 830                 return 0;
 831         if (pmd_bad(*dir)) {
 832                 printk("unuse_pmd: bad pmd (%08lx)\n", pmd_val(*dir));
 833                 pmd_clear(dir);
 834                 return 0;
 835         }
 836         pte = pte_offset(dir, address);
 837         offset += address & PMD_MASK;
 838         address &= ~PMD_MASK;
 839         end = address + size;
 840         if (end > PMD_SIZE)
 841                 end = PMD_SIZE;
 842         do {
 843                 if (unuse_pte(vma, offset+address-vma->vm_start, pte, type, page))
 844                         return 1;
 845                 address += PAGE_SIZE;
 846                 pte++;
 847         } while (address < end);
 848         return 0;
 849 }
 850 
 851 static inline int unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
 852         unsigned long address, unsigned long size,
 853         unsigned int type, unsigned long page)
 854 {
 855         pmd_t * pmd;
 856         unsigned long offset, end;
 857 
 858         if (pgd_none(*dir))
 859                 return 0;
 860         if (pgd_bad(*dir)) {
 861                 printk("unuse_pgd: bad pgd (%08lx)\n", pgd_val(*dir));
 862                 pgd_clear(dir);
 863                 return 0;
 864         }
 865         pmd = pmd_offset(dir, address);
 866         offset = address & PGDIR_MASK;
 867         address &= ~PGDIR_MASK;
 868         end = address + size;
 869         if (end > PGDIR_SIZE)
 870                 end = PGDIR_SIZE;
 871         do {
 872                 if (unuse_pmd(vma, pmd, address, end - address, offset, type, page))
 873                         return 1;
 874                 address = (address + PMD_SIZE) & PMD_MASK;
 875                 pmd++;
 876         } while (address < end);
 877         return 0;
 878 }
 879 
 880 static int unuse_vma(struct vm_area_struct * vma, pgd_t *pgdir,
 881         unsigned long start, unsigned long end,
 882         unsigned int type, unsigned long page)
 883 {
 884         while (start < end) {
 885                 if (unuse_pgd(vma, pgdir, start, end - start, type, page))
 886                         return 1;
 887                 start = (start + PGDIR_SIZE) & PGDIR_MASK;
 888                 pgdir++;
 889         }
 890         return 0;
 891 }
 892 
 893 static int unuse_process(struct task_struct * p, unsigned int type, unsigned long page)
 894 {
 895         struct vm_area_struct* vma;
 896 
 897         /*
 898          * Go through process' page directory.
 899          */
 900         vma = p->mm->mmap;
 901         while (vma) {
 902                 pgd_t * pgd = pgd_offset(p, vma->vm_start);
 903                 if (unuse_vma(vma, pgd, vma->vm_start, vma->vm_end, type, page))
 904                         return 1;
 905                 vma = vma->vm_next;
 906         }
 907         return 0;
 908 }
 909 
 910 /*
 911  * To avoid races, we repeat for each process after having
 912  * swapped something in. That gets rid of a few pesky races,
 913  * and "swapoff" isn't exactly timing critical.
 914  */
 915 static int try_to_unuse(unsigned int type)
 916 {
 917         int nr;
 918         unsigned long page = get_free_page(GFP_KERNEL);
 919 
 920         if (!page)
 921                 return -ENOMEM;
 922         nr = 0;
 923         while (nr < NR_TASKS) {
 924                 if (task[nr]) {
 925                         if (unuse_process(task[nr], type, page)) {
 926                                 page = get_free_page(GFP_KERNEL);
 927                                 if (!page)
 928                                         return -ENOMEM;
 929                                 continue;
 930                         }
 931                 }
 932                 nr++;
 933         }
 934         free_page(page);
 935         return 0;
 936 }
 937 
 938 asmlinkage int sys_swapoff(const char * specialfile)
 939 {
 940         struct swap_info_struct * p;
 941         struct inode * inode;
 942         unsigned int type;
 943         struct file filp;
 944         int i;
 945 
 946         if (!suser())
 947                 return -EPERM;
 948         i = namei(specialfile,&inode);
 949         if (i)
 950                 return i;
 951         p = swap_info;
 952         for (type = 0 ; type < nr_swapfiles ; type++,p++) {
 953                 if ((p->flags & SWP_WRITEOK) != SWP_WRITEOK)
 954                         continue;
 955                 if (p->swap_file) {
 956                         if (p->swap_file == inode)
 957                                 break;
 958                 } else {
 959                         if (!S_ISBLK(inode->i_mode))
 960                                 continue;
 961                         if (p->swap_device == inode->i_rdev)
 962                                 break;
 963                 }
 964         }
 965 
 966         if (type >= nr_swapfiles){
 967                 iput(inode);
 968                 return -EINVAL;
 969         }
 970         p->flags = SWP_USED;
 971         i = try_to_unuse(type);
 972         if (i) {
 973                 iput(inode);
 974                 p->flags = SWP_WRITEOK;
 975                 return i;
 976         }
 977 
 978         if(p->swap_device){
 979                 memset(&filp, 0, sizeof(filp));         
 980                 filp.f_inode = inode;
 981                 filp.f_mode = 3; /* read write */
 982                 /* open it again to get fops */
 983                 if( !blkdev_open(inode, &filp) &&
 984                    filp.f_op && filp.f_op->release){
 985                         filp.f_op->release(inode,&filp);
 986                         filp.f_op->release(inode,&filp);
 987                 }
 988         }
 989         iput(inode);
 990 
 991         nr_swap_pages -= p->pages;
 992         iput(p->swap_file);
 993         p->swap_file = NULL;
 994         p->swap_device = 0;
 995         vfree(p->swap_map);
 996         p->swap_map = NULL;
 997         free_page((long) p->swap_lockmap);
 998         p->swap_lockmap = NULL;
 999         p->flags = 0;
1000         return 0;
1001 }
1002 
1003 /*
1004  * Written 01/25/92 by Simmule Turner, heavily changed by Linus.
1005  *
1006  * The swapon system call
1007  */
1008 asmlinkage int sys_swapon(const char * specialfile)
1009 {
1010         struct swap_info_struct * p;
1011         struct inode * swap_inode;
1012         unsigned int type;
1013         int i,j;
1014         int error;
1015         struct file filp;
1016 
1017         memset(&filp, 0, sizeof(filp));
1018         if (!suser())
1019                 return -EPERM;
1020         p = swap_info;
1021         for (type = 0 ; type < nr_swapfiles ; type++,p++)
1022                 if (!(p->flags & SWP_USED))
1023                         break;
1024         if (type >= MAX_SWAPFILES)
1025                 return -EPERM;
1026         if (type >= nr_swapfiles)
1027                 nr_swapfiles = type+1;
1028         p->flags = SWP_USED;
1029         p->swap_file = NULL;
1030         p->swap_device = 0;
1031         p->swap_map = NULL;
1032         p->swap_lockmap = NULL;
1033         p->lowest_bit = 0;
1034         p->highest_bit = 0;
1035         p->max = 1;
1036         error = namei(specialfile,&swap_inode);
1037         if (error)
1038                 goto bad_swap_2;
1039         p->swap_file = swap_inode;
1040         error = -EBUSY;
1041         if (swap_inode->i_count != 1)
1042                 goto bad_swap_2;
1043         error = -EINVAL;
1044 
1045         if (S_ISBLK(swap_inode->i_mode)) {
1046                 p->swap_device = swap_inode->i_rdev;
1047 
1048                 filp.f_inode = swap_inode;
1049                 filp.f_mode = 3; /* read write */
1050                 error = blkdev_open(swap_inode, &filp);
1051                 p->swap_file = NULL;
1052                 iput(swap_inode);
1053                 if(error)
1054                         goto bad_swap_2;
1055                 error = -ENODEV;
1056                 if (!p->swap_device)
1057                         goto bad_swap;
1058                 error = -EBUSY;
1059                 for (i = 0 ; i < nr_swapfiles ; i++) {
1060                         if (i == type)
1061                                 continue;
1062                         if (p->swap_device == swap_info[i].swap_device)
1063                                 goto bad_swap;
1064                 }
1065         } else if (!S_ISREG(swap_inode->i_mode))
1066                 goto bad_swap;
1067         p->swap_lockmap = (unsigned char *) get_free_page(GFP_USER);
1068         if (!p->swap_lockmap) {
1069                 printk("Unable to start swapping: out of memory :-)\n");
1070                 error = -ENOMEM;
1071                 goto bad_swap;
1072         }
1073         read_swap_page(SWP_ENTRY(type,0), (char *) p->swap_lockmap);
1074         if (memcmp("SWAP-SPACE",p->swap_lockmap+4086,10)) {
1075                 printk("Unable to find swap-space signature\n");
1076                 error = -EINVAL;
1077                 goto bad_swap;
1078         }
1079         memset(p->swap_lockmap+PAGE_SIZE-10,0,10);
1080         j = 0;
1081         p->lowest_bit = 0;
1082         p->highest_bit = 0;
1083         for (i = 1 ; i < 8*PAGE_SIZE ; i++) {
1084                 if (test_bit(i,p->swap_lockmap)) {
1085                         if (!p->lowest_bit)
1086                                 p->lowest_bit = i;
1087                         p->highest_bit = i;
1088                         p->max = i+1;
1089                         j++;
1090                 }
1091         }
1092         if (!j) {
1093                 printk("Empty swap-file\n");
1094                 error = -EINVAL;
1095                 goto bad_swap;
1096         }
1097         p->swap_map = (unsigned char *) vmalloc(p->max);
1098         if (!p->swap_map) {
1099                 error = -ENOMEM;
1100                 goto bad_swap;
1101         }
1102         for (i = 1 ; i < p->max ; i++) {
1103                 if (test_bit(i,p->swap_lockmap))
1104                         p->swap_map[i] = 0;
1105                 else
1106                         p->swap_map[i] = 0x80;
1107         }
1108         p->swap_map[0] = 0x80;
1109         memset(p->swap_lockmap,0,PAGE_SIZE);
1110         p->flags = SWP_WRITEOK;
1111         p->pages = j;
1112         nr_swap_pages += j;
1113         printk("Adding Swap: %dk swap-space\n",j<<2);
1114         return 0;
1115 bad_swap:
1116         if(filp.f_op && filp.f_op->release)
1117                 filp.f_op->release(filp.f_inode,&filp);
1118 bad_swap_2:
1119         free_page((long) p->swap_lockmap);
1120         vfree(p->swap_map);
1121         iput(p->swap_file);
1122         p->swap_device = 0;
1123         p->swap_file = NULL;
1124         p->swap_map = NULL;
1125         p->swap_lockmap = NULL;
1126         p->flags = 0;
1127         return error;
1128 }
1129 
1130 void si_swapinfo(struct sysinfo *val)
1131 {
1132         unsigned int i, j;
1133 
1134         val->freeswap = val->totalswap = 0;
1135         for (i = 0; i < nr_swapfiles; i++) {
1136                 if ((swap_info[i].flags & SWP_WRITEOK) != SWP_WRITEOK)
1137                         continue;
1138                 for (j = 0; j < swap_info[i].max; ++j)
1139                         switch (swap_info[i].swap_map[j]) {
1140                                 case 128:
1141                                         continue;
1142                                 case 0:
1143                                         ++val->freeswap;
1144                                 default:
1145                                         ++val->totalswap;
1146                         }
1147         }
1148         val->freeswap <<= PAGE_SHIFT;
1149         val->totalswap <<= PAGE_SHIFT;
1150         return;
1151 }
1152 
1153 /*
1154  * set up the free-area data structures:
1155  *   - mark all pages MAP_PAGE_RESERVED
1156  *   - mark all memory queues empty
1157  *   - clear the memory bitmaps
1158  */
1159 unsigned long free_area_init(unsigned long start_mem, unsigned long end_mem)
1160 {
1161         mem_map_t * p;
1162         unsigned long mask = PAGE_MASK;
1163         int i;
1164 
1165         /*
1166          * select nr of pages we try to keep free for important stuff
1167          * with a minimum of 16 pages. This is totally arbitrary.
1168          */
1169         i = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT+6);
1170         if (i < 16)
1171                 i = 16;
1172         min_free_pages = i;
1173         start_mem = init_swap_cache(start_mem, end_mem);
1174         mem_map = (mem_map_t *) start_mem;
1175         p = mem_map + MAP_NR(end_mem);
1176         start_mem = (unsigned long) p;
1177         while (p > mem_map)
1178                 *--p = MAP_PAGE_RESERVED;
1179 
1180         for (i = 0 ; i < NR_MEM_LISTS ; i++) {
1181                 unsigned long bitmap_size;
1182                 free_area_list[i].prev = free_area_list[i].next = &free_area_list[i];
1183                 mask += mask;
1184                 end_mem = (end_mem + ~mask) & mask;
1185                 bitmap_size = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT + i);
1186                 bitmap_size = (bitmap_size + 7) >> 3;
1187                 bitmap_size = (bitmap_size + sizeof(unsigned long) - 1) & ~(sizeof(unsigned long)-1);
1188                 free_area_map[i] = (unsigned char *) start_mem;
1189                 memset((void *) start_mem, 0, bitmap_size);
1190                 start_mem += bitmap_size;
1191         }
1192         return start_mem;
1193 }
