root/mm/swap.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. show_swap_cache_info
  2. add_to_swap_cache
  3. init_swap_cache
  4. rw_swap_page
  5. get_swap_page
  6. swap_duplicate
  7. swap_free
  8. swap_in
  9. try_to_swap_out
  10. swap_out_pmd
  11. swap_out_pgd
  12. swap_out_vma
  13. swap_out_process
  14. swap_out
  15. try_to_free_page
  16. add_mem_queue
  17. remove_mem_queue
  18. free_pages_ok
  19. check_free_buffers
  20. free_pages
  21. mark_used
  22. __get_free_pages
  23. show_free_areas
  24. unuse_pte
  25. unuse_pmd
  26. unuse_pgd
  27. unuse_vma
  28. unuse_process
  29. try_to_unuse
  30. sys_swapoff
  31. sys_swapon
  32. si_swapinfo
  33. free_area_init

   1 /*
   2  *  linux/mm/swap.c
   3  *
   4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   5  */
   6 
   7 /*
   8  * This file should contain most things doing the swapping from/to disk.
   9  * Started 18.12.91
  10  */
  11 
  12 #include <linux/mm.h>
  13 #include <linux/sched.h>
  14 #include <linux/head.h>
  15 #include <linux/kernel.h>
  16 #include <linux/kernel_stat.h>
  17 #include <linux/errno.h>
  18 #include <linux/string.h>
  19 #include <linux/stat.h>
  20 #include <linux/swap.h>
  21 #include <linux/fs.h>
  22 
  23 #include <asm/dma.h>
  24 #include <asm/system.h> /* for cli()/sti() */
  25 #include <asm/bitops.h>
  26 #include <asm/pgtable.h>
  27 
  28 #define MAX_SWAPFILES 8
  29 
  30 #define SWP_USED        1
  31 #define SWP_WRITEOK     3
  32 
  33 int min_free_pages = 20;
  34 
  35 static int nr_swapfiles = 0;
  36 static struct wait_queue * lock_queue = NULL;
  37 static struct {
  38         int head;       /* head of priority-ordered swapfile list */
  39         int next;       /* swapfile to be used next */
  40 } swap_list = {-1, -1};
  41 
  42 static struct swap_info_struct {
  43         unsigned int flags;
  44         unsigned int swap_device;
  45         struct inode * swap_file;
  46         unsigned char * swap_map;
  47         unsigned char * swap_lockmap;
  48         int lowest_bit;
  49         int highest_bit;
  50         int prio;                       /* swap priority */
  51         int pages;
  52         unsigned long max;
  53         int next;                       /* next entry on swap list */
  54 } swap_info[MAX_SWAPFILES];
  55 
  56 extern int shm_swap (int, unsigned long);
  57 
  58 /*
  59  * To save us from swapping out pages which have just been swapped in and
  60  * have not been modified since then, we keep in swap_cache[page>>PAGE_SHIFT]
  61  * the swap entry which was last used to fill the page, or zero if the
  62  * page does not currently correspond to a page in swap. PAGE_DIRTY makes
  63  * this info useless.
  64  */
  65 unsigned long *swap_cache;
  66 
  67 #ifdef SWAP_CACHE_INFO
  68 unsigned long swap_cache_add_total = 0;
  69 unsigned long swap_cache_add_success = 0;
  70 unsigned long swap_cache_del_total = 0;
  71 unsigned long swap_cache_del_success = 0;
  72 unsigned long swap_cache_find_total = 0;
  73 unsigned long swap_cache_find_success = 0;
  74 
extern inline void show_swap_cache_info(void)
     /* [previous][next][first][last][top][bottom][index][help] */
  76 {
        /*
         * Dump the swap-cache statistics counters.  Each printed pair is
         * "total attempts / successes" for add, delete and find operations.
         */
  77         printk("Swap cache: add %ld/%ld, delete %ld/%ld, find %ld/%ld\n",
  78                 swap_cache_add_total, swap_cache_add_success, 
  79                 swap_cache_del_total, swap_cache_del_success,
  80                 swap_cache_find_total, swap_cache_find_success);
  81 }
  82 #endif
  83 
  84 static int add_to_swap_cache(unsigned long addr, unsigned long entry)
     /* [previous][next][first][last][top][bottom][index][help] */
  85 {
        /*
         * Remember that physical page 'addr' was last filled from swap
         * entry 'entry', so a clean page can later be dropped without a
         * fresh write-out.  Returns 1 if the entry was cached, 0 if the
         * owning swap area is not fully writable (SWP_WRITEOK).
         */
  86         struct swap_info_struct * p = &swap_info[SWP_TYPE(entry)];
  87 
  88 #ifdef SWAP_CACHE_INFO
  89         swap_cache_add_total++;
  90 #endif
  91         if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
                        /* Atomically install the new entry; a non-zero old
                           value means the slot was unexpectedly occupied. */
  92                 entry = xchg(swap_cache + MAP_NR(addr), entry);
  93                 if (entry)  {
  94                         printk("swap_cache: replacing non-NULL entry\n");
  95                 }
  96 #ifdef SWAP_CACHE_INFO
  97                 swap_cache_add_success++;
  98 #endif
  99                 return 1;
 100         }
 101         return 0;
 102 }
 103 
 104 static unsigned long init_swap_cache(unsigned long mem_start,
     /* [previous][next][first][last][top][bottom][index][help] */
 105         unsigned long mem_end)
 106 {
        /*
         * Boot-time setup: carve the swap_cache array (one unsigned long
         * per physical page frame, all zeroed) out of the memory between
         * mem_start and mem_end.  Returns the new start of free memory,
         * i.e. the first byte past the array.
         */
 107         unsigned long swap_cache_size;
 108 
                /* Round up to a 16-byte boundary before placing the array. */
 109         mem_start = (mem_start + 15) & ~15;
 110         swap_cache = (unsigned long *) mem_start;
 111         swap_cache_size = MAP_NR(mem_end);
 112         memset(swap_cache, 0, swap_cache_size * sizeof (unsigned long));
 113         return (unsigned long) (swap_cache + swap_cache_size);
 114 }
 115 
 116 void rw_swap_page(int rw, unsigned long entry, char * buf)
     /* [previous][next][first][last][top][bottom][index][help] */
 117 {
        /*
         * Read (rw == READ) or write one page of swap, identified by swap
         * entry 'entry', into/from the page at 'buf'.  The page slot is
         * locked via the per-area swap_lockmap bit for the duration of
         * the I/O; waiters sleep on lock_queue.
         *
         * Fixes vs. the original:
         *  - the "no swap file or device" printk said "re_swap_page";
         *  - the two "bad swap file" error paths returned with the
         *    lockmap bit still set and without waking waiters (lock
         *    leak); they now go through the common unlock path.
         */
 118         unsigned long type, offset;
 119         struct swap_info_struct * p;
 120 
 121         type = SWP_TYPE(entry);
 122         if (type >= nr_swapfiles) {
 123                 printk("Internal error: bad swap-device\n");
 124                 return;
 125         }
 126         p = &swap_info[type];
 127         offset = SWP_OFFSET(entry);
 128         if (offset >= p->max) {
 129                 printk("rw_swap_page: weirdness\n");
 130                 return;
 131         }
 132         if (p->swap_map && !p->swap_map[offset]) {
 133                 printk("Hmm.. Trying to use unallocated swap (%08lx)\n", entry);
 134                 return;
 135         }
 136         if (!(p->flags & SWP_USED)) {
 137                 printk("Trying to swap to unused swap-device\n");
 138                 return;
 139         }
                /* Acquire the per-slot lock bit; sleep until it is free. */
 140         while (set_bit(offset,p->swap_lockmap))
 141                 sleep_on(&lock_queue);
 142         if (rw == READ)
 143                 kstat.pswpin++;
 144         else
 145                 kstat.pswpout++;
 146         if (p->swap_device) {
                        /* Swap partition: hand the whole page to the block layer. */
 147                 ll_rw_page(rw,p->swap_device,offset,buf);
 148         } else if (p->swap_file) {
 149                 struct inode *swapf = p->swap_file;
 150                 unsigned int zones[PAGE_SIZE/512];
 151                 int i;
 152                 if (swapf->i_op->bmap == NULL
 153                         && swapf->i_op->smap != NULL){
 154                         /*
 155                                 With MsDOS, we use msdos_smap which return
 156                                 a sector number (not a cluster or block number).
 157                                 It is a patch to enable the UMSDOS project.
 158                                 Other people are working on better solution.
 159 
 160                                 It sounds like ll_rw_swap_file defined
 161                                 it operation size (sector size) based on
 162                                 PAGE_SIZE and the number of block to read.
 163                                 So using bmap or smap should work even if
 164                                 smap will require more blocks.
 165                         */
 166                         int j;
 167                         unsigned int block = offset << 3;
 168 
 169                         for (i=0, j=0; j< PAGE_SIZE ; i++, j += 512){
 170                                 if (!(zones[i] = swapf->i_op->smap(swapf,block++))) {
 171                                         printk("rw_swap_page: bad swap file\n");
                                                /* must release the lock bit before bailing out */
                                                goto done;
 173                                 }
 174                         }
 175                 }else{
 176                         int j;
 177                         unsigned int block = offset
 178                                 << (PAGE_SHIFT - swapf->i_sb->s_blocksize_bits);
 179 
 180                         for (i=0, j=0; j< PAGE_SIZE ; i++, j +=swapf->i_sb->s_blocksize)
 181                                 if (!(zones[i] = bmap(swapf,block++))) {
 182                                         printk("rw_swap_page: bad swap file\n");
                                                /* must release the lock bit before bailing out */
                                                goto done;
 184                                 }
 185                 }
 186                 ll_rw_swap_file(rw,swapf->i_dev, zones, i,buf);
 187         } else
 188                 printk("rw_swap_page: no swap file or device\n");
     done:
 189         if (offset && !clear_bit(offset,p->swap_lockmap))
 190                 printk("rw_swap_page: lock already cleared\n");
 191         wake_up(&lock_queue);
 192 }
 193 
 194 unsigned long get_swap_page(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 195 {
        /*
         * Allocate one free swap slot.  Returns an SWP_ENTRY(type,offset)
         * value (map count set to 1), or 0 if no swap space is available.
         * Areas are scanned in priority order; areas of equal priority are
         * used round-robin via swap_list.next.
         */
 196         struct swap_info_struct * p;
 197         unsigned long offset, entry;
 198         int type, wrapped = 0;
 199 
 200         type = swap_list.next;
 201         if (type < 0)
 202           return 0;
 203 
 204         while (1) {
 205                 p = &swap_info[type];
 206                 if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
 207                         for (offset = p->lowest_bit; offset <= p->highest_bit ; offset++) {
 208                                 if (p->swap_map[offset])
 209                                   continue;
                                        /* skip slots with I/O in flight */
 210                                 if (test_bit(offset, p->swap_lockmap))
 211                                   continue;
 212                                 p->swap_map[offset] = 1;
 213                                 nr_swap_pages--;
 214                                 if (offset == p->highest_bit)
 215                                   p->highest_bit--;
 216                                 p->lowest_bit = offset;
 217                                 entry = SWP_ENTRY(type,offset);
 218 
                                        /* Advance the round-robin pointer: stay within this
                                           priority band, else restart from the list head. */
 219                                 type = swap_info[type].next;
 220                                 if (type < 0 || p->prio != swap_info[type].prio) {
 221                                     swap_list.next = swap_list.head;
 222                                 } else {
 223                                     swap_list.next = type;
 224                                 }
 225                                 return entry;
 226                         }
 227                 }
                        /* Nothing free here: try the next area; on reaching the end of a
                           priority band, wrap to the head once before giving up. */
 228                 type = p->next;
 229                 if (!wrapped) {
 230                         if (type < 0 || p->prio != swap_info[type].prio) {
 231                                 type = swap_list.head;
 232                                 wrapped = 1;
 233                         }
 234                 } else if (type < 0) {
 235                         return 0;       /* out of swap space */
 236                 }
 237         }
 238 }
 239 
 240 void swap_duplicate(unsigned long entry)
     /* [previous][next][first][last][top][bottom][index][help] */
 241 {
        /*
         * Take an extra reference on a swap entry (e.g. when a page table
         * holding it is shared).  Silently ignores a zero entry and
         * SHM-type entries; complains about invalid or unused entries.
         */
 242         struct swap_info_struct * p;
 243         unsigned long offset, type;
 244 
 245         if (!entry)
 246                 return;
 247         offset = SWP_OFFSET(entry);
 248         type = SWP_TYPE(entry);
 249         if (type & SHM_SWP_TYPE)
 250                 return;
 251         if (type >= nr_swapfiles) {
 252                 printk("Trying to duplicate nonexistent swap-page\n");
 253                 return;
 254         }
 255         p = type + swap_info;
 256         if (offset >= p->max) {
 257                 printk("swap_duplicate: weirdness\n");
 258                 return;
 259         }
 260         if (!p->swap_map[offset]) {
 261                 printk("swap_duplicate: trying to duplicate unused page\n");
 262                 return;
 263         }
                /* NOTE(review): swap_map entries are unsigned char; a count of
                   255 would wrap on ++.  No guard visible here -- confirm
                   callers cannot push the share count that high. */
 264         p->swap_map[offset]++;
 265         return;
 266 }
 267 
 268 void swap_free(unsigned long entry)
     /* [previous][next][first][last][top][bottom][index][help] */
 269 {
        /*
         * Drop one reference on a swap entry; when the map count hits
         * zero the slot becomes reusable and nr_swap_pages is bumped.
         * Zero and SHM-type entries are silently ignored.
         */
 270         struct swap_info_struct * p;
 271         unsigned long offset, type;
 272 
 273         if (!entry)
 274                 return;
 275         type = SWP_TYPE(entry);
 276         if (type & SHM_SWP_TYPE)
 277                 return;
 278         if (type >= nr_swapfiles) {
 279                 printk("Trying to free nonexistent swap-page\n");
 280                 return;
 281         }
 282         p = & swap_info[type];
 283         offset = SWP_OFFSET(entry);
 284         if (offset >= p->max) {
 285                 printk("swap_free: weirdness\n");
 286                 return;
 287         }
 288         if (!(p->flags & SWP_USED)) {
 289                 printk("Trying to free swap from unused swap-device\n");
 290                 return;
 291         }
                /* Widen the free-slot search window to include this offset. */
 292         if (offset < p->lowest_bit)
 293                 p->lowest_bit = offset;
 294         if (offset > p->highest_bit)
 295                 p->highest_bit = offset;
 296         if (!p->swap_map[offset])
 297                 printk("swap_free: swap-space map bad (entry %08lx)\n",entry);
 298         else
 299                 if (!--p->swap_map[offset])
 300                         nr_swap_pages++;
                /* Restart allocation from the list head if a higher-priority
                   area just regained space.
                   NOTE(review): swap_list.next can be -1 (no swap areas), which
                   would read swap_info[-1] here -- verify this path cannot be
                   reached with an empty swap list. */
 301         if (p->prio > swap_info[swap_list.next].prio) {
 302             swap_list.next = swap_list.head;
 303         }
 304 }
 305 
 306 /*
 307  * The tests may look silly, but it essentially makes sure that
 308  * no other process did a swap-in on us just as we were waiting.
 309  *
 310  * Also, don't bother to add to the swap cache if this page-in
 311  * was due to a write access.
 312  */
 313 void swap_in(struct task_struct * tsk, struct vm_area_struct * vma,
     /* [previous][next][first][last][top][bottom][index][help] */
 314         pte_t * page_table, unsigned long entry, int write_access)
 315 {
        /*
         * Handle a fault on a swapped-out pte: allocate a page, read the
         * swap entry into it, and install it in the page table.  The pte
         * is re-checked after every potentially-sleeping call so that a
         * concurrent swap-in by another process is detected (see the
         * comment above this function).
         */
 316         unsigned long page = __get_free_page(GFP_KERNEL);
 317 
                /* __get_free_page may have slept: did someone beat us to it? */
 318         if (pte_val(*page_table) != entry) {
 319                 free_page(page);
 320                 return;
 321         }
 322         if (!page) {
 323                 set_pte(page_table, BAD_PAGE);
 324                 swap_free(entry);
 325                 oom(tsk);
 326                 return;
 327         }
 328         read_swap_page(entry, (char *) page);
                /* The read slept too; re-check the pte once more. */
 329         if (pte_val(*page_table) != entry) {
 330                 free_page(page);
 331                 return;
 332         }
 333         vma->vm_mm->rss++;
 334         vma->vm_mm->maj_flt++;
                /* Read fault: keep the swap copy and map the page clean and
                   read-only, so an unmodified page needs no write-out later. */
 335         if (!write_access && add_to_swap_cache(page, entry)) {
 336                 set_pte(page_table, mk_pte(page, vma->vm_page_prot));
 337                 return;
 338         }
                /* Write fault (or cache refused): map writable+dirty and
                   release the swap slot. */
 339         set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))));
 340         swap_free(entry);
 341         return;
 342 }
 343 
 344 /*
 345  * The swap-out functions return 1 if they successfully
 346  * threw something out, and we got a free page. It returns
 347  * zero if it couldn't do anything, and any other value
 348  * indicates it decreased rss, but the page was shared.
 349  *
 350  * NOTE! If it sleeps, it *must* return 1 to make sure we
 351  * don't continue with the swap-out. Otherwise we may be
 352  * using a process that no longer actually exists (it might
 353  * have died while we slept).
 354  */
 355 static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struct* vma,
     /* [previous][next][first][last][top][bottom][index][help] */
 356         unsigned long address, pte_t * page_table, unsigned long limit)
 357 {
        /*
         * Try to evict the single page mapped at 'address'/'page_table'.
         * Per the comment above: returns 1 when a page was freed (and we
         * may have slept -- caller must not touch the task again), 0 when
         * nothing was done, and any other value when rss was decreased
         * but the page frame was shared and so not freed.  Pages at or
         * above 'limit' are left alone.
         */
 358         pte_t pte;
 359         unsigned long entry;
 360         unsigned long page;
 361 
 362         pte = *page_table;
 363         if (!pte_present(pte))
 364                 return 0;
 365         page = pte_page(pte);
 366         if (page >= high_memory)
 367                 return 0;
 368         if (page >= limit)
 369                 return 0;
 370         if (mem_map[MAP_NR(page)] & MAP_PAGE_RESERVED)
 371                 return 0;
                /* Recently-used page (or dirtied while in the swap cache):
                   just age it and give it another round. */
 372         if ((pte_dirty(pte) && delete_from_swap_cache(page)) || pte_young(pte))  {
 373                 set_pte(page_table, pte_mkold(pte));
 374                 return 0;
 375         }       
 376         if (pte_dirty(pte)) {
                        /* Dirty page: either hand it to the vma's own swapout
                           method, or write it to a fresh swap slot. */
 377                 if (vma->vm_ops && vma->vm_ops->swapout) {
 378                         pid_t pid = tsk->pid;
 379                         vma->vm_mm->rss--;
 380                         if (vma->vm_ops->swapout(vma, address - vma->vm_start + vma->vm_offset, page_table))
 381                                 kill_proc(pid, SIGBUS, 1);
 382                 } else {
                                /* Only unshared pages go to anonymous swap. */
 383                         if (mem_map[MAP_NR(page)] != 1)
 384                                 return 0;
 385                         if (!(entry = get_swap_page()))
 386                                 return 0;
 387                         vma->vm_mm->rss--;
 388                         set_pte(page_table, __pte(entry));
 389                         invalidate();
 390                         write_swap_page(entry, (char *) page);
 391                 }
 392                 free_page(page);
 393                 return 1;       /* we slept: the process may not exist any more */
 394         }
                /* Clean page still in the swap cache: drop it and point the
                   pte back at the swap entry -- no I/O needed. */
 395         if ((entry = find_in_swap_cache(page)))  {
 396                 if (mem_map[MAP_NR(page)] != 1) {
 397                         set_pte(page_table, pte_mkdirty(pte));
 398                         printk("Aiee.. duplicated cached swap-cache entry\n");
 399                         return 0;
 400                 }
 401                 vma->vm_mm->rss--;
 402                 set_pte(page_table, __pte(entry));
 403                 invalidate();
 404                 free_page(page);
 405                 return 1;
 406         } 
                /* Clean, uncached page: just discard the mapping.  The return
                   value is the frame's prior use count (1 => frame freed). */
 407         vma->vm_mm->rss--;
 408         pte_clear(page_table);
 409         invalidate();
 410         entry = mem_map[MAP_NR(page)];
 411         free_page(page);
 412         return entry;
 413 }
 414 
 415 /*
 416  * A new implementation of swap_out().  We do not swap complete processes,
 417  * but only a small number of blocks, before we continue with the next
 418  * process.  The number of blocks actually swapped is determined on the
 419  * number of page faults, that this process actually had in the last time,
 420  * so we won't swap heavily used processes all the time ...
 421  *
 422  * Note: the priority argument is a hint on much CPU to waste with the
 423  *       swap block search, not a hint, of how much blocks to swap with
 424  *       each process.
 425  *
 426  * (C) 1993 Kai Petzke, wpp@marie.physik.tu-berlin.de
 427  */
 428 
 429 /*
 430  * These are the minimum and maximum number of pages to swap from one process,
 431  * before proceeding to the next:
 432  */
 433 #define SWAP_MIN        4
 434 #define SWAP_MAX        32
 435 
 436 /*
 437  * The actual number of pages to swap is determined as:
 438  * SWAP_RATIO / (number of recent major page faults)
 439  */
 440 #define SWAP_RATIO      128
 441 
 442 static inline int swap_out_pmd(struct task_struct * tsk, struct vm_area_struct * vma,
     /* [previous][next][first][last][top][bottom][index][help] */
 443         pmd_t *dir, unsigned long address, unsigned long end, unsigned long limit)
 444 {
        /*
         * Walk the ptes of one pmd from 'address' to min(end, end of pmd),
         * attempting to swap out each page.  Returns the first non-zero
         * result from try_to_swap_out(), or 0 if nothing was evicted.
         */
 445         pte_t * pte;
 446         unsigned long pmd_end;
 447 
 448         if (pmd_none(*dir))
 449                 return 0;
 450         if (pmd_bad(*dir)) {
 451                 printk("swap_out_pmd: bad pmd (%08lx)\n", pmd_val(*dir));
 452                 pmd_clear(dir);
 453                 return 0;
 454         }
 455         
 456         pte = pte_offset(dir, address);
 457         
 458         pmd_end = (address + PMD_SIZE) & PMD_MASK;
 459         if (end > pmd_end)
 460                 end = pmd_end;
 461 
 462         do {
 463                 int result;
                        /* Record resume point before a call that may sleep. */
 464                 vma->vm_mm->swap_address = address + PAGE_SIZE;
 465                 result = try_to_swap_out(tsk, vma, address, pte, limit);
 466                 if (result)
 467                         return result;
 468                 address += PAGE_SIZE;
 469                 pte++;
 470         } while (address < end);
 471         return 0;
 472 }
 473 
 474 static inline int swap_out_pgd(struct task_struct * tsk, struct vm_area_struct * vma,
     /* [previous][next][first][last][top][bottom][index][help] */
 475         pgd_t *dir, unsigned long address, unsigned long end, unsigned long limit)
 476 {
        /*
         * Walk the pmds of one page directory entry from 'address' to
         * min(end, end of pgd), delegating to swap_out_pmd().  Returns
         * the first non-zero result, or 0.
         */
 477         pmd_t * pmd;
 478         unsigned long pgd_end;
 479 
 480         if (pgd_none(*dir))
 481                 return 0;
 482         if (pgd_bad(*dir)) {
 483                 printk("swap_out_pgd: bad pgd (%08lx)\n", pgd_val(*dir));
 484                 pgd_clear(dir);
 485                 return 0;
 486         }
 487 
 488         pmd = pmd_offset(dir, address);
 489 
 490         pgd_end = (address + PGDIR_SIZE) & PGDIR_MASK;  
 491         if (end > pgd_end)
 492                 end = pgd_end;
 493         
 494         do {
 495                 int result = swap_out_pmd(tsk, vma, pmd, address, end, limit);
 496                 if (result)
 497                         return result;
 498                 address = (address + PMD_SIZE) & PMD_MASK;
 499                 pmd++;
 500         } while (address < end);
 501         return 0;
 502 }
 503 
 504 static int swap_out_vma(struct task_struct * tsk, struct vm_area_struct * vma,
     /* [previous][next][first][last][top][bottom][index][help] */
 505         pgd_t *pgdir, unsigned long start, unsigned long limit)
 506 {
        /*
         * Try to swap out pages of one vma, starting at 'start' (which
         * must lie inside the vma, with 'pgdir' the matching pgd slot).
         * Returns the first non-zero result from the pgd walk, or 0.
         */
 507         unsigned long end;
 508 
 509         /* Don't swap out areas like shared memory which have their
 510             own separate swapping mechanism. */
 511         if (vma->vm_flags & VM_SHM)
 512                 return 0;
 513 
 514         end = vma->vm_end;
 515         while (start < end) {
 516                 int result = swap_out_pgd(tsk, vma, pgdir, start, end, limit);
 517                 if (result)
 518                         return result;
 519                 start = (start + PGDIR_SIZE) & PGDIR_MASK;
 520                 pgdir++;
 521         }
 522         return 0;
 523 }
 524 
 525 static int swap_out_process(struct task_struct * p, unsigned long limit)
     /* [previous][next][first][last][top][bottom][index][help] */
 526 {
        /*
         * Resume swapping out process 'p' from where the last attempt
         * stopped (mm->swap_address).  Returns the first non-zero result
         * from the vma walk (1 == a page was freed), or 0 when the whole
         * address space has been scanned; swap_address is then reset so
         * the next call starts over from the beginning.
         */
 527         unsigned long address;
 528         struct vm_area_struct* vma;
 529 
 530         /*
 531          * Go through process' page directory.
 532          */
 533         address = p->mm->swap_address;
 534         p->mm->swap_address = 0;
 535 
 536         /*
 537          * Find the proper vm-area
 538          */
 539         vma = find_vma(p, address);
 540         if (!vma)
 541                 return 0;
 542         if (address < vma->vm_start)
 543                 address = vma->vm_start;
 544 
 545         for (;;) {
 546                 int result = swap_out_vma(p, vma, pgd_offset(p->mm, address), address, limit);
 547                 if (result)
 548                         return result;
 549                 vma = vma->vm_next;
 550                 if (!vma)
 551                         break;
 552                 address = vma->vm_start;
 553         }
 554         p->mm->swap_address = 0;
 555         return 0;
 556 }
 557 
 558 static int swap_out(unsigned int priority, unsigned long limit)
     /* [previous][next][first][last][top][bottom][index][help] */
 559 {
        /*
         * Round-robin over all tasks trying to free one page.  'priority'
         * shrinks the effort budget (counter); each task's quota of pages
         * (mm->swap_cnt) is derived from its recent major-fault rate so
         * heavily-faulting processes are swapped less.  Returns 1 when a
         * page was freed, 0 otherwise.  swap_task persists across calls
         * so the scan resumes where it left off.
         */
 560         static int swap_task;
 561         int loop, counter;
 562         struct task_struct *p;
 563 
 564         counter = 6*nr_tasks >> priority;
 565         for(; counter >= 0; counter--) {
 566                 /*
 567                  * Check that swap_task is suitable for swapping.  If not, look for
 568                  * the next suitable process.
 569                  */
 570                 loop = 0;
 571                 while(1) {
 572                         if (swap_task >= NR_TASKS) {
                                        /* wrap past the end; slot 0 (idle/init) is skipped */
 573                                 swap_task = 1;
 574                                 if (loop)
 575                                         /* all processes are unswappable or already swapped out */
 576                                         return 0;
 577                                 loop = 1;
 578                         }
 579 
 580                         p = task[swap_task];
 581                         if (p && p->mm && p->mm->swappable && p->mm->rss)
 582                                 break;
 583 
 584                         swap_task++;
 585                 }
 586 
 587                 /*
 588                  * Determine the number of pages to swap from this process.
 589                  */
 590                 if (!p->mm->swap_cnt) {
                                /* Decaying average of recent major faults. */
 591                         p->mm->dec_flt = (p->mm->dec_flt * 3) / 4 + p->mm->maj_flt - p->mm->old_maj_flt;
 592                         p->mm->old_maj_flt = p->mm->maj_flt;
 593 
 594                         if (p->mm->dec_flt >= SWAP_RATIO / SWAP_MIN) {
 595                                 p->mm->dec_flt = SWAP_RATIO / SWAP_MIN;
 596                                 p->mm->swap_cnt = SWAP_MIN;
 597                         } else if (p->mm->dec_flt <= SWAP_RATIO / SWAP_MAX)
 598                                 p->mm->swap_cnt = SWAP_MAX;
 599                         else
 600                                 p->mm->swap_cnt = SWAP_RATIO / p->mm->dec_flt;
 601                 }
                        /* Quota exhausted: move on to the next task afterwards. */
 602                 if (!--p->mm->swap_cnt)
 603                         swap_task++;
 604                 switch (swap_out_process(p, limit)) {
 605                         case 0:
                                        /* nothing evicted; skip ahead unless quota just ended */
 606                                 if (p->mm->swap_cnt)
 607                                         swap_task++;
 608                                 break;
 609                         case 1:
 610                                 return 1;
 611                         default:
                                        /* rss decreased but frame was shared; keep trying */
 612                                 break;
 613                 }
 614         }
 615         return 0;
 616 }
 617 
 618 /*
 619  * we keep on shrinking one resource until it's considered "too hard",
 620  * and then switch to the next one (priority being an indication on how
 621  * hard we should try with the resource).
 622  *
 623  * This should automatically find the resource that can most easily be
 624  * free'd, so hopefully we'll get reasonable behaviour even under very
 625  * different circumstances.
 626  */
 627 static int try_to_free_page(int priority, unsigned long limit)
     /* [previous][next][first][last][top][bottom][index][help] */
 628 {
        /*
         * Try each page-reclaim source in turn: buffer cache, SysV shared
         * memory, then process pages.  The switch deliberately jumps into
         * the middle of the do-while (Duff's-device style) so that a call
         * resumes with the source that last made progress ('state'), and
         * the case labels fall through on purpose.  'i' counts effort
         * down from 6.  Returns 1 as soon as one page is freed, else 0.
         */
 629         static int state = 0;
 630         int i=6;
 631 
 632         switch (state) {
 633                 do {
 634                 case 0:
 635                         if (priority != GFP_NOBUFFER && shrink_buffers(i, limit))
 636                                 return 1;
 637                         state = 1;
 638                 case 1:
 639                         if (shm_swap(i, limit))
 640                                 return 1;
 641                         state = 2;
 642                 default:
 643                         if (swap_out(i, limit))
 644                                 return 1;
 645                         state = 0;
 646                 } while(i--);
 647         }
 648         return 0;
 649 }
 650 
 651 static inline void add_mem_queue(struct mem_list * head, struct mem_list * entry)
     /* [previous][next][first][last][top][bottom][index][help] */
 652 {
        /* Insert 'entry' right after 'head' in the doubly-linked free list.
           Callers are expected to hold interrupts off (cli) -- the list is
           not otherwise protected. */
 653         entry->prev = head;
 654         (entry->next = head->next)->prev = entry;
 655         head->next = entry;
 656 }
 657 
 658 static inline void remove_mem_queue(struct mem_list * head, struct mem_list * entry)
     /* [previous][next][first][last][top][bottom][index][help] */
 659 {
        /* Unlink 'entry' from its free list ('head' is unused but kept for
           symmetry with add_mem_queue).  Caller provides the locking. */
 660         entry->next->prev = entry->prev;
 661         entry->prev->next = entry->next;
 662 }
 663 
 664 /*
 665  * Free_page() adds the page to the free lists. This is optimized for
 666  * fast normal cases (no error jumps taken normally).
 667  *
 668  * The way to optimize jumps for gcc-2.2.2 is to:
 669  *  - select the "normal" case and put it inside the if () { XXX }
 670  *  - no else-statements if you can avoid them
 671  *
 672  * With the above two rules, you get a straight-line execution path
 673  * for the normal case, giving better asm-code.
 674  *
 675  * free_page() may sleep since the page being freed may be a buffer
 676  * page or present in the swap cache. It will not sleep, however,
 677  * for a freshly allocated page (get_free_page()).
 678  */
 679 
 680 /*
 681  * Buddy system. Hairy. You really aren't expected to understand this
 682  */
 683 static inline void free_pages_ok(unsigned long addr, unsigned long order)
     /* [previous][next][first][last][top][bottom][index][help] */
 684 {
        /*
         * Return a 2^order-page block at 'addr' to the buddy free lists,
         * coalescing with its buddy at each level while the buddy is also
         * free.  change_bit() toggles the pair's "one half free" bit: a
         * result of 1 means the buddy was already free, so it is removed
         * from its list and the merged block moves up one order.
         */
 685         unsigned long index = MAP_NR(addr) >> (1 + order);
 686         unsigned long mask = PAGE_MASK << order;
 687 
 688         addr &= mask;
 689         nr_free_pages += 1 << order;
 690         while (order < NR_MEM_LISTS-1) {
 691                 if (!change_bit(index, free_area_map[order]))
 692                         break;
                        /* addr ^ (1+~mask) == addr ^ block_size: the buddy's address */
 693                 remove_mem_queue(free_area_list+order, (struct mem_list *) (addr ^ (1+~mask)));
 694                 order++;
 695                 index >>= 1;
 696                 mask <<= 1;
 697                 addr &= mask;
 698         }
 699         add_mem_queue(free_area_list+order, (struct mem_list *) addr);
 700 }
 701 
 702 static inline void check_free_buffers(unsigned long addr)
     /* [previous][next][first][last][top][bottom][index][help] */
 703 {
        /*
         * The page at 'addr' just went back to a single user: walk any
         * buffer heads on it and refile those still on the shared list
         * (0xffff appears to mark an unused/anonymous device -- skip those).
         */
 704         struct buffer_head * bh;
 705 
 706         bh = buffer_pages[MAP_NR(addr)];
 707         if (bh) {
 708                 struct buffer_head *tmp = bh;
 709                 do {
 710                         if (tmp->b_list == BUF_SHARED && tmp->b_dev != 0xffff)
 711                                 refile_buffer(tmp);
 712                         tmp = tmp->b_this_page;
 713                 } while (tmp != bh);
 714         }
 715 }
 716 
 717 void free_pages(unsigned long addr, unsigned long order)
     /* [previous][next][first][last][top][bottom][index][help] */
 718 {
        /*
         * Drop one reference on the 2^order block at 'addr'.  When the
         * use count reaches zero the block goes back to the buddy lists
         * and its swap-cache entry is purged.  Reserved pages and
         * addresses beyond high_memory are ignored; freeing an
         * already-free page is reported as likely corruption.
         */
 719         if (addr < high_memory) {
 720                 unsigned long flag;
 721                 mem_map_t * map = mem_map + MAP_NR(addr);
 722                 if (*map) {
 723                         if (!(*map & MAP_PAGE_RESERVED)) {
                                        /* count manipulation must be atomic wrt interrupts */
 724                                 save_flags(flag);
 725                                 cli();
 726                                 if (!--*map)  {
 727                                         free_pages_ok(addr, order);
 728                                         delete_from_swap_cache(addr);
 729                                 }
 730                                 restore_flags(flag);
                                        /* dropped to a single user: revisit its buffers */
 731                                 if (*map == 1)
 732                                         check_free_buffers(addr);
 733                         }
 734                         return;
 735                 }
 736                 printk("Trying to free free memory (%08lx): memory probably corrupted\n",addr);
 737                 printk("PC = %p\n", __builtin_return_address(0));
 738                 return;
 739         }
 740 }
 741 
 742 /*
 743  * Some ugly macros to speed up __get_free_pages()..
 744  *
 745  * RMQUEUE(order, limit): scan the free lists from 'order' upwards for a
 746  * block whose address is below 'limit'.  On success it unlinks the
 747  * block, marks it used, splits any excess back onto lower lists via
 748  * EXPAND, restores interrupt flags, and RETURNS from the enclosing
 749  * function with the block's address.  Falls through (no return) when
 750  * nothing suitable is free.  Expects 'flags' saved and interrupts off.
 751  */
 745 #define RMQUEUE(order, limit) \
 746 do { struct mem_list * queue = free_area_list+order; \
 747      unsigned long new_order = order; \
 748         do { struct mem_list *prev = queue, *ret; \
 749                 while (queue != (ret = prev->next)) { \
 750                         if ((unsigned long) ret < (limit)) { \
 751                                 (prev->next = ret->next)->prev = prev; \
 752                                 mark_used((unsigned long) ret, new_order); \
 753                                 nr_free_pages -= 1 << order; \
 754                                 restore_flags(flags); \
 755                                 EXPAND(ret, order, new_order); \
 756                                 return (unsigned long) ret; \
 757                         } \
 758                         prev = ret; \
 759                 } \
 760                 new_order++; queue++; \
 761         } while (new_order < NR_MEM_LISTS); \
 762 } while (0)
 763 
 764 static inline int mark_used(unsigned long addr, unsigned long order)
     /* [previous][next][first][last][top][bottom][index][help] */
 765 {
 766         return change_bit(MAP_NR(addr) >> (1+order), free_area_map[order]);
 767 }
 768 
/*
 * EXPAND splits a block of order 'high' down to the requested order
 * 'low'.  Each iteration halves the block: the *front* half goes back
 * onto the order-(high-1) free list (with its bitmap bit toggled) and
 * 'addr' advances past it, so the final allocation is taken from the
 * tail of the original block.  The surviving page gets a mem_map use
 * count of 1.
 *
 * NOTE(review): like RMQUEUE, this uses the caller's 'flags' variable;
 * interrupts are disabled around each free-list insertion.
 */
#define EXPAND(addr,low,high) \
do { unsigned long size = PAGE_SIZE << high; \
	while (high > low) { \
		high--; size >>= 1; cli(); \
		add_mem_queue(free_area_list+high, addr); \
		mark_used((unsigned long) addr, high); \
		restore_flags(flags); \
		addr = (struct mem_list *) (size + (unsigned long) addr); \
	} mem_map[MAP_NR((unsigned long) addr)] = 1; \
} while (0)
 779 
/*
 * Allocate 2^order physically contiguous pages at an address below
 * 'limit'.  Returns the address of the first page, or 0 on failure.
 *
 * GFP_ATOMIC callers may dip below the min_free_pages watermark and
 * never sleep; all other priorities retry after pushing something out
 * via try_to_free_page() (which may sleep).
 */
unsigned long __get_free_pages(int priority, unsigned long order, unsigned long limit)
{
	unsigned long flags;
	int reserved_pages;

	/* Sleeping allocation from interrupt context is a caller bug:
	 * warn (a few times only) and degrade to GFP_ATOMIC. */
	if (intr_count && priority != GFP_ATOMIC) {
		static int count = 0;
		if (++count < 5) {
			printk("gfp called nonatomically from interrupt %p\n",
				__builtin_return_address(0));
			priority = GFP_ATOMIC;
		}
	}
	/* GFP_NFS gets a smaller reserve (5 pages) so that swapping over
	 * NFS can still make progress under memory pressure. */
	reserved_pages = 5;
	if (priority != GFP_NFS)
		reserved_pages = min_free_pages;
	save_flags(flags);
repeat:
	cli();
	if ((priority==GFP_ATOMIC) || nr_free_pages > reserved_pages) {
		/* On success RMQUEUE returns from this function directly;
		 * falling through here means no suitable block was found. */
		RMQUEUE(order, limit);
		restore_flags(flags);
		return 0;
	}
	restore_flags(flags);
	/* GFP_BUFFER must not recurse into page freeing; just fail. */
	if (priority != GFP_BUFFER && try_to_free_page(priority, limit))
		goto repeat;
	return 0;
}
 809 
 810 /*
 811  * Show free area list (used inside shift_scroll-lock stuff)
 812  * We also calculate the percentage fragmentation. We do this by counting the
 813  * memory on each free list with the exception of the first item on the list.
 814  */
 815 void show_free_areas(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 816 {
 817         unsigned long order, flags;
 818         unsigned long total = 0;
 819 
 820         printk("Free pages:      %6dkB\n ( ",nr_free_pages<<(PAGE_SHIFT-10));
 821         save_flags(flags);
 822         cli();
 823         for (order=0 ; order < NR_MEM_LISTS; order++) {
 824                 struct mem_list * tmp;
 825                 unsigned long nr = 0;
 826                 for (tmp = free_area_list[order].next ; tmp != free_area_list + order ; tmp = tmp->next) {
 827                         nr ++;
 828                 }
 829                 total += nr * ((PAGE_SIZE>>10) << order);
 830                 printk("%lu*%lukB ", nr, (PAGE_SIZE>>10) << order);
 831         }
 832         restore_flags(flags);
 833         printk("= %lukB)\n", total);
 834 #ifdef SWAP_CACHE_INFO
 835         show_swap_cache_info();
 836 #endif  
 837 }
 838 
/*
 * Trying to stop swapping from a file is fraught with races, so
 * we repeat quite a bit here when we have to pause. swapoff()
 * isn't exactly timing-critical, so who cares (but this is /really/
 * inefficient, ugh).
 *
 * We return 1 after having slept, which makes the process start over
 * from the beginning for this process..
 */

/*
 * Detach one pte from swap area 'type': either drop its swap-cache
 * entry (present page) or read the page back in from swap.
 * 'page' is a pre-allocated free page supplied by the caller for the
 * swap-in case.  Returns 1 if we may have slept, 0 otherwise.
 */
static inline int unuse_pte(struct vm_area_struct * vma, unsigned long address,
	pte_t *dir, unsigned int type, unsigned long page)
{
	pte_t pte = *dir;

	if (pte_none(pte))
		return 0;
	if (pte_present(pte)) {
		/* Deliberately shadows the 'page' argument: this is the
		 * physical page currently mapped by the pte. */
		unsigned long page = pte_page(pte);
		if (page >= high_memory)
			return 0;
		if (!in_swap_cache(page))
			return 0;
		if (SWP_TYPE(in_swap_cache(page)) != type)
			return 0;
		/* Drop the clean swap-cache copy and dirty the pte so the
		 * page cannot silently go back to this swap area. */
		delete_from_swap_cache(page);
		set_pte(dir, pte_mkdirty(pte));
		return 0;
	}
	if (SWP_TYPE(pte_val(pte)) != type)
		return 0;
	/* Swapped-out entry belonging to our area: read it in (sleeps). */
	read_swap_page(pte_val(pte), (char *) page);
	/* Re-check the pte — it may have been faulted in or changed while
	 * we slept in the read; if so, discard our copy and restart. */
	if (pte_val(*dir) != pte_val(pte)) {
		free_page(page);
		return 1;
	}
	set_pte(dir, pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))));
	++vma->vm_mm->rss;
	swap_free(pte_val(pte));
	return 1;
}
 879 
/*
 * Walk the pte page under one pmd entry, applying unuse_pte() to each
 * pte in [address, address+size) (clamped to this pmd's span).
 * 'offset' carries the accumulated virtual base so unuse_pte gets the
 * vma-relative address.  Returns 1 as soon as a pte level slept.
 */
static inline int unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
	unsigned long address, unsigned long size, unsigned long offset,
	unsigned int type, unsigned long page)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*dir))
		return 0;
	if (pmd_bad(*dir)) {
		/* Corrupt entry: report it and clear so we don't loop on it. */
		printk("unuse_pmd: bad pmd (%08lx)\n", pmd_val(*dir));
		pmd_clear(dir);
		return 0;
	}
	pte = pte_offset(dir, address);
	/* Fold the pmd-aligned part of the address into the base offset,
	 * leaving 'address' relative to this pmd's span. */
	offset += address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		if (unuse_pte(vma, offset+address-vma->vm_start, pte, type, page))
			return 1;
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
	return 0;
}
 908 
/*
 * Walk the pmd table under one pgd entry, applying unuse_pmd() to each
 * pmd covering [address, address+size) (clamped to this pgd's span).
 * Returns 1 as soon as a lower level slept.
 */
static inline int unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
	unsigned long address, unsigned long size,
	unsigned int type, unsigned long page)
{
	pmd_t * pmd;
	unsigned long offset, end;

	if (pgd_none(*dir))
		return 0;
	if (pgd_bad(*dir)) {
		/* Corrupt entry: report it and clear so we don't loop on it. */
		printk("unuse_pgd: bad pgd (%08lx)\n", pgd_val(*dir));
		pgd_clear(dir);
		return 0;
	}
	pmd = pmd_offset(dir, address);
	/* Split the address into the pgd-aligned base ('offset') and the
	 * remainder within this pgd's span. */
	offset = address & PGDIR_MASK;
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		if (unuse_pmd(vma, pmd, address, end - address, offset, type, page))
			return 1;
		/* Advance to the start of the next pmd span. */
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}
 937 
 938 static int unuse_vma(struct vm_area_struct * vma, pgd_t *pgdir,
     /* [previous][next][first][last][top][bottom][index][help] */
 939         unsigned long start, unsigned long end,
 940         unsigned int type, unsigned long page)
 941 {
 942         while (start < end) {
 943                 if (unuse_pgd(vma, pgdir, start, end - start, type, page))
 944                         return 1;
 945                 start = (start + PGDIR_SIZE) & PGDIR_MASK;
 946                 pgdir++;
 947         }
 948         return 0;
 949 }
 950 
 951 static int unuse_process(struct task_struct * p, unsigned int type, unsigned long page)
     /* [previous][next][first][last][top][bottom][index][help] */
 952 {
 953         struct vm_area_struct* vma;
 954 
 955         /*
 956          * Go through process' page directory.
 957          */
 958         vma = p->mm->mmap;
 959         while (vma) {
 960                 pgd_t * pgd = pgd_offset(p->mm, vma->vm_start);
 961                 if (unuse_vma(vma, pgd, vma->vm_start, vma->vm_end, type, page))
 962                         return 1;
 963                 vma = vma->vm_next;
 964         }
 965         return 0;
 966 }
 967 
/*
 * To avoid races, we repeat for each process after having
 * swapped something in. That gets rid of a few pesky races,
 * and "swapoff" isn't exactly timing critical.
 */

/*
 * Pull every page on swap area 'type' back into memory, process by
 * process.  A spare page is kept pre-allocated for unuse_pte(); each
 * time it is consumed we allocate a fresh one and retry the *same*
 * task (note: 'nr' is deliberately not advanced on that path).
 * Returns 0 on success, -ENOMEM if a spare page cannot be had.
 */
static int try_to_unuse(unsigned int type)
{
	int nr;
	unsigned long page = get_free_page(GFP_KERNEL);

	if (!page)
		return -ENOMEM;
	nr = 0;
	while (nr < NR_TASKS) {
		if (task[nr]) {
			if (unuse_process(task[nr], type, page)) {
				/* Spare page consumed (or we slept): get a
				 * new one and rescan this task from scratch. */
				page = get_free_page(GFP_KERNEL);
				if (!page)
					return -ENOMEM;
				continue;
			}
		}
		nr++;
	}
	free_page(page);
	return 0;
}
 995 
/*
 * The swapoff system call: find the swap area named by 'specialfile'
 * (block device or regular file), unlink it from the priority list,
 * pull all its pages back into memory, then release the device/inode
 * and free its bookkeeping.  Requires superuser.
 */
asmlinkage int sys_swapoff(const char * specialfile)
{
	struct swap_info_struct * p;
	struct inode * inode;
	struct file filp;
	int i, type, prev;

	if (!suser())
		return -EPERM;
	i = namei(specialfile,&inode);
	if (i)
		return i;
	/* Find the active swap area matching this inode/device. */
	prev = -1;
	for (type = swap_list.head; type >= 0; type = swap_info[type].next) {
		p = swap_info + type;
		if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
			if (p->swap_file) {
				if (p->swap_file == inode)
				  break;
			} else {
				if (S_ISBLK(inode->i_mode)
				    && (p->swap_device == inode->i_rdev))
				  break;
			}
		}
		prev = type;
	}
	if (type < 0){
		iput(inode);
		return -EINVAL;
	}
	/* Unlink the area from the priority-ordered list so get_swap_page()
	 * stops handing out entries on it. */
	if (prev < 0) {
		swap_list.head = p->next;
	} else {
		swap_info[prev].next = p->next;
	}
	if (type == swap_list.next) {
		/* just pick something that's safe... */
		swap_list.next = swap_list.head;
	}
	/* SWP_USED without SWP_WRITEOK: area still exists but is read-only
	 * while we drain it. */
	p->flags = SWP_USED;
	i = try_to_unuse(type);
	if (i) {
		iput(inode);
		/* NOTE(review): flags are restored but the area is NOT
		 * re-linked into swap_list here — after a failed swapoff it
		 * is no longer reachable for allocation; confirm intended. */
		p->flags = SWP_WRITEOK;
		return i;
	}

	if(p->swap_device){
		memset(&filp, 0, sizeof(filp));		
		filp.f_inode = inode;
		filp.f_mode = 3; /* read write */
		/* open it again to get fops */
		if( !blkdev_open(inode, &filp) &&
		   filp.f_op && filp.f_op->release){
			/* Release twice: once for the open above, once for
			 * the open done originally by sys_swapon(). */
			filp.f_op->release(inode,&filp);
			filp.f_op->release(inode,&filp);
		}
	}
	iput(inode);

	/* Give back the page count and free all per-area state. */
	nr_swap_pages -= p->pages;
	iput(p->swap_file);
	p->swap_file = NULL;
	p->swap_device = 0;
	vfree(p->swap_map);
	p->swap_map = NULL;
	free_page((long) p->swap_lockmap);
	p->swap_lockmap = NULL;
	p->flags = 0;
	return 0;
}
1068 
/*
 * Written 01/25/92 by Simmule Turner, heavily changed by Linus.
 *
 * The swapon system call: activate the block device or regular file
 * named by 'specialfile' as a swap area.  The first page of the area
 * must carry the "SWAP-SPACE" signature and a bitmap of usable pages.
 * 'swap_flags' may carry SWAP_FLAG_PREFER plus an explicit priority;
 * otherwise the area gets a decreasing default priority.
 */
asmlinkage int sys_swapon(const char * specialfile, int swap_flags)
{
	struct swap_info_struct * p;
	struct inode * swap_inode;
	unsigned int type;
	int i, j, prev;
	int error;
	struct file filp;
	static int least_priority = 0;

	memset(&filp, 0, sizeof(filp));
	if (!suser())
		return -EPERM;
	/* Grab the first unused slot in swap_info[]. */
	p = swap_info;
	for (type = 0 ; type < nr_swapfiles ; type++,p++)
		if (!(p->flags & SWP_USED))
			break;
	if (type >= MAX_SWAPFILES)
		return -EPERM;
	if (type >= nr_swapfiles)
		nr_swapfiles = type+1;
	/* Reset all per-area state before filling it in. */
	p->flags = SWP_USED;
	p->swap_file = NULL;
	p->swap_device = 0;
	p->swap_map = NULL;
	p->swap_lockmap = NULL;
	p->lowest_bit = 0;
	p->highest_bit = 0;
	p->max = 1;
	p->next = -1;
	if (swap_flags & SWAP_FLAG_PREFER) {
		p->prio =
		  (swap_flags & SWAP_FLAG_PRIO_MASK)>>SWAP_FLAG_PRIO_SHIFT;
	} else {
		/* Unprioritized areas stack below everything added so far. */
		p->prio = --least_priority;
	}
	error = namei(specialfile,&swap_inode);
	if (error)
		goto bad_swap_2;
	p->swap_file = swap_inode;
	error = -EBUSY;
	/* Refuse if anyone else holds the inode (e.g. already swapped-on). */
	if (swap_inode->i_count != 1)
		goto bad_swap_2;
	error = -EINVAL;

	if (S_ISBLK(swap_inode->i_mode)) {
		p->swap_device = swap_inode->i_rdev;

		filp.f_inode = swap_inode;
		filp.f_mode = 3; /* read write */
		error = blkdev_open(swap_inode, &filp);
		p->swap_file = NULL;
		iput(swap_inode);
		if(error)
			goto bad_swap_2;
		error = -ENODEV;
		if (!p->swap_device)
			goto bad_swap;
		error = -EBUSY;
		/* Each device may back at most one swap area. */
		for (i = 0 ; i < nr_swapfiles ; i++) {
			if (i == type)
				continue;
			if (p->swap_device == swap_info[i].swap_device)
				goto bad_swap;
		}
	} else if (!S_ISREG(swap_inode->i_mode))
		goto bad_swap;
	p->swap_lockmap = (unsigned char *) get_free_page(GFP_USER);
	if (!p->swap_lockmap) {
		printk("Unable to start swapping: out of memory :-)\n");
		error = -ENOMEM;
		goto bad_swap;
	}
	/* Page 0 of the area holds the usable-page bitmap plus the
	 * "SWAP-SPACE" signature in its last 10 bytes. */
	read_swap_page(SWP_ENTRY(type,0), (char *) p->swap_lockmap);
	if (memcmp("SWAP-SPACE",p->swap_lockmap+PAGE_SIZE-10,10)) {
		printk("Unable to find swap-space signature\n");
		error = -EINVAL;
		goto bad_swap;
	}
	memset(p->swap_lockmap+PAGE_SIZE-10,0,10);
	/* Scan the bitmap: bit i set means swap page i is usable.
	 * Page 0 (the header itself) is never usable. */
	j = 0;
	p->lowest_bit = 0;
	p->highest_bit = 0;
	for (i = 1 ; i < 8*PAGE_SIZE ; i++) {
		if (test_bit(i,p->swap_lockmap)) {
			if (!p->lowest_bit)
				p->lowest_bit = i;
			p->highest_bit = i;
			p->max = i+1;
			j++;
		}
	}
	if (!j) {
		printk("Empty swap-file\n");
		error = -EINVAL;
		goto bad_swap;
	}
	/* Per-page reference counts; 0x80 marks a bad/unusable page. */
	p->swap_map = (unsigned char *) vmalloc(p->max);
	if (!p->swap_map) {
		error = -ENOMEM;
		goto bad_swap;
	}
	for (i = 1 ; i < p->max ; i++) {
		if (test_bit(i,p->swap_lockmap))
			p->swap_map[i] = 0;
		else
			p->swap_map[i] = 0x80;
	}
	p->swap_map[0] = 0x80;
	/* From here on the lockmap page is reused as the I/O lock bitmap. */
	memset(p->swap_lockmap,0,PAGE_SIZE);
	p->flags = SWP_WRITEOK;
	p->pages = j;
	nr_swap_pages += j;
	printk("Adding Swap: %dk swap-space\n",j<<(PAGE_SHIFT-10));

	/* insert swap space into swap_list: */
	prev = -1;
	for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
		if (p->prio >= swap_info[i].prio) {
			break;
		}
		prev = i;
	}
	p->next = i;
	if (prev < 0) {
		swap_list.head = swap_list.next = p - swap_info;
	} else {
		swap_info[prev].next = p - swap_info;
	}
	return 0;
bad_swap:
	/* Balance the blkdev_open() done above, if any. */
	if(filp.f_op && filp.f_op->release)
		filp.f_op->release(filp.f_inode,&filp);
bad_swap_2:
	/* free_page(NULL)/vfree(NULL)/iput(NULL) are safe no-ops here. */
	free_page((long) p->swap_lockmap);
	vfree(p->swap_map);
	iput(p->swap_file);
	p->swap_device = 0;
	p->swap_file = NULL;
	p->swap_map = NULL;
	p->swap_lockmap = NULL;
	p->flags = 0;
	return error;
}
1218 
1219 void si_swapinfo(struct sysinfo *val)
     /* [previous][next][first][last][top][bottom][index][help] */
1220 {
1221         unsigned int i, j;
1222 
1223         val->freeswap = val->totalswap = 0;
1224         for (i = 0; i < nr_swapfiles; i++) {
1225                 if ((swap_info[i].flags & SWP_WRITEOK) != SWP_WRITEOK)
1226                         continue;
1227                 for (j = 0; j < swap_info[i].max; ++j)
1228                         switch (swap_info[i].swap_map[j]) {
1229                                 case 128:
1230                                         continue;
1231                                 case 0:
1232                                         ++val->freeswap;
1233                                 default:
1234                                         ++val->totalswap;
1235                         }
1236         }
1237         val->freeswap <<= PAGE_SHIFT;
1238         val->totalswap <<= PAGE_SHIFT;
1239         return;
1240 }
1241 
/* Round x up to the next multiple of sizeof(long). */
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

/*
 * set up the free-area data structures:
 *   - mark all pages MAP_PAGE_RESERVED
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 */
/*
 * Boot-time initialization of the buddy allocator.  Carves the swap
 * cache, the mem_map[] array and the per-order free-area bitmaps out
 * of the memory starting at 'start_mem', and returns the first unused
 * address.  All pages start out MAP_PAGE_RESERVED; free_pages() later
 * releases the truly free ones.
 */
unsigned long free_area_init(unsigned long start_mem, unsigned long end_mem)
{
	mem_map_t * p;
	unsigned long mask = PAGE_MASK;
	int i;

	/*
	 * select nr of pages we try to keep free for important stuff
	 * with a minimum of 16 pages. This is totally arbitrary
	 */
	/* 1/64th of memory (pages >> 6), floor of 16 pages. */
	i = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT+6);
	if (i < 16)
		i = 16;
	min_free_pages = i;
	start_mem = init_swap_cache(start_mem, end_mem);
	/* mem_map[]: one use-count entry per physical page. */
	mem_map = (mem_map_t *) start_mem;
	p = mem_map + MAP_NR(end_mem);
	start_mem = LONG_ALIGN((unsigned long) p);
	while (p > mem_map)
		*--p = MAP_PAGE_RESERVED;

	/* One empty free list plus one buddy bitmap per order; each
	 * order's bitmap needs one bit per 2^(i+1)-page pair. */
	for (i = 0 ; i < NR_MEM_LISTS ; i++) {
		unsigned long bitmap_size;
		free_area_list[i].prev = free_area_list[i].next = &free_area_list[i];
		/* Widen the alignment mask to this order's block size. */
		mask += mask;
		end_mem = (end_mem + ~mask) & mask;
		bitmap_size = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT + i);
		bitmap_size = (bitmap_size + 7) >> 3;
		bitmap_size = LONG_ALIGN(bitmap_size);
		free_area_map[i] = (unsigned char *) start_mem;
		memset((void *) start_mem, 0, bitmap_size);
		start_mem += bitmap_size;
	}
	return start_mem;
}

/* [previous][next][first][last][top][bottom][index][help] */