mm/vmalloc.c

DEFINITIONS

This source file includes the following definitions:
  1. set_pgdir
  2. clear_pgdir
  3. free_area_pages
  4. alloc_area_pages
  5. do_area
  6. vfree
  7. vmalloc
  8. vread

/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 */

#include <asm/system.h>

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/malloc.h>
#include <asm/segment.h>

struct vm_struct {
        unsigned long flags;
        void * addr;
        unsigned long size;
        struct vm_struct * next;
};

static struct vm_struct * vmlist = NULL;

/* Just an arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET  (8*1024*1024)

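/*
 * Worked example, assuming a machine with high_memory == 0x1000000 (16MB):
 * vmalloc() below would place the start of the vmalloc area at
 *
 *      (0x1000000 + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1) == 0x1800000
 *
 * i.e. 24MB, leaving the 8MB "hole" described above between the end of
 * physical memory and the first vmalloc'ed area.
 */
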
static inline void set_pgdir(unsigned long dindex, pte_t * page_table)
{
        struct task_struct * p;

        p = &init_task;
        do {
                pgd_set(PAGE_DIR_OFFSET(p,0) + dindex, page_table);
                p = p->next_task;
        } while (p != &init_task);
}

static inline void clear_pgdir(unsigned long dindex)
{
        struct task_struct * p;

        p = &init_task;
        do {
                pgd_clear(PAGE_DIR_OFFSET(p,0) + dindex);
                p = p->next_task;
        } while (p != &init_task);
}

static int free_area_pages(unsigned long dindex, unsigned long index, unsigned long nr)
{
        pgd_t * dir;
        pte_t * page_table;
        unsigned long page;

        dir = swapper_pg_dir + dindex;
        if (pgd_none(*dir))
                return 0;
        if (pgd_bad(*dir)) {
                printk("bad page directory entry in free_area_pages: %08lx\n", pgd_val(*dir));
                pgd_clear(dir);
                return 0;
        }
        page = pgd_page(*dir);
        page_table = index + (pte_t *) page;
        do {
                pte_t pte = *page_table;
                pte_clear(page_table);
                if (pte_present(pte))
                        free_page(pte_page(pte));
                page_table++;
        } while (--nr);
        page_table = (pte_t *) page;
        for (nr = 0 ; nr < PTRS_PER_PAGE ; nr++, page_table++)
                if (!pte_none(*page_table))
                        return 0;
        clear_pgdir(dindex);
        mem_map[MAP_NR(page)] = 1;
        free_page(page);
        invalidate();
        return 0;
}

static int alloc_area_pages(unsigned long dindex, unsigned long index, unsigned long nr)
{
        pgd_t *dir;
        pte_t *page_table;

        dir = swapper_pg_dir + dindex;
        if (pgd_none(*dir)) {
                unsigned long page = get_free_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                if (!pgd_none(*dir)) {
                        free_page(page);
                } else {
                        mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
                        set_pgdir(dindex, (pte_t *) page);
                }
        }
        if (pgd_bad(*dir)) {
                printk("Bad page dir entry in alloc_area_pages (%08lx)\n", pgd_val(*dir));
                return -ENOMEM;
        }
        page_table = index + (pte_t *) pgd_page(*dir);
        /*
         * use a temporary page-table entry to remove a race with
         * vfree(): it mustn't free the page table from under us
         * if we sleep in get_free_page()
         */
        *page_table = BAD_PAGE;
        do {
                unsigned long pg = get_free_page(GFP_KERNEL);

                if (!pg)
                        return -ENOMEM;
                *page_table = mk_pte(pg, PAGE_KERNEL);
                page_table++;
        } while (--nr);
        invalidate();
        return 0;
}

static int do_area(void * addr, unsigned long size,
        int (*area_fn)(unsigned long,unsigned long,unsigned long))
{
        unsigned long nr, dindex, index;

        nr = size >> PAGE_SHIFT;
        dindex = (TASK_SIZE + (unsigned long) addr) >> 22;
        index = (((unsigned long) addr) >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
        while (nr > 0) {
                unsigned long i = PTRS_PER_PAGE - index;

                if (i > nr)
                        i = nr;
                nr -= i;
                if (area_fn(dindex, index, i))
                        return -1;
                index = 0;
                dindex++;
        }
        return 0;
}

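/*
 * Worked example of the index arithmetic above, assuming an i386 with
 * PAGE_SHIFT == 12, PTRS_PER_PAGE == 1024 and TASK_SIZE == 0xc0000000
 * (these values are assumptions for illustration only).  For a vmalloc
 * address of 0x1800000:
 *
 *      dindex = (0xc0000000 + 0x1800000) >> 22         == 0x306
 *      index  = (0x1800000 >> 12) & (1024-1)           == 0
 *
 * so the walk starts at page-directory slot 0x306, page-table slot 0,
 * and do_area() hands at most one 4MB page-table's worth of pages to
 * area_fn() before advancing dindex.
 */
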
void vfree(void * addr)
{
        struct vm_struct **p, *tmp;

        if (!addr)
                return;
        if ((PAGE_SIZE-1) & (unsigned long) addr) {
                printk("Trying to vfree() bad address (%p)\n", addr);
                return;
        }
        for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
                if (tmp->addr == addr) {
                        *p = tmp->next;
                        do_area(tmp->addr, tmp->size, free_area_pages);
                        kfree(tmp);
                        return;
                }
        }
        printk("Trying to vfree() nonexistent vm area (%p)\n", addr);
}

void * vmalloc(unsigned long size)
{
        void * addr;
        struct vm_struct **p, *tmp, *area;

        size = PAGE_ALIGN(size);
        if (!size || size > high_memory)
                return NULL;
        area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
        if (!area)
                return NULL;
        addr = (void *) ((high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1));
        area->size = size + PAGE_SIZE;
        area->next = NULL;
        for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
                if (size + (unsigned long) addr < (unsigned long) tmp->addr)
                        break;
                addr = (void *) (tmp->size + (unsigned long) tmp->addr);
        }
        area->addr = addr;
        area->next = *p;
        *p = area;
        if (do_area(addr, size, alloc_area_pages)) {
                vfree(addr);
                return NULL;
        }
        return addr;
}

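/*
 * Usage sketch (a minimal illustration, not taken from this file): a
 * caller needing a large, virtually contiguous buffer could pair
 * vmalloc() and vfree() roughly as follows; "buf" and the 64kB size
 * are made up for the example.
 *
 *      char * buf = vmalloc(64*1024);
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      vfree(buf);
 *
 * Note that the allocation path uses GFP_KERNEL pages and may sleep,
 * so this is not usable from interrupt context.
 */
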
int vread(char *buf, char *addr, int count)
{
        struct vm_struct **p, *tmp;
        char *vaddr, *buf_start = buf;
        int n;

        for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
                vaddr = (char *) tmp->addr;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        put_fs_byte('\0', buf++), addr++, count--;
                }
                n = tmp->size - PAGE_SIZE;
                if (addr > vaddr)
                        n -= addr - vaddr;
                while (--n >= 0) {
                        if (count == 0)
                                goto finished;
                        put_fs_byte(*addr++, buf++), count--;
                }
        }
finished:
        return buf - buf_start;
}
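
/*
 * Usage sketch for vread() (an assumed caller, not part of this file):
 * it copies up to "count" bytes starting at kernel address "addr" into
 * the user-space buffer "buf" with put_fs_byte(), zero-filling any
 * addresses that fall before or between the registered vmalloc areas,
 * and returns the number of bytes written.  A caller exporting kernel
 * virtual memory to a user process might do, for a user pointer ubuf
 * (the names ubuf, kaddr and len are made up for the example):
 *
 *      int copied = vread(ubuf, (char *) kaddr, len);
 */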
