root/mm/vmalloc.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. set_pgdir
  2. free_area_pages
  3. alloc_area_pages
  4. do_area
  5. vfree
  6. vmalloc
  7. vread

   1 /*
   2  *  linux/mm/vmalloc.c
   3  *
   4  *  Copyright (C) 1993  Linus Torvalds
   5  */
   6 
   7 #include <asm/system.h>
   8 #include <linux/config.h>
   9 
  10 #include <linux/signal.h>
  11 #include <linux/sched.h>
  12 #include <linux/head.h>
  13 #include <linux/kernel.h>
  14 #include <linux/errno.h>
  15 #include <linux/types.h>
  16 #include <linux/malloc.h>
  17 #include <asm/segment.h>
  18 
/*
 * One allocated region in the vmalloc arena.  Live regions are kept on
 * 'vmlist', a singly linked list sorted by ascending address (vmalloc()
 * inserts in order; vread() relies on that ordering).
 */
struct vm_struct {
        unsigned long flags;            /* currently unused in this file */
        void * addr;                    /* start address of the mapped region */
        unsigned long size;             /* mapped size plus one trailing guard page */
        struct vm_struct * next;        /* next area, in address order */
};

/* Head of the address-sorted list of live vmalloc areas. */
static struct vm_struct * vmlist = NULL;
  27 
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routine leaves a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET  (8*1024*1024)
  36 
/*
 * Write 'value' into entry 'dindex' of every task's page directory.
 *
 * Kernel page tables are shared by all processes, so when vmalloc()/
 * vfree() install or remove a page-table page the change has to be
 * mirrored into each task's top-level directory, reached through
 * p->tss.cr3 (the i386 page-directory base).  The do/while walk
 * assumes next_task forms a circular list through init_task.
 */
static inline void set_pgdir(unsigned long dindex, unsigned long value)
{
        struct task_struct * p;

        p = &init_task;
        do {
                ((unsigned long *) p->tss.cr3)[dindex] = value;
                p = p->next_task;
        } while (p != &init_task);
}
  47 
  48 static int free_area_pages(unsigned long dindex, unsigned long index, unsigned long nr)
     /* [previous][next][first][last][top][bottom][index][help] */
  49 {
  50         unsigned long page, *pte;
  51 
  52         if (!(PAGE_PRESENT & (page = swapper_pg_dir[dindex])))
  53                 return 0;
  54         page &= PAGE_MASK;
  55         pte = index + (unsigned long *) page;
  56         do {
  57                 unsigned long pg = *pte;
  58                 *pte = 0;
  59                 if (pg & PAGE_PRESENT)
  60                         free_page(pg);
  61                 pte++;
  62         } while (--nr);
  63         pte = (unsigned long *) page;
  64         for (nr = 0 ; nr < 1024 ; nr++, pte++)
  65                 if (*pte)
  66                         return 0;
  67         set_pgdir(dindex,0);
  68         mem_map[MAP_NR(page)] = 1;
  69         free_page(page);
  70         invalidate();
  71         return 0;
  72 }
  73 
/*
 * Map 'nr' freshly allocated physical pages under page-directory slot
 * 'dindex', starting at page-table slot 'index'.
 * Returns 0 on success, -ENOMEM when a page (or the page table itself)
 * cannot be allocated.  On failure the entries installed so far are
 * left in place; the caller (vmalloc() via do_area()) undoes them with
 * vfree().
 */
static int alloc_area_pages(unsigned long dindex, unsigned long index, unsigned long nr)
{
        unsigned long page, *pte;

        page = swapper_pg_dir[dindex];
        if (!page) {
                /* Need a new page table.  NOTE(review): get_free_page(GFP_KERNEL)
                 * can presumably block, so someone else may have installed a
                 * table meanwhile -- re-check and back ours out if so. */
                page = get_free_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                if (swapper_pg_dir[dindex]) {
                        free_page(page);
                        page = swapper_pg_dir[dindex];
                } else {
                        /* Mark the table reserved (presumably so it cannot be
                         * reclaimed like an ordinary page) and mirror the new
                         * directory entry into every task's page directory. */
                        mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
                        set_pgdir(dindex, page | PAGE_SHARED);
                }
        }
        page &= PAGE_MASK;
        pte = index + (unsigned long *) page;
        /* NOTE(review): pre-storing a non-zero entry keeps free_area_pages()'s
         * empty-table scan from freeing this table while the allocations
         * below block -- confirm that is the race referred to here. */
        *pte = PAGE_SHARED;             /* remove a race with vfree() */
        do {
                unsigned long pg = get_free_page(GFP_KERNEL);

                if (!pg)
                        return -ENOMEM;
                *pte = pg | PAGE_SHARED;
                pte++;
        } while (--nr);
        invalidate();
        return 0;
}
 105 
 106 static int do_area(void * addr, unsigned long size,
     /* [previous][next][first][last][top][bottom][index][help] */
 107         int (*area_fn)(unsigned long,unsigned long,unsigned long))
 108 {
 109         unsigned long nr, dindex, index;
 110 
 111         nr = size >> PAGE_SHIFT;
 112         dindex = (TASK_SIZE + (unsigned long) addr) >> 22;
 113         index = (((unsigned long) addr) >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 114         while (nr > 0) {
 115                 unsigned long i = PTRS_PER_PAGE - index;
 116 
 117                 if (i > nr)
 118                         i = nr;
 119                 nr -= i;
 120                 if (area_fn(dindex, index, i))
 121                         return -1;
 122                 index = 0;
 123                 dindex++;
 124         }
 125         return 0;
 126 }
 127 
 128 void vfree(void * addr)
     /* [previous][next][first][last][top][bottom][index][help] */
 129 {
 130         struct vm_struct **p, *tmp;
 131 
 132         if (!addr)
 133                 return;
 134         if ((PAGE_SIZE-1) & (unsigned long) addr) {
 135                 printk("Trying to vfree() bad address (%p)\n", addr);
 136                 return;
 137         }
 138         for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
 139                 if (tmp->addr == addr) {
 140                         *p = tmp->next;
 141                         do_area(tmp->addr, tmp->size, free_area_pages);
 142                         kfree(tmp);
 143                         return;
 144                 }
 145         }
 146         printk("Trying to vfree() nonexistent vm area (%p)\n", addr);
 147 }
 148 
 149 void * vmalloc(unsigned long size)
     /* [previous][next][first][last][top][bottom][index][help] */
 150 {
 151         void * addr;
 152         struct vm_struct **p, *tmp, *area;
 153 
 154         size = PAGE_ALIGN(size);
 155         if (!size || size > high_memory)
 156                 return NULL;
 157         area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
 158         if (!area)
 159                 return NULL;
 160         addr = (void *) ((high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1));
 161         area->size = size + PAGE_SIZE;
 162         area->next = NULL;
 163         for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
 164                 if (size + (unsigned long) addr < (unsigned long) tmp->addr)
 165                         break;
 166                 addr = (void *) (tmp->size + (unsigned long) tmp->addr);
 167         }
 168         area->addr = addr;
 169         area->next = *p;
 170         *p = area;
 171         if (do_area(addr, size, alloc_area_pages)) {
 172                 vfree(addr);
 173                 return NULL;
 174         }
 175         return addr;
 176 }
 177 
 178 int vread(char *buf, char *addr, int count)
     /* [previous][next][first][last][top][bottom][index][help] */
 179 {
 180         struct vm_struct **p, *tmp;
 181         char *vaddr, *buf_start = buf;
 182         int n;
 183 
 184         for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
 185                 vaddr = (char *) tmp->addr;
 186                 while (addr < vaddr) {
 187                         if (count == 0)
 188                                 goto finished;
 189                         put_fs_byte('\0', buf++), addr++, count--;
 190                 }
 191                 n = tmp->size - PAGE_SIZE;
 192                 if (addr > vaddr)
 193                         n -= addr - vaddr;
 194                 while (--n >= 0) {
 195                         if (count == 0)
 196                                 goto finished;
 197                         put_fs_byte(*addr++, buf++), count--;
 198                 }
 199         }
 200 finished:
 201         return buf - buf_start;
 202 }

/* [previous][next][first][last][top][bottom][index][help] */