root/mm/vmalloc.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. set_pgdir
  2. free_area_pages
  3. alloc_area_pages
  4. do_area
  5. vfree
  6. vmalloc
  7. vread

   1 /*
   2  *  linux/mm/vmalloc.c
   3  *
   4  *  Copyright (C) 1993  Linus Torvalds
   5  */
   6 
   7 #include <asm/system.h>
   8 #include <linux/config.h>
   9 
  10 #include <linux/signal.h>
  11 #include <linux/sched.h>
  12 #include <linux/head.h>
  13 #include <linux/kernel.h>
  14 #include <linux/errno.h>
  15 #include <linux/types.h>
  16 #include <linux/malloc.h>
  17 #include <asm/segment.h>
  18 
  19 struct vm_struct {
  20         unsigned long flags;
  21         void * addr;
  22         unsigned long size;
  23         struct vm_struct * next;
  24 };
  25 
  26 static struct vm_struct * vmlist = NULL;
  27 
  28 /* Just any arbitrary offset to the start of the vmalloc VM area: the
   30  * current 8MB value just means that there will be an 8MB "hole" after the
  30  * physical memory until the kernel virtual memory starts.  That means that
  31  * any out-of-bounds memory accesses will hopefully be caught.
   32  * The vmalloc() routine leaves a hole of 4kB between each vmalloced
  33  * area for the same reason. ;)
  34  */
  35 #define VMALLOC_OFFSET  (8*1024*1024)
  36 
  37 static inline void set_pgdir(unsigned long dindex, unsigned long value)
     /* [previous][next][first][last][top][bottom][index][help] */
  38 {
  39         struct task_struct * p;
  40 
  41         p = &init_task;
  42         do {
  43                 ((unsigned long *) p->tss.cr3)[dindex] = value;
  44                 p = p->next_task;
  45         } while (p != &init_task);
  46 }
  47 
/*
 * Free 'nr' consecutive page-table entries, starting at entry 'index' of
 * the page table referenced by kernel page-directory slot 'dindex'.
 * Any present page behind an entry is released with free_page().  If the
 * whole page table is empty afterwards, the table page itself is freed
 * and the directory slot is cleared in every task via set_pgdir().
 *
 * Always returns 0; the int return type only exists to match the
 * area_fn callback signature used by do_area().
 */
static int free_area_pages(unsigned long dindex, unsigned long index, unsigned long nr)
{
	unsigned long page, *pte;

	/* No page table mapped in this directory slot: nothing to free. */
	if (!(PAGE_PRESENT & (page = swapper_pg_dir[dindex])))
		return 0;
	page &= PAGE_MASK;
	pte = index + (unsigned long *) page;
	for ( ; nr > 0 ; nr--, pte++) {
		unsigned long pg = *pte;
		/* Clear the entry first, then free the page it named. */
		*pte = 0;
		if (!(pg & PAGE_PRESENT))
			continue;
		free_page(pg);
	}
	/* If every entry of the table is now zero, the table page itself
	 * can go: drop the directory slot in all tasks, clear the
	 * reserved bit we set at allocation time, and free the page. */
	pte = (unsigned long *) page;
	for (nr = 0 ; nr < 1024 ; nr++, pte++)
		if (*pte)
			return 0;
	set_pgdir(dindex,0);
	mem_map[MAP_NR(page)] &= ~MAP_PAGE_RESERVED;
	free_page(page);
	return 0;
}
  72 
/*
 * Allocate and map 'nr' pages into consecutive page-table entries,
 * starting at entry 'index' of the page table referenced by kernel
 * page-directory slot 'dindex'.  Creates the page table itself if the
 * slot is empty, propagating the new directory entry to all tasks via
 * set_pgdir().
 *
 * Returns 0 on success or -ENOMEM; on failure, already-mapped pages are
 * left in place (the caller, vmalloc(), cleans up with vfree()).
 */
static int alloc_area_pages(unsigned long dindex, unsigned long index, unsigned long nr)
{
	unsigned long page, *pte;

	page = swapper_pg_dir[dindex];
	if (!page) {
		page = get_free_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		/* get_free_page() may sleep, so someone else may have
		 * installed a table here meanwhile; use theirs. */
		if (swapper_pg_dir[dindex]) {
			free_page(page);
			page = swapper_pg_dir[dindex];
		} else {
			mem_map[MAP_NR(page)] |= MAP_PAGE_RESERVED;
			set_pgdir(dindex, page | PAGE_SHARED);
		}
	}
	page &= PAGE_MASK;
	pte = index + (unsigned long *) page;
	*pte = PAGE_SHARED;		/* remove a race with vfree() */
	for ( ; nr > 0 ; nr--, pte++) {
		/* get_free_page() may sleep on each iteration. */
		unsigned long pg = get_free_page(GFP_KERNEL);

		if (!pg)
			return -ENOMEM;
		*pte = pg | PAGE_SHARED;
	}
	return 0;
}
 102 
 103 static int do_area(void * addr, unsigned long size,
     /* [previous][next][first][last][top][bottom][index][help] */
 104         int (*area_fn)(unsigned long,unsigned long,unsigned long))
 105 {
 106         unsigned long nr, dindex, index;
 107 
 108         nr = size >> PAGE_SHIFT;
 109         dindex = (TASK_SIZE + (unsigned long) addr) >> 22;
 110         index = (((unsigned long) addr) >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 111         while (nr > 0) {
 112                 unsigned long i = PTRS_PER_PAGE - index;
 113 
 114                 if (i > nr)
 115                         i = nr;
 116                 if (area_fn(dindex, index, i))
 117                         return -1;
 118                 nr -= i;
 119                 index = 0;
 120                 dindex++;
 121         }
 122         return 0;
 123 }
 124 
 125 void vfree(void * addr)
     /* [previous][next][first][last][top][bottom][index][help] */
 126 {
 127         struct vm_struct **p, *tmp;
 128 
 129         if (!addr)
 130                 return;
 131         if ((PAGE_SIZE-1) & (unsigned long) addr) {
 132                 printk("Trying to vfree() bad address (%p)\n", addr);
 133                 return;
 134         }
 135         for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
 136                 if (tmp->addr == addr) {
 137                         *p = tmp->next;
 138                         do_area(tmp->addr, tmp->size, free_area_pages);
 139                         kfree(tmp);
 140                         return;
 141                 }
 142         }
 143         printk("Trying to vfree() nonexistent vm area (%p)\n", addr);
 144 }
 145 
 146 void * vmalloc(unsigned long size)
     /* [previous][next][first][last][top][bottom][index][help] */
 147 {
 148         void * addr;
 149         struct vm_struct **p, *tmp, *area;
 150 
 151         size = PAGE_ALIGN(size);
 152         if (!size || size > high_memory)
 153                 return NULL;
 154         area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
 155         if (!area)
 156                 return NULL;
 157         addr = (void *) ((high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1));
 158         area->size = size + PAGE_SIZE;
 159         area->next = NULL;
 160         for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
 161                 if (size + (unsigned long) addr <= (unsigned long) tmp->addr)
 162                         break;
 163                 addr = (void *) (tmp->size + (unsigned long) tmp->addr);
 164         }
 165         area->addr = addr;
 166         area->next = *p;
 167         *p = area;
 168         if (do_area(addr, size, alloc_area_pages)) {
 169                 vfree(addr);
 170                 return NULL;
 171         }
 172         return addr;
 173 }
 174 
 175 int vread(char *buf, char *addr, int count)
     /* [previous][next][first][last][top][bottom][index][help] */
 176 {
 177         struct vm_struct **p, *tmp;
 178         char *vaddr, *buf_start = buf;
 179         int n;
 180 
 181         for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
 182                 vaddr = (char *) tmp->addr;
 183                 while (addr < vaddr) {
 184                         if (count == 0)
 185                                 goto finished;
 186                         put_fs_byte('\0', buf++), addr++, count--;
 187                 }
 188                 n = tmp->size - PAGE_SIZE;
 189                 if (addr > vaddr)
 190                         n -= addr - vaddr;
 191                 while (--n >= 0) {
 192                         if (count == 0)
 193                                 goto finished;
 194                         put_fs_byte(*addr++, buf++), count--;
 195                 }
 196         }
 197 finished:
 198         return buf - buf_start;
 199 }

/* [previous][next][first][last][top][bottom][index][help] */