root/mm/vmalloc.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. set_pgdir
  2. free_area_pages
  3. alloc_area_pages
  4. do_area
  5. vfree
  6. vmalloc
  7. vread

   1 /*
   2  *  linux/mm/vmalloc.c
   3  *
   4  *  Copyright (C) 1993  Linus Torvalds
   5  */
   6 
   7 #include <asm/system.h>
   8 #include <linux/config.h>
   9 
  10 #include <linux/signal.h>
  11 #include <linux/sched.h>
  12 #include <linux/head.h>
  13 #include <linux/kernel.h>
  14 #include <linux/errno.h>
  15 #include <linux/types.h>
  16 #include <linux/malloc.h>
  17 #include <asm/segment.h>
  18 
/*
 * Descriptor for one vmalloc()ed region.  All live regions are kept on
 * "vmlist", a singly linked list sorted by ascending start address
 * (vmalloc() inserts in order; vread() relies on the ordering).
 */
struct vm_struct {
        unsigned long flags;            /* not referenced in this file */
        void * addr;                    /* start of the virtual area */
        unsigned long size;             /* area size incl. the 4kB guard hole */
        struct vm_struct * next;        /* next area, by ascending addr */
};

/* Head of the sorted list of allocated vmalloc areas. */
static struct vm_struct * vmlist = NULL;
  27 
  28 /* Just any arbitrary offset to the start of the vmalloc VM area: the
   30  * current 8MB value just means that there will be an 8MB "hole" after the
  30  * physical memory until the kernel virtual memory starts.  That means that
  31  * any out-of-bounds memory accesses will hopefully be caught.
   33  * The vmalloc() routines leave a hole of 4kB between each vmalloced
  33  * area for the same reason. ;)
  34  */
  35 #define VMALLOC_OFFSET  (8*1024*1024)
  36 
  37 static inline void set_pgdir(unsigned long dindex, unsigned long value)
     /* [previous][next][first][last][top][bottom][index][help] */
  38 {
  39         struct task_struct * p;
  40 
  41         p = &init_task;
  42         do {
  43                 ((unsigned long *) p->tss.cr3)[dindex] = value;
  44                 p = p->next_task;
  45         } while (p != &init_task);
  46 }
  47 
/*
 * Free "nr" consecutive page-table entries, starting at entry "index"
 * of the kernel page table installed at page-directory slot "dindex",
 * releasing any present pages.  If the whole page table ends up empty,
 * the page-table page itself is released and the directory slot is
 * cleared in every task via set_pgdir().
 *
 * Always returns 0 (do_area() treats non-zero as failure).
 */
static int free_area_pages(unsigned long dindex, unsigned long index, unsigned long nr)
{
        unsigned long page, *pte;

        /* No page table present at this directory slot: nothing to do. */
        if (!(PAGE_PRESENT & (page = swapper_pg_dir[dindex])))
                return 0;
        page &= PAGE_MASK;
        pte = index + (unsigned long *) page;
        do {
                unsigned long pg = *pte;
                /* Clear the entry before freeing the page it mapped. */
                *pte = 0;
                if (pg & PAGE_PRESENT)
                        free_page(pg);
                pte++;
        } while (--nr);
        /* If any entry of the page table is still in use, keep it. */
        pte = (unsigned long *) page;
        for (nr = 0 ; nr < 1024 ; nr++, pte++)
                if (*pte)
                        return 0;
        /* Page table is empty: unhook it from all page directories
         * first, then free it.  It was marked MAP_PAGE_RESERVED when
         * allocated (see alloc_area_pages()); set its mem_map count
         * back to 1 so free_page() actually releases it. */
        set_pgdir(dindex,0);
        mem_map[MAP_NR(page)] = 1;
        free_page(page);
        return 0;
}
  72 
/*
 * Back "nr" consecutive page-table entries (starting at entry "index"
 * of the page table at directory slot "dindex") with freshly allocated
 * physical pages mapped PAGE_SHARED, allocating the page table itself
 * if the slot is empty.
 *
 * Returns 0 on success or -ENOMEM.  On failure, entries filled in so
 * far are left in place; the caller cleans up (vmalloc() calls vfree()).
 */
static int alloc_area_pages(unsigned long dindex, unsigned long index, unsigned long nr)
{
        unsigned long page, *pte;

        page = swapper_pg_dir[dindex];
        if (!page) {
                /* No page table yet: allocate one.  GFP_KERNEL may
                 * block, so re-check the slot afterwards in case
                 * someone else installed a table meanwhile. */
                page = get_free_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                if (swapper_pg_dir[dindex]) {
                        /* Lost the race: use the winner's table. */
                        free_page(page);
                        page = swapper_pg_dir[dindex];
                } else {
                        mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
                        set_pgdir(dindex, page | PAGE_SHARED);
                }
        }
        page &= PAGE_MASK;
        pte = index + (unsigned long *) page;
        *pte = PAGE_SHARED;             /* remove a race with vfree() */
        do {
                /* May block; partial progress is undone by the caller. */
                unsigned long pg = get_free_page(GFP_KERNEL);

                if (!pg)
                        return -ENOMEM;
                *pte = pg | PAGE_SHARED;
                pte++;
        } while (--nr);
        return 0;
}
 103 
 104 static int do_area(void * addr, unsigned long size,
     /* [previous][next][first][last][top][bottom][index][help] */
 105         int (*area_fn)(unsigned long,unsigned long,unsigned long))
 106 {
 107         unsigned long nr, dindex, index;
 108 
 109         nr = size >> PAGE_SHIFT;
 110         dindex = (TASK_SIZE + (unsigned long) addr) >> 22;
 111         index = (((unsigned long) addr) >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 112         while (nr > 0) {
 113                 unsigned long i = PTRS_PER_PAGE - index;
 114 
 115                 if (i > nr)
 116                         i = nr;
 117                 nr -= i;
 118                 if (area_fn(dindex, index, i))
 119                         return -1;
 120                 index = 0;
 121                 dindex++;
 122         }
 123         return 0;
 124 }
 125 
 126 void vfree(void * addr)
     /* [previous][next][first][last][top][bottom][index][help] */
 127 {
 128         struct vm_struct **p, *tmp;
 129 
 130         if (!addr)
 131                 return;
 132         if ((PAGE_SIZE-1) & (unsigned long) addr) {
 133                 printk("Trying to vfree() bad address (%p)\n", addr);
 134                 return;
 135         }
 136         for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
 137                 if (tmp->addr == addr) {
 138                         *p = tmp->next;
 139                         do_area(tmp->addr, tmp->size, free_area_pages);
 140                         kfree(tmp);
 141                         return;
 142                 }
 143         }
 144         printk("Trying to vfree() nonexistent vm area (%p)\n", addr);
 145 }
 146 
 147 void * vmalloc(unsigned long size)
     /* [previous][next][first][last][top][bottom][index][help] */
 148 {
 149         void * addr;
 150         struct vm_struct **p, *tmp, *area;
 151 
 152         size = PAGE_ALIGN(size);
 153         if (!size || size > high_memory)
 154                 return NULL;
 155         area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
 156         if (!area)
 157                 return NULL;
 158         addr = (void *) ((high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1));
 159         area->size = size + PAGE_SIZE;
 160         area->next = NULL;
 161         for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
 162                 if (size + (unsigned long) addr < (unsigned long) tmp->addr)
 163                         break;
 164                 addr = (void *) (tmp->size + (unsigned long) tmp->addr);
 165         }
 166         area->addr = addr;
 167         area->next = *p;
 168         *p = area;
 169         if (do_area(addr, size, alloc_area_pages)) {
 170                 vfree(addr);
 171                 return NULL;
 172         }
 173         return addr;
 174 }
 175 
 176 int vread(char *buf, char *addr, int count)
     /* [previous][next][first][last][top][bottom][index][help] */
 177 {
 178         struct vm_struct **p, *tmp;
 179         char *vaddr, *buf_start = buf;
 180         int n;
 181 
 182         for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
 183                 vaddr = (char *) tmp->addr;
 184                 while (addr < vaddr) {
 185                         if (count == 0)
 186                                 goto finished;
 187                         put_fs_byte('\0', buf++), addr++, count--;
 188                 }
 189                 n = tmp->size - PAGE_SIZE;
 190                 if (addr > vaddr)
 191                         n -= addr - vaddr;
 192                 while (--n >= 0) {
 193                         if (count == 0)
 194                                 goto finished;
 195                         put_fs_byte(*addr++, buf++), count--;
 196                 }
 197         }
 198 finished:
 199         return buf - buf_start;
 200 }

/* [previous][next][first][last][top][bottom][index][help] */