root/mm/vmalloc.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions:
  1. set_pgdir
  2. free_area_pages
  3. alloc_area_pages
  4. do_area
  5. vfree
  6. vmalloc
  7. vread

   1 /*
   2  *  linux/mm/vmalloc.c
   3  *
   4  *  Copyright (C) 1993  Linus Torvalds
   5  */
   6 
   7 #include <asm/system.h>
   8 
   9 #include <linux/signal.h>
  10 #include <linux/sched.h>
  11 #include <linux/head.h>
  12 #include <linux/kernel.h>
  13 #include <linux/errno.h>
  14 #include <linux/types.h>
  15 #include <linux/malloc.h>
  16 #include <asm/segment.h>
  17 
/*
 * Descriptor for one vmalloc()ed region.  All regions are kept on the
 * singly-linked "vmlist", sorted by ascending virtual address.
 */
struct vm_struct {
        unsigned long flags;            /* unused in this file -- TODO confirm users elsewhere */
        void * addr;                    /* starting virtual address of the area */
        unsigned long size;             /* size in bytes, INCLUDING the one-page guard hole */
        struct vm_struct * next;        /* next area in the address-sorted list */
};
  24 
static struct vm_struct * vmlist = NULL;        /* address-sorted list of all vmalloc()ed areas */
  26 
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be a 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routine leaves a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET  (8*1024*1024)
  35 
/*
 * Store 'value' into page-directory entry 'dindex' of every task in the
 * system.  The kernel part of the address space is shared by all tasks,
 * so a change to a kernel page-directory slot must be mirrored into each
 * per-task page directory (reached through tss.cr3).  Walks the circular
 * task list starting and ending at init_task.
 */
static inline void set_pgdir(unsigned long dindex, unsigned long value)
{
        struct task_struct * p;

        p = &init_task;
        do {
                ((unsigned long *) p->tss.cr3)[dindex] = value;
                p = p->next_task;
        } while (p != &init_task);
}
  46 
/*
 * Free 'nr' consecutive page-table entries starting at entry 'index' of
 * the page table referenced by page-directory slot 'dindex'.  If the
 * whole page table is empty afterwards, the page-table page itself is
 * released too (clearing the directory slot in every task first).
 * Always returns 0, so it can serve as a do_area() callback.
 */
static int free_area_pages(unsigned long dindex, unsigned long index, unsigned long nr)
{
        unsigned long page, *pte;

        /* No page table present here: nothing to free. */
        if (!(PAGE_PRESENT & (page = swapper_pg_dir[dindex])))
                return 0;
        page &= PAGE_MASK;
        pte = index + (unsigned long *) page;
        do {
                unsigned long pg = *pte;
                *pte = 0;               /* clear the entry before releasing the page */
                if (pg & PAGE_PRESENT)
                        free_page(pg);
                pte++;
        } while (--nr);
        /* Keep the page table if any entry in it is still in use. */
        pte = (unsigned long *) page;
        for (nr = 0 ; nr < 1024 ; nr++, pte++)
                if (*pte)
                        return 0;
        set_pgdir(dindex,0);
        /* The table was marked MAP_PAGE_RESERVED by alloc_area_pages();
         * reset its use count to 1 so free_page() really releases it. */
        mem_map[MAP_NR(page)] = 1;
        free_page(page);
        invalidate();           /* flush the TLB */
        return 0;
}
  72 
/*
 * Map 'nr' freshly allocated physical pages at consecutive entries,
 * starting at 'index', of the page table referenced by page-directory
 * slot 'dindex' -- allocating the page table itself first if none
 * exists.  Serves as the do_area() callback for vmalloc().
 * Returns 0 on success, -ENOMEM if any allocation fails (pages already
 * mapped are left in place for vfree() to clean up).
 */
static int alloc_area_pages(unsigned long dindex, unsigned long index, unsigned long nr)
{
        unsigned long page, *pte;

        page = swapper_pg_dir[dindex];
        if (!page) {
                /* No page table yet: allocate one.  Re-check the slot
                 * afterwards in case someone else installed a table
                 * while get_free_page() was working. */
                page = get_free_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                if (swapper_pg_dir[dindex]) {
                        free_page(page);        /* lost the race: use the existing table */
                        page = swapper_pg_dir[dindex];
                } else {
                        /* Mark the table reserved so ordinary free_page()
                         * calls cannot release it (free_area_pages resets
                         * the count before freeing it for real). */
                        mem_map[MAP_NR(page)] = MAP_PAGE_RESERVED;
                        set_pgdir(dindex, page | PAGE_SHARED);
                }
        }
        page &= PAGE_MASK;
        pte = index + (unsigned long *) page;
        *pte = PAGE_SHARED;             /* remove a race with vfree() */
        do {
                unsigned long pg = get_free_page(GFP_KERNEL);

                if (!pg)
                        return -ENOMEM;
                *pte = pg | PAGE_SHARED;
                pte++;
        } while (--nr);
        invalidate();           /* flush the TLB */
        return 0;
}
 104 
 105 static int do_area(void * addr, unsigned long size,
     /* [previous][next][first][last][top][bottom][index][help] */
 106         int (*area_fn)(unsigned long,unsigned long,unsigned long))
 107 {
 108         unsigned long nr, dindex, index;
 109 
 110         nr = size >> PAGE_SHIFT;
 111         dindex = (TASK_SIZE + (unsigned long) addr) >> 22;
 112         index = (((unsigned long) addr) >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
 113         while (nr > 0) {
 114                 unsigned long i = PTRS_PER_PAGE - index;
 115 
 116                 if (i > nr)
 117                         i = nr;
 118                 nr -= i;
 119                 if (area_fn(dindex, index, i))
 120                         return -1;
 121                 index = 0;
 122                 dindex++;
 123         }
 124         return 0;
 125 }
 126 
 127 void vfree(void * addr)
     /* [previous][next][first][last][top][bottom][index][help] */
 128 {
 129         struct vm_struct **p, *tmp;
 130 
 131         if (!addr)
 132                 return;
 133         if ((PAGE_SIZE-1) & (unsigned long) addr) {
 134                 printk("Trying to vfree() bad address (%p)\n", addr);
 135                 return;
 136         }
 137         for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
 138                 if (tmp->addr == addr) {
 139                         *p = tmp->next;
 140                         do_area(tmp->addr, tmp->size, free_area_pages);
 141                         kfree(tmp);
 142                         return;
 143                 }
 144         }
 145         printk("Trying to vfree() nonexistent vm area (%p)\n", addr);
 146 }
 147 
 148 void * vmalloc(unsigned long size)
     /* [previous][next][first][last][top][bottom][index][help] */
 149 {
 150         void * addr;
 151         struct vm_struct **p, *tmp, *area;
 152 
 153         size = PAGE_ALIGN(size);
 154         if (!size || size > high_memory)
 155                 return NULL;
 156         area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
 157         if (!area)
 158                 return NULL;
 159         addr = (void *) ((high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1));
 160         area->size = size + PAGE_SIZE;
 161         area->next = NULL;
 162         for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
 163                 if (size + (unsigned long) addr < (unsigned long) tmp->addr)
 164                         break;
 165                 addr = (void *) (tmp->size + (unsigned long) tmp->addr);
 166         }
 167         area->addr = addr;
 168         area->next = *p;
 169         *p = area;
 170         if (do_area(addr, size, alloc_area_pages)) {
 171                 vfree(addr);
 172                 return NULL;
 173         }
 174         return addr;
 175 }
 176 
 177 int vread(char *buf, char *addr, int count)
     /* [previous][next][first][last][top][bottom][index][help] */
 178 {
 179         struct vm_struct **p, *tmp;
 180         char *vaddr, *buf_start = buf;
 181         int n;
 182 
 183         for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
 184                 vaddr = (char *) tmp->addr;
 185                 while (addr < vaddr) {
 186                         if (count == 0)
 187                                 goto finished;
 188                         put_fs_byte('\0', buf++), addr++, count--;
 189                 }
 190                 n = tmp->size - PAGE_SIZE;
 191                 if (addr > vaddr)
 192                         n -= addr - vaddr;
 193                 while (--n >= 0) {
 194                         if (count == 0)
 195                                 goto finished;
 196                         put_fs_byte(*addr++, buf++), count--;
 197                 }
 198         }
 199 finished:
 200         return buf - buf_start;
 201 }

/* [previous][next][first][last][top][bottom][index][help] */