root/mm/kmalloc.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions:
  1. kmalloc_init
  2. get_order
  3. kmalloc
  4. kfree

   1 /*
   2  *  linux/mm/kmalloc.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds & Roger Wolff.
   5  *
   6  *  Written by R.E. Wolff Sept/Oct '93.
   7  *
   8  */
   9 
  10 /*
  11  * Modified by Alex Bligh (alex@cconcepts.co.uk) 4 Apr 1994 to use multiple
  12  * pages. So for 'page' throughout, read 'area'.
  13  */
  14 
  15 #include <linux/mm.h>
  16 #include <linux/delay.h>
  17 #include <asm/system.h>
  18 #include <asm/dma.h>
  19 
/* I want this low enough for a while to catch errors.
   I want this number to be increased in the near future:
        loadable device drivers should use this function to get memory */

/* Largest kmalloc-able request in kilobytes: the biggest area,
   PAGE_SIZE << (NUM_AREA_ORDERS-1) bytes, converted to KB. */
#define MAX_KMALLOC_K ((PAGE_SIZE<<(NUM_AREA_ORDERS-1))>>10)


/* This defines how many times we should try to allocate a free page before
   giving up. Normally this shouldn't happen at all. */
#define MAX_GET_FREE_PAGE_TRIES 4


/* Private flags stored in block_header.bh_flags.  The improbable bit
   patterns double as magic numbers for sanity-checking in kfree(). */

#define MF_USED 0xffaa0055      /* block handed out by kmalloc() */
#define MF_DMA  0xff00aa55      /* in-use block from the DMA-able freelist */
#define MF_FREE 0x0055ffaa      /* block sitting on a freelist */
  37 
  38 
/*
 * Much care has gone into making these routines in this file reentrant.
 *
 * The fancy bookkeeping of nbytesmalloced and the like is only used to
 * report to the user (oooohhhhh, aaaaahhhhh....) and is not
 * protected by cli(). (If that goes wrong, so what?)
 *
 * These routines restore the interrupt status to allow calling with ints
 * off.
 */
  49 
/*
 * A block header. This is in front of every malloc-block, whether free or not.
 */
struct block_header {
        unsigned long bh_flags;                 /* MF_USED / MF_DMA / MF_FREE magic */
        union {
                unsigned long ubh_length;       /* in use: byte size the caller asked for */
                struct block_header *fbh_next;  /* free: next block on the page's freelist */
        } vp;
};


/* Union accessors: only one member is meaningful at a time,
   selected by bh_flags. */
#define bh_length vp.ubh_length
#define bh_next   vp.fbh_next
#define BH(p) ((struct block_header *)(p))
  65 
  66 
/*
 * The page descriptor is at the front of every page that malloc has in use.
 */
struct page_descriptor {
        struct page_descriptor *next;   /* next page with free blocks of this size class */
        struct block_header *firstfree; /* head of this page's free-block list */
        int order;                      /* index into sizes[] for this page's block size */
        int nfree;                      /* free blocks remaining in this page */
};


/* Round a block pointer down to the page boundary, where the owning
   page descriptor lives.  (Multi-page areas hold a single block that
   starts right after the descriptor, so the mask still lands on it.) */
#define PAGE_DESC(p) ((struct page_descriptor *)(((unsigned long)(p)) & PAGE_MASK))
  79 
  80 
/*
 * A size descriptor describes a specific class of malloc sizes.
 * Each class of sizes has its own freelist.
 */
struct size_descriptor {
        struct page_descriptor *firstfree;      /* pages with free blocks, normal memory */
        struct page_descriptor *dmafree;        /* DMA-able memory */
        int size;               /* block size of this class, header included */
        int nblocks;            /* blocks carved out of one area of this class */

        /* The counters below are statistics only; by design they are not
           protected by cli() (see the comment at the top of the file). */
        int nmallocs;
        int nfrees;
        int nbytesmalloced;
        int npages;
        unsigned long gfporder; /* allocation order (log2 pages per area), as
                                   passed to __get_free_pages()/free_pages() */
};
  97 
/*
 * For now it is unsafe to allocate bucket sizes between n-16 and n where n is
 * 4096 * any power of two
 */
#if PAGE_SIZE == 4096
/* Initializer order: {firstfree, dmafree, size, nblocks,
   nmallocs, nfrees, nbytesmalloced, npages, gfporder} */
struct size_descriptor sizes[] =
{
        {NULL, NULL, 32, 127, 0, 0, 0, 0, 0},
        {NULL, NULL, 64, 63, 0, 0, 0, 0, 0},
        {NULL, NULL, 128, 31, 0, 0, 0, 0, 0},
        {NULL, NULL, 252, 16, 0, 0, 0, 0, 0},
        {NULL, NULL, 508, 8, 0, 0, 0, 0, 0},
        {NULL, NULL, 1020, 4, 0, 0, 0, 0, 0},
        {NULL, NULL, 2040, 2, 0, 0, 0, 0, 0},
        {NULL, NULL, 4096 - 16, 1, 0, 0, 0, 0, 0},
        {NULL, NULL, 8192 - 16, 1, 0, 0, 0, 0, 1},
        {NULL, NULL, 16384 - 16, 1, 0, 0, 0, 0, 2},
        {NULL, NULL, 32768 - 16, 1, 0, 0, 0, 0, 3},
        {NULL, NULL, 65536 - 16, 1, 0, 0, 0, 0, 4},
        {NULL, NULL, 131072 - 16, 1, 0, 0, 0, 0, 5},
        {NULL, NULL, 0, 0, 0, 0, 0, 0, 0}       /* terminator: size == 0 */
};
#elif PAGE_SIZE == 8192
struct size_descriptor sizes[] =
{
        {NULL, NULL, 64, 127, 0, 0, 0, 0, 0},
        {NULL, NULL, 128, 63, 0, 0, 0, 0, 0},
        {NULL, NULL, 248, 31, 0, 0, 0, 0, 0},
        {NULL, NULL, 504, 16, 0, 0, 0, 0, 0},
        {NULL, NULL, 1016, 8, 0, 0, 0, 0, 0},
        {NULL, NULL, 2040, 4, 0, 0, 0, 0, 0},
        {NULL, NULL, 4080, 2, 0, 0, 0, 0, 0},
        {NULL, NULL, 8192 - 32, 1, 0, 0, 0, 0, 0},
        {NULL, NULL, 16384 - 32, 1, 0, 0, 0, 0, 1},
        {NULL, NULL, 32768 - 32, 1, 0, 0, 0, 0, 2},
        {NULL, NULL, 65536 - 32, 1, 0, 0, 0, 0, 3},
        {NULL, NULL, 131072 - 32, 1, 0, 0, 0, 0, 4},
        {NULL, NULL, 262144 - 32, 1, 0, 0, 0, 0, 5},
        {NULL, NULL, 0, 0, 0, 0, 0, 0, 0}       /* terminator: size == 0 */
};
#else
#error you need to make a version for your pagesize
#endif

/* Convenience accessors into sizes[], indexed by size class ("order"). */
#define NBLOCKS(order)          (sizes[order].nblocks)
#define BLOCKSIZE(order)        (sizes[order].size)
#define AREASIZE(order)         (PAGE_SIZE<<(sizes[order].gfporder))
 145 
 146 
 147 long kmalloc_init(long start_mem, long end_mem)
     /* [previous][next][first][last][top][bottom][index][help] */
 148 {
 149         int order;
 150 
 151 /*
 152  * Check the static info array. Things will blow up terribly if it's
 153  * incorrect. This is a late "compile time" check.....
 154  */
 155         for (order = 0; BLOCKSIZE(order); order++) {
 156                 if ((NBLOCKS(order) * BLOCKSIZE(order) + sizeof(struct page_descriptor)) >
 157                     AREASIZE(order)) {
 158                         printk("Cannot use %d bytes out of %d in order = %d block mallocs\n",
 159                                (int) (NBLOCKS(order) * BLOCKSIZE(order) +
 160                                       sizeof(struct page_descriptor)),
 161                                 (int) AREASIZE(order),
 162                                BLOCKSIZE(order));
 163                         panic("This only happens if someone messes with kmalloc");
 164                 }
 165         }
 166         return start_mem;
 167 }
 168 
 169 
 170 
 171 int get_order(int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 172 {
 173         int order;
 174 
 175         /* Add the size of the header */
 176         size += sizeof(struct block_header);
 177         for (order = 0; BLOCKSIZE(order); order++)
 178                 if (size <= BLOCKSIZE(order))
 179                         return order;
 180         return -1;
 181 }
 182 
 183 void *kmalloc(size_t size, int priority)
     /* [previous][next][first][last][top][bottom][index][help] */
 184 {
 185         unsigned long flags;
 186         unsigned long max_addr, type;
 187         int order, i, sz;
 188         struct block_header *p;
 189         struct page_descriptor *page, **pg;
 190 
 191         order = get_order(size);
 192         if (order < 0) {
 193                 printk("kmalloc of too large a block (%d bytes).\n", (int) size);
 194                 return (NULL);
 195         }
 196 
 197         max_addr = ~0UL;
 198         type = MF_USED;
 199         pg = &sizes[order].firstfree;
 200         if (priority & GFP_DMA) {
 201                 max_addr = MAX_DMA_ADDRESS;
 202                 type = MF_DMA;
 203                 pg = &sizes[order].dmafree;
 204         }
 205 
 206         priority &= GFP_LEVEL_MASK;
 207 
 208 /* Sanity check... */
 209         if (intr_count && priority != GFP_ATOMIC) {
 210                 static int count = 0;
 211                 if (++count < 5) {
 212                         printk("kmalloc called nonatomically from interrupt %p\n",
 213                                __builtin_return_address(0));
 214                         priority = GFP_ATOMIC;
 215                 }
 216         }
 217 
 218         save_flags(flags);
 219         cli();
 220         page = *pg;
 221         if (page) {
 222                 p = page->firstfree;
 223                 if (p->bh_flags != MF_FREE) {
 224                         restore_flags(flags);
 225                         printk("Problem: block on freelist at %08lx isn't free.\n", (long) p);
 226                         return NULL;
 227                 }
 228                 goto found_it;
 229         }
 230 
 231         /* We need to get a new free page..... */
 232 
 233         /* sz is the size of the blocks we're dealing with */
 234         sz = BLOCKSIZE(order);
 235 
 236         /* This can be done with ints on: This is private to this invocation */
 237         page = (struct page_descriptor *) __get_free_pages(priority & GFP_LEVEL_MASK,
 238                         sizes[order].gfporder, max_addr);
 239 
 240         if (!page) {
 241                 static unsigned long last = 0;
 242                 if (last + 10 * HZ < jiffies) {
 243                         last = jiffies;
 244                         printk("Couldn't get a free page.....\n");
 245                 }
 246                 return NULL;
 247         }
 248         sizes[order].npages++;
 249 
 250         /* Loop for all but last block: */
 251         for (i = NBLOCKS(order), p = BH(page + 1); i > 1; i--, p = p->bh_next) {
 252                 p->bh_flags = MF_FREE;
 253                 p->bh_next = BH(((long) p) + sz);
 254         }
 255         /* Last block: */
 256         p->bh_flags = MF_FREE;
 257         p->bh_next = NULL;
 258 
 259         page->order = order;
 260         page->nfree = NBLOCKS(order);
 261         p = BH(page+1);
 262 
 263         /*
 264          * Now we're going to muck with the "global" freelist
 265          * for this size: this should be uninterruptible
 266          */
 267         cli();
 268         page->next = *pg;
 269         *pg = page;
 270 
 271 found_it:
 272         page->firstfree = p->bh_next;
 273         page->nfree--;
 274         if (!page->nfree)
 275                 *pg = page->next;
 276         restore_flags(flags);
 277         sizes[order].nmallocs++;
 278         sizes[order].nbytesmalloced += size;
 279         p->bh_flags = type;     /* As of now this block is officially in use */
 280         p->bh_length = size;
 281         return p + 1;           /* Pointer arithmetic: increments past header */
 282 }
 283 
 284 void kfree(void *ptr)
     /* [previous][next][first][last][top][bottom][index][help] */
 285 {
 286         int size;
 287         unsigned long flags;
 288         int order;
 289         register struct block_header *p;
 290         struct page_descriptor *page, **pg;
 291 
 292         if (!ptr)
 293                 return;
 294         p = ((struct block_header *) ptr) - 1;
 295         page = PAGE_DESC(p);
 296         order = page->order;
 297         pg = &sizes[order].firstfree;
 298         if (p->bh_flags == MF_DMA) {
 299                 p->bh_flags = MF_USED;
 300                 pg = &sizes[order].dmafree;
 301         }
 302 
 303         if ((order < 0) ||
 304             (order > sizeof(sizes) / sizeof(sizes[0])) ||
 305             (((long) (page->next)) & ~PAGE_MASK) ||
 306             (p->bh_flags != MF_USED)) {
 307                 printk("kfree of non-kmalloced memory: %p, next= %p, order=%d\n",
 308                        p, page->next, page->order);
 309                 return;
 310         }
 311         size = p->bh_length;
 312         p->bh_flags = MF_FREE;  /* As of now this block is officially free */
 313         save_flags(flags);
 314         cli();
 315         p->bh_next = page->firstfree;
 316         page->firstfree = p;
 317         page->nfree++;
 318 
 319         if (page->nfree == 1) {
 320 /* Page went from full to one free block: put it on the freelist. */
 321                 page->next = *pg;
 322                 *pg = page;
 323         }
 324 /* If page is completely free, free it */
 325         if (page->nfree == NBLOCKS(order)) {
 326                 for (;;) {
 327                         struct page_descriptor *tmp = *pg;
 328                         if (!tmp) {
 329                                 printk("Ooops. page %p doesn't show on freelist.\n", page);
 330                                 break;
 331                         }
 332                         if (tmp == page) {
 333                                 *pg = page->next;
 334                                 break;
 335                         }
 336                         pg = &tmp->next;
 337                 }
 338                 sizes[order].npages--;
 339                 free_pages((long) page, sizes[order].gfporder);
 340         }
 341         sizes[order].nfrees++;
 342         sizes[order].nbytesmalloced -= size;
 343         restore_flags(flags);
 344 }

/* [previous][next][first][last][top][bottom][index][help] */