root/mm/kmalloc.c


DEFINITIONS

This source file includes the following definitions.
  1. kmalloc_init
  2. get_order
  3. kmalloc
  4. kfree

   1 /*
   2  *  linux/mm/kmalloc.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds & Roger Wolff.
   5  *
   6  *  Written by R.E. Wolff Sept/Oct '93.
   7  *
   8  */
   9 
  10 /*
  11  * Modified by Alex Bligh (alex@cconcepts.co.uk) 4 Apr 1994 to use multiple
  12  * pages. So for 'page' throughout, read 'area'.
  13  *
  14  * Largely rewritten.. Linus
  15  */
  16 
  17 #include <linux/mm.h>
  18 #include <linux/delay.h>
  19 #include <asm/system.h>
  20 #include <asm/dma.h>
  21 
   22 /* Define this if you want slow routines that try to trip errors */
  23 #undef SADISTIC_KMALLOC
  24 
  25 /* Private flags. */
  26 
  27 #define MF_USED 0xffaa0055
  28 #define MF_DMA  0xff00aa55
  29 #define MF_FREE 0x0055ffaa
  30 
  31 
  32 /*
  33  * Much care has gone into making these routines in this file reentrant.
  34  *
   35  * The fancy bookkeeping of nbytesmalloced and the like is only used to
   36  * report statistics to the user (oooohhhhh, aaaaahhhhh....) and is not
   37  * protected by cli(). (If that goes wrong, so what?)
  38  *
  39  * These routines restore the interrupt status to allow calling with ints
  40  * off.
  41  */
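
The pattern referred to above appears at every critical section in this file: save the caller's interrupt state, disable interrupts, then restore the saved state rather than unconditionally re-enabling them. A minimal sketch of the idiom, using the save_flags()/cli()/restore_flags() macros from <asm/system.h> (the helper name is invented):

    #include <asm/system.h>

    /* Hypothetical helper showing the locking idiom used throughout this
     * file.  Because the old flags are restored instead of calling sti(),
     * it is safe to call whether interrupts were on or off on entry. */
    static void example_critical_section(void)
    {
            unsigned long flags;

            save_flags(flags);
            cli();
            /* ... manipulate a freelist here ... */
            restore_flags(flags);
    }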
  42 
  43 /*
  44  * A block header. This is in front of every malloc-block, whether free or not.
  45  */
  46 struct block_header {
  47         unsigned long bh_flags;
  48         union {
  49                 unsigned long ubh_length;
  50                 struct block_header *fbh_next;
  51         } vp;
  52 };
  53 
  54 
  55 #define bh_length vp.ubh_length
  56 #define bh_next   vp.fbh_next
  57 #define BH(p) ((struct block_header *)(p))
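
The union keeps the header down to two words: a block needs bh_length only while it is in use and the freelist link only while it is free, never both at once. A small user-space illustration of the same overlay (the sk_ names and the reuse of the flag values are for illustration only):

    #include <stdio.h>

    #define SK_FREE 0x0055ffaa
    #define SK_USED 0xffaa0055

    struct sk_header {
            unsigned long flags;
            union {
                    unsigned long length;       /* valid while SK_USED */
                    struct sk_header *next;     /* valid while SK_FREE */
            } vp;
    };

    int main(void)
    {
            struct sk_header h;

            h.flags = SK_USED;
            h.vp.length = 200;          /* remembers the requested size */
            printf("used block, length %lu\n", h.vp.length);

            h.flags = SK_FREE;
            h.vp.next = NULL;           /* same storage now links the freelist */
            printf("free block, next %p\n", (void *) h.vp.next);
            return 0;
    }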
  58 
  59 
  60 /*
  61  * The page descriptor is at the front of every page that malloc has in use.
  62  */
  63 struct page_descriptor {
  64         struct page_descriptor *next;
  65         struct block_header *firstfree;
  66         int order;
  67         int nfree;
  68 };
  69 
  70 
  71 #define PAGE_DESC(p) ((struct page_descriptor *)(((unsigned long)(p)) & PAGE_MASK))
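
PAGE_DESC() is how kfree() later finds the bookkeeping for an arbitrary block pointer: clearing the offset bits of the address lands on the start of the page, which is where the page_descriptor was placed. A user-space sketch of the same masking, assuming a 4096-byte page:

    #include <stdio.h>

    #define SK_PAGE_SIZE 4096UL
    #define SK_PAGE_MASK (~(SK_PAGE_SIZE - 1))

    int main(void)
    {
            /* Pretend this address points at a block header inside a page. */
            unsigned long block = 0x00123abcUL;

            /* Masking off the low bits gives the start of the enclosing
             * page, i.e. where the page_descriptor lives. */
            unsigned long page = block & SK_PAGE_MASK;

            printf("block %#lx -> page descriptor at %#lx\n", block, page);
            return 0;
    }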
  72 
  73 
  74 /*
  75  * A size descriptor describes a specific class of malloc sizes.
  76  * Each class of sizes has its own freelist.
  77  */
  78 struct size_descriptor {
  79         struct page_descriptor *firstfree;
  80         struct page_descriptor *dmafree;        /* DMA-able memory */
  81         int size;
  82         int nblocks;
  83 
  84         int nmallocs;
  85         int nfrees;
  86         int nbytesmalloced;
  87         int npages;
  88         unsigned long gfporder; /* number of pages in the area required */
  89 };
  90 
  91 /*
  92  * For now it is unsafe to allocate bucket sizes between n and n-16 where n is
  93  * 4096 * any power of two
  94  */
  95 #if PAGE_SIZE == 4096
  96 struct size_descriptor sizes[] =
  97 {
  98         {NULL, NULL, 32, 127, 0, 0, 0, 0, 0},
  99         {NULL, NULL, 64, 63, 0, 0, 0, 0, 0},
 100         {NULL, NULL, 128, 31, 0, 0, 0, 0, 0},
 101         {NULL, NULL, 252, 16, 0, 0, 0, 0, 0},
 102         {NULL, NULL, 508, 8, 0, 0, 0, 0, 0},
 103         {NULL, NULL, 1020, 4, 0, 0, 0, 0, 0},
 104         {NULL, NULL, 2040, 2, 0, 0, 0, 0, 0},
 105         {NULL, NULL, 4096 - 16, 1, 0, 0, 0, 0, 0},
 106         {NULL, NULL, 8192 - 16, 1, 0, 0, 0, 0, 1},
 107         {NULL, NULL, 16384 - 16, 1, 0, 0, 0, 0, 2},
 108         {NULL, NULL, 32768 - 16, 1, 0, 0, 0, 0, 3},
 109         {NULL, NULL, 65536 - 16, 1, 0, 0, 0, 0, 4},
 110         {NULL, NULL, 131072 - 16, 1, 0, 0, 0, 0, 5},
 111         {NULL, NULL, 0, 0, 0, 0, 0, 0, 0}
 112 };
 113 #elif PAGE_SIZE == 8192
 114 struct size_descriptor sizes[] =
 115 {
 116         {NULL, NULL, 64, 127, 0, 0, 0, 0, 0},
 117         {NULL, NULL, 128, 63, 0, 0, 0, 0, 0},
 118         {NULL, NULL, 248, 31, 0, 0, 0, 0, 0},
 119         {NULL, NULL, 504, 16, 0, 0, 0, 0, 0},
 120         {NULL, NULL, 1016, 8, 0, 0, 0, 0, 0},
 121         {NULL, NULL, 2040, 4, 0, 0, 0, 0, 0},
 122         {NULL, NULL, 4080, 2, 0, 0, 0, 0, 0},
 123         {NULL, NULL, 8192 - 32, 1, 0, 0, 0, 0, 0},
 124         {NULL, NULL, 16384 - 32, 1, 0, 0, 0, 0, 1},
 125         {NULL, NULL, 32768 - 32, 1, 0, 0, 0, 0, 2},
 126         {NULL, NULL, 65536 - 32, 1, 0, 0, 0, 0, 3},
 127         {NULL, NULL, 131072 - 32, 1, 0, 0, 0, 0, 4},
 128         {NULL, NULL, 262144 - 32, 1, 0, 0, 0, 0, 5},
 129         {NULL, NULL, 0, 0, 0, 0, 0, 0, 0}
 130 };
 131 #else
 132 #error you need to make a version for your pagesize
 133 #endif
 134 
 135 #define NBLOCKS(order)          (sizes[order].nblocks)
 136 #define BLOCKSIZE(order)        (sizes[order].size)
 137 #define AREASIZE(order)         (PAGE_SIZE<<(sizes[order].gfporder))
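
The table layout becomes clearer with the arithmetic written out: each area must hold the page_descriptor plus nblocks blocks of the listed size, which is why the single-block classes stop 16 bytes short of the area size. A stand-alone sketch of that check for a few of the 4096-byte-page entries (the 16-byte descriptor size assumes 32-bit pointers and ints):

    #include <stdio.h>

    struct sk_class { int size, nblocks; unsigned long gfporder; };

    int main(void)
    {
            /* A few entries copied from the PAGE_SIZE == 4096 table above. */
            struct sk_class c[] = {
                    { 32, 127, 0 }, { 252, 16, 0 },
                    { 4096 - 16, 1, 0 }, { 8192 - 16, 1, 1 },
            };
            unsigned long descriptor = 16;      /* assumed sizeof(struct page_descriptor) */

            for (int i = 0; i < 4; i++) {
                    unsigned long used = (unsigned long) c[i].size * c[i].nblocks + descriptor;
                    unsigned long area = 4096UL << c[i].gfporder;
                    printf("blocksize %5d: uses %5lu of %5lu bytes\n",
                           c[i].size, used, area);
            }
            return 0;
    }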
 138 
 139 
 140 long kmalloc_init(long start_mem, long end_mem)
 141 {
 142         int order;
 143 
 144 /*
 145  * Check the static info array. Things will blow up terribly if it's
 146  * incorrect. This is a late "compile time" check.....
 147  */
 148         for (order = 0; BLOCKSIZE(order); order++) {
 149                 if ((NBLOCKS(order) * BLOCKSIZE(order) + sizeof(struct page_descriptor)) >
 150                     AREASIZE(order)) {
 151                         printk("Cannot use %d bytes out of %d in order = %d block mallocs\n",
 152                                (int) (NBLOCKS(order) * BLOCKSIZE(order) +
 153                                       sizeof(struct page_descriptor)),
 154                                 (int) AREASIZE(order),
  155                                order);
 156                         panic("This only happens if someone messes with kmalloc");
 157                 }
 158         }
 159         return start_mem;
 160 }
 161 
 162 
 163 
 164 int get_order(int size)
 165 {
 166         int order;
 167 
 168         /* Add the size of the header */
 169         size += sizeof(struct block_header);
 170         for (order = 0; BLOCKSIZE(order); order++)
 171                 if (size <= BLOCKSIZE(order))
 172                         return order;
 173         return -1;
 174 }
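
The mapping get_order() implements can be tried out in isolation. The sketch below copies the 4096-byte-page block sizes from the table above and assumes an 8-byte block_header (two 32-bit words); it mirrors the search but is not the kernel function itself:

    #include <stdio.h>

    /* Block sizes from the PAGE_SIZE == 4096 table, zero-terminated. */
    static const int blocksize[] = { 32, 64, 128, 252, 508, 1020, 2040,
                                     4096 - 16, 8192 - 16, 0 };

    static int sketch_get_order(int size)
    {
            size += 8;                  /* assumed sizeof(struct block_header) */
            for (int order = 0; blocksize[order]; order++)
                    if (size <= blocksize[order])
                            return order;
            return -1;
    }

    int main(void)
    {
            int request[] = { 10, 100, 1000, 5000 };

            for (int i = 0; i < 4; i++) {
                    int order = sketch_get_order(request[i]);
                    printf("request %4d -> order %d (%d-byte blocks)\n",
                           request[i], order, blocksize[order]);
            }
            return 0;
    }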
 175 
 176 void *kmalloc(size_t size, int priority)
 177 {
 178         unsigned long flags;
 179         unsigned long max_addr, type;
 180         int order, i, sz;
 181         struct block_header *p;
 182         struct page_descriptor *page, **pg;
 183 
 184         order = get_order(size);
 185         if (order < 0) {
 186                 printk("kmalloc of too large a block (%d bytes).\n", (int) size);
 187                 return (NULL);
 188         }
 189 
 190         max_addr = ~0UL;
 191         type = MF_USED;
 192         pg = &sizes[order].firstfree;
 193         if (priority & GFP_DMA) {
 194                 max_addr = MAX_DMA_ADDRESS;
 195                 type = MF_DMA;
 196                 pg = &sizes[order].dmafree;
 197         }
 198 
 199         priority &= GFP_LEVEL_MASK;
 200 
 201 /* Sanity check... */
 202         if (intr_count && priority != GFP_ATOMIC) {
 203                 static int count = 0;
 204                 if (++count < 5) {
 205                         printk("kmalloc called nonatomically from interrupt %p\n",
 206                                __builtin_return_address(0));
 207                         priority = GFP_ATOMIC;
 208                 }
 209         }
 210 
 211         save_flags(flags);
 212         cli();
 213         page = *pg;
 214         if (page) {
 215                 p = page->firstfree;
 216                 if (p->bh_flags != MF_FREE) {
 217                         restore_flags(flags);
 218                         printk("Problem: block on freelist at %08lx isn't free.\n", (long) p);
 219                         return NULL;
 220                 }
 221                 goto found_it;
 222         }
 223 
 224         /* We need to get a new free page..... */
 225         /* This can be done with ints on: This is private to this invocation */
 226         restore_flags(flags);
 227 
 228         /* sz is the size of the blocks we're dealing with */
 229         sz = BLOCKSIZE(order);
 230 
 231         page = (struct page_descriptor *) __get_free_pages(priority,
 232                         sizes[order].gfporder, max_addr);
 233 
 234         if (!page) {
 235                 static unsigned long last = 0;
 236                 if (priority != GFP_BUFFER && (last + 10 * HZ < jiffies)) {
 237                         last = jiffies;
 238                         printk("Couldn't get a free page.....\n");
 239                 }
 240                 return NULL;
 241         }
 242         sizes[order].npages++;
 243 
 244         /* Loop for all but last block: */
 245         for (i = NBLOCKS(order), p = BH(page + 1); i > 1; i--, p = p->bh_next) {
 246                 p->bh_flags = MF_FREE;
 247                 p->bh_next = BH(((long) p) + sz);
 248         }
 249         /* Last block: */
 250         p->bh_flags = MF_FREE;
 251         p->bh_next = NULL;
 252 
 253         page->order = order;
 254         page->nfree = NBLOCKS(order);
 255         p = BH(page+1);
 256 
 257         /*
 258          * Now we're going to muck with the "global" freelist
 259          * for this size: this should be uninterruptible
 260          */
 261         cli();
 262         page->next = *pg;
 263         *pg = page;
 264 
 265 found_it:
 266         page->firstfree = p->bh_next;
 267         page->nfree--;
 268         if (!page->nfree)
 269                 *pg = page->next;
 270         restore_flags(flags);
 271         sizes[order].nmallocs++;
 272         sizes[order].nbytesmalloced += size;
 273         p->bh_flags = type;     /* As of now this block is officially in use */
 274         p->bh_length = size;
 275 #ifdef SADISTIC_KMALLOC
 276         memset(p+1, 0xf0, size);
 277 #endif
 278         return p + 1;           /* Pointer arithmetic: increments past header */
 279 }
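
From a caller's point of view the interface is straightforward: pass a size and a priority, OR in GFP_DMA when the buffer must lie below MAX_DMA_ADDRESS, and check for NULL. A hedged usage sketch, assuming the declarations in <linux/malloc.h> and the GFP_* flags from <linux/mm.h> (the helper, the sizes and the error handling are invented, not taken from any driver):

    #include <linux/mm.h>               /* GFP_KERNEL, GFP_DMA */
    #include <linux/malloc.h>           /* kmalloc(), kfree() */
    #include <linux/errno.h>

    /* Hypothetical driver setup: one ordinary buffer and one DMA-able one. */
    static int example_setup(void)
    {
            char *ctrl, *dma_buf;

            ctrl = kmalloc(200, GFP_KERNEL);            /* may sleep */
            if (!ctrl)
                    return -ENOMEM;

            dma_buf = kmalloc(1024, GFP_KERNEL | GFP_DMA);
            if (!dma_buf) {
                    kfree(ctrl);
                    return -ENOMEM;
            }

            /* ... use the buffers ... */

            kfree(dma_buf);
            kfree(ctrl);
            return 0;
    }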
 280 
 281 void kfree(void *ptr)
 282 {
 283         int size;
 284         unsigned long flags;
 285         int order;
 286         register struct block_header *p;
 287         struct page_descriptor *page, **pg;
 288 
 289         if (!ptr)
 290                 return;
 291         p = ((struct block_header *) ptr) - 1;
 292         page = PAGE_DESC(p);
 293         order = page->order;
 294         pg = &sizes[order].firstfree;
 295         if (p->bh_flags == MF_DMA) {
 296                 p->bh_flags = MF_USED;
 297                 pg = &sizes[order].dmafree;
 298         }
 299 
 300         if ((order < 0) ||
 301             (order >= sizeof(sizes) / sizeof(sizes[0])) ||
 302             (((long) (page->next)) & ~PAGE_MASK) ||
 303             (p->bh_flags != MF_USED)) {
 304                 printk("kfree of non-kmalloced memory: %p, next= %p, order=%d\n",
 305                        p, page->next, page->order);
 306                 return;
 307         }
 308         size = p->bh_length;
 309         p->bh_flags = MF_FREE;  /* As of now this block is officially free */
 310 #ifdef SADISTIC_KMALLOC
 311         memset(p+1, 0xe0, size);
 312 #endif
 313         save_flags(flags);
 314         cli();
 315         p->bh_next = page->firstfree;
 316         page->firstfree = p;
 317         page->nfree++;
 318 
 319         if (page->nfree == 1) {
 320 /* Page went from full to one free block: put it on the freelist. */
 321                 page->next = *pg;
 322                 *pg = page;
 323         }
 324 /* If page is completely free, free it */
 325         if (page->nfree == NBLOCKS(order)) {
 326                 for (;;) {
 327                         struct page_descriptor *tmp = *pg;
 328                         if (!tmp) {
 329                                 printk("Ooops. page %p doesn't show on freelist.\n", page);
 330                                 break;
 331                         }
 332                         if (tmp == page) {
 333                                 *pg = page->next;
 334                                 break;
 335                         }
 336                         pg = &tmp->next;
 337                 }
 338                 sizes[order].npages--;
 339                 free_pages((long) page, sizes[order].gfporder);
 340         }
 341         sizes[order].nfrees++;
 342         sizes[order].nbytesmalloced -= size;
 343         restore_flags(flags);
 344 }
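
The loop that unlinks a completely free page from its freelist uses the pointer-to-pointer idiom: pg walks the links themselves, so rewriting *pg removes the page whether it is the head of the list or somewhere in the middle. The same idiom in a stand-alone user-space sketch (node type and helper are invented):

    #include <stdio.h>

    struct node { int value; struct node *next; };

    /* Same idiom as kfree(): pp points at the link to rewrite, so removing
     * the head needs no special case. */
    static void sk_unlink(struct node **pp, struct node *victim)
    {
            for (; *pp; pp = &(*pp)->next) {
                    if (*pp == victim) {
                            *pp = victim->next;
                            return;
                    }
            }
            printf("node %p is not on the list\n", (void *) victim);
    }

    int main(void)
    {
            struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
            struct node *head = &a;

            sk_unlink(&head, &b);
            for (struct node *n = head; n; n = n->next)
                    printf("%d\n", n->value);   /* prints 1 then 3 */
            return 0;
    }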
