root/mm/kmalloc.c


DEFINITIONS

This source file includes the following definitions:
  1. kmalloc_init
  2. kmalloc
  3. kfree

/*
 *  linux/mm/kmalloc.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds & Roger Wolff.
 *
 *  Written by R.E. Wolff Sept/Oct '93.
 *
 */

/*
 * Modified by Alex Bligh (alex@cconcepts.co.uk) 4 Apr 1994 to use multiple
 * pages. So for 'page' throughout, read 'area'.
 *
 * Largely rewritten.. Linus
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <asm/system.h>
#include <asm/dma.h>

/* Define this if you want slow routines that try to trip errors */
#undef SADISTIC_KMALLOC

/* Private flags. */

#define MF_USED 0xffaa0055
#define MF_DMA  0xff00aa55
#define MF_FREE 0x0055ffaa

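/*
 * These are improbable bit patterns rather than small integers, so that
 * kfree() can cheaply detect blocks that were never kmalloc()ed, were
 * freed twice, or had their header overwritten.
 */
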
/*
 * Much care has gone into making these routines in this file reentrant.
 *
 * The fancy bookkeeping of nbytesmalloced and the like is only used to
 * report to the user (oooohhhhh, aaaaahhhhh....) and is not protected
 * by cli(). (If that goes wrong, so what?)
 *
 * These routines restore the interrupt status to allow calling with ints
 * off.
 */

/*
 * A block header. This is in front of every malloc-block, whether free or not.
 */
struct block_header {
        unsigned long bh_flags;                 /* MF_USED, MF_DMA or MF_FREE */
        union {
                unsigned long ubh_length;       /* used block: requested size */
                struct block_header *fbh_next;  /* free block: next on freelist */
        } vp;
};


#define bh_length vp.ubh_length
#define bh_next   vp.fbh_next
#define BH(p) ((struct block_header *)(p))


/*
 * The page descriptor is at the front of every page that malloc has in use.
 */
struct page_descriptor {
        struct page_descriptor *next;   /* next page with free blocks */
        struct block_header *firstfree; /* first free block in this page */
        int order;                      /* index into sizes[]/blocksize[] */
        int nfree;                      /* number of free blocks left */
};


#define PAGE_DESC(p) ((struct page_descriptor *)(((unsigned long)(p)) & PAGE_MASK))

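/*
 * Note that PAGE_DESC() only masks down to a page boundary, so a block
 * must never start beyond the first page of a multi-page area. The size
 * tables below guarantee this: every order that uses a multi-page area
 * (gfporder > 0) holds exactly one block, starting right after the page
 * descriptor at the front of the area.
 */
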
/*
 * A size descriptor describes a specific class of malloc sizes.
 * Each class of sizes has its own freelist.
 */
struct size_descriptor {
        struct page_descriptor *firstfree;
        struct page_descriptor *dmafree;        /* DMA-able memory */
        int nblocks;                            /* blocks per area */

        /* Statistics; only reported to the user, not protected by cli(). */
        int nmallocs;
        int nfrees;
        int nbytesmalloced;
        int npages;
        unsigned long gfporder; /* log2 of pages per area, as passed to __get_free_pages() */
};

/*
 * For now it is unsafe to allocate bucket sizes between n and
 * n-sizeof(page_descriptor) where n is PAGE_SIZE * any power of two
 *
 * The blocksize and sizes arrays _must_ match!
 */
#if PAGE_SIZE == 4096
static const unsigned int blocksize[] = {
        32,
        64,
        128,
        252,
        508,
        1020,
        2040,
        4096 - 16,
        8192 - 16,
        16384 - 16,
        32768 - 16,
        65536 - 16,
        131072 - 16,
        0
};

static struct size_descriptor sizes[] =
{
        {NULL, NULL, 127, 0, 0, 0, 0, 0},
        {NULL, NULL, 63, 0, 0, 0, 0, 0},
        {NULL, NULL, 31, 0, 0, 0, 0, 0},
        {NULL, NULL, 16, 0, 0, 0, 0, 0},
        {NULL, NULL, 8, 0, 0, 0, 0, 0},
        {NULL, NULL, 4, 0, 0, 0, 0, 0},
        {NULL, NULL, 2, 0, 0, 0, 0, 0},
        {NULL, NULL, 1, 0, 0, 0, 0, 0},
        {NULL, NULL, 1, 0, 0, 0, 0, 1},
        {NULL, NULL, 1, 0, 0, 0, 0, 2},
        {NULL, NULL, 1, 0, 0, 0, 0, 3},
        {NULL, NULL, 1, 0, 0, 0, 0, 4},
        {NULL, NULL, 1, 0, 0, 0, 0, 5},
        {NULL, NULL, 0, 0, 0, 0, 0, 0}
};
#elif PAGE_SIZE == 8192
static const unsigned int blocksize[] = {
        64,
        128,
        248,
        504,
        1016,
        2040,
        4080,
        8192 - 32,
        16384 - 32,
        32768 - 32,
        65536 - 32,
        131072 - 32,
        262144 - 32,
        0
};

static struct size_descriptor sizes[] =
{
        {NULL, NULL, 127, 0, 0, 0, 0, 0},
        {NULL, NULL, 63, 0, 0, 0, 0, 0},
        {NULL, NULL, 31, 0, 0, 0, 0, 0},
        {NULL, NULL, 16, 0, 0, 0, 0, 0},
        {NULL, NULL, 8, 0, 0, 0, 0, 0},
        {NULL, NULL, 4, 0, 0, 0, 0, 0},
        {NULL, NULL, 2, 0, 0, 0, 0, 0},
        {NULL, NULL, 1, 0, 0, 0, 0, 0},
        {NULL, NULL, 1, 0, 0, 0, 0, 1},
        {NULL, NULL, 1, 0, 0, 0, 0, 2},
        {NULL, NULL, 1, 0, 0, 0, 0, 3},
        {NULL, NULL, 1, 0, 0, 0, 0, 4},
        {NULL, NULL, 1, 0, 0, 0, 0, 5},
        {NULL, NULL, 0, 0, 0, 0, 0, 0}
};
#else
#error you need to make a version for your pagesize
#endif

#define NBLOCKS(order)          (sizes[order].nblocks)
#define BLOCKSIZE(order)        (blocksize[order])
#define AREASIZE(order)         (PAGE_SIZE<<(sizes[order].gfporder))

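/*
 * The two tables are built so that, for every order,
 *
 *      NBLOCKS(order) * BLOCKSIZE(order) + sizeof(struct page_descriptor)
 *              <= AREASIZE(order)
 *
 * For example, on 4K pages at order 3: 16 * 252 + 16 = 4048 <= 4096
 * (with a 16-byte page descriptor on a 32-bit kernel). Block sizes
 * larger than a page get multi-page areas (gfporder 1..5, i.e. 2..32
 * pages) holding a single block each. kmalloc_init() below checks this
 * invariant at boot.
 */
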
long kmalloc_init(long start_mem, long end_mem)
{
        int order;

/*
 * Check the static info array. Things will blow up terribly if it's
 * incorrect. This is a late "compile time" check.....
 */
        for (order = 0; BLOCKSIZE(order); order++) {
                if ((NBLOCKS(order) * BLOCKSIZE(order) + sizeof(struct page_descriptor)) >
                    AREASIZE(order)) {
                        printk("Cannot use %d bytes out of %d in order = %d block mallocs\n",
                               (int) (NBLOCKS(order) * BLOCKSIZE(order) +
                                      sizeof(struct page_descriptor)),
                               (int) AREASIZE(order),
                               order);
                        panic("This only happens if someone messes with kmalloc");
                }
        }
        return start_mem;
}

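/*
 * Note that kmalloc_init() reserves no boot memory of its own: it only
 * sanity-checks the tables and hands start_mem back unchanged. Areas
 * are taken from __get_free_pages() on demand.
 */
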
void *kmalloc(size_t size, int priority)
{
        unsigned long flags;
        unsigned long type;
        int order, dma;
        struct block_header *p;
        struct page_descriptor *page, **pg;

        /* Get order */
        order = 0;
        {
                unsigned int realsize = size + sizeof(struct block_header);
                for (;;) {
                        int ordersize = BLOCKSIZE(order);
                        if (realsize <= ordersize)
                                break;
                        order++;
                        if (ordersize)
                                continue;
                        printk("kmalloc of too large a block (%d bytes).\n", (int) size);
                        return NULL;
                }
        }

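/*
 * The search above finds the first order whose block size can hold the
 * request plus its block_header. The 0 that terminates blocksize[]
 * makes the loop fail cleanly for oversized requests instead of running
 * off the end of the table.
 */
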
        dma = 0;
        type = MF_USED;
        pg = &sizes[order].firstfree;
        if (priority & GFP_DMA) {
                dma = 1;
                type = MF_DMA;
                pg = &sizes[order].dmafree;
        }

        priority &= GFP_LEVEL_MASK;

/* Sanity check... */
        if (intr_count && priority != GFP_ATOMIC) {
                static int count = 0;
                if (++count < 5)
                        printk("kmalloc called nonatomically from interrupt %p\n",
                               __builtin_return_address(0));
                priority = GFP_ATOMIC;
        }

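/*
 * The downgrade to GFP_ATOMIC above matters: a non-atomic priority may
 * sleep inside __get_free_pages(), which is illegal in interrupt
 * context. Note that the downgrade is applied unconditionally; only
 * the warning is rate-limited.
 */
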
        save_flags(flags);
        cli();
        page = *pg;
        if (page) {
                p = page->firstfree;
                if (p->bh_flags != MF_FREE)
                        goto not_free_on_freelist;
                goto found_it;
        }

        /* We need to get a new free page..... */
        /* This can be done with ints on: This is private to this invocation */
        restore_flags(flags);

        {
                int i, sz;

                /* sz is the size of the blocks we're dealing with */
                sz = BLOCKSIZE(order);

                page = (struct page_descriptor *) __get_free_pages(priority,
                                sizes[order].gfporder, dma);

                if (!page)
                        goto no_free_page;
                sizes[order].npages++;

                /* Loop for all but last block: */
                for (i = NBLOCKS(order), p = BH(page + 1); i > 1; i--, p = p->bh_next) {
                        p->bh_flags = MF_FREE;
                        p->bh_next = BH(((long) p) + sz);
                }
                /* Last block: */
                p->bh_flags = MF_FREE;
                p->bh_next = NULL;

                page->order = order;
                page->nfree = NBLOCKS(order);
                p = BH(page+1);
        }

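/*
 * The loop above threaded the new area into a singly linked freelist:
 * BH(page + 1) is the first block, just past the page descriptor, and
 * each header's bh_next points sz bytes further on. p was then rewound
 * to the first block, which is the one handed out below.
 */
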
        /*
         * Now we're going to muck with the "global" freelist
         * for this size: this should be uninterruptible
         */
        cli();
        page->next = *pg;
        *pg = page;

found_it:
        page->firstfree = p->bh_next;
        page->nfree--;
        if (!page->nfree)
                *pg = page->next;
        restore_flags(flags);
        sizes[order].nmallocs++;
        sizes[order].nbytesmalloced += size;
        p->bh_flags = type;     /* As of now this block is officially in use */
        p->bh_length = size;
#ifdef SADISTIC_KMALLOC
        memset(p+1, 0xf0, size);
#endif
        return p + 1;           /* Pointer arithmetic: increments past header */

no_free_page:
        {
                static unsigned long last = 0;
                if (priority != GFP_BUFFER && (last + 10 * HZ < jiffies)) {
                        last = jiffies;
                        printk("Couldn't get a free page.....\n");
                }
                return NULL;
        }

not_free_on_freelist:
        restore_flags(flags);
        printk("Problem: block on freelist at %08lx isn't free.\n", (long) p);
        return NULL;
}
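
/*
 * Typical use, for illustration only (hypothetical caller, not part of
 * this file):
 *
 *      struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *      if (!f)
 *              return -ENOMEM;
 *      ...
 *      kfree(f);
 *
 * GFP_KERNEL may sleep; interrupt handlers must use GFP_ATOMIC. OR in
 * GFP_DMA (e.g. GFP_KERNEL | GFP_DMA) when the block must come from
 * DMA-able memory.
 */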

void kfree(void *ptr)
{
        int size;
        unsigned long flags;
        int order;
        register struct block_header *p;
        struct page_descriptor *page, **pg;

        if (!ptr)
                return;
        p = ((struct block_header *) ptr) - 1;
        page = PAGE_DESC(p);
        order = page->order;
        pg = &sizes[order].firstfree;
        /* DMA blocks go back on the DMA freelist; normalize the flag so
           the sanity check below can treat both kinds alike. */
        if (p->bh_flags == MF_DMA) {
                p->bh_flags = MF_USED;
                pg = &sizes[order].dmafree;
        }

        if ((order < 0) ||
            (order >= sizeof(sizes) / sizeof(sizes[0])) ||
            (((long) (page->next)) & ~PAGE_MASK) ||
            (p->bh_flags != MF_USED)) {
                printk("kfree of non-kmalloced memory: %p, next= %p, order=%d\n",
                       p, page->next, page->order);
                return;
        }
        size = p->bh_length;
        p->bh_flags = MF_FREE;  /* As of now this block is officially free */
#ifdef SADISTIC_KMALLOC
        memset(p+1, 0xe0, size);
#endif
        save_flags(flags);
        cli();
        p->bh_next = page->firstfree;
        page->firstfree = p;
        page->nfree++;

        if (page->nfree == 1) {
/* Page went from full to one free block: put it on the freelist. */
                page->next = *pg;
                *pg = page;
        }
/* If page is completely free, unlink it from its freelist and free it */
        if (page->nfree == NBLOCKS(order)) {
                for (;;) {
                        struct page_descriptor *tmp = *pg;
                        if (!tmp) {
                                printk("Ooops. page %p doesn't show on freelist.\n", page);
                                break;
                        }
                        if (tmp == page) {
                                *pg = page->next;
                                break;
                        }
                        pg = &tmp->next;
                }
                sizes[order].npages--;
                free_pages((long) page, sizes[order].gfporder);
        }
        sizes[order].nfrees++;
        sizes[order].nbytesmalloced -= size;
        restore_flags(flags);
}
