root/mm/kmalloc.c


DEFINITIONS

This source file includes the following definitions.
  1. kmalloc_init
  2. get_order
  3. kmalloc
  4. kfree_s

/*
 *  linux/mm/kmalloc.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds & Roger Wolff.
 *
 *  Written by R.E. Wolff Sept/Oct '93.
 *
 */

/*
 * Modified by Alex Bligh (alex@cconcepts.co.uk) 4 Apr 1994 to use multiple
 * pages. So for 'page' throughout, read 'area'.
 */

#include <linux/mm.h>
#include <asm/system.h>
#include <linux/delay.h>

#define GFP_LEVEL_MASK 0xf

/* I want this low for a while to catch errors; I want this number increased
   in the near future: loadable device drivers should use this function to
   get memory. */

#define MAX_KMALLOC_K ((PAGE_SIZE<<(NUM_AREA_ORDERS-1))>>10)

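/*
 * Worked example of the limit (assuming, for illustration only, that
 * PAGE_SIZE is 4096 and NUM_AREA_ORDERS is 6, which matches the largest
 * entry in the sizes[] table below):
 *
 *   MAX_KMALLOC_K = (4096 << 5) >> 10 = 131072 >> 10 = 128
 *
 * i.e. the largest area kmalloc will carve blocks out of is 128 kB.
 */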

/* This defines how many times we should try to allocate a free page before
   giving up. Normally this shouldn't happen at all. */
#define MAX_GET_FREE_PAGE_TRIES 4


/* Private flags. */

#define MF_USED 0xffaa0055
#define MF_FREE 0x0055ffaa


/*
 * Much care has gone into making the routines in this file reentrant.
 *
 * The fancy bookkeeping of nbytesmalloced and the like is only used to
 * report statistics to the user (oooohhhhh, aaaaahhhhh....) and is not
 * protected by cli(). (If that goes wrong, so what?)
 *
 * These routines save and restore the interrupt status, so they may be
 * called with interrupts off.
 */
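
/*
 * The locking pattern used throughout this file (a sketch, mirroring the
 * code below): save the caller's flags, disable interrupts around the
 * freelist manipulation, then restore the saved state rather than
 * unconditionally re-enabling interrupts with sti().
 */
#if 0
        unsigned long flags;

        save_flags(flags);
        cli();                  /* critical section: touch the freelists */
        /* ... */
        restore_flags(flags);   /* caller's interrupt state comes back */
#endif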

/*
 * A block header. This is in front of every malloc-block, whether free or not.
 */
struct block_header {
        unsigned long bh_flags;
        union {
                unsigned long ubh_length;
                struct block_header *fbh_next;
        } vp;
};


#define bh_length vp.ubh_length
#define bh_next   vp.fbh_next
#define BH(p) ((struct block_header *)(p))

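/*
 * Layout sketch (illustration only): kmalloc hands out the bytes after the
 * header and kfree_s steps back over it, so a block looks like
 *
 *   | bh_flags | bh_length / bh_next |  payload handed to the caller ... |
 *   ^ BH(p)                          ^ p + 1, the value kmalloc returns
 *
 * bh_length is valid while the block is in use; bh_next while it is free.
 */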

/*
 * The page descriptor is at the front of every page that malloc has in use.
 */
struct page_descriptor {
        struct page_descriptor *next;
        struct block_header *firstfree;
        int order;
        int nfree;
};


#define PAGE_DESC(p) ((struct page_descriptor *)(((unsigned long)(p)) & PAGE_MASK))

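/*
 * Example of the mask (assuming PAGE_SIZE == 4096, i.e. PAGE_MASK ==
 * 0xfffff000 on a 32-bit machine): a block header at 0x0012f3c8 gives
 * PAGE_DESC == 0x0012f000, the start of its page, where the descriptor
 * sits.  Multi-page areas contain just one block, whose header lives in
 * the first page, so the mask finds the right descriptor there too.
 */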

/*
 * A size descriptor describes a specific class of malloc sizes.
 * Each class of sizes has its own freelist.
 */
struct size_descriptor {
        struct page_descriptor *firstfree;
        struct page_descriptor *dmafree; /* DMA-able memory */
        int size;
        int nblocks;

        int nmallocs;
        int nfrees;
        int nbytesmalloced;
        int npages;
        unsigned long gfporder; /* log2 of the number of pages in the area */
};

/*
 * For now it is unsafe to allocate bucket sizes between n-16 and n, where
 * n is 4096 times any power of two (the 16 bytes are needed for the page
 * descriptor at the front of the area).
 */

struct size_descriptor sizes[] = {
        { NULL, NULL,        32, 127, 0, 0, 0, 0, 0 },
        { NULL, NULL,        64,  63, 0, 0, 0, 0, 0 },
        { NULL, NULL,       128,  31, 0, 0, 0, 0, 0 },
        { NULL, NULL,       252,  16, 0, 0, 0, 0, 0 },
        { NULL, NULL,       508,   8, 0, 0, 0, 0, 0 },
        { NULL, NULL,      1020,   4, 0, 0, 0, 0, 0 },
        { NULL, NULL,   4096-16,   1, 0, 0, 0, 0, 0 },
        { NULL, NULL,      2040,   2, 0, 0, 0, 0, 0 },
        { NULL, NULL,   8192-16,   1, 0, 0, 0, 0, 1 },
        { NULL, NULL,  16384-16,   1, 0, 0, 0, 0, 2 },
        { NULL, NULL,  32768-16,   1, 0, 0, 0, 0, 3 },
        { NULL, NULL,  65536-16,   1, 0, 0, 0, 0, 4 },
        { NULL, NULL, 131072-16,   1, 0, 0, 0, 0, 5 },
        { NULL, NULL,         0,   0, 0, 0, 0, 0, 0 }
};

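/*
 * Worked check of the invariant kmalloc_init() verifies below (assuming a
 * 32-bit layout: sizeof(struct page_descriptor) == 16 and 8-byte block
 * headers, already counted inside each bucket size): for the 252-byte
 * bucket, 16 blocks * 252 bytes + 16 bytes of descriptor = 4048 <= 4096,
 * so one page holds the descriptor plus all 16 blocks.
 */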

#define NBLOCKS(order)          (sizes[order].nblocks)
#define BLOCKSIZE(order)        (sizes[order].size)
#define AREASIZE(order)         (PAGE_SIZE<<(sizes[order].gfporder))


long kmalloc_init (long start_mem,long end_mem)
{
        int order;

/*
 * Check the static info array. Things will blow up terribly if it's
 * incorrect. This is a late "compile time" check.....
 */
for (order = 0;BLOCKSIZE(order);order++)
    {
    if ((NBLOCKS (order)*BLOCKSIZE(order) + sizeof (struct page_descriptor)) >
        AREASIZE(order))
        {
        printk ("Cannot use %d bytes out of %d in %d-byte block mallocs\n",
                (int) (NBLOCKS (order) * BLOCKSIZE(order) +
                        sizeof (struct page_descriptor)),
                (int) AREASIZE(order),
                BLOCKSIZE (order));
        panic ("This only happens if someone messes with kmalloc");
        }
    }
return start_mem;
}



int get_order (int size)
{
        int order;

        /* Add the size of the header */
        size += sizeof (struct block_header);
        for (order = 0;BLOCKSIZE(order);order++)
                if (size <= BLOCKSIZE (order))
                        return order;
        return -1;
}
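
/*
 * Worked example (assuming a 32-bit build, so sizeof(struct block_header)
 * is 8): get_order(100) looks for the first bucket that can hold
 * 100 + 8 = 108 bytes and returns 2, the 128-byte bucket.  A request
 * larger than the biggest bucket, say get_order(200000), returns -1.
 */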

void * kmalloc (size_t size, int priority)
{
        unsigned long flags;
        int order,tries,i,sz;
        int dma_flag;
        struct block_header *p;
        struct page_descriptor *page;

        dma_flag = (priority & GFP_DMA);
        priority &= GFP_LEVEL_MASK;

/* Sanity check... */
        if (intr_count && priority != GFP_ATOMIC) {
                static int count = 0;
                if (++count < 5) {
                        printk("kmalloc called nonatomically from interrupt %p\n",
                                __builtin_return_address(0));
                        priority = GFP_ATOMIC;
                }
        }

order = get_order (size);
if (order < 0)
    {
    printk ("kmalloc of too large a block (%d bytes).\n",(int) size);
    return (NULL);
    }

save_flags(flags);

/* It seems VERY unlikely that this loop will ever execute more than
   once. */
tries = MAX_GET_FREE_PAGE_TRIES;
while (tries --)
    {
    /* Try to allocate a "recently" freed memory block */
    cli ();
    if ((page = (dma_flag ? sizes[order].dmafree : sizes[order].firstfree)) &&
        (p    =  page->firstfree))
        {
        if (p->bh_flags == MF_FREE)
            {
            page->firstfree = p->bh_next;
            page->nfree--;
            if (!page->nfree)
                {
                  if(dma_flag)
                    sizes[order].dmafree = page->next;
                  else
                    sizes[order].firstfree = page->next;
                  page->next = NULL;
                }
            restore_flags(flags);

            sizes [order].nmallocs++;
            sizes [order].nbytesmalloced += size;
            p->bh_flags =  MF_USED; /* As of now this block is officially in use */
            p->bh_length = size;
            return p+1; /* Pointer arithmetic: increments past header */
            }
        printk ("Problem: block on freelist at %08lx isn't free.\n",(long)p);
        return (NULL);
        }
    restore_flags(flags);


    /* Now we're in trouble: We need to get a new free page..... */

    sz = BLOCKSIZE(order); /* sz is the size of the blocks we're dealing with */

    /* This can be done with ints on: This is private to this invocation */
    if (dma_flag)
      page = (struct page_descriptor *) __get_dma_pages (priority & GFP_LEVEL_MASK, sizes[order].gfporder);
    else
      page = (struct page_descriptor *) __get_free_pages (priority & GFP_LEVEL_MASK, sizes[order].gfporder);

    if (!page) {
        static unsigned long last = 0;
        if (last + 10*HZ < jiffies) {
                last = jiffies;
                printk ("Couldn't get a free page.....\n");
        }
        return NULL;
    }
#if 0
    printk ("Got page %08lx to use for %d byte mallocs....",(long)page,sz);
#endif
    sizes[order].npages++;

    /* Loop for all but last block: */
    for (i=NBLOCKS(order),p=BH (page+1);i > 1;i--,p=p->bh_next)
        {
        p->bh_flags = MF_FREE;
        p->bh_next = BH ( ((long)p)+sz);
        }
    /* Last block: */
    p->bh_flags = MF_FREE;
    p->bh_next = NULL;

    page->order = order;
    page->nfree = NBLOCKS(order);
    page->firstfree = BH(page+1);
#if 0
    printk ("%d blocks per page\n",page->nfree);
#endif
    /* Now we're going to muck with the "global" freelist for this size:
       this should be uninterruptible */
    cli ();
    /*
     * sizes[order].firstfree used to be NULL, otherwise we wouldn't be
     * here, but you never know....
     */
    if (dma_flag) {
      page->next = sizes[order].dmafree;
      sizes[order].dmafree = page;
    } else {
      page->next = sizes[order].firstfree;
      sizes[order].firstfree = page;
    }
    restore_flags(flags);
    }

/* Pray that printk won't cause this to happen again :-) */

printk ("Hey. This is very funny. I tried %d times to allocate a whole\n"
        "new page for an object only %d bytes long, but some other process\n"
        "beat me to actually allocating it. Also note that this 'error'\n"
        "message is soooo very long to catch your attention. I'd appreciate\n"
        "it if you'd be so kind as to report what conditions caused this to\n"
        "the author of this kmalloc: wolff@dutecai.et.tudelft.nl.\n"
        "(Executive summary: This can't happen)\n",
                MAX_GET_FREE_PAGE_TRIES,
                (int) size);
return NULL;
}
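
/*
 * A note on the sanity check near the top of kmalloc() (a sketch of the
 * rule it enforces, illustration only): callers running in interrupt
 * context (intr_count != 0) must pass GFP_ATOMIC so the allocation cannot
 * sleep; kmalloc downgrades other priorities itself and warns a few times.
 */
#if 0
        /* From an interrupt handler: */
        char *buf = kmalloc(64, GFP_ATOMIC);
#endif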

void kfree_s (void *ptr,int size)
{
unsigned long flags;
int order;
register struct block_header *p=((struct block_header *)ptr) -1;
struct page_descriptor *page,*pg2;

page = PAGE_DESC (p);
order = page->order;
if ((order < 0) ||
    (order >= sizeof (sizes)/sizeof (sizes[0])) ||
    (((long)(page->next)) & ~PAGE_MASK) ||
    (p->bh_flags != MF_USED))
    {
    printk ("kfree of non-kmalloced memory: %p, next= %p, order=%d\n",
                p, page->next, page->order);
    return;
    }
if (size &&
    size != p->bh_length)
    {
    printk ("Trying to free pointer at %p with wrong size: %d instead of %lu.\n",
        p,size,p->bh_length);
    return;
    }
size = p->bh_length;
p->bh_flags = MF_FREE; /* As of now this block is officially free */
save_flags(flags);
cli ();
p->bh_next = page->firstfree;
page->firstfree = p;
page->nfree ++;

if (page->nfree == 1)
   { /* Page went from full to having one free block: put it on the
        freelist.  Do not bother trying to put it on the DMA list. */
   if (page->next)
        {
        printk ("Page %p already on freelist; dazed and confused....\n", page);
        }
   else
        {
        page->next = sizes[order].firstfree;
        sizes[order].firstfree = page;
        }
   }

/* If page is completely free, free it */
if (page->nfree == NBLOCKS (page->order))
    {
#if 0
    printk ("Freeing page %08lx.\n", (long)page);
#endif
    if (sizes[order].firstfree == page)
        {
        sizes[order].firstfree = page->next;
        }
    else if (sizes[order].dmafree == page)
        {
        sizes[order].dmafree = page->next;
        }
    else
        {
        for (pg2=sizes[order].firstfree;
                (pg2 != NULL) && (pg2->next != page);
                        pg2=pg2->next)
            /* Nothing */;
        if (!pg2)
          for (pg2=sizes[order].dmafree;
               (pg2 != NULL) && (pg2->next != page);
               pg2=pg2->next)
            /* Nothing */;
        if (pg2 != NULL)
            pg2->next = page->next;
        else
            printk ("Ooops. page %p doesn't show on freelist.\n", page);
        }
/* FIXME: I'm sure we should do something with npages here (like npages--) */
    free_pages ((long)page, sizes[order].gfporder);
    }
restore_flags(flags);

/* FIXME: ?? Are these increment & decrement operations guaranteed to be
 *           atomic? Could an IRQ not occur between the read & the write?
 *           Maybe yes on an x86 with GCC...??
 */
sizes[order].nfrees++;      /* Noncritical (monitoring) admin stuff */
sizes[order].nbytesmalloced -= size;
}
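
/*
 * Usage sketch (illustration only; struct foo_dev and foo_init are
 * hypothetical caller-side names, not part of this file):
 */
#if 0
static int foo_init(void)
{
        struct foo_dev *dev;

        dev = kmalloc(sizeof(struct foo_dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;
        /* ... use dev ... */
        kfree_s(dev, sizeof(struct foo_dev));
        return 0;
}
#endif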
