root/mm/kmalloc.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions:
  1. kmalloc_init
  2. get_order
  3. kmalloc
  4. kfree_s

   1 /*
   2  *  linux/mm/kmalloc.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds & Roger Wolff.
   5  *
   6  *  Written by R.E. Wolff Sept/Oct '93.
   7  *
   8  */
   9 
  10 #include <linux/mm.h>
  11 #include <asm/system.h>
  12 #include <linux/delay.h>
  13 
/* Mask extracting the GFP priority bits that are forwarded to __get_free_page. */
#define GFP_LEVEL_MASK 0xf

/* I want this low enough for a while to catch errors.
   I want this number to be increased in the near future:
        loadable device drivers should use this function to get memory */

#define MAX_KMALLOC_K 4


/* This defines how many times we should try to allocate a free page before
   giving up. Normally this shouldn't happen at all. */
#define MAX_GET_FREE_PAGE_TRIES 4


/* Private block-state flags.  Deliberately improbable bit patterns, so a
   trashed block header is detected instead of silently accepted. */

#define MF_USED 0xffaa0055
#define MF_FREE 0x0055ffaa
  32 
  33 
  34 /* 
  35  * Much care has gone into making these routines in this file reentrant.
  36  *
  37  * The fancy bookkeeping of nbytesmalloced and the like are only used to
  38  * report them to the user (oooohhhhh, aaaaahhhhh....) are not 
  39  * protected by cli(). (If that goes wrong. So what?)
  40  *
  41  * These routines restore the interrupt status to allow calling with ints
  42  * off. 
  43  */
  44 
/* 
 * A block header. This is in front of every malloc-block, whether free or not.
 * The union's two members are mutually exclusive, selected by bh_flags.
 */
struct block_header {
        unsigned long bh_flags;         /* MF_USED or MF_FREE; doubles as a corruption canary */
        union {
                unsigned long ubh_length;       /* MF_USED: byte count the caller requested */
                struct block_header *fbh_next;  /* MF_FREE: next free block on the same page */
        } vp;
};


/* Shorthand for the union members, and a block-header cast helper. */
#define bh_length vp.ubh_length
#define bh_next   vp.fbh_next
#define BH(p) ((struct block_header *)(p))
  60 
  61 
/* 
 * The page descriptor is at the front of every page that malloc has in use.
 * The page's blocks follow it immediately (see BH(page+1) in kmalloc).
 */
struct page_descriptor {
        struct page_descriptor *next;   /* next page with free blocks in the same size class */
        struct block_header *firstfree; /* head of this page's free-block list */
        int order;                      /* index into sizes[]: which class this page serves */
        int nfree;                      /* number of free blocks left on this page */
};


/* Map a block address back to the descriptor of the page containing it. */
#define PAGE_DESC(p) ((struct page_descriptor *)(((unsigned long)(p)) & PAGE_MASK))
  74 
  75 
/*
 * A size descriptor describes a specific class of malloc sizes.
 * Each class of sizes has its own freelist.
 */
struct size_descriptor {
        struct page_descriptor *firstfree;      /* pages of this class that still have free blocks */
        int size;               /* block size in bytes, block header included */
        int nblocks;            /* how many such blocks fit on one page */

        /* Monitoring counters only; updated without cli() by design
           (see the reentrancy comment at the top of the file). */
        int nmallocs;
        int nfrees;
        int nbytesmalloced;
        int npages;
};
  90 
  91 
/*
 * The size-class table, terminated by a zero-size sentinel entry.
 * kmalloc_init() verifies at boot time that, for every class,
 * nblocks*size plus the page descriptor fits in one page.
 */
struct size_descriptor sizes[] = { 
        { NULL,  32,127, 0,0,0,0 },
        { NULL,  64, 63, 0,0,0,0 },
        { NULL, 128, 31, 0,0,0,0 },
        { NULL, 252, 16, 0,0,0,0 },
        { NULL, 508,  8, 0,0,0,0 },
        { NULL,1020,  4, 0,0,0,0 },
        { NULL,2040,  2, 0,0,0,0 },
        { NULL,4080,  1, 0,0,0,0 },
        { NULL,   0,  0, 0,0,0,0 }      /* sentinel: stops the order-scan loops */
};


/* Table accessors, indexed by size class ("order"). */
#define NBLOCKS(order)          (sizes[order].nblocks)
#define BLOCKSIZE(order)        (sizes[order].size)
 107 
 108 
 109 
 110 long kmalloc_init (long start_mem,long end_mem)
     /* [previous][next][first][last][top][bottom][index][help] */
 111 {
 112         int order;
 113 
 114 /* 
 115  * Check the static info array. Things will blow up terribly if it's
 116  * incorrect. This is a late "compile time" check.....
 117  */
 118 for (order = 0;BLOCKSIZE(order);order++)
 119     {
 120     if ((NBLOCKS (order)*BLOCKSIZE(order) + sizeof (struct page_descriptor)) >
 121         PAGE_SIZE) 
 122         {
 123         printk ("Cannot use %d bytes out of %d in order = %d block mallocs\n",
 124                 NBLOCKS (order) * BLOCKSIZE(order) + 
 125                         sizeof (struct page_descriptor),
 126                 (int) PAGE_SIZE,
 127                 BLOCKSIZE (order));
 128         panic ("This only happens if someone messes with kmalloc");
 129         }
 130     }
 131 return start_mem;
 132 }
 133 
 134 
 135 
 136 int get_order (int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 137 {
 138         int order;
 139 
 140         /* Add the size of the header */
 141         size += sizeof (struct block_header); 
 142         for (order = 0;BLOCKSIZE(order);order++)
 143                 if (size <= BLOCKSIZE (order))
 144                         return order; 
 145         return -1;
 146 }
 147 
/*
 * kmalloc - allocate a kernel memory block of at least @size bytes.
 * @size:     number of bytes wanted (at most MAX_KMALLOC_K kilobytes).
 * @priority: GFP_* priority, forwarded to __get_free_page; forced to
 *            GFP_ATOMIC (with a warning) when called from an interrupt.
 *
 * Returns a pointer to the usable memory just past the hidden
 * block_header, or NULL on failure.  Reentrant: the freelists are only
 * touched with interrupts disabled, and the saved flag state is
 * restored afterwards, so the function may be called with ints off.
 */
void * kmalloc (size_t size, int priority)
{
        unsigned long flags;
        int order,tries,i,sz;
        struct block_header *p;
        struct page_descriptor *page;
        extern unsigned long intr_count;

/* Sanity check... */
        if (intr_count && priority != GFP_ATOMIC) {
                static int count = 0;   /* rate-limits the warning to 4 prints */
                if (++count < 5) {
                        /* NOTE(review): the word just below the first argument
                           is presumably the caller's return address (i386 stack
                           layout) -- arch-specific hack, confirm before porting. */
                        printk("kmalloc called nonatomically from interrupt %08lx\n",
                                ((unsigned long *)&size)[-1]);
                        priority = GFP_ATOMIC;
                }
        }
if (size > MAX_KMALLOC_K * 1024) 
     {
     printk ("kmalloc: I refuse to allocate %d bytes (for now max = %d).\n",
                size,MAX_KMALLOC_K*1024);
     return (NULL);
     }

order = get_order (size);
if (order < 0)
    {
    printk ("kmalloc of too large a block (%d bytes).\n",size);
    return (NULL);
    }

save_flags(flags);

/* It seems VERY unlikely to me that it would be possible that this 
   loop will get executed more than once. */
tries = MAX_GET_FREE_PAGE_TRIES; 
while (tries --)
    {
    /* Try to allocate a "recently" freed memory block */
    cli ();
    if ((page = sizes[order].firstfree) &&
        (p    =  page->firstfree))
        {
        if (p->bh_flags == MF_FREE)
            {
            page->firstfree = p->bh_next;
            page->nfree--;
            if (!page->nfree)
                {
                /* Page is now full: unlink it from the class freelist. */
                sizes[order].firstfree = page->next;
                page->next = NULL;
                }
            restore_flags(flags);

            /* Statistics are monitoring-only and deliberately updated
               outside cli() (see comment at the top of the file). */
            sizes [order].nmallocs++;
            sizes [order].nbytesmalloced += size;
            p->bh_flags =  MF_USED; /* As of now this block is officially in use */
            p->bh_length = size;
            return p+1; /* Pointer arithmetic: increments past header */
            }
        /* Header corruption: a block on the freelist must be MF_FREE. */
        printk ("Problem: block on freelist at %08lx isn't free.\n",(long)p);
        return (NULL);
        }
    restore_flags(flags);


    /* Now we're in trouble: We need to get a new free page..... */

    sz = BLOCKSIZE(order); /* sz is the size of the blocks we're dealing with */

    /* This can be done with ints on: This is private to this invocation */
    page = (struct page_descriptor *) __get_free_page (priority & GFP_LEVEL_MASK);
    if (!page) {
        /* Rate-limit the out-of-memory message to one per ~10 seconds. */
        static unsigned long last = 0;
        if (last + 10*HZ < jiffies) {
                last = jiffies;
                printk ("Couldn't get a free page.....\n");
        }
        return NULL;
    }
#if 0
    printk ("Got page %08x to use for %d byte mallocs....",(long)page,sz);
#endif
    sizes[order].npages++;

    /* Carve the fresh page into blocks.  Loop for all but last block: */
    for (i=NBLOCKS(order),p=BH (page+1);i > 1;i--,p=p->bh_next) 
        {
        p->bh_flags = MF_FREE;
        p->bh_next = BH ( ((long)p)+sz);
        }
    /* Last block: */
    p->bh_flags = MF_FREE;
    p->bh_next = NULL;

    page->order = order;
    page->nfree = NBLOCKS(order); 
    page->firstfree = BH(page+1);
#if 0
    printk ("%d blocks per page\n",page->nfree);
#endif
    /* Now we're going to muck with the "global" freelist for this size:
       this should be uninterruptible */
    cli ();
    /* 
     * sizes[order].firstfree used to be NULL, otherwise we wouldn't be
     * here, but you never know.... 
     */
    page->next = sizes[order].firstfree;
    sizes[order].firstfree = page;
    restore_flags(flags);
    /* Loop back: while ints were enabled another task may already have
       taken blocks from (or exhausted) the page we just added. */
    }

/* Pray that printk won't cause this to happen again :-) */

printk ("Hey. This is very funny. I tried %d times to allocate a whole\n"
        "new page for an object only %d bytes long, but some other process\n"
        "beat me to actually allocating it. Also note that this 'error'\n"
        "message is soooo very long to catch your attention. I'd appreciate\n"
        "it if you'd be so kind as to report what conditions caused this to\n"
        "the author of this kmalloc: wolff@dutecai.et.tudelft.nl.\n"
        "(Executive summary: This can't happen)\n", 
                MAX_GET_FREE_PAGE_TRIES,
                size);
return NULL;
}
 274 
 275 
 276 void kfree_s (void *ptr,int size)
     /* [previous][next][first][last][top][bottom][index][help] */
 277 {
 278 unsigned long flags;
 279 int order;
 280 register struct block_header *p=((struct block_header *)ptr) -1;
 281 struct page_descriptor *page,*pg2;
 282 
 283 page = PAGE_DESC (p);
 284 order = page->order;
 285 if ((order < 0) || 
 286     (order > sizeof (sizes)/sizeof (sizes[0])) ||
 287     (((long)(page->next)) & ~PAGE_MASK) ||
 288     (p->bh_flags != MF_USED))
 289     {
 290     printk ("kfree of non-kmalloced memory: %p, next= %p, order=%d\n",
 291                 p, page->next, page->order);
 292     return;
 293     }
 294 if (size &&
 295     size != p->bh_length)
 296     {
 297     printk ("Trying to free pointer at %p with wrong size: %d instead of %lu.\n",
 298         p,size,p->bh_length);
 299     return;
 300     }
 301 size = p->bh_length;
 302 p->bh_flags = MF_FREE; /* As of now this block is officially free */
 303 save_flags(flags);
 304 cli ();
 305 p->bh_next = page->firstfree;
 306 page->firstfree = p;
 307 page->nfree ++;
 308 
 309 if (page->nfree == 1)
 310    { /* Page went from full to one free block: put it on the freelist */
 311    if (page->next)
 312         {
 313         printk ("Page %p already on freelist dazed and confused....\n", page);
 314         }
 315    else
 316         {
 317         page->next = sizes[order].firstfree;
 318         sizes[order].firstfree = page;
 319         }
 320    }
 321 
 322 /* If page is completely free, free it */
 323 if (page->nfree == NBLOCKS (page->order))
 324     {
 325 #if 0
 326     printk ("Freeing page %08x.\n", (long)page);
 327 #endif
 328     if (sizes[order].firstfree == page)
 329         {
 330         sizes[order].firstfree = page->next;
 331         }
 332     else
 333         {
 334         for (pg2=sizes[order].firstfree;
 335                 (pg2 != NULL) && (pg2->next != page);
 336                         pg2=pg2->next)
 337             /* Nothing */;
 338         if (pg2 != NULL)
 339             pg2->next = page->next;
 340         else
 341             printk ("Ooops. page %p doesn't show on freelist.\n", page);
 342         }
 343     free_page ((long)page);
 344     }
 345 restore_flags(flags);
 346 
 347 sizes[order].nfrees++;      /* Noncritical (monitoring) admin stuff */
 348 sizes[order].nbytesmalloced -= size;
 349 }

/* [previous][next][first][last][top][bottom][index][help] */