This source file includes the following definitions:
- kmalloc_init
- get_order
- kmalloc
- kfree_s
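
For context, a minimal usage sketch of the two entry points defined here. This is a sketch only: the <linux/malloc.h> include and the example_use() helper are assumptions for illustration, not part of this file.

	#include <linux/malloc.h>

	static void example_use(void)
	{
		char *buf = kmalloc(100, GFP_KERNEL);	/* may sleep; use GFP_ATOMIC in interrupt context */

		if (!buf)
			return;
		/* ... use the 100-byte buffer ... */
		kfree_s(buf, 100);	/* size is cross-checked against the allocation */
	}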
#include <linux/mm.h>
#include <asm/system.h>
#include <linux/delay.h>

#define GFP_LEVEL_MASK 0xf	/* low bits of the priority argument select the GFP level */

/* Largest managed area, converted from bytes to Kbytes. */
#define MAX_KMALLOC_K ((PAGE_SIZE<<(NUM_AREA_ORDERS-1))>>10)

/* How often kmalloc() retries the "take a block, else grab a fresh area" loop. */
#define MAX_GET_FREE_PAGE_TRIES 4

/* Magic values stored in bh_flags to tell used blocks from free ones. */
#define MF_USED 0xffaa0055
#define MF_FREE 0x0055ffaa

/*
 * Every block handed out starts with one of these headers.  For a used
 * block the union holds the requested length; for a free block it holds
 * the link to the next free block on the same page.
 */
struct block_header {
	unsigned long bh_flags;
	union {
		unsigned long ubh_length;
		struct block_header *fbh_next;
	} vp;
};

#define bh_length vp.ubh_length
#define bh_next vp.fbh_next
#define BH(p) ((struct block_header *)(p))

/*
 * One of these sits at the start of every area (one or more pages) that
 * kmalloc() carves into blocks; it heads the area's private free list.
 */
struct page_descriptor {
	struct page_descriptor *next;
	struct block_header *firstfree;
	int order;
	int nfree;
};

/* Map a block pointer back to the page_descriptor at the start of its page. */
#define PAGE_DESC(p) ((struct page_descriptor *)(((unsigned long)(p)) & PAGE_MASK))
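
/*
 * How kmalloc() lays a fresh area out (see the block-carving loop in
 * kmalloc() below).  Sizes shown are for the 32-byte bucket on a 4 KB
 * page with 32-bit pointers, which is an assumption for the arithmetic only:
 *
 *	+----------------------+  <- page boundary: PAGE_DESC() of any block
 *	| struct page_descriptor|     (16 bytes)
 *	+----------------------+
 *	| block_header #0      |     8-byte header, bh_next -> block #1
 *	| 24 bytes of payload  |
 *	+----------------------+
 *	| block_header #1      |
 *	| ...                  |
 *	+----------------------+     127 such 32-byte blocks fit behind the descriptor
 */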

/*
 * Per-bucket bookkeeping: the free-list heads (normal and DMA), the block
 * geometry, a few statistics, and the page order to request from the
 * page allocator.
 */
struct size_descriptor {
	struct page_descriptor *firstfree;
	struct page_descriptor *dmafree;	/* DMA-capable areas */
	int size;
	int nblocks;

	int nmallocs;
	int nfrees;
	int nbytesmalloced;
	int npages;
	unsigned long gfporder;	/* non-zero for multi-page areas */
};

/*
 * The bucket table: block size, blocks per area, four statistics counters,
 * and the gfporder of the area.  A zero size terminates the table.
 */
struct size_descriptor sizes[] = {
	{ NULL, NULL,        32, 127, 0, 0, 0, 0, 0 },
	{ NULL, NULL,        64,  63, 0, 0, 0, 0, 0 },
	{ NULL, NULL,       128,  31, 0, 0, 0, 0, 0 },
	{ NULL, NULL,       252,  16, 0, 0, 0, 0, 0 },
	{ NULL, NULL,       508,   8, 0, 0, 0, 0, 0 },
	{ NULL, NULL,      1020,   4, 0, 0, 0, 0, 0 },
	{ NULL, NULL,      2040,   2, 0, 0, 0, 0, 0 },
	{ NULL, NULL,   4096-16,   1, 0, 0, 0, 0, 0 },
	{ NULL, NULL,   8192-16,   1, 0, 0, 0, 0, 1 },
	{ NULL, NULL,  16384-16,   1, 0, 0, 0, 0, 2 },
	{ NULL, NULL,  32768-16,   1, 0, 0, 0, 0, 3 },
	{ NULL, NULL,  65536-16,   1, 0, 0, 0, 0, 4 },
	{ NULL, NULL, 131072-16,   1, 0, 0, 0, 0, 5 },
	{ NULL, NULL,         0,   0, 0, 0, 0, 0, 0 }
};

#define NBLOCKS(order)   (sizes[order].nblocks)
#define BLOCKSIZE(order) (sizes[order].size)
#define AREASIZE(order)  (PAGE_SIZE<<(sizes[order].gfporder))

long kmalloc_init (long start_mem,long end_mem)
{
	int order;

	/*
	 * Check the static sizes[] table: every bucket's blocks plus the
	 * page_descriptor must fit in the area that will be requested for it.
	 */
	for (order = 0;BLOCKSIZE(order);order++)
	{
		if ((NBLOCKS (order)*BLOCKSIZE(order) + sizeof (struct page_descriptor)) >
		    AREASIZE(order))
		{
			printk ("Cannot use %d bytes out of %d in order = %d block mallocs\n",
				(int) (NBLOCKS (order) * BLOCKSIZE(order) +
				       sizeof (struct page_descriptor)),
				(int) AREASIZE(order),
				order);
			panic ("This only happens if someone messes with kmalloc");
		}
	}
	return start_mem;
}
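
/*
 * Worked example of the check above, for the first table row, assuming a
 * 4 KB page and 32-bit pointers (so sizeof(struct page_descriptor) == 16):
 * 127 blocks * 32 bytes + 16 = 4080 <= 4096 == AREASIZE(0), so it passes.
 */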

/*
 * Map a request size to a bucket index; -1 means the request is too big
 * even for the largest bucket.
 */
int get_order (int size)
{
	int order;

	/* Add the per-block bookkeeping overhead before picking a bucket. */
	size += sizeof (struct block_header);
	for (order = 0;BLOCKSIZE(order);order++)
		if (size <= BLOCKSIZE (order))
			return order;
	return -1;
}
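
/*
 * Example: a 100-byte request grows to 100 + sizeof(struct block_header)
 * = 108 bytes on a 32-bit build, which first fits the 128-byte bucket,
 * so get_order(100) returns 2.
 */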

void * kmalloc (size_t size, int priority)
{
	unsigned long flags;
	int order,tries,i,sz;
	int dma_flag;
	struct block_header *p;
	struct page_descriptor *page;

	dma_flag = (priority & GFP_DMA);
	priority &= GFP_LEVEL_MASK;

	/* Sleeping allocations are not allowed from interrupt context; complain and degrade to atomic. */
	if (intr_count && priority != GFP_ATOMIC) {
		static int count = 0;
		if (++count < 5) {
			printk("kmalloc called nonatomically from interrupt %p\n",
				__builtin_return_address(0));
			priority = GFP_ATOMIC;
		}
	}

	order = get_order (size);
	if (order < 0)
	{
		printk ("kmalloc of too large a block (%d bytes).\n",(int) size);
		return (NULL);
	}

	save_flags(flags);

	/*
	 * Repeatedly try to take a block from the bucket's free list; whenever
	 * the list is empty, fall through, grab a fresh area, and try again.
	 */
	tries = MAX_GET_FREE_PAGE_TRIES;
	while (tries --)
	{
		/* Fast path: pop the first free block of the first area on the list. */
		cli ();
		if ((page = (dma_flag ? sizes[order].dmafree : sizes[order].firstfree)) &&
		    (p = page->firstfree))
		{
			if (p->bh_flags == MF_FREE)
			{
				page->firstfree = p->bh_next;
				page->nfree--;
				if (!page->nfree)
				{
					/* Area is now full: unlink it from the list it was taken from. */
					if (dma_flag)
						sizes[order].dmafree = page->next;
					else
						sizes[order].firstfree = page->next;
					page->next = NULL;
				}
				restore_flags(flags);

				sizes [order].nmallocs++;
				sizes [order].nbytesmalloced += size;
				p->bh_flags = MF_USED;
				p->bh_length = size;
				return p+1;	/* the caller's memory starts just past the header */
			}
			printk ("Problem: block on freelist at %08lx isn't free.\n",(long)p);
			return (NULL);
		}
		restore_flags(flags);

		/* No free block available: get a fresh area and carve it into blocks. */
		sz = BLOCKSIZE(order);

		if (dma_flag)
			page = (struct page_descriptor *) __get_dma_pages (priority & GFP_LEVEL_MASK, sizes[order].gfporder);
		else
			page = (struct page_descriptor *) __get_free_pages (priority & GFP_LEVEL_MASK, sizes[order].gfporder);

		if (!page) {
			static unsigned long last = 0;
			/* Rate-limit the complaint to once every ten seconds. */
			if (last + 10*HZ < jiffies) {
				last = jiffies;
				printk ("Couldn't get a free page.....\n");
			}
			return NULL;
		}
#if 0
		printk ("Got page %08x to use for %d byte mallocs....",(long)page,sz);
#endif
		sizes[order].npages++;

		/* Thread all blocks but the last onto the area's free list. */
		for (i=NBLOCKS(order),p=BH (page+1);i > 1;i--,p=p->bh_next)
		{
			p->bh_flags = MF_FREE;
			p->bh_next = BH ( ((long)p)+sz);
		}
		/* The last block terminates the list. */
		p->bh_flags = MF_FREE;
		p->bh_next = NULL;

		page->order = order;
		page->nfree = NBLOCKS(order);
		page->firstfree = BH(page+1);
#if 0
		printk ("%d blocks per page\n",page->nfree);
#endif

		/* Add the new area to the head of the appropriate free list and retry. */
		cli ();
		if (dma_flag) {
			page->next = sizes[order].dmafree;
			sizes[order].dmafree = page;
		} else {
			page->next = sizes[order].firstfree;
			sizes[order].firstfree = page;
		}
		restore_flags(flags);
	}

	printk ("Hey. This is very funny. I tried %d times to allocate a whole\n"
		"new page for an object only %d bytes long, but some other process\n"
		"beat me to actually allocating it. Also note that this 'error'\n"
		"message is soooo very long to catch your attention. I'd appreciate\n"
		"it if you'd be so kind as to report what conditions caused this to\n"
		"the author of this kmalloc: wolff@dutecai.et.tudelft.nl.\n"
		"(Executive summary: This can't happen)\n",
		MAX_GET_FREE_PAGE_TRIES,
		(int) size);
	return NULL;
}
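
/*
 * Sketch of the DMA path above: GFP_DMA is or-ed into the priority in the
 * same way kmalloc() splits it back out, e.g.
 *
 *	void *dma_buf = kmalloc(512, GFP_ATOMIC | GFP_DMA);
 *
 * The GFP_DMA bit routes the area request to __get_dma_pages(); the rest
 * of the priority is masked with GFP_LEVEL_MASK before it is passed on.
 */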

void kfree_s (void *ptr,int size)
{
	unsigned long flags;
	int order;
	register struct block_header *p=((struct block_header *)ptr) -1;
	struct page_descriptor *page,*pg2;

	/* Recover the block header and its page descriptor, then sanity check them. */
	page = PAGE_DESC (p);
	order = page->order;
	if ((order < 0) ||
	    (order >= sizeof (sizes)/sizeof (sizes[0])) ||
	    (((long)(page->next)) & ~PAGE_MASK) ||
	    (p->bh_flags != MF_USED))
	{
		printk ("kfree of non-kmalloced memory: %p, next= %p, order=%d\n",
			p, page->next, page->order);
		return;
	}
	if (size &&
	    size != p->bh_length)
	{
		printk ("Trying to free pointer at %p with wrong size: %d instead of %lu.\n",
			p,size,p->bh_length);
		return;
	}
	size = p->bh_length;
	p->bh_flags = MF_FREE;	/* from here on the block is officially free */
	save_flags(flags);
	cli ();
	p->bh_next = page->firstfree;
	page->firstfree = p;
	page->nfree ++;

	if (page->nfree == 1)
	{
		/* Area went from full to one free block: put it back on the freelist. */
		if (page->next)
		{
			printk ("Page %p already on freelist dazed and confused....\n", page);
		}
		else
		{
			page->next = sizes[order].firstfree;
			sizes[order].firstfree = page;
		}
	}

	/* If the whole area is free again, unlink it and hand it back to the page allocator. */
	if (page->nfree == NBLOCKS (page->order))
	{
#if 0
		printk ("Freeing page %08x.\n", (long)page);
#endif
		if (sizes[order].firstfree == page)
		{
			sizes[order].firstfree = page->next;
		}
		else if (sizes[order].dmafree == page)
		{
			sizes[order].dmafree = page->next;
		}
		else
		{
			/* Walk both lists looking for the predecessor of this area. */
			for (pg2=sizes[order].firstfree;
			     (pg2 != NULL) && (pg2->next != page);
			     pg2=pg2->next)
				;
			if (!pg2)
				for (pg2=sizes[order].dmafree;
				     (pg2 != NULL) && (pg2->next != page);
				     pg2=pg2->next)
					;
			if (pg2 != NULL)
				pg2->next = page->next;
			else
				printk ("Ooops. page %p doesn't show on freelist.\n", page);
		}

		free_pages ((long)page, sizes[order].gfporder);
	}
	restore_flags(flags);

	/*
	 * The statistics are only approximate: they are updated after
	 * interrupts have been re-enabled again.
	 */
	sizes[order].nfrees++;
	sizes[order].nbytesmalloced -= size;
}
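
/*
 * Note on the size argument: passing 0 makes kfree_s() skip the
 * cross-check against bh_length above, while passing the original
 * allocation size lets it catch mismatched frees, e.g.
 *
 *	p = kmalloc(sizeof(struct foo), GFP_KERNEL);
 *	...
 *	kfree_s(p, sizeof(struct foo));
 *
 * (struct foo stands in for the caller's own type.)
 */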