This source file includes the following definitions.
- kmalloc_init
- get_order
- kmalloc
- kfree_s
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15 #include <linux/mm.h>
16 #include <asm/system.h>
17 #include <linux/delay.h>
18
#define GFP_LEVEL_MASK 0xf	/* low bits of 'priority' select the GFP level */




/*
 * Largest kmalloc-able size, in kilobytes.
 * NOTE(review): NUM_AREA_ORDERS is defined outside this file --
 * presumably the number of distinct gfporder values used by sizes[];
 * confirm against the header that defines it.
 */
#define MAX_KMALLOC_K ((PAGE_SIZE<<(NUM_AREA_ORDERS-1))>>10)


/* How many times kmalloc() retries its grab-a-block / grab-a-page loop
 * before giving up (the loop can race with other allocators). */
#define MAX_GET_FREE_PAGE_TRIES 4


/* Magic values kept in block_header.bh_flags so kfree_s() can detect
 * double frees and wild pointers. */
#define MF_USED 0xffaa0055
#define MF_FREE 0x0055ffaa
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
/*
 * Header that precedes every block handed out by kmalloc().  While a
 * block is free, the union links it onto its page's free list; while it
 * is in use, the union records the size the caller requested (checked
 * again by kfree_s).
 */
struct block_header {
	unsigned long bh_flags;		/* MF_USED or MF_FREE */
	union {
		unsigned long ubh_length;	/* in use: requested size in bytes */
		struct block_header *fbh_next;	/* free: next free block on this page */
	} vp;
};
60
61
/* Convenience accessors for the block_header union above. */
#define bh_length vp.ubh_length
#define bh_next vp.fbh_next
#define BH(p) ((struct block_header *)(p))
65
66
67
68
69
/*
 * Bookkeeping kept at the very start of every page (group) used by
 * kmalloc; recovered from any block pointer via PAGE_DESC().
 */
struct page_descriptor {
	struct page_descriptor *next;	/* next page with free blocks of this order */
	struct block_header *firstfree;	/* head of this page's free-block list */
	int order;			/* index into sizes[] */
	int nfree;			/* free blocks remaining on this page */
};
76
77
/* Map a block pointer back to its page descriptor by masking off the
 * in-page bits; relies on the descriptor sitting at the page boundary. */
#define PAGE_DESC(p) ((struct page_descriptor *)(((unsigned long)(p)) & PAGE_MASK))
79
80
81
82
83
84
/*
 * Per-bucket state: the freelists (normal and DMA) plus statistics.
 * One instance per row of the sizes[] table below.
 */
struct size_descriptor {
	struct page_descriptor *firstfree;	/* pages with free blocks */
	struct page_descriptor *dmafree;	/* ditto, DMA-capable pages */
	int size;		/* bytes per block, block_header included
				 * (get_order adds the header before comparing) */
	int nblocks;		/* blocks carved out of each page group */

	/* statistics, updated by kmalloc/kfree_s */
	int nmallocs;
	int nfrees;
	int nbytesmalloced;
	int npages;
	unsigned long gfporder;	/* page order passed to __get_free_pages */
};
97
98
99
100
101
102
/*
 * The bucket table, terminated by a zero-size sentinel.  Single-page
 * buckets reserve 16 bytes for the page_descriptor (hence 4096-16 etc.);
 * larger buckets use multi-page groups (gfporder > 0) holding one block.
 * kmalloc_init() sanity-checks these numbers at boot.
 */
struct size_descriptor sizes[] = {
	{ NULL, NULL, 32,127, 0,0,0,0, 0},
	{ NULL, NULL, 64, 63, 0,0,0,0, 0 },
	{ NULL, NULL, 128, 31, 0,0,0,0, 0 },
	{ NULL, NULL, 252, 16, 0,0,0,0, 0 },
	{ NULL, NULL, 508, 8, 0,0,0,0, 0 },
	{ NULL, NULL,1020, 4, 0,0,0,0, 0 },
	{ NULL, NULL,2040, 2, 0,0,0,0, 0 },
	{ NULL, NULL,4096-16, 1, 0,0,0,0, 0 },
	{ NULL, NULL,8192-16, 1, 0,0,0,0, 1 },
	{ NULL, NULL,16384-16, 1, 0,0,0,0, 2 },
	{ NULL, NULL,32768-16, 1, 0,0,0,0, 3 },
	{ NULL, NULL,65536-16, 1, 0,0,0,0, 4 },
	{ NULL, NULL,131072-16, 1, 0,0,0,0, 5 },
	{ NULL, NULL, 0, 0, 0,0,0,0, 0 }	/* sentinel */
};
119
120
/* Shorthands over the sizes[] table. */
#define NBLOCKS(order) (sizes[order].nblocks)
#define BLOCKSIZE(order) (sizes[order].size)
#define AREASIZE(order) (PAGE_SIZE<<(sizes[order].gfporder))
124
125
126 long kmalloc_init (long start_mem,long end_mem)
127 {
128 int order;
129
130
131
132
133
134 for (order = 0;BLOCKSIZE(order);order++)
135 {
136 if ((NBLOCKS (order)*BLOCKSIZE(order) + sizeof (struct page_descriptor)) >
137 AREASIZE(order))
138 {
139 printk ("Cannot use %d bytes out of %d in order = %d block mallocs\n",
140 (int) (NBLOCKS (order) * BLOCKSIZE(order) +
141 sizeof (struct page_descriptor)),
142 (int) AREASIZE(order),
143 BLOCKSIZE (order));
144 panic ("This only happens if someone messes with kmalloc");
145 }
146 }
147 return start_mem;
148 }
149
150
151
152 int get_order (int size)
153 {
154 int order;
155
156
157 size += sizeof (struct block_header);
158 for (order = 0;BLOCKSIZE(order);order++)
159 if (size <= BLOCKSIZE (order))
160 return order;
161 return -1;
162 }
163
164 void * kmalloc (size_t size, int priority)
165 {
166 unsigned long flags;
167 int order,tries,i,sz;
168 int dma_flag;
169 struct block_header *p;
170 struct page_descriptor *page;
171
172 dma_flag = (priority & GFP_DMA);
173 priority &= GFP_LEVEL_MASK;
174
175
176 if (intr_count && priority != GFP_ATOMIC) {
177 static int count = 0;
178 if (++count < 5) {
179 printk("kmalloc called nonatomically from interrupt %p\n",
180 __builtin_return_address(0));
181 priority = GFP_ATOMIC;
182 }
183 }
184
185 order = get_order (size);
186 if (order < 0)
187 {
188 printk ("kmalloc of too large a block (%d bytes).\n",(int) size);
189 return (NULL);
190 }
191
192 save_flags(flags);
193
194
195
196 tries = MAX_GET_FREE_PAGE_TRIES;
197 while (tries --)
198 {
199
200 cli ();
201 if ((page = (dma_flag ? sizes[order].dmafree : sizes[order].firstfree)) &&
202 (p = page->firstfree))
203 {
204 if (p->bh_flags == MF_FREE)
205 {
206 page->firstfree = p->bh_next;
207 page->nfree--;
208 if (!page->nfree)
209 {
210 if(dma_flag)
211 sizes[order].dmafree = page->next;
212 else
213 sizes[order].firstfree = page->next;
214 page->next = NULL;
215 }
216 restore_flags(flags);
217
218 sizes [order].nmallocs++;
219 sizes [order].nbytesmalloced += size;
220 p->bh_flags = MF_USED;
221 p->bh_length = size;
222 return p+1;
223 }
224 printk ("Problem: block on freelist at %08lx isn't free.\n",(long)p);
225 return (NULL);
226 }
227 restore_flags(flags);
228
229
230
231
232 sz = BLOCKSIZE(order);
233
234
235 if (dma_flag)
236 page = (struct page_descriptor *) __get_dma_pages (priority & GFP_LEVEL_MASK, sizes[order].gfporder);
237 else
238 page = (struct page_descriptor *) __get_free_pages (priority & GFP_LEVEL_MASK, sizes[order].gfporder);
239
240 if (!page) {
241 static unsigned long last = 0;
242 if (last + 10*HZ < jiffies) {
243 last = jiffies;
244 printk ("Couldn't get a free page.....\n");
245 }
246 return NULL;
247 }
248 #if 0
249 printk ("Got page %08x to use for %d byte mallocs....",(long)page,sz);
250 #endif
251 sizes[order].npages++;
252
253
254 for (i=NBLOCKS(order),p=BH (page+1);i > 1;i--,p=p->bh_next)
255 {
256 p->bh_flags = MF_FREE;
257 p->bh_next = BH ( ((long)p)+sz);
258 }
259
260 p->bh_flags = MF_FREE;
261 p->bh_next = NULL;
262
263 page->order = order;
264 page->nfree = NBLOCKS(order);
265 page->firstfree = BH(page+1);
266 #if 0
267 printk ("%d blocks per page\n",page->nfree);
268 #endif
269
270
271 cli ();
272
273
274
275
276 if (dma_flag) {
277 page->next = sizes[order].dmafree;
278 sizes[order].dmafree = page;
279 } else {
280 page->next = sizes[order].firstfree;
281 sizes[order].firstfree = page;
282 }
283 restore_flags(flags);
284 }
285
286
287
288 printk ("Hey. This is very funny. I tried %d times to allocate a whole\n"
289 "new page for an object only %d bytes long, but some other process\n"
290 "beat me to actually allocating it. Also note that this 'error'\n"
291 "message is soooo very long to catch your attention. I'd appreciate\n"
292 "it if you'd be so kind as to report what conditions caused this to\n"
293 "the author of this kmalloc: wolff@dutecai.et.tudelft.nl.\n"
294 "(Executive summary: This can't happen)\n",
295 MAX_GET_FREE_PAGE_TRIES,
296 (int) size);
297 return NULL;
298 }
299
300 void kfree_s (void *ptr,int size)
301 {
302 unsigned long flags;
303 int order;
304 register struct block_header *p=((struct block_header *)ptr) -1;
305 struct page_descriptor *page,*pg2;
306
307 page = PAGE_DESC (p);
308 order = page->order;
309 if ((order < 0) ||
310 (order > sizeof (sizes)/sizeof (sizes[0])) ||
311 (((long)(page->next)) & ~PAGE_MASK) ||
312 (p->bh_flags != MF_USED))
313 {
314 printk ("kfree of non-kmalloced memory: %p, next= %p, order=%d\n",
315 p, page->next, page->order);
316 return;
317 }
318 if (size &&
319 size != p->bh_length)
320 {
321 printk ("Trying to free pointer at %p with wrong size: %d instead of %lu.\n",
322 p,size,p->bh_length);
323 return;
324 }
325 size = p->bh_length;
326 p->bh_flags = MF_FREE;
327 save_flags(flags);
328 cli ();
329 p->bh_next = page->firstfree;
330 page->firstfree = p;
331 page->nfree ++;
332
333 if (page->nfree == 1)
334 {
335
336 if (page->next)
337 {
338 printk ("Page %p already on freelist dazed and confused....\n", page);
339 }
340 else
341 {
342 page->next = sizes[order].firstfree;
343 sizes[order].firstfree = page;
344 }
345 }
346
347
348 if (page->nfree == NBLOCKS (page->order))
349 {
350 #if 0
351 printk ("Freeing page %08x.\n", (long)page);
352 #endif
353 if (sizes[order].firstfree == page)
354 {
355 sizes[order].firstfree = page->next;
356 }
357 else if (sizes[order].dmafree == page)
358 {
359 sizes[order].dmafree = page->next;
360 }
361 else
362 {
363 for (pg2=sizes[order].firstfree;
364 (pg2 != NULL) && (pg2->next != page);
365 pg2=pg2->next)
366 ;
367 if (!pg2)
368 for (pg2=sizes[order].dmafree;
369 (pg2 != NULL) && (pg2->next != page);
370 pg2=pg2->next)
371 ;
372 if (pg2 != NULL)
373 pg2->next = page->next;
374 else
375 printk ("Ooops. page %p doesn't show on freelist.\n", page);
376 }
377
378 free_pages ((long)page, sizes[order].gfporder);
379 }
380 restore_flags(flags);
381
382
383
384
385
386 sizes[order].nfrees++;
387 sizes[order].nbytesmalloced -= size;
388 }