This source file includes the following definitions:
- kmalloc_init
- get_order
- kmalloc
- kfree_s
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15 #include <linux/mm.h>
16 #include <asm/system.h>
17 #include <linux/delay.h>
18
19 #define GFP_LEVEL_MASK 0xf
20
21
22
23
24
25 #define MAX_KMALLOC_K ((PAGE_SIZE<<(NUM_AREA_ORDERS-1))>>10)
26
27
28
29
30 #define MAX_GET_FREE_PAGE_TRIES 4
31
32
33
34
35 #define MF_USED 0xffaa0055
36 #define MF_FREE 0x0055ffaa
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
/*
 * Header prepended to every block handed out by kmalloc.  While the
 * block is free, the union links it onto its page's free list; once it
 * is allocated, the union records the length the caller requested.
 * bh_flags (MF_USED / MF_FREE) doubles as a corruption check.
 */
struct block_header {
	unsigned long bh_flags;		/* MF_USED or MF_FREE sanity marker */
	union {
		unsigned long ubh_length;	/* valid when bh_flags == MF_USED */
		struct block_header *fbh_next;	/* valid when bh_flags == MF_FREE */
	} vp;
};


/* Convenience accessors for the two union interpretations. */
#define bh_length vp.ubh_length
#define bh_next vp.fbh_next
#define BH(p) ((struct block_header *)(p))
65
66
67
68
69
/*
 * Bookkeeping record stored at the start of each page (or multi-page
 * area) obtained from the page allocator for kmalloc's use.  The
 * usable blocks follow immediately after this descriptor.
 */
struct page_descriptor {
	struct page_descriptor *next;	/* next page with free blocks of this size */
	struct block_header *firstfree;	/* head of this page's free-block list */
	int order;			/* index into sizes[] for this page */
	int nfree;			/* free blocks remaining on this page */
};


/* Map a block pointer back to its page descriptor (descriptor is page-aligned). */
#define PAGE_DESC(p) ((struct page_descriptor *)(((unsigned long)(p)) & PAGE_MASK))
79
80
81
82
83
84
/*
 * Per-bucket state: one of these exists for each supported block size.
 * Two freelists are kept so that DMA-capable memory can be handed out
 * separately from ordinary memory.
 */
struct size_descriptor {
	struct page_descriptor *firstfree;	/* pages with free blocks (normal memory) */
	struct page_descriptor *dmafree;	/* pages with free blocks (DMA memory) */
	int size;		/* usable bytes per block (header already deducted) */
	int nblocks;		/* blocks that fit in one area of this bucket */

	/* Statistics, maintained by kmalloc()/kfree_s(). */
	int nmallocs;
	int nfrees;
	int nbytesmalloced;
	int npages;
	unsigned long gfporder;	/* allocation order passed to the page allocator */
};
97
98
99
100
101
102
/*
 * The bucket table.  Sizes up to one page are carved out of a single
 * page; larger buckets use multi-page areas (gfporder > 0) and hold a
 * single block each, with 16 bytes reserved for the page descriptor
 * plus block header.  The all-zero entry terminates the table.
 */
struct size_descriptor sizes[] = {
	{ NULL, NULL, 32,127, 0,0,0,0, 0},
	{ NULL, NULL, 64, 63, 0,0,0,0, 0 },
	{ NULL, NULL, 128, 31, 0,0,0,0, 0 },
	{ NULL, NULL, 252, 16, 0,0,0,0, 0 },
	{ NULL, NULL, 508, 8, 0,0,0,0, 0 },
	{ NULL, NULL,1020, 4, 0,0,0,0, 0 },
	{ NULL, NULL,2040, 2, 0,0,0,0, 0 },
	{ NULL, NULL,4096-16, 1, 0,0,0,0, 0 },
	{ NULL, NULL,8192-16, 1, 0,0,0,0, 1 },
	{ NULL, NULL,16384-16, 1, 0,0,0,0, 2 },
	{ NULL, NULL,32768-16, 1, 0,0,0,0, 3 },
	{ NULL, NULL,65536-16, 1, 0,0,0,0, 4 },
	{ NULL, NULL,131072-16, 1, 0,0,0,0, 5 },
	{ NULL, NULL, 0, 0, 0,0,0,0, 0 }	/* sentinel: end of table */
};


/* Shorthand accessors for the bucket table. */
#define NBLOCKS(order)          (sizes[order].nblocks)
#define BLOCKSIZE(order)        (sizes[order].size)
#define AREASIZE(order)         (PAGE_SIZE<<(sizes[order].gfporder))
124
125
126 long kmalloc_init (long start_mem,long end_mem)
127 {
128 int order;
129
130
131
132
133
134 for (order = 0;BLOCKSIZE(order);order++)
135 {
136 if ((NBLOCKS (order)*BLOCKSIZE(order) + sizeof (struct page_descriptor)) >
137 AREASIZE(order))
138 {
139 printk ("Cannot use %d bytes out of %d in order = %d block mallocs\n",
140 NBLOCKS (order) * BLOCKSIZE(order) +
141 sizeof (struct page_descriptor),
142 (int) AREASIZE(order),
143 BLOCKSIZE (order));
144 panic ("This only happens if someone messes with kmalloc");
145 }
146 }
147 return start_mem;
148 }
149
150
151
152 int get_order (int size)
153 {
154 int order;
155
156
157 size += sizeof (struct block_header);
158 for (order = 0;BLOCKSIZE(order);order++)
159 if (size <= BLOCKSIZE (order))
160 return order;
161 return -1;
162 }
163
164 void * kmalloc (size_t size, int priority)
165 {
166 unsigned long flags;
167 int order,tries,i,sz;
168 int dma_flag;
169 struct block_header *p;
170 struct page_descriptor *page;
171
172 dma_flag = (priority & GFP_DMA);
173 priority &= GFP_LEVEL_MASK;
174
175
176 if (intr_count && priority != GFP_ATOMIC) {
177 static int count = 0;
178 if (++count < 5) {
179 printk("kmalloc called nonatomically from interrupt %p\n",
180 __builtin_return_address(0));
181 priority = GFP_ATOMIC;
182 }
183 }
184
185 order = get_order (size);
186 if (order < 0)
187 {
188 printk ("kmalloc of too large a block (%d bytes).\n",size);
189 return (NULL);
190 }
191
192 save_flags(flags);
193
194
195
196 tries = MAX_GET_FREE_PAGE_TRIES;
197 while (tries --)
198 {
199
200 cli ();
201 if ((page = (dma_flag ? sizes[order].dmafree : sizes[order].firstfree)) &&
202 (p = page->firstfree))
203 {
204 if (p->bh_flags == MF_FREE)
205 {
206 page->firstfree = p->bh_next;
207 page->nfree--;
208 if (!page->nfree)
209 {
210 sizes[order].firstfree = page->next;
211 page->next = NULL;
212 }
213 restore_flags(flags);
214
215 sizes [order].nmallocs++;
216 sizes [order].nbytesmalloced += size;
217 p->bh_flags = MF_USED;
218 p->bh_length = size;
219 return p+1;
220 }
221 printk ("Problem: block on freelist at %08lx isn't free.\n",(long)p);
222 return (NULL);
223 }
224 restore_flags(flags);
225
226
227
228
229 sz = BLOCKSIZE(order);
230
231
232 if (dma_flag)
233 page = (struct page_descriptor *) __get_dma_pages (priority & GFP_LEVEL_MASK, sizes[order].gfporder);
234 else
235 page = (struct page_descriptor *) __get_free_pages (priority & GFP_LEVEL_MASK, sizes[order].gfporder);
236
237 if (!page) {
238 static unsigned long last = 0;
239 if (last + 10*HZ < jiffies) {
240 last = jiffies;
241 printk ("Couldn't get a free page.....\n");
242 }
243 return NULL;
244 }
245 #if 0
246 printk ("Got page %08x to use for %d byte mallocs....",(long)page,sz);
247 #endif
248 sizes[order].npages++;
249
250
251 for (i=NBLOCKS(order),p=BH (page+1);i > 1;i--,p=p->bh_next)
252 {
253 p->bh_flags = MF_FREE;
254 p->bh_next = BH ( ((long)p)+sz);
255 }
256
257 p->bh_flags = MF_FREE;
258 p->bh_next = NULL;
259
260 page->order = order;
261 page->nfree = NBLOCKS(order);
262 page->firstfree = BH(page+1);
263 #if 0
264 printk ("%d blocks per page\n",page->nfree);
265 #endif
266
267
268 cli ();
269
270
271
272
273 page->next = sizes[order].firstfree;
274 if (dma_flag)
275 sizes[order].dmafree = page;
276 else
277 sizes[order].firstfree = page;
278 restore_flags(flags);
279 }
280
281
282
283 printk ("Hey. This is very funny. I tried %d times to allocate a whole\n"
284 "new page for an object only %d bytes long, but some other process\n"
285 "beat me to actually allocating it. Also note that this 'error'\n"
286 "message is soooo very long to catch your attention. I'd appreciate\n"
287 "it if you'd be so kind as to report what conditions caused this to\n"
288 "the author of this kmalloc: wolff@dutecai.et.tudelft.nl.\n"
289 "(Executive summary: This can't happen)\n",
290 MAX_GET_FREE_PAGE_TRIES,
291 size);
292 return NULL;
293 }
294
295 void kfree_s (void *ptr,int size)
296 {
297 unsigned long flags;
298 int order;
299 register struct block_header *p=((struct block_header *)ptr) -1;
300 struct page_descriptor *page,*pg2;
301
302 page = PAGE_DESC (p);
303 order = page->order;
304 if ((order < 0) ||
305 (order > sizeof (sizes)/sizeof (sizes[0])) ||
306 (((long)(page->next)) & ~PAGE_MASK) ||
307 (p->bh_flags != MF_USED))
308 {
309 printk ("kfree of non-kmalloced memory: %p, next= %p, order=%d\n",
310 p, page->next, page->order);
311 return;
312 }
313 if (size &&
314 size != p->bh_length)
315 {
316 printk ("Trying to free pointer at %p with wrong size: %d instead of %lu.\n",
317 p,size,p->bh_length);
318 return;
319 }
320 size = p->bh_length;
321 p->bh_flags = MF_FREE;
322 save_flags(flags);
323 cli ();
324 p->bh_next = page->firstfree;
325 page->firstfree = p;
326 page->nfree ++;
327
328 if (page->nfree == 1)
329 {
330
331 if (page->next)
332 {
333 printk ("Page %p already on freelist dazed and confused....\n", page);
334 }
335 else
336 {
337 page->next = sizes[order].firstfree;
338 sizes[order].firstfree = page;
339 }
340 }
341
342
343 if (page->nfree == NBLOCKS (page->order))
344 {
345 #if 0
346 printk ("Freeing page %08x.\n", (long)page);
347 #endif
348 if (sizes[order].firstfree == page)
349 {
350 sizes[order].firstfree = page->next;
351 }
352 else if (sizes[order].dmafree == page)
353 {
354 sizes[order].dmafree = page->next;
355 }
356 else
357 {
358 for (pg2=sizes[order].firstfree;
359 (pg2 != NULL) && (pg2->next != page);
360 pg2=pg2->next)
361 ;
362 if (!pg2)
363 for (pg2=sizes[order].dmafree;
364 (pg2 != NULL) && (pg2->next != page);
365 pg2=pg2->next)
366 ;
367 if (pg2 != NULL)
368 pg2->next = page->next;
369 else
370 printk ("Ooops. page %p doesn't show on freelist.\n", page);
371 }
372
373 free_pages ((long)page, sizes[order].gfporder);
374 }
375 restore_flags(flags);
376
377
378
379
380
381 sizes[order].nfrees++;
382 sizes[order].nbytesmalloced -= size;
383 }