tag | line | file | source code |
--- | ---: | --- | --- |
order | 136 | include/linux/mm.h | extern void free_pages(unsigned long addr, unsigned long order); |
order | 73 | mm/kmalloc.c | int order; |
order | 120 | mm/kmalloc.c | #define NBLOCKS(order) (sizes[order].nblocks) |
order | 121 | mm/kmalloc.c | #define BLOCKSIZE(order) (sizes[order].size) |
order | 122 | mm/kmalloc.c | #define AREASIZE(order) (PAGE_SIZE<<(sizes[order].gfporder)) |
order | 127 | mm/kmalloc.c | int order; |
order | 133 | mm/kmalloc.c | for (order = 0;BLOCKSIZE(order);order++) |
order | 135 | mm/kmalloc.c | if ((NBLOCKS (order)*BLOCKSIZE(order) + sizeof (struct page_descriptor)) > |
order | 136 | mm/kmalloc.c | AREASIZE(order)) |
order | 139 | mm/kmalloc.c | NBLOCKS (order) * BLOCKSIZE(order) + |
order | 141 | mm/kmalloc.c | (int) AREASIZE(order), |
order | 142 | mm/kmalloc.c | BLOCKSIZE (order)); |
order | 153 | mm/kmalloc.c | int order; |
order | 157 | mm/kmalloc.c | for (order = 0;BLOCKSIZE(order);order++) |
order | 158 | mm/kmalloc.c | if (size <= BLOCKSIZE (order)) |
order | 159 | mm/kmalloc.c | return order; |
order | 166 | mm/kmalloc.c | int order,tries,i,sz; |
order | 180 | mm/kmalloc.c | order = get_order (size); |
order | 181 | mm/kmalloc.c | if (order < 0) |
order | 196 | mm/kmalloc.c | if ((page = sizes[order].firstfree) && |
order | 205 | mm/kmalloc.c | sizes[order].firstfree = page->next; |
order | 210 | mm/kmalloc.c | sizes [order].nmallocs++; |
order | 211 | mm/kmalloc.c | sizes [order].nbytesmalloced += size; |
order | 224 | mm/kmalloc.c | sz = BLOCKSIZE(order); /* sz is the size of the blocks we're dealing with */ |
order | 227 | mm/kmalloc.c | page = (struct page_descriptor *) __get_free_pages (priority & GFP_LEVEL_MASK, sizes[order].gfporder); |
order | 239 | mm/kmalloc.c | sizes[order].npages++; |
order | 242 | mm/kmalloc.c | for (i=NBLOCKS(order),p=BH (page+1);i > 1;i--,p=p->bh_next) |
order | 251 | mm/kmalloc.c | page->order = order; |
order | 252 | mm/kmalloc.c | page->nfree = NBLOCKS(order); |
order | 264 | mm/kmalloc.c | page->next = sizes[order].firstfree; |
order | 265 | mm/kmalloc.c | sizes[order].firstfree = page; |
order | 287 | mm/kmalloc.c | int order; |
order | 292 | mm/kmalloc.c | order = page->order; |
order | 293 | mm/kmalloc.c | if ((order < 0) || |
order | 294 | mm/kmalloc.c | (order > sizeof (sizes)/sizeof (sizes[0])) || |
order | 299 | mm/kmalloc.c | p, page->next, page->order); |
order | 325 | mm/kmalloc.c | page->next = sizes[order].firstfree; |
order | 326 | mm/kmalloc.c | sizes[order].firstfree = page; |
order | 331 | mm/kmalloc.c | if (page->nfree == NBLOCKS (page->order)) |
order | 336 | mm/kmalloc.c | if (sizes[order].firstfree == page) |
order | 338 | mm/kmalloc.c | sizes[order].firstfree = page->next; |
order | 342 | mm/kmalloc.c | for (pg2=sizes[order].firstfree; |
order | 352 | mm/kmalloc.c | free_pages ((long)page, sizes[order].gfporder); |
order | 360 | mm/kmalloc.c | sizes[order].nfrees++; /* Noncritical (monitoring) admin stuff */ |
order | 361 | mm/kmalloc.c | sizes[order].nbytesmalloced -= size; |
order | 517 | mm/swap.c | static inline void free_pages_ok(unsigned long addr, unsigned long order) |
order | 519 | mm/swap.c | unsigned long index = addr >> (PAGE_SHIFT + 1 + order); |
order | 520 | mm/swap.c | unsigned long mask = PAGE_MASK << order; |
order | 523 | mm/swap.c | nr_free_pages += 1 << order; |
order | 524 | mm/swap.c | while (order < NR_MEM_LISTS-1) { |
order | 525 | mm/swap.c | if (!change_bit(index, free_area_map[order])) |
order | 527 | mm/swap.c | remove_mem_queue(free_area_list+order, (struct mem_list *) (addr ^ (1+~mask))); |
order | 528 | mm/swap.c | order++; |
order | 533 | mm/swap.c | add_mem_queue(free_area_list+order, (struct mem_list *) addr); |
order | 536 | mm/swap.c | void free_pages(unsigned long addr, unsigned long order) |
order | 546 | mm/swap.c | free_pages_ok(addr, order); |
order | 571 | mm/swap.c | #define RMQUEUE(order) \ |
order | 572 | mm/swap.c | do { struct mem_list * queue = free_area_list+order; \ |
order | 573 | mm/swap.c | unsigned long new_order = order; \ |
order | 579 | mm/swap.c | nr_free_pages -= 1 << order; \ |
order | 581 | mm/swap.c | EXPAND(next, order, new_order); \ |
order | 587 | mm/swap.c | static inline int mark_used(unsigned long addr, unsigned long order) |
order | 589 | mm/swap.c | return change_bit(addr >> (PAGE_SHIFT+1+order), free_area_map[order]); |
order | 603 | mm/swap.c | unsigned long __get_free_pages(int priority, unsigned long order) |
order | 619 | mm/swap.c | RMQUEUE(order); |
order | 636 | mm/swap.c | unsigned long order, flags; |
order | 642 | mm/swap.c | for (order=0 ; order < NR_MEM_LISTS; order++) { |
order | 645 | mm/swap.c | for (tmp = free_area_list[order].next ; tmp != free_area_list + order ; tmp = tmp->next) { |
order | 648 | mm/swap.c | total += nr * (4 << order); |
order | 649 | mm/swap.c | printk("%lu*%ukB ", nr, 4 << order); |
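Taken together, the hits above show `order` playing two related roles. In mm/swap.c it is the buddy-allocator exponent: each free list in `free_area_list[]` holds blocks of 2^order contiguous pages, so a block spans `PAGE_SIZE << order` bytes, the buddy-bitmap index for an address is `addr >> (PAGE_SHIFT + 1 + order)` (one bit covers a buddy *pair*, hence the extra `+ 1`), and freeing a block adds `1 << order` to `nr_free_pages`. In mm/kmalloc.c, by contrast, `order` is an index into the `sizes[]` table of block-size classes; the page order actually passed to `__get_free_pages()` is the separate per-class field `sizes[order].gfporder` (lines 122 and 227). The sketch below is a minimal, self-contained demo of that arithmetic, not kernel code; `PAGE_SHIFT = 12` (4 KiB pages) and `NR_MEM_LISTS = 6` are assumptions chosen for illustration, not values read from this table.

```c
/*
 * Minimal sketch of the `order` arithmetic above, not kernel code.
 * Assumptions for this demo only: PAGE_SHIFT = 12 (4 KiB pages) and
 * NR_MEM_LISTS = 6; the real values live in the kernel headers.
 */
#include <stdio.h>

#define PAGE_SHIFT   12
#define PAGE_SIZE    (1UL << PAGE_SHIFT)
#define NR_MEM_LISTS 6

int main(void)
{
    unsigned long order;

    /* One free list per order: an order-n block is 2^n contiguous
     * pages, i.e. PAGE_SIZE << n bytes (cf. AREASIZE and the
     * `nr_free_pages += 1 << order` line in free_pages_ok). */
    for (order = 0; order < NR_MEM_LISTS; order++)
        printf("order %lu: %4lu pages, %8lu bytes\n",
               order, 1UL << order, PAGE_SIZE << order);

    /* free_pages_ok/mark_used locate the buddy-bitmap bit by shifting
     * away the offset within a buddy pair:
     * index = addr >> (PAGE_SHIFT + 1 + order). */
    unsigned long addr = 0x5000;  /* hypothetical page-aligned address */
    order = 1;
    printf("buddy-map index of %#lx at order %lu: %lu\n",
           addr, order, addr >> (PAGE_SHIFT + 1 + order));
    return 0;
}
```

Note the design split this implies for kmalloc: each size class allocates one area of `PAGE_SIZE << sizes[order].gfporder` bytes from the buddy system and carves it into `NBLOCKS(order)` blocks (the loop at mm/kmalloc.c line 242), so many small objects share a single page-level allocation instead of each costing a power-of-two block of its own.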