| tag | line | file | source code |
|-----|------|------|-------------|
| order | 159 | include/linux/mm.h | extern void free_pages(unsigned long addr, unsigned long order); |
| order | 73 | mm/kmalloc.c | int order; |
| order | 121 | mm/kmalloc.c | #define NBLOCKS(order) (sizes[order].nblocks) |
| order | 122 | mm/kmalloc.c | #define BLOCKSIZE(order) (sizes[order].size) |
| order | 123 | mm/kmalloc.c | #define AREASIZE(order) (PAGE_SIZE<<(sizes[order].gfporder)) |
| order | 128 | mm/kmalloc.c | int order; |
| order | 134 | mm/kmalloc.c | for (order = 0;BLOCKSIZE(order);order++) |
| order | 136 | mm/kmalloc.c | if ((NBLOCKS (order)*BLOCKSIZE(order) + sizeof (struct page_descriptor)) > |
| order | 137 | mm/kmalloc.c | AREASIZE(order)) |
| order | 140 | mm/kmalloc.c | (int) (NBLOCKS (order) * BLOCKSIZE(order) + |
| order | 142 | mm/kmalloc.c | (int) AREASIZE(order), |
| order | 143 | mm/kmalloc.c | BLOCKSIZE (order)); |
| order | 154 | mm/kmalloc.c | int order; |
| order | 158 | mm/kmalloc.c | for (order = 0;BLOCKSIZE(order);order++) |
| order | 159 | mm/kmalloc.c | if (size <= BLOCKSIZE (order)) |
| order | 160 | mm/kmalloc.c | return order; |
| order | 167 | mm/kmalloc.c | int order,tries,i,sz; |
| order | 185 | mm/kmalloc.c | order = get_order (size); |
| order | 186 | mm/kmalloc.c | if (order < 0) |
| order | 201 | mm/kmalloc.c | if ((page = (dma_flag ? sizes[order].dmafree : sizes[order].firstfree)) && |
| order | 210 | mm/kmalloc.c | sizes[order].firstfree = page->next; |
| order | 215 | mm/kmalloc.c | sizes [order].nmallocs++; |
| order | 216 | mm/kmalloc.c | sizes [order].nbytesmalloced += size; |
| order | 229 | mm/kmalloc.c | sz = BLOCKSIZE(order); /* sz is the size of the blocks we're dealing with */ |
| order | 233 | mm/kmalloc.c | page = (struct page_descriptor *) __get_dma_pages (priority & GFP_LEVEL_MASK, sizes[order].gfporder); |
| order | 235 | mm/kmalloc.c | page = (struct page_descriptor *) __get_free_pages (priority & GFP_LEVEL_MASK, sizes[order].gfporder); |
| order | 248 | mm/kmalloc.c | sizes[order].npages++; |
| order | 251 | mm/kmalloc.c | for (i=NBLOCKS(order),p=BH (page+1);i > 1;i--,p=p->bh_next) |
| order | 260 | mm/kmalloc.c | page->order = order; |
| order | 261 | mm/kmalloc.c | page->nfree = NBLOCKS(order); |
| order | 273 | mm/kmalloc.c | page->next = sizes[order].firstfree; |
| order | 275 | mm/kmalloc.c | sizes[order].dmafree = page; |
| order | 277 | mm/kmalloc.c | sizes[order].firstfree = page; |
| order | 298 | mm/kmalloc.c | int order; |
| order | 303 | mm/kmalloc.c | order = page->order; |
| order | 304 | mm/kmalloc.c | if ((order < 0) || |
| order | 305 | mm/kmalloc.c | (order > sizeof (sizes)/sizeof (sizes[0])) || |
| order | 310 | mm/kmalloc.c | p, page->next, page->order); |
| order | 337 | mm/kmalloc.c | page->next = sizes[order].firstfree; |
| order | 338 | mm/kmalloc.c | sizes[order].firstfree = page; |
| order | 343 | mm/kmalloc.c | if (page->nfree == NBLOCKS (page->order)) |
| order | 348 | mm/kmalloc.c | if (sizes[order].firstfree == page) |
| order | 350 | mm/kmalloc.c | sizes[order].firstfree = page->next; |
| order | 352 | mm/kmalloc.c | else if (sizes[order].dmafree == page) |
| order | 354 | mm/kmalloc.c | sizes[order].dmafree = page->next; |
| order | 358 | mm/kmalloc.c | for (pg2=sizes[order].firstfree; |
| order | 363 | mm/kmalloc.c | for (pg2=sizes[order].dmafree; |
| order | 373 | mm/kmalloc.c | free_pages ((long)page, sizes[order].gfporder); |
| order | 381 | mm/kmalloc.c | sizes[order].nfrees++; /* Noncritical (monitoring) admin stuff */ |
| order | 382 | mm/kmalloc.c | sizes[order].nbytesmalloced -= size; |
| order | 543 | mm/swap.c | static inline void free_pages_ok(unsigned long addr, unsigned long order) |
| order | 545 | mm/swap.c | unsigned long index = MAP_NR(addr) >> (1 + order); |
| order | 546 | mm/swap.c | unsigned long mask = PAGE_MASK << order; |
| order | 549 | mm/swap.c | nr_free_pages += 1 << order; |
| order | 550 | mm/swap.c | while (order < NR_MEM_LISTS-1) { |
| order | 551 | mm/swap.c | if (!change_bit(index, free_area_map[order])) |
| order | 553 | mm/swap.c | remove_mem_queue(free_area_list+order, (struct mem_list *) (addr ^ (1+~mask))); |
| order | 554 | mm/swap.c | order++; |
| order | 559 | mm/swap.c | add_mem_queue(free_area_list+order, (struct mem_list *) addr); |
| order | 577 | mm/swap.c | void free_pages(unsigned long addr, unsigned long order) |
| order | 587 | mm/swap.c | free_pages_ok(addr, order); |
| order | 605 | mm/swap.c | #define RMQUEUE(order) \ |
| order | 606 | mm/swap.c | do { struct mem_list * queue = free_area_list+order; \ |
| order | 607 | mm/swap.c | unsigned long new_order = order; \ |
| order | 612 | mm/swap.c | nr_free_pages -= 1 << order; \ |
| order | 614 | mm/swap.c | EXPAND(next, order, new_order); \ |
| order | 620 | mm/swap.c | static inline int mark_used(unsigned long addr, unsigned long order) |
| order | 622 | mm/swap.c | return change_bit(MAP_NR(addr) >> (1+order), free_area_map[order]); |
| order | 636 | mm/swap.c | unsigned long __get_free_pages(int priority, unsigned long order) |
| order | 656 | mm/swap.c | RMQUEUE(order); |
| order | 669 | mm/swap.c | unsigned long __get_dma_pages(int priority, unsigned long order) |
| order | 679 | mm/swap.c | result = __get_free_pages(priority, order); |
| order | 688 | mm/swap.c | free_pages(tmp, order); |
| order | 700 | mm/swap.c | unsigned long order, flags; |
| order | 706 | mm/swap.c | for (order=0 ; order < NR_MEM_LISTS; order++) { |
| order | 709 | mm/swap.c | for (tmp = free_area_list[order].next ; tmp != free_area_list + order ; tmp = tmp->next) { |
| order | 712 | mm/swap.c | total += nr * (4 << order); |
| order | 713 | mm/swap.c | printk("%lu*%ukB ", nr, 4 << order); |
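
In mm/kmalloc.c (lines 154–160 above), `order` is an index into the `sizes[]` table of block-size buckets: `get_order()` walks the table until it finds the first bucket whose `BLOCKSIZE(order)` can hold the request. A minimal standalone sketch of that lookup, assuming an illustrative bucket table (the `blocksize[]` values here are assumptions for demonstration, not the kernel's actual `sizes[]` entries):

```c
#include <stdio.h>

/* Illustrative bucket sizes, zero-terminated like the kernel's sizes[]
 * table is terminated by a zero BLOCKSIZE. Values are assumptions. */
static const int blocksize[] = { 32, 64, 128, 256, 512, 1024, 2048, 4096, 0 };

static int get_order(int size)
{
	int order;

	/* Same loop shape as mm/kmalloc.c lines 158-160: return the
	 * first bucket whose block size can hold the request. */
	for (order = 0; blocksize[order]; order++)
		if (size <= blocksize[order])
			return order;
	return -1;	/* request too large for any bucket */
}

int main(void)
{
	/* 100 bytes does not fit in the 32- or 64-byte buckets,
	 * so this prints 2 (the 128-byte bucket). */
	printf("get_order(100) = %d\n", get_order(100));
	return 0;
}
```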
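In mm/swap.c, by contrast, `order` is the buddy allocator's power-of-two exponent: a block at a given order spans `PAGE_SIZE << order` bytes, and line 553 above locates a block's buddy with `addr ^ (1+~mask)`. Since `mask = PAGE_MASK << order`, the value `1+~mask` equals `-mask`, which is `PAGE_SIZE << order`, so the XOR flips exactly the one bit that separates a block from its buddy. A hedged sketch of that arithmetic (`buddy_of` and the 4096-byte `PAGE_SIZE` are assumptions for illustration, not kernel definitions):

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Compute the buddy of the order-sized block at addr, using the same
 * expression as mm/swap.c line 553. */
static unsigned long buddy_of(unsigned long addr, unsigned long order)
{
	unsigned long mask = PAGE_MASK << order;
	return addr ^ (1 + ~mask);	/* same as addr ^ (PAGE_SIZE << order) */
}

int main(void)
{
	/* An order-1 (two-page) block at 0x8000 pairs with the block
	 * at 0xa000: the XOR flips bit 13 (PAGE_SIZE << 1 = 0x2000). */
	printf("buddy_of(0x8000, 1) = %#lx\n", buddy_of(0x8000UL, 1));
	return 0;
}
```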