tag | line | file | source code |
--- | ---- | ---- | ----------- |
order | 168 | include/linux/mm.h | extern void free_pages(unsigned long addr, unsigned long order); |
order | 73 | mm/kmalloc.c | int order; |
order | 121 | mm/kmalloc.c | #define NBLOCKS(order) (sizes[order].nblocks) |
order | 122 | mm/kmalloc.c | #define BLOCKSIZE(order) (sizes[order].size) |
order | 123 | mm/kmalloc.c | #define AREASIZE(order) (PAGE_SIZE<<(sizes[order].gfporder)) |
order | 128 | mm/kmalloc.c | int order; |
order | 134 | mm/kmalloc.c | for (order = 0;BLOCKSIZE(order);order++) |
order | 136 | mm/kmalloc.c | if ((NBLOCKS (order)*BLOCKSIZE(order) + sizeof (struct page_descriptor)) > |
order | 137 | mm/kmalloc.c | AREASIZE(order)) |
order | 140 | mm/kmalloc.c | (int) (NBLOCKS (order) * BLOCKSIZE(order) + |
order | 142 | mm/kmalloc.c | (int) AREASIZE(order), |
order | 143 | mm/kmalloc.c | BLOCKSIZE (order)); |
order | 154 | mm/kmalloc.c | int order; |
order | 158 | mm/kmalloc.c | for (order = 0;BLOCKSIZE(order);order++) |
order | 159 | mm/kmalloc.c | if (size <= BLOCKSIZE (order)) |
order | 160 | mm/kmalloc.c | return order; |
order | 167 | mm/kmalloc.c | int order,tries,i,sz; |
order | 185 | mm/kmalloc.c | order = get_order (size); |
order | 186 | mm/kmalloc.c | if (order < 0) |
order | 201 | mm/kmalloc.c | if ((page = (dma_flag ? sizes[order].dmafree : sizes[order].firstfree)) && |
order | 211 | mm/kmalloc.c | sizes[order].dmafree = page->next; |
order | 213 | mm/kmalloc.c | sizes[order].firstfree = page->next; |
order | 218 | mm/kmalloc.c | sizes [order].nmallocs++; |
order | 219 | mm/kmalloc.c | sizes [order].nbytesmalloced += size; |
order | 232 | mm/kmalloc.c | sz = BLOCKSIZE(order); /* sz is the size of the blocks we're dealing with */ |
order | 236 | mm/kmalloc.c | page = (struct page_descriptor *) __get_dma_pages (priority & GFP_LEVEL_MASK, sizes[order].gfporder); |
order | 238 | mm/kmalloc.c | page = (struct page_descriptor *) __get_free_pages (priority & GFP_LEVEL_MASK, sizes[order].gfporder); |
order | 251 | mm/kmalloc.c | sizes[order].npages++; |
order | 254 | mm/kmalloc.c | for (i=NBLOCKS(order),p=BH (page+1);i > 1;i--,p=p->bh_next) |
order | 263 | mm/kmalloc.c | page->order = order; |
order | 264 | mm/kmalloc.c | page->nfree = NBLOCKS(order); |
order | 277 | mm/kmalloc.c | page->next = sizes[order].dmafree; |
order | 278 | mm/kmalloc.c | sizes[order].dmafree = page; |
order | 280 | mm/kmalloc.c | page->next = sizes[order].firstfree; |
order | 281 | mm/kmalloc.c | sizes[order].firstfree = page; |
order | 303 | mm/kmalloc.c | int order; |
order | 308 | mm/kmalloc.c | order = page->order; |
order | 309 | mm/kmalloc.c | if ((order < 0) || |
order | 310 | mm/kmalloc.c | (order > sizeof (sizes)/sizeof (sizes[0])) || |
order | 315 | mm/kmalloc.c | p, page->next, page->order); |
order | 342 | mm/kmalloc.c | page->next = sizes[order].firstfree; |
order | 343 | mm/kmalloc.c | sizes[order].firstfree = page; |
order | 348 | mm/kmalloc.c | if (page->nfree == NBLOCKS (page->order)) |
order | 353 | mm/kmalloc.c | if (sizes[order].firstfree == page) |
order | 355 | mm/kmalloc.c | sizes[order].firstfree = page->next; |
order | 357 | mm/kmalloc.c | else if (sizes[order].dmafree == page) |
order | 359 | mm/kmalloc.c | sizes[order].dmafree = page->next; |
order | 363 | mm/kmalloc.c | for (pg2=sizes[order].firstfree; |
order | 368 | mm/kmalloc.c | for (pg2=sizes[order].dmafree; |
order | 378 | mm/kmalloc.c | free_pages ((long)page, sizes[order].gfporder); |
order | 386 | mm/kmalloc.c | sizes[order].nfrees++; /* Noncritical (monitoring) admin stuff */ |
order | 387 | mm/kmalloc.c | sizes[order].nbytesmalloced -= size; |
order | 640 | mm/swap.c | static inline void free_pages_ok(unsigned long addr, unsigned long order) |
order | 642 | mm/swap.c | unsigned long index = MAP_NR(addr) >> (1 + order); |
order | 643 | mm/swap.c | unsigned long mask = PAGE_MASK << order; |
order | 646 | mm/swap.c | nr_free_pages += 1 << order; |
order | 647 | mm/swap.c | while (order < NR_MEM_LISTS-1) { |
order | 648 | mm/swap.c | if (!change_bit(index, free_area_map[order])) |
order | 650 | mm/swap.c | remove_mem_queue(free_area_list+order, (struct mem_list *) (addr ^ (1+~mask))); |
order | 651 | mm/swap.c | order++; |
order | 656 | mm/swap.c | add_mem_queue(free_area_list+order, (struct mem_list *) addr); |
order | 674 | mm/swap.c | void free_pages(unsigned long addr, unsigned long order) |
order | 684 | mm/swap.c | free_pages_ok(addr, order); |
order | 702 | mm/swap.c | #define RMQUEUE(order) \ |
order | 703 | mm/swap.c | do { struct mem_list * queue = free_area_list+order; \ |
order | 704 | mm/swap.c | unsigned long new_order = order; \ |
order | 709 | mm/swap.c | nr_free_pages -= 1 << order; \ |
order | 711 | mm/swap.c | EXPAND(next, order, new_order); \ |
order | 717 | mm/swap.c | static inline int mark_used(unsigned long addr, unsigned long order) |
order | 719 | mm/swap.c | return change_bit(MAP_NR(addr) >> (1+order), free_area_map[order]); |
order | 733 | mm/swap.c | unsigned long __get_free_pages(int priority, unsigned long order) |
order | 753 | mm/swap.c | RMQUEUE(order); |
order | 766 | mm/swap.c | unsigned long __get_dma_pages(int priority, unsigned long order) |
order | 776 | mm/swap.c | result = __get_free_pages(priority, order); |
order | 785 | mm/swap.c | free_pages(tmp, order); |
order | 797 | mm/swap.c | unsigned long order, flags; |
order | 803 | mm/swap.c | for (order=0 ; order < NR_MEM_LISTS; order++) { |
order | 806 | mm/swap.c | for (tmp = free_area_list[order].next ; tmp != free_area_list + order ; tmp = tmp->next) { |
order | 809 | mm/swap.c | total += nr * ((PAGE_SIZE>>10) << order); |
order | 810 | mm/swap.c | printk("%lu*%lukB ", nr, (PAGE_SIZE>>10) << order); |
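
In mm/kmalloc.c, `order` is not a page order but an index into the `sizes[]` bucket table: `get_order()` (lines 154-160 above) scans the table for the first bucket whose `BLOCKSIZE` fits the request, and the scan stops at a zero-sized sentinel entry. A minimal user-space sketch of that lookup; the bucket sizes below are illustrative stand-ins, and the struct is trimmed to the one field the lookup needs:

```c
/* Sketch (user-space, not kernel code) of the bucket lookup behind
 * get_order() in mm/kmalloc.c. Block sizes are illustrative; the real
 * sizes[] table ends with a zero-sized sentinel, which is what
 * terminates the BLOCKSIZE(order) loop. */
#include <stdio.h>

struct size_descriptor { int size; };   /* trimmed to the one field used here */

static struct size_descriptor sizes[] = {
    { 32 }, { 64 }, { 128 }, { 252 }, { 508 }, { 1020 }, { 2040 }, { 4080 },
    { 0 }                               /* sentinel: stops the scan */
};

#define BLOCKSIZE(order) (sizes[order].size)

static int get_order(int size)
{
    int order;

    /* first bucket large enough wins; -1 means "too big for any bucket" */
    for (order = 0; BLOCKSIZE(order); order++)
        if (size <= BLOCKSIZE(order))
            return order;
    return -1;
}

int main(void)
{
    printf("get_order(100)  = %d\n", get_order(100));   /* 2: the 128-byte bucket */
    printf("get_order(5000) = %d\n", get_order(5000));  /* -1: no bucket fits */
    return 0;
}
```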
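
In mm/swap.c, by contrast, `order` carries the buddy-allocator meaning: a free block of order n spans 2^n pages, `NR_MEM_LISTS` free lists hold one order each, and `free_pages_ok()` (lines 640-656 above) coalesces a freed block with its buddy, whose address differs only in bit `PAGE_SHIFT + order`. The expression `addr ^ (1+~mask)` on line 650 computes that buddy, because `1+~mask` is the two's-complement negation of `mask = PAGE_MASK << order`, i.e. the block size in bytes. A small sketch of the arithmetic, assuming the 4 KB i386 page of that era (PAGE_SHIFT = 12) and a hypothetical block address:

```c
/* Sketch of the buddy-address arithmetic used in free_pages_ok()
 * (mm/swap.c lines 642-650). Assumes PAGE_SHIFT = 12 as on i386;
 * plain user-space C, not kernel code. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long addr = 0x8000;                    /* hypothetical block address */

    for (unsigned long order = 0; order < 4; order++) {
        unsigned long mask  = PAGE_MASK << order;   /* as on line 643 */
        unsigned long block = addr & mask;          /* align to this order */
        unsigned long buddy = block ^ (1 + ~mask);  /* as on line 650 */

        /* 1+~mask == -mask == 1 << (PAGE_SHIFT + order), so the XOR
         * flips exactly bit PAGE_SHIFT+order, yielding the buddy. */
        printf("order %lu: %2lu page(s), block %#07lx, buddy %#07lx\n",
               order, 1UL << order, block, buddy);
    }
    return 0;
}
```

Running this prints buddies 0x9000, 0xa000, 0xc000, and 0x0 for orders 0 through 3, showing that each step doubles the block size and flips the next higher address bit, which is what lets the coalescing loop on lines 647-651 climb one order per iteration.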