tag    line  file                    source code
order   176  drivers/block/floppy.c  int order;
order   180  drivers/block/floppy.c  : "=r" (order)
order   183  drivers/block/floppy.c  for (order = 0; order < NR_MEM_LISTS; ++order)
order   184  drivers/block/floppy.c  if (size <= (PAGE_SIZE << order))
order   185  drivers/block/floppy.c  return order;
order   192  drivers/block/floppy.c  int order = __get_order(size);
order   194  drivers/block/floppy.c  if (order >= NR_MEM_LISTS)
order   196  drivers/block/floppy.c  return __get_dma_pages(GFP_KERNEL,order);
order   302  drivers/pci/pci.c       long order;
order   304  drivers/pci/pci.c       order = dev_info[i].vendor - (long) vendor;
order   305  drivers/pci/pci.c       if (!order)
order   306  drivers/pci/pci.c       order = dev_info[i].device - (long) dev;
order   308  drivers/pci/pci.c       if (order < 0)
order   316  drivers/pci/pci.c       if (order > 0)
order  2249  drivers/scsi/scsi.c     int order, a_size;
order  2250  drivers/scsi/scsi.c     for (order = 0, a_size = PAGE_SIZE;
order  2251  drivers/scsi/scsi.c     a_size < size; order++, a_size <<= 1)
order  2254  drivers/scsi/scsi.c     order);
order  2272  drivers/scsi/scsi.c     int order, a_size;
order  2274  drivers/scsi/scsi.c     for (order = 0, a_size = PAGE_SIZE;
order  2275  drivers/scsi/scsi.c     a_size < size; order++, a_size <<= 1)
order  2277  drivers/scsi/scsi.c     free_pages((unsigned long)ptr, order);
order   282  include/asm-mips/dma.h  #define __get_dma_pages(priority, order) __get_free_pages((priority),(order), \
order   162  include/linux/mm.h      #define __get_dma_pages(priority, order) __get_free_pages((priority),(order),MAX_DMA_ADDRESS)
order   178  include/linux/mm.h      extern void free_pages(unsigned long addr, unsigned long order);
order    64  mm/kmalloc.c            int order;
order   133  mm/kmalloc.c            #define NBLOCKS(order)          (sizes[order].nblocks)
order   134  mm/kmalloc.c            #define BLOCKSIZE(order)        (sizes[order].size)
order   135  mm/kmalloc.c            #define AREASIZE(order)    (PAGE_SIZE<<(sizes[order].gfporder))
order   140  mm/kmalloc.c            int order;
order   146  mm/kmalloc.c            for (order = 0; BLOCKSIZE(order); order++) {
order   147  mm/kmalloc.c            if ((NBLOCKS(order) * BLOCKSIZE(order) + sizeof(struct page_descriptor)) >
order   148  mm/kmalloc.c            AREASIZE(order)) {
order   150  mm/kmalloc.c            (int) (NBLOCKS(order) * BLOCKSIZE(order) +
order   152  mm/kmalloc.c            (int) AREASIZE(order),
order   153  mm/kmalloc.c            BLOCKSIZE(order));
order   164  mm/kmalloc.c            int order;
order   168  mm/kmalloc.c            for (order = 0; BLOCKSIZE(order); order++)
order   169  mm/kmalloc.c            if (size <= BLOCKSIZE(order))
order   170  mm/kmalloc.c            return order;
order   178  mm/kmalloc.c            int order, i, sz;
order   182  mm/kmalloc.c            order = get_order(size);
order   183  mm/kmalloc.c            if (order < 0) {
order   190  mm/kmalloc.c            pg = &sizes[order].firstfree;
order   194  mm/kmalloc.c            pg = &sizes[order].dmafree;
order   227  mm/kmalloc.c            sz = BLOCKSIZE(order);
order   230  mm/kmalloc.c            sizes[order].gfporder, max_addr);
order   240  mm/kmalloc.c            sizes[order].npages++;
order   243  mm/kmalloc.c            for (i = NBLOCKS(order), p = BH(page + 1); i > 1; i--, p = p->bh_next) {
order   251  mm/kmalloc.c            page->order = order;
order   252  mm/kmalloc.c            page->nfree = NBLOCKS(order);
order   269  mm/kmalloc.c            sizes[order].nmallocs++;
order   270  mm/kmalloc.c            sizes[order].nbytesmalloced += size;
order   280  mm/kmalloc.c            int order;
order   288  mm/kmalloc.c            order = page->order;
order   289  mm/kmalloc.c            pg = &sizes[order].firstfree;
order   292  mm/kmalloc.c            pg = &sizes[order].dmafree;
order   295  mm/kmalloc.c            if ((order < 0) ||
order   296  mm/kmalloc.c            (order >= sizeof(sizes) / sizeof(sizes[0])) ||
order   300  mm/kmalloc.c            p, page->next, page->order);
order   317  mm/kmalloc.c            if (page->nfree == NBLOCKS(order)) {
order   330  mm/kmalloc.c            sizes[order].npages--;
order   331  mm/kmalloc.c            free_pages((long) page, sizes[order].gfporder);
order   333  mm/kmalloc.c            sizes[order].nfrees++;
order   334  mm/kmalloc.c            sizes[order].nbytesmalloced -= size;
order    58  mm/page_alloc.c         static inline void free_pages_ok(unsigned long addr, unsigned long order)
order    60  mm/page_alloc.c         unsigned long index = MAP_NR(addr) >> (1 + order);
order    61  mm/page_alloc.c         unsigned long mask = PAGE_MASK << order;
order    64  mm/page_alloc.c         nr_free_pages += 1 << order;
order    65  mm/page_alloc.c         while (order < NR_MEM_LISTS-1) {
order    66  mm/page_alloc.c         if (!change_bit(index, free_area_map[order]))
order    68  mm/page_alloc.c         remove_mem_queue(free_area_list+order, (struct mem_list *) (addr ^ (1+~mask)));
order    69  mm/page_alloc.c         order++;
order    74  mm/page_alloc.c         add_mem_queue(free_area_list+order, (struct mem_list *) addr);
order    93  mm/page_alloc.c         void free_pages(unsigned long addr, unsigned long order)
order   104  mm/page_alloc.c         free_pages_ok(addr, order);
order   121  mm/page_alloc.c         #define RMQUEUE(order, limit) \
order   122  mm/page_alloc.c         do { struct mem_list * queue = free_area_list+order; \
order   123  mm/page_alloc.c         unsigned long new_order = order; \
order   129  mm/page_alloc.c         nr_free_pages -= 1 << order; \
order   131  mm/page_alloc.c         EXPAND(ret, order, new_order); \
order   140  mm/page_alloc.c         static inline int mark_used(unsigned long addr, unsigned long order)
order   142  mm/page_alloc.c         return change_bit(MAP_NR(addr) >> (1+order), free_area_map[order]);
order   157  mm/page_alloc.c         unsigned long __get_free_pages(int priority, unsigned long order, unsigned long limit)
order   162  mm/page_alloc.c         if (order >= NR_MEM_LISTS)
order   179  mm/page_alloc.c         RMQUEUE(order, limit);
order   196  mm/page_alloc.c         unsigned long order, flags;
order   202  mm/page_alloc.c         for (order=0 ; order < NR_MEM_LISTS; order++) {
order   205  mm/page_alloc.c         for (tmp = free_area_list[order].next ; tmp != free_area_list + order ; tmp = tmp->next) {
order   208  mm/page_alloc.c         total += nr * ((PAGE_SIZE>>10) << order);
order   209  mm/page_alloc.c         printk("%lu*%lukB ", nr, (PAGE_SIZE>>10) << order);
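
Most of the hits above use "order" in the buddy-allocator sense: an order-n block is 2^n contiguous pages, i.e. PAGE_SIZE << n bytes, and NR_MEM_LISTS bounds the largest order kept on a free list (the drivers/pci/pci.c lines reuse the same identifier for an unrelated sort-comparison result). As a minimal user-space sketch only, not kernel code, the loop below mirrors the size-to-order scan visible at drivers/block/floppy.c lines 183-185 and the return--1-on-failure convention checked at mm/kmalloc.c line 183; the PAGE_SIZE and NR_MEM_LISTS values and the main() driver are illustrative assumptions, not the kernel's definitions.

/*
 * Sketch of the size-to-order mapping, assuming a 4 KB page and six
 * buddy free lists; both constants are stand-ins for illustration.
 */
#include <stdio.h>

#define PAGE_SIZE    4096UL     /* assumed page size */
#define NR_MEM_LISTS 6          /* assumed number of buddy free lists */

/*
 * Smallest order whose block (PAGE_SIZE << order bytes) can hold `size`
 * bytes; -1 if the request is too large for any free list.
 */
static int get_order(unsigned long size)
{
	int order;

	for (order = 0; order < NR_MEM_LISTS; ++order)
		if (size <= (PAGE_SIZE << order))
			return order;
	return -1;
}

int main(void)
{
	unsigned long sizes[] = { 100, 4096, 4097, 20000, 1UL << 18 };
	int i, order;

	for (i = 0; i < 5; ++i) {
		order = get_order(sizes[i]);
		if (order < 0)
			printf("size %6lu -> too large\n", sizes[i]);
		else
			printf("size %6lu -> order %d (%lu bytes)\n",
			       sizes[i], order, PAGE_SIZE << order);
	}
	return 0;
}

With the assumed 4 KB page, 100 and 4096 bytes map to order 0, 4097 to order 1, 20000 to order 3, and 256 KB falls past the last free list and is rejected, which is the situation the checks at drivers/block/floppy.c line 194 and mm/kmalloc.c line 183 guard against.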