tag | line | file | source code |
--- | --- | --- | --- |
priority | 111 | drivers/net/atp.c | #define alloc_skb(size, priority) (struct sk_buff *) kmalloc(size,priority) |
priority | 58 | fs/buffer.c | static int shrink_specific_buffers(unsigned int priority, int size); |
priority | 1397 | fs/buffer.c | int shrink_buffers(unsigned int priority) |
priority | 1399 | fs/buffer.c | if (priority < 2) { |
priority | 1403 | fs/buffer.c | if(priority == 2) wakeup_bdflush(1); |
priority | 1408 | fs/buffer.c | return shrink_specific_buffers(priority, 0); |
priority | 1411 | fs/buffer.c | static int shrink_specific_buffers(unsigned int priority, int size) |
priority | 1442 | fs/buffer.c | if(priority > 3 && nlist == BUF_SHARED) continue; |
priority | 1445 | fs/buffer.c | i = nr_buffers_type[nlist] >> priority; |
priority | 1453 | fs/buffer.c | if (priority) |
priority | 335 | fs/proc/array.c | (*p)->priority, /* this is the nice value --- |
priority | 378 | include/linux/fs.h | extern int shrink_buffers(unsigned int priority); |
priority | 10 | include/linux/malloc.h | void *deb_kmalloc(const char *deb_file, unsigned short deb_line,unsigned int size, int priority); |
priority | 20 | include/linux/malloc.h | void * kmalloc(unsigned int size, int priority); |
priority | 118 | include/linux/mm.h | #define __get_free_page(priority) __get_free_pages((priority),0) |
priority | 119 | include/linux/mm.h | extern unsigned long __get_free_pages(int priority, unsigned long gfporder); |
priority | 120 | include/linux/mm.h | extern inline unsigned long get_free_page(int priority) |
priority | 124 | include/linux/mm.h | page = __get_free_page(priority); |
priority | 249 | include/linux/sched.h | long priority; |
priority | 110 | include/linux/skbuff.h | extern struct sk_buff * alloc_skb(unsigned int size, int priority); |
priority | 112 | include/linux/skbuff.h | extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority); |
priority | 224 | kernel/sched.c | current->counter < current->priority*2) { |
priority | 239 | kernel/sched.c | p->counter = (p->counter >> 1) + p->priority; |
priority | 625 | kernel/sched.c | if (current->priority < 15) |
priority | 732 | kernel/sched.c | newprio = current->priority - increment; |
priority | 737 | kernel/sched.c | current->priority = newprio; |
priority | 78 | kernel/sys.c | int priority; |
priority | 83 | kernel/sys.c | if ((priority = PZERO - niceval) <= 0) |
priority | 84 | kernel/sys.c | priority = 1; |
priority | 96 | kernel/sys.c | if (priority > (*p)->priority && !suser()) |
priority | 99 | kernel/sys.c | (*p)->priority = priority; |
priority | 115 | kernel/sys.c | if ((*p)->priority > max_prio) |
priority | 116 | kernel/sys.c | max_prio = (*p)->priority; |
priority | 163 | mm/kmalloc.c | void * kmalloc (size_t size, int priority) |
priority | 171 | mm/kmalloc.c | if (intr_count && priority != GFP_ATOMIC) { |
priority | 176 | mm/kmalloc.c | priority = GFP_ATOMIC; |
priority | 227 | mm/kmalloc.c | page = (struct page_descriptor *) __get_free_pages (priority & GFP_LEVEL_MASK, sizes[order].gfporder); |
priority | 293 | mm/swap.c | static int swap_out(unsigned int priority) |
priority | 300 | mm/swap.c | int counter = NR_TASKS * 2 >> priority; |
priority | 303 | mm/swap.c | counter = NR_TASKS * 2 >> priority; |
priority | 404 | mm/swap.c | static int swap_out(unsigned int priority) |
priority | 413 | mm/swap.c | counter >>= priority; |
priority | 462 | mm/swap.c | static int try_to_free_page(int priority) |
priority | 467 | mm/swap.c | if (priority != GFP_NOBUFFER && shrink_buffers(i)) |
priority | 592 | mm/swap.c | unsigned long __get_free_pages(int priority, unsigned long order) |
priority | 596 | mm/swap.c | if (intr_count && priority != GFP_ATOMIC) { |
priority | 600 | mm/swap.c | ((unsigned long *)&priority)[-1]); |
priority | 601 | mm/swap.c | priority = GFP_ATOMIC; |
priority | 607 | mm/swap.c | if ((priority==GFP_ATOMIC) || nr_free_pages > MAX_SECONDARY_PAGES) { |
priority | 613 | mm/swap.c | if (priority != GFP_BUFFER && try_to_free_page(priority)) |
priority | 567 | net/inet/af_inet.c | sk->priority = 1; |
priority | 417 | net/inet/arp.c | dev_queue_xmit(skb,skb->dev,skb->sk->priority); |
priority | 1790 | net/inet/ip.c | dev_queue_xmit(skb, dev, sk->priority); |
priority | 1875 | net/inet/ip.c | dev_queue_xmit(skb, dev, sk->priority); |
priority | 214 | net/inet/packet.c | dev_queue_xmit(skb, dev, sk->priority); |
priority | 447 | net/inet/skbuff.c | struct sk_buff *alloc_skb(unsigned int size,int priority) |
priority | 451 | net/inet/skbuff.c | if (intr_count && priority!=GFP_ATOMIC) { |
priority | 456 | net/inet/skbuff.c | priority = GFP_ATOMIC; |
priority | 461 | net/inet/skbuff.c | skb=(struct sk_buff *)kmalloc(size,priority); |
priority | 527 | net/inet/skbuff.c | struct sk_buff *skb_clone(struct sk_buff *skb, int priority) |
priority | 532 | net/inet/skbuff.c | n=alloc_skb(skb->mem_len-sizeof(struct sk_buff),priority); |
priority | 202 | net/inet/sock.c | sk->priority = val; |
priority | 290 | net/inet/sock.c | val = sk->priority; |
priority | 312 | net/inet/sock.c | int priority) |
priority | 316 | net/inet/sock.c | struct sk_buff * c = alloc_skb(size, priority); |
priority | 326 | net/inet/sock.c | return(alloc_skb(size, priority)); |
priority | 331 | net/inet/sock.c | sock_rmalloc(struct sock *sk, unsigned long size, int force, int priority) |
priority | 335 | net/inet/sock.c | struct sk_buff *c = alloc_skb(size, priority); |
priority | 345 | net/inet/sock.c | return(alloc_skb(size, priority)); |
priority | 130 | net/inet/sock.h | unsigned char priority; |
priority | 179 | net/inet/sock.h | int priority); |
priority | 182 | net/inet/sock.h | int priority); |
priority | 261 | net/inet/sock.h | int priority); |
priority | 264 | net/inet/sock.h | int priority); |
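The allocation entries above (mm/kmalloc.c, mm/swap.c `__get_free_pages`, net/inet/skbuff.c `alloc_skb`) all apply the same guard: if the caller is in interrupt context (`intr_count` non-zero) but passed a priority other than `GFP_ATOMIC`, the priority is forced to `GFP_ATOMIC` so the allocation cannot sleep. The sketch below is a minimal user-space illustration of that pattern, not the kernel code itself; the `GFP_*` values, the simulated `intr_count` flag, and `sketch_alloc()` are stand-ins chosen for the example.

```c
#include <stdio.h>
#include <stdlib.h>

/* Stand-in values for the example; the real GFP_* constants are defined
 * in the kernel's include/linux/mm.h. */
#define GFP_ATOMIC  0x01
#define GFP_KERNEL  0x03

/* Simulated "are we in an interrupt?" flag standing in for the kernel's
 * intr_count counter. */
static int intr_count;

/* Sketch of the guard shared by kmalloc(), __get_free_pages() and
 * alloc_skb() in the table above: a request made from interrupt context
 * with a priority that may sleep is warned about and downgraded to
 * GFP_ATOMIC before any allocation work is done. */
static void *sketch_alloc(size_t size, int priority)
{
	if (intr_count && priority != GFP_ATOMIC) {
		printf("sketch_alloc called nonatomically from interrupt\n");
		priority = GFP_ATOMIC;
	}
	/* The kernel would now choose pages or free blocks according to
	 * 'priority'; plain malloc() stands in for that step here. */
	(void)priority;
	return malloc(size);
}

int main(void)
{
	void *p;

	intr_count = 1;                    /* pretend we are in an interrupt */
	p = sketch_alloc(128, GFP_KERNEL); /* takes the downgrade path */
	free(p);
	return 0;
}
```

This downgrade-in-place explains why each of those allocation paths compares `priority` against `GFP_ATOMIC` before doing anything else, as the corresponding rows in the table show.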