tag | line | file | source code |
--- | --- | --- | --- |
skb2 | 71 | drivers/net/loopback.c | struct sk_buff *skb2=skb; |
skb2 | 75 | drivers/net/loopback.c | dev_kfree_skb(skb2, FREE_READ); |
skb2 | 1776 | net/appletalk/ddp.c | struct sk_buff *skb2=skb_clone(skb, GFP_KERNEL); |
skb2 | 1777 | net/appletalk/ddp.c | if(skb2) |
skb2 | 1782 | net/appletalk/ddp.c | if(aarp_send_ddp(dev,skb2,&usat->sat_addr, NULL)==-1) |
skb2 | 1783 | net/appletalk/ddp.c | kfree_skb(skb2, FREE_WRITE); |
skb2 | 425 | net/core/dev.c | struct sk_buff *skb2; |
skb2 | 426 | net/core/dev.c | if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) |
skb2 | 428 | net/core/dev.c | skb2->h.raw = skb2->data + dev->hard_header_len; |
skb2 | 429 | net/core/dev.c | skb2->mac.raw = skb2->data; |
skb2 | 430 | net/core/dev.c | ptype->func(skb2, skb->dev, ptype); |
skb2 | 612 | net/core/dev.c | struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC); |
skb2 | 613 | net/core/dev.c | if(skb2) |
skb2 | 614 | net/core/dev.c | pt_prev->func(skb2,skb->dev, pt_prev); |
skb2 | 629 | net/core/dev.c | struct sk_buff *skb2; |
skb2 | 631 | net/core/dev.c | skb2=skb_clone(skb, GFP_ATOMIC); |
skb2 | 638 | net/core/dev.c | if(skb2) |
skb2 | 639 | net/core/dev.c | pt_prev->func(skb2, skb->dev, pt_prev); |
skb2 | 112 | net/core/skbuff.c | struct sk_buff *skb2 = skb->next; |
skb2 | 114 | net/core/skbuff.c | while (skb2 != skb && i < 5) { |
skb2 | 115 | net/core/skbuff.c | if (skb_check(skb2, 0, line, file) < 0) { |
skb2 | 120 | net/core/skbuff.c | skb2 = skb2->next; |
skb2 | 351 | net/ipv4/af_inet.c | struct sk_buff *skb2; |
skb2 | 363 | net/ipv4/af_inet.c | skb2 = skb->link3; |
skb2 | 365 | net/ipv4/af_inet.c | skb = skb2; |
skb2 | 91 | net/ipv4/ip_forward.c | struct sk_buff *skb2; /* Output packet */ |
skb2 | 260 | net/ipv4/ip_forward.c | skb2 = alloc_skb(dev2->hard_header_len + skb->len + encap + 15, GFP_ATOMIC); |
skb2 | 264 | net/ipv4/ip_forward.c | skb2 = alloc_skb(dev2->hard_header_len + skb->len + 15, GFP_ATOMIC); |
skb2 | 271 | net/ipv4/ip_forward.c | if (skb2 == NULL) |
skb2 | 279 | net/ipv4/ip_forward.c | IS_SKB(skb2); |
skb2 | 283 | net/ipv4/ip_forward.c | skb2->protocol=htons(ETH_P_IP); |
skb2 | 288 | net/ipv4/ip_forward.c | ip_encap(skb2,skb->len, dev2, raddr); |
skb2 | 292 | net/ipv4/ip_forward.c | ip_send(rt,skb2,raddr,skb->len,dev2,dev2->pa_addr); |
skb2 | 299 | net/ipv4/ip_forward.c | ptr = skb_put(skb2,skb->len); |
skb2 | 300 | net/ipv4/ip_forward.c | skb2->free = 1; |
skb2 | 301 | net/ipv4/ip_forward.c | skb2->h.raw = ptr; |
skb2 | 307 | net/ipv4/ip_forward.c | memcpy(skb2->proto_priv, skb->proto_priv, sizeof(skb->proto_priv)); |
skb2 | 308 | net/ipv4/ip_forward.c | iph = skb2->ip_hdr = skb2->h.iph; |
skb2 | 316 | net/ipv4/ip_forward.c | skb2 = skb; |
skb2 | 317 | net/ipv4/ip_forward.c | skb2->dev=dev2; |
skb2 | 347 | net/ipv4/ip_forward.c | if((fw_res = call_out_firewall(PF_INET, skb2, iph)) < FW_ACCEPT) |
skb2 | 352 | net/ipv4/ip_forward.c | icmp_send(skb2, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev); |
skb2 | 353 | net/ipv4/ip_forward.c | if (skb != skb2) |
skb2 | 354 | net/ipv4/ip_forward.c | kfree_skb(skb2,FREE_WRITE); |
skb2 | 418 | net/ipv4/ip_forward.c | if(skb2->len > dev2->mtu + dev2->hard_header_len) |
skb2 | 420 | net/ipv4/ip_forward.c | ip_fragment(NULL,skb2,dev2, is_frag); |
skb2 | 421 | net/ipv4/ip_forward.c | kfree_skb(skb2,FREE_WRITE); |
skb2 | 439 | net/ipv4/ip_forward.c | dev_queue_xmit(skb2, dev2, SOPRI_INTERACTIVE); |
skb2 | 441 | net/ipv4/ip_forward.c | dev_queue_xmit(skb2, dev2, SOPRI_BACKGROUND); |
skb2 | 443 | net/ipv4/ip_forward.c | dev_queue_xmit(skb2, dev2, SOPRI_NORMAL); |
skb2 | 459 | net/ipv4/ip_forward.c | if(skb==skb2) |
skb2 | 402 | net/ipv4/ip_fragment.c | struct sk_buff *skb2; |
skb2 | 587 | net/ipv4/ip_fragment.c | skb2 = ip_glue(qp); /* glue together the fragments */ |
skb2 | 588 | net/ipv4/ip_fragment.c | return(skb2); |
skb2 | 611 | net/ipv4/ip_fragment.c | struct sk_buff *skb2; |
skb2 | 698 | net/ipv4/ip_fragment.c | if ((skb2 = alloc_skb(len + hlen+15,GFP_ATOMIC)) == NULL) |
skb2 | 709 | net/ipv4/ip_fragment.c | skb2->arp = skb->arp; |
skb2 | 712 | net/ipv4/ip_fragment.c | skb2->free = 1; |
skb2 | 713 | net/ipv4/ip_fragment.c | skb_put(skb2,len + hlen); |
skb2 | 714 | net/ipv4/ip_fragment.c | skb2->h.raw=(char *) skb2->data; |
skb2 | 722 | net/ipv4/ip_fragment.c | atomic_add(skb2->truesize, &sk->wmem_alloc); |
skb2 | 723 | net/ipv4/ip_fragment.c | skb2->sk=sk; |
skb2 | 725 | net/ipv4/ip_fragment.c | skb2->raddr = skb->raddr; /* For rebuild_header - must be here */ |
skb2 | 731 | net/ipv4/ip_fragment.c | memcpy(skb2->h.raw, raw, hlen); |
skb2 | 736 | net/ipv4/ip_fragment.c | memcpy(skb2->h.raw + hlen, ptr, len); |
skb2 | 739 | net/ipv4/ip_fragment.c | skb2->h.raw+=dev->hard_header_len; |
skb2 | 744 | net/ipv4/ip_fragment.c | iph = (struct iphdr *)(skb2->h.raw/*+dev->hard_header_len*/); |
skb2 | 746 | net/ipv4/ip_fragment.c | skb2->ip_hdr = iph; |
skb2 | 772 | net/ipv4/ip_fragment.c | ip_queue_xmit(sk, dev, skb2, 2); |
skb2 | 620 | net/ipv4/ip_fw.c | struct sk_buff *skb2; |
skb2 | 759 | net/ipv4/ip_fw.c | skb2 = alloc_skb(MAX_HEADER + skb->len+diff, GFP_ATOMIC); |
skb2 | 760 | net/ipv4/ip_fw.c | if (skb2 == NULL) { |
skb2 | 764 | net/ipv4/ip_fw.c | skb2->free = skb->free; |
skb2 | 765 | net/ipv4/ip_fw.c | skb_reserve(skb2,MAX_HEADER); |
skb2 | 766 | net/ipv4/ip_fw.c | skb_put(skb2,skb->len + diff); |
skb2 | 767 | net/ipv4/ip_fw.c | skb2->h.raw = skb2->data + (skb->h.raw - skb->data); |
skb2 | 768 | net/ipv4/ip_fw.c | iph=skb2->h.iph; |
skb2 | 780 | net/ipv4/ip_fw.c | memcpy(skb2->data, skb->data, (p - (char *)skb->data)); |
skb2 | 781 | net/ipv4/ip_fw.c | memcpy(&skb2->data[(p - (char *)skb->data)], buf, strlen(buf)); |
skb2 | 782 | net/ipv4/ip_fw.c | memcpy(&skb2->data[(p - (char *)skb->data) + strlen(buf)], data, |
skb2 | 797 | net/ipv4/ip_fw.c | return skb2; |
skb2 | 515 | net/ipv4/ip_input.c | struct sk_buff *skb2; |
skb2 | 532 | net/ipv4/ip_input.c | skb2 = skb_clone(skb, GFP_ATOMIC); |
skb2 | 533 | net/ipv4/ip_input.c | if(skb2==NULL) |
skb2 | 538 | net/ipv4/ip_input.c | skb2 = skb; |
skb2 | 548 | net/ipv4/ip_input.c | ipprot->handler(skb2, dev, opt, daddr, |
skb2 | 574 | net/ipv4/ip_input.c | struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC); |
skb2 | 575 | net/ipv4/ip_input.c | if(skb2) |
skb2 | 577 | net/ipv4/ip_input.c | skb2->free=1; |
skb2 | 578 | net/ipv4/ip_input.c | ipmr_forward(skb2, is_frag); |
skb2 | 730 | net/ipv4/ipmr.c | struct sk_buff *skb2; |
skb2 | 769 | net/ipv4/ipmr.c | skb2=skb_copy(skb, GFP_ATOMIC); |
skb2 | 770 | net/ipv4/ipmr.c | if(skb2) |
skb2 | 772 | net/ipv4/ipmr.c | skb2->free=1; |
skb2 | 773 | net/ipv4/ipmr.c | ipmr_queue_xmit(skb2, &vif_table[psend], skb->dev, is_frag); |
skb2 | 535 | net/ipv4/tcp_input.c | struct sk_buff *skb2; |
skb2 | 538 | net/ipv4/tcp_input.c | skb2 = sk->send_head; |
skb2 | 547 | net/ipv4/tcp_input.c | while (skb2 != NULL) |
skb2 | 549 | net/ipv4/tcp_input.c | skb = skb2; |
skb2 | 550 | net/ipv4/tcp_input.c | skb2 = skb->link3; |
skb2 | 439 | net/ipx/af_ipx.c | struct sk_buff *skb1 = NULL, *skb2 = NULL; |
skb2 | 512 | net/ipx/af_ipx.c | skb2 = skb_clone(skb1, GFP_ATOMIC); |
skb2 | 513 | net/ipx/af_ipx.c | if (skb2 != NULL) |
skb2 | 514 | net/ipx/af_ipx.c | skb2->arp = skb2->free = 1; |
skb2 | 517 | net/ipx/af_ipx.c | skb2 = skb1; |
skb2 | 522 | net/ipx/af_ipx.c | if (skb2 == NULL) |
skb2 | 526 | net/ipx/af_ipx.c | (void) ipxitf_def_skb_handler(sock2, skb2); |
skb2 | 535 | net/ipx/af_ipx.c | struct sk_buff *skb2; |
skb2 | 548 | net/ipx/af_ipx.c | skb2 = alloc_skb(len, GFP_ATOMIC); |
skb2 | 549 | net/ipx/af_ipx.c | if (skb2 != NULL) { |
skb2 | 550 | net/ipx/af_ipx.c | skb_reserve(skb2,out_offset); |
skb2 | 551 | net/ipx/af_ipx.c | skb2->h.raw=skb_put(skb2,skb->len); |
skb2 | 552 | net/ipx/af_ipx.c | skb2->free=1; |
skb2 | 553 | net/ipx/af_ipx.c | skb2->arp=1; |
skb2 | 554 | net/ipx/af_ipx.c | memcpy(skb2->h.raw, skb->h.raw, skb->len); |
skb2 | 557 | net/ipx/af_ipx.c | return skb2; |
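
Two usage patterns account for most of the rows above. The first is clone-and-deliver: when a packet has to reach more than one consumer (an extra protocol handler in net/core/dev.c, the multicast forwarder in net/ipv4/ip_input.c), a private copy is made with skb_clone() and handed off, so the original skb remains available for the normal path. Below is a minimal sketch of that pattern, using only the 1.x-era skb fields and calls that appear in the table; the helper name deliver_copy() is hypothetical.

```c
#include <linux/skbuff.h>
#include <linux/netdevice.h>

/*
 * Illustrative sketch only: hand a private copy of skb to one extra
 * packet_type handler, mirroring net/core/dev.c lines 425-430 above.
 * The handler becomes the owner of skb2 and must free it itself.
 */
static void deliver_copy(struct sk_buff *skb, struct packet_type *ptype,
			 struct device *dev)
{
	struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

	if (skb2 == NULL)
		return;		/* out of memory: silently skip the extra copy */

	skb2->h.raw = skb2->data + dev->hard_header_len;  /* protocol header */
	skb2->mac.raw = skb2->data;                        /* link-level header */
	ptype->func(skb2, dev, ptype);
}
```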
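The second pattern is allocate-and-copy: fragmentation (net/ipv4/ip_fragment.c), firewall rewriting (net/ipv4/ip_fw.c) and IPX frame adjustment (net/ipx/af_ipx.c) all build a fresh skb2 with alloc_skb(), reserve headroom, copy the data across, and transmit or return the new buffer while leaving the original untouched. A hedged sketch under the same 1.x-era assumptions follows; the helper name copy_for_output() is hypothetical.

```c
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/*
 * Illustrative sketch only: build a fresh output copy of skb with hlen
 * bytes of headroom, in the style of net/ipx/af_ipx.c lines 548-557 and
 * net/ipv4/ip_fragment.c lines 698-736 above.  Returns NULL on allocation
 * failure; the caller keeps ownership of the original skb either way.
 */
static struct sk_buff *copy_for_output(struct sk_buff *skb, int hlen)
{
	struct sk_buff *skb2 = alloc_skb(hlen + skb->len + 15, GFP_ATOMIC);

	if (skb2 == NULL)
		return NULL;

	skb_reserve(skb2, hlen);                   /* room for the hard header */
	skb2->h.raw = skb_put(skb2, skb->len);     /* claim space for the data */
	skb2->free = 1;                            /* free after transmission */
	skb2->arp = 1;                             /* address already resolved */
	memcpy(skb2->h.raw, skb->h.raw, skb->len);
	return skb2;
}
```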