tag | line | file | source code
skb | 1594 | drivers/isdn/isdn_common.c | void isdn_receive_skb_callback(int drvidx, int chan, struct sk_buff *skb)
skb | 1602 | drivers/isdn/isdn_common.c | if (isdn_net_rcv_skb(i, skb) == 0) { |
skb | 1603 | drivers/isdn/isdn_common.c | isdn_receive_callback(drvidx, chan, skb->data, skb->len); |
skb | 1604 | drivers/isdn/isdn_common.c | skb->free = 1; |
skb | 1605 | drivers/isdn/isdn_common.c | kfree_skb(skb, FREE_READ); |
skb | 1616 | drivers/isdn/isdn_common.c | struct sk_buff * skb; |
skb | 1618 | drivers/isdn/isdn_common.c | skb = alloc_skb(dev->drv[drvidx]->interface->hl_hdrlen + len, GFP_ATOMIC); |
skb | 1619 | drivers/isdn/isdn_common.c | if (skb == NULL) |
skb | 1622 | drivers/isdn/isdn_common.c | skb_reserve(skb, dev->drv[drvidx]->interface->hl_hdrlen); |
skb | 1623 | drivers/isdn/isdn_common.c | skb->free = 1; |
skb | 1626 | drivers/isdn/isdn_common.c | memcpy_fromfs(skb_put(skb, len), buf, len); |
skb | 1628 | drivers/isdn/isdn_common.c | memcpy(skb_put(skb, len), buf, len); |
skb | 1630 | drivers/isdn/isdn_common.c | return dev->drv[drvidx]->interface->writebuf_skb(drvidx, chan, skb); |
skb | 632 | drivers/isdn/isdn_net.c | struct sk_buff *skb)
skb | 636 | drivers/isdn/isdn_net.c | lp->transcount += skb->len; |
skb | 639 | drivers/isdn/isdn_net.c | writebuf_skb(lp->isdn_device, lp->isdn_channel, skb); |
skb | 641 | drivers/isdn/isdn_net.c | if ((ret = isdn_net_send(skb->data, lp->isdn_device, |
skb | 642 | drivers/isdn/isdn_net.c | lp->isdn_channel, skb->len))) |
skb | 643 | drivers/isdn/isdn_net.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 664 | drivers/isdn/isdn_net.c | isdn_net_xmit(struct device *ndev, isdn_net_local *lp, struct sk_buff *skb) |
skb | 671 | drivers/isdn/isdn_net.c | return (isdn_ppp_xmit(skb, ndev)); |
skb | 684 | drivers/isdn/isdn_net.c | ret = isdn_net_send_skb(ndev, lp, skb); |
skb | 689 | drivers/isdn/isdn_net.c | ret = isdn_net_send_skb(ndev, lp, skb); |
skb | 691 | drivers/isdn/isdn_net.c | ret = ndev->tbusy = isdn_net_start_xmit(skb, lp->srobin); |
skb | 713 | drivers/isdn/isdn_net.c | ret = isdn_net_send_skb(ndev, lp, skb); |
skb | 726 | drivers/isdn/isdn_net.c | isdn_net_start_xmit(struct sk_buff *skb, struct device *ndev) |
skb | 739 | drivers/isdn/isdn_net.c | if (skb == NULL) { |
skb | 749 | drivers/isdn/isdn_net.c | u_char *buf = skb->data; |
skb | 751 | drivers/isdn/isdn_net.c | isdn_dumppkt("S:", buf, skb->len, 40); |
skb | 793 | drivers/isdn/isdn_net.c | lp->first_skb = skb; |
skb | 814 | drivers/isdn/isdn_net.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 827 | drivers/isdn/isdn_net.c | return(isdn_net_xmit(ndev, lp, skb)); |
skb | 878 | drivers/isdn/isdn_net.c | unsigned short isdn_net_type_trans(struct sk_buff *skb, struct device *dev) |
skb | 883 | drivers/isdn/isdn_net.c | skb_pull(skb,ETH_HLEN); |
skb | 884 | drivers/isdn/isdn_net.c | eth= skb->mac.ethernet; |
skb | 888 | drivers/isdn/isdn_net.c | skb->pkt_type=PACKET_BROADCAST; |
skb | 890 | drivers/isdn/isdn_net.c | skb->pkt_type=PACKET_MULTICAST; |
skb | 900 | drivers/isdn/isdn_net.c | skb->pkt_type=PACKET_OTHERHOST; |
skb | 906 | drivers/isdn/isdn_net.c | rawp = skb->data; |
skb | 926 | drivers/isdn/isdn_net.c | isdn_net_receive(struct device *ndev, struct sk_buff *skb) |
skb | 933 | drivers/isdn/isdn_net.c | lp->transcount += skb->len; |
skb | 947 | drivers/isdn/isdn_net.c | skb->dev = ndev; |
skb | 948 | drivers/isdn/isdn_net.c | skb->pkt_type = PACKET_HOST; |
skb | 949 | drivers/isdn/isdn_net.c | skb->mac.raw = skb->data; |
skb | 951 | drivers/isdn/isdn_net.c | isdn_dumppkt("R:", skb->data, skb->len, 40); |
skb | 956 | drivers/isdn/isdn_net.c | skb->protocol = isdn_net_type_trans(skb,ndev); |
skb | 960 | drivers/isdn/isdn_net.c | skb->protocol = htons(ETH_P_IP); |
skb | 964 | drivers/isdn/isdn_net.c | skb_pull(skb, 2); |
skb | 968 | drivers/isdn/isdn_net.c | skb->protocol = *(unsigned short *)&(skb->data[0]); |
skb | 969 | drivers/isdn/isdn_net.c | skb_pull(skb, 2); |
skb | 970 | drivers/isdn/isdn_net.c | if (*(unsigned short *)skb->data == 0xFFFF) |
skb | 971 | drivers/isdn/isdn_net.c | skb->protocol = htons(ETH_P_802_3); |
skb | 975 | drivers/isdn/isdn_net.c | isdn_ppp_receive(lp->netdev, olp, skb); |
skb | 981 | drivers/isdn/isdn_net.c | kfree_skb(skb,FREE_READ); |
skb | 984 | drivers/isdn/isdn_net.c | netif_rx(skb); |
skb | 997 | drivers/isdn/isdn_net.c | struct sk_buff *skb; |
skb | 1003 | drivers/isdn/isdn_net.c | skb = dev_alloc_skb(len); |
skb | 1004 | drivers/isdn/isdn_net.c | if (skb == NULL) { |
skb | 1008 | drivers/isdn/isdn_net.c | memcpy(skb_put(skb, len), buf, len); |
skb | 1009 | drivers/isdn/isdn_net.c | isdn_net_receive(&p->dev, skb); |
skb | 1021 | drivers/isdn/isdn_net.c | isdn_net_rcv_skb(int idx, struct sk_buff *skb) |
skb | 1029 | drivers/isdn/isdn_net.c | isdn_net_receive(&p->dev, skb); |
skb | 1037 | drivers/isdn/isdn_net.c | my_eth_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 1040 | drivers/isdn/isdn_net.c | struct ethhdr *eth = (struct ethhdr *)skb_push(skb,ETH_HLEN); |
skb | 1084 | drivers/isdn/isdn_net.c | isdn_net_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 1092 | drivers/isdn/isdn_net.c | len = my_eth_header(skb, dev, type, daddr, saddr, plen); |
skb | 1100 | drivers/isdn/isdn_net.c | *((ushort*) skb_push(skb, 2)) = htons(type); |
skb | 1104 | drivers/isdn/isdn_net.c | skb_push(skb, 4); |
skb | 1105 | drivers/isdn/isdn_net.c | skb->data[0] = 0x0f; |
skb | 1106 | drivers/isdn/isdn_net.c | skb->data[1] = 0x00; |
skb | 1107 | drivers/isdn/isdn_net.c | *((ushort*)&skb->data[2]) = htons(type); |
skb | 1130 | drivers/isdn/isdn_net.c | *((unsigned long *)skb_push(skb, len)) = 0; |
skb | 1141 | drivers/isdn/isdn_net.c | struct sk_buff *skb) |
skb | 1164 | drivers/isdn/isdn_net.c | ret = arp_find((unsigned char *)&(eth->h_dest), dst, dev, dev->pa_addr,skb)? 1 : 0; |
skb | 60 | drivers/isdn/isdn_ppp.c | struct sk_buff *skb, int proto); |
skb | 67 | drivers/isdn/isdn_ppp.c | static int isdn_ppp_fill_mpqueue(isdn_net_dev *, struct sk_buff **skb, |
skb | 551 | drivers/isdn/isdn_ppp.c | void isdn_ppp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff *skb) |
skb | 554 | drivers/isdn/isdn_ppp.c | printk(KERN_DEBUG "recv, skb %d\n",skb->len); |
skb | 557 | drivers/isdn/isdn_ppp.c | if(skb->data[0] == 0xff && skb->data[1] == 0x03) |
skb | 558 | drivers/isdn/isdn_ppp.c | skb_pull(skb,2); |
skb | 566 | drivers/isdn/isdn_ppp.c | if (skb->data[0] & 0x1) { |
skb | 567 | drivers/isdn/isdn_ppp.c | proto = skb->data[0]; |
skb | 568 | drivers/isdn/isdn_ppp.c | skb_pull(skb,1); /* protocol ID is only 8 bit */ |
skb | 570 | drivers/isdn/isdn_ppp.c | proto = ((int) skb->data[0] << 8) + skb->data[1]; |
skb | 571 | drivers/isdn/isdn_ppp.c | skb_pull(skb,2); |
skb | 576 | drivers/isdn/isdn_ppp.c | u_char BEbyte = skb->data[0]; |
skb | 579 | drivers/isdn/isdn_ppp.c | (int) skb->len, (int) skb->data[0], (int) skb->data[1], (int) skb->data[2], |
skb | 580 | drivers/isdn/isdn_ppp.c | (int) skb->data[3], (int) skb->data[4], (int) skb->data[5]); |
skb | 583 | drivers/isdn/isdn_ppp.c | sqno = ((int) skb->data[1] << 16) + ((int) skb->data[2] << 8) + (int) skb->data[3]; |
skb | 584 | drivers/isdn/isdn_ppp.c | skb_pull(skb,4); |
skb | 586 | drivers/isdn/isdn_ppp.c | sqno = (((int) skb->data[0] & 0xf) << 8) + (int) skb->data[1]; |
skb | 587 | drivers/isdn/isdn_ppp.c | skb_pull(skb,2); |
skb | 630 | drivers/isdn/isdn_ppp.c | if ((sqno_end = isdn_ppp_fill_mpqueue(net_dev, &skb , BEbyte, &sqno, min_sqno)) < 0) |
skb | 655 | drivers/isdn/isdn_ppp.c | q->skb = skb; |
skb | 684 | drivers/isdn/isdn_ppp.c | isdn_ppp_push_higher(net_dev, lp, skb, -1); |
skb | 690 | drivers/isdn/isdn_ppp.c | isdn_ppp_push_higher(net_dev, lp, q->skb, -1); |
skb | 699 | drivers/isdn/isdn_ppp.c | isdn_ppp_push_higher(net_dev, lp, skb , proto); |
skb | 702 | drivers/isdn/isdn_ppp.c | isdn_ppp_push_higher(net_dev, lp, skb , -1); |
skb | 706 | drivers/isdn/isdn_ppp.c | static void isdn_ppp_push_higher(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb,int proto) |
skb | 711 | drivers/isdn/isdn_ppp.c | if (skb->data[0] & 0x01) { /* is it odd? */ |
skb | 712 | drivers/isdn/isdn_ppp.c | proto = (unsigned char) skb->data[0]; |
skb | 713 | drivers/isdn/isdn_ppp.c | skb_pull(skb,1); /* protocol ID is only 8 bit */ |
skb | 715 | drivers/isdn/isdn_ppp.c | proto = ((int) (unsigned char) skb->data[0] << 8) + (unsigned char) skb->data[1]; |
skb | 716 | drivers/isdn/isdn_ppp.c | skb_pull(skb,2); |
skb | 721 | drivers/isdn/isdn_ppp.c | printk(KERN_DEBUG "push, skb %d %04x\n",skb->len,proto); |
skb | 726 | drivers/isdn/isdn_ppp.c | skb->dev = dev; |
skb | 727 | drivers/isdn/isdn_ppp.c | skb->mac.raw = skb->data; |
skb | 728 | drivers/isdn/isdn_ppp.c | skb->protocol = htons(ETH_P_IPX); |
skb | 732 | drivers/isdn/isdn_ppp.c | slhc_remember(ippp_table[net_dev->local.ppp_minor].slcomp, skb->data, skb->len); |
skb | 735 | drivers/isdn/isdn_ppp.c | skb->dev = dev; |
skb | 736 | drivers/isdn/isdn_ppp.c | skb->mac.raw = skb->data; |
skb | 737 | drivers/isdn/isdn_ppp.c | skb->protocol = htons(ETH_P_IP); |
skb | 742 | drivers/isdn/isdn_ppp.c | struct sk_buff *skb_old = skb; |
skb | 744 | drivers/isdn/isdn_ppp.c | skb = dev_alloc_skb(skb_old->len + 40); |
skb | 746 | drivers/isdn/isdn_ppp.c | if (!skb) { |
skb | 751 | drivers/isdn/isdn_ppp.c | skb->dev = dev; |
skb | 752 | drivers/isdn/isdn_ppp.c | skb_put(skb,skb_old->len + 40); |
skb | 753 | drivers/isdn/isdn_ppp.c | memcpy(skb->data, skb_old->data, skb_old->len); |
skb | 754 | drivers/isdn/isdn_ppp.c | skb->mac.raw = skb->data; |
skb | 756 | drivers/isdn/isdn_ppp.c | skb->data, skb_old->len); |
skb | 757 | drivers/isdn/isdn_ppp.c | skb_trim(skb, pkt_len); |
skb | 759 | drivers/isdn/isdn_ppp.c | skb->protocol = htons(ETH_P_IP); |
skb | 768 | drivers/isdn/isdn_ppp.c | skb_push(skb,4); |
skb | 769 | drivers/isdn/isdn_ppp.c | skb->data[0] = 0xff; |
skb | 770 | drivers/isdn/isdn_ppp.c | skb->data[1] = 0x03; |
skb | 771 | drivers/isdn/isdn_ppp.c | skb->data[2] = (proto>>8); |
skb | 772 | drivers/isdn/isdn_ppp.c | skb->data[3] = proto & 0xff; |
skb | 773 | drivers/isdn/isdn_ppp.c | isdn_ppp_fill_rq(skb->data, skb->len, lp->ppp_minor); /* push data to pppd device */ |
skb | 774 | drivers/isdn/isdn_ppp.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 778 | drivers/isdn/isdn_ppp.c | netif_rx(skb); |
skb | 790 | drivers/isdn/isdn_ppp.c | int isdn_ppp_xmit(struct sk_buff *skb, struct device *dev) |
skb | 802 | drivers/isdn/isdn_ppp.c | if (*((unsigned long *)skb->data) != 0) |
skb | 803 | drivers/isdn/isdn_ppp.c | return (isdn_net_send_skb(dev , lp , skb)); |
skb | 810 | drivers/isdn/isdn_ppp.c | printk(KERN_DEBUG "xmit, skb %d\n",skb->len); |
skb | 815 | drivers/isdn/isdn_ppp.c | u_char *buf = skb->data; |
skb | 826 | drivers/isdn/isdn_ppp.c | pktlen = slhc_compress(ipts->slcomp, buf, skb->len-len, ipts->cbuf, |
skb | 828 | drivers/isdn/isdn_ppp.c | skb_trim(skb,pktlen+len); |
skb | 829 | drivers/isdn/isdn_ppp.c | if(buf != skb->data+len) { /* copied to new buffer ??? (btw: WHY must slhc copy it?? *sigh*) */ |
skb | 830 | drivers/isdn/isdn_ppp.c | memcpy(skb->data+len,buf,pktlen); |
skb | 832 | drivers/isdn/isdn_ppp.c | if (skb->data[len] & SL_TYPE_COMPRESSED_TCP) { /* cslip? style -> PPP */ |
skb | 834 | drivers/isdn/isdn_ppp.c | skb->data[len] ^= SL_TYPE_COMPRESSED_TCP; |
skb | 836 | drivers/isdn/isdn_ppp.c | if (skb->data[len] >= SL_TYPE_UNCOMPRESSED_TCP) |
skb | 838 | drivers/isdn/isdn_ppp.c | skb->data[len] = (skb->data[len] & 0x0f) | 0x40; |
skb | 844 | drivers/isdn/isdn_ppp.c | printk(KERN_DEBUG "xmit, skb %d %04x\n",skb->len,proto); |
skb | 856 | drivers/isdn/isdn_ppp.c | skb->data[4] = MP_BEGIN_FRAG | MP_END_FRAG | (mp_seqno >> 8); /* (B)egin & (E)ndbit .. */ |
skb | 857 | drivers/isdn/isdn_ppp.c | skb->data[5] = mp_seqno & 0xff; |
skb | 858 | drivers/isdn/isdn_ppp.c | skb->data[6] = proto; /* PID compression */ |
skb | 861 | drivers/isdn/isdn_ppp.c | skb->data[4] = MP_BEGIN_FRAG | MP_END_FRAG; /* (B)egin & (E)ndbit .. */ |
skb | 862 | drivers/isdn/isdn_ppp.c | skb->data[5] = (mp_seqno >> 16) & 0xff; /* sequence nubmer: 24bit */ |
skb | 863 | drivers/isdn/isdn_ppp.c | skb->data[6] = (mp_seqno >> 8) & 0xff; |
skb | 864 | drivers/isdn/isdn_ppp.c | skb->data[7] = (mp_seqno >> 0) & 0xff; |
skb | 865 | drivers/isdn/isdn_ppp.c | skb->data[8] = proto; /* PID compression */ |
skb | 870 | drivers/isdn/isdn_ppp.c | skb->data[0] = 0xff; /* All Stations */ |
skb | 871 | drivers/isdn/isdn_ppp.c | skb->data[1] = 0x03; /* Unumbered information */ |
skb | 872 | drivers/isdn/isdn_ppp.c | skb->data[2] = proto >> 8; |
skb | 873 | drivers/isdn/isdn_ppp.c | skb->data[3] = proto & 0xff; |
skb | 881 | drivers/isdn/isdn_ppp.c | return (isdn_net_send_skb(dev , lp , skb)); |
skb | 889 | drivers/isdn/isdn_ppp.c | dev_kfree_skb(q->skb,FREE_WRITE); |
skb | 955 | drivers/isdn/isdn_ppp.c | static int isdn_ppp_fill_mpqueue(isdn_net_dev * dev, struct sk_buff ** skb, int BEbyte, int *sqnop, int min_sqno) |
skb | 971 | drivers/isdn/isdn_ppp.c | q1->skb = *skb; |
skb | 1011 | drivers/isdn/isdn_ppp.c | pktlen = -q1->skb->len; |
skb | 1021 | drivers/isdn/isdn_ppp.c | pktlen += q->skb->len; |
skb | 1024 | drivers/isdn/isdn_ppp.c | pktlen += q->skb->len; |
skb | 1036 | drivers/isdn/isdn_ppp.c | pktlen += q->skb->len; |
skb | 1039 | drivers/isdn/isdn_ppp.c | pktlen += q->skb->len; |
skb | 1055 | drivers/isdn/isdn_ppp.c | *skb = dev_alloc_skb(pktlen + 40); /* not needed: +40 for VJ compression .. */ |
skb | 1057 | drivers/isdn/isdn_ppp.c | if (!(*skb)) { |
skb | 1060 | drivers/isdn/isdn_ppp.c | dev_kfree_skb(q->skb,FREE_WRITE); |
skb | 1067 | drivers/isdn/isdn_ppp.c | skb_put(*skb,pktlen); |
skb | 1070 | drivers/isdn/isdn_ppp.c | memcpy((*skb)->data + cnt, q->skb->data, q->skb->len); |
skb | 1071 | drivers/isdn/isdn_ppp.c | cnt += q->skb->len; |
skb | 1072 | drivers/isdn/isdn_ppp.c | dev_kfree_skb(q->skb,FREE_WRITE); |
skb | 1102 | drivers/isdn/isdn_ppp.c | dev_kfree_skb(q->skb,FREE_WRITE); |
skb | 1141 | drivers/isdn/isdn_ppp.c | isdn_ppp_push_higher(net_dev, lp, ql->skb, -1); |
skb | 121 | drivers/net/3c501.c | static int el_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 376 | drivers/net/3c501.c | static int el_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 405 | drivers/net/3c501.c | if (skb == NULL) |
skb | 432 | drivers/net/3c501.c | int gp_start = 0x800 - (ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN); |
skb | 433 | drivers/net/3c501.c | unsigned char *buf = skb->data; |
skb | 461 | drivers/net/3c501.c | outsb(DATAPORT,buf,skb->len); /* load buffer (usual thing each byte increments the pointer) */ |
skb | 481 | drivers/net/3c501.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 678 | drivers/net/3c501.c | struct sk_buff *skb; |
skb | 698 | drivers/net/3c501.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 705 | drivers/net/3c501.c | if (skb == NULL) |
skb | 713 | drivers/net/3c501.c | skb_reserve(skb,2); /* Force 16 byte alignment */ |
skb | 714 | drivers/net/3c501.c | skb->dev = dev; |
skb | 720 | drivers/net/3c501.c | insb(DATAPORT, skb_put(skb,pkt_len), pkt_len); |
skb | 721 | drivers/net/3c501.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 722 | drivers/net/3c501.c | netif_rx(skb); |
skb | 79 | drivers/net/3c503.c | static void el2_block_input(struct device *dev, int count, struct sk_buff *skb, |
skb | 546 | drivers/net/3c503.c | el2_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 560 | drivers/net/3c503.c | memcpy_fromio(skb->data, dev->mem_start + ring_offset, semi_count); |
skb | 562 | drivers/net/3c503.c | memcpy_fromio(skb->data + semi_count, dev->rmem_start, count); |
skb | 565 | drivers/net/3c503.c | eth_io_copy_and_sum(skb, dev->mem_start + ring_offset, count, 0); |
skb | 590 | drivers/net/3c503.c | buf = (unsigned short int *) skb->data; |
skb | 557 | drivers/net/3c505.c | struct sk_buff *skb; |
skb | 572 | drivers/net/3c505.c | skb = dev_alloc_skb(rlen+2); |
skb | 583 | drivers/net/3c505.c | if (skb == NULL) { |
skb | 599 | drivers/net/3c505.c | skb_reserve(skb,2); /* 16 byte alignment */ |
skb | 600 | drivers/net/3c505.c | skb->dev = dev; |
skb | 605 | drivers/net/3c505.c | ptr = (unsigned short *)skb_put(skb,len); |
skb | 614 | drivers/net/3c505.c | kfree_skb(skb, FREE_WRITE); |
skb | 623 | drivers/net/3c505.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 624 | drivers/net/3c505.c | netif_rx(skb); |
skb | 1005 | drivers/net/3c505.c | elp_start_xmit (struct sk_buff *skb, struct device *dev) |
skb | 1012 | drivers/net/3c505.c | if (skb == NULL) { |
skb | 1020 | drivers/net/3c505.c | if (skb->len <= 0) |
skb | 1024 | drivers/net/3c505.c | printk("%s: request to send packet of length %d\n", dev->name, (int)skb->len); |
skb | 1045 | drivers/net/3c505.c | if (!send_packet(dev, skb->data, skb->len)) { |
skb | 1051 | drivers/net/3c505.c | printk("%s: packet of length %d sent\n", dev->name, (int)skb->len); |
skb | 1067 | drivers/net/3c505.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 282 | drivers/net/3c507.c | static int el16_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 451 | drivers/net/3c507.c | el16_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 486 | drivers/net/3c507.c | if (skb == NULL) { |
skb | 495 | drivers/net/3c507.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 496 | drivers/net/3c507.c | unsigned char *buf = skb->data; |
skb | 506 | drivers/net/3c507.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 841 | drivers/net/3c507.c | struct sk_buff *skb; |
skb | 844 | drivers/net/3c507.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 845 | drivers/net/3c507.c | if (skb == NULL) { |
skb | 851 | drivers/net/3c507.c | skb_reserve(skb,2); |
skb | 852 | drivers/net/3c507.c | skb->dev = dev; |
skb | 855 | drivers/net/3c507.c | memcpy(skb_put(skb,pkt_len), data_frame + 5, pkt_len); |
skb | 857 | drivers/net/3c507.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 858 | drivers/net/3c507.c | netif_rx(skb); |
skb | 117 | drivers/net/3c509.c | static int el3_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 397 | drivers/net/3c509.c | struct sk_buff * skb; |
skb | 408 | drivers/net/3c509.c | skb = lp->queue[lp->head]; |
skb | 413 | drivers/net/3c509.c | outw(skb->len, ioaddr + TX_FIFO); |
skb | 416 | drivers/net/3c509.c | outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); |
skb | 419 | drivers/net/3c509.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 441 | drivers/net/3c509.c | el3_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 454 | drivers/net/3c509.c | lp->queue[tail] = skb; |
skb | 474 | drivers/net/3c509.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 605 | drivers/net/3c509.c | struct sk_buff *skb; |
skb | 607 | drivers/net/3c509.c | skb = dev_alloc_skb(pkt_len+5); |
skb | 611 | drivers/net/3c509.c | if (skb != NULL) { |
skb | 612 | drivers/net/3c509.c | skb->dev = dev; |
skb | 613 | drivers/net/3c509.c | skb_reserve(skb,2); /* Align IP on 16 byte boundaries */ |
skb | 616 | drivers/net/3c509.c | insl(ioaddr+RX_FIFO, skb_put(skb,pkt_len), |
skb | 619 | drivers/net/3c509.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 620 | drivers/net/3c509.c | netif_rx(skb); |
skb | 238 | drivers/net/3c59x.c | static int vortex_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 636 | drivers/net/3c59x.c | vortex_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 663 | drivers/net/3c59x.c | if (skb == NULL || skb->len <= 0) { |
skb | 679 | drivers/net/3c59x.c | outl(skb->len, ioaddr + TX_FIFO); |
skb | 683 | drivers/net/3c59x.c | outl((int)(skb->data), ioaddr + Wn7_MasterAddr); |
skb | 684 | drivers/net/3c59x.c | outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen); |
skb | 685 | drivers/net/3c59x.c | vp->tx_skb = skb; |
skb | 689 | drivers/net/3c59x.c | outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); |
skb | 690 | drivers/net/3c59x.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 699 | drivers/net/3c59x.c | outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); |
skb | 700 | drivers/net/3c59x.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 891 | drivers/net/3c59x.c | struct sk_buff *skb; |
skb | 893 | drivers/net/3c59x.c | skb = dev_alloc_skb(pkt_len + 5); |
skb | 897 | drivers/net/3c59x.c | if (skb != NULL) { |
skb | 898 | drivers/net/3c59x.c | skb->dev = dev; |
skb | 899 | drivers/net/3c59x.c | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ |
skb | 901 | drivers/net/3c59x.c | insl(ioaddr + RX_FIFO, skb_put(skb, pkt_len), |
skb | 903 | drivers/net/3c59x.c | skb->protocol = eth_type_trans(skb, dev); |
skb | 904 | drivers/net/3c59x.c | netif_rx(skb); |
skb | 140 | drivers/net/8390.c | static int ei_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 182 | drivers/net/8390.c | if (skb == NULL) { |
skb | 187 | drivers/net/8390.c | length = skb->len; |
skb | 188 | drivers/net/8390.c | if (skb->len <= 0) |
skb | 228 | drivers/net/8390.c | ei_block_output(dev, length, skb->data, output_page); |
skb | 242 | drivers/net/8390.c | ei_block_output(dev, length, skb->data, ei_local->tx_start_page); |
skb | 253 | drivers/net/8390.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 467 | drivers/net/8390.c | struct sk_buff *skb; |
skb | 469 | drivers/net/8390.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 470 | drivers/net/8390.c | if (skb == NULL) { |
skb | 477 | drivers/net/8390.c | skb_reserve(skb,2); /* IP headers on 16 byte boundaries */ |
skb | 478 | drivers/net/8390.c | skb->dev = dev; |
skb | 479 | drivers/net/8390.c | skb_put(skb, pkt_len); /* Make room */ |
skb | 480 | drivers/net/8390.c | ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); |
skb | 481 | drivers/net/8390.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 482 | drivers/net/8390.c | netif_rx(skb); |
skb | 76 | drivers/net/ac3200.c | struct sk_buff *skb, int ring_offset); |
skb | 269 | drivers/net/ac3200.c | static void ac_block_input(struct device *dev, int count, struct sk_buff *skb, |
skb | 277 | drivers/net/ac3200.c | memcpy_fromio(skb->data, xfer_start, semi_count); |
skb | 279 | drivers/net/ac3200.c | memcpy_fromio(skb->data + semi_count, dev->rmem_start, count); |
skb | 282 | drivers/net/ac3200.c | eth_io_copy_and_sum(skb, xfer_start, count, 0); |
skb | 185 | drivers/net/apricot.c | static int i596_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 340 | drivers/net/apricot.c | struct sk_buff *skb = dev_alloc_skb(pkt_len); |
skb | 344 | drivers/net/apricot.c | if (skb == NULL) |
skb | 351 | drivers/net/apricot.c | skb->dev = dev; |
skb | 352 | drivers/net/apricot.c | memcpy(skb_put(skb,pkt_len), lp->scb.rfd->data, pkt_len); |
skb | 354 | drivers/net/apricot.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 355 | drivers/net/apricot.c | netif_rx(skb); |
skb | 358 | drivers/net/apricot.c | if (i596_debug > 4) print_eth(skb->data); |
skb | 406 | drivers/net/apricot.c | struct sk_buff *skb = ((struct sk_buff *)(tx_cmd->tbd->data)) -1; |
skb | 408 | drivers/net/apricot.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 569 | drivers/net/apricot.c | i596_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 607 | drivers/net/apricot.c | if (skb == NULL) { |
skb | 613 | drivers/net/apricot.c | if (skb->len <= 0) return 0; |
skb | 623 | drivers/net/apricot.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 632 | drivers/net/apricot.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 646 | drivers/net/apricot.c | tx_cmd->tbd->data = skb->data; |
skb | 648 | drivers/net/apricot.c | if (i596_debug > 3) print_eth(skb->data); |
skb | 804 | drivers/net/apricot.c | struct sk_buff *skb = ((struct sk_buff *)(tx_cmd->tbd->data)) -1; |
skb | 806 | drivers/net/apricot.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 810 | drivers/net/apricot.c | if (i596_debug >2) print_eth(skb->data); |
skb | 526 | drivers/net/arcnet.c | struct sk_buff *skb; /* packet data buffer */ |
skb | 534 | drivers/net/arcnet.c | struct sk_buff *skb; /* buffer from upper levels */ |
skb | 586 | drivers/net/arcnet.c | static void arcnet_dump_skb(struct device *dev,struct sk_buff *skb, |
skb | 589 | drivers/net/arcnet.c | # define arcnet_dump_skb(dev,skb,desc) ; |
skb | 607 | drivers/net/arcnet.c | static int arcnet_send_packet_bad(struct sk_buff *skb,struct device *dev); |
skb | 608 | drivers/net/arcnet.c | static int arcnetA_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 623 | drivers/net/arcnet.c | int arcnetA_header(struct sk_buff *skb,struct device *dev, |
skb | 626 | drivers/net/arcnet.c | struct sk_buff *skb); |
skb | 627 | drivers/net/arcnet.c | unsigned short arcnetA_type_trans(struct sk_buff *skb,struct device *dev); |
skb | 632 | drivers/net/arcnet.c | static int arcnetE_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 640 | drivers/net/arcnet.c | static int arcnetS_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 643 | drivers/net/arcnet.c | int arcnetS_header(struct sk_buff *skb,struct device *dev, |
skb | 646 | drivers/net/arcnet.c | struct sk_buff *skb); |
skb | 647 | drivers/net/arcnet.c | unsigned short arcnetS_type_trans(struct sk_buff *skb,struct device *dev); |
skb | 669 | drivers/net/arcnet.c | void arcnet_dump_skb(struct device *dev,struct sk_buff *skb,char *desc) |
skb | 677 | drivers/net/arcnet.c | for(i=0; i<skb->len; i++) |
skb | 682 | drivers/net/arcnet.c | printk("%02X ",((u_char *)skb->data)[i]); |
skb | 1458 | drivers/net/arcnet.c | arcnet_send_packet_bad(struct sk_buff *skb, struct device *dev) |
skb | 1491 | drivers/net/arcnet.c | status,tickssofar,lp->outgoing.skb, |
skb | 1516 | drivers/net/arcnet.c | if (lp->outgoing.skb) |
skb | 1518 | drivers/net/arcnet.c | dev_kfree_skb(lp->outgoing.skb,FREE_WRITE); |
skb | 1521 | drivers/net/arcnet.c | lp->outgoing.skb=NULL; |
skb | 1533 | drivers/net/arcnet.c | if (skb == NULL) { |
skb | 1574 | drivers/net/arcnet.c | arcnetA_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 1582 | drivers/net/arcnet.c | bad=arcnet_send_packet_bad(skb,dev); |
skb | 1591 | drivers/net/arcnet.c | out->length = 1 < skb->len ? skb->len : 1; |
skb | 1592 | drivers/net/arcnet.c | out->hdr=(struct ClientData*)skb->data; |
skb | 1593 | drivers/net/arcnet.c | out->skb=skb; |
skb | 1595 | drivers/net/arcnet.c | BUGLVL(D_SKB) arcnet_dump_skb(dev,skb,"tx"); |
skb | 1612 | drivers/net/arcnet.c | ((char *)skb->data)+sizeof(struct ClientData), |
skb | 1617 | drivers/net/arcnet.c | dev_kfree_skb(out->skb,FREE_WRITE); |
skb | 1618 | drivers/net/arcnet.c | out->skb=NULL; |
skb | 1631 | drivers/net/arcnet.c | out->data=(u_char *)skb->data |
skb | 1663 | drivers/net/arcnet.c | if (out->skb) |
skb | 1664 | drivers/net/arcnet.c | dev_kfree_skb(out->skb,FREE_WRITE); |
skb | 1665 | drivers/net/arcnet.c | out->skb=NULL; |
skb | 1987 | drivers/net/arcnet.c | status,out->numsegs,out->segnum,out->skb); |
skb | 2018 | drivers/net/arcnet.c | if (!lp->outgoing.skb) |
skb | 2046 | drivers/net/arcnet.c | if (out->skb) |
skb | 2047 | drivers/net/arcnet.c | dev_kfree_skb(out->skb,FREE_WRITE); |
skb | 2048 | drivers/net/arcnet.c | out->skb=NULL; |
skb | 2260 | drivers/net/arcnet.c | struct sk_buff *skb; |
skb | 2289 | drivers/net/arcnet.c | if (in->skb) /* already assembling one! */ |
skb | 2294 | drivers/net/arcnet.c | kfree_skb(in->skb,FREE_WRITE); |
skb | 2297 | drivers/net/arcnet.c | in->skb=NULL; |
skb | 2302 | drivers/net/arcnet.c | skb = alloc_skb(length, GFP_ATOMIC); |
skb | 2303 | drivers/net/arcnet.c | if (skb == NULL) { |
skb | 2308 | drivers/net/arcnet.c | soft=(struct ClientData *)skb->data; |
skb | 2310 | drivers/net/arcnet.c | skb->len = length; |
skb | 2311 | drivers/net/arcnet.c | skb->dev = dev; |
skb | 2356 | drivers/net/arcnet.c | BUGLVL(D_SKB) arcnet_dump_skb(dev,skb,"rx"); |
skb | 2358 | drivers/net/arcnet.c | skb->protocol=arcnetA_type_trans(skb,dev); |
skb | 2360 | drivers/net/arcnet.c | netif_rx(skb); |
skb | 2386 | drivers/net/arcnet.c | if (in->skb && in->sequence!=arcsoft->sequence) |
skb | 2391 | drivers/net/arcnet.c | kfree_skb(in->skb,FREE_WRITE); |
skb | 2392 | drivers/net/arcnet.c | in->skb=NULL; |
skb | 2402 | drivers/net/arcnet.c | if (in->skb) /* already assembling one! */ |
skb | 2409 | drivers/net/arcnet.c | kfree_skb(in->skb,FREE_WRITE); |
skb | 2425 | drivers/net/arcnet.c | in->skb=skb=alloc_skb(508*in->numpackets |
skb | 2428 | drivers/net/arcnet.c | if (skb == NULL) { |
skb | 2437 | drivers/net/arcnet.c | skb->free=1; |
skb | 2439 | drivers/net/arcnet.c | soft=(struct ClientData *)skb->data; |
skb | 2441 | drivers/net/arcnet.c | skb->len=sizeof(struct ClientData); |
skb | 2442 | drivers/net/arcnet.c | skb->dev=dev; |
skb | 2456 | drivers/net/arcnet.c | if (!in->skb) |
skb | 2482 | drivers/net/arcnet.c | kfree_skb(in->skb,FREE_WRITE); |
skb | 2483 | drivers/net/arcnet.c | in->skb=NULL; |
skb | 2490 | drivers/net/arcnet.c | soft=(struct ClientData *)in->skb->data; |
skb | 2493 | drivers/net/arcnet.c | skb=in->skb; |
skb | 2495 | drivers/net/arcnet.c | memcpy(skb->data+skb->len, |
skb | 2499 | drivers/net/arcnet.c | skb->len+=length-sizeof(struct ClientData); |
skb | 2507 | drivers/net/arcnet.c | if (!skb || !in->skb) |
skb | 2510 | drivers/net/arcnet.c | skb,in->skb); |
skb | 2514 | drivers/net/arcnet.c | in->skb=NULL; |
skb | 2517 | drivers/net/arcnet.c | BUGLVL(D_SKB) arcnet_dump_skb(dev,skb,"rx"); |
skb | 2519 | drivers/net/arcnet.c | skb->protocol=arcnetA_type_trans(skb,dev); |
skb | 2521 | drivers/net/arcnet.c | netif_rx(skb); |
skb | 2576 | drivers/net/arcnet.c | int arcnetA_header(struct sk_buff *skb,struct device *dev, |
skb | 2580 | drivers/net/arcnet.c | skb_push(skb,dev->hard_header_len); |
skb | 2651 | drivers/net/arcnet.c | struct sk_buff *skb) |
skb | 2678 | drivers/net/arcnet.c | status=arp_find(&(head->daddr), dst, dev, dev->pa_addr, skb)? 1 : 0; |
skb | 2692 | drivers/net/arcnet.c | unsigned short arcnetA_type_trans(struct sk_buff *skb,struct device *dev) |
skb | 2698 | drivers/net/arcnet.c | skb->mac.raw=skb->data; |
skb | 2699 | drivers/net/arcnet.c | skb_pull(skb,dev->hard_header_len); |
skb | 2700 | drivers/net/arcnet.c | head=(struct ClientData *)skb->mac.raw; |
skb | 2703 | drivers/net/arcnet.c | skb->pkt_type=PACKET_BROADCAST; |
skb | 2708 | drivers/net/arcnet.c | skb->pkt_type=PACKET_OTHERHOST; |
skb | 2763 | drivers/net/arcnet.c | arcnetE_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 2770 | drivers/net/arcnet.c | short offset,length=skb->len+1; |
skb | 2774 | drivers/net/arcnet.c | bad=arcnet_send_packet_bad(skb,dev); |
skb | 2789 | drivers/net/arcnet.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 2805 | drivers/net/arcnet.c | if (((struct ethhdr*)(skb->data))->h_dest[0] == 0xFF) |
skb | 2809 | drivers/net/arcnet.c | ((struct ethhdr*)(skb->data))->h_dest[5]; |
skb | 2837 | drivers/net/arcnet.c | memcpy(arcsoft,skb->data,skb->len); |
skb | 2847 | drivers/net/arcnet.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 2874 | drivers/net/arcnet.c | struct sk_buff *skb; |
skb | 2879 | drivers/net/arcnet.c | skb = alloc_skb(length, GFP_ATOMIC); |
skb | 2880 | drivers/net/arcnet.c | if (skb == NULL) { |
skb | 2886 | drivers/net/arcnet.c | skb->len = length; |
skb | 2887 | drivers/net/arcnet.c | skb->dev = dev; |
skb | 2889 | drivers/net/arcnet.c | memcpy(skb->data,(u_char *)arcsoft+1,length-1); |
skb | 2891 | drivers/net/arcnet.c | BUGLVL(D_SKB) arcnet_dump_skb(dev,skb,"rx"); |
skb | 2893 | drivers/net/arcnet.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 2895 | drivers/net/arcnet.c | netif_rx(skb); |
skb | 2934 | drivers/net/arcnet.c | arcnetS_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 2938 | drivers/net/arcnet.c | struct S_ClientData *hdr=(struct S_ClientData *)skb->data; |
skb | 2942 | drivers/net/arcnet.c | bad=arcnet_send_packet_bad(skb,dev); |
skb | 2951 | drivers/net/arcnet.c | length = 1 < skb->len ? skb->len : 1; |
skb | 2953 | drivers/net/arcnet.c | BUGLVL(D_SKB) arcnet_dump_skb(dev,skb,"tx"); |
skb | 2959 | drivers/net/arcnet.c | skb->data+S_EXTRA_CLIENTDATA, |
skb | 2961 | drivers/net/arcnet.c | skb->data+sizeof(struct S_ClientData), |
skb | 2966 | drivers/net/arcnet.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 2979 | drivers/net/arcnet.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 3003 | drivers/net/arcnet.c | struct sk_buff *skb; |
skb | 3016 | drivers/net/arcnet.c | skb = alloc_skb(length, GFP_ATOMIC); |
skb | 3017 | drivers/net/arcnet.c | if (skb == NULL) { |
skb | 3022 | drivers/net/arcnet.c | soft=(struct S_ClientData *)skb->data; |
skb | 3023 | drivers/net/arcnet.c | skb->len = length; |
skb | 3033 | drivers/net/arcnet.c | skb->dev = dev; /* is already lp->sdev */ |
skb | 3035 | drivers/net/arcnet.c | BUGLVL(D_SKB) arcnet_dump_skb(dev,skb,"rx"); |
skb | 3037 | drivers/net/arcnet.c | skb->protocol=arcnetS_type_trans(skb,dev); |
skb | 3039 | drivers/net/arcnet.c | netif_rx(skb); |
skb | 3049 | drivers/net/arcnet.c | int arcnetS_header(struct sk_buff *skb,struct device *dev, |
skb | 3053 | drivers/net/arcnet.c | skb_push(skb,dev->hard_header_len); |
skb | 3106 | drivers/net/arcnet.c | struct sk_buff *skb) |
skb | 3130 | drivers/net/arcnet.c | return arp_find(&(head->daddr), dst, dev, dev->pa_addr, skb)? 1 : 0; |
skb | 3141 | drivers/net/arcnet.c | unsigned short arcnetS_type_trans(struct sk_buff *skb,struct device *dev) |
skb | 3147 | drivers/net/arcnet.c | skb->mac.raw=skb->data; |
skb | 3148 | drivers/net/arcnet.c | skb_pull(skb,dev->hard_header_len); |
skb | 3149 | drivers/net/arcnet.c | head=(struct S_ClientData *)skb->mac.raw; |
skb | 3152 | drivers/net/arcnet.c | skb->pkt_type=PACKET_BROADCAST; |
skb | 3157 | drivers/net/arcnet.c | skb->pkt_type=PACKET_OTHERHOST; |
skb | 119 | drivers/net/at1700.c | static int net_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 364 | drivers/net/at1700.c | net_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 398 | drivers/net/at1700.c | if (skb == NULL) { |
skb | 408 | drivers/net/at1700.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 409 | drivers/net/at1700.c | unsigned char *buf = skb->data; |
skb | 435 | drivers/net/at1700.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 520 | drivers/net/at1700.c | struct sk_buff *skb; |
skb | 531 | drivers/net/at1700.c | skb = dev_alloc_skb(pkt_len+3); |
skb | 532 | drivers/net/at1700.c | if (skb == NULL) { |
skb | 541 | drivers/net/at1700.c | skb->dev = dev; |
skb | 542 | drivers/net/at1700.c | skb_reserve(skb,2); |
skb | 544 | drivers/net/at1700.c | insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1); |
skb | 545 | drivers/net/at1700.c | skb->protocol=eth_type_trans(skb, dev); |
skb | 546 | drivers/net/at1700.c | netif_rx(skb); |
skb | 137 | drivers/net/atp.c | static int net_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 416 | drivers/net/atp.c | net_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 440 | drivers/net/atp.c | if (skb == NULL) { |
skb | 450 | drivers/net/atp.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 451 | drivers/net/atp.c | unsigned char *buf = skb->data; |
skb | 479 | drivers/net/atp.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 673 | drivers/net/atp.c | struct sk_buff *skb; |
skb | 675 | drivers/net/atp.c | skb = dev_alloc_skb(pkt_len); |
skb | 676 | drivers/net/atp.c | if (skb == NULL) { |
skb | 681 | drivers/net/atp.c | skb->dev = dev; |
skb | 683 | drivers/net/atp.c | read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port); |
skb | 686 | drivers/net/atp.c | unsigned char *data = skb->data; |
skb | 694 | drivers/net/atp.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 695 | drivers/net/atp.c | netif_rx(skb); |
skb | 454 | drivers/net/de4x5.c | struct sk_buff *skb[NUM_TX_DESC]; /* TX skb for freeing when sent */ |
skb | 505 | drivers/net/de4x5.c | struct sk_buff *skb; /* Save the (re-ordered) skb's */ |
skb | 538 | drivers/net/de4x5.c | static int de4x5_queue_pkt(struct sk_buff *skb, struct device *dev); |
skb | 559 | drivers/net/de4x5.c | static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb); |
skb | 576 | drivers/net/de4x5.c | static void de4x5_put_cache(struct device *dev, struct sk_buff *skb); |
skb | 577 | drivers/net/de4x5.c | static void de4x5_putb_cache(struct device *dev, struct sk_buff *skb); |
skb | 1039 | drivers/net/de4x5.c | de4x5_queue_pkt(struct sk_buff *skb, struct device *dev) |
skb | 1045 | drivers/net/de4x5.c | if (skb == NULL) { |
skb | 1051 | drivers/net/de4x5.c | de4x5_put_cache(dev, skb); /* Queue the buffer locally */ |
skb | 1066 | drivers/net/de4x5.c | if (dev->tbusy || lp->skb[lp->tx_new]) { |
skb | 1068 | drivers/net/de4x5.c | de4x5_putb_cache(dev, skb); /* Requeue the buffer */ |
skb | 1070 | drivers/net/de4x5.c | de4x5_put_cache(dev, skb); |
skb | 1073 | drivers/net/de4x5.c | printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%ld\n lostMedia:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), dev->tbusy, lp->lostMedia, inl(DE4X5_IMR), inl(DE4X5_OMR), (lp->skb[lp->tx_new] ? "YES" : "NO")); |
skb | 1075 | drivers/net/de4x5.c | } else if (skb->len > 0) { |
skb | 1077 | drivers/net/de4x5.c | if (lp->cache.skb && !dev->interrupt) { |
skb | 1078 | drivers/net/de4x5.c | de4x5_put_cache(dev, skb); |
skb | 1079 | drivers/net/de4x5.c | skb = de4x5_get_cache(dev); |
skb | 1082 | drivers/net/de4x5.c | while (skb && !dev->tbusy && !lp->skb[lp->tx_new]) { |
skb | 1086 | drivers/net/de4x5.c | load_packet(dev, skb->data, |
skb | 1087 | drivers/net/de4x5.c | TD_IC | TD_LS | TD_FS | skb->len, skb); |
skb | 1096 | drivers/net/de4x5.c | skb = de4x5_get_cache(dev); |
skb | 1100 | drivers/net/de4x5.c | if (skb && (dev->tbusy || lp->skb[lp->tx_new])) { |
skb | 1101 | drivers/net/de4x5.c | de4x5_putb_cache(dev, skb); |
skb | 1166 | drivers/net/de4x5.c | while (lp->cache.skb && !dev->tbusy && lp->tx_enable) { |
skb | 1204 | drivers/net/de4x5.c | struct sk_buff *skb; |
skb | 1207 | drivers/net/de4x5.c | if ((skb = dev_alloc_skb(pkt_len+2)) == NULL) { |
skb | 1214 | drivers/net/de4x5.c | skb->dev = dev; |
skb | 1215 | drivers/net/de4x5.c | skb_reserve(skb,2); /* Align */ |
skb | 1218 | drivers/net/de4x5.c | memcpy(skb_put(skb,len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), len); |
skb | 1219 | drivers/net/de4x5.c | memcpy(skb_put(skb,pkt_len-len), bus_to_virt(lp->rx_ring[0].buf), pkt_len - len); |
skb | 1221 | drivers/net/de4x5.c | memcpy(skb_put(skb,pkt_len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), pkt_len); |
skb | 1225 | drivers/net/de4x5.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 1226 | drivers/net/de4x5.c | netif_rx(skb); |
skb | 1236 | drivers/net/de4x5.c | buf = skb->data; /* Look at the dest addr */ |
skb | 1307 | drivers/net/de4x5.c | if (lp->skb[entry] != NULL) { |
skb | 1308 | drivers/net/de4x5.c | dev_kfree_skb(lp->skb[entry], FREE_WRITE); |
skb | 1309 | drivers/net/de4x5.c | lp->skb[entry] = NULL; |
skb | 1396 | drivers/net/de4x5.c | static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb) |
skb | 1403 | drivers/net/de4x5.c | lp->skb[lp->tx_new] = skb; |
skb | 2521 | drivers/net/de4x5.c | if (lp->skb[i]) { |
skb | 2522 | drivers/net/de4x5.c | de4x5_putb_cache(dev, lp->skb[i]); |
skb | 2523 | drivers/net/de4x5.c | lp->skb[i] = NULL; |
skb | 2527 | drivers/net/de4x5.c | if (lp->skb[i]) { |
skb | 2528 | drivers/net/de4x5.c | de4x5_putb_cache(dev, lp->skb[i]); |
skb | 2529 | drivers/net/de4x5.c | lp->skb[i] = NULL; |
skb | 2547 | drivers/net/de4x5.c | struct sk_buff *skb; |
skb | 2558 | drivers/net/de4x5.c | for (i=0; TX_BUFFS_AVAIL && lp->cache.skb; i++) { |
skb | 2559 | drivers/net/de4x5.c | skb = de4x5_get_cache(dev); |
skb | 2560 | drivers/net/de4x5.c | load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb); |
skb | 2612 | drivers/net/de4x5.c | static void de4x5_put_cache(struct device *dev, struct sk_buff *skb) |
skb | 2617 | drivers/net/de4x5.c | if (lp->cache.skb) { |
skb | 2618 | drivers/net/de4x5.c | for (p=lp->cache.skb; p->next; p=p->next); |
skb | 2619 | drivers/net/de4x5.c | p->next = skb; |
skb | 2621 | drivers/net/de4x5.c | lp->cache.skb = skb; |
skb | 2623 | drivers/net/de4x5.c | skb->next = NULL; |
skb | 2628 | drivers/net/de4x5.c | static void de4x5_putb_cache(struct device *dev, struct sk_buff *skb) |
skb | 2631 | drivers/net/de4x5.c | struct sk_buff *p = lp->cache.skb; |
skb | 2633 | drivers/net/de4x5.c | lp->cache.skb = skb; |
skb | 2634 | drivers/net/de4x5.c | skb->next = p; |
skb | 2642 | drivers/net/de4x5.c | struct sk_buff *p = lp->cache.skb; |
skb | 2645 | drivers/net/de4x5.c | lp->cache.skb = p->next; |
skb | 247 | drivers/net/de600.c | static int de600_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 397 | drivers/net/de600.c | de600_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 402 | drivers/net/de600.c | byte *buffer = skb->data; |
skb | 410 | drivers/net/de600.c | if (skb == NULL) { |
skb | 434 | drivers/net/de600.c | PRINTK(("de600_start_xmit:len=%d, page %d/%d\n", skb->len, tx_fifo_in, free_tx_pages)); |
skb | 436 | drivers/net/de600.c | if ((len = skb->len) < RUNT) |
skb | 476 | drivers/net/de600.c | if (skb->sk && (skb->sk->protocol == IPPROTO_TCP) && |
skb | 477 | drivers/net/de600.c | (skb->sk->prot->rspace != &de600_rspace)) |
skb | 478 | drivers/net/de600.c | skb->sk->prot->rspace = de600_rspace; /* Ugh! */ |
skb | 481 | drivers/net/de600.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 580 | drivers/net/de600.c | struct sk_buff *skb; |
skb | 605 | drivers/net/de600.c | skb = dev_alloc_skb(size+2); |
skb | 607 | drivers/net/de600.c | if (skb == NULL) { |
skb | 614 | drivers/net/de600.c | skb->dev = dev; |
skb | 615 | drivers/net/de600.c | skb_reserve(skb,2); /* Align */ |
skb | 618 | drivers/net/de600.c | buffer = skb_put(skb,size); |
skb | 627 | drivers/net/de600.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 629 | drivers/net/de600.c | netif_rx(skb); |
skb | 511 | drivers/net/de620.c | de620_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 516 | drivers/net/de620.c | byte *buffer = skb->data; |
skb | 525 | drivers/net/de620.c | if (skb == NULL) { |
skb | 550 | drivers/net/de620.c | if ((len = skb->len) < RUNT) |
skb | 560 | drivers/net/de620.c | (int)skb->len, using_txbuf)); |
skb | 590 | drivers/net/de620.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 652 | drivers/net/de620.c | struct sk_buff *skb; |
skb | 707 | drivers/net/de620.c | skb = dev_alloc_skb(size+2); |
skb | 708 | drivers/net/de620.c | if (skb == NULL) { /* Yeah, but no place to put it... */ |
skb | 714 | drivers/net/de620.c | skb_reserve(skb,2); /* Align */ |
skb | 715 | drivers/net/de620.c | skb->dev = dev; |
skb | 716 | drivers/net/de620.c | skb->free = 1; |
skb | 718 | drivers/net/de620.c | buffer = skb_put(skb,size); |
skb | 722 | drivers/net/de620.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 723 | drivers/net/de620.c | netif_rx(skb); /* deliver it "upstairs" */ |
skb | 383 | drivers/net/depca.c | static int depca_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 408 | drivers/net/depca.c | static int load_packet(struct device *dev, struct sk_buff *skb); |
skb | 779 | drivers/net/depca.c | depca_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 804 | drivers/net/depca.c | } else if (skb == NULL) { |
skb | 806 | drivers/net/depca.c | } else if (skb->len > 0) { |
skb | 813 | drivers/net/depca.c | status = load_packet(dev, skb); |
skb | 821 | drivers/net/depca.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 912 | drivers/net/depca.c | struct sk_buff *skb; |
skb | 914 | drivers/net/depca.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 915 | drivers/net/depca.c | if (skb != NULL) { |
skb | 917 | drivers/net/depca.c | skb_reserve(skb,2); /* 16 byte align the IP header */ |
skb | 918 | drivers/net/depca.c | buf = skb_put(skb,pkt_len); |
skb | 919 | drivers/net/depca.c | skb->dev = dev; |
skb | 932 | drivers/net/depca.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 933 | drivers/net/depca.c | netif_rx(skb); |
skb | 1532 | drivers/net/depca.c | static int load_packet(struct device *dev, struct sk_buff *skb) |
skb | 1538 | drivers/net/depca.c | end = (entry + (skb->len - 1) / TX_BUFF_SZ) & lp->txRingMask; |
skb | 1546 | drivers/net/depca.c | memcpy_toio(lp->tx_memcpy[entry], skb->data, len); |
skb | 1547 | drivers/net/depca.c | memcpy_toio(lp->tx_memcpy[0], skb->data + len, skb->len - len); |
skb | 1549 | drivers/net/depca.c | memcpy_toio(lp->tx_memcpy[entry], skb->data, skb->len); |
skb | 1553 | drivers/net/depca.c | len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len; |
skb | 103 | drivers/net/dlci.c | static int dlci_header(struct sk_buff *skb, struct device *dev, |
skb | 133 | drivers/net/dlci.c | dest = skb_push(skb, hlen); |
skb | 142 | drivers/net/dlci.c | static void dlci_receive(struct sk_buff *skb, struct device *dev) |
skb | 149 | drivers/net/dlci.c | hdr = (struct fradhdr *) skb->data; |
skb | 152 | drivers/net/dlci.c | skb->dev = dev; |
skb | 179 | drivers/net/dlci.c | skb->protocol = htons(hdr->PID); |
skb | 185 | drivers/net/dlci.c | skb->protocol = htons(ETH_P_IP); |
skb | 205 | drivers/net/dlci.c | skb->mac.raw = skb->data; |
skb | 206 | drivers/net/dlci.c | skb_pull(skb, header); |
skb | 207 | drivers/net/dlci.c | netif_rx(skb); |
skb | 211 | drivers/net/dlci.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 214 | drivers/net/dlci.c | static int dlci_transmit(struct sk_buff *skb, struct device *dev) |
skb | 221 | drivers/net/dlci.c | if (!skb || !dev) |
skb | 233 | drivers/net/dlci.c | ret = dlp->slave->hard_start_xmit(skb, dlp->slave); |
skb | 56 | drivers/net/dummy.c | static int dummy_xmit(struct sk_buff *skb, struct device *dev); |
skb | 105 | drivers/net/dummy.c | dummy_xmit(struct sk_buff *skb, struct device *dev) |
skb | 111 | drivers/net/dummy.c | if (skb == NULL || dev == NULL) |
skb | 114 | drivers/net/dummy.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 102 | drivers/net/e2100.c | struct sk_buff *skb, int ring_offset); |
skb | 318 | drivers/net/e2100.c | e21_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 326 | drivers/net/e2100.c | eth_io_copy_and_sum(skb, dev->mem_start + (ring_offset & 0xff), count, 0); |
skb | 137 | drivers/net/eepro.c | static int eepro_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 595 | drivers/net/eepro.c | eepro_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 633 | drivers/net/eepro.c | if (skb == NULL) { |
skb | 642 | drivers/net/eepro.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 643 | drivers/net/eepro.c | unsigned char *buf = skb->data; |
skb | 649 | drivers/net/eepro.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 1046 | drivers/net/eepro.c | struct sk_buff *skb; |
skb | 1049 | drivers/net/eepro.c | skb = dev_alloc_skb(rcv_size+2); |
skb | 1050 | drivers/net/eepro.c | if (skb == NULL) { |
skb | 1055 | drivers/net/eepro.c | skb->dev = dev; |
skb | 1056 | drivers/net/eepro.c | skb_reserve(skb,2); |
skb | 1058 | drivers/net/eepro.c | insw(ioaddr+IO_PORT, skb_put(skb,rcv_size), (rcv_size + 1) >> 1); |
skb | 1060 | drivers/net/eepro.c | skb->protocol = eth_type_trans(skb,dev); |
skb | 1061 | drivers/net/eepro.c | netif_rx(skb); |
skb | 620 | drivers/net/eexpress.c | struct sk_buff *skb; |
skb | 622 | drivers/net/eexpress.c | skb = dev_alloc_skb(pkt_len+16); |
skb | 623 | drivers/net/eexpress.c | if (skb == NULL) |
skb | 629 | drivers/net/eexpress.c | skb->dev = dev; |
skb | 630 | drivers/net/eexpress.c | skb_reserve(skb, 2); |
skb | 632 | drivers/net/eexpress.c | insw(ioaddr,skb_put(skb,pkt_len),(pkt_len+1)>>1); |
skb | 633 | drivers/net/eexpress.c | skb->protocol = eth_type_trans(skb,dev); |
skb | 634 | drivers/net/eexpress.c | netif_rx(skb); |
skb | 149 | drivers/net/eql.c | static int eql_slave_xmit(struct sk_buff *skb, struct device *dev); /* */ |
skb | 152 | drivers/net/eql.c | static int eql_header(struct sk_buff *skb, struct device *dev, |
skb | 156 | drivers/net/eql.c | unsigned long raddr, struct sk_buff *skb); /* */ |
skb | 361 | drivers/net/eql.c | static int eql_slave_xmit(struct sk_buff *skb, struct device *dev) |
skb | 367 | drivers/net/eql.c | if (skb == NULL) |
skb | 380 | drivers/net/eql.c | dev->name, eql_number_slaves (eql->queue), skb->len, |
skb | 383 | drivers/net/eql.c | dev_queue_xmit (skb, slave_dev, 1); |
skb | 385 | drivers/net/eql.c | slave->bytes_queued += skb->len; |
skb | 395 | drivers/net/eql.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 408 | drivers/net/eql.c | static int eql_header(struct sk_buff *skb, struct device *dev, |
skb | 417 | drivers/net/eql.c | unsigned long raddr, struct sk_buff *skb) |
skb | 327 | drivers/net/eth16i.c | static int eth16i_tx(struct sk_buff *skb, struct device *dev); |
skb | 859 | drivers/net/eth16i.c | static int eth16i_tx(struct sk_buff *skb, struct device *dev) |
skb | 918 | drivers/net/eth16i.c | if(skb == NULL) { |
skb | 932 | drivers/net/eth16i.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 933 | drivers/net/eth16i.c | unsigned char *buf = skb->data; |
skb | 973 | drivers/net/eth16i.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1003 | drivers/net/eth16i.c | struct sk_buff *skb; |
skb | 1016 | drivers/net/eth16i.c | skb = dev_alloc_skb(pkt_len + 3); |
skb | 1017 | drivers/net/eth16i.c | if( skb == NULL ) { |
skb | 1025 | drivers/net/eth16i.c | skb->dev = dev; |
skb | 1026 | drivers/net/eth16i.c | skb_reserve(skb,2); |
skb | 1034 | drivers/net/eth16i.c | insw(ioaddr + DATAPORT, skb_put(skb, pkt_len), (pkt_len + 1) >> 1); |
skb | 1036 | drivers/net/eth16i.c | unsigned char *buf = skb_put(skb, pkt_len); |
skb | 1051 | drivers/net/eth16i.c | skb->protocol=eth_type_trans(skb, dev); |
skb | 1052 | drivers/net/eth16i.c | netif_rx(skb); |
skb | 1059 | drivers/net/eth16i.c | printk(" %02x", skb->data[i]); |
skb | 290 | drivers/net/ewrk3.c | static int ewrk3_queue_pkt(struct sk_buff *skb, struct device *dev); |
skb | 722 | drivers/net/ewrk3.c | ewrk3_queue_pkt(struct sk_buff *skb, struct device *dev) |
skb | 758 | drivers/net/ewrk3.c | } else if (skb == NULL) { |
skb | 760 | drivers/net/ewrk3.c | } else if (skb->len > 0) { |
skb | 808 | drivers/net/ewrk3.c | u_char *p = skb->data; |
skb | 811 | drivers/net/ewrk3.c | outb((char)(skb->len & 0xff), EWRK3_DATA); |
skb | 812 | drivers/net/ewrk3.c | outb((char)((skb->len >> 8) & 0xff), EWRK3_DATA); |
skb | 814 | drivers/net/ewrk3.c | for (i=0; i<skb->len; i++) { |
skb | 821 | drivers/net/ewrk3.c | writeb((char)(skb->len & 0xff), (char *)buf);/* length (16 bit xfer)*/ |
skb | 824 | drivers/net/ewrk3.c | writeb((char)(((skb->len >> 8) & 0xff) | XCT), (char *)buf); |
skb | 828 | drivers/net/ewrk3.c | writeb(0x00, (char *)(buf + skb->len)); /* Write the XCT flag */ |
skb | 829 | drivers/net/ewrk3.c | memcpy_toio(buf, skb->data, PRELOAD);/* Write PRELOAD bytes*/ |
skb | 831 | drivers/net/ewrk3.c | memcpy_toio(buf+PRELOAD, skb->data+PRELOAD, skb->len-PRELOAD); |
skb | 832 | drivers/net/ewrk3.c | writeb(0xff, (char *)(buf + skb->len)); /* Write the XCT flag */ |
skb | 834 | drivers/net/ewrk3.c | writeb((char)((skb->len >> 8) & 0xff), (char *)buf); |
skb | 838 | drivers/net/ewrk3.c | memcpy_toio((char *)buf, skb->data, skb->len);/* Write data bytes */ |
skb | 844 | drivers/net/ewrk3.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 1000 | drivers/net/ewrk3.c | struct sk_buff *skb; |
skb | 1002 | drivers/net/ewrk3.c | if ((skb = dev_alloc_skb(pkt_len+2)) != NULL) { |
skb | 1004 | drivers/net/ewrk3.c | skb->dev = dev; |
skb | 1005 | drivers/net/ewrk3.c | skb_reserve(skb,2); /* Align to 16 bytes */ |
skb | 1006 | drivers/net/ewrk3.c | p = skb_put(skb,pkt_len); |
skb | 1021 | drivers/net/ewrk3.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 1022 | drivers/net/ewrk3.c | netif_rx(skb); |
skb | 1034 | drivers/net/ewrk3.c | p = skb->data; /* Look at the dest addr */ |
skb | 112 | drivers/net/fmv18x.c | static int net_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 310 | drivers/net/fmv18x.c | net_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 351 | drivers/net/fmv18x.c | if (skb == NULL) { |
skb | 361 | drivers/net/fmv18x.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 362 | drivers/net/fmv18x.c | unsigned char *buf = skb->data; |
skb | 373 | drivers/net/fmv18x.c | (unsigned long)skb->len); |
skb | 399 | drivers/net/fmv18x.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 495 | drivers/net/fmv18x.c | struct sk_buff *skb; |
skb | 504 | drivers/net/fmv18x.c | skb = dev_alloc_skb(pkt_len+3); |
skb | 505 | drivers/net/fmv18x.c | if (skb == NULL) { |
skb | 512 | drivers/net/fmv18x.c | skb->dev = dev; |
skb | 513 | drivers/net/fmv18x.c | skb_reserve(skb,2); |
skb | 515 | drivers/net/fmv18x.c | insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1); |
skb | 521 | drivers/net/fmv18x.c | printk(" %02x", skb->data[i]); |
skb | 525 | drivers/net/fmv18x.c | skb->protocol=eth_type_trans(skb, dev); |
skb | 526 | drivers/net/fmv18x.c | netif_rx(skb); |
skb | 102 | drivers/net/hp-plus.c | struct sk_buff *skb, int ring_offset); |
skb | 108 | drivers/net/hp-plus.c | struct sk_buff *skb, int ring_offset); |
skb | 340 | drivers/net/hp-plus.c | hpp_io_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 343 | drivers/net/hp-plus.c | char *buf = skb->data; |
skb | 367 | drivers/net/hp-plus.c | hpp_mem_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 380 | drivers/net/hp-plus.c | memcpy_fromio(skb->data, dev->mem_start, count); |
skb | 66 | drivers/net/hp.c | struct sk_buff *skb , int ring_offset); |
skb | 276 | drivers/net/hp.c | hp_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 281 | drivers/net/hp.c | char *buf = skb->data; |
skb | 193 | drivers/net/hp100.c | static int hp100_start_xmit( struct sk_buff *skb, struct device *dev ); |
skb | 574 | drivers/net/hp100.c | static int hp100_start_xmit( struct sk_buff *skb, struct device *dev ) |
skb | 595 | drivers/net/hp100.c | if ( ( i = ( hp100_inl( TX_MEM_FREE ) & ~0x7fffffff ) ) < skb -> len + 16 ) |
skb | 639 | drivers/net/hp100.c | if ( skb == NULL ) |
skb | 645 | drivers/net/hp100.c | if ( skb -> len <= 0 ) return 0; |
skb | 658 | drivers/net/hp100.c | printk( "hp100_start_xmit: irq_status = 0x%x, len = %d\n", val, (int)skb -> len ); |
skb | 660 | drivers/net/hp100.c | ok_flag = skb -> len >= HP100_MIN_PACKET_SIZE; |
skb | 661 | drivers/net/hp100.c | i = ok_flag ? skb -> len : HP100_MIN_PACKET_SIZE; |
skb | 668 | drivers/net/hp100.c | memcpy( lp -> mem_ptr_virt, skb -> data, skb -> len ); |
skb | 670 | drivers/net/hp100.c | memset( lp -> mem_ptr_virt, 0, HP100_MIN_PACKET_SIZE - skb -> len ); |
skb | 674 | drivers/net/hp100.c | memcpy_toio( lp -> mem_ptr_phys, skb -> data, skb -> len ); |
skb | 676 | drivers/net/hp100.c | memset_io( lp -> mem_ptr_phys, 0, HP100_MIN_PACKET_SIZE - skb -> len ); |
skb | 681 | drivers/net/hp100.c | outsl( ioaddr + HP100_REG_DATA32, skb -> data, ( skb -> len + 3 ) >> 2 ); |
skb | 683 | drivers/net/hp100.c | for ( i = ( skb -> len + 3 ) & ~3; i < HP100_MIN_PACKET_SIZE; i += 4 ) |
skb | 691 | drivers/net/hp100.c | dev_kfree_skb( skb, FREE_WRITE ); |
skb | 710 | drivers/net/hp100.c | struct sk_buff *skb; |
skb | 753 | drivers/net/hp100.c | skb = dev_alloc_skb( pkt_len ); |
skb | 754 | drivers/net/hp100.c | if ( skb == NULL ) |
skb | 765 | drivers/net/hp100.c | skb -> dev = dev; |
skb | 766 | drivers/net/hp100.c | ptr = (u_char *)skb_put( skb, pkt_len ); |
skb | 776 | drivers/net/hp100.c | skb -> protocol = eth_type_trans( skb, dev ); |
skb | 777 | drivers/net/hp100.c | netif_rx( skb ); |
skb | 169 | drivers/net/ibmtr.c | static int tok_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 1250 | drivers/net/ibmtr.c | struct sk_buff *skb; |
skb | 1326 | drivers/net/ibmtr.c | if (!(skb=dev_alloc_skb(arb_frame_len-lan_hdr_len+sizeof(struct trh_hdr)))) { |
skb | 1334 | drivers/net/ibmtr.c | skb_put(skb, arb_frame_len-lan_hdr_len+sizeof(struct trh_hdr)); |
skb | 1335 | drivers/net/ibmtr.c | skb->dev=dev; |
skb | 1337 | drivers/net/ibmtr.c | data=skb->data; |
skb | 1376 | drivers/net/ibmtr.c | skb->protocol=tr_type_trans(skb,dev); |
skb | 1377 | drivers/net/ibmtr.c | netif_rx(skb); |
skb | 1381 | drivers/net/ibmtr.c | static int tok_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 1403 | drivers/net/ibmtr.c | if (skb==NULL) { |
skb | 1412 | drivers/net/ibmtr.c | ti->current_skb=skb; |
skb | 276 | drivers/net/lance.c | static int lance_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 728 | drivers/net/lance.c | lance_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 770 | drivers/net/lance.c | if (skb == NULL) { |
skb | 775 | drivers/net/lance.c | if (skb->len <= 0) |
skb | 810 | drivers/net/lance.c | -(ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN); |
skb | 812 | drivers/net/lance.c | lp->tx_ring[entry].length = -skb->len; |
skb | 818 | drivers/net/lance.c | if ((int)(skb->data) + skb->len > 0x01000000) { |
skb | 821 | drivers/net/lance.c | dev->name, (int)(skb->data)); |
skb | 822 | drivers/net/lance.c | memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len); |
skb | 825 | drivers/net/lance.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 827 | drivers/net/lance.c | lp->tx_skbuff[entry] = skb; |
skb | 828 | drivers/net/lance.c | lp->tx_ring[entry].base = (int)(skb->data) | 0x83000000; |
skb | 1007 | drivers/net/lance.c | struct sk_buff *skb; |
skb | 1016 | drivers/net/lance.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 1017 | drivers/net/lance.c | if (skb == NULL) |
skb | 1032 | drivers/net/lance.c | skb->dev = dev; |
skb | 1033 | drivers/net/lance.c | skb_reserve(skb,2); /* 16 byte align */ |
skb | 1034 | drivers/net/lance.c | skb_put(skb,pkt_len); /* Make room */ |
skb | 1035 | drivers/net/lance.c | eth_copy_and_sum(skb, |
skb | 1038 | drivers/net/lance.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 1039 | drivers/net/lance.c | netif_rx(skb); |
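
[Editorial sketch] The lance.c receive rows just above (dev_alloc_skb, skb_reserve, skb_put, eth_type_trans, netif_rx) show the allocation pattern that nearly every driver in this index repeats. A minimal, assumed reconstruction of that pattern follows; example_rx(), its src/pkt_len parameters and the drop-on-failure policy are illustrative, the skb calls are the ones quoted.

    #include <linux/skbuff.h>
    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>
    #include <linux/string.h>

    /* Hypothetical helper: hand one received frame of pkt_len bytes at src to the stack. */
    static void example_rx(struct device *dev, unsigned char *src, int pkt_len)
    {
            struct sk_buff *skb;

            skb = dev_alloc_skb(pkt_len + 2);            /* +2 so the IP header lands 16-byte aligned */
            if (skb == NULL)
                    return;                              /* out of memory: the drivers simply drop the frame */
            skb->dev = dev;
            skb_reserve(skb, 2);                         /* skip 2 bytes ahead of the 14-byte Ethernet header */
            memcpy(skb_put(skb, pkt_len), src, pkt_len); /* skb_put() grows len and returns the old tail */
            skb->protocol = eth_type_trans(skb, dev);    /* classify the frame and record the MAC header */
            netif_rx(skb);                               /* queue it for the protocol bottom half */
    }
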
skb | 56 | drivers/net/loopback.c | static int loopback_xmit(struct sk_buff *skb, struct device *dev) |
skb | 61 | drivers/net/loopback.c | if (skb == NULL || dev == NULL) |
skb | 69 | drivers/net/loopback.c | if(skb->free==0) |
skb | 71 | drivers/net/loopback.c | struct sk_buff *skb2=skb; |
skb | 72 | drivers/net/loopback.c | skb=skb_clone(skb, GFP_ATOMIC); /* Clone the buffer */ |
skb | 73 | drivers/net/loopback.c | if(skb==NULL) |
skb | 78 | drivers/net/loopback.c | else if(skb->sk) |
skb | 84 | drivers/net/loopback.c | atomic_sub(skb->truesize, &skb->sk->wmem_alloc); |
skb | 85 | drivers/net/loopback.c | skb->sk->write_space(skb->sk); |
skb | 88 | drivers/net/loopback.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 89 | drivers/net/loopback.c | skb->dev=dev; |
skb | 91 | drivers/net/loopback.c | skb->ip_summed = CHECKSUM_UNNECESSARY; |
skb | 93 | drivers/net/loopback.c | netif_rx(skb); |
skb | 95 | drivers/net/loopback.c | skb_device_unlock(skb); |
skb | 109 | drivers/net/ne.c | struct sk_buff *skb, int ring_offset); |
skb | 498 | drivers/net/ne.c | ne_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 504 | drivers/net/ne.c | char *buf = skb->data; |
skb | 127 | drivers/net/new_tunnel.c | static int tunnel_xmit(struct sk_buff *skb, struct device *dev) |
skb | 139 | drivers/net/new_tunnel.c | if (skb == NULL || dev == NULL) { |
skb | 166 | drivers/net/new_tunnel.c | iph = (struct iphdr *) skb->data; |
skb | 230 | drivers/net/new_tunnel.c | printk("Room left at head: %d\n", skb_headroom(skb)); |
skb | 231 | drivers/net/new_tunnel.c | printk("Room left at tail: %d\n", skb_tailroom(skb)); |
skb | 234 | drivers/net/new_tunnel.c | if (skb_headroom(skb) >= max_headroom) { |
skb | 235 | drivers/net/new_tunnel.c | skb->h.iph = (struct iphdr *) skb_push(skb, tunnel_hlen); |
skb | 239 | drivers/net/new_tunnel.c | if ( !(new_skb = dev_alloc_skb(skb->len+max_headroom)) ) |
skb | 260 | drivers/net/new_tunnel.c | new_skb->ip_hdr = (struct iphdr *) skb_put(new_skb, skb->len); |
skb | 261 | drivers/net/new_tunnel.c | memcpy(new_skb->ip_hdr, skb->data, skb->len); |
skb | 263 | drivers/net/new_tunnel.c | memcpy(new_skb->proto_priv, skb->proto_priv, sizeof(skb->proto_priv)); |
skb | 269 | drivers/net/new_tunnel.c | kfree_skb(skb, FREE_WRITE); |
skb | 270 | drivers/net/new_tunnel.c | skb = new_skb; |
skb | 277 | drivers/net/new_tunnel.c | iph = skb->h.iph; |
skb | 279 | drivers/net/new_tunnel.c | iph->tos = skb->ip_hdr->tos; |
skb | 280 | drivers/net/new_tunnel.c | iph->ttl = skb->ip_hdr->ttl; |
skb | 286 | drivers/net/new_tunnel.c | iph->tot_len = htons(skb->len); |
skb | 289 | drivers/net/new_tunnel.c | skb->ip_hdr = skb->h.iph; |
skb | 290 | drivers/net/new_tunnel.c | skb->protocol = htons(ETH_P_IP); |
skb | 302 | drivers/net/new_tunnel.c | if (ip_forward(skb, dev, 0, target) == 1) |
skb | 303 | drivers/net/new_tunnel.c | kfree_skb(skb, FREE_WRITE); |
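
[Editorial sketch] The new_tunnel.c rows above test skb_headroom() before deciding between prepending a header in place with skb_push() and copying the packet into a freshly allocated, roomier buffer. A hedged reconstruction of just that decision; example_make_room() and the hlen parameter are hypothetical, the skb calls and FREE_WRITE disposal are those quoted.

    #include <linux/skbuff.h>
    #include <linux/string.h>

    /* Return an skb whose data points at hlen bytes of fresh header room followed by
       the original payload, or NULL on allocation failure (assumed 2.0-era semantics). */
    static struct sk_buff *example_make_room(struct sk_buff *skb, int hlen)
    {
            struct sk_buff *new_skb;

            if (skb_headroom(skb) >= hlen) {
                    skb_push(skb, hlen);                  /* enough slack: prepend in place */
                    return skb;
            }
            new_skb = dev_alloc_skb(skb->len + hlen);     /* otherwise build a copy with room */
            if (new_skb == NULL)
                    return NULL;
            skb_reserve(new_skb, hlen);                   /* keep hlen bytes of headroom */
            memcpy(skb_put(new_skb, skb->len), skb->data, skb->len);
            skb_push(new_skb, hlen);                      /* expose the header room, as above */
            kfree_skb(skb, FREE_WRITE);                   /* the original buffer is no longer needed */
            return new_skb;
    }
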
skb | 895 | drivers/net/ni52.c | struct sk_buff *skb; |
skb | 912 | drivers/net/ni52.c | skb = (struct sk_buff *) dev_alloc_skb(totlen+2); |
skb | 913 | drivers/net/ni52.c | if(skb != NULL) |
skb | 915 | drivers/net/ni52.c | skb->dev = dev; |
skb | 916 | drivers/net/ni52.c | skb_reserve(skb,2); |
skb | 917 | drivers/net/ni52.c | skb_put(skb,totlen); |
skb | 918 | drivers/net/ni52.c | eth_copy_and_sum(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen,0); |
skb | 919 | drivers/net/ni52.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 920 | drivers/net/ni52.c | netif_rx(skb); |
skb | 1106 | drivers/net/ni52.c | static int ni52_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 1153 | drivers/net/ni52.c | if(skb == NULL) |
skb | 1159 | drivers/net/ni52.c | if (skb->len <= 0) |
skb | 1161 | drivers/net/ni52.c | if(skb->len > XMIT_BUFF_SIZE) |
skb | 1163 | drivers/net/ni52.c | printk("%s: Sorry, max. framelength is %d bytes. The length of your frame is %ld bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len); |
skb | 1179 | drivers/net/ni52.c | memcpy((char *)p->xmit_cbuffs[p->xmit_count],(char *)(skb->data),skb->len); |
skb | 1180 | drivers/net/ni52.c | len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; |
skb | 1209 | drivers/net/ni52.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 1229 | drivers/net/ni52.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 1254 | drivers/net/ni52.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 124 | drivers/net/ni65.c | static int ni65_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 317 | drivers/net/ni65.c | struct sk_buff *skb; |
skb | 318 | drivers/net/ni65.c | if( !(skb = dev_alloc_skb(R_BUF_SIZE+2)) ) { |
skb | 322 | drivers/net/ni65.c | skb->dev = dev; |
skb | 323 | drivers/net/ni65.c | skb_reserve(skb,2); |
skb | 324 | drivers/net/ni65.c | skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */ |
skb | 325 | drivers/net/ni65.c | if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000 ) { |
skb | 329 | drivers/net/ni65.c | p->recv_skb[i] = skb; |
skb | 626 | drivers/net/ni65.c | struct sk_buff *skb = dev_alloc_skb(R_BUF_SIZE+2); |
skb | 628 | drivers/net/ni65.c | struct sk_buff *skb = dev_alloc_skb(len+2); |
skb | 630 | drivers/net/ni65.c | if(skb) |
skb | 632 | drivers/net/ni65.c | skb_reserve(skb,2); |
skb | 633 | drivers/net/ni65.c | skb->dev = dev; |
skb | 635 | drivers/net/ni65.c | if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) { |
skb | 636 | drivers/net/ni65.c | skb_put(skb,len); |
skb | 637 | drivers/net/ni65.c | eth_copy_and_sum(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len,0); |
skb | 641 | drivers/net/ni65.c | skb_put(skb,R_BUF_SIZE); |
skb | 642 | drivers/net/ni65.c | p->recv_skb[p->rmdnum] = skb; |
skb | 643 | drivers/net/ni65.c | rmdp->u.buffer = (unsigned long) skb->data; |
skb | 644 | drivers/net/ni65.c | skb = skb1; |
skb | 645 | drivers/net/ni65.c | skb_trim(skb,len); |
skb | 648 | drivers/net/ni65.c | skb_put(skb,len); |
skb | 649 | drivers/net/ni65.c | eth_copy_and_sum(skb, (unsigned char *) p->recvbounce[p->rmdnum],len,0); |
skb | 653 | drivers/net/ni65.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 654 | drivers/net/ni65.c | netif_rx(skb); |
skb | 676 | drivers/net/ni65.c | static int ni65_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 692 | drivers/net/ni65.c | if(skb == NULL) { |
skb | 697 | drivers/net/ni65.c | if (skb->len <= 0) |
skb | 710 | drivers/net/ni65.c | short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 715 | drivers/net/ni65.c | if( (unsigned long) (skb->data + skb->len) > 0x1000000) { |
skb | 718 | drivers/net/ni65.c | memcpy((char *) tmdp->u.buffer,(char *)skb->data, |
skb | 719 | drivers/net/ni65.c | (skb->len > T_BUF_SIZE) ? T_BUF_SIZE : skb->len); |
skb | 720 | drivers/net/ni65.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 724 | drivers/net/ni65.c | tmdp->u.buffer = (unsigned long) skb->data; |
skb | 725 | drivers/net/ni65.c | p->tmd_skb[p->tmdnum] = skb; |
skb | 151 | drivers/net/pi2.c | static int pi_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 208 | drivers/net/pi2.c | static void hardware_send_packet(struct pi_local *lp, struct sk_buff *skb) |
skb | 220 | drivers/net/pi2.c | skb_queue_tail(&lp->sndq, skb); |
skb | 338 | drivers/net/pi2.c | static void free_p(struct sk_buff *skb) |
skb | 340 | drivers/net/pi2.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 510 | drivers/net/pi2.c | struct sk_buff *skb; |
skb | 554 | drivers/net/pi2.c | skb = dev_alloc_skb(sksize); |
skb | 555 | drivers/net/pi2.c | if (skb == NULL) { |
skb | 561 | drivers/net/pi2.c | skb->dev = dev; |
skb | 564 | drivers/net/pi2.c | cfix=skb_put(skb,pkt_len); |
skb | 569 | drivers/net/pi2.c | skb->protocol=htons(ETH_P_AX25); |
skb | 570 | drivers/net/pi2.c | skb->mac.raw=skb->data; |
skb | 571 | drivers/net/pi2.c | IS_SKB(skb); |
skb | 572 | drivers/net/pi2.c | netif_rx(skb); |
skb | 585 | drivers/net/pi2.c | struct sk_buff *skb; |
skb | 643 | drivers/net/pi2.c | skb = dev_alloc_skb(sksize); |
skb | 644 | drivers/net/pi2.c | if (skb == NULL) { |
skb | 650 | drivers/net/pi2.c | skb->dev = dev; |
skb | 653 | drivers/net/pi2.c | cfix=skb_put(skb,pkt_len); |
skb | 657 | drivers/net/pi2.c | skb->protocol=ntohs(ETH_P_AX25); |
skb | 658 | drivers/net/pi2.c | skb->mac.raw=skb->data; |
skb | 659 | drivers/net/pi2.c | IS_SKB(skb); |
skb | 660 | drivers/net/pi2.c | netif_rx(skb); |
skb | 1077 | drivers/net/pi2.c | static int pi_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 1080 | drivers/net/pi2.c | return ax25_encapsulate(skb, dev, type, daddr, saddr, len); |
skb | 1085 | drivers/net/pi2.c | struct sk_buff *skb) |
skb | 1087 | drivers/net/pi2.c | return ax25_rebuild_header(buff, dev, raddr, skb); |
skb | 1493 | drivers/net/pi2.c | static int pi_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 1500 | drivers/net/pi2.c | if (skb == NULL) { |
skb | 1504 | drivers/net/pi2.c | hardware_send_packet(lp, skb); |
skb | 146 | drivers/net/plip.c | unsigned long raddr, struct sk_buff *skb); |
skb | 147 | drivers/net/plip.c | static int plip_tx_packet(struct sk_buff *skb, struct device *dev); |
skb | 197 | drivers/net/plip.c | struct sk_buff *skb; |
skb | 212 | drivers/net/plip.c | unsigned long raddr, struct sk_buff *skb); |
skb | 415 | drivers/net/plip.c | if (rcv->skb) { |
skb | 416 | drivers/net/plip.c | rcv->skb->free = 1; |
skb | 417 | drivers/net/plip.c | kfree_skb(rcv->skb, FREE_READ); |
skb | 418 | drivers/net/plip.c | rcv->skb = NULL; |
skb | 421 | drivers/net/plip.c | if (snd->skb) { |
skb | 422 | drivers/net/plip.c | dev_kfree_skb(snd->skb, FREE_WRITE); |
skb | 423 | drivers/net/plip.c | snd->skb = NULL; |
skb | 543 | drivers/net/plip.c | rcv->skb = dev_alloc_skb(rcv->length.h); |
skb | 544 | drivers/net/plip.c | if (rcv->skb == NULL) { |
skb | 548 | drivers/net/plip.c | skb_put(rcv->skb,rcv->length.h); |
skb | 549 | drivers/net/plip.c | rcv->skb->dev = dev; |
skb | 555 | drivers/net/plip.c | lbuf = rcv->skb->data; |
skb | 580 | drivers/net/plip.c | rcv->skb->protocol=eth_type_trans(rcv->skb, dev); |
skb | 581 | drivers/net/plip.c | netif_rx(rcv->skb); |
skb | 583 | drivers/net/plip.c | rcv->skb = NULL; |
skb | 667 | drivers/net/plip.c | if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) { |
skb | 670 | drivers/net/plip.c | snd->skb = NULL; |
skb | 742 | drivers/net/plip.c | dev_kfree_skb(snd->skb, FREE_WRITE); |
skb | 749 | drivers/net/plip.c | snd->skb = NULL; |
skb | 858 | drivers/net/plip.c | struct sk_buff *skb) |
skb | 865 | drivers/net/plip.c | return nl->orig_rebuild_header(buff, dev, dst, skb); |
skb | 880 | drivers/net/plip.c | plip_tx_packet(struct sk_buff *skb, struct device *dev) |
skb | 891 | drivers/net/plip.c | if (skb == NULL) { |
skb | 901 | drivers/net/plip.c | if (skb->len > dev->mtu + dev->hard_header_len) { |
skb | 902 | drivers/net/plip.c | printk("%s: packet too big, %d.\n", dev->name, (int)skb->len); |
skb | 912 | drivers/net/plip.c | snd->skb = skb; |
skb | 913 | drivers/net/plip.c | snd->length.h = skb->len; |
skb | 959 | drivers/net/plip.c | nl->rcv_data.skb = nl->snd_data.skb = NULL; |
skb | 994 | drivers/net/plip.c | if (snd->skb) { |
skb | 995 | drivers/net/plip.c | dev_kfree_skb(snd->skb, FREE_WRITE); |
skb | 996 | drivers/net/plip.c | snd->skb = NULL; |
skb | 999 | drivers/net/plip.c | if (rcv->skb) { |
skb | 1000 | drivers/net/plip.c | rcv->skb->free = 1; |
skb | 1001 | drivers/net/plip.c | kfree_skb(rcv->skb, FREE_READ); |
skb | 1002 | drivers/net/plip.c | rcv->skb = NULL; |
skb | 89 | drivers/net/ppp.c | #define skb_data(skb) ((__u8 *) (skb)->data) |
skb | 186 | drivers/net/ppp.c | unsigned long raddr, struct sk_buff *skb); |
skb | 1189 | drivers/net/ppp.c | sk_buff *skb = dev_alloc_skb (count); |
skb | 1193 | drivers/net/ppp.c | if (skb == NULL) { |
skb | 1203 | drivers/net/ppp.c | skb->dev = ppp2dev (ppp); /* We are the device */ |
skb | 1204 | drivers/net/ppp.c | skb->protocol = proto; |
skb | 1205 | drivers/net/ppp.c | skb->mac.raw = skb_data(skb); |
skb | 1206 | drivers/net/ppp.c | memcpy (skb_put(skb,count), data, count); /* move data */ |
skb | 1210 | drivers/net/ppp.c | skb->free = 1; |
skb | 1212 | drivers/net/ppp.c | netif_rx (skb); |
skb | 3039 | drivers/net/ppp.c | ppp_dev_xmit (sk_buff *skb, struct device *dev) |
skb | 3048 | drivers/net/ppp.c | if (skb == NULL) { |
skb | 3057 | drivers/net/ppp.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 3066 | drivers/net/ppp.c | dev->name, skb); |
skb | 3075 | drivers/net/ppp.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 3081 | drivers/net/ppp.c | len = skb->len; |
skb | 3082 | drivers/net/ppp.c | data = skb_data(skb); |
skb | 3087 | drivers/net/ppp.c | switch (ntohs (skb->protocol)) { |
skb | 3097 | drivers/net/ppp.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 3104 | drivers/net/ppp.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 3143 | drivers/net/ppp.c | static int ppp_dev_header (sk_buff *skb, struct device *dev, |
skb | 3152 | drivers/net/ppp.c | unsigned long raddr, struct sk_buff *skb) |
skb | 122 | drivers/net/pt.c | static int pt_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 177 | drivers/net/pt.c | static void hardware_send_packet(struct pt_local *lp, struct sk_buff *skb) |
skb | 185 | drivers/net/pt.c | ptr = skb->data; |
skb | 186 | drivers/net/pt.c | if (ptr[0] != 0 && skb->len >= 2) |
skb | 188 | drivers/net/pt.c | printk("Rx KISS... Control = %d, value = %d.\n", ptr[0], (skb->len > 1? ptr[1] : -1)); |
skb | 231 | drivers/net/pt.c | skb_queue_tail(&lp->sndq, skb); |
skb | 318 | drivers/net/pt.c | static void free_p(struct sk_buff *skb) |
skb | 320 | drivers/net/pt.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 339 | drivers/net/pt.c | static int pt_header (struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 342 | drivers/net/pt.c | return ax25_encapsulate(skb, dev, type, daddr, saddr, len); |
skb | 348 | drivers/net/pt.c | struct sk_buff *skb) |
skb | 350 | drivers/net/pt.c | return ax25_rebuild_header(buff, dev, raddr, skb); |
skb | 972 | drivers/net/pt.c | static int pt_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 982 | drivers/net/pt.c | if (skb == NULL) { |
skb | 986 | drivers/net/pt.c | hardware_send_packet(lp, skb); |
skb | 1271 | drivers/net/pt.c | struct sk_buff *skb; |
skb | 1392 | drivers/net/pt.c | skb = dev_alloc_skb(sksize); |
skb | 1393 | drivers/net/pt.c | if (skb == NULL) |
skb | 1400 | drivers/net/pt.c | skb->dev = dev; |
skb | 1403 | drivers/net/pt.c | cfix=skb_put(skb,pkt_len); |
skb | 1410 | drivers/net/pt.c | skb->protocol = ntohs(ETH_P_AX25); |
skb | 1411 | drivers/net/pt.c | skb->mac.raw=skb->data; |
skb | 1412 | drivers/net/pt.c | IS_SKB(skb); |
skb | 1413 | drivers/net/pt.c | netif_rx(skb); |
skb | 1810 | drivers/net/pt.c | struct sk_buff *skb; |
skb | 1815 | drivers/net/pt.c | skb = dev_alloc_skb(2); |
skb | 1816 | drivers/net/pt.c | if (skb == NULL) |
skb | 1821 | drivers/net/pt.c | skb->dev = dev; |
skb | 1822 | drivers/net/pt.c | cfix = skb_put(skb, 2); |
skb | 1825 | drivers/net/pt.c | skb->protocol=htons(ETH_P_AX25); |
skb | 1826 | drivers/net/pt.c | skb->mac.raw=skb->data; |
skb | 1827 | drivers/net/pt.c | IS_SKB(skb); |
skb | 1828 | drivers/net/pt.c | netif_rx(skb); |
skb | 619 | drivers/net/sdla.c | static int sdla_transmit(struct sk_buff *skb, struct device *dev) |
skb | 633 | drivers/net/sdla.c | if (skb == NULL) |
skb | 644 | drivers/net/sdla.c | ret = sdla_cmd(dev, SDLA_INFORMATION_WRITE, *(short *)(skb->dev->dev_addr), 0, skb->data, skb->len, NULL, NULL); |
skb | 649 | drivers/net/sdla.c | ret = sdla_cmd(dev, SDLA_INFORMATION_WRITE, *(short *)(skb->dev->dev_addr), 0, NULL, skb->len, &addr, &size); |
skb | 657 | drivers/net/sdla.c | sdla_write(dev, pbuf->buf_addr, skb->data, skb->len); |
skb | 679 | drivers/net/sdla.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 691 | drivers/net/sdla.c | struct sk_buff *skb; |
skb | 707 | drivers/net/sdla.c | skb = NULL; |
skb | 738 | drivers/net/sdla.c | skb = dev_alloc_skb(len); |
skb | 739 | drivers/net/sdla.c | if (skb == NULL) |
skb | 748 | drivers/net/sdla.c | sdla_read(dev, dev->mem_start + ((SDLA_502_RCV_BUF + SDLA_502_DATA_OFS) & 0x1FFF), skb_put(skb,len), len); |
skb | 777 | drivers/net/sdla.c | skb = dev_alloc_skb(len); |
skb | 778 | drivers/net/sdla.c | if (skb == NULL) |
skb | 791 | drivers/net/sdla.c | sdla_read(dev, addr, skb_put(skb, len), len); |
skb | 795 | drivers/net/sdla.c | sdla_read(dev, pbufi->buf_base, skb_put(skb, split), split); |
skb | 811 | drivers/net/sdla.c | (*dlp->receive)(skb, master); |
skb | 83 | drivers/net/seeq8005.c | static int seeq8005_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 374 | drivers/net/seeq8005.c | seeq8005_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 395 | drivers/net/seeq8005.c | if (skb == NULL) { |
skb | 405 | drivers/net/seeq8005.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 406 | drivers/net/seeq8005.c | unsigned char *buf = skb->data; |
skb | 411 | drivers/net/seeq8005.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 533 | drivers/net/seeq8005.c | struct sk_buff *skb; |
skb | 536 | drivers/net/seeq8005.c | skb = dev_alloc_skb(pkt_len); |
skb | 537 | drivers/net/seeq8005.c | if (skb == NULL) { |
skb | 542 | drivers/net/seeq8005.c | skb->dev = dev; |
skb | 543 | drivers/net/seeq8005.c | skb_reserve(skb, 2); /* align data on 16 byte */ |
skb | 544 | drivers/net/seeq8005.c | buf = skb_put(skb,pkt_len); |
skb | 557 | drivers/net/seeq8005.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 558 | drivers/net/seeq8005.c | netif_rx(skb); |
skb | 488 | drivers/net/sk_g16.c | static int SK_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 1191 | drivers/net/sk_g16.c | static int SK_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 1222 | drivers/net/sk_g16.c | if (skb == NULL) |
skb | 1248 | drivers/net/sk_g16.c | short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 1256 | drivers/net/sk_g16.c | memcpy((char *) (tmdp->u.buffer & 0x00ffffff), (char *)skb->data, |
skb | 1257 | drivers/net/sk_g16.c | skb->len); |
skb | 1288 | drivers/net/sk_g16.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1569 | drivers/net/sk_g16.c | struct sk_buff *skb; |
skb | 1571 | drivers/net/sk_g16.c | skb = dev_alloc_skb(len+2); /* allocate socket buffer */ |
skb | 1573 | drivers/net/sk_g16.c | if (skb == NULL) /* Could not get mem ? */ |
skb | 1591 | drivers/net/sk_g16.c | skb->dev = dev; |
skb | 1592 | drivers/net/sk_g16.c | skb_reserve(skb,2); /* Align IP header on 16 byte boundary */ |
skb | 1601 | drivers/net/sk_g16.c | memcpy(skb_put(skb,len), (unsigned char *) (rmdp->u.buffer & 0x00ffffff), |
skb | 1612 | drivers/net/sk_g16.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 1613 | drivers/net/sk_g16.c | netif_rx(skb); /* queue packet and mark it for processing */ |
skb | 102 | drivers/net/skeleton.c | static int net_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 344 | drivers/net/skeleton.c | net_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 369 | drivers/net/skeleton.c | if (skb == NULL) { |
skb | 380 | drivers/net/skeleton.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 381 | drivers/net/skeleton.c | unsigned char *buf = skb->data; |
skb | 386 | drivers/net/skeleton.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 459 | drivers/net/skeleton.c | struct sk_buff *skb; |
skb | 461 | drivers/net/skeleton.c | skb = dev_alloc_skb(pkt_len); |
skb | 462 | drivers/net/skeleton.c | if (skb == NULL) { |
skb | 468 | drivers/net/skeleton.c | skb->dev = dev; |
skb | 471 | drivers/net/skeleton.c | memcpy(skb_put(skb,pkt_len), (void*)dev->rmem_start, |
skb | 474 | drivers/net/skeleton.c | insw(ioaddr, skb->data, (pkt_len + 1) >> 1); |
skb | 476 | drivers/net/skeleton.c | netif_rx(skb); |
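
[Editorial sketch] The skeleton.c rows above show the transmit-side conventions most drivers in this list share: treat a NULL skb as the old poll/timeout call, pad short frames up to ETH_ZLEN, copy skb->data out to the hardware, then release the buffer with dev_kfree_skb(..., FREE_WRITE). A minimal illustration under those assumptions; the staging buffer and function name are hypothetical.

    #include <linux/skbuff.h>
    #include <linux/netdevice.h>
    #include <linux/if_ether.h>
    #include <linux/string.h>

    static unsigned char example_tx_buf[1600];            /* hypothetical staging area */

    static int example_send_packet(struct sk_buff *skb, struct device *dev)
    {
            short length;

            if (skb == NULL)                               /* NULL is the old "poll/timeout" call, not a frame */
                    return 0;
            length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;   /* pad runt frames to the minimum */
            memcpy(example_tx_buf, skb->data, skb->len);
            if (length > skb->len)
                    memset(example_tx_buf + skb->len, 0, length - skb->len);  /* zero the padding */
            /* ...program the hardware with example_tx_buf / length here... */
            dev_kfree_skb(skb, FREE_WRITE);                /* the driver owns and frees the buffer */
            return 0;
    }
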
skb | 346 | drivers/net/slip.c | struct sk_buff *skb; |
skb | 383 | drivers/net/slip.c | skb = dev_alloc_skb(count); |
skb | 384 | drivers/net/slip.c | if (skb == NULL) { |
skb | 389 | drivers/net/slip.c | skb->dev = sl->dev; |
skb | 390 | drivers/net/slip.c | memcpy(skb_put(skb,count), sl->rbuff, count); |
skb | 391 | drivers/net/slip.c | skb->mac.raw=skb->data; |
skb | 393 | drivers/net/slip.c | skb->protocol=htons(ETH_P_AX25); |
skb | 395 | drivers/net/slip.c | skb->protocol=htons(ETH_P_IP); |
skb | 396 | drivers/net/slip.c | netif_rx(skb); |
skb | 486 | drivers/net/slip.c | sl_xmit(struct sk_buff *skb, struct device *dev) |
skb | 525 | drivers/net/slip.c | if (skb != NULL) { |
skb | 527 | drivers/net/slip.c | sl_encaps(sl, skb->data, skb->len); |
skb | 528 | drivers/net/slip.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 538 | drivers/net/slip.c | sl_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 546 | drivers/net/slip.c | return ax25_encapsulate(skb, dev, type, daddr, saddr, len); |
skb | 557 | drivers/net/slip.c | struct sk_buff *skb) |
skb | 564 | drivers/net/slip.c | return ax25_rebuild_header(buff, dev, raddr, skb); |
skb | 73 | drivers/net/smc-ultra.c | struct sk_buff *skb, int ring_offset); |
skb | 286 | drivers/net/smc-ultra.c | ultra_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 296 | drivers/net/smc-ultra.c | memcpy_fromio(skb->data, xfer_start, semi_count); |
skb | 298 | drivers/net/smc-ultra.c | memcpy_fromio(skb->data + semi_count, dev->rmem_start, count); |
skb | 301 | drivers/net/smc-ultra.c | eth_io_copy_and_sum(skb, xfer_start, count, 0); |
skb | 224 | drivers/net/smc9194.c | static int smc_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 313 | drivers/net/smc9194.c | static int smc_wait_to_send_packet( struct sk_buff * skb, struct device *dev ); |
skb | 547 | drivers/net/smc9194.c | static int smc_wait_to_send_packet( struct sk_buff * skb, struct device * dev ) |
skb | 561 | drivers/net/smc9194.c | lp->saved_skb = skb; |
skb | 563 | drivers/net/smc9194.c | length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 575 | drivers/net/smc9194.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 645 | drivers/net/smc9194.c | struct sk_buff * skb = lp->saved_skb; |
skb | 652 | drivers/net/smc9194.c | if ( !skb ) { |
skb | 656 | drivers/net/smc9194.c | length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 657 | drivers/net/smc9194.c | buf = skb->data; |
skb | 664 | drivers/net/smc9194.c | kfree(skb); |
skb | 727 | drivers/net/smc9194.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 1224 | drivers/net/smc9194.c | static int smc_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 1248 | drivers/net/smc9194.c | if (skb == NULL) { |
skb | 1257 | drivers/net/smc9194.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 1261 | drivers/net/smc9194.c | return smc_wait_to_send_packet( skb, dev ); |
skb | 1463 | drivers/net/smc9194.c | struct sk_buff * skb; |
skb | 1475 | drivers/net/smc9194.c | skb = alloc_skb( packet_length + 5, GFP_ATOMIC ); |
skb | 1477 | drivers/net/smc9194.c | skb = dev_alloc_skb( packet_length + 5); |
skb | 1480 | drivers/net/smc9194.c | if ( skb == NULL ) { |
skb | 1492 | drivers/net/smc9194.c | skb_reserve( skb, 2 ); /* 16 bit alignment */ |
skb | 1495 | drivers/net/smc9194.c | skb->dev = dev; |
skb | 1497 | drivers/net/smc9194.c | skb->len = packet_length; |
skb | 1498 | drivers/net/smc9194.c | data = skb->data; |
skb | 1500 | drivers/net/smc9194.c | data = skb_put( skb, packet_length); |
skb | 1529 | drivers/net/smc9194.c | skb->protocol = eth_type_trans(skb, dev ); |
skb | 1531 | drivers/net/smc9194.c | netif_rx(skb); |
skb | 339 | drivers/net/sunlance.c | struct sk_buff *skb; |
skb | 356 | drivers/net/sunlance.c | skb = dev_alloc_skb (pkt_len+2); |
skb | 357 | drivers/net/sunlance.c | if (skb == NULL){ |
skb | 365 | drivers/net/sunlance.c | skb->dev = dev; |
skb | 366 | drivers/net/sunlance.c | skb_reserve (skb, 2); /* 16 byte align */ |
skb | 367 | drivers/net/sunlance.c | buf = skb_put (skb, pkt_len); /* make room */ |
skb | 369 | drivers/net/sunlance.c | skb->protocol = eth_type_trans (skb,dev); |
skb | 370 | drivers/net/sunlance.c | netif_rx (skb); |
skb | 577 | drivers/net/sunlance.c | lance_start_xmit (struct sk_buff *skb, struct device *dev) |
skb | 600 | drivers/net/sunlance.c | if (skb == NULL){ |
skb | 606 | drivers/net/sunlance.c | if (skb->len <= 0){ |
skb | 607 | drivers/net/sunlance.c | printk ("skb len is %ld\n", skb->len); |
skb | 619 | drivers/net/sunlance.c | skblen = skb->len; |
skb | 632 | drivers/net/sunlance.c | printk ("%2.2x ", skb->data [i]); |
skb | 641 | drivers/net/sunlance.c | memcpy ((char *)&ib->tx_buf [entry][0], skb->data, skblen); |
skb | 657 | drivers/net/sunlance.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 156 | drivers/net/tulip.c | static int tulip_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 376 | drivers/net/tulip.c | tulip_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 406 | drivers/net/tulip.c | if (skb == NULL || skb->len <= 0) { |
skb | 428 | drivers/net/tulip.c | tp->tx_skbuff[entry] = skb; |
skb | 429 | drivers/net/tulip.c | tp->tx_ring[entry].length = skb->len | |
skb | 431 | drivers/net/tulip.c | tp->tx_ring[entry].buffer1 = skb->data; |
skb | 601 | drivers/net/tulip.c | struct sk_buff *skb; |
skb | 603 | drivers/net/tulip.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 604 | drivers/net/tulip.c | if (skb == NULL) { |
skb | 619 | drivers/net/tulip.c | skb->dev = dev; |
skb | 620 | drivers/net/tulip.c | skb_reserve(skb,2); /* 16 byte align the data fields */ |
skb | 621 | drivers/net/tulip.c | memcpy(skb_put(skb,pkt_len), lp->rx_ring[entry].buffer1, pkt_len); |
skb | 622 | drivers/net/tulip.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 623 | drivers/net/tulip.c | netif_rx(skb); |
skb | 1389 | drivers/net/wavelan.c | wavelan_send_packet(struct sk_buff *skb, device *dev) |
skb | 1430 | drivers/net/wavelan.c | if (skb == (struct sk_buff *)0) |
skb | 1444 | drivers/net/wavelan.c | length = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; |
skb | 1445 | drivers/net/wavelan.c | buf = skb->data; |
skb | 1452 | drivers/net/wavelan.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1497 | drivers/net/wavelan.c | struct sk_buff *skb; |
skb | 1640 | drivers/net/wavelan.c | if ((skb = dev_alloc_skb(sksize)) == (struct sk_buff *)0) |
skb | 1647 | drivers/net/wavelan.c | skb->dev = dev; |
skb | 1649 | drivers/net/wavelan.c | obram_read(ioaddr, rbd.rbd_bufl, skb_put(skb,pkt_len), pkt_len); |
skb | 1665 | drivers/net/wavelan.c | c = skb->data[i]; |
skb | 1667 | drivers/net/wavelan.c | printk(" %c", skb->data[i]); |
skb | 1669 | drivers/net/wavelan.c | printk("%02x", skb->data[i]); |
skb | 1678 | drivers/net/wavelan.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 1679 | drivers/net/wavelan.c | netif_rx(skb); |
skb | 55 | drivers/net/wd.c | struct sk_buff *skb, int ring_offset); |
skb | 376 | drivers/net/wd.c | wd_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 384 | drivers/net/wd.c | memcpy_fromio(skb->data, xfer_start, semi_count); |
skb | 386 | drivers/net/wd.c | memcpy_fromio(skb->data + semi_count, dev->rmem_start, count); |
skb | 389 | drivers/net/wd.c | eth_io_copy_and_sum(skb, xfer_start, count, 0); |
skb | 184 | drivers/net/znet.c | static int znet_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 317 | drivers/net/znet.c | static int znet_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 343 | drivers/net/znet.c | if (skb == NULL) { |
skb | 360 | drivers/net/znet.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 361 | drivers/net/znet.c | unsigned char *buf = (void *)skb->data; |
skb | 386 | drivers/net/znet.c | memcpy(zn.tx_cur, buf, skb->len); |
skb | 400 | drivers/net/znet.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 549 | drivers/net/znet.c | struct sk_buff *skb; |
skb | 551 | drivers/net/znet.c | skb = dev_alloc_skb(pkt_len); |
skb | 552 | drivers/net/znet.c | if (skb == NULL) { |
skb | 558 | drivers/net/znet.c | skb->dev = dev; |
skb | 562 | drivers/net/znet.c | memcpy(skb_put(skb,semi_cnt), zn.rx_cur, semi_cnt); |
skb | 563 | drivers/net/znet.c | memcpy(skb_put(skb,pkt_len-semi_cnt), zn.rx_start, |
skb | 566 | drivers/net/znet.c | memcpy(skb_put(skb,pkt_len), zn.rx_cur, pkt_len); |
skb | 568 | drivers/net/znet.c | unsigned int *packet = (unsigned int *) skb->data; |
skb | 573 | drivers/net/znet.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 574 | drivers/net/znet.c | netif_rx(skb); |
skb | 228 | fs/nfs/nfsroot.c | static int root_rarp_recv(struct sk_buff *skb, struct device *dev, |
skb | 265 | fs/nfs/nfsroot.c | static int root_rarp_recv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 267 | fs/nfs/nfsroot.c | struct arphdr *rarp = (struct arphdr *)skb->h.raw; |
skb | 274 | fs/nfs/nfsroot.c | kfree_skb(skb, FREE_READ); |
skb | 280 | fs/nfs/nfsroot.c | kfree_skb(skb, FREE_READ); |
skb | 290 | fs/nfs/nfsroot.c | kfree_skb(skb, FREE_READ); |
skb | 305 | fs/nfs/nfsroot.c | kfree_skb(skb, FREE_READ); |
skb | 316 | fs/nfs/nfsroot.c | kfree_skb(skb, FREE_READ); |
skb | 331 | fs/nfs/nfsroot.c | kfree_skb(skb, FREE_READ); |
skb | 213 | include/asm-alpha/io.h | #define eth_io_copy_and_sum(skb,src,len,unused) memcpy_fromio((skb)->data,(src),(len)) |
skb | 141 | include/linux/atalk.h | extern int aarp_send_ddp(struct device *dev,struct sk_buff *skb, struct at_addr *sa, void *hwaddr); |
skb | 31 | include/linux/etherdevice.h | extern int eth_header(struct sk_buff *skb, struct device *dev, |
skb | 35 | include/linux/etherdevice.h | unsigned long dst, struct sk_buff *skb); |
skb | 36 | include/linux/etherdevice.h | extern unsigned short eth_type_trans(struct sk_buff *skb, struct device *dev); |
skb | 18 | include/linux/firewall.h | struct sk_buff *skb, void *phdr); |
skb | 20 | include/linux/firewall.h | struct sk_buff *skb, void *phdr); |
skb | 22 | include/linux/firewall.h | struct sk_buff *skb, void *phdr); |
skb | 34 | include/linux/firewall.h | extern int call_fw_firewall(int pf, struct sk_buff *skb, void *phdr); |
skb | 35 | include/linux/firewall.h | extern int call_in_firewall(int pf, struct sk_buff *skb, void *phdr); |
skb | 36 | include/linux/firewall.h | extern int call_out_firewall(int pf, struct sk_buff *skb, void *phdr); |
skb | 136 | include/linux/if_frad.h | void (*receive)(struct sk_buff *skb, struct device *); |
skb | 445 | include/linux/isdn.h | struct sk_buff *skb; |
skb | 453 | include/linux/isdn.h | struct sk_buff *skb; |
skb | 124 | include/linux/mroute.h | extern void ipmr_forward(struct sk_buff *skb, int is_frag); |
skb | 169 | include/linux/netdevice.h | int (*hard_start_xmit) (struct sk_buff *skb, |
skb | 171 | include/linux/netdevice.h | int (*hard_header) (struct sk_buff *skb, |
skb | 178 | include/linux/netdevice.h | unsigned long raddr, struct sk_buff *skb); |
skb | 232 | include/linux/netdevice.h | extern void dev_queue_xmit(struct sk_buff *skb, struct device *dev, |
skb | 235 | include/linux/netdevice.h | extern void netif_rx(struct sk_buff *skb); |
skb | 137 | include/linux/skbuff.h | extern void kfree_skb(struct sk_buff *skb, int rw); |
skb | 149 | include/linux/skbuff.h | extern void kfree_skbmem(struct sk_buff *skb); |
skb | 150 | include/linux/skbuff.h | extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority); |
skb | 151 | include/linux/skbuff.h | extern struct sk_buff * skb_copy(struct sk_buff *skb, int priority); |
skb | 152 | include/linux/skbuff.h | extern void skb_device_lock(struct sk_buff *skb); |
skb | 153 | include/linux/skbuff.h | extern void skb_device_unlock(struct sk_buff *skb); |
skb | 154 | include/linux/skbuff.h | extern void dev_kfree_skb(struct sk_buff *skb, int mode); |
skb | 155 | include/linux/skbuff.h | extern int skb_device_locked(struct sk_buff *skb); |
skb | 156 | include/linux/skbuff.h | extern unsigned char * skb_put(struct sk_buff *skb, int len); |
skb | 157 | include/linux/skbuff.h | extern unsigned char * skb_push(struct sk_buff *skb, int len); |
skb | 158 | include/linux/skbuff.h | extern unsigned char * skb_pull(struct sk_buff *skb, int len); |
skb | 159 | include/linux/skbuff.h | extern int skb_headroom(struct sk_buff *skb); |
skb | 160 | include/linux/skbuff.h | extern int skb_tailroom(struct sk_buff *skb); |
skb | 161 | include/linux/skbuff.h | extern void skb_reserve(struct sk_buff *skb, int len); |
skb | 162 | include/linux/skbuff.h | extern void skb_trim(struct sk_buff *skb, int len); |
skb | 193 | include/linux/skbuff.h | extern int skb_check(struct sk_buff *skb,int,int, char *); |
skb | 194 | include/linux/skbuff.h | #define IS_SKB(skb) skb_check((skb), 0, __LINE__,__FILE__) |
skb | 195 | include/linux/skbuff.h | #define IS_SKB_HEAD(skb) skb_check((skb), 1, __LINE__,__FILE__) |
skb | 197 | include/linux/skbuff.h | #define IS_SKB(skb) |
skb | 198 | include/linux/skbuff.h | #define IS_SKB_HEAD(skb) |
skb | 360 | include/linux/skbuff.h | extern __inline__ void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) |
skb | 365 | include/linux/skbuff.h | next = skb->next; |
skb | 366 | include/linux/skbuff.h | prev = skb->prev; |
skb | 367 | include/linux/skbuff.h | skb->next = NULL; |
skb | 368 | include/linux/skbuff.h | skb->prev = NULL; |
skb | 369 | include/linux/skbuff.h | skb->list = NULL; |
skb | 381 | include/linux/skbuff.h | extern __inline__ void skb_unlink(struct sk_buff *skb) |
skb | 387 | include/linux/skbuff.h | if(skb->list) |
skb | 388 | include/linux/skbuff.h | __skb_unlink(skb, skb->list); |
skb | 396 | include/linux/skbuff.h | extern __inline__ unsigned char *skb_put(struct sk_buff *skb, int len) |
skb | 398 | include/linux/skbuff.h | unsigned char *tmp=skb->tail; |
skb | 399 | include/linux/skbuff.h | skb->tail+=len; |
skb | 400 | include/linux/skbuff.h | skb->len+=len; |
skb | 401 | include/linux/skbuff.h | if(skb->tail>skb->end) |
skb | 406 | include/linux/skbuff.h | extern __inline__ unsigned char *skb_push(struct sk_buff *skb, int len) |
skb | 408 | include/linux/skbuff.h | skb->data-=len; |
skb | 409 | include/linux/skbuff.h | skb->len+=len; |
skb | 410 | include/linux/skbuff.h | if(skb->data<skb->head) |
skb | 412 | include/linux/skbuff.h | return skb->data; |
skb | 415 | include/linux/skbuff.h | extern __inline__ unsigned char * skb_pull(struct sk_buff *skb, int len) |
skb | 417 | include/linux/skbuff.h | if(len > skb->len) |
skb | 419 | include/linux/skbuff.h | skb->data+=len; |
skb | 420 | include/linux/skbuff.h | skb->len-=len; |
skb | 421 | include/linux/skbuff.h | return skb->data; |
skb | 424 | include/linux/skbuff.h | extern __inline__ int skb_headroom(struct sk_buff *skb) |
skb | 426 | include/linux/skbuff.h | return skb->data-skb->head; |
skb | 429 | include/linux/skbuff.h | extern __inline__ int skb_tailroom(struct sk_buff *skb) |
skb | 431 | include/linux/skbuff.h | return skb->end-skb->tail; |
skb | 434 | include/linux/skbuff.h | extern __inline__ void skb_reserve(struct sk_buff *skb, int len) |
skb | 436 | include/linux/skbuff.h | skb->data+=len; |
skb | 437 | include/linux/skbuff.h | skb->tail+=len; |
skb | 440 | include/linux/skbuff.h | extern __inline__ void skb_trim(struct sk_buff *skb, int len) |
skb | 442 | include/linux/skbuff.h | if(skb->len>len) |
skb | 444 | include/linux/skbuff.h | skb->len=len; |
skb | 445 | include/linux/skbuff.h | skb->tail=skb->data+len; |
skb | 455 | include/linux/skbuff.h | extern void skb_free_datagram(struct sock * sk, struct sk_buff *skb); |
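
[Editorial note] The inline definitions quoted above make the buffer geometry explicit: skb_reserve() moves data and tail together, skb_put() advances tail and len, skb_push()/skb_pull() move data down/up while adjusting len, and skb_trim() shortens from the tail. A worked example of the resulting offsets, derived directly from those definitions (D is wherever allocation left the data pointer; initially data == tail == D and len == 0):

    skb_reserve(skb, 2);    /* data = D+2,  tail = D+2,  len = 0  */
    skb_put(skb, 60);       /* data = D+2,  tail = D+62, len = 60 */
    skb_pull(skb, 14);      /* data = D+16, tail = D+62, len = 46 */
    skb_push(skb, 14);      /* data = D+2,  tail = D+62, len = 60 */
    skb_trim(skb, 20);      /* data = D+2,  tail = D+22, len = 20 */
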
skb | 31 | include/linux/trdevice.h | extern int tr_header(struct sk_buff *skb, struct device *dev, |
skb | 35 | include/linux/trdevice.h | unsigned long raddr, struct sk_buff *skb); |
skb | 36 | include/linux/trdevice.h | extern unsigned short tr_type_trans(struct sk_buff *skb, struct device *dev); |
skb | 6 | include/net/arp.h | extern int arp_rcv(struct sk_buff *skb, struct device *dev, |
skb | 10 | include/net/arp.h | struct device *dev, u32 saddr, struct sk_buff *skb); |
skb | 58 | include/net/ip.h | struct sk_buff *skb; /* complete received fragment */ |
skb | 89 | include/net/ip.h | extern int ip_send(struct rtable *rt, struct sk_buff *skb, __u32 daddr, int len, struct device *dev, __u32 saddr); |
skb | 90 | include/net/ip.h | extern int ip_build_header(struct sk_buff *skb, |
skb | 96 | include/net/ip.h | extern int ip_rcv(struct sk_buff *skb, struct device *dev, |
skb | 100 | include/net/ip.h | struct sk_buff * skb); |
skb | 101 | include/net/ip.h | extern int ip_options_compile(struct options * opt, struct sk_buff * skb); |
skb | 105 | include/net/ip.h | struct device *dev, struct sk_buff *skb, |
skb | 129 | include/net/ip.h | struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev); |
skb | 130 | include/net/ip.h | void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag); |
skb | 136 | include/net/ip.h | extern int ip_forward(struct sk_buff *skb, struct device *dev, int is_frag, __u32 target_addr); |
skb | 142 | include/net/ip.h | extern void ip_options_build(struct sk_buff *skb, struct options *opt, __u32 daddr, __u32 saddr, int is_frag); |
skb | 143 | include/net/ip.h | extern int ip_options_echo(struct options *dopt, struct options *sopt, __u32 daddr, __u32 saddr, struct sk_buff *skb); |
skb | 144 | include/net/ip.h | extern void ip_options_fragment(struct sk_buff *skb); |
skb | 145 | include/net/ip.h | extern int ip_options_compile(struct options *opt, struct sk_buff *skb); |
skb | 1 | include/net/ipip.h | extern int ipip_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 50 | include/net/ipx.h | extern int ipx_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt); |
skb | 11 | include/net/netlink.h | extern int netlink_attach(int unit, int (*function)(struct sk_buff *skb)); |
skb | 12 | include/net/netlink.h | extern int netlink_donothing(struct sk_buff *skb); |
skb | 14 | include/net/netlink.h | extern int netlink_post(int unit, struct sk_buff *skb); |
skb | 31 | include/net/protocol.h | int (*handler)(struct sk_buff *skb, struct device *dev, |
skb | 336 | include/net/sock.h | int (*build_header)(struct sk_buff *skb, |
skb | 346 | include/net/sock.h | struct device *dev, struct sk_buff *skb, |
skb | 464 | include/net/sock.h | struct sk_buff *skb); |
skb | 466 | include/net/sock.h | struct sk_buff *skb); |
skb | 477 | include/net/sock.h | extern struct sk_buff *sock_alloc_send_skb(struct sock *skb, |
skb | 492 | include/net/sock.h | extern __inline__ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
skb | 494 | include/net/sock.h | if (sk->rmem_alloc + skb->truesize >= sk->rcvbuf) |
skb | 496 | include/net/sock.h | atomic_add(skb->truesize, &sk->rmem_alloc); |
skb | 497 | include/net/sock.h | skb->sk=sk; |
skb | 498 | include/net/sock.h | skb_queue_tail(&sk->receive_queue,skb); |
skb | 500 | include/net/sock.h | sk->data_ready(sk,skb->len); |
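
[Editorial sketch] The sock.h rows above give the whole receive-queue accounting: the buffer is rejected if its truesize would push rmem_alloc past rcvbuf; otherwise it is charged to the socket, skb->sk is set, the buffer is queued, and data_ready() wakes the reader. A hedged usage sketch from the protocol side; example_deliver() is hypothetical, and the drop-on-failure handling mirrors the appletalk rows later in this index.

    #include <net/sock.h>
    #include <linux/skbuff.h>

    static void example_deliver(struct sock *sk, struct sk_buff *skb)
    {
            if (sock_queue_rcv_skb(sk, skb) < 0) {
                    /* receive buffer full: the datagram is simply dropped */
                    skb->sk = NULL;
                    kfree_skb(skb, FREE_READ);
                    return;
            }
            /* on success the socket now owns the buffer; nothing more to do here */
    }
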
skb | 129 | include/net/tcp.h | extern int tcp_rcv(struct sk_buff *skb, struct device *dev, |
skb | 142 | include/net/tcp.h | unsigned long daddr, int len, struct sk_buff *skb); |
skb | 45 | include/net/udp.h | extern int udp_rcv(struct sk_buff *skb, struct device *dev, |
skb | 6 | net/802/llc.c | int llc_rx_adm(struct sock *sk,struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 36 | net/802/llc.c | int llc_rx_setup(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 70 | net/802/llc.c | int llc_rx_reset(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 114 | net/802/llc.c | int llc_rx_d_conn(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 150 | net/802/llc.c | int llc_rx_error(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 199 | net/802/llc.c | int llc_rx_nr_shared(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 288 | net/802/llc.c | int llc_rx_normal(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 290 | net/802/llc.c | if(llc_rx_nr_shared(sk, skb, type, cmd, pf, nr, ns)) |
skb | 364 | net/802/llc.c | llc_queue_data(sk,skb); |
skb | 32 | net/802/p8022.c | p8022_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 36 | net/802/p8022.c | proto = find_8022_client(*(skb->h.raw)); |
skb | 38 | net/802/p8022.c | skb->h.raw += 3; |
skb | 39 | net/802/p8022.c | skb_pull(skb,3); |
skb | 40 | net/802/p8022.c | return proto->rcvfunc(skb, dev, pt); |
skb | 43 | net/802/p8022.c | skb->sk = NULL; |
skb | 44 | net/802/p8022.c | kfree_skb(skb, FREE_READ); |
skb | 50 | net/802/p8022.c | struct sk_buff *skb, unsigned char *dest_node) |
skb | 52 | net/802/p8022.c | struct device *dev = skb->dev; |
skb | 55 | net/802/p8022.c | rawp = skb_push(skb,3); |
skb | 59 | net/802/p8022.c | dev->hard_header(skb, dev, ETH_P_802_3, dest_node, NULL, skb->len); |
skb | 9 | net/802/p8023.c | struct sk_buff *skb, unsigned char *dest_node) |
skb | 11 | net/802/p8023.c | struct device *dev = skb->dev; |
skb | 13 | net/802/p8023.c | dev->hard_header(skb, dev, ETH_P_802_3, dest_node, NULL, skb->len); |
skb | 41 | net/802/psnap.c | int snap_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 54 | net/802/psnap.c | proto = find_snap_client(skb->h.raw); |
skb | 61 | net/802/psnap.c | skb->h.raw += 5; |
skb | 62 | net/802/psnap.c | skb_pull(skb,5); |
skb | 65 | net/802/psnap.c | return proto->rcvfunc(skb, dev, &psnap_packet_type); |
skb | 67 | net/802/psnap.c | skb->sk = NULL; |
skb | 68 | net/802/psnap.c | kfree_skb(skb, FREE_READ); |
skb | 76 | net/802/psnap.c | static void snap_datalink_header(struct datalink_proto *dl, struct sk_buff *skb, unsigned char *dest_node) |
skb | 78 | net/802/psnap.c | memcpy(skb_push(skb,5),dl->type,5); |
skb | 79 | net/802/psnap.c | snap_dl->datalink_header(snap_dl, skb, dest_node); |
skb | 41 | net/802/tr.c | int tr_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 45 | net/802/tr.c | struct trh_hdr *trh=(struct trh_hdr *)skb_push(skb,dev->hard_header_len); |
skb | 72 | net/802/tr.c | struct sk_buff *skb) { |
skb | 82 | net/802/tr.c | if(arp_find(trh->daddr, dest, dev, dev->pa_addr, skb)) { |
skb | 91 | net/802/tr.c | unsigned short tr_type_trans(struct sk_buff *skb, struct device *dev) { |
skb | 93 | net/802/tr.c | struct trh_hdr *trh=(struct trh_hdr *)skb->data; |
skb | 94 | net/802/tr.c | struct trllc *trllc=(struct trllc *)(skb->data+sizeof(struct trh_hdr)); |
skb | 96 | net/802/tr.c | skb->mac.raw = skb->data; |
skb | 98 | net/802/tr.c | skb_pull(skb,dev->hard_header_len); |
skb | 106 | net/802/tr.c | skb->pkt_type=PACKET_BROADCAST; |
skb | 108 | net/802/tr.c | skb->pkt_type=PACKET_MULTICAST; |
skb | 114 | net/802/tr.c | skb->pkt_type=PACKET_OTHERHOST; |
skb | 88 | net/appletalk/aarp.c | struct sk_buff *skb; |
skb | 90 | net/appletalk/aarp.c | while((skb=skb_dequeue(&a->packet_queue))!=NULL) |
skb | 91 | net/appletalk/aarp.c | kfree_skb(skb, FREE_WRITE); |
skb | 104 | net/appletalk/aarp.c | struct sk_buff *skb=alloc_skb(len, GFP_ATOMIC); |
skb | 108 | net/appletalk/aarp.c | if(skb==NULL || sat==NULL) |
skb | 115 | net/appletalk/aarp.c | skb_reserve(skb,dev->hard_header_len+aarp_dl->header_length); |
skb | 116 | net/appletalk/aarp.c | eah = (struct elapaarp *)skb_put(skb,sizeof(struct elapaarp)); |
skb | 117 | net/appletalk/aarp.c | skb->arp = 1; |
skb | 118 | net/appletalk/aarp.c | skb->free = 1; |
skb | 119 | net/appletalk/aarp.c | skb->dev = a->dev; |
skb | 147 | net/appletalk/aarp.c | aarp_dl->datalink_header(aarp_dl, skb, aarp_eth_multicast); |
skb | 153 | net/appletalk/aarp.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 165 | net/appletalk/aarp.c | struct sk_buff *skb=alloc_skb(len, GFP_ATOMIC); |
skb | 168 | net/appletalk/aarp.c | if(skb==NULL) |
skb | 175 | net/appletalk/aarp.c | skb_reserve(skb,dev->hard_header_len+aarp_dl->header_length); |
skb | 176 | net/appletalk/aarp.c | eah = (struct elapaarp *)skb_put(skb,sizeof(struct elapaarp)); |
skb | 177 | net/appletalk/aarp.c | skb->arp = 1; |
skb | 178 | net/appletalk/aarp.c | skb->free = 1; |
skb | 179 | net/appletalk/aarp.c | skb->dev = dev; |
skb | 210 | net/appletalk/aarp.c | aarp_dl->datalink_header(aarp_dl, skb, sha); |
skb | 216 | net/appletalk/aarp.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 227 | net/appletalk/aarp.c | struct sk_buff *skb=alloc_skb(len, GFP_ATOMIC); |
skb | 231 | net/appletalk/aarp.c | if(skb==NULL) |
skb | 238 | net/appletalk/aarp.c | skb_reserve(skb,dev->hard_header_len+aarp_dl->header_length); |
skb | 239 | net/appletalk/aarp.c | eah = (struct elapaarp *)skb_put(skb,sizeof(struct elapaarp)); |
skb | 241 | net/appletalk/aarp.c | skb->arp = 1; |
skb | 242 | net/appletalk/aarp.c | skb->free = 1; |
skb | 243 | net/appletalk/aarp.c | skb->dev = dev; |
skb | 271 | net/appletalk/aarp.c | aarp_dl->datalink_header(aarp_dl, skb, aarp_eth_multicast); |
skb | 277 | net/appletalk/aarp.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 425 | net/appletalk/aarp.c | int aarp_send_ddp(struct device *dev,struct sk_buff *skb, struct at_addr *sa, void *hwaddr) |
skb | 439 | net/appletalk/aarp.c | struct ddpehdr *ddp=(struct ddpehdr *)skb->data; |
skb | 450 | net/appletalk/aarp.c | skb_pull(skb,sizeof(struct ddpehdr)-4); |
skb | 456 | net/appletalk/aarp.c | *((__u16 *)skb->data)=htons(skb->len); |
skb | 464 | net/appletalk/aarp.c | skb_push(skb,3); |
skb | 465 | net/appletalk/aarp.c | skb->data[0]=sa->s_node; |
skb | 466 | net/appletalk/aarp.c | skb->data[1]=at->s_node; |
skb | 467 | net/appletalk/aarp.c | skb->data[2]=ft; |
skb | 469 | net/appletalk/aarp.c | if(skb->sk==NULL) |
skb | 470 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, SOPRI_NORMAL); |
skb | 472 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, skb->sk->priority); |
skb | 485 | net/appletalk/aarp.c | skb->dev = dev; |
skb | 486 | net/appletalk/aarp.c | skb->protocol = htons(ETH_P_ATALK); |
skb | 498 | net/appletalk/aarp.c | ddp_dl->datalink_header(ddp_dl, skb, ddp_eth_multicast); |
skb | 499 | net/appletalk/aarp.c | if(skb->sk==NULL) |
skb | 500 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, SOPRI_NORMAL); |
skb | 502 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, skb->sk->priority); |
skb | 514 | net/appletalk/aarp.c | ddp_dl->datalink_header(ddp_dl, skb, a->hwaddr); |
skb | 515 | net/appletalk/aarp.c | if(skb->sk==NULL) |
skb | 516 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, SOPRI_NORMAL); |
skb | 518 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, skb->sk->priority); |
skb | 534 | net/appletalk/aarp.c | skb_queue_tail(&a->packet_queue, skb); |
skb | 558 | net/appletalk/aarp.c | skb_queue_tail(&a->packet_queue, skb); |
skb | 600 | net/appletalk/aarp.c | struct sk_buff *skb; |
skb | 619 | net/appletalk/aarp.c | while((skb=skb_dequeue(&a->packet_queue))!=NULL) |
skb | 622 | net/appletalk/aarp.c | ddp_dl->datalink_header(ddp_dl,skb,a->hwaddr); |
skb | 623 | net/appletalk/aarp.c | if(skb->sk==NULL) |
skb | 624 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, SOPRI_NORMAL); |
skb | 626 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, skb->sk->priority); |
skb | 639 | net/appletalk/aarp.c | static int aarp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 641 | net/appletalk/aarp.c | struct elapaarp *ea=(struct elapaarp *)skb->h.raw; |
skb | 655 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 663 | net/appletalk/aarp.c | if(!skb_pull(skb,sizeof(*ea))) |
skb | 665 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 678 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 709 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 722 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 781 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 211 | net/appletalk/ddp.c | struct sk_buff *skb; |
skb | 214 | net/appletalk/ddp.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) |
skb | 216 | net/appletalk/ddp.c | kfree_skb(skb,FREE_READ); |
skb | 1416 | net/appletalk/ddp.c | static int atalk_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 1419 | net/appletalk/ddp.c | struct ddpehdr *ddp=(void *)skb->h.raw; |
skb | 1425 | net/appletalk/ddp.c | if(skb->len<sizeof(*ddp)) |
skb | 1427 | net/appletalk/ddp.c | kfree_skb(skb,FREE_READ); |
skb | 1444 | net/appletalk/ddp.c | origlen = skb->len; |
skb | 1446 | net/appletalk/ddp.c | skb_trim(skb,min(skb->len,ddp->deh_len)); |
skb | 1454 | net/appletalk/ddp.c | if(skb->len<sizeof(*ddp)) |
skb | 1456 | net/appletalk/ddp.c | kfree_skb(skb,FREE_READ); |
skb | 1468 | net/appletalk/ddp.c | kfree_skb(skb,FREE_READ); |
skb | 1474 | net/appletalk/ddp.c | if(call_in_firewall(AF_APPLETALK, skb, ddp)!=FW_ACCEPT) |
skb | 1476 | net/appletalk/ddp.c | kfree_skb(skb, FREE_READ); |
skb | 1497 | net/appletalk/ddp.c | if (skb->pkt_type != PACKET_HOST || ddp->deh_dnet == 0) |
skb | 1499 | net/appletalk/ddp.c | kfree_skb(skb, FREE_READ); |
skb | 1508 | net/appletalk/ddp.c | if(call_fw_firewall(AF_APPLETALK, skb, ddp)!=FW_ACCEPT) |
skb | 1510 | net/appletalk/ddp.c | kfree_skb(skb, FREE_READ); |
skb | 1521 | net/appletalk/ddp.c | kfree_skb(skb, FREE_READ); |
skb | 1538 | net/appletalk/ddp.c | skb_trim(skb,min(origlen, rt->dev->hard_header_len + |
skb | 1546 | net/appletalk/ddp.c | skb->arp = 1; /* Resolved */ |
skb | 1548 | net/appletalk/ddp.c | if(aarp_send_ddp(rt->dev, skb, &ta, NULL)==-1) |
skb | 1549 | net/appletalk/ddp.c | kfree_skb(skb, FREE_READ); |
skb | 1563 | net/appletalk/ddp.c | kfree_skb(skb,FREE_READ); |
skb | 1572 | net/appletalk/ddp.c | skb->sk = sock; |
skb | 1574 | net/appletalk/ddp.c | if(sock_queue_rcv_skb(sock,skb)<0) |
skb | 1576 | net/appletalk/ddp.c | skb->sk=NULL; |
skb | 1577 | net/appletalk/ddp.c | kfree_skb(skb, FREE_WRITE); |
skb | 1589 | net/appletalk/ddp.c | static int ltalk_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 1597 | net/appletalk/ddp.c | if(skb->mac.raw[2]==1) |
skb | 1604 | net/appletalk/ddp.c | if(ap==NULL || skb->len<sizeof(struct ddpshdr)) |
skb | 1606 | net/appletalk/ddp.c | kfree_skb(skb, FREE_READ); |
skb | 1615 | net/appletalk/ddp.c | skb_push(skb, sizeof(*ddp)-4); |
skb | 1616 | net/appletalk/ddp.c | ddp=(struct ddpehdr *)skb->data; |
skb | 1628 | net/appletalk/ddp.c | ddp->deh_dnode=skb->mac.raw[0]; /* From physical header */ |
skb | 1629 | net/appletalk/ddp.c | ddp->deh_snode=skb->mac.raw[1]; /* From physical header */ |
skb | 1637 | net/appletalk/ddp.c | ddp->deh_len=skb->len; |
skb | 1642 | net/appletalk/ddp.c | skb->h.raw = skb->data; |
skb | 1643 | net/appletalk/ddp.c | return atalk_rcv(skb,dev,pt); |
skb | 1651 | net/appletalk/ddp.c | struct sk_buff *skb; |
skb | 1724 | net/appletalk/ddp.c | skb = sock_alloc_send_skb(sk, size, 0, 0 , &err); |
skb | 1725 | net/appletalk/ddp.c | if(skb==NULL) |
skb | 1728 | net/appletalk/ddp.c | skb->sk=sk; |
skb | 1729 | net/appletalk/ddp.c | skb->free=1; |
skb | 1730 | net/appletalk/ddp.c | skb->arp=1; |
skb | 1731 | net/appletalk/ddp.c | skb_reserve(skb,ddp_dl->header_length); |
skb | 1732 | net/appletalk/ddp.c | skb_reserve(skb,dev->hard_header_len); |
skb | 1734 | net/appletalk/ddp.c | skb->dev=dev; |
skb | 1739 | net/appletalk/ddp.c | ddp=(struct ddpehdr *)skb_put(skb,sizeof(struct ddpehdr)); |
skb | 1760 | net/appletalk/ddp.c | memcpy_fromiovec(skb_put(skb,len),msg->msg_iov,len); |
skb | 1769 | net/appletalk/ddp.c | if(call_out_firewall(AF_APPLETALK, skb, ddp)!=FW_ACCEPT) |
skb | 1771 | net/appletalk/ddp.c | kfree_skb(skb, FREE_WRITE); |
skb | 1786 | net/appletalk/ddp.c | struct sk_buff *skb2=skb_clone(skb, GFP_KERNEL); |
skb | 1804 | net/appletalk/ddp.c | atomic_sub(skb->truesize, &sk->wmem_alloc); |
skb | 1805 | net/appletalk/ddp.c | ddp_dl->datalink_header(ddp_dl, skb, dev->dev_addr); |
skb | 1806 | net/appletalk/ddp.c | skb->sk = NULL; |
skb | 1807 | net/appletalk/ddp.c | skb->mac.raw=skb->data; |
skb | 1808 | net/appletalk/ddp.c | skb->h.raw = skb->data + ddp_dl->header_length + dev->hard_header_len; |
skb | 1809 | net/appletalk/ddp.c | skb_pull(skb,dev->hard_header_len); |
skb | 1810 | net/appletalk/ddp.c | skb_pull(skb,ddp_dl->header_length); |
skb | 1811 | net/appletalk/ddp.c | atalk_rcv(skb,dev,NULL); |
skb | 1823 | net/appletalk/ddp.c | if(aarp_send_ddp(dev,skb,&usat->sat_addr, NULL)==-1) |
skb | 1824 | net/appletalk/ddp.c | kfree_skb(skb, FREE_WRITE); |
skb | 1839 | net/appletalk/ddp.c | struct sk_buff *skb; |
skb | 1848 | net/appletalk/ddp.c | skb=skb_recv_datagram(sk,flags,noblock,&er); |
skb | 1849 | net/appletalk/ddp.c | if(skb==NULL) |
skb | 1852 | net/appletalk/ddp.c | ddp = (struct ddpehdr *)(skb->h.raw); |
skb | 1858 | net/appletalk/ddp.c | skb_copy_datagram_iovec(skb,0,msg->msg_iov,copied); |
skb | 1865 | net/appletalk/ddp.c | skb_copy_datagram_iovec(skb,sizeof(*ddp),msg->msg_iov,copied); |
skb | 1874 | net/appletalk/ddp.c | skb_free_datagram(sk, skb); |
skb | 1913 | net/appletalk/ddp.c | struct sk_buff *skb; |
skb | 1915 | net/appletalk/ddp.c | if((skb=skb_peek(&sk->receive_queue))!=NULL) |
skb | 1916 | net/appletalk/ddp.c | amount=skb->len-sizeof(struct ddpehdr); |
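
[Editorial sketch] The ddp.c rows above end with the datagram receive path: skb_recv_datagram() dequeues (or waits for) a buffer, skb_copy_datagram_iovec() copies it into the caller's iovec, and skb_free_datagram() releases the buffer together with its rmem charge. A minimal reconstruction under those assumptions; example_recvmsg() and its truncation handling are illustrative only.

    #include <net/sock.h>
    #include <linux/skbuff.h>
    #include <linux/socket.h>

    static int example_recvmsg(struct sock *sk, struct msghdr *msg, int size,
                               int noblock, int flags)
    {
            struct sk_buff *skb;
            int copied;
            int err = 0;

            skb = skb_recv_datagram(sk, flags, noblock, &err);  /* may sleep unless noblock */
            if (skb == NULL)
                    return err;
            copied = skb->len;
            if (copied > size)
                    copied = size;                               /* truncate to the caller's buffer */
            skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
            skb_free_datagram(sk, skb);                          /* drop the buffer and its rmem charge */
            return copied;
    }
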
skb | 370 | net/ax25/af_ax25.c | static void ax25_send_to_raw(struct sock *sk, struct sk_buff *skb, int proto) |
skb | 376 | net/ax25/af_ax25.c | if ((copy = skb_clone(skb, GFP_ATOMIC)) == NULL) |
skb | 383 | net/ax25/af_ax25.c | sk->data_ready(sk, skb->len); |
skb | 411 | net/ax25/af_ax25.c | struct sk_buff *skb; |
skb | 423 | net/ax25/af_ax25.c | while ((skb = skb_dequeue(&ax25->sk->receive_queue)) != NULL) { |
skb | 424 | net/ax25/af_ax25.c | if (skb->sk != ax25->sk) { /* A pending connection */ |
skb | 425 | net/ax25/af_ax25.c | skb->sk->dead = 1; /* Queue the unaccepted socket for death */ |
skb | 426 | net/ax25/af_ax25.c | ax25_set_timer(skb->sk->ax25); |
skb | 427 | net/ax25/af_ax25.c | skb->sk->ax25->state = AX25_STATE_0; |
skb | 430 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 755 | net/ax25/af_ax25.c | int ax25_send_frame(struct sk_buff *skb, ax25_address *src, ax25_address *dest, |
skb | 760 | net/ax25/af_ax25.c | if (skb == NULL) |
skb | 771 | net/ax25/af_ax25.c | ax25_output(ax25, skb); |
skb | 810 | net/ax25/af_ax25.c | ax25_output(ax25, skb); |
skb | 1501 | net/ax25/af_ax25.c | struct sk_buff *skb; |
skb | 1522 | net/ax25/af_ax25.c | if ((skb = skb_dequeue(&sk->receive_queue)) == NULL) { |
skb | 1533 | net/ax25/af_ax25.c | } while (skb == NULL); |
skb | 1535 | net/ax25/af_ax25.c | newsk = skb->sk; |
skb | 1540 | net/ax25/af_ax25.c | skb->sk = NULL; |
skb | 1541 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1583 | net/ax25/af_ax25.c | static int ax25_rcv(struct sk_buff *skb, struct device *dev, ax25_address *dev_addr, struct packet_type *ptype) |
skb | 1599 | net/ax25/af_ax25.c | skb->h.raw = skb->data; |
skb | 1602 | net/ax25/af_ax25.c | if (call_in_firewall(PF_AX25, skb, skb->h.raw) != FW_ACCEPT) { |
skb | 1603 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1612 | net/ax25/af_ax25.c | if (ax25_parse_addr(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL) { |
skb | 1613 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1646 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1652 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1658 | net/ax25/af_ax25.c | build_ax25_addr(skb->data, &src, &dest, &dp, type, MODULUS); |
skb | 1660 | net/ax25/af_ax25.c | if (call_fw_firewall(PF_AX25, skb, skb->data) != FW_ACCEPT) { |
skb | 1661 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1666 | net/ax25/af_ax25.c | skb->arp = 1; |
skb | 1667 | net/ax25/af_ax25.c | ax25_queue_xmit(skb, dev_out, SOPRI_NORMAL); |
skb | 1669 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1678 | net/ax25/af_ax25.c | skb_pull(skb, size_ax25_addr(&dp)); |
skb | 1690 | net/ax25/af_ax25.c | if ((*skb->data & ~0x10) == LAPB_UI) { /* UI frame - bypass LAPB processing */ |
skb | 1691 | net/ax25/af_ax25.c | skb->h.raw = skb->data + 2; /* skip control and pid */ |
skb | 1694 | net/ax25/af_ax25.c | ax25_send_to_raw(raw, skb, skb->data[1]); |
skb | 1697 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1702 | net/ax25/af_ax25.c | switch (skb->data[1]) { |
skb | 1706 | net/ax25/af_ax25.c | skb_pull(skb,2); /* drop PID/CTRL */ |
skb | 1708 | net/ax25/af_ax25.c | ip_rcv(skb, dev, ptype); /* Note ptype here is the wrong one, fix me later */ |
skb | 1713 | net/ax25/af_ax25.c | skb_pull(skb,2); |
skb | 1714 | net/ax25/af_ax25.c | arp_rcv(skb, dev, ptype); /* Note ptype here is wrong... */ |
skb | 1721 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1727 | net/ax25/af_ax25.c | skb_pull(skb, 2); |
skb | 1728 | net/ax25/af_ax25.c | skb_queue_tail(&sk->receive_queue, skb); |
skb | 1729 | net/ax25/af_ax25.c | skb->sk = sk; |
skb | 1730 | net/ax25/af_ax25.c | atomic_add(skb->truesize, &sk->rmem_alloc); |
skb | 1732 | net/ax25/af_ax25.c | sk->data_ready(sk, skb->len); |
skb | 1735 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1740 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); /* Will scan SOCK_AX25 RAW sockets */ |
skb | 1753 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1768 | net/ax25/af_ax25.c | if (ax25_process_rx_frame(ax25, skb, type, dama) == 0) |
skb | 1769 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1778 | net/ax25/af_ax25.c | if ((*skb->data & ~PF) != SABM && (*skb->data & ~PF) != SABME) { |
skb | 1783 | net/ax25/af_ax25.c | if ((*skb->data & ~PF) != DM && mine) |
skb | 1786 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1798 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1804 | net/ax25/af_ax25.c | skb_queue_head(&sk->receive_queue, skb); |
skb | 1806 | net/ax25/af_ax25.c | skb->sk = make; |
skb | 1814 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1822 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1834 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1846 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1861 | net/ax25/af_ax25.c | if ((*skb->data & ~PF) == SABME) { |
skb | 1885 | net/ax25/af_ax25.c | sk->data_ready(sk, skb->len ); |
skb | 1887 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1896 | net/ax25/af_ax25.c | static int kiss_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *ptype) |
skb | 1898 | net/ax25/af_ax25.c | skb->sk = NULL; /* Initially we don't know who its for */ |
skb | 1900 | net/ax25/af_ax25.c | if ((*skb->data & 0x0F) != 0) { |
skb | 1901 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); /* Not a KISS data frame */ |
skb | 1905 | net/ax25/af_ax25.c | skb_pull(skb, AX25_KISS_HEADER_LEN); /* Remove the KISS byte */ |
skb | 1907 | net/ax25/af_ax25.c | return ax25_rcv(skb, dev, (ax25_address *)dev->dev_addr, ptype); |
skb | 1914 | net/ax25/af_ax25.c | static int bpq_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *ptype) |
skb | 1919 | net/ax25/af_ax25.c | skb->sk = NULL; /* Initially we don't know who its for */ |
skb | 1922 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); /* We have no port callsign */ |
skb | 1926 | net/ax25/af_ax25.c | len = skb->data[0] + skb->data[1] * 256 - 5; |
skb | 1928 | net/ax25/af_ax25.c | skb_pull(skb, 2); /* Remove the length bytes */ |
skb | 1929 | net/ax25/af_ax25.c | skb_trim(skb, len); /* Set the length of the data */ |
skb | 1931 | net/ax25/af_ax25.c | return ax25_rcv(skb, dev, port_call, ptype); |
skb | 1941 | net/ax25/af_ax25.c | struct sk_buff *skb; |
skb | 2011 | net/ax25/af_ax25.c | if ((skb = sock_alloc_send_skb(sk, size, 0, 0, &err)) == NULL) |
skb | 2014 | net/ax25/af_ax25.c | skb->sk = sk; |
skb | 2015 | net/ax25/af_ax25.c | skb->free = 1; |
skb | 2016 | net/ax25/af_ax25.c | skb->arp = 1; |
skb | 2018 | net/ax25/af_ax25.c | skb_reserve(skb, size - len); |
skb | 2024 | net/ax25/af_ax25.c | memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); |
skb | 2027 | net/ax25/af_ax25.c | asmptr = skb_push(skb, 1); |
skb | 2036 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_WRITE); |
skb | 2040 | net/ax25/af_ax25.c | ax25_output(sk->ax25, skb); /* Shove it onto the queue and kick */ |
skb | 2044 | net/ax25/af_ax25.c | asmptr = skb_push(skb, 1 + size_ax25_addr(dp)); |
skb | 2058 | net/ax25/af_ax25.c | skb->h.raw = asmptr; |
skb | 2061 | net/ax25/af_ax25.c | printk("base=%p pos=%p\n", skb->data, asmptr); |
skb | 2066 | net/ax25/af_ax25.c | ax25_queue_xmit(skb, sk->ax25->device, SOPRI_NORMAL); |
skb | 2078 | net/ax25/af_ax25.c | struct sk_buff *skb; |
skb | 2097 | net/ax25/af_ax25.c | if ((skb = skb_recv_datagram(sk, flags, noblock, &er)) == NULL) |
skb | 2101 | net/ax25/af_ax25.c | length = skb->len + (skb->data - skb->h.raw); |
skb | 2104 | net/ax25/af_ax25.c | skb_pull(skb, 1); /* Remove PID */ |
skb | 2105 | net/ax25/af_ax25.c | length = skb->len; |
skb | 2106 | net/ax25/af_ax25.c | skb->h.raw = skb->data; |
skb | 2110 | net/ax25/af_ax25.c | skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); |
skb | 2121 | net/ax25/af_ax25.c | ax25_parse_addr(skb->data, skb->len, NULL, &dest, &digi, NULL, &dama); |
skb | 2145 | net/ax25/af_ax25.c | skb_free_datagram(sk, skb); |
skb | 2181 | net/ax25/af_ax25.c | struct sk_buff *skb; |
skb | 2183 | net/ax25/af_ax25.c | if ((skb = skb_peek(&sk->receive_queue)) != NULL) |
skb | 2184 | net/ax25/af_ax25.c | amount = skb->len; |
skb | 2441 | net/ax25/af_ax25.c | void ax25_queue_xmit(struct sk_buff *skb, struct device *dev, int pri) |
skb | 2447 | net/ax25/af_ax25.c | if (call_out_firewall(PF_AX25, skb, skb->data) != FW_ACCEPT) { |
skb | 2448 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_WRITE); |
skb | 2453 | net/ax25/af_ax25.c | skb->protocol = htons (ETH_P_AX25); |
skb | 2460 | net/ax25/af_ax25.c | if(skb_headroom(skb) < AX25_BPQ_HEADER_LEN) { |
skb | 2462 | net/ax25/af_ax25.c | skb->free = 1; |
skb | 2463 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_WRITE); |
skb | 2467 | net/ax25/af_ax25.c | size = skb->len; |
skb | 2469 | net/ax25/af_ax25.c | ptr = skb_push(skb, 2); |
skb | 2474 | net/ax25/af_ax25.c | dev->hard_header(skb, dev, ETH_P_BPQ, bcast_addr, NULL, 0); |
skb | 2481 | net/ax25/af_ax25.c | was_locked = skb_device_locked(skb); |
skb | 2482 | net/ax25/af_ax25.c | dev_queue_xmit(skb, dev, pri); |
skb | 2483 | net/ax25/af_ax25.c | if (was_locked) skb_device_unlock(skb); |
skb | 2489 | net/ax25/af_ax25.c | ptr = skb_push(skb, 1); |
skb | 2498 | net/ax25/af_ax25.c | was_locked = skb_device_locked(skb); |
skb | 2499 | net/ax25/af_ax25.c | dev_queue_xmit(skb, dev, pri); |
skb | 2500 | net/ax25/af_ax25.c | if (was_locked) skb_device_unlock(skb); |
skb | 2513 | net/ax25/af_ax25.c | int ax25_encapsulate(struct sk_buff *skb, struct device *dev, unsigned short type, void *daddr, |
skb | 2517 | net/ax25/af_ax25.c | unsigned char *buff = skb_push(skb, AX25_HEADER_LEN); |
skb | 2562 | net/ax25/af_ax25.c | int ax25_rebuild_header(unsigned char *bp, struct device *dev, unsigned long dest, struct sk_buff *skb) |
skb | 2566 | net/ax25/af_ax25.c | if (arp_find(bp + 1, dest, dev, dev->pa_addr, skb)) |
skb | 2586 | net/ax25/af_ax25.c | struct sk_buff *ourskb=skb_clone(skb, GFP_ATOMIC); |
skb | 2587 | net/ax25/af_ax25.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 2604 | net/ax25/af_ax25.c | ax25_dg_build_path(skb, (ax25_address *)(bp + 1), dev); |
skb | 70 | net/ax25/ax25_in.c | static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb) |
skb | 76 | net/ax25/ax25_in.c | if (!(*skb->data & SEG_FIRST)) { |
skb | 77 | net/ax25/ax25_in.c | if ((ax25->fragno - 1) == (*skb->data & SEG_REM)) { |
skb | 81 | net/ax25/ax25_in.c | ax25->fragno = *skb->data & SEG_REM; |
skb | 82 | net/ax25/ax25_in.c | skb_pull(skb, 1); /* skip fragno */ |
skb | 83 | net/ax25/ax25_in.c | ax25->fraglen += skb->len; |
skb | 84 | net/ax25/ax25_in.c | skb_queue_tail(&ax25->frag_queue, skb); |
skb | 94 | net/ax25/ax25_in.c | skbn->dev = skb->dev; |
skb | 144 | net/ax25/ax25_in.c | if (*skb->data & SEG_FIRST) { |
skb | 145 | net/ax25/ax25_in.c | ax25->fragno = *skb->data & SEG_REM; |
skb | 146 | net/ax25/ax25_in.c | skb_pull(skb, 1); /* skip fragno */ |
skb | 147 | net/ax25/ax25_in.c | ax25->fraglen = skb->len; |
skb | 148 | net/ax25/ax25_in.c | skb_queue_tail(&ax25->frag_queue, skb); |
skb | 160 | net/ax25/ax25_in.c | static int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb) |
skb | 165 | net/ax25/ax25_in.c | if (skb == NULL) return 0; |
skb | 167 | net/ax25/ax25_in.c | pid = *skb->data; |
skb | 174 | net/ax25/ax25_in.c | skb_pull(skb, 1); /* Remove PID */ |
skb | 175 | net/ax25/ax25_in.c | queued = nr_route_frame(skb, ax25); |
skb | 182 | net/ax25/ax25_in.c | skb_pull(skb, 1); /* Remove PID */ |
skb | 183 | net/ax25/ax25_in.c | skb->h.raw = skb->data; |
skb | 185 | net/ax25/ax25_in.c | ip_rcv(skb, ax25->device, NULL); /* Wrong ptype */ |
skb | 192 | net/ax25/ax25_in.c | skb_pull(skb, 1); /* Remove PID */ |
skb | 193 | net/ax25/ax25_in.c | queued = ax25_rx_fragment(ax25, skb); |
skb | 199 | net/ax25/ax25_in.c | if (sock_queue_rcv_skb(ax25->sk, skb) == 0) { |
skb | 216 | net/ax25/ax25_in.c | static int ax25_state1_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type, int dama) |
skb | 292 | net/ax25/ax25_in.c | static int ax25_state2_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type) |
skb | 372 | net/ax25/ax25_in.c | static int ax25_state3_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type, int dama) |
skb | 504 | net/ax25/ax25_in.c | queued = ax25_rx_iframe(ax25, skb); |
skb | 564 | net/ax25/ax25_in.c | static int ax25_state4_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type, int dama) |
skb | 750 | net/ax25/ax25_in.c | queued = ax25_rx_iframe(ax25, skb); |
skb | 808 | net/ax25/ax25_in.c | int ax25_process_rx_frame(ax25_cb *ax25, struct sk_buff *skb, int type, int dama) |
skb | 823 | net/ax25/ax25_in.c | frametype = ax25_decode(ax25, skb, &ns, &nr, &pf); |
skb | 827 | net/ax25/ax25_in.c | queued = ax25_state1_machine(ax25, skb, frametype, pf, type, dama); |
skb | 830 | net/ax25/ax25_in.c | queued = ax25_state2_machine(ax25, skb, frametype, pf, type); |
skb | 833 | net/ax25/ax25_in.c | queued = ax25_state3_machine(ax25, skb, frametype, ns, nr, pf, type, dama); |
skb | 836 | net/ax25/ax25_in.c | queued = ax25_state4_machine(ax25, skb, frametype, ns, nr, pf, type, dama); |
skb | 60 | net/ax25/ax25_out.c | void ax25_output(ax25_cb *ax25, struct sk_buff *skb) |
skb | 76 | net/ax25/ax25_out.c | if ((skb->len - 1) > mtu) { |
skb | 77 | net/ax25/ax25_out.c | switch (*skb->data) { |
skb | 89 | net/ax25/ax25_out.c | skb_pull(skb, 1); /* skip PID */ |
skb | 94 | net/ax25/ax25_out.c | fragno = skb->len / mtu; |
skb | 95 | net/ax25/ax25_out.c | if (skb->len % mtu == 0) fragno--; |
skb | 97 | net/ax25/ax25_out.c | frontlen = skb_headroom(skb); /* Address space + CTRL */ |
skb | 99 | net/ax25/ax25_out.c | while (skb->len > 0) { |
skb | 100 | net/ax25/ax25_out.c | if (skb->sk != NULL) { |
skb | 101 | net/ax25/ax25_out.c | if ((skbn = sock_alloc_send_skb(skb->sk, mtu + 2 + frontlen, 0, 0, &err)) == NULL) |
skb | 108 | net/ax25/ax25_out.c | skbn->sk = skb->sk; |
skb | 112 | net/ax25/ax25_out.c | len = (mtu > skb->len) ? skb->len : mtu; |
skb | 117 | net/ax25/ax25_out.c | memcpy(skb_put(skbn, len), skb->data, len); |
skb | 129 | net/ax25/ax25_out.c | memcpy(skb_put(skbn, len), skb->data, len); |
skb | 134 | net/ax25/ax25_out.c | skb_pull(skb, len); |
skb | 138 | net/ax25/ax25_out.c | skb->free = 1; |
skb | 139 | net/ax25/ax25_out.c | kfree_skb(skb, FREE_WRITE); |
skb | 141 | net/ax25/ax25_out.c | skb_queue_tail(&ax25->write_queue, skb); /* Throw it on the queue */ |
skb | 154 | net/ax25/ax25_out.c | static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit) |
skb | 158 | net/ax25/ax25_out.c | if (skb == NULL) |
skb | 162 | net/ax25/ax25_out.c | frame = skb_push(skb, 1); |
skb | 169 | net/ax25/ax25_out.c | frame = skb_push(skb, 2); |
skb | 177 | net/ax25/ax25_out.c | ax25_transmit_buffer(ax25, skb, C_COMMAND); |
skb | 182 | net/ax25/ax25_out.c | struct sk_buff *skb, *skbn; |
skb | 206 | net/ax25/ax25_out.c | skb = skb_dequeue(&ax25->write_queue); |
skb | 209 | net/ax25/ax25_out.c | if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { |
skb | 210 | net/ax25/ax25_out.c | skb_queue_head(&ax25->write_queue, skb); |
skb | 232 | net/ax25/ax25_out.c | skb_queue_tail(&ax25->ack_queue, skb); |
skb | 236 | net/ax25/ax25_out.c | } while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL); |
skb | 249 | net/ax25/ax25_out.c | void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type) |
skb | 264 | net/ax25/ax25_out.c | if (skb_headroom(skb) < size_ax25_addr(ax25->digipeat)) { |
skb | 266 | net/ax25/ax25_out.c | skb->free = 1; |
skb | 267 | net/ax25/ax25_out.c | kfree_skb(skb, FREE_WRITE); |
skb | 271 | net/ax25/ax25_out.c | ptr = skb_push(skb, size_ax25_addr(ax25->digipeat)); |
skb | 274 | net/ax25/ax25_out.c | skb->arp = 1; |
skb | 276 | net/ax25/ax25_out.c | ax25_queue_xmit(skb, ax25->device, SOPRI_NORMAL); |
skb | 580 | net/ax25/ax25_route.c | void ax25_dg_build_path(struct sk_buff *skb, ax25_address *addr, struct device *dev) |
skb | 599 | net/ax25/ax25_route.c | if (skb_headroom(skb) < len) { |
skb | 604 | net/ax25/ax25_route.c | memcpy(&dest, skb->data + 1, AX25_ADDR_LEN); |
skb | 605 | net/ax25/ax25_route.c | memcpy(&src, skb->data + 8, AX25_ADDR_LEN); |
skb | 608 | net/ax25/ax25_route.c | bp = skb_push(skb, len); |
skb | 63 | net/ax25/ax25_subr.c | struct sk_buff *skb; |
skb | 65 | net/ax25/ax25_subr.c | while ((skb = skb_dequeue(&ax25->write_queue)) != NULL) { |
skb | 66 | net/ax25/ax25_subr.c | skb->free = 1; |
skb | 67 | net/ax25/ax25_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 70 | net/ax25/ax25_subr.c | while ((skb = skb_dequeue(&ax25->ack_queue)) != NULL) { |
skb | 71 | net/ax25/ax25_subr.c | skb->free = 1; |
skb | 72 | net/ax25/ax25_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 75 | net/ax25/ax25_subr.c | while ((skb = skb_dequeue(&ax25->reseq_queue)) != NULL) { |
skb | 76 | net/ax25/ax25_subr.c | kfree_skb(skb, FREE_READ); |
skb | 79 | net/ax25/ax25_subr.c | while ((skb = skb_dequeue(&ax25->frag_queue)) != NULL) { |
skb | 80 | net/ax25/ax25_subr.c | kfree_skb(skb, FREE_READ); |
skb | 91 | net/ax25/ax25_subr.c | struct sk_buff *skb; |
skb | 98 | net/ax25/ax25_subr.c | skb = skb_dequeue(&ax25->ack_queue); |
skb | 99 | net/ax25/ax25_subr.c | skb->free = 1; |
skb | 100 | net/ax25/ax25_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 114 | net/ax25/ax25_subr.c | struct sk_buff *skb, *skb_prev = NULL; |
skb | 121 | net/ax25/ax25_subr.c | while ((skb = skb_dequeue(&ax25->ack_queue)) != NULL) { |
skb | 123 | net/ax25/ax25_subr.c | skb_queue_head(&ax25->write_queue, skb); |
skb | 125 | net/ax25/ax25_subr.c | skb_append(skb_prev, skb); |
skb | 126 | net/ax25/ax25_subr.c | skb_prev = skb; |
skb | 152 | net/ax25/ax25_subr.c | int ax25_decode(ax25_cb *ax25, struct sk_buff *skb, int *ns, int *nr, int *pf) |
skb | 157 | net/ax25/ax25_subr.c | frame = skb->data; |
skb | 174 | net/ax25/ax25_subr.c | skb_pull(skb, 1); |
skb | 181 | net/ax25/ax25_subr.c | skb_pull(skb, 2); |
skb | 186 | net/ax25/ax25_subr.c | skb_pull(skb, 2); |
skb | 190 | net/ax25/ax25_subr.c | skb_pull(skb, 1); |
skb | 204 | net/ax25/ax25_subr.c | struct sk_buff *skb; |
skb | 211 | net/ax25/ax25_subr.c | if ((skb = alloc_skb(AX25_BPQ_HEADER_LEN + size_ax25_addr(ax25->digipeat) + 2, GFP_ATOMIC)) == NULL) |
skb | 214 | net/ax25/ax25_subr.c | skb_reserve(skb, AX25_BPQ_HEADER_LEN + size_ax25_addr(ax25->digipeat)); |
skb | 217 | net/ax25/ax25_subr.c | skb->sk = ax25->sk; |
skb | 218 | net/ax25/ax25_subr.c | atomic_add(skb->truesize, &ax25->sk->wmem_alloc); |
skb | 223 | net/ax25/ax25_subr.c | dptr = skb_put(skb, 1); |
skb | 230 | net/ax25/ax25_subr.c | dptr = skb_put(skb, 1); |
skb | 234 | net/ax25/ax25_subr.c | dptr = skb_put(skb, 2); |
skb | 241 | net/ax25/ax25_subr.c | skb->free = 1; |
skb | 243 | net/ax25/ax25_subr.c | ax25_transmit_buffer(ax25, skb, type); |
skb | 253 | net/ax25/ax25_subr.c | struct sk_buff *skb; |
skb | 260 | net/ax25/ax25_subr.c | if ((skb = alloc_skb(AX25_BPQ_HEADER_LEN + size_ax25_addr(digi) + 1, GFP_ATOMIC)) == NULL) |
skb | 263 | net/ax25/ax25_subr.c | skb_reserve(skb, AX25_BPQ_HEADER_LEN + size_ax25_addr(digi)); |
skb | 267 | net/ax25/ax25_subr.c | dptr = skb_put(skb, 1); |
skb | 268 | net/ax25/ax25_subr.c | skb->sk = NULL; |
skb | 276 | net/ax25/ax25_subr.c | dptr = skb_push(skb, size_ax25_addr(digi)); |
skb | 279 | net/ax25/ax25_subr.c | skb->arp = 1; |
skb | 280 | net/ax25/ax25_subr.c | skb->free = 1; |
skb | 282 | net/ax25/ax25_subr.c | ax25_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 487 | net/ax25/ax25_subr.c | struct sk_buff *skb; |
skb | 493 | net/ax25/ax25_subr.c | if ((skb = alloc_skb(2, GFP_ATOMIC)) == NULL) |
skb | 496 | net/ax25/ax25_subr.c | skb->free = 1; |
skb | 497 | net/ax25/ax25_subr.c | skb->arp = 1; |
skb | 500 | net/ax25/ax25_subr.c | skb->sk = ax25->sk; |
skb | 501 | net/ax25/ax25_subr.c | atomic_add(skb->truesize, &ax25->sk->wmem_alloc); |
skb | 504 | net/ax25/ax25_subr.c | skb->protocol = htons(ETH_P_AX25); |
skb | 506 | net/ax25/ax25_subr.c | p = skb_put(skb, 2); |
skb | 511 | net/ax25/ax25_subr.c | dev_queue_xmit(skb, ax25->device, SOPRI_NORMAL); |
skb | 78 | net/core/datagram.c | struct sk_buff *skb; |
skb | 118 | net/core/datagram.c | skb=skb_peek(&sk->receive_queue); |
skb | 119 | net/core/datagram.c | if(skb!=NULL) |
skb | 120 | net/core/datagram.c | skb->users++; |
skb | 122 | net/core/datagram.c | if(skb==NULL) /* shouldn't happen but .. */ |
skb | 124 | net/core/datagram.c | return skb; |
skb | 126 | net/core/datagram.c | skb = skb_dequeue(&sk->receive_queue); |
skb | 127 | net/core/datagram.c | if (!skb) /* Avoid race if someone beats us to the data */ |
skb | 129 | net/core/datagram.c | skb->users++; |
skb | 130 | net/core/datagram.c | return skb; |
skb | 138 | net/core/datagram.c | void skb_free_datagram(struct sock * sk, struct sk_buff *skb) |
skb | 144 | net/core/datagram.c | skb->users--; |
skb | 145 | net/core/datagram.c | if(skb->users <= 0) { |
skb | 148 | net/core/datagram.c | if(!skb->next && !skb->prev) |
skb | 149 | net/core/datagram.c | kfree_skb(skb,FREE_READ); |
skb | 159 | net/core/datagram.c | void skb_copy_datagram(struct sk_buff *skb, int offset, char *to, int size) |
skb | 161 | net/core/datagram.c | memcpy_tofs(to,skb->h.raw+offset,size); |
skb | 169 | net/core/datagram.c | void skb_copy_datagram_iovec(struct sk_buff *skb, int offset, struct iovec *to, int size) |
skb | 171 | net/core/datagram.c | memcpy_toiovec(to,skb->h.raw+offset,size); |
skb | 305 | net/core/dev.c | struct sk_buff *skb; |
skb | 306 | net/core/dev.c | while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL) |
skb | 307 | net/core/dev.c | if(skb->free) |
skb | 308 | net/core/dev.c | kfree_skb(skb,FREE_WRITE); |
skb | 338 | net/core/dev.c | void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri) |
skb | 346 | net/core/dev.c | if(pri>=0 && !skb_device_locked(skb)) |
skb | 347 | net/core/dev.c | skb_device_lock(skb); /* Shove a lock on the frame */ |
skb | 349 | net/core/dev.c | IS_SKB(skb); |
skb | 351 | net/core/dev.c | skb->dev = dev; |
skb | 378 | net/core/dev.c | if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) { |
skb | 392 | net/core/dev.c | skb->dev = dev = net_alias_main_dev(dev); |
skb | 402 | net/core/dev.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 406 | net/core/dev.c | skb_device_unlock(skb); /* Buffer is on the device queue and can be freed safely */ |
skb | 407 | net/core/dev.c | __skb_queue_tail(list, skb); |
skb | 408 | net/core/dev.c | skb = __skb_dequeue(list); |
skb | 409 | net/core/dev.c | skb_device_lock(skb); /* New buffer needs locking down */ |
skb | 416 | net/core/dev.c | skb->stamp=xtime; |
skb | 423 | net/core/dev.c | ((struct sock *)ptype->data != skb->sk)) |
skb | 426 | net/core/dev.c | if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) |
skb | 430 | net/core/dev.c | ptype->func(skb2, skb->dev, ptype); |
skb | 436 | net/core/dev.c | if (dev->hard_start_xmit(skb, dev) == 0) { |
skb | 450 | net/core/dev.c | skb_device_unlock(skb); |
skb | 451 | net/core/dev.c | __skb_queue_head(list,skb); |
skb | 461 | net/core/dev.c | void netif_rx(struct sk_buff *skb) |
skb | 471 | net/core/dev.c | skb->sk = NULL; |
skb | 472 | net/core/dev.c | skb->free = 1; |
skb | 473 | net/core/dev.c | if(skb->stamp.tv_sec==0) |
skb | 474 | net/core/dev.c | skb->stamp = xtime; |
skb | 487 | net/core/dev.c | kfree_skb(skb, FREE_READ); |
skb | 495 | net/core/dev.c | IS_SKB(skb); |
skb | 497 | net/core/dev.c | skb_queue_tail(&backlog,skb); |
skb | 577 | net/core/dev.c | struct sk_buff * skb = backlog.next; |
skb | 583 | net/core/dev.c | __skb_unlink(skb, &backlog); |
skb | 594 | net/core/dev.c | skb->h.raw = skb->data; |
skb | 600 | net/core/dev.c | type = skb->protocol; |
skb | 612 | net/core/dev.c | struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC); |
skb | 614 | net/core/dev.c | pt_prev->func(skb2,skb->dev, pt_prev); |
skb | 621 | net/core/dev.c | if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev)) |
skb | 631 | net/core/dev.c | skb2=skb_clone(skb, GFP_ATOMIC); |
skb | 639 | net/core/dev.c | pt_prev->func(skb2, skb->dev, pt_prev); |
skb | 651 | net/core/dev.c | pt_prev->func(skb, skb->dev, pt_prev); |
skb | 657 | net/core/dev.c | kfree_skb(skb, FREE_WRITE); |
skb | 711 | net/core/dev.c | struct sk_buff *skb; |
skb | 713 | net/core/dev.c | skb = head->next; |
skb | 714 | net/core/dev.c | __skb_unlink(skb, head); |
skb | 718 | net/core/dev.c | skb_device_lock(skb); |
skb | 724 | net/core/dev.c | dev_queue_xmit(skb,dev,-i - 1); |
skb | 96 | net/core/firewall.c | int call_fw_firewall(int pf, struct sk_buff *skb, void *phdr) |
skb | 102 | net/core/firewall.c | int rc=fw->fw_forward(fw,pf,skb,phdr); |
skb | 114 | net/core/firewall.c | int call_in_firewall(int pf, struct sk_buff *skb, void *phdr) |
skb | 120 | net/core/firewall.c | int rc=fw->fw_input(fw,pf,skb,phdr); |
skb | 128 | net/core/firewall.c | int call_out_firewall(int pf, struct sk_buff *skb, void *phdr) |
skb | 134 | net/core/firewall.c | int rc=fw->fw_output(fw,pf,skb,phdr); |
skb | 71 | net/core/net_alias.c | static int net_alias_hard_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 230 | net/core/net_alias.c | net_alias_hard_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 233 | net/core/net_alias.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 86 | net/core/skbuff.c | int skb_check(struct sk_buff *skb, int head, int line, char *file) |
skb | 89 | net/core/skbuff.c | if (skb->magic_debug_cookie != SK_HEAD_SKB) { |
skb | 94 | net/core/skbuff.c | if (!skb->next || !skb->prev) { |
skb | 98 | net/core/skbuff.c | if (skb->next->magic_debug_cookie != SK_HEAD_SKB |
skb | 99 | net/core/skbuff.c | && skb->next->magic_debug_cookie != SK_GOOD_SKB) { |
skb | 104 | net/core/skbuff.c | if (skb->prev->magic_debug_cookie != SK_HEAD_SKB |
skb | 105 | net/core/skbuff.c | && skb->prev->magic_debug_cookie != SK_GOOD_SKB) { |
skb | 112 | net/core/skbuff.c | struct sk_buff *skb2 = skb->next; |
skb | 114 | net/core/skbuff.c | while (skb2 != skb && i < 5) { |
skb | 126 | net/core/skbuff.c | if (skb->next != NULL && skb->next->magic_debug_cookie != SK_HEAD_SKB |
skb | 127 | net/core/skbuff.c | && skb->next->magic_debug_cookie != SK_GOOD_SKB) { |
skb | 132 | net/core/skbuff.c | if (skb->prev != NULL && skb->prev->magic_debug_cookie != SK_HEAD_SKB |
skb | 133 | net/core/skbuff.c | && skb->prev->magic_debug_cookie != SK_GOOD_SKB) { |
skb | 140 | net/core/skbuff.c | if(skb->magic_debug_cookie==SK_FREED_SKB) |
skb | 145 | net/core/skbuff.c | skb,skb->truesize,skb->free); |
skb | 148 | net/core/skbuff.c | if(skb->magic_debug_cookie!=SK_GOOD_SKB) |
skb | 152 | net/core/skbuff.c | skb,skb->truesize,skb->free); |
skb | 155 | net/core/skbuff.c | if(skb->head>skb->data) |
skb | 159 | net/core/skbuff.c | skb,skb->head,skb->data); |
skb | 162 | net/core/skbuff.c | if(skb->tail>skb->end) |
skb | 166 | net/core/skbuff.c | skb,skb->tail,skb->end); |
skb | 169 | net/core/skbuff.c | if(skb->data>skb->tail) |
skb | 173 | net/core/skbuff.c | skb,skb->data,skb->tail); |
skb | 176 | net/core/skbuff.c | if(skb->tail-skb->data!=skb->len) |
skb | 180 | net/core/skbuff.c | skb,skb->data,skb->end,skb->len); |
skb | 183 | net/core/skbuff.c | if((unsigned long) skb->end > (unsigned long) skb) |
skb | 187 | net/core/skbuff.c | skb,skb->end); |
skb | 465 | net/core/skbuff.c | void skb_unlink(struct sk_buff *skb) |
skb | 472 | net/core/skbuff.c | IS_SKB(skb); |
skb | 474 | net/core/skbuff.c | if(skb->list) |
skb | 476 | net/core/skbuff.c | skb->list->qlen--; |
skb | 477 | net/core/skbuff.c | skb->next->prev = skb->prev; |
skb | 478 | net/core/skbuff.c | skb->prev->next = skb->next; |
skb | 479 | net/core/skbuff.c | skb->next = NULL; |
skb | 480 | net/core/skbuff.c | skb->prev = NULL; |
skb | 481 | net/core/skbuff.c | skb->list = NULL; |
skb | 490 | net/core/skbuff.c | void __skb_unlink(struct sk_buff *skb) |
skb | 492 | net/core/skbuff.c | IS_SKB(skb); |
skb | 494 | net/core/skbuff.c | if(skb->list) |
skb | 496 | net/core/skbuff.c | skb->list->qlen--; |
skb | 497 | net/core/skbuff.c | skb->next->prev = skb->prev; |
skb | 498 | net/core/skbuff.c | skb->prev->next = skb->next; |
skb | 499 | net/core/skbuff.c | skb->next = NULL; |
skb | 500 | net/core/skbuff.c | skb->prev = NULL; |
skb | 501 | net/core/skbuff.c | skb->list = NULL; |
skb | 513 | net/core/skbuff.c | unsigned char *skb_put(struct sk_buff *skb, int len) |
skb | 515 | net/core/skbuff.c | unsigned char *tmp=skb->tail; |
skb | 516 | net/core/skbuff.c | IS_SKB(skb); |
skb | 517 | net/core/skbuff.c | skb->tail+=len; |
skb | 518 | net/core/skbuff.c | skb->len+=len; |
skb | 519 | net/core/skbuff.c | IS_SKB(skb); |
skb | 520 | net/core/skbuff.c | if(skb->tail>skb->end) |
skb | 525 | net/core/skbuff.c | unsigned char *skb_push(struct sk_buff *skb, int len) |
skb | 527 | net/core/skbuff.c | IS_SKB(skb); |
skb | 528 | net/core/skbuff.c | skb->data-=len; |
skb | 529 | net/core/skbuff.c | skb->len+=len; |
skb | 530 | net/core/skbuff.c | IS_SKB(skb); |
skb | 531 | net/core/skbuff.c | if(skb->data<skb->head) |
skb | 533 | net/core/skbuff.c | return skb->data; |
skb | 536 | net/core/skbuff.c | unsigned char * skb_pull(struct sk_buff *skb, int len) |
skb | 538 | net/core/skbuff.c | IS_SKB(skb); |
skb | 539 | net/core/skbuff.c | if(len>skb->len) |
skb | 541 | net/core/skbuff.c | skb->data+=len; |
skb | 542 | net/core/skbuff.c | skb->len-=len; |
skb | 543 | net/core/skbuff.c | return skb->data; |
skb | 546 | net/core/skbuff.c | int skb_headroom(struct sk_buff *skb) |
skb | 548 | net/core/skbuff.c | IS_SKB(skb); |
skb | 549 | net/core/skbuff.c | return skb->data-skb->head; |
skb | 552 | net/core/skbuff.c | int skb_tailroom(struct sk_buff *skb) |
skb | 554 | net/core/skbuff.c | IS_SKB(skb); |
skb | 555 | net/core/skbuff.c | return skb->end-skb->tail; |
skb | 558 | net/core/skbuff.c | void skb_reserve(struct sk_buff *skb, int len) |
skb | 560 | net/core/skbuff.c | IS_SKB(skb); |
skb | 561 | net/core/skbuff.c | skb->data+=len; |
skb | 562 | net/core/skbuff.c | skb->tail+=len; |
skb | 563 | net/core/skbuff.c | if(skb->tail>skb->end) |
skb | 565 | net/core/skbuff.c | if(skb->data<skb->head) |
skb | 567 | net/core/skbuff.c | IS_SKB(skb); |
skb | 570 | net/core/skbuff.c | void skb_trim(struct sk_buff *skb, int len) |
skb | 572 | net/core/skbuff.c | IS_SKB(skb); |
skb | 573 | net/core/skbuff.c | if(skb->len>len) |
skb | 575 | net/core/skbuff.c | skb->len=len; |
skb | 576 | net/core/skbuff.c | skb->tail=skb->data+len; |
skb | 589 | net/core/skbuff.c | void kfree_skb(struct sk_buff *skb, int rw) |
skb | 591 | net/core/skbuff.c | if (skb == NULL) |
skb | 598 | net/core/skbuff.c | IS_SKB(skb); |
skb | 600 | net/core/skbuff.c | if (skb->lock) |
skb | 602 | net/core/skbuff.c | skb->free = 3; /* Free when unlocked */ |
skb | 606 | net/core/skbuff.c | if (skb->free == 2) |
skb | 609 | net/core/skbuff.c | if (skb->list) |
skb | 613 | net/core/skbuff.c | if(skb->destructor) |
skb | 614 | net/core/skbuff.c | skb->destructor(skb); |
skb | 615 | net/core/skbuff.c | if (skb->sk) |
skb | 617 | net/core/skbuff.c | struct sock * sk = skb->sk; |
skb | 621 | net/core/skbuff.c | sock_rfree(sk, skb); |
skb | 623 | net/core/skbuff.c | sock_wfree(sk, skb); |
skb | 629 | net/core/skbuff.c | atomic_sub(skb->truesize, &sk->rmem_alloc); |
skb | 631 | net/core/skbuff.c | atomic_sub(skb->truesize, &sk->wmem_alloc); |
skb | 635 | net/core/skbuff.c | kfree_skbmem(skb); |
skb | 639 | net/core/skbuff.c | kfree_skbmem(skb); |
skb | 648 | net/core/skbuff.c | struct sk_buff *skb; |
skb | 676 | net/core/skbuff.c | if(skb->magic_debug_cookie == SK_GOOD_SKB) |
skb | 677 | net/core/skbuff.c | printk("Kernel kmalloc handed us an existing skb (%p)\n",skb); |
skb | 688 | net/core/skbuff.c | skb=(struct sk_buff *)(bptr+size)-1; |
skb | 690 | net/core/skbuff.c | skb->count = 1; /* only one reference to this */ |
skb | 691 | net/core/skbuff.c | skb->data_skb = NULL; /* and we're our own data skb */ |
skb | 693 | net/core/skbuff.c | skb->free = 2; /* Invalid so we pick up forgetful users */ |
skb | 694 | net/core/skbuff.c | skb->lock = 0; |
skb | 695 | net/core/skbuff.c | skb->pkt_type = PACKET_HOST; /* Default type */ |
skb | 696 | net/core/skbuff.c | skb->prev = skb->next = skb->link3 = NULL; |
skb | 697 | net/core/skbuff.c | skb->list = NULL; |
skb | 698 | net/core/skbuff.c | skb->sk = NULL; |
skb | 699 | net/core/skbuff.c | skb->truesize=size; |
skb | 700 | net/core/skbuff.c | skb->localroute=0; |
skb | 701 | net/core/skbuff.c | skb->stamp.tv_sec=0; /* No idea about time */ |
skb | 702 | net/core/skbuff.c | skb->localroute = 0; |
skb | 703 | net/core/skbuff.c | skb->ip_summed = 0; |
skb | 704 | net/core/skbuff.c | memset(skb->proto_priv, 0, sizeof(skb->proto_priv)); |
skb | 707 | net/core/skbuff.c | skb->magic_debug_cookie = SK_GOOD_SKB; |
skb | 709 | net/core/skbuff.c | skb->users = 0; |
skb | 711 | net/core/skbuff.c | skb->head=bptr; |
skb | 712 | net/core/skbuff.c | skb->data=bptr; |
skb | 713 | net/core/skbuff.c | skb->tail=bptr; |
skb | 714 | net/core/skbuff.c | skb->end=bptr+len; |
skb | 715 | net/core/skbuff.c | skb->len=0; |
skb | 716 | net/core/skbuff.c | skb->destructor=NULL; |
skb | 717 | net/core/skbuff.c | return skb; |
skb | 724 | net/core/skbuff.c | static inline void __kfree_skbmem(struct sk_buff *skb) |
skb | 727 | net/core/skbuff.c | if (--skb->count <= 0) { |
skb | 728 | net/core/skbuff.c | kfree(skb->head); |
skb | 733 | net/core/skbuff.c | void kfree_skbmem(struct sk_buff *skb) |
skb | 736 | net/core/skbuff.c | void * addr = skb->head; |
skb | 741 | net/core/skbuff.c | if (--skb->count <= 0) { |
skb | 743 | net/core/skbuff.c | if (skb->data_skb) { |
skb | 744 | net/core/skbuff.c | addr = skb; |
skb | 745 | net/core/skbuff.c | __kfree_skbmem(skb->data_skb); |
skb | 758 | net/core/skbuff.c | struct sk_buff *skb_clone(struct sk_buff *skb, int priority) |
skb | 762 | net/core/skbuff.c | IS_SKB(skb); |
skb | 766 | net/core/skbuff.c | memcpy(n, skb, sizeof(*n)); |
skb | 768 | net/core/skbuff.c | if (skb->data_skb) |
skb | 769 | net/core/skbuff.c | skb = skb->data_skb; |
skb | 770 | net/core/skbuff.c | atomic_inc(&skb->count); |
skb | 773 | net/core/skbuff.c | n->data_skb = skb; |
skb | 788 | net/core/skbuff.c | struct sk_buff *skb_copy(struct sk_buff *skb, int priority) |
skb | 797 | net/core/skbuff.c | IS_SKB(skb); |
skb | 799 | net/core/skbuff.c | n=alloc_skb(skb->truesize-sizeof(struct sk_buff),priority); |
skb | 807 | net/core/skbuff.c | offset=n->head-skb->head; |
skb | 810 | net/core/skbuff.c | skb_reserve(n,skb->data-skb->head); |
skb | 812 | net/core/skbuff.c | skb_put(n,skb->len); |
skb | 814 | net/core/skbuff.c | memcpy(n->head,skb->head,skb->end-skb->head); |
skb | 818 | net/core/skbuff.c | n->when=skb->when; |
skb | 819 | net/core/skbuff.c | n->dev=skb->dev; |
skb | 820 | net/core/skbuff.c | n->h.raw=skb->h.raw+offset; |
skb | 821 | net/core/skbuff.c | n->mac.raw=skb->mac.raw+offset; |
skb | 822 | net/core/skbuff.c | n->ip_hdr=(struct iphdr *)(((char *)skb->ip_hdr)+offset); |
skb | 823 | net/core/skbuff.c | n->saddr=skb->saddr; |
skb | 824 | net/core/skbuff.c | n->daddr=skb->daddr; |
skb | 825 | net/core/skbuff.c | n->raddr=skb->raddr; |
skb | 826 | net/core/skbuff.c | n->seq=skb->seq; |
skb | 827 | net/core/skbuff.c | n->end_seq=skb->end_seq; |
skb | 828 | net/core/skbuff.c | n->ack_seq=skb->ack_seq; |
skb | 829 | net/core/skbuff.c | n->acked=skb->acked; |
skb | 830 | net/core/skbuff.c | memcpy(n->proto_priv, skb->proto_priv, sizeof(skb->proto_priv)); |
skb | 831 | net/core/skbuff.c | n->used=skb->used; |
skb | 833 | net/core/skbuff.c | n->arp=skb->arp; |
skb | 837 | net/core/skbuff.c | n->pkt_type=skb->pkt_type; |
skb | 838 | net/core/skbuff.c | n->stamp=skb->stamp; |
skb | 848 | net/core/skbuff.c | void skb_device_lock(struct sk_buff *skb) |
skb | 850 | net/core/skbuff.c | if(skb->lock) |
skb | 854 | net/core/skbuff.c | skb->lock++; |
skb | 857 | net/core/skbuff.c | void skb_device_unlock(struct sk_buff *skb) |
skb | 859 | net/core/skbuff.c | if(skb->lock==0) |
skb | 861 | net/core/skbuff.c | skb->lock--; |
skb | 862 | net/core/skbuff.c | if(skb->lock==0) |
skb | 866 | net/core/skbuff.c | void dev_kfree_skb(struct sk_buff *skb, int mode) |
skb | 872 | net/core/skbuff.c | if(skb->lock) |
skb | 875 | net/core/skbuff.c | skb->lock--; |
skb | 877 | net/core/skbuff.c | if (!skb->lock && (skb->free == 1 || skb->free == 3)) |
skb | 880 | net/core/skbuff.c | kfree_skb(skb,mode); |
skb | 888 | net/core/skbuff.c | struct sk_buff *skb; |
skb | 890 | net/core/skbuff.c | skb = alloc_skb(length+16, GFP_ATOMIC); |
skb | 891 | net/core/skbuff.c | if (skb) |
skb | 892 | net/core/skbuff.c | skb_reserve(skb,16); |
skb | 893 | net/core/skbuff.c | return skb; |
skb | 896 | net/core/skbuff.c | int skb_device_locked(struct sk_buff *skb) |
skb | 898 | net/core/skbuff.c | return skb->lock? 1 : 0; |
skb | 331 | net/core/sock.c | struct sk_buff * skb = alloc_skb(size, priority); |
skb | 332 | net/core/sock.c | if (skb) |
skb | 333 | net/core/sock.c | atomic_add(skb->truesize, &sk->wmem_alloc); |
skb | 334 | net/core/sock.c | return skb; |
skb | 345 | net/core/sock.c | struct sk_buff *skb = alloc_skb(size, priority); |
skb | 346 | net/core/sock.c | if (skb) |
skb | 347 | net/core/sock.c | atomic_add(skb->truesize, &sk->rmem_alloc); |
skb | 348 | net/core/sock.c | return skb; |
skb | 387 | net/core/sock.c | void sock_wfree(struct sock *sk, struct sk_buff *skb) |
skb | 389 | net/core/sock.c | int s=skb->truesize; |
skb | 391 | net/core/sock.c | IS_SKB(skb); |
skb | 393 | net/core/sock.c | kfree_skbmem(skb); |
skb | 403 | net/core/sock.c | void sock_rfree(struct sock *sk, struct sk_buff *skb) |
skb | 405 | net/core/sock.c | int s=skb->truesize; |
skb | 407 | net/core/sock.c | IS_SKB(skb); |
skb | 409 | net/core/sock.c | kfree_skbmem(skb); |
skb | 422 | net/core/sock.c | struct sk_buff *skb; |
skb | 444 | net/core/sock.c | skb = sock_wmalloc(sk, size, 0, sk->allocation); |
skb | 449 | net/core/sock.c | skb = sock_wmalloc(sk, size, 0 , GFP_BUFFER); |
skb | 450 | net/core/sock.c | if(!skb) |
skb | 451 | net/core/sock.c | skb=sock_wmalloc(sk, fallback, 0, GFP_KERNEL); |
skb | 458 | net/core/sock.c | if(skb==NULL) |
skb | 508 | net/core/sock.c | while(skb==NULL); |
skb | 510 | net/core/sock.c | return skb; |
skb | 523 | net/core/sock.c | struct sk_buff * skb = sk->back_log.next; |
skb | 524 | net/core/sock.c | __skb_unlink(skb, &sk->back_log); |
skb | 525 | net/core/sock.c | sk->prot->rcv(skb, skb->dev, (struct options*)skb->proto_priv, |
skb | 526 | net/core/sock.c | skb->saddr, skb->len, skb->daddr, 1, |
skb | 90 | net/ethernet/eth.c | int eth_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 93 | net/ethernet/eth.c | struct ethhdr *eth = (struct ethhdr *)skb_push(skb,ETH_HLEN); |
skb | 141 | net/ethernet/eth.c | struct sk_buff *skb) |
skb | 160 | net/ethernet/eth.c | return arp_find(eth->h_dest, dst, dev, dev->pa_addr, skb)? 1 : 0; |
skb | 173 | net/ethernet/eth.c | unsigned short eth_type_trans(struct sk_buff *skb, struct device *dev) |
skb | 178 | net/ethernet/eth.c | skb->mac.raw=skb->data; |
skb | 179 | net/ethernet/eth.c | skb_pull(skb,dev->hard_header_len); |
skb | 180 | net/ethernet/eth.c | eth= skb->mac.ethernet; |
skb | 185 | net/ethernet/eth.c | skb->pkt_type=PACKET_BROADCAST; |
skb | 187 | net/ethernet/eth.c | skb->pkt_type=PACKET_MULTICAST; |
skb | 198 | net/ethernet/eth.c | skb->pkt_type=PACKET_OTHERHOST; |
skb | 204 | net/ethernet/eth.c | rawp = skb->data; |
skb | 9 | net/ethernet/pe2.c | struct sk_buff *skb, unsigned char *dest_node) |
skb | 11 | net/ethernet/pe2.c | struct device *dev = skb->dev; |
skb | 13 | net/ethernet/pe2.c | skb->protocol = htons (ETH_P_IPX); |
skb | 14 | net/ethernet/pe2.c | dev->hard_header(skb, dev, ETH_P_IPX, dest_node, NULL, skb->len); |
skb | 292 | net/ipv4/af_inet.c | struct sk_buff *skb; |
skb | 310 | net/ipv4/af_inet.c | while ((skb = tcp_dequeue_partial(sk)) != NULL) |
skb | 312 | net/ipv4/af_inet.c | IS_SKB(skb); |
skb | 313 | net/ipv4/af_inet.c | kfree_skb(skb, FREE_WRITE); |
skb | 320 | net/ipv4/af_inet.c | while((skb = skb_dequeue(&sk->write_queue)) != NULL) { |
skb | 321 | net/ipv4/af_inet.c | IS_SKB(skb); |
skb | 322 | net/ipv4/af_inet.c | kfree_skb(skb, FREE_WRITE); |
skb | 329 | net/ipv4/af_inet.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) |
skb | 335 | net/ipv4/af_inet.c | if (skb->sk != NULL && skb->sk != sk) |
skb | 337 | net/ipv4/af_inet.c | IS_SKB(skb); |
skb | 338 | net/ipv4/af_inet.c | skb->sk->prot->close(skb->sk, 0); |
skb | 340 | net/ipv4/af_inet.c | IS_SKB(skb); |
skb | 341 | net/ipv4/af_inet.c | kfree_skb(skb, FREE_READ); |
skb | 349 | net/ipv4/af_inet.c | for(skb = sk->send_head; skb != NULL; ) |
skb | 357 | net/ipv4/af_inet.c | if (skb->next && skb->prev) |
skb | 359 | net/ipv4/af_inet.c | IS_SKB(skb); |
skb | 360 | net/ipv4/af_inet.c | skb_unlink(skb); |
skb | 362 | net/ipv4/af_inet.c | skb->dev = NULL; |
skb | 363 | net/ipv4/af_inet.c | skb2 = skb->link3; |
skb | 364 | net/ipv4/af_inet.c | kfree_skb(skb, FREE_WRITE); |
skb | 365 | net/ipv4/af_inet.c | skb = skb2; |
skb | 374 | net/ipv4/af_inet.c | while((skb=skb_dequeue(&sk->back_log))!=NULL) |
skb | 377 | net/ipv4/af_inet.c | skb->sk = NULL; |
skb | 378 | net/ipv4/af_inet.c | kfree_skb(skb, FREE_READ); |
skb | 138 | net/ipv4/arp.c | struct sk_buff_head skb; /* list of queued packets */ |
skb | 311 | net/ipv4/arp.c | struct sk_buff *skb; |
skb | 317 | net/ipv4/arp.c | while ((skb = skb_dequeue(&entry->skb)) != NULL) |
skb | 319 | net/ipv4/arp.c | skb_device_lock(skb); |
skb | 321 | net/ipv4/arp.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 539 | net/ipv4/arp.c | skb_queue_head_init(&entry->skb); |
skb | 818 | net/ipv4/arp.c | struct sk_buff *skb; |
skb | 833 | net/ipv4/arp.c | skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4) |
skb | 835 | net/ipv4/arp.c | if (skb == NULL) |
skb | 840 | net/ipv4/arp.c | skb_reserve(skb, dev->hard_header_len); |
skb | 841 | net/ipv4/arp.c | arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4)); |
skb | 842 | net/ipv4/arp.c | skb->arp = 1; |
skb | 843 | net/ipv4/arp.c | skb->dev = dev; |
skb | 844 | net/ipv4/arp.c | skb->free = 1; |
skb | 845 | net/ipv4/arp.c | skb->protocol = htons (ETH_P_IP); |
skb | 851 | net/ipv4/arp.c | dev->hard_header(skb,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len); |
skb | 881 | net/ipv4/arp.c | dev_queue_xmit(skb, dev, 0); |
skb | 890 | net/ipv4/arp.c | struct sk_buff *skb; |
skb | 912 | net/ipv4/arp.c | while((skb = skb_dequeue(&entry->skb)) != NULL) |
skb | 914 | net/ipv4/arp.c | IS_SKB(skb); |
skb | 915 | net/ipv4/arp.c | skb_device_lock(skb); |
skb | 917 | net/ipv4/arp.c | if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb)) |
skb | 919 | net/ipv4/arp.c | skb->arp = 1; |
skb | 920 | net/ipv4/arp.c | if(skb->sk==NULL) |
skb | 921 | net/ipv4/arp.c | dev_queue_xmit(skb, skb->dev, 0); |
skb | 923 | net/ipv4/arp.c | dev_queue_xmit(skb,skb->dev,skb->sk->priority); |
skb | 963 | net/ipv4/arp.c | int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 969 | net/ipv4/arp.c | struct arphdr *arp = (struct arphdr *)skb->h.raw; |
skb | 990 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 1010 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 1019 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 1028 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 1036 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 1043 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 1065 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 1091 | net/ipv4/arp.c | if (tip != dev->pa_addr && net_alias_has(skb->dev)) |
skb | 1100 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 1149 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 1157 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 1177 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 1239 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 1260 | net/ipv4/arp.c | entry->dev = skb->dev; |
skb | 1262 | net/ipv4/arp.c | skb_queue_head_init(&entry->skb); |
skb | 1283 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 1396 | net/ipv4/arp.c | u32 saddr, struct sk_buff *skb) |
skb | 1403 | net/ipv4/arp.c | if (skb) |
skb | 1404 | net/ipv4/arp.c | skb->arp = 1; |
skb | 1427 | net/ipv4/arp.c | if (skb != NULL) |
skb | 1431 | net/ipv4/arp.c | skb_queue_tail(&entry->skb, skb); |
skb | 1432 | net/ipv4/arp.c | skb_device_unlock(skb); |
skb | 1445 | net/ipv4/arp.c | if (skb->sk) |
skb | 1447 | net/ipv4/arp.c | skb->sk->err = EHOSTDOWN; |
skb | 1448 | net/ipv4/arp.c | skb->sk->error_report(skb->sk); |
skb | 1451 | net/ipv4/arp.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev); |
skb | 1453 | net/ipv4/arp.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1467 | net/ipv4/arp.c | if (skb) |
skb | 1468 | net/ipv4/arp.c | skb->arp = 1; |
skb | 1492 | net/ipv4/arp.c | skb_queue_head_init(&entry->skb); |
skb | 1493 | net/ipv4/arp.c | if (skb != NULL) |
skb | 1495 | net/ipv4/arp.c | skb_queue_tail(&entry->skb, skb); |
skb | 1496 | net/ipv4/arp.c | skb_device_unlock(skb); |
skb | 1514 | net/ipv4/arp.c | else if (skb != NULL) |
skb | 1515 | net/ipv4/arp.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1731 | net/ipv4/arp.c | skb_queue_head_init(&entry->skb); |
skb | 1807 | net/ipv4/arp.c | struct sk_buff * skb; |
skb | 1826 | net/ipv4/arp.c | while ((skb = skb_dequeue(&entry->skb)) != NULL) |
skb | 1828 | net/ipv4/arp.c | skb_device_lock(skb); |
skb | 1830 | net/ipv4/arp.c | skb_queue_tail(&entry1->skb, skb); |
skb | 1831 | net/ipv4/arp.c | skb_device_unlock(skb); |
skb | 1976 | net/ipv4/arp.c | skb_queue_head_init(&entry->skb); |
skb | 152 | net/ipv4/icmp.c | void (*handler)(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, __u32 saddr, __u32 daddr, int len); |
skb | 340 | net/ipv4/icmp.c | static void icmp_unreach(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, __u32 saddr, __u32 daddr, int len) |
skb | 474 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 482 | net/ipv4/icmp.c | static void icmp_redirect(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, __u32 source, __u32 daddr, int len) |
skb | 541 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 552 | net/ipv4/icmp.c | static void icmp_echo(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, __u32 saddr, __u32 daddr, int len) |
skb | 559 | net/ipv4/icmp.c | if (ip_options_echo(&icmp_param.replyopts, NULL, daddr, saddr, skb)==0) |
skb | 561 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 572 | net/ipv4/icmp.c | static void icmp_timestamp(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, __u32 saddr, __u32 daddr, int len) |
skb | 584 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 604 | net/ipv4/icmp.c | if (ip_options_echo(&icmp_param.replyopts, NULL, daddr, saddr, skb)==0) |
skb | 606 | net/ipv4/icmp.c | kfree_skb(skb,FREE_READ); |
skb | 622 | net/ipv4/icmp.c | static void icmp_address(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, __u32 saddr, __u32 daddr, int len) |
skb | 633 | net/ipv4/icmp.c | if (ip_options_echo(&icmp_param.replyopts, NULL, daddr, saddr, skb)==0) |
skb | 636 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 639 | net/ipv4/icmp.c | static void icmp_discard(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, __u32 saddr, __u32 daddr, int len) |
skb | 641 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 648 | net/ipv4/icmp.c | int icmp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 652 | net/ipv4/icmp.c | struct icmphdr *icmph=(void *)skb->h.raw; |
skb | 664 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 677 | net/ipv4/icmp.c | kfree_skb(skb,FREE_READ); |
skb | 695 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 708 | net/ipv4/icmp.c | (icmp_pointers[icmph->type].handler)(icmph,skb,skb->dev,saddr,daddr,len); |
skb | 240 | net/ipv4/igmp.c | struct sk_buff *skb=alloc_skb(MAX_IGMP_SIZE, GFP_ATOMIC); |
skb | 244 | net/ipv4/igmp.c | if(skb==NULL) |
skb | 246 | net/ipv4/igmp.c | tmp=ip_build_header(skb, INADDR_ANY, address, &dev, IPPROTO_IGMP, NULL, |
skb | 250 | net/ipv4/igmp.c | kfree_skb(skb, FREE_WRITE); |
skb | 253 | net/ipv4/igmp.c | ih=(struct igmphdr *)skb_put(skb,sizeof(struct igmphdr)); |
skb | 259 | net/ipv4/igmp.c | ip_queue_xmit(NULL,dev,skb,1); |
skb | 429 | net/ipv4/igmp.c | int igmp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 448 | net/ipv4/igmp.c | ih=(struct igmphdr *)skb->h.raw; |
skb | 450 | net/ipv4/igmp.c | if(skb->len <sizeof(struct igmphdr) || skb->ip_hdr->ttl>1 || ip_compute_csum((void *)skb->h.raw,sizeof(struct igmphdr))) |
skb | 452 | net/ipv4/igmp.c | kfree_skb(skb, FREE_READ); |
skb | 464 | net/ipv4/igmp.c | kfree_skb(skb, FREE_READ); |
skb | 474 | net/ipv4/igmp.c | kfree_skb(skb, FREE_READ); |
skb | 45 | net/ipv4/ip_forward.c | static void ip_encap(struct sk_buff *skb, int len, struct device *out, __u32 daddr) |
skb | 52 | net/ipv4/ip_forward.c | struct iphdr *iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr)); |
skb | 56 | net/ipv4/ip_forward.c | iph->tos = skb->ip_hdr->tos; |
skb | 57 | net/ipv4/ip_forward.c | iph->ttl = skb->ip_hdr->ttl; |
skb | 63 | net/ipv4/ip_forward.c | iph->tot_len = htons(skb->len); |
skb | 67 | net/ipv4/ip_forward.c | skb->dev = out; |
skb | 68 | net/ipv4/ip_forward.c | skb->arp = 1; |
skb | 69 | net/ipv4/ip_forward.c | skb->raddr=daddr; |
skb | 73 | net/ipv4/ip_forward.c | if (out->hard_header && out->hard_header(skb, out, ETH_P_IP, NULL, NULL, len)<0) |
skb | 74 | net/ipv4/ip_forward.c | skb->arp=0; |
skb | 86 | net/ipv4/ip_forward.c | int ip_forward(struct sk_buff *skb, struct device *dev, int is_frag, |
skb | 95 | net/ipv4/ip_forward.c | struct options * opt = (struct options*)skb->proto_priv; |
skb | 101 | net/ipv4/ip_forward.c | struct sk_buff *skb_in = skb; /* So we can remember if the masquerader did some swaps */ |
skb | 112 | net/ipv4/ip_forward.c | fw_res=call_fw_firewall(PF_INET, skb, skb->h.iph); |
skb | 118 | net/ipv4/ip_forward.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev); |
skb | 136 | net/ipv4/ip_forward.c | iph = skb->h.iph; |
skb | 153 | net/ipv4/ip_forward.c | icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0, dev); |
skb | 174 | net/ipv4/ip_forward.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_UNREACH, 0, dev); |
skb | 195 | net/ipv4/ip_forward.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_SR_FAILED, 0, dev); |
skb | 214 | net/ipv4/ip_forward.c | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, raddr, dev); |
skb | 223 | net/ipv4/ip_forward.c | dev2=skb->dev; |
skb | 224 | net/ipv4/ip_forward.c | raddr=skb->raddr; |
skb | 245 | net/ipv4/ip_forward.c | ip_fw_masquerade(&skb, dev2); |
skb | 247 | net/ipv4/ip_forward.c | IS_SKB(skb); |
skb | 249 | net/ipv4/ip_forward.c | if (skb->len+encap > dev2->mtu && (ntohs(iph->frag_off) & IP_DF)) |
skb | 252 | net/ipv4/ip_forward.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(dev2->mtu), dev); |
skb | 259 | net/ipv4/ip_forward.c | if(skb_headroom(skb)-encap<dev2->hard_header_len) |
skb | 261 | net/ipv4/ip_forward.c | skb2 = alloc_skb(dev2->hard_header_len + skb->len + encap + 15, GFP_ATOMIC); |
skb | 263 | net/ipv4/ip_forward.c | if(skb_headroom(skb)<dev2->hard_header_len) |
skb | 265 | net/ipv4/ip_forward.c | skb2 = alloc_skb(dev2->hard_header_len + skb->len + 15, GFP_ATOMIC); |
skb | 288 | net/ipv4/ip_forward.c | skb_reserve(skb,(encap+dev->hard_header_len+15)&~15); /* 16 byte aligned IP headers are good */ |
skb | 289 | net/ipv4/ip_forward.c | ip_encap(skb2,skb->len, dev2, raddr); |
skb | 293 | net/ipv4/ip_forward.c | ip_send(rt,skb2,raddr,skb->len,dev2,dev2->pa_addr); |
skb | 300 | net/ipv4/ip_forward.c | ptr = skb_put(skb2,skb->len); |
skb | 307 | net/ipv4/ip_forward.c | memcpy(ptr, skb->h.raw, skb->len); |
skb | 308 | net/ipv4/ip_forward.c | memcpy(skb2->proto_priv, skb->proto_priv, sizeof(skb->proto_priv)); |
skb | 317 | net/ipv4/ip_forward.c | skb2 = skb; |
skb | 321 | net/ipv4/ip_forward.c | ip_encap(skb,skb->len, dev2, raddr); |
skb | 325 | net/ipv4/ip_forward.c | skb->arp=1; |
skb | 326 | net/ipv4/ip_forward.c | skb->raddr=raddr; |
skb | 329 | net/ipv4/ip_forward.c | memcpy(skb_push(skb, dev2->hard_header_len), hh->hh_data, dev2->hard_header_len); |
skb | 335 | net/ipv4/ip_forward.c | skb->arp = 0; |
skb | 340 | net/ipv4/ip_forward.c | if(dev2->hard_header(skb, dev2, ETH_P_IP, NULL, NULL, skb->len)<0) |
skb | 341 | net/ipv4/ip_forward.c | skb->arp=0; |
skb | 354 | net/ipv4/ip_forward.c | if (skb != skb2) |
skb | 460 | net/ipv4/ip_forward.c | if(skb==skb2) |
skb | 468 | net/ipv4/ip_forward.c | if(skb!=skb_in) |
skb | 54 | net/ipv4/ip_fragment.c | extern __inline__ void frag_kfree_skb(struct sk_buff *skb, int type) |
skb | 56 | net/ipv4/ip_fragment.c | atomic_sub(skb->truesize, &ip_frag_mem); |
skb | 57 | net/ipv4/ip_fragment.c | kfree_skb(skb,type); |
skb | 79 | net/ipv4/ip_fragment.c | static struct ipfrag *ip_frag_create(int offset, int end, struct sk_buff *skb, unsigned char *ptr) |
skb | 96 | net/ipv4/ip_fragment.c | fp->skb = skb; |
skb | 105 | net/ipv4/ip_fragment.c | ip_frag_mem+=skb->truesize; |
skb | 177 | net/ipv4/ip_fragment.c | IS_SKB(fp->skb); |
skb | 178 | net/ipv4/ip_fragment.c | frag_kfree_skb(fp->skb,FREE_READ); |
skb | 210 | net/ipv4/ip_fragment.c | icmp_send(qp->fragments->skb,ICMP_TIME_EXCEEDED, |
skb | 241 | net/ipv4/ip_fragment.c | static struct ipq *ip_create(struct sk_buff *skb, struct iphdr *iph, struct device *dev) |
skb | 251 | net/ipv4/ip_fragment.c | skb->dev = qp->dev; |
skb | 331 | net/ipv4/ip_fragment.c | struct sk_buff *skb; |
skb | 342 | net/ipv4/ip_fragment.c | if ((skb = dev_alloc_skb(len)) == NULL) |
skb | 351 | net/ipv4/ip_fragment.c | skb_put(skb,len); |
skb | 352 | net/ipv4/ip_fragment.c | skb->h.raw = skb->data; |
skb | 353 | net/ipv4/ip_fragment.c | skb->free = 1; |
skb | 356 | net/ipv4/ip_fragment.c | ptr = (unsigned char *) skb->h.raw; |
skb | 366 | net/ipv4/ip_fragment.c | if(count+fp->len > skb->len) |
skb | 370 | net/ipv4/ip_fragment.c | frag_kfree_skb(skb,FREE_WRITE); |
skb | 383 | net/ipv4/ip_fragment.c | iph = skb->h.iph; |
skb | 386 | net/ipv4/ip_fragment.c | skb->ip_hdr = iph; |
skb | 389 | net/ipv4/ip_fragment.c | return(skb); |
skb | 397 | net/ipv4/ip_fragment.c | struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev) |
skb | 429 | net/ipv4/ip_fragment.c | return(skb); |
skb | 462 | net/ipv4/ip_fragment.c | if ((qp = ip_create(skb, iph, dev)) == NULL) |
skb | 464 | net/ipv4/ip_fragment.c | skb->sk = NULL; |
skb | 465 | net/ipv4/ip_fragment.c | frag_kfree_skb(skb, FREE_READ); |
skb | 481 | net/ipv4/ip_fragment.c | ptr = skb->data + ihl; |
skb | 547 | net/ipv4/ip_fragment.c | frag_kfree_skb(tmp->skb,FREE_READ); |
skb | 557 | net/ipv4/ip_fragment.c | tfp = ip_frag_create(offset, end, skb, ptr); |
skb | 565 | net/ipv4/ip_fragment.c | skb->sk = NULL; |
skb | 566 | net/ipv4/ip_fragment.c | frag_kfree_skb(skb, FREE_READ); |
skb | 606 | net/ipv4/ip_fragment.c | void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag) |
skb | 619 | net/ipv4/ip_fragment.c | raw = skb->data; |
skb | 622 | net/ipv4/ip_fragment.c | skb->ip_hdr = iph; |
skb | 624 | net/ipv4/ip_fragment.c | iph = skb->ip_hdr; |
skb | 657 | net/ipv4/ip_fragment.c | icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,dev->mtu, dev); |
skb | 709 | net/ipv4/ip_fragment.c | skb2->arp = skb->arp; |
skb | 710 | net/ipv4/ip_fragment.c | if(skb->free==0) |
skb | 725 | net/ipv4/ip_fragment.c | skb2->raddr = skb->raddr; /* For rebuild_header - must be here */ |
skb | 755 | net/ipv4/ip_fragment.c | ip_options_fragment(skb); |
skb | 616 | net/ipv4/ip_fw.c | static struct sk_buff *revamp(struct sk_buff *skb, struct device *dev, struct ip_masq *ftp) |
skb | 618 | net/ipv4/ip_fw.c | struct iphdr *iph = skb->h.iph; |
skb | 657 | net/ipv4/ip_fw.c | while (skb->len - ((unsigned char *)data - skb->h.raw) > 18) |
skb | 694 | net/ipv4/ip_fw.c | return skb; |
skb | 733 | net/ipv4/ip_fw.c | return skb; |
skb | 757 | net/ipv4/ip_fw.c | printk("MASQUERADE: resizing needed for %d bytes (%ld)\n",diff, skb->len); |
skb | 759 | net/ipv4/ip_fw.c | skb2 = alloc_skb(MAX_HEADER + skb->len+diff, GFP_ATOMIC); |
skb | 762 | net/ipv4/ip_fw.c | return skb; |
skb | 764 | net/ipv4/ip_fw.c | skb2->free = skb->free; |
skb | 766 | net/ipv4/ip_fw.c | skb_put(skb2,skb->len + diff); |
skb | 767 | net/ipv4/ip_fw.c | skb2->h.raw = skb2->data + (skb->h.raw - skb->data); |
skb | 780 | net/ipv4/ip_fw.c | memcpy(skb2->data, skb->data, (p - (char *)skb->data)); |
skb | 781 | net/ipv4/ip_fw.c | memcpy(&skb2->data[(p - (char *)skb->data)], buf, strlen(buf)); |
skb | 782 | net/ipv4/ip_fw.c | memcpy(&skb2->data[(p - (char *)skb->data) + strlen(buf)], data, |
skb | 783 | net/ipv4/ip_fw.c | skb->len - (data-(char *)skb->data)); |
skb | 789 | net/ipv4/ip_fw.c | iph->tot_len = htons(skb->len + diff); |
skb | 796 | net/ipv4/ip_fw.c | kfree_skb(skb, FREE_WRITE); |
skb | 799 | net/ipv4/ip_fw.c | return skb; |
skb | 814 | net/ipv4/ip_fw.c | struct sk_buff *skb=*skb_ptr; |
skb | 815 | net/ipv4/ip_fw.c | struct iphdr *iph = skb->h.iph; |
skb | 875 | net/ipv4/ip_fw.c | size = skb->len - ((unsigned char *)portptr - skb->h.raw); |
skb | 893 | net/ipv4/ip_fw.c | skb = revamp(*skb_ptr, dev, ms); |
skb | 894 | net/ipv4/ip_fw.c | *skb_ptr = skb; |
skb | 895 | net/ipv4/ip_fw.c | iph = skb->h.iph; |
skb | 897 | net/ipv4/ip_fw.c | size = skb->len - ((unsigned char *)portptr-skb->h.raw); |
skb | 911 | net/ipv4/ip_fw.c | skb->csum = csum_partial((void *)(th + 1), size - sizeof(*th), 0); |
skb | 912 | net/ipv4/ip_fw.c | tcp_send_check(th,iph->saddr,iph->daddr,size,skb); |
skb | 931 | net/ipv4/ip_fw.c | int ip_fw_demasquerade(struct sk_buff *skb) |
skb | 933 | net/ipv4/ip_fw.c | struct iphdr *iph = skb->h.iph; |
skb | 936 | net/ipv4/ip_fw.c | struct tcphdr *th = (struct tcphdr *)(skb->h.raw+(iph->ihl<<2)); |
skb | 971 | net/ipv4/ip_fw.c | int size = skb->len - ((unsigned char *)portptr - skb->h.raw); |
skb | 1017 | net/ipv4/ip_fw.c | skb->csum = csum_partial((void *)(((struct tcphdr *)portptr) + 1), |
skb | 1019 | net/ipv4/ip_fw.c | tcp_send_check((struct tcphdr *)portptr,iph->saddr,iph->daddr,size,skb); |
skb | 1624 | net/ipv4/ip_fw.c | int ipfw_input_check(struct firewall_ops *this, int pf, struct sk_buff *skb, void *phdr) |
skb | 1626 | net/ipv4/ip_fw.c | return ip_fw_chk(phdr, skb->dev, ip_fw_in_chain, ip_fw_in_policy, 0); |
skb | 1629 | net/ipv4/ip_fw.c | int ipfw_output_check(struct firewall_ops *this, int pf, struct sk_buff *skb, void *phdr) |
skb | 1631 | net/ipv4/ip_fw.c | return ip_fw_chk(phdr, skb->dev, ip_fw_out_chain, ip_fw_out_policy, 0); |
skb | 1634 | net/ipv4/ip_fw.c | int ipfw_forward_check(struct firewall_ops *this, int pf, struct sk_buff *skb, void *phdr) |
skb | 1636 | net/ipv4/ip_fw.c | return ip_fw_chk(phdr, skb->dev, ip_fw_fwd_chain, ip_fw_fwd_policy, 0); |
skb | 198 | net/ipv4/ip_input.c | int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 200 | net/ipv4/ip_input.c | struct iphdr *iph = skb->h.iph; |
skb | 223 | net/ipv4/ip_input.c | return ipv6_rcv(skb,dev,pt); |
skb | 232 | net/ipv4/ip_input.c | skb->ip_hdr = iph; |
skb | 247 | net/ipv4/ip_input.c | if (skb->len<sizeof(struct iphdr) || iph->ihl<5 || iph->version != 4 || ip_fast_csum((unsigned char *)iph, iph->ihl) !=0 |
skb | 248 | net/ipv4/ip_input.c | || skb->len < ntohs(iph->tot_len)) |
skb | 251 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 261 | net/ipv4/ip_input.c | skb_trim(skb,ntohs(iph->tot_len)); |
skb | 270 | net/ipv4/ip_input.c | if (iph->daddr != skb->dev->pa_addr && net_alias_has(skb->dev)) |
skb | 271 | net/ipv4/ip_input.c | skb->dev = dev = net_alias_dev_rcv_sel32(skb->dev, AF_INET, iph->saddr, iph->daddr); |
skb | 276 | net/ipv4/ip_input.c | skb->ip_summed = 0; |
skb | 277 | net/ipv4/ip_input.c | if (ip_options_compile(NULL, skb)) |
skb | 279 | net/ipv4/ip_input.c | opt = (struct options*)skb->proto_priv; |
skb | 283 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_READ); |
skb | 304 | net/ipv4/ip_input.c | if ((err=call_in_firewall(PF_INET, skb, iph))<FW_ACCEPT) |
skb | 307 | net/ipv4/ip_input.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0, dev); |
skb | 308 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 342 | net/ipv4/ip_input.c | if ( iph->daddr == skb->dev->pa_addr || (brd = ip_chk_addr(iph->daddr)) != 0) |
skb | 350 | net/ipv4/ip_input.c | if (brd != IS_MYADDR || skb->pkt_type != PACKET_HOST) |
skb | 352 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 364 | net/ipv4/ip_input.c | icmp_send(skb, ICMP_PARAMETERPROB, 0, opt->srr+2, |
skb | 365 | net/ipv4/ip_input.c | skb->dev); |
skb | 366 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 380 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 390 | net/ipv4/ip_input.c | if (ip_forward(skb, dev, is_frag, nexthop)) |
skb | 391 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 394 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 411 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 426 | net/ipv4/ip_input.c | if (ip_fw_demasquerade(skb)) |
skb | 428 | net/ipv4/ip_input.c | struct iphdr *iph=skb->h.iph; |
skb | 429 | net/ipv4/ip_input.c | if (ip_forward(skb, dev, is_frag|IPFWD_MASQUERADED, iph->daddr)) |
skb | 430 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 442 | net/ipv4/ip_input.c | skb=ip_defrag(iph,skb,dev); |
skb | 443 | net/ipv4/ip_input.c | if(skb==NULL) |
skb | 445 | net/ipv4/ip_input.c | skb->dev = dev; |
skb | 446 | net/ipv4/ip_input.c | iph=skb->h.iph; |
skb | 453 | net/ipv4/ip_input.c | skb->ip_hdr = iph; |
skb | 454 | net/ipv4/ip_input.c | skb->h.raw += iph->ihl*4; |
skb | 489 | net/ipv4/ip_input.c | skb1=skb_clone(skb, GFP_ATOMIC); |
skb | 532 | net/ipv4/ip_input.c | skb2 = skb_clone(skb, GFP_ATOMIC); |
skb | 538 | net/ipv4/ip_input.c | skb2 = skb; |
skb | 571 | net/ipv4/ip_input.c | ipmr_forward(skb, is_frag); |
skb | 574 | net/ipv4/ip_input.c | struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC); |
skb | 585 | net/ipv4/ip_input.c | raw_rcv(raw_sk, skb, dev, iph->saddr, daddr); |
skb | 589 | net/ipv4/ip_input.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0, dev); |
skb | 590 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 604 | net/ipv4/ip_input.c | if(skb->pkt_type!=PACKET_HOST || brd==IS_BROADCAST) |
skb | 606 | net/ipv4/ip_input.c | kfree_skb(skb,FREE_WRITE); |
skb | 617 | net/ipv4/ip_input.c | icmp_send(skb, ICMP_PARAMETERPROB, 0, 16, skb->dev); |
skb | 618 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 621 | net/ipv4/ip_input.c | if (ip_forward(skb, dev, is_frag, iph->daddr)) |
skb | 622 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 627 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 29 | net/ipv4/ip_options.c | void ip_options_build(struct sk_buff * skb, struct options * opt, |
skb | 33 | net/ipv4/ip_options.c | unsigned char * iph = (unsigned char*)skb->ip_hdr; |
skb | 35 | net/ipv4/ip_options.c | memcpy(skb->proto_priv, opt, sizeof(struct options)); |
skb | 37 | net/ipv4/ip_options.c | opt = (struct options*)skb->proto_priv; |
skb | 75 | net/ipv4/ip_options.c | struct sk_buff * skb) |
skb | 86 | net/ipv4/ip_options.c | sopt = (struct options*)skb->proto_priv; |
skb | 95 | net/ipv4/ip_options.c | (unsigned char *)skb->ip_hdr); |
skb | 194 | net/ipv4/ip_options.c | void ip_options_fragment(struct sk_buff * skb) |
skb | 196 | net/ipv4/ip_options.c | unsigned char * optptr = (unsigned char*)skb->ip_hdr; |
skb | 197 | net/ipv4/ip_options.c | struct options * opt = (struct options*)skb->proto_priv; |
skb | 234 | net/ipv4/ip_options.c | int ip_options_compile(struct options * opt, struct sk_buff * skb) |
skb | 244 | net/ipv4/ip_options.c | opt = (struct options*)skb->proto_priv; |
skb | 246 | net/ipv4/ip_options.c | iph = (unsigned char*)skb->ip_hdr; |
skb | 253 | net/ipv4/ip_options.c | optptr = opt->is_data ? opt->__data : (unsigned char*)&skb->ip_hdr[1]; |
skb | 302 | net/ipv4/ip_options.c | if (!skb) |
skb | 339 | net/ipv4/ip_options.c | if (skb) |
skb | 341 | net/ipv4/ip_options.c | memcpy(&optptr[optptr[2]-1], &skb->dev->pa_addr, 4); |
skb | 378 | net/ipv4/ip_options.c | if (skb) |
skb | 390 | net/ipv4/ip_options.c | if (skb) |
skb | 392 | net/ipv4/ip_options.c | memcpy(&optptr[ts->ptr-1], &skb->dev->pa_addr, 4); |
skb | 411 | net/ipv4/ip_options.c | if (skb) |
skb | 441 | net/ipv4/ip_options.c | if (skb) |
skb | 451 | net/ipv4/ip_options.c | if (!skb) |
skb | 466 | net/ipv4/ip_options.c | if (skb) |
skb | 468 | net/ipv4/ip_options.c | icmp_send(skb, ICMP_PARAMETERPROB, 0, pp_ptr-iph, skb->dev); |
skb | 469 | net/ipv4/ip_options.c | kfree_skb(skb, FREE_READ); |
skb | 67 | net/ipv4/ip_output.c | static void ip_loopback(struct device *old_dev, struct sk_buff *skb) |
skb | 70 | net/ipv4/ip_output.c | int len=ntohs(skb->ip_hdr->tot_len); |
skb | 79 | net/ipv4/ip_output.c | newskb->saddr=skb->saddr; |
skb | 80 | net/ipv4/ip_output.c | newskb->daddr=skb->daddr; |
skb | 81 | net/ipv4/ip_output.c | newskb->raddr=skb->raddr; |
skb | 85 | net/ipv4/ip_output.c | newskb->pkt_type=skb->pkt_type; |
skb | 90 | net/ipv4/ip_output.c | ip_send(NULL,newskb, skb->ip_hdr->daddr, len, dev, skb->ip_hdr->saddr); |
skb | 95 | net/ipv4/ip_output.c | memcpy(newskb->proto_priv, skb->proto_priv, sizeof(skb->proto_priv)); |
skb | 100 | net/ipv4/ip_output.c | memcpy(newskb->ip_hdr,skb->ip_hdr,len); |
skb | 114 | net/ipv4/ip_output.c | int ip_send(struct rtable * rt, struct sk_buff *skb, __u32 daddr, int len, struct device *dev, __u32 saddr) |
skb | 118 | net/ipv4/ip_output.c | skb->dev = dev; |
skb | 119 | net/ipv4/ip_output.c | skb->arp = 1; |
skb | 120 | net/ipv4/ip_output.c | skb->protocol = htons(ETH_P_IP); |
skb | 127 | net/ipv4/ip_output.c | skb_reserve(skb,(dev->hard_header_len+15)&~15); /* 16 byte aligned IP headers are good */ |
skb | 130 | net/ipv4/ip_output.c | memcpy(skb_push(skb,dev->hard_header_len),rt->rt_hh->hh_data,dev->hard_header_len); |
skb | 136 | net/ipv4/ip_output.c | skb->arp = 0; |
skb | 137 | net/ipv4/ip_output.c | skb->raddr = daddr; |
skb | 140 | net/ipv4/ip_output.c | mac = dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, len); |
skb | 144 | net/ipv4/ip_output.c | skb->arp = 0; |
skb | 145 | net/ipv4/ip_output.c | skb->raddr = daddr; /* next routing address */ |
skb | 151 | net/ipv4/ip_output.c | static int ip_send_room(struct rtable * rt, struct sk_buff *skb, __u32 daddr, int len, struct device *dev, __u32 saddr) |
skb | 155 | net/ipv4/ip_output.c | skb->dev = dev; |
skb | 156 | net/ipv4/ip_output.c | skb->arp = 1; |
skb | 157 | net/ipv4/ip_output.c | skb->protocol = htons(ETH_P_IP); |
skb | 160 | net/ipv4/ip_output.c | skb_reserve(skb,MAX_HEADER); |
skb | 163 | net/ipv4/ip_output.c | memcpy(skb_push(skb,dev->hard_header_len),rt->rt_hh->hh_data,dev->hard_header_len); |
skb | 169 | net/ipv4/ip_output.c | skb->arp = 0; |
skb | 170 | net/ipv4/ip_output.c | skb->raddr = daddr; |
skb | 173 | net/ipv4/ip_output.c | mac = dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, len); |
skb | 177 | net/ipv4/ip_output.c | skb->arp = 0; |
skb | 178 | net/ipv4/ip_output.c | skb->raddr = daddr; /* next routing address */ |
skb | 192 | net/ipv4/ip_output.c | int ip_build_header(struct sk_buff *skb, __u32 saddr, __u32 daddr, |
skb | 211 | net/ipv4/ip_output.c | if(MULTICAST(daddr) && *dev==NULL && skb->sk && *skb->sk->ip_mc_name) |
skb | 212 | net/ipv4/ip_output.c | *dev=dev_get(skb->sk->ip_mc_name); |
skb | 216 | net/ipv4/ip_output.c | rt = ip_check_route(rp, daddr, skb->localroute); |
skb | 225 | net/ipv4/ip_output.c | rt = ip_rt_route(daddr, skb->localroute); |
skb | 263 | net/ipv4/ip_output.c | tmp = ip_send_room(rt, skb, raddr, len, *dev, saddr); |
skb | 265 | net/ipv4/ip_output.c | tmp = ip_send(rt, skb, raddr, len, *dev, saddr); |
skb | 273 | net/ipv4/ip_output.c | skb->dev = *dev; |
skb | 274 | net/ipv4/ip_output.c | skb->saddr = saddr; |
skb | 293 | net/ipv4/ip_output.c | iph=(struct iphdr *)skb_put(skb,sizeof(struct iphdr) + opt->optlen); |
skb | 295 | net/ipv4/ip_output.c | iph=(struct iphdr *)skb_put(skb,sizeof(struct iphdr)); |
skb | 305 | net/ipv4/ip_output.c | skb->ip_hdr = iph; |
skb | 310 | net/ipv4/ip_output.c | ip_options_build(skb, opt, final_daddr, (*dev)->pa_addr, 0); |
skb | 335 | net/ipv4/ip_output.c | struct sk_buff *skb, int free) |
skb | 347 | net/ipv4/ip_output.c | IS_SKB(skb); |
skb | 354 | net/ipv4/ip_output.c | skb->dev = dev; |
skb | 355 | net/ipv4/ip_output.c | skb->when = jiffies; |
skb | 365 | net/ipv4/ip_output.c | iph = skb->ip_hdr; |
skb | 366 | net/ipv4/ip_output.c | iph->tot_len = htons(skb->len-(((unsigned char *)iph)-skb->data)); |
skb | 369 | net/ipv4/ip_output.c | if(call_out_firewall(PF_INET, skb, iph) < FW_ACCEPT) |
skb | 387 | net/ipv4/ip_output.c | skb->free = free; |
skb | 397 | net/ipv4/ip_output.c | ip_fragment(sk,skb,dev,0); |
skb | 398 | net/ipv4/ip_output.c | IS_SKB(skb); |
skb | 399 | net/ipv4/ip_output.c | kfree_skb(skb,FREE_WRITE); |
skb | 417 | net/ipv4/ip_output.c | if (skb->next != NULL) |
skb | 420 | net/ipv4/ip_output.c | skb_unlink(skb); |
skb | 441 | net/ipv4/ip_output.c | if (skb->link3 != NULL) |
skb | 444 | net/ipv4/ip_output.c | skb->link3 = NULL; |
skb | 448 | net/ipv4/ip_output.c | sk->send_tail = skb; |
skb | 449 | net/ipv4/ip_output.c | sk->send_head = skb; |
skb | 453 | net/ipv4/ip_output.c | sk->send_tail->link3 = skb; |
skb | 454 | net/ipv4/ip_output.c | sk->send_tail = skb; |
skb | 463 | net/ipv4/ip_output.c | skb->sk = sk; |
skb | 486 | net/ipv4/ip_output.c | ip_loopback(dev,skb); |
skb | 495 | net/ipv4/ip_output.c | ip_loopback(dev,skb); |
skb | 504 | net/ipv4/ip_output.c | if(skb->ip_hdr->ttl==0) |
skb | 506 | net/ipv4/ip_output.c | kfree_skb(skb, FREE_READ); |
skb | 512 | net/ipv4/ip_output.c | ip_loopback(dev,skb); |
skb | 523 | net/ipv4/ip_output.c | dev_queue_xmit(skb, dev, sk->priority); |
skb | 527 | net/ipv4/ip_output.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 536 | net/ipv4/ip_output.c | kfree_skb(skb, FREE_WRITE); |
skb | 649 | net/ipv4/ip_output.c | struct sk_buff *skb=sock_alloc_send_skb(sk, length+15+dev->hard_header_len,0, noblock, &error); |
skb | 650 | net/ipv4/ip_output.c | if(skb==NULL) |
skb | 655 | net/ipv4/ip_output.c | skb->dev=dev; |
skb | 656 | net/ipv4/ip_output.c | skb->protocol = htons(ETH_P_IP); |
skb | 657 | net/ipv4/ip_output.c | skb->free=1; |
skb | 658 | net/ipv4/ip_output.c | skb->when=jiffies; |
skb | 659 | net/ipv4/ip_output.c | skb->sk=sk; |
skb | 660 | net/ipv4/ip_output.c | skb->arp=0; |
skb | 661 | net/ipv4/ip_output.c | skb->saddr=saddr; |
skb | 662 | net/ipv4/ip_output.c | skb->raddr = raddr; |
skb | 663 | net/ipv4/ip_output.c | skb_reserve(skb,(dev->hard_header_len+15)&~15); |
skb | 666 | net/ipv4/ip_output.c | skb->arp=1; |
skb | 667 | net/ipv4/ip_output.c | memcpy(skb_push(skb,dev->hard_header_len),hh->hh_data,dev->hard_header_len); |
skb | 670 | net/ipv4/ip_output.c | skb->arp = 0; |
skb | 678 | net/ipv4/ip_output.c | if(dev->hard_header(skb,dev,ETH_P_IP,NULL,NULL,0)>0) |
skb | 679 | net/ipv4/ip_output.c | skb->arp=1; |
skb | 682 | net/ipv4/ip_output.c | skb->arp=1; |
skb | 683 | net/ipv4/ip_output.c | skb->ip_hdr=iph=(struct iphdr *)skb_put(skb,length); |
skb | 700 | net/ipv4/ip_output.c | ip_options_build(skb, opt, |
skb | 711 | net/ipv4/ip_output.c | if(call_out_firewall(PF_INET, skb, iph)< FW_ACCEPT) |
skb | 713 | net/ipv4/ip_output.c | kfree_skb(skb, FREE_WRITE); |
skb | 721 | net/ipv4/ip_output.c | dev_queue_xmit(skb,dev,sk->priority); |
skb | 725 | net/ipv4/ip_output.c | kfree_skb(skb, FREE_WRITE); |
skb | 800 | net/ipv4/ip_output.c | struct sk_buff * skb; |
skb | 808 | net/ipv4/ip_output.c | skb = sock_alloc_send_skb(sk, fraglen+15, 0, noblock, &error); |
skb | 809 | net/ipv4/ip_output.c | if (skb == NULL) |
skb | 822 | net/ipv4/ip_output.c | skb->dev = dev; |
skb | 823 | net/ipv4/ip_output.c | skb->protocol = htons(ETH_P_IP); |
skb | 824 | net/ipv4/ip_output.c | skb->when = jiffies; |
skb | 825 | net/ipv4/ip_output.c | skb->free = 1; /* dubious, this one */ |
skb | 826 | net/ipv4/ip_output.c | skb->sk = sk; |
skb | 827 | net/ipv4/ip_output.c | skb->arp = 0; |
skb | 828 | net/ipv4/ip_output.c | skb->saddr = saddr; |
skb | 829 | net/ipv4/ip_output.c | skb->raddr = raddr; |
skb | 830 | net/ipv4/ip_output.c | skb_reserve(skb,(dev->hard_header_len+15)&~15); |
skb | 831 | net/ipv4/ip_output.c | data = skb_put(skb, fraglen-dev->hard_header_len); |
skb | 842 | net/ipv4/ip_output.c | skb->arp=1; |
skb | 843 | net/ipv4/ip_output.c | memcpy(skb_push(skb,dev->hard_header_len),hh->hh_data,dev->hard_header_len); |
skb | 846 | net/ipv4/ip_output.c | skb->arp = 0; |
skb | 854 | net/ipv4/ip_output.c | if(dev->hard_header(skb, dev, ETH_P_IP, |
skb | 856 | net/ipv4/ip_output.c | skb->arp=1; |
skb | 863 | net/ipv4/ip_output.c | skb->ip_hdr = iph = (struct iphdr *)data; |
skb | 876 | net/ipv4/ip_output.c | ip_options_build(skb, opt, |
skb | 915 | net/ipv4/ip_output.c | if(!offset && call_out_firewall(PF_INET, skb, iph) < FW_ACCEPT) |
skb | 917 | net/ipv4/ip_output.c | kfree_skb(skb, FREE_WRITE); |
skb | 946 | net/ipv4/ip_output.c | if(skb->daddr==IGMP_ALL_HOSTS || (dev->flags&IFF_ALLMULTI)) |
skb | 947 | net/ipv4/ip_output.c | ip_loopback(dev,skb); |
skb | 955 | net/ipv4/ip_output.c | ip_loopback(dev,skb); |
skb | 968 | net/ipv4/ip_output.c | if(skb->ip_hdr->ttl==0) |
skb | 969 | net/ipv4/ip_output.c | kfree_skb(skb, FREE_READ); |
skb | 980 | net/ipv4/ip_output.c | ip_loopback(dev,skb); |
skb | 988 | net/ipv4/ip_output.c | dev_queue_xmit(skb, dev, sk->priority); |
skb | 999 | net/ipv4/ip_output.c | kfree_skb(skb, FREE_WRITE); |
skb | 1038 | net/ipv4/ip_output.c | struct sk_buff *skb=alloc_skb(sizeof(struct netlink_rtinfo), GFP_ATOMIC); |
skb | 1041 | net/ipv4/ip_output.c | if(skb==NULL) |
skb | 1043 | net/ipv4/ip_output.c | skb->free=1; |
skb | 1044 | net/ipv4/ip_output.c | nrt=(struct netlink_rtinfo *)skb_put(skb, sizeof(struct netlink_rtinfo)); |
skb | 1058 | net/ipv4/ip_output.c | netlink_post(NETLINK_ROUTE, skb); |
skb | 52 | net/ipv4/ipip.c | int ipip_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 68 | net/ipv4/ipip.c | skb_pull(skb, ((struct iphdr *)skb->data)->ihl<<2); |
skb | 74 | net/ipv4/ipip.c | skb->h.iph=(struct iphdr *)skb->data; |
skb | 75 | net/ipv4/ipip.c | skb->ip_hdr=(struct iphdr *)skb->data; |
skb | 76 | net/ipv4/ipip.c | memset(skb->proto_priv, 0, sizeof(struct options)); |
skb | 77 | net/ipv4/ipip.c | if (skb->ip_hdr->ihl > 5) |
skb | 79 | net/ipv4/ipip.c | if (ip_options_compile(NULL, skb)) |
skb | 88 | net/ipv4/ipip.c | if((err=call_in_firewall(PF_INET, skb, skb->ip_hdr))<FW_ACCEPT) |
skb | 91 | net/ipv4/ipip.c | icmp_send(skb,ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0 , dev); |
skb | 92 | net/ipv4/ipip.c | kfree_skb(skb, FREE_READ); |
skb | 108 | net/ipv4/ipip.c | if(ip_forward(skb, dev, 0, daddr)) |
skb | 109 | net/ipv4/ipip.c | kfree_skb(skb, FREE_READ); |
skb | 99 | net/ipv4/ipmr.c | struct sk_buff *skb; |
skb | 140 | net/ipv4/ipmr.c | while((skb=skb_dequeue(&cache->mfc_unresolved))) |
skb | 141 | net/ipv4/ipmr.c | kfree_skb(skb, FREE_WRITE); |
skb | 216 | net/ipv4/ipmr.c | struct sk_buff *skb; |
skb | 244 | net/ipv4/ipmr.c | while((skb=skb_dequeue(&cache->mfc_unresolved))) |
skb | 245 | net/ipv4/ipmr.c | ipmr_forward(skb, skb->protocol); |
skb | 255 | net/ipv4/ipmr.c | struct sk_buff *skb=alloc_skb(128, GFP_ATOMIC); |
skb | 258 | net/ipv4/ipmr.c | if(!skb) |
skb | 261 | net/ipv4/ipmr.c | skb->free=1; |
skb | 267 | net/ipv4/ipmr.c | skb->ip_hdr=(struct iphdr *)skb_put(skb,ihl); |
skb | 268 | net/ipv4/ipmr.c | skb->h.iph=skb->ip_hdr; |
skb | 269 | net/ipv4/ipmr.c | memcpy(skb->data,pkt->data,ihl); |
skb | 270 | net/ipv4/ipmr.c | skb->ip_hdr->protocol = 0; /* Flag to the kernel this is a route add */ |
skb | 276 | net/ipv4/ipmr.c | igmp=(struct igmphdr *)skb_put(skb,sizeof(struct igmphdr)); |
skb | 279 | net/ipv4/ipmr.c | skb->ip_hdr->tot_len=htons(skb->len); /* Fix the length */ |
skb | 284 | net/ipv4/ipmr.c | if(sock_queue_rcv_skb(mroute_socket,skb)<0) |
skb | 286 | net/ipv4/ipmr.c | skb->sk=NULL; |
skb | 287 | net/ipv4/ipmr.c | kfree_skb(skb, FREE_READ); |
skb | 296 | net/ipv4/ipmr.c | static void ipmr_cache_unresolved(struct mfc_cache *cache, vifi_t vifi, struct sk_buff *skb, int is_frag) |
skb | 305 | net/ipv4/ipmr.c | kfree_skb(skb, FREE_WRITE); |
skb | 312 | net/ipv4/ipmr.c | cache->mfc_origin=skb->ip_hdr->saddr; |
skb | 313 | net/ipv4/ipmr.c | cache->mfc_mcastgrp=skb->ip_hdr->daddr; |
skb | 330 | net/ipv4/ipmr.c | ipmr_cache_report(skb); |
skb | 337 | net/ipv4/ipmr.c | kfree_skb(skb, FREE_WRITE); |
skb | 345 | net/ipv4/ipmr.c | skb->protocol=is_frag; |
skb | 346 | net/ipv4/ipmr.c | skb_queue_tail(&cache->mfc_unresolved,skb); |
skb | 702 | net/ipv4/ipmr.c | static void ipmr_queue_xmit(struct sk_buff *skb, struct vif_device *vif, struct device *in_dev, int frag) |
skb | 705 | net/ipv4/ipmr.c | __u32 raddr=skb->raddr; |
skb | 712 | net/ipv4/ipmr.c | vif->bytes_out+=skb->len; |
skb | 713 | net/ipv4/ipmr.c | skb->dev=vif->dev; |
skb | 714 | net/ipv4/ipmr.c | skb->raddr=skb->h.iph->daddr; |
skb | 719 | net/ipv4/ipmr.c | if(vif->dev==NULL || ip_forward(skb, in_dev, frag|IPFWD_MULTICASTING|tunnel, raddr)==-1) |
skb | 720 | net/ipv4/ipmr.c | kfree_skb(skb, FREE_WRITE); |
skb | 727 | net/ipv4/ipmr.c | void ipmr_forward(struct sk_buff *skb, int is_frag) |
skb | 732 | net/ipv4/ipmr.c | int vif=ipmr_vifi_find(skb->dev); |
skb | 735 | net/ipv4/ipmr.c | kfree_skb(skb, FREE_WRITE); |
skb | 740 | net/ipv4/ipmr.c | vif_table[vif].bytes_in+=skb->len; |
skb | 742 | net/ipv4/ipmr.c | cache=ipmr_cache_find(skb->ip_hdr->saddr,skb->ip_hdr->daddr); |
skb | 749 | net/ipv4/ipmr.c | ipmr_cache_unresolved(cache,vif,skb, is_frag); |
skb | 761 | net/ipv4/ipmr.c | if(skb->ip_hdr->ttl > cache->mfc_ttls[ct] && cache->mfc_ttls[ct]>0) |
skb | 769 | net/ipv4/ipmr.c | skb2=skb_copy(skb, GFP_ATOMIC); |
skb | 773 | net/ipv4/ipmr.c | ipmr_queue_xmit(skb2, &vif_table[psend], skb->dev, is_frag); |
skb | 781 | net/ipv4/ipmr.c | kfree_skb(skb, FREE_WRITE); |
skb | 784 | net/ipv4/ipmr.c | ipmr_queue_xmit(skb, &vif_table[psend], skb->dev, is_frag); |
skb | 79 | net/ipv4/packet.c | int packet_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 95 | net/ipv4/packet.c | skb_push(skb,skb->data-skb->mac.raw); |
skb | 101 | net/ipv4/packet.c | skb->dev = dev; |
skb | 108 | net/ipv4/packet.c | if(sock_queue_rcv_skb(sk,skb)<0) |
skb | 110 | net/ipv4/packet.c | skb->sk = NULL; |
skb | 111 | net/ipv4/packet.c | kfree_skb(skb, FREE_READ); |
skb | 130 | net/ipv4/packet.c | struct sk_buff *skb; |
skb | 175 | net/ipv4/packet.c | skb = sock_wmalloc(sk, len, 0, GFP_KERNEL); |
skb | 183 | net/ipv4/packet.c | if (skb == NULL) |
skb | 192 | net/ipv4/packet.c | skb->sk = sk; |
skb | 193 | net/ipv4/packet.c | skb->free = 1; |
skb | 194 | net/ipv4/packet.c | memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len); |
skb | 195 | net/ipv4/packet.c | skb->arp = 1; /* No ARP needs doing on this (complete) frame */ |
skb | 196 | net/ipv4/packet.c | skb->protocol = proto; |
skb | 203 | net/ipv4/packet.c | dev_queue_xmit(skb, dev, sk->priority); |
skb | 205 | net/ipv4/packet.c | kfree_skb(skb, FREE_WRITE); |
skb | 404 | net/ipv4/packet.c | struct sk_buff *skb; |
skb | 432 | net/ipv4/packet.c | skb=skb_recv_datagram(sk,flags,noblock,&err); |
skb | 440 | net/ipv4/packet.c | if(skb==NULL) |
skb | 448 | net/ipv4/packet.c | copied = min(len, skb->len); |
skb | 450 | net/ipv4/packet.c | memcpy_toiovec(msg->msg_iov, skb->data, copied); /* We can't use skb_copy_datagram here */ |
skb | 451 | net/ipv4/packet.c | sk->stamp=skb->stamp; |
skb | 459 | net/ipv4/packet.c | saddr->spkt_family = skb->dev->type; |
skb | 460 | net/ipv4/packet.c | strncpy(saddr->spkt_device,skb->dev->name, 15); |
skb | 461 | net/ipv4/packet.c | saddr->spkt_protocol = skb->protocol; |
skb | 469 | net/ipv4/packet.c | skb_free_datagram(sk, skb); |
skb | 194 | net/ipv4/rarp.c | static int rarp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 199 | net/ipv4/rarp.c | struct arphdr *rarp = (struct arphdr *) skb->data; |
skb | 200 | net/ipv4/rarp.c | unsigned char *rarp_ptr = skb_pull(skb,sizeof(struct arphdr)); |
skb | 212 | net/ipv4/rarp.c | kfree_skb(skb, FREE_READ); |
skb | 221 | net/ipv4/rarp.c | kfree_skb(skb, FREE_READ); |
skb | 239 | net/ipv4/rarp.c | kfree_skb(skb, FREE_READ); |
skb | 275 | net/ipv4/rarp.c | kfree_skb(skb, FREE_READ); |
skb | 119 | net/ipv4/raw.c | int raw_rcv(struct sock *sk, struct sk_buff *skb, struct device *dev, __u32 saddr, __u32 daddr) |
skb | 122 | net/ipv4/raw.c | skb->sk = sk; |
skb | 123 | net/ipv4/raw.c | skb_trim(skb,ntohs(skb->ip_hdr->tot_len)); |
skb | 125 | net/ipv4/raw.c | skb->h.raw = (unsigned char *) skb->ip_hdr; |
skb | 126 | net/ipv4/raw.c | skb->dev = dev; |
skb | 127 | net/ipv4/raw.c | skb->saddr = daddr; |
skb | 128 | net/ipv4/raw.c | skb->daddr = saddr; |
skb | 137 | net/ipv4/raw.c | skb->ip_hdr->tot_len=ntohs(skb->ip_hdr->tot_len-4*skb->ip_hdr->ihl); |
skb | 142 | net/ipv4/raw.c | if(sock_queue_rcv_skb(sk,skb)<0) |
skb | 145 | net/ipv4/raw.c | skb->sk=NULL; |
skb | 146 | net/ipv4/raw.c | kfree_skb(skb, FREE_READ); |
skb | 318 | net/ipv4/raw.c | struct sk_buff *skb; |
skb | 331 | net/ipv4/raw.c | skb=skb_recv_datagram(sk,flags,noblock,&err); |
skb | 332 | net/ipv4/raw.c | if(skb==NULL) |
skb | 335 | net/ipv4/raw.c | copied = min(len, skb->len); |
skb | 337 | net/ipv4/raw.c | skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); |
skb | 338 | net/ipv4/raw.c | sk->stamp=skb->stamp; |
skb | 344 | net/ipv4/raw.c | sin->sin_addr.s_addr = skb->daddr; |
skb | 346 | net/ipv4/raw.c | skb_free_datagram(sk, skb); |
skb | 460 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 464 | net/ipv4/tcp.c | skb=tcp_find_established(s); |
skb | 465 | net/ipv4/tcp.c | if(skb!=NULL) |
skb | 466 | net/ipv4/tcp.c | skb_unlink(skb); /* Take it off the queue */ |
skb | 468 | net/ipv4/tcp.c | return skb; |
skb | 479 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 481 | net/ipv4/tcp.c | while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) |
skb | 483 | net/ipv4/tcp.c | tcp_close(skb->sk, 0); |
skb | 484 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 603 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 612 | net/ipv4/tcp.c | if (sk == NULL || (skb = skb_peek(&sk->receive_queue)) == NULL) |
skb | 629 | net/ipv4/tcp.c | if (before(counted, skb->seq)) /* Found a hole so stops here */ |
skb | 631 | net/ipv4/tcp.c | sum = skb->len - (counted - skb->seq); /* Length - header but start from where we are up to (avoid overlaps) */ |
skb | 632 | net/ipv4/tcp.c | if (skb->h.th->syn) |
skb | 637 | net/ipv4/tcp.c | if (skb->h.th->syn) |
skb | 657 | net/ipv4/tcp.c | if (skb->h.th->urg) |
skb | 659 | net/ipv4/tcp.c | if (amount && skb->h.th->psh) break; |
skb | 660 | net/ipv4/tcp.c | skb = skb->next; |
skb | 662 | net/ipv4/tcp.c | while(skb != (struct sk_buff *)&sk->receive_queue); |
skb | 806 | net/ipv4/tcp.c | unsigned long daddr, int len, struct sk_buff *skb) |
skb | 813 | net/ipv4/tcp.c | csum_partial((char *)th,sizeof(*th),skb->csum)); |
skb | 899 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 997 | net/ipv4/tcp.c | if ((skb = tcp_dequeue_partial(sk)) != NULL) |
skb | 1001 | net/ipv4/tcp.c | tcp_size = skb->tail - (unsigned char *)(skb->h.th + 1); |
skb | 1013 | net/ipv4/tcp.c | memcpy_fromfs(skb_put(skb,copy), from, copy); |
skb | 1014 | net/ipv4/tcp.c | skb->csum = csum_partial(skb->tail - tcp_size, tcp_size, 0); |
skb | 1022 | net/ipv4/tcp.c | tcp_send_skb(sk, skb); |
skb | 1024 | net/ipv4/tcp.c | tcp_enqueue_partial(skb, sk); |
skb | 1058 | net/ipv4/tcp.c | skb = sock_wmalloc(sk, sk->mtu + 128 + prot->max_header + 15, 0, GFP_KERNEL); |
skb | 1059 | net/ipv4/tcp.c | send_tmp = skb; |
skb | 1063 | net/ipv4/tcp.c | skb = sock_wmalloc(sk, copy + prot->max_header + 15 , 0, GFP_KERNEL); |
skb | 1070 | net/ipv4/tcp.c | if (skb == NULL) |
skb | 1091 | net/ipv4/tcp.c | skb->sk = sk; |
skb | 1092 | net/ipv4/tcp.c | skb->free = 0; |
skb | 1093 | net/ipv4/tcp.c | skb->localroute = sk->localroute|(flags&MSG_DONTROUTE); |
skb | 1100 | net/ipv4/tcp.c | tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev, |
skb | 1101 | net/ipv4/tcp.c | IPPROTO_TCP, sk->opt, skb->truesize,sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache); |
skb | 1104 | net/ipv4/tcp.c | sock_wfree(sk, skb); |
skb | 1110 | net/ipv4/tcp.c | skb->ip_hdr->frag_off |= htons(IP_DF); |
skb | 1112 | net/ipv4/tcp.c | skb->dev = dev; |
skb | 1113 | net/ipv4/tcp.c | skb->h.th =(struct tcphdr *)skb_put(skb,sizeof(struct tcphdr)); |
skb | 1114 | net/ipv4/tcp.c | tmp = tcp_build_header(skb->h.th, sk, seglen-copy); |
skb | 1117 | net/ipv4/tcp.c | sock_wfree(sk, skb); |
skb | 1125 | net/ipv4/tcp.c | skb->h.th->urg = 1; |
skb | 1126 | net/ipv4/tcp.c | skb->h.th->urg_ptr = ntohs(copy); |
skb | 1129 | net/ipv4/tcp.c | skb->csum = csum_partial_copy_fromuser(from, |
skb | 1130 | net/ipv4/tcp.c | skb_put(skb,copy), copy, 0); |
skb | 1136 | net/ipv4/tcp.c | skb->free = 0; |
skb | 1144 | net/ipv4/tcp.c | tcp_send_skb(sk, skb); |
skb | 1353 | net/ipv4/tcp.c | static inline void tcp_eat_skb(struct sock *sk, struct sk_buff * skb) |
skb | 1355 | net/ipv4/tcp.c | skb->sk = sk; |
skb | 1356 | net/ipv4/tcp.c | __skb_unlink(skb, &sk->receive_queue); |
skb | 1357 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 1369 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 1375 | net/ipv4/tcp.c | while ((skb=skb_peek(&sk->receive_queue)) != NULL) { |
skb | 1376 | net/ipv4/tcp.c | if (!skb->used || skb->users) |
skb | 1378 | net/ipv4/tcp.c | tcp_eat_skb(sk, skb); |
skb | 1431 | net/ipv4/tcp.c | struct sk_buff * skb; |
skb | 1447 | net/ipv4/tcp.c | skb = skb_peek(&sk->receive_queue); |
skb | 1450 | net/ipv4/tcp.c | if (!skb) |
skb | 1452 | net/ipv4/tcp.c | if (before(*seq, skb->seq)) |
skb | 1454 | net/ipv4/tcp.c | offset = *seq - skb->seq; |
skb | 1455 | net/ipv4/tcp.c | if (skb->h.th->syn) |
skb | 1457 | net/ipv4/tcp.c | if (offset < skb->len) |
skb | 1459 | net/ipv4/tcp.c | if (skb->h.th->fin) |
skb | 1462 | net/ipv4/tcp.c | skb->used = 1; |
skb | 1463 | net/ipv4/tcp.c | skb = skb->next; |
skb | 1465 | net/ipv4/tcp.c | while (skb != (struct sk_buff *)&sk->receive_queue); |
skb | 1521 | net/ipv4/tcp.c | skb->users++; |
skb | 1527 | net/ipv4/tcp.c | used = skb->len - offset; |
skb | 1566 | net/ipv4/tcp.c | memcpy_toiovec(msg->msg_iov,((unsigned char *)skb->h.th) + |
skb | 1567 | net/ipv4/tcp.c | skb->h.th->doff*4 + offset, used); |
skb | 1577 | net/ipv4/tcp.c | skb->users --; |
skb | 1581 | net/ipv4/tcp.c | if (used + offset < skb->len) |
skb | 1588 | net/ipv4/tcp.c | if (skb->h.th->fin) |
skb | 1592 | net/ipv4/tcp.c | skb->used = 1; |
skb | 1593 | net/ipv4/tcp.c | if (!skb->users) |
skb | 1594 | net/ipv4/tcp.c | tcp_eat_skb(sk, skb); |
skb | 1606 | net/ipv4/tcp.c | skb->used = 1; |
skb | 1765 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 1797 | net/ipv4/tcp.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) |
skb | 1798 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 1851 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 1868 | net/ipv4/tcp.c | while((skb = tcp_dequeue_established(sk)) == NULL) |
skb | 1894 | net/ipv4/tcp.c | newsk = skb->sk; |
skb | 1896 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 209 | net/ipv4/tcp_input.c | static int tcp_reset(struct sock *sk, struct sk_buff *skb) |
skb | 240 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 317 | net/ipv4/tcp_input.c | static void tcp_conn_request(struct sock *sk, struct sk_buff *skb, |
skb | 324 | net/ipv4/tcp_input.c | th = skb->h.th; |
skb | 337 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 352 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 369 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 383 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 386 | net/ipv4/tcp_input.c | if (ip_options_echo(sk->opt, opt, daddr, saddr, skb)) |
skb | 391 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 424 | net/ipv4/tcp_input.c | newsk->acked_seq = skb->seq+1; |
skb | 425 | net/ipv4/tcp_input.c | newsk->lastwin_seq = skb->seq+1; |
skb | 427 | net/ipv4/tcp_input.c | newsk->copied_seq = skb->seq+1; |
skb | 428 | net/ipv4/tcp_input.c | newsk->fin_seq = skb->seq; |
skb | 445 | net/ipv4/tcp_input.c | newsk->dummy_th.source = skb->h.th->dest; |
skb | 446 | net/ipv4/tcp_input.c | newsk->dummy_th.dest = skb->h.th->source; |
skb | 457 | net/ipv4/tcp_input.c | newsk->acked_seq = skb->seq + 1; |
skb | 458 | net/ipv4/tcp_input.c | newsk->copied_seq = skb->seq + 1; |
skb | 466 | net/ipv4/tcp_input.c | newsk->ip_tos=skb->ip_hdr->tos; |
skb | 515 | net/ipv4/tcp_input.c | tcp_options(newsk,skb->h.th); |
skb | 518 | net/ipv4/tcp_input.c | tcp_send_synack(newsk, sk, skb); |
skb | 534 | net/ipv4/tcp_input.c | struct sk_buff *skb; |
skb | 549 | net/ipv4/tcp_input.c | skb = skb2; |
skb | 550 | net/ipv4/tcp_input.c | skb2 = skb->link3; |
skb | 551 | net/ipv4/tcp_input.c | skb->link3 = NULL; |
skb | 552 | net/ipv4/tcp_input.c | if (after(skb->end_seq, window_seq)) |
skb | 557 | net/ipv4/tcp_input.c | if (skb->next != NULL) |
skb | 559 | net/ipv4/tcp_input.c | skb_unlink(skb); |
skb | 563 | net/ipv4/tcp_input.c | skb_queue_head(&sk->write_queue,skb); |
skb | 565 | net/ipv4/tcp_input.c | skb_append(wskb,skb); |
skb | 566 | net/ipv4/tcp_input.c | wskb = skb; |
skb | 572 | net/ipv4/tcp_input.c | sk->send_head = skb; |
skb | 573 | net/ipv4/tcp_input.c | sk->send_tail = skb; |
skb | 577 | net/ipv4/tcp_input.c | sk->send_tail->link3 = skb; |
skb | 578 | net/ipv4/tcp_input.c | sk->send_tail = skb; |
skb | 580 | net/ipv4/tcp_input.c | skb->link3 = NULL; |
skb | 1108 | net/ipv4/tcp_input.c | static int tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th) |
skb | 1110 | net/ipv4/tcp_input.c | sk->fin_seq = skb->end_seq; |
skb | 1190 | net/ipv4/tcp_input.c | static inline u32 tcp_queue_ack(struct sk_buff * skb, struct sock * sk) |
skb | 1196 | net/ipv4/tcp_input.c | skb->acked = 1; |
skb | 1197 | net/ipv4/tcp_input.c | if (skb->h.th->fin) |
skb | 1198 | net/ipv4/tcp_input.c | tcp_fin(skb,sk,skb->h.th); |
skb | 1199 | net/ipv4/tcp_input.c | return skb->end_seq; |
skb | 1207 | net/ipv4/tcp_input.c | static void tcp_queue(struct sk_buff * skb, struct sock * sk, |
skb | 1220 | net/ipv4/tcp_input.c | if (!after(next->seq, skb->seq)) |
skb | 1228 | net/ipv4/tcp_input.c | __skb_append(next, skb, list); |
skb | 1229 | net/ipv4/tcp_input.c | next = skb->next; |
skb | 1235 | net/ipv4/tcp_input.c | if (!after(skb->seq, ack_seq) && after(skb->end_seq, ack_seq)) { |
skb | 1236 | net/ipv4/tcp_input.c | ack_seq = tcp_queue_ack(skb, sk); |
skb | 1293 | net/ipv4/tcp_input.c | static int tcp_data(struct sk_buff *skb, struct sock *sk, |
skb | 1299 | net/ipv4/tcp_input.c | th = skb->h.th; |
skb | 1300 | net/ipv4/tcp_input.c | skb_pull(skb,th->doff*4); |
skb | 1301 | net/ipv4/tcp_input.c | skb_trim(skb,len-(th->doff*4)); |
skb | 1308 | net/ipv4/tcp_input.c | sk->bytes_rcv += skb->len; |
skb | 1310 | net/ipv4/tcp_input.c | if (skb->len == 0 && !th->fin) |
skb | 1318 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1337 | net/ipv4/tcp_input.c | if(skb->len) /* We don't care if it's just an ack or |
skb | 1340 | net/ipv4/tcp_input.c | new_seq = skb->seq + skb->len + th->syn; /* Right edge of _data_ part of frame */ |
skb | 1363 | net/ipv4/tcp_input.c | tcp_send_reset(sk->saddr, sk->daddr, skb->h.th, |
skb | 1364 | net/ipv4/tcp_input.c | sk->prot, NULL, skb->dev, sk->ip_tos, sk->ip_ttl); |
skb | 1370 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1379 | net/ipv4/tcp_input.c | tcp_queue(skb, sk, th, saddr); |
skb | 1386 | net/ipv4/tcp_input.c | if (!skb->acked) |
skb | 1510 | net/ipv4/tcp_input.c | int tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 1524 | net/ipv4/tcp_input.c | th = skb->h.th; |
skb | 1525 | net/ipv4/tcp_input.c | sk = skb->sk; |
skb | 1528 | net/ipv4/tcp_input.c | if (skb->pkt_type!=PACKET_HOST) |
skb | 1535 | net/ipv4/tcp_input.c | skb_pull(skb, skb->h.raw-skb->data); |
skb | 1540 | net/ipv4/tcp_input.c | switch (skb->ip_summed) |
skb | 1543 | net/ipv4/tcp_input.c | skb->csum = csum_partial((char *)th, len, 0); |
skb | 1545 | net/ipv4/tcp_input.c | if (tcp_check(th, len, saddr, daddr, skb->csum)) |
skb | 1553 | net/ipv4/tcp_input.c | skb->sk = sk; |
skb | 1554 | net/ipv4/tcp_input.c | skb->seq = ntohl(th->seq); |
skb | 1555 | net/ipv4/tcp_input.c | skb->end_seq = skb->seq + th->syn + th->fin + len - th->doff*4; |
skb | 1556 | net/ipv4/tcp_input.c | skb->ack_seq = ntohl(th->ack_seq); |
skb | 1558 | net/ipv4/tcp_input.c | skb->acked = 0; |
skb | 1559 | net/ipv4/tcp_input.c | skb->used = 0; |
skb | 1560 | net/ipv4/tcp_input.c | skb->free = 1; |
skb | 1561 | net/ipv4/tcp_input.c | skb->saddr = daddr; |
skb | 1562 | net/ipv4/tcp_input.c | skb->daddr = saddr; |
skb | 1569 | net/ipv4/tcp_input.c | __skb_queue_tail(&sk->back_log, skb); |
skb | 1597 | net/ipv4/tcp_input.c | skb->sk=sk; |
skb | 1598 | net/ipv4/tcp_input.c | atomic_add(skb->truesize, &sk->rmem_alloc); |
skb | 1632 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1640 | net/ipv4/tcp_input.c | tcp_conn_request(sk, skb, daddr, saddr, opt, dev, tcp_init_seq()); |
skb | 1661 | net/ipv4/tcp_input.c | if (sk->state == TCP_SYN_RECV && th->syn && skb->seq+1 == sk->acked_seq) |
skb | 1663 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1679 | net/ipv4/tcp_input.c | if(!tcp_ack(sk,th,skb->ack_seq,len)) |
skb | 1686 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1690 | net/ipv4/tcp_input.c | return tcp_reset(sk,skb); |
skb | 1698 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1706 | net/ipv4/tcp_input.c | sk->acked_seq = skb->seq+1; |
skb | 1707 | net/ipv4/tcp_input.c | sk->lastwin_seq = skb->seq+1; |
skb | 1708 | net/ipv4/tcp_input.c | sk->fin_seq = skb->seq; |
skb | 1737 | net/ipv4/tcp_input.c | return tcp_reset(sk,skb); |
skb | 1747 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1770 | net/ipv4/tcp_input.c | after(skb->seq, sk->acked_seq) && !th->rst) |
skb | 1776 | net/ipv4/tcp_input.c | atomic_sub(skb->truesize, &sk->rmem_alloc); |
skb | 1777 | net/ipv4/tcp_input.c | skb->sk = NULL; |
skb | 1785 | net/ipv4/tcp_input.c | skb->sk = sk; |
skb | 1786 | net/ipv4/tcp_input.c | atomic_add(skb->truesize, &sk->rmem_alloc); |
skb | 1787 | net/ipv4/tcp_input.c | tcp_conn_request(sk, skb, daddr, saddr,opt, dev,seq+128000); |
skb | 1790 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1802 | net/ipv4/tcp_input.c | if (!tcp_sequence(sk, skb->seq, skb->end_seq-th->syn)) |
skb | 1805 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1810 | net/ipv4/tcp_input.c | return tcp_reset(sk,skb); |
skb | 1818 | net/ipv4/tcp_input.c | tcp_send_reset(daddr,saddr,th, &tcp_prot, opt, dev, skb->ip_hdr->tos, 255); |
skb | 1819 | net/ipv4/tcp_input.c | return tcp_reset(sk,skb); |
skb | 1829 | net/ipv4/tcp_input.c | if(th->ack && !tcp_ack(sk,th,skb->ack_seq,len)) |
skb | 1839 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1858 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1874 | net/ipv4/tcp_input.c | if(tcp_data(skb,sk, saddr, len)) |
skb | 1875 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1887 | net/ipv4/tcp_input.c | tcp_send_reset(daddr, saddr, th, &tcp_prot, opt,dev,skb->ip_hdr->tos,255); |
skb | 1893 | net/ipv4/tcp_input.c | skb->sk = NULL; |
skb | 1894 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 31 | net/ipv4/tcp_output.c | void tcp_send_skb(struct sock *sk, struct sk_buff *skb) |
skb | 34 | net/ipv4/tcp_output.c | struct tcphdr * th = skb->h.th; |
skb | 40 | net/ipv4/tcp_output.c | size = skb->len - ((unsigned char *) th - skb->data); |
skb | 46 | net/ipv4/tcp_output.c | if (size < sizeof(struct tcphdr) || size > skb->len) |
skb | 49 | net/ipv4/tcp_output.c | skb, skb->data, th, skb->len); |
skb | 50 | net/ipv4/tcp_output.c | kfree_skb(skb, FREE_WRITE); |
skb | 65 | net/ipv4/tcp_output.c | kfree_skb(skb,FREE_WRITE); |
skb | 75 | net/ipv4/tcp_output.c | skb->seq = ntohl(th->seq); |
skb | 76 | net/ipv4/tcp_output.c | skb->end_seq = skb->seq + size - 4*th->doff; |
skb | 86 | net/ipv4/tcp_output.c | if (after(skb->end_seq, sk->window_seq) || |
skb | 93 | net/ipv4/tcp_output.c | if (skb->next != NULL) |
skb | 96 | net/ipv4/tcp_output.c | skb_unlink(skb); |
skb | 98 | net/ipv4/tcp_output.c | skb_queue_tail(&sk->write_queue, skb); |
skb | 113 | net/ipv4/tcp_output.c | tcp_send_check(th, sk->saddr, sk->daddr, size, skb); |
skb | 123 | net/ipv4/tcp_output.c | sk->prot->queue_xmit(sk, skb->dev, skb, 0); |
skb | 150 | net/ipv4/tcp_output.c | struct sk_buff * skb; |
skb | 155 | net/ipv4/tcp_output.c | skb = sk->partial; |
skb | 156 | net/ipv4/tcp_output.c | if (skb) { |
skb | 161 | net/ipv4/tcp_output.c | return skb; |
skb | 170 | net/ipv4/tcp_output.c | struct sk_buff *skb; |
skb | 174 | net/ipv4/tcp_output.c | while ((skb = tcp_dequeue_partial(sk)) != NULL) |
skb | 175 | net/ipv4/tcp_output.c | tcp_send_skb(sk, skb); |
skb | 182 | net/ipv4/tcp_output.c | void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk) |
skb | 192 | net/ipv4/tcp_output.c | sk->partial = skb; |
skb | 214 | net/ipv4/tcp_output.c | struct sk_buff *skb; |
skb | 232 | net/ipv4/tcp_output.c | while((skb = skb_peek(&sk->write_queue)) != NULL && |
skb | 233 | net/ipv4/tcp_output.c | !after(skb->end_seq, sk->window_seq) && |
skb | 236 | net/ipv4/tcp_output.c | !after(skb->end_seq, sk->rcv_ack_seq)) |
skb | 239 | net/ipv4/tcp_output.c | IS_SKB(skb); |
skb | 240 | net/ipv4/tcp_output.c | skb_unlink(skb); |
skb | 246 | net/ipv4/tcp_output.c | if (before(skb->end_seq, sk->rcv_ack_seq +1)) |
skb | 254 | net/ipv4/tcp_output.c | kfree_skb(skb, FREE_WRITE); |
skb | 270 | net/ipv4/tcp_output.c | iph = skb->ip_hdr; |
skb | 272 | net/ipv4/tcp_output.c | size = skb->len - (((unsigned char *) th) - skb->data); |
skb | 284 | net/ipv4/tcp_output.c | tcp_send_check(th, sk->saddr, sk->daddr, size, skb); |
skb | 286 | net/ipv4/tcp_output.c | sk->sent_seq = skb->end_seq; |
skb | 292 | net/ipv4/tcp_output.c | sk->prot->queue_xmit(sk, skb->dev, skb, skb->free); |
skb | 315 | net/ipv4/tcp_output.c | struct sk_buff * skb; |
skb | 322 | net/ipv4/tcp_output.c | skb = sk->send_head; |
skb | 324 | net/ipv4/tcp_output.c | while (skb != NULL) |
skb | 330 | net/ipv4/tcp_output.c | dev = skb->dev; |
skb | 331 | net/ipv4/tcp_output.c | IS_SKB(skb); |
skb | 332 | net/ipv4/tcp_output.c | skb->when = jiffies; |
skb | 344 | net/ipv4/tcp_output.c | if (skb_device_locked(skb)) |
skb | 351 | net/ipv4/tcp_output.c | skb_pull(skb,((unsigned char *)skb->ip_hdr)-skb->data); |
skb | 362 | net/ipv4/tcp_output.c | iph = (struct iphdr *)skb->data; |
skb | 378 | net/ipv4/tcp_output.c | struct options * opt = (struct options*)skb->proto_priv; |
skb | 379 | net/ipv4/tcp_output.c | rt = ip_check_route(&sk->ip_route_cache, opt->srr?opt->faddr:iph->daddr, skb->localroute); |
skb | 391 | net/ipv4/tcp_output.c | if(skb->sk) |
skb | 393 | net/ipv4/tcp_output.c | skb->sk->err_soft=ENETUNREACH; |
skb | 394 | net/ipv4/tcp_output.c | skb->sk->error_report(skb->sk); |
skb | 400 | net/ipv4/tcp_output.c | skb->raddr=rt->rt_gateway; |
skb | 401 | net/ipv4/tcp_output.c | skb->dev=dev; |
skb | 402 | net/ipv4/tcp_output.c | skb->arp=1; |
skb | 405 | net/ipv4/tcp_output.c | memcpy(skb_push(skb,dev->hard_header_len),rt->rt_hh->hh_data,dev->hard_header_len); |
skb | 408 | net/ipv4/tcp_output.c | skb->arp = 0; |
skb | 416 | net/ipv4/tcp_output.c | if(dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, skb->len)<0) |
skb | 417 | net/ipv4/tcp_output.c | skb->arp=0; |
skb | 435 | net/ipv4/tcp_output.c | tcp_send_check(th, sk->saddr, sk->daddr, size, skb); |
skb | 451 | net/ipv4/tcp_output.c | if (sk && !skb_device_locked(skb)) |
skb | 454 | net/ipv4/tcp_output.c | skb_unlink(skb); |
skb | 457 | net/ipv4/tcp_output.c | dev_queue_xmit(skb, dev, sk->priority); |
skb | 485 | net/ipv4/tcp_output.c | skb = skb->link3; |
skb | 662 | net/ipv4/tcp_output.c | void tcp_send_synack(struct sock * newsk, struct sock * sk, struct sk_buff * skb) |
skb | 675 | net/ipv4/tcp_output.c | kfree_skb(skb, FREE_READ); |
skb | 700 | net/ipv4/tcp_output.c | skb->sk = sk; |
skb | 701 | net/ipv4/tcp_output.c | kfree_skb(skb, FREE_READ); |
skb | 708 | net/ipv4/tcp_output.c | memcpy(t1, skb->h.th, sizeof(*t1)); |
skb | 714 | net/ipv4/tcp_output.c | t1->dest = skb->h.th->source; |
skb | 735 | net/ipv4/tcp_output.c | skb->sk = newsk; |
skb | 741 | net/ipv4/tcp_output.c | atomic_sub(skb->truesize, &sk->rmem_alloc); |
skb | 742 | net/ipv4/tcp_output.c | atomic_add(skb->truesize, &newsk->rmem_alloc); |
skb | 744 | net/ipv4/tcp_output.c | skb_queue_tail(&sk->receive_queue,skb); |
skb | 860 | net/ipv4/tcp_output.c | struct sk_buff *buff,*skb; |
skb | 884 | net/ipv4/tcp_output.c | (skb=skb_peek(&sk->write_queue))) |
skb | 910 | net/ipv4/tcp_output.c | iph = (struct iphdr *)skb->ip_hdr; |
skb | 179 | net/ipv4/tcp_timer.c | struct sk_buff *skb; |
skb | 184 | net/ipv4/tcp_timer.c | skb = sk->send_head; |
skb | 185 | net/ipv4/tcp_timer.c | if (!skb) { |
skb | 196 | net/ipv4/tcp_timer.c | if (jiffies < skb->when + sk->rto) |
skb | 200 | net/ipv4/tcp_timer.c | tcp_reset_xmit_timer (sk, TIME_WRITE, skb->when + sk->rto - jiffies); |
skb | 138 | net/ipv4/udp.c | static int udp_deliver(struct sock *sk, struct udphdr *uh, struct sk_buff *skb, struct device *dev, long saddr, long daddr, int len); |
skb | 465 | net/ipv4/udp.c | struct sk_buff *skb; |
skb | 470 | net/ipv4/udp.c | skb = skb_peek(&sk->receive_queue); |
skb | 471 | net/ipv4/udp.c | if (skb != NULL) { |
skb | 477 | net/ipv4/udp.c | amount = skb->len-sizeof(struct udphdr); |
skb | 504 | net/ipv4/udp.c | struct sk_buff *skb; |
skb | 520 | net/ipv4/udp.c | skb=skb_recv_datagram(sk,flags,noblock,&er); |
skb | 521 | net/ipv4/udp.c | if(skb==NULL) |
skb | 524 | net/ipv4/udp.c | truesize = skb->len - sizeof(struct udphdr); |
skb | 531 | net/ipv4/udp.c | skb_copy_datagram_iovec(skb,sizeof(struct udphdr),msg->msg_iov,copied); |
skb | 532 | net/ipv4/udp.c | sk->stamp=skb->stamp; |
skb | 538 | net/ipv4/udp.c | sin->sin_port = skb->h.uh->source; |
skb | 539 | net/ipv4/udp.c | sin->sin_addr.s_addr = skb->daddr; |
skb | 542 | net/ipv4/udp.c | skb_free_datagram(sk, skb); |
skb | 591 | net/ipv4/udp.c | int udp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 607 | net/ipv4/udp.c | uh = (struct udphdr *) skb->h.uh; |
skb | 621 | net/ipv4/udp.c | kfree_skb(skb, FREE_WRITE); |
skb | 633 | net/ipv4/udp.c | ( (skb->ip_summed == CHECKSUM_HW) && udp_check(uh, len, saddr, daddr, skb->csum ) ) || |
skb | 634 | net/ipv4/udp.c | ( (skb->ip_summed == CHECKSUM_NONE) && udp_check(uh, len, saddr, daddr,csum_partial((char*)uh, len, 0))) |
skb | 650 | net/ipv4/udp.c | kfree_skb(skb, FREE_WRITE); |
skb | 674 | net/ipv4/udp.c | skb1=skb_clone(skb,GFP_ATOMIC); |
skb | 676 | net/ipv4/udp.c | skb1=skb; |
skb | 684 | net/ipv4/udp.c | kfree_skb(skb, FREE_READ); |
skb | 705 | net/ipv4/udp.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0, dev); |
skb | 711 | net/ipv4/udp.c | skb->sk = NULL; |
skb | 712 | net/ipv4/udp.c | kfree_skb(skb, FREE_WRITE); |
skb | 715 | net/ipv4/udp.c | return udp_deliver(sk,uh,skb,dev, saddr, daddr, len); |
skb | 718 | net/ipv4/udp.c | static int udp_deliver(struct sock *sk, struct udphdr *uh, struct sk_buff *skb, struct device *dev, long saddr, long daddr, int len) |
skb | 720 | net/ipv4/udp.c | skb->sk = sk; |
skb | 721 | net/ipv4/udp.c | skb->dev = dev; |
skb | 722 | net/ipv4/udp.c | skb_trim(skb,len); |
skb | 728 | net/ipv4/udp.c | skb->daddr = saddr; |
skb | 729 | net/ipv4/udp.c | skb->saddr = daddr; |
skb | 739 | net/ipv4/udp.c | if (sock_queue_rcv_skb(sk,skb)<0) |
skb | 744 | net/ipv4/udp.c | skb->sk = NULL; |
skb | 745 | net/ipv4/udp.c | kfree_skb(skb, FREE_WRITE); |
skb | 187 | net/ipx/af_ipx.c | struct sk_buff *skb; |
skb | 190 | net/ipx/af_ipx.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) { |
skb | 191 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 360 | net/ipx/af_ipx.c | static int ipxitf_def_skb_handler(struct sock *sock, struct sk_buff *skb) |
skb | 364 | net/ipx/af_ipx.c | if((retval = sock_queue_rcv_skb(sock, skb))<0) |
skb | 370 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_WRITE); |
skb | 381 | net/ipx/af_ipx.c | ipxitf_demux_socket(ipx_interface *intrfc, struct sk_buff *skb, int copy) |
skb | 383 | net/ipx/af_ipx.c | ipx_packet *ipx = (ipx_packet *)(skb->h.raw); |
skb | 403 | net/ipx/af_ipx.c | skb1 = skb_clone(skb, GFP_ATOMIC); |
skb | 415 | net/ipx/af_ipx.c | skb1 = skb; |
skb | 437 | net/ipx/af_ipx.c | kfree_skb(skb, FREE_WRITE); |
skb | 445 | net/ipx/af_ipx.c | ipxitf_demux_socket(ipx_interface *intrfc, struct sk_buff *skb, int copy) |
skb | 447 | net/ipx/af_ipx.c | ipx_packet *ipx = (ipx_packet *)(skb->h.raw); |
skb | 490 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_WRITE); |
skb | 504 | net/ipx/af_ipx.c | skb1 = skb_clone(skb, GFP_ATOMIC); |
skb | 510 | net/ipx/af_ipx.c | skb1 = skb; |
skb | 543 | net/ipx/af_ipx.c | ipxitf_adjust_skbuff(ipx_interface *intrfc, struct sk_buff *skb) |
skb | 546 | net/ipx/af_ipx.c | int in_offset = skb->h.raw - skb->head; |
skb | 552 | net/ipx/af_ipx.c | skb->arp = skb->free = 1; |
skb | 553 | net/ipx/af_ipx.c | return skb; |
skb | 557 | net/ipx/af_ipx.c | len = skb->len + out_offset; |
skb | 561 | net/ipx/af_ipx.c | skb2->h.raw=skb_put(skb2,skb->len); |
skb | 564 | net/ipx/af_ipx.c | memcpy(skb2->h.raw, skb->h.raw, skb->len); |
skb | 566 | net/ipx/af_ipx.c | kfree_skb(skb, FREE_WRITE); |
skb | 570 | net/ipx/af_ipx.c | static int ipxitf_send(ipx_interface *intrfc, struct sk_buff *skb, char *node) |
skb | 572 | net/ipx/af_ipx.c | ipx_packet *ipx = (ipx_packet *)(skb->h.raw); |
skb | 604 | net/ipx/af_ipx.c | if(skb->sk) |
skb | 606 | net/ipx/af_ipx.c | atomic_sub(skb->truesize, &skb->sk->wmem_alloc); |
skb | 607 | net/ipx/af_ipx.c | skb->sk=NULL; |
skb | 612 | net/ipx/af_ipx.c | return ipxitf_demux_socket(intrfc, skb, 0); |
skb | 619 | net/ipx/af_ipx.c | if (!send_to_wire && skb->sk) |
skb | 621 | net/ipx/af_ipx.c | atomic_sub(skb->truesize, &skb->sk->wmem_alloc); |
skb | 622 | net/ipx/af_ipx.c | skb->sk=NULL; |
skb | 624 | net/ipx/af_ipx.c | ipxitf_demux_socket(intrfc, skb, send_to_wire); |
skb | 652 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_WRITE); |
skb | 670 | net/ipx/af_ipx.c | skb = ipxitf_adjust_skbuff(intrfc, skb); |
skb | 671 | net/ipx/af_ipx.c | if (skb == NULL) |
skb | 675 | net/ipx/af_ipx.c | skb->dev = dev; |
skb | 676 | net/ipx/af_ipx.c | skb->protocol = htons(ETH_P_IPX); |
skb | 677 | net/ipx/af_ipx.c | dl->datalink_header(dl, skb, dest_node); |
skb | 683 | net/ipx/af_ipx.c | dump_pkt("IPX snd:", (ipx_packet *)skb->h.raw); |
skb | 684 | net/ipx/af_ipx.c | dump_data("ETH hdr:", skb->data, skb->h.raw - skb->data); |
skb | 691 | net/ipx/af_ipx.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 706 | net/ipx/af_ipx.c | static int ipxitf_rcv(ipx_interface *intrfc, struct sk_buff *skb) |
skb | 708 | net/ipx/af_ipx.c | ipx_packet *ipx = (ipx_packet *) (skb->h.raw); |
skb | 716 | net/ipx/af_ipx.c | if (call_in_firewall(PF_IPX, skb, ipx)!=FW_ACCEPT) |
skb | 718 | net/ipx/af_ipx.c | kfree_skb(skb, FREE_READ); |
skb | 760 | net/ipx/af_ipx.c | if (call_fw_firewall(PF_IPX, skb, ipx)!=FW_ACCEPT) |
skb | 762 | net/ipx/af_ipx.c | kfree_skb(skb, FREE_READ); |
skb | 767 | net/ipx/af_ipx.c | if ((skb->pkt_type != PACKET_BROADCAST) && |
skb | 768 | net/ipx/af_ipx.c | (skb->pkt_type != PACKET_MULTICAST)) |
skb | 769 | net/ipx/af_ipx.c | return ipxrtr_route_skb(skb); |
skb | 771 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 779 | net/ipx/af_ipx.c | return ipxitf_demux_socket(intrfc, skb, 0); |
skb | 783 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 1172 | net/ipx/af_ipx.c | struct sk_buff *skb; |
skb | 1199 | net/ipx/af_ipx.c | skb=sock_alloc_send_skb(sk, size, 0, 0, &err); |
skb | 1200 | net/ipx/af_ipx.c | if(skb==NULL) |
skb | 1203 | net/ipx/af_ipx.c | skb_reserve(skb,ipx_offset); |
skb | 1204 | net/ipx/af_ipx.c | skb->free=1; |
skb | 1205 | net/ipx/af_ipx.c | skb->arp=1; |
skb | 1206 | net/ipx/af_ipx.c | skb->sk=sk; |
skb | 1209 | net/ipx/af_ipx.c | ipx=(ipx_packet *)skb_put(skb,sizeof(ipx_packet)); |
skb | 1214 | net/ipx/af_ipx.c | skb->h.raw = (unsigned char *)ipx; |
skb | 1237 | net/ipx/af_ipx.c | memcpy_fromiovec(skb_put(skb,len),iov,len); |
skb | 1240 | net/ipx/af_ipx.c | if(call_out_firewall(PF_IPX, skb, ipx)!=FW_ACCEPT) |
skb | 1242 | net/ipx/af_ipx.c | kfree_skb(skb, FREE_WRITE); |
skb | 1247 | net/ipx/af_ipx.c | return ipxitf_send(intrfc, skb, (rt && rt->ir_routed) ? |
skb | 1252 | net/ipx/af_ipx.c | ipxrtr_route_skb(struct sk_buff *skb) |
skb | 1254 | net/ipx/af_ipx.c | ipx_packet *ipx = (ipx_packet *) (skb->h.raw); |
skb | 1261 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 1265 | net/ipx/af_ipx.c | (void)ipxitf_send(i, skb, (r->ir_routed) ? |
skb | 1950 | net/ipx/af_ipx.c | int ipx_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 1957 | net/ipx/af_ipx.c | ipx=(ipx_packet *)skb->h.raw; |
skb | 1963 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 1969 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 1982 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 1987 | net/ipx/af_ipx.c | return ipxitf_rcv(intrfc, skb); |
skb | 2051 | net/ipx/af_ipx.c | struct sk_buff *skb; |
skb | 2061 | net/ipx/af_ipx.c | skb=skb_recv_datagram(sk,flags,noblock,&er); |
skb | 2062 | net/ipx/af_ipx.c | if(skb==NULL) |
skb | 2068 | net/ipx/af_ipx.c | ipx = (ipx_packet *)(skb->h.raw); |
skb | 2071 | net/ipx/af_ipx.c | skb_copy_datagram_iovec(skb,sizeof(struct ipx_packet),msg->msg_iov,copied); |
skb | 2081 | net/ipx/af_ipx.c | skb_free_datagram(sk, skb); |
skb | 2116 | net/ipx/af_ipx.c | struct sk_buff *skb; |
skb | 2118 | net/ipx/af_ipx.c | if((skb=skb_peek(&sk->receive_queue))!=NULL) |
skb | 2119 | net/ipx/af_ipx.c | amount=skb->len-sizeof(struct ipx_packet); |
skb | 33 | net/netlink.c | static int (*netlink_handler[MAX_LINKS])(struct sk_buff *skb); |
skb | 49 | net/netlink.c | static int netlink_err(struct sk_buff *skb) |
skb | 51 | net/netlink.c | kfree_skb(skb, FREE_READ); |
skb | 60 | net/netlink.c | int netlink_donothing(struct sk_buff *skb) |
skb | 62 | net/netlink.c | kfree_skb(skb, FREE_READ); |
skb | 73 | net/netlink.c | struct sk_buff *skb; |
skb | 74 | net/netlink.c | skb=alloc_skb(count, GFP_KERNEL); |
skb | 75 | net/netlink.c | skb->free=1; |
skb | 76 | net/netlink.c | memcpy_fromfs(skb_put(skb,count),buf, count); |
skb | 77 | net/netlink.c | return (netlink_handler[minor])(skb); |
skb | 87 | net/netlink.c | struct sk_buff *skb; |
skb | 89 | net/netlink.c | while((skb=skb_dequeue(&skb_queue_rd[minor]))==NULL) |
skb | 103 | net/netlink.c | rdq_size[minor]-=skb->len; |
skb | 105 | net/netlink.c | if(skb->len<count) |
skb | 106 | net/netlink.c | count=skb->len; |
skb | 107 | net/netlink.c | memcpy_tofs(buf,skb->data,count); |
skb | 108 | net/netlink.c | kfree_skb(skb, FREE_READ); |
skb | 177 | net/netlink.c | int netlink_attach(int unit, int (*function)(struct sk_buff *skb)) |
skb | 194 | net/netlink.c | int netlink_post(int unit, struct sk_buff *skb) |
skb | 202 | net/netlink.c | if(rdq_size[unit]+skb->len>MAX_QBYTES) |
skb | 206 | net/netlink.c | skb_queue_tail(&skb_queue_rd[unit], skb); |
skb | 207 | net/netlink.c | rdq_size[unit]+=skb->len; |
skb | 240 | net/netrom/af_netrom.c | struct sk_buff *skb; |
skb | 251 | net/netrom/af_netrom.c | while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) { |
skb | 252 | net/netrom/af_netrom.c | if (skb->sk != sk) { /* A pending connection */ |
skb | 253 | net/netrom/af_netrom.c | skb->sk->dead = 1; /* Queue the unaccepted socket for death */ |
skb | 254 | net/netrom/af_netrom.c | nr_set_timer(skb->sk); |
skb | 255 | net/netrom/af_netrom.c | skb->sk->nr->state = NR_STATE_0; |
skb | 258 | net/netrom/af_netrom.c | kfree_skb(skb, FREE_READ); |
skb | 814 | net/netrom/af_netrom.c | struct sk_buff *skb; |
skb | 835 | net/netrom/af_netrom.c | if ((skb = skb_dequeue(&sk->receive_queue)) == NULL) { |
skb | 846 | net/netrom/af_netrom.c | } while (skb == NULL); |
skb | 848 | net/netrom/af_netrom.c | newsk = skb->sk; |
skb | 853 | net/netrom/af_netrom.c | skb->sk = NULL; |
skb | 854 | net/netrom/af_netrom.c | kfree_skb(skb, FREE_READ); |
skb | 887 | net/netrom/af_netrom.c | int nr_rx_frame(struct sk_buff *skb, struct device *dev) |
skb | 896 | net/netrom/af_netrom.c | skb->sk = NULL; /* Initially we don't know who its for */ |
skb | 902 | net/netrom/af_netrom.c | src = (ax25_address *)(skb->data + 0); |
skb | 903 | net/netrom/af_netrom.c | dest = (ax25_address *)(skb->data + 7); |
skb | 905 | net/netrom/af_netrom.c | circuit_index = skb->data[15]; |
skb | 906 | net/netrom/af_netrom.c | circuit_id = skb->data[16]; |
skb | 907 | net/netrom/af_netrom.c | frametype = skb->data[19]; |
skb | 914 | net/netrom/af_netrom.c | skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); |
skb | 915 | net/netrom/af_netrom.c | skb->h.raw = skb->data; |
skb | 917 | net/netrom/af_netrom.c | return nr_rx_ip(skb, dev); |
skb | 927 | net/netrom/af_netrom.c | skb->h.raw = skb->data; |
skb | 929 | net/netrom/af_netrom.c | if ((frametype & 0x0F) == NR_CONNACK && skb->len == 22) |
skb | 934 | net/netrom/af_netrom.c | return nr_process_rx_frame(sk, skb); |
skb | 942 | net/netrom/af_netrom.c | user = (ax25_address *)(skb->data + 21); |
skb | 945 | net/netrom/af_netrom.c | nr_transmit_dm(skb); |
skb | 949 | net/netrom/af_netrom.c | window = skb->data[20]; |
skb | 951 | net/netrom/af_netrom.c | skb->sk = make; |
skb | 972 | net/netrom/af_netrom.c | if (skb->len == 37) { |
skb | 973 | net/netrom/af_netrom.c | timeout = skb->data[36] * 256 + skb->data[35]; |
skb | 994 | net/netrom/af_netrom.c | skb_queue_head(&sk->receive_queue, skb); |
skb | 999 | net/netrom/af_netrom.c | sk->data_ready(sk, skb->len); |
skb | 1010 | net/netrom/af_netrom.c | struct sk_buff *skb; |
skb | 1050 | net/netrom/af_netrom.c | if ((skb = sock_alloc_send_skb(sk, size, 0, 0, &err)) == NULL) |
skb | 1053 | net/netrom/af_netrom.c | skb->sk = sk; |
skb | 1054 | net/netrom/af_netrom.c | skb->free = 1; |
skb | 1055 | net/netrom/af_netrom.c | skb->arp = 1; |
skb | 1057 | net/netrom/af_netrom.c | skb_reserve(skb, size - len); |
skb | 1063 | net/netrom/af_netrom.c | asmptr = skb_push(skb, NR_TRANSPORT_LEN); |
skb | 1083 | net/netrom/af_netrom.c | skb->h.raw = skb_put(skb, len); |
skb | 1085 | net/netrom/af_netrom.c | asmptr = skb->h.raw; |
skb | 1097 | net/netrom/af_netrom.c | kfree_skb(skb, FREE_WRITE); |
skb | 1101 | net/netrom/af_netrom.c | nr_output(sk, skb); /* Shove it onto the queue */ |
skb | 1113 | net/netrom/af_netrom.c | struct sk_buff *skb; |
skb | 1130 | net/netrom/af_netrom.c | if ((skb = skb_recv_datagram(sk, flags, noblock, &er)) == NULL) |
skb | 1134 | net/netrom/af_netrom.c | skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); |
skb | 1135 | net/netrom/af_netrom.c | skb->h.raw = skb->data; |
skb | 1138 | net/netrom/af_netrom.c | copied = (size < skb->len) ? size : skb->len; |
skb | 1139 | net/netrom/af_netrom.c | skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); |
skb | 1145 | net/netrom/af_netrom.c | memcpy(&addr.sax25_call, skb->data + 7, AX25_ADDR_LEN); |
skb | 1152 | net/netrom/af_netrom.c | skb_free_datagram(sk, skb); |
skb | 1186 | net/netrom/af_netrom.c | struct sk_buff *skb; |
skb | 1188 | net/netrom/af_netrom.c | if ((skb = skb_peek(&sk->receive_queue)) != NULL) |
skb | 1189 | net/netrom/af_netrom.c | amount = skb->len - 20; |
skb | 54 | net/netrom/nr_dev.c | int nr_rx_ip(struct sk_buff *skb, struct device *dev) |
skb | 64 | net/netrom/nr_dev.c | skb->protocol = htons(ETH_P_IP); |
skb | 67 | net/netrom/nr_dev.c | skb->dev = dev; |
skb | 69 | net/netrom/nr_dev.c | skb->h.raw = skb->data; |
skb | 70 | net/netrom/nr_dev.c | ip_rcv(skb, skb->dev, NULL); |
skb | 75 | net/netrom/nr_dev.c | static int nr_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 78 | net/netrom/nr_dev.c | unsigned char *buff = skb_push(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); |
skb | 108 | net/netrom/nr_dev.c | unsigned long raddr, struct sk_buff *skb) |
skb | 113 | net/netrom/nr_dev.c | skb_device_unlock(skb); |
skb | 116 | net/netrom/nr_dev.c | skb->free = 1; |
skb | 117 | net/netrom/nr_dev.c | kfree_skb(skb, FREE_WRITE); |
skb | 130 | net/netrom/nr_dev.c | if (!nr_route_frame(skb, NULL)) { |
skb | 131 | net/netrom/nr_dev.c | skb->free = 1; |
skb | 132 | net/netrom/nr_dev.c | kfree_skb(skb, FREE_WRITE); |
skb | 165 | net/netrom/nr_dev.c | static int nr_xmit(struct sk_buff *skb, struct device *dev) |
skb | 169 | net/netrom/nr_dev.c | if (skb == NULL || dev == NULL) |
skb | 189 | net/netrom/nr_dev.c | dev_kfree_skb(skb, FREE_WRITE); |
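The nr_dev.c rows show the nr pseudo-device handing received NET/ROM payloads straight to IP. A consolidated sketch of that hand-off, assuming the 2.0-era ip_rcv() call shown in the rows above (statistics and return value simplified):

    /* Sketch of the IP hand-off done by nr_rx_ip() above. */
    static int nr_rx_ip_sketch(struct sk_buff *skb, struct device *dev)
    {
    	skb->protocol = htons(ETH_P_IP);   /* payload is a raw IP datagram      */
    	skb->dev      = dev;               /* account it to the nr interface    */
    	skb->h.raw    = skb->data;         /* IP header starts at skb->data     */
    	ip_rcv(skb, skb->dev, NULL);       /* queue it into the IP input path   */
    	return 1;
    }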
skb | 53 | net/netrom/nr_in.c | static int nr_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more) |
skb | 55 | net/netrom/nr_in.c | struct sk_buff *skbo, *skbn = skb; |
skb | 58 | net/netrom/nr_in.c | sk->nr->fraglen += skb->len; |
skb | 59 | net/netrom/nr_in.c | skb_queue_tail(&sk->nr->frag_queue, skb); |
skb | 64 | net/netrom/nr_in.c | sk->nr->fraglen += skb->len; |
skb | 65 | net/netrom/nr_in.c | skb_queue_tail(&sk->nr->frag_queue, skb); |
skb | 97 | net/netrom/nr_in.c | static int nr_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) |
skb | 103 | net/netrom/nr_in.c | sk->window = skb->data[20]; |
skb | 104 | net/netrom/nr_in.c | sk->nr->your_index = skb->data[17]; |
skb | 105 | net/netrom/nr_in.c | sk->nr->your_id = skb->data[18]; |
skb | 143 | net/netrom/nr_in.c | static int nr_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype) |
skb | 171 | net/netrom/nr_in.c | static int nr_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype) |
skb | 179 | net/netrom/nr_in.c | nr = skb->data[18]; |
skb | 180 | net/netrom/nr_in.c | ns = skb->data[17]; |
skb | 263 | net/netrom/nr_in.c | skb_queue_head(&sk->nr->reseq_queue, skb); |
skb | 310 | net/netrom/nr_in.c | int nr_process_rx_frame(struct sock *sk, struct sk_buff *skb) |
skb | 325 | net/netrom/nr_in.c | frametype = skb->data[19]; |
skb | 330 | net/netrom/nr_in.c | queued = nr_state1_machine(sk, skb, frametype); |
skb | 333 | net/netrom/nr_in.c | queued = nr_state2_machine(sk, skb, frametype); |
skb | 336 | net/netrom/nr_in.c | queued = nr_state3_machine(sk, skb, frametype); |
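The nr_in.c rows dispatch each received frame to a per-connection state machine keyed on the frame type byte. A sketch of that dispatch as it appears from the rows above (the NR_STATE_* names and the state field are assumptions, since they are not visible here):

    /* Sketch of the dispatch in nr_process_rx_frame() above. */
    int nr_process_rx_frame_sketch(struct sock *sk, struct sk_buff *skb)
    {
    	int queued = 0;
    	int frametype = skb->data[19];     /* transport opcode/flags byte        */

    	switch (sk->nr->state) {           /* connection state (assumed field)   */
    	case NR_STATE_1:                   /* awaiting connect acknowledgement   */
    		queued = nr_state1_machine(sk, skb, frametype);
    		break;
    	case NR_STATE_2:                   /* awaiting disconnect acknowledgement */
    		queued = nr_state2_machine(sk, skb, frametype);
    		break;
    	case NR_STATE_3:                   /* connected, data transfer           */
    		queued = nr_state3_machine(sk, skb, frametype);
    		break;
    	}
    	return queued;
    }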
skb | 49 | net/netrom/nr_out.c | void nr_output(struct sock *sk, struct sk_buff *skb) |
skb | 57 | net/netrom/nr_out.c | if (skb->len - NR_TRANSPORT_LEN > mtu) { |
skb | 59 | net/netrom/nr_out.c | memcpy(transport, skb->data, NR_TRANSPORT_LEN); |
skb | 60 | net/netrom/nr_out.c | skb_pull(skb, NR_TRANSPORT_LEN); |
skb | 62 | net/netrom/nr_out.c | frontlen = skb_headroom(skb); |
skb | 64 | net/netrom/nr_out.c | while (skb->len > 0) { |
skb | 74 | net/netrom/nr_out.c | len = (mtu > skb->len) ? skb->len : mtu; |
skb | 77 | net/netrom/nr_out.c | memcpy(skb_put(skbn, len), skb->data, len); |
skb | 78 | net/netrom/nr_out.c | skb_pull(skb, len); |
skb | 84 | net/netrom/nr_out.c | if (skb->len > 0) |
skb | 90 | net/netrom/nr_out.c | skb->free = 1; |
skb | 91 | net/netrom/nr_out.c | kfree_skb(skb, FREE_WRITE); |
skb | 93 | net/netrom/nr_out.c | skb_queue_tail(&sk->write_queue, skb); /* Throw it on the queue */ |
skb | 104 | net/netrom/nr_out.c | static void nr_send_iframe(struct sock *sk, struct sk_buff *skb) |
skb | 106 | net/netrom/nr_out.c | if (skb == NULL) |
skb | 109 | net/netrom/nr_out.c | skb->data[2] = sk->nr->vs; |
skb | 110 | net/netrom/nr_out.c | skb->data[3] = sk->nr->vr; |
skb | 113 | net/netrom/nr_out.c | skb->data[4] |= NR_CHOKE_FLAG; |
skb | 115 | net/netrom/nr_out.c | nr_transmit_buffer(sk, skb); |
skb | 120 | net/netrom/nr_out.c | struct sk_buff *skb, *skbn; |
skb | 122 | net/netrom/nr_out.c | if ((skb = skb_peek(&sk->nr->ack_queue)) == NULL) |
skb | 125 | net/netrom/nr_out.c | if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) |
skb | 143 | net/netrom/nr_out.c | struct sk_buff *skb, *skbn; |
skb | 166 | net/netrom/nr_out.c | skb = skb_dequeue(&sk->write_queue); |
skb | 169 | net/netrom/nr_out.c | if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { |
skb | 170 | net/netrom/nr_out.c | skb_queue_head(&sk->write_queue, skb); |
skb | 187 | net/netrom/nr_out.c | skb_queue_tail(&sk->nr->ack_queue, skb); |
skb | 189 | net/netrom/nr_out.c | } while (!last && (skb = skb_dequeue(&sk->write_queue)) != NULL); |
skb | 202 | net/netrom/nr_out.c | void nr_transmit_buffer(struct sock *sk, struct sk_buff *skb) |
skb | 209 | net/netrom/nr_out.c | dptr = skb_push(skb, NR_NETWORK_LEN); |
skb | 225 | net/netrom/nr_out.c | skb->arp = 1; |
skb | 227 | net/netrom/nr_out.c | if (!nr_route_frame(skb, NULL)) { |
skb | 228 | net/netrom/nr_out.c | kfree_skb(skb, FREE_WRITE); |
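The nr_out.c rows split an oversized write into mtu-sized pieces before queueing, by peeling the transport header off once and copying it back onto each fragment. A condensed sketch of that loop built from the helpers quoted above (the mtu source, allocation size and NR_MORE_FLAG placement are assumptions):

    /* Condensed sketch of the fragmentation loop in nr_output() above. */
    void nr_output_sketch(struct sock *sk, struct sk_buff *skb, int mtu)
    {
    	unsigned char transport[NR_TRANSPORT_LEN];
    	struct sk_buff *skbn;
    	int frontlen, len;

    	if (skb->len - NR_TRANSPORT_LEN > mtu) {
    		memcpy(transport, skb->data, NR_TRANSPORT_LEN);  /* save the header  */
    		skb_pull(skb, NR_TRANSPORT_LEN);                 /* ...then strip it */
    		frontlen = skb_headroom(skb);                    /* lower-layer room */

    		while (skb->len > 0) {
    			if ((skbn = alloc_skb(frontlen + NR_TRANSPORT_LEN + mtu, GFP_ATOMIC)) == NULL)
    				return;                          /* real code must not leak skb */
    			skb_reserve(skbn, frontlen);             /* same headroom as original   */
    			len = (mtu > skb->len) ? skb->len : mtu;

    			memcpy(skb_put(skbn, len), skb->data, len);   /* one fragment's data     */
    			skb_pull(skb, len);

    			memcpy(skb_push(skbn, NR_TRANSPORT_LEN), transport, NR_TRANSPORT_LEN);
    			if (skb->len > 0)
    				skbn->data[4] |= NR_MORE_FLAG;   /* more fragments follow (placement assumed) */

    			skbn->free = 1;
    			skb_queue_tail(&sk->write_queue, skbn);
    		}
    		skb->free = 1;
    		kfree_skb(skb, FREE_WRITE);                      /* original is now empty     */
    	} else {
    		skb_queue_tail(&sk->write_queue, skb);           /* fits in one frame         */
    	}
    }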
skb | 649 | net/netrom/nr_route.c | int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25) |
skb | 659 | net/netrom/nr_route.c | if(ax25 && call_in_firewall(PF_NETROM, skb, skb->data)!=FW_ACCEPT) |
skb | 661 | net/netrom/nr_route.c | if(!ax25 && call_out_firewall(PF_NETROM, skb, skb->data)!=FW_ACCEPT) |
skb | 664 | net/netrom/nr_route.c | nr_src = (ax25_address *)(skb->data + 0); |
skb | 665 | net/netrom/nr_route.c | nr_dest = (ax25_address *)(skb->data + 7); |
skb | 671 | net/netrom/nr_route.c | return nr_rx_frame(skb, dev); |
skb | 677 | net/netrom/nr_route.c | if (--skb->data[14] == 0) |
skb | 698 | net/netrom/nr_route.c | if(ax25 && call_fw_firewall(PF_NETROM, skb, skb->data)!=FW_ACCEPT) |
skb | 702 | net/netrom/nr_route.c | dptr = skb_push(skb, 1); |
skb | 705 | net/netrom/nr_route.c | ax25_send_frame(skb, (ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev); |
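The nr_route.c rows show the forwarding decision for a NET/ROM frame: deliver locally if the destination callsign is ours, otherwise decrement the TTL byte, prepend the AX.25 protocol ID and hand the buffer to the link layer. A sketch under the assumption that the firewall checks are already done and that a helper nr_dest_is_local() plus an already-resolved neighbour exist (both hypothetical here):

    /* Sketch of the forwarding path in nr_route_frame() above. */
    int nr_route_frame_sketch(struct sk_buff *skb, struct device *dev,
    			  struct nr_neigh *nr_neigh)    /* next hop, assumed already looked up */
    {
    	ax25_address *nr_dest = (ax25_address *)(skb->data + 7);
    	unsigned char *dptr;

    	if (nr_dest_is_local(nr_dest))              /* hypothetical helper           */
    		return nr_rx_frame(skb, dev);       /* deliver to our own sockets    */

    	if (--skb->data[14] == 0)                   /* time-to-live exhausted        */
    		return 0;

    	dptr  = skb_push(skb, 1);
    	*dptr = AX25_P_NETROM;                      /* PID byte for the AX.25 link (assumed constant) */

    	ax25_send_frame(skb, (ax25_address *)dev->dev_addr, &nr_neigh->callsign,
    			nr_neigh->digipeat, nr_neigh->dev);
    	return 1;
    }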
skb | 49 | net/netrom/nr_subr.c | struct sk_buff *skb; |
skb | 51 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->write_queue)) != NULL) { |
skb | 52 | net/netrom/nr_subr.c | skb->sk = sk; |
skb | 53 | net/netrom/nr_subr.c | skb->free = 1; |
skb | 54 | net/netrom/nr_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 57 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->nr->ack_queue)) != NULL) { |
skb | 58 | net/netrom/nr_subr.c | skb->sk = sk; |
skb | 59 | net/netrom/nr_subr.c | skb->free = 1; |
skb | 60 | net/netrom/nr_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 63 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->nr->reseq_queue)) != NULL) { |
skb | 64 | net/netrom/nr_subr.c | kfree_skb(skb, FREE_READ); |
skb | 67 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->nr->frag_queue)) != NULL) { |
skb | 68 | net/netrom/nr_subr.c | kfree_skb(skb, FREE_READ); |
skb | 79 | net/netrom/nr_subr.c | struct sk_buff *skb; |
skb | 86 | net/netrom/nr_subr.c | skb = skb_dequeue(&sk->nr->ack_queue); |
skb | 87 | net/netrom/nr_subr.c | skb->sk = sk; |
skb | 88 | net/netrom/nr_subr.c | skb->free = 1; |
skb | 89 | net/netrom/nr_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 102 | net/netrom/nr_subr.c | struct sk_buff *skb, *skb_prev = NULL; |
skb | 104 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->nr->ack_queue)) != NULL) { |
skb | 106 | net/netrom/nr_subr.c | skb_queue_head(&sk->write_queue, skb); |
skb | 108 | net/netrom/nr_subr.c | skb_append(skb_prev, skb); |
skb | 109 | net/netrom/nr_subr.c | skb_prev = skb; |
skb | 153 | net/netrom/nr_subr.c | struct sk_buff *skb; |
skb | 175 | net/netrom/nr_subr.c | if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL) |
skb | 181 | net/netrom/nr_subr.c | skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + NR_NETWORK_LEN); |
skb | 183 | net/netrom/nr_subr.c | dptr = skb_put(skb, skb_tailroom(skb)); |
skb | 237 | net/netrom/nr_subr.c | skb->free = 1; |
skb | 239 | net/netrom/nr_subr.c | nr_transmit_buffer(sk, skb); |
skb | 246 | net/netrom/nr_subr.c | void nr_transmit_dm(struct sk_buff *skb) |
skb | 261 | net/netrom/nr_subr.c | memcpy(dptr, skb->data + 7, AX25_ADDR_LEN); |
skb | 267 | net/netrom/nr_subr.c | memcpy(dptr, skb->data + 0, AX25_ADDR_LEN); |
skb | 275 | net/netrom/nr_subr.c | *dptr++ = skb->data[15]; |
skb | 276 | net/netrom/nr_subr.c | *dptr++ = skb->data[16]; |
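The nr_subr.c rows build outgoing control frames from scratch: allocate a buffer, reserve headroom for every lower-layer header that will be pushed later, then fill the remaining tailroom with the transport header. A sketch of that buffer setup (frame contents abbreviated; len is assumed to cover the reserved headroom plus the transport header):

    /* Sketch of the buffer setup in nr_write_internal() above. */
    void nr_send_control_sketch(struct sock *sk, int len, unsigned char frametype)
    {
    	struct sk_buff *skb;
    	unsigned char *dptr;

    	if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
    		return;

    	/* Leave room for the AX.25/BPQ link header and the NET/ROM network
    	 * header that nr_transmit_buffer() and the link layer push later.  */
    	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + NR_NETWORK_LEN);

    	dptr = skb_put(skb, skb_tailroom(skb));  /* the rest is the transport header */
    	/* ... fill circuit indices, callsigns, window ... */
    	dptr[4] = frametype;                     /* opcode position as used by nr_send_iframe() above */

    	skb->free = 1;
    	nr_transmit_buffer(sk, skb);             /* prepends NR_NETWORK_LEN and routes the frame */
    }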
skb | 174 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 178 | net/unix/af_unix.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) |
skb | 182 | net/unix/af_unix.c | unix_socket *osk=skb->sk; |
skb | 184 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); /* Now surplus - free the skb first before the socket */ |
skb | 191 | net/unix/af_unix.c | kfree_skb(skb,FREE_WRITE); |
skb | 463 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 519 | net/unix/af_unix.c | skb=sock_alloc_send_skb(sk, 0, 0, 0, &err); /* Marker object */ |
skb | 520 | net/unix/af_unix.c | if(skb==NULL) |
skb | 522 | net/unix/af_unix.c | skb->sk=sk; /* So they know it is us */ |
skb | 523 | net/unix/af_unix.c | skb->free=1; |
skb | 529 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); |
skb | 534 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); |
skb | 540 | net/unix/af_unix.c | skb_queue_tail(&other->receive_queue,skb); |
skb | 591 | net/unix/af_unix.c | unix_socket *ska,*skb; |
skb | 594 | net/unix/af_unix.c | skb=b->data; |
skb | 598 | net/unix/af_unix.c | skb->protinfo.af_unix.locks++; |
skb | 599 | net/unix/af_unix.c | ska->protinfo.af_unix.other=skb; |
skb | 600 | net/unix/af_unix.c | skb->protinfo.af_unix.other=ska; |
skb | 602 | net/unix/af_unix.c | skb->state=TCP_ESTABLISHED; |
skb | 610 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 633 | net/unix/af_unix.c | skb=skb_dequeue(&sk->receive_queue); |
skb | 634 | net/unix/af_unix.c | if(skb==NULL) |
skb | 650 | net/unix/af_unix.c | while(skb==NULL); |
skb | 651 | net/unix/af_unix.c | tsk=skb->sk; |
skb | 652 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); /* The buffer is just used as a tag */ |
skb | 804 | net/unix/af_unix.c | static void unix_detach_fds(struct sk_buff *skb, struct cmsghdr *cmsg) |
skb | 826 | net/unix/af_unix.c | memcpy(&fdnum,skb->h.filp,sizeof(int)); |
skb | 827 | net/unix/af_unix.c | fp=(struct file **)(skb->h.filp+sizeof(int)); |
skb | 858 | net/unix/af_unix.c | kfree(skb->h.filp); |
skb | 859 | net/unix/af_unix.c | skb->h.filp=NULL; |
skb | 862 | net/unix/af_unix.c | skb->destructor = NULL; |
skb | 865 | net/unix/af_unix.c | static void unix_destruct_fds(struct sk_buff *skb) |
skb | 867 | net/unix/af_unix.c | unix_detach_fds(skb,NULL); |
skb | 873 | net/unix/af_unix.c | static void unix_attach_fds(int fpnum,struct file **fp,struct sk_buff *skb) |
skb | 876 | net/unix/af_unix.c | skb->h.filp=kmalloc(sizeof(int)+fpnum*sizeof(struct file *), |
skb | 879 | net/unix/af_unix.c | memcpy(skb->h.filp,&fpnum,sizeof(int)); |
skb | 881 | net/unix/af_unix.c | memcpy(skb->h.filp+sizeof(int),fp,fpnum*sizeof(struct file *)); |
skb | 882 | net/unix/af_unix.c | skb->destructor = unix_destruct_fds; |
skb | 895 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 986 | net/unix/af_unix.c | skb=sock_alloc_send_skb(sk,size,limit,nonblock, &err); |
skb | 988 | net/unix/af_unix.c | if(skb==NULL) |
skb | 998 | net/unix/af_unix.c | size=skb_tailroom(skb); /* If we dropped back on a limit then our skb is smaller */ |
skb | 1000 | net/unix/af_unix.c | skb->sk=sk; |
skb | 1001 | net/unix/af_unix.c | skb->free=1; |
skb | 1005 | net/unix/af_unix.c | unix_attach_fds(fpnum,fp,skb); |
skb | 1009 | net/unix/af_unix.c | skb->h.filp=NULL; |
skb | 1011 | net/unix/af_unix.c | memcpy_fromiovec(skb_put(skb,size),msg->msg_iov, size); |
skb | 1023 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); |
skb | 1037 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); |
skb | 1044 | net/unix/af_unix.c | skb_queue_tail(&other->receive_queue, skb); |
skb | 1072 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 1124 | net/unix/af_unix.c | skb=skb_dequeue(&sk->receive_queue); |
skb | 1125 | net/unix/af_unix.c | if(skb==NULL) |
skb | 1143 | net/unix/af_unix.c | if(skb->sk->protinfo.af_unix.name) |
skb | 1145 | net/unix/af_unix.c | memcpy(sunaddr->sun_path, skb->sk->protinfo.af_unix.name, 108); |
skb | 1154 | net/unix/af_unix.c | num=min(skb->len,len-done); |
skb | 1155 | net/unix/af_unix.c | memcpy_tofs(sp, skb->data, num); |
skb | 1157 | net/unix/af_unix.c | if (skb->h.filp!=NULL) |
skb | 1158 | net/unix/af_unix.c | unix_detach_fds(skb,cm); |
skb | 1164 | net/unix/af_unix.c | skb_pull(skb, num); |
skb | 1166 | net/unix/af_unix.c | if (skb->len) { |
skb | 1167 | net/unix/af_unix.c | skb_queue_head(&sk->receive_queue, skb); |
skb | 1170 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); |
skb | 1236 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 1240 | net/unix/af_unix.c | if((skb=skb_peek(&sk->receive_queue))!=NULL) |
skb | 1241 | net/unix/af_unix.c | amount=skb->len; |
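The skb->h.filp lines above carry passed file descriptors across an AF_UNIX socket by hanging a small kmalloc'd blob off the buffer: an int count followed immediately by the struct file pointers. A sketch of the attach side, assuming the 2.0-era fields quoted above (allocation flag and error handling are simplifications):

    /* Sketch of the descriptor-passing blob used by unix_attach_fds()
     * and unix_detach_fds() above. */
    static void unix_attach_fds_sketch(int fpnum, struct file **fp, struct sk_buff *skb)
    {
    	skb->h.filp = kmalloc(sizeof(int) + fpnum * sizeof(struct file *), GFP_KERNEL);
    	if (skb->h.filp == NULL)
    		return;                                           /* real code must handle this */
    	memcpy(skb->h.filp, &fpnum, sizeof(int));                             /* count    */
    	memcpy(skb->h.filp + sizeof(int), fp, fpnum * sizeof(struct file *)); /* pointers */
    	skb->destructor = unix_destruct_fds;      /* frees the blob if the skb is destroyed */
    }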
skb | 196 | net/unix/garbage.c | struct sk_buff *skb; |
skb | 198 | net/unix/garbage.c | skb=skb_peek(&x->receive_queue); |
skb | 204 | net/unix/garbage.c | while(skb && skb != (struct sk_buff *)&x->receive_queue) |
skb | 209 | net/unix/garbage.c | if(skb->h.filp) |
skb | 214 | net/unix/garbage.c | int nfd=*(int *)skb->h.filp; |
skb | 215 | net/unix/garbage.c | struct file **fp=(struct file **)(skb->h.filp+sizeof(int)); |
skb | 235 | net/unix/garbage.c | skb=skb->next; |
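The garbage.c rows walk a socket's receive queue in place, following skb->next until the circular list wraps back to the queue head, and inspect any descriptor blob attached to each buffer. A compact sketch of that scan (what the collector does with each file pointer is omitted here):

    /* Sketch of the in-place receive-queue scan in net/unix/garbage.c above. */
    static void scan_inflight_fds_sketch(unix_socket *x)
    {
    	struct sk_buff *skb = skb_peek(&x->receive_queue);

    	/* The 2.0 queue is circular: stop when we are back at the head. */
    	while (skb && skb != (struct sk_buff *)&x->receive_queue) {
    		if (skb->h.filp) {
    			int nfd = *(int *)skb->h.filp;                              /* count   */
    			struct file **fp = (struct file **)(skb->h.filp + sizeof(int));
    			while (nfd--)
    				fp++;       /* the real collector marks each file as in flight */
    		}
    		skb = skb->next;
    	}
    }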