tag | line | file | source code |
skb | 123 | drivers/net/3c501.c | static int el_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 333 | drivers/net/3c501.c | el_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 357 | drivers/net/3c501.c | if (skb == NULL) { |
skb | 374 | drivers/net/3c501.c | int gp_start = 0x800 - (ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN); |
skb | 375 | drivers/net/3c501.c | unsigned char *buf = skb->data; |
skb | 398 | drivers/net/3c501.c | outsb(DATAPORT,buf,skb->len); /* load buffer (usual thing each byte increments the pointer) */ |
skb | 412 | drivers/net/3c501.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 555 | drivers/net/3c501.c | struct sk_buff *skb; |
skb | 575 | drivers/net/3c501.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 580 | drivers/net/3c501.c | if (skb == NULL) { |
skb | 585 | drivers/net/3c501.c | skb_reserve(skb,2); /* Force 16 byte alignment */ |
skb | 586 | drivers/net/3c501.c | skb->dev = dev; |
skb | 594 | drivers/net/3c501.c | insb(DATAPORT, skb_put(skb,pkt_len), pkt_len); |
skb | 595 | drivers/net/3c501.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 596 | drivers/net/3c501.c | netif_rx(skb); |
skb | 553 | drivers/net/3c505.c | struct sk_buff *skb; |
skb | 568 | drivers/net/3c505.c | skb = dev_alloc_skb(rlen+2); |
skb | 579 | drivers/net/3c505.c | if (skb == NULL) { |
skb | 595 | drivers/net/3c505.c | skb_reserve(skb,2); /* 16 byte alignment */ |
skb | 596 | drivers/net/3c505.c | skb->dev = dev; |
skb | 601 | drivers/net/3c505.c | ptr = (unsigned short *)skb_put(skb,len); |
skb | 610 | drivers/net/3c505.c | kfree_skb(skb, FREE_WRITE); |
skb | 619 | drivers/net/3c505.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 620 | drivers/net/3c505.c | netif_rx(skb); |
skb | 1003 | drivers/net/3c505.c | elp_start_xmit (struct sk_buff *skb, struct device *dev) |
skb | 1010 | drivers/net/3c505.c | if (skb == NULL) { |
skb | 1018 | drivers/net/3c505.c | if (skb->len <= 0) |
skb | 1022 | drivers/net/3c505.c | printk("%s: request to send packet of length %d\n", dev->name, (int)skb->len); |
skb | 1043 | drivers/net/3c505.c | if (!send_packet(dev, skb->data, skb->len)) { |
skb | 1049 | drivers/net/3c505.c | printk("%s: packet of length %d sent\n", dev->name, (int)skb->len); |
skb | 1065 | drivers/net/3c505.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 287 | drivers/net/3c507.c | static int el16_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 455 | drivers/net/3c507.c | el16_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 490 | drivers/net/3c507.c | if (skb == NULL) { |
skb | 499 | drivers/net/3c507.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 500 | drivers/net/3c507.c | unsigned char *buf = skb->data; |
skb | 510 | drivers/net/3c507.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 847 | drivers/net/3c507.c | struct sk_buff *skb; |
skb | 850 | drivers/net/3c507.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 851 | drivers/net/3c507.c | if (skb == NULL) { |
skb | 857 | drivers/net/3c507.c | skb_reserve(skb,2); |
skb | 858 | drivers/net/3c507.c | skb->dev = dev; |
skb | 861 | drivers/net/3c507.c | memcpy(skb_put(skb,pkt_len), data_frame + 5, pkt_len); |
skb | 863 | drivers/net/3c507.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 864 | drivers/net/3c507.c | netif_rx(skb); |
skb | 106 | drivers/net/3c509.c | static int el3_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 388 | drivers/net/3c509.c | el3_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 407 | drivers/net/3c509.c | if (skb == NULL) { |
skb | 412 | drivers/net/3c509.c | if (skb->len <= 0) |
skb | 417 | drivers/net/3c509.c | dev->name, skb->len, inw(ioaddr + EL3_STATUS)); |
skb | 441 | drivers/net/3c509.c | outw(skb->len, ioaddr + TX_FIFO); |
skb | 444 | drivers/net/3c509.c | outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); |
skb | 454 | drivers/net/3c509.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 602 | drivers/net/3c509.c | struct sk_buff *skb; |
skb | 604 | drivers/net/3c509.c | skb = dev_alloc_skb(pkt_len+5); |
skb | 608 | drivers/net/3c509.c | if (skb != NULL) { |
skb | 609 | drivers/net/3c509.c | skb->dev = dev; |
skb | 610 | drivers/net/3c509.c | skb_reserve(skb,2); /* Align IP on 16 byte boundaries */ |
skb | 613 | drivers/net/3c509.c | insl(ioaddr+RX_FIFO, skb_put(skb,pkt_len), |
skb | 616 | drivers/net/3c509.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 617 | drivers/net/3c509.c | netif_rx(skb); |
skb | 132 | drivers/net/8390.c | static int ei_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 175 | drivers/net/8390.c | if (skb == NULL) { |
skb | 180 | drivers/net/8390.c | length = skb->len; |
skb | 181 | drivers/net/8390.c | if (skb->len <= 0) |
skb | 221 | drivers/net/8390.c | ei_block_output(dev, length, skb->data, output_page); |
skb | 235 | drivers/net/8390.c | ei_block_output(dev, length, skb->data, ei_local->tx_start_page); |
skb | 246 | drivers/net/8390.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 461 | drivers/net/8390.c | struct sk_buff *skb; |
skb | 463 | drivers/net/8390.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 464 | drivers/net/8390.c | if (skb == NULL) { |
skb | 471 | drivers/net/8390.c | skb_reserve(skb,2); /* IP headers on 16 byte boundaries */ |
skb | 472 | drivers/net/8390.c | skb->dev = dev; |
skb | 474 | drivers/net/8390.c | ei_block_input(dev, pkt_len, skb_put(skb,pkt_len), |
skb | 476 | drivers/net/8390.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 477 | drivers/net/8390.c | netif_rx(skb); |
skb | 188 | drivers/net/apricot.c | static int i596_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 345 | drivers/net/apricot.c | struct sk_buff *skb = dev_alloc_skb(pkt_len); |
skb | 349 | drivers/net/apricot.c | if (skb == NULL) |
skb | 356 | drivers/net/apricot.c | skb->dev = dev; |
skb | 357 | drivers/net/apricot.c | memcpy(skb_put(skb,pkt_len), lp->scb.rfd->data, pkt_len); |
skb | 359 | drivers/net/apricot.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 360 | drivers/net/apricot.c | netif_rx(skb); |
skb | 363 | drivers/net/apricot.c | if (i596_debug > 4) print_eth(skb->data); |
skb | 411 | drivers/net/apricot.c | struct sk_buff *skb = ((struct sk_buff *)(tx_cmd->tbd->data)) -1; |
skb | 413 | drivers/net/apricot.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 576 | drivers/net/apricot.c | i596_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 614 | drivers/net/apricot.c | if (skb == NULL) { |
skb | 620 | drivers/net/apricot.c | if (skb->len <= 0) return 0; |
skb | 630 | drivers/net/apricot.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 639 | drivers/net/apricot.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 653 | drivers/net/apricot.c | tx_cmd->tbd->data = skb->data; |
skb | 655 | drivers/net/apricot.c | if (i596_debug > 3) print_eth(skb->data); |
skb | 813 | drivers/net/apricot.c | struct sk_buff *skb = ((struct sk_buff *)(tx_cmd->tbd->data)) -1; |
skb | 815 | drivers/net/apricot.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 819 | drivers/net/apricot.c | if (i596_debug >2) print_eth(skb->data); |
skb | 397 | drivers/net/arcnet.c | struct sk_buff *skb; /* packet data buffer */ |
skb | 405 | drivers/net/arcnet.c | struct sk_buff *skb; /* buffer from upper levels */ |
skb | 455 | drivers/net/arcnet.c | static int arcnetA_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 461 | drivers/net/arcnet.c | static int arcnetE_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 479 | drivers/net/arcnet.c | struct sk_buff *skb); |
skb | 481 | drivers/net/arcnet.c | int arcnetA_header(struct sk_buff *skb,struct device *dev, |
skb | 485 | drivers/net/arcnet.c | struct sk_buff *skb); |
skb | 486 | drivers/net/arcnet.c | unsigned short arcnetA_type_trans(struct sk_buff *skb,struct device *dev); |
skb | 1054 | drivers/net/arcnet.c | arcnetA_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 1084 | drivers/net/arcnet.c | status,tickssofar,lp->outgoing.skb, |
skb | 1105 | drivers/net/arcnet.c | if (lp->outgoing.skb) |
skb | 1107 | drivers/net/arcnet.c | dev_kfree_skb(lp->outgoing.skb,FREE_WRITE); |
skb | 1110 | drivers/net/arcnet.c | lp->outgoing.skb=NULL; |
skb | 1128 | drivers/net/arcnet.c | if (skb == NULL) { |
skb | 1166 | drivers/net/arcnet.c | out->length = 1 < skb->len ? skb->len : 1; |
skb | 1167 | drivers/net/arcnet.c | out->hdr=(struct ClientData*)skb->data; |
skb | 1168 | drivers/net/arcnet.c | out->skb=skb; |
skb | 1173 | drivers/net/arcnet.c | for( i=0; i< skb->len; i++) |
skb | 1176 | drivers/net/arcnet.c | printk("%02hX ",((unsigned char*)skb->data)[i]); |
skb | 1199 | drivers/net/arcnet.c | ((char *)skb->data)+sizeof(struct ClientData)); |
skb | 1202 | drivers/net/arcnet.c | dev_kfree_skb(out->skb,FREE_WRITE); |
skb | 1203 | drivers/net/arcnet.c | out->skb=NULL; |
skb | 1218 | drivers/net/arcnet.c | out->data=(u_char *)skb->data |
skb | 1254 | drivers/net/arcnet.c | if (out->skb) |
skb | 1255 | drivers/net/arcnet.c | dev_kfree_skb(out->skb,FREE_WRITE); |
skb | 1256 | drivers/net/arcnet.c | out->skb=NULL; |
skb | 1444 | drivers/net/arcnet.c | arcnetE_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 1449 | drivers/net/arcnet.c | printk("%s: in arcnetE_send_packet (skb=%p)\n",dev->name,skb); |
skb | 1469 | drivers/net/arcnet.c | if (skb == NULL) |
skb | 1484 | drivers/net/arcnet.c | short offset,length=skb->len+1; |
skb | 1494 | drivers/net/arcnet.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 1508 | drivers/net/arcnet.c | if (((struct ethhdr*)(skb->data))->h_dest[0] == 0xFF) |
skb | 1512 | drivers/net/arcnet.c | ((struct ethhdr*)(skb->data))->h_dest[5]; |
skb | 1541 | drivers/net/arcnet.c | memcpy(arcsoft,skb->data,skb->len); |
skb | 1573 | drivers/net/arcnet.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 1696 | drivers/net/arcnet.c | status,out->numsegs,out->segnum,out->skb); |
skb | 1732 | drivers/net/arcnet.c | if (!lp->outgoing.skb) |
skb | 1765 | drivers/net/arcnet.c | if (out->skb) |
skb | 1766 | drivers/net/arcnet.c | dev_kfree_skb(out->skb,FREE_WRITE); |
skb | 1767 | drivers/net/arcnet.c | out->skb=NULL; |
skb | 1905 | drivers/net/arcnet.c | struct sk_buff *skb; |
skb | 1933 | drivers/net/arcnet.c | if (in->skb) /* already assembling one! */ |
skb | 1938 | drivers/net/arcnet.c | kfree_skb(in->skb,FREE_WRITE); |
skb | 1941 | drivers/net/arcnet.c | in->skb=NULL; |
skb | 1946 | drivers/net/arcnet.c | skb = alloc_skb(length, GFP_ATOMIC); |
skb | 1947 | drivers/net/arcnet.c | if (skb == NULL) { |
skb | 1952 | drivers/net/arcnet.c | soft=(struct ClientData *)skb->data; |
skb | 1954 | drivers/net/arcnet.c | skb->len = length; |
skb | 1955 | drivers/net/arcnet.c | skb->dev = dev; |
skb | 2002 | drivers/net/arcnet.c | for( i=0; i< skb->len; i++) |
skb | 2005 | drivers/net/arcnet.c | printk("%02hX ",((unsigned char*)skb->data)[i]); |
skb | 2011 | drivers/net/arcnet.c | skb->protocol=arcnetA_type_trans(skb,dev); |
skb | 2014 | drivers/net/arcnet.c | netif_rx(skb); |
skb | 2041 | drivers/net/arcnet.c | if (in->skb && in->sequence!=arcsoft->sequence) |
skb | 2046 | drivers/net/arcnet.c | kfree_skb(in->skb,FREE_WRITE); |
skb | 2047 | drivers/net/arcnet.c | in->skb=NULL; |
skb | 2057 | drivers/net/arcnet.c | if (in->skb) /* already assembling one! */ |
skb | 2064 | drivers/net/arcnet.c | kfree_skb(in->skb,FREE_WRITE); |
skb | 2079 | drivers/net/arcnet.c | in->skb=skb=alloc_skb(508*in->numpackets |
skb | 2082 | drivers/net/arcnet.c | if (skb == NULL) { |
skb | 2092 | drivers/net/arcnet.c | skb->free=1; |
skb | 2094 | drivers/net/arcnet.c | soft=(struct ClientData *)skb->data; |
skb | 2096 | drivers/net/arcnet.c | skb->len=sizeof(struct ClientData); |
skb | 2097 | drivers/net/arcnet.c | skb->dev=dev; |
skb | 2111 | drivers/net/arcnet.c | if (!in->skb) |
skb | 2134 | drivers/net/arcnet.c | kfree_skb(in->skb,FREE_WRITE); |
skb | 2135 | drivers/net/arcnet.c | in->skb=NULL; |
skb | 2142 | drivers/net/arcnet.c | soft=(struct ClientData *)in->skb->data; |
skb | 2145 | drivers/net/arcnet.c | skb=in->skb; |
skb | 2147 | drivers/net/arcnet.c | memcpy(skb->data+skb->len, |
skb | 2151 | drivers/net/arcnet.c | skb->len+=length-sizeof(struct ClientData); |
skb | 2159 | drivers/net/arcnet.c | if (!skb || !in->skb) |
skb | 2161 | drivers/net/arcnet.c | skb,in->skb); |
skb | 2162 | drivers/net/arcnet.c | in->skb=NULL; |
skb | 2168 | drivers/net/arcnet.c | for( i=0; i< skb->len; i++) |
skb | 2171 | drivers/net/arcnet.c | printk("%02hX ",((unsigned char*)skb->data)[i]); |
skb | 2178 | drivers/net/arcnet.c | skb->protocol=arcnetA_type_trans(skb,dev); |
skb | 2181 | drivers/net/arcnet.c | netif_rx(skb); |
skb | 2195 | drivers/net/arcnet.c | struct sk_buff *skb; |
skb | 2201 | drivers/net/arcnet.c | skb = alloc_skb(length, GFP_ATOMIC); |
skb | 2202 | drivers/net/arcnet.c | if (skb == NULL) { |
skb | 2208 | drivers/net/arcnet.c | skb->len = length; |
skb | 2209 | drivers/net/arcnet.c | skb->dev = lp->edev; |
skb | 2211 | drivers/net/arcnet.c | memcpy(skb->data,(u_char *)arcsoft+1,length-1); |
skb | 2217 | drivers/net/arcnet.c | for(i=0; i<skb->len; i++) |
skb | 2222 | drivers/net/arcnet.c | printk("%02hX ",((u_char *)skb->data)[i]); |
skb | 2228 | drivers/net/arcnet.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 2231 | drivers/net/arcnet.c | netif_rx(skb); |
skb | 2283 | drivers/net/arcnet.c | struct sk_buff *skb) |
skb | 2285 | drivers/net/arcnet.c | int arcnetA_header(struct sk_buff *skb,struct device *dev, |
skb | 2293 | drivers/net/arcnet.c | skb_push(skb,dev->hard_header_len); |
skb | 2369 | drivers/net/arcnet.c | struct sk_buff *skb) |
skb | 2390 | drivers/net/arcnet.c | return arp_find(&(head->daddr), dst, dev, dev->pa_addr, skb)? 1 : 0; |
skb | 2400 | drivers/net/arcnet.c | unsigned short arcnetA_type_trans(struct sk_buff *skb,struct device *dev) |
skb | 2402 | drivers/net/arcnet.c | struct ClientData *head = (struct ClientData *) skb->data; |
skb | 2407 | drivers/net/arcnet.c | skb->mac.raw=skb->data; |
skb | 2408 | drivers/net/arcnet.c | skb_pull(skb,dev->hard_header_len); |
skb | 2412 | drivers/net/arcnet.c | skb->pkt_type=PACKET_BROADCAST; |
skb | 2417 | drivers/net/arcnet.c | skb->pkt_type=PACKET_OTHERHOST; |
skb | 124 | drivers/net/at1700.c | static int net_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 370 | drivers/net/at1700.c | net_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 404 | drivers/net/at1700.c | if (skb == NULL) { |
skb | 414 | drivers/net/at1700.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 415 | drivers/net/at1700.c | unsigned char *buf = skb->data; |
skb | 441 | drivers/net/at1700.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 526 | drivers/net/at1700.c | struct sk_buff *skb; |
skb | 537 | drivers/net/at1700.c | skb = dev_alloc_skb(pkt_len+3); |
skb | 538 | drivers/net/at1700.c | if (skb == NULL) { |
skb | 547 | drivers/net/at1700.c | skb->dev = dev; |
skb | 548 | drivers/net/at1700.c | skb_reserve(skb,2); |
skb | 550 | drivers/net/at1700.c | insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1); |
skb | 551 | drivers/net/at1700.c | skb->protocol=eth_type_trans(skb, dev); |
skb | 552 | drivers/net/at1700.c | netif_rx(skb); |
skb | 137 | drivers/net/atp.c | static int net_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 414 | drivers/net/atp.c | net_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 438 | drivers/net/atp.c | if (skb == NULL) { |
skb | 448 | drivers/net/atp.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 449 | drivers/net/atp.c | unsigned char *buf = skb->data; |
skb | 477 | drivers/net/atp.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 671 | drivers/net/atp.c | struct sk_buff *skb; |
skb | 673 | drivers/net/atp.c | skb = dev_alloc_skb(pkt_len); |
skb | 674 | drivers/net/atp.c | if (skb == NULL) { |
skb | 679 | drivers/net/atp.c | skb->dev = dev; |
skb | 681 | drivers/net/atp.c | read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port); |
skb | 684 | drivers/net/atp.c | unsigned char *data = skb->data; |
skb | 692 | drivers/net/atp.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 693 | drivers/net/atp.c | netif_rx(skb); |
skb | 360 | drivers/net/de4x5.c | struct sk_buff *skb[NUM_TX_DESC]; /* TX skb for freeing when sent */ |
skb | 405 | drivers/net/de4x5.c | static int de4x5_queue_pkt(struct sk_buff *skb, struct device *dev); |
skb | 425 | drivers/net/de4x5.c | static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb); |
skb | 945 | drivers/net/de4x5.c | de4x5_queue_pkt(struct sk_buff *skb, struct device *dev) |
skb | 984 | drivers/net/de4x5.c | if (lp->skb[i] != NULL) { |
skb | 985 | drivers/net/de4x5.c | if (lp->skb[i]->len != FAKE_FRAME_LEN) { |
skb | 987 | drivers/net/de4x5.c | dev_queue_xmit(lp->skb[i], dev, SOPRI_NORMAL); |
skb | 989 | drivers/net/de4x5.c | dev_kfree_skb(lp->skb[i], FREE_WRITE); |
skb | 992 | drivers/net/de4x5.c | dev_kfree_skb(lp->skb[i], FREE_WRITE); |
skb | 994 | drivers/net/de4x5.c | lp->skb[i] = NULL; |
skb | 997 | drivers/net/de4x5.c | if (skb->len != FAKE_FRAME_LEN) { |
skb | 998 | drivers/net/de4x5.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 1000 | drivers/net/de4x5.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1030 | drivers/net/de4x5.c | } else if (skb == NULL) { |
skb | 1032 | drivers/net/de4x5.c | } else if (skb->len == FAKE_FRAME_LEN) { /* Don't TX a fake frame! */ |
skb | 1033 | drivers/net/de4x5.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1034 | drivers/net/de4x5.c | } else if (skb->len > 0) { |
skb | 1042 | drivers/net/de4x5.c | load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb); |
skb | 1153 | drivers/net/de4x5.c | struct sk_buff *skb; |
skb | 1156 | drivers/net/de4x5.c | if ((skb = dev_alloc_skb(pkt_len+2)) != NULL) { |
skb | 1157 | drivers/net/de4x5.c | skb->dev = dev; |
skb | 1159 | drivers/net/de4x5.c | skb_reserve(skb,2); /* Align */ |
skb | 1162 | drivers/net/de4x5.c | memcpy(skb_put(skb,len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), len); |
skb | 1163 | drivers/net/de4x5.c | memcpy(skb_put(skb,pkt_len-len), bus_to_virt(lp->rx_ring[0].buf), pkt_len - len); |
skb | 1165 | drivers/net/de4x5.c | memcpy(skb_put(skb,pkt_len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), pkt_len); |
skb | 1169 | drivers/net/de4x5.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 1170 | drivers/net/de4x5.c | netif_rx(skb); |
skb | 1180 | drivers/net/de4x5.c | buf = skb->data; /* Look at the dest addr */ |
skb | 1259 | drivers/net/de4x5.c | if (lp->skb[entry] != NULL) { |
skb | 1260 | drivers/net/de4x5.c | dev_kfree_skb(lp->skb[entry], FREE_WRITE); |
skb | 1261 | drivers/net/de4x5.c | lp->skb[entry] = NULL; |
skb | 1367 | drivers/net/de4x5.c | static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb) |
skb | 1374 | drivers/net/de4x5.c | lp->skb[lp->tx_new] = skb; |
skb | 2478 | drivers/net/de4x5.c | struct sk_buff *skb; |
skb | 2480 | drivers/net/de4x5.c | if ((skb = alloc_skb(0, GFP_ATOMIC)) != NULL) { |
skb | 2481 | drivers/net/de4x5.c | skb->len= FAKE_FRAME_LEN; |
skb | 2482 | drivers/net/de4x5.c | skb->arp=1; |
skb | 2483 | drivers/net/de4x5.c | skb->dev=dev; |
skb | 2484 | drivers/net/de4x5.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 250 | drivers/net/de600.c | static int de600_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 404 | drivers/net/de600.c | de600_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 409 | drivers/net/de600.c | byte *buffer = skb->data; |
skb | 417 | drivers/net/de600.c | if (skb == NULL) { |
skb | 441 | drivers/net/de600.c | PRINTK(("de600_start_xmit:len=%d, page %d/%d\n", skb->len, tx_fifo_in, free_tx_pages)); |
skb | 443 | drivers/net/de600.c | if ((len = skb->len) < RUNT) |
skb | 483 | drivers/net/de600.c | if (skb->sk && (skb->sk->protocol == IPPROTO_TCP) && |
skb | 484 | drivers/net/de600.c | (skb->sk->prot->rspace != &de600_rspace)) |
skb | 485 | drivers/net/de600.c | skb->sk->prot->rspace = de600_rspace; /* Ugh! */ |
skb | 488 | drivers/net/de600.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 587 | drivers/net/de600.c | struct sk_buff *skb; |
skb | 612 | drivers/net/de600.c | skb = dev_alloc_skb(size+2); |
skb | 614 | drivers/net/de600.c | if (skb == NULL) { |
skb | 621 | drivers/net/de600.c | skb->dev = dev; |
skb | 622 | drivers/net/de600.c | skb_reserve(skb,2); /* Align */ |
skb | 625 | drivers/net/de600.c | buffer = skb_put(skb,size); |
skb | 634 | drivers/net/de600.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 636 | drivers/net/de600.c | netif_rx(skb); |
skb | 485 | drivers/net/de620.c | de620_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 490 | drivers/net/de620.c | byte *buffer = skb->data; |
skb | 499 | drivers/net/de620.c | if (skb == NULL) { |
skb | 524 | drivers/net/de620.c | if ((len = skb->len) < RUNT) |
skb | 534 | drivers/net/de620.c | (int)skb->len, using_txbuf)); |
skb | 564 | drivers/net/de620.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 626 | drivers/net/de620.c | struct sk_buff *skb; |
skb | 681 | drivers/net/de620.c | skb = dev_alloc_skb(size+2); |
skb | 682 | drivers/net/de620.c | if (skb == NULL) { /* Yeah, but no place to put it... */ |
skb | 688 | drivers/net/de620.c | skb_reserve(skb,2); /* Align */ |
skb | 689 | drivers/net/de620.c | skb->dev = dev; |
skb | 690 | drivers/net/de620.c | skb->free = 1; |
skb | 692 | drivers/net/de620.c | buffer = skb_put(skb,size); |
skb | 696 | drivers/net/de620.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 697 | drivers/net/de620.c | netif_rx(skb); /* deliver it "upstairs" */ |
skb | 368 | drivers/net/depca.c | static int depca_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 393 | drivers/net/depca.c | static int load_packet(struct device *dev, struct sk_buff *skb); |
skb | 811 | drivers/net/depca.c | depca_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 836 | drivers/net/depca.c | } else if (skb == NULL) { |
skb | 838 | drivers/net/depca.c | } else if (skb->len > 0) { |
skb | 845 | drivers/net/depca.c | status = load_packet(dev, skb); |
skb | 853 | drivers/net/depca.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 944 | drivers/net/depca.c | struct sk_buff *skb; |
skb | 946 | drivers/net/depca.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 947 | drivers/net/depca.c | if (skb != NULL) { |
skb | 949 | drivers/net/depca.c | skb_reserve(skb,2); /* 16 byte align the IP header */ |
skb | 950 | drivers/net/depca.c | buf = skb_put(skb,pkt_len); |
skb | 951 | drivers/net/depca.c | skb->dev = dev; |
skb | 964 | drivers/net/depca.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 965 | drivers/net/depca.c | netif_rx(skb); |
skb | 1565 | drivers/net/depca.c | static int load_packet(struct device *dev, struct sk_buff *skb) |
skb | 1571 | drivers/net/depca.c | end = (entry + (skb->len - 1) / TX_BUFF_SZ) & lp->txRingMask; |
skb | 1579 | drivers/net/depca.c | memcpy_toio(lp->tx_memcpy[entry], skb->data, len); |
skb | 1580 | drivers/net/depca.c | memcpy_toio(lp->tx_memcpy[0], skb->data + len, skb->len - len); |
skb | 1582 | drivers/net/depca.c | memcpy_toio(lp->tx_memcpy[entry], skb->data, skb->len); |
skb | 1586 | drivers/net/depca.c | len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len; |
skb | 59 | drivers/net/dummy.c | static int dummy_xmit(struct sk_buff *skb, struct device *dev); |
skb | 107 | drivers/net/dummy.c | dummy_xmit(struct sk_buff *skb, struct device *dev) |
skb | 113 | drivers/net/dummy.c | if (skb == NULL || dev == NULL) |
skb | 116 | drivers/net/dummy.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 144 | drivers/net/eepro.c | static int eepro_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 603 | drivers/net/eepro.c | eepro_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 641 | drivers/net/eepro.c | if (skb == NULL) { |
skb | 650 | drivers/net/eepro.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 651 | drivers/net/eepro.c | unsigned char *buf = skb->data; |
skb | 657 | drivers/net/eepro.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 1042 | drivers/net/eepro.c | struct sk_buff *skb; |
skb | 1045 | drivers/net/eepro.c | skb = dev_alloc_skb(rcv_size+2); |
skb | 1046 | drivers/net/eepro.c | if (skb == NULL) { |
skb | 1051 | drivers/net/eepro.c | skb->dev = dev; |
skb | 1052 | drivers/net/eepro.c | skb_reserve(skb,2); |
skb | 1054 | drivers/net/eepro.c | insw(ioaddr+IO_PORT, skb_put(skb,rcv_size), (rcv_size + 1) >> 1); |
skb | 1056 | drivers/net/eepro.c | skb->protocol = eth_type_trans(skb,dev); |
skb | 1057 | drivers/net/eepro.c | netif_rx(skb); |
skb | 293 | drivers/net/eexpress.c | static int eexp_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 453 | drivers/net/eexpress.c | eexp_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 488 | drivers/net/eexpress.c | if (skb == NULL) { |
skb | 497 | drivers/net/eexpress.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 498 | drivers/net/eexpress.c | unsigned char *buf = skb->data; |
skb | 508 | drivers/net/eexpress.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 939 | drivers/net/eexpress.c | struct sk_buff *skb; |
skb | 942 | drivers/net/eexpress.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 943 | drivers/net/eexpress.c | if (skb == NULL) { |
skb | 948 | drivers/net/eexpress.c | skb->dev = dev; |
skb | 949 | drivers/net/eexpress.c | skb_reserve(skb,2); |
skb | 953 | drivers/net/eexpress.c | insw(ioaddr, skb_put(skb,pkt_len), (pkt_len + 1) >> 1); |
skb | 955 | drivers/net/eexpress.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 956 | drivers/net/eexpress.c | netif_rx(skb); |
skb | 149 | drivers/net/eql.c | static int eql_slave_xmit(struct sk_buff *skb, struct device *dev); /* */ |
skb | 152 | drivers/net/eql.c | static int eql_header(struct sk_buff *skb, struct device *dev, |
skb | 156 | drivers/net/eql.c | unsigned long raddr, struct sk_buff *skb); /* */ |
skb | 357 | drivers/net/eql.c | eql_slave_xmit(struct sk_buff *skb, struct device *dev) |
skb | 363 | drivers/net/eql.c | if (skb == NULL) |
skb | 378 | drivers/net/eql.c | dev->name, eql_number_slaves (eql->queue), skb->len, |
skb | 382 | drivers/net/eql.c | dev_queue_xmit (skb, slave_dev, 1); |
skb | 384 | drivers/net/eql.c | slave->bytes_queued += skb->len; |
skb | 392 | drivers/net/eql.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 410 | drivers/net/eql.c | eql_header(struct sk_buff *skb, struct device *dev, |
skb | 421 | drivers/net/eql.c | unsigned long raddr, struct sk_buff *skb) |
skb | 292 | drivers/net/ewrk3.c | static int ewrk3_queue_pkt(struct sk_buff *skb, struct device *dev); |
skb | 760 | drivers/net/ewrk3.c | ewrk3_queue_pkt(struct sk_buff *skb, struct device *dev) |
skb | 796 | drivers/net/ewrk3.c | } else if (skb == NULL) { |
skb | 798 | drivers/net/ewrk3.c | } else if (skb->len > 0) { |
skb | 848 | drivers/net/ewrk3.c | unsigned char *p = skb->data; |
skb | 851 | drivers/net/ewrk3.c | outb((char)(skb->len & 0xff), EWRK3_DATA); |
skb | 852 | drivers/net/ewrk3.c | outb((char)((skb->len >> 8) & 0xff), EWRK3_DATA); |
skb | 854 | drivers/net/ewrk3.c | for (i=0; i<skb->len; i++) { |
skb | 860 | drivers/net/ewrk3.c | *buf++ = (char)(skb->len & 0xff); /* length (16 bit xfer)*/ |
skb | 862 | drivers/net/ewrk3.c | *buf++ = (char)(((skb->len >> 8) & 0xff) | XCT); |
skb | 864 | drivers/net/ewrk3.c | *(buf + skb->len) = 0x00; /* Write the XCT flag */ |
skb | 865 | drivers/net/ewrk3.c | memcpy(buf, skb->data, PRELOAD); /* Write PRELOAD bytes */ |
skb | 867 | drivers/net/ewrk3.c | memcpy(buf + PRELOAD, skb->data + PRELOAD, skb->len - PRELOAD); |
skb | 868 | drivers/net/ewrk3.c | *(buf + skb->len) = 0xff; /* Write the XCT flag */ |
skb | 870 | drivers/net/ewrk3.c | *buf++ = (char)((skb->len >> 8) & 0xff); |
skb | 872 | drivers/net/ewrk3.c | memcpy(buf, skb->data, skb->len); /* Write data bytes */ |
skb | 879 | drivers/net/ewrk3.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 1036 | drivers/net/ewrk3.c | struct sk_buff *skb; |
skb | 1038 | drivers/net/ewrk3.c | if ((skb = dev_alloc_skb(pkt_len+2)) != NULL) { |
skb | 1040 | drivers/net/ewrk3.c | skb->dev = dev; |
skb | 1041 | drivers/net/ewrk3.c | skb_reserve(skb,2); /* Align to 16 bytes */ |
skb | 1042 | drivers/net/ewrk3.c | p = skb_put(skb,pkt_len); |
skb | 1058 | drivers/net/ewrk3.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 1059 | drivers/net/ewrk3.c | netif_rx(skb); |
skb | 1071 | drivers/net/ewrk3.c | buf = skb->data; /* Look at the dest addr */ |
skb | 195 | drivers/net/hp100.c | static int hp100_start_xmit( struct sk_buff *skb, struct device *dev ); |
skb | 584 | drivers/net/hp100.c | static int hp100_start_xmit( struct sk_buff *skb, struct device *dev ) |
skb | 605 | drivers/net/hp100.c | if ( ( i = ( hp100_inl( TX_MEM_FREE ) & ~0x7fffffff ) ) < skb -> len + 16 ) |
skb | 649 | drivers/net/hp100.c | if ( skb == NULL ) |
skb | 655 | drivers/net/hp100.c | if ( skb -> len <= 0 ) return 0; |
skb | 668 | drivers/net/hp100.c | printk( "hp100_start_xmit: irq_status = 0x%x, len = %d\n", val, (int)skb -> len ); |
skb | 670 | drivers/net/hp100.c | ok_flag = skb -> len >= HP100_MIN_PACKET_SIZE; |
skb | 671 | drivers/net/hp100.c | i = ok_flag ? skb -> len : HP100_MIN_PACKET_SIZE; |
skb | 678 | drivers/net/hp100.c | memcpy( lp -> mem_ptr_virt, skb -> data, skb -> len ); |
skb | 680 | drivers/net/hp100.c | memset( lp -> mem_ptr_virt, 0, HP100_MIN_PACKET_SIZE - skb -> len ); |
skb | 684 | drivers/net/hp100.c | memcpy_toio( lp -> mem_ptr_phys, skb -> data, skb -> len ); |
skb | 686 | drivers/net/hp100.c | memset_io( lp -> mem_ptr_phys, 0, HP100_MIN_PACKET_SIZE - skb -> len ); |
skb | 691 | drivers/net/hp100.c | outsl( ioaddr + HP100_REG_DATA32, skb -> data, ( skb -> len + 3 ) >> 2 ); |
skb | 693 | drivers/net/hp100.c | for ( i = ( skb -> len + 3 ) & ~3; i < HP100_MIN_PACKET_SIZE; i += 4 ) |
skb | 701 | drivers/net/hp100.c | dev_kfree_skb( skb, FREE_WRITE ); |
skb | 720 | drivers/net/hp100.c | struct sk_buff *skb; |
skb | 763 | drivers/net/hp100.c | skb = dev_alloc_skb( pkt_len ); |
skb | 764 | drivers/net/hp100.c | if ( skb == NULL ) |
skb | 775 | drivers/net/hp100.c | skb -> dev = dev; |
skb | 776 | drivers/net/hp100.c | ptr = (u_char *)skb_put( skb, pkt_len ); |
skb | 786 | drivers/net/hp100.c | skb -> protocol = eth_type_trans( skb, dev ); |
skb | 787 | drivers/net/hp100.c | netif_rx( skb ); |
skb | 118 | drivers/net/ibmtr.c | static int tok_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 1063 | drivers/net/ibmtr.c | struct sk_buff *skb; |
skb | 1106 | drivers/net/ibmtr.c | if(!(skb=dev_alloc_skb(ntohs(rec_req->frame_len)-lan_hdr_len+sizeof(struct trh_hdr)))) { |
skb | 1114 | drivers/net/ibmtr.c | skb_put(skb,ntohs(rec_req->frame_len)-lan_hdr_len+sizeof(struct trh_hdr)); |
skb | 1115 | drivers/net/ibmtr.c | skb->dev=dev; |
skb | 1122 | drivers/net/ibmtr.c | data=skb->data; |
skb | 1152 | drivers/net/ibmtr.c | skb->protocol=tr_type_trans(skb,dev); |
skb | 1153 | drivers/net/ibmtr.c | netif_rx(skb); |
skb | 1158 | drivers/net/ibmtr.c | static int tok_send_packet(struct sk_buff *skb, struct device *dev) { |
skb | 1179 | drivers/net/ibmtr.c | if(skb==NULL) { |
skb | 1189 | drivers/net/ibmtr.c | ti->current_skb=skb; /* save skb. We will need it when the adapter |
skb | 256 | drivers/net/lance.c | static int lance_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 709 | drivers/net/lance.c | lance_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 751 | drivers/net/lance.c | if (skb == NULL) { |
skb | 756 | drivers/net/lance.c | if (skb->len <= 0) |
skb | 791 | drivers/net/lance.c | -(ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN); |
skb | 793 | drivers/net/lance.c | lp->tx_ring[entry].length = -skb->len; |
skb | 799 | drivers/net/lance.c | if ((int)(skb->data) + skb->len > 0x01000000) { |
skb | 802 | drivers/net/lance.c | dev->name, (int)(skb->data)); |
skb | 803 | drivers/net/lance.c | memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len); |
skb | 806 | drivers/net/lance.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 808 | drivers/net/lance.c | lp->tx_skbuff[entry] = skb; |
skb | 809 | drivers/net/lance.c | lp->tx_ring[entry].base = (int)(skb->data) | 0x83000000; |
skb | 986 | drivers/net/lance.c | struct sk_buff *skb; |
skb | 988 | drivers/net/lance.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 989 | drivers/net/lance.c | if (skb == NULL) { |
skb | 1002 | drivers/net/lance.c | skb->dev = dev; |
skb | 1003 | drivers/net/lance.c | skb_reserve(skb,2); /* 16 byte align */ |
skb | 1004 | drivers/net/lance.c | skb_put(skb,pkt_len); /* Make room */ |
skb | 1005 | drivers/net/lance.c | eth_copy_and_sum(skb, |
skb | 1008 | drivers/net/lance.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 1009 | drivers/net/lance.c | netif_rx(skb); |
skb | 48 | drivers/net/loopback.c | static int loopback_xmit(struct sk_buff *skb, struct device *dev) |
skb | 54 | drivers/net/loopback.c | if (skb == NULL || dev == NULL) |
skb | 73 | drivers/net/loopback.c | if(skb->free==0) |
skb | 75 | drivers/net/loopback.c | struct sk_buff *skb2=skb; |
skb | 76 | drivers/net/loopback.c | skb=skb_clone(skb, GFP_ATOMIC); /* Clone the buffer */ |
skb | 77 | drivers/net/loopback.c | if(skb==NULL) |
skb | 82 | drivers/net/loopback.c | else if(skb->sk) |
skb | 90 | drivers/net/loopback.c | skb->sk->wmem_alloc-=skb->truesize; |
skb | 91 | drivers/net/loopback.c | skb->sk->write_space(skb->sk); |
skb | 95 | drivers/net/loopback.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 96 | drivers/net/loopback.c | skb->dev=dev; |
skb | 99 | drivers/net/loopback.c | netif_rx(skb); |
skb | 101 | drivers/net/loopback.c | skb_device_unlock(skb); |
skb | 789 | drivers/net/ni52.c | struct sk_buff *skb; |
skb | 803 | drivers/net/ni52.c | skb = (struct sk_buff *) dev_alloc_skb(totlen+2); |
skb | 804 | drivers/net/ni52.c | if(skb != NULL) |
skb | 806 | drivers/net/ni52.c | skb->dev = dev; |
skb | 807 | drivers/net/ni52.c | skb_reserve(skb,2); /* 16 byte alignment */ |
skb | 808 | drivers/net/ni52.c | memcpy(skb_put(skb,totlen),(char *) p->base+(unsigned long) rbd->buffer, totlen); |
skb | 809 | drivers/net/ni52.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 810 | drivers/net/ni52.c | netif_rx(skb); |
skb | 925 | drivers/net/ni52.c | static int ni52_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 969 | drivers/net/ni52.c | if(skb == NULL) |
skb | 975 | drivers/net/ni52.c | if (skb->len <= 0) |
skb | 977 | drivers/net/ni52.c | if(skb->len > XMIT_BUFF_SIZE) |
skb | 979 | drivers/net/ni52.c | printk("%s: Sorry, max. framelength is %d bytes. The length of your frame is %ld bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len); |
skb | 987 | drivers/net/ni52.c | memcpy((char *)p->xmit_cbuffs[p->xmit_count],(char *)(skb->data),skb->len); |
skb | 988 | drivers/net/ni52.c | len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; |
skb | 1002 | drivers/net/ni52.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 1022 | drivers/net/ni52.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 1042 | drivers/net/ni52.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 120 | drivers/net/ni65.c | static int ni65_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 450 | drivers/net/ni65.c | struct sk_buff *skb; |
skb | 460 | drivers/net/ni65.c | skb = (struct sk_buff *) p->tmdbufs[p->tmdlast]; |
skb | 461 | drivers/net/ni65.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 498 | drivers/net/ni65.c | struct sk_buff *skb,*skb1; |
skb | 527 | drivers/net/ni65.c | skb = dev_alloc_skb(R_BUF_SIZE); |
skb | 528 | drivers/net/ni65.c | if(skb != NULL) |
skb | 530 | drivers/net/ni65.c | if( (unsigned long) (skb->data + R_BUF_SIZE) & 0xff000000) { |
skb | 531 | drivers/net/ni65.c | memcpy(skb_put(skb,len),p->recv_skb[p->rmdnum]->data,len); |
skb | 532 | drivers/net/ni65.c | skb1 = skb; |
skb | 536 | drivers/net/ni65.c | p->recv_skb[p->rmdnum] = skb; |
skb | 562 | drivers/net/ni65.c | static int ni65_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 579 | drivers/net/ni65.c | if(skb == NULL) |
skb | 585 | drivers/net/ni65.c | if (skb->len <= 0) |
skb | 600 | drivers/net/ni65.c | short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 605 | drivers/net/ni65.c | tmdp->u.buffer = (unsigned long) (skb->data); |
skb | 606 | drivers/net/ni65.c | p->tmdbufs[p->tmdnum] = skb; |
skb | 608 | drivers/net/ni65.c | memcpy((char *) (tmdp->u.buffer & 0x00ffffff),(char *)skb->data,skb->len); |
skb | 609 | drivers/net/ni65.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 153 | drivers/net/pi2.c | static int pi_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 210 | drivers/net/pi2.c | static void hardware_send_packet(struct pi_local *lp, struct sk_buff *skb) |
skb | 222 | drivers/net/pi2.c | skb_queue_tail(&lp->sndq, skb); |
skb | 340 | drivers/net/pi2.c | static void free_p(struct sk_buff *skb) |
skb | 342 | drivers/net/pi2.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 512 | drivers/net/pi2.c | struct sk_buff *skb; |
skb | 556 | drivers/net/pi2.c | skb = dev_alloc_skb(sksize); |
skb | 557 | drivers/net/pi2.c | if (skb == NULL) { |
skb | 563 | drivers/net/pi2.c | skb->dev = dev; |
skb | 566 | drivers/net/pi2.c | cfix=skb_put(skb,pkt_len); |
skb | 571 | drivers/net/pi2.c | skb->protocol=htons(ETH_P_AX25); |
skb | 572 | drivers/net/pi2.c | skb->mac.raw=skb->data; |
skb | 573 | drivers/net/pi2.c | IS_SKB(skb); |
skb | 574 | drivers/net/pi2.c | netif_rx(skb); |
skb | 587 | drivers/net/pi2.c | struct sk_buff *skb; |
skb | 645 | drivers/net/pi2.c | skb = dev_alloc_skb(sksize); |
skb | 646 | drivers/net/pi2.c | if (skb == NULL) { |
skb | 652 | drivers/net/pi2.c | skb->dev = dev; |
skb | 655 | drivers/net/pi2.c | cfix=skb_put(skb,pkt_len); |
skb | 659 | drivers/net/pi2.c | skb->protocol=ntohs(ETH_P_AX25); |
skb | 660 | drivers/net/pi2.c | IS_SKB(skb); |
skb | 661 | drivers/net/pi2.c | netif_rx(skb); |
skb | 1078 | drivers/net/pi2.c | static int pi_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 1081 | drivers/net/pi2.c | return ax25_encapsulate(skb, dev, type, daddr, saddr, len); |
skb | 1086 | drivers/net/pi2.c | struct sk_buff *skb) |
skb | 1088 | drivers/net/pi2.c | return ax25_rebuild_header(buff, dev, raddr, skb); |
skb | 1498 | drivers/net/pi2.c | static int pi_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 1505 | drivers/net/pi2.c | if (skb == NULL) { |
skb | 1509 | drivers/net/pi2.c | hardware_send_packet(lp, skb); |
skb | 133 | drivers/net/plip.c | unsigned long raddr, struct sk_buff *skb); |
skb | 134 | drivers/net/plip.c | static int plip_tx_packet(struct sk_buff *skb, struct device *dev); |
skb | 184 | drivers/net/plip.c | struct sk_buff *skb; |
skb | 199 | drivers/net/plip.c | unsigned long raddr, struct sk_buff *skb); |
skb | 398 | drivers/net/plip.c | if (rcv->skb) { |
skb | 399 | drivers/net/plip.c | rcv->skb->free = 1; |
skb | 400 | drivers/net/plip.c | kfree_skb(rcv->skb, FREE_READ); |
skb | 401 | drivers/net/plip.c | rcv->skb = NULL; |
skb | 404 | drivers/net/plip.c | if (snd->skb) { |
skb | 405 | drivers/net/plip.c | dev_kfree_skb(snd->skb, FREE_WRITE); |
skb | 406 | drivers/net/plip.c | snd->skb = NULL; |
skb | 527 | drivers/net/plip.c | rcv->skb = dev_alloc_skb(rcv->length.h); |
skb | 528 | drivers/net/plip.c | if (rcv->skb == NULL) { |
skb | 532 | drivers/net/plip.c | skb_put(rcv->skb,rcv->length.h); |
skb | 533 | drivers/net/plip.c | rcv->skb->dev = dev; |
skb | 539 | drivers/net/plip.c | lbuf = rcv->skb->data; |
skb | 564 | drivers/net/plip.c | rcv->skb->protocol=eth_type_trans(rcv->skb, dev); |
skb | 565 | drivers/net/plip.c | netif_rx(rcv->skb); |
skb | 567 | drivers/net/plip.c | rcv->skb = NULL; |
skb | 651 | drivers/net/plip.c | if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) { |
skb | 654 | drivers/net/plip.c | snd->skb = NULL; |
skb | 723 | drivers/net/plip.c | dev_kfree_skb(snd->skb, FREE_WRITE); |
skb | 730 | drivers/net/plip.c | snd->skb = NULL; |
skb | 839 | drivers/net/plip.c | struct sk_buff *skb) |
skb | 846 | drivers/net/plip.c | return nl->orig_rebuild_header(buff, dev, dst, skb); |
skb | 861 | drivers/net/plip.c | plip_tx_packet(struct sk_buff *skb, struct device *dev) |
skb | 872 | drivers/net/plip.c | if (skb == NULL) { |
skb | 882 | drivers/net/plip.c | if (skb->len > dev->mtu) { |
skb | 883 | drivers/net/plip.c | printk("%s: packet too big, %d.\n", dev->name, (int)skb->len); |
skb | 893 | drivers/net/plip.c | snd->skb = skb; |
skb | 894 | drivers/net/plip.c | snd->length.h = skb->len; |
skb | 940 | drivers/net/plip.c | nl->rcv_data.skb = nl->snd_data.skb = NULL; |
skb | 975 | drivers/net/plip.c | if (snd->skb) { |
skb | 976 | drivers/net/plip.c | dev_kfree_skb(snd->skb, FREE_WRITE); |
skb | 977 | drivers/net/plip.c | snd->skb = NULL; |
skb | 980 | drivers/net/plip.c | if (rcv->skb) { |
skb | 981 | drivers/net/plip.c | rcv->skb->free = 1; |
skb | 982 | drivers/net/plip.c | kfree_skb(rcv->skb, FREE_READ); |
skb | 983 | drivers/net/plip.c | rcv->skb = NULL; |
skb | 124 | drivers/net/ppp.c | static void ppp_add_arp(unsigned long addr, struct sk_buff *skb, |
skb | 1084 | drivers/net/ppp.c | struct sk_buff *skb; |
skb | 1153 | drivers/net/ppp.c | skb=dev_alloc_skb(count); |
skb | 1154 | drivers/net/ppp.c | if(skb) |
skb | 1156 | drivers/net/ppp.c | skb->mac.raw=skb->data; |
skb | 1157 | drivers/net/ppp.c | memcpy(skb_put(skb,count), c,count); |
skb | 1158 | drivers/net/ppp.c | skb->protocol=htons(ETH_P_IP); |
skb | 1159 | drivers/net/ppp.c | skb->dev=ppp->dev; |
skb | 1160 | drivers/net/ppp.c | netif_rx(skb); |
skb | 1724 | drivers/net/ppp.c | ppp_xmit(struct sk_buff *skb, struct device *dev) |
skb | 1733 | drivers/net/ppp.c | if (skb == NULL) { |
skb | 1741 | drivers/net/ppp.c | p = skb->data; |
skb | 1742 | drivers/net/ppp.c | len = skb->len; |
skb | 1746 | drivers/net/ppp.c | (unsigned long int) skb, ppp->sending)); |
skb | 1750 | drivers/net/ppp.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1773 | drivers/net/ppp.c | len = ntohs( ((struct iphdr *)(skb->data)) -> tot_len ); |
skb | 1815 | drivers/net/ppp.c | struct iphdr *iph = (struct iphdr *)skb->data; |
skb | 1862 | drivers/net/ppp.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1881 | drivers/net/ppp.c | ppp_add_arp(unsigned long addr, struct sk_buff *skb, struct device *dev) |
skb | 1888 | drivers/net/ppp.c | ppp_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 1896 | drivers/net/ppp.c | struct sk_buff *skb) |
skb | 89 | drivers/net/seeq8005.c | static int seeq8005_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 377 | drivers/net/seeq8005.c | seeq8005_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 398 | drivers/net/seeq8005.c | if (skb == NULL) { |
skb | 408 | drivers/net/seeq8005.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 409 | drivers/net/seeq8005.c | unsigned char *buf = skb->data; |
skb | 414 | drivers/net/seeq8005.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 536 | drivers/net/seeq8005.c | struct sk_buff *skb; |
skb | 539 | drivers/net/seeq8005.c | skb = dev_alloc_skb(pkt_len); |
skb | 540 | drivers/net/seeq8005.c | if (skb == NULL) { |
skb | 545 | drivers/net/seeq8005.c | skb->dev = dev; |
skb | 546 | drivers/net/seeq8005.c | skb_reserve(skb, 2); /* align data on 16 byte */ |
skb | 547 | drivers/net/seeq8005.c | buf = skb_put(skb,pkt_len); |
skb | 560 | drivers/net/seeq8005.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 561 | drivers/net/seeq8005.c | netif_rx(skb); |
skb | 489 | drivers/net/sk_g16.c | static int SK_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 1193 | drivers/net/sk_g16.c | static int SK_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 1224 | drivers/net/sk_g16.c | if (skb == NULL) |
skb | 1250 | drivers/net/sk_g16.c | short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 1258 | drivers/net/sk_g16.c | memcpy((char *) (tmdp->u.buffer & 0x00ffffff), (char *)skb->data, |
skb | 1259 | drivers/net/sk_g16.c | skb->len); |
skb | 1290 | drivers/net/sk_g16.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1571 | drivers/net/sk_g16.c | struct sk_buff *skb; |
skb | 1573 | drivers/net/sk_g16.c | skb = dev_alloc_skb(len+2); /* allocate socket buffer */ |
skb | 1575 | drivers/net/sk_g16.c | if (skb == NULL) /* Could not get mem ? */ |
skb | 1593 | drivers/net/sk_g16.c | skb->dev = dev; |
skb | 1594 | drivers/net/sk_g16.c | skb_reserve(skb,2); /* Align IP header on 16 byte boundary */ |
skb | 1603 | drivers/net/sk_g16.c | memcpy(skb_put(skb,len), (unsigned char *) (rmdp->u.buffer & 0x00ffffff), |
skb | 1614 | drivers/net/sk_g16.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 1615 | drivers/net/sk_g16.c | netif_rx(skb); /* queue packet and mark it for processing */ |
skb | 110 | drivers/net/skeleton.c | static int net_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 335 | drivers/net/skeleton.c | net_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 357 | drivers/net/skeleton.c | if (skb == NULL) { |
skb | 367 | drivers/net/skeleton.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 368 | drivers/net/skeleton.c | unsigned char *buf = skb->data; |
skb | 373 | drivers/net/skeleton.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 444 | drivers/net/skeleton.c | struct sk_buff *skb; |
skb | 446 | drivers/net/skeleton.c | skb = dev_alloc_skb(pkt_len); |
skb | 447 | drivers/net/skeleton.c | if (skb == NULL) { |
skb | 453 | drivers/net/skeleton.c | skb->dev = dev; |
skb | 456 | drivers/net/skeleton.c | memcpy(skb_put(skb,pkt_len), (void*)dev->rmem_start, |
skb | 459 | drivers/net/skeleton.c | insw(ioaddr, skb->data, (pkt_len + 1) >> 1); |
skb | 461 | drivers/net/skeleton.c | netif_rx(skb); |
skb | 342 | drivers/net/slip.c | struct sk_buff *skb; |
skb | 379 | drivers/net/slip.c | skb = dev_alloc_skb(count); |
skb | 380 | drivers/net/slip.c | if (skb == NULL) { |
skb | 385 | drivers/net/slip.c | skb->dev = sl->dev; |
skb | 386 | drivers/net/slip.c | memcpy(skb_put(skb,count), sl->rbuff, count); |
skb | 387 | drivers/net/slip.c | skb->mac.raw=skb->data; |
skb | 389 | drivers/net/slip.c | skb->protocol=htons(ETH_P_AX25); |
skb | 391 | drivers/net/slip.c | skb->protocol=htons(ETH_P_IP); |
skb | 392 | drivers/net/slip.c | netif_rx(skb); |
skb | 481 | drivers/net/slip.c | sl_xmit(struct sk_buff *skb, struct device *dev) |
skb | 520 | drivers/net/slip.c | if (skb != NULL) { |
skb | 522 | drivers/net/slip.c | sl_encaps(sl, skb->data, skb->len); |
skb | 523 | drivers/net/slip.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 533 | drivers/net/slip.c | sl_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 541 | drivers/net/slip.c | return ax25_encapsulate(skb, dev, type, daddr, saddr, len); |
skb | 552 | drivers/net/slip.c | struct sk_buff *skb) |
skb | 559 | drivers/net/slip.c | return ax25_rebuild_header(buff, dev, raddr, skb); |
skb | 161 | drivers/net/tulip.c | static int tulip_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 389 | drivers/net/tulip.c | tulip_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 419 | drivers/net/tulip.c | if (skb == NULL || skb->len <= 0) { |
skb | 441 | drivers/net/tulip.c | tp->tx_skbuff[entry] = skb; |
skb | 442 | drivers/net/tulip.c | tp->tx_ring[entry].length = skb->len | |
skb | 444 | drivers/net/tulip.c | tp->tx_ring[entry].buffer1 = skb->data; |
skb | 614 | drivers/net/tulip.c | struct sk_buff *skb; |
skb | 616 | drivers/net/tulip.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 617 | drivers/net/tulip.c | if (skb == NULL) { |
skb | 632 | drivers/net/tulip.c | skb->dev = dev; |
skb | 633 | drivers/net/tulip.c | skb_reserve(skb,2); /* 16 byte align the data fields */ |
skb | 634 | drivers/net/tulip.c | memcpy(skb_put(skb,pkt_len), lp->rx_ring[entry].buffer1, pkt_len); |
skb | 635 | drivers/net/tulip.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 636 | drivers/net/tulip.c | netif_rx(skb); |
skb | 47 | drivers/net/tunnel.c | static int tunnel_xmit(struct sk_buff *skb, struct device *dev); |
skb | 121 | drivers/net/tunnel.c | static int tunnel_xmit(struct sk_buff *skb, struct device *dev) |
skb | 132 | drivers/net/tunnel.c | if (skb == NULL || dev == NULL) |
skb | 157 | drivers/net/tunnel.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 162 | drivers/net/tunnel.c | iph=(struct iphdr *)skb->data; |
skb | 169 | drivers/net/tunnel.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 186 | drivers/net/tunnel.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 193 | drivers/net/tunnel.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 207 | drivers/net/tunnel.c | newlen = (skb->len + ip_header_len); |
skb | 211 | drivers/net/tunnel.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 221 | drivers/net/tunnel.c | memcpy(skb2->h.iph, skb->data, ip_header_len ); |
skb | 222 | drivers/net/tunnel.c | memcpy(skb2->data + ip_header_len, skb->data, skb->len); |
skb | 224 | drivers/net/tunnel.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1392 | drivers/net/wavelan.c | wavelan_send_packet(struct sk_buff *skb, device *dev) |
skb | 1433 | drivers/net/wavelan.c | if (skb == (struct sk_buff *)0) |
skb | 1447 | drivers/net/wavelan.c | length = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; |
skb | 1448 | drivers/net/wavelan.c | buf = skb->data; |
skb | 1455 | drivers/net/wavelan.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1500 | drivers/net/wavelan.c | struct sk_buff *skb; |
skb | 1643 | drivers/net/wavelan.c | if ((skb = dev_alloc_skb(sksize)) == (struct sk_buff *)0) |
skb | 1650 | drivers/net/wavelan.c | skb->dev = dev; |
skb | 1652 | drivers/net/wavelan.c | obram_read(ioaddr, rbd.rbd_bufl, skb_put(skb,pkt_len), pkt_len); |
skb | 1668 | drivers/net/wavelan.c | c = skb->data[i]; |
skb | 1670 | drivers/net/wavelan.c | printk(" %c", skb->data[i]); |
skb | 1672 | drivers/net/wavelan.c | printk("%02x", skb->data[i]); |
skb | 1681 | drivers/net/wavelan.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 1682 | drivers/net/wavelan.c | netif_rx(skb); |
skb | 184 | drivers/net/znet.c | static int znet_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 317 | drivers/net/znet.c | static int znet_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 343 | drivers/net/znet.c | if (skb == NULL) { |
skb | 360 | drivers/net/znet.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 361 | drivers/net/znet.c | unsigned char *buf = (void *)skb->data; |
skb | 386 | drivers/net/znet.c | memcpy(zn.tx_cur, buf, skb->len); |
skb | 400 | drivers/net/znet.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 549 | drivers/net/znet.c | struct sk_buff *skb; |
skb | 551 | drivers/net/znet.c | skb = dev_alloc_skb(pkt_len); |
skb | 552 | drivers/net/znet.c | if (skb == NULL) { |
skb | 558 | drivers/net/znet.c | skb->dev = dev; |
skb | 562 | drivers/net/znet.c | memcpy(skb_put(skb,semi_cnt), zn.rx_cur, semi_cnt); |
skb | 563 | drivers/net/znet.c | memcpy(skb_put(skb,pkt_len-semi_cnt), zn.rx_start, |
skb | 566 | drivers/net/znet.c | memcpy(skb_put(skb,pkt_len), zn.rx_cur, pkt_len); |
skb | 568 | drivers/net/znet.c | unsigned int *packet = (unsigned int *) skb->data; |
skb | 573 | drivers/net/znet.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 574 | drivers/net/znet.c | netif_rx(skb); |
skb | 141 | include/linux/atalk.h | extern int aarp_send_ddp(struct device *dev,struct sk_buff *skb, struct at_addr *sa, void *hwaddr); |
skb | 31 | include/linux/etherdevice.h | extern int eth_header(struct sk_buff *skb, struct device *dev, |
skb | 35 | include/linux/etherdevice.h | unsigned long dst, struct sk_buff *skb); |
skb | 36 | include/linux/etherdevice.h | extern unsigned short eth_type_trans(struct sk_buff *skb, struct device *dev); |
skb | 144 | include/linux/netdevice.h | int (*hard_start_xmit) (struct sk_buff *skb, |
skb | 146 | include/linux/netdevice.h | int (*hard_header) (struct sk_buff *skb, |
skb | 153 | include/linux/netdevice.h | unsigned long raddr, struct sk_buff *skb); |
skb | 202 | include/linux/netdevice.h | extern void dev_queue_xmit(struct sk_buff *skb, struct device *dev, |
skb | 205 | include/linux/netdevice.h | extern void netif_rx(struct sk_buff *skb); |
skb | 117 | include/linux/skbuff.h | extern void kfree_skb(struct sk_buff *skb, int rw); |
skb | 128 | include/linux/skbuff.h | extern void kfree_skbmem(struct sk_buff *skb); |
skb | 129 | include/linux/skbuff.h | extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority); |
skb | 130 | include/linux/skbuff.h | extern void skb_device_lock(struct sk_buff *skb); |
skb | 131 | include/linux/skbuff.h | extern void skb_device_unlock(struct sk_buff *skb); |
skb | 132 | include/linux/skbuff.h | extern void dev_kfree_skb(struct sk_buff *skb, int mode); |
skb | 133 | include/linux/skbuff.h | extern int skb_device_locked(struct sk_buff *skb); |
skb | 134 | include/linux/skbuff.h | extern unsigned char * skb_put(struct sk_buff *skb, int len); |
skb | 135 | include/linux/skbuff.h | extern unsigned char * skb_push(struct sk_buff *skb, int len); |
skb | 136 | include/linux/skbuff.h | extern unsigned char * skb_pull(struct sk_buff *skb, int len); |
skb | 137 | include/linux/skbuff.h | extern int skb_headroom(struct sk_buff *skb); |
skb | 138 | include/linux/skbuff.h | extern int skb_tailroom(struct sk_buff *skb); |
skb | 139 | include/linux/skbuff.h | extern void skb_reserve(struct sk_buff *skb, int len); |
skb | 140 | include/linux/skbuff.h | extern void skb_trim(struct sk_buff *skb, int len); |
skb | 155 | include/linux/skbuff.h | extern int skb_check(struct sk_buff *skb,int,int, char *); |
skb | 156 | include/linux/skbuff.h | #define IS_SKB(skb) skb_check((skb), 0, __LINE__,__FILE__) |
skb | 157 | include/linux/skbuff.h | #define IS_SKB_HEAD(skb) skb_check((skb), 1, __LINE__,__FILE__) |
skb | 159 | include/linux/skbuff.h | #define IS_SKB(skb) |
skb | 160 | include/linux/skbuff.h | #define IS_SKB_HEAD(skb) |
skb | 287 | include/linux/skbuff.h | extern __inline__ void skb_unlink(struct sk_buff *skb) |
skb | 294 | include/linux/skbuff.h | if(skb->prev && skb->next) |
skb | 296 | include/linux/skbuff.h | skb->next->prev = skb->prev; |
skb | 297 | include/linux/skbuff.h | skb->prev->next = skb->next; |
skb | 298 | include/linux/skbuff.h | skb->next = NULL; |
skb | 299 | include/linux/skbuff.h | skb->prev = NULL; |
skb | 308 | include/linux/skbuff.h | extern __inline__ unsigned char *skb_put(struct sk_buff *skb, int len) |
skb | 310 | include/linux/skbuff.h | unsigned char *tmp=skb->tail; |
skb | 311 | include/linux/skbuff.h | skb->tail+=len; |
skb | 312 | include/linux/skbuff.h | skb->len+=len; |
skb | 313 | include/linux/skbuff.h | if(skb->tail>skb->end) |
skb | 318 | include/linux/skbuff.h | extern __inline__ unsigned char *skb_push(struct sk_buff *skb, int len) |
skb | 320 | include/linux/skbuff.h | skb->data-=len; |
skb | 321 | include/linux/skbuff.h | skb->len+=len; |
skb | 322 | include/linux/skbuff.h | if(skb->data<skb->head) |
skb | 324 | include/linux/skbuff.h | return skb->data; |
skb | 327 | include/linux/skbuff.h | extern __inline__ unsigned char * skb_pull(struct sk_buff *skb, int len) |
skb | 329 | include/linux/skbuff.h | if(len > skb->len) |
skb | 331 | include/linux/skbuff.h | skb->data+=len; |
skb | 332 | include/linux/skbuff.h | skb->len-=len; |
skb | 333 | include/linux/skbuff.h | return skb->data; |
skb | 336 | include/linux/skbuff.h | extern __inline__ int skb_headroom(struct sk_buff *skb) |
skb | 338 | include/linux/skbuff.h | return skb->data-skb->head; |
skb | 341 | include/linux/skbuff.h | extern __inline__ int skb_tailroom(struct sk_buff *skb) |
skb | 343 | include/linux/skbuff.h | return skb->end-skb->tail; |
skb | 346 | include/linux/skbuff.h | extern __inline__ void skb_reserve(struct sk_buff *skb, int len) |
skb | 348 | include/linux/skbuff.h | skb->data+=len; |
skb | 349 | include/linux/skbuff.h | skb->tail+=len; |
skb | 352 | include/linux/skbuff.h | extern __inline__ void skb_trim(struct sk_buff *skb, int len) |
skb | 354 | include/linux/skbuff.h | if(skb->len>len) |
skb | 356 | include/linux/skbuff.h | skb->len=len; |
skb | 357 | include/linux/skbuff.h | skb->tail=skb->data+len; |
skb | 367 | include/linux/skbuff.h | extern void skb_free_datagram(struct sk_buff *skb); |
skb | 31 | include/linux/trdevice.h | extern int tr_header(struct sk_buff *skb, struct device *dev, |
skb | 35 | include/linux/trdevice.h | unsigned long raddr, struct sk_buff *skb); |
skb | 36 | include/linux/trdevice.h | extern unsigned short tr_type_trans(struct sk_buff *skb, struct device *dev); |
skb | 8 | include/net/arp.h | extern int arp_rcv(struct sk_buff *skb, struct device *dev, |
skb | 12 | include/net/arp.h | struct device *dev, u32 saddr, struct sk_buff *skb); |
skb | 55 | include/net/ip.h | struct sk_buff *skb; /* complete received fragment */ |
skb | 82 | include/net/ip.h | extern int ip_build_header(struct sk_buff *skb, |
skb | 89 | include/net/ip.h | extern int ip_rcv(struct sk_buff *skb, struct device *dev, |
skb | 91 | include/net/ip.h | extern int ip_forward(struct sk_buff *skb, struct device *dev, int is_frag, unsigned long target_addr, int target_strict); |
skb | 95 | include/net/ip.h | struct device *dev, struct sk_buff *skb, |
skb | 1 | include/net/ipip.h | extern int ipip_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 48 | include/net/ipx.h | extern int ipx_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt); |
skb | 31 | include/net/protocol.h | int (*handler)(struct sk_buff *skb, struct device *dev, |
skb | 35 | include/net/protocol.h | int (*frag_handler)(struct sk_buff *skb, struct device *dev, |
skb | 244 | include/net/sock.h | void (*wfree)(struct sock *sk, struct sk_buff *skb); |
skb | 245 | include/net/sock.h | void (*rfree)(struct sock *sk, struct sk_buff *skb); |
skb | 261 | include/net/sock.h | int (*build_header)(struct sk_buff *skb, |
skb | 271 | include/net/sock.h | struct device *dev, struct sk_buff *skb, |
skb | 339 | include/net/sock.h | struct sk_buff *skb); |
skb | 341 | include/net/sock.h | struct sk_buff *skb); |
skb | 352 | include/net/sock.h | extern struct sk_buff *sock_alloc_send_skb(struct sock *skb, |
skb | 366 | include/net/sock.h | extern __inline__ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
skb | 369 | include/net/sock.h | if(sk->rmem_alloc + skb->truesize >= sk->rcvbuf) |
skb | 373 | include/net/sock.h | sk->rmem_alloc+=skb->truesize; |
skb | 374 | include/net/sock.h | skb->sk=sk; |
skb | 376 | include/net/sock.h | skb_queue_tail(&sk->receive_queue,skb); |
skb | 378 | include/net/sock.h | sk->data_ready(sk,skb->len); |
skb | 129 | include/net/tcp.h | extern int tcp_rcv(struct sk_buff *skb, struct device *dev, |
skb | 45 | include/net/udp.h | extern int udp_rcv(struct sk_buff *skb, struct device *dev, |
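
The sock.h rows above include sock_queue_rcv_skb(), the common path for handing a received buffer to a socket: it checks the receive buffer limit, charges rmem, queues the skb and wakes the reader. The sketch below shows a protocol receive hook built on it; myproto_rcv() and my_find_socket() are hypothetical names, while the queue/free pattern mirrors the real handlers indexed later (p8022.c, ddp.c).

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/sock.h>

static struct sock *my_find_socket(struct sk_buff *skb);        /* hypothetical demultiplexer */

static int myproto_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
{
        struct sock *sk = my_find_socket(skb);

        if (sk == NULL || sock_queue_rcv_skb(sk, skb) < 0) {
                skb->sk = NULL;
                kfree_skb(skb, FREE_READ);      /* no taker, or the receive buffer is full */
                return 0;
        }
        return 0;                               /* queued; sk->data_ready() has been called */
}
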
skb | 6 | net/802/llc.c | int llc_rx_adm(struct sock *sk,struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 36 | net/802/llc.c | int llc_rx_setup(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 70 | net/802/llc.c | int llc_rx_reset(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 114 | net/802/llc.c | int llc_rx_d_conn(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 150 | net/802/llc.c | int llc_rx_error(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 199 | net/802/llc.c | int llc_rx_nr_shared(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 288 | net/802/llc.c | int llc_rx_normal(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 290 | net/802/llc.c | if(llc_rx_nr_shared(sk, skb, type, cmd, pf, nr, ns)) |
skb | 364 | net/802/llc.c | llc_queue_data(sk,skb); |
skb | 30 | net/802/p8022.c | p8022_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 34 | net/802/p8022.c | proto = find_8022_client(*(skb->h.raw)); |
skb | 36 | net/802/p8022.c | skb->h.raw += 3; |
skb | 37 | net/802/p8022.c | skb_pull(skb,3); |
skb | 38 | net/802/p8022.c | return proto->rcvfunc(skb, dev, pt); |
skb | 41 | net/802/p8022.c | skb->sk = NULL; |
skb | 42 | net/802/p8022.c | kfree_skb(skb, FREE_READ); |
skb | 48 | net/802/p8022.c | struct sk_buff *skb, unsigned char *dest_node) |
skb | 50 | net/802/p8022.c | struct device *dev = skb->dev; |
skb | 53 | net/802/p8022.c | rawp = skb_push(skb,3); |
skb | 57 | net/802/p8022.c | dev->hard_header(skb, dev, ETH_P_802_3, dest_node, NULL, skb->len); |
skb | 9 | net/802/p8023.c | struct sk_buff *skb, unsigned char *dest_node) |
skb | 11 | net/802/p8023.c | struct device *dev = skb->dev; |
skb | 13 | net/802/p8023.c | dev->hard_header(skb, dev, ETH_P_802_3, dest_node, NULL, skb->len); |
skb | 40 | net/802/psnap.c | int snap_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 53 | net/802/psnap.c | proto = find_snap_client(skb->h.raw); |
skb | 60 | net/802/psnap.c | skb->h.raw += 5; |
skb | 61 | net/802/psnap.c | skb_pull(skb,5); |
skb | 64 | net/802/psnap.c | return proto->rcvfunc(skb, dev, &psnap_packet_type); |
skb | 66 | net/802/psnap.c | skb->sk = NULL; |
skb | 67 | net/802/psnap.c | kfree_skb(skb, FREE_READ); |
skb | 75 | net/802/psnap.c | static void snap_datalink_header(struct datalink_proto *dl, struct sk_buff *skb, unsigned char *dest_node) |
skb | 77 | net/802/psnap.c | memcpy(skb_push(skb,5),dl->type,5); |
skb | 78 | net/802/psnap.c | snap_dl->datalink_header(snap_dl, skb, dest_node); |
skb | 41 | net/802/tr.c | int tr_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 45 | net/802/tr.c | struct trh_hdr *trh=(struct trh_hdr *)skb_push(skb,dev->hard_header_len); |
skb | 72 | net/802/tr.c | struct sk_buff *skb) { |
skb | 82 | net/802/tr.c | if(arp_find(trh->daddr, dest, dev, dev->pa_addr, skb)) { |
skb | 91 | net/802/tr.c | unsigned short tr_type_trans(struct sk_buff *skb, struct device *dev) { |
skb | 93 | net/802/tr.c | struct trh_hdr *trh=(struct trh_hdr *)skb->data; |
skb | 94 | net/802/tr.c | struct trllc *trllc=(struct trllc *)(skb->data+sizeof(struct trh_hdr)); |
skb | 96 | net/802/tr.c | skb->mac.raw = skb->data; |
skb | 98 | net/802/tr.c | skb_pull(skb,dev->hard_header_len); |
skb | 106 | net/802/tr.c | skb->pkt_type=PACKET_BROADCAST; |
skb | 108 | net/802/tr.c | skb->pkt_type=PACKET_MULTICAST; |
skb | 114 | net/802/tr.c | skb->pkt_type=PACKET_OTHERHOST; |
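
The p8022.c and p8023.c rows above share one transmit-side shape: push the LLC bytes onto the front of the buffer with skb_push(), then ask the device to add its own MAC header via dev->hard_header(). The sketch below reproduces that shape only; the SAP values and the function name are placeholders, not the values used by the real 802.2 code.

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

static void example_llc_header(struct sk_buff *skb, unsigned char *dest_node)
{
        struct device *dev = skb->dev;
        unsigned char *rawp;

        rawp = skb_push(skb, 3);                /* DSAP, SSAP, control go in front of the data */
        rawp[0] = 0xAA;                         /* placeholder SAP values */
        rawp[1] = 0xAA;
        rawp[2] = 0x03;                         /* UI control field */
        dev->hard_header(skb, dev, ETH_P_802_3, dest_node, NULL, skb->len);
}
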
skb | 90 | net/appletalk/aarp.c | struct sk_buff *skb; |
skb | 92 | net/appletalk/aarp.c | while((skb=skb_dequeue(&a->packet_queue))!=NULL) |
skb | 93 | net/appletalk/aarp.c | kfree_skb(skb, FREE_WRITE); |
skb | 106 | net/appletalk/aarp.c | struct sk_buff *skb=alloc_skb(len, GFP_ATOMIC); |
skb | 110 | net/appletalk/aarp.c | if(skb==NULL || sat==NULL) |
skb | 117 | net/appletalk/aarp.c | skb_reserve(skb,dev->hard_header_len+aarp_dl->header_length); |
skb | 118 | net/appletalk/aarp.c | eah = (struct elapaarp *)skb_put(skb,sizeof(struct elapaarp)); |
skb | 119 | net/appletalk/aarp.c | skb->arp = 1; |
skb | 120 | net/appletalk/aarp.c | skb->free = 1; |
skb | 121 | net/appletalk/aarp.c | skb->dev = a->dev; |
skb | 149 | net/appletalk/aarp.c | aarp_dl->datalink_header(aarp_dl, skb, aarp_eth_multicast); |
skb | 156 | net/appletalk/aarp.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 168 | net/appletalk/aarp.c | struct sk_buff *skb=alloc_skb(len, GFP_ATOMIC); |
skb | 171 | net/appletalk/aarp.c | if(skb==NULL) |
skb | 178 | net/appletalk/aarp.c | skb_reserve(skb,dev->hard_header_len+aarp_dl->header_length); |
skb | 179 | net/appletalk/aarp.c | eah = (struct elapaarp *)skb_put(skb,sizeof(struct elapaarp)); |
skb | 180 | net/appletalk/aarp.c | skb->arp = 1; |
skb | 181 | net/appletalk/aarp.c | skb->free = 1; |
skb | 182 | net/appletalk/aarp.c | skb->dev = dev; |
skb | 213 | net/appletalk/aarp.c | aarp_dl->datalink_header(aarp_dl, skb, sha); |
skb | 219 | net/appletalk/aarp.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 230 | net/appletalk/aarp.c | struct sk_buff *skb=alloc_skb(len, GFP_ATOMIC); |
skb | 234 | net/appletalk/aarp.c | if(skb==NULL) |
skb | 241 | net/appletalk/aarp.c | skb_reserve(skb,dev->hard_header_len+aarp_dl->header_length); |
skb | 242 | net/appletalk/aarp.c | eah = (struct elapaarp *)skb_put(skb,sizeof(struct elapaarp)); |
skb | 244 | net/appletalk/aarp.c | skb->arp = 1; |
skb | 245 | net/appletalk/aarp.c | skb->free = 1; |
skb | 246 | net/appletalk/aarp.c | skb->dev = dev; |
skb | 274 | net/appletalk/aarp.c | aarp_dl->datalink_header(aarp_dl, skb, aarp_eth_multicast); |
skb | 280 | net/appletalk/aarp.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 428 | net/appletalk/aarp.c | int aarp_send_ddp(struct device *dev,struct sk_buff *skb, struct at_addr *sa, void *hwaddr) |
skb | 443 | net/appletalk/aarp.c | skb->dev = dev; |
skb | 455 | net/appletalk/aarp.c | ddp_dl->datalink_header(ddp_dl, skb, ddp_eth_multicast); |
skb | 456 | net/appletalk/aarp.c | if(skb->sk==NULL) |
skb | 457 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, SOPRI_NORMAL); |
skb | 459 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, skb->sk->priority); |
skb | 470 | net/appletalk/aarp.c | ddp_dl->datalink_header(ddp_dl, skb, a->hwaddr); |
skb | 471 | net/appletalk/aarp.c | if(skb->sk==NULL) |
skb | 472 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, SOPRI_NORMAL); |
skb | 474 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, skb->sk->priority); |
skb | 487 | net/appletalk/aarp.c | skb_queue_tail(&a->packet_queue, skb); |
skb | 507 | net/appletalk/aarp.c | skb_queue_tail(&a->packet_queue, skb); |
skb | 538 | net/appletalk/aarp.c | struct sk_buff *skb; |
skb | 549 | net/appletalk/aarp.c | while((skb=skb_dequeue(&a->packet_queue))!=NULL) |
skb | 552 | net/appletalk/aarp.c | ddp_dl->datalink_header(ddp_dl,skb,a->hwaddr); |
skb | 553 | net/appletalk/aarp.c | if(skb->sk==NULL) |
skb | 554 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, SOPRI_NORMAL); |
skb | 556 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, skb->sk->priority); |
skb | 564 | net/appletalk/aarp.c | static int aarp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 566 | net/appletalk/aarp.c | struct elapaarp *ea=(struct elapaarp *)skb->h.raw; |
skb | 580 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 588 | net/appletalk/aarp.c | if(!skb_pull(skb,sizeof(*ea))) |
skb | 590 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 603 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 634 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 646 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 703 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 193 | net/appletalk/ddp.c | struct sk_buff *skb; |
skb | 196 | net/appletalk/ddp.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) |
skb | 198 | net/appletalk/ddp.c | kfree_skb(skb,FREE_READ); |
skb | 1339 | net/appletalk/ddp.c | int atalk_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 1342 | net/appletalk/ddp.c | struct ddpehdr *ddp=(void *)skb->h.raw; |
skb | 1348 | net/appletalk/ddp.c | if(skb->len<sizeof(*ddp)) |
skb | 1350 | net/appletalk/ddp.c | kfree_skb(skb,FREE_READ); |
skb | 1367 | net/appletalk/ddp.c | origlen = skb->len; |
skb | 1369 | net/appletalk/ddp.c | skb_trim(skb,min(skb->len,ddp->deh_len)); |
skb | 1377 | net/appletalk/ddp.c | if(skb->len<sizeof(*ddp)) |
skb | 1379 | net/appletalk/ddp.c | kfree_skb(skb,FREE_READ); |
skb | 1391 | net/appletalk/ddp.c | kfree_skb(skb,FREE_READ); |
skb | 1410 | net/appletalk/ddp.c | if (skb->pkt_type != PACKET_HOST || ddp->deh_dnet == 0) |
skb | 1412 | net/appletalk/ddp.c | kfree_skb(skb, FREE_READ); |
skb | 1423 | net/appletalk/ddp.c | kfree_skb(skb, FREE_READ); |
skb | 1429 | net/appletalk/ddp.c | skb_trim(skb,min(origlen, rt->dev->hard_header_len + |
skb | 1436 | net/appletalk/ddp.c | if(aarp_send_ddp(rt->dev, skb, &ta, NULL)==-1) |
skb | 1437 | net/appletalk/ddp.c | kfree_skb(skb, FREE_READ); |
skb | 1451 | net/appletalk/ddp.c | kfree_skb(skb,FREE_READ); |
skb | 1460 | net/appletalk/ddp.c | skb->sk = sock; |
skb | 1462 | net/appletalk/ddp.c | if(sock_queue_rcv_skb(sock,skb)<0) |
skb | 1464 | net/appletalk/ddp.c | skb->sk=NULL; |
skb | 1465 | net/appletalk/ddp.c | kfree_skb(skb, FREE_WRITE); |
skb | 1475 | net/appletalk/ddp.c | struct sk_buff *skb; |
skb | 1548 | net/appletalk/ddp.c | skb = sock_alloc_send_skb(sk, size, 0 , &err); |
skb | 1549 | net/appletalk/ddp.c | if(skb==NULL) |
skb | 1552 | net/appletalk/ddp.c | skb->sk=sk; |
skb | 1553 | net/appletalk/ddp.c | skb->free=1; |
skb | 1554 | net/appletalk/ddp.c | skb->arp=1; |
skb | 1555 | net/appletalk/ddp.c | skb_reserve(skb,ddp_dl->header_length); |
skb | 1556 | net/appletalk/ddp.c | skb_reserve(skb,dev->hard_header_len); |
skb | 1558 | net/appletalk/ddp.c | skb->dev=dev; |
skb | 1563 | net/appletalk/ddp.c | ddp=(struct ddpehdr *)skb_put(skb,sizeof(struct ddpehdr)); |
skb | 1584 | net/appletalk/ddp.c | memcpy_fromiovec(skb_put(skb,len),msg->msg_iov,len); |
skb | 1600 | net/appletalk/ddp.c | struct sk_buff *skb2=skb_clone(skb, GFP_KERNEL); |
skb | 1618 | net/appletalk/ddp.c | sk->wmem_alloc-=skb->truesize; |
skb | 1619 | net/appletalk/ddp.c | ddp_dl->datalink_header(ddp_dl, skb, dev->dev_addr); |
skb | 1620 | net/appletalk/ddp.c | skb->sk = NULL; |
skb | 1621 | net/appletalk/ddp.c | skb->mac.raw=skb->data; |
skb | 1622 | net/appletalk/ddp.c | skb->h.raw = skb->data + ddp_dl->header_length + dev->hard_header_len; |
skb | 1623 | net/appletalk/ddp.c | skb_pull(skb,dev->hard_header_len); |
skb | 1624 | net/appletalk/ddp.c | skb_pull(skb,ddp_dl->header_length); |
skb | 1625 | net/appletalk/ddp.c | atalk_rcv(skb,dev,NULL); |
skb | 1637 | net/appletalk/ddp.c | if(aarp_send_ddp(dev,skb,&usat->sat_addr, NULL)==-1) |
skb | 1638 | net/appletalk/ddp.c | kfree_skb(skb, FREE_WRITE); |
skb | 1674 | net/appletalk/ddp.c | struct sk_buff *skb; |
skb | 1687 | net/appletalk/ddp.c | skb=skb_recv_datagram(sk,flags,noblock,&er); |
skb | 1688 | net/appletalk/ddp.c | if(skb==NULL) |
skb | 1691 | net/appletalk/ddp.c | ddp = (struct ddpehdr *)(skb->h.raw); |
skb | 1697 | net/appletalk/ddp.c | skb_copy_datagram_iovec(skb,0,msg->msg_iov,copied); |
skb | 1704 | net/appletalk/ddp.c | skb_copy_datagram_iovec(skb,sizeof(*ddp),msg->msg_iov,copied); |
skb | 1713 | net/appletalk/ddp.c | skb_free_datagram(skb); |
skb | 1791 | net/appletalk/ddp.c | struct sk_buff *skb; |
skb | 1793 | net/appletalk/ddp.c | if((skb=skb_peek(&sk->receive_queue))!=NULL) |
skb | 1794 | net/appletalk/ddp.c | v=skb->len-sizeof(struct ddpehdr); |
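
The ddp.c sendmsg rows show the transmit-side allocation sequence most protocols in this tree share (af_ax25.c repeats it below): sock_alloc_send_skb(), mark the buffer, reserve room for the lower headers, then skb_put() for the protocol header and memcpy_fromiovec() for the user data. A condensed sketch follows; example_xmit_alloc(), hdrlen and the include list are illustrative for this kernel vintage.

#include <linux/string.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/sock.h>

static int example_xmit_alloc(struct sock *sk, struct device *dev,
                              struct msghdr *msg, int len, int hdrlen)
{
        int err;
        int size = dev->hard_header_len + hdrlen + len;
        struct sk_buff *skb = sock_alloc_send_skb(sk, size, 0, &err);

        if (skb == NULL)
                return err;                     /* err was set by sock_alloc_send_skb() */
        skb->sk   = sk;
        skb->free = 1;                          /* driver frees it once transmitted */
        skb->arp  = 1;                          /* claim the hardware header is resolved */
        skb_reserve(skb, dev->hard_header_len); /* leave room for the MAC header */
        memset(skb_put(skb, hdrlen), 0, hdrlen);                /* placeholder protocol header */
        memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); /* copy the user data */
        /* ...fill in the real header and hand the skb down to the datalink layer... */
        return len;
}
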
skb | 355 | net/ax25/af_ax25.c | static void ax25_send_to_raw(struct sock *sk, struct sk_buff *skb, int proto) |
skb | 361 | net/ax25/af_ax25.c | if ((copy = skb_clone(skb, GFP_ATOMIC)) == NULL) |
skb | 368 | net/ax25/af_ax25.c | sk->data_ready(sk, skb->len); |
skb | 396 | net/ax25/af_ax25.c | struct sk_buff *skb; |
skb | 408 | net/ax25/af_ax25.c | while ((skb = skb_dequeue(&ax25->sk->receive_queue)) != NULL) { |
skb | 409 | net/ax25/af_ax25.c | if (skb->sk != ax25->sk) { /* A pending connection */ |
skb | 410 | net/ax25/af_ax25.c | skb->sk->dead = 1; /* Queue the unaccepted socket for death */ |
skb | 411 | net/ax25/af_ax25.c | ax25_set_timer(skb->sk->ax25); |
skb | 412 | net/ax25/af_ax25.c | skb->sk->ax25->state = AX25_STATE_0; |
skb | 415 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 588 | net/ax25/af_ax25.c | int ax25_send_frame(struct sk_buff *skb, ax25_address *src, ax25_address *dest, |
skb | 593 | net/ax25/af_ax25.c | if (skb == NULL) |
skb | 604 | net/ax25/af_ax25.c | ax25_output(ax25, skb); |
skb | 632 | net/ax25/af_ax25.c | ax25_output(ax25, skb); |
skb | 1255 | net/ax25/af_ax25.c | struct sk_buff *skb; |
skb | 1274 | net/ax25/af_ax25.c | if ((skb = skb_dequeue(&sk->receive_queue)) == NULL) { |
skb | 1285 | net/ax25/af_ax25.c | } while (skb == NULL); |
skb | 1287 | net/ax25/af_ax25.c | newsk = skb->sk; |
skb | 1292 | net/ax25/af_ax25.c | skb->sk = NULL; |
skb | 1293 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1335 | net/ax25/af_ax25.c | static int ax25_rcv(struct sk_buff *skb, struct device *dev, ax25_address *dev_addr, struct packet_type *ptype) |
skb | 1349 | net/ax25/af_ax25.c | skb->h.raw = skb->data; |
skb | 1354 | net/ax25/af_ax25.c | if (ax25_parse_addr(skb->data, skb->len, &src, &dest, &dp, &type) == NULL) { |
skb | 1355 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1383 | net/ax25/af_ax25.c | build_ax25_addr(skb->data, &src, &dest, &dp, type, MODULUS); |
skb | 1384 | net/ax25/af_ax25.c | skb->arp = 1; |
skb | 1386 | net/ax25/af_ax25.c | ax25_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 1388 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1391 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1400 | net/ax25/af_ax25.c | skb_pull(skb, size_ax25_addr(&dp)); |
skb | 1412 | net/ax25/af_ax25.c | if ((*skb->data & ~0x10) == LAPB_UI) { /* UI frame - bypass LAPB processing */ |
skb | 1413 | net/ax25/af_ax25.c | skb->h.raw = skb->data + 2; /* skip control and pid */ |
skb | 1416 | net/ax25/af_ax25.c | ax25_send_to_raw(raw, skb, skb->data[1]); |
skb | 1419 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1424 | net/ax25/af_ax25.c | switch (skb->data[1]) { |
skb | 1427 | net/ax25/af_ax25.c | skb_pull(skb,2); /* drop PID/CTRL */ |
skb | 1429 | net/ax25/af_ax25.c | ip_rcv(skb, dev, ptype); /* Note ptype here is the wrong one, fix me later */ |
skb | 1433 | net/ax25/af_ax25.c | skb_pull(skb,2); |
skb | 1434 | net/ax25/af_ax25.c | arp_rcv(skb, dev, ptype); /* Note ptype here is wrong... */ |
skb | 1441 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1446 | net/ax25/af_ax25.c | skb_pull(skb, 2); |
skb | 1447 | net/ax25/af_ax25.c | skb_queue_tail(&sk->receive_queue, skb); |
skb | 1448 | net/ax25/af_ax25.c | skb->sk = sk; |
skb | 1449 | net/ax25/af_ax25.c | sk->rmem_alloc += skb->truesize; |
skb | 1451 | net/ax25/af_ax25.c | sk->data_ready(sk, skb->len); |
skb | 1454 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1459 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); /* Will scan SOCK_AX25 RAW sockets */ |
skb | 1472 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1483 | net/ax25/af_ax25.c | if (ax25_process_rx_frame(ax25, skb, type) == 0) |
skb | 1484 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1489 | net/ax25/af_ax25.c | if ((*skb->data & ~PF) != SABM && (*skb->data & ~PF) != SABME) { |
skb | 1494 | net/ax25/af_ax25.c | if ((*skb->data & ~PF) != DM && mine) |
skb | 1497 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1506 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1512 | net/ax25/af_ax25.c | skb_queue_head(&sk->receive_queue, skb); |
skb | 1514 | net/ax25/af_ax25.c | skb->sk = make; |
skb | 1522 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1528 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1537 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1549 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1564 | net/ax25/af_ax25.c | if ((*skb->data & ~PF) == SABME) { |
skb | 1585 | net/ax25/af_ax25.c | sk->data_ready(sk, skb->len ); |
skb | 1587 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1596 | net/ax25/af_ax25.c | static int kiss_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *ptype) |
skb | 1598 | net/ax25/af_ax25.c | skb->sk = NULL; /* Initially we don't know who its for */ |
skb | 1600 | net/ax25/af_ax25.c | if ((*skb->data & 0x0F) != 0) { |
skb | 1601 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); /* Not a KISS data frame */ |
skb | 1605 | net/ax25/af_ax25.c | skb_pull(skb, AX25_KISS_HEADER_LEN); /* Remove the KISS byte */ |
skb | 1607 | net/ax25/af_ax25.c | return ax25_rcv(skb, dev, (ax25_address *)dev->dev_addr, ptype); |
skb | 1613 | net/ax25/af_ax25.c | static int bpq_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *ptype) |
skb | 1618 | net/ax25/af_ax25.c | skb->sk = NULL; /* Initially we don't know who its for */ |
skb | 1621 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); /* We have no port callsign */ |
skb | 1625 | net/ax25/af_ax25.c | len = skb->data[0] + skb->data[1] * 256 - 5; |
skb | 1627 | net/ax25/af_ax25.c | skb_pull(skb, 2); /* Remove the length bytes */ |
skb | 1628 | net/ax25/af_ax25.c | skb_trim(skb, len); /* Set the length of the data */ |
skb | 1630 | net/ax25/af_ax25.c | return ax25_rcv(skb, dev, &port_call, ptype); |
skb | 1640 | net/ax25/af_ax25.c | struct sk_buff *skb; |
skb | 1723 | net/ax25/af_ax25.c | if ((skb = sock_alloc_send_skb(sk, size, 0, &err)) == NULL) |
skb | 1726 | net/ax25/af_ax25.c | skb->sk = sk; |
skb | 1727 | net/ax25/af_ax25.c | skb->free = 1; |
skb | 1728 | net/ax25/af_ax25.c | skb->arp = 1; |
skb | 1730 | net/ax25/af_ax25.c | skb_reserve(skb, size - len); |
skb | 1736 | net/ax25/af_ax25.c | memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); |
skb | 1739 | net/ax25/af_ax25.c | asmptr = skb_push(skb, 1); |
skb | 1748 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_WRITE); |
skb | 1752 | net/ax25/af_ax25.c | ax25_output(sk->ax25, skb); /* Shove it onto the queue and kick */ |
skb | 1756 | net/ax25/af_ax25.c | asmptr = skb_push(skb, 1 + size_ax25_addr(dp)); |
skb | 1770 | net/ax25/af_ax25.c | skb->h.raw = asmptr; |
skb | 1773 | net/ax25/af_ax25.c | printk("base=%p pos=%p\n", skb->data, asmptr); |
skb | 1778 | net/ax25/af_ax25.c | ax25_queue_xmit(skb, sk->ax25->device, SOPRI_NORMAL); |
skb | 1817 | net/ax25/af_ax25.c | struct sk_buff *skb; |
skb | 1839 | net/ax25/af_ax25.c | if ((skb = skb_recv_datagram(sk, flags, noblock, &er)) == NULL) |
skb | 1843 | net/ax25/af_ax25.c | length = skb->len + (skb->data - skb->h.raw); |
skb | 1846 | net/ax25/af_ax25.c | skb_pull(skb, 1); /* Remove PID */ |
skb | 1847 | net/ax25/af_ax25.c | length = skb->len; |
skb | 1848 | net/ax25/af_ax25.c | skb->h.raw = skb->data; |
skb | 1852 | net/ax25/af_ax25.c | skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); |
skb | 1858 | net/ax25/af_ax25.c | unsigned char *dp = skb->data; |
skb | 1861 | net/ax25/af_ax25.c | ax25_parse_addr(dp, skb->len, NULL, &dest, &digi, NULL); |
skb | 1876 | net/ax25/af_ax25.c | skb_free_datagram(skb); |
skb | 1945 | net/ax25/af_ax25.c | struct sk_buff *skb; |
skb | 1947 | net/ax25/af_ax25.c | if ((skb = skb_peek(&sk->receive_queue)) != NULL) |
skb | 1948 | net/ax25/af_ax25.c | amount = skb->len; |
skb | 2179 | net/ax25/af_ax25.c | void ax25_queue_xmit(struct sk_buff *skb, struct device *dev, int pri) |
skb | 2186 | net/ax25/af_ax25.c | if (skb_headroom(skb) < AX25_BPQ_HEADER_LEN) { |
skb | 2188 | net/ax25/af_ax25.c | skb->free = 1; |
skb | 2189 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_WRITE); |
skb | 2193 | net/ax25/af_ax25.c | size = skb->len; |
skb | 2195 | net/ax25/af_ax25.c | ptr = skb_push(skb, 2); |
skb | 2200 | net/ax25/af_ax25.c | dev->hard_header(skb, dev, ETH_P_BPQ, bcast_addr, NULL, 0); |
skb | 2202 | net/ax25/af_ax25.c | ptr = skb_push(skb, 1); |
skb | 2207 | net/ax25/af_ax25.c | dev_queue_xmit(skb, dev, pri); |
skb | 2223 | net/ax25/af_ax25.c | int ax25_encapsulate(struct sk_buff *skb, struct device *dev, unsigned short type, void *daddr, |
skb | 2227 | net/ax25/af_ax25.c | unsigned char *buff = skb_push(skb, AX25_HEADER_LEN); |
skb | 2271 | net/ax25/af_ax25.c | int ax25_rebuild_header(unsigned char *bp, struct device *dev, unsigned long dest, struct sk_buff *skb) |
skb | 2275 | net/ax25/af_ax25.c | if (arp_find(bp + 1, dest, dev, dev->pa_addr, skb)) |
skb | 2281 | net/ax25/af_ax25.c | skb_device_unlock(skb); |
skb | 2282 | net/ax25/af_ax25.c | skb_pull(skb, AX25_HEADER_LEN - 1); /* Keep PID */ |
skb | 2283 | net/ax25/af_ax25.c | ax25_send_frame(skb, (ax25_address *)(bp + 8), (ax25_address *)(bp + 1), NULL, dev); |
skb | 64 | net/ax25/ax25_in.c | static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb) |
skb | 70 | net/ax25/ax25_in.c | if (!(*skb->data & SEG_FIRST)) { |
skb | 71 | net/ax25/ax25_in.c | if ((ax25->fragno - 1) == (*skb->data & SEG_REM)) { |
skb | 72 | net/ax25/ax25_in.c | ax25->fragno = *skb->data & SEG_REM; |
skb | 73 | net/ax25/ax25_in.c | skb_pull(skb, 1); |
skb | 74 | net/ax25/ax25_in.c | ax25->fraglen += skb->len; |
skb | 75 | net/ax25/ax25_in.c | skb_queue_tail(&ax25->frag_queue, skb); |
skb | 114 | net/ax25/ax25_in.c | if (*skb->data & SEG_FIRST) { |
skb | 115 | net/ax25/ax25_in.c | ax25->fragno = *skb->data & SEG_REM; |
skb | 116 | net/ax25/ax25_in.c | skb_pull(skb, 1); |
skb | 117 | net/ax25/ax25_in.c | ax25->fraglen = skb->len; |
skb | 118 | net/ax25/ax25_in.c | skb_queue_tail(&ax25->frag_queue, skb); |
skb | 130 | net/ax25/ax25_in.c | static int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb) |
skb | 134 | net/ax25/ax25_in.c | switch (*skb->data) { |
skb | 138 | net/ax25/ax25_in.c | skb_pull(skb, 1); /* Remove PID */ |
skb | 139 | net/ax25/ax25_in.c | queued = nr_route_frame(skb, ax25); |
skb | 145 | net/ax25/ax25_in.c | skb_pull(skb, 1); /* Remove PID */ |
skb | 146 | net/ax25/ax25_in.c | skb->h.raw = skb->data; |
skb | 148 | net/ax25/ax25_in.c | ip_rcv(skb, skb->dev, NULL); /* Wrong ptype */ |
skb | 154 | net/ax25/ax25_in.c | if (sock_queue_rcv_skb(ax25->sk, skb) == 0) { |
skb | 163 | net/ax25/ax25_in.c | skb_pull(skb, 1); /* Remove PID */ |
skb | 164 | net/ax25/ax25_in.c | queued = ax25_rx_fragment(ax25, skb); |
skb | 179 | net/ax25/ax25_in.c | static int ax25_state1_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type) |
skb | 248 | net/ax25/ax25_in.c | static int ax25_state2_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type) |
skb | 306 | net/ax25/ax25_in.c | static int ax25_state3_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type) |
skb | 421 | net/ax25/ax25_in.c | queued = ax25_rx_iframe(ax25, skb); |
skb | 465 | net/ax25/ax25_in.c | static int ax25_state4_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type) |
skb | 625 | net/ax25/ax25_in.c | queued = ax25_rx_iframe(ax25, skb); |
skb | 667 | net/ax25/ax25_in.c | int ax25_process_rx_frame(ax25_cb *ax25, struct sk_buff *skb, int type) |
skb | 679 | net/ax25/ax25_in.c | frametype = ax25_decode(ax25, skb, &ns, &nr, &pf); |
skb | 683 | net/ax25/ax25_in.c | queued = ax25_state1_machine(ax25, skb, frametype, pf, type); |
skb | 686 | net/ax25/ax25_in.c | queued = ax25_state2_machine(ax25, skb, frametype, pf, type); |
skb | 689 | net/ax25/ax25_in.c | queued = ax25_state3_machine(ax25, skb, frametype, ns, nr, pf, type); |
skb | 692 | net/ax25/ax25_in.c | queued = ax25_state4_machine(ax25, skb, frametype, ns, nr, pf, type); |
skb | 56 | net/ax25/ax25_out.c | void ax25_output(ax25_cb *ax25, struct sk_buff *skb) |
skb | 64 | net/ax25/ax25_out.c | if (skb->len > mtu) { |
skb | 67 | net/ax25/ax25_out.c | fragno = skb->len / mtu; |
skb | 68 | net/ax25/ax25_out.c | if (skb->len % mtu == 0) fragno--; |
skb | 70 | net/ax25/ax25_out.c | frontlen = skb_headroom(skb); /* Address space + CTRL */ |
skb | 72 | net/ax25/ax25_out.c | while (skb->len > 0) { |
skb | 73 | net/ax25/ax25_out.c | if (skb->sk != NULL) { |
skb | 74 | net/ax25/ax25_out.c | if ((skbn = sock_alloc_send_skb(skb->sk, mtu + 2 + frontlen, 0, &err)) == NULL) |
skb | 81 | net/ax25/ax25_out.c | skbn->sk = skb->sk; |
skb | 87 | net/ax25/ax25_out.c | len = (mtu > skb->len) ? skb->len : mtu; |
skb | 89 | net/ax25/ax25_out.c | memcpy(skb_put(skbn, len), skb->data, len); |
skb | 90 | net/ax25/ax25_out.c | skb_pull(skb, len); |
skb | 105 | net/ax25/ax25_out.c | skb->free = 1; |
skb | 106 | net/ax25/ax25_out.c | kfree_skb(skb, FREE_WRITE); |
skb | 108 | net/ax25/ax25_out.c | skb_queue_tail(&ax25->write_queue, skb); /* Throw it on the queue */ |
skb | 119 | net/ax25/ax25_out.c | static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit) |
skb | 123 | net/ax25/ax25_out.c | if (skb == NULL) |
skb | 127 | net/ax25/ax25_out.c | frame = skb_push(skb, 1); |
skb | 134 | net/ax25/ax25_out.c | frame = skb_push(skb, 2); |
skb | 142 | net/ax25/ax25_out.c | ax25_transmit_buffer(ax25, skb, C_COMMAND); |
skb | 147 | net/ax25/ax25_out.c | struct sk_buff *skb, *skbn; |
skb | 171 | net/ax25/ax25_out.c | skb = skb_dequeue(&ax25->write_queue); |
skb | 173 | net/ax25/ax25_out.c | if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { |
skb | 174 | net/ax25/ax25_out.c | skb_queue_head(&ax25->write_queue, skb); |
skb | 194 | net/ax25/ax25_out.c | skb_queue_tail(&ax25->ack_queue, skb); |
skb | 211 | net/ax25/ax25_out.c | void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type) |
skb | 226 | net/ax25/ax25_out.c | if (skb_headroom(skb) < size_ax25_addr(ax25->digipeat)) { |
skb | 228 | net/ax25/ax25_out.c | skb->free = 1; |
skb | 229 | net/ax25/ax25_out.c | kfree_skb(skb, FREE_WRITE); |
skb | 233 | net/ax25/ax25_out.c | ptr = skb_push(skb, size_ax25_addr(ax25->digipeat)); |
skb | 236 | net/ax25/ax25_out.c | skb->arp = 1; |
skb | 238 | net/ax25/ax25_out.c | ax25_queue_xmit(skb, ax25->device, SOPRI_NORMAL); |
skb | 58 | net/ax25/ax25_subr.c | struct sk_buff *skb; |
skb | 60 | net/ax25/ax25_subr.c | while ((skb = skb_dequeue(&ax25->write_queue)) != NULL) { |
skb | 61 | net/ax25/ax25_subr.c | skb->free = 1; |
skb | 62 | net/ax25/ax25_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 65 | net/ax25/ax25_subr.c | while ((skb = skb_dequeue(&ax25->ack_queue)) != NULL) { |
skb | 66 | net/ax25/ax25_subr.c | skb->free = 1; |
skb | 67 | net/ax25/ax25_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 70 | net/ax25/ax25_subr.c | while ((skb = skb_dequeue(&ax25->reseq_queue)) != NULL) { |
skb | 71 | net/ax25/ax25_subr.c | kfree_skb(skb, FREE_READ); |
skb | 74 | net/ax25/ax25_subr.c | while ((skb = skb_dequeue(&ax25->frag_queue)) != NULL) { |
skb | 75 | net/ax25/ax25_subr.c | kfree_skb(skb, FREE_READ); |
skb | 86 | net/ax25/ax25_subr.c | struct sk_buff *skb, *skb_prev = NULL; |
skb | 93 | net/ax25/ax25_subr.c | skb = skb_dequeue(&ax25->ack_queue); |
skb | 94 | net/ax25/ax25_subr.c | skb->free = 1; |
skb | 95 | net/ax25/ax25_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 105 | net/ax25/ax25_subr.c | while ((skb = skb_dequeue(&ax25->ack_queue)) != NULL) { |
skb | 107 | net/ax25/ax25_subr.c | skb_queue_head(&ax25->write_queue, skb); |
skb | 109 | net/ax25/ax25_subr.c | skb_append(skb_prev, skb); |
skb | 110 | net/ax25/ax25_subr.c | skb_prev = skb; |
skb | 136 | net/ax25/ax25_subr.c | int ax25_decode(ax25_cb *ax25, struct sk_buff *skb, int *ns, int *nr, int *pf) |
skb | 141 | net/ax25/ax25_subr.c | frame = skb->data; |
skb | 158 | net/ax25/ax25_subr.c | skb_pull(skb, 1); |
skb | 165 | net/ax25/ax25_subr.c | skb_pull(skb, 2); |
skb | 170 | net/ax25/ax25_subr.c | skb_pull(skb, 2); |
skb | 174 | net/ax25/ax25_subr.c | skb_pull(skb, 1); |
skb | 188 | net/ax25/ax25_subr.c | struct sk_buff *skb; |
skb | 195 | net/ax25/ax25_subr.c | if ((skb = alloc_skb(AX25_BPQ_HEADER_LEN + size_ax25_addr(ax25->digipeat) + 2, GFP_ATOMIC)) == NULL) |
skb | 198 | net/ax25/ax25_subr.c | skb_reserve(skb, AX25_BPQ_HEADER_LEN + size_ax25_addr(ax25->digipeat)); |
skb | 201 | net/ax25/ax25_subr.c | skb->sk = ax25->sk; |
skb | 202 | net/ax25/ax25_subr.c | ax25->sk->wmem_alloc += skb->truesize; |
skb | 207 | net/ax25/ax25_subr.c | dptr = skb_put(skb, 1); |
skb | 214 | net/ax25/ax25_subr.c | dptr = skb_put(skb, 1); |
skb | 218 | net/ax25/ax25_subr.c | dptr = skb_put(skb, 2); |
skb | 225 | net/ax25/ax25_subr.c | skb->free = 1; |
skb | 227 | net/ax25/ax25_subr.c | ax25_transmit_buffer(ax25, skb, type); |
skb | 237 | net/ax25/ax25_subr.c | struct sk_buff *skb; |
skb | 244 | net/ax25/ax25_subr.c | if ((skb = alloc_skb(AX25_BPQ_HEADER_LEN + size_ax25_addr(digi) + 1, GFP_ATOMIC)) == NULL) |
skb | 247 | net/ax25/ax25_subr.c | skb_reserve(skb, AX25_BPQ_HEADER_LEN + size_ax25_addr(digi)); |
skb | 251 | net/ax25/ax25_subr.c | dptr = skb_put(skb, 1); |
skb | 252 | net/ax25/ax25_subr.c | skb->sk = NULL; |
skb | 260 | net/ax25/ax25_subr.c | dptr = skb_push(skb, size_ax25_addr(digi)); |
skb | 263 | net/ax25/ax25_subr.c | skb->arp = 1; |
skb | 264 | net/ax25/ax25_subr.c | skb->free = 1; |
skb | 266 | net/ax25/ax25_subr.c | ax25_queue_xmit(skb, dev, SOPRI_NORMAL); |
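
ax25_output() (ax25_out.c rows above) shows how an oversized buffer is segmented: copy mtu-sized pieces out of the front into new buffers with skb_put(), advance the original with skb_pull(), and free it once it is empty. A simplified sketch of that loop; the fragment header and queueing steps are elided and the function name is illustrative.

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/skbuff.h>

static void example_segment(struct sk_buff *skb, int mtu)
{
        struct sk_buff *skbn;
        int len;

        while (skb->len > 0) {
                len = (mtu > skb->len) ? skb->len : mtu;
                if ((skbn = alloc_skb(len, GFP_ATOMIC)) == NULL)
                        break;                  /* out of memory: drop what is left */
                memcpy(skb_put(skbn, len), skb->data, len);     /* copy one fragment out */
                skb_pull(skb, len);             /* original shrinks from the front */
                /* ...prepend the fragment header and queue skbn here... */
        }
        skb->free = 1;
        kfree_skb(skb, FREE_WRITE);             /* original buffer is no longer needed */
}
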
skb | 57 | net/core/datagram.c | struct sk_buff *skb; |
skb | 131 | net/core/datagram.c | skb=skb_dequeue(&sk->receive_queue); |
skb | 132 | net/core/datagram.c | if(skb!=NULL) |
skb | 133 | net/core/datagram.c | skb->users++; |
skb | 140 | net/core/datagram.c | skb=skb_peek(&sk->receive_queue); |
skb | 141 | net/core/datagram.c | if(skb!=NULL) |
skb | 142 | net/core/datagram.c | skb->users++; |
skb | 144 | net/core/datagram.c | if(skb==NULL) /* shouldn't happen but .. */ |
skb | 147 | net/core/datagram.c | return skb; |
skb | 150 | net/core/datagram.c | void skb_free_datagram(struct sk_buff *skb) |
skb | 156 | net/core/datagram.c | skb->users--; |
skb | 157 | net/core/datagram.c | if(skb->users>0) |
skb | 163 | net/core/datagram.c | if(!skb->next && !skb->prev) /* Been dequeued by someone - ie it's read */ |
skb | 164 | net/core/datagram.c | kfree_skb(skb,FREE_READ); |
skb | 172 | net/core/datagram.c | void skb_copy_datagram(struct sk_buff *skb, int offset, char *to, int size) |
skb | 174 | net/core/datagram.c | memcpy_tofs(to,skb->h.raw+offset,size); |
skb | 182 | net/core/datagram.c | void skb_copy_datagram_iovec(struct sk_buff *skb, int offset, struct iovec *to, int size) |
skb | 184 | net/core/datagram.c | memcpy_toiovec(to,skb->h.raw+offset,size); |
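
The datagram.c helpers above are the receive-side counterpart: skb_recv_datagram() waits for (and reference-counts) a queued buffer, skb_copy_datagram_iovec() copies it to the caller's iovec, and skb_free_datagram() drops the reference. A minimal recvmsg-style sketch using only those calls; the function name and the truncation policy are illustrative.

#include <linux/socket.h>
#include <linux/skbuff.h>
#include <net/sock.h>

static int example_recvmsg(struct sock *sk, struct msghdr *msg, int size,
                           int noblock, int flags)
{
        struct sk_buff *skb;
        int er, copied;

        skb = skb_recv_datagram(sk, flags, noblock, &er);
        if (skb == NULL)
                return er;                      /* would block, or the socket is going away */
        copied = (size < skb->len) ? size : skb->len;   /* truncate to the caller's buffer */
        skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
        skb_free_datagram(skb);                 /* release our reference on the buffer */
        return copied;
}
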
skb | 278 | net/core/dev.c | struct sk_buff *skb; |
skb | 279 | net/core/dev.c | while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL) |
skb | 280 | net/core/dev.c | if(skb->free) |
skb | 281 | net/core/dev.c | kfree_skb(skb,FREE_WRITE); |
skb | 314 | net/core/dev.c | void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri) |
skb | 322 | net/core/dev.c | if(pri>=0 && !skb_device_locked(skb)) |
skb | 323 | net/core/dev.c | skb_device_lock(skb); /* Shove a lock on the frame */ |
skb | 325 | net/core/dev.c | IS_SKB(skb); |
skb | 327 | net/core/dev.c | skb->dev = dev; |
skb | 354 | net/core/dev.c | if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) { |
skb | 363 | net/core/dev.c | skb_queue_tail(dev->buffs + pri,skb); |
skb | 364 | net/core/dev.c | skb_device_unlock(skb); /* Buffer is on the device queue and can be freed safely */ |
skb | 365 | net/core/dev.c | skb = skb_dequeue(dev->buffs + pri); |
skb | 366 | net/core/dev.c | skb_device_lock(skb); /* New buffer needs locking down */ |
skb | 373 | net/core/dev.c | skb->stamp=xtime; |
skb | 380 | net/core/dev.c | ((struct sock *)ptype->data != skb->sk)) |
skb | 383 | net/core/dev.c | if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) |
skb | 387 | net/core/dev.c | ptype->func(skb2, skb->dev, ptype); |
skb | 392 | net/core/dev.c | if (dev->hard_start_xmit(skb, dev) == 0) { |
skb | 406 | net/core/dev.c | skb_device_unlock(skb); |
skb | 407 | net/core/dev.c | skb_queue_head(dev->buffs + pri,skb); |
skb | 417 | net/core/dev.c | void netif_rx(struct sk_buff *skb) |
skb | 426 | net/core/dev.c | skb->sk = NULL; |
skb | 427 | net/core/dev.c | skb->free = 1; |
skb | 428 | net/core/dev.c | if(skb->stamp.tv_sec==0) |
skb | 429 | net/core/dev.c | skb->stamp = xtime; |
skb | 442 | net/core/dev.c | kfree_skb(skb, FREE_READ); |
skb | 450 | net/core/dev.c | IS_SKB(skb); |
skb | 452 | net/core/dev.c | skb_queue_tail(&backlog,skb); |
skb | 486 | net/core/dev.c | struct sk_buff *skb = NULL; |
skb | 496 | net/core/dev.c | skb = (struct sk_buff *) buff; |
skb | 508 | net/core/dev.c | skb = alloc_skb(len, GFP_ATOMIC); |
skb | 509 | net/core/dev.c | if (skb == NULL) |
skb | 522 | net/core/dev.c | to = skb_put(skb,len); |
skb | 544 | net/core/dev.c | skb->dev = dev; |
skb | 545 | net/core/dev.c | skb->free = 1; |
skb | 547 | net/core/dev.c | netif_rx(skb); |
skb | 604 | net/core/dev.c | struct sk_buff *skb; |
skb | 637 | net/core/dev.c | while((skb=skb_dequeue(&backlog))!=NULL) |
skb | 653 | net/core/dev.c | skb->h.raw = skb->data; |
skb | 659 | net/core/dev.c | type = skb->protocol; |
skb | 671 | net/core/dev.c | struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC); |
skb | 673 | net/core/dev.c | pt_prev->func(skb2,skb->dev, pt_prev); |
skb | 680 | net/core/dev.c | if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev)) |
skb | 690 | net/core/dev.c | skb2=skb_clone(skb, GFP_ATOMIC); |
skb | 698 | net/core/dev.c | pt_prev->func(skb2, skb->dev, pt_prev); |
skb | 710 | net/core/dev.c | pt_prev->func(skb, skb->dev, pt_prev); |
skb | 716 | net/core/dev.c | kfree_skb(skb, FREE_WRITE); |
skb | 752 | net/core/dev.c | struct sk_buff *skb; |
skb | 768 | net/core/dev.c | while((skb=skb_dequeue(&dev->buffs[i]))!=NULL) |
skb | 773 | net/core/dev.c | skb_device_lock(skb); |
skb | 779 | net/core/dev.c | dev_queue_xmit(skb,dev,-i - 1); |
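
netif_rx() (dev.c rows above) only queues the buffer on the backlog; net_bh() later matches skb->protocol against the registered packet_type handlers, cloning the buffer when more than one wants it. The sketch below shows the feeding side as a hypothetical driver helper; dev_alloc_skb() already reserves 16 bytes of headroom (see the skbuff.c rows further down), and the include list is approximate for this kernel vintage.

#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void example_deliver(struct device *dev, unsigned char *data, int len)
{
        struct sk_buff *skb = dev_alloc_skb(len);       /* comes with 16 bytes of headroom */

        if (skb == NULL)
                return;                         /* drop the frame under memory pressure */
        memcpy(skb_put(skb, len), data, len);   /* copy the frame into the buffer */
        skb->dev      = dev;
        skb->free     = 1;
        skb->protocol = eth_type_trans(skb, dev);       /* sets mac.raw and pulls the MAC header */
        netif_rx(skb);                          /* queue it for net_bh() to demultiplex */
}
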
skb | 71 | net/core/skbuff.c | int skb_check(struct sk_buff *skb, int head, int line, char *file) |
skb | 74 | net/core/skbuff.c | if (skb->magic_debug_cookie != SK_HEAD_SKB) { |
skb | 79 | net/core/skbuff.c | if (!skb->next || !skb->prev) { |
skb | 83 | net/core/skbuff.c | if (skb->next->magic_debug_cookie != SK_HEAD_SKB |
skb | 84 | net/core/skbuff.c | && skb->next->magic_debug_cookie != SK_GOOD_SKB) { |
skb | 89 | net/core/skbuff.c | if (skb->prev->magic_debug_cookie != SK_HEAD_SKB |
skb | 90 | net/core/skbuff.c | && skb->prev->magic_debug_cookie != SK_GOOD_SKB) { |
skb | 97 | net/core/skbuff.c | struct sk_buff *skb2 = skb->next; |
skb | 99 | net/core/skbuff.c | while (skb2 != skb && i < 5) { |
skb | 111 | net/core/skbuff.c | if (skb->next != NULL && skb->next->magic_debug_cookie != SK_HEAD_SKB |
skb | 112 | net/core/skbuff.c | && skb->next->magic_debug_cookie != SK_GOOD_SKB) { |
skb | 117 | net/core/skbuff.c | if (skb->prev != NULL && skb->prev->magic_debug_cookie != SK_HEAD_SKB |
skb | 118 | net/core/skbuff.c | && skb->prev->magic_debug_cookie != SK_GOOD_SKB) { |
skb | 125 | net/core/skbuff.c | if(skb->magic_debug_cookie==SK_FREED_SKB) |
skb | 130 | net/core/skbuff.c | skb,skb->truesize,skb->free); |
skb | 133 | net/core/skbuff.c | if(skb->magic_debug_cookie!=SK_GOOD_SKB) |
skb | 137 | net/core/skbuff.c | skb,skb->truesize,skb->free); |
skb | 140 | net/core/skbuff.c | if(skb->head>skb->data) |
skb | 144 | net/core/skbuff.c | skb,skb->head,skb->data); |
skb | 147 | net/core/skbuff.c | if(skb->tail>skb->end) |
skb | 151 | net/core/skbuff.c | skb,skb->tail,skb->end); |
skb | 154 | net/core/skbuff.c | if(skb->data>skb->tail) |
skb | 158 | net/core/skbuff.c | skb,skb->data,skb->tail); |
skb | 161 | net/core/skbuff.c | if(skb->tail-skb->data!=skb->len) |
skb | 165 | net/core/skbuff.c | skb,skb->data,skb->end,skb->len); |
skb | 168 | net/core/skbuff.c | if((unsigned long) skb->end > (unsigned long) skb) |
skb | 172 | net/core/skbuff.c | skb,skb->end); |
skb | 332 | net/core/skbuff.c | void skb_unlink(struct sk_buff *skb) |
skb | 339 | net/core/skbuff.c | IS_SKB(skb); |
skb | 341 | net/core/skbuff.c | if(skb->prev && skb->next) |
skb | 343 | net/core/skbuff.c | skb->next->prev = skb->prev; |
skb | 344 | net/core/skbuff.c | skb->prev->next = skb->next; |
skb | 345 | net/core/skbuff.c | skb->next = NULL; |
skb | 346 | net/core/skbuff.c | skb->prev = NULL; |
skb | 359 | net/core/skbuff.c | unsigned char *skb_put(struct sk_buff *skb, int len) |
skb | 361 | net/core/skbuff.c | unsigned char *tmp=skb->tail; |
skb | 362 | net/core/skbuff.c | IS_SKB(skb); |
skb | 363 | net/core/skbuff.c | skb->tail+=len; |
skb | 364 | net/core/skbuff.c | skb->len+=len; |
skb | 365 | net/core/skbuff.c | IS_SKB(skb); |
skb | 366 | net/core/skbuff.c | if(skb->tail>skb->end) |
skb | 371 | net/core/skbuff.c | unsigned char *skb_push(struct sk_buff *skb, int len) |
skb | 373 | net/core/skbuff.c | IS_SKB(skb); |
skb | 374 | net/core/skbuff.c | skb->data-=len; |
skb | 375 | net/core/skbuff.c | skb->len+=len; |
skb | 376 | net/core/skbuff.c | IS_SKB(skb); |
skb | 377 | net/core/skbuff.c | if(skb->data<skb->head) |
skb | 379 | net/core/skbuff.c | return skb->data; |
skb | 382 | net/core/skbuff.c | unsigned char * skb_pull(struct sk_buff *skb, int len) |
skb | 384 | net/core/skbuff.c | IS_SKB(skb); |
skb | 385 | net/core/skbuff.c | if(len>skb->len) |
skb | 387 | net/core/skbuff.c | skb->data+=len; |
skb | 388 | net/core/skbuff.c | skb->len-=len; |
skb | 389 | net/core/skbuff.c | return skb->data; |
skb | 392 | net/core/skbuff.c | int skb_headroom(struct sk_buff *skb) |
skb | 394 | net/core/skbuff.c | IS_SKB(skb); |
skb | 395 | net/core/skbuff.c | return skb->data-skb->head; |
skb | 398 | net/core/skbuff.c | int skb_tailroom(struct sk_buff *skb) |
skb | 400 | net/core/skbuff.c | IS_SKB(skb); |
skb | 401 | net/core/skbuff.c | return skb->end-skb->tail; |
skb | 404 | net/core/skbuff.c | void skb_reserve(struct sk_buff *skb, int len) |
skb | 406 | net/core/skbuff.c | IS_SKB(skb); |
skb | 407 | net/core/skbuff.c | skb->data+=len; |
skb | 408 | net/core/skbuff.c | skb->tail+=len; |
skb | 409 | net/core/skbuff.c | if(skb->tail>skb->end) |
skb | 411 | net/core/skbuff.c | if(skb->data<skb->head) |
skb | 413 | net/core/skbuff.c | IS_SKB(skb); |
skb | 416 | net/core/skbuff.c | void skb_trim(struct sk_buff *skb, int len) |
skb | 418 | net/core/skbuff.c | IS_SKB(skb); |
skb | 419 | net/core/skbuff.c | if(skb->len>len) |
skb | 421 | net/core/skbuff.c | skb->len=len; |
skb | 422 | net/core/skbuff.c | skb->tail=skb->data+len; |
skb | 435 | net/core/skbuff.c | void kfree_skb(struct sk_buff *skb, int rw) |
skb | 437 | net/core/skbuff.c | if (skb == NULL) |
skb | 444 | net/core/skbuff.c | IS_SKB(skb); |
skb | 446 | net/core/skbuff.c | if (skb->lock) |
skb | 448 | net/core/skbuff.c | skb->free = 3; /* Free when unlocked */ |
skb | 452 | net/core/skbuff.c | if (skb->free == 2) |
skb | 455 | net/core/skbuff.c | if (skb->next) |
skb | 458 | net/core/skbuff.c | if (skb->sk) |
skb | 460 | net/core/skbuff.c | if(skb->sk->prot!=NULL) |
skb | 463 | net/core/skbuff.c | skb->sk->prot->rfree(skb->sk, skb); |
skb | 465 | net/core/skbuff.c | skb->sk->prot->wfree(skb->sk, skb); |
skb | 475 | net/core/skbuff.c | skb->sk->rmem_alloc-=skb->truesize; |
skb | 477 | net/core/skbuff.c | skb->sk->wmem_alloc-=skb->truesize; |
skb | 479 | net/core/skbuff.c | if(!skb->sk->dead) |
skb | 480 | net/core/skbuff.c | skb->sk->write_space(skb->sk); |
skb | 481 | net/core/skbuff.c | kfree_skbmem(skb); |
skb | 485 | net/core/skbuff.c | kfree_skbmem(skb); |
skb | 494 | net/core/skbuff.c | struct sk_buff *skb; |
skb | 523 | net/core/skbuff.c | if(skb->magic_debug_cookie == SK_GOOD_SKB) |
skb | 524 | net/core/skbuff.c | printk("Kernel kmalloc handed us an existing skb (%p)\n",skb); |
skb | 535 | net/core/skbuff.c | skb=(struct sk_buff *)(bptr+size)-1; |
skb | 537 | net/core/skbuff.c | skb->free = 2; /* Invalid so we pick up forgetful users */ |
skb | 538 | net/core/skbuff.c | skb->lock = 0; |
skb | 539 | net/core/skbuff.c | skb->pkt_type = PACKET_HOST; /* Default type */ |
skb | 540 | net/core/skbuff.c | skb->prev = skb->next = NULL; |
skb | 541 | net/core/skbuff.c | skb->link3 = NULL; |
skb | 542 | net/core/skbuff.c | skb->sk = NULL; |
skb | 543 | net/core/skbuff.c | skb->truesize=size; |
skb | 544 | net/core/skbuff.c | skb->localroute=0; |
skb | 545 | net/core/skbuff.c | skb->stamp.tv_sec=0; /* No idea about time */ |
skb | 546 | net/core/skbuff.c | skb->localroute = 0; |
skb | 547 | net/core/skbuff.c | skb->ip_summed = 0; |
skb | 553 | net/core/skbuff.c | skb->magic_debug_cookie = SK_GOOD_SKB; |
skb | 555 | net/core/skbuff.c | skb->users = 0; |
skb | 557 | net/core/skbuff.c | skb->head=bptr; |
skb | 558 | net/core/skbuff.c | skb->data=bptr; |
skb | 559 | net/core/skbuff.c | skb->tail=bptr; |
skb | 560 | net/core/skbuff.c | skb->end=bptr+len; |
skb | 561 | net/core/skbuff.c | skb->len=0; |
skb | 562 | net/core/skbuff.c | return skb; |
skb | 569 | net/core/skbuff.c | void kfree_skbmem(struct sk_buff *skb) |
skb | 574 | net/core/skbuff.c | kfree((void *)skb->head); |
skb | 584 | net/core/skbuff.c | struct sk_buff *skb_clone(struct sk_buff *skb, int priority) |
skb | 593 | net/core/skbuff.c | IS_SKB(skb); |
skb | 595 | net/core/skbuff.c | n=alloc_skb(skb->truesize-sizeof(struct sk_buff),priority); |
skb | 603 | net/core/skbuff.c | offset=n->head-skb->head; |
skb | 606 | net/core/skbuff.c | skb_reserve(n,skb->data-skb->head); |
skb | 608 | net/core/skbuff.c | skb_put(n,skb->len); |
skb | 610 | net/core/skbuff.c | memcpy(n->head,skb->head,skb->end-skb->head); |
skb | 613 | net/core/skbuff.c | n->when=skb->when; |
skb | 614 | net/core/skbuff.c | n->dev=skb->dev; |
skb | 615 | net/core/skbuff.c | n->h.raw=skb->h.raw+offset; |
skb | 616 | net/core/skbuff.c | n->mac.raw=skb->mac.raw+offset; |
skb | 617 | net/core/skbuff.c | n->ip_hdr=(struct iphdr *)(((char *)skb->ip_hdr)+offset); |
skb | 618 | net/core/skbuff.c | n->saddr=skb->saddr; |
skb | 619 | net/core/skbuff.c | n->daddr=skb->daddr; |
skb | 620 | net/core/skbuff.c | n->raddr=skb->raddr; |
skb | 621 | net/core/skbuff.c | n->acked=skb->acked; |
skb | 622 | net/core/skbuff.c | n->used=skb->used; |
skb | 624 | net/core/skbuff.c | n->arp=skb->arp; |
skb | 628 | net/core/skbuff.c | n->pkt_type=skb->pkt_type; |
skb | 629 | net/core/skbuff.c | n->stamp=skb->stamp; |
skb | 640 | net/core/skbuff.c | void skb_device_lock(struct sk_buff *skb) |
skb | 642 | net/core/skbuff.c | if(skb->lock) |
skb | 646 | net/core/skbuff.c | skb->lock++; |
skb | 649 | net/core/skbuff.c | void skb_device_unlock(struct sk_buff *skb) |
skb | 651 | net/core/skbuff.c | if(skb->lock==0) |
skb | 653 | net/core/skbuff.c | skb->lock--; |
skb | 654 | net/core/skbuff.c | if(skb->lock==0) |
skb | 658 | net/core/skbuff.c | void dev_kfree_skb(struct sk_buff *skb, int mode) |
skb | 664 | net/core/skbuff.c | if(skb->lock==1) |
skb | 667 | net/core/skbuff.c | if (!--skb->lock && (skb->free == 1 || skb->free == 3)) |
skb | 670 | net/core/skbuff.c | kfree_skb(skb,mode); |
skb | 678 | net/core/skbuff.c | struct sk_buff *skb; |
skb | 680 | net/core/skbuff.c | skb = alloc_skb(length+16, GFP_ATOMIC); |
skb | 681 | net/core/skbuff.c | if (skb) |
skb | 682 | net/core/skbuff.c | skb_reserve(skb,16); |
skb | 683 | net/core/skbuff.c | return skb; |
skb | 686 | net/core/skbuff.c | int skb_device_locked(struct sk_buff *skb) |
skb | 688 | net/core/skbuff.c | return skb->lock? 1 : 0; |
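
skb_clone() in the rows above makes an independent copy, data area included, with the header pointers offset into the new buffer; this is how dev.c hands one frame to several packet_type consumers without sharing. A small sketch of that use; the function name is illustrative and the consumer is assumed to free its copy with kfree_skb().

#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>

static void example_tap(struct sk_buff *skb, struct packet_type *pt)
{
        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);      /* private copy of header and data */

        if (skb2 == NULL)
                return;                         /* the tap simply misses this frame */
        pt->func(skb2, skb->dev, pt);           /* consumer owns and frees skb2 */
}
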
skb | 382 | net/core/sock.c | void sock_wfree(struct sock *sk, struct sk_buff *skb) |
skb | 384 | net/core/sock.c | int s=skb->truesize; |
skb | 386 | net/core/sock.c | IS_SKB(skb); |
skb | 388 | net/core/sock.c | kfree_skbmem(skb); |
skb | 403 | net/core/sock.c | void sock_rfree(struct sock *sk, struct sk_buff *skb) |
skb | 405 | net/core/sock.c | int s=skb->truesize; |
skb | 407 | net/core/sock.c | IS_SKB(skb); |
skb | 409 | net/core/sock.c | kfree_skbmem(skb); |
skb | 426 | net/core/sock.c | struct sk_buff *skb; |
skb | 449 | net/core/sock.c | skb = sock_wmalloc(sk, size, 0, GFP_KERNEL); |
skb | 451 | net/core/sock.c | if(skb==NULL) |
skb | 489 | net/core/sock.c | while(skb==NULL); |
skb | 491 | net/core/sock.c | return skb; |
skb | 499 | net/core/sock.c | struct sk_buff *skb; |
skb | 523 | net/core/sock.c | while((skb = skb_dequeue(&sk->back_log)) != NULL) |
skb | 527 | net/core/sock.c | sk->prot->rcv(skb, skb->dev, sk->opt, |
skb | 528 | net/core/sock.c | skb->saddr, skb->len, skb->daddr, 1, |
skb | 87 | net/ethernet/eth.c | int eth_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 90 | net/ethernet/eth.c | struct ethhdr *eth = (struct ethhdr *)skb_push(skb,14); |
skb | 138 | net/ethernet/eth.c | struct sk_buff *skb) |
skb | 157 | net/ethernet/eth.c | return arp_find(eth->h_dest, dst, dev, dev->pa_addr, skb)? 1 : 0; |
skb | 170 | net/ethernet/eth.c | unsigned short eth_type_trans(struct sk_buff *skb, struct device *dev) |
skb | 175 | net/ethernet/eth.c | skb->mac.raw=skb->data; |
skb | 176 | net/ethernet/eth.c | skb_pull(skb,14); |
skb | 177 | net/ethernet/eth.c | eth= skb->mac.ethernet; |
skb | 182 | net/ethernet/eth.c | skb->pkt_type=PACKET_BROADCAST; |
skb | 184 | net/ethernet/eth.c | skb->pkt_type=PACKET_MULTICAST; |
skb | 190 | net/ethernet/eth.c | skb->pkt_type=PACKET_OTHERHOST; |
skb | 196 | net/ethernet/eth.c | rawp = skb->data; |
skb | 9 | net/ethernet/pe2.c | struct sk_buff *skb, unsigned char *dest_node) |
skb | 11 | net/ethernet/pe2.c | struct device *dev = skb->dev; |
skb | 13 | net/ethernet/pe2.c | dev->hard_header(skb, dev, ETH_P_IPX, dest_node, NULL, skb->len); |
skb | 294 | net/ipv4/af_inet.c | struct sk_buff *skb; |
skb | 309 | net/ipv4/af_inet.c | while ((skb = tcp_dequeue_partial(sk)) != NULL) { |
skb | 310 | net/ipv4/af_inet.c | IS_SKB(skb); |
skb | 311 | net/ipv4/af_inet.c | kfree_skb(skb, FREE_WRITE); |
skb | 315 | net/ipv4/af_inet.c | while((skb = skb_dequeue(&sk->write_queue)) != NULL) { |
skb | 316 | net/ipv4/af_inet.c | IS_SKB(skb); |
skb | 317 | net/ipv4/af_inet.c | kfree_skb(skb, FREE_WRITE); |
skb | 327 | net/ipv4/af_inet.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) |
skb | 333 | net/ipv4/af_inet.c | if (skb->sk != NULL && skb->sk != sk) |
skb | 335 | net/ipv4/af_inet.c | IS_SKB(skb); |
skb | 336 | net/ipv4/af_inet.c | skb->sk->dead = 1; |
skb | 337 | net/ipv4/af_inet.c | skb->sk->prot->close(skb->sk, 0); |
skb | 339 | net/ipv4/af_inet.c | IS_SKB(skb); |
skb | 340 | net/ipv4/af_inet.c | kfree_skb(skb, FREE_READ); |
skb | 346 | net/ipv4/af_inet.c | for(skb = sk->send_head; skb != NULL; ) |
skb | 354 | net/ipv4/af_inet.c | if (skb->next && skb->prev) { |
skb | 356 | net/ipv4/af_inet.c | IS_SKB(skb); |
skb | 357 | net/ipv4/af_inet.c | skb_unlink(skb); |
skb | 359 | net/ipv4/af_inet.c | skb->dev = NULL; |
skb | 360 | net/ipv4/af_inet.c | skb2 = skb->link3; |
skb | 361 | net/ipv4/af_inet.c | kfree_skb(skb, FREE_WRITE); |
skb | 362 | net/ipv4/af_inet.c | skb = skb2; |
skb | 368 | net/ipv4/af_inet.c | while((skb=skb_dequeue(&sk->back_log))!=NULL) |
skb | 372 | net/ipv4/af_inet.c | kfree_skb(skb, FREE_READ); |
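
The af_inet.c teardown rows show the usual way socket queues are drained on close: dequeue every pending buffer and free it, charging writes and reads to the right accounting pool. A reduced sketch of that pattern; IS_SKB() compiles away unless the debugging variant of skbuff.h (top of this section) is enabled.

#include <linux/skbuff.h>
#include <net/sock.h>

static void example_drain_queues(struct sock *sk)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&sk->write_queue)) != NULL) {
                IS_SKB(skb);                    /* sanity check in the debug build */
                kfree_skb(skb, FREE_WRITE);     /* unsent data: credited back to wmem */
        }
        while ((skb = skb_dequeue(&sk->receive_queue)) != NULL)
                kfree_skb(skb, FREE_READ);      /* unread data: credited back to rmem */
}
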
skb | 118 | net/ipv4/arp.c | struct sk_buff_head skb; /* list of queued packets */ |
skb | 270 | net/ipv4/arp.c | struct sk_buff *skb; |
skb | 276 | net/ipv4/arp.c | while ((skb = skb_dequeue(&entry->skb)) != NULL) |
skb | 278 | net/ipv4/arp.c | skb_device_lock(skb); |
skb | 280 | net/ipv4/arp.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 338 | net/ipv4/arp.c | struct sk_buff *skb; |
skb | 353 | net/ipv4/arp.c | skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4) |
skb | 355 | net/ipv4/arp.c | if (skb == NULL) |
skb | 360 | net/ipv4/arp.c | skb_reserve(skb, dev->hard_header_len); |
skb | 361 | net/ipv4/arp.c | arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4)); |
skb | 362 | net/ipv4/arp.c | skb->arp = 1; |
skb | 363 | net/ipv4/arp.c | skb->dev = dev; |
skb | 364 | net/ipv4/arp.c | skb->free = 1; |
skb | 370 | net/ipv4/arp.c | dev->hard_header(skb,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len); |
skb | 400 | net/ipv4/arp.c | dev_queue_xmit(skb, dev, 0); |
skb | 498 | net/ipv4/arp.c | struct sk_buff *skb; |
skb | 520 | net/ipv4/arp.c | while((skb = skb_dequeue(&entry->skb)) != NULL) |
skb | 522 | net/ipv4/arp.c | IS_SKB(skb); |
skb | 523 | net/ipv4/arp.c | skb_device_lock(skb); |
skb | 525 | net/ipv4/arp.c | if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb)) |
skb | 527 | net/ipv4/arp.c | skb->arp = 1; |
skb | 528 | net/ipv4/arp.c | if(skb->sk==NULL) |
skb | 529 | net/ipv4/arp.c | dev_queue_xmit(skb, skb->dev, 0); |
skb | 531 | net/ipv4/arp.c | dev_queue_xmit(skb,skb->dev,skb->sk->priority); |
skb | 590 | net/ipv4/arp.c | int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 596 | net/ipv4/arp.c | struct arphdr *arp = (struct arphdr *)skb->h.raw; |
skb | 618 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 638 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 647 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 656 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 664 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 671 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 696 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 730 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 775 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 781 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 847 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 859 | net/ipv4/arp.c | entry->dev = skb->dev; |
skb | 860 | net/ipv4/arp.c | skb_queue_head_init(&entry->skb); |
skb | 869 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 912 | net/ipv4/arp.c | u32 saddr, struct sk_buff *skb) |
skb | 925 | net/ipv4/arp.c | skb->arp = 1; |
skb | 950 | net/ipv4/arp.c | skb->arp = 1; |
skb | 971 | net/ipv4/arp.c | if (skb != NULL) |
skb | 973 | net/ipv4/arp.c | skb_queue_tail(&entry->skb, skb); |
skb | 974 | net/ipv4/arp.c | skb_device_unlock(skb); |
skb | 986 | net/ipv4/arp.c | if (skb) |
skb | 987 | net/ipv4/arp.c | skb->arp = 1; |
skb | 1016 | net/ipv4/arp.c | skb_queue_head_init(&entry->skb); |
skb | 1017 | net/ipv4/arp.c | if (skb != NULL) |
skb | 1019 | net/ipv4/arp.c | skb_queue_tail(&entry->skb, skb); |
skb | 1020 | net/ipv4/arp.c | skb_device_unlock(skb); |
skb | 1025 | net/ipv4/arp.c | if (skb != NULL && skb->free) |
skb | 1026 | net/ipv4/arp.c | kfree_skb(skb, FREE_WRITE); |
skb | 1317 | net/ipv4/arp.c | skb_queue_head_init(&entry->skb); |
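
arp.c keeps a per-entry sk_buff_head of packets waiting for resolution (the entry->skb queue above): unresolved packets are parked with skb_queue_tail() and skb_device_unlock(), and when the reply arrives each one is re-locked, its hardware header rebuilt, and queued for transmission. A sketch of the flush side only; the function name is illustrative and the error path for a failed rebuild is omitted.

#include <linux/skbuff.h>
#include <linux/netdevice.h>

static void example_flush_pending(struct sk_buff_head *pending)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(pending)) != NULL) {
                skb_device_lock(skb);           /* keep the driver from freeing it under us */
                if (!skb->dev->rebuild_header(skb->data, skb->dev, skb->raddr, skb)) {
                        skb->arp = 1;           /* hardware header is complete now */
                        dev_queue_xmit(skb, skb->dev,
                                       skb->sk ? skb->sk->priority : 0);
                }
        }
}
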
skb | 167 | net/ipv4/icmp.c | struct sk_buff *skb; |
skb | 284 | net/ipv4/icmp.c | skb = (struct sk_buff *) alloc_skb(len+15, GFP_ATOMIC); |
skb | 285 | net/ipv4/icmp.c | if (skb == NULL) |
skb | 290 | net/ipv4/icmp.c | skb->free = 1; |
skb | 304 | net/ipv4/icmp.c | offset = ip_build_header(skb, our_addr, iph->saddr, |
skb | 310 | net/ipv4/icmp.c | skb->sk = NULL; |
skb | 311 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 319 | net/ipv4/icmp.c | skb_put(skb,sizeof(struct icmphdr) + sizeof(struct iphdr) + 8); |
skb | 328 | net/ipv4/icmp.c | icmph = (struct icmphdr *) (skb->data + offset); |
skb | 346 | net/ipv4/icmp.c | ip_queue_xmit(NULL, ndev, skb, 1); |
skb | 354 | net/ipv4/icmp.c | static void icmp_unreach(struct icmphdr *icmph, struct sk_buff *skb) |
skb | 425 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 433 | net/ipv4/icmp.c | static void icmp_redirect(struct icmphdr *icmph, struct sk_buff *skb, |
skb | 500 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 508 | net/ipv4/icmp.c | static void icmp_echo(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, |
skb | 526 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 533 | net/ipv4/icmp.c | IPPROTO_ICMP, opt, len, skb->ip_hdr->tos,255); |
skb | 539 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 574 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 581 | net/ipv4/icmp.c | static void icmp_timestamp(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, |
skb | 604 | net/ipv4/icmp.c | skb->sk = NULL; |
skb | 605 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 616 | net/ipv4/icmp.c | skb->ip_hdr->tos, 255); |
skb | 621 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 656 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 666 | net/ipv4/icmp.c | static void icmp_info(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, |
skb | 671 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 687 | net/ipv4/icmp.c | static void icmp_address(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, |
skb | 705 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 715 | net/ipv4/icmp.c | IPPROTO_ICMP, opt, len, skb->ip_hdr->tos,255); |
skb | 721 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 748 | net/ipv4/icmp.c | skb->sk = NULL; |
skb | 749 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 86 | net/ipv4/igmp.c | struct sk_buff *skb=alloc_skb(MAX_IGMP_SIZE, GFP_ATOMIC); |
skb | 90 | net/ipv4/igmp.c | if(skb==NULL) |
skb | 92 | net/ipv4/igmp.c | tmp=ip_build_header(skb, INADDR_ANY, address, &dev, IPPROTO_IGMP, NULL, |
skb | 93 | net/ipv4/igmp.c | skb->truesize, 0, 1); |
skb | 96 | net/ipv4/igmp.c | kfree_skb(skb, FREE_WRITE); |
skb | 99 | net/ipv4/igmp.c | ih=(struct igmphdr *)skb_put(skb,sizeof(struct igmphdr)); |
skb | 105 | net/ipv4/igmp.c | ip_queue_xmit(NULL,dev,skb,1); |
skb | 200 | net/ipv4/igmp.c | int igmp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 207 | net/ipv4/igmp.c | ih=(struct igmphdr *)skb->data; |
skb | 209 | net/ipv4/igmp.c | if(skb->len <sizeof(struct igmphdr) || skb->ip_hdr->ttl!=1 || ip_compute_csum((void *)skb->h.raw,sizeof(struct igmphdr))) |
skb | 211 | net/ipv4/igmp.c | kfree_skb(skb, FREE_READ); |
skb | 219 | net/ipv4/igmp.c | kfree_skb(skb, FREE_READ); |
skb | 184 | net/ipv4/ip.c | static int ip_send(struct sk_buff *skb, unsigned long daddr, int len, struct device *dev, unsigned long saddr) |
skb | 188 | net/ipv4/ip.c | skb->dev = dev; |
skb | 189 | net/ipv4/ip.c | skb->arp = 1; |
skb | 196 | net/ipv4/ip.c | skb_reserve(skb,(dev->hard_header_len+15)&~15); /* 16 byte aligned IP headers are good */ |
skb | 197 | net/ipv4/ip.c | mac = dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, len); |
skb | 201 | net/ipv4/ip.c | skb->arp = 0; |
skb | 202 | net/ipv4/ip.c | skb->raddr = daddr; /* next routing address */ |
skb | 208 | net/ipv4/ip.c | static int ip_send_room(struct sk_buff *skb, unsigned long daddr, int len, struct device *dev, unsigned long saddr) |
skb | 212 | net/ipv4/ip.c | skb->dev = dev; |
skb | 213 | net/ipv4/ip.c | skb->arp = 1; |
skb | 216 | net/ipv4/ip.c | skb_reserve(skb,MAX_HEADER); |
skb | 217 | net/ipv4/ip.c | mac = dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, len); |
skb | 221 | net/ipv4/ip.c | skb->arp = 0; |
skb | 222 | net/ipv4/ip.c | skb->raddr = daddr; /* next routing address */ |
skb | 236 | net/ipv4/ip.c | int ip_build_header(struct sk_buff *skb, unsigned long saddr, unsigned long daddr, |
skb | 250 | net/ipv4/ip.c | if(MULTICAST(daddr) && *dev==NULL && skb->sk && *skb->sk->ip_mc_name) |
skb | 251 | net/ipv4/ip.c | *dev=dev_get(skb->sk->ip_mc_name); |
skb | 255 | net/ipv4/ip.c | if(skb->localroute) |
skb | 280 | net/ipv4/ip.c | if(skb->localroute) |
skb | 311 | net/ipv4/ip.c | tmp = ip_send_room(skb, raddr, len, *dev, saddr); |
skb | 313 | net/ipv4/ip.c | tmp = ip_send(skb, raddr, len, *dev, saddr); |
skb | 319 | net/ipv4/ip.c | skb->dev = *dev; |
skb | 320 | net/ipv4/ip.c | skb->saddr = saddr; |
skb | 321 | net/ipv4/ip.c | if (skb->sk) |
skb | 322 | net/ipv4/ip.c | skb->sk->saddr = saddr; |
skb | 340 | net/ipv4/ip.c | iph=(struct iphdr *)skb_put(skb,sizeof(struct iphdr)); |
skb | 350 | net/ipv4/ip.c | skb->ip_hdr = iph; |
skb | 380 | net/ipv4/ip.c | static struct ipfrag *ip_frag_create(int offset, int end, struct sk_buff *skb, unsigned char *ptr) |
skb | 396 | net/ipv4/ip.c | fp->skb = skb; |
skb | 468 | net/ipv4/ip.c | IS_SKB(fp->skb); |
skb | 469 | net/ipv4/ip.c | kfree_skb(fp->skb,FREE_READ); |
skb | 501 | net/ipv4/ip.c | icmp_send(qp->fragments->skb,ICMP_TIME_EXCEEDED, |
skb | 518 | net/ipv4/ip.c | static struct ipq *ip_create(struct sk_buff *skb, struct iphdr *iph, struct device *dev) |
skb | 528 | net/ipv4/ip.c | skb->dev = qp->dev; |
skb | 608 | net/ipv4/ip.c | struct sk_buff *skb; |
skb | 619 | net/ipv4/ip.c | if ((skb = dev_alloc_skb(len)) == NULL) |
skb | 628 | net/ipv4/ip.c | skb_put(skb,len); |
skb | 629 | net/ipv4/ip.c | skb->h.raw = skb->data; |
skb | 630 | net/ipv4/ip.c | skb->free = 1; |
skb | 633 | net/ipv4/ip.c | ptr = (unsigned char *) skb->h.raw; |
skb | 643 | net/ipv4/ip.c | if(count+fp->len > skb->len) |
skb | 647 | net/ipv4/ip.c | kfree_skb(skb,FREE_WRITE); |
skb | 660 | net/ipv4/ip.c | iph = skb->h.iph; |
skb | 663 | net/ipv4/ip.c | skb->ip_hdr = iph; |
skb | 666 | net/ipv4/ip.c | return(skb); |
skb | 674 | net/ipv4/ip.c | static struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev) |
skb | 697 | net/ipv4/ip.c | return(skb); |
skb | 721 | net/ipv4/ip.c | if ((qp = ip_create(skb, iph, dev)) == NULL) |
skb | 723 | net/ipv4/ip.c | skb->sk = NULL; |
skb | 724 | net/ipv4/ip.c | kfree_skb(skb, FREE_READ); |
skb | 741 | net/ipv4/ip.c | ptr = skb->data + ihl; |
skb | 807 | net/ipv4/ip.c | kfree_skb(tmp->skb,FREE_READ); |
skb | 817 | net/ipv4/ip.c | tfp = ip_frag_create(offset, end, skb, ptr); |
skb | 825 | net/ipv4/ip.c | skb->sk = NULL; |
skb | 826 | net/ipv4/ip.c | kfree_skb(skb, FREE_READ); |
skb | 868 | net/ipv4/ip.c | void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag) |
skb | 882 | net/ipv4/ip.c | raw = skb->data; |
skb | 885 | net/ipv4/ip.c | skb->ip_hdr = iph; |
skb | 887 | net/ipv4/ip.c | iph = skb->ip_hdr; |
skb | 910 | net/ipv4/ip.c | icmp_send(skb,ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, dev->mtu, dev); |
skb | 923 | net/ipv4/ip.c | icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,dev->mtu, dev); |
skb | 975 | net/ipv4/ip.c | skb2->arp = skb->arp; |
skb | 976 | net/ipv4/ip.c | if(skb->free==0) |
skb | 994 | net/ipv4/ip.c | skb2->raddr = skb->raddr; /* For rebuild_header - must be here */ |
skb | 1044 | net/ipv4/ip.c | int ip_forward(struct sk_buff *skb, struct device *dev, int is_frag, unsigned long target_addr, int target_strict) |
skb | 1063 | net/ipv4/ip.c | fw_res=ip_fw_chk(skb->h.iph, dev, ip_fw_fwd_chain, ip_fw_fwd_policy, 0); |
skb | 1071 | net/ipv4/ip.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev); |
skb | 1089 | net/ipv4/ip.c | iph = skb->h.iph; |
skb | 1106 | net/ipv4/ip.c | icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0, dev); |
skb | 1122 | net/ipv4/ip.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_UNREACH, 0, dev); |
skb | 1145 | net/ipv4/ip.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_SR_FAILED, 0, dev); |
skb | 1160 | net/ipv4/ip.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev); |
skb | 1182 | net/ipv4/ip.c | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, raddr, dev); |
skb | 1198 | net/ipv4/ip.c | ip_fw_masquerade(&skb, dev2); |
skb | 1200 | net/ipv4/ip.c | IS_SKB(skb); |
skb | 1202 | net/ipv4/ip.c | if(skb_headroom(skb)<dev2->hard_header_len) |
skb | 1204 | net/ipv4/ip.c | skb2 = alloc_skb(dev2->hard_header_len + skb->len + 15, GFP_ATOMIC); |
skb | 1222 | net/ipv4/ip.c | ip_send(skb2,raddr,skb->len,dev2,dev2->pa_addr); |
skb | 1229 | net/ipv4/ip.c | ptr = skb_put(skb2,skb->len); |
skb | 1236 | net/ipv4/ip.c | memcpy(ptr, skb->h.raw, skb->len); |
skb | 1244 | net/ipv4/ip.c | skb2 = skb; |
skb | 1246 | net/ipv4/ip.c | skb->arp=1; |
skb | 1247 | net/ipv4/ip.c | skb->raddr=raddr; |
skb | 1250 | net/ipv4/ip.c | if(dev2->hard_header(skb, dev2, ETH_P_IP, NULL, NULL, skb->len)<0) |
skb | 1251 | net/ipv4/ip.c | skb->arp=0; |
skb | 1296 | net/ipv4/ip.c | if(skb==skb2) |
skb | 1311 | net/ipv4/ip.c | int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 1313 | net/ipv4/ip.c | struct iphdr *iph = skb->h.iph; |
skb | 1332 | net/ipv4/ip.c | return ipv6_rcv(skb,dev,pt); |
skb | 1341 | net/ipv4/ip.c | skb->ip_hdr = iph; |
skb | 1356 | net/ipv4/ip.c | if (skb->len<sizeof(struct iphdr) || iph->ihl<5 || iph->version != 4 || ip_fast_csum((unsigned char *)iph, iph->ihl) !=0 |
skb | 1357 | net/ipv4/ip.c | || skb->len < ntohs(iph->tot_len)) |
skb | 1360 | net/ipv4/ip.c | kfree_skb(skb, FREE_WRITE); |
skb | 1370 | net/ipv4/ip.c | skb_trim(skb,ntohs(iph->tot_len)); |
skb | 1381 | net/ipv4/ip.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0, dev); |
skb | 1382 | net/ipv4/ip.c | kfree_skb(skb, FREE_WRITE); |
skb | 1410 | net/ipv4/ip.c | unsigned char *opt_ptr=skb->h.raw+sizeof(struct iphdr); |
skb | 1412 | net/ipv4/ip.c | skb->ip_summed=0; /* Our free checksum is bogus for this case */ |
skb | 1429 | net/ipv4/ip.c | icmp_send(skb, ICMP_PARAMETERPROB, 0, 0, skb->dev); |
skb | 1430 | net/ipv4/ip.c | kfree_skb(skb, FREE_READ); |
skb | 1443 | net/ipv4/ip.c | kfree_skb(skb, FREE_READ); |
skb | 1450 | net/ipv4/ip.c | if (iph->daddr!=skb->dev->pa_addr && (brd = ip_chk_addr(iph->daddr)) == 0) |
skb | 1455 | net/ipv4/ip.c | icmp_send(skb, ICMP_PARAMETERPROB, 0, 0, skb->dev); |
skb | 1456 | net/ipv4/ip.c | kfree_skb(skb, FREE_READ); |
skb | 1471 | net/ipv4/ip.c | icmp_send(skb, ICMP_PARAMETERPROB, 0, 0, skb->dev); |
skb | 1472 | net/ipv4/ip.c | kfree_skb(skb,FREE_READ); |
skb | 1476 | net/ipv4/ip.c | *(u32 *)(&opt_ptr[opt_ptr[2]])=skb->dev->pa_addr; /* Record hop */ |
skb | 1523 | net/ipv4/ip.c | if ( iph->daddr == skb->dev->pa_addr || (brd = ip_chk_addr(iph->daddr)) != 0) |
skb | 1537 | net/ipv4/ip.c | kfree_skb(skb, FREE_WRITE); |
skb | 1552 | net/ipv4/ip.c | if (ip_fw_demasquerade(skb)) |
skb | 1554 | net/ipv4/ip.c | struct iphdr *iph=skb->h.iph; |
skb | 1555 | net/ipv4/ip.c | if(ip_forward(skb, dev, is_frag|4, iph->daddr, 0)) |
skb | 1556 | net/ipv4/ip.c | kfree_skb(skb, FREE_WRITE); |
skb | 1576 | net/ipv4/ip.c | skb=ip_defrag(iph,skb,dev); |
skb | 1577 | net/ipv4/ip.c | if(skb==NULL) |
skb | 1579 | net/ipv4/ip.c | skb->dev = dev; |
skb | 1580 | net/ipv4/ip.c | iph=skb->h.iph; |
skb | 1587 | net/ipv4/ip.c | skb->ip_hdr = iph; |
skb | 1588 | net/ipv4/ip.c | skb->h.raw += iph->ihl*4; |
skb | 1614 | net/ipv4/ip.c | skb1=skb_clone(skb, GFP_ATOMIC); |
skb | 1653 | net/ipv4/ip.c | skb2 = skb_clone(skb, GFP_ATOMIC); |
skb | 1659 | net/ipv4/ip.c | skb2 = skb; |
skb | 1683 | net/ipv4/ip.c | raw_rcv(raw_sk, skb, dev, iph->saddr, iph->daddr); |
skb | 1687 | net/ipv4/ip.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0, dev); |
skb | 1688 | net/ipv4/ip.c | kfree_skb(skb, FREE_WRITE); |
skb | 1702 | net/ipv4/ip.c | if(skb->pkt_type!=PACKET_HOST || brd==IS_BROADCAST) |
skb | 1704 | net/ipv4/ip.c | kfree_skb(skb,FREE_WRITE); |
skb | 1713 | net/ipv4/ip.c | if(ip_forward(skb, dev, is_frag, target_addr, target_strict)) |
skb | 1714 | net/ipv4/ip.c | kfree_skb(skb, FREE_WRITE); |
skb | 1719 | net/ipv4/ip.c | kfree_skb(skb, FREE_WRITE); |
skb | 1729 | net/ipv4/ip.c | static void ip_loopback(struct device *old_dev, struct sk_buff *skb) |
skb | 1732 | net/ipv4/ip.c | int len=skb->len-old_dev->hard_header_len; |
skb | 1741 | net/ipv4/ip.c | newskb->saddr=skb->saddr; |
skb | 1742 | net/ipv4/ip.c | newskb->daddr=skb->daddr; |
skb | 1743 | net/ipv4/ip.c | newskb->raddr=skb->raddr; |
skb | 1747 | net/ipv4/ip.c | newskb->pkt_type=skb->pkt_type; |
skb | 1752 | net/ipv4/ip.c | ip_send(newskb, skb->ip_hdr->daddr, len, dev, skb->ip_hdr->saddr); |
skb | 1760 | net/ipv4/ip.c | memcpy(newskb->ip_hdr,skb->ip_hdr,len); |
skb | 1779 | net/ipv4/ip.c | struct sk_buff *skb, int free) |
skb | 1791 | net/ipv4/ip.c | IS_SKB(skb); |
skb | 1798 | net/ipv4/ip.c | skb->dev = dev; |
skb | 1799 | net/ipv4/ip.c | skb->when = jiffies; |
skb | 1810 | net/ipv4/ip.c | ptr = skb->data; |
skb | 1813 | net/ipv4/ip.c | skb->ip_hdr = iph; |
skb | 1815 | net/ipv4/ip.c | iph = skb->ip_hdr; |
skb | 1817 | net/ipv4/ip.c | iph->tot_len = ntohs(skb->len-(((unsigned char *)iph)-skb->data)); |
skb | 1838 | net/ipv4/ip.c | skb->free = free; |
skb | 1848 | net/ipv4/ip.c | ip_fragment(sk,skb,dev,0); |
skb | 1849 | net/ipv4/ip.c | IS_SKB(skb); |
skb | 1850 | net/ipv4/ip.c | kfree_skb(skb,FREE_WRITE); |
skb | 1868 | net/ipv4/ip.c | if (skb->next != NULL) |
skb | 1871 | net/ipv4/ip.c | skb_unlink(skb); |
skb | 1892 | net/ipv4/ip.c | if (skb->link3 != NULL) |
skb | 1895 | net/ipv4/ip.c | skb->link3 = NULL; |
skb | 1899 | net/ipv4/ip.c | sk->send_tail = skb; |
skb | 1900 | net/ipv4/ip.c | sk->send_head = skb; |
skb | 1904 | net/ipv4/ip.c | sk->send_tail->link3 = skb; |
skb | 1905 | net/ipv4/ip.c | sk->send_tail = skb; |
skb | 1914 | net/ipv4/ip.c | skb->sk = sk; |
skb | 1936 | net/ipv4/ip.c | ip_loopback(dev,skb); |
skb | 1944 | net/ipv4/ip.c | ip_loopback(dev,skb); |
skb | 1953 | net/ipv4/ip.c | if(skb->ip_hdr->ttl==0) |
skb | 1955 | net/ipv4/ip.c | kfree_skb(skb, FREE_READ); |
skb | 1961 | net/ipv4/ip.c | ip_loopback(dev,skb); |
skb | 1972 | net/ipv4/ip.c | dev_queue_xmit(skb, dev, sk->priority); |
skb | 1976 | net/ipv4/ip.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 1983 | net/ipv4/ip.c | kfree_skb(skb, FREE_WRITE); |
skb | 2503 | net/ipv4/ip.c | struct sk_buff *skb=sock_alloc_send_skb(sk, length+20+15+dev->hard_header_len,0,&error); |
skb | 2504 | net/ipv4/ip.c | if(skb==NULL) |
skb | 2509 | net/ipv4/ip.c | skb->dev=dev; |
skb | 2510 | net/ipv4/ip.c | skb->free=1; |
skb | 2511 | net/ipv4/ip.c | skb->when=jiffies; |
skb | 2512 | net/ipv4/ip.c | skb->sk=sk; |
skb | 2513 | net/ipv4/ip.c | skb->arp=0; |
skb | 2514 | net/ipv4/ip.c | skb->saddr=saddr; |
skb | 2516 | net/ipv4/ip.c | skb->raddr=(rt&&rt->rt_gateway)?rt->rt_gateway:daddr; |
skb | 2517 | net/ipv4/ip.c | skb_reserve(skb,(dev->hard_header_len+15)&~15); |
skb | 2520 | net/ipv4/ip.c | memcpy(skb_push(skb,dev->hard_header_len),sk->ip_hcache_data,dev->hard_header_len); |
skb | 2521 | net/ipv4/ip.c | skb->arp=1; |
skb | 2525 | net/ipv4/ip.c | if(dev->hard_header(skb,dev,ETH_P_IP,NULL,NULL,0)>0) |
skb | 2526 | net/ipv4/ip.c | skb->arp=1; |
skb | 2528 | net/ipv4/ip.c | skb->ip_hdr=iph=(struct iphdr *)skb_put(skb,length); |
skb | 2548 | net/ipv4/ip.c | ip_fw_chk((void *)skb->data,dev,ip_acct_chain, IP_FW_F_ACCEPT,1); |
skb | 2551 | net/ipv4/ip.c | dev_queue_xmit(skb,dev,sk->priority); |
skb | 2555 | net/ipv4/ip.c | kfree_skb(skb, FREE_WRITE); |
skb | 2616 | net/ipv4/ip.c | struct sk_buff * skb; |
skb | 2624 | net/ipv4/ip.c | skb = sock_alloc_send_skb(sk, fraglen+15, 0, &error); |
skb | 2625 | net/ipv4/ip.c | if (skb == NULL) |
skb | 2637 | net/ipv4/ip.c | skb->next = skb->prev = NULL; |
skb | 2638 | net/ipv4/ip.c | skb->dev = dev; |
skb | 2639 | net/ipv4/ip.c | skb->when = jiffies; |
skb | 2640 | net/ipv4/ip.c | skb->free = 1; /* dubious, this one */ |
skb | 2641 | net/ipv4/ip.c | skb->sk = sk; |
skb | 2642 | net/ipv4/ip.c | skb->arp = 0; |
skb | 2643 | net/ipv4/ip.c | skb->saddr = saddr; |
skb | 2644 | net/ipv4/ip.c | skb->raddr = (rt&&rt->rt_gateway) ? rt->rt_gateway : daddr; |
skb | 2645 | net/ipv4/ip.c | skb_reserve(skb,(dev->hard_header_len+15)&~15); |
skb | 2646 | net/ipv4/ip.c | data = skb_put(skb, fraglen-dev->hard_header_len); |
skb | 2657 | net/ipv4/ip.c | memcpy(skb_push(skb,dev->hard_header_len),sk->ip_hcache_data, dev->hard_header_len); |
skb | 2658 | net/ipv4/ip.c | skb->arp=1; |
skb | 2662 | net/ipv4/ip.c | if(dev->hard_header(skb, dev, ETH_P_IP, |
skb | 2664 | net/ipv4/ip.c | skb->arp=1; |
skb | 2671 | net/ipv4/ip.c | skb->ip_hdr = iph = (struct iphdr *)data; |
skb | 2739 | net/ipv4/ip.c | if(skb->daddr==IGMP_ALL_HOSTS) |
skb | 2740 | net/ipv4/ip.c | ip_loopback(rt?rt->rt_dev:dev,skb); |
skb | 2748 | net/ipv4/ip.c | ip_loopback(rt?rt->rt_dev:dev,skb); |
skb | 2761 | net/ipv4/ip.c | if(skb->ip_hdr->ttl==0) |
skb | 2762 | net/ipv4/ip.c | kfree_skb(skb, FREE_READ); |
skb | 2773 | net/ipv4/ip.c | ip_loopback(dev,skb); |
skb | 2781 | net/ipv4/ip.c | dev_queue_xmit(skb, dev, sk->priority); |
skb | 2796 | net/ipv4/ip.c | kfree_skb(skb, FREE_WRITE); |
skb | 546 | net/ipv4/ip_fw.c | static struct sk_buff *revamp(struct sk_buff *skb, struct device *dev, struct ip_masq *ftp) |
skb | 548 | net/ipv4/ip_fw.c | struct iphdr *iph = skb->h.iph; |
skb | 568 | net/ipv4/ip_fw.c | while (skb->len - ((unsigned char *)data - skb->h.raw) > 18) |
skb | 604 | net/ipv4/ip_fw.c | return skb; |
skb | 633 | net/ipv4/ip_fw.c | return skb; |
skb | 639 | net/ipv4/ip_fw.c | printk("MASQUERADE: resizing needed for %d bytes (%ld)\n",ftp->delta, skb->len); |
skb | 643 | net/ipv4/ip_fw.c | skb2 = alloc_skb(MAX_HEADER + skb->len+ftp->delta, GFP_ATOMIC); |
skb | 646 | net/ipv4/ip_fw.c | return skb; |
skb | 648 | net/ipv4/ip_fw.c | skb2->free = skb->free; |
skb | 650 | net/ipv4/ip_fw.c | skb_put(skb2,skb->len + ftp->delta); |
skb | 651 | net/ipv4/ip_fw.c | skb2->h.raw = &skb2->data[skb->h.raw - skb->data]; |
skb | 657 | net/ipv4/ip_fw.c | memcpy(skb2->data, skb->data, (p - (char *)skb->data)); |
skb | 658 | net/ipv4/ip_fw.c | memcpy(&skb2->data[(p - (char *)skb->data)], buf, strlen(buf)); |
skb | 659 | net/ipv4/ip_fw.c | memcpy(&skb2->data[(p - (char *)skb->data) + strlen(buf)], data, |
skb | 660 | net/ipv4/ip_fw.c | skb->len - ((char *)skb->h.raw - data)); |
skb | 667 | net/ipv4/ip_fw.c | kfree_skb(skb, FREE_WRITE); |
skb | 670 | net/ipv4/ip_fw.c | return skb; |
skb | 685 | net/ipv4/ip_fw.c | struct sk_buff *skb=*skb_ptr; |
skb | 686 | net/ipv4/ip_fw.c | struct iphdr *iph = skb->h.iph; |
skb | 746 | net/ipv4/ip_fw.c | size = skb->len - ((unsigned char *)portptr - skb->h.raw); |
skb | 764 | net/ipv4/ip_fw.c | skb = revamp(*skb_ptr, dev, ms); |
skb | 765 | net/ipv4/ip_fw.c | *skb_ptr = skb; |
skb | 766 | net/ipv4/ip_fw.c | iph = skb->h.iph; |
skb | 781 | net/ipv4/ip_fw.c | tcp_send_check(th,iph->saddr,iph->daddr,size,skb->sk); |
skb | 52 | net/ipv4/ipip.c | int ipip_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 61 | net/ipv4/ipip.c | skb->h.iph=skb->data; /* Correct IP header pointer on to new header */ |
skb | 62 | net/ipv4/ipip.c | if(ip_forward(skb, dev, 0, daddr, 0)) |
skb | 63 | net/ipv4/ipip.c | kfree_skb(skb, FREE_READ); |
skb | 73 | net/ipv4/packet.c | int packet_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 89 | net/ipv4/packet.c | skb_push(skb,skb->data-skb->mac.raw); |
skb | 95 | net/ipv4/packet.c | skb->dev = dev; |
skb | 107 | net/ipv4/packet.c | if (sk->rmem_alloc + skb->truesize >= sk->rcvbuf) |
skb | 110 | net/ipv4/packet.c | skb->sk = NULL; |
skb | 111 | net/ipv4/packet.c | kfree_skb(skb, FREE_READ); |
skb | 118 | net/ipv4/packet.c | skb->sk = sk; |
skb | 119 | net/ipv4/packet.c | sk->rmem_alloc += skb->truesize; |
skb | 125 | net/ipv4/packet.c | skb_queue_tail(&sk->receive_queue,skb); |
skb | 127 | net/ipv4/packet.c | sk->data_ready(sk,skb->len); |
skb | 148 | net/ipv4/packet.c | struct sk_buff *skb; |
skb | 190 | net/ipv4/packet.c | skb = sk->prot->wmalloc(sk, len, 0, GFP_KERNEL); |
skb | 197 | net/ipv4/packet.c | if (skb == NULL) |
skb | 206 | net/ipv4/packet.c | skb->sk = sk; |
skb | 207 | net/ipv4/packet.c | skb->free = 1; |
skb | 208 | net/ipv4/packet.c | memcpy_fromfs(skb_put(skb,len), from, len); |
skb | 209 | net/ipv4/packet.c | skb->arp = 1; /* No ARP needs doing on this (complete) frame */ |
skb | 216 | net/ipv4/packet.c | dev_queue_xmit(skb, dev, sk->priority); |
skb | 218 | net/ipv4/packet.c | kfree_skb(skb, FREE_WRITE); |
skb | 292 | net/ipv4/packet.c | struct sk_buff *skb; |
skb | 315 | net/ipv4/packet.c | skb=skb_recv_datagram(sk,flags,noblock,&err); |
skb | 323 | net/ipv4/packet.c | if(skb==NULL) |
skb | 331 | net/ipv4/packet.c | copied = min(len, skb->len); |
skb | 333 | net/ipv4/packet.c | memcpy_tofs(to, skb->data, copied); /* We can't use skb_copy_datagram here */ |
skb | 334 | net/ipv4/packet.c | sk->stamp=skb->stamp; |
skb | 342 | net/ipv4/packet.c | saddr->sa_family = skb->dev->type; |
skb | 343 | net/ipv4/packet.c | memcpy(saddr->sa_data,skb->dev->name, 14); |
skb | 351 | net/ipv4/packet.c | skb_free_datagram(skb); |
skb | 198 | net/ipv4/rarp.c | static int rarp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 203 | net/ipv4/rarp.c | struct arphdr *rarp = (struct arphdr *) skb->data; |
skb | 204 | net/ipv4/rarp.c | unsigned char *rarp_ptr = skb_pull(skb,sizeof(struct arphdr)); |
skb | 216 | net/ipv4/rarp.c | kfree_skb(skb, FREE_READ); |
skb | 225 | net/ipv4/rarp.c | kfree_skb(skb, FREE_READ); |
skb | 243 | net/ipv4/rarp.c | kfree_skb(skb, FREE_READ); |
skb | 279 | net/ipv4/rarp.c | kfree_skb(skb, FREE_READ); |
skb | 97 | net/ipv4/raw.c | int raw_rcv(struct sock *sk, struct sk_buff *skb, struct device *dev, long saddr, long daddr) |
skb | 100 | net/ipv4/raw.c | skb->sk = sk; |
skb | 101 | net/ipv4/raw.c | skb_trim(skb,ntohs(skb->ip_hdr->tot_len)); |
skb | 102 | net/ipv4/raw.c | skb->h.raw = (unsigned char *) skb->ip_hdr; |
skb | 103 | net/ipv4/raw.c | skb->dev = dev; |
skb | 104 | net/ipv4/raw.c | skb->saddr = daddr; |
skb | 105 | net/ipv4/raw.c | skb->daddr = saddr; |
skb | 109 | net/ipv4/raw.c | if(sock_queue_rcv_skb(sk,skb)<0) |
skb | 112 | net/ipv4/raw.c | skb->sk=NULL; |
skb | 113 | net/ipv4/raw.c | kfree_skb(skb, FREE_READ); |
skb | 239 | net/ipv4/raw.c | struct sk_buff *skb; |
skb | 252 | net/ipv4/raw.c | skb=skb_recv_datagram(sk,flags,noblock,&err); |
skb | 253 | net/ipv4/raw.c | if(skb==NULL) |
skb | 256 | net/ipv4/raw.c | truesize=skb->len; |
skb | 259 | net/ipv4/raw.c | skb_copy_datagram(skb, 0, to, copied); |
skb | 260 | net/ipv4/raw.c | sk->stamp=skb->stamp; |
skb | 266 | net/ipv4/raw.c | sin->sin_addr.s_addr = skb->daddr; |
skb | 268 | net/ipv4/raw.c | skb_free_datagram(skb); |
skb | 393 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 397 | net/ipv4/tcp.c | skb=tcp_find_established(s); |
skb | 398 | net/ipv4/tcp.c | if(skb!=NULL) |
skb | 399 | net/ipv4/tcp.c | skb_unlink(skb); /* Take it off the queue */ |
skb | 401 | net/ipv4/tcp.c | return skb; |
skb | 412 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 414 | net/ipv4/tcp.c | while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) |
skb | 416 | net/ipv4/tcp.c | skb->sk->dead=1; |
skb | 417 | net/ipv4/tcp.c | tcp_close(skb->sk, 0); |
skb | 418 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 443 | net/ipv4/tcp.c | struct sk_buff * skb; |
skb | 450 | net/ipv4/tcp.c | skb = sk->send_head; |
skb | 452 | net/ipv4/tcp.c | while (skb != NULL) |
skb | 458 | net/ipv4/tcp.c | dev = skb->dev; |
skb | 459 | net/ipv4/tcp.c | IS_SKB(skb); |
skb | 460 | net/ipv4/tcp.c | skb->when = jiffies; |
skb | 466 | net/ipv4/tcp.c | skb_pull(skb,((unsigned char *)skb->ip_hdr)-skb->data); |
skb | 477 | net/ipv4/tcp.c | iph = (struct iphdr *)skb->data; |
skb | 493 | net/ipv4/tcp.c | if(skb->localroute) |
skb | 500 | net/ipv4/tcp.c | if(skb->sk) |
skb | 502 | net/ipv4/tcp.c | skb->sk->err=ENETUNREACH; |
skb | 503 | net/ipv4/tcp.c | skb->sk->error_report(skb->sk); |
skb | 509 | net/ipv4/tcp.c | skb->raddr=rt->rt_gateway; |
skb | 510 | net/ipv4/tcp.c | if(skb->raddr==0) |
skb | 511 | net/ipv4/tcp.c | skb->raddr=iph->daddr; |
skb | 512 | net/ipv4/tcp.c | skb->dev=dev; |
skb | 513 | net/ipv4/tcp.c | skb->arp=1; |
skb | 516 | net/ipv4/tcp.c | if(dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, skb->len)<0) |
skb | 517 | net/ipv4/tcp.c | skb->arp=0; |
skb | 549 | net/ipv4/tcp.c | if (sk && !skb_device_locked(skb)) |
skb | 552 | net/ipv4/tcp.c | skb_unlink(skb); |
skb | 555 | net/ipv4/tcp.c | dev_queue_xmit(skb, dev, sk->priority); |
skb | 582 | net/ipv4/tcp.c | skb = skb->link3; |
skb | 785 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 790 | net/ipv4/tcp.c | skb = sk->send_head; |
skb | 791 | net/ipv4/tcp.c | if (!skb) |
skb | 801 | net/ipv4/tcp.c | if (jiffies < skb->when + sk->rto) |
skb | 803 | net/ipv4/tcp.c | reset_xmit_timer (sk, TIME_WRITE, skb->when + sk->rto - jiffies); |
skb | 913 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 922 | net/ipv4/tcp.c | if (sk == NULL || (skb = skb_peek(&sk->receive_queue)) == NULL) |
skb | 939 | net/ipv4/tcp.c | if (before(counted, skb->h.th->seq)) /* Found a hole so stops here */ |
skb | 941 | net/ipv4/tcp.c | sum = skb->len -(counted - skb->h.th->seq); /* Length - header but start from where we are up to (avoid overlaps) */ |
skb | 942 | net/ipv4/tcp.c | if (skb->h.th->syn) |
skb | 947 | net/ipv4/tcp.c | if (skb->h.th->syn) |
skb | 967 | net/ipv4/tcp.c | if (skb->h.th->urg) |
skb | 969 | net/ipv4/tcp.c | if (amount && skb->h.th->psh) break; |
skb | 970 | net/ipv4/tcp.c | skb = skb->next; |
skb | 972 | net/ipv4/tcp.c | while(skb != (struct sk_buff *)&sk->receive_queue); |
skb | 1137 | net/ipv4/tcp.c | static void tcp_send_skb(struct sock *sk, struct sk_buff *skb) |
skb | 1140 | net/ipv4/tcp.c | struct tcphdr * th = skb->h.th; |
skb | 1146 | net/ipv4/tcp.c | size = skb->len - ((unsigned char *) th - skb->data); |
skb | 1152 | net/ipv4/tcp.c | if (size < sizeof(struct tcphdr) || size > skb->len) |
skb | 1155 | net/ipv4/tcp.c | skb, skb->data, th, skb->len); |
skb | 1156 | net/ipv4/tcp.c | kfree_skb(skb, FREE_WRITE); |
skb | 1171 | net/ipv4/tcp.c | kfree_skb(skb,FREE_WRITE); |
skb | 1181 | net/ipv4/tcp.c | skb->h.seq = ntohl(th->seq) + size - 4*th->doff; |
skb | 1191 | net/ipv4/tcp.c | if (after(skb->h.seq, sk->window_seq) || |
skb | 1198 | net/ipv4/tcp.c | if (skb->next != NULL) |
skb | 1201 | net/ipv4/tcp.c | skb_unlink(skb); |
skb | 1203 | net/ipv4/tcp.c | skb_queue_tail(&sk->write_queue, skb); |
skb | 1235 | net/ipv4/tcp.c | sk->prot->queue_xmit(sk, skb->dev, skb, 0); |
skb | 1258 | net/ipv4/tcp.c | struct sk_buff * skb; |
skb | 1263 | net/ipv4/tcp.c | skb = sk->partial; |
skb | 1264 | net/ipv4/tcp.c | if (skb) { |
skb | 1269 | net/ipv4/tcp.c | return skb; |
skb | 1278 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 1282 | net/ipv4/tcp.c | while ((skb = tcp_dequeue_partial(sk)) != NULL) |
skb | 1283 | net/ipv4/tcp.c | tcp_send_skb(sk, skb); |
skb | 1290 | net/ipv4/tcp.c | void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk) |
skb | 1300 | net/ipv4/tcp.c | sk->partial = skb; |
skb | 1465 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 1577 | net/ipv4/tcp.c | if ((skb = tcp_dequeue_partial(sk)) != NULL) |
skb | 1582 | net/ipv4/tcp.c | hdrlen = ((unsigned long)skb->h.th - (unsigned long)skb->data) |
skb | 1588 | net/ipv4/tcp.c | copy = min(sk->mss - (skb->len - hdrlen), len); |
skb | 1596 | net/ipv4/tcp.c | memcpy_fromfs(skb_put(skb,copy), from, copy); |
skb | 1602 | net/ipv4/tcp.c | if ((skb->len - hdrlen) >= sk->mss || |
skb | 1604 | net/ipv4/tcp.c | tcp_send_skb(sk, skb); |
skb | 1606 | net/ipv4/tcp.c | tcp_enqueue_partial(skb, sk); |
skb | 1643 | net/ipv4/tcp.c | skb = prot->wmalloc(sk, sk->mtu + 128 + prot->max_header + 15, 0, GFP_KERNEL); |
skb | 1645 | net/ipv4/tcp.c | send_tmp = skb; |
skb | 1653 | net/ipv4/tcp.c | skb = prot->wmalloc(sk, copy + prot->max_header + 15 , 0, GFP_KERNEL); |
skb | 1661 | net/ipv4/tcp.c | if (skb == NULL) |
skb | 1701 | net/ipv4/tcp.c | skb->sk = sk; |
skb | 1702 | net/ipv4/tcp.c | skb->free = 0; |
skb | 1703 | net/ipv4/tcp.c | skb->localroute = sk->localroute|(flags&MSG_DONTROUTE); |
skb | 1710 | net/ipv4/tcp.c | tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev, |
skb | 1711 | net/ipv4/tcp.c | IPPROTO_TCP, sk->opt, skb->truesize,sk->ip_tos,sk->ip_ttl); |
skb | 1714 | net/ipv4/tcp.c | prot->wfree(sk, skb); |
skb | 1720 | net/ipv4/tcp.c | skb->dev = dev; |
skb | 1721 | net/ipv4/tcp.c | skb->h.th =(struct tcphdr *)skb_put(skb,sizeof(struct tcphdr)); |
skb | 1722 | net/ipv4/tcp.c | tmp = tcp_build_header(skb->h.th, sk, len-copy); |
skb | 1725 | net/ipv4/tcp.c | prot->wfree(sk, skb); |
skb | 1734 | net/ipv4/tcp.c | skb->h.th->urg = 1; |
skb | 1735 | net/ipv4/tcp.c | skb->h.th->urg_ptr = ntohs(copy); |
skb | 1738 | net/ipv4/tcp.c | memcpy_fromfs(skb_put(skb,copy), from, copy); |
skb | 1743 | net/ipv4/tcp.c | skb->free = 0; |
skb | 1751 | net/ipv4/tcp.c | tcp_send_skb(sk, skb); |
skb | 1891 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 1907 | net/ipv4/tcp.c | while((skb=skb_peek(&sk->receive_queue)) != NULL) |
skb | 1909 | net/ipv4/tcp.c | if (!skb->used || skb->users) |
skb | 1911 | net/ipv4/tcp.c | skb_unlink(skb); |
skb | 1912 | net/ipv4/tcp.c | skb->sk = sk; |
skb | 1913 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 2070 | net/ipv4/tcp.c | struct sk_buff * skb; |
skb | 2086 | net/ipv4/tcp.c | skb = skb_peek(&sk->receive_queue); |
skb | 2089 | net/ipv4/tcp.c | if (!skb) |
skb | 2091 | net/ipv4/tcp.c | if (before(*seq, skb->h.th->seq)) |
skb | 2093 | net/ipv4/tcp.c | offset = *seq - skb->h.th->seq; |
skb | 2094 | net/ipv4/tcp.c | if (skb->h.th->syn) |
skb | 2096 | net/ipv4/tcp.c | if (offset < skb->len) |
skb | 2098 | net/ipv4/tcp.c | if (skb->h.th->fin) |
skb | 2101 | net/ipv4/tcp.c | skb->used = 1; |
skb | 2102 | net/ipv4/tcp.c | skb = skb->next; |
skb | 2104 | net/ipv4/tcp.c | while (skb != (struct sk_buff *)&sk->receive_queue); |
skb | 2161 | net/ipv4/tcp.c | skb->users++; |
skb | 2167 | net/ipv4/tcp.c | used = skb->len - offset; |
skb | 2206 | net/ipv4/tcp.c | memcpy_tofs(to,((unsigned char *)skb->h.th) + |
skb | 2207 | net/ipv4/tcp.c | skb->h.th->doff*4 + offset, used); |
skb | 2218 | net/ipv4/tcp.c | skb->users --; |
skb | 2222 | net/ipv4/tcp.c | if (used + offset < skb->len) |
skb | 2229 | net/ipv4/tcp.c | if (skb->h.th->fin) |
skb | 2233 | net/ipv4/tcp.c | skb->used = 1; |
skb | 2245 | net/ipv4/tcp.c | skb->used = 1; |
skb | 2683 | net/ipv4/tcp.c | static void tcp_conn_request(struct sock *sk, struct sk_buff *skb, |
skb | 2696 | net/ipv4/tcp.c | th = skb->h.th; |
skb | 2709 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 2721 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 2738 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 2771 | net/ipv4/tcp.c | newsk->acked_seq = skb->h.th->seq+1; |
skb | 2772 | net/ipv4/tcp.c | newsk->copied_seq = skb->h.th->seq+1; |
skb | 2773 | net/ipv4/tcp.c | newsk->fin_seq = skb->h.th->seq; |
skb | 2790 | net/ipv4/tcp.c | newsk->dummy_th.source = skb->h.th->dest; |
skb | 2791 | net/ipv4/tcp.c | newsk->dummy_th.dest = skb->h.th->source; |
skb | 2810 | net/ipv4/tcp.c | newsk->acked_seq = skb->h.th->seq + 1; |
skb | 2811 | net/ipv4/tcp.c | newsk->copied_seq = skb->h.th->seq + 1; |
skb | 2819 | net/ipv4/tcp.c | newsk->ip_tos=skb->ip_hdr->tos; |
skb | 2862 | net/ipv4/tcp.c | tcp_options(newsk,skb->h.th); |
skb | 2874 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 2901 | net/ipv4/tcp.c | skb->sk = sk; |
skb | 2902 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 2909 | net/ipv4/tcp.c | memcpy(t1, skb->h.th, sizeof(*t1)); |
skb | 2914 | net/ipv4/tcp.c | t1->dest = skb->h.th->source; |
skb | 2927 | net/ipv4/tcp.c | t1->ack_seq = ntohl(skb->h.th->seq+1); |
skb | 2938 | net/ipv4/tcp.c | skb->sk = newsk; |
skb | 2944 | net/ipv4/tcp.c | sk->rmem_alloc -= skb->truesize; |
skb | 2945 | net/ipv4/tcp.c | newsk->rmem_alloc += skb->truesize; |
skb | 2947 | net/ipv4/tcp.c | skb_queue_tail(&sk->receive_queue,skb); |
skb | 2982 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 2990 | net/ipv4/tcp.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) |
skb | 2991 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 3029 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 3047 | net/ipv4/tcp.c | while((skb = skb_peek(&sk->write_queue)) != NULL && |
skb | 3048 | net/ipv4/tcp.c | before(skb->h.seq, sk->window_seq + 1) && |
skb | 3051 | net/ipv4/tcp.c | before(skb->h.seq, sk->rcv_ack_seq + 1)) |
skb | 3054 | net/ipv4/tcp.c | IS_SKB(skb); |
skb | 3055 | net/ipv4/tcp.c | skb_unlink(skb); |
skb | 3061 | net/ipv4/tcp.c | if (before(skb->h.seq, sk->rcv_ack_seq +1)) |
skb | 3069 | net/ipv4/tcp.c | kfree_skb(skb, FREE_WRITE); |
skb | 3085 | net/ipv4/tcp.c | iph = skb->ip_hdr; |
skb | 3087 | net/ipv4/tcp.c | size = skb->len - (((unsigned char *) th) - skb->data); |
skb | 3094 | net/ipv4/tcp.c | sk->sent_seq = skb->h.seq; |
skb | 3100 | net/ipv4/tcp.c | sk->prot->queue_xmit(sk, skb->dev, skb, skb->free); |
skb | 3208 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 3227 | net/ipv4/tcp.c | skb = skb2; |
skb | 3228 | net/ipv4/tcp.c | skb2 = skb->link3; |
skb | 3229 | net/ipv4/tcp.c | skb->link3 = NULL; |
skb | 3230 | net/ipv4/tcp.c | if (after(skb->h.seq, sk->window_seq)) |
skb | 3235 | net/ipv4/tcp.c | if (skb->next != NULL) |
skb | 3237 | net/ipv4/tcp.c | skb_unlink(skb); |
skb | 3241 | net/ipv4/tcp.c | skb_queue_head(&sk->write_queue,skb); |
skb | 3243 | net/ipv4/tcp.c | skb_append(wskb,skb); |
skb | 3244 | net/ipv4/tcp.c | wskb = skb; |
skb | 3250 | net/ipv4/tcp.c | sk->send_head = skb; |
skb | 3251 | net/ipv4/tcp.c | sk->send_tail = skb; |
skb | 3255 | net/ipv4/tcp.c | sk->send_tail->link3 = skb; |
skb | 3256 | net/ipv4/tcp.c | sk->send_tail = skb; |
skb | 3258 | net/ipv4/tcp.c | skb->link3 = NULL; |
skb | 3719 | net/ipv4/tcp.c | static int tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th) |
skb | 3721 | net/ipv4/tcp.c | sk->fin_seq = th->seq + skb->len + th->syn + th->fin; |
skb | 3806 | net/ipv4/tcp.c | extern __inline__ int tcp_data(struct sk_buff *skb, struct sock *sk, |
skb | 3814 | net/ipv4/tcp.c | th = skb->h.th; |
skb | 3815 | net/ipv4/tcp.c | skb_pull(skb,th->doff*4); |
skb | 3816 | net/ipv4/tcp.c | skb_trim(skb,len-(th->doff*4)); |
skb | 3823 | net/ipv4/tcp.c | sk->bytes_rcv += skb->len; |
skb | 3825 | net/ipv4/tcp.c | if (skb->len == 0 && !th->fin) |
skb | 3833 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 3852 | net/ipv4/tcp.c | if(skb->len) /* We don't care if it's just an ack or |
skb | 3855 | net/ipv4/tcp.c | new_seq= th->seq + skb->len + th->syn; /* Right edge of _data_ part of frame */ |
skb | 3878 | net/ipv4/tcp.c | tcp_reset(sk->saddr, sk->daddr, skb->h.th, |
skb | 3879 | net/ipv4/tcp.c | sk->prot, NULL, skb->dev, sk->ip_tos, sk->ip_ttl); |
skb | 3884 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 3907 | net/ipv4/tcp.c | skb_queue_head(&sk->receive_queue,skb); |
skb | 3918 | net/ipv4/tcp.c | printk("skb->h.th->seq = %d\n",skb->h.th->seq); |
skb | 3931 | net/ipv4/tcp.c | if (th->seq==skb1->h.th->seq && skb->len>= skb1->len) |
skb | 3933 | net/ipv4/tcp.c | skb_append(skb1,skb); |
skb | 3947 | net/ipv4/tcp.c | skb_append(skb1,skb); |
skb | 3956 | net/ipv4/tcp.c | skb_queue_head(&sk->receive_queue, skb); |
skb | 3966 | net/ipv4/tcp.c | th->ack_seq = th->seq + skb->len; |
skb | 3998 | net/ipv4/tcp.c | skb->acked = 1; |
skb | 4005 | net/ipv4/tcp.c | if (skb->h.th->fin) |
skb | 4007 | net/ipv4/tcp.c | tcp_fin(skb,sk,skb->h.th); |
skb | 4010 | net/ipv4/tcp.c | for(skb2 = skb->next; |
skb | 4032 | net/ipv4/tcp.c | tcp_fin(skb,sk,skb->h.th); |
skb | 4071 | net/ipv4/tcp.c | if (!skb->acked) |
skb | 4209 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 4226 | net/ipv4/tcp.c | while((skb = tcp_dequeue_established(sk)) == NULL) |
skb | 4252 | net/ipv4/tcp.c | newsk = skb->sk; |
skb | 4254 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 4481 | net/ipv4/tcp.c | static int tcp_std_reset(struct sock *sk, struct sk_buff *skb) |
skb | 4504 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 4514 | net/ipv4/tcp.c | int tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 4523 | net/ipv4/tcp.c | if(skb->pkt_type!=PACKET_HOST) |
skb | 4525 | net/ipv4/tcp.c | kfree_skb(skb,FREE_READ); |
skb | 4529 | net/ipv4/tcp.c | th = skb->h.th; |
skb | 4564 | net/ipv4/tcp.c | skb_pull(skb, skb->h.raw-skb->data); |
skb | 4569 | net/ipv4/tcp.c | (skb->ip_summed && tcp_check(th, len, saddr, daddr, skb->csum ))|| |
skb | 4570 | net/ipv4/tcp.c | (!skb->ip_summed && tcp_check(th, len, saddr, daddr, csum_partial((char *)th, len, 0))) |
skb | 4573 | net/ipv4/tcp.c | skb->sk = NULL; |
skb | 4574 | net/ipv4/tcp.c | kfree_skb(skb,FREE_READ); |
skb | 4589 | net/ipv4/tcp.c | tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev,skb->ip_hdr->tos,255); |
skb | 4590 | net/ipv4/tcp.c | skb->sk = NULL; |
skb | 4594 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 4599 | net/ipv4/tcp.c | skb->acked = 0; |
skb | 4600 | net/ipv4/tcp.c | skb->used = 0; |
skb | 4601 | net/ipv4/tcp.c | skb->free = 0; |
skb | 4602 | net/ipv4/tcp.c | skb->saddr = daddr; |
skb | 4603 | net/ipv4/tcp.c | skb->daddr = saddr; |
skb | 4609 | net/ipv4/tcp.c | skb_queue_tail(&sk->back_log, skb); |
skb | 4620 | net/ipv4/tcp.c | tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev,skb->ip_hdr->tos,255); |
skb | 4621 | net/ipv4/tcp.c | skb->sk = NULL; |
skb | 4622 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 4639 | net/ipv4/tcp.c | if (sk->rmem_alloc + skb->truesize >= sk->rcvbuf) |
skb | 4641 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 4646 | net/ipv4/tcp.c | skb->sk=sk; |
skb | 4647 | net/ipv4/tcp.c | sk->rmem_alloc += skb->truesize; |
skb | 4677 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 4686 | net/ipv4/tcp.c | tcp_conn_request(sk, skb, daddr, saddr, opt, dev, tcp_init_seq()); |
skb | 4704 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 4727 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 4732 | net/ipv4/tcp.c | return tcp_std_reset(sk,skb); |
skb | 4737 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 4776 | net/ipv4/tcp.c | return tcp_std_reset(sk,skb); |
skb | 4786 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 4811 | net/ipv4/tcp.c | sk->rmem_alloc -= skb->truesize; |
skb | 4812 | net/ipv4/tcp.c | skb->sk = NULL; |
skb | 4821 | net/ipv4/tcp.c | skb->sk = sk; |
skb | 4822 | net/ipv4/tcp.c | sk->rmem_alloc += skb->truesize; |
skb | 4823 | net/ipv4/tcp.c | tcp_conn_request(sk, skb, daddr, saddr,opt, dev,seq+128000); |
skb | 4827 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 4841 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 4847 | net/ipv4/tcp.c | return tcp_std_reset(sk,skb); |
skb | 4855 | net/ipv4/tcp.c | tcp_reset(daddr,saddr,th, &tcp_prot, opt, dev, skb->ip_hdr->tos, 255); |
skb | 4856 | net/ipv4/tcp.c | return tcp_std_reset(sk,skb); |
skb | 4874 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 4887 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 4897 | net/ipv4/tcp.c | if(tcp_data(skb,sk, saddr, len)) |
skb | 4899 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 4919 | net/ipv4/tcp.c | struct sk_buff *buff,*skb; |
skb | 4943 | net/ipv4/tcp.c | (skb=skb_peek(&sk->write_queue))) |
skb | 4967 | net/ipv4/tcp.c | iph = (struct iphdr *)skb->ip_hdr; |
skb | 5027 | net/ipv4/tcp.c | tcp_data_start = skb->data + skb->dev->hard_header_len + |
skb | 5048 | net/ipv4/tcp.c | ow_size = skb->len - win_size - |
skb | 5049 | net/ipv4/tcp.c | ((unsigned long) (tcp_data_start - (void *) skb->data)); |
skb | 5052 | net/ipv4/tcp.c | skb_trim(skb,skb->len-win_size); |
skb | 137 | net/ipv4/udp.c | static int udp_deliver(struct sock *sk, struct udphdr *uh, struct sk_buff *skb, struct device *dev, long saddr, long daddr, int len); |
skb | 429 | net/ipv4/udp.c | struct sk_buff *skb; |
skb | 434 | net/ipv4/udp.c | skb = skb_peek(&sk->receive_queue); |
skb | 435 | net/ipv4/udp.c | if (skb != NULL) { |
skb | 441 | net/ipv4/udp.c | amount = skb->len; |
skb | 469 | net/ipv4/udp.c | struct sk_buff *skb; |
skb | 484 | net/ipv4/udp.c | skb=skb_recv_datagram(sk,flags,noblock,&er); |
skb | 485 | net/ipv4/udp.c | if(skb==NULL) |
skb | 488 | net/ipv4/udp.c | truesize = skb->len - sizeof(struct udphdr); |
skb | 495 | net/ipv4/udp.c | skb_copy_datagram(skb,sizeof(struct udphdr),to,copied); |
skb | 496 | net/ipv4/udp.c | sk->stamp=skb->stamp; |
skb | 502 | net/ipv4/udp.c | sin->sin_port = skb->h.uh->source; |
skb | 503 | net/ipv4/udp.c | sin->sin_addr.s_addr = skb->daddr; |
skb | 506 | net/ipv4/udp.c | skb_free_datagram(skb); |
skb | 568 | net/ipv4/udp.c | int udp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 584 | net/ipv4/udp.c | uh = (struct udphdr *) skb->h.uh; |
skb | 598 | net/ipv4/udp.c | kfree_skb(skb, FREE_WRITE); |
skb | 610 | net/ipv4/udp.c | ( skb->ip_summed && udp_check(uh, len, saddr, daddr, skb->csum ) ) || |
skb | 611 | net/ipv4/udp.c | ( !skb->ip_summed && udp_check(uh, len, saddr, daddr,csum_partial((char*)uh, len, 0))) |
skb | 626 | net/ipv4/udp.c | kfree_skb(skb, FREE_WRITE); |
skb | 650 | net/ipv4/udp.c | skb1=skb_clone(skb,GFP_ATOMIC); |
skb | 652 | net/ipv4/udp.c | skb1=skb; |
skb | 660 | net/ipv4/udp.c | kfree_skb(skb, FREE_READ); |
skb | 681 | net/ipv4/udp.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0, dev); |
skb | 687 | net/ipv4/udp.c | skb->sk = NULL; |
skb | 688 | net/ipv4/udp.c | kfree_skb(skb, FREE_WRITE); |
skb | 691 | net/ipv4/udp.c | return udp_deliver(sk,uh,skb,dev, saddr, daddr, len); |
skb | 694 | net/ipv4/udp.c | static int udp_deliver(struct sock *sk, struct udphdr *uh, struct sk_buff *skb, struct device *dev, long saddr, long daddr, int len) |
skb | 696 | net/ipv4/udp.c | skb->sk = sk; |
skb | 697 | net/ipv4/udp.c | skb->dev = dev; |
skb | 698 | net/ipv4/udp.c | skb_trim(skb,len); |
skb | 704 | net/ipv4/udp.c | skb->daddr = saddr; |
skb | 705 | net/ipv4/udp.c | skb->saddr = daddr; |
skb | 715 | net/ipv4/udp.c | if (sock_queue_rcv_skb(sk,skb)<0) |
skb | 720 | net/ipv4/udp.c | skb->sk = NULL; |
skb | 721 | net/ipv4/udp.c | kfree_skb(skb, FREE_WRITE); |
skb | 179 | net/ipx/af_ipx.c | struct sk_buff *skb; |
skb | 182 | net/ipx/af_ipx.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) { |
skb | 183 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 327 | net/ipx/af_ipx.c | ipxitf_def_skb_handler(struct sock *sock, struct sk_buff *skb) |
skb | 331 | net/ipx/af_ipx.c | if((retval = sock_queue_rcv_skb(sock, skb))<0) { |
skb | 340 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_WRITE); |
skb | 346 | net/ipx/af_ipx.c | ipxitf_demux_socket(ipx_interface *intrfc, struct sk_buff *skb, int copy) |
skb | 348 | net/ipx/af_ipx.c | ipx_packet *ipx = (ipx_packet *)(skb->h.raw); |
skb | 383 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_WRITE); |
skb | 394 | net/ipx/af_ipx.c | skb1 = skb_clone(skb, GFP_ATOMIC); |
skb | 399 | net/ipx/af_ipx.c | skb1 = skb; |
skb | 428 | net/ipx/af_ipx.c | ipxitf_adjust_skbuff(ipx_interface *intrfc, struct sk_buff *skb) |
skb | 431 | net/ipx/af_ipx.c | int in_offset = skb->h.raw - skb->head; |
skb | 441 | net/ipx/af_ipx.c | skb->arp = skb->free = 1; |
skb | 442 | net/ipx/af_ipx.c | return skb; |
skb | 448 | net/ipx/af_ipx.c | oldraw = skb->h.raw; |
skb | 449 | net/ipx/af_ipx.c | skb->h.raw = &(skb->data[out_offset]); |
skb | 450 | net/ipx/af_ipx.c | memmove(skb->h.raw, oldraw, skb->len); |
skb | 451 | net/ipx/af_ipx.c | skb->len += out_offset; |
skb | 452 | net/ipx/af_ipx.c | skb->arp = skb->free = 1; |
skb | 453 | net/ipx/af_ipx.c | return skb; |
skb | 458 | net/ipx/af_ipx.c | len = skb->len + out_offset; |
skb | 462 | net/ipx/af_ipx.c | skb2->h.raw=skb_put(skb2,skb->len); |
skb | 465 | net/ipx/af_ipx.c | memcpy(skb2->h.raw, skb->h.raw, skb->len); |
skb | 467 | net/ipx/af_ipx.c | kfree_skb(skb, FREE_WRITE); |
skb | 472 | net/ipx/af_ipx.c | ipxitf_send(ipx_interface *intrfc, struct sk_buff *skb, char *node) |
skb | 474 | net/ipx/af_ipx.c | ipx_packet *ipx = (ipx_packet *)(skb->h.raw); |
skb | 490 | net/ipx/af_ipx.c | return ipxitf_demux_socket(intrfc, skb, 0); |
skb | 492 | net/ipx/af_ipx.c | ipxitf_demux_socket(intrfc, skb, send_to_wire); |
skb | 512 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_WRITE); |
skb | 525 | net/ipx/af_ipx.c | skb = ipxitf_adjust_skbuff(intrfc, skb); |
skb | 526 | net/ipx/af_ipx.c | if (skb == NULL) return 0; |
skb | 529 | net/ipx/af_ipx.c | skb->dev = dev; |
skb | 530 | net/ipx/af_ipx.c | dl->datalink_header(dl, skb, dest_node); |
skb | 532 | net/ipx/af_ipx.c | if (skb->sk != NULL) { |
skb | 536 | net/ipx/af_ipx.c | skb->sk->wmem_alloc += skb->truesize; |
skb | 541 | net/ipx/af_ipx.c | dump_pkt("IPX snd:", (ipx_packet *)skb->h.raw); |
skb | 542 | net/ipx/af_ipx.c | dump_data("ETH hdr:", skb->data, skb->h.raw - skb->data); |
skb | 546 | net/ipx/af_ipx.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 564 | net/ipx/af_ipx.c | ipxitf_rcv(ipx_interface *intrfc, struct sk_buff *skb) |
skb | 566 | net/ipx/af_ipx.c | ipx_packet *ipx = (ipx_packet *) (skb->h.raw); |
skb | 597 | net/ipx/af_ipx.c | if ((skb->pkt_type != PACKET_BROADCAST) && |
skb | 598 | net/ipx/af_ipx.c | (skb->pkt_type != PACKET_MULTICAST)) |
skb | 599 | net/ipx/af_ipx.c | return ipxrtr_route_skb(skb); |
skb | 601 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 608 | net/ipx/af_ipx.c | return ipxitf_demux_socket(intrfc, skb, 0); |
skb | 612 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 994 | net/ipx/af_ipx.c | struct sk_buff *skb; |
skb | 1019 | net/ipx/af_ipx.c | skb=alloc_skb(size,GFP_KERNEL); |
skb | 1020 | net/ipx/af_ipx.c | if(skb==NULL) return -ENOMEM; |
skb | 1022 | net/ipx/af_ipx.c | skb->sk=sk; |
skb | 1023 | net/ipx/af_ipx.c | skb_reserve(skb,ipx_offset); |
skb | 1024 | net/ipx/af_ipx.c | skb->free=1; |
skb | 1025 | net/ipx/af_ipx.c | skb->arp=1; |
skb | 1028 | net/ipx/af_ipx.c | ipx=(ipx_packet *)skb_put(skb,sizeof(ipx_packet)); |
skb | 1033 | net/ipx/af_ipx.c | skb->h.raw = (unsigned char *)ipx; |
skb | 1042 | net/ipx/af_ipx.c | memcpy_fromfs(skb_put(skb,len),ubuf,len); |
skb | 1043 | net/ipx/af_ipx.c | return ipxitf_send(intrfc, skb, (rt && rt->ir_routed) ? |
skb | 1048 | net/ipx/af_ipx.c | ipxrtr_route_skb(struct sk_buff *skb) |
skb | 1050 | net/ipx/af_ipx.c | ipx_packet *ipx = (ipx_packet *) (skb->h.raw); |
skb | 1057 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 1061 | net/ipx/af_ipx.c | (void)ipxitf_send(i, skb, (r->ir_routed) ? |
skb | 1654 | net/ipx/af_ipx.c | int ipx_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 1661 | net/ipx/af_ipx.c | ipx=(ipx_packet *)skb->h.raw; |
skb | 1667 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 1673 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 1686 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 1691 | net/ipx/af_ipx.c | return ipxitf_rcv(intrfc, skb); |
skb | 1750 | net/ipx/af_ipx.c | struct sk_buff *skb; |
skb | 1764 | net/ipx/af_ipx.c | skb=skb_recv_datagram(sk,flags,noblock,&er); |
skb | 1765 | net/ipx/af_ipx.c | if(skb==NULL) |
skb | 1770 | net/ipx/af_ipx.c | ipx = (ipx_packet *)(skb->h.raw); |
skb | 1773 | net/ipx/af_ipx.c | skb_copy_datagram(skb,sizeof(struct ipx_packet),ubuf,copied); |
skb | 1783 | net/ipx/af_ipx.c | skb_free_datagram(skb); |
skb | 1839 | net/ipx/af_ipx.c | struct sk_buff *skb; |
skb | 1841 | net/ipx/af_ipx.c | if((skb=skb_peek(&sk->receive_queue))!=NULL) |
skb | 1842 | net/ipx/af_ipx.c | amount=skb->len; |
skb | 243 | net/netrom/af_netrom.c | struct sk_buff *skb; |
skb | 254 | net/netrom/af_netrom.c | while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) { |
skb | 255 | net/netrom/af_netrom.c | if (skb->sk != sk) { /* A pending connection */ |
skb | 256 | net/netrom/af_netrom.c | skb->sk->dead = 1; /* Queue the unaccepted socket for death */ |
skb | 257 | net/netrom/af_netrom.c | nr_set_timer(skb->sk); |
skb | 258 | net/netrom/af_netrom.c | skb->sk->nr->state = NR_STATE_0; |
skb | 261 | net/netrom/af_netrom.c | kfree_skb(skb, FREE_READ); |
skb | 836 | net/netrom/af_netrom.c | struct sk_buff *skb; |
skb | 855 | net/netrom/af_netrom.c | if ((skb = skb_dequeue(&sk->receive_queue)) == NULL) { |
skb | 866 | net/netrom/af_netrom.c | } while (skb == NULL); |
skb | 868 | net/netrom/af_netrom.c | newsk = skb->sk; |
skb | 873 | net/netrom/af_netrom.c | skb->sk = NULL; |
skb | 874 | net/netrom/af_netrom.c | kfree_skb(skb, FREE_READ); |
skb | 907 | net/netrom/af_netrom.c | int nr_rx_frame(struct sk_buff *skb, struct device *dev) |
skb | 915 | net/netrom/af_netrom.c | skb->sk = NULL; /* Initially we don't know who its for */ |
skb | 921 | net/netrom/af_netrom.c | src = (ax25_address *)(skb->data + 0); |
skb | 922 | net/netrom/af_netrom.c | dest = (ax25_address *)(skb->data + 7); |
skb | 924 | net/netrom/af_netrom.c | circuit_index = skb->data[15]; |
skb | 925 | net/netrom/af_netrom.c | circuit_id = skb->data[16]; |
skb | 926 | net/netrom/af_netrom.c | frametype = skb->data[19]; |
skb | 933 | net/netrom/af_netrom.c | skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); |
skb | 934 | net/netrom/af_netrom.c | skb->h.raw = skb->data; |
skb | 936 | net/netrom/af_netrom.c | return nr_rx_ip(skb, dev); |
skb | 946 | net/netrom/af_netrom.c | skb->h.raw = skb->data; |
skb | 948 | net/netrom/af_netrom.c | if ((frametype & 0x0F) == NR_CONNACK && skb->len == 22) |
skb | 953 | net/netrom/af_netrom.c | return nr_process_rx_frame(sk, skb); |
skb | 962 | net/netrom/af_netrom.c | nr_transmit_dm(skb); |
skb | 966 | net/netrom/af_netrom.c | user = (ax25_address *)(skb->data + 21); |
skb | 967 | net/netrom/af_netrom.c | window = skb->data[20]; |
skb | 969 | net/netrom/af_netrom.c | skb->sk = make; |
skb | 990 | net/netrom/af_netrom.c | if (skb->len == 37) { |
skb | 991 | net/netrom/af_netrom.c | timeout = skb->data[36] * 256 + skb->data[35]; |
skb | 1012 | net/netrom/af_netrom.c | skb_queue_head(&sk->receive_queue, skb); |
skb | 1017 | net/netrom/af_netrom.c | sk->data_ready(sk, skb->len); |
skb | 1029 | net/netrom/af_netrom.c | struct sk_buff *skb; |
skb | 1072 | net/netrom/af_netrom.c | if ((skb = sock_alloc_send_skb(sk, size, 0, &err)) == NULL) |
skb | 1075 | net/netrom/af_netrom.c | skb->sk = sk; |
skb | 1076 | net/netrom/af_netrom.c | skb->free = 1; |
skb | 1077 | net/netrom/af_netrom.c | skb->arp = 1; |
skb | 1079 | net/netrom/af_netrom.c | skb_reserve(skb, size - len); |
skb | 1085 | net/netrom/af_netrom.c | asmptr = skb_push(skb, NR_TRANSPORT_LEN); |
skb | 1105 | net/netrom/af_netrom.c | skb->h.raw = skb_put(skb, len); |
skb | 1107 | net/netrom/af_netrom.c | asmptr = skb->h.raw; |
skb | 1119 | net/netrom/af_netrom.c | kfree_skb(skb, FREE_WRITE); |
skb | 1123 | net/netrom/af_netrom.c | nr_output(sk, skb); /* Shove it onto the queue */ |
skb | 1144 | net/netrom/af_netrom.c | struct sk_buff *skb; |
skb | 1164 | net/netrom/af_netrom.c | if ((skb = skb_recv_datagram(sk, flags, noblock, &er)) == NULL) |
skb | 1168 | net/netrom/af_netrom.c | skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); |
skb | 1169 | net/netrom/af_netrom.c | skb->h.raw = skb->data; |
skb | 1172 | net/netrom/af_netrom.c | copied = (size < skb->len) ? size : skb->len; |
skb | 1173 | net/netrom/af_netrom.c | skb_copy_datagram(skb, 0, ubuf, copied); |
skb | 1179 | net/netrom/af_netrom.c | memcpy(&addr.sax25_call, skb->data + 7, sizeof(ax25_address)); |
skb | 1186 | net/netrom/af_netrom.c | skb_free_datagram(skb); |
skb | 1237 | net/netrom/af_netrom.c | struct sk_buff *skb; |
skb | 1239 | net/netrom/af_netrom.c | if ((skb = skb_peek(&sk->receive_queue)) != NULL) |
skb | 1240 | net/netrom/af_netrom.c | amount = skb->len - 20; |
skb | 53 | net/netrom/nr_dev.c | int nr_rx_ip(struct sk_buff *skb, struct device *dev) |
skb | 64 | net/netrom/nr_dev.c | skb->protocol = htons(ETH_P_IP); |
skb | 67 | net/netrom/nr_dev.c | skb->dev = dev; |
skb | 69 | net/netrom/nr_dev.c | skb->h.raw = skb->data; |
skb | 70 | net/netrom/nr_dev.c | ip_rcv(skb, skb->dev, NULL); |
skb | 75 | net/netrom/nr_dev.c | static int nr_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 78 | net/netrom/nr_dev.c | unsigned char *buff = skb_push(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); |
skb | 108 | net/netrom/nr_dev.c | unsigned long raddr, struct sk_buff *skb) |
skb | 113 | net/netrom/nr_dev.c | skb_device_unlock(skb); |
skb | 116 | net/netrom/nr_dev.c | skb->free = 1; |
skb | 117 | net/netrom/nr_dev.c | kfree_skb(skb, FREE_WRITE); |
skb | 130 | net/netrom/nr_dev.c | if (!nr_route_frame(skb, NULL)) { |
skb | 131 | net/netrom/nr_dev.c | skb->free = 1; |
skb | 132 | net/netrom/nr_dev.c | kfree_skb(skb, FREE_WRITE); |
skb | 164 | net/netrom/nr_dev.c | static int nr_xmit(struct sk_buff *skb, struct device *dev) |
skb | 168 | net/netrom/nr_dev.c | if (skb == NULL || dev == NULL) |
skb | 188 | net/netrom/nr_dev.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 51 | net/netrom/nr_in.c | static int nr_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more) |
skb | 53 | net/netrom/nr_in.c | struct sk_buff *skbo, *skbn = skb; |
skb | 56 | net/netrom/nr_in.c | sk->nr->fraglen += skb->len; |
skb | 57 | net/netrom/nr_in.c | skb_queue_tail(&sk->nr->frag_queue, skb); |
skb | 62 | net/netrom/nr_in.c | sk->nr->fraglen += skb->len; |
skb | 63 | net/netrom/nr_in.c | skb_queue_tail(&sk->nr->frag_queue, skb); |
skb | 95 | net/netrom/nr_in.c | static int nr_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) |
skb | 101 | net/netrom/nr_in.c | sk->window = skb->data[20]; |
skb | 102 | net/netrom/nr_in.c | sk->nr->your_index = skb->data[17]; |
skb | 103 | net/netrom/nr_in.c | sk->nr->your_id = skb->data[18]; |
skb | 141 | net/netrom/nr_in.c | static int nr_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype) |
skb | 170 | net/netrom/nr_in.c | static int nr_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype) |
skb | 178 | net/netrom/nr_in.c | nr = skb->data[18]; |
skb | 179 | net/netrom/nr_in.c | ns = skb->data[17]; |
skb | 277 | net/netrom/nr_in.c | skb_queue_head(&sk->nr->reseq_queue, skb); |
skb | 324 | net/netrom/nr_in.c | int nr_process_rx_frame(struct sock *sk, struct sk_buff *skb) |
skb | 336 | net/netrom/nr_in.c | frametype = skb->data[19]; |
skb | 341 | net/netrom/nr_in.c | queued = nr_state1_machine(sk, skb, frametype); |
skb | 344 | net/netrom/nr_in.c | queued = nr_state2_machine(sk, skb, frametype); |
skb | 347 | net/netrom/nr_in.c | queued = nr_state3_machine(sk, skb, frametype); |
skb | 48 | net/netrom/nr_out.c | void nr_output(struct sock *sk, struct sk_buff *skb) |
skb | 56 | net/netrom/nr_out.c | if (skb->len - NR_TRANSPORT_LEN > mtu) { |
skb | 58 | net/netrom/nr_out.c | memcpy(transport, skb->data, NR_TRANSPORT_LEN); |
skb | 59 | net/netrom/nr_out.c | skb_pull(skb, NR_TRANSPORT_LEN); |
skb | 61 | net/netrom/nr_out.c | frontlen = skb_headroom(skb); |
skb | 63 | net/netrom/nr_out.c | while (skb->len > 0) { |
skb | 73 | net/netrom/nr_out.c | len = (mtu > skb->len) ? skb->len : mtu; |
skb | 76 | net/netrom/nr_out.c | memcpy(skb_put(skbn, len), skb->data, len); |
skb | 77 | net/netrom/nr_out.c | skb_pull(skb, len); |
skb | 83 | net/netrom/nr_out.c | if (skb->len > 0) |
skb | 89 | net/netrom/nr_out.c | skb->free = 1; |
skb | 90 | net/netrom/nr_out.c | kfree_skb(skb, FREE_WRITE); |
skb | 92 | net/netrom/nr_out.c | skb_queue_tail(&sk->write_queue, skb); /* Throw it on the queue */ |
skb | 103 | net/netrom/nr_out.c | static void nr_send_iframe(struct sock *sk, struct sk_buff *skb) |
skb | 105 | net/netrom/nr_out.c | if (skb == NULL) |
skb | 108 | net/netrom/nr_out.c | skb->data[2] = sk->nr->vs; |
skb | 109 | net/netrom/nr_out.c | skb->data[3] = sk->nr->vr; |
skb | 112 | net/netrom/nr_out.c | skb->data[4] |= NR_CHOKE_FLAG; |
skb | 114 | net/netrom/nr_out.c | nr_transmit_buffer(sk, skb); |
skb | 119 | net/netrom/nr_out.c | struct sk_buff *skb, *skbn; |
skb | 121 | net/netrom/nr_out.c | if ((skb = skb_peek(&sk->nr->ack_queue)) == NULL) |
skb | 124 | net/netrom/nr_out.c | if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) |
skb | 136 | net/netrom/nr_out.c | struct sk_buff *skb, *skbn; |
skb | 159 | net/netrom/nr_out.c | skb = skb_dequeue(&sk->write_queue); |
skb | 161 | net/netrom/nr_out.c | if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { |
skb | 162 | net/netrom/nr_out.c | skb_queue_head(&sk->write_queue, skb); |
skb | 179 | net/netrom/nr_out.c | skb_queue_tail(&sk->nr->ack_queue, skb); |
skb | 194 | net/netrom/nr_out.c | void nr_transmit_buffer(struct sock *sk, struct sk_buff *skb) |
skb | 201 | net/netrom/nr_out.c | dptr = skb_push(skb, NR_NETWORK_LEN); |
skb | 217 | net/netrom/nr_out.c | skb->arp = 1; |
skb | 219 | net/netrom/nr_out.c | if (!nr_route_frame(skb, NULL)) { |
skb | 220 | net/netrom/nr_out.c | kfree_skb(skb, FREE_WRITE); |
skb | 647 | net/netrom/nr_route.c | int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25) |
skb | 655 | net/netrom/nr_route.c | nr_src = (ax25_address *)(skb->data + 0); |
skb | 656 | net/netrom/nr_route.c | nr_dest = (ax25_address *)(skb->data + 7); |
skb | 662 | net/netrom/nr_route.c | return nr_rx_frame(skb, dev); |
skb | 668 | net/netrom/nr_route.c | if (--skb->data[14] == 0) |
skb | 688 | net/netrom/nr_route.c | dptr = skb_push(skb, 1); |
skb | 691 | net/netrom/nr_route.c | ax25_send_frame(skb, (ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev); |
skb | 49 | net/netrom/nr_subr.c | struct sk_buff *skb; |
skb | 51 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->write_queue)) != NULL) { |
skb | 52 | net/netrom/nr_subr.c | skb->sk = sk; |
skb | 53 | net/netrom/nr_subr.c | skb->free = 1; |
skb | 54 | net/netrom/nr_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 57 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->nr->ack_queue)) != NULL) { |
skb | 58 | net/netrom/nr_subr.c | skb->sk = sk; |
skb | 59 | net/netrom/nr_subr.c | skb->free = 1; |
skb | 60 | net/netrom/nr_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 63 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->nr->reseq_queue)) != NULL) { |
skb | 64 | net/netrom/nr_subr.c | kfree_skb(skb, FREE_READ); |
skb | 67 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->nr->frag_queue)) != NULL) { |
skb | 68 | net/netrom/nr_subr.c | kfree_skb(skb, FREE_READ); |
skb | 79 | net/netrom/nr_subr.c | struct sk_buff *skb; |
skb | 86 | net/netrom/nr_subr.c | skb = skb_dequeue(&sk->nr->ack_queue); |
skb | 87 | net/netrom/nr_subr.c | skb->sk = sk; |
skb | 88 | net/netrom/nr_subr.c | skb->free = 1; |
skb | 89 | net/netrom/nr_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 102 | net/netrom/nr_subr.c | struct sk_buff *skb, *skb_prev = NULL; |
skb | 104 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->nr->ack_queue)) != NULL) { |
skb | 106 | net/netrom/nr_subr.c | skb_queue_head(&sk->write_queue, skb); |
skb | 108 | net/netrom/nr_subr.c | skb_append(skb_prev, skb); |
skb | 109 | net/netrom/nr_subr.c | skb_prev = skb; |
skb | 155 | net/netrom/nr_subr.c | struct sk_buff *skb; |
skb | 177 | net/netrom/nr_subr.c | if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL) |
skb | 183 | net/netrom/nr_subr.c | skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 2 + NR_NETWORK_LEN); |
skb | 185 | net/netrom/nr_subr.c | dptr = skb_put(skb, skb_tailroom(skb)); |
skb | 239 | net/netrom/nr_subr.c | skb->free = 1; |
skb | 241 | net/netrom/nr_subr.c | nr_transmit_buffer(sk, skb); |
skb | 248 | net/netrom/nr_subr.c | void nr_transmit_dm(struct sk_buff *skb) |
skb | 263 | net/netrom/nr_subr.c | memcpy(dptr, skb->data + 7, AX25_ADDR_LEN); |
skb | 269 | net/netrom/nr_subr.c | memcpy(dptr, skb->data + 0, AX25_ADDR_LEN); |
skb | 277 | net/netrom/nr_subr.c | *dptr++ = skb->data[15]; |
skb | 278 | net/netrom/nr_subr.c | *dptr++ = skb->data[16]; |
skb | 156 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 159 | net/unix/af_unix.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) |
skb | 163 | net/unix/af_unix.c | unix_socket *osk=skb->sk; |
skb | 165 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); /* Now surplus - free the skb first before the socket */ |
skb | 172 | net/unix/af_unix.c | kfree_skb(skb,FREE_WRITE); |
skb | 425 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 476 | net/unix/af_unix.c | skb=sock_alloc_send_skb(sk, 0, 0, &err); /* Marker object */ |
skb | 477 | net/unix/af_unix.c | if(skb==NULL) |
skb | 479 | net/unix/af_unix.c | skb->sk=sk; /* So they know it is us */ |
skb | 480 | net/unix/af_unix.c | skb->free=1; |
skb | 486 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); |
skb | 492 | net/unix/af_unix.c | skb_queue_tail(&other->receive_queue,skb); |
skb | 542 | net/unix/af_unix.c | unix_socket *ska,*skb; |
skb | 556 | net/unix/af_unix.c | skb=b->data; |
skb | 560 | net/unix/af_unix.c | skb->protinfo.af_unix.locks++; |
skb | 561 | net/unix/af_unix.c | ska->protinfo.af_unix.other=skb; |
skb | 562 | net/unix/af_unix.c | skb->protinfo.af_unix.other=ska; |
skb | 564 | net/unix/af_unix.c | skb->state=TCP_ESTABLISHED; |
skb | 572 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 595 | net/unix/af_unix.c | skb=skb_dequeue(&sk->receive_queue); |
skb | 596 | net/unix/af_unix.c | if(skb==NULL) |
skb | 612 | net/unix/af_unix.c | while(skb==NULL); |
skb | 613 | net/unix/af_unix.c | tsk=skb->sk; |
skb | 614 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); /* The buffer is just used as a tag */ |
skb | 658 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 708 | net/unix/af_unix.c | skb=sock_alloc_send_skb(sk,size,nonblock, &err); |
skb | 709 | net/unix/af_unix.c | if(skb==NULL) |
skb | 712 | net/unix/af_unix.c | skb->sk=sk; |
skb | 713 | net/unix/af_unix.c | skb->free=1; |
skb | 714 | net/unix/af_unix.c | memcpy_fromiovec(skb_put(skb,len),msg->msg_iov, len); |
skb | 734 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); |
skb | 738 | net/unix/af_unix.c | skb_queue_tail(&other->receive_queue, skb); |
skb | 749 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 788 | net/unix/af_unix.c | skb=skb_peek(&sk->receive_queue); |
skb | 789 | net/unix/af_unix.c | if(skb==NULL) |
skb | 817 | net/unix/af_unix.c | if(skb->sk->protinfo.af_unix.name) |
skb | 819 | net/unix/af_unix.c | memcpy(sun->sun_path, skb->sk->protinfo.af_unix.name, 108); |
skb | 827 | net/unix/af_unix.c | num=min(skb->len,size-copied); |
skb | 832 | net/unix/af_unix.c | memcpy_tofs(sp, skb->data, num); |
skb | 837 | net/unix/af_unix.c | memcpy_tofs(sp, skb->data,num); |
skb | 838 | net/unix/af_unix.c | skb_pull(skb,num); |
skb | 840 | net/unix/af_unix.c | if(skb->len==0) |
skb | 842 | net/unix/af_unix.c | skb_unlink(skb); |
skb | 843 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); |
skb | 908 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 912 | net/unix/af_unix.c | if((skb=skb_peek(&sk->receive_queue))!=NULL) |
skb | 913 | net/unix/af_unix.c | amount=skb->len; |
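
Annotation (not part of the generated listing): the net/ipv4/raw.c and net/ipv4/udp.c rows above repeat one delivery idiom of this kernel generation — tag the buffer with its owning socket and swapped addresses, hand it to sock_queue_rcv_skb(), and on failure detach it and drop it with kfree_skb(skb, FREE_READ). The fragment below is a minimal, compile-only sketch of that idiom, assuming the historical 1.x interfaces exactly as they appear in the rows; the reduced type and macro definitions are stand-ins added only to make the fragment self-contained, not the kernel's own declarations.

    /*
     * Hedged sketch of the receive-path idiom shown in the raw.c and
     * udp.c rows above.  The declarations below are stand-ins for the
     * era's <linux/skbuff.h>/<net/sock.h> interfaces, present only so
     * this compiles on its own; they are not the kernel definitions.
     */
    #define FREE_READ 1                       /* stand-in for the 1.x flag */

    struct device;                            /* opaque here               */
    struct sock;                              /* opaque here               */

    struct sk_buff {                          /* reduced stand-in          */
            struct sock   *sk;
            struct device *dev;
            unsigned long  saddr, daddr;
    };

    /* Provided by the era's core networking code (see the rows above). */
    extern int  sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
    extern void kfree_skb(struct sk_buff *skb, int rw);

    /* Deliver one received buffer to a socket, 1.x style. */
    static int deliver_to_socket(struct sock *sk, struct sk_buff *skb,
                                 struct device *dev,
                                 unsigned long saddr, unsigned long daddr)
    {
            skb->sk    = sk;                  /* charge the buffer to sk   */
            skb->dev   = dev;
            skb->saddr = daddr;               /* addresses swapped, as the */
            skb->daddr = saddr;               /* listed raw.c/udp.c lines do */

            if (sock_queue_rcv_skb(sk, skb) < 0) {
                    /* Receive queue full: orphan the buffer and drop it. */
                    skb->sk = NULL;
                    kfree_skb(skb, FREE_READ);
            }
            return 0;
    }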