tag | line | file | source code |
skb | 122 | drivers/net/3c501.c | static int el_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 377 | drivers/net/3c501.c | static int el_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 406 | drivers/net/3c501.c | if (skb == NULL) |
skb | 433 | drivers/net/3c501.c | int gp_start = 0x800 - (ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN); |
skb | 434 | drivers/net/3c501.c | unsigned char *buf = skb->data; |
skb | 462 | drivers/net/3c501.c | outsb(DATAPORT,buf,skb->len); /* load buffer (usual thing each byte increments the pointer) */ |
skb | 482 | drivers/net/3c501.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 679 | drivers/net/3c501.c | struct sk_buff *skb; |
skb | 699 | drivers/net/3c501.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 706 | drivers/net/3c501.c | if (skb == NULL) |
skb | 714 | drivers/net/3c501.c | skb_reserve(skb,2); /* Force 16 byte alignment */ |
skb | 715 | drivers/net/3c501.c | skb->dev = dev; |
skb | 721 | drivers/net/3c501.c | insb(DATAPORT, skb_put(skb,pkt_len), pkt_len); |
skb | 722 | drivers/net/3c501.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 723 | drivers/net/3c501.c | netif_rx(skb); |
skb | 77 | drivers/net/3c503.c | static void el2_block_input(struct device *dev, int count, struct sk_buff *skb, |
skb | 513 | drivers/net/3c503.c | el2_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 525 | drivers/net/3c503.c | memcpy_fromio(skb->data, dev->mem_start + ring_offset, semi_count); |
skb | 527 | drivers/net/3c503.c | memcpy_fromio(skb->data + semi_count, dev->rmem_start, count); |
skb | 530 | drivers/net/3c503.c | eth_io_copy_and_sum(skb, dev->mem_start + ring_offset, count, 0); |
skb | 551 | drivers/net/3c503.c | (skb->data)[i] = inb_p(E33G_FIFOH); |
skb | 557 | drivers/net/3c505.c | struct sk_buff *skb; |
skb | 572 | drivers/net/3c505.c | skb = dev_alloc_skb(rlen+2); |
skb | 583 | drivers/net/3c505.c | if (skb == NULL) { |
skb | 599 | drivers/net/3c505.c | skb_reserve(skb,2); /* 16 byte alignment */ |
skb | 600 | drivers/net/3c505.c | skb->dev = dev; |
skb | 605 | drivers/net/3c505.c | ptr = (unsigned short *)skb_put(skb,len); |
skb | 614 | drivers/net/3c505.c | kfree_skb(skb, FREE_WRITE); |
skb | 623 | drivers/net/3c505.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 624 | drivers/net/3c505.c | netif_rx(skb); |
skb | 1005 | drivers/net/3c505.c | elp_start_xmit (struct sk_buff *skb, struct device *dev) |
skb | 1012 | drivers/net/3c505.c | if (skb == NULL) { |
skb | 1020 | drivers/net/3c505.c | if (skb->len <= 0) |
skb | 1024 | drivers/net/3c505.c | printk("%s: request to send packet of length %d\n", dev->name, (int)skb->len); |
skb | 1045 | drivers/net/3c505.c | if (!send_packet(dev, skb->data, skb->len)) { |
skb | 1051 | drivers/net/3c505.c | printk("%s: packet of length %d sent\n", dev->name, (int)skb->len); |
skb | 1067 | drivers/net/3c505.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 282 | drivers/net/3c507.c | static int el16_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 451 | drivers/net/3c507.c | el16_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 486 | drivers/net/3c507.c | if (skb == NULL) { |
skb | 495 | drivers/net/3c507.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 496 | drivers/net/3c507.c | unsigned char *buf = skb->data; |
skb | 506 | drivers/net/3c507.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 841 | drivers/net/3c507.c | struct sk_buff *skb; |
skb | 844 | drivers/net/3c507.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 845 | drivers/net/3c507.c | if (skb == NULL) { |
skb | 851 | drivers/net/3c507.c | skb_reserve(skb,2); |
skb | 852 | drivers/net/3c507.c | skb->dev = dev; |
skb | 855 | drivers/net/3c507.c | memcpy(skb_put(skb,pkt_len), data_frame + 5, pkt_len); |
skb | 857 | drivers/net/3c507.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 858 | drivers/net/3c507.c | netif_rx(skb); |
skb | 104 | drivers/net/3c509.c | static int el3_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 382 | drivers/net/3c509.c | el3_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 401 | drivers/net/3c509.c | if (skb == NULL) { |
skb | 406 | drivers/net/3c509.c | if (skb->len <= 0) |
skb | 411 | drivers/net/3c509.c | dev->name, skb->len, inw(ioaddr + EL3_STATUS)); |
skb | 435 | drivers/net/3c509.c | outw(skb->len, ioaddr + TX_FIFO); |
skb | 438 | drivers/net/3c509.c | outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); |
skb | 448 | drivers/net/3c509.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 596 | drivers/net/3c509.c | struct sk_buff *skb; |
skb | 598 | drivers/net/3c509.c | skb = dev_alloc_skb(pkt_len+5); |
skb | 602 | drivers/net/3c509.c | if (skb != NULL) { |
skb | 603 | drivers/net/3c509.c | skb->dev = dev; |
skb | 604 | drivers/net/3c509.c | skb_reserve(skb,2); /* Align IP on 16 byte boundaries */ |
skb | 607 | drivers/net/3c509.c | insl(ioaddr+RX_FIFO, skb_put(skb,pkt_len), |
skb | 610 | drivers/net/3c509.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 611 | drivers/net/3c509.c | netif_rx(skb); |
skb | 238 | drivers/net/3c59x.c | static int vortex_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 636 | drivers/net/3c59x.c | vortex_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 663 | drivers/net/3c59x.c | if (skb == NULL || skb->len <= 0) { |
skb | 679 | drivers/net/3c59x.c | outl(skb->len, ioaddr + TX_FIFO); |
skb | 683 | drivers/net/3c59x.c | outl((int)(skb->data), ioaddr + Wn7_MasterAddr); |
skb | 684 | drivers/net/3c59x.c | outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen); |
skb | 685 | drivers/net/3c59x.c | vp->tx_skb = skb; |
skb | 689 | drivers/net/3c59x.c | outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); |
skb | 690 | drivers/net/3c59x.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 699 | drivers/net/3c59x.c | outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); |
skb | 700 | drivers/net/3c59x.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 891 | drivers/net/3c59x.c | struct sk_buff *skb; |
skb | 893 | drivers/net/3c59x.c | skb = dev_alloc_skb(pkt_len + 5); |
skb | 897 | drivers/net/3c59x.c | if (skb != NULL) { |
skb | 898 | drivers/net/3c59x.c | skb->dev = dev; |
skb | 899 | drivers/net/3c59x.c | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ |
skb | 901 | drivers/net/3c59x.c | insl(ioaddr + RX_FIFO, skb_put(skb, pkt_len), |
skb | 903 | drivers/net/3c59x.c | skb->protocol = eth_type_trans(skb, dev); |
skb | 904 | drivers/net/3c59x.c | netif_rx(skb); |
skb | 140 | drivers/net/8390.c | static int ei_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 182 | drivers/net/8390.c | if (skb == NULL) { |
skb | 187 | drivers/net/8390.c | length = skb->len; |
skb | 188 | drivers/net/8390.c | if (skb->len <= 0) |
skb | 228 | drivers/net/8390.c | ei_block_output(dev, length, skb->data, output_page); |
skb | 242 | drivers/net/8390.c | ei_block_output(dev, length, skb->data, ei_local->tx_start_page); |
skb | 253 | drivers/net/8390.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 467 | drivers/net/8390.c | struct sk_buff *skb; |
skb | 469 | drivers/net/8390.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 470 | drivers/net/8390.c | if (skb == NULL) { |
skb | 477 | drivers/net/8390.c | skb_reserve(skb,2); /* IP headers on 16 byte boundaries */ |
skb | 478 | drivers/net/8390.c | skb->dev = dev; |
skb | 479 | drivers/net/8390.c | skb_put(skb, pkt_len); /* Make room */ |
skb | 480 | drivers/net/8390.c | ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); |
skb | 481 | drivers/net/8390.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 482 | drivers/net/8390.c | netif_rx(skb); |
skb | 76 | drivers/net/ac3200.c | struct sk_buff *skb, int ring_offset); |
skb | 269 | drivers/net/ac3200.c | static void ac_block_input(struct device *dev, int count, struct sk_buff *skb, |
skb | 277 | drivers/net/ac3200.c | memcpy_fromio(skb->data, xfer_start, semi_count); |
skb | 279 | drivers/net/ac3200.c | memcpy_fromio(skb->data + semi_count, dev->rmem_start, count); |
skb | 282 | drivers/net/ac3200.c | eth_io_copy_and_sum(skb, xfer_start, count, 0); |
skb | 185 | drivers/net/apricot.c | static int i596_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 340 | drivers/net/apricot.c | struct sk_buff *skb = dev_alloc_skb(pkt_len); |
skb | 344 | drivers/net/apricot.c | if (skb == NULL) |
skb | 351 | drivers/net/apricot.c | skb->dev = dev; |
skb | 352 | drivers/net/apricot.c | memcpy(skb_put(skb,pkt_len), lp->scb.rfd->data, pkt_len); |
skb | 354 | drivers/net/apricot.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 355 | drivers/net/apricot.c | netif_rx(skb); |
skb | 358 | drivers/net/apricot.c | if (i596_debug > 4) print_eth(skb->data); |
skb | 406 | drivers/net/apricot.c | struct sk_buff *skb = ((struct sk_buff *)(tx_cmd->tbd->data)) -1; |
skb | 408 | drivers/net/apricot.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 569 | drivers/net/apricot.c | i596_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 607 | drivers/net/apricot.c | if (skb == NULL) { |
skb | 613 | drivers/net/apricot.c | if (skb->len <= 0) return 0; |
skb | 623 | drivers/net/apricot.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 632 | drivers/net/apricot.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 646 | drivers/net/apricot.c | tx_cmd->tbd->data = skb->data; |
skb | 648 | drivers/net/apricot.c | if (i596_debug > 3) print_eth(skb->data); |
skb | 804 | drivers/net/apricot.c | struct sk_buff *skb = ((struct sk_buff *)(tx_cmd->tbd->data)) -1; |
skb | 806 | drivers/net/apricot.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 810 | drivers/net/apricot.c | if (i596_debug >2) print_eth(skb->data); |
skb | 520 | drivers/net/arcnet.c | struct sk_buff *skb; /* packet data buffer */ |
skb | 528 | drivers/net/arcnet.c | struct sk_buff *skb; /* buffer from upper levels */ |
skb | 586 | drivers/net/arcnet.c | static int arcnet_send_packet_bad(struct sk_buff *skb,struct device *dev); |
skb | 587 | drivers/net/arcnet.c | static int arcnetA_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 602 | drivers/net/arcnet.c | int arcnetA_header(struct sk_buff *skb,struct device *dev, |
skb | 605 | drivers/net/arcnet.c | struct sk_buff *skb); |
skb | 606 | drivers/net/arcnet.c | unsigned short arcnetA_type_trans(struct sk_buff *skb,struct device *dev); |
skb | 611 | drivers/net/arcnet.c | static int arcnetE_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 619 | drivers/net/arcnet.c | static int arcnetS_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 622 | drivers/net/arcnet.c | int arcnetS_header(struct sk_buff *skb,struct device *dev, |
skb | 625 | drivers/net/arcnet.c | struct sk_buff *skb); |
skb | 626 | drivers/net/arcnet.c | unsigned short arcnetS_type_trans(struct sk_buff *skb,struct device *dev); |
skb | 1369 | drivers/net/arcnet.c | arcnet_send_packet_bad(struct sk_buff *skb, struct device *dev) |
skb | 1402 | drivers/net/arcnet.c | status,tickssofar,lp->outgoing.skb, |
skb | 1427 | drivers/net/arcnet.c | if (lp->outgoing.skb) |
skb | 1429 | drivers/net/arcnet.c | dev_kfree_skb(lp->outgoing.skb,FREE_WRITE); |
skb | 1432 | drivers/net/arcnet.c | lp->outgoing.skb=NULL; |
skb | 1444 | drivers/net/arcnet.c | if (skb == NULL) { |
skb | 1485 | drivers/net/arcnet.c | arcnetA_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 1493 | drivers/net/arcnet.c | bad=arcnet_send_packet_bad(skb,dev); |
skb | 1502 | drivers/net/arcnet.c | out->length = 1 < skb->len ? skb->len : 1; |
skb | 1503 | drivers/net/arcnet.c | out->hdr=(struct ClientData*)skb->data; |
skb | 1504 | drivers/net/arcnet.c | out->skb=skb; |
skb | 1509 | drivers/net/arcnet.c | for(i=0; i<skb->len; i++) |
skb | 1512 | drivers/net/arcnet.c | printk("%02hX ",((unsigned char*)skb->data)[i]); |
skb | 1532 | drivers/net/arcnet.c | ((char *)skb->data)+sizeof(struct ClientData), |
skb | 1537 | drivers/net/arcnet.c | dev_kfree_skb(out->skb,FREE_WRITE); |
skb | 1538 | drivers/net/arcnet.c | out->skb=NULL; |
skb | 1551 | drivers/net/arcnet.c | out->data=(u_char *)skb->data |
skb | 1583 | drivers/net/arcnet.c | if (out->skb) |
skb | 1584 | drivers/net/arcnet.c | dev_kfree_skb(out->skb,FREE_WRITE); |
skb | 1585 | drivers/net/arcnet.c | out->skb=NULL; |
skb | 1923 | drivers/net/arcnet.c | status,out->numsegs,out->segnum,out->skb); |
skb | 1954 | drivers/net/arcnet.c | if (!lp->outgoing.skb) |
skb | 1982 | drivers/net/arcnet.c | if (out->skb) |
skb | 1983 | drivers/net/arcnet.c | dev_kfree_skb(out->skb,FREE_WRITE); |
skb | 1984 | drivers/net/arcnet.c | out->skb=NULL; |
skb | 2210 | drivers/net/arcnet.c | struct sk_buff *skb; |
skb | 2239 | drivers/net/arcnet.c | if (in->skb) /* already assembling one! */ |
skb | 2244 | drivers/net/arcnet.c | kfree_skb(in->skb,FREE_WRITE); |
skb | 2247 | drivers/net/arcnet.c | in->skb=NULL; |
skb | 2252 | drivers/net/arcnet.c | skb = alloc_skb(length, GFP_ATOMIC); |
skb | 2253 | drivers/net/arcnet.c | if (skb == NULL) { |
skb | 2258 | drivers/net/arcnet.c | soft=(struct ClientData *)skb->data; |
skb | 2260 | drivers/net/arcnet.c | skb->len = length; |
skb | 2261 | drivers/net/arcnet.c | skb->dev = dev; |
skb | 2309 | drivers/net/arcnet.c | for(i=0; i< skb->len; i++) |
skb | 2312 | drivers/net/arcnet.c | printk("%02hX ",((unsigned char*)skb->data)[i]); |
skb | 2317 | drivers/net/arcnet.c | skb->protocol=arcnetA_type_trans(skb,dev); |
skb | 2319 | drivers/net/arcnet.c | netif_rx(skb); |
skb | 2345 | drivers/net/arcnet.c | if (in->skb && in->sequence!=arcsoft->sequence) |
skb | 2350 | drivers/net/arcnet.c | kfree_skb(in->skb,FREE_WRITE); |
skb | 2351 | drivers/net/arcnet.c | in->skb=NULL; |
skb | 2361 | drivers/net/arcnet.c | if (in->skb) /* already assembling one! */ |
skb | 2368 | drivers/net/arcnet.c | kfree_skb(in->skb,FREE_WRITE); |
skb | 2384 | drivers/net/arcnet.c | in->skb=skb=alloc_skb(508*in->numpackets |
skb | 2387 | drivers/net/arcnet.c | if (skb == NULL) { |
skb | 2396 | drivers/net/arcnet.c | skb->free=1; |
skb | 2398 | drivers/net/arcnet.c | soft=(struct ClientData *)skb->data; |
skb | 2400 | drivers/net/arcnet.c | skb->len=sizeof(struct ClientData); |
skb | 2401 | drivers/net/arcnet.c | skb->dev=dev; |
skb | 2415 | drivers/net/arcnet.c | if (!in->skb) |
skb | 2441 | drivers/net/arcnet.c | kfree_skb(in->skb,FREE_WRITE); |
skb | 2442 | drivers/net/arcnet.c | in->skb=NULL; |
skb | 2449 | drivers/net/arcnet.c | soft=(struct ClientData *)in->skb->data; |
skb | 2452 | drivers/net/arcnet.c | skb=in->skb; |
skb | 2454 | drivers/net/arcnet.c | memcpy(skb->data+skb->len, |
skb | 2458 | drivers/net/arcnet.c | skb->len+=length-sizeof(struct ClientData); |
skb | 2466 | drivers/net/arcnet.c | if (!skb || !in->skb) |
skb | 2469 | drivers/net/arcnet.c | skb,in->skb); |
skb | 2473 | drivers/net/arcnet.c | in->skb=NULL; |
skb | 2479 | drivers/net/arcnet.c | for(i=0; i<skb->len; i++) |
skb | 2482 | drivers/net/arcnet.c | printk("%02hX ",((unsigned char*)skb->data)[i]); |
skb | 2488 | drivers/net/arcnet.c | skb->protocol=arcnetA_type_trans(skb,dev); |
skb | 2490 | drivers/net/arcnet.c | netif_rx(skb); |
skb | 2545 | drivers/net/arcnet.c | int arcnetA_header(struct sk_buff *skb,struct device *dev, |
skb | 2549 | drivers/net/arcnet.c | skb_push(skb,dev->hard_header_len); |
skb | 2620 | drivers/net/arcnet.c | struct sk_buff *skb) |
skb | 2647 | drivers/net/arcnet.c | status=arp_find(&(head->daddr), dst, dev, dev->pa_addr, skb)? 1 : 0; |
skb | 2661 | drivers/net/arcnet.c | unsigned short arcnetA_type_trans(struct sk_buff *skb,struct device *dev) |
skb | 2667 | drivers/net/arcnet.c | skb->mac.raw=skb->data; |
skb | 2668 | drivers/net/arcnet.c | skb_pull(skb,dev->hard_header_len); |
skb | 2669 | drivers/net/arcnet.c | head=(struct ClientData *)skb->mac.raw; |
skb | 2672 | drivers/net/arcnet.c | skb->pkt_type=PACKET_BROADCAST; |
skb | 2677 | drivers/net/arcnet.c | skb->pkt_type=PACKET_OTHERHOST; |
skb | 2732 | drivers/net/arcnet.c | arcnetE_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 2739 | drivers/net/arcnet.c | short offset,length=skb->len+1; |
skb | 2743 | drivers/net/arcnet.c | bad=arcnet_send_packet_bad(skb,dev); |
skb | 2758 | drivers/net/arcnet.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 2774 | drivers/net/arcnet.c | if (((struct ethhdr*)(skb->data))->h_dest[0] == 0xFF) |
skb | 2778 | drivers/net/arcnet.c | ((struct ethhdr*)(skb->data))->h_dest[5]; |
skb | 2806 | drivers/net/arcnet.c | memcpy(arcsoft,skb->data,skb->len); |
skb | 2831 | drivers/net/arcnet.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 2858 | drivers/net/arcnet.c | struct sk_buff *skb; |
skb | 2863 | drivers/net/arcnet.c | skb = alloc_skb(length, GFP_ATOMIC); |
skb | 2864 | drivers/net/arcnet.c | if (skb == NULL) { |
skb | 2870 | drivers/net/arcnet.c | skb->len = length; |
skb | 2871 | drivers/net/arcnet.c | skb->dev = dev; |
skb | 2873 | drivers/net/arcnet.c | memcpy(skb->data,(u_char *)arcsoft+1,length-1); |
skb | 2879 | drivers/net/arcnet.c | for(i=0; i<skb->len; i++) |
skb | 2884 | drivers/net/arcnet.c | printk("%02hX ",((u_char *)skb->data)[i]); |
skb | 2889 | drivers/net/arcnet.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 2891 | drivers/net/arcnet.c | netif_rx(skb); |
skb | 2930 | drivers/net/arcnet.c | arcnetS_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 2934 | drivers/net/arcnet.c | struct S_ClientData *hdr=(struct S_ClientData *)skb->data; |
skb | 2938 | drivers/net/arcnet.c | bad=arcnet_send_packet_bad(skb,dev); |
skb | 2947 | drivers/net/arcnet.c | length = 1 < skb->len ? skb->len : 1; |
skb | 2952 | drivers/net/arcnet.c | for(i=0; i<skb->len; i++) |
skb | 2955 | drivers/net/arcnet.c | printk("%02hX ",((unsigned char*)skb->data)[i]); |
skb | 2964 | drivers/net/arcnet.c | skb->data+S_EXTRA_CLIENTDATA, |
skb | 2966 | drivers/net/arcnet.c | skb->data+sizeof(struct S_ClientData), |
skb | 2971 | drivers/net/arcnet.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 2984 | drivers/net/arcnet.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 3008 | drivers/net/arcnet.c | struct sk_buff *skb; |
skb | 3021 | drivers/net/arcnet.c | skb = alloc_skb(length, GFP_ATOMIC); |
skb | 3022 | drivers/net/arcnet.c | if (skb == NULL) { |
skb | 3027 | drivers/net/arcnet.c | soft=(struct S_ClientData *)skb->data; |
skb | 3028 | drivers/net/arcnet.c | skb->len = length; |
skb | 3038 | drivers/net/arcnet.c | skb->dev = dev; /* is already lp->sdev */ |
skb | 3043 | drivers/net/arcnet.c | for(i=0; i<skb->len; i++) |
skb | 3046 | drivers/net/arcnet.c | printk("%02hX ",((unsigned char*)skb->data)[i]); |
skb | 3051 | drivers/net/arcnet.c | skb->protocol=arcnetS_type_trans(skb,dev); |
skb | 3053 | drivers/net/arcnet.c | netif_rx(skb); |
skb | 3063 | drivers/net/arcnet.c | int arcnetS_header(struct sk_buff *skb,struct device *dev, |
skb | 3067 | drivers/net/arcnet.c | skb_push(skb,dev->hard_header_len); |
skb | 3120 | drivers/net/arcnet.c | struct sk_buff *skb) |
skb | 3144 | drivers/net/arcnet.c | return arp_find(&(head->daddr), dst, dev, dev->pa_addr, skb)? 1 : 0; |
skb | 3155 | drivers/net/arcnet.c | unsigned short arcnetS_type_trans(struct sk_buff *skb,struct device *dev) |
skb | 3161 | drivers/net/arcnet.c | skb->mac.raw=skb->data; |
skb | 3162 | drivers/net/arcnet.c | skb_pull(skb,dev->hard_header_len); |
skb | 3163 | drivers/net/arcnet.c | head=(struct S_ClientData *)skb->mac.raw; |
skb | 3166 | drivers/net/arcnet.c | skb->pkt_type=PACKET_BROADCAST; |
skb | 3171 | drivers/net/arcnet.c | skb->pkt_type=PACKET_OTHERHOST; |
skb | 119 | drivers/net/at1700.c | static int net_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 364 | drivers/net/at1700.c | net_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 398 | drivers/net/at1700.c | if (skb == NULL) { |
skb | 408 | drivers/net/at1700.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 409 | drivers/net/at1700.c | unsigned char *buf = skb->data; |
skb | 435 | drivers/net/at1700.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 520 | drivers/net/at1700.c | struct sk_buff *skb; |
skb | 531 | drivers/net/at1700.c | skb = dev_alloc_skb(pkt_len+3); |
skb | 532 | drivers/net/at1700.c | if (skb == NULL) { |
skb | 541 | drivers/net/at1700.c | skb->dev = dev; |
skb | 542 | drivers/net/at1700.c | skb_reserve(skb,2); |
skb | 544 | drivers/net/at1700.c | insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1); |
skb | 545 | drivers/net/at1700.c | skb->protocol=eth_type_trans(skb, dev); |
skb | 546 | drivers/net/at1700.c | netif_rx(skb); |
skb | 137 | drivers/net/atp.c | static int net_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 416 | drivers/net/atp.c | net_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 440 | drivers/net/atp.c | if (skb == NULL) { |
skb | 450 | drivers/net/atp.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 451 | drivers/net/atp.c | unsigned char *buf = skb->data; |
skb | 479 | drivers/net/atp.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 673 | drivers/net/atp.c | struct sk_buff *skb; |
skb | 675 | drivers/net/atp.c | skb = dev_alloc_skb(pkt_len); |
skb | 676 | drivers/net/atp.c | if (skb == NULL) { |
skb | 681 | drivers/net/atp.c | skb->dev = dev; |
skb | 683 | drivers/net/atp.c | read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port); |
skb | 686 | drivers/net/atp.c | unsigned char *data = skb->data; |
skb | 694 | drivers/net/atp.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 695 | drivers/net/atp.c | netif_rx(skb); |
skb | 353 | drivers/net/de4x5.c | struct sk_buff *skb[NUM_TX_DESC]; /* TX skb for freeing when sent */ |
skb | 398 | drivers/net/de4x5.c | static int de4x5_queue_pkt(struct sk_buff *skb, struct device *dev); |
skb | 418 | drivers/net/de4x5.c | static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb); |
skb | 938 | drivers/net/de4x5.c | de4x5_queue_pkt(struct sk_buff *skb, struct device *dev) |
skb | 977 | drivers/net/de4x5.c | if (lp->skb[i] != NULL) { |
skb | 978 | drivers/net/de4x5.c | if (lp->skb[i]->len != FAKE_FRAME_LEN) { |
skb | 980 | drivers/net/de4x5.c | dev_queue_xmit(lp->skb[i], dev, SOPRI_NORMAL); |
skb | 982 | drivers/net/de4x5.c | dev_kfree_skb(lp->skb[i], FREE_WRITE); |
skb | 985 | drivers/net/de4x5.c | dev_kfree_skb(lp->skb[i], FREE_WRITE); |
skb | 987 | drivers/net/de4x5.c | lp->skb[i] = NULL; |
skb | 990 | drivers/net/de4x5.c | if (skb->len != FAKE_FRAME_LEN) { |
skb | 991 | drivers/net/de4x5.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 993 | drivers/net/de4x5.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1023 | drivers/net/de4x5.c | } else if (skb == NULL) { |
skb | 1025 | drivers/net/de4x5.c | } else if (skb->len == FAKE_FRAME_LEN) { /* Don't TX a fake frame! */ |
skb | 1026 | drivers/net/de4x5.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1027 | drivers/net/de4x5.c | } else if (skb->len > 0) { |
skb | 1035 | drivers/net/de4x5.c | load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb); |
skb | 1146 | drivers/net/de4x5.c | struct sk_buff *skb; |
skb | 1149 | drivers/net/de4x5.c | if ((skb = dev_alloc_skb(pkt_len+2)) != NULL) { |
skb | 1150 | drivers/net/de4x5.c | skb->dev = dev; |
skb | 1152 | drivers/net/de4x5.c | skb_reserve(skb,2); /* Align */ |
skb | 1155 | drivers/net/de4x5.c | memcpy(skb_put(skb,len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), len); |
skb | 1156 | drivers/net/de4x5.c | memcpy(skb_put(skb,pkt_len-len), bus_to_virt(lp->rx_ring[0].buf), pkt_len - len); |
skb | 1158 | drivers/net/de4x5.c | memcpy(skb_put(skb,pkt_len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), pkt_len); |
skb | 1162 | drivers/net/de4x5.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 1163 | drivers/net/de4x5.c | netif_rx(skb); |
skb | 1173 | drivers/net/de4x5.c | buf = skb->data; /* Look at the dest addr */ |
skb | 1252 | drivers/net/de4x5.c | if (lp->skb[entry] != NULL) { |
skb | 1253 | drivers/net/de4x5.c | dev_kfree_skb(lp->skb[entry], FREE_WRITE); |
skb | 1254 | drivers/net/de4x5.c | lp->skb[entry] = NULL; |
skb | 1360 | drivers/net/de4x5.c | static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb) |
skb | 1367 | drivers/net/de4x5.c | lp->skb[lp->tx_new] = skb; |
skb | 2478 | drivers/net/de4x5.c | struct sk_buff *skb; |
skb | 2480 | drivers/net/de4x5.c | if ((skb = alloc_skb(0, GFP_ATOMIC)) != NULL) { |
skb | 2481 | drivers/net/de4x5.c | skb->len= FAKE_FRAME_LEN; |
skb | 2482 | drivers/net/de4x5.c | skb->arp=1; |
skb | 2483 | drivers/net/de4x5.c | skb->dev=dev; |
skb | 2484 | drivers/net/de4x5.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 247 | drivers/net/de600.c | static int de600_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 397 | drivers/net/de600.c | de600_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 402 | drivers/net/de600.c | byte *buffer = skb->data; |
skb | 410 | drivers/net/de600.c | if (skb == NULL) { |
skb | 434 | drivers/net/de600.c | PRINTK(("de600_start_xmit:len=%d, page %d/%d\n", skb->len, tx_fifo_in, free_tx_pages)); |
skb | 436 | drivers/net/de600.c | if ((len = skb->len) < RUNT) |
skb | 476 | drivers/net/de600.c | if (skb->sk && (skb->sk->protocol == IPPROTO_TCP) && |
skb | 477 | drivers/net/de600.c | (skb->sk->prot->rspace != &de600_rspace)) |
skb | 478 | drivers/net/de600.c | skb->sk->prot->rspace = de600_rspace; /* Ugh! */ |
skb | 481 | drivers/net/de600.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 580 | drivers/net/de600.c | struct sk_buff *skb; |
skb | 605 | drivers/net/de600.c | skb = dev_alloc_skb(size+2); |
skb | 607 | drivers/net/de600.c | if (skb == NULL) { |
skb | 614 | drivers/net/de600.c | skb->dev = dev; |
skb | 615 | drivers/net/de600.c | skb_reserve(skb,2); /* Align */ |
skb | 618 | drivers/net/de600.c | buffer = skb_put(skb,size); |
skb | 627 | drivers/net/de600.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 629 | drivers/net/de600.c | netif_rx(skb); |
skb | 511 | drivers/net/de620.c | de620_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 516 | drivers/net/de620.c | byte *buffer = skb->data; |
skb | 525 | drivers/net/de620.c | if (skb == NULL) { |
skb | 550 | drivers/net/de620.c | if ((len = skb->len) < RUNT) |
skb | 560 | drivers/net/de620.c | (int)skb->len, using_txbuf)); |
skb | 590 | drivers/net/de620.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 652 | drivers/net/de620.c | struct sk_buff *skb; |
skb | 707 | drivers/net/de620.c | skb = dev_alloc_skb(size+2); |
skb | 708 | drivers/net/de620.c | if (skb == NULL) { /* Yeah, but no place to put it... */ |
skb | 714 | drivers/net/de620.c | skb_reserve(skb,2); /* Align */ |
skb | 715 | drivers/net/de620.c | skb->dev = dev; |
skb | 716 | drivers/net/de620.c | skb->free = 1; |
skb | 718 | drivers/net/de620.c | buffer = skb_put(skb,size); |
skb | 722 | drivers/net/de620.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 723 | drivers/net/de620.c | netif_rx(skb); /* deliver it "upstairs" */ |
skb | 383 | drivers/net/depca.c | static int depca_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 408 | drivers/net/depca.c | static int load_packet(struct device *dev, struct sk_buff *skb); |
skb | 779 | drivers/net/depca.c | depca_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 804 | drivers/net/depca.c | } else if (skb == NULL) { |
skb | 806 | drivers/net/depca.c | } else if (skb->len > 0) { |
skb | 813 | drivers/net/depca.c | status = load_packet(dev, skb); |
skb | 821 | drivers/net/depca.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 912 | drivers/net/depca.c | struct sk_buff *skb; |
skb | 914 | drivers/net/depca.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 915 | drivers/net/depca.c | if (skb != NULL) { |
skb | 917 | drivers/net/depca.c | skb_reserve(skb,2); /* 16 byte align the IP header */ |
skb | 918 | drivers/net/depca.c | buf = skb_put(skb,pkt_len); |
skb | 919 | drivers/net/depca.c | skb->dev = dev; |
skb | 932 | drivers/net/depca.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 933 | drivers/net/depca.c | netif_rx(skb); |
skb | 1529 | drivers/net/depca.c | static int load_packet(struct device *dev, struct sk_buff *skb) |
skb | 1535 | drivers/net/depca.c | end = (entry + (skb->len - 1) / TX_BUFF_SZ) & lp->txRingMask; |
skb | 1543 | drivers/net/depca.c | memcpy_toio(lp->tx_memcpy[entry], skb->data, len); |
skb | 1544 | drivers/net/depca.c | memcpy_toio(lp->tx_memcpy[0], skb->data + len, skb->len - len); |
skb | 1546 | drivers/net/depca.c | memcpy_toio(lp->tx_memcpy[entry], skb->data, skb->len); |
skb | 1550 | drivers/net/depca.c | len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len; |
skb | 56 | drivers/net/dummy.c | static int dummy_xmit(struct sk_buff *skb, struct device *dev); |
skb | 105 | drivers/net/dummy.c | dummy_xmit(struct sk_buff *skb, struct device *dev) |
skb | 111 | drivers/net/dummy.c | if (skb == NULL || dev == NULL) |
skb | 114 | drivers/net/dummy.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 102 | drivers/net/e2100.c | struct sk_buff *skb, int ring_offset); |
skb | 318 | drivers/net/e2100.c | e21_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 326 | drivers/net/e2100.c | eth_io_copy_and_sum(skb, dev->mem_start + (ring_offset & 0xff), count, 0); |
skb | 137 | drivers/net/eepro.c | static int eepro_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 595 | drivers/net/eepro.c | eepro_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 633 | drivers/net/eepro.c | if (skb == NULL) { |
skb | 642 | drivers/net/eepro.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 643 | drivers/net/eepro.c | unsigned char *buf = skb->data; |
skb | 649 | drivers/net/eepro.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 1046 | drivers/net/eepro.c | struct sk_buff *skb; |
skb | 1049 | drivers/net/eepro.c | skb = dev_alloc_skb(rcv_size+2); |
skb | 1050 | drivers/net/eepro.c | if (skb == NULL) { |
skb | 1055 | drivers/net/eepro.c | skb->dev = dev; |
skb | 1056 | drivers/net/eepro.c | skb_reserve(skb,2); |
skb | 1058 | drivers/net/eepro.c | insw(ioaddr+IO_PORT, skb_put(skb,rcv_size), (rcv_size + 1) >> 1); |
skb | 1060 | drivers/net/eepro.c | skb->protocol = eth_type_trans(skb,dev); |
skb | 1061 | drivers/net/eepro.c | netif_rx(skb); |
skb | 288 | drivers/net/eexpress.c | static int eexp_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 450 | drivers/net/eexpress.c | eexp_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 485 | drivers/net/eexpress.c | if (skb == NULL) { |
skb | 494 | drivers/net/eexpress.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 495 | drivers/net/eexpress.c | unsigned char *buf = skb->data; |
skb | 505 | drivers/net/eexpress.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 930 | drivers/net/eexpress.c | struct sk_buff *skb; |
skb | 933 | drivers/net/eexpress.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 934 | drivers/net/eexpress.c | if (skb == NULL) { |
skb | 939 | drivers/net/eexpress.c | skb->dev = dev; |
skb | 940 | drivers/net/eexpress.c | skb_reserve(skb,2); |
skb | 944 | drivers/net/eexpress.c | insw(ioaddr, skb_put(skb,pkt_len), (pkt_len + 1) >> 1); |
skb | 946 | drivers/net/eexpress.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 947 | drivers/net/eexpress.c | netif_rx(skb); |
skb | 149 | drivers/net/eql.c | static int eql_slave_xmit(struct sk_buff *skb, struct device *dev); /* */ |
skb | 152 | drivers/net/eql.c | static int eql_header(struct sk_buff *skb, struct device *dev, |
skb | 156 | drivers/net/eql.c | unsigned long raddr, struct sk_buff *skb); /* */ |
skb | 361 | drivers/net/eql.c | static int eql_slave_xmit(struct sk_buff *skb, struct device *dev) |
skb | 367 | drivers/net/eql.c | if (skb == NULL) |
skb | 380 | drivers/net/eql.c | dev->name, eql_number_slaves (eql->queue), skb->len, |
skb | 383 | drivers/net/eql.c | dev_queue_xmit (skb, slave_dev, 1); |
skb | 385 | drivers/net/eql.c | slave->bytes_queued += skb->len; |
skb | 395 | drivers/net/eql.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 408 | drivers/net/eql.c | static int eql_header(struct sk_buff *skb, struct device *dev, |
skb | 417 | drivers/net/eql.c | unsigned long raddr, struct sk_buff *skb) |
skb | 327 | drivers/net/eth16i.c | static int eth16i_tx(struct sk_buff *skb, struct device *dev); |
skb | 859 | drivers/net/eth16i.c | static int eth16i_tx(struct sk_buff *skb, struct device *dev) |
skb | 918 | drivers/net/eth16i.c | if(skb == NULL) { |
skb | 932 | drivers/net/eth16i.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 933 | drivers/net/eth16i.c | unsigned char *buf = skb->data; |
skb | 973 | drivers/net/eth16i.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1003 | drivers/net/eth16i.c | struct sk_buff *skb; |
skb | 1016 | drivers/net/eth16i.c | skb = dev_alloc_skb(pkt_len + 3); |
skb | 1017 | drivers/net/eth16i.c | if( skb == NULL ) { |
skb | 1025 | drivers/net/eth16i.c | skb->dev = dev; |
skb | 1026 | drivers/net/eth16i.c | skb_reserve(skb,2); |
skb | 1034 | drivers/net/eth16i.c | insw(ioaddr + DATAPORT, skb_put(skb, pkt_len), (pkt_len + 1) >> 1); |
skb | 1036 | drivers/net/eth16i.c | unsigned char *buf = skb_put(skb, pkt_len); |
skb | 1051 | drivers/net/eth16i.c | skb->protocol=eth_type_trans(skb, dev); |
skb | 1052 | drivers/net/eth16i.c | netif_rx(skb); |
skb | 1059 | drivers/net/eth16i.c | printk(" %02x", skb->data[i]); |
skb | 288 | drivers/net/ewrk3.c | static int ewrk3_queue_pkt(struct sk_buff *skb, struct device *dev); |
skb | 720 | drivers/net/ewrk3.c | ewrk3_queue_pkt(struct sk_buff *skb, struct device *dev) |
skb | 756 | drivers/net/ewrk3.c | } else if (skb == NULL) { |
skb | 758 | drivers/net/ewrk3.c | } else if (skb->len > 0) { |
skb | 806 | drivers/net/ewrk3.c | u_char *p = skb->data; |
skb | 809 | drivers/net/ewrk3.c | outb((char)(skb->len & 0xff), EWRK3_DATA); |
skb | 810 | drivers/net/ewrk3.c | outb((char)((skb->len >> 8) & 0xff), EWRK3_DATA); |
skb | 812 | drivers/net/ewrk3.c | for (i=0; i<skb->len; i++) { |
skb | 819 | drivers/net/ewrk3.c | writeb((char)(skb->len & 0xff), (char *)buf);/* length (16 bit xfer)*/ |
skb | 822 | drivers/net/ewrk3.c | writeb((char)(((skb->len >> 8) & 0xff) | XCT), (char *)buf); |
skb | 826 | drivers/net/ewrk3.c | writeb(0x00, (char *)(buf + skb->len)); /* Write the XCT flag */ |
skb | 827 | drivers/net/ewrk3.c | memcpy_toio(buf, skb->data, PRELOAD);/* Write PRELOAD bytes*/ |
skb | 829 | drivers/net/ewrk3.c | memcpy_toio(buf+PRELOAD, skb->data+PRELOAD, skb->len-PRELOAD); |
skb | 830 | drivers/net/ewrk3.c | writeb(0xff, (char *)(buf + skb->len)); /* Write the XCT flag */ |
skb | 832 | drivers/net/ewrk3.c | writeb((char)((skb->len >> 8) & 0xff), (char *)buf); |
skb | 836 | drivers/net/ewrk3.c | memcpy_toio((char *)buf, skb->data, skb->len);/* Write data bytes */ |
skb | 842 | drivers/net/ewrk3.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 998 | drivers/net/ewrk3.c | struct sk_buff *skb; |
skb | 1000 | drivers/net/ewrk3.c | if ((skb = dev_alloc_skb(pkt_len+2)) != NULL) { |
skb | 1002 | drivers/net/ewrk3.c | skb->dev = dev; |
skb | 1003 | drivers/net/ewrk3.c | skb_reserve(skb,2); /* Align to 16 bytes */ |
skb | 1004 | drivers/net/ewrk3.c | p = skb_put(skb,pkt_len); |
skb | 1019 | drivers/net/ewrk3.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 1020 | drivers/net/ewrk3.c | netif_rx(skb); |
skb | 1032 | drivers/net/ewrk3.c | p = skb->data; /* Look at the dest addr */ |
skb | 102 | drivers/net/hp-plus.c | struct sk_buff *skb, int ring_offset); |
skb | 108 | drivers/net/hp-plus.c | struct sk_buff *skb, int ring_offset); |
skb | 340 | drivers/net/hp-plus.c | hpp_io_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 343 | drivers/net/hp-plus.c | char *buf = skb->data; |
skb | 367 | drivers/net/hp-plus.c | hpp_mem_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 380 | drivers/net/hp-plus.c | memcpy_fromio(skb->data, dev->mem_start, count); |
skb | 66 | drivers/net/hp.c | struct sk_buff *skb , int ring_offset); |
skb | 276 | drivers/net/hp.c | hp_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 281 | drivers/net/hp.c | char *buf = skb->data; |
skb | 193 | drivers/net/hp100.c | static int hp100_start_xmit( struct sk_buff *skb, struct device *dev ); |
skb | 574 | drivers/net/hp100.c | static int hp100_start_xmit( struct sk_buff *skb, struct device *dev ) |
skb | 595 | drivers/net/hp100.c | if ( ( i = ( hp100_inl( TX_MEM_FREE ) & ~0x7fffffff ) ) < skb -> len + 16 ) |
skb | 639 | drivers/net/hp100.c | if ( skb == NULL ) |
skb | 645 | drivers/net/hp100.c | if ( skb -> len <= 0 ) return 0; |
skb | 658 | drivers/net/hp100.c | printk( "hp100_start_xmit: irq_status = 0x%x, len = %d\n", val, (int)skb -> len ); |
skb | 660 | drivers/net/hp100.c | ok_flag = skb -> len >= HP100_MIN_PACKET_SIZE; |
skb | 661 | drivers/net/hp100.c | i = ok_flag ? skb -> len : HP100_MIN_PACKET_SIZE; |
skb | 668 | drivers/net/hp100.c | memcpy( lp -> mem_ptr_virt, skb -> data, skb -> len ); |
skb | 670 | drivers/net/hp100.c | memset( lp -> mem_ptr_virt, 0, HP100_MIN_PACKET_SIZE - skb -> len ); |
skb | 674 | drivers/net/hp100.c | memcpy_toio( lp -> mem_ptr_phys, skb -> data, skb -> len ); |
skb | 676 | drivers/net/hp100.c | memset_io( lp -> mem_ptr_phys, 0, HP100_MIN_PACKET_SIZE - skb -> len ); |
skb | 681 | drivers/net/hp100.c | outsl( ioaddr + HP100_REG_DATA32, skb -> data, ( skb -> len + 3 ) >> 2 ); |
skb | 683 | drivers/net/hp100.c | for ( i = ( skb -> len + 3 ) & ~3; i < HP100_MIN_PACKET_SIZE; i += 4 ) |
skb | 691 | drivers/net/hp100.c | dev_kfree_skb( skb, FREE_WRITE ); |
skb | 710 | drivers/net/hp100.c | struct sk_buff *skb; |
skb | 753 | drivers/net/hp100.c | skb = dev_alloc_skb( pkt_len ); |
skb | 754 | drivers/net/hp100.c | if ( skb == NULL ) |
skb | 765 | drivers/net/hp100.c | skb -> dev = dev; |
skb | 766 | drivers/net/hp100.c | ptr = (u_char *)skb_put( skb, pkt_len ); |
skb | 776 | drivers/net/hp100.c | skb -> protocol = eth_type_trans( skb, dev ); |
skb | 777 | drivers/net/hp100.c | netif_rx( skb ); |
skb | 148 | drivers/net/ibmtr.c | static int tok_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 1139 | drivers/net/ibmtr.c | struct sk_buff *skb; |
skb | 1183 | drivers/net/ibmtr.c | if(!(skb=dev_alloc_skb(ntohs(rec_req->frame_len)-lan_hdr_len+sizeof(struct trh_hdr)))) { |
skb | 1191 | drivers/net/ibmtr.c | skb_put(skb,ntohs(rec_req->frame_len)-lan_hdr_len+sizeof(struct trh_hdr)); |
skb | 1192 | drivers/net/ibmtr.c | skb->dev=dev; |
skb | 1199 | drivers/net/ibmtr.c | data=skb->data; |
skb | 1229 | drivers/net/ibmtr.c | skb->protocol=tr_type_trans(skb,dev); |
skb | 1230 | drivers/net/ibmtr.c | netif_rx(skb); |
skb | 1235 | drivers/net/ibmtr.c | static int tok_send_packet(struct sk_buff *skb, struct device *dev) { |
skb | 1256 | drivers/net/ibmtr.c | if(skb==NULL) { |
skb | 1266 | drivers/net/ibmtr.c | ti->current_skb=skb; /* save skb. We will need it when the adapter |
skb | 255 | drivers/net/lance.c | static int lance_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 705 | drivers/net/lance.c | lance_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 747 | drivers/net/lance.c | if (skb == NULL) { |
skb | 752 | drivers/net/lance.c | if (skb->len <= 0) |
skb | 787 | drivers/net/lance.c | -(ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN); |
skb | 789 | drivers/net/lance.c | lp->tx_ring[entry].length = -skb->len; |
skb | 795 | drivers/net/lance.c | if ((int)(skb->data) + skb->len > 0x01000000) { |
skb | 798 | drivers/net/lance.c | dev->name, (int)(skb->data)); |
skb | 799 | drivers/net/lance.c | memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len); |
skb | 802 | drivers/net/lance.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 804 | drivers/net/lance.c | lp->tx_skbuff[entry] = skb; |
skb | 805 | drivers/net/lance.c | lp->tx_ring[entry].base = (int)(skb->data) | 0x83000000; |
skb | 984 | drivers/net/lance.c | struct sk_buff *skb; |
skb | 993 | drivers/net/lance.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 994 | drivers/net/lance.c | if (skb == NULL) |
skb | 1009 | drivers/net/lance.c | skb->dev = dev; |
skb | 1010 | drivers/net/lance.c | skb_reserve(skb,2); /* 16 byte align */ |
skb | 1011 | drivers/net/lance.c | skb_put(skb,pkt_len); /* Make room */ |
skb | 1012 | drivers/net/lance.c | eth_copy_and_sum(skb, |
skb | 1015 | drivers/net/lance.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 1016 | drivers/net/lance.c | netif_rx(skb); |
skb | 56 | drivers/net/loopback.c | static int loopback_xmit(struct sk_buff *skb, struct device *dev) |
skb | 61 | drivers/net/loopback.c | if (skb == NULL || dev == NULL) |
skb | 69 | drivers/net/loopback.c | if(skb->free==0) |
skb | 71 | drivers/net/loopback.c | struct sk_buff *skb2=skb; |
skb | 72 | drivers/net/loopback.c | skb=skb_clone(skb, GFP_ATOMIC); /* Clone the buffer */ |
skb | 73 | drivers/net/loopback.c | if(skb==NULL) |
skb | 78 | drivers/net/loopback.c | else if(skb->sk) |
skb | 84 | drivers/net/loopback.c | skb->sk->wmem_alloc-=skb->truesize; |
skb | 85 | drivers/net/loopback.c | skb->sk->write_space(skb->sk); |
skb | 88 | drivers/net/loopback.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 89 | drivers/net/loopback.c | skb->dev=dev; |
skb | 91 | drivers/net/loopback.c | skb->ip_summed = CHECKSUM_UNNECESSARY; |
skb | 93 | drivers/net/loopback.c | netif_rx(skb); |
skb | 95 | drivers/net/loopback.c | skb_device_unlock(skb); |
skb | 109 | drivers/net/ne.c | struct sk_buff *skb, int ring_offset); |
skb | 498 | drivers/net/ne.c | ne_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 504 | drivers/net/ne.c | char *buf = skb->data; |
skb | 804 | drivers/net/ni52.c | struct sk_buff *skb; |
skb | 818 | drivers/net/ni52.c | skb = (struct sk_buff *) dev_alloc_skb(totlen+2); |
skb | 819 | drivers/net/ni52.c | if(skb != NULL) |
skb | 821 | drivers/net/ni52.c | skb->dev = dev; |
skb | 822 | drivers/net/ni52.c | skb_reserve(skb,2); /* 16 byte alignment */ |
skb | 823 | drivers/net/ni52.c | memcpy(skb_put(skb,totlen),(char *) p->base+(unsigned long) rbd->buffer, totlen); |
skb | 824 | drivers/net/ni52.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 825 | drivers/net/ni52.c | netif_rx(skb); |
skb | 940 | drivers/net/ni52.c | static int ni52_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 984 | drivers/net/ni52.c | if(skb == NULL) |
skb | 990 | drivers/net/ni52.c | if (skb->len <= 0) |
skb | 992 | drivers/net/ni52.c | if(skb->len > XMIT_BUFF_SIZE) |
skb | 994 | drivers/net/ni52.c | printk("%s: Sorry, max. framelength is %d bytes. The length of your frame is %ld bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len); |
skb | 1002 | drivers/net/ni52.c | memcpy((char *)p->xmit_cbuffs[p->xmit_count],(char *)(skb->data),skb->len); |
skb | 1003 | drivers/net/ni52.c | len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; |
skb | 1017 | drivers/net/ni52.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 1037 | drivers/net/ni52.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 1057 | drivers/net/ni52.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 122 | drivers/net/ni65.c | static int ni65_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 455 | drivers/net/ni65.c | struct sk_buff *skb; |
skb | 465 | drivers/net/ni65.c | skb = (struct sk_buff *) p->tmdbufs[p->tmdlast]; |
skb | 466 | drivers/net/ni65.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 503 | drivers/net/ni65.c | struct sk_buff *skb,*skb1; |
skb | 532 | drivers/net/ni65.c | skb = dev_alloc_skb(R_BUF_SIZE); |
skb | 533 | drivers/net/ni65.c | if(skb != NULL) |
skb | 535 | drivers/net/ni65.c | if( (unsigned long) (skb->data + R_BUF_SIZE) & 0xff000000) { |
skb | 536 | drivers/net/ni65.c | memcpy(skb_put(skb,len),p->recv_skb[p->rmdnum]->data,len); |
skb | 537 | drivers/net/ni65.c | skb1 = skb; |
skb | 541 | drivers/net/ni65.c | p->recv_skb[p->rmdnum] = skb; |
skb | 567 | drivers/net/ni65.c | static int ni65_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 584 | drivers/net/ni65.c | if(skb == NULL) |
skb | 590 | drivers/net/ni65.c | if (skb->len <= 0) |
skb | 605 | drivers/net/ni65.c | short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 610 | drivers/net/ni65.c | tmdp->u.buffer = (unsigned long) (skb->data); |
skb | 611 | drivers/net/ni65.c | p->tmdbufs[p->tmdnum] = skb; |
skb | 613 | drivers/net/ni65.c | memcpy((char *) (tmdp->u.buffer & 0x00ffffff),(char *)skb->data,skb->len); |
skb | 614 | drivers/net/ni65.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 151 | drivers/net/pi2.c | static int pi_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 208 | drivers/net/pi2.c | static void hardware_send_packet(struct pi_local *lp, struct sk_buff *skb) |
skb | 220 | drivers/net/pi2.c | skb_queue_tail(&lp->sndq, skb); |
skb | 338 | drivers/net/pi2.c | static void free_p(struct sk_buff *skb) |
skb | 340 | drivers/net/pi2.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 510 | drivers/net/pi2.c | struct sk_buff *skb; |
skb | 554 | drivers/net/pi2.c | skb = dev_alloc_skb(sksize); |
skb | 555 | drivers/net/pi2.c | if (skb == NULL) { |
skb | 561 | drivers/net/pi2.c | skb->dev = dev; |
skb | 564 | drivers/net/pi2.c | cfix=skb_put(skb,pkt_len); |
skb | 569 | drivers/net/pi2.c | skb->protocol=htons(ETH_P_AX25); |
skb | 570 | drivers/net/pi2.c | skb->mac.raw=skb->data; |
skb | 571 | drivers/net/pi2.c | IS_SKB(skb); |
skb | 572 | drivers/net/pi2.c | netif_rx(skb); |
skb | 585 | drivers/net/pi2.c | struct sk_buff *skb; |
skb | 643 | drivers/net/pi2.c | skb = dev_alloc_skb(sksize); |
skb | 644 | drivers/net/pi2.c | if (skb == NULL) { |
skb | 650 | drivers/net/pi2.c | skb->dev = dev; |
skb | 653 | drivers/net/pi2.c | cfix=skb_put(skb,pkt_len); |
skb | 657 | drivers/net/pi2.c | skb->protocol=ntohs(ETH_P_AX25); |
skb | 658 | drivers/net/pi2.c | skb->mac.raw=skb->data; |
skb | 659 | drivers/net/pi2.c | IS_SKB(skb); |
skb | 660 | drivers/net/pi2.c | netif_rx(skb); |
skb | 1077 | drivers/net/pi2.c | static int pi_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 1080 | drivers/net/pi2.c | return ax25_encapsulate(skb, dev, type, daddr, saddr, len); |
skb | 1085 | drivers/net/pi2.c | struct sk_buff *skb) |
skb | 1087 | drivers/net/pi2.c | return ax25_rebuild_header(buff, dev, raddr, skb); |
skb | 1493 | drivers/net/pi2.c | static int pi_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 1500 | drivers/net/pi2.c | if (skb == NULL) { |
skb | 1504 | drivers/net/pi2.c | hardware_send_packet(lp, skb); |
skb | 146 | drivers/net/plip.c | unsigned long raddr, struct sk_buff *skb); |
skb | 147 | drivers/net/plip.c | static int plip_tx_packet(struct sk_buff *skb, struct device *dev); |
skb | 197 | drivers/net/plip.c | struct sk_buff *skb; |
skb | 212 | drivers/net/plip.c | unsigned long raddr, struct sk_buff *skb); |
skb | 415 | drivers/net/plip.c | if (rcv->skb) { |
skb | 416 | drivers/net/plip.c | rcv->skb->free = 1; |
skb | 417 | drivers/net/plip.c | kfree_skb(rcv->skb, FREE_READ); |
skb | 418 | drivers/net/plip.c | rcv->skb = NULL; |
skb | 421 | drivers/net/plip.c | if (snd->skb) { |
skb | 422 | drivers/net/plip.c | dev_kfree_skb(snd->skb, FREE_WRITE); |
skb | 423 | drivers/net/plip.c | snd->skb = NULL; |
skb | 543 | drivers/net/plip.c | rcv->skb = dev_alloc_skb(rcv->length.h); |
skb | 544 | drivers/net/plip.c | if (rcv->skb == NULL) { |
skb | 548 | drivers/net/plip.c | skb_put(rcv->skb,rcv->length.h); |
skb | 549 | drivers/net/plip.c | rcv->skb->dev = dev; |
skb | 555 | drivers/net/plip.c | lbuf = rcv->skb->data; |
skb | 580 | drivers/net/plip.c | rcv->skb->protocol=eth_type_trans(rcv->skb, dev); |
skb | 581 | drivers/net/plip.c | netif_rx(rcv->skb); |
skb | 583 | drivers/net/plip.c | rcv->skb = NULL; |
skb | 667 | drivers/net/plip.c | if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) { |
skb | 670 | drivers/net/plip.c | snd->skb = NULL; |
skb | 742 | drivers/net/plip.c | dev_kfree_skb(snd->skb, FREE_WRITE); |
skb | 749 | drivers/net/plip.c | snd->skb = NULL; |
skb | 858 | drivers/net/plip.c | struct sk_buff *skb) |
skb | 865 | drivers/net/plip.c | return nl->orig_rebuild_header(buff, dev, dst, skb); |
skb | 880 | drivers/net/plip.c | plip_tx_packet(struct sk_buff *skb, struct device *dev) |
skb | 891 | drivers/net/plip.c | if (skb == NULL) { |
skb | 901 | drivers/net/plip.c | if (skb->len > dev->mtu + dev->hard_header_len) { |
skb | 902 | drivers/net/plip.c | printk("%s: packet too big, %d.\n", dev->name, (int)skb->len); |
skb | 912 | drivers/net/plip.c | snd->skb = skb; |
skb | 913 | drivers/net/plip.c | snd->length.h = skb->len; |
skb | 959 | drivers/net/plip.c | nl->rcv_data.skb = nl->snd_data.skb = NULL; |
skb | 994 | drivers/net/plip.c | if (snd->skb) { |
skb | 995 | drivers/net/plip.c | dev_kfree_skb(snd->skb, FREE_WRITE); |
skb | 996 | drivers/net/plip.c | snd->skb = NULL; |
skb | 999 | drivers/net/plip.c | if (rcv->skb) { |
skb | 1000 | drivers/net/plip.c | rcv->skb->free = 1; |
skb | 1001 | drivers/net/plip.c | kfree_skb(rcv->skb, FREE_READ); |
skb | 1002 | drivers/net/plip.c | rcv->skb = NULL; |
skb | 101 | drivers/net/ppp.c | #define skb_data(skb) ((unsigned char *) (skb)->data) |
skb | 195 | drivers/net/ppp.c | #define skb_put(skb,count) skb_data(skb) |
skb | 204 | drivers/net/ppp.c | unsigned len, struct sk_buff *skb); |
skb | 218 | drivers/net/ppp.c | sk_buff *skb, void *saddr, void *daddr); |
skb | 219 | drivers/net/ppp.c | static int ppp_dev_output (struct protocol *self, sk_buff *skb, int type, |
skb | 1254 | drivers/net/ppp.c | sk_buff *skb = dev_alloc_skb (count); |
skb | 1258 | drivers/net/ppp.c | if (skb == NULL) { |
skb | 1268 | drivers/net/ppp.c | skb->dev = ppp2dev (ppp); /* We are the device */ |
skb | 1270 | drivers/net/ppp.c | skb->len = count; |
skb | 1272 | drivers/net/ppp.c | skb->protocol = proto; |
skb | 1273 | drivers/net/ppp.c | skb->mac.raw = skb_data(skb); |
skb | 1275 | drivers/net/ppp.c | memcpy (skb_put(skb,count), data, count); /* move data */ |
skb | 1279 | drivers/net/ppp.c | skb->free = 1; |
skb | 1281 | drivers/net/ppp.c | netif_rx (skb); |
skb | 3116 | drivers/net/ppp.c | ppp_dev_xmit (sk_buff *skb, struct device *dev) |
skb | 3125 | drivers/net/ppp.c | if (skb == NULL) { |
skb | 3134 | drivers/net/ppp.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 3143 | drivers/net/ppp.c | dev->name, skb); |
skb | 3152 | drivers/net/ppp.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 3158 | drivers/net/ppp.c | len = skb->len; |
skb | 3159 | drivers/net/ppp.c | data = skb_data(skb); |
skb | 3166 | drivers/net/ppp.c | switch (skb->protocol) { |
skb | 3176 | drivers/net/ppp.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 3187 | drivers/net/ppp.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 3234 | drivers/net/ppp.c | sk_buff *skb, void *saddr, void *daddr) |
skb | 3236 | drivers/net/ppp.c | return protocol_pass_demultiplex(self, NULL, skb, NULL, NULL); |
skb | 3239 | drivers/net/ppp.c | static int ppp_dev_output (struct protocol *self, sk_buff *skb, int type, |
skb | 3242 | drivers/net/ppp.c | if(skb->dev==NULL) |
skb | 3245 | drivers/net/ppp.c | kfree_skb(skb, FREE_WRITE); |
skb | 3248 | drivers/net/ppp.c | dev_queue_xmit(skb, skb->dev, skb->priority); |
skb | 3276 | drivers/net/ppp.c | ppp_dev_type (sk_buff *skb, struct device *dev) |
skb | 3285 | drivers/net/ppp.c | unsigned len, struct sk_buff *skb) |
skb | 3287 | drivers/net/ppp.c | static int ppp_dev_header (sk_buff *skb, struct device *dev, |
skb | 3297 | drivers/net/ppp.c | sk_buff *skb) |
skb | 122 | drivers/net/pt.c | static int pt_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 177 | drivers/net/pt.c | static void hardware_send_packet(struct pt_local *lp, struct sk_buff *skb) |
skb | 185 | drivers/net/pt.c | ptr = skb->data; |
skb | 186 | drivers/net/pt.c | if (ptr[0] != 0 && skb->len >= 2) |
skb | 188 | drivers/net/pt.c | printk("Rx KISS... Control = %d, value = %d.\n", ptr[0], (skb->len > 1? ptr[1] : -1)); |
skb | 231 | drivers/net/pt.c | skb_queue_tail(&lp->sndq, skb); |
skb | 318 | drivers/net/pt.c | static void free_p(struct sk_buff *skb) |
skb | 320 | drivers/net/pt.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 339 | drivers/net/pt.c | static int pt_header (struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 342 | drivers/net/pt.c | return ax25_encapsulate(skb, dev, type, daddr, saddr, len); |
skb | 348 | drivers/net/pt.c | struct sk_buff *skb) |
skb | 350 | drivers/net/pt.c | return ax25_rebuild_header(buff, dev, raddr, skb); |
skb | 972 | drivers/net/pt.c | static int pt_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 982 | drivers/net/pt.c | if (skb == NULL) { |
skb | 986 | drivers/net/pt.c | hardware_send_packet(lp, skb); |
skb | 1271 | drivers/net/pt.c | struct sk_buff *skb; |
skb | 1392 | drivers/net/pt.c | skb = dev_alloc_skb(sksize); |
skb | 1393 | drivers/net/pt.c | if (skb == NULL) |
skb | 1400 | drivers/net/pt.c | skb->dev = dev; |
skb | 1403 | drivers/net/pt.c | cfix=skb_put(skb,pkt_len); |
skb | 1410 | drivers/net/pt.c | skb->protocol = ntohs(ETH_P_AX25); |
skb | 1411 | drivers/net/pt.c | skb->mac.raw=skb->data; |
skb | 1412 | drivers/net/pt.c | IS_SKB(skb); |
skb | 1413 | drivers/net/pt.c | netif_rx(skb); |
skb | 1810 | drivers/net/pt.c | struct sk_buff *skb; |
skb | 1815 | drivers/net/pt.c | skb = dev_alloc_skb(2); |
skb | 1816 | drivers/net/pt.c | if (skb == NULL) |
skb | 1821 | drivers/net/pt.c | skb->dev = dev; |
skb | 1822 | drivers/net/pt.c | cfix = skb_put(skb, 2); |
skb | 1825 | drivers/net/pt.c | skb->protocol=htons(ETH_P_AX25); |
skb | 1826 | drivers/net/pt.c | skb->mac.raw=skb->data; |
skb | 1827 | drivers/net/pt.c | IS_SKB(skb); |
skb | 1828 | drivers/net/pt.c | netif_rx(skb); |
skb | 83 | drivers/net/seeq8005.c | static int seeq8005_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 374 | drivers/net/seeq8005.c | seeq8005_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 395 | drivers/net/seeq8005.c | if (skb == NULL) { |
skb | 405 | drivers/net/seeq8005.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 406 | drivers/net/seeq8005.c | unsigned char *buf = skb->data; |
skb | 411 | drivers/net/seeq8005.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 533 | drivers/net/seeq8005.c | struct sk_buff *skb; |
skb | 536 | drivers/net/seeq8005.c | skb = dev_alloc_skb(pkt_len); |
skb | 537 | drivers/net/seeq8005.c | if (skb == NULL) { |
skb | 542 | drivers/net/seeq8005.c | skb->dev = dev; |
skb | 543 | drivers/net/seeq8005.c | skb_reserve(skb, 2); /* align data on 16 byte */ |
skb | 544 | drivers/net/seeq8005.c | buf = skb_put(skb,pkt_len); |
skb | 557 | drivers/net/seeq8005.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 558 | drivers/net/seeq8005.c | netif_rx(skb); |
skb | 489 | drivers/net/sk_g16.c | static int SK_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 1192 | drivers/net/sk_g16.c | static int SK_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 1223 | drivers/net/sk_g16.c | if (skb == NULL) |
skb | 1249 | drivers/net/sk_g16.c | short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 1257 | drivers/net/sk_g16.c | memcpy((char *) (tmdp->u.buffer & 0x00ffffff), (char *)skb->data, |
skb | 1258 | drivers/net/sk_g16.c | skb->len); |
skb | 1289 | drivers/net/sk_g16.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1570 | drivers/net/sk_g16.c | struct sk_buff *skb; |
skb | 1572 | drivers/net/sk_g16.c | skb = dev_alloc_skb(len+2); /* allocate socket buffer */ |
skb | 1574 | drivers/net/sk_g16.c | if (skb == NULL) /* Could not get mem ? */ |
skb | 1592 | drivers/net/sk_g16.c | skb->dev = dev; |
skb | 1593 | drivers/net/sk_g16.c | skb_reserve(skb,2); /* Align IP header on 16 byte boundary */ |
skb | 1602 | drivers/net/sk_g16.c | memcpy(skb_put(skb,len), (unsigned char *) (rmdp->u.buffer & 0x00ffffff), |
skb | 1613 | drivers/net/sk_g16.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 1614 | drivers/net/sk_g16.c | netif_rx(skb); /* queue packet and mark it for processing */ |
skb | 102 | drivers/net/skeleton.c | static int net_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 344 | drivers/net/skeleton.c | net_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 369 | drivers/net/skeleton.c | if (skb == NULL) { |
skb | 380 | drivers/net/skeleton.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 381 | drivers/net/skeleton.c | unsigned char *buf = skb->data; |
skb | 386 | drivers/net/skeleton.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 459 | drivers/net/skeleton.c | struct sk_buff *skb; |
skb | 461 | drivers/net/skeleton.c | skb = dev_alloc_skb(pkt_len); |
skb | 462 | drivers/net/skeleton.c | if (skb == NULL) { |
skb | 468 | drivers/net/skeleton.c | skb->dev = dev; |
skb | 471 | drivers/net/skeleton.c | memcpy(skb_put(skb,pkt_len), (void*)dev->rmem_start, |
skb | 474 | drivers/net/skeleton.c | insw(ioaddr, skb->data, (pkt_len + 1) >> 1); |
skb | 476 | drivers/net/skeleton.c | netif_rx(skb); |
skb | 346 | drivers/net/slip.c | struct sk_buff *skb; |
skb | 383 | drivers/net/slip.c | skb = dev_alloc_skb(count); |
skb | 384 | drivers/net/slip.c | if (skb == NULL) { |
skb | 389 | drivers/net/slip.c | skb->dev = sl->dev; |
skb | 390 | drivers/net/slip.c | memcpy(skb_put(skb,count), sl->rbuff, count); |
skb | 391 | drivers/net/slip.c | skb->mac.raw=skb->data; |
skb | 393 | drivers/net/slip.c | skb->protocol=htons(ETH_P_AX25); |
skb | 395 | drivers/net/slip.c | skb->protocol=htons(ETH_P_IP); |
skb | 396 | drivers/net/slip.c | netif_rx(skb); |
skb | 487 | drivers/net/slip.c | sl_xmit(struct sk_buff *skb, struct device *dev) |
skb | 526 | drivers/net/slip.c | if (skb != NULL) { |
skb | 528 | drivers/net/slip.c | sl_encaps(sl, skb->data, skb->len); |
skb | 529 | drivers/net/slip.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 539 | drivers/net/slip.c | sl_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 547 | drivers/net/slip.c | return ax25_encapsulate(skb, dev, type, daddr, saddr, len); |
skb | 558 | drivers/net/slip.c | struct sk_buff *skb) |
skb | 565 | drivers/net/slip.c | return ax25_rebuild_header(buff, dev, raddr, skb); |
skb | 73 | drivers/net/smc-ultra.c | struct sk_buff *skb, int ring_offset); |
skb | 286 | drivers/net/smc-ultra.c | ultra_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 296 | drivers/net/smc-ultra.c | memcpy_fromio(skb->data, xfer_start, semi_count); |
skb | 298 | drivers/net/smc-ultra.c | memcpy_fromio(skb->data + semi_count, dev->rmem_start, count); |
skb | 301 | drivers/net/smc-ultra.c | eth_io_copy_and_sum(skb, xfer_start, count, 0); |
skb | 347 | drivers/net/sunlance.c | struct sk_buff *skb; |
skb | 350 | drivers/net/sunlance.c | skb = dev_alloc_skb (pkt_len+2); |
skb | 351 | drivers/net/sunlance.c | if (skb == NULL){ |
skb | 358 | drivers/net/sunlance.c | skb->dev = dev; |
skb | 359 | drivers/net/sunlance.c | skb_reserve (skb, 2); /* 16 byte align */ |
skb | 360 | drivers/net/sunlance.c | buf = skb_put (skb, pkt_len); /* make room */ |
skb | 362 | drivers/net/sunlance.c | skb->protocol = eth_type_trans (skb,dev); |
skb | 363 | drivers/net/sunlance.c | netif_rx (skb); |
skb | 387 | drivers/net/sunlance.c | struct sk_buff *skb; |
skb | 390 | drivers/net/sunlance.c | skb = dev_alloc_skb (pkt_len+2); |
skb | 391 | drivers/net/sunlance.c | if (skb == NULL){ |
skb | 399 | drivers/net/sunlance.c | skb->dev = dev; |
skb | 400 | drivers/net/sunlance.c | skb_reserve (skb, 2); /* 16 byte align */ |
skb | 401 | drivers/net/sunlance.c | buf = skb_put (skb, pkt_len); /* make room */ |
skb | 403 | drivers/net/sunlance.c | skb->protocol = eth_type_trans (skb,dev); |
skb | 404 | drivers/net/sunlance.c | netif_rx (skb); |
skb | 602 | drivers/net/sunlance.c | static int lance_start_xmit (struct sk_buff *skb, struct device *dev) |
skb | 625 | drivers/net/sunlance.c | if (skb == NULL){ |
skb | 631 | drivers/net/sunlance.c | if (skb->len <= 0){ |
skb | 632 | drivers/net/sunlance.c | printk ("skb len is %ld\n", skb->len); |
skb | 644 | drivers/net/sunlance.c | skblen = skb->len; |
skb | 654 | drivers/net/sunlance.c | printk ("%2.2x ", skb->data [i]); |
skb | 662 | drivers/net/sunlance.c | memcpy ((char *)&ib->tx_buf [entry][0], skb->data, skblen); |
skb | 678 | drivers/net/sunlance.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 156 | drivers/net/tulip.c | static int tulip_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 376 | drivers/net/tulip.c | tulip_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 406 | drivers/net/tulip.c | if (skb == NULL || skb->len <= 0) { |
skb | 428 | drivers/net/tulip.c | tp->tx_skbuff[entry] = skb; |
skb | 429 | drivers/net/tulip.c | tp->tx_ring[entry].length = skb->len | |
skb | 431 | drivers/net/tulip.c | tp->tx_ring[entry].buffer1 = skb->data; |
skb | 601 | drivers/net/tulip.c | struct sk_buff *skb; |
skb | 603 | drivers/net/tulip.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 604 | drivers/net/tulip.c | if (skb == NULL) { |
skb | 619 | drivers/net/tulip.c | skb->dev = dev; |
skb | 620 | drivers/net/tulip.c | skb_reserve(skb,2); /* 16 byte align the data fields */ |
skb | 621 | drivers/net/tulip.c | memcpy(skb_put(skb,pkt_len), lp->rx_ring[entry].buffer1, pkt_len); |
skb | 622 | drivers/net/tulip.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 623 | drivers/net/tulip.c | netif_rx(skb); |
skb | 42 | drivers/net/tunnel.c | static int tunnel_xmit(struct sk_buff *skb, struct device *dev); |
skb | 123 | drivers/net/tunnel.c | static int tunnel_xmit(struct sk_buff *skb, struct device *dev) |
skb | 134 | drivers/net/tunnel.c | if (skb == NULL || dev == NULL) |
skb | 159 | drivers/net/tunnel.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 164 | drivers/net/tunnel.c | iph=(struct iphdr *)skb->data; |
skb | 171 | drivers/net/tunnel.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 188 | drivers/net/tunnel.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 195 | drivers/net/tunnel.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 209 | drivers/net/tunnel.c | newlen = (skb->len + ip_header_len); |
skb | 213 | drivers/net/tunnel.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 224 | drivers/net/tunnel.c | memcpy(skb2->h.iph, skb->data, ip_header_len ); |
skb | 225 | drivers/net/tunnel.c | memcpy(skb2->data + ip_header_len, skb->data, skb->len); |
skb | 227 | drivers/net/tunnel.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1389 | drivers/net/wavelan.c | wavelan_send_packet(struct sk_buff *skb, device *dev) |
skb | 1430 | drivers/net/wavelan.c | if (skb == (struct sk_buff *)0) |
skb | 1444 | drivers/net/wavelan.c | length = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; |
skb | 1445 | drivers/net/wavelan.c | buf = skb->data; |
skb | 1452 | drivers/net/wavelan.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1497 | drivers/net/wavelan.c | struct sk_buff *skb; |
skb | 1640 | drivers/net/wavelan.c | if ((skb = dev_alloc_skb(sksize)) == (struct sk_buff *)0) |
skb | 1647 | drivers/net/wavelan.c | skb->dev = dev; |
skb | 1649 | drivers/net/wavelan.c | obram_read(ioaddr, rbd.rbd_bufl, skb_put(skb,pkt_len), pkt_len); |
skb | 1665 | drivers/net/wavelan.c | c = skb->data[i]; |
skb | 1667 | drivers/net/wavelan.c | printk(" %c", skb->data[i]); |
skb | 1669 | drivers/net/wavelan.c | printk("%02x", skb->data[i]); |
skb | 1678 | drivers/net/wavelan.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 1679 | drivers/net/wavelan.c | netif_rx(skb); |
skb | 55 | drivers/net/wd.c | struct sk_buff *skb, int ring_offset); |
skb | 376 | drivers/net/wd.c | wd_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 384 | drivers/net/wd.c | memcpy_fromio(skb->data, xfer_start, semi_count); |
skb | 386 | drivers/net/wd.c | memcpy_fromio(skb->data + semi_count, dev->rmem_start, count); |
skb | 389 | drivers/net/wd.c | eth_io_copy_and_sum(skb, xfer_start, count, 0); |
skb | 184 | drivers/net/znet.c | static int znet_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 317 | drivers/net/znet.c | static int znet_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 343 | drivers/net/znet.c | if (skb == NULL) { |
skb | 360 | drivers/net/znet.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 361 | drivers/net/znet.c | unsigned char *buf = (void *)skb->data; |
skb | 386 | drivers/net/znet.c | memcpy(zn.tx_cur, buf, skb->len); |
skb | 400 | drivers/net/znet.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 549 | drivers/net/znet.c | struct sk_buff *skb; |
skb | 551 | drivers/net/znet.c | skb = dev_alloc_skb(pkt_len); |
skb | 552 | drivers/net/znet.c | if (skb == NULL) { |
skb | 558 | drivers/net/znet.c | skb->dev = dev; |
skb | 562 | drivers/net/znet.c | memcpy(skb_put(skb,semi_cnt), zn.rx_cur, semi_cnt); |
skb | 563 | drivers/net/znet.c | memcpy(skb_put(skb,pkt_len-semi_cnt), zn.rx_start, |
skb | 566 | drivers/net/znet.c | memcpy(skb_put(skb,pkt_len), zn.rx_cur, pkt_len); |
skb | 568 | drivers/net/znet.c | unsigned int *packet = (unsigned int *) skb->data; |
skb | 573 | drivers/net/znet.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 574 | drivers/net/znet.c | netif_rx(skb); |
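
The send_packet entries above (seeq8005, sk_g16, skeleton, wavelan, znet, ...) share one transmit idiom: pad short frames up to ETH_ZLEN, feed skb->data to the board, then release the buffer with dev_kfree_skb(). A condensed sketch, with hw_load_tx() and hw_start_tx() as hypothetical stand-ins for the board-specific I/O:

    #include <linux/skbuff.h>
    #include <linux/netdevice.h>
    #include <linux/if_ether.h>

    /* Hypothetical board I/O helpers, assumed to exist for this sketch. */
    extern void hw_load_tx(struct device *dev, unsigned char *buf, int len);
    extern void hw_start_tx(struct device *dev, int len);

    static int example_start_xmit(struct sk_buff *skb, struct device *dev)
    {
        short length;

        if (skb == NULL)                 /* NULL here means "check for missed tx-done" */
            return 0;

        length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;   /* pad runt frames */
        hw_load_tx(dev, skb->data, skb->len);
        hw_start_tx(dev, length);
        dev_kfree_skb(skb, FREE_WRITE);  /* the driver frees the buffer itself */
        return 0;
    }
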
skb | 170 | fs/nfs/nfsroot.c | static int root_rarp_recv(struct sk_buff *skb, struct device *dev, |
skb | 207 | fs/nfs/nfsroot.c | static int root_rarp_recv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 209 | fs/nfs/nfsroot.c | struct arphdr *rarp = (struct arphdr *)skb->h.raw; |
skb | 216 | fs/nfs/nfsroot.c | kfree_skb(skb, FREE_READ); |
skb | 222 | fs/nfs/nfsroot.c | kfree_skb(skb, FREE_READ); |
skb | 232 | fs/nfs/nfsroot.c | kfree_skb(skb, FREE_READ); |
skb | 247 | fs/nfs/nfsroot.c | kfree_skb(skb, FREE_READ); |
skb | 253 | fs/nfs/nfsroot.c | kfree_skb(skb, FREE_READ); |
skb | 264 | fs/nfs/nfsroot.c | kfree_skb(skb, FREE_READ); |
skb | 278 | fs/nfs/nfsroot.c | kfree_skb(skb, FREE_READ); |
skb | 209 | include/asm-alpha/io.h | #define eth_io_copy_and_sum(skb,src,len,unused) memcpy_fromio((skb)->data,(src),(len)) |
skb | 141 | include/linux/atalk.h | extern int aarp_send_ddp(struct device *dev,struct sk_buff *skb, struct at_addr *sa, void *hwaddr); |
skb | 31 | include/linux/etherdevice.h | extern int eth_header(struct sk_buff *skb, struct device *dev, |
skb | 35 | include/linux/etherdevice.h | unsigned long dst, struct sk_buff *skb); |
skb | 36 | include/linux/etherdevice.h | extern unsigned short eth_type_trans(struct sk_buff *skb, struct device *dev); |
skb | 18 | include/linux/firewall.h | struct sk_buff *skb, void *phdr); |
skb | 20 | include/linux/firewall.h | struct sk_buff *skb, void *phdr); |
skb | 22 | include/linux/firewall.h | struct sk_buff *skb, void *phdr); |
skb | 34 | include/linux/firewall.h | extern int call_fw_firewall(int pf, struct sk_buff *skb, void *phdr); |
skb | 35 | include/linux/firewall.h | extern int call_in_firewall(int pf, struct sk_buff *skb, void *phdr); |
skb | 36 | include/linux/firewall.h | extern int call_out_firewall(int pf, struct sk_buff *skb, void *phdr); |
skb | 124 | include/linux/mroute.h | extern void ipmr_forward(struct sk_buff *skb, int is_frag); |
skb | 165 | include/linux/netdevice.h | int (*hard_start_xmit) (struct sk_buff *skb, |
skb | 167 | include/linux/netdevice.h | int (*hard_header) (struct sk_buff *skb, |
skb | 174 | include/linux/netdevice.h | unsigned long raddr, struct sk_buff *skb); |
skb | 226 | include/linux/netdevice.h | extern void dev_queue_xmit(struct sk_buff *skb, struct device *dev, |
skb | 229 | include/linux/netdevice.h | extern void netif_rx(struct sk_buff *skb); |
skb | 137 | include/linux/skbuff.h | extern void kfree_skb(struct sk_buff *skb, int rw); |
skb | 149 | include/linux/skbuff.h | extern void kfree_skbmem(struct sk_buff *skb); |
skb | 150 | include/linux/skbuff.h | extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority); |
skb | 151 | include/linux/skbuff.h | extern struct sk_buff * skb_copy(struct sk_buff *skb, int priority); |
skb | 152 | include/linux/skbuff.h | extern void skb_device_lock(struct sk_buff *skb); |
skb | 153 | include/linux/skbuff.h | extern void skb_device_unlock(struct sk_buff *skb); |
skb | 154 | include/linux/skbuff.h | extern void dev_kfree_skb(struct sk_buff *skb, int mode); |
skb | 155 | include/linux/skbuff.h | extern int skb_device_locked(struct sk_buff *skb); |
skb | 156 | include/linux/skbuff.h | extern unsigned char * skb_put(struct sk_buff *skb, int len); |
skb | 157 | include/linux/skbuff.h | extern unsigned char * skb_push(struct sk_buff *skb, int len); |
skb | 158 | include/linux/skbuff.h | extern unsigned char * skb_pull(struct sk_buff *skb, int len); |
skb | 159 | include/linux/skbuff.h | extern int skb_headroom(struct sk_buff *skb); |
skb | 160 | include/linux/skbuff.h | extern int skb_tailroom(struct sk_buff *skb); |
skb | 161 | include/linux/skbuff.h | extern void skb_reserve(struct sk_buff *skb, int len); |
skb | 162 | include/linux/skbuff.h | extern void skb_trim(struct sk_buff *skb, int len); |
skb | 188 | include/linux/skbuff.h | extern int skb_check(struct sk_buff *skb,int,int, char *); |
skb | 189 | include/linux/skbuff.h | #define IS_SKB(skb) skb_check((skb), 0, __LINE__,__FILE__) |
skb | 190 | include/linux/skbuff.h | #define IS_SKB_HEAD(skb) skb_check((skb), 1, __LINE__,__FILE__) |
skb | 192 | include/linux/skbuff.h | #define IS_SKB(skb) |
skb | 193 | include/linux/skbuff.h | #define IS_SKB_HEAD(skb) |
skb | 353 | include/linux/skbuff.h | extern __inline__ void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) |
skb | 358 | include/linux/skbuff.h | next = skb->next; |
skb | 359 | include/linux/skbuff.h | prev = skb->prev; |
skb | 360 | include/linux/skbuff.h | skb->next = NULL; |
skb | 361 | include/linux/skbuff.h | skb->prev = NULL; |
skb | 362 | include/linux/skbuff.h | skb->list = NULL; |
skb | 373 | include/linux/skbuff.h | extern __inline__ void skb_unlink(struct sk_buff *skb) |
skb | 379 | include/linux/skbuff.h | if(skb->list) |
skb | 380 | include/linux/skbuff.h | __skb_unlink(skb, skb->list); |
skb | 388 | include/linux/skbuff.h | extern __inline__ unsigned char *skb_put(struct sk_buff *skb, int len) |
skb | 390 | include/linux/skbuff.h | unsigned char *tmp=skb->tail; |
skb | 391 | include/linux/skbuff.h | skb->tail+=len; |
skb | 392 | include/linux/skbuff.h | skb->len+=len; |
skb | 393 | include/linux/skbuff.h | if(skb->tail>skb->end) |
skb | 398 | include/linux/skbuff.h | extern __inline__ unsigned char *skb_push(struct sk_buff *skb, int len) |
skb | 400 | include/linux/skbuff.h | skb->data-=len; |
skb | 401 | include/linux/skbuff.h | skb->len+=len; |
skb | 402 | include/linux/skbuff.h | if(skb->data<skb->head) |
skb | 404 | include/linux/skbuff.h | return skb->data; |
skb | 407 | include/linux/skbuff.h | extern __inline__ unsigned char * skb_pull(struct sk_buff *skb, int len) |
skb | 409 | include/linux/skbuff.h | if(len > skb->len) |
skb | 411 | include/linux/skbuff.h | skb->data+=len; |
skb | 412 | include/linux/skbuff.h | skb->len-=len; |
skb | 413 | include/linux/skbuff.h | return skb->data; |
skb | 416 | include/linux/skbuff.h | extern __inline__ int skb_headroom(struct sk_buff *skb) |
skb | 418 | include/linux/skbuff.h | return skb->data-skb->head; |
skb | 421 | include/linux/skbuff.h | extern __inline__ int skb_tailroom(struct sk_buff *skb) |
skb | 423 | include/linux/skbuff.h | return skb->end-skb->tail; |
skb | 426 | include/linux/skbuff.h | extern __inline__ void skb_reserve(struct sk_buff *skb, int len) |
skb | 428 | include/linux/skbuff.h | skb->data+=len; |
skb | 429 | include/linux/skbuff.h | skb->tail+=len; |
skb | 432 | include/linux/skbuff.h | extern __inline__ void skb_trim(struct sk_buff *skb, int len) |
skb | 434 | include/linux/skbuff.h | if(skb->len>len) |
skb | 436 | include/linux/skbuff.h | skb->len=len; |
skb | 437 | include/linux/skbuff.h | skb->tail=skb->data+len; |
skb | 447 | include/linux/skbuff.h | extern void skb_free_datagram(struct sk_buff *skb); |
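
The skbuff.h inline definitions above are the whole buffer-editing API: skb_put() grows the data area at the tail, skb_push() and skb_pull() grow and shrink it at the head, and skb_reserve() just moves both pointers to leave headroom. A short illustration of how they compose, with purely illustrative sizes and a caller-supplied payload buffer:

    #include <linux/skbuff.h>
    #include <linux/string.h>
    #include <linux/mm.h>

    /* Illustrative only: reserve link-level headroom, append a payload,
     * then prepend and strip a 14-byte header in the reserved space.   */
    static void example_pointer_moves(unsigned char *payload)
    {
        struct sk_buff *skb = alloc_skb(14 + 20, GFP_ATOMIC);
        if (skb == NULL)
            return;
        skb_reserve(skb, 14);                    /* data/tail advance, len stays 0       */
        memcpy(skb_put(skb, 20), payload, 20);   /* tail and len grow by 20              */
        skb_push(skb, 14);                       /* data moves back into the headroom    */
        skb_pull(skb, 14);                       /* ...and forward again, shrinking len  */
        kfree_skb(skb, FREE_WRITE);
    }
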
skb | 31 | include/linux/trdevice.h | extern int tr_header(struct sk_buff *skb, struct device *dev, |
skb | 35 | include/linux/trdevice.h | unsigned long raddr, struct sk_buff *skb); |
skb | 36 | include/linux/trdevice.h | extern unsigned short tr_type_trans(struct sk_buff *skb, struct device *dev); |
skb | 6 | include/net/arp.h | extern int arp_rcv(struct sk_buff *skb, struct device *dev, |
skb | 10 | include/net/arp.h | struct device *dev, u32 saddr, struct sk_buff *skb); |
skb | 58 | include/net/ip.h | struct sk_buff *skb; /* complete received fragment */ |
skb | 89 | include/net/ip.h | extern int ip_send(struct rtable *rt, struct sk_buff *skb, __u32 daddr, int len, struct device *dev, __u32 saddr); |
skb | 90 | include/net/ip.h | extern int ip_build_header(struct sk_buff *skb, |
skb | 96 | include/net/ip.h | extern int ip_rcv(struct sk_buff *skb, struct device *dev, |
skb | 100 | include/net/ip.h | struct sk_buff * skb); |
skb | 101 | include/net/ip.h | extern int ip_options_compile(struct options * opt, struct sk_buff * skb); |
skb | 105 | include/net/ip.h | struct device *dev, struct sk_buff *skb, |
skb | 129 | include/net/ip.h | struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev); |
skb | 130 | include/net/ip.h | void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag); |
skb | 136 | include/net/ip.h | extern int ip_forward(struct sk_buff *skb, struct device *dev, int is_frag, __u32 target_addr); |
skb | 142 | include/net/ip.h | extern void ip_options_build(struct sk_buff *skb, struct options *opt, __u32 daddr, __u32 saddr, int is_frag); |
skb | 143 | include/net/ip.h | extern int ip_options_echo(struct options *dopt, struct options *sopt, __u32 daddr, __u32 saddr, struct sk_buff *skb); |
skb | 144 | include/net/ip.h | extern void ip_options_fragment(struct sk_buff *skb); |
skb | 145 | include/net/ip.h | extern int ip_options_compile(struct options *opt, struct sk_buff *skb); |
skb | 1 | include/net/ipip.h | extern int ipip_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 49 | include/net/ipx.h | extern int ipx_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt); |
skb | 10 | include/net/netlink.h | extern int netlink_attach(int unit, int (*function)(struct sk_buff *skb)); |
skb | 11 | include/net/netlink.h | extern int netlink_donothing(struct sk_buff *skb); |
skb | 13 | include/net/netlink.h | extern int netlink_post(int unit, struct sk_buff *skb); |
skb | 31 | include/net/protocol.h | int (*handler)(struct sk_buff *skb, struct device *dev, |
skb | 282 | include/net/sock.h | int (*build_header)(struct sk_buff *skb, |
skb | 292 | include/net/sock.h | struct device *dev, struct sk_buff *skb, |
skb | 368 | include/net/sock.h | struct sk_buff *skb); |
skb | 370 | include/net/sock.h | struct sk_buff *skb); |
skb | 381 | include/net/sock.h | extern struct sk_buff *sock_alloc_send_skb(struct sock *skb, |
skb | 396 | include/net/sock.h | extern __inline__ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
skb | 399 | include/net/sock.h | if(sk->rmem_alloc + skb->truesize >= sk->rcvbuf) |
skb | 403 | include/net/sock.h | sk->rmem_alloc+=skb->truesize; |
skb | 404 | include/net/sock.h | skb->sk=sk; |
skb | 406 | include/net/sock.h | skb_queue_tail(&sk->receive_queue,skb); |
skb | 408 | include/net/sock.h | sk->data_ready(sk,skb->len); |
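
sock_queue_rcv_skb() above is the usual last step of a protocol's receive handler: it checks skb->truesize against the socket's sk->rcvbuf quota, charges rmem_alloc, queues the buffer and wakes the reader through data_ready(). The appletalk ddp.c entries later in this listing use it exactly this way; a condensed sketch:

    #include <net/sock.h>
    #include <linux/skbuff.h>

    /* Hypothetical delivery step once the destination socket is known. */
    static void example_deliver(struct sock *sk, struct sk_buff *skb)
    {
        skb->sk = sk;
        if (sock_queue_rcv_skb(sk, skb) < 0) {
            /* over the socket's receive budget: drop the frame */
            skb->sk = NULL;
            kfree_skb(skb, FREE_WRITE);
        }
    }
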
skb | 122 | include/net/tcp.h | extern int tcp_rcv(struct sk_buff *skb, struct device *dev, |
skb | 135 | include/net/tcp.h | unsigned long daddr, int len, struct sk_buff *skb); |
skb | 45 | include/net/udp.h | extern int udp_rcv(struct sk_buff *skb, struct device *dev, |
skb | 6 | net/802/llc.c | int llc_rx_adm(struct sock *sk,struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 36 | net/802/llc.c | int llc_rx_setup(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 70 | net/802/llc.c | int llc_rx_reset(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 114 | net/802/llc.c | int llc_rx_d_conn(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 150 | net/802/llc.c | int llc_rx_error(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 199 | net/802/llc.c | int llc_rx_nr_shared(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 288 | net/802/llc.c | int llc_rx_normal(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 290 | net/802/llc.c | if(llc_rx_nr_shared(sk, skb, type, cmd, pf, nr, ns)) |
skb | 364 | net/802/llc.c | llc_queue_data(sk,skb); |
skb | 30 | net/802/p8022.c | p8022_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 34 | net/802/p8022.c | proto = find_8022_client(*(skb->h.raw)); |
skb | 36 | net/802/p8022.c | skb->h.raw += 3; |
skb | 37 | net/802/p8022.c | skb_pull(skb,3); |
skb | 38 | net/802/p8022.c | return proto->rcvfunc(skb, dev, pt); |
skb | 41 | net/802/p8022.c | skb->sk = NULL; |
skb | 42 | net/802/p8022.c | kfree_skb(skb, FREE_READ); |
skb | 48 | net/802/p8022.c | struct sk_buff *skb, unsigned char *dest_node) |
skb | 50 | net/802/p8022.c | struct device *dev = skb->dev; |
skb | 53 | net/802/p8022.c | rawp = skb_push(skb,3); |
skb | 57 | net/802/p8022.c | dev->hard_header(skb, dev, ETH_P_802_3, dest_node, NULL, skb->len); |
skb | 9 | net/802/p8023.c | struct sk_buff *skb, unsigned char *dest_node) |
skb | 11 | net/802/p8023.c | struct device *dev = skb->dev; |
skb | 13 | net/802/p8023.c | dev->hard_header(skb, dev, ETH_P_802_3, dest_node, NULL, skb->len); |
skb | 40 | net/802/psnap.c | int snap_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 53 | net/802/psnap.c | proto = find_snap_client(skb->h.raw); |
skb | 60 | net/802/psnap.c | skb->h.raw += 5; |
skb | 61 | net/802/psnap.c | skb_pull(skb,5); |
skb | 64 | net/802/psnap.c | return proto->rcvfunc(skb, dev, &psnap_packet_type); |
skb | 66 | net/802/psnap.c | skb->sk = NULL; |
skb | 67 | net/802/psnap.c | kfree_skb(skb, FREE_READ); |
skb | 75 | net/802/psnap.c | static void snap_datalink_header(struct datalink_proto *dl, struct sk_buff *skb, unsigned char *dest_node) |
skb | 77 | net/802/psnap.c | memcpy(skb_push(skb,5),dl->type,5); |
skb | 78 | net/802/psnap.c | snap_dl->datalink_header(snap_dl, skb, dest_node); |
skb | 41 | net/802/tr.c | int tr_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 45 | net/802/tr.c | struct trh_hdr *trh=(struct trh_hdr *)skb_push(skb,dev->hard_header_len); |
skb | 72 | net/802/tr.c | struct sk_buff *skb) { |
skb | 82 | net/802/tr.c | if(arp_find(trh->daddr, dest, dev, dev->pa_addr, skb)) { |
skb | 91 | net/802/tr.c | unsigned short tr_type_trans(struct sk_buff *skb, struct device *dev) { |
skb | 93 | net/802/tr.c | struct trh_hdr *trh=(struct trh_hdr *)skb->data; |
skb | 94 | net/802/tr.c | struct trllc *trllc=(struct trllc *)(skb->data+sizeof(struct trh_hdr)); |
skb | 96 | net/802/tr.c | skb->mac.raw = skb->data; |
skb | 98 | net/802/tr.c | skb_pull(skb,dev->hard_header_len); |
skb | 106 | net/802/tr.c | skb->pkt_type=PACKET_BROADCAST; |
skb | 108 | net/802/tr.c | skb->pkt_type=PACKET_MULTICAST; |
skb | 114 | net/802/tr.c | skb->pkt_type=PACKET_OTHERHOST; |
skb | 90 | net/appletalk/aarp.c | struct sk_buff *skb; |
skb | 92 | net/appletalk/aarp.c | while((skb=skb_dequeue(&a->packet_queue))!=NULL) |
skb | 93 | net/appletalk/aarp.c | kfree_skb(skb, FREE_WRITE); |
skb | 106 | net/appletalk/aarp.c | struct sk_buff *skb=alloc_skb(len, GFP_ATOMIC); |
skb | 110 | net/appletalk/aarp.c | if(skb==NULL || sat==NULL) |
skb | 117 | net/appletalk/aarp.c | skb_reserve(skb,dev->hard_header_len+aarp_dl->header_length); |
skb | 118 | net/appletalk/aarp.c | eah = (struct elapaarp *)skb_put(skb,sizeof(struct elapaarp)); |
skb | 119 | net/appletalk/aarp.c | skb->arp = 1; |
skb | 120 | net/appletalk/aarp.c | skb->free = 1; |
skb | 121 | net/appletalk/aarp.c | skb->dev = a->dev; |
skb | 149 | net/appletalk/aarp.c | aarp_dl->datalink_header(aarp_dl, skb, aarp_eth_multicast); |
skb | 155 | net/appletalk/aarp.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 167 | net/appletalk/aarp.c | struct sk_buff *skb=alloc_skb(len, GFP_ATOMIC); |
skb | 170 | net/appletalk/aarp.c | if(skb==NULL) |
skb | 177 | net/appletalk/aarp.c | skb_reserve(skb,dev->hard_header_len+aarp_dl->header_length); |
skb | 178 | net/appletalk/aarp.c | eah = (struct elapaarp *)skb_put(skb,sizeof(struct elapaarp)); |
skb | 179 | net/appletalk/aarp.c | skb->arp = 1; |
skb | 180 | net/appletalk/aarp.c | skb->free = 1; |
skb | 181 | net/appletalk/aarp.c | skb->dev = dev; |
skb | 212 | net/appletalk/aarp.c | aarp_dl->datalink_header(aarp_dl, skb, sha); |
skb | 218 | net/appletalk/aarp.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 229 | net/appletalk/aarp.c | struct sk_buff *skb=alloc_skb(len, GFP_ATOMIC); |
skb | 233 | net/appletalk/aarp.c | if(skb==NULL) |
skb | 240 | net/appletalk/aarp.c | skb_reserve(skb,dev->hard_header_len+aarp_dl->header_length); |
skb | 241 | net/appletalk/aarp.c | eah = (struct elapaarp *)skb_put(skb,sizeof(struct elapaarp)); |
skb | 243 | net/appletalk/aarp.c | skb->arp = 1; |
skb | 244 | net/appletalk/aarp.c | skb->free = 1; |
skb | 245 | net/appletalk/aarp.c | skb->dev = dev; |
skb | 273 | net/appletalk/aarp.c | aarp_dl->datalink_header(aarp_dl, skb, aarp_eth_multicast); |
skb | 279 | net/appletalk/aarp.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 427 | net/appletalk/aarp.c | int aarp_send_ddp(struct device *dev,struct sk_buff *skb, struct at_addr *sa, void *hwaddr) |
skb | 443 | net/appletalk/aarp.c | skb->dev = dev; |
skb | 444 | net/appletalk/aarp.c | skb->protocol = htons(ETH_P_ATALK); |
skb | 456 | net/appletalk/aarp.c | ddp_dl->datalink_header(ddp_dl, skb, ddp_eth_multicast); |
skb | 457 | net/appletalk/aarp.c | if(skb->sk==NULL) |
skb | 458 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, SOPRI_NORMAL); |
skb | 460 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, skb->sk->priority); |
skb | 472 | net/appletalk/aarp.c | ddp_dl->datalink_header(ddp_dl, skb, a->hwaddr); |
skb | 473 | net/appletalk/aarp.c | if(skb->sk==NULL) |
skb | 474 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, SOPRI_NORMAL); |
skb | 476 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, skb->sk->priority); |
skb | 492 | net/appletalk/aarp.c | skb_queue_tail(&a->packet_queue, skb); |
skb | 516 | net/appletalk/aarp.c | skb_queue_tail(&a->packet_queue, skb); |
skb | 558 | net/appletalk/aarp.c | struct sk_buff *skb; |
skb | 577 | net/appletalk/aarp.c | while((skb=skb_dequeue(&a->packet_queue))!=NULL) |
skb | 580 | net/appletalk/aarp.c | ddp_dl->datalink_header(ddp_dl,skb,a->hwaddr); |
skb | 581 | net/appletalk/aarp.c | if(skb->sk==NULL) |
skb | 582 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, SOPRI_NORMAL); |
skb | 584 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, skb->sk->priority); |
skb | 597 | net/appletalk/aarp.c | static int aarp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 599 | net/appletalk/aarp.c | struct elapaarp *ea=(struct elapaarp *)skb->h.raw; |
skb | 613 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 621 | net/appletalk/aarp.c | if(!skb_pull(skb,sizeof(*ea))) |
skb | 623 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 636 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 667 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 680 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 739 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
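
The aarp.c entries above show how a protocol builds and queues an output frame by hand: allocate with alloc_skb(), reserve room for the device and datalink headers, fill the payload through skb_put(), mark the buffer resolved and freeable, then hand it to dev_queue_xmit(). A stripped-down sketch, where hdr_len and the zero-filled payload are assumptions standing in for the AARP specifics:

    #include <linux/skbuff.h>
    #include <linux/netdevice.h>
    #include <linux/string.h>
    #include <linux/mm.h>

    /* Hypothetical frame builder; hdr_len covers dev->hard_header_len plus
     * the datalink header length.                                         */
    static void example_send(struct device *dev, int hdr_len, int body_len)
    {
        struct sk_buff *skb = alloc_skb(hdr_len + body_len, GFP_ATOMIC);
        if (skb == NULL)
            return;
        skb_reserve(skb, hdr_len);                  /* headroom for the headers     */
        memset(skb_put(skb, body_len), 0, body_len);/* placeholder for the real body */
        skb->arp  = 1;                              /* address already resolved      */
        skb->free = 1;                              /* free after transmit           */
        skb->dev  = dev;
        dev_queue_xmit(skb, dev, SOPRI_NORMAL);
    }
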
skb | 206 | net/appletalk/ddp.c | struct sk_buff *skb; |
skb | 209 | net/appletalk/ddp.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) |
skb | 211 | net/appletalk/ddp.c | kfree_skb(skb,FREE_READ); |
skb | 1363 | net/appletalk/ddp.c | int atalk_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 1366 | net/appletalk/ddp.c | struct ddpehdr *ddp=(void *)skb->h.raw; |
skb | 1372 | net/appletalk/ddp.c | if(skb->len<sizeof(*ddp)) |
skb | 1374 | net/appletalk/ddp.c | kfree_skb(skb,FREE_READ); |
skb | 1391 | net/appletalk/ddp.c | origlen = skb->len; |
skb | 1393 | net/appletalk/ddp.c | skb_trim(skb,min(skb->len,ddp->deh_len)); |
skb | 1401 | net/appletalk/ddp.c | if(skb->len<sizeof(*ddp)) |
skb | 1403 | net/appletalk/ddp.c | kfree_skb(skb,FREE_READ); |
skb | 1415 | net/appletalk/ddp.c | kfree_skb(skb,FREE_READ); |
skb | 1421 | net/appletalk/ddp.c | if(call_in_firewall(AF_APPLETALK, skb, ddp)!=FW_ACCEPT) |
skb | 1423 | net/appletalk/ddp.c | kfree_skb(skb, FREE_READ); |
skb | 1444 | net/appletalk/ddp.c | if (skb->pkt_type != PACKET_HOST || ddp->deh_dnet == 0) |
skb | 1446 | net/appletalk/ddp.c | kfree_skb(skb, FREE_READ); |
skb | 1455 | net/appletalk/ddp.c | if(call_fw_firewall(AF_APPLETALK, skb, ddp)!=FW_ACCEPT) |
skb | 1457 | net/appletalk/ddp.c | kfree_skb(skb, FREE_READ); |
skb | 1468 | net/appletalk/ddp.c | kfree_skb(skb, FREE_READ); |
skb | 1474 | net/appletalk/ddp.c | skb_trim(skb,min(origlen, rt->dev->hard_header_len + |
skb | 1481 | net/appletalk/ddp.c | if(aarp_send_ddp(rt->dev, skb, &ta, NULL)==-1) |
skb | 1482 | net/appletalk/ddp.c | kfree_skb(skb, FREE_READ); |
skb | 1496 | net/appletalk/ddp.c | kfree_skb(skb,FREE_READ); |
skb | 1505 | net/appletalk/ddp.c | skb->sk = sock; |
skb | 1507 | net/appletalk/ddp.c | if(sock_queue_rcv_skb(sock,skb)<0) |
skb | 1509 | net/appletalk/ddp.c | skb->sk=NULL; |
skb | 1510 | net/appletalk/ddp.c | kfree_skb(skb, FREE_WRITE); |
skb | 1520 | net/appletalk/ddp.c | struct sk_buff *skb; |
skb | 1593 | net/appletalk/ddp.c | skb = sock_alloc_send_skb(sk, size, 0, 0 , &err); |
skb | 1594 | net/appletalk/ddp.c | if(skb==NULL) |
skb | 1597 | net/appletalk/ddp.c | skb->sk=sk; |
skb | 1598 | net/appletalk/ddp.c | skb->free=1; |
skb | 1599 | net/appletalk/ddp.c | skb->arp=1; |
skb | 1600 | net/appletalk/ddp.c | skb_reserve(skb,ddp_dl->header_length); |
skb | 1601 | net/appletalk/ddp.c | skb_reserve(skb,dev->hard_header_len); |
skb | 1603 | net/appletalk/ddp.c | skb->dev=dev; |
skb | 1608 | net/appletalk/ddp.c | ddp=(struct ddpehdr *)skb_put(skb,sizeof(struct ddpehdr)); |
skb | 1629 | net/appletalk/ddp.c | memcpy_fromiovec(skb_put(skb,len),msg->msg_iov,len); |
skb | 1638 | net/appletalk/ddp.c | if(call_out_firewall(AF_APPLETALK, skb, ddp)!=FW_ACCEPT) |
skb | 1640 | net/appletalk/ddp.c | kfree_skb(skb, FREE_WRITE); |
skb | 1655 | net/appletalk/ddp.c | struct sk_buff *skb2=skb_clone(skb, GFP_KERNEL); |
skb | 1673 | net/appletalk/ddp.c | sk->wmem_alloc-=skb->truesize; |
skb | 1674 | net/appletalk/ddp.c | ddp_dl->datalink_header(ddp_dl, skb, dev->dev_addr); |
skb | 1675 | net/appletalk/ddp.c | skb->sk = NULL; |
skb | 1676 | net/appletalk/ddp.c | skb->mac.raw=skb->data; |
skb | 1677 | net/appletalk/ddp.c | skb->h.raw = skb->data + ddp_dl->header_length + dev->hard_header_len; |
skb | 1678 | net/appletalk/ddp.c | skb_pull(skb,dev->hard_header_len); |
skb | 1679 | net/appletalk/ddp.c | skb_pull(skb,ddp_dl->header_length); |
skb | 1680 | net/appletalk/ddp.c | atalk_rcv(skb,dev,NULL); |
skb | 1692 | net/appletalk/ddp.c | if(aarp_send_ddp(dev,skb,&usat->sat_addr, NULL)==-1) |
skb | 1693 | net/appletalk/ddp.c | kfree_skb(skb, FREE_WRITE); |
skb | 1708 | net/appletalk/ddp.c | struct sk_buff *skb; |
skb | 1717 | net/appletalk/ddp.c | skb=skb_recv_datagram(sk,flags,noblock,&er); |
skb | 1718 | net/appletalk/ddp.c | if(skb==NULL) |
skb | 1721 | net/appletalk/ddp.c | ddp = (struct ddpehdr *)(skb->h.raw); |
skb | 1727 | net/appletalk/ddp.c | skb_copy_datagram_iovec(skb,0,msg->msg_iov,copied); |
skb | 1734 | net/appletalk/ddp.c | skb_copy_datagram_iovec(skb,sizeof(*ddp),msg->msg_iov,copied); |
skb | 1743 | net/appletalk/ddp.c | skb_free_datagram(skb); |
skb | 1782 | net/appletalk/ddp.c | struct sk_buff *skb; |
skb | 1784 | net/appletalk/ddp.c | if((skb=skb_peek(&sk->receive_queue))!=NULL) |
skb | 1785 | net/appletalk/ddp.c | amount=skb->len-sizeof(struct ddpehdr); |
skb | 364 | net/ax25/af_ax25.c | static void ax25_send_to_raw(struct sock *sk, struct sk_buff *skb, int proto) |
skb | 370 | net/ax25/af_ax25.c | if ((copy = skb_clone(skb, GFP_ATOMIC)) == NULL) |
skb | 377 | net/ax25/af_ax25.c | sk->data_ready(sk, skb->len); |
skb | 405 | net/ax25/af_ax25.c | struct sk_buff *skb; |
skb | 417 | net/ax25/af_ax25.c | while ((skb = skb_dequeue(&ax25->sk->receive_queue)) != NULL) { |
skb | 418 | net/ax25/af_ax25.c | if (skb->sk != ax25->sk) { /* A pending connection */ |
skb | 419 | net/ax25/af_ax25.c | skb->sk->dead = 1; /* Queue the unaccepted socket for death */ |
skb | 420 | net/ax25/af_ax25.c | ax25_set_timer(skb->sk->ax25); |
skb | 421 | net/ax25/af_ax25.c | skb->sk->ax25->state = AX25_STATE_0; |
skb | 424 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 629 | net/ax25/af_ax25.c | int ax25_send_frame(struct sk_buff *skb, ax25_address *src, ax25_address *dest, |
skb | 634 | net/ax25/af_ax25.c | if (skb == NULL) |
skb | 645 | net/ax25/af_ax25.c | ax25_output(ax25, skb); |
skb | 679 | net/ax25/af_ax25.c | ax25_output(ax25, skb); |
skb | 1352 | net/ax25/af_ax25.c | struct sk_buff *skb; |
skb | 1373 | net/ax25/af_ax25.c | if ((skb = skb_dequeue(&sk->receive_queue)) == NULL) { |
skb | 1384 | net/ax25/af_ax25.c | } while (skb == NULL); |
skb | 1386 | net/ax25/af_ax25.c | newsk = skb->sk; |
skb | 1391 | net/ax25/af_ax25.c | skb->sk = NULL; |
skb | 1392 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1434 | net/ax25/af_ax25.c | static int ax25_rcv(struct sk_buff *skb, struct device *dev, ax25_address *dev_addr, struct packet_type *ptype) |
skb | 1450 | net/ax25/af_ax25.c | skb->h.raw = skb->data; |
skb | 1454 | net/ax25/af_ax25.c | if(call_in_firewall(PF_AX25, skb, skb->h.raw)!=FW_ACCEPT) |
skb | 1456 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1465 | net/ax25/af_ax25.c | if (ax25_parse_addr(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL) { |
skb | 1466 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1501 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1508 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1512 | net/ax25/af_ax25.c | build_ax25_addr(skb->data, &src, &dest, &dp, type, MODULUS); |
skb | 1514 | net/ax25/af_ax25.c | if(call_fw_firewall(PF_AX25, skb,skb->data)!=FW_ACCEPT) |
skb | 1516 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1520 | net/ax25/af_ax25.c | skb->arp = 1; |
skb | 1521 | net/ax25/af_ax25.c | ax25_queue_xmit(skb, dev_out, SOPRI_NORMAL); |
skb | 1523 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1532 | net/ax25/af_ax25.c | skb_pull(skb, size_ax25_addr(&dp)); |
skb | 1544 | net/ax25/af_ax25.c | if ((*skb->data & ~0x10) == LAPB_UI) { /* UI frame - bypass LAPB processing */ |
skb | 1545 | net/ax25/af_ax25.c | skb->h.raw = skb->data + 2; /* skip control and pid */ |
skb | 1548 | net/ax25/af_ax25.c | ax25_send_to_raw(raw, skb, skb->data[1]); |
skb | 1551 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1556 | net/ax25/af_ax25.c | switch (skb->data[1]) { |
skb | 1559 | net/ax25/af_ax25.c | skb_pull(skb,2); /* drop PID/CTRL */ |
skb | 1561 | net/ax25/af_ax25.c | ip_rcv(skb, dev, ptype); /* Note ptype here is the wrong one, fix me later */ |
skb | 1565 | net/ax25/af_ax25.c | skb_pull(skb,2); |
skb | 1566 | net/ax25/af_ax25.c | arp_rcv(skb, dev, ptype); /* Note ptype here is wrong... */ |
skb | 1573 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1578 | net/ax25/af_ax25.c | skb_pull(skb, 2); |
skb | 1579 | net/ax25/af_ax25.c | skb_queue_tail(&sk->receive_queue, skb); |
skb | 1580 | net/ax25/af_ax25.c | skb->sk = sk; |
skb | 1581 | net/ax25/af_ax25.c | sk->rmem_alloc += skb->truesize; |
skb | 1583 | net/ax25/af_ax25.c | sk->data_ready(sk, skb->len); |
skb | 1586 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1591 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); /* Will scan SOCK_AX25 RAW sockets */ |
skb | 1604 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1618 | net/ax25/af_ax25.c | if (ax25_process_rx_frame(ax25, skb, type, dama) == 0) |
skb | 1619 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1628 | net/ax25/af_ax25.c | if ((*skb->data & ~PF) != SABM && (*skb->data & ~PF) != SABME) { |
skb | 1633 | net/ax25/af_ax25.c | if ((*skb->data & ~PF) != DM && mine) |
skb | 1636 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1647 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1653 | net/ax25/af_ax25.c | skb_queue_head(&sk->receive_queue, skb); |
skb | 1655 | net/ax25/af_ax25.c | skb->sk = make; |
skb | 1663 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1669 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1678 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1690 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1705 | net/ax25/af_ax25.c | if ((*skb->data & ~PF) == SABME) { |
skb | 1729 | net/ax25/af_ax25.c | sk->data_ready(sk, skb->len ); |
skb | 1731 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1740 | net/ax25/af_ax25.c | static int kiss_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *ptype) |
skb | 1742 | net/ax25/af_ax25.c | skb->sk = NULL; /* Initially we don't know who its for */ |
skb | 1744 | net/ax25/af_ax25.c | if ((*skb->data & 0x0F) != 0) { |
skb | 1745 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); /* Not a KISS data frame */ |
skb | 1749 | net/ax25/af_ax25.c | skb_pull(skb, AX25_KISS_HEADER_LEN); /* Remove the KISS byte */ |
skb | 1751 | net/ax25/af_ax25.c | return ax25_rcv(skb, dev, (ax25_address *)dev->dev_addr, ptype); |
skb | 1758 | net/ax25/af_ax25.c | static int bpq_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *ptype) |
skb | 1763 | net/ax25/af_ax25.c | skb->sk = NULL; /* Initially we don't know who its for */ |
skb | 1766 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); /* We have no port callsign */ |
skb | 1770 | net/ax25/af_ax25.c | len = skb->data[0] + skb->data[1] * 256 - 5; |
skb | 1772 | net/ax25/af_ax25.c | skb_pull(skb, 2); /* Remove the length bytes */ |
skb | 1773 | net/ax25/af_ax25.c | skb_trim(skb, len); /* Set the length of the data */ |
skb | 1775 | net/ax25/af_ax25.c | return ax25_rcv(skb, dev, port_call, ptype); |
skb | 1785 | net/ax25/af_ax25.c | struct sk_buff *skb; |
skb | 1857 | net/ax25/af_ax25.c | if ((skb = sock_alloc_send_skb(sk, size, 0, 0, &err)) == NULL) |
skb | 1860 | net/ax25/af_ax25.c | skb->sk = sk; |
skb | 1861 | net/ax25/af_ax25.c | skb->free = 1; |
skb | 1862 | net/ax25/af_ax25.c | skb->arp = 1; |
skb | 1864 | net/ax25/af_ax25.c | skb_reserve(skb, size - len); |
skb | 1870 | net/ax25/af_ax25.c | memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); |
skb | 1873 | net/ax25/af_ax25.c | asmptr = skb_push(skb, 1); |
skb | 1882 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_WRITE); |
skb | 1886 | net/ax25/af_ax25.c | ax25_output(sk->ax25, skb); /* Shove it onto the queue and kick */ |
skb | 1890 | net/ax25/af_ax25.c | asmptr = skb_push(skb, 1 + size_ax25_addr(dp)); |
skb | 1904 | net/ax25/af_ax25.c | skb->h.raw = asmptr; |
skb | 1907 | net/ax25/af_ax25.c | printk("base=%p pos=%p\n", skb->data, asmptr); |
skb | 1912 | net/ax25/af_ax25.c | ax25_queue_xmit(skb, sk->ax25->device, SOPRI_NORMAL); |
skb | 1924 | net/ax25/af_ax25.c | struct sk_buff *skb; |
skb | 1943 | net/ax25/af_ax25.c | if ((skb = skb_recv_datagram(sk, flags, noblock, &er)) == NULL) |
skb | 1947 | net/ax25/af_ax25.c | length = skb->len + (skb->data - skb->h.raw); |
skb | 1950 | net/ax25/af_ax25.c | skb_pull(skb, 1); /* Remove PID */ |
skb | 1951 | net/ax25/af_ax25.c | length = skb->len; |
skb | 1952 | net/ax25/af_ax25.c | skb->h.raw = skb->data; |
skb | 1956 | net/ax25/af_ax25.c | skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); |
skb | 1967 | net/ax25/af_ax25.c | ax25_parse_addr(skb->data, skb->len, NULL, &dest, &digi, NULL, &dama); |
skb | 1991 | net/ax25/af_ax25.c | skb_free_datagram(skb); |
skb | 2027 | net/ax25/af_ax25.c | struct sk_buff *skb; |
skb | 2029 | net/ax25/af_ax25.c | if ((skb = skb_peek(&sk->receive_queue)) != NULL) |
skb | 2030 | net/ax25/af_ax25.c | amount = skb->len; |
skb | 2271 | net/ax25/af_ax25.c | void ax25_queue_xmit(struct sk_buff *skb, struct device *dev, int pri) |
skb | 2277 | net/ax25/af_ax25.c | if(call_out_firewall(PF_AX25, skb, skb->data)!=FW_ACCEPT) |
skb | 2279 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_WRITE); |
skb | 2284 | net/ax25/af_ax25.c | skb->protocol = htons (ETH_P_AX25); |
skb | 2291 | net/ax25/af_ax25.c | if(skb_headroom(skb) < AX25_BPQ_HEADER_LEN) |
skb | 2294 | net/ax25/af_ax25.c | skb->free = 1; |
skb | 2295 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_WRITE); |
skb | 2299 | net/ax25/af_ax25.c | size = skb->len; |
skb | 2301 | net/ax25/af_ax25.c | ptr = skb_push(skb, 2); |
skb | 2306 | net/ax25/af_ax25.c | dev->hard_header(skb, dev, ETH_P_BPQ, bcast_addr, NULL, 0); |
skb | 2313 | net/ax25/af_ax25.c | was_locked = skb_device_locked(skb); |
skb | 2314 | net/ax25/af_ax25.c | dev_queue_xmit(skb, dev, pri); |
skb | 2315 | net/ax25/af_ax25.c | if (was_locked) skb_device_unlock(skb); |
skb | 2321 | net/ax25/af_ax25.c | ptr = skb_push(skb, 1); |
skb | 2330 | net/ax25/af_ax25.c | was_locked = skb_device_locked(skb); |
skb | 2331 | net/ax25/af_ax25.c | dev_queue_xmit(skb, dev, pri); |
skb | 2332 | net/ax25/af_ax25.c | if (was_locked) skb_device_unlock(skb); |
skb | 2345 | net/ax25/af_ax25.c | int ax25_encapsulate(struct sk_buff *skb, struct device *dev, unsigned short type, void *daddr, |
skb | 2349 | net/ax25/af_ax25.c | unsigned char *buff = skb_push(skb, AX25_HEADER_LEN); |
skb | 2394 | net/ax25/af_ax25.c | int ax25_rebuild_header(unsigned char *bp, struct device *dev, unsigned long dest, struct sk_buff *skb) |
skb | 2398 | net/ax25/af_ax25.c | if (arp_find(bp + 1, dest, dev, dev->pa_addr, skb)) |
skb | 2407 | net/ax25/af_ax25.c | skb_pull(skb, AX25_HEADER_LEN - 1); /* Keep PID */ |
skb | 2413 | net/ax25/af_ax25.c | if ( !(ax25cmp((ax25_address *) (bp + 8), (ax25_address *) (skb->data + 8)) || |
skb | 2414 | net/ax25/af_ax25.c | ax25cmp((ax25_address *) (bp + 1), (ax25_address *) (skb->data + 1)) ) ) |
skb | 2417 | net/ax25/af_ax25.c | skb_pull(skb, AX25_HEADER_LEN); |
skb | 2419 | net/ax25/af_ax25.c | if (!*skb->data) |
skb | 2422 | net/ax25/af_ax25.c | ax25_send_frame(skb, (ax25_address *)(bp + 8), (ax25_address *)(bp + 1), NULL, dev); |
skb | 66 | net/ax25/ax25_in.c | static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb) |
skb | 72 | net/ax25/ax25_in.c | if (!(*skb->data & SEG_FIRST)) { |
skb | 73 | net/ax25/ax25_in.c | if ((ax25->fragno - 1) == (*skb->data & SEG_REM)) { |
skb | 74 | net/ax25/ax25_in.c | ax25->fragno = *skb->data & SEG_REM; |
skb | 75 | net/ax25/ax25_in.c | skb_pull(skb, 1); |
skb | 76 | net/ax25/ax25_in.c | ax25->fraglen += skb->len; |
skb | 77 | net/ax25/ax25_in.c | skb_queue_tail(&ax25->frag_queue, skb); |
skb | 116 | net/ax25/ax25_in.c | if (*skb->data & SEG_FIRST) { |
skb | 117 | net/ax25/ax25_in.c | ax25->fragno = *skb->data & SEG_REM; |
skb | 118 | net/ax25/ax25_in.c | skb_pull(skb, 1); |
skb | 119 | net/ax25/ax25_in.c | ax25->fraglen = skb->len; |
skb | 120 | net/ax25/ax25_in.c | skb_queue_tail(&ax25->frag_queue, skb); |
skb | 132 | net/ax25/ax25_in.c | static int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb) |
skb | 137 | net/ax25/ax25_in.c | if (skb == NULL) return 0; |
skb | 139 | net/ax25/ax25_in.c | pid = *skb->data; |
skb | 145 | net/ax25/ax25_in.c | skb_pull(skb, 1); /* Remove PID */ |
skb | 146 | net/ax25/ax25_in.c | queued = nr_route_frame(skb, ax25); |
skb | 152 | net/ax25/ax25_in.c | skb_pull(skb, 1); /* Remove PID */ |
skb | 153 | net/ax25/ax25_in.c | skb->h.raw = skb->data; |
skb | 155 | net/ax25/ax25_in.c | ip_rcv(skb, ax25->device, NULL); /* Wrong ptype */ |
skb | 161 | net/ax25/ax25_in.c | skb_pull(skb, 1); /* Remove PID */ |
skb | 162 | net/ax25/ax25_in.c | queued = ax25_rx_fragment(ax25, skb); |
skb | 167 | net/ax25/ax25_in.c | if (sock_queue_rcv_skb(ax25->sk, skb) == 0) { |
skb | 184 | net/ax25/ax25_in.c | static int ax25_state1_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type, int dama) |
skb | 259 | net/ax25/ax25_in.c | static int ax25_state2_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type) |
skb | 345 | net/ax25/ax25_in.c | static int ax25_state3_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type, int dama) |
skb | 478 | net/ax25/ax25_in.c | queued = ax25_rx_iframe(ax25, skb); |
skb | 540 | net/ax25/ax25_in.c | static int ax25_state4_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type, int dama) |
skb | 729 | net/ax25/ax25_in.c | queued = ax25_rx_iframe(ax25, skb); |
skb | 789 | net/ax25/ax25_in.c | int ax25_process_rx_frame(ax25_cb *ax25, struct sk_buff *skb, int type, int dama) |
skb | 804 | net/ax25/ax25_in.c | frametype = ax25_decode(ax25, skb, &ns, &nr, &pf); |
skb | 808 | net/ax25/ax25_in.c | queued = ax25_state1_machine(ax25, skb, frametype, pf, type, dama); |
skb | 811 | net/ax25/ax25_in.c | queued = ax25_state2_machine(ax25, skb, frametype, pf, type); |
skb | 814 | net/ax25/ax25_in.c | queued = ax25_state3_machine(ax25, skb, frametype, ns, nr, pf, type, dama); |
skb | 817 | net/ax25/ax25_in.c | queued = ax25_state4_machine(ax25, skb, frametype, ns, nr, pf, type, dama); |
skb | 57 | net/ax25/ax25_out.c | void ax25_output(ax25_cb *ax25, struct sk_buff *skb) |
skb | 65 | net/ax25/ax25_out.c | if ((skb->len - 1) > mtu) { |
skb | 68 | net/ax25/ax25_out.c | fragno = skb->len / mtu; |
skb | 69 | net/ax25/ax25_out.c | if (skb->len % mtu == 0) fragno--; |
skb | 71 | net/ax25/ax25_out.c | frontlen = skb_headroom(skb); /* Address space + CTRL */ |
skb | 73 | net/ax25/ax25_out.c | while (skb->len > 0) { |
skb | 74 | net/ax25/ax25_out.c | if (skb->sk != NULL) { |
skb | 75 | net/ax25/ax25_out.c | if ((skbn = sock_alloc_send_skb(skb->sk, mtu + 2 + frontlen, 0, 0, &err)) == NULL) |
skb | 82 | net/ax25/ax25_out.c | skbn->sk = skb->sk; |
skb | 88 | net/ax25/ax25_out.c | len = (mtu > skb->len) ? skb->len : mtu; |
skb | 90 | net/ax25/ax25_out.c | memcpy(skb_put(skbn, len), skb->data, len); |
skb | 91 | net/ax25/ax25_out.c | skb_pull(skb, len); |
skb | 106 | net/ax25/ax25_out.c | skb->free = 1; |
skb | 107 | net/ax25/ax25_out.c | kfree_skb(skb, FREE_WRITE); |
skb | 109 | net/ax25/ax25_out.c | skb_queue_tail(&ax25->write_queue, skb); /* Throw it on the queue */ |
skb | 123 | net/ax25/ax25_out.c | static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit) |
skb | 127 | net/ax25/ax25_out.c | if (skb == NULL) |
skb | 131 | net/ax25/ax25_out.c | frame = skb_push(skb, 1); |
skb | 138 | net/ax25/ax25_out.c | frame = skb_push(skb, 2); |
skb | 146 | net/ax25/ax25_out.c | ax25_transmit_buffer(ax25, skb, C_COMMAND); |
skb | 151 | net/ax25/ax25_out.c | struct sk_buff *skb, *skbn; |
skb | 175 | net/ax25/ax25_out.c | skb = skb_dequeue(&ax25->write_queue); |
skb | 178 | net/ax25/ax25_out.c | if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { |
skb | 179 | net/ax25/ax25_out.c | skb_queue_head(&ax25->write_queue, skb); |
skb | 201 | net/ax25/ax25_out.c | skb_queue_tail(&ax25->ack_queue, skb); |
skb | 205 | net/ax25/ax25_out.c | } while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL); |
skb | 218 | net/ax25/ax25_out.c | void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type) |
skb | 233 | net/ax25/ax25_out.c | if (skb_headroom(skb) < size_ax25_addr(ax25->digipeat)) { |
skb | 235 | net/ax25/ax25_out.c | skb->free = 1; |
skb | 236 | net/ax25/ax25_out.c | kfree_skb(skb, FREE_WRITE); |
skb | 240 | net/ax25/ax25_out.c | ptr = skb_push(skb, size_ax25_addr(ax25->digipeat)); |
skb | 243 | net/ax25/ax25_out.c | skb->arp = 1; |
skb | 245 | net/ax25/ax25_out.c | ax25_queue_xmit(skb, ax25->device, SOPRI_NORMAL); |
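
The ax25_output() entries above implement segmentation: while the queued skb is longer than the link MTU, a fresh buffer is allocated, one MTU's worth of data is copied across with skb_put(), and skb_pull() consumes it from the original. A sketch of that loop, assuming a caller-supplied destination queue and ignoring the AX.25 fragment header and socket accounting:

    #include <linux/skbuff.h>
    #include <linux/string.h>
    #include <linux/mm.h>

    /* Carve mtu-sized pieces off the front of skb into fresh buffers. */
    static void example_fragment(struct sk_buff *skb, int mtu, int frontlen,
                                 struct sk_buff_head *queue)
    {
        while (skb->len > 0) {
            int len = (mtu > skb->len) ? skb->len : mtu;
            struct sk_buff *skbn = alloc_skb(len + frontlen, GFP_ATOMIC);
            if (skbn == NULL)
                break;                               /* out of memory: give up      */
            skb_reserve(skbn, frontlen);             /* room for address + control  */
            memcpy(skb_put(skbn, len), skb->data, len);
            skb_pull(skb, len);                      /* consume what was copied     */
            skb_queue_tail(queue, skbn);
        }
        kfree_skb(skb, FREE_WRITE);                  /* the original is now empty   */
    }
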
skb | 60 | net/ax25/ax25_subr.c | struct sk_buff *skb; |
skb | 62 | net/ax25/ax25_subr.c | while ((skb = skb_dequeue(&ax25->write_queue)) != NULL) { |
skb | 63 | net/ax25/ax25_subr.c | skb->free = 1; |
skb | 64 | net/ax25/ax25_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 67 | net/ax25/ax25_subr.c | while ((skb = skb_dequeue(&ax25->ack_queue)) != NULL) { |
skb | 68 | net/ax25/ax25_subr.c | skb->free = 1; |
skb | 69 | net/ax25/ax25_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 72 | net/ax25/ax25_subr.c | while ((skb = skb_dequeue(&ax25->reseq_queue)) != NULL) { |
skb | 73 | net/ax25/ax25_subr.c | kfree_skb(skb, FREE_READ); |
skb | 76 | net/ax25/ax25_subr.c | while ((skb = skb_dequeue(&ax25->frag_queue)) != NULL) { |
skb | 77 | net/ax25/ax25_subr.c | kfree_skb(skb, FREE_READ); |
skb | 88 | net/ax25/ax25_subr.c | struct sk_buff *skb; |
skb | 95 | net/ax25/ax25_subr.c | skb = skb_dequeue(&ax25->ack_queue); |
skb | 96 | net/ax25/ax25_subr.c | skb->free = 1; |
skb | 97 | net/ax25/ax25_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 111 | net/ax25/ax25_subr.c | struct sk_buff *skb, *skb_prev = NULL; |
skb | 118 | net/ax25/ax25_subr.c | while ((skb = skb_dequeue(&ax25->ack_queue)) != NULL) { |
skb | 120 | net/ax25/ax25_subr.c | skb_queue_head(&ax25->write_queue, skb); |
skb | 122 | net/ax25/ax25_subr.c | skb_append(skb_prev, skb); |
skb | 123 | net/ax25/ax25_subr.c | skb_prev = skb; |
skb | 149 | net/ax25/ax25_subr.c | int ax25_decode(ax25_cb *ax25, struct sk_buff *skb, int *ns, int *nr, int *pf) |
skb | 154 | net/ax25/ax25_subr.c | frame = skb->data; |
skb | 171 | net/ax25/ax25_subr.c | skb_pull(skb, 1); |
skb | 178 | net/ax25/ax25_subr.c | skb_pull(skb, 2); |
skb | 183 | net/ax25/ax25_subr.c | skb_pull(skb, 2); |
skb | 187 | net/ax25/ax25_subr.c | skb_pull(skb, 1); |
skb | 201 | net/ax25/ax25_subr.c | struct sk_buff *skb; |
skb | 208 | net/ax25/ax25_subr.c | if ((skb = alloc_skb(AX25_BPQ_HEADER_LEN + size_ax25_addr(ax25->digipeat) + 2, GFP_ATOMIC)) == NULL) |
skb | 211 | net/ax25/ax25_subr.c | skb_reserve(skb, AX25_BPQ_HEADER_LEN + size_ax25_addr(ax25->digipeat)); |
skb | 214 | net/ax25/ax25_subr.c | skb->sk = ax25->sk; |
skb | 215 | net/ax25/ax25_subr.c | ax25->sk->wmem_alloc += skb->truesize; |
skb | 220 | net/ax25/ax25_subr.c | dptr = skb_put(skb, 1); |
skb | 227 | net/ax25/ax25_subr.c | dptr = skb_put(skb, 1); |
skb | 231 | net/ax25/ax25_subr.c | dptr = skb_put(skb, 2); |
skb | 238 | net/ax25/ax25_subr.c | skb->free = 1; |
skb | 240 | net/ax25/ax25_subr.c | ax25_transmit_buffer(ax25, skb, type); |
skb | 250 | net/ax25/ax25_subr.c | struct sk_buff *skb; |
skb | 257 | net/ax25/ax25_subr.c | if ((skb = alloc_skb(AX25_BPQ_HEADER_LEN + size_ax25_addr(digi) + 1, GFP_ATOMIC)) == NULL) |
skb | 260 | net/ax25/ax25_subr.c | skb_reserve(skb, AX25_BPQ_HEADER_LEN + size_ax25_addr(digi)); |
skb | 264 | net/ax25/ax25_subr.c | dptr = skb_put(skb, 1); |
skb | 265 | net/ax25/ax25_subr.c | skb->sk = NULL; |
skb | 273 | net/ax25/ax25_subr.c | dptr = skb_push(skb, size_ax25_addr(digi)); |
skb | 276 | net/ax25/ax25_subr.c | skb->arp = 1; |
skb | 277 | net/ax25/ax25_subr.c | skb->free = 1; |
skb | 279 | net/ax25/ax25_subr.c | ax25_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 473 | net/ax25/ax25_subr.c | struct sk_buff *skb; |
skb | 479 | net/ax25/ax25_subr.c | if ((skb = alloc_skb(2, GFP_ATOMIC)) == NULL) |
skb | 482 | net/ax25/ax25_subr.c | skb->free = 1; |
skb | 483 | net/ax25/ax25_subr.c | skb->arp = 1; |
skb | 487 | net/ax25/ax25_subr.c | skb->sk = ax25->sk; |
skb | 488 | net/ax25/ax25_subr.c | ax25->sk->wmem_alloc += skb->truesize; |
skb | 491 | net/ax25/ax25_subr.c | skb->protocol = htons(ETH_P_AX25); |
skb | 493 | net/ax25/ax25_subr.c | p = skb_put(skb, 2); |
skb | 498 | net/ax25/ax25_subr.c | dev_queue_xmit(skb, ax25->device, SOPRI_NORMAL); |
skb | 54 | net/core/datagram.c | struct sk_buff *skb; |
skb | 125 | net/core/datagram.c | skb=skb_dequeue(&sk->receive_queue); |
skb | 126 | net/core/datagram.c | if(skb!=NULL) |
skb | 127 | net/core/datagram.c | skb->users++; |
skb | 134 | net/core/datagram.c | skb=skb_peek(&sk->receive_queue); |
skb | 135 | net/core/datagram.c | if(skb!=NULL) |
skb | 136 | net/core/datagram.c | skb->users++; |
skb | 138 | net/core/datagram.c | if(skb==NULL) /* shouldn't happen but .. */ |
skb | 141 | net/core/datagram.c | return skb; |
skb | 144 | net/core/datagram.c | void skb_free_datagram(struct sk_buff *skb) |
skb | 150 | net/core/datagram.c | skb->users--; |
skb | 151 | net/core/datagram.c | if(skb->users>0) |
skb | 157 | net/core/datagram.c | if(!skb->next && !skb->prev) /* Been dequeued by someone - ie it's read */ |
skb | 158 | net/core/datagram.c | kfree_skb(skb,FREE_READ); |
skb | 166 | net/core/datagram.c | void skb_copy_datagram(struct sk_buff *skb, int offset, char *to, int size) |
skb | 168 | net/core/datagram.c | memcpy_tofs(to,skb->h.raw+offset,size); |
skb | 176 | net/core/datagram.c | void skb_copy_datagram_iovec(struct sk_buff *skb, int offset, struct iovec *to, int size) |
skb | 178 | net/core/datagram.c | memcpy_toiovec(to,skb->h.raw+offset,size); |
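
skb_recv_datagram() and skb_free_datagram() above bracket the generic datagram read path: the first dequeues (or just peeks at) a buffer and bumps skb->users, the second drops that reference and frees the buffer once it has really been dequeued. Together with skb_copy_datagram_iovec() they give the recvmsg pattern the appletalk and AX.25 entries earlier in the listing follow; a condensed sketch with sk, msg, flags and noblock supplied by the caller:

    #include <net/sock.h>
    #include <linux/skbuff.h>
    #include <linux/socket.h>

    /* Hypothetical recvmsg body for a datagram protocol. */
    static int example_recvmsg(struct sock *sk, struct msghdr *msg,
                               int flags, int noblock)
    {
        int err = 0;
        int copied;
        struct sk_buff *skb = skb_recv_datagram(sk, flags, noblock, &err);

        if (skb == NULL)
            return err;
        copied = skb->len;
        skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
        skb_free_datagram(skb);              /* release the reference taken above */
        return copied;
    }
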
skb | 296 | net/core/dev.c | struct sk_buff *skb; |
skb | 297 | net/core/dev.c | while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL) |
skb | 298 | net/core/dev.c | if(skb->free) |
skb | 299 | net/core/dev.c | kfree_skb(skb,FREE_WRITE); |
skb | 329 | net/core/dev.c | void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri) |
skb | 337 | net/core/dev.c | if(pri>=0 && !skb_device_locked(skb)) |
skb | 338 | net/core/dev.c | skb_device_lock(skb); /* Shove a lock on the frame */ |
skb | 340 | net/core/dev.c | IS_SKB(skb); |
skb | 342 | net/core/dev.c | skb->dev = dev; |
skb | 369 | net/core/dev.c | if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) { |
skb | 383 | net/core/dev.c | skb->dev = dev = net_alias_main_dev(dev); |
skb | 393 | net/core/dev.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 397 | net/core/dev.c | skb_device_unlock(skb); /* Buffer is on the device queue and can be freed safely */ |
skb | 398 | net/core/dev.c | __skb_queue_tail(list, skb); |
skb | 399 | net/core/dev.c | skb = __skb_dequeue(list); |
skb | 400 | net/core/dev.c | skb_device_lock(skb); /* New buffer needs locking down */ |
skb | 407 | net/core/dev.c | skb->stamp=xtime; |
skb | 414 | net/core/dev.c | ((struct sock *)ptype->data != skb->sk)) |
skb | 417 | net/core/dev.c | if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) |
skb | 421 | net/core/dev.c | ptype->func(skb2, skb->dev, ptype); |
skb | 427 | net/core/dev.c | if (dev->hard_start_xmit(skb, dev) == 0) { |
skb | 441 | net/core/dev.c | skb_device_unlock(skb); |
skb | 442 | net/core/dev.c | __skb_queue_head(list,skb); |
skb | 452 | net/core/dev.c | void netif_rx(struct sk_buff *skb) |
skb | 462 | net/core/dev.c | skb->sk = NULL; |
skb | 463 | net/core/dev.c | skb->free = 1; |
skb | 464 | net/core/dev.c | if(skb->stamp.tv_sec==0) |
skb | 465 | net/core/dev.c | skb->stamp = xtime; |
skb | 478 | net/core/dev.c | kfree_skb(skb, FREE_READ); |
skb | 486 | net/core/dev.c | IS_SKB(skb); |
skb | 488 | net/core/dev.c | skb_queue_tail(&backlog,skb); |
skb | 553 | net/core/dev.c | struct sk_buff *skb; |
skb | 586 | net/core/dev.c | while((skb=__skb_dequeue(&backlog))!=NULL) |
skb | 602 | net/core/dev.c | skb->h.raw = skb->data; |
skb | 608 | net/core/dev.c | type = skb->protocol; |
skb | 620 | net/core/dev.c | struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC); |
skb | 622 | net/core/dev.c | pt_prev->func(skb2,skb->dev, pt_prev); |
skb | 629 | net/core/dev.c | if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev)) |
skb | 639 | net/core/dev.c | skb2=skb_clone(skb, GFP_ATOMIC); |
skb | 647 | net/core/dev.c | pt_prev->func(skb2, skb->dev, pt_prev); |
skb | 659 | net/core/dev.c | pt_prev->func(skb, skb->dev, pt_prev); |
skb | 665 | net/core/dev.c | kfree_skb(skb, FREE_WRITE); |
skb | 722 | net/core/dev.c | struct sk_buff *skb = skb_peek(head); |
skb | 724 | net/core/dev.c | if (skb) { |
skb | 725 | net/core/dev.c | __skb_unlink(skb, head); |
skb | 729 | net/core/dev.c | skb_device_lock(skb); |
skb | 735 | net/core/dev.c | dev_queue_xmit(skb,dev,-i - 1); |
skb | 96 | net/core/firewall.c | int call_fw_firewall(int pf, struct sk_buff *skb, void *phdr) |
skb | 102 | net/core/firewall.c | int rc=fw->fw_forward(fw,pf,skb,phdr); |
skb | 114 | net/core/firewall.c | int call_in_firewall(int pf, struct sk_buff *skb, void *phdr) |
skb | 120 | net/core/firewall.c | int rc=fw->fw_input(fw,pf,skb,phdr); |
skb | 128 | net/core/firewall.c | int call_out_firewall(int pf, struct sk_buff *skb, void *phdr) |
skb | 134 | net/core/firewall.c | int rc=fw->fw_output(fw,pf,skb,phdr); |
skb | 63 | net/core/net_alias.c | static int net_alias_hard_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 222 | net/core/net_alias.c | net_alias_hard_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 225 | net/core/net_alias.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 79 | net/core/skbuff.c | int skb_check(struct sk_buff *skb, int head, int line, char *file) |
skb | 82 | net/core/skbuff.c | if (skb->magic_debug_cookie != SK_HEAD_SKB) { |
skb | 87 | net/core/skbuff.c | if (!skb->next || !skb->prev) { |
skb | 91 | net/core/skbuff.c | if (skb->next->magic_debug_cookie != SK_HEAD_SKB |
skb | 92 | net/core/skbuff.c | && skb->next->magic_debug_cookie != SK_GOOD_SKB) { |
skb | 97 | net/core/skbuff.c | if (skb->prev->magic_debug_cookie != SK_HEAD_SKB |
skb | 98 | net/core/skbuff.c | && skb->prev->magic_debug_cookie != SK_GOOD_SKB) { |
skb | 105 | net/core/skbuff.c | struct sk_buff *skb2 = skb->next; |
skb | 107 | net/core/skbuff.c | while (skb2 != skb && i < 5) { |
skb | 119 | net/core/skbuff.c | if (skb->next != NULL && skb->next->magic_debug_cookie != SK_HEAD_SKB |
skb | 120 | net/core/skbuff.c | && skb->next->magic_debug_cookie != SK_GOOD_SKB) { |
skb | 125 | net/core/skbuff.c | if (skb->prev != NULL && skb->prev->magic_debug_cookie != SK_HEAD_SKB |
skb | 126 | net/core/skbuff.c | && skb->prev->magic_debug_cookie != SK_GOOD_SKB) { |
skb | 133 | net/core/skbuff.c | if(skb->magic_debug_cookie==SK_FREED_SKB) |
skb | 138 | net/core/skbuff.c | skb,skb->truesize,skb->free); |
skb | 141 | net/core/skbuff.c | if(skb->magic_debug_cookie!=SK_GOOD_SKB) |
skb | 145 | net/core/skbuff.c | skb,skb->truesize,skb->free); |
skb | 148 | net/core/skbuff.c | if(skb->head>skb->data) |
skb | 152 | net/core/skbuff.c | skb,skb->head,skb->data); |
skb | 155 | net/core/skbuff.c | if(skb->tail>skb->end) |
skb | 159 | net/core/skbuff.c | skb,skb->tail,skb->end); |
skb | 162 | net/core/skbuff.c | if(skb->data>skb->tail) |
skb | 166 | net/core/skbuff.c | skb,skb->data,skb->tail); |
skb | 169 | net/core/skbuff.c | if(skb->tail-skb->data!=skb->len) |
skb | 173 | net/core/skbuff.c | skb,skb->data,skb->end,skb->len); |
skb | 176 | net/core/skbuff.c | if((unsigned long) skb->end > (unsigned long) skb) |
skb | 180 | net/core/skbuff.c | skb,skb->end); |
skb | 352 | net/core/skbuff.c | void skb_unlink(struct sk_buff *skb) |
skb | 359 | net/core/skbuff.c | IS_SKB(skb); |
skb | 361 | net/core/skbuff.c | if(skb->list) |
skb | 363 | net/core/skbuff.c | skb->list->qlen--; |
skb | 364 | net/core/skbuff.c | skb->next->prev = skb->prev; |
skb | 365 | net/core/skbuff.c | skb->prev->next = skb->next; |
skb | 366 | net/core/skbuff.c | skb->next = NULL; |
skb | 367 | net/core/skbuff.c | skb->prev = NULL; |
skb | 368 | net/core/skbuff.c | skb->list = NULL; |
skb | 381 | net/core/skbuff.c | unsigned char *skb_put(struct sk_buff *skb, int len) |
skb | 383 | net/core/skbuff.c | unsigned char *tmp=skb->tail; |
skb | 384 | net/core/skbuff.c | IS_SKB(skb); |
skb | 385 | net/core/skbuff.c | skb->tail+=len; |
skb | 386 | net/core/skbuff.c | skb->len+=len; |
skb | 387 | net/core/skbuff.c | IS_SKB(skb); |
skb | 388 | net/core/skbuff.c | if(skb->tail>skb->end) |
skb | 393 | net/core/skbuff.c | unsigned char *skb_push(struct sk_buff *skb, int len) |
skb | 395 | net/core/skbuff.c | IS_SKB(skb); |
skb | 396 | net/core/skbuff.c | skb->data-=len; |
skb | 397 | net/core/skbuff.c | skb->len+=len; |
skb | 398 | net/core/skbuff.c | IS_SKB(skb); |
skb | 399 | net/core/skbuff.c | if(skb->data<skb->head) |
skb | 401 | net/core/skbuff.c | return skb->data; |
skb | 404 | net/core/skbuff.c | unsigned char * skb_pull(struct sk_buff *skb, int len) |
skb | 406 | net/core/skbuff.c | IS_SKB(skb); |
skb | 407 | net/core/skbuff.c | if(len>skb->len) |
skb | 409 | net/core/skbuff.c | skb->data+=len; |
skb | 410 | net/core/skbuff.c | skb->len-=len; |
skb | 411 | net/core/skbuff.c | return skb->data; |
skb | 414 | net/core/skbuff.c | int skb_headroom(struct sk_buff *skb) |
skb | 416 | net/core/skbuff.c | IS_SKB(skb); |
skb | 417 | net/core/skbuff.c | return skb->data-skb->head; |
skb | 420 | net/core/skbuff.c | int skb_tailroom(struct sk_buff *skb) |
skb | 422 | net/core/skbuff.c | IS_SKB(skb); |
skb | 423 | net/core/skbuff.c | return skb->end-skb->tail; |
skb | 426 | net/core/skbuff.c | void skb_reserve(struct sk_buff *skb, int len) |
skb | 428 | net/core/skbuff.c | IS_SKB(skb); |
skb | 429 | net/core/skbuff.c | skb->data+=len; |
skb | 430 | net/core/skbuff.c | skb->tail+=len; |
skb | 431 | net/core/skbuff.c | if(skb->tail>skb->end) |
skb | 433 | net/core/skbuff.c | if(skb->data<skb->head) |
skb | 435 | net/core/skbuff.c | IS_SKB(skb); |
skb | 438 | net/core/skbuff.c | void skb_trim(struct sk_buff *skb, int len) |
skb | 440 | net/core/skbuff.c | IS_SKB(skb); |
skb | 441 | net/core/skbuff.c | if(skb->len>len) |
skb | 443 | net/core/skbuff.c | skb->len=len; |
skb | 444 | net/core/skbuff.c | skb->tail=skb->data+len; |
skb | 457 | net/core/skbuff.c | void kfree_skb(struct sk_buff *skb, int rw) |
skb | 459 | net/core/skbuff.c | if (skb == NULL) |
skb | 466 | net/core/skbuff.c | IS_SKB(skb); |
skb | 468 | net/core/skbuff.c | if (skb->lock) |
skb | 470 | net/core/skbuff.c | skb->free = 3; /* Free when unlocked */ |
skb | 474 | net/core/skbuff.c | if (skb->free == 2) |
skb | 477 | net/core/skbuff.c | if (skb->list) |
skb | 481 | net/core/skbuff.c | if(skb->destructor) |
skb | 482 | net/core/skbuff.c | skb->destructor(skb); |
skb | 483 | net/core/skbuff.c | if (skb->sk) |
skb | 485 | net/core/skbuff.c | if(skb->sk->prot!=NULL) |
skb | 488 | net/core/skbuff.c | sock_rfree(skb->sk, skb); |
skb | 490 | net/core/skbuff.c | sock_wfree(skb->sk, skb); |
skb | 500 | net/core/skbuff.c | skb->sk->rmem_alloc-=skb->truesize; |
skb | 502 | net/core/skbuff.c | skb->sk->wmem_alloc-=skb->truesize; |
skb | 504 | net/core/skbuff.c | if(!skb->sk->dead) |
skb | 505 | net/core/skbuff.c | skb->sk->write_space(skb->sk); |
skb | 506 | net/core/skbuff.c | kfree_skbmem(skb); |
skb | 510 | net/core/skbuff.c | kfree_skbmem(skb); |
skb | 519 | net/core/skbuff.c | struct sk_buff *skb; |
skb | 547 | net/core/skbuff.c | if(skb->magic_debug_cookie == SK_GOOD_SKB) |
skb | 548 | net/core/skbuff.c | printk("Kernel kmalloc handed us an existing skb (%p)\n",skb); |
skb | 559 | net/core/skbuff.c | skb=(struct sk_buff *)(bptr+size)-1; |
skb | 561 | net/core/skbuff.c | skb->count = 1; /* only one reference to this */ |
skb | 562 | net/core/skbuff.c | skb->data_skb = NULL; /* and we're our own data skb */ |
skb | 564 | net/core/skbuff.c | skb->free = 2; /* Invalid so we pick up forgetful users */ |
skb | 565 | net/core/skbuff.c | skb->lock = 0; |
skb | 566 | net/core/skbuff.c | skb->pkt_type = PACKET_HOST; /* Default type */ |
skb | 567 | net/core/skbuff.c | skb->prev = skb->next = skb->link3 = NULL; |
skb | 568 | net/core/skbuff.c | skb->list = NULL; |
skb | 569 | net/core/skbuff.c | skb->sk = NULL; |
skb | 570 | net/core/skbuff.c | skb->truesize=size; |
skb | 571 | net/core/skbuff.c | skb->localroute=0; |
skb | 572 | net/core/skbuff.c | skb->stamp.tv_sec=0; /* No idea about time */ |
skb | 573 | net/core/skbuff.c | skb->localroute = 0; |
skb | 574 | net/core/skbuff.c | skb->ip_summed = 0; |
skb | 575 | net/core/skbuff.c | memset(skb->proto_priv, 0, sizeof(skb->proto_priv)); |
skb | 578 | net/core/skbuff.c | skb->magic_debug_cookie = SK_GOOD_SKB; |
skb | 580 | net/core/skbuff.c | skb->users = 0; |
skb | 582 | net/core/skbuff.c | skb->head=bptr; |
skb | 583 | net/core/skbuff.c | skb->data=bptr; |
skb | 584 | net/core/skbuff.c | skb->tail=bptr; |
skb | 585 | net/core/skbuff.c | skb->end=bptr+len; |
skb | 586 | net/core/skbuff.c | skb->len=0; |
skb | 587 | net/core/skbuff.c | skb->destructor=NULL; |
skb | 588 | net/core/skbuff.c | return skb; |
skb | 595 | net/core/skbuff.c | static inline void __kfree_skbmem(struct sk_buff *skb) |
skb | 598 | net/core/skbuff.c | if (--skb->count <= 0) { |
skb | 599 | net/core/skbuff.c | kfree(skb->head); |
skb | 604 | net/core/skbuff.c | void kfree_skbmem(struct sk_buff *skb) |
skb | 607 | net/core/skbuff.c | void * addr = skb->head; |
skb | 612 | net/core/skbuff.c | if (--skb->count <= 0) { |
skb | 614 | net/core/skbuff.c | if (skb->data_skb) { |
skb | 615 | net/core/skbuff.c | addr = skb; |
skb | 616 | net/core/skbuff.c | __kfree_skbmem(skb->data_skb); |
skb | 629 | net/core/skbuff.c | struct sk_buff *skb_clone(struct sk_buff *skb, int priority) |
skb | 634 | net/core/skbuff.c | IS_SKB(skb); |
skb | 638 | net/core/skbuff.c | memcpy(n, skb, sizeof(*n)); |
skb | 640 | net/core/skbuff.c | if (skb->data_skb) |
skb | 641 | net/core/skbuff.c | skb = skb->data_skb; |
skb | 644 | net/core/skbuff.c | skb->count++; |
skb | 648 | net/core/skbuff.c | n->data_skb = skb; |
skb | 664 | net/core/skbuff.c | struct sk_buff *skb_copy(struct sk_buff *skb, int priority) |
skb | 673 | net/core/skbuff.c | IS_SKB(skb); |
skb | 675 | net/core/skbuff.c | n=alloc_skb(skb->truesize-sizeof(struct sk_buff),priority); |
skb | 683 | net/core/skbuff.c | offset=n->head-skb->head; |
skb | 686 | net/core/skbuff.c | skb_reserve(n,skb->data-skb->head); |
skb | 688 | net/core/skbuff.c | skb_put(n,skb->len); |
skb | 690 | net/core/skbuff.c | memcpy(n->head,skb->head,skb->end-skb->head); |
skb | 694 | net/core/skbuff.c | n->when=skb->when; |
skb | 695 | net/core/skbuff.c | n->dev=skb->dev; |
skb | 696 | net/core/skbuff.c | n->h.raw=skb->h.raw+offset; |
skb | 697 | net/core/skbuff.c | n->mac.raw=skb->mac.raw+offset; |
skb | 698 | net/core/skbuff.c | n->ip_hdr=(struct iphdr *)(((char *)skb->ip_hdr)+offset); |
skb | 699 | net/core/skbuff.c | n->saddr=skb->saddr; |
skb | 700 | net/core/skbuff.c | n->daddr=skb->daddr; |
skb | 701 | net/core/skbuff.c | n->raddr=skb->raddr; |
skb | 702 | net/core/skbuff.c | n->seq=skb->seq; |
skb | 703 | net/core/skbuff.c | n->end_seq=skb->end_seq; |
skb | 704 | net/core/skbuff.c | n->ack_seq=skb->ack_seq; |
skb | 705 | net/core/skbuff.c | n->acked=skb->acked; |
skb | 706 | net/core/skbuff.c | memcpy(n->proto_priv, skb->proto_priv, sizeof(skb->proto_priv)); |
skb | 707 | net/core/skbuff.c | n->used=skb->used; |
skb | 709 | net/core/skbuff.c | n->arp=skb->arp; |
skb | 713 | net/core/skbuff.c | n->pkt_type=skb->pkt_type; |
skb | 714 | net/core/skbuff.c | n->stamp=skb->stamp; |
skb | 724 | net/core/skbuff.c | void skb_device_lock(struct sk_buff *skb) |
skb | 726 | net/core/skbuff.c | if(skb->lock) |
skb | 730 | net/core/skbuff.c | skb->lock++; |
skb | 733 | net/core/skbuff.c | void skb_device_unlock(struct sk_buff *skb) |
skb | 735 | net/core/skbuff.c | if(skb->lock==0) |
skb | 737 | net/core/skbuff.c | skb->lock--; |
skb | 738 | net/core/skbuff.c | if(skb->lock==0) |
skb | 742 | net/core/skbuff.c | void dev_kfree_skb(struct sk_buff *skb, int mode) |
skb | 748 | net/core/skbuff.c | if(skb->lock==1) |
skb | 751 | net/core/skbuff.c | if (!--skb->lock && (skb->free == 1 || skb->free == 3)) |
skb | 754 | net/core/skbuff.c | kfree_skb(skb,mode); |
skb | 762 | net/core/skbuff.c | struct sk_buff *skb; |
skb | 764 | net/core/skbuff.c | skb = alloc_skb(length+16, GFP_ATOMIC); |
skb | 765 | net/core/skbuff.c | if (skb) |
skb | 766 | net/core/skbuff.c | skb_reserve(skb,16); |
skb | 767 | net/core/skbuff.c | return skb; |
skb | 770 | net/core/skbuff.c | int skb_device_locked(struct sk_buff *skb) |
skb | 772 | net/core/skbuff.c | return skb->lock? 1 : 0; |
skb | 405 | net/core/sock.c | void sock_wfree(struct sock *sk, struct sk_buff *skb) |
skb | 407 | net/core/sock.c | int s=skb->truesize; |
skb | 409 | net/core/sock.c | IS_SKB(skb); |
skb | 411 | net/core/sock.c | kfree_skbmem(skb); |
skb | 426 | net/core/sock.c | void sock_rfree(struct sock *sk, struct sk_buff *skb) |
skb | 428 | net/core/sock.c | int s=skb->truesize; |
skb | 430 | net/core/sock.c | IS_SKB(skb); |
skb | 432 | net/core/sock.c | kfree_skbmem(skb); |
skb | 449 | net/core/sock.c | struct sk_buff *skb; |
skb | 473 | net/core/sock.c | skb = sock_wmalloc(sk, size, 0, sk->allocation); |
skb | 478 | net/core/sock.c | skb = sock_wmalloc(sk, size, 0 , GFP_BUFFER); |
skb | 479 | net/core/sock.c | if(!skb) |
skb | 480 | net/core/sock.c | skb=sock_wmalloc(sk, fallback, 0, GFP_KERNEL); |
skb | 487 | net/core/sock.c | if(skb==NULL) |
skb | 539 | net/core/sock.c | while(skb==NULL); |
skb | 541 | net/core/sock.c | return skb; |
skb | 548 | net/core/sock.c | struct sk_buff *skb; |
skb | 568 | net/core/sock.c | while ((skb = __skb_dequeue(&sk->back_log)) != NULL) |
skb | 572 | net/core/sock.c | sk->prot->rcv(skb, skb->dev, (struct options*)skb->proto_priv, |
skb | 573 | net/core/sock.c | skb->saddr, skb->len, skb->daddr, 1, |
skb | 89 | net/ethernet/eth.c | int eth_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 92 | net/ethernet/eth.c | struct ethhdr *eth = (struct ethhdr *)skb_push(skb,ETH_HLEN); |
skb | 140 | net/ethernet/eth.c | struct sk_buff *skb) |
skb | 159 | net/ethernet/eth.c | return arp_find(eth->h_dest, dst, dev, dev->pa_addr, skb)? 1 : 0; |
skb | 172 | net/ethernet/eth.c | unsigned short eth_type_trans(struct sk_buff *skb, struct device *dev) |
skb | 177 | net/ethernet/eth.c | skb->mac.raw=skb->data; |
skb | 178 | net/ethernet/eth.c | skb_pull(skb,dev->hard_header_len); |
skb | 179 | net/ethernet/eth.c | eth= skb->mac.ethernet; |
skb | 184 | net/ethernet/eth.c | skb->pkt_type=PACKET_BROADCAST; |
skb | 186 | net/ethernet/eth.c | skb->pkt_type=PACKET_MULTICAST; |
skb | 197 | net/ethernet/eth.c | skb->pkt_type=PACKET_OTHERHOST; |
skb | 203 | net/ethernet/eth.c | rawp = skb->data; |
skb | 9 | net/ethernet/pe2.c | struct sk_buff *skb, unsigned char *dest_node) |
skb | 11 | net/ethernet/pe2.c | struct device *dev = skb->dev; |
skb | 13 | net/ethernet/pe2.c | skb->protocol = htons (ETH_P_IPX); |
skb | 14 | net/ethernet/pe2.c | dev->hard_header(skb, dev, ETH_P_IPX, dest_node, NULL, skb->len); |
skb | 292 | net/ipv4/af_inet.c | struct sk_buff *skb; |
skb | 310 | net/ipv4/af_inet.c | while ((skb = tcp_dequeue_partial(sk)) != NULL) |
skb | 312 | net/ipv4/af_inet.c | IS_SKB(skb); |
skb | 313 | net/ipv4/af_inet.c | kfree_skb(skb, FREE_WRITE); |
skb | 320 | net/ipv4/af_inet.c | while((skb = skb_dequeue(&sk->write_queue)) != NULL) { |
skb | 321 | net/ipv4/af_inet.c | IS_SKB(skb); |
skb | 322 | net/ipv4/af_inet.c | kfree_skb(skb, FREE_WRITE); |
skb | 339 | net/ipv4/af_inet.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) |
skb | 345 | net/ipv4/af_inet.c | if (skb->sk != NULL && skb->sk != sk) |
skb | 347 | net/ipv4/af_inet.c | IS_SKB(skb); |
skb | 348 | net/ipv4/af_inet.c | skb->sk->dead = 1; |
skb | 349 | net/ipv4/af_inet.c | skb->sk->prot->close(skb->sk, 0); |
skb | 351 | net/ipv4/af_inet.c | IS_SKB(skb); |
skb | 352 | net/ipv4/af_inet.c | kfree_skb(skb, FREE_READ); |
skb | 361 | net/ipv4/af_inet.c | for(skb = sk->send_head; skb != NULL; ) |
skb | 369 | net/ipv4/af_inet.c | if (skb->next && skb->prev) |
skb | 371 | net/ipv4/af_inet.c | IS_SKB(skb); |
skb | 372 | net/ipv4/af_inet.c | skb_unlink(skb); |
skb | 374 | net/ipv4/af_inet.c | skb->dev = NULL; |
skb | 375 | net/ipv4/af_inet.c | skb2 = skb->link3; |
skb | 376 | net/ipv4/af_inet.c | kfree_skb(skb, FREE_WRITE); |
skb | 377 | net/ipv4/af_inet.c | skb = skb2; |
skb | 386 | net/ipv4/af_inet.c | while((skb=skb_dequeue(&sk->back_log))!=NULL) |
skb | 389 | net/ipv4/af_inet.c | skb->sk = NULL; |
skb | 390 | net/ipv4/af_inet.c | kfree_skb(skb, FREE_READ); |
skb | 132 | net/ipv4/arp.c | struct sk_buff_head skb; /* list of queued packets */ |
skb | 297 | net/ipv4/arp.c | struct sk_buff *skb; |
skb | 303 | net/ipv4/arp.c | while ((skb = skb_dequeue(&entry->skb)) != NULL) |
skb | 305 | net/ipv4/arp.c | skb_device_lock(skb); |
skb | 307 | net/ipv4/arp.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 629 | net/ipv4/arp.c | struct sk_buff *skb; |
skb | 644 | net/ipv4/arp.c | skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4) |
skb | 646 | net/ipv4/arp.c | if (skb == NULL) |
skb | 651 | net/ipv4/arp.c | skb_reserve(skb, dev->hard_header_len); |
skb | 652 | net/ipv4/arp.c | arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4)); |
skb | 653 | net/ipv4/arp.c | skb->arp = 1; |
skb | 654 | net/ipv4/arp.c | skb->dev = dev; |
skb | 655 | net/ipv4/arp.c | skb->free = 1; |
skb | 656 | net/ipv4/arp.c | skb->protocol = htons (ETH_P_IP); |
skb | 662 | net/ipv4/arp.c | dev->hard_header(skb,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len); |
skb | 692 | net/ipv4/arp.c | dev_queue_xmit(skb, dev, 0); |
skb | 701 | net/ipv4/arp.c | struct sk_buff *skb; |
skb | 723 | net/ipv4/arp.c | while((skb = skb_dequeue(&entry->skb)) != NULL) |
skb | 725 | net/ipv4/arp.c | IS_SKB(skb); |
skb | 726 | net/ipv4/arp.c | skb_device_lock(skb); |
skb | 728 | net/ipv4/arp.c | if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb)) |
skb | 730 | net/ipv4/arp.c | skb->arp = 1; |
skb | 731 | net/ipv4/arp.c | if(skb->sk==NULL) |
skb | 732 | net/ipv4/arp.c | dev_queue_xmit(skb, skb->dev, 0); |
skb | 734 | net/ipv4/arp.c | dev_queue_xmit(skb,skb->dev,skb->sk->priority); |
skb | 774 | net/ipv4/arp.c | int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 780 | net/ipv4/arp.c | struct arphdr *arp = (struct arphdr *)skb->h.raw; |
skb | 801 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 821 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 830 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 839 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 847 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 854 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 876 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 902 | net/ipv4/arp.c | if (tip != dev->pa_addr && net_alias_has(skb->dev)) |
skb | 911 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 953 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 959 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 979 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 1032 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 1052 | net/ipv4/arp.c | entry->dev = skb->dev; |
skb | 1054 | net/ipv4/arp.c | skb_queue_head_init(&entry->skb); |
skb | 1073 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 1182 | net/ipv4/arp.c | u32 saddr, struct sk_buff *skb) |
skb | 1189 | net/ipv4/arp.c | if (skb) |
skb | 1190 | net/ipv4/arp.c | skb->arp = 1; |
skb | 1211 | net/ipv4/arp.c | if (skb != NULL) |
skb | 1215 | net/ipv4/arp.c | skb_queue_tail(&entry->skb, skb); |
skb | 1216 | net/ipv4/arp.c | skb_device_unlock(skb); |
skb | 1229 | net/ipv4/arp.c | if (skb->sk) |
skb | 1231 | net/ipv4/arp.c | skb->sk->err = EHOSTDOWN; |
skb | 1232 | net/ipv4/arp.c | skb->sk->error_report(skb->sk); |
skb | 1235 | net/ipv4/arp.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev); |
skb | 1237 | net/ipv4/arp.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1250 | net/ipv4/arp.c | if (skb) |
skb | 1251 | net/ipv4/arp.c | skb->arp = 1; |
skb | 1275 | net/ipv4/arp.c | skb_queue_head_init(&entry->skb); |
skb | 1276 | net/ipv4/arp.c | if (skb != NULL) |
skb | 1278 | net/ipv4/arp.c | skb_queue_tail(&entry->skb, skb); |
skb | 1279 | net/ipv4/arp.c | skb_device_unlock(skb); |
skb | 1297 | net/ipv4/arp.c | else if (skb != NULL) |
skb | 1298 | net/ipv4/arp.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1510 | net/ipv4/arp.c | skb_queue_head_init(&entry->skb); |
skb | 1586 | net/ipv4/arp.c | struct sk_buff * skb; |
skb | 1605 | net/ipv4/arp.c | while ((skb = skb_dequeue(&entry->skb)) != NULL) |
skb | 1607 | net/ipv4/arp.c | skb_device_lock(skb); |
skb | 1609 | net/ipv4/arp.c | skb_queue_tail(&entry1->skb, skb); |
skb | 1610 | net/ipv4/arp.c | skb_device_unlock(skb); |
skb | 1754 | net/ipv4/arp.c | skb_queue_head_init(&entry->skb); |
skb | 151 | net/ipv4/icmp.c | void (*handler)(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, __u32 saddr, __u32 daddr, int len); |
skb | 339 | net/ipv4/icmp.c | static void icmp_unreach(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, __u32 saddr, __u32 daddr, int len) |
skb | 462 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 470 | net/ipv4/icmp.c | static void icmp_redirect(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, __u32 source, __u32 daddr, int len) |
skb | 529 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 540 | net/ipv4/icmp.c | static void icmp_echo(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, __u32 saddr, __u32 daddr, int len) |
skb | 547 | net/ipv4/icmp.c | if (ip_options_echo(&icmp_param.replyopts, NULL, daddr, saddr, skb)==0) |
skb | 549 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 560 | net/ipv4/icmp.c | static void icmp_timestamp(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, __u32 saddr, __u32 daddr, int len) |
skb | 572 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 592 | net/ipv4/icmp.c | if (ip_options_echo(&icmp_param.replyopts, NULL, daddr, saddr, skb)==0) |
skb | 594 | net/ipv4/icmp.c | kfree_skb(skb,FREE_READ); |
skb | 610 | net/ipv4/icmp.c | static void icmp_address(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, __u32 saddr, __u32 daddr, int len) |
skb | 621 | net/ipv4/icmp.c | if (ip_options_echo(&icmp_param.replyopts, NULL, daddr, saddr, skb)==0) |
skb | 624 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 627 | net/ipv4/icmp.c | static void icmp_discard(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, __u32 saddr, __u32 daddr, int len) |
skb | 629 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 636 | net/ipv4/icmp.c | int icmp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 640 | net/ipv4/icmp.c | struct icmphdr *icmph=(void *)skb->h.raw; |
skb | 652 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 665 | net/ipv4/icmp.c | kfree_skb(skb,FREE_READ); |
skb | 683 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 696 | net/ipv4/icmp.c | (icmp_pointers[icmph->type].handler)(icmph,skb,skb->dev,saddr,daddr,len); |
skb | 237 | net/ipv4/igmp.c | struct sk_buff *skb=alloc_skb(MAX_IGMP_SIZE, GFP_ATOMIC); |
skb | 241 | net/ipv4/igmp.c | if(skb==NULL) |
skb | 243 | net/ipv4/igmp.c | tmp=ip_build_header(skb, INADDR_ANY, address, &dev, IPPROTO_IGMP, NULL, |
skb | 247 | net/ipv4/igmp.c | kfree_skb(skb, FREE_WRITE); |
skb | 250 | net/ipv4/igmp.c | ih=(struct igmphdr *)skb_put(skb,sizeof(struct igmphdr)); |
skb | 256 | net/ipv4/igmp.c | ip_queue_xmit(NULL,dev,skb,1); |
skb | 426 | net/ipv4/igmp.c | int igmp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 445 | net/ipv4/igmp.c | ih=(struct igmphdr *)skb->h.raw; |
skb | 447 | net/ipv4/igmp.c | if(skb->len <sizeof(struct igmphdr) || skb->ip_hdr->ttl>1 || ip_compute_csum((void *)skb->h.raw,sizeof(struct igmphdr))) |
skb | 449 | net/ipv4/igmp.c | kfree_skb(skb, FREE_READ); |
skb | 461 | net/ipv4/igmp.c | kfree_skb(skb, FREE_READ); |
skb | 471 | net/ipv4/igmp.c | kfree_skb(skb, FREE_READ); |
skb | 45 | net/ipv4/ip_forward.c | static void ip_encap(struct sk_buff *skb, int len, struct device *out, __u32 daddr) |
skb | 52 | net/ipv4/ip_forward.c | struct iphdr *iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr)); |
skb | 56 | net/ipv4/ip_forward.c | iph->tos = skb->ip_hdr->tos; |
skb | 57 | net/ipv4/ip_forward.c | iph->ttl = skb->ip_hdr->ttl; |
skb | 63 | net/ipv4/ip_forward.c | iph->tot_len = htons(skb->len); |
skb | 67 | net/ipv4/ip_forward.c | skb->dev = out; |
skb | 68 | net/ipv4/ip_forward.c | skb->arp = 1; |
skb | 69 | net/ipv4/ip_forward.c | skb->raddr=daddr; |
skb | 73 | net/ipv4/ip_forward.c | if (out->hard_header && out->hard_header(skb, out, ETH_P_IP, NULL, NULL, len)<0) |
skb | 74 | net/ipv4/ip_forward.c | skb->arp=0; |
skb | 86 | net/ipv4/ip_forward.c | int ip_forward(struct sk_buff *skb, struct device *dev, int is_frag, |
skb | 95 | net/ipv4/ip_forward.c | struct options * opt = (struct options*)skb->proto_priv; |
skb | 101 | net/ipv4/ip_forward.c | struct sk_buff *skb_in = skb; /* So we can remember if the masquerader did some swaps */ |
skb | 112 | net/ipv4/ip_forward.c | fw_res=call_fw_firewall(PF_INET, skb, skb->h.iph); |
skb | 118 | net/ipv4/ip_forward.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev); |
skb | 136 | net/ipv4/ip_forward.c | iph = skb->h.iph; |
skb | 153 | net/ipv4/ip_forward.c | icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0, dev); |
skb | 174 | net/ipv4/ip_forward.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_UNREACH, 0, dev); |
skb | 195 | net/ipv4/ip_forward.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_SR_FAILED, 0, dev); |
skb | 213 | net/ipv4/ip_forward.c | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, raddr, dev); |
skb | 222 | net/ipv4/ip_forward.c | dev2=skb->dev; |
skb | 223 | net/ipv4/ip_forward.c | raddr=skb->raddr; |
skb | 244 | net/ipv4/ip_forward.c | ip_fw_masquerade(&skb, dev2); |
skb | 246 | net/ipv4/ip_forward.c | IS_SKB(skb); |
skb | 248 | net/ipv4/ip_forward.c | if (skb->len+encap > dev2->mtu && (ntohs(iph->frag_off) & IP_DF)) |
skb | 251 | net/ipv4/ip_forward.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(dev2->mtu), dev); |
skb | 258 | net/ipv4/ip_forward.c | if(skb_headroom(skb)-encap<dev2->hard_header_len) |
skb | 260 | net/ipv4/ip_forward.c | skb2 = alloc_skb(dev2->hard_header_len + skb->len + encap + 15, GFP_ATOMIC); |
skb | 262 | net/ipv4/ip_forward.c | if(skb_headroom(skb)<dev2->hard_header_len) |
skb | 264 | net/ipv4/ip_forward.c | skb2 = alloc_skb(dev2->hard_header_len + skb->len + 15, GFP_ATOMIC); |
skb | 287 | net/ipv4/ip_forward.c | skb_reserve(skb,(encap+dev->hard_header_len+15)&~15); /* 16 byte aligned IP headers are good */ |
skb | 288 | net/ipv4/ip_forward.c | ip_encap(skb2,skb->len, dev2, raddr); |
skb | 292 | net/ipv4/ip_forward.c | ip_send(rt,skb2,raddr,skb->len,dev2,dev2->pa_addr); |
skb | 299 | net/ipv4/ip_forward.c | ptr = skb_put(skb2,skb->len); |
skb | 306 | net/ipv4/ip_forward.c | memcpy(ptr, skb->h.raw, skb->len); |
skb | 307 | net/ipv4/ip_forward.c | memcpy(skb2->proto_priv, skb->proto_priv, sizeof(skb->proto_priv)); |
skb | 316 | net/ipv4/ip_forward.c | skb2 = skb; |
skb | 320 | net/ipv4/ip_forward.c | ip_encap(skb,skb->len, dev2, raddr); |
skb | 324 | net/ipv4/ip_forward.c | skb->arp=1; |
skb | 325 | net/ipv4/ip_forward.c | skb->raddr=raddr; |
skb | 328 | net/ipv4/ip_forward.c | memcpy(skb_push(skb, dev2->hard_header_len), hh->hh_data, dev2->hard_header_len); |
skb | 334 | net/ipv4/ip_forward.c | skb->arp = 0; |
skb | 339 | net/ipv4/ip_forward.c | if(dev2->hard_header(skb, dev2, ETH_P_IP, NULL, NULL, skb->len)<0) |
skb | 340 | net/ipv4/ip_forward.c | skb->arp=0; |
skb | 353 | net/ipv4/ip_forward.c | if (skb != skb2) |
skb | 459 | net/ipv4/ip_forward.c | if(skb==skb2) |
skb | 467 | net/ipv4/ip_forward.c | if(skb!=skb_in) |
skb | 54 | net/ipv4/ip_fragment.c | extern __inline__ void frag_kfree_skb(struct sk_buff *skb, int type) |
skb | 59 | net/ipv4/ip_fragment.c | ip_frag_mem-=skb->truesize; |
skb | 61 | net/ipv4/ip_fragment.c | kfree_skb(skb,type); |
skb | 91 | net/ipv4/ip_fragment.c | static struct ipfrag *ip_frag_create(int offset, int end, struct sk_buff *skb, unsigned char *ptr) |
skb | 108 | net/ipv4/ip_fragment.c | fp->skb = skb; |
skb | 117 | net/ipv4/ip_fragment.c | ip_frag_mem+=skb->truesize; |
skb | 189 | net/ipv4/ip_fragment.c | IS_SKB(fp->skb); |
skb | 190 | net/ipv4/ip_fragment.c | frag_kfree_skb(fp->skb,FREE_READ); |
skb | 222 | net/ipv4/ip_fragment.c | icmp_send(qp->fragments->skb,ICMP_TIME_EXCEEDED, |
skb | 253 | net/ipv4/ip_fragment.c | static struct ipq *ip_create(struct sk_buff *skb, struct iphdr *iph, struct device *dev) |
skb | 263 | net/ipv4/ip_fragment.c | skb->dev = qp->dev; |
skb | 343 | net/ipv4/ip_fragment.c | struct sk_buff *skb; |
skb | 354 | net/ipv4/ip_fragment.c | if ((skb = dev_alloc_skb(len)) == NULL) |
skb | 363 | net/ipv4/ip_fragment.c | skb_put(skb,len); |
skb | 364 | net/ipv4/ip_fragment.c | skb->h.raw = skb->data; |
skb | 365 | net/ipv4/ip_fragment.c | skb->free = 1; |
skb | 368 | net/ipv4/ip_fragment.c | ptr = (unsigned char *) skb->h.raw; |
skb | 378 | net/ipv4/ip_fragment.c | if(count+fp->len > skb->len) |
skb | 382 | net/ipv4/ip_fragment.c | frag_kfree_skb(skb,FREE_WRITE); |
skb | 395 | net/ipv4/ip_fragment.c | iph = skb->h.iph; |
skb | 398 | net/ipv4/ip_fragment.c | skb->ip_hdr = iph; |
skb | 401 | net/ipv4/ip_fragment.c | return(skb); |
skb | 409 | net/ipv4/ip_fragment.c | struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev) |
skb | 441 | net/ipv4/ip_fragment.c | return(skb); |
skb | 474 | net/ipv4/ip_fragment.c | if ((qp = ip_create(skb, iph, dev)) == NULL) |
skb | 476 | net/ipv4/ip_fragment.c | skb->sk = NULL; |
skb | 477 | net/ipv4/ip_fragment.c | frag_kfree_skb(skb, FREE_READ); |
skb | 493 | net/ipv4/ip_fragment.c | ptr = skb->data + ihl; |
skb | 559 | net/ipv4/ip_fragment.c | frag_kfree_skb(tmp->skb,FREE_READ); |
skb | 569 | net/ipv4/ip_fragment.c | tfp = ip_frag_create(offset, end, skb, ptr); |
skb | 577 | net/ipv4/ip_fragment.c | skb->sk = NULL; |
skb | 578 | net/ipv4/ip_fragment.c | frag_kfree_skb(skb, FREE_READ); |
skb | 618 | net/ipv4/ip_fragment.c | void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag) |
skb | 632 | net/ipv4/ip_fragment.c | raw = skb->data; |
skb | 635 | net/ipv4/ip_fragment.c | skb->ip_hdr = iph; |
skb | 637 | net/ipv4/ip_fragment.c | iph = skb->ip_hdr; |
skb | 670 | net/ipv4/ip_fragment.c | icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,dev->mtu, dev); |
skb | 722 | net/ipv4/ip_fragment.c | skb2->arp = skb->arp; |
skb | 723 | net/ipv4/ip_fragment.c | if(skb->free==0) |
skb | 741 | net/ipv4/ip_fragment.c | skb2->raddr = skb->raddr; /* For rebuild_header - must be here */ |
skb | 771 | net/ipv4/ip_fragment.c | ip_options_fragment(skb); |
skb | 603 | net/ipv4/ip_fw.c | static struct sk_buff *revamp(struct sk_buff *skb, struct device *dev, struct ip_masq *ftp) |
skb | 605 | net/ipv4/ip_fw.c | struct iphdr *iph = skb->h.iph; |
skb | 644 | net/ipv4/ip_fw.c | while (skb->len - ((unsigned char *)data - skb->h.raw) > 18) |
skb | 681 | net/ipv4/ip_fw.c | return skb; |
skb | 720 | net/ipv4/ip_fw.c | return skb; |
skb | 744 | net/ipv4/ip_fw.c | printk("MASQUERADE: resizing needed for %d bytes (%ld)\n",diff, skb->len); |
skb | 746 | net/ipv4/ip_fw.c | skb2 = alloc_skb(MAX_HEADER + skb->len+diff, GFP_ATOMIC); |
skb | 749 | net/ipv4/ip_fw.c | return skb; |
skb | 751 | net/ipv4/ip_fw.c | skb2->free = skb->free; |
skb | 753 | net/ipv4/ip_fw.c | skb_put(skb2,skb->len + diff); |
skb | 754 | net/ipv4/ip_fw.c | skb2->h.raw = skb2->data + (skb->h.raw - skb->data); |
skb | 767 | net/ipv4/ip_fw.c | memcpy(skb2->data, skb->data, (p - (char *)skb->data)); |
skb | 768 | net/ipv4/ip_fw.c | memcpy(&skb2->data[(p - (char *)skb->data)], buf, strlen(buf)); |
skb | 769 | net/ipv4/ip_fw.c | memcpy(&skb2->data[(p - (char *)skb->data) + strlen(buf)], data, |
skb | 770 | net/ipv4/ip_fw.c | skb->len - (data-(char *)skb->data)); |
skb | 776 | net/ipv4/ip_fw.c | iph->tot_len = htons(skb->len + diff); |
skb | 783 | net/ipv4/ip_fw.c | kfree_skb(skb, FREE_WRITE); |
skb | 786 | net/ipv4/ip_fw.c | return skb; |
skb | 801 | net/ipv4/ip_fw.c | struct sk_buff *skb=*skb_ptr; |
skb | 802 | net/ipv4/ip_fw.c | struct iphdr *iph = skb->h.iph; |
skb | 862 | net/ipv4/ip_fw.c | size = skb->len - ((unsigned char *)portptr - skb->h.raw); |
skb | 880 | net/ipv4/ip_fw.c | skb = revamp(*skb_ptr, dev, ms); |
skb | 881 | net/ipv4/ip_fw.c | *skb_ptr = skb; |
skb | 882 | net/ipv4/ip_fw.c | iph = skb->h.iph; |
skb | 884 | net/ipv4/ip_fw.c | size = skb->len - ((unsigned char *)portptr-skb->h.raw); |
skb | 898 | net/ipv4/ip_fw.c | tcp_send_check(th,iph->saddr,iph->daddr,size,skb->sk); |
skb | 1585 | net/ipv4/ip_fw.c | int ipfw_input_check(struct firewall_ops *this, int pf, struct sk_buff *skb, void *phdr) |
skb | 1587 | net/ipv4/ip_fw.c | return ip_fw_chk(phdr, skb->dev, ip_fw_in_chain, ip_fw_in_policy, 0); |
skb | 1590 | net/ipv4/ip_fw.c | int ipfw_output_check(struct firewall_ops *this, int pf, struct sk_buff *skb, void *phdr) |
skb | 1592 | net/ipv4/ip_fw.c | return ip_fw_chk(phdr, skb->dev, ip_fw_out_chain, ip_fw_out_policy, 0); |
skb | 1595 | net/ipv4/ip_fw.c | int ipfw_forward_check(struct firewall_ops *this, int pf, struct sk_buff *skb, void *phdr) |
skb | 1597 | net/ipv4/ip_fw.c | return ip_fw_chk(phdr, skb->dev, ip_fw_fwd_chain, ip_fw_fwd_policy, 0); |
skb | 198 | net/ipv4/ip_input.c | int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 200 | net/ipv4/ip_input.c | struct iphdr *iph = skb->h.iph; |
skb | 223 | net/ipv4/ip_input.c | return ipv6_rcv(skb,dev,pt); |
skb | 232 | net/ipv4/ip_input.c | skb->ip_hdr = iph; |
skb | 247 | net/ipv4/ip_input.c | if (skb->len<sizeof(struct iphdr) || iph->ihl<5 || iph->version != 4 || ip_fast_csum((unsigned char *)iph, iph->ihl) !=0 |
skb | 248 | net/ipv4/ip_input.c | || skb->len < ntohs(iph->tot_len)) |
skb | 251 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 261 | net/ipv4/ip_input.c | skb_trim(skb,ntohs(iph->tot_len)); |
skb | 265 | net/ipv4/ip_input.c | skb->ip_summed = 0; |
skb | 266 | net/ipv4/ip_input.c | if (ip_options_compile(NULL, skb)) |
skb | 268 | net/ipv4/ip_input.c | opt = (struct options*)skb->proto_priv; |
skb | 272 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_READ); |
skb | 285 | net/ipv4/ip_input.c | if (iph->daddr != skb->dev->pa_addr && net_alias_has(skb->dev)) |
skb | 286 | net/ipv4/ip_input.c | skb->dev = dev = net_alias_dev_rcv_sel32(skb->dev, AF_INET, iph->saddr, iph->daddr); |
skb | 304 | net/ipv4/ip_input.c | if ((err=call_in_firewall(PF_INET, skb, iph))<FW_ACCEPT) |
skb | 307 | net/ipv4/ip_input.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0, dev); |
skb | 308 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 342 | net/ipv4/ip_input.c | if ( iph->daddr == skb->dev->pa_addr || (brd = ip_chk_addr(iph->daddr)) != 0) |
skb | 350 | net/ipv4/ip_input.c | if (brd != IS_MYADDR || skb->pkt_type != PACKET_HOST) |
skb | 352 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 364 | net/ipv4/ip_input.c | icmp_send(skb, ICMP_PARAMETERPROB, 0, opt->srr+2, |
skb | 365 | net/ipv4/ip_input.c | skb->dev); |
skb | 366 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 380 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 390 | net/ipv4/ip_input.c | if (ip_forward(skb, dev, is_frag, nexthop)) |
skb | 391 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 394 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 411 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 426 | net/ipv4/ip_input.c | if (ip_fw_demasquerade(skb)) |
skb | 428 | net/ipv4/ip_input.c | struct iphdr *iph=skb->h.iph; |
skb | 429 | net/ipv4/ip_input.c | if (ip_forward(skb, dev, is_frag|IPFWD_MASQUERADED, iph->daddr)) |
skb | 430 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 442 | net/ipv4/ip_input.c | skb=ip_defrag(iph,skb,dev); |
skb | 443 | net/ipv4/ip_input.c | if(skb==NULL) |
skb | 445 | net/ipv4/ip_input.c | skb->dev = dev; |
skb | 446 | net/ipv4/ip_input.c | iph=skb->h.iph; |
skb | 453 | net/ipv4/ip_input.c | skb->ip_hdr = iph; |
skb | 454 | net/ipv4/ip_input.c | skb->h.raw += iph->ihl*4; |
skb | 489 | net/ipv4/ip_input.c | skb1=skb_clone(skb, GFP_ATOMIC); |
skb | 532 | net/ipv4/ip_input.c | skb2 = skb_clone(skb, GFP_ATOMIC); |
skb | 538 | net/ipv4/ip_input.c | skb2 = skb; |
skb | 571 | net/ipv4/ip_input.c | ipmr_forward(skb, is_frag); |
skb | 574 | net/ipv4/ip_input.c | struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC); |
skb | 585 | net/ipv4/ip_input.c | raw_rcv(raw_sk, skb, dev, iph->saddr, daddr); |
skb | 589 | net/ipv4/ip_input.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0, dev); |
skb | 590 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 604 | net/ipv4/ip_input.c | if(skb->pkt_type!=PACKET_HOST || brd==IS_BROADCAST) |
skb | 606 | net/ipv4/ip_input.c | kfree_skb(skb,FREE_WRITE); |
skb | 617 | net/ipv4/ip_input.c | icmp_send(skb, ICMP_PARAMETERPROB, 0, 16, skb->dev); |
skb | 618 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 621 | net/ipv4/ip_input.c | if (ip_forward(skb, dev, is_frag, iph->daddr)) |
skb | 622 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 627 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 29 | net/ipv4/ip_options.c | void ip_options_build(struct sk_buff * skb, struct options * opt, |
skb | 33 | net/ipv4/ip_options.c | unsigned char * iph = (unsigned char*)skb->ip_hdr; |
skb | 35 | net/ipv4/ip_options.c | memcpy(skb->proto_priv, opt, sizeof(struct options)); |
skb | 37 | net/ipv4/ip_options.c | opt = (struct options*)skb->proto_priv; |
skb | 75 | net/ipv4/ip_options.c | struct sk_buff * skb) |
skb | 86 | net/ipv4/ip_options.c | sopt = (struct options*)skb->proto_priv; |
skb | 95 | net/ipv4/ip_options.c | (unsigned char *)skb->ip_hdr); |
skb | 194 | net/ipv4/ip_options.c | void ip_options_fragment(struct sk_buff * skb) |
skb | 196 | net/ipv4/ip_options.c | unsigned char * optptr = (unsigned char*)skb->ip_hdr; |
skb | 197 | net/ipv4/ip_options.c | struct options * opt = (struct options*)skb->proto_priv; |
skb | 234 | net/ipv4/ip_options.c | int ip_options_compile(struct options * opt, struct sk_buff * skb) |
skb | 244 | net/ipv4/ip_options.c | opt = (struct options*)skb->proto_priv; |
skb | 246 | net/ipv4/ip_options.c | iph = (unsigned char*)skb->ip_hdr; |
skb | 253 | net/ipv4/ip_options.c | optptr = opt->is_data ? opt->__data : (unsigned char*)&skb->ip_hdr[1]; |
skb | 302 | net/ipv4/ip_options.c | if (!skb) |
skb | 339 | net/ipv4/ip_options.c | if (skb) |
skb | 341 | net/ipv4/ip_options.c | memcpy(&optptr[optptr[2]-1], &skb->dev->pa_addr, 4); |
skb | 378 | net/ipv4/ip_options.c | if (skb) |
skb | 390 | net/ipv4/ip_options.c | if (skb) |
skb | 392 | net/ipv4/ip_options.c | memcpy(&optptr[ts->ptr-1], &skb->dev->pa_addr, 4); |
skb | 411 | net/ipv4/ip_options.c | if (skb) |
skb | 441 | net/ipv4/ip_options.c | if (skb) |
skb | 451 | net/ipv4/ip_options.c | if (!skb) |
skb | 466 | net/ipv4/ip_options.c | if (skb) |
skb | 468 | net/ipv4/ip_options.c | icmp_send(skb, ICMP_PARAMETERPROB, 0, pp_ptr-iph, skb->dev); |
skb | 469 | net/ipv4/ip_options.c | kfree_skb(skb, FREE_READ); |
skb | 67 | net/ipv4/ip_output.c | static void ip_loopback(struct device *old_dev, struct sk_buff *skb) |
skb | 70 | net/ipv4/ip_output.c | int len=ntohs(skb->ip_hdr->tot_len); |
skb | 79 | net/ipv4/ip_output.c | newskb->saddr=skb->saddr; |
skb | 80 | net/ipv4/ip_output.c | newskb->daddr=skb->daddr; |
skb | 81 | net/ipv4/ip_output.c | newskb->raddr=skb->raddr; |
skb | 85 | net/ipv4/ip_output.c | newskb->pkt_type=skb->pkt_type; |
skb | 90 | net/ipv4/ip_output.c | ip_send(NULL,newskb, skb->ip_hdr->daddr, len, dev, skb->ip_hdr->saddr); |
skb | 95 | net/ipv4/ip_output.c | memcpy(newskb->proto_priv, skb->proto_priv, sizeof(skb->proto_priv)); |
skb | 100 | net/ipv4/ip_output.c | memcpy(newskb->ip_hdr,skb->ip_hdr,len); |
skb | 114 | net/ipv4/ip_output.c | int ip_send(struct rtable * rt, struct sk_buff *skb, __u32 daddr, int len, struct device *dev, __u32 saddr) |
skb | 118 | net/ipv4/ip_output.c | skb->dev = dev; |
skb | 119 | net/ipv4/ip_output.c | skb->arp = 1; |
skb | 120 | net/ipv4/ip_output.c | skb->protocol = htons(ETH_P_IP); |
skb | 127 | net/ipv4/ip_output.c | skb_reserve(skb,(dev->hard_header_len+15)&~15); /* 16 byte aligned IP headers are good */ |
skb | 130 | net/ipv4/ip_output.c | memcpy(skb_push(skb,dev->hard_header_len),rt->rt_hh->hh_data,dev->hard_header_len); |
skb | 136 | net/ipv4/ip_output.c | skb->arp = 0; |
skb | 137 | net/ipv4/ip_output.c | skb->raddr = daddr; |
skb | 140 | net/ipv4/ip_output.c | mac = dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, len); |
skb | 144 | net/ipv4/ip_output.c | skb->arp = 0; |
skb | 145 | net/ipv4/ip_output.c | skb->raddr = daddr; /* next routing address */ |
skb | 151 | net/ipv4/ip_output.c | static int ip_send_room(struct rtable * rt, struct sk_buff *skb, __u32 daddr, int len, struct device *dev, __u32 saddr) |
skb | 155 | net/ipv4/ip_output.c | skb->dev = dev; |
skb | 156 | net/ipv4/ip_output.c | skb->arp = 1; |
skb | 157 | net/ipv4/ip_output.c | skb->protocol = htons(ETH_P_IP); |
skb | 160 | net/ipv4/ip_output.c | skb_reserve(skb,MAX_HEADER); |
skb | 163 | net/ipv4/ip_output.c | memcpy(skb_push(skb,dev->hard_header_len),rt->rt_hh->hh_data,dev->hard_header_len); |
skb | 169 | net/ipv4/ip_output.c | skb->arp = 0; |
skb | 170 | net/ipv4/ip_output.c | skb->raddr = daddr; |
skb | 173 | net/ipv4/ip_output.c | mac = dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, len); |
skb | 177 | net/ipv4/ip_output.c | skb->arp = 0; |
skb | 178 | net/ipv4/ip_output.c | skb->raddr = daddr; /* next routing address */ |
skb | 192 | net/ipv4/ip_output.c | int ip_build_header(struct sk_buff *skb, __u32 saddr, __u32 daddr, |
skb | 211 | net/ipv4/ip_output.c | if(MULTICAST(daddr) && *dev==NULL && skb->sk && *skb->sk->ip_mc_name) |
skb | 212 | net/ipv4/ip_output.c | *dev=dev_get(skb->sk->ip_mc_name); |
skb | 216 | net/ipv4/ip_output.c | rt = ip_check_route(rp, daddr, skb->localroute); |
skb | 225 | net/ipv4/ip_output.c | rt = ip_rt_route(daddr, skb->localroute); |
skb | 263 | net/ipv4/ip_output.c | tmp = ip_send_room(rt, skb, raddr, len, *dev, saddr); |
skb | 265 | net/ipv4/ip_output.c | tmp = ip_send(rt, skb, raddr, len, *dev, saddr); |
skb | 273 | net/ipv4/ip_output.c | skb->dev = *dev; |
skb | 274 | net/ipv4/ip_output.c | skb->saddr = saddr; |
skb | 293 | net/ipv4/ip_output.c | iph=(struct iphdr *)skb_put(skb,sizeof(struct iphdr) + opt->optlen); |
skb | 295 | net/ipv4/ip_output.c | iph=(struct iphdr *)skb_put(skb,sizeof(struct iphdr)); |
skb | 305 | net/ipv4/ip_output.c | skb->ip_hdr = iph; |
skb | 310 | net/ipv4/ip_output.c | ip_options_build(skb, opt, final_daddr, (*dev)->pa_addr, 0); |
skb | 335 | net/ipv4/ip_output.c | struct sk_buff *skb, int free) |
skb | 347 | net/ipv4/ip_output.c | IS_SKB(skb); |
skb | 354 | net/ipv4/ip_output.c | skb->dev = dev; |
skb | 355 | net/ipv4/ip_output.c | skb->when = jiffies; |
skb | 365 | net/ipv4/ip_output.c | iph = skb->ip_hdr; |
skb | 366 | net/ipv4/ip_output.c | iph->tot_len = ntohs(skb->len-(((unsigned char *)iph)-skb->data)); |
skb | 369 | net/ipv4/ip_output.c | if(call_out_firewall(PF_INET, skb, iph) < FW_ACCEPT) |
skb | 387 | net/ipv4/ip_output.c | skb->free = free; |
skb | 397 | net/ipv4/ip_output.c | ip_fragment(sk,skb,dev,0); |
skb | 398 | net/ipv4/ip_output.c | IS_SKB(skb); |
skb | 399 | net/ipv4/ip_output.c | kfree_skb(skb,FREE_WRITE); |
skb | 417 | net/ipv4/ip_output.c | if (skb->next != NULL) |
skb | 420 | net/ipv4/ip_output.c | skb_unlink(skb); |
skb | 441 | net/ipv4/ip_output.c | if (skb->link3 != NULL) |
skb | 444 | net/ipv4/ip_output.c | skb->link3 = NULL; |
skb | 448 | net/ipv4/ip_output.c | sk->send_tail = skb; |
skb | 449 | net/ipv4/ip_output.c | sk->send_head = skb; |
skb | 453 | net/ipv4/ip_output.c | sk->send_tail->link3 = skb; |
skb | 454 | net/ipv4/ip_output.c | sk->send_tail = skb; |
skb | 463 | net/ipv4/ip_output.c | skb->sk = sk; |
skb | 486 | net/ipv4/ip_output.c | ip_loopback(dev,skb); |
skb | 495 | net/ipv4/ip_output.c | ip_loopback(dev,skb); |
skb | 504 | net/ipv4/ip_output.c | if(skb->ip_hdr->ttl==0) |
skb | 506 | net/ipv4/ip_output.c | kfree_skb(skb, FREE_READ); |
skb | 512 | net/ipv4/ip_output.c | ip_loopback(dev,skb); |
skb | 523 | net/ipv4/ip_output.c | dev_queue_xmit(skb, dev, sk->priority); |
skb | 527 | net/ipv4/ip_output.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 536 | net/ipv4/ip_output.c | kfree_skb(skb, FREE_WRITE); |
skb | 649 | net/ipv4/ip_output.c | struct sk_buff *skb=sock_alloc_send_skb(sk, length+15+dev->hard_header_len,0, noblock, &error); |
skb | 650 | net/ipv4/ip_output.c | if(skb==NULL) |
skb | 655 | net/ipv4/ip_output.c | skb->dev=dev; |
skb | 656 | net/ipv4/ip_output.c | skb->protocol = htons(ETH_P_IP); |
skb | 657 | net/ipv4/ip_output.c | skb->free=1; |
skb | 658 | net/ipv4/ip_output.c | skb->when=jiffies; |
skb | 659 | net/ipv4/ip_output.c | skb->sk=sk; |
skb | 660 | net/ipv4/ip_output.c | skb->arp=0; |
skb | 661 | net/ipv4/ip_output.c | skb->saddr=saddr; |
skb | 662 | net/ipv4/ip_output.c | skb->raddr = raddr; |
skb | 663 | net/ipv4/ip_output.c | skb_reserve(skb,(dev->hard_header_len+15)&~15); |
skb | 666 | net/ipv4/ip_output.c | skb->arp=1; |
skb | 667 | net/ipv4/ip_output.c | memcpy(skb_push(skb,dev->hard_header_len),hh->hh_data,dev->hard_header_len); |
skb | 670 | net/ipv4/ip_output.c | skb->arp = 0; |
skb | 678 | net/ipv4/ip_output.c | if(dev->hard_header(skb,dev,ETH_P_IP,NULL,NULL,0)>0) |
skb | 679 | net/ipv4/ip_output.c | skb->arp=1; |
skb | 682 | net/ipv4/ip_output.c | skb->arp=1; |
skb | 683 | net/ipv4/ip_output.c | skb->ip_hdr=iph=(struct iphdr *)skb_put(skb,length); |
skb | 700 | net/ipv4/ip_output.c | ip_options_build(skb, opt, |
skb | 711 | net/ipv4/ip_output.c | if(call_out_firewall(PF_INET, skb, iph)< FW_ACCEPT) |
skb | 713 | net/ipv4/ip_output.c | kfree_skb(skb, FREE_WRITE); |
skb | 721 | net/ipv4/ip_output.c | dev_queue_xmit(skb,dev,sk->priority); |
skb | 725 | net/ipv4/ip_output.c | kfree_skb(skb, FREE_WRITE); |
skb | 800 | net/ipv4/ip_output.c | struct sk_buff * skb; |
skb | 808 | net/ipv4/ip_output.c | skb = sock_alloc_send_skb(sk, fraglen+15, 0, noblock, &error); |
skb | 809 | net/ipv4/ip_output.c | if (skb == NULL) |
skb | 822 | net/ipv4/ip_output.c | skb->dev = dev; |
skb | 823 | net/ipv4/ip_output.c | skb->protocol = htons(ETH_P_IP); |
skb | 824 | net/ipv4/ip_output.c | skb->when = jiffies; |
skb | 825 | net/ipv4/ip_output.c | skb->free = 1; /* dubious, this one */ |
skb | 826 | net/ipv4/ip_output.c | skb->sk = sk; |
skb | 827 | net/ipv4/ip_output.c | skb->arp = 0; |
skb | 828 | net/ipv4/ip_output.c | skb->saddr = saddr; |
skb | 829 | net/ipv4/ip_output.c | skb->raddr = raddr; |
skb | 830 | net/ipv4/ip_output.c | skb_reserve(skb,(dev->hard_header_len+15)&~15); |
skb | 831 | net/ipv4/ip_output.c | data = skb_put(skb, fraglen-dev->hard_header_len); |
skb | 842 | net/ipv4/ip_output.c | skb->arp=1; |
skb | 843 | net/ipv4/ip_output.c | memcpy(skb_push(skb,dev->hard_header_len),hh->hh_data,dev->hard_header_len); |
skb | 846 | net/ipv4/ip_output.c | skb->arp = 0; |
skb | 854 | net/ipv4/ip_output.c | if(dev->hard_header(skb, dev, ETH_P_IP, |
skb | 856 | net/ipv4/ip_output.c | skb->arp=1; |
skb | 863 | net/ipv4/ip_output.c | skb->ip_hdr = iph = (struct iphdr *)data; |
skb | 876 | net/ipv4/ip_output.c | ip_options_build(skb, opt, |
skb | 915 | net/ipv4/ip_output.c | if(!offset && call_out_firewall(PF_INET, skb, iph) < FW_ACCEPT) |
skb | 917 | net/ipv4/ip_output.c | kfree_skb(skb, FREE_WRITE); |
skb | 946 | net/ipv4/ip_output.c | if(skb->daddr==IGMP_ALL_HOSTS || (dev->flags&IFF_ALLMULTI)) |
skb | 947 | net/ipv4/ip_output.c | ip_loopback(dev,skb); |
skb | 955 | net/ipv4/ip_output.c | ip_loopback(dev,skb); |
skb | 968 | net/ipv4/ip_output.c | if(skb->ip_hdr->ttl==0) |
skb | 969 | net/ipv4/ip_output.c | kfree_skb(skb, FREE_READ); |
skb | 980 | net/ipv4/ip_output.c | ip_loopback(dev,skb); |
skb | 988 | net/ipv4/ip_output.c | dev_queue_xmit(skb, dev, sk->priority); |
skb | 999 | net/ipv4/ip_output.c | kfree_skb(skb, FREE_WRITE); |
skb | 1038 | net/ipv4/ip_output.c | struct sk_buff *skb=alloc_skb(sizeof(struct netlink_rtinfo), GFP_ATOMIC); |
skb | 1041 | net/ipv4/ip_output.c | if(skb==NULL) |
skb | 1043 | net/ipv4/ip_output.c | skb->free=1; |
skb | 1044 | net/ipv4/ip_output.c | nrt=(struct netlink_rtinfo *)skb_put(skb, sizeof(struct netlink_rtinfo)); |
skb | 1058 | net/ipv4/ip_output.c | netlink_post(NETLINK_ROUTE, skb); |
skb | 52 | net/ipv4/ipip.c | int ipip_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 68 | net/ipv4/ipip.c | skb_pull(skb, ((struct iphdr *)skb->data)->ihl<<2); |
skb | 74 | net/ipv4/ipip.c | skb->h.iph=(struct iphdr *)skb->data; |
skb | 75 | net/ipv4/ipip.c | skb->ip_hdr=(struct iphdr *)skb->data; |
skb | 76 | net/ipv4/ipip.c | memset(skb->proto_priv, 0, sizeof(struct options)); |
skb | 77 | net/ipv4/ipip.c | if (skb->ip_hdr->ihl > 5) |
skb | 79 | net/ipv4/ipip.c | if (ip_options_compile(NULL, skb)) |
skb | 88 | net/ipv4/ipip.c | if((err=call_in_firewall(PF_INET, skb, skb->ip_hdr))<FW_ACCEPT) |
skb | 91 | net/ipv4/ipip.c | icmp_send(skb,ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0 , dev); |
skb | 92 | net/ipv4/ipip.c | kfree_skb(skb, FREE_READ); |
skb | 108 | net/ipv4/ipip.c | if(ip_forward(skb, dev, 0, daddr)) |
skb | 109 | net/ipv4/ipip.c | kfree_skb(skb, FREE_READ); |
skb | 99 | net/ipv4/ipmr.c | struct sk_buff *skb; |
skb | 140 | net/ipv4/ipmr.c | while((skb=skb_dequeue(&cache->mfc_unresolved))) |
skb | 141 | net/ipv4/ipmr.c | kfree_skb(skb, FREE_WRITE); |
skb | 216 | net/ipv4/ipmr.c | struct sk_buff *skb; |
skb | 244 | net/ipv4/ipmr.c | while((skb=skb_dequeue(&cache->mfc_unresolved))) |
skb | 245 | net/ipv4/ipmr.c | ipmr_forward(skb, skb->protocol); |
skb | 255 | net/ipv4/ipmr.c | struct sk_buff *skb=alloc_skb(128, GFP_ATOMIC); |
skb | 258 | net/ipv4/ipmr.c | if(!skb) |
skb | 261 | net/ipv4/ipmr.c | skb->free=1; |
skb | 267 | net/ipv4/ipmr.c | skb->ip_hdr=(struct iphdr *)skb_put(skb,ihl); |
skb | 268 | net/ipv4/ipmr.c | skb->h.iph=skb->ip_hdr; |
skb | 269 | net/ipv4/ipmr.c | memcpy(skb->data,pkt->data,ihl); |
skb | 270 | net/ipv4/ipmr.c | skb->ip_hdr->protocol = 0; /* Flag to the kernel this is a route add */ |
skb | 276 | net/ipv4/ipmr.c | igmp=(struct igmphdr *)skb_put(skb,sizeof(struct igmphdr)); |
skb | 279 | net/ipv4/ipmr.c | skb->ip_hdr->tot_len=htons(skb->len); /* Fix the length */ |
skb | 284 | net/ipv4/ipmr.c | if(sock_queue_rcv_skb(mroute_socket,skb)<0) |
skb | 286 | net/ipv4/ipmr.c | skb->sk=NULL; |
skb | 287 | net/ipv4/ipmr.c | kfree_skb(skb, FREE_READ); |
skb | 296 | net/ipv4/ipmr.c | static void ipmr_cache_unresolved(struct mfc_cache *cache, vifi_t vifi, struct sk_buff *skb, int is_frag) |
skb | 305 | net/ipv4/ipmr.c | kfree_skb(skb, FREE_WRITE); |
skb | 312 | net/ipv4/ipmr.c | cache->mfc_origin=skb->ip_hdr->saddr; |
skb | 313 | net/ipv4/ipmr.c | cache->mfc_mcastgrp=skb->ip_hdr->daddr; |
skb | 330 | net/ipv4/ipmr.c | ipmr_cache_report(skb); |
skb | 337 | net/ipv4/ipmr.c | kfree_skb(skb, FREE_WRITE); |
skb | 345 | net/ipv4/ipmr.c | skb->protocol=is_frag; |
skb | 346 | net/ipv4/ipmr.c | skb_queue_tail(&cache->mfc_unresolved,skb); |
skb | 702 | net/ipv4/ipmr.c | static void ipmr_queue_xmit(struct sk_buff *skb, struct vif_device *vif, struct device *in_dev, int frag) |
skb | 705 | net/ipv4/ipmr.c | __u32 raddr=skb->raddr; |
skb | 712 | net/ipv4/ipmr.c | vif->bytes_out+=skb->len; |
skb | 713 | net/ipv4/ipmr.c | skb->dev=vif->dev; |
skb | 714 | net/ipv4/ipmr.c | skb->raddr=skb->h.iph->daddr; |
skb | 719 | net/ipv4/ipmr.c | if(vif->dev==NULL || ip_forward(skb, in_dev, frag|IPFWD_MULTICASTING|tunnel, raddr)==-1) |
skb | 720 | net/ipv4/ipmr.c | kfree_skb(skb, FREE_WRITE); |
skb | 727 | net/ipv4/ipmr.c | void ipmr_forward(struct sk_buff *skb, int is_frag) |
skb | 732 | net/ipv4/ipmr.c | int vif=ipmr_vifi_find(skb->dev); |
skb | 735 | net/ipv4/ipmr.c | kfree_skb(skb, FREE_WRITE); |
skb | 740 | net/ipv4/ipmr.c | vif_table[vif].bytes_in+=skb->len; |
skb | 742 | net/ipv4/ipmr.c | cache=ipmr_cache_find(skb->ip_hdr->saddr,skb->ip_hdr->daddr); |
skb | 749 | net/ipv4/ipmr.c | ipmr_cache_unresolved(cache,vif,skb, is_frag); |
skb | 761 | net/ipv4/ipmr.c | if(skb->ip_hdr->ttl > cache->mfc_ttls[ct] && cache->mfc_ttls[ct]>0) |
skb | 769 | net/ipv4/ipmr.c | skb2=skb_copy(skb, GFP_ATOMIC); |
skb | 773 | net/ipv4/ipmr.c | ipmr_queue_xmit(skb2, &vif_table[psend], skb->dev, is_frag); |
skb | 781 | net/ipv4/ipmr.c | kfree_skb(skb, FREE_WRITE); |
skb | 784 | net/ipv4/ipmr.c | ipmr_queue_xmit(skb, &vif_table[psend], skb->dev, is_frag); |
skb | 74 | net/ipv4/packet.c | int packet_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 90 | net/ipv4/packet.c | skb_push(skb,skb->data-skb->mac.raw); |
skb | 96 | net/ipv4/packet.c | skb->dev = dev; |
skb | 103 | net/ipv4/packet.c | if(sock_queue_rcv_skb(sk,skb)<0) |
skb | 105 | net/ipv4/packet.c | skb->sk = NULL; |
skb | 106 | net/ipv4/packet.c | kfree_skb(skb, FREE_READ); |
skb | 126 | net/ipv4/packet.c | struct sk_buff *skb; |
skb | 168 | net/ipv4/packet.c | skb = sock_wmalloc(sk, len, 0, GFP_KERNEL); |
skb | 176 | net/ipv4/packet.c | if (skb == NULL) |
skb | 185 | net/ipv4/packet.c | skb->sk = sk; |
skb | 186 | net/ipv4/packet.c | skb->free = 1; |
skb | 187 | net/ipv4/packet.c | memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len); |
skb | 188 | net/ipv4/packet.c | skb->arp = 1; /* No ARP needs doing on this (complete) frame */ |
skb | 195 | net/ipv4/packet.c | dev_queue_xmit(skb, dev, sk->priority); |
skb | 197 | net/ipv4/packet.c | kfree_skb(skb, FREE_WRITE); |
skb | 383 | net/ipv4/packet.c | struct sk_buff *skb; |
skb | 411 | net/ipv4/packet.c | skb=skb_recv_datagram(sk,flags,noblock,&err); |
skb | 419 | net/ipv4/packet.c | if(skb==NULL) |
skb | 427 | net/ipv4/packet.c | copied = min(len, skb->len); |
skb | 429 | net/ipv4/packet.c | memcpy_toiovec(msg->msg_iov, skb->data, copied); /* We can't use skb_copy_datagram here */ |
skb | 430 | net/ipv4/packet.c | sk->stamp=skb->stamp; |
skb | 438 | net/ipv4/packet.c | saddr->sa_family = skb->dev->type; |
skb | 439 | net/ipv4/packet.c | strncpy(saddr->sa_data,skb->dev->name, 15); |
skb | 447 | net/ipv4/packet.c | skb_free_datagram(skb); |
skb | 194 | net/ipv4/rarp.c | static int rarp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 199 | net/ipv4/rarp.c | struct arphdr *rarp = (struct arphdr *) skb->data; |
skb | 200 | net/ipv4/rarp.c | unsigned char *rarp_ptr = skb_pull(skb,sizeof(struct arphdr)); |
skb | 212 | net/ipv4/rarp.c | kfree_skb(skb, FREE_READ); |
skb | 221 | net/ipv4/rarp.c | kfree_skb(skb, FREE_READ); |
skb | 239 | net/ipv4/rarp.c | kfree_skb(skb, FREE_READ); |
skb | 275 | net/ipv4/rarp.c | kfree_skb(skb, FREE_READ); |
skb | 119 | net/ipv4/raw.c | int raw_rcv(struct sock *sk, struct sk_buff *skb, struct device *dev, __u32 saddr, __u32 daddr) |
skb | 122 | net/ipv4/raw.c | skb->sk = sk; |
skb | 123 | net/ipv4/raw.c | skb_trim(skb,ntohs(skb->ip_hdr->tot_len)); |
skb | 125 | net/ipv4/raw.c | skb->h.raw = (unsigned char *) skb->ip_hdr; |
skb | 126 | net/ipv4/raw.c | skb->dev = dev; |
skb | 127 | net/ipv4/raw.c | skb->saddr = daddr; |
skb | 128 | net/ipv4/raw.c | skb->daddr = saddr; |
skb | 137 | net/ipv4/raw.c | skb->ip_hdr->tot_len=ntohs(skb->ip_hdr->tot_len-4*skb->ip_hdr->ihl); |
skb | 142 | net/ipv4/raw.c | if(sock_queue_rcv_skb(sk,skb)<0) |
skb | 145 | net/ipv4/raw.c | skb->sk=NULL; |
skb | 146 | net/ipv4/raw.c | kfree_skb(skb, FREE_READ); |
skb | 317 | net/ipv4/raw.c | struct sk_buff *skb; |
skb | 330 | net/ipv4/raw.c | skb=skb_recv_datagram(sk,flags,noblock,&err); |
skb | 331 | net/ipv4/raw.c | if(skb==NULL) |
skb | 334 | net/ipv4/raw.c | copied = min(len, skb->len); |
skb | 336 | net/ipv4/raw.c | skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); |
skb | 337 | net/ipv4/raw.c | sk->stamp=skb->stamp; |
skb | 343 | net/ipv4/raw.c | sin->sin_addr.s_addr = skb->daddr; |
skb | 345 | net/ipv4/raw.c | skb_free_datagram(skb); |
skb | 460 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 464 | net/ipv4/tcp.c | skb=tcp_find_established(s); |
skb | 465 | net/ipv4/tcp.c | if(skb!=NULL) |
skb | 466 | net/ipv4/tcp.c | skb_unlink(skb); /* Take it off the queue */ |
skb | 468 | net/ipv4/tcp.c | return skb; |
skb | 479 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 481 | net/ipv4/tcp.c | while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) |
skb | 483 | net/ipv4/tcp.c | skb->sk->dead=1; |
skb | 484 | net/ipv4/tcp.c | tcp_close(skb->sk, 0); |
skb | 485 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 604 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 613 | net/ipv4/tcp.c | if (sk == NULL || (skb = skb_peek(&sk->receive_queue)) == NULL) |
skb | 630 | net/ipv4/tcp.c | if (before(counted, skb->seq)) /* Found a hole so stops here */ |
skb | 632 | net/ipv4/tcp.c | sum = skb->len - (counted - skb->seq); /* Length - header but start from where we are up to (avoid overlaps) */ |
skb | 633 | net/ipv4/tcp.c | if (skb->h.th->syn) |
skb | 638 | net/ipv4/tcp.c | if (skb->h.th->syn) |
skb | 658 | net/ipv4/tcp.c | if (skb->h.th->urg) |
skb | 660 | net/ipv4/tcp.c | if (amount && skb->h.th->psh) break; |
skb | 661 | net/ipv4/tcp.c | skb = skb->next; |
skb | 663 | net/ipv4/tcp.c | while(skb != (struct sk_buff *)&sk->receive_queue); |
skb | 807 | net/ipv4/tcp.c | unsigned long daddr, int len, struct sk_buff *skb) |
skb | 814 | net/ipv4/tcp.c | csum_partial((char *)th,sizeof(*th),skb->csum)); |
skb | 866 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 1018 | net/ipv4/tcp.c | if ((skb = tcp_dequeue_partial(sk)) != NULL) |
skb | 1022 | net/ipv4/tcp.c | tcp_size = skb->tail - (unsigned char *)(skb->h.th + 1); |
skb | 1034 | net/ipv4/tcp.c | memcpy_fromfs(skb_put(skb,copy), from, copy); |
skb | 1035 | net/ipv4/tcp.c | skb->csum = csum_partial(skb->tail - tcp_size, tcp_size, 0); |
skb | 1043 | net/ipv4/tcp.c | tcp_send_skb(sk, skb); |
skb | 1045 | net/ipv4/tcp.c | tcp_enqueue_partial(skb, sk); |
skb | 1088 | net/ipv4/tcp.c | skb = sock_wmalloc(sk, sk->mtu + 128 + prot->max_header + 15, 0, GFP_KERNEL); |
skb | 1090 | net/ipv4/tcp.c | send_tmp = skb; |
skb | 1100 | net/ipv4/tcp.c | skb = sock_wmalloc(sk, copy + prot->max_header + 15 , 0, GFP_KERNEL); |
skb | 1108 | net/ipv4/tcp.c | if (skb == NULL) |
skb | 1140 | net/ipv4/tcp.c | skb->sk = sk; |
skb | 1141 | net/ipv4/tcp.c | skb->free = 0; |
skb | 1142 | net/ipv4/tcp.c | skb->localroute = sk->localroute|(flags&MSG_DONTROUTE); |
skb | 1149 | net/ipv4/tcp.c | tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev, |
skb | 1150 | net/ipv4/tcp.c | IPPROTO_TCP, sk->opt, skb->truesize,sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache); |
skb | 1153 | net/ipv4/tcp.c | sock_wfree(sk, skb); |
skb | 1160 | net/ipv4/tcp.c | skb->ip_hdr->frag_off |= htons(IP_DF); |
skb | 1162 | net/ipv4/tcp.c | skb->dev = dev; |
skb | 1163 | net/ipv4/tcp.c | skb->h.th =(struct tcphdr *)skb_put(skb,sizeof(struct tcphdr)); |
skb | 1164 | net/ipv4/tcp.c | tmp = tcp_build_header(skb->h.th, sk, seglen-copy); |
skb | 1167 | net/ipv4/tcp.c | sock_wfree(sk, skb); |
skb | 1176 | net/ipv4/tcp.c | skb->h.th->urg = 1; |
skb | 1177 | net/ipv4/tcp.c | skb->h.th->urg_ptr = ntohs(copy); |
skb | 1180 | net/ipv4/tcp.c | skb->csum = csum_partial_copy_fromuser(from, |
skb | 1181 | net/ipv4/tcp.c | skb_put(skb,copy), copy, 0); |
skb | 1187 | net/ipv4/tcp.c | skb->free = 0; |
skb | 1195 | net/ipv4/tcp.c | tcp_send_skb(sk, skb); |
skb | 1367 | net/ipv4/tcp.c | static inline void tcp_eat_skb(struct sock *sk, struct sk_buff * skb) |
skb | 1370 | net/ipv4/tcp.c | skb->sk = sk; |
skb | 1371 | net/ipv4/tcp.c | __skb_unlink(skb, &sk->receive_queue); |
skb | 1372 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 1384 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 1391 | net/ipv4/tcp.c | while ((skb=skb_peek(&sk->receive_queue)) != NULL) { |
skb | 1392 | net/ipv4/tcp.c | if (!skb->used || skb->users) |
skb | 1394 | net/ipv4/tcp.c | tcp_eat_skb(sk, skb); |
skb | 1488 | net/ipv4/tcp.c | struct sk_buff * skb; |
skb | 1504 | net/ipv4/tcp.c | skb = skb_peek(&sk->receive_queue); |
skb | 1507 | net/ipv4/tcp.c | if (!skb) |
skb | 1509 | net/ipv4/tcp.c | if (before(*seq, skb->seq)) |
skb | 1511 | net/ipv4/tcp.c | offset = *seq - skb->seq; |
skb | 1512 | net/ipv4/tcp.c | if (skb->h.th->syn) |
skb | 1514 | net/ipv4/tcp.c | if (offset < skb->len) |
skb | 1516 | net/ipv4/tcp.c | if (skb->h.th->fin) |
skb | 1519 | net/ipv4/tcp.c | skb->used = 1; |
skb | 1520 | net/ipv4/tcp.c | skb = skb->next; |
skb | 1522 | net/ipv4/tcp.c | while (skb != (struct sk_buff *)&sk->receive_queue); |
skb | 1578 | net/ipv4/tcp.c | skb->users++; |
skb | 1584 | net/ipv4/tcp.c | used = skb->len - offset; |
skb | 1623 | net/ipv4/tcp.c | memcpy_toiovec(msg->msg_iov,((unsigned char *)skb->h.th) + |
skb | 1624 | net/ipv4/tcp.c | skb->h.th->doff*4 + offset, used); |
skb | 1634 | net/ipv4/tcp.c | skb->users --; |
skb | 1638 | net/ipv4/tcp.c | if (used + offset < skb->len) |
skb | 1645 | net/ipv4/tcp.c | if (skb->h.th->fin) |
skb | 1649 | net/ipv4/tcp.c | skb->used = 1; |
skb | 1650 | net/ipv4/tcp.c | if (!skb->users) |
skb | 1651 | net/ipv4/tcp.c | tcp_eat_skb(sk, skb); |
skb | 1663 | net/ipv4/tcp.c | skb->used = 1; |
skb | 1830 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 1838 | net/ipv4/tcp.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) |
skb | 1839 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 1876 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 1893 | net/ipv4/tcp.c | while((skb = tcp_dequeue_established(sk)) == NULL) |
skb | 1919 | net/ipv4/tcp.c | newsk = skb->sk; |
skb | 1921 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
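Note: the net/ipv4/tcp.c rows above include the send-side coalescing step: a pending partial segment is pulled back with tcp_dequeue_partial(), the next chunk of user data is appended, the payload checksum is redone, and the segment is either sent or parked again. A condensed sketch (illustrative, not verbatim; copy and from stand for the chunk length and user pointer of the send loop, and the "full segment" test is paraphrased):

    struct sk_buff *skb;

    if ((skb = tcp_dequeue_partial(sk)) != NULL) {
        /* payload bytes already sitting behind the TCP header */
        int tcp_size = skb->tail - (unsigned char *)(skb->h.th + 1);

        memcpy_fromfs(skb_put(skb, copy), from, copy);   /* append user data */
        tcp_size += copy;

        /* recompute the checksum over the enlarged payload */
        skb->csum = csum_partial(skb->tail - tcp_size, tcp_size, 0);

        if (segment_now_full)                   /* hypothetical condition name */
            tcp_send_skb(sk, skb);              /* hand it to the output engine */
        else
            tcp_enqueue_partial(skb, sk);       /* park it for the next write */
    }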
skb | 204 | net/ipv4/tcp_input.c | static int tcp_reset(struct sock *sk, struct sk_buff *skb) |
skb | 235 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 313 | net/ipv4/tcp_input.c | static void tcp_conn_request(struct sock *sk, struct sk_buff *skb, |
skb | 320 | net/ipv4/tcp_input.c | th = skb->h.th; |
skb | 333 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 348 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 365 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 379 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 382 | net/ipv4/tcp_input.c | if (ip_options_echo(sk->opt, opt, daddr, saddr, skb)) |
skb | 387 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 419 | net/ipv4/tcp_input.c | newsk->acked_seq = skb->seq+1; |
skb | 420 | net/ipv4/tcp_input.c | newsk->lastwin_seq = skb->seq+1; |
skb | 422 | net/ipv4/tcp_input.c | newsk->copied_seq = skb->seq+1; |
skb | 423 | net/ipv4/tcp_input.c | newsk->fin_seq = skb->seq; |
skb | 440 | net/ipv4/tcp_input.c | newsk->dummy_th.source = skb->h.th->dest; |
skb | 441 | net/ipv4/tcp_input.c | newsk->dummy_th.dest = skb->h.th->source; |
skb | 452 | net/ipv4/tcp_input.c | newsk->acked_seq = skb->seq + 1; |
skb | 453 | net/ipv4/tcp_input.c | newsk->copied_seq = skb->seq + 1; |
skb | 461 | net/ipv4/tcp_input.c | newsk->ip_tos=skb->ip_hdr->tos; |
skb | 510 | net/ipv4/tcp_input.c | tcp_options(newsk,skb->h.th); |
skb | 513 | net/ipv4/tcp_input.c | tcp_send_synack(newsk, sk, skb); |
skb | 529 | net/ipv4/tcp_input.c | struct sk_buff *skb; |
skb | 544 | net/ipv4/tcp_input.c | skb = skb2; |
skb | 545 | net/ipv4/tcp_input.c | skb2 = skb->link3; |
skb | 546 | net/ipv4/tcp_input.c | skb->link3 = NULL; |
skb | 547 | net/ipv4/tcp_input.c | if (after(skb->end_seq, window_seq)) |
skb | 552 | net/ipv4/tcp_input.c | if (skb->next != NULL) |
skb | 554 | net/ipv4/tcp_input.c | skb_unlink(skb); |
skb | 558 | net/ipv4/tcp_input.c | skb_queue_head(&sk->write_queue,skb); |
skb | 560 | net/ipv4/tcp_input.c | skb_append(wskb,skb); |
skb | 561 | net/ipv4/tcp_input.c | wskb = skb; |
skb | 567 | net/ipv4/tcp_input.c | sk->send_head = skb; |
skb | 568 | net/ipv4/tcp_input.c | sk->send_tail = skb; |
skb | 572 | net/ipv4/tcp_input.c | sk->send_tail->link3 = skb; |
skb | 573 | net/ipv4/tcp_input.c | sk->send_tail = skb; |
skb | 575 | net/ipv4/tcp_input.c | skb->link3 = NULL; |
skb | 1103 | net/ipv4/tcp_input.c | static int tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th) |
skb | 1105 | net/ipv4/tcp_input.c | sk->fin_seq = skb->end_seq; |
skb | 1190 | net/ipv4/tcp_input.c | static int tcp_data(struct sk_buff *skb, struct sock *sk, |
skb | 1198 | net/ipv4/tcp_input.c | th = skb->h.th; |
skb | 1199 | net/ipv4/tcp_input.c | skb_pull(skb,th->doff*4); |
skb | 1200 | net/ipv4/tcp_input.c | skb_trim(skb,len-(th->doff*4)); |
skb | 1207 | net/ipv4/tcp_input.c | sk->bytes_rcv += skb->len; |
skb | 1209 | net/ipv4/tcp_input.c | if (skb->len == 0 && !th->fin) |
skb | 1217 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1236 | net/ipv4/tcp_input.c | if(skb->len) /* We don't care if it's just an ack or |
skb | 1239 | net/ipv4/tcp_input.c | new_seq = skb->seq + skb->len + th->syn; /* Right edge of _data_ part of frame */ |
skb | 1262 | net/ipv4/tcp_input.c | tcp_send_reset(sk->saddr, sk->daddr, skb->h.th, |
skb | 1263 | net/ipv4/tcp_input.c | sk->prot, NULL, skb->dev, sk->ip_tos, sk->ip_ttl); |
skb | 1269 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1292 | net/ipv4/tcp_input.c | skb_queue_head(&sk->receive_queue,skb); |
skb | 1303 | net/ipv4/tcp_input.c | printk("skb->seq = %d\n",skb->seq); |
skb | 1316 | net/ipv4/tcp_input.c | if (skb->seq==skb1->seq && skb->len>=skb1->len) |
skb | 1318 | net/ipv4/tcp_input.c | skb_append(skb1,skb); |
skb | 1330 | net/ipv4/tcp_input.c | if (after(skb->seq+1, skb1->seq)) |
skb | 1332 | net/ipv4/tcp_input.c | skb_append(skb1,skb); |
skb | 1341 | net/ipv4/tcp_input.c | skb_queue_head(&sk->receive_queue, skb); |
skb | 1363 | net/ipv4/tcp_input.c | if ((!dup_dumped && (skb1 == NULL || skb1->acked)) || before(skb->seq, sk->acked_seq+1)) |
skb | 1365 | net/ipv4/tcp_input.c | if (before(skb->seq, sk->acked_seq+1)) |
skb | 1368 | net/ipv4/tcp_input.c | if (after(skb->end_seq, sk->acked_seq)) |
skb | 1369 | net/ipv4/tcp_input.c | sk->acked_seq = skb->end_seq; |
skb | 1371 | net/ipv4/tcp_input.c | skb->acked = 1; |
skb | 1378 | net/ipv4/tcp_input.c | if (skb->h.th->fin) |
skb | 1380 | net/ipv4/tcp_input.c | tcp_fin(skb,sk,skb->h.th); |
skb | 1383 | net/ipv4/tcp_input.c | for(skb2 = skb->next; |
skb | 1399 | net/ipv4/tcp_input.c | tcp_fin(skb,sk,skb->h.th); |
skb | 1446 | net/ipv4/tcp_input.c | if (!skb->acked) |
skb | 1574 | net/ipv4/tcp_input.c | int tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 1588 | net/ipv4/tcp_input.c | th = skb->h.th; |
skb | 1589 | net/ipv4/tcp_input.c | sk = skb->sk; |
skb | 1592 | net/ipv4/tcp_input.c | if (skb->pkt_type!=PACKET_HOST) |
skb | 1599 | net/ipv4/tcp_input.c | skb_pull(skb, skb->h.raw-skb->data); |
skb | 1604 | net/ipv4/tcp_input.c | switch (skb->ip_summed) |
skb | 1607 | net/ipv4/tcp_input.c | skb->csum = csum_partial((char *)th, len, 0); |
skb | 1609 | net/ipv4/tcp_input.c | if (tcp_check(th, len, saddr, daddr, skb->csum)) |
skb | 1617 | net/ipv4/tcp_input.c | skb->sk = sk; |
skb | 1618 | net/ipv4/tcp_input.c | skb->seq = ntohl(th->seq); |
skb | 1619 | net/ipv4/tcp_input.c | skb->end_seq = skb->seq + th->syn + th->fin + len - th->doff*4; |
skb | 1620 | net/ipv4/tcp_input.c | skb->ack_seq = ntohl(th->ack_seq); |
skb | 1622 | net/ipv4/tcp_input.c | skb->acked = 0; |
skb | 1623 | net/ipv4/tcp_input.c | skb->used = 0; |
skb | 1624 | net/ipv4/tcp_input.c | skb->free = 1; |
skb | 1625 | net/ipv4/tcp_input.c | skb->saddr = daddr; |
skb | 1626 | net/ipv4/tcp_input.c | skb->daddr = saddr; |
skb | 1632 | net/ipv4/tcp_input.c | skb_queue_tail(&sk->back_log, skb); |
skb | 1663 | net/ipv4/tcp_input.c | skb->sk=sk; |
skb | 1664 | net/ipv4/tcp_input.c | sk->rmem_alloc += skb->truesize; |
skb | 1698 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1707 | net/ipv4/tcp_input.c | tcp_conn_request(sk, skb, daddr, saddr, opt, dev, tcp_init_seq()); |
skb | 1729 | net/ipv4/tcp_input.c | if (sk->state == TCP_SYN_RECV && th->syn && skb->seq+1 == sk->acked_seq) |
skb | 1731 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1748 | net/ipv4/tcp_input.c | if(!tcp_ack(sk,th,skb->ack_seq,len)) |
skb | 1755 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1760 | net/ipv4/tcp_input.c | return tcp_reset(sk,skb); |
skb | 1768 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1777 | net/ipv4/tcp_input.c | sk->acked_seq = skb->seq+1; |
skb | 1778 | net/ipv4/tcp_input.c | sk->lastwin_seq = skb->seq+1; |
skb | 1779 | net/ipv4/tcp_input.c | sk->fin_seq = skb->seq; |
skb | 1808 | net/ipv4/tcp_input.c | return tcp_reset(sk,skb); |
skb | 1818 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1837 | net/ipv4/tcp_input.c | after(skb->seq, sk->acked_seq) && !th->rst) |
skb | 1843 | net/ipv4/tcp_input.c | sk->rmem_alloc -= skb->truesize; |
skb | 1844 | net/ipv4/tcp_input.c | skb->sk = NULL; |
skb | 1853 | net/ipv4/tcp_input.c | skb->sk = sk; |
skb | 1854 | net/ipv4/tcp_input.c | sk->rmem_alloc += skb->truesize; |
skb | 1855 | net/ipv4/tcp_input.c | tcp_conn_request(sk, skb, daddr, saddr,opt, dev,seq+128000); |
skb | 1859 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1871 | net/ipv4/tcp_input.c | if (!tcp_sequence(sk, skb->seq, skb->end_seq)) |
skb | 1874 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1880 | net/ipv4/tcp_input.c | return tcp_reset(sk,skb); |
skb | 1888 | net/ipv4/tcp_input.c | tcp_send_reset(daddr,saddr,th, &tcp_prot, opt, dev, skb->ip_hdr->tos, 255); |
skb | 1889 | net/ipv4/tcp_input.c | return tcp_reset(sk,skb); |
skb | 1899 | net/ipv4/tcp_input.c | if(th->ack && !tcp_ack(sk,th,skb->ack_seq,len)) |
skb | 1909 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1924 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1940 | net/ipv4/tcp_input.c | if(tcp_data(skb,sk, saddr, len)) |
skb | 1941 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
skb | 1954 | net/ipv4/tcp_input.c | tcp_send_reset(daddr, saddr, th, &tcp_prot, opt,dev,skb->ip_hdr->tos,255); |
skb | 1960 | net/ipv4/tcp_input.c | skb->sk = NULL; |
skb | 1961 | net/ipv4/tcp_input.c | kfree_skb(skb, FREE_READ); |
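Note: the net/ipv4/tcp_input.c rows above show that in this stack the per-segment TCP state (sequence numbers, acked/used/free flags, swapped addresses) lives directly in the sk_buff, filled in by tcp_rcv() before the buffer is queued. A condensed sketch of that setup (illustrative, not verbatim; th, len, saddr and daddr are the header pointer and arguments of tcp_rcv(), and the "socket busy" test guarding the backlog is omitted):

    skb_pull(skb, skb->h.raw - skb->data);    /* step past the IP header */
    th = skb->h.th;

    skb->sk      = sk;
    skb->seq     = ntohl(th->seq);
    skb->end_seq = skb->seq + th->syn + th->fin + len - th->doff*4;
    skb->ack_seq = ntohl(th->ack_seq);
    skb->acked   = 0;
    skb->used    = 0;
    skb->free    = 1;
    skb->saddr   = daddr;                     /* stored swapped for the reply path */
    skb->daddr   = saddr;

    /* when the socket is busy the frame is deferred instead of processed: */
    skb_queue_tail(&sk->back_log, skb);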
skb | 31 | net/ipv4/tcp_output.c | void tcp_send_skb(struct sock *sk, struct sk_buff *skb) |
skb | 34 | net/ipv4/tcp_output.c | struct tcphdr * th = skb->h.th; |
skb | 40 | net/ipv4/tcp_output.c | size = skb->len - ((unsigned char *) th - skb->data); |
skb | 46 | net/ipv4/tcp_output.c | if (size < sizeof(struct tcphdr) || size > skb->len) |
skb | 49 | net/ipv4/tcp_output.c | skb, skb->data, th, skb->len); |
skb | 50 | net/ipv4/tcp_output.c | kfree_skb(skb, FREE_WRITE); |
skb | 65 | net/ipv4/tcp_output.c | kfree_skb(skb,FREE_WRITE); |
skb | 75 | net/ipv4/tcp_output.c | skb->seq = ntohl(th->seq); |
skb | 76 | net/ipv4/tcp_output.c | skb->end_seq = skb->seq + size - 4*th->doff; |
skb | 86 | net/ipv4/tcp_output.c | if (after(skb->end_seq, sk->window_seq) || |
skb | 93 | net/ipv4/tcp_output.c | if (skb->next != NULL) |
skb | 96 | net/ipv4/tcp_output.c | skb_unlink(skb); |
skb | 98 | net/ipv4/tcp_output.c | skb_queue_tail(&sk->write_queue, skb); |
skb | 113 | net/ipv4/tcp_output.c | tcp_send_check(th, sk->saddr, sk->daddr, size, skb); |
skb | 123 | net/ipv4/tcp_output.c | sk->prot->queue_xmit(sk, skb->dev, skb, 0); |
skb | 150 | net/ipv4/tcp_output.c | struct sk_buff * skb; |
skb | 155 | net/ipv4/tcp_output.c | skb = sk->partial; |
skb | 156 | net/ipv4/tcp_output.c | if (skb) { |
skb | 161 | net/ipv4/tcp_output.c | return skb; |
skb | 170 | net/ipv4/tcp_output.c | struct sk_buff *skb; |
skb | 174 | net/ipv4/tcp_output.c | while ((skb = tcp_dequeue_partial(sk)) != NULL) |
skb | 175 | net/ipv4/tcp_output.c | tcp_send_skb(sk, skb); |
skb | 182 | net/ipv4/tcp_output.c | void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk) |
skb | 192 | net/ipv4/tcp_output.c | sk->partial = skb; |
skb | 214 | net/ipv4/tcp_output.c | struct sk_buff *skb; |
skb | 232 | net/ipv4/tcp_output.c | while((skb = skb_peek(&sk->write_queue)) != NULL && |
skb | 233 | net/ipv4/tcp_output.c | before(skb->end_seq, sk->window_seq + 1) && |
skb | 236 | net/ipv4/tcp_output.c | before(skb->end_seq, sk->rcv_ack_seq + 1)) |
skb | 239 | net/ipv4/tcp_output.c | IS_SKB(skb); |
skb | 240 | net/ipv4/tcp_output.c | skb_unlink(skb); |
skb | 246 | net/ipv4/tcp_output.c | if (before(skb->end_seq, sk->rcv_ack_seq +1)) |
skb | 254 | net/ipv4/tcp_output.c | kfree_skb(skb, FREE_WRITE); |
skb | 270 | net/ipv4/tcp_output.c | iph = skb->ip_hdr; |
skb | 272 | net/ipv4/tcp_output.c | size = skb->len - (((unsigned char *) th) - skb->data); |
skb | 284 | net/ipv4/tcp_output.c | tcp_send_check(th, sk->saddr, sk->daddr, size, skb); |
skb | 286 | net/ipv4/tcp_output.c | sk->sent_seq = skb->end_seq; |
skb | 292 | net/ipv4/tcp_output.c | sk->prot->queue_xmit(sk, skb->dev, skb, skb->free); |
skb | 315 | net/ipv4/tcp_output.c | struct sk_buff * skb; |
skb | 322 | net/ipv4/tcp_output.c | skb = sk->send_head; |
skb | 324 | net/ipv4/tcp_output.c | while (skb != NULL) |
skb | 330 | net/ipv4/tcp_output.c | dev = skb->dev; |
skb | 331 | net/ipv4/tcp_output.c | IS_SKB(skb); |
skb | 332 | net/ipv4/tcp_output.c | skb->when = jiffies; |
skb | 344 | net/ipv4/tcp_output.c | if (skb_device_locked(skb)) |
skb | 351 | net/ipv4/tcp_output.c | skb_pull(skb,((unsigned char *)skb->ip_hdr)-skb->data); |
skb | 362 | net/ipv4/tcp_output.c | iph = (struct iphdr *)skb->data; |
skb | 378 | net/ipv4/tcp_output.c | struct options * opt = (struct options*)skb->proto_priv; |
skb | 379 | net/ipv4/tcp_output.c | rt = ip_check_route(&sk->ip_route_cache, opt->srr?opt->faddr:iph->daddr, skb->localroute); |
skb | 391 | net/ipv4/tcp_output.c | if(skb->sk) |
skb | 393 | net/ipv4/tcp_output.c | skb->sk->err_soft=ENETUNREACH; |
skb | 394 | net/ipv4/tcp_output.c | skb->sk->error_report(skb->sk); |
skb | 400 | net/ipv4/tcp_output.c | skb->raddr=rt->rt_gateway; |
skb | 401 | net/ipv4/tcp_output.c | skb->dev=dev; |
skb | 402 | net/ipv4/tcp_output.c | skb->arp=1; |
skb | 405 | net/ipv4/tcp_output.c | memcpy(skb_push(skb,dev->hard_header_len),rt->rt_hh->hh_data,dev->hard_header_len); |
skb | 408 | net/ipv4/tcp_output.c | skb->arp = 0; |
skb | 416 | net/ipv4/tcp_output.c | if(dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, skb->len)<0) |
skb | 417 | net/ipv4/tcp_output.c | skb->arp=0; |
skb | 435 | net/ipv4/tcp_output.c | tcp_send_check(th, sk->saddr, sk->daddr, size, skb); |
skb | 451 | net/ipv4/tcp_output.c | if (sk && !skb_device_locked(skb)) |
skb | 454 | net/ipv4/tcp_output.c | skb_unlink(skb); |
skb | 457 | net/ipv4/tcp_output.c | dev_queue_xmit(skb, dev, sk->priority); |
skb | 484 | net/ipv4/tcp_output.c | skb = skb->link3; |
skb | 664 | net/ipv4/tcp_output.c | void tcp_send_synack(struct sock * newsk, struct sock * sk, struct sk_buff * skb) |
skb | 680 | net/ipv4/tcp_output.c | kfree_skb(skb, FREE_READ); |
skb | 707 | net/ipv4/tcp_output.c | skb->sk = sk; |
skb | 708 | net/ipv4/tcp_output.c | kfree_skb(skb, FREE_READ); |
skb | 715 | net/ipv4/tcp_output.c | memcpy(t1, skb->h.th, sizeof(*t1)); |
skb | 721 | net/ipv4/tcp_output.c | t1->dest = skb->h.th->source; |
skb | 742 | net/ipv4/tcp_output.c | skb->sk = newsk; |
skb | 748 | net/ipv4/tcp_output.c | sk->rmem_alloc -= skb->truesize; |
skb | 749 | net/ipv4/tcp_output.c | newsk->rmem_alloc += skb->truesize; |
skb | 751 | net/ipv4/tcp_output.c | skb_queue_tail(&sk->receive_queue,skb); |
skb | 868 | net/ipv4/tcp_output.c | struct sk_buff *buff,*skb; |
skb | 892 | net/ipv4/tcp_output.c | (skb=skb_peek(&sk->write_queue))) |
skb | 918 | net/ipv4/tcp_output.c | iph = (struct iphdr *)skb->ip_hdr; |
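Note: the net/ipv4/tcp_output.c rows above capture the core decision in tcp_send_skb(): compute the segment size, stamp skb->seq/end_seq from the header, then either park the buffer on the socket write queue (out of window or otherwise not sendable) or checksum it and pass it to the IP layer. A condensed sketch (illustrative, not verbatim; the extra "cannot send now" tests are collapsed into the window check):

    void tcp_send_skb_sketch(struct sock *sk, struct sk_buff *skb)
    {
        struct tcphdr *th = skb->h.th;
        int size = skb->len - ((unsigned char *)th - skb->data);

        skb->seq     = ntohl(th->seq);
        skb->end_seq = skb->seq + size - 4*th->doff;

        if (after(skb->end_seq, sk->window_seq)) {
            /* receiver window (or Nagle/ack state) forbids sending now */
            skb_queue_tail(&sk->write_queue, skb);
        } else {
            tcp_send_check(th, sk->saddr, sk->daddr, size, skb);
            sk->sent_seq = skb->end_seq;
            sk->prot->queue_xmit(sk, skb->dev, skb, 0);
        }
    }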
skb | 235 | net/ipv4/tcp_timer.c | struct sk_buff *skb; |
skb | 240 | net/ipv4/tcp_timer.c | skb = sk->send_head; |
skb | 241 | net/ipv4/tcp_timer.c | if (!skb) |
skb | 253 | net/ipv4/tcp_timer.c | if (jiffies < skb->when + sk->rto) |
skb | 257 | net/ipv4/tcp_timer.c | tcp_reset_xmit_timer (sk, TIME_WRITE, skb->when + sk->rto - jiffies); |
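Note: the net/ipv4/tcp_timer.c rows above show the retransmit-timer guard: the oldest unacknowledged buffer is at sk->send_head, and if its timeout has not yet expired the timer is simply re-armed for the remainder. A small sketch (illustrative; the wrapper name is made up and the retransmission itself is omitted):

    static void tcp_retransmit_check_sketch(struct sock *sk)
    {
        struct sk_buff *skb = sk->send_head;    /* oldest unacked segment */

        if (!skb)
            return;                             /* nothing outstanding */

        if (jiffies < skb->when + sk->rto) {
            /* not due yet: sleep again for the time remaining */
            tcp_reset_xmit_timer(sk, TIME_WRITE, skb->when + sk->rto - jiffies);
            return;
        }
        /* due: the caller would now walk send_head and retransmit */
    }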
skb | 138 | net/ipv4/udp.c | static int udp_deliver(struct sock *sk, struct udphdr *uh, struct sk_buff *skb, struct device *dev, long saddr, long daddr, int len); |
skb | 465 | net/ipv4/udp.c | struct sk_buff *skb; |
skb | 470 | net/ipv4/udp.c | skb = skb_peek(&sk->receive_queue); |
skb | 471 | net/ipv4/udp.c | if (skb != NULL) { |
skb | 477 | net/ipv4/udp.c | amount = skb->len-sizeof(struct udphdr); |
skb | 504 | net/ipv4/udp.c | struct sk_buff *skb; |
skb | 520 | net/ipv4/udp.c | skb=skb_recv_datagram(sk,flags,noblock,&er); |
skb | 521 | net/ipv4/udp.c | if(skb==NULL) |
skb | 524 | net/ipv4/udp.c | truesize = skb->len - sizeof(struct udphdr); |
skb | 531 | net/ipv4/udp.c | skb_copy_datagram_iovec(skb,sizeof(struct udphdr),msg->msg_iov,copied); |
skb | 532 | net/ipv4/udp.c | sk->stamp=skb->stamp; |
skb | 538 | net/ipv4/udp.c | sin->sin_port = skb->h.uh->source; |
skb | 539 | net/ipv4/udp.c | sin->sin_addr.s_addr = skb->daddr; |
skb | 542 | net/ipv4/udp.c | skb_free_datagram(skb); |
skb | 594 | net/ipv4/udp.c | int udp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 610 | net/ipv4/udp.c | uh = (struct udphdr *) skb->h.uh; |
skb | 624 | net/ipv4/udp.c | kfree_skb(skb, FREE_WRITE); |
skb | 636 | net/ipv4/udp.c | ( (skb->ip_summed == CHECKSUM_HW) && udp_check(uh, len, saddr, daddr, skb->csum ) ) || |
skb | 637 | net/ipv4/udp.c | ( (skb->ip_summed == CHECKSUM_NONE) && udp_check(uh, len, saddr, daddr,csum_partial((char*)uh, len, 0))) |
skb | 653 | net/ipv4/udp.c | kfree_skb(skb, FREE_WRITE); |
skb | 677 | net/ipv4/udp.c | skb1=skb_clone(skb,GFP_ATOMIC); |
skb | 679 | net/ipv4/udp.c | skb1=skb; |
skb | 687 | net/ipv4/udp.c | kfree_skb(skb, FREE_READ); |
skb | 708 | net/ipv4/udp.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0, dev); |
skb | 714 | net/ipv4/udp.c | skb->sk = NULL; |
skb | 715 | net/ipv4/udp.c | kfree_skb(skb, FREE_WRITE); |
skb | 718 | net/ipv4/udp.c | return udp_deliver(sk,uh,skb,dev, saddr, daddr, len); |
skb | 721 | net/ipv4/udp.c | static int udp_deliver(struct sock *sk, struct udphdr *uh, struct sk_buff *skb, struct device *dev, long saddr, long daddr, int len) |
skb | 723 | net/ipv4/udp.c | skb->sk = sk; |
skb | 724 | net/ipv4/udp.c | skb->dev = dev; |
skb | 725 | net/ipv4/udp.c | skb_trim(skb,len); |
skb | 731 | net/ipv4/udp.c | skb->daddr = saddr; |
skb | 732 | net/ipv4/udp.c | skb->saddr = daddr; |
skb | 742 | net/ipv4/udp.c | if (sock_queue_rcv_skb(sk,skb)<0) |
skb | 747 | net/ipv4/udp.c | skb->sk = NULL; |
skb | 748 | net/ipv4/udp.c | kfree_skb(skb, FREE_WRITE); |
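Note: the net/ipv4/udp.c rows above end in udp_deliver(), which attaches the socket, trims the buffer to the UDP length, records the addresses swapped (ready for a reply) and queues the datagram on the socket. A condensed sketch (illustrative, not verbatim; statistics and use of the uh argument are dropped):

    static int udp_deliver_sketch(struct sock *sk, struct udphdr *uh,
                                  struct sk_buff *skb, struct device *dev,
                                  long saddr, long daddr, int len)
    {
        skb->sk  = sk;
        skb->dev = dev;
        skb_trim(skb, len);          /* cut any padding past the datagram */

        skb->daddr = saddr;          /* stored swapped so replies can reuse them */
        skb->saddr = daddr;

        if (sock_queue_rcv_skb(sk, skb) < 0) {
            /* receive quota exceeded: drop the datagram */
            skb->sk = NULL;
            kfree_skb(skb, FREE_WRITE);
        }
        return 0;
    }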
skb | 180 | net/ipx/af_ipx.c | struct sk_buff *skb; |
skb | 183 | net/ipx/af_ipx.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) { |
skb | 184 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 348 | net/ipx/af_ipx.c | static int ipxitf_def_skb_handler(struct sock *sock, struct sk_buff *skb) |
skb | 352 | net/ipx/af_ipx.c | if((retval = sock_queue_rcv_skb(sock, skb))<0) |
skb | 358 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_WRITE); |
skb | 369 | net/ipx/af_ipx.c | ipxitf_demux_socket(ipx_interface *intrfc, struct sk_buff *skb, int copy) |
skb | 371 | net/ipx/af_ipx.c | ipx_packet *ipx = (ipx_packet *)(skb->h.raw); |
skb | 391 | net/ipx/af_ipx.c | skb1 = skb_clone(skb, GFP_ATOMIC); |
skb | 403 | net/ipx/af_ipx.c | skb1 = skb; |
skb | 425 | net/ipx/af_ipx.c | kfree_skb(skb, FREE_WRITE); |
skb | 433 | net/ipx/af_ipx.c | ipxitf_demux_socket(ipx_interface *intrfc, struct sk_buff *skb, int copy) |
skb | 435 | net/ipx/af_ipx.c | ipx_packet *ipx = (ipx_packet *)(skb->h.raw); |
skb | 478 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_WRITE); |
skb | 492 | net/ipx/af_ipx.c | skb1 = skb_clone(skb, GFP_ATOMIC); |
skb | 498 | net/ipx/af_ipx.c | skb1 = skb; |
skb | 531 | net/ipx/af_ipx.c | ipxitf_adjust_skbuff(ipx_interface *intrfc, struct sk_buff *skb) |
skb | 534 | net/ipx/af_ipx.c | int in_offset = skb->h.raw - skb->head; |
skb | 540 | net/ipx/af_ipx.c | skb->arp = skb->free = 1; |
skb | 541 | net/ipx/af_ipx.c | return skb; |
skb | 545 | net/ipx/af_ipx.c | len = skb->len + out_offset; |
skb | 549 | net/ipx/af_ipx.c | skb2->h.raw=skb_put(skb2,skb->len); |
skb | 552 | net/ipx/af_ipx.c | memcpy(skb2->h.raw, skb->h.raw, skb->len); |
skb | 554 | net/ipx/af_ipx.c | kfree_skb(skb, FREE_WRITE); |
skb | 558 | net/ipx/af_ipx.c | static int ipxitf_send(ipx_interface *intrfc, struct sk_buff *skb, char *node) |
skb | 560 | net/ipx/af_ipx.c | ipx_packet *ipx = (ipx_packet *)(skb->h.raw); |
skb | 592 | net/ipx/af_ipx.c | if(skb->sk) |
skb | 594 | net/ipx/af_ipx.c | skb->sk->wmem_alloc-=skb->truesize; |
skb | 595 | net/ipx/af_ipx.c | skb->sk=NULL; |
skb | 600 | net/ipx/af_ipx.c | return ipxitf_demux_socket(intrfc, skb, 0); |
skb | 607 | net/ipx/af_ipx.c | if (!send_to_wire && skb->sk) |
skb | 609 | net/ipx/af_ipx.c | skb->sk->wmem_alloc-=skb->truesize; |
skb | 610 | net/ipx/af_ipx.c | skb->sk=NULL; |
skb | 612 | net/ipx/af_ipx.c | ipxitf_demux_socket(intrfc, skb, send_to_wire); |
skb | 640 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_WRITE); |
skb | 658 | net/ipx/af_ipx.c | skb = ipxitf_adjust_skbuff(intrfc, skb); |
skb | 659 | net/ipx/af_ipx.c | if (skb == NULL) |
skb | 663 | net/ipx/af_ipx.c | skb->dev = dev; |
skb | 664 | net/ipx/af_ipx.c | skb->protocol = htons(ETH_P_IPX); |
skb | 665 | net/ipx/af_ipx.c | dl->datalink_header(dl, skb, dest_node); |
skb | 671 | net/ipx/af_ipx.c | dump_pkt("IPX snd:", (ipx_packet *)skb->h.raw); |
skb | 672 | net/ipx/af_ipx.c | dump_data("ETH hdr:", skb->data, skb->h.raw - skb->data); |
skb | 679 | net/ipx/af_ipx.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 694 | net/ipx/af_ipx.c | static int ipxitf_rcv(ipx_interface *intrfc, struct sk_buff *skb) |
skb | 696 | net/ipx/af_ipx.c | ipx_packet *ipx = (ipx_packet *) (skb->h.raw); |
skb | 704 | net/ipx/af_ipx.c | if (call_in_firewall(PF_IPX, skb, ipx)!=FW_ACCEPT) |
skb | 706 | net/ipx/af_ipx.c | kfree_skb(skb, FREE_READ); |
skb | 748 | net/ipx/af_ipx.c | if (call_fw_firewall(PF_IPX, skb, ipx)!=FW_ACCEPT) |
skb | 750 | net/ipx/af_ipx.c | kfree_skb(skb, FREE_READ); |
skb | 755 | net/ipx/af_ipx.c | if ((skb->pkt_type != PACKET_BROADCAST) && |
skb | 756 | net/ipx/af_ipx.c | (skb->pkt_type != PACKET_MULTICAST)) |
skb | 757 | net/ipx/af_ipx.c | return ipxrtr_route_skb(skb); |
skb | 759 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 767 | net/ipx/af_ipx.c | return ipxitf_demux_socket(intrfc, skb, 0); |
skb | 771 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 1158 | net/ipx/af_ipx.c | struct sk_buff *skb; |
skb | 1185 | net/ipx/af_ipx.c | skb=sock_alloc_send_skb(sk, size, 0, 0, &err); |
skb | 1186 | net/ipx/af_ipx.c | if(skb==NULL) |
skb | 1189 | net/ipx/af_ipx.c | skb_reserve(skb,ipx_offset); |
skb | 1190 | net/ipx/af_ipx.c | skb->free=1; |
skb | 1191 | net/ipx/af_ipx.c | skb->arp=1; |
skb | 1192 | net/ipx/af_ipx.c | skb->sk=sk; |
skb | 1195 | net/ipx/af_ipx.c | ipx=(ipx_packet *)skb_put(skb,sizeof(ipx_packet)); |
skb | 1200 | net/ipx/af_ipx.c | skb->h.raw = (unsigned char *)ipx; |
skb | 1223 | net/ipx/af_ipx.c | memcpy_fromiovec(skb_put(skb,len),iov,len); |
skb | 1226 | net/ipx/af_ipx.c | if(call_out_firewall(PF_IPX, skb, ipx)!=FW_ACCEPT) |
skb | 1228 | net/ipx/af_ipx.c | kfree_skb(skb, FREE_WRITE); |
skb | 1233 | net/ipx/af_ipx.c | return ipxitf_send(intrfc, skb, (rt && rt->ir_routed) ? |
skb | 1238 | net/ipx/af_ipx.c | ipxrtr_route_skb(struct sk_buff *skb) |
skb | 1240 | net/ipx/af_ipx.c | ipx_packet *ipx = (ipx_packet *) (skb->h.raw); |
skb | 1247 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 1251 | net/ipx/af_ipx.c | (void)ipxitf_send(i, skb, (r->ir_routed) ? |
skb | 1922 | net/ipx/af_ipx.c | int ipx_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 1929 | net/ipx/af_ipx.c | ipx=(ipx_packet *)skb->h.raw; |
skb | 1935 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 1941 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 1954 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 1959 | net/ipx/af_ipx.c | return ipxitf_rcv(intrfc, skb); |
skb | 2023 | net/ipx/af_ipx.c | struct sk_buff *skb; |
skb | 2033 | net/ipx/af_ipx.c | skb=skb_recv_datagram(sk,flags,noblock,&er); |
skb | 2034 | net/ipx/af_ipx.c | if(skb==NULL) |
skb | 2040 | net/ipx/af_ipx.c | ipx = (ipx_packet *)(skb->h.raw); |
skb | 2043 | net/ipx/af_ipx.c | skb_copy_datagram_iovec(skb,sizeof(struct ipx_packet),msg->msg_iov,copied); |
skb | 2053 | net/ipx/af_ipx.c | skb_free_datagram(skb); |
skb | 2088 | net/ipx/af_ipx.c | struct sk_buff *skb; |
skb | 2090 | net/ipx/af_ipx.c | if((skb=skb_peek(&sk->receive_queue))!=NULL) |
skb | 2091 | net/ipx/af_ipx.c | amount=skb->len-sizeof(struct ipx_packet); |
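Note: the net/ipx/af_ipx.c rows above show the demultiplexing convention: every matching socket except the last is handed a clone of the frame, the last consumer takes the original, and the per-socket handler frees a frame the socket cannot queue. A condensed sketch (illustrative, not verbatim; the loop and the predicate name are paraphrased):

    static int ipxitf_def_skb_handler_sketch(struct sock *sock, struct sk_buff *skb)
    {
        int retval;

        if ((retval = sock_queue_rcv_skb(sock, skb)) < 0)
            kfree_skb(skb, FREE_WRITE);     /* queue full: frame dropped here */
        return retval;
    }

    /* inside the demux loop: */
    if (another_socket_also_matches)        /* hypothetical predicate */
        skb1 = skb_clone(skb, GFP_ATOMIC);  /* duplicate for this socket */
    else
        skb1 = skb;                         /* last taker keeps the original */
    if (skb1 != NULL)
        ipxitf_def_skb_handler_sketch(sock, skb1);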
skb | 33 | net/netlink.c | static int (*netlink_handler[MAX_LINKS])(struct sk_buff *skb); |
skb | 49 | net/netlink.c | static int netlink_err(struct sk_buff *skb) |
skb | 51 | net/netlink.c | kfree_skb(skb, FREE_READ); |
skb | 60 | net/netlink.c | int netlink_donothing(struct sk_buff *skb) |
skb | 62 | net/netlink.c | kfree_skb(skb, FREE_READ); |
skb | 73 | net/netlink.c | struct sk_buff *skb; |
skb | 74 | net/netlink.c | skb=alloc_skb(count, GFP_KERNEL); |
skb | 75 | net/netlink.c | skb->free=1; |
skb | 76 | net/netlink.c | memcpy_fromfs(skb_put(skb,count),buf, count); |
skb | 77 | net/netlink.c | return (netlink_handler[minor])(skb); |
skb | 87 | net/netlink.c | struct sk_buff *skb; |
skb | 89 | net/netlink.c | while((skb=skb_dequeue(&skb_queue_rd[minor]))==NULL) |
skb | 103 | net/netlink.c | rdq_size[minor]-=skb->len; |
skb | 105 | net/netlink.c | if(skb->len<count) |
skb | 106 | net/netlink.c | count=skb->len; |
skb | 107 | net/netlink.c | memcpy_tofs(buf,skb->data,count); |
skb | 108 | net/netlink.c | kfree_skb(skb, FREE_READ); |
skb | 177 | net/netlink.c | int netlink_attach(int unit, int (*function)(struct sk_buff *skb)) |
skb | 194 | net/netlink.c | int netlink_post(int unit, struct sk_buff *skb) |
skb | 202 | net/netlink.c | if(rdq_size[unit]+skb->len>MAX_QBYTES) |
skb | 206 | net/netlink.c | skb_queue_tail(&skb_queue_rd[unit], skb); |
skb | 207 | net/netlink.c | rdq_size[unit]+=skb->len; |
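Note: the net/netlink.c rows above implement a small bounded queue between the kernel and a character device: netlink_post() refuses a buffer once the per-unit byte quota is reached, and the read side dequeues, copies to user space with memcpy_tofs() and returns the bytes to the quota. A condensed sketch of the posting side (illustrative, not verbatim; the error code returned on overflow is not shown in the rows and is a guess):

    int netlink_post_sketch(int unit, struct sk_buff *skb)
    {
        if (rdq_size[unit] + skb->len > MAX_QBYTES)
            return -EAGAIN;                       /* quota exceeded (errno illustrative) */

        skb_queue_tail(&skb_queue_rd[unit], skb); /* the reader will pick it up */
        rdq_size[unit] += skb->len;
        return 0;
    }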
skb | 240 | net/netrom/af_netrom.c | struct sk_buff *skb; |
skb | 251 | net/netrom/af_netrom.c | while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) { |
skb | 252 | net/netrom/af_netrom.c | if (skb->sk != sk) { /* A pending connection */ |
skb | 253 | net/netrom/af_netrom.c | skb->sk->dead = 1; /* Queue the unaccepted socket for death */ |
skb | 254 | net/netrom/af_netrom.c | nr_set_timer(skb->sk); |
skb | 255 | net/netrom/af_netrom.c | skb->sk->nr->state = NR_STATE_0; |
skb | 258 | net/netrom/af_netrom.c | kfree_skb(skb, FREE_READ); |
skb | 821 | net/netrom/af_netrom.c | struct sk_buff *skb; |
skb | 842 | net/netrom/af_netrom.c | if ((skb = skb_dequeue(&sk->receive_queue)) == NULL) { |
skb | 853 | net/netrom/af_netrom.c | } while (skb == NULL); |
skb | 855 | net/netrom/af_netrom.c | newsk = skb->sk; |
skb | 860 | net/netrom/af_netrom.c | skb->sk = NULL; |
skb | 861 | net/netrom/af_netrom.c | kfree_skb(skb, FREE_READ); |
skb | 894 | net/netrom/af_netrom.c | int nr_rx_frame(struct sk_buff *skb, struct device *dev) |
skb | 903 | net/netrom/af_netrom.c | skb->sk = NULL; /* Initially we don't know who its for */ |
skb | 909 | net/netrom/af_netrom.c | src = (ax25_address *)(skb->data + 0); |
skb | 910 | net/netrom/af_netrom.c | dest = (ax25_address *)(skb->data + 7); |
skb | 912 | net/netrom/af_netrom.c | circuit_index = skb->data[15]; |
skb | 913 | net/netrom/af_netrom.c | circuit_id = skb->data[16]; |
skb | 914 | net/netrom/af_netrom.c | frametype = skb->data[19]; |
skb | 921 | net/netrom/af_netrom.c | skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); |
skb | 922 | net/netrom/af_netrom.c | skb->h.raw = skb->data; |
skb | 924 | net/netrom/af_netrom.c | return nr_rx_ip(skb, dev); |
skb | 934 | net/netrom/af_netrom.c | skb->h.raw = skb->data; |
skb | 936 | net/netrom/af_netrom.c | if ((frametype & 0x0F) == NR_CONNACK && skb->len == 22) |
skb | 941 | net/netrom/af_netrom.c | return nr_process_rx_frame(sk, skb); |
skb | 949 | net/netrom/af_netrom.c | user = (ax25_address *)(skb->data + 21); |
skb | 952 | net/netrom/af_netrom.c | nr_transmit_dm(skb); |
skb | 956 | net/netrom/af_netrom.c | window = skb->data[20]; |
skb | 958 | net/netrom/af_netrom.c | skb->sk = make; |
skb | 979 | net/netrom/af_netrom.c | if (skb->len == 37) { |
skb | 980 | net/netrom/af_netrom.c | timeout = skb->data[36] * 256 + skb->data[35]; |
skb | 1001 | net/netrom/af_netrom.c | skb_queue_head(&sk->receive_queue, skb); |
skb | 1006 | net/netrom/af_netrom.c | sk->data_ready(sk, skb->len); |
skb | 1017 | net/netrom/af_netrom.c | struct sk_buff *skb; |
skb | 1057 | net/netrom/af_netrom.c | if ((skb = sock_alloc_send_skb(sk, size, 0, 0, &err)) == NULL) |
skb | 1060 | net/netrom/af_netrom.c | skb->sk = sk; |
skb | 1061 | net/netrom/af_netrom.c | skb->free = 1; |
skb | 1062 | net/netrom/af_netrom.c | skb->arp = 1; |
skb | 1064 | net/netrom/af_netrom.c | skb_reserve(skb, size - len); |
skb | 1070 | net/netrom/af_netrom.c | asmptr = skb_push(skb, NR_TRANSPORT_LEN); |
skb | 1090 | net/netrom/af_netrom.c | skb->h.raw = skb_put(skb, len); |
skb | 1092 | net/netrom/af_netrom.c | asmptr = skb->h.raw; |
skb | 1104 | net/netrom/af_netrom.c | kfree_skb(skb, FREE_WRITE); |
skb | 1108 | net/netrom/af_netrom.c | nr_output(sk, skb); /* Shove it onto the queue */ |
skb | 1120 | net/netrom/af_netrom.c | struct sk_buff *skb; |
skb | 1137 | net/netrom/af_netrom.c | if ((skb = skb_recv_datagram(sk, flags, noblock, &er)) == NULL) |
skb | 1141 | net/netrom/af_netrom.c | skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); |
skb | 1142 | net/netrom/af_netrom.c | skb->h.raw = skb->data; |
skb | 1145 | net/netrom/af_netrom.c | copied = (size < skb->len) ? size : skb->len; |
skb | 1146 | net/netrom/af_netrom.c | skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); |
skb | 1152 | net/netrom/af_netrom.c | memcpy(&addr.sax25_call, skb->data + 7, sizeof(ax25_address)); |
skb | 1159 | net/netrom/af_netrom.c | skb_free_datagram(skb); |
skb | 1194 | net/netrom/af_netrom.c | struct sk_buff *skb; |
skb | 1196 | net/netrom/af_netrom.c | if ((skb = skb_peek(&sk->receive_queue)) != NULL) |
skb | 1197 | net/netrom/af_netrom.c | amount = skb->len - 20; |
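Note: the net/netrom/af_netrom.c rows above show how an outgoing NET/ROM buffer is built in sendmsg: allocate with sock_alloc_send_skb(), reserve headroom for the headers added later, push the transport header, append the payload and hand the result to nr_output(). A condensed sketch (illustrative, not verbatim; the header field fill-in and the user-space copy are elided):

    if ((skb = sock_alloc_send_skb(sk, size, 0, 0, &err)) == NULL)
        return err;

    skb->sk   = sk;
    skb->free = 1;
    skb->arp  = 1;

    skb_reserve(skb, size - len);              /* headroom for lower-layer headers */

    asmptr = skb_push(skb, NR_TRANSPORT_LEN);  /* NET/ROM transport header */
    /* ... circuit index/id and frame type written through asmptr ... */

    skb->h.raw = skb_put(skb, len);            /* payload area */
    /* ... copy the user data into skb->h.raw ... */

    nr_output(sk, skb);                        /* fragment if needed and queue */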
skb | 54 | net/netrom/nr_dev.c | int nr_rx_ip(struct sk_buff *skb, struct device *dev) |
skb | 65 | net/netrom/nr_dev.c | skb->protocol = htons(ETH_P_IP); |
skb | 68 | net/netrom/nr_dev.c | skb->dev = dev; |
skb | 70 | net/netrom/nr_dev.c | skb->h.raw = skb->data; |
skb | 71 | net/netrom/nr_dev.c | ip_rcv(skb, skb->dev, NULL); |
skb | 76 | net/netrom/nr_dev.c | static int nr_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 79 | net/netrom/nr_dev.c | unsigned char *buff = skb_push(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); |
skb | 109 | net/netrom/nr_dev.c | unsigned long raddr, struct sk_buff *skb) |
skb | 114 | net/netrom/nr_dev.c | skb_device_unlock(skb); |
skb | 117 | net/netrom/nr_dev.c | skb->free = 1; |
skb | 118 | net/netrom/nr_dev.c | kfree_skb(skb, FREE_WRITE); |
skb | 131 | net/netrom/nr_dev.c | if (!nr_route_frame(skb, NULL)) { |
skb | 132 | net/netrom/nr_dev.c | skb->free = 1; |
skb | 133 | net/netrom/nr_dev.c | kfree_skb(skb, FREE_WRITE); |
skb | 166 | net/netrom/nr_dev.c | static int nr_xmit(struct sk_buff *skb, struct device *dev) |
skb | 170 | net/netrom/nr_dev.c | if (skb == NULL || dev == NULL) |
skb | 190 | net/netrom/nr_dev.c | dev_kfree_skb(skb, FREE_WRITE); |
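Note: the net/netrom/nr_dev.c rows above show the IP-over-NET/ROM receive hook: the buffer is re-labelled as an IP packet and fed straight back into ip_rcv(). A small sketch (illustrative; the interface checks, statistics and return value are paraphrased):

    static int nr_rx_ip_sketch(struct sk_buff *skb, struct device *dev)
    {
        skb->protocol = htons(ETH_P_IP);
        skb->dev      = dev;
        skb->h.raw    = skb->data;       /* the IP header now starts the buffer */
        ip_rcv(skb, skb->dev, NULL);     /* hand it to the normal IP input path */
        return 1;                        /* return value illustrative */
    }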
skb | 53 | net/netrom/nr_in.c | static int nr_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more) |
skb | 55 | net/netrom/nr_in.c | struct sk_buff *skbo, *skbn = skb; |
skb | 58 | net/netrom/nr_in.c | sk->nr->fraglen += skb->len; |
skb | 59 | net/netrom/nr_in.c | skb_queue_tail(&sk->nr->frag_queue, skb); |
skb | 64 | net/netrom/nr_in.c | sk->nr->fraglen += skb->len; |
skb | 65 | net/netrom/nr_in.c | skb_queue_tail(&sk->nr->frag_queue, skb); |
skb | 97 | net/netrom/nr_in.c | static int nr_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) |
skb | 103 | net/netrom/nr_in.c | sk->window = skb->data[20]; |
skb | 104 | net/netrom/nr_in.c | sk->nr->your_index = skb->data[17]; |
skb | 105 | net/netrom/nr_in.c | sk->nr->your_id = skb->data[18]; |
skb | 143 | net/netrom/nr_in.c | static int nr_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype) |
skb | 171 | net/netrom/nr_in.c | static int nr_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype) |
skb | 179 | net/netrom/nr_in.c | nr = skb->data[18]; |
skb | 180 | net/netrom/nr_in.c | ns = skb->data[17]; |
skb | 263 | net/netrom/nr_in.c | skb_queue_head(&sk->nr->reseq_queue, skb); |
skb | 310 | net/netrom/nr_in.c | int nr_process_rx_frame(struct sock *sk, struct sk_buff *skb) |
skb | 325 | net/netrom/nr_in.c | frametype = skb->data[19]; |
skb | 330 | net/netrom/nr_in.c | queued = nr_state1_machine(sk, skb, frametype); |
skb | 333 | net/netrom/nr_in.c | queued = nr_state2_machine(sk, skb, frametype); |
skb | 336 | net/netrom/nr_in.c | queued = nr_state3_machine(sk, skb, frametype); |
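Note: the net/netrom/nr_in.c rows above show that the receive side is a per-connection state machine keyed on the frame type byte at skb->data[19]. A condensed sketch of the dispatch in nr_process_rx_frame() (illustrative, not verbatim; the NR_STATE_1..3 names are assumed to accompany the NR_STATE_0 seen above, and the handling of unqueued frames is paraphrased):

    frametype = skb->data[19];

    switch (sk->nr->state) {
    case NR_STATE_1:
        queued = nr_state1_machine(sk, skb, frametype);
        break;
    case NR_STATE_2:
        queued = nr_state2_machine(sk, skb, frametype);
        break;
    case NR_STATE_3:
        queued = nr_state3_machine(sk, skb, frametype);
        break;
    }
    /* a frame no handler queued is freed by the caller */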
skb | 49 | net/netrom/nr_out.c | void nr_output(struct sock *sk, struct sk_buff *skb) |
skb | 57 | net/netrom/nr_out.c | if (skb->len - NR_TRANSPORT_LEN > mtu) { |
skb | 59 | net/netrom/nr_out.c | memcpy(transport, skb->data, NR_TRANSPORT_LEN); |
skb | 60 | net/netrom/nr_out.c | skb_pull(skb, NR_TRANSPORT_LEN); |
skb | 62 | net/netrom/nr_out.c | frontlen = skb_headroom(skb); |
skb | 64 | net/netrom/nr_out.c | while (skb->len > 0) { |
skb | 74 | net/netrom/nr_out.c | len = (mtu > skb->len) ? skb->len : mtu; |
skb | 77 | net/netrom/nr_out.c | memcpy(skb_put(skbn, len), skb->data, len); |
skb | 78 | net/netrom/nr_out.c | skb_pull(skb, len); |
skb | 84 | net/netrom/nr_out.c | if (skb->len > 0) |
skb | 90 | net/netrom/nr_out.c | skb->free = 1; |
skb | 91 | net/netrom/nr_out.c | kfree_skb(skb, FREE_WRITE); |
skb | 93 | net/netrom/nr_out.c | skb_queue_tail(&sk->write_queue, skb); /* Throw it on the queue */ |
skb | 104 | net/netrom/nr_out.c | static void nr_send_iframe(struct sock *sk, struct sk_buff *skb) |
skb | 106 | net/netrom/nr_out.c | if (skb == NULL) |
skb | 109 | net/netrom/nr_out.c | skb->data[2] = sk->nr->vs; |
skb | 110 | net/netrom/nr_out.c | skb->data[3] = sk->nr->vr; |
skb | 113 | net/netrom/nr_out.c | skb->data[4] |= NR_CHOKE_FLAG; |
skb | 115 | net/netrom/nr_out.c | nr_transmit_buffer(sk, skb); |
skb | 120 | net/netrom/nr_out.c | struct sk_buff *skb, *skbn; |
skb | 122 | net/netrom/nr_out.c | if ((skb = skb_peek(&sk->nr->ack_queue)) == NULL) |
skb | 125 | net/netrom/nr_out.c | if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) |
skb | 143 | net/netrom/nr_out.c | struct sk_buff *skb, *skbn; |
skb | 166 | net/netrom/nr_out.c | skb = skb_dequeue(&sk->write_queue); |
skb | 169 | net/netrom/nr_out.c | if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { |
skb | 170 | net/netrom/nr_out.c | skb_queue_head(&sk->write_queue, skb); |
skb | 187 | net/netrom/nr_out.c | skb_queue_tail(&sk->nr->ack_queue, skb); |
skb | 189 | net/netrom/nr_out.c | } while (!last && (skb = skb_dequeue(&sk->write_queue)) != NULL); |
skb | 202 | net/netrom/nr_out.c | void nr_transmit_buffer(struct sock *sk, struct sk_buff *skb) |
skb | 209 | net/netrom/nr_out.c | dptr = skb_push(skb, NR_NETWORK_LEN); |
skb | 225 | net/netrom/nr_out.c | skb->arp = 1; |
skb | 227 | net/netrom/nr_out.c | if (!nr_route_frame(skb, NULL)) { |
skb | 228 | net/netrom/nr_out.c | kfree_skb(skb, FREE_WRITE); |
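Note: the net/netrom/nr_out.c rows above show how nr_output() fragments an oversized write: the transport header is saved, the original buffer is consumed in MTU-sized slices, each slice becomes its own buffer on the write queue, and the emptied original is freed. A condensed sketch (illustrative, not verbatim; the per-fragment allocation and header rebuild are elided):

    if (skb->len - NR_TRANSPORT_LEN > mtu) {
        memcpy(transport, skb->data, NR_TRANSPORT_LEN);  /* keep a header copy */
        skb_pull(skb, NR_TRANSPORT_LEN);

        frontlen = skb_headroom(skb);        /* headroom to reproduce in each fragment */

        while (skb->len > 0) {
            /* allocate skbn, reserve frontlen, rebuild the transport header ... */
            len = (mtu > skb->len) ? skb->len : mtu;
            memcpy(skb_put(skbn, len), skb->data, len);
            skb_pull(skb, len);              /* consume this slice */

            if (skb->len > 0) {
                /* the fragment header in skbn would get its
                 * "more follows" marking here (omitted) */
            }

            skb_queue_tail(&sk->write_queue, skbn);
        }

        skb->free = 1;
        kfree_skb(skb, FREE_WRITE);          /* the original is now empty */
    } else {
        skb_queue_tail(&sk->write_queue, skb);   /* fits in a single frame */
    }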
skb | 651 | net/netrom/nr_route.c | int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25) |
skb | 661 | net/netrom/nr_route.c | if(ax25 && call_in_firewall(PF_NETROM, skb, skb->data)!=FW_ACCEPT) |
skb | 663 | net/netrom/nr_route.c | if(!ax25 && call_out_firewall(PF_NETROM, skb, skb->data)!=FW_ACCEPT) |
skb | 666 | net/netrom/nr_route.c | nr_src = (ax25_address *)(skb->data + 0); |
skb | 667 | net/netrom/nr_route.c | nr_dest = (ax25_address *)(skb->data + 7); |
skb | 673 | net/netrom/nr_route.c | return nr_rx_frame(skb, dev); |
skb | 679 | net/netrom/nr_route.c | if (--skb->data[14] == 0) |
skb | 700 | net/netrom/nr_route.c | if(ax25 && call_fw_firewall(PF_NETROM, skb, skb->data)!=FW_ACCEPT) |
skb | 704 | net/netrom/nr_route.c | dptr = skb_push(skb, 1); |
skb | 707 | net/netrom/nr_route.c | ax25_send_frame(skb, (ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev); |
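Note: the net/netrom/nr_route.c rows above show the forwarding decision in nr_route_frame(): frames addressed to this node go up through nr_rx_frame(), everything else has the hop counter at skb->data[14] decremented and, if still alive, is pushed out to the chosen NET/ROM neighbour over AX.25. A condensed sketch (illustrative, not verbatim; the firewall hooks, neighbour lookup and "addressed to us" test are paraphrased, and the byte written in front of the frame is not shown in the rows):

    nr_src  = (ax25_address *)(skb->data + 0);
    nr_dest = (ax25_address *)(skb->data + 7);

    if (frame_is_for_this_node)                 /* hypothetical predicate */
        return nr_rx_frame(skb, dev);

    if (--skb->data[14] == 0)                   /* hop counter exhausted */
        return 0;                               /* drop (handling paraphrased) */

    dptr = skb_push(skb, 1);                    /* one extra byte in front for AX.25 */
    /* *dptr = <protocol id>;  value not shown in the index */

    ax25_send_frame(skb, (ax25_address *)dev->dev_addr, &nr_neigh->callsign,
                    nr_neigh->digipeat, nr_neigh->dev);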
skb | 49 | net/netrom/nr_subr.c | struct sk_buff *skb; |
skb | 51 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->write_queue)) != NULL) { |
skb | 52 | net/netrom/nr_subr.c | skb->sk = sk; |
skb | 53 | net/netrom/nr_subr.c | skb->free = 1; |
skb | 54 | net/netrom/nr_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 57 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->nr->ack_queue)) != NULL) { |
skb | 58 | net/netrom/nr_subr.c | skb->sk = sk; |
skb | 59 | net/netrom/nr_subr.c | skb->free = 1; |
skb | 60 | net/netrom/nr_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 63 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->nr->reseq_queue)) != NULL) { |
skb | 64 | net/netrom/nr_subr.c | kfree_skb(skb, FREE_READ); |
skb | 67 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->nr->frag_queue)) != NULL) { |
skb | 68 | net/netrom/nr_subr.c | kfree_skb(skb, FREE_READ); |
skb | 79 | net/netrom/nr_subr.c | struct sk_buff *skb; |
skb | 86 | net/netrom/nr_subr.c | skb = skb_dequeue(&sk->nr->ack_queue); |
skb | 87 | net/netrom/nr_subr.c | skb->sk = sk; |
skb | 88 | net/netrom/nr_subr.c | skb->free = 1; |
skb | 89 | net/netrom/nr_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 102 | net/netrom/nr_subr.c | struct sk_buff *skb, *skb_prev = NULL; |
skb | 104 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->nr->ack_queue)) != NULL) { |
skb | 106 | net/netrom/nr_subr.c | skb_queue_head(&sk->write_queue, skb); |
skb | 108 | net/netrom/nr_subr.c | skb_append(skb_prev, skb); |
skb | 109 | net/netrom/nr_subr.c | skb_prev = skb; |
skb | 153 | net/netrom/nr_subr.c | struct sk_buff *skb; |
skb | 175 | net/netrom/nr_subr.c | if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL) |
skb | 181 | net/netrom/nr_subr.c | skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + NR_NETWORK_LEN); |
skb | 183 | net/netrom/nr_subr.c | dptr = skb_put(skb, skb_tailroom(skb)); |
skb | 237 | net/netrom/nr_subr.c | skb->free = 1; |
skb | 239 | net/netrom/nr_subr.c | nr_transmit_buffer(sk, skb); |
skb | 246 | net/netrom/nr_subr.c | void nr_transmit_dm(struct sk_buff *skb) |
skb | 261 | net/netrom/nr_subr.c | memcpy(dptr, skb->data + 7, AX25_ADDR_LEN); |
skb | 267 | net/netrom/nr_subr.c | memcpy(dptr, skb->data + 0, AX25_ADDR_LEN); |
skb | 275 | net/netrom/nr_subr.c | *dptr++ = skb->data[15]; |
skb | 276 | net/netrom/nr_subr.c | *dptr++ = skb->data[16]; |
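Note: the net/netrom/nr_subr.c rows above show the teardown convention for a socket's queues: buffers that were charged to the sender (write and ack queues) are re-attached to the socket and freed as writes so the wmem accounting comes back, while inbound buffers (reassembly and fragment queues) are freed as reads. A condensed sketch (illustrative, not verbatim):

    while ((skb = skb_dequeue(&sk->write_queue)) != NULL) {
        skb->sk   = sk;               /* charge the free back to this socket */
        skb->free = 1;
        kfree_skb(skb, FREE_WRITE);   /* returns wmem to the sender */
    }

    while ((skb = skb_dequeue(&sk->nr->reseq_queue)) != NULL)
        kfree_skb(skb, FREE_READ);    /* inbound buffer: read-side accounting */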
skb | 167 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 170 | net/unix/af_unix.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) |
skb | 174 | net/unix/af_unix.c | unix_socket *osk=skb->sk; |
skb | 176 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); /* Now surplus - free the skb first before the socket */ |
skb | 183 | net/unix/af_unix.c | kfree_skb(skb,FREE_WRITE); |
skb | 456 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 509 | net/unix/af_unix.c | skb=sock_alloc_send_skb(sk, 0, 0, 0, &err); /* Marker object */ |
skb | 510 | net/unix/af_unix.c | if(skb==NULL) |
skb | 512 | net/unix/af_unix.c | skb->sk=sk; /* So they know it is us */ |
skb | 513 | net/unix/af_unix.c | skb->free=1; |
skb | 519 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); |
skb | 524 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); |
skb | 530 | net/unix/af_unix.c | skb_queue_tail(&other->receive_queue,skb); |
skb | 581 | net/unix/af_unix.c | unix_socket *ska,*skb; |
skb | 584 | net/unix/af_unix.c | skb=b->data; |
skb | 588 | net/unix/af_unix.c | skb->protinfo.af_unix.locks++; |
skb | 589 | net/unix/af_unix.c | ska->protinfo.af_unix.other=skb; |
skb | 590 | net/unix/af_unix.c | skb->protinfo.af_unix.other=ska; |
skb | 592 | net/unix/af_unix.c | skb->state=TCP_ESTABLISHED; |
skb | 600 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 623 | net/unix/af_unix.c | skb=skb_dequeue(&sk->receive_queue); |
skb | 624 | net/unix/af_unix.c | if(skb==NULL) |
skb | 640 | net/unix/af_unix.c | while(skb==NULL); |
skb | 641 | net/unix/af_unix.c | tsk=skb->sk; |
skb | 642 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); /* The buffer is just used as a tag */ |
skb | 689 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 772 | net/unix/af_unix.c | skb=sock_alloc_send_skb(sk,size,limit,nonblock, &err); |
skb | 774 | net/unix/af_unix.c | if(skb==NULL) |
skb | 783 | net/unix/af_unix.c | size=skb_tailroom(skb); /* If we dropped back on a limit then our skb is smaller */ |
skb | 785 | net/unix/af_unix.c | skb->sk=sk; |
skb | 786 | net/unix/af_unix.c | skb->free=1; |
skb | 788 | net/unix/af_unix.c | memcpy_fromiovec(skb_put(skb,size),msg->msg_iov, size); |
skb | 800 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); |
skb | 814 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); |
skb | 822 | net/unix/af_unix.c | skb->h.filp = filp; |
skb | 827 | net/unix/af_unix.c | skb_queue_tail(&other->receive_queue, skb); |
skb | 886 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 918 | net/unix/af_unix.c | skb=skb_dequeue(&sk->receive_queue); |
skb | 919 | net/unix/af_unix.c | if(skb==NULL) |
skb | 937 | net/unix/af_unix.c | if(skb->sk->protinfo.af_unix.name) |
skb | 939 | net/unix/af_unix.c | memcpy(sunaddr->sun_path, skb->sk->protinfo.af_unix.name, 108); |
skb | 948 | net/unix/af_unix.c | num=min(skb->len,size-copied); |
skb | 949 | net/unix/af_unix.c | memcpy_tofs(sp, skb->data, num); |
skb | 951 | net/unix/af_unix.c | if ((filp = skb->h.filp) != NULL) { |
skb | 952 | net/unix/af_unix.c | skb->h.filp = NULL; |
skb | 961 | net/unix/af_unix.c | skb_pull(skb, num); |
skb | 963 | net/unix/af_unix.c | if (skb->len) { |
skb | 964 | net/unix/af_unix.c | skb_queue_head(&sk->receive_queue, skb); |
skb | 967 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); |
skb | 1031 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 1035 | net/unix/af_unix.c | if((skb=skb_peek(&sk->receive_queue))!=NULL) |
skb | 1036 | net/unix/af_unix.c | amount=skb->len; |
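Note: the net/unix/af_unix.c rows above end with the stream read loop, which supports partial consumption of a buffer: copy what fits, pull it off the front of the skb, and either push the remainder back onto the head of the receive queue or free the emptied buffer. A condensed sketch of that tail (illustrative, not verbatim; sp, size and copied are the user pointer and byte counters of the surrounding loop):

    num = min(skb->len, size - copied);    /* what still fits in the user buffer */
    memcpy_tofs(sp, skb->data, num);       /* hand the bytes to user space */
    copied += num;
    sp     += num;

    skb_pull(skb, num);                    /* consume what was copied */

    if (skb->len) {
        /* data left over: keep it first in line for the next read() */
        skb_queue_head(&sk->receive_queue, skb);
    } else {
        kfree_skb(skb, FREE_WRITE);        /* writer-side memory is released */
    }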