tag | line | file | source code |
--- | ---- | ---- | ----------- |
skb | 122 | drivers/net/3c501.c | static int el_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 377 | drivers/net/3c501.c | static int el_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 406 | drivers/net/3c501.c | if (skb == NULL) |
skb | 433 | drivers/net/3c501.c | int gp_start = 0x800 - (ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN); |
skb | 434 | drivers/net/3c501.c | unsigned char *buf = skb->data; |
skb | 462 | drivers/net/3c501.c | outsb(DATAPORT,buf,skb->len); /* load buffer (usual thing each byte increments the pointer) */ |
skb | 482 | drivers/net/3c501.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 679 | drivers/net/3c501.c | struct sk_buff *skb; |
skb | 699 | drivers/net/3c501.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 706 | drivers/net/3c501.c | if (skb == NULL) |
skb | 714 | drivers/net/3c501.c | skb_reserve(skb,2); /* Force 16 byte alignment */ |
skb | 715 | drivers/net/3c501.c | skb->dev = dev; |
skb | 721 | drivers/net/3c501.c | insb(DATAPORT, skb_put(skb,pkt_len), pkt_len); |
skb | 722 | drivers/net/3c501.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 723 | drivers/net/3c501.c | netif_rx(skb); |
skb | 77 | drivers/net/3c503.c | static void el2_block_input(struct device *dev, int count, struct sk_buff *skb, |
skb | 513 | drivers/net/3c503.c | el2_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 525 | drivers/net/3c503.c | memcpy_fromio(skb->data, dev->mem_start + ring_offset, semi_count); |
skb | 527 | drivers/net/3c503.c | memcpy_fromio(skb->data + semi_count, dev->rmem_start, count); |
skb | 530 | drivers/net/3c503.c | eth_io_copy_and_sum(skb, dev->mem_start + ring_offset, count, 0); |
skb | 551 | drivers/net/3c503.c | (skb->data)[i] = inb_p(E33G_FIFOH); |
skb | 557 | drivers/net/3c505.c | struct sk_buff *skb; |
skb | 572 | drivers/net/3c505.c | skb = dev_alloc_skb(rlen+2); |
skb | 583 | drivers/net/3c505.c | if (skb == NULL) { |
skb | 599 | drivers/net/3c505.c | skb_reserve(skb,2); /* 16 byte alignment */ |
skb | 600 | drivers/net/3c505.c | skb->dev = dev; |
skb | 605 | drivers/net/3c505.c | ptr = (unsigned short *)skb_put(skb,len); |
skb | 614 | drivers/net/3c505.c | kfree_skb(skb, FREE_WRITE); |
skb | 623 | drivers/net/3c505.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 624 | drivers/net/3c505.c | netif_rx(skb); |
skb | 1005 | drivers/net/3c505.c | elp_start_xmit (struct sk_buff *skb, struct device *dev) |
skb | 1012 | drivers/net/3c505.c | if (skb == NULL) { |
skb | 1020 | drivers/net/3c505.c | if (skb->len <= 0) |
skb | 1024 | drivers/net/3c505.c | printk("%s: request to send packet of length %d\n", dev->name, (int)skb->len); |
skb | 1045 | drivers/net/3c505.c | if (!send_packet(dev, skb->data, skb->len)) { |
skb | 1051 | drivers/net/3c505.c | printk("%s: packet of length %d sent\n", dev->name, (int)skb->len); |
skb | 1067 | drivers/net/3c505.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 282 | drivers/net/3c507.c | static int el16_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 451 | drivers/net/3c507.c | el16_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 486 | drivers/net/3c507.c | if (skb == NULL) { |
skb | 495 | drivers/net/3c507.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 496 | drivers/net/3c507.c | unsigned char *buf = skb->data; |
skb | 506 | drivers/net/3c507.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 841 | drivers/net/3c507.c | struct sk_buff *skb; |
skb | 844 | drivers/net/3c507.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 845 | drivers/net/3c507.c | if (skb == NULL) { |
skb | 851 | drivers/net/3c507.c | skb_reserve(skb,2); |
skb | 852 | drivers/net/3c507.c | skb->dev = dev; |
skb | 855 | drivers/net/3c507.c | memcpy(skb_put(skb,pkt_len), data_frame + 5, pkt_len); |
skb | 857 | drivers/net/3c507.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 858 | drivers/net/3c507.c | netif_rx(skb); |
skb | 104 | drivers/net/3c509.c | static int el3_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 382 | drivers/net/3c509.c | el3_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 401 | drivers/net/3c509.c | if (skb == NULL) { |
skb | 406 | drivers/net/3c509.c | if (skb->len <= 0) |
skb | 411 | drivers/net/3c509.c | dev->name, skb->len, inw(ioaddr + EL3_STATUS)); |
skb | 435 | drivers/net/3c509.c | outw(skb->len, ioaddr + TX_FIFO); |
skb | 438 | drivers/net/3c509.c | outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); |
skb | 448 | drivers/net/3c509.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 596 | drivers/net/3c509.c | struct sk_buff *skb; |
skb | 598 | drivers/net/3c509.c | skb = dev_alloc_skb(pkt_len+5); |
skb | 602 | drivers/net/3c509.c | if (skb != NULL) { |
skb | 603 | drivers/net/3c509.c | skb->dev = dev; |
skb | 604 | drivers/net/3c509.c | skb_reserve(skb,2); /* Align IP on 16 byte boundaries */ |
skb | 607 | drivers/net/3c509.c | insl(ioaddr+RX_FIFO, skb_put(skb,pkt_len), |
skb | 610 | drivers/net/3c509.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 611 | drivers/net/3c509.c | netif_rx(skb); |
skb | 140 | drivers/net/8390.c | static int ei_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 182 | drivers/net/8390.c | if (skb == NULL) { |
skb | 187 | drivers/net/8390.c | length = skb->len; |
skb | 188 | drivers/net/8390.c | if (skb->len <= 0) |
skb | 228 | drivers/net/8390.c | ei_block_output(dev, length, skb->data, output_page); |
skb | 242 | drivers/net/8390.c | ei_block_output(dev, length, skb->data, ei_local->tx_start_page); |
skb | 253 | drivers/net/8390.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 467 | drivers/net/8390.c | struct sk_buff *skb; |
skb | 469 | drivers/net/8390.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 470 | drivers/net/8390.c | if (skb == NULL) { |
skb | 477 | drivers/net/8390.c | skb_reserve(skb,2); /* IP headers on 16 byte boundaries */ |
skb | 478 | drivers/net/8390.c | skb->dev = dev; |
skb | 479 | drivers/net/8390.c | skb_put(skb, pkt_len); /* Make room */ |
skb | 480 | drivers/net/8390.c | ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); |
skb | 481 | drivers/net/8390.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 482 | drivers/net/8390.c | netif_rx(skb); |
skb | 76 | drivers/net/ac3200.c | struct sk_buff *skb, int ring_offset); |
skb | 269 | drivers/net/ac3200.c | static void ac_block_input(struct device *dev, int count, struct sk_buff *skb, |
skb | 277 | drivers/net/ac3200.c | memcpy_fromio(skb->data, xfer_start, semi_count); |
skb | 279 | drivers/net/ac3200.c | memcpy_fromio(skb->data + semi_count, dev->rmem_start, count); |
skb | 282 | drivers/net/ac3200.c | eth_io_copy_and_sum(skb, xfer_start, count, 0); |
skb | 185 | drivers/net/apricot.c | static int i596_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 340 | drivers/net/apricot.c | struct sk_buff *skb = dev_alloc_skb(pkt_len); |
skb | 344 | drivers/net/apricot.c | if (skb == NULL) |
skb | 351 | drivers/net/apricot.c | skb->dev = dev; |
skb | 352 | drivers/net/apricot.c | memcpy(skb_put(skb,pkt_len), lp->scb.rfd->data, pkt_len); |
skb | 354 | drivers/net/apricot.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 355 | drivers/net/apricot.c | netif_rx(skb); |
skb | 358 | drivers/net/apricot.c | if (i596_debug > 4) print_eth(skb->data); |
skb | 406 | drivers/net/apricot.c | struct sk_buff *skb = ((struct sk_buff *)(tx_cmd->tbd->data)) -1; |
skb | 408 | drivers/net/apricot.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 569 | drivers/net/apricot.c | i596_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 607 | drivers/net/apricot.c | if (skb == NULL) { |
skb | 613 | drivers/net/apricot.c | if (skb->len <= 0) return 0; |
skb | 623 | drivers/net/apricot.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 632 | drivers/net/apricot.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 646 | drivers/net/apricot.c | tx_cmd->tbd->data = skb->data; |
skb | 648 | drivers/net/apricot.c | if (i596_debug > 3) print_eth(skb->data); |
skb | 804 | drivers/net/apricot.c | struct sk_buff *skb = ((struct sk_buff *)(tx_cmd->tbd->data)) -1; |
skb | 806 | drivers/net/apricot.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 810 | drivers/net/apricot.c | if (i596_debug >2) print_eth(skb->data); |
skb | 428 | drivers/net/arcnet.c | struct sk_buff *skb; /* packet data buffer */ |
skb | 436 | drivers/net/arcnet.c | struct sk_buff *skb; /* buffer from upper levels */ |
skb | 495 | drivers/net/arcnet.c | static int arcnet_send_packet_bad(struct sk_buff *skb,struct device *dev); |
skb | 496 | drivers/net/arcnet.c | static int arcnetA_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 497 | drivers/net/arcnet.c | static int arcnetE_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 498 | drivers/net/arcnet.c | static int arcnetS_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 520 | drivers/net/arcnet.c | int arcnetA_header(struct sk_buff *skb,struct device *dev, |
skb | 522 | drivers/net/arcnet.c | int arcnetS_header(struct sk_buff *skb,struct device *dev, |
skb | 525 | drivers/net/arcnet.c | struct sk_buff *skb); |
skb | 527 | drivers/net/arcnet.c | struct sk_buff *skb); |
skb | 528 | drivers/net/arcnet.c | unsigned short arcnetA_type_trans(struct sk_buff *skb,struct device *dev); |
skb | 529 | drivers/net/arcnet.c | unsigned short arcnetS_type_trans(struct sk_buff *skb,struct device *dev); |
skb | 1185 | drivers/net/arcnet.c | arcnet_send_packet_bad(struct sk_buff *skb, struct device *dev) |
skb | 1218 | drivers/net/arcnet.c | status,tickssofar,lp->outgoing.skb, |
skb | 1245 | drivers/net/arcnet.c | if (lp->outgoing.skb) |
skb | 1247 | drivers/net/arcnet.c | dev_kfree_skb(lp->outgoing.skb,FREE_WRITE); |
skb | 1250 | drivers/net/arcnet.c | lp->outgoing.skb=NULL; |
skb | 1262 | drivers/net/arcnet.c | if (skb == NULL) { |
skb | 1303 | drivers/net/arcnet.c | arcnetA_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 1311 | drivers/net/arcnet.c | bad=arcnet_send_packet_bad(skb,dev); |
skb | 1320 | drivers/net/arcnet.c | out->length = 1 < skb->len ? skb->len : 1; |
skb | 1321 | drivers/net/arcnet.c | out->hdr=(struct ClientData*)skb->data; |
skb | 1322 | drivers/net/arcnet.c | out->skb=skb; |
skb | 1327 | drivers/net/arcnet.c | for(i=0; i<skb->len; i++) |
skb | 1330 | drivers/net/arcnet.c | printk("%02hX ",((unsigned char*)skb->data)[i]); |
skb | 1350 | drivers/net/arcnet.c | ((char *)skb->data)+sizeof(struct ClientData), |
skb | 1355 | drivers/net/arcnet.c | dev_kfree_skb(out->skb,FREE_WRITE); |
skb | 1356 | drivers/net/arcnet.c | out->skb=NULL; |
skb | 1369 | drivers/net/arcnet.c | out->data=(u_char *)skb->data |
skb | 1401 | drivers/net/arcnet.c | if (out->skb) |
skb | 1402 | drivers/net/arcnet.c | dev_kfree_skb(out->skb,FREE_WRITE); |
skb | 1403 | drivers/net/arcnet.c | out->skb=NULL; |
skb | 1421 | drivers/net/arcnet.c | arcnetE_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 1428 | drivers/net/arcnet.c | short offset,length=skb->len+1; |
skb | 1432 | drivers/net/arcnet.c | bad=arcnet_send_packet_bad(skb,dev); |
skb | 1447 | drivers/net/arcnet.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 1461 | drivers/net/arcnet.c | if (((struct ethhdr*)(skb->data))->h_dest[0] == 0xFF) |
skb | 1465 | drivers/net/arcnet.c | ((struct ethhdr*)(skb->data))->h_dest[5]; |
skb | 1493 | drivers/net/arcnet.c | memcpy(arcsoft,skb->data,skb->len); |
skb | 1520 | drivers/net/arcnet.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 1543 | drivers/net/arcnet.c | arcnetS_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 1547 | drivers/net/arcnet.c | struct S_ClientData *hdr=(struct S_ClientData *)skb->data; |
skb | 1551 | drivers/net/arcnet.c | bad=arcnet_send_packet_bad(skb,dev); |
skb | 1560 | drivers/net/arcnet.c | length = 1 < skb->len ? skb->len : 1; |
skb | 1565 | drivers/net/arcnet.c | for(i=0; i<skb->len; i++) |
skb | 1568 | drivers/net/arcnet.c | printk("%02hX ",((unsigned char*)skb->data)[i]); |
skb | 1577 | drivers/net/arcnet.c | skb->data+S_EXTRA_CLIENTDATA, |
skb | 1579 | drivers/net/arcnet.c | skb->data+sizeof(struct S_ClientData), |
skb | 1584 | drivers/net/arcnet.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 1597 | drivers/net/arcnet.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 1981 | drivers/net/arcnet.c | status,out->numsegs,out->segnum,out->skb); |
skb | 2013 | drivers/net/arcnet.c | if (!lp->outgoing.skb) |
skb | 2041 | drivers/net/arcnet.c | if (out->skb) |
skb | 2042 | drivers/net/arcnet.c | dev_kfree_skb(out->skb,FREE_WRITE); |
skb | 2043 | drivers/net/arcnet.c | out->skb=NULL; |
skb | 2191 | drivers/net/arcnet.c | struct sk_buff *skb; |
skb | 2220 | drivers/net/arcnet.c | if (in->skb) /* already assembling one! */ |
skb | 2225 | drivers/net/arcnet.c | kfree_skb(in->skb,FREE_WRITE); |
skb | 2228 | drivers/net/arcnet.c | in->skb=NULL; |
skb | 2233 | drivers/net/arcnet.c | skb = alloc_skb(length, GFP_ATOMIC); |
skb | 2234 | drivers/net/arcnet.c | if (skb == NULL) { |
skb | 2239 | drivers/net/arcnet.c | soft=(struct ClientData *)skb->data; |
skb | 2241 | drivers/net/arcnet.c | skb->len = length; |
skb | 2242 | drivers/net/arcnet.c | skb->dev = dev; |
skb | 2290 | drivers/net/arcnet.c | for(i=0; i< skb->len; i++) |
skb | 2293 | drivers/net/arcnet.c | printk("%02hX ",((unsigned char*)skb->data)[i]); |
skb | 2298 | drivers/net/arcnet.c | skb->protocol=arcnetA_type_trans(skb,dev); |
skb | 2300 | drivers/net/arcnet.c | netif_rx(skb); |
skb | 2326 | drivers/net/arcnet.c | if (in->skb && in->sequence!=arcsoft->sequence) |
skb | 2331 | drivers/net/arcnet.c | kfree_skb(in->skb,FREE_WRITE); |
skb | 2332 | drivers/net/arcnet.c | in->skb=NULL; |
skb | 2342 | drivers/net/arcnet.c | if (in->skb) /* already assembling one! */ |
skb | 2349 | drivers/net/arcnet.c | kfree_skb(in->skb,FREE_WRITE); |
skb | 2365 | drivers/net/arcnet.c | in->skb=skb=alloc_skb(508*in->numpackets |
skb | 2368 | drivers/net/arcnet.c | if (skb == NULL) { |
skb | 2377 | drivers/net/arcnet.c | skb->free=1; |
skb | 2379 | drivers/net/arcnet.c | soft=(struct ClientData *)skb->data; |
skb | 2381 | drivers/net/arcnet.c | skb->len=sizeof(struct ClientData); |
skb | 2382 | drivers/net/arcnet.c | skb->dev=dev; |
skb | 2396 | drivers/net/arcnet.c | if (!in->skb) |
skb | 2422 | drivers/net/arcnet.c | kfree_skb(in->skb,FREE_WRITE); |
skb | 2423 | drivers/net/arcnet.c | in->skb=NULL; |
skb | 2430 | drivers/net/arcnet.c | soft=(struct ClientData *)in->skb->data; |
skb | 2433 | drivers/net/arcnet.c | skb=in->skb; |
skb | 2435 | drivers/net/arcnet.c | memcpy(skb->data+skb->len, |
skb | 2439 | drivers/net/arcnet.c | skb->len+=length-sizeof(struct ClientData); |
skb | 2447 | drivers/net/arcnet.c | if (!skb || !in->skb) |
skb | 2450 | drivers/net/arcnet.c | skb,in->skb); |
skb | 2454 | drivers/net/arcnet.c | in->skb=NULL; |
skb | 2460 | drivers/net/arcnet.c | for(i=0; i<skb->len; i++) |
skb | 2463 | drivers/net/arcnet.c | printk("%02hX ",((unsigned char*)skb->data)[i]); |
skb | 2469 | drivers/net/arcnet.c | skb->protocol=arcnetA_type_trans(skb,dev); |
skb | 2471 | drivers/net/arcnet.c | netif_rx(skb); |
skb | 2485 | drivers/net/arcnet.c | struct sk_buff *skb; |
skb | 2490 | drivers/net/arcnet.c | skb = alloc_skb(length, GFP_ATOMIC); |
skb | 2491 | drivers/net/arcnet.c | if (skb == NULL) { |
skb | 2497 | drivers/net/arcnet.c | skb->len = length; |
skb | 2498 | drivers/net/arcnet.c | skb->dev = dev; |
skb | 2500 | drivers/net/arcnet.c | memcpy(skb->data,(u_char *)arcsoft+1,length-1); |
skb | 2506 | drivers/net/arcnet.c | for(i=0; i<skb->len; i++) |
skb | 2511 | drivers/net/arcnet.c | printk("%02hX ",((u_char *)skb->data)[i]); |
skb | 2516 | drivers/net/arcnet.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 2518 | drivers/net/arcnet.c | netif_rx(skb); |
skb | 2528 | drivers/net/arcnet.c | struct sk_buff *skb; |
skb | 2541 | drivers/net/arcnet.c | skb = alloc_skb(length, GFP_ATOMIC); |
skb | 2542 | drivers/net/arcnet.c | if (skb == NULL) { |
skb | 2547 | drivers/net/arcnet.c | soft=(struct S_ClientData *)skb->data; |
skb | 2548 | drivers/net/arcnet.c | skb->len = length; |
skb | 2558 | drivers/net/arcnet.c | skb->dev = dev; /* is already lp->sdev */ |
skb | 2563 | drivers/net/arcnet.c | for(i=0; i<skb->len; i++) |
skb | 2566 | drivers/net/arcnet.c | printk("%02hX ",((unsigned char*)skb->data)[i]); |
skb | 2571 | drivers/net/arcnet.c | skb->protocol=arcnetS_type_trans(skb,dev); |
skb | 2573 | drivers/net/arcnet.c | netif_rx(skb); |
skb | 2625 | drivers/net/arcnet.c | int arcnetA_header(struct sk_buff *skb,struct device *dev, |
skb | 2629 | drivers/net/arcnet.c | skb_push(skb,dev->hard_header_len); |
skb | 2699 | drivers/net/arcnet.c | int arcnetS_header(struct sk_buff *skb,struct device *dev, |
skb | 2703 | drivers/net/arcnet.c | skb_push(skb,dev->hard_header_len); |
skb | 2757 | drivers/net/arcnet.c | struct sk_buff *skb) |
skb | 2784 | drivers/net/arcnet.c | status=arp_find(&(head->daddr), dst, dev, dev->pa_addr, skb)? 1 : 0; |
skb | 2795 | drivers/net/arcnet.c | struct sk_buff *skb) |
skb | 2819 | drivers/net/arcnet.c | return arp_find(&(head->daddr), dst, dev, dev->pa_addr, skb)? 1 : 0; |
skb | 2830 | drivers/net/arcnet.c | unsigned short arcnetA_type_trans(struct sk_buff *skb,struct device *dev) |
skb | 2836 | drivers/net/arcnet.c | skb->mac.raw=skb->data; |
skb | 2837 | drivers/net/arcnet.c | skb_pull(skb,dev->hard_header_len); |
skb | 2838 | drivers/net/arcnet.c | head=(struct ClientData *)skb->mac.raw; |
skb | 2841 | drivers/net/arcnet.c | skb->pkt_type=PACKET_BROADCAST; |
skb | 2846 | drivers/net/arcnet.c | skb->pkt_type=PACKET_OTHERHOST; |
skb | 2871 | drivers/net/arcnet.c | unsigned short arcnetS_type_trans(struct sk_buff *skb,struct device *dev) |
skb | 2877 | drivers/net/arcnet.c | skb->mac.raw=skb->data; |
skb | 2878 | drivers/net/arcnet.c | skb_pull(skb,dev->hard_header_len); |
skb | 2879 | drivers/net/arcnet.c | head=(struct S_ClientData *)skb->mac.raw; |
skb | 2882 | drivers/net/arcnet.c | skb->pkt_type=PACKET_BROADCAST; |
skb | 2887 | drivers/net/arcnet.c | skb->pkt_type=PACKET_OTHERHOST; |
skb | 119 | drivers/net/at1700.c | static int net_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 364 | drivers/net/at1700.c | net_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 398 | drivers/net/at1700.c | if (skb == NULL) { |
skb | 408 | drivers/net/at1700.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 409 | drivers/net/at1700.c | unsigned char *buf = skb->data; |
skb | 435 | drivers/net/at1700.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 520 | drivers/net/at1700.c | struct sk_buff *skb; |
skb | 531 | drivers/net/at1700.c | skb = dev_alloc_skb(pkt_len+3); |
skb | 532 | drivers/net/at1700.c | if (skb == NULL) { |
skb | 541 | drivers/net/at1700.c | skb->dev = dev; |
skb | 542 | drivers/net/at1700.c | skb_reserve(skb,2); |
skb | 544 | drivers/net/at1700.c | insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1); |
skb | 545 | drivers/net/at1700.c | skb->protocol=eth_type_trans(skb, dev); |
skb | 546 | drivers/net/at1700.c | netif_rx(skb); |
skb | 137 | drivers/net/atp.c | static int net_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 416 | drivers/net/atp.c | net_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 440 | drivers/net/atp.c | if (skb == NULL) { |
skb | 450 | drivers/net/atp.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 451 | drivers/net/atp.c | unsigned char *buf = skb->data; |
skb | 479 | drivers/net/atp.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 673 | drivers/net/atp.c | struct sk_buff *skb; |
skb | 675 | drivers/net/atp.c | skb = dev_alloc_skb(pkt_len); |
skb | 676 | drivers/net/atp.c | if (skb == NULL) { |
skb | 681 | drivers/net/atp.c | skb->dev = dev; |
skb | 683 | drivers/net/atp.c | read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port); |
skb | 686 | drivers/net/atp.c | unsigned char *data = skb->data; |
skb | 694 | drivers/net/atp.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 695 | drivers/net/atp.c | netif_rx(skb); |
skb | 353 | drivers/net/de4x5.c | struct sk_buff *skb[NUM_TX_DESC]; /* TX skb for freeing when sent */ |
skb | 398 | drivers/net/de4x5.c | static int de4x5_queue_pkt(struct sk_buff *skb, struct device *dev); |
skb | 418 | drivers/net/de4x5.c | static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb); |
skb | 938 | drivers/net/de4x5.c | de4x5_queue_pkt(struct sk_buff *skb, struct device *dev) |
skb | 977 | drivers/net/de4x5.c | if (lp->skb[i] != NULL) { |
skb | 978 | drivers/net/de4x5.c | if (lp->skb[i]->len != FAKE_FRAME_LEN) { |
skb | 980 | drivers/net/de4x5.c | dev_queue_xmit(lp->skb[i], dev, SOPRI_NORMAL); |
skb | 982 | drivers/net/de4x5.c | dev_kfree_skb(lp->skb[i], FREE_WRITE); |
skb | 985 | drivers/net/de4x5.c | dev_kfree_skb(lp->skb[i], FREE_WRITE); |
skb | 987 | drivers/net/de4x5.c | lp->skb[i] = NULL; |
skb | 990 | drivers/net/de4x5.c | if (skb->len != FAKE_FRAME_LEN) { |
skb | 991 | drivers/net/de4x5.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 993 | drivers/net/de4x5.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1023 | drivers/net/de4x5.c | } else if (skb == NULL) { |
skb | 1025 | drivers/net/de4x5.c | } else if (skb->len == FAKE_FRAME_LEN) { /* Don't TX a fake frame! */ |
skb | 1026 | drivers/net/de4x5.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1027 | drivers/net/de4x5.c | } else if (skb->len > 0) { |
skb | 1035 | drivers/net/de4x5.c | load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb); |
skb | 1146 | drivers/net/de4x5.c | struct sk_buff *skb; |
skb | 1149 | drivers/net/de4x5.c | if ((skb = dev_alloc_skb(pkt_len+2)) != NULL) { |
skb | 1150 | drivers/net/de4x5.c | skb->dev = dev; |
skb | 1152 | drivers/net/de4x5.c | skb_reserve(skb,2); /* Align */ |
skb | 1155 | drivers/net/de4x5.c | memcpy(skb_put(skb,len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), len); |
skb | 1156 | drivers/net/de4x5.c | memcpy(skb_put(skb,pkt_len-len), bus_to_virt(lp->rx_ring[0].buf), pkt_len - len); |
skb | 1158 | drivers/net/de4x5.c | memcpy(skb_put(skb,pkt_len), bus_to_virt(lp->rx_ring[lp->rx_old].buf), pkt_len); |
skb | 1162 | drivers/net/de4x5.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 1163 | drivers/net/de4x5.c | netif_rx(skb); |
skb | 1173 | drivers/net/de4x5.c | buf = skb->data; /* Look at the dest addr */ |
skb | 1252 | drivers/net/de4x5.c | if (lp->skb[entry] != NULL) { |
skb | 1253 | drivers/net/de4x5.c | dev_kfree_skb(lp->skb[entry], FREE_WRITE); |
skb | 1254 | drivers/net/de4x5.c | lp->skb[entry] = NULL; |
skb | 1360 | drivers/net/de4x5.c | static void load_packet(struct device *dev, char *buf, u32 flags, struct sk_buff *skb) |
skb | 1367 | drivers/net/de4x5.c | lp->skb[lp->tx_new] = skb; |
skb | 2478 | drivers/net/de4x5.c | struct sk_buff *skb; |
skb | 2480 | drivers/net/de4x5.c | if ((skb = alloc_skb(0, GFP_ATOMIC)) != NULL) { |
skb | 2481 | drivers/net/de4x5.c | skb->len= FAKE_FRAME_LEN; |
skb | 2482 | drivers/net/de4x5.c | skb->arp=1; |
skb | 2483 | drivers/net/de4x5.c | skb->dev=dev; |
skb | 2484 | drivers/net/de4x5.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 247 | drivers/net/de600.c | static int de600_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 397 | drivers/net/de600.c | de600_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 402 | drivers/net/de600.c | byte *buffer = skb->data; |
skb | 410 | drivers/net/de600.c | if (skb == NULL) { |
skb | 434 | drivers/net/de600.c | PRINTK(("de600_start_xmit:len=%d, page %d/%d\n", skb->len, tx_fifo_in, free_tx_pages)); |
skb | 436 | drivers/net/de600.c | if ((len = skb->len) < RUNT) |
skb | 476 | drivers/net/de600.c | if (skb->sk && (skb->sk->protocol == IPPROTO_TCP) && |
skb | 477 | drivers/net/de600.c | (skb->sk->prot->rspace != &de600_rspace)) |
skb | 478 | drivers/net/de600.c | skb->sk->prot->rspace = de600_rspace; /* Ugh! */ |
skb | 481 | drivers/net/de600.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 580 | drivers/net/de600.c | struct sk_buff *skb; |
skb | 605 | drivers/net/de600.c | skb = dev_alloc_skb(size+2); |
skb | 607 | drivers/net/de600.c | if (skb == NULL) { |
skb | 614 | drivers/net/de600.c | skb->dev = dev; |
skb | 615 | drivers/net/de600.c | skb_reserve(skb,2); /* Align */ |
skb | 618 | drivers/net/de600.c | buffer = skb_put(skb,size); |
skb | 627 | drivers/net/de600.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 629 | drivers/net/de600.c | netif_rx(skb); |
skb | 511 | drivers/net/de620.c | de620_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 516 | drivers/net/de620.c | byte *buffer = skb->data; |
skb | 525 | drivers/net/de620.c | if (skb == NULL) { |
skb | 550 | drivers/net/de620.c | if ((len = skb->len) < RUNT) |
skb | 560 | drivers/net/de620.c | (int)skb->len, using_txbuf)); |
skb | 590 | drivers/net/de620.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 652 | drivers/net/de620.c | struct sk_buff *skb; |
skb | 707 | drivers/net/de620.c | skb = dev_alloc_skb(size+2); |
skb | 708 | drivers/net/de620.c | if (skb == NULL) { /* Yeah, but no place to put it... */ |
skb | 714 | drivers/net/de620.c | skb_reserve(skb,2); /* Align */ |
skb | 715 | drivers/net/de620.c | skb->dev = dev; |
skb | 716 | drivers/net/de620.c | skb->free = 1; |
skb | 718 | drivers/net/de620.c | buffer = skb_put(skb,size); |
skb | 722 | drivers/net/de620.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 723 | drivers/net/de620.c | netif_rx(skb); /* deliver it "upstairs" */ |
skb | 383 | drivers/net/depca.c | static int depca_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 408 | drivers/net/depca.c | static int load_packet(struct device *dev, struct sk_buff *skb); |
skb | 779 | drivers/net/depca.c | depca_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 804 | drivers/net/depca.c | } else if (skb == NULL) { |
skb | 806 | drivers/net/depca.c | } else if (skb->len > 0) { |
skb | 813 | drivers/net/depca.c | status = load_packet(dev, skb); |
skb | 821 | drivers/net/depca.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 912 | drivers/net/depca.c | struct sk_buff *skb; |
skb | 914 | drivers/net/depca.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 915 | drivers/net/depca.c | if (skb != NULL) { |
skb | 917 | drivers/net/depca.c | skb_reserve(skb,2); /* 16 byte align the IP header */ |
skb | 918 | drivers/net/depca.c | buf = skb_put(skb,pkt_len); |
skb | 919 | drivers/net/depca.c | skb->dev = dev; |
skb | 932 | drivers/net/depca.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 933 | drivers/net/depca.c | netif_rx(skb); |
skb | 1529 | drivers/net/depca.c | static int load_packet(struct device *dev, struct sk_buff *skb) |
skb | 1535 | drivers/net/depca.c | end = (entry + (skb->len - 1) / TX_BUFF_SZ) & lp->txRingMask; |
skb | 1543 | drivers/net/depca.c | memcpy_toio(lp->tx_memcpy[entry], skb->data, len); |
skb | 1544 | drivers/net/depca.c | memcpy_toio(lp->tx_memcpy[0], skb->data + len, skb->len - len); |
skb | 1546 | drivers/net/depca.c | memcpy_toio(lp->tx_memcpy[entry], skb->data, skb->len); |
skb | 1550 | drivers/net/depca.c | len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len; |
skb | 56 | drivers/net/dummy.c | static int dummy_xmit(struct sk_buff *skb, struct device *dev); |
skb | 105 | drivers/net/dummy.c | dummy_xmit(struct sk_buff *skb, struct device *dev) |
skb | 111 | drivers/net/dummy.c | if (skb == NULL || dev == NULL) |
skb | 114 | drivers/net/dummy.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 102 | drivers/net/e2100.c | struct sk_buff *skb, int ring_offset); |
skb | 318 | drivers/net/e2100.c | e21_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 326 | drivers/net/e2100.c | eth_io_copy_and_sum(skb, dev->mem_start + (ring_offset & 0xff), count, 0); |
skb | 137 | drivers/net/eepro.c | static int eepro_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 595 | drivers/net/eepro.c | eepro_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 633 | drivers/net/eepro.c | if (skb == NULL) { |
skb | 642 | drivers/net/eepro.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 643 | drivers/net/eepro.c | unsigned char *buf = skb->data; |
skb | 649 | drivers/net/eepro.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 1046 | drivers/net/eepro.c | struct sk_buff *skb; |
skb | 1049 | drivers/net/eepro.c | skb = dev_alloc_skb(rcv_size+2); |
skb | 1050 | drivers/net/eepro.c | if (skb == NULL) { |
skb | 1055 | drivers/net/eepro.c | skb->dev = dev; |
skb | 1056 | drivers/net/eepro.c | skb_reserve(skb,2); |
skb | 1058 | drivers/net/eepro.c | insw(ioaddr+IO_PORT, skb_put(skb,rcv_size), (rcv_size + 1) >> 1); |
skb | 1060 | drivers/net/eepro.c | skb->protocol = eth_type_trans(skb,dev); |
skb | 1061 | drivers/net/eepro.c | netif_rx(skb); |
skb | 288 | drivers/net/eexpress.c | static int eexp_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 450 | drivers/net/eexpress.c | eexp_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 485 | drivers/net/eexpress.c | if (skb == NULL) { |
skb | 494 | drivers/net/eexpress.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 495 | drivers/net/eexpress.c | unsigned char *buf = skb->data; |
skb | 505 | drivers/net/eexpress.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 930 | drivers/net/eexpress.c | struct sk_buff *skb; |
skb | 933 | drivers/net/eexpress.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 934 | drivers/net/eexpress.c | if (skb == NULL) { |
skb | 939 | drivers/net/eexpress.c | skb->dev = dev; |
skb | 940 | drivers/net/eexpress.c | skb_reserve(skb,2); |
skb | 944 | drivers/net/eexpress.c | insw(ioaddr, skb_put(skb,pkt_len), (pkt_len + 1) >> 1); |
skb | 946 | drivers/net/eexpress.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 947 | drivers/net/eexpress.c | netif_rx(skb); |
skb | 145 | drivers/net/eql.c | static int eql_slave_xmit(struct sk_buff *skb, struct device *dev); /* */ |
skb | 148 | drivers/net/eql.c | static int eql_header(struct sk_buff *skb, struct device *dev, |
skb | 152 | drivers/net/eql.c | unsigned long raddr, struct sk_buff *skb); /* */ |
skb | 356 | drivers/net/eql.c | eql_slave_xmit(struct sk_buff *skb, struct device *dev) |
skb | 362 | drivers/net/eql.c | if (skb == NULL) |
skb | 377 | drivers/net/eql.c | dev->name, eql_number_slaves (eql->queue), skb->len, |
skb | 381 | drivers/net/eql.c | dev_queue_xmit (skb, slave_dev, 1); |
skb | 383 | drivers/net/eql.c | slave->bytes_queued += skb->len; |
skb | 391 | drivers/net/eql.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 409 | drivers/net/eql.c | eql_header(struct sk_buff *skb, struct device *dev, |
skb | 420 | drivers/net/eql.c | unsigned long raddr, struct sk_buff *skb) |
skb | 327 | drivers/net/eth16i.c | static int eth16i_tx(struct sk_buff *skb, struct device *dev); |
skb | 859 | drivers/net/eth16i.c | static int eth16i_tx(struct sk_buff *skb, struct device *dev) |
skb | 918 | drivers/net/eth16i.c | if(skb == NULL) { |
skb | 932 | drivers/net/eth16i.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 933 | drivers/net/eth16i.c | unsigned char *buf = skb->data; |
skb | 973 | drivers/net/eth16i.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1003 | drivers/net/eth16i.c | struct sk_buff *skb; |
skb | 1016 | drivers/net/eth16i.c | skb = dev_alloc_skb(pkt_len + 3); |
skb | 1017 | drivers/net/eth16i.c | if( skb == NULL ) { |
skb | 1025 | drivers/net/eth16i.c | skb->dev = dev; |
skb | 1026 | drivers/net/eth16i.c | skb_reserve(skb,2); |
skb | 1034 | drivers/net/eth16i.c | insw(ioaddr + DATAPORT, skb_put(skb, pkt_len), (pkt_len + 1) >> 1); |
skb | 1036 | drivers/net/eth16i.c | unsigned char *buf = skb_put(skb, pkt_len); |
skb | 1051 | drivers/net/eth16i.c | skb->protocol=eth_type_trans(skb, dev); |
skb | 1052 | drivers/net/eth16i.c | netif_rx(skb); |
skb | 1059 | drivers/net/eth16i.c | printk(" %02x", skb->data[i]); |
skb | 288 | drivers/net/ewrk3.c | static int ewrk3_queue_pkt(struct sk_buff *skb, struct device *dev); |
skb | 720 | drivers/net/ewrk3.c | ewrk3_queue_pkt(struct sk_buff *skb, struct device *dev) |
skb | 756 | drivers/net/ewrk3.c | } else if (skb == NULL) { |
skb | 758 | drivers/net/ewrk3.c | } else if (skb->len > 0) { |
skb | 806 | drivers/net/ewrk3.c | u_char *p = skb->data; |
skb | 809 | drivers/net/ewrk3.c | outb((char)(skb->len & 0xff), EWRK3_DATA); |
skb | 810 | drivers/net/ewrk3.c | outb((char)((skb->len >> 8) & 0xff), EWRK3_DATA); |
skb | 812 | drivers/net/ewrk3.c | for (i=0; i<skb->len; i++) { |
skb | 819 | drivers/net/ewrk3.c | writeb((char)(skb->len & 0xff), (char *)buf);/* length (16 bit xfer)*/ |
skb | 822 | drivers/net/ewrk3.c | writeb((char)(((skb->len >> 8) & 0xff) | XCT), (char *)buf); |
skb | 826 | drivers/net/ewrk3.c | writeb(0x00, (char *)(buf + skb->len)); /* Write the XCT flag */ |
skb | 827 | drivers/net/ewrk3.c | memcpy_toio(buf, skb->data, PRELOAD);/* Write PRELOAD bytes*/ |
skb | 829 | drivers/net/ewrk3.c | memcpy_toio(buf+PRELOAD, skb->data+PRELOAD, skb->len-PRELOAD); |
skb | 830 | drivers/net/ewrk3.c | writeb(0xff, (char *)(buf + skb->len)); /* Write the XCT flag */ |
skb | 832 | drivers/net/ewrk3.c | writeb((char)((skb->len >> 8) & 0xff), (char *)buf); |
skb | 836 | drivers/net/ewrk3.c | memcpy_toio((char *)buf, skb->data, skb->len);/* Write data bytes */ |
skb | 842 | drivers/net/ewrk3.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 998 | drivers/net/ewrk3.c | struct sk_buff *skb; |
skb | 1000 | drivers/net/ewrk3.c | if ((skb = dev_alloc_skb(pkt_len+2)) != NULL) { |
skb | 1002 | drivers/net/ewrk3.c | skb->dev = dev; |
skb | 1003 | drivers/net/ewrk3.c | skb_reserve(skb,2); /* Align to 16 bytes */ |
skb | 1004 | drivers/net/ewrk3.c | p = skb_put(skb,pkt_len); |
skb | 1019 | drivers/net/ewrk3.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 1020 | drivers/net/ewrk3.c | netif_rx(skb); |
skb | 1032 | drivers/net/ewrk3.c | p = skb->data; /* Look at the dest addr */ |
skb | 102 | drivers/net/hp-plus.c | struct sk_buff *skb, int ring_offset); |
skb | 108 | drivers/net/hp-plus.c | struct sk_buff *skb, int ring_offset); |
skb | 340 | drivers/net/hp-plus.c | hpp_io_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 343 | drivers/net/hp-plus.c | char *buf = skb->data; |
skb | 367 | drivers/net/hp-plus.c | hpp_mem_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 380 | drivers/net/hp-plus.c | memcpy_fromio(skb->data, dev->mem_start, count); |
skb | 66 | drivers/net/hp.c | struct sk_buff *skb , int ring_offset); |
skb | 276 | drivers/net/hp.c | hp_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 281 | drivers/net/hp.c | char *buf = skb->data; |
skb | 193 | drivers/net/hp100.c | static int hp100_start_xmit( struct sk_buff *skb, struct device *dev ); |
skb | 574 | drivers/net/hp100.c | static int hp100_start_xmit( struct sk_buff *skb, struct device *dev ) |
skb | 595 | drivers/net/hp100.c | if ( ( i = ( hp100_inl( TX_MEM_FREE ) & ~0x7fffffff ) ) < skb -> len + 16 ) |
skb | 639 | drivers/net/hp100.c | if ( skb == NULL ) |
skb | 645 | drivers/net/hp100.c | if ( skb -> len <= 0 ) return 0; |
skb | 658 | drivers/net/hp100.c | printk( "hp100_start_xmit: irq_status = 0x%x, len = %d\n", val, (int)skb -> len ); |
skb | 660 | drivers/net/hp100.c | ok_flag = skb -> len >= HP100_MIN_PACKET_SIZE; |
skb | 661 | drivers/net/hp100.c | i = ok_flag ? skb -> len : HP100_MIN_PACKET_SIZE; |
skb | 668 | drivers/net/hp100.c | memcpy( lp -> mem_ptr_virt, skb -> data, skb -> len ); |
skb | 670 | drivers/net/hp100.c | memset( lp -> mem_ptr_virt, 0, HP100_MIN_PACKET_SIZE - skb -> len ); |
skb | 674 | drivers/net/hp100.c | memcpy_toio( lp -> mem_ptr_phys, skb -> data, skb -> len ); |
skb | 676 | drivers/net/hp100.c | memset_io( lp -> mem_ptr_phys, 0, HP100_MIN_PACKET_SIZE - skb -> len ); |
skb | 681 | drivers/net/hp100.c | outsl( ioaddr + HP100_REG_DATA32, skb -> data, ( skb -> len + 3 ) >> 2 ); |
skb | 683 | drivers/net/hp100.c | for ( i = ( skb -> len + 3 ) & ~3; i < HP100_MIN_PACKET_SIZE; i += 4 ) |
skb | 691 | drivers/net/hp100.c | dev_kfree_skb( skb, FREE_WRITE ); |
skb | 710 | drivers/net/hp100.c | struct sk_buff *skb; |
skb | 753 | drivers/net/hp100.c | skb = dev_alloc_skb( pkt_len ); |
skb | 754 | drivers/net/hp100.c | if ( skb == NULL ) |
skb | 765 | drivers/net/hp100.c | skb -> dev = dev; |
skb | 766 | drivers/net/hp100.c | ptr = (u_char *)skb_put( skb, pkt_len ); |
skb | 776 | drivers/net/hp100.c | skb -> protocol = eth_type_trans( skb, dev ); |
skb | 777 | drivers/net/hp100.c | netif_rx( skb ); |
skb | 148 | drivers/net/ibmtr.c | static int tok_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 1139 | drivers/net/ibmtr.c | struct sk_buff *skb; |
skb | 1183 | drivers/net/ibmtr.c | if(!(skb=dev_alloc_skb(ntohs(rec_req->frame_len)-lan_hdr_len+sizeof(struct trh_hdr)))) { |
skb | 1191 | drivers/net/ibmtr.c | skb_put(skb,ntohs(rec_req->frame_len)-lan_hdr_len+sizeof(struct trh_hdr)); |
skb | 1192 | drivers/net/ibmtr.c | skb->dev=dev; |
skb | 1199 | drivers/net/ibmtr.c | data=skb->data; |
skb | 1229 | drivers/net/ibmtr.c | skb->protocol=tr_type_trans(skb,dev); |
skb | 1230 | drivers/net/ibmtr.c | netif_rx(skb); |
skb | 1235 | drivers/net/ibmtr.c | static int tok_send_packet(struct sk_buff *skb, struct device *dev) { |
skb | 1256 | drivers/net/ibmtr.c | if(skb==NULL) { |
skb | 1266 | drivers/net/ibmtr.c | ti->current_skb=skb; /* save skb. We will need it when the adapter |
skb | 255 | drivers/net/lance.c | static int lance_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 705 | drivers/net/lance.c | lance_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 747 | drivers/net/lance.c | if (skb == NULL) { |
skb | 752 | drivers/net/lance.c | if (skb->len <= 0) |
skb | 787 | drivers/net/lance.c | -(ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN); |
skb | 789 | drivers/net/lance.c | lp->tx_ring[entry].length = -skb->len; |
skb | 795 | drivers/net/lance.c | if ((int)(skb->data) + skb->len > 0x01000000) { |
skb | 798 | drivers/net/lance.c | dev->name, (int)(skb->data)); |
skb | 799 | drivers/net/lance.c | memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len); |
skb | 802 | drivers/net/lance.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 804 | drivers/net/lance.c | lp->tx_skbuff[entry] = skb; |
skb | 805 | drivers/net/lance.c | lp->tx_ring[entry].base = (int)(skb->data) | 0x83000000; |
skb | 984 | drivers/net/lance.c | struct sk_buff *skb; |
skb | 993 | drivers/net/lance.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 994 | drivers/net/lance.c | if (skb == NULL) |
skb | 1009 | drivers/net/lance.c | skb->dev = dev; |
skb | 1010 | drivers/net/lance.c | skb_reserve(skb,2); /* 16 byte align */ |
skb | 1011 | drivers/net/lance.c | skb_put(skb,pkt_len); /* Make room */ |
skb | 1012 | drivers/net/lance.c | eth_copy_and_sum(skb, |
skb | 1015 | drivers/net/lance.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 1016 | drivers/net/lance.c | netif_rx(skb); |
skb | 51 | drivers/net/loopback.c | static int loopback_xmit(struct sk_buff *skb, struct device *dev) |
skb | 57 | drivers/net/loopback.c | if (skb == NULL || dev == NULL) |
skb | 76 | drivers/net/loopback.c | if(skb->free==0) |
skb | 78 | drivers/net/loopback.c | struct sk_buff *skb2=skb; |
skb | 79 | drivers/net/loopback.c | skb=skb_clone(skb, GFP_ATOMIC); /* Clone the buffer */ |
skb | 80 | drivers/net/loopback.c | if(skb==NULL) |
skb | 85 | drivers/net/loopback.c | else if(skb->sk) |
skb | 93 | drivers/net/loopback.c | skb->sk->wmem_alloc-=skb->truesize; |
skb | 94 | drivers/net/loopback.c | skb->sk->write_space(skb->sk); |
skb | 98 | drivers/net/loopback.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 99 | drivers/net/loopback.c | skb->dev=dev; |
skb | 102 | drivers/net/loopback.c | netif_rx(skb); |
skb | 104 | drivers/net/loopback.c | skb_device_unlock(skb); |
skb | 109 | drivers/net/ne.c | struct sk_buff *skb, int ring_offset); |
skb | 498 | drivers/net/ne.c | ne_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 504 | drivers/net/ne.c | char *buf = skb->data; |
skb | 804 | drivers/net/ni52.c | struct sk_buff *skb; |
skb | 818 | drivers/net/ni52.c | skb = (struct sk_buff *) dev_alloc_skb(totlen+2); |
skb | 819 | drivers/net/ni52.c | if(skb != NULL) |
skb | 821 | drivers/net/ni52.c | skb->dev = dev; |
skb | 822 | drivers/net/ni52.c | skb_reserve(skb,2); /* 16 byte alignment */ |
skb | 823 | drivers/net/ni52.c | memcpy(skb_put(skb,totlen),(char *) p->base+(unsigned long) rbd->buffer, totlen); |
skb | 824 | drivers/net/ni52.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 825 | drivers/net/ni52.c | netif_rx(skb); |
skb | 940 | drivers/net/ni52.c | static int ni52_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 984 | drivers/net/ni52.c | if(skb == NULL) |
skb | 990 | drivers/net/ni52.c | if (skb->len <= 0) |
skb | 992 | drivers/net/ni52.c | if(skb->len > XMIT_BUFF_SIZE) |
skb | 994 | drivers/net/ni52.c | printk("%s: Sorry, max. framelength is %d bytes. The length of your frame is %ld bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len); |
skb | 1002 | drivers/net/ni52.c | memcpy((char *)p->xmit_cbuffs[p->xmit_count],(char *)(skb->data),skb->len); |
skb | 1003 | drivers/net/ni52.c | len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; |
skb | 1017 | drivers/net/ni52.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 1037 | drivers/net/ni52.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 1057 | drivers/net/ni52.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 122 | drivers/net/ni65.c | static int ni65_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 455 | drivers/net/ni65.c | struct sk_buff *skb; |
skb | 465 | drivers/net/ni65.c | skb = (struct sk_buff *) p->tmdbufs[p->tmdlast]; |
skb | 466 | drivers/net/ni65.c | dev_kfree_skb(skb,FREE_WRITE); |
skb | 503 | drivers/net/ni65.c | struct sk_buff *skb,*skb1; |
skb | 532 | drivers/net/ni65.c | skb = dev_alloc_skb(R_BUF_SIZE); |
skb | 533 | drivers/net/ni65.c | if(skb != NULL) |
skb | 535 | drivers/net/ni65.c | if( (unsigned long) (skb->data + R_BUF_SIZE) & 0xff000000) { |
skb | 536 | drivers/net/ni65.c | memcpy(skb_put(skb,len),p->recv_skb[p->rmdnum]->data,len); |
skb | 537 | drivers/net/ni65.c | skb1 = skb; |
skb | 541 | drivers/net/ni65.c | p->recv_skb[p->rmdnum] = skb; |
skb | 567 | drivers/net/ni65.c | static int ni65_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 584 | drivers/net/ni65.c | if(skb == NULL) |
skb | 590 | drivers/net/ni65.c | if (skb->len <= 0) |
skb | 605 | drivers/net/ni65.c | short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 610 | drivers/net/ni65.c | tmdp->u.buffer = (unsigned long) (skb->data); |
skb | 611 | drivers/net/ni65.c | p->tmdbufs[p->tmdnum] = skb; |
skb | 613 | drivers/net/ni65.c | memcpy((char *) (tmdp->u.buffer & 0x00ffffff),(char *)skb->data,skb->len); |
skb | 614 | drivers/net/ni65.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 151 | drivers/net/pi2.c | static int pi_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 208 | drivers/net/pi2.c | static void hardware_send_packet(struct pi_local *lp, struct sk_buff *skb) |
skb | 220 | drivers/net/pi2.c | skb_queue_tail(&lp->sndq, skb); |
skb | 338 | drivers/net/pi2.c | static void free_p(struct sk_buff *skb) |
skb | 340 | drivers/net/pi2.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 510 | drivers/net/pi2.c | struct sk_buff *skb; |
skb | 554 | drivers/net/pi2.c | skb = dev_alloc_skb(sksize); |
skb | 555 | drivers/net/pi2.c | if (skb == NULL) { |
skb | 561 | drivers/net/pi2.c | skb->dev = dev; |
skb | 564 | drivers/net/pi2.c | cfix=skb_put(skb,pkt_len); |
skb | 569 | drivers/net/pi2.c | skb->protocol=htons(ETH_P_AX25); |
skb | 570 | drivers/net/pi2.c | skb->mac.raw=skb->data; |
skb | 571 | drivers/net/pi2.c | IS_SKB(skb); |
skb | 572 | drivers/net/pi2.c | netif_rx(skb); |
skb | 585 | drivers/net/pi2.c | struct sk_buff *skb; |
skb | 643 | drivers/net/pi2.c | skb = dev_alloc_skb(sksize); |
skb | 644 | drivers/net/pi2.c | if (skb == NULL) { |
skb | 650 | drivers/net/pi2.c | skb->dev = dev; |
skb | 653 | drivers/net/pi2.c | cfix=skb_put(skb,pkt_len); |
skb | 657 | drivers/net/pi2.c | skb->protocol=ntohs(ETH_P_AX25); |
skb | 658 | drivers/net/pi2.c | skb->mac.raw=skb->data; |
skb | 659 | drivers/net/pi2.c | IS_SKB(skb); |
skb | 660 | drivers/net/pi2.c | netif_rx(skb); |
skb | 1077 | drivers/net/pi2.c | static int pi_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 1080 | drivers/net/pi2.c | return ax25_encapsulate(skb, dev, type, daddr, saddr, len); |
skb | 1085 | drivers/net/pi2.c | struct sk_buff *skb) |
skb | 1087 | drivers/net/pi2.c | return ax25_rebuild_header(buff, dev, raddr, skb); |
skb | 1493 | drivers/net/pi2.c | static int pi_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 1500 | drivers/net/pi2.c | if (skb == NULL) { |
skb | 1504 | drivers/net/pi2.c | hardware_send_packet(lp, skb); |
skb | 146 | drivers/net/plip.c | unsigned long raddr, struct sk_buff *skb); |
skb | 147 | drivers/net/plip.c | static int plip_tx_packet(struct sk_buff *skb, struct device *dev); |
skb | 197 | drivers/net/plip.c | struct sk_buff *skb; |
skb | 212 | drivers/net/plip.c | unsigned long raddr, struct sk_buff *skb); |
skb | 415 | drivers/net/plip.c | if (rcv->skb) { |
skb | 416 | drivers/net/plip.c | rcv->skb->free = 1; |
skb | 417 | drivers/net/plip.c | kfree_skb(rcv->skb, FREE_READ); |
skb | 418 | drivers/net/plip.c | rcv->skb = NULL; |
skb | 421 | drivers/net/plip.c | if (snd->skb) { |
skb | 422 | drivers/net/plip.c | dev_kfree_skb(snd->skb, FREE_WRITE); |
skb | 423 | drivers/net/plip.c | snd->skb = NULL; |
skb | 543 | drivers/net/plip.c | rcv->skb = dev_alloc_skb(rcv->length.h); |
skb | 544 | drivers/net/plip.c | if (rcv->skb == NULL) { |
skb | 548 | drivers/net/plip.c | skb_put(rcv->skb,rcv->length.h); |
skb | 549 | drivers/net/plip.c | rcv->skb->dev = dev; |
skb | 555 | drivers/net/plip.c | lbuf = rcv->skb->data; |
skb | 580 | drivers/net/plip.c | rcv->skb->protocol=eth_type_trans(rcv->skb, dev); |
skb | 581 | drivers/net/plip.c | netif_rx(rcv->skb); |
skb | 583 | drivers/net/plip.c | rcv->skb = NULL; |
skb | 667 | drivers/net/plip.c | if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) { |
skb | 670 | drivers/net/plip.c | snd->skb = NULL; |
skb | 742 | drivers/net/plip.c | dev_kfree_skb(snd->skb, FREE_WRITE); |
skb | 749 | drivers/net/plip.c | snd->skb = NULL; |
skb | 858 | drivers/net/plip.c | struct sk_buff *skb) |
skb | 865 | drivers/net/plip.c | return nl->orig_rebuild_header(buff, dev, dst, skb); |
skb | 880 | drivers/net/plip.c | plip_tx_packet(struct sk_buff *skb, struct device *dev) |
skb | 891 | drivers/net/plip.c | if (skb == NULL) { |
skb | 901 | drivers/net/plip.c | if (skb->len > dev->mtu + dev->hard_header_len) { |
skb | 902 | drivers/net/plip.c | printk("%s: packet too big, %d.\n", dev->name, (int)skb->len); |
skb | 912 | drivers/net/plip.c | snd->skb = skb; |
skb | 913 | drivers/net/plip.c | snd->length.h = skb->len; |
skb | 959 | drivers/net/plip.c | nl->rcv_data.skb = nl->snd_data.skb = NULL; |
skb | 994 | drivers/net/plip.c | if (snd->skb) { |
skb | 995 | drivers/net/plip.c | dev_kfree_skb(snd->skb, FREE_WRITE); |
skb | 996 | drivers/net/plip.c | snd->skb = NULL; |
skb | 999 | drivers/net/plip.c | if (rcv->skb) { |
skb | 1000 | drivers/net/plip.c | rcv->skb->free = 1; |
skb | 1001 | drivers/net/plip.c | kfree_skb(rcv->skb, FREE_READ); |
skb | 1002 | drivers/net/plip.c | rcv->skb = NULL; |
skb | 101 | drivers/net/ppp.c | #define skb_data(skb) ((unsigned char *) (skb)->data) |
skb | 195 | drivers/net/ppp.c | #define skb_put(skb,count) skb_data(skb) |
skb | 204 | drivers/net/ppp.c | unsigned len, struct sk_buff *skb); |
skb | 218 | drivers/net/ppp.c | sk_buff *skb, void *saddr, void *daddr); |
skb | 219 | drivers/net/ppp.c | static int ppp_dev_output (struct protocol *self, sk_buff *skb, int type, |
skb | 1253 | drivers/net/ppp.c | sk_buff *skb = dev_alloc_skb (count); |
skb | 1257 | drivers/net/ppp.c | if (skb == NULL) { |
skb | 1267 | drivers/net/ppp.c | skb->dev = ppp2dev (ppp); /* We are the device */ |
skb | 1269 | drivers/net/ppp.c | skb->len = count; |
skb | 1271 | drivers/net/ppp.c | skb->protocol = proto; |
skb | 1272 | drivers/net/ppp.c | skb->mac.raw = skb_data(skb); |
skb | 1274 | drivers/net/ppp.c | memcpy (skb_put(skb,count), data, count); /* move data */ |
skb | 1278 | drivers/net/ppp.c | skb->free = 1; |
skb | 1280 | drivers/net/ppp.c | netif_rx (skb); |
skb | 3115 | drivers/net/ppp.c | ppp_dev_xmit (sk_buff *skb, struct device *dev) |
skb | 3124 | drivers/net/ppp.c | if (skb == NULL) { |
skb | 3133 | drivers/net/ppp.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 3142 | drivers/net/ppp.c | dev->name, skb); |
skb | 3151 | drivers/net/ppp.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 3157 | drivers/net/ppp.c | len = skb->len; |
skb | 3158 | drivers/net/ppp.c | data = skb_data(skb); |
skb | 3165 | drivers/net/ppp.c | switch (skb->protocol) { |
skb | 3175 | drivers/net/ppp.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 3186 | drivers/net/ppp.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 3233 | drivers/net/ppp.c | sk_buff *skb, void *saddr, void *daddr) |
skb | 3235 | drivers/net/ppp.c | return protocol_pass_demultiplex(self, NULL, skb, NULL, NULL); |
skb | 3238 | drivers/net/ppp.c | static int ppp_dev_output (struct protocol *self, sk_buff *skb, int type, |
skb | 3241 | drivers/net/ppp.c | if(skb->dev==NULL) |
skb | 3244 | drivers/net/ppp.c | kfree_skb(skb, FREE_WRITE); |
skb | 3247 | drivers/net/ppp.c | dev_queue_xmit(skb, skb->dev, skb->priority); |
skb | 3275 | drivers/net/ppp.c | ppp_dev_type (sk_buff *skb, struct device *dev) |
skb | 3284 | drivers/net/ppp.c | unsigned len, struct sk_buff *skb) |
skb | 3286 | drivers/net/ppp.c | static int ppp_dev_header (sk_buff *skb, struct device *dev, |
skb | 3296 | drivers/net/ppp.c | sk_buff *skb) |
skb | 122 | drivers/net/pt.c | static int pt_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 177 | drivers/net/pt.c | static void hardware_send_packet(struct pt_local *lp, struct sk_buff *skb) |
skb | 185 | drivers/net/pt.c | ptr = skb->data; |
skb | 186 | drivers/net/pt.c | if (ptr[0] != 0 && skb->len >= 2) |
skb | 188 | drivers/net/pt.c | printk("Rx KISS... Control = %d, value = %d.\n", ptr[0], (skb->len > 1? ptr[1] : -1)); |
skb | 231 | drivers/net/pt.c | skb_queue_tail(&lp->sndq, skb); |
skb | 318 | drivers/net/pt.c | static void free_p(struct sk_buff *skb) |
skb | 320 | drivers/net/pt.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 339 | drivers/net/pt.c | static int pt_header (struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 342 | drivers/net/pt.c | return ax25_encapsulate(skb, dev, type, daddr, saddr, len); |
skb | 348 | drivers/net/pt.c | struct sk_buff *skb) |
skb | 350 | drivers/net/pt.c | return ax25_rebuild_header(buff, dev, raddr, skb); |
skb | 972 | drivers/net/pt.c | static int pt_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 982 | drivers/net/pt.c | if (skb == NULL) { |
skb | 986 | drivers/net/pt.c | hardware_send_packet(lp, skb); |
skb | 1271 | drivers/net/pt.c | struct sk_buff *skb; |
skb | 1392 | drivers/net/pt.c | skb = dev_alloc_skb(sksize); |
skb | 1393 | drivers/net/pt.c | if (skb == NULL) |
skb | 1400 | drivers/net/pt.c | skb->dev = dev; |
skb | 1403 | drivers/net/pt.c | cfix=skb_put(skb,pkt_len); |
skb | 1410 | drivers/net/pt.c | skb->protocol = ntohs(ETH_P_AX25); |
skb | 1411 | drivers/net/pt.c | skb->mac.raw=skb->data; |
skb | 1412 | drivers/net/pt.c | IS_SKB(skb); |
skb | 1413 | drivers/net/pt.c | netif_rx(skb); |
skb | 1810 | drivers/net/pt.c | struct sk_buff *skb; |
skb | 1815 | drivers/net/pt.c | skb = dev_alloc_skb(2); |
skb | 1816 | drivers/net/pt.c | if (skb == NULL) |
skb | 1821 | drivers/net/pt.c | skb->dev = dev; |
skb | 1822 | drivers/net/pt.c | cfix = skb_put(skb, 2); |
skb | 1825 | drivers/net/pt.c | skb->protocol=htons(ETH_P_AX25); |
skb | 1826 | drivers/net/pt.c | skb->mac.raw=skb->data; |
skb | 1827 | drivers/net/pt.c | IS_SKB(skb); |
skb | 1828 | drivers/net/pt.c | netif_rx(skb); |
skb | 83 | drivers/net/seeq8005.c | static int seeq8005_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 374 | drivers/net/seeq8005.c | seeq8005_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 395 | drivers/net/seeq8005.c | if (skb == NULL) { |
skb | 405 | drivers/net/seeq8005.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 406 | drivers/net/seeq8005.c | unsigned char *buf = skb->data; |
skb | 411 | drivers/net/seeq8005.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 533 | drivers/net/seeq8005.c | struct sk_buff *skb; |
skb | 536 | drivers/net/seeq8005.c | skb = dev_alloc_skb(pkt_len); |
skb | 537 | drivers/net/seeq8005.c | if (skb == NULL) { |
skb | 542 | drivers/net/seeq8005.c | skb->dev = dev; |
skb | 543 | drivers/net/seeq8005.c | skb_reserve(skb, 2); /* align data on 16 byte */ |
skb | 544 | drivers/net/seeq8005.c | buf = skb_put(skb,pkt_len); |
skb | 557 | drivers/net/seeq8005.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 558 | drivers/net/seeq8005.c | netif_rx(skb); |
skb | 489 | drivers/net/sk_g16.c | static int SK_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 1192 | drivers/net/sk_g16.c | static int SK_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 1223 | drivers/net/sk_g16.c | if (skb == NULL) |
skb | 1249 | drivers/net/sk_g16.c | short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 1257 | drivers/net/sk_g16.c | memcpy((char *) (tmdp->u.buffer & 0x00ffffff), (char *)skb->data, |
skb | 1258 | drivers/net/sk_g16.c | skb->len); |
skb | 1289 | drivers/net/sk_g16.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1570 | drivers/net/sk_g16.c | struct sk_buff *skb; |
skb | 1572 | drivers/net/sk_g16.c | skb = dev_alloc_skb(len+2); /* allocate socket buffer */ |
skb | 1574 | drivers/net/sk_g16.c | if (skb == NULL) /* Could not get mem ? */ |
skb | 1592 | drivers/net/sk_g16.c | skb->dev = dev; |
skb | 1593 | drivers/net/sk_g16.c | skb_reserve(skb,2); /* Align IP header on 16 byte boundary */ |
skb | 1602 | drivers/net/sk_g16.c | memcpy(skb_put(skb,len), (unsigned char *) (rmdp->u.buffer & 0x00ffffff), |
skb | 1613 | drivers/net/sk_g16.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 1614 | drivers/net/sk_g16.c | netif_rx(skb); /* queue packet and mark it for processing */ |
skb | 102 | drivers/net/skeleton.c | static int net_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 344 | drivers/net/skeleton.c | net_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 369 | drivers/net/skeleton.c | if (skb == NULL) { |
skb | 380 | drivers/net/skeleton.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 381 | drivers/net/skeleton.c | unsigned char *buf = skb->data; |
skb | 386 | drivers/net/skeleton.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 459 | drivers/net/skeleton.c | struct sk_buff *skb; |
skb | 461 | drivers/net/skeleton.c | skb = dev_alloc_skb(pkt_len); |
skb | 462 | drivers/net/skeleton.c | if (skb == NULL) { |
skb | 468 | drivers/net/skeleton.c | skb->dev = dev; |
skb | 471 | drivers/net/skeleton.c | memcpy(skb_put(skb,pkt_len), (void*)dev->rmem_start, |
skb | 474 | drivers/net/skeleton.c | insw(ioaddr, skb->data, (pkt_len + 1) >> 1); |
skb | 476 | drivers/net/skeleton.c | netif_rx(skb); |
skb | 339 | drivers/net/slip.c | struct sk_buff *skb; |
skb | 376 | drivers/net/slip.c | skb = dev_alloc_skb(count); |
skb | 377 | drivers/net/slip.c | if (skb == NULL) { |
skb | 382 | drivers/net/slip.c | skb->dev = sl->dev; |
skb | 383 | drivers/net/slip.c | memcpy(skb_put(skb,count), sl->rbuff, count); |
skb | 384 | drivers/net/slip.c | skb->mac.raw=skb->data; |
skb | 386 | drivers/net/slip.c | skb->protocol=htons(ETH_P_AX25); |
skb | 388 | drivers/net/slip.c | skb->protocol=htons(ETH_P_IP); |
skb | 389 | drivers/net/slip.c | netif_rx(skb); |
skb | 478 | drivers/net/slip.c | sl_xmit(struct sk_buff *skb, struct device *dev) |
skb | 517 | drivers/net/slip.c | if (skb != NULL) { |
skb | 519 | drivers/net/slip.c | sl_encaps(sl, skb->data, skb->len); |
skb | 520 | drivers/net/slip.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 530 | drivers/net/slip.c | sl_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 538 | drivers/net/slip.c | return ax25_encapsulate(skb, dev, type, daddr, saddr, len); |
skb | 549 | drivers/net/slip.c | struct sk_buff *skb) |
skb | 556 | drivers/net/slip.c | return ax25_rebuild_header(buff, dev, raddr, skb); |
skb | 73 | drivers/net/smc-ultra.c | struct sk_buff *skb, int ring_offset); |
skb | 286 | drivers/net/smc-ultra.c | ultra_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 296 | drivers/net/smc-ultra.c | memcpy_fromio(skb->data, xfer_start, semi_count); |
skb | 298 | drivers/net/smc-ultra.c | memcpy_fromio(skb->data + semi_count, dev->rmem_start, count); |
skb | 301 | drivers/net/smc-ultra.c | eth_io_copy_and_sum(skb, xfer_start, count, 0); |
skb | 347 | drivers/net/sunlance.c | struct sk_buff *skb; |
skb | 350 | drivers/net/sunlance.c | skb = dev_alloc_skb (pkt_len+2); |
skb | 351 | drivers/net/sunlance.c | if (skb == NULL){ |
skb | 358 | drivers/net/sunlance.c | skb->dev = dev; |
skb | 359 | drivers/net/sunlance.c | skb_reserve (skb, 2); /* 16 byte align */ |
skb | 360 | drivers/net/sunlance.c | buf = skb_put (skb, pkt_len); /* make room */ |
skb | 362 | drivers/net/sunlance.c | skb->protocol = eth_type_trans (skb,dev); |
skb | 363 | drivers/net/sunlance.c | netif_rx (skb); |
skb | 387 | drivers/net/sunlance.c | struct sk_buff *skb; |
skb | 390 | drivers/net/sunlance.c | skb = dev_alloc_skb (pkt_len+2); |
skb | 391 | drivers/net/sunlance.c | if (skb == NULL){ |
skb | 399 | drivers/net/sunlance.c | skb->dev = dev; |
skb | 400 | drivers/net/sunlance.c | skb_reserve (skb, 2); /* 16 byte align */ |
skb | 401 | drivers/net/sunlance.c | buf = skb_put (skb, pkt_len); /* make room */ |
skb | 403 | drivers/net/sunlance.c | skb->protocol = eth_type_trans (skb,dev); |
skb | 404 | drivers/net/sunlance.c | netif_rx (skb); |
skb | 602 | drivers/net/sunlance.c | static int lance_start_xmit (struct sk_buff *skb, struct device *dev) |
skb | 625 | drivers/net/sunlance.c | if (skb == NULL){ |
skb | 631 | drivers/net/sunlance.c | if (skb->len <= 0){ |
skb | 632 | drivers/net/sunlance.c | printk ("skb len is %ld\n", skb->len); |
skb | 644 | drivers/net/sunlance.c | skblen = skb->len; |
skb | 654 | drivers/net/sunlance.c | printk ("%2.2x ", skb->data [i]); |
skb | 662 | drivers/net/sunlance.c | memcpy ((char *)&ib->tx_buf [entry][0], skb->data, skblen); |
skb | 678 | drivers/net/sunlance.c | dev_kfree_skb (skb, FREE_WRITE); |
skb | 156 | drivers/net/tulip.c | static int tulip_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 376 | drivers/net/tulip.c | tulip_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 406 | drivers/net/tulip.c | if (skb == NULL || skb->len <= 0) { |
skb | 428 | drivers/net/tulip.c | tp->tx_skbuff[entry] = skb; |
skb | 429 | drivers/net/tulip.c | tp->tx_ring[entry].length = skb->len | |
skb | 431 | drivers/net/tulip.c | tp->tx_ring[entry].buffer1 = skb->data; |
skb | 601 | drivers/net/tulip.c | struct sk_buff *skb; |
skb | 603 | drivers/net/tulip.c | skb = dev_alloc_skb(pkt_len+2); |
skb | 604 | drivers/net/tulip.c | if (skb == NULL) { |
skb | 619 | drivers/net/tulip.c | skb->dev = dev; |
skb | 620 | drivers/net/tulip.c | skb_reserve(skb,2); /* 16 byte align the data fields */ |
skb | 621 | drivers/net/tulip.c | memcpy(skb_put(skb,pkt_len), lp->rx_ring[entry].buffer1, pkt_len); |
skb | 622 | drivers/net/tulip.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 623 | drivers/net/tulip.c | netif_rx(skb); |
skb | 42 | drivers/net/tunnel.c | static int tunnel_xmit(struct sk_buff *skb, struct device *dev); |
skb | 122 | drivers/net/tunnel.c | static int tunnel_xmit(struct sk_buff *skb, struct device *dev) |
skb | 133 | drivers/net/tunnel.c | if (skb == NULL || dev == NULL) |
skb | 158 | drivers/net/tunnel.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 163 | drivers/net/tunnel.c | iph=(struct iphdr *)skb->data; |
skb | 170 | drivers/net/tunnel.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 187 | drivers/net/tunnel.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 194 | drivers/net/tunnel.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 208 | drivers/net/tunnel.c | newlen = (skb->len + ip_header_len); |
skb | 212 | drivers/net/tunnel.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 223 | drivers/net/tunnel.c | memcpy(skb2->h.iph, skb->data, ip_header_len ); |
skb | 224 | drivers/net/tunnel.c | memcpy(skb2->data + ip_header_len, skb->data, skb->len); |
skb | 226 | drivers/net/tunnel.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1389 | drivers/net/wavelan.c | wavelan_send_packet(struct sk_buff *skb, device *dev) |
skb | 1430 | drivers/net/wavelan.c | if (skb == (struct sk_buff *)0) |
skb | 1444 | drivers/net/wavelan.c | length = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; |
skb | 1445 | drivers/net/wavelan.c | buf = skb->data; |
skb | 1452 | drivers/net/wavelan.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1497 | drivers/net/wavelan.c | struct sk_buff *skb; |
skb | 1640 | drivers/net/wavelan.c | if ((skb = dev_alloc_skb(sksize)) == (struct sk_buff *)0) |
skb | 1647 | drivers/net/wavelan.c | skb->dev = dev; |
skb | 1649 | drivers/net/wavelan.c | obram_read(ioaddr, rbd.rbd_bufl, skb_put(skb,pkt_len), pkt_len); |
skb | 1665 | drivers/net/wavelan.c | c = skb->data[i]; |
skb | 1667 | drivers/net/wavelan.c | printk(" %c", skb->data[i]); |
skb | 1669 | drivers/net/wavelan.c | printk("%02x", skb->data[i]); |
skb | 1678 | drivers/net/wavelan.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 1679 | drivers/net/wavelan.c | netif_rx(skb); |
skb | 55 | drivers/net/wd.c | struct sk_buff *skb, int ring_offset); |
skb | 376 | drivers/net/wd.c | wd_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset) |
skb | 384 | drivers/net/wd.c | memcpy_fromio(skb->data, xfer_start, semi_count); |
skb | 386 | drivers/net/wd.c | memcpy_fromio(skb->data + semi_count, dev->rmem_start, count); |
skb | 389 | drivers/net/wd.c | eth_io_copy_and_sum(skb, xfer_start, count, 0); |
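
Editorial note: the wd.c, smc-ultra.c and 3c503.c block_input entries all perform the same wrap-around copy out of the card's shared-memory receive ring: when the packet crosses the top of the window, copy the first semi_count bytes from the current offset, then the remainder from dev->rmem_start. The sketch below is hedged and simplified; example_block_input() is hypothetical, the page-offset arithmetic of the real drivers is omitted, and dev->rmem_end is assumed to mark the top of the receive window on struct device.

    /* Hedged sketch of the ring wrap-around copy in the block_input routines above. */
    static void example_block_input(struct device *dev, int count,
                                    struct sk_buff *skb, int ring_offset)
    {
        unsigned long xfer_start = dev->mem_start + ring_offset;   /* simplified */

        if (xfer_start + count > dev->rmem_end) {
            /* Packet wraps over the end of the receive ring: copy in two pieces. */
            int semi_count = dev->rmem_end - xfer_start;
            memcpy_fromio(skb->data, xfer_start, semi_count);
            count -= semi_count;
            memcpy_fromio(skb->data + semi_count, dev->rmem_start, count);
        } else {
            /* No wrap: one copy (the listed drivers checksum while copying). */
            eth_io_copy_and_sum(skb, xfer_start, count, 0);
        }
    }
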
skb | 184 | drivers/net/znet.c | static int znet_send_packet(struct sk_buff *skb, struct device *dev); |
skb | 317 | drivers/net/znet.c | static int znet_send_packet(struct sk_buff *skb, struct device *dev) |
skb | 343 | drivers/net/znet.c | if (skb == NULL) { |
skb | 360 | drivers/net/znet.c | short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; |
skb | 361 | drivers/net/znet.c | unsigned char *buf = (void *)skb->data; |
skb | 386 | drivers/net/znet.c | memcpy(zn.tx_cur, buf, skb->len); |
skb | 400 | drivers/net/znet.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 549 | drivers/net/znet.c | struct sk_buff *skb; |
skb | 551 | drivers/net/znet.c | skb = dev_alloc_skb(pkt_len); |
skb | 552 | drivers/net/znet.c | if (skb == NULL) { |
skb | 558 | drivers/net/znet.c | skb->dev = dev; |
skb | 562 | drivers/net/znet.c | memcpy(skb_put(skb,semi_cnt), zn.rx_cur, semi_cnt); |
skb | 563 | drivers/net/znet.c | memcpy(skb_put(skb,pkt_len-semi_cnt), zn.rx_start, |
skb | 566 | drivers/net/znet.c | memcpy(skb_put(skb,pkt_len), zn.rx_cur, pkt_len); |
skb | 568 | drivers/net/znet.c | unsigned int *packet = (unsigned int *) skb->data; |
skb | 573 | drivers/net/znet.c | skb->protocol=eth_type_trans(skb,dev); |
skb | 574 | drivers/net/znet.c | netif_rx(skb); |
skb | 170 | fs/nfs/nfsroot.c | static int root_rarp_recv(struct sk_buff *skb, struct device *dev, |
skb | 207 | fs/nfs/nfsroot.c | static int root_rarp_recv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 209 | fs/nfs/nfsroot.c | struct arphdr *rarp = (struct arphdr *)skb->h.raw; |
skb | 216 | fs/nfs/nfsroot.c | kfree_skb(skb, FREE_READ); |
skb | 222 | fs/nfs/nfsroot.c | kfree_skb(skb, FREE_READ); |
skb | 232 | fs/nfs/nfsroot.c | kfree_skb(skb, FREE_READ); |
skb | 247 | fs/nfs/nfsroot.c | kfree_skb(skb, FREE_READ); |
skb | 253 | fs/nfs/nfsroot.c | kfree_skb(skb, FREE_READ); |
skb | 264 | fs/nfs/nfsroot.c | kfree_skb(skb, FREE_READ); |
skb | 278 | fs/nfs/nfsroot.c | kfree_skb(skb, FREE_READ); |
skb | 209 | include/asm-alpha/io.h | #define eth_io_copy_and_sum(skb,src,len,unused) memcpy_fromio((skb)->data,(src),(len)) |
skb | 141 | include/linux/atalk.h | extern int aarp_send_ddp(struct device *dev,struct sk_buff *skb, struct at_addr *sa, void *hwaddr); |
skb | 31 | include/linux/etherdevice.h | extern int eth_header(struct sk_buff *skb, struct device *dev, |
skb | 35 | include/linux/etherdevice.h | unsigned long dst, struct sk_buff *skb); |
skb | 36 | include/linux/etherdevice.h | extern unsigned short eth_type_trans(struct sk_buff *skb, struct device *dev); |
skb | 18 | include/linux/firewall.h | struct sk_buff *skb, void *phdr); |
skb | 20 | include/linux/firewall.h | struct sk_buff *skb, void *phdr); |
skb | 22 | include/linux/firewall.h | struct sk_buff *skb, void *phdr); |
skb | 34 | include/linux/firewall.h | extern int call_fw_firewall(int pf, struct sk_buff *skb, void *phdr); |
skb | 35 | include/linux/firewall.h | extern int call_in_firewall(int pf, struct sk_buff *skb, void *phdr); |
skb | 36 | include/linux/firewall.h | extern int call_out_firewall(int pf, struct sk_buff *skb, void *phdr); |
skb | 124 | include/linux/mroute.h | extern void ipmr_forward(struct sk_buff *skb, int is_frag); |
skb | 165 | include/linux/netdevice.h | int (*hard_start_xmit) (struct sk_buff *skb, |
skb | 167 | include/linux/netdevice.h | int (*hard_header) (struct sk_buff *skb, |
skb | 174 | include/linux/netdevice.h | unsigned long raddr, struct sk_buff *skb); |
skb | 226 | include/linux/netdevice.h | extern void dev_queue_xmit(struct sk_buff *skb, struct device *dev, |
skb | 229 | include/linux/netdevice.h | extern void netif_rx(struct sk_buff *skb); |
skb | 124 | include/linux/skbuff.h | extern void kfree_skb(struct sk_buff *skb, int rw); |
skb | 135 | include/linux/skbuff.h | extern void kfree_skbmem(struct sk_buff *skb); |
skb | 136 | include/linux/skbuff.h | extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority); |
skb | 137 | include/linux/skbuff.h | extern void skb_device_lock(struct sk_buff *skb); |
skb | 138 | include/linux/skbuff.h | extern void skb_device_unlock(struct sk_buff *skb); |
skb | 139 | include/linux/skbuff.h | extern void dev_kfree_skb(struct sk_buff *skb, int mode); |
skb | 140 | include/linux/skbuff.h | extern int skb_device_locked(struct sk_buff *skb); |
skb | 141 | include/linux/skbuff.h | extern unsigned char * skb_put(struct sk_buff *skb, int len); |
skb | 142 | include/linux/skbuff.h | extern unsigned char * skb_push(struct sk_buff *skb, int len); |
skb | 143 | include/linux/skbuff.h | extern unsigned char * skb_pull(struct sk_buff *skb, int len); |
skb | 144 | include/linux/skbuff.h | extern int skb_headroom(struct sk_buff *skb); |
skb | 145 | include/linux/skbuff.h | extern int skb_tailroom(struct sk_buff *skb); |
skb | 146 | include/linux/skbuff.h | extern void skb_reserve(struct sk_buff *skb, int len); |
skb | 147 | include/linux/skbuff.h | extern void skb_trim(struct sk_buff *skb, int len); |
skb | 162 | include/linux/skbuff.h | extern int skb_check(struct sk_buff *skb,int,int, char *); |
skb | 163 | include/linux/skbuff.h | #define IS_SKB(skb) skb_check((skb), 0, __LINE__,__FILE__) |
skb | 164 | include/linux/skbuff.h | #define IS_SKB_HEAD(skb) skb_check((skb), 1, __LINE__,__FILE__) |
skb | 166 | include/linux/skbuff.h | #define IS_SKB(skb) |
skb | 167 | include/linux/skbuff.h | #define IS_SKB_HEAD(skb) |
skb | 294 | include/linux/skbuff.h | extern __inline__ void skb_unlink(struct sk_buff *skb) |
skb | 301 | include/linux/skbuff.h | if(skb->prev && skb->next) |
skb | 303 | include/linux/skbuff.h | skb->next->prev = skb->prev; |
skb | 304 | include/linux/skbuff.h | skb->prev->next = skb->next; |
skb | 305 | include/linux/skbuff.h | skb->next = NULL; |
skb | 306 | include/linux/skbuff.h | skb->prev = NULL; |
skb | 315 | include/linux/skbuff.h | extern __inline__ unsigned char *skb_put(struct sk_buff *skb, int len) |
skb | 317 | include/linux/skbuff.h | unsigned char *tmp=skb->tail; |
skb | 318 | include/linux/skbuff.h | skb->tail+=len; |
skb | 319 | include/linux/skbuff.h | skb->len+=len; |
skb | 320 | include/linux/skbuff.h | if(skb->tail>skb->end) |
skb | 325 | include/linux/skbuff.h | extern __inline__ unsigned char *skb_push(struct sk_buff *skb, int len) |
skb | 327 | include/linux/skbuff.h | skb->data-=len; |
skb | 328 | include/linux/skbuff.h | skb->len+=len; |
skb | 329 | include/linux/skbuff.h | if(skb->data<skb->head) |
skb | 331 | include/linux/skbuff.h | return skb->data; |
skb | 334 | include/linux/skbuff.h | extern __inline__ unsigned char * skb_pull(struct sk_buff *skb, int len) |
skb | 336 | include/linux/skbuff.h | if(len > skb->len) |
skb | 338 | include/linux/skbuff.h | skb->data+=len; |
skb | 339 | include/linux/skbuff.h | skb->len-=len; |
skb | 340 | include/linux/skbuff.h | return skb->data; |
skb | 343 | include/linux/skbuff.h | extern __inline__ int skb_headroom(struct sk_buff *skb) |
skb | 345 | include/linux/skbuff.h | return skb->data-skb->head; |
skb | 348 | include/linux/skbuff.h | extern __inline__ int skb_tailroom(struct sk_buff *skb) |
skb | 350 | include/linux/skbuff.h | return skb->end-skb->tail; |
skb | 353 | include/linux/skbuff.h | extern __inline__ void skb_reserve(struct sk_buff *skb, int len) |
skb | 355 | include/linux/skbuff.h | skb->data+=len; |
skb | 356 | include/linux/skbuff.h | skb->tail+=len; |
skb | 359 | include/linux/skbuff.h | extern __inline__ void skb_trim(struct sk_buff *skb, int len) |
skb | 361 | include/linux/skbuff.h | if(skb->len>len) |
skb | 363 | include/linux/skbuff.h | skb->len=len; |
skb | 364 | include/linux/skbuff.h | skb->tail=skb->data+len; |
skb | 374 | include/linux/skbuff.h | extern void skb_free_datagram(struct sk_buff *skb); |
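
Editorial note: the include/linux/skbuff.h entries above define the buffer-editing primitives entirely in terms of the head/data/tail/end pointers: skb_reserve() moves data and tail forward to create headroom, skb_put() grows the tail, skb_push() grows at the front, skb_pull() shrinks from the front, and skb_check() enforces head <= data <= tail <= end with len == tail - data. The following is a standalone user-space illustration of that pointer arithmetic, not kernel code; "buf" stands in for the area between skb->head and skb->end.

    /* Standalone model of the skb_reserve/put/push/pull arithmetic above. */
    #include <assert.h>
    #include <string.h>

    int main(void)
    {
        char buf[64];
        char *head = buf, *end = buf + sizeof(buf);
        char *data = head, *tail = head;            /* empty buffer: len == 0 */

        data += 16; tail += 16;                     /* skb_reserve(skb, 16): headroom only */
        memcpy(tail, "payload", 7); tail += 7;      /* skb_put(skb, 7): append at the tail */
        data -= 2;                                  /* skb_push(skb, 2): prepend a header */
        data += 2;                                  /* skb_pull(skb, 2): strip it again */

        assert(head <= data && data <= tail && tail <= end);  /* skb_check() invariant */
        assert(tail - data == 7);                   /* skb->len == tail - data */
        return 0;
    }
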
skb | 31 | include/linux/trdevice.h | extern int tr_header(struct sk_buff *skb, struct device *dev, |
skb | 35 | include/linux/trdevice.h | unsigned long raddr, struct sk_buff *skb); |
skb | 36 | include/linux/trdevice.h | extern unsigned short tr_type_trans(struct sk_buff *skb, struct device *dev); |
skb | 6 | include/net/arp.h | extern int arp_rcv(struct sk_buff *skb, struct device *dev, |
skb | 10 | include/net/arp.h | struct device *dev, u32 saddr, struct sk_buff *skb); |
skb | 57 | include/net/ip.h | struct sk_buff *skb; /* complete received fragment */ |
skb | 88 | include/net/ip.h | extern int ip_send(struct rtable *rt, struct sk_buff *skb, __u32 daddr, int len, struct device *dev, __u32 saddr); |
skb | 89 | include/net/ip.h | extern int ip_build_header(struct sk_buff *skb, |
skb | 95 | include/net/ip.h | extern int ip_rcv(struct sk_buff *skb, struct device *dev, |
skb | 99 | include/net/ip.h | struct sk_buff * skb); |
skb | 100 | include/net/ip.h | extern int ip_options_compile(struct options * opt, struct sk_buff * skb); |
skb | 104 | include/net/ip.h | struct device *dev, struct sk_buff *skb, |
skb | 128 | include/net/ip.h | struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev); |
skb | 129 | include/net/ip.h | void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag); |
skb | 135 | include/net/ip.h | extern int ip_forward(struct sk_buff *skb, struct device *dev, int is_frag, __u32 target_addr); |
skb | 141 | include/net/ip.h | extern void ip_options_build(struct sk_buff *skb, struct options *opt, __u32 daddr, __u32 saddr, int is_frag); |
skb | 142 | include/net/ip.h | extern int ip_options_echo(struct options *dopt, struct options *sopt, __u32 daddr, __u32 saddr, struct sk_buff *skb); |
skb | 143 | include/net/ip.h | extern void ip_options_fragment(struct sk_buff *skb); |
skb | 144 | include/net/ip.h | extern int ip_options_compile(struct options *opt, struct sk_buff *skb); |
skb | 1 | include/net/ipip.h | extern int ipip_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 48 | include/net/ipx.h | extern int ipx_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt); |
skb | 10 | include/net/netlink.h | extern int netlink_attach(int unit, int (*function)(struct sk_buff *skb)); |
skb | 11 | include/net/netlink.h | extern int netlink_donothing(struct sk_buff *skb); |
skb | 13 | include/net/netlink.h | extern int netlink_post(int unit, struct sk_buff *skb); |
skb | 32 | include/net/protocol.h | int (*handler)(struct sk_buff *skb, struct device *dev, |
skb | 276 | include/net/sock.h | int (*build_header)(struct sk_buff *skb, |
skb | 286 | include/net/sock.h | struct device *dev, struct sk_buff *skb, |
skb | 362 | include/net/sock.h | struct sk_buff *skb); |
skb | 364 | include/net/sock.h | struct sk_buff *skb); |
skb | 375 | include/net/sock.h | extern struct sk_buff *sock_alloc_send_skb(struct sock *skb, |
skb | 390 | include/net/sock.h | extern __inline__ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
skb | 393 | include/net/sock.h | if(sk->rmem_alloc + skb->truesize >= sk->rcvbuf) |
skb | 397 | include/net/sock.h | sk->rmem_alloc+=skb->truesize; |
skb | 398 | include/net/sock.h | skb->sk=sk; |
skb | 400 | include/net/sock.h | skb_queue_tail(&sk->receive_queue,skb); |
skb | 402 | include/net/sock.h | sk->data_ready(sk,skb->len); |
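
Editorial note: the sock_queue_rcv_skb() entries show the deliver-or-drop contract: the inline refuses the buffer (returns below zero) when accepting it would push sk->rmem_alloc past sk->rcvbuf, otherwise it charges skb->truesize to the socket, queues the buffer and calls sk->data_ready(). Callers own the failure path, as the AppleTalk receive entries further down illustrate. A hedged sketch of that calling convention; example_deliver() is a hypothetical name.

    /* Hedged sketch of the deliver-or-drop idiom around sock_queue_rcv_skb();
     * mirrors the atalk_rcv() entries listed below. */
    static void example_deliver(struct sock *sk, struct sk_buff *skb)
    {
        skb->sk = sk;                       /* charge the buffer to this socket */
        if (sock_queue_rcv_skb(sk, skb) < 0) {
            /* Receive buffer full: undo the ownership and drop the frame. */
            skb->sk = NULL;
            kfree_skb(skb, FREE_WRITE);
        }
    }
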
skb | 129 | include/net/tcp.h | extern int tcp_rcv(struct sk_buff *skb, struct device *dev, |
skb | 45 | include/net/udp.h | extern int udp_rcv(struct sk_buff *skb, struct device *dev, |
skb | 6 | net/802/llc.c | int llc_rx_adm(struct sock *sk,struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 36 | net/802/llc.c | int llc_rx_setup(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 70 | net/802/llc.c | int llc_rx_reset(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 114 | net/802/llc.c | int llc_rx_d_conn(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 150 | net/802/llc.c | int llc_rx_error(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 199 | net/802/llc.c | int llc_rx_nr_shared(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 288 | net/802/llc.c | int llc_rx_normal(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
skb | 290 | net/802/llc.c | if(llc_rx_nr_shared(sk, skb, type, cmd, pf, nr, ns)) |
skb | 364 | net/802/llc.c | llc_queue_data(sk,skb); |
skb | 30 | net/802/p8022.c | p8022_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 34 | net/802/p8022.c | proto = find_8022_client(*(skb->h.raw)); |
skb | 36 | net/802/p8022.c | skb->h.raw += 3; |
skb | 37 | net/802/p8022.c | skb_pull(skb,3); |
skb | 38 | net/802/p8022.c | return proto->rcvfunc(skb, dev, pt); |
skb | 41 | net/802/p8022.c | skb->sk = NULL; |
skb | 42 | net/802/p8022.c | kfree_skb(skb, FREE_READ); |
skb | 48 | net/802/p8022.c | struct sk_buff *skb, unsigned char *dest_node) |
skb | 50 | net/802/p8022.c | struct device *dev = skb->dev; |
skb | 53 | net/802/p8022.c | rawp = skb_push(skb,3); |
skb | 57 | net/802/p8022.c | dev->hard_header(skb, dev, ETH_P_802_3, dest_node, NULL, skb->len); |
skb | 9 | net/802/p8023.c | struct sk_buff *skb, unsigned char *dest_node) |
skb | 11 | net/802/p8023.c | struct device *dev = skb->dev; |
skb | 13 | net/802/p8023.c | dev->hard_header(skb, dev, ETH_P_802_3, dest_node, NULL, skb->len); |
skb | 40 | net/802/psnap.c | int snap_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 53 | net/802/psnap.c | proto = find_snap_client(skb->h.raw); |
skb | 60 | net/802/psnap.c | skb->h.raw += 5; |
skb | 61 | net/802/psnap.c | skb_pull(skb,5); |
skb | 64 | net/802/psnap.c | return proto->rcvfunc(skb, dev, &psnap_packet_type); |
skb | 66 | net/802/psnap.c | skb->sk = NULL; |
skb | 67 | net/802/psnap.c | kfree_skb(skb, FREE_READ); |
skb | 75 | net/802/psnap.c | static void snap_datalink_header(struct datalink_proto *dl, struct sk_buff *skb, unsigned char *dest_node) |
skb | 77 | net/802/psnap.c | memcpy(skb_push(skb,5),dl->type,5); |
skb | 78 | net/802/psnap.c | snap_dl->datalink_header(snap_dl, skb, dest_node); |
skb | 41 | net/802/tr.c | int tr_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 45 | net/802/tr.c | struct trh_hdr *trh=(struct trh_hdr *)skb_push(skb,dev->hard_header_len); |
skb | 72 | net/802/tr.c | struct sk_buff *skb) { |
skb | 82 | net/802/tr.c | if(arp_find(trh->daddr, dest, dev, dev->pa_addr, skb)) { |
skb | 91 | net/802/tr.c | unsigned short tr_type_trans(struct sk_buff *skb, struct device *dev) { |
skb | 93 | net/802/tr.c | struct trh_hdr *trh=(struct trh_hdr *)skb->data; |
skb | 94 | net/802/tr.c | struct trllc *trllc=(struct trllc *)(skb->data+sizeof(struct trh_hdr)); |
skb | 96 | net/802/tr.c | skb->mac.raw = skb->data; |
skb | 98 | net/802/tr.c | skb_pull(skb,dev->hard_header_len); |
skb | 106 | net/802/tr.c | skb->pkt_type=PACKET_BROADCAST; |
skb | 108 | net/802/tr.c | skb->pkt_type=PACKET_MULTICAST; |
skb | 114 | net/802/tr.c | skb->pkt_type=PACKET_OTHERHOST; |
skb | 90 | net/appletalk/aarp.c | struct sk_buff *skb; |
skb | 92 | net/appletalk/aarp.c | while((skb=skb_dequeue(&a->packet_queue))!=NULL) |
skb | 93 | net/appletalk/aarp.c | kfree_skb(skb, FREE_WRITE); |
skb | 106 | net/appletalk/aarp.c | struct sk_buff *skb=alloc_skb(len, GFP_ATOMIC); |
skb | 110 | net/appletalk/aarp.c | if(skb==NULL || sat==NULL) |
skb | 117 | net/appletalk/aarp.c | skb_reserve(skb,dev->hard_header_len+aarp_dl->header_length); |
skb | 118 | net/appletalk/aarp.c | eah = (struct elapaarp *)skb_put(skb,sizeof(struct elapaarp)); |
skb | 119 | net/appletalk/aarp.c | skb->arp = 1; |
skb | 120 | net/appletalk/aarp.c | skb->free = 1; |
skb | 121 | net/appletalk/aarp.c | skb->dev = a->dev; |
skb | 149 | net/appletalk/aarp.c | aarp_dl->datalink_header(aarp_dl, skb, aarp_eth_multicast); |
skb | 155 | net/appletalk/aarp.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 167 | net/appletalk/aarp.c | struct sk_buff *skb=alloc_skb(len, GFP_ATOMIC); |
skb | 170 | net/appletalk/aarp.c | if(skb==NULL) |
skb | 177 | net/appletalk/aarp.c | skb_reserve(skb,dev->hard_header_len+aarp_dl->header_length); |
skb | 178 | net/appletalk/aarp.c | eah = (struct elapaarp *)skb_put(skb,sizeof(struct elapaarp)); |
skb | 179 | net/appletalk/aarp.c | skb->arp = 1; |
skb | 180 | net/appletalk/aarp.c | skb->free = 1; |
skb | 181 | net/appletalk/aarp.c | skb->dev = dev; |
skb | 212 | net/appletalk/aarp.c | aarp_dl->datalink_header(aarp_dl, skb, sha); |
skb | 218 | net/appletalk/aarp.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 229 | net/appletalk/aarp.c | struct sk_buff *skb=alloc_skb(len, GFP_ATOMIC); |
skb | 233 | net/appletalk/aarp.c | if(skb==NULL) |
skb | 240 | net/appletalk/aarp.c | skb_reserve(skb,dev->hard_header_len+aarp_dl->header_length); |
skb | 241 | net/appletalk/aarp.c | eah = (struct elapaarp *)skb_put(skb,sizeof(struct elapaarp)); |
skb | 243 | net/appletalk/aarp.c | skb->arp = 1; |
skb | 244 | net/appletalk/aarp.c | skb->free = 1; |
skb | 245 | net/appletalk/aarp.c | skb->dev = dev; |
skb | 273 | net/appletalk/aarp.c | aarp_dl->datalink_header(aarp_dl, skb, aarp_eth_multicast); |
skb | 279 | net/appletalk/aarp.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 427 | net/appletalk/aarp.c | int aarp_send_ddp(struct device *dev,struct sk_buff *skb, struct at_addr *sa, void *hwaddr) |
skb | 443 | net/appletalk/aarp.c | skb->dev = dev; |
skb | 444 | net/appletalk/aarp.c | skb->protocol = htons(ETH_P_ATALK); |
skb | 456 | net/appletalk/aarp.c | ddp_dl->datalink_header(ddp_dl, skb, ddp_eth_multicast); |
skb | 457 | net/appletalk/aarp.c | if(skb->sk==NULL) |
skb | 458 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, SOPRI_NORMAL); |
skb | 460 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, skb->sk->priority); |
skb | 472 | net/appletalk/aarp.c | ddp_dl->datalink_header(ddp_dl, skb, a->hwaddr); |
skb | 473 | net/appletalk/aarp.c | if(skb->sk==NULL) |
skb | 474 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, SOPRI_NORMAL); |
skb | 476 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, skb->sk->priority); |
skb | 492 | net/appletalk/aarp.c | skb_queue_tail(&a->packet_queue, skb); |
skb | 516 | net/appletalk/aarp.c | skb_queue_tail(&a->packet_queue, skb); |
skb | 558 | net/appletalk/aarp.c | struct sk_buff *skb; |
skb | 577 | net/appletalk/aarp.c | while((skb=skb_dequeue(&a->packet_queue))!=NULL) |
skb | 580 | net/appletalk/aarp.c | ddp_dl->datalink_header(ddp_dl,skb,a->hwaddr); |
skb | 581 | net/appletalk/aarp.c | if(skb->sk==NULL) |
skb | 582 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, SOPRI_NORMAL); |
skb | 584 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, skb->sk->priority); |
skb | 597 | net/appletalk/aarp.c | static int aarp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 599 | net/appletalk/aarp.c | struct elapaarp *ea=(struct elapaarp *)skb->h.raw; |
skb | 613 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 621 | net/appletalk/aarp.c | if(!skb_pull(skb,sizeof(*ea))) |
skb | 623 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 636 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 667 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 680 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 739 | net/appletalk/aarp.c | kfree_skb(skb, FREE_READ); |
skb | 205 | net/appletalk/ddp.c | struct sk_buff *skb; |
skb | 208 | net/appletalk/ddp.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) |
skb | 210 | net/appletalk/ddp.c | kfree_skb(skb,FREE_READ); |
skb | 1360 | net/appletalk/ddp.c | int atalk_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 1363 | net/appletalk/ddp.c | struct ddpehdr *ddp=(void *)skb->h.raw; |
skb | 1369 | net/appletalk/ddp.c | if(skb->len<sizeof(*ddp)) |
skb | 1371 | net/appletalk/ddp.c | kfree_skb(skb,FREE_READ); |
skb | 1388 | net/appletalk/ddp.c | origlen = skb->len; |
skb | 1390 | net/appletalk/ddp.c | skb_trim(skb,min(skb->len,ddp->deh_len)); |
skb | 1398 | net/appletalk/ddp.c | if(skb->len<sizeof(*ddp)) |
skb | 1400 | net/appletalk/ddp.c | kfree_skb(skb,FREE_READ); |
skb | 1412 | net/appletalk/ddp.c | kfree_skb(skb,FREE_READ); |
skb | 1418 | net/appletalk/ddp.c | if(call_in_firewall(AF_APPLETALK, skb, ddp)!=FW_ACCEPT) |
skb | 1420 | net/appletalk/ddp.c | kfree_skb(skb, FREE_READ); |
skb | 1441 | net/appletalk/ddp.c | if (skb->pkt_type != PACKET_HOST || ddp->deh_dnet == 0) |
skb | 1443 | net/appletalk/ddp.c | kfree_skb(skb, FREE_READ); |
skb | 1452 | net/appletalk/ddp.c | if(call_fw_firewall(AF_APPLETALK, skb, ddp)!=FW_ACCEPT) |
skb | 1454 | net/appletalk/ddp.c | kfree_skb(skb, FREE_READ); |
skb | 1465 | net/appletalk/ddp.c | kfree_skb(skb, FREE_READ); |
skb | 1471 | net/appletalk/ddp.c | skb_trim(skb,min(origlen, rt->dev->hard_header_len + |
skb | 1478 | net/appletalk/ddp.c | if(aarp_send_ddp(rt->dev, skb, &ta, NULL)==-1) |
skb | 1479 | net/appletalk/ddp.c | kfree_skb(skb, FREE_READ); |
skb | 1493 | net/appletalk/ddp.c | kfree_skb(skb,FREE_READ); |
skb | 1502 | net/appletalk/ddp.c | skb->sk = sock; |
skb | 1504 | net/appletalk/ddp.c | if(sock_queue_rcv_skb(sock,skb)<0) |
skb | 1506 | net/appletalk/ddp.c | skb->sk=NULL; |
skb | 1507 | net/appletalk/ddp.c | kfree_skb(skb, FREE_WRITE); |
skb | 1517 | net/appletalk/ddp.c | struct sk_buff *skb; |
skb | 1590 | net/appletalk/ddp.c | skb = sock_alloc_send_skb(sk, size, 0, 0 , &err); |
skb | 1591 | net/appletalk/ddp.c | if(skb==NULL) |
skb | 1594 | net/appletalk/ddp.c | skb->sk=sk; |
skb | 1595 | net/appletalk/ddp.c | skb->free=1; |
skb | 1596 | net/appletalk/ddp.c | skb->arp=1; |
skb | 1597 | net/appletalk/ddp.c | skb_reserve(skb,ddp_dl->header_length); |
skb | 1598 | net/appletalk/ddp.c | skb_reserve(skb,dev->hard_header_len); |
skb | 1600 | net/appletalk/ddp.c | skb->dev=dev; |
skb | 1605 | net/appletalk/ddp.c | ddp=(struct ddpehdr *)skb_put(skb,sizeof(struct ddpehdr)); |
skb | 1626 | net/appletalk/ddp.c | memcpy_fromiovec(skb_put(skb,len),msg->msg_iov,len); |
skb | 1635 | net/appletalk/ddp.c | if(call_out_firewall(AF_APPLETALK, skb, ddp)!=FW_ACCEPT) |
skb | 1637 | net/appletalk/ddp.c | kfree_skb(skb, FREE_WRITE); |
skb | 1652 | net/appletalk/ddp.c | struct sk_buff *skb2=skb_clone(skb, GFP_KERNEL); |
skb | 1670 | net/appletalk/ddp.c | sk->wmem_alloc-=skb->truesize; |
skb | 1671 | net/appletalk/ddp.c | ddp_dl->datalink_header(ddp_dl, skb, dev->dev_addr); |
skb | 1672 | net/appletalk/ddp.c | skb->sk = NULL; |
skb | 1673 | net/appletalk/ddp.c | skb->mac.raw=skb->data; |
skb | 1674 | net/appletalk/ddp.c | skb->h.raw = skb->data + ddp_dl->header_length + dev->hard_header_len; |
skb | 1675 | net/appletalk/ddp.c | skb_pull(skb,dev->hard_header_len); |
skb | 1676 | net/appletalk/ddp.c | skb_pull(skb,ddp_dl->header_length); |
skb | 1677 | net/appletalk/ddp.c | atalk_rcv(skb,dev,NULL); |
skb | 1689 | net/appletalk/ddp.c | if(aarp_send_ddp(dev,skb,&usat->sat_addr, NULL)==-1) |
skb | 1690 | net/appletalk/ddp.c | kfree_skb(skb, FREE_WRITE); |
skb | 1705 | net/appletalk/ddp.c | struct sk_buff *skb; |
skb | 1714 | net/appletalk/ddp.c | skb=skb_recv_datagram(sk,flags,noblock,&er); |
skb | 1715 | net/appletalk/ddp.c | if(skb==NULL) |
skb | 1718 | net/appletalk/ddp.c | ddp = (struct ddpehdr *)(skb->h.raw); |
skb | 1724 | net/appletalk/ddp.c | skb_copy_datagram_iovec(skb,0,msg->msg_iov,copied); |
skb | 1731 | net/appletalk/ddp.c | skb_copy_datagram_iovec(skb,sizeof(*ddp),msg->msg_iov,copied); |
skb | 1740 | net/appletalk/ddp.c | skb_free_datagram(skb); |
skb | 1779 | net/appletalk/ddp.c | struct sk_buff *skb; |
skb | 1781 | net/appletalk/ddp.c | if((skb=skb_peek(&sk->receive_queue))!=NULL) |
skb | 1782 | net/appletalk/ddp.c | amount=skb->len-sizeof(struct ddpehdr); |
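
Editorial note: the ddp.c sendmsg entries show how an outgoing datagram is assembled: allocate with sock_alloc_send_skb(), reserve headroom for the datalink and device headers, skb_put() the protocol header, then copy the user data in with memcpy_fromiovec(). A hedged sketch of that ordering follows; example_build(), dl_header_len and proto_hdr_len are hypothetical parameters standing in for ddp_dl->header_length and sizeof(struct ddpehdr).

    /* Hedged sketch of the datagram-building order used in atalk_sendmsg(). */
    static struct sk_buff *example_build(struct sock *sk, struct device *dev,
                                         struct msghdr *msg, int len,
                                         int dl_header_len, int proto_hdr_len,
                                         int *err)
    {
        struct sk_buff *skb;
        int size = dl_header_len + dev->hard_header_len + proto_hdr_len + len;

        skb = sock_alloc_send_skb(sk, size, 0, 0, err);   /* may block or fail */
        if (skb == NULL)
            return NULL;

        skb->sk = sk;
        skb->free = 1;                              /* free after transmit */
        skb->arp = 1;                               /* link address already resolved */
        skb_reserve(skb, dl_header_len);            /* headroom: 802.2/SNAP header */
        skb_reserve(skb, dev->hard_header_len);     /* headroom: MAC header */
        skb->dev = dev;

        skb_put(skb, proto_hdr_len);                /* protocol header, filled in by caller */
        memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);   /* copy user data */
        return skb;
    }
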
skb | 361 | net/ax25/af_ax25.c | static void ax25_send_to_raw(struct sock *sk, struct sk_buff *skb, int proto) |
skb | 367 | net/ax25/af_ax25.c | if ((copy = skb_clone(skb, GFP_ATOMIC)) == NULL) |
skb | 374 | net/ax25/af_ax25.c | sk->data_ready(sk, skb->len); |
skb | 402 | net/ax25/af_ax25.c | struct sk_buff *skb; |
skb | 414 | net/ax25/af_ax25.c | while ((skb = skb_dequeue(&ax25->sk->receive_queue)) != NULL) { |
skb | 415 | net/ax25/af_ax25.c | if (skb->sk != ax25->sk) { /* A pending connection */ |
skb | 416 | net/ax25/af_ax25.c | skb->sk->dead = 1; /* Queue the unaccepted socket for death */ |
skb | 417 | net/ax25/af_ax25.c | ax25_set_timer(skb->sk->ax25); |
skb | 418 | net/ax25/af_ax25.c | skb->sk->ax25->state = AX25_STATE_0; |
skb | 421 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 596 | net/ax25/af_ax25.c | int ax25_send_frame(struct sk_buff *skb, ax25_address *src, ax25_address *dest, |
skb | 601 | net/ax25/af_ax25.c | if (skb == NULL) |
skb | 612 | net/ax25/af_ax25.c | ax25_output(ax25, skb); |
skb | 640 | net/ax25/af_ax25.c | ax25_output(ax25, skb); |
skb | 1285 | net/ax25/af_ax25.c | struct sk_buff *skb; |
skb | 1304 | net/ax25/af_ax25.c | if ((skb = skb_dequeue(&sk->receive_queue)) == NULL) { |
skb | 1315 | net/ax25/af_ax25.c | } while (skb == NULL); |
skb | 1317 | net/ax25/af_ax25.c | newsk = skb->sk; |
skb | 1322 | net/ax25/af_ax25.c | skb->sk = NULL; |
skb | 1323 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1365 | net/ax25/af_ax25.c | static int ax25_rcv(struct sk_buff *skb, struct device *dev, ax25_address *dev_addr, struct packet_type *ptype) |
skb | 1380 | net/ax25/af_ax25.c | skb->h.raw = skb->data; |
skb | 1384 | net/ax25/af_ax25.c | if(call_in_firewall(PF_AX25, skb, skb->h.raw)!=FW_ACCEPT) |
skb | 1386 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1395 | net/ax25/af_ax25.c | if (ax25_parse_addr(skb->data, skb->len, &src, &dest, &dp, &type) == NULL) { |
skb | 1396 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1430 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1434 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1436 | net/ax25/af_ax25.c | build_ax25_addr(skb->data, &src, &dest, &dp, type, MODULUS); |
skb | 1438 | net/ax25/af_ax25.c | if(call_fw_firewall(PF_AX25, skb,skb->data)!=FW_ACCEPT) |
skb | 1440 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1444 | net/ax25/af_ax25.c | skb->arp = 1; |
skb | 1445 | net/ax25/af_ax25.c | ax25_queue_xmit(skb, dev_out, SOPRI_NORMAL); |
skb | 1447 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1456 | net/ax25/af_ax25.c | skb_pull(skb, size_ax25_addr(&dp)); |
skb | 1468 | net/ax25/af_ax25.c | if ((*skb->data & ~0x10) == LAPB_UI) { /* UI frame - bypass LAPB processing */ |
skb | 1469 | net/ax25/af_ax25.c | skb->h.raw = skb->data + 2; /* skip control and pid */ |
skb | 1472 | net/ax25/af_ax25.c | ax25_send_to_raw(raw, skb, skb->data[1]); |
skb | 1475 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1480 | net/ax25/af_ax25.c | switch (skb->data[1]) { |
skb | 1483 | net/ax25/af_ax25.c | skb_pull(skb,2); /* drop PID/CTRL */ |
skb | 1485 | net/ax25/af_ax25.c | ip_rcv(skb, dev, ptype); /* Note ptype here is the wrong one, fix me later */ |
skb | 1489 | net/ax25/af_ax25.c | skb_pull(skb,2); |
skb | 1490 | net/ax25/af_ax25.c | arp_rcv(skb, dev, ptype); /* Note ptype here is wrong... */ |
skb | 1497 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1502 | net/ax25/af_ax25.c | skb_pull(skb, 2); |
skb | 1503 | net/ax25/af_ax25.c | skb_queue_tail(&sk->receive_queue, skb); |
skb | 1504 | net/ax25/af_ax25.c | skb->sk = sk; |
skb | 1505 | net/ax25/af_ax25.c | sk->rmem_alloc += skb->truesize; |
skb | 1507 | net/ax25/af_ax25.c | sk->data_ready(sk, skb->len); |
skb | 1510 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1515 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); /* Will scan SOCK_AX25 RAW sockets */ |
skb | 1528 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1539 | net/ax25/af_ax25.c | if (ax25_process_rx_frame(ax25, skb, type) == 0) |
skb | 1540 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1545 | net/ax25/af_ax25.c | if ((*skb->data & ~PF) != SABM && (*skb->data & ~PF) != SABME) { |
skb | 1550 | net/ax25/af_ax25.c | if ((*skb->data & ~PF) != DM && mine) |
skb | 1553 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1562 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1568 | net/ax25/af_ax25.c | skb_queue_head(&sk->receive_queue, skb); |
skb | 1570 | net/ax25/af_ax25.c | skb->sk = make; |
skb | 1578 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1584 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1593 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1605 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1620 | net/ax25/af_ax25.c | if ((*skb->data & ~PF) == SABME) { |
skb | 1641 | net/ax25/af_ax25.c | sk->data_ready(sk, skb->len ); |
skb | 1643 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); |
skb | 1652 | net/ax25/af_ax25.c | static int kiss_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *ptype) |
skb | 1654 | net/ax25/af_ax25.c | skb->sk = NULL; /* Initially we don't know who its for */ |
skb | 1656 | net/ax25/af_ax25.c | if ((*skb->data & 0x0F) != 0) { |
skb | 1657 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); /* Not a KISS data frame */ |
skb | 1661 | net/ax25/af_ax25.c | skb_pull(skb, AX25_KISS_HEADER_LEN); /* Remove the KISS byte */ |
skb | 1663 | net/ax25/af_ax25.c | return ax25_rcv(skb, dev, (ax25_address *)dev->dev_addr, ptype); |
skb | 1670 | net/ax25/af_ax25.c | static int bpq_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *ptype) |
skb | 1675 | net/ax25/af_ax25.c | skb->sk = NULL; /* Initially we don't know who its for */ |
skb | 1678 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_READ); /* We have no port callsign */ |
skb | 1682 | net/ax25/af_ax25.c | len = skb->data[0] + skb->data[1] * 256 - 5; |
skb | 1684 | net/ax25/af_ax25.c | skb_pull(skb, 2); /* Remove the length bytes */ |
skb | 1685 | net/ax25/af_ax25.c | skb_trim(skb, len); /* Set the length of the data */ |
skb | 1687 | net/ax25/af_ax25.c | return ax25_rcv(skb, dev, port_call, ptype); |
skb | 1697 | net/ax25/af_ax25.c | struct sk_buff *skb; |
skb | 1767 | net/ax25/af_ax25.c | if ((skb = sock_alloc_send_skb(sk, size, 0, 0, &err)) == NULL) |
skb | 1770 | net/ax25/af_ax25.c | skb->sk = sk; |
skb | 1771 | net/ax25/af_ax25.c | skb->free = 1; |
skb | 1772 | net/ax25/af_ax25.c | skb->arp = 1; |
skb | 1774 | net/ax25/af_ax25.c | skb_reserve(skb, size - len); |
skb | 1780 | net/ax25/af_ax25.c | memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); |
skb | 1783 | net/ax25/af_ax25.c | asmptr = skb_push(skb, 1); |
skb | 1792 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_WRITE); |
skb | 1796 | net/ax25/af_ax25.c | ax25_output(sk->ax25, skb); /* Shove it onto the queue and kick */ |
skb | 1800 | net/ax25/af_ax25.c | asmptr = skb_push(skb, 1 + size_ax25_addr(dp)); |
skb | 1814 | net/ax25/af_ax25.c | skb->h.raw = asmptr; |
skb | 1817 | net/ax25/af_ax25.c | printk("base=%p pos=%p\n", skb->data, asmptr); |
skb | 1822 | net/ax25/af_ax25.c | ax25_queue_xmit(skb, sk->ax25->device, SOPRI_NORMAL); |
skb | 1834 | net/ax25/af_ax25.c | struct sk_buff *skb; |
skb | 1852 | net/ax25/af_ax25.c | if ((skb = skb_recv_datagram(sk, flags, noblock, &er)) == NULL) |
skb | 1856 | net/ax25/af_ax25.c | length = skb->len + (skb->data - skb->h.raw); |
skb | 1859 | net/ax25/af_ax25.c | skb_pull(skb, 1); /* Remove PID */ |
skb | 1860 | net/ax25/af_ax25.c | length = skb->len; |
skb | 1861 | net/ax25/af_ax25.c | skb->h.raw = skb->data; |
skb | 1865 | net/ax25/af_ax25.c | skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); |
skb | 1876 | net/ax25/af_ax25.c | ax25_parse_addr(skb->data, skb->len, NULL, &dest, &digi, NULL); |
skb | 1900 | net/ax25/af_ax25.c | skb_free_datagram(skb); |
skb | 1936 | net/ax25/af_ax25.c | struct sk_buff *skb; |
skb | 1938 | net/ax25/af_ax25.c | if ((skb = skb_peek(&sk->receive_queue)) != NULL) |
skb | 1939 | net/ax25/af_ax25.c | amount = skb->len; |
skb | 2178 | net/ax25/af_ax25.c | void ax25_queue_xmit(struct sk_buff *skb, struct device *dev, int pri) |
skb | 2184 | net/ax25/af_ax25.c | if(call_out_firewall(PF_AX25, skb, skb->data)!=FW_ACCEPT) |
skb | 2186 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_WRITE); |
skb | 2191 | net/ax25/af_ax25.c | skb->protocol = htons (ETH_P_AX25); |
skb | 2197 | net/ax25/af_ax25.c | if(skb_headroom(skb) < AX25_BPQ_HEADER_LEN) |
skb | 2200 | net/ax25/af_ax25.c | skb->free = 1; |
skb | 2201 | net/ax25/af_ax25.c | kfree_skb(skb, FREE_WRITE); |
skb | 2205 | net/ax25/af_ax25.c | size = skb->len; |
skb | 2207 | net/ax25/af_ax25.c | ptr = skb_push(skb, 2); |
skb | 2212 | net/ax25/af_ax25.c | dev->hard_header(skb, dev, ETH_P_BPQ, bcast_addr, NULL, 0); |
skb | 2214 | net/ax25/af_ax25.c | dev_queue_xmit(skb, dev, pri); |
skb | 2220 | net/ax25/af_ax25.c | ptr = skb_push(skb, 1); |
skb | 2223 | net/ax25/af_ax25.c | dev_queue_xmit(skb, dev, pri); |
skb | 2239 | net/ax25/af_ax25.c | int ax25_encapsulate(struct sk_buff *skb, struct device *dev, unsigned short type, void *daddr, |
skb | 2243 | net/ax25/af_ax25.c | unsigned char *buff = skb_push(skb, AX25_HEADER_LEN); |
skb | 2287 | net/ax25/af_ax25.c | int ax25_rebuild_header(unsigned char *bp, struct device *dev, unsigned long dest, struct sk_buff *skb) |
skb | 2291 | net/ax25/af_ax25.c | if (arp_find(bp + 1, dest, dev, dev->pa_addr, skb)) |
skb | 2299 | net/ax25/af_ax25.c | skb_device_unlock(skb); |
skb | 2300 | net/ax25/af_ax25.c | skb_pull(skb, AX25_HEADER_LEN - 1); /* Keep PID */ |
skb | 2301 | net/ax25/af_ax25.c | ax25_send_frame(skb, (ax25_address *)(bp + 8), (ax25_address *)(bp + 1), NULL, dev); |
skb | 64 | net/ax25/ax25_in.c | static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb) |
skb | 70 | net/ax25/ax25_in.c | if (!(*skb->data & SEG_FIRST)) { |
skb | 71 | net/ax25/ax25_in.c | if ((ax25->fragno - 1) == (*skb->data & SEG_REM)) { |
skb | 72 | net/ax25/ax25_in.c | ax25->fragno = *skb->data & SEG_REM; |
skb | 73 | net/ax25/ax25_in.c | skb_pull(skb, 1); |
skb | 74 | net/ax25/ax25_in.c | ax25->fraglen += skb->len; |
skb | 75 | net/ax25/ax25_in.c | skb_queue_tail(&ax25->frag_queue, skb); |
skb | 114 | net/ax25/ax25_in.c | if (*skb->data & SEG_FIRST) { |
skb | 115 | net/ax25/ax25_in.c | ax25->fragno = *skb->data & SEG_REM; |
skb | 116 | net/ax25/ax25_in.c | skb_pull(skb, 1); |
skb | 117 | net/ax25/ax25_in.c | ax25->fraglen = skb->len; |
skb | 118 | net/ax25/ax25_in.c | skb_queue_tail(&ax25->frag_queue, skb); |
skb | 130 | net/ax25/ax25_in.c | static int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb) |
skb | 133 | net/ax25/ax25_in.c | unsigned char pid = *skb->data; |
skb | 139 | net/ax25/ax25_in.c | skb_pull(skb, 1); /* Remove PID */ |
skb | 140 | net/ax25/ax25_in.c | queued = nr_route_frame(skb, ax25); |
skb | 146 | net/ax25/ax25_in.c | skb_pull(skb, 1); /* Remove PID */ |
skb | 147 | net/ax25/ax25_in.c | skb->h.raw = skb->data; |
skb | 149 | net/ax25/ax25_in.c | ip_rcv(skb, skb->dev, NULL); /* Wrong ptype */ |
skb | 154 | net/ax25/ax25_in.c | skb_pull(skb, 1); /* Remove PID */ |
skb | 155 | net/ax25/ax25_in.c | queued = ax25_rx_fragment(ax25, skb); |
skb | 160 | net/ax25/ax25_in.c | if (sock_queue_rcv_skb(ax25->sk, skb) == 0) { |
skb | 177 | net/ax25/ax25_in.c | static int ax25_state1_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type) |
skb | 246 | net/ax25/ax25_in.c | static int ax25_state2_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type) |
skb | 304 | net/ax25/ax25_in.c | static int ax25_state3_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type) |
skb | 417 | net/ax25/ax25_in.c | queued = ax25_rx_iframe(ax25, skb); |
skb | 461 | net/ax25/ax25_in.c | static int ax25_state4_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type) |
skb | 625 | net/ax25/ax25_in.c | queued = ax25_rx_iframe(ax25, skb); |
skb | 667 | net/ax25/ax25_in.c | int ax25_process_rx_frame(ax25_cb *ax25, struct sk_buff *skb, int type) |
skb | 682 | net/ax25/ax25_in.c | frametype = ax25_decode(ax25, skb, &ns, &nr, &pf); |
skb | 686 | net/ax25/ax25_in.c | queued = ax25_state1_machine(ax25, skb, frametype, pf, type); |
skb | 689 | net/ax25/ax25_in.c | queued = ax25_state2_machine(ax25, skb, frametype, pf, type); |
skb | 692 | net/ax25/ax25_in.c | queued = ax25_state3_machine(ax25, skb, frametype, ns, nr, pf, type); |
skb | 695 | net/ax25/ax25_in.c | queued = ax25_state4_machine(ax25, skb, frametype, ns, nr, pf, type); |
skb | 56 | net/ax25/ax25_out.c | void ax25_output(ax25_cb *ax25, struct sk_buff *skb) |
skb | 64 | net/ax25/ax25_out.c | if ((skb->len - 1) > mtu) { |
skb | 67 | net/ax25/ax25_out.c | fragno = skb->len / mtu; |
skb | 68 | net/ax25/ax25_out.c | if (skb->len % mtu == 0) fragno--; |
skb | 70 | net/ax25/ax25_out.c | frontlen = skb_headroom(skb); /* Address space + CTRL */ |
skb | 72 | net/ax25/ax25_out.c | while (skb->len > 0) { |
skb | 73 | net/ax25/ax25_out.c | if (skb->sk != NULL) { |
skb | 74 | net/ax25/ax25_out.c | if ((skbn = sock_alloc_send_skb(skb->sk, mtu + 2 + frontlen, 0, 0, &err)) == NULL) |
skb | 81 | net/ax25/ax25_out.c | skbn->sk = skb->sk; |
skb | 87 | net/ax25/ax25_out.c | len = (mtu > skb->len) ? skb->len : mtu; |
skb | 89 | net/ax25/ax25_out.c | memcpy(skb_put(skbn, len), skb->data, len); |
skb | 90 | net/ax25/ax25_out.c | skb_pull(skb, len); |
skb | 105 | net/ax25/ax25_out.c | skb->free = 1; |
skb | 106 | net/ax25/ax25_out.c | kfree_skb(skb, FREE_WRITE); |
skb | 108 | net/ax25/ax25_out.c | skb_queue_tail(&ax25->write_queue, skb); /* Throw it on the queue */ |
skb | 119 | net/ax25/ax25_out.c | static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit) |
skb | 123 | net/ax25/ax25_out.c | if (skb == NULL) |
skb | 127 | net/ax25/ax25_out.c | frame = skb_push(skb, 1); |
skb | 134 | net/ax25/ax25_out.c | frame = skb_push(skb, 2); |
skb | 142 | net/ax25/ax25_out.c | ax25_transmit_buffer(ax25, skb, C_COMMAND); |
skb | 147 | net/ax25/ax25_out.c | struct sk_buff *skb, *skbn; |
skb | 171 | net/ax25/ax25_out.c | skb = skb_dequeue(&ax25->write_queue); |
skb | 174 | net/ax25/ax25_out.c | if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { |
skb | 175 | net/ax25/ax25_out.c | skb_queue_head(&ax25->write_queue, skb); |
skb | 195 | net/ax25/ax25_out.c | skb_queue_tail(&ax25->ack_queue, skb); |
skb | 199 | net/ax25/ax25_out.c | } while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL); |
skb | 212 | net/ax25/ax25_out.c | void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type) |
skb | 227 | net/ax25/ax25_out.c | if (skb_headroom(skb) < size_ax25_addr(ax25->digipeat)) { |
skb | 229 | net/ax25/ax25_out.c | skb->free = 1; |
skb | 230 | net/ax25/ax25_out.c | kfree_skb(skb, FREE_WRITE); |
skb | 234 | net/ax25/ax25_out.c | ptr = skb_push(skb, size_ax25_addr(ax25->digipeat)); |
skb | 237 | net/ax25/ax25_out.c | skb->arp = 1; |
skb | 239 | net/ax25/ax25_out.c | ax25_queue_xmit(skb, ax25->device, SOPRI_NORMAL); |
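
Editorial note: the ax25_out.c entries show how an oversized frame is fragmented before queueing: the fragment count is derived from skb->len and the MTU, then the loop repeatedly allocates a fresh buffer with the same headroom, copies up to one MTU into skb_put() of the copy, and skb_pull()s the original forward by the same amount until it is empty. The sketch below is hedged: example_fragment() is hypothetical, the segmentation-header byte that skb_push() would prepend and the error handling are omitted, and a socket-owned buffer is assumed.

    /* Hedged sketch of the fragmentation bookkeeping in ax25_output(). */
    static void example_fragment(ax25_cb *ax25, struct sk_buff *skb, int mtu, int frontlen)
    {
        struct sk_buff *skbn;
        int len, err;

        while (skb->len > 0) {
            skbn = sock_alloc_send_skb(skb->sk, mtu + 2 + frontlen, 0, 0, &err);
            if (skbn == NULL)
                break;                               /* real code retries/sleeps here */

            skbn->sk = skb->sk;
            skb_reserve(skbn, frontlen + 2);         /* same headroom as the original */

            len = (mtu > skb->len) ? skb->len : mtu; /* at most one MTU per fragment */
            memcpy(skb_put(skbn, len), skb->data, len);   /* copy this chunk */
            skb_pull(skb, len);                           /* consume it from the original */

            skb_queue_tail(&ax25->write_queue, skbn);     /* queue the fragment */
        }

        skb->free = 1;
        kfree_skb(skb, FREE_WRITE);                  /* original has been consumed */
    }
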
skb | 58 | net/ax25/ax25_subr.c | struct sk_buff *skb; |
skb | 60 | net/ax25/ax25_subr.c | while ((skb = skb_dequeue(&ax25->write_queue)) != NULL) { |
skb | 61 | net/ax25/ax25_subr.c | skb->free = 1; |
skb | 62 | net/ax25/ax25_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 65 | net/ax25/ax25_subr.c | while ((skb = skb_dequeue(&ax25->ack_queue)) != NULL) { |
skb | 66 | net/ax25/ax25_subr.c | skb->free = 1; |
skb | 67 | net/ax25/ax25_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 70 | net/ax25/ax25_subr.c | while ((skb = skb_dequeue(&ax25->reseq_queue)) != NULL) { |
skb | 71 | net/ax25/ax25_subr.c | kfree_skb(skb, FREE_READ); |
skb | 74 | net/ax25/ax25_subr.c | while ((skb = skb_dequeue(&ax25->frag_queue)) != NULL) { |
skb | 75 | net/ax25/ax25_subr.c | kfree_skb(skb, FREE_READ); |
skb | 86 | net/ax25/ax25_subr.c | struct sk_buff *skb; |
skb | 93 | net/ax25/ax25_subr.c | skb = skb_dequeue(&ax25->ack_queue); |
skb | 94 | net/ax25/ax25_subr.c | skb->free = 1; |
skb | 95 | net/ax25/ax25_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 107 | net/ax25/ax25_subr.c | struct sk_buff *skb, *skb_prev = NULL; |
skb | 114 | net/ax25/ax25_subr.c | while ((skb = skb_dequeue(&ax25->ack_queue)) != NULL) { |
skb | 116 | net/ax25/ax25_subr.c | skb_queue_head(&ax25->write_queue, skb); |
skb | 118 | net/ax25/ax25_subr.c | skb_append(skb_prev, skb); |
skb | 119 | net/ax25/ax25_subr.c | skb_prev = skb; |
skb | 145 | net/ax25/ax25_subr.c | int ax25_decode(ax25_cb *ax25, struct sk_buff *skb, int *ns, int *nr, int *pf) |
skb | 150 | net/ax25/ax25_subr.c | frame = skb->data; |
skb | 167 | net/ax25/ax25_subr.c | skb_pull(skb, 1); |
skb | 174 | net/ax25/ax25_subr.c | skb_pull(skb, 2); |
skb | 179 | net/ax25/ax25_subr.c | skb_pull(skb, 2); |
skb | 183 | net/ax25/ax25_subr.c | skb_pull(skb, 1); |
skb | 197 | net/ax25/ax25_subr.c | struct sk_buff *skb; |
skb | 204 | net/ax25/ax25_subr.c | if ((skb = alloc_skb(AX25_BPQ_HEADER_LEN + size_ax25_addr(ax25->digipeat) + 2, GFP_ATOMIC)) == NULL) |
skb | 207 | net/ax25/ax25_subr.c | skb_reserve(skb, AX25_BPQ_HEADER_LEN + size_ax25_addr(ax25->digipeat)); |
skb | 210 | net/ax25/ax25_subr.c | skb->sk = ax25->sk; |
skb | 211 | net/ax25/ax25_subr.c | ax25->sk->wmem_alloc += skb->truesize; |
skb | 216 | net/ax25/ax25_subr.c | dptr = skb_put(skb, 1); |
skb | 223 | net/ax25/ax25_subr.c | dptr = skb_put(skb, 1); |
skb | 227 | net/ax25/ax25_subr.c | dptr = skb_put(skb, 2); |
skb | 234 | net/ax25/ax25_subr.c | skb->free = 1; |
skb | 236 | net/ax25/ax25_subr.c | ax25_transmit_buffer(ax25, skb, type); |
skb | 246 | net/ax25/ax25_subr.c | struct sk_buff *skb; |
skb | 253 | net/ax25/ax25_subr.c | if ((skb = alloc_skb(AX25_BPQ_HEADER_LEN + size_ax25_addr(digi) + 1, GFP_ATOMIC)) == NULL) |
skb | 256 | net/ax25/ax25_subr.c | skb_reserve(skb, AX25_BPQ_HEADER_LEN + size_ax25_addr(digi)); |
skb | 260 | net/ax25/ax25_subr.c | dptr = skb_put(skb, 1); |
skb | 261 | net/ax25/ax25_subr.c | skb->sk = NULL; |
skb | 269 | net/ax25/ax25_subr.c | dptr = skb_push(skb, size_ax25_addr(digi)); |
skb | 272 | net/ax25/ax25_subr.c | skb->arp = 1; |
skb | 273 | net/ax25/ax25_subr.c | skb->free = 1; |
skb | 275 | net/ax25/ax25_subr.c | ax25_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 54 | net/core/datagram.c | struct sk_buff *skb; |
skb | 125 | net/core/datagram.c | skb=skb_dequeue(&sk->receive_queue); |
skb | 126 | net/core/datagram.c | if(skb!=NULL) |
skb | 127 | net/core/datagram.c | skb->users++; |
skb | 134 | net/core/datagram.c | skb=skb_peek(&sk->receive_queue); |
skb | 135 | net/core/datagram.c | if(skb!=NULL) |
skb | 136 | net/core/datagram.c | skb->users++; |
skb | 138 | net/core/datagram.c | if(skb==NULL) /* shouldn't happen but .. */ |
skb | 141 | net/core/datagram.c | return skb; |
skb | 144 | net/core/datagram.c | void skb_free_datagram(struct sk_buff *skb) |
skb | 150 | net/core/datagram.c | skb->users--; |
skb | 151 | net/core/datagram.c | if(skb->users>0) |
skb | 157 | net/core/datagram.c | if(!skb->next && !skb->prev) /* Been dequeued by someone - ie it's read */ |
skb | 158 | net/core/datagram.c | kfree_skb(skb,FREE_READ); |
skb | 166 | net/core/datagram.c | void skb_copy_datagram(struct sk_buff *skb, int offset, char *to, int size) |
skb | 168 | net/core/datagram.c | memcpy_tofs(to,skb->h.raw+offset,size); |
skb | 176 | net/core/datagram.c | void skb_copy_datagram_iovec(struct sk_buff *skb, int offset, struct iovec *to, int size) |
skb | 178 | net/core/datagram.c | memcpy_toiovec(to,skb->h.raw+offset,size); |
skb | 294 | net/core/dev.c | struct sk_buff *skb; |
skb | 295 | net/core/dev.c | while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL) |
skb | 296 | net/core/dev.c | if(skb->free) |
skb | 297 | net/core/dev.c | kfree_skb(skb,FREE_WRITE); |
skb | 329 | net/core/dev.c | void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri) |
skb | 337 | net/core/dev.c | if(pri>=0 && !skb_device_locked(skb)) |
skb | 338 | net/core/dev.c | skb_device_lock(skb); /* Shove a lock on the frame */ |
skb | 340 | net/core/dev.c | IS_SKB(skb); |
skb | 342 | net/core/dev.c | skb->dev = dev; |
skb | 369 | net/core/dev.c | if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) { |
skb | 383 | net/core/dev.c | skb->dev = dev = net_alias_main_dev(dev); |
skb | 391 | net/core/dev.c | skb_queue_tail(dev->buffs + pri,skb); |
skb | 392 | net/core/dev.c | skb_device_unlock(skb); /* Buffer is on the device queue and can be freed safely */ |
skb | 393 | net/core/dev.c | skb = skb_dequeue(dev->buffs + pri); |
skb | 394 | net/core/dev.c | skb_device_lock(skb); /* New buffer needs locking down */ |
skb | 401 | net/core/dev.c | skb->stamp=xtime; |
skb | 408 | net/core/dev.c | ((struct sock *)ptype->data != skb->sk)) |
skb | 411 | net/core/dev.c | if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) |
skb | 415 | net/core/dev.c | ptype->func(skb2, skb->dev, ptype); |
skb | 420 | net/core/dev.c | if (dev->hard_start_xmit(skb, dev) == 0) { |
skb | 434 | net/core/dev.c | skb_device_unlock(skb); |
skb | 435 | net/core/dev.c | skb_queue_head(dev->buffs + pri,skb); |
skb | 445 | net/core/dev.c | void netif_rx(struct sk_buff *skb) |
skb | 454 | net/core/dev.c | skb->sk = NULL; |
skb | 455 | net/core/dev.c | skb->free = 1; |
skb | 456 | net/core/dev.c | if(skb->stamp.tv_sec==0) |
skb | 457 | net/core/dev.c | skb->stamp = xtime; |
skb | 470 | net/core/dev.c | kfree_skb(skb, FREE_READ); |
skb | 478 | net/core/dev.c | IS_SKB(skb); |
skb | 480 | net/core/dev.c | skb_queue_tail(&backlog,skb); |
skb | 514 | net/core/dev.c | struct sk_buff *skb = NULL; |
skb | 524 | net/core/dev.c | skb = (struct sk_buff *) buff; |
skb | 536 | net/core/dev.c | skb = alloc_skb(len, GFP_ATOMIC); |
skb | 537 | net/core/dev.c | if (skb == NULL) |
skb | 550 | net/core/dev.c | to = skb_put(skb,len); |
skb | 572 | net/core/dev.c | skb->dev = dev; |
skb | 573 | net/core/dev.c | skb->free = 1; |
skb | 575 | net/core/dev.c | netif_rx(skb); |
skb | 632 | net/core/dev.c | struct sk_buff *skb; |
skb | 665 | net/core/dev.c | while((skb=skb_dequeue(&backlog))!=NULL) |
skb | 681 | net/core/dev.c | skb->h.raw = skb->data; |
skb | 687 | net/core/dev.c | type = skb->protocol; |
skb | 699 | net/core/dev.c | struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC); |
skb | 701 | net/core/dev.c | pt_prev->func(skb2,skb->dev, pt_prev); |
skb | 708 | net/core/dev.c | if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev)) |
skb | 718 | net/core/dev.c | skb2=skb_clone(skb, GFP_ATOMIC); |
skb | 726 | net/core/dev.c | pt_prev->func(skb2, skb->dev, pt_prev); |
skb | 738 | net/core/dev.c | pt_prev->func(skb, skb->dev, pt_prev); |
skb | 744 | net/core/dev.c | kfree_skb(skb, FREE_WRITE); |
skb | 780 | net/core/dev.c | struct sk_buff *skb; |
skb | 803 | net/core/dev.c | while((skb=skb_dequeue(&dev->buffs[i]))!=NULL) |
skb | 808 | net/core/dev.c | skb_device_lock(skb); |
skb | 814 | net/core/dev.c | dev_queue_xmit(skb,dev,-i - 1); |
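
Editorial note: the dev.c entries sketch the transmit queueing contract: dev_queue_xmit() locks the buffer, queues it on dev->buffs[pri], dequeues the head of that queue, offers clones to matching packet taps, and calls dev->hard_start_xmit(); a non-zero return means the device was busy, so the buffer is unlocked and put back at the head of the queue to be retried later from dev_tint(). A hedged sketch of just the try-or-requeue step; example_try_xmit() is hypothetical and the tap/clone and alias handling shown above are omitted.

    /* Hedged sketch of the try-or-requeue step inside dev_queue_xmit(). */
    static void example_try_xmit(struct sk_buff *skb, struct device *dev, int pri)
    {
        skb_queue_tail(dev->buffs + pri, skb);   /* FIFO per priority level */
        skb_device_unlock(skb);                  /* safe: it is on the device queue */
        skb = skb_dequeue(dev->buffs + pri);     /* take the oldest pending buffer */
        skb_device_lock(skb);                    /* lock it down for the driver */

        if (dev->hard_start_xmit(skb, dev) != 0) {
            /* Device busy: give the buffer back and let dev_tint() retry. */
            skb_device_unlock(skb);
            skb_queue_head(dev->buffs + pri, skb);
        }
    }
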
skb | 96 | net/core/firewall.c | int call_fw_firewall(int pf, struct sk_buff *skb, void *phdr) |
skb | 102 | net/core/firewall.c | int rc=fw->fw_forward(fw,pf,skb,phdr); |
skb | 114 | net/core/firewall.c | int call_in_firewall(int pf, struct sk_buff *skb, void *phdr) |
skb | 120 | net/core/firewall.c | int rc=fw->fw_input(fw,pf,skb,phdr); |
skb | 128 | net/core/firewall.c | int call_out_firewall(int pf, struct sk_buff *skb, void *phdr) |
skb | 134 | net/core/firewall.c | int rc=fw->fw_output(fw,pf,skb,phdr); |
skb | 63 | net/core/net_alias.c | static int net_alias_hard_start_xmit(struct sk_buff *skb, struct device *dev); |
skb | 222 | net/core/net_alias.c | net_alias_hard_start_xmit(struct sk_buff *skb, struct device *dev) |
skb | 225 | net/core/net_alias.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 77 | net/core/skbuff.c | int skb_check(struct sk_buff *skb, int head, int line, char *file) |
skb | 80 | net/core/skbuff.c | if (skb->magic_debug_cookie != SK_HEAD_SKB) { |
skb | 85 | net/core/skbuff.c | if (!skb->next || !skb->prev) { |
skb | 89 | net/core/skbuff.c | if (skb->next->magic_debug_cookie != SK_HEAD_SKB |
skb | 90 | net/core/skbuff.c | && skb->next->magic_debug_cookie != SK_GOOD_SKB) { |
skb | 95 | net/core/skbuff.c | if (skb->prev->magic_debug_cookie != SK_HEAD_SKB |
skb | 96 | net/core/skbuff.c | && skb->prev->magic_debug_cookie != SK_GOOD_SKB) { |
skb | 103 | net/core/skbuff.c | struct sk_buff *skb2 = skb->next; |
skb | 105 | net/core/skbuff.c | while (skb2 != skb && i < 5) { |
skb | 117 | net/core/skbuff.c | if (skb->next != NULL && skb->next->magic_debug_cookie != SK_HEAD_SKB |
skb | 118 | net/core/skbuff.c | && skb->next->magic_debug_cookie != SK_GOOD_SKB) { |
skb | 123 | net/core/skbuff.c | if (skb->prev != NULL && skb->prev->magic_debug_cookie != SK_HEAD_SKB |
skb | 124 | net/core/skbuff.c | && skb->prev->magic_debug_cookie != SK_GOOD_SKB) { |
skb | 131 | net/core/skbuff.c | if(skb->magic_debug_cookie==SK_FREED_SKB) |
skb | 136 | net/core/skbuff.c | skb,skb->truesize,skb->free); |
skb | 139 | net/core/skbuff.c | if(skb->magic_debug_cookie!=SK_GOOD_SKB) |
skb | 143 | net/core/skbuff.c | skb,skb->truesize,skb->free); |
skb | 146 | net/core/skbuff.c | if(skb->head>skb->data) |
skb | 150 | net/core/skbuff.c | skb,skb->head,skb->data); |
skb | 153 | net/core/skbuff.c | if(skb->tail>skb->end) |
skb | 157 | net/core/skbuff.c | skb,skb->tail,skb->end); |
skb | 160 | net/core/skbuff.c | if(skb->data>skb->tail) |
skb | 164 | net/core/skbuff.c | skb,skb->data,skb->tail); |
skb | 167 | net/core/skbuff.c | if(skb->tail-skb->data!=skb->len) |
skb | 171 | net/core/skbuff.c | skb,skb->data,skb->end,skb->len); |
skb | 174 | net/core/skbuff.c | if((unsigned long) skb->end > (unsigned long) skb) |
skb | 178 | net/core/skbuff.c | skb,skb->end); |
skb | 338 | net/core/skbuff.c | void skb_unlink(struct sk_buff *skb) |
skb | 345 | net/core/skbuff.c | IS_SKB(skb); |
skb | 347 | net/core/skbuff.c | if(skb->prev && skb->next) |
skb | 349 | net/core/skbuff.c | skb->next->prev = skb->prev; |
skb | 350 | net/core/skbuff.c | skb->prev->next = skb->next; |
skb | 351 | net/core/skbuff.c | skb->next = NULL; |
skb | 352 | net/core/skbuff.c | skb->prev = NULL; |
skb | 365 | net/core/skbuff.c | unsigned char *skb_put(struct sk_buff *skb, int len) |
skb | 367 | net/core/skbuff.c | unsigned char *tmp=skb->tail; |
skb | 368 | net/core/skbuff.c | IS_SKB(skb); |
skb | 369 | net/core/skbuff.c | skb->tail+=len; |
skb | 370 | net/core/skbuff.c | skb->len+=len; |
skb | 371 | net/core/skbuff.c | IS_SKB(skb); |
skb | 372 | net/core/skbuff.c | if(skb->tail>skb->end) |
skb | 377 | net/core/skbuff.c | unsigned char *skb_push(struct sk_buff *skb, int len) |
skb | 379 | net/core/skbuff.c | IS_SKB(skb); |
skb | 380 | net/core/skbuff.c | skb->data-=len; |
skb | 381 | net/core/skbuff.c | skb->len+=len; |
skb | 382 | net/core/skbuff.c | IS_SKB(skb); |
skb | 383 | net/core/skbuff.c | if(skb->data<skb->head) |
skb | 385 | net/core/skbuff.c | return skb->data; |
skb | 388 | net/core/skbuff.c | unsigned char * skb_pull(struct sk_buff *skb, int len) |
skb | 390 | net/core/skbuff.c | IS_SKB(skb); |
skb | 391 | net/core/skbuff.c | if(len>skb->len) |
skb | 393 | net/core/skbuff.c | skb->data+=len; |
skb | 394 | net/core/skbuff.c | skb->len-=len; |
skb | 395 | net/core/skbuff.c | return skb->data; |
skb | 398 | net/core/skbuff.c | int skb_headroom(struct sk_buff *skb) |
skb | 400 | net/core/skbuff.c | IS_SKB(skb); |
skb | 401 | net/core/skbuff.c | return skb->data-skb->head; |
skb | 404 | net/core/skbuff.c | int skb_tailroom(struct sk_buff *skb) |
skb | 406 | net/core/skbuff.c | IS_SKB(skb); |
skb | 407 | net/core/skbuff.c | return skb->end-skb->tail; |
skb | 410 | net/core/skbuff.c | void skb_reserve(struct sk_buff *skb, int len) |
skb | 412 | net/core/skbuff.c | IS_SKB(skb); |
skb | 413 | net/core/skbuff.c | skb->data+=len; |
skb | 414 | net/core/skbuff.c | skb->tail+=len; |
skb | 415 | net/core/skbuff.c | if(skb->tail>skb->end) |
skb | 417 | net/core/skbuff.c | if(skb->data<skb->head) |
skb | 419 | net/core/skbuff.c | IS_SKB(skb); |
skb | 422 | net/core/skbuff.c | void skb_trim(struct sk_buff *skb, int len) |
skb | 424 | net/core/skbuff.c | IS_SKB(skb); |
skb | 425 | net/core/skbuff.c | if(skb->len>len) |
skb | 427 | net/core/skbuff.c | skb->len=len; |
skb | 428 | net/core/skbuff.c | skb->tail=skb->data+len; |
skb | 441 | net/core/skbuff.c | void kfree_skb(struct sk_buff *skb, int rw) |
skb | 443 | net/core/skbuff.c | if (skb == NULL) |
skb | 450 | net/core/skbuff.c | IS_SKB(skb); |
skb | 452 | net/core/skbuff.c | if (skb->lock) |
skb | 454 | net/core/skbuff.c | skb->free = 3; /* Free when unlocked */ |
skb | 458 | net/core/skbuff.c | if (skb->free == 2) |
skb | 461 | net/core/skbuff.c | if (skb->next) |
skb | 465 | net/core/skbuff.c | if(skb->destructor) |
skb | 466 | net/core/skbuff.c | skb->destructor(skb); |
skb | 467 | net/core/skbuff.c | if (skb->sk) |
skb | 469 | net/core/skbuff.c | if(skb->sk->prot!=NULL) |
skb | 472 | net/core/skbuff.c | sock_rfree(skb->sk, skb); |
skb | 474 | net/core/skbuff.c | sock_wfree(skb->sk, skb); |
skb | 484 | net/core/skbuff.c | skb->sk->rmem_alloc-=skb->truesize; |
skb | 486 | net/core/skbuff.c | skb->sk->wmem_alloc-=skb->truesize; |
skb | 488 | net/core/skbuff.c | if(!skb->sk->dead) |
skb | 489 | net/core/skbuff.c | skb->sk->write_space(skb->sk); |
skb | 490 | net/core/skbuff.c | kfree_skbmem(skb); |
skb | 494 | net/core/skbuff.c | kfree_skbmem(skb); |
skb | 503 | net/core/skbuff.c | struct sk_buff *skb; |
skb | 532 | net/core/skbuff.c | if(skb->magic_debug_cookie == SK_GOOD_SKB) |
skb | 533 | net/core/skbuff.c | printk("Kernel kmalloc handed us an existing skb (%p)\n",skb); |
skb | 544 | net/core/skbuff.c | skb=(struct sk_buff *)(bptr+size)-1; |
skb | 546 | net/core/skbuff.c | skb->count = 1; /* only one reference to this */ |
skb | 547 | net/core/skbuff.c | skb->data_skb = NULL; /* and we're our own data skb */ |
skb | 549 | net/core/skbuff.c | skb->free = 2; /* Invalid so we pick up forgetful users */ |
skb | 550 | net/core/skbuff.c | skb->lock = 0; |
skb | 551 | net/core/skbuff.c | skb->pkt_type = PACKET_HOST; /* Default type */ |
skb | 552 | net/core/skbuff.c | skb->prev = skb->next = NULL; |
skb | 553 | net/core/skbuff.c | skb->link3 = NULL; |
skb | 554 | net/core/skbuff.c | skb->sk = NULL; |
skb | 555 | net/core/skbuff.c | skb->truesize=size; |
skb | 556 | net/core/skbuff.c | skb->localroute=0; |
skb | 557 | net/core/skbuff.c | skb->stamp.tv_sec=0; /* No idea about time */ |
skb | 558 | net/core/skbuff.c | skb->localroute = 0; |
skb | 559 | net/core/skbuff.c | skb->ip_summed = 0; |
skb | 560 | net/core/skbuff.c | memset(skb->proto_priv, 0, sizeof(skb->proto_priv)); |
skb | 566 | net/core/skbuff.c | skb->magic_debug_cookie = SK_GOOD_SKB; |
skb | 568 | net/core/skbuff.c | skb->users = 0; |
skb | 570 | net/core/skbuff.c | skb->head=bptr; |
skb | 571 | net/core/skbuff.c | skb->data=bptr; |
skb | 572 | net/core/skbuff.c | skb->tail=bptr; |
skb | 573 | net/core/skbuff.c | skb->end=bptr+len; |
skb | 574 | net/core/skbuff.c | skb->len=0; |
skb | 575 | net/core/skbuff.c | skb->destructor=NULL; |
skb | 576 | net/core/skbuff.c | return skb; |
skb | 583 | net/core/skbuff.c | void kfree_skbmem(struct sk_buff *skb) |
skb | 586 | net/core/skbuff.c | void * addr = skb->head; |
skb | 591 | net/core/skbuff.c | if (--skb->count <= 0) { |
skb | 593 | net/core/skbuff.c | if (skb->data_skb) { |
skb | 594 | net/core/skbuff.c | addr = skb; |
skb | 595 | net/core/skbuff.c | kfree_skbmem(skb->data_skb); |
skb | 608 | net/core/skbuff.c | struct sk_buff *skb_clone(struct sk_buff *skb, int priority) |
skb | 613 | net/core/skbuff.c | IS_SKB(skb); |
skb | 617 | net/core/skbuff.c | memcpy(n, skb, sizeof(*n)); |
skb | 619 | net/core/skbuff.c | if (skb->data_skb) |
skb | 620 | net/core/skbuff.c | skb = skb->data_skb; |
skb | 623 | net/core/skbuff.c | skb->count++; |
skb | 627 | net/core/skbuff.c | n->data_skb = skb; |
skb | 639 | net/core/skbuff.c | struct sk_buff *skb_clone(struct sk_buff *skb, int priority) |
skb | 648 | net/core/skbuff.c | IS_SKB(skb); |
skb | 650 | net/core/skbuff.c | n=alloc_skb(skb->truesize-sizeof(struct sk_buff),priority); |
skb | 658 | net/core/skbuff.c | offset=n->head-skb->head; |
skb | 661 | net/core/skbuff.c | skb_reserve(n,skb->data-skb->head); |
skb | 663 | net/core/skbuff.c | skb_put(n,skb->len); |
skb | 665 | net/core/skbuff.c | memcpy(n->head,skb->head,skb->end-skb->head); |
skb | 668 | net/core/skbuff.c | n->when=skb->when; |
skb | 669 | net/core/skbuff.c | n->dev=skb->dev; |
skb | 670 | net/core/skbuff.c | n->h.raw=skb->h.raw+offset; |
skb | 671 | net/core/skbuff.c | n->mac.raw=skb->mac.raw+offset; |
skb | 672 | net/core/skbuff.c | n->ip_hdr=(struct iphdr *)(((char *)skb->ip_hdr)+offset); |
skb | 673 | net/core/skbuff.c | n->saddr=skb->saddr; |
skb | 674 | net/core/skbuff.c | n->daddr=skb->daddr; |
skb | 675 | net/core/skbuff.c | n->raddr=skb->raddr; |
skb | 676 | net/core/skbuff.c | n->seq=skb->seq;
skb | 677 | net/core/skbuff.c | n->end_seq=skb->end_seq;
skb | 678 | net/core/skbuff.c | n->ack_seq=skb->ack_seq;
skb | 679 | net/core/skbuff.c | n->acked=skb->acked; |
skb | 680 | net/core/skbuff.c | memcpy(n->proto_priv, skb->proto_priv, sizeof(skb->proto_priv)); |
skb | 681 | net/core/skbuff.c | n->used=skb->used; |
skb | 683 | net/core/skbuff.c | n->arp=skb->arp; |
skb | 687 | net/core/skbuff.c | n->pkt_type=skb->pkt_type; |
skb | 688 | net/core/skbuff.c | n->stamp=skb->stamp; |
skb | 699 | net/core/skbuff.c | void skb_device_lock(struct sk_buff *skb) |
skb | 701 | net/core/skbuff.c | if(skb->lock) |
skb | 705 | net/core/skbuff.c | skb->lock++; |
skb | 708 | net/core/skbuff.c | void skb_device_unlock(struct sk_buff *skb) |
skb | 710 | net/core/skbuff.c | if(skb->lock==0) |
skb | 712 | net/core/skbuff.c | skb->lock--; |
skb | 713 | net/core/skbuff.c | if(skb->lock==0) |
skb | 717 | net/core/skbuff.c | void dev_kfree_skb(struct sk_buff *skb, int mode) |
skb | 723 | net/core/skbuff.c | if(skb->lock==1) |
skb | 726 | net/core/skbuff.c | if (!--skb->lock && (skb->free == 1 || skb->free == 3)) |
skb | 729 | net/core/skbuff.c | kfree_skb(skb,mode); |
skb | 737 | net/core/skbuff.c | struct sk_buff *skb; |
skb | 739 | net/core/skbuff.c | skb = alloc_skb(length+16, GFP_ATOMIC); |
skb | 740 | net/core/skbuff.c | if (skb) |
skb | 741 | net/core/skbuff.c | skb_reserve(skb,16); |
skb | 742 | net/core/skbuff.c | return skb; |
skb | 745 | net/core/skbuff.c | int skb_device_locked(struct sk_buff *skb) |
skb | 747 | net/core/skbuff.c | return skb->lock? 1 : 0; |
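
The net/core/skbuff.c entries above cover the core buffer helpers this index keeps hitting: alloc_skb/dev_alloc_skb, skb_reserve, skb_put, skb_push, skb_pull, skb_trim and kfree_skb. A minimal receive-path sketch, assuming the 1.x/2.0-era API exactly as listed (struct device, skb->dev, eth_type_trans, netif_rx) and a hypothetical copy_from_card() helper standing in for whatever moves bytes off the hardware:

    /* Illustrative sketch only; not taken from any driver indexed here.
     * copy_from_card() and pkt_len are hypothetical placeholders. */
    static void example_rx(struct device *dev, int pkt_len)
    {
        struct sk_buff *skb = dev_alloc_skb(pkt_len + 2);
        if (skb == NULL)
            return;                         /* out of buffer memory: drop */
        skb_reserve(skb, 2);                /* 16-byte align the IP header */
        skb->dev = dev;
        copy_from_card(skb_put(skb, pkt_len), pkt_len);
        skb->protocol = eth_type_trans(skb, dev);
        netif_rx(skb);                      /* queue for the protocol layers */
    }

dev_alloc_skb() itself (net/core/skbuff.c lines 737-742 above) is just alloc_skb(length+16, GFP_ATOMIC) followed by skb_reserve(skb,16), so the headroom for a link-level header is already paid for.
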
skb | 391 | net/core/sock.c | void sock_wfree(struct sock *sk, struct sk_buff *skb) |
skb | 393 | net/core/sock.c | int s=skb->truesize; |
skb | 395 | net/core/sock.c | IS_SKB(skb); |
skb | 397 | net/core/sock.c | kfree_skbmem(skb); |
skb | 412 | net/core/sock.c | void sock_rfree(struct sock *sk, struct sk_buff *skb) |
skb | 414 | net/core/sock.c | int s=skb->truesize; |
skb | 416 | net/core/sock.c | IS_SKB(skb); |
skb | 418 | net/core/sock.c | kfree_skbmem(skb); |
skb | 435 | net/core/sock.c | struct sk_buff *skb; |
skb | 459 | net/core/sock.c | skb = sock_wmalloc(sk, size, 0, sk->allocation); |
skb | 464 | net/core/sock.c | skb = sock_wmalloc(sk, size, 0 , GFP_BUFFER); |
skb | 465 | net/core/sock.c | if(!skb) |
skb | 466 | net/core/sock.c | skb=sock_wmalloc(sk, fallback, 0, GFP_KERNEL); |
skb | 473 | net/core/sock.c | if(skb==NULL) |
skb | 525 | net/core/sock.c | while(skb==NULL); |
skb | 527 | net/core/sock.c | return skb; |
skb | 535 | net/core/sock.c | struct sk_buff *skb; |
skb | 559 | net/core/sock.c | while((skb = skb_dequeue(&sk->back_log)) != NULL) |
skb | 563 | net/core/sock.c | sk->prot->rcv(skb, skb->dev, (struct options*)skb->proto_priv, |
skb | 564 | net/core/sock.c | skb->saddr, skb->len, skb->daddr, 1, |
skb | 89 | net/ethernet/eth.c | int eth_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 92 | net/ethernet/eth.c | struct ethhdr *eth = (struct ethhdr *)skb_push(skb,ETH_HLEN); |
skb | 140 | net/ethernet/eth.c | struct sk_buff *skb) |
skb | 159 | net/ethernet/eth.c | return arp_find(eth->h_dest, dst, dev, dev->pa_addr, skb)? 1 : 0; |
skb | 172 | net/ethernet/eth.c | unsigned short eth_type_trans(struct sk_buff *skb, struct device *dev) |
skb | 177 | net/ethernet/eth.c | skb->mac.raw=skb->data; |
skb | 178 | net/ethernet/eth.c | skb_pull(skb,dev->hard_header_len); |
skb | 179 | net/ethernet/eth.c | eth= skb->mac.ethernet; |
skb | 184 | net/ethernet/eth.c | skb->pkt_type=PACKET_BROADCAST; |
skb | 186 | net/ethernet/eth.c | skb->pkt_type=PACKET_MULTICAST; |
skb | 197 | net/ethernet/eth.c | skb->pkt_type=PACKET_OTHERHOST; |
skb | 203 | net/ethernet/eth.c | rawp = skb->data; |
skb | 9 | net/ethernet/pe2.c | struct sk_buff *skb, unsigned char *dest_node) |
skb | 11 | net/ethernet/pe2.c | struct device *dev = skb->dev; |
skb | 13 | net/ethernet/pe2.c | skb->protocol = htons (ETH_P_IPX); |
skb | 14 | net/ethernet/pe2.c | dev->hard_header(skb, dev, ETH_P_IPX, dest_node, NULL, skb->len); |
skb | 287 | net/ipv4/af_inet.c | struct sk_buff *skb; |
skb | 312 | net/ipv4/af_inet.c | while ((skb = tcp_dequeue_partial(sk)) != NULL) |
skb | 314 | net/ipv4/af_inet.c | IS_SKB(skb); |
skb | 315 | net/ipv4/af_inet.c | kfree_skb(skb, FREE_WRITE); |
skb | 322 | net/ipv4/af_inet.c | while((skb = skb_dequeue(&sk->write_queue)) != NULL) { |
skb | 323 | net/ipv4/af_inet.c | IS_SKB(skb); |
skb | 324 | net/ipv4/af_inet.c | kfree_skb(skb, FREE_WRITE); |
skb | 334 | net/ipv4/af_inet.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) |
skb | 340 | net/ipv4/af_inet.c | if (skb->sk != NULL && skb->sk != sk) |
skb | 342 | net/ipv4/af_inet.c | IS_SKB(skb); |
skb | 343 | net/ipv4/af_inet.c | skb->sk->dead = 1; |
skb | 344 | net/ipv4/af_inet.c | skb->sk->prot->close(skb->sk, 0); |
skb | 346 | net/ipv4/af_inet.c | IS_SKB(skb); |
skb | 347 | net/ipv4/af_inet.c | kfree_skb(skb, FREE_READ); |
skb | 356 | net/ipv4/af_inet.c | for(skb = sk->send_head; skb != NULL; ) |
skb | 364 | net/ipv4/af_inet.c | if (skb->next && skb->prev) |
skb | 366 | net/ipv4/af_inet.c | IS_SKB(skb); |
skb | 367 | net/ipv4/af_inet.c | skb_unlink(skb); |
skb | 369 | net/ipv4/af_inet.c | skb->dev = NULL; |
skb | 370 | net/ipv4/af_inet.c | skb2 = skb->link3; |
skb | 371 | net/ipv4/af_inet.c | kfree_skb(skb, FREE_WRITE); |
skb | 372 | net/ipv4/af_inet.c | skb = skb2; |
skb | 381 | net/ipv4/af_inet.c | while((skb=skb_dequeue(&sk->back_log))!=NULL) |
skb | 384 | net/ipv4/af_inet.c | kfree_skb(skb, FREE_READ); |
skb | 132 | net/ipv4/arp.c | struct sk_buff_head skb; /* list of queued packets */ |
skb | 297 | net/ipv4/arp.c | struct sk_buff *skb; |
skb | 303 | net/ipv4/arp.c | while ((skb = skb_dequeue(&entry->skb)) != NULL) |
skb | 305 | net/ipv4/arp.c | skb_device_lock(skb); |
skb | 307 | net/ipv4/arp.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 629 | net/ipv4/arp.c | struct sk_buff *skb; |
skb | 644 | net/ipv4/arp.c | skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4) |
skb | 646 | net/ipv4/arp.c | if (skb == NULL) |
skb | 651 | net/ipv4/arp.c | skb_reserve(skb, dev->hard_header_len); |
skb | 652 | net/ipv4/arp.c | arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4)); |
skb | 653 | net/ipv4/arp.c | skb->arp = 1; |
skb | 654 | net/ipv4/arp.c | skb->dev = dev; |
skb | 655 | net/ipv4/arp.c | skb->free = 1; |
skb | 656 | net/ipv4/arp.c | skb->protocol = htons (ETH_P_IP); |
skb | 662 | net/ipv4/arp.c | dev->hard_header(skb,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len); |
skb | 692 | net/ipv4/arp.c | dev_queue_xmit(skb, dev, 0); |
skb | 701 | net/ipv4/arp.c | struct sk_buff *skb; |
skb | 723 | net/ipv4/arp.c | while((skb = skb_dequeue(&entry->skb)) != NULL) |
skb | 725 | net/ipv4/arp.c | IS_SKB(skb); |
skb | 726 | net/ipv4/arp.c | skb_device_lock(skb); |
skb | 728 | net/ipv4/arp.c | if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb)) |
skb | 730 | net/ipv4/arp.c | skb->arp = 1; |
skb | 731 | net/ipv4/arp.c | if(skb->sk==NULL) |
skb | 732 | net/ipv4/arp.c | dev_queue_xmit(skb, skb->dev, 0); |
skb | 734 | net/ipv4/arp.c | dev_queue_xmit(skb,skb->dev,skb->sk->priority); |
skb | 774 | net/ipv4/arp.c | int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 780 | net/ipv4/arp.c | struct arphdr *arp = (struct arphdr *)skb->h.raw; |
skb | 801 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 821 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 830 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 839 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 847 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 854 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 876 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 902 | net/ipv4/arp.c | if (tip != dev->pa_addr && net_alias_has(skb->dev)) |
skb | 911 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 953 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 959 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 979 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 1032 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 1052 | net/ipv4/arp.c | entry->dev = skb->dev; |
skb | 1054 | net/ipv4/arp.c | skb_queue_head_init(&entry->skb); |
skb | 1073 | net/ipv4/arp.c | kfree_skb(skb, FREE_READ); |
skb | 1182 | net/ipv4/arp.c | u32 saddr, struct sk_buff *skb) |
skb | 1189 | net/ipv4/arp.c | if (skb) |
skb | 1190 | net/ipv4/arp.c | skb->arp = 1; |
skb | 1211 | net/ipv4/arp.c | if (skb != NULL) |
skb | 1215 | net/ipv4/arp.c | skb_queue_tail(&entry->skb, skb); |
skb | 1216 | net/ipv4/arp.c | skb_device_unlock(skb); |
skb | 1229 | net/ipv4/arp.c | if (skb->sk) |
skb | 1231 | net/ipv4/arp.c | skb->sk->err = EHOSTDOWN; |
skb | 1232 | net/ipv4/arp.c | skb->sk->error_report(skb->sk); |
skb | 1235 | net/ipv4/arp.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev); |
skb | 1237 | net/ipv4/arp.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1250 | net/ipv4/arp.c | if (skb) |
skb | 1251 | net/ipv4/arp.c | skb->arp = 1; |
skb | 1275 | net/ipv4/arp.c | skb_queue_head_init(&entry->skb); |
skb | 1276 | net/ipv4/arp.c | if (skb != NULL) |
skb | 1278 | net/ipv4/arp.c | skb_queue_tail(&entry->skb, skb); |
skb | 1279 | net/ipv4/arp.c | skb_device_unlock(skb); |
skb | 1297 | net/ipv4/arp.c | else if (skb != NULL) |
skb | 1298 | net/ipv4/arp.c | dev_kfree_skb(skb, FREE_WRITE); |
skb | 1510 | net/ipv4/arp.c | skb_queue_head_init(&entry->skb); |
skb | 1586 | net/ipv4/arp.c | struct sk_buff * skb; |
skb | 1605 | net/ipv4/arp.c | while ((skb = skb_dequeue(&entry->skb)) != NULL) |
skb | 1607 | net/ipv4/arp.c | skb_device_lock(skb); |
skb | 1609 | net/ipv4/arp.c | skb_queue_tail(&entry1->skb, skb); |
skb | 1610 | net/ipv4/arp.c | skb_device_unlock(skb); |
skb | 1746 | net/ipv4/arp.c | skb_queue_head_init(&entry->skb); |
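
The arp.c entries for arp_send (source lines 629-692 above) show the transmit-side counterpart: size the buffer for the link header plus payload, reserve the header room, skb_put the payload, let dev->hard_header fill in the MAC header, then hand the buffer to dev_queue_xmit. A minimal sketch, assuming the same era API; fill_payload() and payload_len are hypothetical placeholders:

    /* Illustrative sketch only; fill_payload() and payload_len are
     * hypothetical, and error handling is reduced to the bare minimum. */
    static void example_tx(struct device *dev, int payload_len)
    {
        struct sk_buff *skb = alloc_skb(dev->hard_header_len + payload_len,
                                        GFP_ATOMIC);
        if (skb == NULL)
            return;
        skb_reserve(skb, dev->hard_header_len); /* room for the MAC header */
        fill_payload(skb_put(skb, payload_len));
        skb->dev = dev;
        skb->free = 1;                          /* free once transmitted */
        skb->arp = 1;                           /* destination already resolved */
        skb->protocol = htons(ETH_P_IP);
        dev->hard_header(skb, dev, ETH_P_IP, dev->broadcast, NULL, skb->len);
        dev_queue_xmit(skb, dev, 0);
    }
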
skb | 151 | net/ipv4/icmp.c | void (*handler)(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, __u32 saddr, __u32 daddr, int len); |
skb | 339 | net/ipv4/icmp.c | static void icmp_unreach(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, __u32 saddr, __u32 daddr, int len) |
skb | 462 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 470 | net/ipv4/icmp.c | static void icmp_redirect(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, __u32 source, __u32 daddr, int len) |
skb | 529 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 540 | net/ipv4/icmp.c | static void icmp_echo(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, __u32 saddr, __u32 daddr, int len) |
skb | 547 | net/ipv4/icmp.c | if (ip_options_echo(&icmp_param.replyopts, NULL, daddr, saddr, skb)==0) |
skb | 549 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 560 | net/ipv4/icmp.c | static void icmp_timestamp(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, __u32 saddr, __u32 daddr, int len) |
skb | 572 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 592 | net/ipv4/icmp.c | if (ip_options_echo(&icmp_param.replyopts, NULL, daddr, saddr, skb)==0) |
skb | 594 | net/ipv4/icmp.c | kfree_skb(skb,FREE_READ); |
skb | 610 | net/ipv4/icmp.c | static void icmp_address(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, __u32 saddr, __u32 daddr, int len) |
skb | 621 | net/ipv4/icmp.c | if (ip_options_echo(&icmp_param.replyopts, NULL, daddr, saddr, skb)==0) |
skb | 624 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 627 | net/ipv4/icmp.c | static void icmp_discard(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev, __u32 saddr, __u32 daddr, int len) |
skb | 629 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 636 | net/ipv4/icmp.c | int icmp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 640 | net/ipv4/icmp.c | struct icmphdr *icmph=(void *)skb->h.raw; |
skb | 652 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 665 | net/ipv4/icmp.c | kfree_skb(skb,FREE_READ); |
skb | 683 | net/ipv4/icmp.c | kfree_skb(skb, FREE_READ); |
skb | 696 | net/ipv4/icmp.c | (icmp_pointers[icmph->type].handler)(icmph,skb,skb->dev,saddr,daddr,len); |
skb | 225 | net/ipv4/igmp.c | struct sk_buff *skb=alloc_skb(MAX_IGMP_SIZE, GFP_ATOMIC); |
skb | 229 | net/ipv4/igmp.c | if(skb==NULL) |
skb | 231 | net/ipv4/igmp.c | tmp=ip_build_header(skb, INADDR_ANY, address, &dev, IPPROTO_IGMP, NULL, |
skb | 235 | net/ipv4/igmp.c | kfree_skb(skb, FREE_WRITE); |
skb | 238 | net/ipv4/igmp.c | ih=(struct igmphdr *)skb_put(skb,sizeof(struct igmphdr)); |
skb | 244 | net/ipv4/igmp.c | ip_queue_xmit(NULL,dev,skb,1); |
skb | 407 | net/ipv4/igmp.c | int igmp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 426 | net/ipv4/igmp.c | ih=(struct igmphdr *)skb->h.raw; |
skb | 428 | net/ipv4/igmp.c | if(skb->len <sizeof(struct igmphdr) || skb->ip_hdr->ttl>1 || ip_compute_csum((void *)skb->h.raw,sizeof(struct igmphdr))) |
skb | 430 | net/ipv4/igmp.c | kfree_skb(skb, FREE_READ); |
skb | 442 | net/ipv4/igmp.c | kfree_skb(skb, FREE_READ); |
skb | 452 | net/ipv4/igmp.c | kfree_skb(skb, FREE_READ); |
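
Seen from one level up, the igmp.c entries above (source lines 225-244) show how a protocol builds and sends a whole datagram: alloc_skb, ip_build_header to lay down the IP header, skb_put for the protocol header, then ip_queue_xmit. A minimal sketch, assuming the ten-argument build_header call listed for tcp.c later in this index; the buffer size, the report type and the NULL route-cache argument are illustrative guesses rather than the real igmp.c values:

    /* Illustrative sketch only; EXAMPLE_SIZE and the header fill are
     * placeholders, not the real report-sending code. */
    static void example_igmp_style_send(struct device *dev, __u32 address)
    {
        struct sk_buff *skb = alloc_skb(EXAMPLE_SIZE, GFP_ATOMIC);
        struct igmphdr *ih;
        int tmp;

        if (skb == NULL)
            return;
        tmp = ip_build_header(skb, INADDR_ANY, address, &dev, IPPROTO_IGMP,
                              NULL, EXAMPLE_SIZE, 0, 1, NULL);
        if (tmp < 0)
        {
            kfree_skb(skb, FREE_WRITE);         /* could not build the header */
            return;
        }
        ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
        ih->type = IGMP_HOST_MEMBERSHIP_REPORT; /* placeholder fill */
        ip_queue_xmit(NULL, dev, skb, 1);       /* sk==NULL, free on send */
    }
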
skb | 43 | net/ipv4/ip_forward.c | static void ip_encap(struct sk_buff *skb, int len, struct device *out, __u32 daddr) |
skb | 50 | net/ipv4/ip_forward.c | struct iphdr *iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr)); |
skb | 54 | net/ipv4/ip_forward.c | iph->tos = skb->ip_hdr->tos; |
skb | 55 | net/ipv4/ip_forward.c | iph->ttl = skb->ip_hdr->ttl; |
skb | 61 | net/ipv4/ip_forward.c | iph->tot_len = htons(skb->len); |
skb | 65 | net/ipv4/ip_forward.c | skb->dev = out; |
skb | 66 | net/ipv4/ip_forward.c | skb->arp = 1; |
skb | 67 | net/ipv4/ip_forward.c | skb->raddr=daddr; |
skb | 71 | net/ipv4/ip_forward.c | if (out->hard_header && out->hard_header(skb, out, ETH_P_IP, NULL, NULL, len)<0) |
skb | 72 | net/ipv4/ip_forward.c | skb->arp=0; |
skb | 84 | net/ipv4/ip_forward.c | int ip_forward(struct sk_buff *skb, struct device *dev, int is_frag, |
skb | 93 | net/ipv4/ip_forward.c | struct options * opt = (struct options*)skb->proto_priv; |
skb | 99 | net/ipv4/ip_forward.c | struct sk_buff *skb_in = skb; /* So we can remember if the masquerader did some swaps */ |
skb | 110 | net/ipv4/ip_forward.c | fw_res=call_fw_firewall(PF_INET, skb, skb->h.iph); |
skb | 116 | net/ipv4/ip_forward.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev); |
skb | 134 | net/ipv4/ip_forward.c | iph = skb->h.iph; |
skb | 151 | net/ipv4/ip_forward.c | icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0, dev); |
skb | 172 | net/ipv4/ip_forward.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_UNREACH, 0, dev); |
skb | 193 | net/ipv4/ip_forward.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_SR_FAILED, 0, dev); |
skb | 211 | net/ipv4/ip_forward.c | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, raddr, dev); |
skb | 220 | net/ipv4/ip_forward.c | dev2=skb->dev; |
skb | 221 | net/ipv4/ip_forward.c | raddr=skb->raddr; |
skb | 242 | net/ipv4/ip_forward.c | ip_fw_masquerade(&skb, dev2); |
skb | 244 | net/ipv4/ip_forward.c | IS_SKB(skb); |
skb | 246 | net/ipv4/ip_forward.c | if (skb->len+encap > dev2->mtu && (ntohs(iph->frag_off) & IP_DF)) |
skb | 249 | net/ipv4/ip_forward.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(dev2->mtu), dev); |
skb | 256 | net/ipv4/ip_forward.c | if(skb_headroom(skb)-encap<dev2->hard_header_len) |
skb | 258 | net/ipv4/ip_forward.c | skb2 = alloc_skb(dev2->hard_header_len + skb->len + encap + 15, GFP_ATOMIC); |
skb | 260 | net/ipv4/ip_forward.c | if(skb_headroom(skb)<dev2->hard_header_len) |
skb | 262 | net/ipv4/ip_forward.c | skb2 = alloc_skb(dev2->hard_header_len + skb->len + 15, GFP_ATOMIC); |
skb | 285 | net/ipv4/ip_forward.c | skb_reserve(skb,(encap+dev->hard_header_len+15)&~15); /* 16 byte aligned IP headers are good */ |
skb | 286 | net/ipv4/ip_forward.c | ip_encap(skb2,skb->len, dev2, raddr); |
skb | 290 | net/ipv4/ip_forward.c | ip_send(rt,skb2,raddr,skb->len,dev2,dev2->pa_addr); |
skb | 297 | net/ipv4/ip_forward.c | ptr = skb_put(skb2,skb->len); |
skb | 304 | net/ipv4/ip_forward.c | memcpy(ptr, skb->h.raw, skb->len); |
skb | 305 | net/ipv4/ip_forward.c | memcpy(skb2->proto_priv, skb->proto_priv, sizeof(skb->proto_priv)); |
skb | 314 | net/ipv4/ip_forward.c | skb2 = skb; |
skb | 318 | net/ipv4/ip_forward.c | ip_encap(skb,skb->len, dev2, raddr); |
skb | 322 | net/ipv4/ip_forward.c | skb->arp=1; |
skb | 323 | net/ipv4/ip_forward.c | skb->raddr=raddr; |
skb | 326 | net/ipv4/ip_forward.c | memcpy(skb_push(skb, dev2->hard_header_len), hh->hh_data, dev2->hard_header_len); |
skb | 332 | net/ipv4/ip_forward.c | skb->arp = 0; |
skb | 337 | net/ipv4/ip_forward.c | if(dev2->hard_header(skb, dev2, ETH_P_IP, NULL, NULL, skb->len)<0) |
skb | 338 | net/ipv4/ip_forward.c | skb->arp=0; |
skb | 445 | net/ipv4/ip_forward.c | if(skb==skb2) |
skb | 453 | net/ipv4/ip_forward.c | if(skb!=skb_in) |
skb | 54 | net/ipv4/ip_fragment.c | extern __inline__ void frag_kfree_skb(struct sk_buff *skb, int type) |
skb | 59 | net/ipv4/ip_fragment.c | ip_frag_mem-=skb->truesize; |
skb | 61 | net/ipv4/ip_fragment.c | kfree_skb(skb,type); |
skb | 91 | net/ipv4/ip_fragment.c | static struct ipfrag *ip_frag_create(int offset, int end, struct sk_buff *skb, unsigned char *ptr) |
skb | 108 | net/ipv4/ip_fragment.c | fp->skb = skb; |
skb | 117 | net/ipv4/ip_fragment.c | ip_frag_mem+=skb->truesize; |
skb | 189 | net/ipv4/ip_fragment.c | IS_SKB(fp->skb); |
skb | 190 | net/ipv4/ip_fragment.c | frag_kfree_skb(fp->skb,FREE_READ); |
skb | 222 | net/ipv4/ip_fragment.c | icmp_send(qp->fragments->skb,ICMP_TIME_EXCEEDED, |
skb | 253 | net/ipv4/ip_fragment.c | static struct ipq *ip_create(struct sk_buff *skb, struct iphdr *iph, struct device *dev) |
skb | 263 | net/ipv4/ip_fragment.c | skb->dev = qp->dev; |
skb | 343 | net/ipv4/ip_fragment.c | struct sk_buff *skb; |
skb | 354 | net/ipv4/ip_fragment.c | if ((skb = dev_alloc_skb(len)) == NULL) |
skb | 363 | net/ipv4/ip_fragment.c | skb_put(skb,len); |
skb | 364 | net/ipv4/ip_fragment.c | skb->h.raw = skb->data; |
skb | 365 | net/ipv4/ip_fragment.c | skb->free = 1; |
skb | 368 | net/ipv4/ip_fragment.c | ptr = (unsigned char *) skb->h.raw; |
skb | 378 | net/ipv4/ip_fragment.c | if(count+fp->len > skb->len) |
skb | 382 | net/ipv4/ip_fragment.c | frag_kfree_skb(skb,FREE_WRITE); |
skb | 395 | net/ipv4/ip_fragment.c | iph = skb->h.iph; |
skb | 398 | net/ipv4/ip_fragment.c | skb->ip_hdr = iph; |
skb | 401 | net/ipv4/ip_fragment.c | return(skb); |
skb | 409 | net/ipv4/ip_fragment.c | struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct device *dev) |
skb | 441 | net/ipv4/ip_fragment.c | return(skb); |
skb | 474 | net/ipv4/ip_fragment.c | if ((qp = ip_create(skb, iph, dev)) == NULL) |
skb | 476 | net/ipv4/ip_fragment.c | skb->sk = NULL; |
skb | 477 | net/ipv4/ip_fragment.c | frag_kfree_skb(skb, FREE_READ); |
skb | 493 | net/ipv4/ip_fragment.c | ptr = skb->data + ihl; |
skb | 559 | net/ipv4/ip_fragment.c | frag_kfree_skb(tmp->skb,FREE_READ); |
skb | 569 | net/ipv4/ip_fragment.c | tfp = ip_frag_create(offset, end, skb, ptr); |
skb | 577 | net/ipv4/ip_fragment.c | skb->sk = NULL; |
skb | 578 | net/ipv4/ip_fragment.c | frag_kfree_skb(skb, FREE_READ); |
skb | 618 | net/ipv4/ip_fragment.c | void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag) |
skb | 632 | net/ipv4/ip_fragment.c | raw = skb->data; |
skb | 635 | net/ipv4/ip_fragment.c | skb->ip_hdr = iph; |
skb | 637 | net/ipv4/ip_fragment.c | iph = skb->ip_hdr; |
skb | 670 | net/ipv4/ip_fragment.c | icmp_send(skb,ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED,dev->mtu, dev); |
skb | 722 | net/ipv4/ip_fragment.c | skb2->arp = skb->arp; |
skb | 723 | net/ipv4/ip_fragment.c | if(skb->free==0) |
skb | 741 | net/ipv4/ip_fragment.c | skb2->raddr = skb->raddr; /* For rebuild_header - must be here */ |
skb | 771 | net/ipv4/ip_fragment.c | ip_options_fragment(skb); |
skb | 594 | net/ipv4/ip_fw.c | static struct sk_buff *revamp(struct sk_buff *skb, struct device *dev, struct ip_masq *ftp) |
skb | 596 | net/ipv4/ip_fw.c | struct iphdr *iph = skb->h.iph; |
skb | 635 | net/ipv4/ip_fw.c | while (skb->len - ((unsigned char *)data - skb->h.raw) > 18) |
skb | 672 | net/ipv4/ip_fw.c | return skb; |
skb | 711 | net/ipv4/ip_fw.c | return skb; |
skb | 735 | net/ipv4/ip_fw.c | printk("MASQUERADE: resizing needed for %d bytes (%ld)\n",diff, skb->len); |
skb | 737 | net/ipv4/ip_fw.c | skb2 = alloc_skb(MAX_HEADER + skb->len+diff, GFP_ATOMIC); |
skb | 740 | net/ipv4/ip_fw.c | return skb; |
skb | 742 | net/ipv4/ip_fw.c | skb2->free = skb->free; |
skb | 744 | net/ipv4/ip_fw.c | skb_put(skb2,skb->len + diff); |
skb | 745 | net/ipv4/ip_fw.c | skb2->h.raw = skb2->data + (skb->h.raw - skb->data); |
skb | 758 | net/ipv4/ip_fw.c | memcpy(skb2->data, skb->data, (p - (char *)skb->data)); |
skb | 759 | net/ipv4/ip_fw.c | memcpy(&skb2->data[(p - (char *)skb->data)], buf, strlen(buf)); |
skb | 760 | net/ipv4/ip_fw.c | memcpy(&skb2->data[(p - (char *)skb->data) + strlen(buf)], data, |
skb | 761 | net/ipv4/ip_fw.c | skb->len - (data-(char *)skb->data)); |
skb | 767 | net/ipv4/ip_fw.c | iph->tot_len = htons(skb->len + diff); |
skb | 774 | net/ipv4/ip_fw.c | kfree_skb(skb, FREE_WRITE); |
skb | 777 | net/ipv4/ip_fw.c | return skb; |
skb | 792 | net/ipv4/ip_fw.c | struct sk_buff *skb=*skb_ptr; |
skb | 793 | net/ipv4/ip_fw.c | struct iphdr *iph = skb->h.iph; |
skb | 853 | net/ipv4/ip_fw.c | size = skb->len - ((unsigned char *)portptr - skb->h.raw); |
skb | 871 | net/ipv4/ip_fw.c | skb = revamp(*skb_ptr, dev, ms); |
skb | 872 | net/ipv4/ip_fw.c | *skb_ptr = skb; |
skb | 873 | net/ipv4/ip_fw.c | iph = skb->h.iph; |
skb | 875 | net/ipv4/ip_fw.c | size = skb->len - ((unsigned char *)portptr-skb->h.raw); |
skb | 889 | net/ipv4/ip_fw.c | tcp_send_check(th,iph->saddr,iph->daddr,size,skb->sk); |
skb | 1696 | net/ipv4/ip_fw.c | int ipfw_input_check(struct firewall_ops *this, int pf, struct sk_buff *skb, void *phdr) |
skb | 1698 | net/ipv4/ip_fw.c | return ip_fw_chk(phdr, skb->dev, ip_fw_blk_chain, ip_fw_blk_policy, 0); |
skb | 1701 | net/ipv4/ip_fw.c | int ipfw_forward_check(struct firewall_ops *this, int pf, struct sk_buff *skb, void *phdr) |
skb | 1703 | net/ipv4/ip_fw.c | return ip_fw_chk(phdr, skb->dev, ip_fw_fwd_chain, ip_fw_fwd_policy, 0); |
skb | 197 | net/ipv4/ip_input.c | int ip_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 199 | net/ipv4/ip_input.c | struct iphdr *iph = skb->h.iph; |
skb | 222 | net/ipv4/ip_input.c | return ipv6_rcv(skb,dev,pt); |
skb | 231 | net/ipv4/ip_input.c | skb->ip_hdr = iph; |
skb | 246 | net/ipv4/ip_input.c | if (skb->len<sizeof(struct iphdr) || iph->ihl<5 || iph->version != 4 || ip_fast_csum((unsigned char *)iph, iph->ihl) !=0 |
skb | 247 | net/ipv4/ip_input.c | || skb->len < ntohs(iph->tot_len)) |
skb | 250 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 260 | net/ipv4/ip_input.c | skb_trim(skb,ntohs(iph->tot_len)); |
skb | 264 | net/ipv4/ip_input.c | skb->ip_summed = 0; |
skb | 265 | net/ipv4/ip_input.c | if (ip_options_compile(NULL, skb)) |
skb | 267 | net/ipv4/ip_input.c | opt = (struct options*)skb->proto_priv; |
skb | 271 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_READ); |
skb | 284 | net/ipv4/ip_input.c | if (iph->daddr != skb->dev->pa_addr && net_alias_has(skb->dev)) |
skb | 285 | net/ipv4/ip_input.c | skb->dev = dev = net_alias_dev_rcv_sel32(skb->dev, AF_INET, iph->saddr, iph->daddr); |
skb | 294 | net/ipv4/ip_input.c | if ((err=call_in_firewall(PF_INET, skb, iph))<FW_ACCEPT) |
skb | 297 | net/ipv4/ip_input.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0, dev); |
skb | 298 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 332 | net/ipv4/ip_input.c | if ( iph->daddr == skb->dev->pa_addr || (brd = ip_chk_addr(iph->daddr)) != 0) |
skb | 340 | net/ipv4/ip_input.c | if (brd != IS_MYADDR || skb->pkt_type != PACKET_HOST) |
skb | 342 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 354 | net/ipv4/ip_input.c | icmp_send(skb, ICMP_PARAMETERPROB, 0, opt->srr+2, |
skb | 355 | net/ipv4/ip_input.c | skb->dev); |
skb | 356 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 370 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 380 | net/ipv4/ip_input.c | if (ip_forward(skb, dev, is_frag, nexthop)) |
skb | 381 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 384 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 401 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 416 | net/ipv4/ip_input.c | if (ip_fw_demasquerade(skb)) |
skb | 418 | net/ipv4/ip_input.c | struct iphdr *iph=skb->h.iph; |
skb | 419 | net/ipv4/ip_input.c | if (ip_forward(skb, dev, is_frag|4, iph->daddr)) |
skb | 420 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 440 | net/ipv4/ip_input.c | skb=ip_defrag(iph,skb,dev); |
skb | 441 | net/ipv4/ip_input.c | if(skb==NULL) |
skb | 443 | net/ipv4/ip_input.c | skb->dev = dev; |
skb | 444 | net/ipv4/ip_input.c | iph=skb->h.iph; |
skb | 451 | net/ipv4/ip_input.c | skb->ip_hdr = iph; |
skb | 452 | net/ipv4/ip_input.c | skb->h.raw += iph->ihl*4; |
skb | 487 | net/ipv4/ip_input.c | skb1=skb_clone(skb, GFP_ATOMIC); |
skb | 530 | net/ipv4/ip_input.c | skb2 = skb_clone(skb, GFP_ATOMIC); |
skb | 536 | net/ipv4/ip_input.c | skb2 = skb; |
skb | 569 | net/ipv4/ip_input.c | ipmr_forward(skb, is_frag); |
skb | 572 | net/ipv4/ip_input.c | struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC); |
skb | 583 | net/ipv4/ip_input.c | raw_rcv(raw_sk, skb, dev, iph->saddr, daddr); |
skb | 587 | net/ipv4/ip_input.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0, dev); |
skb | 588 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 602 | net/ipv4/ip_input.c | if(skb->pkt_type!=PACKET_HOST || brd==IS_BROADCAST) |
skb | 604 | net/ipv4/ip_input.c | kfree_skb(skb,FREE_WRITE); |
skb | 615 | net/ipv4/ip_input.c | icmp_send(skb, ICMP_PARAMETERPROB, 0, 16, skb->dev); |
skb | 616 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 619 | net/ipv4/ip_input.c | if (ip_forward(skb, dev, is_frag, iph->daddr)) |
skb | 620 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 625 | net/ipv4/ip_input.c | kfree_skb(skb, FREE_WRITE); |
skb | 29 | net/ipv4/ip_options.c | void ip_options_build(struct sk_buff * skb, struct options * opt, |
skb | 33 | net/ipv4/ip_options.c | unsigned char * iph = (unsigned char*)skb->ip_hdr; |
skb | 35 | net/ipv4/ip_options.c | memcpy(skb->proto_priv, opt, sizeof(struct options)); |
skb | 37 | net/ipv4/ip_options.c | opt = (struct options*)skb->proto_priv; |
skb | 75 | net/ipv4/ip_options.c | struct sk_buff * skb) |
skb | 86 | net/ipv4/ip_options.c | sopt = (struct options*)skb->proto_priv; |
skb | 95 | net/ipv4/ip_options.c | (unsigned char *)skb->ip_hdr); |
skb | 194 | net/ipv4/ip_options.c | void ip_options_fragment(struct sk_buff * skb) |
skb | 196 | net/ipv4/ip_options.c | unsigned char * optptr = (unsigned char*)skb->ip_hdr; |
skb | 197 | net/ipv4/ip_options.c | struct options * opt = (struct options*)skb->proto_priv; |
skb | 234 | net/ipv4/ip_options.c | int ip_options_compile(struct options * opt, struct sk_buff * skb) |
skb | 244 | net/ipv4/ip_options.c | opt = (struct options*)skb->proto_priv; |
skb | 246 | net/ipv4/ip_options.c | iph = (unsigned char*)skb->ip_hdr; |
skb | 253 | net/ipv4/ip_options.c | optptr = opt->is_data ? opt->__data : (unsigned char*)&skb->ip_hdr[1]; |
skb | 302 | net/ipv4/ip_options.c | if (!skb) |
skb | 339 | net/ipv4/ip_options.c | if (skb) |
skb | 341 | net/ipv4/ip_options.c | memcpy(&optptr[optptr[2]-1], &skb->dev->pa_addr, 4); |
skb | 378 | net/ipv4/ip_options.c | if (skb) |
skb | 390 | net/ipv4/ip_options.c | if (skb) |
skb | 392 | net/ipv4/ip_options.c | memcpy(&optptr[ts->ptr-1], &skb->dev->pa_addr, 4); |
skb | 411 | net/ipv4/ip_options.c | if (skb) |
skb | 441 | net/ipv4/ip_options.c | if (skb) |
skb | 451 | net/ipv4/ip_options.c | if (!skb) |
skb | 466 | net/ipv4/ip_options.c | if (skb) |
skb | 468 | net/ipv4/ip_options.c | icmp_send(skb, ICMP_PARAMETERPROB, 0, pp_ptr-iph, skb->dev); |
skb | 469 | net/ipv4/ip_options.c | kfree_skb(skb, FREE_READ); |
skb | 67 | net/ipv4/ip_output.c | static void ip_loopback(struct device *old_dev, struct sk_buff *skb) |
skb | 70 | net/ipv4/ip_output.c | int len=ntohs(skb->ip_hdr->tot_len); |
skb | 79 | net/ipv4/ip_output.c | newskb->saddr=skb->saddr; |
skb | 80 | net/ipv4/ip_output.c | newskb->daddr=skb->daddr; |
skb | 81 | net/ipv4/ip_output.c | newskb->raddr=skb->raddr; |
skb | 85 | net/ipv4/ip_output.c | newskb->pkt_type=skb->pkt_type; |
skb | 90 | net/ipv4/ip_output.c | ip_send(NULL,newskb, skb->ip_hdr->daddr, len, dev, skb->ip_hdr->saddr); |
skb | 95 | net/ipv4/ip_output.c | memcpy(newskb->proto_priv, skb->proto_priv, sizeof(skb->proto_priv)); |
skb | 100 | net/ipv4/ip_output.c | memcpy(newskb->ip_hdr,skb->ip_hdr,len); |
skb | 114 | net/ipv4/ip_output.c | int ip_send(struct rtable * rt, struct sk_buff *skb, __u32 daddr, int len, struct device *dev, __u32 saddr) |
skb | 118 | net/ipv4/ip_output.c | skb->dev = dev; |
skb | 119 | net/ipv4/ip_output.c | skb->arp = 1; |
skb | 120 | net/ipv4/ip_output.c | skb->protocol = htons(ETH_P_IP); |
skb | 127 | net/ipv4/ip_output.c | skb_reserve(skb,(dev->hard_header_len+15)&~15); /* 16 byte aligned IP headers are good */ |
skb | 130 | net/ipv4/ip_output.c | memcpy(skb_push(skb,dev->hard_header_len),rt->rt_hh->hh_data,dev->hard_header_len); |
skb | 136 | net/ipv4/ip_output.c | skb->arp = 0; |
skb | 137 | net/ipv4/ip_output.c | skb->raddr = daddr; |
skb | 140 | net/ipv4/ip_output.c | mac = dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, len); |
skb | 144 | net/ipv4/ip_output.c | skb->arp = 0; |
skb | 145 | net/ipv4/ip_output.c | skb->raddr = daddr; /* next routing address */ |
skb | 151 | net/ipv4/ip_output.c | static int ip_send_room(struct rtable * rt, struct sk_buff *skb, __u32 daddr, int len, struct device *dev, __u32 saddr) |
skb | 155 | net/ipv4/ip_output.c | skb->dev = dev; |
skb | 156 | net/ipv4/ip_output.c | skb->arp = 1; |
skb | 157 | net/ipv4/ip_output.c | skb->protocol = htons(ETH_P_IP); |
skb | 160 | net/ipv4/ip_output.c | skb_reserve(skb,MAX_HEADER); |
skb | 163 | net/ipv4/ip_output.c | memcpy(skb_push(skb,dev->hard_header_len),rt->rt_hh->hh_data,dev->hard_header_len); |
skb | 169 | net/ipv4/ip_output.c | skb->arp = 0; |
skb | 170 | net/ipv4/ip_output.c | skb->raddr = daddr; |
skb | 173 | net/ipv4/ip_output.c | mac = dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, len); |
skb | 177 | net/ipv4/ip_output.c | skb->arp = 0; |
skb | 178 | net/ipv4/ip_output.c | skb->raddr = daddr; /* next routing address */ |
skb | 192 | net/ipv4/ip_output.c | int ip_build_header(struct sk_buff *skb, __u32 saddr, __u32 daddr, |
skb | 211 | net/ipv4/ip_output.c | if(MULTICAST(daddr) && *dev==NULL && skb->sk && *skb->sk->ip_mc_name) |
skb | 212 | net/ipv4/ip_output.c | *dev=dev_get(skb->sk->ip_mc_name); |
skb | 216 | net/ipv4/ip_output.c | rt = ip_check_route(rp, daddr, skb->localroute); |
skb | 225 | net/ipv4/ip_output.c | rt = ip_rt_route(daddr, skb->localroute); |
skb | 263 | net/ipv4/ip_output.c | tmp = ip_send_room(rt, skb, raddr, len, *dev, saddr); |
skb | 265 | net/ipv4/ip_output.c | tmp = ip_send(rt, skb, raddr, len, *dev, saddr); |
skb | 273 | net/ipv4/ip_output.c | skb->dev = *dev; |
skb | 274 | net/ipv4/ip_output.c | skb->saddr = saddr; |
skb | 293 | net/ipv4/ip_output.c | iph=(struct iphdr *)skb_put(skb,sizeof(struct iphdr) + opt->optlen); |
skb | 295 | net/ipv4/ip_output.c | iph=(struct iphdr *)skb_put(skb,sizeof(struct iphdr)); |
skb | 305 | net/ipv4/ip_output.c | skb->ip_hdr = iph; |
skb | 310 | net/ipv4/ip_output.c | ip_options_build(skb, opt, final_daddr, (*dev)->pa_addr, 0); |
skb | 335 | net/ipv4/ip_output.c | struct sk_buff *skb, int free) |
skb | 347 | net/ipv4/ip_output.c | IS_SKB(skb); |
skb | 354 | net/ipv4/ip_output.c | skb->dev = dev; |
skb | 355 | net/ipv4/ip_output.c | skb->when = jiffies; |
skb | 365 | net/ipv4/ip_output.c | iph = skb->ip_hdr; |
skb | 366 | net/ipv4/ip_output.c | iph->tot_len = ntohs(skb->len-(((unsigned char *)iph)-skb->data)); |
skb | 369 | net/ipv4/ip_output.c | if(call_out_firewall(PF_INET, skb, iph) < FW_ACCEPT) |
skb | 387 | net/ipv4/ip_output.c | skb->free = free; |
skb | 397 | net/ipv4/ip_output.c | ip_fragment(sk,skb,dev,0); |
skb | 398 | net/ipv4/ip_output.c | IS_SKB(skb); |
skb | 399 | net/ipv4/ip_output.c | kfree_skb(skb,FREE_WRITE); |
skb | 417 | net/ipv4/ip_output.c | if (skb->next != NULL) |
skb | 420 | net/ipv4/ip_output.c | skb_unlink(skb); |
skb | 441 | net/ipv4/ip_output.c | if (skb->link3 != NULL) |
skb | 444 | net/ipv4/ip_output.c | skb->link3 = NULL; |
skb | 448 | net/ipv4/ip_output.c | sk->send_tail = skb; |
skb | 449 | net/ipv4/ip_output.c | sk->send_head = skb; |
skb | 453 | net/ipv4/ip_output.c | sk->send_tail->link3 = skb; |
skb | 454 | net/ipv4/ip_output.c | sk->send_tail = skb; |
skb | 463 | net/ipv4/ip_output.c | skb->sk = sk; |
skb | 486 | net/ipv4/ip_output.c | ip_loopback(dev,skb); |
skb | 495 | net/ipv4/ip_output.c | ip_loopback(dev,skb); |
skb | 504 | net/ipv4/ip_output.c | if(skb->ip_hdr->ttl==0) |
skb | 506 | net/ipv4/ip_output.c | kfree_skb(skb, FREE_READ); |
skb | 512 | net/ipv4/ip_output.c | ip_loopback(dev,skb); |
skb | 523 | net/ipv4/ip_output.c | dev_queue_xmit(skb, dev, sk->priority); |
skb | 527 | net/ipv4/ip_output.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 536 | net/ipv4/ip_output.c | kfree_skb(skb, FREE_WRITE); |
skb | 649 | net/ipv4/ip_output.c | struct sk_buff *skb=sock_alloc_send_skb(sk, length+15+dev->hard_header_len,0, noblock, &error); |
skb | 650 | net/ipv4/ip_output.c | if(skb==NULL) |
skb | 655 | net/ipv4/ip_output.c | skb->dev=dev; |
skb | 656 | net/ipv4/ip_output.c | skb->protocol = htons(ETH_P_IP); |
skb | 657 | net/ipv4/ip_output.c | skb->free=1; |
skb | 658 | net/ipv4/ip_output.c | skb->when=jiffies; |
skb | 659 | net/ipv4/ip_output.c | skb->sk=sk; |
skb | 660 | net/ipv4/ip_output.c | skb->arp=0; |
skb | 661 | net/ipv4/ip_output.c | skb->saddr=saddr; |
skb | 662 | net/ipv4/ip_output.c | skb->raddr = raddr; |
skb | 663 | net/ipv4/ip_output.c | skb_reserve(skb,(dev->hard_header_len+15)&~15); |
skb | 666 | net/ipv4/ip_output.c | skb->arp=1; |
skb | 667 | net/ipv4/ip_output.c | memcpy(skb_push(skb,dev->hard_header_len),hh->hh_data,dev->hard_header_len); |
skb | 670 | net/ipv4/ip_output.c | skb->arp = 0; |
skb | 678 | net/ipv4/ip_output.c | if(dev->hard_header(skb,dev,ETH_P_IP,NULL,NULL,0)>0) |
skb | 679 | net/ipv4/ip_output.c | skb->arp=1; |
skb | 682 | net/ipv4/ip_output.c | skb->arp=1; |
skb | 683 | net/ipv4/ip_output.c | skb->ip_hdr=iph=(struct iphdr *)skb_put(skb,length); |
skb | 700 | net/ipv4/ip_output.c | ip_options_build(skb, opt, |
skb | 711 | net/ipv4/ip_output.c | if(call_out_firewall(PF_INET, skb, iph)< FW_ACCEPT) |
skb | 713 | net/ipv4/ip_output.c | kfree_skb(skb, FREE_WRITE); |
skb | 718 | net/ipv4/ip_output.c | ip_fw_chk((void *)skb->data,dev,ip_acct_chain, IP_FW_F_ACCEPT,1); |
skb | 721 | net/ipv4/ip_output.c | dev_queue_xmit(skb,dev,sk->priority); |
skb | 725 | net/ipv4/ip_output.c | kfree_skb(skb, FREE_WRITE); |
skb | 800 | net/ipv4/ip_output.c | struct sk_buff * skb; |
skb | 808 | net/ipv4/ip_output.c | skb = sock_alloc_send_skb(sk, fraglen+15, 0, noblock, &error); |
skb | 809 | net/ipv4/ip_output.c | if (skb == NULL) |
skb | 822 | net/ipv4/ip_output.c | skb->dev = dev; |
skb | 823 | net/ipv4/ip_output.c | skb->protocol = htons(ETH_P_IP); |
skb | 824 | net/ipv4/ip_output.c | skb->when = jiffies; |
skb | 825 | net/ipv4/ip_output.c | skb->free = 1; /* dubious, this one */ |
skb | 826 | net/ipv4/ip_output.c | skb->sk = sk; |
skb | 827 | net/ipv4/ip_output.c | skb->arp = 0; |
skb | 828 | net/ipv4/ip_output.c | skb->saddr = saddr; |
skb | 829 | net/ipv4/ip_output.c | skb->raddr = raddr; |
skb | 830 | net/ipv4/ip_output.c | skb_reserve(skb,(dev->hard_header_len+15)&~15); |
skb | 831 | net/ipv4/ip_output.c | data = skb_put(skb, fraglen-dev->hard_header_len); |
skb | 842 | net/ipv4/ip_output.c | skb->arp=1; |
skb | 843 | net/ipv4/ip_output.c | memcpy(skb_push(skb,dev->hard_header_len),hh->hh_data,dev->hard_header_len); |
skb | 846 | net/ipv4/ip_output.c | skb->arp = 0; |
skb | 854 | net/ipv4/ip_output.c | if(dev->hard_header(skb, dev, ETH_P_IP, |
skb | 856 | net/ipv4/ip_output.c | skb->arp=1; |
skb | 863 | net/ipv4/ip_output.c | skb->ip_hdr = iph = (struct iphdr *)data; |
skb | 876 | net/ipv4/ip_output.c | ip_options_build(skb, opt, |
skb | 915 | net/ipv4/ip_output.c | if(!offset && call_out_firewall(PF_INET, skb, iph) < FW_ACCEPT) |
skb | 917 | net/ipv4/ip_output.c | kfree_skb(skb, FREE_WRITE); |
skb | 946 | net/ipv4/ip_output.c | if(skb->daddr==IGMP_ALL_HOSTS || (dev->flags&IFF_ALLMULTI)) |
skb | 947 | net/ipv4/ip_output.c | ip_loopback(dev,skb); |
skb | 955 | net/ipv4/ip_output.c | ip_loopback(dev,skb); |
skb | 968 | net/ipv4/ip_output.c | if(skb->ip_hdr->ttl==0) |
skb | 969 | net/ipv4/ip_output.c | kfree_skb(skb, FREE_READ); |
skb | 980 | net/ipv4/ip_output.c | ip_loopback(dev,skb); |
skb | 988 | net/ipv4/ip_output.c | dev_queue_xmit(skb, dev, sk->priority); |
skb | 999 | net/ipv4/ip_output.c | kfree_skb(skb, FREE_WRITE); |
skb | 1038 | net/ipv4/ip_output.c | struct sk_buff *skb=alloc_skb(sizeof(struct netlink_rtinfo), GFP_ATOMIC); |
skb | 1041 | net/ipv4/ip_output.c | if(skb==NULL) |
skb | 1043 | net/ipv4/ip_output.c | nrt=(struct netlink_rtinfo *)skb_put(skb, sizeof(struct netlink_rtinfo)); |
skb | 1057 | net/ipv4/ip_output.c | netlink_post(NETLINK_ROUTE, skb); |
skb | 52 | net/ipv4/ipip.c | int ipip_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 68 | net/ipv4/ipip.c | skb_pull(skb, ((struct iphdr *)skb->data)->ihl<<2); |
skb | 74 | net/ipv4/ipip.c | skb->h.iph=(struct iphdr *)skb->data; |
skb | 75 | net/ipv4/ipip.c | skb->ip_hdr=(struct iphdr *)skb->data; |
skb | 76 | net/ipv4/ipip.c | memset(skb->proto_priv, 0, sizeof(struct options)); |
skb | 77 | net/ipv4/ipip.c | if (skb->ip_hdr->ihl > 5) |
skb | 79 | net/ipv4/ipip.c | if (ip_options_compile(NULL, skb)) |
skb | 88 | net/ipv4/ipip.c | if((err=call_in_firewall(PF_INET, skb, skb->ip_hdr))<FW_ACCEPT) |
skb | 91 | net/ipv4/ipip.c | icmp_send(skb,ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0 , dev); |
skb | 92 | net/ipv4/ipip.c | kfree_skb(skb, FREE_READ); |
skb | 108 | net/ipv4/ipip.c | if(ip_forward(skb, dev, 0, daddr)) |
skb | 109 | net/ipv4/ipip.c | kfree_skb(skb, FREE_READ); |
skb | 98 | net/ipv4/ipmr.c | struct sk_buff *skb; |
skb | 139 | net/ipv4/ipmr.c | while((skb=skb_dequeue(&cache->mfc_unresolved))) |
skb | 140 | net/ipv4/ipmr.c | kfree_skb(skb, FREE_WRITE); |
skb | 215 | net/ipv4/ipmr.c | struct sk_buff *skb; |
skb | 243 | net/ipv4/ipmr.c | while((skb=skb_dequeue(&cache->mfc_unresolved))) |
skb | 244 | net/ipv4/ipmr.c | ipmr_forward(skb, skb->protocol); |
skb | 254 | net/ipv4/ipmr.c | struct sk_buff *skb=alloc_skb(128, GFP_ATOMIC); |
skb | 257 | net/ipv4/ipmr.c | if(!skb) |
skb | 260 | net/ipv4/ipmr.c | skb->free=1; |
skb | 266 | net/ipv4/ipmr.c | skb->ip_hdr=(struct iphdr *)skb_put(skb,ihl); |
skb | 267 | net/ipv4/ipmr.c | skb->h.iph=skb->ip_hdr; |
skb | 268 | net/ipv4/ipmr.c | memcpy(skb->data,pkt->data,ihl); |
skb | 269 | net/ipv4/ipmr.c | skb->ip_hdr->protocol = 0; /* Flag to the kernel this is a route add */ |
skb | 275 | net/ipv4/ipmr.c | igmp=(struct igmphdr *)skb_put(skb,sizeof(struct igmphdr)); |
skb | 278 | net/ipv4/ipmr.c | skb->ip_hdr->tot_len=htons(skb->len); /* Fix the length */ |
skb | 283 | net/ipv4/ipmr.c | if(sock_queue_rcv_skb(mroute_socket,skb)<0) |
skb | 285 | net/ipv4/ipmr.c | skb->sk=NULL; |
skb | 286 | net/ipv4/ipmr.c | kfree_skb(skb, FREE_READ); |
skb | 295 | net/ipv4/ipmr.c | static void ipmr_cache_unresolved(struct mfc_cache *cache, vifi_t vifi, struct sk_buff *skb, int is_frag) |
skb | 304 | net/ipv4/ipmr.c | kfree_skb(skb, FREE_WRITE); |
skb | 311 | net/ipv4/ipmr.c | cache->mfc_origin=skb->ip_hdr->saddr; |
skb | 312 | net/ipv4/ipmr.c | cache->mfc_mcastgrp=skb->ip_hdr->daddr; |
skb | 329 | net/ipv4/ipmr.c | ipmr_cache_report(skb); |
skb | 336 | net/ipv4/ipmr.c | kfree_skb(skb, FREE_WRITE); |
skb | 344 | net/ipv4/ipmr.c | skb->protocol=is_frag; |
skb | 345 | net/ipv4/ipmr.c | skb_queue_tail(&cache->mfc_unresolved,skb); |
skb | 701 | net/ipv4/ipmr.c | static void ipmr_queue_xmit(struct sk_buff *skb, struct vif_device *vif, struct device *in_dev, int frag) |
skb | 704 | net/ipv4/ipmr.c | __u32 raddr=skb->raddr; |
skb | 711 | net/ipv4/ipmr.c | vif->bytes_out+=skb->len; |
skb | 712 | net/ipv4/ipmr.c | skb->dev=vif->dev; |
skb | 713 | net/ipv4/ipmr.c | skb->raddr=skb->h.iph->daddr; |
skb | 714 | net/ipv4/ipmr.c | if(ip_forward(skb, in_dev, frag|8|tunnel, raddr)==-1) |
skb | 715 | net/ipv4/ipmr.c | kfree_skb(skb, FREE_WRITE); |
skb | 722 | net/ipv4/ipmr.c | void ipmr_forward(struct sk_buff *skb, int is_frag) |
skb | 727 | net/ipv4/ipmr.c | int vif=ipmr_vifi_find(skb->dev); |
skb | 730 | net/ipv4/ipmr.c | kfree_skb(skb, FREE_WRITE); |
skb | 735 | net/ipv4/ipmr.c | vif_table[vif].bytes_in+=skb->len; |
skb | 737 | net/ipv4/ipmr.c | cache=ipmr_cache_find(skb->ip_hdr->saddr,skb->ip_hdr->daddr); |
skb | 744 | net/ipv4/ipmr.c | ipmr_cache_unresolved(cache,vif,skb, is_frag); |
skb | 756 | net/ipv4/ipmr.c | if(skb->ip_hdr->ttl > cache->mfc_ttls[ct] && cache->mfc_ttls[ct]>0) |
skb | 760 | net/ipv4/ipmr.c | skb2=skb_clone(skb, GFP_ATOMIC); |
skb | 764 | net/ipv4/ipmr.c | ipmr_queue_xmit(skb2, &vif_table[psend], skb->dev, is_frag); |
skb | 772 | net/ipv4/ipmr.c | kfree_skb(skb, FREE_WRITE); |
skb | 775 | net/ipv4/ipmr.c | ipmr_queue_xmit(skb, &vif_table[psend], skb->dev, is_frag); |
skb | 74 | net/ipv4/packet.c | int packet_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 90 | net/ipv4/packet.c | skb_push(skb,skb->data-skb->mac.raw); |
skb | 96 | net/ipv4/packet.c | skb->dev = dev; |
skb | 103 | net/ipv4/packet.c | if(sock_queue_rcv_skb(sk,skb)<0) |
skb | 105 | net/ipv4/packet.c | skb->sk = NULL; |
skb | 106 | net/ipv4/packet.c | kfree_skb(skb, FREE_READ); |
skb | 126 | net/ipv4/packet.c | struct sk_buff *skb; |
skb | 168 | net/ipv4/packet.c | skb = sock_wmalloc(sk, len, 0, GFP_KERNEL); |
skb | 176 | net/ipv4/packet.c | if (skb == NULL) |
skb | 185 | net/ipv4/packet.c | skb->sk = sk; |
skb | 186 | net/ipv4/packet.c | skb->free = 1; |
skb | 187 | net/ipv4/packet.c | memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len); |
skb | 188 | net/ipv4/packet.c | skb->arp = 1; /* No ARP needs doing on this (complete) frame */ |
skb | 195 | net/ipv4/packet.c | dev_queue_xmit(skb, dev, sk->priority); |
skb | 197 | net/ipv4/packet.c | kfree_skb(skb, FREE_WRITE); |
skb | 383 | net/ipv4/packet.c | struct sk_buff *skb; |
skb | 411 | net/ipv4/packet.c | skb=skb_recv_datagram(sk,flags,noblock,&err); |
skb | 419 | net/ipv4/packet.c | if(skb==NULL) |
skb | 427 | net/ipv4/packet.c | copied = min(len, skb->len); |
skb | 429 | net/ipv4/packet.c | memcpy_toiovec(msg->msg_iov, skb->data, copied); /* We can't use skb_copy_datagram here */ |
skb | 430 | net/ipv4/packet.c | sk->stamp=skb->stamp; |
skb | 438 | net/ipv4/packet.c | saddr->sa_family = skb->dev->type; |
skb | 439 | net/ipv4/packet.c | memcpy(saddr->sa_data,skb->dev->name, 14); |
skb | 447 | net/ipv4/packet.c | skb_free_datagram(skb); |
skb | 193 | net/ipv4/rarp.c | static int rarp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 198 | net/ipv4/rarp.c | struct arphdr *rarp = (struct arphdr *) skb->data; |
skb | 199 | net/ipv4/rarp.c | unsigned char *rarp_ptr = skb_pull(skb,sizeof(struct arphdr)); |
skb | 211 | net/ipv4/rarp.c | kfree_skb(skb, FREE_READ); |
skb | 220 | net/ipv4/rarp.c | kfree_skb(skb, FREE_READ); |
skb | 238 | net/ipv4/rarp.c | kfree_skb(skb, FREE_READ); |
skb | 274 | net/ipv4/rarp.c | kfree_skb(skb, FREE_READ); |
skb | 119 | net/ipv4/raw.c | int raw_rcv(struct sock *sk, struct sk_buff *skb, struct device *dev, __u32 saddr, __u32 daddr) |
skb | 122 | net/ipv4/raw.c | skb->sk = sk; |
skb | 123 | net/ipv4/raw.c | skb_trim(skb,ntohs(skb->ip_hdr->tot_len)); |
skb | 125 | net/ipv4/raw.c | skb->h.raw = (unsigned char *) skb->ip_hdr; |
skb | 126 | net/ipv4/raw.c | skb->dev = dev; |
skb | 127 | net/ipv4/raw.c | skb->saddr = daddr; |
skb | 128 | net/ipv4/raw.c | skb->daddr = saddr; |
skb | 137 | net/ipv4/raw.c | skb->ip_hdr->tot_len=ntohs(skb->ip_hdr->tot_len-4*skb->ip_hdr->ihl); |
skb | 142 | net/ipv4/raw.c | if(sock_queue_rcv_skb(sk,skb)<0) |
skb | 145 | net/ipv4/raw.c | skb->sk=NULL; |
skb | 146 | net/ipv4/raw.c | kfree_skb(skb, FREE_READ); |
skb | 317 | net/ipv4/raw.c | struct sk_buff *skb; |
skb | 330 | net/ipv4/raw.c | skb=skb_recv_datagram(sk,flags,noblock,&err); |
skb | 331 | net/ipv4/raw.c | if(skb==NULL) |
skb | 334 | net/ipv4/raw.c | copied = min(len, skb->len); |
skb | 336 | net/ipv4/raw.c | skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); |
skb | 337 | net/ipv4/raw.c | sk->stamp=skb->stamp; |
skb | 343 | net/ipv4/raw.c | sin->sin_addr.s_addr = skb->daddr; |
skb | 345 | net/ipv4/raw.c | skb_free_datagram(skb); |
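
The packet.c and raw.c recvmsg entries just above share one datagram receive pattern: skb_recv_datagram() pulls the next queued buffer (blocking or not), the payload is copied out with skb_copy_datagram_iovec() or memcpy_toiovec(), the arrival time is saved in sk->stamp, and skb_free_datagram() releases the buffer. A minimal sketch of that flow, assuming only the calls listed above; the function name and the stripped-down error handling are placeholders:

    /* Illustrative sketch only; not the actual raw_recvmsg() body. */
    static int example_recvmsg(struct sock *sk, struct msghdr *msg,
                               int len, int noblock, int flags)
    {
        int err = 0, copied;
        struct sk_buff *skb = skb_recv_datagram(sk, flags, noblock, &err);

        if (skb == NULL)
            return err;
        copied = min(len, skb->len);            /* never copy past the datagram */
        skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
        sk->stamp = skb->stamp;                 /* kept for SIOCGSTAMP */
        skb_free_datagram(skb);
        return copied;
    }
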
skb | 584 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 588 | net/ipv4/tcp.c | skb=tcp_find_established(s); |
skb | 589 | net/ipv4/tcp.c | if(skb!=NULL) |
skb | 590 | net/ipv4/tcp.c | skb_unlink(skb); /* Take it off the queue */ |
skb | 592 | net/ipv4/tcp.c | return skb; |
skb | 603 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 605 | net/ipv4/tcp.c | while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) |
skb | 607 | net/ipv4/tcp.c | skb->sk->dead=1; |
skb | 608 | net/ipv4/tcp.c | tcp_close(skb->sk, 0); |
skb | 609 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 634 | net/ipv4/tcp.c | struct sk_buff * skb; |
skb | 641 | net/ipv4/tcp.c | skb = sk->send_head; |
skb | 643 | net/ipv4/tcp.c | while (skb != NULL) |
skb | 649 | net/ipv4/tcp.c | dev = skb->dev; |
skb | 650 | net/ipv4/tcp.c | IS_SKB(skb); |
skb | 651 | net/ipv4/tcp.c | skb->when = jiffies; |
skb | 657 | net/ipv4/tcp.c | skb_pull(skb,((unsigned char *)skb->ip_hdr)-skb->data); |
skb | 668 | net/ipv4/tcp.c | iph = (struct iphdr *)skb->data; |
skb | 684 | net/ipv4/tcp.c | struct options * opt = (struct options*)skb->proto_priv; |
skb | 685 | net/ipv4/tcp.c | rt = ip_check_route(&sk->ip_route_cache, opt->srr?opt->faddr:iph->daddr, skb->localroute); |
skb | 697 | net/ipv4/tcp.c | if(skb->sk) |
skb | 699 | net/ipv4/tcp.c | skb->sk->err_soft=ENETUNREACH; |
skb | 700 | net/ipv4/tcp.c | skb->sk->error_report(skb->sk); |
skb | 706 | net/ipv4/tcp.c | skb->raddr=rt->rt_gateway; |
skb | 707 | net/ipv4/tcp.c | skb->dev=dev; |
skb | 708 | net/ipv4/tcp.c | skb->arp=1; |
skb | 711 | net/ipv4/tcp.c | memcpy(skb_push(skb,dev->hard_header_len),rt->rt_hh->hh_data,dev->hard_header_len); |
skb | 714 | net/ipv4/tcp.c | skb->arp = 0; |
skb | 722 | net/ipv4/tcp.c | if(dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, skb->len)<0) |
skb | 723 | net/ipv4/tcp.c | skb->arp=0; |
skb | 755 | net/ipv4/tcp.c | if (sk && !skb_device_locked(skb)) |
skb | 758 | net/ipv4/tcp.c | skb_unlink(skb); |
skb | 761 | net/ipv4/tcp.c | dev_queue_xmit(skb, dev, sk->priority); |
skb | 789 | net/ipv4/tcp.c | skb = skb->link3; |
skb | 1004 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 1009 | net/ipv4/tcp.c | skb = sk->send_head; |
skb | 1010 | net/ipv4/tcp.c | if (!skb) |
skb | 1020 | net/ipv4/tcp.c | if (jiffies < skb->when + sk->rto) |
skb | 1022 | net/ipv4/tcp.c | reset_xmit_timer (sk, TIME_WRITE, skb->when + sk->rto - jiffies); |
skb | 1159 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 1168 | net/ipv4/tcp.c | if (sk == NULL || (skb = skb_peek(&sk->receive_queue)) == NULL) |
skb | 1185 | net/ipv4/tcp.c | if (before(counted, skb->seq)) /* Found a hole so stops here */ |
skb | 1187 | net/ipv4/tcp.c | sum = skb->len - (counted - skb->seq); /* Length - header but start from where we are up to (avoid overlaps) */ |
skb | 1188 | net/ipv4/tcp.c | if (skb->h.th->syn) |
skb | 1193 | net/ipv4/tcp.c | if (skb->h.th->syn) |
skb | 1213 | net/ipv4/tcp.c | if (skb->h.th->urg) |
skb | 1215 | net/ipv4/tcp.c | if (amount && skb->h.th->psh) break; |
skb | 1216 | net/ipv4/tcp.c | skb = skb->next; |
skb | 1218 | net/ipv4/tcp.c | while(skb != (struct sk_buff *)&sk->receive_queue); |
skb | 1383 | net/ipv4/tcp.c | static void tcp_send_skb(struct sock *sk, struct sk_buff *skb) |
skb | 1386 | net/ipv4/tcp.c | struct tcphdr * th = skb->h.th; |
skb | 1392 | net/ipv4/tcp.c | size = skb->len - ((unsigned char *) th - skb->data); |
skb | 1398 | net/ipv4/tcp.c | if (size < sizeof(struct tcphdr) || size > skb->len) |
skb | 1401 | net/ipv4/tcp.c | skb, skb->data, th, skb->len); |
skb | 1402 | net/ipv4/tcp.c | kfree_skb(skb, FREE_WRITE); |
skb | 1417 | net/ipv4/tcp.c | kfree_skb(skb,FREE_WRITE); |
skb | 1427 | net/ipv4/tcp.c | skb->seq = ntohl(th->seq); |
skb | 1428 | net/ipv4/tcp.c | skb->end_seq = skb->seq + size - 4*th->doff; |
skb | 1438 | net/ipv4/tcp.c | if (after(skb->end_seq, sk->window_seq) || |
skb | 1445 | net/ipv4/tcp.c | if (skb->next != NULL) |
skb | 1448 | net/ipv4/tcp.c | skb_unlink(skb); |
skb | 1450 | net/ipv4/tcp.c | skb_queue_tail(&sk->write_queue, skb); |
skb | 1482 | net/ipv4/tcp.c | sk->prot->queue_xmit(sk, skb->dev, skb, 0); |
skb | 1505 | net/ipv4/tcp.c | struct sk_buff * skb; |
skb | 1510 | net/ipv4/tcp.c | skb = sk->partial; |
skb | 1511 | net/ipv4/tcp.c | if (skb) { |
skb | 1516 | net/ipv4/tcp.c | return skb; |
skb | 1525 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 1529 | net/ipv4/tcp.c | while ((skb = tcp_dequeue_partial(sk)) != NULL) |
skb | 1530 | net/ipv4/tcp.c | tcp_send_skb(sk, skb); |
skb | 1537 | net/ipv4/tcp.c | void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk) |
skb | 1547 | net/ipv4/tcp.c | sk->partial = skb; |
skb | 1714 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 1866 | net/ipv4/tcp.c | if ((skb = tcp_dequeue_partial(sk)) != NULL) |
skb | 1871 | net/ipv4/tcp.c | hdrlen = ((unsigned long)skb->h.th - (unsigned long)skb->data) |
skb | 1877 | net/ipv4/tcp.c | copy = min(sk->mss - (skb->len - hdrlen), seglen); |
skb | 1883 | net/ipv4/tcp.c | memcpy_fromfs(skb_put(skb,copy), from, copy); |
skb | 1890 | net/ipv4/tcp.c | if ((skb->len - hdrlen) >= sk->mss || |
skb | 1892 | net/ipv4/tcp.c | tcp_send_skb(sk, skb); |
skb | 1894 | net/ipv4/tcp.c | tcp_enqueue_partial(skb, sk); |
skb | 1931 | net/ipv4/tcp.c | skb = sock_wmalloc(sk, sk->mtu + 128 + prot->max_header + 15, 0, GFP_KERNEL); |
skb | 1933 | net/ipv4/tcp.c | send_tmp = skb; |
skb | 1941 | net/ipv4/tcp.c | skb = sock_wmalloc(sk, copy + prot->max_header + 15 , 0, GFP_KERNEL); |
skb | 1949 | net/ipv4/tcp.c | if (skb == NULL) |
skb | 1989 | net/ipv4/tcp.c | skb->sk = sk; |
skb | 1990 | net/ipv4/tcp.c | skb->free = 0; |
skb | 1991 | net/ipv4/tcp.c | skb->localroute = sk->localroute|(flags&MSG_DONTROUTE); |
skb | 1998 | net/ipv4/tcp.c | tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev, |
skb | 1999 | net/ipv4/tcp.c | IPPROTO_TCP, sk->opt, skb->truesize,sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache); |
skb | 2002 | net/ipv4/tcp.c | sock_wfree(sk, skb); |
skb | 2009 | net/ipv4/tcp.c | skb->ip_hdr->frag_off |= htons(IP_DF); |
skb | 2011 | net/ipv4/tcp.c | skb->dev = dev; |
skb | 2012 | net/ipv4/tcp.c | skb->h.th =(struct tcphdr *)skb_put(skb,sizeof(struct tcphdr)); |
skb | 2013 | net/ipv4/tcp.c | tmp = tcp_build_header(skb->h.th, sk, seglen-copy); |
skb | 2016 | net/ipv4/tcp.c | sock_wfree(sk, skb); |
skb | 2025 | net/ipv4/tcp.c | skb->h.th->urg = 1; |
skb | 2026 | net/ipv4/tcp.c | skb->h.th->urg_ptr = ntohs(copy); |
skb | 2029 | net/ipv4/tcp.c | memcpy_fromfs(skb_put(skb,copy), from, copy); |
skb | 2035 | net/ipv4/tcp.c | skb->free = 0; |
skb | 2043 | net/ipv4/tcp.c | tcp_send_skb(sk, skb); |
skb | 2160 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 2176 | net/ipv4/tcp.c | while((skb=skb_peek(&sk->receive_queue)) != NULL) |
skb | 2178 | net/ipv4/tcp.c | if (!skb->used || skb->users) |
skb | 2180 | net/ipv4/tcp.c | skb_unlink(skb); |
skb | 2181 | net/ipv4/tcp.c | skb->sk = sk; |
skb | 2182 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 2345 | net/ipv4/tcp.c | struct sk_buff * skb; |
skb | 2361 | net/ipv4/tcp.c | skb = skb_peek(&sk->receive_queue); |
skb | 2364 | net/ipv4/tcp.c | if (!skb) |
skb | 2366 | net/ipv4/tcp.c | if (before(*seq, skb->seq)) |
skb | 2368 | net/ipv4/tcp.c | offset = *seq - skb->seq; |
skb | 2369 | net/ipv4/tcp.c | if (skb->h.th->syn) |
skb | 2371 | net/ipv4/tcp.c | if (offset < skb->len) |
skb | 2373 | net/ipv4/tcp.c | if (skb->h.th->fin) |
skb | 2376 | net/ipv4/tcp.c | skb->used = 1; |
skb | 2377 | net/ipv4/tcp.c | skb = skb->next; |
skb | 2379 | net/ipv4/tcp.c | while (skb != (struct sk_buff *)&sk->receive_queue); |
skb | 2435 | net/ipv4/tcp.c | skb->users++; |
skb | 2441 | net/ipv4/tcp.c | used = skb->len - offset; |
skb | 2480 | net/ipv4/tcp.c | memcpy_toiovec(msg->msg_iov,((unsigned char *)skb->h.th) + |
skb | 2481 | net/ipv4/tcp.c | skb->h.th->doff*4 + offset, used); |
skb | 2491 | net/ipv4/tcp.c | skb->users --; |
skb | 2495 | net/ipv4/tcp.c | if (used + offset < skb->len) |
skb | 2502 | net/ipv4/tcp.c | if (skb->h.th->fin) |
skb | 2506 | net/ipv4/tcp.c | skb->used = 1; |
skb | 2518 | net/ipv4/tcp.c | skb->used = 1; |
skb | 2939 | net/ipv4/tcp.c | static void tcp_conn_request(struct sock *sk, struct sk_buff *skb, |
skb | 2952 | net/ipv4/tcp.c | th = skb->h.th; |
skb | 2965 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 2977 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 2994 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 3006 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 3009 | net/ipv4/tcp.c | if (ip_options_echo(sk->opt, opt, daddr, saddr, skb)) { |
skb | 3013 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 3045 | net/ipv4/tcp.c | newsk->acked_seq = skb->seq+1; |
skb | 3046 | net/ipv4/tcp.c | newsk->copied_seq = skb->seq+1; |
skb | 3047 | net/ipv4/tcp.c | newsk->fin_seq = skb->seq; |
skb | 3064 | net/ipv4/tcp.c | newsk->dummy_th.source = skb->h.th->dest; |
skb | 3065 | net/ipv4/tcp.c | newsk->dummy_th.dest = skb->h.th->source; |
skb | 3085 | net/ipv4/tcp.c | newsk->acked_seq = skb->seq + 1; |
skb | 3086 | net/ipv4/tcp.c | newsk->copied_seq = skb->seq + 1; |
skb | 3094 | net/ipv4/tcp.c | newsk->ip_tos=skb->ip_hdr->tos; |
skb | 3143 | net/ipv4/tcp.c | tcp_options(newsk,skb->h.th); |
skb | 3155 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 3182 | net/ipv4/tcp.c | skb->sk = sk; |
skb | 3183 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 3190 | net/ipv4/tcp.c | memcpy(t1, skb->h.th, sizeof(*t1)); |
skb | 3196 | net/ipv4/tcp.c | t1->dest = skb->h.th->source; |
skb | 3220 | net/ipv4/tcp.c | skb->sk = newsk; |
skb | 3226 | net/ipv4/tcp.c | sk->rmem_alloc -= skb->truesize; |
skb | 3227 | net/ipv4/tcp.c | newsk->rmem_alloc += skb->truesize; |
skb | 3229 | net/ipv4/tcp.c | skb_queue_tail(&sk->receive_queue,skb); |
skb | 3264 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 3272 | net/ipv4/tcp.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) |
skb | 3273 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 3311 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 3329 | net/ipv4/tcp.c | while((skb = skb_peek(&sk->write_queue)) != NULL && |
skb | 3330 | net/ipv4/tcp.c | before(skb->end_seq, sk->window_seq + 1) && |
skb | 3333 | net/ipv4/tcp.c | before(skb->end_seq, sk->rcv_ack_seq + 1)) |
skb | 3336 | net/ipv4/tcp.c | IS_SKB(skb); |
skb | 3337 | net/ipv4/tcp.c | skb_unlink(skb); |
skb | 3343 | net/ipv4/tcp.c | if (before(skb->end_seq, sk->rcv_ack_seq +1)) |
skb | 3351 | net/ipv4/tcp.c | kfree_skb(skb, FREE_WRITE); |
skb | 3367 | net/ipv4/tcp.c | iph = skb->ip_hdr; |
skb | 3369 | net/ipv4/tcp.c | size = skb->len - (((unsigned char *) th) - skb->data); |
skb | 3383 | net/ipv4/tcp.c | sk->sent_seq = skb->end_seq; |
skb | 3389 | net/ipv4/tcp.c | sk->prot->queue_xmit(sk, skb->dev, skb, skb->free); |
skb | 3497 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 3516 | net/ipv4/tcp.c | skb = skb2; |
skb | 3517 | net/ipv4/tcp.c | skb2 = skb->link3; |
skb | 3518 | net/ipv4/tcp.c | skb->link3 = NULL; |
skb | 3519 | net/ipv4/tcp.c | if (after(skb->end_seq, sk->window_seq)) |
skb | 3524 | net/ipv4/tcp.c | if (skb->next != NULL) |
skb | 3526 | net/ipv4/tcp.c | skb_unlink(skb); |
skb | 3530 | net/ipv4/tcp.c | skb_queue_head(&sk->write_queue,skb); |
skb | 3532 | net/ipv4/tcp.c | skb_append(wskb,skb); |
skb | 3533 | net/ipv4/tcp.c | wskb = skb; |
skb | 3539 | net/ipv4/tcp.c | sk->send_head = skb; |
skb | 3540 | net/ipv4/tcp.c | sk->send_tail = skb; |
skb | 3544 | net/ipv4/tcp.c | sk->send_tail->link3 = skb; |
skb | 3545 | net/ipv4/tcp.c | sk->send_tail = skb; |
skb | 3547 | net/ipv4/tcp.c | skb->link3 = NULL; |
skb | 4016 | net/ipv4/tcp.c | static int tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th) |
skb | 4018 | net/ipv4/tcp.c | sk->fin_seq = skb->end_seq; |
skb | 4103 | net/ipv4/tcp.c | extern __inline__ int tcp_data(struct sk_buff *skb, struct sock *sk, |
skb | 4111 | net/ipv4/tcp.c | th = skb->h.th; |
skb | 4112 | net/ipv4/tcp.c | skb_pull(skb,th->doff*4); |
skb | 4113 | net/ipv4/tcp.c | skb_trim(skb,len-(th->doff*4)); |
skb | 4120 | net/ipv4/tcp.c | sk->bytes_rcv += skb->len; |
skb | 4122 | net/ipv4/tcp.c | if (skb->len == 0 && !th->fin) |
skb | 4130 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 4149 | net/ipv4/tcp.c | if(skb->len) /* We don't care if it's just an ack or |
skb | 4152 | net/ipv4/tcp.c | new_seq = skb->seq + skb->len + th->syn; /* Right edge of _data_ part of frame */ |
skb | 4175 | net/ipv4/tcp.c | tcp_reset(sk->saddr, sk->daddr, skb->h.th, |
skb | 4176 | net/ipv4/tcp.c | sk->prot, NULL, skb->dev, sk->ip_tos, sk->ip_ttl); |
skb | 4182 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 4205 | net/ipv4/tcp.c | skb_queue_head(&sk->receive_queue,skb); |
skb | 4216 | net/ipv4/tcp.c | printk("skb->seq = %d\n",skb->seq); |
skb | 4229 | net/ipv4/tcp.c | if (skb->seq==skb1->seq && skb->len>=skb1->len) |
skb | 4231 | net/ipv4/tcp.c | skb_append(skb1,skb); |
skb | 4243 | net/ipv4/tcp.c | if (after(skb->seq+1, skb1->seq)) |
skb | 4245 | net/ipv4/tcp.c | skb_append(skb1,skb); |
skb | 4254 | net/ipv4/tcp.c | skb_queue_head(&sk->receive_queue, skb); |
skb | 4276 | net/ipv4/tcp.c | if ((!dup_dumped && (skb1 == NULL || skb1->acked)) || before(skb->seq, sk->acked_seq+1)) |
skb | 4278 | net/ipv4/tcp.c | if (before(skb->seq, sk->acked_seq+1)) |
skb | 4282 | net/ipv4/tcp.c | if (after(skb->end_seq, sk->acked_seq)) |
skb | 4284 | net/ipv4/tcp.c | newwindow = sk->window - (skb->end_seq - sk->acked_seq); |
skb | 4288 | net/ipv4/tcp.c | sk->acked_seq = skb->end_seq; |
skb | 4290 | net/ipv4/tcp.c | skb->acked = 1; |
skb | 4297 | net/ipv4/tcp.c | if (skb->h.th->fin) |
skb | 4299 | net/ipv4/tcp.c | tcp_fin(skb,sk,skb->h.th); |
skb | 4302 | net/ipv4/tcp.c | for(skb2 = skb->next; |
skb | 4324 | net/ipv4/tcp.c | tcp_fin(skb,sk,skb->h.th); |
skb | 4363 | net/ipv4/tcp.c | if (!skb->acked) |
skb | 4501 | net/ipv4/tcp.c | struct sk_buff *skb; |
skb | 4518 | net/ipv4/tcp.c | while((skb = tcp_dequeue_established(sk)) == NULL) |
skb | 4544 | net/ipv4/tcp.c | newsk = skb->sk; |
skb | 4546 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 4786 | net/ipv4/tcp.c | static int tcp_std_reset(struct sock *sk, struct sk_buff *skb) |
skb | 4809 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 4819 | net/ipv4/tcp.c | int tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 4828 | net/ipv4/tcp.c | if(skb->pkt_type!=PACKET_HOST) |
skb | 4830 | net/ipv4/tcp.c | kfree_skb(skb,FREE_READ); |
skb | 4834 | net/ipv4/tcp.c | th = skb->h.th; |
skb | 4876 | net/ipv4/tcp.c | skb_pull(skb, skb->h.raw-skb->data); |
skb | 4881 | net/ipv4/tcp.c | (skb->ip_summed && tcp_check(th, len, saddr, daddr, skb->csum ))|| |
skb | 4882 | net/ipv4/tcp.c | (!skb->ip_summed && tcp_check(th, len, saddr, daddr, csum_partial((char *)th, len, 0))) |
skb | 4885 | net/ipv4/tcp.c | skb->sk = NULL; |
skb | 4886 | net/ipv4/tcp.c | kfree_skb(skb,FREE_READ); |
skb | 4894 | net/ipv4/tcp.c | skb->seq = ntohl(th->seq); |
skb | 4895 | net/ipv4/tcp.c | skb->end_seq = skb->seq + th->syn + th->fin + len - th->doff*4; |
skb | 4896 | net/ipv4/tcp.c | skb->ack_seq = ntohl(th->ack_seq); |
skb | 4904 | net/ipv4/tcp.c | tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev,skb->ip_hdr->tos,255); |
skb | 4905 | net/ipv4/tcp.c | skb->sk = NULL; |
skb | 4909 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 4913 | net/ipv4/tcp.c | skb->acked = 0; |
skb | 4914 | net/ipv4/tcp.c | skb->used = 0; |
skb | 4915 | net/ipv4/tcp.c | skb->free = 0; |
skb | 4916 | net/ipv4/tcp.c | skb->saddr = daddr; |
skb | 4917 | net/ipv4/tcp.c | skb->daddr = saddr; |
skb | 4923 | net/ipv4/tcp.c | skb_queue_tail(&sk->back_log, skb); |
skb | 4934 | net/ipv4/tcp.c | tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev,skb->ip_hdr->tos,255); |
skb | 4935 | net/ipv4/tcp.c | skb->sk = NULL; |
skb | 4936 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 4953 | net/ipv4/tcp.c | skb->sk=sk; |
skb | 4954 | net/ipv4/tcp.c | sk->rmem_alloc += skb->truesize; |
skb | 4984 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 4993 | net/ipv4/tcp.c | tcp_conn_request(sk, skb, daddr, saddr, opt, dev, tcp_init_seq()); |
skb | 5009 | net/ipv4/tcp.c | if (sk->state == TCP_SYN_RECV && th->syn && skb->seq+1 == sk->acked_seq) |
skb | 5011 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 5034 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 5039 | net/ipv4/tcp.c | return tcp_std_reset(sk,skb); |
skb | 5044 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 5053 | net/ipv4/tcp.c | sk->acked_seq = skb->seq+1; |
skb | 5054 | net/ipv4/tcp.c | sk->fin_seq = skb->seq; |
skb | 5083 | net/ipv4/tcp.c | return tcp_std_reset(sk,skb); |
skb | 5093 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 5112 | net/ipv4/tcp.c | after(skb->seq, sk->acked_seq) && !th->rst) |
skb | 5118 | net/ipv4/tcp.c | sk->rmem_alloc -= skb->truesize; |
skb | 5119 | net/ipv4/tcp.c | skb->sk = NULL; |
skb | 5128 | net/ipv4/tcp.c | skb->sk = sk; |
skb | 5129 | net/ipv4/tcp.c | sk->rmem_alloc += skb->truesize; |
skb | 5130 | net/ipv4/tcp.c | tcp_conn_request(sk, skb, daddr, saddr,opt, dev,seq+128000); |
skb | 5134 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 5148 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 5154 | net/ipv4/tcp.c | return tcp_std_reset(sk,skb); |
skb | 5162 | net/ipv4/tcp.c | tcp_reset(daddr,saddr,th, &tcp_prot, opt, dev, skb->ip_hdr->tos, 255); |
skb | 5163 | net/ipv4/tcp.c | return tcp_std_reset(sk,skb); |
skb | 5181 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 5196 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 5208 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 5217 | net/ipv4/tcp.c | if(tcp_data(skb,sk, saddr, len)) |
skb | 5219 | net/ipv4/tcp.c | kfree_skb(skb, FREE_READ); |
skb | 5239 | net/ipv4/tcp.c | struct sk_buff *buff,*skb; |
skb | 5263 | net/ipv4/tcp.c | (skb=skb_peek(&sk->write_queue))) |
skb | 5290 | net/ipv4/tcp.c | iph = (struct iphdr *)skb->ip_hdr; |
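
The net/ipv4/tcp.c rows above trace two cooperating paths: the send side builds a partially filled segment that is parked with tcp_enqueue_partial() and later flushed by tcp_dequeue_partial()/tcp_send_skb() (lines 1866-1894), while the receive side walks sk->receive_queue by sequence number. The sketch below condenses the partial-segment coalescing step only; the helper name tcp_append_to_partial and the "+ sizeof(struct tcphdr)" completion of the hdrlen expression (line 1871 is truncated in this index) are my assumptions, everything else comes from the quoted rows, and locking plus error handling are omitted.

    /*
     * Sketch of the 1.3-era "partial packet" coalescing indexed above:
     * new user data is appended to the parked skb with skb_put(); once
     * the segment reaches sk->mss it goes to tcp_send_skb(), otherwise
     * it is parked again with tcp_enqueue_partial().
     */
    static void tcp_append_to_partial(struct sock *sk, const char *from, int seglen)
    {
        struct sk_buff *skb;
        int hdrlen, copy;

        if ((skb = tcp_dequeue_partial(sk)) == NULL)
            return;                         /* nothing parked: caller allocates a fresh skb */

        /* Bytes already used by the IP + TCP headers in this buffer (assumed layout). */
        hdrlen = ((unsigned long)skb->h.th - (unsigned long)skb->data)
                 + sizeof(struct tcphdr);

        /* Fill the segment up to the MSS, but no further. */
        copy = min(sk->mss - (skb->len - hdrlen), seglen);
        memcpy_fromfs(skb_put(skb, copy), from, copy);

        if ((skb->len - hdrlen) >= sk->mss)
            tcp_send_skb(sk, skb);          /* full segment: queue for transmission */
        else
            tcp_enqueue_partial(skb, sk);   /* still short: park it again */
    }
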
skb | 138 | net/ipv4/udp.c | static int udp_deliver(struct sock *sk, struct udphdr *uh, struct sk_buff *skb, struct device *dev, long saddr, long daddr, int len); |
skb | 465 | net/ipv4/udp.c | struct sk_buff *skb; |
skb | 470 | net/ipv4/udp.c | skb = skb_peek(&sk->receive_queue); |
skb | 471 | net/ipv4/udp.c | if (skb != NULL) { |
skb | 477 | net/ipv4/udp.c | amount = skb->len; |
skb | 504 | net/ipv4/udp.c | struct sk_buff *skb; |
skb | 520 | net/ipv4/udp.c | skb=skb_recv_datagram(sk,flags,noblock,&er); |
skb | 521 | net/ipv4/udp.c | if(skb==NULL) |
skb | 524 | net/ipv4/udp.c | truesize = skb->len - sizeof(struct udphdr); |
skb | 531 | net/ipv4/udp.c | skb_copy_datagram_iovec(skb,sizeof(struct udphdr),msg->msg_iov,copied); |
skb | 532 | net/ipv4/udp.c | sk->stamp=skb->stamp; |
skb | 538 | net/ipv4/udp.c | sin->sin_port = skb->h.uh->source; |
skb | 539 | net/ipv4/udp.c | sin->sin_addr.s_addr = skb->daddr; |
skb | 542 | net/ipv4/udp.c | skb_free_datagram(skb); |
skb | 594 | net/ipv4/udp.c | int udp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt, |
skb | 610 | net/ipv4/udp.c | uh = (struct udphdr *) skb->h.uh; |
skb | 624 | net/ipv4/udp.c | kfree_skb(skb, FREE_WRITE); |
skb | 636 | net/ipv4/udp.c | ( skb->ip_summed && udp_check(uh, len, saddr, daddr, skb->csum ) ) || |
skb | 637 | net/ipv4/udp.c | ( !skb->ip_summed && udp_check(uh, len, saddr, daddr,csum_partial((char*)uh, len, 0))) |
skb | 652 | net/ipv4/udp.c | kfree_skb(skb, FREE_WRITE); |
skb | 676 | net/ipv4/udp.c | skb1=skb_clone(skb,GFP_ATOMIC); |
skb | 678 | net/ipv4/udp.c | skb1=skb; |
skb | 686 | net/ipv4/udp.c | kfree_skb(skb, FREE_READ); |
skb | 707 | net/ipv4/udp.c | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0, dev); |
skb | 713 | net/ipv4/udp.c | skb->sk = NULL; |
skb | 714 | net/ipv4/udp.c | kfree_skb(skb, FREE_WRITE); |
skb | 717 | net/ipv4/udp.c | return udp_deliver(sk,uh,skb,dev, saddr, daddr, len); |
skb | 720 | net/ipv4/udp.c | static int udp_deliver(struct sock *sk, struct udphdr *uh, struct sk_buff *skb, struct device *dev, long saddr, long daddr, int len) |
skb | 722 | net/ipv4/udp.c | skb->sk = sk; |
skb | 723 | net/ipv4/udp.c | skb->dev = dev; |
skb | 724 | net/ipv4/udp.c | skb_trim(skb,len); |
skb | 730 | net/ipv4/udp.c | skb->daddr = saddr; |
skb | 731 | net/ipv4/udp.c | skb->saddr = daddr; |
skb | 741 | net/ipv4/udp.c | if (sock_queue_rcv_skb(sk,skb)<0) |
skb | 746 | net/ipv4/udp.c | skb->sk = NULL; |
skb | 747 | net/ipv4/udp.c | kfree_skb(skb, FREE_WRITE); |
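
The net/ipv4/udp.c rows show both halves of the SOCK_DGRAM pattern: udp_recvmsg() pulls one datagram with skb_recv_datagram()/skb_copy_datagram_iovec()/skb_free_datagram() (lines 504-542), and udp_deliver() charges an inbound packet to its socket with sock_queue_rcv_skb() (lines 720-747). Below is a receive-side sketch built only from the calls quoted above; the helper name and the explicit truncation to the caller's length are my additions, and the checksum and sockaddr handling of the real function are left out.

    /*
     * Condensed datagram receive pattern: block for one skb, copy the
     * payload past the UDP header into the user's iovec, then release
     * the buffer.
     */
    static int udp_recv_sketch(struct sock *sk, struct msghdr *msg,
                               int len, int noblock, int flags)
    {
        struct sk_buff *skb;
        int copied, err;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (skb == NULL)
            return err;

        copied = skb->len - sizeof(struct udphdr);
        if (copied > len)
            copied = len;                   /* truncate to the user's buffer (assumed) */

        /* Skip the UDP header; the iovec helper does the user-space copy. */
        skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov, copied);
        sk->stamp = skb->stamp;             /* remember arrival time for SIOCGSTAMP */

        skb_free_datagram(skb);
        return copied;
    }
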
skb | 180 | net/ipx/af_ipx.c | struct sk_buff *skb; |
skb | 183 | net/ipx/af_ipx.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) { |
skb | 184 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 327 | net/ipx/af_ipx.c | static int ipxitf_def_skb_handler(struct sock *sock, struct sk_buff *skb) |
skb | 331 | net/ipx/af_ipx.c | if((retval = sock_queue_rcv_skb(sock, skb))<0) |
skb | 341 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_WRITE); |
skb | 352 | net/ipx/af_ipx.c | static int ipxitf_demux_socket(ipx_interface *intrfc, struct sk_buff *skb, int copy) |
skb | 354 | net/ipx/af_ipx.c | ipx_packet *ipx = (ipx_packet *)(skb->h.raw); |
skb | 397 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_WRITE); |
skb | 411 | net/ipx/af_ipx.c | skb1 = skb_clone(skb, GFP_ATOMIC); |
skb | 417 | net/ipx/af_ipx.c | skb1 = skb; |
skb | 449 | net/ipx/af_ipx.c | ipxitf_adjust_skbuff(ipx_interface *intrfc, struct sk_buff *skb) |
skb | 452 | net/ipx/af_ipx.c | int in_offset = skb->h.raw - skb->head; |
skb | 458 | net/ipx/af_ipx.c | skb->arp = skb->free = 1; |
skb | 459 | net/ipx/af_ipx.c | return skb; |
skb | 463 | net/ipx/af_ipx.c | len = skb->len + out_offset; |
skb | 467 | net/ipx/af_ipx.c | skb2->h.raw=skb_put(skb2,skb->len); |
skb | 470 | net/ipx/af_ipx.c | memcpy(skb2->h.raw, skb->h.raw, skb->len); |
skb | 472 | net/ipx/af_ipx.c | kfree_skb(skb, FREE_WRITE); |
skb | 476 | net/ipx/af_ipx.c | static int ipxitf_send(ipx_interface *intrfc, struct sk_buff *skb, char *node) |
skb | 478 | net/ipx/af_ipx.c | ipx_packet *ipx = (ipx_packet *)(skb->h.raw); |
skb | 510 | net/ipx/af_ipx.c | if(skb->sk) |
skb | 511 | net/ipx/af_ipx.c | skb->sk->wmem_alloc-=skb->truesize; |
skb | 515 | net/ipx/af_ipx.c | return ipxitf_demux_socket(intrfc, skb, 0); |
skb | 522 | net/ipx/af_ipx.c | if (!send_to_wire && skb->sk) |
skb | 523 | net/ipx/af_ipx.c | skb->sk->wmem_alloc-=skb->truesize; |
skb | 524 | net/ipx/af_ipx.c | ipxitf_demux_socket(intrfc, skb, send_to_wire); |
skb | 552 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_WRITE); |
skb | 570 | net/ipx/af_ipx.c | skb = ipxitf_adjust_skbuff(intrfc, skb); |
skb | 571 | net/ipx/af_ipx.c | if (skb == NULL) |
skb | 575 | net/ipx/af_ipx.c | skb->dev = dev; |
skb | 576 | net/ipx/af_ipx.c | skb->protocol = htons(ETH_P_IPX); |
skb | 577 | net/ipx/af_ipx.c | dl->datalink_header(dl, skb, dest_node); |
skb | 583 | net/ipx/af_ipx.c | dump_pkt("IPX snd:", (ipx_packet *)skb->h.raw); |
skb | 584 | net/ipx/af_ipx.c | dump_data("ETH hdr:", skb->data, skb->h.raw - skb->data); |
skb | 591 | net/ipx/af_ipx.c | dev_queue_xmit(skb, dev, SOPRI_NORMAL); |
skb | 606 | net/ipx/af_ipx.c | static int ipxitf_rcv(ipx_interface *intrfc, struct sk_buff *skb) |
skb | 608 | net/ipx/af_ipx.c | ipx_packet *ipx = (ipx_packet *) (skb->h.raw); |
skb | 616 | net/ipx/af_ipx.c | if (call_in_firewall(PF_IPX, skb, ipx)!=FW_ACCEPT) |
skb | 618 | net/ipx/af_ipx.c | kfree_skb(skb, FREE_READ); |
skb | 660 | net/ipx/af_ipx.c | if (call_fw_firewall(PF_IPX, skb, ipx)!=FW_ACCEPT) |
skb | 662 | net/ipx/af_ipx.c | kfree_skb(skb, FREE_READ); |
skb | 667 | net/ipx/af_ipx.c | if ((skb->pkt_type != PACKET_BROADCAST) && |
skb | 668 | net/ipx/af_ipx.c | (skb->pkt_type != PACKET_MULTICAST)) |
skb | 669 | net/ipx/af_ipx.c | return ipxrtr_route_skb(skb); |
skb | 671 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 679 | net/ipx/af_ipx.c | return ipxitf_demux_socket(intrfc, skb, 0); |
skb | 683 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 1068 | net/ipx/af_ipx.c | struct sk_buff *skb; |
skb | 1095 | net/ipx/af_ipx.c | skb=sock_alloc_send_skb(sk, size, 0, 0, &err); |
skb | 1096 | net/ipx/af_ipx.c | if(skb==NULL) |
skb | 1099 | net/ipx/af_ipx.c | skb_reserve(skb,ipx_offset); |
skb | 1100 | net/ipx/af_ipx.c | skb->free=1; |
skb | 1101 | net/ipx/af_ipx.c | skb->arp=1; |
skb | 1102 | net/ipx/af_ipx.c | skb->sk=sk; |
skb | 1105 | net/ipx/af_ipx.c | ipx=(ipx_packet *)skb_put(skb,sizeof(ipx_packet)); |
skb | 1110 | net/ipx/af_ipx.c | skb->h.raw = (unsigned char *)ipx; |
skb | 1119 | net/ipx/af_ipx.c | memcpy_fromiovec(skb_put(skb,len),iov,len); |
skb | 1122 | net/ipx/af_ipx.c | if(call_out_firewall(PF_IPX, skb, ipx)!=FW_ACCEPT) |
skb | 1124 | net/ipx/af_ipx.c | kfree_skb(skb, FREE_WRITE); |
skb | 1129 | net/ipx/af_ipx.c | return ipxitf_send(intrfc, skb, (rt && rt->ir_routed) ? |
skb | 1134 | net/ipx/af_ipx.c | ipxrtr_route_skb(struct sk_buff *skb) |
skb | 1136 | net/ipx/af_ipx.c | ipx_packet *ipx = (ipx_packet *) (skb->h.raw); |
skb | 1143 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 1147 | net/ipx/af_ipx.c | (void)ipxitf_send(i, skb, (r->ir_routed) ? |
skb | 1740 | net/ipx/af_ipx.c | int ipx_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt) |
skb | 1747 | net/ipx/af_ipx.c | ipx=(ipx_packet *)skb->h.raw; |
skb | 1753 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 1759 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 1772 | net/ipx/af_ipx.c | kfree_skb(skb,FREE_READ); |
skb | 1777 | net/ipx/af_ipx.c | return ipxitf_rcv(intrfc, skb); |
skb | 1836 | net/ipx/af_ipx.c | struct sk_buff *skb; |
skb | 1846 | net/ipx/af_ipx.c | skb=skb_recv_datagram(sk,flags,noblock,&er); |
skb | 1847 | net/ipx/af_ipx.c | if(skb==NULL) |
skb | 1853 | net/ipx/af_ipx.c | ipx = (ipx_packet *)(skb->h.raw); |
skb | 1856 | net/ipx/af_ipx.c | skb_copy_datagram_iovec(skb,sizeof(struct ipx_packet),msg->msg_iov,copied); |
skb | 1866 | net/ipx/af_ipx.c | skb_free_datagram(skb); |
skb | 1901 | net/ipx/af_ipx.c | struct sk_buff *skb; |
skb | 1903 | net/ipx/af_ipx.c | if((skb=skb_peek(&sk->receive_queue))!=NULL) |
skb | 1904 | net/ipx/af_ipx.c | amount=skb->len; |
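
The net/ipx/af_ipx.c rows cover the IPX transmit path: the sendmsg code builds a buffer with sock_alloc_send_skb()/skb_reserve()/skb_put() (lines 1095-1119), runs it through call_out_firewall(), and hands it to ipxitf_send(), which either loops it back through ipxitf_demux_socket() or queues it to the device with dev_queue_xmit(). The sketch below covers only the buffer-building step; the helper name, parameter list and size arithmetic are my assumptions, while ipx_offset, the ipx_packet cast and the flag settings come from the rows. Route lookup and address fill-in are omitted.

    /*
     * Sketch of the IPX send-side allocation: reserve link-level
     * headroom, lay down the IPX header with skb_put(), then append
     * the payload from the user's iovec.
     */
    static struct sk_buff *ipx_build_xmit_skb(struct sock *sk, int ipx_offset,
                                              struct iovec *iov, int len, int *err)
    {
        struct sk_buff *skb;
        ipx_packet *ipx;
        int size = ipx_offset + sizeof(ipx_packet) + len;   /* assumed sizing */

        skb = sock_alloc_send_skb(sk, size, 0, 0, err);
        if (skb == NULL)
            return NULL;

        skb_reserve(skb, ipx_offset);       /* room for the datalink header */
        skb->free = 1;
        skb->arp  = 1;
        skb->sk   = sk;

        ipx = (ipx_packet *)skb_put(skb, sizeof(ipx_packet));
        skb->h.raw = (unsigned char *)ipx;  /* IPX header starts here */

        memcpy_fromiovec(skb_put(skb, len), iov, len);
        return skb;
    }
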
skb | 33 | net/netlink.c | static int (*netlink_handler[MAX_LINKS])(struct sk_buff *skb); |
skb | 49 | net/netlink.c | static int netlink_err(struct sk_buff *skb) |
skb | 51 | net/netlink.c | kfree_skb(skb, FREE_READ); |
skb | 60 | net/netlink.c | int netlink_donothing(struct sk_buff *skb) |
skb | 62 | net/netlink.c | kfree_skb(skb, FREE_READ); |
skb | 73 | net/netlink.c | struct sk_buff *skb; |
skb | 74 | net/netlink.c | skb=alloc_skb(count, GFP_KERNEL); |
skb | 75 | net/netlink.c | memcpy_fromfs(skb_put(skb,count),buf, count); |
skb | 76 | net/netlink.c | return (netlink_handler[minor])(skb); |
skb | 86 | net/netlink.c | struct sk_buff *skb; |
skb | 88 | net/netlink.c | while((skb=skb_dequeue(&skb_queue_rd[minor]))==NULL) |
skb | 102 | net/netlink.c | rdq_size[minor]-=skb->len; |
skb | 104 | net/netlink.c | if(skb->len<count) |
skb | 105 | net/netlink.c | count=skb->len; |
skb | 106 | net/netlink.c | memcpy_tofs(buf,skb->data,count); |
skb | 107 | net/netlink.c | kfree_skb(skb, FREE_READ); |
skb | 176 | net/netlink.c | int netlink_attach(int unit, int (*function)(struct sk_buff *skb)) |
skb | 193 | net/netlink.c | int netlink_post(int unit, struct sk_buff *skb) |
skb | 201 | net/netlink.c | if(rdq_size[unit]+skb->len>MAX_QBYTES) |
skb | 205 | net/netlink.c | skb_queue_tail(&skb_queue_rd[unit], skb); |
skb | 206 | net/netlink.c | rdq_size[unit]+=skb->len; |
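
The net/netlink.c rows describe the original character-device netlink: a write from user space becomes an skb handed to the per-minor handler (lines 73-76), netlink_post() lets kernel code queue skbs toward the reader subject to MAX_QBYTES (lines 193-206), and the read side dequeues and copies out with memcpy_tofs(). The sketch below shows both producer steps; the helper names, the NULL check after alloc_skb() and the specific error codes are my assumptions, the queue and accounting arrays come from the rows.

    /* Write side: user data becomes an skb for the registered handler. */
    static int netlink_write_sketch(int minor, const char *buf, int count)
    {
        struct sk_buff *skb = alloc_skb(count, GFP_KERNEL);

        if (skb == NULL)
            return -ENOMEM;                 /* check added here; error code assumed */
        memcpy_fromfs(skb_put(skb, count), buf, count);
        return (netlink_handler[minor])(skb);
    }

    /* Kernel-to-user post: bounded by a per-unit byte budget. */
    static int netlink_post_sketch(int unit, struct sk_buff *skb)
    {
        if (rdq_size[unit] + skb->len > MAX_QBYTES)
            return -EAGAIN;                 /* reader is falling behind (error code assumed) */

        skb_queue_tail(&skb_queue_rd[unit], skb);
        rdq_size[unit] += skb->len;
        return 0;
    }
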
skb | 246 | net/netrom/af_netrom.c | struct sk_buff *skb; |
skb | 257 | net/netrom/af_netrom.c | while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) { |
skb | 258 | net/netrom/af_netrom.c | if (skb->sk != sk) { /* A pending connection */ |
skb | 259 | net/netrom/af_netrom.c | skb->sk->dead = 1; /* Queue the unaccepted socket for death */ |
skb | 260 | net/netrom/af_netrom.c | nr_set_timer(skb->sk); |
skb | 261 | net/netrom/af_netrom.c | skb->sk->nr->state = NR_STATE_0; |
skb | 264 | net/netrom/af_netrom.c | kfree_skb(skb, FREE_READ); |
skb | 840 | net/netrom/af_netrom.c | struct sk_buff *skb; |
skb | 859 | net/netrom/af_netrom.c | if ((skb = skb_dequeue(&sk->receive_queue)) == NULL) { |
skb | 870 | net/netrom/af_netrom.c | } while (skb == NULL); |
skb | 872 | net/netrom/af_netrom.c | newsk = skb->sk; |
skb | 877 | net/netrom/af_netrom.c | skb->sk = NULL; |
skb | 878 | net/netrom/af_netrom.c | kfree_skb(skb, FREE_READ); |
skb | 911 | net/netrom/af_netrom.c | int nr_rx_frame(struct sk_buff *skb, struct device *dev) |
skb | 920 | net/netrom/af_netrom.c | skb->sk = NULL; /* Initially we don't know who its for */ |
skb | 926 | net/netrom/af_netrom.c | src = (ax25_address *)(skb->data + 0); |
skb | 927 | net/netrom/af_netrom.c | dest = (ax25_address *)(skb->data + 7); |
skb | 929 | net/netrom/af_netrom.c | circuit_index = skb->data[15]; |
skb | 930 | net/netrom/af_netrom.c | circuit_id = skb->data[16]; |
skb | 931 | net/netrom/af_netrom.c | frametype = skb->data[19]; |
skb | 938 | net/netrom/af_netrom.c | skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); |
skb | 939 | net/netrom/af_netrom.c | skb->h.raw = skb->data; |
skb | 941 | net/netrom/af_netrom.c | return nr_rx_ip(skb, dev); |
skb | 951 | net/netrom/af_netrom.c | skb->h.raw = skb->data; |
skb | 953 | net/netrom/af_netrom.c | if ((frametype & 0x0F) == NR_CONNACK && skb->len == 22) |
skb | 958 | net/netrom/af_netrom.c | return nr_process_rx_frame(sk, skb); |
skb | 966 | net/netrom/af_netrom.c | user = (ax25_address *)(skb->data + 21); |
skb | 969 | net/netrom/af_netrom.c | nr_transmit_dm(skb); |
skb | 973 | net/netrom/af_netrom.c | window = skb->data[20]; |
skb | 975 | net/netrom/af_netrom.c | skb->sk = make; |
skb | 996 | net/netrom/af_netrom.c | if (skb->len == 37) { |
skb | 997 | net/netrom/af_netrom.c | timeout = skb->data[36] * 256 + skb->data[35]; |
skb | 1018 | net/netrom/af_netrom.c | skb_queue_head(&sk->receive_queue, skb); |
skb | 1023 | net/netrom/af_netrom.c | sk->data_ready(sk, skb->len); |
skb | 1034 | net/netrom/af_netrom.c | struct sk_buff *skb; |
skb | 1074 | net/netrom/af_netrom.c | if ((skb = sock_alloc_send_skb(sk, size, 0, 0, &err)) == NULL) |
skb | 1077 | net/netrom/af_netrom.c | skb->sk = sk; |
skb | 1078 | net/netrom/af_netrom.c | skb->free = 1; |
skb | 1079 | net/netrom/af_netrom.c | skb->arp = 1; |
skb | 1081 | net/netrom/af_netrom.c | skb_reserve(skb, size - len); |
skb | 1087 | net/netrom/af_netrom.c | asmptr = skb_push(skb, NR_TRANSPORT_LEN); |
skb | 1107 | net/netrom/af_netrom.c | skb->h.raw = skb_put(skb, len); |
skb | 1109 | net/netrom/af_netrom.c | asmptr = skb->h.raw; |
skb | 1121 | net/netrom/af_netrom.c | kfree_skb(skb, FREE_WRITE); |
skb | 1125 | net/netrom/af_netrom.c | nr_output(sk, skb); /* Shove it onto the queue */ |
skb | 1137 | net/netrom/af_netrom.c | struct sk_buff *skb; |
skb | 1154 | net/netrom/af_netrom.c | if ((skb = skb_recv_datagram(sk, flags, noblock, &er)) == NULL) |
skb | 1158 | net/netrom/af_netrom.c | skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); |
skb | 1159 | net/netrom/af_netrom.c | skb->h.raw = skb->data; |
skb | 1162 | net/netrom/af_netrom.c | copied = (size < skb->len) ? size : skb->len; |
skb | 1163 | net/netrom/af_netrom.c | skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); |
skb | 1169 | net/netrom/af_netrom.c | memcpy(&addr.sax25_call, skb->data + 7, sizeof(ax25_address)); |
skb | 1176 | net/netrom/af_netrom.c | skb_free_datagram(skb); |
skb | 1211 | net/netrom/af_netrom.c | struct sk_buff *skb; |
skb | 1213 | net/netrom/af_netrom.c | if ((skb = skb_peek(&sk->receive_queue)) != NULL) |
skb | 1214 | net/netrom/af_netrom.c | amount = skb->len - 20; |
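
The net/netrom/af_netrom.c rows span the NET/ROM socket layer: nr_rx_frame() reads the circuit and frame-type bytes straight out of skb->data and dispatches (lines 911-958), accept() recovers the new socket from the skb parked on the listener's receive_queue, and sendmsg()/recvmsg() push and pull the NET/ROM headers around the payload. The sketch below condenses the recvmsg side using only the calls quoted above; the helper name is mine and the sockaddr handling is trimmed away.

    /*
     * Condensed NET/ROM recvmsg pattern (af_netrom.c lines 1154-1176):
     * take one queued frame, strip the network and transport headers,
     * then copy what fits into the user's iovec.
     */
    static int nr_recv_sketch(struct sock *sk, struct msghdr *msg,
                              int size, int noblock, int flags)
    {
        struct sk_buff *skb;
        int copied, er;

        if ((skb = skb_recv_datagram(sk, flags, noblock, &er)) == NULL)
            return er;

        skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
        skb->h.raw = skb->data;             /* payload now starts at data */

        copied = (size < skb->len) ? size : skb->len;
        skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

        skb_free_datagram(skb);
        return copied;
    }
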
skb | 54 | net/netrom/nr_dev.c | int nr_rx_ip(struct sk_buff *skb, struct device *dev) |
skb | 65 | net/netrom/nr_dev.c | skb->protocol = htons(ETH_P_IP); |
skb | 68 | net/netrom/nr_dev.c | skb->dev = dev; |
skb | 70 | net/netrom/nr_dev.c | skb->h.raw = skb->data; |
skb | 71 | net/netrom/nr_dev.c | ip_rcv(skb, skb->dev, NULL); |
skb | 76 | net/netrom/nr_dev.c | static int nr_header(struct sk_buff *skb, struct device *dev, unsigned short type, |
skb | 79 | net/netrom/nr_dev.c | unsigned char *buff = skb_push(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); |
skb | 109 | net/netrom/nr_dev.c | unsigned long raddr, struct sk_buff *skb) |
skb | 114 | net/netrom/nr_dev.c | skb_device_unlock(skb); |
skb | 117 | net/netrom/nr_dev.c | skb->free = 1; |
skb | 118 | net/netrom/nr_dev.c | kfree_skb(skb, FREE_WRITE); |
skb | 131 | net/netrom/nr_dev.c | if (!nr_route_frame(skb, NULL)) { |
skb | 132 | net/netrom/nr_dev.c | skb->free = 1; |
skb | 133 | net/netrom/nr_dev.c | kfree_skb(skb, FREE_WRITE); |
skb | 166 | net/netrom/nr_dev.c | static int nr_xmit(struct sk_buff *skb, struct device *dev) |
skb | 170 | net/netrom/nr_dev.c | if (skb == NULL || dev == NULL) |
skb | 190 | net/netrom/nr_dev.c | dev_kfree_skb(skb, FREE_WRITE); |
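
The net/netrom/nr_dev.c rows belong to the NET/ROM pseudo-device: nr_rx_ip() re-labels an inbound frame as ETH_P_IP and feeds it to ip_rcv() (lines 54-71), while nr_header() pre-pends space for the NET/ROM network and transport headers on the way out. A sketch of the inbound hand-off follows; the helper name and the return convention are assumptions, the calls themselves are the ones quoted above, and the statistics updates of the real function are omitted.

    /*
     * NET/ROM-to-IP hand-off: mark the buffer as an IP packet for this
     * device and pass it straight to the IP receive routine.
     */
    static int nr_rx_ip_sketch(struct sk_buff *skb, struct device *dev)
    {
        skb->protocol = htons(ETH_P_IP);
        skb->dev      = dev;
        skb->h.raw    = skb->data;          /* IP header is at the front now */

        ip_rcv(skb, skb->dev, NULL);
        return 1;                           /* return convention assumed */
    }
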
skb | 53 | net/netrom/nr_in.c | static int nr_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more) |
skb | 55 | net/netrom/nr_in.c | struct sk_buff *skbo, *skbn = skb; |
skb | 58 | net/netrom/nr_in.c | sk->nr->fraglen += skb->len; |
skb | 59 | net/netrom/nr_in.c | skb_queue_tail(&sk->nr->frag_queue, skb); |
skb | 64 | net/netrom/nr_in.c | sk->nr->fraglen += skb->len; |
skb | 65 | net/netrom/nr_in.c | skb_queue_tail(&sk->nr->frag_queue, skb); |
skb | 97 | net/netrom/nr_in.c | static int nr_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) |
skb | 103 | net/netrom/nr_in.c | sk->window = skb->data[20]; |
skb | 104 | net/netrom/nr_in.c | sk->nr->your_index = skb->data[17]; |
skb | 105 | net/netrom/nr_in.c | sk->nr->your_id = skb->data[18]; |
skb | 143 | net/netrom/nr_in.c | static int nr_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype) |
skb | 171 | net/netrom/nr_in.c | static int nr_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype) |
skb | 179 | net/netrom/nr_in.c | nr = skb->data[18]; |
skb | 180 | net/netrom/nr_in.c | ns = skb->data[17]; |
skb | 263 | net/netrom/nr_in.c | skb_queue_head(&sk->nr->reseq_queue, skb); |
skb | 310 | net/netrom/nr_in.c | int nr_process_rx_frame(struct sock *sk, struct sk_buff *skb) |
skb | 325 | net/netrom/nr_in.c | frametype = skb->data[19]; |
skb | 330 | net/netrom/nr_in.c | queued = nr_state1_machine(sk, skb, frametype); |
skb | 333 | net/netrom/nr_in.c | queued = nr_state2_machine(sk, skb, frametype); |
skb | 336 | net/netrom/nr_in.c | queued = nr_state3_machine(sk, skb, frametype); |
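
The net/netrom/nr_in.c rows show the inbound state machine: nr_process_rx_frame() reads the frame type from skb->data[19] and dispatches to a per-state handler (lines 310-336), while nr_queue_rx_frame() accumulates fragments on sk->nr->frag_queue until the "more" flag clears. The sketch below mirrors the dispatch step only; the helper name and the NR_STATE_1/2/3 identifiers are assumptions inferred from the handler names (only NR_STATE_0 appears in the rows).

    /*
     * Frame-type dispatch: byte 19 of the transport header selects the
     * frame type, and the connection state selects which handler runs.
     */
    static int nr_dispatch_sketch(struct sock *sk, struct sk_buff *skb)
    {
        int frametype = skb->data[19];
        int queued = 0;

        switch (sk->nr->state) {
        case NR_STATE_1:                    /* awaiting connection ack */
            queued = nr_state1_machine(sk, skb, frametype);
            break;
        case NR_STATE_2:                    /* disconnect in progress */
            queued = nr_state2_machine(sk, skb, frametype);
            break;
        case NR_STATE_3:                    /* data transfer */
            queued = nr_state3_machine(sk, skb, frametype);
            break;
        }
        return queued;                      /* non-zero: the skb was kept on a queue */
    }
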
skb | 49 | net/netrom/nr_out.c | void nr_output(struct sock *sk, struct sk_buff *skb) |
skb | 57 | net/netrom/nr_out.c | if (skb->len - NR_TRANSPORT_LEN > mtu) { |
skb | 59 | net/netrom/nr_out.c | memcpy(transport, skb->data, NR_TRANSPORT_LEN); |
skb | 60 | net/netrom/nr_out.c | skb_pull(skb, NR_TRANSPORT_LEN); |
skb | 62 | net/netrom/nr_out.c | frontlen = skb_headroom(skb); |
skb | 64 | net/netrom/nr_out.c | while (skb->len > 0) { |
skb | 74 | net/netrom/nr_out.c | len = (mtu > skb->len) ? skb->len : mtu; |
skb | 77 | net/netrom/nr_out.c | memcpy(skb_put(skbn, len), skb->data, len); |
skb | 78 | net/netrom/nr_out.c | skb_pull(skb, len); |
skb | 84 | net/netrom/nr_out.c | if (skb->len > 0) |
skb | 90 | net/netrom/nr_out.c | skb->free = 1; |
skb | 91 | net/netrom/nr_out.c | kfree_skb(skb, FREE_WRITE); |
skb | 93 | net/netrom/nr_out.c | skb_queue_tail(&sk->write_queue, skb); /* Throw it on the queue */ |
skb | 104 | net/netrom/nr_out.c | static void nr_send_iframe(struct sock *sk, struct sk_buff *skb) |
skb | 106 | net/netrom/nr_out.c | if (skb == NULL) |
skb | 109 | net/netrom/nr_out.c | skb->data[2] = sk->nr->vs; |
skb | 110 | net/netrom/nr_out.c | skb->data[3] = sk->nr->vr; |
skb | 113 | net/netrom/nr_out.c | skb->data[4] |= NR_CHOKE_FLAG; |
skb | 115 | net/netrom/nr_out.c | nr_transmit_buffer(sk, skb); |
skb | 120 | net/netrom/nr_out.c | struct sk_buff *skb, *skbn; |
skb | 122 | net/netrom/nr_out.c | if ((skb = skb_peek(&sk->nr->ack_queue)) == NULL) |
skb | 125 | net/netrom/nr_out.c | if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) |
skb | 143 | net/netrom/nr_out.c | struct sk_buff *skb, *skbn; |
skb | 166 | net/netrom/nr_out.c | skb = skb_dequeue(&sk->write_queue); |
skb | 169 | net/netrom/nr_out.c | if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { |
skb | 170 | net/netrom/nr_out.c | skb_queue_head(&sk->write_queue, skb); |
skb | 187 | net/netrom/nr_out.c | skb_queue_tail(&sk->nr->ack_queue, skb); |
skb | 189 | net/netrom/nr_out.c | } while (!last && (skb = skb_dequeue(&sk->write_queue)) != NULL); |
skb | 202 | net/netrom/nr_out.c | void nr_transmit_buffer(struct sock *sk, struct sk_buff *skb) |
skb | 209 | net/netrom/nr_out.c | dptr = skb_push(skb, NR_NETWORK_LEN); |
skb | 225 | net/netrom/nr_out.c | skb->arp = 1; |
skb | 227 | net/netrom/nr_out.c | if (!nr_route_frame(skb, NULL)) { |
skb | 228 | net/netrom/nr_out.c | kfree_skb(skb, FREE_WRITE); |
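
The net/netrom/nr_out.c rows cover output: nr_output() fragments an over-MTU skb by copying the transport header into each piece (lines 57-93), nr_send_iframe() stamps the current vs/vr into the header bytes before transmission, and each sent frame is kept on sk->nr->ack_queue until acknowledged. The sketch below shows the clone-before-send step seen around lines 166-187; the helper name is mine and the window bookkeeping is left out.

    /*
     * Send-and-keep pattern: transmit a clone so the original can sit
     * on the ack_queue until the peer acknowledges it (and can be
     * re-cloned later for retransmission).
     */
    static void nr_send_one_sketch(struct sock *sk)
    {
        struct sk_buff *skb, *skbn;

        if ((skb = skb_dequeue(&sk->write_queue)) == NULL)
            return;

        if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
            skb_queue_head(&sk->write_queue, skb);   /* no memory: try again later */
            return;
        }

        nr_send_iframe(sk, skbn);                    /* the clone goes to the wire */
        skb_queue_tail(&sk->nr->ack_queue, skb);     /* the original awaits the ack */
    }
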
skb | 651 | net/netrom/nr_route.c | int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25) |
skb | 661 | net/netrom/nr_route.c | if(ax25 && call_in_firewall(PF_NETROM, skb, skb->data)!=FW_ACCEPT) |
skb | 663 | net/netrom/nr_route.c | if(!ax25 && call_out_firewall(PF_NETROM, skb, skb->data)!=FW_ACCEPT) |
skb | 666 | net/netrom/nr_route.c | nr_src = (ax25_address *)(skb->data + 0); |
skb | 667 | net/netrom/nr_route.c | nr_dest = (ax25_address *)(skb->data + 7); |
skb | 673 | net/netrom/nr_route.c | return nr_rx_frame(skb, dev); |
skb | 679 | net/netrom/nr_route.c | if (--skb->data[14] == 0) |
skb | 700 | net/netrom/nr_route.c | if(ax25 && call_fw_firewall(PF_NETROM, skb, skb->data)!=FW_ACCEPT) |
skb | 704 | net/netrom/nr_route.c | dptr = skb_push(skb, 1); |
skb | 707 | net/netrom/nr_route.c | ax25_send_frame(skb, (ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev); |
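
The net/netrom/nr_route.c rows are the inter-node forwarding step: frames addressed to this node go to nr_rx_frame(), otherwise the hop counter at skb->data[14] is decremented, a routing byte is pushed, and the frame is handed to ax25_send_frame() toward the chosen neighbour. A brief sketch of the hop-count check follows; the helper name, the kfree_skb() on expiry and the return convention are assumptions, only the decrement itself (line 679) is from the rows.

    /*
     * Hop-count handling: byte 14 of the NET/ROM network header is the
     * time-to-live; once it reaches zero the frame is dropped rather
     * than forwarded.
     */
    static int nr_ttl_ok_sketch(struct sk_buff *skb)
    {
        if (--skb->data[14] == 0) {
            kfree_skb(skb, FREE_READ);      /* expired: drop the frame (assumed) */
            return 0;
        }
        return 1;                           /* still routable */
    }
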
skb | 49 | net/netrom/nr_subr.c | struct sk_buff *skb; |
skb | 51 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->write_queue)) != NULL) { |
skb | 52 | net/netrom/nr_subr.c | skb->sk = sk; |
skb | 53 | net/netrom/nr_subr.c | skb->free = 1; |
skb | 54 | net/netrom/nr_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 57 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->nr->ack_queue)) != NULL) { |
skb | 58 | net/netrom/nr_subr.c | skb->sk = sk; |
skb | 59 | net/netrom/nr_subr.c | skb->free = 1; |
skb | 60 | net/netrom/nr_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 63 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->nr->reseq_queue)) != NULL) { |
skb | 64 | net/netrom/nr_subr.c | kfree_skb(skb, FREE_READ); |
skb | 67 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->nr->frag_queue)) != NULL) { |
skb | 68 | net/netrom/nr_subr.c | kfree_skb(skb, FREE_READ); |
skb | 79 | net/netrom/nr_subr.c | struct sk_buff *skb; |
skb | 86 | net/netrom/nr_subr.c | skb = skb_dequeue(&sk->nr->ack_queue); |
skb | 87 | net/netrom/nr_subr.c | skb->sk = sk; |
skb | 88 | net/netrom/nr_subr.c | skb->free = 1; |
skb | 89 | net/netrom/nr_subr.c | kfree_skb(skb, FREE_WRITE); |
skb | 102 | net/netrom/nr_subr.c | struct sk_buff *skb, *skb_prev = NULL; |
skb | 104 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->nr->ack_queue)) != NULL) { |
skb | 106 | net/netrom/nr_subr.c | skb_queue_head(&sk->write_queue, skb); |
skb | 108 | net/netrom/nr_subr.c | skb_append(skb_prev, skb); |
skb | 109 | net/netrom/nr_subr.c | skb_prev = skb; |
skb | 153 | net/netrom/nr_subr.c | struct sk_buff *skb; |
skb | 175 | net/netrom/nr_subr.c | if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL) |
skb | 181 | net/netrom/nr_subr.c | skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + NR_NETWORK_LEN); |
skb | 183 | net/netrom/nr_subr.c | dptr = skb_put(skb, skb_tailroom(skb)); |
skb | 237 | net/netrom/nr_subr.c | skb->free = 1; |
skb | 239 | net/netrom/nr_subr.c | nr_transmit_buffer(sk, skb); |
skb | 246 | net/netrom/nr_subr.c | void nr_transmit_dm(struct sk_buff *skb) |
skb | 261 | net/netrom/nr_subr.c | memcpy(dptr, skb->data + 7, AX25_ADDR_LEN); |
skb | 267 | net/netrom/nr_subr.c | memcpy(dptr, skb->data + 0, AX25_ADDR_LEN); |
skb | 275 | net/netrom/nr_subr.c | *dptr++ = skb->data[15]; |
skb | 276 | net/netrom/nr_subr.c | *dptr++ = skb->data[16]; |
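
The net/netrom/nr_subr.c rows are the housekeeping helpers: the clear-queues routine drains the write, ack, reseq and fragment queues (lines 49-68), frames are released up to the acknowledged sequence number (lines 79-89), unacknowledged frames are moved back onto the write queue in order (lines 102-109), and control frames are built from scratch with alloc_skb()/skb_reserve()/skb_put(). The sketch below is a cleaned transcription of the requeue alternation; only the helper name is mine.

    /*
     * Requeue pattern: unacknowledged frames come off the ack_queue in
     * order; the first goes to the head of the write queue and each
     * subsequent one is appended after its predecessor, preserving the
     * original ordering.
     */
    static void nr_requeue_sketch(struct sock *sk)
    {
        struct sk_buff *skb, *skb_prev = NULL;

        while ((skb = skb_dequeue(&sk->nr->ack_queue)) != NULL) {
            if (skb_prev == NULL)
                skb_queue_head(&sk->write_queue, skb);
            else
                skb_append(skb_prev, skb);
            skb_prev = skb;
        }
    }
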
skb | 166 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 169 | net/unix/af_unix.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) |
skb | 173 | net/unix/af_unix.c | unix_socket *osk=skb->sk; |
skb | 175 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); /* Now surplus - free the skb first before the socket */ |
skb | 182 | net/unix/af_unix.c | kfree_skb(skb,FREE_WRITE); |
skb | 455 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 508 | net/unix/af_unix.c | skb=sock_alloc_send_skb(sk, 0, 0, 0, &err); /* Marker object */ |
skb | 509 | net/unix/af_unix.c | if(skb==NULL) |
skb | 511 | net/unix/af_unix.c | skb->sk=sk; /* So they know it is us */ |
skb | 512 | net/unix/af_unix.c | skb->free=1; |
skb | 518 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); |
skb | 523 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); |
skb | 529 | net/unix/af_unix.c | skb_queue_tail(&other->receive_queue,skb); |
skb | 580 | net/unix/af_unix.c | unix_socket *ska,*skb; |
skb | 583 | net/unix/af_unix.c | skb=b->data; |
skb | 587 | net/unix/af_unix.c | skb->protinfo.af_unix.locks++; |
skb | 588 | net/unix/af_unix.c | ska->protinfo.af_unix.other=skb; |
skb | 589 | net/unix/af_unix.c | skb->protinfo.af_unix.other=ska; |
skb | 591 | net/unix/af_unix.c | skb->state=TCP_ESTABLISHED; |
skb | 599 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 622 | net/unix/af_unix.c | skb=skb_dequeue(&sk->receive_queue); |
skb | 623 | net/unix/af_unix.c | if(skb==NULL) |
skb | 639 | net/unix/af_unix.c | while(skb==NULL); |
skb | 640 | net/unix/af_unix.c | tsk=skb->sk; |
skb | 641 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); /* The buffer is just used as a tag */ |
skb | 685 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 745 | net/unix/af_unix.c | skb=sock_alloc_send_skb(sk,size,limit,nonblock, &err); |
skb | 747 | net/unix/af_unix.c | if(skb==NULL) |
skb | 756 | net/unix/af_unix.c | size=skb_tailroom(skb); /* If we dropped back on a limit then our skb is smaller */ |
skb | 758 | net/unix/af_unix.c | skb->sk=sk; |
skb | 759 | net/unix/af_unix.c | skb->free=1; |
skb | 761 | net/unix/af_unix.c | memcpy_fromiovec(skb_put(skb,size),msg->msg_iov, size); |
skb | 785 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); |
skb | 793 | net/unix/af_unix.c | skb_queue_tail(&other->receive_queue, skb); |
skb | 820 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 851 | net/unix/af_unix.c | skb=skb_dequeue(&sk->receive_queue); |
skb | 852 | net/unix/af_unix.c | if(skb==NULL) |
skb | 870 | net/unix/af_unix.c | if(skb->sk->protinfo.af_unix.name) |
skb | 872 | net/unix/af_unix.c | memcpy(sunaddr->sun_path, skb->sk->protinfo.af_unix.name, 108); |
skb | 880 | net/unix/af_unix.c | num=min(skb->len,size-copied); |
skb | 881 | net/unix/af_unix.c | memcpy_tofs(sp, skb->data, num); |
skb | 886 | net/unix/af_unix.c | skb_pull(skb, num); |
skb | 888 | net/unix/af_unix.c | if (skb->len) { |
skb | 889 | net/unix/af_unix.c | skb_queue_head(&sk->receive_queue, skb); |
skb | 892 | net/unix/af_unix.c | kfree_skb(skb, FREE_WRITE); |
skb | 956 | net/unix/af_unix.c | struct sk_buff *skb; |
skb | 960 | net/unix/af_unix.c | if((skb=skb_peek(&sk->receive_queue))!=NULL) |
skb | 961 | net/unix/af_unix.c | amount=skb->len; |
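
The net/unix/af_unix.c rows close the section with the UNIX-domain paths: connect() passes a zero-length "marker" skb whose sk pointer identifies the connecting socket (lines 508-529), sendmsg() sizes its payload with skb_tailroom() after sock_alloc_send_skb() and queues it on the peer's receive_queue, and the stream-style read consumes buffers partially with skb_pull(), putting a non-empty remainder back at the head of the queue. The sketch below condenses that partial-consume step from lines 880-892; the helper name and parameter list are mine, and the peer-name copying is omitted.

    /*
     * Stream-style partial read: copy what fits, trim the skb, and
     * either requeue the remainder or free the emptied buffer.
     */
    static int unix_copy_one_sketch(struct sock *sk, struct sk_buff *skb,
                                    unsigned char *sp, int size, int copied)
    {
        int num = min(skb->len, size - copied);

        memcpy_tofs(sp, skb->data, num);    /* hand the bytes to user space */
        skb_pull(skb, num);                 /* drop them from the buffer */

        if (skb->len)
            skb_queue_head(&sk->receive_queue, skb);   /* leftover data for the next read */
        else
            kfree_skb(skb, FREE_WRITE);     /* write-side buffer fully consumed */

        return num;
    }
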