tag | line | file | source code |
sk | 280 | net/tcp/arp.c | skb->sk = NULL; |
sk | 439 | net/tcp/arp.c | skb->sk = NULL; |
sk | 298 | net/tcp/dev.c | skb->sk = NULL; |
sk | 156 | net/tcp/icmp.c | skb->sk = NULL; |
sk | 191 | net/tcp/icmp.c | skb1->sk = NULL; |
sk | 207 | net/tcp/icmp.c | skb1->sk = NULL; |
sk | 248 | net/tcp/icmp.c | skb1->sk = NULL; |
sk | 271 | net/tcp/icmp.c | skb1->sk = NULL; |
sk | 284 | net/tcp/icmp.c | skb1->sk = NULL; |
sk | 288 | net/tcp/icmp.c | skb->sk = NULL; |
sk | 300 | net/tcp/icmp.c | skb1->sk = NULL; |
sk | 323 | net/tcp/icmp.c | skb1->sk = NULL; |
sk | 329 | net/tcp/icmp.c | skb1->sk = NULL; |
sk | 335 | net/tcp/icmp.c | skb1->sk = NULL; |
sk | 828 | net/tcp/ip.c | skb->sk = NULL; |
sk | 841 | net/tcp/ip.c | skb->sk = NULL; |
sk | 853 | net/tcp/ip.c | skb->sk = NULL; |
sk | 899 | net/tcp/ip.c | skb->sk = NULL; |
sk | 914 | net/tcp/ip.c | ip_queue_xmit (volatile struct sock *sk, struct device *dev, |
sk | 919 | net/tcp/ip.c | if (sk == NULL) free = 1; |
sk | 946 | net/tcp/ip.c | sk->packets_out++; |
sk | 948 | net/tcp/ip.c | if (sk->send_tail == NULL) |
sk | 950 | net/tcp/ip.c | sk->send_tail = skb; |
sk | 951 | net/tcp/ip.c | sk->send_head = skb; |
sk | 955 | net/tcp/ip.c | sk->send_tail->link3 = skb; |
sk | 956 | net/tcp/ip.c | sk->send_tail = skb; |
sk | 959 | net/tcp/ip.c | sk->time_wait.len = sk->rtt*2; |
sk | 960 | net/tcp/ip.c | sk->timeout=TIME_WRITE; |
sk | 961 | net/tcp/ip.c | reset_timer ((struct timer *)&sk->time_wait); |
sk | 965 | net/tcp/ip.c | skb->sk = sk; |
sk | 969 | net/tcp/ip.c | if (sk != NULL) |
sk | 971 | net/tcp/ip.c | dev->queue_xmit(skb, dev, sk->priority); |
sk | 986 | net/tcp/ip.c | ip_retransmit (volatile struct sock *sk, int all) |
sk | 992 | net/tcp/ip.c | prot = sk->prot; |
sk | 993 | net/tcp/ip.c | skb = sk->send_head; |
sk | 1012 | net/tcp/ip.c | if (sk) |
sk | 1013 | net/tcp/ip.c | dev->queue_xmit(skb, dev, sk->priority); |
sk | 1017 | net/tcp/ip.c | sk->retransmits++; |
sk | 1018 | net/tcp/ip.c | sk->prot->retransmits ++; |
sk | 1023 | net/tcp/ip.c | if (sk->retransmits > sk->cong_window) break; |
sk | 1032 | net/tcp/ip.c | sk->rtt *= 2; |
sk | 1033 | net/tcp/ip.c | sk->time_wait.len = sk->rtt; |
sk | 1034 | net/tcp/ip.c | sk->timeout = TIME_WRITE; |
sk | 1035 | net/tcp/ip.c | reset_timer ((struct timer *)&sk->time_wait); |
sk | 178 | net/tcp/ip.h | void ip_queue_xmit (volatile struct sock *sk, struct device *dev, |
sk | 180 | net/tcp/ip.h | void ip_retransmit(volatile struct sock *sk, int all); |
sk | 184 | net/tcp/ip.h | int ip_handoff (volatile struct sock *sk); |
sk | 78 | net/tcp/packet.c | volatile struct sock *sk; |
sk | 80 | net/tcp/packet.c | sk = pt->data; |
sk | 86 | net/tcp/packet.c | if (sk->inuse) |
sk | 92 | net/tcp/packet.c | skb->sk = NULL; |
sk | 97 | net/tcp/packet.c | sk->inuse = 1; |
sk | 100 | net/tcp/packet.c | skb->sk = sk; |
sk | 103 | net/tcp/packet.c | if (sk->rmem_alloc + skb->mem_len >= SK_RMEM_MAX) |
sk | 105 | net/tcp/packet.c | skb->sk = NULL; |
sk | 110 | net/tcp/packet.c | sk->rmem_alloc += skb->mem_len; |
sk | 113 | net/tcp/packet.c | if (sk->rqueue == NULL) |
sk | 115 | net/tcp/packet.c | sk->rqueue = skb; |
sk | 121 | net/tcp/packet.c | skb->next = sk->rqueue; |
sk | 122 | net/tcp/packet.c | skb->prev = sk->rqueue->prev; |
sk | 126 | net/tcp/packet.c | wake_up (sk->sleep); |
sk | 127 | net/tcp/packet.c | release_sock (sk); |
sk | 133 | net/tcp/packet.c | packet_sendto (volatile struct sock *sk, unsigned char *from, int len, |
sk | 156 | net/tcp/packet.c | skb = sk->prot->wmalloc (sk, len+sizeof (*skb), 0, GFP_KERNEL); |
sk | 167 | net/tcp/packet.c | skb->sk = sk; |
sk | 173 | net/tcp/packet.c | sk->prot->wfree (sk, skb->mem_addr, skb->mem_len); |
sk | 181 | net/tcp/packet.c | dev->queue_xmit (skb, dev, sk->priority); |
sk | 188 | net/tcp/packet.c | packet_write (volatile struct sock *sk, unsigned char *buff, |
sk | 191 | net/tcp/packet.c | return (packet_sendto (sk, buff, len, noblock, flags, NULL, 0)); |
sk | 195 | net/tcp/packet.c | packet_close (volatile struct sock *sk, int timeout) |
sk | 197 | net/tcp/packet.c | sk->inuse = 1; |
sk | 198 | net/tcp/packet.c | sk->state = TCP_CLOSE; |
sk | 199 | net/tcp/packet.c | dev_remove_pack ((struct packet_type *)sk->pair); |
sk | 200 | net/tcp/packet.c | kfree_s ((void *)sk->pair, sizeof (struct packet_type)); |
sk | 201 | net/tcp/packet.c | sk->pair = NULL; |
sk | 202 | net/tcp/packet.c | release_sock (sk); |
sk | 206 | net/tcp/packet.c | packet_init (volatile struct sock *sk) |
sk | 213 | net/tcp/packet.c | p->type = sk->num; |
sk | 214 | net/tcp/packet.c | p->data = (void *)sk; |
sk | 218 | net/tcp/packet.c | sk->pair = (volatile struct sock *)p; |
sk | 225 | net/tcp/packet.c | packet_recvfrom (volatile struct sock *sk, unsigned char *to, int len, |
sk | 239 | net/tcp/packet.c | if (sk->shutdown & RCV_SHUTDOWN) return (0); |
sk | 247 | net/tcp/packet.c | sk->inuse = 1; |
sk | 248 | net/tcp/packet.c | while (sk->rqueue == NULL) |
sk | 252 | net/tcp/packet.c | release_sock (sk); |
sk | 255 | net/tcp/packet.c | release_sock (sk); |
sk | 257 | net/tcp/packet.c | if (sk->rqueue == NULL) |
sk | 259 | net/tcp/packet.c | interruptible_sleep_on (sk->sleep); |
sk | 266 | net/tcp/packet.c | sk->inuse = 1; |
sk | 269 | net/tcp/packet.c | skb = sk->rqueue; |
sk | 275 | net/tcp/packet.c | sk->rqueue = NULL; |
sk | 279 | net/tcp/packet.c | sk->rqueue = (struct sk_buff *)sk->rqueue ->next; |
sk | 302 | net/tcp/packet.c | release_sock (sk); |
sk | 308 | net/tcp/packet.c | packet_read (volatile struct sock *sk, unsigned char *buff, |
sk | 311 | net/tcp/packet.c | return (packet_recvfrom (sk, buff, len, noblock, flags, NULL, NULL)); |
sk | 315 | net/tcp/packet.c | int udp_connect (volatile struct sock *sk, struct sockaddr_in *usin, |
sk | 318 | net/tcp/packet.c | int udp_select (volatile struct sock *sk, int sel_type, select_table *wait); |
sk | 105 | net/tcp/raw.c | volatile struct sock *sk; |
sk | 111 | net/tcp/raw.c | sk = protocol->data; |
sk | 113 | net/tcp/raw.c | if (sk == NULL) return; |
sk | 118 | net/tcp/raw.c | if (sk->cong_window > 1) |
sk | 119 | net/tcp/raw.c | sk->cong_window = sk->cong_window/2; |
sk | 123 | net/tcp/raw.c | sk->err = icmp_err_convert[err & 0xff].errno; |
sk | 142 | net/tcp/raw.c | volatile struct sock *sk; |
sk | 154 | net/tcp/raw.c | sk = protocol->data; |
sk | 155 | net/tcp/raw.c | if (sk == NULL) |
sk | 162 | net/tcp/raw.c | skb->sk = sk; |
sk | 172 | net/tcp/raw.c | if (sk->inuse) |
sk | 175 | net/tcp/raw.c | if (sk->back_log == NULL) |
sk | 177 | net/tcp/raw.c | sk->back_log = skb; |
sk | 183 | net/tcp/raw.c | skb->next = sk->back_log; |
sk | 184 | net/tcp/raw.c | skb->prev = sk->back_log->prev; |
sk | 191 | net/tcp/raw.c | sk->inuse = 1; |
sk | 196 | net/tcp/raw.c | if (sk->rmem_alloc + skb->mem_len >= SK_RMEM_MAX) |
sk | 198 | net/tcp/raw.c | skb->sk = NULL; |
sk | 203 | net/tcp/raw.c | sk->rmem_alloc += skb->mem_len; |
sk | 206 | net/tcp/raw.c | if (sk->rqueue == NULL) |
sk | 208 | net/tcp/raw.c | sk->rqueue = skb; |
sk | 214 | net/tcp/raw.c | skb->next = sk->rqueue; |
sk | 215 | net/tcp/raw.c | skb->prev = sk->rqueue->prev; |
sk | 219 | net/tcp/raw.c | wake_up (sk->sleep); |
sk | 220 | net/tcp/raw.c | release_sock (sk); |
sk | 226 | net/tcp/raw.c | raw_sendto (volatile struct sock *sk, unsigned char *from, int len, |
sk | 236 | net/tcp/raw.c | " usin=%X, addr_len = %d)\n", sk, from, len, noblock, |
sk | 256 | net/tcp/raw.c | if (sk->state != TCP_ESTABLISHED) |
sk | 259 | net/tcp/raw.c | sin.sin_port = sk->protocol; |
sk | 260 | net/tcp/raw.c | sin.sin_addr.s_addr = sk->daddr; |
sk | 262 | net/tcp/raw.c | if (sin.sin_port == 0) sin.sin_port = sk->protocol; |
sk | 264 | net/tcp/raw.c | sk->inuse = 1; |
sk | 268 | net/tcp/raw.c | skb = sk->prot->wmalloc (sk, len+sizeof (*skb) + sk->prot->max_header, |
sk | 277 | net/tcp/raw.c | tmp = sk->wmem_alloc; |
sk | 278 | net/tcp/raw.c | release_sock (sk); |
sk | 280 | net/tcp/raw.c | if (tmp <= sk->wmem_alloc) |
sk | 282 | net/tcp/raw.c | interruptible_sleep_on (sk->sleep); |
sk | 289 | net/tcp/raw.c | sk->inuse = 1; |
sk | 295 | net/tcp/raw.c | skb->mem_len = len + sizeof (*skb) +sk->prot->max_header; |
sk | 296 | net/tcp/raw.c | skb->sk = sk; |
sk | 301 | net/tcp/raw.c | tmp = sk->prot->build_header (skb, sk->saddr, |
sk | 303 | net/tcp/raw.c | sk->protocol, sk->opt, skb->mem_len); |
sk | 307 | net/tcp/raw.c | sk->prot->wfree (sk, skb->mem_addr, skb->mem_len); |
sk | 308 | net/tcp/raw.c | release_sock (sk); |
sk | 315 | net/tcp/raw.c | sk->prot->queue_xmit (sk, dev, skb, 1); |
sk | 316 | net/tcp/raw.c | release_sock (sk); |
sk | 321 | net/tcp/raw.c | raw_write (volatile struct sock *sk, unsigned char *buff, int len, int noblock, |
sk | 324 | net/tcp/raw.c | return (raw_sendto (sk, buff, len, noblock, flags, NULL, 0)); |
sk | 328 | net/tcp/raw.c | raw_close (volatile struct sock *sk, int timeout) |
sk | 330 | net/tcp/raw.c | sk->inuse = 1; |
sk | 331 | net/tcp/raw.c | sk->state = TCP_CLOSE; |
sk | 333 | net/tcp/raw.c | ((struct ip_protocol *)sk->pair)->protocol)); |
sk | 334 | net/tcp/raw.c | if (delete_ip_protocol ((struct ip_protocol *)sk->pair) < 0) |
sk | 336 | net/tcp/raw.c | kfree_s ((void *)sk->pair, sizeof (struct ip_protocol)); |
sk | 337 | net/tcp/raw.c | sk->pair = NULL; |
sk | 338 | net/tcp/raw.c | release_sock (sk); |
sk | 342 | net/tcp/raw.c | raw_init (volatile struct sock *sk) |
sk | 349 | net/tcp/raw.c | p->protocol = sk->protocol; |
sk | 350 | net/tcp/raw.c | p->data = (void *)sk; |
sk | 355 | net/tcp/raw.c | sk->pair = (volatile struct sock *)p; |
sk | 357 | net/tcp/raw.c | PRINTK (("raw init added protocol %d\n", sk->protocol)); |
sk | 364 | net/tcp/raw.c | raw_recvfrom (volatile struct sock *sk, unsigned char *to, int len, |
sk | 374 | net/tcp/raw.c | " sin=%X, addr_len=%X)\n", sk, to, len, noblock, |
sk | 380 | net/tcp/raw.c | if (sk->shutdown & RCV_SHUTDOWN) return (0); |
sk | 386 | net/tcp/raw.c | sk->inuse = 1; |
sk | 387 | net/tcp/raw.c | while (sk->rqueue == NULL) |
sk | 391 | net/tcp/raw.c | release_sock (sk); |
sk | 395 | net/tcp/raw.c | release_sock (sk); |
sk | 397 | net/tcp/raw.c | if (sk->rqueue == NULL) |
sk | 399 | net/tcp/raw.c | interruptible_sleep_on (sk->sleep); |
sk | 406 | net/tcp/raw.c | sk->inuse = 1; |
sk | 409 | net/tcp/raw.c | skb = sk->rqueue; |
sk | 415 | net/tcp/raw.c | sk->rqueue = NULL; |
sk | 419 | net/tcp/raw.c | sk->rqueue = (struct sk_buff *)sk->rqueue ->next; |
sk | 441 | net/tcp/raw.c | release_sock (sk); |
sk | 447 | net/tcp/raw.c | raw_read (volatile struct sock *sk, unsigned char *buff, int len, int noblock, |
sk | 450 | net/tcp/raw.c | return (raw_recvfrom (sk, buff, len, noblock, flags, NULL, NULL)); |
sk | 454 | net/tcp/raw.c | int udp_connect (volatile struct sock *sk, struct sockaddr_in *usin, |
sk | 457 | net/tcp/raw.c | int udp_select (volatile struct sock *sk, int sel_type, select_table *wait); |
sk | 195 | net/tcp/sock.c | print_sk (volatile struct sock *sk) |
sk | 197 | net/tcp/sock.c | if (!sk) { |
sk | 201 | net/tcp/sock.c | printk (" wmem_alloc = %d\n", sk->wmem_alloc); |
sk | 202 | net/tcp/sock.c | printk (" rmem_alloc = %d\n", sk->rmem_alloc); |
sk | 203 | net/tcp/sock.c | printk (" send_head = %X\n", sk->send_head); |
sk | 204 | net/tcp/sock.c | printk (" state = %d\n",sk->state); |
sk | 205 | net/tcp/sock.c | printk (" wback = %X, rqueue = %X\n", sk->wback, sk->rqueue); |
sk | 206 | net/tcp/sock.c | printk (" wfront = %X\n", sk->wfront); |
sk | 207 | net/tcp/sock.c | printk (" daddr = %X, saddr = %X\n", sk->daddr,sk->saddr); |
sk | 208 | net/tcp/sock.c | printk (" num = %d", sk->num); |
sk | 209 | net/tcp/sock.c | printk (" next = %X\n", sk->next); |
sk | 211 | net/tcp/sock.c | sk->send_seq, sk->acked_seq, sk->copied_seq); |
sk | 213 | net/tcp/sock.c | sk->rcv_ack_seq, sk->window_seq, sk->fin_seq); |
sk | 214 | net/tcp/sock.c | printk (" prot = %X\n", sk->prot); |
sk | 215 | net/tcp/sock.c | printk (" pair = %X, back_log = %X\n", sk->pair,sk->back_log); |
sk | 216 | net/tcp/sock.c | printk (" inuse = %d , blog = %d\n", sk->inuse, sk->blog); |
sk | 217 | net/tcp/sock.c | printk (" dead = %d delay_acks=%d\n", sk->dead, sk->delay_acks); |
sk | 218 | net/tcp/sock.c | printk (" retransmits = %d, timeout = %d\n", sk->retransmits, sk->timeout); |
sk | 219 | net/tcp/sock.c | printk (" cong_window = %d, packets_out = %d\n", sk->cong_window, |
sk | 220 | net/tcp/sock.c | sk->packets_out); |
sk | 221 | net/tcp/sock.c | printk (" urg = %d shutdown=%d\n", sk->urg, sk->shutdown); |
sk | 232 | net/tcp/sock.c | printk (" sk = %X link3 = %X\n", skb->sk, skb->link3); |
sk | 270 | net/tcp/sock.c | if (skb->sk) |
sk | 274 | net/tcp/sock.c | skb->sk->prot->rfree (skb->sk, skb->mem_addr, skb->mem_len); |
sk | 278 | net/tcp/sock.c | skb->sk->prot->wfree (skb->sk, skb->mem_addr, skb->mem_len); |
sk | 302 | net/tcp/sock.c | volatile struct sock *sk; |
sk | 303 | net/tcp/sock.c | for (sk = prot->sock_array[num & (SOCK_ARRAY_SIZE -1 )]; |
sk | 304 | net/tcp/sock.c | sk != NULL; sk=sk->next) |
sk | 306 | net/tcp/sock.c | if (sk->num == num) return (1); |
sk | 321 | net/tcp/sock.c | volatile struct sock *sk; |
sk | 334 | net/tcp/sock.c | sk = prot->sock_array[(i+base+1) & (SOCK_ARRAY_SIZE -1)]; |
sk | 335 | net/tcp/sock.c | while (sk != NULL) |
sk | 337 | net/tcp/sock.c | sk = sk->next; |
sk | 364 | net/tcp/sock.c | put_sock(unsigned short num, volatile struct sock *sk) |
sk | 370 | net/tcp/sock.c | PRINTK (("put_sock (num = %d, sk = %X\n", num, sk)); |
sk | 371 | net/tcp/sock.c | sk->num = num; |
sk | 372 | net/tcp/sock.c | sk->next = NULL; |
sk | 377 | net/tcp/sock.c | if (sk->prot->sock_array[num] == NULL) |
sk | 379 | net/tcp/sock.c | sk->prot->sock_array[num] = sk; |
sk | 386 | net/tcp/sock.c | if (mask & sk->saddr) |
sk | 396 | net/tcp/sock.c | sk1 = sk->prot->sock_array[num]; |
sk | 403 | net/tcp/sock.c | sk->next = sk->prot->sock_array[num]; |
sk | 404 | net/tcp/sock.c | sk->prot->sock_array[num] = sk; |
sk | 408 | net/tcp/sock.c | sk->next = sk2; |
sk | 409 | net/tcp/sock.c | sk1->next= sk; |
sk | 416 | net/tcp/sock.c | sk->next = NULL; |
sk | 417 | net/tcp/sock.c | sk1->next = sk; |
sk | 466 | net/tcp/sock.c | destroy_sock(volatile struct sock *sk) |
sk | 470 | net/tcp/sock.c | PRINTK (("destroying socket %X\n",sk)); |
sk | 472 | net/tcp/sock.c | sk->inuse = 1; |
sk | 475 | net/tcp/sock.c | if (!sk->dead) wake_up (sk->sleep); |
sk | 477 | net/tcp/sock.c | remove_sock (sk); |
sk | 480 | net/tcp/sock.c | delete_timer((struct timer *)&sk->time_wait); |
sk | 482 | net/tcp/sock.c | if (sk->send_tmp != NULL) kfree_skb (sk->send_tmp, FREE_WRITE); |
sk | 485 | net/tcp/sock.c | for (skb = sk->wfront; skb != NULL; ) |
sk | 499 | net/tcp/sock.c | sk->wfront = NULL; |
sk | 500 | net/tcp/sock.c | sk->wback = NULL; |
sk | 502 | net/tcp/sock.c | if (sk->rqueue != NULL) |
sk | 504 | net/tcp/sock.c | skb = sk->rqueue; |
sk | 512 | net/tcp/sock.c | if (skb->sk != NULL && skb->sk != sk) |
sk | 514 | net/tcp/sock.c | skb->sk->dead = 1; |
sk | 515 | net/tcp/sock.c | skb->sk->prot->close (skb->sk, 0); |
sk | 519 | net/tcp/sock.c | } while (skb != sk->rqueue); |
sk | 522 | net/tcp/sock.c | sk->rqueue = NULL; |
sk | 525 | net/tcp/sock.c | for (skb = sk->send_head; skb != NULL; ) |
sk | 618 | net/tcp/sock.c | sk->send_head = NULL; |
sk | 622 | net/tcp/sock.c | if (sk->back_log != NULL) |
sk | 627 | net/tcp/sock.c | skb = (struct sk_buff *)sk->back_log; |
sk | 633 | net/tcp/sock.c | } while (skb != sk->back_log); |
sk | 637 | net/tcp/sock.c | sk->back_log = NULL; |
sk | 640 | net/tcp/sock.c | if (sk->pair) |
sk | 642 | net/tcp/sock.c | sk->pair->dead = 1; |
sk | 643 | net/tcp/sock.c | sk->pair->prot->close (sk->pair, 0); |
sk | 644 | net/tcp/sock.c | sk->pair = NULL; |
sk | 649 | net/tcp/sock.c | if (sk->rmem_alloc == 0 && sk->wmem_alloc == 0) |
sk | 651 | net/tcp/sock.c | kfree_s ((void *)sk,sizeof (*sk)); |
sk | 657 | net/tcp/sock.c | PRINTK (("possible memory leak in socket = %X\n", sk)); |
sk | 658 | net/tcp/sock.c | sk->destroy = 1; |
sk | 659 | net/tcp/sock.c | sk->ack_backlog = 0; |
sk | 660 | net/tcp/sock.c | sk->inuse = 0; |
sk | 661 | net/tcp/sock.c | sk->time_wait.len = SOCK_DESTROY_TIME; |
sk | 662 | net/tcp/sock.c | sk->timeout = TIME_DESTROY; |
sk | 663 | net/tcp/sock.c | reset_timer ((struct timer *)&sk->time_wait); |
sk | 672 | net/tcp/sock.c | volatile struct sock *sk; |
sk | 674 | net/tcp/sock.c | sk=sock->data; |
sk | 675 | net/tcp/sock.c | if (sk == NULL) |
sk | 690 | net/tcp/sock.c | sk->proc = arg; |
sk | 695 | net/tcp/sock.c | sk->proc = arg; |
sk | 699 | net/tcp/sock.c | return (sk->proc); |
sk | 710 | net/tcp/sock.c | volatile struct sock *sk; |
sk | 714 | net/tcp/sock.c | sk = sock->data; |
sk | 715 | net/tcp/sock.c | if (sk == NULL) |
sk | 740 | net/tcp/sock.c | sk->reuse = 1; |
sk | 742 | net/tcp/sock.c | sk->reuse = 0; |
sk | 747 | net/tcp/sock.c | sk->keepopen = 1; |
sk | 749 | net/tcp/sock.c | sk->keepopen = 0; |
sk | 754 | net/tcp/sock.c | sk->urginline = 1; |
sk | 756 | net/tcp/sock.c | sk->urginline = 0; |
sk | 761 | net/tcp/sock.c | sk->no_check = 1; |
sk | 763 | net/tcp/sock.c | sk->no_check = 0; |
sk | 769 | net/tcp/sock.c | sk->priority = val; |
sk | 784 | net/tcp/sock.c | volatile struct sock *sk; |
sk | 788 | net/tcp/sock.c | sk = sock->data; |
sk | 789 | net/tcp/sock.c | if (sk == NULL) |
sk | 809 | net/tcp/sock.c | val = sk->reuse; |
sk | 813 | net/tcp/sock.c | val = sk->keepopen; |
sk | 817 | net/tcp/sock.c | if (sk->prot == &tcp_prot) |
sk | 824 | net/tcp/sock.c | val = sk->err; |
sk | 825 | net/tcp/sock.c | sk->err = 0; |
sk | 829 | net/tcp/sock.c | val = sk->urginline; |
sk | 833 | net/tcp/sock.c | val = sk->no_check; |
sk | 837 | net/tcp/sock.c | val = sk->priority; |
sk | 851 | net/tcp/sock.c | volatile struct sock *sk; |
sk | 852 | net/tcp/sock.c | sk = sock->data; |
sk | 853 | net/tcp/sock.c | if (sk == NULL) |
sk | 860 | net/tcp/sock.c | if (sk->num == 0) |
sk | 862 | net/tcp/sock.c | sk->num = get_new_socknum (sk->prot, 0); |
sk | 863 | net/tcp/sock.c | if (sk->num == 0) return (-EAGAIN); |
sk | 864 | net/tcp/sock.c | put_sock (sk->num, sk); |
sk | 865 | net/tcp/sock.c | sk->dummy_th.source = net16(sk->num); |
sk | 869 | net/tcp/sock.c | sk->max_ack_backlog = backlog; |
sk | 870 | net/tcp/sock.c | sk->ack_backlog = 0; |
sk | 871 | net/tcp/sock.c | sk->state = TCP_LISTEN; |
sk | 927 | net/tcp/sock.c | volatile struct sock *sk; |
sk | 931 | net/tcp/sock.c | sk = kmalloc (sizeof (*sk), GFP_KERNEL); |
sk | 932 | net/tcp/sock.c | if (sk == NULL) |
sk | 934 | net/tcp/sock.c | sk->num = 0; |
sk | 943 | net/tcp/sock.c | kfree_s ((void *)sk, sizeof (*sk)); |
sk | 946 | net/tcp/sock.c | sk->no_check = TCP_NO_CHECK; |
sk | 953 | net/tcp/sock.c | kfree_s ((void *)sk, sizeof (*sk)); |
sk | 956 | net/tcp/sock.c | sk->no_check = UDP_NO_CHECK; |
sk | 963 | net/tcp/sock.c | kfree_s ((void *)sk, sizeof (*sk)); |
sk | 969 | net/tcp/sock.c | kfree_s ((void *)sk, sizeof (*sk)); |
sk | 973 | net/tcp/sock.c | sk->reuse = 1; |
sk | 974 | net/tcp/sock.c | sk->no_check = 0; /* doesn't matter no checksum is performed |
sk | 976 | net/tcp/sock.c | sk->num = protocol; |
sk | 982 | net/tcp/sock.c | kfree_s ((void *)sk, sizeof (*sk)); |
sk | 988 | net/tcp/sock.c | kfree_s ((void *)sk, sizeof (*sk)); |
sk | 992 | net/tcp/sock.c | sk->reuse = 1; |
sk | 993 | net/tcp/sock.c | sk->no_check = 0; /* doesn't matter no checksum is performed |
sk | 995 | net/tcp/sock.c | sk->num = protocol; |
sk | 1000 | net/tcp/sock.c | kfree_s ((void *)sk, sizeof (*sk)); |
sk | 1004 | net/tcp/sock.c | sk->protocol = protocol; |
sk | 1005 | net/tcp/sock.c | sk->wmem_alloc = 0; |
sk | 1006 | net/tcp/sock.c | sk->rmem_alloc = 0; |
sk | 1007 | net/tcp/sock.c | sk->pair = NULL; |
sk | 1008 | net/tcp/sock.c | sk->opt = NULL; |
sk | 1009 | net/tcp/sock.c | sk->send_seq = 0; |
sk | 1010 | net/tcp/sock.c | sk->acked_seq = 0; |
sk | 1011 | net/tcp/sock.c | sk->copied_seq = 0; |
sk | 1012 | net/tcp/sock.c | sk->fin_seq = 0; |
sk | 1013 | net/tcp/sock.c | sk->proc = 0; |
sk | 1014 | net/tcp/sock.c | sk->rtt = TCP_WRITE_TIME; |
sk | 1015 | net/tcp/sock.c | sk->packets_out = 0; |
sk | 1016 | net/tcp/sock.c | sk->cong_window = 1; /* start with only sending one packet at a time. */ |
sk | 1017 | net/tcp/sock.c | sk->exp_growth = 1; /* if set cong_window grow exponentially every time |
sk | 1019 | net/tcp/sock.c | sk->urginline = 0; |
sk | 1020 | net/tcp/sock.c | sk->intr = 0; |
sk | 1021 | net/tcp/sock.c | sk->linger = 0; |
sk | 1022 | net/tcp/sock.c | sk->destroy = 0; |
sk | 1023 | net/tcp/sock.c | sk->reuse = 0; |
sk | 1024 | net/tcp/sock.c | sk->priority = 1; |
sk | 1025 | net/tcp/sock.c | sk->shutdown = 0; |
sk | 1026 | net/tcp/sock.c | sk->urg = 0; |
sk | 1027 | net/tcp/sock.c | sk->keepopen = 0; |
sk | 1028 | net/tcp/sock.c | sk->done = 0; |
sk | 1029 | net/tcp/sock.c | sk->ack_backlog = 0; |
sk | 1030 | net/tcp/sock.c | sk->window = 0; |
sk | 1031 | net/tcp/sock.c | sk->bytes_rcv = 0; |
sk | 1032 | net/tcp/sock.c | sk->state = TCP_CLOSE; |
sk | 1033 | net/tcp/sock.c | sk->dead = 0; |
sk | 1034 | net/tcp/sock.c | sk->ack_timed = 0; |
sk | 1035 | net/tcp/sock.c | sk->send_tmp = NULL; |
sk | 1036 | net/tcp/sock.c | sk->mss = 0; /* we will try not to send any packets smaller |
sk | 1042 | net/tcp/sock.c | sk->max_unacked = 2048; /* needs to be at most 2 full packets. */ |
sk | 1047 | net/tcp/sock.c | sk->max_ack_backlog = 0; |
sk | 1048 | net/tcp/sock.c | sk->inuse = 0; |
sk | 1049 | net/tcp/sock.c | sk->delay_acks = 0; |
sk | 1050 | net/tcp/sock.c | sk->wback = NULL; |
sk | 1051 | net/tcp/sock.c | sk->wfront = NULL; |
sk | 1052 | net/tcp/sock.c | sk->rqueue = NULL; |
sk | 1053 | net/tcp/sock.c | sk->mtu = 576; |
sk | 1054 | net/tcp/sock.c | sk->prot = prot; |
sk | 1055 | net/tcp/sock.c | sk->sleep = sock->wait; |
sk | 1056 | net/tcp/sock.c | sk->daddr = 0; |
sk | 1057 | net/tcp/sock.c | sk->saddr = MY_IP_ADDR; |
sk | 1058 | net/tcp/sock.c | sk->err = 0; |
sk | 1059 | net/tcp/sock.c | sk->next = NULL; |
sk | 1060 | net/tcp/sock.c | sk->pair = NULL; |
sk | 1061 | net/tcp/sock.c | sk->send_tail = NULL; |
sk | 1062 | net/tcp/sock.c | sk->send_head = NULL; |
sk | 1063 | net/tcp/sock.c | sk->time_wait.len = TCP_CONNECT_TIME; |
sk | 1064 | net/tcp/sock.c | sk->time_wait.when = 0; |
sk | 1065 | net/tcp/sock.c | sk->time_wait.sk = sk; |
sk | 1066 | net/tcp/sock.c | sk->time_wait.next = NULL; |
sk | 1067 | net/tcp/sock.c | sk->timeout = 0; |
sk | 1068 | net/tcp/sock.c | sk->back_log = NULL; |
sk | 1069 | net/tcp/sock.c | sk->blog = 0; |
sk | 1070 | net/tcp/sock.c | sock->data =(void *) sk; |
sk | 1071 | net/tcp/sock.c | sk->dummy_th.doff = sizeof (sk->dummy_th)/4; |
sk | 1072 | net/tcp/sock.c | sk->dummy_th.res1=0; |
sk | 1073 | net/tcp/sock.c | sk->dummy_th.res2=0; |
sk | 1074 | net/tcp/sock.c | sk->dummy_th.urg_ptr = 0; |
sk | 1075 | net/tcp/sock.c | sk->dummy_th.fin = 0; |
sk | 1076 | net/tcp/sock.c | sk->dummy_th.syn = 0; |
sk | 1077 | net/tcp/sock.c | sk->dummy_th.rst = 0; |
sk | 1078 | net/tcp/sock.c | sk->dummy_th.psh = 0; |
sk | 1079 | net/tcp/sock.c | sk->dummy_th.ack = 0; |
sk | 1080 | net/tcp/sock.c | sk->dummy_th.urg = 0; |
sk | 1081 | net/tcp/sock.c | sk->dummy_th.dest = 0; |
sk | 1083 | net/tcp/sock.c | if (sk->num) |
sk | 1089 | net/tcp/sock.c | put_sock (sk->num, sk); |
sk | 1090 | net/tcp/sock.c | sk->dummy_th.source = net16(sk->num); |
sk | 1093 | net/tcp/sock.c | if (sk->prot->init) |
sk | 1095 | net/tcp/sock.c | err = sk->prot->init(sk); |
sk | 1098 | net/tcp/sock.c | destroy_sock (sk); |
sk | 1116 | net/tcp/sock.c | volatile struct sock *sk; |
sk | 1117 | net/tcp/sock.c | sk = sock->data; |
sk | 1118 | net/tcp/sock.c | if (sk == NULL) return (0); |
sk | 1120 | net/tcp/sock.c | wake_up (sk->sleep); |
sk | 1125 | net/tcp/sock.c | if (sk->linger == 0) |
sk | 1127 | net/tcp/sock.c | sk->prot->close(sk,0); |
sk | 1128 | net/tcp/sock.c | sk->dead = 1; |
sk | 1133 | net/tcp/sock.c | sk->prot->close(sk, 0); |
sk | 1135 | net/tcp/sock.c | while (sk->state != TCP_CLOSE) |
sk | 1137 | net/tcp/sock.c | interruptible_sleep_on (sk->sleep); |
sk | 1145 | net/tcp/sock.c | sk->dead = 1; |
sk | 1148 | net/tcp/sock.c | sk->inuse = 1; |
sk | 1150 | net/tcp/sock.c | release_sock (sk); |
sk | 1166 | net/tcp/sock.c | volatile struct sock *sk, *sk2; |
sk | 1169 | net/tcp/sock.c | sk = sock->data; |
sk | 1170 | net/tcp/sock.c | if (sk == NULL) |
sk | 1176 | net/tcp/sock.c | if (sk->state != TCP_CLOSE) return (-EIO); |
sk | 1177 | net/tcp/sock.c | if (sk->num != 0) return (-EINVAL); |
sk | 1192 | net/tcp/sock.c | PRINTK (("bind sk =%X to port = %d\n", sk, snum)); |
sk | 1193 | net/tcp/sock.c | sk = sock->data; |
sk | 1201 | net/tcp/sock.c | if ( sk->num > PROT_SOCK) return (0); |
sk | 1202 | net/tcp/sock.c | snum = get_new_socknum (sk->prot, 0); |
sk | 1209 | net/tcp/sock.c | sk->saddr = addr.sin_addr.s_addr; |
sk | 1212 | net/tcp/sock.c | sk->prot->sock_array[snum & (SOCK_ARRAY_SIZE -1)])); |
sk | 1215 | net/tcp/sock.c | for (sk2 = sk->prot->sock_array[snum & (SOCK_ARRAY_SIZE -1)]; |
sk | 1220 | net/tcp/sock.c | if (sk2->saddr != sk->saddr) continue; |
sk | 1221 | net/tcp/sock.c | if (!sk->reuse) return (-EADDRINUSE); |
sk | 1224 | net/tcp/sock.c | remove_sock (sk); |
sk | 1225 | net/tcp/sock.c | put_sock(snum, sk); |
sk | 1226 | net/tcp/sock.c | sk->dummy_th.source = net16(sk->num); |
sk | 1227 | net/tcp/sock.c | sk->daddr = 0; |
sk | 1228 | net/tcp/sock.c | sk->dummy_th.dest = 0; |
sk | 1236 | net/tcp/sock.c | volatile struct sock *sk; |
sk | 1239 | net/tcp/sock.c | sk = sock->data; |
sk | 1240 | net/tcp/sock.c | if (sk == NULL) |
sk | 1246 | net/tcp/sock.c | if (sk->state == TCP_ESTABLISHED) |
sk | 1254 | net/tcp/sock.c | if (sk->state == TCP_SYN_SENT || sk->state == TCP_SYN_RECV) |
sk | 1257 | net/tcp/sock.c | if (sk->err) return (-sk->err); |
sk | 1264 | net/tcp/sock.c | if (sk->num == 0) |
sk | 1266 | net/tcp/sock.c | sk->num = get_new_socknum (sk->prot, 0); |
sk | 1267 | net/tcp/sock.c | if (sk->num == 0) return (-EAGAIN); |
sk | 1268 | net/tcp/sock.c | put_sock (sk->num, sk); |
sk | 1269 | net/tcp/sock.c | sk->dummy_th.source = net16(sk->num); |
sk | 1272 | net/tcp/sock.c | if (sk->prot->connect == NULL) |
sk | 1275 | net/tcp/sock.c | err = sk->prot->connect (sk, (struct sockaddr_in *)uaddr, addr_len); |
sk | 1279 | net/tcp/sock.c | if (sk->state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) |
sk | 1284 | net/tcp/sock.c | while (sk->state == TCP_SYN_SENT || sk->state == TCP_SYN_RECV) |
sk | 1286 | net/tcp/sock.c | interruptible_sleep_on (sk->sleep); |
sk | 1296 | net/tcp/sock.c | if (sk->state != TCP_ESTABLISHED && sk->err) |
sk | 1299 | net/tcp/sock.c | return (-sk->err); |
sk | 1387 | net/tcp/sock.c | volatile struct sock *sk; |
sk | 1393 | net/tcp/sock.c | sk = sock->data; |
sk | 1394 | net/tcp/sock.c | if (sk == NULL) |
sk | 1401 | net/tcp/sock.c | if (!tcp_connected(sk->state)) |
sk | 1403 | net/tcp/sock.c | sin.sin_port = sk->dummy_th.dest; |
sk | 1404 | net/tcp/sock.c | sin.sin_addr.s_addr = sk->daddr; |
sk | 1408 | net/tcp/sock.c | sin.sin_port = sk->dummy_th.source; |
sk | 1409 | net/tcp/sock.c | if (sk->saddr == 0) |
sk | 1412 | net/tcp/sock.c | sin.sin_addr.s_addr = sk->saddr; |
sk | 1425 | net/tcp/sock.c | volatile struct sock *sk; |
sk | 1426 | net/tcp/sock.c | sk = sock->data; |
sk | 1427 | net/tcp/sock.c | if (sk == NULL) |
sk | 1434 | net/tcp/sock.c | if (sk->num == 0) |
sk | 1436 | net/tcp/sock.c | sk->num = get_new_socknum (sk->prot, 0); |
sk | 1437 | net/tcp/sock.c | if (sk->num == 0) return (-EAGAIN); |
sk | 1438 | net/tcp/sock.c | put_sock (sk->num, sk); |
sk | 1439 | net/tcp/sock.c | sk->dummy_th.source = net16(sk->num); |
sk | 1442 | net/tcp/sock.c | return (sk->prot->read (sk, ubuf, size, noblock,0)); |
sk | 1449 | net/tcp/sock.c | volatile struct sock *sk; |
sk | 1450 | net/tcp/sock.c | sk = sock->data; |
sk | 1451 | net/tcp/sock.c | if (sk == NULL) |
sk | 1458 | net/tcp/sock.c | if (sk->num == 0) |
sk | 1460 | net/tcp/sock.c | sk->num = get_new_socknum (sk->prot, 0); |
sk | 1461 | net/tcp/sock.c | if (sk->num == 0) return (-EAGAIN); |
sk | 1462 | net/tcp/sock.c | put_sock (sk->num, sk); |
sk | 1463 | net/tcp/sock.c | sk->dummy_th.source = net16(sk->num); |
sk | 1466 | net/tcp/sock.c | return (sk->prot->read (sk, ubuf, size, noblock, flags)); |
sk | 1472 | net/tcp/sock.c | volatile struct sock *sk; |
sk | 1473 | net/tcp/sock.c | sk = sock->data; |
sk | 1474 | net/tcp/sock.c | if (sk == NULL) |
sk | 1479 | net/tcp/sock.c | if (sk->shutdown & SEND_SHUTDOWN) |
sk | 1486 | net/tcp/sock.c | if (sk->num == 0) |
sk | 1488 | net/tcp/sock.c | sk->num = get_new_socknum (sk->prot, 0); |
sk | 1489 | net/tcp/sock.c | if (sk->num == 0) return (-EAGAIN); |
sk | 1490 | net/tcp/sock.c | put_sock (sk->num, sk); |
sk | 1491 | net/tcp/sock.c | sk->dummy_th.source = net16(sk->num); |
sk | 1494 | net/tcp/sock.c | return (sk->prot->write (sk, ubuf, size, noblock, 0)); |
sk | 1502 | net/tcp/sock.c | volatile struct sock *sk; |
sk | 1503 | net/tcp/sock.c | sk = sock->data; |
sk | 1504 | net/tcp/sock.c | if (sk == NULL) |
sk | 1509 | net/tcp/sock.c | if (sk->shutdown & SEND_SHUTDOWN) |
sk | 1516 | net/tcp/sock.c | if (sk->num == 0) |
sk | 1518 | net/tcp/sock.c | sk->num = get_new_socknum (sk->prot, 0); |
sk | 1519 | net/tcp/sock.c | if (sk->num == 0) return (-EAGAIN); |
sk | 1520 | net/tcp/sock.c | put_sock (sk->num, sk); |
sk | 1521 | net/tcp/sock.c | sk->dummy_th.source = net16(sk->num); |
sk | 1524 | net/tcp/sock.c | return (sk->prot->write (sk, ubuf, size, noblock, flags)); |
sk | 1532 | net/tcp/sock.c | volatile struct sock *sk; |
sk | 1533 | net/tcp/sock.c | sk = sock->data; |
sk | 1534 | net/tcp/sock.c | if (sk == NULL) |
sk | 1539 | net/tcp/sock.c | if (sk->shutdown & SEND_SHUTDOWN) |
sk | 1545 | net/tcp/sock.c | if (sk->prot->sendto == NULL) return (-EOPNOTSUPP); |
sk | 1548 | net/tcp/sock.c | if (sk->num == 0) |
sk | 1550 | net/tcp/sock.c | sk->num = get_new_socknum (sk->prot, 0); |
sk | 1551 | net/tcp/sock.c | if (sk->num == 0) return (-EAGAIN); |
sk | 1552 | net/tcp/sock.c | put_sock (sk->num, sk); |
sk | 1553 | net/tcp/sock.c | sk->dummy_th.source = net16(sk->num); |
sk | 1556 | net/tcp/sock.c | return (sk->prot->sendto (sk, ubuf, size, noblock, flags, |
sk | 1564 | net/tcp/sock.c | volatile struct sock *sk; |
sk | 1565 | net/tcp/sock.c | sk = sock->data; |
sk | 1566 | net/tcp/sock.c | if (sk == NULL) |
sk | 1572 | net/tcp/sock.c | if (sk->prot->recvfrom == NULL) return (-EOPNOTSUPP); |
sk | 1575 | net/tcp/sock.c | if (sk->num == 0) |
sk | 1577 | net/tcp/sock.c | sk->num = get_new_socknum (sk->prot, 0); |
sk | 1578 | net/tcp/sock.c | if (sk->num == 0) return (-EAGAIN); |
sk | 1579 | net/tcp/sock.c | put_sock (sk->num, sk); |
sk | 1580 | net/tcp/sock.c | sk->dummy_th.source = net16(sk->num); |
sk | 1583 | net/tcp/sock.c | return (sk->prot->recvfrom (sk, ubuf, size, noblock, flags, |
sk | 1590 | net/tcp/sock.c | volatile struct sock *sk; |
sk | 1597 | net/tcp/sock.c | sk = sock->data; |
sk | 1598 | net/tcp/sock.c | if (sk == NULL) |
sk | 1603 | net/tcp/sock.c | if (sock->state == SS_CONNECTING && sk->state == TCP_ESTABLISHED) |
sk | 1606 | net/tcp/sock.c | if (!tcp_connected(sk->state)) return (-ENOTCONN); |
sk | 1607 | net/tcp/sock.c | sk->shutdown |= how; |
sk | 1608 | net/tcp/sock.c | if (sk->prot->shutdown) |
sk | 1609 | net/tcp/sock.c | sk->prot->shutdown (sk, how); |
sk | 1616 | net/tcp/sock.c | volatile struct sock *sk; |
sk | 1617 | net/tcp/sock.c | sk = sock->data; |
sk | 1618 | net/tcp/sock.c | if (sk == NULL) |
sk | 1624 | net/tcp/sock.c | if (sk->prot->select == NULL) |
sk | 1629 | net/tcp/sock.c | return (sk->prot->select(sk, sel_type, wait)); |
sk | 1637 | net/tcp/sock.c | volatile struct sock *sk; |
sk | 1638 | net/tcp/sock.c | sk = sock->data; |
sk | 1639 | net/tcp/sock.c | if (sk == NULL) |
sk | 1672 | net/tcp/sock.c | sk->proc = user; |
sk | 1680 | net/tcp/sock.c | put_fs_long (sk->proc, (void *)arg); |
sk | 1685 | net/tcp/sock.c | if (!sk->prot->ioctl) |
sk | 1687 | net/tcp/sock.c | return (sk->prot->ioctl (sk, cmd, arg)); |
sk | 1692 | net/tcp/sock.c | sock_wmalloc(volatile struct sock *sk, unsigned long size, int force, |
sk | 1695 | net/tcp/sock.c | if (sk) |
sk | 1697 | net/tcp/sock.c | if (sk->wmem_alloc + size < SK_WMEM_MAX || force) |
sk | 1700 | net/tcp/sock.c | sk->wmem_alloc+= size; |
sk | 1705 | net/tcp/sock.c | sk, size, force, priority)); |
sk | 1712 | net/tcp/sock.c | sock_rmalloc(volatile struct sock *sk, unsigned long size, int force, |
sk | 1715 | net/tcp/sock.c | if (sk ) |
sk | 1717 | net/tcp/sock.c | if (sk->rmem_alloc + size < SK_RMEM_MAX || force) |
sk | 1720 | net/tcp/sock.c | sk->rmem_alloc+= size; |
sk | 1725 | net/tcp/sock.c | sk,size,force, priority)); |
sk | 1733 | net/tcp/sock.c | sock_rspace (volatile struct sock *sk) |
sk | 1736 | net/tcp/sock.c | if (sk != NULL) |
sk | 1738 | net/tcp/sock.c | if (sk->rmem_alloc >= SK_RMEM_MAX-2*MIN_WINDOW) return (0); |
sk | 1739 | net/tcp/sock.c | amt = min ((SK_RMEM_MAX-sk->rmem_alloc)/2-MIN_WINDOW, MAX_WINDOW); |
sk | 1747 | net/tcp/sock.c | sock_wspace (volatile struct sock *sk) |
sk | 1749 | net/tcp/sock.c | if (sk != NULL) |
sk | 1751 | net/tcp/sock.c | if (sk->shutdown & SEND_SHUTDOWN) return (0); |
sk | 1752 | net/tcp/sock.c | if (sk->wmem_alloc >= SK_WMEM_MAX) return (0); |
sk | 1753 | net/tcp/sock.c | return (SK_WMEM_MAX-sk->wmem_alloc ); |
sk | 1760 | net/tcp/sock.c | sock_wfree (volatile struct sock *sk, void *mem, unsigned long size) |
sk | 1762 | net/tcp/sock.c | MPRINTK (("sock_wfree (sk=%X, mem=%X, size=%d)\n",sk, mem, size)); |
sk | 1764 | net/tcp/sock.c | if (sk) |
sk | 1766 | net/tcp/sock.c | sk->wmem_alloc -= size; |
sk | 1768 | net/tcp/sock.c | if (!sk->dead) wake_up(sk->sleep); |
sk | 1769 | net/tcp/sock.c | if (sk->destroy && sk->wmem_alloc == 0 && sk->rmem_alloc == 0) |
sk | 1771 | net/tcp/sock.c | MPRINTK (("recovered lost memory, destroying sock = %X\n",sk)); |
sk | 1772 | net/tcp/sock.c | delete_timer ((struct timer *)&sk->time_wait); |
sk | 1773 | net/tcp/sock.c | kfree_s ((void *)sk, sizeof (*sk)); |
sk | 1780 | net/tcp/sock.c | sock_rfree (volatile struct sock *sk, void *mem, unsigned long size) |
sk | 1782 | net/tcp/sock.c | MPRINTK (("sock_rfree (sk=%X, mem=%X, size=%d)\n",sk, mem, size)); |
sk | 1784 | net/tcp/sock.c | if (sk) |
sk | 1786 | net/tcp/sock.c | sk->rmem_alloc -= size; |
sk | 1787 | net/tcp/sock.c | if (sk->destroy && sk->wmem_alloc == 0 && sk->rmem_alloc == 0) |
sk | 1789 | net/tcp/sock.c | delete_timer ((struct timer *)&sk->time_wait); |
sk | 1790 | net/tcp/sock.c | kfree_s ((void *)sk, sizeof (*sk)); |
sk | 1831 | net/tcp/sock.c | void release_sock (volatile struct sock *sk) |
sk | 1833 | net/tcp/sock.c | if (!sk) |
sk | 1839 | net/tcp/sock.c | if (!sk->prot) |
sk | 1845 | net/tcp/sock.c | if (sk->blog) return; |
sk | 1849 | net/tcp/sock.c | sk->inuse = 1; |
sk | 1850 | net/tcp/sock.c | while (sk->back_log != NULL) |
sk | 1853 | net/tcp/sock.c | sk->blog = 1; |
sk | 1854 | net/tcp/sock.c | skb = (struct sk_buff *)sk->back_log; |
sk | 1858 | net/tcp/sock.c | sk->back_log = skb->next; |
sk | 1864 | net/tcp/sock.c | sk->back_log = NULL; |
sk | 1867 | net/tcp/sock.c | PRINTK (("sk->back_log = %X\n",sk->back_log)); |
sk | 1868 | net/tcp/sock.c | if (sk->prot->rcv) |
sk | 1869 | net/tcp/sock.c | sk->prot->rcv(skb, skb->dev, sk->opt, |
sk | 1872 | net/tcp/sock.c | (struct ip_protocol *)sk->pair); |
sk | 1875 | net/tcp/sock.c | sk->blog = 0; |
sk | 1876 | net/tcp/sock.c | sk->inuse = 0; |
sk | 1878 | net/tcp/sock.c | if (sk->dead && sk->state == TCP_CLOSE) |
sk | 1881 | net/tcp/sock.c | sk->time_wait.len = min (sk->rtt * 2, TCP_DONE_TIME); |
sk | 1882 | net/tcp/sock.c | sk->timeout = TIME_DONE; |
sk | 1883 | net/tcp/sock.c | reset_timer ((struct timer *)&sk->time_wait); |
sk | 110 | net/tcp/sock.h | void *(*wmalloc)(volatile struct sock *sk, unsigned long size, int force, |
sk | 112 | net/tcp/sock.h | void *(*rmalloc)(volatile struct sock *sk, unsigned long size, int force, |
sk | 114 | net/tcp/sock.h | void (*wfree)(volatile struct sock *sk, void *mem, unsigned long size); |
sk | 115 | net/tcp/sock.h | void (*rfree)(volatile struct sock *sk, void *mem, unsigned long size); |
sk | 116 | net/tcp/sock.h | unsigned long (*rspace)(volatile struct sock *sk); |
sk | 117 | net/tcp/sock.h | unsigned long (*wspace)(volatile struct sock *sk); |
sk | 118 | net/tcp/sock.h | void (*close)(volatile struct sock *sk, int timeout); |
sk | 119 | net/tcp/sock.h | int (*read)(volatile struct sock *sk, unsigned char *to, int len, |
sk | 121 | net/tcp/sock.h | int (*write)(volatile struct sock *sk, unsigned char *to, int len, |
sk | 123 | net/tcp/sock.h | int (*sendto) (volatile struct sock *sk, unsigned char *from, int len, |
sk | 126 | net/tcp/sock.h | int (*recvfrom) (volatile struct sock *sk, unsigned char *from, int len, |
sk | 132 | net/tcp/sock.h | int (*connect) (volatile struct sock *sk, struct sockaddr_in *usin, |
sk | 134 | net/tcp/sock.h | volatile struct sock *(*accept) (volatile struct sock *sk, int flags); |
sk | 135 | net/tcp/sock.h | void (*queue_xmit) (volatile struct sock *sk, struct device *dev, |
sk | 137 | net/tcp/sock.h | void (*retransmit) (volatile struct sock *sk, int all); |
sk | 138 | net/tcp/sock.h | void (*write_wakeup) (volatile struct sock *sk); |
sk | 139 | net/tcp/sock.h | void (*read_wakeup) (volatile struct sock *sk); |
sk | 143 | net/tcp/sock.h | int (*select)(volatile struct sock *sk, int which, select_table *wait); |
sk | 144 | net/tcp/sock.h | int (*ioctl) (volatile struct sock *sk, int cmd, unsigned long arg); |
sk | 145 | net/tcp/sock.h | int (*init) (volatile struct sock *sk); |
sk | 146 | net/tcp/sock.h | void (*shutdown) (volatile struct sock *sk, int how); |
sk | 169 | net/tcp/sock.h | volatile struct sock *sk; |
sk | 198 | net/tcp/sock.h | void destroy_sock (volatile struct sock *sk); |
sk | 201 | net/tcp/sock.h | void release_sock (volatile struct sock *sk); |
sk | 206 | net/tcp/sock.h | void *sock_wmalloc(volatile struct sock *sk, unsigned long size, int force, |
sk | 208 | net/tcp/sock.h | void *sock_rmalloc(volatile struct sock *sk, unsigned long size, int force, |
sk | 210 | net/tcp/sock.h | void sock_wfree(volatile struct sock *sk, void *mem, unsigned long size); |
sk | 211 | net/tcp/sock.h | void sock_rfree(volatile struct sock *sk, void *mem, unsigned long size); |
sk | 212 | net/tcp/sock.h | unsigned long sock_rspace(volatile struct sock *sk); |
sk | 213 | net/tcp/sock.h | unsigned long sock_wspace(volatile struct sock *sk); |
sk | 144 | net/tcp/tcp.c | get_firstr(volatile struct sock *sk) |
sk | 147 | net/tcp/tcp.c | skb = sk->rqueue; |
sk | 149 | net/tcp/tcp.c | sk->rqueue = (struct sk_buff *)skb->next; |
sk | 150 | net/tcp/tcp.c | if (sk->rqueue == skb) |
sk | 152 | net/tcp/tcp.c | sk->rqueue = NULL; |
sk | 156 | net/tcp/tcp.c | sk->rqueue->prev=skb->prev; |
sk | 157 | net/tcp/tcp.c | sk->rqueue->prev->next = sk->rqueue; |
sk | 174 | net/tcp/tcp.c | tcp_time_wait (volatile struct sock *sk) |
sk | 176 | net/tcp/tcp.c | sk->state = TCP_TIME_WAIT; |
sk | 177 | net/tcp/tcp.c | sk->shutdown = SHUTDOWN_MASK; |
sk | 178 | net/tcp/tcp.c | if (!sk->dead) wake_up (sk->sleep); |
sk | 179 | net/tcp/tcp.c | sk->time_wait.len = TCP_TIMEWAIT_LEN; |
sk | 180 | net/tcp/tcp.c | sk->timeout = TIME_CLOSE; |
sk | 181 | net/tcp/tcp.c | reset_timer ((struct timer *)&sk->time_wait); |
sk | 185 | net/tcp/tcp.c | tcp_retransmit (volatile struct sock *sk, int all) |
sk | 189 | net/tcp/tcp.c | ip_retransmit (sk, all); |
sk | 192 | net/tcp/tcp.c | sk->rtt *= 2; /* exponential back off. */ |
sk | 193 | net/tcp/tcp.c | if (sk->cong_window > 1) |
sk | 194 | net/tcp/tcp.c | sk->cong_window = sk->cong_window / 2; |
sk | 195 | net/tcp/tcp.c | sk->exp_growth = 0; |
sk | 198 | net/tcp/tcp.c | ip_retransmit (sk, all); |
sk | 214 | net/tcp/tcp.c | volatile struct sock *sk; |
sk | 220 | net/tcp/tcp.c | sk = get_sock (&tcp_prot, net16(th->dest), saddr, th->source, daddr); |
sk | 223 | net/tcp/tcp.c | if (sk == NULL) return; |
sk | 230 | net/tcp/tcp.c | if (sk->cong_window > 1) |
sk | 231 | net/tcp/tcp.c | sk->cong_window --; |
sk | 237 | net/tcp/tcp.c | sk->err = icmp_err_convert[err & 0xff].errno; |
sk | 242 | net/tcp/tcp.c | if (sk->state == TCP_SYN_SENT) |
sk | 244 | net/tcp/tcp.c | sk->state = TCP_CLOSE; |
sk | 245 | net/tcp/tcp.c | sk->prot->close(sk, 0); |
sk | 254 | net/tcp/tcp.c | tcp_readable (volatile struct sock *sk) |
sk | 262 | net/tcp/tcp.c | PRINTK (("tcp_readable (sk=%X)\n", sk)); |
sk | 264 | net/tcp/tcp.c | if (sk == NULL || sk->rqueue == NULL) return (0); |
sk | 266 | net/tcp/tcp.c | counted = sk->copied_seq+1; |
sk | 268 | net/tcp/tcp.c | skb = (struct sk_buff *)sk->rqueue->next; |
sk | 294 | net/tcp/tcp.c | } while (skb != sk->rqueue->next); |
sk | 301 | net/tcp/tcp.c | tcp_select (volatile struct sock *sk, int sel_type, select_table *wait) |
sk | 303 | net/tcp/tcp.c | sk->inuse = 1; |
sk | 305 | net/tcp/tcp.c | sk, sel_type, wait)); |
sk | 309 | net/tcp/tcp.c | select_wait (sk->sleep, wait); |
sk | 310 | net/tcp/tcp.c | if (sk->rqueue != NULL) |
sk | 312 | net/tcp/tcp.c | if (sk->state == TCP_LISTEN || tcp_readable(sk)) |
sk | 314 | net/tcp/tcp.c | release_sock (sk); |
sk | 319 | net/tcp/tcp.c | if (sk->shutdown & RCV_SHUTDOWN) |
sk | 321 | net/tcp/tcp.c | release_sock (sk); |
sk | 326 | net/tcp/tcp.c | release_sock (sk); |
sk | 331 | net/tcp/tcp.c | select_wait (sk->sleep, wait); |
sk | 333 | net/tcp/tcp.c | if (sk->shutdown & SEND_SHUTDOWN) |
sk | 337 | net/tcp/tcp.c | release_sock (sk); |
sk | 343 | net/tcp/tcp.c | if (sk->prot->wspace(sk) >= sk->mtu) |
sk | 345 | net/tcp/tcp.c | release_sock (sk); |
sk | 347 | net/tcp/tcp.c | if (sk->state == TCP_SYN_RECV || sk->state == TCP_SYN_SENT) |
sk | 356 | net/tcp/tcp.c | sk->wmem_alloc, sk->packets_out, |
sk | 357 | net/tcp/tcp.c | sk->wback, sk->wfront, |
sk | 358 | net/tcp/tcp.c | sk->send_seq, sk->window_seq)); |
sk | 360 | net/tcp/tcp.c | release_sock (sk); |
sk | 365 | net/tcp/tcp.c | select_wait(sk->sleep,wait); |
sk | 366 | net/tcp/tcp.c | if (sk->err) |
sk | 368 | net/tcp/tcp.c | release_sock (sk); |
sk | 371 | net/tcp/tcp.c | release_sock (sk); |
sk | 375 | net/tcp/tcp.c | release_sock (sk); |
sk | 380 | net/tcp/tcp.c | tcp_ioctl (volatile struct sock *sk, int cmd, unsigned long arg) |
sk | 382 | net/tcp/tcp.c | PRINTK (("tcp_ioctl (sk=%X, cmd = %d, arg=%X)\n", sk, cmd, arg)); |
sk | 393 | net/tcp/tcp.c | if (sk->state == TCP_LISTEN) |
sk | 397 | net/tcp/tcp.c | sk->inuse = 1; |
sk | 398 | net/tcp/tcp.c | if (sk->rqueue != NULL) |
sk | 400 | net/tcp/tcp.c | amount = tcp_readable(sk); |
sk | 402 | net/tcp/tcp.c | release_sock (sk); |
sk | 414 | net/tcp/tcp.c | sk->inuse = 1; |
sk | 415 | net/tcp/tcp.c | if (sk->rqueue != NULL) |
sk | 417 | net/tcp/tcp.c | skb = (struct sk_buff *)sk->rqueue->next; |
sk | 418 | net/tcp/tcp.c | if (sk->copied_seq+1 == skb->h.th->seq && skb->h.th->urg) |
sk | 421 | net/tcp/tcp.c | release_sock (sk); |
sk | 430 | net/tcp/tcp.c | if (sk->state == TCP_LISTEN) |
sk | 432 | net/tcp/tcp.c | amount = sk->prot->wspace(sk)/2; |
sk | 513 | net/tcp/tcp.c | unsigned long daddr, int len, volatile struct sock *sk) |
sk | 517 | net/tcp/tcp.c | if (sk && sk->no_check) return; |
sk | 523 | net/tcp/tcp.c | tcp_send_partial(volatile struct sock *sk) |
sk | 527 | net/tcp/tcp.c | if (sk == NULL || sk->send_tmp == NULL) return; |
sk | 529 | net/tcp/tcp.c | skb = sk->send_tmp; |
sk | 531 | net/tcp/tcp.c | tcp_send_check (skb->h.th, sk->saddr, sk->daddr, |
sk | 533 | net/tcp/tcp.c | (unsigned long)(skb+1), sk); |
sk | 535 | net/tcp/tcp.c | skb->h.seq = sk->send_seq; |
sk | 536 | net/tcp/tcp.c | if (after (sk->send_seq , sk->window_seq) || |
sk | 537 | net/tcp/tcp.c | sk->packets_out >= sk->cong_window) |
sk | 540 | net/tcp/tcp.c | sk->cong_window, sk->packets_out)); |
sk | 542 | net/tcp/tcp.c | sk->send_seq, sk->window_seq)); |
sk | 545 | net/tcp/tcp.c | if (sk->wback == NULL) |
sk | 547 | net/tcp/tcp.c | sk->wfront=skb; |
sk | 551 | net/tcp/tcp.c | sk->wback->next = skb; |
sk | 553 | net/tcp/tcp.c | sk->wback = skb; |
sk | 557 | net/tcp/tcp.c | sk->prot->queue_xmit (sk, skb->dev, skb,0); |
sk | 559 | net/tcp/tcp.c | sk->send_tmp = NULL; |
sk | 567 | net/tcp/tcp.c | volatile struct sock *sk, |
sk | 578 | net/tcp/tcp.c | buff=sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC); |
sk | 582 | net/tcp/tcp.c | sk->ack_backlog++; |
sk | 583 | net/tcp/tcp.c | if (sk->timeout != TIME_WRITE && tcp_connected (sk->state)) |
sk | 585 | net/tcp/tcp.c | sk->timeout = TIME_WRITE; |
sk | 586 | net/tcp/tcp.c | sk->time_wait.len = 10; /* got to do it quickly. */ |
sk | 587 | net/tcp/tcp.c | reset_timer ((struct timer *)&sk->time_wait); |
sk | 596 | net/tcp/tcp.c | buff->sk = sk; |
sk | 599 | net/tcp/tcp.c | tmp = sk->prot->build_header (buff, sk->saddr, daddr, &dev, |
sk | 600 | net/tcp/tcp.c | IPPROTO_TCP, sk->opt, MAX_ACK_SIZE); |
sk | 603 | net/tcp/tcp.c | sk->prot->wfree(sk, buff->mem_addr, buff->mem_len); |
sk | 616 | net/tcp/tcp.c | sk->window = sk->prot->rspace(sk); |
sk | 617 | net/tcp/tcp.c | t1->window = net16(sk->window); |
sk | 625 | net/tcp/tcp.c | if (ack == sk->acked_seq) |
sk | 627 | net/tcp/tcp.c | sk->ack_backlog = 0; |
sk | 628 | net/tcp/tcp.c | sk->bytes_rcv = 0; |
sk | 629 | net/tcp/tcp.c | sk->ack_timed = 0; |
sk | 630 | net/tcp/tcp.c | if (sk->send_head == NULL && |
sk | 631 | net/tcp/tcp.c | sk->wfront == NULL) |
sk | 633 | net/tcp/tcp.c | delete_timer((struct timer *)&sk->time_wait); |
sk | 634 | net/tcp/tcp.c | sk->timeout = 0; |
sk | 640 | net/tcp/tcp.c | tcp_send_check (t1, sk->saddr, daddr, sizeof (*t1), sk); |
sk | 641 | net/tcp/tcp.c | sk->prot->queue_xmit(sk, dev, buff, 1); |
sk | 646 | net/tcp/tcp.c | tcp_build_header(struct tcp_header *th, volatile struct sock *sk, int push) |
sk | 650 | net/tcp/tcp.c | memcpy (th,(void *) &(sk->dummy_th), sizeof (*th)); |
sk | 651 | net/tcp/tcp.c | th->seq = net32(sk->send_seq); |
sk | 656 | net/tcp/tcp.c | sk->ack_backlog = 0; |
sk | 657 | net/tcp/tcp.c | sk->bytes_rcv = 0; |
sk | 658 | net/tcp/tcp.c | sk->ack_timed = 0; |
sk | 659 | net/tcp/tcp.c | th->ack_seq = net32(sk->acked_seq); |
sk | 660 | net/tcp/tcp.c | sk->window = sk->prot->rspace(sk); |
sk | 661 | net/tcp/tcp.c | th->window = net16(sk->window); |
sk | 670 | net/tcp/tcp.c | tcp_write (volatile struct sock *sk, unsigned char *from, |
sk | 682 | net/tcp/tcp.c | sk, from, len, nonblock, flags)); |
sk | 684 | net/tcp/tcp.c | prot = sk->prot; |
sk | 688 | net/tcp/tcp.c | if (sk->err) |
sk | 691 | net/tcp/tcp.c | tmp = -sk->err; |
sk | 692 | net/tcp/tcp.c | sk->err = 0; |
sk | 698 | net/tcp/tcp.c | sk->inuse = 1; /* no one else will use this socket. */ |
sk | 699 | net/tcp/tcp.c | if (sk->shutdown & SEND_SHUTDOWN) |
sk | 701 | net/tcp/tcp.c | release_sock (sk); |
sk | 702 | net/tcp/tcp.c | sk->err = EPIPE; |
sk | 704 | net/tcp/tcp.c | sk->err = 0; |
sk | 708 | net/tcp/tcp.c | while (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) |
sk | 711 | net/tcp/tcp.c | if (sk->err) |
sk | 714 | net/tcp/tcp.c | tmp = -sk->err; |
sk | 715 | net/tcp/tcp.c | sk->err = 0; |
sk | 719 | net/tcp/tcp.c | if (sk->state != TCP_SYN_SENT && |
sk | 720 | net/tcp/tcp.c | sk->state != TCP_SYN_RECV) |
sk | 722 | net/tcp/tcp.c | release_sock (sk); |
sk | 726 | net/tcp/tcp.c | if (sk->err) |
sk | 728 | net/tcp/tcp.c | tmp = -sk->err; |
sk | 729 | net/tcp/tcp.c | sk->err = 0; |
sk | 733 | net/tcp/tcp.c | if (sk->keepopen) |
sk | 742 | net/tcp/tcp.c | release_sock (sk); |
sk | 754 | net/tcp/tcp.c | release_sock (sk); |
sk | 756 | net/tcp/tcp.c | if (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT && |
sk | 757 | net/tcp/tcp.c | sk->err == 0) |
sk | 759 | net/tcp/tcp.c | interruptible_sleep_on (sk->sleep); |
sk | 769 | net/tcp/tcp.c | sk->inuse = 1; |
sk | 773 | net/tcp/tcp.c | if (sk->send_tmp != NULL) |
sk | 777 | net/tcp/tcp.c | skb = sk->send_tmp; |
sk | 780 | net/tcp/tcp.c | copy = min (sk->mss - skb->len + 128 + prot->max_header, len); |
sk | 791 | net/tcp/tcp.c | sk->send_seq += copy; |
sk | 795 | net/tcp/tcp.c | (unsigned long)(skb+1) >= sk->mss |
sk | 798 | net/tcp/tcp.c | tcp_send_partial (sk); |
sk | 807 | net/tcp/tcp.c | copy = min (sk->mtu, diff(sk->window_seq, sk->send_seq)); |
sk | 810 | net/tcp/tcp.c | if (copy < 200 || copy > sk->mtu) copy = sk->mtu; |
sk | 814 | net/tcp/tcp.c | if (sk->packets_out && copy < sk->mss && !(flags & MSG_OOB)) |
sk | 817 | net/tcp/tcp.c | release_sock (sk); |
sk | 818 | net/tcp/tcp.c | skb=prot->wmalloc (sk, |
sk | 819 | net/tcp/tcp.c | sk->mss + 128 + prot->max_header + sizeof (*skb), |
sk | 821 | net/tcp/tcp.c | sk->inuse = 1; |
sk | 822 | net/tcp/tcp.c | sk->send_tmp = skb; |
sk | 824 | net/tcp/tcp.c | skb->mem_len = sk->mss + 128 + prot->max_header+sizeof (*skb); |
sk | 829 | net/tcp/tcp.c | release_sock (sk); |
sk | 830 | net/tcp/tcp.c | skb=prot->wmalloc (sk, copy + prot->max_header+sizeof (*skb),0, |
sk | 832 | net/tcp/tcp.c | sk->inuse = 1; |
sk | 842 | net/tcp/tcp.c | release_sock (sk); |
sk | 849 | net/tcp/tcp.c | tmp = sk->wmem_alloc; |
sk | 850 | net/tcp/tcp.c | release_sock (sk); |
sk | 854 | net/tcp/tcp.c | if (tmp <= sk->wmem_alloc |
sk | 855 | net/tcp/tcp.c | && (sk->state == TCP_ESTABLISHED || sk->state == TCP_CLOSE_WAIT ) |
sk | 856 | net/tcp/tcp.c | && sk->err == 0) |
sk | 858 | net/tcp/tcp.c | interruptible_sleep_on (sk->sleep); |
sk | 867 | net/tcp/tcp.c | sk->inuse = 1; |
sk | 874 | net/tcp/tcp.c | skb->sk = sk; |
sk | 882 | net/tcp/tcp.c | tmp = prot->build_header (skb, sk->saddr, sk->daddr, &dev, |
sk | 883 | net/tcp/tcp.c | IPPROTO_TCP, sk->opt, skb->mem_len); |
sk | 886 | net/tcp/tcp.c | prot->wfree (sk, skb->mem_addr, skb->mem_len); |
sk | 887 | net/tcp/tcp.c | release_sock (sk); |
sk | 896 | net/tcp/tcp.c | tmp = tcp_build_header((struct tcp_header *)buff, sk, len-copy); |
sk | 900 | net/tcp/tcp.c | prot->wfree (sk, skb->mem_addr, skb->mem_len); |
sk | 901 | net/tcp/tcp.c | release_sock (sk); |
sk | 920 | net/tcp/tcp.c | sk->send_seq += copy; |
sk | 922 | net/tcp/tcp.c | if (sk->send_tmp != NULL) |
sk | 927 | net/tcp/tcp.c | tcp_send_check ((struct tcp_header *)buff, sk->saddr, sk->daddr, |
sk | 928 | net/tcp/tcp.c | copy +sizeof (struct tcp_header), sk); |
sk | 931 | net/tcp/tcp.c | skb->h.seq = sk->send_seq; |
sk | 932 | net/tcp/tcp.c | if (after (sk->send_seq , sk->window_seq) || |
sk | 933 | net/tcp/tcp.c | sk->packets_out >= sk->cong_window) |
sk | 936 | net/tcp/tcp.c | sk->cong_window, sk->packets_out)); |
sk | 938 | net/tcp/tcp.c | sk->send_seq, sk->window_seq)); |
sk | 941 | net/tcp/tcp.c | if (sk->wback == NULL) |
sk | 943 | net/tcp/tcp.c | sk->wfront=skb; |
sk | 947 | net/tcp/tcp.c | sk->wback->next = skb; |
sk | 949 | net/tcp/tcp.c | sk->wback = skb; |
sk | 953 | net/tcp/tcp.c | prot->queue_xmit (sk, dev, skb,0); |
sk | 956 | net/tcp/tcp.c | sk->err = 0; |
sk | 957 | net/tcp/tcp.c | release_sock (sk); |
sk | 963 | net/tcp/tcp.c | tcp_sendto (volatile struct sock *sk, unsigned char *from, |
sk | 973 | net/tcp/tcp.c | if (sin.sin_port != sk->dummy_th.dest) |
sk | 975 | net/tcp/tcp.c | if (sin.sin_addr.s_addr != sk->daddr) |
sk | 977 | net/tcp/tcp.c | return (tcp_write (sk, from, len, nonblock, flags)); |
sk | 981 | net/tcp/tcp.c | tcp_read_wakeup(volatile struct sock *sk) |
sk | 988 | net/tcp/tcp.c | if (!sk->ack_backlog ) return; |
sk | 997 | net/tcp/tcp.c | buff=sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC); |
sk | 1001 | net/tcp/tcp.c | sk->timeout = TIME_WRITE; |
sk | 1002 | net/tcp/tcp.c | sk->time_wait.len = 10; |
sk | 1003 | net/tcp/tcp.c | reset_timer((struct timer *) &sk->time_wait); |
sk | 1011 | net/tcp/tcp.c | buff->sk = sk; |
sk | 1014 | net/tcp/tcp.c | tmp = sk->prot->build_header (buff, sk->saddr, sk->daddr, &dev, |
sk | 1015 | net/tcp/tcp.c | IPPROTO_TCP, sk->opt, MAX_ACK_SIZE); |
sk | 1018 | net/tcp/tcp.c | sk->prot->wfree(sk, buff->mem_addr, buff->mem_len); |
sk | 1025 | net/tcp/tcp.c | memcpy (t1,(void *) &sk->dummy_th, sizeof (*t1)); |
sk | 1026 | net/tcp/tcp.c | t1->seq = net32(sk->send_seq); |
sk | 1034 | net/tcp/tcp.c | sk->ack_backlog = 0; |
sk | 1035 | net/tcp/tcp.c | sk->bytes_rcv = 0; |
sk | 1036 | net/tcp/tcp.c | sk->window = sk->prot->rspace(sk); |
sk | 1037 | net/tcp/tcp.c | t1->window = net16(sk->window); |
sk | 1038 | net/tcp/tcp.c | t1->ack_seq = net32(sk->acked_seq); |
sk | 1040 | net/tcp/tcp.c | tcp_send_check (t1, sk->saddr, sk->daddr, sizeof (*t1), sk); |
sk | 1041 | net/tcp/tcp.c | sk->prot->queue_xmit(sk, dev, buff, 1); |
sk | 1050 | net/tcp/tcp.c | cleanup_rbuf (volatile struct sock *sk) |
sk | 1052 | net/tcp/tcp.c | PRINTK (("cleaning rbuf for sk=%X\n",sk)); |
sk | 1055 | net/tcp/tcp.c | while (sk->rqueue != NULL ) |
sk | 1058 | net/tcp/tcp.c | skb=(struct sk_buff *)sk->rqueue->next; |
sk | 1060 | net/tcp/tcp.c | if (sk->rqueue == skb) |
sk | 1062 | net/tcp/tcp.c | sk->rqueue = NULL; |
sk | 1069 | net/tcp/tcp.c | skb->sk = sk; |
sk | 1077 | net/tcp/tcp.c | sk->window - sk->bytes_rcv, sk->prot->rspace(sk))); |
sk | 1087 | net/tcp/tcp.c | sk->ack_backlog ++; |
sk | 1088 | net/tcp/tcp.c | if ((sk->prot->rspace(sk) > |
sk | 1089 | net/tcp/tcp.c | (sk->window - sk->bytes_rcv + sk->mtu))) |
sk | 1092 | net/tcp/tcp.c | tcp_read_wakeup (sk); |
sk | 1097 | net/tcp/tcp.c | if ( before (jiffies + TCP_ACK_TIME, sk->time_wait.when)) |
sk | 1099 | net/tcp/tcp.c | sk->time_wait.len = TCP_ACK_TIME; |
sk | 1100 | net/tcp/tcp.c | sk->timeout = TIME_WRITE; |
sk | 1101 | net/tcp/tcp.c | reset_timer ((struct timer *)&sk->time_wait); |
sk | 1108 | net/tcp/tcp.c | tcp_read_urg(volatile struct sock * sk, int nonblock, |
sk | 1115 | net/tcp/tcp.c | sk, to, len, flags)); |
sk | 1119 | net/tcp/tcp.c | sk->inuse = 1; |
sk | 1120 | net/tcp/tcp.c | while (sk->urg==0 || sk->rqueue == NULL) |
sk | 1122 | net/tcp/tcp.c | if (sk->err) |
sk | 1125 | net/tcp/tcp.c | release_sock (sk); |
sk | 1127 | net/tcp/tcp.c | tmp = -sk->err; |
sk | 1128 | net/tcp/tcp.c | sk->err = 0; |
sk | 1132 | net/tcp/tcp.c | if (sk->state == TCP_CLOSE || sk->done) |
sk | 1134 | net/tcp/tcp.c | release_sock (sk); |
sk | 1136 | net/tcp/tcp.c | if (!sk->done) |
sk | 1138 | net/tcp/tcp.c | sk->done = 1; |
sk | 1144 | net/tcp/tcp.c | if (sk->shutdown & RCV_SHUTDOWN) |
sk | 1146 | net/tcp/tcp.c | release_sock(sk); |
sk | 1148 | net/tcp/tcp.c | sk->done = 1; |
sk | 1154 | net/tcp/tcp.c | release_sock (sk); |
sk | 1160 | net/tcp/tcp.c | release_sock (sk); |
sk | 1162 | net/tcp/tcp.c | if ((sk->urg == 0 || sk->rqueue == NULL) && sk->err == 0 |
sk | 1163 | net/tcp/tcp.c | && !(sk->shutdown & RCV_SHUTDOWN) ) |
sk | 1165 | net/tcp/tcp.c | interruptible_sleep_on (sk->sleep); |
sk | 1174 | net/tcp/tcp.c | sk->inuse = 1; |
sk | 1177 | net/tcp/tcp.c | skb = (struct sk_buff *)sk->rqueue->next; |
sk | 1194 | net/tcp/tcp.c | sk->urg --; |
sk | 1196 | net/tcp/tcp.c | release_sock (sk); |
sk | 1201 | net/tcp/tcp.c | } while (skb != sk->rqueue->next); |
sk | 1203 | net/tcp/tcp.c | sk->urg = 0; |
sk | 1204 | net/tcp/tcp.c | release_sock(sk); |
sk | 1210 | net/tcp/tcp.c | tcp_read (volatile struct sock *sk, unsigned char *to, |
sk | 1225 | net/tcp/tcp.c | if (sk->state == TCP_LISTEN) return (-ENOTCONN); |
sk | 1229 | net/tcp/tcp.c | return (tcp_read_urg (sk, nonblock, to, len, flags)); |
sk | 1232 | net/tcp/tcp.c | sk->inuse = 1; |
sk | 1233 | net/tcp/tcp.c | if (sk->rqueue != NULL) |
sk | 1234 | net/tcp/tcp.c | skb=(struct sk_buff *)sk->rqueue->next; |
sk | 1239 | net/tcp/tcp.c | sk, to, len, nonblock, flags)); |
sk | 1243 | net/tcp/tcp.c | while ( skb == NULL || before (sk->copied_seq+1, skb->h.th->seq) || |
sk | 1250 | net/tcp/tcp.c | cleanup_rbuf(sk); |
sk | 1252 | net/tcp/tcp.c | if (sk->err) |
sk | 1255 | net/tcp/tcp.c | release_sock (sk); |
sk | 1261 | net/tcp/tcp.c | tmp = -sk->err; |
sk | 1262 | net/tcp/tcp.c | sk->err = 0; |
sk | 1266 | net/tcp/tcp.c | if (sk->state == TCP_CLOSE) |
sk | 1268 | net/tcp/tcp.c | release_sock (sk); |
sk | 1274 | net/tcp/tcp.c | if (!sk->done) |
sk | 1276 | net/tcp/tcp.c | sk->done = 1; |
sk | 1282 | net/tcp/tcp.c | if (sk->shutdown & RCV_SHUTDOWN) |
sk | 1284 | net/tcp/tcp.c | release_sock (sk); |
sk | 1285 | net/tcp/tcp.c | if (copied == 0) sk->done = 1; |
sk | 1292 | net/tcp/tcp.c | release_sock (sk); |
sk | 1303 | net/tcp/tcp.c | release_sock (sk); |
sk | 1308 | net/tcp/tcp.c | PRINTK (("tcp_read about to sleep. state = %d\n",sk->state)); |
sk | 1310 | net/tcp/tcp.c | release_sock (sk); /* now we may have some data waiting. */ |
sk | 1313 | net/tcp/tcp.c | if ( sk->shutdown & RCV_SHUTDOWN || sk->err != 0) |
sk | 1315 | net/tcp/tcp.c | sk->inuse = 1; |
sk | 1320 | net/tcp/tcp.c | if ( sk->rqueue == NULL || |
sk | 1321 | net/tcp/tcp.c | before (sk->copied_seq+1, sk->rqueue->next->h.th->seq) ) |
sk | 1323 | net/tcp/tcp.c | interruptible_sleep_on (sk->sleep); |
sk | 1339 | net/tcp/tcp.c | sk->inuse = 1; |
sk | 1341 | net/tcp/tcp.c | if (sk->rqueue != NULL) |
sk | 1342 | net/tcp/tcp.c | skb=(struct sk_buff *)sk->rqueue->next; |
sk | 1351 | net/tcp/tcp.c | offset = sk->copied_seq+1 - skb->h.th->seq; |
sk | 1362 | net/tcp/tcp.c | sk->copied_seq += net16(skb->h.th->urg_ptr); |
sk | 1373 | net/tcp/tcp.c | release_sock (sk); |
sk | 1389 | net/tcp/tcp.c | sk->copied_seq += used; |
sk | 1412 | net/tcp/tcp.c | cleanup_rbuf (sk); |
sk | 1413 | net/tcp/tcp.c | release_sock (sk); |
sk | 1424 | net/tcp/tcp.c | tcp_shutdown (volatile struct sock *sk, int how) |
sk | 1439 | net/tcp/tcp.c | if (sk->state == TCP_FIN_WAIT1 || |
sk | 1440 | net/tcp/tcp.c | sk->state == TCP_FIN_WAIT2) |
sk | 1444 | net/tcp/tcp.c | sk->inuse = 1; |
sk | 1447 | net/tcp/tcp.c | if (sk->send_tmp) |
sk | 1448 | net/tcp/tcp.c | tcp_send_partial(sk); |
sk | 1450 | net/tcp/tcp.c | prot = (struct proto *)sk->prot; |
sk | 1451 | net/tcp/tcp.c | th=(struct tcp_header *)&sk->dummy_th; |
sk | 1452 | net/tcp/tcp.c | release_sock (sk); /* incase the malloc sleeps. */ |
sk | 1453 | net/tcp/tcp.c | buff=prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL); |
sk | 1458 | net/tcp/tcp.c | sk->inuse = 1; |
sk | 1465 | net/tcp/tcp.c | buff->sk = sk; |
sk | 1470 | net/tcp/tcp.c | tmp = prot->build_header (buff,sk->saddr, sk->daddr, &dev, |
sk | 1471 | net/tcp/tcp.c | IPPROTO_TCP, sk->opt, |
sk | 1475 | net/tcp/tcp.c | prot->wfree (sk,buff->mem_addr, buff->mem_len); |
sk | 1476 | net/tcp/tcp.c | release_sock(sk); |
sk | 1487 | net/tcp/tcp.c | t1->seq = net32(sk->send_seq); |
sk | 1489 | net/tcp/tcp.c | sk->send_seq++; |
sk | 1490 | net/tcp/tcp.c | buff->h.seq = sk->send_seq; |
sk | 1493 | net/tcp/tcp.c | t1->ack_seq = net32(sk->acked_seq); |
sk | 1494 | net/tcp/tcp.c | t1->window = net16(sk->prot->rspace(sk)); |
sk | 1499 | net/tcp/tcp.c | tcp_send_check (t1, sk->saddr, sk->daddr, sizeof (*t1), sk); |
sk | 1503 | net/tcp/tcp.c | if (sk->wback != NULL) |
sk | 1506 | net/tcp/tcp.c | sk->wback->next = buff; |
sk | 1507 | net/tcp/tcp.c | sk->wback = buff; |
sk | 1512 | net/tcp/tcp.c | sk->prot->queue_xmit (sk, dev, buff,0); |
sk | 1515 | net/tcp/tcp.c | if (sk->state == TCP_ESTABLISHED) |
sk | 1517 | net/tcp/tcp.c | sk->state = TCP_FIN_WAIT1; |
sk | 1521 | net/tcp/tcp.c | sk->state = TCP_FIN_WAIT2; |
sk | 1523 | net/tcp/tcp.c | release_sock(sk); |
sk | 1528 | net/tcp/tcp.c | tcp_recvfrom (volatile struct sock *sk, unsigned char *to, |
sk | 1532 | net/tcp/tcp.c | int result = tcp_read(sk, to, to_len, nonblock, flags); |
sk | 1541 | net/tcp/tcp.c | sin.sin_port = sk->dummy_th.dest; |
sk | 1542 | net/tcp/tcp.c | sin.sin_addr.s_addr = sk->daddr; |
sk | 1568 | net/tcp/tcp.c | buff->sk = NULL; |
sk | 1607 | net/tcp/tcp.c | tcp_conn_request(volatile struct sock *sk, struct sk_buff *skb, |
sk | 1621 | net/tcp/tcp.c | sk, skb, daddr, saddr, opt, dev)); |
sk | 1624 | net/tcp/tcp.c | if (!sk->dead) |
sk | 1626 | net/tcp/tcp.c | wake_up(sk->sleep); |
sk | 1631 | net/tcp/tcp.c | tcp_reset (daddr, saddr, th, sk->prot, opt, dev); |
sk | 1638 | net/tcp/tcp.c | if (sk->ack_backlog >= sk->max_ack_backlog) |
sk | 1660 | net/tcp/tcp.c | memcpy ((void *)newsk, (void *)sk, sizeof (*newsk)); |
sk | 1692 | net/tcp/tcp.c | newsk->time_wait.sk = newsk; |
sk | 1734 | net/tcp/tcp.c | sk->err = -ENOMEM; |
sk | 1745 | net/tcp/tcp.c | buff->sk = newsk; |
sk | 1750 | net/tcp/tcp.c | tmp = sk->prot->build_header (buff, newsk->saddr, newsk->daddr, &dev, |
sk | 1756 | net/tcp/tcp.c | sk->err = tmp; |
sk | 1757 | net/tcp/tcp.c | sk->prot->wfree(newsk, buff->mem_addr, buff->mem_len); |
sk | 1760 | net/tcp/tcp.c | skb->sk = sk; |
sk | 1796 | net/tcp/tcp.c | PRINTK (("newsk->time_wait.sk = %X\n", newsk->time_wait.sk)); |
sk | 1798 | net/tcp/tcp.c | skb->sk = newsk; |
sk | 1800 | net/tcp/tcp.c | sk->rmem_alloc -= skb->mem_len; |
sk | 1803 | net/tcp/tcp.c | if (sk->rqueue == NULL) |
sk | 1807 | net/tcp/tcp.c | sk->rqueue = skb; |
sk | 1811 | net/tcp/tcp.c | skb->next = sk->rqueue; |
sk | 1812 | net/tcp/tcp.c | skb->prev = sk->rqueue->prev; |
sk | 1813 | net/tcp/tcp.c | sk->rqueue->prev = skb; |
sk | 1816 | net/tcp/tcp.c | sk->ack_backlog++; |
sk | 1821 | net/tcp/tcp.c | tcp_close (volatile struct sock *sk, int timeout) |
sk | 1831 | net/tcp/tcp.c | PRINTK (("tcp_close ((struct sock *)%X, %d)\n",sk, timeout)); |
sk | 1832 | net/tcp/tcp.c | sk->inuse = 1; |
sk | 1833 | net/tcp/tcp.c | sk->keepopen = 1; |
sk | 1834 | net/tcp/tcp.c | sk->shutdown = SHUTDOWN_MASK; |
sk | 1836 | net/tcp/tcp.c | if (!sk->dead) |
sk | 1837 | net/tcp/tcp.c | wake_up (sk->sleep); |
sk | 1841 | net/tcp/tcp.c | if (sk->rqueue != NULL) |
sk | 1845 | net/tcp/tcp.c | skb = sk->rqueue; |
sk | 1850 | net/tcp/tcp.c | after (skb->h.th->seq + skb->len + 1, sk->copied_seq)) |
sk | 1854 | net/tcp/tcp.c | } while (skb != sk->rqueue); |
sk | 1856 | net/tcp/tcp.c | sk->rqueue = NULL; |
sk | 1859 | net/tcp/tcp.c | if (sk->send_tmp) |
sk | 1861 | net/tcp/tcp.c | tcp_send_partial (sk); |
sk | 1864 | net/tcp/tcp.c | switch (sk->state) |
sk | 1871 | net/tcp/tcp.c | sk->time_wait.len = 4*sk->rtt;; |
sk | 1872 | net/tcp/tcp.c | sk->timeout = TIME_CLOSE; |
sk | 1873 | net/tcp/tcp.c | reset_timer ((struct timer *)&sk->time_wait); |
sk | 1875 | net/tcp/tcp.c | tcp_time_wait(sk); |
sk | 1876 | net/tcp/tcp.c | release_sock (sk); |
sk | 1881 | net/tcp/tcp.c | sk->state = TCP_CLOSE; |
sk | 1882 | net/tcp/tcp.c | release_sock (sk); |
sk | 1886 | net/tcp/tcp.c | sk->state = TCP_CLOSE; |
sk | 1887 | net/tcp/tcp.c | release_sock(sk); |
sk | 1892 | net/tcp/tcp.c | release_sock(sk); |
sk | 1901 | net/tcp/tcp.c | prot = (struct proto *)sk->prot; |
sk | 1902 | net/tcp/tcp.c | th=(struct tcp_header *)&sk->dummy_th; |
sk | 1904 | net/tcp/tcp.c | buff=prot->wmalloc(sk, MAX_FIN_SIZE,1, GFP_ATOMIC); |
sk | 1908 | net/tcp/tcp.c | if (sk->state != TCP_CLOSE_WAIT) |
sk | 1909 | net/tcp/tcp.c | sk->state = TCP_ESTABLISHED; |
sk | 1910 | net/tcp/tcp.c | sk->timeout = TIME_CLOSE; |
sk | 1911 | net/tcp/tcp.c | sk->time_wait.len = 100; /* wait a second. */ |
sk | 1912 | net/tcp/tcp.c | reset_timer ((struct timer *)&sk->time_wait); |
sk | 1919 | net/tcp/tcp.c | buff->sk = sk; |
sk | 1923 | net/tcp/tcp.c | tmp = prot->build_header (buff,sk->saddr, sk->daddr, &dev, |
sk | 1924 | net/tcp/tcp.c | IPPROTO_TCP, sk->opt, |
sk | 1928 | net/tcp/tcp.c | prot->wfree (sk,buff->mem_addr, buff->mem_len); |
sk | 1930 | net/tcp/tcp.c | release_sock(sk); |
sk | 1938 | net/tcp/tcp.c | t1->seq = net32(sk->send_seq); |
sk | 1939 | net/tcp/tcp.c | sk->send_seq++; |
sk | 1940 | net/tcp/tcp.c | buff->h.seq = sk->send_seq; |
sk | 1944 | net/tcp/tcp.c | sk->delay_acks = 0; |
sk | 1945 | net/tcp/tcp.c | t1->ack_seq = net32(sk->acked_seq); |
sk | 1946 | net/tcp/tcp.c | t1->window = net16(sk->prot->rspace(sk)); |
sk | 1950 | net/tcp/tcp.c | tcp_send_check (t1, sk->saddr, sk->daddr, sizeof (*t1), sk); |
sk | 1952 | net/tcp/tcp.c | if (sk->wfront == NULL) |
sk | 1954 | net/tcp/tcp.c | prot->queue_xmit(sk, dev, buff, 0); |
sk | 1958 | net/tcp/tcp.c | sk->time_wait.len = sk->rtt; |
sk | 1959 | net/tcp/tcp.c | sk->timeout = TIME_WRITE; |
sk | 1960 | net/tcp/tcp.c | reset_timer ((struct timer *)&sk->time_wait); |
sk | 1962 | net/tcp/tcp.c | if (sk->wback == NULL) |
sk | 1964 | net/tcp/tcp.c | sk->wfront=buff; |
sk | 1968 | net/tcp/tcp.c | sk->wback->next = buff; |
sk | 1970 | net/tcp/tcp.c | sk->wback = buff; |
sk | 1975 | net/tcp/tcp.c | if (sk->state == TCP_CLOSE_WAIT) |
sk | 1977 | net/tcp/tcp.c | sk->state = TCP_FIN_WAIT2; |
sk | 1981 | net/tcp/tcp.c | sk->state = TCP_FIN_WAIT1; |
sk | 1984 | net/tcp/tcp.c | release_sock (sk); |
sk | 1991 | net/tcp/tcp.c | tcp_write_xmit (volatile struct sock *sk) |
sk | 1994 | net/tcp/tcp.c | PRINTK (("tcp_write_xmit (sk=%X)\n",sk)); |
sk | 1995 | net/tcp/tcp.c | while (sk->wfront != NULL && before (sk->wfront->h.seq, sk->window_seq) && |
sk | 1996 | net/tcp/tcp.c | sk->packets_out < sk->cong_window) |
sk | 1998 | net/tcp/tcp.c | skb = sk->wfront; |
sk | 1999 | net/tcp/tcp.c | sk->wfront = (struct sk_buff *)skb->next; |
sk | 2000 | net/tcp/tcp.c | if (sk->wfront == NULL) |
sk | 2001 | net/tcp/tcp.c | sk->wback = NULL; |
sk | 2007 | net/tcp/tcp.c | sk->wfront = NULL; |
sk | 2008 | net/tcp/tcp.c | sk->wback = NULL; |
sk | 2013 | net/tcp/tcp.c | sk->prot->queue_xmit (sk, skb->dev, skb, skb->free); |
sk | 2022 | net/tcp/tcp.c | tcp_ack (volatile struct sock *sk, struct tcp_header *th, unsigned long saddr) |
sk | 2029 | net/tcp/tcp.c | ack, net16(th->window), sk->rcv_ack_seq, sk->window_seq)); |
sk | 2030 | net/tcp/tcp.c | if (after (ack, sk->send_seq+1) || before (ack, sk->rcv_ack_seq-1)) |
sk | 2032 | net/tcp/tcp.c | if (after (ack, sk->send_seq) || (sk->state != TCP_ESTABLISHED && |
sk | 2033 | net/tcp/tcp.c | sk->state != TCP_CLOSE_WAIT)) |
sk | 2037 | net/tcp/tcp.c | if (sk->keepopen) |
sk | 2038 | net/tcp/tcp.c | reset_timer ((struct timer *)&sk->time_wait); |
sk | 2039 | net/tcp/tcp.c | sk->retransmits = 0; |
sk | 2044 | net/tcp/tcp.c | if (after (sk->window_seq, ack+net16(th->window))) |
sk | 2056 | net/tcp/tcp.c | sk->window_seq = ack + net16(th->window); |
sk | 2058 | net/tcp/tcp.c | for (skb = sk->send_head; skb != NULL; skb= (struct sk_buff *)skb->link3) |
sk | 2060 | net/tcp/tcp.c | if (after( skb->h.seq, sk->window_seq)) |
sk | 2066 | net/tcp/tcp.c | sk->send_head = (struct sk_buff *)skb->link3; |
sk | 2072 | net/tcp/tcp.c | if (sk->send_tail == skb) |
sk | 2073 | net/tcp/tcp.c | sk->send_tail = skb2; |
sk | 2108 | net/tcp/tcp.c | skb->next = sk->wfront; |
sk | 2109 | net/tcp/tcp.c | sk->wfront = skb; |
sk | 2126 | net/tcp/tcp.c | sk->window_seq = ack + net16(th->window); |
sk | 2129 | net/tcp/tcp.c | if (sk->cong_window < 2048 && ack != sk->rcv_ack_seq) |
sk | 2131 | net/tcp/tcp.c | if (sk->exp_growth) |
sk | 2132 | net/tcp/tcp.c | sk->cong_window *= 2; |
sk | 2134 | net/tcp/tcp.c | sk->cong_window++; |
sk | 2138 | net/tcp/tcp.c | sk->rcv_ack_seq = ack; |
sk | 2141 | net/tcp/tcp.c | while (sk->send_head != NULL) |
sk | 2143 | net/tcp/tcp.c | if (before (sk->send_head->h.seq, ack+1)) |
sk | 2147 | net/tcp/tcp.c | sk->packets_out --; |
sk | 2148 | net/tcp/tcp.c | PRINTK (("skb=%X acked\n", sk->send_head)); |
sk | 2151 | net/tcp/tcp.c | if (!sk->dead) |
sk | 2152 | net/tcp/tcp.c | wake_up (sk->sleep); |
sk | 2156 | net/tcp/tcp.c | oskb = sk->send_head; |
sk | 2158 | net/tcp/tcp.c | sk->rtt += ((jiffies - oskb->when) - sk->rtt)/2; |
sk | 2159 | net/tcp/tcp.c | if (sk->rtt < 30) sk->rtt = 30; |
sk | 2160 | net/tcp/tcp.c | sk->send_head = (struct sk_buff *)oskb->link3; |
sk | 2161 | net/tcp/tcp.c | if (sk->send_head == NULL) |
sk | 2163 | net/tcp/tcp.c | sk->send_tail = NULL; |
sk | 2196 | net/tcp/tcp.c | if (!sk->dead) |
sk | 2197 | net/tcp/tcp.c | wake_up(sk->sleep); |
sk | 2212 | net/tcp/tcp.c | if (sk->retransmits && sk->send_head != NULL) |
sk | 2215 | net/tcp/tcp.c | sk->prot->retransmit (sk,1); |
sk | 2217 | net/tcp/tcp.c | sk->retransmits = 0; |
sk | 2221 | net/tcp/tcp.c | if (sk->wfront != NULL && sk->packets_out < sk->cong_window) |
sk | 2223 | net/tcp/tcp.c | if (after (sk->window_seq, sk->wfront->h.seq)) |
sk | 2225 | net/tcp/tcp.c | tcp_write_xmit (sk); |
sk | 2230 | net/tcp/tcp.c | if (sk->send_head == NULL && sk->ack_backlog == 0 && |
sk | 2231 | net/tcp/tcp.c | sk->state != TCP_TIME_WAIT && !sk->keepopen) |
sk | 2234 | net/tcp/tcp.c | if (!sk->dead) |
sk | 2235 | net/tcp/tcp.c | wake_up (sk->sleep); |
sk | 2237 | net/tcp/tcp.c | delete_timer((struct timer *)&sk->time_wait); |
sk | 2238 | net/tcp/tcp.c | sk->timeout = 0; |
sk | 2242 | net/tcp/tcp.c | if (sk->state != sk->keepopen) |
sk | 2244 | net/tcp/tcp.c | sk->timeout = TIME_WRITE; |
sk | 2245 | net/tcp/tcp.c | sk->time_wait.len = sk->rtt*2; |
sk | 2246 | net/tcp/tcp.c | reset_timer ((struct timer *)&sk->time_wait); |
sk | 2248 | net/tcp/tcp.c | if (sk->state == TCP_TIME_WAIT) |
sk | 2250 | net/tcp/tcp.c | sk->time_wait.len = TCP_TIMEWAIT_LEN; |
sk | 2251 | net/tcp/tcp.c | reset_timer ((struct timer *)&sk->time_wait); |
sk | 2252 | net/tcp/tcp.c | sk->timeout = TIME_CLOSE; |
sk | 2258 | net/tcp/tcp.c | if (sk->packets_out == 0 && sk->send_tmp != NULL && |
sk | 2259 | net/tcp/tcp.c | sk->wfront == NULL && sk->send_head == NULL) |
sk | 2261 | net/tcp/tcp.c | tcp_send_partial (sk); |
sk | 2265 | net/tcp/tcp.c | if ( sk->state == TCP_TIME_WAIT) |
sk | 2267 | net/tcp/tcp.c | if (!sk->dead) wake_up (sk->sleep); |
sk | 2268 | net/tcp/tcp.c | if (sk->rcv_ack_seq == sk->send_seq && |
sk | 2269 | net/tcp/tcp.c | sk->acked_seq == sk->fin_seq) |
sk | 2271 | net/tcp/tcp.c | sk->state = TCP_CLOSE; |
sk | 2272 | net/tcp/tcp.c | sk->shutdown = SHUTDOWN_MASK; |
sk | 2276 | net/tcp/tcp.c | if (sk->state == TCP_LAST_ACK || sk->state == TCP_FIN_WAIT2) |
sk | 2278 | net/tcp/tcp.c | if (!sk->dead) wake_up (sk->sleep); |
sk | 2279 | net/tcp/tcp.c | if (sk->rcv_ack_seq == sk->send_seq) |
sk | 2281 | net/tcp/tcp.c | if (sk->acked_seq != sk->fin_seq) |
sk | 2283 | net/tcp/tcp.c | tcp_time_wait(sk); |
sk | 2287 | net/tcp/tcp.c | PRINTK (("tcp_ack closing socket - %X\n", sk)); |
sk | 2288 | net/tcp/tcp.c | tcp_send_ack (sk->send_seq, sk->acked_seq, sk, th, sk->daddr); |
sk | 2289 | net/tcp/tcp.c | sk->shutdown = SHUTDOWN_MASK; |
sk | 2290 | net/tcp/tcp.c | sk->state = TCP_CLOSE; |
sk | 2305 | net/tcp/tcp.c | tcp_data (struct sk_buff *skb, volatile struct sock *sk, |
sk | 2315 | net/tcp/tcp.c | PRINTK(("tcp_data len = %d sk = %X:\n",skb->len, sk)); |
sk | 2317 | net/tcp/tcp.c | sk->bytes_rcv += skb->len; |
sk | 2323 | net/tcp/tcp.c | tcp_send_ack (sk->send_seq, sk->acked_seq,sk, th, saddr); |
sk | 2328 | net/tcp/tcp.c | if (sk->shutdown & RCV_SHUTDOWN) |
sk | 2330 | net/tcp/tcp.c | sk->acked_seq = th->seq + skb->len + th->syn + th->fin; |
sk | 2331 | net/tcp/tcp.c | tcp_reset (sk->saddr, sk->daddr, skb->h.th, |
sk | 2332 | net/tcp/tcp.c | sk->prot, NULL, skb->dev); |
sk | 2333 | net/tcp/tcp.c | sk->state = TCP_CLOSE; |
sk | 2334 | net/tcp/tcp.c | sk->err = EPIPE; |
sk | 2335 | net/tcp/tcp.c | sk->shutdown = SHUTDOWN_MASK; |
sk | 2336 | net/tcp/tcp.c | PRINTK (("tcp_data: closing socket - %X\n", sk)); |
sk | 2338 | net/tcp/tcp.c | if (!sk->dead) wake_up (sk->sleep); |
sk | 2349 | net/tcp/tcp.c | if (sk->rqueue == NULL) |
sk | 2353 | net/tcp/tcp.c | sk->rqueue = skb; |
sk | 2360 | net/tcp/tcp.c | PRINTK (("tcp_data adding to chain sk = %X:\n",sk)); |
sk | 2362 | net/tcp/tcp.c | for (skb1=sk->rqueue; ; skb1=(struct sk_buff *)skb1->prev) |
sk | 2372 | net/tcp/tcp.c | if (skb1 == sk->rqueue) |
sk | 2373 | net/tcp/tcp.c | sk->rqueue = skb; |
sk | 2376 | net/tcp/tcp.c | if ( skb1->prev == sk->rqueue) |
sk | 2395 | net/tcp/tcp.c | if (before (sk->acked_seq, sk->copied_seq)) |
sk | 2398 | net/tcp/tcp.c | sk->acked_seq = sk->copied_seq; |
sk | 2402 | net/tcp/tcp.c | if (skb1 == NULL || skb1->acked || before (th->seq, sk->acked_seq+1)) |
sk | 2404 | net/tcp/tcp.c | if (before (th->seq, sk->acked_seq+1)) |
sk | 2406 | net/tcp/tcp.c | if (after (th->ack_seq, sk->acked_seq)) |
sk | 2407 | net/tcp/tcp.c | sk->acked_seq = th->ack_seq; |
sk | 2413 | net/tcp/tcp.c | if (!sk->dead) wake_up (sk->sleep); |
sk | 2414 | net/tcp/tcp.c | sk->shutdown |= RCV_SHUTDOWN; |
sk | 2418 | net/tcp/tcp.c | skb2 !=(struct sk_buff *) sk->rqueue->next; |
sk | 2421 | net/tcp/tcp.c | if (before(skb2->h.th->seq, sk->acked_seq+1)) |
sk | 2423 | net/tcp/tcp.c | if (after (skb2->h.th->ack_seq, sk->acked_seq)) |
sk | 2424 | net/tcp/tcp.c | sk->acked_seq = skb2->h.th->ack_seq; |
sk | 2430 | net/tcp/tcp.c | sk->shutdown |= RCV_SHUTDOWN; |
sk | 2431 | net/tcp/tcp.c | if (!sk->dead) wake_up (sk->sleep); |
sk | 2435 | net/tcp/tcp.c | sk->ack_backlog = sk->max_ack_backlog; |
sk | 2446 | net/tcp/tcp.c | if (!sk->delay_acks || |
sk | 2447 | net/tcp/tcp.c | sk->ack_backlog >= sk->max_ack_backlog || |
sk | 2448 | net/tcp/tcp.c | sk->bytes_rcv > sk->max_unacked || |
sk | 2451 | net/tcp/tcp.c | tcp_send_ack (sk->send_seq, sk->acked_seq,sk,th, saddr); |
sk | 2455 | net/tcp/tcp.c | sk->ack_backlog++; |
sk | 2456 | net/tcp/tcp.c | sk->time_wait.len = TCP_ACK_TIME; |
sk | 2457 | net/tcp/tcp.c | sk->timeout = TIME_WRITE; |
sk | 2458 | net/tcp/tcp.c | reset_timer ((struct timer *)&sk->time_wait); |
sk | 2459 | net/tcp/tcp.c | sk->retransmits = 0; |
sk | 2466 | net/tcp/tcp.c | tcp_send_ack (sk->send_seq, sk->acked_seq, sk, th, saddr); |
sk | 2470 | net/tcp/tcp.c | if (!sk->dead) |
sk | 2472 | net/tcp/tcp.c | wake_up (sk->sleep); |
sk | 2479 | net/tcp/tcp.c | if (sk->state == TCP_FIN_WAIT2 && sk->acked_seq == sk->fin_seq |
sk | 2480 | net/tcp/tcp.c | && sk->rcv_ack_seq == sk->send_seq) |
sk | 2482 | net/tcp/tcp.c | PRINTK (("tcp_data: entering last_ack state sk = %X\n", sk)); |
sk | 2484 | net/tcp/tcp.c | tcp_send_ack (sk->send_seq, sk->acked_seq, sk, th, saddr); |
sk | 2485 | net/tcp/tcp.c | sk->shutdown = SHUTDOWN_MASK; |
sk | 2486 | net/tcp/tcp.c | sk->state = TCP_LAST_ACK; |
sk | 2487 | net/tcp/tcp.c | if (!sk->dead) wake_up (sk->sleep); |
sk | 2494 | net/tcp/tcp.c | tcp_urg (volatile struct sock *sk, struct tcp_header *th, unsigned long saddr) |
sk | 2499 | net/tcp/tcp.c | if (!sk->dead) |
sk | 2500 | net/tcp/tcp.c | wake_up(sk->sleep); |
sk | 2502 | net/tcp/tcp.c | if (sk->urginline) |
sk | 2509 | net/tcp/tcp.c | if (!sk->urg) |
sk | 2513 | net/tcp/tcp.c | if (sk->proc != 0) |
sk | 2515 | net/tcp/tcp.c | if (sk->proc > 0) |
sk | 2517 | net/tcp/tcp.c | kill_proc (sk->proc, SIGURG, 1); |
sk | 2521 | net/tcp/tcp.c | kill_pg (-sk->proc, SIGURG, 1); |
sk | 2525 | net/tcp/tcp.c | sk->urg++; |
sk | 2531 | net/tcp/tcp.c | tcp_fin (volatile struct sock *sk, struct tcp_header *th, |
sk | 2535 | net/tcp/tcp.c | sk, th, saddr, dev)); |
sk | 2537 | net/tcp/tcp.c | if (!sk->dead) |
sk | 2539 | net/tcp/tcp.c | wake_up (sk->sleep); |
sk | 2542 | net/tcp/tcp.c | switch (sk->state) |
sk | 2547 | net/tcp/tcp.c | sk->fin_seq = th->seq+1; /* Contains the one that needs to be acked */ |
sk | 2548 | net/tcp/tcp.c | sk->state = TCP_CLOSE_WAIT; |
sk | 2549 | net/tcp/tcp.c | if (th->rst) sk->shutdown = SHUTDOWN_MASK; |
sk | 2557 | net/tcp/tcp.c | sk->fin_seq = th->seq+1; /* Contains the one that needs to be acked */ |
sk | 2558 | net/tcp/tcp.c | sk->state = TCP_FIN_WAIT2; |
sk | 2563 | net/tcp/tcp.c | sk->state = TCP_LAST_ACK; |
sk | 2565 | net/tcp/tcp.c | sk->time_wait.len = TCP_TIMEWAIT_LEN; |
sk | 2566 | net/tcp/tcp.c | sk->timeout = TIME_CLOSE; |
sk | 2567 | net/tcp/tcp.c | reset_timer ((struct timer *)&sk->time_wait); |
sk | 2573 | net/tcp/tcp.c | sk->ack_backlog ++; |
sk | 2577 | net/tcp/tcp.c | buff=sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC); |
sk | 2587 | net/tcp/tcp.c | buff->sk = sk; |
sk | 2591 | net/tcp/tcp.c | tmp = sk->prot->build_header (buff, sk->saddr, sk->daddr, &dev, |
sk | 2592 | net/tcp/tcp.c | IPPROTO_TCP, sk->opt, MAX_ACK_SIZE); |
sk | 2595 | net/tcp/tcp.c | sk->prot->wfree(sk, buff->mem_addr, buff->mem_len); |
sk | 2609 | net/tcp/tcp.c | t1->seq = net32(sk->send_seq); |
sk | 2614 | net/tcp/tcp.c | buff->h.seq = sk->send_seq; |
sk | 2615 | net/tcp/tcp.c | t1->window = net16(sk->prot->rspace(sk)); |
sk | 2625 | net/tcp/tcp.c | t1->ack_seq = net32(sk->acked_seq); |
sk | 2628 | net/tcp/tcp.c | tcp_send_check (t1, sk->saddr, sk->daddr, sizeof (*t1), sk); |
sk | 2632 | net/tcp/tcp.c | if (sk->wback != NULL) |
sk | 2635 | net/tcp/tcp.c | sk->wback->next = buff; |
sk | 2636 | net/tcp/tcp.c | sk->wback = buff; |
sk | 2641 | net/tcp/tcp.c | sk->prot->queue_xmit (sk, dev, buff,0); |
sk | 2651 | net/tcp/tcp.c | tcp_accept (volatile struct sock *sk, int flags) |
sk | 2656 | net/tcp/tcp.c | PRINTK (("tcp_accept(sk=%X, flags=%X)\n", sk, flags)); |
sk | 2660 | net/tcp/tcp.c | if (sk->state != TCP_LISTEN) |
sk | 2662 | net/tcp/tcp.c | sk->err = EINVAL; |
sk | 2667 | net/tcp/tcp.c | sk->inuse = 1; |
sk | 2669 | net/tcp/tcp.c | while ( (skb = get_firstr(sk)) == NULL ) |
sk | 2674 | net/tcp/tcp.c | release_sock (sk); |
sk | 2675 | net/tcp/tcp.c | sk->err = EAGAIN; |
sk | 2679 | net/tcp/tcp.c | release_sock (sk); |
sk | 2680 | net/tcp/tcp.c | interruptible_sleep_on (sk->sleep); |
sk | 2684 | net/tcp/tcp.c | sk->err = ERESTARTSYS; |
sk | 2688 | net/tcp/tcp.c | sk->inuse = 1; |
sk | 2693 | net/tcp/tcp.c | newsk = skb->sk; |
sk | 2696 | net/tcp/tcp.c | sk->ack_backlog--; |
sk | 2697 | net/tcp/tcp.c | release_sock (sk); |
sk | 2705 | net/tcp/tcp.c | tcp_connect (volatile struct sock *sk, struct sockaddr_in *usin, int addr_len) |
sk | 2713 | net/tcp/tcp.c | if (sk->state != TCP_CLOSE) return (-EISCONN); |
sk | 2720 | net/tcp/tcp.c | sk->inuse = 1; |
sk | 2721 | net/tcp/tcp.c | sk->daddr = sin.sin_addr.s_addr; |
sk | 2722 | net/tcp/tcp.c | sk->send_seq = timer_seq*SEQ_TICK-seq_offset; |
sk | 2723 | net/tcp/tcp.c | sk->rcv_ack_seq = sk->send_seq -1; |
sk | 2724 | net/tcp/tcp.c | sk->err = 0; |
sk | 2725 | net/tcp/tcp.c | sk->dummy_th.dest = sin.sin_port; |
sk | 2726 | net/tcp/tcp.c | release_sock (sk); |
sk | 2728 | net/tcp/tcp.c | buff=sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL); |
sk | 2733 | net/tcp/tcp.c | sk->inuse = 1; |
sk | 2738 | net/tcp/tcp.c | buff->sk = sk; |
sk | 2743 | net/tcp/tcp.c | tmp = sk->prot->build_header (buff, sk->saddr, sk->daddr, &dev, |
sk | 2747 | net/tcp/tcp.c | sk->prot->wfree(sk, buff->mem_addr, buff->mem_len); |
sk | 2748 | net/tcp/tcp.c | release_sock (sk); |
sk | 2754 | net/tcp/tcp.c | memcpy (t1, (void *)&(sk->dummy_th), sizeof (*t1)); |
sk | 2755 | net/tcp/tcp.c | t1->seq = net32(sk->send_seq++); |
sk | 2756 | net/tcp/tcp.c | buff->h.seq = sk->send_seq; |
sk | 2773 | net/tcp/tcp.c | sk->mtu = dev->mtu - HEADER_SIZE; |
sk | 2774 | net/tcp/tcp.c | tcp_send_check (t1, sk->saddr, sk->daddr, |
sk | 2775 | net/tcp/tcp.c | sizeof (struct tcp_header) + 4, sk); |
sk | 2778 | net/tcp/tcp.c | sk->state = TCP_SYN_SENT; |
sk | 2780 | net/tcp/tcp.c | sk->prot->queue_xmit(sk, dev, buff, 0); |
sk | 2782 | net/tcp/tcp.c | sk->time_wait.len = TCP_CONNECT_TIME; |
sk | 2783 | net/tcp/tcp.c | sk->rtt = TCP_CONNECT_TIME; |
sk | 2784 | net/tcp/tcp.c | reset_timer ((struct timer *)&sk->time_wait); |
sk | 2785 | net/tcp/tcp.c | sk->retransmits = TCP_RETR2 - TCP_SYN_RETRIES; |
sk | 2786 | net/tcp/tcp.c | release_sock (sk); |
sk | 2795 | net/tcp/tcp.c | tcp_sequence (volatile struct sock *sk, struct tcp_header *th, short len, |
sk | 2804 | net/tcp/tcp.c | sk, th, len, opt, saddr)); |
sk | 2806 | net/tcp/tcp.c | if (between(th->seq, sk->acked_seq, sk->acked_seq + sk->window)|| |
sk | 2807 | net/tcp/tcp.c | between(th->seq + len-(th->doff * 4), sk->acked_seq + 1, |
sk | 2808 | net/tcp/tcp.c | sk->acked_seq + sk->window) || |
sk | 2809 | net/tcp/tcp.c | (before (th->seq, sk->acked_seq) && |
sk | 2810 | net/tcp/tcp.c | after (th->seq + len - (th->doff * 4), sk->acked_seq + sk->window))) |
sk | 2819 | net/tcp/tcp.c | if (after (th->seq, sk->acked_seq + sk->window)) |
sk | 2821 | net/tcp/tcp.c | tcp_send_ack (sk->send_seq, sk->acked_seq, sk, th, saddr); |
sk | 2826 | net/tcp/tcp.c | if (th->ack && len == (th->doff * 4) && after (th->seq, sk->acked_seq - 32767) && |
sk | 2832 | net/tcp/tcp.c | tcp_send_ack (net32(th->ack_seq), sk->acked_seq, sk, th, saddr); |
sk | 2841 | net/tcp/tcp.c | tcp_options (volatile struct sock *sk, struct tcp_header *th) |
sk | 2847 | net/tcp/tcp.c | sk->mtu = min (sk->mtu, 576-HEADER_SIZE); |
sk | 2850 | net/tcp/tcp.c | sk->mtu = min (sk->mtu, ptr[2]*256 + ptr[3] - HEADER_SIZE); |
sk | 2859 | net/tcp/tcp.c | volatile struct sock *sk; |
sk | 2887 | net/tcp/tcp.c | sk=get_sock(&tcp_prot, net16(th->dest), saddr, th->source, daddr); |
sk | 2891 | net/tcp/tcp.c | if (sk) |
sk | 2893 | net/tcp/tcp.c | PRINTK (("sk = %X:\n",sk)); |
sk | 2900 | net/tcp/tcp.c | skb->sk = NULL; |
sk | 2909 | net/tcp/tcp.c | if (sk == NULL) |
sk | 2913 | net/tcp/tcp.c | skb->sk = NULL; |
sk | 2919 | net/tcp/tcp.c | skb->sk = sk; |
sk | 2932 | net/tcp/tcp.c | if (sk->inuse) |
sk | 2934 | net/tcp/tcp.c | if (sk->back_log == NULL) |
sk | 2936 | net/tcp/tcp.c | sk->back_log = skb; |
sk | 2942 | net/tcp/tcp.c | skb->next = sk->back_log; |
sk | 2943 | net/tcp/tcp.c | skb->prev = sk->back_log->prev; |
sk | 2950 | net/tcp/tcp.c | sk->inuse = 1; |
sk | 2955 | net/tcp/tcp.c | if (!sk) |
sk | 2962 | net/tcp/tcp.c | if (!sk->prot) |
sk | 2969 | net/tcp/tcp.c | if (sk->rmem_alloc + skb->mem_len >= SK_RMEM_MAX) |
sk | 2971 | net/tcp/tcp.c | skb->sk = NULL; |
sk | 2974 | net/tcp/tcp.c | release_sock (sk); |
sk | 2978 | net/tcp/tcp.c | sk->rmem_alloc += skb->mem_len; |
sk | 2984 | net/tcp/tcp.c | switch (sk->state) |
sk | 2991 | net/tcp/tcp.c | sk->err = ECONNRESET; |
sk | 2992 | net/tcp/tcp.c | sk->state = TCP_CLOSE; |
sk | 2993 | net/tcp/tcp.c | sk->shutdown = SHUTDOWN_MASK; |
sk | 2994 | net/tcp/tcp.c | if (!sk->dead) |
sk | 2996 | net/tcp/tcp.c | wake_up (sk->sleep); |
sk | 2999 | net/tcp/tcp.c | release_sock(sk); |
sk | 3009 | net/tcp/tcp.c | if (!tcp_sequence (sk, th, len, opt, saddr)) |
sk | 3012 | net/tcp/tcp.c | release_sock(sk); |
sk | 3019 | net/tcp/tcp.c | sk->err = ECONNRESET; |
sk | 3021 | net/tcp/tcp.c | if (sk->state == TCP_CLOSE_WAIT) |
sk | 3023 | net/tcp/tcp.c | sk->err = EPIPE; |
sk | 3030 | net/tcp/tcp.c | sk->state = TCP_CLOSE; |
sk | 3031 | net/tcp/tcp.c | sk->shutdown = SHUTDOWN_MASK; |
sk | 3032 | net/tcp/tcp.c | if (!sk->dead) |
sk | 3034 | net/tcp/tcp.c | wake_up (sk->sleep); |
sk | 3037 | net/tcp/tcp.c | release_sock(sk); |
sk | 3044 | net/tcp/tcp.c | sk->err = ECONNRESET; |
sk | 3045 | net/tcp/tcp.c | sk->state = TCP_CLOSE; |
sk | 3046 | net/tcp/tcp.c | sk->shutdown = SHUTDOWN_MASK; |
sk | 3047 | net/tcp/tcp.c | tcp_reset (daddr, saddr, th, sk->prot, opt,dev); |
sk | 3048 | net/tcp/tcp.c | if (!sk->dead) |
sk | 3050 | net/tcp/tcp.c | wake_up (sk->sleep); |
sk | 3053 | net/tcp/tcp.c | release_sock(sk); |
sk | 3059 | net/tcp/tcp.c | if(!tcp_ack (sk, th, saddr)) |
sk | 3062 | net/tcp/tcp.c | release_sock(sk); |
sk | 3068 | net/tcp/tcp.c | if (tcp_urg (sk, th, saddr)) |
sk | 3071 | net/tcp/tcp.c | release_sock(sk); |
sk | 3076 | net/tcp/tcp.c | if (th->fin && tcp_fin (sk, th, saddr, dev)) |
sk | 3079 | net/tcp/tcp.c | release_sock(sk); |
sk | 3083 | net/tcp/tcp.c | if ( tcp_data (skb, sk, saddr, len)) |
sk | 3086 | net/tcp/tcp.c | release_sock(sk); |
sk | 3090 | net/tcp/tcp.c | release_sock(sk); |
sk | 3095 | net/tcp/tcp.c | if (sk->dead || sk->daddr) |
sk | 3099 | net/tcp/tcp.c | release_sock (sk); |
sk | 3107 | net/tcp/tcp.c | tcp_reset (daddr, saddr, th, sk->prot, opt,dev); |
sk | 3110 | net/tcp/tcp.c | release_sock(sk); |
sk | 3117 | net/tcp/tcp.c | release_sock(sk); |
sk | 3122 | net/tcp/tcp.c | tcp_reset (daddr, saddr, th, sk->prot, opt,dev ); |
sk | 3124 | net/tcp/tcp.c | release_sock(sk); |
sk | 3142 | net/tcp/tcp.c | tcp_conn_request (sk, skb, daddr, saddr, opt, dev); |
sk | 3144 | net/tcp/tcp.c | release_sock(sk); |
sk | 3149 | net/tcp/tcp.c | release_sock(sk); |
sk | 3153 | net/tcp/tcp.c | if (!tcp_sequence (sk, th, len, opt, saddr)) |
sk | 3156 | net/tcp/tcp.c | release_sock(sk); |
sk | 3163 | net/tcp/tcp.c | sk->err = ECONNREFUSED ; |
sk | 3164 | net/tcp/tcp.c | sk->state = TCP_CLOSE; |
sk | 3165 | net/tcp/tcp.c | sk->shutdown = SHUTDOWN_MASK; |
sk | 3166 | net/tcp/tcp.c | if (!sk->dead) |
sk | 3168 | net/tcp/tcp.c | wake_up (sk->sleep); |
sk | 3171 | net/tcp/tcp.c | release_sock(sk); |
sk | 3177 | net/tcp/tcp.c | sk->err = ECONNRESET; |
sk | 3178 | net/tcp/tcp.c | sk->state = TCP_CLOSE; |
sk | 3179 | net/tcp/tcp.c | sk->shutdown = SHUTDOWN_MASK; |
sk | 3180 | net/tcp/tcp.c | tcp_reset (daddr, saddr, th, sk->prot, opt, dev); |
sk | 3181 | net/tcp/tcp.c | if (!sk->dead) |
sk | 3183 | net/tcp/tcp.c | wake_up (sk->sleep); |
sk | 3186 | net/tcp/tcp.c | release_sock(sk); |
sk | 3194 | net/tcp/tcp.c | sk->state = TCP_SYN_RECV; |
sk | 3198 | net/tcp/tcp.c | release_sock(sk); |
sk | 3202 | net/tcp/tcp.c | switch (sk->state) |
sk | 3205 | net/tcp/tcp.c | if (!tcp_ack(sk, th, saddr)) |
sk | 3207 | net/tcp/tcp.c | tcp_reset(daddr, saddr, th, sk->prot, opt,dev); |
sk | 3209 | net/tcp/tcp.c | release_sock(sk); |
sk | 3219 | net/tcp/tcp.c | release_sock (sk); |
sk | 3224 | net/tcp/tcp.c | sk->acked_seq = th->seq+1; |
sk | 3225 | net/tcp/tcp.c | sk->fin_seq = th->seq; |
sk | 3226 | net/tcp/tcp.c | tcp_send_ack (sk->send_seq, th->seq+1, sk, |
sk | 3227 | net/tcp/tcp.c | th, sk->daddr); |
sk | 3230 | net/tcp/tcp.c | if (!tcp_ack(sk, th, saddr)) |
sk | 3232 | net/tcp/tcp.c | tcp_reset(daddr, saddr, th, sk->prot, opt, dev); |
sk | 3234 | net/tcp/tcp.c | release_sock(sk); |
sk | 3238 | net/tcp/tcp.c | sk->state = TCP_ESTABLISHED; |
sk | 3243 | net/tcp/tcp.c | tcp_options(sk, th); |
sk | 3244 | net/tcp/tcp.c | sk->dummy_th.dest = th->source; |
sk | 3245 | net/tcp/tcp.c | sk->copied_seq = sk->acked_seq-1; |
sk | 3246 | net/tcp/tcp.c | if (!sk->dead) |
sk | 3248 | net/tcp/tcp.c | wake_up (sk->sleep); |
sk | 3254 | net/tcp/tcp.c | if (tcp_urg (sk, th, saddr)) |
sk | 3257 | net/tcp/tcp.c | release_sock(sk); |
sk | 3260 | net/tcp/tcp.c | if (tcp_data (skb, sk, saddr, len)) |
sk | 3264 | net/tcp/tcp.c | tcp_fin(sk, th, saddr, dev); |
sk | 3266 | net/tcp/tcp.c | release_sock(sk); |
sk | 3272 | net/tcp/tcp.c | if (tcp_urg (sk, th, saddr)) |
sk | 3275 | net/tcp/tcp.c | release_sock (sk); |
sk | 3280 | net/tcp/tcp.c | if (tcp_data (skb, sk, saddr, len)) |
sk | 3283 | net/tcp/tcp.c | release_sock (sk); |
sk | 3289 | net/tcp/tcp.c | release_sock(sk); |
sk | 3292 | net/tcp/tcp.c | tcp_fin (sk, th, saddr, dev); |
sk | 3293 | net/tcp/tcp.c | release_sock(sk); |
sk | 3303 | net/tcp/tcp.c | tcp_write_wakeup(volatile struct sock *sk) |
sk | 3309 | net/tcp/tcp.c | if (sk -> state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) return; |
sk | 3311 | net/tcp/tcp.c | buff=sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC); |
sk | 3320 | net/tcp/tcp.c | buff->sk = sk; |
sk | 3325 | net/tcp/tcp.c | tmp = sk->prot->build_header (buff, sk->saddr, sk->daddr, &dev, |
sk | 3326 | net/tcp/tcp.c | IPPROTO_TCP, sk->opt, MAX_ACK_SIZE); |
sk | 3329 | net/tcp/tcp.c | sk->prot->wfree(sk, buff->mem_addr, buff->mem_len); |
sk | 3336 | net/tcp/tcp.c | memcpy (t1,(void *) &sk->dummy_th, sizeof (*t1)); |
sk | 3340 | net/tcp/tcp.c | t1->seq = net32(sk->send_seq-1); |
sk | 3349 | net/tcp/tcp.c | t1->ack_seq = net32(sk->acked_seq); |
sk | 3350 | net/tcp/tcp.c | t1->window = net16(sk->prot->rspace(sk)); |
sk | 3352 | net/tcp/tcp.c | tcp_send_check (t1, sk->saddr, sk->daddr, sizeof (*t1), sk); |
sk | 3355 | net/tcp/tcp.c | sk->prot->queue_xmit(sk, dev, buff, 1); |
sk | 168 | net/tcp/timer.c | volatile struct sock *sk; |
sk | 176 | net/tcp/timer.c | sk = timer_base->sk; |
sk | 178 | net/tcp/timer.c | if (sk->inuse) |
sk | 183 | net/tcp/timer.c | sk->inuse = 1; |
sk | 185 | net/tcp/timer.c | why = sk->timeout; |
sk | 187 | net/tcp/timer.c | PRINTK (("net_timer: found sk=%X why = %d\n",sk, why)); |
sk | 189 | net/tcp/timer.c | if (sk->keepopen) |
sk | 191 | net/tcp/timer.c | sk->time_wait.len = TCP_TIMEOUT_LEN; |
sk | 192 | net/tcp/timer.c | sk->timeout = TIME_KEEPOPEN; |
sk | 197 | net/tcp/timer.c | sk->timeout = 0; |
sk | 202 | net/tcp/timer.c | if (sk->ack_backlog) |
sk | 204 | net/tcp/timer.c | sk->prot->read_wakeup(sk); |
sk | 205 | net/tcp/timer.c | if (!sk->dead) wake_up (sk->sleep); |
sk | 213 | net/tcp/timer.c | if (!sk->dead || sk->state != TCP_CLOSE) |
sk | 216 | net/tcp/timer.c | release_sock (sk); |
sk | 219 | net/tcp/timer.c | destroy_sock (sk); |
sk | 226 | net/tcp/timer.c | PRINTK (("possible memory leak. sk = %X\n", sk)); |
sk | 227 | net/tcp/timer.c | reset_timer ((struct timer *)&sk->time_wait); |
sk | 228 | net/tcp/timer.c | sk->inuse = 0; |
sk | 234 | net/tcp/timer.c | sk->state = TCP_CLOSE; |
sk | 235 | net/tcp/timer.c | delete_timer ((struct timer *)&sk->time_wait); |
sk | 238 | net/tcp/timer.c | arp_destroy (sk->daddr); |
sk | 239 | net/tcp/timer.c | if (!sk->dead) |
sk | 240 | net/tcp/timer.c | wake_up (sk->sleep); |
sk | 241 | net/tcp/timer.c | release_sock(sk); |
sk | 247 | net/tcp/timer.c | if (sk->send_head != NULL) |
sk | 249 | net/tcp/timer.c | if (before (jiffies, sk->send_head->when + 2*sk->rtt)) |
sk | 251 | net/tcp/timer.c | sk->time_wait.len = 2*sk->rtt; |
sk | 252 | net/tcp/timer.c | sk->timeout = TIME_WRITE; |
sk | 253 | net/tcp/timer.c | reset_timer ((struct timer *)&sk->time_wait); |
sk | 254 | net/tcp/timer.c | release_sock (sk); |
sk | 258 | net/tcp/timer.c | sk->prot->retransmit (sk, 0); |
sk | 260 | net/tcp/timer.c | if (sk->retransmits > TCP_RETR1) |
sk | 263 | net/tcp/timer.c | arp_destroy (sk->daddr); |
sk | 264 | net/tcp/timer.c | ip_route_check (sk->daddr); |
sk | 267 | net/tcp/timer.c | if (sk->retransmits > TCP_RETR2) |
sk | 270 | net/tcp/timer.c | sk->err = ETIMEDOUT; |
sk | 271 | net/tcp/timer.c | if (sk->state == TCP_FIN_WAIT1 || |
sk | 272 | net/tcp/timer.c | sk->state == TCP_FIN_WAIT2 || |
sk | 273 | net/tcp/timer.c | sk->state == TCP_LAST_ACK) |
sk | 275 | net/tcp/timer.c | sk->state = TCP_TIME_WAIT; |
sk | 276 | net/tcp/timer.c | sk->timeout = TIME_CLOSE; |
sk | 277 | net/tcp/timer.c | sk->time_wait.len = TCP_TIMEWAIT_LEN; |
sk | 278 | net/tcp/timer.c | reset_timer ((struct timer *)&sk->time_wait); |
sk | 279 | net/tcp/timer.c | release_sock(sk); |
sk | 284 | net/tcp/timer.c | sk->prot->close (sk,1); |
sk | 288 | net/tcp/timer.c | release_sock (sk); |
sk | 291 | net/tcp/timer.c | release_sock (sk); |
sk | 297 | net/tcp/timer.c | if (sk->prot->write_wakeup != NULL) |
sk | 298 | net/tcp/timer.c | sk->prot->write_wakeup(sk); |
sk | 299 | net/tcp/timer.c | sk->retransmits ++; |
sk | 300 | net/tcp/timer.c | if (sk->shutdown == SHUTDOWN_MASK) |
sk | 302 | net/tcp/timer.c | sk->prot->close (sk,1); |
sk | 303 | net/tcp/timer.c | sk->state = TCP_CLOSE; |
sk | 306 | net/tcp/timer.c | if (sk->retransmits > TCP_RETR1) |
sk | 309 | net/tcp/timer.c | arp_destroy (sk->daddr); |
sk | 310 | net/tcp/timer.c | ip_route_check (sk->daddr); |
sk | 311 | net/tcp/timer.c | release_sock (sk); |
sk | 314 | net/tcp/timer.c | if (sk->retransmits > TCP_RETR2) |
sk | 317 | net/tcp/timer.c | arp_destroy (sk->daddr); |
sk | 318 | net/tcp/timer.c | sk->err = ETIMEDOUT; |
sk | 319 | net/tcp/timer.c | if (sk->state == TCP_FIN_WAIT1 || |
sk | 320 | net/tcp/timer.c | sk->state == TCP_FIN_WAIT2) |
sk | 322 | net/tcp/timer.c | sk->state = TCP_TIME_WAIT; |
sk | 323 | net/tcp/timer.c | if (!sk->dead) |
sk | 324 | net/tcp/timer.c | wake_up (sk->sleep); |
sk | 325 | net/tcp/timer.c | release_sock(sk); |
sk | 329 | net/tcp/timer.c | sk->prot->close (sk, 1); |
sk | 333 | net/tcp/timer.c | release_sock (sk); |
sk | 337 | net/tcp/timer.c | release_sock(sk); |
sk | 46 | net/tcp/timer.h | volatile struct sock *sk; |
sk | 110 | net/tcp/udp.c | udp_select (volatile struct sock *sk, int sel_type, select_table *wait) |
sk | 112 | net/tcp/udp.c | select_wait(sk->sleep, wait); |
sk | 116 | net/tcp/udp.c | if (sk->rqueue != NULL) |
sk | 123 | net/tcp/udp.c | if (sk->prot->wspace(sk) >= MIN_WRITE_SPACE) |
sk | 130 | net/tcp/udp.c | if (sk->err) return (1); /* can this ever happen? */ |
sk | 148 | net/tcp/udp.c | volatile struct sock *sk; |
sk | 153 | net/tcp/udp.c | sk = get_sock (&udp_prot, net16(th->dest), saddr, th->source, daddr); |
sk | 155 | net/tcp/udp.c | if (sk == NULL) return; |
sk | 158 | net/tcp/udp.c | if (sk->cong_window > 1) |
sk | 159 | net/tcp/udp.c | sk->cong_window = sk->cong_window/2; |
sk | 163 | net/tcp/udp.c | sk->err = icmp_err_convert[err & 0xff].errno; |
sk | 165 | net/tcp/udp.c | if (icmp_err_convert[err & 0xff].fatal && sk->state == TCP_ESTABLISHED) |
sk | 167 | net/tcp/udp.c | sk->prot->close(sk, 0); |
sk | 245 | net/tcp/udp.c | unsigned long daddr, int len, volatile struct sock *sk) |
sk | 248 | net/tcp/udp.c | if (sk && sk->no_check) return; |
sk | 253 | net/tcp/udp.c | udp_loopback (volatile struct sock *sk, unsigned short port, |
sk | 260 | net/tcp/udp.c | sk->inuse = 1; |
sk | 264 | net/tcp/udp.c | pair = get_sock (sk->prot, net16(port), saddr, |
sk | 265 | net/tcp/udp.c | sk->dummy_th.source, daddr); |
sk | 286 | net/tcp/udp.c | uh -> source = sk->dummy_th.source; |
sk | 307 | net/tcp/udp.c | release_sock (sk); |
sk | 313 | net/tcp/udp.c | udp_sendto (volatile struct sock *sk, unsigned char *from, int len, |
sk | 349 | net/tcp/udp.c | if (sk->state != TCP_ESTABLISHED) |
sk | 352 | net/tcp/udp.c | sin.sin_port = sk->dummy_th.dest; |
sk | 353 | net/tcp/udp.c | sin.sin_addr.s_addr = sk->daddr; |
sk | 357 | net/tcp/udp.c | saddr = sk->saddr; |
sk | 367 | net/tcp/udp.c | err = udp_loopback (sk, sin.sin_port, from, len, |
sk | 373 | net/tcp/udp.c | sk->inuse = 1; |
sk | 378 | net/tcp/udp.c | skb = sk->prot->wmalloc (sk, len + sizeof (*skb) |
sk | 379 | net/tcp/udp.c | + sk->prot->max_header, 0, |
sk | 385 | net/tcp/udp.c | tmp = sk->wmem_alloc; |
sk | 386 | net/tcp/udp.c | release_sock (sk); |
sk | 390 | net/tcp/udp.c | if (tmp <= sk->wmem_alloc) |
sk | 392 | net/tcp/udp.c | interruptible_sleep_on (sk->sleep); |
sk | 400 | net/tcp/udp.c | sk->inuse = 1; |
sk | 407 | net/tcp/udp.c | skb->mem_len = len + sizeof (*skb) + sk->prot->max_header; |
sk | 408 | net/tcp/udp.c | skb->sk = sk; |
sk | 414 | net/tcp/udp.c | tmp = sk->prot->build_header (skb, saddr, |
sk | 416 | net/tcp/udp.c | IPPROTO_UDP, sk->opt, skb->mem_len); |
sk | 419 | net/tcp/udp.c | sk->prot->wfree (sk, skb->mem_addr, skb->mem_len); |
sk | 420 | net/tcp/udp.c | release_sock (sk); |
sk | 437 | net/tcp/udp.c | uh->source = sk->dummy_th.source; |
sk | 445 | net/tcp/udp.c | release_sock (sk); |
sk | 456 | net/tcp/udp.c | amt+sizeof (*uh), sk); |
sk | 458 | net/tcp/udp.c | sk->prot->queue_xmit (sk, dev, skb, 1); |
sk | 460 | net/tcp/udp.c | release_sock (sk); |
sk | 465 | net/tcp/udp.c | udp_write (volatile struct sock *sk, unsigned char *buff, int len, int noblock, |
sk | 468 | net/tcp/udp.c | return (udp_sendto (sk, buff, len, noblock, flags, NULL, 0)); |
sk | 473 | net/tcp/udp.c | udp_ioctl (volatile struct sock *sk, int cmd, unsigned long arg) |
sk | 483 | net/tcp/udp.c | if (sk->state == TCP_LISTEN) |
sk | 485 | net/tcp/udp.c | amount = sk->prot->wspace(sk)/2; |
sk | 497 | net/tcp/udp.c | if (sk->state == TCP_LISTEN) |
sk | 500 | net/tcp/udp.c | skb = sk->rqueue; |
sk | 516 | net/tcp/udp.c | udp_recvfrom (volatile struct sock *sk, unsigned char *to, int len, |
sk | 530 | net/tcp/udp.c | if (sk->err) |
sk | 533 | net/tcp/udp.c | err = -sk->err; |
sk | 534 | net/tcp/udp.c | sk->err = 0; |
sk | 542 | net/tcp/udp.c | sk->inuse = 1; |
sk | 543 | net/tcp/udp.c | while (sk->rqueue == NULL) |
sk | 545 | net/tcp/udp.c | if (sk->shutdown & RCV_SHUTDOWN) |
sk | 552 | net/tcp/udp.c | release_sock (sk); |
sk | 555 | net/tcp/udp.c | release_sock (sk); |
sk | 557 | net/tcp/udp.c | if (sk->rqueue == NULL) |
sk | 559 | net/tcp/udp.c | interruptible_sleep_on (sk->sleep); |
sk | 566 | net/tcp/udp.c | sk->inuse = 1; |
sk | 569 | net/tcp/udp.c | skb = sk->rqueue; |
sk | 575 | net/tcp/udp.c | sk->rqueue = NULL; |
sk | 579 | net/tcp/udp.c | sk->rqueue = (struct sk_buff *)sk->rqueue ->next; |
sk | 602 | net/tcp/udp.c | release_sock (sk); |
sk | 609 | net/tcp/udp.c | udp_read (volatile struct sock *sk, unsigned char *buff, int len, int noblock, |
sk | 612 | net/tcp/udp.c | return (udp_recvfrom (sk, buff, len, noblock, flags, NULL, NULL)); |
sk | 616 | net/tcp/udp.c | udp_connect (volatile struct sock *sk, struct sockaddr_in *usin, int addr_len) |
sk | 624 | net/tcp/udp.c | sk->daddr = sin.sin_addr.s_addr; |
sk | 625 | net/tcp/udp.c | sk->dummy_th.dest = sin.sin_port; |
sk | 626 | net/tcp/udp.c | sk->state = TCP_ESTABLISHED; |
sk | 631 | net/tcp/udp.c | udp_close(volatile struct sock *sk, int timeout) |
sk | 633 | net/tcp/udp.c | sk->inuse = 1; |
sk | 634 | net/tcp/udp.c | sk->state = TCP_CLOSE; |
sk | 635 | net/tcp/udp.c | if (sk->dead) |
sk | 636 | net/tcp/udp.c | destroy_sock (sk); |
sk | 638 | net/tcp/udp.c | release_sock (sk); |
sk | 648 | net/tcp/udp.c | volatile struct sock *sk; |
sk | 655 | net/tcp/udp.c | sk = get_sock (prot, net16(uh->dest), saddr, uh->source, daddr); |
sk | 658 | net/tcp/udp.c | if (sk == NULL) |
sk | 665 | net/tcp/udp.c | skb->sk = NULL; |
sk | 676 | net/tcp/udp.c | skb->sk = NULL; |
sk | 681 | net/tcp/udp.c | skb->sk = sk; |
sk | 691 | net/tcp/udp.c | if (sk->inuse) |
sk | 693 | net/tcp/udp.c | if (sk->back_log == NULL) |
sk | 695 | net/tcp/udp.c | sk->back_log = skb; |
sk | 701 | net/tcp/udp.c | skb->next = sk->back_log; |
sk | 702 | net/tcp/udp.c | skb->prev = sk->back_log->prev; |
sk | 709 | net/tcp/udp.c | sk->inuse = 1; |
sk | 714 | net/tcp/udp.c | if (sk->rmem_alloc + skb->mem_len >= SK_RMEM_MAX) |
sk | 716 | net/tcp/udp.c | skb->sk = NULL; |
sk | 718 | net/tcp/udp.c | release_sock (sk); |
sk | 722 | net/tcp/udp.c | sk->rmem_alloc += skb->mem_len; |
sk | 728 | net/tcp/udp.c | if (sk->rqueue == NULL) |
sk | 730 | net/tcp/udp.c | sk->rqueue = skb; |
sk | 736 | net/tcp/udp.c | skb->next = sk->rqueue; |
sk | 737 | net/tcp/udp.c | skb->prev = sk->rqueue->prev; |
sk | 744 | net/tcp/udp.c | if (!sk->dead) |
sk | 745 | net/tcp/udp.c | wake_up (sk->sleep); |
sk | 747 | net/tcp/udp.c | release_sock (sk); |