/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	@(#)tcp_input.c	1.0.16	05/25/93
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * FIXES
 *	Pedro Roque	:	Double ACK bug
 */

#include <linux/config.h>
#include <net/tcp.h>

#include <linux/interrupt.h>

/*
 *	Policy code extracted so it is now separate
 */

/*
 *	Called each time to estimate the delayed ack timeout. This is
 *	how it should be done so a fast link isn't impacted by ack delay.
 */

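/*
 *	In the code below sk->ato is the delayed-ACK timeout: it is seeded
 *	at HZ/3 on the first segment, then tracks the inter-arrival time m
 *	of incoming segments as ato = ato/2 + m, except that an arrival gap
 *	larger than rtt/8 clamps ato back to rtt/8.
 */
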
extern __inline__ void tcp_delack_estimator(struct sock *sk)
{
	/*
	 *	Delayed ACK time estimator.
	 */

	if (sk->lrcvtime == 0)
	{
		sk->lrcvtime = jiffies;
		sk->ato = HZ/3;
	}
	else
	{
		int m;

		m = jiffies - sk->lrcvtime;

		sk->lrcvtime = jiffies;

		if (m <= 0)
			m = 1;

		if (m > (sk->rtt >> 3))
		{
			sk->ato = sk->rtt >> 3;
			/*
			 * printk(KERN_DEBUG "ato: rtt %lu\n", sk->ato);
			 */
		}
		else
		{
			sk->ato = (sk->ato >> 1) + m;
			/*
			 * printk(KERN_DEBUG "ato: m %lu\n", sk->ato);
			 */
		}
	}
}

/*
 *	Called on frames that were known _not_ to have been
 *	retransmitted [see Karn/Partridge Proceedings SIGCOMM 87].
 *	The algorithm is from the SIGCOMM 88 piece by Van Jacobson.
 */

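/*
 *	Note on the arithmetic below: sk->rtt holds the smoothed RTT scaled
 *	by 8 and sk->mdev the mean deviation scaled by 4, so the updates are
 *	srtt = 7/8*srtt + 1/8*m and mdev = 3/4*mdev + 1/4*|err|.  The
 *	resulting rto = ((rtt>>2) + mdev) >> 1 works out to srtt + 2*mdev in
 *	unscaled terms, clamped to the range [HZ/5, 120*HZ].
 */
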
extern __inline__ void tcp_rtt_estimator(struct sock *sk, struct sk_buff *oskb)
{
	long m;
	/*
	 *	The following amusing code comes from Jacobson's
	 *	article in SIGCOMM '88.  Note that rtt and mdev
	 *	are scaled versions of rtt and mean deviation.
	 *	This is designed to be as fast as possible
	 *	m stands for "measurement".
	 */

	m = jiffies - oskb->when;	/* RTT */
	if (m <= 0)
		m = 1;			/* IS THIS RIGHT FOR <0 ??? */
	m -= (sk->rtt >> 3);		/* m is now error in rtt est */
	sk->rtt += m;			/* rtt = 7/8 rtt + 1/8 new */
	if (m < 0)
		m = -m;			/* m is now abs(error) */
	m -= (sk->mdev >> 2);		/* similar update on mdev */
	sk->mdev += m;			/* mdev = 3/4 mdev + 1/4 new */

	/*
	 *	Now update timeout.  Note that this removes any backoff.
	 */

	sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
	if (sk->rto > 120*HZ)
		sk->rto = 120*HZ;
	if (sk->rto < HZ/5)	/* Was 1*HZ - keep .2 as minimum cos of the BSD delayed acks */
		sk->rto = HZ/5;
	sk->backoff = 0;
}

/*
 *	Cached last hit socket
 */

static volatile unsigned long th_cache_saddr, th_cache_daddr;
static volatile unsigned short th_cache_dport, th_cache_sport;
static volatile struct sock *th_cache_sk;

void tcp_cache_zap(void)
{
	th_cache_sk = NULL;
}

/*
 *	Find the socket, using the last hit cache if applicable. The cache is not quite
 *	right...
 */

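/*
 *	The cache is a single entry keyed on the full (saddr, sport, daddr,
 *	dport) tuple; any mismatch falls back to get_sock() and, on success,
 *	refreshes the cached entry.  tcp_cache_zap() above invalidates it.
 */
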
static inline struct sock * get_tcp_sock(u32 saddr, u16 sport, u32 daddr, u16 dport)
{
	struct sock * sk;

	sk = (struct sock *) th_cache_sk;
	if (!sk || saddr != th_cache_saddr || daddr != th_cache_daddr ||
	    sport != th_cache_sport || dport != th_cache_dport) {
		sk = get_sock(&tcp_prot, dport, saddr, sport, daddr);
		if (sk) {
			th_cache_saddr = saddr;
			th_cache_daddr = daddr;
			th_cache_dport = dport;
			th_cache_sport = sport;
			th_cache_sk = sk;
		}
	}
	return sk;
}

/*
 *	React to an out-of-window TCP sequence number in an incoming packet
 */

static void bad_tcp_sequence(struct sock *sk, struct tcphdr *th, short len,
	     struct options *opt, unsigned long saddr, struct device *dev)
{
	if (th->rst)
		return;

	/*
	 *	Send a reset if we get something not ours and we are
	 *	unsynchronized. Note: We don't do anything to our end. We
	 *	are just killing the bogus remote connection then we will
	 *	connect again and it will work (with luck).
	 */

	if (sk->state == TCP_SYN_SENT || sk->state == TCP_SYN_RECV)
	{
		tcp_send_reset(sk->saddr, sk->daddr, th, sk->prot, NULL, dev, sk->ip_tos, sk->ip_ttl);
		return;
	}

	/*
	 *	4.3reno machines look for these kinds of acks so they can do fast
	 *	recovery. Three identical 'old' acks let it know that one frame has
	 *	been lost and should be resent. Because this is before the whole window
	 *	of data has timed out it can take one lost frame per window without
	 *	stalling. [See Jacobson RFC1323, Stevens TCP/IP illus vol2]
	 *
	 *	We also should be spotting triple bad sequences.
	 */
	tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
	return;
}

/*
 *	This function checks to see if the tcp header is actually acceptable.
 */

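/*
 *	Acceptable here means the segment overlaps the receive window
 *	[acked_seq, acked_seq + window): the start must lie before the right
 *	edge and the end must not lie before acked_seq.  The special first
 *	clause accepts a zero-length segment sitting exactly on the right
 *	edge, which is the zero-window case.
 */
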
extern __inline__ int tcp_sequence(struct sock *sk, u32 seq, u32 end_seq)
{
	u32 end_window = sk->acked_seq + sk->window;
	return	/* if start is at end of window, end must be too (zero window) */
		(seq == end_window && seq == end_seq) ||
		/* if start is before end of window, check for interest */
		(before(seq, end_window) && !before(end_seq, sk->acked_seq));
}

/*
 *	When we get a reset we do this. This probably is a tcp_output routine
 *	really.
 */

static int tcp_reset(struct sock *sk, struct sk_buff *skb)
{
	sk->zapped = 1;
	/*
	 *	We want the right error as BSD sees it (and indeed as we do).
	 */
	sk->err = ECONNRESET;
	if (sk->state == TCP_SYN_SENT)
		sk->err = ECONNREFUSED;
	if (sk->state == TCP_CLOSE_WAIT)
		sk->err = EPIPE;
#ifdef CONFIG_TCP_RFC1337
	/*
	 *	Time wait assassination protection [RFC1337]
	 *
	 *	This is a good idea, but causes more sockets to take time to close.
	 *
	 *	Ian Heavens has since shown this is an inadequate fix for the protocol
	 *	bug in question.
	 */
	if (sk->state != TCP_TIME_WAIT)
	{
		tcp_set_state(sk, TCP_CLOSE);
		sk->shutdown = SHUTDOWN_MASK;
	}
#else
	tcp_set_state(sk, TCP_CLOSE);
	sk->shutdown = SHUTDOWN_MASK;
#endif
	if (!sk->dead)
		sk->state_change(sk);
	kfree_skb(skb, FREE_READ);
	return(0);
}


/*
 *	Look for tcp options. Parses everything but only knows about MSS.
 *	This routine is always called with the packet containing the SYN.
 *	However it may also be called with the ack to the SYN. So you
 *	can't assume this is always the SYN. It's always called after
 *	we have set up sk->mtu to our own MTU.
 *
 *	We need at minimum to add PAWS support here. Possibly large windows
 *	as Linux gets deployed on 100Mb/sec networks.
 */

static void tcp_options(struct sock *sk, struct tcphdr *th)
{
	unsigned char *ptr;
	int length = (th->doff*4) - sizeof(struct tcphdr);
	int mss_seen = 0;

	ptr = (unsigned char *)(th + 1);

	while (length > 0)
	{
		int opcode = *ptr++;
		int opsize = *ptr++;
		switch (opcode)
		{
			case TCPOPT_EOL:
				return;
			case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
				length--;
				ptr--;		/* the opsize=*ptr++ above was a mistake */
				continue;

			default:
				if (opsize <= 2)	/* Avoid silly options looping forever */
					return;
				switch (opcode)
				{
					case TCPOPT_MSS:
						if (opsize == 4 && th->syn)
						{
							sk->mtu = min(sk->mtu, ntohs(*(unsigned short *)ptr));
							mss_seen = 1;
						}
						break;
					/* Add other options here as people feel the urge to implement stuff like large windows */
				}
				ptr += opsize-2;
				length -= opsize;
		}
	}
	if (th->syn)
	{
		if (! mss_seen)
			sk->mtu = min(sk->mtu, 536);	/* default MSS if none sent */
	}
#ifdef CONFIG_INET_PCTCP
	sk->mss = min(sk->max_window >> 1, sk->mtu);
#else
	sk->mss = min(sk->max_window, sk->mtu);
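	/* Used in tcp_queue() as the unacked-byte threshold beyond which a delayed ACK is sent at once */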
	sk->max_unacked = 2 * sk->mss;
#endif
}


/*
 *	This routine handles a connection request.
 *	It should make sure we haven't already responded.
 *	Because of the way BSD works, we have to send a syn/ack now.
 *	This also means it will be harder to close a socket which is
 *	listening.
 */

static void tcp_conn_request(struct sock *sk, struct sk_buff *skb,
		 u32 daddr, u32 saddr, struct options *opt, struct device *dev, u32 seq)
{
	struct sock *newsk;
	struct tcphdr *th;
	struct rtable *rt;

	th = skb->h.th;

	/* If the socket is dead, don't accept the connection. */
	if (!sk->dead)
	{
		sk->data_ready(sk,0);
	}
	else
	{
		if (sk->debug)
			printk("Reset on %p: Connect on dead socket.\n",sk);
		tcp_send_reset(daddr, saddr, th, sk->prot, opt, dev, sk->ip_tos, sk->ip_ttl);
		tcp_statistics.TcpAttemptFails++;
		kfree_skb(skb, FREE_READ);
		return;
	}

	/*
	 *	Make sure we can accept more. This will prevent a
	 *	flurry of syns from eating up all our memory.
	 *
	 *	BSD does some funnies here and allows 3/2 times the
	 *	set backlog as a fudge factor. That's just too gross.
	 */

	if (sk->ack_backlog >= sk->max_ack_backlog)
	{
		tcp_statistics.TcpAttemptFails++;
		kfree_skb(skb, FREE_READ);
		return;
	}

	/*
	 *	We need to build a new sock struct.
	 *	It is sort of bad to have a socket without an inode attached
	 *	to it, but the wake_up's will just wake up the listening socket,
	 *	and if the listening socket is destroyed before this is taken
	 *	off of the queue, this will take care of it.
	 */

	newsk = (struct sock *) kmalloc(sizeof(struct sock), GFP_ATOMIC);
	if (newsk == NULL)
	{
		/* just ignore the syn. It will get retransmitted. */
		tcp_statistics.TcpAttemptFails++;
		kfree_skb(skb, FREE_READ);
		return;
	}

	memcpy(newsk, sk, sizeof(*newsk));
	newsk->opt = NULL;
	newsk->ip_route_cache = NULL;
	if (opt && opt->optlen)
	{
		sk->opt = (struct options*)kmalloc(sizeof(struct options)+opt->optlen, GFP_ATOMIC);
		if (!sk->opt)
		{
			kfree_s(newsk, sizeof(struct sock));
			tcp_statistics.TcpAttemptFails++;
			kfree_skb(skb, FREE_READ);
			return;
		}
		if (ip_options_echo(sk->opt, opt, daddr, saddr, skb))
		{
			kfree_s(sk->opt, sizeof(struct options)+opt->optlen);
			kfree_s(newsk, sizeof(struct sock));
			tcp_statistics.TcpAttemptFails++;
			kfree_skb(skb, FREE_READ);
			return;
		}
	}
	skb_queue_head_init(&newsk->write_queue);
	skb_queue_head_init(&newsk->receive_queue);
	newsk->send_head = NULL;
	newsk->send_tail = NULL;
	skb_queue_head_init(&newsk->back_log);
	newsk->rtt = 0;		/*TCP_CONNECT_TIME<<3*/
	newsk->rto = TCP_TIMEOUT_INIT;
	newsk->mdev = 0;
	newsk->max_window = 0;
	newsk->cong_window = 1;
	newsk->cong_count = 0;
	newsk->ssthresh = 0;
	newsk->backoff = 0;
	newsk->blog = 0;
	newsk->intr = 0;
	newsk->proc = 0;
	newsk->done = 0;
	newsk->partial = NULL;
	newsk->pair = NULL;
	newsk->wmem_alloc = 0;
	newsk->rmem_alloc = 0;
	newsk->localroute = sk->localroute;

	newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;

	newsk->err = 0;
	newsk->shutdown = 0;
	newsk->ack_backlog = 0;
	newsk->acked_seq = skb->seq+1;
	newsk->lastwin_seq = skb->seq+1;
	newsk->delay_acks = 1;
	newsk->copied_seq = skb->seq+1;
	newsk->fin_seq = skb->seq;
	newsk->state = TCP_SYN_RECV;
	newsk->timeout = 0;
	newsk->ip_xmit_timeout = 0;
	newsk->write_seq = seq;
	newsk->window_seq = newsk->write_seq;
	newsk->rcv_ack_seq = newsk->write_seq;
	newsk->urg_data = 0;
	newsk->retransmits = 0;
	newsk->linger = 0;
	newsk->destroy = 0;
	init_timer(&newsk->timer);
	newsk->timer.data = (unsigned long)newsk;
	newsk->timer.function = &net_timer;
	init_timer(&newsk->retransmit_timer);
	newsk->retransmit_timer.data = (unsigned long)newsk;
	newsk->retransmit_timer.function = &tcp_retransmit_timer;
	newsk->dummy_th.source = skb->h.th->dest;
	newsk->dummy_th.dest = skb->h.th->source;

	/*
	 *	Swap these two, they are from our point of view.
	 */

	newsk->daddr = saddr;
	newsk->saddr = daddr;
	newsk->rcv_saddr = daddr;

	put_sock(newsk->num,newsk);
	newsk->acked_seq = skb->seq + 1;
	newsk->copied_seq = skb->seq + 1;
	newsk->socket = NULL;

	/*
	 *	Grab the ttl and tos values and use them
	 */

	newsk->ip_ttl = sk->ip_ttl;
	newsk->ip_tos = skb->ip_hdr->tos;

	/*
	 *	Use 512 or whatever user asked for
	 */

	/*
	 *	Note use of sk->user_mss, since user has no direct access to newsk
	 */

	rt = ip_rt_route(newsk->opt && newsk->opt->srr ? newsk->opt->faddr : saddr, 0);
	newsk->ip_route_cache = rt;

	if (rt != NULL && (rt->rt_flags&RTF_WINDOW))
		newsk->window_clamp = rt->rt_window;
	else
		newsk->window_clamp = 0;

	if (sk->user_mss)
		newsk->mtu = sk->user_mss;
	else if (rt)
		newsk->mtu = rt->rt_mtu - sizeof(struct iphdr) - sizeof(struct tcphdr);
	else
		newsk->mtu = 576 - sizeof(struct iphdr) - sizeof(struct tcphdr);

	/*
	 *	But not bigger than device MTU
	 */

	newsk->mtu = min(newsk->mtu, dev->mtu - sizeof(struct iphdr) - sizeof(struct tcphdr));

#ifdef CONFIG_SKIP

	/*
	 *	SKIP devices set their MTU to 65535. This is so they can take packets
	 *	unfragmented to security process then fragment. They could lie to the
	 *	TCP layer about a suitable MTU, but it's easier to let skip sort it out
	 *	simply because the final package we want unfragmented is going to be
	 *
	 *	[IPHDR][IPSP][Security data][Modified TCP data][Security data]
	 */

	if (skip_pick_mtu != NULL)	/* If SKIP is loaded.. */
		sk->mtu = skip_pick_mtu(sk->mtu, dev);
#endif
	/*
	 *	This will min with what arrived in the packet
	 */

	tcp_options(newsk, skb->h.th);

	tcp_cache_zap();
	tcp_send_synack(newsk, sk, skb);
}


/*
 *	Handle a TCP window that shrunk on us. It shouldn't happen,
 *	but..
 *
 *	We may need to move packets from the send queue
 *	to the write queue, if the window has been shrunk on us.
 *	The RFC says you are not allowed to shrink your window
 *	like this, but if the other end does, you must be able
 *	to deal with it.
 */
void tcp_window_shrunk(struct sock * sk, u32 window_seq)
{
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *wskb = NULL;

	skb2 = sk->send_head;
	sk->send_head = NULL;
	sk->send_tail = NULL;

	/*
	 *	This is an artifact of a flawed concept. We want one
	 *	queue and a smarter send routine when we send all.
	 */
	cli();
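	/*
	 *	Walk the old retransmit queue: anything ending beyond the new
	 *	right edge (window_seq) is unlinked from the device queue if
	 *	needed and put back on write_queue in order; everything still
	 *	inside the window is rebuilt into a fresh send_head/send_tail list.
	 */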
	while (skb2 != NULL)
	{
		skb = skb2;
		skb2 = skb->link3;
		skb->link3 = NULL;
		if (after(skb->end_seq, window_seq))
		{
			if (sk->packets_out > 0)
				sk->packets_out--;
			/* We may need to remove this from the dev send list. */
			if (skb->next != NULL)
			{
				skb_unlink(skb);
			}
			/* Now add it to the write_queue. */
			if (wskb == NULL)
				skb_queue_head(&sk->write_queue,skb);
			else
				skb_append(wskb,skb);
			wskb = skb;
		}
		else
		{
			if (sk->send_head == NULL)
			{
				sk->send_head = skb;
				sk->send_tail = skb;
			}
			else
			{
				sk->send_tail->link3 = skb;
				sk->send_tail = skb;
			}
			skb->link3 = NULL;
		}
	}
	sti();
}


/*
 *	This routine deals with incoming acks, but not outgoing ones.
 *
 *	This routine is totally _WRONG_. The list structuring is wrong,
 *	the algorithm is wrong, the code is wrong.
 */

static int tcp_ack(struct sock *sk, struct tcphdr *th, u32 ack, int len)
{
	int flag = 0;
	u32 window_seq;

	/*
	 *	1 - there was data in packet as well as ack or new data is sent or
	 *	    in shutdown state
	 *	2 - data from retransmit queue was acked and removed
	 *	4 - window shrunk or data from retransmit queue was acked and removed
	 */

	if (sk->zapped)
		return(1);	/* Dead, can't ack any more so why bother */

	/*
	 *	We have dropped back to keepalive timeouts. Thus we have
	 *	no retransmits pending.
	 */

	if (sk->ip_xmit_timeout == TIME_KEEPOPEN)
		sk->retransmits = 0;

	/*
	 *	If the ack is newer than sent or older than previous acks
	 *	then we can probably ignore it.
	 */

	if (after(ack, sk->sent_seq) || before(ack, sk->rcv_ack_seq))
		goto uninteresting_ack;

	/*
	 *	If there is data set flag 1
	 */

	if (len != th->doff*4)
		flag |= 1;

	/*
	 *	Have we discovered a larger window
	 */
	window_seq = ntohs(th->window);
	if (window_seq > sk->max_window)
	{
		sk->max_window = window_seq;
#ifdef CONFIG_INET_PCTCP
		/* Hack because we don't send partial packets to non SWS
		   handling hosts */
		sk->mss = min(window_seq>>1, sk->mtu);
#else
		sk->mss = min(window_seq, sk->mtu);
#endif
	}
	window_seq += ack;
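	/*
	 *	window_seq is now the absolute right edge of the peer's
	 *	advertised window (ack + window), which is how it is compared
	 *	and stored below.
	 */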

	/*
	 *	See if our window has been shrunk.
	 */
	if (after(sk->window_seq, window_seq)) {
		flag |= 4;
		tcp_window_shrunk(sk, window_seq);
	}

	/*
	 *	Update the right hand window edge of the host
	 */
	sk->window_seq = window_seq;

	/*
	 *	Pipe has emptied
	 */
	if (sk->send_tail == NULL || sk->send_head == NULL)
	{
		sk->send_head = NULL;
		sk->send_tail = NULL;
		sk->packets_out = 0;
	}

	/*
	 *	We don't want too many packets out there.
	 */

	if (sk->ip_xmit_timeout == TIME_WRITE &&
		sk->cong_window < 2048 && after(ack, sk->rcv_ack_seq))
	{

		/*
		 *	This is Jacobson's slow start and congestion avoidance.
		 *	SIGCOMM '88, p. 328.  Because we keep cong_window in integral
		 *	mss's, we can't do cwnd += 1 / cwnd.  Instead, maintain a
		 *	counter and increment it once every cwnd times.  It's possible
		 *	that this should be done only if sk->retransmits == 0.  I'm
		 *	interpreting "new data is acked" as including data that has
		 *	been retransmitted but is just now being acked.
		 */
		if (sk->cong_window < sk->ssthresh)
			/*
			 *	In "safe" area, increase
			 */
			sk->cong_window++;
		else
		{
			/*
			 *	In dangerous area, increase slowly.  In theory this is
			 *	sk->cong_window += 1 / sk->cong_window
			 */
			if (sk->cong_count >= sk->cong_window)
			{
				sk->cong_window++;
				sk->cong_count = 0;
			}
			else
				sk->cong_count++;
		}
	}

	/*
	 *	Remember the highest ack received.
	 */

	sk->rcv_ack_seq = ack;

	/*
	 *	We passed data and got it acked, remove any soft error
	 *	log. Something worked...
	 */

	sk->err_soft = 0;

	/*
	 *	If this ack opens up a zero window, clear backoff.  It was
	 *	being used to time the probes, and is probably far higher than
	 *	it needs to be for normal retransmission.
	 */

	if (sk->ip_xmit_timeout == TIME_PROBE0)
	{
		sk->retransmits = 0;	/* Our probe was answered */

		/*
		 *	Was it a usable window open ?
		 */

		if (skb_peek(&sk->write_queue) != NULL &&   /* should always be non-null */
		    ! before (sk->window_seq, sk->write_queue.next->end_seq))
		{
			sk->backoff = 0;

			/*
			 *	Recompute rto from rtt.  this eliminates any backoff.
			 */

			sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
			if (sk->rto > 120*HZ)
				sk->rto = 120*HZ;
			if (sk->rto < HZ/5)	/* Was 1*HZ, then 1 - turns out we must allow about
						   .2 of a second because of BSD delayed acks - on a 100Mb/sec link
						   .2 of a second is going to need huge windows (SIGH) */
				sk->rto = HZ/5;
		}
	}

	/*
	 *	See if we can take anything off of the retransmit queue.
	 */

	for (;;) {
		struct sk_buff * skb = sk->send_head;
		if (!skb)
			break;

		/* Check for a bug. */
		if (skb->link3 && after(skb->end_seq, skb->link3->end_seq))
			printk("INET: tcp.c: *** bug send_list out of order.\n");

		/*
		 *	If our packet is before the ack sequence we can
		 *	discard it as it's confirmed to have arrived at the other end.
		 */

		if (after(skb->end_seq, ack))
			break;

		if (sk->retransmits)
		{
			/*
			 *	We were retransmitting.  don't count this in RTT est
			 */
			flag |= 2;
		}

		if ((sk->send_head = skb->link3) == NULL)
		{
			sk->send_tail = NULL;
			sk->retransmits = 0;
		}
		/*
		 *	Note that we only reset backoff and rto in the
		 *	rtt recomputation code.  And that doesn't happen
		 *	if there were retransmissions in effect.  So the
		 *	first new packet after the retransmissions is
		 *	sent with the backoff still in effect.  Not until
		 *	we get an ack from a non-retransmitted packet do
		 *	we reset the backoff and rto.  This allows us to deal
		 *	with a situation where the network delay has increased
		 *	suddenly.  I.e. Karn's algorithm. (SIGCOMM '87, p5.)
		 */

		/*
		 *	We have one less packet out there.
		 */

		if (sk->packets_out > 0)
			sk->packets_out --;

		if (!(flag&2))	/* Not retransmitting */
			tcp_rtt_estimator(sk,skb);
		flag |= (2|4);	/* 2 is really more like 'don't adjust the rtt
				   in this case as we just set it up */
		IS_SKB(skb);

		/*
		 *	We may need to remove this from the dev send list.
		 */
		cli();
		if (skb->next)
			skb_unlink(skb);
		sti();
		kfree_skb(skb, FREE_WRITE);	/* write. */
		if (!sk->dead)
			sk->write_space(sk);
	}

	/*
	 * XXX someone ought to look at this too.. at the moment, if skb_peek()
	 * returns non-NULL, we completely ignore the timer stuff in the else
	 * clause.  We ought to organize the code so that else clause can
	 * (should) be executed regardless, possibly moving the PROBE timer
	 * reset over.  The skb_peek() thing should only move stuff to the
	 * write queue, NOT also manage the timer functions.
	 */

	/*
	 *	Maybe we can take some stuff off of the write queue,
	 *	and put it onto the xmit queue.
	 */
	if (skb_peek(&sk->write_queue) != NULL)
	{
		if (!before(sk->window_seq, sk->write_queue.next->end_seq) &&
			(sk->retransmits == 0 ||
			 sk->ip_xmit_timeout != TIME_WRITE ||
			 !after(sk->write_queue.next->end_seq, sk->rcv_ack_seq))
			&& sk->packets_out < sk->cong_window)
		{
			/*
			 *	Add more data to the send queue.
			 */
			flag |= 1;
			tcp_write_xmit(sk);
		}
		else if (before(sk->window_seq, sk->write_queue.next->end_seq) &&
			sk->send_head == NULL &&
			sk->ack_backlog == 0 &&
			sk->state != TCP_TIME_WAIT)
		{
			/*
			 *	Data to queue but no room.
			 */
			tcp_reset_xmit_timer(sk, TIME_PROBE0, sk->rto);
		}
	}
	else
	{
		/*
		 *	from TIME_WAIT we stay in TIME_WAIT as long as we rx packets
		 *	from TCP_CLOSE we don't do anything
		 *
		 *	from anything else, if there is write data (or fin) pending,
		 *	we use a TIME_WRITE timeout, else if keepalive we reset to
		 *	a KEEPALIVE timeout, else we delete the timer.
		 *
		 *	We do not set flag for nominal write data, otherwise we may
		 *	force a state where we start to write itsy bitsy tidbits
		 *	of data.
		 */

		switch(sk->state) {
		case TCP_TIME_WAIT:
			/*
			 *	keep us in TIME_WAIT until we stop getting packets,
			 *	reset the timeout.
			 */
			tcp_reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			break;
		case TCP_CLOSE:
			/*
			 *	don't touch the timer.
			 */
			break;
		default:
			/*
			 *	Must check send_head, write_queue, and ack_backlog
			 *	to determine which timeout to use.
			 */
			if (sk->send_head || skb_peek(&sk->write_queue) != NULL || sk->ack_backlog) {
				tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
			} else if (sk->keepopen) {
				tcp_reset_xmit_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
			} else {
				del_timer(&sk->retransmit_timer);
				sk->ip_xmit_timeout = 0;
			}
			break;
		}
	}

	/*
	 *	We have nothing queued but space to send.  Send any partial
	 *	packets immediately (end of Nagle rule application).
	 */

	if (sk->packets_out == 0 && sk->partial != NULL &&
		skb_peek(&sk->write_queue) == NULL && sk->send_head == NULL)
	{
		flag |= 1;
		tcp_send_partial(sk);
	}

	/*
	 *	In the LAST_ACK case, the other end FIN'd us.  We then FIN'd them, and
	 *	we are now waiting for an acknowledge to our FIN.  The other end is
	 *	already in TIME_WAIT.
	 *
	 *	Move to TCP_CLOSE on success.
	 */

	if (sk->state == TCP_LAST_ACK)
	{
		if (!sk->dead)
			sk->state_change(sk);
		if (sk->debug)
			printk("rcv_ack_seq: %X==%X, acked_seq: %X==%X\n",
				sk->rcv_ack_seq, sk->write_seq, sk->acked_seq, sk->fin_seq);
		if (sk->rcv_ack_seq == sk->write_seq /*&& sk->acked_seq == sk->fin_seq*/)
		{
			flag |= 1;
			sk->shutdown = SHUTDOWN_MASK;
			tcp_set_state(sk,TCP_CLOSE);
			return 1;
		}
	}

	/*
	 *	Incoming ACK to a FIN we sent in the case of our initiating the close.
	 *
	 *	Move to FIN_WAIT2 to await a FIN from the other end.  Set
	 *	SEND_SHUTDOWN but not RCV_SHUTDOWN as data can still be coming in.
	 */

	if (sk->state == TCP_FIN_WAIT1)
	{

		if (!sk->dead)
			sk->state_change(sk);
		if (sk->rcv_ack_seq == sk->write_seq)
		{
			flag |= 1;
			sk->shutdown |= SEND_SHUTDOWN;
			tcp_set_state(sk, TCP_FIN_WAIT2);
		}
	}

	/*
	 *	Incoming ACK to a FIN we sent in the case of a simultaneous close.
	 *
	 *	Move to TIME_WAIT
	 */

	if (sk->state == TCP_CLOSING)
	{

		if (!sk->dead)
			sk->state_change(sk);
		if (sk->rcv_ack_seq == sk->write_seq)
		{
			flag |= 1;
			tcp_time_wait(sk);
		}
	}

	/*
	 *	Final ack of a three way shake
	 */

	if (sk->state == TCP_SYN_RECV)
	{
		tcp_set_state(sk, TCP_ESTABLISHED);
		tcp_options(sk,th);
		sk->dummy_th.dest = th->source;
		sk->copied_seq = sk->acked_seq;
		if (!sk->dead)
			sk->state_change(sk);
		if (sk->max_window == 0)
		{
			sk->max_window = 32;	/* Sanity check */
			sk->mss = min(sk->max_window, sk->mtu);
		}
	}

	/*
	 *	I make no guarantees about the first clause in the following
	 *	test, i.e. "(!flag) || (flag&4)".  I'm not entirely sure under
	 *	what conditions "!flag" would be true.  However I think the rest
	 *	of the conditions would prevent that from causing any
	 *	unnecessary retransmission.
	 *	Clearly if the first packet has expired it should be
	 *	retransmitted.  The other alternative, "flag&2 && retransmits", is
	 *	harder to explain:  You have to look carefully at how and when the
	 *	timer is set and with what timeout.  The most recent transmission always
	 *	sets the timer.  So in general if the most recent thing has timed
	 *	out, everything before it has as well.  So we want to go ahead and
	 *	retransmit some more.  If we didn't explicitly test for this
	 *	condition with "flag&2 && retransmits", chances are "when + rto < jiffies"
	 *	would not be true.  If you look at the pattern of timing, you can
	 *	show that rto is increased fast enough that the next packet would
	 *	almost never be retransmitted immediately.  Then you'd end up
	 *	waiting for a timeout to send each packet on the retransmission
	 *	queue.  With my implementation of the Karn sampling algorithm,
	 *	the timeout would double each time.  The net result is that it would
	 *	take a hideous amount of time to recover from a single dropped packet.
	 *	It's possible that there should also be a test for TIME_WRITE, but
	 *	I think as long as "send_head != NULL" and "retransmit" is on, we've
	 *	got to be in real retransmission mode.
	 *	Note that tcp_do_retransmit is called with all==1.  Setting cong_window
	 *	back to 1 at the timeout will cause us to send 1, then 2, etc. packets.
	 *	As long as no further losses occur, this seems reasonable.
	 */

	if (((!flag) || (flag&4)) && sk->send_head != NULL &&
	       (((flag&2) && sk->retransmits) ||
	       (sk->send_head->when + sk->rto < jiffies)))
	{
		if (sk->send_head->when + sk->rto < jiffies)
			tcp_retransmit(sk,0);
		else
		{
			tcp_do_retransmit(sk, 1);
			tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
		}
	}

	return 1;

uninteresting_ack:
	if (sk->debug)
		printk("Ack ignored %u %u\n",ack,sk->sent_seq);

	/*
	 *	Keepalive processing.
	 */

	if (after(ack, sk->sent_seq))
	{
		return 0;
	}

	/*
	 *	Restart the keepalive timer.
	 */

	if (sk->keepopen)
	{
		if (sk->ip_xmit_timeout == TIME_KEEPOPEN)
			tcp_reset_xmit_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
	}
	return 1;
}


/*
 *	Process the FIN bit. This now behaves as it is supposed to work
 *	and the FIN takes effect when it is validly part of sequence
 *	space. Not before when we get holes.
 *
 *	If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT
 *	(and thence onto LAST-ACK and finally, CLOSE, we never enter
 *	TIME-WAIT)
 *
 *	If we are in FINWAIT-1, a received FIN indicates simultaneous
 *	close and we go into CLOSING (and later onto TIME-WAIT)
 *
 *	If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT.
 *
 */

static int tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
{
	sk->fin_seq = skb->end_seq;

	if (!sk->dead)
	{
		sk->state_change(sk);
		sock_wake_async(sk->socket, 1);
	}

	switch(sk->state)
	{
		case TCP_SYN_RECV:
		case TCP_SYN_SENT:
		case TCP_ESTABLISHED:
			/*
			 *	move to CLOSE_WAIT, tcp_data() already handled
			 *	sending the ack.
			 */
			tcp_set_state(sk,TCP_CLOSE_WAIT);
			if (th->rst)
				sk->shutdown = SHUTDOWN_MASK;
			break;

		case TCP_CLOSE_WAIT:
		case TCP_CLOSING:
			/*
			 *	received a retransmission of the FIN, do
			 *	nothing.
			 */
			break;
		case TCP_TIME_WAIT:
			/*
			 *	received a retransmission of the FIN,
			 *	restart the TIME_WAIT timer.
			 */
			tcp_reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			return(0);
		case TCP_FIN_WAIT1:
			/*
			 *	This case occurs when a simultaneous close
			 *	happens, we must ack the received FIN and
			 *	enter the CLOSING state.
			 *
			 *	This causes a WRITE timeout, which will either
			 *	move on to TIME_WAIT when we timeout, or resend
			 *	the FIN properly (maybe we get rid of that annoying
			 *	FIN lost hang).  The TIME_WRITE code is already correct
			 *	for handling this timeout.
			 */
			if (sk->ip_xmit_timeout != TIME_WRITE)
				tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
			tcp_set_state(sk,TCP_CLOSING);
			break;
		case TCP_FIN_WAIT2:
			/*
			 *	received a FIN -- send ACK and enter TIME_WAIT
			 */
			tcp_reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			sk->shutdown |= SHUTDOWN_MASK;
			tcp_set_state(sk,TCP_TIME_WAIT);
			break;
		case TCP_CLOSE:
			/*
			 *	already in CLOSE
			 */
			break;
		default:
			tcp_set_state(sk,TCP_LAST_ACK);

			/* Start the timers. */
			tcp_reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			return(0);
	}

	return(0);
}

/*
 *	Add a sk_buff to the TCP receive queue, calculating
 *	the ACK sequence as we go..
 */
static inline void tcp_insert_skb(struct sk_buff * skb, struct sk_buff_head * list)
{
	struct sk_buff * prev, * next;
	u32 seq;

	/*
	 *	Find where the new skb goes.. (This goes backwards,
	 *	on the assumption that we get the packets in order)
	 */
	seq = skb->seq;
	prev = list->prev;
	next = (struct sk_buff *) list;
	for (;;) {
		if (prev == (struct sk_buff *) list || !after(prev->seq, seq))
			break;
		next = prev;
		prev = prev->prev;
	}
	__skb_insert(skb, prev, next, list);
}

/*
 *	Called for each packet when we find a new ACK endpoint sequence in it
 */
static inline u32 tcp_queue_ack(struct sk_buff * skb, struct sock * sk)
{
	/*
	 *	When we ack the fin, we do the FIN
	 *	processing.
	 */
	skb->acked = 1;
	if (skb->h.th->fin)
		tcp_fin(skb,sk,skb->h.th);
	return skb->end_seq;
}

static void tcp_queue(struct sk_buff * skb, struct sock * sk,
	 struct tcphdr *th, unsigned long saddr)
{
	u32 ack_seq;

	tcp_insert_skb(skb, &sk->receive_queue);

	/*
	 *	Did we get anything new to ack?
	 */
	ack_seq = sk->acked_seq;
	if (!after(skb->seq, ack_seq) && after(skb->end_seq, ack_seq)) {
		struct sk_buff_head * list = &sk->receive_queue;
		struct sk_buff * next;
		ack_seq = tcp_queue_ack(skb, sk);

		/*
		 *	Do we have any old packets to ack that the above
		 *	made visible? (Go forward from skb)
		 */
		next = skb->next;
		while (next != (struct sk_buff *) list) {
			if (after(next->seq, ack_seq))
				break;
			if (after(next->end_seq, ack_seq))
				ack_seq = tcp_queue_ack(next, sk);
			next = next->next;
		}

		/*
		 *	Ok, we found new data, update acked_seq as
		 *	necessary (and possibly send the actual
		 *	ACK packet).
		 */
		sk->acked_seq = ack_seq;

		/*
		 *	rules for delaying an ack:
		 *	- delay time <= 0.5 HZ
		 *	- must send at least every 2 full sized packets
		 *	- we don't have a window update to send
		 *
		 *	We handle the window update in the actual read
		 *	side, so we only have to worry about the first two.
		 */
		if (!sk->delay_acks || th->fin) {
			tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
		}
		else
		{
			int timeout = sk->ato;
			if (timeout > HZ/2)
				timeout = HZ/2;
			if (sk->bytes_rcv > sk->max_unacked) {
				timeout = 0;
				mark_bh(TIMER_BH);
			}
			sk->ack_backlog++;
			if (sk->debug)
				printk("Ack queued.\n");
			tcp_reset_xmit_timer(sk, TIME_WRITE, timeout);
		}
	}
}


/*
 *	This routine handles the data.  If there is room in the buffer,
 *	it will have already been moved into it.  If there is no
 *	room, then we will just have to discard the packet.
 */

static int tcp_data(struct sk_buff *skb, struct sock *sk,
	 unsigned long saddr, unsigned short len)
{
	struct tcphdr *th;
	u32 new_seq, shut_seq;

	th = skb->h.th;
	skb_pull(skb,th->doff*4);
	skb_trim(skb,len-(th->doff*4));

	/*
	 *	The bytes in the receive read/assembly queue have increased.  Needed for the
	 *	low memory discard algorithm
	 */

	sk->bytes_rcv += skb->len;

	if (skb->len == 0 && !th->fin)
	{
		/*
		 *	Don't want to keep passing ack's back and forth.
		 *	(someone sent us dataless, boring frame)
		 */
		if (!th->ack)
			tcp_send_ack(sk->sent_seq, sk->acked_seq,sk, th, saddr);
		kfree_skb(skb, FREE_READ);
		return(0);
	}

	/*
	 *	We no longer have anyone receiving data on this connection.
	 */

#ifndef TCP_DONT_RST_SHUTDOWN

	if (sk->shutdown & RCV_SHUTDOWN)
	{
		/*
		 *	FIXME: BSD has some magic to avoid sending resets to
		 *	broken 4.2 BSD keepalives.  Much to my surprise a few non
		 *	BSD stacks still have broken keepalives so we want to
		 *	cope with it.
		 */

		if (skb->len)	/* We don't care if it's just an ack or
				   a keepalive/window probe */
		{
			new_seq = skb->seq + skb->len + th->syn;	/* Right edge of _data_ part of frame */

			/* Do this the way 4.4BSD treats it.  Not what I'd
			   regard as the meaning of the spec but it's what BSD
			   does and clearly they know everything 8) */

			/*
			 *	This is valid because of two things
			 *
			 *	a) The way tcp_data behaves at the bottom.
			 *	b) A fin takes effect when read not when received.
			 */

			shut_seq = sk->acked_seq+1;	/* Last byte */

			if (after(new_seq,shut_seq))
			{
				if (sk->debug)
					printk("Data arrived on %p after close [Data right edge %X, Socket shut on %X] %d\n",
						sk, new_seq, shut_seq, sk->blog);
				if (sk->dead)
				{
					sk->acked_seq = new_seq + th->fin;
					tcp_send_reset(sk->saddr, sk->daddr, skb->h.th,
						sk->prot, NULL, skb->dev, sk->ip_tos, sk->ip_ttl);
					tcp_statistics.TcpEstabResets++;
					sk->err = EPIPE;
					sk->error_report(sk);
					sk->shutdown = SHUTDOWN_MASK;
					tcp_set_state(sk,TCP_CLOSE);
					kfree_skb(skb, FREE_READ);
					return 0;
				}
			}
		}
	}

#endif

	tcp_queue(skb, sk, th, saddr);

	/*
	 *	If we've missed a packet, send an ack.
	 *	Also start a timer to send another.
	 */

	if (!skb->acked)
	{
		tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
		sk->ack_backlog++;
		tcp_reset_xmit_timer(sk, TIME_WRITE, min(sk->ato, HZ/2));
	}

	/*
	 *	Now tell the user we may have some data.
	 */

	if (!sk->dead)
	{
		if (sk->debug)
			printk("Data wakeup.\n");
		sk->data_ready(sk,0);
	}
	return(0);
}


/*
 *	This routine is only called when we have urgent data
 *	signalled.  It's the 'slow' part of tcp_urg.  It could be
 *	moved inline now as tcp_urg is only called from one
 *	place.  We handle URGent data wrong.  We have to - as
 *	BSD still doesn't use the correction from RFC961.
 */

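/*
 *	th->urg_ptr is an offset from th->seq; the decrement below follows
 *	the BSD interpretation noted above, where the pointer names the byte
 *	after the urgent data, so sk->urg_seq ends up holding the sequence
 *	number of the urgent byte itself.
 */
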
static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
{
	u32 ptr = ntohs(th->urg_ptr);

	if (ptr)
		ptr--;
	ptr += ntohl(th->seq);

	/* ignore urgent data that we've already seen and read */
	if (after(sk->copied_seq, ptr))
		return;

	/* do we already have a newer (or duplicate) urgent pointer? */
	if (sk->urg_data && !after(ptr, sk->urg_seq))
		return;

	/* tell the world about our new urgent pointer */
	if (sk->proc != 0) {
		if (sk->proc > 0) {
			kill_proc(sk->proc, SIGURG, 1);
		} else {
			kill_pg(-sk->proc, SIGURG, 1);
		}
	}
	sk->urg_data = URG_NOTYET;
	sk->urg_seq = ptr;
}

/*
 *	This is the 'fast' part of urgent handling.
 */

static inline void tcp_urg(struct sock *sk, struct tcphdr *th, unsigned long len)
{
	/*
	 *	Check if we get a new urgent pointer - normally not
	 */

	if (th->urg)
		tcp_check_urg(sk,th);

	/*
	 *	Do we wait for any urgent data? - normally not
	 */

	if (sk->urg_data == URG_NOTYET) {
		u32 ptr;

		/*
		 *	Is the urgent pointer pointing into this packet?
		 */
		ptr = sk->urg_seq - ntohl(th->seq) + th->doff*4;
		if (ptr < len) {
			sk->urg_data = URG_VALID | *(ptr + (unsigned char *) th);
			if (!sk->dead)
				sk->data_ready(sk,0);
		}
	}
}

/*
 *	This should be a bit smarter and remove partially
 *	overlapping stuff too, but this should be good
 *	enough for any even remotely normal case (and the
 *	worst that can happen is that we have a few
 *	unnecessary packets in the receive queue).
 *
 *	This function is never called with an empty list..
 */
static inline void tcp_remove_dups(struct sk_buff_head * list)
{
	struct sk_buff * next = list->next;

	for (;;) {
		struct sk_buff * skb = next;
		next = next->next;
		if (next == (struct sk_buff *) list)
			break;
		if (before(next->end_seq, skb->end_seq)) {
			__skb_unlink(next, list);
			kfree_skb(next, FREE_READ);
			next = skb;
			continue;
		}
		if (next->seq != skb->seq)
			continue;
		__skb_unlink(skb, list);
		kfree_skb(skb, FREE_READ);
	}
}

/*
 *	Throw out all unnecessary packets: we've gone over the
 *	receive queue limit.  This shouldn't happen in a normal
 *	TCP connection, but we might have gotten duplicates etc.
 */
static void prune_queue(struct sk_buff_head * list)
{
	for (;;) {
		struct sk_buff * skb = list->prev;

		/* gone through it all? */
		if (skb == (struct sk_buff *) list)
			break;
		if (!skb->acked) {
			__skb_unlink(skb, list);
			kfree_skb(skb, FREE_READ);
			continue;
		}
		tcp_remove_dups(list);
		break;
	}
}

/*
 *	A TCP packet has arrived.
 *		skb->h.raw is the TCP header.
 */

int tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
	__u32 daddr, unsigned short len,
	__u32 saddr, int redo, struct inet_protocol * protocol)
{
	struct tcphdr *th;
	struct sock *sk;
	int syn_ok = 0;

	/*
	 *	"redo" is 1 if we have already seen this skb but couldn't
	 *	use it at that time (the socket was locked).  In that case
	 *	we have already done a lot of the work (looked up the socket
	 *	etc).
	 */
	th = skb->h.th;
	sk = skb->sk;
	if (!redo) {
		tcp_statistics.TcpInSegs++;
		if (skb->pkt_type != PACKET_HOST)
			goto discard_it;

		/*
		 *	Pull up the IP header.
		 */

		skb_pull(skb, skb->h.raw-skb->data);

		/*
		 *	Try to use the device checksum if provided.
		 */
		switch (skb->ip_summed)
		{
			case CHECKSUM_NONE:
				skb->csum = csum_partial((char *)th, len, 0);
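				/* no break: fall through and verify the checksum just computed */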
			case CHECKSUM_HW:
				if (tcp_check(th, len, saddr, daddr, skb->csum))
					goto discard_it;
			default:
				/* CHECKSUM_UNNECESSARY */
		}
		sk = get_tcp_sock(saddr, th->source, daddr, th->dest);
		if (!sk)
			goto no_tcp_socket;
		skb->sk = sk;
		skb->seq = ntohl(th->seq);
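		/* SYN and FIN each occupy one unit of sequence space, hence th->syn + th->fin below */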
		skb->end_seq = skb->seq + th->syn + th->fin + len - th->doff*4;
		skb->ack_seq = ntohl(th->ack_seq);

		skb->acked = 0;
		skb->used = 0;
		skb->free = 1;
		skb->saddr = daddr;
		skb->daddr = saddr;

		/*
		 *	We may need to add it to the backlog here.
		 */
		if (sk->users)
		{
			__skb_queue_tail(&sk->back_log, skb);
			return(0);
		}
	}

	/*
	 *	If this socket has got a reset it's to all intents and purposes
	 *	really dead.  Count closed sockets as dead.
	 *
	 *	Note: BSD appears to have a bug here.  A 'closed' TCP in BSD
	 *	simply drops data.  This seems incorrect as a 'closed' TCP doesn't
	 *	exist so should cause resets as if the port was unreachable.
	 */

	if (sk->zapped || sk->state == TCP_CLOSE)
		goto no_tcp_socket;

	if (!sk->prot)
	{
		printk("IMPOSSIBLE 3\n");
		return(0);
	}


	/*
	 *	Charge the memory to the socket.
	 */

	skb->sk = sk;
	atomic_add(skb->truesize, &sk->rmem_alloc);

	/*
	 *	We should now do header prediction.
	 */

	/*
	 *	This basically follows the flow suggested by RFC793, with the corrections in RFC1122.  We
	 *	don't implement precedence and we process URG incorrectly (deliberately so) for BSD bug
	 *	compatibility.  We also set up variables more thoroughly [Karn notes in the
	 *	KA9Q code the RFC793 incoming segment rules don't initialise the variables for all paths].
	 */

	if (sk->state != TCP_ESTABLISHED)	/* Skip this lot for normal flow */
	{

		/*
		 *	Now deal with unusual cases.
		 */

		if (sk->state == TCP_LISTEN)
		{
			if (th->ack)	/* These use the socket TOS.. might want to be the received TOS */
				tcp_send_reset(daddr,saddr,th,sk->prot,opt,dev,sk->ip_tos, sk->ip_ttl);

			/*
			 *	We don't care for RST, and non SYN are absorbed (old segments)
			 *	Broadcast/multicast SYN isn't allowed.  Note - bug if you change the
			 *	netmask on a running connection it can go broadcast.  Even Sun's have
			 *	this problem so I'm ignoring it
			 */

			if (th->rst || !th->syn || th->ack || ip_chk_addr(daddr) != IS_MYADDR)
			{
				kfree_skb(skb, FREE_READ);
				return 0;
			}

			/*
			 *	Guess we need to make a new socket up
			 */

			tcp_conn_request(sk, skb, daddr, saddr, opt, dev, tcp_init_seq());

			/*
			 *	Now we have several options: In theory there is nothing else
			 *	in the frame.  KA9Q has an option to send data with the syn,
			 *	BSD accepts data with the syn up to the [to be] advertised window
			 *	and Solaris 2.1 gives you a protocol error.  For now we just ignore
			 *	it, that fits the spec precisely and avoids incompatibilities.  It
			 *	would be nice in future to drop through and process the data.
			 *
			 *	Now that TTCP is starting to be used we ought to queue this data.
			 */

			return 0;
		}

		/*
		 *	Retransmitted SYN for our socket.  This is uninteresting.  If sk->state==TCP_LISTEN
		 *	then it's a new connection
		 */

		if (sk->state == TCP_SYN_RECV && th->syn && skb->seq+1 == sk->acked_seq)
		{
			kfree_skb(skb, FREE_READ);
			return 0;
		}

		/*
		 *	SYN sent means we have to look for a suitable ack and either reset
		 *	for bad matches or go to connected.  The SYN_SENT case is unusual and should
		 *	not be in line code. [AC]
		 */

		if (sk->state == TCP_SYN_SENT)
		{
			/* Crossed SYN or previous junk segment */
			if (th->ack)
			{
				/* We got an ack, but it's not a good ack */
				if (!tcp_ack(sk,th,skb->ack_seq,len))
				{
					/* Reset the ack - it's an ack from a
					   different connection  [ th->rst is checked in tcp_send_reset()] */
					tcp_statistics.TcpAttemptFails++;
					tcp_send_reset(daddr, saddr, th,
						sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
					kfree_skb(skb, FREE_READ);
					return(0);
				}
				if (th->rst)
					return tcp_reset(sk,skb);
				if (!th->syn)
				{
					/* A valid ack from a different connection
					   start.  Shouldn't happen but cover it */
					tcp_statistics.TcpAttemptFails++;
					tcp_send_reset(daddr, saddr, th,
						sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
					kfree_skb(skb, FREE_READ);
					return 0;
				}
				/*
				 *	Ok.. it's good.  Set up sequence numbers and
				 *	move to established.
				 */
				syn_ok = 1;	/* Don't reset this connection for the syn */
				sk->acked_seq = skb->seq+1;
				sk->lastwin_seq = skb->seq+1;
				sk->fin_seq = skb->seq;
				tcp_send_ack(sk->sent_seq,sk->acked_seq,sk,th,sk->daddr);
				tcp_set_state(sk, TCP_ESTABLISHED);
				tcp_options(sk,th);
				sk->dummy_th.dest = th->source;
				sk->copied_seq = sk->acked_seq;
				if (!sk->dead)
				{
					sk->state_change(sk);
					sock_wake_async(sk->socket, 0);
				}
				if (sk->max_window == 0)
				{
					sk->max_window = 32;
					sk->mss = min(sk->max_window, sk->mtu);
				}
			}
			else
			{
				/* See if SYN's cross.  Drop if boring */
				if (th->syn && !th->rst)
				{
					/* Crossed SYN's are fine - but talking to
					   yourself is right out... */
					if (sk->saddr == saddr && sk->daddr == daddr &&
						sk->dummy_th.source == th->source &&
						sk->dummy_th.dest == th->dest)
					{
						tcp_statistics.TcpAttemptFails++;
						return tcp_reset(sk,skb);
					}
					tcp_set_state(sk,TCP_SYN_RECV);

					/*
					 *	FIXME:
					 *	Must send SYN|ACK here
					 */
				}
				/* Discard junk segment */
				kfree_skb(skb, FREE_READ);
				return 0;
			}
			/*
			 *	SYN_RECV with data maybe.. drop through
			 */
			goto rfc_step6;
		}

		/*
		 *	BSD has a funny hack with TIME_WAIT and fast reuse of a port.  There is
		 *	a more complex suggestion for fixing these reuse issues in RFC1644
		 *	but not yet ready for general use.  Also see RFC1379.
		 *
		 *	Note the funny way we go back to the top of this function for
		 *	this case ("goto try_next_socket").  That also takes care of
		 *	checking "sk->users" for the new socket as well as doing all
		 *	the normal tests on the packet.
		 */

#define BSD_TIME_WAIT
#ifdef BSD_TIME_WAIT
		if (sk->state == TCP_TIME_WAIT && th->syn && sk->dead &&
			after(skb->seq, sk->acked_seq) && !th->rst)
		{
			u32 seq = sk->write_seq;
			if (sk->debug)
				printk("Doing a BSD time wait\n");
			tcp_statistics.TcpEstabResets++;
			atomic_sub(skb->truesize, &sk->rmem_alloc);
			skb->sk = NULL;
			sk->err = ECONNRESET;
			tcp_set_state(sk, TCP_CLOSE);
			sk->shutdown = SHUTDOWN_MASK;
			sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
			/* this is not really correct: we should check sk->users */
			if (sk && sk->state == TCP_LISTEN)
			{
				skb->sk = sk;
				atomic_add(skb->truesize, &sk->rmem_alloc);
				tcp_conn_request(sk, skb, daddr, saddr,opt, dev,seq+128000);
				return 0;
			}
			kfree_skb(skb, FREE_READ);
			return 0;
		}
#endif
	}

	/*
	 *	We are now in normal data flow (see the step list in the RFC)
	 *	Note most of these are inline now.  I'll inline the lot when
	 *	I have time to test it hard and look at what gcc outputs
	 */

	if (!tcp_sequence(sk, skb->seq, skb->end_seq-th->syn))
	{
		bad_tcp_sequence(sk, th, len, opt, saddr, dev);
		kfree_skb(skb, FREE_READ);
		return 0;
	}

	if (th->rst)
		return tcp_reset(sk,skb);

	/*
	 *	!syn_ok is effectively the state test in RFC793.
	 */

	if (th->syn && !syn_ok)
	{
		tcp_send_reset(daddr,saddr,th, &tcp_prot, opt, dev, skb->ip_hdr->tos, 255);
		return tcp_reset(sk,skb);
	}

	tcp_delack_estimator(sk);

	/*
	 *	Process the ACK
	 */

	if (th->ack && !tcp_ack(sk,th,skb->ack_seq,len))
	{
		/*
		 *	Our three way handshake failed.
		 */

		if (sk->state == TCP_SYN_RECV)
		{
			tcp_send_reset(daddr, saddr, th,sk->prot, opt, dev,sk->ip_tos,sk->ip_ttl);
		}
		kfree_skb(skb, FREE_READ);
		return 0;
	}

rfc_step6:		/* I'll clean this up later */

	/*
	 *	If the accepted buffer put us over our queue size we
	 *	now drop it (we must process the ack first to avoid
	 *	deadlock cases).
	 */

	/*
	 *	Process urgent data
	 */

	tcp_urg(sk, th, len);

	/*
	 *	Process the encapsulated data
	 */

	if (tcp_data(skb,sk, saddr, len))
		kfree_skb(skb, FREE_READ);

	/*
	 *	If our receive queue has grown past its limits,
	 *	try to prune away duplicates etc..
	 */
	if (sk->rmem_alloc > sk->rcvbuf)
		prune_queue(&sk->receive_queue);

	/*
	 *	And done
	 */

	return 0;

no_tcp_socket:
	/*
	 *	No such TCB.  If th->rst is 0 send a reset (checked in tcp_send_reset)
	 */
	tcp_send_reset(daddr, saddr, th, &tcp_prot, opt,dev,skb->ip_hdr->tos,255);

discard_it:
	/*
	 *	Discard frame
	 */
	skb->sk = NULL;
	kfree_skb(skb, FREE_READ);
	return 0;
}