root/net/ipv4/tcp_output.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. tcp_send_skb
  2. tcp_dequeue_partial
  3. tcp_send_partial
  4. tcp_enqueue_partial
  5. tcp_write_xmit
  6. tcp_do_retransmit
  7. tcp_send_reset
  8. tcp_send_fin
  9. tcp_send_synack
  10. tcp_send_ack
  11. tcp_write_wakeup
  12. tcp_send_probe0

   1 /*
   2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
   3  *              operating system.  INET is implemented using the  BSD Socket
   4  *              interface as the means of communication with the user level.
   5  *
   6  *              Implementation of the Transmission Control Protocol(TCP).
   7  *
   8  * Version:     @(#)tcp_input.c 1.0.16  05/25/93
   9  *
  10  * Authors:     Ross Biro, <bir7@leland.Stanford.Edu>
  11  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12  *              Mark Evans, <evansmp@uhura.aston.ac.uk>
  13  *              Corey Minyard <wf-rch!minyard@relay.EU.net>
  14  *              Florian La Roche, <flla@stud.uni-sb.de>
  15  *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
  16  *              Linus Torvalds, <torvalds@cs.helsinki.fi>
  17  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
  18  *              Matthew Dillon, <dillon@apollo.west.oic.com>
  19  *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  20  *              Jorge Cwik, <jorge@laser.satlink.net>
  21  */
  22 
  23 #include <linux/config.h>
  24 #include <net/tcp.h>
  25 
  26 /*
  27  *      This is the main buffer sending routine. We queue the buffer
  28  *      having checked it is sane seeming.
  29  */
  30  
/*
 *      tcp_send_skb() - queue or immediately transmit one fully built TCP
 *      frame.  The skb must already carry its IP and TCP headers, with
 *      skb->h.th pointing at the TCP header.  Depending on the peer's
 *      window, Nagle state and the congestion window, the frame is either
 *      appended to sk->write_queue (to be sent later by tcp_write_xmit())
 *      or handed straight to the IP layer.  On a malformed skb the buffer
 *      is freed and the frame silently dropped (after a printk).
 */
   31 void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
      /* [previous][next][first][last][top][bottom][index][help] */
   32 {
   33         int size;
   34         struct tcphdr * th = skb->h.th;
   35 
   36         /*
   37          *      length of packet (not counting length of pre-tcp headers) 
   38          */
   39          
   40         size = skb->len - ((unsigned char *) th - skb->data);
   41 
   42         /*
   43          *      Sanity check it.. 
   44          */
   45          
   46         if (size < sizeof(struct tcphdr) || size > skb->len) 
   47         {
   48                 printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
   49                         skb, skb->data, th, skb->len);
   50                 kfree_skb(skb, FREE_WRITE);
   51                 return;
   52         }
   53 
   54         /*
   55          *      If we have queued a header size packet.. (these crash a few
   56          *      tcp stacks if ack is not set)
   57          */
   58          
   59         if (size == sizeof(struct tcphdr)) 
   60         {
   61                 /* If it's got a syn or fin it's notionally included in the size..*/
   62                 if(!th->syn && !th->fin) 
   63                 {
   64                         printk("tcp_send_skb: attempt to queue a bogon.\n");
   65                         kfree_skb(skb,FREE_WRITE);
   66                         return;
   67                 }
   68         }
   69 
   70         /*
   71          *      Actual processing.
   72          */
   73          
   74         tcp_statistics.TcpOutSegs++;  
   75         skb->seq = ntohl(th->seq);
                /* end_seq = seq + TCP payload bytes (size minus the header,
                 * 4*doff).  NOTE(review): SYN/FIN sequence space is presumably
                 * accounted for by the caller's write_seq handling — confirm. */
   76         skb->end_seq = skb->seq + size - 4*th->doff;
   77         
   78         /*
   79          *      We must queue if
   80          *
   81          *      a) The right edge of this frame exceeds the window
   82          *      b) We are retransmitting (Nagle's rule)
   83          *      c) We have too many packets 'in flight'
   84          */
   85          
   86         if (after(skb->end_seq, sk->window_seq) ||
   87             (sk->retransmits && sk->ip_xmit_timeout == TIME_WRITE) ||
   88              sk->packets_out >= sk->cong_window) 
   89         {
   90                 /* checksum will be supplied by tcp_write_xmit.  So
   91                  * we shouldn't need to set it at all.  I'm being paranoid */
   92                 th->check = 0;
   93                 if (skb->next != NULL) 
   94                 {
   95                         printk("tcp_send_partial: next != NULL\n");
   96                         skb_unlink(skb);
   97                 }
   98                 skb_queue_tail(&sk->write_queue, skb);
   99                 
                        /* Queue head blocked by a closed window with nothing in
                         * flight and no ack owed: arm the zero-window probe
                         * timer so we eventually poke the peer. */
  100                 if (before(sk->window_seq, sk->write_queue.next->end_seq) &&
  101                     sk->send_head == NULL && sk->ack_backlog == 0)
  102                         tcp_reset_xmit_timer(sk, TIME_PROBE0, sk->rto);
  103         } 
  104         else 
  105         {
  106                 /*
  107                  *      This is going straight out
  108                  */
  109                  
                        /* Fill in ack/window now (they are stale from when the
                         * header was built), then checksum over the final bytes. */
  110                 th->ack_seq = htonl(sk->acked_seq);
  111                 th->window = htons(tcp_select_window(sk));
  112 
  113                 tcp_send_check(th, sk->saddr, sk->daddr, size, skb);
  114 
  115                 sk->sent_seq = sk->write_seq;
  116                 
  117                 /*
  118                  *      This is mad. The tcp retransmit queue is put together
  119                  *      by the ip layer. This causes half the problems with
  120                  *      unroutable FIN's and other things.
  121                  */
  122                  
  123                 sk->prot->queue_xmit(sk, skb->dev, skb, 0);
  124                 
  125                 
                        /* This frame carried our current ack, so nothing is owed. */
  126                 sk->ack_backlog = 0;
  127                 sk->bytes_rcv = 0;
  128 
  129                 /*
  130                  *      Set for next retransmit based on expected ACK time.
  131                  *      FIXME: We set this every time which means our 
  132                  *      retransmits are really about a window behind.
  133                  */
  134 
  135                 tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
  136         }
  137 }
 138 
 139 /*
 140  *      Locking problems lead us to a messy situation where we can have
 141  *      multiple partially complete buffers queued up. This is really bad
 142  *      as we don't want to be sending partial buffers. Fix this with
 143  *      a semaphore or similar to lock tcp_write per socket.
 144  *
 145  *      These routines are pretty self descriptive.
 146  */
 147  
 148 struct sk_buff * tcp_dequeue_partial(struct sock * sk)
     /* [previous][next][first][last][top][bottom][index][help] */
 149 {
 150         struct sk_buff * skb;
 151         unsigned long flags;
 152 
 153         save_flags(flags);
 154         cli();
 155         skb = sk->partial;
 156         if (skb) {
 157                 sk->partial = NULL;
 158                 del_timer(&sk->partial_timer);
 159         }
 160         restore_flags(flags);
 161         return skb;
 162 }
 163 
 164 /*
 165  *      Empty the partial queue
 166  */
 167  
 168 void tcp_send_partial(struct sock *sk)
     /* [previous][next][first][last][top][bottom][index][help] */
 169 {
 170         struct sk_buff *skb;
 171 
 172         if (sk == NULL)
 173                 return;
 174         while ((skb = tcp_dequeue_partial(sk)) != NULL)
 175                 tcp_send_skb(sk, skb);
 176 }
 177 
 178 /*
 179  *      Queue a partial frame
 180  */
 181  
 182 void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk)
     /* [previous][next][first][last][top][bottom][index][help] */
 183 {
 184         struct sk_buff * tmp;
 185         unsigned long flags;
 186 
 187         save_flags(flags);
 188         cli();
 189         tmp = sk->partial;
 190         if (tmp)
 191                 del_timer(&sk->partial_timer);
 192         sk->partial = skb;
 193         init_timer(&sk->partial_timer);
 194         /*
 195          *      Wait up to 1 second for the buffer to fill.
 196          */
 197         sk->partial_timer.expires = jiffies+HZ;
 198         sk->partial_timer.function = (void (*)(unsigned long)) tcp_send_partial;
 199         sk->partial_timer.data = (unsigned long) sk;
 200         add_timer(&sk->partial_timer);
 201         restore_flags(flags);
 202         if (tmp)
 203                 tcp_send_skb(sk, tmp);
 204 }
 205 
 206 /*
 207  *      This routine takes stuff off of the write queue,
 208  *      and puts it in the xmit queue. This happens as incoming acks
 209  *      open up the remote window for us.
 210  */
 211  
/*
 *      tcp_write_xmit() - drain sk->write_queue onto the wire for as long
 *      as the peer's window, Nagle state and our congestion window allow.
 *      Called as incoming acks open the send window.  Note the sequence
 *      space idiom used throughout: before(x, y + 1) means x <= y.
 */
  212 void tcp_write_xmit(struct sock *sk)
      /* [previous][next][first][last][top][bottom][index][help] */
  213 {
  214         struct sk_buff *skb;
  215 
  216         /*
  217          *      The bytes will have to remain here. In time closedown will
  218          *      empty the write queue and all will be happy 
  219          */
  220 
  221         if(sk->zapped)
  222                 return;
  223 
  224         /*
  225          *      Anything on the transmit queue that fits the window can
  226          *      be added providing we are not
  227          *
  228          *      a) retransmitting (Nagle's rule)
  229          *      b) exceeding our congestion window.
  230          */
  231          
  232         while((skb = skb_peek(&sk->write_queue)) != NULL &&
  233                 before(skb->end_seq, sk->window_seq + 1) &&
  234                 (sk->retransmits == 0 ||
  235                  sk->ip_xmit_timeout != TIME_WRITE ||
  236                  before(skb->end_seq, sk->rcv_ack_seq + 1))
  237                 && sk->packets_out < sk->cong_window) 
  238         {
  239                 IS_SKB(skb);
  240                 skb_unlink(skb);
  241                 
  242                 /*
  243                  *      See if we really need to send the packet. 
  244                  */
  245                  
  246                 if (before(skb->end_seq, sk->rcv_ack_seq +1)) 
  247                 {
  248                         /*
  249                          *      This is acked data. We can discard it. This 
  250                          *      cannot currently occur.
  251                          */
  252                          
  253                         sk->retransmits = 0;
  254                         kfree_skb(skb, FREE_WRITE);
                                /* Freeing write memory may unblock a sleeping writer. */
  255                         if (!sk->dead) 
  256                                 sk->write_space(sk);
  257                 } 
  258                 else
  259                 {
  260                         struct tcphdr *th;
  261                         struct iphdr *iph;
  262                         int size;
  263 /*
  264  * put in the ack seq and window at this point rather than earlier,
  265  * in order to keep them monotonic.  We really want to avoid taking
  266  * back window allocations.  That's legal, but RFC1122 says it's frowned on.
  267  * Ack and window will in general have changed since this packet was put
  268  * on the write queue.
  269  */
  270                         iph = skb->ip_hdr;
  271                         th = (struct tcphdr *)(((char *)iph) +(iph->ihl << 2));
  272                         size = skb->len - (((unsigned char *) th) - skb->data);
  273 #ifndef CONFIG_NO_PATH_MTU_DISCOVERY
                                /* Frame exceeds the discovered path MTU: clear the
                                 * Don't-Fragment bit so routers may fragment it,
                                 * and refresh the IP header checksum. */
  274                         if (size > sk->mtu - sizeof(struct iphdr))
  275                         {
  276                                 iph->frag_off &= ~htons(IP_DF);
  277                                 ip_send_check(iph);
  278                         }
  279 #endif
  280                         
  281                         th->ack_seq = htonl(sk->acked_seq);
  282                         th->window = htons(tcp_select_window(sk));
  283 
  284                         tcp_send_check(th, sk->saddr, sk->daddr, size, skb);
  285 
  286                         sk->sent_seq = skb->end_seq;
  287                         
  288                         /*
  289                          *      IP manages our queue for some crazy reason
  290                          */
  291                          
  292                         sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
  293                         
  294                         
                                /* This segment carried our current ack and window. */
  295                         sk->ack_backlog = 0;
  296                         sk->bytes_rcv = 0;
  297 
  298                         /*
  299                          *      Again we slide the timer wrongly
  300                          */
  301                          
  302                         tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
  303                 }
  304         }
  305 }
 306 
 307 
 308 /*
 309  *      A socket has timed out on its send queue and wants to do a
 310  *      little retransmitting. Currently this means TCP.
 311  */
 312 
/*
 *      tcp_do_retransmit() - walk the retransmit list (sk->send_head,
 *      chained via link3) and resend frames.  Each frame is re-routed,
 *      given a fresh IP id and MAC header, and its ack/window fields are
 *      updated before the checksum is recomputed.  If 'all' is zero only
 *      the head frame is resent; otherwise we resend up to cong_window
 *      frames or until we hit a device-locked buffer.
 */
  313 void tcp_do_retransmit(struct sock *sk, int all)
      /* [previous][next][first][last][top][bottom][index][help] */
  314 {
  315         struct sk_buff * skb;
  316         struct proto *prot;
  317         struct device *dev;
  318         int ct=0;
  319         struct rtable *rt;
  320 
  321         prot = sk->prot;
  322         skb = sk->send_head;
  323 
  324         while (skb != NULL)
  325         {
  326                 struct tcphdr *th;
  327                 struct iphdr *iph;
  328                 int size;
  329 
  330                 dev = skb->dev;
  331                 IS_SKB(skb);
                        /* Stamp the (re)transmission time for RTT bookkeeping. */
  332                 skb->when = jiffies;
  333                 
  334                 /* dl1bke 960201 - @%$$! Hope this cures strange race conditions    */
  335                 /*                 with AX.25 mode VC. (esp. DAMA)                  */
  336                 /*                 if the buffer is locked we should not retransmit */
  337                 /*                 anyway, so we don't need all the fuss to prepare */
  338                 /*                 the buffer in this case.                         */
  339                 /*                 (the skb_pull() changes skb->data while we may   */
  340                 /*                 actually try to send the data. Ouch. A side      */
  341                 /*                 effect is that we'll send some unnecessary data, */
  342                 /*                 but the alternative is disasterous...            */
  343                 
  344                 if (skb_device_locked(skb))
  345                         break;
  346 
  347                 /*
  348                  *      Discard the surplus MAC header
  349                  */
  350                  
  351                 skb_pull(skb,((unsigned char *)skb->ip_hdr)-skb->data);
  352 
  353                 /*
  354                  * In general it's OK just to use the old packet.  However we
  355                  * need to use the current ack and window fields.  Urg and
  356                  * urg_ptr could possibly stand to be updated as well, but we
  357                  * don't keep the necessary data.  That shouldn't be a problem,
  358                  * if the other end is doing the right thing.  Since we're
  359                  * changing the packet, we have to issue a new IP identifier.
  360                  */
  361 
  362                 iph = (struct iphdr *)skb->data;
  363                 th = (struct tcphdr *)(((char *)iph) + (iph->ihl << 2));
                        /* size = TCP header + payload length, from the IP totals. */
  364                 size = ntohs(iph->tot_len) - (iph->ihl<<2);
  365                 
  366                 /*
  367                  *      Note: We ought to check for window limits here but
  368                  *      currently this is done (less efficiently) elsewhere.
  369                  */
  370 
  371                 /*
  372                  *      Put a MAC header back on (may cause ARPing)
  373                  */
  374                  
  375                 {
  376                         /* ANK: UGLY, but the bug, that was here, should be fixed.
  377                          */
                                /* Honour a source route option: route to the first
                                 * hop (opt->faddr) instead of the final destination. */
  378                         struct options *  opt = (struct options*)skb->proto_priv;
  379                         rt = ip_check_route(&sk->ip_route_cache, opt->srr?opt->faddr:iph->daddr, skb->localroute);
  380                 }
  381 
  382                 iph->id = htons(ip_id_count++);
  383 #ifndef CONFIG_NO_PATH_MTU_DISCOVERY
                        /* Route MTU shrank below this frame: allow fragmentation. */
  384                 if (rt && ntohs(iph->tot_len) > rt->rt_mtu)
  385                         iph->frag_off &= ~htons(IP_DF);
  386 #endif
  387                 ip_send_check(iph);
  388                         
  389                 if (rt==NULL)   /* Deep poo */
  390                 {
  391                         if(skb->sk)
  392                         {
  393                                 skb->sk->err_soft=ENETUNREACH;
  394                                 skb->sk->error_report(skb->sk);
  395                         }
  396                 }
  397                 else
  398                 {
  399                         dev=rt->rt_dev;
  400                         skb->raddr=rt->rt_gateway;
  401                         skb->dev=dev;
  402                         skb->arp=1;
                                /* Prefer the cached hardware header; fall back to
                                 * building one (which may trigger ARP resolution). */
  403                         if (rt->rt_hh)
  404                         {
  405                                 memcpy(skb_push(skb,dev->hard_header_len),rt->rt_hh->hh_data,dev->hard_header_len);
  406                                 if (!rt->rt_hh->hh_uptodate)
  407                                 {
  408                                         skb->arp = 0;
  409 #if RT_CACHE_DEBUG >= 2
  410                                         printk("tcp_do_retransmit: hh miss %08x via %08x\n", iph->daddr, rt->rt_gateway);
  411 #endif
  412                                 }
  413                         }
  414                         else if (dev->hard_header)
  415                         {
  416                                 if(dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, skb->len)<0)
  417                                         skb->arp=0;
  418                         }
  419                 
  420                         /*
  421                          *      This is not the right way to handle this. We have to
  422                          *      issue an up to date window and ack report with this 
  423                          *      retransmit to keep the odd buggy tcp that relies on 
  424                          *      the fact BSD does this happy. 
  425                          *      We don't however need to recalculate the entire 
  426                          *      checksum, so someone wanting a small problem to play
  427                          *      with might like to implement RFC1141/RFC1624 and speed
  428                          *      this up by avoiding a full checksum.
  429                          */
  430                  
  431                         th->ack_seq = htonl(sk->acked_seq);
  432                         sk->ack_backlog = 0;
  433                         sk->bytes_rcv = 0;
                                /* NOTE(review): ntohs used where htons is meant; the
                                 * two are the same involutive byte-swap, so this is
                                 * harmless in practice. */
  434                         th->window = ntohs(tcp_select_window(sk));
  435                         tcp_send_check(th, sk->saddr, sk->daddr, size, skb);
  436                 
  437                         /*
  438                          *      If the interface is (still) up and running, kick it.
  439                          */
  440         
  441                         if (dev->flags & IFF_UP)
  442                         {
  443                                 /*
  444                                  *      If the packet is still being sent by the device/protocol
  445                                  *      below then don't retransmit. This is both needed, and good -
  446                                  *      especially with connected mode AX.25 where it stops resends
  447                                  *      occurring of an as yet unsent anyway frame!
  448                                  *      We still add up the counts as the round trip time wants
  449                                  *      adjusting.
  450                                  */
  451                                 if (sk && !skb_device_locked(skb))
  452                                 {
  453                                         /* Remove it from any existing driver queue first! */
  454                                         skb_unlink(skb);
  455                                         /* Now queue it */
  456                                         ip_statistics.IpOutRequests++;
  457                                         dev_queue_xmit(skb, dev, sk->priority);
  458                                 }
  459                         }
  460                 }
  461                 
  462                 /*
  463                  *      Count retransmissions
  464                  */
  465                  
  466                 ct++;
  467                 sk->prot->retransmits ++;
  468                 tcp_statistics.TcpRetransSegs++;
  469                 
  470 
  471                 /*
  472                  *      Only one retransmit requested.
  473                  */
  474         
  475                 if (!all)
  476                         break;
  477 
  478                 /*
  479                  *      This should cut it off before we send too many packets.
  480                  */
  481 
  482                 if (ct >= sk->cong_window)
  483                         break;
  484                 skb = skb->link3;
  485         }
  486 }
 487 
 488 /*
 489  *      This routine will send an RST to the other tcp. 
 490  */
 491  
/*
 *      tcp_send_reset() - build and transmit an RST segment in reply to
 *      the offending segment 'th'.  Per RFC 793: if the incoming segment
 *      had ACK set, the reset takes its sequence number from that ack;
 *      otherwise the reset carries ACK of the incoming sequence (plus one
 *      if it was a SYN).  No socket is involved - the frame is charged to
 *      no one and sent unreliably; allocation failure just drops it.
 */
  492 void tcp_send_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
      /* [previous][next][first][last][top][bottom][index][help] */
  493           struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
  494 {
  495         struct sk_buff *buff;
  496         struct tcphdr *t1;
  497         int tmp;
  498         struct device *ndev=NULL;
  499 
  500         /*
  501          *      Cannot reset a reset (Think about it).
  502          */
  503          
  504         if(th->rst)
  505                 return;
  506   
  507         /*
  508          * We need to grab some memory, and put together an RST,
  509          * and then put it into the queue to be sent.
  510          */
  511 
  512         buff = sock_wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
  513         if (buff == NULL) 
  514                 return;
  515 
  516         buff->sk = NULL;
  517         buff->dev = dev;
  518         buff->localroute = 0;
  519         buff->csum = 0;
  520 
  521         /*
  522          *      Put in the IP header and routing stuff. 
  523          */
  524 
  525         tmp = prot->build_header(buff, saddr, daddr, &ndev, IPPROTO_TCP, opt,
  526                            sizeof(struct tcphdr),tos,ttl,NULL);
  527         if (tmp < 0) 
  528         {
                        /* Header build failed (e.g. no route): release the buffer. */
  529                 buff->free = 1;
  530                 sock_wfree(NULL, buff);
  531                 return;
  532         }
  533 
  534         t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
                /* Start from an all-zero header; only the fields set below matter. */
  535         memset(t1, 0, sizeof(*t1));
  536 
  537         /*
  538          *      Swap the send and the receive. 
  539          */
  540 
  541         t1->dest = th->source;
  542         t1->source = th->dest;
  543         t1->doff = sizeof(*t1)/4;
  544         t1->rst = 1;
  545   
  546         if(th->ack)
  547         {
                        /* Their ack tells us what sequence number they expect. */
  548                 t1->seq = th->ack_seq;
  549         }
  550         else
  551         {
  552                 t1->ack = 1;
  553                 if(!th->syn)
  554                         t1->ack_seq = th->seq;
  555                 else
                                /* A SYN consumes one sequence number; ack past it. */
  556                         t1->ack_seq = htonl(ntohl(th->seq)+1);
  557         }
  558 
  559         tcp_send_check(t1, saddr, daddr, sizeof(*t1), buff);
  560         prot->queue_xmit(NULL, ndev, buff, 1);
  561         tcp_statistics.TcpOutSegs++;
  562 }
 563 
 564 /*
 565  *      Send a fin.
 566  */
 567 
/*
 *      tcp_send_fin() - build a FIN segment for this socket and either
 *      send it immediately or, if data is still queued, append it to the
 *      tail of sk->write_queue so it goes out in order.  sk->write_seq is
 *      advanced by one (the FIN consumes a sequence number) on every path,
 *      including the header-build failure path, so the close state machine
 *      stays consistent even when the FIN is effectively "lost".
 */
  568 void tcp_send_fin(struct sock *sk)
      /* [previous][next][first][last][top][bottom][index][help] */
  569 {
  570         struct proto *prot =(struct proto *)sk->prot;
  571         struct tcphdr *th =(struct tcphdr *)&sk->dummy_th;
  572         struct tcphdr *t1;
  573         struct sk_buff *buff;
  574         struct device *dev=NULL;
  575         int tmp;
  576                 
  577         buff = sock_wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
  578 
  579         if (buff == NULL)
  580         {
  581                 /* This is a disaster if it occurs */
  582                 printk("tcp_send_fin: Impossible malloc failure");
  583                 return;
  584         }
  585 
  586         /*
  587          *      Administrivia
  588          */
  589          
  590         buff->sk = sk;
  591         buff->localroute = sk->localroute;
  592         buff->csum = 0;
  593 
  594         /*
  595          *      Put in the IP header and routing stuff. 
  596          */
  597 
  598         tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
  599                            IPPROTO_TCP, sk->opt,
  600                            sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
  601         if (tmp < 0) 
  602         {
  603                 int t;
  604                 /*
  605                  *      Finish anyway, treat this as a send that got lost. 
  606                  *      (Not good).
  607                  */
  608                  
  609                 buff->free = 1;
  610                 sock_wfree(sk,buff);
                        /* Consume the FIN's sequence number even though it never
                         * left: the close sequence must still move forward. */
  611                 sk->write_seq++;
                        /* If no timer was pending, fall back to the TIME_WAIT-style
                         * close timer; if one was pending, re-arm the same timer. */
  612                 t=del_timer(&sk->timer);
  613                 if(t)
  614                         add_timer(&sk->timer);
  615                 else
  616                         tcp_reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
  617                 return;
  618         }
  619         
  620         /*
  621          *      We ought to check if the end of the queue is a buffer and
  622          *      if so simply add the fin to that buffer, not send it ahead.
  623          */
  624 
  625         t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
  626         buff->dev = dev;
                /* Clone the socket's template header, then fill in the FIN bits. */
  627         memcpy(t1, th, sizeof(*t1));
  628         buff->seq = sk->write_seq;
  629         sk->write_seq++;
  630         buff->end_seq = sk->write_seq;
  631         t1->seq = htonl(buff->seq);
  632         t1->ack_seq = htonl(sk->acked_seq);
  633         t1->window = htons(sk->window=tcp_select_window(sk));
  634         t1->fin = 1;
  635         tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);
  636 
  637         /*
  638          * If there is data in the write queue, the fin must be appended to
  639          * the write queue.
  640          */
  641         
  642         if (skb_peek(&sk->write_queue) != NULL) 
  643         {
  644                 buff->free = 0;
  645                 if (buff->next != NULL) 
  646                 {
  647                         printk("tcp_send_fin: next != NULL\n");
  648                         skb_unlink(buff);
  649                 }
  650                 skb_queue_tail(&sk->write_queue, buff);
  651         } 
  652         else 
  653         {
                        /* Queue empty: the FIN can go straight out now. */
  654                 sk->sent_seq = sk->write_seq;
  655                 sk->prot->queue_xmit(sk, dev, buff, 0);
  656                 tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
  657         }
  658 }
 659 
 660 
/*
 *      tcp_send_synack() - reply to a connection request: build and send
 *      the SYN|ACK for 'newsk' (the embryonic socket cloned from listener
 *      'sk'), carrying an MSS option, then queue the original SYN skb on
 *      the listener's receive queue so accept() can find the connection.
 *      On failure the new socket is marked dead/CLOSED and the SYN freed.
 */
  661 void tcp_send_synack(struct sock * newsk, struct sock * sk, struct sk_buff * skb)
      /* [previous][next][first][last][top][bottom][index][help] */
  662 {
  663         struct tcphdr *t1;
  664         unsigned char *ptr;
  665         struct sk_buff * buff;
  666         struct device *ndev=NULL;
  667         int tmp;
  668 
  669         buff = sock_wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
  670         if (buff == NULL) 
  671         {
  672                 sk->err = ENOMEM;
  673                 newsk->dead = 1;
  674                 newsk->state = TCP_CLOSE;
  675                 /* And this will destroy it */
  676                 kfree_skb(skb, FREE_READ);
  677                 tcp_statistics.TcpAttemptFails++;
  678                 return;
  679         }
  680   
  681         buff->sk = newsk;
  682         buff->localroute = newsk->localroute;
  683 
  684         /*
  685          *      Put in the IP header and routing stuff. 
  686          */
  687 
  688         tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &ndev,
  689                                IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl,&newsk->ip_route_cache);
  690 
  691         /*
  692          *      Something went wrong. 
  693          */
  694 
  695         if (tmp < 0) 
  696         {
  697                 sk->err = tmp;
  698                 buff->free = 1;
  699                 kfree_skb(buff,FREE_WRITE);
  700                 newsk->dead = 1;
  701                 newsk->state = TCP_CLOSE;
                        /* Re-charge the SYN to the listener before freeing it. */
  702                 skb->sk = sk;
  703                 kfree_skb(skb, FREE_READ);
  704                 tcp_statistics.TcpAttemptFails++;
  705                 return;
  706         }
  707 
  708         t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
  709   
                /* Start from the incoming SYN's header, then overwrite fields. */
  710         memcpy(t1, skb->h.th, sizeof(*t1));
  711         buff->seq = newsk->write_seq++;
  712         buff->end_seq = newsk->write_seq;
  713         /*
  714          *      Swap the send and the receive. 
  715          */
  716         t1->dest = skb->h.th->source;
  717         t1->source = newsk->dummy_th.source;
                /* NOTE(review): ntohl/ntohs used where htonl/htons are meant in
                 * the two lines below; the conversions are identical involutions,
                 * so the result on the wire is the same. */
  718         t1->seq = ntohl(buff->seq);
  719         newsk->sent_seq = newsk->write_seq;
  720         t1->window = ntohs(tcp_select_window(newsk));
  721         t1->syn = 1;
  722         t1->ack = 1;
  723         t1->urg = 0;
  724         t1->rst = 0;
  725         t1->psh = 0;
  726         t1->ack_seq = htonl(newsk->acked_seq);
                /* doff includes one extra 32-bit word for the MSS option. */
  727         t1->doff = sizeof(*t1)/4+1;
  728         ptr = skb_put(buff,4);
                /* TCP option: kind 2 (MSS), length 4, 16-bit MSS value. */
  729         ptr[0] = 2;
  730         ptr[1] = 4;
  731         ptr[2] = ((newsk->mtu) >> 8) & 0xff;
  732         ptr[3] =(newsk->mtu) & 0xff;
  733         buff->csum = csum_partial(ptr, 4, 0);
  734         tcp_send_check(t1, newsk->saddr, newsk->daddr, sizeof(*t1)+4, buff);
  735         newsk->prot->queue_xmit(newsk, ndev, buff, 0);
  736         tcp_reset_xmit_timer(newsk, TIME_WRITE , TCP_TIMEOUT_INIT);
  737         skb->sk = newsk;
  738 
  739         /*
  740          *      Charge the sock_buff to newsk. 
  741          */
  742          
  743         sk->rmem_alloc -= skb->truesize;
  744         newsk->rmem_alloc += skb->truesize;
  745         
  746         skb_queue_tail(&sk->receive_queue,skb);
  747         sk->ack_backlog++;
  748         tcp_statistics.TcpOutSegs++;
  749 }
 750 
 751 /*
 752  *      This routine sends an ack and also updates the window. 
 753  */
 754  
 755 void tcp_send_ack(u32 sequence, u32 ack,
     /* [previous][next][first][last][top][bottom][index][help] */
 756              struct sock *sk,
 757              struct tcphdr *th, u32 daddr)
 758 {
 759         struct sk_buff *buff;
 760         struct tcphdr *t1;
 761         struct device *dev = NULL;
 762         int tmp;
 763 
 764         if(sk->zapped)
 765                 return;         /* We have been reset, we may not send again */
 766                 
 767         /*
 768          * We need to grab some memory, and put together an ack,
 769          * and then put it into the queue to be sent.
 770          */
 771 
 772         buff = sock_wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
 773         if (buff == NULL) 
 774         {
 775                 /* 
 776                  *      Force it to send an ack. We don't have to do this
 777                  *      (ACK is unreliable) but it's much better use of 
 778                  *      bandwidth on slow links to send a spare ack than
 779                  *      resend packets. 
 780                  */
 781                  
 782                 sk->ack_backlog++;
 783                 if (sk->ip_xmit_timeout != TIME_WRITE && tcp_connected(sk->state)) 
 784                 {
 785                         tcp_reset_xmit_timer(sk, TIME_WRITE, HZ);
 786                 }
 787                 return;
 788         }
 789 
 790         /*
 791          *      Assemble a suitable TCP frame
 792          */
 793          
 794         buff->sk = sk;
 795         buff->localroute = sk->localroute;
 796         buff->csum = 0;
 797 
 798         /* 
 799          *      Put in the IP header and routing stuff. 
 800          */
 801          
 802         tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
 803                                 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
 804         if (tmp < 0) 
 805         {
 806                 buff->free = 1;
 807                 sock_wfree(sk, buff);
 808                 return;
 809         }
 810         t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
 811 
 812         memcpy(t1, &sk->dummy_th, sizeof(*t1));
 813 
 814         /*
 815          *      Swap the send and the receive. 
 816          */
 817          
 818         t1->dest = th->source;
 819         t1->source = th->dest;
 820         t1->seq = ntohl(sequence);
 821         sk->window = tcp_select_window(sk);
 822         t1->window = ntohs(sk->window);
 823         
 824         /*
 825          *      If we have nothing queued for transmit and the transmit timer
 826          *      is on we are just doing an ACK timeout and need to switch
 827          *      to a keepalive.
 828          */
 829          
 830         if (ack == sk->acked_seq) {               
 831                 sk->ack_backlog = 0;
 832                 sk->bytes_rcv = 0;
 833                 sk->ack_timed = 0;
 834 
 835                 if (sk->send_head == NULL && skb_peek(&sk->write_queue) == NULL
 836                     && sk->ip_xmit_timeout == TIME_WRITE)       
 837                   if(sk->keepopen) 
 838                     tcp_reset_xmit_timer(sk,TIME_KEEPOPEN,TCP_TIMEOUT_LEN);
 839                   else 
 840                     delete_timer(sk);                           
 841         }
 842 
 843         /*
 844          *      Fill in the packet and send it
 845          */
 846          
 847         t1->ack_seq = htonl(ack);
 848         tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), buff);
 849         if (sk->debug)
 850                  printk("\rtcp_ack: seq %x ack %x\n", sequence, ack);
 851         sk->prot->queue_xmit(sk, dev, buff, 1);
 852         tcp_statistics.TcpOutSegs++;
 853 }
 854 
/*
 *      This routine sends a packet with an out of date sequence
 *      number. It assumes the other end will try to ack it.
 *
 *      Two cases are handled:
 *       1. The peer's window has opened (sent_seq < window_seq) and there
 *          is queued data: clone the head of the write queue into a fresh
 *          buffer trimmed to the usable window and transmit it (sender-side
 *          SWS avoidance probe carrying real data).
 *       2. Otherwise: transmit a zero-data segment with sequence
 *          sent_seq-1, which the peer must answer with an ACK carrying
 *          its current window (classic zero-window probe).
 */

void tcp_write_wakeup(struct sock *sk)
{
        struct sk_buff *buff,*skb;
        struct tcphdr *t1;
        struct device *dev=NULL;
        int tmp;

        if (sk->zapped)
                return; /* After a valid reset we can send no more */

        /*
         *      Write data can still be transmitted/retransmitted in the
         *      following states.  If any other state is encountered, return.
         *      [listen/close will never occur here anyway]
         */

        if (sk->state != TCP_ESTABLISHED && 
            sk->state != TCP_CLOSE_WAIT &&
            sk->state != TCP_FIN_WAIT1 && 
            sk->state != TCP_LAST_ACK &&
            sk->state != TCP_CLOSING
        ) 
        {
                return;
        }
        if ( before(sk->sent_seq, sk->window_seq) && 
            (skb=skb_peek(&sk->write_queue)))
        {
                /*
                 * We are probing the opening of a window
                 * but the window size is != 0
                 * must have been a result SWS advoidance ( sender )
                 */
            
                struct iphdr *iph;
                struct tcphdr *th;
                struct tcphdr *nth;
                unsigned long win_size;
#if 0
                unsigned long ow_size;
#endif
        
                /*
                 *      How many bytes can we send ?
                 */
                 
                /* NOTE(review): assumes the queued skb holds at least
                 * win_size bytes of payload past the TCP header -- the
                 * csum_partial_copy below reads that much.  Presumably the
                 * queueing path guarantees this; confirm against callers. */
                win_size = sk->window_seq - sk->sent_seq;

                /*
                 *      Recover the buffer pointers
                 */
                 
                /* ip_hdr was stashed in the skb when it was first built;
                 * the TCP header follows the (possibly optioned) IP header. */
                iph = (struct iphdr *)skb->ip_hdr;
                th = (struct tcphdr *)(((char *)iph) +(iph->ihl << 2));

                /*
                 *      Grab the data for a temporary frame
                 */
                 
                /* Room for payload + TCP header (incl. options) + IP header
                 * + lower-layer headers, with slack for alignment. */
                buff = sock_wmalloc(sk, win_size + th->doff * 4 + 
                                     (iph->ihl << 2) +
                                     sk->prot->max_header + 15, 
                                     1, GFP_ATOMIC);
                if ( buff == NULL )
                        return;

                /* 
                 *      If we strip the packet on the write queue we must
                 *      be ready to retransmit this one 
                 */
            
                buff->free = /*0*/1;

                buff->sk = sk;
                buff->localroute = sk->localroute;
                
                /*
                 *      Put headers on the new packet
                 */

                tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
                                         IPPROTO_TCP, sk->opt, buff->truesize,
                                         sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
                if (tmp < 0) 
                {
                        sock_wfree(sk, buff);
                        return;
                }
                
                /*
                 *      Move the TCP header over
                 */

                buff->dev = dev;

                nth = (struct tcphdr *) skb_put(buff,sizeof(*th));

                memcpy(nth, th, sizeof(*th));
                
                /*
                 *      Correct the new header
                 */
                 
                nth->ack = 1; 
                nth->ack_seq = htonl(sk->acked_seq);
                nth->window = htons(tcp_select_window(sk));
                nth->check = 0;

                /*
                 *      Copy TCP options and data start to our new buffer
                 */
                 
                /* Checksum the options+payload as they are copied; the copy
                 * source starts right after the fixed header (th + 1). */
                buff->csum = csum_partial_copy((void *)(th + 1), skb_put(buff,win_size),
                                win_size + th->doff*4 - sizeof(*th), 0);
                
                /*
                 *      Remember our right edge sequence number.
                 */
                 
                buff->end_seq = sk->sent_seq + win_size;
                sk->sent_seq = buff->end_seq;           /* Hack */
                /* If the urgent pointer fell inside the part we stripped
                 * off, the URG flag no longer applies to this segment. */
                if(th->urg && ntohs(th->urg_ptr) < win_size)
                        nth->urg = 0;

                /*
                 *      Checksum the split buffer
                 */
                 
                tcp_send_check(nth, sk->saddr, sk->daddr, 
                           nth->doff * 4 + win_size , buff);
        }
        else
        {       
                /* Zero-window (or empty-queue) case: send a bare ACK-only
                 * segment with an old sequence number to elicit an ACK. */
                buff = sock_wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
                if (buff == NULL) 
                        return;

                buff->free = 1;
                buff->sk = sk;
                buff->localroute = sk->localroute;
                buff->csum = 0;

                /*
                 *      Put in the IP header and routing stuff. 
                 */
                 
                tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
                                IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
                if (tmp < 0) 
                {
                        sock_wfree(sk, buff);
                        return;
                }

                t1 = (struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
                memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));

                /*
                 *      Use a previous sequence.
                 *      This should cause the other end to send an ack.
                 */
         
                t1->seq = htonl(sk->sent_seq-1);
/*              t1->fin = 0;    -- We are sending a 'previous' sequence, and 0 bytes of data - thus no FIN bit */
                t1->ack_seq = htonl(sk->acked_seq);
                t1->window = htons(tcp_select_window(sk));
                tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);

        }               

        /*
         *      Send it.
         */
        
        sk->prot->queue_xmit(sk, dev, buff, 1);
        tcp_statistics.TcpOutSegs++;
}
1037 
1038 /*
1039  *      A window probe timeout has occurred.
1040  */
1041 
1042 void tcp_send_probe0(struct sock *sk)
     /* [previous][next][first][last][top][bottom][index][help] */
1043 {
1044         if (sk->zapped)
1045                 return;         /* After a valid reset we can send no more */
1046 
1047         tcp_write_wakeup(sk);
1048 
1049         sk->backoff++;
1050         sk->rto = min(sk->rto << 1, 120*HZ);
1051         sk->retransmits++;
1052         sk->prot->retransmits ++;
1053         tcp_reset_xmit_timer (sk, TIME_PROBE0, sk->rto);
1054 }

/* [previous][next][first][last][top][bottom][index][help] */