root/net/ipv4/tcp_output.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. tcp_send_skb
  2. tcp_dequeue_partial
  3. tcp_send_partial
  4. tcp_enqueue_partial
  5. tcp_write_xmit
  6. tcp_do_retransmit
  7. tcp_send_reset
  8. tcp_send_fin
  9. tcp_send_synack
  10. tcp_send_ack
  11. tcp_write_wakeup
  12. tcp_send_probe0

   1 /*
   2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
   3  *              operating system.  INET is implemented using the  BSD Socket
   4  *              interface as the means of communication with the user level.
   5  *
   6  *              Implementation of the Transmission Control Protocol(TCP).
   7  *
   8  * Version:     @(#)tcp_input.c 1.0.16  05/25/93
   9  *
  10  * Authors:     Ross Biro, <bir7@leland.Stanford.Edu>
  11  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12  *              Mark Evans, <evansmp@uhura.aston.ac.uk>
  13  *              Corey Minyard <wf-rch!minyard@relay.EU.net>
  14  *              Florian La Roche, <flla@stud.uni-sb.de>
  15  *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
  16  *              Linus Torvalds, <torvalds@cs.helsinki.fi>
  17  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
  18  *              Matthew Dillon, <dillon@apollo.west.oic.com>
  19  *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  20  *              Jorge Cwik, <jorge@laser.satlink.net>
  21  */
  22 
  23 #include <linux/config.h>
  24 #include <net/tcp.h>
  25 
  26 /*
  27  *      This is the main buffer sending routine. We queue the buffer
  28  *      having checked it is sane seeming.
  29  */
  30  
  31 void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
     /* [previous][next][first][last][top][bottom][index][help] */
  32 {
  33         int size;
  34         struct tcphdr * th = skb->h.th;
  35 
  36         /*
  37          *      length of packet (not counting length of pre-tcp headers) 
  38          */
  39          
  40         size = skb->len - ((unsigned char *) th - skb->data);
  41 
  42         /*
  43          *      Sanity check it.. 
  44          */
  45          
  46         if (size < sizeof(struct tcphdr) || size > skb->len) 
  47         {
  48                 printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
  49                         skb, skb->data, th, skb->len);
  50                 kfree_skb(skb, FREE_WRITE);
  51                 return;
  52         }
  53 
  54         /*
  55          *      If we have queued a header size packet.. (these crash a few
  56          *      tcp stacks if ack is not set)
  57          */
  58          
  59         if (size == sizeof(struct tcphdr)) 
  60         {
  61                 /* If it's got a syn or fin it's notionally included in the size..*/
  62                 if(!th->syn && !th->fin) 
  63                 {
  64                         printk("tcp_send_skb: attempt to queue a bogon.\n");
  65                         kfree_skb(skb,FREE_WRITE);
  66                         return;
  67                 }
  68         }
  69 
  70         /*
  71          *      Actual processing.
  72          */
  73          
  74         tcp_statistics.TcpOutSegs++;  
  75         skb->seq = ntohl(th->seq);
  76         skb->end_seq = skb->seq + size - 4*th->doff;
  77         
  78         /*
  79          *      We must queue if
  80          *
  81          *      a) The right edge of this frame exceeds the window
  82          *      b) We are retransmitting (Nagle's rule)
  83          *      c) We have too many packets 'in flight'
  84          */
  85          
  86         if (after(skb->end_seq, sk->window_seq) ||
  87             (sk->retransmits && sk->ip_xmit_timeout == TIME_WRITE) ||
  88              sk->packets_out >= sk->cong_window) 
  89         {
  90                 /* checksum will be supplied by tcp_write_xmit.  So
  91                  * we shouldn't need to set it at all.  I'm being paranoid */
  92                 th->check = 0;
  93                 if (skb->next != NULL) 
  94                 {
  95                         printk("tcp_send_partial: next != NULL\n");
  96                         skb_unlink(skb);
  97                 }
  98                 skb_queue_tail(&sk->write_queue, skb);
  99                 
 100                 /*
 101                  *      If we don't fit we have to start the zero window
 102                  *      probes. This is broken - we really need to do a partial
 103                  *      send _first_ (This is what causes the Cisco and PC/TCP
 104                  *      grief).
 105                  */
 106                  
 107                 if (before(sk->window_seq, sk->write_queue.next->end_seq) &&
 108                     sk->send_head == NULL && sk->ack_backlog == 0)
 109                         tcp_reset_xmit_timer(sk, TIME_PROBE0, sk->rto);
 110         } 
 111         else 
 112         {
 113                 /*
 114                  *      This is going straight out
 115                  */
 116                  
 117                 th->ack_seq = htonl(sk->acked_seq);
 118                 th->window = htons(tcp_select_window(sk));
 119 
 120                 tcp_send_check(th, sk->saddr, sk->daddr, size, sk);
 121 
 122                 sk->sent_seq = sk->write_seq;
 123                 
 124                 /*
 125                  *      This is mad. The tcp retransmit queue is put together
 126                  *      by the ip layer. This causes half the problems with
 127                  *      unroutable FIN's and other things.
 128                  */
 129                  
 130                 sk->prot->queue_xmit(sk, skb->dev, skb, 0);
 131                 
 132                 
 133                 sk->ack_backlog = 0;
 134                 sk->bytes_rcv = 0;
 135 
 136                 /*
 137                  *      Set for next retransmit based on expected ACK time.
 138                  *      FIXME: We set this every time which means our 
 139                  *      retransmits are really about a window behind.
 140                  */
 141 
 142                 tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
 143         }
 144 }
 145 
  146 /*
  147  *      Locking problems lead us to a messy situation where we can have
  148  *      multiple partially complete buffers queued up. This is really bad
  149  *      as we don't want to be sending partial buffers. Fix this with
  150  *      a semaphore or similar to lock tcp_write per socket.
  151  *
  152  *      These routines are pretty self descriptive.
  153  */
  154  
  155 struct sk_buff * tcp_dequeue_partial(struct sock * sk)
      /*
       *      Atomically detach and return the pending partial frame for this
       *      socket, or NULL if none is queued.  The 1-second "fill up" timer
       *      armed by tcp_enqueue_partial() is cancelled along with it, so
       *      the caller takes sole ownership of the returned buffer.
       */
  156 {
  157         struct sk_buff * skb;
  158         unsigned long flags;
  159 
              /* Interrupts off: sk->partial and partial_timer are also
               * manipulated from the timer path, so this test-and-clear
               * must not be interleaved with it. */
  160         save_flags(flags);
  161         cli();
  162         skb = sk->partial;
  163         if (skb) {
  164                 sk->partial = NULL;
  165                 del_timer(&sk->partial_timer);
  166         }
  167         restore_flags(flags);
  168         return skb;
  169 }
 170 
 171 /*
 172  *      Empty the partial queue
 173  */
 174  
 175 void tcp_send_partial(struct sock *sk)
     /* [previous][next][first][last][top][bottom][index][help] */
 176 {
 177         struct sk_buff *skb;
 178 
 179         if (sk == NULL)
 180                 return;
 181         while ((skb = tcp_dequeue_partial(sk)) != NULL)
 182                 tcp_send_skb(sk, skb);
 183 }
 184 
  185 /*
  186  *      Queue a partial frame
  187  */
  188  
  189 void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk)
      /*
       *      Install skb as the socket's pending partial frame and (re)arm a
       *      one second timer that will push it out via tcp_send_partial()
       *      if nothing fills it sooner.  If a previous partial frame was
       *      already pending it is displaced and transmitted immediately
       *      (after interrupts are re-enabled) via tcp_send_skb().
       */
  190 {
  191         struct sk_buff * tmp;
  192         unsigned long flags;
  193 
              /* Swap in the new partial frame with interrupts off so the
               * timer path cannot observe a half-updated partial/timer pair. */
  194         save_flags(flags);
  195         cli();
  196         tmp = sk->partial;
  197         if (tmp)
  198                 del_timer(&sk->partial_timer);
  199         sk->partial = skb;
  200         init_timer(&sk->partial_timer);
  201         /*
  202          *      Wait up to 1 second for the buffer to fill.
  203          */
  204         sk->partial_timer.expires = jiffies+HZ;
  205         sk->partial_timer.function = (void (*)(unsigned long)) tcp_send_partial;
  206         sk->partial_timer.data = (unsigned long) sk;
  207         add_timer(&sk->partial_timer);
  208         restore_flags(flags);
              /* The displaced frame is sent outside the critical section -
               * tcp_send_skb() may do real work. */
  209         if (tmp)
  210                 tcp_send_skb(sk, tmp);
  211 }
 212 
  213 /*
  214  *      This routine takes stuff off of the write queue,
  215  *      and puts it in the xmit queue. This happens as incoming acks
  216  *      open up the remote window for us.
  217  */
  218  
  219 void tcp_write_xmit(struct sock *sk)
      /*
       *      Push queued frames from sk->write_queue to the IP layer while
       *      they fit inside the advertised window, we are not blocked by
       *      Nagle-style retransmit rules, and packets_out stays below the
       *      congestion window.  No-op on a zapped (reset) socket.
       */
  220 {
  221         struct sk_buff *skb;
  222 
  223         /*
  224          *      The bytes will have to remain here. In time closedown will
  225          *      empty the write queue and all will be happy 
  226          */
  227 
  228         if(sk->zapped)
  229                 return;
  230 
  231         /*
  232          *      Anything on the transmit queue that fits the window can
  233          *      be added providing we are not
  234          *
  235          *      a) retransmitting (Nagle's rule)
  236          *      b) exceeding our congestion window.
  237          */
  238          
  239         while((skb = skb_peek(&sk->write_queue)) != NULL &&
  240                 before(skb->end_seq, sk->window_seq + 1) &&
  241                 (sk->retransmits == 0 ||
  242                  sk->ip_xmit_timeout != TIME_WRITE ||
  243                  before(skb->end_seq, sk->rcv_ack_seq + 1))
  244                 && sk->packets_out < sk->cong_window) 
  245         {
  246                 IS_SKB(skb);
  247                 skb_unlink(skb);
  248                 
  249                 /*
  250                  *      See if we really need to send the packet. 
  251                  */
  252                  
  253                 if (before(skb->end_seq, sk->rcv_ack_seq +1)) 
  254                 {
  255                         /*
  256                          *      This is acked data. We can discard it. This 
  257                          *      cannot currently occur.
  258                          */
  259                          
  260                         sk->retransmits = 0;
  261                         kfree_skb(skb, FREE_WRITE);
                            /* Freeing write buffer space may unblock a writer. */
  262                         if (!sk->dead) 
  263                                 sk->write_space(sk);
  264                 } 
  265                 else
  266                 {
  267                         struct tcphdr *th;
  268                         struct iphdr *iph;
  269                         int size;
  270 /*
  271  * put in the ack seq and window at this point rather than earlier,
  272  * in order to keep them monotonic.  We really want to avoid taking
  273  * back window allocations.  That's legal, but RFC1122 says it's frowned on.
  274  * Ack and window will in general have changed since this packet was put
  275  * on the write queue.
  276  */
  277                         iph = skb->ip_hdr;
  278                         th = (struct tcphdr *)(((char *)iph) +(iph->ihl << 2));
  279                         size = skb->len - (((unsigned char *) th) - skb->data);
  280 #ifndef CONFIG_NO_PATH_MTU_DISCOVERY
                            /* Frame outgrew the discovered path MTU: clear DF so
                             * intermediate routers may fragment it. */
  281                         if (size > sk->mtu - sizeof(struct iphdr))
  282                         {
  283                                 iph->frag_off &= ~htons(IP_DF);
  284                                 ip_send_check(iph);
  285                         }
  286 #endif
  287                         
  288                         th->ack_seq = htonl(sk->acked_seq);
  289                         th->window = htons(tcp_select_window(sk));
  290 
  291                         tcp_send_check(th, sk->saddr, sk->daddr, size, sk);
  292 
  293                         sk->sent_seq = skb->end_seq;
  294                         
  295                         /*
  296                          *      IP manages our queue for some crazy reason
  297                          */
  298                          
  299                         sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
  300                         
  301                         
                            /* The piggybacked ACK above covered everything pending. */
  302                         sk->ack_backlog = 0;
  303                         sk->bytes_rcv = 0;
  304 
  305                         /*
  306                          *      Again we slide the timer wrongly
  307                          */
  308                          
  309                         tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
  310                 }
  311         }
  312 }
 313 
 314 
  315 /*
  316  *      A socket has timed out on its send queue and wants to do a
  317  *      little retransmitting. Currently this means TCP.
  318  */
  319 
  320 void tcp_do_retransmit(struct sock *sk, int all)
      /*
       *      Walk the retransmit list hanging off sk->send_head and resend
       *      frames: re-route each one, rebuild its MAC header, refresh the
       *      ack/window fields and checksum, and requeue it on the device.
       *      If 'all' is zero only the head frame is retransmitted; otherwise
       *      we stop at the congestion window (ct >= sk->cong_window) or at a
       *      device-locked buffer.
       */
  321 {
  322         struct sk_buff * skb;
  323         struct proto *prot;
  324         struct device *dev;
  325         int ct=0;
  326         struct rtable *rt;
  327 
  328         prot = sk->prot;
  329         skb = sk->send_head;
  330 
  331         while (skb != NULL)
  332         {
  333                 struct tcphdr *th;
  334                 struct iphdr *iph;
  335                 int size;
  336 
  337                 dev = skb->dev;
  338                 IS_SKB(skb);
  339                 skb->when = jiffies;
  340                 
  341                 /* dl1bke 960201 - @%$$! Hope this cures strange race conditions    */
  342                 /*                 with AX.25 mode VC. (esp. DAMA)                  */
  343                 /*                 if the buffer is locked we should not retransmit */
  344                 /*                 anyway, so we don't need all the fuss to prepare */
  345                 /*                 the buffer in this case.                         */
  346                 /*                 (the skb_pull() changes skb->data while we may   */
  347                 /*                 actually try to send the data. Ough. A side      */
  348                 /*                 effect is that we'll send some unnecessary data, */
  349                 /*                 but the alternative is disastrous...             */
  350                 
  351                 if (skb_device_locked(skb))
  352                         break;
  353 
  354                 /*
  355                  *      Discard the surplus MAC header
  356                  */
  357                  
  358                 skb_pull(skb,((unsigned char *)skb->ip_hdr)-skb->data);
  359 
  360                 /*
  361                  * In general it's OK just to use the old packet.  However we
  362                  * need to use the current ack and window fields.  Urg and
  363                  * urg_ptr could possibly stand to be updated as well, but we
  364                  * don't keep the necessary data.  That shouldn't be a problem,
  365                  * if the other end is doing the right thing.  Since we're
  366                  * changing the packet, we have to issue a new IP identifier.
  367                  */
  368 
  369                 iph = (struct iphdr *)skb->data;
  370                 th = (struct tcphdr *)(((char *)iph) + (iph->ihl << 2));
  371                 size = ntohs(iph->tot_len) - (iph->ihl<<2);
  372                 
  373                 /*
  374                  *      Note: We ought to check for window limits here but
  375                  *      currently this is done (less efficiently) elsewhere.
  376                  */
  377 
  378                 /*
  379                  *      Put a MAC header back on (may cause ARPing)
  380                  */
  381                  
  382                 {
  383                         /* ANK: UGLY, but the bug, that was here, should be fixed.
  384                          */
  385                         struct options *  opt = (struct options*)skb->proto_priv;
                            /* Honour source routing: route to the first-hop
                             * address when the SRR option is present. */
  386                         rt = ip_check_route(&sk->ip_route_cache, opt->srr?opt->faddr:iph->daddr, skb->localroute);
  387                 }
  388 
  389                 iph->id = htons(ip_id_count++);
  390 #ifndef CONFIG_NO_PATH_MTU_DISCOVERY
  391                 if (rt && ntohs(iph->tot_len) > rt->rt_mtu)
  392                         iph->frag_off &= ~htons(IP_DF);
  393 #endif
  394                 ip_send_check(iph);
  395                         
  396                 if (rt==NULL)   /* Deep poo */
  397                 {
  398                         if(skb->sk)
  399                         {
  400                                 skb->sk->err_soft=ENETUNREACH;
  401                                 skb->sk->error_report(skb->sk);
  402                         }
  403                 }
  404                 else
  405                 {
  406                         dev=rt->rt_dev;
  407                         skb->raddr=rt->rt_gateway;
  408                         skb->dev=dev;
  409                         skb->arp=1;
                            /* Use the cached hardware header if present; if it
                             * is stale, clear skb->arp so resolution is redone. */
  410                         if (rt->rt_hh)
  411                         {
  412                                 memcpy(skb_push(skb,dev->hard_header_len),rt->rt_hh->hh_data,dev->hard_header_len);
  413                                 if (!rt->rt_hh->hh_uptodate)
  414                                 {
  415                                         skb->arp = 0;
  416 #if RT_CACHE_DEBUG >= 2
  417                                         printk("tcp_do_retransmit: hh miss %08x via %08x\n", iph->daddr, rt->rt_gateway);
  418 #endif
  419                                 }
  420                         }
  421                         else if (dev->hard_header)
  422                         {
  423                                 if(dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, skb->len)<0)
  424                                         skb->arp=0;
  425                         }
  426                 
  427                         /*
  428                          *      This is not the right way to handle this. We have to
  429                          *      issue an up to date window and ack report with this 
  430                          *      retransmit to keep the odd buggy tcp that relies on 
  431                          *      the fact BSD does this happy. 
  432                          *      We don't however need to recalculate the entire 
  433                          *      checksum, so someone wanting a small problem to play
  434                          *      with might like to implement RFC1141/RFC1624 and speed
  435                          *      this up by avoiding a full checksum.
  436                          */
  437                  
  438                         th->ack_seq = htonl(sk->acked_seq);
  439                         sk->ack_backlog = 0;
  440                         sk->bytes_rcv = 0;
                            /* NOTE(review): htons() is the semantically correct
                             * macro here (host-to-network); ntohs() happens to
                             * perform the identical byte swap on all supported
                             * architectures, so behaviour is unchanged. */
  441                         th->window = ntohs(tcp_select_window(sk));
  442                         tcp_send_check(th, sk->saddr, sk->daddr, size, sk);
  443                 
  444                         /*
  445                          *      If the interface is (still) up and running, kick it.
  446                          */
  447         
  448                         if (dev->flags & IFF_UP)
  449                         {
  450                                 /*
  451                                  *      If the packet is still being sent by the device/protocol
  452                                  *      below then don't retransmit. This is both needed, and good -
  453                                  *      especially with connected mode AX.25 where it stops resends
  454                                  *      occurring of an as yet unsent anyway frame!
  455                                  *      We still add up the counts as the round trip time wants
  456                                  *      adjusting.
  457                                  */
  458                                 if (sk && !skb_device_locked(skb))
  459                                 {
  460                                         /* Remove it from any existing driver queue first! */
  461                                         skb_unlink(skb);
  462                                         /* Now queue it */
  463                                         ip_statistics.IpOutRequests++;
  464                                         dev_queue_xmit(skb, dev, sk->priority);
  465                                 }
  466                         }
  467                 }
  468                 
  469                 /*
  470                  *      Count retransmissions
  471                  */
  472                  
  473                 ct++;
  474                 sk->prot->retransmits ++;
  475                 tcp_statistics.TcpRetransSegs++;
  476                 
  477 
  478                 /*
  479                  *      Only one retransmit requested.
  480                  */
  481         
  482                 if (!all)
  483                         break;
  484 
  485                 /*
  486                  *      This should cut it off before we send too many packets.
  487                  */
  488 
  489                 if (ct >= sk->cong_window)
  490                         break;
  491                 skb = skb->link3;
  492         }
  493 }
 494 
  495 /*
  496  *      This routine will send an RST to the other tcp. 
  497  */
  498  
  499 void tcp_send_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
      /*
       *      Build and transmit an RST in reply to the received segment 'th',
       *      following the RFC 793 rules: if the offending segment carried an
       *      ACK, the reset's sequence number is taken from that ack field;
       *      otherwise we send seq 0 and acknowledge the segment instead
       *      (accounting for the SYN occupying one sequence number).  Owned
       *      by no socket - the reply skb is charged to NULL.
       */
  500           struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
  501 {
  502         struct sk_buff *buff;
  503         struct tcphdr *t1;
  504         int tmp;
  505         struct device *ndev=NULL;
  506 
  507         /*
  508          *      Cannot reset a reset (Think about it).
  509          */
  510          
  511         if(th->rst)
  512                 return;
  513   
  514         /*
  515          * We need to grab some memory, and put together an RST,
  516          * and then put it into the queue to be sent.
  517          */
  518 
  519         buff = sock_wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
  520         if (buff == NULL) 
  521                 return;
  522 
  523         buff->sk = NULL;
  524         buff->dev = dev;
  525         buff->localroute = 0;
  526 
  527         /*
  528          *      Put in the IP header and routing stuff. 
  529          */
  530 
  531         tmp = prot->build_header(buff, saddr, daddr, &ndev, IPPROTO_TCP, opt,
  532                            sizeof(struct tcphdr),tos,ttl,NULL);
  533         if (tmp < 0) 
  534         {
  535                 buff->free = 1;
  536                 sock_wfree(NULL, buff);
  537                 return;
  538         }
  539 
  540         t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
            /* Start from a copy of the offending header, then overwrite. */
  541         memcpy(t1, th, sizeof(*t1));
  542 
  543         /*
  544          *      Swap the send and the receive. 
  545          */
  546 
  547         t1->dest = th->source;
  548         t1->source = th->dest;
  549         t1->rst = 1;  
  550         t1->window = 0;
  551   
  552         if(th->ack)
  553         {
                    /* They acked something: reset with their ack as our seq. */
  554                 t1->ack = 0;
  555                 t1->seq = th->ack_seq;
  556                 t1->ack_seq = 0;
  557         }
  558         else
  559         {
                    /* No ack to mirror: send seq 0 and ack their segment
                     * (a SYN consumes one sequence number, hence the +1). */
  560                 t1->ack = 1;
  561                 if(!th->syn)
  562                         t1->ack_seq = th->seq;
  563                 else
  564                         t1->ack_seq = htonl(ntohl(th->seq)+1);
  565                 t1->seq = 0;
  566         }
  567 
  568         t1->syn = 0;
  569         t1->urg = 0;
  570         t1->fin = 0;
  571         t1->psh = 0;
  572         t1->doff = sizeof(*t1)/4;
  573         tcp_send_check(t1, saddr, daddr, sizeof(*t1), NULL);
  574         prot->queue_xmit(NULL, ndev, buff, 1);
  575         tcp_statistics.TcpOutSegs++;
  576 }
 577 
  578 /*
  579  *      Send a fin.
  580  */
  581 
  582 void tcp_send_fin(struct sock *sk)
      /*
       *      Construct and send a FIN segment for this socket, consuming one
       *      sequence number from sk->write_seq.  If data is still pending
       *      on the write queue the FIN is appended behind it rather than
       *      sent ahead of the data.  On header-build failure the FIN is
       *      treated as a lost send and the close timer is (re)armed.
       */
  583 {
  584         struct proto *prot =(struct proto *)sk->prot;
  585         struct tcphdr *th =(struct tcphdr *)&sk->dummy_th;
  586         struct tcphdr *t1;
  587         struct sk_buff *buff;
  588         struct device *dev=NULL;
  589         int tmp;
  590                 
  591         release_sock(sk); /* in case the malloc sleeps. */
  592         
  593         buff = sock_wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
  594         sk->inuse = 1;
  595 
  596         if (buff == NULL)
  597         {
  598                 /* This is a disaster if it occurs */
  599                 printk("tcp_send_fin: Impossible malloc failure");
  600                 return;
  601         }
  602 
  603         /*
  604          *      Administrivia
  605          */
  606          
  607         buff->sk = sk;
  608         buff->localroute = sk->localroute;
  609 
  610         /*
  611          *      Put in the IP header and routing stuff. 
  612          */
  613 
  614         tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
  615                            IPPROTO_TCP, sk->opt,
  616                            sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
  617         if (tmp < 0) 
  618         {
  619                 int t;
  620                 /*
  621                  *      Finish anyway, treat this as a send that got lost. 
  622                  *      (Not good).
  623                  */
  624                  
  625                 buff->free = 1;
  626                 sock_wfree(sk,buff);
                    /* Still burn the sequence number the FIN would have used. */
  627                 sk->write_seq++;
                    /* If the socket timer is already pending, leave it be;
                     * otherwise start a TIME_CLOSE timeout so close completes. */
  628                 t=del_timer(&sk->timer);
  629                 if(t)
  630                         add_timer(&sk->timer);
  631                 else
  632                         tcp_reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
  633                 return;
  634         }
  635         
  636         /*
  637          *      We ought to check if the end of the queue is a buffer and
  638          *      if so simply add the fin to that buffer, not send it ahead.
  639          */
  640 
  641         t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
  642         buff->dev = dev;
            /* Clone the socket's template header, then fill in the FIN. */
  643         memcpy(t1, th, sizeof(*t1));
  644         buff->seq = sk->write_seq;
  645         sk->write_seq++;
  646         buff->end_seq = sk->write_seq;
  647         t1->seq = htonl(buff->seq);
  648         t1->ack = 1;
  649         t1->ack_seq = htonl(sk->acked_seq);
  650         t1->window = htons(sk->window=tcp_select_window(sk));
  651         t1->fin = 1;
  652         t1->rst = 0;
  653         t1->doff = sizeof(*t1)/4;
  654         tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
  655 
  656         /*
  657          * If there is data in the write queue, the fin must be appended to
  658          * the write queue.
  659          */
  660         
  661         if (skb_peek(&sk->write_queue) != NULL) 
  662         {
  663                 buff->free = 0;
  664                 if (buff->next != NULL) 
  665                 {
  666                         printk("tcp_send_fin: next != NULL\n");
  667                         skb_unlink(buff);
  668                 }
  669                 skb_queue_tail(&sk->write_queue, buff);
  670         } 
  671         else 
  672         {
                    /* Queue empty: the FIN goes straight out now. */
  673                 sk->sent_seq = sk->write_seq;
  674                 sk->prot->queue_xmit(sk, dev, buff, 0);
  675                 tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
  676         }
  677 }
 678 
 679 
  680 void tcp_send_synack(struct sock * newsk, struct sock * sk, struct sk_buff * skb)
      /*
       *      Send the SYN+ACK for a passive open: 'sk' is the listening
       *      socket, 'newsk' the embryonic connection socket, and 'skb' the
       *      received SYN.  On any failure the new socket is marked dead and
       *      released (which destroys it) and the SYN is freed.  On success
       *      the SYN skb's memory charge is moved from sk to newsk and the
       *      skb is parked on the listener's receive queue.
       */
  681 {
  682         struct tcphdr *t1;
  683         unsigned char *ptr;
  684         struct sk_buff * buff;
  685         struct device *ndev=NULL;
  686         int tmp;
  687 
  688         buff = sock_wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
  689         if (buff == NULL) 
  690         {
  691                 sk->err = ENOMEM;
  692                 newsk->dead = 1;
  693                 newsk->state = TCP_CLOSE;
  694                 /* And this will destroy it */
  695                 release_sock(newsk);
  696                 kfree_skb(skb, FREE_READ);
  697                 tcp_statistics.TcpAttemptFails++;
  698                 return;
  699         }
  700   
  701         buff->sk = newsk;
  702         buff->localroute = newsk->localroute;
  703 
  704         /*
  705          *      Put in the IP header and routing stuff. 
  706          */
  707 
  708         tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &ndev,
  709                                IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl,&newsk->ip_route_cache);
  710 
  711         /*
  712          *      Something went wrong. 
  713          */
  714 
  715         if (tmp < 0) 
  716         {
  717                 sk->err = tmp;
  718                 buff->free = 1;
  719                 kfree_skb(buff,FREE_WRITE);
  720                 newsk->dead = 1;
  721                 newsk->state = TCP_CLOSE;
  722                 release_sock(newsk);
  723                 skb->sk = sk;
  724                 kfree_skb(skb, FREE_READ);
  725                 tcp_statistics.TcpAttemptFails++;
  726                 return;
  727         }
  728 
  729         t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
  730   
            /* Start from a copy of the incoming SYN's header and rewrite. */
  731         memcpy(t1, skb->h.th, sizeof(*t1));
  732         buff->seq = newsk->write_seq++;
  733         buff->end_seq = newsk->write_seq;
  734         /*
  735          *      Swap the send and the receive. 
  736          */
  737         t1->dest = skb->h.th->source;
  738         t1->source = newsk->dummy_th.source;
            /* NOTE(review): htonl()/htons() are the semantically correct
             * macros on the next two conversions; ntohl()/ntohs() perform
             * the identical swap on supported machines, so behaviour is
             * unchanged.  Worth tidying. */
  739         t1->seq = ntohl(buff->seq);
  740         t1->ack = 1;
  741         newsk->sent_seq = newsk->write_seq;
  742         t1->window = ntohs(tcp_select_window(newsk));
  743         t1->res1 = 0;
  744         t1->res2 = 0;
  745         t1->rst = 0;
  746         t1->urg = 0;
  747         t1->psh = 0;
  748         t1->syn = 1;
  749         t1->ack_seq = htonl(newsk->acked_seq);
            /* doff includes one extra 32-bit word for the MSS option below. */
  750         t1->doff = sizeof(*t1)/4+1;
  751         ptr = skb_put(buff,4);
            /* TCP option: kind 2 (MSS), length 4, MSS value big-endian. */
  752         ptr[0] = 2;
  753         ptr[1] = 4;
  754         ptr[2] = ((newsk->mtu) >> 8) & 0xff;
  755         ptr[3] =(newsk->mtu) & 0xff;
  756 
  757         tcp_send_check(t1, newsk->saddr, newsk->daddr, sizeof(*t1)+4, newsk);
  758         newsk->prot->queue_xmit(newsk, ndev, buff, 0);
  759         tcp_reset_xmit_timer(newsk, TIME_WRITE , TCP_TIMEOUT_INIT);
  760         skb->sk = newsk;
  761 
  762         /*
  763          *      Charge the sock_buff to newsk. 
  764          */
  765          
  766         sk->rmem_alloc -= skb->truesize;
  767         newsk->rmem_alloc += skb->truesize;
  768         
  769         skb_queue_tail(&sk->receive_queue,skb);
  770         sk->ack_backlog++;
  771         release_sock(newsk);
  772         tcp_statistics.TcpOutSegs++;
  773 }
 774 
/*
 *	This routine sends an ack and also updates the window.
 *
 *	'sequence' and 'ack' are the sequence/acknowledgement numbers to
 *	place in the outgoing segment (host byte order); 'th' is the header
 *	of the segment being acknowledged, whose ports are mirrored back;
 *	'daddr' is the destination IP address.
 */
 
void tcp_send_ack(u32 sequence, u32 ack,
             struct sock *sk,
             struct tcphdr *th, u32 daddr)
{
        struct sk_buff *buff;
        struct tcphdr *t1;
        struct device *dev = NULL;
        int tmp;

        if(sk->zapped)
                return;         /* We have been reset, we may not send again */
                
        /*
         * We need to grab some memory, and put together an ack,
         * and then put it into the queue to be sent.
         */

        buff = sock_wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
        if (buff == NULL) 
        {
                /* 
                 *      No memory for a fresh ACK: force a delayed ack to be
                 *      sent instead. We don't have to do this (ACK is
                 *      unreliable) but it's much better use of bandwidth on
                 *      slow links to send a spare ack than resend packets. 
                 */
                 
                sk->ack_backlog++;
                /* Only arm the short (1 jiffy-second) ack timer if no write
                   timer is already pending and the connection is live. */
                if (sk->ip_xmit_timeout != TIME_WRITE && tcp_connected(sk->state)) 
                {
                        tcp_reset_xmit_timer(sk, TIME_WRITE, HZ);
                }
                return;
        }

        /*
         *      Assemble a suitable TCP frame
         */
         
        buff->sk = sk;
        buff->localroute = sk->localroute;

        /* 
         *      Put in the IP header and routing stuff. 
         */
         
        tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
                                IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
        if (tmp < 0) 
        {
                /* Header build/route failed: release the buffer and give up. */
                buff->free = 1;
                sock_wfree(sk, buff);
                return;
        }
        t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));

        /* Start from the incoming header, then patch the fields we need. */
        memcpy(t1, th, sizeof(*t1));

        /*
         *      Swap the send and the receive ports. 
         */
         
        t1->dest = th->source;
        t1->source = th->dest;
        /* ntohl and htonl are the same byte swap; this converts the
           host-order sequence to network order. */
        t1->seq = ntohl(sequence);
        t1->ack = 1;
        sk->window = tcp_select_window(sk);
        /* ntohs doubles as htons here, as above. */
        t1->window = ntohs(sk->window);
        t1->res1 = 0;
        t1->res2 = 0;
        t1->rst = 0;
        t1->urg = 0;
        t1->syn = 0;
        t1->psh = 0;
        t1->fin = 0;
        
        /*
         *      If this ack brings us fully up to date, clear the delayed-ack
         *      bookkeeping. If we additionally have nothing queued for
         *      transmit and the transmit timer is on, we were just doing an
         *      ACK timeout and need to switch to a keepalive (or drop the
         *      timer entirely when keepalives are off).
         */
         
        if (ack == sk->acked_seq) {               
                sk->ack_backlog = 0;
                sk->bytes_rcv = 0;
                sk->ack_timed = 0;

                /* NOTE: the else below binds to the inner if(sk->keepopen),
                   as the indentation shows -- both branches are guarded by
                   the empty-queue/TIME_WRITE test. */
                if (sk->send_head == NULL && skb_peek(&sk->write_queue) == NULL
                    && sk->ip_xmit_timeout == TIME_WRITE)       
                  if(sk->keepopen) 
                    tcp_reset_xmit_timer(sk,TIME_KEEPOPEN,TCP_TIMEOUT_LEN);
                  else 
                    delete_timer(sk);                           
        }

        /*
         *      Fill in the remaining fields, checksum, and send it.
         */
         
        t1->ack_seq = htonl(ack);
        t1->doff = sizeof(*t1)/4;
        tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), sk);
        if (sk->debug)
                 printk("\rtcp_ack: seq %x ack %x\n", sequence, ack);
        sk->prot->queue_xmit(sk, dev, buff, 1);
        tcp_statistics.TcpOutSegs++;
}
 886 
/*
 *	This routine sends a packet with an out of date sequence
 *	number. It assumes the other end will try to ack it.
 *
 *	Used as a zero-window probe: either a fragment of queued data that
 *	now fits the (small but non-zero) window, or a 1-byte-old pure ACK
 *	segment that provokes the peer into re-announcing its window.
 */

void tcp_write_wakeup(struct sock *sk)
{
        struct sk_buff *buff,*skb;
        struct tcphdr *t1;
        struct device *dev=NULL;
        int tmp;

        if (sk->zapped)
                return; /* After a valid reset we can send no more */

        /*
         *      Write data can still be transmitted/retransmitted in the
         *      following states.  If any other state is encountered, return.
         *      [listen/close will never occur here anyway]
         */

        if (sk->state != TCP_ESTABLISHED && 
            sk->state != TCP_CLOSE_WAIT &&
            sk->state != TCP_FIN_WAIT1 && 
            sk->state != TCP_LAST_ACK &&
            sk->state != TCP_CLOSING
        ) 
        {
                return;
        }
        if ( before(sk->sent_seq, sk->window_seq) && 
            (skb=skb_peek(&sk->write_queue)))
        {
                /*
                 * We are probing the opening of a window
                 * but the window size is != 0
                 * must have been a result of SWS avoidance ( sender ):
                 * send as much of the queued segment as the window allows.
                 */
            
                struct iphdr *iph;
                struct tcphdr *th;
                struct tcphdr *nth;
                unsigned long win_size;
#if 0
                unsigned long ow_size;
#endif
                void * tcp_data_start;
        
                /*
                 *      How many bytes can we send ?
                 */
                 
                win_size = sk->window_seq - sk->sent_seq;

                /*
                 *      Recover the IP and TCP header pointers of the
                 *      queued (already-built) packet.
                 */
                 
                iph = (struct iphdr *)skb->ip_hdr;
                th = (struct tcphdr *)(((char *)iph) +(iph->ihl << 2));

                /*
                 *      Grab memory for a temporary frame large enough for
                 *      the window's worth of data plus all headers.
                 */
                 
                buff = sock_wmalloc(sk, win_size + th->doff * 4 + 
                                     (iph->ihl << 2) +
                                     sk->prot->max_header + 15, 
                                     1, GFP_ATOMIC);
                if ( buff == NULL )
                        return;

                /* 
                 *      If we strip the packet on the write queue we must
                 *      be ready to retransmit this one 
                 */
            
                buff->free = /*0*/1;

                buff->sk = sk;
                buff->localroute = sk->localroute;
                
                /*
                 *      Put headers on the new packet
                 */

                tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
                                         IPPROTO_TCP, sk->opt, buff->truesize,
                                         sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
                if (tmp < 0) 
                {
                        sock_wfree(sk, buff);
                        return;
                }
                
                /*
                 *      Copy the TCP header over from the queued packet
                 */

                buff->dev = dev;

                nth = (struct tcphdr *) skb_put(buff,th->doff*4);

                memcpy(nth, th, th->doff * 4);
                
                /*
                 *      Correct the new header: fresh ack, current window,
                 *      checksum cleared for recomputation below.
                 */
                 
                nth->ack = 1; 
                nth->ack_seq = htonl(sk->acked_seq);
                nth->window = htons(tcp_select_window(sk));
                nth->check = 0;

                /*
                 *      Find the first data byte of the queued packet.
                 */
                 
                tcp_data_start = (char *) th + (th->doff << 2);

                /*
                 *      Add win_size bytes of it to our new buffer
                 */
                 
                memcpy(skb_put(buff,win_size), tcp_data_start, win_size);
                
                /*
                 *      Remember our right edge sequence number and advance
                 *      sent_seq past the probe data.
                 */
                 
                buff->end_seq = sk->sent_seq + win_size;
                sk->sent_seq = buff->end_seq;           /* Hack */
                /* If the urgent pointer fell outside the copied data,
                   the URG flag no longer applies to this fragment. */
                if(th->urg && ntohs(th->urg_ptr) < win_size)
                        nth->urg = 0;

                /*
                 *      Checksum the split buffer
                 */
                 
                tcp_send_check(nth, sk->saddr, sk->daddr, 
                           nth->doff * 4 + win_size , sk);
        }
        else
        {       
                /*
                 *      Nothing sendable: build a bare ACK carrying an old
                 *      sequence number (sent_seq - 1) to elicit an ack.
                 */
                buff = sock_wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
                if (buff == NULL) 
                        return;

                buff->free = 1;
                buff->sk = sk;
                buff->localroute = sk->localroute;

                /*
                 *      Put in the IP header and routing stuff. 
                 */
                 
                tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
                                IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
                if (tmp < 0) 
                {
                        sock_wfree(sk, buff);
                        return;
                }

                t1 = (struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
                memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));

                /*
                 *      Use a previous sequence.
                 *      This should cause the other end to send an ack.
                 */
         
                t1->seq = htonl(sk->sent_seq-1);
                t1->ack = 1; 
                t1->res1= 0;
                t1->res2= 0;
                t1->rst = 0;
                t1->urg = 0;
                t1->psh = 0;
                t1->fin = 0;    /* We are sending a 'previous' sequence, and 0 bytes of data - thus no FIN bit */
                t1->syn = 0;
                t1->ack_seq = htonl(sk->acked_seq);
                t1->window = htons(tcp_select_window(sk));
                t1->doff = sizeof(*t1)/4;
                tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

        }               

        /*
         *      Send it.
         */
        
        sk->prot->queue_xmit(sk, dev, buff, 1);
        tcp_statistics.TcpOutSegs++;
}
1082 
1083 /*
1084  *      A window probe timeout has occurred.
1085  */
1086 
1087 void tcp_send_probe0(struct sock *sk)
     /* [previous][next][first][last][top][bottom][index][help] */
1088 {
1089         if (sk->zapped)
1090                 return;         /* After a valid reset we can send no more */
1091 
1092         tcp_write_wakeup(sk);
1093 
1094         sk->backoff++;
1095         sk->rto = min(sk->rto << 1, 120*HZ);
1096         sk->retransmits++;
1097         sk->prot->retransmits ++;
1098         tcp_reset_xmit_timer (sk, TIME_PROBE0, sk->rto);
1099 }

/* [previous][next][first][last][top][bottom][index][help] */