This source file includes the following definitions:
- tcp_send_skb
- tcp_dequeue_partial
- tcp_send_partial
- tcp_enqueue_partial
- tcp_write_xmit
- tcp_do_retransmit
- tcp_send_reset
- tcp_send_fin
- tcp_send_synack
- tcp_send_ack
- tcp_write_wakeup
- tcp_send_probe0

#include <linux/config.h>
#include <net/tcp.h>
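
/*
 * Queue a fully built TCP segment for transmission on this socket.
 * If the receiver's window, the congestion window and the retransmit
 * state allow it, the segment is checksummed and handed to the IP
 * layer at once; otherwise it is appended to the write queue and, when
 * the queued data does not fit in the offered window and nothing is in
 * flight, the zero-window probe timer is armed.
 */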
void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
{
	int size;
	struct tcphdr * th = skb->h.th;

	size = skb->len - ((unsigned char *) th - skb->data);

	if (size < sizeof(struct tcphdr) || size > skb->len)
	{
		printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
			skb, skb->data, th, skb->len);
		kfree_skb(skb, FREE_WRITE);
		return;
	}

	if (size == sizeof(struct tcphdr))
	{
		if(!th->syn && !th->fin)
		{
			printk("tcp_send_skb: attempt to queue a bogon.\n");
			kfree_skb(skb,FREE_WRITE);
			return;
		}
	}

	tcp_statistics.TcpOutSegs++;
	skb->seq = ntohl(th->seq);
	skb->end_seq = skb->seq + size - 4*th->doff;

	if (after(skb->end_seq, sk->window_seq) ||
	    (sk->retransmits && sk->ip_xmit_timeout == TIME_WRITE) ||
	    sk->packets_out >= sk->cong_window)
	{
		th->check = 0;
		if (skb->next != NULL)
		{
			printk("tcp_send_partial: next != NULL\n");
			skb_unlink(skb);
		}
		skb_queue_tail(&sk->write_queue, skb);

		if (before(sk->window_seq, sk->write_queue.next->end_seq) &&
		    sk->send_head == NULL && sk->ack_backlog == 0)
			tcp_reset_xmit_timer(sk, TIME_PROBE0, sk->rto);
	}
	else
	{
		th->ack_seq = htonl(sk->acked_seq);
		th->window = htons(tcp_select_window(sk));

		tcp_send_check(th, sk->saddr, sk->daddr, size, skb);

		sk->sent_seq = sk->write_seq;

		sk->prot->queue_xmit(sk, skb->dev, skb, 0);

		sk->ack_backlog = 0;
		sk->bytes_rcv = 0;

		tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
	}
}
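
/*
 * Remove the pending partial (small, coalescable) segment from the
 * socket, cancelling the timer that would otherwise flush it, and
 * return it to the caller.  Interrupts are disabled while the
 * sk->partial pointer is manipulated.
 */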
struct sk_buff * tcp_dequeue_partial(struct sock * sk)
{
	struct sk_buff * skb;
	unsigned long flags;

	save_flags(flags);
	cli();
	skb = sk->partial;
	if (skb) {
		sk->partial = NULL;
		del_timer(&sk->partial_timer);
	}
	restore_flags(flags);
	return skb;
}
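
/*
 * Flush any pending partial segment by dequeueing it and passing it
 * to tcp_send_skb().  This is also the handler installed on the
 * partial timer by tcp_enqueue_partial().
 */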
void tcp_send_partial(struct sock *sk)
{
	struct sk_buff *skb;

	if (sk == NULL)
		return;
	while ((skb = tcp_dequeue_partial(sk)) != NULL)
		tcp_send_skb(sk, skb);
}
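
/*
 * Hold back a small segment in sk->partial so it can be coalesced
 * with later data, and (re)start a one second timer that will flush
 * it via tcp_send_partial().  Any previously held segment is sent
 * immediately.
 */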
void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk)
{
	struct sk_buff * tmp;
	unsigned long flags;

	save_flags(flags);
	cli();
	tmp = sk->partial;
	if (tmp)
		del_timer(&sk->partial_timer);
	sk->partial = skb;
	init_timer(&sk->partial_timer);

	sk->partial_timer.expires = jiffies+HZ;
	sk->partial_timer.function = (void (*)(unsigned long)) tcp_send_partial;
	sk->partial_timer.data = (unsigned long) sk;
	add_timer(&sk->partial_timer);
	restore_flags(flags);
	if (tmp)
		tcp_send_skb(sk, tmp);
}
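
/*
 * Push out whatever the write queue, the offered window and the
 * congestion window allow.  Segments that are already fully
 * acknowledged are simply freed; anything else gets a fresh
 * ack_seq and window, is checksummed and handed to the IP layer,
 * and the retransmit timer is restarted.
 */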
void tcp_write_xmit(struct sock *sk)
{
	struct sk_buff *skb;

	if(sk->zapped)
		return;

	while((skb = skb_peek(&sk->write_queue)) != NULL &&
	      before(skb->end_seq, sk->window_seq + 1) &&
	      (sk->retransmits == 0 ||
	       sk->ip_xmit_timeout != TIME_WRITE ||
	       before(skb->end_seq, sk->rcv_ack_seq + 1))
	      && sk->packets_out < sk->cong_window)
	{
		IS_SKB(skb);
		skb_unlink(skb);

		if (before(skb->end_seq, sk->rcv_ack_seq +1))
		{
			sk->retransmits = 0;
			kfree_skb(skb, FREE_WRITE);
			if (!sk->dead)
				sk->write_space(sk);
		}
		else
		{
			struct tcphdr *th;
			struct iphdr *iph;
			int size;

			iph = skb->ip_hdr;
			th = (struct tcphdr *)(((char *)iph) +(iph->ihl << 2));
			size = skb->len - (((unsigned char *) th) - skb->data);
#ifndef CONFIG_NO_PATH_MTU_DISCOVERY
			if (size > sk->mtu - sizeof(struct iphdr))
			{
				iph->frag_off &= ~htons(IP_DF);
				ip_send_check(iph);
			}
#endif

			th->ack_seq = htonl(sk->acked_seq);
			th->window = htons(tcp_select_window(sk));

			tcp_send_check(th, sk->saddr, sk->daddr, size, skb);

			sk->sent_seq = skb->end_seq;

			sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);

			sk->ack_backlog = 0;
			sk->bytes_rcv = 0;

			tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
		}
	}
}
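
/*
 * Retransmit from the head of the send_head (sent-but-unacked) list.
 * Each packet is re-routed, given a fresh IP id and, if needed, a new
 * hardware header; its ACK field and window are brought up to date,
 * it is checksummed and requeued on the device.  If 'all' is zero only
 * the first segment is resent, otherwise up to a congestion window's
 * worth.
 */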
void tcp_do_retransmit(struct sock *sk, int all)
{
	struct sk_buff * skb;
	struct proto *prot;
	struct device *dev;
	int ct=0;
	struct rtable *rt;

	prot = sk->prot;
	skb = sk->send_head;

	while (skb != NULL)
	{
		struct tcphdr *th;
		struct iphdr *iph;
		int size;

		dev = skb->dev;
		IS_SKB(skb);
		skb->when = jiffies;

		if (skb_device_locked(skb))
			break;

		skb_pull(skb,((unsigned char *)skb->ip_hdr)-skb->data);

		iph = (struct iphdr *)skb->data;
		th = (struct tcphdr *)(((char *)iph) + (iph->ihl << 2));
		size = ntohs(iph->tot_len) - (iph->ihl<<2);

		{
			struct options * opt = (struct options*)skb->proto_priv;
			rt = ip_check_route(&sk->ip_route_cache, opt->srr?opt->faddr:iph->daddr, skb->localroute);
		}

		iph->id = htons(ip_id_count++);
#ifndef CONFIG_NO_PATH_MTU_DISCOVERY
		if (rt && ntohs(iph->tot_len) > rt->rt_mtu)
			iph->frag_off &= ~htons(IP_DF);
#endif
		ip_send_check(iph);

		if (rt==NULL)
		{
			if(skb->sk)
			{
				skb->sk->err_soft=ENETUNREACH;
				skb->sk->error_report(skb->sk);
			}
		}
		else
		{
			dev=rt->rt_dev;
			skb->raddr=rt->rt_gateway;
			skb->dev=dev;
			skb->arp=1;
			if (rt->rt_hh)
			{
				memcpy(skb_push(skb,dev->hard_header_len),rt->rt_hh->hh_data,dev->hard_header_len);
				if (!rt->rt_hh->hh_uptodate)
				{
					skb->arp = 0;
#if RT_CACHE_DEBUG >= 2
					printk("tcp_do_retransmit: hh miss %08x via %08x\n", iph->daddr, rt->rt_gateway);
#endif
				}
			}
			else if (dev->hard_header)
			{
				if(dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, skb->len)<0)
					skb->arp=0;
			}

			th->ack_seq = htonl(sk->acked_seq);
			sk->ack_backlog = 0;
			sk->bytes_rcv = 0;
			th->window = ntohs(tcp_select_window(sk));
			tcp_send_check(th, sk->saddr, sk->daddr, size, skb);

			if (dev->flags & IFF_UP)
			{
				if (sk && !skb_device_locked(skb))
				{
					skb_unlink(skb);

					ip_statistics.IpOutRequests++;
					dev_queue_xmit(skb, dev, sk->priority);
				}
			}
		}

		ct++;
		sk->prot->retransmits ++;
		tcp_statistics.TcpRetransSegs++;

		if (!all)
			break;

		if (ct >= sk->cong_window)
			break;
		skb = skb->link3;
	}
}
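
/*
 * Build and transmit an RST in reply to a segment that arrived for a
 * nonexistent or mismatched connection.  Nothing is sent in reply to
 * a segment that itself carries RST.
 */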
void tcp_send_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
	struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	int tmp;
	struct device *ndev=NULL;

	if(th->rst)
		return;

	buff = sock_wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
		return;

	buff->sk = NULL;
	buff->dev = dev;
	buff->localroute = 0;
	buff->csum = 0;

	tmp = prot->build_header(buff, saddr, daddr, &ndev, IPPROTO_TCP, opt,
			   sizeof(struct tcphdr),tos,ttl,NULL);
	if (tmp < 0)
	{
		buff->free = 1;
		sock_wfree(NULL, buff);
		return;
	}

	t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
	memset(t1, 0, sizeof(*t1));

	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = sizeof(*t1)/4;
	t1->rst = 1;

	if(th->ack)
	{
		t1->seq = th->ack_seq;
	}
	else
	{
		t1->ack = 1;
		if(!th->syn)
			t1->ack_seq = th->seq;
		else
			t1->ack_seq = htonl(ntohl(th->seq)+1);
	}

	tcp_send_check(t1, saddr, daddr, sizeof(*t1), buff);
	prot->queue_xmit(NULL, ndev, buff, 1);
	tcp_statistics.TcpOutSegs++;
}
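
/*
 * Build and send (or queue, if data is still outstanding) the FIN
 * segment that closes our side of the connection.  write_seq is
 * advanced so the FIN occupies one sequence number.
 */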
void tcp_send_fin(struct sock *sk)
{
	struct proto *prot =(struct proto *)sk->prot;
	struct tcphdr *th =(struct tcphdr *)&sk->dummy_th;
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct device *dev=NULL;
	int tmp;

	buff = sock_wmalloc(sk, MAX_RESET_SIZE, 1, GFP_KERNEL);

	if (buff == NULL)
	{
		printk("tcp_send_fin: Impossible malloc failure");
		return;
	}

	buff->sk = sk;
	buff->localroute = sk->localroute;
	buff->csum = 0;

	tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
			   IPPROTO_TCP, sk->opt,
			   sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
	if (tmp < 0)
	{
		int t;

		buff->free = 1;
		sock_wfree(sk,buff);
		sk->write_seq++;
		t=del_timer(&sk->timer);
		if(t)
			add_timer(&sk->timer);
		else
			tcp_reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
		return;
	}

	t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
	buff->dev = dev;
	memcpy(t1, th, sizeof(*t1));
	buff->seq = sk->write_seq;
	sk->write_seq++;
	buff->end_seq = sk->write_seq;
	t1->seq = htonl(buff->seq);
	t1->ack_seq = htonl(sk->acked_seq);
	t1->window = htons(sk->window=tcp_select_window(sk));
	t1->fin = 1;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);

	if (skb_peek(&sk->write_queue) != NULL)
	{
		buff->free = 0;
		if (buff->next != NULL)
		{
			printk("tcp_send_fin: next != NULL\n");
			skb_unlink(buff);
		}
		skb_queue_tail(&sk->write_queue, buff);
	}
	else
	{
		sk->sent_seq = sk->write_seq;
		sk->prot->queue_xmit(sk, dev, buff, 0);
		tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
	}
}
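
/*
 * Send the SYN+ACK for a new passive connection 'newsk' created from
 * the listening socket 'sk'.  The reply advertises our MSS as a TCP
 * option, starts the initial retransmit timer and moves the original
 * SYN onto the listener's receive queue so accept() can find the new
 * connection.
 */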
void tcp_send_synack(struct sock * newsk, struct sock * sk, struct sk_buff * skb)
{
	struct tcphdr *t1;
	unsigned char *ptr;
	struct sk_buff * buff;
	struct device *ndev=NULL;
	int tmp;

	buff = sock_wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
	{
		sk->err = ENOMEM;
		newsk->dead = 1;
		newsk->state = TCP_CLOSE;

		kfree_skb(skb, FREE_READ);
		tcp_statistics.TcpAttemptFails++;
		return;
	}

	buff->sk = newsk;
	buff->localroute = newsk->localroute;

	tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &ndev,
			       IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl,&newsk->ip_route_cache);

	if (tmp < 0)
	{
		sk->err = tmp;
		buff->free = 1;
		kfree_skb(buff,FREE_WRITE);
		newsk->dead = 1;
		newsk->state = TCP_CLOSE;
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
		tcp_statistics.TcpAttemptFails++;
		return;
	}

	t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));

	memcpy(t1, skb->h.th, sizeof(*t1));
	buff->seq = newsk->write_seq++;
	buff->end_seq = newsk->write_seq;

	t1->dest = skb->h.th->source;
	t1->source = newsk->dummy_th.source;
	t1->seq = ntohl(buff->seq);
	newsk->sent_seq = newsk->write_seq;
	t1->window = ntohs(tcp_select_window(newsk));
	t1->syn = 1;
	t1->ack = 1;
	t1->urg = 0;
	t1->rst = 0;
	t1->psh = 0;
	t1->ack_seq = htonl(newsk->acked_seq);
	t1->doff = sizeof(*t1)/4+1;
	ptr = skb_put(buff,4);
	ptr[0] = 2;
	ptr[1] = 4;
	ptr[2] = ((newsk->mtu) >> 8) & 0xff;
	ptr[3] =(newsk->mtu) & 0xff;
	buff->csum = csum_partial(ptr, 4, 0);
	tcp_send_check(t1, newsk->saddr, newsk->daddr, sizeof(*t1)+4, buff);
	newsk->prot->queue_xmit(newsk, ndev, buff, 0);
	tcp_reset_xmit_timer(newsk, TIME_WRITE , TCP_TIMEOUT_INIT);
	skb->sk = newsk;

	sk->rmem_alloc -= skb->truesize;
	newsk->rmem_alloc += skb->truesize;

	skb_queue_tail(&sk->receive_queue,skb);
	sk->ack_backlog++;
	tcp_statistics.TcpOutSegs++;
}
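
/*
 * Transmit an ACK carrying the given sequence and acknowledgement
 * numbers.  If no buffer can be allocated the ACK is noted in
 * ack_backlog and the write timer is used to retry shortly.  When the
 * ACK is completely up to date the delayed-ACK bookkeeping is cleared
 * and the timer is dropped back to keepalive (or deleted).
 */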
void tcp_send_ack(u32 sequence, u32 ack,
	     struct sock *sk,
	     struct tcphdr *th, u32 daddr)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	struct device *dev = NULL;
	int tmp;

	if(sk->zapped)
		return;

	buff = sock_wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
	{
		sk->ack_backlog++;
		if (sk->ip_xmit_timeout != TIME_WRITE && tcp_connected(sk->state))
		{
			tcp_reset_xmit_timer(sk, TIME_WRITE, HZ);
		}
		return;
	}

	buff->sk = sk;
	buff->localroute = sk->localroute;
	buff->csum = 0;

	tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
				IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
	if (tmp < 0)
	{
		buff->free = 1;
		sock_wfree(sk, buff);
		return;
	}
	t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));

	memcpy(t1, &sk->dummy_th, sizeof(*t1));

	t1->dest = th->source;
	t1->source = th->dest;
	t1->seq = ntohl(sequence);
	sk->window = tcp_select_window(sk);
	t1->window = ntohs(sk->window);

	if (ack == sk->acked_seq) {
		sk->ack_backlog = 0;
		sk->bytes_rcv = 0;
		sk->ack_timed = 0;

		if (sk->send_head == NULL && skb_peek(&sk->write_queue) == NULL
			&& sk->ip_xmit_timeout == TIME_WRITE)
			if(sk->keepopen)
				tcp_reset_xmit_timer(sk,TIME_KEEPOPEN,TCP_TIMEOUT_LEN);
			else
				delete_timer(sk);
	}

	t1->ack_seq = htonl(ack);
	tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), buff);
	if (sk->debug)
		printk("\rtcp_ack: seq %x ack %x\n", sequence, ack);
	sk->prot->queue_xmit(sk, dev, buff, 1);
	tcp_statistics.TcpOutSegs++;
}
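
/*
 * Window probe.  If the peer has opened its window a little and data
 * is waiting, a new segment is built carrying as much of the queued
 * data as now fits; otherwise a bare ACK with an out-of-date sequence
 * number (sent_seq - 1) is sent to force the other end to reply with
 * its current window.
 */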
void tcp_write_wakeup(struct sock *sk)
{
	struct sk_buff *buff,*skb;
	struct tcphdr *t1;
	struct device *dev=NULL;
	int tmp;

	if (sk->zapped)
		return;

	if (sk->state != TCP_ESTABLISHED &&
	    sk->state != TCP_CLOSE_WAIT &&
	    sk->state != TCP_FIN_WAIT1 &&
	    sk->state != TCP_LAST_ACK &&
	    sk->state != TCP_CLOSING
	)
	{
		return;
	}
	if ( before(sk->sent_seq, sk->window_seq) &&
	    (skb=skb_peek(&sk->write_queue)))
	{
		struct iphdr *iph;
		struct tcphdr *th;
		struct tcphdr *nth;
		unsigned long win_size;
#if 0
		unsigned long ow_size;
#endif

		win_size = sk->window_seq - sk->sent_seq;

		iph = (struct iphdr *)skb->ip_hdr;
		th = (struct tcphdr *)(((char *)iph) +(iph->ihl << 2));

		buff = sock_wmalloc(sk, win_size + th->doff * 4 +
				(iph->ihl << 2) +
				sk->prot->max_header + 15,
				1, GFP_ATOMIC);
		if ( buff == NULL )
			return;

		buff->free = 1;

		buff->sk = sk;
		buff->localroute = sk->localroute;

		tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
				IPPROTO_TCP, sk->opt, buff->truesize,
				sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
		if (tmp < 0)
		{
			sock_wfree(sk, buff);
			return;
		}

		buff->dev = dev;

		nth = (struct tcphdr *) skb_put(buff,sizeof(*th));

		memcpy(nth, th, sizeof(*th));

		nth->ack = 1;
		nth->ack_seq = htonl(sk->acked_seq);
		nth->window = htons(tcp_select_window(sk));
		nth->check = 0;

		buff->csum = csum_partial_copy((void *)(th + 1), skb_put(buff,win_size),
				win_size + th->doff*4 - sizeof(*th), 0);

		buff->end_seq = sk->sent_seq + win_size;
		sk->sent_seq = buff->end_seq;
		if(th->urg && ntohs(th->urg_ptr) < win_size)
			nth->urg = 0;

		tcp_send_check(nth, sk->saddr, sk->daddr,
			   nth->doff * 4 + win_size , buff);
	}
	else
	{
		buff = sock_wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
		if (buff == NULL)
			return;

		buff->free = 1;
		buff->sk = sk;
		buff->localroute = sk->localroute;
		buff->csum = 0;

		tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
				IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
		if (tmp < 0)
		{
			sock_wfree(sk, buff);
			return;
		}

		t1 = (struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
		memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));

		t1->seq = htonl(sk->sent_seq-1);

		t1->ack_seq = htonl(sk->acked_seq);
		t1->window = htons(tcp_select_window(sk));
		tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);
	}

	sk->prot->queue_xmit(sk, dev, buff, 1);
	tcp_statistics.TcpOutSegs++;
}
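
/*
 * Probe timer handler: send a window probe via tcp_write_wakeup(),
 * back off the retransmission timeout (capped at 120 seconds) and
 * rearm the probe timer.
 */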
void tcp_send_probe0(struct sock *sk)
{
	if (sk->zapped)
		return;

	tcp_write_wakeup(sk);

	sk->backoff++;
	sk->rto = min(sk->rto << 1, 120*HZ);
	sk->retransmits++;
	sk->prot->retransmits ++;
	tcp_reset_xmit_timer (sk, TIME_PROBE0, sk->rto);
}