This source file includes the following definitions:
- clear_delayed_acks
- tcp_send_skb
- tcp_dequeue_partial
- tcp_send_partial
- tcp_enqueue_partial
- tcp_write_xmit
- tcp_do_retransmit
- tcp_send_reset
- tcp_send_fin
- tcp_send_synack
- tcp_send_delayed_ack
- tcp_send_ack
- tcp_write_wakeup
- tcp_send_probe0
#include <linux/config.h>
#include <net/tcp.h>

#include <linux/interrupt.h>
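
/*
 *	Cancel any pending delayed-ACK state: clear the ACK bookkeeping
 *	counters and stop the delayed ACK timer.
 */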
static __inline__ void clear_delayed_acks(struct sock * sk)
{
        sk->ack_timed = 0;
        sk->ack_backlog = 0;
        sk->bytes_rcv = 0;
        del_timer(&sk->delack_timer);
}
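
/*
 *	Queue one fully built TCP segment for transmission.  If the peer's
 *	window, an outstanding retransmit or the congestion window forbids
 *	sending now, the skb is appended to the socket's write queue;
 *	otherwise it is checksummed, handed straight to the IP layer and
 *	covered by the retransmit timer.
 */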
void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
{
        int size;
        struct tcphdr * th = skb->h.th;

        size = skb->len - ((unsigned char *) th - skb->data);

        if (size < sizeof(struct tcphdr) || size > skb->len)
        {
                printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
                        skb, skb->data, th, skb->len);
                kfree_skb(skb, FREE_WRITE);
                return;
        }

        if (size == sizeof(struct tcphdr))
        {
                if(!th->syn && !th->fin)
                {
                        printk("tcp_send_skb: attempt to queue a bogon.\n");
                        kfree_skb(skb,FREE_WRITE);
                        return;
                }
        }

        tcp_statistics.TcpOutSegs++;
        skb->seq = ntohl(th->seq);
        skb->end_seq = skb->seq + size - 4*th->doff;

        if (after(skb->end_seq, sk->window_seq) ||
            (sk->retransmits && sk->ip_xmit_timeout == TIME_WRITE) ||
            sk->packets_out >= sk->cong_window)
        {
                th->check = 0;
                if (skb->next != NULL)
                {
                        printk("tcp_send_partial: next != NULL\n");
                        skb_unlink(skb);
                }
                skb_queue_tail(&sk->write_queue, skb);

                if (before(sk->window_seq, sk->write_queue.next->end_seq) &&
                    sk->send_head == NULL && sk->ack_backlog == 0)
                        tcp_reset_xmit_timer(sk, TIME_PROBE0, sk->rto);
        }
        else
        {
                clear_delayed_acks(sk);
                th->ack_seq = htonl(sk->acked_seq);
                th->window = htons(tcp_select_window(sk));

                tcp_send_check(th, sk->saddr, sk->daddr, size, skb);

                sk->sent_seq = sk->write_seq;

                sk->prot->queue_xmit(sk, skb->dev, skb, 0);

                tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
        }
}
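
/*
 *	Atomically remove and return the pending partially built segment,
 *	if any, cancelling its flush timer.  Interrupts are disabled so the
 *	timer handler cannot race with us.
 */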
struct sk_buff * tcp_dequeue_partial(struct sock * sk)
{
        struct sk_buff * skb;
        unsigned long flags;

        save_flags(flags);
        cli();
        skb = sk->partial;
        if (skb) {
                sk->partial = NULL;
                del_timer(&sk->partial_timer);
        }
        restore_flags(flags);
        return skb;
}
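
/*
 *	Flush any queued partial segment out through tcp_send_skb().
 *	This is also used as the partial_timer handler.
 */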
void tcp_send_partial(struct sock *sk)
{
        struct sk_buff *skb;

        if (sk == NULL)
                return;
        while ((skb = tcp_dequeue_partial(sk)) != NULL)
                tcp_send_skb(sk, skb);
}
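
/*
 *	Install a new partial segment, arming a one second timer to flush
 *	it.  Any previously queued partial segment is sent immediately.
 */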
void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk)
{
        struct sk_buff * tmp;
        unsigned long flags;

        save_flags(flags);
        cli();
        tmp = sk->partial;
        if (tmp)
                del_timer(&sk->partial_timer);
        sk->partial = skb;
        init_timer(&sk->partial_timer);

        sk->partial_timer.expires = jiffies+HZ;
        sk->partial_timer.function = (void (*)(unsigned long)) tcp_send_partial;
        sk->partial_timer.data = (unsigned long) sk;
        add_timer(&sk->partial_timer);
        restore_flags(flags);
        if (tmp)
                tcp_send_skb(sk, tmp);
}
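
/*
 *	Push segments off the socket's write queue for as long as the
 *	receive window, congestion window and retransmit state allow.
 *	Segments that are already acknowledged are simply freed; the rest
 *	are checksummed, handed to IP and covered by the retransmit timer.
 */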
void tcp_write_xmit(struct sock *sk)
{
        struct sk_buff *skb;

        if(sk->zapped)
                return;

        while((skb = skb_peek(&sk->write_queue)) != NULL &&
              !after(skb->end_seq, sk->window_seq) &&
              (sk->retransmits == 0 ||
               sk->ip_xmit_timeout != TIME_WRITE ||
               !after(skb->end_seq, sk->rcv_ack_seq))
              && sk->packets_out < sk->cong_window)
        {
                IS_SKB(skb);
                skb_unlink(skb);

                if (before(skb->end_seq, sk->rcv_ack_seq +1))
                {
                        sk->retransmits = 0;
                        kfree_skb(skb, FREE_WRITE);
                        if (!sk->dead)
                                sk->write_space(sk);
                }
                else
                {
                        struct tcphdr *th;
                        struct iphdr *iph;
                        int size;

                        iph = skb->ip_hdr;
                        th = (struct tcphdr *)(((char *)iph) +(iph->ihl << 2));
                        size = skb->len - (((unsigned char *) th) - skb->data);
#ifndef CONFIG_NO_PATH_MTU_DISCOVERY
                        if (size > sk->mtu - sizeof(struct iphdr))
                        {
                                iph->frag_off &= ~htons(IP_DF);
                                ip_send_check(iph);
                        }
#endif

                        th->ack_seq = htonl(sk->acked_seq);
                        th->window = htons(tcp_select_window(sk));

                        tcp_send_check(th, sk->saddr, sk->daddr, size, skb);

                        sk->sent_seq = skb->end_seq;

                        sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);

                        clear_delayed_acks(sk);

                        tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
                }
        }
}
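
/*
 *	Retransmit queued segments starting at send_head.  Each skb is
 *	re-routed, given a fresh IP id, updated ACK and window fields and a
 *	new checksum before being queued on the device again.  With 'all'
 *	set the whole retransmit queue is walked (bounded by the congestion
 *	window); otherwise only the first segment is resent.
 */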
void tcp_do_retransmit(struct sock *sk, int all)
{
        struct sk_buff * skb;
        struct proto *prot;
        struct device *dev;
        int ct=0;
        struct rtable *rt;

        prot = sk->prot;
        skb = sk->send_head;

        while (skb != NULL)
        {
                struct tcphdr *th;
                struct iphdr *iph;
                int size;

                dev = skb->dev;
                IS_SKB(skb);
                skb->when = jiffies;

                if (skb_device_locked(skb))
                        break;

                skb_pull(skb,((unsigned char *)skb->ip_hdr)-skb->data);

                iph = (struct iphdr *)skb->data;
                th = (struct tcphdr *)(((char *)iph) + (iph->ihl << 2));
                size = ntohs(iph->tot_len) - (iph->ihl<<2);

                {
                        struct options * opt = (struct options*)skb->proto_priv;
                        rt = ip_check_route(&sk->ip_route_cache, opt->srr?opt->faddr:iph->daddr, skb->localroute);
                }

                iph->id = htons(ip_id_count++);
#ifndef CONFIG_NO_PATH_MTU_DISCOVERY
                if (rt && ntohs(iph->tot_len) > rt->rt_mtu)
                        iph->frag_off &= ~htons(IP_DF);
#endif
                ip_send_check(iph);

                if (rt==NULL)
                {
                        if(skb->sk)
                        {
                                skb->sk->err_soft=ENETUNREACH;
                                skb->sk->error_report(skb->sk);
                        }
                }
                else
                {
                        dev=rt->rt_dev;
                        skb->raddr=rt->rt_gateway;
                        skb->dev=dev;
                        skb->arp=1;
                        if (rt->rt_hh)
                        {
                                memcpy(skb_push(skb,dev->hard_header_len),rt->rt_hh->hh_data,dev->hard_header_len);
                                if (!rt->rt_hh->hh_uptodate)
                                {
                                        skb->arp = 0;
#if RT_CACHE_DEBUG >= 2
                                        printk("tcp_do_retransmit: hh miss %08x via %08x\n", iph->daddr, rt->rt_gateway);
#endif
                                }
                        }
                        else if (dev->hard_header)
                        {
                                if(dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, skb->len)<0)
                                        skb->arp=0;
                        }

                        th->ack_seq = htonl(sk->acked_seq);
                        clear_delayed_acks(sk);
                        th->window = ntohs(tcp_select_window(sk));
                        tcp_send_check(th, sk->saddr, sk->daddr, size, skb);

                        if (dev->flags & IFF_UP)
                        {
                                if (sk && !skb_device_locked(skb))
                                {
                                        skb_unlink(skb);

                                        ip_statistics.IpOutRequests++;
                                        dev_queue_xmit(skb, dev, sk->priority);
                                }
                        }
                }

                ct++;
                sk->retransmits++;
                sk->prot->retransmits++;
                tcp_statistics.TcpRetransSegs++;

                if (!all)
                        break;

                if (ct >= sk->cong_window)
                        break;
                skb = skb->link3;
        }
}
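
/*
 *	Send an RST in reply to a segment that does not belong to any
 *	connection.  Nothing is sent in response to a segment that itself
 *	carries RST, so resets can never answer each other.
 */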
void tcp_send_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
        struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
{
        struct sk_buff *buff;
        struct tcphdr *t1;
        int tmp;
        struct device *ndev=NULL;

        if(th->rst)
                return;

        buff = sock_wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
        if (buff == NULL)
                return;

        buff->sk = NULL;
        buff->dev = dev;
        buff->localroute = 0;
        buff->csum = 0;

        tmp = prot->build_header(buff, saddr, daddr, &ndev, IPPROTO_TCP, opt,
                                 sizeof(struct tcphdr),tos,ttl,NULL);
        if (tmp < 0)
        {
                buff->free = 1;
                sock_wfree(NULL, buff);
                return;
        }

        t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
        memset(t1, 0, sizeof(*t1));

        t1->dest = th->source;
        t1->source = th->dest;
        t1->doff = sizeof(*t1)/4;
        t1->rst = 1;

        if(th->ack)
        {
                t1->seq = th->ack_seq;
        }
        else
        {
                t1->ack = 1;
                if(!th->syn)
                        t1->ack_seq = th->seq;
                else
                        t1->ack_seq = htonl(ntohl(th->seq)+1);
        }

        tcp_send_check(t1, saddr, daddr, sizeof(*t1), buff);
        prot->queue_xmit(NULL, ndev, buff, 1);
        tcp_statistics.TcpOutSegs++;
}
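
/*
 *	Send a FIN when the connection is being shut down.  If the write
 *	queue is not yet empty the FIN segment is queued behind the pending
 *	data, otherwise it goes out immediately under the retransmit timer.
 */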
void tcp_send_fin(struct sock *sk)
{
        struct proto *prot =(struct proto *)sk->prot;
        struct tcphdr *th =(struct tcphdr *)&sk->dummy_th;
        struct tcphdr *t1;
        struct sk_buff *buff;
        struct device *dev=NULL;
        int tmp;

        buff = sock_wmalloc(sk, MAX_RESET_SIZE, 1, GFP_KERNEL);

        if (buff == NULL)
        {
                printk("tcp_send_fin: Impossible malloc failure");
                return;
        }

        buff->sk = sk;
        buff->localroute = sk->localroute;
        buff->csum = 0;

        tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
                                 IPPROTO_TCP, sk->opt,
                                 sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
        if (tmp < 0)
        {
                int t;

                buff->free = 1;
                sock_wfree(sk,buff);
                sk->write_seq++;
                t=del_timer(&sk->timer);
                if(t)
                        add_timer(&sk->timer);
                else
                        tcp_reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
                return;
        }

        t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
        buff->dev = dev;
        memcpy(t1, th, sizeof(*t1));
        buff->seq = sk->write_seq;
        sk->write_seq++;
        buff->end_seq = sk->write_seq;
        t1->seq = htonl(buff->seq);
        t1->ack_seq = htonl(sk->acked_seq);
        t1->window = htons(tcp_select_window(sk));
        t1->fin = 1;
        tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);

        if (skb_peek(&sk->write_queue) != NULL)
        {
                buff->free = 0;
                if (buff->next != NULL)
                {
                        printk("tcp_send_fin: next != NULL\n");
                        skb_unlink(buff);
                }
                skb_queue_tail(&sk->write_queue, buff);
        }
        else
        {
                sk->sent_seq = sk->write_seq;
                sk->prot->queue_xmit(sk, dev, buff, 0);
                tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
        }
}
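
/*
 *	Send a SYN+ACK on behalf of the new (embryonic) socket created for
 *	an incoming connection request, advertising our MSS as the only TCP
 *	option, and move the triggering SYN skb onto the listening socket's
 *	receive queue charged to the new socket.
 */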
void tcp_send_synack(struct sock * newsk, struct sock * sk, struct sk_buff * skb)
{
        struct tcphdr *t1;
        unsigned char *ptr;
        struct sk_buff * buff;
        struct device *ndev=NULL;
        int tmp;

        buff = sock_wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
        if (buff == NULL)
        {
                sk->err = ENOMEM;
                destroy_sock(newsk);
                kfree_skb(skb, FREE_READ);
                tcp_statistics.TcpAttemptFails++;
                return;
        }

        buff->sk = newsk;
        buff->localroute = newsk->localroute;

        tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &ndev,
                IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl,&newsk->ip_route_cache);

        if (tmp < 0)
        {
                sk->err = tmp;
                buff->free = 1;
                kfree_skb(buff,FREE_WRITE);
                destroy_sock(newsk);
                skb->sk = sk;
                kfree_skb(skb, FREE_READ);
                tcp_statistics.TcpAttemptFails++;
                return;
        }

        t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));

        memcpy(t1, skb->h.th, sizeof(*t1));
        buff->seq = newsk->write_seq++;
        buff->end_seq = newsk->write_seq;

        t1->dest = skb->h.th->source;
        t1->source = newsk->dummy_th.source;
        t1->seq = ntohl(buff->seq);
        newsk->sent_seq = newsk->write_seq;
        t1->window = ntohs(tcp_select_window(newsk));
        t1->syn = 1;
        t1->ack = 1;
        t1->urg = 0;
        t1->rst = 0;
        t1->psh = 0;
        t1->ack_seq = htonl(newsk->acked_seq);
        t1->doff = sizeof(*t1)/4+1;
        ptr = skb_put(buff,4);
        ptr[0] = 2;
        ptr[1] = 4;
        ptr[2] = ((newsk->mtu) >> 8) & 0xff;
        ptr[3] =(newsk->mtu) & 0xff;
        buff->csum = csum_partial(ptr, 4, 0);
        tcp_send_check(t1, newsk->saddr, newsk->daddr, sizeof(*t1)+4, buff);
        newsk->prot->queue_xmit(newsk, ndev, buff, 0);
        tcp_reset_xmit_timer(newsk, TIME_WRITE , TCP_TIMEOUT_INIT);
        skb->sk = newsk;

        atomic_sub(skb->truesize, &sk->rmem_alloc);
        atomic_add(skb->truesize, &newsk->rmem_alloc);

        skb_queue_tail(&sk->receive_queue,skb);
        sk->ack_backlog++;
        tcp_statistics.TcpOutSegs++;
}
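
/*
 *	Schedule a delayed ACK, to fire no later than max_timeout from now.
 *	If too much unacknowledged data has accumulated the timer is set to
 *	expire immediately.  An earlier pending expiry is left untouched.
 */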
void tcp_send_delayed_ack(struct sock * sk, int max_timeout)
{
        unsigned long timeout, now;

        now = jiffies;
        timeout = sk->ato;
        if (timeout > max_timeout)
                timeout = max_timeout;
        timeout += now;
        if (sk->bytes_rcv > sk->max_unacked) {
                timeout = now;
                mark_bh(TIMER_BH);
        }

        if (!del_timer(&sk->delack_timer) || timeout < sk->delack_timer.expires)
                sk->delack_timer.expires = timeout;

        sk->ack_backlog++;
        add_timer(&sk->delack_timer);
}
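
/*
 *	Send an immediate ACK carrying the current acknowledgement and
 *	window values.  If no buffer can be allocated, fall back to a
 *	delayed ACK within half a second.
 */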
void tcp_send_ack(struct sock *sk)
{
        struct sk_buff *buff;
        struct tcphdr *t1;
        struct device *dev = NULL;
        int tmp;

        if(sk->zapped)
                return;

        clear_delayed_acks(sk);

        if (sk->send_head == NULL
            && skb_queue_empty(&sk->write_queue)
            && sk->ip_xmit_timeout == TIME_WRITE)
        {
                if(sk->keepopen)
                        tcp_reset_xmit_timer(sk,TIME_KEEPOPEN,TCP_TIMEOUT_LEN);
                else
                        delete_timer(sk);
        }

        buff = sock_wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
        if (buff == NULL)
        {
                tcp_send_delayed_ack(sk, HZ/2);
                return;
        }

        buff->sk = sk;
        buff->localroute = sk->localroute;
        buff->csum = 0;

        tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
                IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
        if (tmp < 0)
        {
                buff->free = 1;
                sock_wfree(sk, buff);
                return;
        }
        t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));

        memcpy(t1, &sk->dummy_th, sizeof(*t1));
        t1->seq = htonl(sk->sent_seq);
        t1->ack_seq = htonl(sk->acked_seq);
        t1->window = htons(tcp_select_window(sk));

        tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);
        if (sk->debug)
                printk("\rtcp_ack: seq %x ack %x\n", sk->sent_seq, sk->acked_seq);
        sk->prot->queue_xmit(sk, dev, buff, 1);
        tcp_statistics.TcpOutSegs++;
}
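
/*
 *	Window probe.  If the peer has opened its window and data is
 *	queued, a fresh copy of as much of the first queued segment as fits
 *	in the window is built and sent; otherwise a bare segment with
 *	sequence number sent_seq-1 is sent to provoke an ACK from the peer.
 */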
void tcp_write_wakeup(struct sock *sk)
{
        struct sk_buff *buff,*skb;
        struct tcphdr *t1;
        struct device *dev=NULL;
        int tmp;

        if (sk->zapped)
                return;

        if (sk->state != TCP_ESTABLISHED &&
            sk->state != TCP_CLOSE_WAIT &&
            sk->state != TCP_FIN_WAIT1 &&
            sk->state != TCP_LAST_ACK &&
            sk->state != TCP_CLOSING
        )
        {
                return;
        }
        if ( before(sk->sent_seq, sk->window_seq) &&
            (skb=skb_peek(&sk->write_queue)))
        {
                struct iphdr *iph;
                struct tcphdr *th;
                struct tcphdr *nth;
                unsigned long win_size;
#if 0
                unsigned long ow_size;
#endif

                win_size = sk->window_seq - sk->sent_seq;

                iph = (struct iphdr *)skb->ip_hdr;
                th = (struct tcphdr *)(((char *)iph) +(iph->ihl << 2));

                buff = sock_wmalloc(sk, win_size + th->doff * 4 +
                                    (iph->ihl << 2) +
                                    sk->prot->max_header + 15,
                                    1, GFP_ATOMIC);
                if ( buff == NULL )
                        return;

                buff->free = 1;

                buff->sk = sk;
                buff->localroute = sk->localroute;

                tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
                        IPPROTO_TCP, sk->opt, buff->truesize,
                        sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
                if (tmp < 0)
                {
                        sock_wfree(sk, buff);
                        return;
                }

                buff->dev = dev;

                nth = (struct tcphdr *) skb_put(buff,sizeof(*th));

                memcpy(nth, th, sizeof(*th));

                nth->ack = 1;
                nth->ack_seq = htonl(sk->acked_seq);
                nth->window = htons(tcp_select_window(sk));
                nth->check = 0;

                buff->csum = csum_partial_copy((void *)(th + 1), skb_put(buff,win_size),
                        win_size + th->doff*4 - sizeof(*th), 0);

                buff->end_seq = sk->sent_seq + win_size;
                sk->sent_seq = buff->end_seq;
                if(th->urg && ntohs(th->urg_ptr) < win_size)
                        nth->urg = 0;

                tcp_send_check(nth, sk->saddr, sk->daddr,
                        nth->doff * 4 + win_size , buff);
        }
        else
        {
                buff = sock_wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
                if (buff == NULL)
                        return;

                buff->free = 1;
                buff->sk = sk;
                buff->localroute = sk->localroute;
                buff->csum = 0;

                tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
                        IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
                if (tmp < 0)
                {
                        sock_wfree(sk, buff);
                        return;
                }

                t1 = (struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
                memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));

                t1->seq = htonl(sk->sent_seq-1);

                t1->ack_seq = htonl(sk->acked_seq);
                t1->window = htons(tcp_select_window(sk));
                tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);

        }

        sk->prot->queue_xmit(sk, dev, buff, 1);
        tcp_statistics.TcpOutSegs++;
}
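
/*
 *	Zero window probe timer handler: send a window probe, back off the
 *	retransmission timeout exponentially (capped at 120 seconds) and
 *	rearm the probe timer.
 */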
void tcp_send_probe0(struct sock *sk)
{
        if (sk->zapped)
                return;

        tcp_write_wakeup(sk);

        sk->backoff++;
        sk->rto = min(sk->rto << 1, 120*HZ);
        sk->retransmits++;
        sk->prot->retransmits ++;
        tcp_reset_xmit_timer (sk, TIME_PROBE0, sk->rto);
}