This source file includes the following definitions:
- tcp_send_skb
- tcp_dequeue_partial
- tcp_send_partial
- tcp_enqueue_partial
- tcp_write_xmit
- tcp_do_retransmit
- tcp_send_reset
- tcp_send_fin
- tcp_send_synack
- tcp_send_ack
- tcp_write_wakeup
- tcp_send_probe0
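/*
 * TCP output engine: queueing and transmission of data segments,
 * retransmission of unacknowledged data, and generation of the RST,
 * FIN, SYN-ACK, ACK and zero-window probe segments listed above.
 */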
23 #include <linux/config.h>
24 #include <net/tcp.h>
25
26
27
28
29
30
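/*
 * tcp_send_skb() hands a fully built segment to the transmit path.  The
 * segment is sent immediately when it fits inside the peer's advertised
 * window and the congestion window, and we are not in the middle of a
 * retransmission; otherwise it is appended to sk->write_queue for
 * tcp_write_xmit() to send later.
 */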
31 void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
32 {
33 int size;
34 struct tcphdr * th = skb->h.th;
35
36
37
38
39
40 size = skb->len - ((unsigned char *) th - skb->data);
41
42
43
44
45
46 if (size < sizeof(struct tcphdr) || size > skb->len)
47 {
48 printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
49 skb, skb->data, th, skb->len);
50 kfree_skb(skb, FREE_WRITE);
51 return;
52 }
53
54
55
56
57
58
59 if (size == sizeof(struct tcphdr))
60 {
61
62 if(!th->syn && !th->fin)
63 {
64 printk("tcp_send_skb: attempt to queue a bogon.\n");
65 kfree_skb(skb,FREE_WRITE);
66 return;
67 }
68 }
69
70
71
72
73
74 tcp_statistics.TcpOutSegs++;
75 skb->seq = ntohl(th->seq);
76 skb->end_seq = skb->seq + size - 4*th->doff;
77
78
79
80
81
82
83
84
85
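/*
 * Hold the segment back if it ends beyond the advertised window, if a
 * retransmission is in progress, or if the congestion window is already
 * full of unacknowledged packets; otherwise transmit it straight away.
 */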
86 if (after(skb->end_seq, sk->window_seq) ||
87 (sk->retransmits && sk->ip_xmit_timeout == TIME_WRITE) ||
88 sk->packets_out >= sk->cong_window)
89 {
90
91
92 th->check = 0;
93 if (skb->next != NULL)
94 {
95 printk("tcp_send_skb: next != NULL\n");
96 skb_unlink(skb);
97 }
98 skb_queue_tail(&sk->write_queue, skb);
99
100
101
102
103
104
105
106
107 if (before(sk->window_seq, sk->write_queue.next->end_seq) &&
108 sk->send_head == NULL && sk->ack_backlog == 0)
109 tcp_reset_xmit_timer(sk, TIME_PROBE0, sk->rto);
110 }
111 else
112 {
113
114
115
116
117 th->ack_seq = htonl(sk->acked_seq);
118 th->window = htons(tcp_select_window(sk));
119
120 tcp_send_check(th, sk->saddr, sk->daddr, size, sk);
121
122 sk->sent_seq = sk->write_seq;
123
124
125
126
127
128
129
130 sk->prot->queue_xmit(sk, skb->dev, skb, 0);
131
132
133 sk->ack_backlog = 0;
134 sk->bytes_rcv = 0;
135
136
137
138
139
140
141
142 tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
143 }
144 }
145
146
147
148
149
150
151
152
153
154
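/*
 * tcp_dequeue_partial() atomically detaches the pending partial packet
 * from sk->partial and cancels the timer that would otherwise flush it.
 */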
155 struct sk_buff * tcp_dequeue_partial(struct sock * sk)
156 {
157 struct sk_buff * skb;
158 unsigned long flags;
159
160 save_flags(flags);
161 cli();
162 skb = sk->partial;
163 if (skb) {
164 sk->partial = NULL;
165 del_timer(&sk->partial_timer);
166 }
167 restore_flags(flags);
168 return skb;
169 }
170
171
172
173
174
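/*
 * tcp_send_partial() flushes any pending partial packet out through
 * tcp_send_skb().  It also serves as the expiry handler of
 * sk->partial_timer.
 */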
175 void tcp_send_partial(struct sock *sk)
176 {
177 struct sk_buff *skb;
178
179 if (sk == NULL)
180 return;
181 while ((skb = tcp_dequeue_partial(sk)) != NULL)
182 tcp_send_skb(sk, skb);
183 }
184
185
186
187
188
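/*
 * tcp_enqueue_partial() parks a not-yet-full segment on sk->partial so
 * that further small writes can be merged into it, and arms a one second
 * (HZ jiffies) timer to guarantee it is eventually sent.  Any packet that
 * was already parked is sent immediately to make room.
 */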
189 void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk)
190 {
191 struct sk_buff * tmp;
192 unsigned long flags;
193
194 save_flags(flags);
195 cli();
196 tmp = sk->partial;
197 if (tmp)
198 del_timer(&sk->partial_timer);
199 sk->partial = skb;
200 init_timer(&sk->partial_timer);
201
202
203
204 sk->partial_timer.expires = jiffies+HZ;
205 sk->partial_timer.function = (void (*)(unsigned long)) tcp_send_partial;
206 sk->partial_timer.data = (unsigned long) sk;
207 add_timer(&sk->partial_timer);
208 restore_flags(flags);
209 if (tmp)
210 tcp_send_skb(sk, tmp);
211 }
212
213
214
215
216
217
218
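/*
 * tcp_write_xmit() pushes out as much of sk->write_queue as the peer's
 * advertised window, the congestion window and the retransmission state
 * allow.  Segments that the peer has already acknowledged are freed
 * instead of being sent.
 */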
219 void tcp_write_xmit(struct sock *sk)
220 {
221 struct sk_buff *skb;
222
223
224
225
226
227
228 if(sk->zapped)
229 return;
230
231
232
233
234
235
236
237
238
239 while((skb = skb_peek(&sk->write_queue)) != NULL &&
240 before(skb->end_seq, sk->window_seq + 1) &&
241 (sk->retransmits == 0 ||
242 sk->ip_xmit_timeout != TIME_WRITE ||
243 before(skb->end_seq, sk->rcv_ack_seq + 1))
244 && sk->packets_out < sk->cong_window)
245 {
246 IS_SKB(skb);
247 skb_unlink(skb);
248
249
250
251
252
253 if (before(skb->end_seq, sk->rcv_ack_seq +1))
254 {
255
256
257
258
259
260 sk->retransmits = 0;
261 kfree_skb(skb, FREE_WRITE);
262 if (!sk->dead)
263 sk->write_space(sk);
264 }
265 else
266 {
267 struct tcphdr *th;
268 struct iphdr *iph;
269 int size;
270
271
272
273
274
275
276
277 iph = skb->ip_hdr;
278 th = (struct tcphdr *)(((char *)iph) +(iph->ihl << 2));
279 size = skb->len - (((unsigned char *) th) - skb->data);
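/*
 * While queued, the segment may have become too large for the current
 * MTU estimate (path MTU discovery).  Clear the Don't Fragment bit and
 * refresh the IP checksum so the segment can still be delivered.
 */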
280 #ifndef CONFIG_NO_PATH_MTU_DISCOVERY
281 if (size > sk->mtu - sizeof(struct iphdr))
282 {
283 iph->frag_off &= ~htons(IP_DF);
284 ip_send_check(iph);
285 }
286 #endif
287
288 th->ack_seq = htonl(sk->acked_seq);
289 th->window = htons(tcp_select_window(sk));
290
291 tcp_send_check(th, sk->saddr, sk->daddr, size, sk);
292
293 sk->sent_seq = skb->end_seq;
294
295
296
297
298
299 sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
300
301
302 sk->ack_backlog = 0;
303 sk->bytes_rcv = 0;
304
305
306
307
308
309 tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
310 }
311 }
312 }
313
314
315
316
317
318
319
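/*
 * tcp_do_retransmit() walks the retransmit queue (sk->send_head) and
 * resends segments the device is no longer holding.  Each segment gets a
 * fresh IP id, routing decision, ACK field, window and checksum before
 * being handed back to the device.  With 'all' zero only the first
 * segment is resent, and never more than one congestion window's worth.
 */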
320 void tcp_do_retransmit(struct sock *sk, int all)
321 {
322 struct sk_buff * skb;
323 struct proto *prot;
324 struct device *dev;
325 int ct=0;
326 struct rtable *rt;
327
328 prot = sk->prot;
329 skb = sk->send_head;
330
331 while (skb != NULL)
332 {
333 struct tcphdr *th;
334 struct iphdr *iph;
335 int size;
336
337 dev = skb->dev;
338 IS_SKB(skb);
339 skb->when = jiffies;
340
341
342
343
344
345
346
347
348
349
350
351 if (skb_device_locked(skb))
352 break;
353
354
355
356
357
358 skb_pull(skb,((unsigned char *)skb->ip_hdr)-skb->data);
359
360
361
362
363
364
365
366
367
368
369 iph = (struct iphdr *)skb->data;
370 th = (struct tcphdr *)(((char *)iph) + (iph->ihl << 2));
371 size = ntohs(iph->tot_len) - (iph->ihl<<2);
372
373
374
375
376
377
378
379
380
381
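/*
 * Re-route the segment: the cached route may have changed or expired,
 * and any source routing option must be honoured when choosing the
 * next hop.
 */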
382 {
383
384
385 struct options * opt = (struct options*)skb->proto_priv;
386 rt = ip_check_route(&sk->ip_route_cache, opt->srr?opt->faddr:iph->daddr, skb->localroute);
387 }
388
389 iph->id = htons(ip_id_count++);
390 #ifndef CONFIG_NO_PATH_MTU_DISCOVERY
391 if (rt && ntohs(iph->tot_len) > rt->rt_mtu)
392 iph->frag_off &= ~htons(IP_DF);
393 #endif
394 ip_send_check(iph);
395
396 if (rt==NULL)
397 {
398 if(skb->sk)
399 {
400 skb->sk->err_soft=ENETUNREACH;
401 skb->sk->error_report(skb->sk);
402 }
403 }
404 else
405 {
406 dev=rt->rt_dev;
407 skb->raddr=rt->rt_gateway;
408 skb->dev=dev;
409 skb->arp=1;
410 if (rt->rt_hh)
411 {
412 memcpy(skb_push(skb,dev->hard_header_len),rt->rt_hh->hh_data,dev->hard_header_len);
413 if (!rt->rt_hh->hh_uptodate)
414 {
415 skb->arp = 0;
416 #if RT_CACHE_DEBUG >= 2
417 printk("tcp_do_retransmit: hh miss %08x via %08x\n", iph->daddr, rt->rt_gateway);
418 #endif
419 }
420 }
421 else if (dev->hard_header)
422 {
423 if(dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, skb->len)<0)
424 skb->arp=0;
425 }
426
427
428
429
430
431
432
433
434
435
436
437
438 th->ack_seq = htonl(sk->acked_seq);
439 sk->ack_backlog = 0;
440 sk->bytes_rcv = 0;
441 th->window = htons(tcp_select_window(sk));
442 tcp_send_check(th, sk->saddr, sk->daddr, size, sk);
443
444
445
446
447
448 if (dev->flags & IFF_UP)
449 {
450
451
452
453
454
455
456
457
458 if (sk && !skb_device_locked(skb))
459 {
460
461 skb_unlink(skb);
462
463 ip_statistics.IpOutRequests++;
464 dev_queue_xmit(skb, dev, sk->priority);
465 }
466 }
467 }
468
469
470
471
472
473 ct++;
474 sk->prot->retransmits ++;
475 tcp_statistics.TcpRetransSegs++;
476
477
478
479
480
481
482 if (!all)
483 break;
484
485
486
487
488
489 if (ct >= sk->cong_window)
490 break;
491 skb = skb->link3;
492 }
493 }
494
495
496
497
498
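/*
 * tcp_send_reset() sends an RST in reply to an unacceptable segment.  No
 * reset is ever sent in answer to a segment that itself carries RST, and
 * the reply takes its sequence and acknowledgment numbers from the
 * offending segment as RFC 793 requires.
 */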
499 void tcp_send_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
500 struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
501 {
502 struct sk_buff *buff;
503 struct tcphdr *t1;
504 int tmp;
505 struct device *ndev=NULL;
506
507
508
509
510
511 if(th->rst)
512 return;
513
514
515
516
517
518
519 buff = sock_wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
520 if (buff == NULL)
521 return;
522
523 buff->sk = NULL;
524 buff->dev = dev;
525 buff->localroute = 0;
526
527
528
529
530
531 tmp = prot->build_header(buff, saddr, daddr, &ndev, IPPROTO_TCP, opt,
532 sizeof(struct tcphdr),tos,ttl,NULL);
533 if (tmp < 0)
534 {
535 buff->free = 1;
536 sock_wfree(NULL, buff);
537 return;
538 }
539
540 t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
541 memcpy(t1, th, sizeof(*t1));
542
543
544
545
546
547 t1->dest = th->source;
548 t1->source = th->dest;
549 t1->rst = 1;
550 t1->window = 0;
551
552 if(th->ack)
553 {
554 t1->ack = 0;
555 t1->seq = th->ack_seq;
556 t1->ack_seq = 0;
557 }
558 else
559 {
560 t1->ack = 1;
561 if(!th->syn)
562 t1->ack_seq = th->seq;
563 else
564 t1->ack_seq = htonl(ntohl(th->seq)+1);
565 t1->seq = 0;
566 }
567
568 t1->syn = 0;
569 t1->urg = 0;
570 t1->fin = 0;
571 t1->psh = 0;
572 t1->doff = sizeof(*t1)/4;
573 tcp_send_check(t1, saddr, daddr, sizeof(*t1), NULL);
574 prot->queue_xmit(NULL, ndev, buff, 1);
575 tcp_statistics.TcpOutSegs++;
576 }
577
578
579
580
581
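/*
 * tcp_send_fin() builds the FIN segment that closes our side of the
 * connection.  The FIN consumes one sequence number.  If data is still
 * waiting in the write queue the FIN is queued behind it; otherwise it is
 * transmitted at once and the retransmit timer is started.
 */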
582 void tcp_send_fin(struct sock *sk)
583 {
584 struct proto *prot =(struct proto *)sk->prot;
585 struct tcphdr *th =(struct tcphdr *)&sk->dummy_th;
586 struct tcphdr *t1;
587 struct sk_buff *buff;
588 struct device *dev=NULL;
589 int tmp;
590
591 release_sock(sk);
592
593 buff = sock_wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
594 sk->inuse = 1;
595
596 if (buff == NULL)
597 {
598
599 printk("tcp_send_fin: Impossible malloc failure\n");
600 return;
601 }
602
603
604
605
606
607 buff->sk = sk;
608 buff->localroute = sk->localroute;
609
610
611
612
613
614 tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
615 IPPROTO_TCP, sk->opt,
616 sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
617 if (tmp < 0)
618 {
619 int t;
620
621
622
623
624
625 buff->free = 1;
626 sock_wfree(sk,buff);
627 sk->write_seq++;
628 t=del_timer(&sk->timer);
629 if(t)
630 add_timer(&sk->timer);
631 else
632 tcp_reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
633 return;
634 }
635
636
637
638
639
640
641 t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
642 buff->dev = dev;
643 memcpy(t1, th, sizeof(*t1));
644 buff->seq = sk->write_seq;
645 sk->write_seq++;
646 buff->end_seq = sk->write_seq;
647 t1->seq = htonl(buff->seq);
648 t1->ack = 1;
649 t1->ack_seq = htonl(sk->acked_seq);
650 t1->window = htons(sk->window=tcp_select_window(sk));
651 t1->fin = 1;
652 t1->rst = 0;
653 t1->doff = sizeof(*t1)/4;
654 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
655
656
657
658
659
660
661 if (skb_peek(&sk->write_queue) != NULL)
662 {
663 buff->free = 0;
664 if (buff->next != NULL)
665 {
666 printk("tcp_send_fin: next != NULL\n");
667 skb_unlink(buff);
668 }
669 skb_queue_tail(&sk->write_queue, buff);
670 }
671 else
672 {
673 sk->sent_seq = sk->write_seq;
674 sk->prot->queue_xmit(sk, dev, buff, 0);
675 tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
676 }
677 }
678
679
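/*
 * tcp_send_synack() answers a connection request: it sends a SYN-ACK on
 * behalf of the newly created socket, advertising our MSS as a TCP
 * option, and queues the original SYN on the listening socket's receive
 * queue so that accept() can pick the new connection up.
 */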
680 void tcp_send_synack(struct sock * newsk, struct sock * sk, struct sk_buff * skb)
681 {
682 struct tcphdr *t1;
683 unsigned char *ptr;
684 struct sk_buff * buff;
685 struct device *ndev=NULL;
686 int tmp;
687
688 buff = sock_wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
689 if (buff == NULL)
690 {
691 sk->err = ENOMEM;
692 newsk->dead = 1;
693 newsk->state = TCP_CLOSE;
694
695 release_sock(newsk);
696 kfree_skb(skb, FREE_READ);
697 tcp_statistics.TcpAttemptFails++;
698 return;
699 }
700
701 buff->sk = newsk;
702 buff->localroute = newsk->localroute;
703
704
705
706
707
708 tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &ndev,
709 IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl,&newsk->ip_route_cache);
710
711
712
713
714
715 if (tmp < 0)
716 {
717 sk->err = tmp;
718 buff->free = 1;
719 kfree_skb(buff,FREE_WRITE);
720 newsk->dead = 1;
721 newsk->state = TCP_CLOSE;
722 release_sock(newsk);
723 skb->sk = sk;
724 kfree_skb(skb, FREE_READ);
725 tcp_statistics.TcpAttemptFails++;
726 return;
727 }
728
729 t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
730
731 memcpy(t1, skb->h.th, sizeof(*t1));
732 buff->seq = newsk->write_seq++;
733 buff->end_seq = newsk->write_seq;
734
735
736
737 t1->dest = skb->h.th->source;
738 t1->source = newsk->dummy_th.source;
739 t1->seq = htonl(buff->seq);
740 t1->ack = 1;
741 newsk->sent_seq = newsk->write_seq;
742 t1->window = htons(tcp_select_window(newsk));
743 t1->res1 = 0;
744 t1->res2 = 0;
745 t1->rst = 0;
746 t1->urg = 0;
747 t1->psh = 0;
748 t1->syn = 1;
749 t1->ack_seq = htonl(newsk->acked_seq);
750 t1->doff = sizeof(*t1)/4+1;
751 ptr = skb_put(buff,4);
752 ptr[0] = 2;
753 ptr[1] = 4;
754 ptr[2] = ((newsk->mtu) >> 8) & 0xff;
755 ptr[3] =(newsk->mtu) & 0xff;
756
757 tcp_send_check(t1, newsk->saddr, newsk->daddr, sizeof(*t1)+4, newsk);
758 newsk->prot->queue_xmit(newsk, ndev, buff, 0);
759 tcp_reset_xmit_timer(newsk, TIME_WRITE , TCP_TIMEOUT_INIT);
760 skb->sk = newsk;
761
762
763
764
765
766 sk->rmem_alloc -= skb->truesize;
767 newsk->rmem_alloc += skb->truesize;
768
769 skb_queue_tail(&sk->receive_queue,skb);
770 sk->ack_backlog++;
771 release_sock(newsk);
772 tcp_statistics.TcpOutSegs++;
773 }
774
775
776
777
778
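/*
 * tcp_send_ack() transmits an ACK carrying the given sequence and
 * acknowledgment numbers.  If no buffer can be allocated the ACK is left
 * pending in sk->ack_backlog and a short retransmit timer is set so that
 * it will be retried.
 */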
779 void tcp_send_ack(u32 sequence, u32 ack,
780 struct sock *sk,
781 struct tcphdr *th, u32 daddr)
782 {
783 struct sk_buff *buff;
784 struct tcphdr *t1;
785 struct device *dev = NULL;
786 int tmp;
787
788 if(sk->zapped)
789 return;
790
791
792
793
794
795
796 buff = sock_wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
797 if (buff == NULL)
798 {
799
800
801
802
803
804
805
806 sk->ack_backlog++;
807 if (sk->ip_xmit_timeout != TIME_WRITE && tcp_connected(sk->state))
808 {
809 tcp_reset_xmit_timer(sk, TIME_WRITE, HZ);
810 }
811 return;
812 }
813
814
815
816
817
818 buff->sk = sk;
819 buff->localroute = sk->localroute;
820
821
822
823
824
825 tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
826 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
827 if (tmp < 0)
828 {
829 buff->free = 1;
830 sock_wfree(sk, buff);
831 return;
832 }
833 t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
834
835 memcpy(t1, th, sizeof(*t1));
836
837
838
839
840
841 t1->dest = th->source;
842 t1->source = th->dest;
843 t1->seq = htonl(sequence);
844 t1->ack = 1;
845 sk->window = tcp_select_window(sk);
846 t1->window = htons(sk->window);
847 t1->res1 = 0;
848 t1->res2 = 0;
849 t1->rst = 0;
850 t1->urg = 0;
851 t1->syn = 0;
852 t1->psh = 0;
853 t1->fin = 0;
854
855
856
857
858
859
860
861 if (ack == sk->acked_seq) {
862 sk->ack_backlog = 0;
863 sk->bytes_rcv = 0;
864 sk->ack_timed = 0;
865
866 if (sk->send_head == NULL && skb_peek(&sk->write_queue) == NULL
867 && sk->ip_xmit_timeout == TIME_WRITE)
868 if(sk->keepopen)
869 tcp_reset_xmit_timer(sk,TIME_KEEPOPEN,TCP_TIMEOUT_LEN);
870 else
871 delete_timer(sk);
872 }
873
874
875
876
877
878 t1->ack_seq = htonl(ack);
879 t1->doff = sizeof(*t1)/4;
880 tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), sk);
881 if (sk->debug)
882 printk("\rtcp_ack: seq %x ack %x\n", sequence, ack);
883 sk->prot->queue_xmit(sk, dev, buff, 1);
884 tcp_statistics.TcpOutSegs++;
885 }
886
887
888
889
890
891
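/*
 * tcp_write_wakeup() probes a peer that has been advertising a closed
 * window.  If the window has opened and data is queued, a copy of the
 * first unsent bytes that now fit is transmitted; otherwise a bare ACK
 * with an old sequence number (sk->sent_seq - 1) is sent to provoke the
 * peer into re-announcing its window.
 */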
892 void tcp_write_wakeup(struct sock *sk)
893 {
894 struct sk_buff *buff,*skb;
895 struct tcphdr *t1;
896 struct device *dev=NULL;
897 int tmp;
898
899 if (sk->zapped)
900 return;
901
902
903
904
905
906
907
908 if (sk->state != TCP_ESTABLISHED &&
909 sk->state != TCP_CLOSE_WAIT &&
910 sk->state != TCP_FIN_WAIT1 &&
911 sk->state != TCP_LAST_ACK &&
912 sk->state != TCP_CLOSING
913 )
914 {
915 return;
916 }
917 if ( before(sk->sent_seq, sk->window_seq) &&
918 (skb=skb_peek(&sk->write_queue)))
919 {
920
921
922
923
924
925
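/*
 * Build a new buffer holding a copy of the first queued segment's TCP
 * header plus however much of its data the freshly opened window allows
 * (win_size bytes), and transmit that as the probe.
 */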
926 struct iphdr *iph;
927 struct tcphdr *th;
928 struct tcphdr *nth;
929 unsigned long win_size;
930 #if 0
931 unsigned long ow_size;
932 #endif
933 void * tcp_data_start;
934
935
936
937
938
939 win_size = sk->window_seq - sk->sent_seq;
940
941
942
943
944
945 iph = (struct iphdr *)skb->ip_hdr;
946 th = (struct tcphdr *)(((char *)iph) +(iph->ihl << 2));
947
948
949
950
951
952 buff = sock_wmalloc(sk, win_size + th->doff * 4 +
953 (iph->ihl << 2) +
954 sk->prot->max_header + 15,
955 1, GFP_ATOMIC);
956 if ( buff == NULL )
957 return;
958
959
960
961
962
963
964 buff->free = 1;
965
966 buff->sk = sk;
967 buff->localroute = sk->localroute;
968
969
970
971
972
973 tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
974 IPPROTO_TCP, sk->opt, buff->truesize,
975 sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
976 if (tmp < 0)
977 {
978 sock_wfree(sk, buff);
979 return;
980 }
981
982
983
984
985
986 buff->dev = dev;
987
988 nth = (struct tcphdr *) skb_put(buff,th->doff*4);
989
990 memcpy(nth, th, th->doff * 4);
991
992
993
994
995
996 nth->ack = 1;
997 nth->ack_seq = htonl(sk->acked_seq);
998 nth->window = htons(tcp_select_window(sk));
999 nth->check = 0;
1000
1001
1002
1003
1004
1005 tcp_data_start = (char *) th + (th->doff << 2);
1006
1007
1008
1009
1010
1011 memcpy(skb_put(buff,win_size), tcp_data_start, win_size);
1012
1013
1014
1015
1016
1017 buff->end_seq = sk->sent_seq + win_size;
1018 sk->sent_seq = buff->end_seq;
1019 if(th->urg && ntohs(th->urg_ptr) < win_size)
1020 nth->urg = 0;
1021
1022
1023
1024
1025
1026 tcp_send_check(nth, sk->saddr, sk->daddr,
1027 nth->doff * 4 + win_size , sk);
1028 }
1029 else
1030 {
1031 buff = sock_wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
1032 if (buff == NULL)
1033 return;
1034
1035 buff->free = 1;
1036 buff->sk = sk;
1037 buff->localroute = sk->localroute;
1038
1039
1040
1041
1042
1043 tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
1044 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
1045 if (tmp < 0)
1046 {
1047 sock_wfree(sk, buff);
1048 return;
1049 }
1050
1051 t1 = (struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
1052 memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
1053
1054
1055
1056
1057
1058
1059 t1->seq = htonl(sk->sent_seq-1);
1060 t1->ack = 1;
1061 t1->res1= 0;
1062 t1->res2= 0;
1063 t1->rst = 0;
1064 t1->urg = 0;
1065 t1->psh = 0;
1066 t1->fin = 0;
1067 t1->syn = 0;
1068 t1->ack_seq = htonl(sk->acked_seq);
1069 t1->window = htons(tcp_select_window(sk));
1070 t1->doff = sizeof(*t1)/4;
1071 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
1072
1073 }
1074
1075
1076
1077
1078
1079 sk->prot->queue_xmit(sk, dev, buff, 1);
1080 tcp_statistics.TcpOutSegs++;
1081 }
1082
1083
1084
1085
1086
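/*
 * tcp_send_probe0() is the zero-window probe timer handler: it sends a
 * probe via tcp_write_wakeup() and backs the probe interval off
 * exponentially, capped at 120 seconds.
 */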
1087 void tcp_send_probe0(struct sock *sk)
1088 {
1089 if (sk->zapped)
1090 return;
1091
1092 tcp_write_wakeup(sk);
1093
1094 sk->backoff++;
1095 sk->rto = min(sk->rto << 1, 120*HZ);
1096 sk->retransmits++;
1097 sk->prot->retransmits ++;
1098 tcp_reset_xmit_timer (sk, TIME_PROBE0, sk->rto);
1099 }