This source file includes the following definitions:
- tcp_new_window
- clear_delayed_acks
- tcp_send_skb
- tcp_dequeue_partial
- tcp_send_partial
- tcp_enqueue_partial
- tcp_write_xmit
- tcp_do_retransmit
- tcp_send_reset
- tcp_send_fin
- tcp_send_synack
- tcp_send_delayed_ack
- tcp_send_ack
- tcp_write_wakeup
- tcp_send_probe0
23 #include <linux/config.h>
24 #include <net/tcp.h>
25
26 #include <linux/interrupt.h>
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56 int tcp_new_window(struct sock * sk)
57 {
58 unsigned long window;
59 unsigned long minwin, maxwin;
60
61
62 minwin = sk->mss;
63 if (!minwin)
64 minwin = sk->mtu;
65 maxwin = sk->window_clamp;
66 if (!maxwin)
67 maxwin = MAX_WINDOW;
68 if (minwin > maxwin/2)
69 minwin = maxwin/2;
70
71
72 window = sk->rcvbuf/2;
73 if (window < minwin) {
74 sk->rcvbuf = minwin*2;
75 window = minwin;
76 }
77
78
79 window -= sk->rmem_alloc/2;
80 if ((long)(window - minwin) < 0)
81 window = 0;
82
83 if (window > 1023)
84 window &= ~1023;
85 if (window > maxwin)
86 window = maxwin;
87 return window;
88 }
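
The window calculation above can be exercised outside the kernel. The following is a minimal user-space sketch of the same sequence of clamps; new_window(), its flat parameter list, the MAX_WINDOW value of 32767 and the numbers in main() are assumptions of the example, not kernel interfaces.

    #include <stdio.h>

    #define MAX_WINDOW 32767    /* assumed fallback clamp when window_clamp is unset */

    /* Stand-alone rework of the arithmetic in tcp_new_window(): the same clamps
     * applied to plain parameters instead of struct sock fields. */
    static unsigned long new_window(unsigned long mss, unsigned long mtu,
                                    unsigned long window_clamp,
                                    unsigned long rcvbuf, unsigned long rmem_alloc)
    {
            unsigned long window, minwin, maxwin;

            minwin = mss ? mss : mtu;             /* smallest useful offer: one segment */
            maxwin = window_clamp ? window_clamp : MAX_WINDOW;
            if (minwin > maxwin / 2)
                    minwin = maxwin / 2;

            window = rcvbuf / 2;                  /* never offer more than half the receive buffer */
            if (window < minwin)
                    window = minwin;              /* (the kernel also grows sk->rcvbuf here) */

            window -= rmem_alloc / 2;             /* back off for data already queued on the socket */
            if ((long)(window - minwin) < 0)      /* less than one segment left: offer nothing */
                    window = 0;

            if (window > 1023)
                    window &= ~1023UL;            /* round down to a 1K multiple */
            if (window > maxwin)
                    window = maxwin;
            return window;
    }

    int main(void)
    {
            /* Illustrative values only: 1460-byte MSS, 16KB rcvbuf, 4KB already queued. */
            printf("window = %lu\n", new_window(1460, 1500, 0, 16384, 4096));  /* prints 6144 */
            return 0;
    }

The 1K rounding keeps the advertised window from changing in tiny increments from one ACK to the next.
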
89
90
91
92
93 static __inline__ void clear_delayed_acks(struct sock * sk)
94 {
95 sk->ack_timed = 0;
96 sk->ack_backlog = 0;
97 sk->bytes_rcv = 0;
98 del_timer(&sk->delack_timer);
99 }
100
101
102
103
104
105
106 void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
107 {
108 int size;
109 struct tcphdr * th = skb->h.th;
110
111
112
113
114
115 size = skb->len - ((unsigned char *) th - skb->data);
116
117
118
119
120
121 if (size < sizeof(struct tcphdr) || size > skb->len)
122 {
123 printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
124 skb, skb->data, th, skb->len);
125 kfree_skb(skb, FREE_WRITE);
126 return;
127 }
128
129
130
131
132
133
134 if (size == sizeof(struct tcphdr))
135 {
136
137 if(!th->syn && !th->fin)
138 {
139 printk("tcp_send_skb: attempt to queue a bogon.\n");
140 kfree_skb(skb,FREE_WRITE);
141 return;
142 }
143 }
144
145
146
147
148
149 tcp_statistics.TcpOutSegs++;
150 skb->seq = ntohl(th->seq);
151 skb->end_seq = skb->seq + size - 4*th->doff;
152
153
154
155
156
157
158
159
160
161 if (after(skb->end_seq, sk->window_seq) ||
162 (sk->retransmits && sk->ip_xmit_timeout == TIME_WRITE) ||
163 sk->packets_out >= sk->cong_window)
164 {
165
166
167 th->check = 0;
168 if (skb->next != NULL)
169 {
170 printk("tcp_send_skb: next != NULL\n");
171 skb_unlink(skb);
172 }
173 skb_queue_tail(&sk->write_queue, skb);
174
175 if (before(sk->window_seq, sk->write_queue.next->end_seq) &&
176 sk->send_head == NULL && sk->ack_backlog == 0)
177 tcp_reset_xmit_timer(sk, TIME_PROBE0, sk->rto);
178 }
179 else
180 {
181
182
183
184 clear_delayed_acks(sk);
185 th->ack_seq = htonl(sk->acked_seq);
186 th->window = htons(tcp_select_window(sk));
187
188 tcp_send_check(th, sk->saddr, sk->daddr, size, skb);
189
190 sk->sent_seq = sk->write_seq;
191
192
193
194
195
196
197
198 sk->prot->queue_xmit(sk, skb->dev, skb, 0);
199
200
201
202
203
204
205
206 tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
207 }
208 }
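
tcp_send_skb() either hands the segment to IP at once or parks it on the write queue. The decision boils down to three tests, restated below as a user-space predicate; struct conn, must_queue() and the sample values are inventions of this sketch, and after() is copied from the kernel's sequence-arithmetic helpers.

    #include <stdio.h>

    typedef unsigned int u32;

    /* Sequence-space comparison as in the kernel headers: handles 32-bit wrap. */
    static int after(u32 seq1, u32 seq2) { return (int)(seq2 - seq1) < 0; }

    /* Simplified stand-in for the fields tcp_send_skb() consults. */
    struct conn {
            u32 end_seq;          /* last sequence number of the freshly built segment */
            u32 window_seq;       /* right edge of the window the peer has offered     */
            int retransmitting;   /* sk->retransmits && sk->ip_xmit_timeout == TIME_WRITE */
            unsigned long packets_out;
            unsigned long cong_window;
    };

    static int must_queue(const struct conn *c)
    {
            if (after(c->end_seq, c->window_seq))
                    return 1;     /* segment would overrun the offered window          */
            if (c->retransmitting)
                    return 1;     /* no new data while a retransmission is outstanding */
            if (c->packets_out >= c->cong_window)
                    return 1;     /* congestion window is already full                 */
            return 0;
    }

    int main(void)
    {
            struct conn ok   = { 1000, 2000, 0, 3, 4 };
            struct conn full = { 1000, 2000, 0, 4, 4 };

            printf("queue? %s\n", must_queue(&ok)   ? "yes" : "no");   /* no  */
            printf("queue? %s\n", must_queue(&full) ? "yes" : "no");   /* yes */
            return 0;
    }
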
209
210
211
212
213
214
215
216
217
218
219 struct sk_buff * tcp_dequeue_partial(struct sock * sk)
220 {
221 struct sk_buff * skb;
222 unsigned long flags;
223
224 save_flags(flags);
225 cli();
226 skb = sk->partial;
227 if (skb) {
228 sk->partial = NULL;
229 del_timer(&sk->partial_timer);
230 }
231 restore_flags(flags);
232 return skb;
233 }
234
235
236
237
238
239 void tcp_send_partial(struct sock *sk)
240 {
241 struct sk_buff *skb;
242
243 if (sk == NULL)
244 return;
245 while ((skb = tcp_dequeue_partial(sk)) != NULL)
246 tcp_send_skb(sk, skb);
247 }
248
249
250
251
252
253 void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk)
254 {
255 struct sk_buff * tmp;
256 unsigned long flags;
257
258 save_flags(flags);
259 cli();
260 tmp = sk->partial;
261 if (tmp)
262 del_timer(&sk->partial_timer);
263 sk->partial = skb;
264 init_timer(&sk->partial_timer);
265
266
267
268 sk->partial_timer.expires = jiffies+HZ;
269 sk->partial_timer.function = (void (*)(unsigned long)) tcp_send_partial;
270 sk->partial_timer.data = (unsigned long) sk;
271 add_timer(&sk->partial_timer);
272 restore_flags(flags);
273 if (tmp)
274 tcp_send_skb(sk, tmp);
275 }
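
The partial-packet mechanism above coalesces small writes: at most one under-sized segment sits in sk->partial, a one-second timer flushes it, and queueing a new partial forces the old one out. Below is a user-space mock of that hand-off; struct pkt, struct conn and the function names are illustrative, and the timer itself is only noted in comments.

    #include <stdio.h>
    #include <stddef.h>

    /* A user-space mock of the partial-packet hand-off implemented above.
     * "partial" holds at most one under-sized segment; queueing a new one forces
     * the old one out, and a one-second timer (not modelled here) flushes
     * whatever is still pending.  All names are illustrative, not kernel API. */
    struct pkt  { int bytes; };
    struct conn { struct pkt *partial; };

    static void send_now(struct pkt *p)
    {
            printf("transmitting %d byte segment\n", p->bytes);
    }

    static void enqueue_partial(struct conn *c, struct pkt *p)
    {
            struct pkt *old = c->partial;

            c->partial = p;             /* the new partial segment takes the slot      */
            /* kernel: (re)arm sk->partial_timer for jiffies + HZ at this point        */
            if (old)
                    send_now(old);      /* the displaced segment is sent immediately   */
    }

    static struct pkt *dequeue_partial(struct conn *c)
    {
            struct pkt *p = c->partial;

            c->partial = NULL;          /* kernel also deletes sk->partial_timer here  */
            return p;
    }

    int main(void)
    {
            struct conn c = { NULL };
            struct pkt a = { 100 }, b = { 60 };
            struct pkt *left;

            enqueue_partial(&c, &a);    /* nothing pending: just parked                */
            enqueue_partial(&c, &b);    /* displaces and transmits the 100-byte one    */
            left = dequeue_partial(&c); /* what tcp_send_partial()/the timer would do  */
            if (left)
                    send_now(left);
            return 0;
    }
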
276
277
278
279
280
281
282
283 void tcp_write_xmit(struct sock *sk)
284 {
285 struct sk_buff *skb;
286
287
288
289
290
291
292 if(sk->zapped)
293 return;
294
295
296
297
298
299
300
301
302
303 while((skb = skb_peek(&sk->write_queue)) != NULL &&
304 !after(skb->end_seq, sk->window_seq) &&
305 (sk->retransmits == 0 ||
306 sk->ip_xmit_timeout != TIME_WRITE ||
307 !after(skb->end_seq, sk->rcv_ack_seq))
308 && sk->packets_out < sk->cong_window)
309 {
310 IS_SKB(skb);
311 skb_unlink(skb);
312
313
314
315
316
317 if (before(skb->end_seq, sk->rcv_ack_seq +1))
318 {
319
320
321
322
323
324 sk->retransmits = 0;
325 kfree_skb(skb, FREE_WRITE);
326 if (!sk->dead)
327 sk->write_space(sk);
328 }
329 else
330 {
331 struct tcphdr *th;
332 struct iphdr *iph;
333 int size;
334
335
336
337
338
339
340
341 iph = skb->ip_hdr;
342 th = (struct tcphdr *)(((char *)iph) +(iph->ihl << 2));
343 size = skb->len - (((unsigned char *) th) - skb->data);
344 #ifndef CONFIG_NO_PATH_MTU_DISCOVERY
345 if (size > sk->mtu - sizeof(struct iphdr))
346 {
347 iph->frag_off &= ~htons(IP_DF);
348 ip_send_check(iph);
349 }
350 #endif
351
352 th->ack_seq = htonl(sk->acked_seq);
353 th->window = htons(tcp_select_window(sk));
354
355 tcp_send_check(th, sk->saddr, sk->daddr, size, skb);
356
357 sk->sent_seq = skb->end_seq;
358
359
360
361
362
363 sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
364
365 clear_delayed_acks(sk);
366
367
368
369
370
371 tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
372 }
373 }
374 }
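
The loop above leans entirely on the before()/after() sequence comparisons, which are signed 32-bit subtractions so that wraparound is handled. The snippet below reproduces those helpers (their definition matches the kernel headers of this era) and tries the wrap case that a plain '<' would get wrong.

    #include <stdio.h>

    typedef unsigned int u32;

    /* Sequence-number comparisons done through signed subtraction. */
    static int before(u32 a, u32 b) { return (int)(a - b) < 0; }
    static int after(u32 a, u32 b)  { return (int)(b - a) < 0; }

    int main(void)
    {
            u32 end_seq = 0x00000010;     /* segment ends just past the wrap point  */
            u32 ack_seq = 0xfffffff0;     /* peer has acked up to just before it    */

            /* tcp_write_xmit drops a queued segment when the peer's ACK already
             * covers it: before(skb->end_seq, sk->rcv_ack_seq + 1).              */
            printf("already acked? %s\n",
                   before(end_seq, ack_seq + 1) ? "yes" : "no");    /* no  */
            printf("new data beyond ack? %s\n",
                   after(end_seq, ack_seq) ? "yes" : "no");         /* yes */
            return 0;
    }

This is why the code never compares sequence numbers with a bare relational operator.
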
375
376
377
378
379
380
381
382 void tcp_do_retransmit(struct sock *sk, int all)
383 {
384 struct sk_buff * skb;
385 struct proto *prot;
386 struct device *dev;
387 int ct=0;
388 struct rtable *rt;
389
390 prot = sk->prot;
391 skb = sk->send_head;
392
393 while (skb != NULL)
394 {
395 struct tcphdr *th;
396 struct iphdr *iph;
397 int size;
398
399 dev = skb->dev;
400 IS_SKB(skb);
401 skb->when = jiffies;
402
403
404
405
406
407
408
409
410
411
412
413 if (skb_device_locked(skb))
414 break;
415
416
417
418
419
420 skb_pull(skb,((unsigned char *)skb->ip_hdr)-skb->data);
421
422
423
424
425
426
427
428
429
430
431 iph = (struct iphdr *)skb->data;
432 th = (struct tcphdr *)(((char *)iph) + (iph->ihl << 2));
433 size = ntohs(iph->tot_len) - (iph->ihl<<2);
434
435
436
437
438
439
440
441
442
443
444 {
445
446
447 struct options * opt = (struct options*)skb->proto_priv;
448 rt = ip_check_route(&sk->ip_route_cache, opt->srr?opt->faddr:iph->daddr, skb->localroute);
449 }
450
451 iph->id = htons(ip_id_count++);
452 #ifndef CONFIG_NO_PATH_MTU_DISCOVERY
453 if (rt && ntohs(iph->tot_len) > rt->rt_mtu)
454 iph->frag_off &= ~htons(IP_DF);
455 #endif
456 ip_send_check(iph);
457
458 if (rt==NULL)
459 {
460 if(skb->sk)
461 {
462 skb->sk->err_soft=ENETUNREACH;
463 skb->sk->error_report(skb->sk);
464 }
465 }
466 else
467 {
468 dev=rt->rt_dev;
469 skb->raddr=rt->rt_gateway;
470 skb->dev=dev;
471 skb->arp=1;
472 if (rt->rt_hh)
473 {
474 memcpy(skb_push(skb,dev->hard_header_len),rt->rt_hh->hh_data,dev->hard_header_len);
475 if (!rt->rt_hh->hh_uptodate)
476 {
477 skb->arp = 0;
478 #if RT_CACHE_DEBUG >= 2
479 printk("tcp_do_retransmit: hh miss %08x via %08x\n", iph->daddr, rt->rt_gateway);
480 #endif
481 }
482 }
483 else if (dev->hard_header)
484 {
485 if(dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, skb->len)<0)
486 skb->arp=0;
487 }
488
489
490
491
492
493
494
495
496
497
498
499
500 th->ack_seq = htonl(sk->acked_seq);
501 clear_delayed_acks(sk);
502 th->window = ntohs(tcp_select_window(sk));
503 tcp_send_check(th, sk->saddr, sk->daddr, size, skb);
504
505
506
507
508
509 if (dev->flags & IFF_UP)
510 {
511
512
513
514
515
516
517
518
519 if (sk && !skb_device_locked(skb))
520 {
521
522 skb_unlink(skb);
523
524 ip_statistics.IpOutRequests++;
525 dev_queue_xmit(skb, dev, sk->priority);
526 }
527 }
528 }
529
530
531
532
533
534 ct++;
535 sk->retransmits++;
536 sk->prot->retransmits++;
537 tcp_statistics.TcpRetransSegs++;
538
539
540
541
542
543
544 if (!all)
545 break;
546
547
548
549
550
551 if (ct >= sk->cong_window)
552 break;
553 skb = skb->link3;
554 }
555 }
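
tcp_do_retransmit() walks the retransmit queue that hangs off sk->send_head, linked through link3, and resends at most cong_window segments per call (or just the head when 'all' is zero). A stripped-down user-space model of that walk follows; struct seg and retransmit_queue() are stand-ins, not kernel structures.

    #include <stdio.h>
    #include <stddef.h>

    struct seg {
            int         id;
            struct seg *link3;       /* next segment awaiting acknowledgement */
    };

    static void retransmit_queue(struct seg *send_head, int all, unsigned long cong_window)
    {
            struct seg   *skb = send_head;
            unsigned long ct  = 0;

            while (skb != NULL) {
                    printf("retransmit segment %d\n", skb->id);
                    ct++;
                    if (!all)                /* caller only wanted the head resent */
                            break;
                    if (ct >= cong_window)   /* stay within the congestion window  */
                            break;
                    skb = skb->link3;
            }
    }

    int main(void)
    {
            struct seg s3 = { 3, NULL }, s2 = { 2, &s3 }, s1 = { 1, &s2 };

            retransmit_queue(&s1, 1, 2);     /* resends segments 1 and 2 only */
            return 0;
    }
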
556
557
558
559
560
561 void tcp_send_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
562 struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
563 {
564 struct sk_buff *buff;
565 struct tcphdr *t1;
566 int tmp;
567 struct device *ndev=NULL;
568
569
570
571
572
573 if(th->rst)
574 return;
575
576
577
578
579
580
581 buff = sock_wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
582 if (buff == NULL)
583 return;
584
585 buff->sk = NULL;
586 buff->dev = dev;
587 buff->localroute = 0;
588 buff->csum = 0;
589
590
591
592
593
594 tmp = prot->build_header(buff, saddr, daddr, &ndev, IPPROTO_TCP, opt,
595 sizeof(struct tcphdr),tos,ttl,NULL);
596 if (tmp < 0)
597 {
598 buff->free = 1;
599 sock_wfree(NULL, buff);
600 return;
601 }
602
603 t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
604 memset(t1, 0, sizeof(*t1));
605
606
607
608
609
610 t1->dest = th->source;
611 t1->source = th->dest;
612 t1->doff = sizeof(*t1)/4;
613 t1->rst = 1;
614
615 if(th->ack)
616 {
617 t1->seq = th->ack_seq;
618 }
619 else
620 {
621 t1->ack = 1;
622 if(!th->syn)
623 t1->ack_seq = th->seq;
624 else
625 t1->ack_seq = htonl(ntohl(th->seq)+1);
626 }
627
628 tcp_send_check(t1, saddr, daddr, sizeof(*t1), buff);
629 prot->queue_xmit(NULL, ndev, buff, 1);
630 tcp_statistics.TcpOutSegs++;
631 }
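
The seq/ack fields chosen for the RST above follow RFC 793's reset-generation rules. The sketch below restates the choice in host byte order with plain parameters; struct rst and reset_numbers() exist only for this example.

    #include <stdio.h>

    typedef unsigned int u32;

    struct rst { u32 seq; int ack; u32 ack_seq; };

    /* Pick the RST's sequence/ack fields from the offending segment's fields. */
    static struct rst reset_numbers(int in_ack, u32 in_ack_seq, int in_syn, u32 in_seq)
    {
            struct rst r = { 0, 0, 0 };

            if (in_ack) {
                    r.seq = in_ack_seq;      /* RST takes its SEQ from the offending ACK  */
            } else {
                    r.ack = 1;               /* otherwise the RST carries an ACK...       */
                    r.ack_seq = in_seq + (in_syn ? 1 : 0);  /* ...covering what was received */
            }
            return r;
    }

    int main(void)
    {
            struct rst r = reset_numbers(0, 0, 1, 1000);    /* reply to a stray SYN */

            printf("seq=%u ack=%d ack_seq=%u\n", r.seq, r.ack, r.ack_seq);  /* 0 1 1001 */
            return 0;
    }
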
632
633
634
635
636
637 void tcp_send_fin(struct sock *sk)
638 {
639 struct proto *prot =(struct proto *)sk->prot;
640 struct tcphdr *th =(struct tcphdr *)&sk->dummy_th;
641 struct tcphdr *t1;
642 struct sk_buff *buff;
643 struct device *dev=NULL;
644 int tmp;
645
646 buff = sock_wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
647
648 if (buff == NULL)
649 {
650
651 printk("tcp_send_fin: Impossible malloc failure");
652 return;
653 }
654
655
656
657
658
659 buff->sk = sk;
660 buff->localroute = sk->localroute;
661 buff->csum = 0;
662
663
664
665
666
667 tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
668 IPPROTO_TCP, sk->opt,
669 sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
670 if (tmp < 0)
671 {
672 int t;
673
674
675
676
677
678 buff->free = 1;
679 sock_wfree(sk,buff);
680 sk->write_seq++;
681 t=del_timer(&sk->timer);
682 if(t)
683 add_timer(&sk->timer);
684 else
685 tcp_reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
686 return;
687 }
688
689
690
691
692
693
694 t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
695 buff->dev = dev;
696 memcpy(t1, th, sizeof(*t1));
697 buff->seq = sk->write_seq;
698 sk->write_seq++;
699 buff->end_seq = sk->write_seq;
700 t1->seq = htonl(buff->seq);
701 t1->ack_seq = htonl(sk->acked_seq);
702 t1->window = htons(tcp_select_window(sk));
703 t1->fin = 1;
704 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);
705
706
707
708
709
710
711 if (skb_peek(&sk->write_queue) != NULL)
712 {
713 buff->free = 0;
714 if (buff->next != NULL)
715 {
716 printk("tcp_send_fin: next != NULL\n");
717 skb_unlink(buff);
718 }
719 skb_queue_tail(&sk->write_queue, buff);
720 }
721 else
722 {
723 sk->sent_seq = sk->write_seq;
724 sk->prot->queue_xmit(sk, dev, buff, 0);
725 tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
726 }
727 }
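
Two details of tcp_send_fin() are easy to miss: the FIN consumes one unit of sequence space (write_seq is advanced past it), and it is transmitted immediately only when the write queue is empty, otherwise it is appended so it leaves in order. A toy restatement, with invented struct and function names, follows.

    #include <stdio.h>

    typedef unsigned int u32;

    struct conn { u32 write_seq; int write_queue_nonempty; };

    static void send_fin(struct conn *c)
    {
            u32 fin_seq = c->write_seq;   /* SEQ carried by the FIN segment              */

            c->write_seq++;               /* the FIN itself consumes one sequence number */
            if (c->write_queue_nonempty)
                    printf("FIN (seq %u) queued behind pending data\n", fin_seq);
            else
                    printf("FIN (seq %u) transmitted immediately\n", fin_seq);
    }

    int main(void)
    {
            struct conn c = { 42000, 1 }; /* illustrative values */

            send_fin(&c);
            printf("next write_seq is %u\n", c.write_seq);   /* 42001 */
            return 0;
    }
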
728
729
730 void tcp_send_synack(struct sock * newsk, struct sock * sk, struct sk_buff * skb)
731 {
732 struct tcphdr *t1;
733 unsigned char *ptr;
734 struct sk_buff * buff;
735 struct device *ndev=NULL;
736 int tmp;
737
738 buff = sock_wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
739 if (buff == NULL)
740 {
741 sk->err = ENOMEM;
742 destroy_sock(newsk);
743 kfree_skb(skb, FREE_READ);
744 tcp_statistics.TcpAttemptFails++;
745 return;
746 }
747
748 buff->sk = newsk;
749 buff->localroute = newsk->localroute;
750
751
752
753
754
755 tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &ndev,
756 IPPROTO_TCP, newsk->opt, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl,&newsk->ip_route_cache);
757
758
759
760
761
762 if (tmp < 0)
763 {
764 sk->err = tmp;
765 buff->free = 1;
766 kfree_skb(buff,FREE_WRITE);
767 destroy_sock(newsk);
768 skb->sk = sk;
769 kfree_skb(skb, FREE_READ);
770 tcp_statistics.TcpAttemptFails++;
771 return;
772 }
773
774 t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
775
776 memcpy(t1, skb->h.th, sizeof(*t1));
777 buff->seq = newsk->write_seq++;
778 buff->end_seq = newsk->write_seq;
779
780
781
782 t1->dest = skb->h.th->source;
783 t1->source = newsk->dummy_th.source;
784 t1->seq = ntohl(buff->seq);
785 newsk->sent_seq = newsk->write_seq;
786 t1->window = ntohs(tcp_select_window(newsk));
787 t1->syn = 1;
788 t1->ack = 1;
789 t1->urg = 0;
790 t1->rst = 0;
791 t1->psh = 0;
792 t1->ack_seq = htonl(newsk->acked_seq);
793 t1->doff = sizeof(*t1)/4+1;
794 ptr = skb_put(buff,4);
795 ptr[0] = 2;
796 ptr[1] = 4;
797 ptr[2] = ((newsk->mtu) >> 8) & 0xff;
798 ptr[3] =(newsk->mtu) & 0xff;
799 buff->csum = csum_partial(ptr, 4, 0);
800 tcp_send_check(t1, newsk->saddr, newsk->daddr, sizeof(*t1)+4, buff);
801 newsk->prot->queue_xmit(newsk, ndev, buff, 0);
802 tcp_reset_xmit_timer(newsk, TIME_WRITE , TCP_TIMEOUT_INIT);
803 skb->sk = newsk;
804
805
806
807
808
809 atomic_sub(skb->truesize, &sk->rmem_alloc);
810 atomic_add(skb->truesize, &newsk->rmem_alloc);
811
812 skb_queue_tail(&sk->receive_queue,skb);
813 sk->ack_backlog++;
814 tcp_statistics.TcpOutSegs++;
815 }
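
The four bytes appended after the SYN-ACK header are the TCP maximum-segment-size option: kind 2, length 4, then the MSS in network byte order. A stand-alone encoder for the same layout is shown below; encode_mss_option() and the 1460 value are illustrative.

    #include <stdio.h>

    static void encode_mss_option(unsigned char opt[4], unsigned int mss)
    {
            opt[0] = 2;                   /* kind: maximum segment size        */
            opt[1] = 4;                   /* option length, including kind+len */
            opt[2] = (mss >> 8) & 0xff;   /* high byte first (network order)   */
            opt[3] = mss & 0xff;
    }

    int main(void)
    {
            unsigned char opt[4];

            encode_mss_option(opt, 1460);
            printf("%02x %02x %02x %02x\n", opt[0], opt[1], opt[2], opt[3]);  /* 02 04 05 b4 */
            return 0;
    }
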
816
817
818
819
820
821
822
823
824
825 void tcp_send_delayed_ack(struct sock * sk, int max_timeout)
826 {
827 unsigned long timeout, now;
828
829
830 now = jiffies;
831 timeout = sk->ato;
832 if (timeout > max_timeout)
833 timeout = max_timeout;
834 timeout += now;
835 if (sk->bytes_rcv > sk->max_unacked) {
836 timeout = now;
837 mark_bh(TIMER_BH);
838 }
839
840
841 if (!del_timer(&sk->delack_timer) || timeout < sk->delack_timer.expires)
842 sk->delack_timer.expires = timeout;
843
844 sk->ack_backlog++;
845 add_timer(&sk->delack_timer);
846 }
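
The delayed-ACK timer logic above picks an expiry of now plus the adaptive ack timeout (capped by the caller), fires immediately once too much data is unacknowledged, and never pushes an already pending timer later. The helper below, with invented names and jiffies values, restates that arithmetic.

    #include <stdio.h>

    static unsigned long delack_expiry(unsigned long now, unsigned long ato,
                                       unsigned long max_timeout,
                                       unsigned long bytes_rcv, unsigned long max_unacked,
                                       int timer_pending, unsigned long pending_expiry)
    {
            unsigned long timeout = ato;

            if (timeout > max_timeout)
                    timeout = max_timeout;        /* caller caps the delay               */
            timeout += now;
            if (bytes_rcv > max_unacked)
                    timeout = now;                /* too much unacked data: ack at once  */
            if (timer_pending && pending_expiry < timeout)
                    timeout = pending_expiry;     /* keep the earlier deadline           */
            return timeout;
    }

    int main(void)
    {
            unsigned long now = 1000;             /* pretend jiffies value               */

            printf("expires at %lu\n",
                   delack_expiry(now, 25, 50, 0, 4096, 0, 0));   /* now + ato = 1025     */
            return 0;
    }
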
847
848
849
850
851
852
853
854 void tcp_send_ack(struct sock *sk)
855 {
856 struct sk_buff *buff;
857 struct tcphdr *t1;
858 struct device *dev = NULL;
859 int tmp;
860
861 if(sk->zapped)
862 return;
863
864
865
866
867
868
869
870 clear_delayed_acks(sk);
871
872 if (sk->send_head == NULL
873 && skb_queue_empty(&sk->write_queue)
874 && sk->ip_xmit_timeout == TIME_WRITE)
875 {
876 if(sk->keepopen)
877 tcp_reset_xmit_timer(sk,TIME_KEEPOPEN,TCP_TIMEOUT_LEN);
878 else
879 delete_timer(sk);
880 }
881
882
883
884
885
886
887 buff = sock_wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
888 if (buff == NULL)
889 {
890
891
892
893
894
895
896
897 tcp_send_delayed_ack(sk, HZ/2);
898 return;
899 }
900
901
902
903
904
905 buff->sk = sk;
906 buff->localroute = sk->localroute;
907 buff->csum = 0;
908
909
910
911
912
913 tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
914 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
915 if (tmp < 0)
916 {
917 buff->free = 1;
918 sock_wfree(sk, buff);
919 return;
920 }
921 t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
922
923
924
925
926
927 memcpy(t1, &sk->dummy_th, sizeof(*t1));
928 t1->seq = htonl(sk->sent_seq);
929 t1->ack_seq = htonl(sk->acked_seq);
930 t1->window = htons(tcp_select_window(sk));
931
932 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);
933 if (sk->debug)
934 printk("\rtcp_ack: seq %x ack %x\n", sk->sent_seq, sk->acked_seq);
935 sk->prot->queue_xmit(sk, dev, buff, 1);
936 tcp_statistics.TcpOutSegs++;
937 }
938
939
940
941
942
943
944 void tcp_write_wakeup(struct sock *sk)
945 {
946 struct sk_buff *buff,*skb;
947 struct tcphdr *t1;
948 struct device *dev=NULL;
949 int tmp;
950
951 if (sk->zapped)
952 return;
953
954
955
956
957
958
959
960 if (sk->state != TCP_ESTABLISHED &&
961 sk->state != TCP_CLOSE_WAIT &&
962 sk->state != TCP_FIN_WAIT1 &&
963 sk->state != TCP_LAST_ACK &&
964 sk->state != TCP_CLOSING
965 )
966 {
967 return;
968 }
969 if ( before(sk->sent_seq, sk->window_seq) &&
970 (skb=skb_peek(&sk->write_queue)))
971 {
972
973
974
975
976
977
978 struct iphdr *iph;
979 struct tcphdr *th;
980 struct tcphdr *nth;
981 unsigned long win_size;
982 #if 0
983 unsigned long ow_size;
984 #endif
985
986
987
988
989
990 win_size = sk->window_seq - sk->sent_seq;
991
992
993
994
995
996 iph = (struct iphdr *)skb->ip_hdr;
997 th = (struct tcphdr *)(((char *)iph) +(iph->ihl << 2));
998
999
1000
1001
1002
1003 buff = sock_wmalloc(sk, win_size + th->doff * 4 +
1004 (iph->ihl << 2) +
1005 sk->prot->max_header + 15,
1006 1, GFP_ATOMIC);
1007 if ( buff == NULL )
1008 return;
1009
1010
1011
1012
1013
1014
1015 buff->free = 1;
1016
1017 buff->sk = sk;
1018 buff->localroute = sk->localroute;
1019
1020
1021
1022
1023
1024 tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
1025 IPPROTO_TCP, sk->opt, buff->truesize,
1026 sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
1027 if (tmp < 0)
1028 {
1029 sock_wfree(sk, buff);
1030 return;
1031 }
1032
1033
1034
1035
1036
1037 buff->dev = dev;
1038
1039 nth = (struct tcphdr *) skb_put(buff,sizeof(*th));
1040
1041 memcpy(nth, th, sizeof(*th));
1042
1043
1044
1045
1046
1047 nth->ack = 1;
1048 nth->ack_seq = htonl(sk->acked_seq);
1049 nth->window = htons(tcp_select_window(sk));
1050 nth->check = 0;
1051
1052
1053
1054
1055
1056 buff->csum = csum_partial_copy((void *)(th + 1), skb_put(buff,win_size),
1057 win_size + th->doff*4 - sizeof(*th), 0);
1058
1059
1060
1061
1062
1063 buff->end_seq = sk->sent_seq + win_size;
1064 sk->sent_seq = buff->end_seq;
1065 if(th->urg && ntohs(th->urg_ptr) < win_size)
1066 nth->urg = 0;
1067
1068
1069
1070
1071
1072 tcp_send_check(nth, sk->saddr, sk->daddr,
1073 nth->doff * 4 + win_size , buff);
1074 }
1075 else
1076 {
1077 buff = sock_wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
1078 if (buff == NULL)
1079 return;
1080
1081 buff->free = 1;
1082 buff->sk = sk;
1083 buff->localroute = sk->localroute;
1084 buff->csum = 0;
1085
1086
1087
1088
1089
1090 tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
1091 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
1092 if (tmp < 0)
1093 {
1094 sock_wfree(sk, buff);
1095 return;
1096 }
1097
1098 t1 = (struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
1099 memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
1100
1101
1102
1103
1104
1105
1106 t1->seq = htonl(sk->sent_seq-1);
1107
1108 t1->ack_seq = htonl(sk->acked_seq);
1109 t1->window = htons(tcp_select_window(sk));
1110 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);
1111
1112 }
1113
1114
1115
1116
1117
1118 sk->prot->queue_xmit(sk, dev, buff, 1);
1119 tcp_statistics.TcpOutSegs++;
1120 }
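
tcp_write_wakeup() has two cases: if the peer's window has (re)opened and data is queued, a copy of up to win_size bytes is pushed out; otherwise it sends a bare segment whose sequence number sits one below sent_seq, purely to provoke an ACK carrying the current window. The decision is restated below with invented names and sample numbers.

    #include <stdio.h>

    typedef unsigned int u32;

    static int before(u32 a, u32 b) { return (int)(a - b) < 0; }

    static void probe_decision(u32 sent_seq, u32 window_seq, int have_queued_data)
    {
            if (before(sent_seq, window_seq) && have_queued_data) {
                    u32 win_size = window_seq - sent_seq;

                    printf("send up to %u bytes of queued data\n", win_size);
            } else {
                    printf("send empty probe with seq %u\n", sent_seq - 1);
            }
    }

    int main(void)
    {
            probe_decision(5000, 5000, 1);   /* window closed: empty probe, seq 4999 */
            probe_decision(5000, 5300, 1);   /* window reopened: push 300 bytes      */
            return 0;
    }
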
1121
1122
1123
1124
1125
1126 void tcp_send_probe0(struct sock *sk)
1127 {
1128 if (sk->zapped)
1129 return;
1130
1131 tcp_write_wakeup(sk);
1132
1133 sk->backoff++;
1134 sk->rto = min(sk->rto << 1, 120*HZ);
1135 sk->retransmits++;
1136 sk->prot->retransmits ++;
1137 tcp_reset_xmit_timer (sk, TIME_PROBE0, sk->rto);
1138 }
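
Each unanswered probe sent by tcp_send_probe0() doubles the retransmission timeout, capped at 120 seconds. The loop below, assuming an illustrative HZ of 100 and a 3-second starting RTO, prints how that backoff progresses.

    #include <stdio.h>

    #define HZ 100                          /* assumed tick rate, for illustration only */

    int main(void)
    {
            unsigned long rto = 3 * HZ;     /* pretend starting RTO of 3 seconds */
            int backoff;

            for (backoff = 1; backoff <= 8; backoff++) {
                    rto = rto << 1;
                    if (rto > 120 * HZ)
                            rto = 120 * HZ; /* min(rto << 1, 120*HZ)             */
                    printf("probe %d: next timeout %lus\n", backoff, rto / HZ);
            }
            return 0;
    }
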