This source file includes following definitions.
- tcp_cache_zap
- min
- tcp_set_state
- tcp_select_window
- tcp_find_established
- tcp_dequeue_established
- tcp_close_pending
- tcp_time_wait
- tcp_do_retransmit
- reset_xmit_timer
- tcp_retransmit_time
- tcp_retransmit
- tcp_write_timeout
- retransmit_timer
- tcp_err
- tcp_readable
- tcp_listen_select
- tcp_select
- tcp_ioctl
- tcp_check
- tcp_send_check
- tcp_send_skb
- tcp_dequeue_partial
- tcp_send_partial
- tcp_enqueue_partial
- tcp_send_ack
- tcp_build_header
- tcp_write
- tcp_sendto
- tcp_read_wakeup
- cleanup_rbuf
- tcp_read_urg
- tcp_read
- tcp_close_state
- tcp_send_fin
- tcp_shutdown
- tcp_recvfrom
- tcp_reset
- tcp_options
- default_mask
- tcp_init_seq
- tcp_conn_request
- tcp_close
- tcp_write_xmit
- tcp_ack
- tcp_fin
- tcp_data
- tcp_check_urg
- tcp_urg
- tcp_accept
- tcp_connect
- tcp_sequence
- tcp_std_reset
- tcp_rcv
- tcp_write_wakeup
- tcp_send_probe0
- tcp_setsockopt
- tcp_getsockopt
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211 #include <linux/types.h>
212 #include <linux/sched.h>
213 #include <linux/mm.h>
214 #include <linux/time.h>
215 #include <linux/string.h>
216 #include <linux/config.h>
217 #include <linux/socket.h>
218 #include <linux/sockios.h>
219 #include <linux/termios.h>
220 #include <linux/in.h>
221 #include <linux/fcntl.h>
222 #include <linux/inet.h>
223 #include <linux/netdevice.h>
224 #include <net/snmp.h>
225 #include <net/ip.h>
226 #include <net/protocol.h>
227 #include <net/icmp.h>
228 #include <net/tcp.h>
229 #include <net/arp.h>
230 #include <linux/skbuff.h>
231 #include <net/sock.h>
232 #include <net/route.h>
233 #include <linux/errno.h>
234 #include <linux/timer.h>
235 #include <asm/system.h>
236 #include <asm/segment.h>
237 #include <linux/mm.h>
238 #include <net/checksum.h>
239
240
241
242
243
/* MSL (maximum segment lifetime) timer is just the generic socket timer. */
#define reset_msl_timer(x,y,z)	reset_timer(x,y,z)

#define SEQ_TICK 3
unsigned long seq_offset;		/* offset folded into initial sequence numbers */
struct tcp_mib	tcp_statistics;		/* SNMP counters for the TCP MIB */

/*
 * One-entry demultiplex cache for the most recently matched socket.
 * volatile: read/written from both softirq and process context with
 * interrupts disabled only around updates (see tcp_cache_zap()).
 */
volatile unsigned long th_cache_saddr, th_cache_daddr;
volatile unsigned short th_cache_dport, th_cache_sport;
volatile struct sock *th_cache_sk;
257
258 void tcp_cache_zap(void)
259 {
260 unsigned long flags;
261 save_flags(flags);
262 cli();
263 th_cache_saddr=0;
264 th_cache_daddr=0;
265 th_cache_dport=0;
266 th_cache_sport=0;
267 th_cache_sk=NULL;
268 restore_flags(flags);
269 }
270
static void tcp_close(struct sock *sk, int timeout);

/*
 * Wait queue for select() on listening sockets: tcp_set_state() wakes
 * it when a pending connection reaches ESTABLISHED.
 */
static struct wait_queue *master_select_wakeup;
279
/* Return the smaller of two unsigned quantities. */
static __inline__ int min(unsigned int a, unsigned int b)
{
	return (a < b) ? a : b;
}
286
/* Define STATE_TRACE to get a printk on every TCP state transition. */
#undef STATE_TRACE

#ifdef STATE_TRACE
/* Human-readable names indexed by the TCP_* state constants. */
static char *statename[]={
	"Unused","Established","Syn Sent","Syn Recv",
	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
	"Close Wait","Last ACK","Listen","Closing"
};
#endif
296
297 static __inline__ void tcp_set_state(struct sock *sk, int state)
298 {
299 if(sk->state==TCP_ESTABLISHED)
300 tcp_statistics.TcpCurrEstab--;
301 #ifdef STATE_TRACE
302 if(sk->debug)
303 printk("TCP sk=%p, State %s -> %s\n",sk, statename[sk->state],statename[state]);
304 #endif
305
306
307
308 if(state==TCP_ESTABLISHED && sk->state==TCP_SYN_RECV)
309 {
310 wake_up_interruptible(&master_select_wakeup);
311 }
312 sk->state=state;
313 if(state==TCP_ESTABLISHED)
314 tcp_statistics.TcpCurrEstab++;
315 }
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332 int tcp_select_window(struct sock *sk)
333 {
334 int new_window = sk->prot->rspace(sk);
335
336 if(sk->window_clamp)
337 new_window=min(sk->window_clamp,new_window);
338
339
340
341
342
343
344
345
346
347
348
349
350
351 if (new_window < min(sk->mss, MAX_WINDOW/2) || new_window < sk->window)
352 return(sk->window);
353 return(new_window);
354 }
355
356
357
358
359
360
361 static struct sk_buff *tcp_find_established(struct sock *s)
362 {
363 struct sk_buff *p=skb_peek(&s->receive_queue);
364 if(p==NULL)
365 return NULL;
366 do
367 {
368 if(p->sk->state == TCP_ESTABLISHED || p->sk->state >= TCP_FIN_WAIT1)
369 return p;
370 p=p->next;
371 }
372 while(p!=(struct sk_buff *)&s->receive_queue);
373 return NULL;
374 }
375
376
377
378
379
380
381 static struct sk_buff *tcp_dequeue_established(struct sock *s)
382 {
383 struct sk_buff *skb;
384 unsigned long flags;
385 save_flags(flags);
386 cli();
387 skb=tcp_find_established(s);
388 if(skb!=NULL)
389 skb_unlink(skb);
390 restore_flags(flags);
391 return skb;
392 }
393
394
395
396
397
398
399
400 static void tcp_close_pending (struct sock *sk)
401 {
402 struct sk_buff *skb;
403
404 while ((skb = skb_dequeue(&sk->receive_queue)) != NULL)
405 {
406 skb->sk->dead=1;
407 tcp_close(skb->sk, 0);
408 kfree_skb(skb, FREE_READ);
409 }
410 return;
411 }
412
413
414
415
416
417 static void tcp_time_wait(struct sock *sk)
418 {
419 tcp_set_state(sk,TCP_TIME_WAIT);
420 sk->shutdown = SHUTDOWN_MASK;
421 if (!sk->dead)
422 sk->state_change(sk);
423 reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
424 }
425
426
427
428
429
430
/*
 * Retransmit queued-but-unacked segments starting at sk->send_head.
 * If 'all' is zero only the first segment is resent; otherwise up to
 * one congestion window's worth is resent.  Each frame is patched in
 * place (fresh IP id, current ack/window, recomputed checksums) before
 * being handed back to the device.
 */
void tcp_do_retransmit(struct sock *sk, int all)
{
	struct sk_buff * skb;
	struct proto *prot;
	struct device *dev;
	int ct=0;

	prot = sk->prot;
	skb = sk->send_head;

	while (skb != NULL)
	{
		struct tcphdr *th;
		struct iphdr *iph;
		int size;

		dev = skb->dev;
		IS_SKB(skb);
		skb->when = jiffies;	/* restart the RTT clock for this frame */

		/* Locate the IP and TCP headers inside the already-built frame
		   (link header length is device dependent). */
		iph = (struct iphdr *)(skb->data + dev->hard_header_len);
		th = (struct tcphdr *)(((char *)iph) + (iph->ihl << 2));
		size = skb->len - (((unsigned char *) th) - skb->data);

		/* Each (re)transmission must carry a fresh IP id, which in
		   turn invalidates the IP header checksum. */
		iph->id = htons(ip_id_count++);
		ip_send_check(iph);

		/* Refresh ack and window to current values, then redo the TCP
		   checksum.  (ntohl/ntohs are byte-swap-symmetric, so they act
		   as htonl/htons here.) */
		th->ack_seq = ntohl(sk->acked_seq);
		th->window = ntohs(tcp_select_window(sk));
		tcp_send_check(th, sk->saddr, sk->daddr, size, sk);

		if (dev->flags & IFF_UP)
		{
			/* Only requeue the frame if the device isn't still
			   holding it from a previous transmit attempt. */
			if (sk && !skb_device_locked(skb))
			{
				skb_unlink(skb);
				ip_statistics.IpOutRequests++;
				dev_queue_xmit(skb, dev, sk->priority);
			}
		}

		ct++;
		sk->prot->retransmits ++;

		/* Only retransmit one packet unless asked for everything. */
		if (!all)
			break;

		/* Stay inside the congestion window. */
		if (ct >= sk->cong_window)
			break;
		skb = skb->link3;	/* next frame on the retransmit chain */
	}
}
537
538
539
540
541
542 static void reset_xmit_timer(struct sock *sk, int why, unsigned long when)
543 {
544 del_timer(&sk->retransmit_timer);
545 sk->ip_xmit_timeout = why;
546 if((int)when < 0)
547 {
548 when=3;
549 printk("Error: Negative timer in xmit_timer\n");
550 }
551 sk->retransmit_timer.expires=when;
552 add_timer(&sk->retransmit_timer);
553 }
554
555
556
557
558
559
560
561
562
563 void tcp_retransmit_time(struct sock *sk, int all)
564 {
565 tcp_do_retransmit(sk, all);
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584 sk->retransmits++;
585 sk->backoff++;
586 sk->rto = min(sk->rto << 1, 120*HZ);
587 reset_xmit_timer(sk, TIME_WRITE, sk->rto);
588 }
589
590
591
592
593
594
595
596
597
598 static void tcp_retransmit(struct sock *sk, int all)
599 {
600 if (all)
601 {
602 tcp_retransmit_time(sk, all);
603 return;
604 }
605
606 sk->ssthresh = sk->cong_window >> 1;
607
608 sk->cong_count = 0;
609
610 sk->cong_window = 1;
611
612
613 tcp_retransmit_time(sk, all);
614 }
615
616
617
618
619
/*
 * Decide whether a retransmission timeout should kill the connection.
 * Returns 1 if the socket is still usable, 0 once it has been moved
 * to CLOSE.  Along the way, a persistent failure invalidates the ARP
 * entry in case the peer's hardware address changed.
 */
static int tcp_write_timeout(struct sock *sk)
{
	/* Every 8th retry on an established connection (or past TCP_RETR1
	   otherwise), flush the ARP entry for the destination. */
	if ((sk->state == TCP_ESTABLISHED && sk->retransmits && !(sk->retransmits & 7))
		|| (sk->state != TCP_ESTABLISHED && sk->retransmits > TCP_RETR1))
	{
		arp_destroy (sk->daddr, 0);
	}

	/* Too many retries: give up on the connection entirely. */
	if (sk->retransmits > TCP_RETR2)
	{
		sk->err = ETIMEDOUT;
		sk->error_report(sk);
		del_timer(&sk->retransmit_timer);

		/* If we were closing anyway, fall into TIME_WAIT so the
		   2MSL timer can reap the socket; otherwise drop straight
		   to CLOSE. */
		if (sk->state == TCP_FIN_WAIT1 || sk->state == TCP_FIN_WAIT2 || sk->state == TCP_CLOSING )
		{
			tcp_set_state(sk,TCP_TIME_WAIT);
			reset_msl_timer (sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
		}
		else
		{
			tcp_set_state(sk, TCP_CLOSE);
			return 0;
		}
	}
	return 1;
}
662
663
664
665
666
667
668
669
670
671
672
/*
 * The per-socket transmit timer.  Dispatches on sk->ip_xmit_timeout:
 * zero-window probes (TIME_PROBE0), data retransmission (TIME_WRITE)
 * and keepalives (TIME_KEEPOPEN).  If the socket is busy the timer is
 * simply re-armed one second out and retried.
 */
static void retransmit_timer(unsigned long data)
{
	struct sock *sk = (struct sock*)data;
	int why = sk->ip_xmit_timeout;

	/* We can only take the socket if neither user context nor the
	   bottom half currently owns it; otherwise back off for 1s. */
	cli();
	if (sk->inuse || in_bh)
	{
		sk->retransmit_timer.expires = HZ;
		add_timer(&sk->retransmit_timer);
		sti();
		return;
	}

	sk->inuse = 1;
	sti();

	/* Piggyback: flush any pending ACK work while we hold the sock. */
	if (sk->ack_backlog && !sk->zapped)
	{
		sk->prot->read_wakeup (sk);
		if (! sk->dead)
			sk->data_ready(sk,0);
	}

	switch (why)
	{
		case TIME_PROBE0:
			/* Zero-window probe, then check for connection death. */
			tcp_send_probe0(sk);
			tcp_write_timeout(sk);
			break;

		case TIME_WRITE:
		{
			struct sk_buff *skb;
			unsigned long flags;

			save_flags(flags);
			cli();
			skb = sk->send_head;
			if (!skb)
			{
				/* Nothing outstanding — stale timer. */
				restore_flags(flags);
			}
			else
			{
				/* The head segment's RTO hasn't elapsed yet
				   (an ACK advanced send_head since the timer
				   was set) — re-arm for the remaining time. */
				if (jiffies < skb->when + sk->rto)
				{
					reset_xmit_timer (sk, TIME_WRITE, skb->when + sk->rto - jiffies);
					restore_flags(flags);
					break;
				}
				restore_flags(flags);

				/* Genuine timeout: retransmit and maybe die. */
				sk->prot->retransmit (sk, 0);
				tcp_write_timeout(sk);
			}
			break;
		}

		case TIME_KEEPOPEN:
			/* Re-arm first so a wakeup path that resets the timer
			   isn't lost, then emit the keepalive. */
			reset_xmit_timer (sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);

			if (sk->prot->write_wakeup)
				sk->prot->write_wakeup (sk);
			sk->retransmits++;
			tcp_write_timeout(sk);
			break;

		default:
			printk ("rexmit_timer: timer expired - reason unknown\n");
			break;
	}
	release_sock(sk);
}
770
771
772
773
774
775
776
777
778
779
/*
 * ICMP error handler for TCP.  'header' points at the offending IP
 * header returned inside the ICMP payload; 'err' encodes ICMP
 * type<<8|code, or is negative for a locally generated error.
 */
void tcp_err(int err, unsigned char *header, unsigned long daddr,
	unsigned long saddr, struct inet_protocol *protocol)
{
	struct tcphdr *th;
	struct sock *sk;
	struct iphdr *iph=(struct iphdr *)header;

	header+=4*iph->ihl;		/* skip the IP header to the TCP header */

	th =(struct tcphdr *)header;
	sk = get_sock(&tcp_prot, th->source, daddr, th->dest, saddr);

	if (sk == NULL)
		return;			/* no local socket for this flow */

	/* Negative err: a local error code, report straight to the user. */
	if(err<0)
	{
		sk->err = -err;
		sk->error_report(sk);
		return;
	}

	/* Source quench: shrink the congestion window (but keep a floor). */
	if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8))
	{
		if (sk->cong_window > 4)
			sk->cong_window--;
		return;
	}

	/* Fatal ICMP errors (or any error during connect) kill a pending
	   connection attempt; otherwise just record the error code. */
	if (icmp_err_convert[err & 0xff].fatal || sk->state == TCP_SYN_SENT)
	{
		if (sk->state == TCP_SYN_SENT)
		{
			tcp_statistics.TcpAttemptFails++;
			tcp_set_state(sk,TCP_CLOSE);
			sk->error_report(sk);
		}
		sk->err = icmp_err_convert[err & 0xff].errno;
	}
	return;
}
834
835
836
837
838
839
840
841
/*
 * Count how many bytes a read() could return right now without
 * blocking.  Walks the receive queue from copied_seq, stopping at the
 * first sequence gap, and excludes SYN/URG octets which occupy
 * sequence space but carry no user data.
 */
static int tcp_readable(struct sock *sk)
{
	unsigned long counted;
	unsigned long amount;
	struct sk_buff *skb;
	int sum;
	unsigned long flags;

	if(sk && sk->debug)
		printk("tcp_readable: %p - ",sk);

	save_flags(flags);
	cli();			/* queue is modified from the bottom half */
	if (sk == NULL || (skb = skb_peek(&sk->receive_queue)) == NULL)
	{
		restore_flags(flags);
		if(sk && sk->debug)
			printk("empty\n");
		return(0);
	}

	counted = sk->copied_seq;	/* next sequence number the user will read */
	amount = 0;

	do
	{
		/* Gap in sequence space — can't read past it. */
		if (before(counted, skb->h.th->seq))
			break;
		/* Bytes in this skb not yet consumed by the reader. */
		sum = skb->len -(counted - skb->h.th->seq);
		if (skb->h.th->syn)
			sum++;		/* SYN occupies a sequence number */
		if (sum > 0)
		{
			amount += sum;
			if (skb->h.th->syn)
				amount--;	/* ...but isn't readable data */
			counted += sum;
		}
		/* The urgent byte is delivered out of band, not via read(). */
		if (skb->h.th->urg)
			amount--;
		/* PSH: deliver what we have immediately. */
		if (amount && skb->h.th->psh) break;
		skb = skb->next;
	}
	while(skb != (struct sk_buff *)&sk->receive_queue);

	restore_flags(flags);
	if(sk->debug)
		printk("got %lu bytes.\n",amount);
	return(amount);
}
912
913
914
915
916 static int tcp_listen_select(struct sock *sk, int sel_type, select_table *wait)
917 {
918 if (sel_type == SEL_IN) {
919 int retval;
920
921 sk->inuse = 1;
922 retval = (tcp_find_established(sk) != NULL);
923 release_sock(sk);
924 if (!retval)
925 select_wait(&master_select_wakeup,wait);
926 return retval;
927 }
928 return 0;
929 }
930
931
932
933
934
935
936
937
938
/*
 * select() support for connected TCP sockets.  Returns 1 when the
 * requested condition (readable / writable / exceptional) currently
 * holds, otherwise registers on the socket's wait queue and returns 0.
 */
static int tcp_select(struct sock *sk, int sel_type, select_table *wait)
{
	if (sk->state == TCP_LISTEN)
		return tcp_listen_select(sk, sel_type, wait);

	switch(sel_type) {
	case SEL_IN:
		if (sk->err)
			return 1;	/* pending error is "readable" */
		if (sk->state == TCP_SYN_SENT || sk->state == TCP_SYN_RECV)
			break;		/* handshake incomplete: wait */

		if (sk->shutdown & RCV_SHUTDOWN)
			return 1;	/* EOF is readable */

		if (sk->acked_seq == sk->copied_seq)
			break;		/* nothing new has arrived */

		/* Data available — but if the only unread byte is an
		   out-of-band urgent byte, plain read() would block. */
		if (sk->urg_seq != sk->copied_seq ||
		    sk->acked_seq != sk->copied_seq+1 ||
		    sk->urginline || !sk->urg_data)
			return 1;
		break;

	case SEL_OUT:
		if (sk->err)
			return 1;
		if (sk->shutdown & SEND_SHUTDOWN)
			return 0;	/* can never become writable again */
		if (sk->state == TCP_SYN_SENT || sk->state == TCP_SYN_RECV)
			break;

		/* Require room for a full MSS-ish write before reporting
		   writable, to avoid tiny-write thrashing. */
		if (sk->prot->wspace(sk) < sk->mtu+128+sk->prot->max_header)
			break;
		return 1;

	case SEL_EX:
		if (sk->urg_data)
			return 1;	/* urgent data is the only exception */
		break;
	}
	select_wait(sk->sleep, wait);
	return 0;
}
987
/*
 * ioctl() handler: TIOCINQ (bytes readable), SIOCATMARK (at urgent
 * mark?), TIOCOUTQ (send buffer space).  Results are copied to user
 * space via put_fs_long after verify_area.
 */
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int err;
	switch(cmd)
	{
		case TIOCINQ:
#ifdef FIXME
		case FIONREAD:
#endif
		{
			unsigned long amount;

			if (sk->state == TCP_LISTEN)
				return(-EINVAL);

			sk->inuse = 1;		/* lock sock while we count */
			amount = tcp_readable(sk);
			release_sock(sk);
			err=verify_area(VERIFY_WRITE,(void *)arg,
						   sizeof(unsigned long));
			if(err)
				return err;
			put_fs_long(amount,(unsigned long *)arg);
			return(0);
		}
		case SIOCATMARK:
		{
			/* True iff the next byte to read is the urgent byte. */
			int answ = sk->urg_data && sk->urg_seq == sk->copied_seq;

			err = verify_area(VERIFY_WRITE,(void *) arg,
						  sizeof(unsigned long));
			if (err)
				return err;
			put_fs_long(answ,(int *) arg);
			return(0);
		}
		case TIOCOUTQ:
		{
			unsigned long amount;

			if (sk->state == TCP_LISTEN) return(-EINVAL);
			amount = sk->prot->wspace(sk);
			err=verify_area(VERIFY_WRITE,(void *)arg,
						   sizeof(unsigned long));
			if(err)
				return err;
			put_fs_long(amount,(unsigned long *)arg);
			return(0);
		}
		default:
			return(-EINVAL);
	}
}
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051 unsigned short tcp_check(struct tcphdr *th, int len,
1052 unsigned long saddr, unsigned long daddr)
1053 {
1054 return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,
1055 csum_partial((char *)th,len,0));
1056 }
1057
1058
1059
1060 void tcp_send_check(struct tcphdr *th, unsigned long saddr,
1061 unsigned long daddr, int len, struct sock *sk)
1062 {
1063 th->check = 0;
1064 th->check = tcp_check(th, len, saddr, daddr);
1065 return;
1066 }
1067
1068
1069
1070
1071
1072
/*
 * Transmit (or queue) one fully built TCP segment.  If the window,
 * congestion window or an outstanding retransmit forbids sending now,
 * the skb goes on the write queue; otherwise ack/window are patched
 * in, the checksum computed, and the frame handed to IP.
 */
static void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
{
	int size;
	struct tcphdr * th = skb->h.th;

	/* Total TCP length = frame length minus everything before the
	   TCP header. */
	size = skb->len - ((unsigned char *) th - skb->data);

	/* Sanity: malformed frame, drop it rather than send garbage. */
	if (size < sizeof(struct tcphdr) || size > skb->len)
	{
		printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
			skb, skb->data, th, skb->len);
		kfree_skb(skb, FREE_WRITE);
		return;
	}

	/* A data-less segment is only legitimate if it's a SYN or FIN. */
	if (size == sizeof(struct tcphdr))
	{
		if(!th->syn && !th->fin)
		{
			printk("tcp_send_skb: attempt to queue a bogon.\n");
			kfree_skb(skb,FREE_WRITE);
			return;
		}
	}

	tcp_statistics.TcpOutSegs++;
	/* End sequence number of this segment (seq + payload length). */
	skb->h.seq = ntohl(th->seq) + size - 4*th->doff;

	/* Can't send now if it's beyond the peer's window, a retransmit
	   is in progress, or the congestion window is full. */
	if (after(skb->h.seq, sk->window_seq) ||
	    (sk->retransmits && sk->ip_xmit_timeout == TIME_WRITE) ||
	     sk->packets_out >= sk->cong_window)
	{
		/* Checksum will be filled in when it's finally sent. */
		th->check = 0;
		if (skb->next != NULL)
		{
			printk("tcp_send_partial: next != NULL\n");
			skb_unlink(skb);
		}
		skb_queue_tail(&sk->write_queue, skb);

		/* Zero-window deadlock guard: if nothing is in flight and
		   the queued head won't fit the window, start probing. */
		if (before(sk->window_seq, sk->write_queue.next->h.seq) &&
		    sk->send_head == NULL && sk->ack_backlog == 0)
			reset_xmit_timer(sk, TIME_PROBE0, sk->rto);
	}
	else
	{
		/* Send immediately: refresh ack/window, checksum, go.
		   (ntohl/ntohs act as htonl/htons — symmetric swap.) */
		th->ack_seq = ntohl(sk->acked_seq);
		th->window = ntohs(tcp_select_window(sk));

		tcp_send_check(th, sk->saddr, sk->daddr, size, sk);

		sk->sent_seq = sk->write_seq;

		sk->prot->queue_xmit(sk, skb->dev, skb, 0);

		/* Arm the retransmit timer for this segment. */
		reset_xmit_timer(sk, TIME_WRITE, sk->rto);
	}
}
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192 struct sk_buff * tcp_dequeue_partial(struct sock * sk)
1193 {
1194 struct sk_buff * skb;
1195 unsigned long flags;
1196
1197 save_flags(flags);
1198 cli();
1199 skb = sk->partial;
1200 if (skb) {
1201 sk->partial = NULL;
1202 del_timer(&sk->partial_timer);
1203 }
1204 restore_flags(flags);
1205 return skb;
1206 }
1207
1208
1209
1210
1211
1212 static void tcp_send_partial(struct sock *sk)
1213 {
1214 struct sk_buff *skb;
1215
1216 if (sk == NULL)
1217 return;
1218 while ((skb = tcp_dequeue_partial(sk)) != NULL)
1219 tcp_send_skb(sk, skb);
1220 }
1221
1222
1223
1224
1225
1226 void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk)
1227 {
1228 struct sk_buff * tmp;
1229 unsigned long flags;
1230
1231 save_flags(flags);
1232 cli();
1233 tmp = sk->partial;
1234 if (tmp)
1235 del_timer(&sk->partial_timer);
1236 sk->partial = skb;
1237 init_timer(&sk->partial_timer);
1238
1239
1240
1241 sk->partial_timer.expires = HZ;
1242 sk->partial_timer.function = (void (*)(unsigned long)) tcp_send_partial;
1243 sk->partial_timer.data = (unsigned long) sk;
1244 add_timer(&sk->partial_timer);
1245 restore_flags(flags);
1246 if (tmp)
1247 tcp_send_skb(sk, tmp);
1248 }
1249
1250
1251
1252
1253
1254
/*
 * Build and transmit a bare ACK segment carrying 'sequence' as its
 * sequence number and 'ack' as its acknowledgement.  On memory
 * pressure the ACK is deferred via the retransmit timer instead.
 */
static void tcp_send_ack(u32 sequence, u32 ack,
	     struct sock *sk,
	     struct tcphdr *th, unsigned long daddr)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	struct device *dev = NULL;
	int tmp;

	if(sk->zapped)
		return;		/* socket has been reset — say nothing */

	/* We need to grab some memory, and put together an ack,
	   and then put it into the queue to be sent. */
	buff = sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
	{
		/* No memory: remember we owe an ACK and let the timer
		   retry shortly (only while actually connected). */
		sk->ack_backlog++;
		if (sk->ip_xmit_timeout != TIME_WRITE && tcp_connected(sk->state))
		{
			reset_xmit_timer(sk, TIME_WRITE, HZ);
		}
		return;
	}

	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;
	buff->localroute = sk->localroute;
	t1 =(struct tcphdr *) buff->data;

	/* Prepend the IP (and link) headers. */
	tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
				IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0)
	{
		/* No route — give the buffer back. */
		buff->free = 1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		return;
	}
	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)t1 +tmp);

	/* Start from the incoming header, then swap the ports and fill
	   in our own values.  (ntohl/ntohs act as htonl/htons.) */
	memcpy(t1, th, sizeof(*t1));

	t1->dest = th->source;
	t1->source = th->dest;
	t1->seq = ntohl(sequence);
	t1->ack = 1;
	sk->window = tcp_select_window(sk);
	t1->window = ntohs(sk->window);
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	t1->fin = 0;

	/* If this ACK brings us fully up to date, clear the backlog and,
	   when no transmit work remains, downgrade the pending timer to
	   keepalive (or drop it). */
	if (ack == sk->acked_seq)
	{
		sk->ack_backlog = 0;
		sk->bytes_rcv = 0;
		sk->ack_timed = 0;
		if (sk->send_head == NULL && skb_peek(&sk->write_queue) == NULL
				  && sk->ip_xmit_timeout == TIME_WRITE)
		{
			if(sk->keepopen) {
				reset_xmit_timer(sk,TIME_KEEPOPEN,TCP_TIMEOUT_LEN);
			} else {
				delete_timer(sk);
			}
		}
	}

	t1->ack_seq = ntohl(ack);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), sk);
	if (sk->debug)
		 printk("\rtcp_ack: seq %x ack %x\n", sequence, ack);
	tcp_statistics.TcpOutSegs++;
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
1368
1369
1370
1371
1372
1373
1374 extern __inline int tcp_build_header(struct tcphdr *th, struct sock *sk, int push)
1375 {
1376
1377 memcpy(th,(void *) &(sk->dummy_th), sizeof(*th));
1378 th->seq = htonl(sk->write_seq);
1379 th->psh =(push == 0) ? 1 : 0;
1380 th->doff = sizeof(*th)/4;
1381 th->ack = 1;
1382 th->fin = 0;
1383 sk->ack_backlog = 0;
1384 sk->bytes_rcv = 0;
1385 sk->ack_timed = 0;
1386 th->ack_seq = htonl(sk->acked_seq);
1387 sk->window = tcp_select_window(sk);
1388 th->window = htons(sk->window);
1389
1390 return(sizeof(*th));
1391 }
1392
1393
1394
1395
1396
1397
/*
 * The TCP sendmsg path: copy 'len' bytes from user space ('from')
 * into MSS-sized segments and transmit them.  Blocks (unless
 * 'nonblock') while the connection is still completing or while no
 * buffer memory is available.  Returns bytes copied, or a negative
 * errno if nothing was copied before the error.
 */
static int tcp_write(struct sock *sk, unsigned char *from,
	  int len, int nonblock, unsigned flags)
{
	int copied = 0;
	int copy;
	int tmp;
	struct sk_buff *skb;
	struct sk_buff *send_tmp;
	unsigned char *buff;
	struct proto *prot;
	struct device *dev = NULL;

	sk->inuse=1;			/* lock the socket */
	prot = sk->prot;
	while(len > 0)
	{
		/* A pending asynchronous error aborts the write (unless we
		   already moved some data — then report the short count). */
		if (sk->err)
		{
			release_sock(sk);
			if (copied)
				return(copied);
			tmp = -sk->err;
			sk->err = 0;
			return(tmp);
		}

		/* Writing after shutdown(SHUT_WR) is EPIPE. */
		if (sk->shutdown & SEND_SHUTDOWN)
		{
			release_sock(sk);
			sk->err = EPIPE;
			if (copied)
				return(copied);
			sk->err = 0;
			return(-EPIPE);
		}

		/* Wait until the connection is in a data-carrying state. */
		while(sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)
		{
			if (sk->err)
			{
				release_sock(sk);
				if (copied)
					return(copied);
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			/* Not even connecting any more — the connection is
			   gone.  SIGPIPE only for keepopen sockets here. */
			if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV)
			{
				release_sock(sk);
				if (copied)
					return(copied);

				if (sk->err)
				{
					tmp = -sk->err;
					sk->err = 0;
					return(tmp);
				}

				if (sk->keepopen)
				{
					send_sig(SIGPIPE, current, 0);
				}
				return(-EPIPE);
			}

			if (nonblock || copied)
			{
				release_sock(sk);
				if (copied)
					return(copied);
				return(-EAGAIN);
			}

			release_sock(sk);
			cli();

			if (sk->state != TCP_ESTABLISHED &&
			    sk->state != TCP_CLOSE_WAIT && sk->err == 0)
			{
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked)
				{
					sti();
					if (copied)
						return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
		}

		/* First preference: top up an existing partial (sub-MSS)
		   packet rather than start a new one (Nagle coalescing).
		   OOB data never merges into a partial packet. */
		if ((skb = tcp_dequeue_partial(sk)) != NULL)
		{
			int hdrlen;

			/* IP header plus fixed TCP header length. */
			hdrlen = ((unsigned long)skb->h.th - (unsigned long)skb->data)
				 + sizeof(struct tcphdr);

			/* Add more stuff to the end of skb->len */
			if (!(flags & MSG_OOB))
			{
				copy = min(sk->mss - (skb->len - hdrlen), len);

				if (copy <= 0)
				{
					printk("TCP: **bug**: \"copy\" <= 0!!\n");
					copy = 0;
				}

				memcpy_fromfs(skb->data + skb->len, from, copy);
				skb->len += copy;
				from += copy;
				copied += copy;
				len -= copy;
				sk->write_seq += copy;
			}
			/* Full segment, urgent data, or nothing in flight:
			   send now; otherwise keep accumulating. */
			if ((skb->len - hdrlen) >= sk->mss ||
				(flags & MSG_OOB) || !sk->packets_out)
				tcp_send_skb(sk, skb);
			else
				tcp_enqueue_partial(skb, sk);
			continue;
		}

		/* Choose this segment's size: the free window, unless that
		   is degenerate (non-positive, under half the peer's best
		   window, or over one MSS), in which case use the MSS. */
		copy = sk->window_seq - sk->write_seq;
		if (copy <= 0 || copy < (sk->max_window >> 1) || copy > sk->mss)
			copy = sk->mss;
		if (copy > len)
			copy = len;

		/* A sub-MSS non-OOB write gets a full-size buffer so later
		   writes can append to it (it may become sk->partial). */
		send_tmp = NULL;
		if (copy < sk->mss && !(flags & MSG_OOB))
		{
			/* Release the socket: wmalloc may sleep. */
			release_sock(sk);
			skb = prot->wmalloc(sk, sk->mtu + 128 + prot->max_header, 0, GFP_KERNEL);
			sk->inuse = 1;
			send_tmp = skb;
		}
		else
		{
			release_sock(sk);
			skb = prot->wmalloc(sk, copy + prot->max_header , 0, GFP_KERNEL);
			sk->inuse = 1;
		}

		/* No memory: fail (nonblock) or sleep until wmem is freed. */
		if (skb == NULL)
		{
			sk->socket->flags |= SO_NOSPACE;
			if (nonblock)
			{
				release_sock(sk);
				if (copied)
					return(copied);
				return(-EAGAIN);
			}

			tmp = sk->wmem_alloc;
			release_sock(sk);
			cli();
			/* Only sleep if no wmem was freed meanwhile and the
			   connection is still alive. */
			if (tmp <= sk->wmem_alloc &&
				  (sk->state == TCP_ESTABLISHED||sk->state == TCP_CLOSE_WAIT)
				&& sk->err == 0)
			{
				sk->socket->flags &= ~SO_NOSPACE;
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked)
				{
					sti();
					if (copied)
						return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
			continue;
		}

		skb->len = 0;
		skb->sk = sk;
		skb->free = 0;
		skb->localroute = sk->localroute|(flags&MSG_DONTROUTE);

		buff = skb->data;

		/* Build the IP (and link) header. */
		tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev,
				 IPPROTO_TCP, sk->opt, skb->mem_len,sk->ip_tos,sk->ip_ttl);
		if (tmp < 0 )
		{
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			if (copied)
				return(copied);
			return(tmp);
		}
		skb->len += tmp;
		skb->dev = dev;
		buff += tmp;
		skb->h.th =(struct tcphdr *) buff;

		/* Build the TCP header; 'len-copy' tells it whether more
		   data follows (controls the PSH bit). */
		tmp = tcp_build_header((struct tcphdr *)buff, sk, len-copy);
		if (tmp < 0)
		{
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			if (copied)
				return(copied);
			return(tmp);
		}

		if (flags & MSG_OOB)
		{
			((struct tcphdr *)buff)->urg = 1;
			((struct tcphdr *)buff)->urg_ptr = ntohs(copy);
		}
		skb->len += tmp;
		memcpy_fromfs(buff+tmp, from, copy);

		from += copy;
		copied += copy;
		len -= copy;
		skb->len += copy;
		skb->free = 0;
		sk->write_seq += copy;

		/* Sub-MSS segment with data already in flight: hold it as
		   the partial packet instead of sending immediately. */
		if (send_tmp != NULL && sk->packets_out)
		{
			tcp_enqueue_partial(send_tmp, sk);
			continue;
		}
		tcp_send_skb(sk, skb);
	}
	sk->err = 0;

	/* Nagle: flush the partial packet if nothing is in flight, or if
	   TCP_NODELAY is set and the window has room. */
	if(sk->partial && ((!sk->packets_out)
	/* If not nagling we can send on the before case too.. */
	      || (sk->nonagle && before(sk->write_seq , sk->window_seq))
	))
	  	tcp_send_partial(sk);

	release_sock(sk);
	return(copied);
}
1722
1723
1724
1725
1726
1727 static int tcp_sendto(struct sock *sk, unsigned char *from,
1728 int len, int nonblock, unsigned flags,
1729 struct sockaddr_in *addr, int addr_len)
1730 {
1731 if (flags & ~(MSG_OOB|MSG_DONTROUTE))
1732 return -EINVAL;
1733 if (sk->state == TCP_CLOSE)
1734 return -ENOTCONN;
1735 if (addr_len < sizeof(*addr))
1736 return -EINVAL;
1737 if (addr->sin_family && addr->sin_family != AF_INET)
1738 return -EINVAL;
1739 if (addr->sin_port != sk->dummy_th.dest)
1740 return -EISCONN;
1741 if (addr->sin_addr.s_addr != sk->daddr)
1742 return -EISCONN;
1743 return tcp_write(sk, from, len, nonblock, flags);
1744 }
1745
1746
1747
1748
1749
1750
1751
/*
 * Send a window-update ACK after the reader has freed receive buffer
 * space, but only if we actually owe the peer an ack (ack_backlog).
 * On memory pressure the attempt is retried via the write timer.
 */
static void tcp_read_wakeup(struct sock *sk)
{
	int tmp;
	struct device *dev = NULL;
	struct tcphdr *t1;
	struct sk_buff *buff;

	if (!sk->ack_backlog)
		return;		/* nothing owed — stay quiet */

	/* FIXME: we need to put code here to prevent this routine from
	   being called.  Being called once in a while is ok, so only
	   check if this is the second time in a row. */

	buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
	if (buff == NULL)
	{
		/* Try again real soon. */
		reset_xmit_timer(sk, TIME_WRITE, HZ);
		return;
	}

	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;
	buff->localroute = sk->localroute;

	/* Put in the IP header and routing stuff. */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
			       IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0)
	{
		buff->free = 1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		return;
	}

	buff->len += tmp;
	t1 =(struct tcphdr *)(buff->data +tmp);

	/* Pure ACK from the socket's template header, carrying the
	   current sequence, ack and (freshly enlarged) window. */
	memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
	t1->seq = htonl(sk->sent_seq);
	t1->ack = 1;
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	sk->window = tcp_select_window(sk);
	t1->window = ntohs(sk->window);
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
	sk->prot->queue_xmit(sk, dev, buff, 1);
	tcp_statistics.TcpOutSegs++;
}
1820
1821
1822
1823
1824
1825
1826
1827
1828
/*
 * Reclaim fully consumed skbs from the receive queue and, if that
 * freed noticeable space, arrange for a window-update ACK — either
 * immediately (big window change) or via a short delayed-ack timer.
 */
static void cleanup_rbuf(struct sock *sk)
{
	unsigned long flags;
	unsigned long left;
	struct sk_buff *skb;
	unsigned long rspace;

	if(sk->debug)
	  	printk("cleaning rbuf for sk=%p\n", sk);

	save_flags(flags);
	cli();

	left = sk->prot->rspace(sk);	/* space before cleanup, for comparison */

	/* Free used-up, unreferenced buffers from the head of the queue;
	   stop at the first skb still unread or still in use. */
	while((skb=skb_peek(&sk->receive_queue)) != NULL)
	{
		if (!skb->used || skb->users)
			break;
		skb_unlink(skb);
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
	}

	restore_flags(flags);

	if(sk->debug)
		printk("sk->rspace = %lu, was %lu\n", sk->prot->rspace(sk),
					    left);
	if ((rspace=sk->prot->rspace(sk)) != left)
	{
		/* We freed space — we now owe the peer a window update. */
		sk->ack_backlog++;

		/* Window grew by more than an MTU beyond what the peer
		   knows: ack at once so it can resume sending. */
		if (rspace > (sk->window - sk->bytes_rcv + sk->mtu))
		{
			/* Send an ack right now. */
			tcp_read_wakeup(sk);
		}
		else
		{
			/* Force it to send an ack soon.
			   NOTE(review): the comparison reads sk->timer.expires
			   while the timer being manipulated is
			   sk->retransmit_timer — looks like a mixed-up field;
			   confirm against other users of these timers. */
			int was_active = del_timer(&sk->retransmit_timer);
			if (!was_active || TCP_ACK_TIME < sk->timer.expires)
			{
				reset_xmit_timer(sk, TIME_WRITE, TCP_ACK_TIME);
			}
			else
				add_timer(&sk->retransmit_timer);
		}
	}
}
1910
1911
1912
1913
1914
1915
1916
/*
 * Read the single out-of-band (urgent) byte.  Only valid when urgent
 * data exists, hasn't already been read, and the socket is not in
 * inline-urgent mode.  Returns 1 with the byte stored at 'to', 0 on
 * clean EOF conditions, or a negative errno.
 */
static int tcp_read_urg(struct sock * sk, int nonblock,
	     unsigned char *to, int len, unsigned flags)
{
	/* No URG data to read, already read, or user wants it inline. */
	if (sk->urginline || !sk->urg_data || sk->urg_data == URG_READ)
		return -EINVAL;

	if (sk->err)
	{
		int tmp = -sk->err;
		sk->err = 0;
		return tmp;
	}

	if (sk->state == TCP_CLOSE || sk->done)
	{
		/* First read after close reports EOF, later ones ENOTCONN. */
		if (!sk->done) {
			sk->done = 1;
			return 0;
		}
		return -ENOTCONN;
	}

	if (sk->shutdown & RCV_SHUTDOWN)
	{
		sk->done = 1;
		return 0;
	}
	sk->inuse = 1;
	if (sk->urg_data & URG_VALID)
	{
		/* Low byte of urg_data is the urgent byte itself. */
		char c = sk->urg_data;
		if (!(flags & MSG_PEEK))
			sk->urg_data = URG_READ;	/* consume it */
		put_fs_byte(c, to);
		release_sock(sk);
		return 1;
	}
	release_sock(sk);

	/* No urgent byte pending right now; BSD semantics say don't
	   block waiting for one. */
	return -EAGAIN;
}
1968
1969
1970
1971
1972
1973
/*
 * Copy received TCP stream data to user space.  Walks the receive queue
 * from the current copied sequence number, sleeping (unless 'nonblock')
 * until data, EOF, error or a signal arrives.  Returns the number of
 * bytes copied, 0 at end of stream, or a negative errno.
 *
 * With MSG_PEEK the data is copied but not consumed: a local sequence
 * counter is advanced instead of sk->copied_seq and skbs are not marked
 * used.  MSG_OOB is delegated to tcp_read_urg().
 */
static int tcp_read(struct sock *sk, unsigned char *to,
	int len, int nonblock, unsigned flags)
{
	struct wait_queue wait = { current, NULL };
	int copied = 0;
	u32 peek_seq;
	volatile u32 *seq;	/* points at the sequence counter we advance */
	unsigned long used;

	/* A listening socket has no data stream to read. */
	if (sk->state == TCP_LISTEN)
		return -ENOTCONN;

	/* Urgent data is handled by its own routine. */
	if (flags & MSG_OOB)
		return tcp_read_urg(sk, nonblock, to, len, flags);

	/*
	 * Choose which counter tracks our position: the real copied_seq,
	 * or a private copy when peeking so nothing is consumed.
	 */
	peek_seq = sk->copied_seq;
	seq = &sk->copied_seq;
	if (flags & MSG_PEEK)
		seq = &peek_seq;

	add_wait_queue(sk->sleep, &wait);
	sk->inuse = 1;			/* lock the socket */
	while (len > 0)
	{
		struct sk_buff * skb;
		unsigned long offset;

		/*
		 * Stop at the urgent mark: never read across it once some
		 * data has already been returned.
		 */
		if (copied && sk->urg_data && sk->urg_seq == *seq)
			break;

		/*
		 * Set INTERRUPTIBLE *before* scanning the queue so a wakeup
		 * between the scan and schedule() is not lost.
		 */
		current->state = TASK_INTERRUPTIBLE;

		/* Scan the receive queue for the segment containing *seq. */
		skb = skb_peek(&sk->receive_queue);
		do
		{
			if (!skb)
				break;
			/* A gap before the next segment: nothing contiguous. */
			if (before(*seq, skb->h.th->seq))
				break;
			offset = *seq - skb->h.th->seq;
			if (skb->h.th->syn)
				offset--;	/* SYN occupies one sequence number */
			if (offset < skb->len)
				goto found_ok_skb;
			if (skb->h.th->fin)
				goto found_fin_ok;
			if (!(flags & MSG_PEEK))
				skb->used = 1;	/* fully consumed; reclaimable */
			skb = skb->next;
		}
		while (skb != (struct sk_buff *)&sk->receive_queue);

		/* Partial success: return what we have rather than sleep. */
		if (copied)
			break;

		if (sk->err)
		{
			copied = -sk->err;
			sk->err = 0;
			break;
		}

		/* Closed: first read reports EOF, later ones -ENOTCONN. */
		if (sk->state == TCP_CLOSE)
		{
			if (!sk->done)
			{
				sk->done = 1;
				break;
			}
			copied = -ENOTCONN;
			break;
		}

		if (sk->shutdown & RCV_SHUTDOWN)
		{
			sk->done = 1;
			break;
		}

		if (nonblock)
		{
			copied = -EAGAIN;
			break;
		}

		/* Nothing to read: ack consumed data, drop the lock, sleep. */
		cleanup_rbuf(sk);
		release_sock(sk);
		sk->socket->flags |= SO_WAITDATA;
		schedule();
		sk->socket->flags &= ~SO_WAITDATA;
		sk->inuse = 1;		/* re-lock after waking */

		if (current->signal & ~current->blocked)
		{
			copied = -ERESTARTSYS;
			break;
		}
		continue;

	found_ok_skb:
		/*
		 * Pin the skb while copying so it cannot be freed from
		 * under us when the socket is released by memcpy_tofs' page
		 * faults.
		 */
		skb->users++;

		/* How much of this segment is usable. */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/*
		 * Trim the copy at the urgent mark; skip the urgent byte
		 * itself unless it is delivered inline.
		 */
		if (sk->urg_data)
		{
			unsigned long urg_offset = sk->urg_seq - *seq;
			if (urg_offset < used)
			{
				if (!urg_offset)
				{
					if (!sk->urginline)
					{
						++*seq;		/* step over the urgent byte */
						offset++;
						used--;
					}
				}
				else
					used = urg_offset;	/* stop just before it */
			}
		}

		/* Advance our position, then copy payload to user space. */
		*seq += used;

		memcpy_tofs(to,((unsigned char *)skb->h.th) +
			skb->h.th->doff*4 + offset, used);
		copied += used;
		len -= used;
		to += used;

		skb->users --;	/* unpin */

		/* Past the urgent mark: clear the urgent state. */
		if (after(sk->copied_seq,sk->urg_seq))
			sk->urg_data = 0;
		if (used + offset < skb->len)
			continue;	/* segment not yet exhausted */

		/* Segment fully consumed; handle a trailing FIN. */
		if (skb->h.th->fin)
			goto found_fin_ok;
		if (flags & MSG_PEEK)
			continue;
		skb->used = 1;	/* eligible for reclaim by cleanup_rbuf */
		continue;

	found_fin_ok:
		++*seq;		/* FIN consumes one sequence number */
		if (flags & MSG_PEEK)
			break;

		/* End of stream: no more data will arrive. */
		skb->used = 1;
		sk->shutdown |= RCV_SHUTDOWN;
		break;

	}
	remove_wait_queue(sk->sleep, &wait);
	current->state = TASK_RUNNING;

	/* Free consumed buffers and send any ack / window update due. */
	cleanup_rbuf(sk);
	release_sock(sk);
	return copied;
}
2200
2201
2202
2203
2204
2205
2206
2207
/*
 * Move the socket to its next state on a local close/shutdown and
 * report whether a FIN must be transmitted (return 1) or not (0).
 *
 * 'dead' is true when the socket is being fully closed (no process will
 * wait on it), in which case a FIN_WAIT2 socket gets a timeout so it
 * cannot linger forever waiting for the peer's FIN.
 */
static int tcp_close_state(struct sock *sk, int dead)
{
	int ns=TCP_CLOSE;	/* default next state */
	int send_fin=0;
	switch(sk->state)
	{
		case TCP_SYN_SENT:
			/* Never synchronized: just drop to CLOSE, no FIN. */
			break;
		case TCP_SYN_RECV:
		case TCP_ESTABLISHED:
			/* Active close: send FIN and await its ack. */
			ns=TCP_FIN_WAIT1;
			send_fin=1;
			break;
		case TCP_FIN_WAIT1:
		case TCP_FIN_WAIT2:
		case TCP_CLOSING:
			/* FIN already sent: stay put. */
			ns=sk->state;
			break;
		case TCP_CLOSE:
		case TCP_LISTEN:
			/* Nothing on the wire to finish. */
			break;
		case TCP_CLOSE_WAIT:
			/* Peer closed first: our FIN completes the handshake. */
			ns=TCP_LAST_ACK;
			send_fin=1;
	}

	tcp_set_state(sk,ns);

	/*
	 * A dead socket in FIN_WAIT2 must time out eventually (the peer
	 * may never send its FIN).  Only start the timer if one is not
	 * already running; del_timer() returns nonzero if it was active,
	 * in which case it is simply re-armed unchanged.
	 */
	if(dead && ns==TCP_FIN_WAIT2)
	{
		int timer_active=del_timer(&sk->timer);
		if(timer_active)
			add_timer(&sk->timer);
		else
			reset_msl_timer(sk, TIME_CLOSE, TCP_FIN_TIMEOUT);
	}

	return send_fin;
}
2257
2258
2259
2260
2261
/*
 * Build and send (or queue) a FIN segment for this socket.  The FIN
 * consumes one sequence number (write_seq is advanced).  If unsent data
 * is still queued the FIN is appended to the write queue so it goes out
 * in order; otherwise it is transmitted immediately and the
 * retransmission timer is armed.
 */
static void tcp_send_fin(struct sock *sk)
{
	struct proto *prot =(struct proto *)sk->prot;
	struct tcphdr *th =(struct tcphdr *)&sk->dummy_th;
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct device *dev=NULL;
	int tmp;

	/*
	 * Drop the lock across the (possibly sleeping) GFP_KERNEL
	 * allocation, then re-take it.
	 */
	release_sock(sk);

	buff = prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
	sk->inuse = 1;

	if (buff == NULL)
	{
		/* GFP_KERNEL allocations are expected to succeed. */
		printk("tcp_send_fin: Impossible malloc failure");
		return;
	}

	/* Fill in the skb administrivia. */
	buff->sk = sk;
	buff->len = sizeof(*t1);
	buff->localroute = sk->localroute;
	t1 =(struct tcphdr *) buff->data;

	/* Lay down the IP (and link) header in front of the TCP header. */
	tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
			 IPPROTO_TCP, sk->opt,
			 sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
	if (tmp < 0)
	{
		int t;
		/*
		 * Header build failed (e.g. no route).  Free the buffer,
		 * still burn the FIN's sequence number, and make sure a
		 * close timeout exists so the socket eventually dies even
		 * though the FIN never left.
		 */
		buff->free = 1;
		prot->wfree(sk,buff->mem_addr, buff->mem_len);
		sk->write_seq++;
		t=del_timer(&sk->timer);
		if(t)
			add_timer(&sk->timer);	/* keep the running timer */
		else
			reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
		return;
	}

	/*
	 * t1 now points just past the headers build_header wrote; copy
	 * the socket's template TCP header there and turn it into a FIN.
	 */
	t1 =(struct tcphdr *)((char *)t1 +tmp);
	buff->len += tmp;
	buff->dev = dev;
	memcpy(t1, th, sizeof(*t1));
	t1->seq = ntohl(sk->write_seq);
	sk->write_seq++;		/* FIN consumes one sequence number */
	buff->h.seq = sk->write_seq;
	t1->ack = 1;
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->window = ntohs(sk->window=tcp_select_window(sk));
	t1->fin = 1;
	t1->rst = 0;
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

	/*
	 * If data is still waiting to be sent, the FIN must follow it:
	 * queue it at the tail.  Otherwise send it now.
	 */
	if (skb_peek(&sk->write_queue) != NULL)
	{
		buff->free = 0;
		if (buff->next != NULL)
		{
			/* Should never already be on a list. */
			printk("tcp_send_fin: next != NULL\n");
			skb_unlink(buff);
		}
		skb_queue_tail(&sk->write_queue, buff);
	}
	else
	{
		sk->sent_seq = sk->write_seq;
		sk->prot->queue_xmit(sk, dev, buff, 0);
		reset_xmit_timer(sk, TIME_WRITE, sk->rto);
	}
}
2360
2361
2362
2363
2364
2365
2366 void tcp_shutdown(struct sock *sk, int how)
2367 {
2368
2369
2370
2371
2372
2373
2374 if (!(how & SEND_SHUTDOWN))
2375 return;
2376
2377
2378
2379
2380
2381 if (sk->state == TCP_FIN_WAIT1 ||
2382 sk->state == TCP_FIN_WAIT2 ||
2383 sk->state == TCP_CLOSING ||
2384 sk->state == TCP_LAST_ACK ||
2385 sk->state == TCP_TIME_WAIT ||
2386 sk->state == TCP_CLOSE ||
2387 sk->state == TCP_LISTEN
2388 )
2389 {
2390 return;
2391 }
2392 sk->inuse = 1;
2393
2394
2395
2396
2397
2398 sk->shutdown |= SEND_SHUTDOWN;
2399
2400
2401
2402
2403
2404 if (sk->partial)
2405 tcp_send_partial(sk);
2406
2407
2408
2409
2410
2411 if(tcp_close_state(sk,0))
2412 tcp_send_fin(sk);
2413
2414 release_sock(sk);
2415 }
2416
2417
2418 static int
2419 tcp_recvfrom(struct sock *sk, unsigned char *to,
2420 int to_len, int nonblock, unsigned flags,
2421 struct sockaddr_in *addr, int *addr_len)
2422 {
2423 int result;
2424
2425
2426
2427
2428
2429
2430
2431 if(addr_len)
2432 *addr_len = sizeof(*addr);
2433 result=tcp_read(sk, to, to_len, nonblock, flags);
2434
2435 if (result < 0)
2436 return(result);
2437
2438 if(addr)
2439 {
2440 addr->sin_family = AF_INET;
2441 addr->sin_port = sk->dummy_th.dest;
2442 addr->sin_addr.s_addr = sk->daddr;
2443 }
2444 return(result);
2445 }
2446
2447
2448
2449
2450
2451
/*
 * Send a RST in response to an unacceptable segment 'th' that arrived
 * from daddr for saddr.  The reply is built from scratch (no socket is
 * involved) and its sequence/ack fields follow RFC 793: if the
 * offending segment carried an ACK, the RST takes its seq from that
 * ack; otherwise the RST acks the segment so the peer can match it.
 */
static void tcp_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
	  struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	int tmp;
	struct device *ndev=NULL;

	/* Never answer a RST with a RST (would cause a storm). */
	if(th->rst)
		return;

	/*
	 * Atomic allocation: we may be in interrupt context.  If memory
	 * is tight the RST is silently dropped; the peer will retry.
	 */
	buff = prot->wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
	  	return;

	buff->len = sizeof(*t1);
	buff->sk = NULL;
	buff->dev = dev;
	buff->localroute = 0;

	t1 =(struct tcphdr *) buff->data;

	/* Put the IP header and routing in place. */
	tmp = prot->build_header(buff, saddr, daddr, &ndev, IPPROTO_TCP, opt,
			   sizeof(struct tcphdr),tos,ttl);
	if (tmp < 0)
	{
		buff->free = 1;
		prot->wfree(NULL, buff->mem_addr, buff->mem_len);
		return;
	}

	t1 =(struct tcphdr *)((char *)t1 +tmp);
	buff->len += tmp;
	/* Start from the offending header, then swap/override fields. */
	memcpy(t1, th, sizeof(*t1));

	/* Swap the ports: the reply goes back where the segment came from. */
	t1->dest = th->source;
	t1->source = th->dest;
	t1->rst = 1;
	t1->window = 0;

	if(th->ack)
	{
		/*
		 * Segment had an ACK: RST carries that ack value as its
		 * own sequence (already in network order, copied as-is)
		 * and no ACK of its own.
		 */
	  	t1->ack = 0;
	  	t1->seq = th->ack_seq;
	  	t1->ack_seq = 0;
	}
	else
	{
		/*
		 * No ACK in the segment: ack everything it occupied (SYN
		 * counts for one sequence number) so the peer can match
		 * the RST to it.
		 */
	  	t1->ack = 1;
	  	if(!th->syn)
			t1->ack_seq=htonl(th->seq);
		else
			t1->ack_seq=htonl(th->seq+1);
		t1->seq=0;
	}

	t1->syn = 0;
	t1->urg = 0;
	t1->fin = 0;
	t1->psh = 0;
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, saddr, daddr, sizeof(*t1), NULL);
	prot->queue_xmit(NULL, ndev, buff, 1);
	tcp_statistics.TcpOutSegs++;
}
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
/*
 * Parse the TCP options that follow the fixed header and update the
 * socket accordingly.  Currently only MSS (on SYN segments) is acted
 * upon; unknown options are skipped by their length field.  Finally
 * sk->mss is recomputed from the (possibly updated) mtu and the
 * observed maximum window.
 */
static void tcp_options(struct sock *sk, struct tcphdr *th)
{
	unsigned char *ptr;
	/* Bytes of options = header length minus the fixed 20 bytes. */
	int length=(th->doff*4)-sizeof(struct tcphdr);
	int mss_seen = 0;

	ptr = (unsigned char *)(th + 1);

	while(length>0)
	{
		int opcode=*ptr++;
		int opsize=*ptr++;	/* only meaningful for multi-byte options */
		switch(opcode)
		{
			case TCPOPT_EOL:
				/*
				 * NOTE(review): returning here skips the
				 * SYN-default and sk->mss recalculation at the
				 * bottom — presumably intentional for
				 * option-terminated SYNs, but verify.
				 */
				return;
			case TCPOPT_NOP:
				/* Single-byte option: undo the opsize read. */
				length--;
				ptr--;
				continue;

			default:
				/* Malformed length: stop parsing. */
				if(opsize<=2)
					return;
				switch(opcode)
				{
					case TCPOPT_MSS:
						/* MSS is only valid on a SYN. */
						if(opsize==4 && th->syn)
						{
							sk->mtu=min(sk->mtu,ntohs(*(unsigned short *)ptr));
							mss_seen = 1;
						}
						break;
						/* Other options are ignored. */
				}
				/* Skip the option body (opsize includes kind+len). */
				ptr+=opsize-2;
				length-=opsize;
		}
	}
	if (th->syn)
	{
		/* RFC 1122: assume the default MSS if none was offered. */
		if (! mss_seen)
			sk->mtu=min(sk->mtu, 536);
	}
#ifdef CONFIG_INET_PCTCP
	sk->mss = min(sk->max_window >> 1, sk->mtu);
#else
	sk->mss = min(sk->max_window, sk->mtu);
#endif
}
2597
/*
 * Return the classful network mask for the destination address 'dst'
 * (given and returned in network byte order): /8 for class A, /16 for
 * class B, /24 otherwise.
 */
static inline unsigned long default_mask(unsigned long dst)
{
	unsigned long host = ntohl(dst);	/* classify in host order */
	unsigned long net;

	if (IN_CLASSA(host))
		net = IN_CLASSA_NET;
	else if (IN_CLASSB(host))
		net = IN_CLASSB_NET;
	else
		net = IN_CLASSC_NET;

	return htonl(net);
}
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616
/*
 * Pick an initial sequence number for a new connection, derived from
 * the wall clock in microseconds (wraps modulo 2^32).
 *
 * NOTE(review): a purely clock-based ISN is predictable and is a known
 * spoofing weakness (see RFC 1948/6528); modern stacks mix in a secret.
 * Kept as-is here since changing it alters protocol behavior.
 */
extern inline u32 tcp_init_seq(void)
{
	struct timeval tv;
	do_gettimeofday(&tv);
	return tv.tv_usec+tv.tv_sec*1000000;
}
2623
2624
2625
2626
2627
2628
2629
2630
2631
/*
 * Handle an incoming SYN on a listening socket: allocate and initialize
 * a child sock in SYN_RECV, compute its MTU/MSS from routing and the
 * peer's options, and transmit the SYN+ACK (with an MSS option).  The
 * SYN skb is re-accounted to the child and left on the listener's
 * receive queue for accept() to find.
 *
 * 'seq' is the initial send sequence chosen for the child.  On any
 * failure the connection attempt is dropped (and counted).
 */
static void tcp_conn_request(struct sock *sk, struct sk_buff *skb,
		 unsigned long daddr, unsigned long saddr,
		 struct options *opt, struct device *dev, u32 seq)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	unsigned char *ptr;
	struct sock *newsk;
	struct tcphdr *th;
	struct device *ndev=NULL;
	int tmp;
	struct rtable *rt;

	th = skb->h.th;

	/* A dead listener cannot accept: answer with a reset. */
	if (!sk->dead)
	{
		sk->data_ready(sk,0);	/* wake anyone blocked in accept() */
	}
	else
	{
		if(sk->debug)
			printk("Reset on %p: Connect on dead socket.\n",sk);
		tcp_reset(daddr, saddr, th, sk->prot, opt, dev, sk->ip_tos,sk->ip_ttl);
		tcp_statistics.TcpAttemptFails++;
		kfree_skb(skb, FREE_READ);
		return;
	}

	/* Backlog full: silently drop; the client will retransmit. */
	if (sk->ack_backlog >= sk->max_ack_backlog)
	{
		tcp_statistics.TcpAttemptFails++;
		kfree_skb(skb, FREE_READ);
		return;
	}

	/*
	 * Create the child socket as a copy of the listener, then reset
	 * every per-connection field to a clean initial state.
	 */
	newsk = (struct sock *) kmalloc(sizeof(struct sock), GFP_ATOMIC);
	if (newsk == NULL)
	{
		/* Out of atomic memory: drop, client retries. */
		tcp_statistics.TcpAttemptFails++;
		kfree_skb(skb, FREE_READ);
		return;
	}

	memcpy(newsk, sk, sizeof(*newsk));
	skb_queue_head_init(&newsk->write_queue);
	skb_queue_head_init(&newsk->receive_queue);
	newsk->send_head = NULL;
	newsk->send_tail = NULL;
	skb_queue_head_init(&newsk->back_log);
	newsk->rtt = 0;
	newsk->rto = TCP_TIMEOUT_INIT;
	newsk->mdev = 0;
	newsk->max_window = 0;
	newsk->cong_window = 1;		/* slow start from one segment */
	newsk->cong_count = 0;
	newsk->ssthresh = 0;
	newsk->backoff = 0;
	newsk->blog = 0;
	newsk->intr = 0;
	newsk->proc = 0;
	newsk->done = 0;
	newsk->partial = NULL;
	newsk->pair = NULL;
	newsk->wmem_alloc = 0;
	newsk->rmem_alloc = 0;
	newsk->localroute = sk->localroute;

	newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;

	newsk->err = 0;
	newsk->shutdown = 0;
	newsk->ack_backlog = 0;
	/* The peer's SYN consumes one sequence number. */
	newsk->acked_seq = skb->h.th->seq+1;
	newsk->copied_seq = skb->h.th->seq+1;
	newsk->fin_seq = skb->h.th->seq;
	newsk->state = TCP_SYN_RECV;
	newsk->timeout = 0;
	newsk->ip_xmit_timeout = 0;
	newsk->write_seq = seq;
	newsk->window_seq = newsk->write_seq;
	newsk->rcv_ack_seq = newsk->write_seq;
	newsk->urg_data = 0;
	newsk->retransmits = 0;
	newsk->linger=0;
	newsk->destroy = 0;
	init_timer(&newsk->timer);
	newsk->timer.data = (unsigned long)newsk;
	newsk->timer.function = &net_timer;
	init_timer(&newsk->retransmit_timer);
	newsk->retransmit_timer.data = (unsigned long)newsk;
	newsk->retransmit_timer.function=&retransmit_timer;
	newsk->dummy_th.source = skb->h.th->dest;
	newsk->dummy_th.dest = skb->h.th->source;

	/* Swap addresses: child's daddr is the SYN's source. */
	newsk->daddr = saddr;
	newsk->saddr = daddr;

	put_sock(newsk->num,newsk);
	newsk->dummy_th.res1 = 0;
	newsk->dummy_th.doff = 6;
	newsk->dummy_th.fin = 0;
	newsk->dummy_th.syn = 0;
	newsk->dummy_th.rst = 0;
	newsk->dummy_th.psh = 0;
	newsk->dummy_th.ack = 0;
	newsk->dummy_th.urg = 0;
	newsk->dummy_th.res2 = 0;
	newsk->acked_seq = skb->h.th->seq + 1;
	newsk->copied_seq = skb->h.th->seq + 1;
	newsk->socket = NULL;

	/* Inherit TTL; take TOS from the arriving packet. */
	newsk->ip_ttl=sk->ip_ttl;
	newsk->ip_tos=skb->ip_hdr->tos;

	/*
	 * Determine window clamp and MTU from the route to the peer,
	 * with user_mss taking precedence and a conservative 576-byte
	 * fallback for off-net destinations.
	 */
	rt=ip_rt_route(saddr, NULL,NULL);

	if(rt!=NULL && (rt->rt_flags&RTF_WINDOW))
		newsk->window_clamp = rt->rt_window;
	else
		newsk->window_clamp = 0;

	if (sk->user_mss)
		newsk->mtu = sk->user_mss;
	else if(rt!=NULL && (rt->rt_flags&RTF_MSS))
		newsk->mtu = rt->rt_mss - HEADER_SIZE;
	else
	{
#ifdef CONFIG_INET_SNARL
		if ((saddr ^ daddr) & default_mask(saddr))
#else
		if ((saddr ^ daddr) & dev->pa_mask)
#endif
			newsk->mtu = 576 - HEADER_SIZE;
		else
			newsk->mtu = MAX_WINDOW;
	}

	/* Never exceed what the interface can carry. */
	newsk->mtu = min(newsk->mtu, dev->mtu - HEADER_SIZE);

	/* Apply the peer's options (notably its MSS). */
	tcp_options(newsk,skb->h.th);

	tcp_cache_zap();

	/* Build the SYN+ACK reply. */
	buff = newsk->prot->wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
	{
		sk->err = ENOMEM;
		newsk->dead = 1;
		newsk->state = TCP_CLOSE;
		/* release will destroy the dead, closed child */
		release_sock(newsk);
		kfree_skb(skb, FREE_READ);
		tcp_statistics.TcpAttemptFails++;
		return;
	}

	buff->len = sizeof(struct tcphdr)+4;	/* header + MSS option */
	buff->sk = newsk;
	buff->localroute = newsk->localroute;

	t1 =(struct tcphdr *) buff->data;

	/* IP header for the reply. */
	tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &ndev,
			       IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);

	if (tmp < 0)
	{
		/* No route back: tear the child down and drop the SYN. */
		sk->err = tmp;
		buff->free = 1;
		kfree_skb(buff,FREE_WRITE);
		newsk->dead = 1;
		newsk->state = TCP_CLOSE;
		release_sock(newsk);
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
		tcp_statistics.TcpAttemptFails++;
		return;
	}

	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)t1 +tmp);

	memcpy(t1, skb->h.th, sizeof(*t1));
	buff->h.seq = newsk->write_seq;

	/* SYN+ACK: our ISN, acking the peer's SYN. */
	t1->dest = skb->h.th->source;
	t1->source = newsk->dummy_th.source;
	t1->seq = ntohl(newsk->write_seq++);
	t1->ack = 1;
	newsk->window = tcp_select_window(newsk);
	newsk->sent_seq = newsk->write_seq;
	t1->window = ntohs(newsk->window);
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->psh = 0;
	t1->syn = 1;
	t1->ack_seq = ntohl(skb->h.th->seq+1);
	t1->doff = sizeof(*t1)/4+1;	/* header plus 4 option bytes */
	/* MSS option: kind 2, length 4, 16-bit value. */
	ptr =(unsigned char *)(t1+1);
	ptr[0] = 2;
	ptr[1] = 4;
	ptr[2] = ((newsk->mtu) >> 8) & 0xff;
	ptr[3] =(newsk->mtu) & 0xff;

	tcp_send_check(t1, daddr, saddr, sizeof(*t1)+4, newsk);
	newsk->prot->queue_xmit(newsk, ndev, buff, 0);
	reset_xmit_timer(newsk, TIME_WRITE , TCP_TIMEOUT_INIT);
	skb->sk = newsk;

	/*
	 * Move the SYN skb's memory accounting from listener to child,
	 * then leave it on the listener's queue for accept().
	 */
	sk->rmem_alloc -= skb->mem_len;
	newsk->rmem_alloc += skb->mem_len;

	skb_queue_tail(&sk->receive_queue,skb);
	sk->ack_backlog++;
	release_sock(newsk);
	tcp_statistics.TcpOutSegs++;
}
2905
2906
/*
 * Close a TCP socket (the close(2)/SO_LINGER backend).  A listening
 * socket is torn down immediately along with its pending connections.
 * Otherwise, with timeout==0 unread data is discarded and the state
 * machine decides whether a FIN is needed; with a nonzero timeout the
 * socket is forced straight to CLOSE.
 */
static void tcp_close(struct sock *sk, int timeout)
{
	sk->inuse = 1;		/* lock the socket */

	/* Invalidate the demux cache if it points at us. */
	if(th_cache_sk==sk)
		tcp_cache_zap();
	if(sk->state == TCP_LISTEN)
	{
		/* A listener has nothing on the wire: just tear down. */
		tcp_set_state(sk, TCP_CLOSE);
		tcp_close_pending(sk);
		release_sock(sk);
		return;
	}

	sk->keepopen = 1;
	sk->shutdown = SHUTDOWN_MASK;	/* no more sends or receives */

	if (!sk->dead)
		sk->state_change(sk);

	if (timeout == 0)
	{
		struct sk_buff *skb;

		/* Discard anything the application never read. */
		while((skb=skb_dequeue(&sk->receive_queue))!=NULL)
			kfree_skb(skb, FREE_READ);

		/* Flush any partially assembled outgoing segment. */
		if (sk->partial)
			tcp_send_partial(sk);
	}

	/*
	 * Forced close goes straight to CLOSE; a graceful one lets the
	 * state machine decide whether to emit a FIN.
	 */
	if(timeout)
	{
		tcp_set_state(sk, TCP_CLOSE);
	}
	else
	{
		if(tcp_close_state(sk,1)==1)
		{
			tcp_send_fin(sk);
		}
	}
	release_sock(sk);
}
2972
2973
2974
2975
2976
2977
2978
2979
/*
 * Transmit queued segments that now fit inside both the peer's
 * advertised window and our congestion window.  Segments already
 * covered by the peer's acks are simply freed; the rest get a fresh
 * ack/window stamp and checksum before being handed to IP, and the
 * retransmit timer is (re)armed.
 */
static void tcp_write_xmit(struct sock *sk)
{
	struct sk_buff *skb;

	/* A reset connection transmits nothing. */
	if(sk->zapped)
		return;

	/*
	 * Send while: something is queued, it fits the offered window,
	 * we are not mid-retransmission of later data, and the
	 * congestion window allows another packet in flight.
	 */
	while((skb = skb_peek(&sk->write_queue)) != NULL &&
		before(skb->h.seq, sk->window_seq + 1) &&
		(sk->retransmits == 0 ||
		 sk->ip_xmit_timeout != TIME_WRITE ||
		 before(skb->h.seq, sk->rcv_ack_seq + 1))
		&& sk->packets_out < sk->cong_window)
	{
		IS_SKB(skb);
		skb_unlink(skb);

		/* Already acknowledged (ack raced the queue): free it. */
		if (before(skb->h.seq, sk->rcv_ack_seq +1))
		{
			sk->retransmits = 0;
			kfree_skb(skb, FREE_WRITE);
			if (!sk->dead)
				sk->write_space(sk);	/* buffer space freed */
		}
		else
		{
			struct tcphdr *th;
			struct iphdr *iph;
			int size;

			/*
			 * Refresh the header: the segment was built
			 * earlier, so its ack and window fields are stale.
			 */
			iph = (struct iphdr *)(skb->data +
				skb->dev->hard_header_len);
			th = (struct tcphdr *)(((char *)iph) +(iph->ihl << 2));
			size = skb->len - (((unsigned char *) th) - skb->data);

			th->ack_seq = ntohl(sk->acked_seq);
			th->window = ntohs(tcp_select_window(sk));

			tcp_send_check(th, sk->saddr, sk->daddr, size, sk);

			sk->sent_seq = skb->h.seq;

			/* Hand the packet to the IP layer. */
			sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);

			/* Restart the retransmission clock. */
			reset_xmit_timer(sk, TIME_WRITE, sk->rto);
		}
	}
}
3064
3065
3066
3067
3068
3069
/*
 * Process the ACK and window fields of an incoming segment.  Returns 1
 * when the ack is acceptable (or the socket is zapped), 0 when it acks
 * data we never sent.
 *
 * Responsibilities, in order: track the largest window seen (and derive
 * mss), reject out-of-range acks, handle window shrinkage by moving
 * now-unsendable packets back from the retransmit list to the write
 * queue, open the congestion window (slow start / congestion
 * avoidance), clear zero-window probing, release acked packets from the
 * retransmit list while updating the RTT estimator, kick further
 * transmission or arm the right timer, and finally drive the closing
 * states (LAST_ACK, FIN_WAIT1, CLOSING, SYN_RECV) and any needed fast
 * retransmit.
 *
 * 'flag' bits (internal): 1 = this segment did useful work, 2 = acked
 * something / ambiguous-RTT, 4 = window-shape changed.
 */
extern __inline__ int tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
{
	u32 ack;
	int flag = 0;

	/* Connection was reset: pretend the ack was fine. */
	if(sk->zapped)
		return(1);

	ack = ntohl(th->ack_seq);

	/* Track the biggest window the peer ever offered; derive mss. */
	if (ntohs(th->window) > sk->max_window)
	{
  		sk->max_window = ntohs(th->window);
#ifdef CONFIG_INET_PCTCP
		sk->mss = min(sk->max_window>>1, sk->mtu);
#else
		sk->mss = min(sk->max_window, sk->mtu);
#endif
	}

	/* Any traffic on a keepalive connection proves it is alive. */
	if (sk->retransmits && sk->ip_xmit_timeout == TIME_KEEPOPEN)
	  	sk->retransmits = 0;

	/*
	 * Reject acks for data beyond what we sent, and ignore stale
	 * duplicates of already-processed acks.
	 */
	if (after(ack, sk->sent_seq) || before(ack, sk->rcv_ack_seq))
	{
		if(sk->debug)
			printk("Ack ignored %u %u\n",ack,sk->sent_seq);

		/* Ack of unsent data: unacceptable. */
		if (after(ack, sk->sent_seq))
		{
			return(0);
		}

		/* Old duplicate: still counts as keepalive evidence. */
		if (sk->keepopen)
		{
			if(sk->ip_xmit_timeout==TIME_KEEPOPEN)
				reset_xmit_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
		}
		return(1);
	}

	/* Segment carries data too, not just the bare header. */
	if (len != th->doff*4)
		flag |= 1;

	/*
	 * Window shrank below data already handed to the retransmit
	 * list: move everything past the new right edge back onto the
	 * front of the write queue, preserving order.
	 */
	if (after(sk->window_seq, ack+ntohs(th->window)))
	{
		struct sk_buff *skb;
		struct sk_buff *skb2;
		struct sk_buff *wskb = NULL;

		skb2 = sk->send_head;
		sk->send_head = NULL;
		sk->send_tail = NULL;

		flag |= 4;	/* window shape changed */

		sk->window_seq = ack + ntohs(th->window);
		cli();		/* the lists are touched from interrupts */
		while (skb2 != NULL)
		{
			skb = skb2;
			skb2 = skb->link3;
			skb->link3 = NULL;
			if (after(skb->h.seq, sk->window_seq))
			{
				/* Beyond the new window: back to write queue. */
				if (sk->packets_out > 0)
					sk->packets_out--;

				if (skb->next != NULL)
				{
					skb_unlink(skb);
				}
				/* wskb tracks the insertion point so order holds. */
				if (wskb == NULL)
					skb_queue_head(&sk->write_queue,skb);
				else
					skb_append(wskb,skb);
				wskb = skb;
			}
			else
			{
				/* Still sendable: keep on the retransmit list. */
				if (sk->send_head == NULL)
				{
					sk->send_head = skb;
					sk->send_tail = skb;
				}
				else
				{
					sk->send_tail->link3 = skb;
					sk->send_tail = skb;
				}
				skb->link3 = NULL;
			}
		}
		sti();
	}

	/* Keep head/tail/packets_out mutually consistent. */
	if (sk->send_tail == NULL || sk->send_head == NULL)
	{
		sk->send_head = NULL;
		sk->send_tail = NULL;
		sk->packets_out= 0;
	}

	/* Record the right edge of the offered window. */
	sk->window_seq = ack + ntohs(th->window);

	/*
	 * Congestion window growth: exponential below ssthresh (slow
	 * start), linear above it (one segment per window's worth of
	 * acks), capped at 2048.
	 */
	if (sk->ip_xmit_timeout == TIME_WRITE &&
		sk->cong_window < 2048 && after(ack, sk->rcv_ack_seq))
	{
		if (sk->cong_window < sk->ssthresh)
			/* slow start: +1 per ack */
			sk->cong_window++;
		else
		{
			/* congestion avoidance: +1 per full window of acks */
			if (sk->cong_count >= sk->cong_window)
			{
				sk->cong_window++;
				sk->cong_count = 0;
			}
			else
				sk->cong_count++;
		}
	}

	/* Remember the highest ack seen. */
	sk->rcv_ack_seq = ack;

	/*
	 * If we were probing a zero window and the queue head now fits,
	 * stop probing and restore a clean rto from the estimator.
	 */
	if (sk->ip_xmit_timeout == TIME_PROBE0)
	{
  		sk->retransmits = 0;	/* our probe was acked */

		if (skb_peek(&sk->write_queue) != NULL &&
			! before (sk->window_seq, sk->write_queue.next->h.seq))
		{
			sk->backoff = 0;

			/* rto = rtt/2 + mdev, clamped to [20, 120*HZ]. */
			sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
			if (sk->rto > 120*HZ)
				sk->rto = 120*HZ;
			if (sk->rto < 20)
				sk->rto = 20;
		}
	}

	/* Release everything on the retransmit list that is now acked. */
	while(sk->send_head != NULL)
	{
		/* Sanity: the list must stay sequence-ordered. */
		if (sk->send_head->link3 &&
			after(sk->send_head->h.seq, sk->send_head->link3->h.seq))
			printk("INET: tcp.c: *** bug send_list out of order.\n");

		if (before(sk->send_head->h.seq, ack+1))
		{
			struct sk_buff *oskb;
			if (sk->retransmits)
			{
				/* An ack during retransmission: RTT sample invalid. */
				flag |= 2;

				/*
				 * Still retransmitting if more segments
				 * remain; otherwise the burst is over.
				 */
				if (sk->send_head->link3)
					sk->retransmits = 1;
				else
					sk->retransmits = 0;
			}

			if (sk->packets_out > 0)
				sk->packets_out --;

			if (!sk->dead)
				sk->write_space(sk);
			oskb = sk->send_head;

			if (!(flag&2))
			{
				long m;

				/*
				 * Van Jacobson RTT estimator (fixed point):
				 * rtt holds 8*srtt, mdev holds 4*mean dev.
				 */
				m = jiffies - oskb->when;
				if(m<=0)
					m=1;
				m -= (sk->rtt >> 3);
				sk->rtt += m;
				if (m < 0)
					m = -m;
				m -= (sk->mdev >> 2);
				sk->mdev += m;

				/* rto = srtt/2 + mdev, clamped. */
				sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
				if (sk->rto > 120*HZ)
					sk->rto = 120*HZ;
				if (sk->rto < 20)
					sk->rto = 20;
				sk->backoff = 0;
			}
			flag |= (2|4);	/* progress was made */

			cli();
			oskb = sk->send_head;
			IS_SKB(oskb);
			sk->send_head = oskb->link3;
			if (sk->send_head == NULL)
			{
				sk->send_tail = NULL;
			}

			/* May still sit on a device queue: detach first. */
			if (oskb->next)
				skb_unlink(oskb);
			sti();
			kfree_skb(oskb, FREE_WRITE);
			if (!sk->dead)
				sk->write_space(sk);
		}
		else
		{
			break;	/* list is ordered: nothing further is acked */
		}
	}

	/*
	 * Either push out newly sendable data, or arm the zero-window
	 * probe timer when the peer's window blocks the queue head.
	 */
	if (skb_peek(&sk->write_queue) != NULL)
	{
		if (after (sk->window_seq+1, sk->write_queue.next->h.seq) &&
			(sk->retransmits == 0 ||
			 sk->ip_xmit_timeout != TIME_WRITE ||
			 before(sk->write_queue.next->h.seq, sk->rcv_ack_seq + 1))
			&& sk->packets_out < sk->cong_window)
		{
			/* Room in both windows: transmit. */
			flag |= 1;
			tcp_write_xmit(sk);
		}
		else if (before(sk->window_seq, sk->write_queue.next->h.seq) &&
 			sk->send_head == NULL &&
 			sk->ack_backlog == 0 &&
 			sk->state != TCP_TIME_WAIT)
 		{
			/* Blocked by a closed window: probe it. */
 			reset_xmit_timer(sk, TIME_PROBE0, sk->rto);
 		}
	}
	else
	{
		/*
		 * Nothing queued: pick the timer appropriate to the
		 * state — 2MSL in TIME_WAIT, nothing in CLOSE, else
		 * write/keepalive/no timer as the situation demands.
		 */
		switch(sk->state) {
		case TCP_TIME_WAIT:
			reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			break;
		case TCP_CLOSE:
			break;
		default:
			if (sk->send_head || skb_peek(&sk->write_queue) != NULL || sk->ack_backlog) {
				reset_xmit_timer(sk, TIME_WRITE, sk->rto);
			} else if (sk->keepopen) {
				reset_xmit_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
			} else {
				del_timer(&sk->retransmit_timer);
				sk->ip_xmit_timeout = 0;
			}
			break;
		}
	}

	/*
	 * Everything in flight is acked and only a partial segment
	 * remains: push it out now rather than wait for the timer.
	 */
	if (sk->packets_out == 0 && sk->partial != NULL &&
		skb_peek(&sk->write_queue) == NULL && sk->send_head == NULL)
	{
		flag |= 1;
		tcp_send_partial(sk);
	}

	/* LAST_ACK: our FIN acked means the connection is fully closed. */
	if (sk->state == TCP_LAST_ACK)
	{
		if (!sk->dead)
			sk->state_change(sk);
		if(sk->debug)
			printk("rcv_ack_seq: %X==%X, acked_seq: %X==%X\n",
				sk->rcv_ack_seq,sk->write_seq,sk->acked_seq,sk->fin_seq);
		if (sk->rcv_ack_seq == sk->write_seq )
		{
			flag |= 1;
			tcp_set_state(sk,TCP_CLOSE);
			sk->shutdown = SHUTDOWN_MASK;
		}
	}

	/* FIN_WAIT1: our FIN acked moves us to FIN_WAIT2. */
	if (sk->state == TCP_FIN_WAIT1)
	{

		if (!sk->dead)
			sk->state_change(sk);
		if (sk->rcv_ack_seq == sk->write_seq)
		{
			flag |= 1;
			sk->shutdown |= SEND_SHUTDOWN;
			tcp_set_state(sk, TCP_FIN_WAIT2);
		}
	}

	/* CLOSING (simultaneous close): FIN acked enters TIME_WAIT. */
	if (sk->state == TCP_CLOSING)
	{

		if (!sk->dead)
			sk->state_change(sk);
		if (sk->rcv_ack_seq == sk->write_seq)
		{
			flag |= 1;
			tcp_time_wait(sk);
		}
	}

	/* SYN_RECV: ack of our SYN completes the handshake. */
	if(sk->state==TCP_SYN_RECV)
	{
		tcp_set_state(sk, TCP_ESTABLISHED);
		tcp_options(sk,th);
		sk->dummy_th.dest=th->source;
		sk->copied_seq = sk->acked_seq;
		if(!sk->dead)
			sk->state_change(sk);
		if(sk->max_window==0)
		{
			/* Peer never offered a window: assume a tiny one. */
			sk->max_window=32;
			sk->mss=min(sk->max_window,sk->mtu);
		}
	}

	/*
	 * Fast retransmit heuristic: an ack that did no useful work (or
	 * only reshaped the window) while unacked data is outstanding
	 * and overdue triggers an immediate retransmission.
	 */
	if (((!flag) || (flag&4)) && sk->send_head != NULL &&
		(((flag&2) && sk->retransmits) ||
		 (sk->send_head->when + sk->rto < jiffies)))
	{
		if(sk->send_head->when + sk->rto < jiffies)
			tcp_retransmit(sk,0);	/* timed out: back off too */
		else
		{
			tcp_do_retransmit(sk, 1);	/* resend head only */
			reset_xmit_timer(sk, TIME_WRITE, sk->rto);
		}
	}

	return(1);
}
3655
3656
3657
3658
3659
3660
3661
3662
3663
3664
3665
3666
3667
3668
3669
3670
3671
3672
/*
 * Process a FIN received from the peer: record where the stream ends
 * (fin_seq), wake the application, and advance the connection state
 * machine according to the current state.  Always returns 0.
 */
static int tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
{
	/* Sequence number just past the FIN (SYN/FIN each count one). */
	sk->fin_seq = th->seq + skb->len + th->syn + th->fin;

	if (!sk->dead)
	{
		sk->state_change(sk);
		sock_wake_async(sk->socket, 1);
	}

	switch(sk->state)
	{
		case TCP_SYN_RECV:
		case TCP_SYN_SENT:
		case TCP_ESTABLISHED:
			/* Peer closed first: we enter CLOSE_WAIT. */
			tcp_set_state(sk,TCP_CLOSE_WAIT);
			if (th->rst)
				sk->shutdown = SHUTDOWN_MASK;
			break;

		case TCP_CLOSE_WAIT:
		case TCP_CLOSING:
			/* Retransmitted FIN: nothing further to do. */
			break;
		case TCP_TIME_WAIT:
			/* Retransmitted FIN: restart the 2MSL wait. */
			reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			return(0);
		case TCP_FIN_WAIT1:
			/*
			 * Simultaneous close: both FINs crossed.  Move to
			 * CLOSING and make sure a write timer runs so our
			 * own FIN keeps being retransmitted until acked.
			 */
			if(sk->ip_xmit_timeout != TIME_WRITE)
				reset_xmit_timer(sk, TIME_WRITE, sk->rto);
			tcp_set_state(sk,TCP_CLOSING);
			break;
		case TCP_FIN_WAIT2:
			/* Our FIN was already acked: straight to TIME_WAIT. */
			reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			sk->shutdown|=SHUTDOWN_MASK;
			tcp_set_state(sk,TCP_TIME_WAIT);
			break;
		case TCP_CLOSE:
			/* Already closed: ignore. */
			break;
		default:
			/*
			 * Any remaining state: answer the peer's FIN with
			 * our LAST_ACK, with a timeout as a backstop.
			 */
			tcp_set_state(sk,TCP_LAST_ACK);

			reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			return(0);
	}

	return(0);
}
3751
3752
3753
3754
3755
3756
3757
3758
3759
3760 extern __inline__ int tcp_data(struct sk_buff *skb, struct sock *sk,
3761 unsigned long saddr, unsigned short len)
3762 {
3763 struct sk_buff *skb1, *skb2;
3764 struct tcphdr *th;
3765 int dup_dumped=0;
3766 u32 new_seq, shut_seq;
3767
3768 th = skb->h.th;
3769 skb->len = len -(th->doff*4);
3770
3771
3772
3773
3774
3775
3776 sk->bytes_rcv += skb->len;
3777
3778 if (skb->len == 0 && !th->fin)
3779 {
3780
3781
3782
3783
3784 if (!th->ack)
3785 tcp_send_ack(sk->sent_seq, sk->acked_seq,sk, th, saddr);
3786 kfree_skb(skb, FREE_READ);
3787 return(0);
3788 }
3789
3790
3791
3792
3793
3794 #ifndef TCP_DONT_RST_SHUTDOWN
3795
3796 if(sk->shutdown & RCV_SHUTDOWN)
3797 {
3798
3799
3800
3801
3802
3803
3804
3805 if(skb->len)
3806
3807 {
3808 new_seq= th->seq + skb->len + th->syn;
3809
3810
3811
3812
3813
3814
3815
3816
3817
3818
3819
3820
3821 shut_seq=sk->acked_seq+1;
3822
3823 if(after(new_seq,shut_seq))
3824 {
3825 if(sk->debug)
3826 printk("Data arrived on %p after close [Data right edge %X, Socket shut on %X] %d\n",
3827 sk, new_seq, shut_seq, sk->blog);
3828 if(sk->dead)
3829 {
3830 sk->acked_seq = new_seq + th->fin;
3831 tcp_reset(sk->saddr, sk->daddr, skb->h.th,
3832 sk->prot, NULL, skb->dev, sk->ip_tos, sk->ip_ttl);
3833 tcp_statistics.TcpEstabResets++;
3834 tcp_set_state(sk,TCP_CLOSE);
3835 sk->err = EPIPE;
3836 sk->shutdown = SHUTDOWN_MASK;
3837 kfree_skb(skb, FREE_READ);
3838 return 0;
3839 }
3840 }
3841 }
3842 }
3843
3844 #endif
3845
3846
3847
3848
3849
3850
3851
3852
3853
3854
3855
3856
3857
3858 if (skb_peek(&sk->receive_queue) == NULL)
3859 {
3860 skb_queue_head(&sk->receive_queue,skb);
3861 skb1= NULL;
3862 }
3863 else
3864 {
3865 for(skb1=sk->receive_queue.prev; ; skb1 = skb1->prev)
3866 {
3867 if(sk->debug)
3868 {
3869 printk("skb1=%p :", skb1);
3870 printk("skb1->h.th->seq = %d: ", skb1->h.th->seq);
3871 printk("skb->h.th->seq = %d\n",skb->h.th->seq);
3872 printk("copied_seq = %d acked_seq = %d\n", sk->copied_seq,
3873 sk->acked_seq);
3874 }
3875
3876
3877
3878
3879
3880
3881
3882
3883
3884 if (th->seq==skb1->h.th->seq && skb->len>= skb1->len)
3885 {
3886 skb_append(skb1,skb);
3887 skb_unlink(skb1);
3888 kfree_skb(skb1,FREE_READ);
3889 dup_dumped=1;
3890 skb1=NULL;
3891 break;
3892 }
3893
3894
3895
3896
3897
3898 if (after(th->seq+1, skb1->h.th->seq))
3899 {
3900 skb_append(skb1,skb);
3901 break;
3902 }
3903
3904
3905
3906
3907 if (skb1 == skb_peek(&sk->receive_queue))
3908 {
3909 skb_queue_head(&sk->receive_queue, skb);
3910 break;
3911 }
3912 }
3913 }
3914
3915
3916
3917
3918
3919 th->ack_seq = th->seq + skb->len;
3920 if (th->syn)
3921 th->ack_seq++;
3922 if (th->fin)
3923 th->ack_seq++;
3924
3925 if (before(sk->acked_seq, sk->copied_seq))
3926 {
3927 printk("*** tcp.c:tcp_data bug acked < copied\n");
3928 sk->acked_seq = sk->copied_seq;
3929 }
3930
3931
3932
3933
3934
3935
3936
3937 if ((!dup_dumped && (skb1 == NULL || skb1->acked)) || before(th->seq, sk->acked_seq+1))
3938 {
3939 if (before(th->seq, sk->acked_seq+1))
3940 {
3941 int newwindow;
3942
3943 if (after(th->ack_seq, sk->acked_seq))
3944 {
3945 newwindow = sk->window-(th->ack_seq - sk->acked_seq);
3946 if (newwindow < 0)
3947 newwindow = 0;
3948 sk->window = newwindow;
3949 sk->acked_seq = th->ack_seq;
3950 }
3951 skb->acked = 1;
3952
3953
3954
3955
3956
3957
3958 if (skb->h.th->fin)
3959 {
3960 tcp_fin(skb,sk,skb->h.th);
3961 }
3962
3963 for(skb2 = skb->next;
3964 skb2 != (struct sk_buff *)&sk->receive_queue;
3965 skb2 = skb2->next)
3966 {
3967 if (before(skb2->h.th->seq, sk->acked_seq+1))
3968 {
3969 if (after(skb2->h.th->ack_seq, sk->acked_seq))
3970 {
3971 newwindow = sk->window -
3972 (skb2->h.th->ack_seq - sk->acked_seq);
3973 if (newwindow < 0)
3974 newwindow = 0;
3975 sk->window = newwindow;
3976 sk->acked_seq = skb2->h.th->ack_seq;
3977 }
3978 skb2->acked = 1;
3979
3980
3981
3982
3983 if (skb2->h.th->fin)
3984 {
3985 tcp_fin(skb,sk,skb->h.th);
3986 }
3987
3988
3989
3990
3991
3992 sk->ack_backlog = sk->max_ack_backlog;
3993 }
3994 else
3995 {
3996 break;
3997 }
3998 }
3999
4000
4001
4002
4003
4004 if (!sk->delay_acks ||
4005 sk->ack_backlog >= sk->max_ack_backlog ||
4006 sk->bytes_rcv > sk->max_unacked || th->fin) {
4007
4008 }
4009 else
4010 {
4011 sk->ack_backlog++;
4012 if(sk->debug)
4013 printk("Ack queued.\n");
4014 reset_xmit_timer(sk, TIME_WRITE, TCP_ACK_TIME);
4015 }
4016 }
4017 }
4018
4019
4020
4021
4022
4023
4024 if (!skb->acked)
4025 {
4026
4027
4028
4029
4030
4031
4032
4033
4034 while (sk->prot->rspace(sk) < sk->mtu)
4035 {
4036 skb1 = skb_peek(&sk->receive_queue);
4037 if (skb1 == NULL)
4038 {
4039 printk("INET: tcp.c:tcp_data memory leak detected.\n");
4040 break;
4041 }
4042
4043
4044
4045
4046
4047 if (skb1->acked)
4048 {
4049 break;
4050 }
4051
4052 skb_unlink(skb1);
4053 kfree_skb(skb1, FREE_READ);
4054 }
4055 tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
4056 sk->ack_backlog++;
4057 reset_xmit_timer(sk, TIME_WRITE, TCP_ACK_TIME);
4058 }
4059 else
4060 {
4061 tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
4062 }
4063
4064
4065
4066
4067
4068 if (!sk->dead)
4069 {
4070 if(sk->debug)
4071 printk("Data wakeup.\n");
4072 sk->data_ready(sk,0);
4073 }
4074 return(0);
4075 }
4076
4077
4078
4079
4080
4081
4082
4083
4084
4085
4086 static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
4087 {
4088 unsigned long ptr = ntohs(th->urg_ptr);
4089
4090 if (ptr)
4091 ptr--;
4092 ptr += th->seq;
4093
4094
4095 if (after(sk->copied_seq, ptr))
4096 return;
4097
4098
4099 if (sk->urg_data && !after(ptr, sk->urg_seq))
4100 return;
4101
4102
4103 if (sk->proc != 0) {
4104 if (sk->proc > 0) {
4105 kill_proc(sk->proc, SIGURG, 1);
4106 } else {
4107 kill_pg(-sk->proc, SIGURG, 1);
4108 }
4109 }
4110 sk->urg_data = URG_NOTYET;
4111 sk->urg_seq = ptr;
4112 }
4113
4114
4115
4116
4117
4118 extern __inline__ int tcp_urg(struct sock *sk, struct tcphdr *th,
4119 unsigned long saddr, unsigned long len)
4120 {
4121 unsigned long ptr;
4122
4123
4124
4125
4126
4127 if (th->urg)
4128 tcp_check_urg(sk,th);
4129
4130
4131
4132
4133
4134 if (sk->urg_data != URG_NOTYET)
4135 return 0;
4136
4137
4138
4139
4140
4141 ptr = sk->urg_seq - th->seq + th->doff*4;
4142 if (ptr >= len)
4143 return 0;
4144
4145
4146
4147
4148
4149 sk->urg_data = URG_VALID | *(ptr + (unsigned char *) th);
4150 if (!sk->dead)
4151 sk->data_ready(sk,0);
4152 return 0;
4153 }
4154
4155
4156
4157
4158
/*
 *	Wait for an incoming connection on a listening socket and hand
 *	the newly created sock back to the caller.  Returns NULL with
 *	sk->err set (EINVAL / EAGAIN / ERESTARTSYS) on failure.
 */
static struct sock *tcp_accept(struct sock *sk, int flags)
{
	struct sock *newsk;
	struct sk_buff *skb;

	/* Accept only makes sense on a socket that is listening. */
	if (sk->state != TCP_LISTEN)
	{
		sk->err = EINVAL;
		return(NULL);
	}

	/* Lock out interrupts while we take the socket lock. */
	cli();
	sk->inuse = 1;

	while((skb = tcp_dequeue_established(sk)) == NULL)
	{
		if (flags & O_NONBLOCK)
		{
			/* Non-blocking accept with no connection ready. */
			sti();
			release_sock(sk);
			sk->err = EAGAIN;
			return(NULL);
		}

		/* Drop the lock and sleep until a connection completes
		   (or a signal arrives). */
		release_sock(sk);
		interruptible_sleep_on(sk->sleep);
		if (current->signal & ~current->blocked)
		{
			/* Interrupted by a signal - let the caller restart. */
			sti();
			sk->err = ERESTARTSYS;
			return(NULL);
		}
		sk->inuse = 1;	/* re-acquire the socket lock before rescanning */
	}
	sti();

	/* The dequeued skb carries the newly established socket. */
	newsk = skb->sk;

	kfree_skb(skb, FREE_READ);
	sk->ack_backlog--;
	release_sock(sk);
	return(newsk);
}
4212
4213
4214
4215
4216
4217
/*
 *	Actively open a TCP connection: validate the destination, pick an
 *	initial sequence number, build and transmit a SYN carrying an MSS
 *	option, and move the socket to SYN_SENT with the retransmission
 *	timer armed.  Returns 0 or a negative errno.
 */
static int tcp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len)
{
	struct sk_buff *buff;
	struct device *dev=NULL;
	unsigned char *ptr;
	int tmp;
	int atype;
	struct tcphdr *t1;
	struct rtable *rt;

	/* Only a fresh (closed) socket may connect. */
	if (sk->state != TCP_CLOSE)
	{
		return(-EISCONN);
	}

	if (addr_len < 8)
		return(-EINVAL);

	if (usin->sin_family && usin->sin_family != AF_INET)
		return(-EAFNOSUPPORT);

	/*
	 *	Connecting to INADDR_ANY means connecting to ourselves.
	 */
	if(usin->sin_addr.s_addr==INADDR_ANY)
		usin->sin_addr.s_addr=ip_my_addr();

	/*
	 *	A TCP connection cannot go to a broadcast or multicast
	 *	address.
	 */
	if ((atype=ip_chk_addr(usin->sin_addr.s_addr)) == IS_BROADCAST || atype==IS_MULTICAST)
		return -ENETUNREACH;

	/* Establish the connection identity and sequence space. */
	sk->inuse = 1;
	sk->daddr = usin->sin_addr.s_addr;
	sk->write_seq = tcp_init_seq();
	sk->window_seq = sk->write_seq;
	sk->rcv_ack_seq = sk->write_seq -1;	/* nothing of ours acked yet */
	sk->err = 0;
	sk->dummy_th.dest = usin->sin_port;
	release_sock(sk);

	/* GFP_KERNEL allocation may sleep - socket was released above. */
	buff = sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
	if (buff == NULL)
	{
		return(-ENOMEM);
	}
	sk->inuse = 1;
	buff->len = 24;
	buff->sk = sk;
	buff->free = 0;
	buff->localroute = sk->localroute;

	t1 = (struct tcphdr *) buff->data;

	/* Look the route up so we can use its window/MSS/IRTT hints. */
	rt=ip_rt_route(sk->daddr, NULL, NULL);

	/*
	 *	Put in the IP header and routing stuff.  Sets 'dev' as a
	 *	side effect.
	 */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
		IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0)
	{
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		release_sock(sk);
		return(-ENETUNREACH);
	}

	buff->len += tmp;
	t1 = (struct tcphdr *)((char *)t1 +tmp);

	/* Build the SYN from the socket's template TCP header. */
	memcpy(t1,(void *)&(sk->dummy_th), sizeof(*t1));
	t1->seq = ntohl(sk->write_seq++);
	sk->sent_seq = sk->write_seq;
	buff->h.seq = sk->write_seq;
	t1->ack = 0;
	t1->window = 2;		/* NOTE(review): tiny initial window offer - confirm intent */
	t1->res1=0;
	t1->res2=0;
	t1->rst = 0;
	t1->urg = 0;
	t1->psh = 0;
	t1->syn = 1;
	t1->urg_ptr = 0;
	t1->doff = 6;		/* 20-byte header plus 4 bytes of MSS option */

	/* Honour any window clamp the route supplies. */
	if(rt!=NULL && (rt->rt_flags&RTF_WINDOW))
		sk->window_clamp=rt->rt_window;
	else
		sk->window_clamp=0;

	/*
	 *	MSS selection: explicit user setting first, then the route
	 *	hint, otherwise guess from whether the destination shares
	 *	our (sub)net.
	 */
	if (sk->user_mss)
		sk->mtu = sk->user_mss;
	else if(rt!=NULL && (rt->rt_flags&RTF_MTU))
		sk->mtu = rt->rt_mss;
	else
	{
#ifdef CONFIG_INET_SNARL
		if ((sk->saddr ^ sk->daddr) & default_mask(sk->saddr))
#else
		if ((sk->saddr ^ sk->daddr) & dev->pa_mask)
#endif
			sk->mtu = 576 - HEADER_SIZE;	/* off-net: be conservative */
		else
			sk->mtu = MAX_WINDOW;
	}

	/*
	 *	Sanity floor, then never exceed what the device can carry.
	 */
	if(sk->mtu <32)
		sk->mtu = 32;

	sk->mtu = min(sk->mtu, dev->mtu - HEADER_SIZE);

	/*
	 *	Append the MSS option (kind 2, length 4) after the header.
	 */
	ptr = (unsigned char *)(t1+1);
	ptr[0] = 2;
	ptr[1] = 4;
	ptr[2] = (sk->mtu) >> 8;
	ptr[3] = (sk->mtu) & 0xff;
	tcp_send_check(t1, sk->saddr, sk->daddr,
		  sizeof(struct tcphdr) + 4, sk);

	/*
	 *	Zap the demux cache and change state before transmitting,
	 *	so a very quick response is not mis-handled.
	 */
	tcp_cache_zap();
	tcp_set_state(sk,TCP_SYN_SENT);
	if(rt&&rt->rt_flags&RTF_IRTT)
		sk->rto = rt->rt_irtt;	/* initial RTO from the route */
	else
		sk->rto = TCP_TIMEOUT_INIT;
	sk->retransmit_timer.function=&retransmit_timer;
	sk->retransmit_timer.data = (unsigned long)sk;
	reset_xmit_timer(sk, TIME_WRITE, sk->rto);	/* repeat the SYN until answered */
	sk->retransmits = TCP_SYN_RETRIES;

	sk->prot->queue_xmit(sk, dev, buff, 0);
	reset_xmit_timer(sk, TIME_WRITE, sk->rto);
	tcp_statistics.TcpActiveOpens++;
	tcp_statistics.TcpOutSegs++;

	release_sock(sk);
	return(0);
}
4378
4379
4380
4381 extern __inline__ int tcp_sequence(struct sock *sk, struct tcphdr *th, short len,
4382 struct options *opt, unsigned long saddr, struct device *dev)
4383 {
4384 u32 next_seq;
4385
4386 next_seq = len - 4*th->doff;
4387 if (th->fin)
4388 next_seq++;
4389
4390 if (next_seq && !sk->window)
4391 goto ignore_it;
4392 next_seq += th->seq;
4393
4394
4395
4396
4397
4398
4399
4400
4401
4402 if (!after(next_seq+1, sk->acked_seq))
4403 goto ignore_it;
4404
4405 if (!before(th->seq, sk->acked_seq + sk->window + 1))
4406 goto ignore_it;
4407
4408
4409 return 1;
4410
4411 ignore_it:
4412 if (th->rst)
4413 return 0;
4414
4415
4416
4417
4418
4419
4420
4421
4422 if (sk->state==TCP_SYN_SENT || sk->state==TCP_SYN_RECV)
4423 {
4424 tcp_reset(sk->saddr,sk->daddr,th,sk->prot,NULL,dev, sk->ip_tos,sk->ip_ttl);
4425 return 1;
4426 }
4427
4428
4429 tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
4430 return 0;
4431 }
4432
4433
4434
4435
4436
/*
 *	Standard handling for a received RST: mark the socket dead,
 *	translate the state into the errno the user will see, move to
 *	CLOSE (subject to RFC 1337), wake the owner and drop the skb.
 */
static int tcp_std_reset(struct sock *sk, struct sk_buff *skb)
{
	sk->zapped = 1;

	/* Map the state at reset time onto the reported error. */
	switch (sk->state)
	{
		case TCP_SYN_SENT:
			sk->err = ECONNREFUSED;
			break;
		case TCP_CLOSE_WAIT:
			sk->err = EPIPE;
			break;
		default:
			sk->err = ECONNRESET;
			break;
	}
#ifdef TCP_DO_RFC1337
	/*
	 *	RFC 1337: hold TIME_WAIT on a reset so old duplicates
	 *	cannot assassinate the state.
	 */
	if (sk->state != TCP_TIME_WAIT)
	{
		tcp_set_state(sk, TCP_CLOSE);
		sk->shutdown = SHUTDOWN_MASK;
	}
#else
	tcp_set_state(sk, TCP_CLOSE);
	sk->shutdown = SHUTDOWN_MASK;
#endif
	if (!sk->dead)
		sk->state_change(sk);
	kfree_skb(skb, FREE_READ);
	release_sock(sk);
	return(0);
}
4464
4465
4466
4467
4468
/*
 *	Main entry point for received TCP segments.  Demultiplexes to a
 *	socket (via a one-entry cache), validates the checksum, then runs
 *	the segment through the connection state machine: LISTEN spawns a
 *	new connection, SYN_SENT completes or crosses the handshake,
 *	TIME_WAIT may be recycled BSD-style, and established-side segments
 *	go through sequence check / RST / SYN / ACK / URG / data steps.
 *	Always returns 0.
 */
int tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
	unsigned long daddr, unsigned short len,
	unsigned long saddr, int redo, struct inet_protocol * protocol)
{
	struct tcphdr *th;
	struct sock *sk;
	int syn_ok=0;

	tcp_statistics.TcpInSegs++;

	/* Ignore copies of other hosts' frames (promiscuous mode etc). */
	if(skb->pkt_type!=PACKET_HOST)
	{
		kfree_skb(skb,FREE_READ);
		return(0);
	}

	th = skb->h.th;

	/*
	 *	Find the socket: try the last-used cache entry first,
	 *	otherwise do the full lookup and refresh the cache.
	 */
	if(saddr==th_cache_saddr && daddr==th_cache_daddr && th->dest==th_cache_dport && th->source==th_cache_sport)
		sk=(struct sock *)th_cache_sk;
	else
	{
		sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
		th_cache_saddr=saddr;
		th_cache_daddr=daddr;
		th_cache_dport=th->dest;
		th_cache_sport=th->source;
		th_cache_sk=sk;
	}

	/* A zapped or closed socket is treated as no socket at all. */
	if (sk!=NULL && (sk->zapped || sk->state==TCP_CLOSE))
		sk=NULL;

	if (!redo)
	{
		/* First pass: verify the checksum before anything else. */
		if (tcp_check(th, len, saddr, daddr ))
		{
			skb->sk = NULL;
			kfree_skb(skb,FREE_READ);
			return(0);
		}
		th->seq = ntohl(th->seq);	/* host order from here on */

		/* No socket: answer with a reset. */
		if (sk == NULL)
		{
			tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev,skb->ip_hdr->tos,255);
			skb->sk = NULL;
			kfree_skb(skb, FREE_READ);
			return(0);
		}

		skb->len = len;
		skb->acked = 0;
		skb->used = 0;
		skb->free = 0;
		skb->saddr = daddr;
		skb->daddr = saddr;

		/* If the socket is busy, defer to the backlog; tcp_rcv is
		   re-entered later with redo set. */
		cli();
		if (sk->inuse)
		{
			skb_queue_tail(&sk->back_log, skb);
			sti();
			return(0);
		}
		sk->inuse = 1;
		sti();
	}
	else
	{
		/* Backlog replay: the socket may have vanished meanwhile. */
		if (sk==NULL)
		{
			tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev,skb->ip_hdr->tos,255);
			skb->sk = NULL;
			kfree_skb(skb, FREE_READ);
			return(0);
		}
	}

	/* Should never happen - defensive check. */
	if (!sk->prot)
	{
		printk("IMPOSSIBLE 3\n");
		return(0);
	}

	/*
	 *	Charge the receive buffer; drop if it would overflow.
	 */
	if (sk->rmem_alloc + skb->mem_len >= sk->rcvbuf)
	{
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	}

	skb->sk=sk;
	sk->rmem_alloc += skb->mem_len;

	/*
	 *	Non-established states need special-case handling before
	 *	the generic RFC step processing.
	 */
	if(sk->state!=TCP_ESTABLISHED)
	{
		/*
		 *	LISTEN: an ACK is bogus (reset it); only a clean SYN
		 *	addressed to us starts a new connection.
		 */
		if(sk->state==TCP_LISTEN)
		{
			if(th->ack)
				tcp_reset(daddr,saddr,th,sk->prot,opt,dev,sk->ip_tos, sk->ip_ttl);

			if(th->rst || !th->syn || th->ack || ip_chk_addr(daddr)!=IS_MYADDR)
			{
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return 0;
			}

			/* Spawn the embryonic connection; skb ownership
			   passes to tcp_conn_request(). */
			tcp_conn_request(sk, skb, daddr, saddr, opt, dev, tcp_init_seq());

			release_sock(sk);
			return 0;
		}

		/* Retransmitted SYN while we are in SYN_RECV: ignore it. */
		if (sk->state == TCP_SYN_RECV && th->syn && th->seq+1 == sk->acked_seq)
		{
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return 0;
		}

		/*
		 *	SYN_SENT: complete our active open, or handle a
		 *	simultaneous open / bad answer.
		 */
		if(sk->state==TCP_SYN_SENT)
		{
			if(th->ack)
			{
				/* An ACK that doesn't cover our SYN: reset. */
				if(!tcp_ack(sk,th,saddr,len))
				{
					tcp_statistics.TcpAttemptFails++;
					tcp_reset(daddr, saddr, th,
						sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}
				if(th->rst)
					return tcp_std_reset(sk,skb);
				if(!th->syn)
				{
					/* ACK without SYN: discard quietly. */
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return 0;
				}

				/* SYN+ACK: handshake complete - go to
				   ESTABLISHED and wake the connector. */
				syn_ok=1;
				sk->acked_seq=th->seq+1;
				sk->fin_seq=th->seq;
				tcp_send_ack(sk->sent_seq,sk->acked_seq,sk,th,sk->daddr);
				tcp_set_state(sk, TCP_ESTABLISHED);
				tcp_options(sk,th);
				sk->dummy_th.dest=th->source;
				sk->copied_seq = sk->acked_seq;
				if(!sk->dead)
				{
					sk->state_change(sk);
					sock_wake_async(sk->socket, 0);
				}
				if(sk->max_window==0)
				{
					sk->max_window = 32;	/* fallback if no window seen */
					sk->mss = min(sk->max_window, sk->mtu);
				}
			}
			else
			{
				/* SYN with no ACK: simultaneous open. */
				if(th->syn && !th->rst)
				{
					/* A crossed SYN from our exact peer
					   pair is treated as a failure. */
					if(sk->saddr==saddr && sk->daddr==daddr &&
						sk->dummy_th.source==th->source &&
						sk->dummy_th.dest==th->dest)
					{
						tcp_statistics.TcpAttemptFails++;
						return tcp_std_reset(sk,skb);
					}
					tcp_set_state(sk,TCP_SYN_RECV);
				}

				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return 0;
			}

			/* Fresh connection: skip straight to URG/data steps. */
			goto rfc_step6;
		}

/*
 *	BSD-style TIME_WAIT recycling: a new SYN with a forward sequence
 *	number kills the old dead connection and, if a listener exists,
 *	starts a new incarnation on it.
 */
#define BSD_TIME_WAIT
#ifdef BSD_TIME_WAIT
		if (sk->state == TCP_TIME_WAIT && th->syn && sk->dead &&
			after(th->seq, sk->acked_seq) && !th->rst)
		{
			u32 seq = sk->write_seq;
			if(sk->debug)
				printk("Doing a BSD time wait\n");
			tcp_statistics.TcpEstabResets++;
			sk->rmem_alloc -= skb->mem_len;	/* uncharge before re-homing */
			skb->sk = NULL;
			sk->err=ECONNRESET;
			tcp_set_state(sk, TCP_CLOSE);
			sk->shutdown = SHUTDOWN_MASK;
			release_sock(sk);
			/* Look for a listener to take over the pair. */
			sk=get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
			if (sk && sk->state==TCP_LISTEN)
			{
				sk->inuse=1;
				skb->sk = sk;
				sk->rmem_alloc += skb->mem_len;
				/* New ISN well above the old send space. */
				tcp_conn_request(sk, skb, daddr, saddr,opt, dev,seq+128000);
				release_sock(sk);
				return 0;
			}
			kfree_skb(skb, FREE_READ);
			return 0;
		}
#endif
	}

	/*
	 *	RFC-style steps: in-window check first.
	 */
	if(!tcp_sequence(sk,th,len,opt,saddr,dev))
	{
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return 0;
	}

	if(th->rst)
		return tcp_std_reset(sk,skb);

	/*
	 *	A SYN we didn't expect on a live connection: reset it.
	 */
	if(th->syn && !syn_ok)
	{
		tcp_reset(daddr,saddr,th, &tcp_prot, opt, dev, skb->ip_hdr->tos, 255);
		return tcp_std_reset(sk,skb);
	}

	/*
	 *	Process the ACK; a bad ACK in SYN_RECV earns a reset.
	 */
	if(th->ack && !tcp_ack(sk,th,saddr,len))
	{
		if(sk->state==TCP_SYN_RECV)
		{
			tcp_reset(daddr, saddr, th,sk->prot, opt, dev,sk->ip_tos,sk->ip_ttl);
		}
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return 0;
	}

rfc_step6:
	/*
	 *	Urgent data, then the payload itself.  A non-zero return
	 *	from either means the skb was not consumed - free it here.
	 */
	if(tcp_urg(sk, th, saddr, len))
	{
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return 0;
	}

	if(tcp_data(skb,sk, saddr, len))
	{
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return 0;
	}

	release_sock(sk);
	return 0;
}
4857
4858
4859
4860
4861
4862
/*
 *	Window-probe / keepalive transmitter.  If the peer's window has
 *	opened and there is queued data, carve the in-window prefix off
 *	the first queued segment and send it; otherwise send a bare ACK
 *	with seq = sent_seq-1 to provoke a reply.
 */
static void tcp_write_wakeup(struct sock *sk)
{
	struct sk_buff *buff,*skb;
	struct tcphdr *t1;
	struct device *dev=NULL;
	int tmp;

	if (sk->zapped)
		return;	/* after a valid reset - nothing to do */

	/*
	 *	Only probe in states where we may still send data.
	 */
	if (sk->state != TCP_ESTABLISHED &&
		sk->state != TCP_CLOSE_WAIT &&
		sk->state != TCP_FIN_WAIT1 &&
		sk->state != TCP_LAST_ACK &&
		sk->state != TCP_CLOSING
	)
	{
		return;
	}

	if (before(sk->sent_seq, sk->window_seq) &&
	    (skb=skb_peek(&sk->write_queue)))
	{
		/*
		 *	The window allows some new data and a segment is
		 *	queued: build a fresh segment carrying just the
		 *	in-window part and trim it off the queued one.
		 */
		struct iphdr *iph;
		struct tcphdr *th;
		struct tcphdr *nth;
		unsigned long win_size, ow_size;
		void * tcp_data_start;

		/* Bytes of the queued segment that now fit the window. */
		win_size = sk->window_seq - sk->sent_seq;

		/* Locate the IP and TCP headers inside the queued frame. */
		iph = (struct iphdr *)(skb->data + skb->dev->hard_header_len);
		th = (struct tcphdr *)(((char *)iph) +(iph->ihl << 2));

		buff = sk->prot->wmalloc(sk, win_size + th->doff * 4 +
				(iph->ihl << 2) +
				skb->dev->hard_header_len,
				1, GFP_ATOMIC);
		if ( buff == NULL )
			return;

		buff->len = 0;

		/*
		 *	Keep the buffer until it has been transmitted.
		 */
		buff->free = 0;

		buff->sk = sk;
		buff->localroute = sk->localroute;

		tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
			IPPROTO_TCP, sk->opt, buff->mem_len,
			sk->ip_tos,sk->ip_ttl);
		if (tmp < 0)
		{
			sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
			return;
		}

		buff->len += tmp;
		buff->dev = dev;

		/* Copy the original TCP header (with options), then patch
		   the live fields. */
		nth = (struct tcphdr *) (buff->data + buff->len);
		buff->len += th->doff * 4;

		memcpy(nth, th, th->doff * 4);

		nth->ack = 1;
		nth->ack_seq = ntohl(sk->acked_seq);
		nth->window = ntohs(tcp_select_window(sk));
		nth->check = 0;

		/* Start of the payload within the queued frame. */
		tcp_data_start = skb->data + skb->dev->hard_header_len +
				(iph->ihl << 2) + th->doff * 4;

		/* Move the in-window prefix into the new segment. */
		memcpy(buff->data + buff->len, tcp_data_start, win_size);
		buff->len += win_size;
		buff->h.seq = sk->sent_seq + win_size;

		/*
		 *	Now shrink the queued segment: slide the remaining
		 *	payload down and advance its sequence number.
		 */
		th->check = 0;
		ow_size = skb->len - win_size -
			((unsigned long) (tcp_data_start - (void *) skb->data));

		memmove(tcp_data_start, tcp_data_start + win_size, ow_size);
		skb->len -= win_size;
		sk->sent_seq += win_size;
		th->seq = htonl(sk->sent_seq);

		/* Re-point the urgent pointer across the split. */
		if (th->urg)
		{
			unsigned short urg_ptr;

			urg_ptr = ntohs(th->urg_ptr);
			if (urg_ptr <= win_size)
				th->urg = 0;	/* urgent byte went with the prefix */
			else
			{
				urg_ptr -= win_size;
				th->urg_ptr = htons(urg_ptr);
				nth->urg_ptr = htons(win_size);
			}
		}

		tcp_send_check(nth, sk->saddr, sk->daddr,
			   nth->doff * 4 + win_size , sk);
	}
	else
	{
		/*
		 *	Nothing sendable: emit a bare probe ACK whose
		 *	sequence number is one before sent_seq, forcing the
		 *	peer to respond with its current window.
		 */
		buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
		if (buff == NULL)
			return;

		buff->len = sizeof(struct tcphdr);
		buff->free = 1;
		buff->sk = sk;
		buff->localroute = sk->localroute;

		t1 = (struct tcphdr *) buff->data;

		/*
		 *	Put in the IP header and routing stuff.
		 */
		tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
			IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
		if (tmp < 0)
		{
			sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
			return;
		}

		buff->len += tmp;
		t1 = (struct tcphdr *)((char *)t1 +tmp);

		memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));

		/*
		 *	Sequence number deliberately one below sent_seq so
		 *	the segment is out of window and must be acked.
		 */
		t1->seq = htonl(sk->sent_seq-1);
		t1->ack = 1;
		t1->res1= 0;
		t1->res2= 0;
		t1->rst = 0;
		t1->urg = 0;
		t1->psh = 0;
		t1->fin = 0;
		t1->syn = 0;
		t1->ack_seq = ntohl(sk->acked_seq);
		t1->window = ntohs(tcp_select_window(sk));
		t1->doff = sizeof(*t1)/4;
		tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

	}

	/*
	 *	Send it and count it.
	 */
	sk->prot->queue_xmit(sk, dev, buff, 1);
	tcp_statistics.TcpOutSegs++;
}
5046
5047
5048
5049
5050
5051 void tcp_send_probe0(struct sock *sk)
5052 {
5053 if (sk->zapped)
5054 return;
5055
5056 tcp_write_wakeup(sk);
5057
5058 sk->backoff++;
5059 sk->rto = min(sk->rto << 1, 120*HZ);
5060 reset_xmit_timer (sk, TIME_PROBE0, sk->rto);
5061 sk->retransmits++;
5062 sk->prot->retransmits ++;
5063 }
5064
5065
5066
5067
5068
5069 int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
5070 {
5071 int val,err;
5072
5073 if(level!=SOL_TCP)
5074 return ip_setsockopt(sk,level,optname,optval,optlen);
5075
5076 if (optval == NULL)
5077 return(-EINVAL);
5078
5079 err=verify_area(VERIFY_READ, optval, sizeof(int));
5080 if(err)
5081 return err;
5082
5083 val = get_fs_long((unsigned long *)optval);
5084
5085 switch(optname)
5086 {
5087 case TCP_MAXSEG:
5088
5089
5090
5091
5092
5093 if(val<1||val>MAX_WINDOW)
5094 return -EINVAL;
5095 sk->user_mss=val;
5096 return 0;
5097 case TCP_NODELAY:
5098 sk->nonagle=(val==0)?0:1;
5099 return 0;
5100 default:
5101 return(-ENOPROTOOPT);
5102 }
5103 }
5104
5105 int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
5106 {
5107 int val,err;
5108
5109 if(level!=SOL_TCP)
5110 return ip_getsockopt(sk,level,optname,optval,optlen);
5111
5112 switch(optname)
5113 {
5114 case TCP_MAXSEG:
5115 val=sk->user_mss;
5116 break;
5117 case TCP_NODELAY:
5118 val=sk->nonagle;
5119 break;
5120 default:
5121 return(-ENOPROTOOPT);
5122 }
5123 err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
5124 if(err)
5125 return err;
5126 put_fs_long(sizeof(int),(unsigned long *) optlen);
5127
5128 err=verify_area(VERIFY_WRITE, optval, sizeof(int));
5129 if(err)
5130 return err;
5131 put_fs_long(val,(unsigned long *)optval);
5132
5133 return(0);
5134 }
5135
5136
/*
 *	Protocol operations vector binding TCP into the generic INET
 *	socket layer.  Initializers are positional; the function entries
 *	map one-to-one onto the handlers defined in this file plus the
 *	generic sock_* buffer helpers and the ip_* transmit helpers.
 */
struct proto tcp_prot = {
	sock_wmalloc,		/* write-side buffer allocation */
	sock_rmalloc,		/* read-side buffer allocation */
	sock_wfree,
	sock_rfree,
	sock_rspace,		/* free space in the receive buffer */
	sock_wspace,		/* free space in the send buffer */
	tcp_close,
	tcp_read,
	tcp_write,
	tcp_sendto,
	tcp_recvfrom,
	ip_build_header,
	tcp_connect,
	tcp_accept,
	ip_queue_xmit,
	tcp_retransmit,
	tcp_write_wakeup,
	tcp_read_wakeup,
	tcp_rcv,
	tcp_select,
	tcp_ioctl,
	NULL,			/* no init hook */
	tcp_shutdown,
	tcp_setsockopt,
	tcp_getsockopt,
	128,			/* presumably max_header - TODO confirm against struct proto */
	0,
	"TCP",
	0, 0,			/* usage counters - confirm field names in struct proto */
	{NULL,}
};