This source file includes the following definitions:
- min
- tcp_set_state
- tcp_select_window
- tcp_time_wait
- tcp_retransmit
- tcp_err
- tcp_readable
- tcp_select
- tcp_ioctl
- tcp_check
- tcp_send_check
- tcp_send_skb
- tcp_dequeue_partial
- tcp_send_partial
- tcp_enqueue_partial
- tcp_send_ack
- tcp_build_header
- tcp_write
- tcp_sendto
- tcp_read_wakeup
- cleanup_rbuf
- tcp_read_urg
- tcp_read
- tcp_shutdown
- tcp_recvfrom
- tcp_reset
- tcp_options
- default_mask
- tcp_conn_request
- tcp_close
- tcp_write_xmit
- sort_send
- tcp_ack
- tcp_data
- tcp_check_urg
- tcp_urg
- tcp_fin
- tcp_accept
- tcp_connect
- tcp_sequence
- tcp_clean_end
- tcp_rcv
- tcp_write_wakeup
- tcp_send_probe0
- tcp_setsockopt
- tcp_getsockopt
134 #include <linux/types.h>
135 #include <linux/sched.h>
136 #include <linux/mm.h>
137 #include <linux/string.h>
138 #include <linux/socket.h>
139 #include <linux/sockios.h>
140 #include <linux/termios.h>
141 #include <linux/in.h>
142 #include <linux/fcntl.h>
143 #include <linux/inet.h>
144 #include <linux/netdevice.h>
145 #include "snmp.h"
146 #include "ip.h"
147 #include "protocol.h"
148 #include "icmp.h"
149 #include "tcp.h"
150 #include <linux/skbuff.h>
151 #include "sock.h"
152 #include "route.h"
153 #include <linux/errno.h>
154 #include <linux/timer.h>
155 #include <asm/system.h>
156 #include <asm/segment.h>
157
158
159 #undef TCP_FASTPATH
160
161 #define SEQ_TICK 3
162 unsigned long seq_offset;
163 struct tcp_mib tcp_statistics;
164
165 #ifdef TCP_FASTPATH
166 unsigned long tcp_rx_miss=0, tcp_rx_hit1=0, tcp_rx_hit2=0;
167 #endif
168
169
170 static __inline__ int min(unsigned int a, unsigned int b)
171 {
172 if (a < b)
173 return(a);
174 return(b);
175 }
176
177 #undef STATE_TRACE
178
179 static __inline__ void tcp_set_state(struct sock *sk, int state)
180 {
181 if(sk->state==TCP_ESTABLISHED)
182 tcp_statistics.TcpCurrEstab--;
183 #ifdef STATE_TRACE
184 if(sk->debug)
185 printk("TCP sk=%p, State %d -> %d\n",sk, sk->state,state);
186 #endif
187 sk->state=state;
188 if(state==TCP_ESTABLISHED)
189 tcp_statistics.TcpCurrEstab++;
190 }
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
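/*
 * Choose the window to advertise to the peer.  The window is clamped to
 * sk->window_clamp when one is set, and we never shrink the previously
 * offered window or advertise less than min(mss, MAX_WINDOW/2), so the
 * receiver is not invited into silly-window-syndrome traffic.
 */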
207 int tcp_select_window(struct sock *sk)
208 {
209 int new_window = sk->prot->rspace(sk);
210
211 if(sk->window_clamp)
212 new_window=min(sk->window_clamp,new_window);
213
214
215
216
217
218
219
220
221
222 if (new_window < min(sk->mss, MAX_WINDOW/2) || new_window < sk->window)
223 return(sk->window);
224 return(new_window);
225 }
226
227
228
229
230
231 static void tcp_time_wait(struct sock *sk)
232 {
233 tcp_set_state(sk,TCP_TIME_WAIT);
234 sk->shutdown = SHUTDOWN_MASK;
235 if (!sk->dead)
236 sk->state_change(sk);
237 reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
238 }
239
240
241
242
243
244
245
246
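/*
 * Enter retransmission.  Unless 'all' asks for a plain full-queue resend,
 * record half the current congestion window in ssthresh and fall back to a
 * congestion window of one packet (slow start) before handing the actual
 * retransmit to ip_retransmit().
 */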
247 static void tcp_retransmit(struct sock *sk, int all)
248 {
249 if (all)
250 {
251 ip_retransmit(sk, all);
252 return;
253 }
254
255 sk->ssthresh = sk->cong_window >> 1;
256
257 sk->cong_count = 0;
258
259 sk->cong_window = 1;
260
261
262 ip_retransmit(sk, all);
263 }
264
265
266
267
268
269
270
271
272
273
274
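/*
 * ICMP error handler for TCP.  Find the socket the quoted header belongs
 * to, slow down on a source quench, and turn fatal errors (or any error
 * while in SYN_SENT) into a socket error, closing a failed connect.
 */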
275 void tcp_err(int err, unsigned char *header, unsigned long daddr,
276 unsigned long saddr, struct inet_protocol *protocol)
277 {
278 struct tcphdr *th;
279 struct sock *sk;
280 struct iphdr *iph=(struct iphdr *)header;
281
282 header+=4*iph->ihl;
283
284
285 th =(struct tcphdr *)header;
286 sk = get_sock(&tcp_prot, th->source, daddr, th->dest, saddr);
287
288 if (sk == NULL)
289 return;
290
291 if(err<0)
292 {
293 sk->err = -err;
294 sk->error_report(sk);
295 return;
296 }
297
298 if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8))
299 {
300
301
302
303
304
305 if (sk->cong_window > 4)
306 sk->cong_window--;
307 return;
308 }
309
310
311
312
313
314
315
316
317 if (icmp_err_convert[err & 0xff].fatal || sk->state == TCP_SYN_SENT)
318 {
319 if (sk->state == TCP_SYN_SENT)
320 {
321 tcp_statistics.TcpAttemptFails++;
322 tcp_set_state(sk,TCP_CLOSE);
323 sk->error_report(sk);
324 }
325 sk->err = icmp_err_convert[err & 0xff].errno;
326 }
327 return;
328 }
329
330
331
332
333
334
335
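/*
 * Work out how many bytes a read could return right now.  Walk the receive
 * queue from copied_seq, stop at the first gap in sequence space or, once
 * something has been counted, at a PSH segment, and leave out the urgent
 * byte when it is not being delivered inline.
 */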
336 static int tcp_readable(struct sock *sk)
337 {
338 unsigned long counted;
339 unsigned long amount;
340 struct sk_buff *skb;
341 int sum;
342 unsigned long flags;
343
344 if(sk && sk->debug)
345 printk("tcp_readable: %p - ",sk);
346
347 save_flags(flags);
348 cli();
349 if (sk == NULL || (skb = skb_peek(&sk->receive_queue)) == NULL)
350 {
351 restore_flags(flags);
352 if(sk && sk->debug)
353 printk("empty\n");
354 return(0);
355 }
356
357 counted = sk->copied_seq+1;
358 amount = 0;
359
360
361 do
362 {
363 if (before(counted, skb->h.th->seq))
364 break;
365 sum = skb->len -(counted - skb->h.th->seq);
366 if (skb->h.th->syn)
367 sum++;
368 if (sum > 0)
369 {
370 amount += sum;
371 if (skb->h.th->syn)
372 amount--;
373 counted += sum;
374 }
375 if (amount && skb->h.th->psh) break;
376 skb = skb->next;
377 }
378 while(skb != (struct sk_buff *)&sk->receive_queue);
379
380 if (amount && !sk->urginline && sk->urg_data &&
381 (sk->urg_seq - sk->copied_seq) <= (counted - sk->copied_seq))
382 amount--;
383 restore_flags(flags);
384 if(sk->debug)
385 printk("got %lu bytes.\n",amount);
386 return(amount);
387 }
388
389
390
391
392
393
394
395 static int tcp_select(struct sock *sk, int sel_type, select_table *wait)
396 {
397 sk->inuse = 1;
398
399 switch(sel_type)
400 {
401 case SEL_IN:
402 if(sk->debug)
403 printk("select in");
404 select_wait(sk->sleep, wait);
405 if(sk->debug)
406 printk("-select out");
407 if (skb_peek(&sk->receive_queue) != NULL)
408 {
409 if (sk->state == TCP_LISTEN || tcp_readable(sk))
410 {
411 release_sock(sk);
412 if(sk->debug)
413 printk("-select ok data\n");
414 return(1);
415 }
416 }
417 if (sk->err != 0)
418 {
419 release_sock(sk);
420 if(sk->debug)
421 printk("-select ok error");
422 return(1);
423 }
424 if (sk->shutdown & RCV_SHUTDOWN)
425 {
426 release_sock(sk);
427 if(sk->debug)
428 printk("-select ok down\n");
429 return(1);
430 }
431 else
432 {
433 release_sock(sk);
434 if(sk->debug)
435 printk("-select fail\n");
436 return(0);
437 }
438 case SEL_OUT:
439 select_wait(sk->sleep, wait);
440 if (sk->shutdown & SEND_SHUTDOWN)
441 {
442
443 release_sock(sk);
444 return(0);
445 }
446
447
448
449
450
451
452
453 if (sk->prot->wspace(sk) >= sk->mss)
454 {
455 release_sock(sk);
456
457 if (sk->state == TCP_SYN_RECV ||
458 sk->state == TCP_SYN_SENT) return(0);
459 return(1);
460 }
461 release_sock(sk);
462 return(0);
463 case SEL_EX:
464 select_wait(sk->sleep,wait);
465 if (sk->err || sk->urg_data)
466 {
467 release_sock(sk);
468 return(1);
469 }
470 release_sock(sk);
471 return(0);
472 }
473
474 release_sock(sk);
475 return(0);
476 }
477
478
479 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
480 {
481 int err;
482 switch(cmd)
483 {
484
485 case TIOCINQ:
486 #ifdef FIXME
487 case FIONREAD:
488 #endif
489 {
490 unsigned long amount;
491
492 if (sk->state == TCP_LISTEN)
493 return(-EINVAL);
494
495 sk->inuse = 1;
496 amount = tcp_readable(sk);
497 release_sock(sk);
498 err=verify_area(VERIFY_WRITE,(void *)arg,
499 sizeof(unsigned long));
500 if(err)
501 return err;
502 put_fs_long(amount,(unsigned long *)arg);
503 return(0);
504 }
505 case SIOCATMARK:
506 {
507 int answ = sk->urg_data && sk->urg_seq == sk->copied_seq+1;
508
509 err = verify_area(VERIFY_WRITE,(void *) arg,
510 sizeof(unsigned long));
511 if (err)
512 return err;
513 put_fs_long(answ,(int *) arg);
514 return(0);
515 }
516 case TIOCOUTQ:
517 {
518 unsigned long amount;
519
520 if (sk->state == TCP_LISTEN) return(-EINVAL);
521 amount = sk->prot->wspace(sk);
522 err=verify_area(VERIFY_WRITE,(void *)arg,
523 sizeof(unsigned long));
524 if(err)
525 return err;
526 put_fs_long(amount,(unsigned long *)arg);
527 return(0);
528 }
529 default:
530 return(-EINVAL);
531 }
532 }
533
534
535
536
537
538
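/*
 * Compute the TCP checksum: the pseudo header (addresses, protocol and
 * length) is folded in first, then the header and data are summed 32 bits
 * at a time in i386 inline assembly, and the ones' complement of the
 * folded result is returned.
 */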
539 unsigned short tcp_check(struct tcphdr *th, int len,
540 unsigned long saddr, unsigned long daddr)
541 {
542 unsigned long sum;
543
544 if (saddr == 0) saddr = ip_my_addr();
545
546
547
548
549
550
551 __asm__("
552 addl %%ecx, %%ebx
553 adcl %%edx, %%ebx
554 adcl $0, %%ebx
555 "
556 : "=b"(sum)
557 : "0"(daddr), "c"(saddr), "d"((ntohs(len) << 16) + IPPROTO_TCP*256)
558 : "bx", "cx", "dx" );
559 __asm__("
560 movl %%ecx, %%edx
561 cld
562 cmpl $32, %%ecx
563 jb 2f
564 shrl $5, %%ecx
565 clc
566 1: lodsl
567 adcl %%eax, %%ebx
568 lodsl
569 adcl %%eax, %%ebx
570 lodsl
571 adcl %%eax, %%ebx
572 lodsl
573 adcl %%eax, %%ebx
574 lodsl
575 adcl %%eax, %%ebx
576 lodsl
577 adcl %%eax, %%ebx
578 lodsl
579 adcl %%eax, %%ebx
580 lodsl
581 adcl %%eax, %%ebx
582 loop 1b
583 adcl $0, %%ebx
584 movl %%edx, %%ecx
585 2: andl $28, %%ecx
586 je 4f
587 shrl $2, %%ecx
588 clc
589 3: lodsl
590 adcl %%eax, %%ebx
591 loop 3b
592 adcl $0, %%ebx
593 4: movl $0, %%eax
594 testw $2, %%dx
595 je 5f
596 lodsw
597 addl %%eax, %%ebx
598 adcl $0, %%ebx
599 movw $0, %%ax
600 5: test $1, %%edx
601 je 6f
602 lodsb
603 addl %%eax, %%ebx
604 adcl $0, %%ebx
605 6: movl %%ebx, %%eax
606 shrl $16, %%eax
607 addw %%ax, %%bx
608 adcw $0, %%bx
609 "
610 : "=b"(sum)
611 : "0"(sum), "c"(len), "S"(th)
612 : "ax", "bx", "cx", "dx", "si" );
613
614
615
616 return((~sum) & 0xffff);
617 }
618
619
620
621 void tcp_send_check(struct tcphdr *th, unsigned long saddr,
622 unsigned long daddr, int len, struct sock *sk)
623 {
624 th->check = 0;
625 th->check = tcp_check(th, len, saddr, daddr);
626 return;
627 }
628
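/*
 * Hand a built segment to the IP layer.  Bogus frames are dropped;
 * otherwise the segment is either queued on the write queue (send window
 * full, a retransmit in progress, or the congestion window exhausted) or
 * given its final ack/window fields, checksummed and sent immediately.
 */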
629 static void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
630 {
631 int size;
632 struct tcphdr * th = skb->h.th;
633
634
635 size = skb->len - ((unsigned char *) th - skb->data);
636
637
638 if (size < sizeof(struct tcphdr) || size > skb->len)
639 {
640 printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
641 skb, skb->data, th, skb->len);
642 kfree_skb(skb, FREE_WRITE);
643 return;
644 }
645
646
647 if (size == sizeof(struct tcphdr))
648 {
649
650 if(!th->syn && !th->fin)
651 {
652 printk("tcp_send_skb: attempt to queue a bogon.\n");
653 kfree_skb(skb,FREE_WRITE);
654 return;
655 }
656 }
657
658 tcp_statistics.TcpOutSegs++;
659
660 skb->h.seq = ntohl(th->seq) + size - 4*th->doff;
661 if (after(skb->h.seq, sk->window_seq) ||
662 (sk->retransmits && sk->timeout == TIME_WRITE) ||
663 sk->packets_out >= sk->cong_window)
664 {
665
666
667 th->check = 0;
668 if (skb->next != NULL)
669 {
670 printk("tcp_send_partial: next != NULL\n");
671 skb_unlink(skb);
672 }
673 skb_queue_tail(&sk->write_queue, skb);
674 if (before(sk->window_seq, sk->write_queue.next->h.seq) &&
675 sk->send_head == NULL &&
676 sk->ack_backlog == 0)
677 reset_timer(sk, TIME_PROBE0, sk->rto);
678 }
679 else
680 {
681 th->ack_seq = ntohl(sk->acked_seq);
682 th->window = ntohs(tcp_select_window(sk));
683
684 tcp_send_check(th, sk->saddr, sk->daddr, size, sk);
685
686 sk->sent_seq = sk->write_seq;
687 sk->prot->queue_xmit(sk, skb->dev, skb, 0);
688 }
689 }
690
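/*
 * The "partial" skb collects small writes so that sub-mss segments are not
 * sent while earlier data is still unacknowledged (Nagle).
 * tcp_enqueue_partial() parks an skb and arms a flush timer,
 * tcp_dequeue_partial() claims it back, and tcp_send_partial() flushes
 * whatever is pending.
 */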
691 struct sk_buff * tcp_dequeue_partial(struct sock * sk)
692 {
693 struct sk_buff * skb;
694 unsigned long flags;
695
696 save_flags(flags);
697 cli();
698 skb = sk->partial;
699 if (skb) {
700 sk->partial = NULL;
701 del_timer(&sk->partial_timer);
702 }
703 restore_flags(flags);
704 return skb;
705 }
706
707 static void tcp_send_partial(struct sock *sk)
708 {
709 struct sk_buff *skb;
710
711 if (sk == NULL)
712 return;
713 while ((skb = tcp_dequeue_partial(sk)) != NULL)
714 tcp_send_skb(sk, skb);
715 }
716
717 void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk)
718 {
719 struct sk_buff * tmp;
720 unsigned long flags;
721
722 save_flags(flags);
723 cli();
724 tmp = sk->partial;
725 if (tmp)
726 del_timer(&sk->partial_timer);
727 sk->partial = skb;
728 init_timer(&sk->partial_timer);
729 sk->partial_timer.expires = HZ;
730 sk->partial_timer.function = (void (*)(unsigned long)) tcp_send_partial;
731 sk->partial_timer.data = (unsigned long) sk;
732 add_timer(&sk->partial_timer);
733 restore_flags(flags);
734 if (tmp)
735 tcp_send_skb(sk, tmp);
736 }
737
738
739
740
741
742
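/*
 * Send a bare ACK carrying the given sequence and acknowledgement numbers.
 * If no buffer can be allocated the ACK is remembered in ack_backlog and a
 * short retransmit timer is set so that it goes out later.
 */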
743 static void tcp_send_ack(unsigned long sequence, unsigned long ack,
744 struct sock *sk,
745 struct tcphdr *th, unsigned long daddr)
746 {
747 struct sk_buff *buff;
748 struct tcphdr *t1;
749 struct device *dev = NULL;
750 int tmp;
751
752 if(sk->zapped)
753 return;
754
755
756
757
758
759 buff = sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
760 if (buff == NULL)
761 {
762
763 sk->ack_backlog++;
764 if (sk->timeout != TIME_WRITE && tcp_connected(sk->state))
765 {
766 reset_timer(sk, TIME_WRITE, 10);
767 }
768 return;
769 }
770
771 buff->len = sizeof(struct tcphdr);
772 buff->sk = sk;
773 buff->localroute = sk->localroute;
774 t1 =(struct tcphdr *) buff->data;
775
776
777 tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
778 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
779 if (tmp < 0)
780 {
781 buff->free=1;
782 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
783 return;
784 }
785 buff->len += tmp;
786 t1 =(struct tcphdr *)((char *)t1 +tmp);
787
788
789 memcpy(t1, th, sizeof(*t1));
790
791
792
793
794
795 t1->dest = th->source;
796 t1->source = th->dest;
797 t1->seq = ntohl(sequence);
798 t1->ack = 1;
799 sk->window = tcp_select_window(sk);
800 t1->window = ntohs(sk->window);
801 t1->res1 = 0;
802 t1->res2 = 0;
803 t1->rst = 0;
804 t1->urg = 0;
805 t1->syn = 0;
806 t1->psh = 0;
807 t1->fin = 0;
808 if (ack == sk->acked_seq)
809 {
810 sk->ack_backlog = 0;
811 sk->bytes_rcv = 0;
812 sk->ack_timed = 0;
813 if (sk->send_head == NULL && skb_peek(&sk->write_queue) == NULL
814 && sk->timeout == TIME_WRITE)
815 {
816 if(sk->keepopen) {
817 reset_timer(sk,TIME_KEEPOPEN,TCP_TIMEOUT_LEN);
818 } else {
819 delete_timer(sk);
820 }
821 }
822 }
823 t1->ack_seq = ntohl(ack);
824 t1->doff = sizeof(*t1)/4;
825 tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), sk);
826 if (sk->debug)
827 printk("\rtcp_ack: seq %lx ack %lx\n", sequence, ack);
828 tcp_statistics.TcpOutSegs++;
829 sk->prot->queue_xmit(sk, dev, buff, 1);
830 }
831
832
833
834
835
836
837 static int tcp_build_header(struct tcphdr *th, struct sock *sk, int push)
838 {
839
840
841 memcpy(th,(void *) &(sk->dummy_th), sizeof(*th));
842 th->seq = htonl(sk->write_seq);
843 th->psh =(push == 0) ? 1 : 0;
844 th->doff = sizeof(*th)/4;
845 th->ack = 1;
846 th->fin = 0;
847 sk->ack_backlog = 0;
848 sk->bytes_rcv = 0;
849 sk->ack_timed = 0;
850 th->ack_seq = htonl(sk->acked_seq);
851 sk->window = tcp_select_window(sk);
852 th->window = htons(sk->window);
853
854 return(sizeof(*th));
855 }
856
857
858
859
860
861
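/*
 * The write()/send() path.  Wait until the connection is established, then
 * loop over the user data: top up any pending partial packet first,
 * otherwise allocate an skb sized from the offered window and the mss,
 * build the IP and TCP headers, copy the data from user space, and either
 * transmit the segment or queue it as a new partial packet.
 */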
862 static int tcp_write(struct sock *sk, unsigned char *from,
863 int len, int nonblock, unsigned flags)
864 {
865 int copied = 0;
866 int copy;
867 int tmp;
868 struct sk_buff *skb;
869 struct sk_buff *send_tmp;
870 unsigned char *buff;
871 struct proto *prot;
872 struct device *dev = NULL;
873
874 sk->inuse=1;
875 prot = sk->prot;
876 while(len > 0)
877 {
878 if (sk->err)
879 {
880 release_sock(sk);
881 if (copied)
882 return(copied);
883 tmp = -sk->err;
884 sk->err = 0;
885 return(tmp);
886 }
887
888
889
890
891
892 if (sk->shutdown & SEND_SHUTDOWN)
893 {
894 release_sock(sk);
895 sk->err = EPIPE;
896 if (copied)
897 return(copied);
898 sk->err = 0;
899 return(-EPIPE);
900 }
901
902
903
904
905
906
907 while(sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)
908 {
909 if (sk->err)
910 {
911 release_sock(sk);
912 if (copied)
913 return(copied);
914 tmp = -sk->err;
915 sk->err = 0;
916 return(tmp);
917 }
918
919 if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV)
920 {
921 release_sock(sk);
922 if (copied)
923 return(copied);
924
925 if (sk->err)
926 {
927 tmp = -sk->err;
928 sk->err = 0;
929 return(tmp);
930 }
931
932 if (sk->keepopen)
933 {
934 send_sig(SIGPIPE, current, 0);
935 }
936 return(-EPIPE);
937 }
938
939 if (nonblock || copied)
940 {
941 release_sock(sk);
942 if (copied)
943 return(copied);
944 return(-EAGAIN);
945 }
946
947 release_sock(sk);
948 cli();
949
950 if (sk->state != TCP_ESTABLISHED &&
951 sk->state != TCP_CLOSE_WAIT && sk->err == 0)
952 {
953 interruptible_sleep_on(sk->sleep);
954 if (current->signal & ~current->blocked)
955 {
956 sti();
957 if (copied)
958 return(copied);
959 return(-ERESTARTSYS);
960 }
961 }
962 sk->inuse = 1;
963 sti();
964 }
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982 if ((skb = tcp_dequeue_partial(sk)) != NULL)
983 {
984 int hdrlen;
985
986
987 hdrlen = ((unsigned long)skb->h.th - (unsigned long)skb->data)
988 + sizeof(struct tcphdr);
989
990
991 if (!(flags & MSG_OOB))
992 {
993 copy = min(sk->mss - (skb->len - hdrlen), len);
994
995 if (copy <= 0)
996 {
997 printk("TCP: **bug**: \"copy\" <= 0!!\n");
998 copy = 0;
999 }
1000
1001 memcpy_fromfs(skb->data + skb->len, from, copy);
1002 skb->len += copy;
1003 from += copy;
1004 copied += copy;
1005 len -= copy;
1006 sk->write_seq += copy;
1007 }
1008 if ((skb->len - hdrlen) >= sk->mss ||
1009 (flags & MSG_OOB) || !sk->packets_out)
1010 tcp_send_skb(sk, skb);
1011 else
1012 tcp_enqueue_partial(skb, sk);
1013 continue;
1014 }
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028 copy = sk->window_seq - sk->write_seq;
1029 if (copy <= 0 || copy < (sk->max_window >> 1) || copy > sk->mss)
1030 copy = sk->mss;
1031 if (copy > len)
1032 copy = len;
1033
1034
1035
1036
1037
1038 send_tmp = NULL;
1039 if (copy < sk->mss && !(flags & MSG_OOB))
1040 {
1041
1042
1043
1044 release_sock(sk);
1045
1046
1047
1048
1049 skb = prot->wmalloc(sk, sk->mtu + 128 + prot->max_header, 0, GFP_KERNEL);
1050 sk->inuse = 1;
1051 send_tmp = skb;
1052 }
1053 else
1054 {
1055
1056
1057
1058 release_sock(sk);
1059 skb = prot->wmalloc(sk, copy + prot->max_header , 0, GFP_KERNEL);
1060 sk->inuse = 1;
1061 }
1062
1063
1064
1065
1066
1067 if (skb == NULL)
1068 {
1069 if (nonblock )
1070 {
1071 release_sock(sk);
1072 if (copied)
1073 return(copied);
1074 return(-EAGAIN);
1075 }
1076
1077
1078
1079
1080
1081 tmp = sk->wmem_alloc;
1082 release_sock(sk);
1083 cli();
1084
1085
1086
1087 if (tmp <= sk->wmem_alloc &&
1088 (sk->state == TCP_ESTABLISHED||sk->state == TCP_CLOSE_WAIT)
1089 && sk->err == 0)
1090 {
1091 interruptible_sleep_on(sk->sleep);
1092 if (current->signal & ~current->blocked)
1093 {
1094 sti();
1095 if (copied)
1096 return(copied);
1097 return(-ERESTARTSYS);
1098 }
1099 }
1100 sk->inuse = 1;
1101 sti();
1102 continue;
1103 }
1104
1105 skb->len = 0;
1106 skb->sk = sk;
1107 skb->free = 0;
1108 skb->localroute = sk->localroute|(flags&MSG_DONTROUTE);
1109
1110 buff = skb->data;
1111
1112
1113
1114
1115
1116
1117 tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev,
1118 IPPROTO_TCP, sk->opt, skb->mem_len,sk->ip_tos,sk->ip_ttl);
1119 if (tmp < 0 )
1120 {
1121 prot->wfree(sk, skb->mem_addr, skb->mem_len);
1122 release_sock(sk);
1123 if (copied)
1124 return(copied);
1125 return(tmp);
1126 }
1127 skb->len += tmp;
1128 skb->dev = dev;
1129 buff += tmp;
1130 skb->h.th =(struct tcphdr *) buff;
1131 tmp = tcp_build_header((struct tcphdr *)buff, sk, len-copy);
1132 if (tmp < 0)
1133 {
1134 prot->wfree(sk, skb->mem_addr, skb->mem_len);
1135 release_sock(sk);
1136 if (copied)
1137 return(copied);
1138 return(tmp);
1139 }
1140
1141 if (flags & MSG_OOB)
1142 {
1143 ((struct tcphdr *)buff)->urg = 1;
1144 ((struct tcphdr *)buff)->urg_ptr = ntohs(copy);
1145 }
1146 skb->len += tmp;
1147 memcpy_fromfs(buff+tmp, from, copy);
1148
1149 from += copy;
1150 copied += copy;
1151 len -= copy;
1152 skb->len += copy;
1153 skb->free = 0;
1154 sk->write_seq += copy;
1155
1156 if (send_tmp != NULL && sk->packets_out)
1157 {
1158 tcp_enqueue_partial(send_tmp, sk);
1159 continue;
1160 }
1161 tcp_send_skb(sk, skb);
1162 }
1163 sk->err = 0;
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176 if(sk->partial && ((!sk->packets_out)
1177
1178 || (sk->nonagle && before(sk->write_seq , sk->window_seq))
1179 ))
1180 tcp_send_partial(sk);
1181
1182 release_sock(sk);
1183 return(copied);
1184 }
1185
1186
1187 static int tcp_sendto(struct sock *sk, unsigned char *from,
1188 int len, int nonblock, unsigned flags,
1189 struct sockaddr_in *addr, int addr_len)
1190 {
1191 if (flags & ~(MSG_OOB|MSG_DONTROUTE))
1192 return -EINVAL;
1193 if (addr_len < sizeof(*addr))
1194 return(-EINVAL);
1195 if (addr->sin_family && addr->sin_family != AF_INET)
1196 return(-EINVAL);
1197 if (addr->sin_port != sk->dummy_th.dest)
1198 return(-EISCONN);
1199 if (addr->sin_addr.s_addr != sk->daddr)
1200 return(-EISCONN);
1201 return(tcp_write(sk, from, len, nonblock, flags));
1202 }
1203
1204
1205 static void tcp_read_wakeup(struct sock *sk)
1206 {
1207 int tmp;
1208 struct device *dev = NULL;
1209 struct tcphdr *t1;
1210 struct sk_buff *buff;
1211
1212 if (!sk->ack_backlog)
1213 return;
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226 buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
1227 if (buff == NULL)
1228 {
1229
1230 reset_timer(sk, TIME_WRITE, 10);
1231 return;
1232 }
1233
1234 buff->len = sizeof(struct tcphdr);
1235 buff->sk = sk;
1236 buff->localroute = sk->localroute;
1237
1238
1239
1240
1241
1242 tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
1243 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
1244 if (tmp < 0)
1245 {
1246 buff->free=1;
1247 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
1248 return;
1249 }
1250
1251 buff->len += tmp;
1252 t1 =(struct tcphdr *)(buff->data +tmp);
1253
1254 memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
1255 t1->seq = htonl(sk->sent_seq);
1256 t1->ack = 1;
1257 t1->res1 = 0;
1258 t1->res2 = 0;
1259 t1->rst = 0;
1260 t1->urg = 0;
1261 t1->syn = 0;
1262 t1->psh = 0;
1263 sk->ack_backlog = 0;
1264 sk->bytes_rcv = 0;
1265 sk->window = tcp_select_window(sk);
1266 t1->window = ntohs(sk->window);
1267 t1->ack_seq = ntohl(sk->acked_seq);
1268 t1->doff = sizeof(*t1)/4;
1269 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
1270 sk->prot->queue_xmit(sk, dev, buff, 1);
1271 tcp_statistics.TcpOutSegs++;
1272 }
1273
1274
1275
1276
1277
1278
1279
1280
1281
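/*
 * Release receive buffers the reader has finished with.  If freeing them
 * opened up a useful amount of receive space, either push a window update
 * out straight away or schedule a delayed ACK to carry it.
 */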
1282 static void cleanup_rbuf(struct sock *sk)
1283 {
1284 unsigned long flags;
1285 unsigned long left;
1286 struct sk_buff *skb;
1287 unsigned long rspace;
1288
1289 if(sk->debug)
1290 printk("cleaning rbuf for sk=%p\n", sk);
1291
1292 save_flags(flags);
1293 cli();
1294
1295 left = sk->prot->rspace(sk);
1296
1297
1298
1299
1300
1301
1302 while((skb=skb_peek(&sk->receive_queue)) != NULL)
1303 {
1304 if (!skb->used)
1305 break;
1306 skb_unlink(skb);
1307 skb->sk = sk;
1308 kfree_skb(skb, FREE_READ);
1309 }
1310
1311 restore_flags(flags);
1312
1313
1314
1315
1316
1317
1318
1319
1320 if(sk->debug)
1321 printk("sk->rspace = %lu, was %lu\n", sk->prot->rspace(sk),
1322 left);
1323 if ((rspace=sk->prot->rspace(sk)) != left)
1324 {
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335 sk->ack_backlog++;
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345 if (rspace > (sk->window - sk->bytes_rcv + sk->mtu))
1346 {
1347
1348 tcp_read_wakeup(sk);
1349 }
1350 else
1351 {
1352
1353 int was_active = del_timer(&sk->timer);
1354 if (!was_active || TCP_ACK_TIME < sk->timer.expires)
1355 {
1356 reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
1357 }
1358 else
1359 add_timer(&sk->timer);
1360 }
1361 }
1362 }
1363
1364
1365
1366
1367
1368
1369 static int tcp_read_urg(struct sock * sk, int nonblock,
1370 unsigned char *to, int len, unsigned flags)
1371 {
1372 struct wait_queue wait = { current, NULL };
1373
1374 while (len > 0)
1375 {
1376 if (sk->urginline || !sk->urg_data || sk->urg_data == URG_READ)
1377 return -EINVAL;
1378 if (sk->urg_data & URG_VALID)
1379 {
1380 char c = sk->urg_data;
1381 if (!(flags & MSG_PEEK))
1382 sk->urg_data = URG_READ;
1383 put_fs_byte(c, to);
1384 return 1;
1385 }
1386
1387 if (sk->err)
1388 {
1389 int tmp = -sk->err;
1390 sk->err = 0;
1391 return tmp;
1392 }
1393
1394 if (sk->state == TCP_CLOSE || sk->done)
1395 {
1396 if (!sk->done) {
1397 sk->done = 1;
1398 return 0;
1399 }
1400 return -ENOTCONN;
1401 }
1402
1403 if (sk->shutdown & RCV_SHUTDOWN)
1404 {
1405 sk->done = 1;
1406 return 0;
1407 }
1408
1409 if (nonblock)
1410 return -EAGAIN;
1411
1412 if (current->signal & ~current->blocked)
1413 return -ERESTARTSYS;
1414
1415 current->state = TASK_INTERRUPTIBLE;
1416 add_wait_queue(sk->sleep, &wait);
1417 if ((sk->urg_data & URG_NOTYET) && sk->err == 0 &&
1418 !(sk->shutdown & RCV_SHUTDOWN))
1419 schedule();
1420 remove_wait_queue(sk->sleep, &wait);
1421 current->state = TASK_RUNNING;
1422 }
1423 return 0;
1424 }
1425
1426
1427
1428
1429
1430
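/*
 * The read()/recv() path.  Walk the receive queue from copied_seq, copying
 * in-sequence data to user space, skipping the urgent byte when it is
 * delivered out of band and honouring MSG_PEEK; block when nothing is
 * available and the caller allows it, and finish by cleaning the receive
 * queue so a window update can go out.
 */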
1431 static int tcp_read(struct sock *sk, unsigned char *to,
1432 int len, int nonblock, unsigned flags)
1433 {
1434 struct wait_queue wait = { current, NULL };
1435 int copied = 0;
1436 unsigned long peek_seq;
1437 unsigned long *seq;
1438 unsigned long used;
1439
1440
1441 if (sk->state == TCP_LISTEN)
1442 return -ENOTCONN;
1443
1444
1445 if (flags & MSG_OOB)
1446 return tcp_read_urg(sk, nonblock, to, len, flags);
1447
1448 peek_seq = sk->copied_seq;
1449 seq = &sk->copied_seq;
1450 if (flags & MSG_PEEK)
1451 seq = &peek_seq;
1452
1453 add_wait_queue(sk->sleep, &wait);
1454 sk->inuse = 1;
1455 while (len > 0)
1456 {
1457 struct sk_buff * skb;
1458 unsigned long offset;
1459
1460
1461
1462
1463 if (copied && sk->urg_data && sk->urg_seq == 1+*seq)
1464 break;
1465
1466 current->state = TASK_INTERRUPTIBLE;
1467
1468 skb = skb_peek(&sk->receive_queue);
1469 do
1470 {
1471 if (!skb)
1472 break;
1473 if (before(1+*seq, skb->h.th->seq))
1474 break;
1475 offset = 1 + *seq - skb->h.th->seq;
1476 if (skb->h.th->syn)
1477 offset--;
1478 if (offset < skb->len)
1479 goto found_ok_skb;
1480 if (!(flags & MSG_PEEK))
1481 skb->used = 1;
1482 skb = skb->next;
1483 }
1484 while (skb != (struct sk_buff *)&sk->receive_queue);
1485
1486 if (copied)
1487 break;
1488
1489 if (sk->err)
1490 {
1491 copied = -sk->err;
1492 sk->err = 0;
1493 break;
1494 }
1495
1496 if (sk->state == TCP_CLOSE)
1497 {
1498 if (!sk->done)
1499 {
1500 sk->done = 1;
1501 break;
1502 }
1503 copied = -ENOTCONN;
1504 break;
1505 }
1506
1507 if (sk->shutdown & RCV_SHUTDOWN)
1508 {
1509 sk->done = 1;
1510 break;
1511 }
1512
1513 if (nonblock)
1514 {
1515 copied = -EAGAIN;
1516 break;
1517 }
1518
1519 cleanup_rbuf(sk);
1520 release_sock(sk);
1521 schedule();
1522 sk->inuse = 1;
1523
1524 if (current->signal & ~current->blocked)
1525 {
1526 copied = -ERESTARTSYS;
1527 break;
1528 }
1529 continue;
1530
1531 found_ok_skb:
1532
1533 used = skb->len - offset;
1534 if (len < used)
1535 used = len;
1536
1537 if (sk->urg_data)
1538 {
1539 unsigned long urg_offset = sk->urg_seq - (1 + *seq);
1540 if (urg_offset < used)
1541 {
1542 if (!urg_offset)
1543 {
1544 if (!sk->urginline)
1545 {
1546 ++*seq;
1547 offset++;
1548 used--;
1549 }
1550 }
1551 else
1552 used = urg_offset;
1553 }
1554 }
1555
1556 memcpy_tofs(to,((unsigned char *)skb->h.th) +
1557 skb->h.th->doff*4 + offset, used);
1558 copied += used;
1559 len -= used;
1560 to += used;
1561 *seq += used;
1562 if (after(sk->copied_seq+1,sk->urg_seq))
1563 sk->urg_data = 0;
1564 if (!(flags & MSG_PEEK) && (used + offset >= skb->len))
1565 skb->used = 1;
1566 }
1567 remove_wait_queue(sk->sleep, &wait);
1568 current->state = TASK_RUNNING;
1569
1570
1571 cleanup_rbuf(sk);
1572 release_sock(sk);
1573 return copied;
1574 }
1575
1576
1577
1578
1579
1580
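/*
 * shutdown() for the sending direction.  Flush any partial packet, build
 * and send (or queue behind pending data) a FIN, and move the connection
 * to FIN_WAIT1, LAST_ACK or FIN_WAIT2 as appropriate.
 */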
1581 void tcp_shutdown(struct sock *sk, int how)
1582 {
1583 struct sk_buff *buff;
1584 struct tcphdr *t1, *th;
1585 struct proto *prot;
1586 int tmp;
1587 struct device *dev = NULL;
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598 if (!(how & SEND_SHUTDOWN))
1599 return;
1600
1601
1602
1603
1604
1605 if (sk->state == TCP_FIN_WAIT1 ||
1606 sk->state == TCP_FIN_WAIT2 ||
1607 sk->state == TCP_CLOSING ||
1608 sk->state == TCP_LAST_ACK ||
1609 sk->state == TCP_TIME_WAIT
1610 )
1611 {
1612 return;
1613 }
1614 sk->inuse = 1;
1615
1616
1617
1618
1619
1620 sk->shutdown |= SEND_SHUTDOWN;
1621
1622
1623
1624
1625
1626 if (sk->partial)
1627 tcp_send_partial(sk);
1628
1629 prot =(struct proto *)sk->prot;
1630 th =(struct tcphdr *)&sk->dummy_th;
1631 release_sock(sk);
1632 buff = prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
1633 if (buff == NULL)
1634 return;
1635 sk->inuse = 1;
1636
1637 buff->sk = sk;
1638 buff->len = sizeof(*t1);
1639 buff->localroute = sk->localroute;
1640 t1 =(struct tcphdr *) buff->data;
1641
1642
1643
1644
1645
1646 tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
1647 IPPROTO_TCP, sk->opt,
1648 sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
1649 if (tmp < 0)
1650 {
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661 buff->free=1;
1662 prot->wfree(sk,buff->mem_addr, buff->mem_len);
1663
1664 if (sk->state == TCP_ESTABLISHED)
1665 tcp_set_state(sk,TCP_FIN_WAIT1);
1666 else if(sk->state == TCP_CLOSE_WAIT)
1667 tcp_set_state(sk,TCP_LAST_ACK);
1668 else
1669 tcp_set_state(sk,TCP_FIN_WAIT2);
1670
1671 release_sock(sk);
1672 return;
1673 }
1674
1675 t1 =(struct tcphdr *)((char *)t1 +tmp);
1676 buff->len += tmp;
1677 buff->dev = dev;
1678 memcpy(t1, th, sizeof(*t1));
1679 t1->seq = ntohl(sk->write_seq);
1680 sk->write_seq++;
1681 buff->h.seq = sk->write_seq;
1682 t1->ack = 1;
1683 t1->ack_seq = ntohl(sk->acked_seq);
1684 t1->window = ntohs(sk->window=tcp_select_window(sk));
1685 t1->fin = 1;
1686 t1->rst = 0;
1687 t1->doff = sizeof(*t1)/4;
1688 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
1689
1690
1691
1692
1693
1694
1695 if (skb_peek(&sk->write_queue) != NULL)
1696 {
1697 buff->free=0;
1698 if (buff->next != NULL)
1699 {
1700 printk("tcp_shutdown: next != NULL\n");
1701 skb_unlink(buff);
1702 }
1703 skb_queue_tail(&sk->write_queue, buff);
1704 }
1705 else
1706 {
1707 sk->sent_seq = sk->write_seq;
1708 sk->prot->queue_xmit(sk, dev, buff, 0);
1709 }
1710
1711 if (sk->state == TCP_ESTABLISHED)
1712 tcp_set_state(sk,TCP_FIN_WAIT1);
1713 else if (sk->state == TCP_CLOSE_WAIT)
1714 tcp_set_state(sk,TCP_LAST_ACK);
1715 else
1716 tcp_set_state(sk,TCP_FIN_WAIT2);
1717
1718 release_sock(sk);
1719 }
1720
1721
1722 static int
1723 tcp_recvfrom(struct sock *sk, unsigned char *to,
1724 int to_len, int nonblock, unsigned flags,
1725 struct sockaddr_in *addr, int *addr_len)
1726 {
1727 int result;
1728
1729
1730
1731
1732
1733
1734
1735 if(addr_len)
1736 *addr_len = sizeof(*addr);
1737 result=tcp_read(sk, to, to_len, nonblock, flags);
1738
1739 if (result < 0)
1740 return(result);
1741
1742 if(addr)
1743 {
1744 addr->sin_family = AF_INET;
1745 addr->sin_port = sk->dummy_th.dest;
1746 addr->sin_addr.s_addr = sk->daddr;
1747 }
1748 return(result);
1749 }
1750
1751
1752
1753
1754
1755
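/*
 * Send a reset in reply to a segment that has no usable connection.
 * Following RFC 793, the RST takes its sequence number from the incoming
 * ACK field when one is present, and otherwise acknowledges the offending
 * segment with a sequence number of zero.
 */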
1756 static void tcp_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
1757 struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
1758 {
1759 struct sk_buff *buff;
1760 struct tcphdr *t1;
1761 int tmp;
1762 struct device *ndev=NULL;
1763
1764
1765
1766
1767
1768
1769 buff = prot->wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
1770 if (buff == NULL)
1771 return;
1772
1773 buff->len = sizeof(*t1);
1774 buff->sk = NULL;
1775 buff->dev = dev;
1776 buff->localroute = 0;
1777
1778 t1 =(struct tcphdr *) buff->data;
1779
1780
1781
1782
1783
1784 tmp = prot->build_header(buff, saddr, daddr, &ndev, IPPROTO_TCP, opt,
1785 sizeof(struct tcphdr),tos,ttl);
1786 if (tmp < 0)
1787 {
1788 buff->free = 1;
1789 prot->wfree(NULL, buff->mem_addr, buff->mem_len);
1790 return;
1791 }
1792
1793 t1 =(struct tcphdr *)((char *)t1 +tmp);
1794 buff->len += tmp;
1795 memcpy(t1, th, sizeof(*t1));
1796
1797
1798
1799
1800
1801 t1->dest = th->source;
1802 t1->source = th->dest;
1803 t1->rst = 1;
1804 t1->window = 0;
1805
1806 if(th->ack)
1807 {
1808 t1->ack = 0;
1809 t1->seq = th->ack_seq;
1810 t1->ack_seq = 0;
1811 }
1812 else
1813 {
1814 t1->ack = 1;
1815 if(!th->syn)
1816 t1->ack_seq=htonl(th->seq);
1817 else
1818 t1->ack_seq=htonl(th->seq+1);
1819 t1->seq=0;
1820 }
1821
1822 t1->syn = 0;
1823 t1->urg = 0;
1824 t1->fin = 0;
1825 t1->psh = 0;
1826 t1->doff = sizeof(*t1)/4;
1827 tcp_send_check(t1, saddr, daddr, sizeof(*t1), NULL);
1828 prot->queue_xmit(NULL, dev, buff, 1);
1829 tcp_statistics.TcpOutSegs++;
1830 }
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
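/*
 * Parse the options on an incoming segment.  Only the MSS option on a SYN
 * is acted upon: it clamps sk->mtu, with a fallback of 536 when the peer
 * offers no MSS, and sk->mss is then derived from the maximum window seen
 * and the mtu.
 */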
1841 static void tcp_options(struct sock *sk, struct tcphdr *th)
1842 {
1843 unsigned char *ptr;
1844 int length=(th->doff*4)-sizeof(struct tcphdr);
1845 int mss_seen = 0;
1846
1847 ptr = (unsigned char *)(th + 1);
1848
1849 while(length>0)
1850 {
1851 int opcode=*ptr++;
1852 int opsize;
1853 switch(opcode)
1854 {
1855 case TCPOPT_EOL:
1856 return;
1857 case TCPOPT_NOP: /* a NOP is a single byte option; consume only one byte */
1858 length--;
1859 continue;
1860 
1861 default:
1862 opsize=*ptr++; /* read the length byte only for multi-byte options */
1863 if(opsize<=2) return;
1864 switch(opcode)
1865 {
1866 case TCPOPT_MSS:
1867 if(opsize==4 && th->syn)
1868 {
1869 sk->mtu=min(sk->mtu,ntohs(*(unsigned short *)ptr));
1870 mss_seen = 1;
1871 }
1872 break;
1873
1874 }
1875 ptr+=opsize-2;
1876 length-=opsize;
1877 }
1878 }
1879 if (th->syn)
1880 {
1881 if (! mss_seen)
1882 sk->mtu=min(sk->mtu, 536);
1883 }
1884 #ifdef CONFIG_INET_PCTCP
1885 sk->mss = min(sk->max_window >> 1, sk->mtu);
1886 #else
1887 sk->mss = min(sk->max_window, sk->mtu);
1888 #endif
1889 }
1890
1891 static inline unsigned long default_mask(unsigned long dst)
1892 {
1893 dst = ntohl(dst);
1894 if (IN_CLASSA(dst))
1895 return htonl(IN_CLASSA_NET);
1896 if (IN_CLASSB(dst))
1897 return htonl(IN_CLASSB_NET);
1898 return htonl(IN_CLASSC_NET);
1899 }
1900
1901
1902
1903
1904
1905
1906
1907
1908
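/*
 * A SYN has arrived for a listening socket.  Clone the listener into a new
 * sock in SYN_RECV, pick the mtu/mss and window clamp from the route,
 * answer with a SYN+ACK carrying our MSS option, and park the original SYN
 * on the listener's receive queue for accept() to pick up.
 */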
1909 static void tcp_conn_request(struct sock *sk, struct sk_buff *skb,
1910 unsigned long daddr, unsigned long saddr,
1911 struct options *opt, struct device *dev)
1912 {
1913 struct sk_buff *buff;
1914 struct tcphdr *t1;
1915 unsigned char *ptr;
1916 struct sock *newsk;
1917 struct tcphdr *th;
1918 struct device *ndev=NULL;
1919 int tmp;
1920 struct rtable *rt;
1921
1922 th = skb->h.th;
1923
1924
1925 if (!sk->dead)
1926 {
1927 sk->data_ready(sk,0);
1928 }
1929 else
1930 {
1931 tcp_reset(daddr, saddr, th, sk->prot, opt, dev, sk->ip_tos,sk->ip_ttl);
1932 tcp_statistics.TcpAttemptFails++;
1933 kfree_skb(skb, FREE_READ);
1934 return;
1935 }
1936
1937
1938
1939
1940
1941
1942 if (sk->ack_backlog >= sk->max_ack_backlog)
1943 {
1944 tcp_statistics.TcpAttemptFails++;
1945 kfree_skb(skb, FREE_READ);
1946 return;
1947 }
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957 newsk = (struct sock *) kmalloc(sizeof(struct sock), GFP_ATOMIC);
1958 if (newsk == NULL)
1959 {
1960
1961 tcp_statistics.TcpAttemptFails++;
1962 kfree_skb(skb, FREE_READ);
1963 return;
1964 }
1965
1966 memcpy(newsk, sk, sizeof(*newsk));
1967 skb_queue_head_init(&newsk->write_queue);
1968 skb_queue_head_init(&newsk->receive_queue);
1969 newsk->send_head = NULL;
1970 newsk->send_tail = NULL;
1971 skb_queue_head_init(&newsk->back_log);
1972 newsk->rtt = 0;
1973 newsk->rto = TCP_TIMEOUT_INIT;
1974 newsk->mdev = 0;
1975 newsk->max_window = 0;
1976 newsk->cong_window = 1;
1977 newsk->cong_count = 0;
1978 newsk->ssthresh = 0;
1979 newsk->backoff = 0;
1980 newsk->blog = 0;
1981 newsk->intr = 0;
1982 newsk->proc = 0;
1983 newsk->done = 0;
1984 newsk->partial = NULL;
1985 newsk->pair = NULL;
1986 newsk->wmem_alloc = 0;
1987 newsk->rmem_alloc = 0;
1988 newsk->localroute = sk->localroute;
1989
1990 newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;
1991
1992 newsk->err = 0;
1993 newsk->shutdown = 0;
1994 newsk->ack_backlog = 0;
1995 newsk->acked_seq = skb->h.th->seq+1;
1996 newsk->fin_seq = skb->h.th->seq;
1997 newsk->copied_seq = skb->h.th->seq;
1998 newsk->state = TCP_SYN_RECV;
1999 newsk->timeout = 0;
2000 newsk->write_seq = jiffies * SEQ_TICK - seq_offset;
2001 newsk->window_seq = newsk->write_seq;
2002 newsk->rcv_ack_seq = newsk->write_seq;
2003 newsk->urg_data = 0;
2004 newsk->retransmits = 0;
2005 newsk->destroy = 0;
2006 init_timer(&newsk->timer);
2007 newsk->timer.data = (unsigned long)newsk;
2008 newsk->timer.function = &net_timer;
2009 newsk->dummy_th.source = skb->h.th->dest;
2010 newsk->dummy_th.dest = skb->h.th->source;
2011
2012
2013
2014
2015
2016 newsk->daddr = saddr;
2017 newsk->saddr = daddr;
2018
2019 put_sock(newsk->num,newsk);
2020 newsk->dummy_th.res1 = 0;
2021 newsk->dummy_th.doff = 6;
2022 newsk->dummy_th.fin = 0;
2023 newsk->dummy_th.syn = 0;
2024 newsk->dummy_th.rst = 0;
2025 newsk->dummy_th.psh = 0;
2026 newsk->dummy_th.ack = 0;
2027 newsk->dummy_th.urg = 0;
2028 newsk->dummy_th.res2 = 0;
2029 newsk->acked_seq = skb->h.th->seq + 1;
2030 newsk->copied_seq = skb->h.th->seq;
2031
2032
2033
2034
2035
2036 newsk->ip_ttl=sk->ip_ttl;
2037 newsk->ip_tos=skb->ip_hdr->tos;
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047 rt=ip_rt_route(saddr, NULL,NULL);
2048
2049 if(rt!=NULL && (rt->rt_flags&RTF_WINDOW))
2050 newsk->window_clamp = rt->rt_window;
2051 else
2052 newsk->window_clamp = 0;
2053
2054 if (sk->user_mss)
2055 newsk->mtu = sk->user_mss;
2056 else if(rt!=NULL && (rt->rt_flags&RTF_MSS))
2057 newsk->mtu = rt->rt_mss - HEADER_SIZE;
2058 else
2059 {
2060 #ifdef CONFIG_INET_SNARL
2061 if ((saddr ^ daddr) & default_mask(saddr))
2062 #else
2063 if ((saddr ^ daddr) & dev->pa_mask)
2064 #endif
2065 newsk->mtu = 576 - HEADER_SIZE;
2066 else
2067 newsk->mtu = MAX_WINDOW;
2068 }
2069
2070
2071
2072
2073
2074 newsk->mtu = min(newsk->mtu, dev->mtu - HEADER_SIZE);
2075
2076
2077
2078
2079
2080 tcp_options(newsk,skb->h.th);
2081
2082 buff = newsk->prot->wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
2083 if (buff == NULL)
2084 {
2085 sk->err = -ENOMEM;
2086 newsk->dead = 1;
2087 release_sock(newsk);
2088 kfree_skb(skb, FREE_READ);
2089 tcp_statistics.TcpAttemptFails++;
2090 return;
2091 }
2092
2093 buff->len = sizeof(struct tcphdr)+4;
2094 buff->sk = newsk;
2095 buff->localroute = newsk->localroute;
2096
2097 t1 =(struct tcphdr *) buff->data;
2098
2099
2100
2101
2102
2103 tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &ndev,
2104 IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);
2105
2106
2107
2108
2109
2110 if (tmp < 0)
2111 {
2112 sk->err = tmp;
2113 buff->free=1;
2114 kfree_skb(buff,FREE_WRITE);
2115 newsk->dead = 1;
2116 release_sock(newsk);
2117 skb->sk = sk;
2118 kfree_skb(skb, FREE_READ);
2119 tcp_statistics.TcpAttemptFails++;
2120 return;
2121 }
2122
2123 buff->len += tmp;
2124 t1 =(struct tcphdr *)((char *)t1 +tmp);
2125
2126 memcpy(t1, skb->h.th, sizeof(*t1));
2127 buff->h.seq = newsk->write_seq;
2128
2129
2130
2131 t1->dest = skb->h.th->source;
2132 t1->source = newsk->dummy_th.source;
2133 t1->seq = ntohl(newsk->write_seq++);
2134 t1->ack = 1;
2135 newsk->window = tcp_select_window(newsk);
2136 newsk->sent_seq = newsk->write_seq;
2137 t1->window = ntohs(newsk->window);
2138 t1->res1 = 0;
2139 t1->res2 = 0;
2140 t1->rst = 0;
2141 t1->urg = 0;
2142 t1->psh = 0;
2143 t1->syn = 1;
2144 t1->ack_seq = ntohl(skb->h.th->seq+1);
2145 t1->doff = sizeof(*t1)/4+1;
2146 ptr =(unsigned char *)(t1+1);
2147 ptr[0] = 2;
2148 ptr[1] = 4;
2149 ptr[2] = ((newsk->mtu) >> 8) & 0xff;
2150 ptr[3] =(newsk->mtu) & 0xff;
2151
2152 tcp_send_check(t1, daddr, saddr, sizeof(*t1)+4, newsk);
2153 newsk->prot->queue_xmit(newsk, dev, buff, 0);
2154
2155 reset_timer(newsk, TIME_WRITE , TCP_TIMEOUT_INIT);
2156 skb->sk = newsk;
2157
2158
2159
2160
2161
2162 sk->rmem_alloc -= skb->mem_len;
2163 newsk->rmem_alloc += skb->mem_len;
2164
2165 skb_queue_tail(&sk->receive_queue,skb);
2166 sk->ack_backlog++;
2167 release_sock(newsk);
2168 tcp_statistics.TcpOutSegs++;
2169 }
2170
2171
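/*
 * close() on a TCP socket.  Unread data is thrown away (and noted so that
 * the FIN can carry a reset), any partial packet is flushed, and depending
 * on the current state the socket is torn down immediately or a FIN is
 * sent and the usual closing states are entered.
 */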
2172 static void tcp_close(struct sock *sk, int timeout)
2173 {
2174 struct sk_buff *buff;
2175 int need_reset = 0;
2176 struct tcphdr *t1, *th;
2177 struct proto *prot;
2178 struct device *dev=NULL;
2179 int tmp;
2180
2181
2182
2183
2184
2185 sk->inuse = 1;
2186 sk->keepopen = 1;
2187 sk->shutdown = SHUTDOWN_MASK;
2188
2189 if (!sk->dead)
2190 sk->state_change(sk);
2191
2192 if (timeout == 0)
2193 {
2194
2195
2196
2197
2198
2199
2200 if (skb_peek(&sk->receive_queue) != NULL)
2201 {
2202 struct sk_buff *skb;
2203 if(sk->debug)
2204 printk("Clean rcv queue\n");
2205 while((skb=skb_dequeue(&sk->receive_queue))!=NULL)
2206 {
2207
2208
2209 if(skb->len > 0 && after(skb->h.th->seq + skb->len , sk->copied_seq))
2210 need_reset = 1;
2211 kfree_skb(skb, FREE_READ);
2212 }
2213 if(sk->debug)
2214 printk("Cleaned.\n");
2215 }
2216 }
2217
2218
2219
2220
2221
2222 if (sk->partial)
2223 {
2224 tcp_send_partial(sk);
2225 }
2226
2227 switch(sk->state)
2228 {
2229 case TCP_FIN_WAIT1:
2230 case TCP_FIN_WAIT2:
2231 case TCP_CLOSING:
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242 if (!timeout) {
2243 int timer_active;
2244
2245 timer_active = del_timer(&sk->timer);
2246 if (timer_active)
2247 add_timer(&sk->timer);
2248 else
2249 reset_timer(sk, TIME_CLOSE, 4 * sk->rto);
2250 }
2251 #ifdef NOTDEF
2252
2253
2254
2255
2256
2257
2258 reset_timer(sk, TIME_CLOSE, 4 * sk->rto);
2259 #endif
2260 if (timeout)
2261 tcp_time_wait(sk);
2262 release_sock(sk);
2263 return;
2264 case TCP_TIME_WAIT:
2265 case TCP_LAST_ACK:
2266
2267
2268
2269 if (timeout)
2270 {
2271 tcp_set_state(sk,TCP_CLOSE);
2272 }
2273 release_sock(sk);
2274 return;
2275 case TCP_LISTEN:
2276 tcp_set_state(sk,TCP_CLOSE);
2277 release_sock(sk);
2278 return;
2279 case TCP_CLOSE:
2280 release_sock(sk);
2281 return;
2282 case TCP_CLOSE_WAIT:
2283 case TCP_ESTABLISHED:
2284 case TCP_SYN_SENT:
2285 case TCP_SYN_RECV:
2286 prot =(struct proto *)sk->prot;
2287 th =(struct tcphdr *)&sk->dummy_th;
2288 buff = prot->wmalloc(sk, MAX_FIN_SIZE, 1, GFP_ATOMIC);
2289 if (buff == NULL)
2290 {
2291
2292
2293
2294 release_sock(sk);
2295 if (sk->state != TCP_CLOSE_WAIT)
2296 tcp_set_state(sk,TCP_ESTABLISHED);
2297 reset_timer(sk, TIME_CLOSE, 100);
2298 return;
2299 }
2300 buff->sk = sk;
2301 buff->free = 1;
2302 buff->len = sizeof(*t1);
2303 buff->localroute = sk->localroute;
2304 t1 =(struct tcphdr *) buff->data;
2305
2306
2307
2308
2309 tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
2310 IPPROTO_TCP, sk->opt,
2311 sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
2312 if (tmp < 0)
2313 {
2314 sk->write_seq++;
2315 kfree_skb(buff,FREE_WRITE);
2316
2317
2318
2319
2320
2321
2322 if(sk->state==TCP_ESTABLISHED)
2323 tcp_set_state(sk,TCP_FIN_WAIT1);
2324 else
2325 tcp_set_state(sk,TCP_FIN_WAIT2);
2326 reset_timer(sk, TIME_CLOSE,4*sk->rto);
2327 if(timeout)
2328 tcp_time_wait(sk);
2329
2330 release_sock(sk);
2331 return;
2332 }
2333
2334 t1 =(struct tcphdr *)((char *)t1 +tmp);
2335 buff->len += tmp;
2336 buff->dev = dev;
2337 memcpy(t1, th, sizeof(*t1));
2338 t1->seq = ntohl(sk->write_seq);
2339 sk->write_seq++;
2340 buff->h.seq = sk->write_seq;
2341 t1->ack = 1;
2342
2343
2344
2345
2346
2347 sk->delay_acks = 0;
2348 t1->ack_seq = ntohl(sk->acked_seq);
2349 t1->window = ntohs(sk->window=tcp_select_window(sk));
2350 t1->fin = 1;
2351 t1->rst = need_reset;
2352 t1->doff = sizeof(*t1)/4;
2353 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
2354
2355 tcp_statistics.TcpOutSegs++;
2356
2357 if (skb_peek(&sk->write_queue) == NULL)
2358 {
2359 sk->sent_seq = sk->write_seq;
2360 prot->queue_xmit(sk, dev, buff, 0);
2361 }
2362 else
2363 {
2364 reset_timer(sk, TIME_WRITE, sk->rto);
2365 if (buff->next != NULL)
2366 {
2367 printk("tcp_close: next != NULL\n");
2368 skb_unlink(buff);
2369 }
2370 skb_queue_tail(&sk->write_queue, buff);
2371 }
2372
2373
2374
2375
2376
2377
2378
2379
2380 if (sk->state == TCP_ESTABLISHED)
2381 tcp_set_state(sk,TCP_FIN_WAIT1);
2382 else if (sk->state == TCP_CLOSE_WAIT)
2383 tcp_set_state(sk,TCP_LAST_ACK);
2384 else if (sk->state != TCP_CLOSING)
2385 tcp_set_state(sk,TCP_FIN_WAIT2);
2386 }
2387 release_sock(sk);
2388 }
2389
2390
2391
2392
2393
2394
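/*
 * Push out anything on the write queue that the send window and the
 * congestion window now allow.  Segments that turn out to be already
 * acknowledged are simply freed; the rest get fresh ack/window fields and
 * checksums before being handed to the IP layer.
 */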
2395 static void
2396 tcp_write_xmit(struct sock *sk)
2397 {
2398 struct sk_buff *skb;
2399
2400
2401
2402
2403
2404
2405 if(sk->zapped)
2406 return;
2407
2408 while((skb = skb_peek(&sk->write_queue)) != NULL &&
2409 before(skb->h.seq, sk->window_seq + 1) &&
2410 (sk->retransmits == 0 ||
2411 sk->timeout != TIME_WRITE ||
2412 before(skb->h.seq, sk->rcv_ack_seq + 1))
2413 && sk->packets_out < sk->cong_window)
2414 {
2415 IS_SKB(skb);
2416 skb_unlink(skb);
2417
2418 if (before(skb->h.seq, sk->rcv_ack_seq +1))
2419 {
2420 sk->retransmits = 0;
2421 kfree_skb(skb, FREE_WRITE);
2422 if (!sk->dead)
2423 sk->write_space(sk);
2424 }
2425 else
2426 {
2427 struct tcphdr *th;
2428 struct iphdr *iph;
2429 int size;
2430
2431
2432
2433
2434
2435
2436
2437 iph = (struct iphdr *)(skb->data +
2438 skb->dev->hard_header_len);
2439 th = (struct tcphdr *)(((char *)iph) +(iph->ihl << 2));
2440 size = skb->len - (((unsigned char *) th) - skb->data);
2441
2442 th->ack_seq = ntohl(sk->acked_seq);
2443 th->window = ntohs(tcp_select_window(sk));
2444
2445 tcp_send_check(th, sk->saddr, sk->daddr, size, sk);
2446
2447 sk->sent_seq = skb->h.seq;
2448 sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
2449 }
2450 }
2451 }
2452
2453
2454
2455
2456
2457
2458
2459 static void sort_send(struct sock *sk)
2460 {
2461 struct sk_buff *list = NULL;
2462 struct sk_buff *skb,*skb2,*skb3;
2463
2464 for (skb = sk->send_head; skb != NULL; skb = skb2)
2465 {
2466 skb2 = skb->link3;
2467 if (list == NULL || before (skb->h.seq, list->h.seq))
2468 {
2469 skb->link3 = list;
2470 sk->send_tail = skb;
2471 list = skb;
2472 }
2473 else
2474 {
2475 for (skb3 = list; ; skb3 = skb3->link3)
2476 {
2477 if (skb3->link3 == NULL ||
2478 before(skb->h.seq, skb3->link3->h.seq))
2479 {
2480 skb->link3 = skb3->link3;
2481 skb3->link3 = skb;
2482 if (skb->link3 == NULL)
2483 sk->send_tail = skb;
2484 break;
2485 }
2486 }
2487 }
2488 }
2489 sk->send_head = list;
2490 }
2491
2492
2493
2494
2495
2496
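/*
 * Process an incoming ACK.  Update the send window, open the congestion
 * window by slow start or congestion avoidance, take an RTT sample and
 * recompute the retransmission timeout, free the segments the ACK covers,
 * restart the appropriate timer and drive the FIN_WAIT1, CLOSING and
 * LAST_ACK state transitions.
 */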
2497 static int tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
2498 {
2499 unsigned long ack;
2500 int flag = 0;
2501
2502
2503
2504
2505
2506
2507
2508
2509 if(sk->zapped)
2510 return(1);
2511
2512 ack = ntohl(th->ack_seq);
2513 if (ntohs(th->window) > sk->max_window)
2514 {
2515 sk->max_window = ntohs(th->window);
2516 #ifdef CONFIG_INET_PCTCP
2517 sk->mss = min(sk->max_window>>1, sk->mtu);
2518 #else
2519 sk->mss = min(sk->max_window, sk->mtu);
2520 #endif
2521 }
2522
2523 if (sk->retransmits && sk->timeout == TIME_KEEPOPEN)
2524 sk->retransmits = 0;
2525
2526 #if 0
2527
2528
2529
2530
2531 if (after(ack, sk->sent_seq+1) || before(ack, sk->rcv_ack_seq-1))
2532 #else
2533 if (after(ack, sk->sent_seq) || before(ack, sk->rcv_ack_seq))
2534 #endif
2535 {
2536 if(sk->debug)
2537 printk("Ack ignored %lu %lu\n",ack,sk->sent_seq);
2538
2539
2540
2541
2542
2543 if (after(ack, sk->sent_seq) || (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT))
2544 {
2545 return(0);
2546 }
2547 if (sk->keepopen)
2548 {
2549 if(sk->timeout==TIME_KEEPOPEN)
2550 reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
2551 }
2552 return(1);
2553 }
2554
2555 if (len != th->doff*4)
2556 flag |= 1;
2557
2558
2559
2560 if (after(sk->window_seq, ack+ntohs(th->window)))
2561 {
2562
2563
2564
2565
2566
2567
2568
2569 struct sk_buff *skb;
2570 struct sk_buff *skb2;
2571 struct sk_buff *wskb = NULL;
2572
2573 skb2 = sk->send_head;
2574 sk->send_head = NULL;
2575 sk->send_tail = NULL;
2576
2577 flag |= 4;
2578
2579 sk->window_seq = ack + ntohs(th->window);
2580 cli();
2581 while (skb2 != NULL)
2582 {
2583 skb = skb2;
2584 skb2 = skb->link3;
2585 skb->link3 = NULL;
2586 if (after(skb->h.seq, sk->window_seq))
2587 {
2588 if (sk->packets_out > 0)
2589 sk->packets_out--;
2590
2591 if (skb->next != NULL)
2592 {
2593 skb_unlink(skb);
2594 }
2595
2596 if (wskb == NULL)
2597 skb_queue_head(&sk->write_queue,skb);
2598 else
2599 skb_append(wskb,skb);
2600 wskb = skb;
2601 }
2602 else
2603 {
2604 if (sk->send_head == NULL)
2605 {
2606 sk->send_head = skb;
2607 sk->send_tail = skb;
2608 }
2609 else
2610 {
2611 sk->send_tail->link3 = skb;
2612 sk->send_tail = skb;
2613 }
2614 skb->link3 = NULL;
2615 }
2616 }
2617 sti();
2618 }
2619
2620 if (sk->send_tail == NULL || sk->send_head == NULL)
2621 {
2622 sk->send_head = NULL;
2623 sk->send_tail = NULL;
2624 sk->packets_out= 0;
2625 }
2626
2627 sk->window_seq = ack + ntohs(th->window);
2628
2629
2630 if (sk->timeout == TIME_WRITE &&
2631 sk->cong_window < 2048 && after(ack, sk->rcv_ack_seq))
2632 {
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642 if (sk->cong_window < sk->ssthresh)
2643
2644
2645
2646 sk->cong_window++;
2647 else
2648 {
2649
2650
2651
2652
2653 if (sk->cong_count >= sk->cong_window)
2654 {
2655 sk->cong_window++;
2656 sk->cong_count = 0;
2657 }
2658 else
2659 sk->cong_count++;
2660 }
2661 }
2662
2663 sk->rcv_ack_seq = ack;
2664
2665
2666
2667
2668
2669
2670
2671 if (sk->timeout == TIME_PROBE0)
2672 {
2673 if (skb_peek(&sk->write_queue) != NULL &&
2674 ! before (sk->window_seq, sk->write_queue.next->h.seq))
2675 {
2676 sk->retransmits = 0;
2677 sk->backoff = 0;
2678
2679
2680
2681
2682 sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
2683 if (sk->rto > 120*HZ)
2684 sk->rto = 120*HZ;
2685 if (sk->rto < 20)
2686
2687
2688 sk->rto = 20;
2689 }
2690 }
2691
2692
2693
2694
2695
2696 while(sk->send_head != NULL)
2697 {
2698
2699 if (sk->send_head->link3 &&
2700 after(sk->send_head->h.seq, sk->send_head->link3->h.seq))
2701 {
2702 printk("INET: tcp.c: *** bug send_list out of order.\n");
2703 sort_send(sk);
2704 }
2705
2706 if (before(sk->send_head->h.seq, ack+1))
2707 {
2708 struct sk_buff *oskb;
2709 if (sk->retransmits)
2710 {
2711
2712
2713
2714 flag |= 2;
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724 if (sk->send_head->link3)
2725 sk->retransmits = 1;
2726 else
2727 sk->retransmits = 0;
2728 }
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745 if (sk->packets_out > 0)
2746 sk->packets_out --;
2747
2748
2749
2750 if (!sk->dead)
2751 sk->write_space(sk);
2752 oskb = sk->send_head;
2753
2754 if (!(flag&2))
2755 {
2756 long m;
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766 m = jiffies - oskb->when;
2767 if(m<=0)
2768 m=1;
2769 m -= (sk->rtt >> 3);
2770 sk->rtt += m;
2771 if (m < 0)
2772 m = -m;
2773 m -= (sk->mdev >> 2);
2774 sk->mdev += m;
2775
2776
2777
2778
2779
2780 sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
2781 if (sk->rto > 120*HZ)
2782 sk->rto = 120*HZ;
2783 if (sk->rto < 20)
2784 sk->rto = 20;
2785 sk->backoff = 0;
2786 }
2787 flag |= (2|4);
2788 cli();
2789 oskb = sk->send_head;
2790 IS_SKB(oskb);
2791 sk->send_head = oskb->link3;
2792 if (sk->send_head == NULL)
2793 {
2794 sk->send_tail = NULL;
2795 }
2796
2797
2798
2799
2800
2801 if (oskb->next)
2802 skb_unlink(oskb);
2803 sti();
2804 kfree_skb(oskb, FREE_WRITE);
2805 if (!sk->dead)
2806 sk->write_space(sk);
2807 }
2808 else
2809 {
2810 break;
2811 }
2812 }
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827 if (skb_peek(&sk->write_queue) != NULL)
2828 {
2829 if (after (sk->window_seq+1, sk->write_queue.next->h.seq) &&
2830 (sk->retransmits == 0 ||
2831 sk->timeout != TIME_WRITE ||
2832 before(sk->write_queue.next->h.seq, sk->rcv_ack_seq + 1))
2833 && sk->packets_out < sk->cong_window)
2834 {
2835 flag |= 1;
2836 tcp_write_xmit(sk);
2837 }
2838 else if (before(sk->window_seq, sk->write_queue.next->h.seq) &&
2839 sk->send_head == NULL &&
2840 sk->ack_backlog == 0 &&
2841 sk->state != TCP_TIME_WAIT)
2842 {
2843 reset_timer(sk, TIME_PROBE0, sk->rto);
2844 }
2845 }
2846 else
2847 {
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861 switch(sk->state) {
2862 case TCP_TIME_WAIT:
2863
2864
2865
2866
2867 reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
2868 break;
2869 case TCP_CLOSE:
2870
2871
2872
2873 break;
2874 default:
2875
2876
2877
2878
2879 if (sk->send_head || skb_peek(&sk->write_queue) != NULL || sk->ack_backlog) {
2880 reset_timer(sk, TIME_WRITE, sk->rto);
2881 } else if (sk->keepopen) {
2882 reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
2883 } else {
2884 delete_timer(sk);
2885 }
2886 break;
2887 }
2888 #ifdef NOTDEF
2889 if (sk->send_head == NULL && sk->ack_backlog == 0 &&
2890 sk->state != TCP_TIME_WAIT && !sk->keepopen)
2891 {
2892 if (!sk->dead)
2893 sk->write_space(sk);
2894 if (sk->keepopen) {
2895 reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
2896 } else {
2897 delete_timer(sk);
2898 }
2899 }
2900 else
2901 {
2902 if (sk->state != (unsigned char) sk->keepopen)
2903 {
2904 reset_timer(sk, TIME_WRITE, sk->rto);
2905 }
2906 if (sk->state == TCP_TIME_WAIT)
2907 {
2908 reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
2909 }
2910 }
2911 #endif
2912 }
2913
2914 if (sk->packets_out == 0 && sk->partial != NULL &&
2915 skb_peek(&sk->write_queue) == NULL && sk->send_head == NULL)
2916 {
2917 flag |= 1;
2918 tcp_send_partial(sk);
2919 }
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929 if (sk->state == TCP_LAST_ACK)
2930 {
2931 if (!sk->dead)
2932 sk->state_change(sk);
2933 if (sk->rcv_ack_seq == sk->write_seq && sk->acked_seq == sk->fin_seq)
2934 {
2935 flag |= 1;
2936 tcp_set_state(sk,TCP_CLOSE);
2937 sk->shutdown = SHUTDOWN_MASK;
2938 }
2939 }
2940
2941
2942
2943
2944
2945
2946
2947 if (sk->state == TCP_FIN_WAIT1)
2948 {
2949
2950 if (!sk->dead)
2951 sk->state_change(sk);
2952 if (sk->rcv_ack_seq == sk->write_seq)
2953 {
2954 flag |= 1;
2955 if (sk->acked_seq != sk->fin_seq)
2956 {
2957 tcp_time_wait(sk);
2958 }
2959 else
2960 {
2961 sk->shutdown = SHUTDOWN_MASK;
2962 tcp_set_state(sk,TCP_FIN_WAIT2);
2963 }
2964 }
2965 }
2966
2967
2968
2969
2970
2971
2972
2973 if (sk->state == TCP_CLOSING)
2974 {
2975
2976 if (!sk->dead)
2977 sk->state_change(sk);
2978 if (sk->rcv_ack_seq == sk->write_seq)
2979 {
2980 flag |= 1;
2981 tcp_time_wait(sk);
2982 }
2983 }
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014 if (((!flag) || (flag&4)) && sk->send_head != NULL &&
3015 (((flag&2) && sk->retransmits) ||
3016 (sk->send_head->when + sk->rto < jiffies)))
3017 {
3018 ip_do_retransmit(sk, 1);
3019 reset_timer(sk, TIME_WRITE, sk->rto);
3020 }
3021
3022 return(1);
3023 }
3024
3025
3026
3027
3028
3029
3030
3031
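/*
 * Queue an incoming data segment.  The segment is placed in sequence order
 * on the receive queue (replacing a shorter duplicate if one is found),
 * acked_seq is advanced across any segments that are now contiguous, and
 * either an immediate ACK is sent or a delayed ACK is scheduled.
 */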
3032 static int tcp_data(struct sk_buff *skb, struct sock *sk,
3033 unsigned long saddr, unsigned short len)
3034 {
3035 struct sk_buff *skb1, *skb2;
3036 struct tcphdr *th;
3037 int dup_dumped=0;
3038 unsigned long new_seq;
3039
3040 th = skb->h.th;
3041 skb->len = len -(th->doff*4);
3042
3043
3044
3045
3046 sk->bytes_rcv += skb->len;
3047
3048 if (skb->len == 0 && !th->fin && !th->urg && !th->psh)
3049 {
3050
3051
3052
3053
3054 if (!th->ack)
3055 tcp_send_ack(sk->sent_seq, sk->acked_seq,sk, th, saddr);
3056 kfree_skb(skb, FREE_READ);
3057 return(0);
3058 }
3059
3060
3061
3062
3063
3064 if(sk->shutdown & RCV_SHUTDOWN)
3065 {
3066 new_seq= th->seq + skb->len + th->syn;
3067
3068 if(after(new_seq,sk->copied_seq+1))
3069
3070
3071 {
3072 sk->acked_seq = new_seq + th->fin;
3073 tcp_reset(sk->saddr, sk->daddr, skb->h.th,
3074 sk->prot, NULL, skb->dev, sk->ip_tos, sk->ip_ttl);
3075 tcp_statistics.TcpEstabResets++;
3076 tcp_set_state(sk,TCP_CLOSE);
3077 sk->err = EPIPE;
3078 sk->shutdown = SHUTDOWN_MASK;
3079 kfree_skb(skb, FREE_READ);
3080 if (!sk->dead)
3081 sk->state_change(sk);
3082 return(0);
3083 }
3084 #if 0
3085
3086
3087 kfree_skb(skb, FREE_READ);
3088 return(0);
3089 #endif
3090 }
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103 if (skb_peek(&sk->receive_queue) == NULL)
3104 {
3105 skb_queue_head(&sk->receive_queue,skb);
3106 skb1= NULL;
3107 }
3108 else
3109 {
3110 for(skb1=sk->receive_queue.prev; ; skb1 = skb1->prev)
3111 {
3112 if(sk->debug)
3113 {
3114 printk("skb1=%p :", skb1);
3115 printk("skb1->h.th->seq = %ld: ", skb1->h.th->seq);
3116 printk("skb->h.th->seq = %ld\n",skb->h.th->seq);
3117 printk("copied_seq = %ld acked_seq = %ld\n", sk->copied_seq,
3118 sk->acked_seq);
3119 }
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129 if (th->seq==skb1->h.th->seq && skb->len>= skb1->len)
3130 {
3131 skb_append(skb1,skb);
3132 skb_unlink(skb1);
3133 kfree_skb(skb1,FREE_READ);
3134 dup_dumped=1;
3135 skb1=NULL;
3136 break;
3137 }
3138
3139
3140
3141
3142
3143 if (after(th->seq+1, skb1->h.th->seq))
3144 {
3145 skb_append(skb1,skb);
3146 break;
3147 }
3148
3149
3150
3151
3152 if (skb1 == skb_peek(&sk->receive_queue))
3153 {
3154 skb_queue_head(&sk->receive_queue, skb);
3155 break;
3156 }
3157 }
3158 }
3159
3160
3161
3162
3163
3164 th->ack_seq = th->seq + skb->len;
3165 if (th->syn)
3166 th->ack_seq++;
3167 if (th->fin)
3168 th->ack_seq++;
3169
3170 if (before(sk->acked_seq, sk->copied_seq))
3171 {
3172 printk("*** tcp.c:tcp_data bug acked < copied\n");
3173 sk->acked_seq = sk->copied_seq;
3174 }
3175
3176
3177
3178
3179
3180 if ((!dup_dumped && (skb1 == NULL || skb1->acked)) || before(th->seq, sk->acked_seq+1))
3181 {
3182 if (before(th->seq, sk->acked_seq+1))
3183 {
3184 int newwindow;
3185
3186 if (after(th->ack_seq, sk->acked_seq))
3187 {
3188 newwindow = sk->window-(th->ack_seq - sk->acked_seq);
3189 if (newwindow < 0)
3190 newwindow = 0;
3191 sk->window = newwindow;
3192 sk->acked_seq = th->ack_seq;
3193 }
3194 skb->acked = 1;
3195
3196
3197
3198
3199
3200 if (skb->h.th->fin)
3201 {
3202 if (!sk->dead)
3203 sk->state_change(sk);
3204 sk->shutdown |= RCV_SHUTDOWN;
3205 }
3206
3207 for(skb2 = skb->next;
3208 skb2 != (struct sk_buff *)&sk->receive_queue;
3209 skb2 = skb2->next)
3210 {
3211 if (before(skb2->h.th->seq, sk->acked_seq+1))
3212 {
3213 if (after(skb2->h.th->ack_seq, sk->acked_seq))
3214 {
3215 newwindow = sk->window -
3216 (skb2->h.th->ack_seq - sk->acked_seq);
3217 if (newwindow < 0)
3218 newwindow = 0;
3219 sk->window = newwindow;
3220 sk->acked_seq = skb2->h.th->ack_seq;
3221 }
3222 skb2->acked = 1;
3223
3224
3225
3226
3227 if (skb2->h.th->fin)
3228 {
3229 sk->shutdown |= RCV_SHUTDOWN;
3230 if (!sk->dead)
3231 sk->state_change(sk);
3232 }
3233
3234
3235
3236
3237
3238 sk->ack_backlog = sk->max_ack_backlog;
3239 }
3240 else
3241 {
3242 break;
3243 }
3244 }
3245
3246
3247
3248
3249
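/*
 *	ACK policy: if delayed ACKs are disabled, the ACK backlog or the
 *	amount of unacknowledged receive data has grown too large, or a
 *	FIN arrived, fall straight through (the empty branch below is
 *	deliberate; the ACK itself goes out further down).  Otherwise
 *	just record a pending ACK and arm the ACK timer.
 */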
3250 if (!sk->delay_acks ||
3251 sk->ack_backlog >= sk->max_ack_backlog ||
3252 sk->bytes_rcv > sk->max_unacked || th->fin) {
3253
3254 }
3255 else
3256 {
3257 sk->ack_backlog++;
3258 if(sk->debug)
3259 printk("Ack queued.\n");
3260 reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
3261 }
3262 }
3263 }
3264
3265
3266
3267
3268
3269
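/*
 *	If this frame could not be acked, a hole precedes it.  When
 *	receive space is running low, throw away queued frames that we
 *	have never acknowledged (the sender will retransmit them), but
 *	keep anything already acked.  Then send an ACK to help the other
 *	end resynchronise and arm the ACK timer.  In-sequence frames are
 *	simply acknowledged.
 */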
3270 if (!skb->acked)
3271 {
3272
3273
3274
3275
3276
3277
3278
3279
3280 while (sk->prot->rspace(sk) < sk->mtu)
3281 {
3282 skb1 = skb_peek(&sk->receive_queue);
3283 if (skb1 == NULL)
3284 {
3285 printk("INET: tcp.c:tcp_data memory leak detected.\n");
3286 break;
3287 }
3288
3289
3290
3291
3292
3293 if (skb1->acked)
3294 {
3295 break;
3296 }
3297
3298 skb_unlink(skb1);
3299 kfree_skb(skb1, FREE_READ);
3300 }
3301 tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
3302 sk->ack_backlog++;
3303 reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
3304 }
3305 else
3306 {
3307
3308 tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
3309 }
3310
3311
3312
3313
3314
3315 if (!sk->dead)
3316 {
3317 if(sk->debug)
3318 printk("Data wakeup.\n");
3319 sk->data_ready(sk,0);
3320 }
3321 return(0);
3322 }
3323
3324
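/*
 *	Urgent data.  tcp_check_urg() records the sequence number of a
 *	newly announced urgent byte (treating the urgent pointer as
 *	pointing one past the byte, hence the decrement) and signals
 *	SIGURG to the owning process or process group.  tcp_urg() then
 *	pulls the byte out of the segment that carries it and marks it
 *	valid so it can be read out of band.
 */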
3325 static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
3326 {
3327 unsigned long ptr = ntohs(th->urg_ptr);
3328
3329 if (ptr)
3330 ptr--;
3331 ptr += th->seq;
3332
3333
3334 if (after(sk->copied_seq+1, ptr))
3335 return;
3336
3337
3338 if (sk->urg_data && !after(ptr, sk->urg_seq))
3339 return;
3340
3341
3342 if (sk->proc != 0) {
3343 if (sk->proc > 0) {
3344 kill_proc(sk->proc, SIGURG, 1);
3345 } else {
3346 kill_pg(-sk->proc, SIGURG, 1);
3347 }
3348 }
3349 sk->urg_data = URG_NOTYET;
3350 sk->urg_seq = ptr;
3351 }
3352
3353 static inline int tcp_urg(struct sock *sk, struct tcphdr *th,
3354 unsigned long saddr, unsigned long len)
3355 {
3356 unsigned long ptr;
3357
3358
3359 if (th->urg)
3360 tcp_check_urg(sk,th);
3361
3362
3363 if (sk->urg_data != URG_NOTYET)
3364 return 0;
3365
3366
3367 ptr = sk->urg_seq - th->seq + th->doff*4;
3368 if (ptr >= len)
3369 return 0;
3370
3371
3372 sk->urg_data = URG_VALID | *(ptr + (unsigned char *) th);
3373 if (!sk->dead)
3374 sk->data_ready(sk,0);
3375 return 0;
3376 }
3377
3378
3379
3380
3381
3382
3383
3384
3385
3386
3387
3388
3389
3390
3391
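/*
 *	tcp_fin() processes a FIN from the other end: note where it falls
 *	in the sequence space, wake the socket, and advance the state
 *	machine (for instance ESTABLISHED -> CLOSE_WAIT, FIN_WAIT1 ->
 *	CLOSING, FIN_WAIT2 -> TIME_WAIT), arming a close timer as it
 *	goes.
 */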
3392 static int tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th,
3393 unsigned long saddr, struct device *dev)
3394 {
3395 sk->fin_seq = th->seq + skb->len + th->syn + th->fin;
3396
3397 if (!sk->dead)
3398 {
3399 sk->state_change(sk);
3400 }
3401
3402 switch(sk->state)
3403 {
3404 case TCP_SYN_RECV:
3405 case TCP_SYN_SENT:
3406 case TCP_ESTABLISHED:
3407
3408
3409
3410
3411 reset_timer(sk, TIME_CLOSE, TCP_TIMEOUT_LEN);
3412
3413 tcp_set_state(sk,TCP_CLOSE_WAIT);
3414 if (th->rst)
3415 sk->shutdown = SHUTDOWN_MASK;
3416 break;
3417
3418 case TCP_CLOSE_WAIT:
3419 case TCP_CLOSING:
3420
3421
3422
3423
3424 break;
3425 case TCP_TIME_WAIT:
3426
3427
3428
3429
3430 reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
3431 return(0);
3432 case TCP_FIN_WAIT1:
3433
3434
3435
3436
3437
3438
3439
3440
3441 reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
3442
3443 tcp_set_state(sk,TCP_CLOSING);
3444 break;
3445 case TCP_FIN_WAIT2:
3446
3447
3448
3449 reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
3450
3451 tcp_set_state(sk,TCP_TIME_WAIT);
3452 break;
3453 case TCP_CLOSE:
3454
3455
3456
3457 break;
3458 default:
3459 tcp_set_state(sk,TCP_LAST_ACK);
3460
3461
3462 reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
3463 return(0);
3464 }
3465 sk->ack_backlog++;
3466
3467 return(0);
3468 }
3469
3470
3471
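/*
 *	tcp_accept() hands a completed connection to the caller.  Unless
 *	O_NONBLOCK is set it sleeps until tcp_conn_request() has queued a
 *	new socket on the listening socket's receive queue; it returns
 *	NULL with sk->err set to EINVAL (not listening), EAGAIN (nothing
 *	ready) or ERESTARTSYS (interrupted).
 */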
3472 static struct sock *
3473 tcp_accept(struct sock *sk, int flags)
3474 {
3475 struct sock *newsk;
3476 struct sk_buff *skb;
3477
3478
3479
3480
3481
3482
3483 if (sk->state != TCP_LISTEN)
3484 {
3485 sk->err = EINVAL;
3486 return(NULL);
3487 }
3488
3489
3490 cli();
3491 sk->inuse = 1;
3492
3493 while((skb = skb_dequeue(&sk->receive_queue)) == NULL)
3494 {
3495 if (flags & O_NONBLOCK)
3496 {
3497 sti();
3498 release_sock(sk);
3499 sk->err = EAGAIN;
3500 return(NULL);
3501 }
3502
3503 release_sock(sk);
3504 interruptible_sleep_on(sk->sleep);
3505 if (current->signal & ~current->blocked)
3506 {
3507 sti();
3508 sk->err = ERESTARTSYS;
3509 return(NULL);
3510 }
3511 sk->inuse = 1;
3512 }
3513 sti();
3514
3515
3516
3517
3518
3519 newsk = skb->sk;
3520
3521 kfree_skb(skb, FREE_READ);
3522 sk->ack_backlog--;
3523 release_sock(sk);
3524 return(newsk);
3525 }
3526
3527
3528
3529
3530
3531
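/*
 *	tcp_connect() performs an active open: validate the address, pick
 *	an initial sequence number from the clock, consult the routing
 *	entry for any window clamp or MSS it supplies (falling back to
 *	576 bytes across a non-local network, otherwise clamped to the
 *	device MTU), then build and transmit a SYN carrying an MSS option
 *	and enter SYN_SENT with the retransmission timer running.
 */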
3532 static int tcp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len)
3533 {
3534 struct sk_buff *buff;
3535 struct device *dev=NULL;
3536 unsigned char *ptr;
3537 int tmp;
3538 struct tcphdr *t1;
3539 struct rtable *rt;
3540
3541 if (sk->state != TCP_CLOSE)
3542 return(-EISCONN);
3543
3544 if (addr_len < 8)
3545 return(-EINVAL);
3546
3547 if (usin->sin_family && usin->sin_family != AF_INET)
3548 return(-EAFNOSUPPORT);
3549
3550
3551
3552
3553
3554 if(usin->sin_addr.s_addr==INADDR_ANY)
3555 usin->sin_addr.s_addr=ip_my_addr();
3556
3557
3558
3559
3560
3561 if (ip_chk_addr(usin->sin_addr.s_addr) == IS_BROADCAST)
3562 {
3563 return -ENETUNREACH;
3564 }
3565
3566
3567
3568
3569
3570 if(sk->saddr == usin->sin_addr.s_addr && sk->num==ntohs(usin->sin_port))
3571 return -EBUSY;
3572
3573 sk->inuse = 1;
3574 sk->daddr = usin->sin_addr.s_addr;
3575 sk->write_seq = jiffies * SEQ_TICK - seq_offset;
3576 sk->window_seq = sk->write_seq;
3577 sk->rcv_ack_seq = sk->write_seq -1;
3578 sk->err = 0;
3579 sk->dummy_th.dest = usin->sin_port;
3580 release_sock(sk);
3581
3582 buff = sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
3583 if (buff == NULL)
3584 {
3585 return(-ENOMEM);
3586 }
3587 sk->inuse = 1;
3588 buff->len = 24;
3589 buff->sk = sk;
3590 buff->free = 1;
3591 buff->localroute = sk->localroute;
3592
3593 t1 = (struct tcphdr *) buff->data;
3594
3595
3596
3597
3598
3599 rt=ip_rt_route(sk->daddr, NULL, NULL);
3600
3601
3602
3603
3604
3605
3606 tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
3607 IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);
3608 if (tmp < 0)
3609 {
3610 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
3611 release_sock(sk);
3612 return(-ENETUNREACH);
3613 }
3614
3615 buff->len += tmp;
3616 t1 = (struct tcphdr *)((char *)t1 +tmp);
3617
3618 memcpy(t1,(void *)&(sk->dummy_th), sizeof(*t1));
3619 t1->seq = ntohl(sk->write_seq++);
3620 sk->sent_seq = sk->write_seq;
3621 buff->h.seq = sk->write_seq;
3622 t1->ack = 0;
3623 t1->window = 2;
3624 t1->res1=0;
3625 t1->res2=0;
3626 t1->rst = 0;
3627 t1->urg = 0;
3628 t1->psh = 0;
3629 t1->syn = 1;
3630 t1->urg_ptr = 0;
3631 t1->doff = 6;
3632
3633
3634 if(rt!=NULL && (rt->rt_flags&RTF_WINDOW))
3635 sk->window_clamp=rt->rt_window;
3636 else
3637 sk->window_clamp=0;
3638
3639 if (sk->user_mss)
3640 sk->mtu = sk->user_mss;
3641 else if(rt!=NULL && (rt->rt_flags&RTF_MTU))
3642 sk->mtu = rt->rt_mss;
3643 else
3644 {
3645 #ifdef CONFIG_INET_SNARL
3646 if ((sk->saddr ^ sk->daddr) & default_mask(sk->saddr))
3647 #else
3648 if ((sk->saddr ^ sk->daddr) & dev->pa_mask)
3649 #endif
3650 sk->mtu = 576 - HEADER_SIZE;
3651 else
3652 sk->mtu = MAX_WINDOW;
3653 }
3654
3655
3656
3657
3658 if(sk->mtu <32)
3659 sk->mtu = 32;
3660
3661 sk->mtu = min(sk->mtu, dev->mtu - HEADER_SIZE);
3662
3663
3664
3665
3666
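/*
 *	Append the MSS option to the SYN: kind 2, length 4, then the
 *	16-bit maximum segment size in network byte order (this is why
 *	doff was set to 6 words above).
 */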
3667 ptr = (unsigned char *)(t1+1);
3668 ptr[0] = 2;
3669 ptr[1] = 4;
3670 ptr[2] = (sk->mtu) >> 8;
3671 ptr[3] = (sk->mtu) & 0xff;
3672 tcp_send_check(t1, sk->saddr, sk->daddr,
3673 sizeof(struct tcphdr) + 4, sk);
3674
3675
3676
3677
3678
3679 tcp_set_state(sk,TCP_SYN_SENT);
3680
3681 sk->rto = TCP_TIMEOUT_INIT;
3682 reset_timer(sk, TIME_WRITE, sk->rto);
3683 sk->retransmits = TCP_RETR2 - TCP_SYN_RETRIES;
3684
3685 sk->prot->queue_xmit(sk, dev, buff, 0);
3686 tcp_statistics.TcpActiveOpens++;
3687 tcp_statistics.TcpOutSegs++;
3688
3689 release_sock(sk);
3690 return(0);
3691 }
3692
3693
3694
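/*
 *	tcp_sequence() decides whether an incoming segment is acceptable:
 *	data arriving against a closed window, segments ending before our
 *	ACK point and segments starting beyond the advertised window are
 *	all ignored.  Ignored RSTs are dropped silently; in SYN_SENT or
 *	SYN_RECV a reset is sent instead, and otherwise the segment is
 *	answered with an ACK and discarded.
 */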
3695 static int
3696 tcp_sequence(struct sock *sk, struct tcphdr *th, short len,
3697 struct options *opt, unsigned long saddr, struct device *dev)
3698 {
3699 unsigned long next_seq;
3700
3701 next_seq = len - 4*th->doff;
3702 if (th->fin)
3703 next_seq++;
3704
3705 if (next_seq && !sk->window)
3706 goto ignore_it;
3707 next_seq += th->seq;
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717 if (!after(next_seq+1, sk->acked_seq))
3718 goto ignore_it;
3719
3720 if (!before(th->seq, sk->acked_seq + sk->window + 1))
3721 goto ignore_it;
3722
3723
3724 return 1;
3725
3726 ignore_it:
3727 if (th->rst)
3728 return 0;
3729
3730
3731
3732
3733
3734
3735
3736
3737 if (sk->state==TCP_SYN_SENT || sk->state==TCP_SYN_RECV) {
3738 tcp_reset(sk->saddr,sk->daddr,th,sk->prot,NULL,dev, sk->ip_tos,sk->ip_ttl);
3739 return 1;
3740 }
3741
3742
3743 tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
3744 return 0;
3745 }
3746
3747
3748 #ifdef TCP_FASTPATH
3749
3750
3751
3752
3753
3754
3755
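/*
 *	Fast-path helper, compiled only when TCP_FASTPATH is defined:
 *	returns 1 when the receive queue is empty or its last frame has
 *	already been acked, i.e. new in-sequence data may simply be
 *	appended.
 */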
3756 static inline int tcp_clean_end(struct sock *sk)
3757 {
3758 struct sk_buff *skb=skb_peek(&sk->receive_queue);
3759 if(skb==NULL || sk->receive_queue.prev->acked)
3760 return 1;
3761 return 0;
3762 }
3763 #endif
3764
3765 int
3766 tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
3767 unsigned long daddr, unsigned short len,
3768 unsigned long saddr, int redo, struct inet_protocol * protocol)
3769 {
3770 struct tcphdr *th;
3771 struct sock *sk;
3772
3773 if (!skb)
3774 {
3775 return(0);
3776 }
3777
3778 if (!dev)
3779 {
3780 return(0);
3781 }
3782
3783 tcp_statistics.TcpInSegs++;
3784
3785 if(skb->pkt_type!=PACKET_HOST)
3786 {
3787 kfree_skb(skb,FREE_READ);
3788 return(0);
3789 }
3790
3791 th = skb->h.th;
3792
3793
3794
3795
3796
3797 sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
3798
3799
3800
3801
3802
3803
3804 if (sk!=NULL && sk->zapped)
3805 sk=NULL;
3806
3807 if (!redo)
3808 {
3809 if (tcp_check(th, len, saddr, daddr ))
3810 {
3811 skb->sk = NULL;
3812 kfree_skb(skb,FREE_READ);
3813
3814
3815
3816
3817 return(0);
3818 }
3819 th->seq = ntohl(th->seq);
3820
3821
3822 if (sk == NULL)
3823 {
3824 if (!th->rst)
3825 tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev,skb->ip_hdr->tos,255);
3826 skb->sk = NULL;
3827 kfree_skb(skb, FREE_READ);
3828 return(0);
3829 }
3830
3831 skb->len = len;
3832 skb->sk = sk;
3833 skb->acked = 0;
3834 skb->used = 0;
3835 skb->free = 0;
3836 skb->saddr = daddr;
3837 skb->daddr = saddr;
3838
3839
3840 cli();
3841 if (sk->inuse)
3842 {
3843 skb_queue_head(&sk->back_log, skb);
3844 sti();
3845 return(0);
3846 }
3847 sk->inuse = 1;
3848 sti();
3849 }
3850 else
3851 {
3852 if (!sk)
3853 {
3854 return(0);
3855 }
3856 }
3857
3858
3859 if (!sk->prot)
3860 {
3861 return(0);
3862 }
3863
3864
3865
3866
3867
3868
3869 if (sk->rmem_alloc + skb->mem_len >= sk->rcvbuf)
3870 {
3871 skb->sk = NULL;
3872 kfree_skb(skb, FREE_READ);
3873 release_sock(sk);
3874 return(0);
3875 }
3876
3877 sk->rmem_alloc += skb->mem_len;
3878
3879 #ifdef TCP_FASTPATH
3880
3881
3882
3883
3884
3885
3886
3887
3888
3889
3890
3891
3892
3893
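/*
 *	Receive fast path (only when TCP_FASTPATH is defined).  For an
 *	established connection receiving a plain segment (no URG, SYN,
 *	FIN or RST): an exactly in-sequence frame with a clean queue tail
 *	is acked and appended directly (tcp_rx_hit2); other simple cases
 *	still run the sequence/ack/data checks but skip the state switch
 *	(tcp_rx_hit1); everything else falls through to the slow path
 *	(tcp_rx_miss).
 */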
3894 if(!(sk->shutdown & RCV_SHUTDOWN) && sk->state==TCP_ESTABLISHED && !th->urg && !th->syn && !th->fin && !th->rst)
3895 {
3896
3897 if(th->seq == sk->acked_seq+1 && sk->window && tcp_clean_end(sk))
3898 {
3899
3900 if(th->ack && !tcp_ack(sk, th, saddr, len))
3901 {
3902 kfree_skb(skb, FREE_READ);
3903 release_sock(sk);
3904 return 0;
3905 }
3906
3907
3908
3909 skb->len -= (th->doff *4);
3910 sk->bytes_rcv += skb->len;
3911 tcp_rx_hit2++;
3912 if(skb->len)
3913 {
3914 skb_queue_tail(&sk->receive_queue,skb);
3915 if(sk->window >= skb->len)
3916 sk->window-=skb->len;
3917 else
3918 sk->window=0;
3919 sk->acked_seq = th->seq+skb->len;
3920 skb->acked=1;
3921 if(!sk->delay_acks || sk->ack_backlog >= sk->max_ack_backlog ||
3922 sk->bytes_rcv > sk->max_unacked)
3923 {
3924 tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th , saddr);
3925 }
3926 else
3927 {
3928 sk->ack_backlog++;
3929 reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
3930 }
3931 if(!sk->dead)
3932 sk->data_ready(sk,0);
3933 release_sock(sk);
3934 return 0;
3935 }
3936 }
3937
3938
3939
3940 tcp_rx_hit1++;
3941 if(!tcp_sequence(sk, th, len, opt, saddr, dev))
3942 {
3943 kfree_skb(skb, FREE_READ);
3944 release_sock(sk);
3945 return 0;
3946 }
3947 if(th->ack && !tcp_ack(sk, th, saddr, len))
3948 {
3949 kfree_skb(skb, FREE_READ);
3950 release_sock(sk);
3951 return 0;
3952 }
3953 if(tcp_data(skb, sk, saddr, len))
3954 kfree_skb(skb, FREE_READ);
3955 release_sock(sk);
3956 return 0;
3957 }
3958 tcp_rx_miss++;
3959 #endif
3960
3961
3962
3963
3964
3965 switch(sk->state)
3966 {
3967
3968
3969
3970
3971
3972 case TCP_LAST_ACK:
3973 if (th->rst)
3974 {
3975 sk->zapped=1;
3976 sk->err = ECONNRESET;
3977 tcp_set_state(sk,TCP_CLOSE);
3978 sk->shutdown = SHUTDOWN_MASK;
3979 if (!sk->dead)
3980 {
3981 sk->state_change(sk);
3982 }
3983 kfree_skb(skb, FREE_READ);
3984 release_sock(sk);
3985 return(0);
3986 }
3987
3988 case TCP_ESTABLISHED:
3989 case TCP_CLOSE_WAIT:
3990 case TCP_CLOSING:
3991 case TCP_FIN_WAIT1:
3992 case TCP_FIN_WAIT2:
3993 case TCP_TIME_WAIT:
3994 if (!tcp_sequence(sk, th, len, opt, saddr,dev))
3995 {
3996 kfree_skb(skb, FREE_READ);
3997 release_sock(sk);
3998 return(0);
3999 }
4000
4001 if (th->rst)
4002 {
4003 tcp_statistics.TcpEstabResets++;
4004 sk->zapped=1;
4005
4006 sk->err = ECONNRESET;
4007 if (sk->state == TCP_CLOSE_WAIT)
4008 {
4009 sk->err = EPIPE;
4010 }
4011
4012
4013
4014
4015
4016 tcp_set_state(sk,TCP_CLOSE);
4017 sk->shutdown = SHUTDOWN_MASK;
4018 if (!sk->dead)
4019 {
4020 sk->state_change(sk);
4021 }
4022 kfree_skb(skb, FREE_READ);
4023 release_sock(sk);
4024 return(0);
4025 }
4026 if (th->syn)
4027 {
4028 tcp_statistics.TcpEstabResets++;
4029 sk->err = ECONNRESET;
4030 tcp_set_state(sk,TCP_CLOSE);
4031 sk->shutdown = SHUTDOWN_MASK;
4032 tcp_reset(daddr, saddr, th, sk->prot, opt,dev, sk->ip_tos,sk->ip_ttl);
4033 if (!sk->dead) {
4034 sk->state_change(sk);
4035 }
4036 kfree_skb(skb, FREE_READ);
4037 release_sock(sk);
4038 return(0);
4039 }
4040
4041 if (th->ack && !tcp_ack(sk, th, saddr, len)) {
4042 kfree_skb(skb, FREE_READ);
4043 release_sock(sk);
4044 return(0);
4045 }
4046
4047 if (tcp_urg(sk, th, saddr, len)) {
4048 kfree_skb(skb, FREE_READ);
4049 release_sock(sk);
4050 return(0);
4051 }
4052
4053
4054 if (tcp_data(skb, sk, saddr, len)) {
4055 kfree_skb(skb, FREE_READ);
4056 release_sock(sk);
4057 return(0);
4058 }
4059
4060 if (th->fin && tcp_fin(skb, sk, th, saddr, dev)) {
4061 kfree_skb(skb, FREE_READ);
4062 release_sock(sk);
4063 return(0);
4064 }
4065
4066 release_sock(sk);
4067 return(0);
4068
4069 case TCP_CLOSE:
4070 if (sk->dead || sk->daddr) {
4071 kfree_skb(skb, FREE_READ);
4072 release_sock(sk);
4073 return(0);
4074 }
4075
4076 if (!th->rst) {
4077 if (!th->ack)
4078 th->ack_seq = 0;
4079 tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
4080 }
4081 kfree_skb(skb, FREE_READ);
4082 release_sock(sk);
4083 return(0);
4084
4085 case TCP_LISTEN:
4086 if (th->rst) {
4087 kfree_skb(skb, FREE_READ);
4088 release_sock(sk);
4089 return(0);
4090 }
4091 if (th->ack) {
4092 tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
4093 kfree_skb(skb, FREE_READ);
4094 release_sock(sk);
4095 return(0);
4096 }
4097
4098 if (th->syn)
4099 {
4100
4101
4102
4103
4104
4105
4106 tcp_conn_request(sk, skb, daddr, saddr, opt, dev);
4107 release_sock(sk);
4108 return(0);
4109 }
4110
4111 kfree_skb(skb, FREE_READ);
4112 release_sock(sk);
4113 return(0);
4114
4115 case TCP_SYN_RECV:
4116 if (th->syn) {
4117
4118 kfree_skb(skb, FREE_READ);
4119 release_sock(sk);
4120 return(0);
4121 }
4122
4123
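/*
 *	Fall through: a SYN_RECV socket that did not see a duplicate SYN
 *	continues with the sequence check in the default case and the
 *	handling below.
 */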
4124 default:
4125 if (!tcp_sequence(sk, th, len, opt, saddr,dev))
4126 {
4127 kfree_skb(skb, FREE_READ);
4128 release_sock(sk);
4129 return(0);
4130 }
4131
4132 case TCP_SYN_SENT:
4133 if (th->rst)
4134 {
4135 tcp_statistics.TcpAttemptFails++;
4136 sk->err = ECONNREFUSED;
4137 tcp_set_state(sk,TCP_CLOSE);
4138 sk->shutdown = SHUTDOWN_MASK;
4139 sk->zapped = 1;
4140 if (!sk->dead)
4141 {
4142 sk->state_change(sk);
4143 }
4144 kfree_skb(skb, FREE_READ);
4145 release_sock(sk);
4146 return(0);
4147 }
4148 if (!th->ack)
4149 {
4150 if (th->syn)
4151 {
4152 tcp_set_state(sk,TCP_SYN_RECV);
4153 }
4154 kfree_skb(skb, FREE_READ);
4155 release_sock(sk);
4156 return(0);
4157 }
4158
4159 switch(sk->state)
4160 {
4161 case TCP_SYN_SENT:
4162 if (!tcp_ack(sk, th, saddr, len))
4163 {
4164 tcp_statistics.TcpAttemptFails++;
4165 tcp_reset(daddr, saddr, th,
4166 sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
4167 kfree_skb(skb, FREE_READ);
4168 release_sock(sk);
4169 return(0);
4170 }
4171
4172
4173
4174
4175
4176 if (!th->syn)
4177 {
4178 kfree_skb(skb, FREE_READ);
4179 release_sock(sk);
4180 return(0);
4181 }
4182
4183
4184 sk->acked_seq = th->seq+1;
4185 sk->fin_seq = th->seq;
4186 tcp_send_ack(sk->sent_seq, th->seq+1,
4187 sk, th, sk->daddr);
4188
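/* The SYN has been acked; fall through to finish establishing. */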
4189 case TCP_SYN_RECV:
4190 if (!tcp_ack(sk, th, saddr, len))
4191 {
4192 tcp_statistics.TcpAttemptFails++;
4193 tcp_reset(daddr, saddr, th,
4194 sk->prot, opt, dev,sk->ip_tos,sk->ip_ttl);
4195 kfree_skb(skb, FREE_READ);
4196 release_sock(sk);
4197 return(0);
4198 }
4199
4200 tcp_set_state(sk,TCP_ESTABLISHED);
4201
4202
4203
4204
4205
4206
4207
4208 tcp_options(sk, th);
4209 sk->dummy_th.dest = th->source;
4210 sk->copied_seq = sk->acked_seq-1;
4211 if (!sk->dead)
4212 {
4213 sk->state_change(sk);
4214 }
4215
4216
4217
4218
4219
4220
4221
4222
4223
4224
4225 if (sk->max_window == 0)
4226 {
4227 sk->max_window = 32;
4228 sk->mss = min(sk->max_window, sk->mtu);
4229 }
4230
4231
4232
4233
4234
4235 if (th->urg)
4236 {
4237 if (tcp_urg(sk, th, saddr, len))
4238 {
4239 kfree_skb(skb, FREE_READ);
4240 release_sock(sk);
4241 return(0);
4242 }
4243 }
4244 if (tcp_data(skb, sk, saddr, len))
4245 kfree_skb(skb, FREE_READ);
4246
4247 if (th->fin)
4248 tcp_fin(skb, sk, th, saddr, dev);
4249 release_sock(sk);
4250 return(0);
4251 }
4252
4253 if (th->urg)
4254 {
4255 if (tcp_urg(sk, th, saddr, len))
4256 {
4257 kfree_skb(skb, FREE_READ);
4258 release_sock(sk);
4259 return(0);
4260 }
4261 }
4262 if (tcp_data(skb, sk, saddr, len))
4263 {
4264 kfree_skb(skb, FREE_READ);
4265 release_sock(sk);
4266 return(0);
4267 }
4268
4269 if (!th->fin)
4270 {
4271 release_sock(sk);
4272 return(0);
4273 }
4274 tcp_fin(skb, sk, th, saddr, dev);
4275 release_sock(sk);
4276 return(0);
4277 }
4278 }
4279
4280
4281
4282
4283
4284
4285
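/*
 *	tcp_write_wakeup() sends a bare ACK carrying our current window,
 *	using a sequence number one below sent_seq so that the other end
 *	is obliged to answer.  tcp_send_probe0() uses it as the
 *	zero-window probe, doubling the probe interval (capped at two
 *	minutes) on each attempt.
 */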
4286 static void tcp_write_wakeup(struct sock *sk)
4287 {
4288 struct sk_buff *buff;
4289 struct tcphdr *t1;
4290 struct device *dev=NULL;
4291 int tmp;
4292
4293 if (sk->zapped)
4294 return;
4295
4296
4297
4298
4299
4300
4301 if (sk->state != TCP_ESTABLISHED &&
4302 sk->state != TCP_CLOSE_WAIT &&
4303 sk->state != TCP_FIN_WAIT1 &&
4304 sk->state != TCP_LAST_ACK &&
4305 sk->state != TCP_CLOSING
4306 ) {
4307 return;
4308 }
4309
4310 buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
4311 if (buff == NULL)
4312 return;
4313
4314 buff->len = sizeof(struct tcphdr);
4315 buff->free = 1;
4316 buff->sk = sk;
4317 buff->localroute = sk->localroute;
4318
4319 t1 = (struct tcphdr *) buff->data;
4320
4321
4322 tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
4323 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
4324 if (tmp < 0)
4325 {
4326 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
4327 return;
4328 }
4329
4330 buff->len += tmp;
4331 t1 = (struct tcphdr *)((char *)t1 +tmp);
4332
4333 memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
4334
4335
4336
4337
4338
4339 t1->seq = htonl(sk->sent_seq-1);
4340 t1->ack = 1;
4341 t1->res1= 0;
4342 t1->res2= 0;
4343 t1->rst = 0;
4344 t1->urg = 0;
4345 t1->psh = 0;
4346 t1->fin = 0;
4347 t1->syn = 0;
4348 t1->ack_seq = ntohl(sk->acked_seq);
4349 t1->window = ntohs(tcp_select_window(sk));
4350 t1->doff = sizeof(*t1)/4;
4351 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
4352
4353
4354
4355
4356 sk->prot->queue_xmit(sk, dev, buff, 1);
4357 tcp_statistics.TcpOutSegs++;
4358 }
4359
4360 void
4361 tcp_send_probe0(struct sock *sk)
4362 {
4363 if (sk->zapped)
4364 return;
4365
4366 tcp_write_wakeup(sk);
4367
4368 sk->backoff++;
4369 sk->rto = min(sk->rto << 1, 120*HZ);
4370 reset_timer (sk, TIME_PROBE0, sk->rto);
4371 sk->retransmits++;
4372 sk->prot->retransmits++;
4373 }
4374
4375
4376
4377
4378
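/*
 *	Socket options.  At SOL_TCP only TCP_MAXSEG (a user-requested
 *	MSS, checked against 1..MAX_WINDOW and stored in sk->user_mss)
 *	and TCP_NODELAY (disable the Nagle algorithm) are understood;
 *	anything else is passed down to the IP level handlers.
 */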
4379 int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
4380 {
4381 int val,err;
4382
4383 if(level!=SOL_TCP)
4384 return ip_setsockopt(sk,level,optname,optval,optlen);
4385
4386 if (optval == NULL)
4387 return(-EINVAL);
4388
4389 err=verify_area(VERIFY_READ, optval, sizeof(int));
4390 if(err)
4391 return err;
4392
4393 val = get_fs_long((unsigned long *)optval);
4394
4395 switch(optname)
4396 {
4397 case TCP_MAXSEG:
4398
4399
4400
4401
4402
4403
4404 if(val<1||val>MAX_WINDOW)
4405 return -EINVAL;
4406 sk->user_mss=val;
4407 return 0;
4408 case TCP_NODELAY:
4409 sk->nonagle=(val==0)?0:1;
4410 return 0;
4411 default:
4412 return(-ENOPROTOOPT);
4413 }
4414 }
4415
4416 int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
4417 {
4418 int val,err;
4419
4420 if(level!=SOL_TCP)
4421 return ip_getsockopt(sk,level,optname,optval,optlen);
4422
4423 switch(optname)
4424 {
4425 case TCP_MAXSEG:
4426 val=sk->user_mss;
4427 break;
4428 case TCP_NODELAY:
4429 val=sk->nonagle;
4430 break;
4431 default:
4432 return(-ENOPROTOOPT);
4433 }
4434 err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
4435 if(err)
4436 return err;
4437 put_fs_long(sizeof(int),(unsigned long *) optlen);
4438
4439 err=verify_area(VERIFY_WRITE, optval, sizeof(int));
4440 if(err)
4441 return err;
4442 put_fs_long(val,(unsigned long *)optval);
4443
4444 return(0);
4445 }
4446
4447
4448 struct proto tcp_prot = {
4449 sock_wmalloc,
4450 sock_rmalloc,
4451 sock_wfree,
4452 sock_rfree,
4453 sock_rspace,
4454 sock_wspace,
4455 tcp_close,
4456 tcp_read,
4457 tcp_write,
4458 tcp_sendto,
4459 tcp_recvfrom,
4460 ip_build_header,
4461 tcp_connect,
4462 tcp_accept,
4463 ip_queue_xmit,
4464 tcp_retransmit,
4465 tcp_write_wakeup,
4466 tcp_read_wakeup,
4467 tcp_rcv,
4468 tcp_select,
4469 tcp_ioctl,
4470 NULL,
4471 tcp_shutdown,
4472 tcp_setsockopt,
4473 tcp_getsockopt,
4474 128,
4475 0,
4476 {NULL,},
4477 "TCP"
4478 };