This source file includes following definitions.
- min
- tcp_set_state
- tcp_select_window
- tcp_find_established
- tcp_dequeue_established
- tcp_time_wait
- tcp_retransmit
- tcp_err
- tcp_readable
- tcp_select
- tcp_ioctl
- tcp_check
- tcp_send_check
- tcp_send_skb
- tcp_dequeue_partial
- tcp_send_partial
- tcp_enqueue_partial
- tcp_send_ack
- tcp_build_header
- tcp_write
- tcp_sendto
- tcp_read_wakeup
- cleanup_rbuf
- tcp_read_urg
- tcp_read
- tcp_shutdown
- tcp_recvfrom
- tcp_reset
- tcp_options
- default_mask
- tcp_conn_request
- tcp_close
- tcp_write_xmit
- tcp_ack
- tcp_data
- tcp_check_urg
- tcp_urg
- tcp_fin
- tcp_accept
- tcp_connect
- tcp_sequence
- tcp_clean_end
- tcp_rcv
- tcp_write_wakeup
- tcp_send_probe0
- tcp_setsockopt
- tcp_getsockopt
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143 #include <linux/types.h>
144 #include <linux/sched.h>
145 #include <linux/mm.h>
146 #include <linux/string.h>
147 #include <linux/socket.h>
148 #include <linux/sockios.h>
149 #include <linux/termios.h>
150 #include <linux/in.h>
151 #include <linux/fcntl.h>
152 #include <linux/inet.h>
153 #include <linux/netdevice.h>
154 #include "snmp.h"
155 #include "ip.h"
156 #include "protocol.h"
157 #include "icmp.h"
158 #include "tcp.h"
159 #include <linux/skbuff.h>
160 #include "sock.h"
161 #include "route.h"
162 #include <linux/errno.h>
163 #include <linux/timer.h>
164 #include <asm/system.h>
165 #include <asm/segment.h>
166 #include <linux/mm.h>
167
168 #undef TCP_FASTPATH
169
170 #define SEQ_TICK 3
171 unsigned long seq_offset;
172 struct tcp_mib tcp_statistics;
173
174 #ifdef TCP_FASTPATH
175 unsigned long tcp_rx_miss=0, tcp_rx_hit1=0, tcp_rx_hit2=0;
176 #endif
177
178
/*
 *	Return the smaller of two unsigned quantities.
 */
static __inline__ int min(unsigned int a, unsigned int b)
{
	return (a < b) ? a : b;
}
185
186 #undef STATE_TRACE
187
188 static __inline__ void tcp_set_state(struct sock *sk, int state)
189 {
190 if(sk->state==TCP_ESTABLISHED)
191 tcp_statistics.TcpCurrEstab--;
192 #ifdef STATE_TRACE
193 if(sk->debug)
194 printk("TCP sk=%s, State %d -> %d\n",sk, sk->state,state);
195 #endif
196 sk->state=state;
197 if(state==TCP_ESTABLISHED)
198 tcp_statistics.TcpCurrEstab++;
199 }
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216 int tcp_select_window(struct sock *sk)
217 {
218 int new_window = sk->prot->rspace(sk);
219
220 if(sk->window_clamp)
221 new_window=min(sk->window_clamp,new_window);
222
223
224
225
226
227
228
229
230
231 if (new_window < min(sk->mss, MAX_WINDOW/2) || new_window < sk->window)
232 return(sk->window);
233 return(new_window);
234 }
235
236
237
238
239
240
241 static struct sk_buff *tcp_find_established(struct sock *s)
242 {
243 struct sk_buff *p=skb_peek(&s->receive_queue);
244 if(p==NULL)
245 return NULL;
246 do
247 {
248 if(p->sk->state == TCP_ESTABLISHED || p->sk->state >= TCP_FIN_WAIT1)
249 return p;
250 p=p->next;
251 }
252 while(p!=skb_peek(&s->receive_queue));
253 return NULL;
254 }
255
256 static struct sk_buff *tcp_dequeue_established(struct sock *s)
257 {
258 struct sk_buff *skb;
259 unsigned long flags;
260 save_flags(flags);
261 cli();
262 skb=tcp_find_established(s);
263 if(skb!=NULL)
264 skb_unlink(skb);
265 restore_flags(flags);
266 return skb;
267 }
268
269
270
271
272
273
/*
 *	Enter the TIME-WAIT state: mark both directions shut down,
 *	wake anyone sleeping on a state change, and arm the close
 *	timer that will finally release the socket after the
 *	TIME-WAIT interval.
 */
static void tcp_time_wait(struct sock *sk)
{
	tcp_set_state(sk,TCP_TIME_WAIT);
	sk->shutdown = SHUTDOWN_MASK;
	if (!sk->dead)
		sk->state_change(sk);
	reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
}
282
283
284
285
286
287
288
289
290 static void tcp_retransmit(struct sock *sk, int all)
291 {
292 if (all)
293 {
294 ip_retransmit(sk, all);
295 return;
296 }
297
298 sk->ssthresh = sk->cong_window >> 1;
299
300 sk->cong_count = 0;
301
302 sk->cong_window = 1;
303
304
305 ip_retransmit(sk, all);
306 }
307
308
309
310
311
312
313
314
315
316
317
/*
 *	Called by the ICMP layer when an error arrives that relates to
 *	a TCP segment we sent.  'header' points at the returned IP
 *	header; the embedded TCP header follows it.  Negative 'err' is
 *	a plain socket error; otherwise the low byte is the ICMP code
 *	and the high byte the ICMP type.
 */
void tcp_err(int err, unsigned char *header, unsigned long daddr,
	unsigned long saddr, struct inet_protocol *protocol)
{
	struct tcphdr *th;
	struct sock *sk;
	struct iphdr *iph=(struct iphdr *)header;

	/* Step over the (variable-length) IP header. */
	header+=4*iph->ihl;

	th =(struct tcphdr *)header;
	sk = get_sock(&tcp_prot, th->source, daddr, th->dest, saddr);

	/* Not one of our connections. */
	if (sk == NULL)
		return;

	/* Negative values are socket errors passed straight up. */
	if(err<0)
	{
		sk->err = -err;
		sk->error_report(sk);
		return;
	}

	if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8))
	{
		/* Source quench: shrink the congestion window a little,
		   but never below 4 segments. */
		if (sk->cong_window > 4)
			sk->cong_window--;
		return;
	}

	/* A fatal ICMP error, or any error while still in the SYN
	   handshake, kills the connection attempt. */
	if (icmp_err_convert[err & 0xff].fatal || sk->state == TCP_SYN_SENT)
	{
		if (sk->state == TCP_SYN_SENT)
		{
			tcp_statistics.TcpAttemptFails++;
			tcp_set_state(sk,TCP_CLOSE);
			sk->error_report(sk);
		}
		sk->err = icmp_err_convert[err & 0xff].errno;
	}
	return;
}
372
373
374
375
376
377
378
/*
 *	Walk the receive queue and count how many bytes are available
 *	to read without blocking.  Runs with interrupts off so the
 *	queue cannot change under us.  SYN octets occupy sequence
 *	space but carry no data, and the urgent byte is not counted
 *	as readable data.
 */
static int tcp_readable(struct sock *sk)
{
	unsigned long counted;
	unsigned long amount;
	struct sk_buff *skb;
	int sum;
	unsigned long flags;

	if(sk && sk->debug)
		printk("tcp_readable: %p - ",sk);

	save_flags(flags);
	cli();
	if (sk == NULL || (skb = skb_peek(&sk->receive_queue)) == NULL)
	{
		restore_flags(flags);
		if(sk && sk->debug)
			printk("empty\n");
		return(0);
	}

	/* First sequence number we have not yet handed to the user. */
	counted = sk->copied_seq+1;
	amount = 0;

	do
	{
		/* A gap in the sequence space: nothing beyond it is
		   readable yet. */
		if (before(counted, skb->h.th->seq))
			break;
		/* Data in this skb from 'counted' onwards. */
		sum = skb->len -(counted - skb->h.th->seq);
		if (skb->h.th->syn)
			sum++;
		if (sum > 0)
		{
			amount += sum;
			if (skb->h.th->syn)
				amount--;	/* SYN takes sequence space but no data */
			counted += sum;
		}
		/* The urgent byte is read out-of-band, not here. */
		if (skb->h.th->urg)
			amount--;
		/* A PSH marks a record boundary: stop counting there. */
		if (amount && skb->h.th->psh) break;
		skb = skb->next;
	}
	while(skb != (struct sk_buff *)&sk->receive_queue);

	restore_flags(flags);
	if(sk->debug)
		printk("got %lu bytes.\n",amount);
	return(amount);
}
446
447
448
449
450
451
452
/*
 *	select() support for TCP sockets.  For each interest class we
 *	register on the socket's wait queue, then report readiness:
 *	SEL_IN when data, an error, or a shutdown is pending (or a
 *	listener has an established child); SEL_OUT when enough send
 *	buffer is free and the connection is up; SEL_EX on errors or
 *	pending urgent data.  The socket is locked (inuse) for the
 *	whole check and released on every exit path.
 */
static int tcp_select(struct sock *sk, int sel_type, select_table *wait)
{
	sk->inuse = 1;

	switch(sel_type)
	{
		case SEL_IN:
			select_wait(sk->sleep, wait);
			if (skb_peek(&sk->receive_queue) != NULL)
			{
				/* A listener is readable when an accept()able
				   child exists; otherwise count real data. */
				if ((sk->state == TCP_LISTEN && tcp_find_established(sk)) || tcp_readable(sk))
				{
					release_sock(sk);
					return(1);
				}
			}
			if (sk->err != 0)
			{
				release_sock(sk);
				return(1);
			}
			if (sk->shutdown & RCV_SHUTDOWN)
			{
				release_sock(sk);
				return(1);
			}
			release_sock(sk);
			return(0);
		case SEL_OUT:
			select_wait(sk->sleep, wait);
			if (sk->shutdown & SEND_SHUTDOWN)
			{
				/* Writes are shut down: never writable. */
				release_sock(sk);
				return(0);
			}
			/* Require room for a full segment plus header slack
			   before claiming writability. */
			if (sk->prot->wspace(sk) >= sk->mtu+128+sk->prot->max_header)
			{
				release_sock(sk);
				/* Not writable while the handshake is still
				   in progress. */
				if (sk->state == TCP_SYN_RECV ||
				    sk->state == TCP_SYN_SENT) return(0);
				return(1);
			}
			release_sock(sk);
			return(0);
		case SEL_EX:
			select_wait(sk->sleep,wait);
			if (sk->err || sk->urg_data)
			{
				release_sock(sk);
				return(1);
			}
			release_sock(sk);
			return(0);
	}

	release_sock(sk);
	return(0);
}
519
520
521 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
522 {
523 int err;
524 switch(cmd)
525 {
526
527 case TIOCINQ:
528 #ifdef FIXME
529 case FIONREAD:
530 #endif
531 {
532 unsigned long amount;
533
534 if (sk->state == TCP_LISTEN)
535 return(-EINVAL);
536
537 sk->inuse = 1;
538 amount = tcp_readable(sk);
539 release_sock(sk);
540 err=verify_area(VERIFY_WRITE,(void *)arg,
541 sizeof(unsigned long));
542 if(err)
543 return err;
544 put_fs_long(amount,(unsigned long *)arg);
545 return(0);
546 }
547 case SIOCATMARK:
548 {
549 int answ = sk->urg_data && sk->urg_seq == sk->copied_seq+1;
550
551 err = verify_area(VERIFY_WRITE,(void *) arg,
552 sizeof(unsigned long));
553 if (err)
554 return err;
555 put_fs_long(answ,(int *) arg);
556 return(0);
557 }
558 case TIOCOUTQ:
559 {
560 unsigned long amount;
561
562 if (sk->state == TCP_LISTEN) return(-EINVAL);
563 amount = sk->prot->wspace(sk);
564 err=verify_area(VERIFY_WRITE,(void *)arg,
565 sizeof(unsigned long));
566 if(err)
567 return err;
568 put_fs_long(amount,(unsigned long *)arg);
569 return(0);
570 }
571 default:
572 return(-EINVAL);
573 }
574 }
575
576
577
578
579
580
/*
 *	Compute the TCP checksum over the pseudo-header plus the
 *	segment of 'len' bytes starting at 'th'.  Implemented in
 *	i386 inline assembly: the first block folds the pseudo-header
 *	(source, destination, protocol and length) into the running
 *	32-bit sum; the second sums the segment in 32-byte then
 *	4-byte strides, handles the 2- and 1-byte tails, and folds
 *	the carries down to 16 bits.  Returns the one's-complement
 *	result in host order.
 */
unsigned short tcp_check(struct tcphdr *th, int len,
	  unsigned long saddr, unsigned long daddr)
{
	unsigned long sum;

	if (saddr == 0) saddr = ip_my_addr();

	/* Pseudo-header: daddr + saddr + (len<<16 | proto*256),
	   with end-around carry. */
	__asm__("
	    addl %%ecx, %%ebx
	    adcl %%edx, %%ebx
	    adcl $0, %%ebx
	    "
	: "=b"(sum)
	: "0"(daddr), "c"(saddr), "d"((ntohs(len) << 16) + IPPROTO_TCP*256)
	: "bx", "cx", "dx" );
	/* Sum the TCP header and data, then fold to 16 bits. */
	__asm__("
	    movl %%ecx, %%edx
	    cld
	    cmpl $32, %%ecx
	    jb 2f
	    shrl $5, %%ecx
	    clc
	1:  lodsl
	    adcl %%eax, %%ebx
	    lodsl
	    adcl %%eax, %%ebx
	    lodsl
	    adcl %%eax, %%ebx
	    lodsl
	    adcl %%eax, %%ebx
	    lodsl
	    adcl %%eax, %%ebx
	    lodsl
	    adcl %%eax, %%ebx
	    lodsl
	    adcl %%eax, %%ebx
	    lodsl
	    adcl %%eax, %%ebx
	    loop 1b
	    adcl $0, %%ebx
	    movl %%edx, %%ecx
	2:  andl $28, %%ecx
	    je 4f
	    shrl $2, %%ecx
	    clc
	3:  lodsl
	    adcl %%eax, %%ebx
	    loop 3b
	    adcl $0, %%ebx
	4:  movl $0, %%eax
	    testw $2, %%dx
	    je 5f
	    lodsw
	    addl %%eax, %%ebx
	    adcl $0, %%ebx
	    movw $0, %%ax
	5:  test $1, %%edx
	    je 6f
	    lodsb
	    addl %%eax, %%ebx
	    adcl $0, %%ebx
	6:  movl %%ebx, %%eax
	    shrl $16, %%eax
	    addw %%ax, %%bx
	    adcw $0, %%bx
	    "
	: "=b"(sum)
	: "0"(sum), "c"(len), "S"(th)
	: "ax", "bx", "cx", "dx", "si" );

	/* One's complement of the folded sum is the checksum. */
	return((~sum) & 0xffff);
}
660
661
662
663 void tcp_send_check(struct tcphdr *th, unsigned long saddr,
664 unsigned long daddr, int len, struct sock *sk)
665 {
666 th->check = 0;
667 th->check = tcp_check(th, len, saddr, daddr);
668 return;
669 }
670
/*
 *	Transmit a fully-built TCP skb, or queue it if the send
 *	window, a pending retransmit, or the congestion window says
 *	we must wait.  Sanity-checks the header placement first and
 *	drops obviously corrupt buffers.
 */
static void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
{
	int size;
	struct tcphdr * th = skb->h.th;

	/* Length of the TCP header plus data within the skb. */
	size = skb->len - ((unsigned char *) th - skb->data);

	/* Corrupt length: toss the buffer rather than send garbage. */
	if (size < sizeof(struct tcphdr) || size > skb->len)
	{
		printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
			skb, skb->data, th, skb->len);
		kfree_skb(skb, FREE_WRITE);
		return;
	}

	/* A dataless segment is only legal if it carries SYN or FIN. */
	if (size == sizeof(struct tcphdr))
	{
		if(!th->syn && !th->fin)
		{
			printk("tcp_send_skb: attempt to queue a bogon.\n");
			kfree_skb(skb,FREE_WRITE);
			return;
		}
	}

	tcp_statistics.TcpOutSegs++;

	/* End sequence number this segment will consume. */
	skb->h.seq = ntohl(th->seq) + size - 4*th->doff;
	if (after(skb->h.seq, sk->window_seq) ||
	    (sk->retransmits && sk->timeout == TIME_WRITE) ||
	     sk->packets_out >= sk->cong_window)
	{
		/* Cannot send now: beyond the peer's window, mid
		   retransmit, or congestion window full.  Queue it. */
		th->check = 0;
		if (skb->next != NULL)
		{
			printk("tcp_send_partial: next != NULL\n");
			skb_unlink(skb);
		}
		skb_queue_tail(&sk->write_queue, skb);
		/* If the window is closed and nothing is in flight,
		   arm the zero-window probe timer. */
		if (before(sk->window_seq, sk->write_queue.next->h.seq) &&
		    sk->send_head == NULL &&
		    sk->ack_backlog == 0)
			reset_timer(sk, TIME_PROBE0, sk->rto);
	}
	else
	{
		/* Send immediately, piggy-backing the current ack
		   and window. */
		th->ack_seq = ntohl(sk->acked_seq);
		th->window = ntohs(tcp_select_window(sk));

		tcp_send_check(th, sk->saddr, sk->daddr, size, sk);

		sk->sent_seq = sk->write_seq;
		sk->prot->queue_xmit(sk, skb->dev, skb, 0);
	}
}
732
733 struct sk_buff * tcp_dequeue_partial(struct sock * sk)
734 {
735 struct sk_buff * skb;
736 unsigned long flags;
737
738 save_flags(flags);
739 cli();
740 skb = sk->partial;
741 if (skb) {
742 sk->partial = NULL;
743 del_timer(&sk->partial_timer);
744 }
745 restore_flags(flags);
746 return skb;
747 }
748
749 static void tcp_send_partial(struct sock *sk)
750 {
751 struct sk_buff *skb;
752
753 if (sk == NULL)
754 return;
755 while ((skb = tcp_dequeue_partial(sk)) != NULL)
756 tcp_send_skb(sk, skb);
757 }
758
/*
 *	Install 'skb' as the socket's pending partial-send buffer and
 *	(re)arm a one-tick timer that will flush it via
 *	tcp_send_partial().  Any previously pending partial buffer is
 *	sent immediately, outside the interrupts-off region.
 */
void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk)
{
	struct sk_buff * tmp;
	unsigned long flags;

	save_flags(flags);
	cli();
	tmp = sk->partial;
	if (tmp)
		del_timer(&sk->partial_timer);
	sk->partial = skb;
	init_timer(&sk->partial_timer);
	/* NOTE(review): expires is set to HZ, i.e. an absolute tick
	   value of one second from boot-epoch semantics of this era's
	   timer API — flush after at most one tick interval. */
	sk->partial_timer.expires = HZ;
	sk->partial_timer.function = (void (*)(unsigned long)) tcp_send_partial;
	sk->partial_timer.data = (unsigned long) sk;
	add_timer(&sk->partial_timer);
	restore_flags(flags);
	/* Push out the buffer we displaced, if any. */
	if (tmp)
		tcp_send_skb(sk, tmp);
}
779
780
781
782
783
784
/*
 *	Build and transmit a bare ACK segment carrying 'sequence' as
 *	its sequence number and 'ack' as its acknowledgement.  If no
 *	buffer memory is available the ack is deferred by bumping
 *	ack_backlog and arming a short write timer.
 */
static void tcp_send_ack(unsigned long sequence, unsigned long ack,
	     struct sock *sk,
	     struct tcphdr *th, unsigned long daddr)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	struct device *dev = NULL;
	int tmp;

	/* A zapped (reset) socket sends nothing. */
	if(sk->zapped)
		return;

	/* Grab a buffer; on failure remember we owe an ack and retry
	   soon via the timer. */
	buff = sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
	{
		sk->ack_backlog++;
		if (sk->timeout != TIME_WRITE && tcp_connected(sk->state))
		{
			reset_timer(sk, TIME_WRITE, 10);
		}
		return;
	}

	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;
	buff->localroute = sk->localroute;
	t1 =(struct tcphdr *) buff->data;

	/* Lay down the IP (and link) headers. */
	tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
				IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0)
	{
		/* No route: give the buffer back. */
		buff->free=1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		return;
	}
	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)t1 +tmp);

	/* Start from the incoming header, then swap the ports and
	   fill in our own fields. */
	memcpy(t1, th, sizeof(*t1));

	t1->dest = th->source;
	t1->source = th->dest;
	t1->seq = ntohl(sequence);
	t1->ack = 1;
	sk->window = tcp_select_window(sk);
	t1->window = ntohs(sk->window);
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	t1->fin = 0;
	if (ack == sk->acked_seq)
	{
		/* This ack is fully up to date: clear the backlog and,
		   if nothing is in flight, fall back to the keepalive
		   timer or no timer at all. */
		sk->ack_backlog = 0;
		sk->bytes_rcv = 0;
		sk->ack_timed = 0;
		if (sk->send_head == NULL && skb_peek(&sk->write_queue) == NULL
				  && sk->timeout == TIME_WRITE)
		{
			if(sk->keepopen) {
				reset_timer(sk,TIME_KEEPOPEN,TCP_TIMEOUT_LEN);
			} else {
				delete_timer(sk);
			}
		}
	}
	t1->ack_seq = ntohl(ack);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), sk);
	if (sk->debug)
		printk("\rtcp_ack: seq %lx ack %lx\n", sequence, ack);
	tcp_statistics.TcpOutSegs++;
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
873
874
875
876
877
878
879 static int tcp_build_header(struct tcphdr *th, struct sock *sk, int push)
880 {
881
882
883 memcpy(th,(void *) &(sk->dummy_th), sizeof(*th));
884 th->seq = htonl(sk->write_seq);
885 th->psh =(push == 0) ? 1 : 0;
886 th->doff = sizeof(*th)/4;
887 th->ack = 1;
888 th->fin = 0;
889 sk->ack_backlog = 0;
890 sk->bytes_rcv = 0;
891 sk->ack_timed = 0;
892 th->ack_seq = htonl(sk->acked_seq);
893 sk->window = tcp_select_window(sk);
894 th->window = htons(sk->window);
895
896 return(sizeof(*th));
897 }
898
899
900
901
902
903
/*
 *	Copy user data into TCP segments and send or queue them.
 *	Handles: errors and shutdown, blocking until the connection
 *	is established, appending to a pending partial segment,
 *	Nagle-style deferral of sub-MSS segments, blocking for buffer
 *	memory, and MSG_OOB (urgent) data.  Returns the number of
 *	bytes accepted, or a negative error if nothing was copied.
 */
static int tcp_write(struct sock *sk, unsigned char *from,
	  int len, int nonblock, unsigned flags)
{
	int copied = 0;
	int copy;
	int tmp;
	struct sk_buff *skb;
	struct sk_buff *send_tmp;
	unsigned char *buff;
	struct proto *prot;
	struct device *dev = NULL;

	sk->inuse=1;
	prot = sk->prot;
	while(len > 0)
	{
		/* A pending socket error ends the write; report what
		   was copied so far if anything was. */
		if (sk->err)
		{
			release_sock(sk);
			if (copied)
				return(copied);
			tmp = -sk->err;
			sk->err = 0;
			return(tmp);
		}

		/* Writing after shutdown is a broken pipe. */
		if (sk->shutdown & SEND_SHUTDOWN)
		{
			release_sock(sk);
			sk->err = EPIPE;
			if (copied)
				return(copied);
			sk->err = 0;
			return(-EPIPE);
		}

		/* Wait (or fail, for nonblocking calls) until the
		   connection reaches a state that can carry data. */
		while(sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)
		{
			if (sk->err)
			{
				release_sock(sk);
				if (copied)
					return(copied);
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV)
			{
				release_sock(sk);
				if (copied)
					return(copied);

				if (sk->err)
				{
					tmp = -sk->err;
					sk->err = 0;
					return(tmp);
				}

				if (sk->keepopen)
				{
					send_sig(SIGPIPE, current, 0);
				}
				return(-EPIPE);
			}

			if (nonblock || copied)
			{
				release_sock(sk);
				if (copied)
					return(copied);
				return(-EAGAIN);
			}

			release_sock(sk);
			cli();

			if (sk->state != TCP_ESTABLISHED &&
			    sk->state != TCP_CLOSE_WAIT && sk->err == 0)
			{
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked)
				{
					sti();
					if (copied)
						return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
		}

		/* If a partially-filled segment is pending, top it up
		   with new data before building a fresh one. */
		if ((skb = tcp_dequeue_partial(sk)) != NULL)
		{
			int hdrlen;

			/* IP/link header length plus the TCP header. */
			hdrlen = ((unsigned long)skb->h.th - (unsigned long)skb->data)
				 + sizeof(struct tcphdr);

			/* Append as much as fits (urgent data never
			   merges into a partial segment). */
			if (!(flags & MSG_OOB))
			{
				copy = min(sk->mss - (skb->len - hdrlen), len);

				if (copy <= 0)
				{
					printk("TCP: **bug**: \"copy\" <= 0!!\n");
					copy = 0;
				}

				memcpy_fromfs(skb->data + skb->len, from, copy);
				skb->len += copy;
				from += copy;
				copied += copy;
				len -= copy;
				sk->write_seq += copy;
			}
			/* Send it if full, urgent, or nothing is in
			   flight; otherwise keep deferring. */
			if ((skb->len - hdrlen) >= sk->mss ||
				(flags & MSG_OOB) || !sk->packets_out)
				tcp_send_skb(sk, skb);
			else
				tcp_enqueue_partial(skb, sk);
			continue;
		}

		/* Decide this segment's size: normally one MSS, but
		   clipped by the remaining data. */
		copy = sk->window_seq - sk->write_seq;
		if (copy <= 0 || copy < (sk->max_window >> 1) || copy > sk->mss)
			copy = sk->mss;
		if (copy > len)
			copy = len;

		/* A sub-MSS segment gets a full-size buffer so more
		   data can be appended to it later (Nagle). */
		send_tmp = NULL;
		if (copy < sk->mss && !(flags & MSG_OOB))
		{
			/* Release the socket: wmalloc may sleep. */
			release_sock(sk);

			skb = prot->wmalloc(sk, sk->mtu + 128 + prot->max_header, 0, GFP_KERNEL);
			sk->inuse = 1;
			send_tmp = skb;
		}
		else
		{
			/* Exact-size buffer for a full segment. */
			release_sock(sk);
			skb = prot->wmalloc(sk, copy + prot->max_header , 0, GFP_KERNEL);
			sk->inuse = 1;
		}

		/* No memory: fail (nonblocking) or sleep until some
		   write memory is freed. */
		if (skb == NULL)
		{
			if (nonblock)
			{
				release_sock(sk);
				if (copied)
					return(copied);
				return(-EAGAIN);
			}

			tmp = sk->wmem_alloc;
			release_sock(sk);
			cli();

			/* Only sleep if no memory was freed between
			   the check and now. */
			if (tmp <= sk->wmem_alloc &&
			  (sk->state == TCP_ESTABLISHED||sk->state == TCP_CLOSE_WAIT)
				&& sk->err == 0)
			{
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked)
				{
					sti();
					if (copied)
						return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
			continue;
		}

		skb->len = 0;
		skb->sk = sk;
		skb->free = 0;
		skb->localroute = sk->localroute|(flags&MSG_DONTROUTE);

		buff = skb->data;

		/* Build the IP (and link) headers. */
		tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev,
				 IPPROTO_TCP, sk->opt, skb->mem_len,sk->ip_tos,sk->ip_ttl);
		if (tmp < 0 )
		{
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			if (copied)
				return(copied);
			return(tmp);
		}
		skb->len += tmp;
		skb->dev = dev;
		buff += tmp;
		skb->h.th =(struct tcphdr *) buff;
		tmp = tcp_build_header((struct tcphdr *)buff, sk, len-copy);
		if (tmp < 0)
		{
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			if (copied)
				return(copied);
			return(tmp);
		}

		if (flags & MSG_OOB)
		{
			/* Urgent pointer marks the end of the OOB data. */
			((struct tcphdr *)buff)->urg = 1;
			((struct tcphdr *)buff)->urg_ptr = ntohs(copy);
		}
		skb->len += tmp;
		memcpy_fromfs(buff+tmp, from, copy);

		from += copy;
		copied += copy;
		len -= copy;
		skb->len += copy;
		skb->free = 0;
		sk->write_seq += copy;

		/* Defer a sub-MSS segment while data is in flight. */
		if (send_tmp != NULL && sk->packets_out)
		{
			tcp_enqueue_partial(send_tmp, sk);
			continue;
		}
		tcp_send_skb(sk, skb);
	}
	sk->err = 0;

	/* Flush a pending partial segment if nothing is in flight,
	   or if Nagle is disabled and the window has room. */
	if(sk->partial && ((!sk->packets_out)
	/* If not nagling we can send on the before case too.. */
	      || (sk->nonagle && before(sk->write_seq , sk->window_seq))
	))
	  	tcp_send_partial(sk);

	release_sock(sk);
	return(copied);
}
1227
1228
1229 static int tcp_sendto(struct sock *sk, unsigned char *from,
1230 int len, int nonblock, unsigned flags,
1231 struct sockaddr_in *addr, int addr_len)
1232 {
1233 if (flags & ~(MSG_OOB|MSG_DONTROUTE))
1234 return -EINVAL;
1235 if (sk->state == TCP_CLOSE)
1236 return -ENOTCONN;
1237 if (addr_len < sizeof(*addr))
1238 return -EINVAL;
1239 if (addr->sin_family && addr->sin_family != AF_INET)
1240 return -EINVAL;
1241 if (addr->sin_port != sk->dummy_th.dest)
1242 return -EISCONN;
1243 if (addr->sin_addr.s_addr != sk->daddr)
1244 return -EISCONN;
1245 return tcp_write(sk, from, len, nonblock, flags);
1246 }
1247
1248
/*
 *	Send a window-update ACK after the reader has freed receive
 *	buffer space.  Only acts when acks are actually owed
 *	(ack_backlog non-zero); if no buffer memory is available the
 *	attempt is retried shortly via the write timer.
 */
static void tcp_read_wakeup(struct sock *sk)
{
	int tmp;
	struct device *dev = NULL;
	struct tcphdr *t1;
	struct sk_buff *buff;

	/* Nothing owed: nothing to do. */
	if (!sk->ack_backlog)
		return;

	buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
	if (buff == NULL)
	{
		/* Out of memory: try again soon. */
		reset_timer(sk, TIME_WRITE, 10);
		return;
	}

	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;
	buff->localroute = sk->localroute;

	/* Build the IP (and link) headers. */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
			       IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0)
	{
		buff->free=1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		return;
	}

	buff->len += tmp;
	t1 =(struct tcphdr *)(buff->data +tmp);

	/* Bare ACK from the template header, advertising the newly
	   enlarged window. */
	memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
	t1->seq = htonl(sk->sent_seq);
	t1->ack = 1;
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	sk->window = tcp_select_window(sk);
	t1->window = ntohs(sk->window);
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
	sk->prot->queue_xmit(sk, dev, buff, 1);
	tcp_statistics.TcpOutSegs++;
}
1317
1318
1319
1320
1321
1322
1323
1324
1325
/*
 *	Free fully-consumed buffers at the head of the receive queue
 *	and, if that changed the advertisable window, arrange for the
 *	peer to learn about it: immediately when the change is large
 *	enough to matter, otherwise via a short delayed-ack timer.
 */
static void cleanup_rbuf(struct sock *sk)
{
	unsigned long flags;
	unsigned long left;
	struct sk_buff *skb;
	unsigned long rspace;

	if(sk->debug)
	  	printk("cleaning rbuf for sk=%p\n", sk);

	save_flags(flags);
	cli();

	/* Receive space before we free anything, so we can tell
	   whether the window grew. */
	left = sk->prot->rspace(sk);

	/* Drop consumed buffers from the head of the queue; stop at
	   the first one still in use. */
	while((skb=skb_peek(&sk->receive_queue)) != NULL)
	{
		if (!skb->used)
			break;
		skb_unlink(skb);
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
	}

	restore_flags(flags);

	if(sk->debug)
		printk("sk->rspace = %lu, was %lu\n", sk->prot->rspace(sk),
			left);
	if ((rspace=sk->prot->rspace(sk)) != left)
	{
		/* Space was freed: we owe the peer a window update. */
		sk->ack_backlog++;

		/* A big enough change (more than an MTU beyond what
		   the peer can already send) is worth an immediate
		   ack; smaller changes wait for the delayed-ack
		   timer to batch them. */
		if (rspace > (sk->window - sk->bytes_rcv + sk->mtu))
		{
			/* Send an ack right now. */
			tcp_read_wakeup(sk);
		}
		else
		{
			/* Force it to send an ack soon. */
			int was_active = del_timer(&sk->timer);
			if (!was_active || TCP_ACK_TIME < sk->timer.expires)
			{
				reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
			}
			else
				add_timer(&sk->timer);
		}
	}
}
1407
1408
1409
1410
1411
1412
/*
 *	Read the single byte of out-of-band (urgent) data.  Returns 1
 *	with the byte stored to user space, 0 at end of stream, or a
 *	negative error.  -EINVAL is returned when OOB data is being
 *	delivered inline or has already been read.  The blocking path
 *	is compiled out (NOTDEF): callers currently get -EAGAIN when
 *	no urgent byte is pending.
 */
static int tcp_read_urg(struct sock * sk, int nonblock,
	     unsigned char *to, int len, unsigned flags)
{
#ifdef NOTDEF
	struct wait_queue wait = { current, NULL };
#endif

	while (len > 0)
	{
		if (sk->urginline || !sk->urg_data || sk->urg_data == URG_READ)
			return -EINVAL;
		sk->inuse = 1;
		if (sk->urg_data & URG_VALID)
		{
			/* The urgent byte is stored in the low bits of
			   urg_data alongside its state flags. */
			char c = sk->urg_data;
			if (!(flags & MSG_PEEK))
				sk->urg_data = URG_READ;
			put_fs_byte(c, to);
			release_sock(sk);
			return 1;
		}

		release_sock(sk);

		if (sk->err)
		{
			int tmp = -sk->err;
			sk->err = 0;
			return tmp;
		}

		if (sk->state == TCP_CLOSE || sk->done)
		{
			if (!sk->done) {
				sk->done = 1;
				return 0;
			}
			return -ENOTCONN;
		}

		if (sk->shutdown & RCV_SHUTDOWN)
		{
			sk->done = 1;
			return 0;
		}

		/* No urgent byte yet and blocking is disabled here:
		   the sleep path below is compiled out. */
		return -EAGAIN;
#ifdef NOTDEF
		if (nonblock)
			return -EAGAIN;

		if (current->signal & ~current->blocked)
			return -ERESTARTSYS;

		current->state = TASK_INTERRUPTIBLE;
		add_wait_queue(sk->sleep, &wait);
		if ((sk->urg_data & URG_NOTYET) && sk->err == 0 &&
		    !(sk->shutdown & RCV_SHUTDOWN))
			schedule();
		remove_wait_queue(sk->sleep, &wait);
		current->state = TASK_RUNNING;
#endif
	}
	return 0;
}
1486
1487
1488
1489
1490
1491
/*
 *	Copy received data to user space.  Walks the receive queue
 *	following the copied_seq sequence cursor (a local cursor is
 *	used for MSG_PEEK so nothing is consumed), skips over the
 *	urgent byte unless urginline is set, marks exhausted buffers
 *	used for cleanup_rbuf() to free, and sleeps when no data is
 *	available on a blocking read.  Returns bytes copied or a
 *	negative error.
 */
static int tcp_read(struct sock *sk, unsigned char *to,
	int len, int nonblock, unsigned flags)
{
	struct wait_queue wait = { current, NULL };
	int copied = 0;
	unsigned long peek_seq;
	unsigned long *seq;
	unsigned long used;

	/* Listening sockets have no data stream. */
	if (sk->state == TCP_LISTEN)
		return -ENOTCONN;

	/* Urgent data is handled by its own routine. */
	if (flags & MSG_OOB)
		return tcp_read_urg(sk, nonblock, to, len, flags);

	/* PEEK advances a private cursor instead of copied_seq. */
	peek_seq = sk->copied_seq;
	seq = &sk->copied_seq;
	if (flags & MSG_PEEK)
		seq = &peek_seq;

	add_wait_queue(sk->sleep, &wait);
	sk->inuse = 1;
	while (len > 0)
	{
		struct sk_buff * skb;
		unsigned long offset;

		/* Stop at the urgent mark: data before and after it
		   must not be merged in one read. */
		if (copied && sk->urg_data && sk->urg_seq == 1+*seq)
			break;

		current->state = TASK_INTERRUPTIBLE;

		/* Find the buffer holding the next unread sequence
		   number, if it has arrived. */
		skb = skb_peek(&sk->receive_queue);
		do
		{
			if (!skb)
				break;
			if (before(1+*seq, skb->h.th->seq))
				break;
			offset = 1 + *seq - skb->h.th->seq;
			if (skb->h.th->syn)
				offset--;
			if (offset < skb->len)
				goto found_ok_skb;
			if (!(flags & MSG_PEEK))
				skb->used = 1;
			skb = skb->next;
		}
		while (skb != (struct sk_buff *)&sk->receive_queue);

		/* No more contiguous data; return what we have. */
		if (copied)
			break;

		if (sk->err)
		{
			copied = -sk->err;
			sk->err = 0;
			break;
		}

		if (sk->state == TCP_CLOSE)
		{
			if (!sk->done)
			{
				sk->done = 1;
				break;
			}
			copied = -ENOTCONN;
			break;
		}

		if (sk->shutdown & RCV_SHUTDOWN)
		{
			sk->done = 1;
			break;
		}

		if (nonblock)
		{
			copied = -EAGAIN;
			break;
		}

		/* Ack freed space and sleep until something arrives. */
		cleanup_rbuf(sk);
		release_sock(sk);
		schedule();
		sk->inuse = 1;

		if (current->signal & ~current->blocked)
		{
			copied = -ERESTARTSYS;
			break;
		}
		continue;

	found_ok_skb:
		/* Copy as much of this buffer as fits the request. */
		used = skb->len - offset;
		if (len < used)
			used = len;
		/* Do not cross the urgent mark, and skip the urgent
		   byte itself unless it is delivered inline. */
		if (sk->urg_data)
		{
			unsigned long urg_offset = sk->urg_seq - (1 + *seq);
			if (urg_offset < used)
			{
				if (!urg_offset)
				{
					if (!sk->urginline)
					{
						++*seq;
						offset++;
						used--;
					}
				}
				else
					used = urg_offset;
			}
		}

		memcpy_tofs(to,((unsigned char *)skb->h.th) +
			skb->h.th->doff*4 + offset, used);
		copied += used;
		len -= used;
		to += used;
		*seq += used;
		if (after(sk->copied_seq+1,sk->urg_seq))
			sk->urg_data = 0;
		if (!(flags & MSG_PEEK) && (used + offset >= skb->len))
			skb->used = 1;
	}
	remove_wait_queue(sk->sleep, &wait);
	current->state = TASK_RUNNING;

	/* Free consumed buffers and send any owed window update. */
	cleanup_rbuf(sk);
	release_sock(sk);
	return copied;
}
1636
1637
1638
1639
1640
1641
/*
 *	Shut down the sending side of the connection: flush any
 *	pending partial segment, build and send (or queue behind
 *	pending data) a FIN, and advance the connection state to
 *	FIN-WAIT-1 / LAST-ACK / FIN-WAIT-2 as appropriate.  A
 *	receive-only shutdown is a no-op here.
 */
void tcp_shutdown(struct sock *sk, int how)
{
	struct sk_buff *buff;
	struct tcphdr *t1, *th;
	struct proto *prot;
	int tmp;
	struct device *dev = NULL;

	/* Only the send direction involves sending a FIN. */
	if (!(how & SEND_SHUTDOWN))
		return;

	/* If a FIN has already been sent (or we are past that
	   point), there is nothing more to do. */
	if (sk->state == TCP_FIN_WAIT1 ||
	    sk->state == TCP_FIN_WAIT2 ||
	    sk->state == TCP_CLOSING ||
	    sk->state == TCP_LAST_ACK ||
	    sk->state == TCP_TIME_WAIT
	)
	{
		return;
	}
	sk->inuse = 1;

	/* Mark the direction closed before building the FIN. */
	sk->shutdown |= SEND_SHUTDOWN;

	/* Any deferred partial segment must go out ahead of the FIN. */
	if (sk->partial)
		tcp_send_partial(sk);

	prot =(struct proto *)sk->prot;
	th =(struct tcphdr *)&sk->dummy_th;
	release_sock(sk);
	buff = prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
	if (buff == NULL)
		return;
	sk->inuse = 1;

	buff->sk = sk;
	buff->len = sizeof(*t1);
	buff->localroute = sk->localroute;
	t1 =(struct tcphdr *) buff->data;

	/* Build the IP (and link) headers. */
	tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
			   IPPROTO_TCP, sk->opt,
			   sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
	if (tmp < 0)
	{
		/* No route: we cannot send the FIN, but we still
		   advance the state so the connection winds down.
		   The peer will eventually time out. */
		buff->free=1;
		prot->wfree(sk,buff->mem_addr, buff->mem_len);

		if (sk->state == TCP_ESTABLISHED)
			tcp_set_state(sk,TCP_FIN_WAIT1);
		else if(sk->state == TCP_CLOSE_WAIT)
			tcp_set_state(sk,TCP_LAST_ACK);
		else
			tcp_set_state(sk,TCP_FIN_WAIT2);

		release_sock(sk);
		return;
	}

	t1 =(struct tcphdr *)((char *)t1 +tmp);
	buff->len += tmp;
	buff->dev = dev;
	memcpy(t1, th, sizeof(*t1));
	t1->seq = ntohl(sk->write_seq);
	/* The FIN consumes one sequence number. */
	sk->write_seq++;
	buff->h.seq = sk->write_seq;
	t1->ack = 1;
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->window = ntohs(sk->window=tcp_select_window(sk));
	t1->fin = 1;
	t1->rst = 0;
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

	/* Data is still queued: the FIN must follow it, so append it
	   to the write queue rather than sending now. */
	if (skb_peek(&sk->write_queue) != NULL)
	{
		buff->free=0;
		if (buff->next != NULL)
		{
			printk("tcp_shutdown: next != NULL\n");
			skb_unlink(buff);
		}
		skb_queue_tail(&sk->write_queue, buff);
	}
	else
	{
		sk->sent_seq = sk->write_seq;
		sk->prot->queue_xmit(sk, dev, buff, 0);
	}

	/* Advance the connection state per RFC 793. */
	if (sk->state == TCP_ESTABLISHED)
		tcp_set_state(sk,TCP_FIN_WAIT1);
	else if (sk->state == TCP_CLOSE_WAIT)
		tcp_set_state(sk,TCP_LAST_ACK);
	else
		tcp_set_state(sk,TCP_FIN_WAIT2);

	release_sock(sk);
}
1781
1782
1783 static int
1784 tcp_recvfrom(struct sock *sk, unsigned char *to,
1785 int to_len, int nonblock, unsigned flags,
1786 struct sockaddr_in *addr, int *addr_len)
1787 {
1788 int result;
1789
1790
1791
1792
1793
1794
1795
1796 if(addr_len)
1797 *addr_len = sizeof(*addr);
1798 result=tcp_read(sk, to, to_len, nonblock, flags);
1799
1800 if (result < 0)
1801 return(result);
1802
1803 if(addr)
1804 {
1805 addr->sin_family = AF_INET;
1806 addr->sin_port = sk->dummy_th.dest;
1807 addr->sin_addr.s_addr = sk->daddr;
1808 }
1809 return(result);
1810 }
1811
1812
1813
1814
1815
1816
/*
 *	Send an RST in reply to a segment that has no matching
 *	connection.  Per RFC 793: if the offending segment carried an
 *	ACK, the RST takes its sequence from that ack and carries no
 *	ACK of its own; otherwise the RST has sequence zero and acks
 *	the segment (plus one if it was a SYN).  Works without a
 *	socket, allocating atomically.
 */
static void tcp_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
	  struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	int tmp;
	struct device *ndev=NULL;

	/* No socket to charge this to: allocate unowned. */
	buff = prot->wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
	  	return;

	buff->len = sizeof(*t1);
	buff->sk = NULL;
	buff->dev = dev;
	buff->localroute = 0;

	t1 =(struct tcphdr *) buff->data;

	/* Build the IP (and link) headers. */
	tmp = prot->build_header(buff, saddr, daddr, &ndev, IPPROTO_TCP, opt,
			   sizeof(struct tcphdr),tos,ttl);
	if (tmp < 0)
	{
		buff->free = 1;
		prot->wfree(NULL, buff->mem_addr, buff->mem_len);
		return;
	}

	t1 =(struct tcphdr *)((char *)t1 +tmp);
	buff->len += tmp;
	/* Start from the offending header, then swap ports. */
	memcpy(t1, th, sizeof(*t1));

	t1->dest = th->source;
	t1->source = th->dest;
	t1->rst = 1;
	t1->window = 0;

	if(th->ack)
	{
		/* RST in sequence with the ack we were sent. */
		t1->ack = 0;
		t1->seq = th->ack_seq;
		t1->ack_seq = 0;
	}
	else
	{
		/* No ack to echo: seq 0, ack their segment (SYN
		   consumes one sequence number). */
		t1->ack = 1;
		if(!th->syn)
			t1->ack_seq=htonl(th->seq);
		else
			t1->ack_seq=htonl(th->seq+1);
		t1->seq=0;
	}

	t1->syn = 0;
	t1->urg = 0;
	t1->fin = 0;
	t1->psh = 0;
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, saddr, daddr, sizeof(*t1), NULL);
	prot->queue_xmit(NULL, ndev, buff, 1);
	tcp_statistics.TcpOutSegs++;
}
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902 static void tcp_options(struct sock *sk, struct tcphdr *th)
1903 {
1904 unsigned char *ptr;
1905 int length=(th->doff*4)-sizeof(struct tcphdr);
1906 int mss_seen = 0;
1907
1908 ptr = (unsigned char *)(th + 1);
1909
1910 while(length>0)
1911 {
1912 int opcode=*ptr++;
1913 int opsize=*ptr++;
1914 switch(opcode)
1915 {
1916 case TCPOPT_EOL:
1917 return;
1918 case TCPOPT_NOP:
1919 length-=2;
1920 continue;
1921
1922 default:
1923 if(opsize<=2)
1924 return;
1925 switch(opcode)
1926 {
1927 case TCPOPT_MSS:
1928 if(opsize==4 && th->syn)
1929 {
1930 sk->mtu=min(sk->mtu,ntohs(*(unsigned short *)ptr));
1931 mss_seen = 1;
1932 }
1933 break;
1934
1935 }
1936 ptr+=opsize-2;
1937 length-=opsize;
1938 }
1939 }
1940 if (th->syn)
1941 {
1942 if (! mss_seen)
1943 sk->mtu=min(sk->mtu, 536);
1944 }
1945 #ifdef CONFIG_INET_PCTCP
1946 sk->mss = min(sk->max_window >> 1, sk->mtu);
1947 #else
1948 sk->mss = min(sk->max_window, sk->mtu);
1949 #endif
1950 }
1951
/*
 *	Return the classful network mask (network byte order) for the
 *	given destination address (also network byte order).
 */
static inline unsigned long default_mask(unsigned long dst)
{
	unsigned long host = ntohl(dst);

	if (IN_CLASSA(host))
		return htonl(IN_CLASSA_NET);
	return IN_CLASSB(host) ? htonl(IN_CLASSB_NET) : htonl(IN_CLASSC_NET);
}
1961
1962
1963
1964
1965
1966
1967
1968
1969
/*
 *	A SYN arrived on a listening socket: build a new sock in
 *	SYN_RECV state, answer with SYN+ACK (carrying an MSS option),
 *	and park the SYN skb on the listener's receive queue so that
 *	accept() can later find the new sock through it.
 */
static void tcp_conn_request(struct sock *sk, struct sk_buff *skb,
		 unsigned long daddr, unsigned long saddr,
		 struct options *opt, struct device *dev)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	unsigned char *ptr;
	struct sock *newsk;
	struct tcphdr *th;
	struct device *ndev=NULL;
	int tmp;
	struct rtable *rt;

	th = skb->h.th;

	/* A dead listener cannot accept: answer with a reset. */
	if (!sk->dead)
	{
		sk->data_ready(sk,0);
	}
	else
	{
		tcp_reset(daddr, saddr, th, sk->prot, opt, dev, sk->ip_tos,sk->ip_ttl);
		tcp_statistics.TcpAttemptFails++;
		kfree_skb(skb, FREE_READ);
		return;
	}

	/*
	 *	Accept backlog full: drop the SYN silently; the client
	 *	will retransmit and may get through later.
	 */
	if (sk->ack_backlog >= sk->max_ack_backlog)
	{
		tcp_statistics.TcpAttemptFails++;
		kfree_skb(skb, FREE_READ);
		return;
	}

	/*
	 *	Allocate the new socket: a clone of the listener with
	 *	every per-connection field then reset by hand.
	 */
	newsk = (struct sock *) kmalloc(sizeof(struct sock), GFP_ATOMIC);
	if (newsk == NULL)
	{
		/* Again: just drop, client retries. */
		tcp_statistics.TcpAttemptFails++;
		kfree_skb(skb, FREE_READ);
		return;
	}

	memcpy(newsk, sk, sizeof(*newsk));
	skb_queue_head_init(&newsk->write_queue);
	skb_queue_head_init(&newsk->receive_queue);
	newsk->send_head = NULL;
	newsk->send_tail = NULL;
	skb_queue_head_init(&newsk->back_log);
	newsk->rtt = 0;
	newsk->rto = TCP_TIMEOUT_INIT;
	newsk->mdev = 0;
	newsk->max_window = 0;
	newsk->cong_window = 1; /* start slow-start at one segment */
	newsk->cong_count = 0;
	newsk->ssthresh = 0;
	newsk->backoff = 0;
	newsk->blog = 0;
	newsk->intr = 0;
	newsk->proc = 0;
	newsk->done = 0;
	newsk->partial = NULL;
	newsk->pair = NULL;
	newsk->wmem_alloc = 0;
	newsk->rmem_alloc = 0;
	newsk->localroute = sk->localroute;

	newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;

	newsk->err = 0;
	newsk->shutdown = 0;
	newsk->ack_backlog = 0;
	/* His SYN consumes one sequence number. */
	newsk->acked_seq = skb->h.th->seq+1;
	newsk->fin_seq = skb->h.th->seq;
	newsk->copied_seq = skb->h.th->seq;
	newsk->state = TCP_SYN_RECV;
	newsk->timeout = 0;
	/* Pick our initial sequence number from the clock. */
	newsk->write_seq = jiffies * SEQ_TICK - seq_offset;
	newsk->window_seq = newsk->write_seq;
	newsk->rcv_ack_seq = newsk->write_seq;
	newsk->urg_data = 0;
	newsk->retransmits = 0;
	newsk->destroy = 0;
	init_timer(&newsk->timer);
	newsk->timer.data = (unsigned long)newsk;
	newsk->timer.function = &net_timer;
	/* Template header: our source is his destination and vice versa. */
	newsk->dummy_th.source = skb->h.th->dest;
	newsk->dummy_th.dest = skb->h.th->source;

	/*
	 *	Swap in the addresses: the connection's remote end is the
	 *	segment's source.
	 */
	newsk->daddr = saddr;
	newsk->saddr = daddr;

	put_sock(newsk->num,newsk);
	newsk->dummy_th.res1 = 0;
	newsk->dummy_th.doff = 6;
	newsk->dummy_th.fin = 0;
	newsk->dummy_th.syn = 0;
	newsk->dummy_th.rst = 0;
	newsk->dummy_th.psh = 0;
	newsk->dummy_th.ack = 0;
	newsk->dummy_th.urg = 0;
	newsk->dummy_th.res2 = 0;
	newsk->acked_seq = skb->h.th->seq + 1;
	newsk->copied_seq = skb->h.th->seq;
	newsk->socket = NULL;

	/*
	 *	Grab the ttl and tos values and use them: tos comes from
	 *	the incoming packet so we mirror the peer's choice.
	 */
	newsk->ip_ttl=sk->ip_ttl;
	newsk->ip_tos=skb->ip_hdr->tos;

	/*
	 *	Consult the route back to the peer for any window or MSS
	 *	clamps configured there.
	 */
	rt=ip_rt_route(saddr, NULL,NULL);

	if(rt!=NULL && (rt->rt_flags&RTF_WINDOW))
		newsk->window_clamp = rt->rt_window;
	else
		newsk->window_clamp = 0;

	if (sk->user_mss)
		newsk->mtu = sk->user_mss;
	else if(rt!=NULL && (rt->rt_flags&RTF_MSS))
		newsk->mtu = rt->rt_mss - HEADER_SIZE;
	else
	{
		/* Off-net destinations get a conservative 576-byte MTU. */
#ifdef CONFIG_INET_SNARL
		if ((saddr ^ daddr) & default_mask(saddr))
#else
		if ((saddr ^ daddr) & dev->pa_mask)
#endif
			newsk->mtu = 576 - HEADER_SIZE;
		else
			newsk->mtu = MAX_WINDOW;
	}

	/*
	 *	But never bigger than the device can carry.
	 */
	newsk->mtu = min(newsk->mtu, dev->mtu - HEADER_SIZE);

	/*
	 *	Parse his options (may lower newsk->mtu via his MSS).
	 */
	tcp_options(newsk,skb->h.th);

	buff = newsk->prot->wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
	{
		sk->err = -ENOMEM;
		newsk->dead = 1;
		release_sock(newsk);
		kfree_skb(skb, FREE_READ);
		tcp_statistics.TcpAttemptFails++;
		return;
	}

	buff->len = sizeof(struct tcphdr)+4;	/* header + MSS option */
	buff->sk = newsk;
	buff->localroute = newsk->localroute;

	t1 =(struct tcphdr *) buff->data;

	/*
	 *	Put in the IP header and routing stuff.
	 */
	tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &ndev,
			       IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);

	/*
	 *	Something went wrong: unwind everything.
	 */
	if (tmp < 0)
	{
		sk->err = tmp;
		buff->free=1;
		kfree_skb(buff,FREE_WRITE);
		newsk->dead = 1;
		release_sock(newsk);
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
		tcp_statistics.TcpAttemptFails++;
		return;
	}

	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)t1 +tmp);

	/* Start from his header, then rewrite it into our SYN+ACK. */
	memcpy(t1, skb->h.th, sizeof(*t1));
	buff->h.seq = newsk->write_seq;

	t1->dest = skb->h.th->source;
	t1->source = newsk->dummy_th.source;
	t1->seq = ntohl(newsk->write_seq++);	/* our SYN takes a sequence number */
	t1->ack = 1;
	newsk->window = tcp_select_window(newsk);
	newsk->sent_seq = newsk->write_seq;
	t1->window = ntohs(newsk->window);
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->psh = 0;
	t1->syn = 1;
	t1->ack_seq = ntohl(skb->h.th->seq+1);	/* ack his SYN */
	t1->doff = sizeof(*t1)/4+1;		/* +1 word for the MSS option */
	ptr =(unsigned char *)(t1+1);
	ptr[0] = 2;				/* option kind: MSS */
	ptr[1] = 4;				/* option length */
	ptr[2] = ((newsk->mtu) >> 8) & 0xff;
	ptr[3] =(newsk->mtu) & 0xff;

	tcp_send_check(t1, daddr, saddr, sizeof(*t1)+4, newsk);
	newsk->prot->queue_xmit(newsk, ndev, buff, 0);

	/* Retransmit the SYN+ACK if it goes unanswered. */
	reset_timer(newsk, TIME_WRITE , TCP_TIMEOUT_INIT);
	skb->sk = newsk;

	/*
	 *	Charge the queued SYN to the new socket's receive
	 *	budget rather than the listener's.
	 */
	sk->rmem_alloc -= skb->mem_len;
	newsk->rmem_alloc += skb->mem_len;

	skb_queue_tail(&sk->receive_queue,skb);
	sk->ack_backlog++;
	release_sock(newsk);
	tcp_statistics.TcpOutSegs++;
}
2232
2233
/*
 *	Close a TCP socket.  timeout==0 is a normal close(): unread
 *	receive data is discarded.  Depending on the current state this
 *	either just advances the state machine and timers, or builds
 *	and sends (or queues) a FIN segment.
 */
static void tcp_close(struct sock *sk, int timeout)
{
	struct sk_buff *buff;
	struct tcphdr *t1, *th;
	struct proto *prot;
	struct device *dev=NULL;
	int tmp;

	/*
	 *	Lock the socket; nothing more may be sent or received
	 *	by the user from here on.
	 */
	sk->inuse = 1;
	sk->keepopen = 1;
	sk->shutdown = SHUTDOWN_MASK;

	if (!sk->dead)
		sk->state_change(sk);

	if (timeout == 0)
	{
		/*
		 *	Real close(): flush anything still sitting unread
		 *	on the receive queue.
		 */
		if (skb_peek(&sk->receive_queue) != NULL)
		{
			struct sk_buff *skb;
			if(sk->debug)
				printk("Clean rcv queue\n");
			while((skb=skb_dequeue(&sk->receive_queue))!=NULL)
				kfree_skb(skb, FREE_READ);
			if(sk->debug)
				printk("Cleaned.\n");
		}
	}

	/*
	 *	Flush any partially built segment so the FIN goes out
	 *	behind all queued data.
	 */
	if (sk->partial)
	{
		tcp_send_partial(sk);
	}

	switch(sk->state)
	{
		case TCP_FIN_WAIT1:
		case TCP_FIN_WAIT2:
		case TCP_CLOSING:
			/*
			 *	Our FIN is already out.  Just make sure a
			 *	close timer is ticking (leave an already
			 *	running timer alone), and when lingering
			 *	was requested go straight to TIME_WAIT.
			 */
			if (!timeout) {
				int timer_active;

				timer_active = del_timer(&sk->timer);
				if (timer_active)
					add_timer(&sk->timer);	/* put it back untouched */
				else
					reset_timer(sk, TIME_CLOSE, 4 * sk->rto);
			}
			if (timeout)
				tcp_time_wait(sk);
			release_sock(sk);
			return;
		case TCP_TIME_WAIT:
		case TCP_LAST_ACK:
			/*
			 *	A timed-out close forces CLOSE right away.
			 */
			if (timeout)
			{
				tcp_set_state(sk,TCP_CLOSE);
			}
			release_sock(sk);
			return;
		case TCP_LISTEN:
			tcp_set_state(sk,TCP_CLOSE);
			release_sock(sk);
			return;
		case TCP_CLOSE:
			release_sock(sk);
			return;
		case TCP_CLOSE_WAIT:
		case TCP_ESTABLISHED:
		case TCP_SYN_SENT:
		case TCP_SYN_RECV:
			prot =(struct proto *)sk->prot;
			th =(struct tcphdr *)&sk->dummy_th;
			buff = prot->wmalloc(sk, MAX_FIN_SIZE, 1, GFP_ATOMIC);
			if (buff == NULL)
			{
				/*
				 *	No memory for the FIN right now:
				 *	fall back to ESTABLISHED and retry
				 *	the close from a short timer.
				 */
				release_sock(sk);
				if (sk->state != TCP_CLOSE_WAIT)
					tcp_set_state(sk,TCP_ESTABLISHED);
				reset_timer(sk, TIME_CLOSE, 100);
				return;
			}
			buff->sk = sk;
			buff->free = 1;
			buff->len = sizeof(*t1);
			buff->localroute = sk->localroute;
			t1 =(struct tcphdr *) buff->data;

			/*
			 *	Put in the IP header and routing stuff.
			 */
			tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
					 IPPROTO_TCP, sk->opt,
				         sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
			if (tmp < 0)
			{
				/*
				 *	No route: we cannot actually send the
				 *	FIN, but advance the sequence space
				 *	and state machine as if we had.
				 */
				sk->write_seq++;
				kfree_skb(buff,FREE_WRITE);
				if(sk->state==TCP_ESTABLISHED)
					tcp_set_state(sk,TCP_FIN_WAIT1);
				else
					tcp_set_state(sk,TCP_FIN_WAIT2);
				reset_timer(sk, TIME_CLOSE,4*sk->rto);
				if(timeout)
					tcp_time_wait(sk);

				release_sock(sk);
				return;
			}

			t1 =(struct tcphdr *)((char *)t1 +tmp);
			buff->len += tmp;
			buff->dev = dev;
			memcpy(t1, th, sizeof(*t1));
			t1->seq = ntohl(sk->write_seq);
			sk->write_seq++;	/* the FIN consumes a sequence number */
			buff->h.seq = sk->write_seq;
			t1->ack = 1;

			/*
			 *	Ack everything immediately from now on.
			 */
			sk->delay_acks = 0;
			t1->ack_seq = ntohl(sk->acked_seq);
			t1->window = ntohs(sk->window=tcp_select_window(sk));
			t1->fin = 1;
			t1->rst = 0;
			t1->doff = sizeof(*t1)/4;
			tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

			tcp_statistics.TcpOutSegs++;

			/*
			 *	If there is queued data, the FIN must go out
			 *	behind it; otherwise transmit it now.
			 */
			if (skb_peek(&sk->write_queue) == NULL)
			{
				sk->sent_seq = sk->write_seq;
				prot->queue_xmit(sk, dev, buff, 0);
			}
			else
			{
				reset_timer(sk, TIME_WRITE, sk->rto);
				if (buff->next != NULL)
				{
					printk("tcp_close: next != NULL\n");
					skb_unlink(buff);
				}
				skb_queue_tail(&sk->write_queue, buff);
			}

			/*
			 *	FIN sent (or queued): advance the state
			 *	machine accordingly.
			 */
			if (sk->state == TCP_ESTABLISHED)
				tcp_set_state(sk,TCP_FIN_WAIT1);
			else if (sk->state == TCP_CLOSE_WAIT)
				tcp_set_state(sk,TCP_LAST_ACK);
			else if (sk->state != TCP_CLOSING)
				tcp_set_state(sk,TCP_FIN_WAIT2);
	}
	release_sock(sk);
}
2435
2436
2437
2438
2439
2440
/*
 *	Push queued segments from the write queue onto the wire, as far
 *	as the peer's advertised window and our congestion window allow.
 */
static void
tcp_write_xmit(struct sock *sk)
{
	struct sk_buff *skb;

	/*
	 *	A zapped socket (we received a reset) sends nothing.
	 */
	if(sk->zapped)
		return;

	/*
	 *	Transmit while: a segment is queued, it fits inside the
	 *	offered window, we are not in the middle of a retransmit
	 *	run (unless the segment is already-acknowledged data),
	 *	and the congestion window has room.
	 */
	while((skb = skb_peek(&sk->write_queue)) != NULL &&
		before(skb->h.seq, sk->window_seq + 1) &&
		(sk->retransmits == 0 ||
		sk->timeout != TIME_WRITE ||
		before(skb->h.seq, sk->rcv_ack_seq + 1))
		&& sk->packets_out < sk->cong_window)
	{
		IS_SKB(skb);
		skb_unlink(skb);

		/*
		 *	Already acknowledged?  Then just free it.
		 */
		if (before(skb->h.seq, sk->rcv_ack_seq +1))
		{
			sk->retransmits = 0;
			kfree_skb(skb, FREE_WRITE);
			if (!sk->dead)
				sk->write_space(sk);
		}
		else
		{
			struct tcphdr *th;
			struct iphdr *iph;
			int size;

			/*
			 *	The ack and window fields were filled in
			 *	when the segment was queued and may be
			 *	stale: refresh them and re-checksum before
			 *	transmitting.
			 */
			iph = (struct iphdr *)(skb->data +
					skb->dev->hard_header_len);
			th = (struct tcphdr *)(((char *)iph) +(iph->ihl << 2));
			size = skb->len - (((unsigned char *) th) - skb->data);

			th->ack_seq = ntohl(sk->acked_seq);
			th->window = ntohs(tcp_select_window(sk));

			tcp_send_check(th, sk->saddr, sk->daddr, size, sk);

			sk->sent_seq = skb->h.seq;
			sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
		}
	}
}
2498
2499
2500
2501
2502
2503
/*
 *	Process an incoming ACK: validate it, update the send window,
 *	free acknowledged segments from the retransmit list, maintain
 *	the congestion window and rtt estimators, pick the next timer,
 *	and drive the closing states (LAST_ACK/FIN_WAIT1/CLOSING).
 *
 *	Returns 0 only for an unacceptable ack (acks data never sent,
 *	or an old ack in a non-synchronized state); 1 otherwise.
 *
 *	flag bits (set as we go, consumed by the retransmit check at
 *	the bottom):
 *	  1 - segment carried data / we made forward progress
 *	  2 - a retransmitted segment was acked (don't time it)
 *	  4 - the send window changed / queues were rearranged
 */
static int tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
{
	unsigned long ack;
	int flag = 0;

	if(sk->zapped)
		return(1);	/* connection was reset: nothing to do */

	ack = ntohl(th->ack_seq);
	/* Track the largest window the peer has ever offered; mss is
	   bounded by it. */
	if (ntohs(th->window) > sk->max_window)
	{
		sk->max_window = ntohs(th->window);
#ifdef CONFIG_INET_PCTCP
		sk->mss = min(sk->max_window>>1, sk->mtu);
#else
		sk->mss = min(sk->max_window, sk->mtu);
#endif
	}

	/* A keepalive probe has been answered. */
	if (sk->retransmits && sk->timeout == TIME_KEEPOPEN)
		sk->retransmits = 0;

	/*
	 *	The ack is outside [rcv_ack_seq, sent_seq]: either an
	 *	old duplicate (tolerated in synchronized states) or an
	 *	ack of data we never sent (rejected).
	 */
	if (after(ack, sk->sent_seq) || before(ack, sk->rcv_ack_seq))
	{
		if(sk->debug)
			printk("Ack ignored %lu %lu\n",ack,sk->sent_seq);

		if (after(ack, sk->sent_seq) || (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT))
		{
			return(0);
		}
		if (sk->keepopen)
		{
			/* An old ack still proves the peer is alive. */
			if(sk->timeout==TIME_KEEPOPEN)
				reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
		}
		return(1);
	}

	/* The segment carried data as well as the ack. */
	if (len != th->doff*4)
		flag |= 1;

	/*
	 *	The window shrank: pull every queued-for-retransmit
	 *	segment that now lies beyond the right edge back onto
	 *	the write queue, preserving sequence order.
	 */
	if (after(sk->window_seq, ack+ntohs(th->window)))
	{
		struct sk_buff *skb;
		struct sk_buff *skb2;
		struct sk_buff *wskb = NULL;

		skb2 = sk->send_head;
		sk->send_head = NULL;
		sk->send_tail = NULL;

		flag |= 4;

		sk->window_seq = ack + ntohs(th->window);
		cli();
		while (skb2 != NULL)
		{
			skb = skb2;
			skb2 = skb->link3;
			skb->link3 = NULL;
			if (after(skb->h.seq, sk->window_seq))
			{
				/* Beyond the window: back to the write queue. */
				if (sk->packets_out > 0)
					sk->packets_out--;

				/* It may still sit on a device queue too. */
				if (skb->next != NULL)
				{
					skb_unlink(skb);
				}

				if (wskb == NULL)
					skb_queue_head(&sk->write_queue,skb);
				else
					skb_append(wskb,skb);
				wskb = skb;
			}
			else
			{
				/* Still inside the window: keep it on the
				   retransmit list. */
				if (sk->send_head == NULL)
				{
					sk->send_head = skb;
					sk->send_tail = skb;
				}
				else
				{
					sk->send_tail->link3 = skb;
					sk->send_tail = skb;
				}
				skb->link3 = NULL;
			}
		}
		sti();
	}

	/* Keep head/tail/packets_out consistent if either end emptied. */
	if (sk->send_tail == NULL || sk->send_head == NULL)
	{
		sk->send_head = NULL;
		sk->send_tail = NULL;
		sk->packets_out= 0;
	}

	sk->window_seq = ack + ntohs(th->window);

	/*
	 *	Congestion window maintenance on forward progress while
	 *	transmitting: slow start below ssthresh (one segment per
	 *	ack), congestion avoidance above it (one segment per
	 *	window's worth of acks).  2048 is a hard upper clamp.
	 */
	if (sk->timeout == TIME_WRITE &&
		sk->cong_window < 2048 && after(ack, sk->rcv_ack_seq))
	{
		if (sk->cong_window < sk->ssthresh)
			/* In slow start. */
			sk->cong_window++;
		else
		{
			/* In congestion avoidance. */
			if (sk->cong_count >= sk->cong_window)
			{
				sk->cong_window++;
				sk->cong_count = 0;
			}
			else
				sk->cong_count++;
		}
	}

	sk->rcv_ack_seq = ack;

	/*
	 *	A zero-window probe was answered and the window now
	 *	covers the head of the write queue: stop backing off and
	 *	restore the rtt-derived timeout.
	 */
	if (sk->timeout == TIME_PROBE0)
	{
		if (skb_peek(&sk->write_queue) != NULL &&
		    ! before (sk->window_seq, sk->write_queue.next->h.seq))
		{
			sk->retransmits = 0;
			sk->backoff = 0;
			/* Recompute rto from the scaled srtt/mdev estimators,
			   clamped to [20 jiffies, 120s]. */
			sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
			if (sk->rto > 120*HZ)
				sk->rto = 120*HZ;
			if (sk->rto < 20)
				sk->rto = 20;
		}
	}

	/*
	 *	Free fully-acknowledged segments from the retransmit
	 *	list, timing the un-retransmitted ones (Karn: never time
	 *	a retransmitted segment).
	 */
	while(sk->send_head != NULL)
	{
		/* Sanity: the retransmit list must stay sequence ordered. */
		if (sk->send_head->link3 &&
		    after(sk->send_head->h.seq, sk->send_head->link3->h.seq))
			printk("INET: tcp.c: *** bug send_list out of order.\n");
		if (before(sk->send_head->h.seq, ack+1))
		{
			struct sk_buff *oskb;
			if (sk->retransmits)
			{
				/* This ack covers retransmitted data: do not
				   use it for rtt (flag 2), and keep the
				   retransmit state only while unacked data
				   remains. */
				flag |= 2;

				if (sk->send_head->link3)
					sk->retransmits = 1;
				else
					sk->retransmits = 0;
			}

			if (sk->packets_out > 0)
				sk->packets_out --;

			/* Wake the writer: buffer space is coming free. */
			if (!sk->dead)
				sk->write_space(sk);
			oskb = sk->send_head;

			if (!(flag&2))
			{
				long m;

				/*
				 *	Jacobson-style smoothed rtt and mean
				 *	deviation update from this segment's
				 *	timestamp (estimators kept scaled;
				 *	m is the error term folded in).
				 */
				m = jiffies - oskb->when;
				if(m<=0)
					m=1;
				m -= (sk->rtt >> 3);
				sk->rtt += m;
				if (m < 0)
					m = -m;
				m -= (sk->mdev >> 2);
				sk->mdev += m;

				/* New rto from the updated estimators,
				   clamped to [20 jiffies, 120s]. */
				sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
				if (sk->rto > 120*HZ)
					sk->rto = 120*HZ;
				if (sk->rto < 20)
					sk->rto = 20;
				sk->backoff = 0;
			}
			flag |= (2|4);
			cli();
			oskb = sk->send_head;
			IS_SKB(oskb);
			sk->send_head = oskb->link3;
			if (sk->send_head == NULL)
			{
				sk->send_tail = NULL;
			}

			/* The segment may also sit on a device queue. */
			if (oskb->next)
				skb_unlink(oskb);
			sti();
			kfree_skb(oskb, FREE_WRITE);
			if (!sk->dead)
				sk->write_space(sk);
		}
		else
		{
			break;	/* head not yet acked: stop */
		}
	}

	/*
	 *	With data still queued: push it out now if the window
	 *	permits, otherwise (window closed, nothing in flight)
	 *	start zero-window probing.
	 */
	if (skb_peek(&sk->write_queue) != NULL)
	{
		if (after (sk->window_seq+1, sk->write_queue.next->h.seq) &&
			(sk->retransmits == 0 ||
			 sk->timeout != TIME_WRITE ||
			 before(sk->write_queue.next->h.seq, sk->rcv_ack_seq + 1))
			&& sk->packets_out < sk->cong_window)
		{
			flag |= 1;
			tcp_write_xmit(sk);
		}
		else if (before(sk->window_seq, sk->write_queue.next->h.seq) &&
			sk->send_head == NULL &&
			sk->ack_backlog == 0 &&
			sk->state != TCP_TIME_WAIT)
		{
			reset_timer(sk, TIME_PROBE0, sk->rto);
		}
	}
	else
	{
		/*
		 *	Nothing left to send: choose the right timer for
		 *	the current state.
		 */
		switch(sk->state) {
		case TCP_TIME_WAIT:
			/* Keep us in TIME_WAIT for the full period. */
			reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			break;
		case TCP_CLOSE:
			/* Fully closed: no timer needed. */
			break;
		default:
			/* Retransmit timer while anything is outstanding,
			   keepalive otherwise, else no timer at all. */
			if (sk->send_head || skb_peek(&sk->write_queue) != NULL || sk->ack_backlog) {
				reset_timer(sk, TIME_WRITE, sk->rto);
			} else if (sk->keepopen) {
				reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
			} else {
				delete_timer(sk);
			}
			break;
		}
#ifdef NOTDEF
		if (sk->send_head == NULL && sk->ack_backlog == 0 &&
		sk->state != TCP_TIME_WAIT && !sk->keepopen)
		{
			if (!sk->dead)
				sk->write_space(sk);
			if (sk->keepopen) {
				reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
			} else {
				delete_timer(sk);
			}
		}
		else
		{
			if (sk->state != (unsigned char) sk->keepopen)
			{
				reset_timer(sk, TIME_WRITE, sk->rto);
			}
			if (sk->state == TCP_TIME_WAIT)
			{
				reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			}
		}
#endif
	}

	/*
	 *	Everything in flight has been acked and a partially
	 *	filled segment is pending: push it out now.
	 */
	if (sk->packets_out == 0 && sk->partial != NULL &&
		skb_peek(&sk->write_queue) == NULL && sk->send_head == NULL)
	{
		flag |= 1;
		tcp_send_partial(sk);
	}

	/*
	 *	LAST_ACK: when our FIN (and all data) is acked and we
	 *	have acked his FIN, the connection is fully closed.
	 */
	if (sk->state == TCP_LAST_ACK)
	{
		if (!sk->dead)
			sk->state_change(sk);
		if (sk->rcv_ack_seq == sk->write_seq && sk->acked_seq == sk->fin_seq)
		{
			flag |= 1;
			tcp_set_state(sk,TCP_CLOSE);
			sk->shutdown = SHUTDOWN_MASK;
		}
	}

	/*
	 *	FIN_WAIT1: our FIN acked means move on to FIN_WAIT2.
	 */
	if (sk->state == TCP_FIN_WAIT1)
	{
		if (!sk->dead)
			sk->state_change(sk);
		if (sk->rcv_ack_seq == sk->write_seq)
		{
			flag |= 1;
			sk->shutdown |= SEND_SHUTDOWN;
			tcp_set_state(sk,TCP_FIN_WAIT2);
		}
	}

	/*
	 *	CLOSING (simultaneous close): our FIN acked means enter
	 *	TIME_WAIT.
	 */
	if (sk->state == TCP_CLOSING)
	{
		if (!sk->dead)
			sk->state_change(sk);
		if (sk->rcv_ack_seq == sk->write_seq)
		{
			flag |= 1;
			tcp_time_wait(sk);
		}
	}

	/*
	 *	Retransmit heuristic: on a completely quiet ack (no
	 *	data, no progress) or after queue rearrangement, when we
	 *	were already retransmitting or the oldest outstanding
	 *	segment has waited longer than rto, resend the head of
	 *	the retransmit list.
	 */
	if (((!flag) || (flag&4)) && sk->send_head != NULL &&
		(((flag&2) && sk->retransmits) ||
		(sk->send_head->when + sk->rto < jiffies)))
	{
		ip_do_retransmit(sk, 1);
		reset_timer(sk, TIME_WRITE, sk->rto);
	}

	return(1);
}
3013
3014
3015
3016
3017
3018
3019
3020
/*
 *	Handle a data-bearing segment: insert it into the receive
 *	queue in sequence order, advance acked_seq over any in-order
 *	run, and arrange for an ack (immediate or delayed).  Always
 *	returns 0.
 */
static int tcp_data(struct sk_buff *skb, struct sock *sk,
	 unsigned long saddr, unsigned short len)
{
	struct sk_buff *skb1, *skb2;
	struct tcphdr *th;
	int dup_dumped=0;
	unsigned long new_seq;

	th = skb->h.th;
	skb->len = len -(th->doff*4);	/* payload bytes only */

	sk->bytes_rcv += skb->len;

	if (skb->len == 0 && !th->fin && !th->urg && !th->psh)
	{
		/*
		 *	Empty segment with none of the flags we care
		 *	about: nothing to queue.  Ack it unless it was
		 *	itself an ack.
		 */
		if (!th->ack)
			tcp_send_ack(sk->sent_seq, sk->acked_seq,sk, th, saddr);
		kfree_skb(skb, FREE_READ);
		return(0);
	}

	/*
	 *	New data after our receive side was shut down: the peer
	 *	is violating the close - reset the connection.
	 */
	if(sk->shutdown & RCV_SHUTDOWN)
	{
		new_seq= th->seq + skb->len + th->syn;	/* right edge of the segment */

		if(after(new_seq,sk->acked_seq+1))	/* genuinely new data */
		{
			sk->acked_seq = new_seq + th->fin;
			tcp_reset(sk->saddr, sk->daddr, skb->h.th,
				sk->prot, NULL, skb->dev, sk->ip_tos, sk->ip_ttl);
			tcp_statistics.TcpEstabResets++;
			tcp_set_state(sk,TCP_CLOSE);
			sk->err = EPIPE;
			sk->shutdown = SHUTDOWN_MASK;
			kfree_skb(skb, FREE_READ);
			if (!sk->dead)
				sk->state_change(sk);
			return(0);
		}
	}

	/*
	 *	Insert the skb into the receive queue in sequence order,
	 *	scanning backwards from the tail.  An exact duplicate
	 *	that is no longer than the new copy is replaced by it.
	 */
	if (skb_peek(&sk->receive_queue) == NULL)	/* empty queue: trivial */
	{
		skb_queue_head(&sk->receive_queue,skb);
		skb1= NULL;
	}
	else
	{
		for(skb1=sk->receive_queue.prev; ; skb1 = skb1->prev)
		{
			if(sk->debug)
			{
				printk("skb1=%p :", skb1);
				printk("skb1->h.th->seq = %ld: ", skb1->h.th->seq);
				printk("skb->h.th->seq = %ld\n",skb->h.th->seq);
				printk("copied_seq = %ld acked_seq = %ld\n", sk->copied_seq,
						sk->acked_seq);
			}

			/* Duplicate with at least as much data: replace. */
			if (th->seq==skb1->h.th->seq && skb->len>= skb1->len)
			{
				skb_append(skb1,skb);
				skb_unlink(skb1);
				kfree_skb(skb1,FREE_READ);
				dup_dumped=1;
				skb1=NULL;
				break;
			}

			/* Found our spot: insert after skb1. */
			if (after(th->seq+1, skb1->h.th->seq))
			{
				skb_append(skb1,skb);
				break;
			}

			/* Reached the head: ours is the oldest segment. */
			if (skb1 == skb_peek(&sk->receive_queue))
			{
				skb_queue_head(&sk->receive_queue, skb);
				break;
			}
		}
	}

	/*
	 *	Cache the segment's right edge (SYN and FIN each count
	 *	one) in th->ack_seq; used below to advance acked_seq.
	 */
	th->ack_seq = th->seq + skb->len;
	if (th->syn)
		th->ack_seq++;
	if (th->fin)
		th->ack_seq++;

	if (before(sk->acked_seq, sk->copied_seq))
	{
		printk("*** tcp.c:tcp_data bug acked < copied\n");
		sk->acked_seq = sk->copied_seq;
	}

	/*
	 *	If this segment is in order (fills up to acked_seq),
	 *	advance acked_seq across it and every newly contiguous
	 *	successor, shrinking our offered window as data arrives.
	 */
	if ((!dup_dumped && (skb1 == NULL || skb1->acked)) || before(th->seq, sk->acked_seq+1))
	{
		if (before(th->seq, sk->acked_seq+1))
		{
			int newwindow;

			if (after(th->ack_seq, sk->acked_seq))
			{
				newwindow = sk->window-(th->ack_seq - sk->acked_seq);
				if (newwindow < 0)
					newwindow = 0;
				sk->window = newwindow;
				sk->acked_seq = th->ack_seq;
			}
			skb->acked = 1;

			/* An in-sequence FIN closes our receive side. */
			if (skb->h.th->fin)
			{
				if (!sk->dead)
					sk->state_change(sk);
				sk->shutdown |= RCV_SHUTDOWN;
			}

			/* Sweep forward over segments that are now in
			   sequence thanks to this one. */
			for(skb2 = skb->next;
			    skb2 != (struct sk_buff *)&sk->receive_queue;
			    skb2 = skb2->next)
			{
				if (before(skb2->h.th->seq, sk->acked_seq+1))
				{
					if (after(skb2->h.th->ack_seq, sk->acked_seq))
					{
						newwindow = sk->window -
						 (skb2->h.th->ack_seq - sk->acked_seq);
						if (newwindow < 0)
							newwindow = 0;
						sk->window = newwindow;
						sk->acked_seq = skb2->h.th->ack_seq;
					}
					skb2->acked = 1;

					if (skb2->h.th->fin)
					{
						sk->shutdown |= RCV_SHUTDOWN;
						if (!sk->dead)
							sk->state_change(sk);
					}

					/* A hole was just filled: force an
					   immediate ack. */
					sk->ack_backlog = sk->max_ack_backlog;
				}
				else
				{
					break;	/* still a gap: stop */
				}
			}

			/*
			 *	Ack policy: immediate when delayed acks are
			 *	off, the backlog is full, too many bytes are
			 *	unacked, or a FIN arrived (the ack itself is
			 *	sent further below); otherwise queue a
			 *	delayed ack on the timer.
			 */
			if (!sk->delay_acks ||
			    sk->ack_backlog >= sk->max_ack_backlog ||
			    sk->bytes_rcv > sk->max_unacked || th->fin) {
				/* fall through: acked below */
			}
			else
			{
				sk->ack_backlog++;
				if(sk->debug)
					printk("Ack queued.\n");
				reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
			}
		}
	}

	/*
	 *	The segment went in out of order.  If receive buffer
	 *	space is short, prune unacked (out-of-order) segments
	 *	from the head, then ack what we actually hold.
	 */
	if (!skb->acked)
	{
		while (sk->prot->rspace(sk) < sk->mtu)
		{
			skb1 = skb_peek(&sk->receive_queue);
			if (skb1 == NULL)
			{
				printk("INET: tcp.c:tcp_data memory leak detected.\n");
				break;
			}

			/* Never throw away data we have already acked. */
			if (skb1->acked)
			{
				break;
			}

			skb_unlink(skb1);
			kfree_skb(skb1, FREE_READ);
		}
		tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
		sk->ack_backlog++;
		reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
	}
	else
	{
		/* In-order data: ack it now. */
		tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
	}

	/*
	 *	Tell the user there is data waiting to be read.
	 */
	if (!sk->dead)
	{
		if(sk->debug)
			printk("Data wakeup.\n");
		sk->data_ready(sk,0);
	}
	return(0);
}
3306
3307
3308 static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
3309 {
3310 unsigned long ptr = ntohs(th->urg_ptr);
3311
3312 if (ptr)
3313 ptr--;
3314 ptr += th->seq;
3315
3316
3317 if (after(sk->copied_seq+1, ptr))
3318 return;
3319
3320
3321 if (sk->urg_data && !after(ptr, sk->urg_seq))
3322 return;
3323
3324
3325 if (sk->proc != 0) {
3326 if (sk->proc > 0) {
3327 kill_proc(sk->proc, SIGURG, 1);
3328 } else {
3329 kill_pg(-sk->proc, SIGURG, 1);
3330 }
3331 }
3332 sk->urg_data = URG_NOTYET;
3333 sk->urg_seq = ptr;
3334 }
3335
3336 static inline int tcp_urg(struct sock *sk, struct tcphdr *th,
3337 unsigned long saddr, unsigned long len)
3338 {
3339 unsigned long ptr;
3340
3341
3342 if (th->urg)
3343 tcp_check_urg(sk,th);
3344
3345
3346 if (sk->urg_data != URG_NOTYET)
3347 return 0;
3348
3349
3350 ptr = sk->urg_seq - th->seq + th->doff*4;
3351 if (ptr >= len)
3352 return 0;
3353
3354
3355 sk->urg_data = URG_VALID | *(ptr + (unsigned char *) th);
3356 if (!sk->dead)
3357 sk->data_ready(sk,0);
3358 return 0;
3359 }
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
/*
 *	Process a received FIN: record where the peer's data stream
 *	ends (fin_seq) and advance the state machine.  ack_backlog is
 *	bumped so the FIN gets acknowledged.  Returns 0.
 */
static int tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th,
	 unsigned long saddr, struct device *dev)
{
	/* End of his sequence space: data plus SYN/FIN flags. */
	sk->fin_seq = th->seq + skb->len + th->syn + th->fin;

	if (!sk->dead)
	{
		sk->state_change(sk);
	}

	switch(sk->state)
	{
		case TCP_SYN_RECV:
		case TCP_SYN_SENT:
		case TCP_ESTABLISHED:
			/*
			 *	Peer closed first: move to CLOSE_WAIT and
			 *	wait for our application to close.
			 */
			reset_timer(sk, TIME_CLOSE, TCP_TIMEOUT_LEN);
			tcp_set_state(sk,TCP_CLOSE_WAIT);
			if (th->rst)
				sk->shutdown = SHUTDOWN_MASK;
			break;

		case TCP_CLOSE_WAIT:
		case TCP_CLOSING:
			/*
			 *	Duplicate FIN: state unchanged.
			 */
			break;
		case TCP_TIME_WAIT:
			/*
			 *	Retransmitted FIN: restart the 2MSL timer.
			 */
			reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			return(0);
		case TCP_FIN_WAIT1:
			/*
			 *	Simultaneous close: his FIN crossed ours in
			 *	flight.  Go to CLOSING; the ack of our own
			 *	FIN moves us on from there (see tcp_ack).
			 */
			reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			tcp_set_state(sk,TCP_CLOSING);
			break;
		case TCP_FIN_WAIT2:
			/*
			 *	Both sides closed: enter TIME_WAIT.
			 */
			reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			sk->shutdown|=SHUTDOWN_MASK;
			tcp_set_state(sk,TCP_TIME_WAIT);
			break;
		case TCP_CLOSE:
			/*
			 *	Already closed: nothing to do.
			 */
			break;
		default:
			tcp_set_state(sk,TCP_LAST_ACK);

			/* Start the timers. */
			reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			return(0);
	}
	sk->ack_backlog++;	/* make sure the FIN gets acked */

	return(0);
}
3450
3451
3452
/*
 *	accept() on a listening TCP socket: wait (unless O_NONBLOCK)
 *	until a connection on the listener's queue has reached the
 *	ESTABLISHED state, then return its newly created sock.
 *	On failure returns NULL with sk->err set.
 */
static struct sock *
tcp_accept(struct sock *sk, int flags)
{
	struct sock *newsk;
	struct sk_buff *skb;

	/*
	 *	Only a listening socket can accept.
	 */
	if (sk->state != TCP_LISTEN)
	{
		sk->err = EINVAL;
		return(NULL);
	}

	/* Interrupts off to avoid racing the arrival of a connection
	   between the dequeue attempt and going to sleep. */
	cli();
	sk->inuse = 1;

	while((skb = tcp_dequeue_established(sk)) == NULL)
	{
		if (flags & O_NONBLOCK)
		{
			sti();
			release_sock(sk);
			sk->err = EAGAIN;
			return(NULL);
		}

		release_sock(sk);
		interruptible_sleep_on(sk->sleep);
		/* Interrupted by a signal: let the syscall restart. */
		if (current->signal & ~current->blocked)
		{
			sti();
			sk->err = ERESTARTSYS;
			return(NULL);
		}
		sk->inuse = 1;
	}
	sti();

	/*
	 *	The queued SYN skb carries the new connection's sock.
	 */
	newsk = skb->sk;

	kfree_skb(skb, FREE_READ);
	sk->ack_backlog--;
	release_sock(sk);
	return(newsk);
}
3507
3508
3509
3510
3511
3512
/*
 *	Active open: validate the destination, build and transmit the
 *	initial SYN (with an MSS option) and move the socket to
 *	SYN_SENT.  Returns 0 or a negative errno.
 */
static int tcp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len)
{
	struct sk_buff *buff;
	struct device *dev=NULL;
	unsigned char *ptr;
	int tmp;
	struct tcphdr *t1;
	struct rtable *rt;

	if (sk->state != TCP_CLOSE)
		return(-EISCONN);

	if (addr_len < 8)
		return(-EINVAL);

	if (usin->sin_family && usin->sin_family != AF_INET)
		return(-EAFNOSUPPORT);

	/*
	 *	connect() to INADDR_ANY means our own address.
	 */
	if(usin->sin_addr.s_addr==INADDR_ANY)
		usin->sin_addr.s_addr=ip_my_addr();

	/*
	 *	No TCP connections to broadcast addresses.
	 */
	if (ip_chk_addr(usin->sin_addr.s_addr) == IS_BROADCAST)
	{
		return -ENETUNREACH;
	}

	/*
	 *	Connecting to our own address/port pair would loop.
	 */
	if(sk->saddr == usin->sin_addr.s_addr && sk->num==ntohs(usin->sin_port))
		return -EBUSY;

	sk->inuse = 1;
	sk->daddr = usin->sin_addr.s_addr;
	/* Initial sequence number from the clock. */
	sk->write_seq = jiffies * SEQ_TICK - seq_offset;
	sk->window_seq = sk->write_seq;
	sk->rcv_ack_seq = sk->write_seq -1;
	sk->err = 0;
	sk->dummy_th.dest = usin->sin_port;
	release_sock(sk);

	buff = sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
	if (buff == NULL)
	{
		return(-ENOMEM);
	}
	sk->inuse = 1;
	buff->len = 24;	/* 20 byte header + 4 bytes of MSS option */
	buff->sk = sk;
	buff->free = 1;
	buff->localroute = sk->localroute;

	t1 = (struct tcphdr *) buff->data;

	/*
	 *	Look up the route first so its window/MSS clamps can be
	 *	applied below.
	 */
	rt=ip_rt_route(sk->daddr, NULL, NULL);

	/*
	 *	Put in the IP header and routing stuff.
	 */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
		IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0)
	{
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		release_sock(sk);
		return(-ENETUNREACH);
	}

	buff->len += tmp;
	t1 = (struct tcphdr *)((char *)t1 +tmp);

	memcpy(t1,(void *)&(sk->dummy_th), sizeof(*t1));
	t1->seq = ntohl(sk->write_seq++);	/* SYN consumes a sequence number */
	sk->sent_seq = sk->write_seq;
	buff->h.seq = sk->write_seq;
	t1->ack = 0;
	/* NOTE(review): a window of 2 is advertised in the SYN here;
	   looks deliberately tiny until the handshake completes —
	   confirm against the receive path before changing. */
	t1->window = 2;
	t1->res1=0;
	t1->res2=0;
	t1->rst = 0;
	t1->urg = 0;
	t1->psh = 0;
	t1->syn = 1;
	t1->urg_ptr = 0;
	t1->doff = 6;	/* 24 bytes: header plus MSS option */

	/* Apply any window clamp configured on the route. */
	if(rt!=NULL && (rt->rt_flags&RTF_WINDOW))
		sk->window_clamp=rt->rt_window;
	else
		sk->window_clamp=0;

	/* MSS: user setting, then route, then classful-net heuristic
	   (off-net destinations get a conservative 576-byte MTU). */
	if (sk->user_mss)
		sk->mtu = sk->user_mss;
	else if(rt!=NULL && (rt->rt_flags&RTF_MTU))
		sk->mtu = rt->rt_mss;
	else
	{
#ifdef CONFIG_INET_SNARL
		if ((sk->saddr ^ sk->daddr) & default_mask(sk->saddr))
#else
		if ((sk->saddr ^ sk->daddr) & dev->pa_mask)
#endif
			sk->mtu = 576 - HEADER_SIZE;
		else
			sk->mtu = MAX_WINDOW;
	}

	/*
	 *	Keep the mtu sane and never above what the device carries.
	 */
	if(sk->mtu <32)
		sk->mtu = 32;	/* sanity floor */

	sk->mtu = min(sk->mtu, dev->mtu - HEADER_SIZE);

	/*
	 *	Put in the TCP option to say our MSS.
	 */
	ptr = (unsigned char *)(t1+1);
	ptr[0] = 2;	/* option kind: MSS */
	ptr[1] = 4;	/* option length */
	ptr[2] = (sk->mtu) >> 8;
	ptr[3] = (sk->mtu) & 0xff;
	tcp_send_check(t1, sk->saddr, sk->daddr,
		  sizeof(struct tcphdr) + 4, sk);

	/*
	 *	Enter SYN_SENT before transmitting so an immediate reply
	 *	finds us in the right state.
	 */
	tcp_set_state(sk,TCP_SYN_SENT);
	sk->rto = TCP_TIMEOUT_INIT;
	reset_timer(sk, TIME_WRITE, sk->rto);	/* retransmit the SYN until answered */
	sk->retransmits = TCP_RETR2 - TCP_SYN_RETRIES;

	sk->prot->queue_xmit(sk, dev, buff, 0);
	tcp_statistics.TcpActiveOpens++;
	tcp_statistics.TcpOutSegs++;

	release_sock(sk);
	return(0);
}
3672
3673
3674
/*
 *	Check whether a received segment is acceptable, i.e. whether it
 *	overlaps the current receive window at all (RFC 793 style test).
 *
 *	Returns 1 if the caller should process the segment, 0 if it was
 *	fully handled here (dropped, or answered with a duplicate ACK).
 */
static int
tcp_sequence(struct sock *sk, struct tcphdr *th, short len,
	struct options *opt, unsigned long saddr, struct device *dev)
{
	unsigned long next_seq;

	/* Payload length; a FIN consumes one extra sequence number. */
	next_seq = len - 4*th->doff;
	if (th->fin)
		next_seq++;

	/* A zero receive window can only accept zero-length segments. */
	if (next_seq && !sk->window)
		goto ignore_it;
	next_seq += th->seq;	/* first sequence number past this segment */

	/*
	 *	The segment must end at or beyond what we have already
	 *	acknowledged (otherwise it is entirely old data)...
	 */
	if (!after(next_seq+1, sk->acked_seq))
		goto ignore_it;

	/*	...and must start inside the offered window. */
	if (!before(th->seq, sk->acked_seq + sk->window + 1))
		goto ignore_it;

	/* At least partially inside the window: process it. */
	return 1;

ignore_it:
	/* Never answer a reset: that way lies an ACK war. */
	if (th->rst)
		return 0;

	/*
	 *	During connection establishment an out-of-window segment
	 *	gets a reset.  NOTE(review): the reset is built with the
	 *	socket's own saddr/daddr - presumably intentional for
	 *	killing half-open peers; confirm against tcp_reset().
	 */
	if (sk->state==TCP_SYN_SENT || sk->state==TCP_SYN_RECV) {
		tcp_reset(sk->saddr,sk->daddr,th,sk->prot,NULL,dev, sk->ip_tos,sk->ip_ttl);
		return 1;
	}

	/* Otherwise send a duplicate ACK to resynchronise the sender. */
	tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
	return 0;
}
3726
3727
3728 #ifdef TCP_FASTPATH
3729
3730
3731
3732
3733
3734
3735
3736 static inline int tcp_clean_end(struct sock *sk)
3737 {
3738 struct sk_buff *skb=skb_peek(&sk->receive_queue);
3739 if(skb==NULL || sk->receive_queue.prev->acked)
3740 return 1;
3741 }
3742
3743 #endif
3744
/*
 *	Main TCP receive routine, called from the IP layer for every
 *	incoming TCP segment (and re-entered with redo==1 when a
 *	segment is replayed from the socket backlog).  Validates the
 *	checksum, demultiplexes to the owning socket and dispatches on
 *	the socket state (RFC 793 event processing).
 *
 *	Always returns 0; the skb is either queued or freed here.
 */
int
tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
	unsigned long daddr, unsigned short len,
	unsigned long saddr, int redo, struct inet_protocol * protocol)
{
	struct tcphdr *th;
	struct sock *sk;

	if (!skb)
	{
		return(0);
	}

	if (!dev)
	{
		return(0);
	}

	tcp_statistics.TcpInSegs++;

	/* Discard anything not addressed to this host. */
	if(skb->pkt_type!=PACKET_HOST)
	{
		kfree_skb(skb,FREE_READ);
		return(0);
	}

	th = skb->h.th;

	/*
	 *	Find the socket (full four-tuple demultiplex).
	 */
	sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);

	/*
	 *	A zapped socket has already received a valid reset and is
	 *	to all intents and purposes dead - treat it as no socket.
	 */
	if (sk!=NULL && sk->zapped)
		sk=NULL;

	if (!redo)
	{
		/* First pass: verify the checksum... */
		if (tcp_check(th, len, saddr, daddr ))
		{
			skb->sk = NULL;
			kfree_skb(skb,FREE_READ);
			/*
			 *	Bad checksum: discard silently.
			 */
			return(0);
		}
		th->seq = ntohl(th->seq);

		/* ...no socket: answer everything but a reset with a reset. */
		if (sk == NULL)
		{
			if (!th->rst)
				tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev,skb->ip_hdr->tos,255);
			skb->sk = NULL;
			kfree_skb(skb, FREE_READ);
			return(0);
		}

		skb->len = len;
		skb->sk = sk;
		skb->acked = 0;
		skb->used = 0;
		skb->free = 0;
		skb->saddr = daddr;
		skb->daddr = saddr;

		/* If the socket is busy, defer the segment to the backlog. */
		cli();
		if (sk->inuse)
		{
			skb_queue_head(&sk->back_log, skb);
			sti();
			return(0);
		}
		sk->inuse = 1;
		sti();
	}
	else
	{
		/* Backlog replay: the socket may have gone away meanwhile. */
		if (!sk)
		{
			return(0);
		}
	}

	if (!sk->prot)
	{
		return(0);
	}

	/*
	 *	Charge the buffer to the socket; if the receive buffer is
	 *	already full, drop the segment (the sender will retransmit).
	 */
	if (sk->rmem_alloc + skb->mem_len >= sk->rcvbuf)
	{
		skb->sk = NULL;
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	}

	sk->rmem_alloc += skb->mem_len;

#ifdef TCP_FASTPATH
	/*
	 *	Fast path: an ESTABLISHED connection receiving a plain
	 *	data segment with no control flags set.
	 */
	if(!(sk->shutdown & RCV_SHUTDOWN) && sk->state==TCP_ESTABLISHED && !th->urg && !th->syn && !th->fin && !th->rst)
	{
		/* Exactly in sequence, window open, queue end clean? */
		if(th->seq == sk->acked_seq+1 && sk->window && tcp_clean_end(sk))
		{
			if(th->ack && !tcp_ack(sk, th, saddr, len))
			{
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return 0;
			}

			/* Strip the header; queue the payload directly. */
			skb->len -= (th->doff *4);
			sk->bytes_rcv += skb->len;
			tcp_rx_hit2++;
			if(skb->len)
			{
				skb_queue_tail(&sk->receive_queue,skb);
				if(sk->window >= skb->len)
					sk->window-=skb->len;
				else
					sk->window=0;
				sk->acked_seq = th->seq+skb->len;
				skb->acked=1;
				/* Ack now, or batch acks if delayed acks allow. */
				if(!sk->delay_acks || sk->ack_backlog >= sk->max_ack_backlog ||
				   sk->bytes_rcv > sk->max_unacked)
				{
					tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th , saddr);
				}
				else
				{
					sk->ack_backlog++;
					reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
				}
				if(!sk->dead)
					sk->data_ready(sk,0);
				release_sock(sk);
				return 0;
			}
		}

		/* Partial fast path: still skips the big state switch. */
		tcp_rx_hit1++;
		if(!tcp_sequence(sk, th, len, opt, saddr, dev))
		{
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return 0;
		}
		if(th->ack && !tcp_ack(sk, th, saddr, len))
		{
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return 0;
		}
		if(tcp_data(skb, sk, saddr, len))
			kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return 0;
	}
	tcp_rx_miss++;
#endif

	/*
	 *	Slow path: full per-state processing.
	 */
	switch(sk->state)
	{
		/*
		 *	A reset here shuts the connection down if it is
		 *	waiting for an ack that is never coming.
		 */
		case TCP_LAST_ACK:
			if (th->rst)
			{
				sk->zapped=1;
				sk->err = ECONNRESET;
				tcp_set_state(sk,TCP_CLOSE);
				sk->shutdown = SHUTDOWN_MASK;
				if (!sk->dead)
				{
					sk->state_change(sk);
				}
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
			/* fall through - otherwise handled like the states below */

		case TCP_ESTABLISHED:
		case TCP_CLOSE_WAIT:
		case TCP_CLOSING:
		case TCP_FIN_WAIT1:
		case TCP_FIN_WAIT2:
		case TCP_TIME_WAIT:
			/* Out-of-window segments are acked or dropped here. */
			if (!tcp_sequence(sk, th, len, opt, saddr,dev))
			{
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			if (th->rst)
			{
				tcp_statistics.TcpEstabResets++;
				sk->zapped=1;
				/* The connection is really dead now. */
				sk->err = ECONNRESET;
				if (sk->state == TCP_CLOSE_WAIT)
				{
					sk->err = EPIPE;
				}

				tcp_set_state(sk,TCP_CLOSE);
				sk->shutdown = SHUTDOWN_MASK;
				if (!sk->dead)
				{
					sk->state_change(sk);
				}
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
			/* An in-window SYN on a synchronized connection: reset. */
			if (th->syn)
			{
				tcp_statistics.TcpEstabResets++;
				sk->err = ECONNRESET;
				tcp_set_state(sk,TCP_CLOSE);
				sk->shutdown = SHUTDOWN_MASK;
				tcp_reset(daddr, saddr, th, sk->prot, opt,dev, sk->ip_tos,sk->ip_ttl);
				if (!sk->dead) {
					sk->state_change(sk);
				}
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			if (th->ack && !tcp_ack(sk, th, saddr, len)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			if (tcp_urg(sk, th, saddr, len)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			/* tcp_data() returns nonzero when the skb was not queued. */
			if (tcp_data(skb, sk, saddr, len)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			if (th->fin && tcp_fin(skb, sk, th, saddr, dev)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			release_sock(sk);
			return(0);

		case TCP_CLOSE:
			if (sk->dead || sk->daddr) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			if (!th->rst) {
				if (!th->ack)
					th->ack_seq = 0;
				tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);

		case TCP_LISTEN:
			if (th->rst) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
			/* An ACK to a listening socket gets a reset (RFC 793). */
			if (th->ack) {
				tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			if (th->syn)
			{
				/*
				 *	Begin the three-way handshake;
				 *	tcp_conn_request() takes over the skb.
				 */
				tcp_conn_request(sk, skb, daddr, saddr, opt, dev);
				release_sock(sk);
				return(0);
			}

			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);

		case TCP_SYN_RECV:
			if (th->syn) {
				/* Duplicate SYN while in SYN_RECV: ignore it. */
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

		/* fall through - remaining states share the checks below */
		default:
			if (!tcp_sequence(sk, th, len, opt, saddr,dev))
			{
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

		case TCP_SYN_SENT:
			if (th->rst)
			{
				/* Active open refused. */
				tcp_statistics.TcpAttemptFails++;
				sk->err = ECONNREFUSED;
				tcp_set_state(sk,TCP_CLOSE);
				sk->shutdown = SHUTDOWN_MASK;
				sk->zapped = 1;
				if (!sk->dead)
				{
					sk->state_change(sk);
				}
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
			if (!th->ack)
			{
				/* SYN without ACK: simultaneous open. */
				if (th->syn)
				{
					tcp_set_state(sk,TCP_SYN_RECV);
				}
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			switch(sk->state)
			{
				case TCP_SYN_SENT:
					if (!tcp_ack(sk, th, saddr, len))
					{
						/* Bad ack of our SYN: reset the peer. */
						tcp_statistics.TcpAttemptFails++;
						tcp_reset(daddr, saddr, th,
							sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
						kfree_skb(skb, FREE_READ);
						release_sock(sk);
						return(0);
					}

					/*
					 *	If the SYN bit is also set we go to
					 *	ESTABLISHED via the SYN_RECV code below.
					 */
					if (!th->syn)
					{
						kfree_skb(skb, FREE_READ);
						release_sock(sk);
						return(0);
					}

					/* Ack the peer's SYN and fall through. */
					sk->acked_seq = th->seq+1;
					sk->fin_seq = th->seq;
					tcp_send_ack(sk->sent_seq, th->seq+1,
						sk, th, sk->daddr);

				case TCP_SYN_RECV:
					if (!tcp_ack(sk, th, saddr, len))
					{
						tcp_statistics.TcpAttemptFails++;
						tcp_reset(daddr, saddr, th,
							sk->prot, opt, dev,sk->ip_tos,sk->ip_ttl);
						kfree_skb(skb, FREE_READ);
						release_sock(sk);
						return(0);
					}

					/* Handshake complete. */
					tcp_set_state(sk,TCP_ESTABLISHED);

					/*
					 *	Pick up TCP options (e.g. MSS) from the
					 *	SYN segment and record the peer's port.
					 */
					tcp_options(sk, th);
					sk->dummy_th.dest = th->source;
					sk->copied_seq = sk->acked_seq-1;
					if (!sk->dead)
					{
						sk->state_change(sk);
					}

					/*
					 *	A zero max_window would stall the
					 *	connection forever; substitute a
					 *	minimal default.
					 */
					if (sk->max_window == 0)
					{
						sk->max_window = 32;
						sk->mss = min(sk->max_window, sk->mtu);
					}

					/*
					 *	Process any data carried on this
					 *	handshake segment.
					 */
					if (th->urg)
					{
						if (tcp_urg(sk, th, saddr, len))
						{
							kfree_skb(skb, FREE_READ);
							release_sock(sk);
							return(0);
						}
					}
					if (tcp_data(skb, sk, saddr, len))
						kfree_skb(skb, FREE_READ);

					if (th->fin)
						tcp_fin(skb, sk, th, saddr, dev);
					release_sock(sk);
					return(0);
			}

			/* Other states (with a valid ack): urgent, data, FIN. */
			if (th->urg)
			{
				if (tcp_urg(sk, th, saddr, len))
				{
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}
			}
			if (tcp_data(skb, sk, saddr, len))
			{
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			if (!th->fin)
			{
				release_sock(sk);
				return(0);
			}
			tcp_fin(skb, sk, th, saddr, dev);
			release_sock(sk);
			return(0);
	}
}
4259
4260
4261
4262
4263
4264
4265
/*
 *	Send an ack-only segment carrying the current window and ack
 *	state.  The sequence number used is sent_seq-1, which the peer
 *	treats as a duplicate and must answer with an ack re-announcing
 *	its window - this is the zero-window probe.
 */
static void tcp_write_wakeup(struct sock *sk)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	struct device *dev=NULL;
	int tmp;

	if (sk->zapped)
		return;	/* After a valid reset we can send no more */

	/*
	 *	Only probe states in which data may still be outstanding.
	 */
	if (sk->state != TCP_ESTABLISHED &&
	    sk->state != TCP_CLOSE_WAIT &&
	    sk->state != TCP_FIN_WAIT1 &&
	    sk->state != TCP_LAST_ACK &&
	    sk->state != TCP_CLOSING
	) {
		return;
	}

	/* Atomic allocation: this can run from timer context. */
	buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
	if (buff == NULL)
		return;	/* No memory: just skip this probe */

	buff->len = sizeof(struct tcphdr);
	buff->free = 1;
	buff->sk = sk;
	buff->localroute = sk->localroute;

	t1 = (struct tcphdr *) buff->data;

	/* Put in the IP header and routing stuff (also picks dev). */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
		IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0)
	{
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		return;
	}

	buff->len += tmp;
	t1 = (struct tcphdr *)((char *)t1 +tmp);

	memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));

	/*
	 *	Use a previous sequence.  This should cause the other
	 *	end to send an ack.
	 */
	t1->seq = htonl(sk->sent_seq-1);
	t1->ack = 1;
	t1->res1= 0;
	t1->res2= 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->psh = 0;
	t1->fin = 0;
	t1->syn = 0;
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->window = ntohs(tcp_select_window(sk));
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

	/*
	 *	Send it and free it.  The "free me" flag prevents the
	 *	retransmit timer from being restarted for this segment.
	 */
	sk->prot->queue_xmit(sk, dev, buff, 1);
	tcp_statistics.TcpOutSegs++;
}
4339
4340 void
4341 tcp_send_probe0(struct sock *sk)
4342 {
4343 if (sk->zapped)
4344 return;
4345
4346 tcp_write_wakeup(sk);
4347
4348 sk->backoff++;
4349 sk->rto = min(sk->rto << 1, 120*HZ);
4350 reset_timer (sk, TIME_PROBE0, sk->rto);
4351 sk->retransmits++;
4352 sk->prot->retransmits ++;
4353 }
4354
4355
4356
4357
4358
4359 int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
4360 {
4361 int val,err;
4362
4363 if(level!=SOL_TCP)
4364 return ip_setsockopt(sk,level,optname,optval,optlen);
4365
4366 if (optval == NULL)
4367 return(-EINVAL);
4368
4369 err=verify_area(VERIFY_READ, optval, sizeof(int));
4370 if(err)
4371 return err;
4372
4373 val = get_fs_long((unsigned long *)optval);
4374
4375 switch(optname)
4376 {
4377 case TCP_MAXSEG:
4378
4379
4380
4381
4382
4383 if(val<1||val>MAX_WINDOW)
4384 return -EINVAL;
4385 sk->user_mss=val;
4386 return 0;
4387 case TCP_NODELAY:
4388 sk->nonagle=(val==0)?0:1;
4389 return 0;
4390 default:
4391 return(-ENOPROTOOPT);
4392 }
4393 }
4394
4395 int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
4396 {
4397 int val,err;
4398
4399 if(level!=SOL_TCP)
4400 return ip_getsockopt(sk,level,optname,optval,optlen);
4401
4402 switch(optname)
4403 {
4404 case TCP_MAXSEG:
4405 val=sk->user_mss;
4406 break;
4407 case TCP_NODELAY:
4408 val=sk->nonagle;
4409 break;
4410 default:
4411 return(-ENOPROTOOPT);
4412 }
4413 err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
4414 if(err)
4415 return err;
4416 put_fs_long(sizeof(int),(unsigned long *) optlen);
4417
4418 err=verify_area(VERIFY_WRITE, optval, sizeof(int));
4419 if(err)
4420 return err;
4421 put_fs_long(val,(unsigned long *)optval);
4422
4423 return(0);
4424 }
4425
4426
/*
 *	The TCP protocol operations table plugged into the generic
 *	INET socket layer.  Slot order follows struct proto; the
 *	annotations below are inferred from the handlers bound here -
 *	confirm field names against the struct proto declaration.
 */
struct proto tcp_prot = {
	sock_wmalloc,		/* write-buffer allocation */
	sock_rmalloc,		/* read-buffer allocation */
	sock_wfree,		/* write-buffer free */
	sock_rfree,		/* read-buffer free */
	sock_rspace,		/* free receive-buffer space */
	sock_wspace,		/* free send-buffer space */
	tcp_close,
	tcp_read,
	tcp_write,
	tcp_sendto,
	tcp_recvfrom,
	ip_build_header,	/* header construction delegated to IP */
	tcp_connect,
	tcp_accept,
	ip_queue_xmit,		/* transmission delegated to IP */
	tcp_retransmit,
	tcp_write_wakeup,
	tcp_read_wakeup,
	tcp_rcv,		/* input handler */
	tcp_select,
	tcp_ioctl,
	NULL,			/* no init hook */
	tcp_shutdown,
	tcp_setsockopt,
	tcp_getsockopt,
	128,			/* NOTE(review): presumably max_header - verify in sock.h */
	0,			/* NOTE(review): presumably retransmit counter - verify */
	{NULL,},		/* NOTE(review): presumably the per-port socket array */
	"TCP"			/* protocol name */
};