This source file includes following definitions.
- min
- tcp_select_window
- tcp_time_wait
- tcp_retransmit
- tcp_err
- tcp_readable
- tcp_select
- tcp_ioctl
- tcp_check
- tcp_send_check
- tcp_send_skb
- tcp_dequeue_partial
- tcp_send_partial
- tcp_enqueue_partial
- tcp_send_ack
- tcp_build_header
- tcp_write
- tcp_sendto
- tcp_read_wakeup
- cleanup_rbuf
- tcp_read_urg
- tcp_read
- tcp_shutdown
- tcp_recvfrom
- tcp_reset
- tcp_options
- default_mask
- tcp_conn_request
- tcp_close
- tcp_write_xmit
- sort_send
- tcp_ack
- tcp_data
- tcp_check_urg
- tcp_urg
- tcp_fin
- tcp_accept
- tcp_connect
- tcp_sequence
- tcp_rcv
- tcp_write_wakeup
- tcp_send_probe0
- tcp_setsockopt
- tcp_getsockopt
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122 #include <linux/types.h>
123 #include <linux/sched.h>
124 #include <linux/mm.h>
125 #include <linux/string.h>
126 #include <linux/socket.h>
127 #include <linux/sockios.h>
128 #include <linux/termios.h>
129 #include <linux/in.h>
130 #include <linux/fcntl.h>
131 #include <linux/inet.h>
132 #include <linux/netdevice.h>
133 #include "snmp.h"
134 #include "ip.h"
135 #include "protocol.h"
136 #include "icmp.h"
137 #include "tcp.h"
138 #include <linux/skbuff.h>
139 #include "sock.h"
140 #include "route.h"
141 #include <linux/errno.h>
142 #include <linux/timer.h>
143 #include <asm/system.h>
144 #include <asm/segment.h>
145 #include <linux/mm.h>
146
147 #define SEQ_TICK 3
148 unsigned long seq_offset;
149 struct tcp_mib tcp_statistics;
150
151
static __inline__ int
min(unsigned int a, unsigned int b)
{
	/* Return the smaller of two unsigned values (result used as int). */
	return (a <= b) ? a : b;
}
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175 int tcp_select_window(struct sock *sk)
176 {
177 int new_window = sk->prot->rspace(sk);
178
179
180
181
182
183
184
185
186
187
188 if (new_window < min(sk->mss, MAX_WINDOW/2) ||
189 new_window < sk->window)
190 return(sk->window);
191 return(new_window);
192 }
193
194
195
/*
 * Move a socket into TIME_WAIT: both directions are shut down, waiters
 * are notified, and a timer holds the socket until final close.
 */
static void tcp_time_wait(struct sock *sk)
{
	sk->state = TCP_TIME_WAIT;
	sk->shutdown = SHUTDOWN_MASK;
	/* Wake anyone sleeping on a state change (unless already dead). */
	if (!sk->dead)
		sk->state_change(sk);
	/* Keep the endpoint around for the TIME_WAIT interval. */
	reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
}
204
205
206
207
208
209
210
211
212 static void
213 tcp_retransmit(struct sock *sk, int all)
214 {
215 if (all) {
216 ip_retransmit(sk, all);
217 return;
218 }
219
220 sk->ssthresh = sk->cong_window >> 1;
221
222 sk->cong_count = 0;
223
224 sk->cong_window = 1;
225
226
227 ip_retransmit(sk, all);
228 }
229
230
231
232
233
234
235
236
237
238
239 void
240 tcp_err(int err, unsigned char *header, unsigned long daddr,
241 unsigned long saddr, struct inet_protocol *protocol)
242 {
243 struct tcphdr *th;
244 struct sock *sk;
245 struct iphdr *iph=(struct iphdr *)header;
246
247 header+=4*iph->ihl;
248
249
250 th =(struct tcphdr *)header;
251 sk = get_sock(&tcp_prot, th->source, daddr, th->dest, saddr);
252
253 if (sk == NULL) return;
254
255 if(err<0)
256 {
257 sk->err = -err;
258 sk->error_report(sk);
259 return;
260 }
261
262 if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8)) {
263
264
265
266
267
268 if (sk->cong_window > 4) sk->cong_window--;
269 return;
270 }
271
272 sk->err = icmp_err_convert[err & 0xff].errno;
273
274
275
276
277
278 if (icmp_err_convert[err & 0xff].fatal) {
279 if (sk->state == TCP_SYN_SENT) {
280 tcp_statistics.TcpAttemptFails++;
281 sk->state = TCP_CLOSE;
282 sk->error_report(sk);
283 }
284 }
285 return;
286 }
287
288
289
290
291
292
293
/*
 * Count how many bytes a read() could currently return on this socket.
 * Walks the receive queue counting in-sequence bytes, stops at a gap or
 * at a PSH segment, and subtracts one for an out-of-band byte that will
 * not be delivered inline.  Runs with interrupts off because the queue
 * is modified from the network bottom half.
 */
static int
tcp_readable(struct sock *sk)
{
	unsigned long counted;	/* next sequence number to account for */
	unsigned long amount;	/* readable bytes found so far */
	struct sk_buff *skb;
	int sum;
	unsigned long flags;

	if(sk && sk->debug)
		printk("tcp_readable: %p - ",sk);

	save_flags(flags);
	cli();
	if (sk == NULL || (skb = skb_peek(&sk->receive_queue)) == NULL)
	{
		restore_flags(flags);
		if(sk && sk->debug)
			printk("empty\n");
		return(0);
	}

	counted = sk->copied_seq+1;	/* first byte not yet copied to user */
	amount = 0;

	do {
		/* A sequence gap -- nothing beyond it is readable yet. */
		if (before(counted, skb->h.th->seq))
			break;
		/* Bytes in this skb at or after 'counted'. */
		sum = skb->len -(counted - skb->h.th->seq);
		if (skb->h.th->syn)
			sum++;
		if (sum >= 0) {
			amount += sum;
			/* A SYN occupies sequence space but carries no data. */
			if (skb->h.th->syn) amount--;
			counted += sum;
		}
		/* PSH marks a delivery boundary: stop counting here. */
		if (amount && skb->h.th->psh) break;
		skb = skb->next;
	} while(skb != (struct sk_buff *)&sk->receive_queue);
	/* Urgent byte within the counted range is read out of band. */
	if (amount && !sk->urginline && sk->urg_data &&
	    (sk->urg_seq - sk->copied_seq) <= (counted - sk->copied_seq))
		amount--;
	restore_flags(flags);
	if(sk->debug)
		printk("got %lu bytes.\n",amount);
	return(amount);
}
342
343
344
345
346
347
348
/*
 * select() support for TCP sockets.  Returns 1 when the requested
 * condition (readable / writable / exceptional) already holds, else
 * registers the caller on the socket's wait queue and returns 0.
 */
static int
tcp_select(struct sock *sk, int sel_type, select_table *wait)
{
	sk->inuse = 1;
	switch(sel_type) {
	case SEL_IN:
		if(sk->debug)
			printk("select in");
		select_wait(sk->sleep, wait);
		if(sk->debug)
			printk("-select out");
		/* Readable: queued data, a pending connection, an error,
		   or a receive shutdown (EOF is "readable"). */
		if (skb_peek(&sk->receive_queue) != NULL) {
			if (sk->state == TCP_LISTEN || tcp_readable(sk)) {
				release_sock(sk);
				if(sk->debug)
					printk("-select ok data\n");
				return(1);
			}
		}
		if (sk->err != 0)
		{
			release_sock(sk);
			if(sk->debug)
				printk("-select ok error");
			return(1);
		}
		if (sk->shutdown & RCV_SHUTDOWN) {
			release_sock(sk);
			if(sk->debug)
				printk("-select ok down\n");
			return(1);
		} else {
			release_sock(sk);
			if(sk->debug)
				printk("-select fail\n");
			return(0);
		}
	case SEL_OUT:
		select_wait(sk->sleep, wait);
		/* A shut-down sender is never writable. */
		if (sk->shutdown & SEND_SHUTDOWN) {
			release_sock(sk);
			return(0);
		}
		/* Writable only with room for at least one full segment,
		   and only once the handshake has completed. */
		if (sk->prot->wspace(sk) >= sk->mss) {
			release_sock(sk);
			if (sk->state == TCP_SYN_RECV ||
			    sk->state == TCP_SYN_SENT) return(0);
			return(1);
		}
		release_sock(sk);
		return(0);
	case SEL_EX:
		select_wait(sk->sleep,wait);
		/* Exceptional: pending error or urgent data. */
		if (sk->err || sk->urg_data) {
			release_sock(sk);
			return(1);
		}
		release_sock(sk);
		return(0);
	}

	release_sock(sk);
	return(0);
}
421
422
/*
 * ioctl() handling for TCP sockets: queue occupancy queries and the
 * at-urgent-mark test.  'arg' is a user-space pointer; results are
 * written back with put_fs_long after verify_area.
 */
int
tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int err;
	switch(cmd) {

	case TIOCINQ:
#ifdef FIXME
	case FIONREAD:
#endif
		{
			/* Bytes currently readable from the receive queue. */
			unsigned long amount;

			if (sk->state == TCP_LISTEN) return(-EINVAL);

			sk->inuse = 1;
			amount = tcp_readable(sk);
			release_sock(sk);
			err=verify_area(VERIFY_WRITE,(void *)arg,
						   sizeof(unsigned long));
			if(err)
				return err;
			put_fs_long(amount,(unsigned long *)arg);
			return(0);
		}
	case SIOCATMARK:
		{
			/* Nonzero iff the next byte to read is the urgent byte. */
			int answ = sk->urg_data && sk->urg_seq == sk->copied_seq+1;

			err = verify_area(VERIFY_WRITE,(void *) arg,
						  sizeof(unsigned long));
			if (err)
				return err;
			/* NOTE(review): cast is (int *) while the area was
			   verified as unsigned long -- looks inherited; the
			   write itself is still a full long. */
			put_fs_long(answ,(int *) arg);
			return(0);
		}
	case TIOCOUTQ:
		{
			/* Free space in the send buffer. */
			unsigned long amount;

			if (sk->state == TCP_LISTEN) return(-EINVAL);
			amount = sk->prot->wspace(sk);
			err=verify_area(VERIFY_WRITE,(void *)arg,
						   sizeof(unsigned long));
			if(err)
				return err;
			put_fs_long(amount,(unsigned long *)arg);
			return(0);
		}
	default:
		return(-EINVAL);
	}
}
476
477
478
/*
 * Compute the TCP checksum: the 16-bit one's-complement sum over the
 * pseudo header (source, destination, protocol, length) and the TCP
 * header plus data, folded and inverted.  Implemented with i386 inline
 * assembly; each fragment feeds its running sum into the next.
 */
unsigned short
tcp_check(struct tcphdr *th, int len,
	  unsigned long saddr, unsigned long daddr)
{
	unsigned long sum;

	if (saddr == 0) saddr = ip_my_addr();
	/* Pseudo-header: daddr + saddr + (len<<16 | proto) with carry. */
	__asm__("\t addl %%ecx,%%ebx\n"
		"\t adcl %%edx,%%ebx\n"
		"\t adcl $0, %%ebx\n"
		: "=b"(sum)
		: "0"(daddr), "c"(saddr), "d"((ntohs(len) << 16) + IPPROTO_TCP*256)
		: "cx","bx","dx" );

	/* Sum whole 32-bit words of the segment. */
	if (len > 3) {
		__asm__("\tclc\n"
			"1:\n"
			"\t lodsl\n"
			"\t adcl %%eax, %%ebx\n"
			"\t loop 1b\n"
			"\t adcl $0, %%ebx\n"
			: "=b"(sum) , "=S"(th)
			: "0"(sum), "c"(len/4) ,"1"(th)
			: "ax", "cx", "bx", "si" );
	}

	/* Fold the 32-bit sum into 16 bits. */
	__asm__("\t movl %%ebx, %%ecx\n"
		"\t shrl $16,%%ecx\n"
		"\t addw %%cx, %%bx\n"
		"\t adcw $0, %%bx\n"
		: "=b"(sum)
		: "0"(sum)
		: "bx", "cx");

	/* Trailing 16-bit half-word, if any. */
	if ((len & 2) != 0) {
		__asm__("\t lodsw\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum), "=S"(th)
			: "0"(sum) ,"1"(th)
			: "si", "ax", "bx");
	}

	/* Final odd byte, zero-extended to 16 bits. */
	if ((len & 1) != 0) {
		__asm__("\t lodsb\n"
			"\t movb $0,%%ah\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum)
			: "0"(sum) ,"S"(th)
			: "si", "ax", "bx");
	}

	/* One's complement of the folded sum. */
	return((~sum) & 0xffff);
}
538
539
540 void tcp_send_check(struct tcphdr *th, unsigned long saddr,
541 unsigned long daddr, int len, struct sock *sk)
542 {
543 th->check = 0;
544 th->check = tcp_check(th, len, saddr, daddr);
545 return;
546 }
547
/*
 * Transmit (or queue) a fully built TCP segment.  Sanity-checks the
 * buffer, then either sends it immediately or parks it on the write
 * queue when the peer's window, pending retransmits, or the congestion
 * window forbid sending now.
 */
static void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
{
	int size;
	struct tcphdr * th = skb->h.th;

	/* Payload size counted from the TCP header onwards. */
	size = skb->len - ((unsigned char *) th - skb->data);

	/* Reject an skb whose header pointer is out of range. */
	if (size < sizeof(struct tcphdr) || size > skb->len) {
		printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
			skb, skb->data, th, skb->len);
		kfree_skb(skb, FREE_WRITE);
		return;
	}

	/* A dataless segment is only legal as a SYN or FIN. */
	if (size == sizeof(struct tcphdr)) {
		if(!th->syn && !th->fin) {
			printk("tcp_send_skb: attempt to queue a bogon.\n");
			kfree_skb(skb,FREE_WRITE);
			return;
		}
	}

	tcp_statistics.TcpOutSegs++;

	/* End sequence number of this segment (data bytes only). */
	skb->h.seq = ntohl(th->seq) + size - 4*th->doff;
	if (after(skb->h.seq, sk->window_seq) ||
	    (sk->retransmits && sk->timeout == TIME_WRITE) ||
	    sk->packets_out >= sk->cong_window) {
		/* Cannot send now -- queue it; checksum is filled in later
		   when the segment is actually transmitted. */
		th->check = 0;
		if (skb->next != NULL) {
			printk("tcp_send_partial: next != NULL\n");
			skb_unlink(skb);
		}
		skb_queue_tail(&sk->write_queue, skb);
		/* Zero-window with nothing in flight: start probing. */
		if (before(sk->window_seq, sk->write_queue.next->h.seq) &&
		    sk->send_head == NULL &&
		    sk->ack_backlog == 0)
			reset_timer(sk, TIME_PROBE0, sk->rto);
	} else {
		/* Send immediately, piggybacking the current ack/window. */
		th->ack_seq = ntohl(sk->acked_seq);
		th->window = ntohs(tcp_select_window(sk));

		tcp_send_check(th, sk->saddr, sk->daddr, size, sk);

		sk->sent_seq = sk->write_seq;
		sk->prot->queue_xmit(sk, skb->dev, skb, 0);
	}
}
602
603 struct sk_buff * tcp_dequeue_partial(struct sock * sk)
604 {
605 struct sk_buff * skb;
606 unsigned long flags;
607
608 save_flags(flags);
609 cli();
610 skb = sk->partial;
611 if (skb) {
612 sk->partial = NULL;
613 del_timer(&sk->partial_timer);
614 }
615 restore_flags(flags);
616 return skb;
617 }
618
619 static void tcp_send_partial(struct sock *sk)
620 {
621 struct sk_buff *skb;
622
623 if (sk == NULL)
624 return;
625 while ((skb = tcp_dequeue_partial(sk)) != NULL)
626 tcp_send_skb(sk, skb);
627 }
628
/*
 * Install 'skb' as the socket's pending partial packet and (re)arm a
 * one-tick timer to flush it.  Any previously pending partial packet is
 * swapped out atomically and sent immediately afterwards, outside the
 * interrupts-off region.
 */
void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk)
{
	struct sk_buff * tmp;
	unsigned long flags;

	save_flags(flags);
	cli();
	tmp = sk->partial;
	if (tmp)
		del_timer(&sk->partial_timer);
	sk->partial = skb;
	/* Flush after one tick if nothing else triggers a send first. */
	sk->partial_timer.expires = HZ;
	sk->partial_timer.function = (void (*)(unsigned long)) tcp_send_partial;
	sk->partial_timer.data = (unsigned long) sk;
	add_timer(&sk->partial_timer);
	restore_flags(flags);
	/* Send the displaced packet with interrupts enabled again. */
	if (tmp)
		tcp_send_skb(sk, tmp);
}
648
649
650
/*
 * Build and transmit a bare ACK segment carrying 'sequence'/'ack' and a
 * freshly selected window.  On allocation failure the ACK is deferred
 * via the ack_backlog counter and a short retransmit timer.
 */
static void
tcp_send_ack(unsigned long sequence, unsigned long ack,
	     struct sock *sk,
	     struct tcphdr *th, unsigned long daddr)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	struct device *dev = NULL;
	int tmp;

	/* A zapped (reset) socket must stay silent. */
	if(sk->zapped)
		return;

	buff = sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
	{
		/* No memory: remember we owe an ACK and try again soon. */
		sk->ack_backlog++;
		if (sk->timeout != TIME_WRITE && tcp_connected(sk->state))
		{
			reset_timer(sk, TIME_WRITE, 10);
		}
		return;
	}

	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;
	buff->localroute = sk->localroute;
	t1 =(struct tcphdr *) buff->data;

	/* Prepend the IP (and link) headers. */
	tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
				IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0)
	{
		buff->free=1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		return;
	}
	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)t1 +tmp);

	/* Start from the incoming header, then swap the port roles. */
	memcpy(t1, th, sizeof(*t1));

	t1->dest = th->source;
	t1->source = th->dest;
	t1->seq = ntohl(sequence);
	t1->ack = 1;
	sk->window = tcp_select_window(sk);
	t1->window = ntohs(sk->window);
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	t1->fin = 0;
	/* If this ACK is fully up to date, clear the pending-ack state
	   and retire the write timer (or fall back to keepalive). */
	if (ack == sk->acked_seq)
	{
		sk->ack_backlog = 0;
		sk->bytes_rcv = 0;
		sk->ack_timed = 0;
		if (sk->send_head == NULL && skb_peek(&sk->write_queue) == NULL
				&& sk->timeout == TIME_WRITE)
		{
			if(sk->keepopen)
				reset_timer(sk,TIME_KEEPOPEN,TCP_TIMEOUT_LEN);
			else
				delete_timer(sk);
		}
	}
	t1->ack_seq = ntohl(ack);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), sk);
	if (sk->debug)
		printk("\rtcp_ack: seq %lx ack %lx\n", sequence, ack);
	tcp_statistics.TcpOutSegs++;
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
738
739
740
741 static int
742 tcp_build_header(struct tcphdr *th, struct sock *sk, int push)
743 {
744
745
746 memcpy(th,(void *) &(sk->dummy_th), sizeof(*th));
747 th->seq = htonl(sk->write_seq);
748 th->psh =(push == 0) ? 1 : 0;
749 th->doff = sizeof(*th)/4;
750 th->ack = 1;
751 th->fin = 0;
752 sk->ack_backlog = 0;
753 sk->bytes_rcv = 0;
754 sk->ack_timed = 0;
755 th->ack_seq = htonl(sk->acked_seq);
756 sk->window = tcp_select_window(sk);
757 th->window = htons(sk->window);
758
759 return(sizeof(*th));
760 }
761
762
763
764
765
/*
 * Copy 'len' bytes of user data onto the connection, segmenting as we
 * go.  Handles waiting for connection establishment, appending to a
 * pending partial (Nagle) packet, allocating fresh skbs, and blocking
 * for buffer space when 'nonblock' is clear.  Returns bytes written or
 * a negative errno (errors after some data was copied return the
 * partial count instead).
 */
static int tcp_write(struct sock *sk, unsigned char *from,
	  int len, int nonblock, unsigned flags)
{
	int copied = 0;
	int copy;
	int tmp;
	struct sk_buff *skb;
	struct sk_buff *send_tmp;
	unsigned char *buff;
	struct proto *prot;
	struct device *dev = NULL;

	sk->inuse=1;
	prot = sk->prot;
	while(len > 0)
	{
		/* Pending socket error: report it (or the partial count). */
		if (sk->err)
		{
			release_sock(sk);
			if (copied)
				return(copied);
			tmp = -sk->err;
			sk->err = 0;
			return(tmp);
		}

		/* Sending side already shut down. */
		if (sk->shutdown & SEND_SHUTDOWN)
		{
			release_sock(sk);
			sk->err = EPIPE;
			if (copied)
				return(copied);
			sk->err = 0;
			return(-EPIPE);
		}

		/* Wait until the connection can carry data. */
		while(sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)
		{
			if (sk->err)
			{
				release_sock(sk);
				if (copied)
					return(copied);
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			/* Not connecting either: the connection is gone. */
			if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV)
			{
				release_sock(sk);
				if (copied)
					return(copied);

				if (sk->err)
				{
					tmp = -sk->err;
					sk->err = 0;
					return(tmp);
				}

				if (sk->keepopen)
				{
					send_sig(SIGPIPE, current, 0);
				}
				return(-EPIPE);
			}

			if (nonblock || copied)
			{
				release_sock(sk);
				if (copied)
					return(copied);
				return(-EAGAIN);
			}

			release_sock(sk);
			cli();

			/* Sleep until the handshake completes or fails. */
			if (sk->state != TCP_ESTABLISHED &&
			    sk->state != TCP_CLOSE_WAIT && sk->err == 0)
			{
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked)
				{
					sti();
					if (copied)
						return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
		}

		/* First try to top up an existing partial packet. */
		if ((skb = tcp_dequeue_partial(sk)) != NULL)
		{
			int hdrlen;

			/* Bytes of protocol headers preceding the payload. */
			hdrlen = ((unsigned long)skb->h.th - (unsigned long)skb->data)
				 + sizeof(struct tcphdr);

			/* OOB data goes in its own segment; otherwise append. */
			if (!(flags & MSG_OOB))
			{
				copy = min(sk->mss - (skb->len - hdrlen), len);

				if (copy <= 0)
				{
					printk("TCP: **bug**: \"copy\" <= 0!!\n");
					copy = 0;
				}

				memcpy_fromfs(skb->data + skb->len, from, copy);
				skb->len += copy;
				from += copy;
				copied += copy;
				len -= copy;
				sk->write_seq += copy;
			}
			/* Send when full, when OOB forces it, or when the
			   pipe is idle; otherwise re-queue as partial. */
			if ((skb->len - hdrlen) >= sk->mss ||
			    (flags & MSG_OOB) || !sk->packets_out)
				tcp_send_skb(sk, skb);
			else
				tcp_enqueue_partial(skb, sk);
			continue;
		}

		/* Choose a segment size: what the window allows, clamped
		   to the MSS (small or silly windows get a full MSS). */
		copy = sk->window_seq - sk->write_seq;
		if (copy <= 0 || copy < (sk->max_window >> 1) || copy > sk->mss)
			copy = sk->mss;
		if (copy > len)
			copy = len;

		/* An undersized non-OOB segment may become a partial, so
		   allocate it big enough to be filled up to the MTU. */
		send_tmp = NULL;
		if (copy < sk->mss && !(flags & MSG_OOB))
		{
			release_sock(sk);
			skb = prot->wmalloc(sk, sk->mtu + 128 + prot->max_header, 0, GFP_KERNEL);
			sk->inuse = 1;
			send_tmp = skb;
		}
		else
		{
			release_sock(sk);
			skb = prot->wmalloc(sk, copy + prot->max_header , 0, GFP_KERNEL);
			sk->inuse = 1;
		}

		/* No buffer memory: fail (nonblock) or sleep for space. */
		if (skb == NULL)
		{
			if (nonblock )
			{
				release_sock(sk);
				if (copied)
					return(copied);
				return(-EAGAIN);
			}

			tmp = sk->wmem_alloc;
			release_sock(sk);
			cli();

			/* Only sleep if no memory was freed meanwhile. */
			if (tmp <= sk->wmem_alloc &&
			  (sk->state == TCP_ESTABLISHED||sk->state == TCP_CLOSE_WAIT)
				&& sk->err == 0)
			{
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked)
				{
					sti();
					if (copied)
						return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
			continue;
		}

		skb->len = 0;
		skb->sk = sk;
		skb->free = 0;
		skb->localroute = sk->localroute|(flags&MSG_DONTROUTE);

		buff = skb->data;

		/* Build IP header, then the TCP header behind it. */
		tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev,
				 IPPROTO_TCP, sk->opt, skb->mem_len,sk->ip_tos,sk->ip_ttl);
		if (tmp < 0 )
		{
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			if (copied)
				return(copied);
			return(tmp);
		}
		skb->len += tmp;
		skb->dev = dev;
		buff += tmp;
		skb->h.th =(struct tcphdr *) buff;
		tmp = tcp_build_header((struct tcphdr *)buff, sk, len-copy);
		if (tmp < 0)
		{
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			if (copied)
				return(copied);
			return(tmp);
		}

		if (flags & MSG_OOB)
		{
			((struct tcphdr *)buff)->urg = 1;
			((struct tcphdr *)buff)->urg_ptr = ntohs(copy);
		}
		skb->len += tmp;
		memcpy_fromfs(buff+tmp, from, copy);

		from += copy;
		copied += copy;
		len -= copy;
		skb->len += copy;
		skb->free = 0;
		sk->write_seq += copy;

		/* Nagle: hold back a short segment while data is in flight. */
		if (send_tmp != NULL && sk->packets_out)
		{
			tcp_enqueue_partial(send_tmp, sk);
			continue;
		}
		tcp_send_skb(sk, skb);
	}
	sk->err = 0;

	/* Flush a leftover partial if the pipe is idle, or if Nagle is
	   disabled and the window still has room. */
	if(sk->partial && ((!sk->packets_out)
	   || (sk->nonagle && before(sk->write_seq , sk->window_seq))
	   ))
		tcp_send_partial(sk);

	release_sock(sk);
	return(copied);
}
1089
1090
1091 static int tcp_sendto(struct sock *sk, unsigned char *from,
1092 int len, int nonblock, unsigned flags,
1093 struct sockaddr_in *addr, int addr_len)
1094 {
1095 struct sockaddr_in sin;
1096
1097 if (flags & ~(MSG_OOB|MSG_DONTROUTE))
1098 return -EINVAL;
1099 if (addr_len < sizeof(sin))
1100 return(-EINVAL);
1101 memcpy_fromfs(&sin, addr, sizeof(sin));
1102 if (sin.sin_family && sin.sin_family != AF_INET)
1103 return(-EINVAL);
1104 if (sin.sin_port != sk->dummy_th.dest)
1105 return(-EINVAL);
1106 if (sin.sin_addr.s_addr != sk->daddr)
1107 return(-EINVAL);
1108 return(tcp_write(sk, from, len, nonblock, flags));
1109 }
1110
1111
/*
 * Send a window-update ACK if one is owed (ack_backlog nonzero), e.g.
 * after the reader freed receive buffer space.  On allocation failure
 * a short timer retries later.
 */
static void
tcp_read_wakeup(struct sock *sk)
{
	int tmp;
	struct device *dev = NULL;
	struct tcphdr *t1;
	struct sk_buff *buff;

	/* Nothing owed -- no segment needed. */
	if (!sk->ack_backlog)
		return;

	buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
	if (buff == NULL)
	{
		/* Out of memory: retry from the timer. */
		reset_timer(sk, TIME_WRITE, 10);
		return;
	}

	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;
	buff->localroute = sk->localroute;

	/* Prepend IP (and link) headers. */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
			       IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0)
	{
		buff->free=1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		return;
	}

	buff->len += tmp;
	t1 =(struct tcphdr *)(buff->data +tmp);

	/* Pure ACK at the current send sequence, fresh window. */
	memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
	t1->seq = htonl(sk->sent_seq);
	t1->ack = 1;
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	sk->window = tcp_select_window(sk);
	t1->window = ntohs(sk->window);
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
	sk->prot->queue_xmit(sk, dev, buff, 1);
	tcp_statistics.TcpOutSegs++;
}
1181
1182
1183
1184
1185
1186
1187
1188
1189
/*
 * Free fully-consumed skbs from the head of the receive queue and, if
 * that opened up significant buffer space, arrange for a window-update
 * ACK (immediately, or via a short ACK timer).
 */
static void cleanup_rbuf(struct sock *sk)
{
	unsigned long flags;
	int left;
	struct sk_buff *skb;

	if(sk->debug)
		printk("cleaning rbuf for sk=%p\n", sk);

	save_flags(flags);
	cli();

	/* Space before cleaning, to detect whether anything was freed. */
	left = sk->prot->rspace(sk);

	/* Drop every leading skb already marked as consumed. */
	while((skb=skb_peek(&sk->receive_queue)) != NULL)
	{
		if (!skb->used)
			break;
		skb_unlink(skb);
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
	}

	restore_flags(flags);

	if(sk->debug)
		printk("sk->rspace = %lu, was %d\n", sk->prot->rspace(sk),
			left);
	if (sk->prot->rspace(sk) != left)
	{
		/* Space was freed: the peer should learn about the new
		   window, so record that an ACK is owed. */
		sk->ack_backlog++;

		/* Big gain relative to the advertised window: tell the
		   peer right away; otherwise batch it behind a timer. */
		if ((sk->prot->rspace(sk) > (sk->window - sk->bytes_rcv + sk->mtu)))
		{
			tcp_read_wakeup(sk);
		}
		else
		{
			/* Keep an earlier deadline if one was pending. */
			int was_active = del_timer(&sk->timer);
			if (!was_active || TCP_ACK_TIME < sk->timer.expires)
			{
				reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
			}
			else
				add_timer(&sk->timer);
		}
	}
}
1270
1271
1272
/*
 * Read the single out-of-band (urgent) byte.  Returns 1 with the byte
 * stored at 'to', 0 on EOF/shutdown, or a negative errno.  Blocks
 * (unless 'nonblock') while the urgent byte is announced but not yet
 * arrived (URG_NOTYET).
 */
static int
tcp_read_urg(struct sock * sk, int nonblock,
	     unsigned char *to, int len, unsigned flags)
{
	struct wait_queue wait = { current, NULL };

	while (len > 0) {
		/* Inline-urgent mode, no urgent data, or already read. */
		if (sk->urginline || !sk->urg_data || sk->urg_data == URG_READ)
			return -EINVAL;
		if (sk->urg_data & URG_VALID) {
			/* Low byte of urg_data holds the OOB byte itself. */
			char c = sk->urg_data;
			if (!(flags & MSG_PEEK))
				sk->urg_data = URG_READ;
			put_fs_byte(c, to);
			return 1;
		}

		if (sk->err) {
			int tmp = -sk->err;
			sk->err = 0;
			return tmp;
		}

		/* Closed: first call reports EOF, later calls ENOTCONN. */
		if (sk->state == TCP_CLOSE || sk->done) {
			if (!sk->done) {
				sk->done = 1;
				return 0;
			}
			return -ENOTCONN;
		}

		if (sk->shutdown & RCV_SHUTDOWN) {
			sk->done = 1;
			return 0;
		}

		if (nonblock)
			return -EAGAIN;

		if (current->signal & ~current->blocked)
			return -ERESTARTSYS;

		/* Wait for the announced urgent byte to arrive. */
		current->state = TASK_INTERRUPTIBLE;
		add_wait_queue(sk->sleep, &wait);
		if ((sk->urg_data & URG_NOTYET) && sk->err == 0 &&
		    !(sk->shutdown & RCV_SHUTDOWN))
			schedule();
		remove_wait_queue(sk->sleep, &wait);
		current->state = TASK_RUNNING;
	}
	return 0;
}
1325
1326
1327
/*
 * Copy up to 'len' bytes of in-sequence receive data to user space.
 * MSG_PEEK reads without consuming (tracked via a local sequence
 * counter); reading stops at the urgent mark; blocks unless 'nonblock'.
 * Returns bytes copied or a negative errno.
 */
static int tcp_read(struct sock *sk, unsigned char *to,
	int len, int nonblock, unsigned flags)
{
	struct wait_queue wait = { current, NULL };
	int copied = 0;
	unsigned long peek_seq;
	unsigned long *seq;	/* points at copied_seq, or peek_seq for PEEK */
	unsigned long used;

	if (sk->state == TCP_LISTEN)
		return -ENOTCONN;

	/* Urgent data has its own path. */
	if (flags & MSG_OOB)
		return tcp_read_urg(sk, nonblock, to, len, flags);

	peek_seq = sk->copied_seq;
	seq = &sk->copied_seq;
	if (flags & MSG_PEEK)
		seq = &peek_seq;

	add_wait_queue(sk->sleep, &wait);
	sk->inuse = 1;
	while (len > 0) {
		struct sk_buff * skb;
		unsigned long offset;

		/* Never read past the urgent mark in one call. */
		if (copied && sk->urg_data && sk->urg_seq == 1+*seq)
			break;

		current->state = TASK_INTERRUPTIBLE;

		/* Find the skb containing the next sequence to deliver. */
		skb = skb_peek(&sk->receive_queue);
		do {
			if (!skb)
				break;
			/* Gap before this skb: nothing more is in sequence. */
			if (before(1+*seq, skb->h.th->seq))
				break;
			offset = 1 + *seq - skb->h.th->seq;
			if (skb->h.th->syn)
				offset--;
			if (offset < skb->len)
				goto found_ok_skb;
			/* Fully consumed: mark for cleanup_rbuf(). */
			if (!(flags & MSG_PEEK))
				skb->used = 1;
			skb = skb->next;
		} while (skb != (struct sk_buff *)&sk->receive_queue);

		if (copied)
			break;

		if (sk->err) {
			copied = -sk->err;
			sk->err = 0;
			break;
		}

		/* Closed: first call returns EOF, later calls ENOTCONN. */
		if (sk->state == TCP_CLOSE) {
			if (!sk->done) {
				sk->done = 1;
				break;
			}
			copied = -ENOTCONN;
			break;
		}

		if (sk->shutdown & RCV_SHUTDOWN) {
			sk->done = 1;
			break;
		}

		if (nonblock) {
			copied = -EAGAIN;
			break;
		}

		/* Free consumed buffers (may ACK) before sleeping. */
		cleanup_rbuf(sk);
		release_sock(sk);
		schedule();
		sk->inuse = 1;

		if (current->signal & ~current->blocked) {
			copied = -ERESTARTSYS;
			break;
		}
		continue;

	found_ok_skb:
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Stop at -- or skip -- the urgent byte in this skb. */
		if (sk->urg_data) {
			unsigned long urg_offset = sk->urg_seq - (1 + *seq);
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sk->urginline) {
						/* Skip the OOB byte. */
						++*seq;
						offset++;
						used--;
					}
				} else
					used = urg_offset;
			}
		}

		memcpy_tofs(to,((unsigned char *)skb->h.th) +
			skb->h.th->doff*4 + offset, used);
		copied += used;
		len -= used;
		to += used;
		*seq += used;
		/* Passed the urgent mark: clear the urgent state. */
		if (after(sk->copied_seq+1,sk->urg_seq))
			sk->urg_data = 0;
		if (!(flags & MSG_PEEK) && (used + offset >= skb->len))
			skb->used = 1;
	}
	remove_wait_queue(sk->sleep, &wait);
	current->state = TASK_RUNNING;

	cleanup_rbuf(sk);
	release_sock(sk);
	return copied;
}
1458
1459
1460
1461
1462
1463
/*
 * shutdown() for the sending direction: flush any partial packet, then
 * build and send (or queue behind pending data) a FIN, and advance the
 * connection state to FIN_WAIT1/LAST_ACK/FIN_WAIT2 accordingly.
 */
void tcp_shutdown(struct sock *sk, int how)
{
	struct sk_buff *buff;
	struct tcphdr *t1, *th;
	struct proto *prot;
	int tmp;
	struct device *dev = NULL;

	/* Only a send-side shutdown emits a FIN. */
	if (!(how & SEND_SHUTDOWN))
		return;

	/* Already closing in these states -- a second FIN is wrong. */
	if (sk->state == TCP_FIN_WAIT1 ||
	    sk->state == TCP_FIN_WAIT2 ||
	    sk->state == TCP_CLOSING ||
	    sk->state == TCP_LAST_ACK ||
	    sk->state == TCP_TIME_WAIT
	) {
		return;
	}
	sk->inuse = 1;

	sk->shutdown |= SEND_SHUTDOWN;

	/* Push out any partially filled segment first. */
	if (sk->partial)
		tcp_send_partial(sk);

	prot =(struct proto *)sk->prot;
	th =(struct tcphdr *)&sk->dummy_th;
	release_sock(sk);
	buff = prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
	if (buff == NULL)
		return;
	sk->inuse = 1;

	buff->sk = sk;
	buff->len = sizeof(*t1);
	buff->localroute = sk->localroute;
	t1 =(struct tcphdr *) buff->data;

	tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
			   IPPROTO_TCP, sk->opt,
			   sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
	if (tmp < 0)
	{
		/* Header build failed: still advance the state machine
		   even though no FIN actually goes out. */
		buff->free=1;
		prot->wfree(sk,buff->mem_addr, buff->mem_len);

		if (sk->state == TCP_ESTABLISHED)
			sk->state = TCP_FIN_WAIT1;
		else if(sk->state == TCP_CLOSE_WAIT)
			sk->state = TCP_LAST_ACK;
		else
			sk->state = TCP_FIN_WAIT2;

		release_sock(sk);
		return;
	}

	t1 =(struct tcphdr *)((char *)t1 +tmp);
	buff->len += tmp;
	buff->dev = dev;
	memcpy(t1, th, sizeof(*t1));
	/* The FIN consumes one sequence number. */
	t1->seq = ntohl(sk->write_seq);
	sk->write_seq++;
	buff->h.seq = sk->write_seq;
	t1->ack = 1;
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->window = ntohs(sk->window=tcp_select_window(sk));
	t1->fin = 1;
	t1->rst = 0;
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

	/* Data still queued: the FIN must go out after it. */
	if (skb_peek(&sk->write_queue) != NULL)
	{
		buff->free=0;
		if (buff->next != NULL)
		{
			printk("tcp_shutdown: next != NULL\n");
			skb_unlink(buff);
		}
		skb_queue_tail(&sk->write_queue, buff);
	}
	else
	{
		sk->sent_seq = sk->write_seq;
		sk->prot->queue_xmit(sk, dev, buff, 0);
	}

	if (sk->state == TCP_ESTABLISHED)
		sk->state = TCP_FIN_WAIT1;
	else if (sk->state == TCP_CLOSE_WAIT)
		sk->state = TCP_LAST_ACK;
	else
		sk->state = TCP_FIN_WAIT2;

	release_sock(sk);
}
1602
1603
/*
 * recvfrom() for TCP: a plain tcp_read() plus copying the connected
 * peer's address back to user space.  'addr_len' is read and then
 * rewritten with the size actually stored.
 */
static int
tcp_recvfrom(struct sock *sk, unsigned char *to,
	     int to_len, int nonblock, unsigned flags,
	     struct sockaddr_in *addr, int *addr_len)
{
	struct sockaddr_in sin;
	int len;
	int err;
	int result;

	/* VERIFY_WRITE covers the later put_fs_long as well. */
	err = verify_area(VERIFY_WRITE,addr_len,sizeof(long));
	if(err)
		return err;
	len = get_fs_long(addr_len);
	if(len > sizeof(sin))
		len = sizeof(sin);
	err=verify_area(VERIFY_WRITE, addr, len);
	if(err)
		return err;

	result=tcp_read(sk, to, to_len, nonblock, flags);

	if (result < 0) return(result);

	/* TCP is connected: the source is always the peer. */
	sin.sin_family = AF_INET;
	sin.sin_port = sk->dummy_th.dest;
	sin.sin_addr.s_addr = sk->daddr;

	memcpy_tofs(addr, &sin, len);
	put_fs_long(len, addr_len);
	return(result);
}
1639
1640
1641
1642
1643
1644
/*
 * Send an RST in reply to the segment 'th' (which arrived from saddr
 * for daddr).  Per RFC 793: if the offender carried an ACK, the RST
 * takes its sequence from that ack field; otherwise the RST acks the
 * offending segment.  No socket is involved.
 */
static void tcp_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
	  struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	int tmp;
	struct device *ndev=NULL;

	buff = prot->wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
		return;

	buff->len = sizeof(*t1);
	buff->sk = NULL;
	buff->dev = dev;
	buff->localroute = 0;

	t1 =(struct tcphdr *) buff->data;

	tmp = prot->build_header(buff, saddr, daddr, &ndev, IPPROTO_TCP, opt,
			   sizeof(struct tcphdr),tos,ttl);
	if (tmp < 0)
	{
		buff->free = 1;
		prot->wfree(NULL, buff->mem_addr, buff->mem_len);
		return;
	}

	t1 =(struct tcphdr *)((char *)t1 +tmp);
	buff->len += tmp;
	/* Start from the offending header, then swap the port roles. */
	memcpy(t1, th, sizeof(*t1));

	t1->dest = th->source;
	t1->source = th->dest;
	t1->rst = 1;
	t1->window = 0;

	if(th->ack)
	{
		/* RST sequence mirrors the sender's ack; no ACK bit. */
		t1->ack = 0;
		t1->seq = th->ack_seq;
		t1->ack_seq = 0;
	}
	else
	{
		/* Ack the offending segment (a SYN uses one seq number). */
		t1->ack = 1;
		if(!th->syn)
			t1->ack_seq=htonl(th->seq);
		else
			t1->ack_seq=htonl(th->seq+1);
		t1->seq=0;
	}

	t1->syn = 0;
	t1->urg = 0;
	t1->fin = 0;
	t1->psh = 0;
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, saddr, daddr, sizeof(*t1), NULL);
	prot->queue_xmit(NULL, dev, buff, 1);
	tcp_statistics.TcpOutSegs++;
}
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730 static void
1731 tcp_options(struct sock *sk, struct tcphdr *th)
1732 {
1733 unsigned char *ptr;
1734 int length=(th->doff*4)-sizeof(struct tcphdr);
1735 int mss_seen = 0;
1736
1737 ptr = (unsigned char *)(th + 1);
1738
1739 while(length>0)
1740 {
1741 int opcode=*ptr++;
1742 int opsize=*ptr++;
1743 switch(opcode)
1744 {
1745 case TCPOPT_EOL:
1746 return;
1747 case TCPOPT_NOP:
1748 length-=2;
1749 continue;
1750
1751 default:
1752 if(opsize<=2)
1753 return;
1754 switch(opcode)
1755 {
1756 case TCPOPT_MSS:
1757 if(opsize==4 && th->syn)
1758 {
1759 sk->mtu=min(sk->mtu,ntohs(*(unsigned short *)ptr));
1760 mss_seen = 1;
1761 }
1762 break;
1763
1764 }
1765 ptr+=opsize-2;
1766 length-=opsize;
1767 }
1768 }
1769 if (th->syn) {
1770 if (! mss_seen)
1771 sk->mtu=min(sk->mtu, 536);
1772 }
1773 #ifdef CONFIG_INET_PCTCP
1774 sk->mss = min(sk->max_window >> 1, sk->mtu);
1775 #else
1776 sk->mss = min(sk->max_window, sk->mtu);
1777 #endif
1778 }
1779
/*
 *	Return the classful (A/B/C) network mask for the address 'dst'.
 *	Both the argument and the result are in network byte order.
 */
static inline unsigned long default_mask(unsigned long dst)
{
	unsigned long host = ntohl(dst);
	unsigned long mask;

	if (IN_CLASSA(host))
		mask = IN_CLASSA_NET;
	else if (IN_CLASSB(host))
		mask = IN_CLASSB_NET;
	else
		mask = IN_CLASSC_NET;

	return htonl(mask);
}
1789
1790
1791
1792
1793
1794
1795
1796
/*
 *	Handle an incoming SYN on a listening socket: clone the listener
 *	into a new sock in TCP_SYN_RECV, answer with a SYN/ACK carrying
 *	our MSS option, and park the original skb on the listener's
 *	receive queue so accept() can find the new connection.
 */
static void
tcp_conn_request(struct sock *sk, struct sk_buff *skb,
		 unsigned long daddr, unsigned long saddr,
		 struct options *opt, struct device *dev)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	unsigned char *ptr;
	struct sock *newsk;
	struct tcphdr *th;
	struct device *ndev=NULL;
	int tmp;
	struct rtable *rt;

	th = skb->h.th;

	/* A dead (closed) listener cannot accept: answer with a RST. */
	if (!sk->dead) {
		sk->data_ready(sk,0);
	} else {
		tcp_reset(daddr, saddr, th, sk->prot, opt, dev, sk->ip_tos,sk->ip_ttl);
		tcp_statistics.TcpAttemptFails++;
		kfree_skb(skb, FREE_READ);
		return;
	}

	/*
	 *	Backlog full: silently drop the SYN and let the peer
	 *	retransmit later.
	 */
	if (sk->ack_backlog >= sk->max_ack_backlog) {
		tcp_statistics.TcpAttemptFails++;
		kfree_skb(skb, FREE_READ);
		return;
	}

	/*
	 *	Clone the listening socket.  GFP_ATOMIC: we may be called
	 *	from the network bottom half.
	 */
	newsk = (struct sock *) kmalloc(sizeof(struct sock), GFP_ATOMIC);
	if (newsk == NULL) {
		/* Drop; the retransmitted SYN may find memory later. */
		tcp_statistics.TcpAttemptFails++;
		kfree_skb(skb, FREE_READ);
		return;
	}

	/* Start from a byte copy of the listener, then reset all
	   per-connection state. */
	memcpy(newsk, sk, sizeof(*newsk));
	skb_queue_head_init(&newsk->write_queue);
	skb_queue_head_init(&newsk->receive_queue);
	newsk->send_head = NULL;
	newsk->send_tail = NULL;
	skb_queue_head_init(&newsk->back_log);
	newsk->rtt = 0;
	newsk->rto = TCP_TIMEOUT_INIT;
	newsk->mdev = 0;
	newsk->max_window = 0;
	newsk->cong_window = 1;		/* slow start from one segment */
	newsk->cong_count = 0;
	newsk->ssthresh = 0;
	newsk->backoff = 0;
	newsk->blog = 0;
	newsk->intr = 0;
	newsk->proc = 0;
	newsk->done = 0;
	newsk->partial = NULL;
	newsk->pair = NULL;
	newsk->wmem_alloc = 0;
	newsk->rmem_alloc = 0;
	newsk->localroute = sk->localroute;

	newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;

	newsk->err = 0;
	newsk->shutdown = 0;
	newsk->ack_backlog = 0;
	newsk->acked_seq = skb->h.th->seq+1;	/* the SYN consumes one seq */
	newsk->fin_seq = skb->h.th->seq;
	newsk->copied_seq = skb->h.th->seq;
	newsk->state = TCP_SYN_RECV;
	newsk->timeout = 0;
	/* Clock-driven initial send sequence number. */
	newsk->write_seq = jiffies * SEQ_TICK - seq_offset;
	newsk->window_seq = newsk->write_seq;
	newsk->rcv_ack_seq = newsk->write_seq;
	newsk->urg_data = 0;
	newsk->retransmits = 0;
	newsk->destroy = 0;
	newsk->timer.data = (unsigned long)newsk;
	newsk->timer.function = &net_timer;
	newsk->dummy_th.source = skb->h.th->dest;
	newsk->dummy_th.dest = skb->h.th->source;

	/* Swap to our perspective: their source is our destination. */
	newsk->daddr = saddr;
	newsk->saddr = daddr;

	put_sock(newsk->num,newsk);
	newsk->dummy_th.res1 = 0;
	newsk->dummy_th.doff = 6;
	newsk->dummy_th.fin = 0;
	newsk->dummy_th.syn = 0;
	newsk->dummy_th.rst = 0;
	newsk->dummy_th.psh = 0;
	newsk->dummy_th.ack = 0;
	newsk->dummy_th.urg = 0;
	newsk->dummy_th.res2 = 0;
	newsk->acked_seq = skb->h.th->seq + 1;
	newsk->copied_seq = skb->h.th->seq;

	/* TOS is taken from the arriving packet; TTL from the listener. */
	newsk->ip_ttl=sk->ip_ttl;
	newsk->ip_tos=skb->ip_hdr->tos;

	/*
	 *	Choose an initial mtu: explicit user MSS, then a route mtu,
	 *	otherwise 576 off-net (classful heuristic) or the maximum
	 *	window on the local net.
	 */
	rt=ip_rt_route(saddr, NULL,NULL);

	if (sk->user_mss)
		newsk->mtu = sk->user_mss;
	else if(rt!=NULL && (rt->rt_flags&RTF_MTU))
		newsk->mtu = rt->rt_mtu - HEADER_SIZE;
	else {
#ifdef CONFIG_INET_SNARL
		if ((saddr ^ daddr) & default_mask(saddr))
#else
		if ((saddr ^ daddr) & dev->pa_mask)
#endif
			newsk->mtu = 576 - HEADER_SIZE;
		else
			newsk->mtu = MAX_WINDOW;
	}

	/* Never exceed what the device can carry. */
	newsk->mtu = min(newsk->mtu, dev->mtu - HEADER_SIZE);

	/* Let the peer's SYN options (MSS) clamp mtu/mss further. */
	tcp_options(newsk,skb->h.th);

	buff = newsk->prot->wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL) {
		sk->err = -ENOMEM;
		newsk->dead = 1;
		release_sock(newsk);
		kfree_skb(skb, FREE_READ);
		tcp_statistics.TcpAttemptFails++;
		return;
	}

	buff->len = sizeof(struct tcphdr)+4;	/* header + 4 bytes MSS option */
	buff->sk = newsk;
	buff->localroute = newsk->localroute;

	t1 =(struct tcphdr *) buff->data;

	/* Put in the IP header and routing stuff. */
	tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &ndev,
			       IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);

	/* Routing failure: undo everything and drop the SYN. */
	if (tmp < 0) {
		sk->err = tmp;
		buff->free=1;
		kfree_skb(buff,FREE_WRITE);
		newsk->dead = 1;
		release_sock(newsk);
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
		tcp_statistics.TcpAttemptFails++;
		return;
	}

	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)t1 +tmp);

	memcpy(t1, skb->h.th, sizeof(*t1));
	buff->h.seq = newsk->write_seq;

	/*
	 *	Build the SYN/ACK proper.
	 */
	t1->dest = skb->h.th->source;
	t1->source = newsk->dummy_th.source;
	t1->seq = ntohl(newsk->write_seq++);
	t1->ack = 1;
	newsk->window = tcp_select_window(newsk);
	newsk->sent_seq = newsk->write_seq;
	t1->window = ntohs(newsk->window);
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->psh = 0;
	t1->syn = 1;
	t1->ack_seq = ntohl(skb->h.th->seq+1);
	t1->doff = sizeof(*t1)/4+1;		/* +1 word for the MSS option */

	/* Append our MSS option (kind 2, length 4, 16-bit value). */
	ptr =(unsigned char *)(t1+1);
	ptr[0] = 2;
	ptr[1] = 4;
	ptr[2] = ((newsk->mtu) >> 8) & 0xff;
	ptr[3] =(newsk->mtu) & 0xff;

	tcp_send_check(t1, daddr, saddr, sizeof(*t1)+4, newsk);
	newsk->prot->queue_xmit(newsk, dev, buff, 0);

	/* Retransmit the SYN/ACK if no ACK arrives in time. */
	reset_timer(newsk, TIME_WRITE , TCP_TIMEOUT_INIT);
	skb->sk = newsk;

	/* Charge the queued SYN to the new socket's receive memory. */
	sk->rmem_alloc -= skb->mem_len;
	newsk->rmem_alloc += skb->mem_len;

	skb_queue_tail(&sk->receive_queue,skb);
	sk->ack_backlog++;
	release_sock(newsk);
	tcp_statistics.TcpOutSegs++;
}
2014
2015
/*
 *	Close a TCP socket.  Depending on the current state this either
 *	simply marks the socket closed (LISTEN/CLOSE), lets an earlier
 *	close finish (FIN_WAIT*/CLOSING/TIME_WAIT/LAST_ACK), or builds
 *	and transmits a FIN and moves through the active-close states.
 *	'timeout' non-zero means the caller will not wait (e.g. process
 *	exit), so we go straight to the timed-out variants.
 */
static void tcp_close(struct sock *sk, int timeout)
{
	struct sk_buff *buff;
	int need_reset = 0;
	struct tcphdr *t1, *th;
	struct proto *prot;
	struct device *dev=NULL;
	int tmp;

	/*
	 *	We need to grab some memory, and put together a FIN,
	 *	and then put it into the queue to be sent.
	 */
	sk->inuse = 1;
	sk->keepopen = 1;
	sk->shutdown = SHUTDOWN_MASK;

	if (!sk->dead)
		sk->state_change(sk);

	/*
	 *	Flush anything still unread.  If we throw away data the
	 *	application never consumed, the peer must be told with a
	 *	RST rather than an orderly FIN.
	 */
	if (skb_peek(&sk->receive_queue) != NULL)
	{
		struct sk_buff *skb;
		if(sk->debug)
			printk("Clean rcv queue\n");
		while((skb=skb_dequeue(&sk->receive_queue))!=NULL)
		{
			/* Unread data beyond what was copied out? */
			if(skb->len > 0 && after(skb->h.th->seq + skb->len + 1 , sk->copied_seq))
				need_reset = 1;
			kfree_skb(skb, FREE_READ);
		}
		if(sk->debug)
			printk("Cleaned.\n");
	}

	/*
	 *	Flush any partially assembled (Nagle) segment first.
	 */
	if (sk->partial)
	{
		tcp_send_partial(sk);
	}

	switch(sk->state)
	{
		case TCP_FIN_WAIT1:
		case TCP_FIN_WAIT2:
		case TCP_CLOSING:
			/* Our FIN is already in flight; nothing more to send. */
#ifdef NOTDEF
			/*
			 * Disabled: timing out the close here instead of
			 * waiting for the peer's FIN.
			 */
			reset_timer(sk, TIME_CLOSE, 4 * sk->rto);
#endif
			if (timeout)
				tcp_time_wait(sk);
			release_sock(sk);
			return;	/* break causes a double release - messy */
		case TCP_TIME_WAIT:
		case TCP_LAST_ACK:
			if (timeout)
			{
				sk->state = TCP_CLOSE;
			}
			release_sock(sk);
			return;
		case TCP_LISTEN:
			sk->state = TCP_CLOSE;
			release_sock(sk);
			return;
		case TCP_CLOSE:
			release_sock(sk);
			return;
		case TCP_CLOSE_WAIT:
		case TCP_ESTABLISHED:
		case TCP_SYN_SENT:
		case TCP_SYN_RECV:
			/* Active close: we must originate a FIN. */
			prot =(struct proto *)sk->prot;
			th =(struct tcphdr *)&sk->dummy_th;
			buff = prot->wmalloc(sk, MAX_FIN_SIZE, 1, GFP_ATOMIC);
			if (buff == NULL)
			{
				/*
				 * No memory for the FIN right now: back off,
				 * keep the socket alive, and let the close
				 * timer retry shortly.
				 */
				release_sock(sk);
				if (sk->state != TCP_CLOSE_WAIT)
					sk->state = TCP_ESTABLISHED;
				reset_timer(sk, TIME_CLOSE, 100);
				return;
			}
			buff->sk = sk;
			buff->free = 1;
			buff->len = sizeof(*t1);
			buff->localroute = sk->localroute;
			t1 =(struct tcphdr *) buff->data;

			/*
			 *	Put in the IP header and routing stuff.
			 */
			tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
					 IPPROTO_TCP, sk->opt,
				         sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
			if (tmp < 0)
			{
				kfree_skb(buff,FREE_WRITE);
				/*
				 * Routing failed; we cannot send the FIN.
				 * Pretend it went out and advance the state
				 * machine anyway so the socket can die.
				 */
				if(sk->state==TCP_ESTABLISHED)
					sk->state=TCP_FIN_WAIT1;
				else
					sk->state=TCP_FIN_WAIT2;
				reset_timer(sk, TIME_CLOSE,4*sk->rto);
				if(timeout)
					tcp_time_wait(sk);

				release_sock(sk);
				return;
			}

			t1 =(struct tcphdr *)((char *)t1 +tmp);
			buff->len += tmp;
			buff->dev = dev;
			memcpy(t1, th, sizeof(*t1));
			t1->seq = ntohl(sk->write_seq);
			sk->write_seq++;	/* the FIN occupies one sequence number */
			buff->h.seq = sk->write_seq;
			t1->ack = 1;

			/*
			 *	Ack everything immediately from now on.
			 */
			sk->delay_acks = 0;
			t1->ack_seq = ntohl(sk->acked_seq);
			t1->window = ntohs(sk->window=tcp_select_window(sk));
			t1->fin = 1;
			t1->rst = need_reset;	/* unread data was discarded */
			t1->doff = sizeof(*t1)/4;
			tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

			tcp_statistics.TcpOutSegs++;

			if (skb_peek(&sk->write_queue) == NULL)
			{
				/* Nothing pending: send the FIN immediately. */
				sk->sent_seq = sk->write_seq;
				prot->queue_xmit(sk, dev, buff, 0);
			}
			else
			{
				/* Queue the FIN behind the pending data. */
				reset_timer(sk, TIME_WRITE, sk->rto);
				if (buff->next != NULL)
				{
					printk("tcp_close: next != NULL\n");
					skb_unlink(buff);
				}
				skb_queue_tail(&sk->write_queue, buff);
			}

			/*
			 *	Advance the close state machine:
			 *	ESTABLISHED -> FIN_WAIT1 (we closed first),
			 *	CLOSE_WAIT  -> LAST_ACK  (peer closed first),
			 *	otherwise   -> FIN_WAIT2.
			 */
			if (sk->state == TCP_ESTABLISHED)
				sk->state = TCP_FIN_WAIT1;
			else if (sk->state == TCP_CLOSE_WAIT)
				sk->state = TCP_LAST_ACK;
			else if (sk->state != TCP_CLOSING)
				sk->state = TCP_FIN_WAIT2;
	}
	release_sock(sk);
}
2216
2217
2218
2219
2220
2221
/*
 *	Push queued segments out of sk->write_queue while the peer's
 *	window, the congestion window, and the retransmit state all
 *	permit.  Segments already covered by the peer's ACK are freed
 *	instead of sent; the rest get a fresh ack/window stamped in and
 *	are handed to the IP layer.
 */
static void
tcp_write_xmit(struct sock *sk)
{
	struct sk_buff *skb;

	/* A zapped (reset) socket must transmit nothing. */
	if(sk->zapped)
		return;

	/*
	 * Send while: the segment fits in the advertised window, we are
	 * not blocked mid-retransmit (unless the segment is already
	 * acked), and we are inside the congestion window.
	 */
	while((skb = skb_peek(&sk->write_queue)) != NULL &&
		before(skb->h.seq, sk->window_seq + 1) &&
		(sk->retransmits == 0 ||
		 sk->timeout != TIME_WRITE ||
		 before(skb->h.seq, sk->rcv_ack_seq + 1))
		&& sk->packets_out < sk->cong_window) {
		IS_SKB(skb);
		skb_unlink(skb);

		/* Already acknowledged - never hit the wire. */
		if (before(skb->h.seq, sk->rcv_ack_seq +1)) {
			sk->retransmits = 0;
			kfree_skb(skb, FREE_WRITE);
			if (!sk->dead) sk->write_space(sk);
		} else {
			struct tcphdr *th;
			struct iphdr *iph;
			int size;

			/*
			 * The segment was queued with a stale ack/window;
			 * locate the TCP header inside the built packet and
			 * refresh them, then re-checksum.
			 */
			iph = (struct iphdr *)(skb->data +
					skb->dev->hard_header_len);
			th = (struct tcphdr *)(((char *)iph) +(iph->ihl << 2));
			size = skb->len - (((unsigned char *) th) - skb->data);

			th->ack_seq = ntohl(sk->acked_seq);
			th->window = ntohs(tcp_select_window(sk));

			tcp_send_check(th, sk->saddr, sk->daddr, size, sk);

			sk->sent_seq = skb->h.seq;
			sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
		}
	}
}
2271
2272
2273
2274
2275
2276
2277 void
2278 sort_send(struct sock *sk)
2279 {
2280 struct sk_buff *list = NULL;
2281 struct sk_buff *skb,*skb2,*skb3;
2282
2283 for (skb = sk->send_head; skb != NULL; skb = skb2) {
2284 skb2 = skb->link3;
2285 if (list == NULL || before (skb2->h.seq, list->h.seq)) {
2286 skb->link3 = list;
2287 sk->send_tail = skb;
2288 list = skb;
2289 } else {
2290 for (skb3 = list; ; skb3 = skb3->link3) {
2291 if (skb3->link3 == NULL ||
2292 before(skb->h.seq, skb3->link3->h.seq)) {
2293 skb->link3 = skb3->link3;
2294 skb3->link3 = skb;
2295 if (skb->link3 == NULL) sk->send_tail = skb;
2296 break;
2297 }
2298 }
2299 }
2300 }
2301 sk->send_head = list;
2302 }
2303
2304
2305
2306
2307
2308
/*
 *	Process the ACK information carried by an incoming segment:
 *	validate the ack number, track the peer's window (shrinking it
 *	requeues unsent-but-scheduled segments), run slow start /
 *	congestion avoidance, retire acknowledged buffers from the
 *	retransmit list (updating the Jacobson RTT/RTO estimate), kick
 *	further transmission or zero-window probing, and advance the
 *	closing states that complete on an ACK.
 *	Returns 1 if the ACK was acceptable, 0 if it should be ignored.
 */
static int
tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
{
	unsigned long ack;
	int flag = 0;
	/*
	 * flag bits:
	 *   1 - the segment carried data (or we transmitted),
	 *   2 - this ack retired something / retransmit bookkeeping,
	 *   4 - the window changed.
	 */

	if(sk->zapped)
		return(1);	/* reset socket: swallow everything */

	ack = ntohl(th->ack_seq);
	/* Track the largest window the peer has ever offered; mss is
	   derived from it. */
	if (ntohs(th->window) > sk->max_window) {
		sk->max_window = ntohs(th->window);
#ifdef CONFIG_INET_PCTCP
		sk->mss = min(sk->max_window>>1, sk->mtu);
#else
		sk->mss = min(sk->max_window, sk->mtu);
#endif
	}

	/* Any ACK answers an outstanding keepalive probe. */
	if (sk->retransmits && sk->timeout == TIME_KEEPOPEN)
		sk->retransmits = 0;

	/* Ack outside [rcv_ack_seq-1, sent_seq+1]: stale or from the
	   future.  Future acks are dropped; old ones just refresh the
	   keepalive. */
	if (after(ack, sk->sent_seq+1) || before(ack, sk->rcv_ack_seq-1)) {
		if (after(ack, sk->sent_seq) ||
		   (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)) {
			return(0);
		}
		if (sk->keepopen) {
			reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
		}
		return(1);
	}

	if (len != th->doff*4) flag |= 1;	/* segment carries data */

	/* Peer shrank the window below what we already scheduled:
	   move the now-unsendable segments off the retransmit list
	   back onto the front of the write queue. */
	if (after(sk->window_seq, ack+ntohs(th->window))) {
		struct sk_buff *skb;
		struct sk_buff *skb2;
		struct sk_buff *wskb = NULL;

		skb2 = sk->send_head;
		sk->send_head = NULL;
		sk->send_tail = NULL;

		flag |= 4;

		sk->window_seq = ack + ntohs(th->window);
		cli();		/* lists shared with the bottom half */
		while (skb2 != NULL) {
			skb = skb2;
			skb2 = skb->link3;
			skb->link3 = NULL;
			if (after(skb->h.seq, sk->window_seq)) {
				/* Beyond the new window: back to write_queue,
				   preserving order via wskb. */
				if (sk->packets_out > 0) sk->packets_out--;
				if (skb->next != NULL) {
					skb_unlink(skb);
				}
				if (wskb == NULL)
					skb_queue_head(&sk->write_queue,skb);
				else
					skb_append(wskb,skb);
				wskb = skb;
			} else {
				/* Still sendable: rebuild the send list. */
				if (sk->send_head == NULL) {
					sk->send_head = skb;
					sk->send_tail = skb;
				} else {
					sk->send_tail->link3 = skb;
					sk->send_tail = skb;
				}
				skb->link3 = NULL;
			}
		}
		sti();
	}

	/* Keep head/tail/packets_out mutually consistent. */
	if (sk->send_tail == NULL || sk->send_head == NULL) {
		sk->send_head = NULL;
		sk->send_tail = NULL;
		sk->packets_out= 0;
	}

	sk->window_seq = ack + ntohs(th->window);

	/* Congestion control: slow start below ssthresh, then linear
	   (one segment per cwnd of acks) growth, capped at 2048. */
	if (sk->timeout == TIME_WRITE &&
		sk->cong_window < 2048 && after(ack, sk->rcv_ack_seq)) {
		if (sk->cong_window < sk->ssthresh)
			/* In "safe" area, exponential increase. */
			sk->cong_window++;
		else {
			/* Congestion avoidance: +1 per window's worth of acks. */
			if (sk->cong_count >= sk->cong_window) {
				sk->cong_window++;
				sk->cong_count = 0;
			} else
				sk->cong_count++;
		}
	}

	sk->rcv_ack_seq = ack;

	/*
	 *	If this ack reopens a zero window, cancel the probe state
	 *	and restore the un-backed-off rto.
	 */
	if (sk->timeout == TIME_PROBE0) {
		if (skb_peek(&sk->write_queue) != NULL &&
		    ! before (sk->window_seq, sk->write_queue.next->h.seq)) {
			sk->retransmits = 0;
			sk->backoff = 0;

			sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
			if (sk->rto > 120*HZ)
				sk->rto = 120*HZ;
			if (sk->rto < 2)
				sk->rto = 2;
		}
	}

	/* Retire everything on the retransmit list covered by 'ack'. */
	while(sk->send_head != NULL) {
		/* Sanity: the list must stay sequence-ordered. */
		if (sk->send_head->link3 &&
		    after(sk->send_head->h.seq, sk->send_head->link3->h.seq)) {
			printk("INET: tcp.c: *** bug send_list out of order.\n");
			sort_send(sk);
		}

		if (before(sk->send_head->h.seq, ack+1)) {
			struct sk_buff *oskb;

			if (sk->retransmits) {
				/* Partial ack during retransmit: stay in
				   retransmit mode while anything remains. */
				flag |= 2;

				if (sk->send_head->link3)
					sk->retransmits = 1;
				else
					sk->retransmits = 0;
			}

			if (sk->packets_out > 0) sk->packets_out --;

			if (!sk->dead) sk->write_space(sk);

			oskb = sk->send_head;

			if (!(flag&2)) {
				long m;

				/*
				 *	Jacobson/Karn RTT estimation: only
				 *	sample when not retransmitting.
				 *	rtt holds srtt<<3, mdev holds mdev<<2.
				 */
				m = jiffies - oskb->when;  /* RTT sample */
				if(m<=0)
					m=1;		/* IS THIS RIGHT FOR <0 ??? */
				m -= (sk->rtt >> 3);	/* m is now error in rtt est */
				sk->rtt += m;		/* rtt = 7/8 rtt + 1/8 new */
				if (m < 0)
					m = -m;		/* m is now abs(error) */
				m -= (sk->mdev >> 2);	/* similar update on mdev */
				sk->mdev += m;		/* mdev = 3/4 mdev + 1/4 new */

				/* rto = srtt + 2*mdev, clamped to [2, 120s]. */
				sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
				if (sk->rto > 120*HZ)
					sk->rto = 120*HZ;
				if (sk->rto < 2)
					sk->rto = 2;
				sk->backoff = 0;

			}
			flag |= (2|4);

			cli();

			oskb = sk->send_head;
			IS_SKB(oskb);
			sk->send_head = oskb->link3;
			if (sk->send_head == NULL) {
				sk->send_tail = NULL;
			}

			/* The buffer may still sit on a device queue. */
			if (oskb->next)
				skb_unlink(oskb);
			sti();
			kfree_skb(oskb, FREE_WRITE); /* write */
			if (!sk->dead) sk->write_space(sk);
		} else {
			break;
		}
	}

	/*
	 *	More to send?  Either push it out now, or, if the peer's
	 *	window blocks the head of the queue and nothing is in
	 *	flight, start zero-window probing.
	 */
	if (skb_peek(&sk->write_queue) != NULL) {
		if (after (sk->window_seq+1, sk->write_queue.next->h.seq) &&
			(sk->retransmits == 0 ||
			 sk->timeout != TIME_WRITE ||
			 before(sk->write_queue.next->h.seq, sk->rcv_ack_seq + 1))
			&& sk->packets_out < sk->cong_window) {
			flag |= 1;
			tcp_write_xmit(sk);
		} else if (before(sk->window_seq, sk->write_queue.next->h.seq) &&
			sk->send_head == NULL &&
			sk->ack_backlog == 0 &&
			sk->state != TCP_TIME_WAIT) {
			reset_timer(sk, TIME_PROBE0, sk->rto);
		}
	} else {
		/* Queue empty: tidy the timers. */
		if (sk->send_head == NULL && sk->ack_backlog == 0 &&
		sk->state != TCP_TIME_WAIT && !sk->keepopen) {
			if (!sk->dead) sk->write_space(sk);

			/* NOTE(review): !sk->keepopen above makes this branch
			   always take delete_timer - looks historical. */
			if (sk->keepopen)
				reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
			else
				delete_timer(sk);
		} else {
			if (sk->state != (unsigned char) sk->keepopen) {
				reset_timer(sk, TIME_WRITE, sk->rto);
			}
			if (sk->state == TCP_TIME_WAIT) {
				reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			}
		}
	}

	/* Everything in flight is acked: flush any partial segment. */
	if (sk->packets_out == 0 && sk->partial != NULL &&
		skb_peek(&sk->write_queue) == NULL && sk->send_head == NULL) {
		flag |= 1;
		tcp_send_partial(sk);
	}

	/*
	 *	In LAST_ACK the connection dies once our FIN is acked.
	 */
	if (sk->state == TCP_LAST_ACK) {
		if (!sk->dead)
			sk->state_change(sk);
		if (sk->rcv_ack_seq == sk->write_seq && sk->acked_seq == sk->fin_seq) {
			flag |= 1;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
		}
	}

	/*
	 *	FIN_WAIT1: our FIN acked.  If the peer's FIN was also seen,
	 *	go to TIME_WAIT; otherwise FIN_WAIT2.
	 */
	if (sk->state == TCP_FIN_WAIT1) {
		if (!sk->dead)
			sk->state_change(sk);
		if (sk->rcv_ack_seq == sk->write_seq) {
			flag |= 1;
			if (sk->acked_seq != sk->fin_seq) {
				tcp_time_wait(sk);
			} else {
				sk->shutdown = SHUTDOWN_MASK;
				sk->state = TCP_FIN_WAIT2;
			}
		}
	}

	/*
	 *	CLOSING (simultaneous close): FIN acked -> TIME_WAIT.
	 */
	if (sk->state == TCP_CLOSING) {
		if (!sk->dead)
			sk->state_change(sk);
		if (sk->rcv_ack_seq == sk->write_seq) {
			flag |= 1;
			tcp_time_wait(sk);
		}
	}

	/*
	 *	Fast retransmit heuristic: a pure-ack segment that made no
	 *	progress (or a retransmit-mode ack, or an overdue head of
	 *	the send list) triggers an immediate retransmission.
	 */
	if (((!flag) || (flag&4)) && sk->send_head != NULL &&
	       (((flag&2) && sk->retransmits) ||
	       (sk->send_head->when + sk->rto < jiffies))) {
		ip_do_retransmit(sk, 1);
		reset_timer(sk, TIME_WRITE, sk->rto);
	}

	return(1);
}
2694
2695
2696
2697
2698
2699
2700
/*
 *	Accept the data portion of an incoming segment: insert the skb
 *	into the (sequence-ordered) receive queue, advance acked_seq
 *	over any now-contiguous run of segments, shrink the advertised
 *	window accordingly, and schedule or send an ACK.  Out-of-order
 *	segments are kept but not acked until the gap fills.
 *	Always returns 0; the skb's ownership passes to the queue (or it
 *	is freed here on the early-out paths).
 */
static int
tcp_data(struct sk_buff *skb, struct sock *sk,
	 unsigned long saddr, unsigned short len)
{
	struct sk_buff *skb1, *skb2;
	struct tcphdr *th;
	int dup_dumped=0;

	th = skb->h.th;
	skb->len = len -(th->doff*4);	/* trim to payload length */

	sk->bytes_rcv += skb->len;
	/* Pure ack with no data/flags of interest: just answer if needed. */
	if (skb->len == 0 && !th->fin && !th->urg && !th->psh) {
		if (!th->ack) tcp_send_ack(sk->sent_seq, sk->acked_seq,sk, th, saddr);
		kfree_skb(skb, FREE_READ);
		return(0);
	}

	/* Data after we shut the receive side down: abort with a RST. */
	if (sk->shutdown & RCV_SHUTDOWN && skb->len!=0 ) {
		sk->acked_seq = th->seq + skb->len + th->syn + th->fin;
		tcp_reset(sk->saddr, sk->daddr, skb->h.th,
			sk->prot, NULL, skb->dev, sk->ip_tos, sk->ip_ttl);
		tcp_statistics.TcpEstabResets++;
		sk->state = TCP_CLOSE;
		sk->err = EPIPE;
		sk->shutdown = SHUTDOWN_MASK;
		kfree_skb(skb, FREE_READ);
		if (!sk->dead) sk->state_change(sk);
		return(0);
	}

	/*
	 *	Insert the skb into the receive queue in sequence order,
	 *	scanning backwards from the tail (new data usually belongs
	 *	at the end).  An exact duplicate that is no longer than the
	 *	new segment is replaced in place.
	 */
	if (skb_peek(&sk->receive_queue) == NULL) {
		skb_queue_head(&sk->receive_queue,skb);
		skb1= NULL;
	} else {
		for(skb1=sk->receive_queue.prev; ; skb1 = skb1->prev) {
			if(sk->debug)
			{
				printk("skb1=%p :", skb1);
				printk("skb1->h.th->seq = %ld: ", skb1->h.th->seq);
				printk("skb->h.th->seq = %ld\n",skb->h.th->seq);
				printk("copied_seq = %ld acked_seq = %ld\n", sk->copied_seq,
						sk->acked_seq);
			}
			/* Same start, at least as much data: drop the old copy. */
			if (th->seq==skb1->h.th->seq && skb->len>= skb1->len)
			{
				skb_append(skb1,skb);
				skb_unlink(skb1);
				kfree_skb(skb1,FREE_READ);
				dup_dumped=1;
				skb1=NULL;
				break;
			}
			/* Found the predecessor: insert after it. */
			if (after(th->seq+1, skb1->h.th->seq))
			{
				skb_append(skb1,skb);
				break;
			}
			/* Ran off the front: this is the new first segment. */
			if (skb1 == skb_peek(&sk->receive_queue))
			{
				skb_queue_head(&sk->receive_queue, skb);
				break;
			}
		}
	}

	/* Stash the end sequence number of this segment in ack_seq
	   (SYN and FIN each occupy one sequence number). */
	th->ack_seq = th->seq + skb->len;
	if (th->syn) th->ack_seq++;
	if (th->fin) th->ack_seq++;

	if (before(sk->acked_seq, sk->copied_seq)) {
		printk("*** tcp.c:tcp_data bug acked < copied\n");
		sk->acked_seq = sk->copied_seq;
	}

	/*
	 *	If this segment is in order (or everything before it is
	 *	already acked), advance acked_seq over it and any queued
	 *	successors it makes contiguous.
	 */
	if ((!dup_dumped && (skb1 == NULL || skb1->acked)) || before(th->seq, sk->acked_seq+1)) {
		if (before(th->seq, sk->acked_seq+1)) {
			int newwindow;

			if (after(th->ack_seq, sk->acked_seq)) {
				/* Data consumed part of the offered window. */
				newwindow = sk->window -
					       (th->ack_seq - sk->acked_seq);
				if (newwindow < 0)
					newwindow = 0;
				sk->window = newwindow;
				sk->acked_seq = th->ack_seq;
			}
			skb->acked = 1;

			/* A FIN closes our receive side. */
			if (skb->h.th->fin) {
				if (!sk->dead) sk->state_change(sk);
				sk->shutdown |= RCV_SHUTDOWN;
			}

			/* Sweep forward over segments that are now in order. */
			for(skb2 = skb->next;
			    skb2 != (struct sk_buff *)&sk->receive_queue;
			    skb2 = skb2->next) {
				if (before(skb2->h.th->seq, sk->acked_seq+1)) {
					if (after(skb2->h.th->ack_seq, sk->acked_seq))
					{
						newwindow = sk->window -
						 (skb2->h.th->ack_seq - sk->acked_seq);
						if (newwindow < 0)
							newwindow = 0;
						sk->window = newwindow;
						sk->acked_seq = skb2->h.th->ack_seq;
					}
					skb2->acked = 1;

					if (skb2->h.th->fin) {
						sk->shutdown |= RCV_SHUTDOWN;
						if (!sk->dead) sk->state_change(sk);
					}

					/* Filled a hole: force an immediate ack. */
					sk->ack_backlog = sk->max_ack_backlog;
				} else {
					break;
				}
			}

			/*
			 *	Ack now, or delay it (empty branch = ack falls
			 *	through to the sender below).
			 */
			if (!sk->delay_acks ||
			    sk->ack_backlog >= sk->max_ack_backlog ||
			    sk->bytes_rcv > sk->max_unacked || th->fin) {
			} else {
				sk->ack_backlog++;
				if(sk->debug)
					printk("Ack queued.\n");
				reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
			}
		}
	}

	/*
	 *	Out-of-order segment: if receive memory is tight, shed
	 *	unacked (out-of-order) buffers from the head of the queue,
	 *	then ack what we do hold so the peer fills the gap.
	 */
	if (!skb->acked) {
		while (sk->prot->rspace(sk) < sk->mtu) {
			skb1 = skb_peek(&sk->receive_queue);
			if (skb1 == NULL) {
				printk("INET: tcp.c:tcp_data memory leak detected.\n");
				break;
			}

			/* Never throw away already-acked data. */
			if (skb1->acked) {
				break;
			}

			skb_unlink(skb1);
			kfree_skb(skb1, FREE_READ);
		}
		tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
		sk->ack_backlog++;
		reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
	} else {
		/* In-order data: ack (possibly redundantly) right away. */
		tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
	}

	/* Wake any reader. */
	if (!sk->dead) {
		if(sk->debug)
			printk("Data wakeup.\n");
		sk->data_ready(sk,0);
	}

#ifdef NOTDEF
	/* Disabled: shortcut from FIN_WAIT2 to LAST_ACK. */
	if (sk->state == TCP_FIN_WAIT2 &&
	    sk->acked_seq == sk->fin_seq && sk->rcv_ack_seq == sk->write_seq) {

		sk->shutdown = SHUTDOWN_MASK;
		sk->state = TCP_LAST_ACK;
		if (!sk->dead) sk->state_change(sk);
	}
#endif

	return(0);
}
2909
2910
2911 static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
2912 {
2913 unsigned long ptr = ntohs(th->urg_ptr);
2914
2915 if (ptr)
2916 ptr--;
2917 ptr += th->seq;
2918
2919
2920 if (after(sk->copied_seq+1, ptr))
2921 return;
2922
2923
2924 if (sk->urg_data && !after(ptr, sk->urg_seq))
2925 return;
2926
2927
2928 if (sk->proc != 0) {
2929 if (sk->proc > 0) {
2930 kill_proc(sk->proc, SIGURG, 1);
2931 } else {
2932 kill_pg(-sk->proc, SIGURG, 1);
2933 }
2934 }
2935 sk->urg_data = URG_NOTYET;
2936 sk->urg_seq = ptr;
2937 }
2938
2939 static inline int tcp_urg(struct sock *sk, struct tcphdr *th,
2940 unsigned long saddr, unsigned long len)
2941 {
2942 unsigned long ptr;
2943
2944
2945 if (th->urg)
2946 tcp_check_urg(sk,th);
2947
2948
2949 if (sk->urg_data != URG_NOTYET)
2950 return 0;
2951
2952
2953 ptr = sk->urg_seq - th->seq + th->doff*4;
2954 if (ptr >= len)
2955 return 0;
2956
2957
2958 sk->urg_data = URG_VALID | *(ptr + (unsigned char *) th);
2959 if (!sk->dead)
2960 sk->data_ready(sk,0);
2961 return 0;
2962 }
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
/*
 *	Process an incoming FIN: record the peer's final sequence
 *	number and advance our half of the close state machine.
 *	Returns 0 in all cases; where it returns early the FIN is not
 *	counted against the ack backlog.
 */
static int tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th,
	 unsigned long saddr, struct device *dev)
{
	/* End of the peer's data stream (SYN and FIN each take a seq). */
	sk->fin_seq = th->seq + skb->len + th->syn + th->fin;

	if (!sk->dead)
	{
		sk->state_change(sk);
	}

	switch(sk->state)
	{
		case TCP_SYN_RECV:
		case TCP_SYN_SENT:
		case TCP_ESTABLISHED:
			/* Peer closed first: we move to CLOSE_WAIT and give
			   the application TCP_TIMEOUT_LEN to close its side. */
			reset_timer(sk, TIME_CLOSE, TCP_TIMEOUT_LEN);

			tcp_statistics.TcpCurrEstab--;
			sk->state = TCP_CLOSE_WAIT;
			if (th->rst)
				sk->shutdown = SHUTDOWN_MASK;
			break;

		case TCP_CLOSE_WAIT:
		case TCP_CLOSING:
			/* Duplicate FIN - already handled; just re-ack below. */
			break;
		case TCP_TIME_WAIT:
			/* Retransmitted FIN: restart the 2MSL wait. */
			reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			return(0);
		case TCP_FIN_WAIT1:
			/* Simultaneous close: their FIN arrived before the
			   ack of ours, so wait for that ack in CLOSING. */
			reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);

			sk->state = TCP_CLOSING;
			break;
		case TCP_FIN_WAIT2:
			/* Both sides done: enter the 2MSL TIME_WAIT. */
			reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);

			sk->state = TCP_TIME_WAIT;
			break;
		case TCP_CLOSE:
			/* Already closed - ignore. */
			break;
		default:
			sk->state = TCP_LAST_ACK;

			reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			return(0);
	}
	/* The FIN must be acknowledged. */
	sk->ack_backlog++;

	return(0);
}
3057
3058
3059
/*
 *	Accept a connection on a listening socket: dequeue the first
 *	completed-handshake skb from the receive queue and hand back the
 *	child sock it points at.  Blocks unless O_NONBLOCK; on error
 *	returns NULL with sk->err set (EINVAL, EAGAIN, ERESTARTSYS).
 */
static struct sock *
tcp_accept(struct sock *sk, int flags)
{
	struct sock *newsk;
	struct sk_buff *skb;

	/* accept() is only meaningful on a listening socket. */
	if (sk->state != TCP_LISTEN) {
		sk->err = EINVAL;
		return(NULL);
	}

	/* Interrupts off while we test-and-sleep on the queue. */
	cli();
	sk->inuse = 1;
	while((skb = skb_dequeue(&sk->receive_queue)) == NULL) {
		if (flags & O_NONBLOCK) {
			sti();
			release_sock(sk);
			sk->err = EAGAIN;
			return(NULL);
		}

		/* Drop the lock before sleeping so the bottom half can
		   queue new connections and wake us. */
		release_sock(sk);
		interruptible_sleep_on(sk->sleep);
		if (current->signal & ~current->blocked) {
			sti();
			sk->err = ERESTARTSYS;
			return(NULL);
		}
		sk->inuse = 1;
	}
	sti();

	/* The queued SYN skb was pointed at the child sock by
	   tcp_conn_request(). */
	newsk = skb->sk;

	kfree_skb(skb, FREE_READ);
	sk->ack_backlog--;
	release_sock(sk);
	return(newsk);
}
3105
3106
3107
3108
3109
3110
/*
 *	Initiate an active open: validate the destination address,
 *	choose an initial sequence number and mtu, build and transmit
 *	the SYN (with MSS option), and enter TCP_SYN_SENT.
 *	Returns 0 on success or a negative errno.
 */
static int tcp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len)
{
	struct sk_buff *buff;
	struct sockaddr_in sin;
	struct device *dev=NULL;
	unsigned char *ptr;
	int tmp;
	struct tcphdr *t1;
	int err;
	struct rtable *rt;

	if (sk->state != TCP_CLOSE)
		return(-EISCONN);
	if (addr_len < 8)
		return(-EINVAL);

	err=verify_area(VERIFY_READ, usin, addr_len);
	if(err)
		return err;

	/* Copy the user's sockaddr into kernel space. */
	memcpy_fromfs(&sin,usin, min(sizeof(sin), addr_len));

	if (sin.sin_family && sin.sin_family != AF_INET)
		return(-EAFNOSUPPORT);

	/*
	 *	Connecting to INADDR_ANY means connecting to ourselves.
	 */
	if(sin.sin_addr.s_addr==INADDR_ANY)
		sin.sin_addr.s_addr=ip_my_addr();

	/*
	 *	Don't want a TCP connection going to a broadcast address.
	 */
	if (ip_chk_addr(sin.sin_addr.s_addr) == IS_BROADCAST)
	{
		return -ENETUNREACH;
	}

	/* Connecting back to the identical local endpoint would loop. */
	if(sk->saddr == sin.sin_addr.s_addr && sk->num==ntohs(sin.sin_port))
		return -EBUSY;

	sk->inuse = 1;
	sk->daddr = sin.sin_addr.s_addr;
	/* Clock-driven initial send sequence number. */
	sk->write_seq = jiffies * SEQ_TICK - seq_offset;
	sk->window_seq = sk->write_seq;
	sk->rcv_ack_seq = sk->write_seq -1;
	sk->err = 0;
	sk->dummy_th.dest = sin.sin_port;
	release_sock(sk);

	/* Sleeping allocation is fine here - we are in process context. */
	buff = sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
	if (buff == NULL)
	{
		return(-ENOMEM);
	}
	sk->inuse = 1;
	buff->len = 24;		/* TCP header (20) + MSS option (4) */
	buff->sk = sk;
	buff->free = 1;
	buff->localroute = sk->localroute;

	t1 = (struct tcphdr *) buff->data;

	/* Route first, so the mtu choice below can use it. */
	rt=ip_rt_route(sk->daddr, NULL, NULL);

	/*
	 *	Put in the IP header and routing stuff.
	 */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
					IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0)
	{
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		release_sock(sk);
		return(-ENETUNREACH);
	}

	buff->len += tmp;
	t1 = (struct tcphdr *)((char *)t1 +tmp);

	/* Template header, then SYN-specific fields. */
	memcpy(t1,(void *)&(sk->dummy_th), sizeof(*t1));
	t1->seq = ntohl(sk->write_seq++);	/* the SYN takes one seq */
	sk->sent_seq = sk->write_seq;
	buff->h.seq = sk->write_seq;
	t1->ack = 0;
	t1->window = 2;		/* tiny initial window until we learn better */
	t1->res1=0;
	t1->res2=0;
	t1->rst = 0;
	t1->urg = 0;
	t1->psh = 0;
	t1->syn = 1;
	t1->urg_ptr = 0;
	t1->doff = 6;		/* 24 bytes: header + MSS option */

	/*
	 *	Choose the mtu: explicit user MSS, route mtu, else the
	 *	576 off-net / MAX_WINDOW on-net classful heuristic.
	 */
	if (sk->user_mss)
		sk->mtu = sk->user_mss;
	else if(rt!=NULL && rt->rt_flags&RTF_MTU)
		sk->mtu = rt->rt_mtu;
	else
	{
#ifdef CONFIG_INET_SNARL
		if ((sk->saddr ^ sk->daddr) & default_mask(sk->saddr))
#else
		if ((sk->saddr ^ sk->daddr) & dev->pa_mask)
#endif
			sk->mtu = 576 - HEADER_SIZE;
		else
			sk->mtu = MAX_WINDOW;
	}

	/* Floor the mtu at a sane minimum. */
	if(sk->mtu <32)
		sk->mtu = 32;	/* Sanity limit */

	sk->mtu = min(sk->mtu, dev->mtu - HEADER_SIZE);

	/*
	 *	Put in the TCP MSS option (kind 2, length 4, 16-bit value).
	 */
	ptr = (unsigned char *)(t1+1);
	ptr[0] = 2;
	ptr[1] = 4;
	ptr[2] = (sk->mtu) >> 8;
	ptr[3] = (sk->mtu) & 0xff;
	tcp_send_check(t1, sk->saddr, sk->daddr,
		  sizeof(struct tcphdr) + 4, sk);

	/*
	 *	This must go first otherwise a really quick response will
	 *	get reset (the state must be SYN_SENT before the reply lands).
	 */
	sk->state = TCP_SYN_SENT;

	sk->rto = TCP_TIMEOUT_INIT;
	reset_timer(sk, TIME_WRITE, sk->rto);	/* Timer for repeating the SYN until an answer */
	sk->retransmits = TCP_RETR2 - TCP_SYN_RETRIES;

	sk->prot->queue_xmit(sk, dev, buff, 0);
	tcp_statistics.TcpActiveOpens++;
	tcp_statistics.TcpOutSegs++;

	release_sock(sk);
	return(0);
}
3273
3274
3275
3276 static int
3277 tcp_sequence(struct sock *sk, struct tcphdr *th, short len,
3278 struct options *opt, unsigned long saddr, struct device *dev)
3279 {
3280 unsigned long next_seq;
3281
3282 next_seq = len - 4*th->doff;
3283 if (th->fin)
3284 next_seq++;
3285
3286 if (next_seq && !sk->window)
3287 goto ignore_it;
3288 next_seq += th->seq;
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298 if (!after(next_seq+1, sk->acked_seq))
3299 goto ignore_it;
3300
3301 if (!before(th->seq, sk->acked_seq + sk->window + 1))
3302 goto ignore_it;
3303
3304
3305 return 1;
3306
3307 ignore_it:
3308 if (th->rst)
3309 return 0;
3310
3311
3312
3313
3314
3315
3316
3317
3318 if (sk->state==TCP_SYN_SENT || sk->state==TCP_SYN_RECV) {
3319 tcp_reset(sk->saddr,sk->daddr,th,sk->prot,NULL,dev, sk->ip_tos,sk->ip_ttl);
3320 return 1;
3321 }
3322
3323
3324 tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
3325 return 0;
3326 }
3327
3328
/*
 *	Main entry point for received TCP segments, called from the IP layer.
 *
 *	skb/dev:	the received buffer and the device it arrived on
 *	opt:		IP options, passed through to the sub-handlers
 *	daddr/saddr:	our address / the sender's address as seen by IP
 *	len:		length of TCP header plus data
 *	redo:		non-zero when the segment is being replayed from a
 *			socket's back_log queue (checksum/demux already done)
 *
 *	Demultiplexes to a socket, runs the per-state TCP processing and
 *	always returns 0.  NOTE the deliberate switch fall-throughs below;
 *	they are part of the state machine, not accidents.
 */
int
tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
	unsigned long daddr, unsigned short len,
	unsigned long saddr, int redo, struct inet_protocol * protocol)
{
	struct tcphdr *th;
	struct sock *sk;

	if (!skb) {
		return(0);
	}

	if (!dev)
	{
		return(0);
	}

	tcp_statistics.TcpInSegs++;

	/* Only segments addressed to this host are processed. */
	if(skb->pkt_type!=PACKET_HOST)
	{
		kfree_skb(skb,FREE_READ);
		return(0);
	}

	th = skb->h.th;

	/* Find the socket this segment belongs to. */
	sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);

	/* A zapped (already reset) socket counts as no socket at all. */
	if (sk!=NULL && sk->zapped)
		sk=NULL;

	if (!redo) {
		/* First pass: verify the checksum before touching anything. */
		if (tcp_check(th, len, saddr, daddr )) {
			skb->sk = NULL;
			kfree_skb(skb,FREE_READ);
			/* Bad checksum: discard silently. */
			return(0);
		}

		/* Sequence number is kept in host order from here on. */
		th->seq = ntohl(th->seq);

		/* No matching socket: answer with a reset (never reset a reset). */
		if (sk == NULL) {
			if (!th->rst)
				tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev,skb->ip_hdr->tos,255);
			skb->sk = NULL;
			kfree_skb(skb, FREE_READ);
			return(0);
		}

		skb->len = len;
		skb->sk = sk;
		skb->acked = 0;
		skb->used = 0;
		skb->free = 0;
		skb->saddr = daddr;
		skb->daddr = saddr;

		/* If the socket is busy, park the segment on its backlog;
		   release_sock() will replay it later with redo set. */
		cli();
		if (sk->inuse) {
			skb_queue_head(&sk->back_log, skb);
			sti();
			return(0);
		}
		sk->inuse = 1;
		sti();
	} else {
		/* Backlog replay: the socket may have vanished meanwhile. */
		if (!sk) {
			return(0);
		}
	}

	if (!sk->prot) {
		return(0);
	}

	/* Enforce the receive buffer quota before accepting the segment. */
	if (sk->rmem_alloc + skb->mem_len >= sk->rcvbuf) {
		skb->sk = NULL;
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	}
	sk->rmem_alloc += skb->mem_len;

	/* Per-state processing. */
	switch(sk->state) {

		/*
		 * A RST in LAST_ACK kills the connection outright; anything
		 * else falls through to the established-state handling below.
		 */
		case TCP_LAST_ACK:
			if (th->rst) {
				sk->zapped=1;
				sk->err = ECONNRESET;
				sk->state = TCP_CLOSE;
				sk->shutdown = SHUTDOWN_MASK;
				if (!sk->dead) {
					sk->state_change(sk);
				}
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
		/* fall through */
		case TCP_ESTABLISHED:
		case TCP_CLOSE_WAIT:
		case TCP_CLOSING:
		case TCP_FIN_WAIT1:
		case TCP_FIN_WAIT2:
		case TCP_TIME_WAIT:
			/* Out-of-window segments are dropped (tcp_sequence re-ACKs). */
			if (!tcp_sequence(sk, th, len, opt, saddr,dev)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			/* In-window RST: hard error reported to the user. */
			if (th->rst)
			{
				tcp_statistics.TcpEstabResets++;
				tcp_statistics.TcpCurrEstab--;
				sk->zapped=1;

				sk->err = ECONNRESET;

				/* CLOSE_WAIT means we already saw their FIN. */
				if (sk->state == TCP_CLOSE_WAIT)
				{
					sk->err = EPIPE;
				}

				sk->state = TCP_CLOSE;
				sk->shutdown = SHUTDOWN_MASK;
				if (!sk->dead)
				{
					sk->state_change(sk);
				}
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
			/* An in-window SYN on an established connection is an
			   error: tear down and reset the sender. */
			if (th->syn)
			{
				tcp_statistics.TcpCurrEstab--;
				tcp_statistics.TcpEstabResets++;
				sk->err = ECONNRESET;
				sk->state = TCP_CLOSE;
				sk->shutdown = SHUTDOWN_MASK;
				tcp_reset(daddr, saddr, th, sk->prot, opt,dev, sk->ip_tos,sk->ip_ttl);
				if (!sk->dead) {
					sk->state_change(sk);
				}
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			/* Process ACK, urgent pointer, payload and FIN in turn;
			   a non-zero return means the segment was consumed. */
			if (th->ack && !tcp_ack(sk, th, saddr, len)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			if (tcp_urg(sk, th, saddr, len)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			if (tcp_data(skb, sk, saddr, len)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			/* FIN last: its ACK handling depends on the data above. */
			if (th->fin && tcp_fin(skb, sk, th, saddr, dev)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			release_sock(sk);
			return(0);

		case TCP_CLOSE:
			/* Dead socket, or one still bound to a peer: ignore. */
			if (sk->dead || sk->daddr) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			/* Anything but a RST gets a RST in reply. */
			if (!th->rst) {
				if (!th->ack)
					th->ack_seq = 0;
				tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);

		case TCP_LISTEN:
			if (th->rst) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
			/* An ACK to a listening socket is bogus: reset the sender. */
			if (th->ack) {
				tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			if (th->syn)
			{
				/* tcp_conn_request() creates the new socket and
				   sends the SYN|ACK; it owns the skb from here. */
				tcp_conn_request(sk, skb, daddr, saddr, opt, dev);
				release_sock(sk);
				return(0);
			}

			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);

		case TCP_SYN_RECV:
			if (th->syn) {
				/* Duplicate SYN while we wait for the ACK. */
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
		/* fall through: non-SYN segments share the default path */

		default:
			if (!tcp_sequence(sk, th, len, opt, saddr,dev))
			{
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
		/* fall through: SYN_SENT skips the sequence check above */

		case TCP_SYN_SENT:
			/* RST in answer to our SYN: connection refused. */
			if (th->rst)
			{
				tcp_statistics.TcpAttemptFails++;
				sk->err = ECONNREFUSED;
				sk->state = TCP_CLOSE;
				sk->shutdown = SHUTDOWN_MASK;
				sk->zapped = 1;
				if (!sk->dead)
				{
					sk->state_change(sk);
				}
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
			/* SYN without ACK: simultaneous open. */
			if (!th->ack)
			{
				if (th->syn)
				{
					sk->state = TCP_SYN_RECV;
				}

				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			switch(sk->state)
			{
				case TCP_SYN_SENT:
					/* The ACK must cover our SYN. */
					if (!tcp_ack(sk, th, saddr, len))
					{
						tcp_statistics.TcpAttemptFails++;
						tcp_reset(daddr, saddr, th,
							sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
						kfree_skb(skb, FREE_READ);
						release_sock(sk);
						return(0);
					}

					/* A bare ACK cannot complete the handshake
					   until the peer's SYN arrives. */
					if (!th->syn)
					{
						kfree_skb(skb, FREE_READ);
						release_sock(sk);
						return(0);
					}

					/* SYN|ACK: acknowledge it, then fall through
					   to the SYN_RECV completion code below. */
					sk->acked_seq = th->seq+1;
					sk->fin_seq = th->seq;
					tcp_send_ack(sk->sent_seq, th->seq+1,
						sk, th, sk->daddr);
				/* fall through */
				case TCP_SYN_RECV:
					if (!tcp_ack(sk, th, saddr, len))
					{
						tcp_statistics.TcpAttemptFails++;
						tcp_reset(daddr, saddr, th,
							sk->prot, opt, dev,sk->ip_tos,sk->ip_ttl);
						kfree_skb(skb, FREE_READ);
						release_sock(sk);
						return(0);
					}

					/* Handshake complete: connection established. */
					tcp_statistics.TcpCurrEstab++;
					sk->state = TCP_ESTABLISHED;

					tcp_options(sk, th);
					sk->dummy_th.dest = th->source;
					sk->copied_seq = sk->acked_seq-1;
					if (!sk->dead) {
						sk->state_change(sk);
					}

					/* NOTE(review): a zero advertised window is
					   patched to 32 here -- presumably a guard
					   against broken peers; confirm intent. */
					if (sk->max_window == 0) {
						sk->max_window = 32;
						sk->mss = min(sk->max_window, sk->mtu);
					}

					/* The completing segment may itself carry
					   urgent data, payload and a FIN. */
					if (th->urg) {
						if (tcp_urg(sk, th, saddr, len)) {
							kfree_skb(skb, FREE_READ);
							release_sock(sk);
							return(0);
						}
					}
					if (tcp_data(skb, sk, saddr, len))
						kfree_skb(skb, FREE_READ);

					if (th->fin) tcp_fin(skb, sk, th, saddr, dev);
					release_sock(sk);
					return(0);
			}

			/* States reached through the default label land here:
			   handle urgent data, payload and FIN as usual. */
			if (th->urg) {
				if (tcp_urg(sk, th, saddr, len)) {
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}
			}

			if (tcp_data(skb, sk, saddr, len)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			if (!th->fin) {
				release_sock(sk);
				return(0);
			}
			tcp_fin(skb, sk, th, saddr, dev);
			release_sock(sk);
			return(0);
	}
}
3728
3729
3730
3731
3732
3733
3734
3735 static void tcp_write_wakeup(struct sock *sk)
3736 {
3737 struct sk_buff *buff;
3738 struct tcphdr *t1;
3739 struct device *dev=NULL;
3740 int tmp;
3741
3742 if (sk->zapped)
3743 return;
3744
3745
3746
3747
3748
3749
3750 if (sk->state != TCP_ESTABLISHED &&
3751 sk->state != TCP_CLOSE_WAIT &&
3752 sk->state != TCP_FIN_WAIT1 &&
3753 sk->state != TCP_LAST_ACK &&
3754 sk->state != TCP_CLOSING
3755 ) {
3756 return;
3757 }
3758
3759 buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
3760 if (buff == NULL)
3761 return;
3762
3763 buff->len = sizeof(struct tcphdr);
3764 buff->free = 1;
3765 buff->sk = sk;
3766 buff->localroute = sk->localroute;
3767
3768 t1 = (struct tcphdr *) buff->data;
3769
3770
3771 tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
3772 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
3773 if (tmp < 0)
3774 {
3775 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
3776 return;
3777 }
3778
3779 buff->len += tmp;
3780 t1 = (struct tcphdr *)((char *)t1 +tmp);
3781
3782 memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
3783
3784
3785
3786
3787
3788 t1->seq = htonl(sk->sent_seq-1);
3789 t1->ack = 1;
3790 t1->res1= 0;
3791 t1->res2= 0;
3792 t1->rst = 0;
3793 t1->urg = 0;
3794 t1->psh = 0;
3795 t1->fin = 0;
3796 t1->syn = 0;
3797 t1->ack_seq = ntohl(sk->acked_seq);
3798 t1->window = ntohs(tcp_select_window(sk));
3799 t1->doff = sizeof(*t1)/4;
3800 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
3801
3802
3803
3804
3805 sk->prot->queue_xmit(sk, dev, buff, 1);
3806 tcp_statistics.TcpOutSegs++;
3807 }
3808
3809 void
3810 tcp_send_probe0(struct sock *sk)
3811 {
3812 if (sk->zapped)
3813 return;
3814
3815 tcp_write_wakeup(sk);
3816
3817 sk->backoff++;
3818 sk->rto = min(sk->rto << 1, 120*HZ);
3819 reset_timer (sk, TIME_PROBE0, sk->rto);
3820 sk->retransmits++;
3821 sk->prot->retransmits ++;
3822 }
3823
3824
3825
3826
3827
3828 int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
3829 {
3830 int val,err;
3831
3832 if(level!=SOL_TCP)
3833 return ip_setsockopt(sk,level,optname,optval,optlen);
3834
3835 if (optval == NULL)
3836 return(-EINVAL);
3837
3838 err=verify_area(VERIFY_READ, optval, sizeof(int));
3839 if(err)
3840 return err;
3841
3842 val = get_fs_long((unsigned long *)optval);
3843
3844 switch(optname)
3845 {
3846 case TCP_MAXSEG:
3847
3848
3849
3850
3851
3852
3853 if(val<1||val>MAX_WINDOW)
3854 return -EINVAL;
3855 sk->user_mss=val;
3856 return 0;
3857 case TCP_NODELAY:
3858 sk->nonagle=(val==0)?0:1;
3859 return 0;
3860 default:
3861 return(-ENOPROTOOPT);
3862 }
3863 }
3864
3865 int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
3866 {
3867 int val,err;
3868
3869 if(level!=SOL_TCP)
3870 return ip_getsockopt(sk,level,optname,optval,optlen);
3871
3872 switch(optname)
3873 {
3874 case TCP_MAXSEG:
3875 val=sk->user_mss;
3876 break;
3877 case TCP_NODELAY:
3878 val=sk->nonagle;
3879 break;
3880 default:
3881 return(-ENOPROTOOPT);
3882 }
3883 err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
3884 if(err)
3885 return err;
3886 put_fs_long(sizeof(int),(unsigned long *) optlen);
3887
3888 err=verify_area(VERIFY_WRITE, optval, sizeof(int));
3889 if(err)
3890 return err;
3891 put_fs_long(val,(unsigned long *)optval);
3892
3893 return(0);
3894 }
3895
3896
/*
 *	Protocol operations table for TCP, handed to the generic INET socket
 *	layer.  Initialisation is positional, so entries must stay in struct
 *	proto declaration order.  The slot names in the comments are inferred
 *	from the functions plugged in -- confirm against the struct proto
 *	definition in sock.h.
 */
struct proto tcp_prot = {
	sock_wmalloc,			/* wmalloc: write-buffer allocation */
	sock_rmalloc,			/* rmalloc: read-buffer allocation */
	sock_wfree,			/* wfree */
	sock_rfree,			/* rfree */
	sock_rspace,			/* rspace: free receive space */
	sock_wspace,			/* wspace: free send space */
	tcp_close,			/* close */
	tcp_read,			/* read */
	tcp_write,			/* write */
	tcp_sendto,			/* sendto */
	tcp_recvfrom,			/* recvfrom */
	ip_build_header,		/* build_header */
	tcp_connect,			/* connect */
	tcp_accept,			/* accept */
	ip_queue_xmit,			/* queue_xmit */
	tcp_retransmit,			/* retransmit */
	tcp_write_wakeup,		/* write_wakeup (zero-window probe) */
	tcp_read_wakeup,		/* read_wakeup */
	tcp_rcv,			/* rcv: segment input from IP */
	tcp_select,			/* select */
	tcp_ioctl,			/* ioctl */
	NULL,				/* init: none needed for TCP */
	tcp_shutdown,			/* shutdown */
	tcp_setsockopt,			/* setsockopt */
	tcp_getsockopt,			/* getsockopt */
	128,				/* max_header -- TODO confirm slot */
	0,				/* retransmit counter, starts at zero */
	{NULL,},			/* sock_array: per-port socket table */
	"TCP"				/* protocol name */
};