This source file includes the following definitions.
- min
- print_th
- get_firstr
- diff
- tcp_time_wait
- tcp_retransmit
- tcp_err
- tcp_readable
- tcp_select
- tcp_ioctl
- tcp_check
- tcp_send_check
- tcp_send_partial
- tcp_send_ack
- tcp_build_header
- tcp_write
- tcp_sendto
- tcp_read_wakeup
- cleanup_rbuf
- tcp_read_urg
- tcp_read_data
- tcp_read
- tcp_shutdown
- tcp_recvfrom
- tcp_reset
- tcp_options
- tcp_conn_request
- tcp_close
- tcp_write_xmit
- sort_send
- tcp_ack
- tcp_data
- tcp_urg
- tcp_fin
- tcp_accept
- tcp_connect
- tcp_sequence
- tcp_rcv
- tcp_write_wakeup
- tcp_setsockopt
- tcp_getsockopt
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77 #include <linux/types.h>
78 #include <linux/sched.h>
79 #include <linux/mm.h>
80 #include <linux/string.h>
81 #include <linux/socket.h>
82 #include <linux/sockios.h>
83 #include <linux/termios.h>
84 #include <linux/in.h>
85 #include <linux/fcntl.h>
86 #include "inet.h"
87 #include "devinet.h"
88 #include "ip.h"
89 #include "protocol.h"
90 #include "icmp.h"
91 #include "tcp.h"
92 #include "skbuff.h"
93 #include "sockinet.h"
94 #include "arp.h"
95 #include <linux/errno.h>
96 #include <linux/timer.h>
97 #include <asm/system.h>
98 #include <asm/segment.h>
99 #include <linux/mm.h>
100
101 #define SEQ_TICK 3
102 unsigned long seq_offset;
103
/*
 * Return the smaller of two unsigned values.  The result is truncated
 * to int, exactly as the original interface declares.
 */
static __inline__ int
min(unsigned int a, unsigned int b)
{
	return (a < b) ? a : b;
}
111
112
/*
 * Dump a TCP header to the kernel log for debugging.
 *
 * Emits nothing unless the global debug selector is set to DBG_TCP.
 * "ptr" is aimed just past the fixed header, so the last line shows
 * the first four option bytes (whatever happens to be there, even if
 * the segment carries no options).
 */
void print_th(struct tcphdr *th)
{
	unsigned char *ptr;

	if (inet_debug != DBG_TCP)
		return;

	printk("TCP header:\n");
	ptr =(unsigned char *)(th + 1);	/* first byte after the fixed header */
	printk("    source=%d, dest=%d, seq =%ld, ack_seq = %ld\n",
		ntohs(th->source), ntohs(th->dest),
		ntohl(th->seq), ntohl(th->ack_seq));
	printk("    fin=%d, syn=%d, rst=%d, psh=%d, ack=%d, urg=%d res1=%d res2=%d\n",
		th->fin, th->syn, th->rst, th->psh, th->ack,
		th->urg, th->res1, th->res2);
	printk("    window = %d, check = %d urg_ptr = %d\n",
		ntohs(th->window), ntohs(th->check), ntohs(th->urg_ptr));
	printk("    doff = %d\n", th->doff);
	printk("    options = %d %d %d %d\n", ptr[0], ptr[1], ptr[2], ptr[3]);
}
133
134
135
136
137
138
139
140 static struct sk_buff *get_firstr(struct sock *sk)
141 {
142 return skb_dequeue(&sk->rqueue);
143 }
144
145
146
147
148
/*
 * Signed distance between two sequence numbers, folded to a
 * non-negative value: returns |seq1 - seq2| under two's-complement
 * wraparound arithmetic.
 */
static long diff(unsigned long seq1, unsigned long seq2)
{
	long delta = (long)(seq1 - seq2);

	if (delta > 0)
		return delta;
	return -delta;	/* two's complement: identical to ~delta + 1 */
}
160
161
162
163
164
165
/*
 * Move the socket into TIME_WAIT: both directions are marked shut
 * down, the owner (if still alive) is told about the state change,
 * and the close timer is armed so the socket is reaped after
 * TCP_TIMEWAIT_LEN ticks.
 */
static void tcp_time_wait(struct sock *sk)
{
	sk->state = TCP_TIME_WAIT;
	sk->shutdown = SHUTDOWN_MASK;
	if (!sk->dead)
		sk->state_change(sk);
	reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
}
174
175
176
177
178
179
180
181
/*
 * Retransmit unacknowledged data (the actual resend is delegated to
 * ip_retransmit()).
 *
 * all != 0: the caller wants a wholesale retransmit -- congestion
 *           state is left untouched.
 * all == 0: a loss-triggered retransmit -- back off first by halving
 *           the congestion window (only while it is above 4) and
 *           turning off exponential growth.
 */
static void tcp_retransmit(struct sock *sk, int all)
{
	if (all)
	{
		ip_retransmit(sk, all);
		return;
	}

	/* Multiplicative decrease on presumed congestion loss. */
	if (sk->cong_window > 4)
		sk->cong_window = sk->cong_window / 2;
	sk->exp_growth = 0;

	ip_retransmit(sk, all);
}
197
198
199
200
201
202
203
204
205
206
/*
 * ICMP error handler for TCP.
 *
 * "header" points at the offending IP header returned inside the ICMP
 * message; we step past it (4 * ihl bytes) to reach the embedded TCP
 * header, then look up the owning socket.  err < 0 is treated as a
 * ready-made errno; otherwise err packs the ICMP type in the high
 * byte and an index into icmp_err_convert[] in the low byte.
 */
void tcp_err(int err, unsigned char *header, unsigned long daddr,
	unsigned long saddr, struct inet_protocol *protocol)
{
	struct tcphdr *th;
	struct sock *sk;
	struct iphdr *iph=(struct iphdr *)header;

	header+=4*iph->ihl;	/* skip the quoted IP header */

	DPRINTF((DBG_TCP, "TCP: tcp_err(%d, hdr=%X, daddr=%X saddr=%X, protocol=%X)\n",
		err, header, daddr, saddr, protocol));

	th =(struct tcphdr *)header;
	sk = get_sock(&tcp_prot, th->source, daddr, th->dest, saddr);
	print_th(th);

	/* Nobody owns this connection any more: nothing to report. */
	if (sk == NULL)
		return;

	/* A negative err is already an errno value. */
	if(err<0)
	{
		sk->err = -err;
		sk->error_report(sk);
		return;
	}

	if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8))
	{
		/* Source quench: shrink the congestion window a notch,
		 * but never below the floor of 4. */
		if (sk->cong_window > 4) sk->cong_window--;
		return;
	}

	DPRINTF((DBG_TCP, "TCP: icmp_err got error\n"));
	sk->err = icmp_err_convert[err & 0xff].errno;

	/* A fatal ICMP error aborts a connection attempt in progress;
	 * established connections keep the errno but stay up. */
	if (icmp_err_convert[err & 0xff].fatal)
	{
		if (sk->state == TCP_SYN_SENT)
		{
			sk->state = TCP_CLOSE;
			sk->error_report(sk);
			sk->state_change(sk);
		}
	}
	return;
}
263
264
265
266
267
268
269
/*
 * Count how many bytes are sequentially readable on this socket,
 * starting at copied_seq+1.  Walks the receive queue with interrupts
 * off; a gap in sequence numbers stops the count, a SYN consumes one
 * sequence number without contributing data, and urgent bytes
 * (urg_ptr) are subtracted because they are read out of band.
 *
 * NOTE(review): the accumulator is unsigned long but the declared
 * return type is int -- a very large backlog would truncate; confirm
 * against callers before changing.
 */
static int tcp_readable(struct sock *sk)
{
	unsigned long counted;
	unsigned long amount;
	struct sk_buff *skb;
	int count=0;
	int sum;
	unsigned long flags;

	DPRINTF((DBG_TCP, "tcp_readable(sk=%X)\n", sk));
	if(sk && sk->debug)
		printk("tcp_readable: %p - ",sk);

	if (sk == NULL || skb_peek(&sk->rqueue) == NULL)
	{
		if(sk && sk->debug)
			printk("empty\n");
		return(0);
	}

	counted = sk->copied_seq+1;	/* next sequence number we expect to hand out */
	amount = 0;

	save_flags(flags);
	cli();	/* queue is also touched at interrupt time */
	skb =(struct sk_buff *)sk->rqueue;

	/* Walk the circular queue until we wrap or hit a sequence gap. */
	do
	{
		count++;
		if (before(counted, skb->h.th->seq))
			break;	/* hole in the stream: nothing past here is readable yet */
		sum = skb->len -(counted - skb->h.th->seq);	/* bytes in this skb not yet counted */
		if (skb->h.th->syn)
			sum++;	/* SYN occupies a sequence number */
		if (skb->h.th->urg)
		{
			sum -= ntohs(skb->h.th->urg_ptr);	/* urgent data is read out of band */
		}
		if (sum >= 0)
		{
			amount += sum;
			if (skb->h.th->syn)
				amount--;	/* the SYN itself carries no data byte */
			counted += sum;
		}

		skb =(struct sk_buff *)skb->next;
	}
	while(skb != sk->rqueue);
	restore_flags(flags);
	DPRINTF((DBG_TCP, "tcp readable returning %d bytes\n", amount));
	if(sk->debug)
		printk("got %lu bytes.\n",amount);
	return(amount);
}
328
329
330
331
332
333
334
/*
 * select() backend for TCP sockets.
 *
 * SEL_IN  - readable when data is queued (or the socket is listening
 *           with a pending connection on rqueue), on a pending error,
 *           or when receive has been shut down.
 * SEL_OUT - writable when write space is at least one MTU and the
 *           three-way handshake has completed; never writable after
 *           SEND_SHUTDOWN.
 * SEL_EX  - exceptional condition when an error is pending.
 *
 * In every branch the socket is registered on the wait queue first
 * (select_wait) and released (release_sock) before returning.
 */
static int tcp_select(struct sock *sk, int sel_type, select_table *wait)
{
	DPRINTF((DBG_TCP, "tcp_select(sk=%X, sel_type = %d, wait = %X)\n",
		sk, sel_type, wait));

	sk->inuse = 1;
	switch(sel_type)
	{
	case SEL_IN:
		if(sk->debug)
			printk("select in");
		select_wait(sk->sleep, wait);
		if(sk->debug)
			printk("-select out");
		if (skb_peek(&sk->rqueue) != NULL)
		{
			/* For a listener any queued skb is a connection request;
			 * otherwise require actually readable bytes. */
			if (sk->state == TCP_LISTEN || tcp_readable(sk))
			{
				release_sock(sk);
				if(sk->debug)
					printk("-select ok data\n");
				return(1);
			}
		}
		if (sk->err != 0)	/* wake so the error can be collected */
		{
			release_sock(sk);
			if(sk->debug)
				printk("-select ok error");
			return(1);
		}
		if (sk->shutdown & RCV_SHUTDOWN)
		{
			release_sock(sk);
			if(sk->debug)
				printk("-select ok down\n");
			return(1);
		}
		else
		{
			release_sock(sk);
			if(sk->debug)
				printk("-select fail\n");
			return(0);
		}
	case SEL_OUT:
		select_wait(sk->sleep, wait);
		if (sk->shutdown & SEND_SHUTDOWN)
		{
			DPRINTF((DBG_TCP,
				"write select on shutdown socket.\n"));

			/* Writes can never succeed again. */
			release_sock(sk);
			return(0);
		}

		/* Writable only when a full MTU of buffer space is free. */
		if (sk->prot->wspace(sk) >= sk->mtu)
		{
			release_sock(sk);

			/* Still connecting: hold the caller off. */
			if (sk->state == TCP_SYN_RECV ||
			    sk->state == TCP_SYN_SENT)
				return(0);
			return(1);
		}
		DPRINTF((DBG_TCP,
			"tcp_select: sleeping on write sk->wmem_alloc = %d, "
			"sk->packets_out = %d\n"
			"sk->wback = %X, sk->wfront = %X\n"
			"sk->send_seq = %u, sk->window_seq=%u\n",
			sk->wmem_alloc, sk->packets_out,
			sk->wback, sk->wfront,
			sk->send_seq, sk->window_seq));

		release_sock(sk);
		return(0);
	case SEL_EX:
		select_wait(sk->sleep,wait);
		if (sk->err)
		{
			release_sock(sk);
			return(1);
		}
		release_sock(sk);
		return(0);
	}

	/* Unknown selector: not ready. */
	release_sock(sk);
	return(0);
}
429
430
/*
 * ioctl() handler for TCP sockets.
 *
 * TIOCINQ / FIONREAD - bytes readable (tcp_readable); -EINVAL on a
 *                      listening socket.
 * SIOCATMARK         - 1 if the next byte to read is urgent data.
 * TIOCOUTQ           - free write buffer space; -EINVAL on a listener.
 * DDIOCSDBG          - debug control, forwarded to dbg_ioctl.
 *
 * Results are copied to user space with verify_area + put_fs_long.
 */
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int err;
	DPRINTF((DBG_TCP, "tcp_ioctl(sk=%X, cmd = %d, arg=%X)\n", sk, cmd, arg));
	switch(cmd)
	{
	case DDIOCSDBG:
		return(dbg_ioctl((void *) arg, DBG_TCP));

	case TIOCINQ:
#ifdef FIXME
	case FIONREAD:
#endif
		{
			unsigned long amount;

			/* A listener queues connections, not bytes. */
			if (sk->state == TCP_LISTEN)
				return(-EINVAL);

			sk->inuse = 1;
			amount = tcp_readable(sk);
			release_sock(sk);
			DPRINTF((DBG_TCP, "returning %d\n", amount));
			err=verify_area(VERIFY_WRITE,(void *)arg,
					sizeof(unsigned long));
			if(err)
				return err;
			put_fs_long(amount,(unsigned long *)arg);
			return(0);
		}
	case SIOCATMARK:
		{
			struct sk_buff *skb;
			int answ = 0;

			/* At the mark when the head skb starts exactly at
			 * the next unread byte and carries URG. */
			sk->inuse = 1;
			if ((skb=skb_peek(&sk->rqueue)) != NULL)
			{
				if (sk->copied_seq+1 == skb->h.th->seq && skb->h.th->urg)
					answ = 1;
			}
			release_sock(sk);
			err=verify_area(VERIFY_WRITE,(void *) arg,
					sizeof(unsigned long));
			if(err)
				return err;
			/* NOTE(review): verifies sizeof(unsigned long) but stores
			 * through an (int *) -- widths happen to match on i386. */
			put_fs_long(answ,(int *) arg);
			return(0);
		}
	case TIOCOUTQ:
		{
			unsigned long amount;

			if (sk->state == TCP_LISTEN)
				return(-EINVAL);
			amount = sk->prot->wspace(sk);
			err=verify_area(VERIFY_WRITE,(void *)arg,
					sizeof(unsigned long));
			if(err)
				return err;
			put_fs_long(amount,(unsigned long *)arg);
			return(0);
		}
	default:
		return(-EINVAL);
	}
}
502
503
504
505
506
507
/*
 * Compute the TCP checksum (16-bit one's-complement sum) over the
 * pseudo header plus "len" bytes of segment, using i386 inline
 * assembly.  A zero source address is replaced by our own address.
 *
 * Stages: (1) fold saddr, daddr and the protocol/length word into the
 * running sum; (2) add the segment four bytes at a time; (3) fold the
 * 32-bit sum down to 16 bits; (4) mop up a trailing halfword and/or
 * byte.  The one's-complement of the folded sum is returned.
 *
 * NOTE(review): the pseudo-header word is built as
 * (ntohs(len) << 16) + IPPROTO_TCP*256, a byte-order trick specific
 * to this little-endian implementation -- do not "simplify".
 */
unsigned short tcp_check(struct tcphdr *th, int len,
	unsigned long saddr, unsigned long daddr)
{
	unsigned long sum;

	if (saddr == 0)
		saddr = my_addr();
	print_th(th);
	/* Pseudo header: source, destination, protocol and length. */
	__asm__("\t addl %%ecx,%%ebx\n"
		"\t adcl %%edx,%%ebx\n"
		"\t adcl $0, %%ebx\n"
		: "=b"(sum)
		: "0"(daddr), "c"(saddr), "d"((ntohs(len) << 16) + IPPROTO_TCP*256)
		: "cx","bx","dx" );

	/* Bulk of the segment, one 32-bit word per iteration. */
	if (len > 3)
	{
		__asm__("\tclc\n"
			"1:\n"
			"\t lodsl\n"
			"\t adcl %%eax, %%ebx\n"
			"\t loop 1b\n"
			"\t adcl $0, %%ebx\n"
			: "=b"(sum) , "=S"(th)
			: "0"(sum), "c"(len/4) ,"1"(th)
			: "ax", "cx", "bx", "si" );
	}

	/* Fold the 32-bit accumulator into 16 bits with end-around carry. */
	__asm__("\t movl %%ebx, %%ecx\n"
		"\t shrl $16,%%ecx\n"
		"\t addw %%cx, %%bx\n"
		"\t adcw $0, %%bx\n"
		: "=b"(sum)
		: "0"(sum)
		: "bx", "cx");

	/* Leftover 16-bit word, if any. */
	if ((len & 2) != 0)
	{
		__asm__("\t lodsw\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum), "=S"(th)
			: "0"(sum) ,"1"(th)
			: "si", "ax", "bx");
	}

	/* Leftover final byte, zero-padded on the high side. */
	if ((len & 1) != 0)
	{
		__asm__("\t lodsb\n"
			"\t movb $0,%%ah\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum)
			: "0"(sum) ,"S"(th)
			: "si", "ax", "bx");
	}

	/* One's complement of the folded sum. */
	return((~sum) & 0xffff);
}
571
572
573 void tcp_send_check(struct tcphdr *th, unsigned long saddr,
574 unsigned long daddr, int len, struct sock *sk)
575 {
576 th->check = 0;
577 th->check = tcp_check(th, len, saddr, daddr);
578 return;
579 }
580
581
582
583
584
/*
 * Flush the socket's pending partially-filled segment (sk->send_tmp):
 * checksum it, stamp its end sequence number, and either transmit it
 * now or append it to the write queue when the send window is closed
 * or the congestion window is full.  Clears send_tmp in either case.
 */
static void tcp_send_partial(struct sock *sk)
{
	struct sk_buff *skb;

	if (sk == NULL || sk->send_tmp == NULL)
		return;

	skb = sk->send_tmp;

	/* Checksum the TCP portion: length is everything in the skb from
	 * the start of the TCP header to the end of the data. */
	tcp_send_check(skb->h.th, sk->saddr, sk->daddr,
		skb->len-(unsigned long)skb->h.th +
		(unsigned long)(skb+1), sk);

	/* Sequence number of the segment's last byte, used by the ACK
	 * machinery to retire it. */
	skb->h.seq = sk->send_seq;

	/* Blocked by the peer's window or our congestion window: park the
	 * segment at the tail of the write queue for later. */
	if (after(sk->send_seq , sk->window_seq) ||
	    sk->packets_out >= sk->cong_window)
	{
		DPRINTF((DBG_TCP, "sk->cong_window = %d, sk->packets_out = %d\n",
			sk->cong_window, sk->packets_out));
		DPRINTF((DBG_TCP, "sk->send_seq = %d, sk->window_seq = %d\n",
			sk->send_seq, sk->window_seq));
		skb->next = NULL;
		skb->magic = TCP_WRITE_QUEUE_MAGIC;
		if (sk->wback == NULL)
		{
			sk->wfront=skb;
		}
		else
		{
			sk->wback->next = skb;
		}
		sk->wback = skb;
	}
	else
	{
		sk->prot->queue_xmit(sk, skb->dev, skb,0);
	}
	sk->send_tmp = NULL;
}
638
639
640
641
642
643
/*
 * Build and transmit a bare ACK segment ("sequence" is our sequence
 * number, "ack" the acknowledgement number, both host order; "th" is
 * the header of the segment being acknowledged, used as a template
 * for the port numbers).
 *
 * If no buffer memory is available the ACK is deferred: ack_backlog
 * is bumped and the write timer re-armed so it is retried shortly.
 * Does nothing on a zapped (reset) socket.
 */
static void tcp_send_ack(unsigned long sequence, unsigned long ack,
	struct sock *sk,
	struct tcphdr *th, unsigned long daddr)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	struct device *dev = NULL;
	int tmp;

	if(sk->zapped)
		return;

	/* GFP_ATOMIC: we may be called from interrupt context. */
	buff = (struct sk_buff *) sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
	{
		/* No memory: remember the debt and retry off the timer. */
		sk->ack_backlog++;
		if (sk->timeout != TIME_WRITE && tcp_connected(sk->state))
		{
			reset_timer(sk, TIME_WRITE, 10);
		}
		if (inet_debug == DBG_SLIP)
			printk("\rtcp_ack: malloc failed\n");
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_ACK_SIZE;
	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;
	t1 =(struct tcphdr *)(buff + 1);

	/* Lay down the IP (and link) header first. */
	tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,
		sk->ip_ttl,sk->ip_tos);
	if (tmp < 0)
	{
		buff->free=1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		if (inet_debug == DBG_SLIP) printk("\rtcp_ack: build_header failed\n");
		return;
	}
	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)t1 +tmp);

	/* Start from the incoming header, then swap the ports and set
	 * the pure-ACK flag pattern. */
	memcpy(t1, th, sizeof(*t1));

	t1->dest = th->source;
	t1->source = th->dest;
	t1->seq = ntohl(sequence);
	t1->ack = 1;
	sk->window = 4096;	/* fixed advertised window */
	t1->window = ntohs(sk->window);
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	t1->fin = 0;
	if (ack == sk->acked_seq)
	{
		/* This ACK covers everything owed: clear the bookkeeping. */
		sk->ack_backlog = 0;
		sk->bytes_rcv = 0;
		sk->ack_timed = 0;
		if (sk->send_head == NULL && sk->wfront == NULL)
		{
			/* Nothing in flight and nothing queued. */
		}
	}
	t1->ack_seq = ntohl(ack);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), sk);
	if (sk->debug)
		printk("\rtcp_ack: seq %lx ack %lx\n", sequence, ack);
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
726
727
728
729
730
731
/*
 * Fill in a TCP header for an outgoing data segment from the socket's
 * dummy_th template.  "push" is the number of bytes that will remain
 * to be sent after this segment -- PSH is set only when it is zero,
 * i.e. on the last chunk of a write.  Also resets the delayed-ACK
 * bookkeeping (this segment carries the ACK) and computes the window
 * we advertise from the amount of unread, acknowledged data.
 *
 * Returns the header size in bytes.
 */
static int tcp_build_header(struct tcphdr *th, struct sock *sk, int push)
{
	memcpy(th,(void *) &(sk->dummy_th), sizeof(*th));
	th->seq = htonl(sk->send_seq);
	th->psh =(push == 0) ? 1 : 0;	/* push only the final chunk */
	th->doff = sizeof(*th)/4;
	th->ack = 1;
	th->fin = 0;
	sk->ack_backlog = 0;	/* this segment settles any ACK debt */
	sk->bytes_rcv = 0;
	sk->ack_timed = 0;
	th->ack_seq = htonl(sk->acked_seq);
	/* Advertise 4096 minus what we've acked but the user hasn't read. */
	sk->window = 4096-diff(sk->acked_seq,sk->copied_seq);
	th->window = htons(sk->window);

	return(sizeof(*th));
}
751
752
753
754
755
756
757
/*
 * The TCP send path: copy up to "len" bytes from user space ("from")
 * into segments and transmit or queue them.
 *
 * Per loop iteration: (1) bail out on a pending error or a sent-down
 * socket; (2) wait (or fail with -EAGAIN when nonblock) until the
 * connection is ESTABLISHED/CLOSE_WAIT; (3) if a partial segment is
 * pending (send_tmp), append to it and flush it when full; (4)
 * otherwise size a new segment from the send window and MTU, allocate
 * an skb (sleeping for memory unless nonblock), build the IP and TCP
 * headers, copy the payload, checksum, and either transmit or queue
 * on the write queue when the peer window or congestion window is
 * closed.  MSG_OOB marks the segment urgent and bypasses coalescing.
 *
 * Returns bytes accepted, or a negative errno only when nothing was
 * copied.  The lock discipline (sk->inuse / release_sock, cli/sti
 * around the sleeps) is load-bearing; do not reorder.
 */
static int tcp_write(struct sock *sk, unsigned char *from,
	int len, int nonblock, unsigned flags)
{
	int copied = 0;
	int copy;
	int tmp;
	struct sk_buff *skb;
	unsigned char *buff;
	struct proto *prot;
	struct device *dev = NULL;

	DPRINTF((DBG_TCP, "tcp_write(sk=%X, from=%X, len=%d, nonblock=%d, flags=%X)\n",
		sk, from, len, nonblock, flags));

	sk->inuse=1;
	prot = sk->prot;

	while(len > 0)
	{
		/* Pending asynchronous error: report it, unless we already
		 * moved some data (then report the byte count instead). */
		if (sk->err)
		{
			release_sock(sk);
			if (copied)
				return(copied);
			tmp = -sk->err;
			sk->err = 0;
			return(tmp);
		}

		/* Sending direction has been shut down. */
		if (sk->shutdown & SEND_SHUTDOWN)
		{
			release_sock(sk);
			sk->err = EPIPE;
			if (copied)
				return(copied);
			sk->err = 0;
			return(-EPIPE);
		}

		/* Wait for the connection to come up (or fail). */
		while(sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)
		{
			if (sk->err)
			{
				release_sock(sk);
				if (copied)
					return(copied);
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			/* Not even connecting: the connection is dead. */
			if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV)
			{
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 1\n"));
				if (copied)
					return(copied);

				if (sk->err)
				{
					tmp = -sk->err;
					sk->err = 0;
					return(tmp);
				}

				if (sk->keepopen)
				{
					send_sig(SIGPIPE, current, 0);
				}
				return(-EPIPE);
			}

			if (nonblock )
			{
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 2\n"));
				if (copied)
					return(copied);
				return(-EAGAIN);
			}

			/* Sleep until the state changes; cli/sti close the
			 * race between the re-check and the sleep. */
			release_sock(sk);
			cli();
			if (sk->state != TCP_ESTABLISHED &&
				sk->state != TCP_CLOSE_WAIT && sk->err == 0)
			{
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked)
				{
					sti();
					DPRINTF((DBG_TCP, "tcp_write: return 3\n"));
					if (copied)
						return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
		}

		/* A partially filled segment is pending: top it up. */
		if (sk->send_tmp != NULL)
		{
			copy=0;

			skb = sk->send_tmp;
			if (!(flags & MSG_OOB))
			{
				/* Free room left in the pending buffer. */
				copy = min(sk->mss - skb->len + 128 +
					prot->max_header, len);

				if (copy <= 0)
				{
					printk("TCP: **bug**: \"copy\" <= 0!!\n");
					copy = 0;
				}

				memcpy_fromfs((unsigned char *)(skb+1) + skb->len, from, copy);
				skb->len += copy;
				from += copy;
				copied += copy;
				len -= copy;
				sk->send_seq += copy;
			}

			/* Flush when the segment reached the MSS, on OOB data,
			 * or when nothing could be appended. */
			if (skb->len -(unsigned long)skb->h.th + (unsigned long)(skb+1) >= sk->mss ||(flags & MSG_OOB) || copy==0)
			{
				tcp_send_partial(sk);
			}
			continue;
		}

		/* Size the next segment from the usable send window... */
		copy = min(sk->mtu, diff(sk->window_seq, sk->send_seq));

		/* ...but never bother with tiny or oversized windows. */
		if (copy < 200 || copy > sk->mtu) copy = sk->mtu;
		copy = min(copy, len);

		/* Sub-MSS write with data already in flight: start a
		 * coalescing buffer instead of sending a runt (Nagle-style). */
		if (sk->packets_out && copy < sk->mss && !(flags & MSG_OOB))
		{
			release_sock(sk);

			skb = (struct sk_buff *) prot->wmalloc(sk,
				sk->mss + 128 + prot->max_header +
				sizeof(*skb), 0, GFP_KERNEL);
			sk->inuse = 1;
			sk->send_tmp = skb;
			if (skb != NULL)
				skb->mem_len = sk->mss + 128 + prot->max_header + sizeof(*skb);
		}
		else
		{
			release_sock(sk);
			skb = (struct sk_buff *) prot->wmalloc(sk,
				copy + prot->max_header +
				sizeof(*skb), 0, GFP_KERNEL);
			sk->inuse = 1;
			if (skb != NULL)
				skb->mem_len = copy+prot->max_header + sizeof(*skb);
		}

		/* No buffer memory: fail (nonblock) or sleep for space. */
		if (skb == NULL)
		{
			if (nonblock )
			{
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 4\n"));
				if (copied)
					return(copied);
				return(-EAGAIN);
			}

			/* Sleep only if no memory was freed meanwhile. */
			tmp = sk->wmem_alloc;
			release_sock(sk);
			cli();

			if (tmp <= sk->wmem_alloc && (sk->state == TCP_ESTABLISHED||sk->state == TCP_CLOSE_WAIT) && sk->err == 0)
			{
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked)
				{
					sti();
					DPRINTF((DBG_TCP, "tcp_write: return 5\n"));
					if (copied)
						return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
			continue;
		}

		skb->mem_addr = skb;
		skb->len = 0;
		skb->sk = sk;
		skb->free = 0;

		buff =(unsigned char *)(skb+1);

		/* IP (and link-level) header first; picks the device too. */
		tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev,
			IPPROTO_TCP, sk->opt, skb->mem_len, sk->ip_ttl,sk->ip_tos);
		if (tmp < 0 )
		{
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 6\n"));
			if (copied)
				return(copied);
			return(tmp);
		}

		skb->len += tmp;
		skb->dev = dev;
		buff += tmp;
		skb->h.th =(struct tcphdr *) buff;

		/* TCP header; len-copy tells it whether this is the last
		 * chunk (controls PSH). */
		tmp = tcp_build_header((struct tcphdr *)buff, sk, len-copy);
		if (tmp < 0)
		{
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 7\n"));
			if (copied)
				return(copied);
			return(tmp);
		}

		if (flags & MSG_OOB)
		{
			((struct tcphdr *)buff)->urg = 1;
			((struct tcphdr *)buff)->urg_ptr = ntohs(copy);
		}

		skb->len += tmp;

		/* Copy the payload in from user space. */
		memcpy_fromfs(buff+tmp, from, copy);

		from += copy;
		copied += copy;
		len -= copy;
		skb->len += copy;
		skb->free = 0;
		sk->send_seq += copy;

		/* A coalescing buffer was started above: leave it pending. */
		if (sk->send_tmp != NULL)
			continue;

		tcp_send_check((struct tcphdr *)buff, sk->saddr, sk->daddr,
			copy + sizeof(struct tcphdr), sk);

		skb->h.seq = sk->send_seq;

		/* Window or congestion closed: queue; otherwise transmit. */
		if (after(sk->send_seq , sk->window_seq) || sk->packets_out >= sk->cong_window)
		{
			DPRINTF((DBG_TCP, "sk->cong_window = %d, sk->packets_out = %d\n",
				sk->cong_window, sk->packets_out));
			DPRINTF((DBG_TCP, "sk->send_seq = %d, sk->window_seq = %d\n",
				sk->send_seq, sk->window_seq));
			skb->next = NULL;
			skb->magic = TCP_WRITE_QUEUE_MAGIC;
			if (sk->wback == NULL)
			{
				sk->wfront = skb;
			}
			else
			{
				sk->wback->next = skb;
			}
			sk->wback = skb;
		}
		else
		{
			prot->queue_xmit(sk, dev, skb,0);
		}
	}
	sk->err = 0;

	/* Flush a leftover partial segment if there is room in flight. */
	if(sk->send_tmp && sk->packets_out <sk->cong_window)
		tcp_send_partial(sk);

	release_sock(sk);
	DPRINTF((DBG_TCP, "tcp_write: return 8\n"));
	return(copied);
}
1136
1137
1138 static int tcp_sendto(struct sock *sk, unsigned char *from,
1139 int len, int nonblock, unsigned flags,
1140 struct sockaddr_in *addr, int addr_len)
1141 {
1142 struct sockaddr_in sin;
1143
1144 if (addr_len < sizeof(sin))
1145 return(-EINVAL);
1146
1147 memcpy_fromfs(&sin, addr, sizeof(sin));
1148
1149 if (sin.sin_family && sin.sin_family != AF_INET)
1150 return(-EINVAL);
1151 if (sin.sin_port != sk->dummy_th.dest)
1152 return(-EINVAL);
1153 if (sin.sin_addr.s_addr != sk->daddr)
1154 return(-EINVAL);
1155 return(tcp_write(sk, from, len, nonblock, flags));
1156 }
1157
1158
/*
 * Send a window-update/bare ACK after the reader has drained data,
 * but only when ACKs are actually owed (ack_backlog != 0).  If no
 * buffer memory is available the write timer is re-armed so the ACK
 * is retried shortly.
 */
static void
tcp_read_wakeup(struct sock *sk)
{
	int tmp;
	struct device *dev = NULL;
	struct tcphdr *t1;
	struct sk_buff *buff;

	DPRINTF((DBG_TCP, "in tcp read wakeup\n"));
	if (!sk->ack_backlog)
		return;	/* nothing owed */

	/* GFP_ATOMIC: may run from the timer/interrupt path. */
	buff = (struct sk_buff *) sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
	if (buff == NULL)
	{
		/* No memory now: try again off the timer. */
		reset_timer(sk, TIME_WRITE, 10);
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_ACK_SIZE;
	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;

	/* IP (and link) header first. */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
		IPPROTO_TCP, sk->opt, MAX_ACK_SIZE, sk->ip_ttl,sk->ip_tos);
	if (tmp < 0)
	{
		buff->free=1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		return;
	}

	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)(buff+1) +tmp);

	/* Pure ACK: template header with only the ACK bit set and a
	 * freshly advertised window. */
	memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
	t1->seq = ntohl(sk->send_seq);
	t1->ack = 1;
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	sk->window = 4096;	/* fixed advertised window */
	t1->window = ntohs(sk->window);
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

	sk->prot->queue_xmit(sk, dev, buff, 1);
}
1238
1239
1240
1241
1242
1243
1244
1245
1246
/*
 * Reap fully-consumed buffers from the head of the receive queue and,
 * if that freed receive space, schedule a window update for the peer:
 * immediately (tcp_read_wakeup) when the advertised window has shrunk
 * close to empty (< 3 MTUs), otherwise via a delayed-ACK timer.
 */
static void cleanup_rbuf(struct sock *sk)
{
	unsigned long flags;
	int left;
	struct sk_buff *skb;

	if(sk->debug)
		printk("cleaning rbuf for sk=%p\n", sk);

	save_flags(flags);
	cli();	/* the queue is also modified at interrupt time */

	left = sk->prot->rspace(sk);	/* space before reaping, for comparison */

	/* Free leading skbs the reader has completely consumed; stop at
	 * the first one still in use. */
	while((skb=skb_peek(&sk->rqueue)) != NULL )
	{
		if (!skb->used)
			break;
		skb_unlink(skb);
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
	}

	restore_flags(flags);

	DPRINTF((DBG_TCP, "sk->window left = %d, sk->prot->rspace(sk)=%d\n",
		sk->window - sk->bytes_rcv, sk->prot->rspace(sk)));

	if(sk->debug)
		printk("sk->rspace = %lu, was %d\n", sk->prot->rspace(sk),left);
	if (sk->prot->rspace(sk) != left)
	{
		/* Space changed: the peer should hear about the new window. */
		sk->ack_backlog++;
		if (4096-diff(sk->acked_seq,sk->copied_seq) - sk->bytes_rcv < 3*sk->mtu)
		{
			/* Window nearly closed: update the peer right away. */
			tcp_read_wakeup(sk);
		}
		else
		{
			/* Plenty of window left: piggyback/delay the ACK, but
			 * never push an already-armed earlier timer back. */
			int was_active = del_timer(&sk->timer);
			if (!was_active || TCP_ACK_TIME < sk->timer.expires)
			{
				reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
			}
			else
				add_timer(&sk->timer);
		}
	}
}
1321
1322
1323
1324
1325
1326
/*
 * Read out-of-band (urgent) data.  Blocks (unless nonblock) until an
 * unread urgent byte run is queued, then copies up to "len" bytes of
 * it from the start of the segment's data area to user space and,
 * unless MSG_PEEK, marks it consumed (skb->urg_used, sk->urg--).
 * Returns bytes copied or a negative errno.
 *
 * NOTE(review): after scanning the whole queue without finding unused
 * urgent data the outer while(len > 0) iterates again with sk->urg
 * still set -- termination relies on the urg bookkeeping elsewhere;
 * the trailing "sk->urg = 0" is reached only when len <= 0 on entry.
 */
static int tcp_read_urg(struct sock * sk, int nonblock,
	unsigned char *to, int len, unsigned flags)
{
	int copied = 0;
	struct sk_buff *skb;
	int err;

	DPRINTF((DBG_TCP, "tcp_read_urg(sk=%X, to=%X, len=%d, flags=%X)\n",
		sk, to, len, flags));

	err=verify_area(VERIFY_WRITE,to,len);
	if(err)
		return err;

	while(len > 0)
	{
		sk->inuse = 1;
		/* Wait until urgent data exists and the queue is non-empty. */
		while(sk->urg==0 || skb_peek(&sk->rqueue) == NULL)
		{
			if (sk->err)
			{
				int tmp;

				release_sock(sk);
				if (copied)
					return(copied);
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			if (sk->state == TCP_CLOSE || sk->done)
			{
				release_sock(sk);
				if (copied)
					return(copied);
				if (!sk->done)
				{
					sk->done = 1;	/* first EOF read returns 0 */
					return(0);
				}
				return(-ENOTCONN);
			}

			if (sk->shutdown & RCV_SHUTDOWN)
			{
				release_sock(sk);
				if (copied == 0)
					sk->done = 1;
				return(copied);
			}

			if (nonblock || copied)
			{
				release_sock(sk);
				if (copied)
					return(copied);
				return(-EAGAIN);
			}

			/* Sleep; cli/sti close the race with the recheck. */
			release_sock(sk);
			cli();
			if ((sk->urg == 0 || skb_peek(&sk->rqueue) == NULL) &&
			    sk->err == 0 && !(sk->shutdown & RCV_SHUTDOWN))
			{
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked)
				{
					sti();
					if (copied)
						return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
		}

		/* Scan the queue for a segment with unconsumed urgent data. */
		skb = skb_peek(&sk->rqueue);
		do
		{
			int amt;

			if (skb->h.th->urg && !skb->urg_used)
			{
				/* urg_ptr == 0 would mean "no urgent bytes";
				 * treat the whole segment as urgent instead. */
				if (skb->h.th->urg_ptr == 0)
				{
					skb->h.th->urg_ptr = ntohs(skb->len);
				}
				amt = min(ntohs(skb->h.th->urg_ptr),len);
				if(amt)
				{
					verify_area(VERIFY_WRITE, to, amt);
					memcpy_tofs(to,(unsigned char *)(skb->h.th) +
						skb->h.th->doff*4, amt);
				}

				if (!(flags & MSG_PEEK))
				{
					skb->urg_used = 1;
					sk->urg--;
				}
				release_sock(sk);
				copied += amt;
				return(copied);
			}
			skb =(struct sk_buff *)skb->next;
		}
		while(skb != sk->rqueue);
	}
	sk->urg = 0;
	release_sock(sk);
	return(0);
}
1442
1443
1444
1445
1446
1447
/*
 * Core receive routine: copy up to "len" bytes of in-sequence stream
 * data to user space.
 *
 * "type" != 0 makes PSH terminate the read early (the normal read()
 * behaviour); MSG_OOB diverts to tcp_read_urg(); MSG_PEEK copies
 * without advancing copied_seq or marking skbs used.  Blocks (unless
 * nonblock, or some data was already copied) while the next
 * in-sequence byte has not arrived.  An unread urgent byte in the
 * path aborts with SIGURG/-EINTR so the caller can collect it out of
 * band.  Returns bytes copied or a negative errno.
 */
static int tcp_read_data(int type,struct sock *sk, unsigned char *to,
	int len, int nonblock, unsigned flags)
{
	int copied=0;
	struct sk_buff *skb;
	unsigned long offset;
	unsigned long used;
	int err;

	if (len == 0)
		return(0);
	if (len < 0)
	{
		return(-EINVAL);
	}

	err=verify_area(VERIFY_WRITE,to,len);
	if(err)
		return err;

	/* A listener has no byte stream. */
	if (sk->state == TCP_LISTEN)
		return(-ENOTCONN);

	/* Out-of-band reads take a separate path. */
	if ((flags & MSG_OOB))
		return(tcp_read_urg(sk, nonblock, to, len, flags));

	sk->inuse = 1;

	skb=skb_peek(&sk->rqueue);

	DPRINTF((DBG_TCP, "tcp_read(sk=%X, to=%X, len=%d, nonblock=%d, flags=%X)\n",
		sk, to, len, nonblock, flags));

	while(len > 0)
	{
		/* Wait until the skb holding the next in-sequence byte is
		 * available (queue non-empty, no gap, head not consumed). */
		while(skb == NULL || before(sk->copied_seq+1, skb->h.th->seq) || skb->used)
		{
			DPRINTF((DBG_TCP, "skb = %X:\n", skb));

			/* Reap consumed buffers / update the window first. */
			cleanup_rbuf(sk);

			if (sk->err)
			{
				int tmp;

				release_sock(sk);
				if (copied)
				{
					DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
						copied));
					return(copied);
				}
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			if (sk->state == TCP_CLOSE)
			{
				release_sock(sk);
				if (copied)
				{
					DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
						copied));
					return(copied);
				}
				if (!sk->done)
				{
					sk->done = 1;	/* first read at EOF returns 0 */
					return(0);
				}
				return(-ENOTCONN);
			}

			if (sk->shutdown & RCV_SHUTDOWN)
			{
				release_sock(sk);
				if (copied == 0)
					sk->done = 1;
				DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
				return(copied);
			}

			/* Never block once some data has been delivered. */
			if (nonblock || copied)
			{
				release_sock(sk);
				if(sk->debug)
					printk("read: EAGAIN\n");
				if (copied)
				{
					DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
						copied));
					return(copied);
				}
				return(-EAGAIN);
			}

			if ((flags & MSG_PEEK) && copied != 0)
			{
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
				return(copied);
			}

			DPRINTF((DBG_TCP, "tcp_read about to sleep. state = %d\n",
				sk->state));
			release_sock(sk);

			/* cli/sti close the race between recheck and sleep. */
			cli();

			if (sk->shutdown & RCV_SHUTDOWN || sk->err != 0)
			{
				sk->inuse = 1;
				sti();
				continue;	/* re-run the exit checks above */
			}

			if (skb_peek(&sk->rqueue) == NULL || before(sk->copied_seq+1, sk->rqueue->h.th->seq))
			{
				if(sk->debug)
					printk("Read wait sleep\n");
				interruptible_sleep_on(sk->sleep);
				if(sk->debug)
					printk("Read wait wakes\n");
				if (current->signal & ~current->blocked)
				{
					sti();
					if (copied)
					{
						DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
							copied));
						return(copied);
					}
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
			DPRINTF((DBG_TCP, "tcp_read woke up. \n"));

			skb=skb_peek(&sk->rqueue);
		}

		/* Byte offset of the next unread byte inside this skb. */
		offset = sk->copied_seq+1 - skb->h.th->seq;

		if (skb->h.th->syn)
			offset--;	/* SYN takes a sequence number, no data byte */

		if (offset < skb->len)
		{
			if (skb->h.th->urg)
			{
				if (skb->urg_used)
				{
					/* Urgent bytes already read OOB: skip past them. */
					sk->copied_seq += ntohs(skb->h.th->urg_ptr);
					offset += ntohs(skb->h.th->urg_ptr);
					if (offset >= skb->len)
					{
						skb->used = 1;
						skb =(struct sk_buff *)skb->next;
						continue;
					}
				}
				else
				{
					/* Unread urgent data blocks the stream: tell
					 * the process so it can fetch it with MSG_OOB. */
					release_sock(sk);
					if (copied)
						return(copied);
					send_sig(SIGURG, current, 0);
					return(-EINTR);
				}
			}

			used = min(skb->len - offset, len);

			memcpy_tofs(to,((unsigned char *)skb->h.th) +
				skb->h.th->doff*4 + offset, used);
			copied += used;
			len -= used;
			to += used;

			if (!(flags & MSG_PEEK))
				sk->copied_seq += used;

			/* Fully consumed (and not peeking, no pending urgent
			 * data): let cleanup_rbuf() free it later. */
			if (!(flags & MSG_PEEK) && (!skb->h.th->urg || skb->urg_used) && (used + offset >= skb->len))
				skb->used = 1;

			/* PSH (for read-type calls) or URG ends the read. */
			if ((skb->h.th->psh && type) || skb->h.th->urg)
			{
				break;
			}
		}
		else
		{
			/* Nothing unread in this skb: mark it consumed. */
			skb->used = 1;
		}

		skb =(struct sk_buff *)skb->next;
	}

	/* Reap what we consumed and let the peer know the new window. */
	cleanup_rbuf(sk);
	release_sock(sk);
	DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
	if (copied == 0 && nonblock)
		return(-EAGAIN);
	return(copied);
}
1755
1756
1757
1758
1759
1760
/*
 * read()/recv() entry point: a thin wrapper asking tcp_read_data()
 * for a normal stream read (type 1, so PSH terminates the read).
 */
static int tcp_read(struct sock *sk, unsigned char *to,
	int len, int nonblock, unsigned flags)
{
	return tcp_read_data(1, sk, to, len, nonblock, flags);
}
1766
1767
1768
1769
1770
1771
1772
/*
 * shutdown() of the sending direction: flush any pending partial
 * segment, then build and send (or queue behind pending writes) a
 * FIN, and advance the state to FIN_WAIT1 (from ESTABLISHED) or
 * FIN_WAIT2 (from any other state).  A "how" without SEND_SHUTDOWN
 * is a no-op, as is calling this when a FIN is already in flight.
 *
 * NOTE(review): moving straight to FIN_WAIT2 from non-ESTABLISHED
 * states looks questionable against the RFC 793 state machine --
 * behaviour kept as-is.
 */
void tcp_shutdown(struct sock *sk, int how)
{
	struct sk_buff *buff;
	struct tcphdr *t1, *th;
	struct proto *prot;
	int tmp;
	struct device *dev = NULL;

	/* FIN already sent: nothing more to do. */
	if (sk->state == TCP_FIN_WAIT1 || sk->state == TCP_FIN_WAIT2)
		return;
	if (!(how & SEND_SHUTDOWN))
		return;
	sk->inuse = 1;

	/* Push out any coalesced partial segment before the FIN. */
	if (sk->send_tmp)
		tcp_send_partial(sk);

	prot =(struct proto *)sk->prot;
	th =(struct tcphdr *)&sk->dummy_th;
	release_sock(sk);	/* wmalloc may sleep */
	buff = (struct sk_buff *) prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);

	if (buff == NULL)
		return;

	sk->inuse = 1;

	DPRINTF((DBG_TCP, "tcp_shutdown_send buff = %X\n", buff));
	buff->mem_addr = buff;
	buff->mem_len = MAX_RESET_SIZE;
	buff->sk = sk;
	buff->len = sizeof(*t1);
	t1 =(struct tcphdr *)(buff + 1);

	/* IP (and link) header first. */
	tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
		IPPROTO_TCP, sk->opt,
		sizeof(struct tcphdr),
		sk->ip_ttl,sk->ip_tos
		);
	if (tmp < 0)
	{
		buff->free=1;
		prot->wfree(sk,buff->mem_addr, buff->mem_len);
		release_sock(sk);
		DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
		return;
	}

	t1 =(struct tcphdr *)((char *)t1 +tmp);
	buff ->len += tmp;
	buff->dev = dev;
	memcpy(t1, th, sizeof(*t1));
	t1->seq = ntohl(sk->send_seq);
	sk->send_seq++;	/* the FIN consumes one sequence number */
	buff->h.seq = sk->send_seq;
	t1->ack = 1;
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->window = ntohs(sk->prot->rspace(sk));
	t1->fin = 1;
	t1->rst = 0;
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

	/* Data still queued ahead of us: the FIN must go last. */
	if (sk->wback != NULL)
	{
		buff->free=0;
		buff->next = NULL;
		sk->wback->next = buff;
		sk->wback = buff;
		buff->magic = TCP_WRITE_QUEUE_MAGIC;
	}
	else
	{
		sk->prot->queue_xmit(sk, dev, buff, 0);
	}

	if (sk->state == TCP_ESTABLISHED)
		sk->state = TCP_FIN_WAIT1;
	else
		sk->state = TCP_FIN_WAIT2;

	release_sock(sk);
}
1871
1872
/*
 * recvfrom() on a TCP socket: same as a plain read, but also reports
 * the peer's address.  The user pointers are validated *before* the
 * (possibly blocking) read so we never lose received data to a bad
 * address argument.
 */
static int tcp_recvfrom(struct sock *sk, unsigned char *to,
		int to_len, int nonblock, unsigned flags,
		struct sockaddr_in *addr, int *addr_len)
{
	struct sockaddr_in sin;
	int len;
	int err;
	int result;

	/* Check both user buffers up front; cap len at our sockaddr size. */
	err = verify_area(VERIFY_WRITE,addr_len,sizeof(long));
	if(err)
		return err;
	len = get_fs_long(addr_len);
	if(len > sizeof(sin))
		len = sizeof(sin);
	err=verify_area(VERIFY_WRITE, addr, len);
	if(err)
		return err;

	result=tcp_read_data(1,sk, to, to_len, nonblock, flags);

	if (result < 0)
		return(result);

	/* TCP is connected: the source is always the established peer. */
	sin.sin_family = AF_INET;
	sin.sin_port = sk->dummy_th.dest;
	sin.sin_addr.s_addr = sk->daddr;

	memcpy_tofs(addr, &sin, len);
	put_fs_long(len, addr_len);
	return(result);
}
1911
1912
1913
1914
1915
1916
/*
 * Send a RST in reply to an offending segment.  There is no socket
 * context (buff->sk is NULL).  Sequence numbers follow RFC 793: if
 * the offender carried an ACK, our RST takes its seq from that ACK
 * field and carries no ACK itself; otherwise we ACK the offender's
 * sequence (counting a SYN as one octet) and use seq 0.
 */
static void tcp_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
	  struct proto *prot, struct options *opt, struct device *dev)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	int tmp;

	/* GFP_ATOMIC: we may be running at interrupt time. */
	buff = (struct sk_buff *) prot->wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
		return;

	DPRINTF((DBG_TCP, "tcp_reset buff = %X\n", buff));
	buff->mem_addr = buff;
	buff->mem_len = MAX_RESET_SIZE;
	buff->len = sizeof(*t1);
	buff->sk = NULL;
	buff->dev = dev;

	t1 =(struct tcphdr *)(buff + 1);

	/* Lay down the IP header in front of the TCP header. */
	tmp = prot->build_header(buff, saddr, daddr, &dev, IPPROTO_TCP, opt,
			   sizeof(struct tcphdr),255, IPTOS_RELIABILITY);
	if (tmp < 0)
	{
		buff->free = 1;
		prot->wfree(NULL, buff->mem_addr, buff->mem_len);
		return;
	}
	t1 =(struct tcphdr *)((char *)t1 +tmp);
	buff->len += tmp;
	/* Start from a copy of the offender's header, then fix it up. */
	memcpy(t1, th, sizeof(*t1));

	/* Swap the ports: this reply goes back to the sender. */
	t1->dest = th->source;
	t1->source = th->dest;
	t1->rst = 1;
	t1->window = 0;

	if(th->ack)
	{
		/* RST seq = their ACK value; no ACK of our own. */
		t1->ack=0;
		t1->seq=th->ack_seq;
		t1->ack_seq=0;
	}
	else
	{
		/* No ACK on the offender: ACK its seq (SYN counts as one). */
		t1->ack=1;
		if(!th->syn)
			t1->ack_seq=htonl(th->seq);
		else
			t1->ack_seq=htonl(th->seq+1);
		t1->seq=0;
	}

	t1->syn = 0;
	t1->urg = 0;
	t1->fin = 0;
	t1->psh = 0;
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, saddr, daddr, sizeof(*t1), NULL);
	prot->queue_xmit(NULL, dev, buff, 1);
}
1988
1989
1990
1991
1992
1993
1994 static void tcp_options(struct sock *sk, struct tcphdr *th)
1995 {
1996 unsigned char *ptr;
1997 int length=(th->doff*4)-sizeof(struct tcphdr);
1998 int mtuset=0;
1999
2000 ptr = (unsigned char *)(th + 1);
2001
2002 while(length>0)
2003 {
2004 int opcode=*ptr++;
2005 int opsize=*ptr++;
2006 switch(opcode)
2007 {
2008 case TCPOPT_EOL:
2009 return;
2010 case TCPOPT_NOP:
2011 length-=2;
2012 continue;
2013
2014 default:
2015 if(opsize<=2)
2016 return;
2017 switch(opcode)
2018 {
2019 case TCPOPT_MSS:
2020 if(opsize==4)
2021 {
2022 sk->mtu=min(sk->mtu,ntohs(*(unsigned short *)ptr));
2023 mtuset=1;
2024 }
2025 break;
2026
2027
2028
2029 }
2030 ptr+=opsize-2;
2031 length-=opsize;
2032 }
2033 }
2034
2035 if (!mtuset)
2036 {
2037 sk->mtu = min(sk->mtu, 576 - HEADER_SIZE);
2038 return;
2039 }
2040 }
2041
2042
2043
2044
2045
2046
2047
2048
2049
/*
 * Handle a SYN arriving on a listening socket.  The listener is
 * cloned into a fresh sock in SYN_RECV state, the SYN's options are
 * parsed, and a SYN|ACK carrying our MSS option is sent.  The SYN
 * skb itself is re-queued on the listener so accept() can find the
 * new socket through it.
 */
static void tcp_conn_request(struct sock *sk, struct sk_buff *skb,
		 unsigned long daddr, unsigned long saddr,
		 struct options *opt, struct device *dev)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	unsigned char *ptr;
	struct sock *newsk;
	struct tcphdr *th;
	int tmp;

	DPRINTF((DBG_TCP, "tcp_conn_request(sk = %X, skb = %X, daddr = %X, sadd4= %X, \n"
		" opt = %X, dev = %X)\n",
		sk, skb, daddr, saddr, opt, dev));

	th = skb->h.th;

	/* A dead listener cannot accept anything: answer with a RST. */
	if (!sk->dead)
	{
		sk->data_ready(sk,0);
	}
	else
	{
		DPRINTF((DBG_TCP, "tcp_conn_request on dead socket\n"));
		tcp_reset(daddr, saddr, th, sk->prot, opt, dev);
		kfree_skb(skb, FREE_READ);
		return;
	}

	/* Backlog full: drop the SYN silently; the peer will retry. */
	if (sk->ack_backlog >= sk->max_ack_backlog)
	{
		kfree_skb(skb, FREE_READ);
		return;
	}

	/* Clone the listener for this new connection. */
	newsk = (struct sock *) kmalloc(sizeof(struct sock), GFP_ATOMIC);
	if (newsk == NULL)
	{
		/* Out of memory: drop and let the sender retransmit. */
		kfree_skb(skb, FREE_READ);
		return;
	}

	DPRINTF((DBG_TCP, "newsk = %X\n", newsk));
	memcpy((void *)newsk,(void *)sk, sizeof(*newsk));
	/* Scrub every field that must not be shared with the listener. */
	newsk->wback = NULL;
	newsk->wfront = NULL;
	newsk->rqueue = NULL;
	newsk->send_head = NULL;
	newsk->send_tail = NULL;
	newsk->back_log = NULL;
	newsk->rtt = TCP_CONNECT_TIME;
	newsk->mdev = 0;
	newsk->backoff = 0;
	newsk->blog = 0;
	newsk->intr = 0;
	newsk->proc = 0;
	newsk->done = 0;
	newsk->send_tmp = NULL;
	newsk->pair = NULL;
	newsk->wmem_alloc = 0;
	newsk->rmem_alloc = 0;

	newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;

	newsk->err = 0;
	newsk->shutdown = 0;
	newsk->ack_backlog = 0;
	/* The peer's SYN occupies one sequence number. */
	newsk->acked_seq = skb->h.th->seq+1;
	newsk->fin_seq = skb->h.th->seq;
	newsk->copied_seq = skb->h.th->seq;
	newsk->state = TCP_SYN_RECV;
	newsk->timeout = 0;
	/* Clock-driven initial send sequence, as elsewhere in this file. */
	newsk->send_seq = jiffies * SEQ_TICK - seq_offset;
	newsk->rcv_ack_seq = newsk->send_seq;
	newsk->urg =0;
	newsk->retransmits = 0;
	newsk->destroy = 0;
	newsk->timer.data = (unsigned long)newsk;
	newsk->timer.function = &net_timer;
	newsk->dummy_th.source = skb->h.th->dest;
	newsk->dummy_th.dest = skb->h.th->source;

	/* Swap addresses: their source is our destination. */
	newsk->daddr = saddr;
	newsk->saddr = daddr;

	put_sock(newsk->num,newsk);
	newsk->dummy_th.res1 = 0;
	newsk->dummy_th.doff = 6;
	newsk->dummy_th.fin = 0;
	newsk->dummy_th.syn = 0;
	newsk->dummy_th.rst = 0;
	newsk->dummy_th.psh = 0;
	newsk->dummy_th.ack = 0;
	newsk->dummy_th.urg = 0;
	newsk->dummy_th.res2 = 0;
	newsk->acked_seq = skb->h.th->seq + 1;
	newsk->copied_seq = skb->h.th->seq;

	/* Inherit TTL/TOS from the incoming SYN's IP header. */
	newsk->ip_ttl=skb->ip_hdr->ttl;
	newsk->ip_tos=skb->ip_hdr->tos;

	/* Parse the SYN's options (MSS clamps newsk->mtu). */
	tcp_options(newsk,skb->h.th);

	buff = (struct sk_buff *) newsk->prot->wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
	{
		sk->err = -ENOMEM;
		newsk->dead = 1;
		release_sock(newsk);
		kfree_skb(skb, FREE_READ);
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_SYN_SIZE;
	/* TCP header plus 4 bytes of MSS option. */
	buff->len = sizeof(struct tcphdr)+4;
	buff->sk = newsk;

	t1 =(struct tcphdr *)(buff + 1);

	/* Build the IP header for the SYN|ACK. */
	tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &dev,
			 IPPROTO_TCP, NULL, MAX_SYN_SIZE,newsk->ip_ttl,newsk->ip_tos);

	if (tmp < 0)
	{
		sk->err = tmp;
		buff->free=1;
		kfree_skb(buff,FREE_WRITE);
		newsk->dead = 1;
		release_sock(newsk);
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
		return;
	}

	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)t1 +tmp);

	memcpy(t1, skb->h.th, sizeof(*t1));
	buff->h.seq = newsk->send_seq;

	/* Fill in the SYN|ACK: our ports, seq, and window. */
	t1->dest = skb->h.th->source;
	t1->source = newsk->dummy_th.source;
	t1->seq = ntohl(newsk->send_seq++);
	t1->ack = 1;
	newsk->window = 4096;
	t1->window = ntohs(newsk->window);
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->psh = 0;
	t1->syn = 1;
	t1->ack_seq = ntohl(skb->h.th->seq+1);
	t1->doff = sizeof(*t1)/4+1;

	/* Append the MSS option: kind 2, length 4, our device MTU. */
	ptr =(unsigned char *)(t1+1);
	ptr[0] = 2;
	ptr[1] = 4;
	ptr[2] =((dev->mtu - HEADER_SIZE) >> 8) & 0xff;
	ptr[3] =(dev->mtu - HEADER_SIZE) & 0xff;

	tcp_send_check(t1, daddr, saddr, sizeof(*t1)+4, newsk);
	newsk->prot->queue_xmit(newsk, dev, buff, 0);

	reset_timer(newsk, TIME_WRITE , TCP_CONNECT_TIME);
	/* Hand the SYN skb (and its memory accounting) to the new sock,
	   but queue it on the listener for accept() to find. */
	skb->sk = newsk;

	sk->rmem_alloc -= skb->mem_len;
	newsk->rmem_alloc += skb->mem_len;

	skb_queue_tail(&sk->rqueue,skb);
	sk->ack_backlog++;
	release_sock(newsk);
}
2249
2250
2251
2252
2253
/*
 * Close a TCP socket.  Unread data forces a RST flag on the outgoing
 * FIN; otherwise an orderly FIN is queued/sent and the state machine
 * advances according to the current state.  'timeout' non-zero means
 * the caller will not wait for the handshake to finish.
 */
static void tcp_close(struct sock *sk, int timeout)
{
	struct sk_buff *buff;
	int need_reset = 0;
	struct tcphdr *t1, *th;
	struct proto *prot;
	struct device *dev=NULL;
	int tmp;

	DPRINTF((DBG_TCP, "tcp_close((struct sock *)%X, %d)\n",sk, timeout));
	sk->inuse = 1;
	sk->keepopen = 1;
	sk->shutdown = SHUTDOWN_MASK;

	/* Throw away anything still queued for reading; if the app never
	   consumed delivered data, signal that with a RST. */
	if (skb_peek(&sk->rqueue) != NULL)
	{
		struct sk_buff *skb;
		if(sk->debug)
			printk("Clean rcv queue\n");
		while((skb=skb_dequeue(&sk->rqueue))!=NULL)
		{
			if(skb->len > 0 && after(skb->h.th->seq + skb->len + 1 , sk->copied_seq))
				need_reset = 1;
			kfree_skb(skb, FREE_READ);
		}
		if(sk->debug)
			printk("Cleaned.\n");
	}
	sk->rqueue = NULL;

	/* Flush a half-built frame before the FIN. */
	if (sk->send_tmp)
	{
		tcp_send_partial(sk);
	}

	switch(sk->state)
	{
		case TCP_FIN_WAIT1:
		case TCP_FIN_WAIT2:
		case TCP_LAST_ACK:
			/* A FIN already went out; just arm a close timer. */
			reset_timer(sk, TIME_CLOSE, 4 * sk->rtt);
			if (timeout)
				tcp_time_wait(sk);
			release_sock(sk);
			if (!sk->dead)
				sk->state_change(sk);
			return;
		case TCP_TIME_WAIT:
			if (timeout)
			{
				sk->state = TCP_CLOSE;
			}
			release_sock(sk);
			if (!sk->dead)
				sk->state_change(sk);
			return;
		case TCP_LISTEN:
			sk->state = TCP_CLOSE;
			release_sock(sk);
			if (!sk->dead)
				sk->state_change(sk);
			return;
		case TCP_CLOSE:
			release_sock(sk);
			if (!sk->dead)
				sk->state_change(sk);
			return;
		case TCP_CLOSE_WAIT:
		case TCP_ESTABLISHED:
		case TCP_SYN_SENT:
		case TCP_SYN_RECV:
			prot =(struct proto *)sk->prot;
			th =(struct tcphdr *)&sk->dummy_th;
			buff = (struct sk_buff *) prot->wmalloc(sk, MAX_FIN_SIZE, 1, GFP_ATOMIC);
			if (buff == NULL)
			{
				/* No memory for the FIN: back off to ESTABLISHED
				   and retry the close from the timer shortly. */
				release_sock(sk);
				if (sk->state != TCP_CLOSE_WAIT)
					sk->state = TCP_ESTABLISHED;
				reset_timer(sk, TIME_CLOSE, 100);
				return;
			}
			buff->mem_addr = buff;
			buff->mem_len = MAX_FIN_SIZE;
			buff->sk = sk;
			buff->free = 1;
			buff->len = sizeof(*t1);
			t1 =(struct tcphdr *)(buff + 1);

			/* Build the IP header for the FIN segment. */
			tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
					IPPROTO_TCP, sk->opt,
					sizeof(struct tcphdr),sk->ip_ttl,sk->ip_tos);

			if (tmp < 0)
			{
				kfree_skb(buff,FREE_WRITE);
				DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
				release_sock(sk);
				if (!sk->dead)
					sk->state_change(sk);
				return;
			}

			t1 =(struct tcphdr *)((char *)t1 +tmp);
			buff ->len += tmp;
			buff->dev = dev;
			memcpy(t1, th, sizeof(*t1));
			t1->seq = ntohl(sk->send_seq);
			sk->send_seq++;		/* FIN takes one sequence number */
			buff->h.seq = sk->send_seq;
			t1->ack = 1;

			/* We are closing: no point delaying ACKs any longer. */
			sk->delay_acks = 0;
			t1->ack_seq = ntohl(sk->acked_seq);
			t1->window = ntohs(sk->prot->rspace(sk));
			t1->fin = 1;
			t1->rst = need_reset;	/* unread data was discarded */
			t1->doff = sizeof(*t1)/4;
			tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

			/* Send now if the write queue is empty, else queue the
			   FIN behind the remaining data. */
			if (sk->wfront == NULL)
			{
				prot->queue_xmit(sk, dev, buff, 0);
			}
			else
			{
				buff->free=0;
				reset_timer(sk, TIME_WRITE,backoff(sk->backoff) * (2 * sk->mdev + sk->rtt));
				buff->next = NULL;
				if (sk->wback == NULL)
				{
					sk->wfront=buff;
				}
				else
				{
					sk->wback->next = buff;
				}
				sk->wback = buff;
				buff->magic = TCP_WRITE_QUEUE_MAGIC;
			}

			/* NOTE(review): CLOSE_WAIT -> FIN_WAIT2 here where RFC 793
			   prescribes LAST_ACK; matches tcp_shutdown() above. */
			if (sk->state == TCP_CLOSE_WAIT)
			{
				sk->state = TCP_FIN_WAIT2;
			}
			else
			{
				sk->state = TCP_FIN_WAIT1;
			}
			break;
	}
	if (!sk->dead)
		sk->state_change(sk);
	release_sock(sk);
}
2422
2423
2424
2425
2426
2427
2428
/*
 * Push queued segments out of the write queue (sk->wfront) as long as
 * they fit inside the peer's advertised window and our congestion
 * window.  Segments already covered by a received ACK are freed
 * instead of sent.
 */
static void tcp_write_xmit(struct sock *sk)
{
	struct sk_buff *skb;

	DPRINTF((DBG_TCP, "tcp_write_xmit(sk=%X)\n", sk));

	/* Connection was reset underneath us: nothing may be sent. */
	if(sk->zapped)
		return;

	while(sk->wfront != NULL &&
		before(sk->wfront->h.seq, sk->window_seq) &&
		sk->packets_out < sk->cong_window)
	{
		/* Unlink the head segment from the write queue. */
		skb = sk->wfront;
		IS_SKB(skb);
		sk->wfront =(struct sk_buff *)skb->next;
		if (sk->wfront == NULL)
			sk->wback = NULL;
		skb->next = NULL;
		if (skb->magic != TCP_WRITE_QUEUE_MAGIC)
		{
			/* Corrupted queue: dump everything rather than loop. */
			printk("tcp.c skb with bad magic(%X) on write queue. Squashing "
				"queue\n", skb->magic);
			sk->wfront = NULL;
			sk->wback = NULL;
			return;
		}
		skb->magic = 0;
		DPRINTF((DBG_TCP, "Sending a packet.\n"));

		/* Already acknowledged while queued: free, don't transmit. */
		if (before(skb->h.seq, sk->rcv_ack_seq +1))
		{
			sk->retransmits = 0;
			kfree_skb(skb, FREE_WRITE);
			if (!sk->dead)
				sk->write_space(sk);
		}
		else
		{
			sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
		}
	}
}
2475
2476
2477
2478
2479
2480
2481
2482 void sort_send(struct sock *sk)
2483 {
2484 struct sk_buff *list = NULL;
2485 struct sk_buff *skb,*skb2,*skb3;
2486
2487 for (skb = sk->send_head; skb != NULL; skb = skb2)
2488 {
2489 skb2 = (struct sk_buff *)skb->link3;
2490 if (list == NULL || before (skb2->h.seq, list->h.seq))
2491 {
2492 skb->link3 = list;
2493 sk->send_tail = skb;
2494 list = skb;
2495 }
2496 else
2497 {
2498 for (skb3 = list; ; skb3 = (struct sk_buff *)skb3->link3)
2499 {
2500 if (skb3->link3 == NULL || before(skb->h.seq, skb3->link3->h.seq))
2501 {
2502 skb->link3 = skb3->link3;
2503 skb3->link3 = skb;
2504 if (skb->link3 == NULL)
2505 sk->send_tail = skb;
2506 break;
2507 }
2508 }
2509 }
2510 }
2511 sk->send_head = list;
2512 }
2513
2514
2515
2516
2517
2518
/*
 * Process the ACK (and window) field of an incoming segment: validate
 * the ACK number, apply window shrinkage, grow the congestion window,
 * free acknowledged segments off the retransmit list, update RTT
 * estimates, and drive the close-side state transitions.  Returns 1
 * if the ACK was acceptable, 0 if it was out of range.
 */
static int tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
{
	unsigned long ack;
	int flag = 0;	/* bit 1: data/progress seen, bit 2: seg acked, bit 4: window work */

	if(sk->zapped)
		return(1);

	ack = ntohl(th->ack_seq);
	DPRINTF((DBG_TCP, "tcp_ack ack=%d, window=%d, "
		"sk->rcv_ack_seq=%d, sk->window_seq = %d\n",
		ack, ntohs(th->window), sk->rcv_ack_seq, sk->window_seq));

	/* ACK outside [rcv_ack_seq-1, send_seq+1]: reject or treat as a
	   keepalive probe on an established connection. */
	if (after(ack, sk->send_seq+1) || before(ack, sk->rcv_ack_seq-1))
	{
		if (after(ack, sk->send_seq) || (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT))
		{
			return(0);
		}
		if (sk->keepopen)
		{
			reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
		}
		return(1);
	}

	/* Segment carries data (header shorter than total length). */
	if (len != th->doff*4)
		flag |= 1;

	/* The advertised window shrank: move segments that no longer fit
	   back from the retransmit list onto the front of the write queue. */
	if (after(sk->window_seq, ack+ntohs(th->window)))
	{
		struct sk_buff *skb;
		struct sk_buff *skb2;
		struct sk_buff *wskb = NULL;

		skb2 = sk->send_head;
		sk->send_head = NULL;
		sk->send_tail = NULL;

		flag |= 4;

		sk->window_seq = ack + ntohs(th->window);
		cli();
		while (skb2 != NULL)
		{
			skb = skb2;
			skb2 = (struct sk_buff *)skb->link3;
			skb->link3 = NULL;
			if (after(skb->h.seq, sk->window_seq))
			{
				/* Beyond the window: requeue for later sending. */
				if (sk->packets_out > 0)
					sk->packets_out--;

				if (skb->next != NULL)
				{
					skb_unlink(skb);
				}

				skb->magic = TCP_WRITE_QUEUE_MAGIC;
				if (wskb == NULL)
				{
					skb->next = sk->wfront;
					sk->wfront = skb;
				}
				else
				{
					skb->next = wskb->next;
					wskb->next = skb;
				}
				if (sk->wback == wskb)
					sk->wback = skb;
				wskb = skb;
			}
			else
			{
				/* Still inside the window: keep on retransmit list. */
				if (sk->send_head == NULL)
				{
					sk->send_head = skb;
					sk->send_tail = skb;
				}
				else
				{
					sk->send_tail->link3 = skb;
					sk->send_tail = skb;
				}
				skb->link3 = NULL;
			}
		}
		sti();
	}

	/* Keep head/tail/packet count consistent if either end is gone. */
	if (sk->send_tail == NULL || sk->send_head == NULL)
	{
		sk->send_head = NULL;
		sk->send_tail = NULL;
		sk->packets_out= 0;
	}

	sk->window_seq = ack + ntohs(th->window);

	/* Congestion window growth on forward progress: exponential
	   during slow-start, linear afterwards. */
	if (sk->cong_window < 2048 && ack != sk->rcv_ack_seq)
	{
		if (sk->exp_growth)
			sk->cong_window *= 2;
		else
			sk->cong_window++;
	}

	DPRINTF((DBG_TCP, "tcp_ack: Updating rcv ack sequence.\n"));
	sk->rcv_ack_seq = ack;

	/* Release every fully-acknowledged segment from the retransmit
	   list, updating the RTT estimate along the way. */
	while(sk->send_head != NULL)
	{
		/* Sanity: the list must be sequence-ordered. */
		if (sk->send_head->link3 && after(sk->send_head->h.seq, sk->send_head->link3->h.seq))
		{
			printk("INET: tcp.c: *** bug send_list out of order.\n");
			sort_send(sk);
		}

		if (before(sk->send_head->h.seq, ack+1))
		{
			struct sk_buff *oskb;

			sk->retransmits = 0;

			if (sk->packets_out > 0)
				sk->packets_out --;
			DPRINTF((DBG_TCP, "skb=%X skb->h.seq = %d acked ack=%d\n",
				sk->send_head, sk->send_head->h.seq, ack));

			if (!sk->dead)
				sk->write_space(sk);

			oskb = sk->send_head;

			/* RTT sampling (Karn-style: skip retransmitted data);
			   smoothed rtt = 7/8 old + 1/8 sample, mdev = 3/4 + 1/4. */
			if (sk->retransmits == 0 && !(flag&2))
			{
				long abserr, rtt = jiffies - oskb->when;

				if (sk->state == TCP_SYN_SENT || sk->state == TCP_SYN_RECV)
					/* first measurement: take it as-is */
					sk->rtt = rtt;
				else
				{
					abserr = (rtt > sk->rtt) ? rtt - sk->rtt : sk->rtt - rtt;
					sk->rtt = (7 * sk->rtt + rtt) >> 3;
					sk->mdev = (3 * sk->mdev + abserr) >> 2;
				}
				sk->backoff = 0;
			}
			flag |= (2|4);

			/* Clamp the estimate to sane jiffy bounds. */
			if (sk->rtt < 10)
				sk->rtt = 10;

			if (sk->rtt > 12000)
				sk->rtt = 12000;

			cli();

			/* Detach and free the acknowledged segment. */
			oskb = sk->send_head;
			IS_SKB(oskb);
			sk->send_head =(struct sk_buff *)oskb->link3;
			if (sk->send_head == NULL)
			{
				sk->send_tail = NULL;
			}

			skb_unlink(oskb);
			sti();
			oskb->magic = 0;
			kfree_skb(oskb, FREE_WRITE);
			if (!sk->dead)
				sk->write_space(sk);
		}
		else
		{
			break;
		}
	}

	/* More queued data that now fits in the window? Send it.
	   Otherwise manage the timers for the idle/half-closed cases. */
	if (sk->wfront != NULL)
	{
		if (after (sk->window_seq, sk->wfront->h.seq) && sk->packets_out < sk->cong_window)
		{
			flag |= 1;
			tcp_write_xmit(sk);
		}
	}
	else
	{
		if (sk->send_head == NULL && sk->ack_backlog == 0 && sk->state != TCP_TIME_WAIT && !sk->keepopen)
		{
			DPRINTF((DBG_TCP, "Nothing to do, going to sleep.\n"));
			if (!sk->dead)
				sk->write_space(sk);

			/* NOTE(review): keepopen is false on this path, so the
			   reset_timer branch below appears unreachable. */
			if (sk->keepopen)
				reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
			else
				delete_timer(sk);
		}
		else
		{
			if (sk->state != (unsigned char) sk->keepopen)
			{
				reset_timer(sk, TIME_WRITE, backoff(sk->backoff) * (2 * sk->mdev + sk->rtt));
			}
			if (sk->state == TCP_TIME_WAIT)
			{
				reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			}
		}
	}

	/* Everything flushed: push out any half-built partial frame. */
	if (sk->packets_out == 0 && sk->send_tmp != NULL && sk->wfront == NULL && sk->send_head == NULL)
	{
		flag |= 1;
		tcp_send_partial(sk);
	}

	/* Close-side state transitions driven by this ACK. */
	if (sk->state == TCP_TIME_WAIT)
	{
		if (sk->rcv_ack_seq == sk->send_seq && sk->acked_seq == sk->fin_seq)
		{
			flag |= 1;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			if (!sk->dead)
				sk->state_change(sk);
		}
	}

	if (sk->state == TCP_LAST_ACK || sk->state == TCP_FIN_WAIT2)
	{
		if (sk->rcv_ack_seq == sk->send_seq)
		{
			flag |= 1;
			if (sk->acked_seq != sk->fin_seq)
			{
				tcp_time_wait(sk);
			}
			else
			{
				DPRINTF((DBG_TCP, "tcp_ack closing socket - %X\n", sk));
				tcp_send_ack(sk->send_seq, sk->acked_seq, sk,
					th, sk->daddr);
				sk->shutdown = SHUTDOWN_MASK;
				sk->state = TCP_CLOSE;
			}
		}
		if (!sk->dead)
			sk->state_change(sk);

	}

	/* A stale unacked segment and no progress: retransmit now and
	   drop out of exponential congestion-window growth. */
	if (((!flag) || (flag&4)) && sk->send_head != NULL && (sk->send_head->when + backoff(sk->backoff) * (2 * sk->mdev + sk->rtt) < jiffies))
	{
		sk->exp_growth = 0;
		ip_retransmit(sk, 0);
	}

	DPRINTF((DBG_TCP, "leaving tcp_ack\n"));
	return(1);
}
2819
2820
2821
2822
2823
2824
2825
2826
/*
 * Accept data from an incoming segment: insert the skb into the
 * (sequence-ordered) receive queue, advance acked_seq over any
 * contiguous run, and schedule or send an ACK.  Returns 0 always.
 */
static int tcp_data(struct sk_buff *skb, struct sock *sk,
	unsigned long saddr, unsigned short len)
{
	struct sk_buff *skb1, *skb2;
	struct tcphdr *th;
	int dup_dumped=0;

	th = skb->h.th;
	print_th(th);
	/* Strip the header: skb->len becomes the payload length. */
	skb->len = len -(th->doff*4);

	DPRINTF((DBG_TCP, "tcp_data len = %d sk = %X:\n", skb->len, sk));

	sk->bytes_rcv += skb->len;
	/* Pure ACK with no data and no interesting flags: nothing to queue. */
	if (skb->len == 0 && !th->fin && !th->urg && !th->psh)
	{
		if (!th->ack)
			tcp_send_ack(sk->send_seq, sk->acked_seq,sk, th, saddr);
		kfree_skb(skb, FREE_READ);
		return(0);
	}

	/* Data after we shut the receive side down: reset the connection. */
	if (sk->shutdown & RCV_SHUTDOWN)
	{
		sk->acked_seq = th->seq + skb->len + th->syn + th->fin;
		tcp_reset(sk->saddr, sk->daddr, skb->h.th, sk->prot, NULL, skb->dev);
		sk->state = TCP_CLOSE;
		sk->err = EPIPE;
		sk->shutdown = SHUTDOWN_MASK;
		DPRINTF((DBG_TCP, "tcp_data: closing socket - %X\n", sk));
		kfree_skb(skb, FREE_READ);
		if (!sk->dead)
			sk->state_change(sk);
		return(0);
	}

	/* Insert into the receive queue in sequence order, scanning from
	   the tail since in-order arrival is the common case. */
	if (sk->rqueue == NULL)
	{
		DPRINTF((DBG_TCP, "tcp_data: skb = %X:\n", skb));
		skb_queue_head(&sk->rqueue,skb);
		skb1= NULL;
	}
	else
	{
		DPRINTF((DBG_TCP, "tcp_data adding to chain sk = %X:\n", sk));
		for(skb1=sk->rqueue->prev; ; skb1 =(struct sk_buff *)skb1->prev)
		{
			if(sk->debug)
			{
				printk("skb1=%p :", skb1);
				printk("skb1->h.th->seq = %ld: ", skb1->h.th->seq);
				printk("skb->h.th->seq = %ld\n",skb->h.th->seq);
				printk("copied_seq = %ld acked_seq = %ld\n", sk->copied_seq,
					sk->acked_seq);
			}
			/* Exact duplicate with equal-or-more data: replace it. */
			if (th->seq==skb1->h.th->seq && skb->len>= skb1->len)
			{
				skb_append(skb1,skb);
				skb_unlink(skb1);
				kfree_skb(skb1,FREE_READ);
				dup_dumped=1;
				skb1=NULL;
				break;
			}
			/* Found the first element we sort after: insert behind it. */
			if (after(th->seq+1, skb1->h.th->seq))
			{
				skb_append(skb1,skb);
				break;
			}
			/* Reached the queue head: this skb sorts before everything. */
			if (skb1 == sk->rqueue)
			{
				skb_queue_head(&sk->rqueue, skb);
				break;
			}
		}
		DPRINTF((DBG_TCP, "skb = %X:\n", skb));
	}

	/* Stash the end-sequence of this segment in the (now unneeded)
	   ack_seq field; SYN and FIN each occupy a sequence number. */
	th->ack_seq = th->seq + skb->len;

	if (th->syn)
		th->ack_seq++;

	if (th->fin)
		th->ack_seq++;

	if (before(sk->acked_seq, sk->copied_seq))
	{
		printk("*** tcp.c:tcp_data bug acked < copied\n");
		sk->acked_seq = sk->copied_seq;
	}

	/* If this segment is in order (or fills a hole), advance acked_seq
	   over it and over any following contiguous queued segments. */
	if ((!dup_dumped && (skb1 == NULL || skb1->acked)) || before(th->seq, sk->acked_seq+1))
	{
		if (before(th->seq, sk->acked_seq+1))
		{
			if (after(th->ack_seq, sk->acked_seq))
				sk->acked_seq = th->ack_seq;
			skb->acked = 1;

			/* FIN received in order: wake readers and mark EOF. */
			if (skb->h.th->fin)
			{
				if (!sk->dead)
					sk->state_change(sk);
				sk->shutdown |= RCV_SHUTDOWN;
			}

			for(skb2 = (struct sk_buff *)skb->next;
				skb2 !=(struct sk_buff *) sk->rqueue;
				skb2 = (struct sk_buff *)skb2->next)
			{
				if (before(skb2->h.th->seq, sk->acked_seq+1))
				{
					if (after(skb2->h.th->ack_seq, sk->acked_seq))
						sk->acked_seq = skb2->h.th->ack_seq;
					skb2->acked = 1;

					if (skb2->h.th->fin)
					{
						sk->shutdown |= RCV_SHUTDOWN;
						if (!sk->dead)
							sk->state_change(sk);
					}

					/* Force an immediate ACK; a hole was just filled. */
					sk->ack_backlog = sk->max_ack_backlog;
				}
				else
				{
					break;
				}
			}

			/* ACK policy: immediate when delayed ACKs are off, the
			   backlog is full, too much is unacked, or a FIN arrived;
			   otherwise just schedule one. */
			if (!sk->delay_acks || sk->ack_backlog >= sk->max_ack_backlog ||
				sk->bytes_rcv > sk->max_unacked || th->fin)
			{
				/* fall through to the immediate ACK below */
			}
			else
			{
				sk->ack_backlog++;
				if(sk->debug)
					printk("Ack queued.\n");
				reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
			}
		}
	}

	/* Out-of-order segment: prune unacked queue entries if we are
	   short on receive space, then ACK what we actually have. */
	if (!skb->acked)
	{
		while (sk->prot->rspace(sk) < sk->mtu)
		{
			skb1 = skb_peek(&sk->rqueue);
			if (skb1 == NULL)
			{
				printk("INET: tcp.c:tcp_data memory leak detected.\n");
				break;
			}

			/* Never drop data that has already been acknowledged. */
			if (skb1->acked)
			{
				break;
			}

			skb_unlink(skb1);
			kfree_skb(skb1, FREE_READ);
		}
		tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
		sk->ack_backlog++;
		reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
	}
	else
	{
		tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
	}

	/* Tell the reader there is something to fetch. */
	if (!sk->dead)
	{
		if(sk->debug)
			printk("Data wakeup.\n");
		sk->data_ready(sk,0);
	}
	else
	{
		DPRINTF((DBG_TCP, "data received on dead socket.\n"));
	}

	/* Both directions finished in FIN_WAIT2: move toward close. */
	if (sk->state == TCP_FIN_WAIT2 && sk->acked_seq == sk->fin_seq && sk->rcv_ack_seq == sk->send_seq)
	{
		DPRINTF((DBG_TCP, "tcp_data: entering last_ack state sk = %X\n", sk));

		sk->shutdown = SHUTDOWN_MASK;
		sk->state = TCP_LAST_ACK;
		if (!sk->dead)
			sk->state_change(sk);
	}

	return(0);
}
3064
3065
3066 static int tcp_urg(struct sock *sk, struct tcphdr *th, unsigned long saddr)
3067 {
3068 extern int kill_pg(int pg, int sig, int priv);
3069 extern int kill_proc(int pid, int sig, int priv);
3070
3071 if (!sk->dead)
3072 sk->data_ready(sk,0);
3073
3074 if (sk->urginline)
3075 {
3076 th->urg = 0;
3077 th->psh = 1;
3078 return(0);
3079 }
3080
3081 if (!sk->urg)
3082 {
3083
3084 if (sk->proc != 0)
3085 {
3086 if (sk->proc > 0)
3087 {
3088 kill_proc(sk->proc, SIGURG, 1);
3089 }
3090 else
3091 {
3092 kill_pg(-sk->proc, SIGURG, 1);
3093 }
3094 }
3095 }
3096 sk->urg++;
3097 return(0);
3098 }
3099
3100
3101
3102
3103
3104
/*
 * Handle an in-order FIN from the peer: advance the connection state
 * machine, remember the FIN's sequence number, and wake anybody
 * sleeping on the socket.  Always returns 0.
 */
static int tcp_fin(struct sock *sk, struct tcphdr *th,
	 unsigned long saddr, struct device *dev)
{
	DPRINTF((DBG_TCP, "tcp_fin(sk=%X, th=%X, saddr=%X, dev=%X)\n",
		sk, th, saddr, dev));

	switch(sk->state)
	{
		case TCP_SYN_RECV:
		case TCP_SYN_SENT:
		case TCP_ESTABLISHED:
			/* Peer closed first: remember where its stream ends. */
			sk->fin_seq = th->seq+1;
			sk->state = TCP_CLOSE_WAIT;
			if (th->rst)
				sk->shutdown = SHUTDOWN_MASK;
			break;

		case TCP_CLOSE_WAIT:
			/* Duplicate FIN: nothing further to do. */
			break;

		case TCP_FIN_WAIT2:
			/* Our FIN was already acked; enter TIME_WAIT. */
			tcp_time_wait(sk);
			break;

		case TCP_FIN_WAIT1:
			/* Simultaneous close: if our FIN is acked go to
			   TIME_WAIT, otherwise wait in FIN_WAIT2. */
			if(before(sk->send_seq,sk->rcv_ack_seq+1) && after(sk->send_seq,sk->rcv_ack_seq-1))
				tcp_time_wait(sk);
			else
			{
				sk->fin_seq = th->seq+1;
				sk->state = TCP_FIN_WAIT2;
			}
			break;

		default:
		case TCP_TIME_WAIT:	/* deliberate fallthrough from default */
			sk->state = TCP_LAST_ACK;

			/* Restart the 2MSL-style close timer. */
			reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			if (!sk->dead)
			{
				sk->state_change(sk);
			}
			return(0);
	}
	sk->ack_backlog++;
	if (!sk->dead)
	{
		sk->state_change(sk);
	}

	return(0);
}
3162
3163
3164
3165
3166
3167
/*
 * accept() on a listening TCP socket: wait (unless O_NONBLOCK) for a
 * queued connection request and return the new sock that
 * tcp_conn_request() attached to the SYN skb.  Errors are reported
 * via sk->err with a NULL return.
 */
static struct sock *tcp_accept(struct sock *sk, int flags)
{
	struct sock *newsk;
	struct sk_buff *skb;

	DPRINTF((DBG_TCP, "tcp_accept(sk=%X, flags=%X, addr=%s)\n",
		sk, flags, in_ntoa(sk->saddr)));

	/* accept() is only meaningful on a listening socket. */
	if (sk->state != TCP_LISTEN)
	{
		sk->err = EINVAL;
		return(NULL);
	}

	/* Interrupts off so the dequeue-or-sleep test is race-free. */
	cli();
	sk->inuse = 1;
	while((skb = get_firstr(sk)) == NULL)
	{
		/* Nothing pending: fail fast for non-blocking callers. */
		if (flags & O_NONBLOCK)
		{
			sti();
			release_sock(sk);
			sk->err = EAGAIN;
			return(NULL);
		}

		release_sock(sk);
		interruptible_sleep_on(sk->sleep);
		/* Woken by a signal rather than a connection: restart. */
		if (current->signal & ~current->blocked)
		{
			sti();
			sk->err = ERESTARTSYS;
			return(NULL);
		}
		sk->inuse = 1;
	}
	sti();

	/* The new connection's sock was stashed on the SYN skb. */
	newsk = skb->sk;

	kfree_skb(skb, FREE_READ);
	sk->ack_backlog--;
	release_sock(sk);
	return(newsk);
}
3224
3225
3226
3227
3228
3229
/*
 * Active open: validate the destination address, pick an initial
 * sequence number, build and transmit a SYN carrying our MSS option,
 * and move the socket into SYN_SENT.  Returns 0 or a negative errno.
 */
static int tcp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len)
{
	struct sk_buff *buff;
	struct sockaddr_in sin;
	struct device *dev=NULL;
	unsigned char *ptr;
	int tmp;
	struct tcphdr *t1;
	int err;

	if (sk->state != TCP_CLOSE)
		return(-EISCONN);

	if (addr_len < 8)
		return(-EINVAL);

	/* Copy the sockaddr in from user space. */
	err=verify_area(VERIFY_READ, usin, addr_len);
	if(err)
		return err;

	memcpy_fromfs(&sin,usin, min(sizeof(sin), addr_len));

	if (sin.sin_family && sin.sin_family != AF_INET)
		return(-EAFNOSUPPORT);

	DPRINTF((DBG_TCP, "TCP connect daddr=%s\n", in_ntoa(sin.sin_addr.s_addr)));

	/* Connecting to a broadcast address makes no sense for TCP. */
	if (chk_addr(sin.sin_addr.s_addr) == IS_BROADCAST)
	{
		DPRINTF((DBG_TCP, "TCP connection to broadcast address not allowed\n"));
		return(-ENETUNREACH);
	}

	sk->inuse = 1;
	sk->daddr = sin.sin_addr.s_addr;
	/* Clock-based initial send sequence number. */
	sk->send_seq = jiffies * SEQ_TICK - seq_offset;
	sk->rcv_ack_seq = sk->send_seq -1;
	sk->err = 0;
	sk->dummy_th.dest = sin.sin_port;
	release_sock(sk);	/* allocation below may sleep */

	buff = (struct sk_buff *) sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
	if (buff == NULL)
	{
		return(-ENOMEM);
	}

	sk->inuse = 1;
	buff->mem_addr = buff;
	buff->mem_len = MAX_SYN_SIZE;
	buff->len = 24;		/* 20-byte TCP header + 4-byte MSS option */
	buff->sk = sk;
	buff->free = 1;
	t1 = (struct tcphdr *)(buff + 1);

	/* Build the IP header; this also picks the outgoing device. */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,IPPROTO_TCP, NULL, MAX_SYN_SIZE,
		sk->ip_ttl,sk->ip_tos);

	if (tmp < 0)
	{
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		release_sock(sk);
		return(-ENETUNREACH);
	}
	buff->len += tmp;
	t1 = (struct tcphdr *)((char *)t1 +tmp);

	memcpy(t1,(void *)&(sk->dummy_th), sizeof(*t1));
	t1->seq = ntohl(sk->send_seq++);
	buff->h.seq = sk->send_seq;
	t1->ack = 0;
	/* NOTE(review): tiny initial window (2) on the SYN — presumably
	   deliberate to throttle the peer during the handshake; confirm. */
	t1->window = 2;
	t1->res1=0;
	t1->res2=0;
	t1->rst = 0;
	t1->urg = 0;
	t1->psh = 0;
	t1->syn = 1;
	t1->urg_ptr = 0;
	t1->doff = 6;		/* 5 words of header + 1 word of options */

	/* Append the MSS option: kind 2, length 4, our device MTU. */
	ptr = (unsigned char *)(t1+1);
	ptr[0] = 2;
	ptr[1] = 4;
	ptr[2] = (dev->mtu- HEADER_SIZE) >> 8;
	ptr[3] = (dev->mtu- HEADER_SIZE) & 0xff;
	sk->mtu = dev->mtu - HEADER_SIZE;
	tcp_send_check(t1, sk->saddr, sk->daddr,
		sizeof(struct tcphdr) + 4, sk);

	/* Enter SYN_SENT and arm the retransmit timer before sending. */
	sk->state = TCP_SYN_SENT;
	sk->rtt = TCP_CONNECT_TIME;
	reset_timer(sk, TIME_WRITE, TCP_CONNECT_TIME);
	sk->retransmits = TCP_RETR2 - TCP_SYN_RETRIES;

	sk->prot->queue_xmit(sk, dev, buff, 0);

	release_sock(sk);
	return(0);
}
3341
3342
3343
3344
3345
3346
/*
 * Decide whether an incoming segment's sequence numbers are
 * acceptable: return 1 to process it, 0 to drop it (after sending an
 * ACK or RST as appropriate).  A segment is acceptable if any part of
 * it falls inside the receive window.
 */
static int tcp_sequence(struct sock *sk, struct tcphdr *th, short len,
	 struct options *opt, unsigned long saddr, struct device *dev)
{
	DPRINTF((DBG_TCP, "tcp_sequence(sk=%X, th=%X, len = %d, opt=%d, saddr=%X)\n",
		sk, th, len, opt, saddr));

	/* Accept if the start, the end, or the whole window's worth of the
	   segment overlaps [acked_seq, acked_seq + window]. */
	if (between(th->seq, sk->acked_seq, sk->acked_seq + sk->window)||
		between(th->seq + len-(th->doff*4), sk->acked_seq + 1, sk->acked_seq + sk->window) ||
		(before(th->seq, sk->acked_seq) &&
		after(th->seq + len -(th->doff*4), sk->acked_seq + sk->window)))
	{
		return(1);
	}
	DPRINTF((DBG_TCP, "tcp_sequence: rejecting packet.\n"));

	/* An out-of-window segment during the handshake means the peer is
	   confused about this connection: reset it. */
	if(sk->state==TCP_SYN_SENT||sk->state==TCP_SYN_RECV)
	{
		tcp_reset(sk->saddr,sk->daddr,th,sk->prot,NULL,dev);
		return(1);
	}

	/* Segment entirely beyond the window: re-ACK our current state so
	   the sender learns where we really are (unless it is a RST). */
	if (after(th->seq, sk->acked_seq + sk->window))
	{
		if(!th->rst)
			tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
		return(0);
	}

	/* A bare, dataless ACK slightly behind the window is still worth
	   processing (e.g. window updates). */
	if (th->ack && len == (th->doff * 4) && after(th->seq, sk->acked_seq - 32767) && !th->fin && !th->syn)
		return(1);

	if (!th->rst)
	{
		/* Old duplicate: re-ACK it so the sender resynchronizes. */
		tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
	}
	return(0);
}
3408
3409
3410
3411
3412
3413
3414
3415
3416
3417
/*
 * Main entry point for received TCP segments, called from the IP layer.
 *
 * On the first pass (redo == 0) the checksum is verified, the owning
 * socket is looked up, and — if the socket is busy — the skb is queued
 * on the socket's back_log for later replay with redo == 1.  Once the
 * socket is owned, the segment is dispatched through the TCP state
 * machine below.
 *
 * Always returns 0; errors are handled by resetting the peer and/or
 * dropping the skb.
 */
int tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
	unsigned long daddr, unsigned short len,
	unsigned long saddr, int redo, struct inet_protocol * protocol)
{
	struct tcphdr *th;
	struct sock *sk;

	if (!skb)
	{
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv skb = NULL\n"));
		return(0);
	}

	if (!dev)
	{
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv dev = NULL\n"));
		return(0);
	}

	th = skb->h.th;

	/* Find the socket owning this 4-tuple (ports are still net order). */
	sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
	DPRINTF((DBG_TCP, "<<\n"));
	DPRINTF((DBG_TCP, "len = %d, redo = %d, skb=%X\n", len, redo, skb));

	/* A zapped socket has been reset; treat it as no socket at all. */
	if (sk != NULL && sk->zapped)
		sk = NULL;

	if (sk)
	{
		DPRINTF((DBG_TCP, "sk = %X:\n", sk));
	}

	if (!redo)
	{
		/* First pass: drop segments with a bad checksum outright. */
		if (tcp_check(th, len, saddr, daddr ))
		{
			skb->sk = NULL;
			DPRINTF((DBG_TCP, "packet dropped with bad checksum.\n"));
			if (inet_debug == DBG_SLIP)
				printk("\rtcp_rcv: bad checksum\n");
			kfree_skb(skb,FREE_READ);
			return(0);
		}

		/* No socket wants this segment: refuse with an RST, unless
		 * the segment is itself an RST (never reset a reset). */
		if (sk == NULL)
		{
			if (!th->rst)
			{
				th->seq = ntohl(th->seq);
				tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev);
			}
			skb->sk = NULL;
			kfree_skb(skb, FREE_READ);
			return(0);
		}

		/* Attach the skb to the socket.  Note saddr/daddr are
		 * deliberately swapped: skb->saddr is OUR address, so it can
		 * be used directly when building replies. */
		skb->len = len;
		skb->sk = sk;
		skb->acked = 0;
		skb->used = 0;
		skb->free = 0;
		skb->urg_used = 0;
		skb->saddr = daddr;
		skb->daddr = saddr;

		/* Sequence number is kept in host order from here on. */
		th->seq = ntohl(th->seq);

		/* If the socket is busy, append to its circular back_log
		 * list; release_sock() will replay it with redo == 1.
		 * Interrupts are off while the list is manipulated. */
		cli();
		if (sk->inuse)
		{
			if (sk->back_log == NULL)
			{
				sk->back_log = skb;
				skb->next = skb;
				skb->prev = skb;
			}
			else
			{
				skb->next = sk->back_log;
				skb->prev = sk->back_log->prev;
				skb->prev->next = skb;
				skb->next->prev = skb;
			}
			sti();
			return(0);
		}
		sk->inuse = 1;
		sti();
	}
	else
	{
		/* Replay from back_log: the socket must already be known. */
		if (!sk)
		{
			DPRINTF((DBG_TCP, "tcp.c: tcp_rcv bug sk=NULL redo = 1\n"));
			return(0);
		}
	}

	if (!sk->prot)
	{
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv sk->prot = NULL \n"));
		return(0);
	}

	/* Receive-buffer accounting: drop if this skb would overrun rcvbuf. */
	if (sk->rmem_alloc + skb->mem_len >= sk->rcvbuf)
	{
		skb->sk = NULL;
		DPRINTF((DBG_TCP, "dropping packet due to lack of buffer space.\n"));
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	}

	sk->rmem_alloc += skb->mem_len;

	DPRINTF((DBG_TCP, "About to do switch.\n"));

	/* Dispatch on connection state. */
	switch(sk->state)
	{
	/*
	 * LAST_ACK treats RST as an immediate close, then deliberately
	 * falls through to the normal established-state processing.
	 */
	case TCP_LAST_ACK:
		if (th->rst)
		{
			sk->zapped=1;
			sk->err = ECONNRESET;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			if (!sk->dead)
			{
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* fall through */

	case TCP_ESTABLISHED:
	case TCP_CLOSE_WAIT:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
	case TCP_TIME_WAIT:
		/* Out-of-window segment: re-ack (unless RST) and drop. */
		if (!tcp_sequence(sk, th, len, opt, saddr,dev))
		{
			if (inet_debug == DBG_SLIP)
				printk("\rtcp_rcv: not in seq\n");
			if(!th->rst)
				tcp_send_ack(sk->send_seq, sk->acked_seq,
					sk, th, saddr);
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* In-window RST: hard connection reset.  CLOSE_WAIT gets
		 * EPIPE because the local side already saw the peer's FIN. */
		if (th->rst)
		{
			sk->zapped=1;

			sk->err = ECONNRESET;

			if (sk->state == TCP_CLOSE_WAIT)
			{
				sk->err = EPIPE;
			}

			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			if (!sk->dead)
			{
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* IP security/compartment options are not supported:
		 * reset the connection. */
		if (opt && (opt->security != 0 || opt->compartment != 0))
		{
			sk->err = ECONNRESET;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			tcp_reset(daddr, saddr, th, sk->prot, opt,dev);
			if (!sk->dead)
			{
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* A SYN on an established connection is an error: reset. */
		if (th->syn)
		{
			sk->err = ECONNRESET;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			tcp_reset(daddr, saddr, th, sk->prot, opt,dev);
			if (!sk->dead)
			{
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* Process the ACK field; a bad ACK ends processing here. */
		if (th->ack)
		{
			if (!tcp_ack(sk, th, saddr, len))
			{
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
		}

		/* Urgent data is only meaningful while data can still
		 * arrive (established / FIN_WAIT states). */
		if (th->urg &&(sk->state==TCP_ESTABLISHED||sk->state==TCP_FIN_WAIT1||sk->state==TCP_FIN_WAIT2))
		{
			if (tcp_urg(sk, th, saddr))
			{
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
		}

		/* Queue any payload; nonzero return means the skb was not
		 * taken and must be freed here. */
		if (tcp_data(skb, sk, saddr, len))
		{
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* Finally handle FIN. */
		if (th->fin && tcp_fin(sk, th, saddr, dev))
		{
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		release_sock(sk);
		return(0);

	case TCP_CLOSE:
		/* A closed (or dead) socket silently ignores traffic. */
		if (sk->dead || sk->daddr)
		{
			DPRINTF((DBG_TCP, "packet received for closed,dead socket\n"));
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* Otherwise refuse the segment with an RST. */
		if (!th->rst)
		{
			if (!th->ack)
				th->ack_seq = 0;
			tcp_reset(daddr, saddr, th, sk->prot, opt,dev);
		}
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);

	case TCP_LISTEN:
		/* RSTs to a listener are ignored. */
		if (th->rst)
		{
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* An ACK to a listener is bogus: reset the sender. */
		if (th->ack)
		{
			tcp_reset(daddr, saddr, th, sk->prot, opt,dev);
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* A SYN starts a new connection (unless it carries
		 * unsupported security options). */
		if (th->syn)
		{
			if (opt && (opt->security != 0 || opt->compartment != 0))
			{
				tcp_reset(daddr, saddr, th, sk->prot, opt,dev);
				release_sock(sk);
				return(0);
			}

			tcp_conn_request(sk, skb, daddr, saddr, opt, dev);
			release_sock(sk);
			return(0);
		}

		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);

	/*
	 * All remaining states (SYN_RECV, CLOSING, ...) do a sequence
	 * check and then deliberately fall through into the SYN_SENT
	 * handling below.
	 */
	default:
		if (!tcp_sequence(sk, th, len, opt, saddr,dev))
		{
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* fall through */

	case TCP_SYN_SENT:
		/* RST during the handshake: connection refused. */
		if (th->rst)
		{
			sk->err = ECONNREFUSED;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			sk->zapped = 1;
			if (!sk->dead)
			{
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* Unsupported IP security options: reset. */
		if (opt && (opt->security != 0 || opt->compartment != 0))
		{
			sk->err = ECONNRESET;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			tcp_reset(daddr, saddr, th, sk->prot, opt, dev);
			if (!sk->dead)
			{
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* SYN without ACK: simultaneous open — move to SYN_RECV
		 * and wait for the peer's ACK. */
		if (!th->ack)
		{
			if (th->syn)
			{
				sk->state = TCP_SYN_RECV;
			}

			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* Segment carries an ACK: finish the handshake. */
		switch(sk->state)
		{
		case TCP_SYN_SENT:
			/* The ACK must cover our SYN, else reset. */
			if (!tcp_ack(sk, th, saddr, len))
			{
				tcp_reset(daddr, saddr, th,
					sk->prot, opt,dev);
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			/* An ACK without SYN is not yet a connection. */
			if (!th->syn)
			{
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			/* SYN+ACK received: ack the peer's SYN and fall
			 * through to the common establishment code. */
			sk->acked_seq = th->seq+1;
			sk->fin_seq = th->seq;
			tcp_send_ack(sk->send_seq, th->seq+1, sk, th, sk->daddr);
			/* fall through */

		case TCP_SYN_RECV:
			if (!tcp_ack(sk, th, saddr, len))
			{
				tcp_reset(daddr, saddr, th, sk->prot, opt, dev);
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
			/* Handshake complete. */
			sk->state = TCP_ESTABLISHED;

			/* Pick up the peer's options (e.g. MSS) and record
			 * its (possibly changed) source port. */
			tcp_options(sk, th);
			sk->dummy_th.dest = th->source;
			sk->copied_seq = sk->acked_seq-1;
			if (!sk->dead)
			{
				sk->state_change(sk);
			}

			/* The segment may already carry urgent/ordinary
			 * data and/or a FIN; process them now. */
			if (th->urg)
			{
				if (tcp_urg(sk, th, saddr))
				{
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}
			}
			if (tcp_data(skb, sk, saddr, len))
				kfree_skb(skb, FREE_READ);

			/* NOTE(review): unlike the established path above,
			 * tcp_fin() is still called after the skb may have
			 * been freed (th points into the skb) — looks
			 * suspicious; verify against tcp_fin's usage. */
			if (th->fin)
				tcp_fin(sk, th, saddr, dev);
			release_sock(sk);
			return(0);
		}

		/* States reached via the default fallthrough (the inner
		 * switch matched neither SYN_SENT nor SYN_RECV): ordinary
		 * urgent/data/FIN processing. */
		if (th->urg)
		{
			if (tcp_urg(sk, th, saddr))
			{
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
		}

		if (tcp_data(skb, sk, saddr, len))
		{
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (!th->fin)
		{
			release_sock(sk);
			return(0);
		}
		tcp_fin(sk, th, saddr, dev);
		release_sock(sk);
		return(0);
	}
}
3895
3896
3897
3898
3899
3900
3901
3902 static void tcp_write_wakeup(struct sock *sk)
3903 {
3904 struct sk_buff *buff;
3905 struct tcphdr *t1;
3906 struct device *dev=NULL;
3907 int tmp;
3908
3909 if (sk->zapped)
3910 return;
3911
3912 if (sk -> state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)
3913 return;
3914
3915 buff = (struct sk_buff *) sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
3916
3917 if (buff == NULL)
3918 return;
3919
3920 buff->mem_addr = buff;
3921 buff->mem_len = MAX_ACK_SIZE;
3922 buff->len = sizeof(struct tcphdr);
3923 buff->free = 1;
3924 buff->sk = sk;
3925 DPRINTF((DBG_TCP, "in tcp_write_wakeup\n"));
3926 t1 = (struct tcphdr *)(buff + 1);
3927
3928
3929 tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
3930 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE, sk->ip_ttl,sk->ip_tos);
3931 if (tmp < 0)
3932 {
3933 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
3934 return;
3935 }
3936
3937 buff->len += tmp;
3938 t1 = (struct tcphdr *)((char *)t1 +tmp);
3939
3940 memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
3941
3942
3943
3944
3945
3946 t1->seq = ntohl(sk->send_seq-1);
3947 t1->ack = 1;
3948 t1->res1= 0;
3949 t1->res2= 0;
3950 t1->rst = 0;
3951 t1->urg = 0;
3952 t1->psh = 0;
3953 t1->fin = 0;
3954 t1->syn = 0;
3955 t1->ack_seq = ntohl(sk->acked_seq);
3956 t1->window = ntohs(sk->prot->rspace(sk));
3957 t1->doff = sizeof(*t1)/4;
3958 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
3959
3960
3961
3962
3963 sk->prot->queue_xmit(sk, dev, buff, 1);
3964 }
3965
3966
3967
3968
3969
3970 int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
3971 {
3972 int val,err;
3973
3974 if(level!=SOL_TCP)
3975 return ip_setsockopt(sk,level,optname,optval,optlen);
3976
3977 if (optval == NULL)
3978 return(-EINVAL);
3979
3980 err=verify_area(VERIFY_READ, optval, sizeof(int));
3981 if(err)
3982 return err;
3983
3984 val = get_fs_long((unsigned long *)optval);
3985
3986 switch(optname)
3987 {
3988 case TCP_MSS:
3989 if(val<200||val>2048)
3990 return -EINVAL;
3991 sk->mss=val;
3992 return 0;
3993 case TCP_NODELAY:
3994
3995 return 0;
3996 default:
3997 return(-ENOPROTOOPT);
3998 }
3999 }
4000
4001 int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
4002 {
4003 int val,err;
4004
4005 if(level!=SOL_TCP)
4006 return ip_getsockopt(sk,level,optname,optval,optlen);
4007
4008 switch(optname)
4009 {
4010 case TCP_MSS:
4011 val=sk->mss;
4012 break;
4013 case TCP_NODELAY:
4014 val=1;
4015 break;
4016 default:
4017 return(-ENOPROTOOPT);
4018 }
4019 err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
4020 if(err)
4021 return err;
4022 put_fs_long(sizeof(int),(unsigned long *) optlen);
4023
4024 err=verify_area(VERIFY_WRITE, optval, sizeof(int));
4025 if(err)
4026 return err;
4027 put_fs_long(val,(unsigned long *)optval);
4028
4029 return(0);
4030 }
4031
/*
 * TCP's protocol operations vector, registered with the inet layer.
 * Entries are positional; the role comments below follow the functions
 * assigned here (field names belong to struct proto, declared in a
 * header not visible in this file — verify against sockinet.h/inet.h).
 */
struct proto tcp_prot =
{
	sock_wmalloc,		/* write-buffer allocation */
	sock_rmalloc,		/* read-buffer allocation */
	sock_wfree,		/* write-buffer free */
	sock_rfree,		/* read-buffer free */
	sock_rspace,		/* free receive space */
	sock_wspace,		/* free send space */
	tcp_close,
	tcp_read,
	tcp_write,
	tcp_sendto,
	tcp_recvfrom,
	ip_build_header,	/* lower-layer header construction */
	tcp_connect,
	tcp_accept,
	ip_queue_xmit,		/* hand finished packets to IP */
	tcp_retransmit,
	tcp_write_wakeup,
	tcp_read_wakeup,
	tcp_rcv,		/* receive entry point from IP */
	tcp_select,
	tcp_ioctl,
	NULL,			/* no init hook */
	tcp_shutdown,
	tcp_setsockopt,
	tcp_getsockopt,
	128,			/* presumably max header space — TODO confirm */
	0,
	{NULL,},		/* socket hash array, initially empty */
	"TCP"			/* protocol name */
};