This source file includes following definitions.
- min
- __print_th
- print_th
- tcp_select_window
- tcp_time_wait
- tcp_retransmit
- tcp_err
- tcp_readable
- tcp_select
- tcp_ioctl
- tcp_check
- tcp_send_check
- tcp_send_skb
- tcp_dequeue_partial
- tcp_send_partial
- tcp_enqueue_partial
- tcp_send_ack
- tcp_build_header
- tcp_write
- tcp_sendto
- tcp_read_wakeup
- cleanup_rbuf
- tcp_read_urg
- tcp_read
- tcp_shutdown
- tcp_recvfrom
- tcp_reset
- tcp_options
- default_mask
- tcp_conn_request
- tcp_close
- tcp_write_xmit
- sort_send
- tcp_ack
- tcp_data
- tcp_check_urg
- tcp_urg
- tcp_fin
- tcp_accept
- tcp_connect
- tcp_sequence
- tcp_rcv
- tcp_write_wakeup
- tcp_send_probe0
- tcp_setsockopt
- tcp_getsockopt
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84 #include <linux/types.h>
85 #include <linux/sched.h>
86 #include <linux/mm.h>
87 #include <linux/string.h>
88 #include <linux/socket.h>
89 #include <linux/sockios.h>
90 #include <linux/termios.h>
91 #include <linux/in.h>
92 #include <linux/fcntl.h>
93 #include <linux/inet.h>
94 #include <linux/netdevice.h>
95 #include "snmp.h"
96 #include "ip.h"
97 #include "protocol.h"
98 #include "icmp.h"
99 #include "tcp.h"
100 #include <linux/skbuff.h>
101 #include "sock.h"
102 #include <linux/errno.h>
103 #include <linux/timer.h>
104 #include <asm/system.h>
105 #include <asm/segment.h>
106 #include <linux/mm.h>
107
108 #define SEQ_TICK 3
109 unsigned long seq_offset;
110 struct tcp_mib tcp_statistics;
111
112 #define SUBNETSARELOCAL
113
/*
 * Return the smaller of two unsigned quantities.
 * (Historical interface: the result is narrowed to int by the
 * declared return type; callers only pass small sizes.)
 */
static __inline__ int
min(unsigned int a, unsigned int b)
{
	return (a <= b) ? a : b;
}
120
121
/*
 * Dump every field of a TCP header, plus the first four option bytes
 * that follow it, to the kernel log. Multi-byte fields are converted
 * from network to host byte order before printing. Debug-only helper,
 * reached through print_th() when inet_debug == DBG_TCP.
 */
static void __print_th(struct tcphdr *th)
{
	unsigned char *ptr;

	printk("TCP header:\n");
	printk(" source=%d, dest=%d, seq =%ld, ack_seq = %ld\n",
		ntohs(th->source), ntohs(th->dest),
		ntohl(th->seq), ntohl(th->ack_seq));
	printk(" fin=%d, syn=%d, rst=%d, psh=%d, ack=%d, urg=%d res1=%d res2=%d\n",
		th->fin, th->syn, th->rst, th->psh, th->ack,
		th->urg, th->res1, th->res2);
	printk(" window = %d, check = %d urg_ptr = %d\n",
		ntohs(th->window), ntohs(th->check), ntohs(th->urg_ptr));
	printk(" doff = %d\n", th->doff);
	/* Options (if any) start immediately after the fixed header. */
	ptr =(unsigned char *)(th + 1);
	printk(" options = %d %d %d %d\n", ptr[0], ptr[1], ptr[2], ptr[3]);
}
139
140 static inline void print_th(struct tcphdr *th)
141 {
142 if (inet_debug == DBG_TCP)
143 __print_th(th);
144 }
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
/*
 * Choose the receive window to advertise to the peer.
 *
 * The freshly computed window (current receive-buffer free space) is
 * used only when it is at least min(mss, MAX_WINDOW/2) AND no smaller
 * than the window we previously advertised; otherwise we keep
 * advertising the old value. This avoids offering tiny windows
 * (silly window syndrome) and avoids appearing to shrink a window
 * we already granted.
 */
static int tcp_select_window(struct sock *sk)
{
	int new_window = sk->prot->rspace(sk);

	if (new_window < min(sk->mss, MAX_WINDOW/2) ||
	    new_window < sk->window)
		return(sk->window);
	return(new_window);
}
180
181
182
/*
 * Move the socket into TIME_WAIT: mark both directions shut down,
 * wake any process sleeping on a state change, and arm the close
 * timer (TCP_TIMEWAIT_LEN) after which the socket is finally freed.
 * The wakeup is skipped for dead (already-closed) sockets.
 */
static void tcp_time_wait(struct sock *sk)
{
	sk->state = TCP_TIME_WAIT;
	sk->shutdown = SHUTDOWN_MASK;
	if (!sk->dead)
		sk->state_change(sk);
	reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
}
191
192
193
194
195
196
197
198
/*
 * Retransmit queued segments for @sk.
 *
 * @all != 0: resend the whole queue (e.g. after a route change)
 * without touching congestion state.
 * @all == 0: this is a loss-triggered retransmit, so first perform
 * the congestion backoff, then resend.
 */
static void
tcp_retransmit(struct sock *sk, int all)
{
	if (all) {
		ip_retransmit(sk, all);
		return;
	}

	/* Multiplicative decrease: remember where slow start should end. */
	sk->ssthresh = sk->cong_window >> 1;

	sk->cong_count = 0;

	/* Back to slow start: one segment in flight. */
	sk->cong_window = 1;

	ip_retransmit(sk, all);
}
216
217
218
219
220
221
222
223
224
225
/*
 * Error callback wired into the inet protocol table: invoked when an
 * ICMP error (or an internal error, signalled by err < 0) arrives
 * for a TCP packet we sent. @header points at the offending IP
 * header returned inside the ICMP payload.
 */
void
tcp_err(int err, unsigned char *header, unsigned long daddr,
	unsigned long saddr, struct inet_protocol *protocol)
{
	struct tcphdr *th;
	struct sock *sk;
	struct iphdr *iph=(struct iphdr *)header;

	/* Step over the variable-length IP header to the TCP header. */
	header+=4*iph->ihl;

	DPRINTF((DBG_TCP, "TCP: tcp_err(%d, hdr=%X, daddr=%X saddr=%X, protocol=%X)\n",
		err, header, daddr, saddr, protocol));

	th =(struct tcphdr *)header;
	sk = get_sock(&tcp_prot, th->source, daddr, th->dest, saddr);
	print_th(th);

	/* No matching socket: nothing to report, drop silently. */
	if (sk == NULL) return;

	/* Negative err is a locally generated errno, not an ICMP code. */
	if(err<0)
	{
		sk->err = -err;
		sk->error_report(sk);
		return;
	}

	if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8)) {
		/* Congestion hint: shrink the congestion window by one,
		   but never below 4 segments. */
		if (sk->cong_window > 4) sk->cong_window--;
		return;
	}

	DPRINTF((DBG_TCP, "TCP: icmp_err got error\n"));
	/* Translate the ICMP code into an errno for the socket owner. */
	sk->err = icmp_err_convert[err & 0xff].errno;

	/* A fatal error aborts a connection attempt in progress;
	   established connections merely record the error. */
	if (icmp_err_convert[err & 0xff].fatal) {
		if (sk->state == TCP_SYN_SENT) {
			tcp_statistics.TcpAttemptFails++;
			sk->state = TCP_CLOSE;
			sk->error_report(sk);
		}
	}
	return;
}
278
279
280
281
282
283
284
/*
 * Return the number of bytes a reader could consume from @sk right
 * now (used by select() and the TIOCINQ ioctl).
 *
 * Walks the receive queue starting at sequence sk->copied_seq+1,
 * summing contiguous in-order data and stopping at the first gap.
 * A SYN occupies a sequence number but carries no data byte, hence
 * the sum/amount adjustments. One byte is subtracted for an unread
 * urgent byte when the socket is not in urgent-inline mode, since
 * that byte is read out of band. Runs with interrupts disabled so
 * the queue cannot change underneath us.
 */
static int
tcp_readable(struct sock *sk)
{
	unsigned long counted;
	unsigned long amount;
	struct sk_buff *skb;
	int sum;
	unsigned long flags;

	DPRINTF((DBG_TCP, "tcp_readable(sk=%X)\n", sk));
	if(sk && sk->debug)
		printk("tcp_readable: %p - ",sk);

	save_flags(flags);
	cli();
	if (sk == NULL || (skb = skb_peek(&sk->receive_queue)) == NULL)
	{
		restore_flags(flags);
		if(sk && sk->debug)
			printk("empty\n");
		return(0);
	}

	counted = sk->copied_seq+1;	/* first sequence number not yet read */
	amount = 0;

	/* Walk forward until a sequence gap, the end of the queue, or a
	   PSH-marked segment once some data has been counted. */
	do {
		if (before(counted, skb->h.th->seq))
			break;	/* gap: nothing beyond is readable yet */
		sum = skb->len -(counted - skb->h.th->seq);
		if (skb->h.th->syn)
			sum++;
		if (sum >= 0) {
			amount += sum;
			if (skb->h.th->syn) amount--;	/* SYN has no data byte */
			counted += sum;
		}
		if (amount && skb->h.th->psh) break;
		skb = skb->next;
	} while(skb != (struct sk_buff *)&sk->receive_queue);

	/* Don't count an unread out-of-band urgent byte in range. */
	if (amount && !sk->urginline && sk->urg_data &&
	    (sk->urg_seq - sk->copied_seq) <= (counted - sk->copied_seq))
		amount--;
	restore_flags(flags);
	DPRINTF((DBG_TCP, "tcp readable returning %d bytes\n", amount));
	if(sk->debug)
		printk("got %lu bytes.\n",amount);
	return(amount);
}
335
336
337
338
339
340
341
/*
 * select() support for TCP sockets. Returns 1 when the requested
 * condition already holds, 0 after registering on the socket's wait
 * queue (select_wait is always called before the condition is tested,
 * so a wakeup between test and sleep is not lost).
 *
 * SEL_IN:  readable data queued (or a pending connection on a
 *          listening socket), a pending error, or read-side shutdown.
 * SEL_OUT: at least one MSS of send space and connection established.
 * SEL_EX:  pending error or urgent data.
 */
static int
tcp_select(struct sock *sk, int sel_type, select_table *wait)
{
	DPRINTF((DBG_TCP, "tcp_select(sk=%X, sel_type = %d, wait = %X)\n",
		sk, sel_type, wait));

	sk->inuse = 1;	/* lock the socket while we inspect it */
	switch(sel_type) {
	case SEL_IN:
		if(sk->debug)
			printk("select in");
		select_wait(sk->sleep, wait);
		if(sk->debug)
			printk("-select out");
		if (skb_peek(&sk->receive_queue) != NULL) {
			/* A listening socket's queue holds connections;
			   otherwise require actually readable bytes. */
			if (sk->state == TCP_LISTEN || tcp_readable(sk)) {
				release_sock(sk);
				if(sk->debug)
					printk("-select ok data\n");
				return(1);
			}
		}
		if (sk->err != 0)
		{
			release_sock(sk);
			if(sk->debug)
				printk("-select ok error");
			return(1);
		}
		if (sk->shutdown & RCV_SHUTDOWN) {
			release_sock(sk);
			if(sk->debug)
				printk("-select ok down\n");
			return(1);
		} else {
			release_sock(sk);
			if(sk->debug)
				printk("-select fail\n");
			return(0);
		}
	case SEL_OUT:
		select_wait(sk->sleep, wait);
		if (sk->shutdown & SEND_SHUTDOWN) {
			DPRINTF((DBG_TCP,
				"write select on shutdown socket.\n"));

			/* Write side closed: never becomes writable. */
			release_sock(sk);
			return(0);
		}

		/* Writable only with a full MSS of buffer space, and
		   not while the handshake is still in progress. */
		if (sk->prot->wspace(sk) >= sk->mss) {
			release_sock(sk);

			if (sk->state == TCP_SYN_RECV ||
			    sk->state == TCP_SYN_SENT) return(0);
			return(1);
		}
		DPRINTF((DBG_TCP,
			"tcp_select: sleeping on write sk->wmem_alloc = %d, "
			"sk->packets_out = %d\n"
			"sk->write_seq = %u, sk->window_seq=%u\n",
			sk->wmem_alloc, sk->packets_out,
			sk->write_seq, sk->window_seq));

		release_sock(sk);
		return(0);
	case SEL_EX:
		select_wait(sk->sleep,wait);
		if (sk->err || sk->urg_data) {
			release_sock(sk);
			return(1);
		}
		release_sock(sk);
		return(0);
	}

	release_sock(sk);
	return(0);
}
427
428
429 int
430 tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
431 {
432 int err;
433 DPRINTF((DBG_TCP, "tcp_ioctl(sk=%X, cmd = %d, arg=%X)\n", sk, cmd, arg));
434 switch(cmd) {
435 case DDIOCSDBG:
436 return(dbg_ioctl((void *) arg, DBG_TCP));
437
438 case TIOCINQ:
439 #ifdef FIXME
440 case FIONREAD:
441 #endif
442 {
443 unsigned long amount;
444
445 if (sk->state == TCP_LISTEN) return(-EINVAL);
446
447 sk->inuse = 1;
448 amount = tcp_readable(sk);
449 release_sock(sk);
450 DPRINTF((DBG_TCP, "returning %d\n", amount));
451 err=verify_area(VERIFY_WRITE,(void *)arg,
452 sizeof(unsigned long));
453 if(err)
454 return err;
455 put_fs_long(amount,(unsigned long *)arg);
456 return(0);
457 }
458 case SIOCATMARK:
459 {
460 int answ = sk->urg_data && sk->urg_seq == sk->copied_seq+1;
461
462 err = verify_area(VERIFY_WRITE,(void *) arg,
463 sizeof(unsigned long));
464 if (err)
465 return err;
466 put_fs_long(answ,(int *) arg);
467 return(0);
468 }
469 case TIOCOUTQ:
470 {
471 unsigned long amount;
472
473 if (sk->state == TCP_LISTEN) return(-EINVAL);
474 amount = sk->prot->wspace(sk);
475 err=verify_area(VERIFY_WRITE,(void *)arg,
476 sizeof(unsigned long));
477 if(err)
478 return err;
479 put_fs_long(amount,(unsigned long *)arg);
480 return(0);
481 }
482 default:
483 return(-EINVAL);
484 }
485 }
486
487
488
/*
 * Compute the TCP checksum: the 16-bit one's-complement of the
 * one's-complement sum over the pseudo-header (saddr, daddr, length,
 * protocol) plus the TCP header and payload of @len bytes at @th.
 * i386-only: the summation is done in inline assembly 32 bits at a
 * time, then folded to 16 bits, with 2-byte and 1-byte tails handled
 * separately. A zero @saddr means "use our own primary address".
 */
unsigned short
tcp_check(struct tcphdr *th, int len,
	  unsigned long saddr, unsigned long daddr)
{
	unsigned long sum;

	if (saddr == 0) saddr = ip_my_addr();
	print_th(th);
	/* Pseudo-header: saddr + daddr + (swapped length | protocol).
	   NOTE(review): the ntohs(len) byte-order juggling relies on
	   one's-complement sums being byte-order independent — the
	   swap cancels out in the final fold; confirm before touching. */
	__asm__("\t addl %%ecx,%%ebx\n"
		"\t adcl %%edx,%%ebx\n"
		"\t adcl $0, %%ebx\n"
		: "=b"(sum)
		: "0"(daddr), "c"(saddr), "d"((ntohs(len) << 16) + IPPROTO_TCP*256)
		: "cx","bx","dx" );

	/* Sum the bulk of the segment one dword at a time. */
	if (len > 3) {
		__asm__("\tclc\n"
			"1:\n"
			"\t lodsl\n"
			"\t adcl %%eax, %%ebx\n"
			"\t loop 1b\n"
			"\t adcl $0, %%ebx\n"
			: "=b"(sum) , "=S"(th)
			: "0"(sum), "c"(len/4) ,"1"(th)
			: "ax", "cx", "bx", "si" );
	}

	/* Fold the 32-bit accumulator down to 16 bits with end-around
	   carry. */
	__asm__("\t movl %%ebx, %%ecx\n"
		"\t shrl $16,%%ecx\n"
		"\t addw %%cx, %%bx\n"
		"\t adcw $0, %%bx\n"
		: "=b"(sum)
		: "0"(sum)
		: "bx", "cx");

	/* Trailing 16-bit word, if any. */
	if ((len & 2) != 0) {
		__asm__("\t lodsw\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum), "=S"(th)
			: "0"(sum) ,"1"(th)
			: "si", "ax", "bx");
	}

	/* Trailing single byte, if any (high byte taken as zero). */
	if ((len & 1) != 0) {
		__asm__("\t lodsb\n"
			"\t movb $0,%%ah\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum)
			: "0"(sum) ,"S"(th)
			: "si", "ax", "bx");
	}

	/* Return the one's complement of the folded sum. */
	return((~sum) & 0xffff);
}
549
550
551 void tcp_send_check(struct tcphdr *th, unsigned long saddr,
552 unsigned long daddr, int len, struct sock *sk)
553 {
554 th->check = 0;
555 th->check = tcp_check(th, len, saddr, daddr);
556 return;
557 }
558
/*
 * Checksum a fully built segment and either transmit it now or queue
 * it on the write queue.
 *
 * The segment is queued rather than sent when it would exceed the
 * peer's advertised window, when a retransmit is already pending, or
 * when the congestion window is full. If it is blocked by the window
 * with nothing in flight and no ACK backlog, the zero-window probe
 * timer is armed so a window opening is eventually discovered.
 */
static void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
{
	int size;
	struct tcphdr * th = skb->h.th;

	/* Length from the start of the TCP header to the end of data. */
	size = skb->len - ((unsigned char *) th - skb->data);

	/* Sanity-check the buffer layout before trusting it. */
	if (size < sizeof(struct tcphdr) || size > skb->len) {
		printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
			skb, skb->data, th, skb->len);
		kfree_skb(skb, FREE_WRITE);
		return;
	}

	/* A dataless segment is only legitimate as a SYN or FIN. */
	if (size == sizeof(struct tcphdr)) {
		if(!th->syn && !th->fin) {
			printk("tcp_send_skb: attempt to queue a bogon.\n");
			kfree_skb(skb,FREE_WRITE);
			return;
		}
	}

	tcp_statistics.TcpOutSegs++;

	tcp_send_check(th, sk->saddr, sk->daddr, size, sk);

	/* Sequence number of the byte just past this segment's data. */
	skb->h.seq = ntohl(th->seq) + size - 4*th->doff;
	if (after(skb->h.seq, sk->window_seq) ||
	    (sk->retransmits && sk->timeout == TIME_WRITE) ||
	    sk->packets_out >= sk->cong_window) {
		DPRINTF((DBG_TCP, "sk->cong_window = %d, sk->packets_out = %d\n",
			sk->cong_window, sk->packets_out));
		DPRINTF((DBG_TCP, "sk->write_seq = %d, sk->window_seq = %d\n",
			sk->write_seq, sk->window_seq));
		if (skb->next != NULL) {
			printk("tcp_send_partial: next != NULL\n");
			skb_unlink(skb);
		}
		skb_queue_tail(&sk->write_queue, skb);
		/* Window-blocked with nothing outstanding: start probing. */
		if (before(sk->window_seq, sk->write_queue.next->h.seq) &&
		    sk->send_head == NULL &&
		    sk->ack_backlog == 0)
			reset_timer(sk, TIME_PROBE0, sk->rto);
	} else {
		sk->sent_seq = sk->write_seq;
		sk->prot->queue_xmit(sk, skb->dev, skb, 0);
	}
}
611
/*
 * Atomically detach and return the pending partial (small-write
 * coalescing) buffer, cancelling its flush timer; NULL if there is
 * none. Interrupts are disabled across the check-and-clear so this
 * cannot race against the timer or the receive path.
 */
struct sk_buff * tcp_dequeue_partial(struct sock * sk)
{
	struct sk_buff * skb;
	unsigned long flags;

	save_flags(flags);
	cli();
	skb = sk->partial;
	if (skb) {
		sk->partial = NULL;
		del_timer(&sk->partial_timer);
	}
	restore_flags(flags);
	return skb;
}
627
628 static void tcp_send_partial(struct sock *sk)
629 {
630 struct sk_buff *skb;
631
632 if (sk == NULL)
633 return;
634 while ((skb = tcp_dequeue_partial(sk)) != NULL)
635 tcp_send_skb(sk, skb);
636 }
637
/*
 * Install @skb as the socket's pending partial segment and (re)arm
 * the flush timer that will push it out via tcp_send_partial(). If
 * a different partial buffer was already pending, it is sent
 * immediately once interrupts are back on.
 * NOTE(review): expires is set to plain HZ — presumably this kernel's
 * add_timer() treats it as a relative delay (~1 second); confirm.
 */
void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk)
{
	struct sk_buff * tmp;
	unsigned long flags;

	save_flags(flags);
	cli();
	tmp = sk->partial;
	if (tmp)
		del_timer(&sk->partial_timer);
	sk->partial = skb;
	sk->partial_timer.expires = HZ;
	sk->partial_timer.function = (void (*)(unsigned long)) tcp_send_partial;
	sk->partial_timer.data = (unsigned long) sk;
	add_timer(&sk->partial_timer);
	restore_flags(flags);
	if (tmp)
		tcp_send_skb(sk, tmp);	/* flush the displaced buffer */
}
657
658
659
/*
 * Build and transmit a bare ACK segment (sequence @sequence, acking
 * @ack) to the peer described by @th/@daddr. Used for delayed-ACK
 * catch-up and duplicate-segment acknowledgement.
 *
 * If no buffer memory is available the ACK is deferred: the backlog
 * counter is bumped and a short retry timer armed. Sending an ACK
 * that covers everything received (ack == acked_seq) clears the
 * ACK bookkeeping and may switch to the keepalive timer or drop the
 * timer entirely when no data is in flight.
 */
static void
tcp_send_ack(unsigned long sequence, unsigned long ack,
	     struct sock *sk,
	     struct tcphdr *th, unsigned long daddr)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	struct device *dev = NULL;
	int tmp;

	/* A reset socket must stay silent. */
	if(sk->zapped)
		return;

	buff = sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
	{
		/* No memory: remember we owe an ACK and retry soon. */
		sk->ack_backlog++;
		if (sk->timeout != TIME_WRITE && tcp_connected(sk->state))
		{
			reset_timer(sk, TIME_WRITE, 10);
		}
		if (inet_debug == DBG_SLIP)
			printk("\rtcp_ack: malloc failed\n");
		return;
	}

	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;
	buff->localroute = sk->localroute;
	t1 =(struct tcphdr *) buff->data;

	/* Lay down the IP (and link) headers first. */
	tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
		IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0)
	{
		buff->free=1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		if (inet_debug == DBG_SLIP)
			printk("\rtcp_ack: build_header failed\n");
		return;
	}
	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)t1 +tmp);

	/* Start from the incoming header, then swap the port pair and
	   overwrite every flag so only ACK is set. */
	memcpy(t1, th, sizeof(*t1));

	t1->dest = th->source;
	t1->source = th->dest;
	t1->seq = ntohl(sequence);
	t1->ack = 1;
	sk->window = tcp_select_window(sk);
	t1->window = ntohs(sk->window);
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	t1->fin = 0;
	if (ack == sk->acked_seq)
	{
		/* This ACK is fully up to date: clear the backlog. */
		sk->ack_backlog = 0;
		sk->bytes_rcv = 0;
		sk->ack_timed = 0;
		if (sk->send_head == NULL && skb_peek(&sk->write_queue) == NULL
		    && sk->timeout == TIME_WRITE)
		{
			/* Nothing in flight: keepalive or no timer at all. */
			if(sk->keepopen)
				reset_timer(sk,TIME_KEEPOPEN,TCP_TIMEOUT_LEN);
			else
				delete_timer(sk);
		}
	}
	t1->ack_seq = ntohl(ack);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), sk);
	if (sk->debug)
		printk("\rtcp_ack: seq %lx ack %lx\n", sequence, ack);
	tcp_statistics.TcpOutSegs++;
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
751
752
753
/*
 * Fill in the TCP header for an outgoing data segment: copy the
 * socket's prototype header, stamp the current send sequence and
 * acknowledgement numbers, and advertise a fresh window. PSH is set
 * when @push is zero (nothing further follows immediately). Since
 * this segment carries an up-to-date ACK, the pending-ACK counters
 * are cleared as a side effect. Returns the header size in bytes.
 */
static int
tcp_build_header(struct tcphdr *th, struct sock *sk, int push)
{
	memcpy(th,(void *) &(sk->dummy_th), sizeof(*th));
	th->seq = htonl(sk->write_seq);
	th->psh =(push == 0) ? 1 : 0;
	th->doff = sizeof(*th)/4;
	th->ack = 1;
	th->fin = 0;
	/* This segment acknowledges everything received so far. */
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	sk->ack_timed = 0;
	th->ack_seq = htonl(sk->acked_seq);
	sk->window = tcp_select_window(sk);
	th->window = htons(sk->window);

	return(sizeof(*th));
}
774
775
776
777
778
/*
 * The TCP sendmsg path: copy up to @len bytes of user data from
 * @from into segments and transmit or queue them.
 *
 * Returns the number of bytes accepted, or a negative errno when
 * nothing was accepted. Blocks (unless @nonblock) while the
 * connection is still being established and while buffer memory is
 * unavailable. Small writes are coalesced into a "partial" buffer
 * (Nagle-style) that is flushed when it reaches a full MSS, when
 * MSG_OOB is used, or when nothing is in flight.
 */
static int tcp_write(struct sock *sk, unsigned char *from,
	  int len, int nonblock, unsigned flags)
{
	int copied = 0;
	int copy;
	int tmp;
	struct sk_buff *skb;
	struct sk_buff *send_tmp;
	unsigned char *buff;
	struct proto *prot;
	struct device *dev = NULL;

	DPRINTF((DBG_TCP, "tcp_write(sk=%X, from=%X, len=%d, nonblock=%d, flags=%X)\n",
		sk, from, len, nonblock, flags));

	sk->inuse=1;	/* lock the socket */
	prot = sk->prot;
	while(len > 0)
	{
		/* Pending error: report it, unless data was already
		   accepted (then return the partial count first). */
		if (sk->err)
		{
			release_sock(sk);
			if (copied)
				return(copied);
			tmp = -sk->err;
			sk->err = 0;
			return(tmp);
		}

		/* Writing after shutdown(SEND) is a broken pipe. */
		if (sk->shutdown & SEND_SHUTDOWN)
		{
			release_sock(sk);
			sk->err = EPIPE;
			if (copied)
				return(copied);
			sk->err = 0;
			return(-EPIPE);
		}

		/* Wait until the connection is established (or fail if
		   it never will be). */
		while(sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)
		{
			if (sk->err)
			{
				release_sock(sk);
				if (copied)
					return(copied);
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			/* Not connected and not connecting: give up. */
			if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV)
			{
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 1\n"));
				if (copied)
					return(copied);

				if (sk->err)
				{
					tmp = -sk->err;
					sk->err = 0;
					return(tmp);
				}

				if (sk->keepopen)
				{
					send_sig(SIGPIPE, current, 0);
				}
				return(-EPIPE);
			}

			if (nonblock || copied)
			{
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 2\n"));
				if (copied)
					return(copied);
				return(-EAGAIN);
			}

			release_sock(sk);
			cli();

			/* Re-test under cli() so a state change between the
			   check and the sleep cannot be missed. */
			if (sk->state != TCP_ESTABLISHED &&
			    sk->state != TCP_CLOSE_WAIT && sk->err == 0)
			{
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked)
				{
					sti();
					DPRINTF((DBG_TCP, "tcp_write: return 3\n"));
					if (copied)
						return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
		}

		/* First try to append to an existing partial buffer
		   (small-write coalescing). */
		if ((skb = tcp_dequeue_partial(sk)) != NULL)
		{
			int hdrlen;

			/* IP + TCP header bytes already in the buffer. */
			hdrlen = ((unsigned long)skb->h.th - (unsigned long)skb->data)
				+ sizeof(struct tcphdr);

			/* OOB data cannot be merged into a normal segment. */
			if (!(flags & MSG_OOB))
			{
				copy = min(sk->mss - (skb->len - hdrlen), len);

				if (copy <= 0)
				{
					printk("TCP: **bug**: \"copy\" <= 0!!\n");
					copy = 0;
				}

				memcpy_fromfs(skb->data + skb->len, from, copy);
				skb->len += copy;
				from += copy;
				copied += copy;
				len -= copy;
				sk->write_seq += copy;
			}
			/* Send now if full, urgent, or nothing in flight;
			   otherwise keep coalescing. */
			if ((skb->len - hdrlen) >= sk->mss ||
			    (flags & MSG_OOB) || !sk->packets_out)
				tcp_send_skb(sk, skb);
			else
				tcp_enqueue_partial(skb, sk);
			continue;
		}

		/* Decide this segment's data size: normally one MSS,
		   clamped to the remaining send window and to len. */
		copy = sk->window_seq - sk->write_seq;
		if (copy <= 0 || copy < (sk->max_window >> 1) || copy > sk->mss)
			copy = sk->mss;
		if (copy > len)
			copy = len;

		/* For a sub-MSS write, allocate a full-sized buffer so
		   later writes can be appended to it. */
		send_tmp = NULL;
		if (copy < sk->mss && !(flags & MSG_OOB))
		{
			/* Release the socket lock: the allocation may sleep. */
			release_sock(sk);

			skb = prot->wmalloc(sk, sk->mtu + 128 + prot->max_header, 0, GFP_KERNEL);
			sk->inuse = 1;
			send_tmp = skb;	/* mark as a coalescable buffer */
		}
		else
		{
			/* Full segment: allocate exactly what is needed. */
			release_sock(sk);
			skb = prot->wmalloc(sk, copy + prot->max_header , 0, GFP_KERNEL);
			sk->inuse = 1;
		}

		/* Allocation failed: fail (nonblock) or sleep until
		   buffer memory is released. */
		if (skb == NULL)
		{
			if (nonblock )
			{
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 4\n"));
				if (copied)
					return(copied);
				return(-EAGAIN);
			}

			tmp = sk->wmem_alloc;
			release_sock(sk);
			cli();

			/* Only sleep if no memory was freed meanwhile and
			   the connection is still up. */
			if (tmp <= sk->wmem_alloc &&
			    (sk->state == TCP_ESTABLISHED||sk->state == TCP_CLOSE_WAIT)
			    && sk->err == 0)
			{
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked)
				{
					sti();
					DPRINTF((DBG_TCP, "tcp_write: return 5\n"));
					if (copied)
						return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
			continue;
		}

		skb->len = 0;
		skb->sk = sk;
		skb->free = 0;
		skb->localroute = sk->localroute|(flags&MSG_DONTROUTE);

		buff = skb->data;

		/* Build the IP (and link-level) header. */
		tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev,
				IPPROTO_TCP, sk->opt, skb->mem_len,sk->ip_tos,sk->ip_ttl);
		if (tmp < 0 )
		{
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 6\n"));
			if (copied)
				return(copied);
			return(tmp);
		}
		skb->len += tmp;
		skb->dev = dev;
		buff += tmp;
		skb->h.th =(struct tcphdr *) buff;
		/* Then the TCP header; PSH is set on the final segment. */
		tmp = tcp_build_header((struct tcphdr *)buff, sk, len-copy);
		if (tmp < 0)
		{
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 7\n"));
			if (copied)
				return(copied);
			return(tmp);
		}

		if (flags & MSG_OOB)
		{
			((struct tcphdr *)buff)->urg = 1;
			((struct tcphdr *)buff)->urg_ptr = ntohs(copy);
		}
		skb->len += tmp;
		memcpy_fromfs(buff+tmp, from, copy);

		from += copy;
		copied += copy;
		len -= copy;
		skb->len += copy;
		skb->free = 0;
		sk->write_seq += copy;

		/* A sub-MSS buffer with data already in flight waits in
		   the partial slot for more data (Nagle). */
		if (send_tmp != NULL && sk->packets_out)
		{
			tcp_enqueue_partial(send_tmp, sk);
			continue;
		}
		tcp_send_skb(sk, skb);
	}
	sk->err = 0;

	/* Flush the partial buffer when nothing is in flight, or when
	   Nagle is disabled and the window still has room. */
	if(sk->partial && ((!sk->packets_out)
	   || (sk->nonagle && before(sk->write_seq , sk->window_seq))
	))
		tcp_send_partial(sk);

	release_sock(sk);
	DPRINTF((DBG_TCP, "tcp_write: return 8\n"));
	return(copied);
}
1113
1114
1115 static int tcp_sendto(struct sock *sk, unsigned char *from,
1116 int len, int nonblock, unsigned flags,
1117 struct sockaddr_in *addr, int addr_len)
1118 {
1119 struct sockaddr_in sin;
1120
1121 if (flags & ~(MSG_OOB|MSG_DONTROUTE))
1122 return -EINVAL;
1123 if (addr_len < sizeof(sin))
1124 return(-EINVAL);
1125 memcpy_fromfs(&sin, addr, sizeof(sin));
1126 if (sin.sin_family && sin.sin_family != AF_INET)
1127 return(-EINVAL);
1128 if (sin.sin_port != sk->dummy_th.dest)
1129 return(-EINVAL);
1130 if (sin.sin_addr.s_addr != sk->daddr)
1131 return(-EINVAL);
1132 return(tcp_write(sk, from, len, nonblock, flags));
1133 }
1134
1135
/*
 * Send an ACK to flush the accumulated ACK backlog — called when the
 * reader has freed receive-buffer space and the peer should be told
 * about the larger window. Does nothing when no ACKs are owed; on
 * allocation failure a short timer retries later.
 */
static void
tcp_read_wakeup(struct sock *sk)
{
	int tmp;
	struct device *dev = NULL;
	struct tcphdr *t1;
	struct sk_buff *buff;

	DPRINTF((DBG_TCP, "in tcp read wakeup\n"));
	if (!sk->ack_backlog)
		return;

	buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
	if (buff == NULL)
	{
		/* No memory now: retry via the write timer. */
		reset_timer(sk, TIME_WRITE, 10);
		return;
	}

	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;
	buff->localroute = sk->localroute;

	/* Build the IP (and link-level) header. */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
			IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0)
	{
		buff->free=1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		return;
	}

	buff->len += tmp;
	t1 =(struct tcphdr *)(buff->data +tmp);

	/* Bare ACK: prototype header, current seq/ack, fresh window. */
	memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
	t1->seq = htonl(sk->sent_seq);
	t1->ack = 1;
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	sk->window = tcp_select_window(sk);
	t1->window = ntohs(sk->window);
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
	sk->prot->queue_xmit(sk, dev, buff, 1);
	tcp_statistics.TcpOutSegs++;
}
1206
1207
1208
1209
1210
1211
1212
1213
1214
/*
 * Reclaim fully consumed buffers from the head of the receive queue
 * and, if that freed space, tell the peer about the bigger window:
 * immediately (tcp_read_wakeup) when a substantial amount opened up,
 * otherwise via a delayed-ACK timer so small reads don't generate an
 * ACK storm.
 */
static void cleanup_rbuf(struct sock *sk)
{
	unsigned long flags;
	int left;
	struct sk_buff *skb;

	if(sk->debug)
		printk("cleaning rbuf for sk=%p\n", sk);

	save_flags(flags);
	cli();

	/* Remember free space before reclaiming, to detect a change. */
	left = sk->prot->rspace(sk);

	/* Free leading buffers whose data has been fully read. */
	while((skb=skb_peek(&sk->receive_queue)) != NULL)
	{
		if (!skb->used)
			break;
		skb_unlink(skb);
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
	}

	restore_flags(flags);

	DPRINTF((DBG_TCP, "sk->window left = %d, sk->prot->rspace(sk)=%d\n",
		sk->window - sk->bytes_rcv, sk->prot->rspace(sk)));

	if(sk->debug)
		printk("sk->rspace = %lu, was %d\n", sk->prot->rspace(sk),
			left);
	if (sk->prot->rspace(sk) != left)
	{
		/* Space was reclaimed: the peer should learn about the
		   enlarged window. */
		sk->ack_backlog++;

		/* A big jump in free space warrants an immediate window
		   update; small ones ride the delayed-ACK timer. */
		if ((sk->prot->rspace(sk) > (sk->window - sk->bytes_rcv + sk->mtu)))
		{
			tcp_read_wakeup(sk);
		}
		else
		{
			/* Arm (or shorten) the delayed-ACK timer, never
			   lengthening one that would fire sooner. */
			int was_active = del_timer(&sk->timer);
			if (!was_active || TCP_ACK_TIME < sk->timer.expires)
			{
				reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
			}
			else
				add_timer(&sk->timer);
		}
	}
}
1298
1299
1300
/*
 * Read the single out-of-band urgent byte (MSG_OOB path of recv).
 * Returns 1 with the byte stored at @to, 0 on EOF/shutdown, or a
 * negative errno. -EINVAL if the socket delivers urgent data inline
 * or the byte was already consumed. Blocks (unless @nonblock) until
 * the urgent byte arrives, using the add-to-waitqueue-then-recheck
 * pattern so a wakeup cannot be lost.
 */
static int
tcp_read_urg(struct sock * sk, int nonblock,
	     unsigned char *to, int len, unsigned flags)
{
	struct wait_queue wait = { current, NULL };

	while (len > 0) {
		if (sk->urginline || !sk->urg_data || sk->urg_data == URG_READ)
			return -EINVAL;
		if (sk->urg_data & URG_VALID) {
			char c = sk->urg_data;
			if (!(flags & MSG_PEEK))
				sk->urg_data = URG_READ;	/* consume it */
			put_fs_byte(c, to);
			return 1;
		}

		if (sk->err) {
			int tmp = -sk->err;
			sk->err = 0;
			return tmp;
		}

		/* First hit of EOF returns 0, subsequent ones -ENOTCONN. */
		if (sk->state == TCP_CLOSE || sk->done) {
			if (!sk->done) {
				sk->done = 1;
				return 0;
			}
			return -ENOTCONN;
		}

		if (sk->shutdown & RCV_SHUTDOWN) {
			sk->done = 1;
			return 0;
		}

		if (nonblock)
			return -EAGAIN;

		if (current->signal & ~current->blocked)
			return -ERESTARTSYS;

		/* Sleep until urgent data, an error, or shutdown; the
		   condition is rechecked after queueing to avoid a race. */
		current->state = TASK_INTERRUPTIBLE;
		add_wait_queue(sk->sleep, &wait);
		if ((sk->urg_data & URG_NOTYET) && sk->err == 0 &&
		    !(sk->shutdown & RCV_SHUTDOWN))
			schedule();
		remove_wait_queue(sk->sleep, &wait);
		current->state = TASK_RUNNING;
	}
	return 0;
}
1353
1354
1355
/*
 * The TCP recvmsg path: copy up to @len in-order bytes from the
 * receive queue to user space at @to. Returns the byte count, or a
 * negative errno when nothing could be read. MSG_OOB is diverted to
 * tcp_read_urg(); MSG_PEEK reads without consuming (it advances a
 * local copy of copied_seq instead of the socket's). Blocks (unless
 * @nonblock) until data, EOF, error, or a signal.
 */
static int tcp_read(struct sock *sk, unsigned char *to,
	int len, int nonblock, unsigned flags)
{
	struct wait_queue wait = { current, NULL };
	int copied = 0;
	unsigned long peek_seq;
	unsigned long *seq;
	unsigned long used;
	int err;

	if (len == 0)
		return 0;

	if (len < 0)
		return -EINVAL;

	err = verify_area(VERIFY_WRITE, to, len);
	if (err)
		return err;

	/* A listening socket has no data stream. */
	if (sk->state == TCP_LISTEN)
		return -ENOTCONN;

	/* Urgent data is read through a separate path. */
	if (flags & MSG_OOB)
		return tcp_read_urg(sk, nonblock, to, len, flags);

	/* *seq tracks consumption: the real copied_seq normally, a
	   throwaway copy when only peeking. */
	peek_seq = sk->copied_seq;
	seq = &sk->copied_seq;
	if (flags & MSG_PEEK)
		seq = &peek_seq;

	add_wait_queue(sk->sleep, &wait);
	sk->inuse = 1;
	while (len > 0) {
		struct sk_buff * skb;
		unsigned long offset;

		/* Stop at the urgent mark: urgent data must not be
		   merged into a normal read. */
		if (copied && sk->urg_data && sk->urg_seq == 1+*seq)
			break;

		/* Set the state before scanning, so a wakeup that
		   arrives during the scan is not lost. */
		current->state = TASK_INTERRUPTIBLE;

		skb = skb_peek(&sk->receive_queue);
		do {
			if (!skb)
				break;
			if (before(1+*seq, skb->h.th->seq))
				break;	/* sequence gap */
			offset = 1 + *seq - skb->h.th->seq;
			if (skb->h.th->syn)
				offset--;	/* SYN consumes a seq, not a byte */
			if (offset < skb->len)
				goto found_ok_skb;
			if (!(flags & MSG_PEEK))
				skb->used = 1;	/* fully consumed */
			skb = skb->next;
		} while (skb != (struct sk_buff *)&sk->receive_queue);

		/* Something was copied already: return it rather than wait. */
		if (copied)
			break;

		if (sk->err) {
			copied = -sk->err;
			sk->err = 0;
			break;
		}

		if (sk->state == TCP_CLOSE) {
			if (!sk->done) {
				sk->done = 1;
				break;	/* first EOF: return 0 */
			}
			copied = -ENOTCONN;
			break;
		}

		if (sk->shutdown & RCV_SHUTDOWN) {
			sk->done = 1;
			break;
		}

		if (nonblock) {
			copied = -EAGAIN;
			break;
		}

		/* Tell the peer about freed space, then sleep for data. */
		cleanup_rbuf(sk);
		release_sock(sk);
		schedule();
		sk->inuse = 1;

		if (current->signal & ~current->blocked) {
			copied = -ERESTARTSYS;
			break;
		}
		continue;

	found_ok_skb:
		/* Bytes available in this buffer from our read point. */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Trim the copy around the urgent byte: stop just
		   before it, or skip it when not reading inline. */
		if (sk->urg_data) {
			unsigned long urg_offset = sk->urg_seq - (1 + *seq);
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sk->urginline) {
						++*seq;	/* skip the urgent byte */
						offset++;
						used--;
					}
				} else
					used = urg_offset;
			}
		}

		/* Copy out the payload (skipping the TCP header). */
		memcpy_tofs(to,((unsigned char *)skb->h.th) +
			skb->h.th->doff*4 + offset, used);
		copied += used;
		len -= used;
		to += used;
		*seq += used;
		if (after(sk->copied_seq+1,sk->urg_seq))
			sk->urg_data = 0;	/* urgent byte left behind */
		if (!(flags & MSG_PEEK) && (used + offset >= skb->len))
			skb->used = 1;	/* buffer fully consumed */
	}
	remove_wait_queue(sk->sleep, &wait);
	current->state = TASK_RUNNING;

	/* Free consumed buffers and update the advertised window. */
	cleanup_rbuf(sk);
	release_sock(sk);
	DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
	return copied;
}
1498
1499
1500
1501
1502
1503
1504
/*
 * shutdown() for TCP: when the send side is being closed, emit a FIN
 * (after flushing any pending partial buffer) and move to
 * FIN_WAIT1/FIN_WAIT2. Only SEND_SHUTDOWN is acted upon here, and a
 * FIN already in flight (state FIN_WAIT1/2) is not repeated. Even if
 * the FIN cannot be built, the state still advances so the close
 * sequence makes progress.
 */
void tcp_shutdown(struct sock *sk, int how)
{
	struct sk_buff *buff;
	struct tcphdr *t1, *th;
	struct proto *prot;
	int tmp;
	struct device *dev = NULL;

	if (sk->state == TCP_FIN_WAIT1 || sk->state == TCP_FIN_WAIT2)
		return;
	if (!(how & SEND_SHUTDOWN))
		return;
	sk->inuse = 1;

	/* Flush coalesced data so the FIN sequences after it. */
	if (sk->partial)
		tcp_send_partial(sk);

	prot =(struct proto *)sk->prot;
	th =(struct tcphdr *)&sk->dummy_th;
	release_sock(sk);	/* the allocation below may sleep */
	buff = prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
	if (buff == NULL)
		return;
	sk->inuse = 1;

	DPRINTF((DBG_TCP, "tcp_shutdown_send buff = %X\n", buff));
	buff->sk = sk;
	buff->len = sizeof(*t1);
	buff->localroute = sk->localroute;
	t1 =(struct tcphdr *) buff->data;

	/* Build the IP (and link-level) header. */
	tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
			IPPROTO_TCP, sk->opt,
			sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
	if (tmp < 0)
	{
		/* Could not build the FIN; advance state anyway. */
		buff->free=1;
		prot->wfree(sk,buff->mem_addr, buff->mem_len);
		if(sk->state==TCP_ESTABLISHED)
			sk->state=TCP_FIN_WAIT1;
		else
			sk->state=TCP_FIN_WAIT2;
		release_sock(sk);
		DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
		return;
	}

	t1 =(struct tcphdr *)((char *)t1 +tmp);
	buff->len += tmp;
	buff->dev = dev;
	/* FIN segment from the prototype header; the FIN itself
	   consumes one sequence number. */
	memcpy(t1, th, sizeof(*t1));
	t1->seq = ntohl(sk->write_seq);
	sk->write_seq++;
	buff->h.seq = sk->write_seq;
	t1->ack = 1;
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->window = ntohs(sk->window=tcp_select_window(sk));
	t1->fin = 1;
	t1->rst = 0;
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

	/* The FIN must sequence after any queued data: append it to
	   the write queue if one exists, otherwise send immediately. */
	if (skb_peek(&sk->write_queue) != NULL)
	{
		buff->free=0;
		if (buff->next != NULL)
		{
			printk("tcp_shutdown: next != NULL\n");
			skb_unlink(buff);
		}
		skb_queue_tail(&sk->write_queue, buff);
	}
	else
	{
		sk->sent_seq = sk->write_seq;
		sk->prot->queue_xmit(sk, dev, buff, 0);
	}

	if (sk->state == TCP_ESTABLISHED)
		sk->state = TCP_FIN_WAIT1;
	else
		sk->state = TCP_FIN_WAIT2;

	release_sock(sk);
}
1618
1619
1620 static int
1621 tcp_recvfrom(struct sock *sk, unsigned char *to,
1622 int to_len, int nonblock, unsigned flags,
1623 struct sockaddr_in *addr, int *addr_len)
1624 {
1625 struct sockaddr_in sin;
1626 int len;
1627 int err;
1628 int result;
1629
1630
1631
1632
1633 err = verify_area(VERIFY_WRITE,addr_len,sizeof(long));
1634 if(err)
1635 return err;
1636 len = get_fs_long(addr_len);
1637 if(len > sizeof(sin))
1638 len = sizeof(sin);
1639 err=verify_area(VERIFY_WRITE, addr, len);
1640 if(err)
1641 return err;
1642
1643 result=tcp_read(sk, to, to_len, nonblock, flags);
1644
1645 if (result < 0) return(result);
1646
1647 sin.sin_family = AF_INET;
1648 sin.sin_port = sk->dummy_th.dest;
1649 sin.sin_addr.s_addr = sk->daddr;
1650
1651 memcpy_tofs(addr, &sin, len);
1652 put_fs_long(len, addr_len);
1653 return(result);
1654 }
1655
1656
1657
1658
1659
1660
/*
 *	Send a TCP RST segment in reply to a packet that arrived for a
 *	non-existent or refusing connection.  Has no socket of its own:
 *	the reply skb is built from scratch (buff->sk = NULL) and sent
 *	with the "free after transmit" flag set.
 *
 *	Per RFC 793: if the offending segment carried an ACK, the reset
 *	takes its sequence number from that ack; otherwise the reset
 *	acks the offending segment instead.
 */
static void tcp_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
	  struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	int tmp;
	struct device *ndev=NULL;

	/* GFP_ATOMIC: we may be called from interrupt context. */
	buff = prot->wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
		return;		/* best effort only - no memory, no reset */

	DPRINTF((DBG_TCP, "tcp_reset buff = %X\n", buff));
	buff->len = sizeof(*t1);
	buff->sk = NULL;	/* this reply is not owned by any socket */
	buff->dev = dev;
	buff->localroute = 0;

	t1 =(struct tcphdr *) buff->data;

	/* Build the IP (and link) header in front of the TCP header. */
	tmp = prot->build_header(buff, saddr, daddr, &ndev, IPPROTO_TCP, opt,
			   sizeof(struct tcphdr),tos,ttl);
	if (tmp < 0)
	{
		buff->free = 1;
		prot->wfree(NULL, buff->mem_addr, buff->mem_len);
		return;
	}

	/* Skip past the lower-layer headers we just built. */
	t1 =(struct tcphdr *)((char *)t1 +tmp);
	buff->len += tmp;
	/* Start from a copy of the offending header, then rewrite it. */
	memcpy(t1, th, sizeof(*t1));

	/* Swap the port numbers: we are replying. */
	t1->dest = th->source;
	t1->source = th->dest;
	t1->rst = 1;
	t1->window = 0;

	if(th->ack)
	{
		/* RST sequence number comes from the segment's ack field. */
		t1->ack = 0;
		t1->seq = th->ack_seq;
		t1->ack_seq = 0;
	}
	else
	{
		/* No ack to echo: ack the offending segment instead
		   (SYN occupies one sequence number). */
		t1->ack = 1;
		if(!th->syn)
			t1->ack_seq=htonl(th->seq);
		else
			t1->ack_seq=htonl(th->seq+1);
		t1->seq=0;
	}

	/* Clear every other flag - this is a pure reset. */
	t1->syn = 0;
	t1->urg = 0;
	t1->fin = 0;
	t1->psh = 0;
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, saddr, daddr, sizeof(*t1), NULL);
	prot->queue_xmit(NULL, dev, buff, 1);	/* 1 = free on completion */
	tcp_statistics.TcpOutSegs++;
}
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747 static void
1748 tcp_options(struct sock *sk, struct tcphdr *th)
1749 {
1750 unsigned char *ptr;
1751 int length=(th->doff*4)-sizeof(struct tcphdr);
1752 int mss_seen = 0;
1753
1754 ptr = (unsigned char *)(th + 1);
1755
1756 while(length>0)
1757 {
1758 int opcode=*ptr++;
1759 int opsize=*ptr++;
1760 switch(opcode)
1761 {
1762 case TCPOPT_EOL:
1763 return;
1764 case TCPOPT_NOP:
1765 length-=2;
1766 continue;
1767
1768 default:
1769 if(opsize<=2)
1770 return;
1771 switch(opcode)
1772 {
1773 case TCPOPT_MSS:
1774 if(opsize==4 && th->syn)
1775 {
1776 sk->mtu=min(sk->mtu,ntohs(*(unsigned short *)ptr));
1777 mss_seen = 1;
1778 }
1779 break;
1780
1781 }
1782 ptr+=opsize-2;
1783 length-=opsize;
1784 }
1785 }
1786 if (th->syn) {
1787 if (! mss_seen)
1788 sk->mtu=min(sk->mtu, 536);
1789 }
1790 sk->mss = min(sk->max_window, sk->mtu);
1791 }
1792
/*
 *	Return the classful network mask (in network byte order) implied
 *	by the destination address `dst` (also network byte order).
 */
static inline unsigned long default_mask(unsigned long dst)
{
	unsigned long host = ntohl(dst);
	unsigned long mask;

	if (IN_CLASSA(host))
		mask = IN_CLASSA_NET;
	else if (IN_CLASSB(host))
		mask = IN_CLASSB_NET;
	else
		mask = IN_CLASSC_NET;

	return htonl(mask);
}
1802
1803
1804
1805
1806
1807
1808
1809
/*
 *	Handle an incoming SYN on a listening socket: create a child
 *	socket in SYN_RECV state, pick an MSS/MTU for it, and transmit a
 *	SYN+ACK carrying an MSS option.  The SYN skb itself is re-owned
 *	by the child and queued on the LISTENING socket's receive queue,
 *	where tcp_accept() will later find it.
 */
static void
tcp_conn_request(struct sock *sk, struct sk_buff *skb,
		 unsigned long daddr, unsigned long saddr,
		 struct options *opt, struct device *dev)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	unsigned char *ptr;
	struct sock *newsk;
	struct tcphdr *th;
	struct device *ndev=NULL;
	int tmp;

	DPRINTF((DBG_TCP, "tcp_conn_request(sk = %X, skb = %X, daddr = %X, sadd4= %X, \n"
		" opt = %X, dev = %X)\n",
		sk, skb, daddr, saddr, opt, dev));

	th = skb->h.th;

	/* A dead listener cannot accept: answer with a reset. */
	if (!sk->dead) {
		sk->data_ready(sk,0);
	} else {
		DPRINTF((DBG_TCP, "tcp_conn_request on dead socket\n"));
		tcp_reset(daddr, saddr, th, sk->prot, opt, dev, sk->ip_tos,sk->ip_ttl);
		tcp_statistics.TcpAttemptFails++;
		kfree_skb(skb, FREE_READ);
		return;
	}

	/* Backlog full: silently drop the SYN (the peer will retry). */
	if (sk->ack_backlog >= sk->max_ack_backlog) {
		tcp_statistics.TcpAttemptFails++;
		kfree_skb(skb, FREE_READ);
		return;
	}

	/* Allocate the child socket.  GFP_ATOMIC: interrupt context. */
	newsk = (struct sock *) kmalloc(sizeof(struct sock), GFP_ATOMIC);
	if (newsk == NULL) {
		tcp_statistics.TcpAttemptFails++;
		kfree_skb(skb, FREE_READ);
		return;
	}

	DPRINTF((DBG_TCP, "newsk = %X\n", newsk));
	/* Clone the listener, then reset everything per-connection. */
	memcpy(newsk, sk, sizeof(*newsk));
	skb_queue_head_init(&newsk->write_queue);
	skb_queue_head_init(&newsk->receive_queue);
	newsk->send_head = NULL;
	newsk->send_tail = NULL;
	skb_queue_head_init(&newsk->back_log);
	newsk->rtt = TCP_CONNECT_TIME << 3;	/* rtt is kept scaled by 8 */
	newsk->rto = TCP_CONNECT_TIME;
	newsk->mdev = 0;
	newsk->max_window = 0;
	newsk->cong_window = 1;			/* slow start from one segment */
	newsk->cong_count = 0;
	newsk->ssthresh = 0;
	newsk->backoff = 0;
	newsk->blog = 0;
	newsk->intr = 0;
	newsk->proc = 0;
	newsk->done = 0;
	newsk->partial = NULL;
	newsk->pair = NULL;
	newsk->wmem_alloc = 0;
	newsk->rmem_alloc = 0;
	newsk->localroute = sk->localroute;

	newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;

	newsk->err = 0;
	newsk->shutdown = 0;
	newsk->ack_backlog = 0;
	/* The peer's SYN occupies one sequence number. */
	newsk->acked_seq = skb->h.th->seq+1;
	newsk->fin_seq = skb->h.th->seq;
	newsk->copied_seq = skb->h.th->seq;
	newsk->state = TCP_SYN_RECV;
	newsk->timeout = 0;
	/* Clock-driven initial send sequence number. */
	newsk->write_seq = jiffies * SEQ_TICK - seq_offset;
	newsk->window_seq = newsk->write_seq;
	newsk->rcv_ack_seq = newsk->write_seq;
	newsk->urg_data = 0;
	newsk->retransmits = 0;
	newsk->destroy = 0;
	newsk->timer.data = (unsigned long)newsk;
	newsk->timer.function = &net_timer;
	newsk->dummy_th.source = skb->h.th->dest;
	newsk->dummy_th.dest = skb->h.th->source;

	/* Our daddr is the sender's saddr and vice versa. */
	newsk->daddr = saddr;
	newsk->saddr = daddr;

	put_sock(newsk->num,newsk);
	newsk->dummy_th.res1 = 0;
	newsk->dummy_th.doff = 6;
	newsk->dummy_th.fin = 0;
	newsk->dummy_th.syn = 0;
	newsk->dummy_th.rst = 0;
	newsk->dummy_th.psh = 0;
	newsk->dummy_th.ack = 0;
	newsk->dummy_th.urg = 0;
	newsk->dummy_th.res2 = 0;
	newsk->acked_seq = skb->h.th->seq + 1;
	newsk->copied_seq = skb->h.th->seq;

	/* Inherit TTL; take TOS from the arriving SYN's IP header. */
	newsk->ip_ttl=sk->ip_ttl;
	newsk->ip_tos=skb->ip_hdr->tos;

	/*
	 *	Pick an initial MTU: a user-forced MSS wins; otherwise
	 *	off-net destinations get the conservative 576-byte value.
	 */
	if (sk->user_mss)
		newsk->mtu = sk->user_mss;
	else {
#ifdef SUBNETSARELOCAL
		if ((saddr ^ daddr) & default_mask(saddr))
#else
		if ((saddr ^ daddr) & dev->pa_mask)
#endif
			newsk->mtu = 576 - HEADER_SIZE;
		else
			newsk->mtu = MAX_WINDOW;
	}

	/* Never exceed what the arrival device can carry. */
	newsk->mtu = min(newsk->mtu, dev->mtu - HEADER_SIZE);

	/* May lower newsk->mtu again from the SYN's MSS option. */
	tcp_options(newsk,skb->h.th);

	buff = newsk->prot->wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL) {
		sk->err = -ENOMEM;
		newsk->dead = 1;
		release_sock(newsk);
		kfree_skb(skb, FREE_READ);
		tcp_statistics.TcpAttemptFails++;
		return;
	}

	/* header + 4 bytes for the MSS option. */
	buff->len = sizeof(struct tcphdr)+4;
	buff->sk = newsk;
	buff->localroute = newsk->localroute;

	t1 =(struct tcphdr *) buff->data;

	tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &ndev,
			       IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);

	if (tmp < 0) {
		sk->err = tmp;
		buff->free=1;
		kfree_skb(buff,FREE_WRITE);
		newsk->dead = 1;
		release_sock(newsk);
		/* give the skb back to the listener before freeing it */
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
		tcp_statistics.TcpAttemptFails++;
		return;
	}

	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)t1 +tmp);

	/* Start from the incoming header, then rewrite for the SYN+ACK. */
	memcpy(t1, skb->h.th, sizeof(*t1));
	buff->h.seq = newsk->write_seq;

	t1->dest = skb->h.th->source;
	t1->source = newsk->dummy_th.source;
	t1->seq = ntohl(newsk->write_seq++);	/* SYN consumes a sequence no. */
	t1->ack = 1;
	newsk->window = tcp_select_window(newsk);
	newsk->sent_seq = newsk->write_seq;
	t1->window = ntohs(newsk->window);
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->psh = 0;
	t1->syn = 1;
	t1->ack_seq = ntohl(skb->h.th->seq+1);
	t1->doff = sizeof(*t1)/4+1;		/* +1 word for the MSS option */

	/* MSS option: kind 2, length 4, 16-bit MSS value. */
	ptr =(unsigned char *)(t1+1);
	ptr[0] = 2;
	ptr[1] = 4;
	ptr[2] = ((newsk->mtu) >> 8) & 0xff;
	ptr[3] =(newsk->mtu) & 0xff;

	tcp_send_check(t1, daddr, saddr, sizeof(*t1)+4, newsk);
	newsk->prot->queue_xmit(newsk, dev, buff, 0);

	reset_timer(newsk, TIME_WRITE , TCP_CONNECT_TIME);
	/* The SYN skb now belongs to the child... */
	skb->sk = newsk;

	/* ...so charge its memory to the child, not the listener. */
	sk->rmem_alloc -= skb->mem_len;
	newsk->rmem_alloc += skb->mem_len;

	/* Queue on the LISTENER so tcp_accept() can find the child. */
	skb_queue_tail(&sk->receive_queue,skb);
	sk->ack_backlog++;
	release_sock(newsk);
	tcp_statistics.TcpOutSegs++;
}
2029
2030
/*
 *	Close a TCP socket.  Drains the receive queue (remembering whether
 *	unread data forces a reset), flushes any partial segment, then
 *	either just adjusts state/timers or builds and sends a FIN,
 *	depending on the current connection state.  `timeout` non-zero
 *	means the lingering period has already expired.
 */
static void tcp_close(struct sock *sk, int timeout)
{
	struct sk_buff *buff;
	int need_reset = 0;
	struct tcphdr *t1, *th;
	struct proto *prot;
	struct device *dev=NULL;
	int tmp;

	DPRINTF((DBG_TCP, "tcp_close((struct sock *)%X, %d)\n",sk, timeout));
	sk->inuse = 1;
	sk->keepopen = 1;
	sk->shutdown = SHUTDOWN_MASK;

	if (!sk->dead)
		sk->state_change(sk);

	/*
	 *	Throw away unread data.  If any of it was never consumed by
	 *	the user, RFC semantics call for a reset rather than a FIN.
	 */
	if (skb_peek(&sk->receive_queue) != NULL)
	{
		struct sk_buff *skb;
		if(sk->debug)
			printk("Clean rcv queue\n");
		while((skb=skb_dequeue(&sk->receive_queue))!=NULL)
		{
			if(skb->len > 0 && after(skb->h.th->seq + skb->len + 1 , sk->copied_seq))
				need_reset = 1;
			kfree_skb(skb, FREE_READ);
		}
		if(sk->debug)
			printk("Cleaned.\n");
	}

	/* Push out any partially filled segment before the FIN. */
	if (sk->partial)
	{
		tcp_send_partial(sk);
	}

	switch(sk->state)
	{
		case TCP_FIN_WAIT1:
		case TCP_FIN_WAIT2:
		case TCP_LAST_ACK:
			/* FIN already sent: just (re)arm the close timer. */
			reset_timer(sk, TIME_CLOSE, 4 * sk->rto);
			if (timeout)
				tcp_time_wait(sk);
			release_sock(sk);
			return;
		case TCP_TIME_WAIT:
			if (timeout)
			{
				sk->state = TCP_CLOSE;
			}
			release_sock(sk);
			return;
		case TCP_LISTEN:
			sk->state = TCP_CLOSE;
			release_sock(sk);
			return;
		case TCP_CLOSE:
			release_sock(sk);
			return;
		case TCP_CLOSE_WAIT:
		case TCP_ESTABLISHED:
		case TCP_SYN_SENT:
		case TCP_SYN_RECV:
			/* Active states: we must build and send a FIN. */
			prot =(struct proto *)sk->prot;
			th =(struct tcphdr *)&sk->dummy_th;
			buff = prot->wmalloc(sk, MAX_FIN_SIZE, 1, GFP_ATOMIC);
			if (buff == NULL)
			{
				/*
				 *	No memory for the FIN right now: back
				 *	off to ESTABLISHED and retry shortly
				 *	via the close timer.
				 */
				release_sock(sk);
				if (sk->state != TCP_CLOSE_WAIT)
					sk->state = TCP_ESTABLISHED;
				reset_timer(sk, TIME_CLOSE, 100);
				return;
			}
			buff->sk = sk;
			buff->free = 1;
			buff->len = sizeof(*t1);
			buff->localroute = sk->localroute;
			t1 =(struct tcphdr *) buff->data;

			/* Put the IP header in front of the FIN. */
			tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
					 IPPROTO_TCP, sk->opt,
				         sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
			if (tmp < 0)
			{
				/* Could not route: advance state anyway. */
				kfree_skb(buff,FREE_WRITE);
				if(sk->state==TCP_ESTABLISHED)
					sk->state=TCP_FIN_WAIT1;
				else
					sk->state=TCP_FIN_WAIT2;
				reset_timer(sk, TIME_CLOSE,4*sk->rto);
				if(timeout)
					tcp_time_wait(sk);

				DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
				release_sock(sk);
				return;
			}

			t1 =(struct tcphdr *)((char *)t1 +tmp);
			buff->len += tmp;
			buff->dev = dev;
			memcpy(t1, th, sizeof(*t1));
			t1->seq = ntohl(sk->write_seq);
			sk->write_seq++;	/* FIN consumes a sequence number */
			buff->h.seq = sk->write_seq;
			t1->ack = 1;

			/* Ack everything immediately from now on. */
			sk->delay_acks = 0;
			t1->ack_seq = ntohl(sk->acked_seq);
			t1->window = ntohs(sk->window=tcp_select_window(sk));
			t1->fin = 1;
			t1->rst = need_reset;	/* reset if unread data was dropped */
			t1->doff = sizeof(*t1)/4;
			tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

			tcp_statistics.TcpOutSegs++;

			if (skb_peek(&sk->write_queue) == NULL)
			{
				/* Nothing pending: send the FIN right away. */
				sk->sent_seq = sk->write_seq;
				prot->queue_xmit(sk, dev, buff, 0);
			}
			else
			{
				/* Data still queued: FIN goes out last. */
				reset_timer(sk, TIME_WRITE, sk->rto);
				if (buff->next != NULL)
				{
					printk("tcp_close: next != NULL\n");
					skb_unlink(buff);
				}
				skb_queue_tail(&sk->write_queue, buff);
			}

			if (sk->state == TCP_CLOSE_WAIT)
			{
				sk->state = TCP_FIN_WAIT2;
			}
			else
			{
				sk->state = TCP_FIN_WAIT1;
			}
	}
	release_sock(sk);
}
2206
2207
2208
2209
2210
2211
2212 static void
2213 tcp_write_xmit(struct sock *sk)
2214 {
2215 struct sk_buff *skb;
2216
2217 DPRINTF((DBG_TCP, "tcp_write_xmit(sk=%X)\n", sk));
2218
2219
2220
2221 if(sk->zapped)
2222 return;
2223
2224 while((skb = skb_peek(&sk->write_queue)) != NULL &&
2225 before(skb->h.seq, sk->window_seq + 1) &&
2226 (sk->retransmits == 0 ||
2227 sk->timeout != TIME_WRITE ||
2228 before(skb->h.seq, sk->rcv_ack_seq + 1))
2229 && sk->packets_out < sk->cong_window) {
2230 IS_SKB(skb);
2231 skb_unlink(skb);
2232 DPRINTF((DBG_TCP, "Sending a packet.\n"));
2233
2234
2235 if (before(skb->h.seq, sk->rcv_ack_seq +1)) {
2236 sk->retransmits = 0;
2237 kfree_skb(skb, FREE_WRITE);
2238 if (!sk->dead) sk->write_space(sk);
2239 } else {
2240 sk->sent_seq = skb->h.seq;
2241 sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
2242 }
2243 }
2244 }
2245
2246
2247
2248
2249
2250
2251 void
2252 sort_send(struct sock *sk)
2253 {
2254 struct sk_buff *list = NULL;
2255 struct sk_buff *skb,*skb2,*skb3;
2256
2257 for (skb = sk->send_head; skb != NULL; skb = skb2) {
2258 skb2 = skb->link3;
2259 if (list == NULL || before (skb2->h.seq, list->h.seq)) {
2260 skb->link3 = list;
2261 sk->send_tail = skb;
2262 list = skb;
2263 } else {
2264 for (skb3 = list; ; skb3 = skb3->link3) {
2265 if (skb3->link3 == NULL ||
2266 before(skb->h.seq, skb3->link3->h.seq)) {
2267 skb->link3 = skb3->link3;
2268 skb3->link3 = skb;
2269 if (skb->link3 == NULL) sk->send_tail = skb;
2270 break;
2271 }
2272 }
2273 }
2274 }
2275 sk->send_head = list;
2276 }
2277
2278
2279
/*
 *	Process an incoming acknowledgement.  Updates the send window,
 *	congestion window and RTT estimate, frees fully acknowledged
 *	segments from the retransmit list, restarts transmission or the
 *	zero-window probe, and drives the FIN/TIME_WAIT side of the
 *	close state machine.
 *
 *	Returns 1 if the ack was acceptable, 0 if it was out of range.
 *
 *	`flag` is a bit mask used at the end to decide whether a fast
 *	retransmit is warranted:
 *	  1 - data was acked / window updated / state progressed
 *	  2 - a retransmitted segment was acked (so no RTT sample)
 *	  4 - the window was updated or segments were freed
 */
static int
tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
{
	unsigned long ack;
	int flag = 0;

	/* A zapped socket has been reset: swallow the ack quietly. */
	if(sk->zapped)
		return(1);

	ack = ntohl(th->ack_seq);
	DPRINTF((DBG_TCP, "tcp_ack ack=%d, window=%d, "
		"sk->rcv_ack_seq=%d, sk->window_seq = %d\n",
		ack, ntohs(th->window), sk->rcv_ack_seq, sk->window_seq));

	/* Track the largest window the peer has ever offered. */
	if (ntohs(th->window) > sk->max_window) {
		sk->max_window = ntohs(th->window);
		sk->mss = min(sk->max_window, sk->mtu);
	}

	if (sk->retransmits && sk->timeout == TIME_KEEPOPEN)
		sk->retransmits = 0;

	/* Reject acks outside [rcv_ack_seq-1, sent_seq+1]. */
	if (after(ack, sk->sent_seq+1) || before(ack, sk->rcv_ack_seq-1)) {
		if (after(ack, sk->sent_seq) ||
		   (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)) {
			return(0);
		}
		if (sk->keepopen) {
			reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
		}
		return(1);
	}

	/* Segment carries data beyond the header. */
	if (len != th->doff*4) flag |= 1;

	/*
	 *	The peer shrank the window.  Move everything on the
	 *	retransmit list that no longer fits back onto the front of
	 *	the write queue (preserving order), keeping the rest on a
	 *	rebuilt send_head/send_tail chain.
	 */
	if (after(sk->window_seq, ack+ntohs(th->window))) {
		struct sk_buff *skb;
		struct sk_buff *skb2;
		struct sk_buff *wskb = NULL;

		skb2 = sk->send_head;
		sk->send_head = NULL;
		sk->send_tail = NULL;

		flag |= 4;

		sk->window_seq = ack + ntohs(th->window);
		cli();		/* list surgery must not race the IRQ path */
		while (skb2 != NULL) {
			skb = skb2;
			skb2 = skb->link3;
			skb->link3 = NULL;
			if (after(skb->h.seq, sk->window_seq)) {
				/* No longer sendable: back to the write queue. */
				if (sk->packets_out > 0) sk->packets_out--;

				if (skb->next != NULL) {
					skb_unlink(skb);
				}

				if (wskb == NULL)
					skb_queue_head(&sk->write_queue,skb);
				else
					skb_append(wskb,skb);
				wskb = skb;
			} else {
				/* Still in window: keep on the send chain. */
				if (sk->send_head == NULL) {
					sk->send_head = skb;
					sk->send_tail = skb;
				} else {
					sk->send_tail->link3 = skb;
					sk->send_tail = skb;
				}
				skb->link3 = NULL;
			}
		}
		sti();
	}

	/* Keep head/tail/packets_out mutually consistent. */
	if (sk->send_tail == NULL || sk->send_head == NULL) {
		sk->send_head = NULL;
		sk->send_tail = NULL;
		sk->packets_out= 0;
	}

	sk->window_seq = ack + ntohs(th->window);

	/*
	 *	Congestion window: exponential (slow start) growth below
	 *	ssthresh, linear (one per window's worth of acks) above it.
	 */
	if (sk->timeout == TIME_WRITE &&
	    sk->cong_window < 2048 && after(ack, sk->rcv_ack_seq)) {
		if (sk->cong_window < sk->ssthresh)
			sk->cong_window++;
		else {
			if (sk->cong_count >= sk->cong_window) {
				sk->cong_window++;
				sk->cong_count = 0;
			} else
				sk->cong_count++;
		}
	}

	DPRINTF((DBG_TCP, "tcp_ack: Updating rcv ack sequence.\n"));
	sk->rcv_ack_seq = ack;

	/*
	 *	If we were zero-window probing and the window has opened
	 *	for the first queued segment, resume normal timing.
	 */
	if (sk->timeout == TIME_PROBE0) {
		if (skb_peek(&sk->write_queue) != NULL &&
		    ! before (sk->window_seq, sk->write_queue.next->h.seq)) {
			sk->retransmits = 0;
			sk->backoff = 0;

			/* rto = rtt/8*2 + mdev*2, clamped to [1s, 120s]
			   (rtt is stored scaled by 8, mdev by 4). */
			sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
			if (sk->rto > 120*HZ)
				sk->rto = 120*HZ;
			if (sk->rto < 1*HZ)
				sk->rto = 1*HZ;
		}
	}

	/* Free every fully acknowledged segment on the retransmit list. */
	while(sk->send_head != NULL) {
		/* Sanity: the list must be in sequence order. */
		if (sk->send_head->link3 &&
		    after(sk->send_head->h.seq, sk->send_head->link3->h.seq)) {
			printk("INET: tcp.c: *** bug send_list out of order.\n");
			sort_send(sk);
		}

		if (before(sk->send_head->h.seq, ack+1)) {
			struct sk_buff *oskb;

			if (sk->retransmits) {
				/* An acked retransmit: no RTT sample (Karn). */
				flag |= 2;

				if (sk->send_head->link3)
					sk->retransmits = 1;
				else
					sk->retransmits = 0;
			}

			if (sk->packets_out > 0) sk->packets_out --;
			DPRINTF((DBG_TCP, "skb=%X skb->h.seq = %d acked ack=%d\n",
				sk->send_head, sk->send_head->h.seq, ack));

			if (!sk->dead) sk->write_space(sk);

			oskb = sk->send_head;

			if (!(flag&2)) {
				long m;

				/*
				 *	Van Jacobson RTT estimation:
				 *	rtt (scaled x8) += error/8 via m;
				 *	mdev (scaled x4) tracks |error|.
				 */
				m = jiffies - oskb->when;
				m -= (sk->rtt >> 3);
				sk->rtt += m;
				if (m < 0)
					m = -m;
				m -= (sk->mdev >> 2);
				sk->mdev += m;

				sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
				if (sk->rto > 120*HZ)
					sk->rto = 120*HZ;
				if (sk->rto < 1*HZ)
					sk->rto = 1*HZ;
				sk->backoff = 0;
			}
			flag |= (2|4);

			cli();

			oskb = sk->send_head;
			IS_SKB(oskb);
			sk->send_head = oskb->link3;
			if (sk->send_head == NULL) {
				sk->send_tail = NULL;
			}

			if (oskb->next)
				skb_unlink(oskb);
			sti();
			kfree_skb(oskb, FREE_WRITE);
			if (!sk->dead) sk->write_space(sk);
		} else {
			break;
		}
	}

	/*
	 *	Restart output: send more if the window allows, otherwise
	 *	arm the zero-window probe timer.
	 */
	if (skb_peek(&sk->write_queue) != NULL) {
		if (after (sk->window_seq+1, sk->write_queue.next->h.seq) &&
			(sk->retransmits == 0 ||
			 sk->timeout != TIME_WRITE ||
			 before(sk->write_queue.next->h.seq, sk->rcv_ack_seq + 1))
			&& sk->packets_out < sk->cong_window) {
			flag |= 1;
			tcp_write_xmit(sk);
		} else if (before(sk->window_seq, sk->write_queue.next->h.seq) &&
			sk->send_head == NULL &&
			sk->ack_backlog == 0 &&
			sk->state != TCP_TIME_WAIT) {
			reset_timer(sk, TIME_PROBE0, sk->rto);
		}
	} else {
		if (sk->send_head == NULL && sk->ack_backlog == 0 &&
		    sk->state != TCP_TIME_WAIT && !sk->keepopen) {
			DPRINTF((DBG_TCP, "Nothing to do, going to sleep.\n"));
			if (!sk->dead) sk->write_space(sk);

			/* NOTE(review): keepopen is always false here (see
			   the guard above), so this branch looks dead. */
			if (sk->keepopen)
				reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
			else
				delete_timer(sk);
		} else {
			if (sk->state != (unsigned char) sk->keepopen) {
				reset_timer(sk, TIME_WRITE, sk->rto);
			}
			if (sk->state == TCP_TIME_WAIT) {
				reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			}
		}
	}

	/* Everything sent and acked: flush any partial segment. */
	if (sk->packets_out == 0 && sk->partial != NULL &&
	    skb_peek(&sk->write_queue) == NULL && sk->send_head == NULL) {
		flag |= 1;
		tcp_send_partial(sk);
	}

	/* TIME_WAIT: fully closed once both FINs are acknowledged. */
	if (sk->state == TCP_TIME_WAIT) {
		if (!sk->dead)
			sk->state_change(sk);
		if (sk->rcv_ack_seq == sk->write_seq && sk->acked_seq == sk->fin_seq) {
			flag |= 1;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
		}
	}

	/* LAST_ACK / FIN_WAIT2: our FIN has been acked. */
	if (sk->state == TCP_LAST_ACK || sk->state == TCP_FIN_WAIT2) {
		if (!sk->dead) sk->state_change(sk);
		if (sk->rcv_ack_seq == sk->write_seq) {
			flag |= 1;
			if (sk->acked_seq != sk->fin_seq) {
				tcp_time_wait(sk);
			} else {
				DPRINTF((DBG_TCP, "tcp_ack closing socket - %X\n", sk));
				tcp_send_ack(sk->sent_seq, sk->acked_seq, sk,
					     th, sk->daddr);
				sk->shutdown = SHUTDOWN_MASK;
				sk->state = TCP_CLOSE;
			}
		}
	}

	/*
	 *	Fast retransmit heuristic: a pure duplicate ack (no flag
	 *	bits except 4) with unacked data outstanding suggests the
	 *	head of the send list was lost - resend it now.
	 */
	if (((!flag) || (flag&4)) && sk->send_head != NULL &&
	    (((flag&2) && sk->retransmits) ||
	     (sk->send_head->when + sk->rto < jiffies))) {
		ip_do_retransmit(sk, 1);
		reset_timer(sk, TIME_WRITE, sk->rto);
	}

	DPRINTF((DBG_TCP, "leaving tcp_ack\n"));
	return(1);
}
2643
2644
2645
2646
2647
2648
2649
/*
 *	Handle the data portion of an arriving segment.  Inserts the skb
 *	into the receive queue in sequence order (dropping duplicates),
 *	advances acked_seq across any contiguous run this segment
 *	completes, decides whether to ack now or delay, and wakes the
 *	reader.  Always returns 0; the skb is either queued or freed.
 */
static int
tcp_data(struct sk_buff *skb, struct sock *sk,
	 unsigned long saddr, unsigned short len)
{
	struct sk_buff *skb1, *skb2;
	struct tcphdr *th;
	int dup_dumped=0;

	th = skb->h.th;
	print_th(th);
	skb->len = len -(th->doff*4);	/* payload length only */

	DPRINTF((DBG_TCP, "tcp_data len = %d sk = %X:\n", skb->len, sk));

	sk->bytes_rcv += skb->len;
	/* Pure ack with no data/flags of interest: nothing to queue. */
	if (skb->len == 0 && !th->fin && !th->urg && !th->psh) {
		if (!th->ack) tcp_send_ack(sk->sent_seq, sk->acked_seq,sk, th, saddr);
		kfree_skb(skb, FREE_READ);
		return(0);
	}

	/* Data after we shut down receiving: reset the connection. */
	if (sk->shutdown & RCV_SHUTDOWN) {
		sk->acked_seq = th->seq + skb->len + th->syn + th->fin;
		tcp_reset(sk->saddr, sk->daddr, skb->h.th,
			sk->prot, NULL, skb->dev, sk->ip_tos, sk->ip_ttl);
		tcp_statistics.TcpEstabResets++;
		sk->state = TCP_CLOSE;
		sk->err = EPIPE;
		sk->shutdown = SHUTDOWN_MASK;
		DPRINTF((DBG_TCP, "tcp_data: closing socket - %X\n", sk));
		kfree_skb(skb, FREE_READ);
		if (!sk->dead) sk->state_change(sk);
		return(0);
	}

	/*
	 *	Insert into the receive queue in sequence order, scanning
	 *	backwards from the tail (in-order arrival is the common
	 *	case).  An exact-duplicate with equal or more data replaces
	 *	the old skb.
	 */
	if (skb_peek(&sk->receive_queue) == NULL) {
		DPRINTF((DBG_TCP, "tcp_data: skb = %X:\n", skb));
		skb_queue_head(&sk->receive_queue,skb);
		skb1= NULL;
	} else {
		DPRINTF((DBG_TCP, "tcp_data adding to chain sk = %X:\n", sk));
		for(skb1=sk->receive_queue.prev; ; skb1 = skb1->prev) {
			if(sk->debug)
			{
				printk("skb1=%p :", skb1);
				printk("skb1->h.th->seq = %ld: ", skb1->h.th->seq);
				printk("skb->h.th->seq = %ld\n",skb->h.th->seq);
				printk("copied_seq = %ld acked_seq = %ld\n", sk->copied_seq,
					sk->acked_seq);
			}
			/* Same sequence, at least as much data: replace. */
			if (th->seq==skb1->h.th->seq && skb->len>= skb1->len)
			{
				skb_append(skb1,skb);
				skb_unlink(skb1);
				kfree_skb(skb1,FREE_READ);
				dup_dumped=1;
				skb1=NULL;
				break;
			}
			/* Found the first queued skb we sort after. */
			if (after(th->seq+1, skb1->h.th->seq))
			{
				skb_append(skb1,skb);
				break;
			}
			/* Reached the queue head: we sort before everything. */
			if (skb1 == skb_peek(&sk->receive_queue))
			{
				skb_queue_head(&sk->receive_queue, skb);
				break;
			}
		}
		DPRINTF((DBG_TCP, "skb = %X:\n", skb));
	}

	/* Stash the end-sequence of this segment in its header's ack_seq
	   field (re-used as scratch space from here on). */
	th->ack_seq = th->seq + skb->len;
	if (th->syn) th->ack_seq++;
	if (th->fin) th->ack_seq++;

	if (before(sk->acked_seq, sk->copied_seq)) {
		printk("*** tcp.c:tcp_data bug acked < copied\n");
		sk->acked_seq = sk->copied_seq;
	}

	/*
	 *	If this segment is in order (or fills a hole), advance
	 *	acked_seq over it and over any now-contiguous successors,
	 *	shrinking our offered window by the newly covered bytes.
	 */
	if ((!dup_dumped && (skb1 == NULL || skb1->acked)) || before(th->seq, sk->acked_seq+1)) {
		if (before(th->seq, sk->acked_seq+1)) {
			int newwindow;

			if (after(th->ack_seq, sk->acked_seq)) {
				newwindow = sk->window -
					(th->ack_seq - sk->acked_seq);
				if (newwindow < 0)
					newwindow = 0;
				sk->window = newwindow;
				sk->acked_seq = th->ack_seq;
			}
			skb->acked = 1;

			/* An in-order FIN closes our receive side. */
			if (skb->h.th->fin) {
				if (!sk->dead) sk->state_change(sk);
				sk->shutdown |= RCV_SHUTDOWN;
			}

			/* Sweep forward over segments this one made contiguous. */
			for(skb2 = skb->next;
			    skb2 != (struct sk_buff *)&sk->receive_queue;
			    skb2 = skb2->next) {
				if (before(skb2->h.th->seq, sk->acked_seq+1)) {
					if (after(skb2->h.th->ack_seq, sk->acked_seq))
					{
						newwindow = sk->window -
						 (skb2->h.th->ack_seq - sk->acked_seq);
						if (newwindow < 0)
							newwindow = 0;
						sk->window = newwindow;
						sk->acked_seq = skb2->h.th->ack_seq;
					}
					skb2->acked = 1;

					if (skb2->h.th->fin) {
						sk->shutdown |= RCV_SHUTDOWN;
						if (!sk->dead) sk->state_change(sk);
					}

					/* Force an immediate ack below. */
					sk->ack_backlog = sk->max_ack_backlog;
				} else {
					break;
				}
			}

			/*
			 *	Ack now or delay?  Fall through (empty branch)
			 *	to ack immediately; otherwise queue the ack on
			 *	a short timer.
			 */
			if (!sk->delay_acks ||
			    sk->ack_backlog >= sk->max_ack_backlog ||
			    sk->bytes_rcv > sk->max_unacked || th->fin) {
			} else {
				sk->ack_backlog++;
				if(sk->debug)
					printk("Ack queued.\n");
				reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
			}
		}
	}

	/*
	 *	Segment arrived out of order.  If receive memory is tight,
	 *	shed unacked out-of-order segments from the queue head, then
	 *	ack (a duplicate ack tells the peer what we still need).
	 */
	if (!skb->acked) {
		while (sk->prot->rspace(sk) < sk->mtu) {
			skb1 = skb_peek(&sk->receive_queue);
			if (skb1 == NULL) {
				printk("INET: tcp.c:tcp_data memory leak detected.\n");
				break;
			}

			/* Never discard data we have already acknowledged. */
			if (skb1->acked) {
				break;
			}

			skb_unlink(skb1);
			kfree_skb(skb1, FREE_READ);
		}
		tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
		sk->ack_backlog++;
		reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
	} else {
		tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
	}

	/* Wake any reader waiting for data. */
	if (!sk->dead) {
		if(sk->debug)
			printk("Data wakeup.\n");
		sk->data_ready(sk,0);
	} else {
		DPRINTF((DBG_TCP, "data received on dead socket.\n"));
	}

	/* Both FINs seen and everything acked: move on from FIN_WAIT2. */
	if (sk->state == TCP_FIN_WAIT2 &&
	    sk->acked_seq == sk->fin_seq && sk->rcv_ack_seq == sk->write_seq) {
		DPRINTF((DBG_TCP, "tcp_data: entering last_ack state sk = %X\n", sk));

		sk->shutdown = SHUTDOWN_MASK;
		sk->state = TCP_LAST_ACK;
		if (!sk->dead) sk->state_change(sk);
	}

	return(0);
}
2866
2867
2868 static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
2869 {
2870 unsigned long ptr = ntohs(th->urg_ptr);
2871
2872 if (ptr)
2873 ptr--;
2874 ptr += th->seq;
2875
2876
2877 if (after(sk->copied_seq+1, ptr))
2878 return;
2879
2880
2881 if (sk->urg_data && !after(ptr, sk->urg_seq))
2882 return;
2883
2884
2885 if (sk->proc != 0) {
2886 if (sk->proc > 0) {
2887 kill_proc(sk->proc, SIGURG, 1);
2888 } else {
2889 kill_pg(-sk->proc, SIGURG, 1);
2890 }
2891 }
2892 sk->urg_data = URG_NOTYET;
2893 sk->urg_seq = ptr;
2894 }
2895
2896 static inline int tcp_urg(struct sock *sk, struct tcphdr *th,
2897 unsigned long saddr, unsigned long len)
2898 {
2899 unsigned long ptr;
2900
2901
2902 if (th->urg)
2903 tcp_check_urg(sk,th);
2904
2905
2906 if (sk->urg_data != URG_NOTYET)
2907 return 0;
2908
2909
2910 ptr = sk->urg_seq - th->seq + th->doff*4;
2911 if (ptr >= len)
2912 return 0;
2913
2914
2915 sk->urg_data = URG_VALID | *(ptr + (unsigned char *) th);
2916 if (!sk->dead)
2917 sk->data_ready(sk,0);
2918 return 0;
2919 }
2920
2921
2922
2923
2924
2925
/*
 *	Process a received FIN: record the FIN's sequence number and move
 *	the connection to the appropriate next state.  On exit from the
 *	switch (except the TIME_WAIT/default path) ack_backlog is bumped
 *	so the caller's path will acknowledge the FIN.  Always returns 0.
 */
static int tcp_fin(struct sock *sk, struct tcphdr *th,
	 unsigned long saddr, struct device *dev)
{
	DPRINTF((DBG_TCP, "tcp_fin(sk=%X, th=%X, saddr=%X, dev=%X)\n",
						sk, th, saddr, dev));

	if (!sk->dead)
	{
		sk->state_change(sk);
	}

	switch(sk->state)
	{
		case TCP_SYN_RECV:
		case TCP_SYN_SENT:
		case TCP_ESTABLISHED:
			/* Peer closed first: we go to CLOSE_WAIT. */
			reset_timer(sk, TIME_CLOSE, TCP_TIMEOUT_LEN);
			sk->fin_seq = th->seq+1;	/* FIN takes a sequence number */
			tcp_statistics.TcpCurrEstab--;
			sk->state = TCP_CLOSE_WAIT;
			if (th->rst)
				sk->shutdown = SHUTDOWN_MASK;
			break;

		case TCP_CLOSE_WAIT:
		case TCP_FIN_WAIT2:
			break;	/* duplicate/retransmitted FIN - ignore */

		case TCP_FIN_WAIT1:
			/* Both sides have sent a FIN. */
			sk->fin_seq = th->seq+1;
			sk->state = TCP_FIN_WAIT2;
			break;

		/* NOTE: `default:` shares this arm with TCP_TIME_WAIT. */
		default:
		case TCP_TIME_WAIT:
			sk->state = TCP_LAST_ACK;
			reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			return(0);	/* no ack_backlog bump on this path */
	}
	sk->ack_backlog++;

	return(0);
}
2973
2974
2975
/*
 *	accept() for TCP.  Blocks (unless O_NONBLOCK) until a connection
 *	skb appears on the listening socket's receive queue - each such
 *	skb was placed there by tcp_conn_request() with skb->sk pointing
 *	at the child socket.  Returns the child, or NULL with sk->err set.
 */
static struct sock *
tcp_accept(struct sock *sk, int flags)
{
	struct sock *newsk;
	struct sk_buff *skb;

	DPRINTF((DBG_TCP, "tcp_accept(sk=%X, flags=%X, addr=%s)\n",
		sk, flags, in_ntoa(sk->saddr)));

	/* accept() only makes sense on a listening socket. */
	if (sk->state != TCP_LISTEN) {
		sk->err = EINVAL;
		return(NULL);
	}

	/* Interrupts off while we test-and-sleep on the queue. */
	cli();
	sk->inuse = 1;
	while((skb = skb_dequeue(&sk->receive_queue)) == NULL) {
		if (flags & O_NONBLOCK) {
			sti();
			release_sock(sk);
			sk->err = EAGAIN;
			return(NULL);
		}

		release_sock(sk);
		interruptible_sleep_on(sk->sleep);
		/* Woken by a signal rather than a connection? */
		if (current->signal & ~current->blocked) {
			sti();
			sk->err = ERESTARTSYS;
			return(NULL);
		}
		sk->inuse = 1;
	}
	sti();

	/* The skb carries the child socket created at SYN time. */
	newsk = skb->sk;

	kfree_skb(skb, FREE_READ);
	sk->ack_backlog--;
	release_sock(sk);
	return(newsk);
}
3024
3025
3026
3027
3028
3029
/*
 *	connect() for TCP.  Validates the user's sockaddr, initializes
 *	the connection's sequence numbers, builds a SYN carrying an MSS
 *	option and transmits it, leaving the socket in SYN_SENT with the
 *	connect timer running.  Returns 0 or a negative errno.
 */
static int tcp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len)
{
	struct sk_buff *buff;
	struct sockaddr_in sin;
	struct device *dev=NULL;
	unsigned char *ptr;
	int tmp;
	struct tcphdr *t1;
	int err;

	if (sk->state != TCP_CLOSE)
		return(-EISCONN);
	/* Need at least family + port + address (8 bytes). */
	if (addr_len < 8)
		return(-EINVAL);

	err=verify_area(VERIFY_READ, usin, addr_len);
	if(err)
		return err;

	memcpy_fromfs(&sin,usin, min(sizeof(sin), addr_len));

	if (sin.sin_family && sin.sin_family != AF_INET)
		return(-EAFNOSUPPORT);

	DPRINTF((DBG_TCP, "TCP connect daddr=%s\n", in_ntoa(sin.sin_addr.s_addr)));

	/* Connecting to INADDR_ANY means connecting to ourselves. */
	if(sin.sin_addr.s_addr==INADDR_ANY)
		sin.sin_addr.s_addr=ip_my_addr();

	/* A TCP connection to a broadcast address is meaningless. */
	if (ip_chk_addr(sin.sin_addr.s_addr) == IS_BROADCAST)
	{
		DPRINTF((DBG_TCP, "TCP connection to broadcast address not allowed\n"));
		return(-ENETUNREACH);
	}

	/* Refuse to connect to our own (address, port) pair. */
	if(sk->saddr == sin.sin_addr.s_addr && sk->num==ntohs(sin.sin_port))
		return -EBUSY;

	sk->inuse = 1;
	sk->daddr = sin.sin_addr.s_addr;
	/* Clock-driven initial send sequence number. */
	sk->write_seq = jiffies * SEQ_TICK - seq_offset;
	sk->window_seq = sk->write_seq;
	sk->rcv_ack_seq = sk->write_seq -1;
	sk->err = 0;
	sk->dummy_th.dest = sin.sin_port;
	release_sock(sk);

	/* GFP_KERNEL: we may sleep here, hence the release above. */
	buff = sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
	if (buff == NULL)
	{
		return(-ENOMEM);
	}
	sk->inuse = 1;
	buff->len = 24;		/* TCP header (20) + MSS option (4) */
	buff->sk = sk;
	buff->free = 1;
	buff->localroute = sk->localroute;

	t1 = (struct tcphdr *) buff->data;

	/* Build the IP header; also resolves the output device. */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
				 IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0)
	{
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		release_sock(sk);
		return(-ENETUNREACH);
	}

	buff->len += tmp;
	t1 = (struct tcphdr *)((char *)t1 +tmp);

	memcpy(t1,(void *)&(sk->dummy_th), sizeof(*t1));
	t1->seq = ntohl(sk->write_seq++);	/* SYN consumes a sequence no. */
	sk->sent_seq = sk->write_seq;
	buff->h.seq = sk->write_seq;
	t1->ack = 0;
	t1->window = 2;
	t1->res1=0;
	t1->res2=0;
	t1->rst = 0;
	t1->urg = 0;
	t1->psh = 0;
	t1->syn = 1;
	t1->urg_ptr = 0;
	t1->doff = 6;		/* 5 words of header + 1 word of options */

	/*
	 *	Pick an initial MTU: a user-forced MSS wins; otherwise
	 *	off-net destinations get the conservative 576-byte value.
	 */
	if (sk->user_mss)
		sk->mtu = sk->user_mss;
	else
	{
#ifdef SUBNETSARELOCAL
		if ((sk->saddr ^ sk->daddr) & default_mask(sk->saddr))
#else
		if ((sk->saddr ^ sk->daddr) & dev->pa_mask)
#endif
			sk->mtu = 576 - HEADER_SIZE;
		else
			sk->mtu = MAX_WINDOW;
	}

	/* Never exceed what the output device can carry. */
	sk->mtu = min(sk->mtu, dev->mtu - HEADER_SIZE);

	/* MSS option: kind 2, length 4, 16-bit MSS value. */
	ptr = (unsigned char *)(t1+1);
	ptr[0] = 2;
	ptr[1] = 4;
	ptr[2] = (sk->mtu) >> 8;
	ptr[3] = (sk->mtu) & 0xff;
	tcp_send_check(t1, sk->saddr, sk->daddr,
		  sizeof(struct tcphdr) + 4, sk);

	/* Enter SYN_SENT and arm the connect (retransmit) timer. */
	sk->state = TCP_SYN_SENT;
	sk->rtt = TCP_CONNECT_TIME;
	reset_timer(sk, TIME_WRITE, TCP_CONNECT_TIME);
	sk->retransmits = TCP_RETR2 - TCP_SYN_RETRIES;

	sk->prot->queue_xmit(sk, dev, buff, 0);
	tcp_statistics.TcpActiveOpens++;
	tcp_statistics.TcpOutSegs++;

	release_sock(sk);
	return(0);
}
3185
3186
3187
3188 static int
3189 tcp_sequence(struct sock *sk, struct tcphdr *th, short len,
3190 struct options *opt, unsigned long saddr, struct device *dev)
3191 {
3192 unsigned long next_seq;
3193
3194 next_seq = len - 4*th->doff;
3195 if (th->fin)
3196 next_seq++;
3197
3198 if (next_seq && !sk->window)
3199 goto ignore_it;
3200 next_seq += th->seq;
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210 if (!after(next_seq+1, sk->acked_seq))
3211 goto ignore_it;
3212
3213 if (!before(th->seq, sk->acked_seq + sk->window + 1))
3214 goto ignore_it;
3215
3216
3217 return 1;
3218
3219 ignore_it:
3220 DPRINTF((DBG_TCP, "tcp_sequence: rejecting packet.\n"));
3221
3222 if (th->rst)
3223 return 0;
3224
3225
3226
3227
3228
3229
3230
3231
3232 if (sk->state==TCP_SYN_SENT || sk->state==TCP_SYN_RECV) {
3233 tcp_reset(sk->saddr,sk->daddr,th,sk->prot,NULL,dev, sk->ip_tos,sk->ip_ttl);
3234 return 1;
3235 }
3236
3237
3238 tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
3239 return 0;
3240 }
3241
3242
/*
 * Main entry point for received TCP segments, called from the IP layer.
 * Validates the checksum, finds the owning socket, and dispatches on the
 * connection state.  If the socket is busy the skb is parked on its
 * back_log queue and re-presented later with redo != 0.
 *
 * Returns 0 in all cases (the skb is always consumed or queued here).
 */
int
tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
	unsigned long daddr, unsigned short len,
	unsigned long saddr, int redo, struct inet_protocol * protocol)
{
	struct tcphdr *th;
	struct sock *sk;

	if (!skb) {
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv skb = NULL\n"));
		return(0);
	}

	if (!dev)
	{
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv dev = NULL\n"));
		return(0);
	}

	tcp_statistics.TcpInSegs++;

	th = skb->h.th;

	/* Look up the socket for this (local port, remote addr/port, local addr). */
	sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
	DPRINTF((DBG_TCP, "<<\n"));
	DPRINTF((DBG_TCP, "len = %d, redo = %d, skb=%X\n", len, redo, skb));

	/* A zapped socket (killed by a valid reset) behaves like no socket. */
	if (sk!=NULL && sk->zapped)
		sk=NULL;

	if (sk) {
		DPRINTF((DBG_TCP, "sk = %X:\n", sk));
	}

	if (!redo) {
		/* First pass: checksum, byte order, and ownership setup. */
		if (tcp_check(th, len, saddr, daddr )) {
			skb->sk = NULL;
			DPRINTF((DBG_TCP, "packet dropped with bad checksum.\n"));
			if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: bad checksum\n");
			kfree_skb(skb,FREE_READ);
			return(0);
		}

		/* Sequence number is kept in host order from here on. */
		th->seq = ntohl(th->seq);

		/* No socket: answer everything except resets with a reset. */
		if (sk == NULL) {
			if (!th->rst)
				tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev,skb->ip_hdr->tos,255);
			skb->sk = NULL;
			kfree_skb(skb, FREE_READ);
			return(0);
		}

		skb->len = len;
		skb->sk = sk;
		skb->acked = 0;
		skb->used = 0;
		skb->free = 0;
		/* Note: saddr/daddr on the skb are from OUR point of view. */
		skb->saddr = daddr;
		skb->daddr = saddr;

		/* If the socket is busy, queue on the backlog for a redo pass. */
		cli();
		if (sk->inuse) {
			skb_queue_head(&sk->back_log, skb);
			sti();
			return(0);
		}
		sk->inuse = 1;
		sti();
	} else {
		/* Redo pass: the caller presumably already holds sk->inuse. */
		if (!sk) {
			DPRINTF((DBG_TCP, "tcp.c: tcp_rcv bug sk=NULL redo = 1\n"));
			return(0);
		}
	}

	if (!sk->prot) {
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv sk->prot = NULL \n"));
		/* NOTE(review): returns with sk->inuse still set and the skb
		   not freed — looks like a lock/skb leak on this (should be
		   impossible) path; confirm. */
		return(0);
	}

	/* Enforce the receive buffer limit. */
	if (sk->rmem_alloc + skb->mem_len >= sk->rcvbuf) {
		skb->sk = NULL;
		DPRINTF((DBG_TCP, "dropping packet due to lack of buffer space.\n"));
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	}
	sk->rmem_alloc += skb->mem_len;

	DPRINTF((DBG_TCP, "About to do switch.\n"));

	switch(sk->state) {

	/*
	 * LAST_ACK only differs from the established states in its RST
	 * handling; otherwise it deliberately falls through below.
	 */
	case TCP_LAST_ACK:
		if (th->rst) {
			sk->zapped=1;
			sk->err = ECONNRESET;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			if (!sk->dead) {
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* fall through */

	case TCP_ESTABLISHED:
	case TCP_CLOSE_WAIT:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
	case TCP_TIME_WAIT:
		/* Window check first: out-of-window segments are dropped. */
		if (!tcp_sequence(sk, th, len, opt, saddr,dev)) {
			if (inet_debug == DBG_SLIP)
				printk("\rtcp_rcv: not in seq\n");
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* In-window reset kills the connection. */
		if (th->rst)
		{
			tcp_statistics.TcpEstabResets++;
			tcp_statistics.TcpCurrEstab--;
			sk->zapped=1;

			sk->err = ECONNRESET;

			if (sk->state == TCP_CLOSE_WAIT)
			{
				sk->err = EPIPE;
			}

			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			if (!sk->dead)
			{
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* A SYN inside an established connection is a protocol error. */
		if (th->syn)
		{
			tcp_statistics.TcpCurrEstab--;
			tcp_statistics.TcpEstabResets++;
			sk->err = ECONNRESET;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			tcp_reset(daddr, saddr, th, sk->prot, opt,dev, sk->ip_tos,sk->ip_ttl);
			if (!sk->dead) {
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* Process ack, urgent data, payload, then FIN — in that order. */
		if (th->ack && !tcp_ack(sk, th, saddr, len)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (tcp_urg(sk, th, saddr, len)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (tcp_data(skb, sk, saddr, len)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (th->fin && tcp_fin(sk, th, saddr, dev)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		release_sock(sk);
		return(0);

	case TCP_CLOSE:
		if (sk->dead || sk->daddr) {
			DPRINTF((DBG_TCP, "packet received for closed,dead socket\n"));
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (!th->rst) {
			if (!th->ack)
				th->ack_seq = 0;
			tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
		}
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);

	case TCP_LISTEN:
		if (th->rst) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* An ack to a listening socket is bogus — reset the sender. */
		if (th->ack) {
			tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* A SYN starts a new passive connection; conn_request owns the skb. */
		if (th->syn)
		{
			tcp_conn_request(sk, skb, daddr, saddr, opt, dev);
			release_sock(sk);
			return(0);
		}

		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);

	case TCP_SYN_RECV:
		/* Retransmitted SYN: drop it (we already sent our SYN-ACK). */
		if (th->syn) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* fall through to the default window check */

	default:
		if (!tcp_sequence(sk, th, len, opt, saddr,dev))
		{
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* fall through — SYN_SENT/SYN_RECV share the handshake code */

	case TCP_SYN_SENT:
		if (th->rst)
		{
			tcp_statistics.TcpAttemptFails++;
			sk->err = ECONNREFUSED;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			sk->zapped = 1;
			if (!sk->dead)
			{
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		if (!th->ack)
		{
			/* Simultaneous open: SYN without ack moves us to SYN_RECV. */
			if (th->syn)
			{
				sk->state = TCP_SYN_RECV;
			}

			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		switch(sk->state)
		{
			case TCP_SYN_SENT:
				if (!tcp_ack(sk, th, saddr, len))
				{
					tcp_statistics.TcpAttemptFails++;
					tcp_reset(daddr, saddr, th,
						sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}

				/* An ack without SYN is useless here — wait for SYN-ACK. */
				if (!th->syn)
				{
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}

				/* Ack the peer's SYN, then fall into SYN_RECV handling. */
				sk->acked_seq = th->seq+1;
				sk->fin_seq = th->seq;
				tcp_send_ack(sk->sent_seq, th->seq+1,
					sk, th, sk->daddr);
				/* fall through */

			case TCP_SYN_RECV:
				if (!tcp_ack(sk, th, saddr, len))
				{
					tcp_statistics.TcpAttemptFails++;
					tcp_reset(daddr, saddr, th,
						sk->prot, opt, dev,sk->ip_tos,sk->ip_ttl);
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}

				/* Handshake complete: the connection is established. */
				tcp_statistics.TcpCurrEstab++;
				sk->state = TCP_ESTABLISHED;

				tcp_options(sk, th);
				sk->dummy_th.dest = th->source;
				sk->copied_seq = sk->acked_seq-1;
				if (!sk->dead) {
					sk->state_change(sk);
				}

				/* Guard against a zero window from the peer. */
				if (sk->max_window == 0) {
					sk->max_window = 32;
					sk->mss = min(sk->max_window, sk->mtu);
				}

				/* Data may ride on the handshake segment. */
				if (th->urg) {
					if (tcp_urg(sk, th, saddr, len)) {
						kfree_skb(skb, FREE_READ);
						release_sock(sk);
						return(0);
					}
				}
				if (tcp_data(skb, sk, saddr, len))
					kfree_skb(skb, FREE_READ);

				if (th->fin) tcp_fin(sk, th, saddr, dev);
				release_sock(sk);
				return(0);
		}

		/* Reached only via the default case (e.g. CLOSING states). */
		if (th->urg) {
			if (tcp_urg(sk, th, saddr, len)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
		}

		if (tcp_data(skb, sk, saddr, len)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (!th->fin) {
			release_sock(sk);
			return(0);
		}
		tcp_fin(sk, th, saddr, dev);
		release_sock(sk);
		return(0);
	}
}
3653
3654
3655
3656
3657
3658
3659
3660 static void tcp_write_wakeup(struct sock *sk)
3661 {
3662 struct sk_buff *buff;
3663 struct tcphdr *t1;
3664 struct device *dev=NULL;
3665 int tmp;
3666
3667 if (sk->zapped)
3668 return;
3669
3670 if (sk -> state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT &&
3671 sk -> state != TCP_FIN_WAIT1 && sk->state != TCP_FIN_WAIT2)
3672 return;
3673
3674 buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
3675 if (buff == NULL)
3676 return;
3677
3678 buff->len = sizeof(struct tcphdr);
3679 buff->free = 1;
3680 buff->sk = sk;
3681 buff->localroute = sk->localroute;
3682
3683 DPRINTF((DBG_TCP, "in tcp_write_wakeup\n"));
3684 t1 = (struct tcphdr *) buff->data;
3685
3686
3687 tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
3688 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
3689 if (tmp < 0)
3690 {
3691 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
3692 return;
3693 }
3694
3695 buff->len += tmp;
3696 t1 = (struct tcphdr *)((char *)t1 +tmp);
3697
3698 memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
3699
3700
3701
3702
3703
3704 t1->seq = htonl(sk->sent_seq-1);
3705 t1->ack = 1;
3706 t1->res1= 0;
3707 t1->res2= 0;
3708 t1->rst = 0;
3709 t1->urg = 0;
3710 t1->psh = 0;
3711 t1->fin = 0;
3712 t1->syn = 0;
3713 t1->ack_seq = ntohl(sk->acked_seq);
3714 t1->window = ntohs(tcp_select_window(sk));
3715 t1->doff = sizeof(*t1)/4;
3716 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
3717
3718
3719
3720
3721 sk->prot->queue_xmit(sk, dev, buff, 1);
3722 tcp_statistics.TcpOutSegs++;
3723 }
3724
3725 void
3726 tcp_send_probe0(struct sock *sk)
3727 {
3728 if (sk->zapped)
3729 return;
3730
3731 tcp_write_wakeup(sk);
3732
3733 sk->backoff++;
3734 sk->rto = min(sk->rto << 1, 120*HZ);
3735 reset_timer (sk, TIME_PROBE0, sk->rto);
3736 sk->retransmits++;
3737 sk->prot->retransmits ++;
3738 }
3739
3740
3741
3742
3743
3744 int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
3745 {
3746 int val,err;
3747
3748 if(level!=SOL_TCP)
3749 return ip_setsockopt(sk,level,optname,optval,optlen);
3750
3751 if (optval == NULL)
3752 return(-EINVAL);
3753
3754 err=verify_area(VERIFY_READ, optval, sizeof(int));
3755 if(err)
3756 return err;
3757
3758 val = get_fs_long((unsigned long *)optval);
3759
3760 switch(optname)
3761 {
3762 case TCP_MAXSEG:
3763
3764
3765
3766
3767
3768
3769 if(val<1||val>MAX_WINDOW)
3770 return -EINVAL;
3771 sk->user_mss=val;
3772 return 0;
3773 case TCP_NODELAY:
3774 sk->nonagle=(val==0)?0:1;
3775 return 0;
3776 default:
3777 return(-ENOPROTOOPT);
3778 }
3779 }
3780
3781 int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
3782 {
3783 int val,err;
3784
3785 if(level!=SOL_TCP)
3786 return ip_getsockopt(sk,level,optname,optval,optlen);
3787
3788 switch(optname)
3789 {
3790 case TCP_MAXSEG:
3791 val=sk->user_mss;
3792 break;
3793 case TCP_NODELAY:
3794 val=sk->nonagle;
3795 break;
3796 default:
3797 return(-ENOPROTOOPT);
3798 }
3799 err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
3800 if(err)
3801 return err;
3802 put_fs_long(sizeof(int),(unsigned long *) optlen);
3803
3804 err=verify_area(VERIFY_WRITE, optval, sizeof(int));
3805 if(err)
3806 return err;
3807 put_fs_long(val,(unsigned long *)optval);
3808
3809 return(0);
3810 }
3811
3812
/*
 * The TCP protocol operations vector handed to the inet layer.
 * This is a positional initializer: slot order must match the field
 * order of struct proto (declared in sock.h).  Slot comments below are
 * inferred from the values themselves; the scalar slots are hedged.
 */
struct proto tcp_prot = {
	sock_wmalloc,		/* write-buffer alloc */
	sock_rmalloc,		/* read-buffer alloc */
	sock_wfree,		/* write-buffer free */
	sock_rfree,		/* read-buffer free */
	sock_rspace,		/* free read space */
	sock_wspace,		/* free write space */
	tcp_close,
	tcp_read,
	tcp_write,
	tcp_sendto,
	tcp_recvfrom,
	ip_build_header,
	tcp_connect,
	tcp_accept,
	ip_queue_xmit,
	tcp_retransmit,
	tcp_write_wakeup,
	tcp_read_wakeup,
	tcp_rcv,
	tcp_select,
	tcp_ioctl,
	NULL,			/* no init hook */
	tcp_shutdown,
	tcp_setsockopt,
	tcp_getsockopt,
	128,			/* presumably max_header — confirm against struct proto */
	0,			/* presumably retransmit counter, starts at zero */
	{NULL,},		/* sock hash array, initially empty */
	"TCP"			/* protocol name */
};