This source file includes following definitions.
- min
- __print_th
- print_th
- get_firstr
- diff
- tcp_select_window
- tcp_time_wait
- tcp_retransmit
- tcp_err
- tcp_readable
- tcp_select
- tcp_ioctl
- tcp_check
- tcp_send_check
- tcp_send_skb
- tcp_dequeue_partial
- tcp_send_partial
- tcp_enqueue_partial
- tcp_send_ack
- tcp_build_header
- tcp_write
- tcp_sendto
- tcp_read_wakeup
- cleanup_rbuf
- tcp_read_urg
- tcp_read
- tcp_shutdown
- tcp_recvfrom
- tcp_reset
- tcp_options
- default_mask
- tcp_conn_request
- tcp_close
- tcp_write_xmit
- sort_send
- tcp_ack
- tcp_data
- tcp_check_urg
- tcp_urg
- tcp_fin
- tcp_accept
- tcp_connect
- tcp_sequence
- tcp_rcv
- tcp_write_wakeup
- tcp_send_probe0
- tcp_setsockopt
- tcp_getsockopt
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84 #include <linux/types.h>
85 #include <linux/sched.h>
86 #include <linux/mm.h>
87 #include <linux/string.h>
88 #include <linux/socket.h>
89 #include <linux/sockios.h>
90 #include <linux/termios.h>
91 #include <linux/in.h>
92 #include <linux/fcntl.h>
93 #include "inet.h"
94 #include "dev.h"
95 #include "ip.h"
96 #include "protocol.h"
97 #include "icmp.h"
98 #include "tcp.h"
99 #include "skbuff.h"
100 #include "sock.h"
101 #include "arp.h"
102 #include <linux/errno.h>
103 #include <linux/timer.h>
104 #include <asm/system.h>
105 #include <asm/segment.h>
106 #include <linux/mm.h>
107
108 #define SEQ_TICK 3
109 unsigned long seq_offset;
110 #define SUBNETSARELOCAL
111
/*
 * Return the smaller of two unsigned values.
 * Note the return type is (signed) int, matching the original interface.
 */
static __inline__ int
min(unsigned int a, unsigned int b)
{
	return (a < b) ? a : b;
}
118
119
/*
 * Dump every field of a TCP header to the kernel log, plus the first
 * four option bytes following it.  Debug helper only; reached via
 * print_th() when TCP debugging is enabled.
 */
static void __print_th(struct tcphdr *th)
{
	unsigned char *ptr;

	printk("TCP header:\n");
	printk("    source=%d, dest=%d, seq =%ld, ack_seq = %ld\n",
		ntohs(th->source), ntohs(th->dest),
		ntohl(th->seq), ntohl(th->ack_seq));
	printk("    fin=%d, syn=%d, rst=%d, psh=%d, ack=%d, urg=%d res1=%d res2=%d\n",
		th->fin, th->syn, th->rst, th->psh, th->ack,
		th->urg, th->res1, th->res2);
	printk("    window = %d, check = %d urg_ptr = %d\n",
		ntohs(th->window), ntohs(th->check), ntohs(th->urg_ptr));
	printk("    doff = %d\n", th->doff);
	/* Options (if any) start immediately after the fixed header. */
	ptr =(unsigned char *)(th + 1);
	printk("    options = %d %d %d %d\n", ptr[0], ptr[1], ptr[2], ptr[3]);
}
137
/* Print a TCP header, but only when TCP debugging is switched on. */
static inline void print_th(struct tcphdr *th)
{
	if (inet_debug == DBG_TCP)
		__print_th(th);
}
143
144
/*
 * Remove and return the first buffer on the socket's receive queue,
 * or NULL if the queue is empty.
 */
static struct sk_buff *
get_firstr(struct sock *sk)
{
	return skb_dequeue(&sk->rqueue);
}
150
151
152
153
154
/*
 * Absolute difference between two sequence numbers.  The subtraction
 * is done modulo 2^32 (unsigned wraparound) and the magnitude of the
 * signed result is returned.
 */
static long
diff(unsigned long seq1, unsigned long seq2)
{
	long delta;

	delta = seq1 - seq2;
	if (delta > 0)
		return(delta);
	/* -delta == ~delta + 1 in two's complement, as the original computed. */
	return(-delta);
}
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
/*
 * Choose the receive window to advertise.  The candidate is the free
 * receive-buffer space; we keep advertising the old window instead if
 * the new one would either shrink the window or be smaller than one
 * MSS (capped at MAX_WINDOW/2) — this avoids silly-window syndrome.
 */
static int tcp_select_window(struct sock *sk)
{
	int new_window = sk->prot->rspace(sk);

	if (new_window < min(sk->mss, MAX_WINDOW/2) ||
	    new_window < sk->window)
		return(sk->window);
	return(new_window);
}
200
201
202
/*
 * Enter the TIME_WAIT state: mark both directions shut down, wake any
 * waiter via the state-change callback, and arm the 2MSL-style close
 * timer (TCP_TIMEWAIT_LEN) that will finally destroy the socket.
 */
static void tcp_time_wait(struct sock *sk)
{
	sk->state = TCP_TIME_WAIT;
	sk->shutdown = SHUTDOWN_MASK;
	if (!sk->dead)
		sk->state_change(sk);
	reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
}
211
212
213
214
215
216
217
218
/*
 * Retransmit unacknowledged data.  When 'all' is set we simply hand the
 * whole job to the IP layer.  Otherwise this is a timeout-triggered
 * retransmit: perform congestion backoff first (slow-start threshold
 * drops to half the congestion window, window collapses to 1 segment)
 * and then retransmit.
 */
static void
tcp_retransmit(struct sock *sk, int all)
{
	if (all) {
		ip_retransmit(sk, all);
		return;
	}

	/* Congestion backoff: halve the threshold ... */
	sk->ssthresh = sk->cong_window >> 1;

	sk->cong_count = 0;

	/* ... and restart from a one-segment window (slow start). */
	sk->cong_window = 1;

	ip_retransmit(sk, all);
}
236
237
238
239
240
241
242
243
244
245
/*
 * ICMP error handler for TCP.  'header' points at the offending IP
 * header as returned inside the ICMP message; 'err' is either a
 * negative errno (locally detected) or an ICMP type/code pair packed
 * as (type << 8) | code.
 */
void
tcp_err(int err, unsigned char *header, unsigned long daddr,
	unsigned long saddr, struct inet_protocol *protocol)
{
	struct tcphdr *th;
	struct sock *sk;
	struct iphdr *iph=(struct iphdr *)header;

	/* Step over the variable-length IP header to the TCP header. */
	header+=4*iph->ihl;

	DPRINTF((DBG_TCP, "TCP: tcp_err(%d, hdr=%X, daddr=%X saddr=%X, protocol=%X)\n",
					err, header, daddr, saddr, protocol));

	th =(struct tcphdr *)header;
	sk = get_sock(&tcp_prot, th->source, daddr, th->dest, saddr);
	print_th(th);

	if (sk == NULL) return;

	/* A negative err is already an errno: report it and bail out. */
	if(err<0)
	{
		sk->err = -err;
		sk->error_report(sk);
		return;
	}

	if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8)) {
		/* Source quench: shrink the congestion window slightly
		   (never below 4 segments). */
		if (sk->cong_window > 4) sk->cong_window--;
		return;
	}

	DPRINTF((DBG_TCP, "TCP: icmp_err got error\n"));
	sk->err = icmp_err_convert[err & 0xff].errno;

	/*
	 * Only a fatal ICMP error during active connection setup kills
	 * the socket; an established connection just records the error.
	 */
	if (icmp_err_convert[err & 0xff].fatal) {
		if (sk->state == TCP_SYN_SENT) {
			sk->state = TCP_CLOSE;
			sk->error_report(sk);
		}
	}
	return;
}
297
298
299
300
301
302
303
/*
 * Count how many bytes a read would return right now.  Walks the
 * receive queue with interrupts off, accumulating contiguous in-order
 * data starting at copied_seq+1, stopping at a sequence gap or (once
 * data has been counted) at a PSH segment.  One byte is subtracted for
 * out-of-band urgent data that would not be delivered inline.
 *
 * NOTE(review): 'amount' is unsigned long but the function returns int;
 * harmless for realistic queue sizes but worth confirming.
 */
static int
tcp_readable(struct sock *sk)
{
	unsigned long counted;
	unsigned long amount;
	struct sk_buff *skb;
	int count=0;
	int sum;
	unsigned long flags;

	DPRINTF((DBG_TCP, "tcp_readable(sk=%X)\n", sk));
	if(sk && sk->debug)
		printk("tcp_readable: %p - ",sk);

	if (sk == NULL || skb_peek(&sk->rqueue) == NULL)
	{
		if(sk && sk->debug)
			printk("empty\n");
		return(0);
	}

	counted = sk->copied_seq+1;	/* next sequence number we expect to hand out */
	amount = 0;

	/* The queue is ring-linked; walk it with interrupts disabled. */
	save_flags(flags);
	cli();
	skb =(struct sk_buff *)sk->rqueue;

	do {
		count++;
#ifdef OLD
		/* Old sanity check against a corrupted (looping) queue. */
		if (count > 20) {
			restore_flags(flags);
			DPRINTF((DBG_TCP, "tcp_readable, more than 20 packets without a psh\n"));
			printk("tcp_read: possible read_queue corruption.\n");
			return(amount);
		}
#endif
		if (before(counted, skb->h.th->seq))	/* sequence gap: stop */
			break;
		sum = skb->len -(counted - skb->h.th->seq);	/* bytes usable in this skb */
		if (skb->h.th->syn)
			sum++;
		if (sum >= 0) {
			amount += sum;
			if (skb->h.th->syn) amount--;	/* SYN occupies a sequence slot but no data */
			counted += sum;
		}
		if (amount && skb->h.th->psh) break;
		skb =(struct sk_buff *)skb->next;
	} while(skb != sk->rqueue);
	/* Don't count the urgent byte if it will be read out-of-band. */
	if (amount && !sk->urginline && sk->urg_data &&
	    (sk->urg_seq - sk->copied_seq) <= (counted - sk->copied_seq))
		amount--;
	restore_flags(flags);
	DPRINTF((DBG_TCP, "tcp readable returning %d bytes\n", amount));
	if(sk->debug)
		printk("got %lu bytes.\n",amount);
	return(amount);
}
366
367
368
369
370
371
372
/*
 * select() support for TCP sockets.  Returns 1 if the requested
 * condition (readable / writable / exceptional) already holds,
 * otherwise registers the caller on the socket's wait queue and
 * returns 0.  The socket is locked (inuse) for the duration and
 * released on every exit path.
 */
static int
tcp_select(struct sock *sk, int sel_type, select_table *wait)
{
	DPRINTF((DBG_TCP, "tcp_select(sk=%X, sel_type = %d, wait = %X)\n",
						sk, sel_type, wait));

	sk->inuse = 1;
	switch(sel_type) {
	case SEL_IN:
		if(sk->debug)
			printk("select in");
		select_wait(sk->sleep, wait);
		if(sk->debug)
			printk("-select out");
		/* Data queued: readable if listening (pending connection)
		   or if there are bytes a read would actually return. */
		if (skb_peek(&sk->rqueue) != NULL) {
			if (sk->state == TCP_LISTEN || tcp_readable(sk)) {
				release_sock(sk);
				if(sk->debug)
					printk("-select ok data\n");
				return(1);
			}
		}
		/* Pending error is also a readable event. */
		if (sk->err != 0)
		{
			release_sock(sk);
			if(sk->debug)
				printk("-select ok error");
			return(1);
		}
		/* Receive side shut down: read would return EOF now. */
		if (sk->shutdown & RCV_SHUTDOWN) {
			release_sock(sk);
			if(sk->debug)
				printk("-select ok down\n");
			return(1);
		} else {
			release_sock(sk);
			if(sk->debug)
				printk("-select fail\n");
			return(0);
		}
	case SEL_OUT:
		select_wait(sk->sleep, wait);
		if (sk->shutdown & SEND_SHUTDOWN) {
			DPRINTF((DBG_TCP,
				"write select on shutdown socket.\n"));
			/* A shut-down sender is reported as not writable. */
			release_sock(sk);
			return(0);
		}

		/* Writable only when at least one MSS of send buffer is
		   free and the handshake has completed. */
		if (sk->prot->wspace(sk) >= sk->mss) {
			release_sock(sk);
			/* This should cause connect to work ok. */
			if (sk->state == TCP_SYN_RECV ||
			    sk->state == TCP_SYN_SENT) return(0);
			return(1);
		}
		DPRINTF((DBG_TCP,
			"tcp_select: sleeping on write sk->wmem_alloc = %d, "
			"sk->packets_out = %d\n"
			"sk->wback = %X, sk->wfront = %X\n"
			"sk->write_seq = %u, sk->window_seq=%u\n",
				sk->wmem_alloc, sk->packets_out,
				sk->wback, sk->wfront,
				sk->write_seq, sk->window_seq));

		release_sock(sk);
		return(0);
	case SEL_EX:
		select_wait(sk->sleep,wait);
		/* Exceptional condition: socket error or urgent data. */
		if (sk->err || sk->urg_data) {
			release_sock(sk);
			return(1);
		}
		release_sock(sk);
		return(0);
	}

	release_sock(sk);
	return(0);
}
460
461
/*
 * ioctl() support for TCP sockets:
 *   DDIOCSDBG  - set debugging level,
 *   TIOCINQ    - bytes available to read (not valid on a listener),
 *   SIOCATMARK - 1 if the read pointer is at the urgent mark,
 *   TIOCOUTQ   - free space in the send buffer.
 * Results are copied to the user-supplied long at 'arg'.
 */
int
tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int err;
	DPRINTF((DBG_TCP, "tcp_ioctl(sk=%X, cmd = %d, arg=%X)\n", sk, cmd, arg));
	switch(cmd) {
	case DDIOCSDBG:
		return(dbg_ioctl((void *) arg, DBG_TCP));

	case TIOCINQ:
#ifdef FIXME
	case FIONREAD:
#endif
		{
			unsigned long amount;

			if (sk->state == TCP_LISTEN) return(-EINVAL);

			sk->inuse = 1;
			amount = tcp_readable(sk);
			release_sock(sk);
			DPRINTF((DBG_TCP, "returning %d\n", amount));
			err=verify_area(VERIFY_WRITE,(void *)arg,
						   sizeof(unsigned long));
			if(err)
				return err;
			put_fs_long(amount,(unsigned long *)arg);
			return(0);
		}
	case SIOCATMARK:
		{
			int answ = sk->urg_data && sk->urg_seq == sk->copied_seq+1;

			/* NOTE(review): verifies a long but stores through an
			   int pointer; same size on i386 — confirm intent. */
			err = verify_area(VERIFY_WRITE,(void *) arg,
						  sizeof(unsigned long));
			if (err)
				return err;
			put_fs_long(answ,(int *) arg);
			return(0);
		}
	case TIOCOUTQ:
		{
			unsigned long amount;

			if (sk->state == TCP_LISTEN) return(-EINVAL);
			amount = sk->prot->wspace(sk);
			err=verify_area(VERIFY_WRITE,(void *)arg,
						   sizeof(unsigned long));
			if(err)
				return err;
			put_fs_long(amount,(unsigned long *)arg);
			return(0);
		}
	default:
		return(-EINVAL);
	}
}
519
520
521
/*
 * Compute the TCP checksum (one's-complement sum over the pseudo
 * header plus the segment) using i386 inline assembly.  The pseudo
 * header contributes daddr, saddr, and a word combining the protocol
 * number with the segment length.  i386-specific: relies on x86 string
 * instructions (lodsl/lodsw/lodsb) and little-endian byte order.
 */
unsigned short
tcp_check(struct tcphdr *th, int len,
	  unsigned long saddr, unsigned long daddr)
{
	unsigned long sum;

	if (saddr == 0) saddr = my_addr();
	print_th(th);
	/* Pseudo-header: daddr + saddr + (swapped len | proto), with carry. */
	__asm__("\t addl %%ecx,%%ebx\n"
		"\t adcl %%edx,%%ebx\n"
		"\t adcl $0, %%ebx\n"
		: "=b"(sum)
		: "0"(daddr), "c"(saddr), "d"((ntohs(len) << 16) + IPPROTO_TCP*256)
		: "cx","bx","dx" );

	/* Sum the bulk of the segment a 32-bit word at a time. */
	if (len > 3) {
		__asm__("\tclc\n"
			"1:\n"
			"\t lodsl\n"
			"\t adcl %%eax, %%ebx\n"
			"\t loop 1b\n"
			"\t adcl $0, %%ebx\n"
			: "=b"(sum) , "=S"(th)
			: "0"(sum), "c"(len/4) ,"1"(th)
			: "ax", "cx", "bx", "si" );
	}

	/* Fold the 32-bit sum into 16 bits. */
	__asm__("\t movl %%ebx, %%ecx\n"
		"\t shrl $16,%%ecx\n"
		"\t addw %%cx, %%bx\n"
		"\t adcw $0, %%bx\n"
		: "=b"(sum)
		: "0"(sum)
		: "bx", "cx");

	/* Trailing 16-bit word, if any. */
	if ((len & 2) != 0) {
		__asm__("\t lodsw\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum), "=S"(th)
			: "0"(sum) ,"1"(th)
			: "si", "ax", "bx");
	}

	/* Trailing odd byte, if any. */
	if ((len & 1) != 0) {
		__asm__("\t lodsb\n"
			"\t movb $0,%%ah\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum)
			: "0"(sum) ,"S"(th)
			: "si", "ax", "bx");
	}

	/* One's complement of the folded sum is the checksum. */
	return((~sum) & 0xffff);
}
582
583
584 void tcp_send_check(struct tcphdr *th, unsigned long saddr,
585 unsigned long daddr, int len, struct sock *sk)
586 {
587 th->check = 0;
588 th->check = tcp_check(th, len, saddr, daddr);
589 return;
590 }
591
/*
 * Transmit a fully-built TCP segment, or queue it on the socket's
 * write queue when the receiver's window, an outstanding retransmit,
 * or the congestion window forbids sending it now.  Bogus buffers
 * (bad sizes, or an empty segment that is neither SYN nor FIN) are
 * discarded with a console complaint.
 */
static void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
{
	int size;
	struct tcphdr * th = skb->h.th;

	/* TCP header plus payload length within this buffer. */
	size = skb->len - ((unsigned char *) th - skb->data);

	/* Sanity check the computed size against the buffer. */
	if (size < sizeof(struct tcphdr) || size > skb->len) {
		printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
			skb, skb->data, th, skb->len);
		kfree_skb(skb, FREE_WRITE);
		return;
	}

	/* A dataless segment is only legitimate for SYN or FIN. */
	if (size == sizeof(struct tcphdr)) {
		if(!th->syn && !th->fin) {
			printk("tcp_send_skb: attempt to queue a bogon.\n");
			kfree_skb(skb,FREE_WRITE);
			return;
		}
	}

	tcp_send_check(th, sk->saddr, sk->daddr, size, sk);

	/* End sequence number of this segment (seq + payload length). */
	skb->h.seq = ntohl(th->seq) + size - 4*th->doff;
	if (after(skb->h.seq, sk->window_seq) ||
	    (sk->retransmits && sk->timeout == TIME_WRITE) ||
	     sk->packets_out >= sk->cong_window) {
		/* Cannot send now: append to the tail of the write queue. */
		DPRINTF((DBG_TCP, "sk->cong_window = %d, sk->packets_out = %d\n",
					sk->cong_window, sk->packets_out));
		DPRINTF((DBG_TCP, "sk->write_seq = %d, sk->window_seq = %d\n",
					sk->write_seq, sk->window_seq));
		skb->next = NULL;
		skb->magic = TCP_WRITE_QUEUE_MAGIC;
		if (sk->wback == NULL) {
			sk->wfront = skb;
		} else {
			sk->wback->next = skb;
		}
		sk->wback = skb;
		/* Zero-window stall with nothing in flight: start probing. */
		if (before(sk->window_seq, sk->wfront->h.seq) &&
		    sk->send_head == NULL &&
		    sk->ack_backlog == 0)
			reset_timer(sk, TIME_PROBE0, sk->rto);
	} else {
		sk->sent_seq = sk->write_seq;
		sk->prot->queue_xmit(sk, skb->dev, skb, 0);
	}
}
646
/*
 * Atomically detach and return the socket's pending partial packet
 * (the buffer accumulating small writes), cancelling its flush timer.
 * Returns NULL when no partial packet is pending.
 */
struct sk_buff * tcp_dequeue_partial(struct sock * sk)
{
	struct sk_buff * skb;
	unsigned long flags;

	save_flags(flags);
	cli();
	skb = sk->partial;
	if (skb) {
		sk->partial = NULL;
		del_timer(&sk->partial_timer);
	}
	restore_flags(flags);
	return skb;
}
662
663 static void tcp_send_partial(struct sock *sk)
664 {
665 struct sk_buff *skb;
666
667 if (sk == NULL)
668 return;
669 while ((skb = tcp_dequeue_partial(sk)) != NULL)
670 tcp_send_skb(sk, skb);
671 }
672
/*
 * Install 'skb' as the socket's pending partial packet and (re)arm the
 * flush timer.  Any previously pending partial packet is displaced and
 * sent immediately (outside the interrupt-off region).
 *
 * NOTE(review): partial_timer.expires is set to HZ — presumably a
 * relative delay of one second under this kernel's timer scheme;
 * confirm against add_timer() semantics of this tree.
 */
void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk)
{
	struct sk_buff * tmp;
	unsigned long flags;

	save_flags(flags);
	cli();
	tmp = sk->partial;
	if (tmp)
		del_timer(&sk->partial_timer);
	sk->partial = skb;
	sk->partial_timer.expires = HZ;
	sk->partial_timer.function = (void (*)(unsigned long)) tcp_send_partial;
	sk->partial_timer.data = (unsigned long) sk;
	add_timer(&sk->partial_timer);
	restore_flags(flags);
	/* Send the displaced packet after interrupts are back on. */
	if (tmp)
		tcp_send_skb(sk, tmp);
}
692
693
694
/*
 * Send a bare ACK carrying sequence number 'sequence' and acking
 * 'ack', in reply to header 'th' from 'daddr'.  If no buffer memory
 * is available the ACK is deferred by bumping ack_backlog and arming
 * a short retransmit timer.  Does nothing on a zapped (reset) socket.
 */
static void
tcp_send_ack(unsigned long sequence, unsigned long ack,
	     struct sock *sk,
	     struct tcphdr *th, unsigned long daddr)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	struct device *dev = NULL;
	int tmp;

	if(sk->zapped)
		return;		/* We have been reset, we may not send again */

	/*
	 * We need to grab some memory, and put together an ack,
	 * and then put it into the queue to be sent.
	 */
	buff = sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL) {
		/* Force it to send an ack later via the write timer. */
		sk->ack_backlog++;
		if (sk->timeout != TIME_WRITE && tcp_connected(sk->state)) {
			reset_timer(sk, TIME_WRITE, 10);
		}
		if (inet_debug == DBG_SLIP) printk("\rtcp_ack: malloc failed\n");
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_ACK_SIZE;
	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;
	t1 =(struct tcphdr *) buff->data;

	/* Put in the IP header and routing stuff. */
	tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
				IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0) {
		buff->free=1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		if (inet_debug == DBG_SLIP) printk("\rtcp_ack: build_header failed\n");
		return;
	}
	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)t1 +tmp);

	/* Start from the incoming header, then overwrite what differs. */
	memcpy(t1, th, sizeof(*t1));

	/* Swap the send and the receive. */
	t1->dest = th->source;
	t1->source = th->dest;
	t1->seq = ntohl(sequence);	/* ntohl == htonl on i386 */
	t1->ack = 1;
	sk->window = tcp_select_window(sk);
	t1->window = ntohs(sk->window);
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	t1->fin = 0;
	/* Everything received so far is now acked: clear the backlog and
	   drop the delayed-ack timer when nothing else is pending. */
	if (ack == sk->acked_seq) {
		sk->ack_backlog = 0;
		sk->bytes_rcv = 0;
		sk->ack_timed = 0;
		if (sk->send_head == NULL && sk->wfront == NULL && sk->timeout == TIME_WRITE)
		{
			if(sk->keepopen)
				reset_timer(sk,TIME_KEEPOPEN,TCP_TIMEOUT_LEN);
			else
				delete_timer(sk);
		}
	}
	t1->ack_seq = ntohl(ack);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), sk);
	if (sk->debug)
		printk("\rtcp_ack: seq %lx ack %lx\n", sequence, ack);
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
776
777
778
/*
 * Fill in a TCP header for an outgoing data segment from the socket's
 * header template (dummy_th).  PSH is set when 'push' is zero, i.e.
 * when this write empties the caller's buffer.  Also folds the current
 * ack state into the header (clearing the delayed-ack bookkeeping) and
 * advertises a freshly selected window.  Returns the header length.
 */
static int
tcp_build_header(struct tcphdr *th, struct sock *sk, int push)
{
	memcpy(th,(void *) &(sk->dummy_th), sizeof(*th));
	th->seq = htonl(sk->write_seq);
	th->psh =(push == 0) ? 1 : 0;
	th->doff = sizeof(*th)/4;
	th->ack = 1;
	th->fin = 0;
	/* This segment carries our ACK, so pending-ack state is cleared. */
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	sk->ack_timed = 0;
	th->ack_seq = htonl(sk->acked_seq);
	sk->window = tcp_select_window(sk);
	th->window = htons(sk->window);

	return(sizeof(*th));
}
799
800
801
802
803
/*
 * Copy user data into TCP segments and send (or queue) them.
 *
 * Main loop per iteration: check for errors / send shutdown; wait for
 * the connection to reach ESTABLISHED or CLOSE_WAIT; first try to top
 * up a pending partial packet; otherwise size a new segment from the
 * offered window and MSS, allocate a buffer (sleeping if allowed),
 * build IP+TCP headers, copy the payload from user space, and hand it
 * to tcp_send_skb() or park it as a partial packet (Nagle-style).
 *
 * Returns bytes accepted, or a negative errno when nothing was copied.
 */
static int
tcp_write(struct sock *sk, unsigned char *from,
	  int len, int nonblock, unsigned flags)
{
	int copied = 0;
	int copy;
	int tmp;
	struct sk_buff *skb;
	struct sk_buff *send_tmp;
	unsigned char *buff;
	struct proto *prot;
	struct device *dev = NULL;

	DPRINTF((DBG_TCP, "tcp_write(sk=%X, from=%X, len=%d, nonblock=%d, flags=%X)\n",
					sk, from, len, nonblock, flags));

	sk->inuse=1;
	prot = sk->prot;
	while(len > 0) {
		/* A pending error aborts the write (after partial progress
		   is reported). */
		if (sk->err) {
			release_sock(sk);
			if (copied) return(copied);
			tmp = -sk->err;
			sk->err = 0;
			return(tmp);
		}

		/* Writing after shutdown(SEND) is a broken pipe. */
		if (sk->shutdown & SEND_SHUTDOWN) {
			release_sock(sk);
			sk->err = EPIPE;
			if (copied) return(copied);
			sk->err = 0;
			return(-EPIPE);
		}

		/* Wait for a connection to finish (or fail). */
		while(sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) {
			if (sk->err) {
				release_sock(sk);
				if (copied) return(copied);
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			/* Neither connected nor connecting: give up. */
			if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 1\n"));
				if (copied) return(copied);

				if (sk->err) {
					tmp = -sk->err;
					sk->err = 0;
					return(tmp);
				}

				if (sk->keepopen) {
					send_sig(SIGPIPE, current, 0);
				}
				return(-EPIPE);
			}

			if (nonblock || copied) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 2\n"));
				if (copied) return(copied);
				return(-EAGAIN);
			}

			/* Sleep until the handshake completes.  The state is
			   re-checked with interrupts off to avoid a lost wakeup. */
			release_sock(sk);
			cli();
			if (sk->state != TCP_ESTABLISHED &&
			    sk->state != TCP_CLOSE_WAIT && sk->err == 0) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					DPRINTF((DBG_TCP, "tcp_write: return 3\n"));
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
		}

		/* First preference: append to a pending partial packet. */
		if ((skb = tcp_dequeue_partial(sk)) != NULL) {
			int hdrlen;

			/* IP header plus fixed TCP header length in this skb. */
			hdrlen = ((unsigned long)skb->h.th - (unsigned long)skb->data)
				 + sizeof(struct tcphdr);

			/* Add more stuff to the end of skb->len */
			if (!(flags & MSG_OOB)) {
				copy = min(sk->mss - (skb->len - hdrlen), len);
				/* FIXME: this is really ugly epsilon case */
				if (copy <= 0) {
					printk("TCP: **bug**: \"copy\" <= 0!!\n");
					copy = 0;
				}

				memcpy_fromfs(skb->data + skb->len, from, copy);
				skb->len += copy;
				from += copy;
				copied += copy;
				len -= copy;
				sk->write_seq += copy;
			}
			/* Full segment, urgent data, or idle link: send now;
			   otherwise keep accumulating (Nagle). */
			if ((skb->len - hdrlen) >= sk->mss ||
			    (flags & MSG_OOB) ||
			    !sk->packets_out)
				tcp_send_skb(sk, skb);
			else
				tcp_enqueue_partial(skb, sk);
			continue;
		}

		/* Size the next segment from the remaining send window,
		   clamped to one MSS and to the bytes left to copy. */
		copy = sk->window_seq - sk->write_seq;
		if (copy <= 0 || copy < (sk->max_window >> 1) || copy > sk->mss)
			copy = sk->mss;
		if (copy > len)
			copy = len;

		/* We should really check the window here also. */
		send_tmp = NULL;
		if (copy < sk->mss && !(flags & MSG_OOB)) {
			/* We will release the socket in case we sleep here. */
			release_sock(sk);
			/* NB: following should be mtu, because mss can be increased.
			   But mtu is also larger. */
			skb = prot->wmalloc(sk, sk->mtu + 128 + prot->max_header + sizeof(*skb), 0, GFP_KERNEL);
			sk->inuse = 1;
			send_tmp = skb;
		} else {
			/* We will release the socket in case we sleep here. */
			release_sock(sk);
			skb = prot->wmalloc(sk, copy + prot->max_header + sizeof(*skb), 0, GFP_KERNEL);
			sk->inuse = 1;
		}

		/* If we didn't get any memory, we need to sleep. */
		if (skb == NULL) {
			if (nonblock ) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 4\n"));
				if (copied) return(copied);
				return(-EAGAIN);
			}

			/* Sleep until buffer memory is freed.  wmem_alloc is
			   re-sampled with interrupts off to avoid a lost wakeup. */
			tmp = sk->wmem_alloc;
			release_sock(sk);
			cli();

			if (tmp <= sk->wmem_alloc &&
			    (sk->state == TCP_ESTABLISHED||sk->state == TCP_CLOSE_WAIT)
			    && sk->err == 0) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					DPRINTF((DBG_TCP, "tcp_write: return 5\n"));
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
			continue;
		}

		skb->len = 0;
		skb->sk = sk;
		skb->free = 0;

		buff = skb->data;

		/*
		 * FIXME: we need to optimize this.
		 * Perhaps some hints here would be good.
		 */
		tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev,
				 IPPROTO_TCP, sk->opt, skb->mem_len,sk->ip_tos,sk->ip_ttl);
		if (tmp < 0 ) {
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 6\n"));
			if (copied) return(copied);
			return(tmp);
		}
		skb->len += tmp;
		skb->dev = dev;
		buff += tmp;
		skb->h.th =(struct tcphdr *) buff;
		tmp = tcp_build_header((struct tcphdr *)buff, sk, len-copy);
		if (tmp < 0) {
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 7\n"));
			if (copied) return(copied);
			return(tmp);
		}

		if (flags & MSG_OOB) {
			((struct tcphdr *)buff)->urg = 1;
			((struct tcphdr *)buff)->urg_ptr = ntohs(copy);
		}
		skb->len += tmp;
		memcpy_fromfs(buff+tmp, from, copy);

		from += copy;
		copied += copy;
		len -= copy;
		skb->len += copy;
		skb->free = 0;
		sk->write_seq += copy;

		/* A sub-MSS buffer with data in flight becomes the partial
		   packet instead of going on the wire (Nagle). */
		if (send_tmp != NULL && sk->packets_out) {
			tcp_enqueue_partial(send_tmp, sk);
			continue;
		}
		tcp_send_skb(sk, skb);
	}
	sk->err = 0;

	/*
	 * Nagle's rule: flush the pending partial packet when nothing is
	 * in flight, or immediately when Nagle is disabled and the window
	 * allows it.
	 */
	if(sk->partial &&
	   ((!sk->packets_out)
	/* If not nagling we can send on the before case too.. */
	    || (sk->nonagle && before(sk->write_seq , sk->window_seq))
	   ))
		tcp_send_partial(sk);

	release_sock(sk);
	DPRINTF((DBG_TCP, "tcp_write: return 8\n"));
	return(copied);
}
1075
1076
1077 static int
1078 tcp_sendto(struct sock *sk, unsigned char *from,
1079 int len, int nonblock, unsigned flags,
1080 struct sockaddr_in *addr, int addr_len)
1081 {
1082 struct sockaddr_in sin;
1083
1084 if (addr_len < sizeof(sin)) return(-EINVAL);
1085 memcpy_fromfs(&sin, addr, sizeof(sin));
1086 if (sin.sin_family && sin.sin_family != AF_INET) return(-EINVAL);
1087 if (sin.sin_port != sk->dummy_th.dest) return(-EINVAL);
1088 if (sin.sin_addr.s_addr != sk->daddr) return(-EINVAL);
1089 return(tcp_write(sk, from, len, nonblock, flags));
1090 }
1091
1092
/*
 * Send an ACK that advertises the window freed by a reader.  Only runs
 * when acks are actually owed (ack_backlog).  On allocation failure the
 * write timer is re-armed so the ack is retried shortly.
 */
static void
tcp_read_wakeup(struct sock *sk)
{
	int tmp;
	struct device *dev = NULL;
	struct tcphdr *t1;
	struct sk_buff *buff;

	DPRINTF((DBG_TCP, "in tcp read wakeup\n"));
	if (!sk->ack_backlog) return;

	/*
	 * FIXME: we need to put code here to prevent this routine from
	 * being called.  Being called once in a while is ok, so only check
	 * if this is the second time in a row.
	 */

	/*
	 * We need to grab some memory, and put together an ack,
	 * and then put it into the queue to be sent.
	 */
	buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
	if (buff == NULL) {
		/* Try again real soon. */
		reset_timer(sk, TIME_WRITE, 10);
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_ACK_SIZE;
	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;

	/* Put in the IP header and routing stuff. */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
			       IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0) {
		buff->free=1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		return;
	}

	buff->len += tmp;
	t1 =(struct tcphdr *)(buff->data +tmp);

	/* Build the bare ACK from the socket's header template. */
	memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
	t1->seq = htonl(sk->sent_seq);
	t1->ack = 1;
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	/* Advertise the newly available receive window. */
	sk->window = tcp_select_window(sk);
	t1->window = ntohs(sk->window);
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
1156
1157
1158
1159
1160
1161
1162
1163
/*
 * Reclaim fully-consumed buffers from the receive queue and, if that
 * actually freed space, arrange for the peer to learn about the bigger
 * window: immediately (tcp_read_wakeup) when at least a packet's worth
 * opened up, otherwise via a delayed ack timer.
 */
static void
cleanup_rbuf(struct sock *sk)
{
	unsigned long flags;
	int left;
	struct sk_buff *skb;

	if(sk->debug)
		printk("cleaning rbuf for sk=%p\n", sk);

	save_flags(flags);
	cli();

	/* Remember the free space before reclaiming, to detect a change. */
	left = sk->prot->rspace(sk);

	/*
	 * We have to loop through all the buffer headers,
	 * and try to free up all the space we can.
	 */
	while((skb=skb_peek(&sk->rqueue)) != NULL )
	{
		if (!skb->used)
			break;
		skb_unlink(skb);
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
	}

	restore_flags(flags);

	/*
	 * FIXME:
	 * At this point we should send an ack if the difference
	 * in the window, and the amount of space is bigger than
	 * TCP_WINDOW_DIFF.
	 */
	DPRINTF((DBG_TCP, "sk->window left = %d, sk->prot->rspace(sk)=%d\n",
			sk->window - sk->bytes_rcv, sk->prot->rspace(sk)));

	if(sk->debug)
		printk("sk->rspace = %lu, was %d\n", sk->prot->rspace(sk),
					    left);
	if (sk->prot->rspace(sk) != left)
	{
		/*
		 * This area has caused the most trouble.  The current strategy
		 * is to simply do nothing if the other end has room to send at
		 * least 3 full packets, because the ack from those will auto-
		 * matically update the window.  If the other end doesn't think
		 * we have much space left, but we have room for at least 1 more
		 * complete packet than it thinks we do, we will send an ack
		 * immediately.  Otherwise we will wait up to .5 seconds in case
		 * the user reads some more.
		 */
		sk->ack_backlog++;

		/* It's unclear whether to use sk->mtu or sk->mss here.  They
		   differ only if the receive mss is larger than the supported
		   transmit mss. */
		if ((sk->prot->rspace(sk) > (sk->window - sk->bytes_rcv + sk->mtu))) {
			/* Send an ack right now. */
			tcp_read_wakeup(sk);
		} else {
			/* Force it to send an ack soon. */
			int was_active = del_timer(&sk->timer);
			if (!was_active || TCP_ACK_TIME < sk->timer.expires) {
				reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
			} else
				add_timer(&sk->timer);
		}
	}
}
1240
1241
1242
/*
 * Read the single out-of-band (urgent) byte.  Returns 1 with the byte
 * copied to user space, 0 on clean EOF, or a negative errno.  Blocks
 * (unless nonblock) while urgent data has been signalled but not yet
 * received (URG_NOTYET).  Rejects the call when urgent data is
 * delivered inline or has already been consumed.
 */
static int
tcp_read_urg(struct sock * sk, int nonblock,
	     unsigned char *to, int len, unsigned flags)
{
	struct wait_queue wait = { current, NULL };

	while (len > 0) {
		if (sk->urginline || !sk->urg_data || sk->urg_data == URG_READ)
			return -EINVAL;
		if (sk->urg_data & URG_VALID) {
			char c = sk->urg_data;	/* low byte holds the OOB data */
			if (!(flags & MSG_PEEK))
				sk->urg_data = URG_READ;	/* consume it */
			put_fs_byte(c, to);
			return 1;
		}

		if (sk->err) {
			int tmp = -sk->err;
			sk->err = 0;
			return tmp;
		}

		if (sk->state == TCP_CLOSE || sk->done) {
			if (!sk->done) {
				sk->done = 1;
				return 0;
			}
			return -ENOTCONN;
		}

		if (sk->shutdown & RCV_SHUTDOWN) {
			sk->done = 1;
			return 0;
		}

		if (nonblock)
			return -EAGAIN;

		if (current->signal & ~current->blocked)
			return -ERESTARTSYS;

		/* Wait for the urgent byte itself to arrive. */
		current->state = TASK_INTERRUPTIBLE;
		add_wait_queue(sk->sleep, &wait);
		if ((sk->urg_data & URG_NOTYET) && sk->err == 0 &&
		    !(sk->shutdown & RCV_SHUTDOWN))
			schedule();
		remove_wait_queue(sk->sleep, &wait);
		current->state = TASK_RUNNING;
	}
	return 0;
}
1295
1296
1297
/*
 * Copy received data to user space.  Walks the receive queue from the
 * next unread sequence number (copied_seq, or a local copy for
 * MSG_PEEK), copying contiguous data and blocking (unless nonblock)
 * when the queue runs dry.  Skips over the urgent byte when it is not
 * delivered inline, and stops at the urgent mark once some data has
 * been returned.  Returns bytes copied or a negative errno.
 */
static int tcp_read(struct sock *sk, unsigned char *to,
	int len, int nonblock, unsigned flags)
{
	struct wait_queue wait = { current, NULL };
	int copied = 0;
	unsigned long peek_seq;
	unsigned long *seq;
	unsigned long used;
	int err;

	if (len == 0)
		return 0;

	if (len < 0)
		return -EINVAL;

	err = verify_area(VERIFY_WRITE, to, len);
	if (err)
		return err;

	/* This error should be checked. */
	if (sk->state == TCP_LISTEN)
		return -ENOTCONN;

	/* Urgent data needs to be handled specially. */
	if (flags & MSG_OOB)
		return tcp_read_urg(sk, nonblock, to, len, flags);

	/* MSG_PEEK advances a private copy of the read pointer only. */
	peek_seq = sk->copied_seq;
	seq = &sk->copied_seq;
	if (flags & MSG_PEEK)
		seq = &peek_seq;

	add_wait_queue(sk->sleep, &wait);
	sk->inuse = 1;
	while (len > 0) {
		struct sk_buff * skb;
		unsigned long offset;

		/* Are we at the urgent mark?  Stop if we have read anything. */
		if (copied && sk->urg_data && sk->urg_seq == 1+*seq)
			break;

		/* Set to INTERRUPTIBLE before the queue scan, so that a
		   wakeup between scan and schedule() is not lost. */
		current->state = TASK_INTERRUPTIBLE;

		skb = sk->rqueue;
		do {
			if (!skb)
				break;
			if (before(1+*seq, skb->h.th->seq))
				break;
			offset = 1 + *seq - skb->h.th->seq;
			if (skb->h.th->syn)
				offset--;	/* SYN takes a sequence slot */
			if (offset < skb->len)
				goto found_ok_skb;
			if (!(flags & MSG_PEEK))
				skb->used = 1;	/* mark consumed; cleanup_rbuf frees it */
			skb = (struct sk_buff *)skb->next;
		} while (skb != sk->rqueue);

		if (copied)
			break;

		if (sk->err) {
			copied = -sk->err;
			sk->err = 0;
			break;
		}

		if (sk->state == TCP_CLOSE) {
			if (!sk->done) {
				sk->done = 1;	/* first read after close: EOF */
				break;
			}
			copied = -ENOTCONN;
			break;
		}

		if (sk->shutdown & RCV_SHUTDOWN) {
			sk->done = 1;
			break;
		}

		if (nonblock) {
			copied = -EAGAIN;
			break;
		}

		/* Nothing readable: let the window update go out, then sleep. */
		cleanup_rbuf(sk);
		release_sock(sk);
		schedule();
		sk->inuse = 1;

		if (current->signal & ~current->blocked) {
			copied = -ERESTARTSYS;
			break;
		}
		continue;

	found_ok_skb:
		/* Ok so how much can we use? */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Do we have urgent data here? */
		if (sk->urg_data) {
			unsigned long urg_offset = sk->urg_seq - (1 + *seq);
			if (urg_offset < used) {
				if (!urg_offset) {
					/* Skip the urgent byte when it is
					   delivered out-of-band. */
					if (!sk->urginline) {
						++*seq;
						offset++;
						used--;
					}
				} else
					used = urg_offset;	/* read up to the mark only */
			}
		}

		/* Copy it - we know the TCP header offset from doff. */
		memcpy_tofs(to,((unsigned char *)skb->h.th) +
			skb->h.th->doff*4 + offset, used);
		copied += used;
		len -= used;
		to += used;
		*seq += used;
		if (after(sk->copied_seq+1,sk->urg_seq))
			sk->urg_data = 0;	/* urgent byte has been passed */
		if (!(flags & MSG_PEEK) && (used + offset >= skb->len))
			skb->used = 1;
	}
	remove_wait_queue(sk->sleep, &wait);
	current->state = TASK_RUNNING;

	/* Clean up data we have read: this will do ACK frames. */
	cleanup_rbuf(sk);
	release_sock(sk);
	DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
	return copied;
}
1440
1441
1442
1443
1444
1445
/*
 * shutdown() of the sending side: flush any partial packet, build a
 * FIN segment, and either queue it behind pending write data or send
 * it immediately, moving the socket to FIN_WAIT1/FIN_WAIT2.  Only the
 * SEND_SHUTDOWN direction is handled here; a repeat call in a
 * FIN_WAIT state is a no-op.
 */
void
tcp_shutdown(struct sock *sk, int how)
{
	struct sk_buff *buff;
	struct tcphdr *t1, *th;
	struct proto *prot;
	int tmp;
	struct device *dev = NULL;

	/*
	 * We need to grab some memory, and put together a FIN,
	 * and then put it into the queue to be sent.
	 */
	DPRINTF((DBG_TCP, "tcp_shutdown_send buff = %X\n", buff));
	if (sk->state == TCP_FIN_WAIT1 || sk->state == TCP_FIN_WAIT2) return;
	if (!(how & SEND_SHUTDOWN)) return;
	sk->inuse = 1;

	/* Clear out any half completed packets. */
	if (sk->partial)
		tcp_send_partial(sk);

	prot =(struct proto *)sk->prot;
	th =(struct tcphdr *)&sk->dummy_th;
	/* Release first, since wmalloc may sleep with GFP_KERNEL. */
	release_sock(sk);
	buff = prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
	if (buff == NULL) return;
	sk->inuse = 1;

	DPRINTF((DBG_TCP, "tcp_shutdown_send buff = %X\n", buff));
	buff->mem_addr = buff;
	buff->mem_len = MAX_RESET_SIZE;
	buff->sk = sk;
	buff->len = sizeof(*t1);
	t1 =(struct tcphdr *) buff->data;

	/* Put in the IP header and routing stuff. */
	tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
			   IPPROTO_TCP, sk->opt,
			   sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
	if (tmp < 0) {
		buff->free=1;
		prot->wfree(sk,buff->mem_addr, buff->mem_len);
		release_sock(sk);
		DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
		return;
	}

	t1 =(struct tcphdr *)((char *)t1 +tmp);
	buff->len += tmp;
	buff->dev = dev;
	/* Build the FIN from the header template; it consumes one
	   sequence number. */
	memcpy(t1, th, sizeof(*t1));
	t1->seq = ntohl(sk->write_seq);
	sk->write_seq++;
	buff->h.seq = sk->write_seq;
	t1->ack = 1;
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->window = ntohs(sk->window=tcp_select_window(sk));
	t1->fin = 1;
	t1->rst = 0;
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

	/*
	 * Can't just queue this up.
	 * It should go at the end of the write queue.
	 */
	if (sk->wback != NULL) {
		buff->free=0;
		buff->next = NULL;
		sk->wback->next = buff;
		sk->wback = buff;
		buff->magic = TCP_WRITE_QUEUE_MAGIC;
	} else {
		sk->sent_seq = sk->write_seq;
		sk->prot->queue_xmit(sk, dev, buff, 0);
	}

	if (sk->state == TCP_ESTABLISHED) sk->state = TCP_FIN_WAIT1;
	else sk->state = TCP_FIN_WAIT2;

	release_sock(sk);
}
1532
1533
/*
 * recvfrom() on a TCP socket: a plain tcp_read(), after which the
 * connection's fixed peer address is copied back to the caller.  The
 * user's address buffer is validated before the read so a fault is
 * reported before any data is consumed.
 */
static int
tcp_recvfrom(struct sock *sk, unsigned char *to,
	     int to_len, int nonblock, unsigned flags,
	     struct sockaddr_in *addr, int *addr_len)
{
	struct sockaddr_in sin;
	int len;
	int err;
	int result;

	/*
	 * Have to check these first unlike the old code. If
	 * we check them after we lose data on an error
	 * which is wrong.
	 */
	err = verify_area(VERIFY_WRITE,addr_len,sizeof(long));
	if(err)
		return err;
	len = get_fs_long(addr_len);
	if(len > sizeof(sin))
		len = sizeof(sin);
	err=verify_area(VERIFY_WRITE, addr, len);
	if(err)
		return err;

	result=tcp_read(sk, to, to_len, nonblock, flags);

	if (result < 0) return(result);

	/* TCP has a fixed peer: report the connected address. */
	sin.sin_family = AF_INET;
	sin.sin_port = sk->dummy_th.dest;
	sin.sin_addr.s_addr = sk->daddr;

	memcpy_tofs(addr, &sin, len);
	put_fs_long(len, addr_len);
	return(result);
}
1569
1570
1571
/*
 * Send an RST segment in response to an unacceptable incoming segment
 * (e.g. one aimed at a dead or non-existent socket).  Per RFC 793: if the
 * offending segment carried an ACK, the RST takes its sequence number from
 * that ACK; otherwise we send seq 0 with an ACK covering the segment.
 * Runs with no socket attached (buff->sk = NULL), possibly at interrupt
 * time, hence GFP_ATOMIC.
 */
static void
tcp_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
	  struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
{
  struct sk_buff *buff;
  struct tcphdr *t1;
  int tmp;

  /* Atomic allocation: we may be in the bottom half of packet receive. */
  buff = prot->wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
  if (buff == NULL)
  	return;

  DPRINTF((DBG_TCP, "tcp_reset buff = %X\n", buff));
  buff->mem_addr = buff;
  buff->mem_len = MAX_RESET_SIZE;
  buff->len = sizeof(*t1);
  buff->sk = NULL;		/* no owning socket */
  buff->dev = dev;

  t1 =(struct tcphdr *) buff->data;

  /* Prepend the link-level and IP headers; tmp is their combined size. */
  tmp = prot->build_header(buff, saddr, daddr, &dev, IPPROTO_TCP, opt,
			   sizeof(struct tcphdr),tos,ttl);
  if (tmp < 0) {
  	buff->free = 1;
	prot->wfree(NULL, buff->mem_addr, buff->mem_len);
	return;
  }
  t1 =(struct tcphdr *)((char *)t1 +tmp);
  buff->len += tmp;
  /* Start from a copy of the offending header, then swap/patch fields. */
  memcpy(t1, th, sizeof(*t1));

  t1->dest = th->source;
  t1->source = th->dest;
  t1->rst = 1;
  t1->window = 0;

  if(th->ack)
  {
  	/* RFC 793: RST gets its seq from the incoming ACK, no ACK flag. */
  	t1->ack = 0;
  	t1->seq = th->ack_seq;
  	t1->ack_seq = 0;
  }
  else
  {
  	/* No ACK in the offender: send seq 0 and ACK the segment (SYN
  	   counts for one sequence number). */
  	t1->ack = 1;
  	if(!th->syn)
  		t1->ack_seq=htonl(th->seq);
  	else
  		t1->ack_seq=htonl(th->seq+1);
  	t1->seq=0;
  }

  t1->syn = 0;
  t1->urg = 0;
  t1->fin = 0;
  t1->psh = 0;
  t1->doff = sizeof(*t1)/4;	/* no TCP options */
  tcp_send_check(t1, saddr, daddr, sizeof(*t1), NULL);
  prot->queue_xmit(NULL, dev, buff, 1);
}
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649 static void
1650 tcp_options(struct sock *sk, struct tcphdr *th)
1651 {
1652 unsigned char *ptr;
1653 int length=(th->doff*4)-sizeof(struct tcphdr);
1654 int mss_seen = 0;
1655
1656 ptr = (unsigned char *)(th + 1);
1657
1658 while(length>0)
1659 {
1660 int opcode=*ptr++;
1661 int opsize=*ptr++;
1662 switch(opcode)
1663 {
1664 case TCPOPT_EOL:
1665 return;
1666 case TCPOPT_NOP:
1667 length-=2;
1668 continue;
1669
1670 default:
1671 if(opsize<=2)
1672 return;
1673 switch(opcode)
1674 {
1675 case TCPOPT_MSS:
1676 if(opsize==4 && th->syn)
1677 {
1678 sk->mtu=min(sk->mtu,ntohs(*(unsigned short *)ptr));
1679 mss_seen = 1;
1680 }
1681 break;
1682
1683 }
1684 ptr+=opsize-2;
1685 length-=opsize;
1686 }
1687 }
1688 if (th->syn) {
1689 if (! mss_seen)
1690 sk->mtu=min(sk->mtu, 536);
1691 }
1692 sk->mss = min(sk->max_window, sk->mtu);
1693 }
1694
/*
 * Return the classful (A/B/C) network mask for an address given in
 * network byte order; the mask is returned in network byte order too.
 */
static inline unsigned long default_mask(unsigned long dst)
{
	unsigned long host = ntohl(dst);

	if (IN_CLASSA(host))
		return htonl(IN_CLASSA_NET);
	return IN_CLASSB(host) ? htonl(IN_CLASSB_NET) : htonl(IN_CLASSC_NET);
}
1704
1705
1706
1707
1708
1709
1710
1711
/*
 * Handle an incoming SYN on a listening socket: clone the listener into a
 * new sock in TCP_SYN_RECV state, parse the SYN's options, and transmit a
 * SYN|ACK carrying our MSS option.  The SYN skb itself is then parked on
 * the listener's receive queue so accept() can find the new socket.
 * Called from the receive path, so all allocations are GFP_ATOMIC and
 * failures simply drop the SYN (the peer will retransmit).
 */
static void
tcp_conn_request(struct sock *sk, struct sk_buff *skb,
		 unsigned long daddr, unsigned long saddr,
		 struct options *opt, struct device *dev)
{
  struct sk_buff *buff;
  struct tcphdr *t1;
  unsigned char *ptr;
  struct sock *newsk;
  struct tcphdr *th;
  int tmp;

  DPRINTF((DBG_TCP, "tcp_conn_request(sk = %X, skb = %X, daddr = %X, sadd4= %X, \n"
	  "                  opt = %X, dev = %X)\n",
	  sk, skb, daddr, saddr, opt, dev));

  th = skb->h.th;

  /* A dead listener cannot accept: answer with RST. */
  if (!sk->dead) {
  	sk->data_ready(sk,0);
  } else {
  	DPRINTF((DBG_TCP, "tcp_conn_request on dead socket\n"));
  	tcp_reset(daddr, saddr, th, sk->prot, opt, dev, sk->ip_tos,sk->ip_ttl);
  	kfree_skb(skb, FREE_READ);
  	return;
  }

  /* Accept queue full: silently drop, the client will retry. */
  if (sk->ack_backlog >= sk->max_ack_backlog) {
  	kfree_skb(skb, FREE_READ);
  	return;
  }

  /* Clone the listening socket for the new connection. */
  newsk = (struct sock *) kmalloc(sizeof(struct sock), GFP_ATOMIC);
  if (newsk == NULL) {
  	/* No memory: drop the SYN. */
  	kfree_skb(skb, FREE_READ);
  	return;
  }

  DPRINTF((DBG_TCP, "newsk = %X\n", newsk));
  memcpy((void *)newsk,(void *)sk, sizeof(*newsk));
  /* Reset everything the child must not inherit from the listener. */
  newsk->wback = NULL;
  newsk->wfront = NULL;
  newsk->rqueue = NULL;
  newsk->send_head = NULL;
  newsk->send_tail = NULL;
  newsk->back_log = NULL;
  newsk->rtt = TCP_CONNECT_TIME << 3;	/* rtt is kept scaled by 8 */
  newsk->rto = TCP_CONNECT_TIME;
  newsk->mdev = 0;
  newsk->max_window = 0;
  newsk->cong_window = 1;	/* start in slow-start */
  newsk->cong_count = 0;
  newsk->ssthresh = 0;
  newsk->backoff = 0;
  newsk->blog = 0;
  newsk->intr = 0;
  newsk->proc = 0;
  newsk->done = 0;
  newsk->partial = NULL;
  newsk->pair = NULL;
  newsk->wmem_alloc = 0;
  newsk->rmem_alloc = 0;

  newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;

  newsk->err = 0;
  newsk->shutdown = 0;
  newsk->ack_backlog = 0;
  /* The SYN consumes one sequence number. */
  newsk->acked_seq = skb->h.th->seq+1;
  newsk->fin_seq = skb->h.th->seq;
  newsk->copied_seq = skb->h.th->seq;
  newsk->state = TCP_SYN_RECV;
  newsk->timeout = 0;
  /* Pick our initial send sequence from the clock. */
  newsk->write_seq = jiffies * SEQ_TICK - seq_offset;
  newsk->window_seq = newsk->write_seq;
  newsk->rcv_ack_seq = newsk->write_seq;
  newsk->urg_data = 0;
  newsk->retransmits = 0;
  newsk->destroy = 0;
  newsk->timer.data = (unsigned long)newsk;
  newsk->timer.function = &net_timer;
  newsk->dummy_th.source = skb->h.th->dest;
  newsk->dummy_th.dest = skb->h.th->source;

  /* From our point of view the packet's source is the new daddr. */
  newsk->daddr = saddr;
  newsk->saddr = daddr;

  put_sock(newsk->num,newsk);
  newsk->dummy_th.res1 = 0;
  newsk->dummy_th.doff = 6;
  newsk->dummy_th.fin = 0;
  newsk->dummy_th.syn = 0;
  newsk->dummy_th.rst = 0;
  newsk->dummy_th.psh = 0;
  newsk->dummy_th.ack = 0;
  newsk->dummy_th.urg = 0;
  newsk->dummy_th.res2 = 0;
  newsk->acked_seq = skb->h.th->seq + 1;
  newsk->copied_seq = skb->h.th->seq;

  /* TOS/TTL: honour the incoming packet's TOS. */
  newsk->ip_ttl=sk->ip_ttl;
  newsk->ip_tos=skb->ip_hdr->tos;

  /* Choose an initial mtu: user override, else small default for
     off-net peers, else the local maximum. */
  if (sk->user_mss)
    newsk->mtu = sk->user_mss;
  else {
#ifdef SUBNETSARELOCAL
    if ((saddr ^ daddr) & default_mask(saddr))
#else
    if ((saddr ^ daddr) & dev->pa_mask)
#endif
      newsk->mtu = 576 - HEADER_SIZE;
    else
      newsk->mtu = MAX_WINDOW;
  }

  newsk->mtu = min(newsk->mtu, dev->mtu - HEADER_SIZE);

  /* Let the SYN's options (MSS) clamp it further. */
  tcp_options(newsk,skb->h.th);

  buff = newsk->prot->wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
  if (buff == NULL) {
  	sk->err = -ENOMEM;
  	newsk->dead = 1;
  	release_sock(newsk);
  	kfree_skb(skb, FREE_READ);
  	return;
  }

  buff->mem_addr = buff;
  buff->mem_len = MAX_SYN_SIZE;
  buff->len = sizeof(struct tcphdr)+4;	/* header + 4 bytes MSS option */
  buff->sk = newsk;

  t1 =(struct tcphdr *) buff->data;

  tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &dev,
			       IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);

  /* No route: abandon the new socket and drop the SYN. */
  if (tmp < 0) {
  	sk->err = tmp;
  	buff->free=1;
  	kfree_skb(buff,FREE_WRITE);
  	newsk->dead = 1;
  	release_sock(newsk);
  	skb->sk = sk;
  	kfree_skb(skb, FREE_READ);
  	return;
  }

  buff->len += tmp;
  t1 =(struct tcphdr *)((char *)t1 +tmp);

  memcpy(t1, skb->h.th, sizeof(*t1));
  buff->h.seq = newsk->write_seq;

  /* Build the SYN|ACK. */
  t1->dest = skb->h.th->source;
  t1->source = newsk->dummy_th.source;
  t1->seq = ntohl(newsk->write_seq++);	/* our SYN consumes a sequence number */
  t1->ack = 1;
  newsk->window = tcp_select_window(newsk);
  newsk->sent_seq = newsk->write_seq;
  t1->window = ntohs(newsk->window);
  t1->res1 = 0;
  t1->res2 = 0;
  t1->rst = 0;
  t1->urg = 0;
  t1->psh = 0;
  t1->syn = 1;
  t1->ack_seq = ntohl(skb->h.th->seq+1);
  t1->doff = sizeof(*t1)/4+1;	/* +1 32-bit word for the MSS option */

  /* MSS option: kind 2, length 4, 16-bit value in network order. */
  ptr =(unsigned char *)(t1+1);
  ptr[0] = 2;
  ptr[1] = 4;
  ptr[2] = ((newsk->mtu) >> 8) & 0xff;
  ptr[3] =(newsk->mtu) & 0xff;

  tcp_send_check(t1, daddr, saddr, sizeof(*t1)+4, newsk);
  newsk->prot->queue_xmit(newsk, dev, buff, 0);

  reset_timer(newsk, TIME_WRITE , TCP_CONNECT_TIME);
  /* Charge the SYN skb to the new socket and park it on the listener's
     receive queue for accept() to pick up. */
  skb->sk = newsk;

  sk->rmem_alloc -= skb->mem_len;
  newsk->rmem_alloc += skb->mem_len;

  skb_queue_tail(&sk->rqueue,skb);
  sk->ack_backlog++;
  release_sock(newsk);
}
1925
1926
/*
 * close() on a TCP socket.  Flushes any unread receive data (remembering
 * whether unread data should provoke an RST), pushes out a pending
 * partial packet, then acts on the connection state: states that already
 * sent a FIN just (re)arm timers, LISTEN/CLOSE go straight to CLOSE, and
 * the active states build and queue a FIN segment.
 */
static void
tcp_close(struct sock *sk, int timeout)
{
  struct sk_buff *buff;
  int need_reset = 0;
  struct tcphdr *t1, *th;
  struct proto *prot;
  struct device *dev=NULL;
  int tmp;

  DPRINTF((DBG_TCP, "tcp_close((struct sock *)%X, %d)\n",sk, timeout));
  sk->inuse = 1;
  sk->keepopen = 1;
  sk->shutdown = SHUTDOWN_MASK;

  if (!sk->dead)
  	sk->state_change(sk);

  /* Discard anything still unread; if the app never consumed received
     data we should reset rather than close cleanly (BSD behaviour). */
  if (skb_peek(&sk->rqueue) != NULL)
  {
  	struct sk_buff *skb;
  	if(sk->debug)
  		printk("Clean rcv queue\n");
  	while((skb=skb_dequeue(&sk->rqueue))!=NULL)
  	{
  		if(skb->len > 0 && after(skb->h.th->seq + skb->len + 1 , sk->copied_seq))
  			need_reset = 1;
  		kfree_skb(skb, FREE_READ);
  	}
  	if(sk->debug)
  		printk("Cleaned.\n");
  }
  sk->rqueue = NULL;

  /* Flush any half-built Nagle packet before the FIN. */
  if (sk->partial) {
  	tcp_send_partial(sk);
  }

  switch(sk->state) {
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
	case TCP_LAST_ACK:
		/* FIN already sent: just bound how long we linger. */
		reset_timer(sk, TIME_CLOSE, 4 * sk->rto);
		if (timeout) tcp_time_wait(sk);
		release_sock(sk);
		return;
	case TCP_TIME_WAIT:
		if (timeout) {
		  sk->state = TCP_CLOSE;
		}
		release_sock(sk);
		return;
	case TCP_LISTEN:
		sk->state = TCP_CLOSE;
		release_sock(sk);
		return;
	case TCP_CLOSE:
		release_sock(sk);
		return;
	case TCP_CLOSE_WAIT:
	case TCP_ESTABLISHED:
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		prot =(struct proto *)sk->prot;
		th =(struct tcphdr *)&sk->dummy_th;
		buff = prot->wmalloc(sk, MAX_FIN_SIZE, 1, GFP_ATOMIC);
		if (buff == NULL) {
			/* Out of memory: back off and retry the close from
			   the timer in 100 ticks. */
			release_sock(sk);
			if (sk->state != TCP_CLOSE_WAIT)
					sk->state = TCP_ESTABLISHED;
			reset_timer(sk, TIME_CLOSE, 100);
			return;
		}
		buff->mem_addr = buff;
		buff->mem_len = MAX_FIN_SIZE;
		buff->sk = sk;
		buff->free = 1;
		buff->len = sizeof(*t1);
		t1 =(struct tcphdr *) buff->data;

		tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
					 IPPROTO_TCP, sk->opt,
				         sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
		if (tmp < 0) {
			kfree_skb(buff,FREE_WRITE);
			DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
			release_sock(sk);
			return;
		}

		t1 =(struct tcphdr *)((char *)t1 +tmp);
		buff->len += tmp;
		buff->dev = dev;
		memcpy(t1, th, sizeof(*t1));
		t1->seq = ntohl(sk->write_seq);
		sk->write_seq++;	/* the FIN consumes a sequence number */
		buff->h.seq = sk->write_seq;
		t1->ack = 1;

		/* No point delaying ACKs any more. */
		sk->delay_acks = 0;
		t1->ack_seq = ntohl(sk->acked_seq);
		t1->window = ntohs(sk->window=tcp_select_window(sk));
		t1->fin = 1;
		t1->rst = need_reset;	/* unread data -> signal abort */
		t1->doff = sizeof(*t1)/4;
		tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

		if (sk->wfront == NULL) {
			/* Nothing queued ahead of us: send the FIN now. */
			sk->sent_seq = sk->write_seq;
			prot->queue_xmit(sk, dev, buff, 0);
		} else {
			/* Data still queued: append the FIN behind it. */
			reset_timer(sk, TIME_WRITE, sk->rto);
			buff->next = NULL;
			if (sk->wback == NULL) {
				sk->wfront = buff;
			} else {
				sk->wback->next = buff;
			}
			sk->wback = buff;
			buff->magic = TCP_WRITE_QUEUE_MAGIC;
		}

		/* NOTE(review): per RFC 793 a close from CLOSE_WAIT should
		   enter LAST_ACK, not FIN_WAIT2 — looks wrong, but kept as-is;
		   confirm against the rest of the state machine. */
		if (sk->state == TCP_CLOSE_WAIT) {
			sk->state = TCP_FIN_WAIT2;
		} else {
			sk->state = TCP_FIN_WAIT1;
		}
  }
  release_sock(sk);
}
2073
2074
2075
2076
2077
2078
/*
 * Push queued segments from the write queue (wfront/wback) onto the wire,
 * as long as they fit in the peer's advertised window and our congestion
 * window, and we are not in the middle of a retransmit run.  Segments
 * that turn out to be already acknowledged are simply freed.
 */
static void
tcp_write_xmit(struct sock *sk)
{
  struct sk_buff *skb;

  DPRINTF((DBG_TCP, "tcp_write_xmit(sk=%X)\n", sk));

  /* A zapped (reset) socket must not transmit. */
  if(sk->zapped)
	return;

  while(sk->wfront != NULL &&
        before(sk->wfront->h.seq, sk->window_seq +1) &&
        (sk->retransmits == 0 ||
	 sk->timeout != TIME_WRITE ||
	 before(sk->wfront->h.seq, sk->rcv_ack_seq +1))
        && sk->packets_out < sk->cong_window) {
		/* Detach the head of the write queue. */
		skb = sk->wfront;
		IS_SKB(skb);
		sk->wfront = skb->next;
		if (sk->wfront == NULL) sk->wback = NULL;
		skb->next = NULL;
		/* Corrupted queue: dump it rather than send garbage. */
		if (skb->magic != TCP_WRITE_QUEUE_MAGIC) {
			printk("tcp.c skb with bad magic(%X) on write queue. Squashing "
			       "queue\n", skb->magic);
			sk->wfront = NULL;
			sk->wback = NULL;
			return;
		}
		skb->magic = 0;
		DPRINTF((DBG_TCP, "Sending a packet.\n"));

		/* Already acknowledged while it sat in the queue?  Free it. */
		if (before(skb->h.seq, sk->rcv_ack_seq +1)) {
			sk->retransmits = 0;
			kfree_skb(skb, FREE_WRITE);
			if (!sk->dead) sk->write_space(sk);
		} else {
			sk->sent_seq = skb->h.seq;
			sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
		}
	}
}
2123
2124
2125
2126
2127
2128
2129 void
2130 sort_send(struct sock *sk)
2131 {
2132 struct sk_buff *list = NULL;
2133 struct sk_buff *skb,*skb2,*skb3;
2134
2135 for (skb = sk->send_head; skb != NULL; skb = skb2) {
2136 skb2 = (struct sk_buff *)skb->link3;
2137 if (list == NULL || before (skb2->h.seq, list->h.seq)) {
2138 skb->link3 = list;
2139 sk->send_tail = skb;
2140 list = skb;
2141 } else {
2142 for (skb3 = list; ; skb3 = (struct sk_buff *)skb3->link3) {
2143 if (skb3->link3 == NULL ||
2144 before(skb->h.seq, skb3->link3->h.seq)) {
2145 skb->link3 = skb3->link3;
2146 skb3->link3 = skb;
2147 if (skb->link3 == NULL) sk->send_tail = skb;
2148 break;
2149 }
2150 }
2151 }
2152 }
2153 sk->send_head = list;
2154 }
2155
2156
2157
/*
 * Process the ACK information of an incoming segment: validate the ack
 * number, track the peer's window (including the painful window-shrink
 * case), run slow-start/congestion-avoidance, free acknowledged segments
 * from the retransmit list while updating the RTT estimator (Karn's rule:
 * no RTT sample from retransmitted segments), kick further transmission
 * or zero-window probing, and drive the closing-state transitions.
 * Returns 1 if the ACK was acceptable, 0 if it should reset the peer.
 *
 * "flag" bits (internal): 1 = something was done / data present,
 * 2 = segment(s) acked during a retransmit run, 4 = window update seen.
 */
static int
tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
{
  unsigned long ack;
  int flag = 0;

  /* Zapped sockets ignore everything. */
  if(sk->zapped)
	return(1);

  ack = ntohl(th->ack_seq);
  DPRINTF((DBG_TCP, "tcp_ack ack=%d, window=%d, "
	  "sk->rcv_ack_seq=%d, sk->window_seq = %d\n",
	  ack, ntohs(th->window), sk->rcv_ack_seq, sk->window_seq));

  /* Track the largest window the peer has ever offered; it bounds mss. */
  if (ntohs(th->window) > sk->max_window) {
  	sk->max_window = ntohs(th->window);
	sk->mss = min(sk->max_window, sk->mtu);
  }

  /* A keepalive probe got answered. */
  if (sk->retransmits && sk->timeout == TIME_KEEPOPEN)
  	sk->retransmits = 0;

  /* Ack outside [rcv_ack_seq-1, sent_seq+1]: old duplicate or bogus. */
  if (after(ack, sk->sent_seq+1) || before(ack, sk->rcv_ack_seq-1)) {
	if (after(ack, sk->sent_seq) ||
	   (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)) {
		return(0);	/* unacceptable: caller may reset */
	}
	if (sk->keepopen) {
		reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
	}
	return(1);
  }

  /* Segment carries data beyond the header. */
  if (len != th->doff*4) flag |= 1;

  /* The peer shrank its window below what we already committed to:
     move segments that no longer fit back from the retransmit list
     onto the front of the write queue, preserving order. */
  if (after(sk->window_seq, ack+ntohs(th->window))) {
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *wskb = NULL;

	skb2 = sk->send_head;
	sk->send_head = NULL;
	sk->send_tail = NULL;

	flag |= 4;

	sk->window_seq = ack + ntohs(th->window);
	cli();	/* the lists are shared with the timer/receive paths */
	while (skb2 != NULL) {
		skb = skb2;
		skb2 = (struct sk_buff *)skb->link3;
		skb->link3 = NULL;
		if (after(skb->h.seq, sk->window_seq)) {
			/* Beyond the new window: back onto the write queue. */
			if (sk->packets_out > 0) sk->packets_out--;

			if (skb->next != NULL) {
				skb_unlink(skb);
			}

			skb->magic = TCP_WRITE_QUEUE_MAGIC;
			if (wskb == NULL) {
				skb->next = sk->wfront;
				sk->wfront = skb;
			} else {
				skb->next = wskb->next;
				wskb->next = skb;
			}
			if (sk->wback == wskb) sk->wback = skb;
			wskb = skb;
		} else {
			/* Still in window: keep on the retransmit list. */
			if (sk->send_head == NULL) {
				sk->send_head = skb;
				sk->send_tail = skb;
			} else {
				sk->send_tail->link3 = skb;
				sk->send_tail = skb;
			}
			skb->link3 = NULL;
		}
	}
	sti();
  }

  /* Keep head/tail/packets_out mutually consistent. */
  if (sk->send_tail == NULL || sk->send_head == NULL) {
	sk->send_head = NULL;
	sk->send_tail = NULL;
	sk->packets_out= 0;
  }

  sk->window_seq = ack + ntohs(th->window);

  /* Congestion window: slow start below ssthresh (one segment per ACK),
     linear growth (one per window's worth of ACKs) above it. */
  if (sk->timeout == TIME_WRITE &&
      sk->cong_window < 2048 && after(ack, sk->rcv_ack_seq)) {
	if (sk->cong_window < sk->ssthresh)
	  /* slow start */
	  sk->cong_window++;
	else {
		/* congestion avoidance */
		if (sk->cong_count >= sk->cong_window) {
			sk->cong_window++;
			sk->cong_count = 0;
		} else
			sk->cong_count++;
	}
  }

  DPRINTF((DBG_TCP, "tcp_ack: Updating rcv ack sequence.\n"));
  sk->rcv_ack_seq = ack;

  /* A window opened while we were zero-window probing: resume normal
     timing and recompute the RTO from the estimator. */
  if (sk->timeout == TIME_PROBE0) {
  	if (sk->wfront != NULL &&
	    ! before (sk->window_seq, sk->wfront->h.seq)) {
		sk->retransmits = 0;
		sk->backoff = 0;

		sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
		if (sk->rto > 120*HZ)
			sk->rto = 120*HZ;
		if (sk->rto < 1*HZ)
			sk->rto = 1*HZ;
	}
  }

  /* Free every fully-acknowledged segment on the retransmit list. */
  while(sk->send_head != NULL) {
	/* Sanity: the list must be sequence-ordered. */
	if (sk->send_head->link3 &&
	    after(sk->send_head->h.seq, sk->send_head->link3->h.seq)) {
		printk("INET: tcp.c: *** bug send_list out of order.\n");
		sort_send(sk);
	}

	if (before(sk->send_head->h.seq, ack+1)) {
		struct sk_buff *oskb;

		if (sk->retransmits) {
			/* Partial progress during retransmit: keep at most
			   one retransmit pending. */
			flag |= 2;

			if (sk->send_head->link3)
				sk->retransmits = 1;
			else
				sk->retransmits = 0;
		}

		if (sk->packets_out > 0) sk->packets_out --;
		DPRINTF((DBG_TCP, "skb=%X skb->h.seq = %d acked ack=%d\n",
				sk->send_head, sk->send_head->h.seq, ack));

		if (!sk->dead) sk->write_space(sk);

		oskb = sk->send_head;

		/* Karn: only sample RTT when nothing was retransmitted.
		   Classic Jacobson srtt/mdev update (rtt scaled by 8,
		   mdev by 4); rto clamped to [1s, 120s]. */
		if (!(flag&2)) {
			long m;

			m = jiffies - oskb->when;
			m -= (sk->rtt >> 3);
			sk->rtt += m;
			if (m < 0)
				m = -m;
			m -= (sk->mdev >> 2);
			sk->mdev += m;

			sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
			if (sk->rto > 120*HZ)
				sk->rto = 120*HZ;
			if (sk->rto < 1*HZ)
				sk->rto = 1*HZ;
			sk->backoff = 0;

		}
		flag |= (2|4);

		cli();

		oskb = sk->send_head;
		IS_SKB(oskb);
		sk->send_head =(struct sk_buff *)oskb->link3;
		if (sk->send_head == NULL) {
			sk->send_tail = NULL;
		}

		skb_unlink(oskb);	/* also off any device queue */
		sti();
		oskb->magic = 0;
		kfree_skb(oskb, FREE_WRITE);
		if (!sk->dead) sk->write_space(sk);
	} else {
		break;	/* first unacked segment reached */
	}
  }

  /* Decide what to do next: transmit more, start a zero-window probe,
     or just (re)arm the appropriate timer. */
  if (sk->wfront != NULL) {
	if (after (sk->window_seq+1, sk->wfront->h.seq) &&
		(sk->retransmits == 0 ||
		 sk->timeout != TIME_WRITE ||
		 before(sk->wfront->h.seq, sk->rcv_ack_seq +1))
		&& sk->packets_out < sk->cong_window) {
		flag |= 1;
		tcp_write_xmit(sk);
	} else if (before(sk->window_seq, sk->wfront->h.seq) &&
		   sk->send_head == NULL &&
		   sk->ack_backlog == 0 &&
		   sk->state != TCP_TIME_WAIT) {
		/* Data queued but window is zero: probe. */
		reset_timer(sk, TIME_PROBE0, sk->rto);
	}
  } else {
	if (sk->send_head == NULL && sk->ack_backlog == 0 &&
	    sk->state != TCP_TIME_WAIT && !sk->keepopen) {
		DPRINTF((DBG_TCP, "Nothing to do, going to sleep.\n"));
		if (!sk->dead) sk->write_space(sk);

		if (sk->keepopen)
			reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
		else
			delete_timer(sk);
	} else {
		if (sk->state != (unsigned char) sk->keepopen) {
			reset_timer(sk, TIME_WRITE, sk->rto);
		}
		if (sk->state == TCP_TIME_WAIT) {
			reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
		}
	}
  }

  /* Everything drained: flush a pending partial packet. */
  if (sk->packets_out == 0 && sk->partial != NULL &&
      sk->wfront == NULL && sk->send_head == NULL) {
	flag |= 1;
	tcp_send_partial(sk);
  }

  /* Closing handshake progress. */
  if (sk->state == TCP_TIME_WAIT) {
	if (!sk->dead)
		sk->state_change(sk);
	if (sk->rcv_ack_seq == sk->write_seq && sk->acked_seq == sk->fin_seq) {
		flag |= 1;
		sk->state = TCP_CLOSE;
		sk->shutdown = SHUTDOWN_MASK;
	}
  }

  if (sk->state == TCP_LAST_ACK || sk->state == TCP_FIN_WAIT2) {
	if (!sk->dead) sk->state_change(sk);
	if (sk->rcv_ack_seq == sk->write_seq) {
		flag |= 1;
		if (sk->acked_seq != sk->fin_seq) {
			tcp_time_wait(sk);
		} else {
			DPRINTF((DBG_TCP, "tcp_ack closing socket - %X\n", sk));
			tcp_send_ack(sk->sent_seq, sk->acked_seq, sk,
				     th, sk->daddr);
			sk->shutdown = SHUTDOWN_MASK;
			sk->state = TCP_CLOSE;
		}
	}
  }

  /* Fast retransmit heuristic: a pure window update or partial ack
     during retransmission, or a stale head segment, triggers an
     immediate retransmit of the first unacked segment. */
  if (((!flag) || (flag&4)) && sk->send_head != NULL &&
      (((flag&2) && sk->retransmits) ||
       (sk->send_head->when + sk->rto < jiffies))) {
	ip_do_retransmit(sk, 1);
	reset_timer(sk, TIME_WRITE, sk->rto);
  }

  DPRINTF((DBG_TCP, "leaving tcp_ack\n"));
  return(1);
}
2526
2527
2528
2529
2530
2531
2532
/*
 * Handle the data portion of an incoming segment: insert the skb into the
 * receive queue in sequence order (dropping superseded duplicates),
 * advance acked_seq over any now-contiguous run of segments, shrink the
 * advertised window accordingly, decide between an immediate and a
 * delayed ACK, and wake the reader.  Out-of-sequence arrivals may evict
 * unacked queued data to reclaim memory before forcing an ACK.
 * Returns 0 in all cases (the skb is always consumed or queued).
 */
static int
tcp_data(struct sk_buff *skb, struct sock *sk,
	 unsigned long saddr, unsigned short len)
{
  struct sk_buff *skb1, *skb2;
  struct tcphdr *th;
  int dup_dumped=0;

  th = skb->h.th;
  print_th(th);
  skb->len = len -(th->doff*4);	/* payload only */

  DPRINTF((DBG_TCP, "tcp_data len = %d sk = %X:\n", skb->len, sk));

  sk->bytes_rcv += skb->len;
  /* Pure ACK carrying no data, FIN, URG or PSH: nothing to queue. */
  if (skb->len == 0 && !th->fin && !th->urg && !th->psh) {
	/* If it wasn't even an ACK, answer so the peer resynchronises. */
	if (!th->ack) tcp_send_ack(sk->sent_seq, sk->acked_seq,sk, th, saddr);
	kfree_skb(skb, FREE_READ);
	return(0);
  }

  /* Data after we shut the receive side down: abort the connection. */
  if (sk->shutdown & RCV_SHUTDOWN) {
	sk->acked_seq = th->seq + skb->len + th->syn + th->fin;
	tcp_reset(sk->saddr, sk->daddr, skb->h.th,
	  	sk->prot, NULL, skb->dev, sk->ip_tos, sk->ip_ttl);
	sk->state = TCP_CLOSE;
	sk->err = EPIPE;
	sk->shutdown = SHUTDOWN_MASK;
	DPRINTF((DBG_TCP, "tcp_data: closing socket - %X\n", sk));
	kfree_skb(skb, FREE_READ);
	if (!sk->dead) sk->state_change(sk);
	return(0);
  }

  /* Insert into the receive queue in sequence order, scanning backwards
     from the tail (new data usually belongs at the end). */
  if (sk->rqueue == NULL) {
	DPRINTF((DBG_TCP, "tcp_data: skb = %X:\n", skb));
#ifdef OLDWAY
	sk->rqueue = skb;
	skb->next = skb;
	skb->prev = skb;
	skb->list = &sk->rqueue;
#else
	skb_queue_head(&sk->rqueue,skb);
#endif
	skb1= NULL;
  } else {
	DPRINTF((DBG_TCP, "tcp_data adding to chain sk = %X:\n", sk));
	for(skb1=sk->rqueue->prev; ; skb1 =(struct sk_buff *)skb1->prev) {
		if(sk->debug)
		{
			printk("skb1=%p :", skb1);
			printk("skb1->h.th->seq = %ld: ", skb1->h.th->seq);
			printk("skb->h.th->seq = %ld\n",skb->h.th->seq);
			printk("copied_seq = %ld acked_seq = %ld\n", sk->copied_seq,
					sk->acked_seq);
		}
#ifdef OLD
		if (after(th->seq+1, skb1->h.th->seq)) {
			skb->prev = skb1;
			skb->next = skb1->next;
			skb->next->prev = skb;
			skb1->next = skb;
			if (skb1 == sk->rqueue) sk->rqueue = skb;
			break;
		}
		if (skb1->prev == sk->rqueue) {
			skb->next= skb1;
			skb->prev = skb1->prev;
			skb->prev->next = skb;
			skb1->prev = skb;
			skb1 = NULL;

			break;
		}
#else
		/* Exact duplicate superseded by an equal-or-longer copy:
		   replace the old skb with the new one. */
		if (th->seq==skb1->h.th->seq && skb->len>= skb1->len)
		{
			skb_append(skb1,skb);
			skb_unlink(skb1);
			kfree_skb(skb1,FREE_READ);
			dup_dumped=1;
			skb1=NULL;
			break;
		}
		if (after(th->seq+1, skb1->h.th->seq))
		{
			skb_append(skb1,skb);
			break;
		}
		if (skb1 == sk->rqueue)
		{
			skb_queue_head(&sk->rqueue, skb);
			break;
		}
#endif
	}
	DPRINTF((DBG_TCP, "skb = %X:\n", skb));
  }

  /* Cache the end sequence of this segment in th->ack_seq (the field is
     no longer needed once the ack side has been processed). */
  th->ack_seq = th->seq + skb->len;
  if (th->syn) th->ack_seq++;
  if (th->fin) th->ack_seq++;

  if (before(sk->acked_seq, sk->copied_seq)) {
	printk("*** tcp.c:tcp_data bug acked < copied\n");
	sk->acked_seq = sk->copied_seq;
  }

  /* If this segment is in sequence (or fills the hole at the head),
     advance acked_seq over it and over any following contiguous
     segments, shrinking the advertised window as data is consumed. */
  if ((!dup_dumped && (skb1 == NULL || skb1->acked)) || before(th->seq, sk->acked_seq+1)) {
	if (before(th->seq, sk->acked_seq+1)) {
		int newwindow;

		if (after(th->ack_seq, sk->acked_seq)) {
			newwindow = sk->window -
				       (th->ack_seq - sk->acked_seq);
			if (newwindow < 0)
				newwindow = 0;
			sk->window = newwindow;
			sk->acked_seq = th->ack_seq;
		}
		skb->acked = 1;

		/* An in-order FIN closes the receive half. */
		if (skb->h.th->fin) {
			if (!sk->dead) sk->state_change(sk);
			sk->shutdown |= RCV_SHUTDOWN;
		}

		/* Sweep forward over previously queued out-of-order
		   segments that are now contiguous. */
		for(skb2 = (struct sk_buff *)skb->next;
		    skb2 !=(struct sk_buff *) sk->rqueue;
		    skb2 = (struct sk_buff *)skb2->next) {
			if (before(skb2->h.th->seq, sk->acked_seq+1)) {
				if (after(skb2->h.th->ack_seq, sk->acked_seq))
				{
					newwindow = sk->window -
					 (skb2->h.th->ack_seq - sk->acked_seq);
					if (newwindow < 0)
						newwindow = 0;
					sk->window = newwindow;
					sk->acked_seq = skb2->h.th->ack_seq;
				}
				skb2->acked = 1;

				if (skb2->h.th->fin) {
					sk->shutdown |= RCV_SHUTDOWN;
					if (!sk->dead) sk->state_change(sk);
				}

				/* Force an immediate ack below. */
				sk->ack_backlog = sk->max_ack_backlog;
			} else {
				break;
			}
		}

		/* Delayed-ack policy: ack now if delaying is off, the
		   backlog is full, too many bytes are unacked, or a FIN
		   arrived; otherwise queue the ack on a short timer. */
		if (!sk->delay_acks ||
		    sk->ack_backlog >= sk->max_ack_backlog ||
		    sk->bytes_rcv > sk->max_unacked || th->fin) {
/*			tcp_send_ack(sk->sent_seq, sk->acked_seq,sk,th, saddr); */
		} else {
			sk->ack_backlog++;
			if(sk->debug)
				printk("Ack queued.\n");
			reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
		}
	}
  }

  /* Out-of-sequence segment: before forcing an ACK, reclaim receive
     memory by discarding queued-but-unacked segments if space is low
     (the peer will retransmit them). */
  if (!skb->acked) {
	while (sk->prot->rspace(sk) < sk->mtu) {
		skb1 = skb_peek(&sk->rqueue);
		if (skb1 == NULL) {
			printk("INET: tcp.c:tcp_data memory leak detected.\n");
			break;
		}

		/* Never toss already-acknowledged data. */
		if (skb1->acked) {
			break;
		}

		skb_unlink(skb1);
#ifdef OLDWAY
		if (skb1->prev == skb1) {
			sk->rqueue = NULL;
		} else {
			sk->rqueue = (struct sk_buff *)skb1->prev;
			skb1->next->prev = skb1->prev;
			skb1->prev->next = skb1->next;
		}
#endif
		kfree_skb(skb1, FREE_READ);
	}
	tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
	sk->ack_backlog++;
	reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
  } else {
	/* In-order data: ack (possibly redundantly) right away. */
	tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
  }

  /* Wake any reader. */
  if (!sk->dead) {
	if(sk->debug)
		printk("Data wakeup.\n");
	sk->data_ready(sk,0);
  } else {
	DPRINTF((DBG_TCP, "data received on dead socket.\n"));
  }

  /* Both FINs exchanged and everything acked: move to LAST_ACK. */
  if (sk->state == TCP_FIN_WAIT2 &&
      sk->acked_seq == sk->fin_seq && sk->rcv_ack_seq == sk->write_seq) {
	DPRINTF((DBG_TCP, "tcp_data: entering last_ack state sk = %X\n", sk));

	sk->shutdown = SHUTDOWN_MASK;
	sk->state = TCP_LAST_ACK;
	if (!sk->dead) sk->state_change(sk);
  }

  return(0);
}
2784
2785
2786 static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
2787 {
2788 unsigned long ptr = ntohs(th->urg_ptr);
2789
2790 if (ptr)
2791 ptr--;
2792 ptr += th->seq;
2793
2794
2795 if (after(sk->copied_seq+1, ptr))
2796 return;
2797
2798
2799 if (sk->urg_data && !after(ptr, sk->urg_seq))
2800 return;
2801
2802
2803 if (sk->proc != 0) {
2804 if (sk->proc > 0) {
2805 kill_proc(sk->proc, SIGURG, 1);
2806 } else {
2807 kill_pg(-sk->proc, SIGURG, 1);
2808 }
2809 }
2810 sk->urg_data = URG_NOTYET;
2811 sk->urg_seq = ptr;
2812 }
2813
2814 static inline int tcp_urg(struct sock *sk, struct tcphdr *th,
2815 unsigned long saddr, unsigned long len)
2816 {
2817 unsigned long ptr;
2818
2819
2820 if (th->urg)
2821 tcp_check_urg(sk,th);
2822
2823
2824 if (sk->urg_data != URG_NOTYET)
2825 return 0;
2826
2827
2828 ptr = sk->urg_seq - th->seq + th->doff*4;
2829 if (ptr >= len)
2830 return 0;
2831
2832
2833 sk->urg_data = URG_VALID | *(ptr + (unsigned char *) th);
2834 if (!sk->dead)
2835 wake_up_interruptible(sk->sleep);
2836 return 0;
2837 }
2838
2839
2840
2841 static int
2842 tcp_fin(struct sock *sk, struct tcphdr *th,
2843 unsigned long saddr, struct device *dev)
2844 {
2845 DPRINTF((DBG_TCP, "tcp_fin(sk=%X, th=%X, saddr=%X, dev=%X)\n",
2846 sk, th, saddr, dev));
2847
2848 if (!sk->dead) {
2849 sk->state_change(sk);
2850 }
2851
2852 switch(sk->state) {
2853 case TCP_SYN_RECV:
2854 case TCP_SYN_SENT:
2855 case TCP_ESTABLISHED:
2856
2857 sk->fin_seq = th->seq+1;
2858 sk->state = TCP_CLOSE_WAIT;
2859 if (th->rst) sk->shutdown = SHUTDOWN_MASK;
2860 break;
2861
2862 case TCP_CLOSE_WAIT:
2863 case TCP_FIN_WAIT2:
2864 break;
2865
2866 case TCP_FIN_WAIT1:
2867
2868 sk->fin_seq = th->seq+1;
2869 sk->state = TCP_FIN_WAIT2;
2870 break;
2871
2872 default:
2873 case TCP_TIME_WAIT:
2874 sk->state = TCP_LAST_ACK;
2875
2876
2877 reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
2878 return(0);
2879 }
2880 sk->ack_backlog++;
2881
2882 return(0);
2883 }
2884
2885
2886
/*
 * accept() on a listening TCP socket.  Established (or establishing)
 * connections sit as skbs on the listener's receive queue, each pointing
 * at its new sock; dequeue one, sleeping if necessary unless O_NONBLOCK.
 * Returns the new sock, or NULL with sk->err set.
 */
static struct sock *
tcp_accept(struct sock *sk, int flags)
{
  struct sock *newsk;
  struct sk_buff *skb;

  DPRINTF((DBG_TCP, "tcp_accept(sk=%X, flags=%X, addr=%s)\n",
	  sk, flags, in_ntoa(sk->saddr)));

  /* accept() is only legal on a listening socket. */
  if (sk->state != TCP_LISTEN) {
	sk->err = EINVAL;
	return(NULL);
  }

  /* Interrupts off while we juggle inuse and the queue; the sleep
     itself re-enables them. */
  cli();
  sk->inuse = 1;
  while((skb = get_firstr(sk)) == NULL) {
	if (flags & O_NONBLOCK) {
		sti();
		release_sock(sk);
		sk->err = EAGAIN;
		return(NULL);
	}

	release_sock(sk);
	interruptible_sleep_on(sk->sleep);
	/* Woken by a signal rather than a connection? */
	if (current->signal & ~current->blocked) {
		sti();
		sk->err = ERESTARTSYS;
		return(NULL);
	}
	sk->inuse = 1;
  }
  sti();

  /* The skb carries the new connection's socket. */
  newsk = skb->sk;

  kfree_skb(skb, FREE_READ);
  sk->ack_backlog--;
  release_sock(sk);
  return(newsk);
}
2935
2936
2937
/*
 * connect() for TCP: validate the destination, initialise the sequence
 * state, pick an mtu, and build and transmit the initial SYN (with MSS
 * option).  Returns 0 with the socket in TCP_SYN_SENT, or a negative
 * error.  The three-way handshake completes asynchronously in tcp_rcv().
 */
static int
tcp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len)
{
  struct sk_buff *buff;
  struct sockaddr_in sin;
  struct device *dev=NULL;
  unsigned char *ptr;
  int tmp;
  struct tcphdr *t1;
  int err;

  if (sk->state != TCP_CLOSE) return(-EISCONN);
  if (addr_len < 8) return(-EINVAL);

  err=verify_area(VERIFY_READ, usin, addr_len);
  if(err)
  	return err;

  memcpy_fromfs(&sin,usin, min(sizeof(sin), addr_len));

  if (sin.sin_family && sin.sin_family != AF_INET) return(-EAFNOSUPPORT);

  DPRINTF((DBG_TCP, "TCP connect daddr=%s\n", in_ntoa(sin.sin_addr.s_addr)));

  /* Connecting to a broadcast address makes no sense for TCP. */
  if (chk_addr(sin.sin_addr.s_addr) == IS_BROADCAST) {
	DPRINTF((DBG_TCP, "TCP connection to broadcast address not allowed\n"));
	return(-ENETUNREACH);
  }

  /* Refuse a connection to ourselves on the same port. */
  if(sk->saddr == sin.sin_addr.s_addr && sk->num==ntohs(sin.sin_port))
  	return -EBUSY;

  sk->inuse = 1;
  sk->daddr = sin.sin_addr.s_addr;
  /* Clock-derived initial send sequence. */
  sk->write_seq = jiffies * SEQ_TICK - seq_offset;
  sk->window_seq = sk->write_seq;
  sk->rcv_ack_seq = sk->write_seq -1;
  sk->err = 0;
  sk->dummy_th.dest = sin.sin_port;
  release_sock(sk);

  buff = sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
  if (buff == NULL) {
	return(-ENOMEM);
  }
  sk->inuse = 1;
  buff->mem_addr = buff;
  buff->mem_len = MAX_SYN_SIZE;
  buff->len = 24;	/* 20-byte TCP header + 4-byte MSS option */
  buff->sk = sk;
  buff->free = 1;
  t1 = (struct tcphdr *) buff->data;

  /* Prepend IP/link headers; this also picks the output device. */
  tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
					IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);
  if (tmp < 0) {
	sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
	release_sock(sk);
	return(-ENETUNREACH);
  }
  buff->len += tmp;
  t1 = (struct tcphdr *)((char *)t1 +tmp);

  memcpy(t1,(void *)&(sk->dummy_th), sizeof(*t1));
  t1->seq = ntohl(sk->write_seq++);	/* the SYN consumes a sequence number */
  sk->sent_seq = sk->write_seq;
  buff->h.seq = sk->write_seq;
  t1->ack = 0;
  t1->window = 2;	/* tiny initial window until the handshake completes */
  t1->res1=0;
  t1->res2=0;
  t1->rst = 0;
  t1->urg = 0;
  t1->psh = 0;
  t1->syn = 1;
  t1->urg_ptr = 0;
  t1->doff = 6;	/* 24 bytes: header plus MSS option */

  /* Choose the mtu we will advertise: user override, small default for
     off-net peers, else the local maximum; always bounded by the device. */
  if (sk->user_mss)
    sk->mtu = sk->user_mss;
  else {
#ifdef SUBNETSARELOCAL
    if ((sk->saddr ^ sk->daddr) & default_mask(sk->saddr))
#else
    if ((sk->saddr ^ sk->daddr) & dev->pa_mask)
#endif
      sk->mtu = 576 - HEADER_SIZE;
    else
      sk->mtu = MAX_WINDOW;
  }

  sk->mtu = min(sk->mtu, dev->mtu - HEADER_SIZE);

  /* MSS option: kind 2, length 4, value in network order. */
  ptr = (unsigned char *)(t1+1);
  ptr[0] = 2;
  ptr[1] = 4;
  ptr[2] = (sk->mtu) >> 8;
  ptr[3] = (sk->mtu) & 0xff;
  tcp_send_check(t1, sk->saddr, sk->daddr,
		  sizeof(struct tcphdr) + 4, sk);

  /* Enter SYN_SENT and let the write timer drive retransmission. */
  sk->state = TCP_SYN_SENT;
  sk->rtt = TCP_CONNECT_TIME;
  reset_timer(sk, TIME_WRITE, TCP_CONNECT_TIME);
  sk->retransmits = TCP_RETR2 - TCP_SYN_RETRIES;

  sk->prot->queue_xmit(sk, dev, buff, 0);

  release_sock(sk);
  return(0);
}
3056
3057
3058
3059 static int
3060 tcp_sequence(struct sock *sk, struct tcphdr *th, short len,
3061 struct options *opt, unsigned long saddr, struct device *dev)
3062 {
3063 unsigned long next_seq;
3064
3065 next_seq = len - 4*th->doff;
3066 if (th->fin)
3067 next_seq++;
3068
3069 if (next_seq && !sk->window)
3070 goto ignore_it;
3071 next_seq += th->seq;
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081 if (!after(next_seq+1, sk->acked_seq))
3082 goto ignore_it;
3083
3084 if (!before(th->seq, sk->acked_seq + sk->window + 1))
3085 goto ignore_it;
3086
3087
3088 return 1;
3089
3090 ignore_it:
3091 DPRINTF((DBG_TCP, "tcp_sequence: rejecting packet.\n"));
3092
3093
3094
3095
3096
3097
3098
3099
3100 if (sk->state==TCP_SYN_SENT || sk->state==TCP_SYN_RECV) {
3101 tcp_reset(sk->saddr,sk->daddr,th,sk->prot,NULL,dev, sk->ip_tos,sk->ip_ttl);
3102 return 1;
3103 }
3104
3105 if (th->rst)
3106 return 0;
3107
3108
3109 tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
3110 return 0;
3111 }
3112
3113
/*
 * Main TCP receive routine, called from the IP layer for every inbound
 * TCP segment (and re-called with redo=1 for segments replayed from a
 * socket's backlog queue).  Validates the segment, locates the owning
 * socket, charges receive memory, and dispatches on the socket state.
 * Always returns 0; the skb is consumed (freed or queued) on all paths.
 */
int
tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
	unsigned long daddr, unsigned short len,
	unsigned long saddr, int redo, struct inet_protocol * protocol)
{
	struct tcphdr *th;
	struct sock *sk;

	if (!skb) {
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv skb = NULL\n"));
		return(0);
	}
#if 0
	if (!protocol) {
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv protocol = NULL\n"));
		return(0);
	}

	if (!opt) {
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv opt = NULL\n"));
	}
#endif
	if (!dev) {
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv dev = NULL\n"));
		return(0);
	}
	th = skb->h.th;

	/* Demultiplex: find the socket owning this 4-tuple. */
	sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
	DPRINTF((DBG_TCP, "<<\n"));
	DPRINTF((DBG_TCP, "len = %d, redo = %d, skb=%X\n", len, redo, skb));

	/* A zapped socket has been reset and is dead for all intents and
	 * purposes — treat it as no socket at all. */
	if (sk!=NULL && sk->zapped)
		sk=NULL;

	if (sk) {
		DPRINTF((DBG_TCP, "sk = %X:\n", sk));
	}

	if (!redo) {
		/* First pass (not a backlog replay): verify the checksum. */
		if (tcp_check(th, len, saddr, daddr )) {
			skb->sk = NULL;
			DPRINTF((DBG_TCP, "packet dropped with bad checksum.\n"));
			if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: bad checksum\n");
			kfree_skb(skb,FREE_READ);
			return(0);
		}

		/* Sequence number is kept in host byte order from here on. */
		th->seq = ntohl(th->seq);

		/* No socket: answer with a reset (unless the segment itself
		 * is a RST, which must never be answered). */
		if (sk == NULL) {
			if (!th->rst)
				tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev,skb->ip_hdr->tos,255);
			skb->sk = NULL;
			kfree_skb(skb, FREE_READ);
			return(0);
		}

		skb->len = len;
		skb->sk = sk;
		skb->acked = 0;
		skb->used = 0;
		skb->free = 0;
		/* Note the swap: skb->saddr/daddr are set up for replies. */
		skb->saddr = daddr;
		skb->daddr = saddr;

		/* If the socket is busy, append the skb to its circular
		 * backlog list instead of processing it now.  Interrupts
		 * are off while the list is manipulated. */
		cli();
		if (sk->inuse) {
			if (sk->back_log == NULL) {
				sk->back_log = skb;
				skb->next = skb;
				skb->prev = skb;
			} else {
				skb->next = sk->back_log;
				skb->prev = sk->back_log->prev;
				skb->prev->next = skb;
				skb->next->prev = skb;
			}
			sti();
			return(0);
		}
		sk->inuse = 1;
		sti();
	} else {
		/* Backlog replay: the caller must have supplied the socket. */
		if (!sk) {
			DPRINTF((DBG_TCP, "tcp.c: tcp_rcv bug sk=NULL redo = 1\n"));
			return(0);
		}
	}

	if (!sk->prot) {
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv sk->prot = NULL \n"));
		return(0);
	}

	/* Charge the buffer to the socket; drop if over the rcvbuf limit. */
	if (sk->rmem_alloc + skb->mem_len >= sk->rcvbuf) {
		skb->sk = NULL;
		DPRINTF((DBG_TCP, "dropping packet due to lack of buffer space.\n"));
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	}
	sk->rmem_alloc += skb->mem_len;

	DPRINTF((DBG_TCP, "About to do switch.\n"));

	switch(sk->state) {
		/*
		 * LAST_ACK handles RST specially, then FALLS THROUGH to the
		 * established-states group below.
		 */
		case TCP_LAST_ACK:
			if (th->rst) {
				sk->zapped=1;
				sk->err = ECONNRESET;
				sk->state = TCP_CLOSE;
				sk->shutdown = SHUTDOWN_MASK;
				if (!sk->dead) {
					sk->state_change(sk);
				}
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
			/* fall through */

		case TCP_ESTABLISHED:
		case TCP_CLOSE_WAIT:
		case TCP_FIN_WAIT1:
		case TCP_FIN_WAIT2:
		case TCP_TIME_WAIT:
			/* Drop segments outside the receive window. */
			if (!tcp_sequence(sk, th, len, opt, saddr,dev)) {
				if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: not in seq\n");
#ifdef undef
				if(!th->rst)
					tcp_send_ack(sk->sent_seq, sk->acked_seq,
						     sk, th, saddr);
#endif
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			/* In-window RST: kill the connection.  EPIPE rather
			 * than ECONNRESET when we had already seen a FIN. */
			if (th->rst) {
				sk->zapped=1;

				sk->err = ECONNRESET;

				if (sk->state == TCP_CLOSE_WAIT) {
					sk->err = EPIPE;
				}

				sk->state = TCP_CLOSE;
				sk->shutdown = SHUTDOWN_MASK;
				if (!sk->dead) {
					sk->state_change(sk);
				}
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
			/* A SYN on an established connection is an error:
			 * reset the connection.  (The #if 0 block is part of
			 * the same condition, disabled security checks.) */
			if (
#if 0
			if ((opt && (opt->security != 0 ||
				     opt->compartment != 0)) ||
#endif
			    th->syn) {
				sk->err = ECONNRESET;
				sk->state = TCP_CLOSE;
				sk->shutdown = SHUTDOWN_MASK;
				tcp_reset(daddr, saddr, th, sk->prot, opt,dev, sk->ip_tos,sk->ip_ttl);
				if (!sk->dead) {
					sk->state_change(sk);
				}
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			/* Process ACK, urgent data, payload, then FIN; each
			 * helper returning non-zero means "segment consumed,
			 * stop here". */
			if (th->ack && !tcp_ack(sk, th, saddr, len)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			if (tcp_urg(sk, th, saddr, len)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			if (tcp_data(skb, sk, saddr, len)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			if (th->fin && tcp_fin(sk, th, saddr, dev)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			release_sock(sk);
			return(0);

		case TCP_CLOSE:
			/* Closed socket: ignore if dead or still bound to a
			 * peer; otherwise answer non-RST segments with RST. */
			if (sk->dead || sk->daddr) {
				DPRINTF((DBG_TCP, "packet received for closed,dead socket\n"));
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			if (!th->rst) {
				if (!th->ack)
					th->ack_seq = 0;
				tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);

		case TCP_LISTEN:
			/* Listener: ignore RST, reset on ACK, and hand a SYN
			 * to tcp_conn_request (which consumes the skb). */
			if (th->rst) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
			if (th->ack) {
				tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			if (th->syn) {
#if 0
				if (opt->security != 0 || opt->compartment != 0) {
					tcp_reset(daddr, saddr, th, prot, opt,dev);
					release_sock(sk);
					return(0);
				}
#endif
				tcp_conn_request(sk, skb, daddr, saddr, opt, dev);
				release_sock(sk);
				return(0);
			}

			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);

		/*
		 * Remaining states (notably SYN_RECV): check the window
		 * first, then FALL THROUGH to share the SYN_SENT handling.
		 */
		default:
			if (!tcp_sequence(sk, th, len, opt, saddr,dev)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
			/* fall through */

		case TCP_SYN_SENT:
			/* RST during active open: connection refused. */
			if (th->rst) {
				sk->err = ECONNREFUSED;
				sk->state = TCP_CLOSE;
				sk->shutdown = SHUTDOWN_MASK;
				sk->zapped = 1;
				if (!sk->dead) {
					sk->state_change(sk);
				}
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
#if 0
			if (opt->security != 0 || opt->compartment != 0) {
				sk->err = ECONNRESET;
				sk->state = TCP_CLOSE;
				sk->shutdown = SHUTDOWN_MASK;
				tcp_reset(daddr, saddr, th, sk->prot, opt, dev);
				if (!sk->dead) {
					wake_up_interruptible(sk->sleep);
				}
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
#endif
			/* Simultaneous open: a SYN without ACK moves us to
			 * SYN_RECV; the segment itself is then dropped. */
			if (!th->ack) {
				if (th->syn) {
					sk->state = TCP_SYN_RECV;
				}

				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			switch(sk->state) {
				/*
				 * SYN_SENT with a good SYN+ACK: record the
				 * peer's sequence, ACK it, then FALL THROUGH
				 * to SYN_RECV to finish establishment.
				 */
				case TCP_SYN_SENT:
					if (!tcp_ack(sk, th, saddr, len)) {
						tcp_reset(daddr, saddr, th,
							  sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
						kfree_skb(skb, FREE_READ);
						release_sock(sk);
						return(0);
					}

					if (!th->syn) {
						kfree_skb(skb, FREE_READ);
						release_sock(sk);
						return(0);
					}

					sk->acked_seq = th->seq+1;
					sk->fin_seq = th->seq;
					tcp_send_ack(sk->sent_seq, th->seq+1,
						     sk, th, sk->daddr);
					/* fall through */

				case TCP_SYN_RECV:
					if (!tcp_ack(sk, th, saddr, len)) {
						tcp_reset(daddr, saddr, th,
							  sk->prot, opt, dev,sk->ip_tos,sk->ip_ttl);
						kfree_skb(skb, FREE_READ);
						release_sock(sk);
						return(0);
					}
					/* Handshake complete. */
					sk->state = TCP_ESTABLISHED;

					/* Parse TCP options (e.g. MSS) from
					 * the peer's SYN. */
					tcp_options(sk, th);
					sk->dummy_th.dest = th->source;
					sk->copied_seq = sk->acked_seq-1;
					if (!sk->dead) {
						sk->state_change(sk);
					}

					/* Guard against a peer advertising a
					 * zero window at establishment. */
					if (sk->max_window == 0) {
						sk->max_window = 32;
						sk->mss = min(sk->max_window, sk->mtu);
					}

					/* The SYN segment may itself carry
					 * urgent data, payload, or a FIN. */
					if (th->urg) {
						if (tcp_urg(sk, th, saddr, len)) {
							kfree_skb(skb, FREE_READ);
							release_sock(sk);
							return(0);
						}
					}
					if (tcp_data(skb, sk, saddr, len))
						kfree_skb(skb, FREE_READ);

					if (th->fin) tcp_fin(sk, th, saddr, dev);
					release_sock(sk);
					return(0);
			}

			/* State changed under us (e.g. already established
			 * via the inner switch): process the segment's
			 * urgent data, payload and FIN normally. */
			if (th->urg) {
				if (tcp_urg(sk, th, saddr, len)) {
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}
			}

			if (tcp_data(skb, sk, saddr, len)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			if (!th->fin) {
				release_sock(sk);
				return(0);
			}
			tcp_fin(sk, th, saddr, dev);
			release_sock(sk);
			return(0);
	}
}
3538
3539
3540
3541
3542
3543
3544 static void
3545 tcp_write_wakeup(struct sock *sk)
3546 {
3547 struct sk_buff *buff;
3548 struct tcphdr *t1;
3549 struct device *dev=NULL;
3550 int tmp;
3551
3552 if (sk->zapped)
3553 return;
3554
3555 if (sk -> state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT &&
3556 sk -> state != TCP_FIN_WAIT1 && sk->state != TCP_FIN_WAIT2)
3557 return;
3558
3559 buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
3560 if (buff == NULL) return;
3561
3562 buff->mem_addr = buff;
3563 buff->mem_len = MAX_ACK_SIZE;
3564 buff->len = sizeof(struct tcphdr);
3565 buff->free = 1;
3566 buff->sk = sk;
3567 DPRINTF((DBG_TCP, "in tcp_write_wakeup\n"));
3568 t1 = (struct tcphdr *) buff->data;
3569
3570
3571 tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
3572 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
3573 if (tmp < 0) {
3574 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
3575 return;
3576 }
3577
3578 buff->len += tmp;
3579 t1 = (struct tcphdr *)((char *)t1 +tmp);
3580
3581 memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
3582
3583
3584
3585
3586
3587 t1->seq = htonl(sk->sent_seq-1);
3588 t1->ack = 1;
3589 t1->res1= 0;
3590 t1->res2= 0;
3591 t1->rst = 0;
3592 t1->urg = 0;
3593 t1->psh = 0;
3594 t1->fin = 0;
3595 t1->syn = 0;
3596 t1->ack_seq = ntohl(sk->acked_seq);
3597 t1->window = ntohs(tcp_select_window(sk));
3598 t1->doff = sizeof(*t1)/4;
3599 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
3600
3601
3602
3603
3604 sk->prot->queue_xmit(sk, dev, buff, 1);
3605 }
3606
3607 void
3608 tcp_send_probe0(struct sock *sk)
3609 {
3610 if (sk->zapped)
3611 return;
3612
3613 tcp_write_wakeup(sk);
3614
3615 sk->backoff++;
3616 sk->rto = min(sk->rto << 1, 120*HZ);
3617 reset_timer (sk, TIME_PROBE0, sk->rto);
3618 sk->retransmits++;
3619 sk->prot->retransmits ++;
3620 }
3621
3622
3623
3624
3625
3626 int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
3627 {
3628 int val,err;
3629
3630 if(level!=SOL_TCP)
3631 return ip_setsockopt(sk,level,optname,optval,optlen);
3632
3633 if (optval == NULL)
3634 return(-EINVAL);
3635
3636 err=verify_area(VERIFY_READ, optval, sizeof(int));
3637 if(err)
3638 return err;
3639
3640 val = get_fs_long((unsigned long *)optval);
3641
3642 switch(optname)
3643 {
3644 case TCP_MAXSEG:
3645
3646
3647
3648
3649
3650
3651 if(val<1||val>MAX_WINDOW)
3652 return -EINVAL;
3653 sk->user_mss=val;
3654 return 0;
3655 case TCP_NODELAY:
3656 sk->nonagle=(val==0)?0:1;
3657 return 0;
3658 default:
3659 return(-ENOPROTOOPT);
3660 }
3661 }
3662
3663 int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
3664 {
3665 int val,err;
3666
3667 if(level!=SOL_TCP)
3668 return ip_getsockopt(sk,level,optname,optval,optlen);
3669
3670 switch(optname)
3671 {
3672 case TCP_MAXSEG:
3673 val=sk->user_mss;
3674 break;
3675 case TCP_NODELAY:
3676 val=sk->nonagle;
3677 break;
3678 default:
3679 return(-ENOPROTOOPT);
3680 }
3681 err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
3682 if(err)
3683 return err;
3684 put_fs_long(sizeof(int),(unsigned long *) optlen);
3685
3686 err=verify_area(VERIFY_WRITE, optval, sizeof(int));
3687 if(err)
3688 return err;
3689 put_fs_long(val,(unsigned long *)optval);
3690
3691 return(0);
3692 }
3693
3694
/*
 * TCP's protocol-operations table, registered with the generic INET
 * socket layer.  Initializers are POSITIONAL: the order must match the
 * declaration of struct proto exactly.
 * NOTE(review): the field labels below are inferred from the handler
 * names; confirm against the struct proto declaration in sock.h.
 */
struct proto tcp_prot = {
	sock_wmalloc,		/* write-buffer allocation */
	sock_rmalloc,		/* read-buffer allocation */
	sock_wfree,		/* write-buffer release */
	sock_rfree,		/* read-buffer release */
	sock_rspace,		/* free receive space */
	sock_wspace,		/* free send space */
	tcp_close,
	tcp_read,
	tcp_write,
	tcp_sendto,
	tcp_recvfrom,
	ip_build_header,	/* lower-layer header construction */
	tcp_connect,
	tcp_accept,
	ip_queue_xmit,		/* lower-layer transmit */
	tcp_retransmit,
	tcp_write_wakeup,	/* zero-window probe sender */
	tcp_read_wakeup,
	tcp_rcv,		/* input from the IP layer */
	tcp_select,
	tcp_ioctl,
	NULL,			/* no init handler */
	tcp_shutdown,
	tcp_setsockopt,
	tcp_getsockopt,
	128,			/* presumably max header size — confirm */
	0,			/* presumably retransmit counter — confirm */
	{NULL,},		/* presumably per-port socket array — confirm */
	"TCP"			/* protocol name */
};