This source file includes the following definitions.
- min
- __print_th
- print_th
- get_firstr
- diff
- tcp_select_window
- tcp_time_wait
- tcp_retransmit
- tcp_err
- tcp_readable
- tcp_select
- tcp_ioctl
- tcp_check
- tcp_send_check
- tcp_send_skb
- tcp_dequeue_partial
- tcp_send_partial
- tcp_enqueue_partial
- tcp_send_ack
- tcp_build_header
- tcp_write
- tcp_sendto
- tcp_read_wakeup
- cleanup_rbuf
- tcp_read_urg
- tcp_read
- tcp_shutdown
- tcp_recvfrom
- tcp_reset
- tcp_options
- default_mask
- tcp_conn_request
- tcp_close
- tcp_write_xmit
- sort_send
- tcp_ack
- tcp_data
- tcp_check_urg
- tcp_urg
- tcp_fin
- tcp_accept
- tcp_connect
- tcp_sequence
- tcp_rcv
- tcp_write_wakeup
- tcp_send_probe0
- tcp_setsockopt
- tcp_getsockopt
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88 #include <linux/types.h>
89 #include <linux/sched.h>
90 #include <linux/mm.h>
91 #include <linux/string.h>
92 #include <linux/socket.h>
93 #include <linux/sockios.h>
94 #include <linux/termios.h>
95 #include <linux/in.h>
96 #include <linux/fcntl.h>
97 #include "inet.h"
98 #include "dev.h"
99 #include "ip.h"
100 #include "protocol.h"
101 #include "icmp.h"
102 #include "tcp.h"
103 #include "skbuff.h"
104 #include "sock.h"
105 #include "arp.h"
106 #include <linux/errno.h>
107 #include <linux/timer.h>
108 #include <asm/system.h>
109 #include <asm/segment.h>
110 #include <linux/mm.h>
111
112 #define SEQ_TICK 3
113 unsigned long seq_offset;
114 #define SUBNETSARELOCAL
115
/*
 *	Return the smaller of two unsigned quantities.
 */
static __inline__ int
min(unsigned int a, unsigned int b)
{
	return (a <= b) ? a : b;
}
122
123
/*
 *	Dump a TCP header to the console for debugging: ports, sequence
 *	numbers, flag bits, window/checksum/urgent fields and the first
 *	four option bytes following the fixed header.
 */
static void __print_th(struct tcphdr *th)
{
	unsigned char *ptr;

	printk("TCP header:\n");
	printk(" source=%d, dest=%d, seq =%ld, ack_seq = %ld\n",
		ntohs(th->source), ntohs(th->dest),
		ntohl(th->seq), ntohl(th->ack_seq));
	printk(" fin=%d, syn=%d, rst=%d, psh=%d, ack=%d, urg=%d res1=%d res2=%d\n",
		th->fin, th->syn, th->rst, th->psh, th->ack,
		th->urg, th->res1, th->res2);
	printk(" window = %d, check = %d urg_ptr = %d\n",
		ntohs(th->window), ntohs(th->check), ntohs(th->urg_ptr));
	printk(" doff = %d\n", th->doff);
	/* options start immediately after the fixed-size header */
	ptr =(unsigned char *)(th + 1);
	printk(" options = %d %d %d %d\n", ptr[0], ptr[1], ptr[2], ptr[3]);
}
141
/* Print the TCP header only when TCP debugging is enabled. */
static inline void print_th(struct tcphdr *th)
{
	if (inet_debug == DBG_TCP)
		__print_th(th);
}
147
148
/*
 *	Pull the first buffer off a socket's receive queue
 *	(NULL when the queue is empty).
 */
static struct sk_buff *
get_firstr(struct sock *sk)
{
	return skb_dequeue(&sk->rqueue);
}
154
155
156
157
158
/*
 *	Distance between two sequence numbers: the two's-complement
 *	magnitude of (seq1 - seq2).
 */
static long
diff(unsigned long seq1, unsigned long seq2)
{
	long delta;

	delta = (long)(seq1 - seq2);
	if (delta <= 0)
		delta = ~delta + 1;	/* two's-complement negate */
	return delta;
}
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
/*
 *	Pick the receive window to advertise.  Keeps the previously
 *	advertised window when the fresh estimate is smaller than it, or
 *	smaller than one MSS (capped at MAX_WINDOW/2) -- i.e. never
 *	shrink the advertised window and avoid silly small windows.
 */
static int tcp_select_window(struct sock *sk)
{
	int new_window = sk->prot->rspace(sk);	/* current receive space */

	/* too small an offer, or smaller than already advertised:
	 * stick with the old window */
	if (new_window < min(sk->mss, MAX_WINDOW/2) ||
	    new_window < sk->window)
		return(sk->window);
	return(new_window);
}
204
205
206
/*
 *	Enter TIME_WAIT: mark the socket fully shut down, notify any
 *	waiter of the state change, and arm the close timer for the
 *	TIME_WAIT interval.
 */
static void tcp_time_wait(struct sock *sk)
{
	sk->state = TCP_TIME_WAIT;
	sk->shutdown = SHUTDOWN_MASK;
	if (!sk->dead)
		sk->state_change(sk);
	reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
}
215
216
217
218
219
220
221
222
/*
 *	Retransmit queued segments.  When 'all' is set just resend via
 *	ip_retransmit(); otherwise treat it as a congestion event first:
 *	halve the slow-start threshold, clear the congestion counter and
 *	collapse the congestion window to one segment.
 */
static void
tcp_retransmit(struct sock *sk, int all)
{
	if (all) {
		ip_retransmit(sk, all);
		return;
	}

	/* congestion response: ssthresh = cwnd/2 */
	sk->ssthresh = sk->cong_window >> 1;

	sk->cong_count = 0;

	sk->cong_window = 1;	/* back to slow start */

	/* Do the actual retransmit. */
	ip_retransmit(sk, all);
}
240
241
242
243
244
245
246
247
248
249
/*
 *	ICMP error handler for TCP.  'header' points at the offending IP
 *	header echoed back inside the ICMP message; step past it to the
 *	embedded TCP header, find the owning socket and translate the
 *	ICMP error into a socket error.
 */
void
tcp_err(int err, unsigned char *header, unsigned long daddr,
	unsigned long saddr, struct inet_protocol *protocol)
{
	struct tcphdr *th;
	struct sock *sk;
	struct iphdr *iph=(struct iphdr *)header;

	header+=4*iph->ihl;	/* skip IP header: ihl counts 32-bit words */

	DPRINTF((DBG_TCP, "TCP: tcp_err(%d, hdr=%X, daddr=%X saddr=%X, protocol=%X)\n",
		err, header, daddr, saddr, protocol));

	th =(struct tcphdr *)header;
	sk = get_sock(&tcp_prot, th->source, daddr, th->dest, saddr);
	print_th(th);

	if (sk == NULL) return;	/* not one of our connections */

	/* a negative err is a raw errno pushed up from below */
	if(err<0)
	{
		sk->err = -err;
		sk->error_report(sk);
		return;
	}

	if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8)) {
		/* Source quench: shrink the congestion window a little,
		 * but never below 4 segments. */
		if (sk->cong_window > 4) sk->cong_window--;
		return;
	}

	DPRINTF((DBG_TCP, "TCP: icmp_err got error\n"));
	sk->err = icmp_err_convert[err & 0xff].errno;

	/* Fatal errors abort a connection still in SYN_SENT; an
	 * established connection merely records the error. */
	if (icmp_err_convert[err & 0xff].fatal) {
		if (sk->state == TCP_SYN_SENT) {
			sk->state = TCP_CLOSE;
			sk->error_report(sk);
		}
	}
	return;
}
301
302
303
304
305
306
307
/*
 *	Count how many bytes can be read from the socket without
 *	blocking.  Walks the circular receive queue summing in-sequence
 *	data, stopping at a sequence gap or (once data was found) at a
 *	PSH boundary.  The out-of-band byte is excluded unless urgent
 *	data is delivered inline.
 */
static int
tcp_readable(struct sock *sk)
{
	unsigned long counted;
	unsigned long amount;
	struct sk_buff *skb;
	int count=0;
	int sum;
	unsigned long flags;

	DPRINTF((DBG_TCP, "tcp_readable(sk=%X)\n", sk));
	if(sk && sk->debug)
		printk("tcp_readable: %p - ",sk);

	if (sk == NULL || skb_peek(&sk->rqueue) == NULL)
	{
		if(sk && sk->debug)
			printk("empty\n");
		return(0);
	}

	counted = sk->copied_seq+1;	/* next sequence number the reader wants */
	amount = 0;

	save_flags(flags);	/* the queue is also touched at interrupt time */
	cli();
	skb =(struct sk_buff *)sk->rqueue;

	/* Walk the circular receive queue. */
	do {
		count++;
#ifdef OLD
		/* historical guard against a corrupted queue */
		if (count > 20) {
			restore_flags(flags);
			DPRINTF((DBG_TCP, "tcp_readable, more than 20 packets without a psh\n"));
			printk("tcp_read: possible read_queue corruption.\n");
			return(amount);
		}
#endif
		if (before(counted, skb->h.th->seq))	/* sequence gap: stop counting */
			break;
		sum = skb->len -(counted - skb->h.th->seq);	/* unread bytes in this skb */
		if (skb->h.th->syn)
			sum++;		/* SYN occupies one sequence number */
		if (sum >= 0) {
			amount += sum;
			if (skb->h.th->syn) amount--;	/* ...but carries no data */
			counted += sum;
		}
		if (amount && skb->h.th->psh) break;	/* report up to the push */
		skb =(struct sk_buff *)skb->next;
	} while(skb != sk->rqueue);
	/* Don't count the urgent byte unless it is delivered inline. */
	if (amount && !sk->urginline && sk->urg_data &&
	    (sk->urg_seq - sk->copied_seq) <= (counted - sk->copied_seq))
		amount--;
	restore_flags(flags);
	DPRINTF((DBG_TCP, "tcp readable returning %d bytes\n", amount));
	if(sk->debug)
		printk("got %lu bytes.\n",amount);
	return(amount);
}
370
371
372
373
374
375
376
/*
 *	select() support for TCP sockets.  Registers the caller on the
 *	socket's wait queue and reports readiness for read (SEL_IN),
 *	write (SEL_OUT) or exceptional/urgent conditions (SEL_EX).
 *	Returns 1 when ready, 0 otherwise.
 */
static int
tcp_select(struct sock *sk, int sel_type, select_table *wait)
{
	DPRINTF((DBG_TCP, "tcp_select(sk=%X, sel_type = %d, wait = %X)\n",
		sk, sel_type, wait));

	sk->inuse = 1;	/* lock the socket */
	switch(sel_type) {
	case SEL_IN:
		if(sk->debug)
			printk("select in");
		select_wait(sk->sleep, wait);
		if(sk->debug)
			printk("-select out");
		if (skb_peek(&sk->rqueue) != NULL) {
			/* a listening socket is readable when a
			 * connection is pending */
			if (sk->state == TCP_LISTEN || tcp_readable(sk)) {
				release_sock(sk);
				if(sk->debug)
					printk("-select ok data\n");
				return(1);
			}
		}
		if (sk->err != 0)	/* a pending error is "readable" too */
		{
			release_sock(sk);
			if(sk->debug)
				printk("-select ok error");
			return(1);
		}
		if (sk->shutdown & RCV_SHUTDOWN) {
			/* read side shut down: read would return EOF */
			release_sock(sk);
			if(sk->debug)
				printk("-select ok down\n");
			return(1);
		} else {
			release_sock(sk);
			if(sk->debug)
				printk("-select fail\n");
			return(0);
		}
	case SEL_OUT:
		select_wait(sk->sleep, wait);
		if (sk->shutdown & SEND_SHUTDOWN) {
			DPRINTF((DBG_TCP,
				"write select on shutdown socket.\n"));

			/* send side is closed: never writable */
			release_sock(sk);
			return(0);
		}

		/* Writable only when a full MSS of buffer space is
		 * free, and never while the handshake is still in
		 * progress. */
		if (sk->prot->wspace(sk) >= sk->mss) {
			release_sock(sk);
			/* still connecting: not writable yet */
			if (sk->state == TCP_SYN_RECV ||
			    sk->state == TCP_SYN_SENT) return(0);
			return(1);
		}
		DPRINTF((DBG_TCP,
			"tcp_select: sleeping on write sk->wmem_alloc = %d, "
			"sk->packets_out = %d\n"
			"sk->wback = %X, sk->wfront = %X\n"
			"sk->write_seq = %u, sk->window_seq=%u\n",
			sk->wmem_alloc, sk->packets_out,
			sk->wback, sk->wfront,
			sk->write_seq, sk->window_seq));

		release_sock(sk);
		return(0);
	case SEL_EX:
		select_wait(sk->sleep,wait);
		/* exceptional: error pending or urgent data present */
		if (sk->err || sk->urg_data) {
			release_sock(sk);
			return(1);
		}
		release_sock(sk);
		return(0);
	}

	release_sock(sk);
	return(0);
}
464
465
/*
 *	ioctl() handler for TCP sockets:
 *	  DDIOCSDBG  - set the debugging level,
 *	  TIOCINQ    - bytes readable without blocking,
 *	  SIOCATMARK - is the read pointer at the urgent mark,
 *	  TIOCOUTQ   - free space left in the send buffer.
 */
int
tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int err;
	DPRINTF((DBG_TCP, "tcp_ioctl(sk=%X, cmd = %d, arg=%X)\n", sk, cmd, arg));
	switch(cmd) {
	case DDIOCSDBG:
		return(dbg_ioctl((void *) arg, DBG_TCP));

	case TIOCINQ:
#ifdef FIXME
	case FIONREAD:
#endif
		{
			unsigned long amount;

			/* no byte count on a listening socket */
			if (sk->state == TCP_LISTEN) return(-EINVAL);

			sk->inuse = 1;
			amount = tcp_readable(sk);
			release_sock(sk);
			DPRINTF((DBG_TCP, "returning %d\n", amount));
			err=verify_area(VERIFY_WRITE,(void *)arg,
					sizeof(unsigned long));
			if(err)
				return err;
			put_fs_long(amount,(unsigned long *)arg);
			return(0);
		}
	case SIOCATMARK:
		{
			/* true when the next byte to read is the urgent byte */
			int answ = sk->urg_data && sk->urg_seq == sk->copied_seq+1;

			err = verify_area(VERIFY_WRITE,(void *) arg,
					  sizeof(unsigned long));
			if (err)
				return err;
			put_fs_long(answ,(int *) arg);
			return(0);
		}
	case TIOCOUTQ:
		{
			unsigned long amount;

			if (sk->state == TCP_LISTEN) return(-EINVAL);
			amount = sk->prot->wspace(sk);
			err=verify_area(VERIFY_WRITE,(void *)arg,
					sizeof(unsigned long));
			if(err)
				return err;
			put_fs_long(amount,(unsigned long *)arg);
			return(0);
		}
	default:
		return(-EINVAL);
	}
}
523
524
525
/*
 *	Compute the TCP checksum: a 16-bit one's-complement sum over the
 *	pseudo-header (source/destination address, protocol, length) and
 *	the TCP header plus data.  Implemented with i386 inline assembly,
 *	accumulating 32 bits at a time and folding to 16 bits at the end.
 */
unsigned short
tcp_check(struct tcphdr *th, int len,
	  unsigned long saddr, unsigned long daddr)
{
	unsigned long sum;

	if (saddr == 0) saddr = my_addr();
	print_th(th);
	/* pseudo-header: daddr + saddr + (byteswapped len | proto), with carry */
	__asm__("\t addl %%ecx,%%ebx\n"
		"\t adcl %%edx,%%ebx\n"
		"\t adcl $0, %%ebx\n"
		: "=b"(sum)
		: "0"(daddr), "c"(saddr), "d"((ntohs(len) << 16) + IPPROTO_TCP*256)
		: "cx","bx","dx" );

	/* sum the body a 32-bit word at a time */
	if (len > 3) {
		__asm__("\tclc\n"
			"1:\n"
			"\t lodsl\n"
			"\t adcl %%eax, %%ebx\n"
			"\t loop 1b\n"
			"\t adcl $0, %%ebx\n"
			: "=b"(sum) , "=S"(th)
			: "0"(sum), "c"(len/4) ,"1"(th)
			: "ax", "cx", "bx", "si" );
	}

	/* fold the 32-bit accumulator into 16 bits */
	__asm__("\t movl %%ebx, %%ecx\n"
		"\t shrl $16,%%ecx\n"
		"\t addw %%cx, %%bx\n"
		"\t adcw $0, %%bx\n"
		: "=b"(sum)
		: "0"(sum)
		: "bx", "cx");

	/* add a trailing 16-bit word, if any */
	if ((len & 2) != 0) {
		__asm__("\t lodsw\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum), "=S"(th)
			: "0"(sum) ,"1"(th)
			: "si", "ax", "bx");
	}

	/* add a trailing odd byte, if any */
	if ((len & 1) != 0) {
		__asm__("\t lodsb\n"
			"\t movb $0,%%ah\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum)
			: "0"(sum) ,"S"(th)
			: "si", "ax", "bx");
	}

	/* one's complement of the folded sum is the checksum */
	return((~sum) & 0xffff);
}
586
587
/*
 *	Fill in the checksum field of an outgoing TCP segment.  The
 *	field must be zero while the sum is being computed.
 */
void tcp_send_check(struct tcphdr *th, unsigned long saddr,
		unsigned long daddr, int len, struct sock *sk)
{
	th->check = 0;
	th->check = tcp_check(th, len, saddr, daddr);
	return;
}
595
/*
 *	Queue or transmit a fully-built TCP segment.  Sanity-checks the
 *	buffer, checksums it, then either transmits immediately or
 *	appends to the write queue when the peer's window, a pending
 *	retransmit, or the congestion window forbids sending now.
 */
static void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
{
	int size;
	struct tcphdr * th = skb->h.th;

	/* length of the TCP portion of this buffer */
	size = skb->len - ((unsigned char *) th - skb->data);

	/* a TCP header must fit and cannot exceed the buffer */
	if (size < sizeof(struct tcphdr) || size > skb->len) {
		printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
			skb, skb->data, th, skb->len);
		kfree_skb(skb, FREE_WRITE);
		return;
	}

	/* a dataless segment is only legal if it carries SYN or FIN */
	if (size == sizeof(struct tcphdr)) {
		if(!th->syn && !th->fin) {
			printk("tcp_send_skb: attempt to queue a bogon.\n");
			kfree_skb(skb,FREE_WRITE);
			return;
		}
	}

	tcp_send_check(th, sk->saddr, sk->daddr, size, sk);

	/* sequence number of the byte just past this segment's data */
	skb->h.seq = ntohl(th->seq) + size - 4*th->doff;
	if (after(skb->h.seq, sk->window_seq) ||
	    (sk->retransmits && sk->timeout == TIME_WRITE) ||
	    sk->packets_out >= sk->cong_window) {
		/* can't send now: append to the tail of the write queue */
		DPRINTF((DBG_TCP, "sk->cong_window = %d, sk->packets_out = %d\n",
			sk->cong_window, sk->packets_out));
		DPRINTF((DBG_TCP, "sk->write_seq = %d, sk->window_seq = %d\n",
			sk->write_seq, sk->window_seq));
		skb->next = NULL;
		skb->magic = TCP_WRITE_QUEUE_MAGIC;
		if (sk->wback == NULL) {
			sk->wfront = skb;
		} else {
			sk->wback->next = skb;
		}
		sk->wback = skb;
		/* window closed with nothing in flight: arm the
		 * zero-window probe timer */
		if (before(sk->window_seq, sk->wfront->h.seq) &&
		    sk->send_head == NULL &&
		    sk->ack_backlog == 0)
			reset_timer(sk, TIME_PROBE0, sk->rto);
	} else {
		sk->sent_seq = sk->write_seq;
		sk->prot->queue_xmit(sk, skb->dev, skb, 0);
	}
}
650
/*
 *	Atomically detach and return the pending partial packet (if
 *	any), cancelling its flush timer.  Returns NULL when nothing
 *	is pending.
 */
struct sk_buff * tcp_dequeue_partial(struct sock * sk)
{
	struct sk_buff * skb;
	unsigned long flags;

	save_flags(flags);	/* sk->partial is shared with timer context */
	cli();
	skb = sk->partial;
	if (skb) {
		sk->partial = NULL;
		del_timer(&sk->partial_timer);
	}
	restore_flags(flags);
	return skb;
}
666
/*
 *	Flush any pending partial packet(s) out to the network.  Also
 *	serves as the partial-flush timer callback (sk arrives via the
 *	timer's data field, cast through the function pointer).
 */
static void tcp_send_partial(struct sock *sk)
{
	struct sk_buff *skb;

	if (sk == NULL)
		return;
	while ((skb = tcp_dequeue_partial(sk)) != NULL)
		tcp_send_skb(sk, skb);
}
676
/*
 *	Park a not-yet-full segment on the socket so later writes can
 *	append to it, arming a timer that flushes it if nothing more
 *	arrives.  Any previously parked packet is transmitted first.
 */
void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk)
{
	struct sk_buff * tmp;
	unsigned long flags;

	save_flags(flags);
	cli();
	tmp = sk->partial;
	if (tmp)
		del_timer(&sk->partial_timer);	/* old packet's timer is obsolete */
	sk->partial = skb;
	/* NOTE(review): expires is set to a bare HZ tick count --
	 * presumably relative under this kernel's timer API; verify
	 * against add_timer()'s contract. */
	sk->partial_timer.expires = HZ;
	sk->partial_timer.function = (void (*)(unsigned long)) tcp_send_partial;
	sk->partial_timer.data = (unsigned long) sk;
	add_timer(&sk->partial_timer);
	restore_flags(flags);
	if (tmp)
		tcp_send_skb(sk, tmp);	/* send the displaced packet now */
}
696
697
698
/*
 *	Build and transmit a bare ACK segment carrying the given
 *	sequence and acknowledgement numbers.  On allocation failure the
 *	ACK is left pending via ack_backlog and retried off a short
 *	timer instead of being lost.
 */
static void
tcp_send_ack(unsigned long sequence, unsigned long ack,
	     struct sock *sk,
	     struct tcphdr *th, unsigned long daddr)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	struct device *dev = NULL;
	int tmp;

	if(sk->zapped)
		return;		/* connection was reset: stay silent */

	/* Grab a buffer; if that fails, remember to ACK later. */
	buff = sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL) {
		/* force the ACK to be sent from the timer instead */
		sk->ack_backlog++;
		if (sk->timeout != TIME_WRITE && tcp_connected(sk->state)) {
			reset_timer(sk, TIME_WRITE, 10);
		}
		if (inet_debug == DBG_SLIP) printk("\rtcp_ack: malloc failed\n");
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_ACK_SIZE;
	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;
	t1 =(struct tcphdr *) buff->data;

	/* Put in the IP header and routing information. */
	tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
				IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0) {
		buff->free=1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		if (inet_debug == DBG_SLIP) printk("\rtcp_ack: build_header failed\n");
		return;
	}
	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)t1 +tmp);

	/* start from the incoming header, then swap the roles */
	memcpy(t1, th, sizeof(*t1));

	t1->dest = th->source;
	t1->source = th->dest;
	t1->seq = ntohl(sequence);
	t1->ack = 1;
	sk->window = tcp_select_window(sk);
	t1->window = ntohs(sk->window);
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	t1->fin = 0;
	if (ack == sk->acked_seq) {
		/* everything received so far is now being acknowledged */
		sk->ack_backlog = 0;
		sk->bytes_rcv = 0;
		sk->ack_timed = 0;
		if (sk->send_head == NULL && sk->wfront == NULL && sk->timeout == TIME_WRITE)
		{
			/* nothing in flight: switch to keepalive or stop */
			if(sk->keepopen)
				reset_timer(sk,TIME_KEEPOPEN,TCP_TIMEOUT_LEN);
			else
				delete_timer(sk);
		}
	}
	t1->ack_seq = ntohl(ack);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), sk);
	if (sk->debug)
		printk("\rtcp_ack: seq %lx ack %lx\n", sequence, ack);
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
780
781
782
/*
 *	Fill in a TCP header from the socket's header template
 *	(dummy_th).  'push' == 0 means this segment completes the
 *	caller's write, so PSH is set.  Returns the header size.
 */
static int
tcp_build_header(struct tcphdr *th, struct sock *sk, int push)
{
	memcpy(th,(void *) &(sk->dummy_th), sizeof(*th));
	th->seq = htonl(sk->write_seq);
	th->psh =(push == 0) ? 1 : 0;	/* push on the last segment of a write */
	th->doff = sizeof(*th)/4;
	th->ack = 1;
	th->fin = 0;
	/* this header carries our current ACK, so nothing is backlogged */
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	sk->ack_timed = 0;
	th->ack_seq = htonl(sk->acked_seq);
	sk->window = tcp_select_window(sk);
	th->window = htons(sk->window);

	return(sizeof(*th));
}
803
804
805
806
807
/*
 *	write()/send() for TCP.  Copies user data into kernel buffers,
 *	builds segments of at most one MSS and hands them to
 *	tcp_send_skb().  Undersized segments may be parked as a
 *	"partial" packet so later writes can fill them (Nagle-style
 *	coalescing).  Returns the number of bytes accepted, or a
 *	negative error if nothing was copied.
 */
static int
tcp_write(struct sock *sk, unsigned char *from,
	  int len, int nonblock, unsigned flags)
{
	int copied = 0;
	int copy;
	int tmp;
	struct sk_buff *skb;
	struct sk_buff *send_tmp;
	unsigned char *buff;
	struct proto *prot;
	struct device *dev = NULL;

	DPRINTF((DBG_TCP, "tcp_write(sk=%X, from=%X, len=%d, nonblock=%d, flags=%X)\n",
		sk, from, len, nonblock, flags));

	sk->inuse=1;	/* lock the socket */
	prot = sk->prot;
	while(len > 0) {
		if (sk->err) {	/* report a pending error first */
			release_sock(sk);
			if (copied) return(copied);
			tmp = -sk->err;
			sk->err = 0;
			return(tmp);
		}

		/* writing after shutdown(SEND_SHUTDOWN) is a broken pipe */
		if (sk->shutdown & SEND_SHUTDOWN) {
			release_sock(sk);
			sk->err = EPIPE;
			if (copied) return(copied);
			sk->err = 0;
			return(-EPIPE);
		}

		/* Wait for a connection to finish (or fail) before
		 * accepting data. */
		while(sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) {
			if (sk->err) {
				release_sock(sk);
				if (copied) return(copied);
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV) {
				/* not connecting and not connected: give up */
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 1\n"));
				if (copied) return(copied);

				if (sk->err) {
					tmp = -sk->err;
					sk->err = 0;
					return(tmp);
				}

				if (sk->keepopen) {
					send_sig(SIGPIPE, current, 0);
				}
				return(-EPIPE);
			}

			if (nonblock || copied) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 2\n"));
				if (copied) return(copied);
				return(-EAGAIN);
			}

			release_sock(sk);
			cli();	/* guard the state re-test plus sleep */
			if (sk->state != TCP_ESTABLISHED &&
			    sk->state != TCP_CLOSE_WAIT && sk->err == 0) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					DPRINTF((DBG_TCP, "tcp_write: return 3\n"));
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
		}

		/* First try to append to an existing partial packet. */
		if ((skb = tcp_dequeue_partial(sk)) != NULL) {
			int hdrlen;

			/* IP header plus fixed TCP header */
			hdrlen = ((unsigned long)skb->h.th - (unsigned long)skb->data)
				+ sizeof(struct tcphdr);

			/* add more data (OOB data goes in its own packet) */
			if (!(flags & MSG_OOB)) {
				copy = min(sk->mss - (skb->len - hdrlen), len);

				/* should be impossible for a queued partial */
				if (copy <= 0) {
					printk("TCP: **bug**: \"copy\" <= 0!!\n");
					copy = 0;
				}

				memcpy_fromfs(skb->data + skb->len, from, copy);
				skb->len += copy;
				from += copy;
				copied += copy;
				len -= copy;
				sk->write_seq += copy;
			}
			/* transmit if full, OOB, or nothing else in flight;
			 * otherwise park it again */
			if ((skb->len - hdrlen) >= sk->mss ||
			    (flags & MSG_OOB) ||
			    !sk->packets_out)
				tcp_send_skb(sk, skb);
			else
				tcp_enqueue_partial(skb, sk);
			continue;
		}

		/* Size the next segment: normally one MSS, never more
		 * than the sender window or the remaining user data. */
		copy = sk->window_seq - sk->write_seq;
		if (copy <= 0 || copy < (sk->max_window >> 1) || copy > sk->mss)
			copy = sk->mss;
		if (copy > len)
			copy = len;

		/* An undersized segment gets an MSS-sized buffer so it
		 * can later grow into a partial packet. */
		send_tmp = NULL;
		if (copy < sk->mss && !(flags & MSG_OOB)) {
			/* release the socket: wmalloc may sleep */
			release_sock(sk);
			skb = prot->wmalloc(sk, sk->mtu + 128 + prot->max_header + sizeof(*skb), 0, GFP_KERNEL);
			sk->inuse = 1;
			send_tmp = skb;
		} else {
			/* release the socket: wmalloc may sleep */
			release_sock(sk);
			skb = prot->wmalloc(sk, copy + prot->max_header + sizeof(*skb), 0, GFP_KERNEL);
			sk->inuse = 1;
		}

		/* If we didn't get any memory, we need to sleep. */
		if (skb == NULL) {
			if (nonblock ) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 4\n"));
				if (copied) return(copied);
				return(-EAGAIN);
			}

			/* wait for send memory to be freed by ACKs */
			tmp = sk->wmem_alloc;
			release_sock(sk);
			cli();
			/* only sleep if no memory was freed meanwhile */
			if (tmp <= sk->wmem_alloc &&
			    (sk->state == TCP_ESTABLISHED||sk->state == TCP_CLOSE_WAIT)
			    && sk->err == 0) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					DPRINTF((DBG_TCP, "tcp_write: return 5\n"));
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
			continue;
		}

		skb->len = 0;
		skb->sk = sk;
		skb->free = 0;

		buff = skb->data;

		/* Build the IP header (also resolves the route/device). */
		tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev,
					 IPPROTO_TCP, sk->opt, skb->mem_len,sk->ip_tos,sk->ip_ttl);
		if (tmp < 0 ) {
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 6\n"));
			if (copied) return(copied);
			return(tmp);
		}
		skb->len += tmp;
		skb->dev = dev;
		buff += tmp;
		skb->h.th =(struct tcphdr *) buff;
		/* PSH is set when this segment finishes the write (len == copy) */
		tmp = tcp_build_header((struct tcphdr *)buff, sk, len-copy);
		if (tmp < 0) {
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 7\n"));
			if (copied) return(copied);
			return(tmp);
		}

		if (flags & MSG_OOB) {
			/* urgent pointer covers the whole chunk */
			((struct tcphdr *)buff)->urg = 1;
			((struct tcphdr *)buff)->urg_ptr = ntohs(copy);
		}
		skb->len += tmp;
		memcpy_fromfs(buff+tmp, from, copy);	/* pull in the user data */

		from += copy;
		copied += copy;
		len -= copy;
		skb->len += copy;
		skb->free = 0;
		sk->write_seq += copy;

		/* hold an undersized segment back while others are in flight */
		if (send_tmp != NULL && sk->packets_out) {
			tcp_enqueue_partial(send_tmp, sk);
			continue;
		}
		tcp_send_skb(sk, skb);
	}
	sk->err = 0;

	/* Flush the partial packet now if nothing is in flight, or if
	 * Nagle is disabled and the window still has room. */
	if(sk->partial &&
	   ((!sk->packets_out)
	    || (sk->nonagle && before(sk->write_seq , sk->window_seq))
	   ))
		tcp_send_partial(sk);

	release_sock(sk);
	DPRINTF((DBG_TCP, "tcp_write: return 8\n"));
	return(copied);
}
1079
1080
/*
 *	sendto() on a TCP socket: the supplied address must match the
 *	connected peer exactly, after which this is just tcp_write().
 */
static int
tcp_sendto(struct sock *sk, unsigned char *from,
	   int len, int nonblock, unsigned flags,
	   struct sockaddr_in *addr, int addr_len)
{
	struct sockaddr_in sin;

	if (addr_len < sizeof(sin)) return(-EINVAL);
	memcpy_fromfs(&sin, addr, sizeof(sin));
	if (sin.sin_family && sin.sin_family != AF_INET) return(-EINVAL);
	/* address must be the peer we're already connected to */
	if (sin.sin_port != sk->dummy_th.dest) return(-EINVAL);
	if (sin.sin_addr.s_addr != sk->daddr) return(-EINVAL);
	return(tcp_write(sk, from, len, nonblock, flags));
}
1095
1096
/*
 *	Send a window-update ACK after the reader has freed receive
 *	space.  Only acts when ACKs are actually backlogged; on buffer
 *	shortage the attempt is retried from a short write timer.
 */
static void
tcp_read_wakeup(struct sock *sk)
{
	int tmp;
	struct device *dev = NULL;
	struct tcphdr *t1;
	struct sk_buff *buff;

	DPRINTF((DBG_TCP, "in tcp read wakeup\n"));
	if (!sk->ack_backlog) return;	/* nothing owed to the peer */

	/* Grab a buffer for the ACK; retry later if memory is tight. */
	buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
	if (buff == NULL) {
		/* try again shortly */
		reset_timer(sk, TIME_WRITE, 10);
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_ACK_SIZE;
	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;

	/* Put in the IP header and routing information. */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
			       IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0) {
		buff->free=1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		return;
	}

	buff->len += tmp;
	t1 =(struct tcphdr *)(buff->data +tmp);

	/* header from the socket template, then a bare ACK */
	memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
	t1->seq = htonl(sk->sent_seq);
	t1->ack = 1;
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	sk->ack_backlog = 0;	/* this ACK settles the backlog */
	sk->bytes_rcv = 0;
	sk->window = tcp_select_window(sk);	/* advertise the new window */
	t1->window = ntohs(sk->window);
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
1160
1161
1162
1163
1164
1165
1166
1167
/*
 *	Release receive buffers the user has fully consumed and, if
 *	that freed noticeable space, arrange a window update: sent
 *	immediately when at least an extra MTU opened up, otherwise
 *	piggy-backed off a short ACK timer.
 */
static void
cleanup_rbuf(struct sock *sk)
{
	unsigned long flags;
	int left;
	struct sk_buff *skb;

	if(sk->debug)
		printk("cleaning rbuf for sk=%p\n", sk);

	save_flags(flags);	/* the queue is touched at interrupt time too */
	cli();

	left = sk->prot->rspace(sk);	/* receive space before freeing */

	/* Pop fully-consumed ("used") buffers off the queue head. */
	while((skb=skb_peek(&sk->rqueue)) != NULL )
	{
		if (!skb->used)
			break;
		skb_unlink(skb);
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
	}

	restore_flags(flags);

	DPRINTF((DBG_TCP, "sk->window left = %d, sk->prot->rspace(sk)=%d\n",
		sk->window - sk->bytes_rcv, sk->prot->rspace(sk)));

	if(sk->debug)
		printk("sk->rspace = %lu, was %d\n", sk->prot->rspace(sk),
			left);
	if (sk->prot->rspace(sk) != left)
	{
		/* We freed memory, so the peer may be told about the
		 * bigger window. */
		sk->ack_backlog++;
		/* Only push the window update out immediately when at
		 * least an extra MTU of space appeared; otherwise let
		 * the delayed-ACK timer carry it. */
		if ((sk->prot->rspace(sk) > (sk->window - sk->bytes_rcv + sk->mtu))) {
			/* super-duper wakeup: send the update now */
			tcp_read_wakeup(sk);
		} else {
			/* ensure an ACK within TCP_ACK_TIME, without
			 * postponing an already-sooner timer */
			int was_active = del_timer(&sk->timer);
			if (!was_active || TCP_ACK_TIME < sk->timer.expires) {
				reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
			} else
				add_timer(&sk->timer);
		}
	}
}
1244
1245
1246
/*
 *	Read the single out-of-band byte (the MSG_OOB path of recv).
 *	Returns 1 with the byte copied out, 0 on EOF/shutdown, or a
 *	negative error.  Unless 'nonblock', waits while the urgent byte
 *	has been announced but not yet received (URG_NOTYET).
 */
static int
tcp_read_urg(struct sock * sk, int nonblock,
	     unsigned char *to, int len, unsigned flags)
{
	struct wait_queue wait = { current, NULL };

	while (len > 0) {
		/* no urgent data, already consumed, or delivered inline */
		if (sk->urginline || !sk->urg_data || sk->urg_data == URG_READ)
			return -EINVAL;
		if (sk->urg_data & URG_VALID) {
			char c = sk->urg_data;	/* low byte holds the data */
			if (!(flags & MSG_PEEK))
				sk->urg_data = URG_READ;	/* mark consumed */
			put_fs_byte(c, to);
			return 1;
		}

		if (sk->err) {
			int tmp = -sk->err;
			sk->err = 0;
			return tmp;
		}

		if (sk->state == TCP_CLOSE || sk->done) {
			if (!sk->done) {
				sk->done = 1;
				return 0;	/* first read after close: EOF */
			}
			return -ENOTCONN;
		}

		if (sk->shutdown & RCV_SHUTDOWN) {
			sk->done = 1;
			return 0;
		}

		if (nonblock)
			return -EAGAIN;

		if (current->signal & ~current->blocked)
			return -ERESTARTSYS;

		/* wait for the announced urgent byte to arrive */
		current->state = TASK_INTERRUPTIBLE;
		add_wait_queue(sk->sleep, &wait);
		if ((sk->urg_data & URG_NOTYET) && sk->err == 0 &&
		    !(sk->shutdown & RCV_SHUTDOWN))
			schedule();
		remove_wait_queue(sk->sleep, &wait);
		current->state = TASK_RUNNING;
	}
	return 0;
}
1299
1300
1301
/*
 *	recv()/read() for TCP.  Copies in-sequence data from the
 *	receive queue to user space, handling MSG_PEEK (via a shadow
 *	sequence pointer), skipping the urgent byte, and blocking until
 *	data arrives.  Returns bytes copied or a negative error.
 */
static int tcp_read(struct sock *sk, unsigned char *to,
	int len, int nonblock, unsigned flags)
{
	struct wait_queue wait = { current, NULL };
	int copied = 0;
	unsigned long peek_seq;
	unsigned long *seq;
	unsigned long used;
	int err;

	if (len == 0)
		return 0;

	if (len < 0)
		return -EINVAL;

	err = verify_area(VERIFY_WRITE, to, len);
	if (err)
		return err;

	/* can't read from a listening socket */
	if (sk->state == TCP_LISTEN)
		return -ENOTCONN;

	/* urgent data takes a separate path */
	if (flags & MSG_OOB)
		return tcp_read_urg(sk, nonblock, to, len, flags);

	/* MSG_PEEK advances a private copy of copied_seq instead of
	 * the real one */
	peek_seq = sk->copied_seq;
	seq = &sk->copied_seq;
	if (flags & MSG_PEEK)
		seq = &peek_seq;

	add_wait_queue(sk->sleep, &wait);
	sk->inuse = 1;
	while (len > 0) {
		struct sk_buff * skb;
		unsigned long offset;

		/* stop short of the urgent byte: it is never returned
		 * mixed with normal data */
		if (copied && sk->urg_data && sk->urg_seq == 1+*seq)
			break;

		current->state = TASK_INTERRUPTIBLE;

		/* locate the skb holding the next byte to read */
		skb = sk->rqueue;
		do {
			if (!skb)
				break;
			if (before(1+*seq, skb->h.th->seq))
				break;	/* gap in sequence space */
			offset = 1 + *seq - skb->h.th->seq;
			if (skb->h.th->syn)
				offset--;	/* SYN takes a slot but carries no data */
			if (offset < skb->len)
				goto found_ok_skb;
			if (!(flags & MSG_PEEK))
				skb->used = 1;	/* fully consumed: cleanup may free it */
			skb = (struct sk_buff *)skb->next;
		} while (skb != sk->rqueue);

		if (copied)
			break;	/* return what we have so far */

		if (sk->err) {
			copied = -sk->err;
			sk->err = 0;
			break;
		}

		if (sk->state == TCP_CLOSE) {
			if (!sk->done) {
				sk->done = 1;
				break;	/* EOF */
			}
			copied = -ENOTCONN;
			break;
		}

		if (sk->shutdown & RCV_SHUTDOWN) {
			sk->done = 1;
			break;	/* EOF */
		}

		if (nonblock) {
			copied = -EAGAIN;
			break;
		}

		/* nothing readable yet: ack consumed data and sleep */
		cleanup_rbuf(sk);
		release_sock(sk);
		schedule();
		sk->inuse = 1;

		if (current->signal & ~current->blocked) {
			copied = -ERESTARTSYS;
			break;
		}
		continue;

	found_ok_skb:
		/* how much of this buffer can we hand over? */
		used = skb->len - offset;
		if (len < used)
			used = len;
		/* trim the chunk so the urgent byte is skipped, or stop
		 * right before it */
		if (sk->urg_data) {
			unsigned long urg_offset = sk->urg_seq - (1 + *seq);
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sk->urginline) {
						++*seq;		/* step over the urgent byte */
						offset++;
						used--;
					}
				} else
					used = urg_offset;
			}
		}

		/* copy from just past the TCP header */
		memcpy_tofs(to,((unsigned char *)skb->h.th) +
			skb->h.th->doff*4 + offset, used);
		copied += used;
		len -= used;
		to += used;
		*seq += used;
		if (after(sk->copied_seq+1,sk->urg_seq))
			sk->urg_data = 0;	/* the urgent byte is behind us */
		if (!(flags & MSG_PEEK) && (used + offset >= skb->len))
			skb->used = 1;	/* mark for cleanup_rbuf() */
	}
	remove_wait_queue(sk->sleep, &wait);
	current->state = TASK_RUNNING;

	/* free consumed buffers and send a window update if warranted */
	cleanup_rbuf(sk);
	release_sock(sk);
	DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
	return copied;
}
1444
1445
1446
1447
1448
1449
/*
 *	shutdown() on the send side: queue a FIN after any pending data
 *	and move to FIN_WAIT1/FIN_WAIT2.  Does nothing for the receive
 *	side, or when a FIN has already been sent.
 */
void
tcp_shutdown(struct sock *sk, int how)
{
	struct sk_buff *buff;
	struct tcphdr *t1, *th;
	struct proto *prot;
	int tmp;
	struct device *dev = NULL;

	/* FIN already in flight, or the caller is not shutting down
	 * the send direction */
	if (sk->state == TCP_FIN_WAIT1 || sk->state == TCP_FIN_WAIT2) return;
	if (!(how & SEND_SHUTDOWN)) return;
	sk->inuse = 1;

	/* flush any half-built segment so the FIN goes out last */
	if (sk->partial)
		tcp_send_partial(sk);

	prot =(struct proto *)sk->prot;
	th =(struct tcphdr *)&sk->dummy_th;
	release_sock(sk);	/* the GFP_KERNEL allocation may sleep */
	buff = prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
	if (buff == NULL) return;
	sk->inuse = 1;

	DPRINTF((DBG_TCP, "tcp_shutdown_send buff = %X\n", buff));
	buff->mem_addr = buff;
	buff->mem_len = MAX_RESET_SIZE;
	buff->sk = sk;
	buff->len = sizeof(*t1);
	t1 =(struct tcphdr *) buff->data;

	/* Put in the IP header and routing information. */
	tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
				 IPPROTO_TCP, sk->opt,
				 sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
	if (tmp < 0) {
		/* couldn't build the packet: still advance the state
		 * machine so the close proceeds */
		buff->free=1;
		prot->wfree(sk,buff->mem_addr, buff->mem_len);
		if(sk->state==TCP_ESTABLISHED)
			sk->state=TCP_FIN_WAIT1;
		else
			sk->state=TCP_FIN_WAIT2;
		release_sock(sk);
		DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
		return;
	}

	t1 =(struct tcphdr *)((char *)t1 +tmp);
	buff->len += tmp;
	buff->dev = dev;
	memcpy(t1, th, sizeof(*t1));
	t1->seq = ntohl(sk->write_seq);
	sk->write_seq++;	/* the FIN consumes one sequence number */
	buff->h.seq = sk->write_seq;
	t1->ack = 1;
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->window = ntohs(sk->window=tcp_select_window(sk));
	t1->fin = 1;
	t1->rst = 0;
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

	/* If data is still queued the FIN must follow it through the
	 * write queue; otherwise transmit immediately. */
	if (sk->wback != NULL) {
		buff->free=0;
		buff->next = NULL;
		sk->wback->next = buff;
		sk->wback = buff;
		buff->magic = TCP_WRITE_QUEUE_MAGIC;
	} else {
		sk->sent_seq = sk->write_seq;
		sk->prot->queue_xmit(sk, dev, buff, 0);
	}

	if (sk->state == TCP_ESTABLISHED) sk->state = TCP_FIN_WAIT1;
	else sk->state = TCP_FIN_WAIT2;

	release_sock(sk);
}
1541
1542
/*
 *	recvfrom() on TCP: a plain tcp_read() that additionally copies
 *	the (already known) peer address back to the caller.
 */
static int
tcp_recvfrom(struct sock *sk, unsigned char *to,
	     int to_len, int nonblock, unsigned flags,
	     struct sockaddr_in *addr, int *addr_len)
{
	struct sockaddr_in sin;
	int len;
	int err;
	int result;

	/* Validate the address buffers up front so the copy after a
	 * successful read cannot fail. */
	err = verify_area(VERIFY_WRITE,addr_len,sizeof(long));
	if(err)
		return err;
	len = get_fs_long(addr_len);
	if(len > sizeof(sin))
		len = sizeof(sin);
	err=verify_area(VERIFY_WRITE, addr, len);
	if(err)
		return err;

	result=tcp_read(sk, to, to_len, nonblock, flags);

	if (result < 0) return(result);

	sin.sin_family = AF_INET;
	sin.sin_port = sk->dummy_th.dest;
	sin.sin_addr.s_addr = sk->daddr;

	memcpy_tofs(addr, &sin, len);
	put_fs_long(len, addr_len);
	return(result);
}
1578
1579
1580
/*
 * Send a RST in reply to a segment that arrived for a dead or
 * non-existent connection (RFC 793 "Reset Generation").  Works without
 * a socket: the reply is constructed entirely from the offending
 * segment's header.  saddr/daddr are OUR source/destination for the
 * reply; th is the header being answered.
 */
static void
tcp_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
	  struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	int tmp;

	/* GFP_ATOMIC: we can be called from interrupt/bottom-half context,
	   and a failed allocation simply means no reset is sent. */
	buff = prot->wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
		return;

	DPRINTF((DBG_TCP, "tcp_reset buff = %X\n", buff));
	buff->mem_addr = buff;
	buff->mem_len = MAX_RESET_SIZE;
	buff->len = sizeof(*t1);
	buff->sk = NULL;		/* no owning socket */
	buff->dev = dev;

	t1 = (struct tcphdr *) buff->data;

	/* Lay down the IP (and link-level) header in front of the TCP header. */
	tmp = prot->build_header(buff, saddr, daddr, &dev, IPPROTO_TCP, opt,
				 sizeof(struct tcphdr), tos, ttl);
	if (tmp < 0) {
		buff->free = 1;
		prot->wfree(NULL, buff->mem_addr, buff->mem_len);
		return;
	}
	t1 = (struct tcphdr *)((char *)t1 + tmp);
	buff->len += tmp;
	/* Start from a copy of the offending header, then swap/patch fields. */
	memcpy(t1, th, sizeof(*t1));

	t1->dest = th->source;
	t1->source = th->dest;
	t1->rst = 1;
	t1->window = 0;

	if (th->ack) {
		/* RFC 793: <SEQ=SEG.ACK><CTL=RST>.  th->ack_seq is still in
		   network order here, so it is assigned without conversion. */
		t1->ack = 0;
		t1->seq = th->ack_seq;
		t1->ack_seq = 0;
	} else {
		/* RFC 793: <SEQ=0><ACK=SEG.SEQ+SEG.LEN><CTL=RST,ACK>.
		   NOTE(review): th->seq appears to be in host order by this
		   point (other callers compare it with before()/after()),
		   hence the htonl() — confirm against tcp_rcv. */
		t1->ack = 1;
		if (!th->syn)
			t1->ack_seq = htonl(th->seq);
		else
			t1->ack_seq = htonl(th->seq + 1);	/* SYN occupies one sequence number */
		t1->seq = 0;
	}

	t1->syn = 0;
	t1->urg = 0;
	t1->fin = 0;
	t1->psh = 0;
	t1->doff = sizeof(*t1) / 4;	/* bare header, no options */
	tcp_send_check(t1, saddr, daddr, sizeof(*t1), NULL);
	prot->queue_xmit(NULL, dev, buff, 1);
}
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658 static void
1659 tcp_options(struct sock *sk, struct tcphdr *th)
1660 {
1661 unsigned char *ptr;
1662 int length=(th->doff*4)-sizeof(struct tcphdr);
1663 int mss_seen = 0;
1664
1665 ptr = (unsigned char *)(th + 1);
1666
1667 while(length>0)
1668 {
1669 int opcode=*ptr++;
1670 int opsize=*ptr++;
1671 switch(opcode)
1672 {
1673 case TCPOPT_EOL:
1674 return;
1675 case TCPOPT_NOP:
1676 length-=2;
1677 continue;
1678
1679 default:
1680 if(opsize<=2)
1681 return;
1682 switch(opcode)
1683 {
1684 case TCPOPT_MSS:
1685 if(opsize==4 && th->syn)
1686 {
1687 sk->mtu=min(sk->mtu,ntohs(*(unsigned short *)ptr));
1688 mss_seen = 1;
1689 }
1690 break;
1691
1692 }
1693 ptr+=opsize-2;
1694 length-=opsize;
1695 }
1696 }
1697 if (th->syn) {
1698 if (! mss_seen)
1699 sk->mtu=min(sk->mtu, 536);
1700 }
1701 sk->mss = min(sk->max_window, sk->mtu);
1702 }
1703
/*
 * Classful netmask for an IPv4 address.  Both the argument and the
 * returned mask are in network byte order.
 */
static inline unsigned long default_mask(unsigned long dst)
{
	unsigned long host_addr = ntohl(dst);

	if (IN_CLASSA(host_addr))
		return htonl(IN_CLASSA_NET);
	return IN_CLASSB(host_addr) ? htonl(IN_CLASSB_NET)
				    : htonl(IN_CLASSC_NET);
}
1713
1714
1715
1716
1717
1718
1719
1720
/*
 * Handle an incoming SYN on a listening socket: clone the listener into
 * a new sock in TCP_SYN_RECV state, send the SYN|ACK (with an MSS
 * option), and park the SYN skb on the listener's receive queue so that
 * accept() can find the new socket.
 */
static void
tcp_conn_request(struct sock *sk, struct sk_buff *skb,
		 unsigned long daddr, unsigned long saddr,
		 struct options *opt, struct device *dev)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	unsigned char *ptr;
	struct sock *newsk;
	struct tcphdr *th;
	int tmp;

	DPRINTF((DBG_TCP, "tcp_conn_request(sk = %X, skb = %X, daddr = %X, sadd4= %X, \n"
		" opt = %X, dev = %X)\n",
		sk, skb, daddr, saddr, opt, dev));

	th = skb->h.th;

	/* A dead listener can't accept: answer with a reset instead. */
	if (!sk->dead) {
		sk->data_ready(sk, 0);
	} else {
		DPRINTF((DBG_TCP, "tcp_conn_request on dead socket\n"));
		tcp_reset(daddr, saddr, th, sk->prot, opt, dev, sk->ip_tos, sk->ip_ttl);
		kfree_skb(skb, FREE_READ);
		return;
	}

	/* Backlog full: silently drop; the client will retransmit its SYN. */
	if (sk->ack_backlog >= sk->max_ack_backlog) {
		kfree_skb(skb, FREE_READ);
		return;
	}

	/* Clone the listening socket for the new connection. */
	newsk = (struct sock *) kmalloc(sizeof(struct sock), GFP_ATOMIC);
	if (newsk == NULL) {
		/* Drop; again the retransmitted SYN will retry us. */
		kfree_skb(skb, FREE_READ);
		return;
	}

	DPRINTF((DBG_TCP, "newsk = %X\n", newsk));
	memcpy((void *)newsk, (void *)sk, sizeof(*newsk));
	/* Per-connection state must not be inherited from the listener: */
	newsk->wback = NULL;
	newsk->wfront = NULL;
	newsk->rqueue = NULL;
	newsk->send_head = NULL;
	newsk->send_tail = NULL;
	newsk->back_log = NULL;
	newsk->rtt = TCP_CONNECT_TIME << 3;	/* rtt is kept scaled by 8 */
	newsk->rto = TCP_CONNECT_TIME;
	newsk->mdev = 0;
	newsk->max_window = 0;
	newsk->cong_window = 1;		/* slow start from one segment */
	newsk->cong_count = 0;
	newsk->ssthresh = 0;
	newsk->backoff = 0;
	newsk->blog = 0;
	newsk->intr = 0;
	newsk->proc = 0;
	newsk->done = 0;
	newsk->partial = NULL;
	newsk->pair = NULL;
	newsk->wmem_alloc = 0;
	newsk->rmem_alloc = 0;

	newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;

	newsk->err = 0;
	newsk->shutdown = 0;
	newsk->ack_backlog = 0;
	newsk->acked_seq = skb->h.th->seq + 1;	/* SYN consumes one sequence number */
	newsk->fin_seq = skb->h.th->seq;
	newsk->copied_seq = skb->h.th->seq;
	newsk->state = TCP_SYN_RECV;
	newsk->timeout = 0;
	/* Initial send sequence derived from the clock. */
	newsk->write_seq = jiffies * SEQ_TICK - seq_offset;
	newsk->window_seq = newsk->write_seq;
	newsk->rcv_ack_seq = newsk->write_seq;
	newsk->urg_data = 0;
	newsk->retransmits = 0;
	newsk->destroy = 0;
	newsk->timer.data = (unsigned long)newsk;
	newsk->timer.function = &net_timer;
	newsk->dummy_th.source = skb->h.th->dest;
	newsk->dummy_th.dest = skb->h.th->source;

	/* Swap perspective: their source is our destination. */
	newsk->daddr = saddr;
	newsk->saddr = daddr;

	put_sock(newsk->num, newsk);
	newsk->dummy_th.res1 = 0;
	newsk->dummy_th.doff = 6;	/* header + 4 option bytes, in 32-bit words */
	newsk->dummy_th.fin = 0;
	newsk->dummy_th.syn = 0;
	newsk->dummy_th.rst = 0;
	newsk->dummy_th.psh = 0;
	newsk->dummy_th.ack = 0;
	newsk->dummy_th.urg = 0;
	newsk->dummy_th.res2 = 0;
	newsk->acked_seq = skb->h.th->seq + 1;
	newsk->copied_seq = skb->h.th->seq;

	/* TOS is taken from the incoming packet, TTL from the listener. */
	newsk->ip_ttl = sk->ip_ttl;
	newsk->ip_tos = skb->ip_hdr->tos;

	/* Choose an initial MTU guess; an explicit user MSS wins. */
	if (sk->user_mss)
		newsk->mtu = sk->user_mss;
	else {
#ifdef SUBNETSARELOCAL
	if ((saddr ^ daddr) & default_mask(saddr))
#else
	if ((saddr ^ daddr) & dev->pa_mask)
#endif
		/* Off-net peer: be conservative (576-byte datagrams). */
		newsk->mtu = 576 - HEADER_SIZE;
	else
		newsk->mtu = MAX_WINDOW;
	}

	/* Never exceed what the output device can carry. */
	newsk->mtu = min(newsk->mtu, dev->mtu - HEADER_SIZE);

	/* Let the peer's SYN options (MSS) lower the estimate further. */
	tcp_options(newsk, skb->h.th);

	buff = newsk->prot->wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL) {
		sk->err = -ENOMEM;
		newsk->dead = 1;
		release_sock(newsk);
		kfree_skb(skb, FREE_READ);
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_SYN_SIZE;
	buff->len = sizeof(struct tcphdr) + 4;	/* header plus MSS option */
	buff->sk = newsk;

	t1 = (struct tcphdr *) buff->data;

	/* Build IP header for the SYN|ACK. */
	tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &dev,
			IPPROTO_TCP, NULL, MAX_SYN_SIZE, sk->ip_tos, sk->ip_ttl);

	if (tmp < 0) {
		sk->err = tmp;
		buff->free = 1;
		kfree_skb(buff, FREE_WRITE);
		newsk->dead = 1;
		release_sock(newsk);
		skb->sk = sk;	/* give the skb back to the listener for accounting */
		kfree_skb(skb, FREE_READ);
		return;
	}

	buff->len += tmp;
	t1 = (struct tcphdr *)((char *)t1 + tmp);

	memcpy(t1, skb->h.th, sizeof(*t1));
	buff->h.seq = newsk->write_seq;

	/* SYN|ACK: our ISN, acknowledging their SYN. */
	t1->dest = skb->h.th->source;
	t1->source = newsk->dummy_th.source;
	t1->seq = ntohl(newsk->write_seq++);
	t1->ack = 1;
	newsk->window = tcp_select_window(newsk);
	newsk->sent_seq = newsk->write_seq;
	t1->window = ntohs(newsk->window);
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->psh = 0;
	t1->syn = 1;
	t1->ack_seq = ntohl(skb->h.th->seq + 1);
	t1->doff = sizeof(*t1) / 4 + 1;	/* +1 word for the MSS option */

	/* MSS option: kind 2, length 4, 16-bit MSS in network order. */
	ptr = (unsigned char *)(t1 + 1);
	ptr[0] = 2;
	ptr[1] = 4;
	ptr[2] = ((newsk->mtu) >> 8) & 0xff;
	ptr[3] = (newsk->mtu) & 0xff;

	tcp_send_check(t1, daddr, saddr, sizeof(*t1) + 4, newsk);
	newsk->prot->queue_xmit(newsk, dev, buff, 0);

	reset_timer(newsk, TIME_WRITE, TCP_CONNECT_TIME);
	/* The SYN skb now belongs to the new socket... */
	skb->sk = newsk;

	/* ...so move its memory charge from the listener to the child. */
	sk->rmem_alloc -= skb->mem_len;
	newsk->rmem_alloc += skb->mem_len;

	/* Queue on the LISTENER so accept() can find the new socket. */
	skb_queue_tail(&sk->rqueue, skb);
	sk->ack_backlog++;
	release_sock(newsk);
}
1934
1935
/*
 * Close a TCP socket: flush the receive queue, and depending on the
 * current state either just advance the state machine or build and
 * transmit a FIN.  If unread data is discarded, the FIN carries RST as
 * well (need_reset).  'timeout' non-zero means the caller will not
 * linger, so TIME_WAIT handling is forced immediately.
 */
static void
tcp_close(struct sock *sk, int timeout)
{
	struct sk_buff *buff;
	int need_reset = 0;
	struct tcphdr *t1, *th;
	struct proto *prot;
	struct device *dev = NULL;
	int tmp;

	DPRINTF((DBG_TCP, "tcp_close((struct sock *)%X, %d)\n", sk, timeout));
	sk->inuse = 1;
	sk->keepopen = 1;
	sk->shutdown = SHUTDOWN_MASK;

	if (!sk->dead)
		sk->state_change(sk);

	/* Throw away any unread data; if some of it was never copied to
	   the user, remember to signal a reset to the peer. */
	if (skb_peek(&sk->rqueue) != NULL)
	{
		struct sk_buff *skb;
		if (sk->debug)
			printk("Clean rcv queue\n");
		while ((skb = skb_dequeue(&sk->rqueue)) != NULL)
		{
			if (skb->len > 0 && after(skb->h.th->seq + skb->len + 1, sk->copied_seq))
				need_reset = 1;
			kfree_skb(skb, FREE_READ);
		}
		if (sk->debug)
			printk("Cleaned.\n");
	}
	sk->rqueue = NULL;

	/* Flush any partially filled output buffer first. */
	if (sk->partial) {
		tcp_send_partial(sk);
	}

	switch (sk->state) {
		case TCP_FIN_WAIT1:
		case TCP_FIN_WAIT2:
		case TCP_LAST_ACK:
			/* Our FIN is already out; just (re)arm the close timer. */
			reset_timer(sk, TIME_CLOSE, 4 * sk->rto);
			if (timeout) tcp_time_wait(sk);
			release_sock(sk);
			return;
		case TCP_TIME_WAIT:
			if (timeout) {
				sk->state = TCP_CLOSE;
			}
			release_sock(sk);
			return;
		case TCP_LISTEN:
			sk->state = TCP_CLOSE;
			release_sock(sk);
			return;
		case TCP_CLOSE:
			release_sock(sk);
			return;
		case TCP_CLOSE_WAIT:
		case TCP_ESTABLISHED:
		case TCP_SYN_SENT:
		case TCP_SYN_RECV:
			/* We still owe the peer a FIN: build one. */
			prot = (struct proto *)sk->prot;
			th = (struct tcphdr *)&sk->dummy_th;
			buff = prot->wmalloc(sk, MAX_FIN_SIZE, 1, GFP_ATOMIC);
			if (buff == NULL) {
				/* No memory now: back off, keep the state
				   pre-FIN, and retry via a short close timer. */
				release_sock(sk);
				if (sk->state != TCP_CLOSE_WAIT)
					sk->state = TCP_ESTABLISHED;
				reset_timer(sk, TIME_CLOSE, 100);
				return;
			}
			buff->mem_addr = buff;
			buff->mem_len = MAX_FIN_SIZE;
			buff->sk = sk;
			buff->free = 1;
			buff->len = sizeof(*t1);
			t1 = (struct tcphdr *) buff->data;

			/* Prepend the IP header. */
			tmp = prot->build_header(buff, sk->saddr, sk->daddr, &dev,
						 IPPROTO_TCP, sk->opt,
						 sizeof(struct tcphdr), sk->ip_tos, sk->ip_ttl);
			if (tmp < 0) {
				/* Can't route the FIN: advance the state anyway
				   and rely on the close timer. */
				kfree_skb(buff, FREE_WRITE);
				if (sk->state == TCP_ESTABLISHED)
					sk->state = TCP_FIN_WAIT1;
				else
					sk->state = TCP_FIN_WAIT2;
				reset_timer(sk, TIME_CLOSE, 4 * sk->rto);
				if (timeout)
					tcp_time_wait(sk);
				DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
				release_sock(sk);
				return;
			}

			t1 = (struct tcphdr *)((char *)t1 + tmp);
			buff->len += tmp;
			buff->dev = dev;
			memcpy(t1, th, sizeof(*t1));
			t1->seq = ntohl(sk->write_seq);
			sk->write_seq++;	/* the FIN consumes one sequence number */
			buff->h.seq = sk->write_seq;
			t1->ack = 1;

			/* No more delayed acks: ack everything we have now. */
			sk->delay_acks = 0;
			t1->ack_seq = ntohl(sk->acked_seq);
			t1->window = ntohs(sk->window = tcp_select_window(sk));
			t1->fin = 1;
			t1->rst = need_reset;	/* unread data was discarded */
			t1->doff = sizeof(*t1) / 4;
			tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

			if (sk->wfront == NULL) {
				/* Nothing queued ahead of us: send immediately. */
				sk->sent_seq = sk->write_seq;
				prot->queue_xmit(sk, dev, buff, 0);
			} else {
				/* Data still pending: append the FIN to the
				   write queue so ordering is preserved. */
				reset_timer(sk, TIME_WRITE, sk->rto);
				buff->next = NULL;
				if (sk->wback == NULL) {
					sk->wfront = buff;
				} else {
					sk->wback->next = buff;
				}
				sk->wback = buff;
				buff->magic = TCP_WRITE_QUEUE_MAGIC;
			}

			if (sk->state == TCP_CLOSE_WAIT) {
				sk->state = TCP_FIN_WAIT2;
			} else {
				sk->state = TCP_FIN_WAIT1;
			}
	}
	release_sock(sk);
}
2089
2090
2091
2092
2093
2094
/*
 * Push queued segments from sk->wfront out to the device, as long as
 * they fit inside the peer's advertised window (window_seq), we are not
 * blocked mid-retransmit, and the congestion window allows more
 * outstanding packets.
 */
static void
tcp_write_xmit(struct sock *sk)
{
	struct sk_buff *skb;

	DPRINTF((DBG_TCP, "tcp_write_xmit(sk=%X)\n", sk));

	/* Zapped sockets (reset received) must not transmit. */
	if (sk->zapped)
		return;

	/* Conditions: segment inside the send window; if we are in a
	   retransmit (TIME_WRITE with retransmits set), only segments
	   already acknowledged may be released; congestion window open. */
	while (sk->wfront != NULL &&
	       before(sk->wfront->h.seq, sk->window_seq + 1) &&
	       (sk->retransmits == 0 ||
		sk->timeout != TIME_WRITE ||
		before(sk->wfront->h.seq, sk->rcv_ack_seq + 1))
	       && sk->packets_out < sk->cong_window) {
		skb = sk->wfront;
		IS_SKB(skb);
		sk->wfront = skb->next;
		if (sk->wfront == NULL) sk->wback = NULL;
		skb->next = NULL;
		if (skb->magic != TCP_WRITE_QUEUE_MAGIC) {
			/* Corrupt queue: drop everything rather than chase
			   a bad pointer chain. */
			printk("tcp.c skb with bad magic(%X) on write queue. Squashing "
			       "queue\n", skb->magic);
			sk->wfront = NULL;
			sk->wback = NULL;
			return;
		}
		skb->magic = 0;
		DPRINTF((DBG_TCP, "Sending a packet.\n"));

		/* Already acknowledged while it waited in the queue:
		   just free it instead of sending. */
		if (before(skb->h.seq, sk->rcv_ack_seq + 1)) {
			sk->retransmits = 0;
			kfree_skb(skb, FREE_WRITE);
			if (!sk->dead) sk->write_space(sk);
		} else {
			sk->sent_seq = skb->h.seq;
			sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
		}
	}
}
2139
2140
2141
2142
2143
2144
2145 void
2146 sort_send(struct sock *sk)
2147 {
2148 struct sk_buff *list = NULL;
2149 struct sk_buff *skb,*skb2,*skb3;
2150
2151 for (skb = sk->send_head; skb != NULL; skb = skb2) {
2152 skb2 = (struct sk_buff *)skb->link3;
2153 if (list == NULL || before (skb2->h.seq, list->h.seq)) {
2154 skb->link3 = list;
2155 sk->send_tail = skb;
2156 list = skb;
2157 } else {
2158 for (skb3 = list; ; skb3 = (struct sk_buff *)skb3->link3) {
2159 if (skb3->link3 == NULL ||
2160 before(skb->h.seq, skb3->link3->h.seq)) {
2161 skb->link3 = skb3->link3;
2162 skb3->link3 = skb;
2163 if (skb->link3 == NULL) sk->send_tail = skb;
2164 break;
2165 }
2166 }
2167 }
2168 }
2169 sk->send_head = list;
2170 }
2171
2172
2173
/*
 * Process the acknowledgment / window fields of an incoming segment:
 * validate the ack number, track the peer's window, grow the congestion
 * window, free acknowledged buffers from the retransmit list (updating
 * the RTT estimator), restart transmission or zero-window probing, and
 * drive the closing states (TIME_WAIT / LAST_ACK / FIN_WAIT2).
 *
 * Returns 1 if the ack was acceptable, 0 if the segment should be
 * discarded by the caller.
 */
static int
tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
{
	unsigned long ack;
	int flag = 0;
	/* flag bits: 1 = something changed / data present,
	   2 = an acked buffer was seen (skip RTT sampling on retransmits),
	   4 = window or retransmit list was touched. */

	/* Zapped sockets (reset received) ignore everything. */
	if (sk->zapped)
		return(1);

	ack = ntohl(th->ack_seq);
	DPRINTF((DBG_TCP, "tcp_ack ack=%d, window=%d, "
		"sk->rcv_ack_seq=%d, sk->window_seq = %d\n",
		ack, ntohs(th->window), sk->rcv_ack_seq, sk->window_seq));

	/* Track the largest window the peer has ever offered; it bounds
	   our segment size. */
	if (ntohs(th->window) > sk->max_window) {
		sk->max_window = ntohs(th->window);
		sk->mss = min(sk->max_window, sk->mtu);
	}

	/* Any ack answers an outstanding keepalive probe. */
	if (sk->retransmits && sk->timeout == TIME_KEEPOPEN)
		sk->retransmits = 0;

	/* Ack outside [rcv_ack_seq-1, sent_seq+1]: old duplicate or ack of
	   unsent data. */
	if (after(ack, sk->sent_seq+1) || before(ack, sk->rcv_ack_seq-1)) {
		if (after(ack, sk->sent_seq) ||
		   (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)) {
			return(0);
		}
		if (sk->keepopen) {
			reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
		}
		return(1);
	}

	/* Segment carries more than a bare header (data or options). */
	if (len != th->doff*4) flag |= 1;

	/* The peer SHRANK its window below data we already sent: move the
	   now-out-of-window buffers off the retransmit list back onto the
	   front of the write queue so they are re-sent when space opens. */
	if (after(sk->window_seq, ack+ntohs(th->window))) {
		struct sk_buff *skb;
		struct sk_buff *skb2;
		struct sk_buff *wskb = NULL;

		skb2 = sk->send_head;
		sk->send_head = NULL;
		sk->send_tail = NULL;

		flag |= 4;

		sk->window_seq = ack + ntohs(th->window);
		cli();	/* list surgery must not race the timer/bottom half */
		while (skb2 != NULL) {
			skb = skb2;
			skb2 = (struct sk_buff *)skb->link3;
			skb->link3 = NULL;
			if (after(skb->h.seq, sk->window_seq)) {
				/* Beyond the new window: back to the write queue. */
				if (sk->packets_out > 0) sk->packets_out--;

				if (skb->next != NULL) {
					skb_unlink(skb);
				}

				skb->magic = TCP_WRITE_QUEUE_MAGIC;
				if (wskb == NULL) {
					skb->next = sk->wfront;
					sk->wfront = skb;
				} else {
					skb->next = wskb->next;
					wskb->next = skb;
				}
				if (sk->wback == wskb) sk->wback = skb;
				wskb = skb;
			} else {
				/* Still inside the window: keep on send list. */
				if (sk->send_head == NULL) {
					sk->send_head = skb;
					sk->send_tail = skb;
				} else {
					sk->send_tail->link3 = skb;
					sk->send_tail = skb;
				}
				skb->link3 = NULL;
			}
		}
		sti();
	}

	/* Keep head/tail/packets_out mutually consistent. */
	if (sk->send_tail == NULL || sk->send_head == NULL) {
		sk->send_head = NULL;
		sk->send_tail = NULL;
		sk->packets_out= 0;
	}

	/* Right edge of the peer's offered window. */
	sk->window_seq = ack + ntohs(th->window);

	/* Congestion window growth on new acks: exponential (slow start)
	   below ssthresh, then linear (one segment per window's worth of
	   acks), capped at 2048. */
	if (sk->timeout == TIME_WRITE &&
	    sk->cong_window < 2048 && after(ack, sk->rcv_ack_seq)) {
		if (sk->cong_window < sk->ssthresh)
			/* slow start: +1 per ack */
			sk->cong_window++;
		else {
			/* congestion avoidance: +1 per cong_window acks */
			if (sk->cong_count >= sk->cong_window) {
				sk->cong_window++;
				sk->cong_count = 0;
			} else
				sk->cong_count++;
		}
	}

	DPRINTF((DBG_TCP, "tcp_ack: Updating rcv ack sequence.\n"));
	sk->rcv_ack_seq = ack;

	/* A window update that uncovers queued data ends zero-window
	   probing; restore a normal rto from the smoothed estimates. */
	if (sk->timeout == TIME_PROBE0) {
		if (sk->wfront != NULL &&
		    ! before (sk->window_seq, sk->wfront->h.seq)) {
			sk->retransmits = 0;
			sk->backoff = 0;

			sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
			if (sk->rto > 120*HZ)
				sk->rto = 120*HZ;
			if (sk->rto < 1*HZ)
				sk->rto = 1*HZ;
		}
	}

	/* Release fully acknowledged buffers from the retransmit list. */
	while(sk->send_head != NULL) {
		/* Sanity: the list must stay sequence-ordered. */
		if (sk->send_head->link3 &&
		    after(sk->send_head->h.seq, sk->send_head->link3->h.seq)) {
			printk("INET: tcp.c: *** bug send_list out of order.\n");
			sort_send(sk);
		}

		if (before(sk->send_head->h.seq, ack+1)) {
			struct sk_buff *oskb;

			if (sk->retransmits) {
				/* Partial ack during retransmission: keep
				   retransmitting only if more is pending. */
				flag |= 2;

				if (sk->send_head->link3)
					sk->retransmits = 1;
				else
					sk->retransmits = 0;
			}

			if (sk->packets_out > 0) sk->packets_out --;
			DPRINTF((DBG_TCP, "skb=%X skb->h.seq = %d acked ack=%d\n",
				sk->send_head, sk->send_head->h.seq, ack));

			if (!sk->dead) sk->write_space(sk);

			oskb = sk->send_head;

			/* RTT estimation (RFC 793 / Van Jacobson style
			   mean + deviation), skipped for retransmitted
			   segments (Karn's rule, flag&2). */
			if (!(flag&2)) {
				long m;

				/* m = new sample - old smoothed rtt
				   (rtt is kept scaled by 8, mdev by 4) */
				m = jiffies - oskb->when;
				m -= (sk->rtt >> 3);
				sk->rtt += m;
				if (m < 0)
					m = -m;
				m -= (sk->mdev >> 2);
				sk->mdev += m;

				/* rto = rtt + 2*mdev, clamped to [1s, 120s] */
				sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
				if (sk->rto > 120*HZ)
					sk->rto = 120*HZ;
				if (sk->rto < 1*HZ)
					sk->rto = 1*HZ;
				sk->backoff = 0;

			}
			flag |= (2|4);

			cli();

			oskb = sk->send_head;
			IS_SKB(oskb);
			sk->send_head =(struct sk_buff *)oskb->link3;
			if (sk->send_head == NULL) {
				sk->send_tail = NULL;
			}

			skb_unlink(oskb);	/* take it off the device queue too */
			sti();
			oskb->magic = 0;
			kfree_skb(oskb, FREE_WRITE);
			if (!sk->dead) sk->write_space(sk);
		} else {
			break;
		}
	}

	/* More to send?  Either transmit now or start window probing. */
	if (sk->wfront != NULL) {
		if (after (sk->window_seq+1, sk->wfront->h.seq) &&
			(sk->retransmits == 0 ||
			 sk->timeout != TIME_WRITE ||
			 before(sk->wfront->h.seq, sk->rcv_ack_seq +1))
			&& sk->packets_out < sk->cong_window) {
			flag |= 1;
			tcp_write_xmit(sk);
		} else if (before(sk->window_seq, sk->wfront->h.seq) &&
			   sk->send_head == NULL &&
			   sk->ack_backlog == 0 &&
			   sk->state != TCP_TIME_WAIT) {
			/* Zero window and nothing in flight: probe. */
			reset_timer(sk, TIME_PROBE0, sk->rto);
		}
	} else {
		if (sk->send_head == NULL && sk->ack_backlog == 0 &&
		    sk->state != TCP_TIME_WAIT && !sk->keepopen) {
			DPRINTF((DBG_TCP, "Nothing to do, going to sleep.\n"));
			if (!sk->dead) sk->write_space(sk);

			if (sk->keepopen)
				reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
			else
				delete_timer(sk);
		} else {
			if (sk->state != (unsigned char) sk->keepopen) {
				reset_timer(sk, TIME_WRITE, sk->rto);
			}
			if (sk->state == TCP_TIME_WAIT) {
				reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			}
		}
	}

	/* Everything in flight is acked: flush any partial buffer. */
	if (sk->packets_out == 0 && sk->partial != NULL &&
	    sk->wfront == NULL && sk->send_head == NULL) {
		flag |= 1;
		tcp_send_partial(sk);
	}

	/* Closing-state transitions driven by this ack. */
	if (sk->state == TCP_TIME_WAIT) {
		if (!sk->dead)
			sk->state_change(sk);
		if (sk->rcv_ack_seq == sk->write_seq && sk->acked_seq == sk->fin_seq) {
			flag |= 1;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
		}
	}

	if (sk->state == TCP_LAST_ACK || sk->state == TCP_FIN_WAIT2) {
		if (!sk->dead) sk->state_change(sk);
		if (sk->rcv_ack_seq == sk->write_seq) {
			flag |= 1;
			if (sk->acked_seq != sk->fin_seq) {
				tcp_time_wait(sk);
			} else {
				DPRINTF((DBG_TCP, "tcp_ack closing socket - %X\n", sk));
				tcp_send_ack(sk->sent_seq, sk->acked_seq, sk,
					     th, sk->daddr);
				sk->shutdown = SHUTDOWN_MASK;
				sk->state = TCP_CLOSE;
			}
		}
	}

	/* Fast retransmit heuristic: a pure duplicate ack (flag clear) or
	   an ack that moved things (flag&4) while unacked data remains and
	   either a retransmit is pending or the oldest segment timed out. */
	if (((!flag) || (flag&4)) && sk->send_head != NULL &&
	    (((flag&2) && sk->retransmits) ||
	     (sk->send_head->when + sk->rto < jiffies))) {
		ip_do_retransmit(sk, 1);
		reset_timer(sk, TIME_WRITE, sk->rto);
	}

	DPRINTF((DBG_TCP, "leaving tcp_ack\n"));
	return(1);
}
2542
2543
2544
2545
2546
2547
2548
/*
 * Queue the data portion of an incoming segment on sk->rqueue (which is
 * kept sorted by sequence number), advance acked_seq across any now
 * contiguous run of buffers, decide between an immediate and a delayed
 * ack, and wake the reader.  Always returns 0; the skb is consumed.
 */
static int
tcp_data(struct sk_buff *skb, struct sock *sk,
	 unsigned long saddr, unsigned short len)
{
	struct sk_buff *skb1, *skb2;
	struct tcphdr *th;
	int dup_dumped=0;

	th = skb->h.th;
	print_th(th);
	/* len covers the TCP header; strip it to get the payload length. */
	skb->len = len -(th->doff*4);

	DPRINTF((DBG_TCP, "tcp_data len = %d sk = %X:\n", skb->len, sk));

	sk->bytes_rcv += skb->len;
	/* Pure ack (no data, no flags worth queuing): nothing to store. */
	if (skb->len == 0 && !th->fin && !th->urg && !th->psh) {
		if (!th->ack) tcp_send_ack(sk->sent_seq, sk->acked_seq,sk, th, saddr);
		kfree_skb(skb, FREE_READ);
		return(0);
	}

	/* Data after we shut down receiving: reset the connection. */
	if (sk->shutdown & RCV_SHUTDOWN) {
		sk->acked_seq = th->seq + skb->len + th->syn + th->fin;
		tcp_reset(sk->saddr, sk->daddr, skb->h.th,
			  sk->prot, NULL, skb->dev, sk->ip_tos, sk->ip_ttl);
		sk->state = TCP_CLOSE;
		sk->err = EPIPE;
		sk->shutdown = SHUTDOWN_MASK;
		DPRINTF((DBG_TCP, "tcp_data: closing socket - %X\n", sk));
		kfree_skb(skb, FREE_READ);
		if (!sk->dead) sk->state_change(sk);
		return(0);
	}

	/* Insert into the receive queue at the right place (the queue is
	   ordered by sequence number; scan from the tail since in-order
	   arrival is the common case). */
	if (sk->rqueue == NULL) {
		DPRINTF((DBG_TCP, "tcp_data: skb = %X:\n", skb));
#ifdef OLDWAY
		sk->rqueue = skb;
		skb->next = skb;
		skb->prev = skb;
		skb->list = &sk->rqueue;
#else
		skb_queue_head(&sk->rqueue,skb);
#endif
		skb1= NULL;
	} else {
		DPRINTF((DBG_TCP, "tcp_data adding to chain sk = %X:\n", sk));
		for(skb1=sk->rqueue->prev; ; skb1 =(struct sk_buff *)skb1->prev) {
			if(sk->debug)
			{
				printk("skb1=%p :", skb1);
				printk("skb1->h.th->seq = %ld: ", skb1->h.th->seq);
				printk("skb->h.th->seq = %ld\n",skb->h.th->seq);
				printk("copied_seq = %ld acked_seq = %ld\n", sk->copied_seq,
					sk->acked_seq);
			}
#ifdef OLD
			if (after(th->seq+1, skb1->h.th->seq)) {
				skb->prev = skb1;
				skb->next = skb1->next;
				skb->next->prev = skb;
				skb1->next = skb;
				if (skb1 == sk->rqueue) sk->rqueue = skb;
				break;
			}
			if (skb1->prev == sk->rqueue) {
				skb->next= skb1;
				skb->prev = skb1->prev;
				skb->prev->next = skb;
				skb1->prev = skb;
				skb1 = NULL;

				break;
			}
#else
			/* Exact duplicate with at least as much data:
			   replace the old buffer with the new one. */
			if (th->seq==skb1->h.th->seq && skb->len>= skb1->len)
			{
				skb_append(skb1,skb);
				skb_unlink(skb1);
				kfree_skb(skb1,FREE_READ);
				dup_dumped=1;
				skb1=NULL;
				break;
			}
			/* New segment belongs after skb1. */
			if (after(th->seq+1, skb1->h.th->seq))
			{
				skb_append(skb1,skb);
				break;
			}
			/* Reached the front: it is the earliest segment. */
			if (skb1 == sk->rqueue)
			{
				skb_queue_head(&sk->rqueue, skb);
				break;
			}
#endif
		}
		DPRINTF((DBG_TCP, "skb = %X:\n", skb));
	}

	/* Cache the sequence number just past this segment in ack_seq
	   (SYN and FIN each consume one sequence number). */
	th->ack_seq = th->seq + skb->len;
	if (th->syn) th->ack_seq++;
	if (th->fin) th->ack_seq++;

	if (before(sk->acked_seq, sk->copied_seq)) {
		printk("*** tcp.c:tcp_data bug acked < copied\n");
		sk->acked_seq = sk->copied_seq;
	}

	/* If this segment is in order (or fills a hole at acked_seq),
	   advance acked_seq over it and over any buffers it makes
	   contiguous, shrinking our advertised window as data queues up. */
	if ((!dup_dumped && (skb1 == NULL || skb1->acked)) || before(th->seq, sk->acked_seq+1)) {
		if (before(th->seq, sk->acked_seq+1)) {
			int newwindow;

			if (after(th->ack_seq, sk->acked_seq)) {
				newwindow = sk->window -
					(th->ack_seq - sk->acked_seq);
				if (newwindow < 0)
					newwindow = 0;
				sk->window = newwindow;
				sk->acked_seq = th->ack_seq;
			}
			skb->acked = 1;

			/* An in-order FIN ends the receive side. */
			if (skb->h.th->fin) {
				if (!sk->dead) sk->state_change(sk);
				sk->shutdown |= RCV_SHUTDOWN;
			}

			/* Sweep forward over newly contiguous buffers. */
			for(skb2 = (struct sk_buff *)skb->next;
			    skb2 !=(struct sk_buff *) sk->rqueue;
			    skb2 = (struct sk_buff *)skb2->next) {
				if (before(skb2->h.th->seq, sk->acked_seq+1)) {
					if (after(skb2->h.th->ack_seq, sk->acked_seq))
					{
						newwindow = sk->window -
							(skb2->h.th->ack_seq - sk->acked_seq);
						if (newwindow < 0)
							newwindow = 0;
						sk->window = newwindow;
						sk->acked_seq = skb2->h.th->ack_seq;
					}
					skb2->acked = 1;

					if (skb2->h.th->fin) {
						sk->shutdown |= RCV_SHUTDOWN;
						if (!sk->dead) sk->state_change(sk);
					}

					/* Force an immediate ack below. */
					sk->ack_backlog = sk->max_ack_backlog;
				} else {
					break;
				}
			}

			/* Immediate ack if delayed acks are off, the
			   backlog is full, too much is unacked, or a FIN
			   arrived; otherwise just queue a delayed ack. */
			if (!sk->delay_acks ||
			    sk->ack_backlog >= sk->max_ack_backlog ||
			    sk->bytes_rcv > sk->max_unacked || th->fin) {
			} else {
				sk->ack_backlog++;
				if(sk->debug)
					printk("Ack queued.\n");
				reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
			}
		}
	}

	/* Out-of-order segment: if receive memory is tight, shed
	   unacked (out-of-order) buffers from the queue head, then ack
	   to trigger the peer's retransmission. */
	if (!skb->acked) {
		while (sk->prot->rspace(sk) < sk->mtu) {
			skb1 = skb_peek(&sk->rqueue);
			if (skb1 == NULL) {
				printk("INET: tcp.c:tcp_data memory leak detected.\n");
				break;
			}

			/* Never discard data we already acknowledged. */
			if (skb1->acked) {
				break;
			}

			skb_unlink(skb1);
#ifdef OLDWAY
			if (skb1->prev == skb1) {
				sk->rqueue = NULL;
			} else {
				sk->rqueue = (struct sk_buff *)skb1->prev;
				skb1->next->prev = skb1->prev;
				skb1->prev->next = skb1->next;
			}
#endif
			kfree_skb(skb1, FREE_READ);
		}
		tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
		sk->ack_backlog++;
		reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
	} else {
		tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
	}

	/* Let any sleeping reader know data arrived. */
	if (!sk->dead) {
		if(sk->debug)
			printk("Data wakeup.\n");
		sk->data_ready(sk,0);
	} else {
		DPRINTF((DBG_TCP, "data received on dead socket.\n"));
	}

	/* Both FINs now fully acknowledged: move towards close. */
	if (sk->state == TCP_FIN_WAIT2 &&
	    sk->acked_seq == sk->fin_seq && sk->rcv_ack_seq == sk->write_seq) {
		DPRINTF((DBG_TCP, "tcp_data: entering last_ack state sk = %X\n", sk));

		sk->shutdown = SHUTDOWN_MASK;
		sk->state = TCP_LAST_ACK;
		if (!sk->dead) sk->state_change(sk);
	}

	return(0);
}
2800
2801
/*
 * Note the position of urgent data announced by an incoming segment.
 * Converts the urgent pointer to an absolute sequence number, ignores
 * stale or duplicate announcements, signals SIGURG to the owning
 * process (or process group), and marks the urgent byte as pending
 * (URG_NOTYET) until tcp_urg() can actually read it.
 */
static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
{
	unsigned long ptr = ntohs(th->urg_ptr);

	/* Convert the 1-based urgent offset into an absolute sequence
	   number of the urgent byte. */
	if (ptr)
		ptr--;
	ptr += th->seq;

	/* Ignore urgent data we have already read. */
	if (after(sk->copied_seq+1, ptr))
		return;

	/* Ignore a duplicate announcement of pending urgent data. */
	if (sk->urg_data && !after(ptr, sk->urg_seq))
		return;

	/* Tell the owner: positive proc is a pid, negative a process group. */
	if (sk->proc != 0) {
		if (sk->proc > 0) {
			kill_proc(sk->proc, SIGURG, 1);
		} else {
			kill_pg(-sk->proc, SIGURG, 1);
		}
	}
	sk->urg_data = URG_NOTYET;	/* byte announced but not yet received */
	sk->urg_seq = ptr;
}
2829
/*
 * Urgent-data handling for an incoming segment: record any new urgent
 * pointer, and if the previously announced urgent byte lies inside this
 * segment, latch it into sk->urg_data (URG_VALID | byte) and wake the
 * reader.  Always returns 0.
 */
static inline int tcp_urg(struct sock *sk, struct tcphdr *th,
	unsigned long saddr, unsigned long len)
{
	unsigned long ptr;

	/* New urgent pointer in this segment? */
	if (th->urg)
		tcp_check_urg(sk,th);

	/* Nothing announced, or the byte was already captured. */
	if (sk->urg_data != URG_NOTYET)
		return 0;

	/* Offset of the urgent byte within this segment (past the header);
	   if it is not inside this segment, keep waiting. */
	ptr = sk->urg_seq - th->seq + th->doff*4;
	if (ptr >= len)
		return 0;

	/* Capture the out-of-band byte and wake any sleeper. */
	sk->urg_data = URG_VALID | *(ptr + (unsigned char *) th);
	if (!sk->dead)
		wake_up_interruptible(sk->sleep);
	return 0;
}
2854
2855
2856
/*
 * React to a FIN from the peer: record where their data stream ends
 * (fin_seq) and advance the connection state machine accordingly.
 * Always returns 0; the caller is responsible for acking.
 */
static int
tcp_fin(struct sock *sk, struct tcphdr *th,
	unsigned long saddr, struct device *dev)
{
	DPRINTF((DBG_TCP, "tcp_fin(sk=%X, th=%X, saddr=%X, dev=%X)\n",
		sk, th, saddr, dev));

	if (!sk->dead) {
		sk->state_change(sk);
	}

	switch(sk->state) {
		case TCP_SYN_RECV:
		case TCP_SYN_SENT:
		case TCP_ESTABLISHED:
			/* Peer closed first: we go to CLOSE_WAIT and bound
			   the connection's remaining lifetime. */
			reset_timer(sk, TIME_CLOSE, TCP_TIMEOUT_LEN);
			sk->fin_seq = th->seq+1;	/* FIN consumes one sequence number */
			sk->state = TCP_CLOSE_WAIT;
			if (th->rst) sk->shutdown = SHUTDOWN_MASK;
			break;

		case TCP_CLOSE_WAIT:
		case TCP_FIN_WAIT2:
			break;	/* duplicate FIN; nothing more to record */

		case TCP_FIN_WAIT1:
			/* Both sides have now sent FINs. */
			sk->fin_seq = th->seq+1;
			sk->state = TCP_FIN_WAIT2;
			break;

		default:
		case TCP_TIME_WAIT:
			sk->state = TCP_LAST_ACK;

			/* Start the 2MSL-style close timer. */
			reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			return(0);
	}
	/* An ack for the FIN is now owed. */
	sk->ack_backlog++;

	return(0);
}
2901
2902
2903
/*
 * Accept a connection on a listening socket: wait (unless O_NONBLOCK)
 * for a queued SYN skb, and return the child socket that
 * tcp_conn_request() attached to it.  Errors are reported via sk->err
 * with a NULL return.
 */
static struct sock *
tcp_accept(struct sock *sk, int flags)
{
	struct sock *newsk;
	struct sk_buff *skb;

	DPRINTF((DBG_TCP, "tcp_accept(sk=%X, flags=%X, addr=%s)\n",
		sk, flags, in_ntoa(sk->saddr)));

	/* accept() is only meaningful on a listening socket. */
	if (sk->state != TCP_LISTEN) {
		sk->err = EINVAL;
		return(NULL);
	}

	/* Wait for a pending connection; interrupts are blocked around
	   the queue test to avoid missing a wakeup. */
	cli();
	sk->inuse = 1;
	while((skb = get_firstr(sk)) == NULL) {
		if (flags & O_NONBLOCK) {
			sti();
			release_sock(sk);
			sk->err = EAGAIN;
			return(NULL);
		}

		release_sock(sk);
		interruptible_sleep_on(sk->sleep);
		/* A signal aborts the wait. */
		if (current->signal & ~current->blocked) {
			sti();
			sk->err = ERESTARTSYS;
			return(NULL);
		}
		sk->inuse = 1;
	}
	sti();

	/* tcp_conn_request() stored the child socket in the SYN skb. */
	newsk = skb->sk;

	kfree_skb(skb, FREE_READ);
	sk->ack_backlog--;
	release_sock(sk);
	return(newsk);
}
2952
2953
2954
/*
 * Active open: validate the destination, pick an initial sequence
 * number, build and transmit the SYN (with an MSS option), and move the
 * socket into TCP_SYN_SENT with the connect timer armed.
 */
static int
tcp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len)
{
	struct sk_buff *buff;
	struct sockaddr_in sin;
	struct device *dev=NULL;
	unsigned char *ptr;
	int tmp;
	struct tcphdr *t1;
	int err;

	if (sk->state != TCP_CLOSE) return(-EISCONN);
	if (addr_len < 8) return(-EINVAL);

	err=verify_area(VERIFY_READ, usin, addr_len);
	if(err)
		return err;

	memcpy_fromfs(&sin,usin, min(sizeof(sin), addr_len));

	/* Family 0 is accepted for historical compatibility. */
	if (sin.sin_family && sin.sin_family != AF_INET) return(-EAFNOSUPPORT);

	DPRINTF((DBG_TCP, "TCP connect daddr=%s\n", in_ntoa(sin.sin_addr.s_addr)));

	/* TCP cannot connect to a broadcast address. */
	if (chk_addr(sin.sin_addr.s_addr) == IS_BROADCAST) {
		DPRINTF((DBG_TCP, "TCP connection to broadcast address not allowed\n"));
		return(-ENETUNREACH);
	}

	/* Connecting to ourselves on the same port would loop. */
	if(sk->saddr == sin.sin_addr.s_addr && sk->num==ntohs(sin.sin_port))
		return -EBUSY;

	sk->inuse = 1;
	sk->daddr = sin.sin_addr.s_addr;
	/* Clock-derived initial sequence number. */
	sk->write_seq = jiffies * SEQ_TICK - seq_offset;
	sk->window_seq = sk->write_seq;
	sk->rcv_ack_seq = sk->write_seq -1;
	sk->err = 0;
	sk->dummy_th.dest = sin.sin_port;
	release_sock(sk);

	buff = sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
	if (buff == NULL) {
		return(-ENOMEM);
	}
	sk->inuse = 1;
	buff->mem_addr = buff;
	buff->mem_len = MAX_SYN_SIZE;
	buff->len = 24;		/* 20-byte TCP header + 4-byte MSS option */
	buff->sk = sk;
	buff->free = 1;
	t1 = (struct tcphdr *) buff->data;

	/* Prepend the IP header; this also resolves the output device. */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
				     IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0) {
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		release_sock(sk);
		return(-ENETUNREACH);
	}
	buff->len += tmp;
	t1 = (struct tcphdr *)((char *)t1 +tmp);

	memcpy(t1,(void *)&(sk->dummy_th), sizeof(*t1));
	t1->seq = ntohl(sk->write_seq++);
	sk->sent_seq = sk->write_seq;
	buff->h.seq = sk->write_seq;
	t1->ack = 0;
	/* NOTE(review): window is assigned without htons() here, unlike the
	   other header builders in this file — verify this is intentional. */
	t1->window = 2;
	t1->res1=0;
	t1->res2=0;
	t1->rst = 0;
	t1->urg = 0;
	t1->psh = 0;
	t1->syn = 1;
	t1->urg_ptr = 0;
	t1->doff = 6;	/* header + 4 option bytes, in 32-bit words */

	/* Initial MTU guess; an explicit user MSS wins, otherwise be
	   conservative (576) for off-net destinations. */
	if (sk->user_mss)
		sk->mtu = sk->user_mss;
	else {
#ifdef SUBNETSARELOCAL
		if ((sk->saddr ^ sk->daddr) & default_mask(sk->saddr))
#else
		if ((sk->saddr ^ sk->daddr) & dev->pa_mask)
#endif
			sk->mtu = 576 - HEADER_SIZE;
		else
			sk->mtu = MAX_WINDOW;
	}

	/* Never exceed what the output device can carry. */
	sk->mtu = min(sk->mtu, dev->mtu - HEADER_SIZE);

	/* MSS option: kind 2, length 4, 16-bit MSS in network order. */
	ptr = (unsigned char *)(t1+1);
	ptr[0] = 2;
	ptr[1] = 4;
	ptr[2] = (sk->mtu) >> 8;
	ptr[3] = (sk->mtu) & 0xff;
	tcp_send_check(t1, sk->saddr, sk->daddr,
		       sizeof(struct tcphdr) + 4, sk);

	/* Enter SYN_SENT and arm the retransmit machinery. */
	sk->state = TCP_SYN_SENT;
	sk->rtt = TCP_CONNECT_TIME;
	reset_timer(sk, TIME_WRITE, TCP_CONNECT_TIME);
	sk->retransmits = TCP_RETR2 - TCP_SYN_RETRIES;

	sk->prot->queue_xmit(sk, dev, buff, 0);

	release_sock(sk);
	return(0);
}
3073
3074
3075
/*
 * Segment acceptability test: decide whether an incoming segment lies
 * (at least partly) inside the receive window.  Returns 1 when the
 * segment should be processed, 0 when it must be dropped.  Rejected
 * segments are re-acked (never in reply to a RST), or answered with a
 * reset while the connection is still handshaking.
 */
static int
tcp_sequence(struct sock *sk, struct tcphdr *th, short len,
	struct options *opt, unsigned long saddr, struct device *dev)
{
	unsigned long next_seq;

	/* Payload length: total length minus the TCP header. */
	next_seq = len - 4*th->doff;
	if (th->fin)
		next_seq++;	/* a FIN consumes one sequence number */

	/* With a zero window only pure (dataless) segments are acceptable. */
	if (next_seq && !sk->window)
		goto ignore_it;
	next_seq += th->seq;	/* sequence number just past this segment */

	/* The segment must end at or after what we have already acked... */
	if (!after(next_seq+1, sk->acked_seq))
		goto ignore_it;

	/* ...and must start before the right edge of the offered window. */
	if (!before(th->seq, sk->acked_seq + sk->window + 1))
		goto ignore_it;

	/* In window: accept. */
	return 1;

	ignore_it:
	DPRINTF((DBG_TCP, "tcp_sequence: rejecting packet.\n"));

	/* While still handshaking, an out-of-window segment draws a reset;
	   returning 1 lets the caller's state machine continue. */
	if (sk->state==TCP_SYN_SENT || sk->state==TCP_SYN_RECV) {
		tcp_reset(sk->saddr,sk->daddr,th,sk->prot,NULL,dev, sk->ip_tos,sk->ip_ttl);
		return 1;
	}

	/* Never ack a reset - that would provoke an ACK storm. */
	if (th->rst)
		return 0;

	/* Re-ack so the peer can resynchronise with us. */
	tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
	return 0;
}
3129
3130
/*
 * Main TCP receive routine.  Called from the IP layer for every
 * incoming TCP segment, and again with redo=1 when a segment parked on
 * a busy socket's back_log ring is replayed (by release_sock).
 * Demultiplexes to a socket, checks checksum and sequence, then runs
 * the per-state protocol machine.  Always returns 0: the skb is always
 * consumed (processed, queued, or freed).
 */
int
tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
	unsigned long daddr, unsigned short len,
	unsigned long saddr, int redo, struct inet_protocol * protocol)
{
	struct tcphdr *th;
	struct sock *sk;

	if (!skb) {
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv skb = NULL\n"));
		return(0);
	}
#if 0
	if (!protocol) {
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv protocol = NULL\n"));
		return(0);
	}

	if (!opt) {
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv opt = NULL\n"));
	}
#endif
	if (!dev) {
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv dev = NULL\n"));
		return(0);
	}
	th = skb->h.th;

	/* Full 4-tuple lookup for the owning socket. */
	sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
	DPRINTF((DBG_TCP, "<<\n"));
	DPRINTF((DBG_TCP, "len = %d, redo = %d, skb=%X\n", len, redo, skb));

	/* A zapped (reset) socket is treated as no socket at all. */
	if (sk!=NULL && sk->zapped)
		sk=NULL;

	if (sk) {
		DPRINTF((DBG_TCP, "sk = %X:\n", sk));
	}

	if (!redo) {
		/* First pass only: checksum, byte-order fixup, ownership. */
		if (tcp_check(th, len, saddr, daddr )) {
			skb->sk = NULL;
			DPRINTF((DBG_TCP, "packet dropped with bad checksum.\n"));
			if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: bad checksum\n");
			kfree_skb(skb,FREE_READ);
			return(0);
		}

		/* Convert the sequence number to host order once, here. */
		th->seq = ntohl(th->seq);

		/* No socket: answer with a reset, unless it was one. */
		if (sk == NULL) {
			if (!th->rst)
				tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev,skb->ip_hdr->tos,255);
			skb->sk = NULL;
			kfree_skb(skb, FREE_READ);
			return(0);
		}

		skb->len = len;
		skb->sk = sk;
		skb->acked = 0;
		skb->used = 0;
		skb->free = 0;
		/* Addresses swapped: a reply built from this skb goes back. */
		skb->saddr = daddr;
		skb->daddr = saddr;

		/* Socket busy: park the skb on the circular back_log ring;
		   release_sock will replay it with redo=1 later. */
		cli();
		if (sk->inuse) {
			if (sk->back_log == NULL) {
				sk->back_log = skb;
				skb->next = skb;
				skb->prev = skb;
			} else {
				skb->next = sk->back_log;
				skb->prev = sk->back_log->prev;
				skb->prev->next = skb;
				skb->next->prev = skb;
			}
			sti();
			return(0);
		}
		sk->inuse = 1;	/* lock the socket for the duration */
		sti();
	} else {
		/* Replay path: the socket must already be known and locked. */
		if (!sk) {
			DPRINTF((DBG_TCP, "tcp.c: tcp_rcv bug sk=NULL redo = 1\n"));
			return(0);
		}
	}

	/* NOTE(review): this path returns with the socket still locked and
	   the skb unfreed - it should never trigger, but looks leaky. */
	if (!sk->prot) {
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv sk->prot = NULL \n"));
		return(0);
	}

	/* Charge the buffer to the socket; drop when over the rcv budget. */
	if (sk->rmem_alloc + skb->mem_len >= sk->rcvbuf) {
		skb->sk = NULL;
		DPRINTF((DBG_TCP, "dropping packet due to lack of buffer space.\n"));
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	}
	sk->rmem_alloc += skb->mem_len;

	DPRINTF((DBG_TCP, "About to do switch.\n"));

	/* The per-state protocol machine. */
	switch(sk->state) {

	/* LAST_ACK handles RST specially, then deliberately falls through
	   to share the established-states processing below. */
	case TCP_LAST_ACK:
		if (th->rst) {
			sk->zapped=1;
			sk->err = ECONNRESET;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			if (!sk->dead) {
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* fall through */

	case TCP_ESTABLISHED:
	case TCP_CLOSE_WAIT:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
	case TCP_TIME_WAIT:
		/* Out-of-window segments are dropped (tcp_sequence acks). */
		if (!tcp_sequence(sk, th, len, opt, saddr,dev)) {
			if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: not in seq\n");
#ifdef undef
			if(!th->rst)
				tcp_send_ack(sk->sent_seq, sk->acked_seq,
				    sk, th, saddr);
#endif
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* A valid in-window RST kills the connection. */
		if (th->rst) {
			sk->zapped=1;
			sk->err = ECONNRESET;
			/* EPIPE in CLOSE_WAIT: the peer had already FINed. */
			if (sk->state == TCP_CLOSE_WAIT) {
				sk->err = EPIPE;
			}
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			if (!sk->dead) {
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* A SYN inside an established connection is an error: reset. */
		if (
#if 0
		if ((opt && (opt->security != 0 ||
		    opt->compartment != 0)) ||
#endif
		    th->syn) {
			sk->err = ECONNRESET;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			tcp_reset(daddr, saddr, th, sk->prot, opt,dev, sk->ip_tos,sk->ip_ttl);
			if (!sk->dead) {
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* Process ACK, then urgent data, then payload, then FIN;
		   each helper returning non-zero means "segment consumed". */
		if (th->ack && !tcp_ack(sk, th, saddr, len)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (tcp_urg(sk, th, saddr, len)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (tcp_data(skb, sk, saddr, len)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (th->fin && tcp_fin(sk, th, saddr, dev)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		release_sock(sk);
		return(0);

	case TCP_CLOSE:
		/* Closed socket: ignore if dead or still bound to a peer. */
		if (sk->dead || sk->daddr) {
			DPRINTF((DBG_TCP, "packet received for closed,dead socket\n"));
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (!th->rst) {
			if (!th->ack)
				th->ack_seq = 0;
			tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
		}
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);

	case TCP_LISTEN:
		/* A listener ignores RSTs, resets stray ACKs, and treats a
		   SYN as a new connection attempt. */
		if (th->rst) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		if (th->ack) {
			tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (th->syn) {
#if 0
			if (opt->security != 0 || opt->compartment != 0) {
				tcp_reset(daddr, saddr, th, prot, opt,dev);
				release_sock(sk);
				return(0);
			}
#endif
			/* Spawn a new socket for the embryonic connection;
			   tcp_conn_request takes ownership of the skb. */
			tcp_conn_request(sk, skb, daddr, saddr, opt, dev);
			release_sock(sk);
			return(0);
		}

		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);

	case TCP_SYN_RECV:
		if (th->syn) {
			/* Duplicate SYN while in SYN_RECV: just drop it. */
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* fall through - non-SYN segments get the sequence check */

	default:
		if (!tcp_sequence(sk, th, len, opt, saddr,dev)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* fall through */

	case TCP_SYN_SENT:
		/* Note: segments arriving in SYN_SENT skip the sequence
		   check above - there is no established window yet. */
		if (th->rst) {
			sk->err = ECONNREFUSED;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			sk->zapped = 1;
			if (!sk->dead) {
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
#if 0
		if (opt->security != 0 || opt->compartment != 0) {
			sk->err = ECONNRESET;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			tcp_reset(daddr, saddr, th, sk->prot, opt, dev);
			if (!sk->dead) {
				wake_up_interruptible(sk->sleep);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
#endif
		/* A bare SYN here means a simultaneous open. */
		if (!th->ack) {
			if (th->syn) {
				sk->state = TCP_SYN_RECV;
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* The segment carries an ACK: finish the handshake. */
		switch(sk->state) {
		case TCP_SYN_SENT:
			if (!tcp_ack(sk, th, saddr, len)) {
				/* ACK doesn't match our SYN: reset the peer. */
				tcp_reset(daddr, saddr, th,
				    sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			/* ACK without SYN: not a SYN-ACK, ignore. */
			if (!th->syn) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			/* Got the SYN-ACK: the peer's SYN uses one sequence
			   number, hence the +1; ack it. */
			sk->acked_seq = th->seq+1;
			sk->fin_seq = th->seq;
			tcp_send_ack(sk->sent_seq, th->seq+1,
			    sk, th, sk->daddr);
			/* fall through - shares the go-established code */

		case TCP_SYN_RECV:
			if (!tcp_ack(sk, th, saddr, len)) {
				tcp_reset(daddr, saddr, th,
				    sk->prot, opt, dev,sk->ip_tos,sk->ip_ttl);
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
			sk->state = TCP_ESTABLISHED;

			/* Pick up the peer's options (MSS), latch its port,
			   and wake anyone sleeping in connect(). */
			tcp_options(sk, th);
			sk->dummy_th.dest = th->source;
			sk->copied_seq = sk->acked_seq-1;
			if (!sk->dead) {
				sk->state_change(sk);
			}

			/* Guard against a zero advertised window: assume a
			   tiny 32-byte window so we can still make progress. */
			if (sk->max_window == 0) {
				sk->max_window = 32;
				sk->mss = min(sk->max_window, sk->mtu);
			}

			/* The handshake segment may itself carry urgent
			   data, payload, or a FIN - process them now. */
			if (th->urg) {
				if (tcp_urg(sk, th, saddr, len)) {
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}
			}
			if (tcp_data(skb, sk, saddr, len))
				kfree_skb(skb, FREE_READ);

			if (th->fin) tcp_fin(sk, th, saddr, dev);
			release_sock(sk);
			return(0);
		}

		/* Reached when the outer 'default' fell into TCP_SYN_SENT
		   but the socket is in neither SYN state: treat the segment
		   as ordinary urgent/data/FIN input. */
		if (th->urg) {
			if (tcp_urg(sk, th, saddr, len)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
		}

		if (tcp_data(skb, sk, saddr, len)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (!th->fin) {
			release_sock(sk);
			return(0);
		}
		tcp_fin(sk, th, saddr, dev);
		release_sock(sk);
		return(0);
	}
}
3564
3565
3566
3567
3568
3569
/*
 * Send a zero-window probe: an otherwise empty ACK whose sequence
 * number is sent_seq-1, which forces the peer to respond with an ACK
 * restating its current window.  Called from the probe timer while the
 * peer advertises a closed window.
 */
static void
tcp_write_wakeup(struct sock *sk)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	struct device *dev=NULL;
	int tmp;

	if (sk->zapped)
		return;		/* connection was reset - nothing to probe */

	/* Only probe in states where data transfer is still possible. */
	if (sk -> state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT &&
	    sk -> state != TCP_FIN_WAIT1 && sk->state != TCP_FIN_WAIT2)
		return;

	buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
	if (buff == NULL) return;	/* out of memory - try again next tick */

	buff->mem_addr = buff;
	buff->mem_len = MAX_ACK_SIZE;
	buff->len = sizeof(struct tcphdr);
	buff->free = 1;
	buff->sk = sk;
	DPRINTF((DBG_TCP, "in tcp_write_wakeup\n"));
	t1 = (struct tcphdr *) buff->data;

	/* Prepend IP/link headers; also resolves 'dev'. */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
	    IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0) {
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		return;
	}

	buff->len += tmp;
	t1 = (struct tcphdr *)((char *)t1 +tmp);

	memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));

	/* sent_seq-1: deliberately one below the left window edge, so the
	   segment is "unacceptable" and must be answered with an ACK. */
	t1->seq = htonl(sk->sent_seq-1);
	t1->ack = 1;
	t1->res1= 0;
	t1->res2= 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->psh = 0;
	t1->fin = 0;
	t1->syn = 0;
	/* NOTE(review): ntohl/ntohs where htonl/htons read more naturally;
	   the conversions are symmetric so the wire bytes are identical. */
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->window = ntohs(tcp_select_window(sk));
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

	/* Send it on its way. */
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
3632
3633 void
3634 tcp_send_probe0(struct sock *sk)
3635 {
3636 if (sk->zapped)
3637 return;
3638
3639 tcp_write_wakeup(sk);
3640
3641 sk->backoff++;
3642 sk->rto = min(sk->rto << 1, 120*HZ);
3643 reset_timer (sk, TIME_PROBE0, sk->rto);
3644 sk->retransmits++;
3645 sk->prot->retransmits ++;
3646 }
3647
3648
3649
3650
3651
3652 int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
3653 {
3654 int val,err;
3655
3656 if(level!=SOL_TCP)
3657 return ip_setsockopt(sk,level,optname,optval,optlen);
3658
3659 if (optval == NULL)
3660 return(-EINVAL);
3661
3662 err=verify_area(VERIFY_READ, optval, sizeof(int));
3663 if(err)
3664 return err;
3665
3666 val = get_fs_long((unsigned long *)optval);
3667
3668 switch(optname)
3669 {
3670 case TCP_MAXSEG:
3671
3672
3673
3674
3675
3676
3677 if(val<1||val>MAX_WINDOW)
3678 return -EINVAL;
3679 sk->user_mss=val;
3680 return 0;
3681 case TCP_NODELAY:
3682 sk->nonagle=(val==0)?0:1;
3683 return 0;
3684 default:
3685 return(-ENOPROTOOPT);
3686 }
3687 }
3688
3689 int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
3690 {
3691 int val,err;
3692
3693 if(level!=SOL_TCP)
3694 return ip_getsockopt(sk,level,optname,optval,optlen);
3695
3696 switch(optname)
3697 {
3698 case TCP_MAXSEG:
3699 val=sk->user_mss;
3700 break;
3701 case TCP_NODELAY:
3702 val=sk->nonagle;
3703 break;
3704 default:
3705 return(-ENOPROTOOPT);
3706 }
3707 err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
3708 if(err)
3709 return err;
3710 put_fs_long(sizeof(int),(unsigned long *) optlen);
3711
3712 err=verify_area(VERIFY_WRITE, optval, sizeof(int));
3713 if(err)
3714 return err;
3715 put_fs_long(val,(unsigned long *)optval);
3716
3717 return(0);
3718 }
3719
3720
/*
 * The TCP protocol vector: the entry points the generic INET socket
 * layer uses to drive this protocol.  Pre-C99 positional initializer;
 * the comments name the operation each slot supplies (slot names per
 * struct proto in sock.h - verify against that header).
 */
struct proto tcp_prot = {
	sock_wmalloc,			/* write-buffer allocation */
	sock_rmalloc,			/* read-buffer allocation */
	sock_wfree,			/* write-buffer free */
	sock_rfree,			/* read-buffer free */
	sock_rspace,			/* free receive-buffer space */
	sock_wspace,			/* free send-buffer space */
	tcp_close,
	tcp_read,
	tcp_write,
	tcp_sendto,
	tcp_recvfrom,
	ip_build_header,		/* header building delegated to IP */
	tcp_connect,
	tcp_accept,
	ip_queue_xmit,			/* transmission delegated to IP */
	tcp_retransmit,
	tcp_write_wakeup,		/* zero-window probe sender */
	tcp_read_wakeup,		/* window-update ack sender */
	tcp_rcv,			/* input demultiplexer */
	tcp_select,
	tcp_ioctl,
	NULL,				/* no init hook needed for TCP */
	tcp_shutdown,
	tcp_setsockopt,
	tcp_getsockopt,
	128,				/* max_header - TODO confirm vs sock.h */
	0,				/* retransmit counter, starts at zero */
	{NULL,},			/* socket hash array, initially empty */
	"TCP"				/* protocol name */
};