This source file includes the following definitions.
- min
- __print_th
- print_th
- get_firstr
- tcp_select_window
- tcp_time_wait
- tcp_retransmit
- tcp_err
- tcp_readable
- tcp_select
- tcp_ioctl
- tcp_check
- tcp_send_check
- tcp_send_skb
- tcp_dequeue_partial
- tcp_send_partial
- tcp_enqueue_partial
- tcp_send_ack
- tcp_build_header
- tcp_write
- tcp_sendto
- tcp_read_wakeup
- cleanup_rbuf
- tcp_read_urg
- tcp_read
- tcp_shutdown
- tcp_recvfrom
- tcp_reset
- tcp_options
- default_mask
- tcp_conn_request
- tcp_close
- tcp_write_xmit
- sort_send
- tcp_ack
- tcp_data
- tcp_check_urg
- tcp_urg
- tcp_fin
- tcp_accept
- tcp_connect
- tcp_sequence
- tcp_rcv
- tcp_write_wakeup
- tcp_send_probe0
- tcp_setsockopt
- tcp_getsockopt
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88 #include <linux/types.h>
89 #include <linux/sched.h>
90 #include <linux/mm.h>
91 #include <linux/string.h>
92 #include <linux/socket.h>
93 #include <linux/sockios.h>
94 #include <linux/termios.h>
95 #include <linux/in.h>
96 #include <linux/fcntl.h>
97 #include "inet.h"
98 #include "dev.h"
99 #include "ip.h"
100 #include "protocol.h"
101 #include "icmp.h"
102 #include "tcp.h"
103 #include "skbuff.h"
104 #include "sock.h"
105 #include "arp.h"
106 #include <linux/errno.h>
107 #include <linux/timer.h>
108 #include <asm/system.h>
109 #include <asm/segment.h>
110 #include <linux/mm.h>
111
112 #define SEQ_TICK 3
113 unsigned long seq_offset;
114 #define SUBNETSARELOCAL
115
/*
 * Return the smaller of two unsigned quantities.
 * (Result is narrowed to int, matching the historical callers.)
 */
static __inline__ int
min(unsigned int a, unsigned int b)
{
	return (a <= b) ? a : b;
}
122
123
/*
 * Dump every field of a TCP header, plus the first four option bytes,
 * to the console.  Debugging aid only; reached via print_th() when
 * inet_debug == DBG_TCP.
 */
static void __print_th(struct tcphdr *th)
{
	unsigned char *ptr;

	printk("TCP header:\n");
	printk(" source=%d, dest=%d, seq =%ld, ack_seq = %ld\n",
		ntohs(th->source), ntohs(th->dest),
		ntohl(th->seq), ntohl(th->ack_seq));
	printk(" fin=%d, syn=%d, rst=%d, psh=%d, ack=%d, urg=%d res1=%d res2=%d\n",
		th->fin, th->syn, th->rst, th->psh, th->ack,
		th->urg, th->res1, th->res2);
	printk(" window = %d, check = %d urg_ptr = %d\n",
		ntohs(th->window), ntohs(th->check), ntohs(th->urg_ptr));
	printk(" doff = %d\n", th->doff);
	/* Options (if any) start immediately after the fixed header. */
	ptr =(unsigned char *)(th + 1);
	printk(" options = %d %d %d %d\n", ptr[0], ptr[1], ptr[2], ptr[3]);
}
141
142 static inline void print_th(struct tcphdr *th)
143 {
144 if (inet_debug == DBG_TCP)
145 __print_th(th);
146 }
147
148
149 static struct sk_buff *
150 get_firstr(struct sock *sk)
151 {
152 return skb_dequeue(&sk->rqueue);
153 }
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170 static int tcp_select_window(struct sock *sk)
171 {
172 int new_window = sk->prot->rspace(sk);
173
174
175
176
177
178
179
180
181
182
183 if (new_window < min(sk->mss, MAX_WINDOW/2) ||
184 new_window < sk->window)
185 return(sk->window);
186 return(new_window);
187 }
188
189
190
191 static void tcp_time_wait(struct sock *sk)
192 {
193 sk->state = TCP_TIME_WAIT;
194 sk->shutdown = SHUTDOWN_MASK;
195 if (!sk->dead)
196 sk->state_change(sk);
197 reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
198 }
199
200
201
202
203
204
205
206
207 static void
208 tcp_retransmit(struct sock *sk, int all)
209 {
210 if (all) {
211 ip_retransmit(sk, all);
212 return;
213 }
214
215 sk->ssthresh = sk->cong_window >> 1;
216
217 sk->cong_count = 0;
218
219 sk->cong_window = 1;
220
221
222 ip_retransmit(sk, all);
223 }
224
225
226
227
228
229
230
231
232
233
/*
 * ICMP error handler for TCP.  'header' points at the offending IP
 * header as returned inside the ICMP message; we step past it to the
 * embedded TCP header, look up the owning socket, and react:
 *   err < 0            -> report -err straight to the socket;
 *   ICMP_SOURCE_QUENCH -> shrink the congestion window;
 *   anything else      -> translate via icmp_err_convert[]; a fatal
 *                         error aborts a connection still in SYN_SENT.
 */
void
tcp_err(int err, unsigned char *header, unsigned long daddr,
	unsigned long saddr, struct inet_protocol *protocol)
{
	struct tcphdr *th;
	struct sock *sk;
	struct iphdr *iph=(struct iphdr *)header;

	/* Skip the variable-length IP header to reach the TCP header. */
	header+=4*iph->ihl;

	DPRINTF((DBG_TCP, "TCP: tcp_err(%d, hdr=%X, daddr=%X saddr=%X, protocol=%X)\n",
					err, header, daddr, saddr, protocol));

	th =(struct tcphdr *)header;
	sk = get_sock(&tcp_prot, th->source, daddr, th->dest, saddr);
	print_th(th);

	if (sk == NULL) return;

	/* Negative errors are local error codes, delivered as-is. */
	if(err<0)
	{
		sk->err = -err;
		sk->error_report(sk);
		return;
	}

	if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8)) {
		/* Congestion signal: back off, but never below 4 segments. */
		if (sk->cong_window > 4) sk->cong_window--;
		return;
	}

	DPRINTF((DBG_TCP, "TCP: icmp_err got error\n"));
	/* Map the ICMP code to a Unix errno for the socket owner. */
	sk->err = icmp_err_convert[err & 0xff].errno;

	/* A fatal ICMP error kills a connection attempt that has not
	 * yet completed its handshake. */
	if (icmp_err_convert[err & 0xff].fatal) {
		if (sk->state == TCP_SYN_SENT) {
			sk->state = TCP_CLOSE;
			sk->error_report(sk);
		}
	}
	return;
}
285
286
287
288
289
290
291
/*
 * Count the bytes that a read() could currently consume, by walking
 * the receive queue from sk->copied_seq onward.  Stops at the first
 * sequence gap, or after any data followed by a PSH.  One byte is
 * subtracted for out-of-band data that would be skipped (urgent byte
 * not delivered inline).  Runs with interrupts off so the queue
 * cannot change underneath the walk.
 */
static int
tcp_readable(struct sock *sk)
{
	unsigned long counted;
	unsigned long amount;
	struct sk_buff *skb;
	int count=0;
	int sum;
	unsigned long flags;

	DPRINTF((DBG_TCP, "tcp_readable(sk=%X)\n", sk));
	if(sk && sk->debug)
		printk("tcp_readable: %p - ",sk);

	if (sk == NULL || skb_peek(&sk->rqueue) == NULL)
	{
		if(sk && sk->debug)
			printk("empty\n");
		return(0);
	}

	/* First sequence number the user has not yet copied. */
	counted = sk->copied_seq+1;
	amount = 0;

	save_flags(flags);
	cli();
	skb =(struct sk_buff *)sk->rqueue;

	/* Walk the circular receive queue. */
	do {
		count++;
#ifdef OLD
		/* Old sanity check against a corrupted (looping) queue. */
		if (count > 20) {
			restore_flags(flags);
			DPRINTF((DBG_TCP, "tcp_readable, more than 20 packets without a psh\n"));
			printk("tcp_read: possible read_queue corruption.\n");
			return(amount);
		}
#endif
		/* A gap in sequence space ends the contiguous run. */
		if (before(counted, skb->h.th->seq))
			break;
		sum = skb->len -(counted - skb->h.th->seq);
		if (skb->h.th->syn)
			sum++;
		if (sum >= 0) {
			amount += sum;
			/* SYN occupies sequence space but carries no data. */
			if (skb->h.th->syn) amount--;
			counted += sum;
		}
		/* PSH after some data: report what we have so far. */
		if (amount && skb->h.th->psh) break;
		skb =(struct sk_buff *)skb->next;
	} while(skb != sk->rqueue);
	/* Discount the urgent byte if it lies within the counted span
	 * and would not be delivered inline. */
	if (amount && !sk->urginline && sk->urg_data &&
	    (sk->urg_seq - sk->copied_seq) <= (counted - sk->copied_seq))
		amount--;
	restore_flags(flags);
	/* NOTE(review): amount is unsigned long but DPRINTF uses %d and
	 * the function returns int — harmless on i386, worth confirming. */
	DPRINTF((DBG_TCP, "tcp readable returning %d bytes\n", amount));
	if(sk->debug)
		printk("got %lu bytes.\n",amount);
	return(amount);
}
354
355
356
357
358
359
360
/*
 * select() support for TCP sockets.  For each selector type we first
 * register on the socket's wait queue, then test the condition, so a
 * wakeup between test and sleep cannot be lost.  Returns 1 when the
 * socket is ready for the requested operation, 0 to sleep.
 */
static int
tcp_select(struct sock *sk, int sel_type, select_table *wait)
{
	DPRINTF((DBG_TCP, "tcp_select(sk=%X, sel_type = %d, wait = %X)\n",
						sk, sel_type, wait));

	sk->inuse = 1;
	switch(sel_type) {
	case SEL_IN:
		if(sk->debug)
			printk("select in");
		select_wait(sk->sleep, wait);
		if(sk->debug)
			printk("-select out");
		/* Readable: data queued (listening sockets count pending
		 * connections as "data"). */
		if (skb_peek(&sk->rqueue) != NULL) {
			if (sk->state == TCP_LISTEN || tcp_readable(sk)) {
				release_sock(sk);
				if(sk->debug)
					printk("-select ok data\n");
				return(1);
			}
		}
		/* Pending error or a shut-down receive side also wake
		 * the reader immediately. */
		if (sk->err != 0)
		{
			release_sock(sk);
			if(sk->debug)
				printk("-select ok error");
			return(1);
		}
		if (sk->shutdown & RCV_SHUTDOWN) {
			release_sock(sk);
			if(sk->debug)
				printk("-select ok down\n");
			return(1);
		} else {
			release_sock(sk);
			if(sk->debug)
				printk("-select fail\n");
			return(0);
		}
	case SEL_OUT:
		select_wait(sk->sleep, wait);
		if (sk->shutdown & SEND_SHUTDOWN) {
			DPRINTF((DBG_TCP,
				"write select on shutdown socket.\n"));
			/* Send side closed: never writable. */
			release_sock(sk);
			return(0);
		}

		/* Writable only when at least one full segment of send
		 * buffer space is free and the handshake has completed. */
		if (sk->prot->wspace(sk) >= sk->mss) {
			release_sock(sk);
			/* Connection still being set up: not writable yet. */
			if (sk->state == TCP_SYN_RECV ||
			    sk->state == TCP_SYN_SENT) return(0);
			return(1);
		}
		DPRINTF((DBG_TCP,
			"tcp_select: sleeping on write sk->wmem_alloc = %d, "
			"sk->packets_out = %d\n"
			"sk->wback = %X, sk->wfront = %X\n"
			"sk->write_seq = %u, sk->window_seq=%u\n",
				sk->wmem_alloc, sk->packets_out,
				sk->wback, sk->wfront,
				sk->write_seq, sk->window_seq));

		release_sock(sk);
		return(0);
	case SEL_EX:
		select_wait(sk->sleep,wait);
		/* Exceptional condition: error pending or urgent data. */
		if (sk->err || sk->urg_data) {
			release_sock(sk);
			return(1);
		}
		release_sock(sk);
		return(0);
	}

	release_sock(sk);
	return(0);
}
448
449
/*
 * ioctl() handling for TCP sockets:
 *   DDIOCSDBG  - adjust the inet debugging level;
 *   TIOCINQ    - bytes readable right now (invalid while listening);
 *   SIOCATMARK - non-zero when the next byte to read is urgent data;
 *   TIOCOUTQ   - free space in the send buffer.
 * All value-returning commands copy the result out to user space.
 */
int
tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int err;
	DPRINTF((DBG_TCP, "tcp_ioctl(sk=%X, cmd = %d, arg=%X)\n", sk, cmd, arg));
	switch(cmd) {
	case DDIOCSDBG:
		return(dbg_ioctl((void *) arg, DBG_TCP));

	case TIOCINQ:
#ifdef FIXME
	case FIONREAD:
#endif
		{
			unsigned long amount;

			if (sk->state == TCP_LISTEN) return(-EINVAL);

			sk->inuse = 1;
			amount = tcp_readable(sk);
			release_sock(sk);
			DPRINTF((DBG_TCP, "returning %d\n", amount));
			err=verify_area(VERIFY_WRITE,(void *)arg,
						   sizeof(unsigned long));
			if(err)
				return err;
			put_fs_long(amount,(unsigned long *)arg);
			return(0);
		}
	case SIOCATMARK:
		{
			/* At the mark when the urgent byte is the very next
			 * byte the user would read. */
			int answ = sk->urg_data && sk->urg_seq == sk->copied_seq+1;

			/* NOTE(review): the area is verified for an unsigned
			 * long but written through an int pointer with
			 * put_fs_long — identical sizes on i386, but the
			 * types disagree; confirm before porting. */
			err = verify_area(VERIFY_WRITE,(void *) arg,
						  sizeof(unsigned long));
			if (err)
				return err;
			put_fs_long(answ,(int *) arg);
			return(0);
		}
	case TIOCOUTQ:
		{
			unsigned long amount;

			if (sk->state == TCP_LISTEN) return(-EINVAL);
			amount = sk->prot->wspace(sk);
			err=verify_area(VERIFY_WRITE,(void *)arg,
						   sizeof(unsigned long));
			if(err)
				return err;
			put_fs_long(amount,(unsigned long *)arg);
			return(0);
		}
	default:
		return(-EINVAL);
	}
}
507
508
509
/*
 * Compute the TCP checksum (pseudo-header + header + data) using
 * i386 inline assembly.  'len' is the TCP segment length in bytes;
 * the segment is summed 4 bytes at a time with carry folding, then
 * the trailing 2- and 1-byte remainders are added, and finally the
 * 32-bit sum is folded to 16 bits and complemented.
 * NOTE(review): strictly i386-specific — the asm clobbers/constraints
 * and the ntohs(len) pseudo-header trick assume little-endian x86.
 */
unsigned short
tcp_check(struct tcphdr *th, int len,
	  unsigned long saddr, unsigned long daddr)
{
	unsigned long sum;

	if (saddr == 0) saddr = my_addr();
	print_th(th);
	/* Pseudo-header: source addr + dest addr + (len, proto). */
	__asm__("\t addl %%ecx,%%ebx\n"
		"\t adcl %%edx,%%ebx\n"
		"\t adcl $0, %%ebx\n"
		: "=b"(sum)
		: "0"(daddr), "c"(saddr), "d"((ntohs(len) << 16) + IPPROTO_TCP*256)
		: "cx","bx","dx" );

	/* Sum the bulk of the segment one dword at a time. */
	if (len > 3) {
		__asm__("\tclc\n"
			"1:\n"
			"\t lodsl\n"
			"\t adcl %%eax, %%ebx\n"
			"\t loop 1b\n"
			"\t adcl $0, %%ebx\n"
			: "=b"(sum) , "=S"(th)
			: "0"(sum), "c"(len/4) ,"1"(th)
			: "ax", "cx", "bx", "si" );
	}

	/* Fold the 32-bit sum into 16 bits. */
	__asm__("\t movl %%ebx, %%ecx\n"
		"\t shrl $16,%%ecx\n"
		"\t addw %%cx, %%bx\n"
		"\t adcw $0, %%bx\n"
		: "=b"(sum)
		: "0"(sum)
		: "bx", "cx");

	/* Leftover 16-bit word, if the length is not dword-aligned. */
	if ((len & 2) != 0) {
		__asm__("\t lodsw\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum), "=S"(th)
			: "0"(sum) ,"1"(th)
			: "si", "ax", "bx");
	}

	/* Final odd byte, zero-extended to a word. */
	if ((len & 1) != 0) {
		__asm__("\t lodsb\n"
			"\t movb $0,%%ah\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum)
			: "0"(sum) ,"S"(th)
			: "si", "ax", "bx");
	}

	/* One's-complement of the folded sum is the checksum. */
	return((~sum) & 0xffff);
}
570
571
572 void tcp_send_check(struct tcphdr *th, unsigned long saddr,
573 unsigned long daddr, int len, struct sock *sk)
574 {
575 th->check = 0;
576 th->check = tcp_check(th, len, saddr, daddr);
577 return;
578 }
579
/*
 * Hand a fully built TCP segment either to the IP layer for immediate
 * transmission or, when the send window / congestion window / pending
 * retransmits forbid sending now, append it to the socket's write
 * queue.  Bogus buffers (bad size, or a data-less segment that is
 * neither SYN nor FIN) are discarded.
 */
static void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
{
	int size;
	struct tcphdr * th = skb->h.th;

	/* Bytes from the start of the TCP header to the end of data. */
	size = skb->len - ((unsigned char *) th - skb->data);

	/* Sanity check the computed size against the buffer. */
	if (size < sizeof(struct tcphdr) || size > skb->len) {
		printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
			skb, skb->data, th, skb->len);
		kfree_skb(skb, FREE_WRITE);
		return;
	}

	/* A header-only segment must at least carry SYN or FIN. */
	if (size == sizeof(struct tcphdr)) {
		if(!th->syn && !th->fin) {
			printk("tcp_send_skb: attempt to queue a bogon.\n");
			kfree_skb(skb,FREE_WRITE);
			return;
		}
	}

	tcp_send_check(th, sk->saddr, sk->daddr, size, sk);

	/* End-of-data sequence number for this segment. */
	skb->h.seq = ntohl(th->seq) + size - 4*th->doff;
	if (after(skb->h.seq, sk->window_seq) ||
	    (sk->retransmits && sk->timeout == TIME_WRITE) ||
	    sk->packets_out >= sk->cong_window) {
		/* Cannot transmit now: link onto the write queue. */
		DPRINTF((DBG_TCP, "sk->cong_window = %d, sk->packets_out = %d\n",
					sk->cong_window, sk->packets_out));
		DPRINTF((DBG_TCP, "sk->write_seq = %d, sk->window_seq = %d\n",
					sk->write_seq, sk->window_seq));
		skb->next = NULL;
		skb->magic = TCP_WRITE_QUEUE_MAGIC;
		if (sk->wback == NULL) {
			sk->wfront = skb;
		} else {
			sk->wback->next = skb;
		}
		sk->wback = skb;
		/* Zero-window deadlock guard: arm the probe timer when
		 * nothing else will wake this queue up. */
		if (before(sk->window_seq, sk->wfront->h.seq) &&
		    sk->send_head == NULL &&
		    sk->ack_backlog == 0)
			reset_timer(sk, TIME_PROBE0, sk->rto);
	} else {
		sk->sent_seq = sk->write_seq;
		sk->prot->queue_xmit(sk, skb->dev, skb, 0);
	}
}
634
/*
 * Atomically detach the pending partial (un-filled) segment from the
 * socket, cancelling its flush timer.  Returns NULL when no partial
 * segment is queued.  Interrupts are disabled across the detach so
 * the timer handler cannot race with us.
 */
struct sk_buff * tcp_dequeue_partial(struct sock * sk)
{
	struct sk_buff * skb;
	unsigned long flags;

	save_flags(flags);
	cli();
	skb = sk->partial;
	if (skb) {
		sk->partial = NULL;
		del_timer(&sk->partial_timer);
	}
	restore_flags(flags);
	return skb;
}
650
651 static void tcp_send_partial(struct sock *sk)
652 {
653 struct sk_buff *skb;
654
655 if (sk == NULL)
656 return;
657 while ((skb = tcp_dequeue_partial(sk)) != NULL)
658 tcp_send_skb(sk, skb);
659 }
660
/*
 * Install 'skb' as the socket's pending partial segment and arm a
 * timer that will flush it via tcp_send_partial().  If a previous
 * partial segment was pending, it is swapped out atomically (with
 * interrupts off) and transmitted once interrupts are re-enabled.
 * NOTE(review): expires is set to HZ directly — this relies on the
 * old relative-tick timer semantics; confirm against add_timer().
 */
void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk)
{
	struct sk_buff * tmp;
	unsigned long flags;

	save_flags(flags);
	cli();
	tmp = sk->partial;
	if (tmp)
		del_timer(&sk->partial_timer);
	sk->partial = skb;
	sk->partial_timer.expires = HZ;
	sk->partial_timer.function = (void (*)(unsigned long)) tcp_send_partial;
	sk->partial_timer.data = (unsigned long) sk;
	add_timer(&sk->partial_timer);
	restore_flags(flags);
	/* Send the displaced partial segment outside the cli section. */
	if (tmp)
		tcp_send_skb(sk, tmp);
}
680
681
682
/*
 * Build and transmit a bare ACK segment carrying sequence number
 * 'sequence' and acknowledging 'ack'.  If no buffer memory is
 * available the ACK is deferred: the backlog counter is bumped and a
 * short retransmit timer is set so it will be sent later.  When the
 * ACK covers everything received, the pending-ack state is cleared
 * and an idle socket's timer is downgraded to keepalive (or removed).
 */
static void
tcp_send_ack(unsigned long sequence, unsigned long ack,
	     struct sock *sk,
	     struct tcphdr *th, unsigned long daddr)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	struct device *dev = NULL;
	int tmp;

	/* A zapped (reset) socket never sends anything. */
	if(sk->zapped)
		return;

	buff = sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL) {
		/* No memory: remember we owe an ACK and retry via timer. */
		sk->ack_backlog++;
		if (sk->timeout != TIME_WRITE && tcp_connected(sk->state)) {
			reset_timer(sk, TIME_WRITE, 10);
		}
		if (inet_debug == DBG_SLIP) printk("\rtcp_ack: malloc failed\n");
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_ACK_SIZE;
	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;
	t1 =(struct tcphdr *) buff->data;

	/* Lay down the IP (and link) headers first. */
	tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
				IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0) {
		buff->free=1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		if (inet_debug == DBG_SLIP) printk("\rtcp_ack: build_header failed\n");
		return;
	}
	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)t1 +tmp);

	/* Start from the incoming header, then swap/patch the fields. */
	memcpy(t1, th, sizeof(*t1));

	t1->dest = th->source;
	t1->source = th->dest;
	t1->seq = ntohl(sequence);
	t1->ack = 1;
	sk->window = tcp_select_window(sk);
	t1->window = ntohs(sk->window);
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	t1->fin = 0;
	if (ack == sk->acked_seq) {
		/* Everything received is now acknowledged. */
		sk->ack_backlog = 0;
		sk->bytes_rcv = 0;
		sk->ack_timed = 0;
		if (sk->send_head == NULL && sk->wfront == NULL && sk->timeout == TIME_WRITE)
		{
			if(sk->keepopen)
				reset_timer(sk,TIME_KEEPOPEN,TCP_TIMEOUT_LEN);
			else
				delete_timer(sk);
		}
	}
	t1->ack_seq = ntohl(ack);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), sk);
	if (sk->debug)
		printk("\rtcp_ack: seq %lx ack %lx\n", sequence, ack);
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
764
765
766
/*
 * Fill in a TCP header for outgoing data, cloned from the socket's
 * template header (dummy_th).  'push' is the number of bytes that will
 * remain unsent after this segment: PSH is set only when push == 0,
 * i.e. this segment carries the last byte the caller has for now.
 * Also clears the pending-ack bookkeeping, since this segment carries
 * the current ack and window.  Returns the header length in bytes.
 */
static int
tcp_build_header(struct tcphdr *th, struct sock *sk, int push)
{
	memcpy(th,(void *) &(sk->dummy_th), sizeof(*th));
	th->seq = htonl(sk->write_seq);
	th->psh =(push == 0) ? 1 : 0;
	th->doff = sizeof(*th)/4;
	th->ack = 1;
	th->fin = 0;
	/* This segment doubles as an ACK, so the backlog is satisfied. */
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	sk->ack_timed = 0;
	th->ack_seq = htonl(sk->acked_seq);
	sk->window = tcp_select_window(sk);
	th->window = htons(sk->window);

	return(sizeof(*th));
}
787
788
789
790
791
/*
 * Send 'len' bytes of user data on a TCP socket.
 *
 * Loops carving the user buffer into segments:
 *   1. Report any pending socket error / shutdown first.
 *   2. Block (or return -EAGAIN when nonblock) until the connection
 *      is ESTABLISHED or CLOSE_WAIT; a dead connection yields -EPIPE
 *      (plus SIGPIPE when keepopen is set).
 *   3. If a partial segment is pending, top it up first and either
 *      send it or re-queue it (Nagle-style coalescing).
 *   4. Otherwise size a new segment from the send window and mss,
 *      allocate a buffer (sleeping if memory is tight), build the
 *      IP+TCP headers, copy the user data in, and either transmit it
 *      or park it as the new partial segment.
 * Returns the number of bytes accepted, or a negative errno when
 * nothing was copied before the error.
 */
static int
tcp_write(struct sock *sk, unsigned char *from,
	  int len, int nonblock, unsigned flags)
{
	int copied = 0;
	int copy;
	int tmp;
	struct sk_buff *skb;
	struct sk_buff *send_tmp;
	unsigned char *buff;
	struct proto *prot;
	struct device *dev = NULL;

	DPRINTF((DBG_TCP, "tcp_write(sk=%X, from=%X, len=%d, nonblock=%d, flags=%X)\n",
					sk, from, len, nonblock, flags));

	sk->inuse=1;
	prot = sk->prot;
	while(len > 0) {
		/* Deliver a pending error, unless we already copied data
		 * (then report the byte count and leave the error for
		 * the next call). */
		if (sk->err) {
			release_sock(sk);
			if (copied) return(copied);
			tmp = -sk->err;
			sk->err = 0;
			return(tmp);
		}

		/* Writing after shutdown(SEND) is a broken pipe. */
		if (sk->shutdown & SEND_SHUTDOWN) {
			release_sock(sk);
			sk->err = EPIPE;
			if (copied) return(copied);
			sk->err = 0;
			return(-EPIPE);
		}

		/* Wait for a state in which data may be sent. */
		while(sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) {
			if (sk->err) {
				release_sock(sk);
				if (copied) return(copied);
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			/* Not connecting either: the connection is gone. */
			if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 1\n"));
				if (copied) return(copied);

				if (sk->err) {
					tmp = -sk->err;
					sk->err = 0;
					return(tmp);
				}

				if (sk->keepopen) {
					send_sig(SIGPIPE, current, 0);
				}
				return(-EPIPE);
			}

			if (nonblock || copied) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 2\n"));
				if (copied) return(copied);
				return(-EAGAIN);
			}

			/* Sleep until the handshake completes; recheck the
			 * state with interrupts off to avoid a lost wakeup. */
			release_sock(sk);
			cli();
			if (sk->state != TCP_ESTABLISHED &&
			    sk->state != TCP_CLOSE_WAIT && sk->err == 0) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					DPRINTF((DBG_TCP, "tcp_write: return 3\n"));
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
		}

		/* Top up a pending partial segment before building a
		 * new one. */
		if ((skb = tcp_dequeue_partial(sk)) != NULL) {
			int hdrlen;

			/* IP header bytes plus the fixed TCP header. */
			hdrlen = ((unsigned long)skb->h.th - (unsigned long)skb->data)
				 + sizeof(struct tcphdr);

			/* OOB data goes in its own segment; otherwise fill
			 * the partial segment up to one mss. */
			if (!(flags & MSG_OOB)) {
				copy = min(sk->mss - (skb->len - hdrlen), len);

				if (copy <= 0) {
					printk("TCP: **bug**: \"copy\" <= 0!!\n");
					copy = 0;
				}

				memcpy_fromfs(skb->data + skb->len, from, copy);
				skb->len += copy;
				from += copy;
				copied += copy;
				len -= copy;
				sk->write_seq += copy;
			}
			/* Full segment, OOB, or idle wire: send now.
			 * Otherwise keep coalescing. */
			if ((skb->len - hdrlen) >= sk->mss ||
			    (flags & MSG_OOB) ||
			    !sk->packets_out)
				tcp_send_skb(sk, skb);
			else
				tcp_enqueue_partial(skb, sk);
			continue;
		}

		/* Size the next segment: remaining window, clamped into
		 * a sane range, never more than mss or what is left. */
		copy = sk->window_seq - sk->write_seq;
		if (copy <= 0 || copy < (sk->max_window >> 1) || copy > sk->mss)
			copy = sk->mss;
		if (copy > len)
			copy = len;

		/* A sub-mss segment gets a full-size buffer so it can be
		 * grown later as a partial segment. */
		send_tmp = NULL;
		if (copy < sk->mss && !(flags & MSG_OOB)) {
			release_sock(sk);
			skb = prot->wmalloc(sk, sk->mtu + 128 + prot->max_header + sizeof(*skb), 0, GFP_KERNEL);
			sk->inuse = 1;
			send_tmp = skb;
		} else {
			release_sock(sk);
			skb = prot->wmalloc(sk, copy + prot->max_header + sizeof(*skb), 0, GFP_KERNEL);
			sk->inuse = 1;
		}

		/* No buffer memory: block until some is freed (or bail
		 * out when nonblocking). */
		if (skb == NULL) {
			if (nonblock ) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 4\n"));
				if (copied) return(copied);
				return(-EAGAIN);
			}

			/* Sleep only if no memory was freed since we
			 * sampled wmem_alloc (lost-wakeup guard). */
			tmp = sk->wmem_alloc;
			release_sock(sk);
			cli();

			if (tmp <= sk->wmem_alloc &&
			    (sk->state == TCP_ESTABLISHED||sk->state == TCP_CLOSE_WAIT)
			    && sk->err == 0) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					DPRINTF((DBG_TCP, "tcp_write: return 5\n"));
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
			continue;
		}

		skb->len = 0;
		skb->sk = sk;
		skb->free = 0;

		buff = skb->data;

		/* Build the lower-layer (IP) headers. */
		tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev,
					 IPPROTO_TCP, sk->opt, skb->mem_len,sk->ip_tos,sk->ip_ttl);
		if (tmp < 0 ) {
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 6\n"));
			if (copied) return(copied);
			return(tmp);
		}
		skb->len += tmp;
		skb->dev = dev;
		buff += tmp;
		skb->h.th =(struct tcphdr *) buff;
		/* len-copy = bytes still to send after this segment;
		 * PSH is set when it reaches zero. */
		tmp = tcp_build_header((struct tcphdr *)buff, sk, len-copy);
		if (tmp < 0) {
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 7\n"));
			if (copied) return(copied);
			return(tmp);
		}

		if (flags & MSG_OOB) {
			((struct tcphdr *)buff)->urg = 1;
			((struct tcphdr *)buff)->urg_ptr = ntohs(copy);
		}
		skb->len += tmp;
		memcpy_fromfs(buff+tmp, from, copy);

		from += copy;
		copied += copy;
		len -= copy;
		skb->len += copy;
		skb->free = 0;
		sk->write_seq += copy;

		/* Undersized segment while data is in flight: hold it
		 * back as the partial segment (Nagle). */
		if (send_tmp != NULL && sk->packets_out) {
			tcp_enqueue_partial(send_tmp, sk);
			continue;
		}
		tcp_send_skb(sk, skb);
	}
	sk->err = 0;

	/* Force out a leftover partial segment when the wire is idle,
	 * or when Nagle is disabled and the window allows it. */
	if(sk->partial &&
	   ((!sk->packets_out)
	/* If not nagling we can send on the before case too.. */
	    || (sk->nonagle && before(sk->write_seq , sk->window_seq))
	   ))
		tcp_send_partial(sk);

	release_sock(sk);
	DPRINTF((DBG_TCP, "tcp_write: return 8\n"));
	return(copied);
}
1063
1064
1065 static int
1066 tcp_sendto(struct sock *sk, unsigned char *from,
1067 int len, int nonblock, unsigned flags,
1068 struct sockaddr_in *addr, int addr_len)
1069 {
1070 struct sockaddr_in sin;
1071
1072 if (addr_len < sizeof(sin)) return(-EINVAL);
1073 memcpy_fromfs(&sin, addr, sizeof(sin));
1074 if (sin.sin_family && sin.sin_family != AF_INET) return(-EINVAL);
1075 if (sin.sin_port != sk->dummy_th.dest) return(-EINVAL);
1076 if (sin.sin_addr.s_addr != sk->daddr) return(-EINVAL);
1077 return(tcp_write(sk, from, len, nonblock, flags));
1078 }
1079
1080
/*
 * Send a window-update ACK after the reader has freed receive buffer
 * space.  Does nothing unless an ACK is owed (ack_backlog non-zero).
 * On allocation failure the attempt is retried later via a short
 * write timer.
 */
static void
tcp_read_wakeup(struct sock *sk)
{
	int tmp;
	struct device *dev = NULL;
	struct tcphdr *t1;
	struct sk_buff *buff;

	DPRINTF((DBG_TCP, "in tcp read wakeup\n"));
	if (!sk->ack_backlog) return;

	buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
	if (buff == NULL) {
		/* Out of memory: try again shortly. */
		reset_timer(sk, TIME_WRITE, 10);
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_ACK_SIZE;
	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;

	/* Lower-layer (IP) headers first. */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
			       IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0) {
		buff->free=1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		return;
	}

	buff->len += tmp;
	t1 =(struct tcphdr *)(buff->data +tmp);

	/* Clone the template header and turn it into a pure ACK that
	 * advertises the newly freed window. */
	memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
	t1->seq = htonl(sk->sent_seq);
	t1->ack = 1;
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	sk->window = tcp_select_window(sk);
	t1->window = ntohs(sk->window);
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
1144
1145
1146
1147
1148
1149
1150
1151
/*
 * Release consumed buffers from the receive queue and, if that opened
 * up receive space, arrange for the peer to learn about the larger
 * window: immediately (tcp_read_wakeup) when a whole extra MTU of
 * space appeared, otherwise via a delayed ACK timer.
 */
static void
cleanup_rbuf(struct sock *sk)
{
	unsigned long flags;
	int left;
	struct sk_buff *skb;

	if(sk->debug)
		printk("cleaning rbuf for sk=%p\n", sk);

	save_flags(flags);
	cli();

	/* Remember the free space before releasing anything. */
	left = sk->prot->rspace(sk);

	/* Free fully-consumed buffers from the head of the queue. */
	while((skb=skb_peek(&sk->rqueue)) != NULL )
	{
		if (!skb->used)
			break;
		skb_unlink(skb);
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
	}

	restore_flags(flags);

	DPRINTF((DBG_TCP, "sk->window left = %d, sk->prot->rspace(sk)=%d\n",
			sk->window - sk->bytes_rcv, sk->prot->rspace(sk)));

	if(sk->debug)
		printk("sk->rspace = %lu, was %d\n", sk->prot->rspace(sk),
					    left);
	if (sk->prot->rspace(sk) != left)
	{
		/* Space was freed: owe the peer a window update. */
		sk->ack_backlog++;

		if ((sk->prot->rspace(sk) > (sk->window - sk->bytes_rcv + sk->mtu))) {
			/* The window grew by a full MTU: update now. */
			tcp_read_wakeup(sk);
		} else {
			/* Otherwise ack within TCP_ACK_TIME, preserving any
			 * earlier-firing timer already in place. */
			int was_active = del_timer(&sk->timer);
			if (!was_active || TCP_ACK_TIME < sk->timer.expires) {
				reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
			} else
				add_timer(&sk->timer);
		}
	}
}
1228
1229
1230
/*
 * Read the single byte of out-of-band (urgent) data.  Fails with
 * -EINVAL when OOB data is delivered inline, absent, or already read.
 * Blocks (unless 'nonblock') while the urgent byte is still in
 * flight (URG_NOTYET).  MSG_PEEK leaves the byte readable again.
 * Returns 1 on success, 0 on EOF conditions, negative errno on error.
 */
static int
tcp_read_urg(struct sock * sk, int nonblock,
	     unsigned char *to, int len, unsigned flags)
{
	struct wait_queue wait = { current, NULL };

	while (len > 0) {
		if (sk->urginline || !sk->urg_data || sk->urg_data == URG_READ)
			return -EINVAL;
		if (sk->urg_data & URG_VALID) {
			/* Low byte of urg_data holds the OOB byte itself. */
			char c = sk->urg_data;
			if (!(flags & MSG_PEEK))
				sk->urg_data = URG_READ;
			put_fs_byte(c, to);
			return 1;
		}

		if (sk->err) {
			int tmp = -sk->err;
			sk->err = 0;
			return tmp;
		}

		if (sk->state == TCP_CLOSE || sk->done) {
			/* First EOF report returns 0; later calls fail. */
			if (!sk->done) {
				sk->done = 1;
				return 0;
			}
			return -ENOTCONN;
		}

		if (sk->shutdown & RCV_SHUTDOWN) {
			sk->done = 1;
			return 0;
		}

		if (nonblock)
			return -EAGAIN;

		if (current->signal & ~current->blocked)
			return -ERESTARTSYS;

		/* Register, re-test the condition, then sleep — avoids
		 * missing a wakeup between test and schedule(). */
		current->state = TASK_INTERRUPTIBLE;
		add_wait_queue(sk->sleep, &wait);
		if ((sk->urg_data & URG_NOTYET) && sk->err == 0 &&
		    !(sk->shutdown & RCV_SHUTDOWN))
			schedule();
		remove_wait_queue(sk->sleep, &wait);
		current->state = TASK_RUNNING;
	}
	return 0;
}
1283
1284
1285
/*
 * Copy up to 'len' bytes of in-sequence data to user space.
 * MSG_OOB is diverted to tcp_read_urg(); MSG_PEEK reads without
 * consuming by advancing a private copy of copied_seq.  The walk over
 * the receive queue skips buffers already consumed, stops at the
 * urgent mark, and steps over the non-inline urgent byte.  Blocks
 * (unless 'nonblock') when no data is ready.  Returns bytes copied,
 * 0 at EOF, or a negative errno.
 */
static int tcp_read(struct sock *sk, unsigned char *to,
	int len, int nonblock, unsigned flags)
{
	struct wait_queue wait = { current, NULL };
	int copied = 0;
	unsigned long peek_seq;
	unsigned long *seq;
	unsigned long used;
	int err;

	if (len == 0)
		return 0;

	if (len < 0)
		return -EINVAL;

	err = verify_area(VERIFY_WRITE, to, len);
	if (err)
		return err;

	/* A listening socket has no data stream to read. */
	if (sk->state == TCP_LISTEN)
		return -ENOTCONN;

	/* Urgent data has its own reader. */
	if (flags & MSG_OOB)
		return tcp_read_urg(sk, nonblock, to, len, flags);

	/* PEEK advances a private sequence counter instead of the
	 * socket's, so nothing is consumed. */
	peek_seq = sk->copied_seq;
	seq = &sk->copied_seq;
	if (flags & MSG_PEEK)
		seq = &peek_seq;

	add_wait_queue(sk->sleep, &wait);
	sk->inuse = 1;
	while (len > 0) {
		struct sk_buff * skb;
		unsigned long offset;

		/* Never read past the urgent mark in one call. */
		if (copied && sk->urg_data && sk->urg_seq == 1+*seq)
			break;

		current->state = TASK_INTERRUPTIBLE;

		/* Find the buffer containing the next unread byte. */
		skb = sk->rqueue;
		do {
			if (!skb)
				break;
			if (before(1+*seq, skb->h.th->seq))
				break;
			offset = 1 + *seq - skb->h.th->seq;
			if (skb->h.th->syn)
				offset--;
			if (offset < skb->len)
				goto found_ok_skb;
			/* Fully consumed: mark for cleanup_rbuf(). */
			if (!(flags & MSG_PEEK))
				skb->used = 1;
			skb = (struct sk_buff *)skb->next;
		} while (skb != sk->rqueue);

		/* Nothing more available right now. */
		if (copied)
			break;

		if (sk->err) {
			copied = -sk->err;
			sk->err = 0;
			break;
		}

		if (sk->state == TCP_CLOSE) {
			if (!sk->done) {
				sk->done = 1;
				break;
			}
			copied = -ENOTCONN;
			break;
		}

		if (sk->shutdown & RCV_SHUTDOWN) {
			sk->done = 1;
			break;
		}

		if (nonblock) {
			copied = -EAGAIN;
			break;
		}

		/* Free consumed buffers (may open the window), then
		 * sleep until more data arrives. */
		cleanup_rbuf(sk);
		release_sock(sk);
		schedule();
		sk->inuse = 1;

		if (current->signal & ~current->blocked) {
			copied = -ERESTARTSYS;
			break;
		}
		continue;

	found_ok_skb:
		/* Bytes available in this buffer from 'offset' on. */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Stop at the urgent mark; skip the urgent byte itself
		 * when it is not delivered inline. */
		if (sk->urg_data) {
			unsigned long urg_offset = sk->urg_seq - (1 + *seq);
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sk->urginline) {
						++*seq;
						offset++;
						used--;
					}
				} else
					used = urg_offset;
			}
		}

		/* Data begins right after the TCP header + options. */
		memcpy_tofs(to,((unsigned char *)skb->h.th) +
			skb->h.th->doff*4 + offset, used);
		copied += used;
		len -= used;
		to += used;
		*seq += used;
		/* Urgent byte now behind us: clear the urgent state. */
		if (after(sk->copied_seq+1,sk->urg_seq))
			sk->urg_data = 0;
		if (!(flags & MSG_PEEK) && (used + offset >= skb->len))
			skb->used = 1;
	}
	remove_wait_queue(sk->sleep, &wait);
	current->state = TASK_RUNNING;

	/* Release consumed buffers and send any owed window update. */
	cleanup_rbuf(sk);
	release_sock(sk);
	DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
	return copied;
}
1428
1429
1430
1431
1432
1433
/*
 * shutdown() on the send side: flush any pending partial segment,
 * then build and send (or queue behind pending data) a FIN segment,
 * moving the socket into FIN_WAIT1/FIN_WAIT2.  Shutting down the
 * receive side needs no protocol action.  If the FIN buffer cannot
 * be built, the state is still advanced and the FIN left unsent.
 */
void
tcp_shutdown(struct sock *sk, int how)
{
	struct sk_buff *buff;
	struct tcphdr *t1, *th;
	struct proto *prot;
	int tmp;
	struct device *dev = NULL;

	/* Already closing, or only the receive side is affected:
	 * nothing to transmit. */
	if (sk->state == TCP_FIN_WAIT1 || sk->state == TCP_FIN_WAIT2) return;
	if (!(how & SEND_SHUTDOWN)) return;
	sk->inuse = 1;

	/* Push out any half-built segment before the FIN. */
	if (sk->partial)
		tcp_send_partial(sk);

	prot =(struct proto *)sk->prot;
	th =(struct tcphdr *)&sk->dummy_th;
	release_sock(sk);
	buff = prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
	if (buff == NULL) return;
	sk->inuse = 1;

	DPRINTF((DBG_TCP, "tcp_shutdown_send buff = %X\n", buff));
	buff->mem_addr = buff;
	buff->mem_len = MAX_RESET_SIZE;
	buff->sk = sk;
	buff->len = sizeof(*t1);
	t1 =(struct tcphdr *) buff->data;

	/* Lower-layer headers first. */
	tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
			   IPPROTO_TCP, sk->opt,
			   sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
	if (tmp < 0) {
		/* Could not build the packet: advance state anyway. */
		buff->free=1;
		prot->wfree(sk,buff->mem_addr, buff->mem_len);
		if(sk->state==TCP_ESTABLISHED)
			sk->state=TCP_FIN_WAIT1;
		else
			sk->state=TCP_FIN_WAIT2;
		release_sock(sk);
		DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
		return;
	}

	t1 =(struct tcphdr *)((char *)t1 +tmp);
	buff->len += tmp;
	buff->dev = dev;
	memcpy(t1, th, sizeof(*t1));
	/* The FIN consumes one sequence number. */
	t1->seq = ntohl(sk->write_seq);
	sk->write_seq++;
	buff->h.seq = sk->write_seq;
	t1->ack = 1;
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->window = ntohs(sk->window=tcp_select_window(sk));
	t1->fin = 1;
	t1->rst = 0;
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

	/* Unsent data pending: the FIN must queue behind it so it is
	 * the last segment on the wire. */
	if (sk->wback != NULL) {
		buff->free=0;
		buff->next = NULL;
		sk->wback->next = buff;
		sk->wback = buff;
		buff->magic = TCP_WRITE_QUEUE_MAGIC;
	} else {
		sk->sent_seq = sk->write_seq;
		sk->prot->queue_xmit(sk, dev, buff, 0);
	}

	if (sk->state == TCP_ESTABLISHED) sk->state = TCP_FIN_WAIT1;
	else sk->state = TCP_FIN_WAIT2;

	release_sock(sk);
}
1525
1526
/*
 * recvfrom() on a TCP socket: a plain tcp_read() that additionally
 * reports the (fixed) peer address back to the caller.  The address
 * buffer is validated before reading so a fault cannot lose data.
 */
static int
tcp_recvfrom(struct sock *sk, unsigned char *to,
	     int to_len, int nonblock, unsigned flags,
	     struct sockaddr_in *addr, int *addr_len)
{
	struct sockaddr_in sin;
	int len;
	int err;
	int result;

	err = verify_area(VERIFY_WRITE,addr_len,sizeof(long));
	if(err)
		return err;
	len = get_fs_long(addr_len);
	if(len > sizeof(sin))
		len = sizeof(sin);
	err=verify_area(VERIFY_WRITE, addr, len);
	if(err)
		return err;

	result=tcp_read(sk, to, to_len, nonblock, flags);

	if (result < 0) return(result);

	/* The peer of a connected TCP socket never changes. */
	sin.sin_family = AF_INET;
	sin.sin_port = sk->dummy_th.dest;
	sin.sin_addr.s_addr = sk->daddr;

	memcpy_tofs(addr, &sin, len);
	put_fs_long(len, addr_len);
	return(result);
}
1562
1563
1564
/*
 * Send a RST segment in reply to the header 'th' that arrived from
 * daddr addressed to saddr.  There is no owning socket here: the
 * reply sk_buff is built from scratch with an atomic allocation so
 * this can safely be called at interrupt time.  On any failure the
 * reset is simply not sent.
 */
static void
tcp_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
	  struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
{
  struct sk_buff *buff;
  struct tcphdr *t1;
  int tmp;

  /* NULL owner: the buffer is charged to the global pool, not a socket. */
  buff = prot->wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
  if (buff == NULL)
	return;

  DPRINTF((DBG_TCP, "tcp_reset buff = %X\n", buff));
  buff->mem_addr = buff;
  buff->mem_len = MAX_RESET_SIZE;
  buff->len = sizeof(*t1);
  buff->sk = NULL;
  buff->dev = dev;

  t1 =(struct tcphdr *) buff->data;

  /* Build the IP header / route; a negative return means no route. */
  tmp = prot->build_header(buff, saddr, daddr, &dev, IPPROTO_TCP, opt,
			   sizeof(struct tcphdr),tos,ttl);
  if (tmp < 0) {
	buff->free = 1;
	prot->wfree(NULL, buff->mem_addr, buff->mem_len);
	return;
  }
  t1 =(struct tcphdr *)((char *)t1 +tmp);
  buff->len += tmp;
  /* Start from a copy of the offending header, then rewrite the fields. */
  memcpy(t1, th, sizeof(*t1));

  /* Swap the source and destination ports: we answer the sender. */
  t1->dest = th->source;
  t1->source = th->dest;
  t1->rst = 1;
  t1->window = 0;

  if(th->ack)
  {
	/*
	 * RFC 793: if the incoming segment carried an ACK, the reset
	 * takes its sequence number from that ack field and does not
	 * ack anything itself.
	 */
	t1->ack = 0;
	t1->seq = th->ack_seq;
	t1->ack_seq = 0;
  }
  else
  {
	/*
	 * Otherwise use seq 0 and ack everything the segment occupied
	 * (a SYN consumes one sequence number).
	 * NOTE(review): th->seq appears to be in host order at this
	 * point (hence the htonl) - confirm against the callers.
	 */
	t1->ack = 1;
	if(!th->syn)
		t1->ack_seq=htonl(th->seq);
	else
		t1->ack_seq=htonl(th->seq+1);
	t1->seq=0;
  }

  t1->syn = 0;
  t1->urg = 0;
  t1->fin = 0;
  t1->psh = 0;
  t1->doff = sizeof(*t1)/4;
  tcp_send_check(t1, saddr, daddr, sizeof(*t1), NULL);
  /* Queue for transmit with the 'free after send' flag set. */
  prot->queue_xmit(NULL, dev, buff, 1);
}
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642 static void
1643 tcp_options(struct sock *sk, struct tcphdr *th)
1644 {
1645 unsigned char *ptr;
1646 int length=(th->doff*4)-sizeof(struct tcphdr);
1647 int mss_seen = 0;
1648
1649 ptr = (unsigned char *)(th + 1);
1650
1651 while(length>0)
1652 {
1653 int opcode=*ptr++;
1654 int opsize=*ptr++;
1655 switch(opcode)
1656 {
1657 case TCPOPT_EOL:
1658 return;
1659 case TCPOPT_NOP:
1660 length-=2;
1661 continue;
1662
1663 default:
1664 if(opsize<=2)
1665 return;
1666 switch(opcode)
1667 {
1668 case TCPOPT_MSS:
1669 if(opsize==4 && th->syn)
1670 {
1671 sk->mtu=min(sk->mtu,ntohs(*(unsigned short *)ptr));
1672 mss_seen = 1;
1673 }
1674 break;
1675
1676 }
1677 ptr+=opsize-2;
1678 length-=opsize;
1679 }
1680 }
1681 if (th->syn) {
1682 if (! mss_seen)
1683 sk->mtu=min(sk->mtu, 536);
1684 }
1685 sk->mss = min(sk->max_window, sk->mtu);
1686 }
1687
/*
 * Return the classful default netmask for the destination address
 * 'dst'.  Both the argument and the result are in network byte order.
 */
static inline unsigned long default_mask(unsigned long dst)
{
	unsigned long host = ntohl(dst);

	if (IN_CLASSA(host))
		return htonl(IN_CLASSA_NET);
	return IN_CLASSB(host) ? htonl(IN_CLASSB_NET)
			       : htonl(IN_CLASSC_NET);
}
1697
1698
1699
1700
1701
1702
1703
1704
/*
 * Handle an incoming connection request (SYN) arriving on the
 * listening socket 'sk'.  Clones the listener into a fresh 'newsk' in
 * state TCP_SYN_RECV, answers with a SYN|ACK carrying an MSS option,
 * and parks the original skb (now owned by newsk) on the listener's
 * receive queue so tcp_accept() can find the new socket.
 */
static void
tcp_conn_request(struct sock *sk, struct sk_buff *skb,
		 unsigned long daddr, unsigned long saddr,
		 struct options *opt, struct device *dev)
{
  struct sk_buff *buff;
  struct tcphdr *t1;
  unsigned char *ptr;
  struct sock *newsk;
  struct tcphdr *th;
  int tmp;

  DPRINTF((DBG_TCP, "tcp_conn_request(sk = %X, skb = %X, daddr = %X, sadd4= %X, \n"
	  " opt = %X, dev = %X)\n",
	  sk, skb, daddr, saddr, opt, dev));

  th = skb->h.th;

  /* A dead listener cannot accept: answer the SYN with a reset. */
  if (!sk->dead) {
	sk->data_ready(sk,0);
  } else {
	DPRINTF((DBG_TCP, "tcp_conn_request on dead socket\n"));
	tcp_reset(daddr, saddr, th, sk->prot, opt, dev, sk->ip_tos,sk->ip_ttl);
	kfree_skb(skb, FREE_READ);
	return;
  }

  /* Accept queue full: silently drop and let the peer retransmit. */
  if (sk->ack_backlog >= sk->max_ack_backlog) {
	kfree_skb(skb, FREE_READ);
	return;
  }

  /* Atomic allocation: we may be running at interrupt time. */
  newsk = (struct sock *) kmalloc(sizeof(struct sock), GFP_ATOMIC);
  if (newsk == NULL) {
	kfree_skb(skb, FREE_READ);
	return;
  }

  DPRINTF((DBG_TCP, "newsk = %X\n", newsk));
  /* Start from a bitwise copy of the listener, then reset all of the
   * per-connection state that must not be inherited. */
  memcpy((void *)newsk,(void *)sk, sizeof(*newsk));
  newsk->wback = NULL;
  newsk->wfront = NULL;
  newsk->rqueue = NULL;
  newsk->send_head = NULL;
  newsk->send_tail = NULL;
  newsk->back_log = NULL;
  newsk->rtt = TCP_CONNECT_TIME << 3;	/* rtt is kept scaled by 8 */
  newsk->rto = TCP_CONNECT_TIME;
  newsk->mdev = 0;
  newsk->max_window = 0;
  newsk->cong_window = 1;		/* start in slow start */
  newsk->cong_count = 0;
  newsk->ssthresh = 0;
  newsk->backoff = 0;
  newsk->blog = 0;
  newsk->intr = 0;
  newsk->proc = 0;
  newsk->done = 0;
  newsk->partial = NULL;
  newsk->pair = NULL;
  newsk->wmem_alloc = 0;
  newsk->rmem_alloc = 0;

  newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;

  newsk->err = 0;
  newsk->shutdown = 0;
  newsk->ack_backlog = 0;
  newsk->acked_seq = skb->h.th->seq+1;	/* the SYN consumes one sequence number */
  newsk->fin_seq = skb->h.th->seq;
  newsk->copied_seq = skb->h.th->seq;
  newsk->state = TCP_SYN_RECV;
  newsk->timeout = 0;
  /* NOTE(review): the initial send sequence is derived from jiffies,
   * which makes ISNs predictable - known weakness of this era's code. */
  newsk->write_seq = jiffies * SEQ_TICK - seq_offset;
  newsk->window_seq = newsk->write_seq;
  newsk->rcv_ack_seq = newsk->write_seq;
  newsk->urg_data = 0;
  newsk->retransmits = 0;
  newsk->destroy = 0;
  newsk->timer.data = (unsigned long)newsk;
  newsk->timer.function = &net_timer;
  newsk->dummy_th.source = skb->h.th->dest;
  newsk->dummy_th.dest = skb->h.th->source;

  /* Swap address sense: 'saddr' of the packet is our peer. */
  newsk->daddr = saddr;
  newsk->saddr = daddr;

  put_sock(newsk->num,newsk);
  newsk->dummy_th.res1 = 0;
  newsk->dummy_th.doff = 6;
  newsk->dummy_th.fin = 0;
  newsk->dummy_th.syn = 0;
  newsk->dummy_th.rst = 0;
  newsk->dummy_th.psh = 0;
  newsk->dummy_th.ack = 0;
  newsk->dummy_th.urg = 0;
  newsk->dummy_th.res2 = 0;
  newsk->acked_seq = skb->h.th->seq + 1;
  newsk->copied_seq = skb->h.th->seq;

  /* TOS is taken from the arriving packet, TTL from the listener. */
  newsk->ip_ttl=sk->ip_ttl;
  newsk->ip_tos=skb->ip_hdr->tos;

  /* Choose an initial MTU: a user-forced MSS wins; otherwise use a
   * conservative 576 for off-net peers and the maximum for local ones. */
  if (sk->user_mss)
	newsk->mtu = sk->user_mss;
  else {
#ifdef SUBNETSARELOCAL
	if ((saddr ^ daddr) & default_mask(saddr))
#else
	if ((saddr ^ daddr) & dev->pa_mask)
#endif
		newsk->mtu = 576 - HEADER_SIZE;
	else
		newsk->mtu = MAX_WINDOW;
  }

  /* Never exceed what the interface can carry. */
  newsk->mtu = min(newsk->mtu, dev->mtu - HEADER_SIZE);

  /* Pick up the peer's options (MSS in particular) from the SYN. */
  tcp_options(newsk,skb->h.th);

  buff = newsk->prot->wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
  if (buff == NULL) {
	sk->err = -ENOMEM;
	newsk->dead = 1;
	release_sock(newsk);
	kfree_skb(skb, FREE_READ);
	return;
  }

  buff->mem_addr = buff;
  buff->mem_len = MAX_SYN_SIZE;
  buff->len = sizeof(struct tcphdr)+4;	/* header plus the 4-byte MSS option */
  buff->sk = newsk;

  t1 =(struct tcphdr *) buff->data;

  /* Build the IP header and find the route back to the peer. */
  tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &dev,
			       IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);

  /* No route: undo everything and drop the request. */
  if (tmp < 0) {
	sk->err = tmp;
	buff->free=1;
	kfree_skb(buff,FREE_WRITE);
	newsk->dead = 1;
	release_sock(newsk);
	skb->sk = sk;
	kfree_skb(skb, FREE_READ);
	return;
  }

  buff->len += tmp;
  t1 =(struct tcphdr *)((char *)t1 +tmp);

  memcpy(t1, skb->h.th, sizeof(*t1));
  buff->h.seq = newsk->write_seq;

  /* Fill in the SYN|ACK reply. */
  t1->dest = skb->h.th->source;
  t1->source = newsk->dummy_th.source;
  t1->seq = ntohl(newsk->write_seq++);
  t1->ack = 1;
  newsk->window = tcp_select_window(newsk);
  newsk->sent_seq = newsk->write_seq;
  t1->window = ntohs(newsk->window);
  t1->res1 = 0;
  t1->res2 = 0;
  t1->rst = 0;
  t1->urg = 0;
  t1->psh = 0;
  t1->syn = 1;
  t1->ack_seq = ntohl(skb->h.th->seq+1);
  t1->doff = sizeof(*t1)/4+1;		/* +1 32-bit word for the MSS option */

  /* Append the MSS option: kind 2, length 4, 16-bit value. */
  ptr =(unsigned char *)(t1+1);
  ptr[0] = 2;
  ptr[1] = 4;
  ptr[2] = ((newsk->mtu) >> 8) & 0xff;
  ptr[3] =(newsk->mtu) & 0xff;

  tcp_send_check(t1, daddr, saddr, sizeof(*t1)+4, newsk);
  newsk->prot->queue_xmit(newsk, dev, buff, 0);

  /* Retransmit the SYN|ACK if no ACK comes back in time. */
  reset_timer(newsk, TIME_WRITE , TCP_CONNECT_TIME);
  skb->sk = newsk;

  /* Transfer the skb's memory accounting from the listener to newsk. */
  sk->rmem_alloc -= skb->mem_len;
  newsk->rmem_alloc += skb->mem_len;

  /* The SYN skb stays on the LISTENING socket's queue for accept(). */
  skb_queue_tail(&sk->rqueue,skb);
  sk->ack_backlog++;
  release_sock(newsk);
}
1918
1919
/*
 * Close a TCP socket.  Flushes the receive queue (remembering whether
 * unread data must turn the FIN into a reset), pushes out any partial
 * packet, and then either just adjusts the state machine or builds and
 * sends a FIN segment, depending on the current state.  'timeout'
 * non-zero means the caller will not wait for the full close.
 */
static void
tcp_close(struct sock *sk, int timeout)
{
  struct sk_buff *buff;
  int need_reset = 0;
  struct tcphdr *t1, *th;
  struct proto *prot;
  struct device *dev=NULL;
  int tmp;

  DPRINTF((DBG_TCP, "tcp_close((struct sock *)%X, %d)\n",sk, timeout));
  sk->inuse = 1;
  sk->keepopen = 1;
  sk->shutdown = SHUTDOWN_MASK;

  if (!sk->dead)
	sk->state_change(sk);

  /* Throw away unread data; if any of it was never consumed by the
   * application we must reset instead of closing gracefully. */
  if (skb_peek(&sk->rqueue) != NULL)
  {
	struct sk_buff *skb;
	if(sk->debug)
		printk("Clean rcv queue\n");
	while((skb=skb_dequeue(&sk->rqueue))!=NULL)
	{
		if(skb->len > 0 && after(skb->h.th->seq + skb->len + 1 , sk->copied_seq))
			need_reset = 1;
		kfree_skb(skb, FREE_READ);
	}
	if(sk->debug)
		printk("Cleaned.\n");
  }
  sk->rqueue = NULL;

  /* Flush any half-built (Nagle) packet before the FIN. */
  if (sk->partial) {
	tcp_send_partial(sk);
  }

  switch(sk->state) {
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
	case TCP_LAST_ACK:
		/* A FIN is already in flight: just (re)arm the close
		 * timer and, on a timed close, fall into TIME_WAIT. */
		reset_timer(sk, TIME_CLOSE, 4 * sk->rto);
		if (timeout) tcp_time_wait(sk);
		release_sock(sk);
		return;
	case TCP_TIME_WAIT:
		if (timeout) {
			sk->state = TCP_CLOSE;
		}
		release_sock(sk);
		return;
	case TCP_LISTEN:
		sk->state = TCP_CLOSE;
		release_sock(sk);
		return;
	case TCP_CLOSE:
		release_sock(sk);
		return;
	case TCP_CLOSE_WAIT:
	case TCP_ESTABLISHED:
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		prot =(struct proto *)sk->prot;
		th =(struct tcphdr *)&sk->dummy_th;
		buff = prot->wmalloc(sk, MAX_FIN_SIZE, 1, GFP_ATOMIC);
		if (buff == NULL) {
			/* Out of memory: back out of the close, keep the
			 * old state and retry shortly via a close timer. */
			release_sock(sk);
			if (sk->state != TCP_CLOSE_WAIT)
					sk->state = TCP_ESTABLISHED;
			reset_timer(sk, TIME_CLOSE, 100);
			return;
		}
		buff->mem_addr = buff;
		buff->mem_len = MAX_FIN_SIZE;
		buff->sk = sk;
		buff->free = 1;
		buff->len = sizeof(*t1);
		t1 =(struct tcphdr *) buff->data;

		/* Build the IP header and route for the FIN. */
		tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
					 IPPROTO_TCP, sk->opt,
					 sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
		if (tmp < 0) {
			/* Could not build it: advance the state anyway and
			 * rely on the close timer to finish the job. */
			kfree_skb(buff,FREE_WRITE);
			if(sk->state==TCP_ESTABLISHED)
				sk->state=TCP_FIN_WAIT1;
			else
				sk->state=TCP_FIN_WAIT2;
			reset_timer(sk, TIME_CLOSE,4*sk->rto);
			if(timeout)
				tcp_time_wait(sk);
			DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
			release_sock(sk);
			return;
		}

		t1 =(struct tcphdr *)((char *)t1 +tmp);
		buff->len += tmp;
		buff->dev = dev;
		memcpy(t1, th, sizeof(*t1));
		t1->seq = ntohl(sk->write_seq);
		sk->write_seq++;	/* the FIN consumes one sequence number */
		buff->h.seq = sk->write_seq;
		t1->ack = 1;

		/* Ack everything immediately from now on. */
		sk->delay_acks = 0;
		t1->ack_seq = ntohl(sk->acked_seq);
		t1->window = ntohs(sk->window=tcp_select_window(sk));
		t1->fin = 1;
		t1->rst = need_reset;	/* unread data pending: tell the peer */
		t1->doff = sizeof(*t1)/4;
		tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

		if (sk->wfront == NULL) {
			/* Nothing queued ahead of us: send the FIN now. */
			sk->sent_seq = sk->write_seq;
			prot->queue_xmit(sk, dev, buff, 0);
		} else {
			/* Data still queued: append the FIN behind it. */
			reset_timer(sk, TIME_WRITE, sk->rto);
			buff->next = NULL;
			if (sk->wback == NULL) {
				sk->wfront = buff;
			} else {
				sk->wback->next = buff;
			}
			sk->wback = buff;
			buff->magic = TCP_WRITE_QUEUE_MAGIC;
		}

		/* NOTE(review): RFC 793 would take CLOSE_WAIT to LAST_ACK
		 * here; this code uses FIN_WAIT2 instead and tcp_ack()
		 * appears to treat the two alike - confirm before changing. */
		if (sk->state == TCP_CLOSE_WAIT) {
			sk->state = TCP_FIN_WAIT2;
		} else {
			sk->state = TCP_FIN_WAIT1;
		}
  }
  release_sock(sk);
}
2073
2074
2075
2076
2077
2078
/*
 * Transmit packets waiting on the write queue (sk->wfront), for as
 * long as they fit inside the peer's advertised window, we are under
 * the congestion window, and we are not stalled in the middle of a
 * retransmission run.
 */
static void
tcp_write_xmit(struct sock *sk)
{
  struct sk_buff *skb;

  DPRINTF((DBG_TCP, "tcp_write_xmit(sk=%X)\n", sk));

  /* The socket was reset underneath us: nothing may be transmitted. */
  if(sk->zapped)
	return;

  while(sk->wfront != NULL &&
	before(sk->wfront->h.seq, sk->window_seq +1) &&
	(sk->retransmits == 0 ||
	 sk->timeout != TIME_WRITE ||
	 before(sk->wfront->h.seq, sk->rcv_ack_seq +1))
	&& sk->packets_out < sk->cong_window) {
	skb = sk->wfront;
	IS_SKB(skb);
	sk->wfront = skb->next;
	if (sk->wfront == NULL) sk->wback = NULL;
	skb->next = NULL;
	/* Corrupted queue entry: dump the whole queue rather than crash. */
	if (skb->magic != TCP_WRITE_QUEUE_MAGIC) {
		printk("tcp.c skb with bad magic(%X) on write queue. Squashing "
		       "queue\n", skb->magic);
		sk->wfront = NULL;
		sk->wback = NULL;
		return;
	}
	skb->magic = 0;
	DPRINTF((DBG_TCP, "Sending a packet.\n"));

	/* Already acknowledged (e.g. requeued by a window shrink that
	 * an ack later covered): free it instead of sending. */
	if (before(skb->h.seq, sk->rcv_ack_seq +1)) {
		sk->retransmits = 0;
		kfree_skb(skb, FREE_WRITE);
		if (!sk->dead) sk->write_space(sk);
	} else {
		sk->sent_seq = skb->h.seq;
		sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
	}
  }
}
2123
2124
2125
2126
2127
2128
2129 void
2130 sort_send(struct sock *sk)
2131 {
2132 struct sk_buff *list = NULL;
2133 struct sk_buff *skb,*skb2,*skb3;
2134
2135 for (skb = sk->send_head; skb != NULL; skb = skb2) {
2136 skb2 = (struct sk_buff *)skb->link3;
2137 if (list == NULL || before (skb2->h.seq, list->h.seq)) {
2138 skb->link3 = list;
2139 sk->send_tail = skb;
2140 list = skb;
2141 } else {
2142 for (skb3 = list; ; skb3 = (struct sk_buff *)skb3->link3) {
2143 if (skb3->link3 == NULL ||
2144 before(skb->h.seq, skb3->link3->h.seq)) {
2145 skb->link3 = skb3->link3;
2146 skb3->link3 = skb;
2147 if (skb->link3 == NULL) sk->send_tail = skb;
2148 break;
2149 }
2150 }
2151 }
2152 }
2153 sk->send_head = list;
2154 }
2155
2156
2157
/*
 * Process the acknowledgment information in an incoming segment.
 * Responsibilities: validate the ack number, track the peer's window
 * (including shrink handling, which moves unsendable packets back to
 * the write queue), grow the congestion window (slow start below
 * ssthresh, linear growth above), update the Van Jacobson RTT/RTO
 * estimate for segments that were not retransmitted, free fully-acked
 * packets from the retransmit queue, restart timers, and drive the
 * TIME_WAIT / LAST_ACK / FIN_WAIT2 close transitions.
 * Returns 0 only for a totally unacceptable ack, 1 otherwise.
 */
static int
tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
{
  unsigned long ack;
  int flag = 0;
  /*
   * 'flag' accumulates what this ack did; reconstructed meanings
   * (NOTE(review): confirm against original commentary):
   *   1 - something happened (data, window update, output)
   *   2 - new data was acknowledged / we are in a retransmit run
   *   4 - the send window changed
   */

  /* Socket was reset (zapped): swallow the ack quietly. */
  if(sk->zapped)
	return(1);

  ack = ntohl(th->ack_seq);
  DPRINTF((DBG_TCP, "tcp_ack ack=%d, window=%d, "
	  "sk->rcv_ack_seq=%d, sk->window_seq = %d\n",
	  ack, ntohs(th->window), sk->rcv_ack_seq, sk->window_seq));

  /* Track the largest window the peer has ever offered; it bounds mss. */
  if (ntohs(th->window) > sk->max_window) {
  	sk->max_window = ntohs(th->window);
  	sk->mss = min(sk->max_window, sk->mtu);
  }

  /* Any traffic answers an outstanding keepalive probe. */
  if (sk->retransmits && sk->timeout == TIME_KEEPOPEN)
  	sk->retransmits = 0;

  /* Ack outside the window [rcv_ack_seq-1, sent_seq+1]: either a
   * duplicate of something old or an ack of data we never sent. */
  if (after(ack, sk->sent_seq+1) || before(ack, sk->rcv_ack_seq-1)) {
	if (after(ack, sk->sent_seq) ||
	   (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)) {
		return(0);
	}
	if (sk->keepopen) {
		reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
	}
	return(1);
  }

  /* Segment carries more than a bare header. */
  if (len != th->doff*4) flag |= 1;

  /* The peer shrank its window below what we have already queued for
   * transmission: pull the now-unsendable packets off the retransmit
   * list and put them back on the front of the write queue, keeping
   * their relative order. */
  if (after(sk->window_seq, ack+ntohs(th->window))) {
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *wskb = NULL;

	skb2 = sk->send_head;
	sk->send_head = NULL;
	sk->send_tail = NULL;

	flag |= 4;

	sk->window_seq = ack + ntohs(th->window);
	cli();		/* the lists are shared with the timer/IRQ path */
	while (skb2 != NULL) {
		skb = skb2;
		skb2 = (struct sk_buff *)skb->link3;
		skb->link3 = NULL;
		if (after(skb->h.seq, sk->window_seq)) {
			/* Beyond the new window: back to the write queue. */
			if (sk->packets_out > 0) sk->packets_out--;

			if (skb->next != NULL) {
				skb_unlink(skb);
			}

			skb->magic = TCP_WRITE_QUEUE_MAGIC;
			if (wskb == NULL) {
				skb->next = sk->wfront;
				sk->wfront = skb;
			} else {
				skb->next = wskb->next;
				wskb->next = skb;
			}
			if (sk->wback == wskb) sk->wback = skb;
			wskb = skb;
		} else {
			/* Still inside the window: keep on the send list. */
			if (sk->send_head == NULL) {
				sk->send_head = skb;
				sk->send_tail = skb;
			} else {
				sk->send_tail->link3 = skb;
				sk->send_tail = skb;
			}
			skb->link3 = NULL;
		}
	}
	sti();
  }

  /* Keep head/tail/packets_out mutually consistent. */
  if (sk->send_tail == NULL || sk->send_head == NULL) {
  	sk->send_head = NULL;
  	sk->send_tail = NULL;
	sk->packets_out= 0;
  }

  sk->window_seq = ack + ntohs(th->window);

  /* Congestion window growth, only while a write timer is pending and
   * this ack advances rcv_ack_seq. */
  if (sk->timeout == TIME_WRITE &&
      sk->cong_window < 2048 && after(ack, sk->rcv_ack_seq)) {
	if (sk->cong_window < sk->ssthresh)
		/* Slow start: one extra packet per ack. */
		sk->cong_window++;
	else {
		/* Congestion avoidance: one extra packet per window. */
		if (sk->cong_count >= sk->cong_window) {
			sk->cong_window++;
			sk->cong_count = 0;
		} else
			sk->cong_count++;
	}
  }

  DPRINTF((DBG_TCP, "tcp_ack: Updating rcv ack sequence.\n"));
  sk->rcv_ack_seq = ack;

  /* A window probe was outstanding and the window has now opened far
   * enough for the head of the write queue: reset the backoff and
   * recompute the RTO from the smoothed estimates. */
  if (sk->timeout == TIME_PROBE0) {
  	if (sk->wfront != NULL &&
	    ! before (sk->window_seq, sk->wfront->h.seq)) {
		sk->retransmits = 0;
		sk->backoff = 0;
		/* rtt is scaled by 8, mdev by 4: rto = rtt/2 + 2*mdev. */
		sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
		if (sk->rto > 120*HZ)
			sk->rto = 120*HZ;
		if (sk->rto < 1*HZ)
			sk->rto = 1*HZ;
	}
  }

  /* Free every fully-acknowledged packet on the retransmit queue. */
  while(sk->send_head != NULL) {
	/* Defensive: repair the queue if it ever goes out of order. */
	if (sk->send_head->link3 &&
	    after(sk->send_head->h.seq, sk->send_head->link3->h.seq)) {
		printk("INET: tcp.c: *** bug send_list out of order.\n");
		sort_send(sk);
	}

	if (before(sk->send_head->h.seq, ack+1)) {
		struct sk_buff *oskb;

		if (sk->retransmits) {
			/* We are mid-retransmit: remember it (flag 2 also
			 * suppresses the RTT sample below - Karn's rule). */
			flag |= 2;
			/* Stay in retransmit mode only while more
			 * retransmitted packets remain unacked. */
			if (sk->send_head->link3)
				sk->retransmits = 1;
			else
				sk->retransmits = 0;
		}

		if (sk->packets_out > 0) sk->packets_out --;
		DPRINTF((DBG_TCP, "skb=%X skb->h.seq = %d acked ack=%d\n",
			sk->send_head, sk->send_head->h.seq, ack));

		/* Writers may have been blocked on a full send queue. */
		if (!sk->dead) sk->write_space(sk);

		oskb = sk->send_head;

		if (!(flag&2)) {
			long m;

			/* Van Jacobson RTT estimation: rtt is the smoothed
			 * estimate scaled by 8, mdev the mean deviation
			 * scaled by 4; sample only non-retransmitted skbs. */
			m = jiffies - oskb->when;
			m -= (sk->rtt >> 3);
			sk->rtt += m;
			if (m < 0)
				m = -m;
			m -= (sk->mdev >> 2);
			sk->mdev += m;

			/* Recompute rto, clamped to [1s, 120s]. */
			sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
			if (sk->rto > 120*HZ)
				sk->rto = 120*HZ;
			if (sk->rto < 1*HZ)
				sk->rto = 1*HZ;
			sk->backoff = 0;
		}
		flag |= (2|4);

		cli();

		oskb = sk->send_head;
		IS_SKB(oskb);
		sk->send_head =(struct sk_buff *)oskb->link3;
		if (sk->send_head == NULL) {
			sk->send_tail = NULL;
		}

		/* It may still sit on a device queue too. */
		skb_unlink(oskb);
		sti();
		oskb->magic = 0;
		kfree_skb(oskb, FREE_WRITE);
		if (!sk->dead) sk->write_space(sk);
	} else {
		break;
	}
  }

  /* Decide what to do with the remaining write queue: transmit what
   * now fits, or start zero-window probing if nothing does. */
  if (sk->wfront != NULL) {
	if (after (sk->window_seq+1, sk->wfront->h.seq) &&
		(sk->retransmits == 0 ||
		 sk->timeout != TIME_WRITE ||
		 before(sk->wfront->h.seq, sk->rcv_ack_seq +1))
		&& sk->packets_out < sk->cong_window) {
		flag |= 1;
		tcp_write_xmit(sk);
	} else if (before(sk->window_seq, sk->wfront->h.seq) &&
		   sk->send_head == NULL &&
		   sk->ack_backlog == 0 &&
		   sk->state != TCP_TIME_WAIT) {
		reset_timer(sk, TIME_PROBE0, sk->rto);
	}
  } else {
	if (sk->send_head == NULL && sk->ack_backlog == 0 &&
	    sk->state != TCP_TIME_WAIT && !sk->keepopen) {
		DPRINTF((DBG_TCP, "Nothing to do, going to sleep.\n"));
		if (!sk->dead) sk->write_space(sk);

		/* NOTE(review): keepopen is always false on this branch,
		 * so the timer is simply deleted here. */
		if (sk->keepopen)
			reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
		else
			delete_timer(sk);
	} else {
		if (sk->state != (unsigned char) sk->keepopen) {
			reset_timer(sk, TIME_WRITE, sk->rto);
		}
		if (sk->state == TCP_TIME_WAIT) {
			reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
		}
	}
  }

  /* Everything in flight is acked: flush any half-built packet. */
  if (sk->packets_out == 0 && sk->partial != NULL &&
      sk->wfront == NULL && sk->send_head == NULL) {
	flag |= 1;
	tcp_send_partial(sk);
  }

  /* In TIME_WAIT: once both sides are fully acked, fall to CLOSE. */
  if (sk->state == TCP_TIME_WAIT) {
	if (!sk->dead)
		sk->state_change(sk);
	if (sk->rcv_ack_seq == sk->write_seq && sk->acked_seq == sk->fin_seq) {
		flag |= 1;
		sk->state = TCP_CLOSE;
		sk->shutdown = SHUTDOWN_MASK;
	}
  }

  /* Our FIN has been acked: either enter TIME_WAIT (peer's FIN not
   * yet fully acked) or acknowledge and close outright. */
  if (sk->state == TCP_LAST_ACK || sk->state == TCP_FIN_WAIT2) {
	if (!sk->dead) sk->state_change(sk);
	if (sk->rcv_ack_seq == sk->write_seq) {
		flag |= 1;
		if (sk->acked_seq != sk->fin_seq) {
			tcp_time_wait(sk);
		} else {
			DPRINTF((DBG_TCP, "tcp_ack closing socket - %X\n", sk));
			tcp_send_ack(sk->sent_seq, sk->acked_seq, sk,
				     th, sk->daddr);
			sk->shutdown = SHUTDOWN_MASK;
			sk->state = TCP_CLOSE;
		}
	}
  }

  /* Fast-retransmit kick: if this ack achieved nothing (pure
   * duplicate) or only moved the window, and unacked data is either
   * already in a retransmit run or overdue, retransmit immediately. */
  if (((!flag) || (flag&4)) && sk->send_head != NULL &&
      (((flag&2) && sk->retransmits) ||
       (sk->send_head->when + sk->rto < jiffies))) {
	ip_do_retransmit(sk, 1);
	reset_timer(sk, TIME_WRITE, sk->rto);
  }

  DPRINTF((DBG_TCP, "leaving tcp_ack\n"));
  return(1);
}
2526
2527
2528
2529
2530
2531
2532
/*
 * Handle the data portion of an incoming segment.  Splices the skb
 * into the (sequence-ordered) receive queue, advances acked_seq over
 * every segment that is now contiguous, shrinks the offered window by
 * the newly-arrived bytes, decides between an immediate and a delayed
 * ack, and wakes any reader.  Returns 0 in all cases; the skb is
 * either queued or freed here.
 */
static int
tcp_data(struct sk_buff *skb, struct sock *sk,
	 unsigned long saddr, unsigned short len)
{
  struct sk_buff *skb1, *skb2;
  struct tcphdr *th;
  int dup_dumped=0;

  th = skb->h.th;
  print_th(th);
  skb->len = len -(th->doff*4);		/* payload length only */

  DPRINTF((DBG_TCP, "tcp_data len = %d sk = %X:\n", skb->len, sk));

  sk->bytes_rcv += skb->len;
  /* Pure ack segment with no data and no flags of interest: just make
   * sure the peer gets an ack back if it expected one, then drop it. */
  if (skb->len == 0 && !th->fin && !th->urg && !th->psh) {
	if (!th->ack) tcp_send_ack(sk->sent_seq, sk->acked_seq,sk, th, saddr);
	kfree_skb(skb, FREE_READ);
	return(0);
  }

  /* Data arriving after we shut down the receive side: reset hard. */
  if (sk->shutdown & RCV_SHUTDOWN) {
	sk->acked_seq = th->seq + skb->len + th->syn + th->fin;
	tcp_reset(sk->saddr, sk->daddr, skb->h.th,
		  sk->prot, NULL, skb->dev, sk->ip_tos, sk->ip_ttl);
	sk->state = TCP_CLOSE;
	sk->err = EPIPE;
	sk->shutdown = SHUTDOWN_MASK;
	DPRINTF((DBG_TCP, "tcp_data: closing socket - %X\n", sk));
	kfree_skb(skb, FREE_READ);
	if (!sk->dead) sk->state_change(sk);
	return(0);
  }

  /* Insert the skb into the receive queue in sequence order, scanning
   * backwards from the tail (new data usually belongs at the end). */
  if (sk->rqueue == NULL) {
	DPRINTF((DBG_TCP, "tcp_data: skb = %X:\n", skb));
#ifdef OLDWAY
	sk->rqueue = skb;
	skb->next = skb;
	skb->prev = skb;
	skb->list = &sk->rqueue;
#else
	skb_queue_head(&sk->rqueue,skb);
#endif
	skb1= NULL;
  } else {
	DPRINTF((DBG_TCP, "tcp_data adding to chain sk = %X:\n", sk));
	for(skb1=sk->rqueue->prev; ; skb1 =(struct sk_buff *)skb1->prev) {
		if(sk->debug)
		{
			printk("skb1=%p :", skb1);
			printk("skb1->h.th->seq = %ld: ", skb1->h.th->seq);
			printk("skb->h.th->seq = %ld\n",skb->h.th->seq);
			printk("copied_seq = %ld acked_seq = %ld\n", sk->copied_seq,
					sk->acked_seq);
		}
#ifdef OLD
		if (after(th->seq+1, skb1->h.th->seq)) {
			skb->prev = skb1;
			skb->next = skb1->next;
			skb->next->prev = skb;
			skb1->next = skb;
			if (skb1 == sk->rqueue) sk->rqueue = skb;
			break;
		}
		if (skb1->prev == sk->rqueue) {
			skb->next= skb1;
			skb->prev = skb1->prev;
			skb->prev->next = skb;
			skb1->prev = skb;
			skb1 = NULL;

			break;
		}
#else
		/* Exact duplicate with at least as much data: replace it. */
		if (th->seq==skb1->h.th->seq && skb->len>= skb1->len)
		{
			skb_append(skb1,skb);
			skb_unlink(skb1);
			kfree_skb(skb1,FREE_READ);
			dup_dumped=1;
			skb1=NULL;
			break;
		}
		/* First queued segment we sort after: insert behind it. */
		if (after(th->seq+1, skb1->h.th->seq))
		{
			skb_append(skb1,skb);
			break;
		}
		/* Reached the queue head: this is the earliest segment. */
		if (skb1 == sk->rqueue)
		{
			skb_queue_head(&sk->rqueue, skb);
			break;
		}
#endif
	}
	DPRINTF((DBG_TCP, "skb = %X:\n", skb));
  }

  /* Stash the end-sequence of this segment in its (now unused)
   * ack_seq field; SYN and FIN each occupy one sequence number. */
  th->ack_seq = th->seq + skb->len;
  if (th->syn) th->ack_seq++;
  if (th->fin) th->ack_seq++;

  if (before(sk->acked_seq, sk->copied_seq)) {
	printk("*** tcp.c:tcp_data bug acked < copied\n");
	sk->acked_seq = sk->copied_seq;
  }

  /* If this segment is in sequence, advance acked_seq over it and
   * over every following queued segment that is now contiguous. */
  if ((!dup_dumped && (skb1 == NULL || skb1->acked)) || before(th->seq, sk->acked_seq+1)) {
	if (before(th->seq, sk->acked_seq+1)) {
		int newwindow;

		if (after(th->ack_seq, sk->acked_seq)) {
			/* Shrink the offered window by the bytes consumed. */
			newwindow = sk->window -
				       (th->ack_seq - sk->acked_seq);
			if (newwindow < 0)
				newwindow = 0;
			sk->window = newwindow;
			sk->acked_seq = th->ack_seq;
		}
		skb->acked = 1;

		/* An in-sequence FIN closes our receive side. */
		if (skb->h.th->fin) {
			if (!sk->dead) sk->state_change(sk);
			sk->shutdown |= RCV_SHUTDOWN;
		}

		/* Sweep forward through previously out-of-order segments
		 * that this one has made contiguous. */
		for(skb2 = (struct sk_buff *)skb->next;
		    skb2 !=(struct sk_buff *) sk->rqueue;
		    skb2 = (struct sk_buff *)skb2->next) {
			if (before(skb2->h.th->seq, sk->acked_seq+1)) {
				if (after(skb2->h.th->ack_seq, sk->acked_seq))
				{
					newwindow = sk->window -
					 (skb2->h.th->ack_seq - sk->acked_seq);
					if (newwindow < 0)
						newwindow = 0;
					sk->window = newwindow;
					sk->acked_seq = skb2->h.th->ack_seq;
				}
				skb2->acked = 1;

				if (skb2->h.th->fin) {
					sk->shutdown |= RCV_SHUTDOWN;
					if (!sk->dead) sk->state_change(sk);
				}

				/* Force an immediate ack further below. */
				sk->ack_backlog = sk->max_ack_backlog;
			} else {
				break;
			}
		}

		/* Delay the ack only if policy allows and no pressure
		 * (backlog, unacked byte count, FIN) forces one now. */
		if (!sk->delay_acks ||
		    sk->ack_backlog >= sk->max_ack_backlog ||
		    sk->bytes_rcv > sk->max_unacked || th->fin) {
			/* immediate ack is sent below */
		} else {
			sk->ack_backlog++;
			if(sk->debug)
				printk("Ack queued.\n");
			reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
		}
	}
  }

  /* Out-of-order segment: if receive memory is tight, throw away
   * queued-but-unacked segments (the peer will retransmit them), then
   * ack what we do hold so the sender learns our position. */
  if (!skb->acked) {
	while (sk->prot->rspace(sk) < sk->mtu) {
		skb1 = skb_peek(&sk->rqueue);
		if (skb1 == NULL) {
			printk("INET: tcp.c:tcp_data memory leak detected.\n");
			break;
		}

		/* Never discard data we have already acknowledged. */
		if (skb1->acked) {
			break;
		}

		skb_unlink(skb1);
#ifdef OLDWAY
		if (skb1->prev == skb1) {
			sk->rqueue = NULL;
		} else {
			sk->rqueue = (struct sk_buff *)skb1->prev;
			skb1->next->prev = skb1->prev;
			skb1->prev->next = skb1->next;
		}
#endif
		kfree_skb(skb1, FREE_READ);
	}
	tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
	sk->ack_backlog++;
	reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
  } else {
	/* In-sequence data: ack it now (delayed-ack policy was applied above). */
	tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
  }

  /* Wake up anyone sleeping in read(). */
  if (!sk->dead) {
	if(sk->debug)
		printk("Data wakeup.\n");
	sk->data_ready(sk,0);
  } else {
	DPRINTF((DBG_TCP, "data received on dead socket.\n"));
  }

  /* Both FINs seen and everything acked both ways: move to LAST_ACK. */
  if (sk->state == TCP_FIN_WAIT2 &&
      sk->acked_seq == sk->fin_seq && sk->rcv_ack_seq == sk->write_seq) {
	DPRINTF((DBG_TCP, "tcp_data: entering last_ack state sk = %X\n", sk));

	sk->shutdown = SHUTDOWN_MASK;
	sk->state = TCP_LAST_ACK;
	if (!sk->dead) sk->state_change(sk);
  }

  return(0);
}
2784
2785
2786 static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
2787 {
2788 unsigned long ptr = ntohs(th->urg_ptr);
2789
2790 if (ptr)
2791 ptr--;
2792 ptr += th->seq;
2793
2794
2795 if (after(sk->copied_seq+1, ptr))
2796 return;
2797
2798
2799 if (sk->urg_data && !after(ptr, sk->urg_seq))
2800 return;
2801
2802
2803 if (sk->proc != 0) {
2804 if (sk->proc > 0) {
2805 kill_proc(sk->proc, SIGURG, 1);
2806 } else {
2807 kill_pg(-sk->proc, SIGURG, 1);
2808 }
2809 }
2810 sk->urg_data = URG_NOTYET;
2811 sk->urg_seq = ptr;
2812 }
2813
2814 static inline int tcp_urg(struct sock *sk, struct tcphdr *th,
2815 unsigned long saddr, unsigned long len)
2816 {
2817 unsigned long ptr;
2818
2819
2820 if (th->urg)
2821 tcp_check_urg(sk,th);
2822
2823
2824 if (sk->urg_data != URG_NOTYET)
2825 return 0;
2826
2827
2828 ptr = sk->urg_seq - th->seq + th->doff*4;
2829 if (ptr >= len)
2830 return 0;
2831
2832
2833 sk->urg_data = URG_VALID | *(ptr + (unsigned char *) th);
2834 if (!sk->dead)
2835 wake_up_interruptible(sk->sleep);
2836 return 0;
2837 }
2838
2839
2840
/*
 * Process an incoming FIN: record where the peer's stream ends
 * (fin_seq) and advance the connection state machine accordingly.
 * The FIN itself is acknowledged by the caller via the usual data
 * path; here we only bump ack_backlog so an ack goes out.
 * Always returns 0.
 */
static int
tcp_fin(struct sock *sk, struct tcphdr *th,
	 unsigned long saddr, struct device *dev)
{
  DPRINTF((DBG_TCP, "tcp_fin(sk=%X, th=%X, saddr=%X, dev=%X)\n",
	  sk, th, saddr, dev));

  if (!sk->dead) {
	sk->state_change(sk);
  }

  switch(sk->state) {
	case TCP_SYN_RECV:
	case TCP_SYN_SENT:
	case TCP_ESTABLISHED:
		/* Peer closed first: we enter CLOSE_WAIT and arm a
		 * timeout in case the application never closes. */
		reset_timer(sk, TIME_CLOSE, TCP_TIMEOUT_LEN);
		sk->fin_seq = th->seq+1;
		sk->state = TCP_CLOSE_WAIT;
		if (th->rst) sk->shutdown = SHUTDOWN_MASK;
		break;

	case TCP_CLOSE_WAIT:
	case TCP_FIN_WAIT2:
		break;	/* duplicate FIN: nothing further to do */

	case TCP_FIN_WAIT1:
		/* Simultaneous close: both FINs crossed in flight.
		 * NOTE(review): RFC 793 names this state CLOSING; this
		 * code reuses FIN_WAIT2 for it - confirm downstream
		 * handling before changing. */
		sk->fin_seq = th->seq+1;
		sk->state = TCP_FIN_WAIT2;
		break;

	default:
	case TCP_TIME_WAIT:
		sk->state = TCP_LAST_ACK;

		/* Hold the connection long enough for a lost final
		 * ack to be retransmitted. */
		reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
		return(0);
  }
  sk->ack_backlog++;	/* make sure the FIN gets acknowledged */

  return(0);
}
2885
2886
2887
/*
 * accept() on a listening TCP socket: wait (or, with O_NONBLOCK, poll
 * once) for a connection skb queued by tcp_conn_request(), and return
 * the new socket it carries.  Returns NULL with sk->err set to
 * EINVAL (not listening), EAGAIN (would block) or ERESTARTSYS
 * (interrupted by a signal).
 */
static struct sock *
tcp_accept(struct sock *sk, int flags)
{
  struct sock *newsk;
  struct sk_buff *skb;

  DPRINTF((DBG_TCP, "tcp_accept(sk=%X, flags=%X, addr=%s)\n",
	  sk, flags, in_ntoa(sk->saddr)));

  /* Only a socket in LISTEN may accept. */
  if (sk->state != TCP_LISTEN) {
	sk->err = EINVAL;
	return(NULL);
  }

  /* Interrupts stay off while we test the queue and go to sleep, so a
   * connection arriving in between cannot be missed. */
  cli();
  sk->inuse = 1;
  while((skb = get_firstr(sk)) == NULL) {
	if (flags & O_NONBLOCK) {
		sti();
		release_sock(sk);
		sk->err = EAGAIN;
		return(NULL);
	}

	release_sock(sk);
	interruptible_sleep_on(sk->sleep);
	/* Woken by a signal rather than a connection? */
	if (current->signal & ~current->blocked) {
		sti();
		sk->err = ERESTARTSYS;
		return(NULL);
	}
	sk->inuse = 1;
  }
  sti();

  /* The queued SYN skb carries the new socket created for it. */
  newsk = skb->sk;

  kfree_skb(skb, FREE_READ);
  sk->ack_backlog--;
  release_sock(sk);
  return(newsk);
}
2936
2937
2938
/*
 * connect() for TCP: validate and copy the user's sockaddr, pick the
 * initial sequence number and MTU, build and transmit the SYN (with
 * an MSS option), and move the socket to SYN_SENT.  Returns 0 on
 * success or a negative errno.
 */
static int
tcp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len)
{
  struct sk_buff *buff;
  struct sockaddr_in sin;
  struct device *dev=NULL;
  unsigned char *ptr;
  int tmp;
  struct tcphdr *t1;
  int err;

  if (sk->state != TCP_CLOSE) return(-EISCONN);
  if (addr_len < 8) return(-EINVAL);	/* family + port + address */

  err=verify_area(VERIFY_READ, usin, addr_len);
  if(err)
  	return err;

  memcpy_fromfs(&sin,usin, min(sizeof(sin), addr_len));

  if (sin.sin_family && sin.sin_family != AF_INET) return(-EAFNOSUPPORT);

  DPRINTF((DBG_TCP, "TCP connect daddr=%s\n", in_ntoa(sin.sin_addr.s_addr)));

  /* A TCP connection to a broadcast address makes no sense. */
  if (chk_addr(sin.sin_addr.s_addr) == IS_BROADCAST) {
	DPRINTF((DBG_TCP, "TCP connection to broadcast address not allowed\n"));
	return(-ENETUNREACH);
  }

  /* Refuse to connect the socket to itself. */
  if(sk->saddr == sin.sin_addr.s_addr && sk->num==ntohs(sin.sin_port))
	return -EBUSY;

  sk->inuse = 1;
  sk->daddr = sin.sin_addr.s_addr;
  /* NOTE(review): jiffies-based ISN is predictable - known weakness
   * of this era's stack. */
  sk->write_seq = jiffies * SEQ_TICK - seq_offset;
  sk->window_seq = sk->write_seq;
  sk->rcv_ack_seq = sk->write_seq -1;
  sk->err = 0;
  sk->dummy_th.dest = sin.sin_port;
  release_sock(sk);

  /* GFP_KERNEL: we may sleep here, before the SYN exists. */
  buff = sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
  if (buff == NULL) {
	return(-ENOMEM);
  }
  sk->inuse = 1;
  buff->mem_addr = buff;
  buff->mem_len = MAX_SYN_SIZE;
  buff->len = 24;		/* TCP header (20) + MSS option (4) */
  buff->sk = sk;
  buff->free = 1;
  t1 = (struct tcphdr *) buff->data;

  /* Build the IP header; this also chooses the route and device. */
  tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
			       IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);
  if (tmp < 0) {
	sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
	release_sock(sk);
	return(-ENETUNREACH);
  }
  buff->len += tmp;
  t1 = (struct tcphdr *)((char *)t1 +tmp);

  memcpy(t1,(void *)&(sk->dummy_th), sizeof(*t1));
  t1->seq = ntohl(sk->write_seq++);
  sk->sent_seq = sk->write_seq;
  buff->h.seq = sk->write_seq;
  t1->ack = 0;
  t1->window = 2;	/* tiny initial window until the handshake completes */
  t1->res1=0;
  t1->res2=0;
  t1->rst = 0;
  t1->urg = 0;
  t1->psh = 0;
  t1->syn = 1;
  t1->urg_ptr = 0;
  t1->doff = 6;		/* 20-byte header + 4-byte MSS option */

  /* Choose the MTU: a user-forced MSS wins; otherwise 576 for
   * off-net destinations and the maximum for local ones. */
  if (sk->user_mss)
	sk->mtu = sk->user_mss;
  else {
#ifdef SUBNETSARELOCAL
	if ((sk->saddr ^ sk->daddr) & default_mask(sk->saddr))
#else
	if ((sk->saddr ^ sk->daddr) & dev->pa_mask)
#endif
		sk->mtu = 576 - HEADER_SIZE;
	else
		sk->mtu = MAX_WINDOW;
  }

  /* Never exceed what the chosen interface can carry. */
  sk->mtu = min(sk->mtu, dev->mtu - HEADER_SIZE);

  /* Append the MSS option: kind 2, length 4, 16-bit value. */
  ptr = (unsigned char *)(t1+1);
  ptr[0] = 2;
  ptr[1] = 4;
  ptr[2] = (sk->mtu) >> 8;
  ptr[3] = (sk->mtu) & 0xff;
  tcp_send_check(t1, sk->saddr, sk->daddr,
		  sizeof(struct tcphdr) + 4, sk);

  /* Enter SYN_SENT and arm the retransmit timer for the SYN. */
  sk->state = TCP_SYN_SENT;
  sk->rtt = TCP_CONNECT_TIME;
  reset_timer(sk, TIME_WRITE, TCP_CONNECT_TIME);
  sk->retransmits = TCP_RETR2 - TCP_SYN_RETRIES;

  sk->prot->queue_xmit(sk, dev, buff, 0);

  release_sock(sk);
  return(0);
}
3057
3058
3059
/*
 * Segment acceptability test (RFC 793 style): decide whether an
 * incoming segment overlaps the current receive window.
 *
 * Returns 1 if the segment should be processed, 0 if it was rejected
 * (in which case an ACK is sent back unless the segment was an RST).
 * For SYN_SENT/SYN_RECV a reject triggers a reset and still returns 1.
 */
static int
tcp_sequence(struct sock *sk, struct tcphdr *th, short len,
	     struct options *opt, unsigned long saddr, struct device *dev)
{
  unsigned long next_seq;

  /* Payload length = total length minus the TCP header (doff words). */
  next_seq = len - 4*th->doff;
  if (th->fin)
	next_seq++;	/* FIN occupies one sequence number */

  /* Data (or FIN) arriving into a zero window cannot be accepted. */
  if (next_seq && !sk->window)
	goto ignore_it;
  next_seq += th->seq;	/* sequence number just past this segment */

  /* Segment must end at or after what we have already acked... */
  if (!after(next_seq+1, sk->acked_seq))
	goto ignore_it;

  /* ...and must start before the right edge of our window. */
  if (!before(th->seq, sk->acked_seq + sk->window + 1))
	goto ignore_it;

  return 1;

ignore_it:
  DPRINTF((DBG_TCP, "tcp_sequence: rejecting packet.\n"));

  /*
   * During the handshake an out-of-window segment gets a reset;
   * returning 1 lets the caller's state machine see it regardless.
   */
  if (sk->state==TCP_SYN_SENT || sk->state==TCP_SYN_RECV) {
	tcp_reset(sk->saddr,sk->daddr,th,sk->prot,NULL,dev, sk->ip_tos,sk->ip_ttl);
	return 1;
  }

  /* Never ACK a reset. */
  if (th->rst)
	return 0;

  /* Re-ACK so the peer resynchronises with our window. */
  tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr);
  return 0;
}
3113
3114
/*
 * Main TCP receive routine, called from the IP layer for every TCP
 * segment. Validates the segment, locates the owning socket, and runs
 * the per-state protocol machine. `redo` is set when the segment is
 * being replayed from a socket's backlog (checksum and demux already
 * done). Always returns 0; the skb is consumed on every path.
 *
 * NOTE(review): several cases below fall through deliberately
 * (LAST_ACK -> established group, SYN_RECV -> default -> SYN_SENT);
 * the statement order is load-bearing.
 */
int
tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
	unsigned long daddr, unsigned short len,
	unsigned long saddr, int redo, struct inet_protocol * protocol)
{
  struct tcphdr *th;
  struct sock *sk;

  if (!skb) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv skb = NULL\n"));
	return(0);
  }
#if 0
  if (!protocol) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv protocol = NULL\n"));
	return(0);
  }

  if (!opt) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv opt = NULL\n"));
  }
#endif
  if (!dev) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv dev = NULL\n"));
	return(0);
  }
  th = skb->h.th;

  /* Demultiplex: find the socket for this 4-tuple (may be NULL). */
  sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
  DPRINTF((DBG_TCP, "<<\n"));
  DPRINTF((DBG_TCP, "len = %d, redo = %d, skb=%X\n", len, redo, skb));

  /* A zapped (reset) socket is treated as if no socket existed. */
  if (sk!=NULL && sk->zapped)
	sk=NULL;

  if (sk) {
	DPRINTF((DBG_TCP, "sk = %X:\n", sk));
  }

  if (!redo) {
	/* First pass: verify the checksum before touching anything. */
	if (tcp_check(th, len, saddr, daddr )) {
		skb->sk = NULL;
		DPRINTF((DBG_TCP, "packet dropped with bad checksum.\n"));
		if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: bad checksum\n");
		kfree_skb(skb,FREE_READ);
		return(0);
	}

	/* From here on th->seq is kept in host byte order. */
	th->seq = ntohl(th->seq);

	/* No socket: answer with a reset (unless it was itself a reset). */
	if (sk == NULL) {
		if (!th->rst)
			tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev,skb->ip_hdr->tos,255);
		skb->sk = NULL;
		kfree_skb(skb, FREE_READ);
		return(0);
	}

	skb->len = len;
	skb->sk = sk;
	skb->acked = 0;
	skb->used = 0;
	skb->free = 0;
	/* Stored swapped: saddr/daddr are from OUR point of view. */
	skb->saddr = daddr;
	skb->daddr = saddr;

	/* If the socket is busy, park the skb on its backlog ring;
	 * release_sock() will replay it later with redo=1. */
	cli();
	if (sk->inuse) {
		if (sk->back_log == NULL) {
			sk->back_log = skb;
			skb->next = skb;
			skb->prev = skb;
		} else {
			skb->next = sk->back_log;
			skb->prev = sk->back_log->prev;
			skb->prev->next = skb;
			skb->next->prev = skb;
		}
		sti();
		return(0);
	}
	sk->inuse = 1;
	sti();
  } else {
	/* Backlog replay must always have a socket. */
	if (!sk) {
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv bug sk=NULL redo = 1\n"));
		return(0);
	}
  }

  if (!sk->prot) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv sk->prot = NULL \n"));
	return(0);
  }

  /* Enforce the receive buffer quota before charging this skb. */
  if (sk->rmem_alloc + skb->mem_len >= sk->rcvbuf) {
	skb->sk = NULL;
	DPRINTF((DBG_TCP, "dropping packet due to lack of buffer space.\n"));
	kfree_skb(skb, FREE_READ);
	release_sock(sk);
	return(0);
  }
  sk->rmem_alloc += skb->mem_len;

  DPRINTF((DBG_TCP, "About to do switch.\n"));

  /* The per-state protocol machine. */
  switch(sk->state) {

	/* LAST_ACK handles RST specially, then FALLS THROUGH to the
	 * common established-state processing below. */
	case TCP_LAST_ACK:
		if (th->rst) {
			sk->zapped=1;
			sk->err = ECONNRESET;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			if (!sk->dead) {
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

	case TCP_ESTABLISHED:
	case TCP_CLOSE_WAIT:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
	case TCP_TIME_WAIT:
		/* Out-of-window segments are dropped (the ACK was already
		 * sent inside tcp_sequence()). */
		if (!tcp_sequence(sk, th, len, opt, saddr,dev)) {
			if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: not in seq\n");
#ifdef undef
			if(!th->rst)
				tcp_send_ack(sk->sent_seq, sk->acked_seq,
					     sk, th, saddr);
#endif
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* A valid RST tears down the connection. */
		if (th->rst) {
			sk->zapped=1;
			sk->err = ECONNRESET;

			/* In CLOSE_WAIT the local writer gets EPIPE. */
			if (sk->state == TCP_CLOSE_WAIT) {
				sk->err = EPIPE;
			}

			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			if (!sk->dead) {
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* An in-window SYN on a synchronized connection is an
		 * error: reset and abort. */
		if (
#if 0
		if ((opt && (opt->security != 0 ||
			     opt->compartment != 0)) ||
#endif
		    th->syn) {
			sk->err = ECONNRESET;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			tcp_reset(daddr, saddr, th, sk->prot, opt,dev, sk->ip_tos,sk->ip_ttl);
			if (!sk->dead) {
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* ACK, URG, data and FIN handlers each consume the skb
		 * themselves when they return nonzero. */
		if (th->ack && !tcp_ack(sk, th, saddr, len)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (tcp_urg(sk, th, saddr, len)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (tcp_data(skb, sk, saddr, len)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (th->fin && tcp_fin(sk, th, saddr, dev)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		release_sock(sk);
		return(0);

	case TCP_CLOSE:
		if (sk->dead || sk->daddr) {
			DPRINTF((DBG_TCP, "packet received for closed,dead socket\n"));
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* Anything but a reset gets a reset back. */
		if (!th->rst) {
			if (!th->ack)
				th->ack_seq = 0;
			tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
		}
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);

	case TCP_LISTEN:
		/* Ignore resets; reset any stray ACK; a SYN starts the
		 * passive-open path via tcp_conn_request(). */
		if (th->rst) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		if (th->ack) {
			tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (th->syn) {
#if 0
			if (opt->security != 0 || opt->compartment != 0) {
				tcp_reset(daddr, saddr, th, prot, opt,dev);
				release_sock(sk);
				return(0);
			}
#endif
			/* tcp_conn_request() takes ownership of the skb. */
			tcp_conn_request(sk, skb, daddr, saddr, opt, dev);
			release_sock(sk);
			return(0);
		}

		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);

	/* Duplicate SYN in SYN_RECV is silently dropped; otherwise
	 * FALL THROUGH into the default sequence check. */
	case TCP_SYN_RECV:
		if (th->syn) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

	default:
		if (!tcp_sequence(sk, th, len, opt, saddr,dev)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

	/* Handshake states share the code below (SYN_RECV falls in
	 * through `default` above). */
	case TCP_SYN_SENT:
		if (th->rst) {
			sk->err = ECONNREFUSED;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			sk->zapped = 1;
			if (!sk->dead) {
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
#if 0
		if (opt->security != 0 || opt->compartment != 0) {
			sk->err = ECONNRESET;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			tcp_reset(daddr, saddr, th, sk->prot, opt, dev);
			if (!sk->dead) {
				wake_up_interruptible(sk->sleep);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
#endif
		/* Simultaneous open: SYN without ACK moves us to SYN_RECV. */
		if (!th->ack) {
			if (th->syn) {
				sk->state = TCP_SYN_RECV;
			}

			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* Segment carries an ACK: finish the handshake. */
		switch(sk->state) {
			case TCP_SYN_SENT:
				if (!tcp_ack(sk, th, saddr, len)) {
					tcp_reset(daddr, saddr, th,
						  sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}

				/* ACK of our SYN but no SYN from the peer:
				 * nothing more to do with this segment. */
				if (!th->syn) {
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}

				/* SYN+ACK: ack the peer's SYN, then FALL
				 * THROUGH to the common completion code. */
				sk->acked_seq = th->seq+1;
				sk->fin_seq = th->seq;
				tcp_send_ack(sk->sent_seq, th->seq+1,
					     sk, th, sk->daddr);

			case TCP_SYN_RECV:
				if (!tcp_ack(sk, th, saddr, len)) {
					tcp_reset(daddr, saddr, th,
						  sk->prot, opt, dev,sk->ip_tos,sk->ip_ttl);
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}
				sk->state = TCP_ESTABLISHED;

				/* Parse options (e.g. MSS) and latch the
				 * peer's port into the template header. */
				tcp_options(sk, th);
				sk->dummy_th.dest = th->source;
				sk->copied_seq = sk->acked_seq-1;
				if (!sk->dead) {
					sk->state_change(sk);
				}

				/* Peer never advertised a window: assume a
				 * tiny one so we can still make progress. */
				if (sk->max_window == 0) {
					sk->max_window = 32;
					sk->mss = min(sk->max_window, sk->mtu);
				}

				/* The handshake segment may itself carry
				 * urgent data, payload, or a FIN. */
				if (th->urg) {
					if (tcp_urg(sk, th, saddr, len)) {
						kfree_skb(skb, FREE_READ);
						release_sock(sk);
						return(0);
					}
				}
				if (tcp_data(skb, sk, saddr, len))
					kfree_skb(skb, FREE_READ);

				if (th->fin) tcp_fin(sk, th, saddr, dev);
				release_sock(sk);
				return(0);
		}

		/* Residual path for states reached via `default`. */
		if (th->urg) {
			if (tcp_urg(sk, th, saddr, len)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
		}

		if (tcp_data(skb, sk, saddr, len)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (!th->fin) {
			release_sock(sk);
			return(0);
		}
		tcp_fin(sk, th, saddr, dev);
		release_sock(sk);
		return(0);
	}
}
3548
3549
3550
3551
3552
3553
/*
 * Send a window-probe / forced-ACK segment: a bare ACK whose sequence
 * number is sent_seq-1, i.e. just below the window, so the peer must
 * respond with an ACK carrying its current window. Used by the
 * zero-window probe timer (tcp_send_probe0).
 */
static void
tcp_write_wakeup(struct sock *sk)
{
  struct sk_buff *buff;
  struct tcphdr *t1;
  struct device *dev=NULL;
  int tmp;

  if (sk->zapped)
	return;		/* connection has been reset; nothing to send */

  /* Only meaningful on a connection that can still carry data. */
  if (sk -> state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT &&
      sk -> state != TCP_FIN_WAIT1 && sk->state != TCP_FIN_WAIT2)
	return;

  /* Timer context: atomic allocation, silently give up on failure. */
  buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
  if (buff == NULL) return;

  buff->mem_addr = buff;
  buff->mem_len = MAX_ACK_SIZE;
  buff->len = sizeof(struct tcphdr);
  buff->free = 1;
  buff->sk = sk;
  DPRINTF((DBG_TCP, "in tcp_write_wakeup\n"));
  t1 = (struct tcphdr *) buff->data;

  /* Prepend MAC/IP headers; tmp is their length (or < 0 on failure). */
  tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
			       IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
  if (tmp < 0) {
	sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
	return;
  }

  buff->len += tmp;
  t1 = (struct tcphdr *)((char *)t1 +tmp);

  memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));

  /* sent_seq-1 is outside the send window, which forces the peer to
   * reply with an ACK (the probe's whole purpose). */
  t1->seq = htonl(sk->sent_seq-1);
  t1->ack = 1;
  t1->res1= 0;
  t1->res2= 0;
  t1->rst = 0;
  t1->urg = 0;
  t1->psh = 0;
  t1->fin = 0;
  t1->syn = 0;
  /* NOTE(review): ntohl/ntohs used where htonl/htons are meant --
   * identical on the architectures of the day, but confirm. */
  t1->ack_seq = ntohl(sk->acked_seq);
  t1->window = ntohs(tcp_select_window(sk));
  t1->doff = sizeof(*t1)/4;
  tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

  /* free flag = 1: one-shot segment, not queued for retransmit. */
  sk->prot->queue_xmit(sk, dev, buff, 1);
}
3616
3617 void
3618 tcp_send_probe0(struct sock *sk)
3619 {
3620 if (sk->zapped)
3621 return;
3622
3623 tcp_write_wakeup(sk);
3624
3625 sk->backoff++;
3626 sk->rto = min(sk->rto << 1, 120*HZ);
3627 reset_timer (sk, TIME_PROBE0, sk->rto);
3628 sk->retransmits++;
3629 sk->prot->retransmits ++;
3630 }
3631
3632
3633
3634
3635
3636 int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
3637 {
3638 int val,err;
3639
3640 if(level!=SOL_TCP)
3641 return ip_setsockopt(sk,level,optname,optval,optlen);
3642
3643 if (optval == NULL)
3644 return(-EINVAL);
3645
3646 err=verify_area(VERIFY_READ, optval, sizeof(int));
3647 if(err)
3648 return err;
3649
3650 val = get_fs_long((unsigned long *)optval);
3651
3652 switch(optname)
3653 {
3654 case TCP_MAXSEG:
3655
3656
3657
3658
3659
3660
3661 if(val<1||val>MAX_WINDOW)
3662 return -EINVAL;
3663 sk->user_mss=val;
3664 return 0;
3665 case TCP_NODELAY:
3666 sk->nonagle=(val==0)?0:1;
3667 return 0;
3668 default:
3669 return(-ENOPROTOOPT);
3670 }
3671 }
3672
3673 int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
3674 {
3675 int val,err;
3676
3677 if(level!=SOL_TCP)
3678 return ip_getsockopt(sk,level,optname,optval,optlen);
3679
3680 switch(optname)
3681 {
3682 case TCP_MAXSEG:
3683 val=sk->user_mss;
3684 break;
3685 case TCP_NODELAY:
3686 val=sk->nonagle;
3687 break;
3688 default:
3689 return(-ENOPROTOOPT);
3690 }
3691 err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
3692 if(err)
3693 return err;
3694 put_fs_long(sizeof(int),(unsigned long *) optlen);
3695
3696 err=verify_area(VERIFY_WRITE, optval, sizeof(int));
3697 if(err)
3698 return err;
3699 put_fs_long(val,(unsigned long *)optval);
3700
3701 return(0);
3702 }
3703
3704
/*
 * TCP's protocol operations table, registered with the INET socket
 * layer. NOTE(review): the member labels below follow the positional
 * order of struct proto as declared in sock.h -- confirm against
 * that header.
 */
struct proto tcp_prot = {
  sock_wmalloc,			/* wmalloc */
  sock_rmalloc,			/* rmalloc */
  sock_wfree,			/* wfree */
  sock_rfree,			/* rfree */
  sock_rspace,			/* rspace */
  sock_wspace,			/* wspace */
  tcp_close,			/* close */
  tcp_read,			/* read */
  tcp_write,			/* write */
  tcp_sendto,			/* sendto */
  tcp_recvfrom,			/* recvfrom */
  ip_build_header,		/* build_header */
  tcp_connect,			/* connect */
  tcp_accept,			/* accept */
  ip_queue_xmit,		/* queue_xmit */
  tcp_retransmit,		/* retransmit */
  tcp_write_wakeup,		/* write_wakeup */
  tcp_read_wakeup,		/* read_wakeup */
  tcp_rcv,			/* rcv */
  tcp_select,			/* select */
  tcp_ioctl,			/* ioctl */
  NULL,				/* init: none needed */
  tcp_shutdown,			/* shutdown */
  tcp_setsockopt,		/* setsockopt */
  tcp_getsockopt,		/* getsockopt */
  128,				/* max_header */
  0,				/* retransmit counter */
  {NULL,},			/* per-port socket array */
  "TCP"				/* protocol name */
};