This source file includes following definitions.
- min
- print_th
- get_firstr
- diff
- tcp_select_window
- tcp_time_wait
- tcp_retransmit
- tcp_err
- tcp_readable
- tcp_select
- tcp_ioctl
- tcp_check
- tcp_send_check
- tcp_send_partial
- tcp_send_ack
- tcp_build_header
- tcp_write
- tcp_sendto
- tcp_read_wakeup
- cleanup_rbuf
- tcp_read_urg
- tcp_read
- tcp_shutdown
- tcp_recvfrom
- tcp_reset
- tcp_options
- tcp_conn_request
- tcp_close
- tcp_write_xmit
- sort_send
- tcp_ack
- tcp_data
- tcp_urg
- tcp_fin
- tcp_accept
- tcp_connect
- tcp_sequence
- tcp_rcv
- tcp_write_wakeup
- tcp_setsockopt
- tcp_getsockopt
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78 #include <linux/types.h>
79 #include <linux/sched.h>
80 #include <linux/mm.h>
81 #include <linux/string.h>
82 #include <linux/socket.h>
83 #include <linux/sockios.h>
84 #include <linux/termios.h>
85 #include <linux/in.h>
86 #include <linux/fcntl.h>
87 #include "inet.h"
88 #include "dev.h"
89 #include "ip.h"
90 #include "protocol.h"
91 #include "icmp.h"
92 #include "tcp.h"
93 #include "skbuff.h"
94 #include "sock.h"
95 #include "arp.h"
96 #include <linux/errno.h>
97 #include <linux/timer.h>
98 #include <asm/system.h>
99 #include <asm/segment.h>
100 #include <linux/mm.h>
101
102 #define SEQ_TICK 3
103 unsigned long seq_offset;
104
/* Return the smaller of two unsigned values. */
static __inline__ int
min(unsigned int a, unsigned int b)
{
	return((a < b) ? a : b);
}
111
112
/*
 * Dump the fields of a TCP header to the console for debugging.
 * Only prints when the global debug level is exactly DBG_TCP.
 */
void
print_th(struct tcphdr *th)
{
	unsigned char *ptr;

	if (inet_debug != DBG_TCP) return;

	printk("TCP header:\n");
	/* Options (if any) start immediately after the fixed header. */
	ptr =(unsigned char *)(th + 1);
	printk(" source=%d, dest=%d, seq =%ld, ack_seq = %ld\n",
		ntohs(th->source), ntohs(th->dest),
		ntohl(th->seq), ntohl(th->ack_seq));
	printk(" fin=%d, syn=%d, rst=%d, psh=%d, ack=%d, urg=%d res1=%d res2=%d\n",
		th->fin, th->syn, th->rst, th->psh, th->ack,
		th->urg, th->res1, th->res2);
	printk(" window = %d, check = %d urg_ptr = %d\n",
		ntohs(th->window), ntohs(th->check), ntohs(th->urg_ptr));
	printk(" doff = %d\n", th->doff);
	/* Only the first four option bytes are shown. */
	printk(" options = %d %d %d %d\n", ptr[0], ptr[1], ptr[2], ptr[3]);
}
133
134
135
136
137 static struct sk_buff *
138 get_firstr(struct sock *sk)
139 {
140 return skb_dequeue(&sk->rqueue);
141 }
142
143
144
145
146
/*
 * Absolute distance between two sequence numbers.  The subtraction is
 * done in unsigned arithmetic (so sequence wrap-around behaves), then
 * the magnitude of the signed result is returned.
 */
static long
diff(unsigned long seq1, unsigned long seq2)
{
	long delta = seq1 - seq2;

	return((delta > 0) ? delta : -delta);
}
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174 static int tcp_select_window(struct sock *sk)
175 {
176 int new_window=sk->prot->rspace(sk)/2;
177
178
179 if(new_window<sk->window)
180 return(sk->window);
181
182 return(new_window);
183 }
184
185
186
187 static void tcp_time_wait(struct sock *sk)
188 {
189 sk->state = TCP_TIME_WAIT;
190 sk->shutdown = SHUTDOWN_MASK;
191 if (!sk->dead)
192 sk->state_change(sk);
193 reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
194 }
195
196
197
198
199
200
201
202
203 static void
204 tcp_retransmit(struct sock *sk, int all)
205 {
206 if (all) {
207 ip_retransmit(sk, all);
208 return;
209 }
210
211
212
213
214
215
216
217
218
219 sk->cong_window = 1;
220 sk->exp_growth = 0;
221
222
223 ip_retransmit(sk, all);
224 }
225
226
227
228
229
230
231
232
233
234
/*
 * Error handler invoked for ICMP (and locally generated) errors that
 * reference a TCP segment.  'header' points at the offending IP header;
 * the embedded TCP header after it identifies the local socket.
 */
void
tcp_err(int err, unsigned char *header, unsigned long daddr,
	unsigned long saddr, struct inet_protocol *protocol)
{
	struct tcphdr *th;
	struct sock *sk;
	struct iphdr *iph=(struct iphdr *)header;

	/* Skip past the variable-length IP header to the TCP header. */
	header+=4*iph->ihl;

	DPRINTF((DBG_TCP, "TCP: tcp_err(%d, hdr=%X, daddr=%X saddr=%X, protocol=%X)\n",
		err, header, daddr, saddr, protocol));

	th =(struct tcphdr *)header;
	sk = get_sock(&tcp_prot, th->source, daddr, th->dest, saddr);
	print_th(th);

	if (sk == NULL) return;	/* no matching local socket */

	/* A negative err is a local error code, not an ICMP type/code pair:
	   report it to the socket directly. */
	if(err<0)
	{
		sk->err = -err;
		sk->error_report(sk);
		return;
	}

	if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8)) {
		/* Source quench: shrink the congestion window, but keep a
		   floor so transmission never stops entirely. */
		if (sk->cong_window > 4) sk->cong_window--;
		return;
	}

	DPRINTF((DBG_TCP, "TCP: icmp_err got error\n"));
	/* Map the ICMP code to a Unix errno for the socket. */
	sk->err = icmp_err_convert[err & 0xff].errno;

	/* A fatal ICMP error aborts a connection attempt in progress;
	   established connections are left to time out. */
	if (icmp_err_convert[err & 0xff].fatal) {
		if (sk->state == TCP_SYN_SENT) {
			sk->state = TCP_CLOSE;
			sk->error_report(sk);
		}
	}
	return;
}
286
287
288
289
290
291
292
/*
 * Count the bytes currently readable on this socket by walking the
 * receive queue from the user's read point, stopping at the first
 * sequence gap or at a PSH segment.  Returns 0 for an empty queue.
 */
static int
tcp_readable(struct sock *sk)
{
	unsigned long counted;
	unsigned long amount;
	struct sk_buff *skb;
	int count=0;
	int sum;
	unsigned long flags;

	DPRINTF((DBG_TCP, "tcp_readable(sk=%X)\n", sk));
	if(sk && sk->debug)
		printk("tcp_readable: %p - ",sk);

	if (sk == NULL || skb_peek(&sk->rqueue) == NULL)
	{
		if(sk && sk->debug)
			printk("empty\n");
		return(0);
	}

	/* First sequence number not yet handed to the user. */
	counted = sk->copied_seq+1;
	amount = 0;

	/* Walk the queue with interrupts off so it cannot change under us. */
	save_flags(flags);
	cli();
	skb =(struct sk_buff *)sk->rqueue;

	/* Do until a push or until we are out of data. */
	do {
		count++;
#ifdef OLD
		/* Old guard against a corrupted (looping) receive queue. */
		if (count > 20) {
			restore_flags(flags);
			DPRINTF((DBG_TCP, "tcp_readable, more than 20 packets without a psh\n"));
			printk("tcp_read: possible read_queue corruption.\n");
			return(amount);
		}
#endif
		/* A gap in sequence space: nothing beyond it is readable yet. */
		if (before(counted, skb->h.th->seq))
			break;
		/* Bytes in this skb beyond what has already been counted. */
		sum = skb->len -(counted - skb->h.th->seq);
		if (skb->h.th->syn) sum++;	/* SYN occupies one sequence number */
		if (skb->h.th->urg) {
			/* Urgent data is read out of band, not via tcp_read. */
			sum -= ntohs(skb->h.th->urg_ptr);
		}
		if (sum >= 0) {
			amount += sum;
			if (skb->h.th->syn) amount--;	/* SYN is not user data */
			counted += sum;
		}
		if (amount && skb->h.th->psh) break;
		skb =(struct sk_buff *)skb->next;
	} while(skb != sk->rqueue);
	restore_flags(flags);
	DPRINTF((DBG_TCP, "tcp readable returning %d bytes\n", amount));
	if(sk->debug)
		printk("got %lu bytes.\n",amount);
	return(amount);
}
354
355
356
357
358
359
360
/*
 * select() support for TCP sockets.  Registers the caller on the
 * socket's wait queue and reports whether the requested condition
 * (readable / writable / exception) currently holds.
 */
static int
tcp_select(struct sock *sk, int sel_type, select_table *wait)
{
	DPRINTF((DBG_TCP, "tcp_select(sk=%X, sel_type = %d, wait = %X)\n",
		sk, sel_type, wait));

	sk->inuse = 1;
	switch(sel_type) {
	case SEL_IN:
		if(sk->debug)
			printk("select in");
		select_wait(sk->sleep, wait);
		if(sk->debug)
			printk("-select out");
		/* Readable if data is queued (a listening socket is
		   "readable" when any connection is pending). */
		if (skb_peek(&sk->rqueue) != NULL) {
			if (sk->state == TCP_LISTEN || tcp_readable(sk)) {
				release_sock(sk);
				if(sk->debug)
					printk("-select ok data\n");
				return(1);
			}
		}
		/* A pending error also wakes the reader. */
		if (sk->err != 0)
		{
			release_sock(sk);
			if(sk->debug)
				printk("-select ok error");
			return(1);
		}
		/* Receive side shut down: reads return immediately (EOF). */
		if (sk->shutdown & RCV_SHUTDOWN) {
			release_sock(sk);
			if(sk->debug)
				printk("-select ok down\n");
			return(1);
		} else {
			release_sock(sk);
			if(sk->debug)
				printk("-select fail\n");
			return(0);
		}
	case SEL_OUT:
		select_wait(sk->sleep, wait);
		if (sk->shutdown & SEND_SHUTDOWN) {
			DPRINTF((DBG_TCP,
				"write select on shutdown socket.\n"));
			/* NOTE(review): returns 0 (not writable) rather than
			   reporting the EPIPE condition - write() will do that. */
			release_sock(sk);
			return(0);
		}
		/* Writable only when a full MTU of buffer space is free,
		   and the connection is actually established. */
		if (sk->prot->wspace(sk) >= sk->mtu) {
			release_sock(sk);
			if (sk->state == TCP_SYN_RECV ||
			    sk->state == TCP_SYN_SENT) return(0);
			return(1);
		}
		DPRINTF((DBG_TCP,
			"tcp_select: sleeping on write sk->wmem_alloc = %d, "
			"sk->packets_out = %d\n"
			"sk->wback = %X, sk->wfront = %X\n"
			"sk->send_seq = %u, sk->window_seq=%u\n",
			sk->wmem_alloc, sk->packets_out,
			sk->wback, sk->wfront,
			sk->send_seq, sk->window_seq));

		release_sock(sk);
		return(0);
	case SEL_EX:
		select_wait(sk->sleep,wait);
		/* Exceptional condition == pending socket error. */
		if (sk->err) {
			release_sock(sk);
			return(1);
		}
		release_sock(sk);
		return(0);
	}

	release_sock(sk);
	return(0);
}
448
449
/*
 * ioctl() handler for TCP sockets.  Supports the debug hook, queued
 * byte counts for read (TIOCINQ) and write (TIOCOUTQ), and the
 * at-urgent-mark query (SIOCATMARK).  Results are written back to the
 * user-space address in 'arg'.
 */
int
tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int err;
	DPRINTF((DBG_TCP, "tcp_ioctl(sk=%X, cmd = %d, arg=%X)\n", sk, cmd, arg));
	switch(cmd) {
	case DDIOCSDBG:
		/* Adjust the inet debugging level. */
		return(dbg_ioctl((void *) arg, DBG_TCP));

	case TIOCINQ:
#ifdef FIXME
	case FIONREAD:
#endif
		{
			unsigned long amount;

			if (sk->state == TCP_LISTEN) return(-EINVAL);

			/* Number of bytes ready for reading. */
			sk->inuse = 1;
			amount = tcp_readable(sk);
			release_sock(sk);
			DPRINTF((DBG_TCP, "returning %d\n", amount));
			err=verify_area(VERIFY_WRITE,(void *)arg,
				sizeof(unsigned long));
			if(err)
				return err;
			put_fs_long(amount,(unsigned long *)arg);
			return(0);
		}
	case SIOCATMARK:
		{
			struct sk_buff *skb;
			int answ = 0;

			/* At the urgent mark iff the next byte the user would
			   read is the start of an urgent segment. */
			sk->inuse = 1;
			if ((skb=skb_peek(&sk->rqueue)) != NULL)
			{
				if (sk->copied_seq+1 == skb->h.th->seq && skb->h.th->urg)
					answ = 1;
			}
			release_sock(sk);
			err=verify_area(VERIFY_WRITE,(void *) arg,
				sizeof(unsigned long));
			if(err)
				return err;
			put_fs_long(answ,(int *) arg);
			return(0);
		}
	case TIOCOUTQ:
		{
			unsigned long amount;

			/* Free space left in the send buffer. */
			if (sk->state == TCP_LISTEN) return(-EINVAL);
			amount = sk->prot->wspace(sk);
			err=verify_area(VERIFY_WRITE,(void *)arg,
				sizeof(unsigned long));
			if(err)
				return err;
			put_fs_long(amount,(unsigned long *)arg);
			return(0);
		}
	default:
		return(-EINVAL);
	}
}
519
520
521
/*
 * Compute the TCP checksum over the pseudo header (source address,
 * destination address, protocol, length) and the segment itself.
 * Implemented in x86 inline assembly: 32-bit one's-complement sums
 * folded to 16 bits, with the odd 2- and 1-byte tails handled last.
 * Returns the one's complement, ready to store in th->check.
 */
unsigned short
tcp_check(struct tcphdr *th, int len,
	  unsigned long saddr, unsigned long daddr)
{
	unsigned long sum;

	if (saddr == 0) saddr = my_addr();
	print_th(th);
	/* Pseudo header: daddr + saddr + (length<<16 | proto) with carry. */
	__asm__("\t addl %%ecx,%%ebx\n"
		"\t adcl %%edx,%%ebx\n"
		"\t adcl $0, %%ebx\n"
		: "=b"(sum)
		: "0"(daddr), "c"(saddr), "d"((ntohs(len) << 16) + IPPROTO_TCP*256)
		: "cx","bx","dx" );

	/* Sum the bulk of the segment one 32-bit word at a time. */
	if (len > 3) {
		__asm__("\tclc\n"
			"1:\n"
			"\t lodsl\n"
			"\t adcl %%eax, %%ebx\n"
			"\t loop 1b\n"
			"\t adcl $0, %%ebx\n"
			: "=b"(sum) , "=S"(th)
			: "0"(sum), "c"(len/4) ,"1"(th)
			: "ax", "cx", "bx", "si" );
	}

	/* Fold the 32-bit accumulator down to 16 bits. */
	__asm__("\t movl %%ebx, %%ecx\n"
		"\t shrl $16,%%ecx\n"
		"\t addw %%cx, %%bx\n"
		"\t adcw $0, %%bx\n"
		: "=b"(sum)
		: "0"(sum)
		: "bx", "cx");

	/* Add any trailing 16-bit word. */
	if ((len & 2) != 0) {
		__asm__("\t lodsw\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum), "=S"(th)
			: "0"(sum) ,"1"(th)
			: "si", "ax", "bx");
	}

	/* Add any final odd byte. */
	if ((len & 1) != 0) {
		__asm__("\t lodsb\n"
			"\t movb $0,%%ah\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum)
			: "0"(sum) ,"S"(th)
			: "si", "ax", "bx");
	}

	/* One's complement of the folded sum is the checksum. */
	return((~sum) & 0xffff);
}
582
583
584 void
585 tcp_send_check(struct tcphdr *th, unsigned long saddr,
586 unsigned long daddr, int len, struct sock *sk)
587 {
588 th->check = 0;
589 th->check = tcp_check(th, len, saddr, daddr);
590 return;
591 }
592
593
/*
 * Flush the partially filled segment being accumulated in sk->send_tmp:
 * checksum it, then either transmit it immediately or append it to the
 * write queue if the window/congestion state forbids sending now.
 */
static void
tcp_send_partial(struct sock *sk)
{
	struct sk_buff *skb;

	if (sk == NULL || sk->send_tmp == NULL) return;

	skb = sk->send_tmp;

	/* A segment with no payload is only legitimate if it carries
	   SYN or FIN; anything else is a bug - drop it. */
	if(skb->len-(unsigned long)skb->h.th + (unsigned long)(skb+1)==sizeof(struct tcphdr))
	{
		if(!skb->h.th->syn && !skb->h.th->fin)
		{
			printk("tcp_send_partial: attempt to queue a bogon.\n");
			kfree_skb(skb,FREE_WRITE);
			sk->send_tmp=NULL;
			return;
		}
	}

	/* Checksum over header plus payload. */
	tcp_send_check(skb->h.th, sk->saddr, sk->daddr,
		skb->len-(unsigned long)skb->h.th +
		(unsigned long)(skb+1), sk);

	skb->h.seq = sk->send_seq;
	/* Hold the segment back if it is beyond the peer's window, if we
	   are retransmitting, or if the congestion window is full. */
	if (after(sk->send_seq , sk->window_seq) ||
	    sk->retransmits ||
	    sk->packets_out >= sk->cong_window) {
		DPRINTF((DBG_TCP, "sk->cong_window = %d, sk->packets_out = %d\n",
			sk->cong_window, sk->packets_out));
		DPRINTF((DBG_TCP, "sk->send_seq = %d, sk->window_seq = %d\n",
			sk->send_seq, sk->window_seq));
		/* Append to the tail of the write queue. */
		skb->next = NULL;
		skb->magic = TCP_WRITE_QUEUE_MAGIC;
		if (sk->wback == NULL) {
			sk->wfront=skb;
		} else {
			sk->wback->next = skb;
		}
		sk->wback = skb;
	} else {
		sk->prot->queue_xmit(sk, skb->dev, skb,0);
	}
	sk->send_tmp = NULL;
}
642
643
644
/*
 * Build and transmit a bare ACK segment carrying 'sequence' as its
 * sequence number and acknowledging 'ack'.  If no buffer can be
 * allocated, the ACK is deferred by bumping ack_backlog and arming a
 * short retry timer.
 */
static void
tcp_send_ack(unsigned long sequence, unsigned long ack,
	     struct sock *sk,
	     struct tcphdr *th, unsigned long daddr)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	struct device *dev = NULL;
	int tmp;

	if(sk->zapped)
		return;		/* socket has been reset; send nothing */

	/* Atomic allocation: this may run from interrupt context. */
	buff = (struct sk_buff *) sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL) {
		/* Remember that an ACK is owed and retry soon. */
		sk->ack_backlog++;
		if (sk->timeout != TIME_WRITE && tcp_connected(sk->state)) {
			reset_timer(sk, TIME_WRITE, 10);
		}
		if (inet_debug == DBG_SLIP) printk("\rtcp_ack: malloc failed\n");
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_ACK_SIZE;
	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;
	t1 =(struct tcphdr *)(buff + 1);

	/* Lay down the IP (and link) header first. */
	tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
		IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0) {
		buff->free=1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		if (inet_debug == DBG_SLIP) printk("\rtcp_ack: build_header failed\n");
		return;
	}
	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)t1 +tmp);

	/* Start from the incoming header, then swap/overwrite fields. */
	memcpy(t1, th, sizeof(*t1));

	t1->dest = th->source;
	t1->source = th->dest;
	/* NOTE(review): ntohl used where htonl is meant - same operation
	   on x86, so harmless here. */
	t1->seq = ntohl(sequence);
	t1->ack = 1;
	sk->window = tcp_select_window(sk);
	t1->window = ntohs(sk->window);
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	t1->fin = 0;
	if (ack == sk->acked_seq) {
		/* This ACK is fully up to date: clear the backlog and,
		   if nothing is in flight, drop or repurpose the timer. */
		sk->ack_backlog = 0;
		sk->bytes_rcv = 0;
		sk->ack_timed = 0;
		if (sk->send_head == NULL && sk->wfront == NULL && sk->timeout == TIME_WRITE)
		{
			if(sk->keepopen)
				reset_timer(sk,TIME_KEEPOPEN,TCP_TIMEOUT_LEN);
			else
				delete_timer(sk);
		}
	}
	t1->ack_seq = ntohl(ack);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), sk);
	if (sk->debug)
		printk("\rtcp_ack: seq %lx ack %lx\n", sequence, ack);
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
726
727
728
/*
 * Fill in a TCP header for an outgoing data segment, starting from the
 * socket's template header.  'push' is the number of bytes still to be
 * sent after this segment: when it is 0 this is the last piece, so PSH
 * is set.  Also resets the delayed-ACK bookkeeping, since this segment
 * will carry the current ACK.  Returns the header length in bytes.
 */
static int
tcp_build_header(struct tcphdr *th, struct sock *sk, int push)
{
	memcpy(th,(void *) &(sk->dummy_th), sizeof(*th));
	th->seq = htonl(sk->send_seq);
	th->psh =(push == 0) ? 1 : 0;
	th->doff = sizeof(*th)/4;
	th->ack = 1;
	th->fin = 0;
	/* The piggybacked ACK satisfies any pending ACK obligation. */
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	sk->ack_timed = 0;
	th->ack_seq = htonl(sk->acked_seq);
	sk->window = tcp_select_window(sk);
	th->window = htons(sk->window);

	return(sizeof(*th));
}
749
750
751
752
753
754
/*
 * write()/send() on a TCP socket.  Copies user data into segments and
 * either transmits them, queues them on the write queue, or accumulates
 * them in a partial segment (sk->send_tmp) for later coalescing.
 * Blocks (unless 'nonblock') while the connection is still being
 * established or while no buffer memory is available.  Returns the
 * number of bytes accepted, or a negative errno.
 */
static int
tcp_write(struct sock *sk, unsigned char *from,
	  int len, int nonblock, unsigned flags)
{
	int copied = 0;
	int copy;
	int tmp;
	struct sk_buff *skb;
	unsigned char *buff;
	struct proto *prot;
	struct device *dev = NULL;

	DPRINTF((DBG_TCP, "tcp_write(sk=%X, from=%X, len=%d, nonblock=%d, flags=%X)\n",
		sk, from, len, nonblock, flags));

	sk->inuse=1;
	prot = sk->prot;
	while(len > 0) {
		/* A pending error terminates the write (after reporting any
		   bytes already accepted). */
		if (sk->err) {
			release_sock(sk);
			if (copied) return(copied);
			tmp = -sk->err;
			sk->err = 0;
			return(tmp);
		}

		/* Writing on a send-shutdown socket is EPIPE. */
		if (sk->shutdown & SEND_SHUTDOWN) {
			release_sock(sk);
			sk->err = EPIPE;
			if (copied) return(copied);
			sk->err = 0;
			return(-EPIPE);
		}

		/* Wait until the connection is in a state that can carry
		   data (ESTABLISHED or CLOSE_WAIT). */
		while(sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) {
			if (sk->err) {
				release_sock(sk);
				if (copied) return(copied);
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			/* Not connecting either: the connection is gone. */
			if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 1\n"));
				if (copied) return(copied);

				if (sk->err) {
					tmp = -sk->err;
					sk->err = 0;
					return(tmp);
				}

				if (sk->keepopen) {
					send_sig(SIGPIPE, current, 0);
				}
				return(-EPIPE);
			}

			if (nonblock || copied) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 2\n"));
				if (copied) return(copied);
				return(-EAGAIN);
			}

			/* Sleep until the state changes; re-check everything
			   afterwards.  cli() closes the race between the test
			   and the sleep. */
			release_sock(sk);
			cli();
			if (sk->state != TCP_ESTABLISHED &&
			    sk->state != TCP_CLOSE_WAIT && sk->err == 0) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					DPRINTF((DBG_TCP, "tcp_write: return 3\n"));
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
		}

		/* If a partial segment is being accumulated, append to it. */
		if (sk->send_tmp != NULL) {
			int hdrlen;

			skb = sk->send_tmp;

			/* Bytes of (link+IP+TCP) header preceding the data. */
			hdrlen = ((unsigned long)skb->h.th - (unsigned long)(skb+1))
				+ sizeof(struct tcphdr);

			/* Urgent data must not be coalesced into a normal
			   partial segment. */
			if (!(flags & MSG_OOB)) {
				copy = min(sk->mtu - (skb->len - hdrlen), len);

				if (copy <= 0) {
					printk("TCP: **bug**: \"copy\" <= 0!!\n");
					copy = 0;
				}

				memcpy_fromfs((unsigned char *)(skb+1) + skb->len,
					from, copy);
				skb->len += copy;
				from += copy;
				copied += copy;
				len -= copy;
				sk->send_seq += copy;
			}

			/* Flush the partial segment once it fills an MTU, or
			   before sending urgent data. */
			if ((skb->len - hdrlen) > sk->mtu || (flags & MSG_OOB)) {
				tcp_send_partial(sk);
			}
			continue;
		}

		/* Decide how much to put in the next segment: what fits in
		   the peer's window, capped by MTU and remaining length.
		   If the usable window is small relative to what is in
		   flight, fall back to a full MTU (silly window avoidance). */
		copy = diff(sk->window_seq, sk->send_seq);
		if (copy < (diff(sk->window_seq, sk->rcv_ack_seq) >> 2))
			copy = sk->mtu;
		copy = min(copy, sk->mtu);
		copy = min(copy, len);

		/* A sub-MTU segment while data is in flight is buffered as a
		   partial segment for coalescing (Nagle-style); otherwise
		   allocate just what this segment needs. */
		if (sk->packets_out && copy < sk->mtu && !(flags & MSG_OOB)) {

			release_sock(sk);
			skb = (struct sk_buff *) prot->wmalloc(sk,
				sk->mtu + 128 + prot->max_header +
				sizeof(*skb), 0, GFP_KERNEL);
			sk->inuse = 1;
			sk->send_tmp = skb;
			if (skb != NULL)
				skb->mem_len = sk->mtu + 128 + prot->max_header + sizeof(*skb);
		} else {

			release_sock(sk);
			skb = (struct sk_buff *) prot->wmalloc(sk,
				copy + prot->max_header +
				sizeof(*skb), 0, GFP_KERNEL);
			sk->inuse = 1;
			if (skb != NULL)
				skb->mem_len = copy+prot->max_header + sizeof(*skb);
		}

		/* No buffer memory: wait for some to be freed. */
		if (skb == NULL) {
			if (nonblock ) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 4\n"));
				if (copied) return(copied);
				return(-EAGAIN);
			}

			/* Sleep only if no memory has been freed since the
			   failed allocation (tmp snapshot closes the race). */
			tmp = sk->wmem_alloc;
			release_sock(sk);
			cli();

			if (tmp <= sk->wmem_alloc &&
			    (sk->state == TCP_ESTABLISHED||sk->state == TCP_CLOSE_WAIT)
			    && sk->err == 0) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					DPRINTF((DBG_TCP, "tcp_write: return 5\n"));
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
			continue;
		}

		skb->mem_addr = skb;
		skb->len = 0;
		skb->sk = sk;
		skb->free = 0;

		buff =(unsigned char *)(skb+1);

		/* Build the lower-layer headers, then the TCP header. */
		tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev,
			IPPROTO_TCP, sk->opt, skb->mem_len,sk->ip_tos,sk->ip_ttl);
		if (tmp < 0 ) {
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 6\n"));
			if (copied) return(copied);
			return(tmp);
		}
		skb->len += tmp;
		skb->dev = dev;
		buff += tmp;
		skb->h.th =(struct tcphdr *) buff;
		tmp = tcp_build_header((struct tcphdr *)buff, sk, len-copy);
		if (tmp < 0) {
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 7\n"));
			if (copied) return(copied);
			return(tmp);
		}

		if (flags & MSG_OOB) {
			((struct tcphdr *)buff)->urg = 1;
			/* Urgent pointer covers the whole payload. */
			((struct tcphdr *)buff)->urg_ptr = ntohs(copy);
		}
		skb->len += tmp;
		memcpy_fromfs(buff+tmp, from, copy);

		from += copy;
		copied += copy;
		len -= copy;
		skb->len += copy;
		skb->free = 0;
		sk->send_seq += copy;

		/* A new partial segment is not sent yet - keep filling it. */
		if (sk->send_tmp != NULL) continue;

		tcp_send_check((struct tcphdr *)buff, sk->saddr, sk->daddr,
			copy + sizeof(struct tcphdr), sk);

		skb->h.seq = sk->send_seq;
		/* Hold back if past the peer's window, retransmitting, or
		   the congestion window is full; else transmit now. */
		if (after(sk->send_seq , sk->window_seq) ||
		    sk->retransmits ||
		    sk->packets_out >= sk->cong_window) {
			DPRINTF((DBG_TCP, "sk->cong_window = %d, sk->packets_out = %d\n",
				sk->cong_window, sk->packets_out));
			DPRINTF((DBG_TCP, "sk->send_seq = %d, sk->window_seq = %d\n",
				sk->send_seq, sk->window_seq));
			skb->next = NULL;
			skb->magic = TCP_WRITE_QUEUE_MAGIC;
			if (sk->wback == NULL) {
				sk->wfront = skb;
			} else {
				sk->wback->next = skb;
			}
			sk->wback = skb;
		} else {
			prot->queue_xmit(sk, dev, skb,0);
		}
	}
	sk->err = 0;

	/* Flush any leftover partial segment if nothing is in flight (or,
	   without Nagle, whenever window space remains). */
	if(sk->send_tmp &&
	   ((!sk->packets_out)
#ifndef USE_NAGLE
	    || before(sk->send_seq , sk->window_seq)
#endif
	   ))
		tcp_send_partial(sk);

	release_sock(sk);
	DPRINTF((DBG_TCP, "tcp_write: return 8\n"));
	return(copied);
}
1041
1042
1043 static int
1044 tcp_sendto(struct sock *sk, unsigned char *from,
1045 int len, int nonblock, unsigned flags,
1046 struct sockaddr_in *addr, int addr_len)
1047 {
1048 struct sockaddr_in sin;
1049
1050 if (addr_len < sizeof(sin)) return(-EINVAL);
1051 memcpy_fromfs(&sin, addr, sizeof(sin));
1052 if (sin.sin_family && sin.sin_family != AF_INET) return(-EINVAL);
1053 if (sin.sin_port != sk->dummy_th.dest) return(-EINVAL);
1054 if (sin.sin_addr.s_addr != sk->daddr) return(-EINVAL);
1055 return(tcp_write(sk, from, len, nonblock, flags));
1056 }
1057
1058
/*
 * Send a window-update ACK after the reader has freed receive buffer
 * space.  Only runs when an ACK is actually owed (ack_backlog != 0).
 * On allocation failure the attempt is simply retried via a short
 * write timer.
 */
static void
tcp_read_wakeup(struct sock *sk)
{
	int tmp;
	struct device *dev = NULL;
	struct tcphdr *t1;
	struct sk_buff *buff;

	DPRINTF((DBG_TCP, "in tcp read wakeup\n"));
	if (!sk->ack_backlog) return;

	/* Atomic allocation: may be called with the socket locked. */
	buff = (struct sk_buff *) sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
	if (buff == NULL) {
		/* Try again shortly. */
		reset_timer(sk, TIME_WRITE, 10);
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_ACK_SIZE;
	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;

	/* Lower-layer headers first. */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
		IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0) {
		buff->free=1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		return;
	}

	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)(buff+1) +tmp);

	/* Plain ACK from the socket's template header, advertising the
	   freshly recomputed window. */
	memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
	t1->seq = ntohl(sk->send_seq);
	t1->ack = 1;
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	sk->window = tcp_select_window(sk);
	t1->window = ntohs(sk->window);
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
1122
1123
1124
1125
1126
1127
1128
1129
/*
 * Release fully consumed buffers from the head of the receive queue.
 * If that actually freed space, arrange for the peer to learn about
 * the larger window: immediately (via tcp_read_wakeup) when the gain
 * is at least an MTU beyond what was advertised, otherwise via a
 * delayed-ACK timer.
 */
static void
cleanup_rbuf(struct sock *sk)
{
	unsigned long flags;
	int left;
	struct sk_buff *skb;

	if(sk->debug)
		printk("cleaning rbuf for sk=%p\n", sk);

	save_flags(flags);
	cli();

	/* Remember the free space before cleaning, to detect a change. */
	left = sk->prot->rspace(sk);

	/* Pop consumed ('used') buffers off the front of the queue. */
	while((skb=skb_peek(&sk->rqueue)) != NULL )
	{
		if (!skb->used)
			break;
		skb_unlink(skb);
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
	}

	restore_flags(flags);

	DPRINTF((DBG_TCP, "sk->window left = %d, sk->prot->rspace(sk)=%d\n",
		sk->window - sk->bytes_rcv, sk->prot->rspace(sk)));

	if(sk->debug)
		printk("sk->rspace = %lu, was %d\n", sk->prot->rspace(sk),
			left);
	if (sk->prot->rspace(sk) != left)
	{
		/* Space was freed: the peer should be told the window grew. */
		sk->ack_backlog++;
		if ((sk->prot->rspace(sk) > (sk->window - sk->bytes_rcv + sk->mtu))) {
			/* Big enough change to be worth an immediate ACK. */
			tcp_read_wakeup(sk);
		} else {
			/* Otherwise delay the ACK, but never push an already
			   pending timer further out. */
			int was_active = del_timer(&sk->timer);
			if (!was_active || TCP_ACK_TIME < sk->timer.expires) {
				reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
			} else
				add_timer(&sk->timer);
		}
	}
}
1198
1199
1200
/*
 * Read urgent (out-of-band) data.  Waits (unless 'nonblock') until an
 * unconsumed urgent segment is queued, then copies up to urg_ptr bytes
 * of it to the user and marks the urgent data consumed (unless
 * MSG_PEEK).  Returns bytes copied or a negative errno.
 */
static int
tcp_read_urg(struct sock * sk, int nonblock,
	     unsigned char *to, int len, unsigned flags)
{
	int copied = 0;
	struct sk_buff *skb;

	DPRINTF((DBG_TCP, "tcp_read_urg(sk=%X, to=%X, len=%d, flags=%X)\n",
		sk, to, len, flags));

	while(len > 0)
	{
		sk->inuse = 1;
		/* Wait for urgent data to arrive. */
		while(sk->urg==0 || skb_peek(&sk->rqueue) == NULL) {
			if (sk->err) {
				int tmp;

				release_sock(sk);
				if (copied) return(copied);
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			if (sk->state == TCP_CLOSE || sk->done) {
				release_sock(sk);
				if (copied) return(copied);
				/* First EOF read returns 0; later ones ENOTCONN. */
				if (!sk->done) {
					sk->done = 1;
					return(0);
				}
				return(-ENOTCONN);
			}

			if (sk->shutdown & RCV_SHUTDOWN) {
				release_sock(sk);
				if (copied == 0)
					sk->done = 1;
				return(copied);
			}

			if (nonblock || copied) {
				release_sock(sk);
				if (copied) return(copied);
				return(-EAGAIN);
			}

			/* Sleep until something changes; cli() closes the race
			   between the re-test and the sleep. */
			release_sock(sk);
			cli();
			if ((sk->urg == 0 || skb_peek(&sk->rqueue) == NULL) &&
			    sk->err == 0 && !(sk->shutdown & RCV_SHUTDOWN)) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
		}

		/* Scan the queue for the first unconsumed urgent segment. */
		skb = skb_peek(&sk->rqueue);
		do {
			int amt;

			if (skb->h.th->urg && !skb->urg_used) {
				/* An urg_ptr of 0 means the whole segment. */
				if (skb->h.th->urg_ptr == 0) {
					skb->h.th->urg_ptr = ntohs(skb->len);
				}
				amt = min(ntohs(skb->h.th->urg_ptr),len);
				if(amt)
				{
					/* NOTE(review): verify_area result is
					   ignored here, unlike elsewhere. */
					verify_area(VERIFY_WRITE, to, amt);
					memcpy_tofs(to,(unsigned char *)(skb->h.th) +
						skb->h.th->doff*4, amt);
				}

				if (!(flags & MSG_PEEK)) {
					skb->urg_used = 1;
					sk->urg--;
				}
				release_sock(sk);
				copied += amt;
				return(copied);
			}
			skb =(struct sk_buff *)skb->next;
		} while(skb != sk->rqueue);
	}

	release_sock(sk);
	return(0);
}
1295
1296
1297
/*
 * read()/recv() on a TCP socket.  Copies in-sequence data from the
 * receive queue to user space, blocking (unless 'nonblock') while no
 * data is available.  Urgent bytes embedded in the stream are skipped
 * (after delivering SIGURG if they have not been read out of band).
 * Returns bytes copied or a negative errno.
 */
static int
tcp_read(struct sock *sk, unsigned char *to,
	 int len, int nonblock, unsigned flags)
{
	int copied=0;
	struct sk_buff *skb;
	unsigned long offset;
	unsigned long used;
	int err;

	if (len == 0) return(0);
	if (len < 0) {
		return(-EINVAL);
	}

	err=verify_area(VERIFY_WRITE,to,len);
	if(err)
		return err;

	/* A listening socket carries no data stream. */
	if (sk->state == TCP_LISTEN) return(-ENOTCONN);

	/* Out-of-band requests go through the urgent-data path. */
	if ((flags & MSG_OOB))
		return(tcp_read_urg(sk, nonblock, to, len, flags));

	sk->inuse = 1;

	skb=skb_peek(&sk->rqueue);

	DPRINTF((DBG_TCP, "tcp_read(sk=%X, to=%X, len=%d, nonblock=%d, flags=%X)\n",
		sk, to, len, nonblock, flags));

	while(len > 0) {
		/* Wait until the next in-sequence, unconsumed skb is at the
		   read point. */
		while(skb == NULL ||
		      before(sk->copied_seq+1, skb->h.th->seq) || skb->used) {
			DPRINTF((DBG_TCP, "skb = %X:\n", skb));
			cleanup_rbuf(sk);
			if (sk->err)
			{
				int tmp;

				release_sock(sk);
				if (copied)
				{
					DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
						copied));
					return(copied);
				}
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			if (sk->state == TCP_CLOSE)
			{
				release_sock(sk);
				if (copied) {
					DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
						copied));
					return(copied);
				}
				/* First EOF read returns 0; later ones ENOTCONN. */
				if (!sk->done) {
					sk->done = 1;
					return(0);
				}
				return(-ENOTCONN);
			}

			if (sk->shutdown & RCV_SHUTDOWN)
			{
				release_sock(sk);
				if (copied == 0) sk->done = 1;
				DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
				return(copied);
			}

			if (nonblock || copied)
			{
				release_sock(sk);
				if(sk->debug)
					printk("read: EAGAIN\n");
				if (copied)
				{
					DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
						copied));
					return(copied);
				}
				return(-EAGAIN);
			}

			if ((flags & MSG_PEEK) && copied != 0)
			{
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
				return(copied);
			}

			DPRINTF((DBG_TCP, "tcp_read about to sleep. state = %d\n",
				sk->state));
			release_sock(sk);

			/* cli() closes the race between the re-test below and
			   the sleep: a wakeup cannot slip in between. */
			cli();
			if (sk->shutdown & RCV_SHUTDOWN || sk->err != 0) {
				sk->inuse = 1;
				sti();
				continue;
			}

			if (skb_peek(&sk->rqueue) == NULL ||
			    before(sk->copied_seq+1, sk->rqueue->h.th->seq)) {
				if(sk->debug)
					printk("Read wait sleep\n");
				interruptible_sleep_on(sk->sleep);
				if(sk->debug)
					printk("Read wait wakes\n");
				if (current->signal & ~current->blocked) {
					sti();
					if (copied) {
						DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
							copied));
						return(copied);
					}
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
			DPRINTF((DBG_TCP, "tcp_read woke up. \n"));

			skb=skb_peek(&sk->rqueue);

		}

		/* Offset of the read point within this skb's payload. */
		offset = sk->copied_seq+1 - skb->h.th->seq;

		if (skb->h.th->syn) offset--;	/* SYN takes a sequence number but no data byte */
		if (offset < skb->len)
		{
			/* Urgent data in this segment: if it was consumed
			   out of band, skip past it; otherwise signal the
			   process and bail out. */
			if (skb->h.th->urg)
			{
				if (skb->urg_used)
				{
					sk->copied_seq += ntohs(skb->h.th->urg_ptr);
					offset += ntohs(skb->h.th->urg_ptr);
					if (offset >= skb->len)
					{
						skb->used = 1;
						skb =(struct sk_buff *)skb->next;
						continue;
					}
				}
				else
				{
					release_sock(sk);
					if (copied)
						return(copied);
					send_sig(SIGURG, current, 0);
					return(-EINTR);
				}
			}

			/* Copy as much of this segment as fits. */
			used = min(skb->len - offset, len);

			memcpy_tofs(to,((unsigned char *)skb->h.th) +
				skb->h.th->doff*4 + offset, used);
			copied += used;
			len -= used;
			to += used;

			/* Peeking does not advance the read point. */
			if (!(flags & MSG_PEEK))
				sk->copied_seq += used;

			/* Mark the skb consumed once everything in it (and
			   any urgent data) has been taken. */
			if (!(flags & MSG_PEEK) &&
			    (!skb->h.th->urg || skb->urg_used) &&
			    (used + offset >= skb->len))
				skb->used = 1;

			/* Stop at an urgent segment so the mark is honored. */
			if (skb->h.th->urg)
			{
				break;
			}
		}
		else
		{
			skb->used = 1;
		}

		skb =(struct sk_buff *)skb->next;
	}

	cleanup_rbuf(sk);
	release_sock(sk);
	DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
	if (copied == 0 && nonblock)
		return(-EAGAIN);
	return(copied);
}
1523
1524
1525
1526
1527
1528
/*
 * shutdown() on the send side: flush any partial segment, build a FIN
 * segment, and either queue it behind pending write data or transmit
 * it immediately.  Moves the socket to FIN_WAIT1/FIN_WAIT2.  Does
 * nothing unless SEND_SHUTDOWN is requested or a FIN is already out.
 */
void
tcp_shutdown(struct sock *sk, int how)
{
	struct sk_buff *buff;
	struct tcphdr *t1, *th;
	struct proto *prot;
	int tmp;
	struct device *dev = NULL;

	/* Already sent a FIN, or caller is not shutting down the send side. */
	if (sk->state == TCP_FIN_WAIT1 || sk->state == TCP_FIN_WAIT2) return;
	if (!(how & SEND_SHUTDOWN)) return;
	sk->inuse = 1;

	/* Any accumulated partial data must go out before the FIN. */
	if (sk->send_tmp) tcp_send_partial(sk);

	prot =(struct proto *)sk->prot;
	th =(struct tcphdr *)&sk->dummy_th;
	release_sock(sk);	/* allow the blocking allocation below */
	buff = (struct sk_buff *) prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
	if (buff == NULL) return;
	sk->inuse = 1;

	DPRINTF((DBG_TCP, "tcp_shutdown_send buff = %X\n", buff));
	buff->mem_addr = buff;
	buff->mem_len = MAX_RESET_SIZE;
	buff->sk = sk;
	buff->len = sizeof(*t1);
	t1 =(struct tcphdr *)(buff + 1);

	/* Lower-layer headers first. */
	tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
		IPPROTO_TCP, sk->opt,
		sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
	if (tmp < 0) {
		buff->free=1;
		prot->wfree(sk,buff->mem_addr, buff->mem_len);
		release_sock(sk);
		DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
		return;
	}

	t1 =(struct tcphdr *)((char *)t1 +tmp);
	buff ->len += tmp;
	buff->dev = dev;
	/* FIN segment from the template header; FIN consumes one
	   sequence number. */
	memcpy(t1, th, sizeof(*t1));
	t1->seq = ntohl(sk->send_seq);
	sk->send_seq++;
	buff->h.seq = sk->send_seq;
	t1->ack = 1;
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->window = ntohs(sk->window=tcp_select_window(sk));
	t1->fin = 1;
	t1->rst = 0;
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

	/* The FIN must not overtake queued data: append it to the write
	   queue if one exists, else send it now. */
	if (sk->wback != NULL) {
		buff->free=0;
		buff->next = NULL;
		sk->wback->next = buff;
		sk->wback = buff;
		buff->magic = TCP_WRITE_QUEUE_MAGIC;
	} else {
		sk->prot->queue_xmit(sk, dev, buff, 0);
	}

	if (sk->state == TCP_ESTABLISHED) sk->state = TCP_FIN_WAIT1;
	else sk->state = TCP_FIN_WAIT2;

	release_sock(sk);
}
1613
1614
1615 static int
1616 tcp_recvfrom(struct sock *sk, unsigned char *to,
1617 int to_len, int nonblock, unsigned flags,
1618 struct sockaddr_in *addr, int *addr_len)
1619 {
1620 struct sockaddr_in sin;
1621 int len;
1622 int err;
1623 int result;
1624
1625
1626
1627
1628 err = verify_area(VERIFY_WRITE,addr_len,sizeof(long));
1629 if(err)
1630 return err;
1631 len = get_fs_long(addr_len);
1632 if(len > sizeof(sin))
1633 len = sizeof(sin);
1634 err=verify_area(VERIFY_WRITE, addr, len);
1635 if(err)
1636 return err;
1637
1638 result=tcp_read(sk, to, to_len, nonblock, flags);
1639
1640 if (result < 0) return(result);
1641
1642 sin.sin_family = AF_INET;
1643 sin.sin_port = sk->dummy_th.dest;
1644 sin.sin_addr.s_addr = sk->daddr;
1645
1646 memcpy_tofs(addr, &sin, len);
1647 put_fs_long(len, addr_len);
1648 return(result);
1649 }
1650
1651
1652
1653 static void
1654 tcp_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
1655 struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
1656 {
1657 struct sk_buff *buff;
1658 struct tcphdr *t1;
1659 int tmp;
1660
1661
1662
1663
1664
1665 buff = (struct sk_buff *) prot->wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
1666 if (buff == NULL)
1667 return;
1668
1669 DPRINTF((DBG_TCP, "tcp_reset buff = %X\n", buff));
1670 buff->mem_addr = buff;
1671 buff->mem_len = MAX_RESET_SIZE;
1672 buff->len = sizeof(*t1);
1673 buff->sk = NULL;
1674 buff->dev = dev;
1675
1676 t1 =(struct tcphdr *)(buff + 1);
1677
1678
1679 tmp = prot->build_header(buff, saddr, daddr, &dev, IPPROTO_TCP, opt,
1680 sizeof(struct tcphdr),tos,ttl);
1681 if (tmp < 0) {
1682 buff->free = 1;
1683 prot->wfree(NULL, buff->mem_addr, buff->mem_len);
1684 return;
1685 }
1686 t1 =(struct tcphdr *)((char *)t1 +tmp);
1687 buff->len += tmp;
1688 memcpy(t1, th, sizeof(*t1));
1689
1690
1691 t1->dest = th->source;
1692 t1->source = th->dest;
1693 t1->rst = 1;
1694 t1->window = 0;
1695
1696 if(th->ack)
1697 {
1698 t1->ack=0;
1699 t1->seq=th->ack_seq;
1700 t1->ack_seq=0;
1701 }
1702 else
1703 {
1704 t1->ack=1;
1705 if(!th->syn)
1706 t1->ack_seq=htonl(th->seq);
1707 else
1708 t1->ack_seq=htonl(th->seq+1);
1709 t1->seq=0;
1710 }
1711
1712 t1->syn = 0;
1713 t1->urg = 0;
1714 t1->fin = 0;
1715 t1->psh = 0;
1716 t1->doff = sizeof(*t1)/4;
1717 tcp_send_check(t1, saddr, daddr, sizeof(*t1), NULL);
1718 prot->queue_xmit(NULL, dev, buff, 1);
1719 }
1720
1721
1722
1723
1724
1725
1726 static void
1727 tcp_options(struct sock *sk, struct tcphdr *th)
1728 {
1729 unsigned char *ptr;
1730 int length=(th->doff*4)-sizeof(struct tcphdr);
1731
1732 ptr = (unsigned char *)(th + 1);
1733
1734 while(length>0)
1735 {
1736 int opcode=*ptr++;
1737 int opsize=*ptr++;
1738 switch(opcode)
1739 {
1740 case TCPOPT_EOL:
1741 return;
1742 case TCPOPT_NOP:
1743 length-=2;
1744 continue;
1745
1746 default:
1747 if(opsize<=2)
1748 return;
1749 switch(opcode)
1750 {
1751 case TCPOPT_MSS:
1752 if(opsize==4)
1753 {
1754 sk->mtu=min(sk->mtu,ntohs(*(unsigned short *)ptr));
1755 }
1756 break;
1757
1758 }
1759 ptr+=opsize-2;
1760 length-=opsize;
1761 }
1762 }
1763
1764 }
1765
1766
1767
1768
1769
1770
1771
1772
/*
 * Handle an incoming SYN on a listening socket: clone the listener
 * into a new sock in TCP_SYN_RECV state, send a SYN|ACK carrying our
 * MSS option, and park the SYN skb on the listener's receive queue so
 * accept() can pick up the new socket later.
 */
static void
tcp_conn_request(struct sock *sk, struct sk_buff *skb,
		 unsigned long daddr, unsigned long saddr,
		 struct options *opt, struct device *dev)
{
  struct sk_buff *buff;
  struct tcphdr *t1;
  unsigned char *ptr;
  struct sock *newsk;
  struct tcphdr *th;
  int tmp;

  DPRINTF((DBG_TCP, "tcp_conn_request(sk = %X, skb = %X, daddr = %X, sadd4= %X, \n"
	  "                  opt = %X, dev = %X)\n",
	  sk, skb, daddr, saddr, opt, dev));

  th = skb->h.th;

  /* Wake the listener; a SYN aimed at a dead socket gets a RST. */
  if (!sk->dead) {
	sk->data_ready(sk,0);
  } else {
	DPRINTF((DBG_TCP, "tcp_conn_request on dead socket\n"));
	tcp_reset(daddr, saddr, th, sk->prot, opt, dev, sk->ip_tos,sk->ip_ttl);
	kfree_skb(skb, FREE_READ);
	return;
  }

  /* Drop the SYN silently if the accept backlog is full; the client
   * will retransmit. */
  if (sk->ack_backlog >= sk->max_ack_backlog) {
	kfree_skb(skb, FREE_READ);
	return;
  }

  newsk = (struct sock *) kmalloc(sizeof(struct sock), GFP_ATOMIC);
  if (newsk == NULL) {
	/* Again: just drop, the client retransmits. */
	kfree_skb(skb, FREE_READ);
	return;
  }

  DPRINTF((DBG_TCP, "newsk = %X\n", newsk));
  /* Clone the listener wholesale, then reset everything that must
   * not be shared (queues, timers, counters). */
  memcpy((void *)newsk,(void *)sk, sizeof(*newsk));
  newsk->wback = NULL;
  newsk->wfront = NULL;
  newsk->rqueue = NULL;
  newsk->send_head = NULL;
  newsk->send_tail = NULL;
  newsk->back_log = NULL;
  newsk->rtt = TCP_CONNECT_TIME;
  newsk->mdev = 0;
  newsk->backoff = 0;
  newsk->blog = 0;
  newsk->intr = 0;
  newsk->proc = 0;
  newsk->done = 0;
  newsk->send_tmp = NULL;
  newsk->pair = NULL;
  newsk->wmem_alloc = 0;
  newsk->rmem_alloc = 0;

  newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;

  newsk->err = 0;
  newsk->shutdown = 0;
  newsk->ack_backlog = 0;
  /* The SYN consumes one sequence number. */
  newsk->acked_seq = skb->h.th->seq+1;
  newsk->fin_seq = skb->h.th->seq;
  newsk->copied_seq = skb->h.th->seq;
  newsk->state = TCP_SYN_RECV;
  newsk->timeout = 0;
  /* Clock-driven initial send sequence. */
  newsk->send_seq = jiffies * SEQ_TICK - seq_offset;
  newsk->rcv_ack_seq = newsk->send_seq;
  newsk->urg =0;
  newsk->retransmits = 0;
  newsk->destroy = 0;
  newsk->timer.data = (unsigned long)newsk;
  newsk->timer.function = &net_timer;
  newsk->dummy_th.source = skb->h.th->dest;
  newsk->dummy_th.dest = skb->h.th->source;

  /* Addresses from the wire perspective: their source is our daddr. */
  newsk->daddr = saddr;
  newsk->saddr = daddr;

  put_sock(newsk->num,newsk);
  newsk->dummy_th.res1 = 0;
  newsk->dummy_th.doff = 6;
  newsk->dummy_th.fin = 0;
  newsk->dummy_th.syn = 0;
  newsk->dummy_th.rst = 0;
  newsk->dummy_th.psh = 0;
  newsk->dummy_th.ack = 0;
  newsk->dummy_th.urg = 0;
  newsk->dummy_th.res2 = 0;
  newsk->acked_seq = skb->h.th->seq + 1;
  newsk->copied_seq = skb->h.th->seq;

  /* Inherit TTL; echo the caller's TOS. */
  newsk->ip_ttl=sk->ip_ttl;
  newsk->ip_tos=skb->ip_hdr->tos;

  /* MSS: user-set value if any, else the RFC 1122 default, always
   * clamped to the device MTU. */
  if (sk->mss)
	newsk->mtu = sk->mss;
  else
	newsk->mtu = 576 - HEADER_SIZE;

  newsk->mtu = min(newsk->mtu, dev->mtu - HEADER_SIZE);

  /* Parse the peer's options (may clamp newsk->mtu further). */
  tcp_options(newsk,skb->h.th);

  buff = (struct sk_buff *) newsk->prot->wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
  if (buff == NULL) {
	sk->err = -ENOMEM;
	newsk->dead = 1;
	release_sock(newsk);
	kfree_skb(skb, FREE_READ);
	return;
  }

  buff->mem_addr = buff;
  buff->mem_len = MAX_SYN_SIZE;
  buff->len = sizeof(struct tcphdr)+4;	/* header + 4-byte MSS option */
  buff->sk = newsk;

  t1 =(struct tcphdr *)(buff + 1);

  tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &dev,
			       IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);

  if (tmp < 0) {
	sk->err = tmp;
	buff->free=1;
	kfree_skb(buff,FREE_WRITE);
	newsk->dead = 1;
	release_sock(newsk);
	skb->sk = sk;
	kfree_skb(skb, FREE_READ);
	return;
  }

  buff->len += tmp;
  t1 =(struct tcphdr *)((char *)t1 +tmp);

  /* Build the SYN|ACK from the incoming header. */
  memcpy(t1, skb->h.th, sizeof(*t1));
  buff->h.seq = newsk->send_seq;

  t1->dest = skb->h.th->source;
  t1->source = newsk->dummy_th.source;
  t1->seq = ntohl(newsk->send_seq++);
  t1->ack = 1;
  newsk->window = tcp_select_window(newsk);
  t1->window = ntohs(newsk->window);
  t1->res1 = 0;
  t1->res2 = 0;
  t1->rst = 0;
  t1->urg = 0;
  t1->psh = 0;
  t1->syn = 1;
  t1->ack_seq = ntohl(skb->h.th->seq+1);
  t1->doff = sizeof(*t1)/4+1;	/* header plus one 4-byte option word */

  /* MSS option: kind 2, length 4, value = our mtu. */
  ptr =(unsigned char *)(t1+1);
  ptr[0] = 2;
  ptr[1] = 4;
  ptr[2] = ((newsk->mtu) >> 8) & 0xff;
  ptr[3] =(newsk->mtu) & 0xff;

  tcp_send_check(t1, daddr, saddr, sizeof(*t1)+4, newsk);
  newsk->prot->queue_xmit(newsk, dev, buff, 0);

  reset_timer(newsk, TIME_WRITE , TCP_CONNECT_TIME);
  skb->sk = newsk;

  /* Charge the SYN's memory to the new socket, then queue it on the
   * listener so accept() can find newsk. */
  sk->rmem_alloc -= skb->mem_len;
  newsk->rmem_alloc += skb->mem_len;

  skb_queue_tail(&sk->rqueue,skb);
  sk->ack_backlog++;
  release_sock(newsk);
}
1971
1972
/*
 * Close the socket: flush the receive queue (remembering whether
 * unread data forces a RST), then act on the current state — either
 * just wait for the close to finish, or build and send a FIN.
 * 'timeout' non-zero means the caller will not wait around.
 */
static void
tcp_close(struct sock *sk, int timeout)
{
  struct sk_buff *buff;
  int need_reset = 0;
  struct tcphdr *t1, *th;
  struct proto *prot;
  struct device *dev=NULL;
  int tmp;

  DPRINTF((DBG_TCP, "tcp_close((struct sock *)%X, %d)\n",sk, timeout));
  sk->inuse = 1;
  sk->keepopen = 1;
  sk->shutdown = SHUTDOWN_MASK;

  if (!sk->dead)
	sk->state_change(sk);

  /* Throw away unread data; if any of it was never consumed by the
   * application, the close must carry a RST. */
  if (skb_peek(&sk->rqueue) != NULL)
  {
	struct sk_buff *skb;
	if(sk->debug)
		printk("Clean rcv queue\n");
	while((skb=skb_dequeue(&sk->rqueue))!=NULL)
	{
		if(skb->len > 0 && after(skb->h.th->seq + skb->len + 1 , sk->copied_seq))
			need_reset = 1;
		kfree_skb(skb, FREE_READ);
	}
	if(sk->debug)
		printk("Cleaned.\n");
  }
  sk->rqueue = NULL;

  /* Get any partially built segment onto the wire before the FIN. */
  if (sk->send_tmp) {
	tcp_send_partial(sk);
  }

  switch(sk->state) {
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
	case TCP_LAST_ACK:
		/* FIN already sent; just time out the close. */
		reset_timer(sk, TIME_CLOSE, 4 * sk->rtt);
		if (timeout) tcp_time_wait(sk);
		release_sock(sk);
		return;
	case TCP_TIME_WAIT:
		if (timeout) {
			sk->state = TCP_CLOSE;
		}
		release_sock(sk);
		return;
	case TCP_LISTEN:
		sk->state = TCP_CLOSE;
		release_sock(sk);
		return;
	case TCP_CLOSE:
		release_sock(sk);
		return;
	case TCP_CLOSE_WAIT:
	case TCP_ESTABLISHED:
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		prot =(struct proto *)sk->prot;
		th =(struct tcphdr *)&sk->dummy_th;
		buff = (struct sk_buff *) prot->wmalloc(sk, MAX_FIN_SIZE, 1, GFP_ATOMIC);
		if (buff == NULL) {
			/* No memory for the FIN right now: back off to a
			 * sending state and retry via the close timer. */
			release_sock(sk);
			if (sk->state != TCP_CLOSE_WAIT)
					sk->state = TCP_ESTABLISHED;
			reset_timer(sk, TIME_CLOSE, 100);
			return;
		}
		buff->mem_addr = buff;
		buff->mem_len = MAX_FIN_SIZE;
		buff->sk = sk;
		buff->free = 1;
		buff->len = sizeof(*t1);
		t1 =(struct tcphdr *)(buff + 1);

		tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
					 IPPROTO_TCP, sk->opt,
					 sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
		if (tmp < 0) {
			kfree_skb(buff,FREE_WRITE);
			DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
			release_sock(sk);
			return;
		}

		t1 =(struct tcphdr *)((char *)t1 +tmp);
		buff ->len += tmp;
		buff->dev = dev;
		memcpy(t1, th, sizeof(*t1));
		t1->seq = ntohl(sk->send_seq);
		sk->send_seq++;	/* FIN consumes one sequence number */
		buff->h.seq = sk->send_seq;
		t1->ack = 1;

		/* Ack everything immediately — no more delayed acks. */
		sk->delay_acks = 0;
		t1->ack_seq = ntohl(sk->acked_seq);
		t1->window = ntohs(sk->window=tcp_select_window(sk));
		t1->fin = 1;
		t1->rst = need_reset;	/* unread data was discarded */
		t1->doff = sizeof(*t1)/4;
		tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

		if (sk->wfront == NULL) {
			prot->queue_xmit(sk, dev, buff, 0);
		} else {
			/* Data still queued: FIN goes behind it, and the
			 * write timer will drive it out. */
			reset_timer(sk, TIME_WRITE,
			  backoff(sk->backoff) * (2 * sk->mdev + sk->rtt));
			buff->next = NULL;
			if (sk->wback == NULL) {
				sk->wfront=buff;
			} else {
				sk->wback->next = buff;
			}
			sk->wback = buff;
			buff->magic = TCP_WRITE_QUEUE_MAGIC;
		}

		if (sk->state == TCP_CLOSE_WAIT) {
			sk->state = TCP_FIN_WAIT2;
		} else {
			sk->state = TCP_FIN_WAIT1;
		}
  }
  release_sock(sk);
}
2115
2116
2117
2118
2119
2120
/*
 * Transmit as much of the write queue as the peer's window, our
 * retransmit state and the congestion window allow.  Segments already
 * covered by rcv_ack_seq are freed instead of sent.
 */
static void
tcp_write_xmit(struct sock *sk)
{
  struct sk_buff *skb;

  DPRINTF((DBG_TCP, "tcp_write_xmit(sk=%X)\n", sk));

  /* A zapped (reset) socket must not transmit. */
  if(sk->zapped)
	return;

  /* Send while: there is queued data, it fits in the offered window,
   * we are not holding back for a retransmit, and the congestion
   * window has room. */
  while(sk->wfront != NULL &&
        before(sk->wfront->h.seq, sk->window_seq) &&
        (sk->retransmits == 0 || before(sk->wfront->h.seq, sk->rcv_ack_seq +1))
        && sk->packets_out < sk->cong_window) {
	skb = sk->wfront;
	IS_SKB(skb);
	sk->wfront =(struct sk_buff *)skb->next;
	if (sk->wfront == NULL) sk->wback = NULL;
	skb->next = NULL;
	if (skb->magic != TCP_WRITE_QUEUE_MAGIC) {
		/* Corrupted queue entry: drop the whole queue rather
		 * than transmit garbage. */
		printk("tcp.c skb with bad magic(%X) on write queue. Squashing "
			"queue\n", skb->magic);
		sk->wfront = NULL;
		sk->wback = NULL;
		return;
	}
	skb->magic = 0;
	DPRINTF((DBG_TCP, "Sending a packet.\n"));

	/* Already acknowledged while it sat on the queue? Free it. */
	if (before(skb->h.seq, sk->rcv_ack_seq +1)) {
		sk->retransmits = 0;
		kfree_skb(skb, FREE_WRITE);
		if (!sk->dead) sk->write_space(sk);
	} else {
		sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
	}
  }
}
2162
2163
2164
2165
2166
2167
2168 void
2169 sort_send(struct sock *sk)
2170 {
2171 struct sk_buff *list = NULL;
2172 struct sk_buff *skb,*skb2,*skb3;
2173
2174 for (skb = sk->send_head; skb != NULL; skb = skb2) {
2175 skb2 = (struct sk_buff *)skb->link3;
2176 if (list == NULL || before (skb2->h.seq, list->h.seq)) {
2177 skb->link3 = list;
2178 sk->send_tail = skb;
2179 list = skb;
2180 } else {
2181 for (skb3 = list; ; skb3 = (struct sk_buff *)skb3->link3) {
2182 if (skb3->link3 == NULL ||
2183 before(skb->h.seq, skb3->link3->h.seq)) {
2184 skb->link3 = skb3->link3;
2185 skb3->link3 = skb;
2186 if (skb->link3 == NULL) sk->send_tail = skb;
2187 break;
2188 }
2189 }
2190 }
2191 }
2192 sk->send_head = list;
2193 }
2194
2195
2196
/*
 * Process the acknowledgment in an incoming segment: validate the ack
 * number, absorb window changes (possibly moving now-unsendable
 * segments back from the retransmit list to the write queue), update
 * the RTT estimate, free acknowledged buffers, kick further
 * transmission and drive the closing-state transitions.
 * Returns 1 when the ack was acceptable, 0 when it should cause the
 * caller to discard the segment.
 */
static int
tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
{
  unsigned long ack;
  int flag = 0;	/* bit0: progress made, bit1: rtt updated, bit2: window/queue touched */

  if(sk->zapped)
	return(1);

  ack = ntohl(th->ack_seq);
  DPRINTF((DBG_TCP, "tcp_ack ack=%d, window=%d, "
	  "sk->rcv_ack_seq=%d, sk->window_seq = %d\n",
	  ack, ntohs(th->window), sk->rcv_ack_seq, sk->window_seq));

  /* Ack outside [rcv_ack_seq-1, send_seq+1]: for data we never sent,
   * reject; for an old duplicate on a live connection, just refresh
   * the keepalive timer and accept. */
  if (after(ack, sk->send_seq+1) || before(ack, sk->rcv_ack_seq-1)) {
	if (after(ack, sk->send_seq) ||
	   (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)) {
		return(0);
	}
	if (sk->keepopen) {
		reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
	}
	return(1);
  }

  /* Segment carries data (or options) beyond the bare header. */
  if (len != th->doff*4) flag |= 1;

  /* The peer shrank its window: any segment on the retransmit list
   * that no longer fits must be moved back to the front of the write
   * queue (in order), the rest stays on the retransmit list. */
  if (after(sk->window_seq, ack+ntohs(th->window))) {
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *wskb = NULL;	/* last skb re-queued, keeps order */

	skb2 = sk->send_head;
	sk->send_head = NULL;
	sk->send_tail = NULL;

	flag |= 4;

	sk->window_seq = ack + ntohs(th->window);
	cli();	/* the lists are also touched from interrupt context */
	while (skb2 != NULL) {
		skb = skb2;
		skb2 = (struct sk_buff *)skb->link3;
		skb->link3 = NULL;
		if (after(skb->h.seq, sk->window_seq)) {
			/* No longer inside the window: back to the
			 * write queue. */
			if (sk->packets_out > 0) sk->packets_out--;

			if (skb->next != NULL) {
				skb_unlink(skb);
			}

			skb->magic = TCP_WRITE_QUEUE_MAGIC;
			if (wskb == NULL) {
				skb->next = sk->wfront;
				sk->wfront = skb;
			} else {
				skb->next = wskb->next;
				wskb->next = skb;
			}
			if (sk->wback == wskb) sk->wback = skb;
			wskb = skb;
		} else {
			/* Still sendable: rebuild the retransmit list. */
			if (sk->send_head == NULL) {
				sk->send_head = skb;
				sk->send_tail = skb;
			} else {
				sk->send_tail->link3 = skb;
				sk->send_tail = skb;
			}
			skb->link3 = NULL;
		}
	}
	sti();
  }

  /* Keep head/tail/packet count consistent if either end vanished. */
  if (sk->send_tail == NULL || sk->send_head == NULL) {
	sk->send_head = NULL;
	sk->send_tail = NULL;
	sk->packets_out= 0;
  }

  sk->window_seq = ack + ntohs(th->window);

  /* Open the congestion window: exponentially during slow start,
   * linearly afterwards, capped at 2048. */
  if (sk->cong_window < 2048 && ack != sk->rcv_ack_seq) {
	if (sk->exp_growth) sk->cong_window *= 2;
	else sk->cong_window++;
  }

  DPRINTF((DBG_TCP, "tcp_ack: Updating rcv ack sequence.\n"));
  sk->rcv_ack_seq = ack;

  /* Free everything on the retransmit list covered by this ack. */
  while(sk->send_head != NULL) {
	/* Defensive: the list must stay sequence-ordered. */
	if (sk->send_head->link3 &&
	    after(sk->send_head->h.seq, sk->send_head->link3->h.seq)) {
		printk("INET: tcp.c: *** bug send_list out of order.\n");
		sort_send(sk);
	}

	if (before(sk->send_head->h.seq, ack+1)) {
		struct sk_buff *oskb;

		if (sk->retransmits) {
			/* Partial ack during retransmission: keep
			 * retransmitting only if more is outstanding. */
			if (sk->send_head->link3)
				sk->retransmits = 1;
			else
				sk->retransmits = 0;
		}

		sk->backoff = 0;

		if (sk->packets_out > 0) sk->packets_out --;
		DPRINTF((DBG_TCP, "skb=%X skb->h.seq = %d acked ack=%d\n",
			sk->send_head, sk->send_head->h.seq, ack));

		if (!sk->dead) sk->write_space(sk);

		oskb = sk->send_head;

		/* RTT estimation (Jacobson-style smoothing), only for
		 * segments that were not retransmitted (flag bit1). */
		if ( !(flag&2)) {
			long abserr, rtt = jiffies - oskb->when;

			if (sk->state == TCP_SYN_SENT || sk->state == TCP_SYN_RECV) {
				/* First measurement: take it verbatim. */
				sk->rtt = rtt;
				sk->mdev = rtt;
			}
			else {
				abserr = (rtt > sk->rtt) ? rtt - sk->rtt : sk->rtt - rtt;
				sk->rtt = (7 * sk->rtt + rtt) >> 3;
				sk->mdev = (3 * sk->mdev + abserr) >> 2;
			}
			sk->backoff = 0;
		}
		flag |= (2|4);

		/* Clamp rtt to [10, 12000] jiffies. */
		if (sk->rtt < 10) sk->rtt = 10;
		if (sk->rtt > 12000) sk->rtt = 12000;

		cli();

		oskb = sk->send_head;
		IS_SKB(oskb);
		sk->send_head =(struct sk_buff *)oskb->link3;
		if (sk->send_head == NULL) {
			sk->send_tail = NULL;
		}

		skb_unlink(oskb);
		sti();
		oskb->magic = 0;
		kfree_skb(oskb, FREE_WRITE);
		if (!sk->dead) sk->write_space(sk);
	} else {
		break;
	}
  }

  /* More to send and room to send it? Otherwise adjust the timers. */
  if (sk->wfront != NULL) {
	if (after (sk->window_seq, sk->wfront->h.seq) &&
	   (sk->retransmits == 0 ||
	    before(sk->wfront->h.seq, sk->rcv_ack_seq +1))
	   && sk->packets_out < sk->cong_window) {
		flag |= 1;
		tcp_write_xmit(sk);
	}
  } else {
	if (sk->send_head == NULL && sk->ack_backlog == 0 &&
	    sk->state != TCP_TIME_WAIT && !sk->keepopen) {
		DPRINTF((DBG_TCP, "Nothing to do, going to sleep.\n"));
		if (!sk->dead) sk->write_space(sk);

		/* NOTE(review): !sk->keepopen holds here, so this
		 * always deletes the timer; kept as-is. */
		if (sk->keepopen)
			reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
		else
			delete_timer(sk);
	} else {
		if (sk->state != (unsigned char) sk->keepopen) {
			reset_timer(sk, TIME_WRITE,
				backoff(sk->backoff) * (2 * sk->mdev + sk->rtt));
		}
		if (sk->state == TCP_TIME_WAIT) {
			reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
		}
	}
  }

  /* Everything acked and a partial segment pending: push it out. */
  if (sk->packets_out == 0 && sk->send_tmp != NULL &&
      sk->wfront == NULL && sk->send_head == NULL) {
	flag |= 1;
	tcp_send_partial(sk);
  }

  /* Closing-state transitions driven by this ack. */
  if (sk->state == TCP_TIME_WAIT) {
	if (!sk->dead)
		sk->state_change(sk);
	if (sk->rcv_ack_seq == sk->send_seq && sk->acked_seq == sk->fin_seq) {
		flag |= 1;
		sk->state = TCP_CLOSE;
		sk->shutdown = SHUTDOWN_MASK;
	}
  }

  if (sk->state == TCP_LAST_ACK || sk->state == TCP_FIN_WAIT2) {
	if (!sk->dead) sk->state_change(sk);
	if (sk->rcv_ack_seq == sk->send_seq) {
		flag |= 1;
		if (sk->acked_seq != sk->fin_seq) {
			tcp_time_wait(sk);
		} else {
			DPRINTF((DBG_TCP, "tcp_ack closing socket - %X\n", sk));
			tcp_send_ack(sk->send_seq, sk->acked_seq, sk,
				     th, sk->daddr);
			sk->shutdown = SHUTDOWN_MASK;
			sk->state = TCP_CLOSE;
		}
	}
  }

  /* No progress and the oldest outstanding segment has timed out:
   * leave slow start and retransmit immediately. */
  if (((!flag) || (flag&4)) && sk->send_head != NULL &&
      (sk->send_head->when + backoff(sk->backoff) * (2 * sk->mdev + sk->rtt)
       < jiffies)) {
	sk->exp_growth = 0;
	ip_retransmit(sk, 1);
  }

  DPRINTF((DBG_TCP, "leaving tcp_ack\n"));
  return(1);
}
2463
2464
2465
2466
2467
2468
2469
/*
 * Accept the data portion of an incoming segment: insert the skb into
 * the (sequence-ordered, possibly gapped) receive queue, advance
 * acked_seq across any newly contiguous segments, decide between an
 * immediate and a delayed ack, and wake the reader.
 * Returns 0 in all cases (the skb is always consumed or queued).
 */
static int
tcp_data(struct sk_buff *skb, struct sock *sk,
	 unsigned long saddr, unsigned short len)
{
  struct sk_buff *skb1, *skb2;
  struct tcphdr *th;
  int dup_dumped=0;	/* set when this skb replaced an older duplicate */

  th = skb->h.th;
  print_th(th);
  skb->len = len -(th->doff*4);	/* payload length only */

  DPRINTF((DBG_TCP, "tcp_data len = %d sk = %X:\n", skb->len, sk));

  sk->bytes_rcv += skb->len;
  /* Pure ack (no data, no flags that matter here): nothing to queue. */
  if (skb->len == 0 && !th->fin && !th->urg && !th->psh) {
	if (!th->ack) tcp_send_ack(sk->send_seq, sk->acked_seq,sk, th, saddr);
	kfree_skb(skb, FREE_READ);
	return(0);
  }

  /* Data after we shut the receive side: kill the connection. */
  if (sk->shutdown & RCV_SHUTDOWN) {
	sk->acked_seq = th->seq + skb->len + th->syn + th->fin;
	tcp_reset(sk->saddr, sk->daddr, skb->h.th,
		  sk->prot, NULL, skb->dev, sk->ip_tos, sk->ip_ttl);
	sk->state = TCP_CLOSE;
	sk->err = EPIPE;
	sk->shutdown = SHUTDOWN_MASK;
	DPRINTF((DBG_TCP, "tcp_data: closing socket - %X\n", sk));
	kfree_skb(skb, FREE_READ);
	if (!sk->dead) sk->state_change(sk);
	return(0);
  }

  /* Insert the skb into the receive queue in sequence order, walking
   * backwards from the newest entry. */
  if (sk->rqueue == NULL) {
	DPRINTF((DBG_TCP, "tcp_data: skb = %X:\n", skb));
#ifdef OLDWAY
	sk->rqueue = skb;
	skb->next = skb;
	skb->prev = skb;
	skb->list = &sk->rqueue;
#else
	skb_queue_head(&sk->rqueue,skb);
#endif
	skb1= NULL;
  } else {
	DPRINTF((DBG_TCP, "tcp_data adding to chain sk = %X:\n", sk));
	for(skb1=sk->rqueue->prev; ; skb1 =(struct sk_buff *)skb1->prev) {
		if(sk->debug)
		{
			printk("skb1=%p :", skb1);
			printk("skb1->h.th->seq = %ld: ", skb1->h.th->seq);
			printk("skb->h.th->seq = %ld\n",skb->h.th->seq);
			printk("copied_seq = %ld acked_seq = %ld\n", sk->copied_seq,
				sk->acked_seq);
		}
#ifdef OLD
		if (after(th->seq+1, skb1->h.th->seq)) {
			skb->prev = skb1;
			skb->next = skb1->next;
			skb->next->prev = skb;
			skb1->next = skb;
			if (skb1 == sk->rqueue) sk->rqueue = skb;
			break;
		}
		if (skb1->prev == sk->rqueue) {
			skb->next= skb1;
			skb->prev = skb1->prev;
			skb->prev->next = skb;
			skb1->prev = skb;
			skb1 = NULL;

			break;
		}
#else
		/* Exact retransmit (same seq, at least as long):
		 * replace the stale copy with the new one. */
		if (th->seq==skb1->h.th->seq && skb->len>= skb1->len)
		{
			skb_append(skb1,skb);
			skb_unlink(skb1);
			kfree_skb(skb1,FREE_READ);
			dup_dumped=1;
			skb1=NULL;
			break;
		}
		/* Found the predecessor: append after it. */
		if (after(th->seq+1, skb1->h.th->seq))
		{
			skb_append(skb1,skb);
			break;
		}
		/* Walked past the oldest entry: new head of queue. */
		if (skb1 == sk->rqueue)
		{
			skb_queue_head(&sk->rqueue, skb);
			break;
		}
#endif
	}
	DPRINTF((DBG_TCP, "skb = %X:\n", skb));
  }

  /* Stash the sequence number this segment runs up to in its header's
   * ack_seq field (SYN and FIN each consume one number). */
  th->ack_seq = th->seq + skb->len;
  if (th->syn) th->ack_seq++;
  if (th->fin) th->ack_seq++;

  /* Sanity: acked must never trail copied. */
  if (before(sk->acked_seq, sk->copied_seq)) {
	printk("*** tcp.c:tcp_data bug acked < copied\n");
	sk->acked_seq = sk->copied_seq;
  }

  /* If the segment is in order (or fills a hole at the ack point),
   * advance acked_seq across it and any now-contiguous successors. */
  if ((!dup_dumped && (skb1 == NULL || skb1->acked)) || before(th->seq, sk->acked_seq+1)) {
	if (before(th->seq, sk->acked_seq+1)) {
		if (after(th->ack_seq, sk->acked_seq))
			sk->acked_seq = th->ack_seq;
		skb->acked = 1;

		if (skb->h.th->fin) {
			if (!sk->dead) sk->state_change(sk);
			sk->shutdown |= RCV_SHUTDOWN;
		}

		/* Sweep forward over queued segments made contiguous
		 * by this one. */
		for(skb2 = (struct sk_buff *)skb->next;
		    skb2 !=(struct sk_buff *) sk->rqueue;
		    skb2 = (struct sk_buff *)skb2->next) {
			if (before(skb2->h.th->seq, sk->acked_seq+1)) {
				if (after(skb2->h.th->ack_seq, sk->acked_seq))
				{
					long old_acked_seq = sk->acked_seq;
					sk->acked_seq = skb2->h.th->ack_seq;
					/* Shrink the advertised window
					 * by the data just absorbed. */
					if((int)(sk->acked_seq - old_acked_seq) >0)
					{
						int new_window=sk->window-sk->acked_seq+
							old_acked_seq;
						if(new_window<0)
							new_window=0;
						sk->window = new_window;
					}
				}
				skb2->acked = 1;

				if (skb2->h.th->fin) {
					sk->shutdown |= RCV_SHUTDOWN;
					if (!sk->dead) sk->state_change(sk);
				}

				/* Force an immediate ack below. */
				sk->ack_backlog = sk->max_ack_backlog;
			} else {
				break;
			}
		}

		/* Delayed ack unless something demands an immediate one. */
		if (!sk->delay_acks ||
		    sk->ack_backlog >= sk->max_ack_backlog ||
		    sk->bytes_rcv > sk->max_unacked || th->fin) {
			/* immediate ack — sent further below */
		} else {
			sk->ack_backlog++;
			if(sk->debug)
				printk("Ack queued.\n");
			reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
		}
	}
  }

  /* Out-of-order segment: if receive memory is tight, drop unacked
   * queue entries from the head to make room, then ack what we have
   * (acting as a NAK for the gap). */
  if (!skb->acked) {
	while (sk->prot->rspace(sk) < sk->mtu) {
		skb1 = skb_peek(&sk->rqueue);
		if (skb1 == NULL) {
			printk("INET: tcp.c:tcp_data memory leak detected.\n");
			break;
		}

		/* Never throw away data the peer believes delivered. */
		if (skb1->acked) {
			break;
		}

		skb_unlink(skb1);
#ifdef OLDWAY
		if (skb1->prev == skb1) {
			sk->rqueue = NULL;
		} else {
			sk->rqueue = (struct sk_buff *)skb1->prev;
			skb1->next->prev = skb1->prev;
			skb1->prev->next = skb1->next;
		}
#endif
		kfree_skb(skb1, FREE_READ);
	}
	tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
	sk->ack_backlog++;
	reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
  } else {
	tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
  }

  /* Tell the application data (or a FIN) arrived. */
  if (!sk->dead) {
	if(sk->debug)
		printk("Data wakeup.\n");
	sk->data_ready(sk,0);
  } else {
	DPRINTF((DBG_TCP, "data received on dead socket.\n"));
  }

  /* Both directions finished: move to LAST_ACK. */
  if (sk->state == TCP_FIN_WAIT2 &&
      sk->acked_seq == sk->fin_seq && sk->rcv_ack_seq == sk->send_seq) {
	DPRINTF((DBG_TCP, "tcp_data: entering last_ack state sk = %X\n", sk));

	sk->shutdown = SHUTDOWN_MASK;
	sk->state = TCP_LAST_ACK;
	if (!sk->dead) sk->state_change(sk);
  }

  return(0);
}
2716
2717
2718 static int
2719 tcp_urg(struct sock *sk, struct tcphdr *th, unsigned long saddr)
2720 {
2721 extern int kill_pg(int pg, int sig, int priv);
2722 extern int kill_proc(int pid, int sig, int priv);
2723
2724 if (!sk->dead)
2725 sk->data_ready(sk,0);
2726
2727 if (sk->urginline) {
2728 th->urg = 0;
2729 th->psh = 1;
2730 return(0);
2731 }
2732
2733 if (!sk->urg) {
2734
2735 if (sk->proc != 0) {
2736 if (sk->proc > 0) {
2737 kill_proc(sk->proc, SIGURG, 1);
2738 } else {
2739 kill_pg(-sk->proc, SIGURG, 1);
2740 }
2741 }
2742 }
2743 sk->urg++;
2744 return(0);
2745 }
2746
2747
2748
/*
 * Handle an incoming FIN: record where the peer's stream ends
 * (fin_seq) and advance the connection state.  The actual data of the
 * FIN segment was already queued by tcp_data(); bumping ack_backlog
 * here makes sure the FIN gets acknowledged.  Always returns 0.
 */
static int
tcp_fin(struct sock *sk, struct tcphdr *th,
	unsigned long saddr, struct device *dev)
{
  DPRINTF((DBG_TCP, "tcp_fin(sk=%X, th=%X, saddr=%X, dev=%X)\n",
	  sk, th, saddr, dev));

  if (!sk->dead) {
	sk->state_change(sk);
  }

  switch(sk->state) {
	case TCP_SYN_RECV:
	case TCP_SYN_SENT:
	case TCP_ESTABLISHED:
		/* Peer closed first: passive close. */
		sk->fin_seq = th->seq+1;
		sk->state = TCP_CLOSE_WAIT;
		if (th->rst) sk->shutdown = SHUTDOWN_MASK;
		break;

	case TCP_CLOSE_WAIT:
	case TCP_FIN_WAIT2:
		break;	/* retransmitted FIN; nothing new */

	case TCP_FIN_WAIT1:
		/* Simultaneous close: our FIN is out, theirs arrived. */
		sk->fin_seq = th->seq+1;
		sk->state = TCP_FIN_WAIT2;
		break;

	default:
	case TCP_TIME_WAIT:
		sk->state = TCP_LAST_ACK;

		/* Restart the timeout and skip the ack_backlog bump. */
		reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
		return(0);
  }
  sk->ack_backlog++;	/* the FIN needs acknowledging */

  return(0);
}
2792
2793
2794
/*
 * accept() on a listening TCP socket: wait (unless O_NONBLOCK) for a
 * queued connection-request skb and return the new sock that
 * tcp_conn_request() attached to it.  Errors are reported via sk->err
 * with a NULL return.
 */
static struct sock *
tcp_accept(struct sock *sk, int flags)
{
  struct sock *newsk;
  struct sk_buff *skb;

  DPRINTF((DBG_TCP, "tcp_accept(sk=%X, flags=%X, addr=%s)\n",
	  sk, flags, in_ntoa(sk->saddr)));

  /* Only a listening socket can accept. */
  if (sk->state != TCP_LISTEN) {
	sk->err = EINVAL;
	return(NULL);
  }

  /* Interrupts off while testing/sleeping on the queue, so an
   * arriving connection can't slip between the check and the sleep. */
  cli();
  sk->inuse = 1;
  while((skb = get_firstr(sk)) == NULL) {
	if (flags & O_NONBLOCK) {
		sti();
		release_sock(sk);
		sk->err = EAGAIN;
		return(NULL);
	}

	release_sock(sk);
	interruptible_sleep_on(sk->sleep);
	if (current->signal & ~current->blocked) {
		sti();
		sk->err = ERESTARTSYS;
		return(NULL);
	}
	sk->inuse = 1;
  }
  sti();

  /* tcp_conn_request() stored the new socket in skb->sk. */
  newsk = skb->sk;

  kfree_skb(skb, FREE_READ);
  sk->ack_backlog--;
  release_sock(sk);
  return(newsk);
}
2843
2844
2845
/*
 * Active open: validate the destination address, pick an initial send
 * sequence from the clock, build and transmit a SYN carrying our MSS
 * option, and enter TCP_SYN_SENT.  Returns 0 or a negative errno.
 */
static int
tcp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len)
{
  struct sk_buff *buff;
  struct sockaddr_in sin;
  struct device *dev=NULL;
  unsigned char *ptr;
  int tmp;
  struct tcphdr *t1;
  int err;

  if (sk->state != TCP_CLOSE) return(-EISCONN);
  if (addr_len < 8) return(-EINVAL);

  err=verify_area(VERIFY_READ, usin, addr_len);
  if(err)
  	return err;

  memcpy_fromfs(&sin,usin, min(sizeof(sin), addr_len));

  /* sin_family 0 is tolerated for old code; anything else must be
   * AF_INET. */
  if (sin.sin_family && sin.sin_family != AF_INET) return(-EAFNOSUPPORT);

  DPRINTF((DBG_TCP, "TCP connect daddr=%s\n", in_ntoa(sin.sin_addr.s_addr)));

  /* No TCP connections to broadcast addresses. */
  if (chk_addr(sin.sin_addr.s_addr) == IS_BROADCAST) {
	DPRINTF((DBG_TCP, "TCP connection to broadcast address not allowed\n"));
	return(-ENETUNREACH);
  }

  sk->inuse = 1;
  sk->daddr = sin.sin_addr.s_addr;
  /* Clock-driven initial send sequence. */
  sk->send_seq = jiffies * SEQ_TICK - seq_offset;
  sk->rcv_ack_seq = sk->send_seq -1;
  sk->err = 0;
  sk->dummy_th.dest = sin.sin_port;
  /* Drop the lock: wmalloc with GFP_KERNEL may sleep. */
  release_sock(sk);

  buff = (struct sk_buff *) sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
  if (buff == NULL) {
	return(-ENOMEM);
  }
  sk->inuse = 1;
  buff->mem_addr = buff;
  buff->mem_len = MAX_SYN_SIZE;
  buff->len = 24;	/* TCP header (20) + 4-byte MSS option */
  buff->sk = sk;
  buff->free = 1;
  t1 = (struct tcphdr *)(buff + 1);

  /* Prepend the IP/link header; this also routes and sets 'dev'. */
  tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
			       IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);
  if (tmp < 0) {
	sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
	release_sock(sk);
	return(-ENETUNREACH);
  }
  buff->len += tmp;
  t1 = (struct tcphdr *)((char *)t1 +tmp);

  /* Build the SYN from the socket's template header. */
  memcpy(t1,(void *)&(sk->dummy_th), sizeof(*t1));
  t1->seq = ntohl(sk->send_seq++);
  buff->h.seq = sk->send_seq;
  t1->ack = 0;
  t1->window = 2;
  t1->res1=0;
  t1->res2=0;
  t1->rst = 0;
  t1->urg = 0;
  t1->psh = 0;
  t1->syn = 1;
  t1->urg_ptr = 0;
  t1->doff = 6;	/* 20-byte header + 4 bytes of options */

  /* MSS: user-set value if any, else the RFC 1122 default, clamped
   * to the device MTU. */
  if (sk->mss)
	sk->mtu = sk->mss;
  else
	sk->mtu = 576 - HEADER_SIZE;

  sk->mtu = min(sk->mtu, dev->mtu - HEADER_SIZE);

  /* MSS option: kind 2, length 4, value = our mtu. */
  ptr = (unsigned char *)(t1+1);
  ptr[0] = 2;
  ptr[1] = 4;
  ptr[2] = (sk->mtu) >> 8;
  ptr[3] = (sk->mtu) & 0xff;
  tcp_send_check(t1, sk->saddr, sk->daddr,
		 sizeof(struct tcphdr) + 4, sk);

  /* Arm the connect timer and transmit. */
  sk->state = TCP_SYN_SENT;
  sk->rtt = TCP_CONNECT_TIME;
  reset_timer(sk, TIME_WRITE, TCP_CONNECT_TIME);
  sk->retransmits = TCP_RETR2 - TCP_SYN_RETRIES;

  sk->prot->queue_xmit(sk, dev, buff, 0);

  release_sock(sk);
  return(0);
}
2950
2951
2952
/*
 * Decide whether an incoming segment's sequence range is acceptable
 * for this socket.  Returns 1 to accept, 0 to discard; unacceptable
 * segments are answered with an ack (or, during connection setup,
 * with a reset).
 */
static int
tcp_sequence(struct sock *sk, struct tcphdr *th, short len,
	     struct options *opt, unsigned long saddr, struct device *dev)
{
  DPRINTF((DBG_TCP, "tcp_sequence(sk=%X, th=%X, len = %d, opt=%d, saddr=%X)\n",
	  sk, th, len, opt, saddr));

  /* Accept when any part of [seq, seq+datalen] overlaps the receive
   * window [acked_seq, acked_seq+window]. */
  if (between(th->seq, sk->acked_seq, sk->acked_seq + sk->window)||
      between(th->seq + len-(th->doff*4), sk->acked_seq + 1,
	      sk->acked_seq + sk->window) ||
     (before(th->seq, sk->acked_seq) &&
       after(th->seq + len -(th->doff*4), sk->acked_seq + sk->window))) {
	return(1);
  }
  DPRINTF((DBG_TCP, "tcp_sequence: rejecting packet.\n"));

  /* During the handshake an out-of-window segment means the peer is
   * confused: reset it. */
  if(sk->state==TCP_SYN_SENT||sk->state==TCP_SYN_RECV)
  {
  	tcp_reset(sk->saddr,sk->daddr,th,sk->prot,NULL,dev, sk->ip_tos,sk->ip_ttl);
  	return(1);
  }

  /* Segment entirely beyond the window: re-ack to resynchronize. */
  if (after(th->seq, sk->acked_seq + sk->window)) {
	if(!th->rst)
		tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
	return(0);
  }

  /* A recent bare ack (no data, no SYN/FIN) is still worth
   * processing even though it is outside the window. */
  if (th->ack && len == (th->doff * 4) &&
      after(th->seq, sk->acked_seq - 32767) &&
      !th->fin && !th->syn) return(1);

  /* Old duplicate: just ack it again (unless it is a RST). */
  if (!th->rst) {
	tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
  }
  return(0);
}
3009
3010
3011
3012
3013
/*
 * Main entry point for incoming TCP segments, called from the IP layer
 * (and re-entered with redo != 0 when a backlogged packet is replayed
 * by release_sock).  Validates the segment, finds the owning socket,
 * and dispatches on the connection state.  Always returns 0.
 *
 * On the !redo path this routine also converts th->seq to host byte
 * order and queues the skb on sk->back_log if the socket is busy.
 */
int
tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
	unsigned long daddr, unsigned short len,
	unsigned long saddr, int redo, struct inet_protocol * protocol)
{
  struct tcphdr *th;
  struct sock *sk;

  if (!skb) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv skb = NULL\n"));
	return(0);
  }
#if 0
  if (!protocol) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv protocol = NULL\n"));
	return(0);
  }

  if (!opt) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv opt = NULL\n"));
  }
#endif
  if (!dev) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv dev = NULL\n"));
	return(0);
  }
  th = skb->h.th;

  /* Look up the socket for this (local port, remote addr/port) tuple. */
  sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
  DPRINTF((DBG_TCP, "<<\n"));
  DPRINTF((DBG_TCP, "len = %d, redo = %d, skb=%X\n", len, redo, skb));

  /* A zapped (reset) socket is treated as if no socket matched. */
  if (sk!=NULL && sk->zapped)
	sk=NULL;

  if (sk) {
	DPRINTF((DBG_TCP, "sk = %X:\n", sk));
  }

  if (!redo) {
	/* First pass only: verify the TCP checksum. */
	if (tcp_check(th, len, saddr, daddr )) {
		skb->sk = NULL;
		DPRINTF((DBG_TCP, "packet dropped with bad checksum.\n"));
		if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: bad checksum\n");
		kfree_skb(skb,FREE_READ);
		return(0);
	}

	/* No socket: answer anything but a RST with a reset. */
	if (sk == NULL) {
		if (!th->rst)
		{
			/* tcp_reset expects th->seq in host order. */
			th->seq = ntohl(th->seq);
			tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev,skb->ip_hdr->tos,255);
		}
		skb->sk = NULL;
		kfree_skb(skb, FREE_READ);
		return(0);
	}

	/* Attach the buffer to the socket and reset its bookkeeping.
	   saddr/daddr are swapped: stored from OUR point of view. */
	skb->len = len;
	skb->sk = sk;
	skb->acked = 0;
	skb->used = 0;
	skb->free = 0;
	skb->urg_used = 0;
	skb->saddr = daddr;
	skb->daddr = saddr;

	/* From here on th->seq is in host byte order. */
	th->seq = ntohl(th->seq);

	/* If the socket is in use, queue on the (circular, doubly linked)
	   backlog; release_sock will replay it with redo = 1. */
	cli();
	if (sk->inuse) {
		if (sk->back_log == NULL) {
			sk->back_log = skb;
			skb->next = skb;
			skb->prev = skb;
		} else {
			skb->next = sk->back_log;
			skb->prev = sk->back_log->prev;
			skb->prev->next = skb;
			skb->next->prev = skb;
		}
		sti();
		return(0);
	}
	sk->inuse = 1;
	sti();
  } else {
	/* Replay pass: the socket must already be known and locked. */
	if (!sk) {
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv bug sk=NULL redo = 1\n"));
		return(0);
	}
  }

  if (!sk->prot) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv sk->prot = NULL \n"));
	return(0);
  }

  /* Enforce the receive-buffer quota before charging this skb. */
  if (sk->rmem_alloc + skb->mem_len >= sk->rcvbuf) {
	skb->sk = NULL;
	DPRINTF((DBG_TCP, "dropping packet due to lack of buffer space.\n"));
	kfree_skb(skb, FREE_READ);
	release_sock(sk);
	return(0);
  }
  sk->rmem_alloc += skb->mem_len;

  DPRINTF((DBG_TCP, "About to do switch.\n"));

  /* Dispatch on connection state. */
  switch(sk->state) {

	/*
	 * LAST_ACK: a RST here finishes the close immediately; anything
	 * else is handled exactly like an established connection
	 * (intentional fall-through).
	 */
	case TCP_LAST_ACK:
		if (th->rst) {
			sk->zapped=1;
			sk->err = ECONNRESET;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			if (!sk->dead) {
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* fall through */

	case TCP_ESTABLISHED:
	case TCP_CLOSE_WAIT:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
	case TCP_TIME_WAIT:
		/* Out-of-window segment: re-ACK (unless RST) and drop. */
		if (!tcp_sequence(sk, th, len, opt, saddr,dev)) {
			if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: not in seq\n");
			if(!th->rst)
				tcp_send_ack(sk->send_seq, sk->acked_seq,
					     sk, th, saddr);
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* In-window RST: tear down the connection. */
		if (th->rst) {
			sk->zapped=1;
			sk->err = ECONNRESET;
			/* In CLOSE_WAIT the local side already saw the peer's
			   FIN, so report EPIPE instead. */
			if (sk->state == TCP_CLOSE_WAIT) {
				sk->err = EPIPE;
			}
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			if (!sk->dead) {
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* A SYN on a synchronized connection is fatal: reset it.
		   (The #if 0 text is a disabled security/compartment check
		   left inside the live condition.) */
		if (
#if 0
		if ((opt && (opt->security != 0 ||
			     opt->compartment != 0)) ||
#endif
		    th->syn) {
			sk->err = ECONNRESET;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			tcp_reset(daddr, saddr, th, sk->prot, opt,dev, sk->ip_tos,sk->ip_ttl);
			if (!sk->dead) {
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* Process ACK, URG, data, then FIN — each helper returning
		   nonzero (or tcp_ack returning 0) means the skb was or must
		   be consumed here. */
		if (th->ack) {
			if (!tcp_ack(sk, th, saddr, len)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
		}
		if (th->urg) {
			if (tcp_urg(sk, th, saddr)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
		}

		if (tcp_data(skb, sk, saddr, len)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (th->fin && tcp_fin(sk, th, saddr, dev)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		release_sock(sk);
		return(0);

	case TCP_CLOSE:
		/* Closed socket: ignore if dead or still bound to a peer. */
		if (sk->dead || sk->daddr) {
			DPRINTF((DBG_TCP, "packet received for closed,dead socket\n"));
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* Anything but a RST gets a reset back. */
		if (!th->rst) {
			if (!th->ack)
				th->ack_seq = 0;
			tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
		}
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);

	case TCP_LISTEN:
		/* Listener: drop RSTs, reset stray ACKs, accept SYNs. */
		if (th->rst) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		if (th->ack) {
			tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (th->syn) {
#if 0
			if (opt->security != 0 || opt->compartment != 0) {
				tcp_reset(daddr, saddr, th, prot, opt,dev);
				release_sock(sk);
				return(0);
			}
#endif
			/* Spawn a new connection; tcp_conn_request takes
			   ownership of the skb. */
			tcp_conn_request(sk, skb, daddr, saddr, opt, dev);
			release_sock(sk);
			return(0);
		}

		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);

	default:
		/* SYN_RECV and friends: sequence-check first, then handle
		   like SYN_SENT (intentional fall-through). */
		if (!tcp_sequence(sk, th, len, opt, saddr,dev)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* fall through */

	case TCP_SYN_SENT:
		/* RST during the handshake: connection refused. */
		if (th->rst) {
			sk->err = ECONNREFUSED;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			sk->zapped = 1;
			if (!sk->dead) {
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
#if 0
		if (opt->security != 0 || opt->compartment != 0) {
			sk->err = ECONNRESET;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			tcp_reset(daddr, saddr, th, sk->prot, opt, dev);
			if (!sk->dead) {
				wake_up(sk->sleep);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
#endif
		/* No ACK: a simultaneous-open SYN moves us to SYN_RECV;
		   everything else is dropped. */
		if (!th->ack) {
			if (th->syn) {
				sk->state = TCP_SYN_RECV;
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* Segment carries an ACK: complete the handshake. */
		switch(sk->state) {
			case TCP_SYN_SENT:
				/* The ACK must cover our SYN, else reset. */
				if (!tcp_ack(sk, th, saddr, len)) {
					tcp_reset(daddr, saddr, th,
						  sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}

				/* An ACK without a SYN tells us nothing yet. */
				if (!th->syn) {
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}

				/* SYN+ACK: record the peer's sequence and
				   ACK it, then fall through to finish the
				   SYN_RECV processing below. */
				sk->acked_seq = th->seq+1;
				sk->fin_seq = th->seq;
				tcp_send_ack(sk->send_seq, th->seq+1,
					     sk, th, sk->daddr);
				/* fall through */

			case TCP_SYN_RECV:
				if (!tcp_ack(sk, th, saddr, len)) {
					tcp_reset(daddr, saddr, th,
						  sk->prot, opt, dev,sk->ip_tos,sk->ip_ttl);
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}
				/* Handshake complete. */
				sk->state = TCP_ESTABLISHED;

				/* Pick up the peer's options (e.g. MSS) and
				   latch its port into our header template. */
				tcp_options(sk, th);
				sk->dummy_th.dest = th->source;
				sk->copied_seq = sk->acked_seq-1;
				if (!sk->dead) {
					sk->state_change(sk);
				}

				/* The handshake segment may itself carry
				   urgent data, data, or a FIN. */
				if (th->urg) {
					if (tcp_urg(sk, th, saddr)) {
						kfree_skb(skb, FREE_READ);
						release_sock(sk);
						return(0);
					}
				}
				if (tcp_data(skb, sk, saddr, len))
					kfree_skb(skb, FREE_READ);

				if (th->fin) tcp_fin(sk, th, saddr, dev);
				release_sock(sk);
				return(0);
		}

		/* Reached from the default: case for other states. */
		if (th->urg) {
			if (tcp_urg(sk, th, saddr)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
		}

		if (tcp_data(skb, sk, saddr, len)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (!th->fin) {
			release_sock(sk);
			return(0);
		}
		tcp_fin(sk, th, saddr, dev);
		release_sock(sk);
		return(0);
  }
}
3428
3429
3430
3431
3432
3433
3434 static void
3435 tcp_write_wakeup(struct sock *sk)
3436 {
3437 struct sk_buff *buff;
3438 struct tcphdr *t1;
3439 struct device *dev=NULL;
3440 int tmp;
3441
3442 if (sk->zapped)
3443 return;
3444
3445 if (sk -> state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) return;
3446
3447 buff = (struct sk_buff *) sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
3448 if (buff == NULL) return;
3449
3450 buff->mem_addr = buff;
3451 buff->mem_len = MAX_ACK_SIZE;
3452 buff->len = sizeof(struct tcphdr);
3453 buff->free = 1;
3454 buff->sk = sk;
3455 DPRINTF((DBG_TCP, "in tcp_write_wakeup\n"));
3456 t1 = (struct tcphdr *)(buff + 1);
3457
3458
3459 tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
3460 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
3461 if (tmp < 0) {
3462 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
3463 return;
3464 }
3465
3466 buff->len += tmp;
3467 t1 = (struct tcphdr *)((char *)t1 +tmp);
3468
3469 memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
3470
3471
3472
3473
3474
3475 t1->seq = ntohl(sk->send_seq-1);
3476 t1->ack = 1;
3477 t1->res1= 0;
3478 t1->res2= 0;
3479 t1->rst = 0;
3480 t1->urg = 0;
3481 t1->psh = 0;
3482 t1->fin = 0;
3483 t1->syn = 0;
3484 t1->ack_seq = ntohl(sk->acked_seq);
3485 t1->window = ntohs(tcp_select_window(sk));
3486 t1->doff = sizeof(*t1)/4;
3487 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
3488
3489
3490
3491
3492 sk->prot->queue_xmit(sk, dev, buff, 1);
3493 }
3494
3495
3496
3497
3498
3499 int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
3500 {
3501 int val,err;
3502
3503 if(level!=SOL_TCP)
3504 return ip_setsockopt(sk,level,optname,optval,optlen);
3505
3506 if (optval == NULL)
3507 return(-EINVAL);
3508
3509 err=verify_area(VERIFY_READ, optval, sizeof(int));
3510 if(err)
3511 return err;
3512
3513 val = get_fs_long((unsigned long *)optval);
3514
3515 switch(optname)
3516 {
3517 case TCP_MSS:
3518 if(val<200||val>2048)
3519 return -EINVAL;
3520 sk->mss=val;
3521 return 0;
3522 case TCP_NODELAY:
3523
3524 return 0;
3525 default:
3526 return(-ENOPROTOOPT);
3527 }
3528 }
3529
3530 int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
3531 {
3532 int val,err;
3533
3534 if(level!=SOL_TCP)
3535 return ip_getsockopt(sk,level,optname,optval,optlen);
3536
3537 switch(optname)
3538 {
3539 case TCP_MSS:
3540 val=sk->mss;
3541 break;
3542 case TCP_NODELAY:
3543 val=1;
3544 break;
3545 default:
3546 return(-ENOPROTOOPT);
3547 }
3548 err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
3549 if(err)
3550 return err;
3551 put_fs_long(sizeof(int),(unsigned long *) optlen);
3552
3553 err=verify_area(VERIFY_WRITE, optval, sizeof(int));
3554 if(err)
3555 return err;
3556 put_fs_long(val,(unsigned long *)optval);
3557
3558 return(0);
3559 }
3560
3561
/*
 * TCP's protocol operations table, plugged into the generic INET
 * socket layer.  Slot labels below follow the obvious correspondence
 * between the assigned function names and how sk->prot->... is invoked
 * elsewhere in this file (wmalloc/wfree/build_header/queue_xmit);
 * remaining labels are positional — confirm against struct proto.
 */
struct proto tcp_prot = {
  sock_wmalloc,		/* wmalloc: allocate write-side buffer */
  sock_rmalloc,		/* rmalloc: allocate read-side buffer */
  sock_wfree,		/* wfree */
  sock_rfree,		/* rfree */
  sock_rspace,		/* rspace: free receive-buffer space */
  sock_wspace,		/* wspace: free send-buffer space */
  tcp_close,
  tcp_read,
  tcp_write,
  tcp_sendto,
  tcp_recvfrom,
  ip_build_header,	/* build_header: delegated to IP */
  tcp_connect,
  tcp_accept,
  ip_queue_xmit,	/* queue_xmit: delegated to IP */
  tcp_retransmit,
  tcp_write_wakeup,
  tcp_read_wakeup,
  tcp_rcv,
  tcp_select,
  tcp_ioctl,
  NULL,			/* presumably the init hook — confirm */
  tcp_shutdown,
  tcp_setsockopt,
  tcp_getsockopt,
  128,			/* max_header — TODO confirm field name */
  0,			/* retransmit count */
  {NULL,},		/* per-port socket array */
  "TCP"			/* protocol name */
};