This source file includes following definitions.
- min
- print_th
- get_firstr
- diff
- tcp_time_wait
- tcp_retransmit
- tcp_err
- tcp_readable
- tcp_select
- tcp_ioctl
- tcp_check
- tcp_send_check
- tcp_send_partial
- tcp_send_ack
- tcp_build_header
- tcp_write
- tcp_sendto
- tcp_read_wakeup
- cleanup_rbuf
- tcp_read_urg
- tcp_read
- tcp_shutdown
- tcp_recvfrom
- tcp_reset
- tcp_options
- tcp_conn_request
- tcp_close
- tcp_write_xmit
- sort_send
- tcp_ack
- tcp_data
- tcp_urg
- tcp_fin
- tcp_accept
- tcp_connect
- tcp_sequence
- tcp_rcv
- tcp_write_wakeup
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69 #include <linux/types.h>
70 #include <linux/sched.h>
71 #include <linux/mm.h>
72 #include <linux/string.h>
73 #include <linux/socket.h>
74 #include <linux/sockios.h>
75 #include <linux/termios.h>
76 #include <linux/in.h>
77 #include <linux/fcntl.h>
78 #include "inet.h"
79 #include "dev.h"
80 #include "ip.h"
81 #include "protocol.h"
82 #include "icmp.h"
83 #include "tcp.h"
84 #include "skbuff.h"
85 #include "sock.h"
86 #include "arp.h"
87 #include <linux/errno.h>
88 #include <linux/timer.h>
89 #include <asm/system.h>
90 #include <asm/segment.h>
91 #include <linux/mm.h>
92
93 #define SEQ_TICK 3
94 unsigned long seq_offset;
95
/*
 * Return the smaller of two unsigned values.
 * (Result is truncated to int, matching the historical interface.)
 */
static __inline__ int
min(unsigned int a, unsigned int b)
{
	return (a < b) ? a : b;
}
102
103
/*
 * Dump a TCP header to the console for debugging.
 * Emits output only when the global debug selector is DBG_TCP.
 * Multi-byte fields are converted with ntohs()/ntohl() before printing.
 */
void
print_th(struct tcphdr *th)
{
	unsigned char *ptr;

	if (inet_debug != DBG_TCP) return;

	printk("TCP header:\n");
	/* Any options start immediately after the fixed-size header. */
	ptr =(unsigned char *)(th + 1);
	printk(" source=%d, dest=%d, seq =%ld, ack_seq = %ld\n",
		ntohs(th->source), ntohs(th->dest),
		ntohl(th->seq), ntohl(th->ack_seq));
	printk(" fin=%d, syn=%d, rst=%d, psh=%d, ack=%d, urg=%d res1=%d res2=%d\n",
		th->fin, th->syn, th->rst, th->psh, th->ack,
		th->urg, th->res1, th->res2);
	printk(" window = %d, check = %d urg_ptr = %d\n",
		ntohs(th->window), ntohs(th->check), ntohs(th->urg_ptr));
	printk(" doff = %d\n", th->doff);
	/* Print only the first four option bytes. */
	printk(" options = %d %d %d %d\n", ptr[0], ptr[1], ptr[2], ptr[3]);
}
124
125
126
127
/*
 * Pop the first buffer from the socket's receive queue.
 * Returns NULL when the queue is empty.  Thin wrapper around
 * skb_dequeue() kept for readability at call sites.
 */
static struct sk_buff *
get_firstr(struct sock *sk)
{
	return skb_dequeue(&sk->rqueue);
}
133
134
135
136
137
/*
 * Absolute distance between two sequence numbers.
 * The unsigned subtraction wraps modulo 2^32 and is then viewed as a
 * signed quantity; negative results are negated via two's complement
 * (~d + 1 == -d), so the return value is always non-negative.
 */
static long
diff(unsigned long seq1, unsigned long seq2)
{
	long delta = seq1 - seq2;

	return (delta > 0) ? delta : (long)(~delta + 1);
}
149
150
151
152
/*
 * Enter the TIME_WAIT state: both directions are marked shut down,
 * any sleeper is woken, and the close timer is armed for
 * TCP_TIMEWAIT_LEN so the socket lingers before final destruction.
 */
static void tcp_time_wait(struct sock *sk)
{
	sk->state = TCP_TIME_WAIT;
	sk->shutdown = SHUTDOWN_MASK;
	if (!sk->dead)
		wake_up(sk->sleep);
	reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
}
161
162
163
164
165
166
167
168
/*
 * Retransmit pending data.  With 'all' nonzero everything is resent
 * as-is; otherwise this is treated as a congestion signal, so the
 * congestion window is halved (only while above 4) and exponential
 * growth is stopped before the retransmit.
 */
static void
tcp_retransmit(struct sock *sk, int all)
{
	if (all) {
		ip_retransmit(sk, all);
		return;
	}

	/* Congestion backoff before resending. */
	if (sk->cong_window > 4)
		sk->cong_window = sk->cong_window / 2;
	sk->exp_growth = 0;

	ip_retransmit(sk, all);
}
184
185
186
187
188
189
190
191
192
193
/*
 * ICMP error handler for TCP.  'header' points at the offending IP
 * header returned inside the ICMP message; the embedded TCP header is
 * used to locate the affected socket.  Negative 'err' values carry an
 * errno directly; otherwise 'err' encodes ICMP type/code, which is
 * translated via icmp_err_convert[].  A fatal error aborts a
 * connection attempt still in SYN_SENT.
 */
void
tcp_err(int err, unsigned char *header, unsigned long daddr,
	unsigned long saddr, struct inet_protocol *protocol)
{
	struct tcphdr *th;
	struct sock *sk;
	struct iphdr *iph=(struct iphdr *)header;

	/* Skip past the variable-length IP header to the TCP header. */
	header+=4*iph->ihl;

	DPRINTF((DBG_TCP, "TCP: tcp_err(%d, hdr=%X, daddr=%X saddr=%X, protocol=%X)\n",
					err, header, daddr, saddr, protocol));

	th =(struct tcphdr *)header;
	sk = get_sock(&tcp_prot, th->source, daddr, th->dest, saddr);
	print_th(th);

	if (sk == NULL) return;		/* not one of our connections */

	/* Negative values are already an errno: record and wake. */
	if(err<0)
	{
		sk->err = -err;
		wake_up(sk->sleep);
		return;
	}

	if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8)) {
		/* Source quench: ease off the congestion window a notch
		   (never below 4). */
		if (sk->cong_window > 4) sk->cong_window--;
		return;
	}

	DPRINTF((DBG_TCP, "TCP: icmp_err got error\n"));
	sk->err = icmp_err_convert[err & 0xff].errno;

	/* A fatal ICMP error kills a connection attempt in progress;
	   established connections keep the error for the next syscall. */
	if (icmp_err_convert[err & 0xff].fatal) {
		if (sk->state == TCP_SYN_SENT) {
			sk->state = TCP_CLOSE;
			wake_up(sk->sleep);
		}
	}
	return;
}
245
246
247
248
249
250
251
/*
 * Count the bytes available for a blocking read on 'sk'.
 * Walks the circular receive queue starting at copied_seq+1, summing
 * contiguous in-sequence bytes; SYNs occupy a sequence number but are
 * not readable data, and urgent bytes are excluded (they are read via
 * the OOB path).  Interrupts are disabled while the queue is walked.
 */
static int
tcp_readable(struct sock *sk)
{
	unsigned long counted;
	unsigned long amount;
	struct sk_buff *skb;
	int count=0;
	int sum;
	unsigned long flags;

	DPRINTF((DBG_TCP, "tcp_readable(sk=%X)\n", sk));
	if(sk && sk->debug)
		printk("tcp_readable: %p - ",sk);

	if (sk == NULL || skb_peek(&sk->rqueue) == NULL)
	{
		if(sk && sk->debug)
			printk("empty\n");
		return(0);
	}

	/* First sequence number not yet handed to the user. */
	counted = sk->copied_seq+1;
	amount = 0;

	save_flags(flags);	/* queue must not change underneath us */
	cli();
	skb =(struct sk_buff *)sk->rqueue;

	do {
		count++;
#ifdef OLD
		/* Old sanity check for a runaway/corrupted queue. */
		if (count > 20) {
			restore_flags(flags);
			DPRINTF((DBG_TCP, "tcp_readable, more than 20 packets without a psh\n"));
			printk("tcp_read: possible read_queue corruption.\n");
			return(amount);
		}
#endif
		if (before(counted, skb->h.th->seq))	/* gap: stop counting */
			break;
		/* Bytes of this skb beyond what is already counted. */
		sum = skb->len -(counted - skb->h.th->seq);
		if (skb->h.th->syn) sum++;	/* SYN takes a sequence slot */
		if (skb->h.th->urg) {
			sum -= ntohs(skb->h.th->urg_ptr);	/* skip urgent bytes */
		}
		if (sum >= 0) {
			amount += sum;
			if (skb->h.th->syn) amount--;	/* SYN is not data */
			counted += sum;
		}

		skb =(struct sk_buff *)skb->next;
	} while(skb != sk->rqueue);	/* circular list: one full lap */
	restore_flags(flags);
	DPRINTF((DBG_TCP, "tcp readable returning %d bytes\n", amount));
	if(sk->debug)
		printk("got %lu bytes.\n",amount);
	/* NOTE(review): 'amount' is unsigned long but the return type is
	   int -- assumed to fit in practice; confirm against callers. */
	return(amount);
}
313
314
315
316
317
318
319
/*
 * select() support for TCP sockets.  Returns 1 when the socket is
 * ready for the requested operation, otherwise registers the caller
 * on the socket's wait queue (select_wait) and returns 0.  The socket
 * is locked (inuse) on entry to each case and released on every exit
 * path.
 */
static int
tcp_select(struct sock *sk, int sel_type, select_table *wait)
{
	DPRINTF((DBG_TCP, "tcp_select(sk=%X, sel_type = %d, wait = %X)\n",
						sk, sel_type, wait));

	sk->inuse = 1;
	switch(sel_type) {
	case SEL_IN:
		if(sk->debug)
			printk("select in");
		select_wait(sk->sleep, wait);
		if(sk->debug)
			printk("-select out");
		if (skb_peek(&sk->rqueue) != NULL) {
			/* A listener is readable when a connection is queued;
			   otherwise readability requires actual data. */
			if (sk->state == TCP_LISTEN || tcp_readable(sk)) {
				release_sock(sk);
				if(sk->debug)
					printk("-select ok data\n");
				return(1);
			}
		}
		if (sk->err != 0)	/* a pending error is "readable" */
		{
			release_sock(sk);
			if(sk->debug)
				printk("-select ok error");
			return(1);
		}
		if (sk->shutdown & RCV_SHUTDOWN) {
			/* EOF: read will not block. */
			release_sock(sk);
			if(sk->debug)
				printk("-select ok down\n");
			return(1);
		} else {
			release_sock(sk);
			if(sk->debug)
				printk("-select fail\n");
			return(0);
		}
	case SEL_OUT:
		select_wait(sk->sleep, wait);
		if (sk->shutdown & SEND_SHUTDOWN) {
			DPRINTF((DBG_TCP,
				"write select on shutdown socket.\n"));

			/* Writes on a shut-down socket never become ready. */
			release_sock(sk);
			return(0);
		}

		/* Writable only when a full MTU of buffer space is free
		   and the handshake is complete. */
		if (sk->prot->wspace(sk) >= sk->mtu) {
			release_sock(sk);
			/* Still connecting: not writable yet. */
			if (sk->state == TCP_SYN_RECV ||
			    sk->state == TCP_SYN_SENT) return(0);
			return(1);
		}
		DPRINTF((DBG_TCP,
			"tcp_select: sleeping on write sk->wmem_alloc = %d, "
			"sk->packets_out = %d\n"
			"sk->wback = %X, sk->wfront = %X\n"
			"sk->send_seq = %u, sk->window_seq=%u\n",
				sk->wmem_alloc, sk->packets_out,
				sk->wback, sk->wfront,
				sk->send_seq, sk->window_seq));

		release_sock(sk);
		return(0);
	case SEL_EX:
		select_wait(sk->sleep,wait);
		if (sk->err) {	/* exceptional condition pending */
			release_sock(sk);
			return(1);
		}
		release_sock(sk);
		return(0);
	}

	/* Unknown sel_type: not ready. */
	release_sock(sk);
	return(0);
}
407
408
/*
 * ioctl() handler for TCP sockets:
 *   DDIOCSDBG  - debugging control.
 *   TIOCINQ    - bytes available to read (via tcp_readable()).
 *   SIOCATMARK - 1 if the read pointer sits at the urgent mark.
 *   TIOCOUTQ   - free space in the send buffer.
 * Results are copied to user space after verify_area().
 */
int
tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int err;
	DPRINTF((DBG_TCP, "tcp_ioctl(sk=%X, cmd = %d, arg=%X)\n", sk, cmd, arg));
	switch(cmd) {
	case DDIOCSDBG:
		return(dbg_ioctl((void *) arg, DBG_TCP));

	case TIOCINQ:
#ifdef FIXME
	case FIONREAD:
#endif
		{
			unsigned long amount;

			if (sk->state == TCP_LISTEN) return(-EINVAL);

			sk->inuse = 1;
			amount = tcp_readable(sk);
			release_sock(sk);
			DPRINTF((DBG_TCP, "returning %d\n", amount));
			err=verify_area(VERIFY_WRITE,(void *)arg,
						sizeof(unsigned long));
			if(err)
				return err;
			put_fs_long(amount,(unsigned long *)arg);
			return(0);
		}
	case SIOCATMARK:
		{
			struct sk_buff *skb;
			int answ = 0;

			/* At the mark when the next byte to copy begins a
			   segment flagged urgent. */
			sk->inuse = 1;
			if ((skb=skb_peek(&sk->rqueue)) != NULL)
			{
				if (sk->copied_seq+1 == skb->h.th->seq && skb->h.th->urg)
					answ = 1;
			}
			release_sock(sk);
			err=verify_area(VERIFY_WRITE,(void *) arg,
						sizeof(unsigned long));
			if(err)
				return err;
			put_fs_long(answ,(int *) arg);
			return(0);
		}
	case TIOCOUTQ:
		{
			unsigned long amount;

			if (sk->state == TCP_LISTEN) return(-EINVAL);
			amount = sk->prot->wspace(sk);
			err=verify_area(VERIFY_WRITE,(void *)arg,
						sizeof(unsigned long));
			if(err)
				return err;
			put_fs_long(amount,(unsigned long *)arg);
			return(0);
		}
	default:
		return(-EINVAL);
	}
}
478
479
480
/*
 * Compute the TCP checksum: a 16-bit one's-complement sum over the
 * pseudo-header (source, destination, protocol, length) plus the TCP
 * header and data, implemented with i386 inline assembly.
 * 'len' is the TCP length in bytes.  A zero source address is
 * replaced with our own (my_addr()).
 * NOTE(review): the pseudo-header term uses ntohs(len); this and the
 * register games are x86-specific and preserved exactly.
 */
unsigned short
tcp_check(struct tcphdr *th, int len,
	  unsigned long saddr, unsigned long daddr)
{
	unsigned long sum;

	if (saddr == 0) saddr = my_addr();
	print_th(th);
	/* Fold the pseudo-header into the running sum. */
	__asm__("\t addl %%ecx,%%ebx\n"
		"\t adcl %%edx,%%ebx\n"
		"\t adcl $0, %%ebx\n"
		: "=b"(sum)
		: "0"(daddr), "c"(saddr), "d"((ntohs(len) << 16) + IPPROTO_TCP*256)
		: "cx","bx","dx" );

	/* Sum the bulk of the segment one longword at a time. */
	if (len > 3) {
		__asm__("\tclc\n"
			"1:\n"
			"\t lodsl\n"
			"\t adcl %%eax, %%ebx\n"
			"\t loop 1b\n"
			"\t adcl $0, %%ebx\n"
			: "=b"(sum) , "=S"(th)
			: "0"(sum), "c"(len/4) ,"1"(th)
			: "ax", "cx", "bx", "si" );
	}

	/* Fold the 32-bit accumulator down to 16 bits with carry. */
	__asm__("\t movl %%ebx, %%ecx\n"
		"\t shrl $16,%%ecx\n"
		"\t addw %%cx, %%bx\n"
		"\t adcw $0, %%bx\n"
		: "=b"(sum)
		: "0"(sum)
		: "bx", "cx");

	/* Add a trailing 16-bit word, if the length leaves one. */
	if ((len & 2) != 0) {
		__asm__("\t lodsw\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum), "=S"(th)
			: "0"(sum) ,"1"(th)
			: "si", "ax", "bx");
	}

	/* Add a final odd byte, zero-extended. */
	if ((len & 1) != 0) {
		__asm__("\t lodsb\n"
			"\t movb $0,%%ah\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum)
			: "0"(sum) ,"S"(th)
			: "si", "ax", "bx");
	}

	/* One's complement of the folded sum. */
	return((~sum) & 0xffff);
}
541
542
543 void
544 tcp_send_check(struct tcphdr *th, unsigned long saddr,
545 unsigned long daddr, int len, struct sock *sk)
546 {
547 th->check = 0;
548 th->check = tcp_check(th, len, saddr, daddr);
549 return;
550 }
551
552
/*
 * Flush the socket's accumulated partial packet (sk->send_tmp):
 * finish its checksum, then either transmit it immediately or, when
 * the send window is exhausted or the congestion window is full,
 * append it to the write queue for later.
 */
static void
tcp_send_partial(struct sock *sk)
{
	struct sk_buff *skb;

	if (sk == NULL || sk->send_tmp == NULL) return;

	skb = sk->send_tmp;

	/* Checksum the TCP portion: header through end of buffered data. */
	tcp_send_check(skb->h.th, sk->saddr, sk->daddr,
		       skb->len-(unsigned long)skb->h.th +
		       (unsigned long)(skb+1), sk);

	skb->h.seq = sk->send_seq;
	if (after(sk->send_seq , sk->window_seq) ||
	    sk->packets_out >= sk->cong_window) {
		/* Cannot send now: queue at the tail of the write queue. */
		DPRINTF((DBG_TCP, "sk->cong_window = %d, sk->packets_out = %d\n",
					sk->cong_window, sk->packets_out));
		DPRINTF((DBG_TCP, "sk->send_seq = %d, sk->window_seq = %d\n",
					sk->send_seq, sk->window_seq));
		skb->next = NULL;
		skb->magic = TCP_WRITE_QUEUE_MAGIC;
		if (sk->wback == NULL) {
			sk->wfront=skb;
		} else {
			sk->wback->next = skb;
		}
		sk->wback = skb;
	} else {
		sk->prot->queue_xmit(sk, skb->dev, skb,0);
	}
	sk->send_tmp = NULL;	/* no partial packet outstanding any more */
}
587
588
589
/*
 * Build and transmit a bare ACK segment: our sequence number is
 * 'sequence' and we acknowledge 'ack'.  If no buffer memory is
 * available the ACK is deferred by bumping ack_backlog and arming a
 * short write timer.  Nothing is sent on a zapped (reset) socket.
 */
static void
tcp_send_ack(unsigned long sequence, unsigned long ack,
	     struct sock *sk,
	     struct tcphdr *th, unsigned long daddr)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	struct device *dev = NULL;
	int tmp;

	if(sk->zapped)
		return;		/* connection was reset: stay silent */

	/* Allocate an skb for the ACK; on failure, remember that an ack
	   is owed and retry via the timer. */
	buff = (struct sk_buff *) sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL) {
		sk->ack_backlog++;
		if (sk->timeout != TIME_WRITE && tcp_connected(sk->state)) {
			reset_timer(sk, TIME_WRITE, 10);
		}
		if (inet_debug == DBG_SLIP) printk("\rtcp_ack: malloc failed\n");
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_ACK_SIZE;
	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;
	t1 =(struct tcphdr *)(buff + 1);

	/* Lay down the IP (and link) header first. */
	tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
				     IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
	if (tmp < 0) {
		buff->free=1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		if (inet_debug == DBG_SLIP) printk("\rtcp_ack: build_header failed\n");
		return;
	}
	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)t1 +tmp);

	/* Start from the incoming header, then swap/overwrite fields. */
	memcpy(t1, th, sizeof(*t1));

	t1->dest = th->source;
	t1->source = th->dest;
	t1->seq = ntohl(sequence);	/* ntohl used as htonl (symmetric swap) */
	t1->ack = 1;
	sk->window = sk->prot->rspace(sk);
	t1->window = ntohs(sk->window);
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	t1->fin = 0;
	if (ack == sk->acked_seq) {
		/* This ACK is fully up to date: clear the pending-ack
		   bookkeeping. */
		sk->ack_backlog = 0;
		sk->bytes_rcv = 0;
		sk->ack_timed = 0;
		if (sk->send_head == NULL && sk->wfront == NULL) {
			/* Nothing in flight and nothing queued. */
		}
	}
	t1->ack_seq = ntohl(ack);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), sk);
	if (sk->debug)
		printk("\rtcp_ack: seq %lx ack %lx\n", sequence, ack);
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
667
668
669
/*
 * Fill in *th for an outgoing data segment: copy the socket's
 * template header (dummy_th) and set the dynamic fields (seq,
 * ack_seq, window).  'push' is the number of bytes still to be sent
 * after this segment; PSH is set only on the final one (push == 0).
 * Returns the header length in bytes.
 */
static int
tcp_build_header(struct tcphdr *th, struct sock *sk, int push)
{
	memcpy(th,(void *) &(sk->dummy_th), sizeof(*th));
	th->seq = htonl(sk->send_seq);
	th->psh =(push == 0) ? 1 : 0;
	th->doff = sizeof(*th)/4;
	th->ack = 1;
	th->fin = 0;
	/* This segment carries a current ACK, so clear pending-ack state. */
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	sk->ack_timed = 0;
	th->ack_seq = htonl(sk->acked_seq);
	sk->window = sk->prot->rspace(sk);
	th->window = htons(sk->window);

	return(sizeof(*th));
}
690
691
692
693
694
695
/*
 * Send 'len' bytes from user space ('from') on socket 'sk'.
 * Blocks (unless 'nonblock') while the connection is still completing
 * or while buffer memory is unavailable.  Sub-MSS writes with data in
 * flight are accumulated in sk->send_tmp until a full MSS is ready
 * (Nagle-style coalescing).  MSG_OOB sends urgent data.  Returns the
 * number of bytes accepted, or a negative errno.
 */
static int
tcp_write(struct sock *sk, unsigned char *from,
	  int len, int nonblock, unsigned flags)
{
	int copied = 0;
	int copy;
	int tmp;
	struct sk_buff *skb;
	unsigned char *buff;
	struct proto *prot;
	struct device *dev = NULL;

	DPRINTF((DBG_TCP, "tcp_write(sk=%X, from=%X, len=%d, nonblock=%d, flags=%X)\n",
					sk, from, len, nonblock, flags));

	sk->inuse=1;
	prot = sk->prot;
	while(len > 0) {
		/* A pending error aborts the write (after partial success). */
		if (sk->err) {
			release_sock(sk);
			if (copied) return(copied);
			tmp = -sk->err;
			sk->err = 0;
			return(tmp);
		}

		/* Writing on a locally shut-down socket is EPIPE. */
		if (sk->shutdown & SEND_SHUTDOWN) {
			release_sock(sk);
			sk->err = EPIPE;
			if (copied) return(copied);
			sk->err = 0;
			return(-EPIPE);
		}

		/* Wait here until the connection is established (or dies). */
		while(sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) {
			if (sk->err) {
				release_sock(sk);
				if (copied) return(copied);
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV) {
				/* Not connecting and not connected: give up. */
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 1\n"));
				if (copied) return(copied);

				if (sk->err) {
					tmp = -sk->err;
					sk->err = 0;
					return(tmp);
				}

				/* Keepalive sockets signal the writer. */
				if (sk->keepopen) {
					send_sig(SIGPIPE, current, 0);
				}
				return(-EPIPE);
			}

			if (nonblock || copied) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 2\n"));
				if (copied) return(copied);
				return(-EAGAIN);
			}

			/* Sleep until the state changes; re-test under cli()
			   to avoid losing a wakeup. */
			release_sock(sk);
			cli();
			if (sk->state != TCP_ESTABLISHED &&
			    sk->state != TCP_CLOSE_WAIT && sk->err == 0) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					DPRINTF((DBG_TCP, "tcp_write: return 3\n"));
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
		}

		/* If a partial packet exists, append to it first. */
		if (sk->send_tmp != NULL) {
			skb = sk->send_tmp;
			if (!(flags & MSG_OOB)) {
				copy = min(sk->mss - skb->len + 128 +
					   prot->max_header, len);

				/* Should never trigger; guarded anyway. */
				if (copy <= 0) {
					printk("TCP: **bug**: \"copy\" <= 0!!\n");
					copy = 0;
				}

				memcpy_fromfs((unsigned char *)(skb+1) + skb->len, from, copy);
				skb->len += copy;
				from += copy;
				copied += copy;
				len -= copy;
				sk->send_seq += copy;
			}

			/* Flush once a full MSS is buffered, or for OOB data. */
			if (skb->len -(unsigned long)skb->h.th +
			    (unsigned long)(skb+1) >= sk->mss ||(flags & MSG_OOB)) {
				tcp_send_partial(sk);
			}
			continue;
		}

		/* Size the next packet: limited by what the peer's window
		   still allows, clamped into [200, mtu], then by 'len'. */
		copy = min(sk->mtu, diff(sk->window_seq, sk->send_seq));

		if (copy < 200 || copy > sk->mtu) copy = sk->mtu;
		copy = min(copy, len);

		/* Small write with data already in flight: start a new
		   partial packet rather than sending a tiny segment. */
		if (sk->packets_out && copy < sk->mss && !(flags & MSG_OOB)) {
			release_sock(sk);	/* allocation may sleep */
			skb = (struct sk_buff *) prot->wmalloc(sk,
					sk->mss + 128 + prot->max_header +
					sizeof(*skb), 0, GFP_KERNEL);
			sk->inuse = 1;
			sk->send_tmp = skb;
			if (skb != NULL)
				skb->mem_len = sk->mss + 128 + prot->max_header + sizeof(*skb);
		} else {
			release_sock(sk);	/* allocation may sleep */
			skb = (struct sk_buff *) prot->wmalloc(sk,
					copy + prot->max_header +
					sizeof(*skb), 0, GFP_KERNEL);
			sk->inuse = 1;
			if (skb != NULL)
				skb->mem_len = copy+prot->max_header + sizeof(*skb);
		}

		/* No memory: either fail (nonblock) or sleep for space. */
		if (skb == NULL) {
			if (nonblock ) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 4\n"));
				if (copied) return(copied);
				return(-EAGAIN);
			}

			/* Sleep only if no write memory was freed since we
			   sampled wmem_alloc (checked under cli()). */
			tmp = sk->wmem_alloc;
			release_sock(sk);
			cli();

			if (tmp <= sk->wmem_alloc &&
			    (sk->state == TCP_ESTABLISHED||sk->state == TCP_CLOSE_WAIT)
			    && sk->err == 0) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					DPRINTF((DBG_TCP, "tcp_write: return 5\n"));
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
			continue;
		}

		skb->mem_addr = skb;
		skb->len = 0;
		skb->sk = sk;
		skb->free = 0;

		buff =(unsigned char *)(skb+1);

		/* Build the IP (and link-level) header in front of the
		   TCP header. */
		tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev,
					 IPPROTO_TCP, sk->opt, skb->mem_len);
		if (tmp < 0 ) {
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 6\n"));
			if (copied) return(copied);
			return(tmp);
		}
		skb->len += tmp;
		skb->dev = dev;
		buff += tmp;
		skb->h.th =(struct tcphdr *) buff;
		/* push argument len-copy: PSH only on the final segment. */
		tmp = tcp_build_header((struct tcphdr *)buff, sk, len-copy);
		if (tmp < 0) {
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 7\n"));
			if (copied) return(copied);
			return(tmp);
		}

		if (flags & MSG_OOB) {
			((struct tcphdr *)buff)->urg = 1;
			((struct tcphdr *)buff)->urg_ptr = ntohs(copy);
		}
		skb->len += tmp;
		memcpy_fromfs(buff+tmp, from, copy);

		from += copy;
		copied += copy;
		len -= copy;
		skb->len += copy;
		skb->free = 0;
		sk->send_seq += copy;

		/* Became a new partial packet: keep accumulating. */
		if (sk->send_tmp != NULL) continue;

		tcp_send_check((struct tcphdr *)buff, sk->saddr, sk->daddr,
			       copy + sizeof(struct tcphdr), sk);

		skb->h.seq = sk->send_seq;
		if (after(sk->send_seq , sk->window_seq) ||
		    sk->packets_out >= sk->cong_window) {
			/* Window closed or congestion limit hit: queue. */
			DPRINTF((DBG_TCP, "sk->cong_window = %d, sk->packets_out = %d\n",
						sk->cong_window, sk->packets_out));
			DPRINTF((DBG_TCP, "sk->send_seq = %d, sk->window_seq = %d\n",
						sk->send_seq, sk->window_seq));
			skb->next = NULL;
			skb->magic = TCP_WRITE_QUEUE_MAGIC;
			if (sk->wback == NULL) {
				sk->wfront = skb;
			} else {
				sk->wback->next = skb;
			}
			sk->wback = skb;
		} else {
			prot->queue_xmit(sk, dev, skb,0);
		}
	}
	sk->err = 0;

	/* Nothing in flight: push any partial packet out right away. */
	if(sk->send_tmp && !sk->packets_out)
		tcp_send_partial(sk);

	release_sock(sk);
	DPRINTF((DBG_TCP, "tcp_write: return 8\n"));
	return(copied);
}
958
959
960 static int
961 tcp_sendto(struct sock *sk, unsigned char *from,
962 int len, int nonblock, unsigned flags,
963 struct sockaddr_in *addr, int addr_len)
964 {
965 struct sockaddr_in sin;
966
967 if (addr_len < sizeof(sin)) return(-EINVAL);
968 memcpy_fromfs(&sin, addr, sizeof(sin));
969 if (sin.sin_family && sin.sin_family != AF_INET) return(-EINVAL);
970 if (sin.sin_port != sk->dummy_th.dest) return(-EINVAL);
971 if (sin.sin_addr.s_addr != sk->daddr) return(-EINVAL);
972 return(tcp_write(sk, from, len, nonblock, flags));
973 }
974
975
/*
 * Send a window-update ACK after reads have freed receive space.
 * Does nothing unless an ack is owed (sk->ack_backlog).  On buffer
 * exhaustion a short write timer is armed to retry.
 */
static void
tcp_read_wakeup(struct sock *sk)
{
	int tmp;
	struct device *dev = NULL;
	struct tcphdr *t1;
	struct sk_buff *buff;

	DPRINTF((DBG_TCP, "in tcp read wakeup\n"));
	if (!sk->ack_backlog) return;	/* no ack owed: nothing to do */

	/* Build a bare ACK segment from scratch. */
	buff = (struct sk_buff *) sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
	if (buff == NULL) {
		/* No memory now: retry via the write timer shortly. */
		reset_timer(sk, TIME_WRITE, 10);
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_ACK_SIZE;
	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;

	/* IP (and link) header first. */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
				     IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
	if (tmp < 0) {
		buff->free=1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		return;
	}

	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)(buff+1) +tmp);

	memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
	t1->seq = ntohl(sk->send_seq);	/* ntohl used as htonl (symmetric) */
	t1->ack = 1;
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	/* Advertise the newly freed receive space. */
	sk->window = sk->prot->rspace(sk);
	t1->window = ntohs(sk->window);
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
1039
1040
1041
1042
1043
1044
1045
1046
/*
 * Free fully-consumed buffers at the head of the receive queue and,
 * if that changed the amount of advertised space, arrange for a
 * window update: immediately when the window grew by more than an
 * MTU, otherwise via a short delayed-ack timer.
 */
static void
cleanup_rbuf(struct sock *sk)
{
	unsigned long flags;
	int left;
	struct sk_buff *skb;

	if(sk->debug)
		printk("cleaning rbuf for sk=%p\n", sk);

	save_flags(flags);
	cli();

	left = sk->prot->rspace(sk);	/* space before cleaning */

	/* Pop 'used' skbs off the front of the queue; stop at the first
	   one still holding unread data. */
	while((skb=skb_peek(&sk->rqueue)) != NULL )
	{
		if (!skb->used)
			break;
		skb_unlink(skb);
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
	}

	restore_flags(flags);

	DPRINTF((DBG_TCP, "sk->window left = %d, sk->prot->rspace(sk)=%d\n",
			sk->window - sk->bytes_rcv, sk->prot->rspace(sk)));

	if(sk->debug)
		printk("sk->rspace = %lu, was %d\n", sk->prot->rspace(sk),
			left);
	if (sk->prot->rspace(sk) != left)
	{
		/* Space changed, so an ack is owed.  Send it now if a lot
		   of room opened up; otherwise delay it briefly, taking
		   care not to postpone an earlier pending timeout. */
		sk->ack_backlog++;
		if ((sk->prot->rspace(sk) > (sk->window - sk->bytes_rcv + sk->mtu))) {
			tcp_read_wakeup(sk);
		} else {
			int was_active = del_timer(&sk->timer);
			if (!was_active || TCP_ACK_TIME < sk->timer.expires) {
				reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
			} else
				add_timer(&sk->timer);
		}
	}
}
1115
1116
1117
/*
 * Read out-of-band (urgent) data.  Scans the circular receive queue
 * for a segment whose URG flag has not been consumed, copies up to
 * urg_ptr bytes to user space, and (unless MSG_PEEK) marks the
 * urgent data used.  Blocks like a normal read while no urgent data
 * is pending.  Returns bytes copied or a negative errno.
 */
static int
tcp_read_urg(struct sock * sk, int nonblock,
	     unsigned char *to, int len, unsigned flags)
{
	int copied = 0;
	struct sk_buff *skb;

	DPRINTF((DBG_TCP, "tcp_read_urg(sk=%X, to=%X, len=%d, flags=%X)\n",
					sk, to, len, flags));

	while(len > 0)
	{
		sk->inuse = 1;
		/* Wait until urgent data arrives or the socket dies. */
		while(sk->urg==0 || skb_peek(&sk->rqueue) == NULL) {
			if (sk->err) {
				int tmp;

				release_sock(sk);
				if (copied) return(copied);
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			if (sk->state == TCP_CLOSE || sk->done) {
				release_sock(sk);
				if (copied) return(copied);
				if (!sk->done) {
					sk->done = 1;	/* first EOF read */
					return(0);
				}
				return(-ENOTCONN);
			}

			if (sk->shutdown & RCV_SHUTDOWN) {
				release_sock(sk);
				if (copied == 0)
					sk->done = 1;
				return(copied);
			}

			if (nonblock || copied) {
				release_sock(sk);
				if (copied) return(copied);
				return(-EAGAIN);
			}

			/* Re-check under cli() before sleeping so a wakeup
			   between the test and the sleep is not lost. */
			release_sock(sk);
			cli();
			if ((sk->urg == 0 || skb_peek(&sk->rqueue) == NULL) &&
			    sk->err == 0 && !(sk->shutdown & RCV_SHUTDOWN)) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
		}

		/* Walk the circular queue for unconsumed urgent data. */
		skb = skb_peek(&sk->rqueue);
		do {
			int amt;

			if (skb->h.th->urg && !skb->urg_used) {
				/* An urg_ptr of 0 means the entire skb. */
				if (skb->h.th->urg_ptr == 0) {
					skb->h.th->urg_ptr = ntohs(skb->len);
				}
				amt = min(ntohs(skb->h.th->urg_ptr),len);
				if(amt)
				{
					/* NOTE(review): the verify_area result
					   is ignored here -- preserved as-is;
					   confirm intended. */
					verify_area(VERIFY_WRITE, to, amt);
					memcpy_tofs(to,(unsigned char *)(skb->h.th) +
							skb->h.th->doff*4, amt);
				}

				if (!(flags & MSG_PEEK)) {
					skb->urg_used = 1;
					sk->urg--;
				}
				release_sock(sk);
				copied += amt;
				return(copied);
			}
			skb =(struct sk_buff *)skb->next;
		} while(skb != sk->rqueue);
	}
	sk->urg = 0;
	release_sock(sk);
	return(0);
}
1212
1213
1214
/*
 * Copy up to 'len' bytes of in-sequence data from the receive queue
 * to user space.  Blocks (unless 'nonblock') while no data is
 * available.  MSG_OOB is diverted to tcp_read_urg(); MSG_PEEK copies
 * without consuming.  Reading stops at urgent data so SIGURG / OOB
 * handling can take over.  Returns bytes copied or a negative errno.
 */
static int
tcp_read(struct sock *sk, unsigned char *to,
	 int len, int nonblock, unsigned flags)
{
	int copied=0;
	struct sk_buff *skb;
	unsigned long offset;
	unsigned long used;
	int err;

	if (len == 0) return(0);
	if (len < 0) {
		return(-EINVAL);
	}

	/* Validate the destination buffer before doing anything. */
	err=verify_area(VERIFY_WRITE,to,len);
	if(err)
		return err;

	/* A listening socket has no data stream. */
	if (sk->state == TCP_LISTEN) return(-ENOTCONN);

	/* Urgent data goes through the OOB reader. */
	if ((flags & MSG_OOB))
		return(tcp_read_urg(sk, nonblock, to, len, flags));

	sk->inuse = 1;

	skb=skb_peek(&sk->rqueue);

	DPRINTF((DBG_TCP, "tcp_read(sk=%X, to=%X, len=%d, nonblock=%d, flags=%X)\n",
				sk, to, len, nonblock, flags));

	while(len > 0) {
		/* Wait for an skb containing the next byte we need
		   (skb missing, out of sequence, or already consumed). */
		while(skb == NULL ||
		      before(sk->copied_seq+1, skb->h.th->seq) || skb->used) {
			DPRINTF((DBG_TCP, "skb = %X:\n", skb));
			cleanup_rbuf(sk);	/* free used skbs / update window */
			if (sk->err)
			{
				int tmp;

				release_sock(sk);
				if (copied)
				{
					DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
								copied));
					return(copied);
				}
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			if (sk->state == TCP_CLOSE)
			{
				release_sock(sk);
				if (copied) {
					DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
								copied));
					return(copied);
				}
				if (!sk->done) {
					sk->done = 1;	/* first EOF read */
					return(0);
				}
				return(-ENOTCONN);
			}

			if (sk->shutdown & RCV_SHUTDOWN)
			{
				release_sock(sk);
				if (copied == 0) sk->done = 1;
				DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
				return(copied);
			}

			if (nonblock || copied)
			{
				release_sock(sk);
				if(sk->debug)
					printk("read: EAGAIN\n");
				if (copied)
				{
					DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
								copied));
					return(copied);
				}
				return(-EAGAIN);
			}

			if ((flags & MSG_PEEK) && copied != 0)
			{
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
				return(copied);
			}

			DPRINTF((DBG_TCP, "tcp_read about to sleep. state = %d\n",
								sk->state));
			release_sock(sk);

			/* Re-test conditions under cli() so a wakeup cannot
			   slip in between the check and the sleep. */
			cli();
			if (sk->shutdown & RCV_SHUTDOWN || sk->err != 0) {
				sk->inuse = 1;
				sti();
				continue;
			}

			if (skb_peek(&sk->rqueue) == NULL ||
			    before(sk->copied_seq+1, sk->rqueue->h.th->seq)) {
				if(sk->debug)
					printk("Read wait sleep\n");
				interruptible_sleep_on(sk->sleep);
				if(sk->debug)
					printk("Read wait wakes\n");
				if (current->signal & ~current->blocked) {
					sti();
					if (copied) {
						DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
									copied));
						return(copied);
					}
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
			DPRINTF((DBG_TCP, "tcp_read woke up. \n"));

			skb=skb_peek(&sk->rqueue);
		}

		/* Offset of the next unread byte within this skb; a SYN
		   consumes a sequence number but no data byte. */
		offset = sk->copied_seq+1 - skb->h.th->seq;

		if (skb->h.th->syn) offset--;
		if (offset < skb->len)
		{
			/* Urgent data embedded in this skb must have been
			   consumed via OOB first; otherwise raise SIGURG. */
			if (skb->h.th->urg)
			{
				if (skb->urg_used)
				{
					/* Skip over the already-read urgent bytes. */
					sk->copied_seq += ntohs(skb->h.th->urg_ptr);
					offset += ntohs(skb->h.th->urg_ptr);
					if (offset >= skb->len)
					{
						skb->used = 1;
						skb =(struct sk_buff *)skb->next;
						continue;
					}
				}
				else
				{
					release_sock(sk);
					if (copied)
						return(copied);
					send_sig(SIGURG, current, 0);
					return(-EINTR);
				}
			}

			/* Copy as much of this skb as the caller wants. */
			used = min(skb->len - offset, len);

			memcpy_tofs(to,((unsigned char *)skb->h.th) +
				    skb->h.th->doff*4 + offset, used);
			copied += used;
			len -= used;
			to += used;

			/* PEEK does not advance the read pointer. */
			if (!(flags & MSG_PEEK))
				sk->copied_seq += used;

			/* Mark the skb consumed once every byte (and any
			   urgent data) has been taken from it. */
			if (!(flags & MSG_PEEK) &&
			    (!skb->h.th->urg || skb->urg_used) &&
			    (used + offset >= skb->len))
				skb->used = 1;

			/* Stop at urgent data so OOB handling can occur. */
			if (skb->h.th->urg)
			{
				break;
			}
		}
		else
		{
			/* Nothing left in this skb. */
			skb->used = 1;
		}

		skb =(struct sk_buff *)skb->next;
	}

	cleanup_rbuf(sk);
	release_sock(sk);
	DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
	if (copied == 0 && nonblock)
		return(-EAGAIN);
	return(copied);
}
1440
1441
1442
1443
1444
1445
/*
 * Shut down the sending side of a connection: flush any partial
 * packet, build a FIN segment and either queue it behind pending
 * writes or transmit it immediately, then move the socket to
 * FIN_WAIT1 (from ESTABLISHED) or FIN_WAIT2.  Only SEND_SHUTDOWN is
 * handled here; no-op if a FIN has already been sent.
 */
void
tcp_shutdown(struct sock *sk, int how)
{
	struct sk_buff *buff;
	struct tcphdr *t1, *th;
	struct proto *prot;
	int tmp;
	struct device *dev = NULL;

	if (sk->state == TCP_FIN_WAIT1 || sk->state == TCP_FIN_WAIT2) return;
	if (!(how & SEND_SHUTDOWN)) return;
	sk->inuse = 1;

	/* Any buffered partial data must precede the FIN. */
	if (sk->send_tmp) tcp_send_partial(sk);

	prot =(struct proto *)sk->prot;
	th =(struct tcphdr *)&sk->dummy_th;
	release_sock(sk);	/* allocation may sleep */
	buff = (struct sk_buff *) prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
	if (buff == NULL) return;
	sk->inuse = 1;

	DPRINTF((DBG_TCP, "tcp_shutdown_send buff = %X\n", buff));
	buff->mem_addr = buff;
	buff->mem_len = MAX_RESET_SIZE;
	buff->sk = sk;
	buff->len = sizeof(*t1);
	t1 =(struct tcphdr *)(buff + 1);

	/* IP (and link) header first. */
	tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
				 IPPROTO_TCP, sk->opt,
				 sizeof(struct tcphdr));
	if (tmp < 0) {
		buff->free=1;
		prot->wfree(sk,buff->mem_addr, buff->mem_len);
		release_sock(sk);
		DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
		return;
	}

	t1 =(struct tcphdr *)((char *)t1 +tmp);
	buff ->len += tmp;
	buff->dev = dev;
	memcpy(t1, th, sizeof(*t1));
	t1->seq = ntohl(sk->send_seq);	/* ntohl used as htonl (symmetric) */
	sk->send_seq++;			/* FIN consumes a sequence number */
	buff->h.seq = sk->send_seq;
	t1->ack = 1;
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->window = ntohs(sk->prot->rspace(sk));
	t1->fin = 1;
	t1->rst = 0;
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

	/* The FIN must not overtake queued data: append to the write
	   queue when one exists, else send straight away. */
	if (sk->wback != NULL) {
		buff->free=0;
		buff->next = NULL;
		sk->wback->next = buff;
		sk->wback = buff;
		buff->magic = TCP_WRITE_QUEUE_MAGIC;
	} else {
		sk->prot->queue_xmit(sk, dev, buff, 0);
	}

	if (sk->state == TCP_ESTABLISHED) sk->state = TCP_FIN_WAIT1;
	else sk->state = TCP_FIN_WAIT2;

	release_sock(sk);
}
1530
1531
/*
 * recvfrom() for TCP: a plain tcp_read() plus filling in the peer's
 * address.  Both user-space address buffers are verified before the
 * (potentially blocking) read so that EFAULT is reported up front
 * rather than after data has been consumed.
 */
static int
tcp_recvfrom(struct sock *sk, unsigned char *to,
	     int to_len, int nonblock, unsigned flags,
	     struct sockaddr_in *addr, int *addr_len)
{
	struct sockaddr_in sin;
	int len;
	int err;
	int result;

	err = verify_area(VERIFY_WRITE,addr_len,sizeof(long));
	if(err)
		return err;
	len = get_fs_long(addr_len);
	if(len > sizeof(sin))
		len = sizeof(sin);	/* never copy more than our struct */
	err=verify_area(VERIFY_WRITE, addr, len);
	if(err)
		return err;

	result=tcp_read(sk, to, to_len, nonblock, flags);

	if (result < 0) return(result);

	/* TCP is connected, so the peer address is fixed. */
	sin.sin_family = AF_INET;
	sin.sin_port = sk->dummy_th.dest;
	sin.sin_addr.s_addr = sk->daddr;

	memcpy_tofs(addr, &sin, len);
	put_fs_long(len, addr_len);
	return(result);
}
1567
1568
1569
1570 static void
1571 tcp_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
1572 struct proto *prot, struct options *opt, struct device *dev)
1573 {
1574 struct sk_buff *buff;
1575 struct tcphdr *t1;
1576 int tmp;
1577
1578
1579
1580
1581
1582 buff = (struct sk_buff *) prot->wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
1583 if (buff == NULL)
1584 return;
1585
1586 DPRINTF((DBG_TCP, "tcp_reset buff = %X\n", buff));
1587 buff->mem_addr = buff;
1588 buff->mem_len = MAX_RESET_SIZE;
1589 buff->len = sizeof(*t1);
1590 buff->sk = NULL;
1591 buff->dev = dev;
1592
1593 t1 =(struct tcphdr *)(buff + 1);
1594
1595
1596 tmp = prot->build_header(buff, saddr, daddr, &dev, IPPROTO_TCP, opt,
1597 sizeof(struct tcphdr));
1598 if (tmp < 0) {
1599 buff->free = 1;
1600 prot->wfree(NULL, buff->mem_addr, buff->mem_len);
1601 return;
1602 }
1603 t1 =(struct tcphdr *)((char *)t1 +tmp);
1604 buff->len += tmp;
1605 memcpy(t1, th, sizeof(*t1));
1606
1607
1608 t1->dest = th->source;
1609 t1->source = th->dest;
1610 t1->rst = 1;
1611 t1->window = 0;
1612
1613 if(th->ack)
1614 {
1615 t1->ack=0;
1616 t1->seq=th->ack_seq;
1617 t1->ack_seq=0;
1618 }
1619 else
1620 {
1621 t1->ack=1;
1622 if(!th->syn)
1623 t1->ack_seq=htonl(th->seq);
1624 else
1625 t1->ack_seq=htonl(th->seq+1);
1626 t1->seq=0;
1627 }
1628
1629 t1->syn = 0;
1630 t1->urg = 0;
1631 t1->fin = 0;
1632 t1->psh = 0;
1633 t1->doff = sizeof(*t1)/4;
1634 tcp_send_check(t1, saddr, daddr, sizeof(*t1), NULL);
1635 prot->queue_xmit(NULL, dev, buff, 1);
1636 }
1637
1638
1639
1640
1641
1642
1643 static void
1644 tcp_options(struct sock *sk, struct tcphdr *th)
1645 {
1646 unsigned char *ptr;
1647 int length=(th->doff*4)-sizeof(struct tcphdr);
1648 int mtuset=0;
1649
1650 ptr = (unsigned char *)(th + 1);
1651
1652 while(length>0)
1653 {
1654 int opcode=*ptr++;
1655 int opsize=*ptr++;
1656 switch(opcode)
1657 {
1658 case TCPOPT_EOL:
1659 return;
1660 case TCPOPT_NOP:
1661 length-=2;
1662 continue;
1663
1664 default:
1665 if(opsize<=2)
1666 return;
1667 switch(opcode)
1668 {
1669 case TCPOPT_MSS:
1670 if(opsize==4)
1671 {
1672 sk->mtu=min(sk->mtu,ntohs(*(unsigned short *)ptr));
1673 mtuset=1;
1674 }
1675 break;
1676
1677 }
1678 ptr+=opsize-2;
1679 length-=opsize;
1680 }
1681 }
1682
1683 if (!mtuset)
1684 {
1685 sk->mtu = min(sk->mtu, 576 - HEADER_SIZE);
1686 return;
1687 }
1688 }
1689
1690
1691
1692
1693
1694
1695
1696
/*
 * Handle an incoming SYN on a listening socket.
 *
 * Clones the listener into a fresh "newsk" in TCP_SYN_RECV state,
 * parses the peer's options (MSS), sends a SYN+ACK carrying our own
 * MSS option, and queues the original SYN skb - now charged to newsk -
 * on the listener's receive queue so accept() can pick it up.
 *
 * sk/skb      - listening socket and the received SYN
 * daddr/saddr - our address / peer address as seen on the wire
 * opt/dev     - IP options and arrival device
 */
static void
tcp_conn_request(struct sock *sk, struct sk_buff *skb,
         unsigned long daddr, unsigned long saddr,
         struct options *opt, struct device *dev)
{
    struct sk_buff *buff;
    struct tcphdr *t1;
    unsigned char *ptr;
    struct sock *newsk;
    struct tcphdr *th;
    int tmp;

    DPRINTF((DBG_TCP, "tcp_conn_request(sk = %X, skb = %X, daddr = %X, sadd4= %X, \n"
        " opt = %X, dev = %X)\n",
        sk, skb, daddr, saddr, opt, dev));

    th = skb->h.th;

    /* A dead listener cannot accept: refuse with a reset. */
    if (!sk->dead) {
        wake_up(sk->sleep);
    } else {
        DPRINTF((DBG_TCP, "tcp_conn_request on dead socket\n"));
        tcp_reset(daddr, saddr, th, sk->prot, opt, dev);
        kfree_skb(skb, FREE_READ);
        return;
    }

    /*
     * Accept queue full: silently drop the SYN; the peer will
     * retransmit and may get in later.
     */
    if (sk->ack_backlog >= sk->max_ack_backlog) {
        kfree_skb(skb, FREE_READ);
        return;
    }

    /* Build the embryonic connection socket (atomic: may be at IRQ time). */
    newsk = (struct sock *) kmalloc(sizeof(struct sock), GFP_ATOMIC);
    if (newsk == NULL) {
        /* Out of memory: drop; peer retransmits. */
        kfree_skb(skb, FREE_READ);
        return;
    }

    DPRINTF((DBG_TCP, "newsk = %X\n", newsk));
    /* Start as a copy of the listener, then reset per-connection state. */
    memcpy((void *)newsk,(void *)sk, sizeof(*newsk));
    newsk->wback = NULL;
    newsk->wfront = NULL;
    newsk->rqueue = NULL;
    newsk->send_head = NULL;
    newsk->send_tail = NULL;
    newsk->back_log = NULL;
    newsk->rtt = TCP_CONNECT_TIME;
    newsk->mdev = 0;
    newsk->backoff = 0;
    newsk->blog = 0;
    newsk->intr = 0;
    newsk->proc = 0;
    newsk->done = 0;
    newsk->send_tmp = NULL;
    newsk->pair = NULL;
    newsk->wmem_alloc = 0;
    newsk->rmem_alloc = 0;

    newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;

    newsk->err = 0;
    newsk->shutdown = 0;
    newsk->ack_backlog = 0;
    /* The SYN consumes one sequence number. */
    newsk->acked_seq = skb->h.th->seq+1;
    newsk->fin_seq = skb->h.th->seq;
    newsk->copied_seq = skb->h.th->seq;
    newsk->state = TCP_SYN_RECV;
    newsk->timeout = 0;
    /* Clock-driven initial send sequence. */
    newsk->send_seq = jiffies * SEQ_TICK - seq_offset;
    newsk->rcv_ack_seq = newsk->send_seq;
    newsk->urg =0;
    newsk->retransmits = 0;
    newsk->destroy = 0;
    newsk->timer.data = (unsigned long)newsk;
    newsk->timer.function = &net_timer;
    newsk->dummy_th.source = skb->h.th->dest;
    newsk->dummy_th.dest = skb->h.th->source;

    /* Swap addresses: their source is our destination. */
    newsk->daddr = saddr;
    newsk->saddr = daddr;

    put_sock(newsk->num,newsk);
    newsk->dummy_th.res1 = 0;
    newsk->dummy_th.doff = 6;
    newsk->dummy_th.fin = 0;
    newsk->dummy_th.syn = 0;
    newsk->dummy_th.rst = 0;
    newsk->dummy_th.psh = 0;
    newsk->dummy_th.ack = 0;
    newsk->dummy_th.urg = 0;
    newsk->dummy_th.res2 = 0;
    newsk->acked_seq = skb->h.th->seq + 1;
    newsk->copied_seq = skb->h.th->seq;

#ifdef OLDWAY
    /* Hand-rolled MSS option scan (superseded by tcp_options()). */
    if (skb->h.th->doff == 5) {
        newsk->mtu = dev->mtu - HEADER_SIZE;
    } else {
        ptr =(unsigned char *)(skb->h.th + 1);
        if (ptr[0] != 2 || ptr[1] != 4) {
            newsk->mtu = dev->mtu - HEADER_SIZE;
        } else {
            newsk->mtu = min(ptr[2] * 256 + ptr[3] - HEADER_SIZE,
                dev->mtu - HEADER_SIZE);
        }
    }
#else
    tcp_options(newsk,skb->h.th);
#endif
    /* Buffer for our SYN+ACK reply. */
    buff = (struct sk_buff *) newsk->prot->wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
    if (buff == NULL) {
        sk->err = -ENOMEM;
        newsk->dead = 1;
        release_sock(newsk);
        kfree_skb(skb, FREE_READ);
        return;
    }

    buff->mem_addr = buff;
    buff->mem_len = MAX_SYN_SIZE;
    /* TCP header plus 4 bytes for the MSS option. */
    buff->len = sizeof(struct tcphdr)+4;
    buff->sk = newsk;

    t1 =(struct tcphdr *)(buff + 1);

    tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &dev,
        IPPROTO_TCP, NULL, MAX_SYN_SIZE);

    if (tmp < 0) {
        /* Could not route/build: tear the embryo down. */
        sk->err = tmp;
        buff->free=1;
        kfree_skb(buff,FREE_WRITE);
        newsk->dead = 1;
        release_sock(newsk);
        skb->sk = sk;
        kfree_skb(skb, FREE_READ);
        return;
    }

    buff->len += tmp;
    t1 =(struct tcphdr *)((char *)t1 +tmp);

    memcpy(t1, skb->h.th, sizeof(*t1));
    buff->h.seq = newsk->send_seq;

    /* Fill in the SYN+ACK fields. */
    t1->dest = skb->h.th->source;
    t1->source = newsk->dummy_th.source;
    t1->seq = ntohl(newsk->send_seq++);
    t1->ack = 1;
    newsk->window = newsk->prot->rspace(newsk);
    t1->window = ntohs(newsk->window);
    t1->res1 = 0;
    t1->res2 = 0;
    t1->rst = 0;
    t1->urg = 0;
    t1->psh = 0;
    t1->syn = 1;
    t1->ack_seq = ntohl(skb->h.th->seq+1);
    t1->doff = sizeof(*t1)/4+1;    /* +1 data word for the MSS option */

    /* MSS option: kind 2, length 4, value = our device MTU budget. */
    ptr =(unsigned char *)(t1+1);
    ptr[0] = 2;
    ptr[1] = 4;
    ptr[2] =((dev->mtu - HEADER_SIZE) >> 8) & 0xff;
    ptr[3] =(dev->mtu - HEADER_SIZE) & 0xff;

    tcp_send_check(t1, daddr, saddr, sizeof(*t1)+4, newsk);
    newsk->prot->queue_xmit(newsk, dev, buff, 0);

    /* Retransmit the SYN+ACK if no ACK arrives in time. */
    reset_timer(newsk, TIME_WRITE , TCP_CONNECT_TIME);
    skb->sk = newsk;

    /* Re-charge the SYN's memory from the listener to the new socket. */
    sk->rmem_alloc -= skb->mem_len;
    newsk->rmem_alloc += skb->mem_len;

    /* Park the SYN on the listener's queue for accept() to find. */
    skb_queue_tail(&sk->rqueue,skb);
    sk->ack_backlog++;
    release_sock(newsk);
}
1894
1895
/*
 * Close a TCP socket.
 *
 * Flushes the receive queue (remembering whether unread data forces a
 * reset), pushes out any partial write, then acts on the connection
 * state: states that already sent a FIN just (re)arm timers, while
 * open states build and queue a FIN segment and move to a FIN-WAIT
 * state.  `timeout` non-zero means the caller will not linger.
 */
static void
tcp_close(struct sock *sk, int timeout)
{
    struct sk_buff *buff;
    int need_reset = 0;
    struct tcphdr *t1, *th;
    struct proto *prot;
    struct device *dev=NULL;
    int tmp;

    DPRINTF((DBG_TCP, "tcp_close((struct sock *)%X, %d)\n",sk, timeout));
    sk->inuse = 1;
    sk->keepopen = 1;
    sk->shutdown = SHUTDOWN_MASK;

    if (!sk->dead) wake_up(sk->sleep);

    /* Discard anything still unread; unread in-window data ⇒ reset. */
    if (skb_peek(&sk->rqueue) != NULL)
    {
        struct sk_buff *skb;
#ifdef OLD
        /* Pre-skbuff-helper variant of the same purge. */
        struct sk_buff *skb2;
        skb = skb_peek(&sk->rqueue);
        do {
            skb2 =(struct sk_buff *)skb->next;
            /* Data the application never copied out? */
            if (skb->len > 0 &&
                after(skb->h.th->seq + skb->len + 1, sk->copied_seq))
                need_reset = 1;
            kfree_skb(skb, FREE_WRITE);
            skb = skb2;
        } while(skb != sk->rqueue);
#else
        if(sk->debug)
            printk("Clean rcv queue\n");
        while((skb=skb_dequeue(&sk->rqueue))!=NULL)
        {
            if(skb->len > 0 && after(skb->h.th->seq + skb->len + 1 , sk->copied_seq))
                need_reset = 1;
            kfree_skb(skb, FREE_READ);
        }
        if(sk->debug)
            printk("Cleaned.\n");
#endif
    }
    sk->rqueue = NULL;

    /* Flush any partially-filled write buffer before the FIN. */
    if (sk->send_tmp) {
        tcp_send_partial(sk);
    }

    switch(sk->state) {
    case TCP_FIN_WAIT1:
    case TCP_FIN_WAIT2:
    case TCP_LAST_ACK:
        /* FIN already sent: just bound how long we wait for its ACK. */
        reset_timer(sk, TIME_CLOSE, 4 * sk->rtt);
        if (timeout) tcp_time_wait(sk);
        release_sock(sk);
        return;
    case TCP_TIME_WAIT:
        if (timeout) {
            sk->state = TCP_CLOSE;
        }
        release_sock(sk);
        return;
    case TCP_LISTEN:
        /* Never connected: nothing to send. */
        sk->state = TCP_CLOSE;
        release_sock(sk);
        return;
    case TCP_CLOSE:
        release_sock(sk);
        return;
    case TCP_CLOSE_WAIT:
    case TCP_ESTABLISHED:
    case TCP_SYN_SENT:
    case TCP_SYN_RECV:
        prot =(struct proto *)sk->prot;
        th =(struct tcphdr *)&sk->dummy_th;
        buff = (struct sk_buff *) prot->wmalloc(sk, MAX_FIN_SIZE, 1, GFP_ATOMIC);
        if (buff == NULL) {
            /*
             * No memory for the FIN: back off, stay open, and let
             * the close timer retry shortly.
             */
            release_sock(sk);
            if (sk->state != TCP_CLOSE_WAIT)
                sk->state = TCP_ESTABLISHED;
            reset_timer(sk, TIME_CLOSE, 100);
            return;
        }
        buff->mem_addr = buff;
        buff->mem_len = MAX_FIN_SIZE;
        buff->sk = sk;
        buff->free = 1;
        buff->len = sizeof(*t1);
        t1 =(struct tcphdr *)(buff + 1);

        tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
            IPPROTO_TCP, sk->opt,
            sizeof(struct tcphdr));
        if (tmp < 0) {
            kfree_skb(buff,FREE_WRITE);
            DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
            release_sock(sk);
            return;
        }

        t1 =(struct tcphdr *)((char *)t1 +tmp);
        buff ->len += tmp;
        buff->dev = dev;
        memcpy(t1, th, sizeof(*t1));
        t1->seq = ntohl(sk->send_seq);
        sk->send_seq++;          /* the FIN consumes a sequence number */
        buff->h.seq = sk->send_seq;
        t1->ack = 1;

        /* Closing: no reason to delay acknowledgements any more. */
        sk->delay_acks = 0;
        t1->ack_seq = ntohl(sk->acked_seq);
        t1->window = ntohs(sk->prot->rspace(sk));
        t1->fin = 1;
        t1->rst = need_reset;    /* unread data: abort, per RFC 1122 */
        t1->doff = sizeof(*t1)/4;
        tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

        if (sk->wfront == NULL) {
            prot->queue_xmit(sk, dev, buff, 0);
        } else {
            /* Data still queued: FIN goes at the tail, behind it. */
            reset_timer(sk, TIME_WRITE,
                backoff(sk->backoff) * (2 * sk->mdev + sk->rtt));
            buff->next = NULL;
            if (sk->wback == NULL) {
                sk->wfront=buff;
            } else {
                sk->wback->next = buff;
            }
            sk->wback = buff;
            buff->magic = TCP_WRITE_QUEUE_MAGIC;
        }

        /*
         * NOTE(review): from CLOSE_WAIT the post-FIN state would
         * conventionally be LAST_ACK; this code uses FIN_WAIT2 -
         * confirm against the rest of the state machine before changing.
         */
        if (sk->state == TCP_CLOSE_WAIT) {
            sk->state = TCP_FIN_WAIT2;
        } else {
            sk->state = TCP_FIN_WAIT1;
        }
    }
    release_sock(sk);
}
2051
2052
2053
2054
2055
2056
2057 static void
2058 tcp_write_xmit(struct sock *sk)
2059 {
2060 struct sk_buff *skb;
2061
2062 DPRINTF((DBG_TCP, "tcp_write_xmit(sk=%X)\n", sk));
2063
2064
2065
2066 if(sk->zapped)
2067 return;
2068
2069 while(sk->wfront != NULL &&
2070 before(sk->wfront->h.seq, sk->window_seq) &&
2071 sk->packets_out < sk->cong_window) {
2072 skb = sk->wfront;
2073 IS_SKB(skb);
2074 sk->wfront =(struct sk_buff *)skb->next;
2075 if (sk->wfront == NULL) sk->wback = NULL;
2076 skb->next = NULL;
2077 if (skb->magic != TCP_WRITE_QUEUE_MAGIC) {
2078 printk("tcp.c skb with bad magic(%X) on write queue. Squashing "
2079 "queue\n", skb->magic);
2080 sk->wfront = NULL;
2081 sk->wback = NULL;
2082 return;
2083 }
2084 skb->magic = 0;
2085 DPRINTF((DBG_TCP, "Sending a packet.\n"));
2086
2087
2088 if (before(skb->h.seq, sk->rcv_ack_seq +1)) {
2089 sk->retransmits = 0;
2090 kfree_skb(skb, FREE_WRITE);
2091 if (!sk->dead) wake_up(sk->sleep);
2092 } else {
2093 sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
2094 }
2095 }
2096 }
2097
2098
2099
2100
2101
2102
2103 void
2104 sort_send(struct sock *sk)
2105 {
2106 struct sk_buff *list = NULL;
2107 struct sk_buff *skb,*skb2,*skb3;
2108
2109 for (skb = sk->send_head; skb != NULL; skb = skb2) {
2110 skb2 = (struct sk_buff *)skb->link3;
2111 if (list == NULL || before (skb2->h.seq, list->h.seq)) {
2112 skb->link3 = list;
2113 sk->send_tail = skb;
2114 list = skb;
2115 } else {
2116 for (skb3 = list; ; skb3 = (struct sk_buff *)skb3->link3) {
2117 if (skb3->link3 == NULL ||
2118 before(skb->h.seq, skb3->link3->h.seq)) {
2119 skb->link3 = skb3->link3;
2120 skb3->link3 = skb;
2121 if (skb->link3 == NULL) sk->send_tail = skb;
2122 break;
2123 }
2124 }
2125 }
2126 }
2127 sk->send_head = list;
2128 }
2129
2130
2131
/*
 * Process the ACK field of an incoming segment.
 *
 * Validates the ACK against the send window, handles a shrinking peer
 * window by moving not-yet-permitted segments back to the write queue,
 * updates the congestion window and RTT estimate, frees fully
 * acknowledged buffers from the retransmit list, kicks further
 * transmission, and drives FIN-related state transitions.
 *
 * Returns 1 if the ACK was acceptable, 0 if it should be ignored.
 */
static int
tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
{
    unsigned long ack;
    int flag = 0;   /* bit 0: progress made; bit 1: rtt sampled; bit 2: window work */

    if(sk->zapped)
        return(1);

    ack = ntohl(th->ack_seq);
    DPRINTF((DBG_TCP, "tcp_ack ack=%d, window=%d, "
        "sk->rcv_ack_seq=%d, sk->window_seq = %d\n",
        ack, ntohs(th->window), sk->rcv_ack_seq, sk->window_seq));

    /* ACK outside [rcv_ack_seq-1, send_seq+1]: old duplicate or bogus. */
    if (after(ack, sk->send_seq+1) || before(ack, sk->rcv_ack_seq-1)) {
        if (after(ack, sk->send_seq) ||
           (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)) {
            return(0);
        }
        if (sk->keepopen) {
            reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
        }
        return(1);
    }

    /* Segment carries data too; remember that for the retransmit test. */
    if (len != th->doff*4) flag |= 1;

    /* Peer shrank its window: pull now-out-of-window segments off the
     * retransmit list and put them back on the write queue. */
    if (after(sk->window_seq, ack+ntohs(th->window))) {
        struct sk_buff *skb;
        struct sk_buff *skb2;
        struct sk_buff *wskb = NULL;    /* last node re-queued, keeps order */

        skb2 = sk->send_head;
        sk->send_head = NULL;
        sk->send_tail = NULL;

        flag |= 4;

        sk->window_seq = ack + ntohs(th->window);
        cli();    /* the lists are shared with the timer/IRQ path */
        while (skb2 != NULL) {
            skb = skb2;
            skb2 = (struct sk_buff *)skb->link3;
            skb->link3 = NULL;
            if (after(skb->h.seq, sk->window_seq)) {
                /* Beyond the new window: back to the write queue. */
                if (sk->packets_out > 0) sk->packets_out--;

                if (skb->next != NULL) {
#ifdef OLD_WAY
                    /* Manual unlink from device/arp queues. */
                    int i;

                    if (skb->next != skb) {
                        skb->next->prev = skb->prev;
                        skb->prev->next = skb->next;
                    }

                    for(i = 0; i < DEV_NUMBUFFS; i++) {
                        if (skb->dev->buffs[i] == skb) {
                            if (skb->next == skb)
                                skb->dev->buffs[i] = NULL;
                            else
                                skb->dev->buffs[i] = skb->next;
                            break;
                        }
                    }
                    if (arp_q == skb) {
                        if (skb->next == skb) arp_q = NULL;
                        else arp_q = skb->next;
                    }
#else
                    skb_unlink(skb);
#endif
                }

                /* Insert after the previously re-queued node so the
                 * original sequence order is preserved. */
                skb->magic = TCP_WRITE_QUEUE_MAGIC;
                if (wskb == NULL) {
                    skb->next = sk->wfront;
                    sk->wfront = skb;
                } else {
                    skb->next = wskb->next;
                    wskb->next = skb;
                }
                if (sk->wback == wskb) sk->wback = skb;
                wskb = skb;
            } else {
                /* Still inside the window: stays on the retransmit list. */
                if (sk->send_head == NULL) {
                    sk->send_head = skb;
                    sk->send_tail = skb;
                } else {
                    sk->send_tail->link3 = skb;
                    sk->send_tail = skb;
                }
                skb->link3 = NULL;
            }
        }
        sti();
    }

    /* Keep head/tail consistent if either end went empty. */
    if (sk->send_tail == NULL || sk->send_head == NULL) {
        sk->send_head = NULL;
        sk->send_tail = NULL;
        sk->packets_out= 0;
    }

    sk->window_seq = ack + ntohs(th->window);

    /* Congestion window growth on fresh ACKs, capped at 2048. */
    if (sk->cong_window < 2048 && ack != sk->rcv_ack_seq) {
        if (sk->exp_growth) sk->cong_window *= 2;
        else sk->cong_window++;
    }

    DPRINTF((DBG_TCP, "tcp_ack: Updating rcv ack sequence.\n"));
    sk->rcv_ack_seq = ack;

    /* Free everything on the retransmit list covered by this ACK. */
    while(sk->send_head != NULL) {
        /* Sanity: the list must be in sequence order. */
        if (sk->send_head->link3 &&
            after(sk->send_head->h.seq, sk->send_head->link3->h.seq)) {
            printk("INET: tcp.c: *** bug send_list out of order.\n");
            sort_send(sk);
        }

        if (before(sk->send_head->h.seq, ack+1)) {
            struct sk_buff *oskb;

            sk->retransmits = 0;

            if (sk->packets_out > 0) sk->packets_out --;
            DPRINTF((DBG_TCP, "skb=%X skb->h.seq = %d acked ack=%d\n",
                sk->send_head, sk->send_head->h.seq, ack));

            if (!sk->dead) wake_up(sk->sleep);

            oskb = sk->send_head;

            /* RTT sample - only if this ack wasn't for retransmitted or
             * ambiguous data (Karn's rule, roughly). */
            if (sk->retransmits == 0 && !(flag&2)) {
                long abserr, rtt = jiffies - oskb->when;

                if (sk->state == TCP_SYN_SENT || sk->state == TCP_SYN_RECV)
                    /* First sample: take it as-is. */
                    sk->rtt = rtt;
                else {
                    /* Standard smoothed RTT and mean deviation. */
                    abserr = (rtt > sk->rtt) ? rtt - sk->rtt : sk->rtt - rtt;
                    sk->rtt = (7 * sk->rtt + rtt) >> 3;
                    sk->mdev = (3 * sk->mdev + abserr) >> 2;
                }
                sk->backoff = 0;
            }
            flag |= (2|4);

            /* Clamp the RTT estimate to sane jiffy bounds. */
            if (sk->rtt < 10) sk->rtt = 10;
            if (sk->rtt > 12000) sk->rtt = 12000;

            cli();

            oskb = sk->send_head;
            IS_SKB(oskb);
            sk->send_head =(struct sk_buff *)oskb->link3;
            if (sk->send_head == NULL) {
                sk->send_tail = NULL;
            }

            skb_unlink(oskb);    /* in case it sits on a device queue */
            sti();
            oskb->magic = 0;
            kfree_skb(oskb, FREE_WRITE);
            if (!sk->dead) wake_up(sk->sleep);
        } else {
            break;
        }
    }

    /* The ACK may have opened the window: try to transmit more. */
    if (sk->wfront != NULL) {
        if (after (sk->window_seq, sk->wfront->h.seq) &&
            sk->packets_out < sk->cong_window) {
            flag |= 1;
            tcp_write_xmit(sk);
        }
    } else {
        if (sk->send_head == NULL && sk->ack_backlog == 0 &&
            sk->state != TCP_TIME_WAIT && !sk->keepopen) {
            DPRINTF((DBG_TCP, "Nothing to do, going to sleep.\n"));
            if (!sk->dead) wake_up(sk->sleep);

            if (sk->keepopen)
                reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
            else
                delete_timer(sk);
        } else {
            if (sk->state != (unsigned char) sk->keepopen) {
                reset_timer(sk, TIME_WRITE,
                    backoff(sk->backoff) * (2 * sk->mdev + sk->rtt));
            }
            if (sk->state == TCP_TIME_WAIT) {
                reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
            }
        }
    }

    /* Everything acked and queues empty: flush any partial buffer. */
    if (sk->packets_out == 0 && sk->send_tmp != NULL &&
        sk->wfront == NULL && sk->send_head == NULL) {
        flag |= 1;
        tcp_send_partial(sk);
    }

    /* Final handshakes of the close sequence. */
    if (sk->state == TCP_TIME_WAIT) {
        if (!sk->dead) wake_up(sk->sleep);
        if (sk->rcv_ack_seq == sk->send_seq && sk->acked_seq == sk->fin_seq) {
            flag |= 1;
            sk->state = TCP_CLOSE;
            sk->shutdown = SHUTDOWN_MASK;
        }
    }

    if (sk->state == TCP_LAST_ACK || sk->state == TCP_FIN_WAIT2) {
        if (!sk->dead) wake_up(sk->sleep);
        if (sk->rcv_ack_seq == sk->send_seq) {
            flag |= 1;
            if (sk->acked_seq != sk->fin_seq) {
                tcp_time_wait(sk);
            } else {
                DPRINTF((DBG_TCP, "tcp_ack closing socket - %X\n", sk));
                tcp_send_ack(sk->send_seq, sk->acked_seq, sk,
                    th, sk->daddr);
                sk->shutdown = SHUTDOWN_MASK;
                sk->state = TCP_CLOSE;
            }
        }
    }

    /* No progress and the oldest unacked segment timed out: retransmit. */
    if (((!flag) || (flag&4)) && sk->send_head != NULL &&
        (sk->send_head->when + backoff(sk->backoff) * (2 * sk->mdev + sk->rtt)
        < jiffies)) {
        sk->exp_growth = 0;
        ip_retransmit(sk, 0);
    }

    DPRINTF((DBG_TCP, "leaving tcp_ack\n"));
    return(1);
}
2392
2393
2394
2395
2396
2397
2398
/*
 * Queue the data portion of an incoming segment on sk->rqueue.
 *
 * Segments are inserted in sequence order (duplicates of equal or
 * smaller length are discarded), sk->acked_seq is advanced across every
 * now-contiguous segment, ACKs are sent or scheduled, and a FIN in the
 * stream shuts down the receive side.  Returns 0 in all cases.
 */
static int
tcp_data(struct sk_buff *skb, struct sock *sk,
     unsigned long saddr, unsigned short len)
{
    struct sk_buff *skb1, *skb2;
    struct tcphdr *th;
    int dup_dumped=0;

    th = skb->h.th;
    print_th(th);
    /* Strip the TCP header: skb->len becomes the payload length. */
    skb->len = len -(th->doff*4);

    DPRINTF((DBG_TCP, "tcp_data len = %d sk = %X:\n", skb->len, sk));

    sk->bytes_rcv += skb->len;
    if (skb->len == 0 && !th->fin && !th->urg && !th->psh) {
        /* Pure window-update/ack segment: nothing to queue. */
        if (!th->ack) tcp_send_ack(sk->send_seq, sk->acked_seq,sk, th, saddr);
        kfree_skb(skb, FREE_READ);
        return(0);
    }

    /* Data after the receive side was shut down: abort the connection. */
    if (sk->shutdown & RCV_SHUTDOWN) {
        sk->acked_seq = th->seq + skb->len + th->syn + th->fin;
        tcp_reset(sk->saddr, sk->daddr, skb->h.th,
            sk->prot, NULL, skb->dev);
        sk->state = TCP_CLOSE;
        sk->err = EPIPE;
        sk->shutdown = SHUTDOWN_MASK;
        DPRINTF((DBG_TCP, "tcp_data: closing socket - %X\n", sk));
        kfree_skb(skb, FREE_READ);
        if (!sk->dead) wake_up(sk->sleep);
        return(0);
    }

    /* Insert the segment into the receive queue in sequence order. */
    if (sk->rqueue == NULL) {
        DPRINTF((DBG_TCP, "tcp_data: skb = %X:\n", skb));
#ifdef OLDWAY
        sk->rqueue = skb;
        skb->next = skb;
        skb->prev = skb;
        skb->list = &sk->rqueue;
#else
        skb_queue_head(&sk->rqueue,skb);
#endif
        skb1= NULL;
    } else {
        DPRINTF((DBG_TCP, "tcp_data adding to chain sk = %X:\n", sk));
        /* Walk backwards from the newest segment to find the spot. */
        for(skb1=sk->rqueue->prev; ; skb1 =(struct sk_buff *)skb1->prev) {
            if(sk->debug)
            {
                printk("skb1=%p :", skb1);
                printk("skb1->h.th->seq = %ld: ", skb1->h.th->seq);
                printk("skb->h.th->seq = %ld\n",skb->h.th->seq);
                printk("copied_seq = %ld acked_seq = %ld\n", sk->copied_seq,
                    sk->acked_seq);
            }
#ifdef OLD
            if (after(th->seq+1, skb1->h.th->seq)) {
                skb->prev = skb1;
                skb->next = skb1->next;
                skb->next->prev = skb;
                skb1->next = skb;
                if (skb1 == sk->rqueue) sk->rqueue = skb;
                break;
            }
            if (skb1->prev == sk->rqueue) {
                skb->next= skb1;
                skb->prev = skb1->prev;
                skb->prev->next = skb;
                skb1->prev = skb;
                skb1 = NULL;

                break;
            }
#else
            /* Exact duplicate (and at least as long): replace it. */
            if (th->seq==skb1->h.th->seq && skb->len>= skb1->len)
            {
                skb_append(skb1,skb);
                skb_unlink(skb1);
                kfree_skb(skb1,FREE_READ);
                dup_dumped=1;
                skb1=NULL;
                break;
            }
            /* First segment that starts at or before us: insert after. */
            if (after(th->seq+1, skb1->h.th->seq))
            {
                skb_append(skb1,skb);
                break;
            }
            /* Walked the whole queue: we are the oldest segment. */
            if (skb1 == sk->rqueue)
            {
                skb_queue_head(&sk->rqueue, skb);
                break;
            }
#endif
        }
        DPRINTF((DBG_TCP, "skb = %X:\n", skb));
    }

    /* Stash the end-sequence of this segment in th->ack_seq. */
    th->ack_seq = th->seq + skb->len;
    if (th->syn) th->ack_seq++;
    if (th->fin) th->ack_seq++;

    if (before(sk->acked_seq, sk->copied_seq)) {
        printk("*** tcp.c:tcp_data bug acked < copied\n");
        sk->acked_seq = sk->copied_seq;
    }

    /* If this segment is (or fills up to) the next expected sequence,
     * advance acked_seq across every now-contiguous queued segment. */
    if ((!dup_dumped && (skb1 == NULL || skb1->acked)) || before(th->seq, sk->acked_seq+1)) {
        if (before(th->seq, sk->acked_seq+1)) {
            if (after(th->ack_seq, sk->acked_seq))
                sk->acked_seq = th->ack_seq;
            skb->acked = 1;

            /* In-sequence FIN: no more data will arrive. */
            if (skb->h.th->fin) {
                if (!sk->dead) wake_up(sk->sleep);
                sk->shutdown |= RCV_SHUTDOWN;
            }

            /* Sweep forward over previously out-of-order segments. */
            for(skb2 = (struct sk_buff *)skb->next;
                skb2 !=(struct sk_buff *) sk->rqueue;
                skb2 = (struct sk_buff *)skb2->next) {
                if (before(skb2->h.th->seq, sk->acked_seq+1)) {
                    if (after(skb2->h.th->ack_seq, sk->acked_seq))
                        sk->acked_seq = skb2->h.th->ack_seq;
                    skb2->acked = 1;

                    if (skb2->h.th->fin) {
                        sk->shutdown |= RCV_SHUTDOWN;
                        if (!sk->dead) wake_up(sk->sleep);
                    }

                    /* Force an immediate ACK - a hole was just filled. */
                    sk->ack_backlog = sk->max_ack_backlog;
                } else {
                    break;
                }
            }

            /* Decide between an immediate and a delayed ACK. */
            if (!sk->delay_acks ||
                sk->ack_backlog >= sk->max_ack_backlog ||
                sk->bytes_rcv > sk->max_unacked || th->fin) {
                /* ACK immediately (sent below). */
            } else {
                sk->ack_backlog++;
                if(sk->debug)
                    printk("Ack queued.\n");
                reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
            }
        }
    }

    /* Out-of-order segment: ack what we have, and shed unacked
     * queue entries if buffer space is running out. */
    if (!skb->acked) {
        while (sk->prot->rspace(sk) < sk->mtu) {
            skb1 = skb_peek(&sk->rqueue);
            if (skb1 == NULL) {
                printk("INET: tcp.c:tcp_data memory leak detected.\n");
                break;
            }

            /* Never drop data that was already acknowledged. */
            if (skb1->acked) {
                break;
            }

            skb_unlink(skb1);
#ifdef OLDWAY
            if (skb1->prev == skb1) {
                sk->rqueue = NULL;
            } else {
                sk->rqueue = (struct sk_buff *)skb1->prev;
                skb1->next->prev = skb1->prev;
                skb1->prev->next = skb1->next;
            }
#endif
            kfree_skb(skb1, FREE_READ);
        }
        tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
        sk->ack_backlog++;
        reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
    } else {
        tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
    }

    /* Let any blocked reader know data arrived. */
    if (!sk->dead) {
        if(sk->debug)
            printk("Data wakeup.\n");
        wake_up(sk->sleep);
    } else {
        DPRINTF((DBG_TCP, "data received on dead socket.\n"));
    }

    /* Both sides done: move towards close. */
    if (sk->state == TCP_FIN_WAIT2 &&
        sk->acked_seq == sk->fin_seq && sk->rcv_ack_seq == sk->send_seq) {
        DPRINTF((DBG_TCP, "tcp_data: entering last_ack state sk = %X\n", sk));

        sk->shutdown = SHUTDOWN_MASK;
        sk->state = TCP_LAST_ACK;
        if (!sk->dead) wake_up(sk->sleep);
    }

    return(0);
}
2634
2635
2636 static int
2637 tcp_urg(struct sock *sk, struct tcphdr *th, unsigned long saddr)
2638 {
2639 extern int kill_pg(int pg, int sig, int priv);
2640 extern int kill_proc(int pid, int sig, int priv);
2641
2642 if (!sk->dead) wake_up(sk->sleep);
2643
2644 if (sk->urginline) {
2645 th->urg = 0;
2646 th->psh = 1;
2647 return(0);
2648 }
2649
2650 if (!sk->urg) {
2651
2652 if (sk->proc != 0) {
2653 if (sk->proc > 0) {
2654 kill_proc(sk->proc, SIGURG, 1);
2655 } else {
2656 kill_pg(-sk->proc, SIGURG, 1);
2657 }
2658 }
2659 }
2660 sk->urg++;
2661 return(0);
2662 }
2663
2664
2665
2666 static int
2667 tcp_fin(struct sock *sk, struct tcphdr *th,
2668 unsigned long saddr, struct device *dev)
2669 {
2670 DPRINTF((DBG_TCP, "tcp_fin(sk=%X, th=%X, saddr=%X, dev=%X)\n",
2671 sk, th, saddr, dev));
2672
2673 if (!sk->dead) {
2674 wake_up(sk->sleep);
2675 }
2676
2677 switch(sk->state) {
2678 case TCP_SYN_RECV:
2679 case TCP_SYN_SENT:
2680 case TCP_ESTABLISHED:
2681
2682 sk->fin_seq = th->seq+1;
2683 sk->state = TCP_CLOSE_WAIT;
2684 if (th->rst) sk->shutdown = SHUTDOWN_MASK;
2685 break;
2686
2687 case TCP_CLOSE_WAIT:
2688 case TCP_FIN_WAIT2:
2689 break;
2690
2691 case TCP_FIN_WAIT1:
2692
2693 sk->fin_seq = th->seq+1;
2694 sk->state = TCP_FIN_WAIT2;
2695 break;
2696
2697 default:
2698 case TCP_TIME_WAIT:
2699 sk->state = TCP_LAST_ACK;
2700
2701
2702 reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
2703 return(0);
2704 }
2705 sk->ack_backlog++;
2706
2707 return(0);
2708 }
2709
2710
2711
/*
 * accept() for TCP: dequeue an established connection from a listening
 * socket's queue, blocking unless O_NONBLOCK is set.
 *
 * Returns the new connection's sock, or NULL with sk->err set to
 * EINVAL (not listening), EAGAIN (would block) or ERESTARTSYS
 * (interrupted by a signal).
 */
static struct sock *
tcp_accept(struct sock *sk, int flags)
{
    struct sock *newsk;
    struct sk_buff *skb;

    DPRINTF((DBG_TCP, "tcp_accept(sk=%X, flags=%X, addr=%s)\n",
        sk, flags, in_ntoa(sk->saddr)));

    /* accept() is only meaningful on a listening socket. */
    if (sk->state != TCP_LISTEN) {
        sk->err = EINVAL;
        return(NULL);
    }

    /* Interrupts stay off across the test-and-sleep to avoid losing a
     * wakeup between get_firstr() and interruptible_sleep_on(). */
    cli();
    sk->inuse = 1;
    while((skb = get_firstr(sk)) == NULL) {
        if (flags & O_NONBLOCK) {
            sti();
            release_sock(sk);
            sk->err = EAGAIN;
            return(NULL);
        }

        release_sock(sk);
        interruptible_sleep_on(sk->sleep);
        if (current->signal & ~current->blocked) {
            sti();
            sk->err = ERESTARTSYS;
            return(NULL);
        }
        /* Re-lock and retry the queue. */
        sk->inuse = 1;
    }
    sti();

    /* The queued SYN skb carries the new connection's socket. */
    newsk = skb->sk;

    kfree_skb(skb, FREE_READ);
    sk->ack_backlog--;
    release_sock(sk);
    return(newsk);
}
2760
2761
2762
/*
 * connect() for TCP: validate the user-supplied address, pick an
 * initial sequence number, build and transmit a SYN carrying our MSS
 * option, and enter SYN_SENT with the connect timer armed.
 *
 * Returns 0 on success or a negative errno (EISCONN, EINVAL,
 * EAFNOSUPPORT, ENETUNREACH, ENOMEM, or a verify_area failure).
 */
static int
tcp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len)
{
    struct sk_buff *buff;
    struct sockaddr_in sin;
    struct device *dev=NULL;
    unsigned char *ptr;
    int tmp;
    struct tcphdr *t1;
    int err;

    if (sk->state != TCP_CLOSE) return(-EISCONN);
    if (addr_len < 8) return(-EINVAL);

    /* Copy the address in from user space. */
    err=verify_area(VERIFY_READ, usin, addr_len);
    if(err)
        return err;

    memcpy_fromfs(&sin,usin, min(sizeof(sin), addr_len));

    if (sin.sin_family && sin.sin_family != AF_INET) return(-EAFNOSUPPORT);

    DPRINTF((DBG_TCP, "TCP connect daddr=%s\n", in_ntoa(sin.sin_addr.s_addr)));

    /* Connecting to a broadcast address makes no sense for TCP. */
    if (chk_addr(sin.sin_addr.s_addr) == IS_BROADCAST) {
        DPRINTF((DBG_TCP, "TCP connection to broadcast address not allowed\n"));
        return(-ENETUNREACH);
    }

    sk->inuse = 1;
    sk->daddr = sin.sin_addr.s_addr;
    /* Clock-driven initial send sequence number. */
    sk->send_seq = jiffies * SEQ_TICK - seq_offset;
    sk->rcv_ack_seq = sk->send_seq -1;
    sk->err = 0;
    sk->dummy_th.dest = sin.sin_port;
    /* Drop the lock while the allocation may sleep (GFP_KERNEL). */
    release_sock(sk);

    buff = (struct sk_buff *) sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
    if (buff == NULL) {
        return(-ENOMEM);
    }
    sk->inuse = 1;
    buff->mem_addr = buff;
    buff->mem_len = MAX_SYN_SIZE;
    /* TCP header (20) plus 4 bytes of MSS option. */
    buff->len = 24;
    buff->sk = sk;
    buff->free = 1;
    t1 = (struct tcphdr *)(buff + 1);

    /* Prepend the IP/link headers; also resolves the output device. */
    tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
        IPPROTO_TCP, NULL, MAX_SYN_SIZE);
    if (tmp < 0) {
        sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
        release_sock(sk);
        return(-ENETUNREACH);
    }
    buff->len += tmp;
    t1 = (struct tcphdr *)((char *)t1 +tmp);

    /* Start from the template header, then set the SYN fields. */
    memcpy(t1,(void *)&(sk->dummy_th), sizeof(*t1));
    t1->seq = ntohl(sk->send_seq++);
    buff->h.seq = sk->send_seq;
    t1->ack = 0;
    t1->window = 2;
    t1->res1=0;
    t1->res2=0;
    t1->rst = 0;
    t1->urg = 0;
    t1->psh = 0;
    t1->syn = 1;
    t1->urg_ptr = 0;
    t1->doff = 6;    /* 5 header words + 1 option word */

    /* MSS option: kind 2, length 4, value = device MTU budget. */
    ptr = (unsigned char *)(t1+1);
    ptr[0] = 2;
    ptr[1] = 4;
    ptr[2] = (dev->mtu- HEADER_SIZE) >> 8;
    ptr[3] = (dev->mtu- HEADER_SIZE) & 0xff;
    sk->mtu = dev->mtu - HEADER_SIZE;
    tcp_send_check(t1, sk->saddr, sk->daddr,
          sizeof(struct tcphdr) + 4, sk);

    /* Enter SYN_SENT and arm the retransmit/connect timer. */
    sk->state = TCP_SYN_SENT;
    sk->rtt = TCP_CONNECT_TIME;
    reset_timer(sk, TIME_WRITE, TCP_CONNECT_TIME);
    sk->retransmits = TCP_RETR2 - TCP_SYN_RETRIES;

    sk->prot->queue_xmit(sk, dev, buff, 0);

    release_sock(sk);
    return(0);
}
2860
2861
2862
2863 static int
2864 tcp_sequence(struct sock *sk, struct tcphdr *th, short len,
2865 struct options *opt, unsigned long saddr)
2866 {
2867
2868
2869
2870
2871
2872
2873 DPRINTF((DBG_TCP, "tcp_sequence(sk=%X, th=%X, len = %d, opt=%d, saddr=%X)\n",
2874 sk, th, len, opt, saddr));
2875
2876 if (between(th->seq, sk->acked_seq, sk->acked_seq + sk->window)||
2877 between(th->seq + len-(th->doff*4), sk->acked_seq + 1,
2878 sk->acked_seq + sk->window) ||
2879 (before(th->seq, sk->acked_seq) &&
2880 after(th->seq + len -(th->doff*4), sk->acked_seq + sk->window))) {
2881 return(1);
2882 }
2883 DPRINTF((DBG_TCP, "tcp_sequence: rejecting packet.\n"));
2884
2885
2886
2887
2888
2889 if (after(th->seq, sk->acked_seq + sk->window)) {
2890 if(!th->rst)
2891 tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
2892 return(0);
2893 }
2894
2895
2896 if (th->ack && len == (th->doff * 4) &&
2897 after(th->seq, sk->acked_seq - 32767) &&
2898 !th->fin && !th->syn) return(1);
2899
2900 if (!th->rst) {
2901
2902 tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
2903 }
2904 return(0);
2905 }
2906
2907
2908
2909
2910
/*
 * Main TCP input routine.  Called from the IP layer for fresh packets
 * (redo == 0) and again from release_sock() for packets that were
 * queued on the socket backlog while the socket was in use (redo == 1).
 * Validates the packet, finds the owning socket and dispatches on the
 * connection state.  Always returns 0.
 */
int
tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
	unsigned long daddr, unsigned short len,
	unsigned long saddr, int redo, struct inet_protocol * protocol)
{
  struct tcphdr *th;
  struct sock *sk;

  if (!skb) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv skb = NULL\n"));
	return(0);
  }
#if 0
  if (!protocol) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv protocol = NULL\n"));
	return(0);
  }

  if (!opt) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv opt = NULL\n"));
  }
#endif
  if (!dev) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv dev = NULL\n"));
	return(0);
  }
  th = skb->h.th;

  /* Look the connection up by full 4-tuple. */
  sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
  DPRINTF((DBG_TCP, "<<\n"));
  DPRINTF((DBG_TCP, "len = %d, redo = %d, skb=%X\n", len, redo, skb));

  /* A zapped (reset) socket is treated as if no socket existed. */
  if (sk!=NULL && sk->zapped)
	sk=NULL;

  if (sk) {
	DPRINTF((DBG_TCP, "sk = %X:\n", sk));
  }

  if (!redo) {
	/* First pass only: verify the checksum before anything else. */
	if (tcp_check(th, len, saddr, daddr )) {
		skb->sk = NULL;
		DPRINTF((DBG_TCP, "packet dropped with bad checksum.\n"));
		if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: bad checksum\n");
		kfree_skb(skb,FREE_READ);
		return(0);
	}

	/* No socket: answer anything but an RST with an RST. */
	if (sk == NULL) {
		if (!th->rst)
		{
			th->seq = ntohl(th->seq);
			tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev);
		}
		skb->sk = NULL;
		kfree_skb(skb, FREE_READ);
		return(0);
	}

	/* Attach the buffer to the socket and reset its bookkeeping.
	 * Note saddr/daddr are swapped: they describe the reply path. */
	skb->len = len;
	skb->sk = sk;
	skb->acked = 0;
	skb->used = 0;
	skb->free = 0;
	skb->urg_used = 0;
	skb->saddr = daddr;
	skb->daddr = saddr;

	/* Sequence number is kept in host order from here on. */
	th->seq = ntohl(th->seq);

	/* If the socket is busy, park the packet on its backlog ring;
	 * release_sock() will feed it back through here with redo=1. */
	cli();
	if (sk->inuse) {
		if (sk->back_log == NULL) {
			sk->back_log = skb;
			skb->next = skb;
			skb->prev = skb;
		} else {
			skb->next = sk->back_log;
			skb->prev = sk->back_log->prev;
			skb->prev->next = skb;
			skb->next->prev = skb;
		}
		sti();
		return(0);
	}
	sk->inuse = 1;
	sti();
  } else {
	if (!sk) {
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv bug sk=NULL redo = 1\n"));
		return(0);
	}
  }

  /* NOTE(review): this path returns while holding sk->inuse and
   * without freeing skb -- looks like it should never trigger, but
   * if it did the socket would stay locked.  Verify. */
  if (!sk->prot) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv sk->prot = NULL \n"));
	return(0);
  }

  /* Enforce the receive buffer quota. */
  if (sk->rmem_alloc + skb->mem_len >= sk->rcvbuf) {
	skb->sk = NULL;
	DPRINTF((DBG_TCP, "dropping packet due to lack of buffer space.\n"));
	kfree_skb(skb, FREE_READ);
	release_sock(sk);
	return(0);
  }
  sk->rmem_alloc += skb->mem_len;

  DPRINTF((DBG_TCP, "About to do switch.\n"));

  /* State machine proper. */
  switch(sk->state) {
	/*
	 * LAST_ACK only differs from the established group in how an
	 * RST is handled; otherwise it falls through below.
	 */
	case TCP_LAST_ACK:
		if (th->rst) {
			sk->zapped=1;
			sk->err = ECONNRESET;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			if (!sk->dead) {
				wake_up(sk->sleep);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* fall through */

	case TCP_ESTABLISHED:
	case TCP_CLOSE_WAIT:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
	case TCP_TIME_WAIT:
		/* Out-of-window segment: re-ack (unless RST) and drop. */
		if (!tcp_sequence(sk, th, len, opt, saddr)) {
			if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: not in seq\n");
			if(!th->rst)
				tcp_send_ack(sk->send_seq, sk->acked_seq,
					     sk, th, saddr);
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* In-window RST kills the connection. */
		if (th->rst) {
			sk->zapped=1;
			/* This means the thing should really be closed. */
			sk->err = ECONNRESET;

			if (sk->state == TCP_CLOSE_WAIT) {
				sk->err = EPIPE;
			}

			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			if (!sk->dead) {
				wake_up(sk->sleep);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);

		}
#if 0
		if (opt && (opt->security != 0 ||
			    opt->compartment != 0 || th->syn)) {
			sk->err = ECONNRESET;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			tcp_reset(daddr, saddr, th, sk->prot, opt,dev);
			if (!sk->dead) {
				wake_up(sk->sleep);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
#endif
		/* Process ACK, URG, FIN, then data; each helper returning
		 * nonzero (or tcp_ack returning 0) means "drop and stop". */
		if (th->ack) {
			if (!tcp_ack(sk, th, saddr, len)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
		}
		if (th->urg) {
			if (tcp_urg(sk, th, saddr)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
		}

		if (th->fin && tcp_fin(sk, th, saddr, dev)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (tcp_data(skb, sk, saddr, len)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* On success tcp_data has queued skb; don't free it here. */
		release_sock(sk);
		return(0);

	case TCP_CLOSE:
		if (sk->dead || sk->daddr) {
			DPRINTF((DBG_TCP, "packet received for closed,dead socket\n"));
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* Unconnected, not-dead socket in CLOSE: answer with RST. */
		if (!th->rst) {
			if (!th->ack)
				th->ack_seq = 0;
			tcp_reset(daddr, saddr, th, sk->prot, opt,dev);
		}
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);

	case TCP_LISTEN:
		/* RSTs to a listener are silently ignored. */
		if (th->rst) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* An ACK to a listener is answered with RST (RFC 793). */
		if (th->ack) {
			tcp_reset(daddr, saddr, th, sk->prot, opt,dev);
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (th->syn) {
#if 0
			if (opt->security != 0 || opt->compartment != 0) {
				tcp_reset(daddr, saddr, th, prot, opt,dev);
				release_sock(sk);
				return(0);
			}
#endif
			/* New connection attempt: tcp_conn_request takes
			 * ownership of skb from here. */
			tcp_conn_request(sk, skb, daddr, saddr, opt, dev);
			release_sock(sk);
			return(0);
		}

		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);

	/* Remaining states (e.g. SYN_RECV) get a sequence check and then
	 * share the SYN_SENT handling below. */
	default:
		if (!tcp_sequence(sk, th, len, opt, saddr)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* fall through */

	case TCP_SYN_SENT:
		/* RST during the handshake means connection refused. */
		if (th->rst) {
			sk->err = ECONNREFUSED;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			sk->zapped = 1;
			if (!sk->dead) {
				wake_up(sk->sleep);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
#if 0
		if (opt->security != 0 || opt->compartment != 0) {
			sk->err = ECONNRESET;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			tcp_reset(daddr, saddr, th, sk->prot, opt, dev);
			if (!sk->dead) {
				wake_up(sk->sleep);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
#endif
		/* Simultaneous open: SYN without ACK moves us to SYN_RECV. */
		if (!th->ack) {
			if (th->syn) {
				sk->state = TCP_SYN_RECV;
			}

			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* ACK present: complete the handshake. */
		switch(sk->state) {
			case TCP_SYN_SENT:
				/* Bad ACK of our SYN: reset the peer. */
				if (!tcp_ack(sk, th, saddr, len)) {
					tcp_reset(daddr, saddr, th,
						  sk->prot, opt,dev);
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}

				/* ACK without SYN: not a SYN-ACK, ignore. */
				if (!th->syn) {
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}

				/* Valid SYN-ACK: ack the peer's SYN and
				 * fall through to the SYN_RECV completion. */
				sk->acked_seq = th->seq+1;
				sk->fin_seq = th->seq;
				tcp_send_ack(sk->send_seq, th->seq+1,
					     sk, th, sk->daddr);
				/* fall through */

			case TCP_SYN_RECV:
				if (!tcp_ack(sk, th, saddr, len)) {
					tcp_reset(daddr, saddr, th,
						  sk->prot, opt, dev);
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}
				/* Handshake complete. */
				sk->state = TCP_ESTABLISHED;

				/* Pick up peer options (e.g. MSS) and
				 * latch the reply port. */
				tcp_options(sk, th);
				sk->dummy_th.dest = th->source;
				sk->copied_seq = sk->acked_seq-1;
				if (!sk->dead) {
					wake_up(sk->sleep);
				}

				/* The segment may already carry URG/data/FIN. */
				if (th->urg) {
					if (tcp_urg(sk, th, saddr)) {
						kfree_skb(skb, FREE_READ);
						release_sock(sk);
						return(0);
					}
				}
				if (tcp_data(skb, sk, saddr, len))
					kfree_skb(skb, FREE_READ);

				/* NOTE(review): th points into skb, which may
				 * just have been freed above -- verify. */
				if (th->fin) tcp_fin(sk, th, saddr, dev);
				release_sock(sk);
				return(0);
		}

		/* Other states that fell in via default: normal URG/data/FIN
		 * processing. */
		if (th->urg) {
			if (tcp_urg(sk, th, saddr)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
		}

		if (tcp_data(skb, sk, saddr, len)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (!th->fin) {
			release_sock(sk);
			return(0);
		}
		tcp_fin(sk, th, saddr, dev);
		release_sock(sk);
		return(0);
  }
}
3325
3326
3327
3328
3329
3330
3331 static void
3332 tcp_write_wakeup(struct sock *sk)
3333 {
3334 struct sk_buff *buff;
3335 struct tcphdr *t1;
3336 struct device *dev=NULL;
3337 int tmp;
3338
3339 if (sk->zapped)
3340 return;
3341
3342 if (sk -> state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) return;
3343
3344 buff = (struct sk_buff *) sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
3345 if (buff == NULL) return;
3346
3347 buff->mem_addr = buff;
3348 buff->mem_len = MAX_ACK_SIZE;
3349 buff->len = sizeof(struct tcphdr);
3350 buff->free = 1;
3351 buff->sk = sk;
3352 DPRINTF((DBG_TCP, "in tcp_write_wakeup\n"));
3353 t1 = (struct tcphdr *)(buff + 1);
3354
3355
3356 tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
3357 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
3358 if (tmp < 0) {
3359 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
3360 return;
3361 }
3362
3363 buff->len += tmp;
3364 t1 = (struct tcphdr *)((char *)t1 +tmp);
3365
3366 memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
3367
3368
3369
3370
3371
3372 t1->seq = ntohl(sk->send_seq-1);
3373 t1->ack = 1;
3374 t1->res1= 0;
3375 t1->res2= 0;
3376 t1->rst = 0;
3377 t1->urg = 0;
3378 t1->psh = 0;
3379 t1->fin = 0;
3380 t1->syn = 0;
3381 t1->ack_seq = ntohl(sk->acked_seq);
3382 t1->window = ntohs(sk->prot->rspace(sk));
3383 t1->doff = sizeof(*t1)/4;
3384 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
3385
3386
3387
3388
3389 sk->prot->queue_xmit(sk, dev, buff, 1);
3390 }
3391
3392
/*
 * TCP's protocol operations table, plugged into the generic INET
 * socket layer.  The initializer is positional; slot comments below
 * follow the function names -- verify exact field names against the
 * struct proto declaration in sock.h.
 */
struct proto tcp_prot = {
  sock_wmalloc,			/* write-buffer allocation */
  sock_rmalloc,			/* read-buffer allocation */
  sock_wfree,			/* write-buffer release */
  sock_rfree,			/* read-buffer release */
  sock_rspace,			/* free receive-buffer space */
  sock_wspace,			/* free send-buffer space */
  tcp_close,
  tcp_read,
  tcp_write,
  tcp_sendto,
  tcp_recvfrom,
  ip_build_header,		/* TCP sits directly on IP */
  tcp_connect,
  tcp_accept,
  ip_queue_xmit,
  tcp_retransmit,
  tcp_write_wakeup,
  tcp_read_wakeup,
  tcp_rcv,
  tcp_select,
  tcp_ioctl,
  NULL,				/* unused slot -- TODO confirm meaning */
  tcp_shutdown,
  128,				/* header reservation -- verify vs sock.h */
  0,				/* retransmit count, starts at zero */
  {NULL,},			/* per-port socket array, initially empty */
  "TCP"				/* protocol name for diagnostics */
};