This source file includes following definitions.
- min
- __print_th
- print_th
- get_firstr
- diff
- tcp_select_window
- tcp_time_wait
- tcp_retransmit
- tcp_err
- tcp_readable
- tcp_select
- tcp_ioctl
- tcp_check
- tcp_send_check
- tcp_send_skb
- tcp_dequeue_partial
- tcp_send_partial
- tcp_enqueue_partial
- tcp_send_ack
- tcp_build_header
- tcp_write
- tcp_sendto
- tcp_read_wakeup
- cleanup_rbuf
- tcp_read_urg
- tcp_read
- tcp_shutdown
- tcp_recvfrom
- tcp_reset
- tcp_options
- default_mask
- tcp_conn_request
- tcp_close
- tcp_write_xmit
- sort_send
- tcp_ack
- tcp_data
- tcp_check_urg
- tcp_urg
- tcp_fin
- tcp_accept
- tcp_connect
- tcp_sequence
- tcp_rcv
- tcp_write_wakeup
- tcp_send_probe0
- tcp_setsockopt
- tcp_getsockopt
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84 #include <linux/types.h>
85 #include <linux/sched.h>
86 #include <linux/mm.h>
87 #include <linux/string.h>
88 #include <linux/socket.h>
89 #include <linux/sockios.h>
90 #include <linux/termios.h>
91 #include <linux/in.h>
92 #include <linux/fcntl.h>
93 #include "inet.h"
94 #include "dev.h"
95 #include "ip.h"
96 #include "protocol.h"
97 #include "icmp.h"
98 #include "tcp.h"
99 #include "skbuff.h"
100 #include "sock.h"
101 #include "arp.h"
102 #include <linux/errno.h>
103 #include <linux/timer.h>
104 #include <asm/system.h>
105 #include <asm/segment.h>
106 #include <linux/mm.h>
107
108 #define SEQ_TICK 3
109 unsigned long seq_offset;
110 #define SUBNETSARELOCAL
111
/*
 * Return the smaller of two unsigned values.
 * NOTE(review): result is returned as int, matching the historical callers.
 */
static __inline__ int
min(unsigned int a, unsigned int b)
{
	return (a < b) ? a : b;
}
118
119
/*
 * Dump every field of a TCP header to the console, followed by the first
 * four option bytes.  Debug aid only; values are converted from network
 * byte order where appropriate.
 */
static void __print_th(struct tcphdr *th)
{
	unsigned char *ptr;

	printk("TCP header:\n");
	printk("    source=%d, dest=%d, seq =%ld, ack_seq = %ld\n",
		ntohs(th->source), ntohs(th->dest),
		ntohl(th->seq), ntohl(th->ack_seq));
	printk("    fin=%d, syn=%d, rst=%d, psh=%d, ack=%d, urg=%d res1=%d res2=%d\n",
		th->fin, th->syn, th->rst, th->psh, th->ack,
		th->urg, th->res1, th->res2);
	printk("    window = %d, check = %d urg_ptr = %d\n",
		ntohs(th->window), ntohs(th->check), ntohs(th->urg_ptr));
	printk("    doff = %d\n", th->doff);
	/* Options (if any) start immediately after the fixed 20-byte header. */
	ptr =(unsigned char *)(th + 1);
	printk("    options = %d %d %d %d\n", ptr[0], ptr[1], ptr[2], ptr[3]);
}
137
138 static inline void print_th(struct tcphdr *th)
139 {
140 if (inet_debug == DBG_TCP)
141 __print_th(th);
142 }
143
144
145 static struct sk_buff *
146 get_firstr(struct sock *sk)
147 {
148 return skb_dequeue(&sk->rqueue);
149 }
150
151
152
153
154
/*
 * Absolute difference between two sequence numbers.
 * The unsigned subtraction wraps modulo 2^bits, so the signed view of the
 * result gives the shortest signed distance; negate it when negative.
 */
static long
diff(unsigned long seq1, unsigned long seq2)
{
	long delta = (long)(seq1 - seq2);

	if (delta > 0)
		return delta;
	/* -delta is exactly the original's two's-complement ~delta + 1. */
	return -delta;
}
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
/*
 * Choose the receive window to advertise for this socket.
 * Keeps advertising the current window unless the free receive space has
 * grown meaningfully; in particular it never shrinks the offered window
 * (new_window < sk->window keeps the old value), which avoids pulling back
 * an offer the peer may already be using.
 */
static int tcp_select_window(struct sock *sk)
{
	int new_window = sk->prot->rspace(sk);

	/*
	 * Reject windows smaller than one MSS (capped at MAX_WINDOW/2) to
	 * avoid silly-window advertisements; never go below what we already
	 * advertised.
	 */
	if (new_window < min(sk->mss, MAX_WINDOW/2) ||
	    new_window < sk->window)
		return(sk->window);
	return(new_window);
}
200
201
202
/*
 * Move the socket into TIME_WAIT: both directions are shut down, the
 * owner (if still alive) is told the state changed, and the 2*MSL
 * close timer is armed to reap the socket.
 */
static void tcp_time_wait(struct sock *sk)
{
	sk->state = TCP_TIME_WAIT;
	sk->shutdown = SHUTDOWN_MASK;
	if (!sk->dead)
		sk->state_change(sk);	/* wake anyone sleeping on this socket */
	reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
}
211
212
213
214
215
216
217
218
/*
 * Retransmit queued data.  When 'all' is set this is a bulk retransmit and
 * the congestion state is left alone; otherwise a loss is assumed, so the
 * slow-start threshold is halved and the congestion window collapses to
 * one segment before handing off to ip_retransmit().
 */
static void
tcp_retransmit(struct sock *sk, int all)
{
	if (all) {
		ip_retransmit(sk, all);
		return;
	}

	/* Loss signal: remember half the current window as ssthresh ... */
	sk->ssthresh = sk->cong_window >> 1;

	sk->cong_count = 0;

	/* ... and restart slow-start from a single segment. */
	sk->cong_window = 1;

	ip_retransmit(sk, all);
}
236
237
238
239
240
241
242
243
244
245
/*
 * ICMP error handler for TCP.  'header' points at the offending IP header
 * as returned inside the ICMP payload; we step past it to reach the
 * embedded TCP header, look up the owning socket, and translate the ICMP
 * code into a socket error.  Source quench shrinks the congestion window;
 * fatal errors abort a connection still in SYN_SENT.
 */
void
tcp_err(int err, unsigned char *header, unsigned long daddr,
	unsigned long saddr, struct inet_protocol *protocol)
{
	struct tcphdr *th;
	struct sock *sk;
	struct iphdr *iph=(struct iphdr *)header;

	/* Skip the IP header (ihl is in 32-bit words) to find the TCP header. */
	header+=4*iph->ihl;

	DPRINTF((DBG_TCP, "TCP: tcp_err(%d, hdr=%X, daddr=%X saddr=%X, protocol=%X)\n",
					err, header, daddr, saddr, protocol));

	th =(struct tcphdr *)header;
	sk = get_sock(&tcp_prot, th->source, daddr, th->dest, saddr);
	print_th(th);

	if (sk == NULL) return;		/* not one of our connections */

	/* Negative err is already a -errno from a lower layer. */
	if(err<0)
	{
		sk->err = -err;
		sk->error_report(sk);
		return;
	}

	if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8)) {
		/*
		 * Congestion hint from the network: back the window off a
		 * little, but never below 4 segments.
		 */
		if (sk->cong_window > 4) sk->cong_window--;
		return;
	}

	DPRINTF((DBG_TCP, "TCP: icmp_err got error\n"));
	/* Map the ICMP code to a Unix errno for the next user call. */
	sk->err = icmp_err_convert[err & 0xff].errno;

	/*
	 * Only a connection attempt (SYN_SENT) is torn down on a fatal ICMP;
	 * established connections just carry the pending error.
	 */
	if (icmp_err_convert[err & 0xff].fatal) {
		if (sk->state == TCP_SYN_SENT) {
			sk->state = TCP_CLOSE;
			sk->error_report(sk);
		}
	}
	return;
}
297
298
299
300
301
302
303
/*
 * Count how many bytes a read would currently return on this socket.
 * Walks the receive queue, which is kept as a circular list, adding up
 * in-sequence bytes starting at copied_seq+1.  Stops at the first gap or
 * after a segment carrying PSH.  One byte is subtracted for urgent data
 * that would be read out-of-band.  Runs with interrupts off because the
 * queue is also touched at interrupt time.
 */
static int
tcp_readable(struct sock *sk)
{
	unsigned long counted;
	unsigned long amount;
	struct sk_buff *skb;
	int count=0;
	int sum;
	unsigned long flags;

	DPRINTF((DBG_TCP, "tcp_readable(sk=%X)\n", sk));
	if(sk && sk->debug)
		printk("tcp_readable: %p - ",sk);

	if (sk == NULL || skb_peek(&sk->rqueue) == NULL)
	{
		if(sk && sk->debug)
			printk("empty\n");
		return(0);
	}

	/* First sequence number the user has not yet consumed. */
	counted = sk->copied_seq+1;
	amount = 0;

	save_flags(flags);
	cli();
	skb =(struct sk_buff *)sk->rqueue;

	/* Walk the circular receive list. */
	do {
		count++;
#ifdef OLD
		/* Old corruption guard: bail after 20 packets without a PSH. */
		if (count > 20) {
			restore_flags(flags);
			DPRINTF((DBG_TCP, "tcp_readable, more than 20 packets without a psh\n"));
			printk("tcp_read: possible read_queue corruption.\n");
			return(amount);
		}
#endif
		if (before(counted, skb->h.th->seq))
			break;		/* hole in the sequence space: stop */
		/* Bytes in this skb at or after 'counted'; SYN occupies a seq. */
		sum = skb->len -(counted - skb->h.th->seq);
		if (skb->h.th->syn)
			sum++;
		if (sum >= 0) {
			amount += sum;
			if (skb->h.th->syn) amount--;	/* SYN is not user data */
			counted += sum;
		}
		if (amount && skb->h.th->psh) break;	/* deliver up to the push */
		skb =(struct sk_buff *)skb->next;
	} while(skb != sk->rqueue);
	/* Don't count the urgent byte if it will be read out-of-band. */
	if (amount && !sk->urginline && sk->urg_data &&
	    (sk->urg_seq - sk->copied_seq) <= (counted - sk->copied_seq))
		amount--;
	restore_flags(flags);
	DPRINTF((DBG_TCP, "tcp readable returning %d bytes\n", amount));
	if(sk->debug)
		printk("got %lu bytes.\n",amount);
	return(amount);
}
366
367
368
369
370
371
372
/*
 * select() support for TCP sockets.  Registers the caller on the socket's
 * wait queue, then reports readiness:
 *   SEL_IN  - data queued (or a pending connection on a listener), a
 *             pending error, or a receive shutdown;
 *   SEL_OUT - at least one MSS of write space, unless still handshaking
 *             or send direction is shut down;
 *   SEL_EX  - pending error or urgent data.
 * The socket is locked (inuse) for the duration and released on every path.
 */
static int
tcp_select(struct sock *sk, int sel_type, select_table *wait)
{
	DPRINTF((DBG_TCP, "tcp_select(sk=%X, sel_type = %d, wait = %X)\n",
						sk, sel_type, wait));

	sk->inuse = 1;
	switch(sel_type) {
	case SEL_IN:
		if(sk->debug)
			printk("select in");
		select_wait(sk->sleep, wait);
		if(sk->debug)
			printk("-select out");
		if (skb_peek(&sk->rqueue) != NULL) {
			/* Listener: any queued skb is a pending connection. */
			if (sk->state == TCP_LISTEN || tcp_readable(sk)) {
				release_sock(sk);
				if(sk->debug)
					printk("-select ok data\n");
				return(1);
			}
		}
		if (sk->err != 0)	/* error is "readable" too */
		{
			release_sock(sk);
			if(sk->debug)
				printk("-select ok error");
			return(1);
		}
		if (sk->shutdown & RCV_SHUTDOWN) {
			release_sock(sk);
			if(sk->debug)
				printk("-select ok down\n");
			return(1);
		} else {
			release_sock(sk);
			if(sk->debug)
				printk("-select fail\n");
			return(0);
		}
	case SEL_OUT:
		select_wait(sk->sleep, wait);
		if (sk->shutdown & SEND_SHUTDOWN) {
			DPRINTF((DBG_TCP,
				"write select on shutdown socket.\n"));

			/* Writes would fail; report not writable. */
			release_sock(sk);
			return(0);
		}

		/*
		 * Writable only when a full MSS of buffer space exists, and
		 * the three-way handshake has finished.
		 */
		if (sk->prot->wspace(sk) >= sk->mss) {
			release_sock(sk);
			/* can't write during the handshake */
			if (sk->state == TCP_SYN_RECV ||
			    sk->state == TCP_SYN_SENT) return(0);
			return(1);
		}
		DPRINTF((DBG_TCP,
			"tcp_select: sleeping on write sk->wmem_alloc = %d, "
			"sk->packets_out = %d\n"
			"sk->wback = %X, sk->wfront = %X\n"
			"sk->send_seq = %u, sk->window_seq=%u\n",
				sk->wmem_alloc, sk->packets_out,
				sk->wback, sk->wfront,
				sk->send_seq, sk->window_seq));

		release_sock(sk);
		return(0);
	case SEL_EX:
		select_wait(sk->sleep,wait);
		if (sk->err || sk->urg_data) {
			release_sock(sk);
			return(1);
		}
		release_sock(sk);
		return(0);
	}

	release_sock(sk);
	return(0);
}
460
461
/*
 * ioctl() handler for TCP sockets.
 *   TIOCINQ    - bytes currently readable (invalid on a listener);
 *   SIOCATMARK - 1 if the read pointer sits at the urgent mark;
 *   TIOCOUTQ   - free space in the send buffer.
 * All results are copied out to user space after verify_area().
 */
int
tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int err;
	DPRINTF((DBG_TCP, "tcp_ioctl(sk=%X, cmd = %d, arg=%X)\n", sk, cmd, arg));
	switch(cmd) {
	case DDIOCSDBG:
		return(dbg_ioctl((void *) arg, DBG_TCP));

	case TIOCINQ:
#ifdef FIXME
	case FIONREAD:
#endif
		{
			unsigned long amount;

			if (sk->state == TCP_LISTEN) return(-EINVAL);

			sk->inuse = 1;		/* lock socket while counting */
			amount = tcp_readable(sk);
			release_sock(sk);
			DPRINTF((DBG_TCP, "returning %d\n", amount));
			err=verify_area(VERIFY_WRITE,(void *)arg,
						   sizeof(unsigned long));
			if(err)
				return err;
			put_fs_long(amount,(unsigned long *)arg);
			return(0);
		}
	case SIOCATMARK:
		{
			/* At-mark: urgent data pending and it is the next byte. */
			int answ = sk->urg_data && sk->urg_seq == sk->copied_seq+1;

			err = verify_area(VERIFY_WRITE,(void *) arg,
						  sizeof(unsigned long));
			if (err)
				return err;
			put_fs_long(answ,(int *) arg);
			return(0);
		}
	case TIOCOUTQ:
		{
			unsigned long amount;

			if (sk->state == TCP_LISTEN) return(-EINVAL);
			amount = sk->prot->wspace(sk);
			err=verify_area(VERIFY_WRITE,(void *)arg,
						   sizeof(unsigned long));
			if(err)
				return err;
			put_fs_long(amount,(unsigned long *)arg);
			return(0);
		}
	default:
		return(-EINVAL);
	}
}
519
520
521
/*
 * Compute the TCP checksum (one's-complement sum, i386 assembler).
 * Folds in the pseudo-header (saddr, daddr, protocol, length), then the
 * TCP header+data in 32-bit words, then any trailing 16-bit and 8-bit
 * remainder, and finally folds the 32-bit sum down to 16 bits.
 * A zero source address means "us" (my_addr()).
 */
unsigned short
tcp_check(struct tcphdr *th, int len,
	  unsigned long saddr, unsigned long daddr)
{
	unsigned long sum;

	if (saddr == 0) saddr = my_addr();
	print_th(th);
	/* Pseudo-header: daddr + saddr + (len<<16 | proto) with carry. */
	__asm__("\t addl %%ecx,%%ebx\n"
		"\t adcl %%edx,%%ebx\n"
		"\t adcl $0, %%ebx\n"
		: "=b"(sum)
		: "0"(daddr), "c"(saddr), "d"((ntohs(len) << 16) + IPPROTO_TCP*256)
		: "cx","bx","dx" );

	/* Sum the body a 32-bit word at a time. */
	if (len > 3) {
		__asm__("\tclc\n"
			"1:\n"
			"\t lodsl\n"
			"\t adcl %%eax, %%ebx\n"
			"\t loop 1b\n"
			"\t adcl $0, %%ebx\n"
			: "=b"(sum) , "=S"(th)
			: "0"(sum), "c"(len/4) ,"1"(th)
			: "ax", "cx", "bx", "si" );
	}

	/* Fold the 32-bit sum into 16 bits (add high half into low half). */
	__asm__("\t movl %%ebx, %%ecx\n"
		"\t shrl $16,%%ecx\n"
		"\t addw %%cx, %%bx\n"
		"\t adcw $0, %%bx\n"
		: "=b"(sum)
		: "0"(sum)
		: "bx", "cx");

	/* Leftover 16-bit word, if len wasn't a multiple of 4. */
	if ((len & 2) != 0) {
		__asm__("\t lodsw\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum), "=S"(th)
			: "0"(sum) ,"1"(th)
			: "si", "ax", "bx");
	}

	/* Leftover final byte, zero-padded to 16 bits. */
	if ((len & 1) != 0) {
		__asm__("\t lodsb\n"
			"\t movb $0,%%ah\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum)
			: "0"(sum) ,"S"(th)
			: "si", "ax", "bx");
	}

	/* One's complement of the folded sum is the checksum. */
	return((~sum) & 0xffff);
}
582
583
584 void tcp_send_check(struct tcphdr *th, unsigned long saddr,
585 unsigned long daddr, int len, struct sock *sk)
586 {
587 th->check = 0;
588 th->check = tcp_check(th, len, saddr, daddr);
589 return;
590 }
591
/*
 * Hand a fully built TCP segment to the output path.  Sanity-checks the
 * buffer, computes the checksum, advances send_seq by the payload length
 * (SYN and FIN each consume one sequence number), and either transmits
 * immediately or appends to the write queue when the peer's window, a
 * pending retransmit, or the congestion window forbids sending now.
 */
static void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
{
	int size;

	/* TCP header + payload length, measured from the TCP header. */
	size = skb->len - ((unsigned char *) skb->h.th - skb->data);

	/* Malformed buffer: header outside the skb, or negative payload. */
	if (size < sizeof(struct tcphdr) || size > skb->len) {
		printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
			skb, skb->data, skb->h.th, skb->len);
		kfree_skb(skb, FREE_WRITE);
		return;
	}

	/* An empty segment is only legal if it carries SYN or FIN. */
	if (size == sizeof(struct tcphdr)) {
		if(!skb->h.th->syn && !skb->h.th->fin) {
			printk("tcp_send_skb: attempt to queue a bogon.\n");
			kfree_skb(skb,FREE_WRITE);
			return;
		}
	}

	tcp_send_check(skb->h.th, sk->saddr, sk->daddr, size, sk);

	/* Sequence space consumed: payload bytes plus SYN/FIN flags. */
	size -= 4*skb->h.th->doff;
	if (skb->h.th->syn)
		size++;
	if (skb->h.th->fin)
		size++;

	sk->send_seq += size;
	skb->h.seq = sk->send_seq;
	if (after(sk->send_seq , sk->window_seq) ||
	    (sk->retransmits && sk->timeout == TIME_WRITE) ||
	     sk->packets_out >= sk->cong_window) {
		/* Can't send now: park the segment on the write queue. */
		DPRINTF((DBG_TCP, "sk->cong_window = %d, sk->packets_out = %d\n",
					sk->cong_window, sk->packets_out));
		DPRINTF((DBG_TCP, "sk->send_seq = %d, sk->window_seq = %d\n",
					sk->send_seq, sk->window_seq));
		skb->next = NULL;
		skb->magic = TCP_WRITE_QUEUE_MAGIC;
		if (sk->wback == NULL) {
			sk->wfront = skb;
		} else {
			sk->wback->next = skb;
		}
		sk->wback = skb;
		/* Zero-window and nothing in flight: start probing. */
		if (before(sk->window_seq, sk->wfront->h.seq) &&
		    sk->send_head == NULL &&
		    sk->ack_backlog == 0)
			reset_timer(sk, TIME_PROBE0, sk->rto);
	} else {
		sk->prot->queue_xmit(sk, skb->dev, skb, 0);
	}
}
651
/*
 * Atomically detach the pending partial (Nagle) segment from the socket,
 * cancelling its flush timer.  Returns NULL if there was none.  Interrupts
 * are disabled because the partial pointer is also manipulated from timer
 * context.
 */
struct sk_buff * tcp_dequeue_partial(struct sock * sk)
{
	struct sk_buff * skb;
	unsigned long flags;

	save_flags(flags);
	cli();
	skb = sk->partial;
	if (skb) {
		sk->partial = NULL;
		del_timer(&sk->partial_timer);
	}
	restore_flags(flags);
	return skb;
}
667
668 static void tcp_send_partial(struct sock *sk)
669 {
670 struct sk_buff *skb;
671
672 if (sk == NULL)
673 return;
674 while ((skb = tcp_dequeue_partial(sk)) != NULL)
675 tcp_send_skb(sk, skb);
676 }
677
/*
 * Install 'skb' as the socket's pending partial segment and (re)arm the
 * flush timer for one second (HZ ticks).  If another partial segment was
 * already pending it is displaced and transmitted immediately — but only
 * after interrupts are re-enabled, to keep the critical section short.
 */
void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk)
{
	struct sk_buff * tmp;
	unsigned long flags;

	save_flags(flags);
	cli();
	tmp = sk->partial;
	if (tmp)
		del_timer(&sk->partial_timer);	/* old timer is now stale */
	sk->partial = skb;
	/* NOTE(review): expires is set to HZ (1s from now, relative form). */
	sk->partial_timer.expires = HZ;
	sk->partial_timer.function = (void (*)(unsigned long)) tcp_send_partial;
	sk->partial_timer.data = (unsigned long) sk;
	add_timer(&sk->partial_timer);
	restore_flags(flags);
	if (tmp)
		tcp_send_skb(sk, tmp);		/* push out the displaced one */
}
697
698
699
/*
 * Build and transmit a bare ACK segment acknowledging 'ack', with our
 * sequence number 'sequence', in reply to header 'th' from 'daddr'.
 * If no buffer memory is available the ACK is deferred by bumping
 * ack_backlog and arming a short write timer.  A zapped (reset) socket
 * sends nothing.
 */
static void
tcp_send_ack(unsigned long sequence, unsigned long ack,
	     struct sock *sk,
	     struct tcphdr *th, unsigned long daddr)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	struct device *dev = NULL;
	int tmp;

	if(sk->zapped)
		return;		/* connection has been reset; stay silent */

	/*
	 * Allocate from the atomic pool: we may be called from interrupt
	 * context.  On failure, remember we owe an ACK and retry via timer.
	 */
	buff = sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL) {
		sk->ack_backlog++;
		if (sk->timeout != TIME_WRITE && tcp_connected(sk->state)) {
			reset_timer(sk, TIME_WRITE, 10);
		}
		if (inet_debug == DBG_SLIP) printk("\rtcp_ack: malloc failed\n");
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_ACK_SIZE;
	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;
	t1 =(struct tcphdr *) buff->data;

	/* Lay down the IP (and link) header first. */
	tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
				IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0) {
		buff->free=1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		if (inet_debug == DBG_SLIP) printk("\rtcp_ack: build_header failed\n");
		return;
	}
	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)t1 +tmp);

	/* Start from the incoming header, then overwrite field by field. */
	memcpy(t1, th, sizeof(*t1));

	/* Never claim a sequence number beyond the peer's offered window. */
	if (after(sequence, sk->window_seq))
		sequence = sk->window_seq;

	/* Swap the port pair: we are replying. */
	t1->dest = th->source;
	t1->source = th->dest;
	t1->seq = ntohl(sequence);
	t1->ack = 1;
	sk->window = tcp_select_window(sk);
	t1->window = ntohs(sk->window);
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	t1->fin = 0;
	if (ack == sk->acked_seq) {
		/* This ACK is fully up to date: clear the backlog bookkeeping. */
		sk->ack_backlog = 0;
		sk->bytes_rcv = 0;
		sk->ack_timed = 0;
		if (sk->send_head == NULL && sk->wfront == NULL && sk->timeout == TIME_WRITE)
		{
			if(sk->keepopen)
				reset_timer(sk,TIME_KEEPOPEN,TCP_TIMEOUT_LEN);
			else
				delete_timer(sk);
		}
	}
	t1->ack_seq = ntohl(ack);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), sk);
	if (sk->debug)
		printk("\rtcp_ack: seq %lx ack %lx\n", sequence, ack);
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
785
786
787
/*
 * Fill in a TCP header for outgoing data from the socket's template
 * (dummy_th), stamping the current send sequence, ACK field and a freshly
 * selected window.  'push' zero means this write empties the user buffer,
 * so PSH is set.  Returns the header length in bytes.
 */
static int
tcp_build_header(struct tcphdr *th, struct sock *sk, int push)
{
	memcpy(th,(void *) &(sk->dummy_th), sizeof(*th));
	th->seq = htonl(sk->send_seq);
	/* push==0 -> last chunk of this write -> set PSH. */
	th->psh =(push == 0) ? 1 : 0;
	th->doff = sizeof(*th)/4;
	th->ack = 1;
	th->fin = 0;
	/* This segment carries our ACK, so the pending-ack state is cleared. */
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	sk->ack_timed = 0;
	th->ack_seq = htonl(sk->acked_seq);
	sk->window = tcp_select_window(sk);
	th->window = htons(sk->window);

	return(sizeof(*th));
}
808
809
810
811
812
/*
 * Copy user data into TCP segments and queue them for transmission.
 * Main send path: waits (unless nonblock) for the connection to be
 * established and for buffer memory, coalesces small writes through the
 * partial-packet (Nagle) machinery, and returns the number of bytes
 * accepted or a negative errno.  MSG_OOB writes urgent data.
 */
static int
tcp_write(struct sock *sk, unsigned char *from,
	  int len, int nonblock, unsigned flags)
{
	int copied = 0;
	int copy;
	int tmp;
	struct sk_buff *skb;
	struct sk_buff *send_tmp;	/* candidate for the partial queue */
	unsigned char *buff;
	struct proto *prot;
	struct device *dev = NULL;

	DPRINTF((DBG_TCP, "tcp_write(sk=%X, from=%X, len=%d, nonblock=%d, flags=%X)\n",
					sk, from, len, nonblock, flags));

	sk->inuse=1;		/* lock the socket */
	prot = sk->prot;
	while(len > 0) {
		/* Pending error: report it (after any bytes already copied). */
		if (sk->err) {
			release_sock(sk);
			if (copied) return(copied);
			tmp = -sk->err;
			sk->err = 0;
			return(tmp);
		}

		/* Send direction closed: EPIPE. */
		if (sk->shutdown & SEND_SHUTDOWN) {
			release_sock(sk);
			sk->err = EPIPE;
			if (copied) return(copied);
			sk->err = 0;
			return(-EPIPE);
		}

		/* Wait until the connection can carry data. */
		while(sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) {
			if (sk->err) {
				release_sock(sk);
				if (copied) return(copied);
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			/* Not connecting either: the connection is dead. */
			if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 1\n"));
				if (copied) return(copied);

				if (sk->err) {
					tmp = -sk->err;
					sk->err = 0;
					return(tmp);
				}

				/* NOTE(review): SIGPIPE only for keepopen here. */
				if (sk->keepopen) {
					send_sig(SIGPIPE, current, 0);
				}
				return(-EPIPE);
			}

			if (nonblock || copied) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 2\n"));
				if (copied) return(copied);
				return(-EAGAIN);
			}

			/*
			 * Sleep until the state changes.  The cli/recheck
			 * pattern closes the race against an interrupt-time
			 * state transition.
			 */
			release_sock(sk);
			cli();
			if (sk->state != TCP_ESTABLISHED &&
			    sk->state != TCP_CLOSE_WAIT && sk->err == 0) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					DPRINTF((DBG_TCP, "tcp_write: return 3\n"));
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
		}

		/*
		 * First try to top up an existing partial segment with this
		 * data (Nagle coalescing).
		 */
		if ((skb = tcp_dequeue_partial(sk)) != NULL) {
			int hdrlen;

			/* Distance from skb start to the end of the TCP header. */
			hdrlen = ((unsigned long)skb->h.th - (unsigned long)skb->data)
				+ sizeof(struct tcphdr);

			/* Urgent data never goes into a coalesced segment. */
			if (!(flags & MSG_OOB)) {
				copy = min(sk->mss - (skb->len - hdrlen), len);

				if (copy <= 0) {
					printk("TCP: **bug**: \"copy\" <= 0!!\n");
					copy = 0;
				}

				memcpy_fromfs(skb->data + skb->len, from, copy);
				skb->len += copy;
				from += copy;
				copied += copy;
				len -= copy;
			}
			/* Full MSS, urgent, or nothing in flight: send now. */
			if ((skb->len - hdrlen) >= sk->mss ||
			    (flags & MSG_OOB) ||
			    !sk->packets_out)
				tcp_send_skb(sk, skb);
			else
				tcp_enqueue_partial(skb, sk);
			continue;
		}

		/*
		 * Decide how much to put in a fresh segment: at most the
		 * usable window, clamped to one MSS, and never more than
		 * the caller has left.
		 */
		copy = sk->window_seq - sk->send_seq;
		if (copy <= 0 || copy < (sk->max_window >> 1) || copy > sk->mss)
			copy = sk->mss;
		if (copy > len)
			copy = len;

		/*
		 * A sub-MSS, non-urgent chunk gets a full-MTU buffer so
		 * later writes can be appended (it may become a partial).
		 */
		send_tmp = NULL;
		if (copy < sk->mss && !(flags & MSG_OOB)) {
			/* Unlock while sleeping in the allocator. */
			release_sock(sk);
			skb = prot->wmalloc(sk, sk->mtu + 128 + prot->max_header + sizeof(*skb), 0, GFP_KERNEL);
			sk->inuse = 1;
			send_tmp = skb;
		} else {
			release_sock(sk);
			skb = prot->wmalloc(sk, copy + prot->max_header + sizeof(*skb), 0, GFP_KERNEL);
			sk->inuse = 1;
		}

		/* No memory: either fail (nonblock) or wait for wmem to free. */
		if (skb == NULL) {
			if (nonblock ) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 4\n"));
				if (copied) return(copied);
				return(-EAGAIN);
			}

			/* Sleep only if allocation pressure hasn't eased. */
			tmp = sk->wmem_alloc;
			release_sock(sk);
			cli();

			if (tmp <= sk->wmem_alloc &&
			  (sk->state == TCP_ESTABLISHED||sk->state == TCP_CLOSE_WAIT)
				&& sk->err == 0) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					DPRINTF((DBG_TCP, "tcp_write: return 5\n"));
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
			continue;
		}

		skb->len = 0;
		skb->sk = sk;
		skb->free = 0;

		buff = skb->data;

		/* IP (and link) header first ... */
		tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev,
				 IPPROTO_TCP, sk->opt, skb->mem_len,sk->ip_tos,sk->ip_ttl);
		if (tmp < 0 ) {
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 6\n"));
			if (copied) return(copied);
			return(tmp);
		}
		skb->len += tmp;
		skb->dev = dev;
		buff += tmp;
		skb->h.th =(struct tcphdr *) buff;
		/* ... then the TCP header (len-copy==0 means last chunk -> PSH). */
		tmp = tcp_build_header((struct tcphdr *)buff, sk, len-copy);
		if (tmp < 0) {
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 7\n"));
			if (copied) return(copied);
			return(tmp);
		}

		if (flags & MSG_OOB) {
			((struct tcphdr *)buff)->urg = 1;
			((struct tcphdr *)buff)->urg_ptr = ntohs(copy);
		}
		skb->len += tmp;
		memcpy_fromfs(buff+tmp, from, copy);

		from += copy;
		copied += copy;
		len -= copy;
		skb->len += copy;
		skb->free = 0;

		/* Sub-MSS chunk with data in flight: hold it as a partial. */
		if (send_tmp != NULL && sk->packets_out) {
			tcp_enqueue_partial(send_tmp, sk);
			continue;
		}
		tcp_send_skb(sk, skb);
	}
	sk->err = 0;

	/*
	 * Nagle: flush the partial segment now if nothing is in flight, or
	 * if nagle is disabled and the window has room.
	 */
	if(sk->partial &&
	   ((!sk->packets_out)
	/* If not nagling we can send on the before case too.. */
	      || (sk->nonagle && before(sk->send_seq , sk->window_seq))
	))
		tcp_send_partial(sk);

	release_sock(sk);
	DPRINTF((DBG_TCP, "tcp_write: return 8\n"));
	return(copied);
}
1082
1083
1084 static int
1085 tcp_sendto(struct sock *sk, unsigned char *from,
1086 int len, int nonblock, unsigned flags,
1087 struct sockaddr_in *addr, int addr_len)
1088 {
1089 struct sockaddr_in sin;
1090
1091 if (addr_len < sizeof(sin)) return(-EINVAL);
1092 memcpy_fromfs(&sin, addr, sizeof(sin));
1093 if (sin.sin_family && sin.sin_family != AF_INET) return(-EINVAL);
1094 if (sin.sin_port != sk->dummy_th.dest) return(-EINVAL);
1095 if (sin.sin_addr.s_addr != sk->daddr) return(-EINVAL);
1096 return(tcp_write(sk, from, len, nonblock, flags));
1097 }
1098
1099
/*
 * Send a window-update ACK after the reader has freed receive space, but
 * only if ACKs are actually owed (ack_backlog).  On allocation failure the
 * attempt is retried via a short write timer.
 */
static void
tcp_read_wakeup(struct sock *sk)
{
	int tmp;
	struct device *dev = NULL;
	struct tcphdr *t1;
	struct sk_buff *buff;

	DPRINTF((DBG_TCP, "in tcp read wakeup\n"));
	if (!sk->ack_backlog) return;	/* nothing owed to the peer */

	/* Atomic pool: this can run from timer/interrupt context. */
	buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
	if (buff == NULL) {
		/* Try again shortly. */
		reset_timer(sk, TIME_WRITE, 10);
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_ACK_SIZE;
	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;

	/* IP header first. */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
			       IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0) {
		buff->free=1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		return;
	}

	buff->len += tmp;
	t1 =(struct tcphdr *)(buff->data +tmp);

	memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
	/* Don't claim sequence space beyond the peer's window. */
	if (after(sk->send_seq, sk->window_seq))
		t1->seq = ntohl(sk->window_seq);
	else
		t1->seq = ntohl(sk->send_seq);
	t1->ack = 1;
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	sk->window = tcp_select_window(sk);	/* advertise the new space */
	t1->window = ntohs(sk->window);
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
1166
1167
1168
1169
1170
1171
1172
1173
/*
 * Reap consumed buffers from the receive queue and, if that freed
 * meaningful space, arrange for a window update: either an immediate ACK
 * (when a large amount of space opened up) or a delayed ACK via timer.
 */
static void
cleanup_rbuf(struct sock *sk)
{
	unsigned long flags;
	int left;
	struct sk_buff *skb;

	if(sk->debug)
		printk("cleaning rbuf for sk=%p\n", sk);

	save_flags(flags);
	cli();

	/* Remember free space before reaping, to detect any change below. */
	left = sk->prot->rspace(sk);

	/*
	 * Free fully-consumed buffers from the front of the queue; stop at
	 * the first one still in use.
	 */
	while((skb=skb_peek(&sk->rqueue)) != NULL )
	{
		if (!skb->used)
			break;
		skb_unlink(skb);
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
	}

	restore_flags(flags);

	DPRINTF((DBG_TCP, "sk->window left = %d, sk->prot->rspace(sk)=%d\n",
			sk->window - sk->bytes_rcv, sk->prot->rspace(sk)));

	if(sk->debug)
		printk("sk->rspace = %lu, was %d\n", sk->prot->rspace(sk),
					    left);
	if (sk->prot->rspace(sk) != left)
	{
		/*
		 * Space was freed — the peer may be blocked on a closed
		 * window, so we owe it a window update.
		 */
		sk->ack_backlog++;

		/*
		 * Big jump in free space (more than an MTU beyond what the
		 * advertised window already covers): ACK right away.
		 * Otherwise delay, hoping to piggyback on outgoing data.
		 */
		if ((sk->prot->rspace(sk) > (sk->window - sk->bytes_rcv + sk->mtu))) {
			/* Send an ack right now. */
			tcp_read_wakeup(sk);
		} else {
			/* Force it to send an ack soon. */
			int was_active = del_timer(&sk->timer);
			if (!was_active || TCP_ACK_TIME < sk->timer.expires) {
				reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
			} else
				add_timer(&sk->timer);	/* keep the earlier deadline */
		}
	}
}
1250
1251
1252
/*
 * Read the single byte of out-of-band (urgent) data.
 * Returns 1 with the byte copied out, 0 on EOF/shutdown, or a negative
 * errno.  -EINVAL if urgent data is delivered inline, absent, or already
 * consumed.  Blocks (unless nonblock) while the urgent byte is announced
 * but not yet arrived (URG_NOTYET).
 */
static int
tcp_read_urg(struct sock * sk, int nonblock,
	     unsigned char *to, int len, unsigned flags)
{
	struct wait_queue wait = { current, NULL };

	while (len > 0) {
		if (sk->urginline || !sk->urg_data || sk->urg_data == URG_READ)
			return -EINVAL;
		if (sk->urg_data & URG_VALID) {
			/* Low byte of urg_data holds the urgent byte itself. */
			char c = sk->urg_data;
			if (!(flags & MSG_PEEK))
				sk->urg_data = URG_READ;	/* consume it */
			put_fs_byte(c, to);
			return 1;
		}

		if (sk->err) {
			int tmp = -sk->err;
			sk->err = 0;
			return tmp;
		}

		if (sk->state == TCP_CLOSE || sk->done) {
			if (!sk->done) {
				sk->done = 1;
				return 0;
			}
			return -ENOTCONN;
		}

		if (sk->shutdown & RCV_SHUTDOWN) {
			sk->done = 1;
			return 0;
		}

		if (nonblock)
			return -EAGAIN;

		if (current->signal & ~current->blocked)
			return -ERESTARTSYS;

		/* Urgent byte announced but not arrived: sleep for it. */
		current->state = TASK_INTERRUPTIBLE;
		add_wait_queue(sk->sleep, &wait);
		if ((sk->urg_data & URG_NOTYET) && sk->err == 0 &&
		    !(sk->shutdown & RCV_SHUTDOWN))
			schedule();
		remove_wait_queue(sk->sleep, &wait);
		current->state = TASK_RUNNING;
	}
	return 0;
}
1305
1306
1307
/*
 * Copy received data to user space.  Walks the (circular) receive queue
 * from copied_seq+1, copying in-sequence bytes, marking exhausted buffers
 * used, and sleeping (unless nonblock) when no data is ready.  MSG_PEEK
 * reads without consuming (a local sequence counter is used instead of
 * sk->copied_seq).  Urgent data splits the read: reading stops just
 * before the mark, and the urgent byte is skipped unless urginline.
 * Returns bytes copied or a negative errno.
 */
static int tcp_read(struct sock *sk, unsigned char *to,
	int len, int nonblock, unsigned flags)
{
	struct wait_queue wait = { current, NULL };
	int copied = 0;
	unsigned long peek_seq;
	unsigned long *seq;	/* points at peek_seq or sk->copied_seq */
	unsigned long used;
	int err;

	if (len == 0)
		return 0;

	if (len < 0)
		return -EINVAL;

	err = verify_area(VERIFY_WRITE, to, len);
	if (err)
		return err;

	/* Can't read data from a listening socket. */
	if (sk->state == TCP_LISTEN)
		return -ENOTCONN;

	/* Urgent data is handled on a separate path. */
	if (flags & MSG_OOB)
		return tcp_read_urg(sk, nonblock, to, len, flags);

	peek_seq = sk->copied_seq;
	seq = &sk->copied_seq;
	if (flags & MSG_PEEK)
		seq = &peek_seq;	/* don't advance the real counter */

	add_wait_queue(sk->sleep, &wait);
	sk->inuse = 1;
	while (len > 0) {
		struct sk_buff * skb;
		unsigned long offset;

		/* Stop at the urgent mark; don't read across it. */
		if (copied && sk->urg_data && sk->urg_seq == 1+*seq)
			break;

		/* Set before scanning so a wakeup between scan and sleep isn't lost. */
		current->state = TASK_INTERRUPTIBLE;

		skb = sk->rqueue;
		do {
			if (!skb)
				break;
			if (before(1+*seq, skb->h.th->seq))
				break;	/* gap: next byte not here yet */
			offset = 1 + *seq - skb->h.th->seq;
			if (skb->h.th->syn)
				offset--;	/* SYN consumed a sequence number */
			if (offset < skb->len)
				goto found_ok_skb;
			if (!(flags & MSG_PEEK))
				skb->used = 1;	/* exhausted; reap later */
			skb = (struct sk_buff *)skb->next;
		} while (skb != sk->rqueue);

		if (copied)
			break;		/* got something; don't sleep for more */

		if (sk->err) {
			copied = -sk->err;
			sk->err = 0;
			break;
		}

		if (sk->state == TCP_CLOSE) {
			if (!sk->done) {
				sk->done = 1;	/* first read after close -> EOF */
				break;
			}
			copied = -ENOTCONN;
			break;
		}

		if (sk->shutdown & RCV_SHUTDOWN) {
			sk->done = 1;
			break;
		}

		if (nonblock) {
			copied = -EAGAIN;
			break;
		}

		/* Release resources/window, sleep until more data arrives. */
		cleanup_rbuf(sk);
		release_sock(sk);
		schedule();
		sk->inuse = 1;

		if (current->signal & ~current->blocked) {
			copied = -ERESTARTSYS;
			break;
		}
		continue;

	found_ok_skb:
		/* Bytes available in this skb from 'offset' onward. */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Trim the copy at the urgent mark / skip the urgent byte. */
		if (sk->urg_data) {
			unsigned long urg_offset = sk->urg_seq - (1 + *seq);
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sk->urginline) {
						/* skip the urgent byte */
						++*seq;
						offset++;
						used--;
					}
				} else
					used = urg_offset;	/* read up to the mark */
			}
		}

		/* Payload starts after the TCP header (doff 32-bit words). */
		memcpy_tofs(to,((unsigned char *)skb->h.th) +
			skb->h.th->doff*4 + offset, used);
		copied += used;
		len -= used;
		to += used;
		*seq += used;
		if (after(sk->copied_seq+1,sk->urg_seq))
			sk->urg_data = 0;	/* mark passed; urgent state done */
		if (!(flags & MSG_PEEK) && (used + offset >= skb->len))
			skb->used = 1;
	}
	remove_wait_queue(sk->sleep, &wait);
	current->state = TASK_RUNNING;

	/* Reap consumed buffers and send a window update if warranted. */
	cleanup_rbuf(sk);
	release_sock(sk);
	DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
	return copied;
}
1450
1451
1452
1453
1454
1455
/*
 * shutdown() for TCP.  Only the send side is interesting: flush any
 * partial segment, build a FIN carrying the current ACK, queue it behind
 * pending write data (or send immediately if the queue is empty), and
 * move to FIN_WAIT1/FIN_WAIT2.  No-op if a FIN is already in flight or
 * SEND_SHUTDOWN wasn't requested.
 */
void
tcp_shutdown(struct sock *sk, int how)
{
	struct sk_buff *buff;
	struct tcphdr *t1, *th;
	struct proto *prot;
	int tmp;
	struct device *dev = NULL;

	/* Already closing the send side: nothing to do. */
	if (sk->state == TCP_FIN_WAIT1 || sk->state == TCP_FIN_WAIT2) return;
	if (!(how & SEND_SHUTDOWN)) return;
	sk->inuse = 1;

	/* Don't let queued partial data linger behind the FIN. */
	if (sk->partial)
		tcp_send_partial(sk);

	prot =(struct proto *)sk->prot;
	th =(struct tcphdr *)&sk->dummy_th;
	release_sock(sk);	/* the allocation below may sleep */
	buff = prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
	if (buff == NULL) return;
	sk->inuse = 1;

	DPRINTF((DBG_TCP, "tcp_shutdown_send buff = %X\n", buff));
	buff->mem_addr = buff;
	buff->mem_len = MAX_RESET_SIZE;
	buff->sk = sk;
	buff->len = sizeof(*t1);
	t1 =(struct tcphdr *) buff->data;

	/* IP header first. */
	tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
			   IPPROTO_TCP, sk->opt,
			   sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
	if (tmp < 0) {
		buff->free=1;
		prot->wfree(sk,buff->mem_addr, buff->mem_len);
		release_sock(sk);
		DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
		return;
	}

	t1 =(struct tcphdr *)((char *)t1 +tmp);
	buff->len += tmp;
	buff->dev = dev;
	memcpy(t1, th, sizeof(*t1));
	t1->seq = ntohl(sk->send_seq);
	sk->send_seq++;		/* FIN consumes one sequence number */
	buff->h.seq = sk->send_seq;
	t1->ack = 1;
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->window = ntohs(sk->window=tcp_select_window(sk));
	t1->fin = 1;
	t1->rst = 0;
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

	/*
	 * Pending data in the write queue: the FIN must go out after it,
	 * so append rather than transmit.
	 */
	if (sk->wback != NULL) {
		buff->free=0;
		buff->next = NULL;
		sk->wback->next = buff;
		sk->wback = buff;
		buff->magic = TCP_WRITE_QUEUE_MAGIC;
	} else {
		sk->prot->queue_xmit(sk, dev, buff, 0);
	}

	/* NOTE(review): non-ESTABLISHED states all fall into FIN_WAIT2 here. */
	if (sk->state == TCP_ESTABLISHED) sk->state = TCP_FIN_WAIT1;
	else sk->state = TCP_FIN_WAIT2;

	release_sock(sk);
}
1541
1542
/*
 * recvfrom() for TCP: a plain read plus reporting the (fixed) peer
 * address.  The user's addr_len is read first, clamped to sizeof(sin),
 * and both buffers are verified before tcp_read() can sleep.
 */
static int
tcp_recvfrom(struct sock *sk, unsigned char *to,
	     int to_len, int nonblock, unsigned flags,
	     struct sockaddr_in *addr, int *addr_len)
{
	struct sockaddr_in sin;
	int len;
	int err;
	int result;

	/* Validate user pointers up front, before any blocking read. */
	err = verify_area(VERIFY_WRITE,addr_len,sizeof(long));
	if(err)
		return err;
	len = get_fs_long(addr_len);
	if(len > sizeof(sin))
		len = sizeof(sin);
	err=verify_area(VERIFY_WRITE, addr, len);
	if(err)
		return err;

	result=tcp_read(sk, to, to_len, nonblock, flags);

	if (result < 0) return(result);

	/* Peer identity is fixed on a connected TCP socket. */
	sin.sin_family = AF_INET;
	sin.sin_port = sk->dummy_th.dest;
	sin.sin_addr.s_addr = sk->daddr;

	memcpy_tofs(addr, &sin, len);
	put_fs_long(len, addr_len);
	return(result);
}
1578
1579
1580
/*
 * Build and transmit a bare RST segment in reply to the unacceptable
 * segment 'th' that arrived from saddr for daddr.  No socket is
 * involved: the reply is constructed entirely from the offending
 * header (RFC 793 reset generation).
 */
static void
tcp_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
	  struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	int tmp;

	/* GFP_ATOMIC: we can be called from the receive path. */
	buff = prot->wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
		return;

	DPRINTF((DBG_TCP, "tcp_reset buff = %X\n", buff));
	buff->mem_addr = buff;
	buff->mem_len = MAX_RESET_SIZE;
	buff->len = sizeof(*t1);
	buff->sk = NULL;	/* owned by no socket */
	buff->dev = dev;

	t1 =(struct tcphdr *) buff->data;

	/* Lay down the link/IP headers; tmp is their combined size. */
	tmp = prot->build_header(buff, saddr, daddr, &dev, IPPROTO_TCP, opt,
				 sizeof(struct tcphdr),tos,ttl);
	if (tmp < 0) {
		buff->free = 1;
		prot->wfree(NULL, buff->mem_addr, buff->mem_len);
		return;
	}
	t1 =(struct tcphdr *)((char *)t1 +tmp);
	buff->len += tmp;
	/* Start from a copy of the offending header, then overwrite. */
	memcpy(t1, th, sizeof(*t1));

	/* Swap endpoints: the reset goes back to the sender. */
	t1->dest = th->source;
	t1->source = th->dest;
	t1->rst = 1;
	t1->window = 0;

	if(th->ack)
	{
		/* RFC 793: if the offender carried an ACK, take our SEQ
		   from its ack field and send no ACK of our own. */
		t1->ack = 0;
		t1->seq = th->ack_seq;
		t1->ack_seq = 0;
	}
	else
	{
		/* Otherwise SEQ=0 and ACK exactly what the peer sent;
		   a SYN occupies one unit of sequence space.
		   NOTE(review): th->seq is fed to htonl() here, implying
		   the caller supplies it in host order — confirm against
		   the conversion done in tcp_rcv before this is called. */
		t1->ack = 1;
		if(!th->syn)
			t1->ack_seq=htonl(th->seq);
		else
			t1->ack_seq=htonl(th->seq+1);
		t1->seq=0;
	}

	/* Every other flag off; bare 20-byte header, no options. */
	t1->syn = 0;
	t1->urg = 0;
	t1->fin = 0;
	t1->psh = 0;
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, saddr, daddr, sizeof(*t1), NULL);
	prot->queue_xmit(NULL, dev, buff, 1);
}
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658 static void
1659 tcp_options(struct sock *sk, struct tcphdr *th)
1660 {
1661 unsigned char *ptr;
1662 int length=(th->doff*4)-sizeof(struct tcphdr);
1663 int mss_seen = 0;
1664
1665 ptr = (unsigned char *)(th + 1);
1666
1667 while(length>0)
1668 {
1669 int opcode=*ptr++;
1670 int opsize=*ptr++;
1671 switch(opcode)
1672 {
1673 case TCPOPT_EOL:
1674 return;
1675 case TCPOPT_NOP:
1676 length-=2;
1677 continue;
1678
1679 default:
1680 if(opsize<=2)
1681 return;
1682 switch(opcode)
1683 {
1684 case TCPOPT_MSS:
1685 if(opsize==4 && th->syn)
1686 {
1687 sk->mtu=min(sk->mtu,ntohs(*(unsigned short *)ptr));
1688 mss_seen = 1;
1689 }
1690 break;
1691
1692 }
1693 ptr+=opsize-2;
1694 length-=opsize;
1695 }
1696 }
1697 if (th->syn) {
1698 if (! mss_seen)
1699 sk->mtu=min(sk->mtu, 536);
1700 }
1701 sk->mss = min(sk->max_window, sk->mtu);
1702 }
1703
/*
 * Return the classful (A/B/C) network mask for the given destination
 * address.  'dst' is in network byte order, as is the returned mask.
 */
static inline unsigned long default_mask(unsigned long dst)
{
	unsigned long host = ntohl(dst);

	if (IN_CLASSA(host))
		return htonl(IN_CLASSA_NET);
	return IN_CLASSB(host) ? htonl(IN_CLASSB_NET)
			       : htonl(IN_CLASSC_NET);
}
1713
1714
1715
1716
1717
1718
1719
1720
/*
 * Handle an incoming SYN on listening socket 'sk': clone the listener
 * into a fresh sock in TCP_SYN_RECV, reply with SYN+ACK (advertising
 * our MSS as a TCP option), and park the SYN skb on the listener's
 * receive queue so accept() can later pick the new connection up.
 * On any failure the SYN is simply dropped (the peer will retry).
 */
static void
tcp_conn_request(struct sock *sk, struct sk_buff *skb,
		 unsigned long daddr, unsigned long saddr,
		 struct options *opt, struct device *dev)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	unsigned char *ptr;
	struct sock *newsk;
	struct tcphdr *th;
	int tmp;

	DPRINTF((DBG_TCP, "tcp_conn_request(sk = %X, skb = %X, daddr = %X, sadd4= %X, \n"
		"                  opt = %X, dev = %X)\n",
		sk, skb, daddr, saddr, opt, dev));

	th = skb->h.th;

	/* A dead listener can't accept; refuse with a reset. */
	if (!sk->dead) {
		sk->data_ready(sk,0);
	} else {
		DPRINTF((DBG_TCP, "tcp_conn_request on dead socket\n"));
		tcp_reset(daddr, saddr, th, sk->prot, opt, dev, sk->ip_tos,sk->ip_ttl);
		kfree_skb(skb, FREE_READ);
		return;
	}

	/* Listen backlog full: silently drop, peer will retransmit. */
	if (sk->ack_backlog >= sk->max_ack_backlog) {
		kfree_skb(skb, FREE_READ);
		return;
	}

	newsk = (struct sock *) kmalloc(sizeof(struct sock), GFP_ATOMIC);
	if (newsk == NULL) {
		/* Drop; again the peer's retransmit covers us. */
		kfree_skb(skb, FREE_READ);
		return;
	}

	DPRINTF((DBG_TCP, "newsk = %X\n", newsk));
	/* Start from a byte copy of the listener, then reset all the
	   per-connection state that must not be inherited. */
	memcpy((void *)newsk,(void *)sk, sizeof(*newsk));
	newsk->wback = NULL;
	newsk->wfront = NULL;
	newsk->rqueue = NULL;
	newsk->send_head = NULL;
	newsk->send_tail = NULL;
	newsk->back_log = NULL;
	newsk->rtt = TCP_CONNECT_TIME << 3;	/* rtt is stored scaled by 8 */
	newsk->rto = TCP_CONNECT_TIME;
	newsk->mdev = 0;
	newsk->max_window = 0;
	newsk->cong_window = 1;		/* slow start from one segment */
	newsk->cong_count = 0;
	newsk->ssthresh = 0;
	newsk->backoff = 0;
	newsk->blog = 0;
	newsk->intr = 0;
	newsk->proc = 0;
	newsk->done = 0;
	newsk->partial = NULL;
	newsk->pair = NULL;
	newsk->wmem_alloc = 0;
	newsk->rmem_alloc = 0;

	newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;

	newsk->err = 0;
	newsk->shutdown = 0;
	newsk->ack_backlog = 0;
	/* The SYN occupies one sequence number. */
	newsk->acked_seq = skb->h.th->seq+1;
	newsk->fin_seq = skb->h.th->seq;
	newsk->copied_seq = skb->h.th->seq;
	newsk->state = TCP_SYN_RECV;
	newsk->timeout = 0;
	/* Clock-driven initial send sequence number. */
	newsk->send_seq = jiffies * SEQ_TICK - seq_offset;
	newsk->window_seq = newsk->send_seq;
	newsk->rcv_ack_seq = newsk->send_seq;
	newsk->urg_data = 0;
	newsk->retransmits = 0;
	newsk->destroy = 0;
	newsk->timer.data = (unsigned long)newsk;
	newsk->timer.function = &net_timer;
	/* Our ports mirror the incoming segment's. */
	newsk->dummy_th.source = skb->h.th->dest;
	newsk->dummy_th.dest = skb->h.th->source;

	/* Addresses are swapped relative to the received packet. */
	newsk->daddr = saddr;
	newsk->saddr = daddr;

	put_sock(newsk->num,newsk);
	newsk->dummy_th.res1 = 0;
	newsk->dummy_th.doff = 6;
	newsk->dummy_th.fin = 0;
	newsk->dummy_th.syn = 0;
	newsk->dummy_th.rst = 0;
	newsk->dummy_th.psh = 0;
	newsk->dummy_th.ack = 0;
	newsk->dummy_th.urg = 0;
	newsk->dummy_th.res2 = 0;
	newsk->acked_seq = skb->h.th->seq + 1;
	newsk->copied_seq = skb->h.th->seq;

	/* TTL from the listener, TOS echoed from the incoming packet. */
	newsk->ip_ttl=sk->ip_ttl;
	newsk->ip_tos=skb->ip_hdr->tos;

	/* Pick an initial MTU: user override, else small for non-local
	   destinations, else the local maximum. */
	if (sk->user_mss)
		newsk->mtu = sk->user_mss;
	else {
#ifdef SUBNETSARELOCAL
		if ((saddr ^ daddr) & default_mask(saddr))
#else
		if ((saddr ^ daddr) & dev->pa_mask)
#endif
			newsk->mtu = 576 - HEADER_SIZE;
		else
			newsk->mtu = MAX_WINDOW;
	}

	newsk->mtu = min(newsk->mtu, dev->mtu - HEADER_SIZE);

	/* Let the peer's SYN options (MSS) lower our mtu/mss. */
	tcp_options(newsk,skb->h.th);

	buff = newsk->prot->wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL) {
		sk->err = -ENOMEM;
		newsk->dead = 1;
		release_sock(newsk);
		kfree_skb(skb, FREE_READ);
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_SYN_SIZE;
	buff->len = sizeof(struct tcphdr)+4;	/* header + MSS option */
	buff->sk = newsk;

	t1 =(struct tcphdr *) buff->data;

	tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &dev,
				     IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);

	if (tmp < 0) {
		sk->err = tmp;
		buff->free=1;
		kfree_skb(buff,FREE_WRITE);
		newsk->dead = 1;
		release_sock(newsk);
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
		return;
	}

	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)t1 +tmp);

	memcpy(t1, skb->h.th, sizeof(*t1));
	buff->h.seq = newsk->send_seq;

	/* Construct the SYN+ACK. */
	t1->dest = skb->h.th->source;
	t1->source = newsk->dummy_th.source;
	t1->seq = ntohl(newsk->send_seq++);
	t1->ack = 1;
	newsk->window = tcp_select_window(newsk);
	t1->window = ntohs(newsk->window);
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->psh = 0;
	t1->syn = 1;
	t1->ack_seq = ntohl(skb->h.th->seq+1);
	t1->doff = sizeof(*t1)/4+1;	/* one extra 32-bit word of options */

	/* MSS option: kind 2, length 4, value = our mtu. */
	ptr =(unsigned char *)(t1+1);
	ptr[0] = 2;
	ptr[1] = 4;
	ptr[2] = ((newsk->mtu) >> 8) & 0xff;
	ptr[3] =(newsk->mtu) & 0xff;

	tcp_send_check(t1, daddr, saddr, sizeof(*t1)+4, newsk);
	newsk->prot->queue_xmit(newsk, dev, buff, 0);

	reset_timer(newsk, TIME_WRITE , TCP_CONNECT_TIME);
	/* Charge the SYN skb to the new socket and queue it on the
	   listener so accept() can find the embryonic connection. */
	skb->sk = newsk;

	sk->rmem_alloc -= skb->mem_len;
	newsk->rmem_alloc += skb->mem_len;

	skb_queue_tail(&sk->rqueue,skb);
	sk->ack_backlog++;
	release_sock(newsk);
}
1933
1934
/*
 * Close a TCP socket.  Unread received data is discarded (and triggers
 * a reset flag on the outgoing FIN if the peer sent data we never
 * consumed); then a FIN is sent or the state machine advanced,
 * depending on the current state.  'timeout' nonzero means the caller
 * is not willing to wait for the orderly close.
 */
static void
tcp_close(struct sock *sk, int timeout)
{
	struct sk_buff *buff;
	int need_reset = 0;	/* set if we junk data the peer sent */
	struct tcphdr *t1, *th;
	struct proto *prot;
	struct device *dev=NULL;
	int tmp;

	DPRINTF((DBG_TCP, "tcp_close((struct sock *)%X, %d)\n",sk, timeout));
	sk->inuse = 1;
	sk->keepopen = 1;
	sk->shutdown = SHUTDOWN_MASK;

	if (!sk->dead)
		sk->state_change(sk);

	/* Flush anything still sitting on the receive queue. */
	if (skb_peek(&sk->rqueue) != NULL)
	{
		struct sk_buff *skb;
		if(sk->debug)
			printk("Clean rcv queue\n");
		while((skb=skb_dequeue(&sk->rqueue))!=NULL)
		{
			/* Unconsumed data beyond what the reader copied
			   means the peer's data is being thrown away. */
			if(skb->len > 0 && after(skb->h.th->seq + skb->len + 1 , sk->copied_seq))
				need_reset = 1;
			kfree_skb(skb, FREE_READ);
		}
		if(sk->debug)
			printk("Cleaned.\n");
	}
	sk->rqueue = NULL;

	/* Push out any partially built segment first. */
	if (sk->partial) {
		tcp_send_partial(sk);
	}

	switch(sk->state) {
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
	case TCP_LAST_ACK:
		/* FIN already sent; just (re)arm the close timer. */
		reset_timer(sk, TIME_CLOSE, 4 * sk->rto);
		if (timeout) tcp_time_wait(sk);
		release_sock(sk);
		return;
	case TCP_TIME_WAIT:
		if (timeout) {
			sk->state = TCP_CLOSE;
		}
		release_sock(sk);
		return;
	case TCP_LISTEN:
		sk->state = TCP_CLOSE;
		release_sock(sk);
		return;
	case TCP_CLOSE:
		release_sock(sk);
		return;
	case TCP_CLOSE_WAIT:
	case TCP_ESTABLISHED:
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* We still owe the peer a FIN: build and send one. */
		prot =(struct proto *)sk->prot;
		th =(struct tcphdr *)&sk->dummy_th;
		buff = prot->wmalloc(sk, MAX_FIN_SIZE, 1, GFP_ATOMIC);
		if (buff == NULL) {
			/* No memory: back off and let the close timer
			   retry this whole routine shortly. */
			release_sock(sk);
			if (sk->state != TCP_CLOSE_WAIT)
					sk->state = TCP_ESTABLISHED;
			reset_timer(sk, TIME_CLOSE, 100);
			return;
		}
		buff->mem_addr = buff;
		buff->mem_len = MAX_FIN_SIZE;
		buff->sk = sk;
		buff->free = 1;
		buff->len = sizeof(*t1);
		t1 =(struct tcphdr *) buff->data;

		tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
					 IPPROTO_TCP, sk->opt,
					 sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
		if (tmp < 0) {
			kfree_skb(buff,FREE_WRITE);
			DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
			release_sock(sk);
			return;
		}

		t1 =(struct tcphdr *)((char *)t1 +tmp);
		buff->len += tmp;
		buff->dev = dev;
		memcpy(t1, th, sizeof(*t1));
		/* The FIN consumes one sequence number. */
		t1->seq = ntohl(sk->send_seq);
		sk->send_seq++;
		buff->h.seq = sk->send_seq;
		t1->ack = 1;

		/* Closing: stop delaying acks from here on. */
		sk->delay_acks = 0;
		t1->ack_seq = ntohl(sk->acked_seq);
		t1->window = ntohs(sk->window=tcp_select_window(sk));
		t1->fin = 1;
		/* RST piggy-backed if we discarded unread data above. */
		t1->rst = need_reset;
		t1->doff = sizeof(*t1)/4;
		tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

		/* Send now if the write queue is empty, otherwise the
		   FIN must go out after the queued data. */
		if (sk->wfront == NULL) {
			prot->queue_xmit(sk, dev, buff, 0);
		} else {
			reset_timer(sk, TIME_WRITE, sk->rto);
			buff->next = NULL;
			if (sk->wback == NULL) {
				sk->wfront = buff;
			} else {
				sk->wback->next = buff;
			}
			sk->wback = buff;
			buff->magic = TCP_WRITE_QUEUE_MAGIC;
		}

		/* NOTE(review): RFC 793 says a close from CLOSE_WAIT
		   should lead to LAST_ACK, not FIN_WAIT2.  tcp_ack()
		   treats LAST_ACK and FIN_WAIT2 identically here, so
		   this mislabeling appears benign — confirm before
		   changing it. */
		if (sk->state == TCP_CLOSE_WAIT) {
			sk->state = TCP_FIN_WAIT2;
		} else {
			sk->state = TCP_FIN_WAIT1;
		}
	}
	release_sock(sk);
}
2080
2081
2082
2083
2084
2085
2086 static void
2087 tcp_write_xmit(struct sock *sk)
2088 {
2089 struct sk_buff *skb;
2090
2091 DPRINTF((DBG_TCP, "tcp_write_xmit(sk=%X)\n", sk));
2092
2093
2094
2095 if(sk->zapped)
2096 return;
2097
2098 while(sk->wfront != NULL &&
2099 before(sk->wfront->h.seq, sk->window_seq +1) &&
2100 (sk->retransmits == 0 ||
2101 sk->timeout != TIME_WRITE ||
2102 before(sk->wfront->h.seq, sk->rcv_ack_seq +1))
2103 && sk->packets_out < sk->cong_window) {
2104 skb = sk->wfront;
2105 IS_SKB(skb);
2106 sk->wfront = skb->next;
2107 if (sk->wfront == NULL) sk->wback = NULL;
2108 skb->next = NULL;
2109 if (skb->magic != TCP_WRITE_QUEUE_MAGIC) {
2110 printk("tcp.c skb with bad magic(%X) on write queue. Squashing "
2111 "queue\n", skb->magic);
2112 sk->wfront = NULL;
2113 sk->wback = NULL;
2114 return;
2115 }
2116 skb->magic = 0;
2117 DPRINTF((DBG_TCP, "Sending a packet.\n"));
2118
2119
2120 if (before(skb->h.seq, sk->rcv_ack_seq +1)) {
2121 sk->retransmits = 0;
2122 kfree_skb(skb, FREE_WRITE);
2123 if (!sk->dead) sk->write_space(sk);
2124 } else {
2125 sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
2126 }
2127 }
2128 }
2129
2130
2131
2132
2133
2134
2135 void
2136 sort_send(struct sock *sk)
2137 {
2138 struct sk_buff *list = NULL;
2139 struct sk_buff *skb,*skb2,*skb3;
2140
2141 for (skb = sk->send_head; skb != NULL; skb = skb2) {
2142 skb2 = (struct sk_buff *)skb->link3;
2143 if (list == NULL || before (skb2->h.seq, list->h.seq)) {
2144 skb->link3 = list;
2145 sk->send_tail = skb;
2146 list = skb;
2147 } else {
2148 for (skb3 = list; ; skb3 = (struct sk_buff *)skb3->link3) {
2149 if (skb3->link3 == NULL ||
2150 before(skb->h.seq, skb3->link3->h.seq)) {
2151 skb->link3 = skb3->link3;
2152 skb3->link3 = skb;
2153 if (skb->link3 == NULL) sk->send_tail = skb;
2154 break;
2155 }
2156 }
2157 }
2158 }
2159 sk->send_head = list;
2160 }
2161
2162
2163
/*
 * Process the ACK (and window) fields of an incoming segment: trim the
 * retransmit queue, update the RTT estimate and congestion window,
 * restart transmission or probing as appropriate, and drive the
 * closing states forward.  Returns 1 if the ACK was acceptable,
 * 0 if it was outside the window (caller should reset).
 *
 * 'flag' bits (internal): 1 = something happened / data present,
 * 2 = segment(s) acked or retransmit seen, 4 = window/queue changed.
 */
static int
tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
{
	unsigned long ack;
	int flag = 0;

	/* A zapped socket ignores everything. */
	if(sk->zapped)
		return(1);

	ack = ntohl(th->ack_seq);
	DPRINTF((DBG_TCP, "tcp_ack ack=%d, window=%d, "
		"sk->rcv_ack_seq=%d, sk->window_seq = %d\n",
		ack, ntohs(th->window), sk->rcv_ack_seq, sk->window_seq));

	/* Track the largest window the peer has ever offered. */
	if (ntohs(th->window) > sk->max_window) {
		sk->max_window = ntohs(th->window);
		sk->mss = min(sk->max_window, sk->mtu);
	}

	/* Any ACK while keepalive-probing proves the peer is alive. */
	if (sk->retransmits && sk->timeout == TIME_KEEPOPEN)
		sk->retransmits = 0;

	/* ACK outside [rcv_ack_seq-1, send_seq+1]: old duplicate or
	   ahead of anything we sent. */
	if (after(ack, sk->send_seq+1) || before(ack, sk->rcv_ack_seq-1)) {
		if (after(ack, sk->send_seq) ||
		   (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)) {
			return(0);
		}
		if (sk->keepopen) {
			reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
		}
		return(1);
	}

	/* Segment carries data (or options beyond the header). */
	if (len != th->doff*4) flag |= 1;

	/* The peer shrank its window: move any segment of the
	   retransmit queue that no longer fits back onto the front of
	   the write queue (in order), keeping the rest on send_head. */
	if (after(sk->window_seq, ack+ntohs(th->window))) {
		struct sk_buff *skb;
		struct sk_buff *skb2;
		struct sk_buff *wskb = NULL;

		skb2 = sk->send_head;
		sk->send_head = NULL;
		sk->send_tail = NULL;

		flag |= 4;

		sk->window_seq = ack + ntohs(th->window);
		cli();	/* the queues are also touched at interrupt time */
		while (skb2 != NULL) {
			skb = skb2;
			skb2 = (struct sk_buff *)skb->link3;
			skb->link3 = NULL;
			if (after(skb->h.seq, sk->window_seq)) {
				/* Outside the new window: back to the
				   write queue, preserving order. */
				if (sk->packets_out > 0) sk->packets_out--;

				if (skb->next != NULL) {
					skb_unlink(skb);
				}

				skb->magic = TCP_WRITE_QUEUE_MAGIC;
				if (wskb == NULL) {
					skb->next = sk->wfront;
					sk->wfront = skb;
				} else {
					skb->next = wskb->next;
					wskb->next = skb;
				}
				if (sk->wback == wskb) sk->wback = skb;
				wskb = skb;
			} else {
				/* Still fits: rebuild the send list. */
				if (sk->send_head == NULL) {
					sk->send_head = skb;
					sk->send_tail = skb;
				} else {
					sk->send_tail->link3 = skb;
					sk->send_tail = skb;
				}
				skb->link3 = NULL;
			}
		}
		sti();
	}

	/* Keep head/tail/packets_out mutually consistent. */
	if (sk->send_tail == NULL || sk->send_head == NULL) {
		sk->send_head = NULL;
		sk->send_tail = NULL;
		sk->packets_out= 0;
	}

	/* Right edge of the peer's offered window. */
	sk->window_seq = ack + ntohs(th->window);

	/* Congestion window growth: exponential below ssthresh (slow
	   start), then roughly one segment per window (avoidance),
	   capped at 2048. */
	if (sk->timeout == TIME_WRITE &&
	    sk->cong_window < 2048 && after(ack, sk->rcv_ack_seq)) {
		if (sk->cong_window < sk->ssthresh)
			sk->cong_window++;
		else {
			if (sk->cong_count >= sk->cong_window) {
				sk->cong_window++;
				sk->cong_count = 0;
			} else
				sk->cong_count++;
		}
	}

	DPRINTF((DBG_TCP, "tcp_ack: Updating rcv ack sequence.\n"));
	sk->rcv_ack_seq = ack;

	/* A zero-window probe was answered and the window now covers
	   the head of the write queue: resume normal timing. */
	if (sk->timeout == TIME_PROBE0) {
		if (sk->wfront != NULL &&
		    ! before (sk->window_seq, sk->wfront->h.seq)) {
			sk->retransmits = 0;
			sk->backoff = 0;

			sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
			if (sk->rto > 120*HZ)
				sk->rto = 120*HZ;
			if (sk->rto < 1*HZ)
				sk->rto = 1*HZ;
		}
	}

	/* Retire every fully-acknowledged segment on the send list. */
	while(sk->send_head != NULL) {
		/* Sanity: the list must be sequence-ordered. */
		if (sk->send_head->link3 &&
		    after(sk->send_head->h.seq, sk->send_head->link3->h.seq)) {
			printk("INET: tcp.c: *** bug send_list out of order.\n");
			sort_send(sk);
		}

		if (before(sk->send_head->h.seq, ack+1)) {
			struct sk_buff *oskb;

			if (sk->retransmits) {
				/* Karn: an acked retransmitted segment
				   gives no usable RTT sample. */
				flag |= 2;

				/* Still more outstanding? keep the
				   retransmit state alive. */
				if (sk->send_head->link3)
					sk->retransmits = 1;
				else
					sk->retransmits = 0;
			}

			if (sk->packets_out > 0) sk->packets_out --;
			DPRINTF((DBG_TCP, "skb=%X skb->h.seq = %d acked ack=%d\n",
				sk->send_head, sk->send_head->h.seq, ack));

			if (!sk->dead) sk->write_space(sk);

			oskb = sk->send_head;

			if (!(flag&2)) {
				long m;

				/* Jacobson-style smoothed RTT: sk->rtt
				   holds srtt scaled by 8, sk->mdev the
				   mean deviation scaled by 4. */
				m = jiffies - oskb->when;
				m -= (sk->rtt >> 3);
				sk->rtt += m;
				if (m < 0)
					m = -m;
				m -= (sk->mdev >> 2);
				sk->mdev += m;

				/* rto = srtt/2 + mdev/2, clamped to
				   [1s, 120s]. */
				sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
				if (sk->rto > 120*HZ)
					sk->rto = 120*HZ;
				if (sk->rto < 1*HZ)
					sk->rto = 1*HZ;
				sk->backoff = 0;
			}
			flag |= (2|4);

			cli();

			/* Unlink and free the acknowledged segment. */
			oskb = sk->send_head;
			IS_SKB(oskb);
			sk->send_head =(struct sk_buff *)oskb->link3;
			if (sk->send_head == NULL) {
				sk->send_tail = NULL;
			}

			skb_unlink(oskb);
			sti();
			oskb->magic = 0;
			kfree_skb(oskb, FREE_WRITE);
			if (!sk->dead) sk->write_space(sk);
		} else {
			break;
		}
	}

	/* Decide what to do next: transmit more, probe a zero window,
	   or settle the timers. */
	if (sk->wfront != NULL) {
		if (after (sk->window_seq+1, sk->wfront->h.seq) &&
		   (sk->retransmits == 0 ||
		    sk->timeout != TIME_WRITE ||
		    before(sk->wfront->h.seq, sk->rcv_ack_seq +1))
		   && sk->packets_out < sk->cong_window) {
			flag |= 1;
			tcp_write_xmit(sk);
		} else if (before(sk->window_seq, sk->wfront->h.seq) &&
			   sk->send_head == NULL &&
			   sk->ack_backlog == 0 &&
			   sk->state != TCP_TIME_WAIT) {
			/* Data queued but window closed: probe. */
			reset_timer(sk, TIME_PROBE0, sk->rto);
		}
	} else {
		if (sk->send_head == NULL && sk->ack_backlog == 0 &&
		    sk->state != TCP_TIME_WAIT && !sk->keepopen) {
			DPRINTF((DBG_TCP, "Nothing to do, going to sleep.\n"));
			if (!sk->dead) sk->write_space(sk);

			if (sk->keepopen)
				reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
			else
				delete_timer(sk);
		} else {
			if (sk->state != (unsigned char) sk->keepopen) {
				reset_timer(sk, TIME_WRITE, sk->rto);
			}
			if (sk->state == TCP_TIME_WAIT) {
				reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			}
		}
	}

	/* Everything sent and acked: flush any partial segment. */
	if (sk->packets_out == 0 && sk->partial != NULL &&
	    sk->wfront == NULL && sk->send_head == NULL) {
		flag |= 1;
		tcp_send_partial(sk);
	}

	/* Drive the closing handshake states. */
	if (sk->state == TCP_TIME_WAIT) {
		if (!sk->dead)
			sk->state_change(sk);
		if (sk->rcv_ack_seq == sk->send_seq && sk->acked_seq == sk->fin_seq) {
			flag |= 1;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
		}
	}

	if (sk->state == TCP_LAST_ACK || sk->state == TCP_FIN_WAIT2) {
		if (!sk->dead) sk->state_change(sk);
		if (sk->rcv_ack_seq == sk->send_seq) {
			flag |= 1;
			if (sk->acked_seq != sk->fin_seq) {
				tcp_time_wait(sk);
			} else {
				DPRINTF((DBG_TCP, "tcp_ack closing socket - %X\n", sk));
				tcp_send_ack(sk->send_seq, sk->acked_seq, sk,
					     th, sk->daddr);
				sk->shutdown = SHUTDOWN_MASK;
				sk->state = TCP_CLOSE;
			}
		}
	}

	/* Fast-retransmit heuristic: an "uninteresting" ACK (or one
	   that moved the queues) while data is outstanding and either
	   a retransmit run is active or the head segment has timed
	   out, triggers an immediate retransmission. */
	if (((!flag) || (flag&4)) && sk->send_head != NULL &&
	    (((flag&2) && sk->retransmits) ||
	     (sk->send_head->when + sk->rto < jiffies))) {
		ip_do_retransmit(sk, 1);
		reset_timer(sk, TIME_WRITE, sk->rto);
	}

	DPRINTF((DBG_TCP, "leaving tcp_ack\n"));
	return(1);
}
2531
2532
2533
2534
2535
2536
2537
/*
 * Accept the data portion of an incoming segment: insert the skb into
 * the (sequence-ordered) receive queue, advance acked_seq across any
 * now-contiguous segments, schedule or send an ACK, and wake readers.
 * Always returns 0; the skb is either queued or freed here.
 */
static int
tcp_data(struct sk_buff *skb, struct sock *sk,
	 unsigned long saddr, unsigned short len)
{
	struct sk_buff *skb1, *skb2;
	struct tcphdr *th;
	int dup_dumped=0;	/* set when this skb replaced a duplicate */

	th = skb->h.th;
	print_th(th);
	/* Payload length = total minus the TCP header+options. */
	skb->len = len -(th->doff*4);

	DPRINTF((DBG_TCP, "tcp_data len = %d sk = %X:\n", skb->len, sk));

	sk->bytes_rcv += skb->len;
	if (skb->len == 0 && !th->fin && !th->urg && !th->psh) {
		/* Pure ack/window segment: nothing to queue. */
		if (!th->ack) tcp_send_ack(sk->send_seq, sk->acked_seq,sk, th, saddr);
		kfree_skb(skb, FREE_READ);
		return(0);
	}

	/* Data after we shut the receive side down: kill the
	   connection with a reset. */
	if (sk->shutdown & RCV_SHUTDOWN) {
		sk->acked_seq = th->seq + skb->len + th->syn + th->fin;
		tcp_reset(sk->saddr, sk->daddr, skb->h.th,
			  sk->prot, NULL, skb->dev, sk->ip_tos, sk->ip_ttl);
		sk->state = TCP_CLOSE;
		sk->err = EPIPE;
		sk->shutdown = SHUTDOWN_MASK;
		DPRINTF((DBG_TCP, "tcp_data: closing socket - %X\n", sk));
		kfree_skb(skb, FREE_READ);
		if (!sk->dead) sk->state_change(sk);
		return(0);
	}

	/* Insert into the receive queue, keeping sequence order.
	   We search backwards from the queue tail since in-order
	   arrival is the common case. */
	if (sk->rqueue == NULL) {
		DPRINTF((DBG_TCP, "tcp_data: skb = %X:\n", skb));
#ifdef OLDWAY
		sk->rqueue = skb;
		skb->next = skb;
		skb->prev = skb;
		skb->list = &sk->rqueue;
#else
		skb_queue_head(&sk->rqueue,skb);
#endif
		skb1= NULL;
	} else {
		DPRINTF((DBG_TCP, "tcp_data adding to chain sk = %X:\n", sk));
		for(skb1=sk->rqueue->prev; ; skb1 =(struct sk_buff *)skb1->prev) {
			if(sk->debug)
			{
				printk("skb1=%p :", skb1);
				printk("skb1->h.th->seq = %ld: ", skb1->h.th->seq);
				printk("skb->h.th->seq = %ld\n",skb->h.th->seq);
				printk("copied_seq = %ld acked_seq = %ld\n", sk->copied_seq,
						sk->acked_seq);
			}
#ifdef OLD
			if (after(th->seq+1, skb1->h.th->seq)) {
				skb->prev = skb1;
				skb->next = skb1->next;
				skb->next->prev = skb;
				skb1->next = skb;
				if (skb1 == sk->rqueue) sk->rqueue = skb;
				break;
			}
			if (skb1->prev == sk->rqueue) {
				skb->next= skb1;
				skb->prev = skb1->prev;
				skb->prev->next = skb;
				skb1->prev = skb;
				skb1 = NULL;

				break;
			}
#else
			/* Exact duplicate (and at least as long):
			   replace the old copy with the new one. */
			if (th->seq==skb1->h.th->seq && skb->len>= skb1->len)
			{
				skb_append(skb1,skb);
				skb_unlink(skb1);
				kfree_skb(skb1,FREE_READ);
				dup_dumped=1;
				skb1=NULL;
				break;
			}
			/* Found the segment we belong after. */
			if (after(th->seq+1, skb1->h.th->seq))
			{
				skb_append(skb1,skb);
				break;
			}
			/* Searched the whole queue: we go first. */
			if (skb1 == sk->rqueue)
			{
				skb_queue_head(&sk->rqueue, skb);
				break;
			}
#endif
		}
		DPRINTF((DBG_TCP, "skb = %X:\n", skb));
	}

	/* Stash the sequence number following this segment in the
	   header's (otherwise unused on receive) ack_seq field. */
	th->ack_seq = th->seq + skb->len;
	if (th->syn) th->ack_seq++;
	if (th->fin) th->ack_seq++;

	if (before(sk->acked_seq, sk->copied_seq)) {
		printk("*** tcp.c:tcp_data bug acked < copied\n");
		sk->acked_seq = sk->copied_seq;
	}

	/* If this segment extends the in-order data (or fills a hole),
	   advance acked_seq across it and any now-contiguous
	   successors, shrinking our advertised window accordingly. */
	if ((!dup_dumped && (skb1 == NULL || skb1->acked)) || before(th->seq, sk->acked_seq+1)) {
		if (before(th->seq, sk->acked_seq+1)) {
			int newwindow;

			if (after(th->ack_seq, sk->acked_seq)) {
				newwindow = sk->window -
					   (th->ack_seq - sk->acked_seq);
				if (newwindow < 0)
					newwindow = 0;
				sk->window = newwindow;
				sk->acked_seq = th->ack_seq;
			}
			skb->acked = 1;

			/* An in-order FIN closes the receive side. */
			if (skb->h.th->fin) {
				if (!sk->dead) sk->state_change(sk);
				sk->shutdown |= RCV_SHUTDOWN;
			}

			/* Sweep forward over queued segments that are
			   now contiguous with the acked data. */
			for(skb2 = (struct sk_buff *)skb->next;
			    skb2 !=(struct sk_buff *) sk->rqueue;
			    skb2 = (struct sk_buff *)skb2->next) {
				if (before(skb2->h.th->seq, sk->acked_seq+1)) {
					if (after(skb2->h.th->ack_seq, sk->acked_seq))
					{
						newwindow = sk->window -
						 (skb2->h.th->ack_seq - sk->acked_seq);
						if (newwindow < 0)
							newwindow = 0;
						sk->window = newwindow;
						sk->acked_seq = skb2->h.th->ack_seq;
					}
					skb2->acked = 1;

					if (skb2->h.th->fin) {
						sk->shutdown |= RCV_SHUTDOWN;
						if (!sk->dead) sk->state_change(sk);
					}

					/* Force an immediate ack below. */
					sk->ack_backlog = sk->max_ack_backlog;
				} else {
					break;
				}
			}

			/* Delay the ack when allowed, otherwise fall
			   through to the ack logic below. */
			if (!sk->delay_acks ||
			    sk->ack_backlog >= sk->max_ack_backlog ||
			    sk->bytes_rcv > sk->max_unacked || th->fin) {
			} else {
				sk->ack_backlog++;
				if(sk->debug)
					printk("Ack queued.\n");
				reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
			}
		}
	}

	/* Out-of-order segment: ack what we have (a duplicate ack
	   tells the peer what we're missing).  If we're out of
	   receive memory, throw away unacked queued data first. */
	if (!skb->acked) {
		while (sk->prot->rspace(sk) < sk->mtu) {
			skb1 = skb_peek(&sk->rqueue);
			if (skb1 == NULL) {
				printk("INET: tcp.c:tcp_data memory leak detected.\n");
				break;
			}

			/* Never discard already-acknowledged data. */
			if (skb1->acked) {
				break;
			}

			skb_unlink(skb1);
#ifdef OLDWAY
			if (skb1->prev == skb1) {
				sk->rqueue = NULL;
			} else {
				sk->rqueue = (struct sk_buff *)skb1->prev;
				skb1->next->prev = skb1->prev;
				skb1->prev->next = skb1->next;
			}
#endif
			kfree_skb(skb1, FREE_READ);
		}
		tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
		sk->ack_backlog++;
		reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
	} else {
		tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
	}

	/* Tell any sleeping reader there is data. */
	if (!sk->dead) {
		if(sk->debug)
			printk("Data wakeup.\n");
		sk->data_ready(sk,0);
	} else {
		DPRINTF((DBG_TCP, "data received on dead socket.\n"));
	}

	/* Both FINs seen and everything acked: last-ack stage. */
	if (sk->state == TCP_FIN_WAIT2 &&
	    sk->acked_seq == sk->fin_seq && sk->rcv_ack_seq == sk->send_seq) {
		DPRINTF((DBG_TCP, "tcp_data: entering last_ack state sk = %X\n", sk));

		sk->shutdown = SHUTDOWN_MASK;
		sk->state = TCP_LAST_ACK;
		if (!sk->dead) sk->state_change(sk);
	}

	return(0);
}
2789
2790
2791 static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
2792 {
2793 unsigned long ptr = ntohs(th->urg_ptr);
2794
2795 if (ptr)
2796 ptr--;
2797 ptr += th->seq;
2798
2799
2800 if (after(sk->copied_seq+1, ptr))
2801 return;
2802
2803
2804 if (sk->urg_data && !after(ptr, sk->urg_seq))
2805 return;
2806
2807
2808 if (sk->proc != 0) {
2809 if (sk->proc > 0) {
2810 kill_proc(sk->proc, SIGURG, 1);
2811 } else {
2812 kill_pg(-sk->proc, SIGURG, 1);
2813 }
2814 }
2815 sk->urg_data = URG_NOTYET;
2816 sk->urg_seq = ptr;
2817 }
2818
2819 static inline int tcp_urg(struct sock *sk, struct tcphdr *th,
2820 unsigned long saddr, unsigned long len)
2821 {
2822 unsigned long ptr;
2823
2824
2825 if (th->urg)
2826 tcp_check_urg(sk,th);
2827
2828
2829 if (sk->urg_data != URG_NOTYET)
2830 return 0;
2831
2832
2833 ptr = sk->urg_seq - th->seq + th->doff*4;
2834 if (ptr >= len)
2835 return 0;
2836
2837
2838 sk->urg_data = URG_VALID | *(ptr + (unsigned char *) th);
2839 if (!sk->dead)
2840 wake_up_interruptible(sk->sleep);
2841 return 0;
2842 }
2843
2844
2845
2846 static int
2847 tcp_fin(struct sock *sk, struct tcphdr *th,
2848 unsigned long saddr, struct device *dev)
2849 {
2850 DPRINTF((DBG_TCP, "tcp_fin(sk=%X, th=%X, saddr=%X, dev=%X)\n",
2851 sk, th, saddr, dev));
2852
2853 if (!sk->dead) {
2854 sk->state_change(sk);
2855 }
2856
2857 switch(sk->state) {
2858 case TCP_SYN_RECV:
2859 case TCP_SYN_SENT:
2860 case TCP_ESTABLISHED:
2861
2862 sk->fin_seq = th->seq+1;
2863 sk->state = TCP_CLOSE_WAIT;
2864 if (th->rst) sk->shutdown = SHUTDOWN_MASK;
2865 break;
2866
2867 case TCP_CLOSE_WAIT:
2868 case TCP_FIN_WAIT2:
2869 break;
2870
2871 case TCP_FIN_WAIT1:
2872
2873 sk->fin_seq = th->seq+1;
2874 sk->state = TCP_FIN_WAIT2;
2875 break;
2876
2877 default:
2878 case TCP_TIME_WAIT:
2879 sk->state = TCP_LAST_ACK;
2880
2881
2882 reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
2883 return(0);
2884 }
2885 sk->ack_backlog++;
2886
2887 return(0);
2888 }
2889
2890
2891
/*
 * accept() for TCP: pull the next completed connection off the
 * listening socket's queue, sleeping unless O_NONBLOCK is set.
 * Returns the new sock, or NULL with sk->err set (EINVAL if not
 * listening, EAGAIN if nonblocking and none ready, ERESTARTSYS on
 * signal).
 */
static struct sock *
tcp_accept(struct sock *sk, int flags)
{
	struct sock *newsk;
	struct sk_buff *skb;

	DPRINTF((DBG_TCP, "tcp_accept(sk=%X, flags=%X, addr=%s)\n",
		sk, flags, in_ntoa(sk->saddr)));

	/* Only a listening socket can accept. */
	if (sk->state != TCP_LISTEN) {
		sk->err = EINVAL;
		return(NULL);
	}

	/* Interrupts off while we test-and-sleep, so a connection
	   arriving between the check and the sleep can't be missed. */
	cli();
	sk->inuse = 1;
	while((skb = get_firstr(sk)) == NULL) {
		if (flags & O_NONBLOCK) {
			sti();
			release_sock(sk);
			sk->err = EAGAIN;
			return(NULL);
		}

		release_sock(sk);
		interruptible_sleep_on(sk->sleep);
		/* Woken by a signal rather than a connection. */
		if (current->signal & ~current->blocked) {
			sti();
			sk->err = ERESTARTSYS;
			return(NULL);
		}
		sk->inuse = 1;
	}
	sti();

	/* The queued SYN skb carries the embryonic socket. */
	newsk = skb->sk;

	kfree_skb(skb, FREE_READ);
	sk->ack_backlog--;
	release_sock(sk);
	return(newsk);
}
2940
2941
2942
/*
 * connect() for TCP: validate the destination, pick an initial send
 * sequence, build and transmit the SYN (with an MSS option), and move
 * the socket to SYN_SENT.  Returns 0 or a negative errno.
 */
static int
tcp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len)
{
	struct sk_buff *buff;
	struct sockaddr_in sin;
	struct device *dev=NULL;
	unsigned char *ptr;
	int tmp;
	struct tcphdr *t1;
	int err;

	if (sk->state != TCP_CLOSE) return(-EISCONN);
	if (addr_len < 8) return(-EINVAL);

	err=verify_area(VERIFY_READ, usin, addr_len);
	if(err)
		return err;

	memcpy_fromfs(&sin,usin, min(sizeof(sin), addr_len));

	if (sin.sin_family && sin.sin_family != AF_INET) return(-EAFNOSUPPORT);

	DPRINTF((DBG_TCP, "TCP connect daddr=%s\n", in_ntoa(sin.sin_addr.s_addr)));

	/* No TCP to a broadcast address. */
	if (chk_addr(sin.sin_addr.s_addr) == IS_BROADCAST) {
		DPRINTF((DBG_TCP, "TCP connection to broadcast address not allowed\n"));
		return(-ENETUNREACH);
	}

	/* Refuse connecting to our own local port/address pair. */
	if(sk->saddr == sin.sin_addr.s_addr && sk->num==ntohs(sin.sin_port))
		return -EBUSY;

	sk->inuse = 1;
	sk->daddr = sin.sin_addr.s_addr;
	/* Clock-driven initial send sequence number. */
	sk->send_seq = jiffies * SEQ_TICK - seq_offset;
	sk->window_seq = sk->send_seq;
	sk->rcv_ack_seq = sk->send_seq -1;
	sk->err = 0;
	sk->dummy_th.dest = sin.sin_port;
	release_sock(sk);

	buff = sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
	if (buff == NULL) {
		return(-ENOMEM);
	}
	sk->inuse = 1;
	buff->mem_addr = buff;
	buff->mem_len = MAX_SYN_SIZE;
	buff->len = 24;		/* TCP header (20) + MSS option (4) */
	buff->sk = sk;
	buff->free = 1;
	t1 = (struct tcphdr *) buff->data;

	/* Put in the IP and link headers; this also routes, filling
	   in 'dev' for the MTU computation below. */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
				     IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0) {
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		release_sock(sk);
		return(-ENETUNREACH);
	}
	buff->len += tmp;
	t1 = (struct tcphdr *)((char *)t1 +tmp);

	/* The SYN itself. */
	memcpy(t1,(void *)&(sk->dummy_th), sizeof(*t1));
	t1->seq = ntohl(sk->send_seq++);
	buff->h.seq = sk->send_seq;
	t1->ack = 0;
	t1->window = 2;
	t1->res1=0;
	t1->res2=0;
	t1->rst = 0;
	t1->urg = 0;
	t1->psh = 0;
	t1->syn = 1;
	t1->urg_ptr = 0;
	t1->doff = 6;		/* 20-byte header + 4 bytes of options */

	/* Initial MTU: user override, else small for non-local
	   destinations, else the local maximum. */
	if (sk->user_mss)
		sk->mtu = sk->user_mss;
	else {
#ifdef SUBNETSARELOCAL
		if ((sk->saddr ^ sk->daddr) & default_mask(sk->saddr))
#else
		if ((sk->saddr ^ sk->daddr) & dev->pa_mask)
#endif
			sk->mtu = 576 - HEADER_SIZE;
		else
			sk->mtu = MAX_WINDOW;
	}

	sk->mtu = min(sk->mtu, dev->mtu - HEADER_SIZE);

	/* MSS option: kind 2, length 4, value = our mtu. */
	ptr = (unsigned char *)(t1+1);
	ptr[0] = 2;
	ptr[1] = 4;
	ptr[2] = (sk->mtu) >> 8;
	ptr[3] = (sk->mtu) & 0xff;
	tcp_send_check(t1, sk->saddr, sk->daddr,
		       sizeof(struct tcphdr) + 4, sk);

	/* Enter SYN_SENT and arm the retransmit machinery.
	   NOTE(review): tcp_conn_request stores rtt scaled by 8
	   (TCP_CONNECT_TIME << 3); the unscaled assignment here looks
	   inconsistent — confirm which convention sk->rtt uses before
	   changing it. */
	sk->state = TCP_SYN_SENT;
	sk->rtt = TCP_CONNECT_TIME;
	reset_timer(sk, TIME_WRITE, TCP_CONNECT_TIME);
	sk->retransmits = TCP_RETR2 - TCP_SYN_RETRIES;

	sk->prot->queue_xmit(sk, dev, buff, 0);

	release_sock(sk);
	return(0);
}
3060
3061
3062
/*
 * Accept/reject test for a segment arriving on a synchronized (or
 * synchronizing) connection: is it inside the receive window?
 * Returns 1 when the caller should go on processing the segment,
 * 0 when it was rejected (any required reply is sent from here).
 */
static int
tcp_sequence(struct sock *sk, struct tcphdr *th, short len,
	struct options *opt, unsigned long saddr, struct device *dev)
{
  unsigned long next_seq;

  /* Payload length = total length minus TCP header (doff is in 32-bit words). */
  next_seq = len - 4*th->doff;

  /* A data-bearing segment cannot be accepted into a zero window. */
  if (next_seq && !sk->window)
	goto ignore_it;
  next_seq += th->seq;		/* sequence number just past this segment */
  if (th->syn)
	next_seq++;		/* a SYN consumes one sequence number */

  /* The segment must not end before what we have already acked
     (old duplicates are dropped) ... */
  if (!after(next_seq+1, sk->acked_seq))
	goto ignore_it;

  /* ... and must not start beyond the right edge of our window. */
  if (!before(th->seq, sk->acked_seq + sk->window + 1))
	goto ignore_it;

  /* In window: let the caller process it. */
  return 1;

ignore_it:
  DPRINTF((DBG_TCP, "tcp_sequence: rejecting packet.\n"));

  /* While half-open, answer an out-of-window segment with a reset.
     NOTE(review): returns 1 here, so the caller still processes the
     segment after the reset is sent -- confirm this is intended. */
  if (sk->state==TCP_SYN_SENT || sk->state==TCP_SYN_RECV) {
	tcp_reset(sk->saddr,sk->daddr,th,sk->prot,NULL,dev, sk->ip_tos,sk->ip_ttl);
	return 1;
  }

  /* Never ack a reset segment. */
  if (th->rst)
	return 0;

  /* Otherwise re-ack our current state so the peer can resynchronize. */
  tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
  return 0;
}
3116
3117
/*
 * Main TCP receive routine, called from the IP layer for every
 * incoming TCP segment.  It is re-entered with redo != 0 when a
 * segment queued on the socket backlog is replayed by release_sock().
 * The skb is always consumed (queued, handed to tcp_data, or freed);
 * the return value is always 0.
 */
int
tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
	unsigned long daddr, unsigned short len,
	unsigned long saddr, int redo, struct inet_protocol * protocol)
{
  struct tcphdr *th;
  struct sock *sk;

  if (!skb) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv skb = NULL\n"));
	return(0);
  }
#if 0
  if (!protocol) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv protocol = NULL\n"));
	return(0);
  }

  if (!opt) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv opt = NULL\n"));
  }
#endif
  if (!dev) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv dev = NULL\n"));
	return(0);
  }
  th = skb->h.th;

  /* Look up the owning socket by the full 4-tuple. */
  sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
  DPRINTF((DBG_TCP, "<<\n"));
  DPRINTF((DBG_TCP, "len = %d, redo = %d, skb=%X\n", len, redo, skb));

  /* A zapped socket has already been reset by the peer: treat any
     further traffic for it as if no socket existed. */
  if (sk!=NULL && sk->zapped)
	sk=NULL;

  if (sk) {
	DPRINTF((DBG_TCP, "sk = %X:\n", sk));
  }

  if (!redo) {
	/* First pass: verify the checksum before touching anything else. */
	if (tcp_check(th, len, saddr, daddr )) {
	  skb->sk = NULL;
	  DPRINTF((DBG_TCP, "packet dropped with bad checksum.\n"));
	  if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: bad checksum\n");
	  kfree_skb(skb,FREE_READ);
	  return(0);
	}

	/* Sequence numbers are kept in host order from here on. */
	th->seq = ntohl(th->seq);

	/* No socket: reset the sender (unless it was itself a reset). */
	if (sk == NULL) {
	  if (!th->rst)
		tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev,skb->ip_hdr->tos,255);
	  skb->sk = NULL;
	  kfree_skb(skb, FREE_READ);
	  return(0);
	}

	/* Attach the skb to the socket.  saddr/daddr are stored from
	   our own point of view (swapped relative to the wire). */
	skb->len = len;
	skb->sk = sk;
	skb->acked = 0;
	skb->used = 0;
	skb->free = 0;
	skb->saddr = daddr;
	skb->daddr = saddr;

	/* If the socket is busy, queue on its backlog; release_sock()
	   will replay the segment (redo=1) later.  Interrupts stay off
	   while the circular backlog list is manipulated. */
	cli();
	if (sk->inuse) {
	  if (sk->back_log == NULL) {
		sk->back_log = skb;
		skb->next = skb;
		skb->prev = skb;
	  } else {
		skb->next = sk->back_log;
		skb->prev = sk->back_log->prev;
		skb->prev->next = skb;
		skb->next->prev = skb;
	  }
	  sti();
	  return(0);
	}
	sk->inuse = 1;
	sti();
  } else {
	/* Backlog replay must always have a socket. */
	if (!sk) {
	  DPRINTF((DBG_TCP, "tcp.c: tcp_rcv bug sk=NULL redo = 1\n"));
	  return(0);
	}
  }

  if (!sk->prot) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv sk->prot = NULL \n"));
	return(0);
  }

  /* Charge the receive buffer; drop if the quota is exhausted. */
  if (sk->rmem_alloc + skb->mem_len >= sk->rcvbuf) {
	skb->sk = NULL;
	DPRINTF((DBG_TCP, "dropping packet due to lack of buffer space.\n"));
	kfree_skb(skb, FREE_READ);
	release_sock(sk);
	return(0);
  }
  sk->rmem_alloc += skb->mem_len;

  DPRINTF((DBG_TCP, "About to do switch.\n"));

  /* Dispatch on the TCP connection state. */
  switch(sk->state) {

	/* LAST_ACK only needs special reset handling; everything else
	   is shared with the established states below (fallthrough). */
	case TCP_LAST_ACK:
	  if (th->rst) {
		sk->zapped=1;
		sk->err = ECONNRESET;
		sk->state = TCP_CLOSE;
		sk->shutdown = SHUTDOWN_MASK;
		if (!sk->dead) {
		  sk->state_change(sk);
		}
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }
	  /* fallthrough */
	case TCP_ESTABLISHED:
	case TCP_CLOSE_WAIT:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
	case TCP_TIME_WAIT:
	  /* Out-of-window segments are answered inside tcp_sequence. */
	  if (!tcp_sequence(sk, th, len, opt, saddr,dev)) {
		if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: not in seq\n");
#ifdef undef
		if(!th->rst)
		  tcp_send_ack(sk->send_seq, sk->acked_seq,
			sk, th, saddr);
#endif
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }

	  /* An in-window RST kills the connection. */
	  if (th->rst) {
		sk->zapped=1;
		sk->err = ECONNRESET;

		/* In CLOSE_WAIT the local side has already seen FIN;
		   report the reset as a broken pipe instead. */
		if (sk->state == TCP_CLOSE_WAIT) {
		  sk->err = EPIPE;
		}

		sk->state = TCP_CLOSE;
		sk->shutdown = SHUTDOWN_MASK;
		if (!sk->dead) {
		  sk->state_change(sk);
		}
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }
	  /* A SYN on a synchronized connection is an error: reset it. */
	  if (
#if 0
	  if ((opt && (opt->security != 0 ||
		  opt->compartment != 0)) ||
#endif
		 th->syn) {
		sk->err = ECONNRESET;
		sk->state = TCP_CLOSE;
		sk->shutdown = SHUTDOWN_MASK;
		tcp_reset(daddr, saddr, th, sk->prot, opt,dev, sk->ip_tos,sk->ip_ttl);
		if (!sk->dead) {
		  sk->state_change(sk);
		}
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }

	  /* Process ack, urgent pointer, data and FIN in turn; a
	     non-zero return means the segment was not queued and the
	     skb must be freed here. */
	  if (th->ack && !tcp_ack(sk, th, saddr, len)) {
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }

	  if (tcp_urg(sk, th, saddr, len)) {
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }

	  if (tcp_data(skb, sk, saddr, len)) {
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }

	  if (th->fin && tcp_fin(sk, th, saddr, dev)) {
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }

	  release_sock(sk);
	  return(0);

	case TCP_CLOSE:
	  /* Dead or still-connected closed socket: just drop. */
	  if (sk->dead || sk->daddr) {
		DPRINTF((DBG_TCP, "packet received for closed,dead socket\n"));
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }

	  /* Otherwise reset the sender (RFC 793 CLOSED behaviour). */
	  if (!th->rst) {
		if (!th->ack)
		  th->ack_seq = 0;
		tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
	  }
	  kfree_skb(skb, FREE_READ);
	  release_sock(sk);
	  return(0);

	case TCP_LISTEN:
	  /* A listener ignores RSTs, resets stray ACKs, and only acts
	     on a SYN, which creates a child connection. */
	  if (th->rst) {
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }
	  if (th->ack) {
		tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }

	  if (th->syn) {
#if 0
		if (opt->security != 0 || opt->compartment != 0) {
		  tcp_reset(daddr, saddr, th, prot, opt,dev);
		  release_sock(sk);
		  return(0);
		}
#endif
		/* tcp_conn_request takes ownership of the skb. */
		tcp_conn_request(sk, skb, daddr, saddr, opt, dev);
		release_sock(sk);
		return(0);
	  }

	  kfree_skb(skb, FREE_READ);
	  release_sock(sk);
	  return(0);

	/* Remaining states (e.g. SYN_RECV) get the window check and
	   then share the SYN_SENT path below (fallthrough). */
	default:
	  if (!tcp_sequence(sk, th, len, opt, saddr,dev)) {
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }
	  /* fallthrough */
	case TCP_SYN_SENT:
	  /* A reset during the handshake means connection refused. */
	  if (th->rst) {
		sk->err = ECONNREFUSED;
		sk->state = TCP_CLOSE;
		sk->shutdown = SHUTDOWN_MASK;
		sk->zapped = 1;
		if (!sk->dead) {
		  sk->state_change(sk);
		}
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }
#if 0
	  if (opt->security != 0 || opt->compartment != 0) {
		sk->err = ECONNRESET;
		sk->state = TCP_CLOSE;
		sk->shutdown = SHUTDOWN_MASK;
		tcp_reset(daddr, saddr, th, sk->prot, opt, dev);
		if (!sk->dead) {
		  wake_up_interruptible(sk->sleep);
		}
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }
#endif
	  /* Simultaneous open: a bare SYN moves us to SYN_RECV. */
	  if (!th->ack) {
		if (th->syn) {
		  sk->state = TCP_SYN_RECV;
		}

		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }

	  /* The segment carries an ACK: finish the handshake. */
	  switch(sk->state) {
		case TCP_SYN_SENT:
		  if (!tcp_ack(sk, th, saddr, len)) {
			tcp_reset(daddr, saddr, th,
			  sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		  }

		  /* ACK without SYN while in SYN_SENT: ignore. */
		  if (!th->syn) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		  }

		  /* SYN+ACK received: ack it and fall through to the
		     common establishment code below. */
		  sk->acked_seq = th->seq+1;
		  sk->fin_seq = th->seq;
		  tcp_send_ack(sk->send_seq, th->seq+1,
			sk, th, sk->daddr);
		  /* fallthrough */
		case TCP_SYN_RECV:
		  if (!tcp_ack(sk, th, saddr, len)) {
			tcp_reset(daddr, saddr, th,
			  sk->prot, opt, dev,sk->ip_tos,sk->ip_ttl);
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		  }
		  sk->state = TCP_ESTABLISHED;

		  /* Parse options (e.g. MSS) and latch the peer port. */
		  tcp_options(sk, th);
		  sk->dummy_th.dest = th->source;
		  sk->copied_seq = sk->acked_seq-1;
		  if (!sk->dead) {
			sk->state_change(sk);
		  }

		  /* Guard against a zero window from the handshake so
		     that sends do not deadlock. */
		  if (sk->max_window == 0) {
			sk->max_window = 32;
			sk->mss = min(sk->max_window, sk->mtu);
		  }

		  /* The establishing segment may itself carry urgent
		     data, data, and/or a FIN. */
		  if (th->urg) {
			if (tcp_urg(sk, th, saddr, len)) {
			  kfree_skb(skb, FREE_READ);
			  release_sock(sk);
			  return(0);
			}
		  }
		  if (tcp_data(skb, sk, saddr, len))
			kfree_skb(skb, FREE_READ);

		  if (th->fin) tcp_fin(sk, th, saddr, dev);
		  release_sock(sk);
		  return(0);
	  }

	  /* Default-state path (inner switch not taken): handle urgent
	     data, data, and FIN for the remaining states. */
	  if (th->urg) {
		if (tcp_urg(sk, th, saddr, len)) {
		  kfree_skb(skb, FREE_READ);
		  release_sock(sk);
		  return(0);
		}
	  }

	  if (tcp_data(skb, sk, saddr, len)) {
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }

	  if (!th->fin) {
		release_sock(sk);
		return(0);
	  }
	  tcp_fin(sk, th, saddr, dev);
	  release_sock(sk);
	  return(0);
  }
}
3542
3543
3544
3545
3546
3547
3548 static void
3549 tcp_write_wakeup(struct sock *sk)
3550 {
3551 struct sk_buff *buff;
3552 struct tcphdr *t1;
3553 struct device *dev=NULL;
3554 int tmp;
3555
3556 if (sk->zapped)
3557 return;
3558
3559 if (sk -> state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) return;
3560
3561 buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
3562 if (buff == NULL) return;
3563
3564 buff->mem_addr = buff;
3565 buff->mem_len = MAX_ACK_SIZE;
3566 buff->len = sizeof(struct tcphdr);
3567 buff->free = 1;
3568 buff->sk = sk;
3569 DPRINTF((DBG_TCP, "in tcp_write_wakeup\n"));
3570 t1 = (struct tcphdr *) buff->data;
3571
3572
3573 tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
3574 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
3575 if (tmp < 0) {
3576 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
3577 return;
3578 }
3579
3580 buff->len += tmp;
3581 t1 = (struct tcphdr *)((char *)t1 +tmp);
3582
3583 memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
3584
3585
3586
3587
3588
3589 t1->seq = ntohl(sk->send_seq-1);
3590 t1->ack = 1;
3591 t1->res1= 0;
3592 t1->res2= 0;
3593 t1->rst = 0;
3594 t1->urg = 0;
3595 t1->psh = 0;
3596 t1->fin = 0;
3597 t1->syn = 0;
3598 t1->ack_seq = ntohl(sk->acked_seq);
3599 t1->window = ntohs(tcp_select_window(sk));
3600 t1->doff = sizeof(*t1)/4;
3601 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
3602
3603
3604
3605
3606 sk->prot->queue_xmit(sk, dev, buff, 1);
3607 }
3608
3609
3610
3611
3612
/*
 * Zero-window probe: clone the headers (and one data byte, if any) of
 * the first unsent segment and transmit it to force the peer to re-ack
 * and re-advertise its window.  Backs off exponentially and re-arms
 * the probe timer.
 */
void
tcp_send_probe0(struct sock *sk)
{
  unsigned char *raw;
  struct iphdr *iph;
  struct sk_buff *skb2, *skb;
  int len, hlen, data;
  struct tcphdr *t1;
  struct device *dev;

  if (sk->zapped)
	return;		/* after a reset there is nothing to probe */

  if (sk -> state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT &&
      sk -> state != TCP_FIN_WAIT1 && sk->state != TCP_FIN_WAIT2)
	return;

  /* Nothing waiting to be sent: no reason to probe. */
  skb = sk->wfront;
  if (skb == NULL)
	return;

  dev = skb->dev;

  if(dev==NULL)
  {
	printk("tcp_send_probe0: NULL device bug!\n");
	return;
  }
  IS_SKB(skb);

  /* Locate the IP header after the link-layer header. */
  raw = skb->data;
  iph = (struct iphdr *) (raw + dev->hard_header_len);

  /* Total header length, payload size, and probe length: headers plus
     at most one byte of data. */
  hlen = (iph->ihl * sizeof(unsigned long)) + dev->hard_header_len;
  data = skb->len - hlen - sizeof(struct tcphdr);
  len = hlen + sizeof(struct tcphdr) + (data ? 1 : 0);

  /* No memory: retry the probe shortly. */
  if ((skb2 = alloc_skb(sizeof(struct sk_buff) + len, GFP_ATOMIC)) == NULL) {
	reset_timer (sk, TIME_PROBE0, 10);
	return;
  }

  skb2->arp = skb->arp;
  skb2->len = len;
  skb2->h.raw = (char *)(skb2->data);

  /* Charge the clone against the socket's write allocation. */
  sk->wmem_alloc += skb2->mem_len;

  /* Copy headers (and the single probe byte) from the original. */
  memcpy(skb2->h.raw, raw, len);

  skb2->h.raw += hlen;
  t1 = skb2->h.th;

  /* Freshen the ack and window fields; everything else is reused
     from the cloned header (same sequence number as the original). */
  t1->ack_seq = ntohl(sk->acked_seq);
  t1->res1 = 0;

  t1->ack = 1;
  t1->res2 = 0;
  t1->window = ntohs(tcp_select_window(sk));
  tcp_send_check(t1, sk->saddr, sk->daddr, len - hlen, sk);

  sk->prot->queue_xmit(sk, dev, skb2, 1);
  sk->backoff++;

  /* Exponential backoff capped at two minutes. */
  sk->rto = min(sk->rto << 1, 120*HZ);
  reset_timer (sk, TIME_PROBE0, sk->rto);
  sk->retransmits++;
  sk->prot->retransmits ++;
}
3697
3698
3699
3700
3701
3702 int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
3703 {
3704 int val,err;
3705
3706 if(level!=SOL_TCP)
3707 return ip_setsockopt(sk,level,optname,optval,optlen);
3708
3709 if (optval == NULL)
3710 return(-EINVAL);
3711
3712 err=verify_area(VERIFY_READ, optval, sizeof(int));
3713 if(err)
3714 return err;
3715
3716 val = get_fs_long((unsigned long *)optval);
3717
3718 switch(optname)
3719 {
3720 case TCP_MAXSEG:
3721
3722
3723
3724
3725
3726
3727 if(val<1||val>MAX_WINDOW)
3728 return -EINVAL;
3729 sk->user_mss=val;
3730 return 0;
3731 case TCP_NODELAY:
3732 sk->nonagle=(val==0)?0:1;
3733 return 0;
3734 default:
3735 return(-ENOPROTOOPT);
3736 }
3737 }
3738
3739 int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
3740 {
3741 int val,err;
3742
3743 if(level!=SOL_TCP)
3744 return ip_getsockopt(sk,level,optname,optval,optlen);
3745
3746 switch(optname)
3747 {
3748 case TCP_MAXSEG:
3749 val=sk->user_mss;
3750 break;
3751 case TCP_NODELAY:
3752 val=sk->nonagle;
3753 break;
3754 default:
3755 return(-ENOPROTOOPT);
3756 }
3757 err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
3758 if(err)
3759 return err;
3760 put_fs_long(sizeof(int),(unsigned long *) optlen);
3761
3762 err=verify_area(VERIFY_WRITE, optval, sizeof(int));
3763 if(err)
3764 return err;
3765 put_fs_long(val,(unsigned long *)optval);
3766
3767 return(0);
3768 }
3769
3770
/*
 * The TCP protocol operations table registered with the INET socket
 * layer.  NOTE(review): the initializers are positional and must stay
 * in the exact field order of struct proto (see sock.h) -- confirm
 * against that header before inserting or reordering entries.
 */
struct proto tcp_prot = {
  sock_wmalloc,			/* generic write-buffer allocator */
  sock_rmalloc,			/* generic read-buffer allocator */
  sock_wfree,
  sock_rfree,
  sock_rspace,
  sock_wspace,
  tcp_close,
  tcp_read,
  tcp_write,
  tcp_sendto,
  tcp_recvfrom,
  ip_build_header,		/* TCP delegates header building to IP */
  tcp_connect,
  tcp_accept,
  ip_queue_xmit,		/* ... and transmission as well */
  tcp_retransmit,
  tcp_write_wakeup,
  tcp_read_wakeup,
  tcp_rcv,
  tcp_select,
  tcp_ioctl,
  NULL,				/* no init hook */
  tcp_shutdown,
  tcp_setsockopt,
  tcp_getsockopt,
  128,				/* NOTE(review): numeric field -- meaning per struct proto */
  0,				/* NOTE(review): numeric field -- meaning per struct proto */
  {NULL,},			/* per-port socket hash array, initially empty */
  "TCP"				/* protocol name */
};