This source file includes the following definitions.
- min
- print_th
- get_firstr
- diff
- tcp_select_window
- tcp_time_wait
- tcp_retransmit
- tcp_err
- tcp_readable
- tcp_select
- tcp_ioctl
- tcp_check
- tcp_send_check
- tcp_send_skb
- dequeue_partial
- enqueue_partial
- tcp_send_partial
- tcp_send_ack
- tcp_build_header
- tcp_write
- tcp_sendto
- tcp_read_wakeup
- cleanup_rbuf
- tcp_read_urg
- tcp_read
- tcp_shutdown
- tcp_recvfrom
- tcp_reset
- tcp_options
- tcp_conn_request
- tcp_close
- tcp_write_xmit
- sort_send
- tcp_ack
- tcp_data
- tcp_urg
- tcp_fin
- tcp_accept
- tcp_connect
- tcp_sequence
- tcp_rcv
- tcp_write_wakeup
- tcp_send_probe0
- tcp_setsockopt
- tcp_getsockopt
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80 #include <linux/types.h>
81 #include <linux/sched.h>
82 #include <linux/mm.h>
83 #include <linux/string.h>
84 #include <linux/socket.h>
85 #include <linux/sockios.h>
86 #include <linux/termios.h>
87 #include <linux/in.h>
88 #include <linux/fcntl.h>
89 #include "inet.h"
90 #include "dev.h"
91 #include "ip.h"
92 #include "protocol.h"
93 #include "icmp.h"
94 #include "tcp.h"
95 #include "skbuff.h"
96 #include "sock.h"
97 #include "arp.h"
98 #include <linux/errno.h>
99 #include <linux/timer.h>
100 #include <asm/system.h>
101 #include <asm/segment.h>
102 #include <linux/mm.h>
103
104 #define SEQ_TICK 3
105 unsigned long seq_offset;
106
/*
 * Return the smaller of two unsigned values.
 * (Result is narrowed to int, matching the callers in this file.)
 */
static __inline__ int
min(unsigned int a, unsigned int b)
{
	return (a < b) ? a : b;
}
113
114
115 void
116 print_th(struct tcphdr *th)
117 {
118 unsigned char *ptr;
119
120 if (inet_debug != DBG_TCP) return;
121
122 printk("TCP header:\n");
123 ptr =(unsigned char *)(th + 1);
124 printk(" source=%d, dest=%d, seq =%ld, ack_seq = %ld\n",
125 ntohs(th->source), ntohs(th->dest),
126 ntohl(th->seq), ntohl(th->ack_seq));
127 printk(" fin=%d, syn=%d, rst=%d, psh=%d, ack=%d, urg=%d res1=%d res2=%d\n",
128 th->fin, th->syn, th->rst, th->psh, th->ack,
129 th->urg, th->res1, th->res2);
130 printk(" window = %d, check = %d urg_ptr = %d\n",
131 ntohs(th->window), ntohs(th->check), ntohs(th->urg_ptr));
132 printk(" doff = %d\n", th->doff);
133 printk(" options = %d %d %d %d\n", ptr[0], ptr[1], ptr[2], ptr[3]);
134 }
135
136
137
138
139 static struct sk_buff *
140 get_firstr(struct sock *sk)
141 {
142 return skb_dequeue(&sk->rqueue);
143 }
144
145
146
147
148
/*
 * Absolute difference between two sequence numbers, as a signed long.
 * (The original negated with ~d+1, which is identical to -d in
 * two's-complement arithmetic.)
 */
static long
diff(unsigned long seq1, unsigned long seq2)
{
	long delta = seq1 - seq2;

	return (delta > 0) ? delta : -delta;
}
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176 static int tcp_select_window(struct sock *sk)
177 {
178 int new_window = sk->prot->rspace(sk);
179
180
181 if(new_window<sk->window)
182 return(sk->window);
183
184 return(new_window);
185 }
186
187
188
/*
 * Enter TIME_WAIT: mark both directions shut down, notify any process
 * sleeping on the socket, and arm the close timer for the TIME_WAIT
 * interval.
 */
static void tcp_time_wait(struct sock *sk)
{
  sk->state = TCP_TIME_WAIT;
  sk->shutdown = SHUTDOWN_MASK;
  if (!sk->dead)
	sk->state_change(sk);	/* wake anyone selecting/sleeping on this sock */
  reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
}
197
198
199
200
201
202
203
204
205 static void
206 tcp_retransmit(struct sock *sk, int all)
207 {
208 if (all) {
209 ip_retransmit(sk, all);
210 return;
211 }
212
213
214
215
216
217
218
219
220
221 sk->cong_window = 1;
222 sk->exp_growth = 0;
223
224
225 ip_retransmit(sk, all);
226 }
227
228
229
230
231
232
233
234
235
236
/*
 * ICMP error handler for TCP.  'header' points at the returned IP
 * header; we step past it to reach the embedded TCP header, look up
 * the owning socket, and translate the ICMP code into a socket error.
 * Source-quench shrinks the congestion window; fatal errors abort a
 * connection that is still in SYN_SENT.
 */
void
tcp_err(int err, unsigned char *header, unsigned long daddr,
	unsigned long saddr, struct inet_protocol *protocol)
{
  struct tcphdr *th;
  struct sock *sk;
  struct iphdr *iph=(struct iphdr *)header;

  header+=4*iph->ihl;	/* skip the (variable length) IP header */

  DPRINTF((DBG_TCP, "TCP: tcp_err(%d, hdr=%X, daddr=%X saddr=%X, protocol=%X)\n",
					err, header, daddr, saddr, protocol));

  th =(struct tcphdr *)header;
  sk = get_sock(&tcp_prot, th->source, daddr, th->dest, saddr);
  print_th(th);

  if (sk == NULL) return;	/* no local socket matches this report */

  /* A negative 'err' is delivered to the socket as-is (sign flipped). */
  if(err<0)
  {
  	sk->err = -err;
  	sk->error_report(sk);
  	return;
  }

  if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8)) {
	/*
	 * Source quench: slow down by shrinking the congestion window,
	 * but never below 4 segments here.
	 */
	if (sk->cong_window > 4) sk->cong_window--;
	return;
  }

  DPRINTF((DBG_TCP, "TCP: icmp_err got error\n"));
  sk->err = icmp_err_convert[err & 0xff].errno;

  /*
   * Only a fatal ICMP error on a connection still being established
   * kills the socket; established connections just record sk->err.
   */
  if (icmp_err_convert[err & 0xff].fatal) {
	if (sk->state == TCP_SYN_SENT) {
		sk->state = TCP_CLOSE;
		sk->error_report(sk);
	}
  }
  return;
}
288
289
290
291
292
293
294
/*
 * Count how many bytes a reader could consume from the receive queue
 * right now (used by select() and the TIOCINQ ioctl).  Walks the queue
 * from copied_seq+1 with interrupts off, summing the unread part of
 * each in-sequence segment; stops at the first sequence hole, does not
 * count urgent bytes or SYNs, and stops after a PSH segment once any
 * data has been counted.
 *
 * NOTE(review): 'amount' is unsigned long but the function returns
 * int -- harmless for realistic queue sizes, but a historical wart.
 */
static int
tcp_readable(struct sock *sk)
{
  unsigned long counted;
  unsigned long amount;
  struct sk_buff *skb;
  int count=0;
  int sum;
  unsigned long flags;

  DPRINTF((DBG_TCP, "tcp_readable(sk=%X)\n", sk));
  if(sk && sk->debug)
  	printk("tcp_readable: %p - ",sk);

  if (sk == NULL || skb_peek(&sk->rqueue) == NULL)	/* empty queue: nothing to read */
  {
  	if(sk && sk->debug)
  		printk("empty\n");
  	return(0);
  }

  counted = sk->copied_seq+1;	/* first sequence number not yet given to the user */
  amount = 0;

  save_flags(flags);		/* keep interrupts from changing the queue under us */
  cli();
  skb =(struct sk_buff *)sk->rqueue;

  /* Walk until a push, a hole, or we wrap back to the queue head. */
  do {
	count++;
#ifdef OLD
	/* old sanity cap: bail out after 20 packets without a PSH */
	if (count > 20) {
		restore_flags(flags);
		DPRINTF((DBG_TCP, "tcp_readable, more than 20 packets without a psh\n"));
		printk("tcp_read: possible read_queue corruption.\n");
		return(amount);
	}
#endif
	if (before(counted, skb->h.th->seq))	/* sequence hole: stop counting */
		break;
	sum = skb->len -(counted - skb->h.th->seq);	/* bytes of this skb still unread */
	if (skb->h.th->syn) sum++;
	if (skb->h.th->urg) {
		sum -= ntohs(skb->h.th->urg_ptr);	/* urgent data is not "readable" here */
	}
	if (sum >= 0) {
		amount += sum;
		if (skb->h.th->syn) amount--;	/* the SYN occupies a sequence slot, not a byte */
		counted += sum;
	}
	if (amount && skb->h.th->psh) break;	/* PSH: report what we have */
	skb =(struct sk_buff *)skb->next;
  } while(skb != sk->rqueue);
  restore_flags(flags);
  DPRINTF((DBG_TCP, "tcp readable returning %d bytes\n", amount));
  if(sk->debug)
  	printk("got %lu bytes.\n",amount);
  return(amount);
}
356
357
358
359
360
361
362
/*
 * select() backend for TCP sockets.
 *
 *  SEL_IN:  ready when data is queued (or the socket is listening with
 *           a connection pending), when an error is pending, or after a
 *           receive-side shutdown.
 *  SEL_OUT: ready when at least an MTU of send buffer space is free and
 *           the three-way handshake has completed.
 *  SEL_EX:  ready when sk->err is set.
 *
 * The caller is registered on sk->sleep *before* the condition is
 * tested so a wakeup between test and sleep is not lost.  The socket is
 * locked (sk->inuse) for the duration and released on every exit path.
 */
static int
tcp_select(struct sock *sk, int sel_type, select_table *wait)
{
  DPRINTF((DBG_TCP, "tcp_select(sk=%X, sel_type = %d, wait = %X)\n",
	  					sk, sel_type, wait));

  sk->inuse = 1;
  switch(sel_type) {
	case SEL_IN:
		if(sk->debug)
			printk("select in");
		select_wait(sk->sleep, wait);
		if(sk->debug)
			printk("-select out");
		if (skb_peek(&sk->rqueue) != NULL) {
			/* listening sockets are "readable" when a connection waits */
			if (sk->state == TCP_LISTEN || tcp_readable(sk)) {
				release_sock(sk);
				if(sk->debug)
					printk("-select ok data\n");
				return(1);
			}
		}
		if (sk->err != 0)	/* pending error wakes the reader too */
		{
			release_sock(sk);
			if(sk->debug)
				printk("-select ok error");
			return(1);
		}
		if (sk->shutdown & RCV_SHUTDOWN) {
			release_sock(sk);
			if(sk->debug)
				printk("-select ok down\n");
			return(1);
		} else {
			release_sock(sk);
			if(sk->debug)
				printk("-select fail\n");
			return(0);
		}
	case SEL_OUT:
		select_wait(sk->sleep, wait);
		if (sk->shutdown & SEND_SHUTDOWN) {
			DPRINTF((DBG_TCP,
				"write select on shutdown socket.\n"));

			/* send side closed: never writable again */
			release_sock(sk);
			return(0);
		}

		/*
		 * Only report writable when a full MTU fits, so small
		 * writes do not immediately block again.
		 */
		if (sk->prot->wspace(sk) >= sk->mtu) {
			release_sock(sk);
			/* not writable until the handshake completes */
			if (sk->state == TCP_SYN_RECV ||
			    sk->state == TCP_SYN_SENT) return(0);
			return(1);
		}
		DPRINTF((DBG_TCP,
			"tcp_select: sleeping on write sk->wmem_alloc = %d, "
			"sk->packets_out = %d\n"
			"sk->wback = %X, sk->wfront = %X\n"
			"sk->send_seq = %u, sk->window_seq=%u\n",
				sk->wmem_alloc, sk->packets_out,
				sk->wback, sk->wfront,
				sk->send_seq, sk->window_seq));

		release_sock(sk);
		return(0);
	case SEL_EX:
		select_wait(sk->sleep,wait);
		if (sk->err) {
			release_sock(sk);
			return(1);
		}
		release_sock(sk);
		return(0);
  }

  release_sock(sk);
  return(0);
}
450
451
452 int
453 tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
454 {
455 int err;
456 DPRINTF((DBG_TCP, "tcp_ioctl(sk=%X, cmd = %d, arg=%X)\n", sk, cmd, arg));
457 switch(cmd) {
458 case DDIOCSDBG:
459 return(dbg_ioctl((void *) arg, DBG_TCP));
460
461 case TIOCINQ:
462 #ifdef FIXME
463 case FIONREAD:
464 #endif
465 {
466 unsigned long amount;
467
468 if (sk->state == TCP_LISTEN) return(-EINVAL);
469
470 sk->inuse = 1;
471 amount = tcp_readable(sk);
472 release_sock(sk);
473 DPRINTF((DBG_TCP, "returning %d\n", amount));
474 err=verify_area(VERIFY_WRITE,(void *)arg,
475 sizeof(unsigned long));
476 if(err)
477 return err;
478 put_fs_long(amount,(unsigned long *)arg);
479 return(0);
480 }
481 case SIOCATMARK:
482 {
483 struct sk_buff *skb;
484 int answ = 0;
485
486
487
488
489
490 sk->inuse = 1;
491 if ((skb=skb_peek(&sk->rqueue)) != NULL)
492 {
493 if (sk->copied_seq+1 == skb->h.th->seq && skb->h.th->urg)
494 answ = 1;
495 }
496 release_sock(sk);
497 err=verify_area(VERIFY_WRITE,(void *) arg,
498 sizeof(unsigned long));
499 if(err)
500 return err;
501 put_fs_long(answ,(int *) arg);
502 return(0);
503 }
504 case TIOCOUTQ:
505 {
506 unsigned long amount;
507
508 if (sk->state == TCP_LISTEN) return(-EINVAL);
509 amount = sk->prot->wspace(sk);
510 err=verify_area(VERIFY_WRITE,(void *)arg,
511 sizeof(unsigned long));
512 if(err)
513 return err;
514 put_fs_long(amount,(unsigned long *)arg);
515 return(0);
516 }
517 default:
518 return(-EINVAL);
519 }
520 }
521
522
523
/*
 * Compute the TCP checksum: the 16-bit ones-complement sum over the
 * pseudo header (source, destination, protocol, length) followed by
 * the TCP header and data, returned already complemented.  A zero
 * source address is replaced with this host's address.
 *
 * i386-specific: the sum is accumulated 32 bits at a time with inline
 * assembly, then the trailing 2- and 1-byte remainders are folded in.
 *
 * NOTE(review): the asm statements also name their output registers in
 * the clobber lists ("bx" with "=b", etc.) -- accepted by compilers of
 * this era; do not modernise the constraints without re-verifying.
 */
unsigned short
tcp_check(struct tcphdr *th, int len,
	  unsigned long saddr, unsigned long daddr)
{
  unsigned long sum;

  if (saddr == 0) saddr = my_addr();
  print_th(th);
  /* pseudo header: daddr + saddr + (swapped len in high half + proto), with carry */
  __asm__("\t addl %%ecx,%%ebx\n"
	  "\t adcl %%edx,%%ebx\n"
	  "\t adcl $0, %%ebx\n"
	  : "=b"(sum)
	  : "0"(daddr), "c"(saddr), "d"((ntohs(len) << 16) + IPPROTO_TCP*256)
	  : "cx","bx","dx" );

  if (len > 3) {
	/* add the segment body one 32-bit word at a time */
	__asm__("\tclc\n"
	        "1:\n"
	        "\t lodsl\n"
	        "\t adcl %%eax, %%ebx\n"
	        "\t loop 1b\n"
	        "\t adcl $0, %%ebx\n"
	        : "=b"(sum) , "=S"(th)
	        : "0"(sum), "c"(len/4) ,"1"(th)
	        : "ax", "cx", "bx", "si" );
  }

  /* fold the upper 16 bits of the sum into the lower 16 */
  __asm__("\t movl %%ebx, %%ecx\n"
	  "\t shrl $16,%%ecx\n"
	  "\t addw %%cx, %%bx\n"
	  "\t adcw $0, %%bx\n"
	  : "=b"(sum)
	  : "0"(sum)
	  : "bx", "cx");

  /* add a leftover 16-bit word, if any */
  if ((len & 2) != 0) {
	__asm__("\t lodsw\n"
	        "\t addw %%ax,%%bx\n"
	        "\t adcw $0, %%bx\n"
	        : "=b"(sum), "=S"(th)
	        : "0"(sum) ,"1"(th)
	        : "si", "ax", "bx");
  }

  /* add a trailing odd byte, if any */
  if ((len & 1) != 0) {
	__asm__("\t lodsb\n"
	        "\t movb $0,%%ah\n"
	        "\t addw %%ax,%%bx\n"
	        "\t adcw $0, %%bx\n"
	        : "=b"(sum)
	        : "0"(sum) ,"S"(th)
	        : "si", "ax", "bx");
  }

  /* ones-complement the accumulated sum */
  return((~sum) & 0xffff);
}
584
585
586 void tcp_send_check(struct tcphdr *th, unsigned long saddr,
587 unsigned long daddr, int len, struct sock *sk)
588 {
589 th->check = 0;
590 th->check = tcp_check(th, len, saddr, daddr);
591 return;
592 }
593
/*
 * Hand a fully built data segment to the transmit path.  Validates the
 * buffer, checksums the segment, then either transmits it immediately
 * or -- when the peer's window is exhausted, a retransmit is pending,
 * or the congestion window is full -- appends it to the socket's write
 * queue.  If the queued segment is beyond the window and nothing else
 * will wake us, the zero-window probe timer is armed.
 */
static void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
{
	int size;

	/* TCP header + payload length (skb->len also covers lower-layer headers) */
	size = skb->len - ((unsigned char *) skb->h.th - skb->data);

	/* sanity check: the header pointer must lie inside the buffer */
	if (size < sizeof(struct tcphdr) || size > skb->len) {
		printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
			skb, skb->data, skb->h.th, skb->len);
		kfree_skb(skb, FREE_WRITE);
		return;
	}

	/* a zero-payload segment must at least carry SYN or FIN */
	if (size == sizeof(struct tcphdr)) {
		if(!skb->h.th->syn && !skb->h.th->fin) {
			printk("tcp_send_skb: attempt to queue a bogon.\n");
			kfree_skb(skb,FREE_WRITE);
			return;
		}
	}

	tcp_send_check(skb->h.th, sk->saddr, sk->daddr, size, sk);

	skb->h.seq = sk->send_seq;
	if (after(sk->send_seq , sk->window_seq) ||
	    (sk->retransmits && sk->timeout == TIME_WRITE) ||
	     sk->packets_out >= sk->cong_window) {
		/* cannot send now: append to the tail of the write queue */
		DPRINTF((DBG_TCP, "sk->cong_window = %d, sk->packets_out = %d\n",
					sk->cong_window, sk->packets_out));
		DPRINTF((DBG_TCP, "sk->send_seq = %d, sk->window_seq = %d\n",
					sk->send_seq, sk->window_seq));
		skb->next = NULL;
		skb->magic = TCP_WRITE_QUEUE_MAGIC;
		if (sk->wback == NULL) {
			sk->wfront=skb;
		} else {
			sk->wback->next = skb;
		}
		sk->wback = skb;
		/*
		 * Window closed and no ACK expected: probe the peer so
		 * the queue does not stall forever.
		 */
		if (before(sk->window_seq, sk->wfront->h.seq) &&
		    sk->send_head == NULL &&
		    sk->ack_backlog == 0)
		  reset_timer(sk, TIME_PROBE0,
			backoff(sk->backoff) * (2 * sk->mdev + sk->rtt));
	} else {
		sk->prot->queue_xmit(sk, skb->dev, skb, 0);
	}
}
647
648 static struct sk_buff * dequeue_partial(struct sock * sk)
649 {
650 struct sk_buff * skb;
651 unsigned long flags;
652
653 save_flags(flags);
654 cli();
655 skb = sk->send_tmp;
656 if (skb) {
657 sk->send_tmp = skb->next;
658 skb->next = NULL;
659 }
660 restore_flags(flags);
661 return skb;
662 }
663
664 static void enqueue_partial(struct sk_buff * skb, struct sock * sk)
665 {
666 struct sk_buff * tmp;
667 unsigned long flags;
668
669 skb->next = NULL;
670 save_flags(flags);
671 cli();
672 tmp = sk->send_tmp;
673 sk->send_tmp = skb;
674 restore_flags(flags);
675 if (tmp)
676 tcp_send_skb(sk, tmp);
677 }
678
679 static void tcp_send_partial(struct sock *sk)
680 {
681 struct sk_buff *skb;
682
683 if (sk == NULL)
684 return;
685 while ((skb = dequeue_partial(sk)) != NULL)
686 tcp_send_skb(sk, skb);
687 }
688
689
690
/*
 * Build and transmit a bare ACK segment acknowledging 'ack', carrying
 * sequence number 'sequence', to the peer that sent 'th'.  If no buffer
 * memory is available the ACK is deferred by bumping ack_backlog and
 * arming a short write timer.  When the ACK covers everything received
 * (ack == acked_seq) the delayed-ack state is cleared and an idle
 * socket's timer is downgraded to keepalive or removed.
 */
static void
tcp_send_ack(unsigned long sequence, unsigned long ack,
	     struct sock *sk,
	     struct tcphdr *th, unsigned long daddr)
{
  struct sk_buff *buff;
  struct tcphdr *t1;
  struct device *dev = NULL;
  int tmp;

  if(sk->zapped)
	return;		/* socket has been nuked; send nothing */

  /*
   * ACKs must go out even when memory is tight, so try an atomic
   * allocation and fall back to a retry timer on failure.
   */
  buff = sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
  if (buff == NULL) {
	/* remember that an ACK is owed and retry soon */
	sk->ack_backlog++;
	if (sk->timeout != TIME_WRITE && tcp_connected(sk->state)) {
		reset_timer(sk, TIME_WRITE, 10);
	}
	if (inet_debug == DBG_SLIP) printk("\rtcp_ack: malloc failed\n");
	return;
  }

  buff->mem_addr = buff;
  buff->mem_len = MAX_ACK_SIZE;
  buff->len = sizeof(struct tcphdr);
  buff->sk = sk;
  t1 =(struct tcphdr *) buff->data;

  /* lower-layer (IP/link) headers first */
  tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
				IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
  if (tmp < 0) {
  	buff->free=1;
	sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
	if (inet_debug == DBG_SLIP) printk("\rtcp_ack: build_header failed\n");
	return;
  }
  buff->len += tmp;
  t1 =(struct tcphdr *)((char *)t1 +tmp);

  /* start from the incoming header, then swap/overwrite the fields */
  memcpy(t1, th, sizeof(*t1));

  t1->dest = th->source;
  t1->source = th->dest;
  t1->seq = ntohl(sequence);
  t1->ack = 1;
  sk->window = tcp_select_window(sk);
  t1->window = ntohs(sk->window);
  t1->res1 = 0;
  t1->res2 = 0;
  t1->rst = 0;
  t1->urg = 0;
  t1->syn = 0;
  t1->psh = 0;
  t1->fin = 0;
  if (ack == sk->acked_seq) {
	/* everything received is now acknowledged: clear delayed-ack state */
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	sk->ack_timed = 0;
	if (sk->send_head == NULL && sk->wfront == NULL && sk->timeout == TIME_WRITE)
	{
		/* nothing in flight: keepalive timer or no timer at all */
		if(sk->keepopen)
			reset_timer(sk,TIME_KEEPOPEN,TCP_TIMEOUT_LEN);
		else
			delete_timer(sk);
	}
  }
  t1->ack_seq = ntohl(ack);
  t1->doff = sizeof(*t1)/4;
  tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), sk);
  if (sk->debug)
  	 printk("\rtcp_ack: seq %lx ack %lx\n", sequence, ack);
  sk->prot->queue_xmit(sk, dev, buff, 1);
}
772
773
774
/*
 * Fill in a TCP header for an outgoing data segment, starting from the
 * socket's header template (dummy_th) and stamping in the current send
 * and ack sequence numbers and the advertised window.  'push' == 0 sets
 * the PSH bit.  As a side effect the pending delayed-ack state on the
 * socket is cleared, since this segment carries the ACK.  Returns the
 * header size written (always sizeof(struct tcphdr); no options).
 */
static int
tcp_build_header(struct tcphdr *th, struct sock *sk, int push)
{

  memcpy(th,(void *) &(sk->dummy_th), sizeof(*th));
  th->seq = htonl(sk->send_seq);
  th->psh =(push == 0) ? 1 : 0;	/* push==0 means "last piece, push it" */
  th->doff = sizeof(*th)/4;
  th->ack = 1;
  th->fin = 0;
  sk->ack_backlog = 0;		/* this segment acknowledges everything pending */
  sk->bytes_rcv = 0;
  sk->ack_timed = 0;
  th->ack_seq = htonl(sk->acked_seq);
  sk->window = tcp_select_window(sk);
  th->window = htons(sk->window);

  return(sizeof(*th));
}
795
796
797
798
799
/*
 * write()/send() entry point for TCP.  Copies user data into kernel
 * buffers an MTU at a time and hands each full segment to
 * tcp_send_skb().  Handles: waiting for the connection to become
 * established, appending to an existing partially-filled segment,
 * allocating oversized buffers for future coalescing when packets are
 * already in flight, and sleeping (unless nonblock) when buffer memory
 * is exhausted.  Returns bytes accepted, or a negative errno when
 * nothing was copied.
 */
static int
tcp_write(struct sock *sk, unsigned char *from,
	  int len, int nonblock, unsigned flags)
{
  int copied = 0;
  int copy;
  int tmp;
  struct sk_buff *skb;
  struct sk_buff *send_tmp;
  unsigned char *buff;
  struct proto *prot;
  struct device *dev = NULL;

  DPRINTF((DBG_TCP, "tcp_write(sk=%X, from=%X, len=%d, nonblock=%d, flags=%X)\n",
					sk, from, len, nonblock, flags));

  sk->inuse=1;
  prot = sk->prot;
  while(len > 0) {
	/* pending socket error: report it (after any partial progress) */
	if (sk->err) {
		release_sock(sk);
		if (copied) return(copied);
		tmp = -sk->err;
		sk->err = 0;
		return(tmp);
	}

	/* writing on a send-shutdown socket is EPIPE */
	if (sk->shutdown & SEND_SHUTDOWN) {
		release_sock(sk);
		sk->err = EPIPE;
		if (copied) return(copied);
		sk->err = 0;
		return(-EPIPE);
	}

	/*
	 * Wait until the connection is established (or fail if it is
	 * in a state from which it never will be).
	 */
	while(sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) {
		if (sk->err) {
			release_sock(sk);
			if (copied) return(copied);
			tmp = -sk->err;
			sk->err = 0;
			return(tmp);
		}

		if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV) {
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 1\n"));
			if (copied) return(copied);

			if (sk->err) {
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			if (sk->keepopen) {
				send_sig(SIGPIPE, current, 0);
			}
			return(-EPIPE);
		}

		if (nonblock || copied) {
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 2\n"));
			if (copied) return(copied);
			return(-EAGAIN);
		}

		/*
		 * Sleep until the state changes; re-test with interrupts
		 * off so a change between test and sleep is not missed.
		 */
		release_sock(sk);
		cli();
		if (sk->state != TCP_ESTABLISHED &&
		    sk->state != TCP_CLOSE_WAIT && sk->err == 0) {
			interruptible_sleep_on(sk->sleep);
			if (current->signal & ~current->blocked) {
				sti();
				DPRINTF((DBG_TCP, "tcp_write: return 3\n"));
				if (copied) return(copied);
				return(-ERESTARTSYS);
			}
		}
		sk->inuse = 1;
		sti();
	}

	/* first preference: top up an existing partially-filled segment */
	if ((skb = dequeue_partial(sk)) != NULL) {
		int hdrlen;

		/* offset of the payload area within the buffer */
		hdrlen = ((unsigned long)skb->h.th - (unsigned long)skb->data)
			 + sizeof(struct tcphdr);

		/*
		 * Out-of-band data is never merged into a pending
		 * partial segment; it forces the partial out instead.
		 */
		if (!(flags & MSG_OOB)) {
			copy = min(sk->mtu - (skb->len - hdrlen), len);

			if (copy <= 0) {
				printk("TCP: **bug**: \"copy\" <= 0!!\n");
				copy = 0;
			}

			memcpy_fromfs(skb->data + skb->len, from, copy);
			skb->len += copy;
			from += copy;
			copied += copy;
			len -= copy;
			sk->send_seq += copy;
		}
		enqueue_partial(skb, sk);
		if ((skb->len - hdrlen) >= sk->mtu || (flags & MSG_OOB)) {
			tcp_send_partial(sk);
		}
		continue;
	}

#if 0
	/*
	 * (disabled) window-aware sizing: base the segment size on the
	 * remaining send window rather than the MTU alone.
	 */
	copy = diff(sk->window_seq, sk->send_seq);
	if (copy < (diff(sk->window_seq, sk->rcv_ack_seq) >> 2))
		copy = sk->mtu;
	copy = min(copy, sk->mtu);
	copy = min(copy, len);
#else
	/* always build full-MTU segments when possible */
	copy = min(sk->mtu, len);
#endif

	/*
	 * If data is already in flight and this write will not fill a
	 * segment, allocate an oversized buffer so later writes can be
	 * coalesced into it (Nagle-style batching); otherwise allocate
	 * exactly what is needed.
	 */
	if (sk->packets_out && copy < sk->mtu && !(flags & MSG_OOB)) {
		release_sock(sk);
		skb = prot->wmalloc(sk, sk->mtu + 128 + prot->max_header + sizeof(*skb), 0, GFP_KERNEL);
		sk->inuse = 1;
		send_tmp = skb;
	} else {
		release_sock(sk);
		skb = prot->wmalloc(sk, copy + prot->max_header + sizeof(*skb), 0, GFP_KERNEL);
		sk->inuse = 1;
		send_tmp = NULL;
	}

	/* no buffer memory: fail (nonblock) or sleep until some is freed */
	if (skb == NULL) {
		if (nonblock ) {
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 4\n"));
			if (copied) return(copied);
			return(-EAGAIN);
		}

		/* sleep only while wmem_alloc has not dropped */
		tmp = sk->wmem_alloc;
		release_sock(sk);
		cli();

		if (tmp <= sk->wmem_alloc &&
		    (sk->state == TCP_ESTABLISHED||sk->state == TCP_CLOSE_WAIT)
		    && sk->err == 0) {
			interruptible_sleep_on(sk->sleep);
			if (current->signal & ~current->blocked) {
				sti();
				DPRINTF((DBG_TCP, "tcp_write: return 5\n"));
				if (copied) return(copied);
				return(-ERESTARTSYS);
			}
		}
		sk->inuse = 1;
		sti();
		continue;
	}

	skb->len = 0;
	skb->sk = sk;
	skb->free = 0;

	buff = skb->data;

	/* build lower-layer headers, then the TCP header, then the data */
	tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev,
				 IPPROTO_TCP, sk->opt, skb->mem_len,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0 ) {
		prot->wfree(sk, skb->mem_addr, skb->mem_len);
		release_sock(sk);
		DPRINTF((DBG_TCP, "tcp_write: return 6\n"));
		if (copied) return(copied);
		return(tmp);
	}
	skb->len += tmp;
	skb->dev = dev;
	buff += tmp;
	skb->h.th =(struct tcphdr *) buff;
	tmp = tcp_build_header((struct tcphdr *)buff, sk, len-copy);
	if (tmp < 0) {
		prot->wfree(sk, skb->mem_addr, skb->mem_len);
		release_sock(sk);
		DPRINTF((DBG_TCP, "tcp_write: return 7\n"));
		if (copied) return(copied);
		return(tmp);
	}

	if (flags & MSG_OOB) {
		((struct tcphdr *)buff)->urg = 1;
		((struct tcphdr *)buff)->urg_ptr = ntohs(copy);
	}
	skb->len += tmp;
	memcpy_fromfs(buff+tmp, from, copy);

	from += copy;
	copied += copy;
	len -= copy;
	skb->len += copy;
	skb->free = 0;
	sk->send_seq += copy;

	/* oversized buffer: hold it back as the new partial segment */
	if (send_tmp != NULL) {
		enqueue_partial(send_tmp, sk);
		continue;
	}
	tcp_send_skb(sk, skb);
  }
  sk->err = 0;

  /*
   * Flush a held-back partial segment when nothing is in flight, or
   * when Nagle is disabled and the window still has room.
   */
  if(sk->send_tmp &&
     ((!sk->packets_out)
      || (sk->nonagle && before(sk->send_seq , sk->window_seq))
     ))
  	tcp_send_partial(sk);

  release_sock(sk);
  DPRINTF((DBG_TCP, "tcp_write: return 8\n"));
  return(copied);
}
1059
1060
1061 static int
1062 tcp_sendto(struct sock *sk, unsigned char *from,
1063 int len, int nonblock, unsigned flags,
1064 struct sockaddr_in *addr, int addr_len)
1065 {
1066 struct sockaddr_in sin;
1067
1068 if (addr_len < sizeof(sin)) return(-EINVAL);
1069 memcpy_fromfs(&sin, addr, sizeof(sin));
1070 if (sin.sin_family && sin.sin_family != AF_INET) return(-EINVAL);
1071 if (sin.sin_port != sk->dummy_th.dest) return(-EINVAL);
1072 if (sin.sin_addr.s_addr != sk->daddr) return(-EINVAL);
1073 return(tcp_write(sk, from, len, nonblock, flags));
1074 }
1075
1076
/*
 * Send an immediate window-update ACK on behalf of the reader, but
 * only when ACKs are actually owed (ack_backlog != 0).  Used after the
 * receive buffer has been drained so the peer learns about the newly
 * opened window.  On allocation failure the ACK is retried via a short
 * write timer.
 */
static void
tcp_read_wakeup(struct sock *sk)
{
  int tmp;
  struct device *dev = NULL;
  struct tcphdr *t1;
  struct sk_buff *buff;

  DPRINTF((DBG_TCP, "in tcp read wakeup\n"));
  if (!sk->ack_backlog) return;	/* nothing owed: stay quiet */

  /*
   * Build a bare ACK segment from the socket's header template.
   */
  buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
  if (buff == NULL) {
	/* try again shortly */
	reset_timer(sk, TIME_WRITE, 10);
	return;
  }

  buff->mem_addr = buff;
  buff->mem_len = MAX_ACK_SIZE;
  buff->len = sizeof(struct tcphdr);
  buff->sk = sk;

  /* lower-layer headers first */
  tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
			       IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
  if (tmp < 0) {
	buff->free=1;
	sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
	return;
  }

  buff->len += tmp;
  t1 =(struct tcphdr *)(buff->data +tmp);

  memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
  t1->seq = ntohl(sk->send_seq);
  t1->ack = 1;
  t1->res1 = 0;
  t1->res2 = 0;
  t1->rst = 0;
  t1->urg = 0;
  t1->syn = 0;
  t1->psh = 0;
  sk->ack_backlog = 0;		/* this segment pays off the ACK debt */
  sk->bytes_rcv = 0;
  sk->window = tcp_select_window(sk);
  t1->window = ntohs(sk->window);
  t1->ack_seq = ntohl(sk->acked_seq);
  t1->doff = sizeof(*t1)/4;
  tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
  sk->prot->queue_xmit(sk, dev, buff, 1);
}
1140
1141
1142
1143
1144
1145
1146
1147
/*
 * Release fully consumed buffers at the head of the receive queue and,
 * if that freed a noticeable amount of space, arrange for the peer to
 * learn about the bigger window: immediately via tcp_read_wakeup() when
 * the gain exceeds an MTU, otherwise via a (possibly already running)
 * delayed-ack timer.
 */
static void
cleanup_rbuf(struct sock *sk)
{
  unsigned long flags;
  int left;
  struct sk_buff *skb;

  if(sk->debug)
  	printk("cleaning rbuf for sk=%p\n", sk);

  save_flags(flags);
  cli();

  left = sk->prot->rspace(sk);	/* space before the cleanup, for comparison below */

  /*
   * Free used-up buffers from the queue head; stop at the first one
   * the reader has not finished with.
   */
  while((skb=skb_peek(&sk->rqueue)) != NULL )
  {
	if (!skb->used)
		break;
	skb_unlink(skb);
	skb->sk = sk;
	kfree_skb(skb, FREE_READ);
  }

  restore_flags(flags);

  DPRINTF((DBG_TCP, "sk->window left = %d, sk->prot->rspace(sk)=%d\n",
			sk->window - sk->bytes_rcv, sk->prot->rspace(sk)));

  if(sk->debug)
  	printk("sk->rspace = %lu, was %d\n", sk->prot->rspace(sk),
  			left);
  if (sk->prot->rspace(sk) != left)
  {
	/*
	 * Space was reclaimed -- the advertised window can grow, so an
	 * ACK is now owed to the peer.
	 */
	sk->ack_backlog++;
	if ((sk->prot->rspace(sk) > (sk->window - sk->bytes_rcv + sk->mtu))) {
		/* big gain: tell the peer right away */
		tcp_read_wakeup(sk);
	} else {
		/* small gain: piggyback on a delayed ACK, keeping any
		 * earlier (sooner) expiry that was already armed */
		int was_active = del_timer(&sk->timer);
		if (!was_active || TCP_ACK_TIME < sk->timer.expires) {
			reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
		} else
			add_timer(&sk->timer);
	}
  }
}
1216
1217
1218
/*
 * Read out-of-band (urgent) data: scan the receive queue for a segment
 * whose URG bit is set and not yet consumed, and copy up to urg_ptr
 * bytes of it to the user.  Blocks (unless nonblock) while no urgent
 * data is queued, handling pending errors, closed/done sockets and
 * receive shutdown along the way.  MSG_PEEK leaves the urgent data
 * marked unconsumed.
 */
static int
tcp_read_urg(struct sock * sk, int nonblock,
	     unsigned char *to, int len, unsigned flags)
{
  int copied = 0;
  struct sk_buff *skb;

  DPRINTF((DBG_TCP, "tcp_read_urg(sk=%X, to=%X, len=%d, flags=%X)\n",
					sk, to, len, flags));

  while(len > 0)
  {
	sk->inuse = 1;
	/* wait until urgent data is available (or a terminal condition) */
	while(sk->urg==0 || skb_peek(&sk->rqueue) == NULL) {
		if (sk->err) {
			int tmp;

			release_sock(sk);
			if (copied) return(copied);
			tmp = -sk->err;
			sk->err = 0;
			return(tmp);
		}

		if (sk->state == TCP_CLOSE || sk->done) {
			release_sock(sk);
			if (copied) return(copied);
			if (!sk->done) {
				sk->done = 1;	/* first EOF read returns 0 ... */
				return(0);
			}
			return(-ENOTCONN);	/* ... subsequent ones fail */
		}

		if (sk->shutdown & RCV_SHUTDOWN) {
			release_sock(sk);
			if (copied == 0)
				sk->done = 1;
			return(copied);
		}

		if (nonblock || copied) {
			release_sock(sk);
			if (copied) return(copied);
			return(-EAGAIN);
		}

		/* re-test with interrupts off so a wakeup is not lost */
		release_sock(sk);
		cli();
		if ((sk->urg == 0 || skb_peek(&sk->rqueue) == NULL) &&
		    sk->err == 0 && !(sk->shutdown & RCV_SHUTDOWN)) {
			interruptible_sleep_on(sk->sleep);
			if (current->signal & ~current->blocked) {
				sti();
				if (copied) return(copied);
				return(-ERESTARTSYS);
			}
		}
		sk->inuse = 1;
		sti();
	}

	/* find the first unconsumed urgent segment in the queue */
	skb = skb_peek(&sk->rqueue);
	do {
		int amt;

		if (skb->h.th->urg && !skb->urg_used) {
			/* urg_ptr == 0 is treated as "whole segment" */
			if (skb->h.th->urg_ptr == 0) {
				skb->h.th->urg_ptr = ntohs(skb->len);
			}
			amt = min(ntohs(skb->h.th->urg_ptr),len);
			if(amt)
			{
				/* data starts right after the TCP header */
				memcpy_tofs(to,(unsigned char *)(skb->h.th) +
							skb->h.th->doff*4, amt);
			}

			if (!(flags & MSG_PEEK)) {
				skb->urg_used = 1;
				sk->urg--;
			}
			release_sock(sk);
			copied += amt;
			return(copied);
		}
		skb =(struct sk_buff *)skb->next;
	} while(skb != sk->rqueue);
  }

  release_sock(sk);
  return(0);
}
1312
1313
1314
/*
 * read()/recv() entry point for TCP.  Copies in-sequence data from the
 * receive queue to user space, sleeping (unless nonblock) while the
 * next expected byte has not arrived.  Handles: pending errors, a
 * closed connection (EOF once, then -ENOTCONN), receive shutdown,
 * MSG_PEEK (leave data in place), MSG_OOB (delegates to
 * tcp_read_urg()), and stepping over urgent bytes embedded in the
 * stream (SIGURG if they have not been consumed yet).  Returns bytes
 * copied, or a negative errno when nothing was copied.
 */
static int
tcp_read(struct sock *sk, unsigned char *to,
	 int len, int nonblock, unsigned flags)
{
  int copied=0;
  struct sk_buff *skb;
  unsigned long offset;
  unsigned long used;
  int err;

  if (len == 0) return(0);
  if (len < 0) {
	return(-EINVAL);
  }

  err=verify_area(VERIFY_WRITE,to,len);
  if(err)
  	return err;

  /* can't read from a listening socket */
  if (sk->state == TCP_LISTEN) return(-ENOTCONN);

  /* out-of-band data takes the urgent path */
  if ((flags & MSG_OOB))
  	return(tcp_read_urg(sk, nonblock, to, len, flags));

  sk->inuse = 1;

  skb=skb_peek(&sk->rqueue);

  DPRINTF((DBG_TCP, "tcp_read(sk=%X, to=%X, len=%d, nonblock=%d, flags=%X)\n",
						sk, to, len, nonblock, flags));

  while(len > 0) {
	/*
	 * Wait until the next in-sequence, still-unread segment is at
	 * hand; every terminal condition is checked on the way.
	 */
	while(skb == NULL ||
	      before(sk->copied_seq+1, skb->h.th->seq) || skb->used) {
		DPRINTF((DBG_TCP, "skb = %X:\n", skb));
		cleanup_rbuf(sk);	/* free what we have consumed so far */
		if (sk->err)
		{
			int tmp;

			release_sock(sk);
			if (copied)
			{
				DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
								copied));
				return(copied);
			}
			tmp = -sk->err;
			sk->err = 0;
			return(tmp);
		}

		if (sk->state == TCP_CLOSE)
		{
			release_sock(sk);
			if (copied) {
				DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
								copied));
				return(copied);
			}
			if (!sk->done) {
				sk->done = 1;	/* one EOF, then -ENOTCONN */
				return(0);
			}
			return(-ENOTCONN);
		}

		if (sk->shutdown & RCV_SHUTDOWN)
		{
			release_sock(sk);
			if (copied == 0) sk->done = 1;
			DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
			return(copied);
		}

		if (nonblock || copied)
		{
			release_sock(sk);
			if(sk->debug)
				printk("read: EAGAIN\n");
			if (copied)
			{
				DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
								copied));
				return(copied);
			}
			return(-EAGAIN);
		}

		if ((flags & MSG_PEEK) && copied != 0)
		{
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
			return(copied);
		}

		DPRINTF((DBG_TCP, "tcp_read about to sleep. state = %d\n",
								sk->state));
		release_sock(sk);

		/*
		 * Re-test the wait condition with interrupts disabled so
		 * a wakeup between the test and the sleep is not lost.
		 */
		cli();
		if (sk->shutdown & RCV_SHUTDOWN || sk->err != 0) {
			sk->inuse = 1;
			sti();
			continue;
		}

		if (skb_peek(&sk->rqueue) == NULL ||
		    before(sk->copied_seq+1, sk->rqueue->h.th->seq)) {
			if(sk->debug)
				printk("Read wait sleep\n");
			interruptible_sleep_on(sk->sleep);
			if(sk->debug)
				printk("Read wait wakes\n");
			if (current->signal & ~current->blocked) {
				sti();
				if (copied) {
					DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
								copied));
					return(copied);
				}
				return(-ERESTARTSYS);
			}
		}
		sk->inuse = 1;
		sti();
		DPRINTF((DBG_TCP, "tcp_read woke up. \n"));

		skb=skb_peek(&sk->rqueue);
	}

	/*
	 * Copy from this segment, starting at the first byte the user
	 * has not yet seen (the SYN occupies a sequence slot, not data).
	 */
	offset = sk->copied_seq+1 - skb->h.th->seq;

	if (skb->h.th->syn) offset--;
	if (offset < skb->len)
	{
		/*
		 * Urgent data embedded in the stream: once it has been
		 * consumed we skip over it; if not, interrupt the read
		 * with SIGURG so the process can fetch it first.
		 */
		if (skb->h.th->urg)
		{
			if (skb->urg_used)
			{
				sk->copied_seq += ntohs(skb->h.th->urg_ptr);
				offset += ntohs(skb->h.th->urg_ptr);
				if (offset >= skb->len)
				{
					skb->used = 1;
					skb =(struct sk_buff *)skb->next;
					continue;
				}
			}
			else
			{
				release_sock(sk);
				if (copied)
					return(copied);
				send_sig(SIGURG, current, 0);
				return(-EINTR);
			}
		}

		used = min(skb->len - offset, len);

		memcpy_tofs(to,((unsigned char *)skb->h.th) +
			    skb->h.th->doff*4 + offset, used);
		copied += used;
		len -= used;
		to += used;

		/* PEEK does not advance the stream position */
		if (!(flags & MSG_PEEK))
			sk->copied_seq += used;

		/*
		 * Mark the segment consumed once its last byte has been
		 * taken (unless peeking, or urgent data remains in it).
		 */
		if (!(flags & MSG_PEEK) &&
		    (!skb->h.th->urg || skb->urg_used) &&
		    (used + offset >= skb->len))
			skb->used = 1;

		/*
		 * Stop at a segment carrying URG so the urgent mark is
		 * not silently crossed in one read.
		 */
		if (skb->h.th->urg)
		{
			break;
		}
	}
	else
	{
		skb->used = 1;	/* nothing left in it for us */
	}

	skb =(struct sk_buff *)skb->next;
  }

  cleanup_rbuf(sk);
  release_sock(sk);
  DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
  if (copied == 0 && nonblock)
	return(-EAGAIN);
  return(copied);
}
1540
1541
1542
1543
1544
1545
/*
 * shutdown() on the send side: build a FIN segment and either transmit
 * it immediately or append it behind data already waiting on the write
 * queue.  Any pending partial segment is flushed first so the FIN
 * carries the right sequence number.  Moves the socket to FIN_WAIT1
 * (from ESTABLISHED) or FIN_WAIT2.  A no-op unless SEND_SHUTDOWN is
 * requested or if a FIN has already been sent.
 */
void
tcp_shutdown(struct sock *sk, int how)
{
  struct sk_buff *buff;
  struct tcphdr *t1, *th;
  struct proto *prot;
  int tmp;
  struct device *dev = NULL;

  /* already shutting down, or the caller did not ask for send shutdown */
  if (sk->state == TCP_FIN_WAIT1 || sk->state == TCP_FIN_WAIT2) return;
  if (!(how & SEND_SHUTDOWN)) return;
  sk->inuse = 1;

  /* flush buffered data so the FIN sequences after it */
  if (sk->send_tmp) tcp_send_partial(sk);

  prot =(struct proto *)sk->prot;
  th =(struct tcphdr *)&sk->dummy_th;
  release_sock(sk);	/* drop the lock around the blocking allocation */
  buff = prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
  if (buff == NULL) return;
  sk->inuse = 1;

  DPRINTF((DBG_TCP, "tcp_shutdown_send buff = %X\n", buff));
  buff->mem_addr = buff;
  buff->mem_len = MAX_RESET_SIZE;
  buff->sk = sk;
  buff->len = sizeof(*t1);
  t1 =(struct tcphdr *) buff->data;

  /* lower-layer headers first */
  tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
			   IPPROTO_TCP, sk->opt,
			   sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
  if (tmp < 0) {
	buff->free=1;
	prot->wfree(sk,buff->mem_addr, buff->mem_len);
	release_sock(sk);
	DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
	return;
  }

  t1 =(struct tcphdr *)((char *)t1 +tmp);
  buff->len += tmp;
  buff->dev = dev;
  memcpy(t1, th, sizeof(*t1));
  t1->seq = ntohl(sk->send_seq);
  sk->send_seq++;		/* the FIN consumes one sequence number */
  buff->h.seq = sk->send_seq;
  t1->ack = 1;
  t1->ack_seq = ntohl(sk->acked_seq);
  t1->window = ntohs(sk->window=tcp_select_window(sk));
  t1->fin = 1;
  t1->rst = 0;
  t1->doff = sizeof(*t1)/4;
  tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

  /*
   * Data is still queued: the FIN must follow it, so append to the
   * write queue instead of transmitting directly.
   */
  if (sk->wback != NULL) {
	buff->free=0;
	buff->next = NULL;
	sk->wback->next = buff;
	sk->wback = buff;
	buff->magic = TCP_WRITE_QUEUE_MAGIC;
  } else {
	sk->prot->queue_xmit(sk, dev, buff, 0);
  }

  if (sk->state == TCP_ESTABLISHED) sk->state = TCP_FIN_WAIT1;
  else sk->state = TCP_FIN_WAIT2;

  release_sock(sk);
}
1630
1631
1632 static int
1633 tcp_recvfrom(struct sock *sk, unsigned char *to,
1634 int to_len, int nonblock, unsigned flags,
1635 struct sockaddr_in *addr, int *addr_len)
1636 {
1637 struct sockaddr_in sin;
1638 int len;
1639 int err;
1640 int result;
1641
1642
1643
1644
1645 err = verify_area(VERIFY_WRITE,addr_len,sizeof(long));
1646 if(err)
1647 return err;
1648 len = get_fs_long(addr_len);
1649 if(len > sizeof(sin))
1650 len = sizeof(sin);
1651 err=verify_area(VERIFY_WRITE, addr, len);
1652 if(err)
1653 return err;
1654
1655 result=tcp_read(sk, to, to_len, nonblock, flags);
1656
1657 if (result < 0) return(result);
1658
1659 sin.sin_family = AF_INET;
1660 sin.sin_port = sk->dummy_th.dest;
1661 sin.sin_addr.s_addr = sk->daddr;
1662
1663 memcpy_tofs(addr, &sin, len);
1664 put_fs_long(len, addr_len);
1665 return(result);
1666 }
1667
1668
1669
/*
 * Send an RST segment in reply to the segment in 'th' (RFC 793 reset
 * generation).  Used for segments arriving for closed/dead sockets, so
 * it allocates atomically and runs without any socket attached.
 */
static void
tcp_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
	  struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
{
  struct sk_buff *buff;
  struct tcphdr *t1;
  int tmp;

  /* GFP_ATOMIC: may be called from interrupt context; on failure the
   * reset is simply dropped.
   */
  buff = prot->wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
  if (buff == NULL)
	return;

  DPRINTF((DBG_TCP, "tcp_reset buff = %X\n", buff));
  buff->mem_addr = buff;
  buff->mem_len = MAX_RESET_SIZE;
  buff->len = sizeof(*t1);
  buff->sk = NULL;		/* no owning socket */
  buff->dev = dev;

  t1 =(struct tcphdr *) buff->data;

  /* Lower-layer headers first; aborts silently if unroutable. */
  tmp = prot->build_header(buff, saddr, daddr, &dev, IPPROTO_TCP, opt,
			   sizeof(struct tcphdr),tos,ttl);
  if (tmp < 0) {
	buff->free = 1;
	prot->wfree(NULL, buff->mem_addr, buff->mem_len);
	return;
  }
  t1 =(struct tcphdr *)((char *)t1 +tmp);
  buff->len += tmp;
  /* Start from a copy of the offending header, then swap the roles. */
  memcpy(t1, th, sizeof(*t1));

  t1->dest = th->source;
  t1->source = th->dest;
  t1->rst = 1;
  t1->window = 0;

  /* RFC 793: if the incoming segment had an ACK, the reset carries its
   * ack number as our sequence; otherwise ack the incoming segment
   * (SYN counts as one octet) with seq 0.
   * NOTE(review): th->seq here appears to be used host-order (htonl
   * applied) while th->ack_seq is copied raw - byte-order depends on
   * what the caller has already converted; confirm against tcp_rcv.
   */
  if(th->ack)
  {
	t1->ack=0;
	t1->seq=th->ack_seq;
	t1->ack_seq=0;
  }
  else
  {
	t1->ack=1;
	if(!th->syn)
		t1->ack_seq=htonl(th->seq);
	else
		t1->ack_seq=htonl(th->seq+1);
	t1->seq=0;
  }

  t1->syn = 0;
  t1->urg = 0;
  t1->fin = 0;
  t1->psh = 0;
  t1->doff = sizeof(*t1)/4;
  tcp_send_check(t1, saddr, daddr, sizeof(*t1), NULL);
  prot->queue_xmit(NULL, dev, buff, 1);
}
1737
1738
1739
1740
1741
1742
1743 static void
1744 tcp_options(struct sock *sk, struct tcphdr *th)
1745 {
1746 unsigned char *ptr;
1747 int length=(th->doff*4)-sizeof(struct tcphdr);
1748
1749 ptr = (unsigned char *)(th + 1);
1750
1751 while(length>0)
1752 {
1753 int opcode=*ptr++;
1754 int opsize=*ptr++;
1755 switch(opcode)
1756 {
1757 case TCPOPT_EOL:
1758 return;
1759 case TCPOPT_NOP:
1760 length-=2;
1761 continue;
1762
1763 default:
1764 if(opsize<=2)
1765 return;
1766 switch(opcode)
1767 {
1768 case TCPOPT_MSS:
1769 if(opsize==4)
1770 {
1771 sk->mtu=min(sk->mtu,ntohs(*(unsigned short *)ptr));
1772 }
1773 break;
1774
1775 }
1776 ptr+=opsize-2;
1777 length-=opsize;
1778 }
1779 }
1780
1781 }
1782
1783
1784
1785
1786
1787
1788
1789
/*
 * Handle an incoming SYN on a listening socket: clone the listener into
 * a new sock in SYN_RECV state, parse the peer's options, and transmit
 * our SYN+ACK (with an MSS option).  The original SYN skb is re-owned
 * by the new socket and parked on the listener's receive queue so
 * accept() can find it.
 */
static void
tcp_conn_request(struct sock *sk, struct sk_buff *skb,
		 unsigned long daddr, unsigned long saddr,
		 struct options *opt, struct device *dev)
{
  struct sk_buff *buff;
  struct tcphdr *t1;
  unsigned char *ptr;
  struct sock *newsk;
  struct tcphdr *th;
  int tmp;

  DPRINTF((DBG_TCP, "tcp_conn_request(sk = %X, skb = %X, daddr = %X, sadd4= %X, \n"
	  "                  opt = %X, dev = %X)\n",
	  sk, skb, daddr, saddr, opt, dev));

  th = skb->h.th;

  /* Wake the listener; a SYN for a dead socket draws a reset. */
  if (!sk->dead) {
	sk->data_ready(sk,0);
  } else {
	DPRINTF((DBG_TCP, "tcp_conn_request on dead socket\n"));
	tcp_reset(daddr, saddr, th, sk->prot, opt, dev, sk->ip_tos,sk->ip_ttl);
	kfree_skb(skb, FREE_READ);
	return;
  }

  /* Listen backlog full: silently drop the SYN (peer will retry). */
  if (sk->ack_backlog >= sk->max_ack_backlog) {
	kfree_skb(skb, FREE_READ);
	return;
  }

  /* Clone the listening socket.  The memcpy copies every field
   * (including the protocol ops); the queue/list pointers are then
   * reset so the new socket does not share the listener's queues.
   */
  newsk = (struct sock *) kmalloc(sizeof(struct sock), GFP_ATOMIC);
  if (newsk == NULL) {
	/* Out of memory: drop; the peer's retransmit will retry. */
	kfree_skb(skb, FREE_READ);
	return;
  }

  DPRINTF((DBG_TCP, "newsk = %X\n", newsk));
  memcpy((void *)newsk,(void *)sk, sizeof(*newsk));
  newsk->wback = NULL;
  newsk->wfront = NULL;
  newsk->rqueue = NULL;
  newsk->send_head = NULL;
  newsk->send_tail = NULL;
  newsk->back_log = NULL;
  newsk->rtt = TCP_CONNECT_TIME;	/* initial RTT estimate */
  newsk->mdev = 0;
  newsk->backoff = 0;
  newsk->blog = 0;
  newsk->intr = 0;
  newsk->proc = 0;
  newsk->done = 0;
  newsk->send_tmp = NULL;
  newsk->pair = NULL;
  newsk->wmem_alloc = 0;
  newsk->rmem_alloc = 0;

  newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;

  newsk->err = 0;
  newsk->shutdown = 0;
  newsk->ack_backlog = 0;
  /* Record the peer's initial sequence; the SYN consumes one octet. */
  newsk->acked_seq = skb->h.th->seq+1;
  newsk->fin_seq = skb->h.th->seq;
  newsk->copied_seq = skb->h.th->seq;
  newsk->state = TCP_SYN_RECV;
  newsk->timeout = 0;
  /* Clock-driven initial send sequence. */
  newsk->send_seq = jiffies * SEQ_TICK - seq_offset;
  newsk->rcv_ack_seq = newsk->send_seq;
  newsk->urg =0;
  newsk->retransmits = 0;
  newsk->destroy = 0;
  newsk->timer.data = (unsigned long)newsk;
  newsk->timer.function = &net_timer;
  newsk->dummy_th.source = skb->h.th->dest;
  newsk->dummy_th.dest = skb->h.th->source;

  /* Addresses from the new socket's point of view. */
  newsk->daddr = saddr;
  newsk->saddr = daddr;

  put_sock(newsk->num,newsk);
  newsk->dummy_th.res1 = 0;
  newsk->dummy_th.doff = 6;	/* 24 bytes: header + MSS option */
  newsk->dummy_th.fin = 0;
  newsk->dummy_th.syn = 0;
  newsk->dummy_th.rst = 0;
  newsk->dummy_th.psh = 0;
  newsk->dummy_th.ack = 0;
  newsk->dummy_th.urg = 0;
  newsk->dummy_th.res2 = 0;
  newsk->acked_seq = skb->h.th->seq + 1;
  newsk->copied_seq = skb->h.th->seq;

  /* IP level parameters: inherit TTL, echo the peer's TOS. */
  newsk->ip_ttl=sk->ip_ttl;
  newsk->ip_tos=skb->ip_hdr->tos;

  /* Start from the configured mss (or the 576-byte default) and clamp
   * to what the device can carry.
   */
  if (sk->mss)
	newsk->mtu = sk->mss;
  else
	newsk->mtu = 576 - HEADER_SIZE;

  newsk->mtu = min(newsk->mtu, dev->mtu - HEADER_SIZE);

  /* The peer's options (e.g. its MSS) may clamp newsk->mtu further. */
  tcp_options(newsk,skb->h.th);

  buff = newsk->prot->wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
  if (buff == NULL) {
	sk->err = -ENOMEM;
	newsk->dead = 1;
	release_sock(newsk);
	kfree_skb(skb, FREE_READ);
	return;
  }

  buff->mem_addr = buff;
  buff->mem_len = MAX_SYN_SIZE;
  buff->len = sizeof(struct tcphdr)+4;	/* header + 4-byte MSS option */
  buff->sk = newsk;

  t1 =(struct tcphdr *) buff->data;

  /* Lower-layer headers for the SYN+ACK. */
  tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &dev,
			       IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);

  if (tmp < 0) {
	sk->err = tmp;
	buff->free=1;
	kfree_skb(buff,FREE_WRITE);
	newsk->dead = 1;
	release_sock(newsk);
	skb->sk = sk;
	kfree_skb(skb, FREE_READ);
	return;
  }

  buff->len += tmp;
  t1 =(struct tcphdr *)((char *)t1 +tmp);

  memcpy(t1, skb->h.th, sizeof(*t1));
  buff->h.seq = newsk->send_seq;

  /* SYN+ACK: our ISS, acking the peer's SYN. */
  t1->dest = skb->h.th->source;
  t1->source = newsk->dummy_th.source;
  t1->seq = ntohl(newsk->send_seq++);
  t1->ack = 1;
  newsk->window = tcp_select_window(newsk);
  t1->window = ntohs(newsk->window);
  t1->res1 = 0;
  t1->res2 = 0;
  t1->rst = 0;
  t1->urg = 0;
  t1->psh = 0;
  t1->syn = 1;
  t1->ack_seq = ntohl(skb->h.th->seq+1);
  t1->doff = sizeof(*t1)/4+1;

  /* MSS option: kind 2, length 4, value in network byte order. */
  ptr =(unsigned char *)(t1+1);
  ptr[0] = 2;
  ptr[1] = 4;
  ptr[2] = ((newsk->mtu) >> 8) & 0xff;
  ptr[3] =(newsk->mtu) & 0xff;

  tcp_send_check(t1, daddr, saddr, sizeof(*t1)+4, newsk);
  newsk->prot->queue_xmit(newsk, dev, buff, 0);

  /* Retransmit the SYN+ACK if no ACK arrives in time. */
  reset_timer(newsk, TIME_WRITE , TCP_CONNECT_TIME);

  /* Transfer the SYN skb's memory accounting to the new socket and
   * park it on the listener's queue for accept() to pick up.
   */
  skb->sk = newsk;

  sk->rmem_alloc -= skb->mem_len;
  newsk->rmem_alloc += skb->mem_len;

  skb_queue_tail(&sk->rqueue,skb);
  sk->ack_backlog++;
  release_sock(newsk);
}
1988
1989
/*
 * Close a TCP socket.  Discards unread receive data (sending a reset if
 * the peer had sent data we never consumed), flushes any partial send
 * buffer, then either steps the FIN state machine along or builds and
 * queues our FIN, depending on the current state.
 */
static void
tcp_close(struct sock *sk, int timeout)
{
  struct sk_buff *buff;
  int need_reset = 0;
  struct tcphdr *t1, *th;
  struct proto *prot;
  struct device *dev=NULL;
  int tmp;

  DPRINTF((DBG_TCP, "tcp_close((struct sock *)%X, %d)\n",sk, timeout));
  sk->inuse = 1;
  sk->keepopen = 1;
  sk->shutdown = SHUTDOWN_MASK;

  if (!sk->dead)
	sk->state_change(sk);

  /* Drain the receive queue.  If any queued segment contains data the
   * reader never consumed, RFC semantics call for a reset.
   */
  if (skb_peek(&sk->rqueue) != NULL)
  {
	struct sk_buff *skb;
	if(sk->debug)
		printk("Clean rcv queue\n");
	while((skb=skb_dequeue(&sk->rqueue))!=NULL)
	{
		if(skb->len > 0 && after(skb->h.th->seq + skb->len + 1 , sk->copied_seq))
			need_reset = 1;
		kfree_skb(skb, FREE_READ);
	}
	if(sk->debug)
		printk("Cleaned.\n");
  }
  sk->rqueue = NULL;

  /* Push out any partially-filled pending segment. */
  if (sk->send_tmp) {
	tcp_send_partial(sk);
  }

  switch(sk->state) {
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
	case TCP_LAST_ACK:
		/* Our FIN is already out; just (re)arm the close timer. */
		reset_timer(sk, TIME_CLOSE, 4 * sk->rtt);
		if (timeout) tcp_time_wait(sk);
		release_sock(sk);
		return;
	case TCP_TIME_WAIT:
		if (timeout) {
			sk->state = TCP_CLOSE;
		}
		release_sock(sk);
		return;
	case TCP_LISTEN:
		sk->state = TCP_CLOSE;
		release_sock(sk);
		return;
	case TCP_CLOSE:
		release_sock(sk);
		return;
	case TCP_CLOSE_WAIT:
	case TCP_ESTABLISHED:
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		prot =(struct proto *)sk->prot;
		th =(struct tcphdr *)&sk->dummy_th;
		buff = prot->wmalloc(sk, MAX_FIN_SIZE, 1, GFP_ATOMIC);
		if (buff == NULL) {
			/* No memory for the FIN right now: stay open and
			 * let a short close timer retry the close later.
			 */
			release_sock(sk);
			if (sk->state != TCP_CLOSE_WAIT)
				sk->state = TCP_ESTABLISHED;
			reset_timer(sk, TIME_CLOSE, 100);
			return;
		}
		buff->mem_addr = buff;
		buff->mem_len = MAX_FIN_SIZE;
		buff->sk = sk;
		buff->free = 1;
		buff->len = sizeof(*t1);
		t1 =(struct tcphdr *) buff->data;

		/* Lower-layer headers for the FIN segment. */
		tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
					 IPPROTO_TCP, sk->opt,
					 sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
		if (tmp < 0) {
			kfree_skb(buff,FREE_WRITE);
			DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
			release_sock(sk);
			return;
		}

		t1 =(struct tcphdr *)((char *)t1 +tmp);
		buff->len += tmp;
		buff->dev = dev;
		memcpy(t1, th, sizeof(*t1));
		t1->seq = ntohl(sk->send_seq);
		sk->send_seq++;		/* FIN consumes a sequence number */
		buff->h.seq = sk->send_seq;
		t1->ack = 1;

		/* Ack everything immediately - no more delayed acks. */
		sk->delay_acks = 0;
		t1->ack_seq = ntohl(sk->acked_seq);
		t1->window = ntohs(sk->window=tcp_select_window(sk));
		t1->fin = 1;
		t1->rst = need_reset;	/* FIN+RST if data was discarded */
		t1->doff = sizeof(*t1)/4;
		tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

		/* Transmit now if the write queue is empty, otherwise
		 * append behind the queued data so it goes out in order.
		 */
		if (sk->wfront == NULL) {
			prot->queue_xmit(sk, dev, buff, 0);
		} else {
			reset_timer(sk, TIME_WRITE,
				    backoff(sk->backoff) * (2 * sk->mdev + sk->rtt));
			buff->next = NULL;
			if (sk->wback == NULL) {
				sk->wfront=buff;
			} else {
				sk->wback->next = buff;
			}
			sk->wback = buff;
			buff->magic = TCP_WRITE_QUEUE_MAGIC;
		}

		if (sk->state == TCP_CLOSE_WAIT) {
			sk->state = TCP_FIN_WAIT2;
		} else {
			sk->state = TCP_FIN_WAIT1;
		}
  }
  release_sock(sk);
}
2132
2133
2134
2135
2136
2137
/*
 * Transmit as much of the write queue as the peer's advertised window,
 * the congestion window, and any in-progress retransmit allow.  Already
 * acked buffers that are still queued are freed instead of sent.
 */
static void
tcp_write_xmit(struct sock *sk)
{
  struct sk_buff *skb;

  DPRINTF((DBG_TCP, "tcp_write_xmit(sk=%X)\n", sk));

  /* A zapped socket (reset received) must not transmit. */
  if(sk->zapped)
	return;

  /* Send while: there is queued data, it fits in the offered window,
   * we are not in the middle of a retransmit (or this data is below
   * the retransmit point), and the congestion window has room.
   */
  while(sk->wfront != NULL &&
        before(sk->wfront->h.seq, sk->window_seq +1) &&
        (sk->retransmits == 0 ||
	 sk->timeout != TIME_WRITE ||
	 before(sk->wfront->h.seq, sk->rcv_ack_seq +1))
        && sk->packets_out < sk->cong_window) {
	skb = sk->wfront;
	IS_SKB(skb);
	sk->wfront =(struct sk_buff *)skb->next;
	if (sk->wfront == NULL) sk->wback = NULL;
	skb->next = NULL;
	/* Corrupted queue: dump it rather than transmit garbage. */
	if (skb->magic != TCP_WRITE_QUEUE_MAGIC) {
		printk("tcp.c skb with bad magic(%X) on write queue. Squashing "
		       "queue\n", skb->magic);
		sk->wfront = NULL;
		sk->wback = NULL;
		return;
	}
	skb->magic = 0;
	DPRINTF((DBG_TCP, "Sending a packet.\n"));

	/* Already acknowledged (e.g. re-queued by a window shrink and
	 * then acked) - free it instead of sending.
	 */
	if (before(skb->h.seq, sk->rcv_ack_seq +1)) {
		sk->retransmits = 0;
		kfree_skb(skb, FREE_WRITE);
		if (!sk->dead) sk->write_space(sk);
	} else {
		sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
	}
  }
}
2181
2182
2183
2184
2185
2186
2187 void
2188 sort_send(struct sock *sk)
2189 {
2190 struct sk_buff *list = NULL;
2191 struct sk_buff *skb,*skb2,*skb3;
2192
2193 for (skb = sk->send_head; skb != NULL; skb = skb2) {
2194 skb2 = (struct sk_buff *)skb->link3;
2195 if (list == NULL || before (skb2->h.seq, list->h.seq)) {
2196 skb->link3 = list;
2197 sk->send_tail = skb;
2198 list = skb;
2199 } else {
2200 for (skb3 = list; ; skb3 = (struct sk_buff *)skb3->link3) {
2201 if (skb3->link3 == NULL ||
2202 before(skb->h.seq, skb3->link3->h.seq)) {
2203 skb->link3 = skb3->link3;
2204 skb3->link3 = skb;
2205 if (skb->link3 == NULL) sk->send_tail = skb;
2206 break;
2207 }
2208 }
2209 }
2210 }
2211 sk->send_head = list;
2212 }
2213
2214
2215
/*
 * Process the acknowledgment and window fields of an incoming segment.
 * Frees acked buffers from the retransmit list, updates the RTT
 * estimate, the send window and the congestion window, restarts
 * transmission or the zero-window probe timer as appropriate, and
 * drives the FIN/close state transitions that depend on our data being
 * fully acked.  Returns 1 if the ack was acceptable, 0 if it was
 * rejected as out of range.
 *
 * 'flag' bits (as used by this function):
 *   1 - some state advanced / the segment carried data;
 *   2 - an RTT sample has already been taken from this ack;
 *   4 - the window changed or a buffer was acked.
 */
static int
tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
{
  unsigned long ack;
  int flag = 0;

  if(sk->zapped)
	return(1);	/* connection was reset: ignore quietly */

  ack = ntohl(th->ack_seq);
  DPRINTF((DBG_TCP, "tcp_ack ack=%d, window=%d, "
	  "sk->rcv_ack_seq=%d, sk->window_seq = %d\n",
	  ack, ntohs(th->window), sk->rcv_ack_seq, sk->window_seq));

  /* Any ack proves the peer is alive - clear keepalive retries. */
  if (sk->retransmits && sk->timeout == TIME_KEEPOPEN)
	sk->retransmits = 0;

  /* Reject acks outside [rcv_ack_seq-1, send_seq+1]. */
  if (after(ack, sk->send_seq+1) || before(ack, sk->rcv_ack_seq-1)) {
	if (after(ack, sk->send_seq) ||
	   (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)) {
		return(0);
	}
	if (sk->keepopen) {
		reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
	}
	return(1);
  }

  /* Segment carries data beyond the header. */
  if (len != th->doff*4) flag |= 1;

  /* The advertised window shrank: pull back buffers that no longer fit
   * inside it from the retransmit list onto the front of the write
   * queue, preserving order, so they are not sent past the window.
   */
  if (after(sk->window_seq, ack+ntohs(th->window))) {
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *wskb = NULL;

	skb2 = sk->send_head;
	sk->send_head = NULL;
	sk->send_tail = NULL;

	flag |= 4;

	sk->window_seq = ack + ntohs(th->window);
	cli();		/* the lists are also touched at interrupt time */
	while (skb2 != NULL) {
		skb = skb2;
		skb2 = (struct sk_buff *)skb->link3;
		skb->link3 = NULL;
		if (after(skb->h.seq, sk->window_seq)) {
			/* Beyond the new window: back onto the write queue. */
			if (sk->packets_out > 0) sk->packets_out--;

			if (skb->next != NULL) {
				skb_unlink(skb);
			}

			skb->magic = TCP_WRITE_QUEUE_MAGIC;
			if (wskb == NULL) {
				skb->next = sk->wfront;
				sk->wfront = skb;
			} else {
				skb->next = wskb->next;
				wskb->next = skb;
			}
			if (sk->wback == wskb) sk->wback = skb;
			wskb = skb;
		} else {
			/* Still inside the window: keep on the send list. */
			if (sk->send_head == NULL) {
				sk->send_head = skb;
				sk->send_tail = skb;
			} else {
				sk->send_tail->link3 = skb;
				sk->send_tail = skb;
			}
			skb->link3 = NULL;
		}
	}
	sti();
  }

  /* Keep head/tail/packet count mutually consistent. */
  if (sk->send_tail == NULL || sk->send_head == NULL) {
	sk->send_head = NULL;
	sk->send_tail = NULL;
	sk->packets_out= 0;
  }

  sk->window_seq = ack + ntohs(th->window);

  /* Grow the congestion window on acks of new data while writing. */
  if (sk->timeout == TIME_WRITE &&
      sk->cong_window < 2048 && ack != sk->rcv_ack_seq) {
	if (sk->exp_growth) sk->cong_window *= 2;	/* slow-start style doubling */
	else sk->cong_window++;
  }

  DPRINTF((DBG_TCP, "tcp_ack: Updating rcv ack sequence.\n"));
  sk->rcv_ack_seq = ack;

  /* A zero-window probe succeeded if the window now covers queued data. */
  if (sk->timeout == TIME_PROBE0) {
	if (sk->wfront != NULL &&
	    ! before (sk->window_seq, sk->wfront->h.seq)) {
		sk->retransmits = 0;
		sk->backoff = 0;
	}
  }

  /* Free everything on the retransmit list covered by this ack. */
  while(sk->send_head != NULL) {
	/* Sanity: the list must stay sequence-ordered. */
	if (sk->send_head->link3 &&
	    after(sk->send_head->h.seq, sk->send_head->link3->h.seq)) {
		printk("INET: tcp.c: *** bug send_list out of order.\n");
		sort_send(sk);
	}

	if (before(sk->send_head->h.seq, ack+1)) {
		struct sk_buff *oskb;

		if (sk->retransmits) {
			/* More unacked data remains: stay in (light)
			 * retransmit mode, otherwise clear it.
			 */
			if (sk->send_head->link3)
				sk->retransmits = 1;
			else
				sk->retransmits = 0;
		}

		sk->backoff = 0;

		if (sk->packets_out > 0) sk->packets_out --;
		DPRINTF((DBG_TCP, "skb=%X skb->h.seq = %d acked ack=%d\n",
				sk->send_head, sk->send_head->h.seq, ack));

		/* Buffer space is coming free - wake any writer. */
		if (!sk->dead) sk->write_space(sk);

		oskb = sk->send_head;

		/* Take at most one RTT sample per ack (flag bit 2). */
		if ( !(flag&2)) {
			long abserr, rtt = jiffies - oskb->when;

			/* Clamp the sample to a sane range of jiffies. */
			if (rtt < 100) rtt = 100;
			if (rtt > 12000) rtt = 12000;

			if (sk->state == TCP_SYN_SENT || sk->state == TCP_SYN_RECV) {
				/* First sample: seed both estimators. */
				sk->rtt = rtt;
				sk->mdev = rtt;
			}
			else {
				/* Exponentially-smoothed RTT and mean deviation
				 * (shift-based, Jacobson-style weighting).
				 */
				abserr = (rtt > sk->rtt) ? rtt - sk->rtt : sk->rtt - rtt;
				sk->rtt = (7 * sk->rtt + rtt) >> 3;
				sk->mdev = (3 * sk->mdev + abserr) >> 2;
			}
			sk->backoff = 0;
		}
		flag |= (2|4);

		cli();

		oskb = sk->send_head;
		IS_SKB(oskb);
		sk->send_head =(struct sk_buff *)oskb->link3;
		if (sk->send_head == NULL) {
			sk->send_tail = NULL;
		}

		skb_unlink(oskb);	/* also off any device queue */
		sti();
		oskb->magic = 0;
		kfree_skb(oskb, FREE_WRITE);
		if (!sk->dead) sk->write_space(sk);
	} else {
		break;	/* list is ordered: nothing further is acked */
	}
  }

  /* Decide what to do next: send more, probe a zero window, or arm
   * the appropriate timer for the current state.
   */
  if (sk->wfront != NULL) {
	if (after (sk->window_seq+1, sk->wfront->h.seq) &&
		(sk->retransmits == 0 ||
		 sk->timeout != TIME_WRITE ||
		 before(sk->wfront->h.seq, sk->rcv_ack_seq +1))
		&& sk->packets_out < sk->cong_window) {
		flag |= 1;
		tcp_write_xmit(sk);
	} else if (before(sk->window_seq, sk->wfront->h.seq) &&
		   sk->send_head == NULL &&
		   sk->ack_backlog == 0 &&
		   sk->state != TCP_TIME_WAIT) {
		/* Zero window with data pending: start probing. */
		reset_timer(sk, TIME_PROBE0,
			    backoff(sk->backoff) * (2 * sk->mdev + sk->rtt));
	}
  } else {
	if (sk->send_head == NULL && sk->ack_backlog == 0 &&
	    sk->state != TCP_TIME_WAIT && !sk->keepopen) {
		DPRINTF((DBG_TCP, "Nothing to do, going to sleep.\n"));
		if (!sk->dead) sk->write_space(sk);

		/* NOTE(review): the keepopen branch is unreachable here
		 * (!sk->keepopen already held above) - confirm intent. */
		if (sk->keepopen)
			reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
		else
			delete_timer(sk);
	} else {
		/* NOTE(review): comparing state against keepopen looks
		 * suspicious - presumably meant as a non-idle test. */
		if (sk->state != (unsigned char) sk->keepopen) {
			reset_timer(sk, TIME_WRITE,
				    backoff(sk->backoff) * (2 * sk->mdev + sk->rtt));
		}
		if (sk->state == TCP_TIME_WAIT) {
			reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
		}
	}
  }

  /* Everything acked and queues empty: flush any partial segment. */
  if (sk->packets_out == 0 && sk->send_tmp != NULL &&
      sk->wfront == NULL && sk->send_head == NULL) {
	flag |= 1;
	tcp_send_partial(sk);
  }

  /* In TIME_WAIT, fully-acked both ways means we can close. */
  if (sk->state == TCP_TIME_WAIT) {
	if (!sk->dead)
		sk->state_change(sk);
	if (sk->rcv_ack_seq == sk->send_seq && sk->acked_seq == sk->fin_seq) {
		flag |= 1;
		sk->state = TCP_CLOSE;
		sk->shutdown = SHUTDOWN_MASK;
	}
  }

  /* Our FIN was acked: move on to TIME_WAIT or finish closing. */
  if (sk->state == TCP_LAST_ACK || sk->state == TCP_FIN_WAIT2) {
	if (!sk->dead) sk->state_change(sk);
	if (sk->rcv_ack_seq == sk->send_seq) {
		flag |= 1;
		if (sk->acked_seq != sk->fin_seq) {
			tcp_time_wait(sk);
		} else {
			DPRINTF((DBG_TCP, "tcp_ack closing socket - %X\n", sk));
			tcp_send_ack(sk->send_seq, sk->acked_seq, sk,
				     th, sk->daddr);
			sk->shutdown = SHUTDOWN_MASK;
			sk->state = TCP_CLOSE;
		}
	}
  }

  /* A pure ack that made no progress while a retransmit is overdue:
   * retransmit immediately and leave exponential growth.
   */
  if (((!flag) || (flag&4)) && sk->send_head != NULL &&
      (sk->send_head->when + backoff(sk->backoff) * (2 * sk->mdev + sk->rtt)
       < jiffies)) {
	sk->exp_growth = 0;
	ip_retransmit(sk, 1);
  }

  DPRINTF((DBG_TCP, "leaving tcp_ack\n"));
  return(1);
}
2516
2517
2518
2519
2520
2521
2522
/*
 * Accept the data portion of an incoming segment: insert the skb into
 * the (sequence-ordered, circular) receive queue, advance acked_seq
 * over any now-contiguous segments, schedule or send an ack, and wake
 * the reader.  Out-of-order segments are queued unacked; duplicates are
 * replaced when the new copy is at least as long.
 */
static int
tcp_data(struct sk_buff *skb, struct sock *sk,
	 unsigned long saddr, unsigned short len)
{
  struct sk_buff *skb1, *skb2;
  struct tcphdr *th;
  int dup_dumped=0;

  th = skb->h.th;
  print_th(th);
  skb->len = len -(th->doff*4);	/* payload length only */

  DPRINTF((DBG_TCP, "tcp_data len = %d sk = %X:\n", skb->len, sk));

  sk->bytes_rcv += skb->len;
  /* Empty segment with no interesting flags: ack if it wasn't itself
   * an ack, then discard.
   */
  if (skb->len == 0 && !th->fin && !th->urg && !th->psh) {
	if (!th->ack) tcp_send_ack(sk->send_seq, sk->acked_seq,sk, th, saddr);
	kfree_skb(skb, FREE_READ);
	return(0);
  }

  /* Data after we shut down receiving: reset the connection. */
  if (sk->shutdown & RCV_SHUTDOWN) {
	sk->acked_seq = th->seq + skb->len + th->syn + th->fin;
	tcp_reset(sk->saddr, sk->daddr, skb->h.th,
	sk->prot, NULL, skb->dev, sk->ip_tos, sk->ip_ttl);
	sk->state = TCP_CLOSE;
	sk->err = EPIPE;
	sk->shutdown = SHUTDOWN_MASK;
	DPRINTF((DBG_TCP, "tcp_data: closing socket - %X\n", sk));
	kfree_skb(skb, FREE_READ);
	if (!sk->dead) sk->state_change(sk);
	return(0);
  }

  /* Insert into the receive queue in sequence order.  skb1 ends up as
   * the predecessor of skb (NULL when skb became the queue head).
   */
  if (sk->rqueue == NULL) {
	DPRINTF((DBG_TCP, "tcp_data: skb = %X:\n", skb));
#ifdef OLDWAY
	sk->rqueue = skb;
	skb->next = skb;
	skb->prev = skb;
	skb->list = &sk->rqueue;
#else
	skb_queue_head(&sk->rqueue,skb);
#endif
	skb1= NULL;
  } else {
	DPRINTF((DBG_TCP, "tcp_data adding to chain sk = %X:\n", sk));
	/* Scan backwards from the newest segment. */
	for(skb1=sk->rqueue->prev; ; skb1 =(struct sk_buff *)skb1->prev) {
		if(sk->debug)
		{
			printk("skb1=%p :", skb1);
			printk("skb1->h.th->seq = %ld: ", skb1->h.th->seq);
			printk("skb->h.th->seq = %ld\n",skb->h.th->seq);
			printk("copied_seq = %ld acked_seq = %ld\n", sk->copied_seq,
					sk->acked_seq);
		}
#ifdef OLD
		if (after(th->seq+1, skb1->h.th->seq)) {
			skb->prev = skb1;
			skb->next = skb1->next;
			skb->next->prev = skb;
			skb1->next = skb;
			if (skb1 == sk->rqueue) sk->rqueue = skb;
			break;
		}
		if (skb1->prev == sk->rqueue) {
			skb->next= skb1;
			skb->prev = skb1->prev;
			skb->prev->next = skb;
			skb1->prev = skb;
			skb1 = NULL;

			break;
		}
#else
		/* Exact duplicate, new copy at least as long: replace. */
		if (th->seq==skb1->h.th->seq && skb->len>= skb1->len)
		{
			skb_append(skb1,skb);
			skb_unlink(skb1);
			kfree_skb(skb1,FREE_READ);
			dup_dumped=1;
			skb1=NULL;
			break;
		}
		/* Found the segment we belong after. */
		if (after(th->seq+1, skb1->h.th->seq))
		{
			skb_append(skb1,skb);
			break;
		}
		/* Reached the queue head: we are the oldest segment. */
		if (skb1 == sk->rqueue)
		{
			skb_queue_head(&sk->rqueue, skb);
			break;
		}
#endif
	}
	DPRINTF((DBG_TCP, "skb = %X:\n", skb));
  }

  /* Cache the sequence number one past this segment in ack_seq
   * (SYN and FIN each occupy one sequence number).
   */
  th->ack_seq = th->seq + skb->len;
  if (th->syn) th->ack_seq++;
  if (th->fin) th->ack_seq++;

  if (before(sk->acked_seq, sk->copied_seq)) {
	printk("*** tcp.c:tcp_data bug acked < copied\n");
	sk->acked_seq = sk->copied_seq;
  }

  /* If this segment is in order (or fills the gap), advance acked_seq
   * across it and any following now-contiguous queued segments.
   */
  if ((!dup_dumped && (skb1 == NULL || skb1->acked)) || before(th->seq, sk->acked_seq+1)) {
	if (before(th->seq, sk->acked_seq+1)) {
		if (after(th->ack_seq, sk->acked_seq))
			sk->acked_seq = th->ack_seq;
		skb->acked = 1;

		/* An in-order FIN closes our receive side. */
		if (skb->h.th->fin) {
			if (!sk->dead) sk->state_change(sk);
			sk->shutdown |= RCV_SHUTDOWN;
		}

		/* Sweep forward over segments made contiguous by this one. */
		for(skb2 = (struct sk_buff *)skb->next;
		    skb2 !=(struct sk_buff *) sk->rqueue;
		    skb2 = (struct sk_buff *)skb2->next) {
			if (before(skb2->h.th->seq, sk->acked_seq+1)) {
				if (after(skb2->h.th->ack_seq, sk->acked_seq))
				{
					long old_acked_seq = sk->acked_seq;
					sk->acked_seq = skb2->h.th->ack_seq;
					/* Shrink the offered window by the
					 * newly consumed sequence space.
					 */
					if((int)(sk->acked_seq - old_acked_seq) >0)
					{
						int new_window=sk->window-sk->acked_seq+
							old_acked_seq;
						if(new_window<0)
							new_window=0;
						sk->window = new_window;
					}
				}
				skb2->acked = 1;

				if (skb2->h.th->fin) {
					sk->shutdown |= RCV_SHUTDOWN;
					if (!sk->dead) sk->state_change(sk);
				}

				/* Force an immediate ack below. */
				sk->ack_backlog = sk->max_ack_backlog;
			} else {
				break;	/* hit the next gap */
			}
		}

		/* Either ack now (conditions met → fall through to the
		 * send below) or delay the ack on a short timer.
		 */
		if (!sk->delay_acks ||
		    sk->ack_backlog >= sk->max_ack_backlog ||
		    sk->bytes_rcv > sk->max_unacked || th->fin) {
		} else {
			sk->ack_backlog++;
			if(sk->debug)
				printk("Ack queued.\n");
			reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
		}
	}
  }

  /* Out-of-order segment: we must ack with the old number.  If buffer
   * memory is tight, drop queued unacked (out-of-order) segments to
   * make room - the peer will retransmit them.
   */
  if (!skb->acked) {
	while (sk->prot->rspace(sk) < sk->mtu) {
		skb1 = skb_peek(&sk->rqueue);
		if (skb1 == NULL) {
			printk("INET: tcp.c:tcp_data memory leak detected.\n");
			break;
		}

		/* Never drop data we have already acknowledged. */
		if (skb1->acked) {
			break;
		}

		skb_unlink(skb1);
#ifdef OLDWAY
		if (skb1->prev == skb1) {
			sk->rqueue = NULL;
		} else {
			sk->rqueue = (struct sk_buff *)skb1->prev;
			skb1->next->prev = skb1->prev;
			skb1->prev->next = skb1->next;
		}
#endif
		kfree_skb(skb1, FREE_READ);
	}
	tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
	sk->ack_backlog++;
	reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
  } else {
	/* In-order data: ack immediately. */
	tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
  }

  /* Wake any reader waiting for data. */
  if (!sk->dead) {
	if(sk->debug)
		printk("Data wakeup.\n");
	sk->data_ready(sk,0);
  } else {
	DPRINTF((DBG_TCP, "data received on dead socket.\n"));
  }

  /* Both FINs seen and everything acked: last-ack stage of the close. */
  if (sk->state == TCP_FIN_WAIT2 &&
      sk->acked_seq == sk->fin_seq && sk->rcv_ack_seq == sk->send_seq) {
	DPRINTF((DBG_TCP, "tcp_data: entering last_ack state sk = %X\n", sk));

	sk->shutdown = SHUTDOWN_MASK;
	sk->state = TCP_LAST_ACK;
	if (!sk->dead) sk->state_change(sk);
  }

  return(0);
}
2769
2770
2771 static int
2772 tcp_urg(struct sock *sk, struct tcphdr *th, unsigned long saddr)
2773 {
2774 extern int kill_pg(int pg, int sig, int priv);
2775 extern int kill_proc(int pid, int sig, int priv);
2776
2777 if (!sk->dead)
2778 sk->data_ready(sk,0);
2779
2780 if (sk->urginline) {
2781 th->urg = 0;
2782 th->psh = 1;
2783 return(0);
2784 }
2785
2786 if (!sk->urg) {
2787
2788 if (sk->proc != 0) {
2789 if (sk->proc > 0) {
2790 kill_proc(sk->proc, SIGURG, 1);
2791 } else {
2792 kill_pg(-sk->proc, SIGURG, 1);
2793 }
2794 }
2795 }
2796 sk->urg++;
2797 return(0);
2798 }
2799
2800
2801
2802 static int
2803 tcp_fin(struct sock *sk, struct tcphdr *th,
2804 unsigned long saddr, struct device *dev)
2805 {
2806 DPRINTF((DBG_TCP, "tcp_fin(sk=%X, th=%X, saddr=%X, dev=%X)\n",
2807 sk, th, saddr, dev));
2808
2809 if (!sk->dead) {
2810 sk->state_change(sk);
2811 }
2812
2813 switch(sk->state) {
2814 case TCP_SYN_RECV:
2815 case TCP_SYN_SENT:
2816 case TCP_ESTABLISHED:
2817
2818 sk->fin_seq = th->seq+1;
2819 sk->state = TCP_CLOSE_WAIT;
2820 if (th->rst) sk->shutdown = SHUTDOWN_MASK;
2821 break;
2822
2823 case TCP_CLOSE_WAIT:
2824 case TCP_FIN_WAIT2:
2825 break;
2826
2827 case TCP_FIN_WAIT1:
2828
2829 sk->fin_seq = th->seq+1;
2830 sk->state = TCP_FIN_WAIT2;
2831 break;
2832
2833 default:
2834 case TCP_TIME_WAIT:
2835 sk->state = TCP_LAST_ACK;
2836
2837
2838 reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
2839 return(0);
2840 }
2841 sk->ack_backlog++;
2842
2843 return(0);
2844 }
2845
2846
2847
/*
 * accept() on a listening TCP socket: take the oldest completed
 * connection request off the receive queue and return its (already
 * created) socket.  Blocks unless O_NONBLOCK is set.  Returns NULL
 * with sk->err set on error.
 */
static struct sock *
tcp_accept(struct sock *sk, int flags)
{
  struct sock *newsk;
  struct sk_buff *skb;

  DPRINTF((DBG_TCP, "tcp_accept(sk=%X, flags=%X, addr=%s)\n",
	  sk, flags, in_ntoa(sk->saddr)));

  /* Only a listening socket can accept. */
  if (sk->state != TCP_LISTEN) {
	sk->err = EINVAL;
	return(NULL);
  }

  /* Interrupts off while testing/sleeping on the queue so a wakeup
   * cannot slip in between the empty test and the sleep.
   */
  cli();
  sk->inuse = 1;
  while((skb = get_firstr(sk)) == NULL) {
	if (flags & O_NONBLOCK) {
		sti();
		release_sock(sk);
		sk->err = EAGAIN;
		return(NULL);
	}

	release_sock(sk);
	interruptible_sleep_on(sk->sleep);
	/* Interrupted by a signal: let the syscall be restarted. */
	if (current->signal & ~current->blocked) {
		sti();
		sk->err = ERESTARTSYS;
		return(NULL);
	}
	sk->inuse = 1;
  }
  sti();

  /* The queued SYN skb carries the new connection's socket. */
  newsk = skb->sk;

  kfree_skb(skb, FREE_READ);
  sk->ack_backlog--;
  release_sock(sk);
  return(newsk);
}
2896
2897
2898
/*
 * Initiate an active open: validate the user's sockaddr, pick an
 * initial sequence number, build and transmit the SYN (with an MSS
 * option), and move the socket to SYN_SENT.  Returns 0 or a negative
 * errno.
 */
static int
tcp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len)
{
  struct sk_buff *buff;
  struct sockaddr_in sin;
  struct device *dev=NULL;
  unsigned char *ptr;
  int tmp;
  struct tcphdr *t1;
  int err;

  if (sk->state != TCP_CLOSE) return(-EISCONN);
  if (addr_len < 8) return(-EINVAL);

  err=verify_area(VERIFY_READ, usin, addr_len);
  if(err)
	return err;

  memcpy_fromfs(&sin,usin, min(sizeof(sin), addr_len));

  if (sin.sin_family && sin.sin_family != AF_INET) return(-EAFNOSUPPORT);

  DPRINTF((DBG_TCP, "TCP connect daddr=%s\n", in_ntoa(sin.sin_addr.s_addr)));

  /* TCP to a broadcast address makes no sense. */
  if (chk_addr(sin.sin_addr.s_addr) == IS_BROADCAST) {
	DPRINTF((DBG_TCP, "TCP connection to broadcast address not allowed\n"));
	return(-ENETUNREACH);
  }

  /* Refuse an obvious connect-to-self on the same port. */
  if(sk->saddr == sin.sin_addr.s_addr && sk->num==ntohs(sin.sin_port))
	return -EBUSY;

  sk->inuse = 1;
  sk->daddr = sin.sin_addr.s_addr;
  /* Clock-driven initial send sequence number. */
  sk->send_seq = jiffies * SEQ_TICK - seq_offset;
  sk->rcv_ack_seq = sk->send_seq -1;
  sk->err = 0;
  sk->dummy_th.dest = sin.sin_port;
  release_sock(sk);	/* wmalloc with GFP_KERNEL may sleep */

  buff = sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
  if (buff == NULL) {
	return(-ENOMEM);
  }
  sk->inuse = 1;
  buff->mem_addr = buff;
  buff->mem_len = MAX_SYN_SIZE;
  buff->len = 24;	/* TCP header (20) + MSS option (4) */
  buff->sk = sk;
  buff->free = 1;
  t1 = (struct tcphdr *) buff->data;

  /* Lower-layer headers; this also selects the output device. */
  tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
			       IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);
  if (tmp < 0) {
	sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
	release_sock(sk);
	return(-ENETUNREACH);
  }
  buff->len += tmp;
  t1 = (struct tcphdr *)((char *)t1 +tmp);

  /* SYN segment built from the socket's template header. */
  memcpy(t1,(void *)&(sk->dummy_th), sizeof(*t1));
  t1->seq = ntohl(sk->send_seq++);
  buff->h.seq = sk->send_seq;
  t1->ack = 0;
  /* NOTE(review): a tiny initial window is advertised here - verify
   * this deliberate value against the receive-window logic. */
  t1->window = 2;
  t1->res1=0;
  t1->res2=0;
  t1->rst = 0;
  t1->urg = 0;
  t1->psh = 0;
  t1->syn = 1;
  t1->urg_ptr = 0;
  t1->doff = 6;	/* 24 bytes including the MSS option */

  /* MSS: configured value or 576-byte default, capped by the device. */
  if (sk->mss)
	sk->mtu = sk->mss;
  else
	sk->mtu = 576 - HEADER_SIZE;

  sk->mtu = min(sk->mtu, dev->mtu - HEADER_SIZE);

  /* MSS option: kind 2, length 4, value in network byte order. */
  ptr = (unsigned char *)(t1+1);
  ptr[0] = 2;
  ptr[1] = 4;
  ptr[2] = (sk->mtu) >> 8;
  ptr[3] = (sk->mtu) & 0xff;
  tcp_send_check(t1, sk->saddr, sk->daddr,
		 sizeof(struct tcphdr) + 4, sk);

  /* Arm the SYN retransmit timer and budget its retries. */
  sk->state = TCP_SYN_SENT;
  sk->rtt = TCP_CONNECT_TIME;
  reset_timer(sk, TIME_WRITE, TCP_CONNECT_TIME);
  sk->retransmits = TCP_RETR2 - TCP_SYN_RETRIES;

  sk->prot->queue_xmit(sk, dev, buff, 0);

  release_sock(sk);
  return(0);
}
3007
3008
3009
/*
 * Decide whether an incoming segment is acceptable with respect to the
 * receive window.  Returns 1 when the segment should be processed
 * further and 0 when it has been rejected here (in which case an ACK
 * or reset may already have been sent from this function).
 */
static int
tcp_sequence(struct sock *sk, struct tcphdr *th, short len,
	 struct options *opt, unsigned long saddr, struct device *dev)
{
  DPRINTF((DBG_TCP, "tcp_sequence(sk=%X, th=%X, len = %d, opt=%d, saddr=%X)\n",
	sk, th, len, opt, saddr));

  /*
   * Accept the segment if any part of it overlaps the window
   * [acked_seq, acked_seq + window]: its first byte lies inside, or
   * its last byte lies inside, or it spans the whole window.
   * Note: 'len' still includes the TCP header; th->doff*4 removes it.
   */
  if (between(th->seq, sk->acked_seq, sk->acked_seq + sk->window)||
      between(th->seq + len-(th->doff*4), sk->acked_seq + 1,
	      sk->acked_seq + sk->window) ||
     (before(th->seq, sk->acked_seq) &&
       after(th->seq + len -(th->doff*4), sk->acked_seq + sk->window))) {
	return(1);
  }
  DPRINTF((DBG_TCP, "tcp_sequence: rejecting packet.\n"));

  /*
   * During connection establishment an out-of-window segment is
   * answered with a reset instead of a re-ACK.
   */
  if(sk->state==TCP_SYN_SENT||sk->state==TCP_SYN_RECV)
  {
	tcp_reset(sk->saddr,sk->daddr,th,sk->prot,NULL,dev, sk->ip_tos,sk->ip_ttl);
	return(1);
  }

  /* Segment entirely beyond the right window edge: re-ACK unless RST. */
  if (after(th->seq, sk->acked_seq + sk->window)) {
	if(!th->rst)
		tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
	return(0);
  }

#ifdef undef
  /*
   * Disabled: would silently accept slightly-old bare ACKs without
   * sending a duplicate ACK back.
   */
  if (th->ack && len == (th->doff * 4) &&
      after(th->seq, sk->acked_seq - 32767) &&
      !th->fin && !th->syn) return(1);
#endif

  /* Old/duplicate data: re-ACK so the peer can resynchronize. */
  if (!th->rst) {
	tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
  }
  return(0);
}
3075
3076
3077
3078
3079
/*
 * Main entry point for received TCP segments, called from the IP layer.
 * Validates the packet, looks up the owning socket and dispatches on
 * the connection state.  'redo' is set when the segment is being
 * replayed from a socket's backlog queue (checksum verification and
 * the seq byte-order fixup already happened on the first pass).
 * Always returns 0.
 */
int
tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
	unsigned long daddr, unsigned short len,
	unsigned long saddr, int redo, struct inet_protocol * protocol)
{
  struct tcphdr *th;
  struct sock *sk;

  if (!skb) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv skb = NULL\n"));
	return(0);
  }
#if 0
  if (!protocol) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv protocol = NULL\n"));
	return(0);
  }

  if (!opt) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv opt = NULL\n"));
  }
#endif
  if (!dev) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv dev = NULL\n"));
	return(0);
  }
  th = skb->h.th;

  /* Demultiplex on the full (sport, saddr, dport, daddr) tuple. */
  sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
  DPRINTF((DBG_TCP, "<<\n"));
  DPRINTF((DBG_TCP, "len = %d, redo = %d, skb=%X\n", len, redo, skb));

  /* A zapped (reset) socket is treated as if it did not exist. */
  if (sk!=NULL && sk->zapped)
	sk=NULL;

  if (sk) {
	DPRINTF((DBG_TCP, "sk = %X:\n", sk));
  }

  if (!redo) {
	/* First pass: verify the checksum before touching anything. */
	if (tcp_check(th, len, saddr, daddr )) {
	  skb->sk = NULL;
	  DPRINTF((DBG_TCP, "packet dropped with bad checksum.\n"));
	  if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: bad checksum\n");
	  kfree_skb(skb,FREE_READ);
	  return(0);
	}

	/* No matching socket: answer with a reset (never reset a reset). */
	if (sk == NULL) {
	  if (!th->rst)
	  {
		th->seq = ntohl(th->seq);
		tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev,skb->ip_hdr->tos,255);
	  }
	  skb->sk = NULL;
	  kfree_skb(skb, FREE_READ);
	  return(0);
	}

	skb->len = len;
	skb->sk = sk;
	skb->acked = 0;
	skb->used = 0;
	skb->free = 0;
	skb->urg_used = 0;
	/* Addresses stored from this host's point of view for replies. */
	skb->saddr = daddr;
	skb->daddr = saddr;

	th->seq = ntohl(th->seq);	/* host byte order from here on */

	/*
	 * If the socket is busy, park the skb on its circular backlog
	 * list; release_sock() will replay it later with redo == 1.
	 */
	cli();
	if (sk->inuse) {
	  if (sk->back_log == NULL) {
		sk->back_log = skb;
		skb->next = skb;
		skb->prev = skb;
	  } else {
		skb->next = sk->back_log;
		skb->prev = sk->back_log->prev;
		skb->prev->next = skb;
		skb->next->prev = skb;
	  }
	  sti();
	  return(0);
	}
	sk->inuse = 1;
	sti();
  } else {
	/* Replay from the backlog: the socket must still exist. */
	if (!sk) {
	  DPRINTF((DBG_TCP, "tcp.c: tcp_rcv bug sk=NULL redo = 1\n"));
	  return(0);
	}
  }

  /*
   * NOTE(review): this bail-out neither frees the skb nor releases the
   * socket lock taken above — looks like a leak on a "cannot happen"
   * path; confirm before relying on it.
   */
  if (!sk->prot) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv sk->prot = NULL \n"));
	return(0);
  }

  /* Charge the receive buffer quota; drop the segment if exhausted. */
  if (sk->rmem_alloc + skb->mem_len >= sk->rcvbuf) {
	skb->sk = NULL;
	DPRINTF((DBG_TCP, "dropping packet due to lack of buffer space.\n"));
	kfree_skb(skb, FREE_READ);
	release_sock(sk);
	return(0);
  }
  sk->rmem_alloc += skb->mem_len;

  DPRINTF((DBG_TCP, "About to do switch.\n"));

  switch(sk->state) {
	/*
	 * LAST_ACK: a reset kills the connection immediately; any other
	 * segment falls through to the common established-state code.
	 */
	case TCP_LAST_ACK:
	  if (th->rst) {
		sk->zapped=1;
		sk->err = ECONNRESET;
		sk->state = TCP_CLOSE;
		sk->shutdown = SHUTDOWN_MASK;
		if (!sk->dead) {
		  sk->state_change(sk);
		}
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }
	  /* fall through */

	case TCP_ESTABLISHED:
	case TCP_CLOSE_WAIT:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
	case TCP_TIME_WAIT:
	  /* Window check; tcp_sequence() re-ACKed or reset if needed. */
	  if (!tcp_sequence(sk, th, len, opt, saddr,dev)) {
		if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: not in seq\n");
#ifdef undef
		if(!th->rst)
			tcp_send_ack(sk->send_seq, sk->acked_seq,
				     sk, th, saddr);
#endif
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }

	  if (th->rst) {
		sk->zapped=1;
		/* The connection is gone for good. */
		sk->err = ECONNRESET;

		if (sk->state == TCP_CLOSE_WAIT) {
		  sk->err = EPIPE;
		}
		sk->state = TCP_CLOSE;
		sk->shutdown = SHUTDOWN_MASK;
		if (!sk->dead) {
		  sk->state_change(sk);
		}
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }
	  /* A SYN in an established state is a protocol violation: reset. */
	  if (
#if 0
	  if ((opt && (opt->security != 0 ||
		       opt->compartment != 0)) ||
#endif
	      th->syn) {
		sk->err = ECONNRESET;
		sk->state = TCP_CLOSE;
		sk->shutdown = SHUTDOWN_MASK;
		tcp_reset(daddr, saddr, th, sk->prot, opt,dev, sk->ip_tos,sk->ip_ttl);
		if (!sk->dead) {
		  sk->state_change(sk);
		}
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }
	  if (th->ack) {
		if (!tcp_ack(sk, th, saddr, len)) {
		  kfree_skb(skb, FREE_READ);
		  release_sock(sk);
		  return(0);
		}
	  }
	  if (th->urg) {
		if (tcp_urg(sk, th, saddr)) {
		  kfree_skb(skb, FREE_READ);
		  release_sock(sk);
		  return(0);
		}
	  }

	  /*
	   * tcp_data() takes ownership of the skb when it queues it;
	   * a nonzero return means it was not kept and must be freed.
	   */
	  if (tcp_data(skb, sk, saddr, len)) {
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }

	  if (th->fin && tcp_fin(sk, th, saddr, dev)) {
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }

	  release_sock(sk);
	  return(0);

	case TCP_CLOSE:
	  /* Closed-and-dead or still-connected socket: just eat it. */
	  if (sk->dead || sk->daddr) {
		DPRINTF((DBG_TCP, "packet received for closed,dead socket\n"));
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }

	  if (!th->rst) {
		if (!th->ack)
		  th->ack_seq = 0;
		tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
	  }
	  kfree_skb(skb, FREE_READ);
	  release_sock(sk);
	  return(0);

	case TCP_LISTEN:
	  if (th->rst) {
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }
	  /* An ACK at a listener refers to no connection: reset it. */
	  if (th->ack) {
		tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }

	  if (th->syn) {
#if 0
		if (opt->security != 0 || opt->compartment != 0) {
		  tcp_reset(daddr, saddr, th, prot, opt,dev);
		  release_sock(sk);
		  return(0);
		}
#endif
		/*
		 * tcp_conn_request() builds the new connection and
		 * disposes of the skb itself.
		 */
		tcp_conn_request(sk, skb, daddr, saddr, opt, dev);
		release_sock(sk);
		return(0);
	  }

	  kfree_skb(skb, FREE_READ);
	  release_sock(sk);
	  return(0);

	default:
	  /*
	   * Remaining states (e.g. CLOSING): window-check first, then
	   * share the SYN_SENT/SYN_RECV code below via fallthrough.
	   */
	  if (!tcp_sequence(sk, th, len, opt, saddr,dev)) {
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }
	  /* fall through */

	case TCP_SYN_SENT:
	  if (th->rst) {
		sk->err = ECONNREFUSED;
		sk->state = TCP_CLOSE;
		sk->shutdown = SHUTDOWN_MASK;
		sk->zapped = 1;
		if (!sk->dead) {
		  sk->state_change(sk);
		}
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }
#if 0
	  if (opt->security != 0 || opt->compartment != 0) {
		sk->err = ECONNRESET;
		sk->state = TCP_CLOSE;
		sk->shutdown = SHUTDOWN_MASK;
		tcp_reset(daddr, saddr, th, sk->prot, opt, dev);
		if (!sk->dead) {
		  wake_up(sk->sleep);
		}
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }
#endif
	  /* Bare SYN while in SYN_SENT: simultaneous open. */
	  if (!th->ack) {
		if (th->syn) {
		  sk->state = TCP_SYN_RECV;
		}
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }

	  switch(sk->state) {
		case TCP_SYN_SENT:
		  /* The ACK must cover our SYN, else the segment is bogus. */
		  if (!tcp_ack(sk, th, saddr, len)) {
			tcp_reset(daddr, saddr, th,
				  sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		  }

		  /* A valid ACK without the peer's SYN is ignored. */
		  if (!th->syn) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		  }

		  /* SYN+ACK received: acknowledge the peer's SYN... */
		  sk->acked_seq = th->seq+1;
		  sk->fin_seq = th->seq;
		  tcp_send_ack(sk->send_seq, th->seq+1,
			       sk, th, sk->daddr);
		  /* ...and fall through to finish the handshake. */

		case TCP_SYN_RECV:
		  if (!tcp_ack(sk, th, saddr, len)) {
			tcp_reset(daddr, saddr, th,
				  sk->prot, opt, dev,sk->ip_tos,sk->ip_ttl);
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		  }
		  sk->state = TCP_ESTABLISHED;

		  /* Pick up MSS etc. from the handshake options. */
		  tcp_options(sk, th);
		  sk->dummy_th.dest = th->source;
		  sk->copied_seq = sk->acked_seq-1;
		  if (!sk->dead) {
			sk->state_change(sk);
		  }

		  if (th->urg) {
			if (tcp_urg(sk, th, saddr)) {
			  kfree_skb(skb, FREE_READ);
			  release_sock(sk);
			  return(0);
			}
		  }
		  if (tcp_data(skb, sk, saddr, len))
			kfree_skb(skb, FREE_READ);

		  if (th->fin) tcp_fin(sk, th, saddr, dev);
		  release_sock(sk);
		  return(0);
	  }

	  /* States that fell in from 'default:' are handled here. */
	  if (th->urg) {
		if (tcp_urg(sk, th, saddr)) {
		  kfree_skb(skb, FREE_READ);
		  release_sock(sk);
		  return(0);
		}
	  }

	  if (tcp_data(skb, sk, saddr, len)) {
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	  }

	  if (!th->fin) {
		release_sock(sk);
		return(0);
	  }
	  tcp_fin(sk, th, saddr, dev);
	  release_sock(sk);
	  return(0);
  }
}
3497
3498
3499
3500
3501
3502
3503 static void
3504 tcp_write_wakeup(struct sock *sk)
3505 {
3506 struct sk_buff *buff;
3507 struct tcphdr *t1;
3508 struct device *dev=NULL;
3509 int tmp;
3510
3511 if (sk->zapped)
3512 return;
3513
3514 if (sk -> state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) return;
3515
3516 buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
3517 if (buff == NULL) return;
3518
3519 buff->mem_addr = buff;
3520 buff->mem_len = MAX_ACK_SIZE;
3521 buff->len = sizeof(struct tcphdr);
3522 buff->free = 1;
3523 buff->sk = sk;
3524 DPRINTF((DBG_TCP, "in tcp_write_wakeup\n"));
3525 t1 = (struct tcphdr *) buff->data;
3526
3527
3528 tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
3529 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
3530 if (tmp < 0) {
3531 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
3532 return;
3533 }
3534
3535 buff->len += tmp;
3536 t1 = (struct tcphdr *)((char *)t1 +tmp);
3537
3538 memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
3539
3540
3541
3542
3543
3544 t1->seq = ntohl(sk->send_seq-1);
3545 t1->ack = 1;
3546 t1->res1= 0;
3547 t1->res2= 0;
3548 t1->rst = 0;
3549 t1->urg = 0;
3550 t1->psh = 0;
3551 t1->fin = 0;
3552 t1->syn = 0;
3553 t1->ack_seq = ntohl(sk->acked_seq);
3554 t1->window = ntohs(tcp_select_window(sk));
3555 t1->doff = sizeof(*t1)/4;
3556 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
3557
3558
3559
3560
3561 sk->prot->queue_xmit(sk, dev, buff, 1);
3562 }
3563
3564
3565
3566
3567
/*
 * Zero-window probe: clone the headers (plus at most one data byte) of
 * the first skb on the write queue and retransmit it, to coax a fresh
 * window update out of the peer.  Rearms the probe timer with
 * exponential backoff.
 */
void
tcp_send_probe0(struct sock *sk)
{
  unsigned char *raw;
  struct iphdr *iph;
  struct sk_buff *skb2, *skb;
  int len, hlen, data;
  struct tcphdr *t1;
  struct device *dev;

  if (sk->zapped)
	return;		/* after a reset there is nothing to probe */

  /* Probe only in states where data can still flow. */
  if (sk -> state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT &&
      sk -> state != TCP_FIN_WAIT1 && sk->state != TCP_FIN_WAIT2)
	return;

  /* The probe is built from the first queued write-side skb. */
  skb = sk->wfront;
  if (skb == NULL)
	return;

  dev = skb->dev;
  if(dev==NULL)
  {
	printk("tcp_send_probe0: NULL device bug!\n");
	return;
  }
  IS_SKB(skb);

  raw = skb->data;
  iph = (struct iphdr *) (raw + dev->hard_header_len);

  /* Total link+IP header length, then the TCP payload size. */
  hlen = (iph->ihl * sizeof(unsigned long)) + dev->hard_header_len;
  data = skb->len - hlen - sizeof(struct tcphdr);
  /* Copy all headers plus one data byte, if any payload exists. */
  len = hlen + sizeof(struct tcphdr) + (data ? 1 : 0);

  if ((skb2 = alloc_skb(sizeof(struct sk_buff) + len, GFP_ATOMIC)) == NULL) {
	/* Out of memory: retry the probe again very soon. */
	reset_timer (sk, TIME_PROBE0, 10);
	return;
  }

  skb2->arp = skb->arp;
  skb2->len = len;
  skb2->h.raw = (char *)(skb2->data);

  /* NOTE(review): charged to wmem_alloc here; presumably released by
     the write-side bookkeeping after transmission — confirm. */
  sk->wmem_alloc += skb2->mem_len;

  /* Duplicate the original headers (and the single probe byte). */
  memcpy(skb2->h.raw, raw, len);

  skb2->h.raw += hlen;		/* now points at the TCP header */
  t1 = skb2->h.th;

  /* Refresh the fields that must reflect current connection state. */
  t1->ack_seq = ntohl(sk->acked_seq);
  t1->res1 = 0;
  t1->ack = 1;
  t1->urg = 0;
  t1->res2 = 0;
  t1->window = ntohs(tcp_select_window(sk));
  t1->urg_ptr = 0;
  tcp_send_check(t1, sk->saddr, sk->daddr, len - hlen, sk);

  sk->prot->queue_xmit(sk, dev, skb2, 1);
  /* Exponential backoff until the window reopens. */
  sk->backoff++;
  reset_timer (sk, TIME_PROBE0,
	       backoff (sk->backoff) * (2 * sk->mdev + sk->rtt));
  sk->retransmits++;
  sk->prot->retransmits ++;
}
3647
3648
3649
3650
3651
3652 int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
3653 {
3654 int val,err;
3655
3656 if(level!=SOL_TCP)
3657 return ip_setsockopt(sk,level,optname,optval,optlen);
3658
3659 if (optval == NULL)
3660 return(-EINVAL);
3661
3662 err=verify_area(VERIFY_READ, optval, sizeof(int));
3663 if(err)
3664 return err;
3665
3666 val = get_fs_long((unsigned long *)optval);
3667
3668 switch(optname)
3669 {
3670 case TCP_MAXSEG:
3671 if(val<200||val>2048 || val>sk->mtu)
3672 return -EINVAL;
3673 sk->mss=val;
3674 return 0;
3675 case TCP_NODELAY:
3676 sk->nonagle=(val==0)?0:1;
3677 return 0;
3678 default:
3679 return(-ENOPROTOOPT);
3680 }
3681 }
3682
3683 int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
3684 {
3685 int val,err;
3686
3687 if(level!=SOL_TCP)
3688 return ip_getsockopt(sk,level,optname,optval,optlen);
3689
3690 switch(optname)
3691 {
3692 case TCP_MAXSEG:
3693 val=sk->mss;
3694 break;
3695 case TCP_NODELAY:
3696 val=sk->nonagle;
3697 break;
3698 default:
3699 return(-ENOPROTOOPT);
3700 }
3701 err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
3702 if(err)
3703 return err;
3704 put_fs_long(sizeof(int),(unsigned long *) optlen);
3705
3706 err=verify_area(VERIFY_WRITE, optval, sizeof(int));
3707 if(err)
3708 return err;
3709 put_fs_long(val,(unsigned long *)optval);
3710
3711 return(0);
3712 }
3713
3714
/*
 * TCP's protocol operations table, handed to the generic INET socket
 * layer.  The initializer is positional (struct proto in sock.h);
 * the slot comments reflect how each member is used in this file
 * (e.g. sk->prot->wmalloc, sk->prot->queue_xmit).
 */
struct proto tcp_prot = {
  sock_wmalloc,			/* write-buffer allocation */
  sock_rmalloc,			/* read-buffer allocation */
  sock_wfree,			/* write-buffer free */
  sock_rfree,			/* read-buffer free */
  sock_rspace,			/* remaining receive-buffer space */
  sock_wspace,			/* remaining send-buffer space */
  tcp_close,
  tcp_read,
  tcp_write,
  tcp_sendto,
  tcp_recvfrom,
  ip_build_header,		/* builds link/IP headers on outgoing skbs */
  tcp_connect,
  tcp_accept,
  ip_queue_xmit,		/* hands finished skbs to the IP layer */
  tcp_retransmit,
  tcp_write_wakeup,		/* window-probe ACK sender */
  tcp_read_wakeup,
  tcp_rcv,			/* input entry point from the IP layer */
  tcp_select,
  tcp_ioctl,
  NULL,				/* no init hook */
  tcp_shutdown,
  tcp_setsockopt,
  tcp_getsockopt,
  128,				/* presumably max header space to reserve — confirm in sock.h */
  0,				/* retransmit counter (bumped in tcp_send_probe0) */
  {NULL,},			/* presumably the per-port socket array — confirm in sock.h */
  "TCP"				/* protocol name */
};