This source file includes following definitions.
- min
- print_th
- get_firstr
- diff
- tcp_select_window
- tcp_time_wait
- tcp_retransmit
- tcp_err
- tcp_readable
- tcp_select
- tcp_ioctl
- tcp_check
- tcp_send_check
- tcp_send_skb
- dequeue_partial
- enqueue_partial
- tcp_send_partial
- tcp_send_ack
- tcp_build_header
- tcp_write
- tcp_sendto
- tcp_read_wakeup
- cleanup_rbuf
- tcp_read_urg
- tcp_read
- tcp_shutdown
- tcp_recvfrom
- tcp_reset
- tcp_options
- tcp_conn_request
- tcp_close
- tcp_write_xmit
- sort_send
- tcp_ack
- tcp_data
- tcp_urg
- tcp_fin
- tcp_accept
- tcp_connect
- tcp_sequence
- tcp_rcv
- tcp_write_wakeup
- tcp_send_probe0
- tcp_setsockopt
- tcp_getsockopt
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80 #include <linux/types.h>
81 #include <linux/sched.h>
82 #include <linux/mm.h>
83 #include <linux/string.h>
84 #include <linux/socket.h>
85 #include <linux/sockios.h>
86 #include <linux/termios.h>
87 #include <linux/in.h>
88 #include <linux/fcntl.h>
89 #include "inet.h"
90 #include "dev.h"
91 #include "ip.h"
92 #include "protocol.h"
93 #include "icmp.h"
94 #include "tcp.h"
95 #include "skbuff.h"
96 #include "sock.h"
97 #include "arp.h"
98 #include <linux/errno.h>
99 #include <linux/timer.h>
100 #include <asm/system.h>
101 #include <asm/segment.h>
102 #include <linux/mm.h>
103
104 #define SEQ_TICK 3
105 unsigned long seq_offset;
106
/*
 * Return the smaller of two unsigned values.
 * (The int return type is kept to preserve the original interface.)
 */
static __inline__ int
min(unsigned int a, unsigned int b)
{
	return (a < b) ? a : b;
}
113
114
115 void
116 print_th(struct tcphdr *th)
117 {
118 unsigned char *ptr;
119
120 if (inet_debug != DBG_TCP) return;
121
122 printk("TCP header:\n");
123 ptr =(unsigned char *)(th + 1);
124 printk(" source=%d, dest=%d, seq =%ld, ack_seq = %ld\n",
125 ntohs(th->source), ntohs(th->dest),
126 ntohl(th->seq), ntohl(th->ack_seq));
127 printk(" fin=%d, syn=%d, rst=%d, psh=%d, ack=%d, urg=%d res1=%d res2=%d\n",
128 th->fin, th->syn, th->rst, th->psh, th->ack,
129 th->urg, th->res1, th->res2);
130 printk(" window = %d, check = %d urg_ptr = %d\n",
131 ntohs(th->window), ntohs(th->check), ntohs(th->urg_ptr));
132 printk(" doff = %d\n", th->doff);
133 printk(" options = %d %d %d %d\n", ptr[0], ptr[1], ptr[2], ptr[3]);
134 }
135
136
137
138
139 static struct sk_buff *
140 get_firstr(struct sock *sk)
141 {
142 return skb_dequeue(&sk->rqueue);
143 }
144
145
146
147
148
/*
 * Return the magnitude of the distance between two sequence
 * numbers, computed with wrap-around subtraction.
 */
static long
diff(unsigned long seq1, unsigned long seq2)
{
	long d = seq1 - seq2;

	if (d <= 0)
		d = ~d + 1;	/* two's-complement negate: |d| */
	return d;
}
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176 static int tcp_select_window(struct sock *sk)
177 {
178 int new_window = sk->prot->rspace(sk);
179
180
181 if(new_window<sk->window)
182 return(sk->window);
183
184 return(new_window);
185 }
186
187
188
189 static void tcp_time_wait(struct sock *sk)
190 {
191 sk->state = TCP_TIME_WAIT;
192 sk->shutdown = SHUTDOWN_MASK;
193 if (!sk->dead)
194 sk->state_change(sk);
195 reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
196 }
197
198
199
200
201
202
203
204
205 static void
206 tcp_retransmit(struct sock *sk, int all)
207 {
208 if (all) {
209 ip_retransmit(sk, all);
210 return;
211 }
212
213
214
215
216
217
218
219
220
221 sk->cong_window = 1;
222 sk->exp_growth = 0;
223
224
225 ip_retransmit(sk, all);
226 }
227
228
229
230
231
232
233
234
235
236
/*
 * Handle an ICMP error that refers to an earlier TCP segment.
 *
 * @err:    either a negative errno, or an ICMP (type << 8 | code) value.
 * @header: points at the IP header quoted in the ICMP payload; it is
 *          advanced past it to reach the embedded TCP header.
 * @daddr/@saddr: addresses used to locate the owning socket.
 */
void
tcp_err(int err, unsigned char *header, unsigned long daddr,
	unsigned long saddr, struct inet_protocol *protocol)
{
	struct tcphdr *th;
	struct sock *sk;
	struct iphdr *iph = (struct iphdr *)header;

	header += 4*iph->ihl;	/* step over the variable-length IP header */

	DPRINTF((DBG_TCP, "TCP: tcp_err(%d, hdr=%X, daddr=%X saddr=%X, protocol=%X)\n",
		err, header, daddr, saddr, protocol));

	th = (struct tcphdr *)header;
	sk = get_sock(&tcp_prot, th->source, daddr, th->dest, saddr);
	print_th(th);

	if (sk == NULL) return;	/* no local socket matches the quoted segment */

	if (err < 0) {
		/* A plain errno passed up from below: report it and return. */
		sk->err = -err;
		sk->error_report(sk);
		return;
	}

	if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8)) {
		/* Source quench: shrink the congestion window by one,
		   but never below 4. */
		if (sk->cong_window > 4) sk->cong_window--;
		return;
	}

	DPRINTF((DBG_TCP, "TCP: icmp_err got error\n"));
	/* Map the ICMP code onto an errno for the socket. */
	sk->err = icmp_err_convert[err & 0xff].errno;

	/* A fatal ICMP error aborts a connection attempt that is still
	   in SYN_SENT; established connections are left alone here. */
	if (icmp_err_convert[err & 0xff].fatal) {
		if (sk->state == TCP_SYN_SENT) {
			sk->state = TCP_CLOSE;
			sk->error_report(sk);	/* wake the connecting process */
		}
	}
	return;
}
288
289
290
291
292
293
294
295 static int
296 tcp_readable(struct sock *sk)
297 {
298 unsigned long counted;
299 unsigned long amount;
300 struct sk_buff *skb;
301 int count=0;
302 int sum;
303 unsigned long flags;
304
305 DPRINTF((DBG_TCP, "tcp_readable(sk=%X)\n", sk));
306 if(sk && sk->debug)
307 printk("tcp_readable: %p - ",sk);
308
309 if (sk == NULL || skb_peek(&sk->rqueue) == NULL)
310 {
311 if(sk && sk->debug)
312 printk("empty\n");
313 return(0);
314 }
315
316 counted = sk->copied_seq+1;
317 amount = 0;
318
319 save_flags(flags);
320 cli();
321 skb =(struct sk_buff *)sk->rqueue;
322
323
324 do {
325 count++;
326 #ifdef OLD
327
328 if (count > 20) {
329 restore_flags(flags);
330 DPRINTF((DBG_TCP, "tcp_readable, more than 20 packets without a psh\n"));
331 printk("tcp_read: possible read_queue corruption.\n");
332 return(amount);
333 }
334 #endif
335 if (before(counted, skb->h.th->seq))
336 break;
337 sum = skb->len -(counted - skb->h.th->seq);
338 if (skb->h.th->syn) sum++;
339 if (skb->h.th->urg) {
340 sum -= ntohs(skb->h.th->urg_ptr);
341 }
342 if (sum >= 0) {
343 amount += sum;
344 if (skb->h.th->syn) amount--;
345 counted += sum;
346 }
347 if (amount && skb->h.th->psh) break;
348 skb =(struct sk_buff *)skb->next;
349 } while(skb != sk->rqueue);
350 restore_flags(flags);
351 DPRINTF((DBG_TCP, "tcp readable returning %d bytes\n", amount));
352 if(sk->debug)
353 printk("got %lu bytes.\n",amount);
354 return(amount);
355 }
356
357
358
359
360
361
362
/*
 * select()/poll() backend for TCP sockets.
 *
 * @sel_type is SEL_IN, SEL_OUT or SEL_EX.  The caller is registered
 * on the socket's wait queue, and the function returns 1 when the
 * requested condition already holds, 0 when the caller must wait.
 */
static int
tcp_select(struct sock *sk, int sel_type, select_table *wait)
{
	DPRINTF((DBG_TCP, "tcp_select(sk=%X, sel_type = %d, wait = %X)\n",
		sk, sel_type, wait));

	sk->inuse = 1;	/* lock the socket while we test it */
	switch(sel_type) {
	case SEL_IN:
		if (sk->debug)
			printk("select in");
		select_wait(sk->sleep, wait);
		if (sk->debug)
			printk("-select out");
		/* Readable: data queued and either we are a listener
		   (pending connection) or at least one in-sequence byte. */
		if (skb_peek(&sk->rqueue) != NULL) {
			if (sk->state == TCP_LISTEN || tcp_readable(sk)) {
				release_sock(sk);
				if (sk->debug)
					printk("-select ok data\n");
				return(1);
			}
		}
		/* A pending error is also a readable event. */
		if (sk->err != 0) {
			release_sock(sk);
			if (sk->debug)
				printk("-select ok error");
			return(1);
		}
		/* Receive side shut down: a read returns EOF immediately. */
		if (sk->shutdown & RCV_SHUTDOWN) {
			release_sock(sk);
			if (sk->debug)
				printk("-select ok down\n");
			return(1);
		} else {
			release_sock(sk);
			if (sk->debug)
				printk("-select fail\n");
			return(0);
		}
	case SEL_OUT:
		select_wait(sk->sleep, wait);
		if (sk->shutdown & SEND_SHUTDOWN) {
			DPRINTF((DBG_TCP,
				"write select on shutdown socket.\n"));
			/* NOTE(review): a sent-shutdown socket reports
			   "not writable" rather than an error here. */
			release_sock(sk);
			return(0);
		}

		/* Writable only when at least a full MTU of send-buffer
		   space is free, and never during the handshake. */
		if (sk->prot->wspace(sk) >= sk->mtu) {
			release_sock(sk);
			/* Not connected yet: connect() is still in progress. */
			if (sk->state == TCP_SYN_RECV ||
			    sk->state == TCP_SYN_SENT) return(0);
			return(1);
		}
		DPRINTF((DBG_TCP,
			"tcp_select: sleeping on write sk->wmem_alloc = %d, "
			"sk->packets_out = %d\n"
			"sk->wback = %X, sk->wfront = %X\n"
			"sk->send_seq = %u, sk->window_seq=%u\n",
			sk->wmem_alloc, sk->packets_out,
			sk->wback, sk->wfront,
			sk->send_seq, sk->window_seq));

		release_sock(sk);
		return(0);
	case SEL_EX:
		select_wait(sk->sleep, wait);
		/* Exceptional condition == pending socket error. */
		if (sk->err) {
			release_sock(sk);
			return(1);
		}
		release_sock(sk);
		return(0);
	}

	release_sock(sk);
	return(0);
}
450
451
452 int
453 tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
454 {
455 int err;
456 DPRINTF((DBG_TCP, "tcp_ioctl(sk=%X, cmd = %d, arg=%X)\n", sk, cmd, arg));
457 switch(cmd) {
458 case DDIOCSDBG:
459 return(dbg_ioctl((void *) arg, DBG_TCP));
460
461 case TIOCINQ:
462 #ifdef FIXME
463 case FIONREAD:
464 #endif
465 {
466 unsigned long amount;
467
468 if (sk->state == TCP_LISTEN) return(-EINVAL);
469
470 sk->inuse = 1;
471 amount = tcp_readable(sk);
472 release_sock(sk);
473 DPRINTF((DBG_TCP, "returning %d\n", amount));
474 err=verify_area(VERIFY_WRITE,(void *)arg,
475 sizeof(unsigned long));
476 if(err)
477 return err;
478 put_fs_long(amount,(unsigned long *)arg);
479 return(0);
480 }
481 case SIOCATMARK:
482 {
483 struct sk_buff *skb;
484 int answ = 0;
485
486
487
488
489
490 sk->inuse = 1;
491 if ((skb=skb_peek(&sk->rqueue)) != NULL)
492 {
493 if (sk->copied_seq+1 == skb->h.th->seq && skb->h.th->urg)
494 answ = 1;
495 }
496 release_sock(sk);
497 err=verify_area(VERIFY_WRITE,(void *) arg,
498 sizeof(unsigned long));
499 if(err)
500 return err;
501 put_fs_long(answ,(int *) arg);
502 return(0);
503 }
504 case TIOCOUTQ:
505 {
506 unsigned long amount;
507
508 if (sk->state == TCP_LISTEN) return(-EINVAL);
509 amount = sk->prot->wspace(sk);
510 err=verify_area(VERIFY_WRITE,(void *)arg,
511 sizeof(unsigned long));
512 if(err)
513 return err;
514 put_fs_long(amount,(unsigned long *)arg);
515 return(0);
516 }
517 default:
518 return(-EINVAL);
519 }
520 }
521
522
523
/*
 * Compute the TCP checksum: a one's-complement sum over the
 * pseudo-header (source address, destination address, protocol,
 * segment length) followed by @len bytes of segment starting at
 * @th, folded to 16 bits and complemented.  i386-specific: the
 * summation is done with inline assembly using add-with-carry.
 */
unsigned short
tcp_check(struct tcphdr *th, int len,
	  unsigned long saddr, unsigned long daddr)
{
	unsigned long sum;

	if (saddr == 0) saddr = my_addr();	/* unbound socket: use our own address */
	print_th(th);
	/* Sum the pseudo-header: daddr + saddr + (byte-swapped length
	   combined with the TCP protocol number). */
	__asm__("\t addl %%ecx,%%ebx\n"
		"\t adcl %%edx,%%ebx\n"
		"\t adcl $0, %%ebx\n"
		: "=b"(sum)
		: "0"(daddr), "c"(saddr), "d"((ntohs(len) << 16) + IPPROTO_TCP*256)
		: "cx","bx","dx" );

	/* Sum the bulk of the segment, 32 bits at a time. */
	if (len > 3) {
		__asm__("\tclc\n"
			"1:\n"
			"\t lodsl\n"
			"\t adcl %%eax, %%ebx\n"
			"\t loop 1b\n"
			"\t adcl $0, %%ebx\n"
			: "=b"(sum) , "=S"(th)
			: "0"(sum), "c"(len/4) ,"1"(th)
			: "ax", "cx", "bx", "si" );
	}

	/* Fold the 32-bit sum down to 16 bits, adding carries back in. */
	__asm__("\t movl %%ebx, %%ecx\n"
		"\t shrl $16,%%ecx\n"
		"\t addw %%cx, %%bx\n"
		"\t adcw $0, %%bx\n"
		: "=b"(sum)
		: "0"(sum)
		: "bx", "cx");

	/* Add any trailing 16-bit word. */
	if ((len & 2) != 0) {
		__asm__("\t lodsw\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum), "=S"(th)
			: "0"(sum) ,"1"(th)
			: "si", "ax", "bx");
	}

	/* Add any trailing odd byte, zero-extended to 16 bits. */
	if ((len & 1) != 0) {
		__asm__("\t lodsb\n"
			"\t movb $0,%%ah\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum)
			: "0"(sum) ,"S"(th)
			: "si", "ax", "bx");
	}

	/* Complement the sum and mask it to 16 bits. */
	return((~sum) & 0xffff);
}
584
585
586 void tcp_send_check(struct tcphdr *th, unsigned long saddr,
587 unsigned long daddr, int len, struct sock *sk)
588 {
589 th->check = 0;
590 th->check = tcp_check(th, len, saddr, daddr);
591 return;
592 }
593
/*
 * Hand a fully-built TCP segment to the transmit path.  After
 * sanity checks and checksumming, the skb is either passed to the
 * IP layer immediately or, when the peer's window / the congestion
 * window / an in-progress retransmit forbids sending now, appended
 * to the socket's write queue (arming the zero-window probe timer
 * when appropriate).
 */
static void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
{
	int size;

	/* length of the TCP part (not counting the lower-layer headers) */
	size = skb->len - ((unsigned char *) skb->h.th - skb->data);

	/* Sanity check the computed size against the buffer. */
	if (size < sizeof(struct tcphdr) || size > skb->len) {
		printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
			skb, skb->data, skb->h.th, skb->len);
		kfree_skb(skb, FREE_WRITE);
		return;
	}

	/* A header-only segment is only legal if it carries SYN or FIN. */
	if (size == sizeof(struct tcphdr)) {
		if (!skb->h.th->syn && !skb->h.th->fin) {
			printk("tcp_send_skb: attempt to queue a bogon.\n");
			kfree_skb(skb, FREE_WRITE);
			return;
		}
	}

	/* Complete the segment with its checksum. */
	tcp_send_check(skb->h.th, sk->saddr, sk->daddr, size, sk);

	skb->h.seq = sk->send_seq;
	if (after(sk->send_seq, sk->window_seq) ||
	    (sk->retransmits && sk->timeout == TIME_WRITE) ||
	    sk->packets_out >= sk->cong_window) {
		/* Cannot transmit right now: queue at the tail of the
		   write queue instead. */
		DPRINTF((DBG_TCP, "sk->cong_window = %d, sk->packets_out = %d\n",
			sk->cong_window, sk->packets_out));
		DPRINTF((DBG_TCP, "sk->send_seq = %d, sk->window_seq = %d\n",
			sk->send_seq, sk->window_seq));
		skb->next = NULL;
		skb->magic = TCP_WRITE_QUEUE_MAGIC;
		if (sk->wback == NULL) {
			sk->wfront = skb;	/* queue was empty */
		} else {
			sk->wback->next = skb;
		}
		sk->wback = skb;
		/* Nothing in flight and the head is beyond the peer's
		   window: arm the window-probe timer so we eventually
		   poke a zero window open. */
		if (before(sk->window_seq, sk->wfront->h.seq) &&
		    sk->send_head == NULL &&
		    sk->ack_backlog == 0)
			reset_timer(sk, TIME_PROBE0,
				backoff(sk->backoff) * (2 * sk->mdev + sk->rtt));
	} else {
		sk->prot->queue_xmit(sk, skb->dev, skb, 0);
	}
}
647
648 static struct sk_buff * dequeue_partial(struct sock * sk)
649 {
650 struct sk_buff * skb;
651 unsigned long flags;
652
653 save_flags(flags);
654 cli();
655 skb = sk->send_tmp;
656 if (skb) {
657 sk->send_tmp = skb->next;
658 skb->next = NULL;
659 }
660 restore_flags(flags);
661 return skb;
662 }
663
664 static void enqueue_partial(struct sk_buff * skb, struct sock * sk)
665 {
666 struct sk_buff * tmp;
667 unsigned long flags;
668
669 skb->next = NULL;
670 save_flags(flags);
671 cli();
672 tmp = sk->send_tmp;
673 sk->send_tmp = skb;
674 restore_flags(flags);
675 if (tmp)
676 tcp_send_skb(sk, tmp);
677 }
678
679 static void tcp_send_partial(struct sock *sk)
680 {
681 struct sk_buff *skb;
682
683 if (sk == NULL)
684 return;
685 while ((skb = dequeue_partial(sk)) != NULL)
686 tcp_send_skb(sk, skb);
687 }
688
689
690
/*
 * Build and transmit a bare ACK segment.  @sequence is our current
 * send sequence, @ack the sequence number being acknowledged; @th
 * (the header being acked) supplies the port numbers and @daddr the
 * destination address.  If no buffer memory is available the ack is
 * deferred by bumping ack_backlog and arming a short retry timer.
 */
static void
tcp_send_ack(unsigned long sequence, unsigned long ack,
	     struct sock *sk,
	     struct tcphdr *th, unsigned long daddr)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	struct device *dev = NULL;
	int tmp;

	if (sk->zapped)
		return;	/* the connection has been reset; never send again */

	/* Grab an atomic buffer for the ack. */
	buff = sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL) {
		/* No memory: remember the ack is owed and retry soon. */
		sk->ack_backlog++;
		if (sk->timeout != TIME_WRITE && tcp_connected(sk->state)) {
			reset_timer(sk, TIME_WRITE, 10);
		}
		if (inet_debug == DBG_SLIP) printk("\rtcp_ack: malloc failed\n");
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_ACK_SIZE;
	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;
	t1 = (struct tcphdr *) buff->data;

	/* Put in the IP header and routing stuff. */
	tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
		IPPROTO_TCP, sk->opt, MAX_ACK_SIZE, sk->ip_tos, sk->ip_ttl);
	if (tmp < 0) {
		buff->free = 1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		if (inet_debug == DBG_SLIP) printk("\rtcp_ack: build_header failed\n");
		return;
	}
	buff->len += tmp;
	t1 = (struct tcphdr *)((char *)t1 + tmp);

	/* Start from the header we are acking... */
	memcpy(t1, th, sizeof(*t1));

	/* ...with source and destination swapped and all flags cleared
	   except ACK. */
	t1->dest = th->source;
	t1->source = th->dest;
	t1->seq = ntohl(sequence);
	t1->ack = 1;
	sk->window = tcp_select_window(sk);
	t1->window = ntohs(sk->window);
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	t1->fin = 0;
	if (ack == sk->acked_seq) {
		/* This ack is fully up to date: clear the pending-ack
		   bookkeeping and, if the write side is idle, fall back
		   to the keepalive timer (or no timer at all). */
		sk->ack_backlog = 0;
		sk->bytes_rcv = 0;
		sk->ack_timed = 0;
		if (sk->send_head == NULL && sk->wfront == NULL && sk->timeout == TIME_WRITE)
		{
			if (sk->keepopen)
				reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
			else
				delete_timer(sk);
		}
	}
	t1->ack_seq = ntohl(ack);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), sk);
	if (sk->debug)
		printk("\rtcp_ack: seq %lx ack %lx\n", sequence, ack);
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
772
773
774
775 static int
776 tcp_build_header(struct tcphdr *th, struct sock *sk, int push)
777 {
778
779
780 memcpy(th,(void *) &(sk->dummy_th), sizeof(*th));
781 th->seq = htonl(sk->send_seq);
782 th->psh =(push == 0) ? 1 : 0;
783 th->doff = sizeof(*th)/4;
784 th->ack = 1;
785 th->fin = 0;
786 sk->ack_backlog = 0;
787 sk->bytes_rcv = 0;
788 sk->ack_timed = 0;
789 th->ack_seq = htonl(sk->acked_seq);
790 sk->window = tcp_select_window(sk);
791 th->window = htons(sk->window);
792
793 return(sizeof(*th));
794 }
795
796
797
798
799
/*
 * write()/send() for TCP.  Copies user data into kernel buffers,
 * builds segments of at most one MTU and hands them to
 * tcp_send_skb().  Handles blocking for connection establishment
 * and for buffer memory, coalescing into a pending partial packet,
 * and MSG_OOB (urgent) data.  Returns the number of bytes accepted
 * or a negative errno.
 */
static int
tcp_write(struct sock *sk, unsigned char *from,
	  int len, int nonblock, unsigned flags)
{
	int copied = 0;
	int copy;
	int tmp;
	struct sk_buff *skb;
	struct sk_buff *send_tmp;
	unsigned char *buff;
	struct proto *prot;
	struct device *dev = NULL;

	DPRINTF((DBG_TCP, "tcp_write(sk=%X, from=%X, len=%d, nonblock=%d, flags=%X)\n",
		sk, from, len, nonblock, flags));

	sk->inuse = 1;	/* lock the socket against the bottom half */
	prot = sk->prot;
	while (len > 0) {
		/* A pending error terminates the write. */
		if (sk->err) {
			release_sock(sk);
			if (copied) return(copied);
			tmp = -sk->err;
			sk->err = 0;
			return(tmp);
		}

		/* The send side has been shut down: EPIPE. */
		if (sk->shutdown & SEND_SHUTDOWN) {
			release_sock(sk);
			sk->err = EPIPE;
			if (copied) return(copied);
			sk->err = 0;
			return(-EPIPE);
		}

		/* Wait here until the connection is established. */
		while (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) {
			if (sk->err) {
				release_sock(sk);
				if (copied) return(copied);
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			/* Not connected and not connecting: the write fails. */
			if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 1\n"));
				if (copied) return(copied);

				if (sk->err) {
					tmp = -sk->err;
					sk->err = 0;
					return(tmp);
				}

				if (sk->keepopen) {
					send_sig(SIGPIPE, current, 0);
				}
				return(-EPIPE);
			}

			if (nonblock || copied) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 2\n"));
				if (copied) return(copied);
				return(-EAGAIN);
			}

			/* Re-test the state with interrupts off, then sleep
			   until the handshake completes or a signal arrives. */
			release_sock(sk);
			cli();
			if (sk->state != TCP_ESTABLISHED &&
			    sk->state != TCP_CLOSE_WAIT && sk->err == 0) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					DPRINTF((DBG_TCP, "tcp_write: return 3\n"));
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
		}

		/* First try to append to a pending partial packet. */
		if ((skb = dequeue_partial(sk)) != NULL) {
			int hdrlen;

			/* size of the lower-layer headers plus the TCP header */
			hdrlen = ((unsigned long)skb->h.th - (unsigned long)skb->data)
				+ sizeof(struct tcphdr);

			/* Urgent data is never coalesced into a partial packet. */
			if (!(flags & MSG_OOB)) {
				copy = min(sk->mtu - (skb->len - hdrlen), len);

				/* Defensive check: the partial packet should never
				   already be full here. */
				if (copy <= 0) {
					printk("TCP: **bug**: \"copy\" <= 0!!\n");
					copy = 0;
				}

				memcpy_fromfs(skb->data + skb->len, from, copy);
				skb->len += copy;
				from += copy;
				copied += copy;
				len -= copy;
				sk->send_seq += copy;
			}
			enqueue_partial(skb, sk);
			/* A now-full segment (or queued-behind OOB data) must
			   be sent immediately. */
			if ((skb->len - hdrlen) >= sk->mtu || (flags & MSG_OOB)) {
				tcp_send_partial(sk);
			}
			continue;
		}

#if 0
		/* (disabled) window-aware sizing of the next segment */
		copy = diff(sk->window_seq, sk->send_seq);
		if (copy < (diff(sk->window_seq, sk->rcv_ack_seq) >> 2))
			copy = sk->mtu;
		copy = min(copy, sk->mtu);
		copy = min(copy, len);
#else
		/* Simply cap the segment at one MTU. */
		copy = min(sk->mtu, len);
#endif

		/* With data already in flight and less than a full segment
		   to send, allocate a full-MTU buffer so later writes can
		   be coalesced into it; otherwise size the buffer to fit. */
		if (sk->packets_out && copy < sk->mtu && !(flags & MSG_OOB)) {
			/* release the socket: wmalloc may sleep */
			release_sock(sk);
			skb = prot->wmalloc(sk, sk->mtu + 128 + prot->max_header + sizeof(*skb), 0, GFP_KERNEL);
			sk->inuse = 1;
			send_tmp = skb;	/* remember to park it as a partial packet */
		} else {
			/* release the socket: wmalloc may sleep */
			release_sock(sk);
			skb = prot->wmalloc(sk, copy + prot->max_header + sizeof(*skb), 0, GFP_KERNEL);
			sk->inuse = 1;
			send_tmp = NULL;
		}

		/* No buffer memory: block (or bail out for nonblocking). */
		if (skb == NULL) {
			if (nonblock) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 4\n"));
				if (copied) return(copied);
				return(-EAGAIN);
			}

			/* Sleep until write memory is freed, unless some was
			   freed between the snapshot and the cli(). */
			tmp = sk->wmem_alloc;
			release_sock(sk);
			cli();
			if (tmp <= sk->wmem_alloc &&
			    (sk->state == TCP_ESTABLISHED || sk->state == TCP_CLOSE_WAIT)
			    && sk->err == 0) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					DPRINTF((DBG_TCP, "tcp_write: return 5\n"));
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
			continue;
		}

		skb->len = 0;
		skb->sk = sk;
		skb->free = 0;

		buff = skb->data;

		/* Build the lower-layer (IP and link) headers first. */
		tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev,
			IPPROTO_TCP, sk->opt, skb->mem_len, sk->ip_tos, sk->ip_ttl);
		if (tmp < 0) {
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 6\n"));
			if (copied) return(copied);
			return(tmp);
		}
		skb->len += tmp;
		skb->dev = dev;
		buff += tmp;
		skb->h.th = (struct tcphdr *) buff;
		/* PSH is set when this is the final segment (len == copy). */
		tmp = tcp_build_header((struct tcphdr *)buff, sk, len - copy);
		if (tmp < 0) {
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 7\n"));
			if (copied) return(copied);
			return(tmp);
		}

		if (flags & MSG_OOB) {
			/* Mark the whole payload urgent. */
			((struct tcphdr *)buff)->urg = 1;
			((struct tcphdr *)buff)->urg_ptr = ntohs(copy);
		}
		skb->len += tmp;
		memcpy_fromfs(buff + tmp, from, copy);	/* copy the user data in */

		from += copy;
		copied += copy;
		len -= copy;
		skb->len += copy;
		skb->free = 0;
		sk->send_seq += copy;

		if (send_tmp != NULL) {
			/* Keep this undersized segment around for coalescing. */
			enqueue_partial(send_tmp, sk);
			continue;
		}
		tcp_send_skb(sk, skb);
	}
	sk->err = 0;

	/* Flush any remaining partial packet when nothing else is in
	   flight, or when nagling is off and the window permits. */
	if (sk->send_tmp &&
	    ((!sk->packets_out)
	     /* If not nagling we can send on the before case too.. */
	     || (sk->nonagle && before(sk->send_seq, sk->window_seq))
	    ))
		tcp_send_partial(sk);

	release_sock(sk);
	DPRINTF((DBG_TCP, "tcp_write: return 8\n"));
	return(copied);
}
1059
1060
1061 static int
1062 tcp_sendto(struct sock *sk, unsigned char *from,
1063 int len, int nonblock, unsigned flags,
1064 struct sockaddr_in *addr, int addr_len)
1065 {
1066 struct sockaddr_in sin;
1067
1068 if (addr_len < sizeof(sin)) return(-EINVAL);
1069 memcpy_fromfs(&sin, addr, sizeof(sin));
1070 if (sin.sin_family && sin.sin_family != AF_INET) return(-EINVAL);
1071 if (sin.sin_port != sk->dummy_th.dest) return(-EINVAL);
1072 if (sin.sin_addr.s_addr != sk->daddr) return(-EINVAL);
1073 return(tcp_write(sk, from, len, nonblock, flags));
1074 }
1075
1076
/*
 * Send a window-update/ack segment if acks are owed to the peer
 * (sk->ack_backlog != 0).  Called after a reader has freed receive
 * buffer space so the peer learns about the larger window.  On
 * memory shortage the attempt is retried via a short write timer.
 */
static void
tcp_read_wakeup(struct sock *sk)
{
	int tmp;
	struct device *dev = NULL;
	struct tcphdr *t1;
	struct sk_buff *buff;

	DPRINTF((DBG_TCP, "in tcp read wakeup\n"));
	if (!sk->ack_backlog) return;	/* nothing owed to the peer */

	/* Grab an atomic buffer for the ack. */
	buff = sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL) {
		/* Again real soon. */
		reset_timer(sk, TIME_WRITE, 10);
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_ACK_SIZE;
	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;

	/* Put in the IP header and routing stuff. */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
		IPPROTO_TCP, sk->opt, MAX_ACK_SIZE, sk->ip_tos, sk->ip_ttl);
	if (tmp < 0) {
		buff->free = 1;
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		return;
	}

	buff->len += tmp;
	t1 = (struct tcphdr *)(buff->data + tmp);

	/* Build a bare ack from the socket's template header. */
	memcpy(t1, (void *) &sk->dummy_th, sizeof(*t1));
	t1->seq = ntohl(sk->send_seq);
	t1->ack = 1;
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	sk->ack_backlog = 0;	/* this segment pays off the ack debt */
	sk->bytes_rcv = 0;
	sk->window = tcp_select_window(sk);
	t1->window = ntohs(sk->window);
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
1140
1141
1142
1143
1144
1145
1146
1147
/*
 * Free all fully-consumed buffers at the head of the receive queue
 * and, if that freed a significant amount of receive space, tell
 * the peer: either with an immediate window update or by scheduling
 * a delayed ack.
 */
static void
cleanup_rbuf(struct sock *sk)
{
	unsigned long flags;
	int left;
	struct sk_buff *skb;

	if (sk->debug)
		printk("cleaning rbuf for sk=%p\n", sk);

	save_flags(flags);
	cli();

	left = sk->prot->rspace(sk);	/* receive space before the purge */

	/* Drop consumed buffers from the head of the queue, stopping
	   at the first one the reader has not finished with. */
	while ((skb = skb_peek(&sk->rqueue)) != NULL) {
		if (!skb->used)
			break;
		skb_unlink(skb);
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
	}

	restore_flags(flags);

	DPRINTF((DBG_TCP, "sk->window left = %d, sk->prot->rspace(sk)=%d\n",
		sk->window - sk->bytes_rcv, sk->prot->rspace(sk)));

	if (sk->debug)
		printk("sk->rspace = %lu, was %d\n", sk->prot->rspace(sk),
			left);
	if (sk->prot->rspace(sk) != left) {
		/* Space was freed: make sure the peer learns about the
		   larger window.  An explicit update goes out right away
		   only when at least a whole extra MTU became available;
		   otherwise just ensure an ack happens soon. */
		sk->ack_backlog++;
		if ((sk->prot->rspace(sk) > (sk->window - sk->bytes_rcv + sk->mtu))) {
			/* Send an ack right now. */
			tcp_read_wakeup(sk);
		} else {
			/* Force it to send an ack soon. */
			int was_active = del_timer(&sk->timer);
			if (!was_active || TCP_ACK_TIME < sk->timer.expires) {
				reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
			} else
				add_timer(&sk->timer);	/* existing timer was sooner: restore it */
		}
	}
}
1216
1217
1218
/*
 * Read urgent ("out of band") data — the MSG_OOB path of tcp_read().
 * Blocks (unless @nonblock) until urgent data is queued, then copies
 * up to the urgent length to user space.  MSG_PEEK leaves the urgent
 * data marked unread.  Returns the byte count or a negative errno.
 */
static int
tcp_read_urg(struct sock * sk, int nonblock,
	     unsigned char *to, int len, unsigned flags)
{
	int copied = 0;
	struct sk_buff *skb;

	DPRINTF((DBG_TCP, "tcp_read_urg(sk=%X, to=%X, len=%d, flags=%X)\n",
		sk, to, len, flags));

	while (len > 0) {
		sk->inuse = 1;
		/* Wait until urgent data is available (or fail out). */
		while (sk->urg == 0 || skb_peek(&sk->rqueue) == NULL) {
			if (sk->err) {	/* a pending error takes precedence */
				int tmp;

				release_sock(sk);
				if (copied) return(copied);
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			if (sk->state == TCP_CLOSE || sk->done) {
				release_sock(sk);
				if (copied) return(copied);
				if (!sk->done) {
					sk->done = 1;	/* first read after close returns EOF */
					return(0);
				}
				return(-ENOTCONN);
			}

			if (sk->shutdown & RCV_SHUTDOWN) {
				release_sock(sk);
				if (copied == 0)
					sk->done = 1;
				return(copied);
			}

			if (nonblock || copied) {
				release_sock(sk);
				if (copied) return(copied);
				return(-EAGAIN);
			}

			/* Re-test the condition with interrupts off, then
			   sleep until the state changes. */
			release_sock(sk);
			cli();
			if ((sk->urg == 0 || skb_peek(&sk->rqueue) == NULL) &&
			    sk->err == 0 && !(sk->shutdown & RCV_SHUTDOWN)) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
		}

		/* Walk the receive queue for the first segment carrying
		   unread urgent data. */
		skb = skb_peek(&sk->rqueue);
		do {
			int amt;

			if (skb->h.th->urg && !skb->urg_used) {
				if (skb->h.th->urg_ptr == 0) {
					/* NOTE(review): an urgent pointer of 0 is
					   treated here as "whole segment urgent". */
					skb->h.th->urg_ptr = ntohs(skb->len);
				}
				amt = min(ntohs(skb->h.th->urg_ptr), len);
				if (amt) {
					/* urgent bytes start just past the header */
					memcpy_tofs(to, (unsigned char *)(skb->h.th) +
						skb->h.th->doff*4, amt);
				}

				if (!(flags & MSG_PEEK)) {
					skb->urg_used = 1;
					sk->urg--;
				}
				release_sock(sk);
				copied += amt;
				return(copied);
			}
			skb = (struct sk_buff *)skb->next;
		} while (skb != sk->rqueue);
	}

	release_sock(sk);
	return(0);
}
1312
1313
1314
/*
 * read()/recv() for TCP.  Copies in-sequence data from the receive
 * queue to user space, blocking as required, acknowledging consumed
 * data through cleanup_rbuf(), and honouring MSG_PEEK and MSG_OOB
 * (the latter is handed off to tcp_read_urg()).  Returns the number
 * of bytes copied or a negative errno.
 */
static int
tcp_read(struct sock *sk, unsigned char *to,
	 int len, int nonblock, unsigned flags)
{
	int copied = 0;	/* bytes delivered to the user so far */
	struct sk_buff *skb;
	unsigned long offset;
	unsigned long used;
	int err;

	if (len == 0) return(0);
	if (len < 0) {
		return(-EINVAL);
	}

	err = verify_area(VERIFY_WRITE, to, len);
	if (err)
		return err;

	/* Can't read from a listening socket. */
	if (sk->state == TCP_LISTEN) return(-ENOTCONN);

	/* Urgent data is handled on its own path. */
	if ((flags & MSG_OOB))
		return(tcp_read_urg(sk, nonblock, to, len, flags));

	/* Lock out the bottom half while we work. */
	sk->inuse = 1;

	skb = skb_peek(&sk->rqueue);

	DPRINTF((DBG_TCP, "tcp_read(sk=%X, to=%X, len=%d, nonblock=%d, flags=%X)\n",
		sk, to, len, nonblock, flags));

	while (len > 0) {
		/* No usable skb at the cursor (empty queue, a sequence
		   gap, or an already-consumed buffer): clean up and
		   decide whether to return or sleep. */
		while (skb == NULL ||
		       before(sk->copied_seq+1, skb->h.th->seq) || skb->used) {
			DPRINTF((DBG_TCP, "skb = %X:\n", skb));
			cleanup_rbuf(sk);
			if (sk->err) {
				int tmp;

				release_sock(sk);
				if (copied) {
					DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
						copied));
					return(copied);
				}
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			if (sk->state == TCP_CLOSE) {
				release_sock(sk);
				if (copied) {
					DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
						copied));
					return(copied);
				}
				if (!sk->done) {
					sk->done = 1;	/* first read after close returns EOF */
					return(0);
				}
				return(-ENOTCONN);
			}

			if (sk->shutdown & RCV_SHUTDOWN) {
				release_sock(sk);
				if (copied == 0) sk->done = 1;
				DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
				return(copied);
			}

			if (nonblock || copied) {
				release_sock(sk);
				if (sk->debug)
					printk("read: EAGAIN\n");
				if (copied) {
					DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
						copied));
					return(copied);
				}
				return(-EAGAIN);
			}

			/* A peek that already got data never blocks. */
			if ((flags & MSG_PEEK) && copied != 0) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
				return(copied);
			}

			DPRINTF((DBG_TCP, "tcp_read about to sleep. state = %d\n",
				sk->state));
			release_sock(sk);

			/* Releasing the socket may have delivered backlog
			   data or changed state: re-test everything with
			   interrupts off before actually sleeping. */
			cli();
			if (sk->shutdown & RCV_SHUTDOWN || sk->err != 0) {
				sk->inuse = 1;
				sti();
				continue;
			}

			if (skb_peek(&sk->rqueue) == NULL ||
			    before(sk->copied_seq+1, sk->rqueue->h.th->seq)) {
				if (sk->debug)
					printk("Read wait sleep\n");
				interruptible_sleep_on(sk->sleep);
				if (sk->debug)
					printk("Read wait wakes\n");
				if (current->signal & ~current->blocked) {
					sti();
					if (copied) {
						DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
							copied));
						return(copied);
					}
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
			DPRINTF((DBG_TCP, "tcp_read woke up. \n"));

			/* re-fetch: the queue head may have changed while we slept */
			skb = skb_peek(&sk->rqueue);
		}

		/* Offset of the next unread byte within this skb's payload. */
		offset = sk->copied_seq+1 - skb->h.th->seq;

		if (skb->h.th->syn) offset--;	/* SYN takes a sequence slot but no data */
		if (offset < skb->len) {
			/* Urgent data embedded in this segment must be dealt
			   with before the surrounding normal data. */
			if (skb->h.th->urg) {
				if (skb->urg_used) {
					/* already read out of band: skip past it */
					sk->copied_seq += ntohs(skb->h.th->urg_ptr);
					offset += ntohs(skb->h.th->urg_ptr);
					if (offset >= skb->len) {
						skb->used = 1;
						skb = (struct sk_buff *)skb->next;
						continue;
					}
				} else {
					/* unread urgent data ahead: signal the
					   process instead of returning it inline */
					release_sock(sk);
					if (copied)
						return(copied);
					send_sig(SIGURG, current, 0);
					return(-EINTR);
				}
			}

			used = min(skb->len - offset, len);

			memcpy_tofs(to, ((unsigned char *)skb->h.th) +
				skb->h.th->doff*4 + offset, used);
			copied += used;
			len -= used;
			to += used;

			/* A real read (not a peek) consumes the data. */
			if (!(flags & MSG_PEEK))
				sk->copied_seq += used;

			/* Mark the buffer consumed when we really read it,
			   its urgent data (if any) is gone, and we reached
			   its end. */
			if (!(flags & MSG_PEEK) &&
			    (!skb->h.th->urg || skb->urg_used) &&
			    (used + offset >= skb->len))
				skb->used = 1;

			/* Stop at an urgent boundary. */
			if (skb->h.th->urg) {
				break;
			}
		} else {
			/* nothing left in this buffer */
			skb->used = 1;
		}

		skb = (struct sk_buff *)skb->next;
	}

	cleanup_rbuf(sk);	/* free what we consumed and update the window */
	release_sock(sk);
	DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
	if (copied == 0 && nonblock)
		return(-EAGAIN);
	return(copied);
}
1540
1541
1542
1543
1544
1545
/*
 * shutdown() for the send direction: flush any partial packet, build
 * a FIN segment, queue it behind unsent write data (or transmit it
 * immediately if the write queue is empty) and move the connection
 * to FIN_WAIT1/FIN_WAIT2.
 *
 * NOTE(review): only SEND_SHUTDOWN is acted on here; other bits of
 * @how are ignored.
 */
void
tcp_shutdown(struct sock *sk, int how)
{
	struct sk_buff *buff;
	struct tcphdr *t1, *th;
	struct proto *prot;
	int tmp;
	struct device *dev = NULL;

	/* A FIN has already been sent in these states. */
	if (sk->state == TCP_FIN_WAIT1 || sk->state == TCP_FIN_WAIT2) return;
	if (!(how & SEND_SHUTDOWN)) return;
	sk->inuse = 1;

	/* Clear out any half completed packets. */
	if (sk->send_tmp) tcp_send_partial(sk);

	prot = (struct proto *)sk->prot;
	th = (struct tcphdr *)&sk->dummy_th;
	release_sock(sk);	/* in case wmalloc sleeps */
	buff = prot->wmalloc(sk, MAX_RESET_SIZE, 1, GFP_KERNEL);
	if (buff == NULL) return;
	sk->inuse = 1;

	DPRINTF((DBG_TCP, "tcp_shutdown_send buff = %X\n", buff));
	buff->mem_addr = buff;
	buff->mem_len = MAX_RESET_SIZE;
	buff->sk = sk;
	buff->len = sizeof(*t1);
	t1 = (struct tcphdr *) buff->data;

	/* Put in the IP header and routing stuff. */
	tmp = prot->build_header(buff, sk->saddr, sk->daddr, &dev,
		IPPROTO_TCP, sk->opt,
		sizeof(struct tcphdr), sk->ip_tos, sk->ip_ttl);
	if (tmp < 0) {
		buff->free = 1;
		prot->wfree(sk, buff->mem_addr, buff->mem_len);
		release_sock(sk);
		DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
		return;
	}

	t1 = (struct tcphdr *)((char *)t1 + tmp);
	buff->len += tmp;
	buff->dev = dev;
	/* Build the FIN segment from the template header. */
	memcpy(t1, th, sizeof(*t1));
	t1->seq = ntohl(sk->send_seq);
	sk->send_seq++;	/* the FIN consumes one sequence number */
	buff->h.seq = sk->send_seq;
	t1->ack = 1;
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->window = ntohs(sk->window = tcp_select_window(sk));
	t1->fin = 1;
	t1->rst = 0;
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

	/* The FIN must go out after all queued data, so append it to
	   the write queue when that queue is non-empty. */
	if (sk->wback != NULL) {
		buff->free = 0;
		buff->next = NULL;
		sk->wback->next = buff;
		sk->wback = buff;
		buff->magic = TCP_WRITE_QUEUE_MAGIC;
	} else {
		sk->prot->queue_xmit(sk, dev, buff, 0);
	}

	if (sk->state == TCP_ESTABLISHED) sk->state = TCP_FIN_WAIT1;
	else sk->state = TCP_FIN_WAIT2;

	release_sock(sk);
}
1630
1631
1632 static int
1633 tcp_recvfrom(struct sock *sk, unsigned char *to,
1634 int to_len, int nonblock, unsigned flags,
1635 struct sockaddr_in *addr, int *addr_len)
1636 {
1637 struct sockaddr_in sin;
1638 int len;
1639 int err;
1640 int result;
1641
1642
1643
1644
1645 err = verify_area(VERIFY_WRITE,addr_len,sizeof(long));
1646 if(err)
1647 return err;
1648 len = get_fs_long(addr_len);
1649 if(len > sizeof(sin))
1650 len = sizeof(sin);
1651 err=verify_area(VERIFY_WRITE, addr, len);
1652 if(err)
1653 return err;
1654
1655 result=tcp_read(sk, to, to_len, nonblock, flags);
1656
1657 if (result < 0) return(result);
1658
1659 sin.sin_family = AF_INET;
1660 sin.sin_port = sk->dummy_th.dest;
1661 sin.sin_addr.s_addr = sk->daddr;
1662
1663 memcpy_tofs(addr, &sin, len);
1664 put_fs_long(len, addr_len);
1665 return(result);
1666 }
1667
1668
1669
/*
 * Build and transmit an RST segment in reply to the header 'th' that
 * arrived from saddr.  No socket is involved (sk == NULL throughout):
 * this answers segments for which we have no connection.
 */
static void
tcp_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
	  struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
{
  struct sk_buff *buff;
  struct tcphdr *t1;
  int tmp;

  /* Atomic allocation - we may be in interrupt context; drop the RST
   * silently if memory is tight. */
  buff = prot->wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
  if (buff == NULL)
	return;

  DPRINTF((DBG_TCP, "tcp_reset buff = %X\n", buff));
  buff->mem_addr = buff;
  buff->mem_len = MAX_RESET_SIZE;
  buff->len = sizeof(*t1);
  buff->sk = NULL;
  buff->dev = dev;

  t1 =(struct tcphdr *) buff->data;

  /* Prepend the IP (and link) header. */
  tmp = prot->build_header(buff, saddr, daddr, &dev, IPPROTO_TCP, opt,
			   sizeof(struct tcphdr),tos,ttl);
  if (tmp < 0) {
	buff->free = 1;
	prot->wfree(NULL, buff->mem_addr, buff->mem_len);
	return;
  }
  t1 =(struct tcphdr *)((char *)t1 +tmp);
  buff->len += tmp;
  /* Start from a copy of the offending header, then swap the ports and
   * overwrite every flag below. */
  memcpy(t1, th, sizeof(*t1));

  t1->dest = th->source;
  t1->source = th->dest;
  t1->rst = 1;
  t1->window = 0;

  /* Per RFC 793: if the incoming segment had an ACK, the RST carries
   * seq = their ack (already network order - copied straight across);
   * otherwise seq 0 and ack their seq (+1 for a SYN).
   * NOTE(review): th->seq appears to be in host order at this point
   * (htonl applied here) while th->ack_seq is not converted - verify
   * against the byte-swapping done by the caller (tcp_rcv). */
  if(th->ack)
  {
	t1->ack=0;
	t1->seq=th->ack_seq;
	t1->ack_seq=0;
  }
  else
  {
	t1->ack=1;
	if(!th->syn)
		t1->ack_seq=htonl(th->seq);
	else
		t1->ack_seq=htonl(th->seq+1);
	t1->seq=0;
  }

  t1->syn = 0;
  t1->urg = 0;
  t1->fin = 0;
  t1->psh = 0;
  t1->doff = sizeof(*t1)/4;
  tcp_send_check(t1, saddr, daddr, sizeof(*t1), NULL);
  prot->queue_xmit(NULL, dev, buff, 1);
}
1737
1738
1739
1740
1741
1742
1743 static void
1744 tcp_options(struct sock *sk, struct tcphdr *th)
1745 {
1746 unsigned char *ptr;
1747 int length=(th->doff*4)-sizeof(struct tcphdr);
1748
1749 ptr = (unsigned char *)(th + 1);
1750
1751 while(length>0)
1752 {
1753 int opcode=*ptr++;
1754 int opsize=*ptr++;
1755 switch(opcode)
1756 {
1757 case TCPOPT_EOL:
1758 return;
1759 case TCPOPT_NOP:
1760 length-=2;
1761 continue;
1762
1763 default:
1764 if(opsize<=2)
1765 return;
1766 switch(opcode)
1767 {
1768 case TCPOPT_MSS:
1769 if(opsize==4)
1770 {
1771 sk->mtu=min(sk->mtu,ntohs(*(unsigned short *)ptr));
1772 }
1773 break;
1774
1775 }
1776 ptr+=opsize-2;
1777 length-=opsize;
1778 }
1779 }
1780
1781 }
1782
1783
1784
1785
1786
1787
1788
1789
/*
 * Handle an incoming SYN on a listening socket: clone 'sk' into a new
 * socket in SYN_RECV state, answer with SYN+ACK (carrying our MSS
 * option), and park the original skb on the listener's receive queue so
 * accept() can find the child via skb->sk.
 */
static void
tcp_conn_request(struct sock *sk, struct sk_buff *skb,
		 unsigned long daddr, unsigned long saddr,
		 struct options *opt, struct device *dev)
{
  struct sk_buff *buff;
  struct tcphdr *t1;
  unsigned char *ptr;
  struct sock *newsk;
  struct tcphdr *th;
  int tmp;

  DPRINTF((DBG_TCP, "tcp_conn_request(sk = %X, skb = %X, daddr = %X, sadd4= %X, \n"
	  "                  opt = %X, dev = %X)\n",
	  sk, skb, daddr, saddr, opt, dev));

  th = skb->h.th;

  /* Wake the listener; a dead listener gets the SYN answered with RST. */
  if (!sk->dead) {
	sk->data_ready(sk,0);
  } else {
	DPRINTF((DBG_TCP, "tcp_conn_request on dead socket\n"));
	tcp_reset(daddr, saddr, th, sk->prot, opt, dev, sk->ip_tos,sk->ip_ttl);
	kfree_skb(skb, FREE_READ);
	return;
  }

  /* Backlog full: silently drop, the client will retransmit its SYN. */
  if (sk->ack_backlog >= sk->max_ack_backlog) {
	kfree_skb(skb, FREE_READ);
	return;
  }

  /* Allocate the child socket; atomic because we may be at interrupt
   * time. */
  newsk = (struct sock *) kmalloc(sizeof(struct sock), GFP_ATOMIC);
  if (newsk == NULL) {
	kfree_skb(skb, FREE_READ);
	return;
  }

  DPRINTF((DBG_TCP, "newsk = %X\n", newsk));
  /* Start from a byte copy of the listener, then reset everything that
   * must be private to the child (queues, timers, counters). */
  memcpy((void *)newsk,(void *)sk, sizeof(*newsk));
  newsk->wback = NULL;
  newsk->wfront = NULL;
  newsk->rqueue = NULL;
  newsk->send_head = NULL;
  newsk->send_tail = NULL;
  newsk->back_log = NULL;
  newsk->rtt = TCP_CONNECT_TIME;
  newsk->mdev = 0;
  newsk->backoff = 0;
  newsk->blog = 0;
  newsk->intr = 0;
  newsk->proc = 0;
  newsk->done = 0;
  newsk->send_tmp = NULL;
  newsk->pair = NULL;
  newsk->wmem_alloc = 0;
  newsk->rmem_alloc = 0;

  newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;

  newsk->err = 0;
  newsk->shutdown = 0;
  newsk->ack_backlog = 0;
  /* The SYN consumes one sequence number, hence the +1 on acked_seq. */
  newsk->acked_seq = skb->h.th->seq+1;
  newsk->fin_seq = skb->h.th->seq;
  newsk->copied_seq = skb->h.th->seq;
  newsk->state = TCP_SYN_RECV;
  newsk->timeout = 0;
  /* Clock-driven initial send sequence. */
  newsk->send_seq = jiffies * SEQ_TICK - seq_offset;
  newsk->rcv_ack_seq = newsk->send_seq;
  newsk->urg =0;
  newsk->retransmits = 0;
  newsk->destroy = 0;
  newsk->timer.data = (unsigned long)newsk;
  newsk->timer.function = &net_timer;
  newsk->dummy_th.source = skb->h.th->dest;
  newsk->dummy_th.dest = skb->h.th->source;

  /* Swap perspective: the remote's source is our destination. */
  newsk->daddr = saddr;
  newsk->saddr = daddr;

  put_sock(newsk->num,newsk);
  newsk->dummy_th.res1 = 0;
  newsk->dummy_th.doff = 6;
  newsk->dummy_th.fin = 0;
  newsk->dummy_th.syn = 0;
  newsk->dummy_th.rst = 0;
  newsk->dummy_th.psh = 0;
  newsk->dummy_th.ack = 0;
  newsk->dummy_th.urg = 0;
  newsk->dummy_th.res2 = 0;
  newsk->acked_seq = skb->h.th->seq + 1;
  newsk->copied_seq = skb->h.th->seq;

  /* TOS is echoed from the incoming packet, TTL from the listener. */
  newsk->ip_ttl=sk->ip_ttl;
  newsk->ip_tos=skb->ip_hdr->tos;

  /* Starting MSS: user-set mss, else the RFC 1122 default 576 minus our
   * header overhead; then clamp to the device MTU. */
  if (sk->mss)
	newsk->mtu = sk->mss;
  else
	newsk->mtu = 576 - HEADER_SIZE;

  newsk->mtu = min(newsk->mtu, dev->mtu - HEADER_SIZE);

  /* Let the peer's options (MSS) lower newsk->mtu further. */
  tcp_options(newsk,skb->h.th);

  buff = newsk->prot->wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
  if (buff == NULL) {
	sk->err = -ENOMEM;
	newsk->dead = 1;
	release_sock(newsk);
	kfree_skb(skb, FREE_READ);
	return;
  }

  buff->mem_addr = buff;
  buff->mem_len = MAX_SYN_SIZE;
  /* TCP header plus 4 bytes of MSS option. */
  buff->len = sizeof(struct tcphdr)+4;
  buff->sk = newsk;

  t1 =(struct tcphdr *) buff->data;

  tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &dev,
			       IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);

  if (tmp < 0) {
	sk->err = tmp;
	buff->free=1;
	kfree_skb(buff,FREE_WRITE);
	newsk->dead = 1;
	release_sock(newsk);
	skb->sk = sk;
	kfree_skb(skb, FREE_READ);
	return;
  }

  buff->len += tmp;
  t1 =(struct tcphdr *)((char *)t1 +tmp);

  /* Build the SYN+ACK from the incoming header, then override fields. */
  memcpy(t1, skb->h.th, sizeof(*t1));
  buff->h.seq = newsk->send_seq;

  t1->dest = skb->h.th->source;
  t1->source = newsk->dummy_th.source;
  t1->seq = ntohl(newsk->send_seq++);
  t1->ack = 1;
  newsk->window = tcp_select_window(newsk);
  t1->window = ntohs(newsk->window);
  t1->res1 = 0;
  t1->res2 = 0;
  t1->rst = 0;
  t1->urg = 0;
  t1->psh = 0;
  t1->syn = 1;
  t1->ack_seq = ntohl(skb->h.th->seq+1);
  /* doff includes the 4-byte MSS option (one extra 32-bit word). */
  t1->doff = sizeof(*t1)/4+1;

  /* MSS option: kind 2, length 4, 16-bit MSS in network order. */
  ptr =(unsigned char *)(t1+1);
  ptr[0] = 2;
  ptr[1] = 4;
  ptr[2] = ((newsk->mtu) >> 8) & 0xff;
  ptr[3] =(newsk->mtu) & 0xff;

  tcp_send_check(t1, daddr, saddr, sizeof(*t1)+4, newsk);
  newsk->prot->queue_xmit(newsk, dev, buff, 0);

  reset_timer(newsk, TIME_WRITE , TCP_CONNECT_TIME);
  /* Hand the SYN skb to the child and charge its memory accounting to
   * the child; the listener keeps it queued so accept() can find the
   * child through skb->sk. */
  skb->sk = newsk;

  sk->rmem_alloc -= skb->mem_len;
  newsk->rmem_alloc += skb->mem_len;

  skb_queue_tail(&sk->rqueue,skb);
  sk->ack_backlog++;
  release_sock(newsk);
}
1988
1989
/*
 * close() for TCP.  Flush the receive queue (remembering whether unread
 * data forces an RST on the FIN), then act on the current state: states
 * that already sent a FIN just (re)arm timers; active states build and
 * send a FIN segment and move to FIN_WAIT1/FIN_WAIT2.
 */
static void
tcp_close(struct sock *sk, int timeout)
{
  struct sk_buff *buff;
  int need_reset = 0;
  struct tcphdr *t1, *th;
  struct proto *prot;
  struct device *dev=NULL;
  int tmp;

  DPRINTF((DBG_TCP, "tcp_close((struct sock *)%X, %d)\n",sk, timeout));
  sk->inuse = 1;
  sk->keepopen = 1;
  sk->shutdown = SHUTDOWN_MASK;

  if (!sk->dead)
	sk->state_change(sk);

  /* Throw away unread data.  If any segment held bytes the application
   * never consumed, RFC semantics call for a reset rather than a clean
   * FIN - note that in need_reset. */
  if (skb_peek(&sk->rqueue) != NULL)
  {
	struct sk_buff *skb;
	if(sk->debug)
		printk("Clean rcv queue\n");
	while((skb=skb_dequeue(&sk->rqueue))!=NULL)
	{
		if(skb->len > 0 && after(skb->h.th->seq + skb->len + 1 , sk->copied_seq))
			need_reset = 1;
		kfree_skb(skb, FREE_READ);
	}
	if(sk->debug)
		printk("Cleaned.\n");
  }
  sk->rqueue = NULL;

  /* Push out any partially built segment first. */
  if (sk->send_tmp) {
	tcp_send_partial(sk);
  }

  switch(sk->state) {
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
	case TCP_LAST_ACK:
		/* FIN already sent; just wait (bounded by 4*rtt). */
		reset_timer(sk, TIME_CLOSE, 4 * sk->rtt);
		if (timeout) tcp_time_wait(sk);
		release_sock(sk);
		return;
	case TCP_TIME_WAIT:
		if (timeout) {
			sk->state = TCP_CLOSE;
		}
		release_sock(sk);
		return;
	case TCP_LISTEN:
		sk->state = TCP_CLOSE;
		release_sock(sk);
		return;
	case TCP_CLOSE:
		release_sock(sk);
		return;
	case TCP_CLOSE_WAIT:
	case TCP_ESTABLISHED:
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		prot =(struct proto *)sk->prot;
		th =(struct tcphdr *)&sk->dummy_th;
		buff = prot->wmalloc(sk, MAX_FIN_SIZE, 1, GFP_ATOMIC);
		if (buff == NULL) {
			/* No memory for the FIN right now: back off to
			 * ESTABLISHED and retry via a short close timer. */
			release_sock(sk);
			if (sk->state != TCP_CLOSE_WAIT)
				sk->state = TCP_ESTABLISHED;
			reset_timer(sk, TIME_CLOSE, 100);
			return;
		}
		buff->mem_addr = buff;
		buff->mem_len = MAX_FIN_SIZE;
		buff->sk = sk;
		buff->free = 1;
		buff->len = sizeof(*t1);
		t1 =(struct tcphdr *) buff->data;

		tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
					 IPPROTO_TCP, sk->opt,
					 sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
		if (tmp < 0) {
			kfree_skb(buff,FREE_WRITE);
			DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
			release_sock(sk);
			return;
		}

		/* The FIN consumes one sequence number. */
		t1 =(struct tcphdr *)((char *)t1 +tmp);
		buff->len += tmp;
		buff->dev = dev;
		memcpy(t1, th, sizeof(*t1));
		t1->seq = ntohl(sk->send_seq);
		sk->send_seq++;
		buff->h.seq = sk->send_seq;
		t1->ack = 1;

		/* No more delayed acks - everything we know is acked now. */
		sk->delay_acks = 0;
		t1->ack_seq = ntohl(sk->acked_seq);
		t1->window = ntohs(sk->window=tcp_select_window(sk));
		t1->fin = 1;
		/* RST rides along if unread data was discarded above. */
		t1->rst = need_reset;
		t1->doff = sizeof(*t1)/4;
		tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

		/* Send now if the write queue is empty, else append so the
		 * FIN goes out after the queued data. */
		if (sk->wfront == NULL) {
			prot->queue_xmit(sk, dev, buff, 0);
		} else {
			reset_timer(sk, TIME_WRITE,
				    backoff(sk->backoff) * (2 * sk->mdev + sk->rtt));
			buff->next = NULL;
			if (sk->wback == NULL) {
				sk->wfront=buff;
			} else {
				sk->wback->next = buff;
			}
			sk->wback = buff;
			buff->magic = TCP_WRITE_QUEUE_MAGIC;
		}

		/* NOTE(review): CLOSE_WAIT -> FIN_WAIT2 here (rather than
		 * LAST_ACK) mirrors tcp_shutdown(); confirm against the rest
		 * of the state machine before changing. */
		if (sk->state == TCP_CLOSE_WAIT) {
			sk->state = TCP_FIN_WAIT2;
		} else {
			sk->state = TCP_FIN_WAIT1;
		}
  }
  release_sock(sk);
}
2132
2133
2134
2135
2136
2137
/*
 * Transmit as much of the write queue as the peer's advertised window,
 * our congestion window, and any retransmit freeze allow.  Segments
 * already covered by the peer's ack are freed without sending.
 */
static void
tcp_write_xmit(struct sock *sk)
{
  struct sk_buff *skb;

  DPRINTF((DBG_TCP, "tcp_write_xmit(sk=%X)\n", sk));

  /* A zapped (reset) socket must not transmit. */
  if(sk->zapped)
	return;

  /* Send while: queue non-empty, head fits in the offered window,
   * we're not frozen mid-retransmit (unless the head is already acked
   * territory), and packets in flight stay under cong_window. */
  while(sk->wfront != NULL &&
	before(sk->wfront->h.seq, sk->window_seq +1) &&
	(sk->retransmits == 0 ||
	 sk->timeout != TIME_WRITE ||
	 before(sk->wfront->h.seq, sk->rcv_ack_seq +1))
	&& sk->packets_out < sk->cong_window) {
	skb = sk->wfront;
	IS_SKB(skb);
	sk->wfront =(struct sk_buff *)skb->next;
	if (sk->wfront == NULL) sk->wback = NULL;
	skb->next = NULL;
	if (skb->magic != TCP_WRITE_QUEUE_MAGIC) {
		/* Corrupted queue - dump it rather than chase bad pointers. */
		printk("tcp.c skb with bad magic(%X) on write queue. Squashing "
		       "queue\n", skb->magic);
		sk->wfront = NULL;
		sk->wback = NULL;
		return;
	}
	skb->magic = 0;
	DPRINTF((DBG_TCP, "Sending a packet.\n"));

	/* Already acknowledged by the peer: free instead of sending. */
	if (before(skb->h.seq, sk->rcv_ack_seq +1)) {
		sk->retransmits = 0;
		kfree_skb(skb, FREE_WRITE);
		if (!sk->dead) sk->write_space(sk);
	} else {
		sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
	}
  }
}
2181
2182
2183
2184
2185
2186
2187 void
2188 sort_send(struct sock *sk)
2189 {
2190 struct sk_buff *list = NULL;
2191 struct sk_buff *skb,*skb2,*skb3;
2192
2193 for (skb = sk->send_head; skb != NULL; skb = skb2) {
2194 skb2 = (struct sk_buff *)skb->link3;
2195 if (list == NULL || before (skb2->h.seq, list->h.seq)) {
2196 skb->link3 = list;
2197 sk->send_tail = skb;
2198 list = skb;
2199 } else {
2200 for (skb3 = list; ; skb3 = (struct sk_buff *)skb3->link3) {
2201 if (skb3->link3 == NULL ||
2202 before(skb->h.seq, skb3->link3->h.seq)) {
2203 skb->link3 = skb3->link3;
2204 skb3->link3 = skb;
2205 if (skb->link3 == NULL) sk->send_tail = skb;
2206 break;
2207 }
2208 }
2209 }
2210 }
2211 sk->send_head = list;
2212 }
2213
2214
2215
/*
 * Process the acknowledgment field of an incoming segment: validate the
 * ack number, update the send window (shuffling segments between the
 * retransmit and write queues when it shrinks), grow the congestion
 * window, free acked segments while updating the RTT estimate, restart
 * or cancel timers, and drive FIN_WAIT2 / LAST_ACK / TIME_WAIT state
 * transitions.  Returns 1 if the ack was acceptable, 0 if not.
 *
 * 'flag' bits (accumulated below): 1 = this segment did something
 * useful (data present / window update / progress), 2 = an RTT sample
 * was already taken, 4 = the window moved.
 */
static int
tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
{
  unsigned long ack;
  int flag = 0;

  if(sk->zapped)
	return(1);

  ack = ntohl(th->ack_seq);
  DPRINTF((DBG_TCP, "tcp_ack ack=%d, window=%d, "
	  "sk->rcv_ack_seq=%d, sk->window_seq = %d\n",
	  ack, ntohs(th->window), sk->rcv_ack_seq, sk->window_seq));

  /* Any ack proves the peer is alive - clear keepalive retransmits. */
  if (sk->retransmits && sk->timeout == TIME_KEEPOPEN)
	sk->retransmits = 0;

  /* Ack outside [rcv_ack_seq-1, send_seq+1] is unacceptable.  An old
   * duplicate on an established connection is tolerated (return 1)
   * after refreshing keepalive; anything else rejects the segment. */
  if (after(ack, sk->send_seq+1) || before(ack, sk->rcv_ack_seq-1)) {
	if (after(ack, sk->send_seq) ||
	    (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)) {
		return(0);
	}
	if (sk->keepopen) {
		reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
	}
	return(1);
  }

  /* Segment carries payload beyond the bare header. */
  if (len != th->doff*4) flag |= 1;

  /* Window shrank: everything on the retransmit queue beyond the new
   * right edge must move back onto the front of the write queue, in
   * order, so it is only sent when the window reopens. */
  if (after(sk->window_seq, ack+ntohs(th->window))) {
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *wskb = NULL;

	skb2 = sk->send_head;
	sk->send_head = NULL;
	sk->send_tail = NULL;

	flag |= 4;

	sk->window_seq = ack + ntohs(th->window);
	cli();
	while (skb2 != NULL) {
		skb = skb2;
		skb2 = (struct sk_buff *)skb->link3;
		skb->link3 = NULL;
		if (after(skb->h.seq, sk->window_seq)) {
			/* Beyond the new window: back to the write queue
			 * (wskb tracks the insertion point to keep order). */
			if (sk->packets_out > 0) sk->packets_out--;

			if (skb->next != NULL) {
				skb_unlink(skb);
			}

			skb->magic = TCP_WRITE_QUEUE_MAGIC;
			if (wskb == NULL) {
				skb->next = sk->wfront;
				sk->wfront = skb;
			} else {
				skb->next = wskb->next;
				wskb->next = skb;
			}
			if (sk->wback == wskb) sk->wback = skb;
			wskb = skb;
		} else {
			/* Still inside the window: rebuild send queue. */
			if (sk->send_head == NULL) {
				sk->send_head = skb;
				sk->send_tail = skb;
			} else {
				sk->send_tail->link3 = skb;
				sk->send_tail = skb;
			}
			skb->link3 = NULL;
		}
	}
	sti();
  }

  /* Keep head/tail/packets_out mutually consistent. */
  if (sk->send_tail == NULL || sk->send_head == NULL) {
	sk->send_head = NULL;
	sk->send_tail = NULL;
	sk->packets_out= 0;
  }

  sk->window_seq = ack + ntohs(th->window);

  /* Congestion window growth on forward progress (cap 2048):
   * exponential during slow start, else linear. */
  if (sk->timeout == TIME_WRITE &&
      sk->cong_window < 2048 && ack != sk->rcv_ack_seq) {
	if (sk->exp_growth) sk->cong_window *= 2;
	else sk->cong_window++;
  }

  DPRINTF((DBG_TCP, "tcp_ack: Updating rcv ack sequence.\n"));
  sk->rcv_ack_seq = ack;

  /* Zero-window probe answered and window now covers pending data:
   * stop backing off. */
  if (sk->timeout == TIME_PROBE0) {
	if (sk->wfront != NULL &&
	    ! before (sk->window_seq, sk->wfront->h.seq)) {
		sk->retransmits = 0;
		sk->backoff = 0;
	}
  }

  /* Free every fully acknowledged segment at the head of the
   * retransmit queue, sampling RTT once per incoming ack. */
  while(sk->send_head != NULL) {
	/* Defensive: repair an out-of-order queue before trusting it. */
	if (sk->send_head->link3 &&
	    after(sk->send_head->h.seq, sk->send_head->link3->h.seq)) {
		printk("INET: tcp.c: *** bug send_list out of order.\n");
		sort_send(sk);
	}

	if (before(sk->send_head->h.seq, ack+1)) {
		struct sk_buff *oskb;

		/* Some retransmitted data was acked; keep the flag only
		 * while more retransmitted segments remain outstanding. */
		if (sk->retransmits) {
			if (sk->send_head->link3)
				sk->retransmits = 1;
			else
				sk->retransmits = 0;
		}

		sk->backoff = 0;

		if (sk->packets_out > 0) sk->packets_out --;
		DPRINTF((DBG_TCP, "skb=%X skb->h.seq = %d acked ack=%d\n",
			sk->send_head, sk->send_head->h.seq, ack));

		if (!sk->dead) sk->write_space(sk);

		oskb = sk->send_head;

		/* One RTT sample per ack (flag&2 guards re-sampling),
		 * clamped to [100,12000] jiffies; first sample on a
		 * handshake state seeds rtt/mdev directly, afterwards use
		 * the standard 7/8 srtt and 3/4 mdev smoothing. */
		if ( !(flag&2)) {
			long abserr, rtt = jiffies - oskb->when;

			if (rtt < 100) rtt = 100;
			if (rtt > 12000) rtt = 12000;

			if (sk->state == TCP_SYN_SENT || sk->state == TCP_SYN_RECV) {
				sk->rtt = rtt;
				sk->mdev = rtt;
			}
			else {
				abserr = (rtt > sk->rtt) ? rtt - sk->rtt : sk->rtt - rtt;
				sk->rtt = (7 * sk->rtt + rtt) >> 3;
				sk->mdev = (3 * sk->mdev + abserr) >> 2;
			}
			sk->backoff = 0;
		}
		flag |= (2|4);

		/* Unlink and free the acked segment atomically. */
		cli();

		oskb = sk->send_head;
		IS_SKB(oskb);
		sk->send_head =(struct sk_buff *)oskb->link3;
		if (sk->send_head == NULL) {
			sk->send_tail = NULL;
		}

		skb_unlink(oskb);
		sti();
		oskb->magic = 0;
		kfree_skb(oskb, FREE_WRITE);
		if (!sk->dead) sk->write_space(sk);
	} else {
		break;
	}
  }

  /* Decide what to do next: send more, probe a zero window, or (re)arm
   * or cancel timers when the queues are drained. */
  if (sk->wfront != NULL) {
	if (after (sk->window_seq+1, sk->wfront->h.seq) &&
	    (sk->retransmits == 0 ||
	     sk->timeout != TIME_WRITE ||
	     before(sk->wfront->h.seq, sk->rcv_ack_seq +1))
	    && sk->packets_out < sk->cong_window) {
		flag |= 1;
		tcp_write_xmit(sk);
	} else if (before(sk->window_seq, sk->wfront->h.seq) &&
		   sk->send_head == NULL &&
		   sk->ack_backlog == 0 &&
		   sk->state != TCP_TIME_WAIT) {
		/* Window closed with data pending: start probing. */
		reset_timer(sk, TIME_PROBE0,
			    backoff(sk->backoff) * (2 * sk->mdev + sk->rtt));
	}
  } else {
	if (sk->send_head == NULL && sk->ack_backlog == 0 &&
	    sk->state != TCP_TIME_WAIT && !sk->keepopen) {
		DPRINTF((DBG_TCP, "Nothing to do, going to sleep.\n"));
		if (!sk->dead) sk->write_space(sk);

		/* NOTE(review): !sk->keepopen was tested above, so the
		 * keepopen branch here is dead code as written. */
		if (sk->keepopen)
			reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
		else
			delete_timer(sk);
	} else {
		/* NOTE(review): comparing state against keepopen looks
		 * suspicious but is preserved as-is - verify intent. */
		if (sk->state != (unsigned char) sk->keepopen) {
			reset_timer(sk, TIME_WRITE,
				    backoff(sk->backoff) * (2 * sk->mdev + sk->rtt));
		}
		if (sk->state == TCP_TIME_WAIT) {
			reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
		}
	}
  }

  /* Everything in flight acked: flush a pending partial segment. */
  if (sk->packets_out == 0 && sk->send_tmp != NULL &&
      sk->wfront == NULL && sk->send_head == NULL) {
	flag |= 1;
	tcp_send_partial(sk);
  }

  /* TIME_WAIT: fully acked in both directions -> CLOSE. */
  if (sk->state == TCP_TIME_WAIT) {
	if (!sk->dead)
		sk->state_change(sk);
	if (sk->rcv_ack_seq == sk->send_seq && sk->acked_seq == sk->fin_seq) {
		flag |= 1;
		sk->state = TCP_CLOSE;
		sk->shutdown = SHUTDOWN_MASK;
	}
  }

  /* LAST_ACK / FIN_WAIT2: our FIN is acked - either wait for theirs
   * (tcp_time_wait) or, if theirs is already acked, finish closing. */
  if (sk->state == TCP_LAST_ACK || sk->state == TCP_FIN_WAIT2) {
	if (!sk->dead) sk->state_change(sk);
	if (sk->rcv_ack_seq == sk->send_seq) {
		flag |= 1;
		if (sk->acked_seq != sk->fin_seq) {
			tcp_time_wait(sk);
		} else {
			DPRINTF((DBG_TCP, "tcp_ack closing socket - %X\n", sk));
			tcp_send_ack(sk->send_seq, sk->acked_seq, sk,
				     th, sk->daddr);
			sk->shutdown = SHUTDOWN_MASK;
			sk->state = TCP_CLOSE;
		}
	}
  }

  /* A useless duplicate ack (or one that moved the window) while the
   * head of the retransmit queue is overdue: fast retransmit. */
  if (((!flag) || (flag&4)) && sk->send_head != NULL &&
      (sk->send_head->when + backoff(sk->backoff) * (2 * sk->mdev + sk->rtt)
       < jiffies)) {
	sk->exp_growth = 0;
	ip_retransmit(sk, 1);
  }

  DPRINTF((DBG_TCP, "leaving tcp_ack\n"));
  return(1);
}
2516
2517
2518
2519
2520
2521
2522
/*
 * Handle the data portion of an incoming segment: insert the skb into
 * the (sequence-ordered) receive queue, advance acked_seq over every
 * in-order segment, schedule or send an ack, and wake the reader.
 * Returns 0 always (the skb is consumed or queued).
 */
static int
tcp_data(struct sk_buff *skb, struct sock *sk,
	 unsigned long saddr, unsigned short len)
{
  struct sk_buff *skb1, *skb2;
  struct tcphdr *th;
  int dup_dumped=0;

  th = skb->h.th;
  print_th(th);
  /* Payload length = total length minus the TCP header. */
  skb->len = len -(th->doff*4);

  DPRINTF((DBG_TCP, "tcp_data len = %d sk = %X:\n", skb->len, sk));

  sk->bytes_rcv += skb->len;
  /* Pure ack with no payload and no interesting flags: nothing to
   * queue.  (A flagless non-ack still gets answered with an ack.) */
  if (skb->len == 0 && !th->fin && !th->urg && !th->psh) {
	if (!th->ack) tcp_send_ack(sk->send_seq, sk->acked_seq,sk, th, saddr);
	kfree_skb(skb, FREE_READ);
	return(0);
  }

  /* Data after we shut down receiving: answer with a reset and kill
   * the connection. */
  if (sk->shutdown & RCV_SHUTDOWN) {
	sk->acked_seq = th->seq + skb->len + th->syn + th->fin;
	tcp_reset(sk->saddr, sk->daddr, skb->h.th,
		  sk->prot, NULL, skb->dev, sk->ip_tos, sk->ip_ttl);
	sk->state = TCP_CLOSE;
	sk->err = EPIPE;
	sk->shutdown = SHUTDOWN_MASK;
	DPRINTF((DBG_TCP, "tcp_data: closing socket - %X\n", sk));
	kfree_skb(skb, FREE_READ);
	if (!sk->dead) sk->state_change(sk);
	return(0);
  }

  /* Insert into the receive queue, kept ordered by sequence number.
   * skb1 ends up as the node we inserted after (NULL if at the head). */
  if (sk->rqueue == NULL) {
	DPRINTF((DBG_TCP, "tcp_data: skb = %X:\n", skb));
#ifdef OLDWAY
	sk->rqueue = skb;
	skb->next = skb;
	skb->prev = skb;
	skb->list = &sk->rqueue;
#else
	skb_queue_head(&sk->rqueue,skb);
#endif
	skb1= NULL;
  } else {
	DPRINTF((DBG_TCP, "tcp_data adding to chain sk = %X:\n", sk));
	/* Scan backwards from the newest segment. */
	for(skb1=sk->rqueue->prev; ; skb1 =(struct sk_buff *)skb1->prev) {
		if(sk->debug)
		{
			printk("skb1=%p :", skb1);
			printk("skb1->h.th->seq = %ld: ", skb1->h.th->seq);
			printk("skb->h.th->seq = %ld\n",skb->h.th->seq);
			printk("copied_seq = %ld acked_seq = %ld\n", sk->copied_seq,
				sk->acked_seq);
		}
#ifdef OLD
		if (after(th->seq+1, skb1->h.th->seq)) {
			skb->prev = skb1;
			skb->next = skb1->next;
			skb->next->prev = skb;
			skb1->next = skb;
			if (skb1 == sk->rqueue) sk->rqueue = skb;
			break;
		}
		if (skb1->prev == sk->rqueue) {
			skb->next= skb1;
			skb->prev = skb1->prev;
			skb->prev->next = skb;
			skb1->prev = skb;
			skb1 = NULL;

			break;
		}
#else
		/* Exact duplicate with at least as much data: replace the
		 * old copy with the new one. */
		if (th->seq==skb1->h.th->seq && skb->len>= skb1->len)
		{
			skb_append(skb1,skb);
			skb_unlink(skb1);
			kfree_skb(skb1,FREE_READ);
			dup_dumped=1;
			skb1=NULL;
			break;
		}
		/* Found the first segment we sort after: insert behind it. */
		if (after(th->seq+1, skb1->h.th->seq))
		{
			skb_append(skb1,skb);
			break;
		}
		/* Reached the front of the queue: new segment goes first. */
		if (skb1 == sk->rqueue)
		{
			skb_queue_head(&sk->rqueue, skb);
			break;
		}
#endif
	}
	DPRINTF((DBG_TCP, "skb = %X:\n", skb));
  }

  /* Stash the sequence number following this segment in th->ack_seq
   * (reusing the header field as scratch; SYN and FIN each count 1). */
  th->ack_seq = th->seq + skb->len;
  if (th->syn) th->ack_seq++;
  if (th->fin) th->ack_seq++;

  if (before(sk->acked_seq, sk->copied_seq)) {
	printk("*** tcp.c:tcp_data bug acked < copied\n");
	sk->acked_seq = sk->copied_seq;
  }

  /* If this segment is in order (or its predecessor was already acked),
   * advance acked_seq across it and every contiguous follower. */
  if ((!dup_dumped && (skb1 == NULL || skb1->acked)) || before(th->seq, sk->acked_seq+1)) {
	if (before(th->seq, sk->acked_seq+1)) {
		if (after(th->ack_seq, sk->acked_seq))
			sk->acked_seq = th->ack_seq;
		skb->acked = 1;

		/* An in-order FIN shuts our receive side. */
		if (skb->h.th->fin) {
			if (!sk->dead) sk->state_change(sk);
			sk->shutdown |= RCV_SHUTDOWN;
		}

		/* Walk forward over previously queued out-of-order
		 * segments that are now contiguous. */
		for(skb2 = (struct sk_buff *)skb->next;
		    skb2 !=(struct sk_buff *) sk->rqueue;
		    skb2 = (struct sk_buff *)skb2->next) {
			if (before(skb2->h.th->seq, sk->acked_seq+1)) {
				if (after(skb2->h.th->ack_seq, sk->acked_seq))
				{
					long old_acked_seq = sk->acked_seq;
					sk->acked_seq = skb2->h.th->ack_seq;
					/* Shrink the advertised window by
					 * the newly acked bytes. */
					if((int)(sk->acked_seq - old_acked_seq) >0)
					{
						int new_window=sk->window-sk->acked_seq+
							old_acked_seq;
						if(new_window<0)
							new_window=0;
						sk->window = new_window;
					}
				}
				skb2->acked = 1;

				if (skb2->h.th->fin) {
					sk->shutdown |= RCV_SHUTDOWN;
					if (!sk->dead) sk->state_change(sk);
				}

				/* Force an immediate ack below. */
				sk->ack_backlog = sk->max_ack_backlog;
			} else {
				break;
			}
		}

		/* Either ack immediately (first branch, handled further
		 * down) or delay the ack on a timer. */
		if (!sk->delay_acks ||
		    sk->ack_backlog >= sk->max_ack_backlog ||
		    sk->bytes_rcv > sk->max_unacked || th->fin) {
		} else {
			sk->ack_backlog++;
			if(sk->debug)
				printk("Ack queued.\n");
			reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
		}
	}
  }

  /* Out-of-order segment: if receive memory is tight, shed unacked
   * segments from the head of the queue, then ack what we do have so
   * the sender fills the gap. */
  if (!skb->acked) {
	while (sk->prot->rspace(sk) < sk->mtu) {
		skb1 = skb_peek(&sk->rqueue);
		if (skb1 == NULL) {
			printk("INET: tcp.c:tcp_data memory leak detected.\n");
			break;
		}

		/* Never drop data we already acknowledged. */
		if (skb1->acked) {
			break;
		}

		skb_unlink(skb1);
#ifdef OLDWAY
		if (skb1->prev == skb1) {
			sk->rqueue = NULL;
		} else {
			sk->rqueue = (struct sk_buff *)skb1->prev;
			skb1->next->prev = skb1->prev;
			skb1->prev->next = skb1->next;
		}
#endif
		kfree_skb(skb1, FREE_READ);
	}
	tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
	sk->ack_backlog++;
	reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
  } else {
	tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
  }

  /* Wake any reader. */
  if (!sk->dead) {
	if(sk->debug)
		printk("Data wakeup.\n");
	sk->data_ready(sk,0);
  } else {
	DPRINTF((DBG_TCP, "data received on dead socket.\n"));
  }

  /* Both FINs seen and everything acked: move to LAST_ACK. */
  if (sk->state == TCP_FIN_WAIT2 &&
      sk->acked_seq == sk->fin_seq && sk->rcv_ack_seq == sk->send_seq) {
	DPRINTF((DBG_TCP, "tcp_data: entering last_ack state sk = %X\n", sk));

	sk->shutdown = SHUTDOWN_MASK;
	sk->state = TCP_LAST_ACK;
	if (!sk->dead) sk->state_change(sk);
  }

  return(0);
}
2769
2770
2771 static int
2772 tcp_urg(struct sock *sk, struct tcphdr *th, unsigned long saddr)
2773 {
2774 extern int kill_pg(int pg, int sig, int priv);
2775 extern int kill_proc(int pid, int sig, int priv);
2776
2777 if (!sk->dead)
2778 sk->data_ready(sk,0);
2779
2780 if (sk->urginline) {
2781 th->urg = 0;
2782 th->psh = 1;
2783 return(0);
2784 }
2785
2786 if (!sk->urg) {
2787
2788 if (sk->proc != 0) {
2789 if (sk->proc > 0) {
2790 kill_proc(sk->proc, SIGURG, 1);
2791 } else {
2792 kill_pg(-sk->proc, SIGURG, 1);
2793 }
2794 }
2795 }
2796 sk->urg++;
2797 return(0);
2798 }
2799
2800
2801
/*
 * Process an incoming FIN: record fin_seq and advance the connection
 * state machine.  The eventual ack is sent by the caller (ack_backlog
 * is bumped here to ensure one goes out).  Returns 0 always.
 */
static int
tcp_fin(struct sock *sk, struct tcphdr *th,
	unsigned long saddr, struct device *dev)
{
  DPRINTF((DBG_TCP, "tcp_fin(sk=%X, th=%X, saddr=%X, dev=%X)\n",
	  sk, th, saddr, dev));

  if (!sk->dead) {
	sk->state_change(sk);
  }

  switch(sk->state) {
	case TCP_SYN_RECV:
	case TCP_SYN_SENT:
	case TCP_ESTABLISHED:
		/* Peer closed first: we go to CLOSE_WAIT.  The FIN itself
		 * occupies one sequence number. */
		sk->fin_seq = th->seq+1;
		sk->state = TCP_CLOSE_WAIT;
		if (th->rst) sk->shutdown = SHUTDOWN_MASK;
		break;

	case TCP_CLOSE_WAIT:
	case TCP_FIN_WAIT2:
		/* Retransmitted FIN - already handled. */
		break;

	case TCP_FIN_WAIT1:
		/* Simultaneous close: both FINs in flight. */
		sk->fin_seq = th->seq+1;
		sk->state = TCP_FIN_WAIT2;
		break;

	default:
	case TCP_TIME_WAIT:
		/* Note: 'default' deliberately shares this arm. */
		sk->state = TCP_LAST_ACK;

		/* Bound the remaining lifetime of the connection. */
		reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
		return(0);
  }
  /* Make sure the FIN gets acknowledged. */
  sk->ack_backlog++;

  return(0);
}
2845
2846
2847
/*
 * accept() for TCP: pull the next queued connection request off the
 * listening socket's receive queue and return the child socket that
 * tcp_conn_request() attached to it (skb->sk).  Blocks unless
 * O_NONBLOCK; errors are reported via sk->err with a NULL return.
 */
static struct sock *
tcp_accept(struct sock *sk, int flags)
{
  struct sock *newsk;
  struct sk_buff *skb;

  DPRINTF((DBG_TCP, "tcp_accept(sk=%X, flags=%X, addr=%s)\n",
	  sk, flags, in_ntoa(sk->saddr)));

  /* accept() only makes sense on a listening socket. */
  if (sk->state != TCP_LISTEN) {
	sk->err = EINVAL;
	return(NULL);
  }

  /* Interrupts stay off across the test-and-sleep so a wakeup between
   * get_firstr() and interruptible_sleep_on() cannot be lost. */
  cli();
  sk->inuse = 1;
  while((skb = get_firstr(sk)) == NULL) {
	if (flags & O_NONBLOCK) {
		sti();
		release_sock(sk);
		sk->err = EAGAIN;
		return(NULL);
	}

	release_sock(sk);
	interruptible_sleep_on(sk->sleep);
	/* Interrupted by a signal: let the syscall be restarted. */
	if (current->signal & ~current->blocked) {
		sti();
		sk->err = ERESTARTSYS;
		return(NULL);
	}
	sk->inuse = 1;
  }
  sti();

  /* tcp_conn_request() stored the child socket in skb->sk. */
  newsk = skb->sk;

  kfree_skb(skb, FREE_READ);
  sk->ack_backlog--;
  release_sock(sk);
  return(newsk);
}
2896
2897
2898
/*
 * connect() for TCP: validate the user's address, pick an initial send
 * sequence, build and transmit the SYN (with an MSS option), and move
 * the socket to SYN_SENT with the connect timer running.
 * Returns 0 or a negative errno.
 */
static int
tcp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len)
{
  struct sk_buff *buff;
  struct sockaddr_in sin;
  struct device *dev=NULL;
  unsigned char *ptr;
  int tmp;
  struct tcphdr *t1;
  int err;

  if (sk->state != TCP_CLOSE) return(-EISCONN);
  /* Need at least family+port+address. */
  if (addr_len < 8) return(-EINVAL);

  err=verify_area(VERIFY_READ, usin, addr_len);
  if(err)
	return err;

  memcpy_fromfs(&sin,usin, min(sizeof(sin), addr_len));

  if (sin.sin_family && sin.sin_family != AF_INET) return(-EAFNOSUPPORT);

  DPRINTF((DBG_TCP, "TCP connect daddr=%s\n", in_ntoa(sin.sin_addr.s_addr)));

  /* TCP to a broadcast address is meaningless. */
  if (chk_addr(sin.sin_addr.s_addr) == IS_BROADCAST) {
	DPRINTF((DBG_TCP, "TCP connection to broadcast address not allowed\n"));
	return(-ENETUNREACH);
  }

  sk->inuse = 1;
  sk->daddr = sin.sin_addr.s_addr;
  /* Clock-driven initial send sequence (see SEQ_TICK/seq_offset). */
  sk->send_seq = jiffies * SEQ_TICK - seq_offset;
  sk->rcv_ack_seq = sk->send_seq -1;
  sk->err = 0;
  sk->dummy_th.dest = sin.sin_port;
  /* Drop the lock around the sleeping GFP_KERNEL allocation. */
  release_sock(sk);

  buff = sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
  if (buff == NULL) {
	return(-ENOMEM);
  }
  sk->inuse = 1;
  buff->mem_addr = buff;
  buff->mem_len = MAX_SYN_SIZE;
  /* 20-byte TCP header plus 4-byte MSS option. */
  buff->len = 24;
  buff->sk = sk;
  buff->free = 1;
  t1 = (struct tcphdr *) buff->data;

  /* Prepend IP/link headers; this also resolves the output device. */
  tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
			       IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);
  if (tmp < 0) {
	sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
	release_sock(sk);
	return(-ENETUNREACH);
  }
  buff->len += tmp;
  t1 = (struct tcphdr *)((char *)t1 +tmp);

  /* Fill in the SYN: template header, then flags.  The SYN consumes
   * one sequence number. */
  memcpy(t1,(void *)&(sk->dummy_th), sizeof(*t1));
  t1->seq = ntohl(sk->send_seq++);
  buff->h.seq = sk->send_seq;
  t1->ack = 0;
  /* NOTE(review): a tiny initial window of 2 is advertised here -
   * preserved as-is; confirm intent before changing. */
  t1->window = 2;
  t1->res1=0;
  t1->res2=0;
  t1->rst = 0;
  t1->urg = 0;
  t1->psh = 0;
  t1->syn = 1;
  t1->urg_ptr = 0;
  /* 6 x 4 = 24 bytes: header plus the MSS option. */
  t1->doff = 6;

  /* Starting MSS: user-set mss, else 576 minus header overhead,
   * clamped to the device MTU. */
  if (sk->mss)
	sk->mtu = sk->mss;
  else
	sk->mtu = 576 - HEADER_SIZE;

  sk->mtu = min(sk->mtu, dev->mtu - HEADER_SIZE);

  /* MSS option: kind 2, length 4, MSS in network byte order. */
  ptr = (unsigned char *)(t1+1);
  ptr[0] = 2;
  ptr[1] = 4;
  ptr[2] = (sk->mtu) >> 8;
  ptr[3] = (sk->mtu) & 0xff;
  tcp_send_check(t1, sk->saddr, sk->daddr,
		 sizeof(struct tcphdr) + 4, sk);

  /* Arm the connect/retransmit machinery and go. */
  sk->state = TCP_SYN_SENT;
  sk->rtt = TCP_CONNECT_TIME;
  reset_timer(sk, TIME_WRITE, TCP_CONNECT_TIME);
  sk->retransmits = TCP_RETR2 - TCP_SYN_RETRIES;

  sk->prot->queue_xmit(sk, dev, buff, 0);

  release_sock(sk);
  return(0);
}
3003
3004
3005
3006 static int
3007 tcp_sequence(struct sock *sk, struct tcphdr *th, short len,
3008 struct options *opt, unsigned long saddr, struct device *dev)
3009 {
3010
3011
3012
3013
3014
3015
3016 DPRINTF((DBG_TCP, "tcp_sequence(sk=%X, th=%X, len = %d, opt=%d, saddr=%X)\n",
3017 sk, th, len, opt, saddr));
3018
3019 if (between(th->seq, sk->acked_seq, sk->acked_seq + sk->window)||
3020 between(th->seq + len-(th->doff*4), sk->acked_seq + 1,
3021 sk->acked_seq + sk->window) ||
3022 (before(th->seq, sk->acked_seq) &&
3023 after(th->seq + len -(th->doff*4), sk->acked_seq + sk->window))) {
3024 return(1);
3025 }
3026 DPRINTF((DBG_TCP, "tcp_sequence: rejecting packet.\n"));
3027
3028
3029
3030
3031
3032
3033
3034
3035 if(sk->state==TCP_SYN_SENT||sk->state==TCP_SYN_RECV)
3036 {
3037 tcp_reset(sk->saddr,sk->daddr,th,sk->prot,NULL,dev, sk->ip_tos,sk->ip_ttl);
3038 return(1);
3039 }
3040
3041
3042
3043
3044
3045 if (after(th->seq, sk->acked_seq + sk->window)) {
3046 if(!th->rst)
3047 tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
3048 return(0);
3049 }
3050
3051 #ifdef undef
3052
3053
3054
3055
3056
3057
3058
3059
3060 if (th->ack && len == (th->doff * 4) &&
3061 after(th->seq, sk->acked_seq - 32767) &&
3062 !th->fin && !th->syn) return(1);
3063 #endif
3064
3065 if (!th->rst) {
3066
3067 tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
3068 }
3069 return(0);
3070 }
3071
3072
3073
3074
3075
/*
 * Main receive entry point for TCP, called by the IP layer for every
 * incoming TCP segment.  Looks up the owning socket, validates the
 * checksum and sequence space, and drives the connection state machine.
 * Always returns 0; the skb is consumed here (queued, backlogged, or
 * freed).
 *
 * 'redo' is non-zero when a segment is being replayed from a socket's
 * backlog queue: the checksum check and the seq byte-order conversion
 * were already done on the first pass and must not be repeated.
 */
int
tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
	unsigned long daddr, unsigned short len,
	unsigned long saddr, int redo, struct inet_protocol * protocol)
{
  struct tcphdr *th;
  struct sock *sk;

  if (!skb) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv skb = NULL\n"));
	return(0);
  }
#if 0
  if (!protocol) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv protocol = NULL\n"));
	return(0);
  }

  if (!opt) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv opt = NULL\n"));
  }
#endif
  if (!dev) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv dev = NULL\n"));
	return(0);
  }
  th = skb->h.th;

  /* Find the socket this segment is addressed to. */
  sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
  DPRINTF((DBG_TCP, "<<\n"));
  DPRINTF((DBG_TCP, "len = %d, redo = %d, skb=%X\n", len, redo, skb));

  /* A zapped socket has already seen a valid reset: treat it as if
     there were no socket at all. */
  if (sk!=NULL && sk->zapped)
	sk=NULL;

  if (sk) {
	DPRINTF((DBG_TCP, "sk = %X:\n", sk));
  }

  if (!redo) {
	/* First pass only: verify the checksum, drop silently on failure. */
	if (tcp_check(th, len, saddr, daddr )) {
		skb->sk = NULL;
		DPRINTF((DBG_TCP, "packet dropped with bad checksum.\n"));
		if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: bad checksum\n");
		kfree_skb(skb,FREE_READ);
		return(0);
	}

	/* No listening/connected socket: answer anything but a RST
	   with a reset. */
	if (sk == NULL) {
		if (!th->rst)
		{
			th->seq = ntohl(th->seq);
			tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev,skb->ip_hdr->tos,255);
		}
		skb->sk = NULL;
		kfree_skb(skb, FREE_READ);
		return(0);
	}

	/* Fill in bookkeeping fields.  saddr/daddr are deliberately
	   swapped: the skb now describes the reply direction. */
	skb->len = len;
	skb->sk = sk;
	skb->acked = 0;
	skb->used = 0;
	skb->free = 0;
	skb->urg_used = 0;
	skb->saddr = daddr;
	skb->daddr = saddr;

	/* Convert the sequence number to host order exactly once;
	   backlog replays (redo != 0) skip this whole branch. */
	th->seq = ntohl(th->seq);

	/* Socket busy: splice the skb onto the circular backlog list
	   with interrupts off; it will be replayed via release_sock(). */
	cli();
	if (sk->inuse) {
		if (sk->back_log == NULL) {
			sk->back_log = skb;
			skb->next = skb;
			skb->prev = skb;
		} else {
			skb->next = sk->back_log;
			skb->prev = sk->back_log->prev;
			skb->prev->next = skb;
			skb->next->prev = skb;
		}
		sti();
		return(0);
	}
	sk->inuse = 1;
	sti();
  } else {
	/* Replay from backlog: the socket must still exist. */
	if (!sk) {
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv bug sk=NULL redo = 1\n"));
		return(0);
	}
  }

  if (!sk->prot) {
	DPRINTF((DBG_TCP, "tcp.c: tcp_rcv sk->prot = NULL \n"));
	return(0);
  }

  /* Charge the segment to the receive buffer; drop when the socket
     is out of space. */
  if (sk->rmem_alloc + skb->mem_len >= sk->rcvbuf) {
	skb->sk = NULL;
	DPRINTF((DBG_TCP, "dropping packet due to lack of buffer space.\n"));
	kfree_skb(skb, FREE_READ);
	release_sock(sk);
	return(0);
  }
  sk->rmem_alloc += skb->mem_len;

  DPRINTF((DBG_TCP, "About to do switch.\n"));

  /* Connection state machine. */
  switch(sk->state) {
	/* A RST in LAST_ACK kills the connection outright; anything
	   else is handled by the established-state code below. */
	case TCP_LAST_ACK:
		if (th->rst) {
			sk->zapped=1;
			sk->err = ECONNRESET;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			if (!sk->dead) {
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* fall through */

	case TCP_ESTABLISHED:
	case TCP_CLOSE_WAIT:
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
	case TCP_TIME_WAIT:
		/* Discard segments outside the receive window. */
		if (!tcp_sequence(sk, th, len, opt, saddr,dev)) {
			if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: not in seq\n");
#ifdef undef
			if(!th->rst)
				tcp_send_ack(sk->send_seq, sk->acked_seq,
					sk, th, saddr);
#endif
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		/* An in-window RST terminates the connection.  In
		   CLOSE_WAIT the user already saw EOF, so report EPIPE
		   rather than ECONNRESET. */
		if (th->rst) {
			sk->zapped=1;
			sk->err = ECONNRESET;

			if (sk->state == TCP_CLOSE_WAIT) {
				sk->err = EPIPE;
			}
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			if (!sk->dead) {
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* An in-window SYN on a synchronised connection is fatal:
		   reset the connection and tell the user. */
		if (
#if 0
		if ((opt && (opt->security != 0 ||
			opt->compartment != 0)) ||
#endif
		th->syn) {
			sk->err = ECONNRESET;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			tcp_reset(daddr, saddr, th, sk->prot, opt,dev, sk->ip_tos,sk->ip_ttl);
			if (!sk->dead) {
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		if (th->ack) {
			if (!tcp_ack(sk, th, saddr, len)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
		}
		if (th->urg) {
			if (tcp_urg(sk, th, saddr)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
		}

		/* Queue any payload.  NOTE(review): a non-zero return
		   appears to mean the skb was not consumed - confirm
		   against tcp_data before changing this. */
		if (tcp_data(skb, sk, saddr, len)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (th->fin && tcp_fin(sk, th, saddr, dev)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		release_sock(sk);
		return(0);

	case TCP_CLOSE:
		if (sk->dead || sk->daddr) {
			DPRINTF((DBG_TCP, "packet received for closed,dead socket\n"));
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (!th->rst) {
			if (!th->ack)
				th->ack_seq = 0;
			tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
		}
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);

	case TCP_LISTEN:
		if (th->rst) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* An ACK to a listening socket is always bogus. */
		if (th->ack) {
			tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (th->syn) {
#if 0
			if (opt->security != 0 || opt->compartment != 0) {
				tcp_reset(daddr, saddr, th, prot, opt,dev);
				release_sock(sk);
				return(0);
			}
#endif
			/* Start a new connection; tcp_conn_request takes
			   ownership of the skb, so it is not freed here. */
			tcp_conn_request(sk, skb, daddr, saddr, opt, dev);
			release_sock(sk);
			return(0);
		}

		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);

	default:
		/* All remaining states still need the window check
		   before the SYN_SENT/SYN_RECV handling below. */
		if (!tcp_sequence(sk, th, len, opt, saddr,dev)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
		/* fall through */

	case TCP_SYN_SENT:
		/* Our SYN was refused. */
		if (th->rst) {
			sk->err = ECONNREFUSED;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			sk->zapped = 1;
			if (!sk->dead) {
				sk->state_change(sk);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
#if 0
		if (opt->security != 0 || opt->compartment != 0) {
			sk->err = ECONNRESET;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
			tcp_reset(daddr, saddr, th, sk->prot, opt, dev);
			if (!sk->dead) {
				wake_up(sk->sleep);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}
#endif
		/* A bare SYN (no ACK) is a simultaneous open: move to
		   SYN_RECV and wait for the peer's ACK. */
		if (!th->ack) {
			if (th->syn) {
				sk->state = TCP_SYN_RECV;
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		switch(sk->state) {
			case TCP_SYN_SENT:
				if (!tcp_ack(sk, th, saddr, len)) {
					tcp_reset(daddr, saddr, th,
						sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}

				/* An ACK without a SYN is not a valid reply
				   to our SYN: ignore it. */
				if (!th->syn) {
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}

				/* SYN+ACK accepted: ack the peer's ISN. */
				sk->acked_seq = th->seq+1;
				sk->fin_seq = th->seq;
				tcp_send_ack(sk->send_seq, th->seq+1,
					sk, th, sk->daddr);
				/* fall through */

			case TCP_SYN_RECV:
				if (!tcp_ack(sk, th, saddr, len)) {
					tcp_reset(daddr, saddr, th,
						sk->prot, opt, dev,sk->ip_tos,sk->ip_ttl);
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}
				sk->state = TCP_ESTABLISHED;

				/* Parse the peer's options (e.g. MSS) and
				   latch its source port into our header
				   template for replies. */
				tcp_options(sk, th);
				sk->dummy_th.dest = th->source;
				sk->copied_seq = sk->acked_seq-1;
				if (!sk->dead) {
					sk->state_change(sk);
				}

				if (th->urg) {
					if (tcp_urg(sk, th, saddr)) {
						kfree_skb(skb, FREE_READ);
						release_sock(sk);
						return(0);
					}
				}
				/* NOTE(review): unlike the established path,
				   tcp_data failure here does not return early;
				   FIN handling still runs - verify intent. */
				if (tcp_data(skb, sk, saddr, len))
					kfree_skb(skb, FREE_READ);

				if (th->fin) tcp_fin(sk, th, saddr, dev);
				release_sock(sk);
				return(0);
		}

		/* Reached with an ACK in a state other than the two
		   handled above (came in via the outer default case). */
		if (th->urg) {
			if (tcp_urg(sk, th, saddr)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
		}

		if (tcp_data(skb, sk, saddr, len)) {
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);
		}

		if (!th->fin) {
			release_sock(sk);
			return(0);
		}
		tcp_fin(sk, th, saddr, dev);
		release_sock(sk);
		return(0);
  }
}
3493
3494
3495
3496
3497
3498
3499 static void
3500 tcp_write_wakeup(struct sock *sk)
3501 {
3502 struct sk_buff *buff;
3503 struct tcphdr *t1;
3504 struct device *dev=NULL;
3505 int tmp;
3506
3507 if (sk->zapped)
3508 return;
3509
3510 if (sk -> state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) return;
3511
3512 buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
3513 if (buff == NULL) return;
3514
3515 buff->mem_addr = buff;
3516 buff->mem_len = MAX_ACK_SIZE;
3517 buff->len = sizeof(struct tcphdr);
3518 buff->free = 1;
3519 buff->sk = sk;
3520 DPRINTF((DBG_TCP, "in tcp_write_wakeup\n"));
3521 t1 = (struct tcphdr *) buff->data;
3522
3523
3524 tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
3525 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
3526 if (tmp < 0) {
3527 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
3528 return;
3529 }
3530
3531 buff->len += tmp;
3532 t1 = (struct tcphdr *)((char *)t1 +tmp);
3533
3534 memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
3535
3536
3537
3538
3539
3540 t1->seq = ntohl(sk->send_seq-1);
3541 t1->ack = 1;
3542 t1->res1= 0;
3543 t1->res2= 0;
3544 t1->rst = 0;
3545 t1->urg = 0;
3546 t1->psh = 0;
3547 t1->fin = 0;
3548 t1->syn = 0;
3549 t1->ack_seq = ntohl(sk->acked_seq);
3550 t1->window = ntohs(tcp_select_window(sk));
3551 t1->doff = sizeof(*t1)/4;
3552 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
3553
3554
3555
3556
3557 sk->prot->queue_xmit(sk, dev, buff, 1);
3558 }
3559
3560
3561
3562
3563
/*
 * Zero-window probe.  When the peer has closed its window we must
 * periodically poke it, otherwise a lost window update would deadlock
 * the connection.  The probe is built by cloning the headers of the
 * first queued-but-unsent segment (plus at most one byte of its data)
 * and retransmitting them, then the probe timer is backed off
 * exponentially.
 */
void
tcp_send_probe0(struct sock *sk)
{
  unsigned char *raw;
  struct iphdr *iph;
  struct sk_buff *skb2, *skb;
  int len, hlen, data;
  struct tcphdr *t1;
  struct device *dev;

  if (sk->zapped)
	return;		/* after a valid reset we stay silent */

  /* Only probe while the connection can still carry data. */
  if (sk -> state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT &&
      sk -> state != TCP_FIN_WAIT1 && sk->state != TCP_FIN_WAIT2)
	return;

  /* Nothing queued means nothing to probe with. */
  skb = sk->wfront;
  if (skb == NULL)
	return;

  dev = skb->dev;

  if(dev==NULL)
  {
	printk("tcp_send_probe0: NULL device bug!\n");
	return;
  }
  IS_SKB(skb);

  /* Locate the IP header inside the queued frame. */
  raw = skb->data;
  iph = (struct iphdr *) (raw + dev->hard_header_len);

  /* hlen = link + IP headers; data = TCP payload bytes in the frame;
     the probe itself carries at most one byte of that payload. */
  hlen = (iph->ihl * sizeof(unsigned long)) + dev->hard_header_len;
  data = skb->len - hlen - sizeof(struct tcphdr);
  len = hlen + sizeof(struct tcphdr) + (data ? 1 : 0);

  /* No memory right now: just retry the probe shortly. */
  if ((skb2 = alloc_skb(sizeof(struct sk_buff) + len, GFP_ATOMIC)) == NULL) {
	reset_timer (sk, TIME_PROBE0, 10);
	return;
  }

  skb2->arp = skb->arp;
  skb2->len = len;
  skb2->h.raw = (char *)(skb2->data);

  /* Charge the copy against the socket's write-buffer budget. */
  sk->wmem_alloc += skb2->mem_len;

  /* Clone headers (and the optional single data byte) from the
     original frame. */
  memcpy(skb2->h.raw, raw, len);

  skb2->h.raw += hlen;		/* now points at the TCP header */
  t1 = skb2->h.th;

  /* Refresh the ack/window fields, then recompute the checksum over
     the TCP portion only. */
  t1->ack_seq = ntohl(sk->acked_seq);
  t1->res1 = 0;
  t1->ack = 1;
  t1->urg = 0;
  t1->res2 = 0;
  t1->window = ntohs(tcp_select_window(sk));
  t1->urg_ptr = 0;
  tcp_send_check(t1, sk->saddr, sk->daddr, len - hlen, sk);

  sk->prot->queue_xmit(sk, dev, skb2, 1);
  sk->backoff++;
  /* Exponential backoff scaled by the RTT estimate. */
  reset_timer (sk, TIME_PROBE0,
	backoff (sk->backoff) * (2 * sk->mdev + sk->rtt));
  sk->retransmits++;
  sk->prot->retransmits ++;
}
3643
3644
3645
3646
3647
3648 int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
3649 {
3650 int val,err;
3651
3652 if(level!=SOL_TCP)
3653 return ip_setsockopt(sk,level,optname,optval,optlen);
3654
3655 if (optval == NULL)
3656 return(-EINVAL);
3657
3658 err=verify_area(VERIFY_READ, optval, sizeof(int));
3659 if(err)
3660 return err;
3661
3662 val = get_fs_long((unsigned long *)optval);
3663
3664 switch(optname)
3665 {
3666 case TCP_MAXSEG:
3667 if(val<200||val>2048 || val>sk->mtu)
3668 return -EINVAL;
3669 sk->mss=val;
3670 return 0;
3671 case TCP_NODELAY:
3672 sk->nonagle=(val==0)?0:1;
3673 return 0;
3674 default:
3675 return(-ENOPROTOOPT);
3676 }
3677 }
3678
3679 int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
3680 {
3681 int val,err;
3682
3683 if(level!=SOL_TCP)
3684 return ip_getsockopt(sk,level,optname,optval,optlen);
3685
3686 switch(optname)
3687 {
3688 case TCP_MAXSEG:
3689 val=sk->mss;
3690 break;
3691 case TCP_NODELAY:
3692 val=sk->nonagle;
3693 break;
3694 default:
3695 return(-ENOPROTOOPT);
3696 }
3697 err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
3698 if(err)
3699 return err;
3700 put_fs_long(sizeof(int),(unsigned long *) optlen);
3701
3702 err=verify_area(VERIFY_WRITE, optval, sizeof(int));
3703 if(err)
3704 return err;
3705 put_fs_long(val,(unsigned long *)optval);
3706
3707 return(0);
3708 }
3709
3710
/*
 * Protocol jump table for TCP, registered with the generic INET socket
 * layer.  Initializers are positional: their order must match the
 * member order of struct proto (presumably declared in sock.h --
 * verify there before reordering anything).
 */
struct proto tcp_prot = {
  sock_wmalloc,			/* write-buffer allocation */
  sock_rmalloc,			/* read-buffer allocation */
  sock_wfree,
  sock_rfree,
  sock_rspace,			/* free receive space */
  sock_wspace,			/* free send space */
  tcp_close,
  tcp_read,
  tcp_write,
  tcp_sendto,
  tcp_recvfrom,
  ip_build_header,		/* IP layer builds our lower headers */
  tcp_connect,
  tcp_accept,
  ip_queue_xmit,
  tcp_retransmit,
  tcp_write_wakeup,
  tcp_read_wakeup,
  tcp_rcv,			/* receive entry point from IP */
  tcp_select,
  tcp_ioctl,
  NULL,				/* no extra init hook for TCP */
  tcp_shutdown,
  tcp_setsockopt,
  tcp_getsockopt,
  128,				/* presumably max header space -- confirm in sock.h */
  0,				/* retransmit counter, starts at zero */
  {NULL,},			/* per-port socket hash array */
  "TCP"				/* protocol name for diagnostics */
};