This source file includes following definitions.
- min
- print_th
- get_firstr
- diff
- tcp_select_window
- tcp_time_wait
- tcp_retransmit
- tcp_err
- tcp_readable
- tcp_select
- tcp_ioctl
- tcp_check
- tcp_send_check
- tcp_send_skb
- tcp_dequeue_partial
- tcp_send_partial
- tcp_enqueue_partial
- tcp_send_ack
- tcp_build_header
- tcp_write
- tcp_sendto
- tcp_read_wakeup
- cleanup_rbuf
- tcp_read_urg
- tcp_read
- tcp_shutdown
- tcp_recvfrom
- tcp_reset
- tcp_options
- default_mask
- tcp_conn_request
- tcp_close
- tcp_write_xmit
- sort_send
- tcp_ack
- tcp_data
- tcp_urg
- tcp_fin
- tcp_accept
- tcp_connect
- tcp_sequence
- tcp_rcv
- tcp_write_wakeup
- tcp_send_probe0
- tcp_setsockopt
- tcp_getsockopt
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81 #include <linux/types.h>
82 #include <linux/sched.h>
83 #include <linux/mm.h>
84 #include <linux/string.h>
85 #include <linux/socket.h>
86 #include <linux/sockios.h>
87 #include <linux/termios.h>
88 #include <linux/in.h>
89 #include <linux/fcntl.h>
90 #include "inet.h"
91 #include "dev.h"
92 #include "ip.h"
93 #include "protocol.h"
94 #include "icmp.h"
95 #include "tcp.h"
96 #include "skbuff.h"
97 #include "sock.h"
98 #include "arp.h"
99 #include <linux/errno.h>
100 #include <linux/timer.h>
101 #include <asm/system.h>
102 #include <asm/segment.h>
103 #include <linux/mm.h>
104
105 #define SEQ_TICK 3
106 unsigned long seq_offset;
107 #define SUBNETSARELOCAL
108
/*
 * Return the smaller of two unsigned values.
 * Note the int return type: callers use this for small byte counts.
 */
static __inline__ int
min(unsigned int a, unsigned int b)
{
    return (a < b) ? a : b;
}
115
116
/*
 * Debug helper: dump a TCP header to the console.
 * Produces output only when inet_debug == DBG_TCP; multi-byte
 * fields are byte-swapped from network order before printing.
 */
void
print_th(struct tcphdr *th)
{
    unsigned char *ptr;

    if (inet_debug != DBG_TCP) return;

    printk("TCP header:\n");
    /* Any options start immediately after the fixed header. */
    ptr =(unsigned char *)(th + 1);
    printk(" source=%d, dest=%d, seq =%ld, ack_seq = %ld\n",
        ntohs(th->source), ntohs(th->dest),
        ntohl(th->seq), ntohl(th->ack_seq));
    printk(" fin=%d, syn=%d, rst=%d, psh=%d, ack=%d, urg=%d res1=%d res2=%d\n",
        th->fin, th->syn, th->rst, th->psh, th->ack,
        th->urg, th->res1, th->res2);
    printk(" window = %d, check = %d urg_ptr = %d\n",
        ntohs(th->window), ntohs(th->check), ntohs(th->urg_ptr));
    printk(" doff = %d\n", th->doff);
    /* Only the first four option bytes are shown. */
    printk(" options = %d %d %d %d\n", ptr[0], ptr[1], ptr[2], ptr[3]);
}
137
138
139
140
/*
 * Remove and return the first buffer on the socket's receive queue,
 * or NULL if the queue is empty.
 */
static struct sk_buff *
get_firstr(struct sock *sk)
{
    return skb_dequeue(&sk->rqueue);
}
146
147
148
149
150
/*
 * Absolute distance between two sequence numbers.  The subtraction
 * is done in unsigned arithmetic so sequence-space wraparound gives
 * a small signed delta, whose magnitude is returned.
 */
static long
diff(unsigned long seq1, unsigned long seq2)
{
    long delta;

    delta = seq1 - seq2;
    if (delta < 0)
        delta = -delta;
    return delta;
}
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
/*
 * Choose the receive window to advertise to the peer.
 * Keeps the previously advertised window when the fresh estimate of
 * free receive space is either too small to be worth offering
 * (below min(mss, MAX_WINDOW/2)) or smaller than what we already
 * advertised -- shrinking an offered window is discouraged by the
 * TCP specification.
 */
static int tcp_select_window(struct sock *sk)
{
    int new_window = sk->prot->rspace(sk);

    if (new_window < min(sk->mss, MAX_WINDOW/2) ||
        new_window < sk->window)
        return(sk->window);
    return(new_window);
}
196
197
198
/*
 * Enter the TIME_WAIT state: mark the socket fully shut down,
 * notify any process still attached to it, and arm the close
 * timer so the socket is reaped after TCP_TIMEWAIT_LEN.
 */
static void tcp_time_wait(struct sock *sk)
{
    sk->state = TCP_TIME_WAIT;
    sk->shutdown = SHUTDOWN_MASK;
    if (!sk->dead)
        sk->state_change(sk);
    reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
}
207
208
209
210
211
212
213
214
/*
 * Retransmit queued segments.  With 'all' non-zero every pending
 * frame is resent unchanged.  Otherwise this is treated as a
 * congestion signal: the slow-start threshold is set to half the
 * current congestion window and the window collapses to one
 * segment before the retransmission is issued.
 */
static void
tcp_retransmit(struct sock *sk, int all)
{
    if (all) {
        ip_retransmit(sk, all);
        return;
    }

    /* Remember half the window as the slow-start threshold. */
    sk->ssthresh = sk->cong_window >> 1;

    /* Restart the window-growth bookkeeping. */
    sk->cong_count = 0;

    /* Back to slow start: one outstanding segment. */
    sk->cong_window = 1;

    ip_retransmit(sk, all);
}
232
233
234
235
236
237
238
239
240
241
/*
 * Error handler invoked (presumably by the ICMP layer -- the header
 * argument is the returned IP header of a packet we sent) when an
 * error is reported for a TCP segment.
 *
 * err < 0 is a locally generated error delivered directly to the
 * socket.  An ICMP source quench shrinks the congestion window.
 * Other ICMP codes are translated via icmp_err_convert[]; a fatal
 * error aborts a connection that is still in SYN_SENT.
 */
void
tcp_err(int err, unsigned char *header, unsigned long daddr,
    unsigned long saddr, struct inet_protocol *protocol)
{
    struct tcphdr *th;
    struct sock *sk;
    struct iphdr *iph=(struct iphdr *)header;

    /* Skip the variable-length IP header to reach the TCP header. */
    header+=4*iph->ihl;

    DPRINTF((DBG_TCP, "TCP: tcp_err(%d, hdr=%X, daddr=%X saddr=%X, protocol=%X)\n",
        err, header, daddr, saddr, protocol));

    th =(struct tcphdr *)header;
    /* Note reversed address/port roles: the quoted packet was sent by us. */
    sk = get_sock(&tcp_prot, th->source, daddr, th->dest, saddr);
    print_th(th);

    if (sk == NULL) return;

    if(err<0)
    {
        sk->err = -err;
        sk->error_report(sk);
        return;
    }

    if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8)) {
        /* Congestion notification: slow down, but never below 4. */
        if (sk->cong_window > 4) sk->cong_window--;
        return;
    }

    DPRINTF((DBG_TCP, "TCP: icmp_err got error\n"));
    sk->err = icmp_err_convert[err & 0xff].errno;

    /*
     * A fatal ICMP error only kills a connection attempt in
     * progress; established connections keep going and just see
     * sk->err.
     */
    if (icmp_err_convert[err & 0xff].fatal) {
        if (sk->state == TCP_SYN_SENT) {
            sk->state = TCP_CLOSE;
            sk->error_report(sk);
        }
    }
    return;
}
293
294
295
296
297
298
299
/*
 * Count how many bytes a read on this socket could currently return.
 * Walks the receive queue from sk->copied_seq+1 accumulating
 * contiguous in-order data, stopping at the first gap or after data
 * ending in a PSH.  One byte is subtracted if the pending urgent
 * byte lies inside the counted range (it is returned out of band).
 * Runs with interrupts off while touching the queue.
 */
static int
tcp_readable(struct sock *sk)
{
    unsigned long counted;
    unsigned long amount;
    struct sk_buff *skb;
    int count=0;
    int sum;
    unsigned long flags;

    DPRINTF((DBG_TCP, "tcp_readable(sk=%X)\n", sk));
    if(sk && sk->debug)
        printk("tcp_readable: %p - ",sk);

    if (sk == NULL || skb_peek(&sk->rqueue) == NULL)
    {
        if(sk && sk->debug)
            printk("empty\n");
        return(0);
    }

    /* First sequence number not yet handed to the user. */
    counted = sk->copied_seq+1;
    amount = 0;

    save_flags(flags);
    cli();
    skb =(struct sk_buff *)sk->rqueue;

    /* Walk the circular receive queue. */
    do {
        count++;
#ifdef OLD
        /* Old sanity check against a corrupted queue. */
        if (count > 20) {
            restore_flags(flags);
            DPRINTF((DBG_TCP, "tcp_readable, more than 20 packets without a psh\n"));
            printk("tcp_read: possible read_queue corruption.\n");
            return(amount);
        }
#endif
        /* A gap in sequence space: nothing beyond it is readable yet. */
        if (before(counted, skb->h.th->seq))
            break;
        /* Bytes in this skb at or after 'counted'. */
        sum = skb->len -(counted - skb->h.th->seq);
        if (skb->h.th->syn)
            sum++;
        if (sum >= 0) {
            amount += sum;
            /* A SYN occupies sequence space but carries no data. */
            if (skb->h.th->syn) amount--;
            counted += sum;
        }
        /* PSH marks a record boundary: report what we have. */
        if (amount && skb->h.th->psh) break;
        skb =(struct sk_buff *)skb->next;
    } while(skb != sk->rqueue);
    /* Don't count the urgent byte; it is read via MSG_OOB. */
    if (sk->urg_data &&
        (sk->urg_seq - sk->copied_seq) < (counted - sk->copied_seq))
        amount--;
    restore_flags(flags);
    DPRINTF((DBG_TCP, "tcp readable returning %d bytes\n", amount));
    if(sk->debug)
        printk("got %lu bytes.\n",amount);
    return(amount);
}
362
363
364
365
366
367
368
/*
 * select() support for TCP sockets.  Registers the caller on the
 * socket's wait queue, then reports readiness:
 *   SEL_IN  - readable data, pending error, or receive shutdown;
 *   SEL_OUT - enough write space (but not while the handshake is
 *             still in progress), never on a send-shutdown socket;
 *   SEL_EX  - a pending error.
 * The socket is locked (sk->inuse) for the duration and released
 * on every exit path.
 */
static int
tcp_select(struct sock *sk, int sel_type, select_table *wait)
{
    DPRINTF((DBG_TCP, "tcp_select(sk=%X, sel_type = %d, wait = %X)\n",
        sk, sel_type, wait));

    sk->inuse = 1;
    switch(sel_type) {
    case SEL_IN:
        if(sk->debug)
            printk("select in");
        select_wait(sk->sleep, wait);
        if(sk->debug)
            printk("-select out");
        if (skb_peek(&sk->rqueue) != NULL) {
            /* Listening socket: a queued connection counts as readable. */
            if (sk->state == TCP_LISTEN || tcp_readable(sk)) {
                release_sock(sk);
                if(sk->debug)
                    printk("-select ok data\n");
                return(1);
            }
        }
        if (sk->err != 0)
        {
            release_sock(sk);
            if(sk->debug)
                printk("-select ok error");
            return(1);
        }
        if (sk->shutdown & RCV_SHUTDOWN) {
            /* EOF is "readable": a read will return 0 immediately. */
            release_sock(sk);
            if(sk->debug)
                printk("-select ok down\n");
            return(1);
        } else {
            release_sock(sk);
            if(sk->debug)
                printk("-select fail\n");
            return(0);
        }
    case SEL_OUT:
        select_wait(sk->sleep, wait);
        if (sk->shutdown & SEND_SHUTDOWN) {
            DPRINTF((DBG_TCP,
                "write select on shutdown socket.\n"));
            release_sock(sk);
            return(0);
        }

        /* Writable only when a full segment's worth of space exists. */
        if (sk->prot->wspace(sk) >= sk->mss) {
            release_sock(sk);
            /* Still connecting: don't report writable yet. */
            if (sk->state == TCP_SYN_RECV ||
                sk->state == TCP_SYN_SENT) return(0);
            return(1);
        }
        DPRINTF((DBG_TCP,
            "tcp_select: sleeping on write sk->wmem_alloc = %d, "
            "sk->packets_out = %d\n"
            "sk->wback = %X, sk->wfront = %X\n"
            "sk->send_seq = %u, sk->window_seq=%u\n",
            sk->wmem_alloc, sk->packets_out,
            sk->wback, sk->wfront,
            sk->send_seq, sk->window_seq));

        release_sock(sk);
        return(0);
    case SEL_EX:
        select_wait(sk->sleep,wait);
        if (sk->err) {
            release_sock(sk);
            return(1);
        }
        release_sock(sk);
        return(0);
    }

    release_sock(sk);
    return(0);
}
456
457
/*
 * ioctl() handler for TCP sockets.
 *   DDIOCSDBG  - adjust inet debugging.
 *   TIOCINQ    - bytes currently readable (invalid on a listener).
 *   SIOCATMARK - non-zero if the next byte to read is the urgent byte.
 *   TIOCOUTQ   - free space in the send buffer.
 * Results are copied back to user space after verify_area().
 */
int
tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
    int err;
    DPRINTF((DBG_TCP, "tcp_ioctl(sk=%X, cmd = %d, arg=%X)\n", sk, cmd, arg));
    switch(cmd) {
    case DDIOCSDBG:
        return(dbg_ioctl((void *) arg, DBG_TCP));

    case TIOCINQ:
#ifdef FIXME
    case FIONREAD:
#endif
        {
            unsigned long amount;

            if (sk->state == TCP_LISTEN) return(-EINVAL);

            sk->inuse = 1;
            amount = tcp_readable(sk);
            release_sock(sk);
            DPRINTF((DBG_TCP, "returning %d\n", amount));
            err=verify_area(VERIFY_WRITE,(void *)arg,
                sizeof(unsigned long));
            if(err)
                return err;
            put_fs_long(amount,(unsigned long *)arg);
            return(0);
        }
    case SIOCATMARK:
        {
            int answ = 0;

            /* At the mark when the next unread byte is the urgent byte. */
            if (sk->urg_data && sk->copied_seq+1 == sk->urg_seq)
                answ = 1;
            err=verify_area(VERIFY_WRITE,(void *) arg,
                sizeof(unsigned long));
            if(err)
                return err;
            put_fs_long(answ,(int *) arg);
            return(0);
        }
    case TIOCOUTQ:
        {
            unsigned long amount;

            if (sk->state == TCP_LISTEN) return(-EINVAL);
            amount = sk->prot->wspace(sk);
            err=verify_area(VERIFY_WRITE,(void *)arg,
                sizeof(unsigned long));
            if(err)
                return err;
            put_fs_long(amount,(unsigned long *)arg);
            return(0);
        }
    default:
        return(-EINVAL);
    }
}
521
522
523
/*
 * Compute the TCP checksum over the pseudo-header plus 'len' bytes
 * of header+data starting at 'th'.  i386-specific: uses inline
 * assembly with add-with-carry to fold the 32-bit one's-complement
 * sum.  Returns the final 16-bit complemented checksum.
 */
unsigned short
tcp_check(struct tcphdr *th, int len,
      unsigned long saddr, unsigned long daddr)
{
    unsigned long sum;

    if (saddr == 0) saddr = my_addr();
    print_th(th);
    /* Seed the sum with the pseudo-header: saddr + daddr + (len<<16 | proto). */
    __asm__("\t addl %%ecx,%%ebx\n"
        "\t adcl %%edx,%%ebx\n"
        "\t adcl $0, %%ebx\n"
        : "=b"(sum)
        : "0"(daddr), "c"(saddr), "d"((ntohs(len) << 16) + IPPROTO_TCP*256)
        : "cx","bx","dx" );

    /* Add the bulk of the data, 32 bits at a time. */
    if (len > 3) {
        __asm__("\tclc\n"
            "1:\n"
            "\t lodsl\n"
            "\t adcl %%eax, %%ebx\n"
            "\t loop 1b\n"
            "\t adcl $0, %%ebx\n"
            : "=b"(sum) , "=S"(th)
            : "0"(sum), "c"(len/4) ,"1"(th)
            : "ax", "cx", "bx", "si" );
    }

    /* Fold the 32-bit sum down to 16 bits. */
    __asm__("\t movl %%ebx, %%ecx\n"
        "\t shrl $16,%%ecx\n"
        "\t addw %%cx, %%bx\n"
        "\t adcw $0, %%bx\n"
        : "=b"(sum)
        : "0"(sum)
        : "bx", "cx");

    /* Pick up a trailing 16-bit word. */
    if ((len & 2) != 0) {
        __asm__("\t lodsw\n"
            "\t addw %%ax,%%bx\n"
            "\t adcw $0, %%bx\n"
            : "=b"(sum), "=S"(th)
            : "0"(sum) ,"1"(th)
            : "si", "ax", "bx");
    }

    /* And finally a trailing odd byte. */
    if ((len & 1) != 0) {
        __asm__("\t lodsb\n"
            "\t movb $0,%%ah\n"
            "\t addw %%ax,%%bx\n"
            "\t adcw $0, %%bx\n"
            : "=b"(sum)
            : "0"(sum) ,"S"(th)
            : "si", "ax", "bx");
    }

    /* One's-complement the result. */
    return((~sum) & 0xffff);
}
584
585
586 void tcp_send_check(struct tcphdr *th, unsigned long saddr,
587 unsigned long daddr, int len, struct sock *sk)
588 {
589 th->check = 0;
590 th->check = tcp_check(th, len, saddr, daddr);
591 return;
592 }
593
/*
 * Transmit (or queue) a fully built TCP frame.  After sanity checks
 * and checksumming, the frame is sent immediately unless the send
 * window is exhausted, a retransmit is in progress, or the
 * congestion window is full -- in which case it is appended to the
 * socket's write queue and a zero-window probe timer may be armed.
 */
static void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
{
    int size;

    /* TCP header length plus payload (everything after the IP header). */
    size = skb->len - ((unsigned char *) skb->h.th - skb->data);

    /* Guard against a malformed buffer. */
    if (size < sizeof(struct tcphdr) || size > skb->len) {
        printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
            skb, skb->data, skb->h.th, skb->len);
        kfree_skb(skb, FREE_WRITE);
        return;
    }

    /* An empty segment is only legal as a SYN or FIN. */
    if (size == sizeof(struct tcphdr)) {
        if(!skb->h.th->syn && !skb->h.th->fin) {
            printk("tcp_send_skb: attempt to queue a bogon.\n");
            kfree_skb(skb,FREE_WRITE);
            return;
        }
    }

    tcp_send_check(skb->h.th, sk->saddr, sk->daddr, size, sk);

    skb->h.seq = sk->send_seq;
    if (after(sk->send_seq , sk->window_seq) ||
        (sk->retransmits && sk->timeout == TIME_WRITE) ||
        sk->packets_out >= sk->cong_window) {
        /* Can't send now: append to the write queue. */
        DPRINTF((DBG_TCP, "sk->cong_window = %d, sk->packets_out = %d\n",
            sk->cong_window, sk->packets_out));
        DPRINTF((DBG_TCP, "sk->send_seq = %d, sk->window_seq = %d\n",
            sk->send_seq, sk->window_seq));
        skb->next = NULL;
        skb->magic = TCP_WRITE_QUEUE_MAGIC;
        if (sk->wback == NULL) {
            sk->wfront = skb;
        } else {
            sk->wback->next = skb;
        }
        sk->wback = skb;
        /* Window closed and nothing in flight: arm the probe timer. */
        if (before(sk->window_seq, sk->wfront->h.seq) &&
            sk->send_head == NULL &&
            sk->ack_backlog == 0)
            reset_timer(sk, TIME_PROBE0, sk->rto);
    } else {
        sk->prot->queue_xmit(sk, skb->dev, skb, 0);
    }
}
646
/*
 * Atomically detach the pending partial frame from the socket,
 * cancelling its flush timer.  Returns the frame, or NULL if none
 * was pending.  Interrupts are disabled around the swap so a timer
 * firing cannot race with us.
 */
struct sk_buff * tcp_dequeue_partial(struct sock * sk)
{
    struct sk_buff * skb;
    unsigned long flags;

    save_flags(flags);
    cli();
    skb = sk->partial;
    if (skb) {
        sk->partial = NULL;
        del_timer(&sk->partial_timer);
    }
    restore_flags(flags);
    return skb;
}
662
663 static void tcp_send_partial(struct sock *sk)
664 {
665 struct sk_buff *skb;
666
667 if (sk == NULL)
668 return;
669 while ((skb = tcp_dequeue_partial(sk)) != NULL)
670 tcp_send_skb(sk, skb);
671 }
672
/*
 * Install 'skb' as the socket's pending partial frame and arm a
 * one-tick (HZ) timer that will flush it via tcp_send_partial().
 * If a previous partial frame was pending it is sent immediately
 * (after interrupts are re-enabled) so at most one accumulates.
 */
void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk)
{
    struct sk_buff * tmp;
    unsigned long flags;

    save_flags(flags);
    cli();
    tmp = sk->partial;
    if (tmp)
        del_timer(&sk->partial_timer);
    sk->partial = skb;
    /* NOTE(review): expires is set to HZ, apparently a relative tick
     * count as used by this era's timer code -- confirm against
     * add_timer()'s contract. */
    sk->partial_timer.expires = HZ;
    sk->partial_timer.function = (void (*)(unsigned long)) tcp_send_partial;
    sk->partial_timer.data = (unsigned long) sk;
    add_timer(&sk->partial_timer);
    restore_flags(flags);
    if (tmp)
        tcp_send_skb(sk, tmp);
}
692
693
694
/*
 * Build and transmit a bare ACK segment carrying sequence number
 * 'sequence' and acknowledging 'ack'.  If no buffer memory is
 * available the ACK is deferred by bumping ack_backlog and arming
 * a short write timer.  When the ACK covers everything received
 * (ack == acked_seq) the delayed-ACK bookkeeping is cleared and
 * idle timers are rearranged.
 */
static void
tcp_send_ack(unsigned long sequence, unsigned long ack,
         struct sock *sk,
         struct tcphdr *th, unsigned long daddr)
{
    struct sk_buff *buff;
    struct tcphdr *t1;
    struct device *dev = NULL;
    int tmp;

    /* Zapped sockets (reset received) must stay silent. */
    if(sk->zapped)
        return;

    buff = sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
    if (buff == NULL) {
        /* No memory: remember the ACK is owed and retry via timer. */
        sk->ack_backlog++;
        if (sk->timeout != TIME_WRITE && tcp_connected(sk->state)) {
            reset_timer(sk, TIME_WRITE, 10);
        }
        if (inet_debug == DBG_SLIP) printk("\rtcp_ack: malloc failed\n");
        return;
    }

    buff->mem_addr = buff;
    buff->mem_len = MAX_ACK_SIZE;
    buff->len = sizeof(struct tcphdr);
    buff->sk = sk;
    t1 =(struct tcphdr *) buff->data;

    /* Lay down the IP (and link) headers first. */
    tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
                IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
    if (tmp < 0) {
        buff->free=1;
        sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
        if (inet_debug == DBG_SLIP) printk("\rtcp_ack: build_header failed\n");
        return;
    }
    buff->len += tmp;
    t1 =(struct tcphdr *)((char *)t1 +tmp);

    /* Start from the incoming header, then swap/overwrite fields. */
    memcpy(t1, th, sizeof(*t1));

    t1->dest = th->source;
    t1->source = th->dest;
    t1->seq = ntohl(sequence);
    t1->ack = 1;
    sk->window = tcp_select_window(sk);
    t1->window = ntohs(sk->window);
    t1->res1 = 0;
    t1->res2 = 0;
    t1->rst = 0;
    t1->urg = 0;
    t1->syn = 0;
    t1->psh = 0;
    t1->fin = 0;
    if (ack == sk->acked_seq) {
        /* Fully caught up: nothing owed, reset delayed-ACK state. */
        sk->ack_backlog = 0;
        sk->bytes_rcv = 0;
        sk->ack_timed = 0;
        if (sk->send_head == NULL && sk->wfront == NULL && sk->timeout == TIME_WRITE)
        {
            if(sk->keepopen)
                reset_timer(sk,TIME_KEEPOPEN,TCP_TIMEOUT_LEN);
            else
                delete_timer(sk);
        }
    }
    t1->ack_seq = ntohl(ack);
    t1->doff = sizeof(*t1)/4;
    tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), sk);
    if (sk->debug)
        printk("\rtcp_ack: seq %lx ack %lx\n", sequence, ack);
    sk->prot->queue_xmit(sk, dev, buff, 1);
}
776
777
778
/*
 * Fill in a TCP header for an outgoing data segment from the
 * socket's template header (dummy_th), stamping the current send
 * sequence, the latest ACK, and a freshly selected window.
 * 'push' counts bytes still to come: PSH is set only when this is
 * the last segment of the write (push == 0).
 * Returns the header length in bytes.
 */
static int
tcp_build_header(struct tcphdr *th, struct sock *sk, int push)
{
    memcpy(th,(void *) &(sk->dummy_th), sizeof(*th));
    th->seq = htonl(sk->send_seq);
    th->psh =(push == 0) ? 1 : 0;
    th->doff = sizeof(*th)/4;
    th->ack = 1;
    th->fin = 0;
    /* This segment carries an up-to-date ACK; clear delayed-ACK state. */
    sk->ack_backlog = 0;
    sk->bytes_rcv = 0;
    sk->ack_timed = 0;
    th->ack_seq = htonl(sk->acked_seq);
    sk->window = tcp_select_window(sk);
    th->window = htons(sk->window);

    return(sizeof(*th));
}
799
800
801
802
803
/*
 * The TCP sendmsg path: copy up to 'len' bytes from user space
 * ('from') into segments and transmit them.
 *
 * Loop outline, per iteration:
 *   1. Bail out on pending error or send shutdown.
 *   2. Wait (unless nonblock) until the connection is established.
 *   3. If a partial segment is pending, append to it first.
 *   4. Otherwise size a new segment from the send window / mss,
 *      allocate a buffer (sleeping if necessary), build IP and TCP
 *      headers, copy the payload, and send or park it as a partial.
 * Returns bytes copied, or a negative errno if nothing was copied.
 * MSG_OOB marks the data urgent.  The socket lock (sk->inuse) is
 * dropped around every sleep and reacquired afterwards.
 */
static int
tcp_write(struct sock *sk, unsigned char *from,
      int len, int nonblock, unsigned flags)
{
    int copied = 0;
    int copy;
    int tmp;
    struct sk_buff *skb;
    struct sk_buff *send_tmp;
    unsigned char *buff;
    struct proto *prot;
    struct device *dev = NULL;

    DPRINTF((DBG_TCP, "tcp_write(sk=%X, from=%X, len=%d, nonblock=%d, flags=%X)\n",
        sk, from, len, nonblock, flags));

    sk->inuse=1;
    prot = sk->prot;
    while(len > 0) {
        /* Report a stored error -- but deliver partial progress first. */
        if (sk->err) {
            release_sock(sk);
            if (copied) return(copied);
            tmp = -sk->err;
            sk->err = 0;
            return(tmp);
        }

        /* Writing after shutdown(SEND) is a broken pipe. */
        if (sk->shutdown & SEND_SHUTDOWN) {
            release_sock(sk);
            sk->err = EPIPE;
            if (copied) return(copied);
            sk->err = 0;
            return(-EPIPE);
        }

        /* Wait until the connection is in a data-carrying state. */
        while(sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) {
            if (sk->err) {
                release_sock(sk);
                if (copied) return(copied);
                tmp = -sk->err;
                sk->err = 0;
                return(tmp);
            }

            /* Not connecting either: the connection is gone. */
            if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV) {
                release_sock(sk);
                DPRINTF((DBG_TCP, "tcp_write: return 1\n"));
                if (copied) return(copied);

                if (sk->err) {
                    tmp = -sk->err;
                    sk->err = 0;
                    return(tmp);
                }

                if (sk->keepopen) {
                    send_sig(SIGPIPE, current, 0);
                }
                return(-EPIPE);
            }

            if (nonblock || copied) {
                release_sock(sk);
                DPRINTF((DBG_TCP, "tcp_write: return 2\n"));
                if (copied) return(copied);
                return(-EAGAIN);
            }

            /* Sleep until the handshake finishes (or a signal). */
            release_sock(sk);
            cli();
            if (sk->state != TCP_ESTABLISHED &&
                sk->state != TCP_CLOSE_WAIT && sk->err == 0) {
                interruptible_sleep_on(sk->sleep);
                if (current->signal & ~current->blocked) {
                    sti();
                    DPRINTF((DBG_TCP, "tcp_write: return 3\n"));
                    if (copied) return(copied);
                    return(-ERESTARTSYS);
                }
            }
            sk->inuse = 1;
            sti();
        }

        /*
         * A partially filled segment is pending: top it up before
         * building a new one (Nagle-style coalescing).
         */
        if ((skb = tcp_dequeue_partial(sk)) != NULL) {
            int hdrlen;

            /* IP header bytes plus the TCP header. */
            hdrlen = ((unsigned long)skb->h.th - (unsigned long)skb->data)
                + sizeof(struct tcphdr);

            /* Urgent data is never merged into a partial frame. */
            if (!(flags & MSG_OOB)) {
                copy = min(sk->mss - (skb->len - hdrlen), len);

                if (copy <= 0) {
                    printk("TCP: **bug**: \"copy\" <= 0!!\n");
                    copy = 0;
                }

                memcpy_fromfs(skb->data + skb->len, from, copy);
                skb->len += copy;
                from += copy;
                copied += copy;
                len -= copy;
                sk->send_seq += copy;
            }
            /* Send when full, urgent, or the pipe is idle; else re-park. */
            if ((skb->len - hdrlen) >= sk->mss ||
                (flags & MSG_OOB) ||
                !sk->packets_out)
                tcp_send_skb(sk, skb);
            else
                tcp_enqueue_partial(skb, sk);
            continue;
        }

        /* Size the next segment from the remaining send window. */
        copy = diff(sk->window_seq, sk->send_seq);

        /* A shrunken window still permits an mss-sized allocation. */
        if (copy < (sk->max_window >> 1))
            copy = sk->mss;
        copy = min(copy, sk->mss);
        copy = min(copy, len);

        /*
         * Sub-mss non-urgent writes get a full-mtu buffer so the
         * segment can later be topped up as a partial frame.
         */
        send_tmp = NULL;
        if (copy < sk->mss && !(flags & MSG_OOB)) {
            release_sock(sk);

            skb = prot->wmalloc(sk, sk->mtu + 128 + prot->max_header + sizeof(*skb), 0, GFP_KERNEL);
            sk->inuse = 1;
            send_tmp = skb;
        } else {
            release_sock(sk);
            skb = prot->wmalloc(sk, copy + prot->max_header + sizeof(*skb), 0, GFP_KERNEL);
            sk->inuse = 1;
        }

        /* Allocation failed: wait for write memory to be freed. */
        if (skb == NULL) {
            if (nonblock ) {
                release_sock(sk);
                DPRINTF((DBG_TCP, "tcp_write: return 4\n"));
                if (copied) return(copied);
                return(-EAGAIN);
            }

            /* Only sleep if no memory was freed since we looked. */
            tmp = sk->wmem_alloc;
            release_sock(sk);
            cli();

            if (tmp <= sk->wmem_alloc &&
                (sk->state == TCP_ESTABLISHED||sk->state == TCP_CLOSE_WAIT)
                && sk->err == 0) {
                interruptible_sleep_on(sk->sleep);
                if (current->signal & ~current->blocked) {
                    sti();
                    DPRINTF((DBG_TCP, "tcp_write: return 5\n"));
                    if (copied) return(copied);
                    return(-ERESTARTSYS);
                }
            }
            sk->inuse = 1;
            sti();
            continue;
        }

        skb->len = 0;
        skb->sk = sk;
        skb->free = 0;

        buff = skb->data;

        /* Build the IP (and link-level) header. */
        tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev,
                IPPROTO_TCP, sk->opt, skb->mem_len,sk->ip_tos,sk->ip_ttl);
        if (tmp < 0 ) {
            prot->wfree(sk, skb->mem_addr, skb->mem_len);
            release_sock(sk);
            DPRINTF((DBG_TCP, "tcp_write: return 6\n"));
            if (copied) return(copied);
            return(tmp);
        }
        skb->len += tmp;
        skb->dev = dev;
        buff += tmp;
        skb->h.th =(struct tcphdr *) buff;
        /* len-copy == 0 on the final segment sets PSH. */
        tmp = tcp_build_header((struct tcphdr *)buff, sk, len-copy);
        if (tmp < 0) {
            prot->wfree(sk, skb->mem_addr, skb->mem_len);
            release_sock(sk);
            DPRINTF((DBG_TCP, "tcp_write: return 7\n"));
            if (copied) return(copied);
            return(tmp);
        }

        if (flags & MSG_OOB) {
            ((struct tcphdr *)buff)->urg = 1;
            ((struct tcphdr *)buff)->urg_ptr = ntohs(copy);
        }
        skb->len += tmp;
        memcpy_fromfs(buff+tmp, from, copy);

        from += copy;
        copied += copy;
        len -= copy;
        skb->len += copy;
        skb->free = 0;
        sk->send_seq += copy;

        /* Undersized segment with data in flight: hold as partial. */
        if (send_tmp != NULL && sk->packets_out) {
            tcp_enqueue_partial(send_tmp, sk);
            continue;
        }
        tcp_send_skb(sk, skb);
    }
    sk->err = 0;

    /*
     * Flush a leftover partial frame when the pipe is idle, or when
     * Nagle is disabled and the window still has room.
     */
    if(sk->partial &&
       ((!sk->packets_out)

        || (sk->nonagle && before(sk->send_seq , sk->window_seq))
       ))
        tcp_send_partial(sk);

    release_sock(sk);
    DPRINTF((DBG_TCP, "tcp_write: return 8\n"));
    return(copied);
}
1078
1079
/*
 * sendto() on a TCP socket: the supplied address must match the
 * connection's established peer (family, port, address) exactly,
 * after which this degenerates to tcp_write().
 */
static int
tcp_sendto(struct sock *sk, unsigned char *from,
       int len, int nonblock, unsigned flags,
       struct sockaddr_in *addr, int addr_len)
{
    struct sockaddr_in sin;

    if (addr_len < sizeof(sin)) return(-EINVAL);
    memcpy_fromfs(&sin, addr, sizeof(sin));
    if (sin.sin_family && sin.sin_family != AF_INET) return(-EINVAL);
    if (sin.sin_port != sk->dummy_th.dest) return(-EINVAL);
    if (sin.sin_addr.s_addr != sk->daddr) return(-EINVAL);
    return(tcp_write(sk, from, len, nonblock, flags));
}
1094
1095
/*
 * Send the ACK the receive path has been deferring (ack_backlog
 * non-zero), typically after reading has opened the window.  Builds
 * a bare ACK from the socket's template header; on allocation
 * failure the write timer is re-armed to retry shortly.
 */
static void
tcp_read_wakeup(struct sock *sk)
{
    int tmp;
    struct device *dev = NULL;
    struct tcphdr *t1;
    struct sk_buff *buff;

    DPRINTF((DBG_TCP, "in tcp read wakeup\n"));
    /* Nothing owed -- no ACK to send. */
    if (!sk->ack_backlog) return;

    buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
    if (buff == NULL) {
        /* Out of memory: try again from the timer. */
        reset_timer(sk, TIME_WRITE, 10);
        return;
    }

    buff->mem_addr = buff;
    buff->mem_len = MAX_ACK_SIZE;
    buff->len = sizeof(struct tcphdr);
    buff->sk = sk;

    /* IP/link headers first. */
    tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
                IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
    if (tmp < 0) {
        buff->free=1;
        sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
        return;
    }

    buff->len += tmp;
    t1 =(struct tcphdr *)(buff->data +tmp);

    memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
    t1->seq = ntohl(sk->send_seq);
    t1->ack = 1;
    t1->res1 = 0;
    t1->res2 = 0;
    t1->rst = 0;
    t1->urg = 0;
    t1->syn = 0;
    t1->psh = 0;
    /* The ACK is going out now: clear the delayed-ACK state. */
    sk->ack_backlog = 0;
    sk->bytes_rcv = 0;
    sk->window = tcp_select_window(sk);
    t1->window = ntohs(sk->window);
    t1->ack_seq = ntohl(sk->acked_seq);
    t1->doff = sizeof(*t1)/4;
    tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
    sk->prot->queue_xmit(sk, dev, buff, 1);
}
1159
1160
1161
1162
1163
1164
1165
1166
/*
 * Reclaim fully consumed buffers from the head of the receive queue
 * and, if that changed the amount of free receive space, arrange
 * for a window update: an immediate ACK when a large amount of
 * space opened up, otherwise a delayed ACK via the write timer.
 */
static void
cleanup_rbuf(struct sock *sk)
{
    unsigned long flags;
    int left;
    struct sk_buff *skb;

    if(sk->debug)
        printk("cleaning rbuf for sk=%p\n", sk);

    save_flags(flags);
    cli();

    /* Snapshot free space before freeing anything. */
    left = sk->prot->rspace(sk);

    /* Free consumed skbs from the front; stop at the first live one. */
    while((skb=skb_peek(&sk->rqueue)) != NULL )
    {
        if (!skb->used)
            break;
        skb_unlink(skb);
        skb->sk = sk;
        kfree_skb(skb, FREE_READ);
    }

    restore_flags(flags);

    DPRINTF((DBG_TCP, "sk->window left = %d, sk->prot->rspace(sk)=%d\n",
        sk->window - sk->bytes_rcv, sk->prot->rspace(sk)));

    if(sk->debug)
        printk("sk->rspace = %lu, was %d\n", sk->prot->rspace(sk),
            left);
    if (sk->prot->rspace(sk) != left)
    {
        /* Space changed: the peer should learn about the new window. */
        sk->ack_backlog++;

        /* A big jump in space (more than an mtu beyond what the
         * advertised window already covers) is worth an instant ACK. */
        if ((sk->prot->rspace(sk) > (sk->window - sk->bytes_rcv + sk->mtu))) {

            tcp_read_wakeup(sk);
        } else {
            /* Otherwise delay the ACK, without postponing an
             * already-armed earlier timer. */
            int was_active = del_timer(&sk->timer);
            if (!was_active || TCP_ACK_TIME < sk->timer.expires) {
                reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
            } else
                add_timer(&sk->timer);
        }
    }
}
1243
1244
1245
/*
 * Read the single byte of urgent (out-of-band) data.  Returns 1
 * with the byte stored at 'to' when available; with MSG_PEEK the
 * byte is left unread, otherwise it is marked consumed (URG_READ).
 * Blocks (unless nonblock) using the standard add-to-waitqueue /
 * re-check / schedule() pattern.  Returns 0 on EOF-like states,
 * or a negative errno.
 */
static int
tcp_read_urg(struct sock * sk, int nonblock,
         unsigned char *to, int len, unsigned flags)
{
    struct wait_queue wait = { current, NULL };

    while (len > 0) {
        /* Unread urgent byte present: hand it over. */
        if (sk->urg_data && sk->urg_data != URG_READ) {
            char c = sk->urg_data;
            if (!(flags & MSG_PEEK))
                sk->urg_data = URG_READ;
            put_fs_byte(c, to);
            return 1;
        }

        if (sk->err) {
            int tmp = -sk->err;
            sk->err = 0;
            return tmp;
        }

        if (sk->state == TCP_CLOSE || sk->done) {
            /* First read after close reports clean EOF. */
            if (!sk->done) {
                sk->done = 1;
                return 0;
            }
            return -ENOTCONN;
        }

        if (sk->shutdown & RCV_SHUTDOWN) {
            sk->done = 1;
            return 0;
        }

        if (nonblock)
            return -EAGAIN;

        if (current->signal & ~current->blocked)
            return -ERESTARTSYS;

        /* Sleep until urgent data, an error, or shutdown arrives.
         * The condition is re-checked after going INTERRUPTIBLE so
         * a wakeup between check and schedule() is not lost. */
        current->state = TASK_INTERRUPTIBLE;
        add_wait_queue(sk->sleep, &wait);
        if ((!sk->urg_data || sk->urg_data == URG_READ) &&
            sk->err == 0 && !(sk->shutdown & RCV_SHUTDOWN))
            schedule();
        remove_wait_queue(sk->sleep, &wait);
        current->state = TASK_RUNNING;
    }
    return 0;
}
1296
1297
1298
/*
 * The TCP recvmsg path: copy up to 'len' in-order bytes from the
 * receive queue to user space at 'to'.
 *
 * The outer loop walks queued skbs in sequence order; the inner
 * loop waits (unless nonblock) whenever the next in-sequence byte
 * has not arrived, handling errors, close, shutdown and signals on
 * the way.  Urgent data is skipped inline (reads stop short of the
 * urgent byte; SIGURG is raised if it has not been consumed via
 * MSG_OOB).  MSG_PEEK leaves copied_seq and skb->used untouched.
 * Returns bytes copied or a negative errno.
 */
static int
tcp_read(struct sock *sk, unsigned char *to,
     int len, int nonblock, unsigned flags)
{
    int copied = 0;
    struct sk_buff *skb;
    unsigned long offset;
    int err;

    if (len == 0)
        return 0;

    if (len < 0)
        return -EINVAL;

    err=verify_area(VERIFY_WRITE,to,len);
    if(err)
        return err;

    /* A listening socket has no data stream. */
    if (sk->state == TCP_LISTEN)
        return -ENOTCONN;

    /* Out-of-band reads take the urgent-data path. */
    if (flags & MSG_OOB)
        return tcp_read_urg(sk, nonblock, to, len, flags);

    sk->inuse = 1;
    skb=skb_peek(&sk->rqueue);

    DPRINTF((DBG_TCP, "tcp_read(sk=%X, to=%X, len=%d, nonblock=%d, flags=%X)\n",
        sk, to, len, nonblock, flags));

    while(len > 0) {
        /* Wait until the next in-sequence byte is queued. */
        while(skb == NULL || skb->used || before(sk->copied_seq+1, skb->h.th->seq)) {
            DPRINTF((DBG_TCP, "skb = %X:\n", skb));
            cleanup_rbuf(sk);
            if (sk->err)
            {
                int tmp;

                release_sock(sk);
                if (copied)
                {
                    DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
                        copied));
                    return(copied);
                }
                tmp = -sk->err;
                sk->err = 0;
                return(tmp);
            }

            if (sk->state == TCP_CLOSE)
            {
                release_sock(sk);
                if (copied) {
                    DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
                        copied));
                    return(copied);
                }
                /* First read after close is a clean EOF. */
                if (!sk->done) {
                    sk->done = 1;
                    return(0);
                }
                return(-ENOTCONN);
            }

            if (sk->shutdown & RCV_SHUTDOWN)
            {
                release_sock(sk);
                if (copied == 0) sk->done = 1;
                DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
                return(copied);
            }

            if (nonblock || copied)
            {
                release_sock(sk);
                if(sk->debug)
                    printk("read: EAGAIN\n");
                if (copied)
                {
                    DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
                        copied));
                    return(copied);
                }
                return(-EAGAIN);
            }

            if ((flags & MSG_PEEK) && copied != 0)
            {
                release_sock(sk);
                DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
                return(copied);
            }

            DPRINTF((DBG_TCP, "tcp_read about to sleep. state = %d\n",
                sk->state));
            release_sock(sk);

            /* Re-check with interrupts off so an event that fired
             * while the socket was released is not missed. */
            cli();
            if (sk->shutdown & RCV_SHUTDOWN || sk->err != 0) {
                sk->inuse = 1;
                sti();
                continue;
            }

            skb = skb_peek(&sk->rqueue);
            if (skb == NULL || before(sk->copied_seq+1, skb->h.th->seq)) {
                if(sk->debug)
                    printk("Read wait sleep\n");
                interruptible_sleep_on(sk->sleep);
                if(sk->debug)
                    printk("Read wait wakes\n");
                if (current->signal & ~current->blocked) {
                    sti();
                    if (copied) {
                        DPRINTF((DBG_TCP, "tcp_read: returning %d\n",
                            copied));
                        return(copied);
                    }
                    return(-ERESTARTSYS);
                }
            }
            sk->inuse = 1;
            sti();
            DPRINTF((DBG_TCP, "tcp_read woke up. \n"));

            skb=skb_peek(&sk->rqueue);

        }

        /* Next byte is the urgent byte: handle the mark. */
        if (sk->urg_data && sk->copied_seq+1 == sk->urg_seq) {
            if (sk->urg_data == URG_READ) {
                /* Already consumed out of band: step over it. */
                if (copied || (flags & MSG_PEEK)) {
                    release_sock(sk);
                    return copied;
                }
                sk->urg_data = 0;
                sk->copied_seq++;
            } else {
                /* Unread urgent byte: stop here and signal. */
                release_sock(sk);
                if (copied)
                    return copied;
                send_sig(SIGURG, current, 0);
                return -EINTR;
            }
        }

        /* Offset of the next unread byte within this skb's data. */
        offset = sk->copied_seq+1 - skb->h.th->seq;

        /* A SYN consumes sequence space but carries no data. */
        if (skb->h.th->syn) offset--;

        if (offset < skb->len)
        {
            unsigned long used = skb->len - offset;
            if (len < used)
                used = len;

            /* Never read past the urgent mark. */
            if (sk->urg_data && sk->urg_seq - (sk->copied_seq+1) < used)
                used = sk->urg_seq - (sk->copied_seq+1);

            memcpy_tofs(to,((unsigned char *)skb->h.th) +
                skb->h.th->doff*4 + offset, used);
            copied += used;
            len -= used;
            to += used;

            if (!(flags & MSG_PEEK))
                sk->copied_seq += used;

            /* Whole skb consumed: let cleanup_rbuf() free it. */
            if (!(flags & MSG_PEEK) && (used + offset >= skb->len))
                skb->used = 1;
        }
        else
        {
            /* Entirely behind us (e.g. pure SYN): mark consumed. */
            skb->used = 1;
        }

        skb =(struct sk_buff *)skb->next;
    }

    cleanup_rbuf(sk);
    release_sock(sk);
    DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
    if (copied == 0 && nonblock)
        return(-EAGAIN);
    return(copied);
}
1511
1512
1513
1514
1515
1516
/*
 * shutdown() on the send side: flush any pending partial frame,
 * build a FIN segment and either transmit it or append it to the
 * write queue behind unsent data, then move the connection to
 * FIN_WAIT1 (from ESTABLISHED) or FIN_WAIT2.  Only SEND_SHUTDOWN
 * is acted on; a FIN already in flight makes this a no-op.
 */
void
tcp_shutdown(struct sock *sk, int how)
{
    struct sk_buff *buff;
    struct tcphdr *t1, *th;
    struct proto *prot;
    int tmp;
    struct device *dev = NULL;

    /* FIN already sent -- nothing more to do. */
    if (sk->state == TCP_FIN_WAIT1 || sk->state == TCP_FIN_WAIT2) return;
    if (!(how & SEND_SHUTDOWN)) return;
    sk->inuse = 1;

    /* Push out any coalesced partial data before the FIN. */
    if (sk->partial)
        tcp_send_partial(sk);

    prot =(struct proto *)sk->prot;
    th =(struct tcphdr *)&sk->dummy_th;
    /* Drop the lock: the allocation may sleep (GFP_KERNEL). */
    release_sock(sk);
    buff = prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
    if (buff == NULL) return;
    sk->inuse = 1;

    DPRINTF((DBG_TCP, "tcp_shutdown_send buff = %X\n", buff));
    buff->mem_addr = buff;
    buff->mem_len = MAX_RESET_SIZE;
    buff->sk = sk;
    buff->len = sizeof(*t1);
    t1 =(struct tcphdr *) buff->data;

    /* IP/link headers first. */
    tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
                 IPPROTO_TCP, sk->opt,
                 sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
    if (tmp < 0) {
        buff->free=1;
        prot->wfree(sk,buff->mem_addr, buff->mem_len);
        release_sock(sk);
        DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
        return;
    }

    t1 =(struct tcphdr *)((char *)t1 +tmp);
    buff->len += tmp;
    buff->dev = dev;
    memcpy(t1, th, sizeof(*t1));
    t1->seq = ntohl(sk->send_seq);
    /* The FIN consumes one sequence number. */
    sk->send_seq++;
    buff->h.seq = sk->send_seq;
    t1->ack = 1;
    t1->ack_seq = ntohl(sk->acked_seq);
    t1->window = ntohs(sk->window=tcp_select_window(sk));
    t1->fin = 1;
    t1->rst = 0;
    t1->doff = sizeof(*t1)/4;
    tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

    /* Data still queued: the FIN must go out after it, so append. */
    if (sk->wback != NULL) {
        buff->free=0;
        buff->next = NULL;
        sk->wback->next = buff;
        sk->wback = buff;
        buff->magic = TCP_WRITE_QUEUE_MAGIC;
    } else {
        sk->prot->queue_xmit(sk, dev, buff, 0);
    }

    if (sk->state == TCP_ESTABLISHED) sk->state = TCP_FIN_WAIT1;
    else sk->state = TCP_FIN_WAIT2;

    release_sock(sk);
}
1602
1603
1604 static int
1605 tcp_recvfrom(struct sock *sk, unsigned char *to,
1606 int to_len, int nonblock, unsigned flags,
1607 struct sockaddr_in *addr, int *addr_len)
1608 {
1609 struct sockaddr_in sin;
1610 int len;
1611 int err;
1612 int result;
1613
1614
1615
1616
1617 err = verify_area(VERIFY_WRITE,addr_len,sizeof(long));
1618 if(err)
1619 return err;
1620 len = get_fs_long(addr_len);
1621 if(len > sizeof(sin))
1622 len = sizeof(sin);
1623 err=verify_area(VERIFY_WRITE, addr, len);
1624 if(err)
1625 return err;
1626
1627 result=tcp_read(sk, to, to_len, nonblock, flags);
1628
1629 if (result < 0) return(result);
1630
1631 sin.sin_family = AF_INET;
1632 sin.sin_port = sk->dummy_th.dest;
1633 sin.sin_addr.s_addr = sk->daddr;
1634
1635 memcpy_tofs(addr, &sin, len);
1636 put_fs_long(len, addr_len);
1637 return(result);
1638 }
1639
1640
1641
/*
 * Send an RST segment in reply to 'th' (received from saddr on 'dev').
 * Used when a segment arrives for a nonexistent or dead connection.
 * Runs without any socket context (buff->sk is NULL).
 */
static void
tcp_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
	  struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	int tmp;

	/* May be called from interrupt context: atomic allocation only. */
	buff = prot->wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
		return;

	DPRINTF((DBG_TCP, "tcp_reset buff = %X\n", buff));
	buff->mem_addr = buff;
	buff->mem_len = MAX_RESET_SIZE;
	buff->len = sizeof(*t1);
	buff->sk = NULL;
	buff->dev = dev;

	t1 =(struct tcphdr *) buff->data;

	/* Lay down the IP header in front of the TCP header. */
	tmp = prot->build_header(buff, saddr, daddr, &dev, IPPROTO_TCP, opt,
				 sizeof(struct tcphdr),tos,ttl);
	if (tmp < 0) {
		buff->free = 1;
		prot->wfree(NULL, buff->mem_addr, buff->mem_len);
		return;
	}
	t1 =(struct tcphdr *)((char *)t1 +tmp);
	buff->len += tmp;
	/* Clone the offending header, then swap ports and rewrite flags. */
	memcpy(t1, th, sizeof(*t1));

	t1->dest = th->source;
	t1->source = th->dest;
	t1->rst = 1;
	t1->window = 0;

	/*
	 * RFC 793 reset generation: if the incoming segment had an ACK,
	 * the RST takes its sequence number from that ACK; otherwise the
	 * RST carries seq 0 and ACKs the incoming segment (SYN counts as
	 * one octet).
	 */
	if(th->ack)
	{
		t1->ack=0;
		t1->seq=th->ack_seq;
		t1->ack_seq=0;
	}
	else
	{
		t1->ack=1;
		if(!th->syn)
			t1->ack_seq=htonl(th->seq);
		else
			t1->ack_seq=htonl(th->seq+1);
		t1->seq=0;
	}

	t1->syn = 0;
	t1->urg = 0;
	t1->fin = 0;
	t1->psh = 0;
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, saddr, daddr, sizeof(*t1), NULL);
	prot->queue_xmit(NULL, dev, buff, 1);
}
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719 static void
1720 tcp_options(struct sock *sk, struct tcphdr *th)
1721 {
1722 unsigned char *ptr;
1723 int length=(th->doff*4)-sizeof(struct tcphdr);
1724 int mss_seen = 0;
1725
1726 ptr = (unsigned char *)(th + 1);
1727
1728 while(length>0)
1729 {
1730 int opcode=*ptr++;
1731 int opsize=*ptr++;
1732 switch(opcode)
1733 {
1734 case TCPOPT_EOL:
1735 return;
1736 case TCPOPT_NOP:
1737 length-=2;
1738 continue;
1739
1740 default:
1741 if(opsize<=2)
1742 return;
1743 switch(opcode)
1744 {
1745 case TCPOPT_MSS:
1746 if(opsize==4 && th->syn)
1747 {
1748 sk->mtu=min(sk->mtu,ntohs(*(unsigned short *)ptr));
1749 mss_seen = 1;
1750 }
1751 break;
1752
1753 }
1754 ptr+=opsize-2;
1755 length-=opsize;
1756 }
1757 }
1758 if (th->syn) {
1759 if (! mss_seen)
1760 sk->mtu=min(sk->mtu, 536);
1761 }
1762 sk->mss = min(sk->max_window, sk->mtu);
1763 }
1764
/*
 * Return the classful (A/B/C) network mask for 'dst', both the
 * argument and the result being in network byte order.
 */
static inline unsigned long default_mask(unsigned long dst)
{
	unsigned long host = ntohl(dst);

	if (IN_CLASSA(host))
		return htonl(IN_CLASSA_NET);
	return IN_CLASSB(host) ? htonl(IN_CLASSB_NET)
			       : htonl(IN_CLASSC_NET);
}
1774
1775
1776
1777
1778
1779
1780
1781
/*
 * Handle an incoming SYN on a listening socket: allocate and initialise
 * a child socket in SYN_RECV state, answer with SYN+ACK (carrying our
 * MSS option), and queue the original skb on the listener so accept()
 * can pick the new connection up.
 */
static void
tcp_conn_request(struct sock *sk, struct sk_buff *skb,
		 unsigned long daddr, unsigned long saddr,
		 struct options *opt, struct device *dev)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	unsigned char *ptr;
	struct sock *newsk;
	struct tcphdr *th;
	int tmp;

	DPRINTF((DBG_TCP, "tcp_conn_request(sk = %X, skb = %X, daddr = %X, sadd4= %X, \n"
		"                  opt = %X, dev = %X)\n",
		sk, skb, daddr, saddr, opt, dev));

	th = skb->h.th;

	/* Wake the listener; a dead listener answers with RST. */
	if (!sk->dead) {
		sk->data_ready(sk,0);
	} else {
		DPRINTF((DBG_TCP, "tcp_conn_request on dead socket\n"));
		tcp_reset(daddr, saddr, th, sk->prot, opt, dev, sk->ip_tos,sk->ip_ttl);
		kfree_skb(skb, FREE_READ);
		return;
	}

	/* Accept-queue full: silently drop the SYN (the peer retries). */
	if (sk->ack_backlog >= sk->max_ack_backlog) {
		kfree_skb(skb, FREE_READ);
		return;
	}

	newsk = (struct sock *) kmalloc(sizeof(struct sock), GFP_ATOMIC);
	if (newsk == NULL) {
		kfree_skb(skb, FREE_READ);
		return;
	}

	DPRINTF((DBG_TCP, "newsk = %X\n", newsk));
	/* Clone the listener, then reset every per-connection field. */
	memcpy((void *)newsk,(void *)sk, sizeof(*newsk));
	newsk->wback = NULL;
	newsk->wfront = NULL;
	newsk->rqueue = NULL;
	newsk->send_head = NULL;
	newsk->send_tail = NULL;
	newsk->back_log = NULL;
	newsk->rtt = TCP_CONNECT_TIME << 3;
	newsk->rto = TCP_CONNECT_TIME;
	newsk->mdev = 0;
	newsk->max_window = 0;
	newsk->cong_window = 1;		/* slow start begins at one segment */
	newsk->cong_count = 0;
	newsk->ssthresh = 0;
	newsk->backoff = 0;
	newsk->blog = 0;
	newsk->intr = 0;
	newsk->proc = 0;
	newsk->done = 0;
	newsk->partial = NULL;
	newsk->pair = NULL;
	newsk->wmem_alloc = 0;
	newsk->rmem_alloc = 0;

	newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;

	newsk->err = 0;
	newsk->shutdown = 0;
	newsk->ack_backlog = 0;
	newsk->acked_seq = skb->h.th->seq+1;	/* the SYN occupies one octet */
	newsk->fin_seq = skb->h.th->seq;
	newsk->copied_seq = skb->h.th->seq;
	newsk->state = TCP_SYN_RECV;
	newsk->timeout = 0;
	/* Clock-driven initial sequence number. */
	newsk->send_seq = jiffies * SEQ_TICK - seq_offset;
	newsk->window_seq = newsk->send_seq;
	newsk->rcv_ack_seq = newsk->send_seq;
	newsk->urg_data = 0;
	newsk->retransmits = 0;
	newsk->destroy = 0;
	newsk->timer.data = (unsigned long)newsk;
	newsk->timer.function = &net_timer;
	newsk->dummy_th.source = skb->h.th->dest;
	newsk->dummy_th.dest = skb->h.th->source;

	/* The child is bound to the addresses this SYN arrived on. */
	newsk->daddr = saddr;
	newsk->saddr = daddr;

	put_sock(newsk->num,newsk);
	newsk->dummy_th.res1 = 0;
	newsk->dummy_th.doff = 6;
	newsk->dummy_th.fin = 0;
	newsk->dummy_th.syn = 0;
	newsk->dummy_th.rst = 0;
	newsk->dummy_th.psh = 0;
	newsk->dummy_th.ack = 0;
	newsk->dummy_th.urg = 0;
	newsk->dummy_th.res2 = 0;
	newsk->acked_seq = skb->h.th->seq + 1;
	newsk->copied_seq = skb->h.th->seq;

	/* Inherit TTL; take TOS from the incoming packet. */
	newsk->ip_ttl=sk->ip_ttl;
	newsk->ip_tos=skb->ip_hdr->tos;

	/*
	 * Choose an initial MTU: user override first, otherwise 576 for
	 * off-net peers (classful-subnet test), device limit in any case.
	 */
	if (sk->user_mss)
		newsk->mtu = sk->user_mss;
	else {
#ifdef SUBNETSARELOCAL
		if ((saddr ^ daddr) & default_mask(saddr))
#else
		if ((saddr ^ daddr) & dev->pa_mask)
#endif
			newsk->mtu = 576 - HEADER_SIZE;
		else
			newsk->mtu = MAX_WINDOW;
	}

	newsk->mtu = min(newsk->mtu, dev->mtu - HEADER_SIZE);

	/* Let the peer's options (MSS) further constrain us. */
	tcp_options(newsk,skb->h.th);

	buff = newsk->prot->wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL) {
		sk->err = -ENOMEM;
		newsk->dead = 1;
		release_sock(newsk);
		kfree_skb(skb, FREE_READ);
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_SYN_SIZE;
	buff->len = sizeof(struct tcphdr)+4;	/* header + 4-byte MSS option */
	buff->sk = newsk;

	t1 =(struct tcphdr *) buff->data;

	/* Build IP header for the SYN+ACK. */
	tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &dev,
				     IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);

	if (tmp < 0) {
		sk->err = tmp;
		buff->free=1;
		kfree_skb(buff,FREE_WRITE);
		newsk->dead = 1;
		release_sock(newsk);
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
		return;
	}

	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)t1 +tmp);

	memcpy(t1, skb->h.th, sizeof(*t1));
	buff->h.seq = newsk->send_seq;

	/* Fill in the SYN+ACK proper. */
	t1->dest = skb->h.th->source;
	t1->source = newsk->dummy_th.source;
	t1->seq = ntohl(newsk->send_seq++);	/* our SYN takes one octet */
	t1->ack = 1;
	newsk->window = tcp_select_window(newsk);
	t1->window = ntohs(newsk->window);
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->psh = 0;
	t1->syn = 1;
	t1->ack_seq = ntohl(skb->h.th->seq+1);
	t1->doff = sizeof(*t1)/4+1;		/* header plus one option word */

	/* Append our MSS option: kind 2, length 4, 16-bit value. */
	ptr =(unsigned char *)(t1+1);
	ptr[0] = 2;
	ptr[1] = 4;
	ptr[2] = ((newsk->mtu) >> 8) & 0xff;
	ptr[3] =(newsk->mtu) & 0xff;

	tcp_send_check(t1, daddr, saddr, sizeof(*t1)+4, newsk);
	newsk->prot->queue_xmit(newsk, dev, buff, 0);

	/* Retransmit the SYN+ACK if no ACK arrives in time. */
	reset_timer(newsk, TIME_WRITE , TCP_CONNECT_TIME);
	skb->sk = newsk;

	/* Charge the skb's memory to the child, not the listener. */
	sk->rmem_alloc -= skb->mem_len;
	newsk->rmem_alloc += skb->mem_len;

	/* Park the SYN on the listener so accept() finds the child. */
	skb_queue_tail(&sk->rqueue,skb);
	sk->ack_backlog++;
	release_sock(newsk);
}
1994
1995
/*
 * Close a TCP socket.  Discards undelivered receive data (remembering
 * whether that calls for a reset), then acts according to the current
 * connection state: either just advance the state machine or build and
 * send/queue a FIN.  'timeout' non-zero means the caller has already
 * waited and wants the faster teardown path.
 */
static void
tcp_close(struct sock *sk, int timeout)
{
	struct sk_buff *buff;
	int need_reset = 0;
	struct tcphdr *t1, *th;
	struct proto *prot;
	struct device *dev=NULL;
	int tmp;

	DPRINTF((DBG_TCP, "tcp_close((struct sock *)%X, %d)\n",sk, timeout));
	sk->inuse = 1;
	sk->keepopen = 1;
	sk->shutdown = SHUTDOWN_MASK;

	if (!sk->dead)
		sk->state_change(sk);

	/*
	 * Drain the receive queue.  If any unread data is discarded the
	 * close must announce it (RST on the FIN we build below).
	 */
	if (skb_peek(&sk->rqueue) != NULL)
	{
		struct sk_buff *skb;
		if(sk->debug)
			printk("Clean rcv queue\n");
		while((skb=skb_dequeue(&sk->rqueue))!=NULL)
		{
			if(skb->len > 0 && after(skb->h.th->seq + skb->len + 1 , sk->copied_seq))
				need_reset = 1;
			kfree_skb(skb, FREE_READ);
		}
		if(sk->debug)
			printk("Cleaned.\n");
	}
	sk->rqueue = NULL;

	/* Push out any partially assembled frame before the FIN. */
	if (sk->partial) {
		tcp_send_partial(sk);
	}

	switch(sk->state) {
		case TCP_FIN_WAIT1:
		case TCP_FIN_WAIT2:
		case TCP_LAST_ACK:
			/* FIN already sent: just arm a safety timer. */
			reset_timer(sk, TIME_CLOSE, 4 * sk->rto);
			if (timeout) tcp_time_wait(sk);
			release_sock(sk);
			return;
		case TCP_TIME_WAIT:
			if (timeout) {
				sk->state = TCP_CLOSE;
			}
			release_sock(sk);
			return;
		case TCP_LISTEN:
			sk->state = TCP_CLOSE;
			release_sock(sk);
			return;
		case TCP_CLOSE:
			release_sock(sk);
			return;
		case TCP_CLOSE_WAIT:
		case TCP_ESTABLISHED:
		case TCP_SYN_SENT:
		case TCP_SYN_RECV:
			/* Active states: we must emit a FIN of our own. */
			prot =(struct proto *)sk->prot;
			th =(struct tcphdr *)&sk->dummy_th;
			buff = prot->wmalloc(sk, MAX_FIN_SIZE, 1, GFP_ATOMIC);
			if (buff == NULL) {
				/* No memory now: fall back to ESTABLISHED and
				 * retry the close shortly via a timer. */
				release_sock(sk);
				if (sk->state != TCP_CLOSE_WAIT)
					sk->state = TCP_ESTABLISHED;
				reset_timer(sk, TIME_CLOSE, 100);
				return;
			}
			buff->mem_addr = buff;
			buff->mem_len = MAX_FIN_SIZE;
			buff->sk = sk;
			buff->free = 1;
			buff->len = sizeof(*t1);
			t1 =(struct tcphdr *) buff->data;

			/* Build the IP header in front of the FIN. */
			tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
					   IPPROTO_TCP, sk->opt,
					   sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl);
			if (tmp < 0) {
				kfree_skb(buff,FREE_WRITE);
				DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
				release_sock(sk);
				return;
			}

			t1 =(struct tcphdr *)((char *)t1 +tmp);
			buff->len += tmp;
			buff->dev = dev;
			memcpy(t1, th, sizeof(*t1));
			t1->seq = ntohl(sk->send_seq);
			sk->send_seq++;		/* FIN consumes a sequence number */
			buff->h.seq = sk->send_seq;
			t1->ack = 1;

			/* No point delaying ACKs once we are closing. */
			sk->delay_acks = 0;
			t1->ack_seq = ntohl(sk->acked_seq);
			t1->window = ntohs(sk->window=tcp_select_window(sk));
			t1->fin = 1;
			t1->rst = need_reset;	/* signal discarded rcv data */
			t1->doff = sizeof(*t1)/4;
			tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

			/* Send now, or queue behind pending write data. */
			if (sk->wfront == NULL) {
				prot->queue_xmit(sk, dev, buff, 0);
			} else {
				reset_timer(sk, TIME_WRITE, sk->rto);
				buff->next = NULL;
				if (sk->wback == NULL) {
					sk->wfront = buff;
				} else {
					sk->wback->next = buff;
				}
				sk->wback = buff;
				buff->magic = TCP_WRITE_QUEUE_MAGIC;
			}

			/* NOTE(review): CLOSE_WAIT -> FIN_WAIT2 (rather than
			 * LAST_ACK) matches the historical code — confirm
			 * before changing. */
			if (sk->state == TCP_CLOSE_WAIT) {
				sk->state = TCP_FIN_WAIT2;
			} else {
				sk->state = TCP_FIN_WAIT1;
			}
	}
	release_sock(sk);
}
2141
2142
2143
2144
2145
2146
/*
 * Transmit frames from the write queue while the peer's window, the
 * congestion window and the retransmit state all permit it.  Frames
 * that were already acknowledged while queued are simply freed.
 */
static void
tcp_write_xmit(struct sock *sk)
{
	struct sk_buff *skb;

	DPRINTF((DBG_TCP, "tcp_write_xmit(sk=%X)\n", sk));

	/* A zapped (reset) socket must not transmit anything. */
	if(sk->zapped)
		return;

	/* Send while: data fits in the offered window, we are not in the
	 * middle of a retransmit run (unless this data is already acked
	 * territory), and the congestion window has room. */
	while(sk->wfront != NULL &&
	      before(sk->wfront->h.seq, sk->window_seq +1) &&
	      (sk->retransmits == 0 ||
	       sk->timeout != TIME_WRITE ||
	       before(sk->wfront->h.seq, sk->rcv_ack_seq +1))
	      && sk->packets_out < sk->cong_window) {
		skb = sk->wfront;
		IS_SKB(skb);
		sk->wfront = skb->next;
		if (sk->wfront == NULL) sk->wback = NULL;
		skb->next = NULL;
		/* Corrupted queue entry: drop the whole queue rather than
		 * chase a bad pointer chain. */
		if (skb->magic != TCP_WRITE_QUEUE_MAGIC) {
			printk("tcp.c skb with bad magic(%X) on write queue. Squashing "
			       "queue\n", skb->magic);
			sk->wfront = NULL;
			sk->wback = NULL;
			return;
		}
		skb->magic = 0;
		DPRINTF((DBG_TCP, "Sending a packet.\n"));

		/* Already acknowledged while waiting?  Free, don't send. */
		if (before(skb->h.seq, sk->rcv_ack_seq +1)) {
			sk->retransmits = 0;
			kfree_skb(skb, FREE_WRITE);
			if (!sk->dead) sk->write_space(sk);
		} else {
			sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
		}
	}
}
2190
2191
2192
2193
2194
2195
2196 void
2197 sort_send(struct sock *sk)
2198 {
2199 struct sk_buff *list = NULL;
2200 struct sk_buff *skb,*skb2,*skb3;
2201
2202 for (skb = sk->send_head; skb != NULL; skb = skb2) {
2203 skb2 = (struct sk_buff *)skb->link3;
2204 if (list == NULL || before (skb2->h.seq, list->h.seq)) {
2205 skb->link3 = list;
2206 sk->send_tail = skb;
2207 list = skb;
2208 } else {
2209 for (skb3 = list; ; skb3 = (struct sk_buff *)skb3->link3) {
2210 if (skb3->link3 == NULL ||
2211 before(skb->h.seq, skb3->link3->h.seq)) {
2212 skb->link3 = skb3->link3;
2213 skb3->link3 = skb;
2214 if (skb->link3 == NULL) sk->send_tail = skb;
2215 break;
2216 }
2217 }
2218 }
2219 }
2220 sk->send_head = list;
2221 }
2222
2223
2224
/*
 * Process the ACK (and window) fields of an incoming segment.
 * Responsibilities: validate the ack number, track the peer's window
 * (including window shrinkage, which moves frames back from the
 * retransmit list to the write queue), run slow-start/congestion
 * avoidance, update RTT/RTO from timed segments, free acknowledged
 * frames, kick further transmission or zero-window probing, and drive
 * the closing states (TIME_WAIT / LAST_ACK / FIN_WAIT2).
 * Returns 1 if the ACK was acceptable, 0 if it was out of range.
 */
static int
tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
{
	unsigned long ack;
	int flag = 0;	/* bit 1: not a pure ack; bit 2: acked retransmit; bit 4: window shrink/progress */

	/* A reset connection acknowledges nothing. */
	if(sk->zapped)
		return(1);

	ack = ntohl(th->ack_seq);
	DPRINTF((DBG_TCP, "tcp_ack ack=%d, window=%d, "
		"sk->rcv_ack_seq=%d, sk->window_seq = %d\n",
		ack, ntohs(th->window), sk->rcv_ack_seq, sk->window_seq));

	/* Track the biggest window ever offered; it bounds our mss. */
	if (ntohs(th->window) > sk->max_window) {
		sk->max_window = ntohs(th->window);
		sk->mss = min(sk->max_window, sk->mtu);
	}

	/* Any ack answers an outstanding keepalive probe. */
	if (sk->retransmits && sk->timeout == TIME_KEEPOPEN)
		sk->retransmits = 0;

	/* Ack outside the expected range: ignore (old duplicate) or
	 * reject entirely if it acks data never sent. */
	if (after(ack, sk->send_seq+1) || before(ack, sk->rcv_ack_seq-1)) {
		if (after(ack, sk->send_seq) ||
		   (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)) {
			return(0);
		}
		if (sk->keepopen) {
			reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
		}
		return(1);
	}

	/* Segment carries more than just the header -> not a pure ack. */
	if (len != th->doff*4) flag |= 1;

	/*
	 * The peer shrank its window below what we already committed to.
	 * Pull the frames beyond the new right edge off the retransmit
	 * list and put them back on the front of the write queue.
	 */
	if (after(sk->window_seq, ack+ntohs(th->window))) {
		struct sk_buff *skb;
		struct sk_buff *skb2;
		struct sk_buff *wskb = NULL;

		skb2 = sk->send_head;
		sk->send_head = NULL;
		sk->send_tail = NULL;

		flag |= 4;

		sk->window_seq = ack + ntohs(th->window);
		cli();		/* the two lists must change atomically */
		while (skb2 != NULL) {
			skb = skb2;
			skb2 = (struct sk_buff *)skb->link3;
			skb->link3 = NULL;
			if (after(skb->h.seq, sk->window_seq)) {
				/* Beyond the new window: back to write queue. */
				if (sk->packets_out > 0) sk->packets_out--;

				if (skb->next != NULL) {
					skb_unlink(skb);
				}

				skb->magic = TCP_WRITE_QUEUE_MAGIC;
				if (wskb == NULL) {
					skb->next = sk->wfront;
					sk->wfront = skb;
				} else {
					skb->next = wskb->next;
					wskb->next = skb;
				}
				if (sk->wback == wskb) sk->wback = skb;
				wskb = skb;
			} else {
				/* Still inside the window: keep on the
				 * (rebuilt) retransmit list. */
				if (sk->send_head == NULL) {
					sk->send_head = skb;
					sk->send_tail = skb;
				} else {
					sk->send_tail->link3 = skb;
					sk->send_tail = skb;
				}
				skb->link3 = NULL;
			}
		}
		sti();
	}

	/* Keep head/tail/packets_out mutually consistent. */
	if (sk->send_tail == NULL || sk->send_head == NULL) {
		sk->send_head = NULL;
		sk->send_tail = NULL;
		sk->packets_out= 0;
	}

	sk->window_seq = ack + ntohs(th->window);

	/* Congestion control: slow start below ssthresh (exponential),
	 * linear growth above it — one extra segment per window acked. */
	if (sk->timeout == TIME_WRITE &&
	    sk->cong_window < 2048 && after(ack, sk->rcv_ack_seq)) {
		if (sk->cong_window < sk->ssthresh)
			sk->cong_window++;
		else {
			if (sk->cong_count >= sk->cong_window) {
				sk->cong_window++;
				sk->cong_count = 0;
			} else
				sk->cong_count++;
		}
	}

	DPRINTF((DBG_TCP, "tcp_ack: Updating rcv ack sequence.\n"));
	sk->rcv_ack_seq = ack;

	/* A zero-window probe was answered and the window reopened:
	 * clear backoff and recompute the RTO. */
	if (sk->timeout == TIME_PROBE0) {
		if (sk->wfront != NULL &&
		    ! before (sk->window_seq, sk->wfront->h.seq)) {
			sk->retransmits = 0;
			sk->backoff = 0;

			sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
			if (sk->rto > 120*HZ)
				sk->rto = 120*HZ;
			if (sk->rto < 1*HZ)
				sk->rto = 1*HZ;
		}
	}

	/* Free every fully acknowledged frame on the retransmit list. */
	while(sk->send_head != NULL) {
		/* Sanity: the list must be sequence-ordered. */
		if (sk->send_head->link3 &&
		    after(sk->send_head->h.seq, sk->send_head->link3->h.seq)) {
			printk("INET: tcp.c: *** bug send_list out of order.\n");
			sort_send(sk);
		}

		if (before(sk->send_head->h.seq, ack+1)) {
			struct sk_buff *oskb;

			if (sk->retransmits) {
				/* This ack covers retransmitted data; its RTT
				 * sample is ambiguous (Karn) — flag it. */
				flag |= 2;

				if (sk->send_head->link3)
					sk->retransmits = 1;
				else
					sk->retransmits = 0;
			}

			if (sk->packets_out > 0) sk->packets_out --;
			DPRINTF((DBG_TCP, "skb=%X skb->h.seq = %d acked ack=%d\n",
				sk->send_head, sk->send_head->h.seq, ack));

			if (!sk->dead) sk->write_space(sk);

			oskb = sk->send_head;

			/* RTT estimation (Jacobson mean/deviation),
			 * skipped for retransmitted segments. */
			if (!(flag&2)) {
				long m;

				m = jiffies - oskb->when;
				m -= (sk->rtt >> 3);
				sk->rtt += m;		/* srtt += (m - srtt)/8, scaled */
				if (m < 0)
					m = -m;
				m -= (sk->mdev >> 2);
				sk->mdev += m;		/* mdev += (|m| - mdev)/4, scaled */

				sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1;
				if (sk->rto > 120*HZ)
					sk->rto = 120*HZ;
				if (sk->rto < 1*HZ)
					sk->rto = 1*HZ;
				sk->backoff = 0;
			}
			flag |= (2|4);

			cli();		/* unlink atomically w.r.t. interrupts */

			oskb = sk->send_head;
			IS_SKB(oskb);
			sk->send_head =(struct sk_buff *)oskb->link3;
			if (sk->send_head == NULL) {
				sk->send_tail = NULL;
			}

			skb_unlink(oskb);
			sti();
			oskb->magic = 0;
			kfree_skb(oskb, FREE_WRITE);
			if (!sk->dead) sk->write_space(sk);
		} else {
			break;
		}
	}

	/*
	 * Decide what to do next: transmit more, start a zero-window
	 * probe, or fall back to keepalive / timewait timers.
	 */
	if (sk->wfront != NULL) {
		if (after (sk->window_seq+1, sk->wfront->h.seq) &&
			(sk->retransmits == 0 ||
			 sk->timeout != TIME_WRITE ||
			 before(sk->wfront->h.seq, sk->rcv_ack_seq +1))
			&& sk->packets_out < sk->cong_window) {
			flag |= 1;
			tcp_write_xmit(sk);
		} else if (before(sk->window_seq, sk->wfront->h.seq) &&
			   sk->send_head == NULL &&
			   sk->ack_backlog == 0 &&
			   sk->state != TCP_TIME_WAIT) {
			/* Data waiting but zero window: probe it. */
			reset_timer(sk, TIME_PROBE0, sk->rto);
		}
	} else {
		if (sk->send_head == NULL && sk->ack_backlog == 0 &&
		    sk->state != TCP_TIME_WAIT && !sk->keepopen) {
			DPRINTF((DBG_TCP, "Nothing to do, going to sleep.\n"));
			if (!sk->dead) sk->write_space(sk);

			if (sk->keepopen)
				reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
			else
				delete_timer(sk);
		} else {
			/* NOTE(review): comparing state against keepopen is
			 * odd but historical — confirm before changing. */
			if (sk->state != (unsigned char) sk->keepopen) {
				reset_timer(sk, TIME_WRITE, sk->rto);
			}
			if (sk->state == TCP_TIME_WAIT) {
				reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			}
		}
	}

	/* Everything drained: flush a pending partial frame. */
	if (sk->packets_out == 0 && sk->partial != NULL &&
	    sk->wfront == NULL && sk->send_head == NULL) {
		flag |= 1;
		tcp_send_partial(sk);
	}

	/* In TIME_WAIT with everything acked in both directions: close. */
	if (sk->state == TCP_TIME_WAIT) {
		if (!sk->dead)
			sk->state_change(sk);
		if (sk->rcv_ack_seq == sk->send_seq && sk->acked_seq == sk->fin_seq) {
			flag |= 1;
			sk->state = TCP_CLOSE;
			sk->shutdown = SHUTDOWN_MASK;
		}
	}

	/* Our FIN has been acked: finish the close handshake. */
	if (sk->state == TCP_LAST_ACK || sk->state == TCP_FIN_WAIT2) {
		if (!sk->dead) sk->state_change(sk);
		if (sk->rcv_ack_seq == sk->send_seq) {
			flag |= 1;
			if (sk->acked_seq != sk->fin_seq) {
				tcp_time_wait(sk);
			} else {
				DPRINTF((DBG_TCP, "tcp_ack closing socket - %X\n", sk));
				tcp_send_ack(sk->send_seq, sk->acked_seq, sk,
					     th, sk->daddr);
				sk->shutdown = SHUTDOWN_MASK;
				sk->state = TCP_CLOSE;
			}
		}
	}

	/* Fast-retransmit-ish kick: a pure ack that made no progress, or
	 * an overdue head-of-line frame, triggers retransmission. */
	if (((!flag) || (flag&4)) && sk->send_head != NULL &&
	    (((flag&2) && sk->retransmits) ||
	     (sk->send_head->when + sk->rto < jiffies))) {
		ip_do_retransmit(sk, 1);
		reset_timer(sk, TIME_WRITE, sk->rto);
	}

	DPRINTF((DBG_TCP, "leaving tcp_ack\n"));
	return(1);
}
2592
2593
2594
2595
2596
2597
2598
/*
 * Accept the data portion of an incoming segment: insert the skb into
 * the (sequence-ordered) receive queue, advance acked_seq across any
 * now-contiguous queued segments, schedule or send an ACK, wake the
 * reader, and handle FIN-related state transitions.
 * Always returns 0; the skb is either queued or freed here.
 */
static int
tcp_data(struct sk_buff *skb, struct sock *sk,
	 unsigned long saddr, unsigned short len)
{
	struct sk_buff *skb1, *skb2;
	struct tcphdr *th;
	int dup_dumped=0;

	th = skb->h.th;
	print_th(th);
	/* From here on skb->len is the payload length only. */
	skb->len = len -(th->doff*4);

	DPRINTF((DBG_TCP, "tcp_data len = %d sk = %X:\n", skb->len, sk));

	sk->bytes_rcv += skb->len;
	/* Empty segment with no interesting flags: ack if needed, drop. */
	if (skb->len == 0 && !th->fin && !th->urg && !th->psh) {
		if (!th->ack) tcp_send_ack(sk->send_seq, sk->acked_seq,sk, th, saddr);
		kfree_skb(skb, FREE_READ);
		return(0);
	}

	/* Data after we shut the receive side down: reset the peer. */
	if (sk->shutdown & RCV_SHUTDOWN) {
		sk->acked_seq = th->seq + skb->len + th->syn + th->fin;
		tcp_reset(sk->saddr, sk->daddr, skb->h.th,
			sk->prot, NULL, skb->dev, sk->ip_tos, sk->ip_ttl);
		sk->state = TCP_CLOSE;
		sk->err = EPIPE;
		sk->shutdown = SHUTDOWN_MASK;
		DPRINTF((DBG_TCP, "tcp_data: closing socket - %X\n", sk));
		kfree_skb(skb, FREE_READ);
		if (!sk->dead) sk->state_change(sk);
		return(0);
	}

	/*
	 * Insert the skb into the receive queue in sequence order,
	 * scanning backwards from the tail (new data usually belongs at
	 * the end).  Exact-duplicate segments replace the old copy.
	 */
	if (sk->rqueue == NULL) {
		DPRINTF((DBG_TCP, "tcp_data: skb = %X:\n", skb));
#ifdef OLDWAY
		sk->rqueue = skb;
		skb->next = skb;
		skb->prev = skb;
		skb->list = &sk->rqueue;
#else
		skb_queue_head(&sk->rqueue,skb);
#endif
		skb1= NULL;
	} else {
		DPRINTF((DBG_TCP, "tcp_data adding to chain sk = %X:\n", sk));
		for(skb1=sk->rqueue->prev; ; skb1 =(struct sk_buff *)skb1->prev) {
			if(sk->debug)
			{
				printk("skb1=%p :", skb1);
				printk("skb1->h.th->seq = %ld: ", skb1->h.th->seq);
				printk("skb->h.th->seq = %ld\n",skb->h.th->seq);
				printk("copied_seq = %ld acked_seq = %ld\n", sk->copied_seq,
					sk->acked_seq);
			}
#ifdef OLD
			if (after(th->seq+1, skb1->h.th->seq)) {
				skb->prev = skb1;
				skb->next = skb1->next;
				skb->next->prev = skb;
				skb1->next = skb;
				if (skb1 == sk->rqueue) sk->rqueue = skb;
				break;
			}
			if (skb1->prev == sk->rqueue) {
				skb->next= skb1;
				skb->prev = skb1->prev;
				skb->prev->next = skb;
				skb1->prev = skb;
				skb1 = NULL;

				break;
			}
#else
			/* Same sequence and at least as long: replace the
			 * queued duplicate with this (bigger) copy. */
			if (th->seq==skb1->h.th->seq && skb->len>= skb1->len)
			{
				skb_append(skb1,skb);
				skb_unlink(skb1);
				kfree_skb(skb1,FREE_READ);
				dup_dumped=1;
				skb1=NULL;
				break;
			}
			/* Found the first queued segment we come after. */
			if (after(th->seq+1, skb1->h.th->seq))
			{
				skb_append(skb1,skb);
				break;
			}
			/* Walked all the way round: we go at the front. */
			if (skb1 == sk->rqueue)
			{
				skb_queue_head(&sk->rqueue, skb);
				break;
			}
#endif
		}
		DPRINTF((DBG_TCP, "skb = %X:\n", skb));
	}

	/* Stash the sequence number just past this segment (SYN and FIN
	 * each occupy one octet) in the header's ack_seq field. */
	th->ack_seq = th->seq + skb->len;
	if (th->syn) th->ack_seq++;
	if (th->fin) th->ack_seq++;

	if (before(sk->acked_seq, sk->copied_seq)) {
		printk("*** tcp.c:tcp_data bug acked < copied\n");
		sk->acked_seq = sk->copied_seq;
	}

	/*
	 * If this segment is in sequence (or fills a hole), advance
	 * acked_seq over it and over any following queued segments that
	 * are now contiguous, handling FINs as we go.
	 */
	if ((!dup_dumped && (skb1 == NULL || skb1->acked)) || before(th->seq, sk->acked_seq+1)) {
		if (before(th->seq, sk->acked_seq+1)) {
			if (after(th->ack_seq, sk->acked_seq))
				sk->acked_seq = th->ack_seq;
			skb->acked = 1;

			if (skb->h.th->fin) {
				if (!sk->dead) sk->state_change(sk);
				sk->shutdown |= RCV_SHUTDOWN;
			}

			/* Sweep forward over now-contiguous segments. */
			for(skb2 = (struct sk_buff *)skb->next;
			    skb2 !=(struct sk_buff *) sk->rqueue;
			    skb2 = (struct sk_buff *)skb2->next) {
				if (before(skb2->h.th->seq, sk->acked_seq+1)) {
					if (after(skb2->h.th->ack_seq, sk->acked_seq))
					{
						long old_acked_seq = sk->acked_seq;
						sk->acked_seq = skb2->h.th->ack_seq;
						/* Shrink the advertised window by
						 * the amount newly acknowledged. */
						if((int)(sk->acked_seq - old_acked_seq) >0)
						{
							int new_window=sk->window-sk->acked_seq+
								old_acked_seq;
							if(new_window<0)
								new_window=0;
							sk->window = new_window;
						}
					}
					skb2->acked = 1;

					if (skb2->h.th->fin) {
						sk->shutdown |= RCV_SHUTDOWN;
						if (!sk->dead) sk->state_change(sk);
					}

					/* Force an immediate ack below. */
					sk->ack_backlog = sk->max_ack_backlog;
				} else {
					break;
				}
			}

			/*
			 * Delayed-ack policy: ack immediately when delaying is
			 * off, the backlog is full, too much data is unacked,
			 * or a FIN arrived; otherwise just start the timer.
			 */
			if (!sk->delay_acks ||
			    sk->ack_backlog >= sk->max_ack_backlog ||
			    sk->bytes_rcv > sk->max_unacked || th->fin) {
				/* fall through to the immediate ack below */
			} else {
				sk->ack_backlog++;
				if(sk->debug)
					printk("Ack queued.\n");
				reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
			}
		}
	}

	/*
	 * Out-of-order segment: if receive memory is tight, shed unacked
	 * queue head entries, then ack what we do have (a duplicate ack
	 * telling the peer what we expect).
	 */
	if (!skb->acked) {
		while (sk->prot->rspace(sk) < sk->mtu) {
			skb1 = skb_peek(&sk->rqueue);
			if (skb1 == NULL) {
				printk("INET: tcp.c:tcp_data memory leak detected.\n");
				break;
			}

			/* Never discard data already acknowledged. */
			if (skb1->acked) {
				break;
			}

			skb_unlink(skb1);
#ifdef OLDWAY
			if (skb1->prev == skb1) {
				sk->rqueue = NULL;
			} else {
				sk->rqueue = (struct sk_buff *)skb1->prev;
				skb1->next->prev = skb1->prev;
				skb1->prev->next = skb1->next;
			}
#endif
			kfree_skb(skb1, FREE_READ);
		}
		tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
		sk->ack_backlog++;
		reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
	} else {
		tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
	}

	/* Wake any reader sleeping on this socket. */
	if (!sk->dead) {
		if(sk->debug)
			printk("Data wakeup.\n");
		sk->data_ready(sk,0);
	} else {
		DPRINTF((DBG_TCP, "data received on dead socket.\n"));
	}

	/* Both FINs seen and everything acked: move towards close. */
	if (sk->state == TCP_FIN_WAIT2 &&
	    sk->acked_seq == sk->fin_seq && sk->rcv_ack_seq == sk->send_seq) {
		DPRINTF((DBG_TCP, "tcp_data: entering last_ack state sk = %X\n", sk));

		sk->shutdown = SHUTDOWN_MASK;
		sk->state = TCP_LAST_ACK;
		if (!sk->dead) sk->state_change(sk);
	}

	return(0);
}
2846
2847
/*
 * Handle the urgent pointer of an incoming segment: validate it,
 * signal SIGURG to the owning process/group on a new urgent event,
 * and latch the single urgent byte into sk->urg_data (0x100 marks it
 * valid).  With SO_OOBINLINE the data is simply left in the stream.
 * Always returns 0.
 */
static int
tcp_urg(struct sock *sk, struct tcphdr *th, unsigned long saddr, unsigned long len)
{
	unsigned long ptr;
	extern int kill_pg(int pg, int sig, int priv);
	extern int kill_proc(int pid, int sig, int priv);

	if (!sk->dead)
		sk->data_ready(sk,0);

	/* Inline mode: strip URG, mark push, deliver in-band. */
	if (sk->urginline) {
		th->urg = 0;
		th->psh = 1;
		return 0;
	}

	/* BSD-compatible interpretation: urg_ptr points one past the
	 * urgent byte, so back up by one. */
	ptr = ntohs(th->urg_ptr);
	if (ptr)
		ptr--;

	/* Urgent byte must lie inside this segment. */
	if (th->doff*4 + ptr >= len)
		return 0;

	/* Already read past it? */
	if (after(sk->copied_seq+1, th->seq+ptr))
		return 0;

	/* Same urgent event seen before: nothing new to signal. */
	if (sk->urg_data && sk->urg_seq == th->seq+ptr)
		return 0;

	/* New urgent data: notify the owner (process or process group). */
	if (!sk->urg_data || sk->urg_data == URG_READ) {
		if (sk->proc != 0) {
			if (sk->proc > 0) {
				kill_proc(sk->proc, SIGURG, 1);
			} else {
				kill_pg(-sk->proc, SIGURG, 1);
			}
		}
	}

	/* Latch the byte; 0x100 flags sk->urg_data as holding valid data. */
	sk->urg_data = 0x100 | *(ptr + th->doff*4 + (unsigned char *) th);
	sk->urg_seq = th->seq + ptr;
	return 0;
}
2898
2899
2900
/*
 * Process a received FIN: record the peer's final sequence number and
 * advance the connection state machine accordingly.  The ACK for the
 * FIN is generated elsewhere (ack_backlog is bumped here).
 * Always returns 0.
 */
static int
tcp_fin(struct sock *sk, struct tcphdr *th,
	unsigned long saddr, struct device *dev)
{
	DPRINTF((DBG_TCP, "tcp_fin(sk=%X, th=%X, saddr=%X, dev=%X)\n",
		sk, th, saddr, dev));

	if (!sk->dead) {
		sk->state_change(sk);
	}

	switch(sk->state) {
		case TCP_SYN_RECV:
		case TCP_SYN_SENT:
		case TCP_ESTABLISHED:
			/* Peer closed first: we go to CLOSE_WAIT. */
			sk->fin_seq = th->seq+1;
			sk->state = TCP_CLOSE_WAIT;
			if (th->rst) sk->shutdown = SHUTDOWN_MASK;
			break;

		case TCP_CLOSE_WAIT:
		case TCP_FIN_WAIT2:
			/* Retransmitted FIN — already handled. */
			break;

		case TCP_FIN_WAIT1:
			/* Simultaneous close; NOTE(review): historical code
			 * goes to FIN_WAIT2 here rather than CLOSING. */
			sk->fin_seq = th->seq+1;
			sk->state = TCP_FIN_WAIT2;
			break;

		default:
		case TCP_TIME_WAIT:
			sk->state = TCP_LAST_ACK;

			/* Restart the 2MSL-style close timer. */
			reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
			return(0);
	}
	/* Ensure the FIN gets acknowledged promptly. */
	sk->ack_backlog++;

	return(0);
}
2944
2945
2946
/*
 * accept() on a listening TCP socket: wait (unless O_NONBLOCK) for a
 * queued connection request, then return the child socket that
 * tcp_conn_request() attached to the queued skb.  Errors are reported
 * via sk->err with a NULL return.
 */
static struct sock *
tcp_accept(struct sock *sk, int flags)
{
	struct sock *newsk;
	struct sk_buff *skb;

	DPRINTF((DBG_TCP, "tcp_accept(sk=%X, flags=%X, addr=%s)\n",
		sk, flags, in_ntoa(sk->saddr)));

	/* accept() is only valid on a listening socket. */
	if (sk->state != TCP_LISTEN) {
		sk->err = EINVAL;
		return(NULL);
	}

	/* Interrupts off while we test-and-sleep on the queue. */
	cli();
	sk->inuse = 1;
	while((skb = get_firstr(sk)) == NULL) {
		if (flags & O_NONBLOCK) {
			sti();
			release_sock(sk);
			sk->err = EAGAIN;
			return(NULL);
		}

		release_sock(sk);
		interruptible_sleep_on(sk->sleep);
		/* Woken by a signal rather than a connection? */
		if (current->signal & ~current->blocked) {
			sti();
			sk->err = ERESTARTSYS;
			return(NULL);
		}
		sk->inuse = 1;
	}
	sti();

	/* The queued SYN skb carries the child socket built earlier. */
	newsk = skb->sk;

	kfree_skb(skb, FREE_READ);
	sk->ack_backlog--;
	release_sock(sk);
	return(newsk);
}
2995
2996
2997
/*
 * Active open: validate the user's address, initialise sequence state,
 * build and transmit the initial SYN (with our MSS option), and move
 * the socket to SYN_SENT with a retransmit timer armed.
 * Returns 0 on success or a negative errno.
 */
static int
tcp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len)
{
	struct sk_buff *buff;
	struct sockaddr_in sin;
	struct device *dev=NULL;
	unsigned char *ptr;
	int tmp;
	struct tcphdr *t1;
	int err;

	if (sk->state != TCP_CLOSE) return(-EISCONN);
	if (addr_len < 8) return(-EINVAL);

	err=verify_area(VERIFY_READ, usin, addr_len);
	if(err)
		return err;

	memcpy_fromfs(&sin,usin, min(sizeof(sin), addr_len));

	if (sin.sin_family && sin.sin_family != AF_INET) return(-EAFNOSUPPORT);

	DPRINTF((DBG_TCP, "TCP connect daddr=%s\n", in_ntoa(sin.sin_addr.s_addr)));

	/* Connecting to a broadcast address makes no sense for TCP. */
	if (chk_addr(sin.sin_addr.s_addr) == IS_BROADCAST) {
		DPRINTF((DBG_TCP, "TCP connection to broadcast address not allowed\n"));
		return(-ENETUNREACH);
	}

	/* Refuse to connect to ourselves on the same port. */
	if(sk->saddr == sin.sin_addr.s_addr && sk->num==ntohs(sin.sin_port))
		return -EBUSY;

	sk->inuse = 1;
	sk->daddr = sin.sin_addr.s_addr;
	/* Clock-driven initial sequence number. */
	sk->send_seq = jiffies * SEQ_TICK - seq_offset;
	sk->window_seq = sk->send_seq;
	sk->rcv_ack_seq = sk->send_seq -1;
	sk->err = 0;
	sk->dummy_th.dest = sin.sin_port;
	release_sock(sk);

	buff = sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
	if (buff == NULL) {
		return(-ENOMEM);
	}
	sk->inuse = 1;
	buff->mem_addr = buff;
	buff->mem_len = MAX_SYN_SIZE;
	buff->len = 24;		/* TCP header (20) + 4-byte MSS option */
	buff->sk = sk;
	buff->free = 1;
	t1 = (struct tcphdr *) buff->data;

	/* Build the IP header; this also resolves the output device. */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
				     IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl);
	if (tmp < 0) {
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		release_sock(sk);
		return(-ENETUNREACH);
	}
	buff->len += tmp;
	t1 = (struct tcphdr *)((char *)t1 +tmp);

	/* Fill in the SYN from the template header. */
	memcpy(t1,(void *)&(sk->dummy_th), sizeof(*t1));
	t1->seq = ntohl(sk->send_seq++);	/* SYN consumes one octet */
	buff->h.seq = sk->send_seq;
	t1->ack = 0;
	t1->window = 2;
	t1->res1=0;
	t1->res2=0;
	t1->rst = 0;
	t1->urg = 0;
	t1->psh = 0;
	t1->syn = 1;
	t1->urg_ptr = 0;
	t1->doff = 6;		/* 20-byte header + 4 bytes of options */

	/*
	 * Pick the MSS we will advertise: user override first, else 576
	 * for off-net peers, bounded by the device MTU in any case.
	 */
	if (sk->user_mss)
		sk->mtu = sk->user_mss;
	else {
#ifdef SUBNETSARELOCAL
		if ((sk->saddr ^ sk->daddr) & default_mask(sk->saddr))
#else
		if ((sk->saddr ^ sk->daddr) & dev->pa_mask)
#endif
			sk->mtu = 576 - HEADER_SIZE;
		else
			sk->mtu = MAX_WINDOW;
	}

	sk->mtu = min(sk->mtu, dev->mtu - HEADER_SIZE);

	/* Append the MSS option: kind 2, length 4, 16-bit value. */
	ptr = (unsigned char *)(t1+1);
	ptr[0] = 2;
	ptr[1] = 4;
	ptr[2] = (sk->mtu) >> 8;
	ptr[3] = (sk->mtu) & 0xff;
	tcp_send_check(t1, sk->saddr, sk->daddr,
		       sizeof(struct tcphdr) + 4, sk);

	/* Enter SYN_SENT and arm the SYN retransmit timer. */
	sk->state = TCP_SYN_SENT;
	sk->rtt = TCP_CONNECT_TIME;
	reset_timer(sk, TIME_WRITE, TCP_CONNECT_TIME);
	sk->retransmits = TCP_RETR2 - TCP_SYN_RETRIES;

	sk->prot->queue_xmit(sk, dev, buff, 0);

	release_sock(sk);
	return(0);
}
3115
3116
3117
/*
 * Decide whether an incoming segment lies within the receive window.
 * Returns 1 if the segment should be processed further, 0 if it was
 * rejected (any needed ACK/reset has already been generated here).
 */
static int
tcp_sequence(struct sock *sk, struct tcphdr *th, short len,
	struct options *opt, unsigned long saddr, struct device *dev)
{
	DPRINTF((DBG_TCP, "tcp_sequence(sk=%X, th=%X, len = %d, opt=%d, saddr=%X)\n",
		sk, th, len, opt, saddr));

	/*
	 * Accept when the first byte, or the last byte, of the segment
	 * falls inside the window, or when the segment straddles the
	 * whole window.  'len' includes the TCP header, so th->doff*4
	 * is subtracted to get the data length.
	 */
	if (between(th->seq, sk->acked_seq, sk->acked_seq + sk->window)||
	   between(th->seq + len-(th->doff*4), sk->acked_seq + 1,
		sk->acked_seq + sk->window) ||
	  (before(th->seq, sk->acked_seq) &&
	   after(th->seq + len -(th->doff*4), sk->acked_seq + sk->window))) {
		return(1);
	}
	DPRINTF((DBG_TCP, "tcp_sequence: rejecting packet.\n"));

	/*
	 * During the handshake an out-of-window segment kills the
	 * embryonic connection with a reset.
	 */
	if(sk->state==TCP_SYN_SENT||sk->state==TCP_SYN_RECV)
	{
		tcp_reset(sk->saddr,sk->daddr,th,sk->prot,NULL,dev, sk->ip_tos,sk->ip_ttl);
		return(1);
	}

	/*
	 * Segment entirely beyond the window: re-ACK (unless it is a
	 * reset) so the peer can resynchronize.
	 */
	if (after(th->seq, sk->acked_seq + sk->window)) {
		if(!th->rst)
			tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
		return(0);
	}

#ifdef undef
	/*
	 * (disabled) Tolerate pure old ACKs without data, FIN or SYN,
	 * instead of re-acknowledging them.
	 */
	if (th->ack && len == (th->doff * 4) &&
	  after(th->seq, sk->acked_seq - 32767) &&
	  !th->fin && !th->syn) return(1);
#endif

	if (!th->rst) {
		/* Old duplicate: acknowledge it so the sender moves on. */
		tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
	}
	return(0);
}
3183
3184
3185
3186
3187
/*
 * Main TCP input routine, called for every TCP segment handed up by
 * IP.  Looks up the owning socket, validates the segment and then
 * dispatches on the connection state.  'redo' is set when the packet
 * is being replayed from a socket's back_log queue, in which case the
 * checksum check and back_log handling are skipped.
 * Always returns 0; the skb is either queued or freed here.
 */
int
tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
	unsigned long daddr, unsigned short len,
	unsigned long saddr, int redo, struct inet_protocol * protocol)
{
	struct tcphdr *th;
	struct sock *sk;

	if (!skb) {
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv skb = NULL\n"));
		return(0);
	}
#if 0
	if (!protocol) {
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv protocol = NULL\n"));
		return(0);
	}

	if (!opt) {
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv opt = NULL\n"));
	}
#endif
	if (!dev) {
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv dev = NULL\n"));
		return(0);
	}
	th = skb->h.th;

	/* Find the socket for this 4-tuple. */
	sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
	DPRINTF((DBG_TCP, "<<\n"));
	DPRINTF((DBG_TCP, "len = %d, redo = %d, skb=%X\n", len, redo, skb));

	/* A zapped (reset) socket is treated as if it did not exist. */
	if (sk!=NULL && sk->zapped)
		sk=NULL;

	if (sk) {
		DPRINTF((DBG_TCP, "sk = %X:\n", sk));
	}

	if (!redo) {
		/* Fresh packet off the wire: verify the checksum first. */
		if (tcp_check(th, len, saddr, daddr )) {
			skb->sk = NULL;
			DPRINTF((DBG_TCP, "packet dropped with bad checksum.\n"));
			if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: bad checksum\n");
			kfree_skb(skb,FREE_READ);
			return(0);
		}

		/* No socket: answer with a reset, unless it was itself a reset. */
		if (sk == NULL) {
			if (!th->rst)
			{
				th->seq = ntohl(th->seq);
				tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev,skb->ip_hdr->tos,255);
			}
			skb->sk = NULL;
			kfree_skb(skb, FREE_READ);
			return(0);
		}

		skb->len = len;
		skb->sk = sk;
		skb->acked = 0;
		skb->used = 0;
		skb->free = 0;
		skb->saddr = daddr;
		skb->daddr = saddr;

		th->seq = ntohl(th->seq);	/* host byte order from here on */

		/*
		 * If the socket is busy, park the skb on its circular
		 * back_log list; release_sock() will replay it with
		 * redo = 1 once the owner is done.
		 */
		cli();
		if (sk->inuse) {
			if (sk->back_log == NULL) {
				sk->back_log = skb;
				skb->next = skb;
				skb->prev = skb;
			} else {
				skb->next = sk->back_log;
				skb->prev = sk->back_log->prev;
				skb->prev->next = skb;
				skb->next->prev = skb;
			}
			sti();
			return(0);
		}
		sk->inuse = 1;
		sti();
	} else {
		if (!sk) {
			DPRINTF((DBG_TCP, "tcp.c: tcp_rcv bug sk=NULL redo = 1\n"));
			return(0);
		}
	}

	if (!sk->prot) {
		DPRINTF((DBG_TCP, "tcp.c: tcp_rcv sk->prot = NULL \n"));
		return(0);
	}

	/* Charge the receive buffer; drop if we are out of space. */
	if (sk->rmem_alloc + skb->mem_len >= sk->rcvbuf) {
		skb->sk = NULL;
		DPRINTF((DBG_TCP, "dropping packet due to lack of buffer space.\n"));
		kfree_skb(skb, FREE_READ);
		release_sock(sk);
		return(0);
	}
	sk->rmem_alloc += skb->mem_len;

	DPRINTF((DBG_TCP, "About to do switch.\n"));

	/* Per-state processing. */
	switch(sk->state) {
		/*
		 * LAST_ACK differs from the synchronized states below only
		 * in its RST handling; otherwise it falls through.
		 */
		case TCP_LAST_ACK:
			if (th->rst) {
				sk->zapped=1;
				sk->err = ECONNRESET;
				sk->state = TCP_CLOSE;
				sk->shutdown = SHUTDOWN_MASK;
				if (!sk->dead) {
					sk->state_change(sk);
				}
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
			/* fall through */
		case TCP_ESTABLISHED:
		case TCP_CLOSE_WAIT:
		case TCP_FIN_WAIT1:
		case TCP_FIN_WAIT2:
		case TCP_TIME_WAIT:
			/* Out-of-window segments are answered inside tcp_sequence(). */
			if (!tcp_sequence(sk, th, len, opt, saddr,dev)) {
				if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: not in seq\n");
#ifdef undef
				if(!th->rst)
					tcp_send_ack(sk->send_seq, sk->acked_seq,
						sk, th, saddr);
#endif
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			if (th->rst) {
				sk->zapped=1;
				/* The connection is gone; report why. */
				sk->err = ECONNRESET;

				if (sk->state == TCP_CLOSE_WAIT) {
					sk->err = EPIPE;
				}

				sk->state = TCP_CLOSE;
				sk->shutdown = SHUTDOWN_MASK;
				if (!sk->dead) {
					sk->state_change(sk);
				}
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
			/* A SYN in a synchronized state is an error: reset. */
			if (
#if 0
			if ((opt && (opt->security != 0 ||
				opt->compartment != 0)) ||
#endif
				th->syn) {
				sk->err = ECONNRESET;
				sk->state = TCP_CLOSE;
				sk->shutdown = SHUTDOWN_MASK;
				tcp_reset(daddr, saddr, th, sk->prot, opt,dev, sk->ip_tos,sk->ip_ttl);
				if (!sk->dead) {
					sk->state_change(sk);
				}
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
			if (th->ack) {
				if (!tcp_ack(sk, th, saddr, len)) {
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}
			}
			if (th->urg) {
				if (tcp_urg(sk, th, saddr, len)) {
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}
			}

			/* Queue the payload; tcp_data() non-zero means "free the skb". */
			if (tcp_data(skb, sk, saddr, len)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			if (th->fin && tcp_fin(sk, th, saddr, dev)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			release_sock(sk);
			return(0);

		case TCP_CLOSE:
			/* A dead or still-addressed closed socket just drops input. */
			if (sk->dead || sk->daddr) {
				DPRINTF((DBG_TCP, "packet received for closed,dead socket\n"));
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			if (!th->rst) {
				if (!th->ack)
					th->ack_seq = 0;
				tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
			}
			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);

		case TCP_LISTEN:
			if (th->rst) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
			/* An ACK at a listener is bogus: reset the sender. */
			if (th->ack) {
				tcp_reset(daddr, saddr, th, sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			if (th->syn) {
#if 0
				if (opt->security != 0 || opt->compartment != 0) {
					tcp_reset(daddr, saddr, th, prot, opt,dev);
					release_sock(sk);
					return(0);
				}
#endif
				/* Passive open: create the embryonic connection. */
				tcp_conn_request(sk, skb, daddr, saddr, opt, dev);
				release_sock(sk);
				return(0);
			}

			kfree_skb(skb, FREE_READ);
			release_sock(sk);
			return(0);

		default:
			/* Remaining states: sequence-check, then share the SYN_SENT path. */
			if (!tcp_sequence(sk, th, len, opt, saddr,dev)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
			/* fall through */
		case TCP_SYN_SENT:
			if (th->rst) {
				sk->err = ECONNREFUSED;
				sk->state = TCP_CLOSE;
				sk->shutdown = SHUTDOWN_MASK;
				sk->zapped = 1;
				if (!sk->dead) {
					sk->state_change(sk);
				}
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
#if 0
			if (opt->security != 0 || opt->compartment != 0) {
				sk->err = ECONNRESET;
				sk->state = TCP_CLOSE;
				sk->shutdown = SHUTDOWN_MASK;
				tcp_reset(daddr, saddr, th, sk->prot, opt, dev);
				if (!sk->dead) {
					wake_up_interruptible(sk->sleep);
				}
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}
#endif
			if (!th->ack) {
				/* Simultaneous open: a bare SYN moves us to SYN_RECV. */
				if (th->syn) {
					sk->state = TCP_SYN_RECV;
				}

				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			switch(sk->state) {
				case TCP_SYN_SENT:
					if (!tcp_ack(sk, th, saddr, len)) {
						tcp_reset(daddr, saddr, th,
							sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl);
						kfree_skb(skb, FREE_READ);
						release_sock(sk);
						return(0);
					}

					/* A valid ACK without a SYN is simply ignored here. */
					if (!th->syn) {
						kfree_skb(skb, FREE_READ);
						release_sock(sk);
						return(0);
					}

					/* SYN+ACK accepted: ack it, then finish below. */
					sk->acked_seq = th->seq+1;
					sk->fin_seq = th->seq;
					tcp_send_ack(sk->send_seq, th->seq+1,
						sk, th, sk->daddr);
					/* fall through */
				case TCP_SYN_RECV:
					if (!tcp_ack(sk, th, saddr, len)) {
						tcp_reset(daddr, saddr, th,
							sk->prot, opt, dev,sk->ip_tos,sk->ip_ttl);
						kfree_skb(skb, FREE_READ);
						release_sock(sk);
						return(0);
					}
					sk->state = TCP_ESTABLISHED;

					/* Parse options (e.g. MSS) now the handshake is complete. */
					tcp_options(sk, th);
					sk->dummy_th.dest = th->source;
					sk->copied_seq = sk->acked_seq-1;
					if (!sk->dead) {
						sk->state_change(sk);
					}

					/* Peer never announced a window: assume a tiny one. */
					if (sk->max_window == 0) {
						sk->max_window = 32;
						sk->mss = min(sk->max_window, sk->mtu);
					}

					if (th->urg) {
						if (tcp_urg(sk, th, saddr, len)) {
							kfree_skb(skb, FREE_READ);
							release_sock(sk);
							return(0);
						}
					}
					if (tcp_data(skb, sk, saddr, len))
						kfree_skb(skb, FREE_READ);

					if (th->fin) tcp_fin(sk, th, saddr, dev);
					release_sock(sk);
					return(0);
			}

			/* States that arrived via the 'default' label end up here. */
			if (th->urg) {
				if (tcp_urg(sk, th, saddr, len)) {
					kfree_skb(skb, FREE_READ);
					release_sock(sk);
					return(0);
				}
			}

			if (tcp_data(skb, sk, saddr, len)) {
				kfree_skb(skb, FREE_READ);
				release_sock(sk);
				return(0);
			}

			if (!th->fin) {
				release_sock(sk);
				return(0);
			}
			tcp_fin(sk, th, saddr, dev);
			release_sock(sk);
			return(0);
	}
}
3618
3619
3620
3621
3622
3623
3624 static void
3625 tcp_write_wakeup(struct sock *sk)
3626 {
3627 struct sk_buff *buff;
3628 struct tcphdr *t1;
3629 struct device *dev=NULL;
3630 int tmp;
3631
3632 if (sk->zapped)
3633 return;
3634
3635 if (sk -> state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) return;
3636
3637 buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
3638 if (buff == NULL) return;
3639
3640 buff->mem_addr = buff;
3641 buff->mem_len = MAX_ACK_SIZE;
3642 buff->len = sizeof(struct tcphdr);
3643 buff->free = 1;
3644 buff->sk = sk;
3645 DPRINTF((DBG_TCP, "in tcp_write_wakeup\n"));
3646 t1 = (struct tcphdr *) buff->data;
3647
3648
3649 tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
3650 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
3651 if (tmp < 0) {
3652 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
3653 return;
3654 }
3655
3656 buff->len += tmp;
3657 t1 = (struct tcphdr *)((char *)t1 +tmp);
3658
3659 memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
3660
3661
3662
3663
3664
3665 t1->seq = ntohl(sk->send_seq-1);
3666 t1->ack = 1;
3667 t1->res1= 0;
3668 t1->res2= 0;
3669 t1->rst = 0;
3670 t1->urg = 0;
3671 t1->psh = 0;
3672 t1->fin = 0;
3673 t1->syn = 0;
3674 t1->ack_seq = ntohl(sk->acked_seq);
3675 t1->window = ntohs(tcp_select_window(sk));
3676 t1->doff = sizeof(*t1)/4;
3677 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
3678
3679
3680
3681
3682 sk->prot->queue_xmit(sk, dev, buff, 1);
3683 }
3684
3685
3686
3687
3688
/*
 * Zero-window probe.  Clones the headers (and at most one byte of
 * data) from the oldest unsent skb on the write queue into a fresh
 * buffer, refreshes the ack/window fields and retransmits it, then
 * backs off the probe timer exponentially.
 */
void
tcp_send_probe0(struct sock *sk)
{
	unsigned char *raw;
	struct iphdr *iph;
	struct sk_buff *skb2, *skb;
	int len, hlen, data;
	struct tcphdr *t1;
	struct device *dev;

	if (sk->zapped)
		return;		/* after a reset there is nothing to probe */

	if (sk -> state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT &&
	  sk -> state != TCP_FIN_WAIT1 && sk->state != TCP_FIN_WAIT2)
		return;

	/* The oldest pending skb supplies the probe's headers and data byte. */
	skb = sk->wfront;
	if (skb == NULL)
		return;

	dev = skb->dev;

	if(dev==NULL)
	{
		printk("tcp_send_probe0: NULL device bug!\n");
		return;
	}
	IS_SKB(skb);

	raw = skb->data;
	iph = (struct iphdr *) (raw + dev->hard_header_len);

	/* Total header length: link + IP (ihl is in 32-bit words). */
	hlen = (iph->ihl * sizeof(unsigned long)) + dev->hard_header_len;
	data = skb->len - hlen - sizeof(struct tcphdr);
	len = hlen + sizeof(struct tcphdr) + (data ? 1 : 0);	/* at most one data byte */

	/* No atomic memory right now: retry the probe very soon. */
	if ((skb2 = alloc_skb(sizeof(struct sk_buff) + len, GFP_ATOMIC)) == NULL) {
		reset_timer (sk, TIME_PROBE0, 10);
		return;
	}

	skb2->arp = skb->arp;
	skb2->len = len;
	skb2->h.raw = (char *)(skb2->data);

	/* Charge the clone against the socket's write budget. */
	sk->wmem_alloc += skb2->mem_len;

	/* Copy headers (and the single data byte, if any) verbatim. */
	memcpy(skb2->h.raw, raw, len);

	skb2->h.raw += hlen;	/* now points at the TCP header */
	t1 = skb2->h.th;

	/* Refresh the ack and window fields; everything else is reused. */
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->res1 = 0;
	t1->ack = 1;
	t1->urg = 0;
	t1->res2 = 0;
	t1->window = ntohs(tcp_select_window(sk));
	t1->urg_ptr = 0;
	tcp_send_check(t1, sk->saddr, sk->daddr, len - hlen, sk);

	sk->prot->queue_xmit(sk, dev, skb2, 1);
	sk->backoff++;

	/* Exponential backoff, capped at two minutes. */
	sk->rto = min(sk->rto << 1, 120*HZ);
	reset_timer (sk, TIME_PROBE0, sk->rto);
	sk->retransmits++;
	sk->prot->retransmits ++;
}
3775
3776
3777
3778
3779
3780 int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen)
3781 {
3782 int val,err;
3783
3784 if(level!=SOL_TCP)
3785 return ip_setsockopt(sk,level,optname,optval,optlen);
3786
3787 if (optval == NULL)
3788 return(-EINVAL);
3789
3790 err=verify_area(VERIFY_READ, optval, sizeof(int));
3791 if(err)
3792 return err;
3793
3794 val = get_fs_long((unsigned long *)optval);
3795
3796 switch(optname)
3797 {
3798 case TCP_MAXSEG:
3799
3800
3801
3802
3803
3804
3805 if(val<1||val>MAX_WINDOW)
3806 return -EINVAL;
3807 sk->user_mss=val;
3808 return 0;
3809 case TCP_NODELAY:
3810 sk->nonagle=(val==0)?0:1;
3811 return 0;
3812 default:
3813 return(-ENOPROTOOPT);
3814 }
3815 }
3816
3817 int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen)
3818 {
3819 int val,err;
3820
3821 if(level!=SOL_TCP)
3822 return ip_getsockopt(sk,level,optname,optval,optlen);
3823
3824 switch(optname)
3825 {
3826 case TCP_MAXSEG:
3827 val=sk->user_mss;
3828 break;
3829 case TCP_NODELAY:
3830 val=sk->nonagle;
3831 break;
3832 default:
3833 return(-ENOPROTOOPT);
3834 }
3835 err=verify_area(VERIFY_WRITE, optlen, sizeof(int));
3836 if(err)
3837 return err;
3838 put_fs_long(sizeof(int),(unsigned long *) optlen);
3839
3840 err=verify_area(VERIFY_WRITE, optval, sizeof(int));
3841 if(err)
3842 return err;
3843 put_fs_long(val,(unsigned long *)optval);
3844
3845 return(0);
3846 }
3847
3848
/*
 * The TCP protocol operations vector handed to the inet layer.
 * Slot comments below follow the apparent struct proto layout
 * (declared in sock.h, not visible here) — verify against the header.
 */
struct proto tcp_prot = {
	sock_wmalloc,		/* wmalloc: allocate write buffer */
	sock_rmalloc,		/* rmalloc: allocate read buffer */
	sock_wfree,		/* wfree */
	sock_rfree,		/* rfree */
	sock_rspace,		/* rspace: free read buffer space */
	sock_wspace,		/* wspace: free write buffer space */
	tcp_close,
	tcp_read,
	tcp_write,
	tcp_sendto,
	tcp_recvfrom,
	ip_build_header,	/* build_header */
	tcp_connect,
	tcp_accept,
	ip_queue_xmit,		/* queue_xmit */
	tcp_retransmit,
	tcp_write_wakeup,
	tcp_read_wakeup,
	tcp_rcv,
	tcp_select,
	tcp_ioctl,
	NULL,			/* init: TCP needs no per-socket init hook */
	tcp_shutdown,
	tcp_setsockopt,
	tcp_getsockopt,
	128,			/* presumably max_header — confirm in sock.h */
	0,			/* presumably the retransmit counter — confirm */
	{NULL,},		/* socket hash array, initially empty */
	"TCP"			/* protocol name */
};