This source file includes following definitions.
- min
- print_th
- get_firstr
- diff
- tcp_time_wait
- tcp_retransmit
- tcp_err
- tcp_readable
- tcp_select
- tcp_ioctl
- tcp_check
- tcp_send_check
- tcp_send_partial
- tcp_send_ack
- tcp_build_header
- tcp_write
- tcp_sendto
- tcp_read_wakeup
- cleanup_rbuf
- tcp_read_urg
- tcp_read
- tcp_shutdown
- tcp_recvfrom
- tcp_reset
- tcp_conn_request
- tcp_close
- tcp_write_xmit
- sort_send
- tcp_ack
- tcp_data
- tcp_urg
- tcp_fin
- tcp_accept
- tcp_connect
- tcp_sequence
- tcp_options
- tcp_rcv
- tcp_write_wakeup
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20 #include <linux/types.h>
21 #include <linux/sched.h>
22 #include <linux/mm.h>
23 #include <linux/string.h>
24 #include <linux/socket.h>
25 #include <linux/sockios.h>
26 #include <linux/termios.h>
27 #include <linux/in.h>
28 #include <linux/fcntl.h>
29 #include "inet.h"
30 #include "timer.h"
31 #include "dev.h"
32 #include "ip.h"
33 #include "protocol.h"
34 #include "icmp.h"
35 #include "tcp.h"
36 #include "skbuff.h"
37 #include "sock.h"
38 #include "arp.h"
39 #include <linux/errno.h>
40 #include <linux/timer.h>
41 #include <asm/system.h>
42 #include <asm/segment.h>
43 #include <linux/mm.h>
44
45
#define tmax(a,b)(before((a),(b)) ?(b) :(a))
/* do { } while (0) wrapper so swap(); expands safely inside an unbraced
   if/else (a bare { ... }; would terminate the if statement early). */
#define swap(a,b) do {unsigned long c; c=a; a=b; b=c;} while (0)
48
49
/* Return the smaller of two unsigned values. */
static int
min(unsigned int a, unsigned int b)
{
	return (a <= b) ? a : b;
}
56
57
/*
 * Dump a TCP header to the console for debugging.
 * Only prints when the global debug level is exactly DBG_TCP.
 */
void
print_th(struct tcphdr *th)
{
	unsigned char *ptr;

	if (inet_debug != DBG_TCP) return;

	printk("TCP header:\n");
	/* Any options start immediately after the fixed header. */
	ptr =(unsigned char *)(th + 1);
	printk(" source=%d, dest=%d, seq =%d, ack_seq = %d\n",
		ntohs(th->source), ntohs(th->dest),
		ntohl(th->seq), ntohl(th->ack_seq));
	printk(" fin=%d, syn=%d, rst=%d, psh=%d, ack=%d, urg=%d res1=%d res2=%d\n",
		th->fin, th->syn, th->rst, th->psh, th->ack,
		th->urg, th->res1, th->res2);
	printk(" window = %d, check = %d urg_ptr = %d\n",
		ntohs(th->window), ntohs(th->check), ntohs(th->urg_ptr));
	printk(" doff = %d\n", th->doff);
	/* Only the first four option bytes are shown. */
	printk(" options = %d %d %d %d\n", ptr[0], ptr[1], ptr[2], ptr[3]);
}
78
79
80
81 static struct sk_buff *
82 get_firstr(struct sock *sk)
83 {
84 struct sk_buff *skb;
85
86 skb = sk->rqueue;
87 if (skb == NULL) return(NULL);
88 sk->rqueue =(struct sk_buff *)skb->next;
89 if (sk->rqueue == skb) {
90 sk->rqueue = NULL;
91 } else {
92 sk->rqueue->prev = skb->prev;
93 sk->rqueue->prev->next = sk->rqueue;
94 }
95 return(skb);
96 }
97
98
/*
 * Return the absolute distance between two sequence numbers.
 * The subtraction wraps modulo 2^32-style arithmetic, so the signed
 * result is meaningful even across sequence-space wraparound.
 */
static long
diff(unsigned long seq1, unsigned long seq2)
{
	long delta;

	delta = seq1 - seq2;
	return (delta > 0) ? delta : -delta;
}
110
111
112
113 static void
114 tcp_time_wait(struct sock *sk)
115 {
116 sk->state = TCP_TIME_WAIT;
117 sk->shutdown = SHUTDOWN_MASK;
118 if (!sk->dead) wake_up(sk->sleep);
119 sk->time_wait.len = TCP_TIMEWAIT_LEN;
120 sk->timeout = TIME_CLOSE;
121 reset_timer((struct timer *)&sk->time_wait);
122 }
123
124
125 static void
126 tcp_retransmit(struct sock *sk, int all)
127 {
128 if (all) {
129 ip_retransmit(sk, all);
130 return;
131 }
132
133 if (sk->cong_window > 4)
134 sk->cong_window = sk->cong_window / 2;
135 sk->exp_growth = 0;
136
137
138 ip_retransmit(sk, all);
139 }
140
141
142
143
144
145
146
147
148
149
/*
 * ICMP error handler for TCP.  'header' points at the returned TCP
 * header of the offending packet; 'err' packs the ICMP type in the
 * high byte and code in the low byte.
 */
void
tcp_err(int err, unsigned char *header, unsigned long daddr,
	unsigned long saddr, struct inet_protocol *protocol)
{
	struct tcphdr *th;
	struct sock *sk;

	DPRINTF((DBG_TCP, "TCP: tcp_err(%d, hdr=%X, daddr=%X saddr=%X, protocol=%X)\n",
					err, header, daddr, saddr, protocol));

	th =(struct tcphdr *)header;
	/* Look up the socket this error refers to. */
	sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
	print_th(th);

	if (sk == NULL) return;

	if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8)) {
		/* Source quench: just slow down, never below a window of 4. */
		if (sk->cong_window > 4) sk->cong_window--;
		return;
	}

	DPRINTF((DBG_TCP, "TCP: icmp_err got error\n"));
	/* Translate the ICMP code into a unix errno for the socket. */
	sk->err = icmp_err_convert[err & 0xff].errno;

	/* A fatal error during connection setup kills the connection
	   attempt outright. */
	if (icmp_err_convert[err & 0xff].fatal) {
		if (sk->state == TCP_SYN_SENT) {
			sk->state = TCP_CLOSE;
			sk->prot->close(sk, 0);
		}
	}
	return;
}
191
192
/*
 * Count how many bytes are available for reading on the socket,
 * walking the receive ring from the oldest buffer.  Counting stops at
 * a sequence gap or at the first PSH segment with data before it.
 * SYN and urgent bytes are excluded from the byte count.
 */
static int
tcp_readable(struct sock *sk)
{
	unsigned long counted;
	unsigned long amount;
	struct sk_buff *skb;
	int count=0;
	int sum;

	DPRINTF((DBG_TCP, "tcp_readable(sk=%X)\n", sk));

	if (sk == NULL || sk->rqueue == NULL) return(0);

	/* First sequence number not yet handed to the user. */
	counted = sk->copied_seq+1;
	amount = 0;
	skb =(struct sk_buff *)sk->rqueue->next;

	do {
		/* Safety valve against a corrupted (looping) queue. */
		count++;
		if (count > 20) {
			DPRINTF((DBG_TCP, "tcp_readable, more than 20 packets without a psh\n"));
			DPRINTF((DBG_TCP, "possible read_queue corruption.\n"));
			return(amount);
		}
		/* Gap in sequence space: nothing beyond here is readable. */
		if (before(counted, skb->h.th->seq)) break;
		/* Bytes in this segment not yet consumed. */
		sum = skb->len -(counted - skb->h.th->seq);
		if (skb->h.th->syn) sum++;
		if (skb->h.th->urg) {
			/* Urgent data is read out of band, so discount it. */
			sum -= ntohs(skb->h.th->urg_ptr);
		}
		if (sum >= 0) {
			amount += sum;
			if (skb->h.th->syn) amount--;
			counted += sum;
		}
		/* Data followed by PSH is deliverable now; stop counting. */
		if (amount && skb->h.th->psh) break;
		skb =(struct sk_buff *)skb->next;
	} while(skb != sk->rqueue->next);
	DPRINTF((DBG_TCP, "tcp readable returning %d bytes\n", amount));
	return(amount);
}
235
236
/*
 * select() support for TCP sockets.  Grabs the socket, registers the
 * caller on the socket's wait queue, and reports readiness for the
 * requested condition.  Returns 1 if ready, 0 if the caller should
 * sleep.  The socket is released on every path.
 */
static int
tcp_select(struct sock *sk, int sel_type, select_table *wait)
{
	DPRINTF((DBG_TCP, "tcp_select(sk=%X, sel_type = %d, wait = %X)\n",
						sk, sel_type, wait));

	sk->inuse = 1;
	switch(sel_type) {
	case SEL_IN:
		select_wait(sk->sleep, wait);
		/* Readable if listening with pending connections, or if
		   in-sequence data is queued. */
		if (sk->rqueue != NULL) {
			if (sk->state == TCP_LISTEN || tcp_readable(sk)) {
				release_sock(sk);
				return(1);
			}
		}
		/* A shut-down receive side reads EOF immediately. */
		if (sk->shutdown & RCV_SHUTDOWN) {
			release_sock(sk);
			return(1);
		} else {
			release_sock(sk);
			return(0);
		}
	case SEL_OUT:
		select_wait(sk->sleep, wait);
		if (sk->shutdown & SEND_SHUTDOWN) {
			DPRINTF((DBG_TCP,
				"write select on shutdown socket.\n"));
			release_sock(sk);
			return(0);
		}

		/* Writable only when at least one MTU of send buffer is
		   free; not yet while the handshake is in progress. */
		if (sk->prot->wspace(sk) >= sk->mtu) {
			release_sock(sk);

			if (sk->state == TCP_SYN_RECV ||
			    sk->state == TCP_SYN_SENT) return(0);
			return(1);
		}
		DPRINTF((DBG_TCP,
			"tcp_select: sleeping on write sk->wmem_alloc = %d, "
			"sk->packets_out = %d\n"
			"sk->wback = %X, sk->wfront = %X\n"
			"sk->send_seq = %u, sk->window_seq=%u\n",
				sk->wmem_alloc, sk->packets_out,
				sk->wback, sk->wfront,
				sk->send_seq, sk->window_seq));

		release_sock(sk);
		return(0);
	case SEL_EX:
		select_wait(sk->sleep,wait);
		/* Exceptional condition: a pending socket error. */
		if (sk->err) {
			release_sock(sk);
			return(1);
		}
		release_sock(sk);
		return(0);
	}

	release_sock(sk);
	return(0);
}
307
308
309 int
310 tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
311 {
312 DPRINTF((DBG_TCP, "tcp_ioctl(sk=%X, cmd = %d, arg=%X)\n", sk, cmd, arg));
313 switch(cmd) {
314 case DDIOCSDBG:
315 return(dbg_ioctl((void *) arg, DBG_TCP));
316
317 case TIOCINQ:
318 #ifdef FIXME
319 case FIONREAD:
320 #endif
321 {
322 unsigned long amount;
323
324 if (sk->state == TCP_LISTEN) return(-EINVAL);
325
326 amount = 0;
327 sk->inuse = 1;
328 if (sk->rqueue != NULL) {
329 amount = tcp_readable(sk);
330 }
331 release_sock(sk);
332 DPRINTF((DBG_TCP, "returning %d\n", amount));
333 verify_area(VERIFY_WRITE,(void *)arg,
334 sizeof(unsigned long));
335 put_fs_long(amount,(unsigned long *)arg);
336 return(0);
337 }
338 case SIOCATMARK:
339 {
340 struct sk_buff *skb;
341 int answ = 0;
342
343
344
345
346
347 sk->inuse = 1;
348 if (sk->rqueue != NULL) {
349 skb =(struct sk_buff *)sk->rqueue->next;
350 if (sk->copied_seq+1 == skb->h.th->seq &&
351 skb->h.th->urg) answ = 1;
352 }
353 release_sock(sk);
354 verify_area(VERIFY_WRITE,(void *) arg,
355 sizeof(unsigned long));
356 put_fs_long(answ,(int *) arg);
357 return(0);
358 }
359 case TIOCOUTQ:
360 {
361 unsigned long amount;
362
363 if (sk->state == TCP_LISTEN) return(-EINVAL);
364 amount = sk->prot->wspace(sk)/2;
365 verify_area(VERIFY_WRITE,(void *)arg,
366 sizeof(unsigned long));
367 put_fs_long(amount,(unsigned long *)arg);
368 return(0);
369 }
370 default:
371 return(-EINVAL);
372 }
373 }
374
375
376
/*
 * Compute the TCP checksum over the pseudo-header plus 'len' bytes of
 * header and data starting at 'th'.  Implemented with i386 inline
 * assembly: 32-bit one's-complement summation, folded to 16 bits,
 * with trailing 2-byte and 1-byte tails handled separately.
 * Returns the one's-complement of the folded sum.
 */
unsigned short
tcp_check(struct tcphdr *th, int len,
	  unsigned long saddr, unsigned long daddr)
{
	unsigned long sum;

	/* An unbound local socket checksums against our own address. */
	if (saddr == 0) saddr = my_addr();
	print_th(th);
	/* Seed the sum with the pseudo-header: saddr + daddr +
	   (length << 16 | proto*256), with end-around carry. */
	__asm__("\t addl %%ecx,%%ebx\n"
		"\t adcl %%edx,%%ebx\n"
		"\t adcl $0, %%ebx\n"
		: "=b"(sum)
		: "0"(daddr), "c"(saddr), "d"((ntohs(len) << 16) + IPPROTO_TCP*256)
		: "cx","bx","dx" );

	/* Sum the bulk of the segment a 32-bit word at a time. */
	if (len > 3) {
		__asm__("\tclc\n"
			"1:\n"
			"\t lodsl\n"
			"\t adcl %%eax, %%ebx\n"
			"\t loop 1b\n"
			"\t adcl $0, %%ebx\n"
			: "=b"(sum) , "=S"(th)
			: "0"(sum), "c"(len/4) ,"1"(th)
			: "ax", "cx", "bx", "si" );
	}

	/* Fold the 32-bit sum into 16 bits with end-around carry. */
	__asm__("\t movl %%ebx, %%ecx\n"
		"\t shrl $16,%%ecx\n"
		"\t addw %%cx, %%bx\n"
		"\t adcw $0, %%bx\n"
		: "=b"(sum)
		: "0"(sum)
		: "bx", "cx");

	/* Add a trailing 16-bit word, if the length leaves one. */
	if ((len & 2) != 0) {
		__asm__("\t lodsw\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum), "=S"(th)
			: "0"(sum) ,"1"(th)
			: "si", "ax", "bx");
	}

	/* Add a final odd byte, zero-extended. */
	if ((len & 1) != 0) {
		__asm__("\t lodsb\n"
			"\t movb $0,%%ah\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum)
			: "0"(sum) ,"S"(th)
			: "si", "ax", "bx");
	}

	/* One's complement of the folded sum is the checksum. */
	return((~sum) & 0xffff);
}
437
438
439 void
440 tcp_send_check(struct tcphdr *th, unsigned long saddr,
441 unsigned long daddr, int len, struct sock *sk)
442 {
443 th->check = 0;
444 if (sk && sk->no_check) return;
445 th->check = tcp_check(th, len, saddr, daddr);
446 return;
447 }
448
449
/*
 * Flush the socket's pending partially-filled packet (sk->send_tmp):
 * checksum it, then either transmit it immediately or append it to the
 * write queue when the send window or congestion window forbids
 * sending now.
 */
static void
tcp_send_partial(struct sock *sk)
{
	struct sk_buff *skb;

	if (sk == NULL || sk->send_tmp == NULL) return;

	skb = sk->send_tmp;

	/* Checksum covers the TCP header through the end of the buffer:
	   total length minus the offset of the header within the skb. */
	tcp_send_check(skb->h.th, sk->saddr, sk->daddr,
		       skb->len-(unsigned long)skb->h.th +
		      (unsigned long)(skb+1), sk);

	skb->h.seq = sk->send_seq;
	if (after(sk->send_seq , sk->window_seq) ||
	    sk->packets_out >= sk->cong_window) {
		/* Window closed or congestion limit hit: queue for later. */
		DPRINTF((DBG_TCP, "sk->cong_window = %d, sk->packets_out = %d\n",
					sk->cong_window, sk->packets_out));
		DPRINTF((DBG_TCP, "sk->send_seq = %d, sk->window_seq = %d\n",
					sk->send_seq, sk->window_seq));
		skb->next = NULL;
		skb->magic = TCP_WRITE_QUEUE_MAGIC;
		if (sk->wback == NULL) {
			sk->wfront=skb;
		} else {
			sk->wback->next = skb;
		}
		sk->wback = skb;
	} else {
		sk->prot->queue_xmit(sk, skb->dev, skb,0);
	}
	sk->send_tmp = NULL;
}
484
485
486
/*
 * Send a bare ACK carrying 'sequence' as its sequence number and
 * acknowledging up through 'ack'.  Allocation is atomic; on failure
 * the ACK is deferred by bumping ack_backlog and arming a short
 * retransmit timer.
 */
static void
tcp_send_ack(unsigned long sequence, unsigned long ack,
	     struct sock *sk,
	     struct tcphdr *th, unsigned long daddr)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	struct device *dev = NULL;
	int tmp;

	buff = (struct sk_buff *) sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL) {
		/* No memory now: remember we owe an ACK and retry from
		   the timer. */
		sk->ack_backlog++;
		if (sk->timeout != TIME_WRITE && tcp_connected(sk->state)) {
			sk->timeout = TIME_WRITE;
			sk->time_wait.len = 10;
			reset_timer((struct timer *)&sk->time_wait);
		}
		if (inet_debug == DBG_SLIP) printk("\rtcp_ack: malloc failed\n");
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_ACK_SIZE;
	buff->lock = 0;
	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;
	t1 =(struct tcphdr *)(buff + 1);

	/* Build the IP (and link) headers in front of the TCP header. */
	tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
				     IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
	if (tmp < 0) {
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		if (inet_debug == DBG_SLIP) printk("\rtcp_ack: build_header failed\n");
		return;
	}
	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)t1 +tmp);

	/* Start from the incoming header, then swap the port pair. */
	memcpy(t1, th, sizeof(*t1));

	t1->dest = th->source;
	t1->source = th->dest;
	t1->seq = ntohl(sequence);
	t1->ack = 1;
	sk->window = sk->prot->rspace(sk);
	t1->window = ntohs(sk->window);
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	t1->fin = 0;
	if (ack == sk->acked_seq) {
		/* Fully caught up: clear the ACK debt and, with nothing
		   in flight, cancel the pending timer. */
		sk->ack_backlog = 0;
		sk->bytes_rcv = 0;
		sk->ack_timed = 0;
		if (sk->send_head == NULL && sk->wfront == NULL) {
			delete_timer((struct timer *)&sk->time_wait);
			sk->timeout = 0;
		}
	}
	t1->ack_seq = ntohl(ack);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), sk);
	if (inet_debug == DBG_SLIP) printk("\rtcp_ack: seq %x ack %x\n",
					   sequence, ack);
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
565
566
567
/*
 * Fill in a TCP header for outgoing data from the socket's template
 * header (dummy_th).  'push' nonzero suppresses the PSH flag; zero
 * (last segment of a write) sets it.  Sending the header acknowledges
 * everything received so far, so the ACK bookkeeping is cleared here.
 * Returns the header length in bytes.
 */
static int
tcp_build_header(struct tcphdr *th, struct sock *sk, int push)
{
	memcpy(th,(void *) &(sk->dummy_th), sizeof(*th));
	th->seq = ntohl(sk->send_seq);
	th->psh =(push == 0) ? 1 : 0;
	th->doff = sizeof(*th)/4;
	th->ack = 1;
	th->fin = 0;
	/* This segment carries a current ACK: reset the ACK debt. */
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	sk->ack_timed = 0;
	th->ack_seq = ntohl(sk->acked_seq);
	/* Advertise the current receive space. */
	sk->window = sk->prot->rspace(sk);
	th->window = ntohs(sk->window);

	return(sizeof(*th));
}
588
589
590
591
592
593
/*
 * Write 'len' bytes from user space to a TCP socket.  Blocks (unless
 * 'nonblock') while the connection is still being established or no
 * buffer memory is available.  Data may be merged into a pending
 * partial packet (sk->send_tmp) to build full-MSS segments.  Returns
 * the number of bytes accepted, or a negative errno if nothing was
 * written.
 */
static int
tcp_write(struct sock *sk, unsigned char *from,
	  int len, int nonblock, unsigned flags)
{
	int copied = 0;
	int copy;
	int tmp;
	struct sk_buff *skb;
	unsigned char *buff;
	struct proto *prot;
	struct device *dev = NULL;

	DPRINTF((DBG_TCP, "tcp_write(sk=%X, from=%X, len=%d, nonblock=%d, flags=%X)\n",
					sk, from, len, nonblock, flags));

	prot = sk->prot;
	while(len > 0) {
		if (sk->err) {
			/* Report partial progress before the error. */
			if (copied) return(copied);
			tmp = -sk->err;
			sk->err = 0;
			return(tmp);
		}

		sk->inuse = 1;
		if (sk->shutdown & SEND_SHUTDOWN) {
			release_sock(sk);
			sk->err = EPIPE;
			if (copied) return(copied);
			sk->err = 0;
			return(-EPIPE);
		}

		/* Wait until the connection can carry data. */
		while(sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) {
			if (sk->err) {
				if (copied) return(copied);
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV) {
				/* Connection is dead or dying. */
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 1\n"));
				if (copied) return(copied);

				if (sk->err) {
					tmp = -sk->err;
					sk->err = 0;
					return(tmp);
				}

				if (sk->keepopen) {
					send_sig(SIGPIPE, current, 0);
				}
				return(-EPIPE);
			}

			if (nonblock || copied) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 2\n"));
				if (copied) return(copied);
				return(-EAGAIN);
			}

			/* Sleep until the handshake finishes.  Interrupts are
			   disabled around the state recheck to avoid missing
			   a wakeup. */
			release_sock(sk);
			cli();
			if (sk->state != TCP_ESTABLISHED &&
			    sk->state != TCP_CLOSE_WAIT && sk->err == 0) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					DPRINTF((DBG_TCP, "tcp_write: return 3\n"));
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
		}

		/* First try to append to a pending partial packet. */
		if (sk->send_tmp != NULL) {
			skb = sk->send_tmp;
			if (!(flags & MSG_OOB)) {
				copy = min(sk->mss - skb->len + 128 +
					   prot->max_header, len);

				if (copy <= 0) {
					printk("TCP: **bug**: \"copy\" <= 0!!\n");
					copy = 0;
				}

				memcpy_fromfs((unsigned char *)(skb+1) + skb->len, from, copy);
				skb->len += copy;
				from += copy;
				copied += copy;
				len -= copy;
				sk->send_seq += copy;
			}

			/* Flush the partial packet once it reaches a full
			   MSS, or always for out-of-band data. */
			if (skb->len -(unsigned long)skb->h.th +
			    (unsigned long)(skb+1) >= sk->mss ||(flags & MSG_OOB)) {
				tcp_send_partial(sk);
			}
			continue;
		}

		/* Size the next segment from the offered window. */
		copy = min(sk->mtu, diff(sk->window_seq, sk->send_seq));

		/* Avoid silly-small segments; fall back to one MTU. */
		if (copy < 200 || copy > sk->mtu) copy = sk->mtu;
		copy = min(copy, len);

		if (sk->packets_out && copy < sk->mss && !(flags & MSG_OOB)) {
			/* Sub-MSS write with data in flight: start a partial
			   packet so later writes can be coalesced. */
			release_sock(sk);
			skb = (struct sk_buff *) prot->wmalloc(sk,
					sk->mss + 128 + prot->max_header +
					sizeof(*skb), 0, GFP_KERNEL);
			sk->inuse = 1;
			sk->send_tmp = skb;
			if (skb != NULL)
				skb->mem_len = sk->mss + 128 + prot->max_header + sizeof(*skb);
		} else {
			/* Allocate an exactly-sized buffer to send now. */
			release_sock(sk);
			skb = (struct sk_buff *) prot->wmalloc(sk,
					copy + prot->max_header +
					sizeof(*skb), 0, GFP_KERNEL);
			sk->inuse = 1;
			if (skb != NULL)
				skb->mem_len = copy+prot->max_header + sizeof(*skb);
		}

		/* No memory: wait for write space to be freed. */
		if (skb == NULL) {
			if (nonblock || copied) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 4\n"));
				if (copied) return(copied);
				return(-EAGAIN);
			}

			tmp = sk->wmem_alloc;
			release_sock(sk);

			/* Sleep only if no memory was freed meanwhile. */
			cli();
			if (tmp <= sk->wmem_alloc &&
			    (sk->state == TCP_ESTABLISHED||sk->state == TCP_CLOSE_WAIT)
			    && sk->err == 0) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					DPRINTF((DBG_TCP, "tcp_write: return 5\n"));
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
			continue;
		}

		skb->mem_addr = skb;
		skb->len = 0;
		skb->sk = sk;
		skb->lock = 0;
		skb->free = 0;

		buff =(unsigned char *)(skb+1);

		/* Lower-layer (IP/link) headers first. */
		tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev,
					 IPPROTO_TCP, sk->opt, skb->mem_len);
		if (tmp < 0 ) {
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 6\n"));
			if (copied) return(copied);
			return(tmp);
		}
		skb->len += tmp;
		skb->dev = dev;
		buff += tmp;
		skb->h.th =(struct tcphdr *) buff;
		/* TCP header; PSH is set when this exhausts the write. */
		tmp = tcp_build_header((struct tcphdr *)buff, sk, len-copy);
		if (tmp < 0) {
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 7\n"));
			if (copied) return(copied);
			return(tmp);
		}

		if (flags & MSG_OOB) {
			((struct tcphdr *)buff)->urg = 1;
			((struct tcphdr *)buff)->urg_ptr = ntohs(copy);
		}
		skb->len += tmp;
		memcpy_fromfs(buff+tmp, from, copy);

		from += copy;
		copied += copy;
		len -= copy;
		skb->len += copy;
		skb->free = 0;
		sk->send_seq += copy;

		/* A partial packet is flushed elsewhere, not sent here. */
		if (sk->send_tmp != NULL) continue;

		tcp_send_check((struct tcphdr *)buff, sk->saddr, sk->daddr,
			       copy + sizeof(struct tcphdr), sk);

		skb->h.seq = sk->send_seq;
		if (after(sk->send_seq , sk->window_seq) ||
		    sk->packets_out >= sk->cong_window) {
			/* Window closed or congestion limit reached: queue. */
			DPRINTF((DBG_TCP, "sk->cong_window = %d, sk->packets_out = %d\n",
						sk->cong_window, sk->packets_out));
			DPRINTF((DBG_TCP, "sk->send_seq = %d, sk->window_seq = %d\n",
						sk->send_seq, sk->window_seq));
			skb->next = NULL;
			skb->magic = TCP_WRITE_QUEUE_MAGIC;
			if (sk->wback == NULL) {
				sk->wfront = skb;
			} else {
				sk->wback->next = skb;
			}
			sk->wback = skb;
		} else {
			prot->queue_xmit(sk, dev, skb,0);
		}
	}
	sk->err = 0;
	release_sock(sk);
	DPRINTF((DBG_TCP, "tcp_write: return 8\n"));
	return(copied);
}
855
856
857 static int
858 tcp_sendto(struct sock *sk, unsigned char *from,
859 int len, int nonblock, unsigned flags,
860 struct sockaddr_in *addr, int addr_len)
861 {
862 struct sockaddr_in sin;
863
864 if (addr_len < sizeof(sin)) return(-EINVAL);
865 memcpy_fromfs(&sin, addr, sizeof(sin));
866 if (sin.sin_family && sin.sin_family != AF_INET) return(-EINVAL);
867 if (sin.sin_port != sk->dummy_th.dest) return(-EINVAL);
868 if (sin.sin_addr.s_addr != sk->daddr) return(-EINVAL);
869 return(tcp_write(sk, from, len, nonblock, flags));
870 }
871
872
/*
 * Send a window-update ACK after the reader has freed receive buffer
 * space.  Does nothing unless an ACK is owed (ack_backlog).  On
 * allocation failure the attempt is retried from a short timer.
 */
static void
tcp_read_wakeup(struct sock *sk)
{
	int tmp;
	struct device *dev = NULL;
	struct tcphdr *t1;
	struct sk_buff *buff;

	DPRINTF((DBG_TCP, "in tcp read wakeup\n"));
	if (!sk->ack_backlog) return;

	buff = (struct sk_buff *) sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
	if (buff == NULL) {
		/* Try again shortly from the write timer. */
		sk->timeout = TIME_WRITE;
		sk->time_wait.len = 10;
		reset_timer((struct timer *) &sk->time_wait);
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_ACK_SIZE;
	buff->lock = 0;
	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;

	/* Lower-layer headers first. */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
				     IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
	if (tmp < 0) {
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		return;
	}

	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)(buff+1) +tmp);

	/* Pure ACK built from the socket's template header. */
	memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
	t1->seq = ntohl(sk->send_seq);
	t1->ack = 1;
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	/* Advertise the newly freed receive space. */
	sk->window = sk->prot->rspace(sk);
	t1->window = ntohs(sk->window);
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
938
939
940
941
942
943
944
945
/*
 * Release fully-consumed buffers from the front of the receive queue
 * and, if that opened up receive space, arrange for a window update:
 * immediately when the window grew by more than an MTU, otherwise via
 * a delayed-ACK timer.
 */
static void
cleanup_rbuf(struct sock *sk)
{
	int left;

	DPRINTF((DBG_TCP, "cleaning rbuf for sk=%X\n", sk));
	/* Remember the free space before releasing anything. */
	left = sk->prot->rspace(sk);

	while(sk->rqueue != NULL ) {
		struct sk_buff *skb;

		skb =(struct sk_buff *)sk->rqueue->next;
		/* Stop at the first buffer the reader hasn't finished. */
		if (!skb->used) break;
		if (sk->rqueue == skb) {
			sk->rqueue = NULL;
		} else {
			skb->next->prev = skb->prev;
			skb->prev->next = skb->next;
		}
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
	}

	DPRINTF((DBG_TCP, "sk->window left = %d, sk->prot->rspace(sk)=%d\n",
			  sk->window - sk->bytes_rcv, sk->prot->rspace(sk)));

	if (sk->prot->rspace(sk) != left) {
		/* Receive space changed: we owe the peer a window update. */
		sk->ack_backlog++;
		if ((sk->prot->rspace(sk) > (sk->window - sk->bytes_rcv + sk->mtu))) {
			/* Window opened substantially: ACK right away. */
			tcp_read_wakeup(sk);
		} else {
			/* Otherwise delay the ACK briefly, possibly
			   piggybacking on outgoing data. */
			if (jiffies + TCP_ACK_TIME < sk->time_wait.when) {
				sk->time_wait.len = TCP_ACK_TIME;
				sk->timeout = TIME_WRITE;
				reset_timer((struct timer *) &sk->time_wait);
			}
		}
	}
}
1007
1008
1009
/*
 * Read out-of-band (urgent) data from the socket.  Blocks (unless
 * 'nonblock') until an unconsumed urgent segment is queued, then
 * copies up to the urgent length to user space.  Returns the number
 * of bytes copied or a negative errno.
 */
static int
tcp_read_urg(struct sock * sk, int nonblock,
	     unsigned char *to, int len, unsigned flags)
{
	int copied = 0;
	struct sk_buff *skb;

	DPRINTF((DBG_TCP, "tcp_read_urg(sk=%X, to=%X, len=%d, flags=%X)\n",
					sk, to, len, flags));

	while(len > 0) {
		sk->inuse = 1;
		/* Wait until urgent data is pending. */
		while(sk->urg==0 || sk->rqueue == NULL) {
			if (sk->err) {
				int tmp;

				release_sock(sk);
				if (copied) return(copied);
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			if (sk->state == TCP_CLOSE || sk->done) {
				release_sock(sk);
				if (copied) return(copied);
				/* First read after close returns EOF;
				   later ones report not-connected. */
				if (!sk->done) {
					sk->done = 1;
					return(0);
				}
				return(-ENOTCONN);
			}

			if (sk->shutdown & RCV_SHUTDOWN) {
				release_sock(sk);
				if (copied == 0) sk->done = 1;
				return(copied);
			}

			if (nonblock || copied) {
				release_sock(sk);
				if (copied) return(copied);
				return(-EAGAIN);
			}

			/* Recheck under cli() to avoid missing a wakeup. */
			release_sock(sk);
			cli();
			if ((sk->urg == 0 || sk->rqueue == NULL) &&
			    sk->err == 0 && !(sk->shutdown & RCV_SHUTDOWN)) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
		}

		/* Scan the receive ring for an unconsumed urgent segment. */
		skb =(struct sk_buff *)sk->rqueue->next;
		do {
			int amt;

			if (skb->h.th->urg && !skb->urg_used) {
				if (skb->h.th->urg_ptr == 0) {
					skb->h.th->urg_ptr = ntohs(skb->len);
				}
				amt = min(ntohs(skb->h.th->urg_ptr),len);
				verify_area(VERIFY_WRITE, to, amt);
				/* Urgent bytes start right after the header. */
				memcpy_tofs(to,(unsigned char *)(skb->h.th) +
							skb->h.th->doff*4, amt);

				if (!(flags & MSG_PEEK)) {
					skb->urg_used = 1;
					sk->urg--;
				}
				release_sock(sk);
				copied += amt;
				return(copied);
			}
			skb =(struct sk_buff *)skb->next;
		} while(skb != sk->rqueue->next);
	}
	sk->urg = 0;
	release_sock(sk);
	return(0);
}
1099
1100
1101
/*
 * Read up to 'len' bytes of in-sequence data from a TCP socket into
 * user space.  Blocks (unless 'nonblock') until data, EOF, or an
 * error arrives.  MSG_OOB is diverted to tcp_read_urg(); MSG_PEEK
 * leaves the data on the queue.  Returns bytes copied or a negative
 * errno.
 */
static int
tcp_read(struct sock *sk, unsigned char *to,
	 int len, int nonblock, unsigned flags)
{
	int copied=0;
	struct sk_buff *skb;
	unsigned long offset;
	unsigned long used;

	if (len == 0) return(0);
	if (len < 0) {
		return(-EINVAL);
	}

	if (sk->state == TCP_LISTEN) return(-ENOTCONN);

	if ((flags & MSG_OOB)) return(tcp_read_urg(sk, nonblock, to, len, flags));

	sk->inuse = 1;
	/* Oldest buffer on the receive ring, if any. */
	if (sk->rqueue != NULL) skb =(struct sk_buff *)sk->rqueue->next;
	else skb = NULL;

	DPRINTF((DBG_TCP, "tcp_read(sk=%X, to=%X, len=%d, nonblock=%d, flags=%X)\n",
					sk, to, len, nonblock, flags));

	while(len > 0) {
		/* Wait until the next in-sequence, unread buffer exists. */
		while(skb == NULL ||
		      before(sk->copied_seq+1, skb->h.th->seq) || skb->used) {
			DPRINTF((DBG_TCP, "skb = %X:\n", skb));
			cleanup_rbuf(sk);
			if (sk->err) {
				int tmp;

				release_sock(sk);
				if (copied) {
					DPRINTF((DBG_TCP, "tcp_read: returing %d\n",
								copied));
					return(copied);
				}
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			if (sk->state == TCP_CLOSE) {
				release_sock(sk);
				if (copied) {
					DPRINTF((DBG_TCP, "tcp_read: returing %d\n",
								copied));
					return(copied);
				}
				/* First read after close is EOF. */
				if (!sk->done) {
					sk->done = 1;
					return(0);
				}
				return(-ENOTCONN);
			}

			if (sk->shutdown & RCV_SHUTDOWN) {
				release_sock(sk);
				if (copied == 0) sk->done = 1;
				DPRINTF((DBG_TCP, "tcp_read: returing %d\n", copied));
				return(copied);
			}

			if (nonblock || copied) {
				release_sock(sk);
				if (copied) {
					DPRINTF((DBG_TCP, "tcp_read: returing %d\n",
								copied));
					return(copied);
				}
				return(-EAGAIN);
			}

			if ((flags & MSG_PEEK) && copied != 0) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_read: returing %d\n", copied));
				return(copied);
			}

			DPRINTF((DBG_TCP, "tcp_read about to sleep. state = %d\n",
								sk->state));
			release_sock(sk);

			/* Recheck the wait condition under cli() so a wakeup
			   between the test and the sleep is not lost. */
			cli();
			if (sk->shutdown & RCV_SHUTDOWN || sk->err != 0) {
				sk->inuse = 1;
				sti();
				continue;
			}

			if (sk->rqueue == NULL ||
			    before(sk->copied_seq+1, sk->rqueue->next->h.th->seq)) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					if (copied) {
						DPRINTF((DBG_TCP, "tcp_read: returing %d\n",
									copied));
						return(copied);
					}
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
			DPRINTF((DBG_TCP, "tcp_read woke up. \n"));

			if (sk->rqueue == NULL) skb = NULL;
			else skb =(struct sk_buff *)sk->rqueue->next;

		}

		/* Offset of the first unread byte within this segment. */
		offset = sk->copied_seq+1 - skb->h.th->seq;

		/* The SYN occupies a sequence number but carries no data. */
		if (skb->h.th->syn) offset--;
		if (offset < skb->len) {
			if (skb->h.th->urg) {
				if (skb->urg_used) {
					/* Urgent bytes already consumed out
					   of band: skip past them. */
					sk->copied_seq += ntohs(skb->h.th->urg_ptr);
					offset += ntohs(skb->h.th->urg_ptr);
					if (offset >= skb->len) {
						skb->used = 1;
						skb =(struct sk_buff *)skb->next;
						continue;
					}
				} else {
					/* Unread urgent data ahead: signal
					   the process and bail out. */
					release_sock(sk);
					if (copied) return(copied);
					send_sig(SIGURG, current, 0);
					return(-EINTR);
				}
			}
			used = min(skb->len - offset, len);
			verify_area(VERIFY_WRITE, to, used);
			memcpy_tofs(to,((unsigned char *)skb->h.th) +
				    skb->h.th->doff*4 + offset, used);
			copied += used;
			len -= used;
			to += used;
			if (!(flags & MSG_PEEK)) sk->copied_seq += used;

			/* Mark the buffer reclaimable once every byte
			   (including any urgent data) is consumed. */
			if (!(flags & MSG_PEEK) &&
			    (!skb->h.th->urg || skb->urg_used) &&
			    (used + offset >= skb->len)) skb->used = 1;

			/* PSH or URG delimits a record: stop here. */
			if (skb->h.th->psh || skb->h.th->urg) {
				break;
			}
		} else {
			skb->used = 1;
		}
		skb =(struct sk_buff *)skb->next;
	}
	cleanup_rbuf(sk);
	release_sock(sk);
	DPRINTF((DBG_TCP, "tcp_read: returing %d\n", copied));
	if (copied == 0 && nonblock) return(-EAGAIN);
	return(copied);
}
1289
1290
1291
1292
1293
1294
/*
 * shutdown() for the send direction: flush any partial packet, then
 * build and send (or queue behind pending writes) a FIN, moving the
 * connection into FIN_WAIT1/FIN_WAIT2.  Only SEND_SHUTDOWN is acted
 * upon here.
 */
void
tcp_shutdown(struct sock *sk, int how)
{
	struct sk_buff *buff;
	struct tcphdr *t1, *th;
	struct proto *prot;
	int tmp;
	struct device *dev = NULL;

	/* Already closing in this direction: nothing to do. */
	if (sk->state == TCP_FIN_WAIT1 || sk->state == TCP_FIN_WAIT2) return;
	if (!(how & SEND_SHUTDOWN)) return;
	sk->inuse = 1;

	/* Any buffered partial segment must precede the FIN. */
	if (sk->send_tmp) tcp_send_partial(sk);

	prot =(struct proto *)sk->prot;
	th =(struct tcphdr *)&sk->dummy_th;
	/* Drop the lock around the (possibly sleeping) allocation. */
	release_sock(sk);
	buff = (struct sk_buff *) prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
	if (buff == NULL) return;
	sk->inuse = 1;

	DPRINTF((DBG_TCP, "tcp_shutdown_send buff = %X\n", buff));
	buff->mem_addr = buff;
	buff->mem_len = MAX_RESET_SIZE;
	buff->lock = 0;
	buff->sk = sk;
	buff->len = sizeof(*t1);
	t1 =(struct tcphdr *)(buff + 1);

	/* Lower-layer headers first. */
	tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
				 IPPROTO_TCP, sk->opt,
				 sizeof(struct tcphdr));
	if (tmp < 0) {
		prot->wfree(sk,buff->mem_addr, buff->mem_len);
		release_sock(sk);
		DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
		return;
	}

	t1 =(struct tcphdr *)((char *)t1 +tmp);
	buff ->len += tmp;
	buff->dev = dev;
	memcpy(t1, th, sizeof(*t1));
	t1->seq = ntohl(sk->send_seq);
	/* The FIN consumes one sequence number. */
	sk->send_seq++;
	buff->h.seq = sk->send_seq;
	t1->ack = 1;
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->window = ntohs(sk->prot->rspace(sk));
	t1->fin = 1;
	t1->rst = 0;
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

	/* The FIN must not overtake queued data: append if any. */
	if (sk->wback != NULL) {
		buff->next = NULL;
		sk->wback->next = buff;
		sk->wback = buff;
		buff->magic = TCP_WRITE_QUEUE_MAGIC;
	} else {
		sk->prot->queue_xmit(sk, dev, buff, 0);
	}

	if (sk->state == TCP_ESTABLISHED) sk->state = TCP_FIN_WAIT1;
	else sk->state = TCP_FIN_WAIT2;

	release_sock(sk);
}
1378
1379
/*
 * recvfrom() on a TCP socket: perform an ordinary tcp_read(), then
 * report the connected peer's address back to the caller.
 *
 * NOTE(review): addr_len is read with get_fs_long() before any
 * verify_area() on it, and addr/addr_len are not checked for NULL —
 * presumably the socket layer guarantees valid pointers here; confirm
 * against the caller.
 */
static int
tcp_recvfrom(struct sock *sk, unsigned char *to,
	     int to_len, int nonblock, unsigned flags,
	     struct sockaddr_in *addr, int *addr_len)
{
	struct sockaddr_in sin;
	int len;
	int result = tcp_read(sk, to, to_len, nonblock, flags);

	if (result < 0) return(result);
	/* Copy back at most sizeof(sin) bytes of peer address. */
	len = get_fs_long(addr_len);
	if (len > sizeof(sin)) len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = sk->dummy_th.dest;
	sin.sin_addr.s_addr = sk->daddr;
	verify_area(VERIFY_WRITE, addr, len);
	memcpy_tofs(addr, &sin, len);
	verify_area(VERIFY_WRITE, addr_len, sizeof(len));
	put_fs_long(len, addr_len);
	return(result);
}
1401
1402
1403
/*
 * Send an RST in reply to the segment 'th' (used when no socket
 * matches or a connection must be refused).  The reply's sequence
 * number is taken from the incoming segment's ack field; there is no
 * owning socket, so the buffer is charged to NULL.
 */
static void
tcp_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
	  struct proto *prot, struct options *opt, struct device *dev)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	int tmp;

	buff = (struct sk_buff *) prot->wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL) return;

	DPRINTF((DBG_TCP, "tcp_reset buff = %X\n", buff));
	buff->mem_addr = buff;
	buff->mem_len = MAX_RESET_SIZE;
	buff->lock = 0;
	buff->len = sizeof(*t1);
	buff->sk = NULL;
	buff->dev = dev;

	t1 =(struct tcphdr *)(buff + 1);

	/* Lower-layer headers first. */
	tmp = prot->build_header(buff, saddr, daddr, &dev, IPPROTO_TCP, opt,
				 sizeof(struct tcphdr));
	if (tmp < 0) {
		prot->wfree(NULL, buff->mem_addr, buff->mem_len);
		return;
	}
	t1 =(struct tcphdr *)((char *)t1 +tmp);
	buff->len += tmp;
	/* Start from the offending header, then swap the port pair. */
	memcpy(t1, th, sizeof(*t1));

	t1->dest = th->source;
	t1->source = th->dest;
	/* RST's sequence echoes what the peer expected next. */
	t1->seq = th->ack_seq;
	t1->rst = 1;
	t1->window = 0;
	t1->ack = 0;
	t1->syn = 0;
	t1->urg = 0;
	t1->fin = 0;
	t1->psh = 0;
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, saddr, daddr, sizeof(*t1), NULL);
	prot->queue_xmit(NULL, dev, buff, 1);
}
1455
1456
1457
1458
1459
1460
1461
1462
1463
/*
 * Handle an incoming SYN on a listening socket.  Clones the listener into
 * a fresh sock in SYN_RECV state, answers with SYN+ACK (advertising our
 * MSS), and leaves the original skb — now pointing at the new sock — on
 * the listener's receive queue so accept() can find it.
 */
static void
tcp_conn_request(struct sock *sk, struct sk_buff *skb,
	 unsigned long daddr, unsigned long saddr,
	 struct options *opt, struct device *dev)
{
  struct sk_buff *buff;
  struct tcphdr *t1;
  unsigned char *ptr;
  struct sock *newsk;
  struct tcphdr *th;
  int tmp;

  DPRINTF((DBG_TCP, "tcp_conn_request(sk = %X, skb = %X, daddr = %X, sadd4= %X, \n"
	  " opt = %X, dev = %X)\n",
	  sk, skb, daddr, saddr, opt, dev));

  th = skb->h.th;

  /* A dead listener cannot accept; refuse the peer with a RST. */
  if (!sk->dead) {
	wake_up(sk->sleep);
  } else {
	DPRINTF((DBG_TCP, "tcp_conn_request on dead socket\n"));
	tcp_reset(daddr, saddr, th, sk->prot, opt, dev);
	kfree_skb(skb, FREE_READ);
	return;
  }

  /* Backlog full: silently drop and let the peer retransmit its SYN. */
  if (sk->ack_backlog >= sk->max_ack_backlog) {
	kfree_skb(skb, FREE_READ);
	return;
  }

  /* Allocate the child socket; again, a drop just forces a SYN retry. */
  newsk = (struct sock *) kmalloc(sizeof(struct sock), GFP_ATOMIC);
  if (newsk == NULL) {
	kfree_skb(skb, FREE_READ);
	return;
  }

  DPRINTF((DBG_TCP, "newsk = %X\n", newsk));
  /* Start as a byte copy of the listener, then reset everything that
   * must be private to the new connection (queues, timers, counters). */
  memcpy((void *)newsk,(void *)sk, sizeof(*newsk));
  newsk->wback = NULL;
  newsk->wfront = NULL;
  newsk->rqueue = NULL;
  newsk->send_head = NULL;
  newsk->send_tail = NULL;
  newsk->back_log = NULL;
  newsk->rtt = TCP_CONNECT_TIME;
  newsk->mdev = 0;
  newsk->backoff = 0;
  newsk->blog = 0;
  newsk->intr = 0;
  newsk->proc = 0;
  newsk->done = 0;
  newsk->send_tmp = NULL;
  newsk->pair = NULL;
  newsk->wmem_alloc = 0;
  newsk->rmem_alloc = 0;

  newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;

  newsk->err = 0;
  newsk->shutdown = 0;
  newsk->ack_backlog = 0;
  /* The SYN consumes one sequence number. */
  newsk->acked_seq = skb->h.th->seq+1;
  newsk->fin_seq = skb->h.th->seq;
  newsk->copied_seq = skb->h.th->seq;
  newsk->state = TCP_SYN_RECV;
  newsk->timeout = 0;
  /* Clock-driven initial send sequence number. */
  newsk->send_seq = timer_seq * SEQ_TICK - seq_offset;
  newsk->rcv_ack_seq = newsk->send_seq;
  newsk->urg =0;
  newsk->retransmits = 0;
  newsk->destroy = 0;
  newsk->time_wait.sk = newsk;
  newsk->time_wait.next = NULL;
  /* Template header for future segments: our source is their dest. */
  newsk->dummy_th.source = skb->h.th->dest;
  newsk->dummy_th.dest = skb->h.th->source;

  /* saddr/daddr arrive from the wire's point of view, so they swap here. */
  newsk->daddr = saddr;
  newsk->saddr = daddr;

  put_sock(newsk->num,newsk);
  newsk->dummy_th.res1 = 0;
  newsk->dummy_th.doff = 6;
  newsk->dummy_th.fin = 0;
  newsk->dummy_th.syn = 0;
  newsk->dummy_th.rst = 0;
  newsk->dummy_th.psh = 0;
  newsk->dummy_th.ack = 0;
  newsk->dummy_th.urg = 0;
  newsk->dummy_th.res2 = 0;
  newsk->acked_seq = skb->h.th->seq + 1;
  newsk->copied_seq = skb->h.th->seq;

  /* Pick an MTU: use the peer's MSS option when present (kind 2, len 4
   * and assumed to be the first option), otherwise the device MTU. */
  if (skb->h.th->doff == 5) {
	newsk->mtu = dev->mtu - HEADER_SIZE;
  } else {
	ptr =(unsigned char *)(skb->h.th + 1);
	if (ptr[0] != 2 || ptr[1] != 4) {
		newsk->mtu = dev->mtu - HEADER_SIZE;
	} else {
		newsk->mtu = min(ptr[2] * 256 + ptr[3] - HEADER_SIZE,
				 dev->mtu - HEADER_SIZE);
	}
  }

  /* Build the SYN+ACK reply. */
  buff = (struct sk_buff *) newsk->prot->wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
  if (buff == NULL) {
	/* NOTE(review): error is posted negated here (-ENOMEM) while other
	 * paths in this file store positive errno values — TODO confirm. */
	sk->err = -ENOMEM;
	newsk->dead = 1;
	release_sock(newsk);
	kfree_skb(skb, FREE_READ);
	return;
  }

  buff->lock = 0;
  buff->mem_addr = buff;
  buff->mem_len = MAX_SYN_SIZE;
  /* Header plus 4 bytes for the MSS option we append below. */
  buff->len = sizeof(struct tcphdr)+4;
  buff->sk = newsk;

  t1 =(struct tcphdr *)(buff + 1);

  tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &dev,
			       IPPROTO_TCP, NULL, MAX_SYN_SIZE);

  if (tmp < 0) {
	sk->err = tmp;
	sk->prot->wfree(newsk, buff->mem_addr, buff->mem_len);
	newsk->dead = 1;
	release_sock(newsk);
	skb->sk = sk;
	kfree_skb(skb, FREE_READ);
	return;
  }

  buff->len += tmp;
  t1 =(struct tcphdr *)((char *)t1 +tmp);

  memcpy(t1, skb->h.th, sizeof(*t1));
  buff->h.seq = newsk->send_seq;

  /* SYN+ACK: our ISN, acknowledging their SYN, with our window and MSS. */
  t1->dest = skb->h.th->source;
  t1->source = newsk->dummy_th.source;
  t1->seq = ntohl(newsk->send_seq++);
  t1->ack = 1;
  newsk->window = newsk->prot->rspace(newsk);
  t1->window = ntohs(newsk->window);
  t1->res1 = 0;
  t1->res2 = 0;
  t1->rst = 0;
  t1->urg = 0;
  t1->psh = 0;
  t1->syn = 1;
  t1->ack_seq = ntohl(skb->h.th->seq+1);
  /* One extra 32-bit word of options (the MSS). */
  t1->doff = sizeof(*t1)/4+1;

  /* MSS option: kind 2, length 4, value = our device MTU less headers. */
  ptr =(unsigned char *)(t1+1);
  ptr[0] = 2;
  ptr[1] = 4;
  ptr[2] =((dev->mtu - HEADER_SIZE) >> 8) & 0xff;
  ptr[3] =(dev->mtu - HEADER_SIZE) & 0xff;

  tcp_send_check(t1, daddr, saddr, sizeof(*t1)+4, newsk);
  newsk->prot->queue_xmit(newsk, dev, buff, 0);

  /* Arm the connection-establishment timeout on the child. */
  newsk->time_wait.len = TCP_CONNECT_TIME;
  DPRINTF((DBG_TCP, "newsk->time_wait.sk = %X\n", newsk->time_wait.sk));
  reset_timer((struct timer *)&newsk->time_wait);
  skb->sk = newsk;

  /* The skb's memory charge moves from the listener to the child. */
  sk->rmem_alloc -= skb->mem_len;
  newsk->rmem_alloc += skb->mem_len;

  /* Park the SYN skb on the listener's circular receive queue so that
   * accept() (via get_firstr) can hand out newsk. */
  if (sk->rqueue == NULL) {
	skb->next = skb;
	skb->prev = skb;
	sk->rqueue = skb;
  } else {
	skb->next = sk->rqueue;
	skb->prev = sk->rqueue->prev;
	sk->rqueue->prev = skb;
	skb->prev->next = skb;
  }
  sk->ack_backlog++;
  release_sock(newsk);
}
1669
1670
/*
 * Close a TCP socket.  Flushes unread data (remembering whether the peer
 * sent data we never consumed, which warrants a RST on the FIN), flushes
 * any partial send, then acts according to the connection state — either
 * finishing an in-progress shutdown or emitting a FIN of our own.
 */
static void
tcp_close(struct sock *sk, int timeout)
{
  struct sk_buff *buff;
  int need_reset = 0;
  struct tcphdr *t1, *th;
  struct proto *prot;
  struct device *dev=NULL;
  int tmp;

  DPRINTF((DBG_TCP, "tcp_close((struct sock *)%X, %d)\n",sk, timeout));
  sk->inuse = 1;
  sk->keepopen = 1;
  sk->shutdown = SHUTDOWN_MASK;

  if (!sk->dead) wake_up(sk->sleep);

  /* Throw away unread data; if any of it was never copied to the user,
   * the peer should see a reset rather than a graceful close. */
  if (sk->rqueue != NULL) {
	struct sk_buff *skb;
	struct sk_buff *skb2;

	skb = sk->rqueue;
	do {
		skb2 =(struct sk_buff *)skb->next;
		if (skb->len > 0 &&
		    after(skb->h.th->seq + skb->len + 1, sk->copied_seq))
						need_reset = 1;
		kfree_skb(skb, FREE_READ);
		skb = skb2;
	} while(skb != sk->rqueue);
  }
  sk->rqueue = NULL;

  /* Push out any partially-filled segment before sending the FIN. */
  if (sk->send_tmp) {
	tcp_send_partial(sk);
  }

  switch(sk->state) {
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
	case TCP_LAST_ACK:
		/* Already closing: just (re)arm the close timer.
		 * NOTE(review): stray second ';' — harmless empty statement. */
		sk->time_wait.len = 4*sk->rtt;;
		sk->timeout = TIME_CLOSE;
		reset_timer((struct timer *)&sk->time_wait);
		if (timeout) tcp_time_wait(sk);
		release_sock(sk);
		break;
	case TCP_TIME_WAIT:
		if (timeout) {
		  sk->state = TCP_CLOSE;
		}
		release_sock(sk);
		return;
	case TCP_LISTEN:
		sk->state = TCP_CLOSE;
		release_sock(sk);
		return;
	case TCP_CLOSE:
		release_sock(sk);
		return;
	case TCP_CLOSE_WAIT:
	case TCP_ESTABLISHED:
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Active close: build and send a FIN segment. */
		prot =(struct proto *)sk->prot;
		th =(struct tcphdr *)&sk->dummy_th;
		buff = (struct sk_buff *) prot->wmalloc(sk, MAX_FIN_SIZE, 1, GFP_ATOMIC);
		if (buff == NULL) {
			/* No memory for the FIN: back out of the close and
			 * retry via the TIME_CLOSE timer.
			 * NOTE(review): this path returns without
			 * release_sock() — verify against the lock protocol
			 * used elsewhere in this file. */
			if (sk->state != TCP_CLOSE_WAIT)
					sk->state = TCP_ESTABLISHED;
			sk->timeout = TIME_CLOSE;
			sk->time_wait.len = 100;
			reset_timer((struct timer *)&sk->time_wait);
			return;
		}
		buff->lock = 0;
		buff->mem_addr = buff;
		buff->mem_len = MAX_FIN_SIZE;
		buff->sk = sk;
		buff->len = sizeof(*t1);
		t1 =(struct tcphdr *)(buff + 1);

		tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
					 IPPROTO_TCP, sk->opt,
					 sizeof(struct tcphdr));
		if (tmp < 0) {
			prot->wfree(sk,buff->mem_addr, buff->mem_len);
			DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
			release_sock(sk);
			return;
		}

		t1 =(struct tcphdr *)((char *)t1 +tmp);
		buff ->len += tmp;
		buff->dev = dev;
		memcpy(t1, th, sizeof(*t1));
		/* The FIN consumes one sequence number. */
		t1->seq = ntohl(sk->send_seq);
		sk->send_seq++;
		buff->h.seq = sk->send_seq;
		t1->ack = 1;

		/* Ack everything immediately from now on. */
		sk->delay_acks = 0;
		t1->ack_seq = ntohl(sk->acked_seq);
		t1->window = ntohs(sk->prot->rspace(sk));
		t1->fin = 1;
		/* RST rides along if the user abandoned unread data. */
		t1->rst = need_reset;
		t1->doff = sizeof(*t1)/4;
		tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

		if (sk->wfront == NULL) {
			/* Nothing queued ahead of it: send the FIN now. */
			prot->queue_xmit(sk, dev, buff, 0);
		} else {
			/* Data still pending: append the FIN to the write
			 * queue and arm the retransmit timer. */
			sk->time_wait.len = backoff(sk->backoff) *
			  (2 * sk->mdev + sk->rtt);
			sk->timeout = TIME_WRITE;
			reset_timer((struct timer *)&sk->time_wait);
			buff->next = NULL;
			if (sk->wback == NULL) {
				sk->wfront=buff;
			} else {
				sk->wback->next = buff;
			}
			sk->wback = buff;
			buff->magic = TCP_WRITE_QUEUE_MAGIC;
		}

		/* If the peer's FIN already arrived we skip straight past
		 * FIN_WAIT1. */
		if (sk->state == TCP_CLOSE_WAIT) {
			sk->state = TCP_FIN_WAIT2;
		} else {
			sk->state = TCP_FIN_WAIT1;
		}
  }
  release_sock(sk);
}
1816
1817
1818
1819
1820
1821
1822 static void
1823 tcp_write_xmit(struct sock *sk)
1824 {
1825 struct sk_buff *skb;
1826
1827 DPRINTF((DBG_TCP, "tcp_write_xmit(sk=%X)\n", sk));
1828 while(sk->wfront != NULL &&
1829 before(sk->wfront->h.seq, sk->window_seq) &&
1830 sk->packets_out < sk->cong_window) {
1831 skb = sk->wfront;
1832 sk->wfront =(struct sk_buff *)skb->next;
1833 if (sk->wfront == NULL) sk->wback = NULL;
1834 skb->next = NULL;
1835 if (skb->magic != TCP_WRITE_QUEUE_MAGIC) {
1836 DPRINTF((DBG_TCP, "tcp.c skb with bad magic(%X) on write queue. Squashing "
1837 "queue\n", skb->magic));
1838 sk->wfront = NULL;
1839 sk->wback = NULL;
1840 return;
1841 }
1842 skb->magic = 0;
1843 DPRINTF((DBG_TCP, "Sending a packet.\n"));
1844
1845
1846 if (before(skb->h.seq, sk->rcv_ack_seq +1)) {
1847 sk->retransmits = 0;
1848 kfree_skb(skb, FREE_WRITE);
1849 if (!sk->dead) wake_up(sk->sleep);
1850 } else {
1851 sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
1852 }
1853 }
1854 }
1855
1856
1857
1858
1859
1860
1861 void
1862 sort_send(struct sock *sk)
1863 {
1864 struct sk_buff *list = NULL;
1865 struct sk_buff *skb,*skb2,*skb3;
1866
1867 for (skb = sk->send_head; skb != NULL; skb = skb2) {
1868 skb2 = (struct sk_buff *)skb->link3;
1869 if (list == NULL || before (skb2->h.seq, list->h.seq)) {
1870 skb->link3 = list;
1871 sk->send_tail = skb;
1872 list = skb;
1873 } else {
1874 for (skb3 = list; ; skb3 = (struct sk_buff *)skb3->link3) {
1875 if (skb3->link3 == NULL ||
1876 before(skb->h.seq, skb3->link3->h.seq)) {
1877 skb->link3 = skb3->link3;
1878 skb3->link3 = skb;
1879 if (skb->link3 == NULL) sk->send_tail = skb;
1880 break;
1881 }
1882 }
1883 }
1884 }
1885 sk->send_head = list;
1886 }
1887
1888
1889
/*
 * Process the ACK and window fields of an incoming segment.
 * Responsibilities: validate the ack number, handle a shrinking peer
 * window (moving now-unsendable segments back onto the write queue),
 * grow the congestion window, free acknowledged segments from the
 * retransmit list (updating the RTT estimate), kick further
 * transmission, manage timers, and drive the closing-state transitions.
 * Returns 1 when the ACK was acceptable, 0 when it was ignored.
 */
static int
tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
{
  unsigned long ack;
  int flag = 0;

  ack = ntohl(th->ack_seq);
  DPRINTF((DBG_TCP, "tcp_ack ack=%d, window=%d, "
	  "sk->rcv_ack_seq=%d, sk->window_seq = %d\n",
	  ack, ntohs(th->window), sk->rcv_ack_seq, sk->window_seq));

  /* Ack outside [rcv_ack_seq-1, send_seq+1]: either an old duplicate
   * (tolerated in the synchronized states, and it refreshes keepalive)
   * or a future ack we must ignore outright. */
  if (after(ack, sk->send_seq+1) || before(ack, sk->rcv_ack_seq-1)) {
	if (after(ack, sk->send_seq) ||
	   (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)) {
		return(0);
	}
	if (sk->keepopen) {
		sk->time_wait.len = TCP_TIMEOUT_LEN;
		sk->timeout = TIME_KEEPOPEN;
		reset_timer((struct timer *)&sk->time_wait);
	}
	return(1);
  }

  /* Segment carries data (or options beyond the header). */
  if (len != th->doff*4) flag |= 1;

  /* The peer's window shrank below what we previously believed: any
   * in-flight segment beyond the new right edge must be pulled off the
   * retransmit list and put back on the (front of the) write queue. */
  if (after(sk->window_seq, ack+ntohs(th->window))) {
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *wskb = NULL;

	skb2 = sk->send_head;
	sk->send_head = NULL;
	sk->send_tail = NULL;

	flag |= 4;

	sk->window_seq = ack + ntohs(th->window);
	cli();
	while (skb2 != NULL) {
		skb = skb2;
		skb2 = (struct sk_buff *)skb->link3;
		skb->link3 = NULL;
		if (after(skb->h.seq, sk->window_seq)) {
			/* Beyond the new window edge: un-count it and
			 * remove it from any device/arp queue it may be
			 * sitting on before requeueing. */
			if (sk->packets_out > 0) sk->packets_out--;

			if (skb->next != NULL) {
				int i;

				if (skb->next != skb) {
					skb->next->prev = skb->prev;
					skb->prev->next = skb->next;
				}

				for(i = 0; i < DEV_NUMBUFFS; i++) {
					if (skb->dev->buffs[i] == skb) {
						if (skb->next == skb)
							skb->dev->buffs[i] = NULL;
						  else
							skb->dev->buffs[i] = skb->next;
						break;
					}
				}
				if (arp_q == skb) {
					if (skb->next == skb) arp_q = NULL;
					  else arp_q = skb->next;
				}
			}

			/* Splice back onto the write queue, preserving
			 * sequence order (wskb tracks the insert point). */
			skb->magic = TCP_WRITE_QUEUE_MAGIC;
			if (wskb == NULL) {
				skb->next = sk->wfront;
				sk->wfront = skb;
			} else {
				skb->next = wskb->next;
				wskb->next = skb;
			}
			if (sk->wback == wskb) sk->wback = skb;
			wskb = skb;
		} else {
			/* Still within the window: keep on the rebuilt
			 * retransmit list. */
			if (sk->send_head == NULL) {
				sk->send_head = skb;
				sk->send_tail = skb;
			} else {
				sk->send_tail->link3 = skb;
				sk->send_tail = skb;
			}
			skb->link3 = NULL;
		}
	}
	sti();
  }

  /* Keep head/tail/packets_out mutually consistent. */
  if (sk->send_tail == NULL || sk->send_head == NULL) {
	sk->send_head = NULL;
	sk->send_tail = NULL;
	sk->packets_out= 0;
  }

  sk->window_seq = ack + ntohs(th->window);

  /* Congestion window growth on a new ack: exponential (slow start)
   * or linear, capped at 2048. */
  if (sk->cong_window < 2048 && ack != sk->rcv_ack_seq) {
	if (sk->exp_growth) sk->cong_window *= 2;
	  else sk->cong_window++;
  }

  DPRINTF((DBG_TCP, "tcp_ack: Updating rcv ack sequence.\n"));
  sk->rcv_ack_seq = ack;

  /* Retire every fully-acknowledged segment from the retransmit list. */
  while(sk->send_head != NULL) {
	/* Sanity check: the list must stay sequence-ordered. */
	if (sk->send_head->link3 &&
	    after(sk->send_head->h.seq, sk->send_head->link3->h.seq)) {
		printk("INET: tcp.c: *** bug send_list out of order.\n");
		sort_send(sk);
	}

	if (before(sk->send_head->h.seq, ack+1)) {
		struct sk_buff *oskb;

		sk->retransmits = 0;

		if (sk->packets_out > 0) sk->packets_out --;
		DPRINTF((DBG_TCP, "skb=%X skb->h.seq = %d acked ack=%d\n",
			sk->send_head, sk->send_head->h.seq, ack));

		if (!sk->dead) wake_up(sk->sleep);

		oskb = sk->send_head;

		/* RTT estimation (smoothed rtt + mean deviation, cf.
		 * Jacobson): only from segments that were not
		 * retransmitted and when this is the first retire in
		 * this call (flag bit 2 unset). */
		if (sk->retransmits == 0 && !(flag&2)) {
			long abserr, rtt = jiffies - oskb->when;

			if (sk->state == TCP_SYN_SENT || sk->state == TCP_SYN_RECV)
				/* First measurement: take it verbatim. */
				sk->rtt = rtt;
			  else {
				abserr = (rtt > sk->rtt) ? rtt - sk->rtt : sk->rtt - rtt;
				sk->rtt = (7 * sk->rtt + rtt) >> 3;
				sk->mdev = (3 * sk->mdev + abserr) >> 2;
			}
			sk->backoff = 0;
		}
		flag |= (2|4);

		/* Clamp the estimate to sane jiffy bounds. */
		if (sk->rtt < 10) sk->rtt = 10;
		if (sk->rtt > 12000) sk->rtt = 12000;

		cli();

		/* Unlink from the retransmit list... */
		oskb = sk->send_head;
		sk->send_head =(struct sk_buff *)oskb->link3;
		if (sk->send_head == NULL) {
			sk->send_tail = NULL;
		}

		/* ...and from any device output or arp queue, too. */
		if (oskb->next != NULL) {
			int i;

			if (oskb->next != oskb) {
				oskb->next->prev = oskb->prev;
				oskb->prev->next = oskb->next;
			}
			for(i = 0; i < DEV_NUMBUFFS; i++) {
				if (oskb->dev->buffs[i] == oskb) {
					if (oskb== oskb->next)
						oskb->dev->buffs[i]= NULL;
					  else
						oskb->dev->buffs[i] = oskb->next;
					break;
				}
			}
			if (arp_q == oskb) {
				if (oskb == oskb->next) arp_q = NULL;
				  else arp_q =(struct sk_buff *)oskb->next;
			}
		}
		sti();
		oskb->magic = 0;
		kfree_skb(oskb, FREE_WRITE);
		if (!sk->dead) wake_up(sk->sleep);
	} else {
		break;
	}
  }

  /* More to send and room to send it: push the write queue.  Otherwise
   * manage the timers for the idle / pending-ack / TIME_WAIT cases. */
  if (sk->wfront != NULL) {
	if (after (sk->window_seq, sk->wfront->h.seq) &&
		sk->packets_out < sk->cong_window) {
		flag |= 1;
		tcp_write_xmit(sk);
	}
  } else {
	if (sk->send_head == NULL && sk->ack_backlog == 0 &&
	    sk->state != TCP_TIME_WAIT && !sk->keepopen) {
		DPRINTF((DBG_TCP, "Nothing to do, going to sleep.\n"));
		if (!sk->dead) wake_up(sk->sleep);

		delete_timer((struct timer *)&sk->time_wait);
		sk->timeout = 0;
	} else {
		/* NOTE(review): comparing state against keepopen looks
		 * odd — possibly meant as a non-idle test; confirm before
		 * changing. */
		if (sk->state != (unsigned char) sk->keepopen) {
			sk->timeout = TIME_WRITE;
			sk->time_wait.len = backoff(sk->backoff) *
			  (2 * sk->mdev + sk->rtt);
			reset_timer((struct timer *)&sk->time_wait);
		}
		if (sk->state == TCP_TIME_WAIT) {
			sk->time_wait.len = TCP_TIMEWAIT_LEN;
			reset_timer((struct timer *)&sk->time_wait);
			sk->timeout = TIME_CLOSE;
		}
	}
  }

  /* Everything acked and a partial segment waiting: flush it now. */
  if (sk->packets_out == 0 && sk->send_tmp != NULL &&
      sk->wfront == NULL && sk->send_head == NULL) {
	flag |= 1;
	tcp_send_partial(sk);
  }

  /* Closing-state machine driven by this ack. */
  if (sk->state == TCP_TIME_WAIT) {
	if (!sk->dead) wake_up(sk->sleep);
	if (sk->rcv_ack_seq == sk->send_seq && sk->acked_seq == sk->fin_seq) {
		flag |= 1;
		sk->state = TCP_CLOSE;
		sk->shutdown = SHUTDOWN_MASK;
	}
  }

  if (sk->state == TCP_LAST_ACK || sk->state == TCP_FIN_WAIT2) {
	if (!sk->dead) wake_up(sk->sleep);
	if (sk->rcv_ack_seq == sk->send_seq) {
		flag |= 1;
		if (sk->acked_seq != sk->fin_seq) {
			tcp_time_wait(sk);
		} else {
			DPRINTF((DBG_TCP, "tcp_ack closing socket - %X\n", sk));
			tcp_send_ack(sk->send_seq, sk->acked_seq, sk,
				     th, sk->daddr);
			sk->shutdown = SHUTDOWN_MASK;
			sk->state = TCP_CLOSE;
		}
	}
  }

  /* Oldest unacked segment overdue: retransmit (and leave slow start). */
  if (((!flag) || (flag&4)) && sk->send_head != NULL &&
      (sk->send_head->when + backoff(sk->backoff) * (2 * sk->mdev + sk->rtt)
       < jiffies)) {
	sk->exp_growth = 0;
	ip_retransmit(sk, 0);
  }

  DPRINTF((DBG_TCP, "leaving tcp_ack\n"));
  return(1);
}
2168
2169
2170
2171
2172
2173
2174
/*
 * Queue the data portion of an incoming segment on the socket's receive
 * queue (a circular, sequence-ordered list), advance acked_seq over any
 * now-contiguous segments, and schedule or send an ACK.  Returns 0.
 */
static int
tcp_data(struct sk_buff *skb, struct sock *sk,
	 unsigned long saddr, unsigned short len)
{
  struct sk_buff *skb1, *skb2;
  struct tcphdr *th;

  th = skb->h.th;
  print_th(th);
  /* Strip the TCP header: skb->len becomes the payload length. */
  skb->len = len -(th->doff*4);

  DPRINTF((DBG_TCP, "tcp_data len = %d sk = %X:\n", skb->len, sk));

  sk->bytes_rcv += skb->len;
  /* Pure ack, nothing else of interest: answer only if it itself
   * carried no ACK, then drop it. */
  if (skb->len == 0 && !th->fin && !th->urg && !th->psh) {
	if (!th->ack) tcp_send_ack(sk->send_seq, sk->acked_seq,sk, th, saddr);
	kfree_skb(skb, FREE_READ);
	return(0);
  }

  /* Data after we shut down receiving: kill the connection. */
  if (sk->shutdown & RCV_SHUTDOWN) {
	sk->acked_seq = th->seq + skb->len + th->syn + th->fin;
	tcp_reset(sk->saddr, sk->daddr, skb->h.th,
	sk->prot, NULL, skb->dev);
	sk->state = TCP_CLOSE;
	sk->err = EPIPE;
	sk->shutdown = SHUTDOWN_MASK;
	DPRINTF((DBG_TCP, "tcp_data: closing socket - %X\n", sk));
	kfree_skb(skb, FREE_READ);
	if (!sk->dead) wake_up(sk->sleep);
	return(0);
  }

  /* Insert into the circular receive queue in sequence order, walking
   * backwards from the newest entry (sk->rqueue).  skb1 ends up as the
   * predecessor, or NULL if this skb became the oldest entry. */
  if (sk->rqueue == NULL) {
	DPRINTF((DBG_TCP, "tcp_data: skb = %X:\n", skb));

	sk->rqueue = skb;
	skb->next = skb;
	skb->prev = skb;
	skb1= NULL;
  } else {
	DPRINTF((DBG_TCP, "tcp_data adding to chain sk = %X:\n", sk));

	for(skb1=sk->rqueue; ; skb1 =(struct sk_buff *)skb1->prev) {
		DPRINTF((DBG_TCP, "skb1=%X\n", skb1));
		DPRINTF((DBG_TCP, "skb1->h.th->seq = %d\n", skb1->h.th->seq));
		if (after(th->seq+1, skb1->h.th->seq)) {
			/* Goes right after skb1. */
			skb->prev = skb1;
			skb->next = skb1->next;
			skb->next->prev = skb;
			skb1->next = skb;
			if (skb1 == sk->rqueue) sk->rqueue = skb;
			break;
		}
		if (skb1->prev == sk->rqueue) {
			/* Wrapped all the way round: this is the oldest
			 * segment in the queue. */
			skb->next= skb1;
			skb->prev = skb1->prev;
			skb->prev->next = skb;
			skb1->prev = skb;
			skb1 = NULL;

			break;
		}
	}
	DPRINTF((DBG_TCP, "skb = %X:\n", skb));
  }

  /* Stash the sequence number just past this segment in ack_seq (the
   * field is reused as scratch once the segment is queued). */
  th->ack_seq = th->seq + skb->len;
  if (th->syn) th->ack_seq++;
  if (th->fin) th->ack_seq++;

  if (before(sk->acked_seq, sk->copied_seq)) {
	printk("*** tcp.c:tcp_data bug acked < copied\n");
	sk->acked_seq = sk->copied_seq;
  }

  /* If this segment is in order (or fills a hole next to acked data),
   * advance acked_seq over it and over any following segments that are
   * now contiguous, marking each as acked. */
  if (skb1 == NULL || skb1->acked || before(th->seq, sk->acked_seq+1)) {
	if (before(th->seq, sk->acked_seq+1)) {
		if (after(th->ack_seq, sk->acked_seq))
					sk->acked_seq = th->ack_seq;
		skb->acked = 1;

		/* A FIN means no more data will arrive. */
		if (skb->h.th->fin) {
			if (!sk->dead) wake_up(sk->sleep);
			sk->shutdown |= RCV_SHUTDOWN;
		}

		for(skb2 = (struct sk_buff *)skb->next;
		    skb2 !=(struct sk_buff *) sk->rqueue->next;
		    skb2 = (struct sk_buff *)skb2->next) {
			if (before(skb2->h.th->seq, sk->acked_seq+1)) {
				if (after(skb2->h.th->ack_seq, sk->acked_seq))
					sk->acked_seq = skb2->h.th->ack_seq;
				skb2->acked = 1;

				if (skb2->h.th->fin) {
					sk->shutdown |= RCV_SHUTDOWN;
					if (!sk->dead) wake_up(sk->sleep);
				}

				/* Force an immediate ack below. */
				sk->ack_backlog = sk->max_ack_backlog;
			} else {
				break;
			}
		}

		/* Either ack right away (conditions below) or just count
		 * the debt and let the delayed-ack timer pay it. */
		if (!sk->delay_acks ||
		    sk->ack_backlog >= sk->max_ack_backlog ||
		    sk->bytes_rcv > sk->max_unacked || th->fin) {
		} else {
			sk->ack_backlog++;
			sk->time_wait.len = TCP_ACK_TIME;
			sk->timeout = TIME_WRITE;
			reset_timer((struct timer *)&sk->time_wait);
		}
	}
  }

  /* Out-of-order segment: if buffer space is tight, shed the newest
   * un-acked queue entries to make room, then ack what we do have so
   * the sender sees the hole. */
  if (!skb->acked) {
	while (sk->prot->rspace(sk) < sk->mtu) {
		skb1 = (struct sk_buff *)sk->rqueue;
		if (skb1 == NULL) {
			printk("INET: tcp.c:tcp_data memory leak detected.\n");
			break;
		}

		/* Never discard data we have already acknowledged. */
		if (skb1->acked) {
			break;
		}
		if (skb1->prev == skb1) {
			sk->rqueue = NULL;
		} else {
			sk->rqueue = (struct sk_buff *)skb1->prev;
			skb1->next->prev = skb1->prev;
			skb1->prev->next = skb1->next;
		}
		kfree_skb(skb1, FREE_READ);
	}
	tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
	sk->ack_backlog++;
	sk->time_wait.len = TCP_ACK_TIME;
	sk->timeout = TIME_WRITE;
	reset_timer((struct timer *)&sk->time_wait);
  } else {
	tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
  }

  /* Let any blocked reader see the new data. */
  if (!sk->dead) {
	wake_up(sk->sleep);
  } else {
	DPRINTF((DBG_TCP, "data received on dead socket.\n"));
  }

  /* Both FINs are accounted for: move on in the close handshake. */
  if (sk->state == TCP_FIN_WAIT2 &&
      sk->acked_seq == sk->fin_seq && sk->rcv_ack_seq == sk->send_seq) {
	DPRINTF((DBG_TCP, "tcp_data: entering last_ack state sk = %X\n", sk));

	sk->shutdown = SHUTDOWN_MASK;
	sk->state = TCP_LAST_ACK;
	if (!sk->dead) wake_up(sk->sleep);
  }

  return(0);
}
2374
2375
2376 static int
2377 tcp_urg(struct sock *sk, struct tcphdr *th, unsigned long saddr)
2378 {
2379 extern int kill_pg(int pg, int sig, int priv);
2380 extern int kill_proc(int pid, int sig, int priv);
2381
2382 if (!sk->dead) wake_up(sk->sleep);
2383
2384 if (sk->urginline) {
2385 th->urg = 0;
2386 th->psh = 1;
2387 return(0);
2388 }
2389
2390 if (!sk->urg) {
2391
2392 if (sk->proc != 0) {
2393 if (sk->proc > 0) {
2394 kill_proc(sk->proc, SIGURG, 1);
2395 } else {
2396 kill_pg(-sk->proc, SIGURG, 1);
2397 }
2398 }
2399 }
2400 sk->urg++;
2401 return(0);
2402 }
2403
2404
2405
/*
 * Handle an incoming FIN: record where the peer's stream ends and step
 * the connection state machine.  Returns 0.
 */
static int
tcp_fin(struct sock *sk, struct tcphdr *th,
	unsigned long saddr, struct device *dev)
{
  DPRINTF((DBG_TCP, "tcp_fin(sk=%X, th=%X, saddr=%X, dev=%X)\n",
	  sk, th, saddr, dev));

  if (!sk->dead) {
	wake_up(sk->sleep);
  }

  switch(sk->state) {
	case TCP_SYN_RECV:
	case TCP_SYN_SENT:
	case TCP_ESTABLISHED:
		/* Passive close: remember the FIN's place in the stream. */
		sk->fin_seq = th->seq+1;
		sk->state = TCP_CLOSE_WAIT;
		if (th->rst) sk->shutdown = SHUTDOWN_MASK;
		break;

	case TCP_CLOSE_WAIT:
	case TCP_FIN_WAIT2:
		/* Duplicate FIN: nothing more to record. */
		break;

	case TCP_FIN_WAIT1:
		/* Simultaneous close: our FIN is out, theirs arrived. */
		sk->fin_seq = th->seq+1;
		sk->state = TCP_FIN_WAIT2;
		break;

	default:
	case TCP_TIME_WAIT:
		sk->state = TCP_LAST_ACK;

		/* Hold the socket around long enough to re-ack the FIN. */
		sk->time_wait.len = TCP_TIMEWAIT_LEN;
		sk->timeout = TIME_CLOSE;
		reset_timer((struct timer *)&sk->time_wait);
		return(0);
  }
  /* The FIN itself must be acknowledged. */
  sk->ack_backlog++;

  return(0);
}
2451
2452
2453
/*
 * accept() for TCP: wait (or not, with O_NONBLOCK) for a queued
 * connection on a listening socket and return the new sock that
 * tcp_conn_request() created.  Errors are posted in sk->err and NULL
 * is returned.
 */
static struct sock *
tcp_accept(struct sock *sk, int flags)
{
  struct sock *newsk;
  struct sk_buff *skb;

  DPRINTF((DBG_TCP, "tcp_accept(sk=%X, flags=%X, addr=%s)\n",
	  sk, flags, in_ntoa(sk->saddr)));

  /* Only a listening socket can accept. */
  if (sk->state != TCP_LISTEN) {
	sk->err = EINVAL;
	return(NULL);
  }

  /* Interrupts off while probing the queue / going to sleep, to avoid
   * missing a wakeup between the test and the sleep. */
  cli();
  sk->inuse = 1;
  while((skb = get_firstr(sk)) == NULL) {
	if (flags & O_NONBLOCK) {
		sti();
		release_sock(sk);
		sk->err = EAGAIN;
		return(NULL);
	}

	release_sock(sk);
	interruptible_sleep_on(sk->sleep);
	/* Woken by a signal rather than a connection. */
	if (current->signal & ~current->blocked) {
		sti();
		sk->err = ERESTARTSYS;
		return(NULL);
	}
	sk->inuse = 1;
  }
  sti();

  /* The queued SYN skb carries the new connection's sock. */
  newsk = skb->sk;
  kfree_skb(skb, FREE_READ);
  sk->ack_backlog--;
  release_sock(sk);
  return(newsk);
}
2502
2503
2504
/*
 * connect() for TCP: validate and record the destination, build and send
 * the initial SYN (with an MSS option), and enter SYN_SENT with the
 * connect timer armed.  Returns 0 or a negative errno.
 */
static int
tcp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len)
{
  struct sk_buff *buff;
  struct sockaddr_in sin;
  struct device *dev=NULL;
  unsigned char *ptr;
  int tmp;
  struct tcphdr *t1;

  if (sk->state != TCP_CLOSE) return(-EISCONN);
  /* Must cover at least family, port and address. */
  if (addr_len < 8) return(-EINVAL);

  memcpy_fromfs(&sin,usin, min(sizeof(sin), addr_len));

  if (sin.sin_family && sin.sin_family != AF_INET) return(-EAFNOSUPPORT);

  DPRINTF((DBG_TCP, "TCP connect daddr=%s\n", in_ntoa(sin.sin_addr.s_addr)));

  /* No TCP to a broadcast address. */
  if (chk_addr(sin.sin_addr.s_addr) == IS_BROADCAST) {
	DPRINTF((DBG_TCP, "TCP connection to broadcast address not allowed\n"));
	return(-ENETUNREACH);
  }
  sk->inuse = 1;
  sk->daddr = sin.sin_addr.s_addr;
  /* Clock-driven initial sequence number. */
  sk->send_seq = timer_seq*SEQ_TICK-seq_offset;
  sk->rcv_ack_seq = sk->send_seq -1;
  sk->err = 0;
  sk->dummy_th.dest = sin.sin_port;
  release_sock(sk);

  /* May sleep (GFP_KERNEL), hence the release/re-acquire around it. */
  buff = (struct sk_buff *) sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
  if (buff == NULL) {
	return(-ENOMEM);
  }
  sk->inuse = 1;
  buff->lock = 0;
  buff->mem_addr = buff;
  buff->mem_len = MAX_SYN_SIZE;
  /* 20-byte TCP header plus the 4-byte MSS option. */
  buff->len = 24;
  buff->sk = sk;
  t1 = (struct tcphdr *)(buff + 1);

  /* Lay down the lower-layer headers; also resolves the output device. */
  tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
			       IPPROTO_TCP, NULL, MAX_SYN_SIZE);
  if (tmp < 0) {
	sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
	release_sock(sk);
	return(-ENETUNREACH);
  }
  buff->len += tmp;
  t1 = (struct tcphdr *)((char *)t1 +tmp);

  /* SYN segment: ISN, no ACK, token window until we learn better. */
  memcpy(t1,(void *)&(sk->dummy_th), sizeof(*t1));
  t1->seq = ntohl(sk->send_seq++);
  buff->h.seq = sk->send_seq;
  t1->ack = 0;
  t1->window = 2;
  t1->res1=0;
  t1->res2=0;
  t1->rst = 0;
  t1->urg = 0;
  t1->psh = 0;
  t1->syn = 1;
  t1->urg_ptr = 0;
  /* 6 words: header plus one option word. */
  t1->doff = 6;

  /* MSS option: kind 2, length 4, value = device MTU less headers. */
  ptr = (unsigned char *)(t1+1);
  ptr[0] = 2;
  ptr[1] = 4;
  ptr[2] = (dev->mtu- HEADER_SIZE) >> 8;
  ptr[3] = (dev->mtu- HEADER_SIZE) & 0xff;
  sk->mtu = dev->mtu - HEADER_SIZE;
  tcp_send_check(t1, sk->saddr, sk->daddr,
		 sizeof(struct tcphdr) + 4, sk);

  /* The handshake is under way. */
  sk->state = TCP_SYN_SENT;

  sk->prot->queue_xmit(sk, dev, buff, 0);

  /* Arm the connect timeout; rtt starts as the connect interval. */
  sk->time_wait.len = TCP_CONNECT_TIME;
  sk->rtt = TCP_CONNECT_TIME;
  reset_timer((struct timer *)&sk->time_wait);
  sk->retransmits = TCP_RETR2 - TCP_SYN_RETRIES;
  release_sock(sk);
  return(0);
}
2598
2599
2600
/*
 * Decide whether an incoming segment overlaps our receive window.
 * Returns 1 to accept.  A rejected segment still triggers an ACK
 * (unless it was a RST) so the peer can resynchronize.
 */
static int
tcp_sequence(struct sock *sk, struct tcphdr *th, short len,
	     struct options *opt, unsigned long saddr)
{
  DPRINTF((DBG_TCP, "tcp_sequence(sk=%X, th=%X, len = %d, opt=%d, saddr=%X)\n",
	  sk, th, len, opt, saddr));

  /* Accept if the segment's start, its end, or its span overlaps
   * [acked_seq, acked_seq + window].  len includes the TCP header, so
   * len - doff*4 is the payload length. */
  if (between(th->seq, sk->acked_seq, sk->acked_seq + sk->window)||
      between(th->seq + len-(th->doff*4), sk->acked_seq + 1,
	      sk->acked_seq + sk->window) ||
      (before(th->seq, sk->acked_seq) &&
       after(th->seq + len -(th->doff*4), sk->acked_seq + sk->window))) {
	return(1);
  }
  DPRINTF((DBG_TCP, "tcp_sequence: rejecting packet.\n"));

  /* Entirely beyond the window: re-ack so the peer backs off. */
  if (after(th->seq, sk->acked_seq + sk->window)) {
	tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
	return(0);
  }

  /* A bare ACK slightly behind the window is still worth processing
   * (its ack/window fields may be news even if its seq is old). */
  if (th->ack && len == (th->doff * 4) &&
      after(th->seq, sk->acked_seq - 32767) &&
      !th->fin && !th->syn) return(1);

  /* Old duplicate: ack it (never ack a RST). */
  if (!th->rst) {
	tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
  }
  return(0);
}
2643
2644
2645
2646 static void
2647 tcp_options(struct sock *sk, struct tcphdr *th)
2648 {
2649 unsigned char *ptr;
2650
2651 ptr = (unsigned char *)(th + 1);
2652 if (ptr[0] != 2 || ptr[1] != 4) {
2653 sk->mtu = min(sk->mtu, 576 - HEADER_SIZE);
2654 return;
2655 }
2656 sk->mtu = min(sk->mtu, ptr[2]*256 + ptr[3] - HEADER_SIZE);
2657 }
2658
2659
2660 int
2661 tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
2662 unsigned long daddr, unsigned short len,
2663 unsigned long saddr, int redo, struct inet_protocol * protocol)
2664 {
2665 struct tcphdr *th;
2666 struct sock *sk;
2667
2668 if (!skb) {
2669 DPRINTF((DBG_TCP, "tcp.c: tcp_rcv skb = NULL\n"));
2670 return(0);
2671 }
2672 #if 0
2673 if (!protocol) {
2674 DPRINTF((DBG_TCP, "tcp.c: tcp_rcv protocol = NULL\n"));
2675 return(0);
2676 }
2677
2678 if (!opt) {
2679 DPRINTF((DBG_TCP, "tcp.c: tcp_rcv opt = NULL\n"));
2680 }
2681 #endif
2682 if (!dev) {
2683 DPRINTF((DBG_TCP, "tcp.c: tcp_rcv dev = NULL\n"));
2684 return(0);
2685 }
2686 th = skb->h.th;
2687
2688
2689 sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
2690 DPRINTF((DBG_TCP, "<<\n"));
2691 DPRINTF((DBG_TCP, "len = %d, redo = %d, skb=%X\n", len, redo, skb));
2692
2693 if (sk) {
2694 DPRINTF((DBG_TCP, "sk = %X:\n", sk));
2695 }
2696
2697 if (!redo) {
2698 if (th->check && tcp_check(th, len, saddr, daddr )) {
2699 skb->sk = NULL;
2700 DPRINTF((DBG_TCP, "packet dropped with bad checksum.\n"));
2701 if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: back checksum\n");
2702 kfree_skb(skb, 0);
2703
2704
2705
2706
2707 return(0);
2708 }
2709
2710
2711 if (sk == NULL) {
2712 if (!th->rst) tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev);
2713 skb->sk = NULL;
2714 kfree_skb(skb, 0);
2715 return(0);
2716 }
2717
2718 skb->len = len;
2719 skb->sk = sk;
2720 skb->acked = 0;
2721 skb->used = 0;
2722 skb->free = 0;
2723 skb->urg_used = 0;
2724 skb->saddr = daddr;
2725 skb->daddr = saddr;
2726
2727 th->seq = ntohl(th->seq);
2728
2729
2730 cli();
2731 if (sk->inuse) {
2732 if (sk->back_log == NULL) {
2733 sk->back_log = skb;
2734 skb->next = skb;
2735 skb->prev = skb;
2736 } else {
2737 skb->next = sk->back_log;
2738 skb->prev = sk->back_log->prev;
2739 skb->prev->next = skb;
2740 skb->next->prev = skb;
2741 }
2742 sti();
2743 return(0);
2744 }
2745 sk->inuse = 1;
2746 sti();
2747 } else {
2748 if (!sk) {
2749 DPRINTF((DBG_TCP, "tcp.c: tcp_rcv bug sk=NULL redo = 1\n"));
2750 return(0);
2751 }
2752 }
2753
2754 if (!sk->prot) {
2755 DPRINTF((DBG_TCP, "tcp.c: tcp_rcv sk->prot = NULL \n"));
2756 return(0);
2757 }
2758
2759
2760 if (sk->rmem_alloc + skb->mem_len >= SK_RMEM_MAX) {
2761 skb->sk = NULL;
2762 DPRINTF((DBG_TCP, "dropping packet due to lack of buffer space.\n"));
2763 kfree_skb(skb, 0);
2764 release_sock(sk);
2765 return(0);
2766 }
2767 sk->rmem_alloc += skb->mem_len;
2768
2769 DPRINTF((DBG_TCP, "About to do switch.\n"));
2770
2771
2772 switch(sk->state) {
2773
2774
2775
2776
2777 case TCP_LAST_ACK:
2778 if (th->rst) {
2779 sk->err = ECONNRESET;
2780 sk->state = TCP_CLOSE;
2781 sk->shutdown = SHUTDOWN_MASK;
2782 if (!sk->dead) {
2783 wake_up(sk->sleep);
2784 }
2785 kfree_skb(skb, FREE_READ);
2786 release_sock(sk);
2787 return(0);
2788 }
2789
2790 case TCP_ESTABLISHED:
2791 case TCP_CLOSE_WAIT:
2792 case TCP_FIN_WAIT1:
2793 case TCP_FIN_WAIT2:
2794 case TCP_TIME_WAIT:
2795 if (!tcp_sequence(sk, th, len, opt, saddr)) {
2796 if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: not in seq\n");
2797 tcp_send_ack(sk->send_seq, sk->acked_seq,
2798 sk, th, saddr);
2799 kfree_skb(skb, FREE_READ);
2800 release_sock(sk);
2801 return(0);
2802 }
2803
2804 if (th->rst) {
2805
2806 sk->err = ECONNRESET;
2807
2808 if (sk->state == TCP_CLOSE_WAIT) {
2809 sk->err = EPIPE;
2810 }
2811
2812
2813
2814
2815
2816
2817
2818 sk->state = TCP_CLOSE;
2819 sk->shutdown = SHUTDOWN_MASK;
2820 if (!sk->dead) {
2821 wake_up(sk->sleep);
2822 }
2823 kfree_skb(skb, FREE_READ);
2824 release_sock(sk);
2825 return(0);
2826
2827 }
2828 #if 0
2829 if (opt && (opt->security != 0 ||
2830 opt->compartment != 0 || th->syn)) {
2831 sk->err = ECONNRESET;
2832 sk->state = TCP_CLOSE;
2833 sk->shutdown = SHUTDOWN_MASK;
2834 tcp_reset(daddr, saddr, th, sk->prot, opt,dev);
2835 if (!sk->dead) {
2836 wake_up(sk->sleep);
2837 }
2838 kfree_skb(skb, FREE_READ);
2839 release_sock(sk);
2840 return(0);
2841 }
2842 #endif
2843 if (th->ack) {
2844 if (!tcp_ack(sk, th, saddr, len)) {
2845 kfree_skb(skb, FREE_READ);
2846 release_sock(sk);
2847 return(0);
2848 }
2849 }
2850 if (th->urg) {
2851 if (tcp_urg(sk, th, saddr)) {
2852 kfree_skb(skb, FREE_READ);
2853 release_sock(sk);
2854 return(0);
2855 }
2856 }
2857
2858 if (th->fin && tcp_fin(sk, th, saddr, dev)) {
2859 kfree_skb(skb, FREE_READ);
2860 release_sock(sk);
2861 return(0);
2862 }
2863
2864 if (tcp_data(skb, sk, saddr, len)) {
2865 kfree_skb(skb, FREE_READ);
2866 release_sock(sk);
2867 return(0);
2868 }
2869
2870 release_sock(sk);
2871 return(0);
2872
2873 case TCP_CLOSE:
2874 if (sk->dead || sk->daddr) {
2875 DPRINTF((DBG_TCP, "packet received for closed,dead socket\n"));
2876 kfree_skb(skb, FREE_READ);
2877 release_sock(sk);
2878 return(0);
2879 }
2880
2881 if (!th->rst) {
2882 if (!th->ack)
2883 th->ack_seq = 0;
2884 tcp_reset(daddr, saddr, th, sk->prot, opt,dev);
2885 }
2886 kfree_skb(skb, FREE_READ);
2887 release_sock(sk);
2888 return(0);
2889
2890 case TCP_LISTEN:
2891 if (th->rst) {
2892 kfree_skb(skb, FREE_READ);
2893 release_sock(sk);
2894 return(0);
2895 }
2896 if (th->ack) {
2897 tcp_reset(daddr, saddr, th, sk->prot, opt,dev);
2898 kfree_skb(skb, FREE_READ);
2899 release_sock(sk);
2900 return(0);
2901 }
2902
2903 if (th->syn) {
2904 #if 0
2905 if (opt->security != 0 || opt->compartment != 0) {
2906 tcp_reset(daddr, saddr, th, prot, opt,dev);
2907 release_sock(sk);
2908 return(0);
2909 }
2910 #endif
2911
2912
2913
2914
2915
2916
2917
2918 tcp_conn_request(sk, skb, daddr, saddr, opt, dev);
2919 release_sock(sk);
2920 return(0);
2921 }
2922
2923 kfree_skb(skb, FREE_READ);
2924 release_sock(sk);
2925 return(0);
2926
2927 default:
2928 if (!tcp_sequence(sk, th, len, opt, saddr)) {
2929 kfree_skb(skb, FREE_READ);
2930 release_sock(sk);
2931 return(0);
2932 }
2933
2934 case TCP_SYN_SENT:
2935 if (th->rst) {
2936 sk->err = ECONNREFUSED;
2937 sk->state = TCP_CLOSE;
2938 sk->shutdown = SHUTDOWN_MASK;
2939 if (!sk->dead) {
2940 wake_up(sk->sleep);
2941 }
2942 kfree_skb(skb, FREE_READ);
2943 release_sock(sk);
2944 return(0);
2945 }
2946 #if 0
2947 if (opt->security != 0 || opt->compartment != 0) {
2948 sk->err = ECONNRESET;
2949 sk->state = TCP_CLOSE;
2950 sk->shutdown = SHUTDOWN_MASK;
2951 tcp_reset(daddr, saddr, th, sk->prot, opt, dev);
2952 if (!sk->dead) {
2953 wake_up(sk->sleep);
2954 }
2955 kfree_skb(skb, FREE_READ);
2956 release_sock(sk);
2957 return(0);
2958 }
2959 #endif
2960 if (!th->ack) {
2961 if (th->syn) {
2962 sk->state = TCP_SYN_RECV;
2963 }
2964
2965 kfree_skb(skb, FREE_READ);
2966 release_sock(sk);
2967 return(0);
2968 }
2969
2970 switch(sk->state) {
2971 case TCP_SYN_SENT:
2972 if (!tcp_ack(sk, th, saddr, len)) {
2973 tcp_reset(daddr, saddr, th,
2974 sk->prot, opt,dev);
2975 kfree_skb(skb, FREE_READ);
2976 release_sock(sk);
2977 return(0);
2978 }
2979
2980
2981
2982
2983
2984 if (!th->syn) {
2985 kfree_skb(skb, FREE_READ);
2986 release_sock(sk);
2987 return(0);
2988 }
2989
2990
2991 sk->acked_seq = th->seq+1;
2992 sk->fin_seq = th->seq;
2993 tcp_send_ack(sk->send_seq, th->seq+1,
2994 sk, th, sk->daddr);
2995
2996 case TCP_SYN_RECV:
2997 if (!tcp_ack(sk, th, saddr, len)) {
2998 tcp_reset(daddr, saddr, th,
2999 sk->prot, opt, dev);
3000 kfree_skb(skb, FREE_READ);
3001 release_sock(sk);
3002 return(0);
3003 }
3004 sk->state = TCP_ESTABLISHED;
3005
3006
3007
3008
3009
3010
3011 tcp_options(sk, th);
3012 sk->dummy_th.dest = th->source;
3013 sk->copied_seq = sk->acked_seq-1;
3014 if (!sk->dead) {
3015 wake_up(sk->sleep);
3016 }
3017
3018
3019
3020
3021
3022 if (th->urg) {
3023 if (tcp_urg(sk, th, saddr)) {
3024 kfree_skb(skb, FREE_READ);
3025 release_sock(sk);
3026 return(0);
3027 }
3028 }
3029 if (tcp_data(skb, sk, saddr, len))
3030 kfree_skb(skb, FREE_READ);
3031
3032 if (th->fin) tcp_fin(sk, th, saddr, dev);
3033 release_sock(sk);
3034 return(0);
3035 }
3036
3037 if (th->urg) {
3038 if (tcp_urg(sk, th, saddr)) {
3039 kfree_skb(skb, FREE_READ);
3040 release_sock(sk);
3041 return(0);
3042 }
3043 }
3044
3045 if (tcp_data(skb, sk, saddr, len)) {
3046 kfree_skb(skb, FREE_READ);
3047 release_sock(sk);
3048 return(0);
3049 }
3050
3051 if (!th->fin) {
3052 release_sock(sk);
3053 return(0);
3054 }
3055 tcp_fin(sk, th, saddr, dev);
3056 release_sock(sk);
3057 return(0);
3058 }
3059 }
3060
3061
3062
3063
3064
3065
3066 static void
3067 tcp_write_wakeup(struct sock *sk)
3068 {
3069 struct sk_buff *buff;
3070 struct tcphdr *t1;
3071 struct device *dev=NULL;
3072 int tmp;
3073
3074 if (sk -> state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) return;
3075
3076 buff = (struct sk_buff *) sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
3077 if (buff == NULL) return;
3078
3079 buff->lock = 0;
3080 buff->mem_addr = buff;
3081 buff->mem_len = MAX_ACK_SIZE;
3082 buff->len = sizeof(struct tcphdr);
3083 buff->free = 1;
3084 buff->sk = sk;
3085 DPRINTF((DBG_TCP, "in tcp_write_wakeup\n"));
3086 t1 = (struct tcphdr *)(buff + 1);
3087
3088
3089 tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
3090 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
3091 if (tmp < 0) {
3092 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
3093 return;
3094 }
3095
3096 buff->len += tmp;
3097 t1 = (struct tcphdr *)((char *)t1 +tmp);
3098
3099 memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
3100
3101
3102
3103
3104
3105 t1->seq = ntohl(sk->send_seq-1);
3106 t1->ack = 1;
3107 t1->res1= 0;
3108 t1->res2= 0;
3109 t1->rst = 0;
3110 t1->urg = 0;
3111 t1->psh = 0;
3112 t1->fin = 0;
3113 t1->syn = 0;
3114 t1->ack_seq = ntohl(sk->acked_seq);
3115 t1->window = ntohs(sk->prot->rspace(sk));
3116 t1->doff = sizeof(*t1)/4;
3117 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
3118
3119
3120
3121
3122 sk->prot->queue_xmit(sk, dev, buff, 1);
3123 }
3124
3125
/*
 * The TCP protocol operations/parameters table.  Entries are
 * positional and must match the slot order of "struct proto"
 * (declared elsewhere, presumably sock.h).  Slot comments below for
 * the unnamed scalar/aggregate entries are inferred — verify against
 * the struct proto declaration.
 */
struct proto tcp_prot = {
sock_wmalloc,		/* allocate from the socket's write (send) memory */
sock_rmalloc,		/* allocate from the socket's read (receive) memory */
sock_wfree,		/* release write-side memory */
sock_rfree,		/* release read-side memory */
sock_rspace,		/* remaining receive buffer space */
sock_wspace,		/* remaining send buffer space */
tcp_close,		/* close a connection */
tcp_read,		/* read data from the socket */
tcp_write,		/* write data to the socket */
tcp_sendto,		/* sendto entry point */
tcp_recvfrom,		/* recvfrom entry point */
ip_build_header,	/* build the IP header for an outgoing packet */
tcp_connect,		/* initiate a connection */
tcp_accept,		/* accept an incoming connection */
ip_queue_xmit,		/* hand a packet to the IP layer for transmission */
tcp_retransmit,		/* retransmit timed-out segments */
tcp_write_wakeup,	/* probe/keep-alive sender (defined above) */
tcp_read_wakeup,	/* read-side wakeup */
tcp_rcv,		/* input handler for received TCP segments */
tcp_select,		/* select() support */
tcp_ioctl,		/* ioctl() support */
NULL,			/* NOTE(review): presumably the init slot — confirm */
tcp_shutdown,		/* shutdown() support */
128,			/* presumably max header space to reserve — confirm */
0,			/* presumably retransmit counter initial value — confirm */
{NULL,},		/* presumably the per-port socket array — confirm */
"TCP"			/* protocol name */
};