This source file includes following definitions.
- min
- print_th
- get_firstr
- diff
- tcp_time_wait
- tcp_retransmit
- tcp_err
- tcp_readable
- tcp_select
- tcp_ioctl
- tcp_check
- tcp_send_check
- tcp_send_partial
- tcp_send_ack
- tcp_build_header
- tcp_write
- tcp_sendto
- tcp_read_wakeup
- cleanup_rbuf
- tcp_read_urg
- tcp_read
- tcp_shutdown
- tcp_recvfrom
- tcp_reset
- tcp_conn_request
- tcp_close
- tcp_write_xmit
- sort_send
- tcp_ack
- tcp_data
- tcp_urg
- tcp_fin
- tcp_accept
- tcp_connect
- tcp_sequence
- tcp_options
- tcp_rcv
- tcp_write_wakeup
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20 #include <linux/types.h>
21 #include <linux/sched.h>
22 #include <linux/mm.h>
23 #include <linux/string.h>
24 #include <linux/socket.h>
25 #include <linux/sockios.h>
26 #include <linux/termios.h>
27 #include <linux/in.h>
28 #include <linux/fcntl.h>
29 #include "inet.h"
30 #include "timer.h"
31 #include "dev.h"
32 #include "ip.h"
33 #include "protocol.h"
34 #include "icmp.h"
35 #include "tcp.h"
36 #include "skbuff.h"
37 #include "sock.h"
38 #include "arp.h"
39 #include <linux/errno.h>
40 #include <linux/timer.h>
41 #include <asm/system.h>
42 #include <asm/segment.h>
43 #include <linux/mm.h>
44
45
/* tmax: later-of-two sequence numbers under modular (wraparound) compare. */
#define tmax(a,b) (before((a),(b)) ? (b) : (a))
/*
 * swap: exchange two unsigned-long-compatible lvalues.
 * Wrapped in do { } while (0) so it behaves as a single statement and is
 * safe in unbraced if/else bodies (the old bare-brace form broke there).
 */
#define swap(a,b) do { unsigned long c; c = a; a = b; b = c; } while (0)
48
49
/*
 * Return the smaller of two unsigned values.
 * NOTE(review): result is returned as a signed int, matching the
 * original interface -- callers pass small byte counts.
 */
static int
min(unsigned int a, unsigned int b)
{
  return((a < b) ? a : b);
}
56
57
/*
 * Debugging aid: dump every field of a TCP header to the kernel log.
 * Does nothing unless inet_debug is set to DBG_TCP.
 */
void
print_th(struct tcphdr *th)
{
  unsigned char *ptr;

  if (inet_debug != DBG_TCP) return;

  printk("TCP header:\n");
  /* Any options start immediately after the fixed-size header. */
  ptr =(unsigned char *)(th + 1);
  printk(" source=%d, dest=%d, seq =%d, ack_seq = %d\n",
	ntohs(th->source), ntohs(th->dest),
	ntohl(th->seq), ntohl(th->ack_seq));
  printk(" fin=%d, syn=%d, rst=%d, psh=%d, ack=%d, urg=%d res1=%d res2=%d\n",
	th->fin, th->syn, th->rst, th->psh, th->ack,
	th->urg, th->res1, th->res2);
  printk(" window = %d, check = %d urg_ptr = %d\n",
	ntohs(th->window), ntohs(th->check), ntohs(th->urg_ptr));
  printk(" doff = %d\n", th->doff);
  /* Only the first four option bytes are shown. */
  printk(" options = %d %d %d %d\n", ptr[0], ptr[1], ptr[2], ptr[3]);
}
78
79
80
81 static struct sk_buff *
82 get_firstr(struct sock *sk)
83 {
84 struct sk_buff *skb;
85
86 skb = sk->rqueue;
87 if (skb == NULL) return(NULL);
88 sk->rqueue =(struct sk_buff *)skb->next;
89 if (sk->rqueue == skb) {
90 sk->rqueue = NULL;
91 } else {
92 sk->rqueue->prev = skb->prev;
93 sk->rqueue->prev->next = sk->rqueue;
94 }
95 return(skb);
96 }
97
98
/*
 * Absolute difference between two sequence numbers.
 * The subtraction is done modulo 2^32 via unsigned arithmetic, then
 * the magnitude of the signed result is returned.
 */
static long
diff(unsigned long seq1, unsigned long seq2)
{
  long delta = seq1 - seq2;

  return((delta > 0) ? delta : -delta);
}
110
111
112
/*
 * Move sk into TIME_WAIT: mark both directions shut down, wake any
 * sleeper, and (re)arm the close timer for the 2MSL-style linger.
 */
static void
tcp_time_wait(struct sock *sk)
{
  sk->state = TCP_TIME_WAIT;
  sk->shutdown = SHUTDOWN_MASK;
  /* Wake a process blocked on this socket unless it is already dead. */
  if (!sk->dead) wake_up(sk->sleep);
  sk->time_wait.len = TCP_TIMEWAIT_LEN;
  sk->timeout = TIME_CLOSE;
  reset_timer((struct timer *)&sk->time_wait);
}
123
124
125 static void
126 tcp_retransmit(struct sock *sk, int all)
127 {
128 if (all) {
129 ip_retransmit(sk, all);
130 return;
131 }
132
133 if (sk->cong_window > 4)
134 sk->cong_window = sk->cong_window / 2;
135 sk->exp_growth = 0;
136
137
138 ip_retransmit(sk, all);
139 }
140
141
142
143
144
145
146
147
148
149
/*
 * ICMP error callback for TCP.  'header' points at the TCP header of
 * the datagram that triggered the error, so th->dest / th->source are
 * our local and remote ports respectively.  Looks up the owning
 * socket, then either throttles (source quench) or records the error,
 * killing a connection that was still being set up if the error is
 * fatal.
 */
void
tcp_err(int err, unsigned char *header, unsigned long daddr,
	unsigned long saddr, struct inet_protocol *protocol)
{
  struct tcphdr *th;
  struct sock *sk;

  DPRINTF((DBG_TCP, "TCP: tcp_err(%d, hdr=%X, daddr=%X saddr=%X, protocol=%X)\n",
	   err, header, daddr, saddr, protocol));

  th =(struct tcphdr *)header;
  sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
  print_th(th);

  /* Nobody owns this connection any more -- nothing to report. */
  if (sk == NULL) return;

  if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8)) {
	/* Source quench: shrink the congestion window, floor of 4. */
	if (sk->cong_window > 4) sk->cong_window--;
	return;
  }

  DPRINTF((DBG_TCP, "TCP: icmp_err got error\n"));
  /* Translate the ICMP code to an errno for the user to pick up. */
  sk->err = icmp_err_convert[err & 0xff].errno;

  /*
   * A fatal error during connection setup aborts the connect;
   * established connections just carry the errno to the caller.
   */
  if (icmp_err_convert[err & 0xff].fatal) {
	if (sk->state == TCP_SYN_SENT) {
		sk->state = TCP_CLOSE;
		sk->prot->close(sk, 0);
	}
  }
  return;
}
191
192
/*
 * Return the number of bytes a reader could consume from sk right now.
 * Walks the receive queue starting at the first unread sequence number
 * (copied_seq+1), stopping at a sequence gap, after a PSH segment, or
 * after 20 segments (treated as probable queue corruption).
 * NOTE(review): 'amount' is unsigned long but the function returns int
 * and the debug printf uses %d -- harmless for realistic window sizes,
 * but worth confirming.
 */
static int
tcp_readable(struct sock *sk)
{
  unsigned long counted;
  unsigned long amount;
  struct sk_buff *skb;
  int count=0;
  int sum;

  DPRINTF((DBG_TCP, "tcp_readable(sk=%X)\n", sk));

  if (sk == NULL || sk->rqueue == NULL) return(0);

  counted = sk->copied_seq+1;	/* next sequence number to hand out */
  amount = 0;
  skb =(struct sk_buff *)sk->rqueue->next;	/* head of circular list */

  do {
	count++;
	if (count > 20) {
		DPRINTF((DBG_TCP, "tcp_readable, more than 20 packets without a psh\n"));
		DPRINTF((DBG_TCP, "possible read_queue corruption.\n"));
		return(amount);
	}
	/* A hole in the sequence space: nothing beyond it is readable. */
	if (before(counted, skb->h.th->seq)) break;
	/* Bytes in this segment not yet consumed. */
	sum = skb->len -(counted - skb->h.th->seq);
	if (skb->h.th->syn) sum++;	/* SYN occupies a sequence slot */
	if (skb->h.th->urg) {
		/* Urgent data is read out of band -- exclude it. */
		sum -= ntohs(skb->h.th->urg_ptr);
	}
	if (sum >= 0) {
		amount += sum;
		if (skb->h.th->syn) amount--;	/* SYN is not user data */
		counted += sum;
	}
	/* PSH marks a record boundary; stop once we have something. */
	if (amount && skb->h.th->psh) break;
	skb =(struct sk_buff *)skb->next;
  } while(skb != sk->rqueue->next);
  DPRINTF((DBG_TCP, "tcp readable returning %d bytes\n", amount));
  return(amount);
}
235
236
/*
 * select() support for TCP sockets.  Returns 1 when the requested
 * condition (readable / writable / exceptional) already holds,
 * otherwise registers the caller on sk->sleep via select_wait() and
 * returns 0.  The socket is locked (sk->inuse) for the duration and
 * released on every exit path.
 */
static int
tcp_select(struct sock *sk, int sel_type, select_table *wait)
{
  DPRINTF((DBG_TCP, "tcp_select(sk=%X, sel_type = %d, wait = %X)\n",
	   sk, sel_type, wait));

  sk->inuse = 1;
  switch(sel_type) {
	case SEL_IN:
		select_wait(sk->sleep, wait);
		/* Readable: queued data, or a pending connection on a listener. */
		if (sk->rqueue != NULL) {
			if (sk->state == TCP_LISTEN || tcp_readable(sk)) {
				release_sock(sk);
				return(1);
			}
		}
		/* A receive-shutdown socket reads EOF -- report readable. */
		if (sk->shutdown & RCV_SHUTDOWN) {
			release_sock(sk);
			return(1);
		} else {
			release_sock(sk);
			return(0);
		}
	case SEL_OUT:
		select_wait(sk->sleep, wait);
		if (sk->shutdown & SEND_SHUTDOWN) {
			DPRINTF((DBG_TCP,
				"write select on shutdown socket.\n"));
			release_sock(sk);
			return(0);
		}
		/*
		 * Writable only when a full MTU of send buffer is free,
		 * and never while the three-way handshake is in flight.
		 */
		if (sk->prot->wspace(sk) >= sk->mtu) {
			release_sock(sk);
			if (sk->state == TCP_SYN_RECV ||
			    sk->state == TCP_SYN_SENT) return(0);
			return(1);
		}
		DPRINTF((DBG_TCP,
			"tcp_select: sleeping on write sk->wmem_alloc = %d, "
			"sk->packets_out = %d\n"
			"sk->wback = %X, sk->wfront = %X\n"
			"sk->send_seq = %u, sk->window_seq=%u\n",
			sk->wmem_alloc, sk->packets_out,
			sk->wback, sk->wfront,
			sk->send_seq, sk->window_seq));
		release_sock(sk);
		return(0);
	case SEL_EX:
		select_wait(sk->sleep,wait);
		/* Exceptional condition: a pending error on the socket. */
		if (sk->err) {
			release_sock(sk);
			return(1);
		}
		release_sock(sk);
		return(0);
  }
  /* Unknown selector -- treat as not ready. */
  release_sock(sk);
  return(0);
}
307
308
/*
 * ioctl() handler for TCP sockets.  Supports:
 *   DDIOCSDBG  - toggle protocol debugging,
 *   TIOCINQ    - bytes readable (rejects listeners),
 *   SIOCATMARK - 1 if the next byte to read is urgent data,
 *   TIOCOUTQ   - free send-buffer space.
 * Results are copied to user space via put_fs_long after verify_area.
 */
int
tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
  DPRINTF((DBG_TCP, "tcp_ioctl(sk=%X, cmd = %d, arg=%X)\n", sk, cmd, arg));
  switch(cmd) {
	case DDIOCSDBG:
		return(dbg_ioctl((void *) arg, DBG_TCP));

	case TIOCINQ:
#ifdef FIXME
	case FIONREAD:
#endif
		{
			unsigned long amount;

			/* "How much to read" is meaningless on a listener. */
			if (sk->state == TCP_LISTEN) return(-EINVAL);

			amount = 0;
			sk->inuse = 1;
			if (sk->rqueue != NULL) {
				amount = tcp_readable(sk);
			}
			release_sock(sk);
			DPRINTF((DBG_TCP, "returning %d\n", amount));
			verify_area(VERIFY_WRITE,(void *)arg,
				    sizeof(unsigned long));
			put_fs_long(amount,(unsigned long *)arg);
			return(0);
		}
	case SIOCATMARK:
		{
			struct sk_buff *skb;
			int answ = 0;

			/*
			 * At the urgent mark when the first unread byte is
			 * the start of a segment carrying URG.
			 */
			sk->inuse = 1;
			if (sk->rqueue != NULL) {
				skb =(struct sk_buff *)sk->rqueue->next;
				if (sk->copied_seq+1 == skb->h.th->seq &&
				    skb->h.th->urg) answ = 1;
			}
			release_sock(sk);
			verify_area(VERIFY_WRITE,(void *) arg,
				    sizeof(unsigned long));
			put_fs_long(answ,(void *) arg);
			return(0);
		}
	case TIOCOUTQ:
		{
			unsigned long amount;

			if (sk->state == TCP_LISTEN) return(-EINVAL);
			/* Half the raw space, mirroring the send-side policy. */
			amount = sk->prot->wspace(sk)/2;
			verify_area(VERIFY_WRITE,(void *)arg,
				    sizeof(unsigned long));
			put_fs_long(amount,(unsigned long *)arg);
			return(0);
		}
	default:
		return(-EINVAL);
  }
}
374
375
376
/*
 * Compute the TCP checksum over the pseudo-header (saddr, daddr,
 * protocol, length) plus 'len' bytes starting at 'th', using i386
 * inline assembly.  Returns the 16-bit one's-complement result ready
 * to store in th->check.  A zero saddr means "use our own address".
 * NOTE(review): i386-only; the (ntohs(len) << 16) pseudo-header trick
 * assumes little-endian byte order -- confirm before porting.
 */
static unsigned short
tcp_check(struct tcphdr *th, int len,
	  unsigned long saddr, unsigned long daddr)
{
  unsigned long sum;

  if (saddr == 0) saddr = my_addr();
  print_th(th);
  /* Fold the pseudo-header (addresses + proto/len word) into sum. */
  __asm__("\t addl %%ecx,%%ebx\n"
	  "\t adcl %%edx,%%ebx\n"
	  "\t adcl $0, %%ebx\n"
	  : "=b"(sum)
	  : "0"(daddr), "c"(saddr), "d"((ntohs(len) << 16) + IPPROTO_TCP*256)
	  : "cx","bx","dx" );

  /* Sum the body a 32-bit word at a time, with carry. */
  if (len > 3) {
	__asm__("\tclc\n"
		"1:\n"
		"\t lodsl\n"
		"\t adcl %%eax, %%ebx\n"
		"\t loop 1b\n"
		"\t adcl $0, %%ebx\n"
		: "=b"(sum) , "=S"(th)
		: "0"(sum), "c"(len/4) ,"1"(th)
		: "ax", "cx", "bx", "si" );
  }

  /* Fold the 32-bit accumulator down to 16 bits. */
  __asm__("\t movl %%ebx, %%ecx\n"
	  "\t shrl $16,%%ecx\n"
	  "\t addw %%cx, %%bx\n"
	  "\t adcw $0, %%bx\n"
	  : "=b"(sum)
	  : "0"(sum)
	  : "bx", "cx");

  /* Add a trailing 16-bit word, if any. */
  if ((len & 2) != 0) {
	__asm__("\t lodsw\n"
		"\t addw %%ax,%%bx\n"
		"\t adcw $0, %%bx\n"
		: "=b"(sum), "=S"(th)
		: "0"(sum) ,"1"(th)
		: "si", "ax", "bx");
  }

  /* Add a final odd byte, zero-extended. */
  if ((len & 1) != 0) {
	__asm__("\t lodsb\n"
		"\t movb $0,%%ah\n"
		"\t addw %%ax,%%bx\n"
		"\t adcw $0, %%bx\n"
		: "=b"(sum)
		: "0"(sum) ,"S"(th)
		: "si", "ax", "bx");
  }

  /* One's-complement of the folded sum is the checksum. */
  return((~sum) & 0xffff);
}
437
438
439 static void
440 tcp_send_check(struct tcphdr *th, unsigned long saddr,
441 unsigned long daddr, int len, struct sock *sk)
442 {
443 th->check = 0;
444 if (sk && sk->no_check) return;
445 th->check = tcp_check(th, len, saddr, daddr);
446 return;
447 }
448
449
/*
 * Flush the partially-filled buffer (sk->send_tmp) that tcp_write()
 * has been accumulating small writes into.  Checksums it, then either
 * transmits it or, if the send window / congestion window forbids
 * sending now, appends it to the write queue for later.
 */
static void
tcp_send_partial(struct sock *sk)
{
  struct sk_buff *skb;

  if (sk == NULL || sk->send_tmp == NULL) return;

  skb = sk->send_tmp;

  /* Data length = bytes between the TCP header and the buffer end. */
  tcp_send_check(skb->h.th, sk->saddr, sk->daddr,
		 skb->len-(unsigned long)skb->h.th +
		 (unsigned long)(skb+1), sk);

  skb->h.seq = sk->send_seq;
  /* Hold the segment if it exceeds the peer window or cong_window. */
  if (after(sk->send_seq , sk->window_seq) ||
      sk->packets_out >= sk->cong_window) {
	DPRINTF((DBG_TCP, "sk->cong_window = %d, sk->packets_out = %d\n",
		sk->cong_window, sk->packets_out));
	DPRINTF((DBG_TCP, "sk->send_seq = %d, sk->window_seq = %d\n",
		sk->send_seq, sk->window_seq));
	skb->next = NULL;
	skb->magic = TCP_WRITE_QUEUE_MAGIC;
	if (sk->wback == NULL) {
		sk->wfront=skb;
	} else {
		sk->wback->next = skb;
	}
	sk->wback = skb;
  } else {
	sk->prot->queue_xmit(sk, skb->dev, skb,0);
  }
  sk->send_tmp = NULL;	/* buffer handed off either way */
}
484
485
486
/*
 * Build and transmit a bare ACK segment acknowledging 'ack', with our
 * sequence number 'sequence', to daddr.  'th' is the header of the
 * segment being acknowledged (used for the port numbers).  Called from
 * interrupt context, so allocation is GFP_ATOMIC; on allocation
 * failure the ACK is deferred via ack_backlog and a short timer.
 */
static void
tcp_send_ack(unsigned long sequence, unsigned long ack,
	     struct sock *sk,
	     struct tcphdr *th, unsigned long daddr)
{
  struct sk_buff *buff;
  struct tcphdr *t1;
  struct device *dev = NULL;
  int tmp;

  buff = sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
  if (buff == NULL) {
	/* No memory: remember we owe an ACK and retry on a timer. */
	sk->ack_backlog++;
	if (sk->timeout != TIME_WRITE && tcp_connected(sk->state)) {
		sk->timeout = TIME_WRITE;
		sk->time_wait.len = 10;	/* jiffies until retry */
		reset_timer((struct timer *)&sk->time_wait);
	}
	return;
  }

  buff->mem_addr = buff;
  buff->mem_len = MAX_ACK_SIZE;
  buff->lock = 0;
  buff->len = sizeof(struct tcphdr);
  buff->sk = sk;
  t1 =(struct tcphdr *)(buff + 1);

  /* Lay down the MAC/IP headers; fills in 'dev'. */
  tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
			       IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
  if (tmp < 0) {
	sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
	return;
  }
  buff->len += tmp;
  t1 =(struct tcphdr *)((char *)t1 +tmp);

  /* Start from the incoming header, then swap/overwrite fields. */
  memcpy(t1, th, sizeof(*t1));

  t1->dest = th->source;
  t1->source = th->dest;
  t1->seq = ntohl(sequence);
  t1->ack = 1;
  sk->window = sk->prot->rspace(sk);
  t1->window = ntohs(sk->window);
  t1->res1 = 0;
  t1->res2 = 0;
  t1->rst = 0;
  t1->urg = 0;
  t1->syn = 0;
  t1->psh = 0;
  t1->fin = 0;
  if (ack == sk->acked_seq) {
	/* Fully caught up: clear the deferred-ACK bookkeeping. */
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	sk->ack_timed = 0;
	if (sk->send_head == NULL && sk->wfront == NULL) {
		/* Nothing in flight either -- no timer needed. */
		delete_timer((struct timer *)&sk->time_wait);
		sk->timeout = 0;
	}
  }
  t1->ack_seq = ntohl(ack);
  t1->doff = sizeof(*t1)/4;
  tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), sk);
  sk->prot->queue_xmit(sk, dev, buff, 1);
}
561
562
563
/*
 * Fill in a TCP header for an outgoing data segment from the socket's
 * template (dummy_th).  'push' is the number of bytes that will remain
 * unsent after this segment: zero means this is the tail of the write,
 * so PSH is set.  Also advertises the current receive window and
 * clears the deferred-ACK counters (the segment carries the ACK).
 * Returns the header length in bytes.
 */
static int
tcp_build_header(struct tcphdr *th, struct sock *sk, int push)
{
  memcpy(th,(void *) &(sk->dummy_th), sizeof(*th));
  /* NOTE(review): ntohl used for host-to-net; same operation on i386. */
  th->seq = ntohl(sk->send_seq);
  th->psh =(push == 0) ? 1 : 0;
  th->doff = sizeof(*th)/4;
  th->ack = 1;
  th->fin = 0;
  /* This segment acknowledges everything received so far. */
  sk->ack_backlog = 0;
  sk->bytes_rcv = 0;
  sk->ack_timed = 0;
  th->ack_seq = ntohl(sk->acked_seq);
  sk->window = sk->prot->rspace(sk);
  th->window = ntohs(sk->window);

  return(sizeof(*th));
}
584
585
586
587
588
589
/*
 * Send 'len' bytes from user space ('from') on socket sk.
 * Returns the number of bytes queued/sent, or a negative errno.
 * nonblock selects -EAGAIN over sleeping; MSG_OOB in 'flags' sends
 * urgent data.  The loop: wait for an established connection, append
 * to the partial buffer (send_tmp) if one exists, otherwise allocate
 * a new sk_buff, build headers, copy the data, and either transmit or
 * queue depending on the send/congestion windows.
 * Locking: sk->inuse is taken before each state check and dropped
 * (release_sock) around every sleep and return -- the order of these
 * operations is load-bearing.
 */
static int
tcp_write(struct sock *sk, unsigned char *from,
	  int len, int nonblock, unsigned flags)
{
  int copied = 0;
  int copy;
  int tmp;
  struct sk_buff *skb;
  unsigned char *buff;
  struct proto *prot;
  struct device *dev = NULL;

  DPRINTF((DBG_TCP, "tcp_write(sk=%X, from=%X, len=%d, nonblock=%d, flags=%X)\n",
	   sk, from, len, nonblock, flags));

  prot = sk->prot;
  while(len > 0) {
	/* A pending error aborts the write (partial count wins). */
	if (sk->err) {
		if (copied) return(copied);
		tmp = -sk->err;
		sk->err = 0;
		return(tmp);
	}

	sk->inuse = 1;
	if (sk->shutdown & SEND_SHUTDOWN) {
		release_sock(sk);
		sk->err = EPIPE;
		if (copied) return(copied);
		sk->err = 0;
		return(-EPIPE);
	}

	/* Wait until the connection can carry data. */
	while(sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) {
		if (sk->err) {
			if (copied) return(copied);
			tmp = -sk->err;
			sk->err = 0;
			return(tmp);
		}

		/* Not connecting either: the connection is gone. */
		if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV) {
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 1\n"));
			if (copied) return(copied);

			if (sk->err) {
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			if (sk->keepopen) {
				send_sig(SIGPIPE, current, 0);
			}
			return(-EPIPE);
		}

		if (nonblock || copied) {
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 2\n"));
			if (copied) return(copied);
			return(-EAGAIN);
		}

		/* Sleep until the handshake finishes or a signal arrives. */
		release_sock(sk);
		cli();
		if (sk->state != TCP_ESTABLISHED &&
		    sk->state != TCP_CLOSE_WAIT && sk->err == 0) {
			interruptible_sleep_on(sk->sleep);
			if (current->signal & ~current->blocked) {
				sti();
				DPRINTF((DBG_TCP, "tcp_write: return 3\n"));
				if (copied) return(copied);
				return(-ERESTARTSYS);
			}
		}
		sk->inuse = 1;
		sti();
	}

	/* Append to an existing partial buffer first, if any. */
	if (sk->send_tmp != NULL) {
		skb = sk->send_tmp;
		if (!(flags & MSG_OOB)) {
			/* Room left in the partial buffer. */
			copy = min(sk->mss - skb->len + 128 +
				   prot->max_header, len);

			if (copy <= 0) {
				printk("TCP: **bug**: \"copy\" <= 0!!\n");
				copy = 0;
			}

			memcpy_fromfs((unsigned char *)(skb+1) + skb->len, from, copy);
			skb->len += copy;
			from += copy;
			copied += copy;
			len -= copy;
			sk->send_seq += copy;
		}

		/* Flush when a full MSS is buffered, or for OOB data. */
		if (skb->len -(unsigned long)skb->h.th +
		    (unsigned long)(skb+1) >= sk->mss ||(flags & MSG_OOB)) {
			tcp_send_partial(sk);
		}
		continue;
	}

	/* Size the next segment by the usable send window. */
	copy = min(sk->mtu, diff(sk->window_seq, sk->send_seq));

	/* Ignore silly windows; never exceed the MTU. */
	if (copy < 200 || copy > sk->mtu) copy = sk->mtu;
	copy = min(copy, len);

	/* Small write with data in flight: start a partial buffer. */
	if (sk->packets_out && copy < sk->mss && !(flags & MSG_OOB)) {
		release_sock(sk);
		skb = prot->wmalloc(sk, sk->mss + 128 + prot->max_header +
				    sizeof(*skb), 0, GFP_KERNEL);
		sk->inuse = 1;
		sk->send_tmp = skb;
		if (skb != NULL)
			skb->mem_len = sk->mss + 128 + prot->max_header + sizeof(*skb);
	} else {
		release_sock(sk);
		skb = prot->wmalloc(sk, copy + prot->max_header +
				    sizeof(*skb), 0, GFP_KERNEL);
		sk->inuse = 1;
		if (skb != NULL)
			skb->mem_len = copy+prot->max_header + sizeof(*skb);
	}

	/* No buffer memory: fail (nonblock) or wait for space. */
	if (skb == NULL) {
		if (nonblock || copied) {
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 4\n"));
			if (copied) return(copied);
			return(-EAGAIN);
		}

		tmp = sk->wmem_alloc;
		release_sock(sk);

		/* Sleep only if no memory was freed meanwhile. */
		cli();
		if (tmp <= sk->wmem_alloc &&
		    (sk->state == TCP_ESTABLISHED||sk->state == TCP_CLOSE_WAIT)
		    && sk->err == 0) {
			interruptible_sleep_on(sk->sleep);
			if (current->signal & ~current->blocked) {
				sti();
				DPRINTF((DBG_TCP, "tcp_write: return 5\n"));
				if (copied) return(copied);
				return(-ERESTARTSYS);
			}
		}
		sk->inuse = 1;
		sti();
		continue;
	}

	skb->mem_addr = skb;
	skb->len = 0;
	skb->sk = sk;
	skb->lock = 0;
	skb->free = 0;

	buff =(unsigned char *)(skb+1);

	/* Lower-layer (MAC/IP) headers first; fills in 'dev'. */
	tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev,
				 IPPROTO_TCP, sk->opt, skb->mem_len);
	if (tmp < 0 ) {
		prot->wfree(sk, skb->mem_addr, skb->mem_len);
		release_sock(sk);
		DPRINTF((DBG_TCP, "tcp_write: return 6\n"));
		if (copied) return(copied);
		return(tmp);
	}
	skb->len += tmp;
	skb->dev = dev;
	buff += tmp;
	skb->h.th =(struct tcphdr *) buff;
	/* len-copy == 0 on the final chunk -> PSH gets set. */
	tmp = tcp_build_header((struct tcphdr *)buff, sk, len-copy);
	if (tmp < 0) {
		prot->wfree(sk, skb->mem_addr, skb->mem_len);
		release_sock(sk);
		DPRINTF((DBG_TCP, "tcp_write: return 7\n"));
		if (copied) return(copied);
		return(tmp);
	}

	if (flags & MSG_OOB) {
		((struct tcphdr *)buff)->urg = 1;
		((struct tcphdr *)buff)->urg_ptr = ntohs(copy);
	}
	skb->len += tmp;
	memcpy_fromfs(buff+tmp, from, copy);

	from += copy;
	copied += copy;
	len -= copy;
	skb->len += copy;
	skb->free = 0;
	sk->send_seq += copy;

	/* A partial buffer was started above; keep filling it. */
	if (sk->send_tmp != NULL) continue;

	tcp_send_check((struct tcphdr *)buff, sk->saddr, sk->daddr,
		       copy + sizeof(struct tcphdr), sk);

	skb->h.seq = sk->send_seq;
	/* Queue instead of sending when the windows are exhausted. */
	if (after(sk->send_seq , sk->window_seq) ||
	    sk->packets_out >= sk->cong_window) {
		DPRINTF((DBG_TCP, "sk->cong_window = %d, sk->packets_out = %d\n",
			sk->cong_window, sk->packets_out));
		DPRINTF((DBG_TCP, "sk->send_seq = %d, sk->window_seq = %d\n",
			sk->send_seq, sk->window_seq));
		skb->next = NULL;
		skb->magic = TCP_WRITE_QUEUE_MAGIC;
		if (sk->wback == NULL) {
			sk->wfront = skb;
		} else {
			sk->wback->next = skb;
		}
		sk->wback = skb;
	} else {
		prot->queue_xmit(sk, dev, skb,0);
	}
  }
  sk->err = 0;
  release_sock(sk);
  DPRINTF((DBG_TCP, "tcp_write: return 8\n"));
  return(copied);
}
849
850
851 static int
852 tcp_sendto(struct sock *sk, unsigned char *from,
853 int len, int nonblock, unsigned flags,
854 struct sockaddr_in *addr, int addr_len)
855 {
856 struct sockaddr_in sin;
857
858 if (addr_len < sizeof(sin)) return(-EINVAL);
859 memcpy_fromfs(&sin, addr, sizeof(sin));
860 if (sin.sin_family && sin.sin_family != AF_INET) return(-EINVAL);
861 if (sin.sin_port != sk->dummy_th.dest) return(-EINVAL);
862 if (sin.sin_addr.s_addr != sk->daddr) return(-EINVAL);
863 return(tcp_write(sk, from, len, nonblock, flags));
864 }
865
866
/*
 * Send the ACK we have been deferring (ack_backlog != 0), typically
 * because the receive window grew after the reader drained data.
 * Builds a bare ACK from the socket's header template; on allocation
 * failure re-arms a short timer to retry.
 */
static void
tcp_read_wakeup(struct sock *sk)
{
  int tmp;
  struct device *dev = NULL;
  struct tcphdr *t1;
  struct sk_buff *buff;

  DPRINTF((DBG_TCP, "in tcp read wakeup\n"));
  /* Nothing owed -- nothing to do. */
  if (!sk->ack_backlog) return;

  buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
  if (buff == NULL) {
	/* Out of memory: try again shortly via the write timer. */
	sk->timeout = TIME_WRITE;
	sk->time_wait.len = 10;
	reset_timer((struct timer *) &sk->time_wait);
	return;
  }

  buff->mem_addr = buff;
  buff->mem_len = MAX_ACK_SIZE;
  buff->lock = 0;
  buff->len = sizeof(struct tcphdr);
  buff->sk = sk;

  /* Lower-layer headers; fills in 'dev'. */
  tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
			       IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
  if (tmp < 0) {
	sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
	return;
  }

  buff->len += tmp;
  t1 =(struct tcphdr *)((char *)(buff+1) +tmp);

  memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
  t1->seq = ntohl(sk->send_seq);
  t1->ack = 1;
  t1->res1 = 0;
  t1->res2 = 0;
  t1->rst = 0;
  t1->urg = 0;
  t1->syn = 0;
  t1->psh = 0;
  /* The ACK is going out now -- clear the deferred bookkeeping. */
  sk->ack_backlog = 0;
  sk->bytes_rcv = 0;
  /* Advertise the freshly freed receive space. */
  sk->window = sk->prot->rspace(sk);
  t1->window = ntohs(sk->window);
  t1->ack_seq = ntohl(sk->acked_seq);
  t1->doff = sizeof(*t1)/4;
  tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
  sk->prot->queue_xmit(sk, dev, buff, 1);
}
932
933
934
935
936
937
938
939
/*
 * Reclaim fully-consumed buffers from the head of sk's receive queue
 * and, if that freed noticeable space, arrange for a window-update
 * ACK: immediately when the window grew by more than an MTU, else on
 * a short ACK timer.
 */
static void
cleanup_rbuf(struct sock *sk)
{
  int left;

  DPRINTF((DBG_TCP, "cleaning rbuf for sk=%X\n", sk));
  /* Remember the space before freeing, to detect growth below. */
  left = sk->prot->rspace(sk);

  /* Free leading buffers whose data has been fully read (used). */
  while(sk->rqueue != NULL ) {
	struct sk_buff *skb;

	skb =(struct sk_buff *)sk->rqueue->next;	/* list head */
	if (!skb->used) break;
	if (sk->rqueue == skb) {
		sk->rqueue = NULL;	/* last buffer on the ring */
	} else {
		skb->next->prev = skb->prev;
		skb->prev->next = skb->next;
	}
	skb->sk = sk;
	kfree_skb(skb, FREE_READ);
  }

  DPRINTF((DBG_TCP, "sk->window left = %d, sk->prot->rspace(sk)=%d\n",
	   sk->window - sk->bytes_rcv, sk->prot->rspace(sk)));

  if (sk->prot->rspace(sk) != left) {
	/* Space changed: the peer should learn the new window. */
	sk->ack_backlog++;
	if ((sk->prot->rspace(sk) > (sk->window - sk->bytes_rcv + sk->mtu))) {
		/* Grew by more than an MTU -- update the peer now. */
		tcp_read_wakeup(sk);
	} else {
		/* Otherwise piggyback/delay via the ACK timer. */
		if (before(jiffies + TCP_ACK_TIME, sk->time_wait.when)) {
			sk->time_wait.len = TCP_ACK_TIME;
			sk->timeout = TIME_WRITE;
			reset_timer((struct timer *) &sk->time_wait);
		}
	}
  }
}
1001
1002
1003
/*
 * Read out-of-band (urgent) data from sk into user buffer 'to'.
 * Blocks (unless nonblock) until urgent data arrives, then copies up
 * to urg_ptr bytes from the first unconsumed URG segment and returns
 * the count.  MSG_PEEK leaves the urgent data unconsumed.
 * NOTE(review): if sk->urg is nonzero but every URG segment is already
 * urg_used, the outer loop appears to spin without sleeping -- verify.
 */
static int
tcp_read_urg(struct sock * sk, int nonblock,
	     unsigned char *to, int len, unsigned flags)
{
  int copied = 0;
  struct sk_buff *skb;

  DPRINTF((DBG_TCP, "tcp_read_urg(sk=%X, to=%X, len=%d, flags=%X)\n",
	   sk, to, len, flags));

  while(len > 0) {
	sk->inuse = 1;
	/* Wait until some urgent data is queued. */
	while(sk->urg==0 || sk->rqueue == NULL) {
		if (sk->err) {
			int tmp;

			release_sock(sk);
			if (copied) return(copied);
			tmp = -sk->err;
			sk->err = 0;
			return(tmp);
		}

		if (sk->state == TCP_CLOSE || sk->done) {
			release_sock(sk);
			if (copied) return(copied);
			/* First EOF read returns 0, later ones ENOTCONN. */
			if (!sk->done) {
				sk->done = 1;
				return(0);
			}
			return(-ENOTCONN);
		}

		if (sk->shutdown & RCV_SHUTDOWN) {
			release_sock(sk);
			if (copied == 0) sk->done = 1;
			return(copied);
		}

		if (nonblock || copied) {
			release_sock(sk);
			if (copied) return(copied);
			return(-EAGAIN);
		}

		/* Re-check the condition with interrupts off, then sleep. */
		release_sock(sk);
		cli();
		if ((sk->urg == 0 || sk->rqueue == NULL) &&
		    sk->err == 0 && !(sk->shutdown & RCV_SHUTDOWN)) {
			interruptible_sleep_on(sk->sleep);
			if (current->signal & ~current->blocked) {
				sti();
				if (copied) return(copied);
				return(-ERESTARTSYS);
			}
		}
		sk->inuse = 1;
		sti();
	}

	/* Scan the ring for the first unconsumed urgent segment. */
	skb =(struct sk_buff *)sk->rqueue->next;
	do {
		int amt;

		if (skb->h.th->urg && !skb->urg_used) {
			/* urg_ptr 0 means "whole segment is urgent". */
			if (skb->h.th->urg_ptr == 0) {
				skb->h.th->urg_ptr = ntohs(skb->len);
			}
			amt = min(ntohs(skb->h.th->urg_ptr),len);
			verify_area(VERIFY_WRITE, to, amt);
			memcpy_tofs(to,(unsigned char *)(skb->h.th) +
				    skb->h.th->doff*4, amt);

			if (!(flags & MSG_PEEK)) {
				skb->urg_used = 1;
				sk->urg--;
			}
			release_sock(sk);
			copied += amt;
			return(copied);
		}
		skb =(struct sk_buff *)skb->next;
	} while(skb != sk->rqueue->next);
  }
  sk->urg = 0;
  release_sock(sk);
  return(0);
}
1093
1094
1095
/*
 * Read up to 'len' bytes of in-band data from sk into user buffer
 * 'to'.  Returns bytes copied, 0 at EOF, or a negative errno.
 * MSG_OOB is redirected to tcp_read_urg(); MSG_PEEK copies without
 * consuming.  Walks the receive queue from copied_seq+1, sleeping
 * (unless nonblock) whenever the next in-sequence byte has not
 * arrived.  Stops early at a PSH or URG segment.  Unread urgent data
 * in the stream raises SIGURG and returns -EINTR so the reader can
 * fetch it out of band first.
 */
static int
tcp_read(struct sock *sk, unsigned char *to,
	 int len, int nonblock, unsigned flags)
{
  int copied=0;
  struct sk_buff *skb;
  unsigned long offset;
  unsigned long used;

  if (len == 0) return(0);
  if (len < 0) {
	return(-EINVAL);
  }

  /* Listeners have no data stream. */
  if (sk->state == TCP_LISTEN) return(-ENOTCONN);

  /* Urgent data is handled by its own routine. */
  if ((flags & MSG_OOB)) return(tcp_read_urg(sk, nonblock, to, len, flags));

  sk->inuse = 1;
  if (sk->rqueue != NULL) skb =(struct sk_buff *)sk->rqueue->next;
  else skb = NULL;

  DPRINTF((DBG_TCP, "tcp_read(sk=%X, to=%X, len=%d, nonblock=%d, flags=%X)\n",
	   sk, to, len, nonblock, flags));

  while(len > 0) {
	/* Wait here until the next in-sequence segment is available. */
	while(skb == NULL ||
	      before(sk->copied_seq+1, skb->h.th->seq) || skb->used) {
		DPRINTF((DBG_TCP, "skb = %X:\n", skb));
		cleanup_rbuf(sk);
		if (sk->err) {
			int tmp;

			release_sock(sk);
			if (copied) {
				DPRINTF((DBG_TCP, "tcp_read: returing %d\n",
					copied));
				return(copied);
			}
			tmp = -sk->err;
			sk->err = 0;
			return(tmp);
		}

		if (sk->state == TCP_CLOSE) {
			release_sock(sk);
			if (copied) {
				DPRINTF((DBG_TCP, "tcp_read: returing %d\n",
					copied));
				return(copied);
			}
			/* First EOF read returns 0, later ones ENOTCONN. */
			if (!sk->done) {
				sk->done = 1;
				return(0);
			}
			return(-ENOTCONN);
		}

		if (sk->shutdown & RCV_SHUTDOWN) {
			release_sock(sk);
			if (copied == 0) sk->done = 1;
			DPRINTF((DBG_TCP, "tcp_read: returing %d\n", copied));
			return(copied);
		}

		if (nonblock || copied) {
			release_sock(sk);
			if (copied) {
				DPRINTF((DBG_TCP, "tcp_read: returing %d\n",
					copied));
				return(copied);
			}
			return(-EAGAIN);
		}

		if ((flags & MSG_PEEK) && copied != 0) {
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_read: returing %d\n", copied));
			return(copied);
		}

		DPRINTF((DBG_TCP, "tcp_read about to sleep. state = %d\n",
			sk->state));
		release_sock(sk);

		/* Re-check with interrupts off before sleeping. */
		cli();
		if (sk->shutdown & RCV_SHUTDOWN || sk->err != 0) {
			sk->inuse = 1;
			sti();
			continue;
		}

		if (sk->rqueue == NULL ||
		    before(sk->copied_seq+1, sk->rqueue->next->h.th->seq)) {
			interruptible_sleep_on(sk->sleep);
			if (current->signal & ~current->blocked) {
				sti();
				if (copied) {
					DPRINTF((DBG_TCP, "tcp_read: returing %d\n",
						copied));
					return(copied);
				}
				return(-ERESTARTSYS);
			}
		}
		sk->inuse = 1;
		sti();
		DPRINTF((DBG_TCP, "tcp_read woke up. \n"));

		/* Queue may have changed while we slept. */
		if (sk->rqueue == NULL) skb = NULL;
		else skb =(struct sk_buff *)sk->rqueue->next;

	}

	/* Offset of the next unread byte within this segment. */
	offset = sk->copied_seq+1 - skb->h.th->seq;

	if (skb->h.th->syn) offset--;	/* SYN consumed a sequence slot */
	if (offset < skb->len) {
		/*
		 * Urgent data embedded here: skip over it if it has
		 * already been read OOB, else signal the reader.
		 */
		if (skb->h.th->urg) {
			if (skb->urg_used) {
				sk->copied_seq += ntohs(skb->h.th->urg_ptr);
				offset += ntohs(skb->h.th->urg_ptr);
				if (offset >= skb->len) {
					skb->used = 1;
					skb =(struct sk_buff *)skb->next;
					continue;
				}
			} else {
				release_sock(sk);
				if (copied) return(copied);
				send_sig(SIGURG, current, 0);
				return(-EINTR);
			}
		}
		used = min(skb->len - offset, len);
		verify_area(VERIFY_WRITE, to, used);
		/* Data starts after the TCP header (doff words). */
		memcpy_tofs(to,((unsigned char *)skb->h.th) +
			    skb->h.th->doff*4 + offset, used);
		copied += used;
		len -= used;
		to += used;
		if (!(flags & MSG_PEEK)) sk->copied_seq += used;

		/* Mark the buffer consumed once fully drained. */
		if (!(flags & MSG_PEEK) &&
		    (!skb->h.th->urg || skb->urg_used) &&
		    (used + offset >= skb->len)) skb->used = 1;

		/* PSH/URG end the read early (record boundary). */
		if (skb->h.th->psh || skb->h.th->urg) {
			break;
		}
	} else {
		/* Entirely consumed already (e.g. pure-SYN segment). */
		skb->used = 1;
	}
	skb =(struct sk_buff *)skb->next;
  }
  cleanup_rbuf(sk);
  release_sock(sk);
  DPRINTF((DBG_TCP, "tcp_read: returing %d\n", copied));
  if (copied == 0 && nonblock) return(-EAGAIN);
  return(copied);
}
1283
1284
1285
1286
1287
1288
/*
 * shutdown() for the send direction: flush any partial buffer and
 * send a FIN (queued behind pending data if the write queue is
 * non-empty), then move to FIN_WAIT1/FIN_WAIT2.  Only the
 * SEND_SHUTDOWN half is implemented here; a no-op if a FIN is
 * already in flight.
 */
void
tcp_shutdown(struct sock *sk, int how)
{
  struct sk_buff *buff;
  struct tcphdr *t1, *th;
  struct proto *prot;
  int tmp;
  struct device *dev = NULL;

  /* Already closing, or caller did not ask to shut the send side. */
  if (sk->state == TCP_FIN_WAIT1 || sk->state == TCP_FIN_WAIT2) return;
  if (!(how & SEND_SHUTDOWN)) return;
  sk->inuse = 1;

  /* Push out any accumulated partial segment first. */
  if (sk->send_tmp) tcp_send_partial(sk);

  prot =(struct proto *)sk->prot;
  th =(struct tcphdr *)&sk->dummy_th;
  release_sock(sk);
  buff = prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
  if (buff == NULL) return;
  sk->inuse = 1;

  DPRINTF((DBG_TCP, "tcp_shutdown_send buff = %X\n", buff));
  buff->mem_addr = buff;
  buff->mem_len = MAX_RESET_SIZE;
  buff->lock = 0;
  buff->sk = sk;
  buff->len = sizeof(*t1);
  t1 =(struct tcphdr *)(buff + 1);

  /* Lower-layer headers; fills in 'dev'. */
  tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
			   IPPROTO_TCP, sk->opt,
			   sizeof(struct tcphdr));
  if (tmp < 0) {
	prot->wfree(sk,buff->mem_addr, buff->mem_len);
	release_sock(sk);
	DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
	return;
  }

  t1 =(struct tcphdr *)((char *)t1 +tmp);
  buff ->len += tmp;
  buff->dev = dev;
  memcpy(t1, th, sizeof(*t1));
  t1->seq = ntohl(sk->send_seq);
  sk->send_seq++;	/* FIN consumes one sequence number */
  buff->h.seq = sk->send_seq;
  t1->ack = 1;
  t1->ack_seq = ntohl(sk->acked_seq);
  t1->window = ntohs(sk->prot->rspace(sk));
  t1->fin = 1;
  t1->rst = 0;
  t1->doff = sizeof(*t1)/4;
  tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

  /* FIN must not overtake queued data -- append if queue non-empty. */
  if (sk->wback != NULL) {
	buff->next = NULL;
	sk->wback->next = buff;
	sk->wback = buff;
	buff->magic = TCP_WRITE_QUEUE_MAGIC;
  } else {
	sk->prot->queue_xmit(sk, dev, buff, 0);
  }

  if (sk->state == TCP_ESTABLISHED) sk->state = TCP_FIN_WAIT1;
  else sk->state = TCP_FIN_WAIT2;

  release_sock(sk);
}
1372
1373
1374 static int
1375 tcp_recvfrom(struct sock *sk, unsigned char *to,
1376 int to_len, int nonblock, unsigned flags,
1377 struct sockaddr_in *addr, int *addr_len)
1378 {
1379 struct sockaddr_in sin;
1380 int len;
1381 int result = tcp_read(sk, to, to_len, nonblock, flags);
1382
1383 if (result < 0) return(result);
1384 len = get_fs_long(addr_len);
1385 if (len > sizeof(sin)) len = sizeof(sin);
1386 sin.sin_family = AF_INET;
1387 sin.sin_port = sk->dummy_th.dest;
1388 sin.sin_addr.s_addr = sk->daddr;
1389 verify_area(VERIFY_WRITE, addr, len);
1390 memcpy_tofs(addr, &sin, len);
1391 verify_area(VERIFY_WRITE, addr_len, sizeof(len));
1392 put_fs_long(len, addr_len);
1393 return(result);
1394 }
1395
1396
1397
/*
 * Send an RST in reply to the segment 'th' (received from daddr on
 * saddr).  Used when no socket wants the segment.  Owns no socket:
 * the buffer is charged to NULL.  Safe to call from interrupt
 * context (GFP_ATOMIC); silently gives up if memory or header
 * building fails.
 */
static void
tcp_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
	  struct proto *prot, struct options *opt, struct device *dev)
{
  struct sk_buff *buff;
  struct tcphdr *t1;
  int tmp;

  buff = prot->wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
  if (buff == NULL) return;

  DPRINTF((DBG_TCP, "tcp_reset buff = %X\n", buff));
  buff->mem_addr = buff;
  buff->mem_len = MAX_RESET_SIZE;
  buff->lock = 0;
  buff->len = sizeof(*t1);
  buff->sk = NULL;
  buff->dev = dev;

  t1 =(struct tcphdr *)(buff + 1);

  /* Lower-layer headers; may update 'dev'. */
  tmp = prot->build_header(buff, saddr, daddr, &dev, IPPROTO_TCP, opt,
			   sizeof(struct tcphdr));
  if (tmp < 0) {
	prot->wfree(NULL, buff->mem_addr, buff->mem_len);
	return;
  }
  t1 =(struct tcphdr *)((char *)t1 +tmp);
  buff->len += tmp;
  /* Start from the offending header, then swap ports and set RST. */
  memcpy(t1, th, sizeof(*t1));

  t1->dest = th->source;
  t1->source = th->dest;
  /* Sequence the RST at the number the peer expects next. */
  t1->seq = th->ack_seq;
  t1->rst = 1;
  t1->window = 0;
  t1->ack = 0;
  t1->syn = 0;
  t1->urg = 0;
  t1->fin = 0;
  t1->psh = 0;
  t1->doff = sizeof(*t1)/4;
  tcp_send_check(t1, saddr, daddr, sizeof(*t1), NULL);
  prot->queue_xmit(NULL, dev, buff, 1);
}
1449
1450
1451
1452
1453
1454
1455
1456
1457
/*
 * Handle an incoming SYN on a listening socket `sk'.
 *
 * Clones the listener into a fresh sock (TCP_SYN_RECV), answers with a
 * SYN+ACK carrying an MSS option, and parks the original skb -- with
 * skb->sk pointing at the NEW socket -- on the listener's receive
 * queue, which is how tcp_accept() later finds the child.
 *
 * Note: daddr/saddr are named from the arriving packet's point of view,
 * so the child gets newsk->daddr = saddr and newsk->saddr = daddr.
 */
static void
tcp_conn_request(struct sock *sk, struct sk_buff *skb,
	 unsigned long daddr, unsigned long saddr,
	 struct options *opt, struct device *dev)
{
  struct sk_buff *buff;
  struct tcphdr *t1;
  unsigned char *ptr;
  struct sock *newsk;
  struct tcphdr *th;
  int tmp;

  DPRINTF((DBG_TCP, "tcp_conn_request(sk = %X, skb = %X, daddr = %X, sadd4= %X, \n"
	  "                  opt = %X, dev = %X)\n",
	  sk, skb, daddr, saddr, opt, dev));

  th = skb->h.th;

  /* A dead listener cannot accept: answer with RST and drop the SYN. */
  if (!sk->dead) {
	wake_up(sk->sleep);
  } else {
	DPRINTF((DBG_TCP, "tcp_conn_request on dead socket\n"));
	tcp_reset(daddr, saddr, th, sk->prot, opt, dev);
	kfree_skb(skb, FREE_READ);
	return;
  }

  /* Too many not-yet-accepted connections: silently drop the SYN and
     let the peer retransmit. */
  if (sk->ack_backlog >= sk->max_ack_backlog) {
	kfree_skb(skb, FREE_READ);
	return;
  }

  /* Allocate the child socket.  GFP_ATOMIC because we may be called
     from the receive path. */
  newsk = kmalloc(sizeof(struct sock), GFP_ATOMIC);
  if (newsk == NULL) {
	kfree_skb(skb, FREE_READ);
	return;
  }

  DPRINTF((DBG_TCP, "newsk = %X\n", newsk));

  /* Start as a byte copy of the listener, then reset every field that
     must not be shared (queues, counters, timers). */
  memcpy((void *)newsk,(void *)sk, sizeof(*newsk));
  newsk->wback = NULL;
  newsk->wfront = NULL;
  newsk->rqueue = NULL;
  newsk->send_head = NULL;
  newsk->send_tail = NULL;
  newsk->back_log = NULL;
  newsk->rtt = TCP_CONNECT_TIME;
  newsk->blog = 0;
  newsk->intr = 0;
  newsk->proc = 0;
  newsk->done = 0;
  newsk->send_tmp = NULL;
  newsk->pair = NULL;
  newsk->wmem_alloc = 0;
  newsk->rmem_alloc = 0;

  newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;

  newsk->err = 0;
  newsk->shutdown = 0;
  newsk->ack_backlog = 0;
  /* th->seq was converted to host order in tcp_rcv; ack the SYN. */
  newsk->acked_seq = skb->h.th->seq+1;
  newsk->fin_seq = skb->h.th->seq;
  newsk->copied_seq = skb->h.th->seq;
  newsk->state = TCP_SYN_RECV;
  newsk->timeout = 0;
  /* Pick our initial send sequence from the clock-driven generator. */
  newsk->send_seq = timer_seq * SEQ_TICK - seq_offset;
  newsk->rcv_ack_seq = newsk->send_seq;
  newsk->urg =0;
  newsk->retransmits = 0;
  newsk->destroy = 0;
  newsk->time_wait.sk = newsk;
  newsk->time_wait.next = NULL;
  newsk->dummy_th.source = skb->h.th->dest;
  newsk->dummy_th.dest = skb->h.th->source;

  /* Addresses swap sides: the child talks back to the SYN's sender. */
  newsk->daddr = saddr;
  newsk->saddr = daddr;

  put_sock(newsk->num,newsk);
  newsk->dummy_th.res1 = 0;
  newsk->dummy_th.doff = 6;
  newsk->dummy_th.fin = 0;
  newsk->dummy_th.syn = 0;
  newsk->dummy_th.rst = 0;
  newsk->dummy_th.psh = 0;
  newsk->dummy_th.ack = 0;
  newsk->dummy_th.urg = 0;
  newsk->dummy_th.res2 = 0;
  newsk->acked_seq = skb->h.th->seq + 1;
  newsk->copied_seq = skb->h.th->seq;

  /* Take the peer's MSS option if present (kind 2, length 4), otherwise
     fall back to the device MTU; never exceed the device MTU. */
  if (skb->h.th->doff == 5) {
	newsk->mtu = dev->mtu - HEADER_SIZE;
  } else {
	ptr =(unsigned char *)(skb->h.th + 1);
	if (ptr[0] != 2 || ptr[1] != 4) {
		newsk->mtu = dev->mtu - HEADER_SIZE;
	} else {
		newsk->mtu = min(ptr[2] * 256 + ptr[3] - HEADER_SIZE,
				 dev->mtu - HEADER_SIZE);
	}
  }

  /* Build and send the SYN+ACK. */
  buff = newsk->prot->wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
  if (buff == NULL) {
	sk->err = -ENOMEM;
	newsk->dead = 1;
	release_sock(newsk);
	kfree_skb(skb, FREE_READ);
	return;
  }

  buff->lock = 0;
  buff->mem_addr = buff;
  buff->mem_len = MAX_SYN_SIZE;
  buff->len = sizeof(struct tcphdr)+4;	/* header + 4 bytes of MSS option */
  buff->sk = newsk;

  t1 =(struct tcphdr *)(buff + 1);

  tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &dev,
			       IPPROTO_TCP, NULL, MAX_SYN_SIZE);

  if (tmp < 0) {
	/* Routing failed: undo everything and give the skb back to the
	   listener for freeing. */
	sk->err = tmp;
	sk->prot->wfree(newsk, buff->mem_addr, buff->mem_len);
	newsk->dead = 1;
	release_sock(newsk);
	skb->sk = sk;
	kfree_skb(skb, FREE_READ);
	return;
  }

  buff->len += tmp;
  t1 =(struct tcphdr *)((char *)t1 +tmp);

  memcpy(t1, skb->h.th, sizeof(*t1));
  buff->h.seq = newsk->send_seq;

  /* SYN+ACK: our ports, our ISN, ack of the peer's SYN. */
  t1->dest = skb->h.th->source;
  t1->source = newsk->dummy_th.source;
  t1->seq = ntohl(newsk->send_seq++);
  t1->ack = 1;
  newsk->window = newsk->prot->rspace(newsk);
  t1->window = ntohs(newsk->window);
  t1->res1 = 0;
  t1->res2 = 0;
  t1->rst = 0;
  t1->urg = 0;
  t1->psh = 0;
  t1->syn = 1;
  t1->ack_seq = ntohl(skb->h.th->seq+1);
  t1->doff = sizeof(*t1)/4+1;		/* +1 word for the MSS option */

  /* MSS option advertising our device MTU. */
  ptr =(unsigned char *)(t1+1);
  ptr[0] = 2;
  ptr[1] = 4;
  ptr[2] =((dev->mtu - HEADER_SIZE) >> 8) & 0xff;
  ptr[3] =(dev->mtu - HEADER_SIZE) & 0xff;

  tcp_send_check(t1, daddr, saddr, sizeof(*t1)+4, newsk);
  newsk->prot->queue_xmit(newsk, dev, buff, 0);

  /* Arm the connect timer so an unanswered SYN+ACK gets retried. */
  newsk->time_wait.len = TCP_CONNECT_TIME;
  DPRINTF((DBG_TCP, "newsk->time_wait.sk = %X\n", newsk->time_wait.sk));
  reset_timer((struct timer *)&newsk->time_wait);

  /* Hand the SYN skb to the child and charge its memory accounting,
     then queue it on the LISTENER so accept() can find the child. */
  skb->sk = newsk;

  sk->rmem_alloc -= skb->mem_len;
  newsk->rmem_alloc += skb->mem_len;

  if (sk->rqueue == NULL) {
	skb->next = skb;
	skb->prev = skb;
	sk->rqueue = skb;
  } else {
	skb->next = sk->rqueue;
	skb->prev = sk->rqueue->prev;
	sk->rqueue->prev = skb;
	skb->prev->next = skb;
  }
  sk->ack_backlog++;
  release_sock(newsk);
}
1661
1662
1663 static void
1664 tcp_close(struct sock *sk, int timeout)
1665 {
1666 struct sk_buff *buff;
1667 int need_reset = 0;
1668 struct tcphdr *t1, *th;
1669 struct proto *prot;
1670 struct device *dev=NULL;
1671 int tmp;
1672
1673
1674
1675
1676
1677 DPRINTF((DBG_TCP, "tcp_close((struct sock *)%X, %d)\n",sk, timeout));
1678 sk->inuse = 1;
1679 sk->keepopen = 1;
1680 sk->shutdown = SHUTDOWN_MASK;
1681
1682 if (!sk->dead) wake_up(sk->sleep);
1683
1684
1685 if (sk->rqueue != NULL) {
1686 struct sk_buff *skb;
1687 struct sk_buff *skb2;
1688
1689 skb = sk->rqueue;
1690 do {
1691 skb2 =(struct sk_buff *)skb->next;
1692
1693 if (skb->len > 0 &&
1694 after(skb->h.th->seq + skb->len + 1, sk->copied_seq))
1695 need_reset = 1;
1696 kfree_skb(skb, FREE_READ);
1697 skb = skb2;
1698 } while(skb != sk->rqueue);
1699 }
1700 sk->rqueue = NULL;
1701
1702
1703 if (sk->send_tmp) {
1704 tcp_send_partial(sk);
1705 }
1706
1707 switch(sk->state) {
1708 case TCP_FIN_WAIT1:
1709 case TCP_FIN_WAIT2:
1710 case TCP_LAST_ACK:
1711
1712 sk->time_wait.len = 4*sk->rtt;;
1713 sk->timeout = TIME_CLOSE;
1714 reset_timer((struct timer *)&sk->time_wait);
1715 if (timeout) tcp_time_wait(sk);
1716 release_sock(sk);
1717 break;
1718 case TCP_TIME_WAIT:
1719 if (timeout) {
1720 sk->state = TCP_CLOSE;
1721 }
1722 release_sock(sk);
1723 return;
1724 case TCP_LISTEN:
1725 sk->state = TCP_CLOSE;
1726 release_sock(sk);
1727 return;
1728 case TCP_CLOSE:
1729 release_sock(sk);
1730 return;
1731 case TCP_CLOSE_WAIT:
1732 case TCP_ESTABLISHED:
1733 case TCP_SYN_SENT:
1734 case TCP_SYN_RECV:
1735 prot =(struct proto *)sk->prot;
1736 th =(struct tcphdr *)&sk->dummy_th;
1737 buff = prot->wmalloc(sk, MAX_FIN_SIZE, 1, GFP_ATOMIC);
1738 if (buff == NULL) {
1739
1740 if (sk->state != TCP_CLOSE_WAIT)
1741 sk->state = TCP_ESTABLISHED;
1742 sk->timeout = TIME_CLOSE;
1743 sk->time_wait.len = 100;
1744 reset_timer((struct timer *)&sk->time_wait);
1745 return;
1746 }
1747 buff->lock = 0;
1748 buff->mem_addr = buff;
1749 buff->mem_len = MAX_FIN_SIZE;
1750 buff->sk = sk;
1751 buff->len = sizeof(*t1);
1752 t1 =(struct tcphdr *)(buff + 1);
1753
1754
1755 tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
1756 IPPROTO_TCP, sk->opt,
1757 sizeof(struct tcphdr));
1758 if (tmp < 0) {
1759 prot->wfree(sk,buff->mem_addr, buff->mem_len);
1760 DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
1761 release_sock(sk);
1762 return;
1763 }
1764
1765 t1 =(struct tcphdr *)((char *)t1 +tmp);
1766 buff ->len += tmp;
1767 buff->dev = dev;
1768 memcpy(t1, th, sizeof(*t1));
1769 t1->seq = ntohl(sk->send_seq);
1770 sk->send_seq++;
1771 buff->h.seq = sk->send_seq;
1772 t1->ack = 1;
1773
1774
1775 sk->delay_acks = 0;
1776 t1->ack_seq = ntohl(sk->acked_seq);
1777 t1->window = ntohs(sk->prot->rspace(sk));
1778 t1->fin = 1;
1779 t1->rst = need_reset;
1780 t1->doff = sizeof(*t1)/4;
1781 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
1782
1783 if (sk->wfront == NULL) {
1784 prot->queue_xmit(sk, dev, buff, 0);
1785 } else {
1786 sk->time_wait.len = sk->rtt;
1787 sk->timeout = TIME_WRITE;
1788 reset_timer((struct timer *)&sk->time_wait);
1789 buff->next = NULL;
1790 if (sk->wback == NULL) {
1791 sk->wfront=buff;
1792 } else {
1793 sk->wback->next = buff;
1794 }
1795 sk->wback = buff;
1796 buff->magic = TCP_WRITE_QUEUE_MAGIC;
1797 }
1798
1799 if (sk->state == TCP_CLOSE_WAIT) {
1800 sk->state = TCP_FIN_WAIT2;
1801 } else {
1802 sk->state = TCP_FIN_WAIT1;
1803 }
1804 }
1805 release_sock(sk);
1806 }
1807
1808
1809
1810
1811
1812
1813 static void
1814 tcp_write_xmit(struct sock *sk)
1815 {
1816 struct sk_buff *skb;
1817
1818 DPRINTF((DBG_TCP, "tcp_write_xmit(sk=%X)\n", sk));
1819 while(sk->wfront != NULL &&
1820 before(sk->wfront->h.seq, sk->window_seq) &&
1821 sk->packets_out < sk->cong_window) {
1822 skb = sk->wfront;
1823 sk->wfront =(struct sk_buff *)skb->next;
1824 if (sk->wfront == NULL) sk->wback = NULL;
1825 skb->next = NULL;
1826 if (skb->magic != TCP_WRITE_QUEUE_MAGIC) {
1827 DPRINTF((DBG_TCP, "tcp.c skb with bad magic(%X) on write queue. Squashing "
1828 "queue\n", skb->magic));
1829 sk->wfront = NULL;
1830 sk->wback = NULL;
1831 return;
1832 }
1833 skb->magic = 0;
1834 DPRINTF((DBG_TCP, "Sending a packet.\n"));
1835
1836
1837 if (before(skb->h.seq, sk->rcv_ack_seq +1)) {
1838 sk->retransmits = 0;
1839 kfree_skb(skb, FREE_WRITE);
1840 if (!sk->dead) wake_up(sk->sleep);
1841 } else {
1842 sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
1843 }
1844 }
1845 }
1846
1847
1848
1849
1850
1851
1852 void
1853 sort_send(struct sock *sk)
1854 {
1855 struct sk_buff *list = NULL;
1856 struct sk_buff *skb,*skb2,*skb3;
1857
1858 for (skb = sk->send_head; skb != NULL; skb = skb2) {
1859 skb2 = (struct sk_buff *)skb->link3;
1860 if (list == NULL || before (skb2->h.seq, list->h.seq)) {
1861 skb->link3 = list;
1862 sk->send_tail = skb;
1863 list = skb;
1864 } else {
1865 for (skb3 = list; ; skb3 = (struct sk_buff *)skb3->link3) {
1866 if (skb3->link3 == NULL ||
1867 before(skb->h.seq, skb3->link3->h.seq)) {
1868 skb->link3 = skb3->link3;
1869 skb3->link3 = skb;
1870 if (skb->link3 == NULL) sk->send_tail = skb;
1871 break;
1872 }
1873 }
1874 }
1875 }
1876 sk->send_head = list;
1877 }
1878
1879
1880
/*
 * Process the ACK field (and window update) of an incoming segment.
 * Returns 1 if the ACK was acceptable, 0 if it was out of range.
 *
 * Responsibilities, in order: validate the ack number, apply window
 * shrinkage by moving now-unsendable segments from the retransmit list
 * back onto the write queue, grow the congestion window, free fully
 * acknowledged segments from send_head, kick further transmission or
 * timers, and drive the closing-state transitions.
 *
 * `flag' bits (internal): 1 = this segment did something useful /
 * carried data, 2 = rtt already updated once, 4 = window shrank.
 */
static int
tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
{
  unsigned long ack;
  int flag = 0;

  ack = ntohl(th->ack_seq);
  DPRINTF((DBG_TCP, "tcp_ack ack=%d, window=%d, "
	  "sk->rcv_ack_seq=%d, sk->window_seq = %d\n",
	  ack, ntohs(th->window), sk->rcv_ack_seq, sk->window_seq));

  /* Reject acks for data we never sent, or ancient duplicates. */
  if (after(ack, sk->send_seq+1) || before(ack, sk->rcv_ack_seq-1)) {
	if (after(ack, sk->send_seq) ||
	   (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)) {
		return(0);
	}
	/* Old duplicate on a live connection: still refresh keepalive. */
	if (sk->keepopen) {
		sk->time_wait.len = TCP_TIMEOUT_LEN;
		sk->timeout = TIME_KEEPOPEN;
		reset_timer((struct timer *)&sk->time_wait);
	}
	return(1);
  }

  if (len != th->doff*4) flag |= 1;	/* segment carries payload */

  /* The advertised window moved LEFT: segments on the retransmit list
     that no longer fit must go back onto the write queue (in order),
     after being unlinked from any device/arp queue they sit on. */
  if (after(sk->window_seq, ack+ntohs(th->window))) {
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *wskb = NULL;	/* last skb re-queued, for ordering */

	skb2 = sk->send_head;
	sk->send_head = NULL;
	sk->send_tail = NULL;

	flag |= 4;

	sk->window_seq = ack + ntohs(th->window);
	cli();		/* the device queues are touched from interrupts */
	while (skb2 != NULL) {
		skb = skb2;
		skb2 = (struct sk_buff *)skb->link3;
		skb->link3 = NULL;
		if (after(skb->h.seq, sk->window_seq)) {
			/* Beyond the new window: pull it back. */
			if (sk->packets_out > 0) sk->packets_out--;

			/* If it is sitting on a device output queue (next
			   non-NULL), unlink it from that ring and from any
			   dev->buffs[] / arp_q head pointing at it. */
			if (skb->next != NULL) {
				int i;

				if (skb->next != skb) {
					skb->next->prev = skb->prev;
					skb->prev->next = skb->next;
				}

				for(i = 0; i < DEV_NUMBUFFS; i++) {
					if (skb->dev->buffs[i] == skb) {
						if (skb->next == skb)
							skb->dev->buffs[i] = NULL;
						else
							skb->dev->buffs[i] = skb->next;
						break;
					}
				}
				if (arp_q == skb) {
					if (skb->next == skb) arp_q = NULL;
					else arp_q = skb->next;
				}
			}

			/* Splice back into the write queue, preserving order
			   (wskb tracks the previous re-queued segment). */
			skb->magic = TCP_WRITE_QUEUE_MAGIC;
			if (wskb == NULL) {
				skb->next = sk->wfront;
				sk->wfront = skb;
			} else {
				skb->next = wskb->next;
				wskb->next = skb;
			}
			if (sk->wback == wskb) sk->wback = skb;
			wskb = skb;
		} else {
			/* Still inside the window: keep on retransmit list. */
			if (sk->send_head == NULL) {
				sk->send_head = skb;
				sk->send_tail = skb;
			} else {
				sk->send_tail->link3 = skb;
				sk->send_tail = skb;
			}
			skb->link3 = NULL;
		}
	}
	sti();
  }

  /* Keep head/tail/packet-count consistent if the list emptied. */
  if (sk->send_tail == NULL || sk->send_head == NULL) {
	sk->send_head = NULL;
	sk->send_tail = NULL;
	sk->packets_out= 0;
  }

  sk->window_seq = ack + ntohs(th->window);

  /* Congestion window growth on a new ack: exponential during
     slow-start (exp_growth), else linear; capped at 2048. */
  if (sk->cong_window < 2048 && ack != sk->rcv_ack_seq) {
	if (sk->exp_growth) sk->cong_window *= 2;
	else sk->cong_window++;
  }

  DPRINTF((DBG_TCP, "tcp_ack: Updating rcv ack sequence.\n"));
  sk->rcv_ack_seq = ack;

  /* Retire every fully acknowledged segment from the retransmit list. */
  while(sk->send_head != NULL) {
	/* Sanity: the list must be in sequence order. */
	if (sk->send_head->link3 &&
	    after(sk->send_head->h.seq, sk->send_head->link3->h.seq)) {
		printk("INET: tcp.c: *** bug send_list out of order.\n");
		sort_send(sk);
	}

	if (before(sk->send_head->h.seq, ack+1)) {
		struct sk_buff *oskb;

		sk->retransmits = 0;

		if (sk->packets_out > 0) sk->packets_out --;
		DPRINTF((DBG_TCP, "skb=%X skb->h.seq = %d acked ack=%d\n",
			sk->send_head, sk->send_head->h.seq, ack));

		if (!sk->dead) wake_up(sk->sleep);

		cli();

		oskb = sk->send_head;

		/* Smoothed rtt update (once per ack, gated by flag bit 2).
		   NOTE(review): retransmits was just zeroed above, so the
		   first condition is always true here. */
		if (sk->retransmits == 0 && !(flag&2))
			sk->rtt += ((jiffies - oskb->when) - sk->rtt)>>2;

		flag |= 2;
		if (sk->rtt < 1) sk->rtt = 1;

		sk->send_head =(struct sk_buff *)oskb->link3;
		if (sk->send_head == NULL) {
			sk->send_tail = NULL;
		}

		/* Unlink from device/arp queues, as in the shrink path. */
		if (oskb->next != NULL) {
			int i;

			if (oskb->next != oskb) {
				oskb->next->prev = oskb->prev;
				oskb->prev->next = oskb->next;
			}
			for(i = 0; i < DEV_NUMBUFFS; i++) {
				if (oskb->dev->buffs[i] == oskb) {
					if (oskb== oskb->next)
						oskb->dev->buffs[i]= NULL;
					else
						oskb->dev->buffs[i] = oskb->next;
					break;
				}
			}
			if (arp_q == oskb) {
				if (oskb == oskb->next) arp_q = NULL;
				else arp_q =(struct sk_buff *)oskb->next;
			}
		}
		oskb->magic = 0;
		kfree_skb(oskb, FREE_WRITE);
		sti();
		if (!sk->dead) wake_up(sk->sleep);
	} else {
		break;
	}
  }

  /* More to send and room to send it?  Otherwise manage timers: drop
     them entirely if nothing is pending, or rearm write/close timers. */
  if (sk->wfront != NULL) {
	if (after (sk->window_seq, sk->wfront->h.seq) &&
	    sk->packets_out < sk->cong_window) {
		flag |= 1;
		tcp_write_xmit(sk);
	}
  } else {
	if (sk->send_head == NULL && sk->ack_backlog == 0 &&
	    sk->state != TCP_TIME_WAIT && !sk->keepopen) {
		DPRINTF((DBG_TCP, "Nothing to do, going to sleep.\n"));
		if (!sk->dead) wake_up(sk->sleep);

		delete_timer((struct timer *)&sk->time_wait);
		sk->timeout = 0;
	} else {
		if (sk->state != (unsigned char) sk->keepopen) {
			sk->timeout = TIME_WRITE;
			sk->time_wait.len = sk->rtt*2;
			reset_timer((struct timer *)&sk->time_wait);
		}
		if (sk->state == TCP_TIME_WAIT) {
			sk->time_wait.len = TCP_TIMEWAIT_LEN;
			reset_timer((struct timer *)&sk->time_wait);
			sk->timeout = TIME_CLOSE;
		}
	}
  }

  /* Everything acked and a partial send buffer waiting: push it out. */
  if (sk->packets_out == 0 && sk->send_tmp != NULL &&
      sk->wfront == NULL && sk->send_head == NULL) {
	flag |= 1;
	tcp_send_partial(sk);
  }

  /* Closing-state machinery driven by the ack. */
  if (sk->state == TCP_TIME_WAIT) {
	if (!sk->dead) wake_up(sk->sleep);
	if (sk->rcv_ack_seq == sk->send_seq && sk->acked_seq == sk->fin_seq) {
		flag |= 1;
		sk->state = TCP_CLOSE;
		sk->shutdown = SHUTDOWN_MASK;
	}
  }

  if (sk->state == TCP_LAST_ACK || sk->state == TCP_FIN_WAIT2) {
	if (!sk->dead) wake_up(sk->sleep);
	if (sk->rcv_ack_seq == sk->send_seq) {
		flag |= 1;
		if (sk->acked_seq != sk->fin_seq) {
			tcp_time_wait(sk);
		} else {
			DPRINTF((DBG_TCP, "tcp_ack closing socket - %X\n", sk));
			tcp_send_ack(sk->send_seq, sk->acked_seq, sk,
				     th, sk->daddr);
			sk->shutdown = SHUTDOWN_MASK;
			sk->state = TCP_CLOSE;
		}
	}
  }

  /* Fast retransmit heuristic: a useless ack (or window shrink) while
     the oldest unacked segment has outlived one rtt. */
  if (((!flag) || (flag&4)) && sk->send_head != NULL &&
      (sk->send_head->when + sk->rtt < jiffies)) {
	sk->exp_growth = 0;
	ip_retransmit(sk, 0);
  }

  DPRINTF((DBG_TCP, "leaving tcp_ack\n"));
  return(1);
}
2143
2144
2145
2146
2147
2148
2149
/*
 * Queue an incoming data segment on the socket's circular receive queue
 * (sk->rqueue points at the NEWEST element; prev walks toward newer,
 * next toward older -- see the insertion loop) and update acked_seq
 * across any now-contiguous run of segments.  Decides between an
 * immediate ack and a delayed ack, and handles the FIN_WAIT2 ->
 * LAST_ACK transition when the final data+fin arrives.
 *
 * th->seq was already converted to host byte order by tcp_rcv().
 * Always returns 0 (the skb is owned by the queue from here on).
 */
static int
tcp_data(struct sk_buff *skb, struct sock *sk,
	 unsigned long saddr, unsigned short len)
{
  struct sk_buff *skb1, *skb2;
  struct tcphdr *th;

  th = skb->h.th;
  print_th(th);
  skb->len = len -(th->doff*4);	/* payload length only */

  DPRINTF((DBG_TCP, "tcp_data len = %d sk = %X:\n", skb->len, sk));

  sk->bytes_rcv += skb->len;
  /* Pure ack with no payload and no interesting flags: nothing to queue.
     (If it didn't even ack, answer so the peer resynchronizes.) */
  if (skb->len == 0 && !th->fin && !th->urg && !th->psh) {
	if (!th->ack) tcp_send_ack(sk->send_seq, sk->acked_seq,sk, th, saddr);
	kfree_skb(skb, FREE_READ);
	return(0);
  }

  /* Data after we shut the receive side down: reset the connection. */
  if (sk->shutdown & RCV_SHUTDOWN) {
	sk->acked_seq = th->seq + skb->len + th->syn + th->fin;
	tcp_reset(sk->saddr, sk->daddr, skb->h.th,
		  sk->prot, NULL, skb->dev);
	sk->state = TCP_CLOSE;
	sk->err = EPIPE;
	sk->shutdown = SHUTDOWN_MASK;
	DPRINTF((DBG_TCP, "tcp_data: closing socket - %X\n", sk));
	kfree_skb(skb, FREE_READ);
	if (!sk->dead) wake_up(sk->sleep);
	return(0);
  }

  /* Insert into the circular receive queue in sequence order.  After
     this, skb1 is the element in front of skb, or NULL if skb landed
     at the front of the queue. */
  if (sk->rqueue == NULL) {
	DPRINTF((DBG_TCP, "tcp_data: skb = %X:\n", skb));

	sk->rqueue = skb;
	skb->next = skb;
	skb->prev = skb;
	skb1= NULL;
  } else {
	DPRINTF((DBG_TCP, "tcp_data adding to chain sk = %X:\n", sk));

	/* Walk from newest toward oldest looking for the first element
	   whose sequence is not after ours. */
	for(skb1=sk->rqueue; ; skb1 =(struct sk_buff *)skb1->prev) {
		DPRINTF((DBG_TCP, "skb1=%X\n", skb1));
		DPRINTF((DBG_TCP, "skb1->h.th->seq = %d\n", skb1->h.th->seq));
		if (after(th->seq+1, skb1->h.th->seq)) {
			skb->prev = skb1;
			skb->next = skb1->next;
			skb->next->prev = skb;
			skb1->next = skb;
			if (skb1 == sk->rqueue) sk->rqueue = skb;
			break;
		}
		if (skb1->prev == sk->rqueue) {
			/* Wrapped all the way around: we are the oldest. */
			skb->next= skb1;
			skb->prev = skb1->prev;
			skb->prev->next = skb;
			skb1->prev = skb;
			skb1 = NULL;

			break;
		}
	}
	DPRINTF((DBG_TCP, "skb = %X:\n", skb));
  }

  /* Stash the sequence number just past this segment (incl. syn/fin)
     in the header's ack_seq field for later bookkeeping. */
  th->ack_seq = th->seq + skb->len;
  if (th->syn) th->ack_seq++;
  if (th->fin) th->ack_seq++;

  if (before(sk->acked_seq, sk->copied_seq)) {
	printk("*** tcp.c:tcp_data bug acked < copied\n");
	sk->acked_seq = sk->copied_seq;
  }

  /* If this segment extends the in-order data (no gap before it),
     advance acked_seq through it and through any following queued
     segments that are now contiguous. */
  if (skb1 == NULL || skb1->acked || before(th->seq, sk->acked_seq+1)) {
	if (before(th->seq, sk->acked_seq+1)) {
		if (after(th->ack_seq, sk->acked_seq))
			sk->acked_seq = th->ack_seq;
		skb->acked = 1;

		if (skb->h.th->fin) {
			if (!sk->dead) wake_up(sk->sleep);
			sk->shutdown |= RCV_SHUTDOWN;
		}

		/* Sweep newer queued segments that became contiguous. */
		for(skb2 = (struct sk_buff *)skb->next;
		    skb2 !=(struct sk_buff *) sk->rqueue->next;
		    skb2 = (struct sk_buff *)skb2->next) {
			if (before(skb2->h.th->seq, sk->acked_seq+1)) {
				if (after(skb2->h.th->ack_seq, sk->acked_seq))
					sk->acked_seq = skb2->h.th->ack_seq;
				skb2->acked = 1;

				if (skb2->h.th->fin) {
					sk->shutdown |= RCV_SHUTDOWN;
					if (!sk->dead) wake_up(sk->sleep);
				}

				/* Force an immediate ack below. */
				sk->ack_backlog = sk->max_ack_backlog;
			} else {
				break;
			}
		}

		/* Ack now, or schedule a delayed ack via the timer. */
		if (!sk->delay_acks ||
		    sk->ack_backlog >= sk->max_ack_backlog ||
		    sk->bytes_rcv > sk->max_unacked || th->fin) {
			tcp_send_ack(sk->send_seq, sk->acked_seq,sk,th, saddr);
		} else {
			sk->ack_backlog++;
			sk->time_wait.len = TCP_ACK_TIME;
			sk->timeout = TIME_WRITE;
			reset_timer((struct timer *)&sk->time_wait);
		}
	}
  }

  /* Out-of-order segment.  If receive memory is tight, throw away the
     newest un-acked queued segments to make room, then ack what we
     actually have so the sender fills the gap. */
  if (!skb->acked) {
	while (sk->prot->rspace(sk) < sk->mtu) {
		skb1 = (struct sk_buff *)sk->rqueue;
		if (skb1 == NULL) {
			printk("INET: tcp.c:tcp_data memory leak detected.\n");
			break;
		}

		/* Never discard in-order (acked) data. */
		if (skb1->acked) {
			break;
		}
		if (skb1->prev == skb1) {
			sk->rqueue = NULL;
		} else {
			sk->rqueue = (struct sk_buff *)skb1->prev;
			skb1->next->prev = skb1->prev;
			skb1->prev->next = skb1->next;
		}
		kfree_skb(skb1, FREE_READ);
	}
	tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
	sk->ack_backlog++;
	sk->time_wait.len = TCP_ACK_TIME;
	sk->timeout = TIME_WRITE;
	reset_timer((struct timer *)&sk->time_wait);
  } else {
	/* In-order data: duplicate ack is harmless. */
	tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
  }

  if (!sk->dead) {
	wake_up(sk->sleep);
  } else {
	DPRINTF((DBG_TCP, "data received on dead socket.\n"));
  }

  /* Both directions finished: move FIN_WAIT2 on toward LAST_ACK. */
  if (sk->state == TCP_FIN_WAIT2 &&
      sk->acked_seq == sk->fin_seq && sk->rcv_ack_seq == sk->send_seq) {
	DPRINTF((DBG_TCP, "tcp_data: entering last_ack state sk = %X\n", sk));

	tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
	sk->shutdown = SHUTDOWN_MASK;
	sk->state = TCP_LAST_ACK;
	if (!sk->dead) wake_up(sk->sleep);
  }

  return(0);
}
2349
2350
2351 static int
2352 tcp_urg(struct sock *sk, struct tcphdr *th, unsigned long saddr)
2353 {
2354 extern int kill_pg(int pg, int sig, int priv);
2355 extern int kill_proc(int pid, int sig, int priv);
2356
2357 if (!sk->dead) wake_up(sk->sleep);
2358
2359 if (sk->urginline) {
2360 th->urg = 0;
2361 th->psh = 1;
2362 return(0);
2363 }
2364
2365 if (!sk->urg) {
2366
2367 if (sk->proc != 0) {
2368 if (sk->proc > 0) {
2369 kill_proc(sk->proc, SIGURG, 1);
2370 } else {
2371 kill_pg(-sk->proc, SIGURG, 1);
2372 }
2373 }
2374 }
2375 sk->urg++;
2376 return(0);
2377 }
2378
2379
2380
2381 static int
2382 tcp_fin(struct sock *sk, struct tcphdr *th,
2383 unsigned long saddr, struct device *dev)
2384 {
2385 DPRINTF((DBG_TCP, "tcp_fin(sk=%X, th=%X, saddr=%X, dev=%X)\n",
2386 sk, th, saddr, dev));
2387
2388 if (!sk->dead) {
2389 wake_up(sk->sleep);
2390 }
2391
2392 switch(sk->state) {
2393 case TCP_SYN_RECV:
2394 case TCP_SYN_SENT:
2395 case TCP_ESTABLISHED:
2396
2397 sk->fin_seq = th->seq+1;
2398 sk->state = TCP_CLOSE_WAIT;
2399 if (th->rst) sk->shutdown = SHUTDOWN_MASK;
2400 break;
2401
2402 case TCP_CLOSE_WAIT:
2403 case TCP_FIN_WAIT2:
2404 break;
2405
2406 case TCP_FIN_WAIT1:
2407
2408 sk->fin_seq = th->seq+1;
2409 sk->state = TCP_FIN_WAIT2;
2410 break;
2411
2412 default:
2413 case TCP_TIME_WAIT:
2414 sk->state = TCP_LAST_ACK;
2415
2416
2417 sk->time_wait.len = TCP_TIMEWAIT_LEN;
2418 sk->timeout = TIME_CLOSE;
2419 reset_timer((struct timer *)&sk->time_wait);
2420 return(0);
2421 }
2422 sk->ack_backlog++;
2423
2424 return(0);
2425 }
2426
2427
2428
/*
 * accept() on a listening TCP socket.  Waits (unless O_NONBLOCK) for a
 * connection skb queued by tcp_conn_request() and returns the child
 * socket stored in skb->sk.  On failure returns NULL with sk->err set
 * (EINVAL / EAGAIN / ERESTARTSYS).
 */
static struct sock *
tcp_accept(struct sock *sk, int flags)
{
  struct sock *newsk;
  struct sk_buff *skb;

  DPRINTF((DBG_TCP, "tcp_accept(sk=%X, flags=%X, addr=%s)\n",
	  sk, flags, in_ntoa(sk->saddr)));

  /* Only a listening socket can accept. */
  if (sk->state != TCP_LISTEN) {
	sk->err = EINVAL;
	return(NULL);
  }

  /* Interrupts stay off across the test-and-sleep so a connection
     arriving between get_firstr() and sleeping is not missed. */
  cli();
  sk->inuse = 1;
  while((skb = get_firstr(sk)) == NULL) {
	if (flags & O_NONBLOCK) {
		sti();
		release_sock(sk);
		sk->err = EAGAIN;
		return(NULL);
	}

	release_sock(sk);
	interruptible_sleep_on(sk->sleep);
	/* Woken by a signal rather than a connection? */
	if (current->signal & ~current->blocked) {
		sti();
		sk->err = ERESTARTSYS;
		return(NULL);
	}
	sk->inuse = 1;	/* re-lock before re-testing the queue */
  }
  sti();

  /* The queued skb carries the child socket created at SYN time. */
  newsk = skb->sk;

  kfree_skb(skb, FREE_READ);
  sk->ack_backlog--;
  release_sock(sk);
  return(newsk);
}
2477
2478
2479
/*
 * Actively open a connection: validate the user's sockaddr, pick an
 * initial sequence number, and send a SYN carrying an MSS option, then
 * enter SYN_SENT with the connect/retransmit timer armed.
 *
 * Returns 0 on success or a negative errno (-EISCONN, -EINVAL,
 * -EAFNOSUPPORT, -ENETUNREACH, -ENOMEM).
 */
static int
tcp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len)
{
  struct sk_buff *buff;
  struct sockaddr_in sin;
  struct device *dev=NULL;
  unsigned char *ptr;
  int tmp;
  struct tcphdr *t1;

  if (sk->state != TCP_CLOSE) return(-EISCONN);
  if (addr_len < 8) return(-EINVAL);

  /* Copy the destination address in from user space. */
  memcpy_fromfs(&sin,usin, min(sizeof(sin), addr_len));

  if (sin.sin_family && sin.sin_family != AF_INET) return(-EAFNOSUPPORT);

  DPRINTF((DBG_TCP, "TCP connect daddr=%s\n", in_ntoa(sin.sin_addr.s_addr)));

  /* Connecting to a broadcast address makes no sense for TCP. */
  if (chk_addr(sin.sin_addr.s_addr) == IS_BROADCAST) {
	DPRINTF((DBG_TCP, "TCP connection to broadcast address not allowed\n"));
	return(-ENETUNREACH);
  }
  sk->inuse = 1;
  sk->daddr = sin.sin_addr.s_addr;
  /* Clock-driven initial sequence number. */
  sk->send_seq = timer_seq*SEQ_TICK-seq_offset;
  sk->rcv_ack_seq = sk->send_seq -1;
  sk->err = 0;
  sk->dummy_th.dest = sin.sin_port;
  /* Drop the lock across the blocking (GFP_KERNEL) allocation. */
  release_sock(sk);

  buff=sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
  if (buff == NULL) {
	return(-ENOMEM);
  }
  sk->inuse = 1;	/* re-lock */
  buff->lock = 0;
  buff->mem_addr = buff;
  buff->mem_len = MAX_SYN_SIZE;
  buff->len = 24;	/* tcp header (20) + MSS option (4) */
  buff->sk = sk;
  t1 = (struct tcphdr *)(buff + 1);

  /* Route and build the lower-layer headers. */
  tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
			       IPPROTO_TCP, NULL, MAX_SYN_SIZE);
  if (tmp < 0) {
	sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
	release_sock(sk);
	return(-ENETUNREACH);
  }
  buff->len += tmp;
  t1 = (struct tcphdr *)((char *)t1 +tmp);

  /* SYN from the socket's template header. */
  memcpy(t1,(void *)&(sk->dummy_th), sizeof(*t1));
  t1->seq = ntohl(sk->send_seq++);	/* the SYN consumes a sequence number */
  buff->h.seq = sk->send_seq;
  t1->ack = 0;
  t1->window = 2;	/* tiny initial window until the handshake completes */
  t1->res1=0;
  t1->res2=0;
  t1->rst = 0;
  t1->urg = 0;
  t1->psh = 0;
  t1->syn = 1;
  t1->urg_ptr = 0;
  t1->doff = 6;		/* 5 words of header + 1 word of options */

  /* MSS option advertising the device MTU. */
  ptr = (unsigned char *)(t1+1);
  ptr[0] = 2;
  ptr[1] = 4;
  ptr[2] = (dev->mtu- HEADER_SIZE) >> 8;
  ptr[3] = (dev->mtu- HEADER_SIZE) & 0xff;
  sk->mtu = dev->mtu - HEADER_SIZE;
  tcp_send_check(t1, sk->saddr, sk->daddr,
		 sizeof(struct tcphdr) + 4, sk);

  /* Off we go: SYN_SENT with the connect timer running. */
  sk->state = TCP_SYN_SENT;

  sk->prot->queue_xmit(sk, dev, buff, 0);

  sk->time_wait.len = TCP_CONNECT_TIME;
  sk->rtt = TCP_CONNECT_TIME;
  reset_timer((struct timer *)&sk->time_wait);
  sk->retransmits = TCP_RETR2 - TCP_SYN_RETRIES;
  release_sock(sk);
  return(0);
}
2573
2574
2575
2576 static int
2577 tcp_sequence(struct sock *sk, struct tcphdr *th, short len,
2578 struct options *opt, unsigned long saddr)
2579 {
2580
2581
2582
2583
2584
2585
2586 DPRINTF((DBG_TCP, "tcp_sequence(sk=%X, th=%X, len = %d, opt=%d, saddr=%X)\n",
2587 sk, th, len, opt, saddr));
2588
2589 if (between(th->seq, sk->acked_seq, sk->acked_seq + sk->window)||
2590 between(th->seq + len-(th->doff*4), sk->acked_seq + 1,
2591 sk->acked_seq + sk->window) ||
2592 (before(th->seq, sk->acked_seq) &&
2593 after(th->seq + len -(th->doff*4), sk->acked_seq + sk->window))) {
2594 return(1);
2595 }
2596 DPRINTF((DBG_TCP, "tcp_sequence: rejecting packet.\n"));
2597
2598
2599
2600
2601
2602 if (after(th->seq, sk->acked_seq + sk->window)) {
2603 tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
2604 return(0);
2605 }
2606
2607
2608 if (th->ack && len == (th->doff * 4) &&
2609 after(th->seq, sk->acked_seq - 32767) &&
2610 !th->fin && !th->syn) return(1);
2611
2612 if (!th->rst) {
2613
2614 tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
2615 }
2616 return(0);
2617 }
2618
2619
2620
2621 static void
2622 tcp_options(struct sock *sk, struct tcphdr *th)
2623 {
2624 unsigned char *ptr;
2625
2626 ptr = (unsigned char *)(th + 1);
2627 if (ptr[0] != 2 || ptr[1] != 4) {
2628 sk->mtu = min(sk->mtu, 576 - HEADER_SIZE);
2629 return;
2630 }
2631 sk->mtu = min(sk->mtu, ptr[2]*256 + ptr[3] - HEADER_SIZE);
2632 }
2633
2634
2635 int
2636 tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
2637 unsigned long daddr, unsigned short len,
2638 unsigned long saddr, int redo, struct inet_protocol * protocol)
2639 {
2640 struct tcphdr *th;
2641 struct sock *sk;
2642
2643 if (!skb) {
2644 DPRINTF((DBG_TCP, "tcp.c: tcp_rcv skb = NULL\n"));
2645 return(0);
2646 }
2647 #if 0
2648 if (!protocol) {
2649 DPRINTF((DBG_TCP, "tcp.c: tcp_rcv protocol = NULL\n"));
2650 return(0);
2651 }
2652
2653 if (!opt) {
2654 DPRINTF((DBG_TCP, "tcp.c: tcp_rcv opt = NULL\n"));
2655 }
2656 #endif
2657 if (!dev) {
2658 DPRINTF((DBG_TCP, "tcp.c: tcp_rcv dev = NULL\n"));
2659 return(0);
2660 }
2661 th = skb->h.th;
2662
2663
2664 sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
2665 DPRINTF((DBG_TCP, "<<\n"));
2666 DPRINTF((DBG_TCP, "len = %d, redo = %d, skb=%X\n", len, redo, skb));
2667
2668 if (sk) {
2669 DPRINTF((DBG_TCP, "sk = %X:\n", sk));
2670 }
2671
2672 if (!redo) {
2673 if (th->check && tcp_check(th, len, saddr, daddr )) {
2674 skb->sk = NULL;
2675 DPRINTF((DBG_TCP, "packet dropped with bad checksum.\n"));
2676 kfree_skb(skb, 0);
2677
2678
2679
2680
2681 return(0);
2682 }
2683
2684
2685 if (sk == NULL) {
2686 if (!th->rst) tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev);
2687 skb->sk = NULL;
2688 kfree_skb(skb, 0);
2689 return(0);
2690 }
2691
2692 skb->len = len;
2693 skb->sk = sk;
2694 skb->acked = 0;
2695 skb->used = 0;
2696 skb->free = 0;
2697 skb->urg_used = 0;
2698 skb->saddr = daddr;
2699 skb->daddr = saddr;
2700
2701 th->seq = ntohl(th->seq);
2702
2703
2704 cli();
2705 if (sk->inuse) {
2706 if (sk->back_log == NULL) {
2707 sk->back_log = skb;
2708 skb->next = skb;
2709 skb->prev = skb;
2710 } else {
2711 skb->next = sk->back_log;
2712 skb->prev = sk->back_log->prev;
2713 skb->prev->next = skb;
2714 skb->next->prev = skb;
2715 }
2716 sti();
2717 return(0);
2718 }
2719 sk->inuse = 1;
2720 sti();
2721 } else {
2722 if (!sk) {
2723 DPRINTF((DBG_TCP, "tcp.c: tcp_rcv bug sk=NULL redo = 1\n"));
2724 return(0);
2725 }
2726 }
2727
2728 if (!sk->prot) {
2729 DPRINTF((DBG_TCP, "tcp.c: tcp_rcv sk->prot = NULL \n"));
2730 return(0);
2731 }
2732
2733
2734 if (sk->rmem_alloc + skb->mem_len >= SK_RMEM_MAX) {
2735 skb->sk = NULL;
2736 DPRINTF((DBG_TCP, "dropping packet due to lack of buffer space.\n"));
2737 kfree_skb(skb, 0);
2738 release_sock(sk);
2739 return(0);
2740 }
2741 sk->rmem_alloc += skb->mem_len;
2742
2743 DPRINTF((DBG_TCP, "About to do switch.\n"));
2744
2745
2746 switch(sk->state) {
2747
2748
2749
2750
2751 case TCP_LAST_ACK:
2752 if (th->rst) {
2753 sk->err = ECONNRESET;
2754 sk->state = TCP_CLOSE;
2755 sk->shutdown = SHUTDOWN_MASK;
2756 if (!sk->dead) {
2757 wake_up(sk->sleep);
2758 }
2759 kfree_skb(skb, FREE_READ);
2760 release_sock(sk);
2761 return(0);
2762 }
2763
2764 case TCP_ESTABLISHED:
2765 case TCP_CLOSE_WAIT:
2766 case TCP_FIN_WAIT1:
2767 case TCP_FIN_WAIT2:
2768 case TCP_TIME_WAIT:
2769 if (!tcp_sequence(sk, th, len, opt, saddr)) {
2770 kfree_skb(skb, FREE_READ);
2771 release_sock(sk);
2772 return(0);
2773 }
2774
2775 if (th->rst) {
2776
2777 sk->err = ECONNRESET;
2778
2779 if (sk->state == TCP_CLOSE_WAIT) {
2780 sk->err = EPIPE;
2781 }
2782
2783
2784
2785
2786
2787 if (!th->fin) {
2788 sk->state = TCP_CLOSE;
2789 sk->shutdown = SHUTDOWN_MASK;
2790 if (!sk->dead) {
2791 wake_up(sk->sleep);
2792 }
2793 kfree_skb(skb, FREE_READ);
2794 release_sock(sk);
2795 return(0);
2796 }
2797 }
2798 #if 0
2799 if (opt && (opt->security != 0 ||
2800 opt->compartment != 0 || th->syn)) {
2801 sk->err = ECONNRESET;
2802 sk->state = TCP_CLOSE;
2803 sk->shutdown = SHUTDOWN_MASK;
2804 tcp_reset(daddr, saddr, th, sk->prot, opt,dev);
2805 if (!sk->dead) {
2806 wake_up(sk->sleep);
2807 }
2808 kfree_skb(skb, FREE_READ);
2809 release_sock(sk);
2810 return(0);
2811 }
2812 #endif
2813 if (th->ack) {
2814 if (!tcp_ack(sk, th, saddr, len)) {
2815 kfree_skb(skb, FREE_READ);
2816 release_sock(sk);
2817 return(0);
2818 }
2819 }
2820 if (th->urg) {
2821 if (tcp_urg(sk, th, saddr)) {
2822 kfree_skb(skb, FREE_READ);
2823 release_sock(sk);
2824 return(0);
2825 }
2826 }
2827
2828 if (th->fin && tcp_fin(sk, th, saddr, dev)) {
2829 kfree_skb(skb, FREE_READ);
2830 release_sock(sk);
2831 return(0);
2832 }
2833
2834 if (tcp_data(skb, sk, saddr, len)) {
2835 kfree_skb(skb, FREE_READ);
2836 release_sock(sk);
2837 return(0);
2838 }
2839
2840 release_sock(sk);
2841 return(0);
2842
2843 case TCP_CLOSE:
2844 if (sk->dead || sk->daddr) {
2845 DPRINTF((DBG_TCP, "packet received for closed,dead socket\n"));
2846 kfree_skb(skb, FREE_READ);
2847 release_sock(sk);
2848 return(0);
2849 }
2850
2851 if (!th->rst) {
2852 if (!th->ack)
2853 th->ack_seq = 0;
2854 tcp_reset(daddr, saddr, th, sk->prot, opt,dev);
2855 }
2856 kfree_skb(skb, FREE_READ);
2857 release_sock(sk);
2858 return(0);
2859
2860 case TCP_LISTEN:
2861 if (th->rst) {
2862 kfree_skb(skb, FREE_READ);
2863 release_sock(sk);
2864 return(0);
2865 }
2866 if (th->ack) {
2867 tcp_reset(daddr, saddr, th, sk->prot, opt,dev);
2868 kfree_skb(skb, FREE_READ);
2869 release_sock(sk);
2870 return(0);
2871 }
2872
2873 if (th->syn) {
2874 #if 0
2875 if (opt->security != 0 || opt->compartment != 0) {
2876 tcp_reset(daddr, saddr, th, prot, opt,dev);
2877 release_sock(sk);
2878 return(0);
2879 }
2880 #endif
2881
2882
2883
2884
2885
2886
2887
2888 tcp_conn_request(sk, skb, daddr, saddr, opt, dev);
2889 release_sock(sk);
2890 return(0);
2891 }
2892
2893 kfree_skb(skb, FREE_READ);
2894 release_sock(sk);
2895 return(0);
2896
2897 default:
2898 if (!tcp_sequence(sk, th, len, opt, saddr)) {
2899 kfree_skb(skb, FREE_READ);
2900 release_sock(sk);
2901 return(0);
2902 }
2903
2904 case TCP_SYN_SENT:
2905 if (th->rst) {
2906 sk->err = ECONNREFUSED;
2907 sk->state = TCP_CLOSE;
2908 sk->shutdown = SHUTDOWN_MASK;
2909 if (!sk->dead) {
2910 wake_up(sk->sleep);
2911 }
2912 kfree_skb(skb, FREE_READ);
2913 release_sock(sk);
2914 return(0);
2915 }
2916 #if 0
2917 if (opt->security != 0 || opt->compartment != 0) {
2918 sk->err = ECONNRESET;
2919 sk->state = TCP_CLOSE;
2920 sk->shutdown = SHUTDOWN_MASK;
2921 tcp_reset(daddr, saddr, th, sk->prot, opt, dev);
2922 if (!sk->dead) {
2923 wake_up(sk->sleep);
2924 }
2925 kfree_skb(skb, FREE_READ);
2926 release_sock(sk);
2927 return(0);
2928 }
2929 #endif
2930 if (!th->ack) {
2931 if (th->syn) {
2932 sk->state = TCP_SYN_RECV;
2933 }
2934
2935 kfree_skb(skb, FREE_READ);
2936 release_sock(sk);
2937 return(0);
2938 }
2939
2940 switch(sk->state) {
2941 case TCP_SYN_SENT:
2942 if (!tcp_ack(sk, th, saddr, len)) {
2943 tcp_reset(daddr, saddr, th,
2944 sk->prot, opt,dev);
2945 kfree_skb(skb, FREE_READ);
2946 release_sock(sk);
2947 return(0);
2948 }
2949
2950
2951
2952
2953
2954 if (!th->syn) {
2955 kfree_skb(skb, FREE_READ);
2956 release_sock(sk);
2957 return(0);
2958 }
2959
2960
2961 sk->acked_seq = th->seq+1;
2962 sk->fin_seq = th->seq;
2963 tcp_send_ack(sk->send_seq, th->seq+1,
2964 sk, th, sk->daddr);
2965
2966 case TCP_SYN_RECV:
2967 if (!tcp_ack(sk, th, saddr, len)) {
2968 tcp_reset(daddr, saddr, th,
2969 sk->prot, opt, dev);
2970 kfree_skb(skb, FREE_READ);
2971 release_sock(sk);
2972 return(0);
2973 }
2974 sk->state = TCP_ESTABLISHED;
2975
2976
2977
2978
2979
2980
2981 tcp_options(sk, th);
2982 sk->dummy_th.dest = th->source;
2983 sk->copied_seq = sk->acked_seq-1;
2984 if (!sk->dead) {
2985 wake_up(sk->sleep);
2986 }
2987
2988
2989
2990
2991
2992 if (th->urg) {
2993 if (tcp_urg(sk, th, saddr)) {
2994 kfree_skb(skb, FREE_READ);
2995 release_sock(sk);
2996 return(0);
2997 }
2998 }
2999 if (tcp_data(skb, sk, saddr, len))
3000 kfree_skb(skb, FREE_READ);
3001
3002 if (th->fin) tcp_fin(sk, th, saddr, dev);
3003 release_sock(sk);
3004 return(0);
3005 }
3006
3007 if (th->urg) {
3008 if (tcp_urg(sk, th, saddr)) {
3009 kfree_skb(skb, FREE_READ);
3010 release_sock(sk);
3011 return(0);
3012 }
3013 }
3014
3015 if (tcp_data(skb, sk, saddr, len)) {
3016 kfree_skb(skb, FREE_READ);
3017 release_sock(sk);
3018 return(0);
3019 }
3020
3021 if (!th->fin) {
3022 release_sock(sk);
3023 return(0);
3024 }
3025 tcp_fin(sk, th, saddr, dev);
3026 release_sock(sk);
3027 return(0);
3028 }
3029 }
3030
3031
3032
3033
3034
3035
3036 static void
3037 tcp_write_wakeup(struct sock *sk)
3038 {
3039 struct sk_buff *buff;
3040 struct tcphdr *t1;
3041 struct device *dev=NULL;
3042 int tmp;
3043
3044 if (sk -> state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) return;
3045
3046 buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
3047 if (buff == NULL) return;
3048
3049 buff->lock = 0;
3050 buff->mem_addr = buff;
3051 buff->mem_len = MAX_ACK_SIZE;
3052 buff->len = sizeof(struct tcphdr);
3053 buff->free = 1;
3054 buff->sk = sk;
3055 DPRINTF((DBG_TCP, "in tcp_write_wakeup\n"));
3056 t1 = (struct tcphdr *)(buff + 1);
3057
3058
3059 tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
3060 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
3061 if (tmp < 0) {
3062 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
3063 return;
3064 }
3065
3066 buff->len += tmp;
3067 t1 = (struct tcphdr *)((char *)t1 +tmp);
3068
3069 memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
3070
3071
3072
3073
3074
3075 t1->seq = ntohl(sk->send_seq-1);
3076 t1->ack = 1;
3077 t1->res1= 0;
3078 t1->res2= 0;
3079 t1->rst = 0;
3080 t1->urg = 0;
3081 t1->psh = 0;
3082 t1->fin = 0;
3083 t1->syn = 0;
3084 t1->ack_seq = ntohl(sk->acked_seq);
3085 t1->window = ntohs(sk->prot->rspace(sk));
3086 t1->doff = sizeof(*t1)/4;
3087 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
3088
3089
3090
3091
3092 sk->prot->queue_xmit(sk, dev, buff, 1);
3093 }
3094
3095
/*
 * TCP's entry in the protocol-operations table.  Slot order must match
 * "struct proto" (declared in sock.h, outside this file).  Per-slot
 * comments below are inferred from how this file invokes sk->prot->*
 * (wmalloc/wfree/rspace/build_header/queue_xmit are all called above)
 * -- verify names against the struct proto declaration.
 */
struct proto tcp_prot = {
  sock_wmalloc,			/* write-side buffer allocation */
  sock_rmalloc,			/* read-side buffer allocation */
  sock_wfree,			/* free a write buffer */
  sock_rfree,			/* free a read buffer */
  sock_rspace,			/* free space left in receive buffers */
  sock_wspace,			/* free space left in send buffers */
  tcp_close,
  tcp_read,
  tcp_write,
  tcp_sendto,
  tcp_recvfrom,
  ip_build_header,		/* prepend link + IP headers to an skb */
  tcp_connect,
  tcp_accept,
  ip_queue_xmit,		/* hand a finished skb down to IP */
  tcp_retransmit,
  tcp_write_wakeup,		/* send window-update ACK (defined above) */
  tcp_read_wakeup,
  tcp_rcv,			/* input demux entry point from IP */
  tcp_select,
  tcp_ioctl,
  NULL,				/* no protocol init hook for TCP */
  tcp_shutdown,
  128,				/* presumably header space to reserve -- TODO confirm against sock.h */
  0,				/* numeric field, meaning not visible here -- TODO confirm */
  {NULL,},			/* socket array, initially empty */
  "TCP"				/* protocol name for diagnostics */
};