This source file includes following definitions.
- min
- print_th
- get_firstr
- diff
- tcp_time_wait
- tcp_retransmit
- tcp_err
- tcp_readable
- tcp_select
- tcp_ioctl
- tcp_check
- tcp_send_check
- tcp_send_partial
- tcp_send_ack
- tcp_build_header
- tcp_write
- tcp_sendto
- tcp_read_wakeup
- cleanup_rbuf
- tcp_read_urg
- tcp_read
- tcp_shutdown
- tcp_recvfrom
- tcp_reset
- tcp_conn_request
- tcp_close
- tcp_write_xmit
- sort_send
- tcp_ack
- tcp_data
- tcp_urg
- tcp_fin
- tcp_accept
- tcp_connect
- tcp_sequence
- tcp_options
- tcp_rcv
- tcp_write_wakeup
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21 #include <linux/types.h>
22 #include <linux/sched.h>
23 #include <linux/mm.h>
24 #include <linux/string.h>
25 #include <linux/socket.h>
26 #include <linux/sockios.h>
27 #include <linux/termios.h>
28 #include <linux/in.h>
29 #include <linux/fcntl.h>
30 #include "inet.h"
31 #include "dev.h"
32 #include "ip.h"
33 #include "protocol.h"
34 #include "icmp.h"
35 #include "tcp.h"
36 #include "skbuff.h"
37 #include "sock.h"
38 #include "arp.h"
39 #include <linux/errno.h>
40 #include <linux/timer.h>
41 #include <asm/system.h>
42 #include <asm/segment.h>
43 #include <linux/mm.h>
44
45 #define SEQ_TICK 3
46 unsigned long seq_offset;
47
static __inline__ int
min(unsigned int a, unsigned int b)
{
	/* Return the smaller of two unsigned values. */
	return (a < b) ? a : b;
}
54
55
56 void
57 print_th(struct tcphdr *th)
58 {
59 unsigned char *ptr;
60
61 if (inet_debug != DBG_TCP) return;
62
63 printk("TCP header:\n");
64 ptr =(unsigned char *)(th + 1);
65 printk(" source=%d, dest=%d, seq =%d, ack_seq = %d\n",
66 ntohs(th->source), ntohs(th->dest),
67 ntohl(th->seq), ntohl(th->ack_seq));
68 printk(" fin=%d, syn=%d, rst=%d, psh=%d, ack=%d, urg=%d res1=%d res2=%d\n",
69 th->fin, th->syn, th->rst, th->psh, th->ack,
70 th->urg, th->res1, th->res2);
71 printk(" window = %d, check = %d urg_ptr = %d\n",
72 ntohs(th->window), ntohs(th->check), ntohs(th->urg_ptr));
73 printk(" doff = %d\n", th->doff);
74 printk(" options = %d %d %d %d\n", ptr[0], ptr[1], ptr[2], ptr[3]);
75 }
76
77
78
79 static struct sk_buff *
80 get_firstr(struct sock *sk)
81 {
82 struct sk_buff *skb;
83
84 skb = sk->rqueue;
85 if (skb == NULL) return(NULL);
86 sk->rqueue =(struct sk_buff *)skb->next;
87 if (sk->rqueue == skb) {
88 sk->rqueue = NULL;
89 } else {
90 sk->rqueue->prev = skb->prev;
91 sk->rqueue->prev->next = sk->rqueue;
92 }
93 return(skb);
94 }
95
96
static long
diff(unsigned long seq1, unsigned long seq2)
{
	/* Magnitude of the difference between two sequence numbers
	   (the original negated via ~d+1, which is the same as -d). */
	long delta = seq1 - seq2;

	return (delta > 0) ? delta : -delta;
}
108
109
110
/*
 * Move a socket into TIME_WAIT: mark both directions shut down, wake
 * any sleeper, and arm the timer that will finally close the socket
 * after TCP_TIMEWAIT_LEN.
 */
static void
tcp_time_wait(struct sock *sk)
{
	sk->state = TCP_TIME_WAIT;
	sk->shutdown = SHUTDOWN_MASK;
	if (!sk->dead)
		wake_up(sk->sleep);
	reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
}
120
121
122 static void
123 tcp_retransmit(struct sock *sk, int all)
124 {
125 if (all) {
126 ip_retransmit(sk, all);
127 return;
128 }
129
130 if (sk->cong_window > 4)
131 sk->cong_window = sk->cong_window / 2;
132 sk->exp_growth = 0;
133
134
135 ip_retransmit(sk, all);
136 }
137
138
139
140
141
142
143
144
145
146
/*
 * ICMP error handler for TCP.  "header" points at the copy of the
 * offending TCP header carried inside the ICMP message.  A source
 * quench shrinks the congestion window; a fatal error aborts a
 * connection that is still in SYN_SENT, otherwise it is only recorded
 * in sk->err for the next user call to pick up.
 */
void
tcp_err(int err, unsigned char *header, unsigned long daddr,
	unsigned long saddr, struct inet_protocol *protocol)
{
	struct tcphdr *th;
	struct sock *sk;

	DPRINTF((DBG_TCP, "TCP: tcp_err(%d, hdr=%X, daddr=%X saddr=%X, protocol=%X)\n",
		err, header, daddr, saddr, protocol));

	th =(struct tcphdr *)header;
	sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
	print_th(th);

	if (sk == NULL) return;

	if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8)) {
		/* Congestion signalled by a router: back the window off by
		   one packet, but never below 4. */
		if (sk->cong_window > 4) sk->cong_window--;
		return;
	}

	DPRINTF((DBG_TCP, "TCP: icmp_err got error\n"));
	sk->err = icmp_err_convert[err & 0xff].errno;

	/* A fatal ICMP error kills a connection attempt that has not yet
	   completed the handshake; established connections just keep the
	   error pending. */
	if (icmp_err_convert[err & 0xff].fatal) {
		if (sk->state == TCP_SYN_SENT) {
			sk->state = TCP_CLOSE;
			sk->prot->close(sk, 0);
		}
	}
	return;
}
188
189
/*
 * Count how many bytes tcp_read() could return right now without
 * blocking: walk the receive queue from the next unread sequence
 * number, summing contiguous in-order data.  Stops at a sequence gap,
 * at a PSH (once some data has been counted), or after 20 packets
 * (treated as possible queue corruption).
 */
static int
tcp_readable(struct sock *sk)
{
	unsigned long counted;
	unsigned long amount;
	struct sk_buff *skb;
	int count=0;
	int sum;

	DPRINTF((DBG_TCP, "tcp_readable(sk=%X)\n", sk));

	if (sk == NULL || sk->rqueue == NULL) return(0);

	counted = sk->copied_seq+1;	/* next sequence number the user will read */
	amount = 0;
	skb =(struct sk_buff *)sk->rqueue->next;	/* rqueue->next is the oldest skb */

	/* Walk the circular queue while the data stays contiguous. */
	do {
		count++;
		if (count > 20) {
			DPRINTF((DBG_TCP, "tcp_readable, more than 20 packets without a psh\n"));
			DPRINTF((DBG_TCP, "possible read_queue corruption.\n"));
			return(amount);
		}
		if (before(counted, skb->h.th->seq)) break;	/* sequence gap */
		sum = skb->len -(counted - skb->h.th->seq);	/* unread bytes in this skb */
		if (skb->h.th->syn) sum++;	/* SYN occupies one sequence slot */
		if (skb->h.th->urg) {
			sum -= ntohs(skb->h.th->urg_ptr);	/* urgent bytes are not normal data */
		}
		if (sum >= 0) {
			amount += sum;
			if (skb->h.th->syn) amount--;	/* the SYN slot carries no user data */
			counted += sum;
		}
		if (amount && skb->h.th->psh) break;	/* deliverable unit complete */
		skb =(struct sk_buff *)skb->next;
	} while(skb != sk->rqueue->next);
	DPRINTF((DBG_TCP, "tcp readable returning %d bytes\n", amount));
	return(amount);
}
232
233
/*
 * select() support for TCP sockets.  Returns 1 when the requested
 * condition (readable / writable / exceptional) already holds, else
 * registers the caller on the socket's wait queue and returns 0.
 * The socket is locked with sk->inuse for the duration of each check.
 */
static int
tcp_select(struct sock *sk, int sel_type, select_table *wait)
{
	DPRINTF((DBG_TCP, "tcp_select(sk=%X, sel_type = %d, wait = %X)\n",
		sk, sel_type, wait));

	sk->inuse = 1;
	switch(sel_type) {
	case SEL_IN:
		select_wait(sk->sleep, wait);
		/* Readable if data is queued (or, for a listener, if any
		   connection is pending on the queue). */
		if (sk->rqueue != NULL) {
			if (sk->state == TCP_LISTEN || tcp_readable(sk)) {
				release_sock(sk);
				return(1);
			}
		}
		/* A shut-down receive side is "readable" (EOF). */
		if (sk->shutdown & RCV_SHUTDOWN) {
			release_sock(sk);
			return(1);
		} else {
			release_sock(sk);
			return(0);
		}
	case SEL_OUT:
		select_wait(sk->sleep, wait);
		if (sk->shutdown & SEND_SHUTDOWN) {
			DPRINTF((DBG_TCP,
				"write select on shutdown socket.\n"));
			/* NOTE(review): arguably should report writable so
			   the caller gets EPIPE quickly; preserved as-is. */
			release_sock(sk);
			return(0);
		}
		/* Writable only when a full MTU of send-buffer space is
		   free, and not while the handshake is still in progress. */
		if (sk->prot->wspace(sk) >= sk->mtu) {
			release_sock(sk);
			if (sk->state == TCP_SYN_RECV ||
			    sk->state == TCP_SYN_SENT) return(0);
			return(1);
		}
		DPRINTF((DBG_TCP,
			"tcp_select: sleeping on write sk->wmem_alloc = %d, "
			"sk->packets_out = %d\n"
			"sk->wback = %X, sk->wfront = %X\n"
			"sk->send_seq = %u, sk->window_seq=%u\n",
			sk->wmem_alloc, sk->packets_out,
			sk->wback, sk->wfront,
			sk->send_seq, sk->window_seq));

		release_sock(sk);
		return(0);
	case SEL_EX:
		select_wait(sk->sleep,wait);
		/* Exceptional condition == pending socket error. */
		if (sk->err) {
			release_sock(sk);
			return(1);
		}
		release_sock(sk);
		return(0);
	}
	/* Unknown selector: never ready. */
	release_sock(sk);
	return(0);
}
304
305
/*
 * ioctl() dispatch for TCP sockets: debug control, queued-byte counts
 * (TIOCINQ / TIOCOUTQ) and the urgent-data mark (SIOCATMARK).  Results
 * are copied back to user space with put_fs_long().
 */
int
tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	DPRINTF((DBG_TCP, "tcp_ioctl(sk=%X, cmd = %d, arg=%X)\n", sk, cmd, arg));
	switch(cmd) {
	case DDIOCSDBG:
		/* Adjust the inet debugging level. */
		return(dbg_ioctl((void *) arg, DBG_TCP));

	case TIOCINQ:
#ifdef FIXME
	case FIONREAD:
#endif
		{
			unsigned long amount;

			if (sk->state == TCP_LISTEN) return(-EINVAL);

			/* Bytes readable without blocking. */
			amount = 0;
			sk->inuse = 1;
			if (sk->rqueue != NULL) {
				amount = tcp_readable(sk);
			}
			release_sock(sk);
			DPRINTF((DBG_TCP, "returning %d\n", amount));
			verify_area(VERIFY_WRITE,(void *)arg,
				sizeof(unsigned long));
			put_fs_long(amount,(unsigned long *)arg);
			return(0);
		}
	case SIOCATMARK:
		{
			struct sk_buff *skb;
			int answ = 0;

			/* answ is 1 when the very next byte to read is
			   urgent data. */
			sk->inuse = 1;
			if (sk->rqueue != NULL) {
				skb =(struct sk_buff *)sk->rqueue->next;
				if (sk->copied_seq+1 == skb->h.th->seq &&
				    skb->h.th->urg) answ = 1;
			}
			release_sock(sk);
			verify_area(VERIFY_WRITE,(void *) arg,
				sizeof(unsigned long));
			put_fs_long(answ,(int *) arg);
			return(0);
		}
	case TIOCOUTQ:
		{
			unsigned long amount;

			if (sk->state == TCP_LISTEN) return(-EINVAL);
			/* Half the free write space, matching wspace()'s
			   accounting convention. */
			amount = sk->prot->wspace(sk)/2;
			verify_area(VERIFY_WRITE,(void *)arg,
				sizeof(unsigned long));
			put_fs_long(amount,(unsigned long *)arg);
			return(0);
		}
	default:
		return(-EINVAL);
	}
}
371
372
373
/*
 * Compute the TCP checksum: the one's-complement sum of the
 * pseudo-header (source addr, dest addr, protocol and length) plus
 * "len" bytes of TCP header and data, folded to 16 bits and inverted.
 * Implemented with i386 inline assembly (lodsl/adcl carry chains).
 */
unsigned short
tcp_check(struct tcphdr *th, int len,
	  unsigned long saddr, unsigned long daddr)
{
	unsigned long sum;

	if (saddr == 0) saddr = my_addr();
	print_th(th);
	/* Pseudo-header: saddr + daddr + (length<<16 | proto<<8) with
	   end-around carry. */
	__asm__("\t addl %%ecx,%%ebx\n"
		"\t adcl %%edx,%%ebx\n"
		"\t adcl $0, %%ebx\n"
		: "=b"(sum)
		: "0"(daddr), "c"(saddr), "d"((ntohs(len) << 16) + IPPROTO_TCP*256)
		: "cx","bx","dx" );

	/* Sum all whole 32-bit words of the segment. */
	if (len > 3) {
		__asm__("\tclc\n"
			"1:\n"
			"\t lodsl\n"
			"\t adcl %%eax, %%ebx\n"
			"\t loop 1b\n"
			"\t adcl $0, %%ebx\n"
			: "=b"(sum) , "=S"(th)
			: "0"(sum), "c"(len/4) ,"1"(th)
			: "ax", "cx", "bx", "si" );
	}

	/* Fold the 32-bit sum into 16 bits. */
	__asm__("\t movl %%ebx, %%ecx\n"
		"\t shrl $16,%%ecx\n"
		"\t addw %%cx, %%bx\n"
		"\t adcw $0, %%bx\n"
		: "=b"(sum)
		: "0"(sum)
		: "bx", "cx");

	/* Add a trailing 16-bit word, if present. */
	if ((len & 2) != 0) {
		__asm__("\t lodsw\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum), "=S"(th)
			: "0"(sum) ,"1"(th)
			: "si", "ax", "bx");
	}

	/* Add a trailing odd byte, if present. */
	if ((len & 1) != 0) {
		__asm__("\t lodsb\n"
			"\t movb $0,%%ah\n"
			"\t addw %%ax,%%bx\n"
			"\t adcw $0, %%bx\n"
			: "=b"(sum)
			: "0"(sum) ,"S"(th)
			: "si", "ax", "bx");
	}

	/* One's complement of the folded sum is the checksum. */
	return((~sum) & 0xffff);
}
434
435
436 void
437 tcp_send_check(struct tcphdr *th, unsigned long saddr,
438 unsigned long daddr, int len, struct sock *sk)
439 {
440 th->check = 0;
441 if (sk && sk->no_check) return;
442 th->check = tcp_check(th, len, saddr, daddr);
443 return;
444 }
445
446
/*
 * Transmit (or queue) the partially-filled segment held in
 * sk->send_tmp.  The segment is checksummed, stamped with the current
 * send sequence, and either handed to IP or appended to the write
 * queue if it would exceed the peer's window or our congestion window.
 */
static void
tcp_send_partial(struct sock *sk)
{
	struct sk_buff *skb;

	if (sk == NULL || sk->send_tmp == NULL) return;

	skb = sk->send_tmp;

	/* TCP length = everything in the buffer past the TCP header
	   (header offset measured from the start of the data area). */
	tcp_send_check(skb->h.th, sk->saddr, sk->daddr,
		       skb->len-(unsigned long)skb->h.th +
		       (unsigned long)(skb+1), sk);

	skb->h.seq = sk->send_seq;
	/* Hold the packet back if it lies beyond the advertised window
	   or a full congestion window is already in flight. */
	if (after(sk->send_seq , sk->window_seq) ||
	    sk->packets_out >= sk->cong_window) {
		DPRINTF((DBG_TCP, "sk->cong_window = %d, sk->packets_out = %d\n",
			sk->cong_window, sk->packets_out));
		DPRINTF((DBG_TCP, "sk->send_seq = %d, sk->window_seq = %d\n",
			sk->send_seq, sk->window_seq));
		skb->next = NULL;
		skb->magic = TCP_WRITE_QUEUE_MAGIC;
		/* Append to the tail of the write queue. */
		if (sk->wback == NULL) {
			sk->wfront=skb;
		} else {
			sk->wback->next = skb;
		}
		sk->wback = skb;
	} else {
		sk->prot->queue_xmit(sk, skb->dev, skb,0);
	}
	sk->send_tmp = NULL;
}
481
482
483
/*
 * Build and transmit a bare ACK segment carrying "sequence" as our
 * sequence number and acknowledging "ack".  May be called at interrupt
 * time, so the buffer is allocated GFP_ATOMIC; on allocation failure
 * the ACK is deferred via ack_backlog and the write timer.
 */
static void
tcp_send_ack(unsigned long sequence, unsigned long ack,
	     struct sock *sk,
	     struct tcphdr *th, unsigned long daddr)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	struct device *dev = NULL;
	int tmp;

	/* Atomic allocation: we may be in the bottom half. */
	buff = (struct sk_buff *) sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL) {
		/* Out of memory: remember that an ACK is owed and retry
		   from the timer shortly. */
		sk->ack_backlog++;
		if (sk->timeout != TIME_WRITE && tcp_connected(sk->state)) {
			reset_timer(sk, TIME_WRITE, 10);
		}
		if (inet_debug == DBG_SLIP) printk("\rtcp_ack: malloc failed\n");
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_ACK_SIZE;
	buff->lock = 0;
	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;
	t1 =(struct tcphdr *)(buff + 1);

	/* Put in the IP header and routing information. */
	tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
				     IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
	if (tmp < 0) {
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		if (inet_debug == DBG_SLIP) printk("\rtcp_ack: build_header failed\n");
		return;
	}
	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)t1 +tmp);

	/* Start from the incoming header, then fix up the fields. */
	memcpy(t1, th, sizeof(*t1));

	/* Swap the ports for the reply. */
	t1->dest = th->source;
	t1->source = th->dest;
	t1->seq = ntohl(sequence);
	t1->ack = 1;
	sk->window = sk->prot->rspace(sk);
	t1->window = ntohs(sk->window);
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	t1->fin = 0;
	if (ack == sk->acked_seq) {
		/* Everything received so far is being acknowledged, so the
		   pending-ACK bookkeeping can be cleared. */
		sk->ack_backlog = 0;
		sk->bytes_rcv = 0;
		sk->ack_timed = 0;
		if (sk->send_head == NULL && sk->wfront == NULL) {
			delete_timer(sk);
		}
	}
	t1->ack_seq = ntohl(ack);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), sk);
	if (inet_debug == DBG_SLIP) printk("\rtcp_ack: seq %x ack %x\n",
		sequence, ack);
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
559
560
561
/*
 * Fill in a TCP header for an outgoing data segment from the socket's
 * header template (dummy_th), stamping the current send/ack sequence
 * numbers and window.  "push" is the number of bytes still to come:
 * zero means this is the last piece, so PSH is set.  Clears the
 * pending-ACK bookkeeping, since this segment carries the ACK.
 * Returns the header length in bytes.
 */
static int
tcp_build_header(struct tcphdr *th, struct sock *sk, int push)
{
	memcpy(th,(void *) &(sk->dummy_th), sizeof(*th));
	th->seq = ntohl(sk->send_seq);
	th->psh =(push == 0) ? 1 : 0;	/* last piece of the write: push it */
	th->doff = sizeof(*th)/4;
	th->ack = 1;
	th->fin = 0;
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	sk->ack_timed = 0;
	th->ack_seq = ntohl(sk->acked_seq);
	sk->window = sk->prot->rspace(sk);
	th->window = ntohs(sk->window);

	return(sizeof(*th));
}
582
583
584
585
586
587
/*
 * Write "len" bytes of user data to the socket.  Blocks (unless
 * nonblock) while the connection is still being established or while
 * no buffer memory is available.  Data is packetized into MSS-sized
 * segments; small writes while packets are in flight are accumulated
 * in sk->send_tmp (simple Nagle-style batching).  MSG_OOB marks the
 * segment urgent.  Returns the number of bytes written, or a negative
 * error when nothing was written.
 */
static int
tcp_write(struct sock *sk, unsigned char *from,
	  int len, int nonblock, unsigned flags)
{
	int copied = 0;
	int copy;
	int tmp;
	struct sk_buff *skb;
	unsigned char *buff;
	struct proto *prot;
	struct device *dev = NULL;

	DPRINTF((DBG_TCP, "tcp_write(sk=%X, from=%X, len=%d, nonblock=%d, flags=%X)\n",
		sk, from, len, nonblock, flags));

	prot = sk->prot;
	while(len > 0) {
		/* A pending error ends the write; partial progress is
		   reported in preference to the error. */
		if (sk->err) {
			if (copied) return(copied);
			tmp = -sk->err;
			sk->err = 0;
			return(tmp);
		}

		sk->inuse = 1;
		if (sk->shutdown & SEND_SHUTDOWN) {
			release_sock(sk);
			sk->err = EPIPE;
			if (copied) return(copied);
			sk->err = 0;
			return(-EPIPE);
		}

		/* Wait until the connection is established (or fails). */
		while(sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) {
			if (sk->err) {
				if (copied) return(copied);
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			/* Any state other than a handshake in progress means
			   the connection is gone. */
			if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 1\n"));
				if (copied) return(copied);

				if (sk->err) {
					tmp = -sk->err;
					sk->err = 0;
					return(tmp);
				}

				if (sk->keepopen) {
					send_sig(SIGPIPE, current, 0);
				}
				return(-EPIPE);
			}

			if (nonblock || copied) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 2\n"));
				if (copied) return(copied);
				return(-EAGAIN);
			}

			/* Sleep with interrupts off so the state change
			   cannot slip past between the test and the sleep. */
			release_sock(sk);
			cli();
			if (sk->state != TCP_ESTABLISHED &&
			    sk->state != TCP_CLOSE_WAIT && sk->err == 0) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					DPRINTF((DBG_TCP, "tcp_write: return 3\n"));
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
		}

		/* Append to an existing partial packet, if one is pending. */
		if (sk->send_tmp != NULL) {
			skb = sk->send_tmp;
			if (!(flags & MSG_OOB)) {
				/* Room remaining in the partial buffer. */
				copy = min(sk->mss - skb->len + 128 +
					prot->max_header, len);

				if (copy <= 0) {
					printk("TCP: **bug**: \"copy\" <= 0!!\n");
					copy = 0;
				}

				memcpy_fromfs((unsigned char *)(skb+1) + skb->len, from, copy);
				skb->len += copy;
				from += copy;
				copied += copy;
				len -= copy;
				sk->send_seq += copy;
			}

			/* Flush the partial packet once it reaches a full MSS,
			   or immediately for urgent data. */
			if (skb->len -(unsigned long)skb->h.th +
			    (unsigned long)(skb+1) >= sk->mss ||(flags & MSG_OOB)) {
				tcp_send_partial(sk);
			}
			continue;
		}

		/* Size the next segment by the offered window... */
		copy = min(sk->mtu, diff(sk->window_seq, sk->send_seq));

		/* ...but avoid silly-window segments. */
		if (copy < 200 || copy > sk->mtu) copy = sk->mtu;
		copy = min(copy, len);

		/* Sub-MSS write with packets in flight: allocate a full-MSS
		   buffer and start accumulating (send_tmp). */
		if (sk->packets_out && copy < sk->mss && !(flags & MSG_OOB)) {
			/* Release the socket around the blocking allocation. */
			release_sock(sk);
			skb = (struct sk_buff *) prot->wmalloc(sk,
				sk->mss + 128 + prot->max_header +
				sizeof(*skb), 0, GFP_KERNEL);
			sk->inuse = 1;
			sk->send_tmp = skb;
			if (skb != NULL)
				skb->mem_len = sk->mss + 128 + prot->max_header + sizeof(*skb);
		} else {
			/* Full segment: allocate exactly what is needed. */
			release_sock(sk);
			skb = (struct sk_buff *) prot->wmalloc(sk,
				copy + prot->max_header +
				sizeof(*skb), 0, GFP_KERNEL);
			sk->inuse = 1;
			if (skb != NULL)
				skb->mem_len = copy+prot->max_header + sizeof(*skb);
		}

		/* No memory: fail for nonblocking callers, otherwise wait
		   for some write memory to be freed. */
		if (skb == NULL) {
			if (nonblock || copied) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_write: return 4\n"));
				if (copied) return(copied);
				return(-EAGAIN);
			}

			tmp = sk->wmem_alloc;
			release_sock(sk);

			/* Sleep only if no memory was freed meanwhile. */
			cli();
			if (tmp <= sk->wmem_alloc &&
			    (sk->state == TCP_ESTABLISHED||sk->state == TCP_CLOSE_WAIT)
			    && sk->err == 0) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					DPRINTF((DBG_TCP, "tcp_write: return 5\n"));
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
			continue;
		}

		skb->mem_addr = skb;
		skb->len = 0;
		skb->sk = sk;
		skb->lock = 0;
		skb->free = 0;

		buff =(unsigned char *)(skb+1);

		/* IP (and link-level) header first. */
		tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev,
				IPPROTO_TCP, sk->opt, skb->mem_len);
		if (tmp < 0 ) {
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 6\n"));
			if (copied) return(copied);
			return(tmp);
		}
		skb->len += tmp;
		skb->dev = dev;
		buff += tmp;
		skb->h.th =(struct tcphdr *) buff;
		/* Then the TCP header (len-copy = bytes still to come). */
		tmp = tcp_build_header((struct tcphdr *)buff, sk, len-copy);
		if (tmp < 0) {
			prot->wfree(sk, skb->mem_addr, skb->mem_len);
			release_sock(sk);
			DPRINTF((DBG_TCP, "tcp_write: return 7\n"));
			if (copied) return(copied);
			return(tmp);
		}

		if (flags & MSG_OOB) {
			((struct tcphdr *)buff)->urg = 1;
			((struct tcphdr *)buff)->urg_ptr = ntohs(copy);
		}
		skb->len += tmp;
		memcpy_fromfs(buff+tmp, from, copy);

		from += copy;
		copied += copy;
		len -= copy;
		skb->len += copy;
		skb->free = 0;
		sk->send_seq += copy;

		/* A freshly started partial buffer stays queued on
		   send_tmp; keep filling it. */
		if (sk->send_tmp != NULL) continue;

		tcp_send_check((struct tcphdr *)buff, sk->saddr, sk->daddr,
			copy + sizeof(struct tcphdr), sk);

		skb->h.seq = sk->send_seq;
		/* Beyond the peer's window or congestion window: queue the
		   segment for later; otherwise transmit now. */
		if (after(sk->send_seq , sk->window_seq) ||
		    sk->packets_out >= sk->cong_window) {
			DPRINTF((DBG_TCP, "sk->cong_window = %d, sk->packets_out = %d\n",
				sk->cong_window, sk->packets_out));
			DPRINTF((DBG_TCP, "sk->send_seq = %d, sk->window_seq = %d\n",
				sk->send_seq, sk->window_seq));
			skb->next = NULL;
			skb->magic = TCP_WRITE_QUEUE_MAGIC;
			if (sk->wback == NULL) {
				sk->wfront = skb;
			} else {
				sk->wback->next = skb;
			}
			sk->wback = skb;
		} else {
			prot->queue_xmit(sk, dev, skb,0);
		}
	}
	sk->err = 0;
	release_sock(sk);
	DPRINTF((DBG_TCP, "tcp_write: return 8\n"));
	return(copied);
}
849
850
851 static int
852 tcp_sendto(struct sock *sk, unsigned char *from,
853 int len, int nonblock, unsigned flags,
854 struct sockaddr_in *addr, int addr_len)
855 {
856 struct sockaddr_in sin;
857
858 if (addr_len < sizeof(sin)) return(-EINVAL);
859 memcpy_fromfs(&sin, addr, sizeof(sin));
860 if (sin.sin_family && sin.sin_family != AF_INET) return(-EINVAL);
861 if (sin.sin_port != sk->dummy_th.dest) return(-EINVAL);
862 if (sin.sin_addr.s_addr != sk->daddr) return(-EINVAL);
863 return(tcp_write(sk, from, len, nonblock, flags));
864 }
865
866
/*
 * Send a bare window-update ACK on behalf of the reader, used once
 * buffer space has been freed and ACKs are owed (ack_backlog != 0).
 * Allocates atomically since it can run from timer context; on failure
 * it re-arms the write timer to retry.
 */
static void
tcp_read_wakeup(struct sock *sk)
{
	int tmp;
	struct device *dev = NULL;
	struct tcphdr *t1;
	struct sk_buff *buff;

	DPRINTF((DBG_TCP, "in tcp read wakeup\n"));
	if (!sk->ack_backlog) return;	/* no ACK owed */

	buff = (struct sk_buff *) sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
	if (buff == NULL) {
		/* Retry from the timer shortly. */
		reset_timer(sk, TIME_WRITE, 10);
		return;
	}

	buff->mem_addr = buff;
	buff->mem_len = MAX_ACK_SIZE;
	buff->lock = 0;
	buff->len = sizeof(struct tcphdr);
	buff->sk = sk;

	/* IP (and link-level) header first. */
	tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
			IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
	if (tmp < 0) {
		sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
		return;
	}

	buff->len += tmp;
	t1 =(struct tcphdr *)((char *)(buff+1) +tmp);

	/* Header from the socket template, current seq/ack/window. */
	memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
	t1->seq = ntohl(sk->send_seq);
	t1->ack = 1;
	t1->res1 = 0;
	t1->res2 = 0;
	t1->rst = 0;
	t1->urg = 0;
	t1->syn = 0;
	t1->psh = 0;
	sk->ack_backlog = 0;
	sk->bytes_rcv = 0;
	sk->window = sk->prot->rspace(sk);
	t1->window = ntohs(sk->window);
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
	sk->prot->queue_xmit(sk, dev, buff, 1);
}
930
931
932
933
934
935
936
937
/*
 * Free fully-consumed receive buffers from the front of the queue and,
 * if that reclaimed space changed the window we can advertise, either
 * send a window-update ACK at once (window grew by more than an MTU)
 * or schedule one on the ACK timer.
 */
static void
cleanup_rbuf(struct sock *sk)
{
	int left;

	DPRINTF((DBG_TCP, "cleaning rbuf for sk=%X\n", sk));
	left = sk->prot->rspace(sk);	/* space before freeing anything */

	/* Unlink and free used skbs; stop at the first still-unread one. */
	while(sk->rqueue != NULL ) {
		struct sk_buff *skb;

		skb =(struct sk_buff *)sk->rqueue->next;	/* oldest buffer */
		if (!skb->used) break;
		if (sk->rqueue == skb) {
			/* Last buffer on the circular list. */
			sk->rqueue = NULL;
		} else {
			skb->next->prev = skb->prev;
			skb->prev->next = skb->next;
		}
		skb->sk = sk;	/* so the memory is credited back to us */
		kfree_skb(skb, FREE_READ);
	}

	DPRINTF((DBG_TCP, "sk->window left = %d, sk->prot->rspace(sk)=%d\n",
		sk->window - sk->bytes_rcv, sk->prot->rspace(sk)));

	if (sk->prot->rspace(sk) != left) {
		/* Space was reclaimed: an ACK advertising the larger window
		   is now owed. */
		sk->ack_backlog++;
		if ((sk->prot->rspace(sk) > (sk->window - sk->bytes_rcv + sk->mtu))) {
			/* Window opened significantly: update the peer now. */
			tcp_read_wakeup(sk);
		} else {
			/* Batch the update: arm the ACK timer unless an
			   earlier expiry is already pending. */
			int was_active = del_timer(&sk->timer);
			if (!was_active || TCP_ACK_TIME < sk->timer.expires) {
				reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
			} else
				add_timer(&sk->timer);
		}
	}
}
999
1000
1001
/*
 * Read urgent (out-of-band) data.  Blocks (unless nonblock) until an
 * urgent segment is available, then copies up to urg_ptr bytes from
 * the first unconsumed urgent segment and returns.  MSG_PEEK leaves
 * the urgent data marked unread.
 */
static int
tcp_read_urg(struct sock * sk, int nonblock,
	     unsigned char *to, int len, unsigned flags)
{
	int copied = 0;
	struct sk_buff *skb;

	DPRINTF((DBG_TCP, "tcp_read_urg(sk=%X, to=%X, len=%d, flags=%X)\n",
		sk, to, len, flags));

	while(len > 0) {
		sk->inuse = 1;
		/* Wait for urgent data to arrive. */
		while(sk->urg==0 || sk->rqueue == NULL) {
			if (sk->err) {
				int tmp;

				release_sock(sk);
				if (copied) return(copied);
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			if (sk->state == TCP_CLOSE || sk->done) {
				release_sock(sk);
				if (copied) return(copied);
				if (!sk->done) {
					/* First read after close: report EOF. */
					sk->done = 1;
					return(0);
				}
				return(-ENOTCONN);
			}

			if (sk->shutdown & RCV_SHUTDOWN) {
				release_sock(sk);
				if (copied == 0) sk->done = 1;
				return(copied);
			}

			if (nonblock || copied) {
				release_sock(sk);
				if (copied) return(copied);
				return(-EAGAIN);
			}

			/* Sleep with interrupts off so an arriving urgent
			   segment cannot be missed between test and sleep. */
			release_sock(sk);
			cli();
			if ((sk->urg == 0 || sk->rqueue == NULL) &&
			    sk->err == 0 && !(sk->shutdown & RCV_SHUTDOWN)) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					if (copied) return(copied);
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
		}

		/* Scan the queue for the first unconsumed urgent segment. */
		skb =(struct sk_buff *)sk->rqueue->next;
		do {
			int amt;

			if (skb->h.th->urg && !skb->urg_used) {
				/* urg_ptr of 0 would mean no urgent bytes;
				   treat the whole segment as urgent. */
				if (skb->h.th->urg_ptr == 0) {
					skb->h.th->urg_ptr = ntohs(skb->len);
				}
				amt = min(ntohs(skb->h.th->urg_ptr),len);
				verify_area(VERIFY_WRITE, to, amt);
				memcpy_tofs(to,(unsigned char *)(skb->h.th) +
					skb->h.th->doff*4, amt);

				if (!(flags & MSG_PEEK)) {
					skb->urg_used = 1;
					sk->urg--;
				}
				release_sock(sk);
				copied += amt;
				return(copied);
			}
			skb =(struct sk_buff *)skb->next;
		} while(skb != sk->rqueue->next);
	}
	sk->urg = 0;
	release_sock(sk);
	return(0);
}
1091
1092
1093
/*
 * Copy received data to user space.  Blocks (unless nonblock) until
 * in-sequence data is available.  MSG_OOB is diverted to
 * tcp_read_urg(); MSG_PEEK copies without consuming.  Unread urgent
 * data embedded in the stream raises SIGURG and returns -EINTR so the
 * application can fetch it out of band.  Returns bytes copied or a
 * negative error when nothing was copied.
 */
static int
tcp_read(struct sock *sk, unsigned char *to,
	 int len, int nonblock, unsigned flags)
{
	int copied=0;
	struct sk_buff *skb;
	unsigned long offset;
	unsigned long used;

	if (len == 0) return(0);
	if (len < 0) {
		return(-EINVAL);
	}

	/* Can't read from a listening socket. */
	if (sk->state == TCP_LISTEN) return(-ENOTCONN);

	/* Urgent data has its own path. */
	if ((flags & MSG_OOB)) return(tcp_read_urg(sk, nonblock, to, len, flags));

	sk->inuse = 1;
	if (sk->rqueue != NULL) skb =(struct sk_buff *)sk->rqueue->next;
	else skb = NULL;

	DPRINTF((DBG_TCP, "tcp_read(sk=%X, to=%X, len=%d, nonblock=%d, flags=%X)\n",
		sk, to, len, nonblock, flags));

	while(len > 0) {
		/* Wait for the next in-sequence, unconsumed buffer. */
		while(skb == NULL ||
		      before(sk->copied_seq+1, skb->h.th->seq) || skb->used) {
			DPRINTF((DBG_TCP, "skb = %X:\n", skb));
			cleanup_rbuf(sk);
			if (sk->err) {
				int tmp;

				release_sock(sk);
				if (copied) {
					DPRINTF((DBG_TCP, "tcp_read: returing %d\n",
						copied));
					return(copied);
				}
				tmp = -sk->err;
				sk->err = 0;
				return(tmp);
			}

			if (sk->state == TCP_CLOSE) {
				release_sock(sk);
				if (copied) {
					DPRINTF((DBG_TCP, "tcp_read: returing %d\n",
						copied));
					return(copied);
				}
				if (!sk->done) {
					/* First read after close: EOF. */
					sk->done = 1;
					return(0);
				}
				return(-ENOTCONN);
			}

			if (sk->shutdown & RCV_SHUTDOWN) {
				release_sock(sk);
				if (copied == 0) sk->done = 1;
				DPRINTF((DBG_TCP, "tcp_read: returing %d\n", copied));
				return(copied);
			}

			if (nonblock || copied) {
				release_sock(sk);
				if (copied) {
					DPRINTF((DBG_TCP, "tcp_read: returing %d\n",
						copied));
					return(copied);
				}
				return(-EAGAIN);
			}

			if ((flags & MSG_PEEK) && copied != 0) {
				release_sock(sk);
				DPRINTF((DBG_TCP, "tcp_read: returing %d\n", copied));
				return(copied);
			}

			DPRINTF((DBG_TCP, "tcp_read about to sleep. state = %d\n",
				sk->state));
			release_sock(sk);

			/* Sleep with interrupts off so an arriving packet
			   cannot slip past between the test and the sleep. */
			cli();
			if (sk->shutdown & RCV_SHUTDOWN || sk->err != 0) {
				sk->inuse = 1;
				sti();
				continue;
			}

			if (sk->rqueue == NULL ||
			    before(sk->copied_seq+1, sk->rqueue->next->h.th->seq)) {
				interruptible_sleep_on(sk->sleep);
				if (current->signal & ~current->blocked) {
					sti();
					if (copied) {
						DPRINTF((DBG_TCP, "tcp_read: returing %d\n",
							copied));
						return(copied);
					}
					return(-ERESTARTSYS);
				}
			}
			sk->inuse = 1;
			sti();
			DPRINTF((DBG_TCP, "tcp_read woke up. \n"));

			if (sk->rqueue == NULL) skb = NULL;
			else skb =(struct sk_buff *)sk->rqueue->next;

		}

		/* Offset of the first unread byte within this segment. */
		offset = sk->copied_seq+1 - skb->h.th->seq;

		if (skb->h.th->syn) offset--;	/* SYN uses a sequence slot but no data */
		if (offset < skb->len) {
			/* Urgent data must be consumed out-of-band (or
			   skipped, once consumed) before the bytes after it. */
			if (skb->h.th->urg) {
				if (skb->urg_used) {
					/* Skip past already-consumed urgent bytes. */
					sk->copied_seq += ntohs(skb->h.th->urg_ptr);
					offset += ntohs(skb->h.th->urg_ptr);
					if (offset >= skb->len) {
						skb->used = 1;
						skb =(struct sk_buff *)skb->next;
						continue;
					}
				} else {
					/* Tell the application urgent data is
					   pending, then bail out. */
					release_sock(sk);
					if (copied) return(copied);
					send_sig(SIGURG, current, 0);
					return(-EINTR);
				}
			}
			used = min(skb->len - offset, len);
			verify_area(VERIFY_WRITE, to, used);
			memcpy_tofs(to,((unsigned char *)skb->h.th) +
				skb->h.th->doff*4 + offset, used);
			copied += used;
			len -= used;
			to += used;
			if (!(flags & MSG_PEEK)) sk->copied_seq += used;

			/* Mark fully-consumed buffers so cleanup_rbuf() can
			   free them (never when peeking). */
			if (!(flags & MSG_PEEK) &&
			    (!skb->h.th->urg || skb->urg_used) &&
			    (used + offset >= skb->len)) skb->used = 1;

			/* PSH or URG terminates the read early. */
			if (skb->h.th->psh || skb->h.th->urg) {
				break;
			}
		} else {
			/* Everything in this segment was already read. */
			skb->used = 1;
		}
		skb =(struct sk_buff *)skb->next;
	}
	cleanup_rbuf(sk);
	release_sock(sk);
	DPRINTF((DBG_TCP, "tcp_read: returing %d\n", copied));
	if (copied == 0 && nonblock) return(-EAGAIN);
	return(copied);
}
1281
1282
1283
1284
1285
1286
/*
 * Shut down the sending side of the connection: flush any pending
 * partial segment, then build a FIN and queue it behind outstanding
 * data (or transmit it immediately if the write queue is empty), and
 * move to FIN_WAIT.  Only SEND_SHUTDOWN is acted on here.
 */
void
tcp_shutdown(struct sock *sk, int how)
{
	struct sk_buff *buff;
	struct tcphdr *t1, *th;
	struct proto *prot;
	int tmp;
	struct device *dev = NULL;

	/* Already sent a FIN, or the caller is not closing the send side:
	   nothing to do. */
	if (sk->state == TCP_FIN_WAIT1 || sk->state == TCP_FIN_WAIT2) return;
	if (!(how & SEND_SHUTDOWN)) return;
	sk->inuse = 1;

	/* Flush the pending partial segment so the FIN goes out last. */
	if (sk->send_tmp) tcp_send_partial(sk);

	prot =(struct proto *)sk->prot;
	th =(struct tcphdr *)&sk->dummy_th;
	release_sock(sk);	/* the allocation below may sleep */
	buff = (struct sk_buff *) prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
	if (buff == NULL) return;
	sk->inuse = 1;

	DPRINTF((DBG_TCP, "tcp_shutdown_send buff = %X\n", buff));
	buff->mem_addr = buff;
	buff->mem_len = MAX_RESET_SIZE;
	buff->lock = 0;
	buff->sk = sk;
	buff->len = sizeof(*t1);
	t1 =(struct tcphdr *)(buff + 1);

	/* IP (and link-level) header first. */
	tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
			IPPROTO_TCP, sk->opt,
			sizeof(struct tcphdr));
	if (tmp < 0) {
		prot->wfree(sk,buff->mem_addr, buff->mem_len);
		release_sock(sk);
		DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
		return;
	}

	t1 =(struct tcphdr *)((char *)t1 +tmp);
	buff ->len += tmp;
	buff->dev = dev;
	memcpy(t1, th, sizeof(*t1));
	t1->seq = ntohl(sk->send_seq);
	sk->send_seq++;	/* the FIN consumes one sequence number */
	buff->h.seq = sk->send_seq;
	t1->ack = 1;
	t1->ack_seq = ntohl(sk->acked_seq);
	t1->window = ntohs(sk->prot->rspace(sk));
	t1->fin = 1;
	t1->rst = 0;
	t1->doff = sizeof(*t1)/4;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

	/* Queue the FIN behind unsent data, or send it right away. */
	if (sk->wback != NULL) {
		buff->next = NULL;
		sk->wback->next = buff;
		sk->wback = buff;
		buff->magic = TCP_WRITE_QUEUE_MAGIC;
	} else {
		sk->prot->queue_xmit(sk, dev, buff, 0);
	}

	/* NOTE(review): entering FIN_WAIT2 directly from a
	   non-ESTABLISHED state looks questionable, but the behavior is
	   preserved as written. */
	if (sk->state == TCP_ESTABLISHED) sk->state = TCP_FIN_WAIT1;
	else sk->state = TCP_FIN_WAIT2;

	release_sock(sk);
}
1370
1371
/*
 * recvfrom() on a TCP socket: a plain tcp_read() followed by copying
 * the (already known) peer address back to the caller.  The address is
 * only written when the read itself succeeded.
 */
static int
tcp_recvfrom(struct sock *sk, unsigned char *to,
	     int to_len, int nonblock, unsigned flags,
	     struct sockaddr_in *addr, int *addr_len)
{
	struct sockaddr_in sin;
	int len;
	int result = tcp_read(sk, to, to_len, nonblock, flags);

	if (result < 0) return(result);
	/* Copy at most sizeof(sin) bytes of peer address to user space. */
	len = get_fs_long(addr_len);
	if (len > sizeof(sin)) len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = sk->dummy_th.dest;	/* already network byte order */
	sin.sin_addr.s_addr = sk->daddr;
	verify_area(VERIFY_WRITE, addr, len);
	memcpy_tofs(addr, &sin, len);
	verify_area(VERIFY_WRITE, addr_len, sizeof(len));
	put_fs_long(len, addr_len);
	return(result);
}
1393
1394
1395
1396 static void
1397 tcp_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
1398 struct proto *prot, struct options *opt, struct device *dev)
1399 {
1400 struct sk_buff *buff;
1401 struct tcphdr *t1;
1402 int tmp;
1403
1404
1405
1406
1407
1408 buff = (struct sk_buff *) prot->wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
1409 if (buff == NULL) return;
1410
1411 DPRINTF((DBG_TCP, "tcp_reset buff = %X\n", buff));
1412 buff->mem_addr = buff;
1413 buff->mem_len = MAX_RESET_SIZE;
1414 buff->lock = 0;
1415 buff->len = sizeof(*t1);
1416 buff->sk = NULL;
1417 buff->dev = dev;
1418
1419 t1 =(struct tcphdr *)(buff + 1);
1420
1421
1422 tmp = prot->build_header(buff, saddr, daddr, &dev, IPPROTO_TCP, opt,
1423 sizeof(struct tcphdr));
1424 if (tmp < 0) {
1425 prot->wfree(NULL, buff->mem_addr, buff->mem_len);
1426 return;
1427 }
1428 t1 =(struct tcphdr *)((char *)t1 +tmp);
1429 buff->len += tmp;
1430 memcpy(t1, th, sizeof(*t1));
1431
1432
1433 t1->dest = th->source;
1434 t1->source = th->dest;
1435 t1->seq = th->ack_seq;
1436 t1->ack_seq = htonl(ntohl(th->seq)+1);
1437 t1->rst = 1;
1438 t1->ack_seq = htonl(ntohl(th->seq)+1);
1439 t1->window = 0;
1440 t1->ack = 1;
1441 t1->syn = 0;
1442 t1->urg = 0;
1443 t1->fin = 0;
1444 t1->psh = 0;
1445 t1->doff = sizeof(*t1)/4;
1446 tcp_send_check(t1, saddr, daddr, sizeof(*t1), NULL);
1447 prot->queue_xmit(NULL, dev, buff, 1);
1448 }
1449
1450
1451
1452
1453
1454
1455
1456
1457
/*
 * Handle a SYN arriving on a listening socket: clone the listener
 * into a fresh sock in SYN_RECV state, answer with a SYN|ACK that
 * carries an MSS option, and queue the original skb on the listener
 * so accept() can pick the new connection up.
 *
 * Runs at interrupt time: allocations are GFP_ATOMIC and any failure
 * simply drops the SYN (the peer will retransmit it).
 */
static void
tcp_conn_request(struct sock *sk, struct sk_buff *skb,
		 unsigned long daddr, unsigned long saddr,
		 struct options *opt, struct device *dev)
{
  struct sk_buff *buff;
  struct tcphdr *t1;
  unsigned char *ptr;
  struct sock *newsk;
  struct tcphdr *th;
  int tmp;

  DPRINTF((DBG_TCP, "tcp_conn_request(sk = %X, skb = %X, daddr = %X, sadd4= %X, \n"
	  "                  opt = %X, dev = %X)\n",
	  sk, skb, daddr, saddr, opt, dev));

  th = skb->h.th;

  /* A dead listener cannot accept; answer with a reset. */
  if (!sk->dead) {
	wake_up(sk->sleep);
  } else {
	DPRINTF((DBG_TCP, "tcp_conn_request on dead socket\n"));
	tcp_reset(daddr, saddr, th, sk->prot, opt, dev);
	kfree_skb(skb, FREE_READ);
	return;
  }

  /*
   * Accept queue is full: silently drop the SYN and rely on the
   * remote end's retransmission.
   */
  if (sk->ack_backlog >= sk->max_ack_backlog) {
	kfree_skb(skb, FREE_READ);
	return;
  }

  /*
   * The new socket starts life as a copy of the listener; every
   * per-connection field is then reinitialized below.
   */
  newsk = (struct sock *) kmalloc(sizeof(struct sock), GFP_ATOMIC);
  if (newsk == NULL) {
	/* Drop; the retransmitted SYN may find memory free. */
	kfree_skb(skb, FREE_READ);
	return;
  }

  DPRINTF((DBG_TCP, "newsk = %X\n", newsk));
  memcpy((void *)newsk,(void *)sk, sizeof(*newsk));
  newsk->wback = NULL;
  newsk->wfront = NULL;
  newsk->rqueue = NULL;
  newsk->send_head = NULL;
  newsk->send_tail = NULL;
  newsk->back_log = NULL;
  newsk->rtt = TCP_CONNECT_TIME;	/* initial round-trip guess */
  newsk->mdev = 0;
  newsk->backoff = 0;
  newsk->blog = 0;
  newsk->intr = 0;
  newsk->proc = 0;
  newsk->done = 0;
  newsk->send_tmp = NULL;
  newsk->pair = NULL;
  newsk->wmem_alloc = 0;
  newsk->rmem_alloc = 0;

  newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;

  newsk->err = 0;
  newsk->shutdown = 0;
  newsk->ack_backlog = 0;
  newsk->acked_seq = skb->h.th->seq+1;	/* the SYN consumes one sequence number */
  newsk->fin_seq = skb->h.th->seq;
  newsk->copied_seq = skb->h.th->seq;
  newsk->state = TCP_SYN_RECV;
  newsk->timeout = 0;
  newsk->send_seq = jiffies * SEQ_TICK - seq_offset;	/* pick our ISN */
  newsk->rcv_ack_seq = newsk->send_seq;
  newsk->urg =0;
  newsk->retransmits = 0;
  newsk->destroy = 0;
  newsk->timer.data = (unsigned long)newsk;
  newsk->timer.function = &net_timer;
  newsk->dummy_th.source = skb->h.th->dest;
  newsk->dummy_th.dest = skb->h.th->source;

  /* Swap directions: their source address becomes our destination. */
  newsk->daddr = saddr;
  newsk->saddr = daddr;

  put_sock(newsk->num,newsk);
  newsk->dummy_th.res1 = 0;
  newsk->dummy_th.doff = 6;	/* template header includes the 4-byte MSS option */
  newsk->dummy_th.fin = 0;
  newsk->dummy_th.syn = 0;
  newsk->dummy_th.rst = 0;
  newsk->dummy_th.psh = 0;
  newsk->dummy_th.ack = 0;
  newsk->dummy_th.urg = 0;
  newsk->dummy_th.res2 = 0;
  newsk->acked_seq = skb->h.th->seq + 1;
  newsk->copied_seq = skb->h.th->seq;

  /* Pick the segment size: use the peer's MSS option (kind 2, len 4) if given. */
  if (skb->h.th->doff == 5) {
	newsk->mtu = dev->mtu - HEADER_SIZE;
  } else {
	ptr =(unsigned char *)(skb->h.th + 1);
	if (ptr[0] != 2 || ptr[1] != 4) {
		newsk->mtu = dev->mtu - HEADER_SIZE;
	} else {
		newsk->mtu = min(ptr[2] * 256 + ptr[3] - HEADER_SIZE,
				 dev->mtu - HEADER_SIZE);
	}
  }

  /* Build and send the SYN|ACK reply. */
  buff = (struct sk_buff *) newsk->prot->wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
  if (buff == NULL) {
	sk->err = -ENOMEM;
	newsk->dead = 1;
	release_sock(newsk);
	kfree_skb(skb, FREE_READ);
	return;
  }

  buff->lock = 0;
  buff->mem_addr = buff;
  buff->mem_len = MAX_SYN_SIZE;
  buff->len = sizeof(struct tcphdr)+4;	/* TCP header plus the MSS option */
  buff->sk = newsk;

  t1 =(struct tcphdr *)(buff + 1);

  /* Put in the IP and MAC headers. */
  tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &dev,
			       IPPROTO_TCP, NULL, MAX_SYN_SIZE);

  if (tmp < 0) {
	sk->err = tmp;
	sk->prot->wfree(newsk, buff->mem_addr, buff->mem_len);
	newsk->dead = 1;
	release_sock(newsk);
	skb->sk = sk;	/* charge the skb back to the listener before freeing */
	kfree_skb(skb, FREE_READ);
	return;
  }

  buff->len += tmp;
  t1 =(struct tcphdr *)((char *)t1 +tmp);

  memcpy(t1, skb->h.th, sizeof(*t1));	/* start from the received header */
  buff->h.seq = newsk->send_seq;

  /* Turn it into a SYN|ACK carrying our ISN and acking theirs. */
  t1->dest = skb->h.th->source;
  t1->source = newsk->dummy_th.source;
  t1->seq = ntohl(newsk->send_seq++);
  t1->ack = 1;
  newsk->window = newsk->prot->rspace(newsk);
  t1->window = ntohs(newsk->window);
  t1->res1 = 0;
  t1->res2 = 0;
  t1->rst = 0;
  t1->urg = 0;
  t1->psh = 0;
  t1->syn = 1;
  t1->ack_seq = ntohl(skb->h.th->seq+1);
  t1->doff = sizeof(*t1)/4+1;	/* one extra 32-bit word for the option */

  /* MSS option: kind 2, length 4, our advertised segment size. */
  ptr =(unsigned char *)(t1+1);
  ptr[0] = 2;
  ptr[1] = 4;
  ptr[2] =((dev->mtu - HEADER_SIZE) >> 8) & 0xff;
  ptr[3] =(dev->mtu - HEADER_SIZE) & 0xff;

  tcp_send_check(t1, daddr, saddr, sizeof(*t1)+4, newsk);
  newsk->prot->queue_xmit(newsk, dev, buff, 0);

  reset_timer(newsk, TIME_WRITE , TCP_CONNECT_TIME);	/* retransmit SYN|ACK if unanswered */
  skb->sk = newsk;	/* hand the SYN skb over to the new socket */

  /* Charge the buffer memory to the new socket instead of the listener. */
  sk->rmem_alloc -= skb->mem_len;
  newsk->rmem_alloc += skb->mem_len;

  /* Queue the skb on the listener so accept() can find newsk. */
  if (sk->rqueue == NULL) {
	skb->next = skb;
	skb->prev = skb;
	sk->rqueue = skb;
  } else {
	skb->next = sk->rqueue;
	skb->prev = sk->rqueue->prev;
	sk->rqueue->prev = skb;
	skb->prev->next = skb;
  }
  sk->ack_backlog++;
  release_sock(newsk);
}
1661
1662
/*
 * Close a TCP socket.  Flushes unread receive data (noting whether
 * the peer must be told via RST that data was thrown away), pushes
 * out any pending partial segment, then sends a FIN and moves the
 * connection to the appropriate closing state.  'timeout' nonzero
 * means the caller has already lingered and wants immediate teardown.
 */
static void
tcp_close(struct sock *sk, int timeout)
{
  struct sk_buff *buff;
  int need_reset = 0;
  struct tcphdr *t1, *th;
  struct proto *prot;
  struct device *dev=NULL;
  int tmp;

  DPRINTF((DBG_TCP, "tcp_close((struct sock *)%X, %d)\n",sk, timeout));
  sk->inuse = 1;
  sk->keepopen = 1;
  sk->shutdown = SHUTDOWN_MASK;

  if (!sk->dead) wake_up(sk->sleep);

  /*
   * Drop everything still in the receive queue.  If any of it was
   * real data the application never consumed, close with a reset
   * rather than an orderly FIN.
   */
  if (sk->rqueue != NULL) {
	struct sk_buff *skb;
	struct sk_buff *skb2;

	skb = sk->rqueue;
	do {
		skb2 =(struct sk_buff *)skb->next;
		/* data beyond what was copied to the user? */
		if (skb->len > 0 &&
		    after(skb->h.th->seq + skb->len + 1, sk->copied_seq))
			need_reset = 1;
		kfree_skb(skb, FREE_READ);
		skb = skb2;
	} while(skb != sk->rqueue);
  }
  sk->rqueue = NULL;

  /* Push out any half-built segment that was waiting for more data. */
  if (sk->send_tmp) {
	tcp_send_partial(sk);
  }

  switch(sk->state) {
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
	case TCP_LAST_ACK:
		/* Our FIN is already on the wire; just (re)arm the close timer. */
		reset_timer(sk, TIME_CLOSE, 4 * sk->rtt);
		if (timeout) tcp_time_wait(sk);
		release_sock(sk);
		break;
	case TCP_TIME_WAIT:
		if (timeout) {
			sk->state = TCP_CLOSE;
		}
		release_sock(sk);
		return;
	case TCP_LISTEN:
		sk->state = TCP_CLOSE;
		release_sock(sk);
		return;
	case TCP_CLOSE:
		release_sock(sk);
		return;
	case TCP_CLOSE_WAIT:
	case TCP_ESTABLISHED:
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		prot =(struct proto *)sk->prot;
		th =(struct tcphdr *)&sk->dummy_th;
		buff = (struct sk_buff *) prot->wmalloc(sk, MAX_FIN_SIZE, 1, GFP_ATOMIC);
		if (buff == NULL) {
			/*
			 * No memory for the FIN: back off to ESTABLISHED and
			 * retry from the close timer.
			 * NOTE(review): this path returns without calling
			 * release_sock() — presumably the timer retry
			 * completes the close; verify against the timer code.
			 */
			if (sk->state != TCP_CLOSE_WAIT)
				sk->state = TCP_ESTABLISHED;
			reset_timer(sk, TIME_CLOSE, 100);
			return;
		}
		buff->lock = 0;
		buff->mem_addr = buff;
		buff->mem_len = MAX_FIN_SIZE;
		buff->sk = sk;
		buff->len = sizeof(*t1);
		t1 =(struct tcphdr *)(buff + 1);

		/* Put in the IP and MAC headers. */
		tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
					 IPPROTO_TCP, sk->opt,
					 sizeof(struct tcphdr));
		if (tmp < 0) {
			prot->wfree(sk,buff->mem_addr, buff->mem_len);
			DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
			release_sock(sk);
			return;
		}

		t1 =(struct tcphdr *)((char *)t1 +tmp);
		buff ->len += tmp;
		buff->dev = dev;
		memcpy(t1, th, sizeof(*t1));
		t1->seq = ntohl(sk->send_seq);	/* the FIN consumes a sequence number */
		sk->send_seq++;
		buff->h.seq = sk->send_seq;
		t1->ack = 1;

		/* Ack anything still outstanding; no point delaying now. */
		sk->delay_acks = 0;
		t1->ack_seq = ntohl(sk->acked_seq);
		t1->window = ntohs(sk->prot->rspace(sk));
		t1->fin = 1;
		t1->rst = need_reset;	/* abort instead of close if data was discarded */
		t1->doff = sizeof(*t1)/4;
		tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);

		if (sk->wfront == NULL) {
			/* Nothing queued ahead of us: send the FIN immediately. */
			prot->queue_xmit(sk, dev, buff, 0);
		} else {
			/* Data still queued: append the FIN behind it. */
			reset_timer(sk, TIME_WRITE,
				    backoff(sk->backoff) * (2 * sk->mdev + sk->rtt));
			buff->next = NULL;
			if (sk->wback == NULL) {
				sk->wfront=buff;
			} else {
				sk->wback->next = buff;
			}
			sk->wback = buff;
			buff->magic = TCP_WRITE_QUEUE_MAGIC;
		}

		if (sk->state == TCP_CLOSE_WAIT) {
			sk->state = TCP_FIN_WAIT2;
		} else {
			sk->state = TCP_FIN_WAIT1;
		}
  }
  release_sock(sk);
}
1802
1803
1804
1805
1806
1807
/*
 * Push queued segments from the write queue onto the wire as long as
 * they fit inside both the peer's advertised window (window_seq) and
 * our congestion window.  Segments that turn out to be already
 * acknowledged are simply freed.
 */
static void
tcp_write_xmit(struct sock *sk)
{
  struct sk_buff *skb;

  DPRINTF((DBG_TCP, "tcp_write_xmit(sk=%X)\n", sk));
  while(sk->wfront != NULL &&
        before(sk->wfront->h.seq, sk->window_seq) &&
        sk->packets_out < sk->cong_window) {
	skb = sk->wfront;
	sk->wfront =(struct sk_buff *)skb->next;
	if (sk->wfront == NULL) sk->wback = NULL;
	skb->next = NULL;
	if (skb->magic != TCP_WRITE_QUEUE_MAGIC) {
		/* Queue corruption detected: discard the whole write queue. */
		DPRINTF((DBG_TCP, "tcp.c skb with bad magic(%X) on write queue. Squashing "
			"queue\n", skb->magic));
		sk->wfront = NULL;
		sk->wback = NULL;
		return;
	}
	skb->magic = 0;
	DPRINTF((DBG_TCP, "Sending a packet.\n"));

	/* Already acked (possible after a retransmit): just free it. */
	if (before(skb->h.seq, sk->rcv_ack_seq +1)) {
		sk->retransmits = 0;
		kfree_skb(skb, FREE_WRITE);
		if (!sk->dead) wake_up(sk->sleep);	/* writer may be waiting for space */
	} else {
		sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
	}
  }
}
1841
1842
1843
1844
1845
1846
1847 void
1848 sort_send(struct sock *sk)
1849 {
1850 struct sk_buff *list = NULL;
1851 struct sk_buff *skb,*skb2,*skb3;
1852
1853 for (skb = sk->send_head; skb != NULL; skb = skb2) {
1854 skb2 = (struct sk_buff *)skb->link3;
1855 if (list == NULL || before (skb2->h.seq, list->h.seq)) {
1856 skb->link3 = list;
1857 sk->send_tail = skb;
1858 list = skb;
1859 } else {
1860 for (skb3 = list; ; skb3 = (struct sk_buff *)skb3->link3) {
1861 if (skb3->link3 == NULL ||
1862 before(skb->h.seq, skb3->link3->h.seq)) {
1863 skb->link3 = skb3->link3;
1864 skb3->link3 = skb;
1865 if (skb->link3 == NULL) sk->send_tail = skb;
1866 break;
1867 }
1868 }
1869 }
1870 }
1871 sk->send_head = list;
1872 }
1873
1874
1875
/*
 * Process an incoming ACK: validate it, update the send window,
 * free acknowledged packets off the retransmit list, refresh the
 * round-trip estimate, and drive the closing states forward.
 * Returns 1 if the ACK was acceptable, 0 if it was out of range.
 */
static int
tcp_ack(struct sock *sk, struct tcphdr *th, unsigned long saddr, int len)
{
  unsigned long ack;
  int flag = 0;	/* progress bits; see uses below (1=data/progress, 2=rtt sampled, 4=lists touched) */

  ack = ntohl(th->ack_seq);
  DPRINTF((DBG_TCP, "tcp_ack ack=%d, window=%d, "
	  "sk->rcv_ack_seq=%d, sk->window_seq = %d\n",
	  ack, ntohs(th->window), sk->rcv_ack_seq, sk->window_seq));

  /* ACK outside [rcv_ack_seq-1, send_seq+1]: stale or bogus. */
  if (after(ack, sk->send_seq+1) || before(ack, sk->rcv_ack_seq-1)) {
	if (after(ack, sk->send_seq) ||
	   (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)) {
		return(0);
	}
	if (sk->keepopen) {
		reset_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
	}
	return(1);
  }

  if (len != th->doff*4) flag |= 1;	/* segment carries payload */

  /*
   * The peer's window shrank below what we already have in flight:
   * pull the no-longer-sendable packets off the retransmit list back
   * onto the front of the write queue, preserving sequence order.
   */
  if (after(sk->window_seq, ack+ntohs(th->window))) {
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *wskb = NULL;	/* last packet re-queued so far */

	skb2 = sk->send_head;
	sk->send_head = NULL;
	sk->send_tail = NULL;

	flag |= 4;

	sk->window_seq = ack + ntohs(th->window);
	cli();	/* these lists are also manipulated at interrupt time */
	while (skb2 != NULL) {
		skb = skb2;
		skb2 = (struct sk_buff *)skb->link3;
		skb->link3 = NULL;
		if (after(skb->h.seq, sk->window_seq)) {
			/* No longer inside the window: cancel and re-queue. */
			if (sk->packets_out > 0) sk->packets_out--;

			/* Unlink from any device transmit queue it sits on. */
			if (skb->next != NULL) {
				int i;

				if (skb->next != skb) {
					skb->next->prev = skb->prev;
					skb->prev->next = skb->next;
				}

				for(i = 0; i < DEV_NUMBUFFS; i++) {
					if (skb->dev->buffs[i] == skb) {
						if (skb->next == skb)
							skb->dev->buffs[i] = NULL;
						else
							skb->dev->buffs[i] = skb->next;
						break;
					}
				}
				/* It might also be waiting on ARP resolution. */
				if (arp_q == skb) {
					if (skb->next == skb) arp_q = NULL;
					else arp_q = skb->next;
				}
			}

			/* Splice back onto the write queue, keeping order. */
			skb->magic = TCP_WRITE_QUEUE_MAGIC;
			if (wskb == NULL) {
				skb->next = sk->wfront;
				sk->wfront = skb;
			} else {
				skb->next = wskb->next;
				wskb->next = skb;
			}
			if (sk->wback == wskb) sk->wback = skb;
			wskb = skb;
		} else {
			/* Still inside the window: keep on the send list. */
			if (sk->send_head == NULL) {
				sk->send_head = skb;
				sk->send_tail = skb;
			} else {
				sk->send_tail->link3 = skb;
				sk->send_tail = skb;
			}
			skb->link3 = NULL;
		}
	}
	sti();
  }

  /* Keep head/tail/packets_out consistent if the list emptied. */
  if (sk->send_tail == NULL || sk->send_head == NULL) {
	sk->send_head = NULL;
	sk->send_tail = NULL;
	sk->packets_out= 0;
  }

  /* Record the peer's current right window edge. */
  sk->window_seq = ack + ntohs(th->window);

  /* Grow the congestion window: doubling in slow start, else linear. */
  if (sk->cong_window < 2048 && ack != sk->rcv_ack_seq) {
	if (sk->exp_growth) sk->cong_window *= 2;
	else sk->cong_window++;
  }

  DPRINTF((DBG_TCP, "tcp_ack: Updating rcv ack sequence.\n"));
  sk->rcv_ack_seq = ack;

  /* Free everything on the retransmit list that is now acknowledged. */
  while(sk->send_head != NULL) {
	/* Paranoia: the list should already be sorted by sequence. */
	if (sk->send_head->link3 &&
	    after(sk->send_head->h.seq, sk->send_head->link3->h.seq)) {
		printk("INET: tcp.c: *** bug send_list out of order.\n");
		sort_send(sk);
	}

	if (before(sk->send_head->h.seq, ack+1)) {
		struct sk_buff *oskb;

		sk->retransmits = 0;

		if (sk->packets_out > 0) sk->packets_out --;
		DPRINTF((DBG_TCP, "skb=%X skb->h.seq = %d acked ack=%d\n",
			sk->send_head, sk->send_head->h.seq, ack));

		/* Writers may be sleeping on buffer space. */
		if (!sk->dead) wake_up(sk->sleep);

		oskb = sk->send_head;

		/*
		 * Update the round-trip estimate (smoothed rtt plus
		 * mean deviation), but only from segments that were
		 * never retransmitted and only once per ACK.
		 */
		if (sk->retransmits == 0 && !(flag&2)) {
			long abserr, rtt = jiffies - oskb->when;

			if (sk->state == TCP_SYN_SENT || sk->state == TCP_SYN_RECV)
				/* first sample on this connection: take it as-is */
				sk->rtt = rtt;
			else {
				abserr = (rtt > sk->rtt) ? rtt - sk->rtt : sk->rtt - rtt;
				sk->rtt = (7 * sk->rtt + rtt) >> 3;
				sk->mdev = (3 * sk->mdev + abserr) >> 2;
			}
			sk->backoff = 0;
		}
		flag |= (2|4);

		/* Clamp the rtt estimate into a sane range (in jiffies). */
		if (sk->rtt < 10) sk->rtt = 10;
		if (sk->rtt > 12000) sk->rtt = 12000;

		cli();

		oskb = sk->send_head;
		sk->send_head =(struct sk_buff *)oskb->link3;
		if (sk->send_head == NULL) {
			sk->send_tail = NULL;
		}

		/* Unlink from device/arp queues before freeing. */
		if (oskb->next != NULL) {
			int i;

			if (oskb->next != oskb) {
				oskb->next->prev = oskb->prev;
				oskb->prev->next = oskb->next;
			}
			for(i = 0; i < DEV_NUMBUFFS; i++) {
				if (oskb->dev->buffs[i] == oskb) {
					if (oskb== oskb->next)
						oskb->dev->buffs[i]= NULL;
					else
						oskb->dev->buffs[i] = oskb->next;
					break;
				}
			}
			if (arp_q == oskb) {
				if (oskb == oskb->next) arp_q = NULL;
				else arp_q =(struct sk_buff *)oskb->next;
			}
		}
		sti();
		oskb->magic = 0;
		kfree_skb(oskb, FREE_WRITE);
		if (!sk->dead) wake_up(sk->sleep);
	} else {
		break;	/* list is sorted, so nothing further is acked */
	}
  }

  /*
   * See whether we can transmit more, or whether everything is done
   * and the timers can be relaxed.
   */
  if (sk->wfront != NULL) {
	if (after (sk->window_seq, sk->wfront->h.seq) &&
	    sk->packets_out < sk->cong_window) {
		flag |= 1;
		tcp_write_xmit(sk);
	}
  } else {
	if (sk->send_head == NULL && sk->ack_backlog == 0 &&
	    sk->state != TCP_TIME_WAIT && !sk->keepopen) {
		DPRINTF((DBG_TCP, "Nothing to do, going to sleep.\n"));
		if (!sk->dead) wake_up(sk->sleep);

		delete_timer(sk);
	} else {
		/*
		 * NOTE(review): comparing the state value against
		 * keepopen looks odd — presumably intended as "not in
		 * keepalive mode"; kept byte-identical.
		 */
		if (sk->state != (unsigned char) sk->keepopen) {
			reset_timer(sk, TIME_WRITE,
				    backoff(sk->backoff) * (2 * sk->mdev + sk->rtt));
		}
		if (sk->state == TCP_TIME_WAIT) {
			reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
		}
	}
  }

  /* Everything acked and a partial segment pending: push it out now. */
  if (sk->packets_out == 0 && sk->send_tmp != NULL &&
      sk->wfront == NULL && sk->send_head == NULL) {
	flag |= 1;
	tcp_send_partial(sk);
  }

  /* In TIME_WAIT, the final ACK of our FIN lets the socket fully close. */
  if (sk->state == TCP_TIME_WAIT) {
	if (!sk->dead) wake_up(sk->sleep);
	if (sk->rcv_ack_seq == sk->send_seq && sk->acked_seq == sk->fin_seq) {
		flag |= 1;
		sk->state = TCP_CLOSE;
		sk->shutdown = SHUTDOWN_MASK;
	}
  }

  /* LAST_ACK / FIN_WAIT2: our FIN is acked; finish or enter TIME_WAIT. */
  if (sk->state == TCP_LAST_ACK || sk->state == TCP_FIN_WAIT2) {
	if (!sk->dead) wake_up(sk->sleep);
	if (sk->rcv_ack_seq == sk->send_seq) {
		flag |= 1;
		if (sk->acked_seq != sk->fin_seq) {
			tcp_time_wait(sk);
		} else {
			DPRINTF((DBG_TCP, "tcp_ack closing socket - %X\n", sk));
			tcp_send_ack(sk->send_seq, sk->acked_seq, sk,
				     th, sk->daddr);
			sk->shutdown = SHUTDOWN_MASK;
			sk->state = TCP_CLOSE;
		}
	}
  }

  /*
   * If this ACK made no forward progress and the oldest unacked
   * segment has exceeded its timeout, retransmit now and leave
   * exponential (slow-start) growth.
   */
  if (((!flag) || (flag&4)) && sk->send_head != NULL &&
      (sk->send_head->when + backoff(sk->backoff) * (2 * sk->mdev + sk->rtt)
       < jiffies)) {
	sk->exp_growth = 0;
	ip_retransmit(sk, 0);
  }

  DPRINTF((DBG_TCP, "leaving tcp_ack\n"));
  return(1);
}
2147
2148
2149
2150
2151
2152
2153
/*
 * Queue an incoming data segment on the socket's circular receive
 * queue, inserted by sequence number so out-of-order arrivals are
 * held until the gap fills.  Advances acked_seq across every
 * in-order segment, chooses between an immediate and a delayed ACK,
 * and detects the FIN-completes-close transition.  Returns 0.
 */
static int
tcp_data(struct sk_buff *skb, struct sock *sk,
	 unsigned long saddr, unsigned short len)
{
  struct sk_buff *skb1, *skb2;
  struct tcphdr *th;

  th = skb->h.th;
  print_th(th);
  skb->len = len -(th->doff*4);	/* payload length only */

  DPRINTF((DBG_TCP, "tcp_data len = %d sk = %X:\n", skb->len, sk));

  sk->bytes_rcv += skb->len;
  if (skb->len == 0 && !th->fin && !th->urg && !th->psh) {
	/* Bare ACK carrier: nothing to queue. */
	if (!th->ack) tcp_send_ack(sk->send_seq, sk->acked_seq,sk, th, saddr);
	kfree_skb(skb, FREE_READ);
	return(0);
  }

  /* Data after the receive side was shut down: abort the connection. */
  if (sk->shutdown & RCV_SHUTDOWN) {
	sk->acked_seq = th->seq + skb->len + th->syn + th->fin;
	tcp_reset(sk->saddr, sk->daddr, skb->h.th,
		  sk->prot, NULL, skb->dev);
	sk->state = TCP_CLOSE;
	sk->err = EPIPE;
	sk->shutdown = SHUTDOWN_MASK;
	DPRINTF((DBG_TCP, "tcp_data: closing socket - %X\n", sk));
	kfree_skb(skb, FREE_READ);
	if (!sk->dead) wake_up(sk->sleep);
	return(0);
  }

  /*
   * Insert into the circular receive queue, ordered by sequence
   * number.  sk->rqueue points at the highest-sequence skb; after
   * the insertion skb1 is the new skb's predecessor, or NULL when
   * it went in at the very front.
   */
  if (sk->rqueue == NULL) {
	DPRINTF((DBG_TCP, "tcp_data: skb = %X:\n", skb));

	sk->rqueue = skb;
	skb->next = skb;
	skb->prev = skb;
	skb1= NULL;
  } else {
	DPRINTF((DBG_TCP, "tcp_data adding to chain sk = %X:\n", sk));

	/* Scan backwards from the newest skb for the insertion point. */
	for(skb1=sk->rqueue; ; skb1 =(struct sk_buff *)skb1->prev) {
		DPRINTF((DBG_TCP, "skb1=%X\n", skb1));
		DPRINTF((DBG_TCP, "skb1->h.th->seq = %d\n", skb1->h.th->seq));
		if (after(th->seq+1, skb1->h.th->seq)) {
			/* Goes right after skb1. */
			skb->prev = skb1;
			skb->next = skb1->next;
			skb->next->prev = skb;
			skb1->next = skb;
			if (skb1 == sk->rqueue) sk->rqueue = skb;
			break;
		}
		if (skb1->prev == sk->rqueue) {
			/* Smaller than everything queued: insert at the front. */
			skb->next= skb1;
			skb->prev = skb1->prev;
			skb->prev->next = skb;
			skb1->prev = skb;
			skb1 = NULL;	/* no predecessor */

			break;
		}
	}
	DPRINTF((DBG_TCP, "skb = %X:\n", skb));
  }

  /* Stash the sequence number just past this segment in ack_seq. */
  th->ack_seq = th->seq + skb->len;
  if (th->syn) th->ack_seq++;
  if (th->fin) th->ack_seq++;

  if (before(sk->acked_seq, sk->copied_seq)) {
	printk("*** tcp.c:tcp_data bug acked < copied\n");
	sk->acked_seq = sk->copied_seq;	/* repair the invariant */
  }

  /* If this segment is in order, advance acked_seq over it and over
     any previously queued segments it joins up with. */
  if (skb1 == NULL || skb1->acked || before(th->seq, sk->acked_seq+1)) {
	if (before(th->seq, sk->acked_seq+1)) {
		if (after(th->ack_seq, sk->acked_seq))
			sk->acked_seq = th->ack_seq;
		skb->acked = 1;

		/* An in-order FIN shuts down the receive side. */
		if (skb->h.th->fin) {
			if (!sk->dead) wake_up(sk->sleep);
			sk->shutdown |= RCV_SHUTDOWN;
		}

		/* Fold in already-queued segments that now connect up. */
		for(skb2 = (struct sk_buff *)skb->next;
		    skb2 !=(struct sk_buff *) sk->rqueue->next;
		    skb2 = (struct sk_buff *)skb2->next) {
			if (before(skb2->h.th->seq, sk->acked_seq+1)) {
				if (after(skb2->h.th->ack_seq, sk->acked_seq))
					sk->acked_seq = skb2->h.th->ack_seq;
				skb2->acked = 1;

				if (skb2->h.th->fin) {
					sk->shutdown |= RCV_SHUTDOWN;
					if (!sk->dead) wake_up(sk->sleep);
				}

				/* Force an immediate ack for the merged run. */
				sk->ack_backlog = sk->max_ack_backlog;
			} else {
				break;	/* queue is sorted; the rest is ahead of us */
			}
		}

		/*
		 * Send the ACK now if delayed acks are off or we are
		 * falling behind; otherwise just schedule one.
		 */
		if (!sk->delay_acks ||
		    sk->ack_backlog >= sk->max_ack_backlog ||
		    sk->bytes_rcv > sk->max_unacked || th->fin) {
			/* immediate ACK — sent below */
		} else {
			sk->ack_backlog++;
			reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
		}
	}
  }

  /*
   * Out-of-order segment: make room by pruning unacked skbs from the
   * head of the queue if memory is tight, then ack what we do have
   * so the sender can see the hole.
   */
  if (!skb->acked) {
	while (sk->prot->rspace(sk) < sk->mtu) {
		skb1 = (struct sk_buff *)sk->rqueue;
		if (skb1 == NULL) {
			printk("INET: tcp.c:tcp_data memory leak detected.\n");
			break;
		}

		/* Never throw away data that was already acknowledged. */
		if (skb1->acked) {
			break;
		}
		if (skb1->prev == skb1) {
			sk->rqueue = NULL;	/* queue is now empty */
		} else {
			sk->rqueue = (struct sk_buff *)skb1->prev;
			skb1->next->prev = skb1->prev;
			skb1->prev->next = skb1->next;
		}
		kfree_skb(skb1, FREE_READ);
	}
	tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
	sk->ack_backlog++;
	reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
  } else {
	/* In-order data: acknowledge it right away. */
	tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
  }

  /* A reader may be waiting for this data. */
  if (!sk->dead) {
	wake_up(sk->sleep);
  } else {
	DPRINTF((DBG_TCP, "data received on dead socket.\n"));
  }

  /* Both FINs seen and everything acked: move to LAST_ACK. */
  if (sk->state == TCP_FIN_WAIT2 &&
      sk->acked_seq == sk->fin_seq && sk->rcv_ack_seq == sk->send_seq) {
	DPRINTF((DBG_TCP, "tcp_data: entering last_ack state sk = %X\n", sk));

	sk->shutdown = SHUTDOWN_MASK;
	sk->state = TCP_LAST_ACK;
	if (!sk->dead) wake_up(sk->sleep);
  }

  return(0);
}
2349
2350
2351 static int
2352 tcp_urg(struct sock *sk, struct tcphdr *th, unsigned long saddr)
2353 {
2354 extern int kill_pg(int pg, int sig, int priv);
2355 extern int kill_proc(int pid, int sig, int priv);
2356
2357 if (!sk->dead) wake_up(sk->sleep);
2358
2359 if (sk->urginline) {
2360 th->urg = 0;
2361 th->psh = 1;
2362 return(0);
2363 }
2364
2365 if (!sk->urg) {
2366
2367 if (sk->proc != 0) {
2368 if (sk->proc > 0) {
2369 kill_proc(sk->proc, SIGURG, 1);
2370 } else {
2371 kill_pg(-sk->proc, SIGURG, 1);
2372 }
2373 }
2374 }
2375 sk->urg++;
2376 return(0);
2377 }
2378
2379
2380
/*
 * Process a received FIN: record its sequence number and move the
 * connection state machine along.  The ACK for the FIN itself is
 * produced by the caller's path through tcp_data(); here we only
 * bump ack_backlog so one is guaranteed to go out.  Returns 0.
 */
static int
tcp_fin(struct sock *sk, struct tcphdr *th,
	unsigned long saddr, struct device *dev)
{
  DPRINTF((DBG_TCP, "tcp_fin(sk=%X, th=%X, saddr=%X, dev=%X)\n",
	  sk, th, saddr, dev));

  if (!sk->dead) {
	wake_up(sk->sleep);
  }

  switch(sk->state) {
	case TCP_SYN_RECV:
	case TCP_SYN_SENT:
	case TCP_ESTABLISHED:
		/* Peer closed first: we go to CLOSE_WAIT. */
		sk->fin_seq = th->seq+1;
		sk->state = TCP_CLOSE_WAIT;
		if (th->rst) sk->shutdown = SHUTDOWN_MASK;
		break;

	case TCP_CLOSE_WAIT:
	case TCP_FIN_WAIT2:
		break;	/* duplicate FIN: nothing to do */

	case TCP_FIN_WAIT1:
		/* Simultaneous close: both FINs are in flight. */
		sk->fin_seq = th->seq+1;
		sk->state = TCP_FIN_WAIT2;
		break;

	default:
	case TCP_TIME_WAIT:
		sk->state = TCP_LAST_ACK;

		/* Start the linger timer before the final close. */
		reset_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
		return(0);
  }
  sk->ack_backlog++;	/* the FIN still needs acknowledging */

  return(0);
}
2424
2425
2426
/*
 * accept() on a listening TCP socket: block (unless O_NONBLOCK)
 * until a connection request is queued, then return the new socket
 * that tcp_conn_request() created.  On failure returns NULL with
 * sk->err set.
 */
static struct sock *
tcp_accept(struct sock *sk, int flags)
{
  struct sock *newsk;
  struct sk_buff *skb;

  DPRINTF((DBG_TCP, "tcp_accept(sk=%X, flags=%X, addr=%s)\n",
	  sk, flags, in_ntoa(sk->saddr)));

  /* accept() is only valid on sockets that are actually listening. */
  if (sk->state != TCP_LISTEN) {
	sk->err = EINVAL;
	return(NULL);
  }

  /* Interrupts off to avoid racing the queue check against sleeping. */
  cli();
  sk->inuse = 1;
  while((skb = get_firstr(sk)) == NULL) {
	if (flags & O_NONBLOCK) {
		sti();
		release_sock(sk);
		sk->err = EAGAIN;
		return(NULL);
	}

	release_sock(sk);
	interruptible_sleep_on(sk->sleep);
	if (current->signal & ~current->blocked) {
		/* Interrupted by a signal: let the syscall be restarted. */
		sti();
		sk->err = ERESTARTSYS;
		return(NULL);
	}
	sk->inuse = 1;	/* re-grab the socket before re-checking the queue */
  }
  sti();

  /* The queued skb carries the new connection's socket. */
  newsk = skb->sk;

  kfree_skb(skb, FREE_READ);
  sk->ack_backlog--;
  release_sock(sk);
  return(newsk);
}
2475
2476
2477
/*
 * Active open: validate the destination address, pick an initial
 * sequence number, build and transmit a SYN carrying an MSS option,
 * and move the socket to SYN_SENT.  Returns 0 on success or a
 * negative errno.
 */
static int
tcp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len)
{
  struct sk_buff *buff;
  struct sockaddr_in sin;
  struct device *dev=NULL;
  unsigned char *ptr;
  int tmp;
  struct tcphdr *t1;

  if (sk->state != TCP_CLOSE) return(-EISCONN);
  if (addr_len < 8) return(-EINVAL);	/* need at least family+port+address */

  memcpy_fromfs(&sin,usin, min(sizeof(sin), addr_len));

  if (sin.sin_family && sin.sin_family != AF_INET) return(-EAFNOSUPPORT);

  DPRINTF((DBG_TCP, "TCP connect daddr=%s\n", in_ntoa(sin.sin_addr.s_addr)));

  /* Don't want a TCP connection going to a broadcast address. */
  if (chk_addr(sin.sin_addr.s_addr) == IS_BROADCAST) {
	DPRINTF((DBG_TCP, "TCP connection to broadcast address not allowed\n"));
	return(-ENETUNREACH);
  }
  sk->inuse = 1;
  sk->daddr = sin.sin_addr.s_addr;
  sk->send_seq = jiffies * SEQ_TICK - seq_offset;	/* clock-driven ISN */
  sk->rcv_ack_seq = sk->send_seq -1;
  sk->err = 0;
  sk->dummy_th.dest = sin.sin_port;
  release_sock(sk);

  /* GFP_KERNEL: this path may sleep for memory. */
  buff = (struct sk_buff *) sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
  if (buff == NULL) {
	return(-ENOMEM);
  }
  sk->inuse = 1;
  buff->lock = 0;
  buff->mem_addr = buff;
  buff->mem_len = MAX_SYN_SIZE;
  buff->len = 24;	/* 20-byte TCP header plus the 4-byte MSS option */
  buff->sk = sk;
  t1 = (struct tcphdr *)(buff + 1);

  /* Put in the IP and MAC headers. */
  tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
			       IPPROTO_TCP, NULL, MAX_SYN_SIZE);
  if (tmp < 0) {
	sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
	release_sock(sk);
	return(-ENETUNREACH);
  }
  buff->len += tmp;
  t1 = (struct tcphdr *)((char *)t1 +tmp);

  memcpy(t1,(void *)&(sk->dummy_th), sizeof(*t1));
  t1->seq = ntohl(sk->send_seq++);	/* the SYN consumes one sequence number */
  buff->h.seq = sk->send_seq;
  t1->ack = 0;
  t1->window = 2;	/* tiny window until the handshake completes */
  t1->res1=0;
  t1->res2=0;
  t1->rst = 0;
  t1->urg = 0;
  t1->psh = 0;
  t1->syn = 1;
  t1->urg_ptr = 0;
  t1->doff = 6;	/* header plus one option word */

  /* MSS option: kind 2, length 4, our advertised segment size. */
  ptr = (unsigned char *)(t1+1);
  ptr[0] = 2;
  ptr[1] = 4;
  ptr[2] = (dev->mtu- HEADER_SIZE) >> 8;
  ptr[3] = (dev->mtu- HEADER_SIZE) & 0xff;
  sk->mtu = dev->mtu - HEADER_SIZE;
  tcp_send_check(t1, sk->saddr, sk->daddr,
		 sizeof(struct tcphdr) + 4, sk);

  /* Set the state before transmit, in case the reply is very fast. */
  sk->state = TCP_SYN_SENT;

  sk->prot->queue_xmit(sk, dev, buff, 0);

  sk->rtt = TCP_CONNECT_TIME;
  reset_timer(sk, TIME_WRITE , TCP_CONNECT_TIME);	/* retransmit SYN if unanswered */
  sk->retransmits = TCP_RETR2 - TCP_SYN_RETRIES;
  release_sock(sk);
  return(0);
}
2570
2571
2572
/*
 * Decide whether a segment is acceptable: some part of it must fall
 * inside the receive window [acked_seq, acked_seq+window].  Returns
 * 1 if the segment may be processed, 0 if rejected — in which case
 * an ACK is sent to resynchronize the peer, unless it was a RST.
 */
static int
tcp_sequence(struct sock *sk, struct tcphdr *th, short len,
	     struct options *opt, unsigned long saddr)
{
  DPRINTF((DBG_TCP, "tcp_sequence(sk=%X, th=%X, len = %d, opt=%d, saddr=%X)\n",
	  sk, th, len, opt, saddr));

  /* Accept if the start, the end, or the entire window overlaps. */
  if (between(th->seq, sk->acked_seq, sk->acked_seq + sk->window)||
     between(th->seq + len-(th->doff*4), sk->acked_seq + 1,
	     sk->acked_seq + sk->window) ||
     (before(th->seq, sk->acked_seq) &&
      after(th->seq + len -(th->doff*4), sk->acked_seq + sk->window))) {
	return(1);
  }
  DPRINTF((DBG_TCP, "tcp_sequence: rejecting packet.\n"));

  /* Segment entirely beyond the window: tell the peer where we are. */
  if (after(th->seq, sk->acked_seq + sk->window)) {
	tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
	return(0);
  }

  /* Tolerate a slightly old bare ACK (e.g. a window probe reply). */
  if (th->ack && len == (th->doff * 4) &&
      after(th->seq, sk->acked_seq - 32767) &&
      !th->fin && !th->syn) return(1);

  if (!th->rst) {
	/* Resynchronize the sender with a duplicate ACK. */
	tcp_send_ack(sk->send_seq, sk->acked_seq, sk, th, saddr);
  }
  return(0);
}
2615
2616
2617
2618 static void
2619 tcp_options(struct sock *sk, struct tcphdr *th)
2620 {
2621 unsigned char *ptr;
2622
2623 ptr = (unsigned char *)(th + 1);
2624 if (ptr[0] != 2 || ptr[1] != 4) {
2625 sk->mtu = min(sk->mtu, 576 - HEADER_SIZE);
2626 return;
2627 }
2628 sk->mtu = min(sk->mtu, ptr[2]*256 + ptr[3] - HEADER_SIZE);
2629 }
2630
2631
2632 int
2633 tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
2634 unsigned long daddr, unsigned short len,
2635 unsigned long saddr, int redo, struct inet_protocol * protocol)
2636 {
2637 struct tcphdr *th;
2638 struct sock *sk;
2639
2640 if (!skb) {
2641 DPRINTF((DBG_TCP, "tcp.c: tcp_rcv skb = NULL\n"));
2642 return(0);
2643 }
2644 #if 0
2645 if (!protocol) {
2646 DPRINTF((DBG_TCP, "tcp.c: tcp_rcv protocol = NULL\n"));
2647 return(0);
2648 }
2649
2650 if (!opt) {
2651 DPRINTF((DBG_TCP, "tcp.c: tcp_rcv opt = NULL\n"));
2652 }
2653 #endif
2654 if (!dev) {
2655 DPRINTF((DBG_TCP, "tcp.c: tcp_rcv dev = NULL\n"));
2656 return(0);
2657 }
2658 th = skb->h.th;
2659
2660
2661 sk = get_sock(&tcp_prot, th->dest, saddr, th->source, daddr);
2662 DPRINTF((DBG_TCP, "<<\n"));
2663 DPRINTF((DBG_TCP, "len = %d, redo = %d, skb=%X\n", len, redo, skb));
2664
2665 if (sk) {
2666 DPRINTF((DBG_TCP, "sk = %X:\n", sk));
2667 }
2668
2669 if (!redo) {
2670 if (th->check && tcp_check(th, len, saddr, daddr )) {
2671 skb->sk = NULL;
2672 DPRINTF((DBG_TCP, "packet dropped with bad checksum.\n"));
2673 if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: back checksum\n");
2674 kfree_skb(skb, 0);
2675
2676
2677
2678
2679 return(0);
2680 }
2681
2682
2683 if (sk == NULL) {
2684 if (!th->rst) tcp_reset(daddr, saddr, th, &tcp_prot, opt,dev);
2685 skb->sk = NULL;
2686 kfree_skb(skb, 0);
2687 return(0);
2688 }
2689
2690 skb->len = len;
2691 skb->sk = sk;
2692 skb->acked = 0;
2693 skb->used = 0;
2694 skb->free = 0;
2695 skb->urg_used = 0;
2696 skb->saddr = daddr;
2697 skb->daddr = saddr;
2698
2699 th->seq = ntohl(th->seq);
2700
2701
2702 cli();
2703 if (sk->inuse) {
2704 if (sk->back_log == NULL) {
2705 sk->back_log = skb;
2706 skb->next = skb;
2707 skb->prev = skb;
2708 } else {
2709 skb->next = sk->back_log;
2710 skb->prev = sk->back_log->prev;
2711 skb->prev->next = skb;
2712 skb->next->prev = skb;
2713 }
2714 sti();
2715 return(0);
2716 }
2717 sk->inuse = 1;
2718 sti();
2719 } else {
2720 if (!sk) {
2721 DPRINTF((DBG_TCP, "tcp.c: tcp_rcv bug sk=NULL redo = 1\n"));
2722 return(0);
2723 }
2724 }
2725
2726 if (!sk->prot) {
2727 DPRINTF((DBG_TCP, "tcp.c: tcp_rcv sk->prot = NULL \n"));
2728 return(0);
2729 }
2730
2731
2732 if (sk->rmem_alloc + skb->mem_len >= SK_RMEM_MAX) {
2733 skb->sk = NULL;
2734 DPRINTF((DBG_TCP, "dropping packet due to lack of buffer space.\n"));
2735 kfree_skb(skb, 0);
2736 release_sock(sk);
2737 return(0);
2738 }
2739 sk->rmem_alloc += skb->mem_len;
2740
2741 DPRINTF((DBG_TCP, "About to do switch.\n"));
2742
2743
2744 switch(sk->state) {
2745
2746
2747
2748
2749 case TCP_LAST_ACK:
2750 if (th->rst) {
2751 sk->err = ECONNRESET;
2752 sk->state = TCP_CLOSE;
2753 sk->shutdown = SHUTDOWN_MASK;
2754 if (!sk->dead) {
2755 wake_up(sk->sleep);
2756 }
2757 kfree_skb(skb, FREE_READ);
2758 release_sock(sk);
2759 return(0);
2760 }
2761
2762 case TCP_ESTABLISHED:
2763 case TCP_CLOSE_WAIT:
2764 case TCP_FIN_WAIT1:
2765 case TCP_FIN_WAIT2:
2766 case TCP_TIME_WAIT:
2767 if (!tcp_sequence(sk, th, len, opt, saddr)) {
2768 if (inet_debug == DBG_SLIP) printk("\rtcp_rcv: not in seq\n");
2769 tcp_send_ack(sk->send_seq, sk->acked_seq,
2770 sk, th, saddr);
2771 kfree_skb(skb, FREE_READ);
2772 release_sock(sk);
2773 return(0);
2774 }
2775
2776 if (th->rst) {
2777
2778 sk->err = ECONNRESET;
2779
2780 if (sk->state == TCP_CLOSE_WAIT) {
2781 sk->err = EPIPE;
2782 }
2783
2784
2785
2786
2787
2788
2789
2790 sk->state = TCP_CLOSE;
2791 sk->shutdown = SHUTDOWN_MASK;
2792 if (!sk->dead) {
2793 wake_up(sk->sleep);
2794 }
2795 kfree_skb(skb, FREE_READ);
2796 release_sock(sk);
2797 return(0);
2798
2799 }
2800 #if 0
2801 if (opt && (opt->security != 0 ||
2802 opt->compartment != 0 || th->syn)) {
2803 sk->err = ECONNRESET;
2804 sk->state = TCP_CLOSE;
2805 sk->shutdown = SHUTDOWN_MASK;
2806 tcp_reset(daddr, saddr, th, sk->prot, opt,dev);
2807 if (!sk->dead) {
2808 wake_up(sk->sleep);
2809 }
2810 kfree_skb(skb, FREE_READ);
2811 release_sock(sk);
2812 return(0);
2813 }
2814 #endif
2815 if (th->ack) {
2816 if (!tcp_ack(sk, th, saddr, len)) {
2817 kfree_skb(skb, FREE_READ);
2818 release_sock(sk);
2819 return(0);
2820 }
2821 }
2822 if (th->urg) {
2823 if (tcp_urg(sk, th, saddr)) {
2824 kfree_skb(skb, FREE_READ);
2825 release_sock(sk);
2826 return(0);
2827 }
2828 }
2829
2830 if (th->fin && tcp_fin(sk, th, saddr, dev)) {
2831 kfree_skb(skb, FREE_READ);
2832 release_sock(sk);
2833 return(0);
2834 }
2835
2836 if (tcp_data(skb, sk, saddr, len)) {
2837 kfree_skb(skb, FREE_READ);
2838 release_sock(sk);
2839 return(0);
2840 }
2841
2842 release_sock(sk);
2843 return(0);
2844
2845 case TCP_CLOSE:
2846 if (sk->dead || sk->daddr) {
2847 DPRINTF((DBG_TCP, "packet received for closed,dead socket\n"));
2848 kfree_skb(skb, FREE_READ);
2849 release_sock(sk);
2850 return(0);
2851 }
2852
2853 if (!th->rst) {
2854 if (!th->ack)
2855 th->ack_seq = 0;
2856 tcp_reset(daddr, saddr, th, sk->prot, opt,dev);
2857 }
2858 kfree_skb(skb, FREE_READ);
2859 release_sock(sk);
2860 return(0);
2861
2862 case TCP_LISTEN:
2863 if (th->rst) {
2864 kfree_skb(skb, FREE_READ);
2865 release_sock(sk);
2866 return(0);
2867 }
2868 if (th->ack) {
2869 tcp_reset(daddr, saddr, th, sk->prot, opt,dev);
2870 kfree_skb(skb, FREE_READ);
2871 release_sock(sk);
2872 return(0);
2873 }
2874
2875 if (th->syn) {
2876 #if 0
2877 if (opt->security != 0 || opt->compartment != 0) {
2878 tcp_reset(daddr, saddr, th, prot, opt,dev);
2879 release_sock(sk);
2880 return(0);
2881 }
2882 #endif
2883
2884
2885
2886
2887
2888
2889
2890 tcp_conn_request(sk, skb, daddr, saddr, opt, dev);
2891 release_sock(sk);
2892 return(0);
2893 }
2894
2895 kfree_skb(skb, FREE_READ);
2896 release_sock(sk);
2897 return(0);
2898
2899 default:
2900 if (!tcp_sequence(sk, th, len, opt, saddr)) {
2901 kfree_skb(skb, FREE_READ);
2902 release_sock(sk);
2903 return(0);
2904 }
2905
2906 case TCP_SYN_SENT:
2907 if (th->rst) {
2908 sk->err = ECONNREFUSED;
2909 sk->state = TCP_CLOSE;
2910 sk->shutdown = SHUTDOWN_MASK;
2911 if (!sk->dead) {
2912 wake_up(sk->sleep);
2913 }
2914 kfree_skb(skb, FREE_READ);
2915 release_sock(sk);
2916 return(0);
2917 }
2918 #if 0
2919 if (opt->security != 0 || opt->compartment != 0) {
2920 sk->err = ECONNRESET;
2921 sk->state = TCP_CLOSE;
2922 sk->shutdown = SHUTDOWN_MASK;
2923 tcp_reset(daddr, saddr, th, sk->prot, opt, dev);
2924 if (!sk->dead) {
2925 wake_up(sk->sleep);
2926 }
2927 kfree_skb(skb, FREE_READ);
2928 release_sock(sk);
2929 return(0);
2930 }
2931 #endif
2932 if (!th->ack) {
2933 if (th->syn) {
2934 sk->state = TCP_SYN_RECV;
2935 }
2936
2937 kfree_skb(skb, FREE_READ);
2938 release_sock(sk);
2939 return(0);
2940 }
2941
2942 switch(sk->state) {
2943 case TCP_SYN_SENT:
2944 if (!tcp_ack(sk, th, saddr, len)) {
2945 tcp_reset(daddr, saddr, th,
2946 sk->prot, opt,dev);
2947 kfree_skb(skb, FREE_READ);
2948 release_sock(sk);
2949 return(0);
2950 }
2951
2952
2953
2954
2955
2956 if (!th->syn) {
2957 kfree_skb(skb, FREE_READ);
2958 release_sock(sk);
2959 return(0);
2960 }
2961
2962
2963 sk->acked_seq = th->seq+1;
2964 sk->fin_seq = th->seq;
2965 tcp_send_ack(sk->send_seq, th->seq+1,
2966 sk, th, sk->daddr);
2967
2968 case TCP_SYN_RECV:
2969 if (!tcp_ack(sk, th, saddr, len)) {
2970 tcp_reset(daddr, saddr, th,
2971 sk->prot, opt, dev);
2972 kfree_skb(skb, FREE_READ);
2973 release_sock(sk);
2974 return(0);
2975 }
2976 sk->state = TCP_ESTABLISHED;
2977
2978
2979
2980
2981
2982
2983 tcp_options(sk, th);
2984 sk->dummy_th.dest = th->source;
2985 sk->copied_seq = sk->acked_seq-1;
2986 if (!sk->dead) {
2987 wake_up(sk->sleep);
2988 }
2989
2990
2991
2992
2993
2994 if (th->urg) {
2995 if (tcp_urg(sk, th, saddr)) {
2996 kfree_skb(skb, FREE_READ);
2997 release_sock(sk);
2998 return(0);
2999 }
3000 }
3001 if (tcp_data(skb, sk, saddr, len))
3002 kfree_skb(skb, FREE_READ);
3003
3004 if (th->fin) tcp_fin(sk, th, saddr, dev);
3005 release_sock(sk);
3006 return(0);
3007 }
3008
3009 if (th->urg) {
3010 if (tcp_urg(sk, th, saddr)) {
3011 kfree_skb(skb, FREE_READ);
3012 release_sock(sk);
3013 return(0);
3014 }
3015 }
3016
3017 if (tcp_data(skb, sk, saddr, len)) {
3018 kfree_skb(skb, FREE_READ);
3019 release_sock(sk);
3020 return(0);
3021 }
3022
3023 if (!th->fin) {
3024 release_sock(sk);
3025 return(0);
3026 }
3027 tcp_fin(sk, th, saddr, dev);
3028 release_sock(sk);
3029 return(0);
3030 }
3031 }
3032
3033
3034
3035
3036
3037
3038 static void
3039 tcp_write_wakeup(struct sock *sk)
3040 {
3041 struct sk_buff *buff;
3042 struct tcphdr *t1;
3043 struct device *dev=NULL;
3044 int tmp;
3045
3046 if (sk -> state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) return;
3047
3048 buff = (struct sk_buff *) sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
3049 if (buff == NULL) return;
3050
3051 buff->lock = 0;
3052 buff->mem_addr = buff;
3053 buff->mem_len = MAX_ACK_SIZE;
3054 buff->len = sizeof(struct tcphdr);
3055 buff->free = 1;
3056 buff->sk = sk;
3057 DPRINTF((DBG_TCP, "in tcp_write_wakeup\n"));
3058 t1 = (struct tcphdr *)(buff + 1);
3059
3060
3061 tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
3062 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
3063 if (tmp < 0) {
3064 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
3065 return;
3066 }
3067
3068 buff->len += tmp;
3069 t1 = (struct tcphdr *)((char *)t1 +tmp);
3070
3071 memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
3072
3073
3074
3075
3076
3077 t1->seq = ntohl(sk->send_seq-1);
3078 t1->ack = 1;
3079 t1->res1= 0;
3080 t1->res2= 0;
3081 t1->rst = 0;
3082 t1->urg = 0;
3083 t1->psh = 0;
3084 t1->fin = 0;
3085 t1->syn = 0;
3086 t1->ack_seq = ntohl(sk->acked_seq);
3087 t1->window = ntohs(sk->prot->rspace(sk));
3088 t1->doff = sizeof(*t1)/4;
3089 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
3090
3091
3092
3093
3094 sk->prot->queue_xmit(sk, dev, buff, 1);
3095 }
3096
3097
/*
 * The TCP protocol operations vector, plugged into the generic INET
 * socket layer.  This is a positional initializer: the slot order
 * must match the member order of "struct proto" in sock.h — do not
 * reorder entries.
 */
struct proto tcp_prot = {
  sock_wmalloc,		/* wmalloc: allocate from the write quota */
  sock_rmalloc,		/* rmalloc: allocate from the read quota */
  sock_wfree,		/* wfree: return write-buffer memory */
  sock_rfree,		/* rfree: return read-buffer memory */
  sock_rspace,		/* rspace: remaining receive-buffer room */
  sock_wspace,		/* wspace: remaining send-buffer room */
  tcp_close,		/* close */
  tcp_read,		/* read */
  tcp_write,		/* write */
  tcp_sendto,		/* sendto */
  tcp_recvfrom,		/* recvfrom */
  ip_build_header,	/* build_header: TCP rides directly on IP */
  tcp_connect,		/* connect */
  tcp_accept,		/* accept */
  ip_queue_xmit,	/* queue_xmit: hand segments to the IP layer */
  tcp_retransmit,	/* retransmit */
  tcp_write_wakeup,	/* write_wakeup: probe/keepalive ACK */
  tcp_read_wakeup,	/* read_wakeup */
  tcp_rcv,		/* rcv: input demultiplexer */
  tcp_select,		/* select */
  tcp_ioctl,		/* ioctl */
  NULL,			/* init: no per-protocol initializer */
  tcp_shutdown,		/* shutdown */
  128,			/* max_header: headroom reserved per packet */
  0,			/* retransmits counter, starts at zero */
  {NULL,},		/* sock_array: per-port socket hash, empty */
  "TCP"			/* name */
};