This source file includes following definitions.
- min
- print_th
- get_firstr
- diff
- tcp_time_wait
- tcp_retransmit
- tcp_err
- tcp_select
- tcp_ioctl
- tcp_check
- tcp_send_check
- tcp_send_ack
- tcp_build_header
- tcp_write
- tcp_read_wakeup
- cleanup_rbuf
- tcp_read_urg
- tcp_read
- tcp_reset
- tcp_conn_request
- tcp_close
- tcp_write_xmit
- tcp_ack
- tcp_data
- tcp_urg
- tcp_fin
- tcp_accept
- tcp_connect
- tcp_sequence
- tcp_options
- tcp_rcv
- tcp_write_wakeup
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47 #include <linux/types.h>
48 #include <linux/sched.h>
49 #include <linux/mm.h>
50 #include <linux/string.h>
51 #include <linux/socket.h>
52 #include <netinet/in.h>
53 #include <linux/fcntl.h>
54 #include "timer.h"
55 #include "ip.h"
56 #include "icmp.h"
57 #include "tcp.h"
58 #include "sock.h"
59 #include <linux/errno.h>
60 #include <linux/timer.h>
61 #include <asm/system.h>
62 #include <asm/segment.h>
63 #include <linux/mm.h>
64
65 #include <linux/termios.h>
66 #include "../kern_sock.h"
67
/* Debug printing: with TCP_DEBUG defined, PRINTK is printk; otherwise the
   calls compile to dummy_routine so the call sites stay but do nothing.
   TCP_DEBUG is force-undefined just below, so debugging is off here. */
#ifdef PRINTK
#undef PRINTK
#endif

#undef TCP_DEBUG

#ifdef TCP_DEBUG
#define PRINTK printk
#else
#define PRINTK dummy_routine
#endif

/* tmax: the later of two sequence numbers in wrap-around (mod 2^32) order,
   using the `before` comparison from tcp.h. */
#define tmax(a,b) (before ((a),(b)) ? (b) : (a))
/* swap: exchange two unsigned-long-sized values.
   NOTE(review): bare-block body — breaks after an unbraced if/else, and the
   temporary `c` shadows any argument named c; confirm call sites before use. */
#define swap(a,b) {unsigned long c; c=a; a=b; b=c;}

extern struct proto tcp_prot;
/* Return the smaller of two unsigned values (returned as int, matching
   the existing callers). */
static int
min (unsigned int a, unsigned int b)
{
  return (a <= b ? a : b);
}
91
/* Dump the fields of a TCP header, plus the first four option bytes that
   follow it, through PRINTK for debugging. No-op unless TCP_DEBUG is on. */
void
print_th (struct tcp_header *th)
{
  unsigned char *ptr;
  /* Options (if any) start immediately after the fixed-size header. */
  ptr = (unsigned char *)(th + 1);
  PRINTK ("tcp header:\n");
  PRINTK ("  source=%d, dest=%d, seq =%d, ack_seq = %d\n",
	  net16(th->source), net16(th->dest), net32(th->seq),
	  net32(th->ack_seq));
  PRINTK ("  fin=%d, syn=%d, rst=%d, psh=%d, ack=%d, urg=%d res1=%d res2=%d\n"
	  ,th->fin, th->syn, th->rst, th->psh, th->ack, th->urg,
	  th->res1, th->res2);
  PRINTK ("  window = %d, check = %d urg_ptr = %d\n",
	  net16(th->window), net16(th->check), net16(th->urg_ptr));
  PRINTK ("  doff = %d\n",th->doff);
  PRINTK ("options = %d %d %d %d\n", ptr[0], ptr[1], ptr[2], ptr[3]);
}
109
110
/* Unlink and return the buffer sk->rqueue points at (the receive queue is
   a circular doubly linked list), or NULL if the queue is empty. */
static struct sk_buff *
get_firstr(volatile struct sock *sk)
{
  struct sk_buff *skb;
  skb = sk->rqueue;
  if (skb == NULL) return (NULL);
  sk->rqueue = skb->next;
  if (sk->rqueue == skb)
    {
      /* skb linked to itself: it was the only element. */
      sk->rqueue = NULL;
    }
  else
    {
      /* Splice skb out by repairing the neighbours' prev/next links. */
      sk->rqueue->prev=skb->prev;
      sk->rqueue->prev->next = sk->rqueue;
    }
  return (skb);
}
129
/* Absolute difference between two sequence numbers.  The subtraction is
   done in unsigned (wrap-around) arithmetic, then the magnitude is
   returned as a long. */
static long
diff (unsigned long seq1, unsigned long seq2)
{
  long delta;

  delta = seq1 - seq2;
  if (delta > 0)
    return (delta);
  return (-delta);   /* -delta is identical to the original's ~delta + 1 */
}
139
140
/* Move the socket into TIME_WAIT and arm its timer so the connection is
   finally torn down after TCP_TIMEWAIT_LEN ticks. */
static void
tcp_time_wait (volatile struct sock *sk)
{
  sk->state = TCP_TIME_WAIT;
  sk->time_wait.len = TCP_TIMEWAIT_LEN;
  sk->timeout = TIME_CLOSE;
  reset_timer ((struct timer *)&sk->time_wait);
}
149
/* Retransmit via the IP layer.  When `all` is non-zero, delegate directly.
   Otherwise first apply congestion back-off: double the round-trip
   estimate, halve the congestion window (never below 1), and stop
   exponential window growth. */
static void
tcp_retransmit (volatile struct sock *sk, int all)
{
  if (all)
    {
      ip_retransmit (sk, all);
      return;
    }
  sk->rtt *= 2;                 /* back off the rtt estimate */
  if (sk->cong_window > 1)
    sk->cong_window = sk->cong_window / 2;
  sk->exp_growth = 0;           /* leave exponential-growth mode */

  ip_retransmit (sk, all);
}
167
168
169
170
171
172
173
174
175 void
176 tcp_err (int err, unsigned char *header, unsigned long daddr,
177 unsigned long saddr, struct ip_protocol *protocol)
178 {
179 struct tcp_header *th;
180 volatile struct sock *sk;
181
182 th = (struct tcp_header *)header;
183 sk = get_sock (&tcp_prot, net16(th->dest), saddr, th->source, daddr);
184
185 if (sk == NULL) return;
186
187 if (err & 0xff00 == (ICMP_SOURCE_QUENCH << 8))
188 {
189
190
191
192 if (sk->cong_window > 1)
193 sk->cong_window --;
194
195 return;
196 }
197
198 sk->err = icmp_err_convert[err & 0xff].errno;
199 if (icmp_err_convert[err & 0xff].fatal)
200 {
201 if (sk->state != TCP_ESTABLISHED)
202 sk->state = TCP_CLOSE;
203 sk->prot->close(sk, 0);
204 }
205
206 return;
207
208 }
209
/* select() backend for TCP sockets.  Returns 1 when the requested
   condition (readable / writable / exceptional) already holds, otherwise
   registers the caller on sk->sleep via select_wait and returns 0. */
static int
tcp_select (volatile struct sock *sk, int sel_type, select_table *wait)
{
  switch (sel_type)
    {
     case SEL_IN:
       select_wait (sk->sleep, wait);
       /* Readable if un-copied in-sequence data is queued, or if this is
	  a listening socket with a connection waiting. */
       if (sk->rqueue != NULL &&
	   (between (sk->copied_seq, sk->rqueue->next->h.th->seq - 1,
		     sk->rqueue->next->h.th->seq + sk->rqueue->next->len) ||
	    sk->state == TCP_LISTEN))
	 {
	    return (1);
	 }

       switch (sk->state)
	 {
	  case TCP_LISTEN:
	  case TCP_ESTABLISHED:
	  case TCP_SYN_SENT:
	  case TCP_SYN_RECV:
	    return (0);
	  default:
	    /* Closing/closed states: wake the reader so it sees EOF. */
	    return (1);
	 }

     case SEL_OUT:
       select_wait (sk->sleep, wait);
       /* Non-established sockets report writable so the caller's write
	  attempt can fail with the real error immediately. */
       if (sk->state != TCP_ESTABLISHED) return (1);

       if (sk->prot->wspace(sk) >= MIN_WRITE_SPACE) return (1);
       return (0);

     case SEL_EX:
       select_wait(sk->sleep,wait);
       /* Exceptional: pending error or connection in a shutdown state. */
       if (sk->err) return (1);
       if (sk->state == TCP_FIN_WAIT1 ||
	   sk->state == TCP_FIN_WAIT2 ||
	   sk->state == TCP_TIME_WAIT ||
	   sk->state == TCP_LAST_ACK)
	 return (1);
       return (0);
    }
  return (0);
}
256
/* ioctl() backend for TCP sockets.
   TIOCINQ    - bytes readable up to (and including) the next PSH packet.
   SIOCATMARK - 1 if the read pointer sits at an urgent-data mark.
   TIOCOUTQ   - available write space (halved).
   Results are copied to user space via put_fs_long. */
static int
tcp_ioctl (volatile struct sock *sk, int cmd, unsigned long arg)
{
  switch (cmd)
    {
     default:
       return (-EINVAL);

     case TIOCINQ:
       {
	 unsigned long amount;
	 struct sk_buff *skb;

	 if (sk->state == TCP_LISTEN)
	   return (-EINVAL);

	 /* Walk the circular receive queue; stop after a PSH segment. */
	 amount = 0;
	 if (sk->rqueue != NULL)
	   {
	     skb = sk->rqueue->next;

	     do {
		amount += skb -> len;
		if (skb->h.th->psh) break;
		skb = skb->next;
	     } while (skb != sk->rqueue->next);
	   }

	 verify_area ((void *)arg, sizeof (unsigned long));
	 put_fs_long (amount, (unsigned long *)arg);
	 return (0);
       }

     case SIOCATMARK:
       {
	 struct sk_buff *skb;
	 int answ=0;

	 /* At the mark when the next segment to copy carries URG. */
	 if (sk->rqueue != NULL)
	   {
	     skb = sk->rqueue->next;
	     if (sk->copied_seq+1 == skb->h.th->seq && skb->h.th->urg)
	       answ = 1;
	   }
	 verify_area ((void *) arg, sizeof (unsigned long));
	 put_fs_long (answ, (void *) arg);
	 return (0);
       }

     case TIOCOUTQ:
       {
	 unsigned long amount;
	 if (sk->state == TCP_LISTEN)
	   return (-EINVAL);
	 amount = sk->prot->wspace(sk)/2;
	 verify_area ((void *)arg, sizeof (unsigned long));
	 put_fs_long (amount, (unsigned long *)arg);
	 return (0);
       }

    }
}
320
321
322
/* Compute the TCP checksum (one's-complement sum over the pseudo-header
   and the segment) with i386 inline assembly.  `len` is the TCP length
   in bytes; odd lengths and trailing half-words are folded in by the
   later fragments.  Returns the 16-bit complemented sum. */
static unsigned short
tcp_check (struct tcp_header *th, int len, unsigned long saddr,
	   unsigned long daddr)
{
  unsigned long sum;

  if (saddr == 0) saddr = MY_IP_ADDR;
  print_th (th);
  /* Pseudo-header: saddr + daddr + (swapped len | proto), with carry. */
  __asm__("\t addl %%ecx,%%ebx\n"
	  "\t adcl %%edx,%%ebx\n"
	  "\t adcl $0, %%ebx\n"
	  : "=b" (sum)
	  : "0" (daddr), "c" (saddr), "d" ((net16(len) << 16) + IPPROTO_TCP*256)
	  : "cx","bx","dx" );

  /* Sum the segment a 32-bit word at a time. */
  if (len > 3)
    {
      __asm__(
	      "\tclc\n"
	      "1:\n"
	      "\t lodsl\n"
	      "\t adcl %%eax, %%ebx\n"
	      "\t loop 1b\n"
	      "\t adcl $0, %%ebx\n"
	      : "=b" (sum) , "=S" (th)
	      : "0" (sum), "c" (len/4) ,"1" (th)
	      : "ax", "cx", "bx", "si" );
    }

  /* Fold the 32-bit accumulator down to 16 bits. */
  __asm__(
	  "\t movl %%ebx, %%ecx\n"
	  "\t shrl $16,%%ecx\n"
	  "\t addw %%cx, %%bx\n"
	  "\t adcw $0, %%bx\n"
	  : "=b" (sum)
	  : "0" (sum)
	  : "bx", "cx");

  /* Remaining 16-bit half-word, if the length has bit 1 set. */
  if ((len & 2) != 0)
    {
      __asm__("\t lodsw\n"
	      "\t addw %%ax,%%bx\n"
	      "\t adcw $0, %%bx\n"
	      : "=b" (sum), "=S" (th)
	      : "0" (sum) ,"1" (th)
	      : "si", "ax", "bx");
    }

  /* Final odd byte, zero-extended. */
  if ((len & 1) != 0)
    {
      __asm__("\t lodsb\n"
	      "\t movb $0,%%ah\n"
	      "\t addw %%ax,%%bx\n"
	      "\t adcw $0, %%bx\n"
	      : "=b" (sum)
	      : "0" (sum) ,"S" (th)
	      : "si", "ax", "bx");
    }

  /* One's complement of the folded sum. */
  return ((~sum) & 0xffff);
}
389
390
391 static void
392 tcp_send_check (struct tcp_header *th, unsigned long saddr,
393 unsigned long daddr, int len, volatile struct sock *sk)
394 {
395
396 th->check = 0;
397 if (sk && sk->no_check) return;
398 th->check = tcp_check (th, len, saddr, daddr);
399 return;
400 }
401
402
/* Build and transmit a bare ACK carrying `sequence` as its sequence
   number and `ack` as the acknowledgement, in reply to header `th`.
   If no buffer memory is available the ACK is deferred by bumping
   ack_backlog and arming a short write timer. */
static void
tcp_send_ack (unsigned long sequence, unsigned long ack,
	      volatile struct sock *sk,
	      struct tcp_header *th, unsigned long daddr)
{
  struct sk_buff *buff;
  struct tcp_header *t1;
  struct device *dev=NULL;
  int tmp;

  buff=sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
  if (buff == NULL)
    {
      /* No memory: remember the ACK is owed and retry from the timer. */
      sk->ack_backlog++;
      if (sk->timeout != TIME_WRITE && sk->state < TCP_CLOSING)
	{
	  sk->timeout = TIME_WRITE;
	  sk->time_wait.len = 10;
	  reset_timer ((struct timer *)&sk->time_wait);
	}
      return;
    }

  buff->mem_addr = buff;
  buff->mem_len = MAX_ACK_SIZE;
  buff->len=sizeof (struct tcp_header);
  buff->sk = sk;
  t1 = (struct tcp_header *)(buff + 1);

  /* Lower-layer (IP/MAC) header first; it chooses the device too. */
  tmp = sk->prot->build_header (buff, sk->saddr, daddr, &dev,
				IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
  if (tmp < 0)
    {
      sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
      return;
    }
  buff->len += tmp;
  t1 = (struct tcp_header *)((char *)t1 +tmp);

  /* Start from the incoming header, then swap ports and set flags. */
  memcpy (t1, th, sizeof (*t1));

  t1->dest = th->source;
  t1->source = th->dest;
  t1->seq = net32(sequence);
  t1->ack = 1;
  sk->window = sk->prot->rspace(sk);
  t1->window = net16(sk->window);
  t1->res1=0;
  t1->res2=0;
  t1->rst = 0;
  t1->urg = 0;
  t1->syn = 0;
  t1->psh = 0;
  t1->fin = 0;
  if (ack == sk->acked_seq)
    {
      /* This ACK is fully up to date: clear the backlog, and drop the
	 pending timer if nothing is in flight or queued. */
      sk->ack_backlog = 0;
      sk->bytes_rcv = 0;
      sk->ack_timed = 0;
      if (sk->send_head == NULL &&
	  sk->wfront == NULL)
	{
	  delete_timer((struct timer *)&sk->time_wait);
	  sk->timeout = 0;
	}

    }
  t1->ack_seq = net32(ack);
  t1->doff = sizeof (*t1)/4;
  tcp_send_check (t1, sk->saddr, daddr, sizeof (*t1), sk);
  sk->prot->queue_xmit(sk, dev, buff, 1);
}
480
481
/* Fill in a TCP header for outgoing data from the socket's template
   (dummy_th).  `push` is the number of bytes that will remain unsent
   after this segment — PSH is set only when it is zero, i.e. on the
   last segment of a write.  Returns the header size in bytes. */
static int
tcp_build_header(struct tcp_header *th, volatile struct sock *sk, int push)
{
  memcpy (th,(void *) &(sk->dummy_th), sizeof (*th));
  th->seq = net32(sk->send_seq);
  th->psh = (push == 0) ? 1 : 0;
  th->doff = sizeof (*th)/4;
  th->ack = 1;
  th->fin = 0;
  /* This segment carries our current ACK, so nothing is owed anymore. */
  sk->ack_backlog = 0;
  sk->bytes_rcv = 0;
  sk->ack_timed = 0;
  th->ack_seq = net32(sk->acked_seq);
  sk->window = sk->prot->rspace(sk);
  th->window = net16(sk->window);

  return (sizeof (*th));
}
502
503
504
505
/* write()/send() backend: copy up to `len` bytes from user space into
   TCP segments and transmit (or queue) them.  Blocks while the
   connection is still being established or while buffer memory is
   short, unless `nonblock` is set.  MSG_OOB marks each segment urgent.
   Returns bytes accepted, or a negative errno. */
static int
tcp_write(volatile struct sock *sk, unsigned char *from,
	  int len, int nonblock, unsigned flags)
{
  int copied=0;
  int copy;
  int tmp;
  struct sk_buff *skb;
  unsigned char *buff;
  struct proto *prot;
  struct device *dev=NULL;

  PRINTK ("in TCP_WRITE sk = %X:\n",sk);
  print_sk (sk);

  sk->inuse = 1;        /* lock the socket */
  prot = sk->prot;
  while (len > 0)
    {
      /* Wait until the connection is established (or fail if it is
	 in any state past the handshake). */
      while (sk->state != TCP_ESTABLISHED)
	{
	  if (sk->state != TCP_SYN_SENT &&
	      sk->state != TCP_SYN_RECV)
	    {
	      release_sock (sk);
	      if (sk->keepopen)
		{
		  send_sig (SIGPIPE, current, 0);
		  return (-EINTR);
		}
	      if (copied) return (copied);
	      if (sk->err) return (-sk->err);
	      return (-EPIPE);
	    }

	  if (nonblock)
	    {
	      release_sock (sk);
	      return (-EAGAIN);
	    }

	  /* Sleep with interrupts off so the state re-check and the
	     sleep are atomic with respect to the bottom half. */
	  release_sock (sk);
	  cli();
	  if (sk->state != TCP_ESTABLISHED)
	    {
	      interruptible_sleep_on (sk->sleep);
	      if (current->signal & ~current->blocked)
		{
		  sti();
		  if (copied) return (copied);
		  return (-ERESTARTSYS);
		}
	    }
	  sti();
	  sk->inuse = 1;
	}

      /* Segment size: bounded by mtu and the space left in the peer's
	 window; tiny or oversized results fall back to a full mtu. */
      copy = min (sk->mtu, diff(sk->window_seq, sk->send_seq));

      if (copy < 200 || copy > sk->mtu) copy = sk->mtu;
      copy = min (copy, len);

      skb=prot->wmalloc (sk, copy + prot->max_header+sizeof (*skb),0,
			 GFP_KERNEL);

      if (skb == NULL)
	{
	  /* Out of buffer space: return what we have, or sleep until
	     some write memory is freed. */
	  if (nonblock || copied)
	    {
	      break;
	    }

	  tmp = sk->wmem_alloc;
	  release_sock (sk);

	  cli ();
	  if (tmp <= sk->wmem_alloc)
	    {
	      interruptible_sleep_on (sk->sleep);
	      if (current->signal & ~current->blocked)
		{
		  sti();
		  if (copied) return (copied);
		  return (-ERESTARTSYS);
		}
	    }
	  sk->inuse = 1;
	  sti();
	  continue;
	}
      skb->mem_addr = skb;
      skb->mem_len = copy+prot->max_header+sizeof (*skb);
      skb->len = 0;
      skb->sk = sk;
      buff =(unsigned char *)( skb+1);

      /* Lower-layer header, then the TCP header, then the payload. */
      tmp = prot->build_header (skb, sk->saddr, sk->daddr, &dev,
				IPPROTO_TCP, sk->opt, skb->mem_len);
      if (tmp < 0 )
	{
	  prot->wfree (sk, skb->mem_addr, skb->mem_len);
	  release_sock (sk);
	  return (tmp);
	}
      skb->len += tmp;
      skb->dev = dev;
      buff+=tmp;
      tmp = tcp_build_header((struct tcp_header *)buff, sk, len-copy);
      if (tmp < 0)
	{
	  prot->wfree (sk, skb->mem_addr, skb->mem_len);
	  release_sock (sk);
	  return (tmp);
	}

      if (flags & MSG_OOB)
	{
	  ((struct tcp_header *)buff)->urg = 1;
	  ((struct tcp_header *)buff)->urg_ptr = copy;
	}
      skb->len += tmp;
      memcpy_fromfs (buff+tmp, from, copy);

      tcp_send_check ((struct tcp_header *)buff, sk->saddr, sk->daddr,
		      copy +sizeof (struct tcp_header), sk);

      from += copy;
      copied += copy;
      len -= copy;
      skb->len += copy;
      skb->free = 0;
      sk->send_seq += copy;
      skb->h.seq = sk->send_seq;
      /* Transmit now if the windows allow; otherwise append to the
	 write queue for tcp_write_xmit to drain later. */
      if (after (sk->send_seq , sk->window_seq) ||
	  sk->packets_out >= sk->cong_window)
	{
	  PRINTK ("sk->cong_window = %d, sk->packets_out = %d\n",
		  sk->cong_window, sk->packets_out);
	  PRINTK ("sk->send_seq = %d, sk->window_seq = %d\n",
		  sk->send_seq, sk->window_seq);
	  skb->next = NULL;
	  if (sk->wback == NULL)
	    {
	      sk->wfront=skb;
	    }
	  else
	    {
	      sk->wback->next = skb;
	    }
	  sk->wback = skb;
	}
      else
	{
	  prot->queue_xmit (sk, dev, skb,0);
	}
    }
  sk->err = 0;
  release_sock (sk);
  return (copied);
}
681
682
/* Send the ACK that the read path deferred (sk->ack_backlog non-zero),
   typically because window space has just been freed by a read.  If no
   buffer memory is available, re-arm the write timer and try later. */
static void
tcp_read_wakeup(volatile struct sock *sk)
{
  int tmp;
  struct device *dev = NULL;
  struct tcp_header *t1;
  struct sk_buff *buff;

  if (!sk->ack_backlog ) return;
  PRINTK ("in tcp read wakeup\n");

  buff=sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
  if (buff == NULL)
    {
      /* No memory now — retry from the timer shortly. */
      sk->timeout = TIME_WRITE;
      sk->time_wait.len = 10;
      reset_timer((struct timer *) &sk->time_wait);
      return;
    }

  buff->mem_addr = buff;
  buff->mem_len = MAX_ACK_SIZE;
  buff->len=sizeof (struct tcp_header);
  buff->sk = sk;

  /* Lower-layer header first; it also selects the output device. */
  tmp = sk->prot->build_header (buff, sk->saddr, sk->daddr, &dev,
				IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
  if (tmp < 0)
    {
      sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
      return;
    }

  buff->len += tmp;
  t1 = (struct tcp_header *)((char *)(buff+1) +tmp);

  /* Fill the TCP header from the socket's template: a pure ACK
     advertising the freshly recomputed receive window. */
  memcpy (t1,(void *) &sk->dummy_th, sizeof (*t1));
  t1->seq = net32(sk->send_seq);
  t1->ack = 1;
  t1->res1=0;
  t1->res2=0;
  t1->rst = 0;
  t1->urg = 0;
  t1->syn = 0;
  t1->psh = 0;
  sk->ack_backlog = 0;
  sk->bytes_rcv = 0;
  sk->window = sk->prot->rspace(sk);
  t1->window = net16(sk->window);
  t1->ack_seq = net32(sk->acked_seq);
  t1->doff = sizeof (*t1)/4;
  tcp_send_check (t1, sk->saddr, sk->daddr, sizeof (*t1), sk);
  sk->prot->queue_xmit(sk, dev, buff, 1);
}
745
746
747
748
749
750
/* Free fully consumed buffers at the front of the receive queue, then,
   if enough window space has opened up, schedule a window-update ACK. */
static void
cleanup_rbuf (volatile struct sock *sk)
{
  PRINTK ("cleaning rbuf for sk=%X\n",sk);

  /* Pop used buffers from the front of the circular queue; stop at the
     first one the reader has not finished with. */
  while (sk->rqueue != NULL )
    {
      struct sk_buff *skb;
      skb=sk->rqueue->next;
      if (!skb->used) break;
      if (sk->rqueue == skb)
	{
	  sk->rqueue = NULL;
	}
      else
	{
	  skb->next->prev = skb->prev;
	  skb->prev->next = skb->next;
	}
      skb->sk = sk;
      kfree_skb (skb, FREE_READ);
    }

  PRINTK ("sk->window left = %d, sk->prot->rspace(sk)=%d\n",
	  sk->window - sk->bytes_rcv, sk->prot->rspace(sk));

  /* If real space now exceeds the advertised window by a margin, or the
     remaining window is small, owe the peer a window update. */
  if ((sk->prot->rspace(sk) >
       (sk->window - sk->bytes_rcv + TCP_WINDOW_DIFF)) ||
      (sk->window - sk->bytes_rcv < 2*sk->mtu))
    {
      sk->ack_backlog++;
      if (sk->timeout != TIME_WRITE && sk->state == TCP_ESTABLISHED)
	{
	  sk->time_wait.len = TCP_ACK_TIME;
	  sk->timeout=TIME_WRITE;
	  reset_timer ((struct timer *)&sk->time_wait);
	}
    }

}
795
796
/* Read out-of-band (urgent) data into user space.  Sleeps until urgent
   data arrives (there is no non-blocking path here), copies at most the
   urgent span of the first urgent segment found, and returns the byte
   count, -ENOTCONN once closed, or -ERESTARTSYS on a signal. */
static int
tcp_read_urg(volatile struct sock * sk,
	     unsigned char *to, int len, unsigned flags)
{
  int copied = 0;
  struct sk_buff *skb;
  PRINTK ("tcp_read_urg(sk=%X, to=%X, len=%d, flags=%X)\n",
	  sk, to, len, flags);
  print_sk(sk);
  while (len > 0)
    {
      sk->inuse = 1;
      /* Wait for urgent data to be flagged on the socket. */
      while (sk->urg==0 || sk->rqueue == NULL)
	{
	  release_sock (sk);
	  if (sk->state > TCP_CLOSING)
	    {
	      if (copied) return (copied);
	      return (-ENOTCONN);
	    }
	  /* Re-check under cli() so the wakeup cannot be missed. */
	  cli();
	  if (sk->urg == 0 || sk->rqueue == NULL)
	    {
	      interruptible_sleep_on (sk->sleep);
	      if (current->signal & ~current->blocked)
		{
		  sti();
		  if (copied) return (copied);
		  return (-ERESTARTSYS);
		}
	    }
	  sti();
	  sk->inuse = 1;
	}

      /* Scan the receive queue for a segment carrying URG. */
      for (skb = sk->rqueue->next; skb->next != sk->rqueue;
	   skb = skb->next)
	{
	  int offset;
	  int amt;
	  if (!skb->h.th->urg) continue;
	  offset = 0;
	  /* Copy at most urg_ptr bytes from the start of the payload. */
	  amt = min(skb->h.th->urg_ptr,len);
	  verify_area (to, amt);
	  memcpy_tofs (to, (unsigned char *)(skb->h.th) +
		       skb->h.th->doff*4
		       + offset, amt);

	  if (!(flags & MSG_PEEK))
	    {
	      skb->urg_used = 1;
	      sk->urg --;
	    }
	  release_sock (sk);
	  copied += amt;
	  return (copied);
	}
    }
  return (0);
}
858
859
/* read()/recv() backend: copy in-sequence data from the receive queue to
   user space, tracking progress with sk->copied_seq.  Blocks until data
   arrives unless `nonblock`; MSG_OOB is diverted to tcp_read_urg and
   MSG_PEEK leaves data on the queue.  Returns bytes copied or -errno. */
static int
tcp_read(volatile struct sock *sk, unsigned char *to,
	 int len, int nonblock, unsigned flags)
{
  int copied=0;
  struct sk_buff *skb;
  unsigned long offset;
  unsigned long used;

  if (len == 0) return (0);
  if (len < 0)
    {
      return (-EINVAL);
    }

  if (sk->state == TCP_LISTEN) return (-ENOTCONN);

  /* Report and clear any pending asynchronous error first. */
  if (sk->err)
    {
      int err;
      err = -sk->err;
      sk->err = 0;
      return (err);
    }

  if ((flags & MSG_OOB))
    return (tcp_read_urg (sk, to, len, flags));

  sk->inuse = 1;        /* lock the socket */
  if (sk->rqueue != NULL)
    skb=sk->rqueue->next;
  else
    skb = NULL;

  while ( len > 0)
    {
      PRINTK("tcp_read (sk=%X, to=%X, len=%d, nonblock=%d, flags=%X)\n",
	     sk, to, len, nonblock, flags);
      /* Wait until the next in-sequence, unconsumed segment arrives. */
      while ( skb == NULL || before (sk->copied_seq+1, skb->h.th->seq) ||
	      skb->used)
	{
	  PRINTK("skb = %X:\n",skb);
	  print_skb(skb);
	  print_sk (sk);

	  cleanup_rbuf(sk);

	  release_sock (sk);

	  PRINTK ("tcp_read about to sleep. state = %d\n",sk->state);
	  cli();

	  if (sk->state == TCP_CLOSE || sk->state == TCP_TIME_WAIT)
	    {
	      /* Connection is gone: first "read" after close yields 0
		 (EOF), later ones -ENOTCONN. */
	      sti();
	      if (copied) return (copied);
	      if (sk->err) return (-sk->err);
	      if (!sk->done)
		{
		  sk->done = 1;
		  return (0);
		}
	      return (-ENOTCONN);
	    }

	  if (nonblock || ((flags & MSG_PEEK) && copied))
	    {
	      sti();
	      release_sock (sk);
	      if (copied) return (copied);
	      return (-EAGAIN);
	    }

	  /* Re-check under cli() so the wakeup cannot be missed. */
	  if ( sk->rqueue == NULL ||
	       before (sk->copied_seq+1, sk->rqueue->next->h.th->seq))
	    {
	      interruptible_sleep_on (sk->sleep);
	      if (current->signal & ~current->blocked)
		{
		  sti ();
		  if (copied) return (copied);
		  return (-ERESTARTSYS);
		}
	    }
	  sti();
	  PRINTK ("tcp_read woke up. \n");

	  sk->inuse = 1;

	  if (sk->rqueue != NULL)
	    skb=sk->rqueue->next;
	  else
	    skb = NULL;

	}

      /* Position inside this segment's payload; the SYN occupies one
	 sequence number but no payload byte. */
      offset = sk->copied_seq+1 - skb->h.th->seq;

      if (skb->h.th->syn) offset --;
      if (offset < skb->len )
	{
	  /* Urgent data is not delivered inline: skip past it once it
	     has been consumed via tcp_read_urg, otherwise stop here. */
	  if (skb->h.th->urg)
	    {
	      if (skb->urg_used)
		{
		  if (flags & MSG_PEEK) break;
		  sk->copied_seq += skb->h.th->urg_ptr;
		  offset += skb->h.th->urg_ptr;
		  if (offset > skb->len)
		    {
		      skb->used = 1;
		      skb=skb->next;
		      continue;
		    }
		}
	      else
		{
		  break;
		}
	    }
	  used = min(skb->len - offset, len);

	  verify_area (to, used);
	  memcpy_tofs(to, ((unsigned char *)skb->h.th) +
		      skb->h.th->doff*4 +
		      offset,
		      used);
	  copied += used;
	  len -= used;
	  to += used;
	  if (!(flags & MSG_PEEK))
	    sk->copied_seq += used;

	  /* Mark the segment consumed once everything in it is copied. */
	  if (!(flags & MSG_PEEK) &&
	      (!skb->h.th->urg || skb->urg_used) &&
	      (used + offset >= skb->len) )
	    skb->used = 1;

	  /* PSH and URG both terminate the read early. */
	  if ( skb->h.th->psh || skb->h.th->urg)
	    {
	      break;
	    }
	}
      else
	{
	  skb->used = 1;
	}
      skb=skb->next;
    }
  cleanup_rbuf (sk);
  release_sock (sk);
  if (copied == 0 && nonblock) return (-EAGAIN);
  return (copied);
}
1032
1033
/* Send a RST in reply to header `th`, without any owning socket (used
   for segments that match no connection).  Best-effort: silently gives
   up if no atomic buffer memory is available. */
static void
tcp_reset(unsigned long saddr, unsigned long daddr, struct tcp_header *th,
	  struct proto *prot, struct options *opt, struct device *dev)
{
  struct sk_buff *buff;
  struct tcp_header *t1;
  int tmp;
  buff=prot->wmalloc(NULL, MAX_RESET_SIZE,1, GFP_ATOMIC);
  if (buff == NULL) return;

  PRINTK("tcp_reset buff = %X\n", buff);
  buff->mem_addr = buff;
  buff->mem_len = MAX_RESET_SIZE;
  buff->len = sizeof (*t1);
  buff->sk = NULL;
  buff->dev = dev;

  t1=(struct tcp_header *)(buff + 1);

  /* Lower-layer header first; it may also switch the device. */
  tmp = prot->build_header (buff, saddr, daddr, &dev, IPPROTO_TCP, opt,
			    sizeof(struct tcp_header));
  if (tmp < 0)
    {
      prot->wfree (NULL,buff->mem_addr, buff->mem_len);
      return;
    }
  t1 = (struct tcp_header *)((char *)t1 +tmp);
  buff->len += tmp;
  /* Echo the offending header, swap the ports, and turn it into a RST
     whose sequence number is the peer's acknowledged sequence. */
  memcpy (t1, th, sizeof (*t1));

  t1->dest = th->source;
  t1->source = th->dest;
  t1->seq = th->ack_seq;

  t1->rst = 1;
  t1->ack = 0;
  t1->syn = 0;
  t1->urg = 0;
  t1->fin = 0;
  t1->psh = 0;
  t1->doff = sizeof (*t1)/4;
  tcp_send_check (t1, saddr, daddr, sizeof (*t1), NULL);
  prot->queue_xmit(NULL, dev, buff, 1);

}
1081
1082
1083
1084
1085
1086
1087
/* Handle an incoming SYN on a listening socket `sk`: clone the socket
   into `newsk` (state SYN_RECV), answer with a SYN|ACK carrying an MSS
   option, and park the SYN's skb on the listener's receive queue so
   accept() can find the new connection.  A SYN for a dead listener is
   answered with a RST. */
static void
tcp_conn_request(volatile struct sock *sk, struct sk_buff *skb,
		 unsigned long daddr,
		 unsigned long saddr, struct options *opt, struct device *dev)
{
  struct sk_buff *buff;
  struct tcp_header *t1;
  unsigned char *ptr;
  volatile struct sock *newsk;
  struct tcp_header *th;
  int tmp;
  th = skb->h.th;

  PRINTK ("tcp_conn_request (sk = %X, skb = %X, daddr = %X, sadd4= %X, \n"
	  "                  opt = %X, dev = %X)\n",
	  sk, skb, daddr, saddr, opt, dev);

  if (!sk->dead)
    {
      wake_up(sk->sleep);
    }
  else
    {
      /* Nobody will ever accept() this — refuse the connection. */
      PRINTK ("tcp_conn_request on dead socket\n");
      tcp_reset (daddr, saddr, th, sk->prot, opt, dev);
      kfree_skb (skb, FREE_READ);
      return;
    }

  /* Clone the listening socket, then reset all per-connection state. */
  newsk = kmalloc(sizeof (struct sock), GFP_ATOMIC);
  if (newsk == NULL)
    {
      /* Out of memory: drop the SYN; the peer will retransmit it. */
      kfree_skb (skb, FREE_READ);
      return;
    }

  PRINTK ("newsk = %X\n", newsk);
  memcpy ((void *)newsk, (void *)sk, sizeof (*newsk));
  newsk->wback = NULL;
  newsk->wfront = NULL;
  newsk->rqueue = NULL;
  newsk->send_head = NULL;
  newsk->send_tail = NULL;
  newsk->back_log = NULL;
  newsk->blog = 0;
  newsk->intr = 0;
  newsk->proc = 0;
  newsk->done = 0;

  newsk->pair = NULL;
  newsk->wmem_alloc = 0;
  newsk->rmem_alloc = 0;

  newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;

  newsk->err = 0;
  newsk->shutdown = 0;
  newsk->ack_backlog = 0;
  /* The SYN consumes one sequence number: ack seq+1, copy from seq. */
  newsk->acked_seq = skb->h.th->seq+1;
  newsk->fin_seq = skb->h.th->seq;
  newsk->copied_seq = skb->h.th->seq;
  newsk->state = TCP_SYN_RECV;
  newsk->timeout = 0;
  /* Initial send sequence from the clock, as elsewhere in this stack. */
  newsk->send_seq = timer_seq*SEQ_TICK-seq_offset;
  newsk->rcv_ack_seq = newsk->send_seq;
  newsk->urg =0;
  newsk->retransmits = 0;
  newsk->destroy = 0;
  newsk->time_wait.sk = newsk;
  newsk->time_wait.next = NULL;
  newsk->dummy_th.source = skb->h.th->dest;
  newsk->dummy_th.dest = skb->h.th->source;

  /* Addresses are from the new connection's point of view. */
  newsk->daddr=saddr;
  newsk->saddr=daddr;

  put_sock (newsk->num,newsk);
  newsk->dummy_th.res1=0;
  newsk->dummy_th.doff=6;
  newsk->dummy_th.fin=0;
  newsk->dummy_th.syn=0;
  newsk->dummy_th.rst=0;
  newsk->dummy_th.psh=0;
  newsk->dummy_th.ack=0;
  newsk->dummy_th.urg=0;
  newsk->dummy_th.res2=0;
  newsk->acked_seq = skb->h.th->seq+1;
  newsk->copied_seq = skb->h.th->seq;

  /* Parse the peer's MSS option if the SYN carried one (kind 2, len 4);
     otherwise fall back to the 576-byte default. */
  if (skb->h.th->doff == 5)
    {
      newsk->mtu=576-HEADER_SIZE;
    }
  else
    {
      ptr = (unsigned char *)(skb+1);
      if (ptr[0] != 2 || ptr[1] != 4)
	{
	  newsk->mtu=576-HEADER_SIZE;
	}
      else
	{
	  newsk->mtu = min (ptr[2]*256+ptr[3]-HEADER_SIZE,
			    dev->mtu-HEADER_SIZE);
	}
    }

  /* Build and send the SYN|ACK (header + 4 bytes of MSS option). */
  print_sk (newsk);
  buff=newsk->prot->wmalloc(newsk,MAX_SYN_SIZE,1, GFP_ATOMIC);
  if (buff == NULL)
    {
      sk->err = -ENOMEM;
      newsk->dead = 1;
      release_sock (newsk);
      kfree_skb (skb, FREE_READ);
      return;
    }

  buff->mem_addr = buff;
  buff->mem_len = MAX_SYN_SIZE;
  buff->len=sizeof (struct tcp_header)+4;
  buff->sk = newsk;

  t1=(struct tcp_header *)(buff + 1);

  tmp = sk->prot->build_header (buff, newsk->saddr, newsk->daddr, &dev,
				IPPROTO_TCP, NULL, MAX_SYN_SIZE);

  if (tmp < 0)
    {
      sk->err = tmp;
      sk->prot->wfree(newsk, buff->mem_addr, buff->mem_len);
      newsk->dead = 1;
      release_sock (newsk);
      skb->sk = sk;
      kfree_skb (skb, FREE_READ);
      return;
    }

  buff->len += tmp;
  t1 = (struct tcp_header *)((char *)t1 +tmp);

  memcpy (t1, skb->h.th, sizeof (*t1));
  buff->h.seq = newsk->send_seq;

  t1->dest = skb->h.th->source;
  t1->source = newsk->dummy_th.source;
  t1->seq = net32(newsk->send_seq++);
  t1->ack = 1;
  newsk->window = newsk->prot->rspace(newsk);
  t1->window = net16(newsk->window);
  t1->res1=0;
  t1->res2=0;
  t1->rst = 0;
  t1->urg = 0;
  t1->psh = 0;
  t1->syn = 1;
  t1->ack_seq = net32(skb->h.th->seq+1);
  t1->doff = sizeof (*t1)/4+1;   /* +1 word for the MSS option */

  /* MSS option: kind 2, length 4, our device mtu minus headers. */
  ptr = (unsigned char *)(t1+1);
  ptr[0]=2;
  ptr[1]=4;
  ptr[2]=((dev->mtu - HEADER_SIZE) >> 8) & 0xff;
  ptr[3]=(dev->mtu - HEADER_SIZE) & 0xff;

  tcp_send_check (t1, daddr, saddr, sizeof (*t1)+4, newsk);
  newsk->prot->queue_xmit(newsk, dev, buff, 0);

  /* Time out the half-open connection if the handshake stalls. */
  newsk->time_wait.len = TCP_CONNECT_TIME;
  PRINTK ("newsk->time_wait.sk = %X\n", newsk->time_wait.sk);
  reset_timer ((struct timer *)&newsk->time_wait);
  skb->sk = newsk;

  /* Charge the SYN's memory to the new socket, then queue it on the
     LISTENING socket so accept() can pick the connection up. */
  sk->rmem_alloc -= skb->mem_len;
  newsk->rmem_alloc += skb->mem_len;

  if (sk->rqueue == NULL)
    {
      skb->next = skb;
      skb->prev = skb;
      sk->rqueue = skb;
    }
  else
    {
      skb->next = sk->rqueue;
      skb->prev = sk->rqueue->prev;
      sk->rqueue->prev = skb;
      skb->prev->next = skb;
    }
  release_sock (newsk);
}
1292
/* close() backend: discard unread data, then act by state — send a FIN
   from the synchronized states, or just drop straight to CLOSE from
   LISTEN/CLOSE.  `timeout` non-zero means the caller will not wait, so
   lingering states are cut short.  Unread data forces RST on the FIN. */
static void
tcp_close (volatile struct sock *sk, int timeout)
{
  struct sk_buff *buff;
  int need_reset = 0;
  struct tcp_header *t1,*th;
  struct proto *prot;
  struct device *dev=NULL;
  int tmp;
  PRINTK ("tcp_close ((struct sock *)%X, %d)\n",sk, timeout);
  sk->inuse = 1;        /* lock the socket */
  sk->keepopen = 0;
  sk->shutdown = SHUTDOWN_MASK;

  if (!sk->dead)
    wake_up (sk->sleep);

  /* Throw away any data the application never read; that loss must be
     signalled to the peer with a reset. */
  if (sk->rqueue != NULL)
    {
      struct sk_buff *skb;
      struct sk_buff *skb2;
      skb = sk->rqueue;
      do {
	 skb2=skb->next;
	 kfree_skb (skb, FREE_READ);
	 skb=skb2;
      } while (skb != sk->rqueue);
      need_reset = 1;
    }
  sk->rqueue = NULL;

  switch (sk->state)
    {
     case TCP_FIN_WAIT1:
     case TCP_FIN_WAIT2:
     case TCP_LAST_ACK:
       /* Already closing; optionally shortcut into TIME_WAIT. */
       if (timeout)
	 tcp_time_wait(sk);
       release_sock (sk);
       if (!need_reset)
	 return;
       break;

     case TCP_TIME_WAIT:
       if (timeout)
	 sk->state = TCP_CLOSE;
       release_sock (sk);
       return;

     case TCP_LISTEN:
       sk->state = TCP_CLOSE;
       release_sock(sk);
       return;

     case TCP_CLOSE:
       release_sock(sk);
       return;

     case TCP_ESTABLISHED:
     case TCP_SYN_SENT:
     case TCP_SYN_RECV:
       /* Active close: build and send a FIN (with RST if data was lost). */
       prot = (struct proto *)sk->prot;
       th=(struct tcp_header *)&sk->dummy_th;

       buff=prot->wmalloc(sk, MAX_FIN_SIZE,1, GFP_ATOMIC);
       if (buff == NULL)
	 {
	   /* No memory: stay ESTABLISHED and let the timer retry the
	      close shortly.
	      NOTE(review): this path returns without release_sock —
	      looks like the socket stays locked; confirm intent. */
	   sk->state = TCP_ESTABLISHED;
	   sk->timeout = TIME_CLOSE;
	   sk->time_wait.len = 100;
	   reset_timer ((struct timer *)&sk->time_wait);
	   return;
	 }

       buff->mem_addr = buff;
       buff->mem_len = MAX_FIN_SIZE;
       buff->sk = sk;
       buff->len = sizeof (*t1);
       t1=(struct tcp_header *)(buff + 1);

       tmp = prot->build_header (buff,sk->saddr, sk->daddr, &dev,
				 IPPROTO_TCP, sk->opt,
				 sizeof(struct tcp_header));
       if (tmp < 0)
	 {
	   prot->wfree (sk,buff->mem_addr, buff->mem_len);
	   PRINTK ("Unable to build header for fin.\n");
	   release_sock(sk);
	   return;
	 }
       t1 = (struct tcp_header *)((char *)t1 +tmp);
       buff ->len += tmp;
       buff->dev = dev;
       memcpy (t1, th, sizeof (*t1));
       t1->seq = net32(sk->send_seq);
       sk->send_seq++;           /* the FIN consumes a sequence number */
       buff->h.seq = sk->send_seq;
       t1->ack = 1;

       sk->delay_acks = 0;
       t1->ack_seq = net32(sk->acked_seq);
       t1->window = net16(sk->prot->rspace(sk));
       t1->fin = 1;
       t1->rst = need_reset;
       t1->doff = sizeof (*t1)/4;
       tcp_send_check (t1, sk->saddr, sk->daddr, sizeof (*t1), sk);

       /* Send now if the write queue is empty, otherwise append the FIN
	  after the pending data and arm the write timer. */
       if (sk->wfront == NULL)
	 {
	   prot->queue_xmit(sk, dev, buff, 0);
	 }
       else
	 {
	   sk->time_wait.len = sk->rtt;
	   sk->timeout = TIME_WRITE;
	   reset_timer ((struct timer *)&sk->time_wait);
	   buff->next = NULL;
	   if (sk->wback == NULL)
	     {
	       sk->wfront=buff;
	     }
	   else
	     {
	       sk->wback->next = buff;
	     }
	   sk->wback = buff;

	 }
       sk->state = TCP_FIN_WAIT1;
    }
  release_sock (sk);
}
1435
1436
1437
1438
1439 static void
1440 tcp_write_xmit (volatile struct sock *sk)
1441 {
1442 struct sk_buff *skb;
1443 while (sk->wfront != NULL && before (sk->wfront->h.seq, sk->window_seq) &&
1444 sk->packets_out < sk->cong_window)
1445 {
1446 skb = sk->wfront;
1447 sk->wfront = skb->next;
1448 if (sk->wfront == NULL)
1449 sk->wback = NULL;
1450 sk->prot->queue_xmit (sk, skb->dev, skb, skb->free);
1451 }
1452 }
1453
1454
1455
1456
1457
1458 static int
1459 tcp_ack (volatile struct sock *sk, struct tcp_header *th, unsigned long saddr)
1460 {
1461 unsigned long ack;
1462 ack = net32(th->ack_seq);
1463
1464 if (!between (ack , sk->rcv_ack_seq, sk->send_seq))
1465 {
1466 if (after (ack, sk->send_seq) || sk->state != TCP_ESTABLISHED)
1467 {
1468 return (0);
1469 }
1470 if (sk->keepopen)
1471 reset_timer ((struct timer *)&sk->time_wait);
1472 sk->retransmits = 0;
1473 return (1);
1474 }
1475
1476 sk->window_seq = ack + net16(th->window);
1477
1478
1479
1480 if (sk->cong_window < 2048 && ack != sk->rcv_ack_seq)
1481 {
1482 if (sk->exp_growth)
1483 sk->cong_window *= 2;
1484 else
1485 sk->cong_window++;
1486 }
1487
1488 sk->rcv_ack_seq = ack;
1489
1490
1491 while (sk->send_head != NULL)
1492 {
1493 if (before (sk->send_head->h.seq, ack+1))
1494 {
1495 struct sk_buff *oskb;
1496
1497 sk->packets_out --;
1498 cli();
1499 oskb = sk->send_head;
1500
1501 sk->rtt += ((jiffies - oskb->when) - sk->rtt)/2;
1502 if (sk->rtt < 30) sk->rtt = 30;
1503 sk->send_head = oskb->link3;
1504 if (sk->send_head == NULL)
1505 {
1506 sk->send_tail = NULL;
1507 }
1508
1509 if (oskb->next != NULL)
1510 {
1511 if (oskb->next != oskb)
1512 {
1513 oskb->next->prev = oskb->prev;
1514 oskb->prev->next = oskb->next;
1515 }
1516 else
1517 {
1518 int i;
1519 for (i = 0; i < DEV_NUMBUFFS; i++)
1520 {
1521 if (oskb->dev->buffs[i] == oskb)
1522 {
1523 oskb->dev->buffs[i] = NULL;
1524 break;
1525 }
1526 }
1527 }
1528 }
1529 kfree_skb (oskb, FREE_WRITE);
1530 sti();
1531 if (!sk->dead)
1532 wake_up(sk->sleep);
1533 }
1534 else
1535 {
1536 break;
1537 }
1538
1539 }
1540
1541
1542
1543
1544
1545
1546
1547 if (sk->retransmits && sk->send_head != NULL)
1548 {
1549 sk->prot->retransmit (sk,1);
1550 }
1551 sk->retransmits = 0;
1552
1553
1554
1555 if (sk->wfront != NULL && sk->packets_out < sk->cong_window)
1556 {
1557 if (after (sk->window_seq, sk->wfront->h.seq))
1558 {
1559 tcp_write_xmit (sk);
1560 }
1561 }
1562 else
1563 {
1564 if (sk->send_head == NULL && sk->ack_backlog == 0 &&
1565 sk->state != TCP_TIME_WAIT)
1566 {
1567 delete_timer((struct timer *)&sk->time_wait);
1568 sk->timeout = 0;
1569 }
1570 else
1571 {
1572 if (sk->state == TCP_TIME_WAIT)
1573 {
1574 sk->time_wait.len = TCP_TIMEWAIT_LEN;
1575 sk->timeout = TIME_CLOSE;
1576 }
1577 reset_timer ((struct timer *)&sk->time_wait);
1578 }
1579 }
1580
1581
1582 if ( sk->state == TCP_TIME_WAIT)
1583 {
1584 if (sk->rcv_ack_seq == sk->send_seq &&
1585 sk->acked_seq == sk->fin_seq);
1586 if (!sk->dead) wake_up (sk->sleep);
1587 sk->state = TCP_CLOSE;
1588 }
1589
1590 if (sk->state == TCP_FIN_WAIT1)
1591 {
1592 if (sk->rcv_ack_seq == sk->send_seq)
1593 sk->state = TCP_FIN_WAIT2;
1594 }
1595
1596 if (sk->state == TCP_LAST_ACK)
1597 {
1598 if (sk->rcv_ack_seq == sk->send_seq)
1599 {
1600 if (sk->acked_seq != sk->fin_seq)
1601 {
1602 tcp_time_wait(sk);
1603 }
1604 else
1605 {
1606 sk->state = TCP_CLOSE;
1607 }
1608 }
1609 if (!sk->dead) wake_up (sk->sleep);
1610 }
1611
1612 return (1);
1613 }
1614
1615
1616
1617
1618
/*
 * Queue an incoming data segment on the socket's receive ring
 * (sk->rqueue, a circular doubly-linked sk_buff list kept in sequence
 * order), update acked_seq across any newly contiguous segments, and
 * either send an immediate ACK or schedule a delayed one.
 *
 * Always returns 0; a non-zero return would tell tcp_rcv to free the
 * skb, but every early-exit path here frees it itself.
 */
static int
tcp_data (struct sk_buff *skb, volatile struct sock *sk,
	  unsigned long saddr, unsigned short len)
{
  struct sk_buff *skb1, *skb2;
  struct tcp_header *th;

  th = skb->h.th;
  print_th (th);
  /* Payload length = total length minus the TCP header (doff is in
     32-bit words). */
  skb->len = len - (th->doff*4);

  PRINTK("tcp_data len = %d sk = %X:\n",skb->len, sk);
  print_sk(sk);

  sk->bytes_rcv += skb->len;

  /* Empty segment with no interesting flags: ack it only if it did
     not itself carry an ACK, then drop it. */
  if (skb->len == 0 && !th->fin && !th->urg && !th->psh)
    {

      if (!th->ack)
	tcp_send_ack (sk->send_seq, sk->acked_seq,sk, th, saddr);
      kfree_skb(skb, FREE_READ);
      return (0);
    }

  /* Receive side already shut down: swallow the data but still ack
     it (counting SYN/FIN as one sequence number each). */
  if (sk->shutdown & RCV_SHUTDOWN)
    {

      sk->acked_seq = th->seq + skb->len + th->syn + th->fin;
      tcp_send_ack (sk->send_seq, sk->acked_seq, sk, skb->h.th, saddr);
      kfree_skb (skb, FREE_READ);
      if (sk->state == TCP_TIME_WAIT && sk->acked_seq == sk->fin_seq)
	{
	  if (!sk->dead) wake_up (sk->sleep);
	  sk->state = TCP_CLOSE;
	}
      return (0);
    }

  /* Insert the skb into the circular receive queue.  skb1 is left
     pointing at the segment we inserted after, or NULL when the new
     segment became the (logical) head of the queue. */
  if (sk->rqueue == NULL)
    {
      PRINTK ("tcp_data: skb = %X:\n",skb);
      print_skb (skb);

      /* First segment: a one-element ring. */
      sk->rqueue = skb;
      skb->next = skb;
      skb->prev = skb;
      skb1= NULL;
    }
  else
    {
      PRINTK ("tcp_data adding to chain sk = %X:\n",sk);
      print_sk (sk);

      /* Walk backwards from the newest segment looking for the first
	 one that starts at or before us, and splice in after it. */
      for (skb1=sk->rqueue; ; skb1=skb1->prev)
	{
	  PRINTK ("skb1=%X\n",skb1);
	  print_skb(skb1);
	  PRINTK ("skb1->h.th->seq = %d\n", skb1->h.th->seq);
	  if (after ( th->seq+1, skb1->h.th->seq))
	    {
	      skb->prev = skb1;
	      skb->next = skb1->next;
	      skb->next->prev = skb;
	      skb1->next = skb;
	      if (skb1 == sk->rqueue)
		sk->rqueue = skb;
	      break;
	    }
	  /* Wrapped all the way round: we are the oldest segment;
	     splice in before skb1 and flag it with skb1 = NULL. */
	  if ( skb1->prev == sk->rqueue)
	    {
	      skb->next= skb1;
	      skb->prev = skb1->prev;
	      skb->prev->next = skb;
	      skb1->prev = skb;
	      skb1 = NULL;
	      break;
	    }
	}

      PRINTK ("skb = %X:\n",skb);
      print_skb (skb);
      PRINTK ("sk now equals:\n");
      print_sk (sk);

    }

  /* Reuse the header's ack_seq field to cache the sequence number
     just past this segment (SYN and FIN each consume one). */
  th->ack_seq = th->seq + skb->len;
  if (th->syn) th->ack_seq ++;
  if (th->fin) th->ack_seq ++;

  /* Sanity check: we must never have handed the reader data we have
     not acknowledged. */
  if (before (sk->acked_seq, sk->copied_seq))
    {
      printk ("*** tcp.c:tcp_data bug acked < copied\n");
      sk->acked_seq = sk->copied_seq;
    }

  /* If this segment extends the in-order data (or follows an already
     acked one), advance acked_seq over it and over any following
     segments that are now contiguous. */
  if (skb1 == NULL || skb1->acked || before (th->seq, sk->acked_seq+1))
    {
      if (before (th->seq, sk->acked_seq+1))
	{
	  sk->acked_seq = th->ack_seq;
	  skb->acked = 1;

	  for (skb2=skb->next; skb2 != sk->rqueue->next; skb2=skb2->next)
	    {
	      if (before(skb2->h.th->seq, sk->acked_seq+1))
		{
		  sk->acked_seq = skb2->h.th->ack_seq;
		  skb2->acked = 1;

		  /* Force an immediate ACK below by saturating the
		     backlog counter. */
		  sk->ack_backlog = sk->max_ack_backlog;
		}
	      else
		break;
	    }

	  /* Decide between an immediate ACK and a delayed one. */
	  if (!sk->delay_acks ||
	      sk->ack_backlog >= sk->max_ack_backlog ||
	      sk->window < 2*sk->mtu + sk->bytes_rcv ||
	      sk->bytes_rcv > sk->max_unacked ||
	      th->fin)
	    {
	      tcp_send_ack (sk->send_seq, sk->acked_seq,sk,th, saddr);
	    }
	  else
	    {
	      sk->ack_backlog++;
	      sk->time_wait.len = TCP_ACK_TIME;
	      sk->timeout = TIME_WRITE;
	      reset_timer ((struct timer *)&sk->time_wait);
	    }
	}
    }
  else
    {
      /* Out-of-order segment: re-ack what we have so the peer can
	 fill the gap. */
      tcp_send_ack (sk->send_seq, sk->acked_seq, sk, th, saddr);
    }

  /* Wake any reader sleeping on this socket. */
  if (!sk->dead)
    {
      wake_up (sk->sleep);
    }
  else
    {
      PRINTK ("data received on dead socket. \n");
    }

  /* Past CLOSING and the peer's FIN fully acked: connection done. */
  if (sk->state > TCP_CLOSING && sk->acked_seq == sk->fin_seq)
    {
      sk->state = TCP_CLOSE;
    }

  return (0);
}
1786
1787 static int
1788 tcp_urg (volatile struct sock *sk, struct tcp_header *th, unsigned long saddr)
1789 {
1790 extern int kill_pg (int pg, int sig, int priv);
1791 extern int kill_proc (int pid, int sig, int priv);
1792
1793 if (!sk->dead)
1794 wake_up(sk->sleep);
1795
1796 if (sk->urginline)
1797 {
1798 th->urg = 0;
1799 th->psh = 1;
1800 return (0);
1801 }
1802
1803 sk->urg++;
1804
1805 if (!sk->urg)
1806 {
1807
1808
1809 if (sk->proc == 0) return (0);
1810 if (sk->proc > 0)
1811 {
1812 kill_proc (sk->proc, SIGURG, 1);
1813 }
1814 else
1815 {
1816 kill_pg (-sk->proc, SIGURG, 1);
1817 }
1818 }
1819 return (0);
1820 }
1821
1822
/*
 * Handle a received FIN: mark further sends shut down, advance the
 * connection state, and (for the states that need it) build and
 * transmit our own FIN+ACK reply.
 *
 * Returns 0 on success, 1 if no buffer could be allocated for the
 * reply (caller may retry by reprocessing the segment).
 */
static int
tcp_fin (volatile struct sock *sk, struct tcp_header *th,
	 unsigned long saddr, struct device *dev)
{
  struct sk_buff *buff;
  struct tcp_header *t1;
  int tmp;
  PRINTK ("tcp_fin (sk=%X, th=%X, saddr=%X, dev=%X)\n",
	  sk, th, saddr, dev);

  if (!sk->dead)
    {
      wake_up (sk->sleep);
    }

  /* No more data will be sent after this exchange completes. */
  sk->shutdown |= SEND_SHUTDOWN;

  sk->err = 0;
  switch (sk->state)
    {
    case TCP_SYN_RECV:
    case TCP_SYN_SENT:
    case TCP_ESTABLISHED:
      /* Reply with our own FIN below and wait for it to be acked. */
      sk->state = TCP_LAST_ACK;
      break;

      /* NOTE(review): 'default' deliberately shares this arm with
	 FIN_WAIT1/TIME_WAIT; moving to LAST_ACK from FIN_WAIT1 does
	 not match the RFC 793 diagram (CLOSING would be expected) —
	 confirm intent. */
    default:
    case TCP_FIN_WAIT1:
    case TCP_TIME_WAIT:
      sk->state = TCP_LAST_ACK;

      /* Start the 2*MSL-style linger timer before closing. */
      sk->time_wait.len = TCP_TIMEWAIT_LEN;
      sk->timeout = TIME_CLOSE;
      reset_timer ((struct timer *)&sk->time_wait);
      return (0);

    case TCP_FIN_WAIT2:
      /* Our FIN was already acked; peer's FIN completes the close. */
      sk->state = TCP_CLOSE;
      return (0);
    }

  /* Build the FIN+ACK reply.  GFP_ATOMIC: we may be at interrupt
     time. */
  buff=sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
  if (buff == NULL)
    {
      /* Couldn't get memory; ask the caller to retry. */
      return (1);
    }

  buff->mem_addr = buff;
  buff->mem_len = MAX_ACK_SIZE;
  buff->len=sizeof (struct tcp_header);
  buff->sk = sk;

  t1 = (struct tcp_header *)(buff + 1);

  tmp = sk->prot->build_header (buff, sk->saddr, sk->daddr, &dev,
				IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
  if (tmp < 0)
    {
      /* No route: drop the reply silently. */
      sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
      return (0);
    }

  buff->len += tmp;
  t1 = (struct tcp_header *)((char *)t1 +tmp);

  /* Start from the incoming header, then swap the port pair. */
  memcpy (t1, th, sizeof (*t1));

  t1->dest = th->source;
  t1->source = th->dest;

  /* Our FIN consumes one sequence number. */
  t1->seq = net32(sk->send_seq++);

  /* Remember where the peer's FIN sits so later ACK processing can
     detect when the close is complete. */
  sk->fin_seq = th->seq+1;

  buff->h.seq = sk->send_seq;
  t1->window = net16(sk->prot->rspace(sk));

  t1->res1=0;
  t1->res2=0;
  t1->rst = 0;
  t1->urg = 0;
  t1->syn = 0;
  t1->psh = 0;
  t1->ack = 1;
  t1->fin = 1;
  t1->ack_seq = net32(sk->acked_seq);

  t1->doff = sizeof (*t1)/4;
  tcp_send_check (t1, sk->saddr, sk->daddr, sizeof (*t1), sk);

  /* If other data is still queued, append behind it so ordering is
     preserved; otherwise transmit immediately. */
  if (sk->wback != NULL)
    {
      buff->next = NULL;
      sk->wback->next = buff;
      sk->wback = buff;
    }
  else
    {
      sk->prot->queue_xmit (sk, dev, buff,0);
    }

  return (0);
}
1934
1935
1936
1937
/*
 * Accept a connection on a listening socket: block (unless
 * O_NONBLOCK) until a completed connection's skb appears on the
 * receive queue, and return the new socket it carries.
 *
 * Returns the new sock, or NULL with sk->err set to EINVAL (not
 * listening), EAGAIN (would block), or ERESTARTSYS (interrupted).
 */
static volatile struct sock *
tcp_accept (volatile struct sock *sk, int flags)
{
  volatile struct sock *newsk;
  struct sk_buff *skb;

  PRINTK ("tcp_accept(sk=%X, flags=%X)\n", sk, flags);
  print_sk(sk);

  /* Only a listening socket may accept. */
  if (sk->state != TCP_LISTEN)
    {
      sk->err = EINVAL;
      return (NULL);
    }

  /* Lock the socket; interrupts stay off while we probe the queue so
     the bottom half cannot race us. */
  sk->inuse = 1;
  cli();
  while ( (skb = get_firstr(sk)) == NULL )
    {
      if (flags & O_NONBLOCK)
	{
	  sti();
	  release_sock (sk);
	  sk->err = EAGAIN;
	  return (NULL);
	}

      /* Drop the lock and sleep until a connection arrives. */
      release_sock (sk);
      interruptible_sleep_on (sk->sleep);
      if (current->signal & ~current->blocked)
	{
	  /* Interrupted by a signal; caller should restart. */
	  sti();
	  sk->err = ERESTARTSYS;
	  return (NULL);
	}

      /* Re-take the lock before probing the queue again. */
      sk->inuse = 1;
    }
  sti();

  /* The queued skb carries the newly created connection's sock. */
  newsk = skb->sk;

  kfree_skb (skb, FREE_READ);
  release_sock (sk);
  return (newsk);
}
1988
1989
1990
1991
/*
 * Initiate an active open: validate the user's sockaddr, pick an
 * initial send sequence, build and transmit a SYN carrying an MSS
 * option, and move the socket to TCP_SYN_SENT.
 *
 * Returns 0 on success or a negative errno (-EISCONN, -EINVAL,
 * -EAFNOSUPPORT, -ENOMEM, -ENETUNREACH).
 */
static int
tcp_connect (volatile struct sock *sk, struct sockaddr_in *usin, int addr_len)
{
  struct sk_buff *buff;
  struct sockaddr_in sin;
  struct device *dev=NULL;
  unsigned char *ptr;
  int tmp;
  struct tcp_header *t1;
  if (sk->state != TCP_CLOSE) return (-EISCONN);
  if (addr_len < 8) return (-EINVAL);

  /* Copy the address in from user space. */
  memcpy_fromfs (&sin,usin, min(sizeof (sin), addr_len));

  if (sin.sin_family && sin.sin_family != AF_INET) return (-EAFNOSUPPORT);

  sk->daddr = sin.sin_addr.s_addr;
  /* Clock-based initial sequence number. */
  sk->send_seq = timer_seq*SEQ_TICK-seq_offset;
  sk->rcv_ack_seq = sk->send_seq -1;
  sk->err = 0;
  sk->dummy_th.dest = sin.sin_port;

  buff=sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
  if (buff == NULL)
    {
      return (-ENOMEM);
    }
  sk->inuse = 1;
  buff->mem_addr = buff;
  buff->mem_len = MAX_SYN_SIZE;
  /* 24 = 20-byte TCP header + 4 bytes of MSS option. */
  buff->len=24;
  buff->sk = sk;
  t1=(struct tcp_header *)(buff + 1);

  /* Build the lower-layer (IP) header; this also selects dev. */
  tmp = sk->prot->build_header (buff, sk->saddr, sk->daddr, &dev,
				IPPROTO_TCP, NULL, MAX_SYN_SIZE);
  if (tmp < 0)
    {
      sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
      release_sock (sk);
      return (-ENETUNREACH);
    }
  buff->len += tmp;
  t1 = (struct tcp_header *)((char *)t1 +tmp);

  /* Start from the template header, then fill in the SYN fields. */
  memcpy (t1, (void *)&(sk->dummy_th), sizeof (*t1));
  t1->seq = net32(sk->send_seq++);
  buff->h.seq = sk->send_seq;
  t1->ack = 0;
  t1->window = 2;
  t1->res1=0;
  t1->res2=0;
  t1->rst = 0;
  t1->urg = 0;
  t1->psh = 0;
  t1->syn = 1;
  t1->urg_ptr = 0;
  t1->doff =6;	/* 24 bytes: header plus the 4-byte option. */

  /* MSS option: kind 2, length 4, value = device MTU minus our
     link/IP overhead. */
  ptr=(unsigned char *)(t1+1);
  ptr[0]=2;
  ptr[1]=4;
  ptr[2]=(dev->mtu- HEADER_SIZE) >> 8;
  ptr[3]=(dev->mtu- HEADER_SIZE) & 0xff;
  sk->mtu = dev->mtu - HEADER_SIZE;
  tcp_send_check (t1, sk->saddr, sk->daddr,
		  sizeof (struct tcp_header) + 4, sk);

  /* Enter SYN_SENT before the segment can be acked. */
  sk->state = TCP_SYN_SENT;

  sk->prot->queue_xmit(sk, dev, buff, 0);

  /* Arm the connect timeout and the SYN retry budget. */
  sk->time_wait.len = TCP_CONNECT_TIME;
  reset_timer ((struct timer *)&sk->time_wait);
  sk->retransmits = TCP_RETR1 - TCP_SYN_RETRIES;
  release_sock (sk);
  return (0);
}
2074
2075
2076
2077
2078
/*
 * Decide whether an incoming segment is acceptable with respect to
 * our receive window (roughly RFC 793's sequence acceptability test).
 *
 * Returns 1 if the caller should process the segment, 0 if it was
 * rejected here (in which case an ACK may already have been sent on
 * the caller's behalf).
 */
static int
tcp_sequence (volatile struct sock *sk, struct tcp_header *th, short len,
	      struct options *opt, unsigned long saddr)
{
  /* Accept if either the start or the end of the segment falls
     inside [acked_seq, acked_seq + window]. */
  if (between(th->seq, sk->acked_seq, sk->acked_seq + sk->window)||
      between(th->seq + len-sizeof (*th), sk->acked_seq+1,
	      sk->acked_seq + sk->window))
    {
      return (1);
    }

  /* Entirely beyond the window: tell the peer where we are. */
  if (after (th->seq, sk->acked_seq + sk->window))
    {
      tcp_send_ack (sk->send_seq, sk->acked_seq, sk, th, saddr);
      return (0);
    }

  /* Old (duplicate) segment.  Re-ack it unless it is an RST. */
  if (!th->rst)
    {
      /* Duplicate carrying data or SYN/FIN suggests our delayed acks
	 are being outrun: switch them off. */
      if (len != th->doff*4 || th->fin || th->syn)
	{
	  sk->delay_acks = 0;
	}

      tcp_send_ack (net32(th->ack_seq), sk->acked_seq, sk, th, saddr);
    }

  /* A bare, recent ACK is still worth processing even though it is
     outside the window. */
  if (th->ack && len == th->doff*4 && after (th->seq, sk->acked_seq - 4096) &&
      !th->fin && !th->syn) return (1);

  return (0);
}
2120
2121
2122 static void
2123 tcp_options (volatile struct sock *sk, struct tcp_header *th)
2124 {
2125 unsigned char *ptr;
2126 ptr = (unsigned char *)(th + 1);
2127 if (ptr[0] != 2 || ptr[1] != 4)
2128 {
2129 sk->mtu = min (sk->mtu, 576-HEADER_SIZE);
2130 return;
2131 }
2132 sk->mtu = min (sk->mtu, ptr[2]*256 + ptr[3] - HEADER_SIZE);
2133 }
2134
/*
 * Main TCP receive routine, called from the IP layer for every TCP
 * segment (and again with redo != 0 when a backlogged segment is
 * replayed after the socket lock is dropped).
 *
 * Finds the owning socket, validates the checksum, takes the socket
 * lock (queueing on back_log if it is busy), then dispatches on the
 * connection state.  Always returns 0.
 */
int
tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
	unsigned long daddr, unsigned short len,
	unsigned long saddr, int redo, struct ip_protocol * protocol)
{
  struct tcp_header *th;
  volatile struct sock *sk;

  if (!skb)
    {
      printk ("tcp.c: tcp_rcv skb = NULL\n");
      return (0);
    }
#if 0
  if (!protocol)
    {
      printk ("tcp.c: tcp_rcv protocol = NULL\n");
      return (0);
    }

  if (!opt)
    {
      printk ("tcp.c: tcp_rcv opt = NULL\n");
    }
#endif
  if (!dev)
    {
      printk ("tcp.c: tcp_rcv dev = NULL\n");
      return (0);
    }

  th = skb->h.th;

  /* Look up the socket by the full 4-tuple. */
  sk=get_sock(&tcp_prot, net16(th->dest), saddr, th->source, daddr);
  PRINTK("<<\n");
  PRINTK("len = %d, redo = %d, skb=%X\n", len, redo, skb);

  if (sk)
    {
      PRINTK ("sk = %X:\n",sk);
      print_sk (sk);
    }

  if (!redo)
    {
      /* First pass only: verify the checksum and initialize the skb
	 bookkeeping fields. */
      if (th->check && tcp_check (th, len, saddr, daddr ))
	{
	  skb->sk = NULL;
	  PRINTK ("packet dropped with bad checksum.\n");
	  kfree_skb (skb, 0);

	  return (0);
	}

      /* No socket: answer with a reset (unless the segment was
	 itself a reset). */
      if (sk == NULL)
	{
	  if (!th->rst)
	    tcp_reset (daddr, saddr, th, &tcp_prot, opt,dev);
	  skb->sk = NULL;
	  kfree_skb (skb, 0);
	  return (0);
	}

      skb->len = len;
      skb->sk = sk;
      skb->acked = 0;
      skb->used = 0;
      skb->free = 0;
      skb->urg_used = 0;
      /* Note the swap: skb->saddr is OUR address for replies. */
      skb->saddr = daddr;
      skb->daddr = saddr;

      /* Convert once to host order; everything below relies on it. */
      th->seq = net32(th->seq);

      cli();

      /* Socket busy: park the segment on the circular back_log and
	 let release_sock() replay it later (with redo = 1). */
      if (sk->inuse)
	{
	  if (sk->back_log == NULL)
	    {
	      sk->back_log = skb;
	      skb->next = skb;
	      skb->prev = skb;
	    }
	  else
	    {
	      skb->next = sk->back_log;
	      skb->prev = sk->back_log->prev;
	      skb->prev->next = skb;
	      skb->next->prev = skb;
	    }
	  sti();
	  return (0);
	}
      sk->inuse = 1;
      sti();
    }
  else
    {
      if (!sk)
	{
	  printk ("tcp.c: tcp_rcv bug sk=NULL redo = 1\n");
	  return (0);
	}
    }

  if (!sk->prot)
    {
      printk ("tcp.c: tcp_rcv sk->prot = NULL \n");
      return (0);
    }

  /* Enforce the per-socket receive memory limit. */
  if (sk->rmem_alloc + skb->mem_len >= SK_RMEM_MAX)
    {
      skb->sk = NULL;
      PRINTK ("dropping packet due to lack of buffer space.\n");
      kfree_skb (skb, 0);
      release_sock (sk);
      return (0);
    }

  sk->rmem_alloc += skb->mem_len;

  PRINTK ("About to do switch. \n");

  /* State dispatch.  Several cases deliberately fall through. */
  switch (sk->state)
    {

      /* LAST_ACK handles RST itself, then falls through to the
	 common synchronized-state processing below. */
    case TCP_LAST_ACK:
      if (th->rst)
	{
	  sk->err = ECONNRESET;
	  sk->state = TCP_CLOSE;
	  if (!sk->dead)
	    {
	      wake_up (sk->sleep);
	    }
	  kfree_skb (skb, FREE_READ);
	  release_sock(sk);
	  return (0);
	}
      /* fallthrough */

    case TCP_ESTABLISHED:
    case TCP_FIN_WAIT1:
    case TCP_FIN_WAIT2:
    case TCP_TIME_WAIT:
      /* Reject segments outside the receive window. */
      if (!tcp_sequence (sk, th, len, opt, saddr))
	{
	  kfree_skb (skb, FREE_READ);
	  release_sock(sk);
	  return (0);
	}

      if (th->rst)
	{
	  sk->err = ECONNRESET;
	  sk->state = TCP_CLOSE;
	  if (!sk->dead)
	    {
	      wake_up (sk->sleep);
	    }
	  kfree_skb (skb, FREE_READ);
	  release_sock(sk);
	  return (0);
	}
      /* IP security/compartment mismatch, or an in-window SYN:
	 reset the connection. */
      if (opt && (opt->security != 0 || opt->compartment != 0 || th->syn))
	{
	  sk->err = ECONNRESET;
	  sk->state = TCP_CLOSE;
	  tcp_reset (daddr, saddr, th, sk->prot, opt,dev);
	  if (!sk->dead)
	    {
	      wake_up (sk->sleep);
	    }
	  kfree_skb (skb, FREE_READ);
	  release_sock(sk);
	  return (0);
	}

      /* ACK, URG, then data, then FIN — in that order. */
      if (th->ack)
	{
	  if(!tcp_ack (sk, th, saddr))
	    {
	      kfree_skb (skb, FREE_READ);
	      release_sock(sk);
	      return (0);
	    }
	}
      if (th->urg)
	{
	  if (tcp_urg (sk, th, saddr))
	    {
	      kfree_skb (skb, FREE_READ);
	      release_sock(sk);
	      return (0);
	    }
	}

      if ( tcp_data (skb, sk, saddr, len))
	{
	  kfree_skb (skb, FREE_READ);
	  release_sock(sk);
	  return (0);
	}

      if (!th->fin)
	{
	  release_sock(sk);
	  return (0);
	}

      tcp_fin (sk, th, saddr, dev);
      release_sock(sk);
      return (0);

    case TCP_CLOSE:
      /* Closed socket: anything arriving here draws a reset, unless
	 the socket is dead or still has a peer address. */
      if (sk->dead || sk->daddr)
	{
	  PRINTK ("packet received for closed,dead socket\n");
	  kfree_skb (skb, FREE_READ);
	  release_sock (sk);
	  return (0);
	}

      if (!th->rst)
	{
	  if (!th->ack)
	    th->ack_seq=0;
	  tcp_reset (daddr, saddr, th, sk->prot, opt,dev);
	}
      kfree_skb (skb, FREE_READ);
      release_sock(sk);
      return (0);

    case TCP_LISTEN:
      /* Listening socket: ignore RST, reset on stray ACK, accept
	 SYN as a new connection request. */
      if (th->rst)
	{
	  kfree_skb (skb, FREE_READ);
	  release_sock(sk);
	  return (0);
	}
      if (th->ack)
	{
	  tcp_reset (daddr, saddr, th, sk->prot, opt,dev );
	  kfree_skb (skb, FREE_READ);
	  release_sock(sk);
	  return (0);
	}

      if (th->syn)
	{
	  /* tcp_conn_request creates the new socket and replies with
	     SYN+ACK; it takes ownership of the skb. */
	  tcp_conn_request (sk, skb, daddr, saddr, opt, dev);

	  release_sock(sk);
	  return (0);
	}

      kfree_skb (skb, FREE_READ);
      release_sock(sk);
      return (0);

      /* Remaining states (e.g. SYN_RECV): window-check, then fall
	 through to the SYN_SENT handling below. */
    default:
      if (!tcp_sequence (sk, th, len, opt, saddr))
	{
	  kfree_skb (skb, FREE_READ);
	  release_sock(sk);
	  return (0);
	}
      /* fallthrough */

    case TCP_SYN_SENT:
      if (th->rst)
	{
	  sk->err = ECONNREFUSED;
	  sk->state = TCP_CLOSE;
	  if (!sk->dead)
	    {
	      wake_up (sk->sleep);
	    }
	  kfree_skb (skb, FREE_READ);
	  release_sock(sk);
	  return (0);
	}

      /* No ACK yet: a bare SYN means a simultaneous open. */
      if (!th->ack)
	{
	  if (th->syn)
	    {
	      sk->state = TCP_SYN_RECV;
	    }

	  kfree_skb (skb, FREE_READ);
	  release_sock(sk);
	  return (0);
	}

      switch (sk->state)
	{
	case TCP_SYN_SENT:
	  /* Expecting SYN+ACK; a bad ACK draws a reset. */
	  if (!tcp_ack(sk, th, saddr))
	    {
	      tcp_reset(daddr, saddr, th, sk->prot, opt,dev);
	      kfree_skb (skb, FREE_READ);
	      release_sock(sk);
	      return (0);
	    }

	  if (!th->syn)
	    {
	      kfree_skb (skb, FREE_READ);
	      release_sock (sk);
	      return (0);
	    }

	  /* Ack the peer's SYN and fall through to complete the
	     handshake as if we were in SYN_RECV. */
	  sk->acked_seq = th->seq+1;
	  sk->fin_seq = th->seq;
	  tcp_send_ack (sk->send_seq, th->seq+1, sk,
			th, sk->daddr);
	  /* fallthrough */

	case TCP_SYN_RECV:
	  if (!tcp_ack(sk, th, saddr))
	    {
	      tcp_reset(daddr, saddr, th, sk->prot, opt, dev);
	      kfree_skb (skb, FREE_READ);
	      release_sock(sk);
	      return (0);
	    }

	  sk->state = TCP_ESTABLISHED;

	  /* Pick up the peer's MSS, latch its port, and wake any
	     process blocked in connect/accept. */
	  tcp_options(sk, th);
	  sk->dummy_th.dest = th->source;
	  sk->copied_seq = sk->acked_seq-1;
	  if (!sk->dead)
	    {
	      wake_up (sk->sleep);
	    }

	  /* The handshake segment may itself carry urgent data,
	     data, and/or a FIN. */
	  if (th->urg)
	    if (tcp_urg (sk, th, saddr))
	      {
		kfree_skb (skb, FREE_READ);
		release_sock(sk);
		return (0);
	      }
	  if (tcp_data (skb, sk, saddr, len))
	    kfree_skb (skb, FREE_READ);

	  if (th->fin)
	    tcp_fin(sk, th, saddr, dev);

	  release_sock(sk);
	  return (0);
	}

      /* Reached only via the outer 'default' label for states not in
	 the inner switch: process URG, data, FIN normally. */
      if (th->urg)
	{
	  if (tcp_urg (sk, th, saddr))
	    {
	      kfree_skb (skb, FREE_READ);
	      release_sock (sk);
	      return (0);
	    }
	}

      if (tcp_data (skb, sk, saddr, len))
	{
	  kfree_skb (skb, FREE_READ);
	  release_sock (sk);
	  return (0);
	}

      if (!th->fin)
	{
	  release_sock(sk);
	  return (0);
	}
      tcp_fin (sk, th, saddr, dev);
      release_sock(sk);
      return (0);
    }
}
2560
2561
2562
2563
2564
2565 static void
2566 tcp_write_wakeup(volatile struct sock *sk)
2567 {
2568 struct sk_buff *buff;
2569 struct tcp_header *t1;
2570 struct device *dev=NULL;
2571 int tmp;
2572 if (sk -> state != TCP_ESTABLISHED) return;
2573
2574 buff=sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
2575
2576 if (buff == NULL) return;
2577
2578 buff->mem_addr = buff;
2579 buff->mem_len = MAX_ACK_SIZE;
2580 buff->len=sizeof (struct tcp_header);
2581 buff->free = 1;
2582 buff->sk = sk;
2583 PRINTK ("in tcp_write_wakeup\n");
2584 t1=(struct tcp_header *)(buff + 1);
2585
2586
2587 tmp = sk->prot->build_header (buff, sk->saddr, sk->daddr, &dev,
2588 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
2589 if (tmp < 0)
2590 {
2591 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
2592 return;
2593 }
2594
2595 buff->len += tmp;
2596 t1 = (struct tcp_header *)((char *)t1 +tmp);
2597
2598 memcpy (t1,(void *) &sk->dummy_th, sizeof (*t1));
2599
2600
2601
2602 t1->seq = net32(sk->send_seq-1);
2603 t1->ack = 1;
2604 t1->res1= 0;
2605 t1->res2= 0;
2606 t1->rst = 0;
2607 t1->urg = 0;
2608 t1->psh = 0;
2609 t1->fin = 0;
2610 t1->syn = 0;
2611 t1->ack_seq = net32(sk->acked_seq);
2612 t1->window = net16(sk->prot->rspace(sk));
2613 t1->doff = sizeof (*t1)/4;
2614 tcp_send_check (t1, sk->saddr, sk->daddr, sizeof (*t1), sk);
2615
2616
2617 sk->prot->queue_xmit(sk, dev, buff, 1);
2618
2619 }
2620
/* TCP's protocol operations table.  Positional initializer — the
   slot meanings below are inferred from how this file uses sk->prot
   (wmalloc/build_header/queue_xmit/retransmit/rspace etc.); confirm
   the exact field order against struct proto in sock.h. */
struct proto tcp_prot =
{
  sock_wmalloc,		/* wmalloc: write-buffer allocation */
  sock_rmalloc,		/* rmalloc: read-buffer allocation */
  sock_wfree,		/* wfree */
  sock_rfree,		/* rfree */
  sock_rspace,		/* rspace: free receive space (window) */
  sock_wspace,		/* wspace */
  tcp_close,
  tcp_read,
  tcp_write,
  NULL,			/* presumably sendto — unused by TCP */
  NULL,			/* presumably recvfrom — unused by TCP */
  ip_build_header,
  tcp_connect,
  tcp_accept,
  ip_queue_xmit,
  tcp_retransmit,
  tcp_write_wakeup,
  tcp_read_wakeup,
  tcp_rcv,
  tcp_select,
  tcp_ioctl,
  NULL,			/* NOTE(review): unidentified slot — check sock.h */
  128,			/* presumably max header size — confirm */
  0,
  {NULL,}		/* socket hash/array, initially empty */
};
2649
2650
2651
2652