This source file includes the following definitions:
- min
- print_th
- get_firstr
- diff
- tcp_time_wait
- tcp_retransmit
- tcp_err
- tcp_readable
- tcp_select
- tcp_ioctl
- tcp_check
- tcp_send_check
- tcp_send_partial
- tcp_send_ack
- tcp_build_header
- tcp_write
- tcp_sendto
- tcp_read_wakeup
- cleanup_rbuf
- tcp_read_urg
- tcp_read
- tcp_shutdown
- tcp_recvfrom
- tcp_reset
- tcp_conn_request
- tcp_close
- tcp_write_xmit
- tcp_ack
- tcp_data
- tcp_urg
- tcp_fin
- tcp_accept
- tcp_connect
- tcp_sequence
- tcp_options
- tcp_rcv
- tcp_write_wakeup
79 #include <linux/types.h>
80 #include <linux/sched.h>
81 #include <linux/mm.h>
82 #include <linux/string.h>
83 #include <linux/socket.h>
84 #include <netinet/in.h>
85 #include <linux/fcntl.h>
86 #include "timer.h"
87 #include "ip.h"
88 #include "icmp.h"
89 #include "tcp.h"
90 #include "sock.h"
91 #include "arp.h"
92 #include <linux/errno.h>
93 #include <linux/timer.h>
94 #include <asm/system.h>
95 #include <asm/segment.h>
96 #include <linux/mm.h>
97
98 #include <linux/termios.h>
99
100 #ifdef PRINTK
101 #undef PRINTK
102 #endif
103
104 #undef TCP_DEBUG
105
106 #ifdef TCP_DEBUG
107 #define PRINTK(x) printk x
108 #else
109 #define PRINTK(x)
110 #endif
111
112 #define tmax(a,b) (before ((a),(b)) ? (b) : (a))
113 #define swap(a,b) {unsigned long c; c=a; a=b; b=c;}
114
115 extern struct proto tcp_prot;
116
117 static int
118 min (unsigned int a, unsigned int b)
119 {
120 if (a < b) return (a);
121 return (b);
122 }
123
124 void
125 print_th (struct tcp_header *th)
126 {
127 unsigned char *ptr;
128 ptr = (unsigned char *)(th + 1);
129 PRINTK (("tcp header:\n"));
130 PRINTK ((" source=%d, dest=%d, seq =%d, ack_seq = %d\n",
131 net16(th->source), net16(th->dest), net32(th->seq),
132 net32(th->ack_seq)));
133 PRINTK ((" fin=%d, syn=%d, rst=%d, psh=%d, ack=%d, urg=%d res1=%d res2=%d\n"
134 ,th->fin, th->syn, th->rst, th->psh, th->ack, th->urg,
135 th->res1, th->res2));
136 PRINTK ((" window = %d, check = %d urg_ptr = %d\n",
137 net16(th->window), net16(th->check), net16(th->urg_ptr)));
138 PRINTK ((" doff = %d\n",th->doff));
139 PRINTK (("options = %d %d %d %d\n", ptr[0], ptr[1], ptr[2], ptr[3]));
140 }
141
142
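/* Dequeue the first sk_buff from the socket's circular receive queue; returns NULL if the queue is empty. */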
143 static struct sk_buff *
144 get_firstr(volatile struct sock *sk)
145 {
146 struct sk_buff *skb;
147 skb = sk->rqueue;
148 if (skb == NULL) return (NULL);
149 sk->rqueue = (struct sk_buff *)skb->next;
150 if (sk->rqueue == skb)
151 {
152 sk->rqueue = NULL;
153 }
154 else
155 {
156 sk->rqueue->prev=skb->prev;
157 sk->rqueue->prev->next = sk->rqueue;
158 }
159 return (skb);
160 }
161
162 static long
163 diff (unsigned long seq1, unsigned long seq2)
164 {
165 long d;
166 d=seq1-seq2;
167 if (d > 0) return (d);
168
169 return (~d+1);
170 }
171
172
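/* Move the socket into TIME_WAIT: shut down both directions, wake any sleeper and arm the TIME_WAIT close timer. */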
173 static void
174 tcp_time_wait (volatile struct sock *sk)
175 {
176 sk->state = TCP_TIME_WAIT;
177 sk->shutdown = SHUTDOWN_MASK;
178 if (!sk->dead) wake_up (sk->sleep);
179 sk->time_wait.len = TCP_TIMEWAIT_LEN;
180 sk->timeout = TIME_CLOSE;
181 reset_timer ((struct timer *)&sk->time_wait);
182 }
183
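/* Retransmit outstanding data. When not resending everything, back off first: double the RTT estimate, halve the congestion window and stop exponential window growth. */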
184 static void
185 tcp_retransmit (volatile struct sock *sk, int all)
186 {
187 if (all)
188 {
189 ip_retransmit (sk, all);
190 return;
191 }
192 sk->rtt *= 2;
193 if (sk->cong_window > 1)
194 sk->cong_window = sk->cong_window / 2;
195 sk->exp_growth = 0;
196
197
198 ip_retransmit (sk, all);
199
200 }
201
202
203
204
205
206
207
208
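/* ICMP error handler: a source quench shrinks the congestion window; other errors are recorded in sk->err, and a fatal error aborts a connection still in SYN_SENT. */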
209 void
210 tcp_err (int err, unsigned char *header, unsigned long daddr,
211 unsigned long saddr, struct ip_protocol *protocol)
212 {
213 struct tcp_header *th;
214 volatile struct sock *sk;
215
216 PRINTK (("tcp_err(err=%d, header=%X, daddr=%X saddr=%X, protocol=%X)\n",
217 err, header, daddr, saddr, protocol));
218
219 th = (struct tcp_header *)header;
220 sk = get_sock (&tcp_prot, net16(th->dest), saddr, th->source, daddr);
221 print_th (th);
222
223 if (sk == NULL) return;
224
225 if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8))
226 {
227
228
229
230 if (sk->cong_window > 1)
231 sk->cong_window --;
232
233 return;
234 }
235
236 PRINTK (("tcp.c: icmp_err got error\n"));
237 sk->err = icmp_err_convert[err & 0xff].errno;
238
239
240 if (icmp_err_convert[err & 0xff].fatal)
241 {
242 if (sk->state == TCP_SYN_SENT)
243 {
244 sk->state = TCP_CLOSE;
245 sk->prot->close(sk, 0);
246 }
247 }
248
249 return;
250
251 }
252
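/* Return the number of bytes available for reading, walking the receive queue from copied_seq and stopping at the first gap or PSH; urgent data is excluded from the count. */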
253 static int
254 tcp_readable (volatile struct sock *sk)
255 {
256 unsigned long counted;
257 unsigned long amount;
258 struct sk_buff *skb;
259 int count=0;
260 int sum;
261
262 PRINTK (("tcp_readable (sk=%X)\n", sk));
263
264 if (sk == NULL || sk->rqueue == NULL) return (0);
265
266 counted = sk->copied_seq+1;
267 amount = 0;
268 skb = (struct sk_buff *)sk->rqueue->next;
269
270
271 do {
272 count ++;
273 if (count > 20)
274 {
275 PRINTK (("tcp_readable, more than 20 packets without a psh\n"));
276 PRINTK (("possible read_queue corruption.\n"));
277 return (amount);
278 }
279 if (before (counted, skb->h.th->seq)) break;
280 sum = skb->len - ( counted - skb->h.th->seq);
281 if (skb->h.th->syn) sum ++;
282 if (skb->h.th->urg)
283 {
284 sum -= net16(skb->h.th->urg_ptr);
285 }
286 if (sum >= 0)
287 {
288 amount += sum;
289 if (skb->h.th->syn) amount --;
290 counted += sum;
291 }
292 if (amount && skb->h.th->psh) break;
293 skb = (struct sk_buff *)skb->next;
294 } while (skb != sk->rqueue->next);
295 PRINTK (("tcp readable returning %d bytes\n", amount));
296 return (amount);
297 }
298
299
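/* select() support: readable when queued data (or a pending connection on a listener) is present, writable when at least sk->mtu bytes of send space are free, exceptional on error. */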
300 static int
301 tcp_select (volatile struct sock *sk, int sel_type, select_table *wait)
302 {
303 sk->inuse = 1;
304 PRINTK (("tcp_select (sk=%X, sel_type = %d, wait = %X)\n",
305 sk, sel_type, wait));
306 switch (sel_type)
307 {
308 case SEL_IN:
309 select_wait (sk->sleep, wait);
310 if (sk->rqueue != NULL)
311 {
312 if (sk->state == TCP_LISTEN || tcp_readable(sk))
313 {
314 release_sock (sk);
315 return (1);
316 }
317 }
318
319 if (sk->shutdown & RCV_SHUTDOWN)
320 {
321 release_sock (sk);
322 return (1);
323 }
324 else
325 {
326 release_sock (sk);
327 return (0);
328 }
329
330 case SEL_OUT:
331 select_wait (sk->sleep, wait);
332
333 if (sk->shutdown & SEND_SHUTDOWN)
334 {
335 PRINTK (("write select on shutdown socket.\n"));
336
337 release_sock (sk);
338 return (0);
339 }
340
341
342
343 if (sk->prot->wspace(sk) >= sk->mtu)
344 {
345 release_sock (sk);
346
347 if (sk->state == TCP_SYN_RECV || sk->state == TCP_SYN_SENT)
348 return (0);
349 return (1);
350 }
351
352 PRINTK (("tcp_select: sleeping on write sk->wmem_alloc = %d, "
353 "sk->packets_out = %d\n"
354 "sk->wback = %X, sk->wfront = %X\n"
355 "sk->send_seq = %u, sk->window_seq=%u\n",
356 sk->wmem_alloc, sk->packets_out,
357 sk->wback, sk->wfront,
358 sk->send_seq, sk->window_seq));
359
360 release_sock (sk);
361 return (0);
362
363
364 case SEL_EX:
365 select_wait(sk->sleep,wait);
366 if (sk->err)
367 {
368 release_sock (sk);
369 return (1);
370 }
371 release_sock (sk);
372 return (0);
373 }
374
375 release_sock (sk);
376 return (0);
377 }
378
379 static int
380 tcp_ioctl (volatile struct sock *sk, int cmd, unsigned long arg)
381 {
382 PRINTK (("tcp_ioctl (sk=%X, cmd = %d, arg=%X)\n", sk, cmd, arg));
383 switch (cmd)
384 {
385 default:
386 return (-EINVAL);
387
388 case TIOCINQ:
389
390 {
391 unsigned long amount;
392
393 if (sk->state == TCP_LISTEN)
394 return (-EINVAL);
395
396 amount = 0;
397 sk->inuse = 1;
398 if (sk->rqueue != NULL)
399 {
400 amount = tcp_readable(sk);
401 }
402 release_sock (sk);
403 PRINTK (("returning %d\n", amount));
404 verify_area ((void *)arg, sizeof (unsigned long));
405 put_fs_long (amount, (unsigned long *)arg);
406 return (0);
407 }
408
409 case SIOCATMARK:
410 {
411 struct sk_buff *skb;
412 int answ=0;
413
414 sk->inuse = 1;
415 if (sk->rqueue != NULL)
416 {
417 skb = (struct sk_buff *)sk->rqueue->next;
418 if (sk->copied_seq+1 == skb->h.th->seq && skb->h.th->urg)
419 answ = 1;
420 }
421 release_sock (sk);
422 verify_area ((void *) arg, sizeof (unsigned long));
423 put_fs_long (answ, (void *) arg);
424 return (0);
425 }
426
427 case TIOCOUTQ:
428 {
429 unsigned long amount;
430 if (sk->state == TCP_LISTEN)
431 return (-EINVAL);
432 amount = sk->prot->wspace(sk)/2;
433 verify_area ((void *)arg, sizeof (unsigned long));
434 put_fs_long (amount, (unsigned long *)arg);
435 return (0);
436 }
437
438 }
439 }
440
441
442
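/* Compute the 16-bit ones-complement TCP checksum over the pseudo-header (source, destination, protocol, length) and the segment itself, using inline i386 assembly. */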
443 static unsigned short
444 tcp_check (struct tcp_header *th, int len, unsigned long saddr,
445 unsigned long daddr)
446 {
447 unsigned long sum;
448
449 if (saddr == 0) saddr = MY_IP_ADDR;
450 print_th (th);
451 __asm__("\t addl %%ecx,%%ebx\n"
452 "\t adcl %%edx,%%ebx\n"
453 "\t adcl $0, %%ebx\n"
454 : "=b" (sum)
455 : "0" (daddr), "c" (saddr), "d" ((net16(len) << 16) + IPPROTO_TCP*256)
456 : "cx","bx","dx" );
457
458 if (len > 3)
459 {
460 __asm__(
461 "\tclc\n"
462 "1:\n"
463 "\t lodsl\n"
464 "\t adcl %%eax, %%ebx\n"
465 "\t loop 1b\n"
466 "\t adcl $0, %%ebx\n"
467 : "=b" (sum) , "=S" (th)
468 : "0" (sum), "c" (len/4) ,"1" (th)
469 : "ax", "cx", "bx", "si" );
470 }
471
472
473 __asm__(
474 "\t movl %%ebx, %%ecx\n"
475 "\t shrl $16,%%ecx\n"
476 "\t addw %%cx, %%bx\n"
477 "\t adcw $0, %%bx\n"
478 : "=b" (sum)
479 : "0" (sum)
480 : "bx", "cx");
481
482
483 if ((len & 2) != 0)
484 {
485 __asm__("\t lodsw\n"
486 "\t addw %%ax,%%bx\n"
487 "\t adcw $0, %%bx\n"
488 : "=b" (sum), "=S" (th)
489 : "0" (sum) ,"1" (th)
490 : "si", "ax", "bx");
491 }
492
493
494 if ((len & 1) != 0)
495 {
496 __asm__("\t lodsb\n"
497 "\t movb $0,%%ah\n"
498 "\t addw %%ax,%%bx\n"
499 "\t adcw $0, %%bx\n"
500 : "=b" (sum)
501 : "0" (sum) ,"S" (th)
502 : "si", "ax", "bx");
503 }
504
505
506
507 return ((~sum) & 0xffff);
508 }
509
510
511 static void
512 tcp_send_check (struct tcp_header *th, unsigned long saddr,
513 unsigned long daddr, int len, volatile struct sock *sk)
514 {
515
516 th->check = 0;
517 if (sk && sk->no_check) return;
518 th->check = tcp_check (th, len, saddr, daddr);
519 return;
520 }
521
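/* Finish off the partially filled buffer in sk->send_tmp: checksum it, then send it at once if the send and congestion windows allow, otherwise append it to the write queue. */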
522 static void
523 tcp_send_partial(volatile struct sock *sk)
524 {
525 struct sk_buff *skb;
526
527 if (sk == NULL || sk->send_tmp == NULL) return;
528
529 skb = sk->send_tmp;
530
531 tcp_send_check (skb->h.th, sk->saddr, sk->daddr,
532 skb->len-(unsigned long)skb->h.th +
533 (unsigned long)(skb+1), sk);
534
535 skb->h.seq = sk->send_seq;
536 if (after (sk->send_seq , sk->window_seq) ||
537 sk->packets_out >= sk->cong_window)
538 {
539 PRINTK (("sk->cong_window = %d, sk->packets_out = %d\n",
540 sk->cong_window, sk->packets_out));
541 PRINTK (("sk->send_seq = %d, sk->window_seq = %d\n",
542 sk->send_seq, sk->window_seq));
543 skb->next = NULL;
544 skb->magic = TCP_WRITE_QUEUE_MAGIC;
545 if (sk->wback == NULL)
546 {
547 sk->wfront=skb;
548 }
549 else
550 {
551 sk->wback->next = skb;
552 }
553 sk->wback = skb;
554 }
555 else
556 {
557 sk->prot->queue_xmit (sk, skb->dev, skb,0);
558 }
559 sk->send_tmp = NULL;
560 }
561
562
563
564
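/* Build and transmit a bare ACK. If no buffer memory is available, note the debt in ack_backlog and let the timer try again. */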
565 static void
566 tcp_send_ack (unsigned long sequence, unsigned long ack,
567 volatile struct sock *sk,
568 struct tcp_header *th, unsigned long daddr)
569 {
570 struct sk_buff *buff;
571 struct tcp_header *t1;
572 struct device *dev=NULL;
573 int tmp;
574
575
576
577
578 buff=sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
579 if (buff == NULL)
580 {
581
582 sk->ack_backlog++;
583 if (sk->timeout != TIME_WRITE && tcp_connected (sk->state))
584 {
585 sk->timeout = TIME_WRITE;
586 sk->time_wait.len = 10;
587 reset_timer ((struct timer *)&sk->time_wait);
588 }
589 return;
590 }
591
592 buff->mem_addr = buff;
593 buff->mem_len = MAX_ACK_SIZE;
594 buff->lock = 0;
595 buff->len=sizeof (struct tcp_header);
596 buff->sk = sk;
597 t1 = (struct tcp_header *)(buff + 1);
598
599 tmp = sk->prot->build_header (buff, sk->saddr, daddr, &dev,
600 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
601 if (tmp < 0)
602 {
603 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
604 return;
605 }
606 buff->len += tmp;
607 t1 = (struct tcp_header *)((char *)t1 +tmp);
608
609 memcpy (t1, th, sizeof (*t1));
610
611
612 t1->dest = th->source;
613 t1->source = th->dest;
614 t1->seq = net32(sequence);
615 t1->ack = 1;
616 sk->window = sk->prot->rspace(sk);
617 t1->window = net16(sk->window);
618 t1->res1=0;
619 t1->res2=0;
620 t1->rst = 0;
621 t1->urg = 0;
622 t1->syn = 0;
623 t1->psh = 0;
624 t1->fin = 0;
625 if (ack == sk->acked_seq)
626 {
627 sk->ack_backlog = 0;
628 sk->bytes_rcv = 0;
629 sk->ack_timed = 0;
630 if (sk->send_head == NULL &&
631 sk->wfront == NULL)
632 {
633 delete_timer((struct timer *)&sk->time_wait);
634 sk->timeout = 0;
635 }
636
637 }
638 t1->ack_seq = net32(ack);
639 t1->doff = sizeof (*t1)/4;
640 tcp_send_check (t1, sk->saddr, daddr, sizeof (*t1), sk);
641 sk->prot->queue_xmit(sk, dev, buff, 1);
642 }
643
644
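/* Fill a TCP header from the socket's template (dummy_th), setting seq, ack_seq, PSH and the advertised window. Returns the header size in bytes. */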
645 static int
646 tcp_build_header(struct tcp_header *th, volatile struct sock *sk, int push)
647 {
648
649
650 memcpy (th,(void *) &(sk->dummy_th), sizeof (*th));
651 th->seq = net32(sk->send_seq);
652 th->psh = (push == 0) ? 1 : 0;
653 th->doff = sizeof (*th)/4;
654 th->ack = 1;
655 th->fin = 0;
656 sk->ack_backlog = 0;
657 sk->bytes_rcv = 0;
658 sk->ack_timed = 0;
659 th->ack_seq = net32(sk->acked_seq);
660 sk->window = sk->prot->rspace(sk);
661 th->window = net16(sk->window);
662
663 return (sizeof (*th));
664 }
665
666
667
668
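/* The send path: copy user data into buffers, appending to any partial buffer first, then either transmit immediately or place the segment on the write queue depending on the send and congestion windows. Blocks (unless nonblock) while the connection is still being established or memory is short. */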
669 static int
670 tcp_write (volatile struct sock *sk, unsigned char *from,
671 int len, int nonblock, unsigned flags)
672 {
673 int copied=0;
674 int copy;
675 int tmp;
676 struct sk_buff *skb;
677 unsigned char *buff;
678 struct proto *prot;
679 struct device *dev=NULL;
680
681 PRINTK (("tcp_write (sk=%X, from=%X, len=%d, nonblock=%d, flags=%X)\n",
682 sk, from, len, nonblock, flags));
683
684 prot = sk->prot;
685 while (len > 0)
686 {
687
688 if (sk->err)
689 {
690 if (copied) return (copied);
691 tmp = -sk->err;
692 sk->err = 0;
693 return (tmp);
694 }
695
696
697
698 sk->inuse = 1;
699 if (sk->shutdown & SEND_SHUTDOWN)
700 {
701 release_sock (sk);
702 sk->err = EPIPE;
703 if (copied) return (copied);
704 sk->err = 0;
705 return (-EPIPE);
706 }
707
708 while (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)
709 {
710
711 if (sk->err)
712 {
713 if (copied) return (copied);
714 tmp = -sk->err;
715 sk->err = 0;
716 return (tmp);
717 }
718
719 if (sk->state != TCP_SYN_SENT &&
720 sk->state != TCP_SYN_RECV)
721 {
722 release_sock (sk);
723 PRINTK (("tcp_write: return 1\n"));
724 if (copied) return (copied);
725
726 if (sk->err)
727 {
728 tmp = -sk->err;
729 sk->err = 0;
730 return (tmp);
731 }
732
733 if (sk->keepopen)
734 {
735 send_sig (SIGPIPE, current, 0);
736 }
737 return (-EPIPE);
738 }
739
740 if (nonblock || copied)
741 {
742 release_sock (sk);
743 PRINTK (("tcp_write: return 2\n"));
744 if (copied) return (copied);
745 return (-EAGAIN);
746 }
747
748
749
750
751
752
753
754 release_sock (sk);
755 cli();
756 if (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT &&
757 sk->err == 0)
758 {
759 interruptible_sleep_on (sk->sleep);
760 if (current->signal & ~current->blocked)
761 {
762 sti();
763 PRINTK (("tcp_write: return 3\n"));
764 if (copied) return (copied);
765 return (-ERESTARTSYS);
766 }
767 }
768 sti();
769 sk->inuse = 1;
770 }
771
772
773 if (sk->send_tmp != NULL)
774 {
775
776
777 skb = sk->send_tmp;
778 if (!(flags & MSG_OOB))
779 {
780 copy = min (sk->mss - skb->len + 128 + prot->max_header, len);
781
782
783 if (copy <= 0)
784 copy = 0;
785
786 memcpy_fromfs ((unsigned char *)(skb+1) + skb->len, from, copy);
787 skb->len += copy;
788 from += copy;
789 copied += copy;
790 len -= copy;
791 sk->send_seq += copy;
792 }
793
794 if (skb->len - (unsigned long)skb->h.th +
795 (unsigned long)(skb+1) >= sk->mss
796 || (flags & MSG_OOB))
797 {
798 tcp_send_partial (sk);
799 }
800 continue;
801
802 }
803
804
805
806
807 copy = min (sk->mtu, diff(sk->window_seq, sk->send_seq));
808
809
810 if (copy < 200 || copy > sk->mtu) copy = sk->mtu;
811 copy = min (copy, len);
812
813
814 if (sk->packets_out && copy < sk->mss && !(flags & MSG_OOB))
815 {
816
817 release_sock (sk);
818 skb=prot->wmalloc (sk,
819 sk->mss + 128 + prot->max_header + sizeof (*skb),
820 0, GFP_KERNEL);
821 sk->inuse = 1;
822 sk->send_tmp = skb;
823 if (skb != NULL)
824 skb->mem_len = sk->mss + 128 + prot->max_header+sizeof (*skb);
825 }
826 else
827 {
828
829 release_sock (sk);
830 skb=prot->wmalloc (sk, copy + prot->max_header+sizeof (*skb),0,
831 GFP_KERNEL);
832 sk->inuse = 1;
833 if (skb != NULL)
834 skb->mem_len = copy+prot->max_header+sizeof (*skb);
835 }
836
837
838 if (skb == NULL)
839 {
840 if (nonblock || copied)
841 {
842 release_sock (sk);
843 PRINTK (("tcp_write: return 4\n"));
844 if (copied) return (copied);
845 return (-EAGAIN);
846 }
847
848
849 tmp = sk->wmem_alloc;
850 release_sock (sk);
851
852
853 cli ();
854 if (tmp <= sk->wmem_alloc
855 && (sk->state == TCP_ESTABLISHED || sk->state == TCP_CLOSE_WAIT )
856 && sk->err == 0)
857 {
858 interruptible_sleep_on (sk->sleep);
859 if (current->signal & ~current->blocked)
860 {
861 sti();
862 PRINTK (("tcp_write: return 5\n"));
863 if (copied) return (copied);
864 return (-ERESTARTSYS);
865 }
866 }
867 sk->inuse = 1;
868 sti();
869 continue;
870 }
871
872 skb->mem_addr = skb;
873 skb->len = 0;
874 skb->sk = sk;
875 skb->lock = 0;
876 skb->free = 0;
877
878 buff =(unsigned char *)( skb+1);
879
880
881
882 tmp = prot->build_header (skb, sk->saddr, sk->daddr, &dev,
883 IPPROTO_TCP, sk->opt, skb->mem_len);
884 if (tmp < 0 )
885 {
886 prot->wfree (sk, skb->mem_addr, skb->mem_len);
887 release_sock (sk);
888 PRINTK (("tcp_write: return 6\n"));
889 if (copied) return (copied);
890 return (tmp);
891 }
892 skb->len += tmp;
893 skb->dev = dev;
894 buff+=tmp;
895 skb->h.th =(struct tcp_header *) buff;
896 tmp = tcp_build_header((struct tcp_header *)buff, sk, len-copy);
897
898 if (tmp < 0)
899 {
900 prot->wfree (sk, skb->mem_addr, skb->mem_len);
901 release_sock (sk);
902 PRINTK (("tcp_write: return 7\n"));
903 if (copied) return (copied);
904 return (tmp);
905 }
906
907 if (flags & MSG_OOB)
908 {
909 ((struct tcp_header *)buff)->urg = 1;
910 ((struct tcp_header *)buff)->urg_ptr = net16(copy);
911 }
912 skb->len += tmp;
913 memcpy_fromfs (buff+tmp, from, copy);
914
915 from += copy;
916 copied += copy;
917 len -= copy;
918 skb->len += copy;
919 skb->free = 0;
920 sk->send_seq += copy;
921
922 if (sk->send_tmp != NULL)
923 {
924 continue;
925 }
926
927 tcp_send_check ((struct tcp_header *)buff, sk->saddr, sk->daddr,
928 copy +sizeof (struct tcp_header), sk);
929
930
931 skb->h.seq = sk->send_seq;
932 if (after (sk->send_seq , sk->window_seq) ||
933 sk->packets_out >= sk->cong_window)
934 {
935 PRINTK (("sk->cong_window = %d, sk->packets_out = %d\n",
936 sk->cong_window, sk->packets_out));
937 PRINTK (("sk->send_seq = %d, sk->window_seq = %d\n",
938 sk->send_seq, sk->window_seq));
939 skb->next = NULL;
940 skb->magic = TCP_WRITE_QUEUE_MAGIC;
941 if (sk->wback == NULL)
942 {
943 sk->wfront=skb;
944 }
945 else
946 {
947 sk->wback->next = skb;
948 }
949 sk->wback = skb;
950 }
951 else
952 {
953 prot->queue_xmit (sk, dev, skb,0);
954 }
955 }
956 sk->err = 0;
957 release_sock (sk);
958 PRINTK (("tcp_write: return 8\n"));
959 return (copied);
960 }
961
962 static int
963 tcp_sendto (volatile struct sock *sk, unsigned char *from,
964 int len, int nonblock, unsigned flags,
965 struct sockaddr_in *addr, int addr_len)
966 {
967 struct sockaddr_in sin;
968 if (addr_len < sizeof (sin))
969 return (-EINVAL);
970 memcpy_fromfs (&sin, addr, sizeof (sin));
971 if (sin.sin_family && sin.sin_family != AF_INET)
972 return (-EINVAL);
973 if (sin.sin_port != sk->dummy_th.dest)
974 return (-EINVAL);
975 if (sin.sin_addr.s_addr != sk->daddr)
976 return (-EINVAL);
977 return (tcp_write (sk, from, len, nonblock, flags));
978 }
979
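/* If ACKs are owed (ack_backlog != 0), send one immediately so the peer sees the newly opened receive window. */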
980 static void
981 tcp_read_wakeup(volatile struct sock *sk)
982 {
983 int tmp;
984 struct device *dev = NULL;
985 struct tcp_header *t1;
986 struct sk_buff *buff;
987
988 if (!sk->ack_backlog ) return;
989 PRINTK (("in tcp read wakeup\n"));
990
991
992
993
994
995
996
997 buff=sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
998 if (buff == NULL)
999 {
1000
1001 sk->timeout = TIME_WRITE;
1002 sk->time_wait.len = 10;
1003 reset_timer((struct timer *) &sk->time_wait);
1004 return;
1005 }
1006
1007 buff->mem_addr = buff;
1008 buff->mem_len = MAX_ACK_SIZE;
1009 buff->lock = 0;
1010 buff->len=sizeof (struct tcp_header);
1011 buff->sk = sk;
1012
1013
1014 tmp = sk->prot->build_header (buff, sk->saddr, sk->daddr, &dev,
1015 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
1016 if (tmp < 0)
1017 {
1018 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
1019 return;
1020 }
1021
1022 buff->len += tmp;
1023 t1 = (struct tcp_header *)((char *)(buff+1) +tmp);
1024
1025 memcpy (t1,(void *) &sk->dummy_th, sizeof (*t1));
1026 t1->seq = net32(sk->send_seq);
1027 t1->ack = 1;
1028 t1->res1=0;
1029 t1->res2=0;
1030 t1->rst = 0;
1031 t1->urg = 0;
1032 t1->syn = 0;
1033 t1->psh = 0;
1034 sk->ack_backlog = 0;
1035 sk->bytes_rcv = 0;
1036 sk->window = sk->prot->rspace(sk);
1037 t1->window = net16(sk->window);
1038 t1->ack_seq = net32(sk->acked_seq);
1039 t1->doff = sizeof (*t1)/4;
1040 tcp_send_check (t1, sk->saddr, sk->daddr, sizeof (*t1), sk);
1041 sk->prot->queue_xmit(sk, dev, buff, 1);
1042 }
1043
1044
1045
1046
1047
1048
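/* Free receive buffers that have been fully consumed, then either send a window update right away or schedule a delayed ACK. */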
1049 static void
1050 cleanup_rbuf (volatile struct sock *sk)
1051 {
1052 PRINTK (("cleaning rbuf for sk=%X\n",sk));
1053
1054
1055 while (sk->rqueue != NULL )
1056 {
1057 struct sk_buff *skb;
1058 skb=(struct sk_buff *)sk->rqueue->next;
1059 if (!skb->used) break;
1060 if (sk->rqueue == skb)
1061 {
1062 sk->rqueue = NULL;
1063 }
1064 else
1065 {
1066 skb->next->prev = skb->prev;
1067 skb->prev->next = skb->next;
1068 }
1069 skb->sk = sk;
1070 kfree_skb (skb, FREE_READ);
1071 }
1072
1073
1074
1075
1076 PRINTK (("sk->window left = %d, sk->prot->rspace(sk)=%d\n",
1077 sk->window - sk->bytes_rcv, sk->prot->rspace(sk)));
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087 sk->ack_backlog ++;
1088 if ((sk->prot->rspace(sk) >
1089 (sk->window - sk->bytes_rcv + sk->mtu)))
1090 {
1091
1092 tcp_read_wakeup (sk);
1093 }
1094 else
1095 {
1096
1097 if ( before (jiffies + TCP_ACK_TIME, sk->time_wait.when))
1098 {
1099 sk->time_wait.len = TCP_ACK_TIME;
1100 sk->timeout = TIME_WRITE;
1101 reset_timer ((struct timer *)&sk->time_wait);
1102 }
1103 }
1104 }
1105
1106
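/* MSG_OOB read path: wait until an unread urgent segment is queued, then copy its urgent bytes (up to urg_ptr) to the user. */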
1107 static int
1108 tcp_read_urg(volatile struct sock * sk, int nonblock,
1109 unsigned char *to, int len, unsigned flags)
1110 {
1111 int copied = 0;
1112
1113 struct sk_buff *skb;
1114 PRINTK (("tcp_read_urg(sk=%X, to=%X, len=%d, flags=%X)\n",
1115 sk, to, len, flags));
1116
1117 while (len > 0)
1118 {
1119 sk->inuse = 1;
1120 while (sk->urg==0 || sk->rqueue == NULL)
1121 {
1122 if (sk->err)
1123 {
1124 int tmp;
1125 release_sock (sk);
1126 if (copied) return (copied);
1127 tmp = -sk->err;
1128 sk->err = 0;
1129 return (tmp);
1130 }
1131
1132 if (sk->state == TCP_CLOSE || sk->done)
1133 {
1134 release_sock (sk);
1135 if (copied) return (copied);
1136 if (!sk->done)
1137 {
1138 sk->done = 1;
1139 return (0);
1140 }
1141 return (-ENOTCONN);
1142 }
1143
1144 if (sk->shutdown & RCV_SHUTDOWN)
1145 {
1146 release_sock(sk);
1147 if (copied == 0)
1148 sk->done = 1;
1149 return (copied);
1150 }
1151
1152 if (nonblock || copied)
1153 {
1154 release_sock (sk);
1155 if (copied) return (copied);
1156 return (-EAGAIN);
1157 }
1158
1159
1160 release_sock (sk);
1161 cli();
1162 if ((sk->urg == 0 || sk->rqueue == NULL) && sk->err == 0
1163 && !(sk->shutdown & RCV_SHUTDOWN) )
1164 {
1165 interruptible_sleep_on (sk->sleep);
1166 if (current->signal & ~current->blocked)
1167 {
1168 sti();
1169 if (copied) return (copied);
1170 return (-ERESTARTSYS);
1171 }
1172 }
1173 sti();
1174 sk->inuse = 1;
1175 }
1176
1177 skb = (struct sk_buff *)sk->rqueue->next;
1178 do {
1179 int amt;
1180 if (skb->h.th->urg && !skb->urg_used)
1181 {
1182 if (skb->h.th->urg_ptr == 0)
1183 {
1184 skb->h.th->urg_ptr = net16(skb->len);
1185 }
1186 amt = min(net16(skb->h.th->urg_ptr),len);
1187 verify_area (to, amt);
1188 memcpy_tofs (to, (unsigned char *)(skb->h.th) +
1189 skb->h.th->doff*4, amt);
1190
1191 if (!(flags & MSG_PEEK))
1192 {
1193 skb->urg_used = 1;
1194 sk->urg --;
1195 }
1196 release_sock (sk);
1197 copied += amt;
1198 return (copied);
1199 }
1200 skb = (struct sk_buff *)skb->next;
1201 } while (skb != sk->rqueue->next);
1202 }
1203 sk->urg = 0;
1204 release_sock(sk);
1205 return (0);
1206 }
1207
1208
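/* Copy in-sequence data from the receive queue to user space, sleeping when nothing is available; handles MSG_PEEK, urgent-data skipping and partial reads. */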
1209 static int
1210 tcp_read (volatile struct sock *sk, unsigned char *to,
1211 int len, int nonblock, unsigned flags)
1212 {
1213 int copied=0;
1214 struct sk_buff *skb;
1215 unsigned long offset;
1216 unsigned long used;
1217
1218 if (len == 0) return (0);
1219 if (len < 0)
1220 {
1221 return (-EINVAL);
1222 }
1223
1224
1225 if (sk->state == TCP_LISTEN) return (-ENOTCONN);
1226
1227
1228 if ((flags & MSG_OOB))
1229 return (tcp_read_urg (sk, nonblock, to, len, flags));
1230
1231
1232 sk->inuse = 1;
1233 if (sk->rqueue != NULL)
1234 skb=(struct sk_buff *)sk->rqueue->next;
1235 else
1236 skb = NULL;
1237
1238 PRINTK(("tcp_read (sk=%X, to=%X, len=%d, nonblock=%d, flags=%X)\n",
1239 sk, to, len, nonblock, flags));
1240
1241 while ( len > 0)
1242 {
1243 while ( skb == NULL || before (sk->copied_seq+1, skb->h.th->seq) ||
1244 skb->used)
1245
1246 {
1247
1248 PRINTK(("skb = %X:\n",skb));
1249
1250 cleanup_rbuf(sk);
1251
1252 if (sk->err)
1253 {
1254 int tmp;
1255 release_sock (sk);
1256 if (copied)
1257 {
1258 PRINTK (("tcp_read: returning %d\n", copied));
1259 return (copied);
1260 }
1261 tmp = -sk->err;
1262 sk->err = 0;
1263 return (tmp);
1264 }
1265
1266 if (sk->state == TCP_CLOSE)
1267 {
1268 release_sock (sk);
1269 if (copied)
1270 {
1271 PRINTK (("tcp_read: returning %d\n", copied));
1272 return (copied);
1273 }
1274 if (!sk->done)
1275 {
1276 sk->done = 1;
1277 return (0);
1278 }
1279 return (-ENOTCONN);
1280 }
1281
1282 if (sk->shutdown & RCV_SHUTDOWN)
1283 {
1284 release_sock (sk);
1285 if (copied == 0) sk->done = 1;
1286 PRINTK (("tcp_read: returning %d\n", copied));
1287 return (copied);
1288 }
1289
1290 if (nonblock || copied)
1291 {
1292 release_sock (sk);
1293 if (copied)
1294 {
1295 PRINTK (("tcp_read: returning %d\n", copied));
1296 return (copied);
1297 }
1298 return (-EAGAIN);
1299 }
1300
1301 if ((flags & MSG_PEEK) && copied != 0)
1302 {
1303 release_sock (sk);
1304 PRINTK (("tcp_read: returning %d\n", copied));
1305 return (copied);
1306 }
1307
1308 PRINTK (("tcp_read about to sleep. state = %d\n",sk->state));
1309
1310 release_sock (sk);
1311
1312 cli();
1313 if ( sk->shutdown & RCV_SHUTDOWN || sk->err != 0)
1314 {
1315 sk->inuse = 1;
1316 sti();
1317 continue;
1318 }
1319
1320 if ( sk->rqueue == NULL ||
1321 before (sk->copied_seq+1, sk->rqueue->next->h.th->seq) )
1322 {
1323 interruptible_sleep_on (sk->sleep);
1324 if (current->signal & ~current->blocked)
1325 {
1326 sti ();
1327 if (copied)
1328 {
1329 PRINTK (("tcp_read: returning %d\n", copied));
1330 return (copied);
1331 }
1332
1333 return (-ERESTARTSYS);
1334 }
1335 }
1336 sti();
1337 PRINTK (("tcp_read woke up. \n"));
1338
1339 sk->inuse = 1;
1340
1341 if (sk->rqueue != NULL)
1342 skb=(struct sk_buff *)sk->rqueue->next;
1343 else
1344 skb = NULL;
1345
1346 }
1347
1348
1349
1350
1351 offset = sk->copied_seq+1 - skb->h.th->seq;
1352
1353 if (skb->h.th->syn) offset --;
1354 if (offset < skb->len )
1355 {
1356
1357
1358 if (skb->h.th->urg)
1359 {
1360 if (skb->urg_used)
1361 {
1362 sk->copied_seq += net16(skb->h.th->urg_ptr);
1363 offset += net16(skb->h.th->urg_ptr);
1364 if (offset >= skb->len)
1365 {
1366 skb->used = 1;
1367 skb = (struct sk_buff *)skb->next;
1368 continue;
1369 }
1370 }
1371 else
1372 {
1373 release_sock (sk);
1374 if (copied) return (copied);
1375 return (-EIO);
1376 }
1377 }
1378 used = min(skb->len - offset, len);
1379
1380 verify_area (to, used);
1381 memcpy_tofs(to, ((unsigned char *)skb->h.th) +
1382 skb->h.th->doff*4 +
1383 offset,
1384 used);
1385 copied += used;
1386 len -= used;
1387 to += used;
1388 if (!(flags & MSG_PEEK))
1389 sk->copied_seq += used;
1390
1391
1392
1393
1394 if (!(flags & MSG_PEEK) &&
1395 (!skb->h.th->urg || skb->urg_used) &&
1396 (used + offset >= skb->len) )
1397 skb->used = 1;
1398
1399
1400
1401 if ( skb->h.th->psh || skb->h.th->urg)
1402 {
1403 break;
1404 }
1405 }
1406 else
1407 {
1408 skb->used = 1;
1409 }
1410 skb=(struct sk_buff *)skb->next;
1411 }
1412 cleanup_rbuf (sk);
1413 release_sock (sk);
1414 PRINTK (("tcp_read: returning %d\n", copied));
1415 if (copied == 0 && nonblock) return (-EAGAIN);
1416 return (copied);
1417 }
1418
1419
1420
1421
1422
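/* shutdown() for the send side: flush any partial buffer, send a FIN and move to FIN_WAIT1 (FIN_WAIT2 when the connection was not in ESTABLISHED). */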
1423 void
1424 tcp_shutdown (volatile struct sock *sk, int how)
1425 {
1426
1427
1428 struct sk_buff *buff;
1429 struct tcp_header *t1,*th;
1430 struct proto *prot;
1431 int tmp;
1432 struct device *dev=NULL;
1433
1434
1435
1436
1437
1438
1439 if (sk->state == TCP_FIN_WAIT1 ||
1440 sk->state == TCP_FIN_WAIT2)
1441 return;
1442
1443 if (!(how & SEND_SHUTDOWN)) return;
1444 sk->inuse = 1;
1445
1446
1447 if (sk->send_tmp)
1448 tcp_send_partial(sk);
1449
1450 prot = (struct proto *)sk->prot;
1451 th=(struct tcp_header *)&sk->dummy_th;
1452 release_sock (sk);
1453 buff=prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
1454 if (buff == NULL)
1455 {
1456 return;
1457 }
1458 sk->inuse = 1;
1459
1460
1461 PRINTK(("tcp_shutdown_send buff = %X\n", buff));
1462 buff->mem_addr = buff;
1463 buff->mem_len = MAX_RESET_SIZE;
1464 buff->lock = 0;
1465 buff->sk = sk;
1466 buff->len = sizeof (*t1);
1467
1468 t1=(struct tcp_header *)(buff + 1);
1469
1470 tmp = prot->build_header (buff,sk->saddr, sk->daddr, &dev,
1471 IPPROTO_TCP, sk->opt,
1472 sizeof(struct tcp_header));
1473 if (tmp < 0)
1474 {
1475 prot->wfree (sk,buff->mem_addr, buff->mem_len);
1476 release_sock(sk);
1477 PRINTK (("Unable to build header for fin.\n"));
1478 return;
1479 }
1480
1481 t1 = (struct tcp_header *)((char *)t1 +tmp);
1482 buff ->len += tmp;
1483 buff->dev = dev;
1484
1485 memcpy (t1, th, sizeof (*t1));
1486
1487 t1->seq = net32(sk->send_seq);
1488
1489 sk->send_seq++;
1490 buff->h.seq = sk->send_seq;
1491 t1->ack = 1;
1492
1493 t1->ack_seq = net32(sk->acked_seq);
1494 t1->window = net16(sk->prot->rspace(sk));
1495 t1->fin = 1;
1496 t1->rst = 0;
1497
1498 t1->doff = sizeof (*t1)/4;
1499 tcp_send_check (t1, sk->saddr, sk->daddr, sizeof (*t1), sk);
1500
1501
1502
1503 if (sk->wback != NULL)
1504 {
1505 buff->next = NULL;
1506 sk->wback->next = buff;
1507 sk->wback = buff;
1508 buff->magic = TCP_WRITE_QUEUE_MAGIC;
1509 }
1510 else
1511 {
1512 sk->prot->queue_xmit (sk, dev, buff,0);
1513 }
1514
1515 if (sk->state == TCP_ESTABLISHED)
1516 {
1517 sk->state = TCP_FIN_WAIT1;
1518 }
1519 else
1520 {
1521 sk->state = TCP_FIN_WAIT2;
1522 }
1523 release_sock(sk);
1524 }
1525
1526
1527 static int
1528 tcp_recvfrom (volatile struct sock *sk, unsigned char *to,
1529 int to_len, int nonblock, unsigned flags,
1530 struct sockaddr_in *addr, int *addr_len)
1531 {
1532 int result = tcp_read(sk, to, to_len, nonblock, flags);
1533 struct sockaddr_in sin;
1534 int len;
1535 if (result < 0)
1536 return (result);
1537 len = get_fs_long(addr_len);
1538 if (len > sizeof (sin))
1539 len = sizeof (sin);
1540 sin.sin_family = AF_INET;
1541 sin.sin_port = sk->dummy_th.dest;
1542 sin.sin_addr.s_addr = sk->daddr;
1543 verify_area (addr, len);
1544 memcpy_tofs (addr, &sin, len);
1545 verify_area (addr_len, sizeof (len));
1546 put_fs_long (len, addr_len);
1547 return (result);
1548 }
1549
1550
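/* Build and send a bare RST in reply to the given segment, addressed back to its source. */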
1551 static void
1552 tcp_reset(unsigned long saddr, unsigned long daddr, struct tcp_header *th,
1553 struct proto *prot, struct options *opt, struct device *dev)
1554 {
1555
1556
1557 struct sk_buff *buff;
1558 struct tcp_header *t1;
1559 int tmp;
1560 buff=prot->wmalloc(NULL, MAX_RESET_SIZE,1, GFP_ATOMIC);
1561 if (buff == NULL) return;
1562
1563 PRINTK(("tcp_reset buff = %X\n", buff));
1564 buff->mem_addr = buff;
1565 buff->mem_len = MAX_RESET_SIZE;
1566 buff->lock = 0;
1567 buff->len = sizeof (*t1);
1568 buff->sk = NULL;
1569 buff->dev = dev;
1570
1571 t1=(struct tcp_header *)(buff + 1);
1572
1573 tmp = prot->build_header (buff, saddr, daddr, &dev, IPPROTO_TCP, opt,
1574 sizeof(struct tcp_header));
1575 if (tmp < 0)
1576 {
1577 prot->wfree (NULL,buff->mem_addr, buff->mem_len);
1578 return;
1579 }
1580 t1 = (struct tcp_header *)((char *)t1 +tmp);
1581 buff->len += tmp;
1582 memcpy (t1, th, sizeof (*t1));
1583
1584 t1->dest = th->source;
1585 t1->source = th->dest;
1586 t1->seq = th->ack_seq;
1587
1588 t1->rst = 1;
1589 t1->ack = 0;
1590 t1->syn = 0;
1591 t1->urg = 0;
1592 t1->fin = 0;
1593 t1->psh = 0;
1594 t1->doff = sizeof (*t1)/4;
1595 tcp_send_check (t1, saddr, daddr, sizeof (*t1), NULL);
1596 prot->queue_xmit(NULL, dev, buff, 1);
1597
1598 }
1599
1600
1601
1602
1603
1604
1605
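/* Incoming SYN on a listening socket: clone the listener into a new sock in SYN_RECV, answer with SYN+ACK advertising our MSS, and queue the request on the listener for accept(). */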
1606 static void
1607 tcp_conn_request(volatile struct sock *sk, struct sk_buff *skb,
1608 unsigned long daddr,
1609 unsigned long saddr, struct options *opt, struct device *dev)
1610 {
1611 struct sk_buff *buff;
1612 struct tcp_header *t1;
1613 unsigned char *ptr;
1614 volatile struct sock *newsk;
1615 struct tcp_header *th;
1616 int tmp;
1617 th = skb->h.th;
1618
1619 PRINTK (("tcp_conn_request (sk = %X, skb = %X, daddr = %X, saddr = %X, \n"
1620 " opt = %X, dev = %X)\n",
1621 sk, skb, daddr, saddr, opt, dev));
1622
1623
1624 if (!sk->dead)
1625 {
1626 wake_up(sk->sleep);
1627 }
1628 else
1629 {
1630 PRINTK (("tcp_conn_request on dead socket\n"));
1631 tcp_reset (daddr, saddr, th, sk->prot, opt, dev);
1632 kfree_skb (skb, FREE_READ);
1633 return;
1634 }
1635
1636
1637
1638 if (sk->ack_backlog >= sk->max_ack_backlog)
1639 {
1640 kfree_skb (skb, FREE_READ);
1641 return;
1642 }
1643
1644
1645
1646
1647
1648
1649
1650 newsk = kmalloc(sizeof (struct sock), GFP_ATOMIC);
1651 if (newsk == NULL)
1652 {
1653
1654 kfree_skb (skb, FREE_READ);
1655 return;
1656 }
1657
1658
1659 PRINTK (("newsk = %X\n", newsk));
1660 memcpy ((void *)newsk, (void *)sk, sizeof (*newsk));
1661 newsk->wback = NULL;
1662 newsk->wfront = NULL;
1663 newsk->rqueue = NULL;
1664 newsk->send_head = NULL;
1665 newsk->send_tail = NULL;
1666 newsk->back_log = NULL;
1667 newsk->blog = 0;
1668 newsk->intr = 0;
1669 newsk->proc = 0;
1670 newsk->done = 0;
1671 newsk->send_tmp = NULL;
1672 newsk->pair = NULL;
1673 newsk->wmem_alloc = 0;
1674 newsk->rmem_alloc = 0;
1675
1676 newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;
1677
1678 newsk->err = 0;
1679 newsk->shutdown = 0;
1680 newsk->ack_backlog = 0;
1681 newsk->acked_seq = skb->h.th->seq+1;
1682 newsk->fin_seq = skb->h.th->seq;
1683 newsk->copied_seq = skb->h.th->seq;
1684 newsk->state = TCP_SYN_RECV;
1685 newsk->timeout = 0;
1686 newsk->send_seq = timer_seq*SEQ_TICK-seq_offset;
1687 newsk->rcv_ack_seq = newsk->send_seq;
1688 newsk->urg =0;
1689 newsk->retransmits = 0;
1690 newsk->destroy = 0;
1691 newsk->time_wait.sk = newsk;
1692 newsk->time_wait.next = NULL;
1693 newsk->dummy_th.source = skb->h.th->dest;
1694 newsk->dummy_th.dest = skb->h.th->source;
1695
1696 newsk->daddr=saddr;
1697 newsk->saddr=daddr;
1698
1699 put_sock (newsk->num,newsk);
1700 newsk->dummy_th.res1=0;
1701 newsk->dummy_th.doff=6;
1702 newsk->dummy_th.fin=0;
1703 newsk->dummy_th.syn=0;
1704 newsk->dummy_th.rst=0;
1705 newsk->dummy_th.psh=0;
1706 newsk->dummy_th.ack=0;
1707 newsk->dummy_th.urg=0;
1708 newsk->dummy_th.res2=0;
1709 newsk->acked_seq = skb->h.th->seq+1;
1710 newsk->copied_seq = skb->h.th->seq;
1711
1712 if (skb->h.th->doff == 5)
1713 {
1714 newsk->mtu=576-HEADER_SIZE;
1715 }
1716 else
1717 {
1718 ptr = (unsigned char *)(skb->h.th + 1);
1719 if (ptr[0] != 2 || ptr[1] != 4)
1720 {
1721 newsk->mtu=576-HEADER_SIZE;
1722 }
1723 else
1724 {
1725 newsk->mtu = min (ptr[2]*256+ptr[3]-HEADER_SIZE,
1726 dev->mtu-HEADER_SIZE);
1727 }
1728 }
1729
1730 buff=newsk->prot->wmalloc(newsk,MAX_SYN_SIZE,1, GFP_ATOMIC);
1731 if (buff == NULL)
1732 {
1733 sk->err = -ENOMEM;
1734 newsk->dead = 1;
1735 release_sock (newsk);
1736 kfree_skb (skb, FREE_READ);
1737 return;
1738 }
1739
1740 buff->lock = 0;
1741 buff->mem_addr = buff;
1742 buff->mem_len = MAX_SYN_SIZE;
1743 buff->len=sizeof (struct tcp_header)+4;
1744 buff->sk = newsk;
1745
1746 t1=(struct tcp_header *)(buff + 1);
1747
1748
1749 tmp = sk->prot->build_header (buff, newsk->saddr, newsk->daddr, &dev,
1750 IPPROTO_TCP, NULL, MAX_SYN_SIZE);
1751
1752
1753 if (tmp < 0)
1754 {
1755 sk->err = tmp;
1756 sk->prot->wfree(newsk, buff->mem_addr, buff->mem_len);
1757 newsk->dead = 1;
1758 release_sock (newsk);
1759 skb->sk = sk;
1760 kfree_skb (skb, FREE_READ);
1761 return;
1762 }
1763
1764 buff->len += tmp;
1765 t1 = (struct tcp_header *)((char *)t1 +tmp);
1766
1767 memcpy (t1, skb->h.th, sizeof (*t1));
1768 buff->h.seq = newsk->send_seq;
1769
1770 t1->dest = skb->h.th->source;
1771 t1->source = newsk->dummy_th.source;
1772 t1->seq = net32(newsk->send_seq++);
1773 t1->ack = 1;
1774 newsk->window = newsk->prot->rspace(newsk);
1775 t1->window = net16(newsk->window);
1776 t1->res1=0;
1777 t1->res2=0;
1778 t1->rst = 0;
1779 t1->urg = 0;
1780 t1->psh = 0;
1781 t1->syn = 1;
1782 t1->ack_seq = net32(skb->h.th->seq+1);
1783 t1->doff = sizeof (*t1)/4+1;
1784
1785 ptr = (unsigned char *)(t1+1);
1786 ptr[0]=2;
1787 ptr[1]=4;
1788 ptr[2]=((dev->mtu - HEADER_SIZE) >> 8) & 0xff;
1789 ptr[3]=(dev->mtu - HEADER_SIZE) & 0xff;
1790
1791 tcp_send_check (t1, daddr, saddr, sizeof (*t1)+4, newsk);
1792 newsk->prot->queue_xmit(newsk, dev, buff, 0);
1793
1794 newsk->time_wait.len = TCP_CONNECT_TIME;
1795 PRINTK (("newsk->time_wait.sk = %X\n", newsk->time_wait.sk));
1796 reset_timer ((struct timer *)&newsk->time_wait);
1797 skb->sk = newsk;
1798
1799 sk->rmem_alloc -= skb->mem_len;
1800 newsk->rmem_alloc += skb->mem_len;
1801
1802 if (sk->rqueue == NULL)
1803 {
1804 skb->next = skb;
1805 skb->prev = skb;
1806 sk->rqueue = skb;
1807 }
1808 else
1809 {
1810 skb->next = sk->rqueue;
1811 skb->prev = sk->rqueue->prev;
1812 sk->rqueue->prev = skb;
1813 skb->prev->next = skb;
1814 }
1815 sk->ack_backlog++;
1816 release_sock (newsk);
1817 }
1818
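/* close(): discard anything left on the receive queue (remembering to send RST if unread data is thrown away), send a FIN where the state calls for one, and advance the state machine toward CLOSE. */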
1819 static void
1820 tcp_close (volatile struct sock *sk, int timeout)
1821 {
1822
1823
1824 struct sk_buff *buff;
1825 int need_reset = 0;
1826 struct tcp_header *t1,*th;
1827 struct proto *prot;
1828 struct device *dev=NULL;
1829 int tmp;
1830 PRINTK (("tcp_close ((struct sock *)%X, %d)\n",sk, timeout));
1831 sk->inuse = 1;
1832 sk->keepopen = 1;
1833 sk->shutdown = SHUTDOWN_MASK;
1834
1835 if (!sk->dead)
1836 wake_up (sk->sleep);
1837
1838
1839
1840 if (sk->rqueue != NULL)
1841 {
1842 struct sk_buff *skb;
1843 struct sk_buff *skb2;
1844 skb = sk->rqueue;
1845 do {
1846 skb2=(struct sk_buff *)skb->next;
1847
1848 if (skb->len > 0 &&
1849 after (skb->h.th->seq + skb->len + 1, sk->copied_seq))
1850 need_reset = 1;
1851 kfree_skb (skb, FREE_READ);
1852 skb=skb2;
1853 } while (skb != sk->rqueue);
1854 }
1855 sk->rqueue = NULL;
1856
1857
1858 if (sk->send_tmp)
1859 {
1860 tcp_send_partial (sk);
1861 }
1862
1863 switch (sk->state)
1864 {
1865
1866 case TCP_FIN_WAIT1:
1867 case TCP_FIN_WAIT2:
1868 case TCP_LAST_ACK:
1869
1870 sk->time_wait.len = 4*sk->rtt;
1871 sk->timeout = TIME_CLOSE;
1872 reset_timer ((struct timer *)&sk->time_wait);
1873 if (timeout)
1874 tcp_time_wait(sk);
1875 release_sock (sk);
1876 break;
1877
1878 case TCP_TIME_WAIT:
1879 if (timeout)
1880 sk->state = TCP_CLOSE;
1881 release_sock (sk);
1882 return;
1883
1884 case TCP_LISTEN:
1885 sk->state = TCP_CLOSE;
1886 release_sock(sk);
1887 return;
1888
1889 case TCP_CLOSE:
1890
1891 release_sock(sk);
1892 return;
1893
1894
1895 case TCP_CLOSE_WAIT:
1896 case TCP_ESTABLISHED:
1897 case TCP_SYN_SENT:
1898 case TCP_SYN_RECV:
1899
1900 prot = (struct proto *)sk->prot;
1901 th=(struct tcp_header *)&sk->dummy_th;
1902
1903 buff=prot->wmalloc(sk, MAX_FIN_SIZE,1, GFP_ATOMIC);
1904 if (buff == NULL)
1905 {
1906
1907 if (sk->state != TCP_CLOSE_WAIT)
1908 sk->state = TCP_ESTABLISHED;
1909 sk->timeout = TIME_CLOSE;
1910 sk->time_wait.len = 100;
1911 reset_timer ((struct timer *)&sk->time_wait);
1912 return;
1913 }
1914
1915 buff->lock = 0;
1916 buff->mem_addr = buff;
1917 buff->mem_len = MAX_FIN_SIZE;
1918 buff->sk = sk;
1919 buff->len = sizeof (*t1);
1920 t1=(struct tcp_header *)(buff + 1);
1921
1922 tmp = prot->build_header (buff,sk->saddr, sk->daddr, &dev,
1923 IPPROTO_TCP, sk->opt,
1924 sizeof(struct tcp_header));
1925 if (tmp < 0)
1926 {
1927 prot->wfree (sk,buff->mem_addr, buff->mem_len);
1928 PRINTK (("Unable to build header for fin.\n"));
1929 release_sock(sk);
1930 return;
1931 }
1932
1933 t1 = (struct tcp_header *)((char *)t1 +tmp);
1934 buff ->len += tmp;
1935 buff->dev = dev;
1936 memcpy (t1, th, sizeof (*t1));
1937 t1->seq = net32(sk->send_seq);
1938 sk->send_seq++;
1939 buff->h.seq = sk->send_seq;
1940 t1->ack = 1;
1941
1942
1943 sk->delay_acks = 0;
1944 t1->ack_seq = net32(sk->acked_seq);
1945 t1->window = net16(sk->prot->rspace(sk));
1946 t1->fin = 1;
1947 t1->rst = need_reset;
1948 t1->doff = sizeof (*t1)/4;
1949 tcp_send_check (t1, sk->saddr, sk->daddr, sizeof (*t1), sk);
1950
1951 if (sk->wfront == NULL)
1952 {
1953 prot->queue_xmit(sk, dev, buff, 0);
1954 }
1955 else
1956 {
1957 sk->time_wait.len = sk->rtt;
1958 sk->timeout = TIME_WRITE;
1959 reset_timer ((struct timer *)&sk->time_wait);
1960 buff->next = NULL;
1961 if (sk->wback == NULL)
1962 {
1963 sk->wfront=buff;
1964 }
1965 else
1966 {
1967 sk->wback->next = buff;
1968 }
1969 sk->wback = buff;
1970 buff->magic = TCP_WRITE_QUEUE_MAGIC;
1971
1972 }
1973
1974 if (sk->state == TCP_CLOSE_WAIT)
1975 {
1976 sk->state = TCP_FIN_WAIT2;
1977 }
1978 else
1979 {
1980 sk->state = TCP_FIN_WAIT1;
1981 }
1982 }
1983 release_sock (sk);
1984 }
1985
1986
1987
1988
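/* Push queued segments whose sequence numbers now fall within the send window, limited by the congestion window. */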
1989 static void
1990 tcp_write_xmit (volatile struct sock *sk)
1991 {
1992 struct sk_buff *skb;
1993 PRINTK (("tcp_write_xmit (sk=%X)\n",sk));
1994 while (sk->wfront != NULL && before (sk->wfront->h.seq, sk->window_seq) &&
1995 sk->packets_out < sk->cong_window)
1996 {
1997 skb = sk->wfront;
1998 sk->wfront = (struct sk_buff *)skb->next;
1999 if (sk->wfront == NULL)
2000 sk->wback = NULL;
2001 skb->next = NULL;
2002 if (skb->magic != TCP_WRITE_QUEUE_MAGIC)
2003 {
2004 PRINTK (("tcp.c skb with bad magic (%X) on write queue. Squashing "
2005 "queue\n", skb->magic));
2006 sk->wfront = NULL;
2007 sk->wback = NULL;
2008 return;
2009 }
2010 skb->magic = 0;
2011 PRINTK(("Sending a packet.\n"));
2012 sk->prot->queue_xmit (sk, skb->dev, skb, skb->free);
2013 }
2014 }
2015
2016
2017
2018
2019
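/* Handle an incoming ACK: validate it, update the send window, grow the congestion window, free acknowledged buffers, restart transmission or timers, and drive the closing states (FIN_WAIT2, LAST_ACK, TIME_WAIT). */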
2020 static int
2021 tcp_ack (volatile struct sock *sk, struct tcp_header *th, unsigned long saddr)
2022 {
2023 unsigned long ack;
2024 ack = net32(th->ack_seq);
2025
2026 PRINTK (("tcp_ack ack=%d, window=%d, "
2027 "sk->rcv_ack_seq=%d, sk->window_seq = %d\n",
2028 ack, net16(th->window), sk->rcv_ack_seq, sk->window_seq));
2029 if (after (ack, sk->send_seq+1) || before (ack, sk->rcv_ack_seq-1))
2030 {
2031 if (after (ack, sk->send_seq) || (sk->state != TCP_ESTABLISHED &&
2032 sk->state != TCP_CLOSE_WAIT))
2033 {
2034 return (0);
2035 }
2036 if (sk->keepopen)
2037 reset_timer ((struct timer *)&sk->time_wait);
2038 sk->retransmits = 0;
2039 return (1);
2040 }
2041
2042
2043 if (after (sk->window_seq, ack+net16(th->window)))
2044 {
2045
2046
2047
2048
2049
2050
2051 struct sk_buff *skb;
2052 struct sk_buff *skb2=NULL;
2053 struct sk_buff *wskb=NULL;
2054
2055 sk->window_seq = ack + net16(th->window);
2056 cli();
2057 for (skb = sk->send_head; skb != NULL; skb= (struct sk_buff *)skb->link3)
2058 {
2059 if (after( skb->h.seq, sk->window_seq))
2060 {
2061
2062
2063 if (skb2 == NULL)
2064 {
2065 sk->send_head = (struct sk_buff *)skb->link3;
2066 }
2067 else
2068 {
2069 skb2->link3 = skb->link3;
2070 }
2071 if (sk->send_tail == skb)
2072 sk->send_tail = skb2;
2073
2074
2075 if (skb->next != NULL)
2076 {
2077 int i;
2078 if (skb->next != skb)
2079 {
2080 skb->next->prev = skb->prev;
2081 skb->prev->next = skb->next;
2082 }
2083 for (i = 0; i < DEV_NUMBUFFS; i++)
2084 {
2085 if (skb->dev->buffs[i] == skb)
2086 {
2087 if (skb->next == skb)
2088 skb->dev->buffs[i] = NULL;
2089 else
2090 skb->dev->buffs[i] = skb->next;
2091 break;
2092 }
2093 }
2094 if (arp_q == skb)
2095 {
2096 if (skb->next == skb)
2097 arp_q = NULL;
2098 else
2099 arp_q = skb->next;
2100 }
2101 }
2102
2103
2104 skb->magic = TCP_WRITE_QUEUE_MAGIC;
2105 if (wskb == NULL)
2106 {
2107 skb->next = sk->wfront;
2108 sk->wfront = skb;
2109 }
2110 else
2111 {
2112 skb->next = wskb->next;
2113 wskb->next = skb;
2114 }
2115 wskb = skb;
2116 }
2117 else
2118 {
2119 skb2 = skb;
2120 }
2121 }
2122 sti();
2123 }
2124
2125 sk->window_seq = ack + net16(th->window);
2126
2127
2128 if (sk->cong_window < 2048 && ack != sk->rcv_ack_seq)
2129 {
2130 if (sk->exp_growth)
2131 sk->cong_window *= 2;
2132 else
2133 sk->cong_window++;
2134 }
2135
2136 PRINTK (("tcp_ack: Updating rcv ack sequence. \n"));
2137 sk->rcv_ack_seq = ack;
2138
2139
2140 while (sk->send_head != NULL)
2141 {
2142 if (before (sk->send_head->h.seq, ack+1))
2143 {
2144 struct sk_buff *oskb;
2145
2146 sk->packets_out --;
2147 PRINTK (("skb=%X acked\n", sk->send_head));
2148
2149
2150 if (!sk->dead)
2151 wake_up (sk->sleep);
2152
2153 cli();
2154
2155 oskb = sk->send_head;
2156
2157 sk->rtt += ((jiffies - oskb->when) - sk->rtt)/2;
2158 if (sk->rtt < 30) sk->rtt = 30;
2159 sk->send_head = (struct sk_buff *)oskb->link3;
2160 if (sk->send_head == NULL)
2161 {
2162 sk->send_tail = NULL;
2163 }
2164
2165 if (oskb->next != NULL)
2166 {
2167 int i;
2168 if (oskb->next != oskb)
2169 {
2170 oskb->next->prev = oskb->prev;
2171 oskb->prev->next = oskb->next;
2172 }
2173 for (i = 0; i < DEV_NUMBUFFS; i++)
2174 {
2175 if (oskb->dev->buffs[i] == oskb)
2176 {
2177 if (oskb== oskb->next)
2178 oskb->dev->buffs[i]= NULL;
2179 else
2180 oskb->dev->buffs[i] = oskb->next;
2181 break;
2182 }
2183 }
2184 if (arp_q == oskb)
2185 {
2186 if (oskb == oskb->next)
2187 arp_q = NULL;
2188 else
2189 arp_q = (struct sk_buff *)oskb->next;
2190 }
2191 }
2192 oskb->magic = 0;
2193 kfree_skb (oskb, FREE_WRITE);
2194 sti();
2195 if (!sk->dead)
2196 wake_up(sk->sleep);
2197 }
2198 else
2199 {
2200 break;
2201 }
2202
2203 }
2204
2205
2206
2207
2208
2209
2210
2211 if (sk->retransmits && sk->send_head != NULL)
2212 {
2213 PRINTK (("retransmitting\n"));
2214 sk->prot->retransmit (sk,1);
2215 }
2216 sk->retransmits = 0;
2217
2218
2219
2220 if (sk->wfront != NULL && sk->packets_out < sk->cong_window)
2221 {
2222 if (after (sk->window_seq, sk->wfront->h.seq))
2223 {
2224 tcp_write_xmit (sk);
2225 }
2226 }
2227 else
2228 {
2229 if (sk->send_head == NULL && sk->ack_backlog == 0 &&
2230 sk->state != TCP_TIME_WAIT && !sk->keepopen)
2231 {
2232 PRINTK (("Nothing to do, going to sleep.\n"));
2233 if (!sk->dead)
2234 wake_up (sk->sleep);
2235
2236 delete_timer((struct timer *)&sk->time_wait);
2237 sk->timeout = 0;
2238 }
2239 else
2240 {
2241 if (sk->state != sk->keepopen)
2242 {
2243 sk->timeout = TIME_WRITE;
2244 sk->time_wait.len = sk->rtt*2;
2245 reset_timer ((struct timer *)&sk->time_wait);
2246 }
2247 if (sk->state == TCP_TIME_WAIT)
2248 {
2249 sk->time_wait.len = TCP_TIMEWAIT_LEN;
2250 reset_timer ((struct timer *)&sk->time_wait);
2251 sk->timeout = TIME_CLOSE;
2252 }
2253 }
2254 }
2255
2256
2257 if (sk->packets_out == 0 && sk->send_tmp != NULL &&
2258 sk->wfront == NULL && sk->send_head == NULL)
2259 {
2260 tcp_send_partial (sk);
2261 }
2262
2263
2264 if ( sk->state == TCP_TIME_WAIT)
2265 {
2266 if (!sk->dead) wake_up (sk->sleep);
2267 if (sk->rcv_ack_seq == sk->send_seq &&
2268 sk->acked_seq == sk->fin_seq)
2269 {
2270 sk->state = TCP_CLOSE;
2271 sk->shutdown = SHUTDOWN_MASK;
2272 }
2273 }
2274
2275 if (sk->state == TCP_LAST_ACK || sk->state == TCP_FIN_WAIT2)
2276 {
2277 if (!sk->dead) wake_up (sk->sleep);
2278 if (sk->rcv_ack_seq == sk->send_seq)
2279 {
2280 if (sk->acked_seq != sk->fin_seq)
2281 {
2282 tcp_time_wait(sk);
2283 }
2284 else
2285 {
2286 PRINTK (("tcp_ack closing socket - %X\n", sk));
2287 tcp_send_ack (sk->send_seq, sk->acked_seq, sk, th, sk->daddr);
2288 sk->shutdown = SHUTDOWN_MASK;
2289 sk->state = TCP_CLOSE;
2290 }
2291 }
2292 }
2293
2294 PRINTK (("leaving tcp_ack\n"));
2295
2296 return (1);
2297 }
2298
2299
2300
2301
2302
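/* Queue an arriving data segment on the receive queue in sequence order, advance acked_seq over any data that is now contiguous, and ACK either immediately or after a delay. */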
2303 static int
2304 tcp_data (struct sk_buff *skb, volatile struct sock *sk,
2305 unsigned long saddr, unsigned short len)
2306 {
2307 struct sk_buff *skb1, *skb2;
2308 struct tcp_header *th;
2309
2310 th = skb->h.th;
2311 print_th (th);
2312 skb->len = len - (th->doff*4);
2313
2314 PRINTK(("tcp_data len = %d sk = %X:\n",skb->len, sk));
2315
2316 sk->bytes_rcv += skb->len;
2317
2318 if (skb->len == 0 && !th->fin && !th->urg && !th->psh)
2319 {
2320
2321 if (!th->ack)
2322 tcp_send_ack (sk->send_seq, sk->acked_seq,sk, th, saddr);
2323 kfree_skb(skb, FREE_READ);
2324 return (0);
2325 }
2326
2327 if (sk->shutdown & RCV_SHUTDOWN)
2328 {
2329 sk->acked_seq = th->seq + skb->len + th->syn + th->fin;
2330 tcp_reset (sk->saddr, sk->daddr, skb->h.th,
2331 sk->prot, NULL, skb->dev);
2332 sk->state = TCP_CLOSE;
2333 sk->err = EPIPE;
2334 sk->shutdown = SHUTDOWN_MASK;
2335 PRINTK (("tcp_data: closing socket - %X\n", sk));
2336 kfree_skb (skb, FREE_READ);
2337 if (!sk->dead) wake_up (sk->sleep);
2338 return (0);
2339 }
2340
2341
2342
2343
2344
2345
2346
2347
2348 if (sk->rqueue == NULL)
2349 {
2350 PRINTK (("tcp_data: skb = %X:\n",skb));
2351
2352 sk->rqueue = skb;
2353 skb->next = skb;
2354 skb->prev = skb;
2355 skb1= NULL;
2356 }
2357 else
2358 {
2359 PRINTK (("tcp_data adding to chain sk = %X:\n",sk));
2360
2361 for (skb1=sk->rqueue; ; skb1=(struct sk_buff *)skb1->prev)
2362 {
2363 PRINTK (("skb1=%X\n",skb1));
2364 PRINTK (("skb1->h.th->seq = %d\n", skb1->h.th->seq));
2365 if (after ( th->seq+1, skb1->h.th->seq))
2366 {
2367 skb->prev = skb1;
2368 skb->next = skb1->next;
2369 skb->next->prev = skb;
2370 skb1->next = skb;
2371 if (skb1 == sk->rqueue)
2372 sk->rqueue = skb;
2373 break;
2374 }
2375 if ( skb1->prev == sk->rqueue)
2376 {
2377 skb->next= skb1;
2378 skb->prev = skb1->prev;
2379 skb->prev->next = skb;
2380 skb1->prev = skb;
2381 skb1 = NULL;
2382 break;
2383 }
2384 }
2385
2386 PRINTK (("skb = %X:\n",skb));
2387
2388 }
2389
2390 th->ack_seq = th->seq + skb->len;
2391 if (th->syn) th->ack_seq ++;
2392 if (th->fin) th->ack_seq ++;
2393
2394 if (before (sk->acked_seq, sk->copied_seq))
2395 {
2396 printk ("*** tcp.c:tcp_data bug acked < copied\n");
2397 sk->acked_seq = sk->copied_seq;
2398 }
2399
2400
2401 if (skb1 == NULL || skb1->acked || before (th->seq, sk->acked_seq+1))
2402 {
2403 if (before (th->seq, sk->acked_seq+1))
2404 {
2405 if (after (th->ack_seq, sk->acked_seq))
2406 sk->acked_seq = th->ack_seq;
2407 skb->acked = 1;
2408
2409
2410 if (skb->h.th->fin)
2411 {
2412 if (!sk->dead) wake_up (sk->sleep);
2413 sk->shutdown |= RCV_SHUTDOWN;
2414 }
2415
2416 for (skb2=(struct sk_buff *)skb->next;
2417 skb2 !=(struct sk_buff *) sk->rqueue->next;
2418 skb2=(struct sk_buff *)skb2->next)
2419 {
2420 if (before(skb2->h.th->seq, sk->acked_seq+1))
2421 {
2422 if (after (skb2->h.th->ack_seq, sk->acked_seq))
2423 sk->acked_seq = skb2->h.th->ack_seq;
2424 skb2->acked = 1;
2425
2426
2427 if (skb2->h.th->fin)
2428 {
2429 sk->shutdown |= RCV_SHUTDOWN;
2430 if (!sk->dead) wake_up (sk->sleep);
2431 }
2432
2433
2434 sk->ack_backlog = sk->max_ack_backlog;
2435 }
2436 else
2437 {
2438 break;
2439 }
2440 }
2441
2442
2443
2444
2445 if (!sk->delay_acks ||
2446 sk->ack_backlog >= sk->max_ack_backlog ||
2447 sk->bytes_rcv > sk->max_unacked ||
2448 th->fin)
2449 {
2450 tcp_send_ack (sk->send_seq, sk->acked_seq,sk,th, saddr);
2451 }
2452 else
2453 {
2454 sk->ack_backlog++;
2455 sk->time_wait.len = TCP_ACK_TIME;
2456 sk->timeout = TIME_WRITE;
2457 reset_timer ((struct timer *)&sk->time_wait);
2458 sk->retransmits = 0;
2459 }
2460 }
2461 }
2462 else
2463 {
2464
2465 tcp_send_ack (sk->send_seq, sk->acked_seq, sk, th, saddr);
2466 }
2467
2468
2469 if (!sk->dead)
2470 {
2471 wake_up (sk->sleep);
2472 }
2473 else
2474 {
2475 PRINTK (("data received on dead socket. \n"));
2476 }
2477
2478 if (sk->state == TCP_FIN_WAIT2 && sk->acked_seq == sk->fin_seq
2479 && sk->rcv_ack_seq == sk->send_seq)
2480 {
2481 PRINTK (("tcp_data: entering last_ack state sk = %X\n", sk));
2482
2483 tcp_send_ack (sk->send_seq, sk->acked_seq, sk, th, saddr);
2484 sk->shutdown = SHUTDOWN_MASK;
2485 sk->state = TCP_LAST_ACK;
2486 if (!sk->dead) wake_up (sk->sleep);
2487 }
2488
2489 return (0);
2490 }
2491
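/* Urgent data notification: deliver inline if sk->urginline is set, otherwise send SIGURG to the owning process or process group and record that urgent data is pending. */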
2492 static int
2493 tcp_urg (volatile struct sock *sk, struct tcp_header *th, unsigned long saddr)
2494 {
2495 extern int kill_pg (int pg, int sig, int priv);
2496 extern int kill_proc (int pid, int sig, int priv);
2497
2498 if (!sk->dead)
2499 wake_up(sk->sleep);
2500
2501 if (sk->urginline)
2502 {
2503 th->urg = 0;
2504 th->psh = 1;
2505 return (0);
2506 }
2507
2508 if (!sk->urg)
2509 {
2510
2511
2512 if (sk->proc != 0)
2513 {
2514 if (sk->proc > 0)
2515 {
2516 kill_proc (sk->proc, SIGURG, 1);
2517 }
2518 else
2519 {
2520 kill_pg (-sk->proc, SIGURG, 1);
2521 }
2522 }
2523 }
2524 sk->urg++;
2525 return (0);
2526 }
2527
2528
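/* Process a received FIN: remember fin_seq and move the connection on (ESTABLISHED to CLOSE_WAIT, FIN_WAIT1 to FIN_WAIT2, and so on). */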
2529 static int
2530 tcp_fin (volatile struct sock *sk, struct tcp_header *th,
2531 unsigned long saddr, struct device *dev)
2532 {
2533 PRINTK (("tcp_fin (sk=%X, th=%X, saddr=%X, dev=%X)\n",
2534 sk, th, saddr, dev));
2535
2536 if (!sk->dead)
2537 {
2538 wake_up (sk->sleep);
2539 }
2540
2541 switch (sk->state)
2542 {
2543 case TCP_SYN_RECV:
2544 case TCP_SYN_SENT:
2545 case TCP_ESTABLISHED:
2546 sk->fin_seq = th->seq+1;
2547 sk->state = TCP_CLOSE_WAIT;
2548 if (th->rst) sk->shutdown = SHUTDOWN_MASK;
2549 break;
2550
2551 case TCP_CLOSE_WAIT:
2552 case TCP_FIN_WAIT2:
2553 break;
2554
2555 case TCP_FIN_WAIT1:
2556 sk->fin_seq = th->seq+1;
2557 sk->state = TCP_FIN_WAIT2;
2558 break;
2559
2560 default:
2561 case TCP_TIME_WAIT:
2562 sk->state = TCP_LAST_ACK;
2563
2564 sk->time_wait.len = TCP_TIMEWAIT_LEN;
2565 sk->timeout = TIME_CLOSE;
2566 reset_timer ((struct timer *)&sk->time_wait);
2567 return (0);
2568
2569 }
2570
2571
2572 sk->ack_backlog ++;
2573
2574 #if 0
2575
2576 buff=sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
2577 if (buff == NULL)
2578 {
2579
2580 return (1);
2581 }
2582
2583 buff->mem_addr = buff;
2584 buff->mem_len = MAX_ACK_SIZE;
2585 buff->len=sizeof (struct tcp_header);
2586 buff->sk = sk;
2587
2588 t1 = (struct tcp_header *)(buff + 1);
2589
2590 tmp = sk->prot->build_header (buff, sk->saddr, sk->daddr, &dev,
2591 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
2592 if (tmp < 0)
2593 {
2594 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
2595 return (0);
2596 }
2597
2598 buff->len += tmp;
2599 t1 = (struct tcp_header *)((char *)t1 +tmp);
2600
2601 memcpy (t1, th, sizeof (*t1));
2602
2603
2604 t1->dest = th->source;
2605 t1->source = th->dest;
2606
2607
2608 t1->seq = net32(sk->send_seq);
2609
2610
2611
2612
2613 buff->h.seq = sk->send_seq;
2614 t1->window = net16(sk->prot->rspace(sk));
2615
2616 t1->res1=0;
2617 t1->res2=0;
2618 t1->rst = 0;
2619 t1->urg = 0;
2620 t1->syn = 0;
2621 t1->psh = 0;
2622 t1->ack = 1;
2623 t1->fin = 0;
2624 t1->ack_seq = net32(sk->acked_seq);
2625
2626 t1->doff = sizeof (*t1)/4;
2627 tcp_send_check (t1, sk->saddr, sk->daddr, sizeof (*t1), sk);
2628
2629
2630
2631 if (sk->wback != NULL)
2632 {
2633 buff->next = NULL;
2634 sk->wback->next = buff;
2635 sk->wback = buff;
2636 buff->magic = TCP_WRITE_QUEUE_MAGIC;
2637 }
2638 else
2639 {
2640 sk->prot->queue_xmit (sk, dev, buff,0);
2641 }
2642 #endif
2643 return (0);
2644 }
2645
2646
2647
2648
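/* accept(): block (unless O_NONBLOCK) until a completed connection request sits on the listener's queue, then detach and return its new sock. */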
2649 static volatile struct sock *
2650 tcp_accept (volatile struct sock *sk, int flags)
2651 {
2652 volatile struct sock *newsk;
2653 struct sk_buff *skb;
2654
2655 PRINTK (("tcp_accept(sk=%X, flags=%X)\n", sk, flags));
2656
2657
2658
2659 if (sk->state != TCP_LISTEN)
2660 {
2661 sk->err = EINVAL;
2662 return (NULL);
2663 }
2664
2665
2666 sk->inuse = 1;
2667 cli();
2668 while ( (skb = get_firstr(sk)) == NULL )
2669 {
2670 if (flags & O_NONBLOCK)
2671 {
2672 sti();
2673 release_sock (sk);
2674 sk->err = EAGAIN;
2675 return (NULL);
2676 }
2677
2678 release_sock (sk);
2679 interruptible_sleep_on (sk->sleep);
2680 if (current->signal & ~current->blocked)
2681 {
2682 sti();
2683 sk->err = ERESTARTSYS;
2684 return (NULL);
2685 }
2686
2687 sk->inuse = 1;
2688 }
2689 sti();
2690
2691
2692 newsk = skb->sk;
2693
2694 kfree_skb (skb, FREE_READ);
2695 sk->ack_backlog--;
2696 release_sock (sk);
2697 return (newsk);
2698 }
2699
2700
2701
2702
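/* Active open: choose an initial sequence number, build a SYN carrying our MSS option, enter SYN_SENT and start the connect timer. */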
2703 static int
2704 tcp_connect (volatile struct sock *sk, struct sockaddr_in *usin, int addr_len)
2705 {
2706 struct sk_buff *buff;
2707 struct sockaddr_in sin;
2708 struct device *dev=NULL;
2709 unsigned char *ptr;
2710 int tmp;
2711 struct tcp_header *t1;
2712 if (sk->state != TCP_CLOSE) return (-EISCONN);
2713 if (addr_len < 8) return (-EINVAL);
2714
2715
2716 memcpy_fromfs (&sin,usin, min(sizeof (sin), addr_len));
2717
2718 if (sin.sin_family && sin.sin_family != AF_INET) return (-EAFNOSUPPORT);
2719 sk->inuse = 1;
2720 sk->daddr = sin.sin_addr.s_addr;
2721 sk->send_seq = timer_seq*SEQ_TICK-seq_offset;
2722 sk->rcv_ack_seq = sk->send_seq -1;
2723 sk->err = 0;
2724 sk->dummy_th.dest = sin.sin_port;
2725 release_sock (sk);
2726
2727 buff=sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
2728 if (buff == NULL)
2729 {
2730 return (-ENOMEM);
2731 }
2732 sk->inuse = 1;
2733 buff->lock = 0;
2734 buff->mem_addr = buff;
2735 buff->mem_len = MAX_SYN_SIZE;
2736 buff->len=24;
2737 buff->sk = sk;
2738 t1=(struct tcp_header *)(buff + 1);
2739
2740
2741
2742 tmp = sk->prot->build_header (buff, sk->saddr, sk->daddr, &dev,
2743 IPPROTO_TCP, NULL, MAX_SYN_SIZE);
2744 if (tmp < 0)
2745 {
2746 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
2747 release_sock (sk);
2748 return (-ENETUNREACH);
2749 }
2750 buff->len += tmp;
2751 t1 = (struct tcp_header *)((char *)t1 +tmp);
2752
2753 memcpy (t1, (void *)&(sk->dummy_th), sizeof (*t1));
2754 t1->seq = net32(sk->send_seq++);
2755 buff->h.seq = sk->send_seq;
2756 t1->ack = 0;
2757 t1->window = 2;
2758 t1->res1=0;
2759 t1->res2=0;
2760 t1->rst = 0;
2761 t1->urg = 0;
2762 t1->psh = 0;
2763 t1->syn = 1;
2764 t1->urg_ptr = 0;
2765 t1->doff =6;
2766
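/* TCP option bytes: kind 2 (maximum segment size), length 4, value =
   device MTU minus the protocol headers.  doff was set to 6 above to
   cover these four extra bytes. */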
2767 ptr=(unsigned char *)(t1+1);
2768 ptr[0]=2;
2769 ptr[1]=4;
2770 ptr[2]=(dev->mtu- HEADER_SIZE) >> 8;
2771 ptr[3]=(dev->mtu- HEADER_SIZE) & 0xff;
2772 sk->mtu = dev->mtu - HEADER_SIZE;
2773 tcp_send_check (t1, sk->saddr, sk->daddr,
2774 sizeof (struct tcp_header) + 4, sk);
2775
2776
2777 sk->state = TCP_SYN_SENT;
2778
2779 sk->prot->queue_xmit(sk, dev, buff, 0);
2780
2781 sk->time_wait.len = TCP_CONNECT_TIME;
2782 sk->rtt = TCP_CONNECT_TIME;
2783 reset_timer ((struct timer *)&sk->time_wait);
2784 sk->retransmits = TCP_RETR2 - TCP_SYN_RETRIES;
2785 release_sock (sk);
2786 return (0);
2787 }
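/*
 * Illustrative user-space counterpart of the function above (sketch
 * only, not part of this file; the port and address are arbitrary
 * examples):
 *
 *     int fd = socket(AF_INET, SOCK_STREAM, 0);
 *     struct sockaddr_in sin;
 *     sin.sin_family = AF_INET;
 *     sin.sin_port = htons(7);
 *     sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
 *     connect(fd, (struct sockaddr *) &sin, sizeof(sin));
 */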
2788
2789
2790
2791
2792
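/*
 * tcp_sequence: decide whether an incoming segment may be processed.
 * Returns 1 if any part of the segment overlaps the receive window
 * [acked_seq, acked_seq + window] (a bare ACK sitting just behind the
 * window is also let through); otherwise an ACK is sent to resynchronise
 * the peer, unless the segment carried RST, and 0 is returned.
 */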
2793 static int
2794 tcp_sequence (volatile struct sock *sk, struct tcp_header *th, short len,
2795 struct options *opt, unsigned long saddr)
2796 {
2797
2798
2799
2800
2801
2802 PRINTK (("tcp_sequence (sk=%X, th=%X, len = %d, opt=%d, saddr=%X)\n",
2803 sk, th, len, opt, saddr));
2804
2805 if (between(th->seq, sk->acked_seq, sk->acked_seq + sk->window)||
2806 between(th->seq + len-sizeof (*th), sk->acked_seq,
2807 sk->acked_seq + sk->window) ||
2808 (before (th->seq, sk->acked_seq) &&
2809 after (th->seq + len - sizeof (*th), sk->acked_seq + sk->window)))
2810 {
2811 return (1);
2812 }
2813
2814 PRINTK (("tcp_sequence: rejecting packet. \n"));
2815
2816
2817
2818 if (after (th->seq, sk->acked_seq + sk->window))
2819 {
2820 tcp_send_ack (sk->send_seq, sk->acked_seq, sk, th, saddr);
2821 return (0);
2822 }
2823
2824
2825 if (th->ack && len == th->doff*4 && after (th->seq, sk->acked_seq - 32767) &&
2826 !th->fin && !th->syn) return (1);
2827
2828 if (!th->rst)
2829 {
2830
2831 tcp_send_ack (net32(th->ack_seq), sk->acked_seq, sk, th, saddr);
2832 }
2833
2834
2835 return (0);
2836 }
2837
2838
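/*
 * tcp_options: minimal option parsing.  Only an MSS option (kind 2,
 * length 4) in the very first option slot is honoured; anything else
 * clamps sk->mtu to the classic 576-byte default minus headers.
 */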
2839 static void
2840 tcp_options (volatile struct sock *sk, struct tcp_header *th)
2841 {
2842 unsigned char *ptr;
2843 ptr = (unsigned char *)(th + 1);
2844 if (ptr[0] != 2 || ptr[1] != 4)
2845 {
2846 sk->mtu = min (sk->mtu, 576-HEADER_SIZE);
2847 return;
2848 }
2849 sk->mtu = min (sk->mtu, ptr[2]*256 + ptr[3] - HEADER_SIZE);
2850 }
2851
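/*
 * tcp_rcv: entry point for every received TCP segment.  Looks up the
 * owning socket, verifies the checksum, queues the skb on the socket's
 * back_log when the socket is busy, and otherwise runs the segment
 * through the connection state machine below.
 */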
2852 int
2853 tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
2854 unsigned long daddr, unsigned short len,
2855 unsigned long saddr, int redo, struct ip_protocol * protocol)
2856 {
2857 struct tcp_header *th;
2858 volatile struct sock *sk;
2859
2860 if (!skb)
2861 {
2862 PRINTK (("tcp.c: tcp_rcv skb = NULL\n"));
2863 return (0);
2864 }
2865 #if 0
2866 if (!protocol)
2867 {
2868 PRINTK (("tcp.c: tcp_rcv protocol = NULL\n"));
2869 return (0);
2870 }
2871
2872 if (!opt)
2873 {
2874 PRINTK (("tcp.c: tcp_rcv opt = NULL\n"));
2875 }
2876 #endif
2877 if (!dev)
2878 {
2879 PRINTK (("tcp.c: tcp_rcv dev = NULL\n"));
2880 return (0);
2881 }
2882
2883 th = skb->h.th;
2884
2885
2886 sk=get_sock(&tcp_prot, net16(th->dest), saddr, th->source, daddr);
2887 PRINTK(("<<\n"));
2888 PRINTK(("len = %d, redo = %d, skb=%X\n", len, redo, skb));
2889
2890 if (sk)
2891 {
2892 PRINTK (("sk = %X:\n",sk));
2893 }
2894
2895 if (!redo)
2896 {
2897 if (th->check && tcp_check (th, len, saddr, daddr ))
2898 {
2899 skb->sk = NULL;
2900 PRINTK (("packet dropped with bad checksum.\n"));
2901 kfree_skb (skb, 0);
2902
2903
2904 return (0);
2905 }
2906
2907
2908 if (sk == NULL)
2909 {
2910 if (!th->rst)
2911 tcp_reset (daddr, saddr, th, &tcp_prot, opt,dev);
2912 skb->sk = NULL;
2913 kfree_skb (skb, 0);
2914 return (0);
2915 }
2916
2917 skb->len = len;
2918 skb->sk = sk;
2919 skb->acked = 0;
2920 skb->used = 0;
2921 skb->free = 0;
2922 skb->urg_used = 0;
2923 skb->saddr = daddr;
2924 skb->daddr = saddr;
2925
2926 th->seq = net32(th->seq);
2927
2928 cli();
2929
2930
2931 if (sk->inuse)
2932 {
2933 if (sk->back_log == NULL)
2934 {
2935 sk->back_log = skb;
2936 skb->next = skb;
2937 skb->prev = skb;
2938 }
2939 else
2940 {
2941 skb->next = sk->back_log;
2942 skb->prev = sk->back_log->prev;
2943 skb->prev->next = skb;
2944 skb->next->prev = skb;
2945 }
2946 sti();
2947 return (0);
2948 }
2949 sk->inuse = 1;
2950 sti();
2951 }
2952 else
2953 {
2954 if (!sk)
2955 {
2956 PRINTK (("tcp.c: tcp_rcv bug sk=NULL redo = 1\n"));
2957 return (0);
2958 }
2959 }
2960
2961 if (!sk->prot)
2962 {
2963 PRINTK (("tcp.c: tcp_rcv sk->prot = NULL \n"));
2964 return (0);
2965 }
2966
2967
2968 if (sk->rmem_alloc + skb->mem_len >= SK_RMEM_MAX)
2969 {
2970 skb->sk = NULL;
2971 PRINTK (("dropping packet due to lack of buffer space.\n"));
2972 kfree_skb (skb, 0);
2973 release_sock (sk);
2974 return (0);
2975 }
2976
2977 sk->rmem_alloc += skb->mem_len;
2978
2979 PRINTK (("About to do switch. \n"));
2980
2981
2982
2983 switch (sk->state)
2984 {
2985
2986
2987 case TCP_LAST_ACK:
2988 if (th->rst)
2989 {
2990 sk->err = ECONNRESET;
2991 sk->state = TCP_CLOSE;
2992 sk->shutdown = SHUTDOWN_MASK;
2993 if (!sk->dead)
2994 {
2995 wake_up (sk->sleep);
2996 }
2997 kfree_skb (skb, FREE_READ);
2998 release_sock(sk);
2999 return (0);
3000 }
3001
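/* No break: when LAST_ACK sees a segment without RST it is handled
   like the other synchronized states below. */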
3002 case TCP_ESTABLISHED:
3003 case TCP_CLOSE_WAIT:
3004 case TCP_FIN_WAIT1:
3005 case TCP_FIN_WAIT2:
3006 case TCP_TIME_WAIT:
3007
3008 if (!tcp_sequence (sk, th, len, opt, saddr))
3009 {
3010 kfree_skb (skb, FREE_READ);
3011 release_sock(sk);
3012 return (0);
3013 }
3014
3015 if (th->rst)
3016 {
3017
3018 sk->err = ECONNRESET;
3019
3020 if (sk->state == TCP_CLOSE_WAIT)
3021 {
3022 sk->err = EPIPE;
3023 }
3024
3025
3026
3027 if (!th->fin)
3028 {
3029 sk->state = TCP_CLOSE;
3030 sk->shutdown = SHUTDOWN_MASK;
3031 if (!sk->dead)
3032 {
3033 wake_up (sk->sleep);
3034 }
3035 kfree_skb (skb, FREE_READ);
3036 release_sock(sk);
3037 return (0);
3038 }
3039 }
3040 #if 0
3041 if (opt && (opt->security != 0 || opt->compartment != 0 || th->syn))
3042 {
3043 sk->err = ECONNRESET;
3044 sk->state = TCP_CLOSE;
3045 sk->shutdown = SHUTDOWN_MASK;
3046 tcp_reset (daddr, saddr, th, sk->prot, opt,dev);
3047 if (!sk->dead)
3048 {
3049 wake_up (sk->sleep);
3050 }
3051 kfree_skb (skb, FREE_READ);
3052 release_sock(sk);
3053 return (0);
3054 }
3055 #endif
3056 if (th->ack)
3057 {
3058 if(!tcp_ack (sk, th, saddr))
3059 {
3060 kfree_skb (skb, FREE_READ);
3061 release_sock(sk);
3062 return (0);
3063 }
3064 }
3065 if (th->urg)
3066 {
3067 if (tcp_urg (sk, th, saddr))
3068 {
3069 kfree_skb (skb, FREE_READ);
3070 release_sock(sk);
3071 return (0);
3072 }
3073 }
3074
3075 if (th->fin && tcp_fin (sk, th, saddr, dev))
3076 {
3077 kfree_skb (skb, FREE_READ);
3078 release_sock(sk);
3079 return (0);
3080 }
3081
3082 if ( tcp_data (skb, sk, saddr, len))
3083 {
3084 kfree_skb (skb, FREE_READ);
3085 release_sock(sk);
3086 return (0);
3087 }
3088
3089 release_sock(sk);
3090 return (0);
3091
3092 case TCP_CLOSE:
3093
3094 if (sk->dead || sk->daddr)
3095 {
3096 PRINTK (("packet received for closed, dead socket\n"));
3097 kfree_skb (skb, FREE_READ);
3098 release_sock (sk);
3099 return (0);
3100 }
3101
3102 if (!th->rst)
3103 {
3104 if (!th->ack)
3105 th->ack_seq=0;
3106 tcp_reset (daddr, saddr, th, sk->prot, opt,dev);
3107 }
3108 kfree_skb (skb, FREE_READ);
3109 release_sock(sk);
3110 return (0);
3111
3112 case TCP_LISTEN:
3113 if (th->rst)
3114 {
3115 kfree_skb (skb, FREE_READ);
3116 release_sock(sk);
3117 return (0);
3118 }
3119 if (th->ack)
3120 {
3121 tcp_reset (daddr, saddr, th, sk->prot, opt,dev );
3122 kfree_skb (skb, FREE_READ);
3123 release_sock(sk);
3124 return (0);
3125 }
3126
3127 if (th->syn)
3128 {
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141 tcp_conn_request (sk, skb, daddr, saddr, opt, dev);
3142
3143 release_sock(sk);
3144 return (0);
3145 }
3146
3147 kfree_skb (skb, FREE_READ);
3148 release_sock(sk);
3149 return (0);
3150
3151 default:
3152 if (!tcp_sequence (sk, th, len, opt, saddr))
3153 {
3154 kfree_skb (skb, FREE_READ);
3155 release_sock(sk);
3156 return (0);
3157 }
3158
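/* No break: after the sequence check the segment is processed by the
   SYN_SENT code below. */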
3159 case TCP_SYN_SENT:
3160 if (th->rst)
3161 {
3162 sk->err = ECONNREFUSED ;
3163 sk->state = TCP_CLOSE;
3164 sk->shutdown = SHUTDOWN_MASK;
3165 if (!sk->dead)
3166 {
3167 wake_up (sk->sleep);
3168 }
3169 kfree_skb (skb, FREE_READ);
3170 release_sock(sk);
3171 return (0);
3172 }
3173 #if 0
3174 if (opt->security != 0 || opt->compartment != 0 )
3175 {
3176 sk->err = ECONNRESET;
3177 sk->state = TCP_CLOSE;
3178 sk->shutdown = SHUTDOWN_MASK;
3179 tcp_reset (daddr, saddr, th, sk->prot, opt, dev);
3180 if (!sk->dead)
3181 {
3182 wake_up (sk->sleep);
3183 }
3184 kfree_skb (skb, FREE_READ);
3185 release_sock(sk);
3186 return (0);
3187 }
3188 #endif
3189 if (!th->ack)
3190 {
3191 if (th->syn)
3192 {
3193 sk->state = TCP_SYN_RECV;
3194 }
3195
3196 kfree_skb (skb, FREE_READ);
3197 release_sock(sk);
3198 return (0);
3199 }
3200
3201 switch (sk->state)
3202 {
3203 case TCP_SYN_SENT:
3204 if (!tcp_ack(sk, th, saddr))
3205 {
3206 tcp_reset(daddr, saddr, th, sk->prot, opt,dev);
3207 kfree_skb (skb, FREE_READ);
3208 release_sock(sk);
3209 return (0);
3210 }
3211
3212
3213
3214
3215 if (!th->syn)
3216 {
3217 kfree_skb (skb, FREE_READ);
3218 release_sock (sk);
3219 return (0);
3220 }
3221
3222
3223 sk->acked_seq = th->seq+1;
3224 sk->fin_seq = th->seq;
3225 tcp_send_ack (sk->send_seq, th->seq+1, sk,
3226 th, sk->daddr);
3227
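/* No break: the handshake is completed by the SYN_RECV code below. */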
3228 case TCP_SYN_RECV:
3229 if (!tcp_ack(sk, th, saddr))
3230 {
3231 tcp_reset(daddr, saddr, th, sk->prot, opt, dev);
3232 kfree_skb (skb, FREE_READ);
3233 release_sock(sk);
3234 return (0);
3235 }
3236
3237 sk->state = TCP_ESTABLISHED;
3238
3239
3240
3241
3242 tcp_options(sk, th);
3243 sk->dummy_th.dest = th->source;
3244 sk->copied_seq = sk->acked_seq-1;
3245 if (!sk->dead)
3246 {
3247 wake_up (sk->sleep);
3248 }
3249
3250
3251
3252 if (th->urg)
3253 if (tcp_urg (sk, th, saddr))
3254 {
3255 kfree_skb (skb, FREE_READ);
3256 release_sock(sk);
3257 return (0);
3258 }
3259 if (tcp_data (skb, sk, saddr, len))
3260 kfree_skb (skb, FREE_READ);
3261
3262 if (th->fin)
3263 tcp_fin(sk, th, saddr, dev);
3264
3265 release_sock(sk);
3266 return (0);
3267 }
3268
3269 if (th->urg)
3270 {
3271 if (tcp_urg (sk, th, saddr))
3272 {
3273 kfree_skb (skb, FREE_READ);
3274 release_sock (sk);
3275 return (0);
3276 }
3277 }
3278
3279 if (tcp_data (skb, sk, saddr, len))
3280 {
3281 kfree_skb (skb, FREE_READ);
3282 release_sock (sk);
3283 return (0);
3284 }
3285
3286 if (!th->fin)
3287 {
3288 release_sock(sk);
3289 return (0);
3290 }
3291 tcp_fin (sk, th, saddr, dev);
3292 release_sock(sk);
3293 return (0);
3294 }
3295 }
3296
3297
3298
3299
3300
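/*
 * tcp_write_wakeup: send a bare ACK carrying sequence number
 * send_seq - 1 on an established (or CLOSE_WAIT) connection.  Used as a
 * probe to coax an answer out of the peer, for instance while waiting
 * for its window to reopen.
 */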
3301 static void
3302 tcp_write_wakeup(volatile struct sock *sk)
3303 {
3304 struct sk_buff *buff;
3305 struct tcp_header *t1;
3306 struct device *dev=NULL;
3307 int tmp;
3308 if (sk -> state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) return;
3309
3310 buff=sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
3311
3312 if (buff == NULL) return;
3313
3314 buff->lock = 0;
3315 buff->mem_addr = buff;
3316 buff->mem_len = MAX_ACK_SIZE;
3317 buff->len=sizeof (struct tcp_header);
3318 buff->free = 1;
3319 buff->sk = sk;
3320 PRINTK (("in tcp_write_wakeup\n"));
3321 t1=(struct tcp_header *)(buff + 1);
3322
3323
3324 tmp = sk->prot->build_header (buff, sk->saddr, sk->daddr, &dev,
3325 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
3326 if (tmp < 0)
3327 {
3328 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
3329 return;
3330 }
3331
3332 buff->len += tmp;
3333 t1 = (struct tcp_header *)((char *)t1 +tmp);
3334
3335 memcpy (t1,(void *) &sk->dummy_th, sizeof (*t1));
3336
3337
3338
3339 t1->seq = net32(sk->send_seq-1);
3340 t1->ack = 1;
3341 t1->res1= 0;
3342 t1->res2= 0;
3343 t1->rst = 0;
3344 t1->urg = 0;
3345 t1->psh = 0;
3346 t1->fin = 0;
3347 t1->syn = 0;
3348 t1->ack_seq = net32(sk->acked_seq);
3349 t1->window = net16(sk->prot->rspace(sk));
3350 t1->doff = sizeof (*t1)/4;
3351 tcp_send_check (t1, sk->saddr, sk->daddr, sizeof (*t1), sk);
3352
3353
3354 sk->prot->queue_xmit(sk, dev, buff, 1);
3355
3356 }
3357
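/*
 * Operations table for TCP.  The initialisers are positional, so their
 * order has to track the member order of struct proto in sock.h: the
 * buffer-management helpers first, then the per-call entry points named
 * above, and the trailing scalars (128 is presumably the maximum header
 * size reserved for this protocol) with the protocol name last.
 */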
3358 struct proto tcp_prot =
3359 {
3360 sock_wmalloc,
3361 sock_rmalloc,
3362 sock_wfree,
3363 sock_rfree,
3364 sock_rspace,
3365 sock_wspace,
3366 tcp_close,
3367 tcp_read,
3368 tcp_write,
3369 tcp_sendto,
3370 tcp_recvfrom,
3371 ip_build_header,
3372 tcp_connect,
3373 tcp_accept,
3374 ip_queue_xmit,
3375 tcp_retransmit,
3376 tcp_write_wakeup,
3377 tcp_read_wakeup,
3378 tcp_rcv,
3379 tcp_select,
3380 tcp_ioctl,
3381 NULL,
3382 tcp_shutdown,
3383 128,
3384 0,
3385 {NULL,},
3386 "TCP"
3387 };
3388
3389
3390
3391