This source file includes the following definitions:
- min
- print_th
- get_firstr
- diff
- tcp_time_wait
- tcp_retransmit
- tcp_err
- tcp_readable
- tcp_select
- tcp_ioctl
- tcp_check
- tcp_send_check
- tcp_send_partial
- tcp_send_ack
- tcp_build_header
- tcp_write
- tcp_sendto
- tcp_read_wakeup
- cleanup_rbuf
- tcp_read_urg
- tcp_read
- tcp_shutdown
- tcp_recvfrom
- tcp_reset
- tcp_conn_request
- tcp_close
- tcp_write_xmit
- tcp_ack
- tcp_data
- tcp_urg
- tcp_fin
- tcp_accept
- tcp_connect
- tcp_sequence
- tcp_options
- tcp_rcv
- tcp_write_wakeup
79 #include <linux/types.h>
80 #include <linux/sched.h>
81 #include <linux/mm.h>
82 #include <linux/string.h>
83 #include <linux/socket.h>
84 #include <netinet/in.h>
85 #include <linux/fcntl.h>
86 #include "timer.h"
87 #include "ip.h"
88 #include "icmp.h"
89 #include "tcp.h"
90 #include "sock.h"
91 #include "arp.h"
92 #include <linux/errno.h>
93 #include <linux/timer.h>
94 #include <asm/system.h>
95 #include <asm/segment.h>
96 #include <linux/mm.h>
97
98 #include <linux/termios.h>
99
100 #ifdef PRINTK
101 #undef PRINTK
102 #endif
103
104 #undef TCP_DEBUG
105
106 #ifdef TCP_DEBUG
107 #define PRINTK(x) printk x
108 #else
109 #define PRINTK(x)
110 #endif
111
112 #define tmax(a,b) (before ((a),(b)) ? (b) : (a))
113 #define swap(a,b) {unsigned long c; c=a; a=b; b=c;}
114
115 extern struct proto tcp_prot;
116
117 static int
118 min (unsigned int a, unsigned int b)
119 {
120 if (a < b) return (a);
121 return (b);
122 }
123
124 void
125 print_th (struct tcp_header *th)
126 {
127 unsigned char *ptr;
128 ptr = (unsigned char *)(th + 1);
129 PRINTK (("tcp header:\n"));
130 PRINTK ((" source=%d, dest=%d, seq =%d, ack_seq = %d\n",
131 net16(th->source), net16(th->dest), net32(th->seq),
132 net32(th->ack_seq)));
133 PRINTK ((" fin=%d, syn=%d, rst=%d, psh=%d, ack=%d, urg=%d res1=%d res2=%d\n"
134 ,th->fin, th->syn, th->rst, th->psh, th->ack, th->urg,
135 th->res1, th->res2));
136 PRINTK ((" window = %d, check = %d urg_ptr = %d\n",
137 net16(th->window), net16(th->check), net16(th->urg_ptr)));
138 PRINTK ((" doff = %d\n",th->doff));
139 PRINTK (("options = %d %d %d %d\n", ptr[0], ptr[1], ptr[2], ptr[3]));
140 }
141
142
143 static struct sk_buff *
144 get_firstr(volatile struct sock *sk)
145 {
146 struct sk_buff *skb;
147 skb = sk->rqueue;
148 if (skb == NULL) return (NULL);
149 sk->rqueue = (struct sk_buff *)skb->next;
150 if (sk->rqueue == skb)
151 {
152 sk->rqueue = NULL;
153 }
154 else
155 {
156 sk->rqueue->prev=skb->prev;
157 sk->rqueue->prev->next = sk->rqueue;
158 }
159 return (skb);
160 }
161
162 static long
163 diff (unsigned long seq1, unsigned long seq2)
164 {
165 long d;
166 d=seq1-seq2;
167 if (d > 0) return (d);
168
169 return (~d+1);
170 }
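
The helpers above, together with the before(), after() and tmax() comparisons used throughout this file, rely on the wraparound-safe sequence arithmetic defined in "tcp.h", which is not part of this listing. A minimal sketch of the conventional test (the names are illustrative, not the actual tcp.h macros):

/* Illustrative only: a signed test on the 32-bit difference handles sequence
 * numbers that wrap past 2^32, assuming a 32-bit long as on the i386 this
 * code targets.  diff() above returns the magnitude of the same difference. */
#define seq_before(s1, s2)  (((long)((s1) - (s2))) < 0)
#define seq_after(s1, s2)   (((long)((s2) - (s1))) < 0)
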
171
172
173 static void
174 tcp_time_wait (volatile struct sock *sk)
175 {
176 sk->state = TCP_TIME_WAIT;
177 sk->shutdown = SHUTDOWN_MASK;
178 if (!sk->dead) wake_up (sk->sleep);
179 sk->time_wait.len = TCP_TIMEWAIT_LEN;
180 sk->timeout = TIME_CLOSE;
181 reset_timer ((struct timer *)&sk->time_wait);
182 }
183
184 static void
185 tcp_retransmit (volatile struct sock *sk, int all)
186 {
187 if (all)
188 {
189 ip_retransmit (sk, all);
190 return;
191 }
192 sk->rtt *= 2;
193 if (sk->cong_window > 1)
194 sk->cong_window = sk->cong_window / 2;
195 sk->exp_growth = 0;
196
197
198 ip_retransmit (sk, all);
199
200 }
201
202
203
204
205
206
207
208
209 void
210 tcp_err (int err, unsigned char *header, unsigned long daddr,
211 unsigned long saddr, struct ip_protocol *protocol)
212 {
213 struct tcp_header *th;
214 volatile struct sock *sk;
215
216 PRINTK (("tcp_err(err=%d, header=%X, daddr=%X saddr=%X, protocol=%X)\n",
217 err, header, daddr, saddr, protocol));
218
219 th = (struct tcp_header *)header;
220 sk = get_sock (&tcp_prot, net16(th->dest), saddr, th->source, daddr);
221 print_th (th);
222
223 if (sk == NULL) return;
224
225 if ((err & 0xff00) == (ICMP_SOURCE_QUENCH << 8))
226 {
227
228
229
230 if (sk->cong_window > 1)
231 sk->cong_window --;
232
233 return;
234 }
235
236 PRINTK (("tcp.c: icmp_err got error\n"));
237 sk->err = icmp_err_convert[err & 0xff].errno;
238
239
240 if (icmp_err_convert[err & 0xff].fatal)
241 {
242 if (sk->state == TCP_SYN_SENT)
243 {
244 sk->state = TCP_CLOSE;
245 sk->prot->close(sk, 0);
246 }
247 }
248
249 return;
250
251 }
252
253 static int
254 tcp_readable (volatile struct sock *sk)
255 {
256 unsigned long counted;
257 unsigned long amount;
258 struct sk_buff *skb;
259 int count=0;
260 int sum;
261
262 PRINTK (("tcp_readable (sk=%X)\n", sk));
263
264 if (sk == NULL || sk->rqueue == NULL) return (0);
265
266 counted = sk->copied_seq+1;
267 amount = 0;
268 skb = (struct sk_buff *)sk->rqueue->next;
269
270
271 do {
272 count ++;
273 if (count > 20)
274 {
275 PRINTK (("tcp_readable, more than 20 packets without a psh\n"));
276 PRINTK (("possible read_queue corruption.\n"));
277 return (amount);
278 }
279 if (before (counted, skb->h.th->seq)) break;
280 sum = skb->len - ( counted - skb->h.th->seq);
281 if (skb->h.th->syn) sum ++;
282 if (skb->h.th->urg)
283 {
284 sum -= net16(skb->h.th->urg_ptr);
285 }
286 if (sum >= 0)
287 {
288 amount += sum;
289 if (skb->h.th->syn) amount --;
290 counted += sum;
291 }
292 if (amount && skb->h.th->psh) break;
293 skb = (struct sk_buff *)skb->next;
294 } while (skb != sk->rqueue->next);
295 PRINTK (("tcp readable returning %d bytes\n", amount));
296 return (amount);
297 }
298
299
300 static int
301 tcp_select (volatile struct sock *sk, int sel_type, select_table *wait)
302 {
303 sk->inuse = 1;
304 PRINTK (("tcp_select (sk=%X, sel_type = %d, wait = %X)\n",
305 sk, sel_type, wait));
306 switch (sel_type)
307 {
308 case SEL_IN:
309 select_wait (sk->sleep, wait);
310 if (sk->rqueue != NULL)
311 {
312 if (sk->state == TCP_LISTEN || tcp_readable(sk))
313 {
314 release_sock (sk);
315 return (1);
316 }
317 }
318
319 if (sk->shutdown & RCV_SHUTDOWN)
320 {
321 release_sock (sk);
322 return (1);
323 }
324 else
325 {
326 release_sock (sk);
327 return (0);
328 }
329
330 case SEL_OUT:
331 select_wait (sk->sleep, wait);
332
333 if (sk->shutdown & SEND_SHUTDOWN)
334 {
335 PRINTK (("write select on shutdown socket.\n"));
336
337 release_sock (sk);
338 return (0);
339 }
340
341
342
343 if (sk->prot->wspace(sk) >= sk->mtu)
344 {
345 release_sock (sk);
346
347 if (sk->state == TCP_SYN_RECV || sk->state == TCP_SYN_SENT)
348 return (0);
349 return (1);
350 }
351
352 PRINTK (("tcp_select: sleeping on write sk->wmem_alloc = %d, "
353 "sk->packets_out = %d\n"
354 "sk->wback = %X, sk->wfront = %X\n"
355 "sk->send_seq = %u, sk->window_seq=%u\n",
356 sk->wmem_alloc, sk->packets_out,
357 sk->wback, sk->wfront,
358 sk->send_seq, sk->window_seq));
359
360 release_sock (sk);
361 return (0);
362
363
364 case SEL_EX:
365 select_wait(sk->sleep,wait);
366 if (sk->err)
367 {
368 release_sock (sk);
369 return (1);
370 }
371 release_sock (sk);
372 return (0);
373 }
374
375 release_sock (sk);
376 return (0);
377 }
378
379 static int
380 tcp_ioctl (volatile struct sock *sk, int cmd, unsigned long arg)
381 {
382 PRINTK (("tcp_ioctl (sk=%X, cmd = %d, arg=%X)\n", sk, cmd, arg));
383 switch (cmd)
384 {
385 default:
386 return (-EINVAL);
387
388 case TIOCINQ:
389
390 {
391 unsigned long amount;
392
393 if (sk->state == TCP_LISTEN)
394 return (-EINVAL);
395
396 amount = 0;
397 sk->inuse = 1;
398 if (sk->rqueue != NULL)
399 {
400 amount = tcp_readable(sk);
401 }
402 release_sock (sk);
403 PRINTK (("returning %d\n", amount));
404 verify_area (VERIFY_WRITE, (void *)arg, sizeof (unsigned long));
405 put_fs_long (amount, (unsigned long *)arg);
406 return (0);
407 }
408
409 case SIOCATMARK:
410 {
411 struct sk_buff *skb;
412 int answ=0;
413
414 sk->inuse = 1;
415 if (sk->rqueue != NULL)
416 {
417 skb = (struct sk_buff *)sk->rqueue->next;
418 if (sk->copied_seq+1 == skb->h.th->seq && skb->h.th->urg)
419 answ = 1;
420 }
421 release_sock (sk);
422 verify_area (VERIFY_WRITE, (void *) arg, sizeof (unsigned long));
423 put_fs_long (answ, (void *) arg);
424 return (0);
425 }
426
427 case TIOCOUTQ:
428 {
429 unsigned long amount;
430 if (sk->state == TCP_LISTEN)
431 return (-EINVAL);
432 amount = sk->prot->wspace(sk)/2;
433 verify_area (VERIFY_WRITE, (void *)arg, sizeof (unsigned long));
434 put_fs_long (amount, (unsigned long *)arg);
435 return (0);
436 }
437
438 }
439 }
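
A hedged sketch of how user space reaches the three ioctl cases handled above; fd is assumed to be a connected TCP socket, the header names are the modern Linux spellings, and note that this kernel writes a full unsigned long through the result pointer.

#include <stdio.h>
#include <sys/ioctl.h>       /* TIOCINQ, TIOCOUTQ */
#include <linux/sockios.h>   /* SIOCATMARK */

/* Illustrative caller only; error returns from ioctl() are ignored for brevity. */
void show_queue_state(int fd)
{
    unsigned long pending = 0, outq = 0, at_mark = 0;

    ioctl(fd, TIOCINQ, &pending);     /* bytes readable, via tcp_readable()     */
    ioctl(fd, SIOCATMARK, &at_mark);  /* 1 if the next byte is urgent data      */
    ioctl(fd, TIOCOUTQ, &outq);       /* send space, wspace()/2 in this version */
    printf("in=%lu atmark=%lu out=%lu\n", pending, at_mark, outq);
}
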
440
441
442
443 static unsigned short
444 tcp_check (struct tcp_header *th, int len, unsigned long saddr,
445 unsigned long daddr)
446 {
447 unsigned long sum;
448
449 if (saddr == 0) saddr = MY_IP_ADDR;
450 print_th (th);
451 __asm__("\t addl %%ecx,%%ebx\n"
452 "\t adcl %%edx,%%ebx\n"
453 "\t adcl $0, %%ebx\n"
454 : "=b" (sum)
455 : "0" (daddr), "c" (saddr), "d" ((net16(len) << 16) + IPPROTO_TCP*256)
456 : "cx","bx","dx" );
457
458 if (len > 3)
459 {
460 __asm__(
461 "\tclc\n"
462 "1:\n"
463 "\t lodsl\n"
464 "\t adcl %%eax, %%ebx\n"
465 "\t loop 1b\n"
466 "\t adcl $0, %%ebx\n"
467 : "=b" (sum) , "=S" (th)
468 : "0" (sum), "c" (len/4) ,"1" (th)
469 : "ax", "cx", "bx", "si" );
470 }
471
472
473 __asm__(
474 "\t movl %%ebx, %%ecx\n"
475 "\t shrl $16,%%ecx\n"
476 "\t addw %%cx, %%bx\n"
477 "\t adcw $0, %%bx\n"
478 : "=b" (sum)
479 : "0" (sum)
480 : "bx", "cx");
481
482
483 if ((len & 2) != 0)
484 {
485 __asm__("\t lodsw\n"
486 "\t addw %%ax,%%bx\n"
487 "\t adcw $0, %%bx\n"
488 : "=b" (sum), "=S" (th)
489 : "0" (sum) ,"1" (th)
490 : "si", "ax", "bx");
491 }
492
493
494 if ((len & 1) != 0)
495 {
496 __asm__("\t lodsb\n"
497 "\t movb $0,%%ah\n"
498 "\t addw %%ax,%%bx\n"
499 "\t adcw $0, %%bx\n"
500 : "=b" (sum)
501 : "0" (sum) ,"S" (th)
502 : "si", "ax", "bx");
503 }
504
505
506
507 return ((~sum) & 0xffff);
508 }
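
The inline assembly above computes the usual one's-complement checksum over an IP pseudo header followed by the TCP segment. A portable sketch of the same arithmetic, assuming the addresses are passed as their four wire-order bytes; the value returned is the checksum as a host integer, so a caller would store it with htons():

/* Portable sketch only -- not the routine used above.  'len' is the TCP
 * header plus data length in bytes. */
static unsigned short tcp_check_sketch(const unsigned char *src,
                                       const unsigned char *dst,
                                       const unsigned char *seg, int len)
{
    unsigned long sum = 0;
    int i;

    /* pseudo header: source address, destination address, zero/protocol, length */
    for (i = 0; i < 4; i += 2)
    {
        sum += (src[i] << 8) | src[i + 1];
        sum += (dst[i] << 8) | dst[i + 1];
    }
    sum += 6;       /* IPPROTO_TCP, padded with a zero byte  */
    sum += len;     /* TCP length field of the pseudo header */

    /* the segment itself, as 16-bit big-endian words */
    for (i = 0; i + 1 < len; i += 2)
        sum += (seg[i] << 8) | seg[i + 1];
    if (len & 1)
        sum += seg[len - 1] << 8;          /* odd trailing byte, zero padded */

    while (sum >> 16)                      /* fold carries back into 16 bits */
        sum = (sum & 0xffff) + (sum >> 16);
    return ((~sum) & 0xffff);
}
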
509
510
511 static void
512 tcp_send_check (struct tcp_header *th, unsigned long saddr,
513 unsigned long daddr, int len, volatile struct sock *sk)
514 {
515
516 th->check = 0;
517 if (sk && sk->no_check) return;
518 th->check = tcp_check (th, len, saddr, daddr);
519 return;
520 }
521
522 static void
523 tcp_send_partial(volatile struct sock *sk)
524 {
525 struct sk_buff *skb;
526
527 if (sk == NULL || sk->send_tmp == NULL) return;
528
529 skb = sk->send_tmp;
530
531 tcp_send_check (skb->h.th, sk->saddr, sk->daddr,
532 skb->len-(unsigned long)skb->h.th +
533 (unsigned long)(skb+1), sk);
534
535 skb->h.seq = sk->send_seq;
536 if (after (sk->send_seq , sk->window_seq) ||
537 sk->packets_out >= sk->cong_window)
538 {
539 PRINTK (("sk->cong_window = %d, sk->packets_out = %d\n",
540 sk->cong_window, sk->packets_out));
541 PRINTK (("sk->send_seq = %d, sk->window_seq = %d\n",
542 sk->send_seq, sk->window_seq));
543 skb->next = NULL;
544 skb->magic = TCP_WRITE_QUEUE_MAGIC;
545 if (sk->wback == NULL)
546 {
547 sk->wfront=skb;
548 }
549 else
550 {
551 sk->wback->next = skb;
552 }
553 sk->wback = skb;
554 }
555 else
556 {
557 sk->prot->queue_xmit (sk, skb->dev, skb,0);
558 }
559 sk->send_tmp = NULL;
560 }
561
562
563
564
565 static void
566 tcp_send_ack (unsigned long sequence, unsigned long ack,
567 volatile struct sock *sk,
568 struct tcp_header *th, unsigned long daddr)
569 {
570 struct sk_buff *buff;
571 struct tcp_header *t1;
572 struct device *dev=NULL;
573 int tmp;
574
575
576
577
578 buff=sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
579 if (buff == NULL)
580 {
581
582 sk->ack_backlog++;
583 if (sk->timeout != TIME_WRITE && tcp_connected (sk->state))
584 {
585 sk->timeout = TIME_WRITE;
586 sk->time_wait.len = 10;
587 reset_timer ((struct timer *)&sk->time_wait);
588 }
589 return;
590 }
591
592 buff->mem_addr = buff;
593 buff->mem_len = MAX_ACK_SIZE;
594 buff->lock = 0;
595 buff->len=sizeof (struct tcp_header);
596 buff->sk = sk;
597 t1 = (struct tcp_header *)(buff + 1);
598
599 tmp = sk->prot->build_header (buff, sk->saddr, daddr, &dev,
600 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
601 if (tmp < 0)
602 {
603 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
604 return;
605 }
606 buff->len += tmp;
607 t1 = (struct tcp_header *)((char *)t1 +tmp);
608
609 memcpy (t1, th, sizeof (*t1));
610
611
612 t1->dest = th->source;
613 t1->source = th->dest;
614 t1->seq = net32(sequence);
615 t1->ack = 1;
616 sk->window = sk->prot->rspace(sk);
617 t1->window = net16(sk->window);
618 t1->res1=0;
619 t1->res2=0;
620 t1->rst = 0;
621 t1->urg = 0;
622 t1->syn = 0;
623 t1->psh = 0;
624 t1->fin = 0;
625 if (ack == sk->acked_seq)
626 {
627 sk->ack_backlog = 0;
628 sk->bytes_rcv = 0;
629 sk->ack_timed = 0;
630 if (sk->send_head == NULL &&
631 sk->wfront == NULL)
632 {
633 delete_timer((struct timer *)&sk->time_wait);
634 sk->timeout = 0;
635 }
636
637 }
638 t1->ack_seq = net32(ack);
639 t1->doff = sizeof (*t1)/4;
640 tcp_send_check (t1, sk->saddr, daddr, sizeof (*t1), sk);
641 sk->prot->queue_xmit(sk, dev, buff, 1);
642 }
643
644
645 static int
646 tcp_build_header(struct tcp_header *th, volatile struct sock *sk, int push)
647 {
648
649
650 memcpy (th,(void *) &(sk->dummy_th), sizeof (*th));
651 th->seq = net32(sk->send_seq);
652 th->psh = (push == 0) ? 1 : 0;
653 th->doff = sizeof (*th)/4;
654 th->ack = 1;
655 th->fin = 0;
656 sk->ack_backlog = 0;
657 sk->bytes_rcv = 0;
658 sk->ack_timed = 0;
659 th->ack_seq = net32(sk->acked_seq);
660 sk->window = sk->prot->rspace(sk);
661 th->window = net16(sk->window);
662
663 return (sizeof (*th));
664 }
665
666
667
668
669 static int
670 tcp_write (volatile struct sock *sk, unsigned char *from,
671 int len, int nonblock, unsigned flags)
672 {
673 int copied=0;
674 int copy;
675 int tmp;
676 struct sk_buff *skb;
677 unsigned char *buff;
678 struct proto *prot;
679 struct device *dev=NULL;
680
681 PRINTK (("tcp_write (sk=%X, from=%X, len=%d, nonblock=%d, flags=%X)\n",
682 sk, from, len, nonblock, flags));
683
684 prot = sk->prot;
685 while (len > 0)
686 {
687
688 if (sk->err)
689 {
690 if (copied) return (copied);
691 tmp = -sk->err;
692 sk->err = 0;
693 return (tmp);
694 }
695
696
697
698 sk->inuse = 1;
699 if (sk->shutdown & SEND_SHUTDOWN)
700 {
701 release_sock (sk);
702 sk->err = EPIPE;
703 if (copied) return (copied);
704 sk->err = 0;
705 return (-EPIPE);
706 }
707
708 while (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)
709 {
710
711 if (sk->err)
712 {
713 if (copied) return (copied);
714 tmp = -sk->err;
715 sk->err = 0;
716 return (tmp);
717 }
718
719 if (sk->state != TCP_SYN_SENT &&
720 sk->state != TCP_SYN_RECV)
721 {
722 release_sock (sk);
723 PRINTK (("tcp_write: return 1\n"));
724 if (copied) return (copied);
725
726 if (sk->err)
727 {
728 tmp = -sk->err;
729 sk->err = 0;
730 return (tmp);
731 }
732
733 if (sk->keepopen)
734 {
735 send_sig (SIGPIPE, current, 0);
736 }
737 return (-EPIPE);
738 }
739
740 if (nonblock || copied)
741 {
742 release_sock (sk);
743 PRINTK (("tcp_write: return 2\n"));
744 if (copied) return (copied);
745 return (-EAGAIN);
746 }
747
748
749
750
751
752
753
754 release_sock (sk);
755 cli();
756 if (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT &&
757 sk->err == 0)
758 {
759 interruptible_sleep_on (sk->sleep);
760 if (current->signal & ~current->blocked)
761 {
762 sti();
763 PRINTK (("tcp_write: return 3\n"));
764 if (copied) return (copied);
765 return (-ERESTARTSYS);
766 }
767 }
768 sti();
769 sk->inuse = 1;
770 }
771
772
773 if (sk->send_tmp != NULL)
774 {
775
776
777 skb = sk->send_tmp;
778 if (!(flags & MSG_OOB))
779 {
780 copy = min (sk->mss - skb->len + 128 + prot->max_header, len);
781
782
783 if (copy <= 0)
784 copy = 0;
785
786 memcpy_fromfs ((unsigned char *)(skb+1) + skb->len, from, copy);
787 skb->len += copy;
788 from += copy;
789 copied += copy;
790 len -= copy;
791 sk->send_seq += copy;
792 }
793
794 if (skb->len - (unsigned long)skb->h.th +
795 (unsigned long)(skb+1) >= sk->mss
796 || (flags & MSG_OOB))
797 {
798 tcp_send_partial (sk);
799 }
800 continue;
801
802 }
803
804
805
806
807 copy = min (sk->mtu, diff(sk->window_seq, sk->send_seq));
808
809
810 if (copy < 200 || copy > sk->mtu) copy = sk->mtu;
811 copy = min (copy, len);
812
813
814 if (sk->packets_out && copy < sk->mss && !(flags & MSG_OOB))
815 {
816
817 release_sock (sk);
818 skb=prot->wmalloc (sk,
819 sk->mss + 128 + prot->max_header + sizeof (*skb),
820 0, GFP_KERNEL);
821 sk->inuse = 1;
822 sk->send_tmp = skb;
823 if (skb != NULL)
824 skb->mem_len = sk->mss + 128 + prot->max_header+sizeof (*skb);
825 }
826 else
827 {
828
829 release_sock (sk);
830 skb=prot->wmalloc (sk, copy + prot->max_header+sizeof (*skb),0,
831 GFP_KERNEL);
832 sk->inuse = 1;
833 if (skb != NULL)
834 skb->mem_len = copy+prot->max_header+sizeof (*skb);
835 }
836
837
838 if (skb == NULL)
839 {
840 if (nonblock || copied)
841 {
842 release_sock (sk);
843 PRINTK (("tcp_write: return 4\n"));
844 if (copied) return (copied);
845 return (-EAGAIN);
846 }
847
848
849 tmp = sk->wmem_alloc;
850 release_sock (sk);
851
852
853 cli ();
854 if (tmp <= sk->wmem_alloc
855 && (sk->state == TCP_ESTABLISHED || sk->state == TCP_CLOSE_WAIT )
856 && sk->err == 0)
857 {
858 interruptible_sleep_on (sk->sleep);
859 if (current->signal & ~current->blocked)
860 {
861 sti();
862 PRINTK (("tcp_write: return 5\n"));
863 if (copied) return (copied);
864 return (-ERESTARTSYS);
865 }
866 }
867 sk->inuse = 1;
868 sti();
869 continue;
870 }
871
872 skb->mem_addr = skb;
873 skb->len = 0;
874 skb->sk = sk;
875 skb->lock = 0;
876 skb->free = 0;
877
878 buff =(unsigned char *)( skb+1);
879
880
881
882 tmp = prot->build_header (skb, sk->saddr, sk->daddr, &dev,
883 IPPROTO_TCP, sk->opt, skb->mem_len);
884 if (tmp < 0 )
885 {
886 prot->wfree (sk, skb->mem_addr, skb->mem_len);
887 release_sock (sk);
888 PRINTK (("tcp_write: return 6\n"));
889 if (copied) return (copied);
890 return (tmp);
891 }
892 skb->len += tmp;
893 skb->dev = dev;
894 buff+=tmp;
895 skb->h.th =(struct tcp_header *) buff;
896 tmp = tcp_build_header((struct tcp_header *)buff, sk, len-copy);
897
898 if (tmp < 0)
899 {
900 prot->wfree (sk, skb->mem_addr, skb->mem_len);
901 release_sock (sk);
902 PRINTK (("tcp_write: return 7\n"));
903 if (copied) return (copied);
904 return (tmp);
905 }
906
907 if (flags & MSG_OOB)
908 {
909 ((struct tcp_header *)buff)->urg = 1;
910 ((struct tcp_header *)buff)->urg_ptr = net16(copy);
911 }
912 skb->len += tmp;
913 memcpy_fromfs (buff+tmp, from, copy);
914
915 from += copy;
916 copied += copy;
917 len -= copy;
918 skb->len += copy;
919 skb->free = 0;
920 sk->send_seq += copy;
921
922 if (sk->send_tmp != NULL)
923 {
924 continue;
925 }
926
927 tcp_send_check ((struct tcp_header *)buff, sk->saddr, sk->daddr,
928 copy +sizeof (struct tcp_header), sk);
929
930
931 skb->h.seq = sk->send_seq;
932 if (after (sk->send_seq , sk->window_seq) ||
933 sk->packets_out >= sk->cong_window)
934 {
935 PRINTK (("sk->cong_window = %d, sk->packets_out = %d\n",
936 sk->cong_window, sk->packets_out));
937 PRINTK (("sk->send_seq = %d, sk->window_seq = %d\n",
938 sk->send_seq, sk->window_seq));
939 skb->next = NULL;
940 skb->magic = TCP_WRITE_QUEUE_MAGIC;
941 if (sk->wback == NULL)
942 {
943 sk->wfront=skb;
944 }
945 else
946 {
947 sk->wback->next = skb;
948 }
949 sk->wback = skb;
950 }
951 else
952 {
953 prot->queue_xmit (sk, dev, skb,0);
954 }
955 }
956 sk->err = 0;
957 release_sock (sk);
958 PRINTK (("tcp_write: return 8\n"));
959 return (copied);
960 }
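
A hedged sketch of the user-space calls that drive the two paths of tcp_write() above: ordinary writes take the normal path, while send() with MSG_OOB reaches the branch that sets th->urg and th->urg_ptr. fd is assumed to be a connected TCP socket.

#include <sys/socket.h>
#include <unistd.h>

/* Illustrative caller only. */
int write_with_urgent(int fd)
{
    static const char payload[] = "ordinary data";

    if (write(fd, payload, sizeof (payload) - 1) < 0)   /* normal tcp_write() path  */
        return (-1);
    return (send(fd, "!", 1, MSG_OOB));                 /* urgent-data branch above */
}
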
961
962 static int
963 tcp_sendto (volatile struct sock *sk, unsigned char *from,
964 int len, int nonblock, unsigned flags,
965 struct sockaddr_in *addr, int addr_len)
966 {
967 struct sockaddr_in sin;
968 if (addr_len < sizeof (sin))
969 return (-EINVAL);
970 memcpy_fromfs (&sin, addr, sizeof (sin));
971 if (sin.sin_family && sin.sin_family != AF_INET)
972 return (-EINVAL);
973 if (sin.sin_port != sk->dummy_th.dest)
974 return (-EINVAL);
975 if (sin.sin_addr.s_addr != sk->daddr)
976 return (-EINVAL);
977 return (tcp_write (sk, from, len, nonblock, flags));
978 }
979
980 static void
981 tcp_read_wakeup(volatile struct sock *sk)
982 {
983 int tmp;
984 struct device *dev = NULL;
985 struct tcp_header *t1;
986 struct sk_buff *buff;
987
988 if (!sk->ack_backlog ) return;
989 PRINTK (("in tcp read wakeup\n"));
990
991
992
993
994
995
996
997 buff=sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
998 if (buff == NULL)
999 {
1000
1001 sk->timeout = TIME_WRITE;
1002 sk->time_wait.len = 10;
1003 reset_timer((struct timer *) &sk->time_wait);
1004 return;
1005 }
1006
1007 buff->mem_addr = buff;
1008 buff->mem_len = MAX_ACK_SIZE;
1009 buff->lock = 0;
1010 buff->len=sizeof (struct tcp_header);
1011 buff->sk = sk;
1012
1013
1014 tmp = sk->prot->build_header (buff, sk->saddr, sk->daddr, &dev,
1015 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
1016 if (tmp < 0)
1017 {
1018 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
1019 return;
1020 }
1021
1022 buff->len += tmp;
1023 t1 = (struct tcp_header *)((char *)(buff+1) +tmp);
1024
1025 memcpy (t1,(void *) &sk->dummy_th, sizeof (*t1));
1026 t1->seq = net32(sk->send_seq);
1027 t1->ack = 1;
1028 t1->res1=0;
1029 t1->res2=0;
1030 t1->rst = 0;
1031 t1->urg = 0;
1032 t1->syn = 0;
1033 t1->psh = 0;
1034 sk->ack_backlog = 0;
1035 sk->bytes_rcv = 0;
1036 sk->window = sk->prot->rspace(sk);
1037 t1->window = net16(sk->window);
1038 t1->ack_seq = net32(sk->acked_seq);
1039 t1->doff = sizeof (*t1)/4;
1040 tcp_send_check (t1, sk->saddr, sk->daddr, sizeof (*t1), sk);
1041 sk->prot->queue_xmit(sk, dev, buff, 1);
1042 }
1043
1044
1045
1046
1047
1048
1049 static void
1050 cleanup_rbuf (volatile struct sock *sk)
1051 {
1052 PRINTK (("cleaning rbuf for sk=%X\n",sk));
1053
1054
1055 while (sk->rqueue != NULL )
1056 {
1057 struct sk_buff *skb;
1058 skb=(struct sk_buff *)sk->rqueue->next;
1059 if (!skb->used) break;
1060 if (sk->rqueue == skb)
1061 {
1062 sk->rqueue = NULL;
1063 }
1064 else
1065 {
1066 skb->next->prev = skb->prev;
1067 skb->prev->next = skb->next;
1068 }
1069 skb->sk = sk;
1070 kfree_skb (skb, FREE_READ);
1071 }
1072
1073
1074
1075
1076 PRINTK (("sk->window left = %d, sk->prot->rspace(sk)=%d\n",
1077 sk->window - sk->bytes_rcv, sk->prot->rspace(sk)));
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087 sk->ack_backlog ++;
1088 if ((sk->prot->rspace(sk) >
1089 (sk->window - sk->bytes_rcv + sk->mtu)))
1090 {
1091
1092 tcp_read_wakeup (sk);
1093 }
1094 else
1095 {
1096
1097 if ( before (jiffies + TCP_ACK_TIME, sk->time_wait.when))
1098 {
1099 sk->time_wait.len = TCP_ACK_TIME;
1100 sk->timeout = TIME_WRITE;
1101 reset_timer ((struct timer *)&sk->time_wait);
1102 }
1103 }
1104 }
1105
1106
1107 static int
1108 tcp_read_urg(volatile struct sock * sk, int nonblock,
1109 unsigned char *to, int len, unsigned flags)
1110 {
1111 int copied = 0;
1112
1113 struct sk_buff *skb;
1114 PRINTK (("tcp_read_urg(sk=%X, to=%X, len=%d, flags=%X)\n",
1115 sk, to, len, flags));
1116
1117 while (len > 0)
1118 {
1119 sk->inuse = 1;
1120 while (sk->urg==0 || sk->rqueue == NULL)
1121 {
1122 if (sk->err)
1123 {
1124 int tmp;
1125 release_sock (sk);
1126 if (copied) return (copied);
1127 tmp = -sk->err;
1128 sk->err = 0;
1129 return (tmp);
1130 }
1131
1132 if (sk->state == TCP_CLOSE || sk->done)
1133 {
1134 release_sock (sk);
1135 if (copied) return (copied);
1136 if (!sk->done)
1137 {
1138 sk->done = 1;
1139 return (0);
1140 }
1141 return (-ENOTCONN);
1142 }
1143
1144 if (sk->shutdown & RCV_SHUTDOWN)
1145 {
1146 release_sock(sk);
1147 if (copied == 0)
1148 sk->done = 1;
1149 return (copied);
1150 }
1151
1152 if (nonblock || copied)
1153 {
1154 release_sock (sk);
1155 if (copied) return (copied);
1156 return (-EAGAIN);
1157 }
1158
1159
1160 release_sock (sk);
1161 cli();
1162 if ((sk->urg == 0 || sk->rqueue == NULL) && sk->err == 0
1163 && !(sk->shutdown & RCV_SHUTDOWN) )
1164 {
1165 interruptible_sleep_on (sk->sleep);
1166 if (current->signal & ~current->blocked)
1167 {
1168 sti();
1169 if (copied) return (copied);
1170 return (-ERESTARTSYS);
1171 }
1172 }
1173 sti();
1174 sk->inuse = 1;
1175 }
1176
1177 skb = (struct sk_buff *)sk->rqueue->next;
1178 do {
1179 int amt;
1180 if (skb->h.th->urg && !skb->urg_used)
1181 {
1182 if (skb->h.th->urg_ptr == 0)
1183 {
1184 skb->h.th->urg_ptr = net16(skb->len);
1185 }
1186 amt = min(net16(skb->h.th->urg_ptr),len);
1187 verify_area (VERIFY_WRITE, to, amt);
1188 memcpy_tofs (to, (unsigned char *)(skb->h.th) +
1189 skb->h.th->doff*4, amt);
1190
1191 if (!(flags & MSG_PEEK))
1192 {
1193 skb->urg_used = 1;
1194 sk->urg --;
1195 }
1196 release_sock (sk);
1197 copied += amt;
1198 return (copied);
1199 }
1200 skb = (struct sk_buff *)skb->next;
1201 } while (skb != sk->rqueue->next);
1202 }
1203 sk->urg = 0;
1204 release_sock(sk);
1205 return (0);
1206 }
1207
1208
1209 static int
1210 tcp_read (volatile struct sock *sk, unsigned char *to,
1211 int len, int nonblock, unsigned flags)
1212 {
1213 int copied=0;
1214 struct sk_buff *skb;
1215 unsigned long offset;
1216 unsigned long used;
1217
1218 if (len == 0) return (0);
1219 if (len < 0)
1220 {
1221 return (-EINVAL);
1222 }
1223
1224
1225 if (sk->state == TCP_LISTEN) return (-ENOTCONN);
1226
1227
1228 if ((flags & MSG_OOB))
1229 return (tcp_read_urg (sk, nonblock, to, len, flags));
1230
1231
1232 sk->inuse = 1;
1233 if (sk->rqueue != NULL)
1234 skb=(struct sk_buff *)sk->rqueue->next;
1235 else
1236 skb = NULL;
1237
1238 PRINTK(("tcp_read (sk=%X, to=%X, len=%d, nonblock=%d, flags=%X)\n",
1239 sk, to, len, nonblock, flags));
1240
1241 while ( len > 0)
1242 {
1243 while ( skb == NULL || before (sk->copied_seq+1, skb->h.th->seq) ||
1244 skb->used)
1245
1246 {
1247
1248 PRINTK(("skb = %X:\n",skb));
1249
1250 cleanup_rbuf(sk);
1251
1252 if (sk->err)
1253 {
1254 int tmp;
1255 release_sock (sk);
1256 if (copied)
1257 {
1258 PRINTK (("tcp_read: returning %d\n", copied));
1259 return (copied);
1260 }
1261 tmp = -sk->err;
1262 sk->err = 0;
1263 return (tmp);
1264 }
1265
1266 if (sk->state == TCP_CLOSE)
1267 {
1268 release_sock (sk);
1269 if (copied)
1270 {
1271 PRINTK (("tcp_read: returning %d\n", copied));
1272 return (copied);
1273 }
1274 if (!sk->done)
1275 {
1276 sk->done = 1;
1277 return (0);
1278 }
1279 return (-ENOTCONN);
1280 }
1281
1282 if (sk->shutdown & RCV_SHUTDOWN)
1283 {
1284 release_sock (sk);
1285 if (copied == 0) sk->done = 1;
1286 PRINTK (("tcp_read: returning %d\n", copied));
1287 return (copied);
1288 }
1289
1290 if (nonblock || copied)
1291 {
1292 release_sock (sk);
1293 if (copied)
1294 {
1295 PRINTK (("tcp_read: returning %d\n", copied));
1296 return (copied);
1297 }
1298 return (-EAGAIN);
1299 }
1300
1301 if ((flags & MSG_PEEK) && copied != 0)
1302 {
1303 release_sock (sk);
1304 PRINTK (("tcp_read: returning %d\n", copied));
1305 return (copied);
1306 }
1307
1308 PRINTK (("tcp_read about to sleep. state = %d\n",sk->state));
1309
1310 release_sock (sk);
1311
1312 cli();
1313 if ( sk->shutdown & RCV_SHUTDOWN || sk->err != 0)
1314 {
1315 sk->inuse = 1;
1316 sti();
1317 continue;
1318 }
1319
1320 if ( sk->rqueue == NULL ||
1321 before (sk->copied_seq+1, sk->rqueue->next->h.th->seq) )
1322 {
1323 interruptible_sleep_on (sk->sleep);
1324 if (current->signal & ~current->blocked)
1325 {
1326 sti ();
1327 if (copied)
1328 {
1329 PRINTK (("tcp_read: returning %d\n", copied));
1330 return (copied);
1331 }
1332
1333 return (-ERESTARTSYS);
1334 }
1335 }
1336 sti();
1337 PRINTK (("tcp_read woke up. \n"));
1338
1339 sk->inuse = 1;
1340
1341 if (sk->rqueue != NULL)
1342 skb=(struct sk_buff *)sk->rqueue->next;
1343 else
1344 skb = NULL;
1345
1346 }
1347
1348
1349
1350
1351 offset = sk->copied_seq+1 - skb->h.th->seq;
1352
1353 if (skb->h.th->syn) offset --;
1354 if (offset < skb->len )
1355 {
1356
1357
1358 if (skb->h.th->urg)
1359 {
1360 if (skb->urg_used)
1361 {
1362 sk->copied_seq += net16(skb->h.th->urg_ptr);
1363 offset += net16(skb->h.th->urg_ptr);
1364 if (offset >= skb->len)
1365 {
1366 skb->used = 1;
1367 skb = (struct sk_buff *)skb->next;
1368 continue;
1369 }
1370 }
1371 else
1372 {
1373 release_sock (sk);
1374 if (copied) return (copied);
1375 return (-EIO);
1376 }
1377 }
1378 used = min(skb->len - offset, len);
1379
1380 verify_area (VERIFY_WRITE, to, used);
1381 memcpy_tofs(to, ((unsigned char *)skb->h.th) +
1382 skb->h.th->doff*4 +
1383 offset,
1384 used);
1385 copied += used;
1386 len -= used;
1387 to += used;
1388 if (!(flags & MSG_PEEK))
1389 sk->copied_seq += used;
1390
1391
1392
1393
1394 if (!(flags & MSG_PEEK) &&
1395 (!skb->h.th->urg || skb->urg_used) &&
1396 (used + offset >= skb->len) )
1397 skb->used = 1;
1398
1399
1400
1401 if ( skb->h.th->psh || skb->h.th->urg)
1402 {
1403 break;
1404 }
1405 }
1406 else
1407 {
1408 skb->used = 1;
1409 }
1410 skb=(struct sk_buff *)skb->next;
1411 }
1412 cleanup_rbuf (sk);
1413 release_sock (sk);
1414 PRINTK (("tcp_read: returning %d\n", copied));
1415 if (copied == 0 && nonblock) return (-EAGAIN);
1416 return (copied);
1417 }
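
On the receive side, a hedged sketch of the flags handled above: MSG_PEEK copies data without advancing sk->copied_seq, so the same bytes can be read again, while MSG_OOB diverts the call into tcp_read_urg(). fd is again assumed to be a connected TCP socket.

#include <sys/socket.h>

/* Illustrative caller only. */
int peek_then_consume(int fd, char *buf, int len)
{
    int n = recv(fd, buf, len, MSG_PEEK);   /* look, but leave the data queued  */
    if (n <= 0)
        return (n);
    return (recv(fd, buf, n, 0));           /* now actually consume those bytes */
}
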
1418
1419
1420
1421
1422
1423 void
1424 tcp_shutdown (volatile struct sock *sk, int how)
1425 {
1426
1427
1428 struct sk_buff *buff;
1429 struct tcp_header *t1,*th;
1430 struct proto *prot;
1431 int tmp;
1432 struct device *dev=NULL;
1433
1434
1435
1436
1437
1438
1439 if (sk->state == TCP_FIN_WAIT1 ||
1440 sk->state == TCP_FIN_WAIT2)
1441 return;
1442
1443 if (!(how & SEND_SHUTDOWN)) return;
1444 sk->inuse = 1;
1445
1446
1447 if (sk->send_tmp)
1448 tcp_send_partial(sk);
1449
1450 prot = (struct proto *)sk->prot;
1451 th=(struct tcp_header *)&sk->dummy_th;
1452 release_sock (sk);
1453 buff=prot->wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL);
1454 if (buff == NULL)
1455 {
1456 return;
1457 }
1458 sk->inuse = 1;
1459
1460
1461 PRINTK(("tcp_shutdown_send buff = %X\n", buff));
1462 buff->mem_addr = buff;
1463 buff->mem_len = MAX_RESET_SIZE;
1464 buff->lock = 0;
1465 buff->sk = sk;
1466 buff->len = sizeof (*t1);
1467
1468 t1=(struct tcp_header *)(buff + 1);
1469
1470 tmp = prot->build_header (buff,sk->saddr, sk->daddr, &dev,
1471 IPPROTO_TCP, sk->opt,
1472 sizeof(struct tcp_header));
1473 if (tmp < 0)
1474 {
1475 prot->wfree (sk,buff->mem_addr, buff->mem_len);
1476 release_sock(sk);
1477 PRINTK (("Unable to build header for fin.\n"));
1478 return;
1479 }
1480
1481 t1 = (struct tcp_header *)((char *)t1 +tmp);
1482 buff ->len += tmp;
1483 buff->dev = dev;
1484
1485 memcpy (t1, th, sizeof (*t1));
1486
1487 t1->seq = net32(sk->send_seq);
1488
1489 sk->send_seq++;
1490 buff->h.seq = sk->send_seq;
1491 t1->ack = 1;
1492
1493 t1->ack_seq = net32(sk->acked_seq);
1494 t1->window = net16(sk->prot->rspace(sk));
1495 t1->fin = 1;
1496 t1->rst = 0;
1497
1498 t1->doff = sizeof (*t1)/4;
1499 tcp_send_check (t1, sk->saddr, sk->daddr, sizeof (*t1), sk);
1500
1501
1502
1503 if (sk->wback != NULL)
1504 {
1505 buff->next = NULL;
1506 sk->wback->next = buff;
1507 sk->wback = buff;
1508 buff->magic = TCP_WRITE_QUEUE_MAGIC;
1509 }
1510 else
1511 {
1512 sk->prot->queue_xmit (sk, dev, buff,0);
1513 }
1514
1515 if (sk->state == TCP_ESTABLISHED)
1516 {
1517 sk->state = TCP_FIN_WAIT1;
1518 }
1519 else
1520 {
1521 sk->state = TCP_FIN_WAIT2;
1522 }
1523 release_sock(sk);
1524 }
1525
1526
1527 static int
1528 tcp_recvfrom (volatile struct sock *sk, unsigned char *to,
1529 int to_len, int nonblock, unsigned flags,
1530 struct sockaddr_in *addr, int *addr_len)
1531 {
1532 int result = tcp_read(sk, to, to_len, nonblock, flags);
1533 struct sockaddr_in sin;
1534 int len;
1535 if (result < 0)
1536 return (result);
1537 len = get_fs_long(addr_len);
1538 if (len > sizeof (sin))
1539 len = sizeof (sin);
1540 sin.sin_family = AF_INET;
1541 sin.sin_port = sk->dummy_th.dest;
1542 sin.sin_addr.s_addr = sk->daddr;
1543 verify_area (VERIFY_WRITE, addr, len);
1544 memcpy_tofs (addr, &sin, len);
1545 verify_area (VERIFY_WRITE, addr_len, sizeof (len));
1546 put_fs_long (len, addr_len);
1547 return (result);
1548 }
1549
1550
1551 static void
1552 tcp_reset(unsigned long saddr, unsigned long daddr, struct tcp_header *th,
1553 struct proto *prot, struct options *opt, struct device *dev)
1554 {
1555
1556
1557 struct sk_buff *buff;
1558 struct tcp_header *t1;
1559 int tmp;
1560 buff=prot->wmalloc(NULL, MAX_RESET_SIZE,1, GFP_ATOMIC);
1561 if (buff == NULL) return;
1562
1563 PRINTK(("tcp_reset buff = %X\n", buff));
1564 buff->mem_addr = buff;
1565 buff->mem_len = MAX_RESET_SIZE;
1566 buff->lock = 0;
1567 buff->len = sizeof (*t1);
1568 buff->sk = NULL;
1569 buff->dev = dev;
1570
1571 t1=(struct tcp_header *)(buff + 1);
1572
1573 tmp = prot->build_header (buff, saddr, daddr, &dev, IPPROTO_TCP, opt,
1574 sizeof(struct tcp_header));
1575 if (tmp < 0)
1576 {
1577 prot->wfree (NULL,buff->mem_addr, buff->mem_len);
1578 return;
1579 }
1580 t1 = (struct tcp_header *)((char *)t1 +tmp);
1581 buff->len += tmp;
1582 memcpy (t1, th, sizeof (*t1));
1583
1584 t1->dest = th->source;
1585 t1->source = th->dest;
1586 t1->seq = th->ack_seq;
1587
1588 t1->rst = 1;
1589 t1->ack = 0;
1590 t1->syn = 0;
1591 t1->urg = 0;
1592 t1->fin = 0;
1593 t1->psh = 0;
1594 t1->doff = sizeof (*t1)/4;
1595 tcp_send_check (t1, saddr, daddr, sizeof (*t1), NULL);
1596 prot->queue_xmit(NULL, dev, buff, 1);
1597
1598 }
1599
1600
1601
1602
1603
1604
1605
1606 static void
1607 tcp_conn_request(volatile struct sock *sk, struct sk_buff *skb,
1608 unsigned long daddr,
1609 unsigned long saddr, struct options *opt, struct device *dev)
1610 {
1611 struct sk_buff *buff;
1612 struct tcp_header *t1;
1613 unsigned char *ptr;
1614 volatile struct sock *newsk;
1615 struct tcp_header *th;
1616 int tmp;
1617 th = skb->h.th;
1618
1619 PRINTK (("tcp_conn_request (sk = %X, skb = %X, daddr = %X, saddr = %X, \n"
1620 " opt = %X, dev = %X)\n",
1621 sk, skb, daddr, saddr, opt, dev));
1622
1623
1624 if (!sk->dead)
1625 {
1626 wake_up(sk->sleep);
1627 }
1628 else
1629 {
1630 PRINTK (("tcp_conn_request on dead socket\n"));
1631 tcp_reset (daddr, saddr, th, sk->prot, opt, dev);
1632 kfree_skb (skb, FREE_READ);
1633 return;
1634 }
1635
1636
1637
1638 if (sk->ack_backlog >= sk->max_ack_backlog)
1639 {
1640 kfree_skb (skb, FREE_READ);
1641 return;
1642 }
1643
1644
1645
1646
1647
1648
1649
1650 newsk = kmalloc(sizeof (struct sock), GFP_ATOMIC);
1651 if (newsk == NULL)
1652 {
1653
1654 kfree_skb (skb, FREE_READ);
1655 return;
1656 }
1657
1658
1659 PRINTK (("newsk = %X\n", newsk));
1660 memcpy ((void *)newsk, (void *)sk, sizeof (*newsk));
1661 newsk->wback = NULL;
1662 newsk->wfront = NULL;
1663 newsk->rqueue = NULL;
1664 newsk->send_head = NULL;
1665 newsk->send_tail = NULL;
1666 newsk->back_log = NULL;
1667 newsk->rtt = TCP_CONNECT_TIME;
1668 newsk->blog = 0;
1669 newsk->intr = 0;
1670 newsk->proc = 0;
1671 newsk->done = 0;
1672 newsk->send_tmp = NULL;
1673 newsk->pair = NULL;
1674 newsk->wmem_alloc = 0;
1675 newsk->rmem_alloc = 0;
1676
1677 newsk->max_unacked = MAX_WINDOW - TCP_WINDOW_DIFF;
1678
1679 newsk->err = 0;
1680 newsk->shutdown = 0;
1681 newsk->ack_backlog = 0;
1682 newsk->acked_seq = skb->h.th->seq+1;
1683 newsk->fin_seq = skb->h.th->seq;
1684 newsk->copied_seq = skb->h.th->seq;
1685 newsk->state = TCP_SYN_RECV;
1686 newsk->timeout = 0;
1687 newsk->send_seq = timer_seq*SEQ_TICK-seq_offset;
1688 newsk->rcv_ack_seq = newsk->send_seq;
1689 newsk->urg =0;
1690 newsk->retransmits = 0;
1691 newsk->destroy = 0;
1692 newsk->time_wait.sk = newsk;
1693 newsk->time_wait.next = NULL;
1694 newsk->dummy_th.source = skb->h.th->dest;
1695 newsk->dummy_th.dest = skb->h.th->source;
1696
1697 newsk->daddr=saddr;
1698 newsk->saddr=daddr;
1699
1700 put_sock (newsk->num,newsk);
1701 newsk->dummy_th.res1=0;
1702 newsk->dummy_th.doff=6;
1703 newsk->dummy_th.fin=0;
1704 newsk->dummy_th.syn=0;
1705 newsk->dummy_th.rst=0;
1706 newsk->dummy_th.psh=0;
1707 newsk->dummy_th.ack=0;
1708 newsk->dummy_th.urg=0;
1709 newsk->dummy_th.res2=0;
1710 newsk->acked_seq = skb->h.th->seq+1;
1711 newsk->copied_seq = skb->h.th->seq;
1712
1713 if (skb->h.th->doff == 5)
1714 {
1715 newsk->mtu=576-HEADER_SIZE;
1716 }
1717 else
1718 {
1719 ptr = (unsigned char *)(skb->h.th + 1);
1720 if (ptr[0] != 2 || ptr[1] != 4)
1721 {
1722 newsk->mtu=576-HEADER_SIZE;
1723 }
1724 else
1725 {
1726 newsk->mtu = min (ptr[2]*256+ptr[3]-HEADER_SIZE,
1727 dev->mtu-HEADER_SIZE);
1728 }
1729 }
1730
1731 buff=newsk->prot->wmalloc(newsk,MAX_SYN_SIZE,1, GFP_ATOMIC);
1732 if (buff == NULL)
1733 {
1734 sk->err = -ENOMEM;
1735 newsk->dead = 1;
1736 release_sock (newsk);
1737 kfree_skb (skb, FREE_READ);
1738 return;
1739 }
1740
1741 buff->lock = 0;
1742 buff->mem_addr = buff;
1743 buff->mem_len = MAX_SYN_SIZE;
1744 buff->len=sizeof (struct tcp_header)+4;
1745 buff->sk = newsk;
1746
1747 t1=(struct tcp_header *)(buff + 1);
1748
1749
1750 tmp = sk->prot->build_header (buff, newsk->saddr, newsk->daddr, &dev,
1751 IPPROTO_TCP, NULL, MAX_SYN_SIZE);
1752
1753
1754 if (tmp < 0)
1755 {
1756 sk->err = tmp;
1757 sk->prot->wfree(newsk, buff->mem_addr, buff->mem_len);
1758 newsk->dead = 1;
1759 release_sock (newsk);
1760 skb->sk = sk;
1761 kfree_skb (skb, FREE_READ);
1762 return;
1763 }
1764
1765 buff->len += tmp;
1766 t1 = (struct tcp_header *)((char *)t1 +tmp);
1767
1768 memcpy (t1, skb->h.th, sizeof (*t1));
1769 buff->h.seq = newsk->send_seq;
1770
1771 t1->dest = skb->h.th->source;
1772 t1->source = newsk->dummy_th.source;
1773 t1->seq = net32(newsk->send_seq++);
1774 t1->ack = 1;
1775 newsk->window = newsk->prot->rspace(newsk);
1776 t1->window = net16(newsk->window);
1777 t1->res1=0;
1778 t1->res2=0;
1779 t1->rst = 0;
1780 t1->urg = 0;
1781 t1->psh = 0;
1782 t1->syn = 1;
1783 t1->ack_seq = net32(skb->h.th->seq+1);
1784 t1->doff = sizeof (*t1)/4+1;
1785
1786 ptr = (unsigned char *)(t1+1);
1787 ptr[0]=2;
1788 ptr[1]=4;
1789 ptr[2]=((dev->mtu - HEADER_SIZE) >> 8) & 0xff;
1790 ptr[3]=(dev->mtu - HEADER_SIZE) & 0xff;
1791
1792 tcp_send_check (t1, daddr, saddr, sizeof (*t1)+4, newsk);
1793 newsk->prot->queue_xmit(newsk, dev, buff, 0);
1794
1795 newsk->time_wait.len = TCP_CONNECT_TIME;
1796 PRINTK (("newsk->time_wait.sk = %X\n", newsk->time_wait.sk));
1797 reset_timer ((struct timer *)&newsk->time_wait);
1798 skb->sk = newsk;
1799
1800 sk->rmem_alloc -= skb->mem_len;
1801 newsk->rmem_alloc += skb->mem_len;
1802
1803 if (sk->rqueue == NULL)
1804 {
1805 skb->next = skb;
1806 skb->prev = skb;
1807 sk->rqueue = skb;
1808 }
1809 else
1810 {
1811 skb->next = sk->rqueue;
1812 skb->prev = sk->rqueue->prev;
1813 sk->rqueue->prev = skb;
1814 skb->prev->next = skb;
1815 }
1816 sk->ack_backlog++;
1817 release_sock (newsk);
1818 }
1819
1820 static void
1821 tcp_close (volatile struct sock *sk, int timeout)
1822 {
1823
1824
1825 struct sk_buff *buff;
1826 int need_reset = 0;
1827 struct tcp_header *t1,*th;
1828 struct proto *prot;
1829 struct device *dev=NULL;
1830 int tmp;
1831 PRINTK (("tcp_close ((struct sock *)%X, %d)\n",sk, timeout));
1832 sk->inuse = 1;
1833 sk->keepopen = 1;
1834 sk->shutdown = SHUTDOWN_MASK;
1835
1836 if (!sk->dead)
1837 wake_up (sk->sleep);
1838
1839
1840
1841 if (sk->rqueue != NULL)
1842 {
1843 struct sk_buff *skb;
1844 struct sk_buff *skb2;
1845 skb = sk->rqueue;
1846 do {
1847 skb2=(struct sk_buff *)skb->next;
1848
1849 if (skb->len > 0 &&
1850 after (skb->h.th->seq + skb->len + 1, sk->copied_seq))
1851 need_reset = 1;
1852 kfree_skb (skb, FREE_READ);
1853 skb=skb2;
1854 } while (skb != sk->rqueue);
1855 }
1856 sk->rqueue = NULL;
1857
1858
1859 if (sk->send_tmp)
1860 {
1861 tcp_send_partial (sk);
1862 }
1863
1864 switch (sk->state)
1865 {
1866
1867 case TCP_FIN_WAIT1:
1868 case TCP_FIN_WAIT2:
1869 case TCP_LAST_ACK:
1870
1871 sk->time_wait.len = 4*sk->rtt;
1872 sk->timeout = TIME_CLOSE;
1873 reset_timer ((struct timer *)&sk->time_wait);
1874 if (timeout)
1875 tcp_time_wait(sk);
1876 release_sock (sk);
1877 break;
1878
1879 case TCP_TIME_WAIT:
1880 if (timeout)
1881 sk->state = TCP_CLOSE;
1882 release_sock (sk);
1883 return;
1884
1885 case TCP_LISTEN:
1886 sk->state = TCP_CLOSE;
1887 release_sock(sk);
1888 return;
1889
1890 case TCP_CLOSE:
1891
1892 release_sock(sk);
1893 return;
1894
1895
1896 case TCP_CLOSE_WAIT:
1897 case TCP_ESTABLISHED:
1898 case TCP_SYN_SENT:
1899 case TCP_SYN_RECV:
1900
1901 prot = (struct proto *)sk->prot;
1902 th=(struct tcp_header *)&sk->dummy_th;
1903
1904 buff=prot->wmalloc(sk, MAX_FIN_SIZE,1, GFP_ATOMIC);
1905 if (buff == NULL)
1906 {
1907
1908 if (sk->state != TCP_CLOSE_WAIT)
1909 sk->state = TCP_ESTABLISHED;
1910 sk->timeout = TIME_CLOSE;
1911 sk->time_wait.len = 100;
1912 reset_timer ((struct timer *)&sk->time_wait);
1913 return;
1914 }
1915
1916 buff->lock = 0;
1917 buff->mem_addr = buff;
1918 buff->mem_len = MAX_FIN_SIZE;
1919 buff->sk = sk;
1920 buff->len = sizeof (*t1);
1921 t1=(struct tcp_header *)(buff + 1);
1922
1923 tmp = prot->build_header (buff,sk->saddr, sk->daddr, &dev,
1924 IPPROTO_TCP, sk->opt,
1925 sizeof(struct tcp_header));
1926 if (tmp < 0)
1927 {
1928 prot->wfree (sk,buff->mem_addr, buff->mem_len);
1929 PRINTK (("Unable to build header for fin.\n"));
1930 release_sock(sk);
1931 return;
1932 }
1933
1934 t1 = (struct tcp_header *)((char *)t1 +tmp);
1935 buff ->len += tmp;
1936 buff->dev = dev;
1937 memcpy (t1, th, sizeof (*t1));
1938 t1->seq = net32(sk->send_seq);
1939 sk->send_seq++;
1940 buff->h.seq = sk->send_seq;
1941 t1->ack = 1;
1942
1943
1944 sk->delay_acks = 0;
1945 t1->ack_seq = net32(sk->acked_seq);
1946 t1->window = net16(sk->prot->rspace(sk));
1947 t1->fin = 1;
1948 t1->rst = need_reset;
1949 t1->doff = sizeof (*t1)/4;
1950 tcp_send_check (t1, sk->saddr, sk->daddr, sizeof (*t1), sk);
1951
1952 if (sk->wfront == NULL)
1953 {
1954 prot->queue_xmit(sk, dev, buff, 0);
1955 }
1956 else
1957 {
1958 sk->time_wait.len = sk->rtt;
1959 sk->timeout = TIME_WRITE;
1960 reset_timer ((struct timer *)&sk->time_wait);
1961 buff->next = NULL;
1962 if (sk->wback == NULL)
1963 {
1964 sk->wfront=buff;
1965 }
1966 else
1967 {
1968 sk->wback->next = buff;
1969 }
1970 sk->wback = buff;
1971 buff->magic = TCP_WRITE_QUEUE_MAGIC;
1972
1973 }
1974
1975 if (sk->state == TCP_CLOSE_WAIT)
1976 {
1977 sk->state = TCP_FIN_WAIT2;
1978 }
1979 else
1980 {
1981 sk->state = TCP_FIN_WAIT1;
1982 }
1983 }
1984 release_sock (sk);
1985 }
1986
1987
1988
1989
1990 static void
1991 tcp_write_xmit (volatile struct sock *sk)
1992 {
1993 struct sk_buff *skb;
1994 PRINTK (("tcp_write_xmit (sk=%X)\n",sk));
1995 while (sk->wfront != NULL && before (sk->wfront->h.seq, sk->window_seq) &&
1996 sk->packets_out < sk->cong_window)
1997 {
1998 skb = sk->wfront;
1999 sk->wfront = (struct sk_buff *)skb->next;
2000 if (sk->wfront == NULL)
2001 sk->wback = NULL;
2002 skb->next = NULL;
2003 if (skb->magic != TCP_WRITE_QUEUE_MAGIC)
2004 {
2005 PRINTK (("tcp.c skb with bad magic (%X) on write queue. Squashing "
2006 "queue\n", skb->magic));
2007 sk->wfront = NULL;
2008 sk->wback = NULL;
2009 return;
2010 }
2011 skb->magic = 0;
2012 PRINTK(("Sending a packet.\n"));
2013 sk->prot->queue_xmit (sk, skb->dev, skb, skb->free);
2014 }
2015 }
2016
2017
2018
2019
2020
2021 static int
2022 tcp_ack (volatile struct sock *sk, struct tcp_header *th, unsigned long saddr)
2023 {
2024 unsigned long ack;
2025 ack = net32(th->ack_seq);
2026
2027 PRINTK (("tcp_ack ack=%d, window=%d, "
2028 "sk->rcv_ack_seq=%d, sk->window_seq = %d\n",
2029 ack, net16(th->window), sk->rcv_ack_seq, sk->window_seq));
2030 if (after (ack, sk->send_seq+1) || before (ack, sk->rcv_ack_seq-1))
2031 {
2032 if (after (ack, sk->send_seq) || (sk->state != TCP_ESTABLISHED &&
2033 sk->state != TCP_CLOSE_WAIT))
2034 {
2035 return (0);
2036 }
2037 if (sk->keepopen)
2038 reset_timer ((struct timer *)&sk->time_wait);
2039 sk->retransmits = 0;
2040 return (1);
2041 }
2042
2043
2044 if (after (sk->window_seq, ack+net16(th->window)))
2045 {
2046
2047
2048
2049
2050
2051
2052 struct sk_buff *skb;
2053 struct sk_buff *skb2=NULL;
2054 struct sk_buff *wskb=NULL;
2055
2056 sk->window_seq = ack + net16(th->window);
2057 cli();
2058 for (skb = sk->send_head; skb != NULL; skb= (struct sk_buff *)skb->link3)
2059 {
2060 if (after( skb->h.seq, sk->window_seq))
2061 {
2062
2063
2064 if (skb2 == NULL)
2065 {
2066 sk->send_head = (struct sk_buff *)skb->link3;
2067 }
2068 else
2069 {
2070 skb2->link3 = skb->link3;
2071 }
2072 if (sk->send_tail == skb)
2073 sk->send_tail = skb2;
2074
2075
2076 if (skb->next != NULL)
2077 {
2078 int i;
2079 if (skb->next != skb)
2080 {
2081 skb->next->prev = skb->prev;
2082 skb->prev->next = skb->next;
2083 }
2084 for (i = 0; i < DEV_NUMBUFFS; i++)
2085 {
2086 if (skb->dev->buffs[i] == skb)
2087 {
2088 if (skb->next == skb)
2089 skb->dev->buffs[i] = NULL;
2090 else
2091 skb->dev->buffs[i] = skb->next;
2092 break;
2093 }
2094 }
2095 if (arp_q == skb)
2096 {
2097 if (skb->next == skb)
2098 arp_q = NULL;
2099 else
2100 arp_q = skb->next;
2101 }
2102 }
2103
2104
2105 skb->magic = TCP_WRITE_QUEUE_MAGIC;
2106 if (wskb == NULL)
2107 {
2108 skb->next = sk->wfront;
2109 sk->wfront = skb;
2110 }
2111 else
2112 {
2113 skb->next = wskb->next;
2114 wskb->next = skb;
2115 }
2116 wskb = skb;
2117 }
2118 else
2119 {
2120 skb2 = skb;
2121 }
2122 }
2123 sti();
2124 }
2125
2126 sk->window_seq = ack + net16(th->window);
2127
2128
2129 if (sk->cong_window < 2048 && ack != sk->rcv_ack_seq)
2130 {
2131 if (sk->exp_growth)
2132 sk->cong_window *= 2;
2133 else
2134 sk->cong_window++;
2135 }
2136
2137 PRINTK (("tcp_ack: Updating rcv ack sequence. \n"));
2138 sk->rcv_ack_seq = ack;
2139
2140
2141 while (sk->send_head != NULL)
2142 {
2143 if (before (sk->send_head->h.seq, ack+1))
2144 {
2145 struct sk_buff *oskb;
2146
2147 sk->packets_out --;
2148 PRINTK (("skb=%X acked\n", sk->send_head));
2149
2150
2151 if (!sk->dead)
2152 wake_up (sk->sleep);
2153
2154 cli();
2155
2156 oskb = sk->send_head;
2157
2158 sk->rtt += ((jiffies - oskb->when) - sk->rtt)/2;
2159 if (sk->rtt < 30) sk->rtt = 30;
2160 sk->send_head = (struct sk_buff *)oskb->link3;
2161 if (sk->send_head == NULL)
2162 {
2163 sk->send_tail = NULL;
2164 }
2165
2166 if (oskb->next != NULL)
2167 {
2168 int i;
2169 if (oskb->next != oskb)
2170 {
2171 oskb->next->prev = oskb->prev;
2172 oskb->prev->next = oskb->next;
2173 }
2174 for (i = 0; i < DEV_NUMBUFFS; i++)
2175 {
2176 if (oskb->dev->buffs[i] == oskb)
2177 {
2178 if (oskb== oskb->next)
2179 oskb->dev->buffs[i]= NULL;
2180 else
2181 oskb->dev->buffs[i] = oskb->next;
2182 break;
2183 }
2184 }
2185 if (arp_q == oskb)
2186 {
2187 if (oskb == oskb->next)
2188 arp_q = NULL;
2189 else
2190 arp_q = (struct sk_buff *)oskb->next;
2191 }
2192 }
2193 oskb->magic = 0;
2194 kfree_skb (oskb, FREE_WRITE);
2195 sti();
2196 if (!sk->dead)
2197 wake_up(sk->sleep);
2198 }
2199 else
2200 {
2201 break;
2202 }
2203
2204 }
2205
2206
2207
2208
2209
2210
2211
2212 if (sk->retransmits && sk->send_head != NULL)
2213 {
2214 PRINTK (("retransmitting\n"));
2215 sk->prot->retransmit (sk,1);
2216 }
2217 sk->retransmits = 0;
2218
2219
2220
2221 if (sk->wfront != NULL && sk->packets_out < sk->cong_window)
2222 {
2223 if (after (sk->window_seq, sk->wfront->h.seq))
2224 {
2225 tcp_write_xmit (sk);
2226 }
2227 }
2228 else
2229 {
2230 if (sk->send_head == NULL && sk->ack_backlog == 0 &&
2231 sk->state != TCP_TIME_WAIT && !sk->keepopen)
2232 {
2233 PRINTK (("Nothing to do, going to sleep.\n"));
2234 if (!sk->dead)
2235 wake_up (sk->sleep);
2236
2237 delete_timer((struct timer *)&sk->time_wait);
2238 sk->timeout = 0;
2239 }
2240 else
2241 {
2242 if (sk->state != sk->keepopen)
2243 {
2244 sk->timeout = TIME_WRITE;
2245 sk->time_wait.len = sk->rtt*2;
2246 reset_timer ((struct timer *)&sk->time_wait);
2247 }
2248 if (sk->state == TCP_TIME_WAIT)
2249 {
2250 sk->time_wait.len = TCP_TIMEWAIT_LEN;
2251 reset_timer ((struct timer *)&sk->time_wait);
2252 sk->timeout = TIME_CLOSE;
2253 }
2254 }
2255 }
2256
2257
2258 if (sk->packets_out == 0 && sk->send_tmp != NULL &&
2259 sk->wfront == NULL && sk->send_head == NULL)
2260 {
2261 tcp_send_partial (sk);
2262 }
2263
2264
2265 if ( sk->state == TCP_TIME_WAIT)
2266 {
2267 if (!sk->dead) wake_up (sk->sleep);
2268 if (sk->rcv_ack_seq == sk->send_seq &&
2269 sk->acked_seq == sk->fin_seq)
2270 {
2271 sk->state = TCP_CLOSE;
2272 sk->shutdown = SHUTDOWN_MASK;
2273 }
2274 }
2275
2276 if (sk->state == TCP_LAST_ACK || sk->state == TCP_FIN_WAIT2)
2277 {
2278 if (!sk->dead) wake_up (sk->sleep);
2279 if (sk->rcv_ack_seq == sk->send_seq)
2280 {
2281 if (sk->acked_seq != sk->fin_seq)
2282 {
2283 tcp_time_wait(sk);
2284 }
2285 else
2286 {
2287 PRINTK (("tcp_ack closing socket - %X\n", sk));
2288 tcp_send_ack (sk->send_seq, sk->acked_seq, sk, th, sk->daddr);
2289 sk->shutdown = SHUTDOWN_MASK;
2290 sk->state = TCP_CLOSE;
2291 }
2292 }
2293 }
2294
2295 PRINTK (("leaving tcp_ack\n"));
2296
2297 return (1);
2298 }
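
tcp_ack() above folds each new round-trip measurement into sk->rtt with a gain of one half and never lets the estimate drop below 30 jiffies. A standalone restatement of that update, where the sample is jiffies minus skb->when:

/* Sketch of the estimator in tcp_ack() above, for clarity only. */
static long smooth_rtt(long rtt, long sample)
{
    rtt += (sample - rtt) / 2;   /* move halfway toward the new measurement  */
    if (rtt < 30)
        rtt = 30;                /* floor used above to avoid a zero timeout */
    return (rtt);
}
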
2299
2300
2301
2302
2303
2304 static int
2305 tcp_data (struct sk_buff *skb, volatile struct sock *sk,
2306 unsigned long saddr, unsigned short len)
2307 {
2308 struct sk_buff *skb1, *skb2;
2309 struct tcp_header *th;
2310
2311 th = skb->h.th;
2312 print_th (th);
2313 skb->len = len - (th->doff*4);
2314
2315 PRINTK(("tcp_data len = %d sk = %X:\n",skb->len, sk));
2316
2317 sk->bytes_rcv += skb->len;
2318
2319 if (skb->len == 0 && !th->fin && !th->urg && !th->psh)
2320 {
2321
2322 if (!th->ack)
2323 tcp_send_ack (sk->send_seq, sk->acked_seq,sk, th, saddr);
2324 kfree_skb(skb, FREE_READ);
2325 return (0);
2326 }
2327
2328 if (sk->shutdown & RCV_SHUTDOWN)
2329 {
2330 sk->acked_seq = th->seq + skb->len + th->syn + th->fin;
2331 tcp_reset (sk->saddr, sk->daddr, skb->h.th,
2332 sk->prot, NULL, skb->dev);
2333 sk->state = TCP_CLOSE;
2334 sk->err = EPIPE;
2335 sk->shutdown = SHUTDOWN_MASK;
2336 PRINTK (("tcp_data: closing socket - %X\n", sk));
2337 kfree_skb (skb, FREE_READ);
2338 if (!sk->dead) wake_up (sk->sleep);
2339 return (0);
2340 }
2341
2342
2343
2344
2345
2346
2347
2348
2349 if (sk->rqueue == NULL)
2350 {
2351 PRINTK (("tcp_data: skb = %X:\n",skb));
2352
2353 sk->rqueue = skb;
2354 skb->next = skb;
2355 skb->prev = skb;
2356 skb1= NULL;
2357 }
2358 else
2359 {
2360 PRINTK (("tcp_data adding to chain sk = %X:\n",sk));
2361
2362 for (skb1=sk->rqueue; ; skb1=(struct sk_buff *)skb1->prev)
2363 {
2364 PRINTK (("skb1=%X\n",skb1));
2365 PRINTK (("skb1->h.th->seq = %d\n", skb1->h.th->seq));
2366 if (after ( th->seq+1, skb1->h.th->seq))
2367 {
2368 skb->prev = skb1;
2369 skb->next = skb1->next;
2370 skb->next->prev = skb;
2371 skb1->next = skb;
2372 if (skb1 == sk->rqueue)
2373 sk->rqueue = skb;
2374 break;
2375 }
2376 if ( skb1->prev == sk->rqueue)
2377 {
2378 skb->next= skb1;
2379 skb->prev = skb1->prev;
2380 skb->prev->next = skb;
2381 skb1->prev = skb;
2382 skb1 = NULL;
2383 break;
2384 }
2385 }
2386
2387 PRINTK (("skb = %X:\n",skb));
2388
2389 }
2390
2391 th->ack_seq = th->seq + skb->len;
2392 if (th->syn) th->ack_seq ++;
2393 if (th->fin) th->ack_seq ++;
2394
2395 if (before (sk->acked_seq, sk->copied_seq))
2396 {
2397 printk ("*** tcp.c:tcp_data bug acked < copied\n");
2398 sk->acked_seq = sk->copied_seq;
2399 }
2400
2401
2402 if (skb1 == NULL || skb1->acked || before (th->seq, sk->acked_seq+1))
2403 {
2404 if (before (th->seq, sk->acked_seq+1))
2405 {
2406 if (after (th->ack_seq, sk->acked_seq))
2407 sk->acked_seq = th->ack_seq;
2408 skb->acked = 1;
2409
2410
2411 if (skb->h.th->fin)
2412 {
2413 if (!sk->dead) wake_up (sk->sleep);
2414 sk->shutdown |= RCV_SHUTDOWN;
2415 }
2416
2417 for (skb2=(struct sk_buff *)skb->next;
2418 skb2 !=(struct sk_buff *) sk->rqueue->next;
2419 skb2=(struct sk_buff *)skb2->next)
2420 {
2421 if (before(skb2->h.th->seq, sk->acked_seq+1))
2422 {
2423 if (after (skb2->h.th->ack_seq, sk->acked_seq))
2424 sk->acked_seq = skb2->h.th->ack_seq;
2425 skb2->acked = 1;
2426
2427
2428 if (skb2->h.th->fin)
2429 {
2430 sk->shutdown |= RCV_SHUTDOWN;
2431 if (!sk->dead) wake_up (sk->sleep);
2432 }
2433
2434
2435 sk->ack_backlog = sk->max_ack_backlog;
2436 }
2437 else
2438 {
2439 break;
2440 }
2441 }
2442
2443
2444
2445
2446 if (!sk->delay_acks ||
2447 sk->ack_backlog >= sk->max_ack_backlog ||
2448 sk->bytes_rcv > sk->max_unacked ||
2449 th->fin)
2450 {
2451 tcp_send_ack (sk->send_seq, sk->acked_seq,sk,th, saddr);
2452 }
2453 else
2454 {
2455 sk->ack_backlog++;
2456 sk->time_wait.len = TCP_ACK_TIME;
2457 sk->timeout = TIME_WRITE;
2458 reset_timer ((struct timer *)&sk->time_wait);
2459 sk->retransmits = 0;
2460 }
2461 }
2462 }
2463 else
2464 {
2465
2466 tcp_send_ack (sk->send_seq, sk->acked_seq, sk, th, saddr);
2467 }
2468
2469
2470 if (!sk->dead)
2471 {
2472 wake_up (sk->sleep);
2473 }
2474 else
2475 {
2476 PRINTK (("data received on dead socket. \n"));
2477 }
2478
2479 if (sk->state == TCP_FIN_WAIT2 && sk->acked_seq == sk->fin_seq
2480 && sk->rcv_ack_seq == sk->send_seq)
2481 {
2482 PRINTK (("tcp_data: entering last_ack state sk = %X\n", sk));
2483
2484 tcp_send_ack (sk->send_seq, sk->acked_seq, sk, th, saddr);
2485 sk->shutdown = SHUTDOWN_MASK;
2486 sk->state = TCP_LAST_ACK;
2487 if (!sk->dead) wake_up (sk->sleep);
2488 }
2489
2490 return (0);
2491 }
2492
2493 static int
2494 tcp_urg (volatile struct sock *sk, struct tcp_header *th, unsigned long saddr)
2495 {
2496 extern int kill_pg (int pg, int sig, int priv);
2497 extern int kill_proc (int pid, int sig, int priv);
2498
2499 if (!sk->dead)
2500 wake_up(sk->sleep);
2501
2502 if (sk->urginline)
2503 {
2504 th->urg = 0;
2505 th->psh = 1;
2506 return (0);
2507 }
2508
2509 if (!sk->urg)
2510 {
2511
2512
2513 if (sk->proc != 0)
2514 {
2515 if (sk->proc > 0)
2516 {
2517 kill_proc (sk->proc, SIGURG, 1);
2518 }
2519 else
2520 {
2521 kill_pg (-sk->proc, SIGURG, 1);
2522 }
2523 }
2524 }
2525 sk->urg++;
2526 return (0);
2527 }
2528
2529
2530 static int
2531 tcp_fin (volatile struct sock *sk, struct tcp_header *th,
2532 unsigned long saddr, struct device *dev)
2533 {
2534 PRINTK (("tcp_fin (sk=%X, th=%X, saddr=%X, dev=%X)\n",
2535 sk, th, saddr, dev));
2536
2537 if (!sk->dead)
2538 {
2539 wake_up (sk->sleep);
2540 }
2541
2542 switch (sk->state)
2543 {
2544 case TCP_SYN_RECV:
2545 case TCP_SYN_SENT:
2546 case TCP_ESTABLISHED:
2547 sk->fin_seq = th->seq+1;
2548 sk->state = TCP_CLOSE_WAIT;
2549 if (th->rst) sk->shutdown = SHUTDOWN_MASK;
2550 break;
2551
2552 case TCP_CLOSE_WAIT:
2553 case TCP_FIN_WAIT2:
2554 break;
2555
2556 case TCP_FIN_WAIT1:
2557 sk->fin_seq = th->seq+1;
2558 sk->state = TCP_FIN_WAIT2;
2559 break;
2560
2561 default:
2562 case TCP_TIME_WAIT:
2563 sk->state = TCP_LAST_ACK;
2564
2565 sk->time_wait.len = TCP_TIMEWAIT_LEN;
2566 sk->timeout = TIME_CLOSE;
2567 reset_timer ((struct timer *)&sk->time_wait);
2568 return (0);
2569
2570 }
2571
2572
2573 sk->ack_backlog ++;
2574
2575 #if 0
2576
2577 buff=sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
2578 if (buff == NULL)
2579 {
2580
2581 return (1);
2582 }
2583
2584 buff->mem_addr = buff;
2585 buff->mem_len = MAX_ACK_SIZE;
2586 buff->len=sizeof (struct tcp_header);
2587 buff->sk = sk;
2588
2589 t1 = (struct tcp_header *)(buff + 1);
2590
2591 tmp = sk->prot->build_header (buff, sk->saddr, sk->daddr, &dev,
2592 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
2593 if (tmp < 0)
2594 {
2595 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
2596 return (0);
2597 }
2598
2599 buff->len += tmp;
2600 t1 = (struct tcp_header *)((char *)t1 +tmp);
2601
2602 memcpy (t1, th, sizeof (*t1));
2603
2604
2605 t1->dest = th->source;
2606 t1->source = th->dest;
2607
2608
2609 t1->seq = net32(sk->send_seq);
2610
2611
2612
2613
2614 buff->h.seq = sk->send_seq;
2615 t1->window = net16(sk->prot->rspace(sk));
2616
2617 t1->res1=0;
2618 t1->res2=0;
2619 t1->rst = 0;
2620 t1->urg = 0;
2621 t1->syn = 0;
2622 t1->psh = 0;
2623 t1->ack = 1;
2624 t1->fin = 0;
2625 t1->ack_seq = net32(sk->acked_seq);
2626
2627 t1->doff = sizeof (*t1)/4;
2628 tcp_send_check (t1, sk->saddr, sk->daddr, sizeof (*t1), sk);
2629
2630
2631
2632 if (sk->wback != NULL)
2633 {
2634 buff->next = NULL;
2635 sk->wback->next = buff;
2636 sk->wback = buff;
2637 buff->magic = TCP_WRITE_QUEUE_MAGIC;
2638 }
2639 else
2640 {
2641 sk->prot->queue_xmit (sk, dev, buff,0);
2642 }
2643 #endif
2644 return (0);
2645 }
2646
2647
2648
2649
2650 static volatile struct sock *
2651 tcp_accept (volatile struct sock *sk, int flags)
2652 {
2653 volatile struct sock *newsk;
2654 struct sk_buff *skb;
2655
2656 PRINTK (("tcp_accept(sk=%X, flags=%X)\n", sk, flags));
2657
2658
2659
2660 if (sk->state != TCP_LISTEN)
2661 {
2662 sk->err = EINVAL;
2663 return (NULL);
2664 }
2665
2666
2667 sk->inuse = 1;
2668 cli();
2669 while ( (skb = get_firstr(sk)) == NULL )
2670 {
2671 if (flags & O_NONBLOCK)
2672 {
2673 sti();
2674 release_sock (sk);
2675 sk->err = EAGAIN;
2676 return (NULL);
2677 }
2678
2679 release_sock (sk);
2680 interruptible_sleep_on (sk->sleep);
2681 if (current->signal & ~current->blocked)
2682 {
2683 sti();
2684 sk->err = ERESTARTSYS;
2685 return (NULL);
2686 }
2687
2688 sk->inuse = 1;
2689 }
2690 sti();
2691
2692
2693 newsk = skb->sk;
2694
2695 kfree_skb (skb, FREE_READ);
2696 sk->ack_backlog--;
2697 release_sock (sk);
2698 return (newsk);
2699 }
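
A hedged sketch of the user-space side of tcp_accept(): with O_NONBLOCK set on the listening socket, the code above returns EAGAIN instead of sleeping on sk->sleep.

#include <errno.h>
#include <sys/socket.h>

/* Illustrative caller only; the peer address is not collected here. */
int accept_nonblocking(int listen_fd)
{
    int fd = accept(listen_fd, (struct sockaddr *) 0, (socklen_t *) 0);

    if (fd < 0 && errno == EAGAIN)
        return (-1);    /* no completed connection queued yet; try again later */
    return (fd);
}
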
2700
2701
2702
2703
2704 static int
2705 tcp_connect (volatile struct sock *sk, struct sockaddr_in *usin, int addr_len)
2706 {
2707 struct sk_buff *buff;
2708 struct sockaddr_in sin;
2709 struct device *dev=NULL;
2710 unsigned char *ptr;
2711 int tmp;
2712 struct tcp_header *t1;
2713 if (sk->state != TCP_CLOSE) return (-EISCONN);
2714 if (addr_len < 8) return (-EINVAL);
2715
2716
2717 memcpy_fromfs (&sin,usin, min(sizeof (sin), addr_len));
2718
2719 if (sin.sin_family && sin.sin_family != AF_INET) return (-EAFNOSUPPORT);
2720 sk->inuse = 1;
2721 sk->daddr = sin.sin_addr.s_addr;
2722 sk->send_seq = timer_seq*SEQ_TICK-seq_offset;
2723 sk->rcv_ack_seq = sk->send_seq -1;
2724 sk->err = 0;
2725 sk->dummy_th.dest = sin.sin_port;
2726 release_sock (sk);
2727
2728 buff=sk->prot->wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL);
2729 if (buff == NULL)
2730 {
2731 return (-ENOMEM);
2732 }
2733 sk->inuse = 1;
2734 buff->lock = 0;
2735 buff->mem_addr = buff;
2736 buff->mem_len = MAX_SYN_SIZE;
2737 buff->len=24;
2738 buff->sk = sk;
2739 t1=(struct tcp_header *)(buff + 1);
2740
2741
2742
2743 tmp = sk->prot->build_header (buff, sk->saddr, sk->daddr, &dev,
2744 IPPROTO_TCP, NULL, MAX_SYN_SIZE);
2745 if (tmp < 0)
2746 {
2747 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
2748 release_sock (sk);
2749 return (-ENETUNREACH);
2750 }
2751 buff->len += tmp;
2752 t1 = (struct tcp_header *)((char *)t1 +tmp);
2753
2754 memcpy (t1, (void *)&(sk->dummy_th), sizeof (*t1));
2755 t1->seq = net32(sk->send_seq++);
2756 buff->h.seq = sk->send_seq;
2757 t1->ack = 0;
2758 t1->window = 2;
2759 t1->res1=0;
2760 t1->res2=0;
2761 t1->rst = 0;
2762 t1->urg = 0;
2763 t1->psh = 0;
2764 t1->syn = 1;
2765 t1->urg_ptr = 0;
2766 t1->doff =6;
2767
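/* Append the MSS option: kind 2, length 4, value = device MTU less headers. */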
2768 ptr=(unsigned char *)(t1+1);
2769 ptr[0]=2;
2770 ptr[1]=4;
2771 ptr[2]=(dev->mtu- HEADER_SIZE) >> 8;
2772 ptr[3]=(dev->mtu- HEADER_SIZE) & 0xff;
2773 sk->mtu = dev->mtu - HEADER_SIZE;
2774 tcp_send_check (t1, sk->saddr, sk->daddr,
2775 sizeof (struct tcp_header) + 4, sk);
2776
2777
2778 sk->state = TCP_SYN_SENT;
2779
2780 sk->prot->queue_xmit(sk, dev, buff, 0);
2781
2782 sk->time_wait.len = TCP_CONNECT_TIME;
2783 sk->rtt = TCP_CONNECT_TIME;
2784 reset_timer ((struct timer *)&sk->time_wait);
2785 sk->retransmits = TCP_RETR2 - TCP_SYN_RETRIES;
2786 release_sock (sk);
2787 return (0);
2788 }
2789
2790
2791
2792
2793
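/*
 * tcp_sequence: decide whether an incoming segment may be processed.
 * Returns 1 if it overlaps the receive window, 0 if it is rejected
 * (sending an ACK back where one is called for).
 */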
2794 static int
2795 tcp_sequence (volatile struct sock *sk, struct tcp_header *th, short len,
2796 struct options *opt, unsigned long saddr)
2797 {
2798
2799
2800
2801
2802
2803 PRINTK (("tcp_sequence (sk=%X, th=%X, len = %d, opt=%d, saddr=%X)\n",
2804 sk, th, len, opt, saddr));
2805
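/* Accept the segment if its start or its end falls inside the window,
   or if it straddles the entire window. */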
2806 if (between(th->seq, sk->acked_seq, sk->acked_seq + sk->window)||
2807 between(th->seq + len-(th->doff * 4), sk->acked_seq + 1,
2808 sk->acked_seq + sk->window) ||
2809 (before (th->seq, sk->acked_seq) &&
2810 after (th->seq + len - (th->doff * 4), sk->acked_seq + sk->window)))
2811 {
2812 return (1);
2813 }
2814
2815 PRINTK (("tcp_sequence: rejecting packet. \n"));
2816
2817
2818
2819 if (after (th->seq, sk->acked_seq + sk->window))
2820 {
2821 tcp_send_ack (sk->send_seq, sk->acked_seq, sk, th, saddr);
2822 return (0);
2823 }
2824
2825
2826 if (th->ack && len == (th->doff * 4) && after (th->seq, sk->acked_seq - 32767) &&
2827 !th->fin && !th->syn) return (1);
2828
2829 if (!th->rst)
2830 {
2831
2832 tcp_send_ack (net32(th->ack_seq), sk->acked_seq, sk, th, saddr);
2833 }
2834
2835
2836 return (0);
2837 }
2838
2839
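/*
 * tcp_options: only the MSS option (kind 2, length 4) is understood;
 * anything else clamps the MSS to the 576-byte default less headers.
 */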
2840 static void
2841 tcp_options (volatile struct sock *sk, struct tcp_header *th)
2842 {
2843 unsigned char *ptr;
2844 ptr = (unsigned char *)(th + 1);
2845 if (ptr[0] != 2 || ptr[1] != 4)
2846 {
2847 sk->mtu = min (sk->mtu, 576-HEADER_SIZE);
2848 return;
2849 }
2850 sk->mtu = min (sk->mtu, ptr[2]*256 + ptr[3] - HEADER_SIZE);
2851 }
2852
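/*
 * tcp_rcv: main receive entry point.  Verifies the checksum, looks up
 * the owning socket, defers to the socket's backlog if it is busy, and
 * otherwise dispatches on sk->state.
 */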
2853 int
2854 tcp_rcv(struct sk_buff *skb, struct device *dev, struct options *opt,
2855 unsigned long daddr, unsigned short len,
2856 unsigned long saddr, int redo, struct ip_protocol * protocol)
2857 {
2858 struct tcp_header *th;
2859 volatile struct sock *sk;
2860
2861 if (!skb)
2862 {
2863 PRINTK (("tcp.c: tcp_rcv skb = NULL\n"));
2864 return (0);
2865 }
2866 #if 0
2867 if (!protocol)
2868 {
2869 PRINTK (("tcp.c: tcp_rcv protocol = NULL\n"));
2870 return (0);
2871 }
2872
2873 if (!opt)
2874 {
2875 PRINTK (("tcp.c: tcp_rcv opt = NULL\n"));
2876 }
2877 #endif
2878 if (!dev)
2879 {
2880 PRINTK (("tcp.c: tcp_rcv dev = NULL\n"));
2881 return (0);
2882 }
2883
2884 th = skb->h.th;
2885
2886
2887 sk=get_sock(&tcp_prot, net16(th->dest), saddr, th->source, daddr);
2888 PRINTK(("<<\n"));
2889 PRINTK(("len = %d, redo = %d, skb=%X\n", len, redo, skb));
2890
2891 if (sk)
2892 {
2893 PRINTK (("sk = %X:\n",sk));
2894 }
2895
2896 if (!redo)
2897 {
2898 if (th->check && tcp_check (th, len, saddr, daddr ))
2899 {
2900 skb->sk = NULL;
2901 PRINTK (("packet dropped with bad checksum.\n"));
2902 kfree_skb (skb, 0);
2903
2904
2905 return (0);
2906 }
2907
2908
2909 if (sk == NULL)
2910 {
2911 if (!th->rst)
2912 tcp_reset (daddr, saddr, th, &tcp_prot, opt,dev);
2913 skb->sk = NULL;
2914 kfree_skb (skb, 0);
2915 return (0);
2916 }
2917
2918 skb->len = len;
2919 skb->sk = sk;
2920 skb->acked = 0;
2921 skb->used = 0;
2922 skb->free = 0;
2923 skb->urg_used = 0;
2924 skb->saddr = daddr;
2925 skb->daddr = saddr;
2926
2927 th->seq = net32(th->seq);
2928
2929 cli();
2930
2931
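/* Socket is busy in a system call: chain the skb onto the circular
   back_log list; it will be re-run later with redo set. */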
2932 if (sk->inuse)
2933 {
2934 if (sk->back_log == NULL)
2935 {
2936 sk->back_log = skb;
2937 skb->next = skb;
2938 skb->prev = skb;
2939 }
2940 else
2941 {
2942 skb->next = sk->back_log;
2943 skb->prev = sk->back_log->prev;
2944 skb->prev->next = skb;
2945 skb->next->prev = skb;
2946 }
2947 sti();
2948 return (0);
2949 }
2950 sk->inuse = 1;
2951 sti();
2952 }
2953 else
2954 {
2955 if (!sk)
2956 {
2957 PRINTK (("tcp.c: tcp_rcv bug sk=NULL redo = 1\n"));
2958 return (0);
2959 }
2960 }
2961
2962 if (!sk->prot)
2963 {
2964 PRINTK (("tcp.c: tcp_rcv sk->prot = NULL \n"));
2965 return (0);
2966 }
2967
2968
2969 if (sk->rmem_alloc + skb->mem_len >= SK_RMEM_MAX)
2970 {
2971 skb->sk = NULL;
2972 PRINTK (("dropping packet due to lack of buffer space.\n"));
2973 kfree_skb (skb, 0);
2974 release_sock (sk);
2975 return (0);
2976 }
2977
2978 sk->rmem_alloc += skb->mem_len;
2979
2980 PRINTK (("About to do switch. \n"));
2981
2982
2983
2984 switch (sk->state)
2985 {
2986
2987
2988 case TCP_LAST_ACK:
2989 if (th->rst)
2990 {
2991 sk->err = ECONNRESET;
2992 sk->state = TCP_CLOSE;
2993 sk->shutdown = SHUTDOWN_MASK;
2994 if (!sk->dead)
2995 {
2996 wake_up (sk->sleep);
2997 }
2998 kfree_skb (skb, FREE_READ);
2999 release_sock(sk);
3000 return (0);
3001 }
3002
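/* No RST while in LAST_ACK: fall through and treat the segment as in
   the established states below. */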
3003 case TCP_ESTABLISHED:
3004 case TCP_CLOSE_WAIT:
3005 case TCP_FIN_WAIT1:
3006 case TCP_FIN_WAIT2:
3007 case TCP_TIME_WAIT:
3008
3009 if (!tcp_sequence (sk, th, len, opt, saddr))
3010 {
3011 kfree_skb (skb, FREE_READ);
3012 release_sock(sk);
3013 return (0);
3014 }
3015
3016 if (th->rst)
3017 {
3018
3019 sk->err = ECONNRESET;
3020
3021 if (sk->state == TCP_CLOSE_WAIT)
3022 {
3023 sk->err = EPIPE;
3024 }
3025
3026
3027
3028 if (!th->fin)
3029 {
3030 sk->state = TCP_CLOSE;
3031 sk->shutdown = SHUTDOWN_MASK;
3032 if (!sk->dead)
3033 {
3034 wake_up (sk->sleep);
3035 }
3036 kfree_skb (skb, FREE_READ);
3037 release_sock(sk);
3038 return (0);
3039 }
3040 }
3041 #if 0
3042 if (opt && (opt->security != 0 || opt->compartment != 0 || th->syn))
3043 {
3044 sk->err = ECONNRESET;
3045 sk->state = TCP_CLOSE;
3046 sk->shutdown = SHUTDOWN_MASK;
3047 tcp_reset (daddr, saddr, th, sk->prot, opt,dev);
3048 if (!sk->dead)
3049 {
3050 wake_up (sk->sleep);
3051 }
3052 kfree_skb (skb, FREE_READ);
3053 release_sock(sk);
3054 return (0);
3055 }
3056 #endif
3057 if (th->ack)
3058 {
3059 if(!tcp_ack (sk, th, saddr))
3060 {
3061 kfree_skb (skb, FREE_READ);
3062 release_sock(sk);
3063 return (0);
3064 }
3065 }
3066 if (th->urg)
3067 {
3068 if (tcp_urg (sk, th, saddr))
3069 {
3070 kfree_skb (skb, FREE_READ);
3071 release_sock(sk);
3072 return (0);
3073 }
3074 }
3075
3076 if (th->fin && tcp_fin (sk, th, saddr, dev))
3077 {
3078 kfree_skb (skb, FREE_READ);
3079 release_sock(sk);
3080 return (0);
3081 }
3082
3083 if ( tcp_data (skb, sk, saddr, len))
3084 {
3085 kfree_skb (skb, FREE_READ);
3086 release_sock(sk);
3087 return (0);
3088 }
3089
3090 release_sock(sk);
3091 return (0);
3092
3093 case TCP_CLOSE:
3094
3095 if (sk->dead || sk->daddr)
3096 {
3097 PRINTK (("packet received for closed, dead socket\n"));
3098 kfree_skb (skb, FREE_READ);
3099 release_sock (sk);
3100 return (0);
3101 }
3102
3103 if (!th->rst)
3104 {
3105 if (!th->ack)
3106 th->ack_seq=0;
3107 tcp_reset (daddr, saddr, th, sk->prot, opt,dev);
3108 }
3109 kfree_skb (skb, FREE_READ);
3110 release_sock(sk);
3111 return (0);
3112
3113 case TCP_LISTEN:
3114 if (th->rst)
3115 {
3116 kfree_skb (skb, FREE_READ);
3117 release_sock(sk);
3118 return (0);
3119 }
3120 if (th->ack)
3121 {
3122 tcp_reset (daddr, saddr, th, sk->prot, opt,dev );
3123 kfree_skb (skb, FREE_READ);
3124 release_sock(sk);
3125 return (0);
3126 }
3127
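/* A SYN on a listening socket starts a passive open; tcp_conn_request()
   takes over the rest of it. */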
3128 if (th->syn)
3129 {
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142 tcp_conn_request (sk, skb, daddr, saddr, opt, dev);
3143
3144 release_sock(sk);
3145 return (0);
3146 }
3147
3148 kfree_skb (skb, FREE_READ);
3149 release_sock(sk);
3150 return (0);
3151
3152 default:
3153 if (!tcp_sequence (sk, th, len, opt, saddr))
3154 {
3155 kfree_skb (skb, FREE_READ);
3156 release_sock(sk);
3157 return (0);
3158 }
3159
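/* Sequence check passed: fall through into the SYN_SENT/SYN_RECV handling. */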
3160 case TCP_SYN_SENT:
3161 if (th->rst)
3162 {
3163 sk->err = ECONNREFUSED ;
3164 sk->state = TCP_CLOSE;
3165 sk->shutdown = SHUTDOWN_MASK;
3166 if (!sk->dead)
3167 {
3168 wake_up (sk->sleep);
3169 }
3170 kfree_skb (skb, FREE_READ);
3171 release_sock(sk);
3172 return (0);
3173 }
3174 #if 0
3175 if (opt->security != 0 || opt->compartment != 0 )
3176 {
3177 sk->err = ECONNRESET;
3178 sk->state = TCP_CLOSE;
3179 sk->shutdown = SHUTDOWN_MASK;
3180 tcp_reset (daddr, saddr, th, sk->prot, opt, dev);
3181 if (!sk->dead)
3182 {
3183 wake_up (sk->sleep);
3184 }
3185 kfree_skb (skb, FREE_READ);
3186 release_sock(sk);
3187 return (0);
3188 }
3189 #endif
3190 if (!th->ack)
3191 {
3192 if (th->syn)
3193 {
3194 sk->state = TCP_SYN_RECV;
3195 }
3196
3197 kfree_skb (skb, FREE_READ);
3198 release_sock(sk);
3199 return (0);
3200 }
3201
3202 switch (sk->state)
3203 {
3204 case TCP_SYN_SENT:
3205 if (!tcp_ack(sk, th, saddr))
3206 {
3207 tcp_reset(daddr, saddr, th, sk->prot, opt,dev);
3208 kfree_skb (skb, FREE_READ);
3209 release_sock(sk);
3210 return (0);
3211 }
3212
3213
3214
3215
3216 if (!th->syn)
3217 {
3218 kfree_skb (skb, FREE_READ);
3219 release_sock (sk);
3220 return (0);
3221 }
3222
3223
3224 sk->acked_seq = th->seq+1;
3225 sk->fin_seq = th->seq;
3226 tcp_send_ack (sk->send_seq, th->seq+1, sk,
3227 th, sk->daddr);
3228
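/* SYN+ACK accepted and acknowledged: fall through to the SYN_RECV case
   to complete the handshake. */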
3229 case TCP_SYN_RECV:
3230 if (!tcp_ack(sk, th, saddr))
3231 {
3232 tcp_reset(daddr, saddr, th, sk->prot, opt, dev);
3233 kfree_skb (skb, FREE_READ);
3234 release_sock(sk);
3235 return (0);
3236 }
3237
3238 sk->state = TCP_ESTABLISHED;
3239
3240
3241
3242
3243 tcp_options(sk, th);
3244 sk->dummy_th.dest = th->source;
3245 sk->copied_seq = sk->acked_seq-1;
3246 if (!sk->dead)
3247 {
3248 wake_up (sk->sleep);
3249 }
3250
3251
3252
3253 if (th->urg)
3254 if (tcp_urg (sk, th, saddr))
3255 {
3256 kfree_skb (skb, FREE_READ);
3257 release_sock(sk);
3258 return (0);
3259 }
3260 if (tcp_data (skb, sk, saddr, len))
3261 kfree_skb (skb, FREE_READ);
3262
3263 if (th->fin)
3264 tcp_fin(sk, th, saddr, dev);
3265
3266 release_sock(sk);
3267 return (0);
3268 }
3269
3270 if (th->urg)
3271 {
3272 if (tcp_urg (sk, th, saddr))
3273 {
3274 kfree_skb (skb, FREE_READ);
3275 release_sock (sk);
3276 return (0);
3277 }
3278 }
3279
3280 if (tcp_data (skb, sk, saddr, len))
3281 {
3282 kfree_skb (skb, FREE_READ);
3283 release_sock (sk);
3284 return (0);
3285 }
3286
3287 if (!th->fin)
3288 {
3289 release_sock(sk);
3290 return (0);
3291 }
3292 tcp_fin (sk, th, saddr, dev);
3293 release_sock(sk);
3294 return (0);
3295 }
3296 }
3297
3298
3299
3300
3301
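/*
 * tcp_write_wakeup: transmit a bare ACK (sequence one below send_seq)
 * advertising the current receive window; effectively a window probe
 * on an established connection.
 */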
3302 static void
3303 tcp_write_wakeup(volatile struct sock *sk)
3304 {
3305 struct sk_buff *buff;
3306 struct tcp_header *t1;
3307 struct device *dev=NULL;
3308 int tmp;
3309 if (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) return;
3310
3311 buff=sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
3312
3313 if (buff == NULL) return;
3314
3315 buff->lock = 0;
3316 buff->mem_addr = buff;
3317 buff->mem_len = MAX_ACK_SIZE;
3318 buff->len=sizeof (struct tcp_header);
3319 buff->free = 1;
3320 buff->sk = sk;
3321 PRINTK (("in tcp_write_wakeup\n"));
3322 t1=(struct tcp_header *)(buff + 1);
3323
3324
3325 tmp = sk->prot->build_header (buff, sk->saddr, sk->daddr, &dev,
3326 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE);
3327 if (tmp < 0)
3328 {
3329 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
3330 return;
3331 }
3332
3333 buff->len += tmp;
3334 t1 = (struct tcp_header *)((char *)t1 +tmp);
3335
3336 memcpy (t1,(void *) &sk->dummy_th, sizeof (*t1));
3337
3338
3339
3340 t1->seq = net32(sk->send_seq-1);
3341 t1->ack = 1;
3342 t1->res1= 0;
3343 t1->res2= 0;
3344 t1->rst = 0;
3345 t1->urg = 0;
3346 t1->psh = 0;
3347 t1->fin = 0;
3348 t1->syn = 0;
3349 t1->ack_seq = net32(sk->acked_seq);
3350 t1->window = net16(sk->prot->rspace(sk));
3351 t1->doff = sizeof (*t1)/4;
3352 tcp_send_check (t1, sk->saddr, sk->daddr, sizeof (*t1), sk);
3353
3354
3355 sk->prot->queue_xmit(sk, dev, buff, 1);
3356
3357 }
3358
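/*
 * Protocol operations table tying the generic socket layer to the TCP
 * entry points above, plus the shared sock_ and ip_ helpers.
 */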
3359 struct proto tcp_prot =
3360 {
3361 sock_wmalloc,
3362 sock_rmalloc,
3363 sock_wfree,
3364 sock_rfree,
3365 sock_rspace,
3366 sock_wspace,
3367 tcp_close,
3368 tcp_read,
3369 tcp_write,
3370 tcp_sendto,
3371 tcp_recvfrom,
3372 ip_build_header,
3373 tcp_connect,
3374 tcp_accept,
3375 ip_queue_xmit,
3376 tcp_retransmit,
3377 tcp_write_wakeup,
3378 tcp_read_wakeup,
3379 tcp_rcv,
3380 tcp_select,
3381 tcp_ioctl,
3382 NULL,
3383 tcp_shutdown,
3384 128,
3385 0,
3386 {NULL,},
3387 "TCP"
3388 };
3389
3390
3391
3392