/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	@(#)tcp_input.c	1.0.16	05/25/93
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/config.h>
#include <net/tcp.h>

/*
 *	This is the main buffer sending routine. We queue the buffer
 *	once we have checked that it looks sane.
 */

void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
{
        int size;
        struct tcphdr * th = skb->h.th;

        /*
         *	Length of packet (not counting length of pre-tcp headers)
         */

        size = skb->len - ((unsigned char *) th - skb->data);

        /*
         *	Sanity check it..
         */

        if (size < sizeof(struct tcphdr) || size > skb->len)
        {
                printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
                        skb, skb->data, th, skb->len);
                kfree_skb(skb, FREE_WRITE);
                return;
        }

        /*
         *	If we have queued a header-sized packet.. (these crash a few
         *	tcp stacks if ack is not set)
         */

        if (size == sizeof(struct tcphdr))
        {
                /* If it's got a syn or fin it's notionally included in the size.. */
                if (!th->syn && !th->fin)
                {
                        printk("tcp_send_skb: attempt to queue a bogon.\n");
                        kfree_skb(skb, FREE_WRITE);
                        return;
                }
        }

        /*
         *	Actual processing.
         */

        tcp_statistics.TcpOutSegs++;
        skb->seq = ntohl(th->seq);
        skb->end_seq = skb->seq + size - 4*th->doff;

        /*
         *	We must queue if
         *
         *	a) The right edge of this frame exceeds the window
         *	b) We are retransmitting (Nagle's rule)
         *	c) We have too many packets 'in flight'
         */

        if (after(skb->end_seq, sk->window_seq) ||
            (sk->retransmits && sk->ip_xmit_timeout == TIME_WRITE) ||
            sk->packets_out >= sk->cong_window)
        {
                /* The checksum will be supplied by tcp_write_xmit, so we
                 * shouldn't need to set it at all. Zero it to be safe. */
                th->check = 0;
                if (skb->next != NULL)
                {
                        printk("tcp_send_skb: next != NULL\n");
                        skb_unlink(skb);
                }
                skb_queue_tail(&sk->write_queue, skb);

                if (before(sk->window_seq, sk->write_queue.next->end_seq) &&
                    sk->send_head == NULL && sk->ack_backlog == 0)
                        tcp_reset_xmit_timer(sk, TIME_PROBE0, sk->rto);
        }
        else
        {
                /*
                 *	This is going straight out
                 */

                th->ack_seq = htonl(sk->acked_seq);
                th->window = htons(tcp_select_window(sk));

                tcp_send_check(th, sk->saddr, sk->daddr, size, skb);

                sk->sent_seq = sk->write_seq;

                /*
                 *	This is mad. The tcp retransmit queue is put together
                 *	by the ip layer. This causes half the problems with
                 *	unroutable FINs and other things.
                 */

                sk->prot->queue_xmit(sk, skb->dev, skb, 0);

                sk->ack_backlog = 0;
                sk->bytes_rcv = 0;

                /*
                 *	Set for next retransmit based on expected ACK time.
                 *	FIXME: We set this every time, which means our
                 *	retransmits are really about a window behind.
                 */

                tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
        }
}
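
/*
 *	A note on the sequence arithmetic used above: seq and end_seq are
 *	32-bit counters that wrap, so ordering is always decided with the
 *	before()/after() macros, which compare modulo 2^32 via a signed
 *	difference. For example, with skb->end_seq = 5 just after a wrap and
 *	sk->window_seq = 0xfffffffb, the plain compare 5 > 0xfffffffb is
 *	false, but after(5, 0xfffffffb) is true: 5 really is ten bytes past
 *	the window edge.
 */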

/*
 *	Locking problems lead us to a messy situation where we can have
 *	multiple partially complete buffers queued up. This is really bad
 *	as we don't want to be sending partial buffers. Fix this with
 *	a semaphore or similar, to lock tcp_write per socket.
 *
 *	These routines are pretty self-descriptive.
 */

struct sk_buff * tcp_dequeue_partial(struct sock * sk)
{
        struct sk_buff * skb;
        unsigned long flags;

        save_flags(flags);
        cli();
        skb = sk->partial;
        if (skb) {
                sk->partial = NULL;
                del_timer(&sk->partial_timer);
        }
        restore_flags(flags);
        return skb;
}

/*
 *	Empty the partial queue
 */

void tcp_send_partial(struct sock *sk)
{
        struct sk_buff *skb;

        if (sk == NULL)
                return;
        while ((skb = tcp_dequeue_partial(sk)) != NULL)
                tcp_send_skb(sk, skb);
}

/*
 *	Queue a partial frame
 */

void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk)
{
        struct sk_buff * tmp;
        unsigned long flags;

        save_flags(flags);
        cli();
        tmp = sk->partial;
        if (tmp)
                del_timer(&sk->partial_timer);
        sk->partial = skb;
        init_timer(&sk->partial_timer);
        /*
         *	Wait up to 1 second for the buffer to fill.
         */
        sk->partial_timer.expires = jiffies + HZ;
        sk->partial_timer.function = (void (*)(unsigned long)) tcp_send_partial;
        sk->partial_timer.data = (unsigned long) sk;
        add_timer(&sk->partial_timer);
        restore_flags(flags);
        if (tmp)
                tcp_send_skb(sk, tmp);
}
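
/*
 *	The net effect of the three routines above: at most one partially
 *	filled buffer is held back per socket, for at most one second (HZ
 *	jiffies), in the hope that more user data arrives to fill it.
 *	Enqueueing a new partial buffer pushes any previous one out
 *	immediately, so data is delayed by at most one timer period and is
 *	never reordered.
 */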

/*
 *	This routine takes stuff off of the write queue,
 *	and puts it in the xmit queue. This happens as incoming acks
 *	open up the remote window for us.
 */

void tcp_write_xmit(struct sock *sk)
{
        struct sk_buff *skb;

        /*
         *	The bytes will have to remain here. In time closedown will
         *	empty the write queue and all will be happy.
         */

        if (sk->zapped)
                return;

        /*
         *	Anything on the transmit queue that fits the window can
         *	be added providing we are not
         *
         *	a) retransmitting (Nagle's rule)
         *	b) exceeding our congestion window.
         */

        while ((skb = skb_peek(&sk->write_queue)) != NULL &&
               before(skb->end_seq, sk->window_seq + 1) &&
               (sk->retransmits == 0 ||
                sk->ip_xmit_timeout != TIME_WRITE ||
                before(skb->end_seq, sk->rcv_ack_seq + 1)) &&
               sk->packets_out < sk->cong_window)
        {
                IS_SKB(skb);
                skb_unlink(skb);

                /*
                 *	See if we really need to send the packet.
                 */

                if (before(skb->end_seq, sk->rcv_ack_seq + 1))
                {
                        /*
                         *	This is acked data. We can discard it. This
                         *	cannot currently occur.
                         */

                        sk->retransmits = 0;
                        kfree_skb(skb, FREE_WRITE);
                        if (!sk->dead)
                                sk->write_space(sk);
                }
                else
                {
                        struct tcphdr *th;
                        struct iphdr *iph;
                        int size;
                        /*
                         *	Put in the ack seq and window at this point rather
                         *	than earlier, in order to keep them monotonic. We
                         *	really want to avoid taking back window allocations.
                         *	That's legal, but RFC 1122 says it's frowned on.
                         *	Ack and window will in general have changed since
                         *	this packet was put on the write queue.
                         */
                        iph = skb->ip_hdr;
                        th = (struct tcphdr *)(((char *)iph) + (iph->ihl << 2));
                        size = skb->len - (((unsigned char *) th) - skb->data);
#ifndef CONFIG_NO_PATH_MTU_DISCOVERY
                        if (size > sk->mtu - sizeof(struct iphdr))
                        {
                                iph->frag_off &= ~htons(IP_DF);
                                ip_send_check(iph);
                        }
#endif

                        th->ack_seq = htonl(sk->acked_seq);
                        th->window = htons(tcp_select_window(sk));

                        tcp_send_check(th, sk->saddr, sk->daddr, size, skb);

                        sk->sent_seq = skb->end_seq;

                        /*
                         *	IP manages our queue for some crazy reason
                         */

                        sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);

                        sk->ack_backlog = 0;
                        sk->bytes_rcv = 0;

                        /*
                         *	Again we slide the timer wrongly.
                         */

                        tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
                }
        }
}
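
/*
 *	Note the double check against rcv_ack_seq above: the loop guard can
 *	let an already-acknowledged frame through when we are not in
 *	retransmit state, and the test inside the loop then discards it
 *	quietly instead of sending data the peer has already acknowledged.
 */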


/*
 *	A socket has timed out on its send queue and wants to do a
 *	little retransmitting. Currently this means TCP.
 */

void tcp_do_retransmit(struct sock *sk, int all)
{
        struct sk_buff * skb;
        struct proto *prot;
        struct device *dev;
        int ct = 0;
        struct rtable *rt;

        prot = sk->prot;
        skb = sk->send_head;

        while (skb != NULL)
        {
                struct tcphdr *th;
                struct iphdr *iph;
                int size;

                dev = skb->dev;
                IS_SKB(skb);
                skb->when = jiffies;

                /*
                 *	dl1bke 960201 - @%$$! Hope this cures strange race conditions
                 *	with AX.25 mode VC (esp. DAMA). If the buffer is locked we
                 *	should not retransmit anyway, so we don't need all the fuss
                 *	to prepare the buffer in this case. (The skb_pull() changes
                 *	skb->data while we may actually try to send the data. Ouch.
                 *	A side effect is that we'll send some unnecessary data, but
                 *	the alternative is disastrous...)
                 */

                if (skb_device_locked(skb))
                        break;

                /*
                 *	Discard the surplus MAC header
                 */

                skb_pull(skb, ((unsigned char *)skb->ip_hdr) - skb->data);

                /*
                 *	In general it's OK just to use the old packet. However we
                 *	need to use the current ack and window fields. Urg and
                 *	urg_ptr could possibly stand to be updated as well, but we
                 *	don't keep the necessary data. That shouldn't be a problem,
                 *	if the other end is doing the right thing. Since we're
                 *	changing the packet, we have to issue a new IP identifier.
                 */

                iph = (struct iphdr *)skb->data;
                th = (struct tcphdr *)(((char *)iph) + (iph->ihl << 2));
                size = ntohs(iph->tot_len) - (iph->ihl << 2);

                /*
                 *	Note: We ought to check for window limits here, but
                 *	currently this is done (less efficiently) elsewhere.
                 */

                /*
                 *	Put a MAC header back on (may cause ARPing)
                 */

                {
                        /* ANK: ugly, but the bug that was here should be fixed. */
                        struct options * opt = (struct options *)skb->proto_priv;
                        rt = ip_check_route(&sk->ip_route_cache, opt->srr ? opt->faddr : iph->daddr, skb->localroute);
                }

                iph->id = htons(ip_id_count++);
#ifndef CONFIG_NO_PATH_MTU_DISCOVERY
                if (rt && ntohs(iph->tot_len) > rt->rt_mtu)
                        iph->frag_off &= ~htons(IP_DF);
#endif
                ip_send_check(iph);

                if (rt == NULL)	/* Deep poo */
                {
                        if (skb->sk)
                        {
                                skb->sk->err_soft = ENETUNREACH;
                                skb->sk->error_report(skb->sk);
                        }
                }
                else
                {
                        dev = rt->rt_dev;
                        skb->raddr = rt->rt_gateway;
                        skb->dev = dev;
                        skb->arp = 1;
                        if (rt->rt_hh)
                        {
                                memcpy(skb_push(skb, dev->hard_header_len), rt->rt_hh->hh_data, dev->hard_header_len);
                                if (!rt->rt_hh->hh_uptodate)
                                {
                                        skb->arp = 0;
#if RT_CACHE_DEBUG >= 2
                                        printk("tcp_do_retransmit: hh miss %08x via %08x\n", iph->daddr, rt->rt_gateway);
#endif
                                }
                        }
                        else if (dev->hard_header)
                        {
                                if (dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, skb->len) < 0)
                                        skb->arp = 0;
                        }

                        /*
                         *	This is not the right way to handle this. We have to
                         *	issue an up to date window and ack report with this
                         *	retransmit to keep the odd buggy tcp that relies on
                         *	the fact BSD does this happy.
                         *	We don't however need to recalculate the entire
                         *	checksum, so someone wanting a small problem to play
                         *	with might like to implement RFC 1141/RFC 1624 and
                         *	speed this up by avoiding a full checksum.
                         */
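
                        /*
                         *	For reference, the RFC 1624 incremental update is
                         *	HC' = ~(~HC + ~m + m'), where HC is the old checksum
                         *	and m/m' are the old and new values of the changed
                         *	16-bit field, all in ones-complement arithmetic. A
                         *	sketch of what such a helper might look like
                         *	(hypothetical, not used anywhere in this file):
                         *
                         *	static inline unsigned short csum_incr(unsigned short check,
                         *		unsigned short old, unsigned short new)
                         *	{
                         *		unsigned long sum = (~check & 0xffff) +
                         *				    (~old & 0xffff) + new;
                         *		sum = (sum & 0xffff) + (sum >> 16);
                         *		return ~(sum + (sum >> 16)) & 0xffff;
                         *	}
                         */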

                        th->ack_seq = htonl(sk->acked_seq);
                        sk->ack_backlog = 0;
                        sk->bytes_rcv = 0;
                        th->window = htons(tcp_select_window(sk));
                        tcp_send_check(th, sk->saddr, sk->daddr, size, skb);

                        /*
                         *	If the interface is (still) up and running, kick it.
                         */

                        if (dev->flags & IFF_UP)
                        {
                                /*
                                 *	If the packet is still being sent by the
                                 *	device/protocol below then don't retransmit.
                                 *	This is both needed and good - especially with
                                 *	connected mode AX.25, where it stops resends of
                                 *	a frame that has not actually been sent yet!
                                 *	We still add up the counts, as the round trip
                                 *	time wants adjusting.
                                 */
                                if (sk && !skb_device_locked(skb))
                                {
                                        /* Remove it from any existing driver queue first! */
                                        skb_unlink(skb);
                                        /* Now queue it */
                                        ip_statistics.IpOutRequests++;
                                        dev_queue_xmit(skb, dev, sk->priority);
                                }
                        }
                }

                /*
                 *	Count retransmissions
                 */

                ct++;
                sk->prot->retransmits++;
                tcp_statistics.TcpRetransSegs++;

                /*
                 *	Only one retransmit requested.
                 */

                if (!all)
                        break;

                /*
                 *	This should cut it off before we send too many packets.
                 */

                if (ct >= sk->cong_window)
                        break;
                skb = skb->link3;
        }
}
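
/*
 *	The 'all' flag selects how much work the loop above does: a plain
 *	retransmit timeout resends only the head of the send queue (all == 0,
 *	so the loop breaks after one segment), while a full requeue walks the
 *	link3 chain until a congestion window's worth of segments has been
 *	resent.
 */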

/*
 *	This routine will send an RST to the other tcp.
 */

void tcp_send_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
                    struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
{
        struct sk_buff *buff;
        struct tcphdr *t1;
        int tmp;
        struct device *ndev = NULL;

        /*
         *	Cannot reset a reset (Think about it).
         */

        if (th->rst)
                return;

        /*
         *	We need to grab some memory, and put together an RST,
         *	and then put it into the queue to be sent.
         */

        buff = sock_wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
        if (buff == NULL)
                return;

        buff->sk = NULL;
        buff->dev = dev;
        buff->localroute = 0;
        buff->csum = 0;

        /*
         *	Put in the IP header and routing stuff.
         */

        tmp = prot->build_header(buff, saddr, daddr, &ndev, IPPROTO_TCP, opt,
                                 sizeof(struct tcphdr), tos, ttl, NULL);
        if (tmp < 0)
        {
                buff->free = 1;
                sock_wfree(NULL, buff);
                return;
        }

        t1 = (struct tcphdr *)skb_put(buff, sizeof(struct tcphdr));
        memset(t1, 0, sizeof(*t1));

        /*
         *	Swap the send and the receive.
         */

        t1->dest = th->source;
        t1->source = th->dest;
        t1->doff = sizeof(*t1)/4;
        t1->rst = 1;

        if (th->ack)
        {
                t1->seq = th->ack_seq;
        }
        else
        {
                t1->ack = 1;
                if (!th->syn)
                        t1->ack_seq = th->seq;
                else
                        t1->ack_seq = htonl(ntohl(th->seq) + 1);
        }

        tcp_send_check(t1, saddr, daddr, sizeof(*t1), buff);
        prot->queue_xmit(NULL, ndev, buff, 1);
        tcp_statistics.TcpOutSegs++;
}
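
/*
 *	The seq/ack choice above follows the RFC 793 reset generation rules:
 *	if the offending segment carried an ACK, the reset takes its sequence
 *	number from that ack field and sets no ACK bit; otherwise the reset
 *	has sequence number zero (the header was zeroed by the memset) and
 *	acknowledges the segment's own sequence number, +1 if it was a SYN,
 *	so the other end can match the reset against its connection state.
 */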

/*
 *	Send a fin.
 */

void tcp_send_fin(struct sock *sk)
{
        struct proto *prot = (struct proto *)sk->prot;
        struct tcphdr *th = (struct tcphdr *)&sk->dummy_th;
        struct tcphdr *t1;
        struct sk_buff *buff;
        struct device *dev = NULL;
        int tmp;

        buff = sock_wmalloc(sk, MAX_RESET_SIZE, 1, GFP_KERNEL);

        if (buff == NULL)
        {
                /* This is a disaster if it occurs */
                printk("tcp_send_fin: Impossible malloc failure\n");
                return;
        }

        /*
         *	Administrivia
         */

        buff->sk = sk;
        buff->localroute = sk->localroute;
        buff->csum = 0;

        /*
         *	Put in the IP header and routing stuff.
         */

        tmp = prot->build_header(buff, sk->saddr, sk->daddr, &dev,
                                 IPPROTO_TCP, sk->opt,
                                 sizeof(struct tcphdr), sk->ip_tos, sk->ip_ttl, &sk->ip_route_cache);
        if (tmp < 0)
        {
                int t;
                /*
                 *	Finish anyway, treat this as a send that got lost.
                 *	(Not good).
                 */

                buff->free = 1;
                sock_wfree(sk, buff);
                sk->write_seq++;
                t = del_timer(&sk->timer);
                if (t)
                        add_timer(&sk->timer);
                else
                        tcp_reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
                return;
        }

        /*
         *	We ought to check if the end of the queue is a buffer and
         *	if so simply add the fin to that buffer, not send it ahead.
         */

        t1 = (struct tcphdr *)skb_put(buff, sizeof(struct tcphdr));
        buff->dev = dev;
        memcpy(t1, th, sizeof(*t1));
        buff->seq = sk->write_seq;
        sk->write_seq++;
        buff->end_seq = sk->write_seq;
        t1->seq = htonl(buff->seq);
        t1->ack_seq = htonl(sk->acked_seq);
        t1->window = htons(sk->window = tcp_select_window(sk));
        t1->fin = 1;
        tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);

        /*
         *	If there is data in the write queue, the fin must be appended to
         *	the write queue.
         */

        if (skb_peek(&sk->write_queue) != NULL)
        {
                buff->free = 0;
                if (buff->next != NULL)
                {
                        printk("tcp_send_fin: next != NULL\n");
                        skb_unlink(buff);
                }
                skb_queue_tail(&sk->write_queue, buff);
        }
        else
        {
                sk->sent_seq = sk->write_seq;
                sk->prot->queue_xmit(sk, dev, buff, 0);
                tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
        }
}
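
/*
 *	A FIN occupies one position in sequence space - hence the
 *	sk->write_seq++ above - so the peer's ACK of the FIN can be told
 *	apart from an ACK of the data that preceded it. The same holds for
 *	the SYN sent by tcp_send_synack() below.
 */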


void tcp_send_synack(struct sock * newsk, struct sock * sk, struct sk_buff * skb)
{
        struct tcphdr *t1;
        unsigned char *ptr;
        struct sk_buff * buff;
        struct device *ndev = NULL;
        int tmp;

        buff = sock_wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
        if (buff == NULL)
        {
                sk->err = ENOMEM;
                destroy_sock(newsk);
                kfree_skb(skb, FREE_READ);
                tcp_statistics.TcpAttemptFails++;
                return;
        }

        buff->sk = newsk;
        buff->localroute = newsk->localroute;

        /*
         *	Put in the IP header and routing stuff.
         */

        tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &ndev,
                                     IPPROTO_TCP, NULL, MAX_SYN_SIZE, sk->ip_tos, sk->ip_ttl, &newsk->ip_route_cache);

        /*
         *	Something went wrong.
         */

        if (tmp < 0)
        {
                sk->err = tmp;
                buff->free = 1;
                kfree_skb(buff, FREE_WRITE);
                destroy_sock(newsk);
                skb->sk = sk;
                kfree_skb(skb, FREE_READ);
                tcp_statistics.TcpAttemptFails++;
                return;
        }

        t1 = (struct tcphdr *)skb_put(buff, sizeof(struct tcphdr));

        memcpy(t1, skb->h.th, sizeof(*t1));
        buff->seq = newsk->write_seq++;
        buff->end_seq = newsk->write_seq;
        /*
         *	Swap the send and the receive.
         */
        t1->dest = skb->h.th->source;
        t1->source = newsk->dummy_th.source;
        t1->seq = htonl(buff->seq);
        newsk->sent_seq = newsk->write_seq;
        t1->window = htons(tcp_select_window(newsk));
        t1->syn = 1;
        t1->ack = 1;
        t1->urg = 0;
        t1->rst = 0;
        t1->psh = 0;
        t1->ack_seq = htonl(newsk->acked_seq);
        t1->doff = sizeof(*t1)/4 + 1;
        ptr = skb_put(buff, 4);
        ptr[0] = 2;
        ptr[1] = 4;
        ptr[2] = ((newsk->mtu) >> 8) & 0xff;
        ptr[3] = (newsk->mtu) & 0xff;
        buff->csum = csum_partial(ptr, 4, 0);
        tcp_send_check(t1, newsk->saddr, newsk->daddr, sizeof(*t1) + 4, buff);
        newsk->prot->queue_xmit(newsk, ndev, buff, 0);
        tcp_reset_xmit_timer(newsk, TIME_WRITE, TCP_TIMEOUT_INIT);
        skb->sk = newsk;

        /*
         *	Charge the sock_buff to newsk.
         */

        sk->rmem_alloc -= skb->truesize;
        newsk->rmem_alloc += skb->truesize;

        skb_queue_tail(&sk->receive_queue, skb);
        sk->ack_backlog++;
        tcp_statistics.TcpOutSegs++;
}
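
/*
 *	The four option bytes appended above form the MSS option from
 *	RFC 793: kind = 2, length = 4, then the 16-bit maximum segment size
 *	in network byte order. That one extra 32-bit word of options is also
 *	why doff is set to sizeof(*t1)/4 + 1.
 */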

/*
 *	This routine sends an ack and also updates the window.
 */

void tcp_send_ack(u32 sequence, u32 ack,
                  struct sock *sk,
                  struct tcphdr *th, u32 daddr)
{
        struct sk_buff *buff;
        struct tcphdr *t1;
        struct device *dev = NULL;
        int tmp;

        if (sk->zapped)
                return;	/* We have been reset, we may not send again */

        /*
         *	We need to grab some memory, and put together an ack,
         *	and then put it into the queue to be sent.
         */

        buff = sock_wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
        if (buff == NULL)
        {
                /*
                 *	Force it to send an ack. We don't have to do this
                 *	(ACK is unreliable) but it's much better use of
                 *	bandwidth on slow links to send a spare ack than
                 *	resend packets.
                 */

                sk->ack_backlog++;
                if (sk->ip_xmit_timeout != TIME_WRITE && tcp_connected(sk->state))
                {
                        tcp_reset_xmit_timer(sk, TIME_WRITE, HZ);
                }
                return;
        }

        /*
         *	Assemble a suitable TCP frame
         */

        buff->sk = sk;
        buff->localroute = sk->localroute;
        buff->csum = 0;

        /*
         *	Put in the IP header and routing stuff.
         */

        tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
                                     IPPROTO_TCP, sk->opt, MAX_ACK_SIZE, sk->ip_tos, sk->ip_ttl, &sk->ip_route_cache);
        if (tmp < 0)
        {
                buff->free = 1;
                sock_wfree(sk, buff);
                return;
        }
        t1 = (struct tcphdr *)skb_put(buff, sizeof(struct tcphdr));

        memcpy(t1, &sk->dummy_th, sizeof(*t1));

        /*
         *	Swap the send and the receive.
         */

        t1->dest = th->source;
        t1->source = th->dest;
        t1->seq = htonl(sequence);
        sk->window = tcp_select_window(sk);
        t1->window = htons(sk->window);

        /*
         *	If we have nothing queued for transmit and the transmit timer
         *	is on we are just doing an ACK timeout and need to switch
         *	to a keepalive.
         */

        if (ack == sk->acked_seq) {
                sk->ack_backlog = 0;
                sk->bytes_rcv = 0;
                sk->ack_timed = 0;

                if (sk->send_head == NULL && skb_peek(&sk->write_queue) == NULL
                    && sk->ip_xmit_timeout == TIME_WRITE)
                {
                        if (sk->keepopen)
                                tcp_reset_xmit_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
                        else
                                delete_timer(sk);
                }
        }

        /*
         *	Fill in the packet and send it
         */

        t1->ack_seq = htonl(ack);
        tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), buff);
        if (sk->debug)
                printk("\rtcp_ack: seq %x ack %x\n", sequence, ack);
        sk->prot->queue_xmit(sk, dev, buff, 1);
        tcp_statistics.TcpOutSegs++;
}
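
/*
 *	A pure ACK carries no data and consumes no sequence space, so it is
 *	never retransmitted. That is why the allocation-failure path above
 *	can simply bump ack_backlog and arm a short timer to try again, and
 *	why the frame is handed to queue_xmit() with the free flag set.
 */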

/*
 *	This routine sends a packet with an out of date sequence
 *	number. It assumes the other end will try to ack it.
 */

void tcp_write_wakeup(struct sock *sk)
{
        struct sk_buff *buff, *skb;
        struct tcphdr *t1;
        struct device *dev = NULL;
        int tmp;

        if (sk->zapped)
                return;	/* After a valid reset we can send no more */

        /*
         *	Write data can still be transmitted/retransmitted in the
         *	following states. If any other state is encountered, return.
         *	[listen/close will never occur here anyway]
         */

        if (sk->state != TCP_ESTABLISHED &&
            sk->state != TCP_CLOSE_WAIT &&
            sk->state != TCP_FIN_WAIT1 &&
            sk->state != TCP_LAST_ACK &&
            sk->state != TCP_CLOSING)
        {
                return;
        }

        if (before(sk->sent_seq, sk->window_seq) &&
            (skb = skb_peek(&sk->write_queue)))
        {
                /*
                 *	We are probing the opening of a window, but the window
                 *	size is != 0 - this must be a result of sender-side SWS
                 *	(silly window syndrome) avoidance.
                 */

                struct iphdr *iph;
                struct tcphdr *th;
                struct tcphdr *nth;
                unsigned long win_size;
#if 0
                unsigned long ow_size;
#endif

                /*
                 *	How many bytes can we send?
                 */

                win_size = sk->window_seq - sk->sent_seq;

                /*
                 *	Recover the buffer pointers
                 */

                iph = (struct iphdr *)skb->ip_hdr;
                th = (struct tcphdr *)(((char *)iph) + (iph->ihl << 2));

                /*
                 *	Grab the data for a temporary frame
                 */

                buff = sock_wmalloc(sk, win_size + th->doff * 4 +
                                    (iph->ihl << 2) +
                                    sk->prot->max_header + 15,
                                    1, GFP_ATOMIC);
                if (buff == NULL)
                        return;

                /*
                 *	If we strip the packet on the write queue we must
                 *	be ready to retransmit this one
                 */

                buff->free = /*0*/1;

                buff->sk = sk;
                buff->localroute = sk->localroute;

                /*
                 *	Put headers on the new packet
                 */

                tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
                                             IPPROTO_TCP, sk->opt, buff->truesize,
                                             sk->ip_tos, sk->ip_ttl, &sk->ip_route_cache);
                if (tmp < 0)
                {
                        sock_wfree(sk, buff);
                        return;
                }

                /*
                 *	Move the TCP header over
                 */

                buff->dev = dev;

                nth = (struct tcphdr *) skb_put(buff, sizeof(*th));

                memcpy(nth, th, sizeof(*th));

                /*
                 *	Correct the new header
                 */

                nth->ack = 1;
                nth->ack_seq = htonl(sk->acked_seq);
                nth->window = htons(tcp_select_window(sk));
                nth->check = 0;

                /*
                 *	Copy TCP options and data start to our new buffer
                 */

                buff->csum = csum_partial_copy((void *)(th + 1), skb_put(buff, win_size),
                                               win_size + th->doff*4 - sizeof(*th), 0);

                /*
                 *	Remember our right edge sequence number.
                 */

                buff->end_seq = sk->sent_seq + win_size;
                sk->sent_seq = buff->end_seq;	/* Hack */
                if (th->urg && ntohs(th->urg_ptr) < win_size)
                        nth->urg = 0;

                /*
                 *	Checksum the split buffer
                 */

                tcp_send_check(nth, sk->saddr, sk->daddr,
                               nth->doff * 4 + win_size, buff);
        }
        else
        {
                buff = sock_wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
                if (buff == NULL)
                        return;

                buff->free = 1;
                buff->sk = sk;
                buff->localroute = sk->localroute;
                buff->csum = 0;

                /*
                 *	Put in the IP header and routing stuff.
                 */

                tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
                                             IPPROTO_TCP, sk->opt, MAX_ACK_SIZE, sk->ip_tos, sk->ip_ttl, &sk->ip_route_cache);
                if (tmp < 0)
                {
                        sock_wfree(sk, buff);
                        return;
                }

                t1 = (struct tcphdr *)skb_put(buff, sizeof(struct tcphdr));
                memcpy(t1, (void *)&sk->dummy_th, sizeof(*t1));

                /*
                 *	Use a previous sequence.
                 *	This should cause the other end to send an ack.
                 */

                t1->seq = htonl(sk->sent_seq - 1);
                /* t1->fin = 0;	-- We are sending a 'previous' sequence, and 0 bytes of data - thus no FIN bit */
                t1->ack_seq = htonl(sk->acked_seq);
                t1->window = htons(tcp_select_window(sk));
                tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);
        }

        /*
         *	Send it.
         */

        sk->prot->queue_xmit(sk, dev, buff, 1);
        tcp_statistics.TcpOutSegs++;
}
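
/*
 *	Two probe styles are used above. If some of the queued data now fits
 *	inside the peer's window, a copy of just that much data is sent; the
 *	original skb stays on the write queue for normal retransmission.
 *	Otherwise a zero-length segment with an old sequence number
 *	(sent_seq - 1) goes out; the peer discards it as a duplicate but must
 *	reply with an ACK carrying its current window, which is exactly what
 *	a zero-window probe needs to learn.
 */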

/*
 *	A window probe timeout has occurred.
 */

void tcp_send_probe0(struct sock *sk)
{
        if (sk->zapped)
                return;	/* After a valid reset we can send no more */

        tcp_write_wakeup(sk);

        sk->backoff++;
        sk->rto = min(sk->rto << 1, 120*HZ);
        sk->retransmits++;
        sk->prot->retransmits++;
        tcp_reset_xmit_timer(sk, TIME_PROBE0, sk->rto);
}
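
/*
 *	Probe intervals back off exponentially: each timeout doubles the rto,
 *	capped at 120 seconds. Starting from an rto of, say, 3 seconds, the
 *	probes go out after roughly 6, 12, 24, 48, 96 and then every 120
 *	seconds until the window opens or the connection is torn down.
 */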