This source file includes the following definitions:
- nr_output
- nr_send_iframe
- nr_send_nak_frame
- nr_kick
- nr_transmit_buffer
- nr_establish_data_link
- nr_enquiry_response
- nr_check_iframes_acked
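
/*
 *	NET/ROM transport output routines: fragmentation of outgoing data
 *	into device-MTU-sized frames, transmission of I frames and NAK
 *	frames, window handling (nr_kick) and acknowledgement bookkeeping.
 */
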
#include <linux/config.h>
#ifdef CONFIG_NETROM
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/segment.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/netrom.h>
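/*
 *	Queue data for transmission on a NET/ROM connection. If the payload
 *	is larger than the device MTU it is split into MTU-sized fragments,
 *	each carrying its own copy of the transport header, with NR_MORE_FLAG
 *	set on every fragment except the last. For example, with a device MTU
 *	of 236 a 600-byte payload is queued as fragments of 236, 236 and 128
 *	data bytes. The frames are placed on the socket write queue and, if
 *	the connection is established (NR_STATE_3), nr_kick() is called to
 *	start transmission.
 */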
void nr_output(struct sock *sk, struct sk_buff *skb)
{
        struct sk_buff *skbn;
        unsigned char transport[NR_TRANSPORT_LEN];
        int err, frontlen, len, mtu;

        mtu = sk->nr->device->mtu;

        if (skb->len - NR_TRANSPORT_LEN > mtu) {
                /* Save a copy of the transport header, it is repeated on every fragment. */
                memcpy(transport, skb->data, NR_TRANSPORT_LEN);
                skb_pull(skb, NR_TRANSPORT_LEN);

                frontlen = skb_headroom(skb);

                while (skb->len > 0) {
                        if ((skbn = sock_alloc_send_skb(sk, frontlen + mtu, 0, 0, &err)) == NULL)
                                return;

                        skbn->sk   = sk;
                        skbn->free = 1;
                        skbn->arp  = 1;

                        skb_reserve(skbn, frontlen);

                        len = (mtu > skb->len) ? skb->len : mtu;

                        /* Copy the user data into the fragment. */
                        memcpy(skb_put(skbn, len), skb->data, len);
                        skb_pull(skb, len);

                        /* Duplicate the transport header in front of the data. */
                        skb_push(skbn, NR_TRANSPORT_LEN);
                        memcpy(skbn->data, transport, NR_TRANSPORT_LEN);

                        if (skb->len > 0)
                                skbn->data[4] |= NR_MORE_FLAG;

                        skb_queue_tail(&sk->write_queue, skbn);
                }

                skb->free = 1;
                kfree_skb(skb, FREE_WRITE);
        } else {
                skb_queue_tail(&sk->write_queue, skb);
        }

        if (sk->nr->state == NR_STATE_3)
                nr_kick(sk);
}
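/*
 *	Fill in the sequence numbers of an I frame (data[2] = V(S),
 *	data[3] = V(R)), set NR_CHOKE_FLAG if our receive side is busy,
 *	and hand the frame to nr_transmit_buffer() for routing.
 */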
static void nr_send_iframe(struct sock *sk, struct sk_buff *skb)
{
        if (skb == NULL)
                return;

        skb->data[2] = sk->nr->vs;
        skb->data[3] = sk->nr->vr;

        if (sk->nr->condition & OWN_RX_BUSY_CONDITION)
                skb->data[4] |= NR_CHOKE_FLAG;

        nr_transmit_buffer(sk, skb);
}

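/*
 *	Retransmit the oldest unacknowledged I frame: the frame at the head
 *	of the ack queue is cloned, stamped with the current V(A) and V(R)
 *	values and transmitted again.
 */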
void nr_send_nak_frame(struct sock *sk)
{
        struct sk_buff *skb, *skbn;

        if ((skb = skb_peek(&sk->nr->ack_queue)) == NULL)
                return;

        if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL)
                return;

        skbn->data[2] = sk->nr->va;
        skbn->data[3] = sk->nr->vr;

        if (sk->nr->condition & OWN_RX_BUSY_CONDITION)
                skbn->data[4] |= NR_CHOKE_FLAG;

        nr_transmit_buffer(sk, skbn);

        sk->nr->condition &= ~ACK_PENDING_CONDITION;
        sk->nr->vl = sk->nr->vr;
        sk->nr->t1timer = 0;
}

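/*
 *	Transmit as much of the write queue as the flow-control window
 *	allows. Each frame is cloned before transmission and the original is
 *	moved to the ack queue until it is acknowledged. T1 is started if it
 *	is not already running, and the socket timer is reset.
 */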
void nr_kick(struct sock *sk)
{
        struct sk_buff *skb, *skbn;
        int last = 1;
        unsigned short start, end, next;

        del_timer(&sk->timer);

        start = (skb_peek(&sk->nr->ack_queue) == NULL) ? sk->nr->va : sk->nr->vs;
        end   = (sk->nr->va + sk->window) % NR_MODULUS;

        if (!(sk->nr->condition & PEER_RX_BUSY_CONDITION) &&
            start != end &&
            skb_peek(&sk->write_queue) != NULL) {

                sk->nr->vs = start;

                /*
                 * Transmit data until either we're out of data to send or
                 * the window is full.
                 */
                skb = skb_dequeue(&sk->write_queue);

                do {
                        if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
                                skb_queue_head(&sk->write_queue, skb);
                                return;
                        }

                        next = (sk->nr->vs + 1) % NR_MODULUS;
                        last = (next == end);

                        /* Transmit the frame copy. */
                        nr_send_iframe(sk, skbn);

                        sk->nr->vs = next;

                        /* Hold the original frame on the ack queue until it is acknowledged. */
                        skb_queue_tail(&sk->nr->ack_queue, skb);

                } while (!last && (skb = skb_dequeue(&sk->write_queue)) != NULL);

                sk->nr->vl = sk->nr->vr;
                sk->nr->condition &= ~ACK_PENDING_CONDITION;

                if (sk->nr->t1timer == 0) {
                        sk->nr->t1timer = sk->nr->t1 = nr_calculate_t1(sk);
                }
        }

        nr_set_timer(sk);
}
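/*
 *	Prepend the NET/ROM network header (source and destination AX.25
 *	addresses followed by the TTL byte) and pass the frame to the
 *	routing code. If no route exists the frame is dropped and the
 *	connection is torn down with ENETUNREACH.
 */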
void nr_transmit_buffer(struct sock *sk, struct sk_buff *skb)
{
        unsigned char *dptr;

        /* Build the NET/ROM network header in front of the transport header. */
        dptr = skb_push(skb, NR_NETWORK_LEN);

        memcpy(dptr, &sk->nr->source_addr, AX25_ADDR_LEN);
        dptr[6] &= ~LAPB_C;
        dptr[6] &= ~LAPB_E;
        dptr[6] |= SSSID_SPARE;
        dptr += AX25_ADDR_LEN;

        memcpy(dptr, &sk->nr->dest_addr, AX25_ADDR_LEN);
        dptr[6] &= ~LAPB_C;
        dptr[6] |= LAPB_E;
        dptr[6] |= SSSID_SPARE;
        dptr += AX25_ADDR_LEN;

        *dptr++ = nr_default.ttl;

        skb->arp = 1;

        if (!nr_route_frame(skb, NULL)) {
                /* No route to the destination: drop the frame and kill the connection. */
                kfree_skb(skb, FREE_WRITE);

                sk->state = TCP_CLOSE;
                sk->err   = ENETUNREACH;
                if (!sk->dead)
                        sk->state_change(sk);
                sk->dead  = 1;
        }
}
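/*
 *	Start connection establishment: reset the link conditions and retry
 *	counter, send a NET/ROM connect request and start the T1 timer.
 */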
void nr_establish_data_link(struct sock *sk)
{
        sk->nr->condition = 0x00;
        sk->nr->n2count   = 0;

        nr_write_internal(sk, NR_CONNREQ);

        sk->nr->t2timer = 0;
        sk->nr->t1timer = sk->nr->t1 = nr_calculate_t1(sk);
}
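/*
 *	Answer an enquiry from the peer with an information acknowledgement,
 *	adding NR_CHOKE_FLAG if our receive side is busy, or NR_NAK_FLAG if
 *	out-of-sequence frames are waiting in the resequencing queue.
 */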
void nr_enquiry_response(struct sock *sk)
{
        int frametype = NR_INFOACK;

        if (sk->nr->condition & OWN_RX_BUSY_CONDITION) {
                frametype |= NR_CHOKE_FLAG;
        } else {
                if (skb_peek(&sk->nr->reseq_queue) != NULL) {
                        frametype |= NR_NAK_FLAG;
                }
        }

        nr_write_internal(sk, frametype);

        sk->nr->vl = sk->nr->vr;
        sk->nr->condition &= ~ACK_PENDING_CONDITION;
}
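/*
 *	Process an acknowledgement from the peer. If everything sent so far
 *	has been acknowledged, stop T1 and reset the retry counter; if only
 *	some frames were acknowledged, release those frames and restart T1.
 */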
void nr_check_iframes_acked(struct sock *sk, unsigned short nr)
{
        if (sk->nr->vs == nr) {
                /* All outstanding frames have been acknowledged. */
                nr_frames_acked(sk, nr);
                nr_calculate_rtt(sk);
                sk->nr->t1timer = 0;
                sk->nr->n2count = 0;
        } else {
                if (sk->nr->va != nr) {
                        /* Partial acknowledgement: release the acked frames and restart T1. */
                        nr_frames_acked(sk, nr);
                        sk->nr->t1timer = sk->nr->t1 = nr_calculate_t1(sk);
                }
        }
}

#endif