This source file includes following definitions.
- show_net_buffers
- skb_check
- skb_queue_head_init
- skb_queue_head
- skb_queue_tail
- skb_dequeue
- skb_insert
- skb_append
- skb_unlink
- kfree_skb
- alloc_skb
- kfree_skbmem
- skb_clone
- skb_device_lock
- skb_device_unlock
- dev_kfree_skb
- skb_device_locked
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22 #include <linux/config.h>
23 #include <linux/types.h>
24 #include <linux/kernel.h>
25 #include <linux/sched.h>
26 #include <asm/segment.h>
27 #include <asm/system.h>
28 #include <linux/mm.h>
29 #include <linux/interrupt.h>
30 #include <linux/in.h>
31 #include <linux/inet.h>
32 #include <linux/netdevice.h>
33 #include "ip.h"
34 #include "protocol.h"
35 #include <linux/string.h>
36 #include "route.h"
37 #include "tcp.h"
38 #include "udp.h"
39 #include <linux/skbuff.h>
40 #include "sock.h"
41
42
43
44
45
46
/*
 *	Global accounting for the sk_buff allocator.  Declared volatile
 *	because they are updated from both process and interrupt context
 *	(see the cli()-protected updates in alloc_skb/kfree_skbmem below).
 */
volatile unsigned long net_memory = 0;		/* bytes currently committed to network buffers */
volatile unsigned long net_skbcount = 0;	/* sk_buffs currently allocated */
volatile unsigned long net_locked = 0;		/* sk_buffs currently locked by device drivers */
volatile unsigned long net_allocs = 0;		/* total successful alloc_skb() calls */
volatile unsigned long net_fails = 0;		/* total alloc_skb() kmalloc failures */
volatile unsigned long net_free_locked = 0;	/* kfree_skb() calls deferred because skb was locked */
53
54 void show_net_buffers(void)
55 {
56 printk("Networking buffers in use : %lu\n",net_skbcount);
57 printk("Memory committed to network buffers: %lu\n",net_memory);
58 printk("Network buffers locked by drivers : %lu\n",net_locked);
59 printk("Total network buffer allocations : %lu\n",net_allocs);
60 printk("Total failed network buffer allocs : %lu\n",net_fails);
61 printk("Total free while locked events : %lu\n",net_free_locked);
62 }
63
64 #if CONFIG_SKB_CHECK
65
66
67
68
69
/*
 *	Debug-build sanity check for an sk_buff (or, with head != 0, for a
 *	queue head).  Validates the magic_debug_cookie and the next/prev
 *	links; on any failure it logs the caller's file/line and returns -1,
 *	otherwise 0.  Normally invoked through the IS_SKB/IS_SKB_HEAD macros.
 */
int skb_check(struct sk_buff *skb, int head, int line, char *file)
{
	if (head) {
		/* Queue-head checks: the head itself must carry the head
		   cookie and, since the list is circular, must always have
		   both links set (an empty list points back at itself). */
		if (skb->magic_debug_cookie != SK_HEAD_SKB) {
			printk("File: %s Line %d, found a bad skb-head\n",
				file,line);
			return -1;
		}
		if (!skb->next || !skb->prev) {
			printk("skb_check: head without next or prev\n");
			return -1;
		}
		/* Neighbours of a head may be real buffers or (for an empty
		   list) the head itself, so either cookie is acceptable. */
		if (skb->next->magic_debug_cookie != SK_HEAD_SKB
			&& skb->next->magic_debug_cookie != SK_GOOD_SKB) {
			printk("File: %s Line %d, bad next head-skb member\n",
				file,line);
			return -1;
		}
		if (skb->prev->magic_debug_cookie != SK_HEAD_SKB
			&& skb->prev->magic_debug_cookie != SK_GOOD_SKB) {
			printk("File: %s Line %d, bad prev head-skb member\n",
				file,line);
			return -1;
		}
/* Disabled: recursive walk of the first few queue elements.  Left for
   reference; enabling it makes every IS_SKB_HEAD O(queue length). */
#if 0
		{
		struct sk_buff *skb2 = skb->next;
		int i = 0;
		while (skb2 != skb && i < 5) {
			if (skb_check(skb2, 0, line, file) < 0) {
				printk("bad queue element in whole queue\n");
				return -1;
			}
			i++;
			skb2 = skb2->next;
		}
		}
#endif
		return 0;
	}
	/* Ordinary-buffer checks: links are allowed to be NULL (buffer not
	   on any list) but if set must point at a valid buffer or head. */
	if (skb->next != NULL && skb->next->magic_debug_cookie != SK_HEAD_SKB
		&& skb->next->magic_debug_cookie != SK_GOOD_SKB) {
		printk("File: %s Line %d, bad next skb member\n",
			file,line);
		return -1;
	}
	if (skb->prev != NULL && skb->prev->magic_debug_cookie != SK_HEAD_SKB
		&& skb->prev->magic_debug_cookie != SK_GOOD_SKB) {
		printk("File: %s Line %d, bad prev skb member\n",
			file,line);
		return -1;
	}

	/* kfree_skbmem() rewrites the cookie to SK_FREED_SKB before the
	   memory is returned, so this catches use-after-free. */
	if(skb->magic_debug_cookie==SK_FREED_SKB)
	{
		printk("File: %s Line %d, found a freed skb lurking in the undergrowth!\n",
			file,line);
		printk("skb=%p, real size=%ld, claimed size=%ld, free=%d\n",
			skb,skb->truesize,skb->mem_len,skb->free);
		return -1;
	}
	if(skb->magic_debug_cookie!=SK_GOOD_SKB)
	{
		printk("File: %s Line %d, passed a non skb!\n", file,line);
		printk("skb=%p, real size=%ld, claimed size=%ld, free=%d\n",
			skb,skb->truesize,skb->mem_len,skb->free);
		return -1;
	}
	/* alloc_skb() sets both fields to the same value; a mismatch means
	   somebody corrupted one of them afterwards. */
	if(skb->mem_len!=skb->truesize)
	{
		printk("File: %s Line %d, Dubious size setting!\n",file,line);
		printk("skb=%p, real size=%ld, claimed size=%ld\n",
			skb,skb->truesize,skb->mem_len);
		return -1;
	}

	return 0;
}
149 #endif
150
151
152 #ifdef CONFIG_SKB_CHECK
153 void skb_queue_head_init(struct sk_buff_head *list)
154 {
155 list->prev = (struct sk_buff *)list;
156 list->next = (struct sk_buff *)list;
157 list->magic_debug_cookie = SK_HEAD_SKB;
158 }
159
160
161
162
163
164 void skb_queue_head(struct sk_buff_head *list_,struct sk_buff *newsk)
165 {
166 unsigned long flags;
167 struct sk_buff *list = (struct sk_buff *)list_;
168
169 save_flags(flags);
170 cli();
171
172 IS_SKB(newsk);
173 IS_SKB_HEAD(list);
174 if (newsk->next || newsk->prev)
175 printk("Suspicious queue head: sk_buff on list!\n");
176
177 newsk->next = list->next;
178 newsk->prev = list;
179
180 newsk->next->prev = newsk;
181 newsk->prev->next = newsk;
182
183 restore_flags(flags);
184 }
185
186
187
188
189 void skb_queue_tail(struct sk_buff_head *list_, struct sk_buff *newsk)
190 {
191 unsigned long flags;
192 struct sk_buff *list = (struct sk_buff *)list_;
193
194 save_flags(flags);
195 cli();
196
197 if (newsk->next || newsk->prev)
198 printk("Suspicious queue tail: sk_buff on list!\n");
199 IS_SKB(newsk);
200 IS_SKB_HEAD(list);
201
202 newsk->next = list;
203 newsk->prev = list->prev;
204
205 newsk->next->prev = newsk;
206 newsk->prev->next = newsk;
207
208 restore_flags(flags);
209 }
210
211
212
213
214
215
216 struct sk_buff *skb_dequeue(struct sk_buff_head *list_)
217 {
218 long flags;
219 struct sk_buff *result;
220 struct sk_buff *list = (struct sk_buff *)list_;
221
222 save_flags(flags);
223 cli();
224
225 IS_SKB_HEAD(list);
226
227 result = list->next;
228 if (result == list) {
229 restore_flags(flags);
230 return NULL;
231 }
232
233 result->next->prev = list;
234 list->next = result->next;
235
236 result->next = NULL;
237 result->prev = NULL;
238
239 restore_flags(flags);
240
241 IS_SKB(result);
242 return result;
243 }
244
245
246
247
248 void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
249 {
250 unsigned long flags;
251
252 IS_SKB(old);
253 IS_SKB(newsk);
254
255 if(!old->next || !old->prev)
256 printk("insert before unlisted item!\n");
257 if(newsk->next || newsk->prev)
258 printk("inserted item is already on a list.\n");
259
260 save_flags(flags);
261 cli();
262 newsk->next = old;
263 newsk->prev = old->prev;
264 old->prev = newsk;
265 newsk->prev->next = newsk;
266
267 restore_flags(flags);
268 }
269
270
271
272
273 void skb_append(struct sk_buff *old, struct sk_buff *newsk)
274 {
275 unsigned long flags;
276
277 IS_SKB(old);
278 IS_SKB(newsk);
279
280 if(!old->next || !old->prev)
281 printk("append before unlisted item!\n");
282 if(newsk->next || newsk->prev)
283 printk("append item is already on a list.\n");
284
285 save_flags(flags);
286 cli();
287
288 newsk->prev = old;
289 newsk->next = old->next;
290 newsk->next->prev = newsk;
291 old->next = newsk;
292
293 restore_flags(flags);
294 }
295
296
297
298
299
300
301
302 void skb_unlink(struct sk_buff *skb)
303 {
304 unsigned long flags;
305
306 save_flags(flags);
307 cli();
308
309 IS_SKB(skb);
310
311 if(skb->prev && skb->next)
312 {
313 skb->next->prev = skb->prev;
314 skb->prev->next = skb->next;
315 skb->next = NULL;
316 skb->prev = NULL;
317 }
318 #ifdef PARANOID_BUGHUNT_MODE
319 else
320 printk("skb_unlink: not a linked element\n");
321 #endif
322 restore_flags(flags);
323 }
324
325 #endif
326
327
328
329
330
331
/*
 *	Free an sk_buff.  rw selects which side of the owning socket's
 *	memory accounting is credited: non-zero for read (rmem), zero for
 *	write (wmem).  If the buffer is locked by a driver the free is
 *	deferred: free is set to 3 and dev_kfree_skb() completes the free
 *	when the driver unlocks it.
 */
void kfree_skb(struct sk_buff *skb, int rw)
{
	if (skb == NULL)
	{
		printk("kfree_skb: skb = NULL (from %p)\n",
			__builtin_return_address(0));
		return;
	}
#ifdef CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	if (skb->lock)
	{
		/* Driver still owns it: flag for deferred free (see
		   dev_kfree_skb, which treats free == 3 as "free on unlock"). */
		skb->free = 3;
		net_free_locked++;
		return;
	}
	/* free == 2 is the "freshly allocated, owner undecided" state set
	   by alloc_skb(); reaching here in that state means the caller
	   never claimed the buffer properly.  Warn but free anyway. */
	if (skb->free == 2)
		printk("Warning: kfree_skb passed an skb that nobody set the free flag on! (from %p)\n",
			__builtin_return_address(0));
	if (skb->next)
		printk("Warning: kfree_skb passed an skb still on a list (from %p).\n",
			__builtin_return_address(0));
	if (skb->sk)
	{
		if(skb->sk->prot!=NULL)
		{
			/* Protocol has its own free hooks which also handle
			   the socket accounting and the actual release. */
			if (rw)
				skb->sk->prot->rfree(skb->sk, skb, skb->mem_len);
			else
				skb->sk->prot->wfree(skb->sk, skb, skb->mem_len);

		}
		else
		{
			/* No protocol ops: adjust the socket's buffer
			   accounting by hand, wake any writer waiting for
			   space, then release the memory ourselves. */
			if (rw)
				skb->sk->rmem_alloc-=skb->mem_len;
			else
				skb->sk->wmem_alloc-=skb->mem_len;
			if(!skb->sk->dead)
				skb->sk->write_space(skb->sk);
			kfree_skbmem(skb,skb->mem_len);
		}
	}
	else
		/* Orphan buffer: nothing to account, just release it. */
		kfree_skbmem(skb, skb->mem_len);
}
380
381
382
383
384
385 struct sk_buff *alloc_skb(unsigned int size,int priority)
386 {
387 struct sk_buff *skb;
388 unsigned long flags;
389
390 if (intr_count && priority!=GFP_ATOMIC) {
391 static int count = 0;
392 if (++count < 5) {
393 printk("alloc_skb called nonatomically from interrupt %p\n",
394 __builtin_return_address(0));
395 priority = GFP_ATOMIC;
396 }
397 }
398
399 size+=sizeof(struct sk_buff);
400 skb=(struct sk_buff *)kmalloc(size,priority);
401 if (skb == NULL)
402 {
403 net_fails++;
404 return NULL;
405 }
406 #ifdef PARANOID_BUGHUNT_MODE
407 if(skb->magic_debug_cookie == SK_GOOD_SKB)
408 printk("Kernel kmalloc handed us an existing skb (%p)\n",skb);
409 #endif
410
411 net_allocs++;
412
413 skb->free = 2;
414 skb->lock = 0;
415 skb->pkt_type = PACKET_HOST;
416 skb->truesize = size;
417 skb->mem_len = size;
418 skb->mem_addr = skb;
419 #ifdef CONFIG_SLAVE_BALANCING
420 skb->in_dev_queue = 0;
421 #endif
422 skb->fraglist = NULL;
423 skb->prev = skb->next = NULL;
424 skb->link3 = NULL;
425 skb->sk = NULL;
426 skb->localroute=0;
427 skb->stamp.tv_sec=0;
428 skb->localroute = 0;
429 save_flags(flags);
430 cli();
431 net_memory += size;
432 net_skbcount++;
433 restore_flags(flags);
434 #if CONFIG_SKB_CHECK
435 skb->magic_debug_cookie = SK_GOOD_SKB;
436 #endif
437 skb->users = 0;
438 return skb;
439 }
440
441
442
443
444
/*
 *	Release the memory behind an sk_buff and update the global
 *	accounting.  size must equal the buffer's truesize.  In debug
 *	builds the magic cookie is rewritten to SK_FREED_SKB first, so
 *	skb_check() can detect use-after-free.
 */
void kfree_skbmem(struct sk_buff *skb,unsigned size)
{
	unsigned long flags;
#ifdef CONFIG_SLAVE_BALANCING
	/* Credit the device queue-depth counter if this buffer was still
	   accounted against a device. */
	save_flags(flags);
	cli();
	if(skb->in_dev_queue && skb->dev!=NULL)
		skb->dev->pkt_queue--;
	restore_flags(flags);
#endif
#ifdef CONFIG_SKB_CHECK
	IS_SKB(skb);
	if(size!=skb->truesize)
		printk("kfree_skbmem: size mismatch.\n");

	if(skb->magic_debug_cookie == SK_GOOD_SKB)
	{
		save_flags(flags);
		cli();
		IS_SKB(skb);
		/* Poison the cookie before freeing so stale references are
		   caught by skb_check(). */
		skb->magic_debug_cookie = SK_FREED_SKB;
		kfree_s((void *)skb,size);
		net_skbcount--;
		net_memory -= size;
		restore_flags(flags);
	}
	else
		/* Bad cookie: refuse to free rather than corrupt the heap. */
		printk("kfree_skbmem: bad magic cookie\n");
#else
	save_flags(flags);
	cli();
	kfree_s((void *)skb,size);
	net_skbcount--;
	net_memory -= size;
	restore_flags(flags);
#endif
}
482
483
484
485
486
487
/*
 *	Duplicate an sk_buff, copying its data and most header fields.
 *	Returns the copy, or NULL if allocation fails.  The clone is
 *	unowned (sk == NULL), unlocked, and marked free = 1.
 */
struct sk_buff *skb_clone(struct sk_buff *skb, int priority)
{
	struct sk_buff *n;
	unsigned long offset;

	/* mem_len includes the header, which alloc_skb adds back itself. */
	n=alloc_skb(skb->mem_len-sizeof(struct sk_buff),priority);
	if(n==NULL)
		return NULL;

	/* Internal pointers (h.raw, ip_hdr) point into the old buffer's
	   data area; relocate them by the distance between the two
	   allocations. */
	offset=((char *)n)-((char *)skb);

	memcpy(n->data,skb->data,skb->mem_len-sizeof(struct sk_buff));
	n->len=skb->len;
	n->link3=NULL;
	n->sk=NULL;
	n->when=skb->when;
	n->dev=skb->dev;
	n->h.raw=skb->h.raw+offset;
	n->ip_hdr=(struct iphdr *)(((char *)skb->ip_hdr)+offset);
	n->fraglen=skb->fraglen;
	/* NOTE(review): fraglist is copied as a pointer, so original and
	   clone share the fragment chain — verify callers expect this. */
	n->fraglist=skb->fraglist;
	n->saddr=skb->saddr;
	n->daddr=skb->daddr;
	n->raddr=skb->raddr;
	n->acked=skb->acked;
	n->used=skb->used;
	n->free=1;
	n->arp=skb->arp;
	n->tries=0;
	n->lock=0;
	n->users=0;
	n->pkt_type=skb->pkt_type;
	return n;
}
522
523
524
525
526
527
528 void skb_device_lock(struct sk_buff *skb)
529 {
530 if(skb->lock)
531 printk("double lock on device queue!\n");
532 else
533 net_locked++;
534 skb->lock++;
535 }
536
537 void skb_device_unlock(struct sk_buff *skb)
538 {
539 if(skb->lock==0)
540 printk("double unlock on device queue!\n");
541 skb->lock--;
542 if(skb->lock==0)
543 net_locked--;
544 }
545
/*
 *	Driver-side release of a buffer: drop one lock reference and, if
 *	that was the last lock and the buffer is marked freeable (free == 1,
 *	or free == 3 meaning a kfree_skb() was deferred while we held the
 *	lock), actually free it.  mode is passed through to kfree_skb()
 *	as its rw argument.
 */
void dev_kfree_skb(struct sk_buff *skb, int mode)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	/* Dropping the last lock reference: credit the global counter. */
	if(skb->lock==1)
		net_locked--;

	if (!--skb->lock && (skb->free == 1 || skb->free == 3))
	{
		/* Re-enable interrupts before the (non-trivial) free. */
		restore_flags(flags);
		kfree_skb(skb,mode);
	}
	else
		restore_flags(flags);
}
563
564 int skb_device_locked(struct sk_buff *skb)
565 {
566 return skb->lock? 1 : 0;
567 }
568