This source file includes the following definitions.
- show_net_buffers
- skb_check
- skb_queue_head_init
- skb_queue_head
- skb_queue_tail
- skb_dequeue
- skb_insert
- skb_append
- skb_unlink
- kfree_skb
- alloc_skb
- kfree_skbmem
- skb_clone
- skb_device_lock
- skb_device_unlock
- dev_kfree_skb
- skb_device_locked
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24 #include <linux/config.h>
25 #include <linux/types.h>
26 #include <linux/kernel.h>
27 #include <linux/sched.h>
28 #include <asm/segment.h>
29 #include <asm/system.h>
30 #include <linux/mm.h>
31 #include <linux/interrupt.h>
32 #include <linux/in.h>
33 #include <linux/inet.h>
34 #include <linux/netdevice.h>
35 #include <net/ip.h>
36 #include <net/protocol.h>
37 #include <linux/string.h>
38 #include <net/route.h>
39 #include <net/tcp.h>
40 #include <net/udp.h>
41 #include <linux/skbuff.h>
42 #include <net/sock.h>
43
44
45
46
47
48
/*
 * Global accounting for networking buffers.  These counters are
 * updated from both process and interrupt context (under cli()
 * where it matters), hence volatile.  Reported by show_net_buffers().
 */
volatile unsigned long net_memory = 0;		/* Bytes committed to network buffers */
volatile unsigned long net_skbcount = 0;	/* skbs currently in existence */
volatile unsigned long net_locked = 0;		/* skbs currently locked by drivers */
volatile unsigned long net_allocs = 0;		/* Successful alloc_skb() calls */
volatile unsigned long net_fails = 0;		/* alloc_skb() kmalloc failures */
volatile unsigned long net_free_locked = 0;	/* kfree_skb() calls on a driver-locked skb */
55
56 void show_net_buffers(void)
57 {
58 printk("Networking buffers in use : %lu\n",net_skbcount);
59 printk("Memory committed to network buffers: %lu\n",net_memory);
60 printk("Network buffers locked by drivers : %lu\n",net_locked);
61 printk("Total network buffer allocations : %lu\n",net_allocs);
62 printk("Total failed network buffer allocs : %lu\n",net_fails);
63 printk("Total free while locked events : %lu\n",net_free_locked);
64 }
65
#if CONFIG_SKB_CHECK

/*
 * Debug consistency check for sk_buffs (normally invoked through the
 * IS_SKB / IS_SKB_HEAD macros).  'head' is non-zero when 'skb' should
 * be a queue head (magic cookie SK_HEAD_SKB) and zero when it should
 * be an ordinary buffer (SK_GOOD_SKB).  'file' and 'line' identify
 * the calling site for the diagnostics.  Returns 0 when the buffer
 * looks sane, -1 after printing a complaint otherwise.
 */
int skb_check(struct sk_buff *skb, int head, int line, char *file)
{
	if (head) {
		/* A queue head must carry the head cookie... */
		if (skb->magic_debug_cookie != SK_HEAD_SKB) {
			printk("File: %s Line %d, found a bad skb-head\n",
				file,line);
			return -1;
		}
		/* ...and, being part of a circular list, always has links. */
		if (!skb->next || !skb->prev) {
			printk("skb_check: head without next or prev\n");
			return -1;
		}
		/* Both neighbours must themselves look like list members. */
		if (skb->next->magic_debug_cookie != SK_HEAD_SKB
			&& skb->next->magic_debug_cookie != SK_GOOD_SKB) {
			printk("File: %s Line %d, bad next head-skb member\n",
				file,line);
			return -1;
		}
		if (skb->prev->magic_debug_cookie != SK_HEAD_SKB
			&& skb->prev->magic_debug_cookie != SK_GOOD_SKB) {
			printk("File: %s Line %d, bad prev head-skb member\n",
				file,line);
			return -1;
		}
#if 0
		/* Disabled: recursively verify the first few queue elements. */
		{
		struct sk_buff *skb2 = skb->next;
		int i = 0;
		while (skb2 != skb && i < 5) {
			if (skb_check(skb2, 0, line, file) < 0) {
				printk("bad queue element in whole queue\n");
				return -1;
			}
			i++;
			skb2 = skb2->next;
		}
		}
#endif
		return 0;
	}
	/* Ordinary buffer: if it is linked, its neighbours must be valid. */
	if (skb->next != NULL && skb->next->magic_debug_cookie != SK_HEAD_SKB
		&& skb->next->magic_debug_cookie != SK_GOOD_SKB) {
		printk("File: %s Line %d, bad next skb member\n",
			file,line);
		return -1;
	}
	if (skb->prev != NULL && skb->prev->magic_debug_cookie != SK_HEAD_SKB
		&& skb->prev->magic_debug_cookie != SK_GOOD_SKB) {
		printk("File: %s Line %d, bad prev skb member\n",
			file,line);
		return -1;
	}

	/* Catch use-after-free: kfree_skbmem() poisons the cookie. */
	if(skb->magic_debug_cookie==SK_FREED_SKB)
	{
		printk("File: %s Line %d, found a freed skb lurking in the undergrowth!\n",
			file,line);
		printk("skb=%p, real size=%ld, claimed size=%ld, free=%d\n",
			skb,skb->truesize,skb->mem_len,skb->free);
		return -1;
	}
	if(skb->magic_debug_cookie!=SK_GOOD_SKB)
	{
		printk("File: %s Line %d, passed a non skb!\n", file,line);
		printk("skb=%p, real size=%ld, claimed size=%ld, free=%d\n",
			skb,skb->truesize,skb->mem_len,skb->free);
		return -1;
	}
	/* mem_len is set equal to truesize at allocation time. */
	if(skb->mem_len!=skb->truesize)
	{
		printk("File: %s Line %d, Dubious size setting!\n",file,line);
		printk("skb=%p, real size=%ld, claimed size=%ld\n",
			skb,skb->truesize,skb->mem_len);
		return -1;
	}

	return 0;
}
#endif
152
153
154 #ifdef CONFIG_SKB_CHECK
155 void skb_queue_head_init(struct sk_buff_head *list)
156 {
157 list->prev = (struct sk_buff *)list;
158 list->next = (struct sk_buff *)list;
159 list->magic_debug_cookie = SK_HEAD_SKB;
160 }
161
162
163
164
165
166 void skb_queue_head(struct sk_buff_head *list_,struct sk_buff *newsk)
167 {
168 unsigned long flags;
169 struct sk_buff *list = (struct sk_buff *)list_;
170
171 save_flags(flags);
172 cli();
173
174 IS_SKB(newsk);
175 IS_SKB_HEAD(list);
176 if (newsk->next || newsk->prev)
177 printk("Suspicious queue head: sk_buff on list!\n");
178
179 newsk->next = list->next;
180 newsk->prev = list;
181
182 newsk->next->prev = newsk;
183 newsk->prev->next = newsk;
184
185 restore_flags(flags);
186 }
187
188
189
190
191 void skb_queue_tail(struct sk_buff_head *list_, struct sk_buff *newsk)
192 {
193 unsigned long flags;
194 struct sk_buff *list = (struct sk_buff *)list_;
195
196 save_flags(flags);
197 cli();
198
199 if (newsk->next || newsk->prev)
200 printk("Suspicious queue tail: sk_buff on list!\n");
201 IS_SKB(newsk);
202 IS_SKB_HEAD(list);
203
204 newsk->next = list;
205 newsk->prev = list->prev;
206
207 newsk->next->prev = newsk;
208 newsk->prev->next = newsk;
209
210 restore_flags(flags);
211 }
212
213
214
215
216
217
218 struct sk_buff *skb_dequeue(struct sk_buff_head *list_)
219 {
220 long flags;
221 struct sk_buff *result;
222 struct sk_buff *list = (struct sk_buff *)list_;
223
224 save_flags(flags);
225 cli();
226
227 IS_SKB_HEAD(list);
228
229 result = list->next;
230 if (result == list) {
231 restore_flags(flags);
232 return NULL;
233 }
234
235 result->next->prev = list;
236 list->next = result->next;
237
238 result->next = NULL;
239 result->prev = NULL;
240
241 restore_flags(flags);
242
243 IS_SKB(result);
244 return result;
245 }
246
247
248
249
250 void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
251 {
252 unsigned long flags;
253
254 IS_SKB(old);
255 IS_SKB(newsk);
256
257 if(!old->next || !old->prev)
258 printk("insert before unlisted item!\n");
259 if(newsk->next || newsk->prev)
260 printk("inserted item is already on a list.\n");
261
262 save_flags(flags);
263 cli();
264 newsk->next = old;
265 newsk->prev = old->prev;
266 old->prev = newsk;
267 newsk->prev->next = newsk;
268
269 restore_flags(flags);
270 }
271
272
273
274
275 void skb_append(struct sk_buff *old, struct sk_buff *newsk)
276 {
277 unsigned long flags;
278
279 IS_SKB(old);
280 IS_SKB(newsk);
281
282 if(!old->next || !old->prev)
283 printk("append before unlisted item!\n");
284 if(newsk->next || newsk->prev)
285 printk("append item is already on a list.\n");
286
287 save_flags(flags);
288 cli();
289
290 newsk->prev = old;
291 newsk->next = old->next;
292 newsk->next->prev = newsk;
293 old->next = newsk;
294
295 restore_flags(flags);
296 }
297
298
299
300
301
302
303
304 void skb_unlink(struct sk_buff *skb)
305 {
306 unsigned long flags;
307
308 save_flags(flags);
309 cli();
310
311 IS_SKB(skb);
312
313 if(skb->prev && skb->next)
314 {
315 skb->next->prev = skb->prev;
316 skb->prev->next = skb->next;
317 skb->next = NULL;
318 skb->prev = NULL;
319 }
320 #ifdef PARANOID_BUGHUNT_MODE
321 else
322 printk("skb_unlink: not a linked element\n");
323 #endif
324 restore_flags(flags);
325 }
326
327 #endif
328
329
330
331
332
333
/*
 * Free an sk_buff.  'rw' selects which socket accounting pool the
 * buffer was charged to: non-zero means the read (rmem) allocation,
 * zero means the write (wmem) allocation.  A buffer still locked by
 * a driver is only marked for freeing (free=3); dev_kfree_skb()
 * completes the free when the driver unlocks it.
 */
void kfree_skb(struct sk_buff *skb, int rw)
{
	if (skb == NULL)
	{
		printk("kfree_skb: skb = NULL (from %p)\n",
			__builtin_return_address(0));
		return;
	}
#ifdef CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	if (skb->lock)
	{
		/* Driver still holds it: defer to dev_kfree_skb(). */
		skb->free = 3;
		net_free_locked++;
		return;
	}
	/* free==2 is the "nobody claimed ownership" state set by alloc_skb(). */
	if (skb->free == 2)
		printk("Warning: kfree_skb passed an skb that nobody set the free flag on! (from %p)\n",
			__builtin_return_address(0));
	if (skb->next)
		printk("Warning: kfree_skb passed an skb still on a list (from %p).\n",
			__builtin_return_address(0));
	if (skb->sk)
	{
		if(skb->sk->prot!=NULL)
		{
			/* Protocol-specific free: the rfree/wfree op adjusts the
			   socket's accounting and releases the memory. */
			if (rw)
				skb->sk->prot->rfree(skb->sk, skb, skb->mem_len);
			else
				skb->sk->prot->wfree(skb->sk, skb, skb->mem_len);

		}
		else
		{
			unsigned long flags;

			/* No protocol ops: adjust the socket counters by hand
			   (interrupts off), wake the writer, free the memory. */
			save_flags(flags);
			cli();
			if (rw)
				skb->sk->rmem_alloc-=skb->mem_len;
			else
				skb->sk->wmem_alloc-=skb->mem_len;
			restore_flags(flags);
			if(!skb->sk->dead)
				skb->sk->write_space(skb->sk);
			kfree_skbmem(skb,skb->mem_len);
		}
	}
	else
		kfree_skbmem(skb, skb->mem_len);
}
386
387
388
389
390
391 struct sk_buff *alloc_skb(unsigned int size,int priority)
392 {
393 struct sk_buff *skb;
394 unsigned long flags;
395
396 if (intr_count && priority!=GFP_ATOMIC) {
397 static int count = 0;
398 if (++count < 5) {
399 printk("alloc_skb called nonatomically from interrupt %p\n",
400 __builtin_return_address(0));
401 priority = GFP_ATOMIC;
402 }
403 }
404
405 size+=sizeof(struct sk_buff);
406 skb=(struct sk_buff *)kmalloc(size,priority);
407 if (skb == NULL)
408 {
409 net_fails++;
410 return NULL;
411 }
412 #ifdef PARANOID_BUGHUNT_MODE
413 if(skb->magic_debug_cookie == SK_GOOD_SKB)
414 printk("Kernel kmalloc handed us an existing skb (%p)\n",skb);
415 #endif
416
417 net_allocs++;
418
419 skb->free = 2;
420 skb->lock = 0;
421 skb->pkt_type = PACKET_HOST;
422 skb->truesize = size;
423 skb->mem_len = size;
424 skb->mem_addr = skb;
425 #ifdef CONFIG_SLAVE_BALANCING
426 skb->in_dev_queue = 0;
427 #endif
428 skb->fraglist = NULL;
429 skb->prev = skb->next = NULL;
430 skb->link3 = NULL;
431 skb->sk = NULL;
432 skb->localroute=0;
433 skb->stamp.tv_sec=0;
434 skb->localroute = 0;
435 save_flags(flags);
436 cli();
437 net_memory += size;
438 net_skbcount++;
439 restore_flags(flags);
440 #if CONFIG_SKB_CHECK
441 skb->magic_debug_cookie = SK_GOOD_SKB;
442 #endif
443 skb->users = 0;
444 return skb;
445 }
446
447
448
449
450
/*
 * Release the memory behind an sk_buff and update the global
 * accounting counters.  'size' must match skb->truesize (verified
 * in the CONFIG_SKB_CHECK build).
 */
void kfree_skbmem(struct sk_buff *skb,unsigned size)
{
	unsigned long flags;
#ifdef CONFIG_SLAVE_BALANCING
	/* Buffer was counted on a device queue: drop that count first. */
	save_flags(flags);
	cli();
	if(skb->in_dev_queue && skb->dev!=NULL)
		skb->dev->pkt_queue--;
	restore_flags(flags);
#endif
#ifdef CONFIG_SKB_CHECK
	IS_SKB(skb);
	if(size!=skb->truesize)
		printk("kfree_skbmem: size mismatch.\n");

	if(skb->magic_debug_cookie == SK_GOOD_SKB)
	{
		save_flags(flags);
		cli();
		IS_SKB(skb);
		/* Poison the cookie so skb_check() catches use-after-free. */
		skb->magic_debug_cookie = SK_FREED_SKB;
		kfree_s((void *)skb,size);
		net_skbcount--;
		net_memory -= size;
		restore_flags(flags);
	}
	else
		printk("kfree_skbmem: bad magic cookie\n");
#else
	/* Non-debug build: just free and account, interrupts off. */
	save_flags(flags);
	cli();
	kfree_s((void *)skb,size);
	net_skbcount--;
	net_memory -= size;
	restore_flags(flags);
#endif
}
488
489
490
491
492
493
/*
 * Duplicate an sk_buff.  A fresh buffer of the same total size is
 * allocated, the original's data area is copied into it, and the
 * embedded pointers (h.raw, ip_hdr) are shifted by the byte distance
 * between the two allocations so they point into the copy.  The clone
 * belongs to no socket (sk==NULL), is unlocked, unlisted, and is
 * marked freeable (free=1).  Returns NULL if allocation fails.
 *
 * NOTE(review): fraglist is copied as a pointer, so any fragment list
 * is shared with the original rather than deep-copied — confirm that
 * is what callers expect.
 */
struct sk_buff *skb_clone(struct sk_buff *skb, int priority)
{
	struct sk_buff *n;
	unsigned long offset;

	/* mem_len includes the header; ask alloc_skb for the data part only. */
	n=alloc_skb(skb->mem_len-sizeof(struct sk_buff),priority);
	if(n==NULL)
		return NULL;

	/* Byte distance between the copies, used to relocate pointers. */
	offset=((char *)n)-((char *)skb);

	memcpy(n->data,skb->data,skb->mem_len-sizeof(struct sk_buff));
	n->len=skb->len;
	n->link3=NULL;
	n->sk=NULL;
	n->when=skb->when;
	n->dev=skb->dev;
	n->h.raw=skb->h.raw+offset;
	n->ip_hdr=(struct iphdr *)(((char *)skb->ip_hdr)+offset);
	n->fraglen=skb->fraglen;
	n->fraglist=skb->fraglist;
	n->saddr=skb->saddr;
	n->daddr=skb->daddr;
	n->raddr=skb->raddr;
	n->acked=skb->acked;
	n->used=skb->used;
	n->free=1;
	n->arp=skb->arp;
	n->tries=0;
	n->lock=0;
	n->users=0;
	n->pkt_type=skb->pkt_type;
	n->stamp=skb->stamp;
	return n;
}
529
530
531
532
533
534
535 void skb_device_lock(struct sk_buff *skb)
536 {
537 if(skb->lock)
538 printk("double lock on device queue!\n");
539 else
540 net_locked++;
541 skb->lock++;
542 }
543
544 void skb_device_unlock(struct sk_buff *skb)
545 {
546 if(skb->lock==0)
547 printk("double unlock on device queue!\n");
548 skb->lock--;
549 if(skb->lock==0)
550 net_locked--;
551 }
552
/*
 * Release a driver's lock on an skb and free it if no locks remain
 * and it was marked freeable: free==1 (normal) or free==3 (set by
 * kfree_skb() while the driver still held the buffer).  'mode' is
 * passed through to kfree_skb() as its rw argument.
 */
void dev_kfree_skb(struct sk_buff *skb, int mode)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	/* About to drop the last lock: account for it globally. */
	if(skb->lock==1)
		net_locked--;

	if (!--skb->lock && (skb->free == 1 || skb->free == 3))
	{
		/* Re-enable interrupts before the (possibly slow) free. */
		restore_flags(flags);
		kfree_skb(skb,mode);
	}
	else
		restore_flags(flags);
}
570
571 int skb_device_locked(struct sk_buff *skb)
572 {
573 return skb->lock? 1 : 0;
574 }
575