/*
 * tqueue.h --- task queue handling for Linux.
 *
 * Mostly based on a proposed bottom-half replacement code written by
 * Kai Petzke, wpp@marie.physik.tu-berlin.de.
 *
 * Modified for use in the Linux kernel by Theodore Ts'o,
 * tytso@mit.edu. Any bugs are my fault, not Kai's.
 *
 * The original comment follows below.
 */

#ifndef _LINUX_TQUEUE_H
#define _LINUX_TQUEUE_H

#include <asm/bitops.h>
#include <asm/system.h>

#ifdef INCLUDE_INLINE_FUNCS
#define _INLINE_ extern
#else
#define _INLINE_ extern __inline__
#endif

/*
 * New proposed "bottom half" handlers:
 * (C) 1994 Kai Petzke, wpp@marie.physik.tu-berlin.de
 *
 * Advantages:
 * - Bottom halves are implemented as a linked list.  You can have as many
 *   of them as you want.
 * - No more scanning of a bit field is required when a bottom half is run.
 * - Support for chained bottom half lists.  The run_task_queue() function
 *   can itself be used as a bottom half handler.  This is useful, for
 *   example, for bottom halves that want to be delayed until the next
 *   clock tick.
 *
 * Problems:
 * - The queue_task_irq() inline function is only atomic with respect to
 *   itself.  Problems can occur when queue_task_irq() is called from a
 *   normal system call and an interrupt comes in.  No problems occur when
 *   queue_task_irq() is called from an interrupt or bottom half and is
 *   itself interrupted, because run_task_queue() will not be executed or
 *   continued before the last interrupt returns.  If in doubt, use
 *   queue_task(), not queue_task_irq().
 * - Bottom halves are called in the reverse of the order in which they
 *   were linked into the list.
 */

struct tq_struct {
        struct tq_struct *next;         /* linked list of active bh's */
        int sync;                       /* must be initialized to zero */
        void (*routine)(void *);        /* function to call */
        void *data;                     /* argument to function */
};

typedef struct tq_struct * task_queue;

#define DECLARE_TASK_QUEUE(q)  task_queue q = &tq_last

extern struct tq_struct tq_last;
extern task_queue tq_timer, tq_immediate, tq_scheduler;

#ifdef INCLUDE_INLINE_FUNCS
struct tq_struct tq_last = {
        &tq_last, 0, 0, 0
};
#endif
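
/*
 * A minimal sketch of how these declarations fit together.  The names
 * my_queue, my_handler, my_task, my_dev and my_dev_instance below are
 * hypothetical, chosen only for illustration:
 *
 *      static void my_handler(void *data)
 *      {
 *              struct my_dev *dev = (struct my_dev *) data;
 *              ... do the deferred work on dev ...
 *      }
 *
 *      DECLARE_TASK_QUEUE(my_queue);
 *
 *      static struct tq_struct my_task = {
 *              0, 0, my_handler, &my_dev_instance
 *      };
 *
 * Note that DECLARE_TASK_QUEUE(my_queue) expands to
 * "task_queue my_queue = &tq_last;", so an empty queue points at tq_last,
 * not at NULL.
 */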

/*
 * To implement your own list of active bottom halves, use the following
 * two definitions:
 *
 * struct tq_struct *my_bh = &tq_last;
 * struct tq_struct run_my_bh = {
 *      0, 0, (void (*)(void *)) run_task_queue, &my_bh
 * };
 *
 * To activate a bottom half on your list, use:
 *
 *      queue_task(tq_pointer, &my_bh);
 *
 * To run the bottom halves on your list, put them on the immediate list with:
 *
 *      queue_task(&run_my_bh, &tq_immediate);
 *
 * This allows you to do deferred processing.  For example, you could
 * have a bottom half list tq_timer, which is marked active by the timer
 * interrupt.
 */
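
/*
 * As a concrete sketch of the tq_timer case mentioned above, a driver can
 * queue the (hypothetical) my_task from the earlier sketch on tq_timer:
 *
 *      queue_task(&my_task, &tq_timer);
 *
 * Everything queued on tq_timer is run on the next timer tick, when the
 * timer bottom half calls run_task_queue(&tq_timer).
 */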

/*
 * queue_task_irq: put the bottom half handler "bh_pointer" on the list
 * "bh_list".  You may call this function only from an interrupt
 * handler or a bottom half handler.
 */
_INLINE_ void queue_task_irq(struct tq_struct *bh_pointer,
                             task_queue *bh_list)
{
        if (!set_bit(0,&bh_pointer->sync)) {
                bh_pointer->next = *bh_list;
                *bh_list = bh_pointer;
        }
}
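
/*
 * A sketch of the intended use from inside an interrupt handler or bottom
 * half (my_task is a hypothetical tq_struct set up as in the sketches
 * above; mark_bh() and IMMEDIATE_BH come from <linux/interrupt.h>):
 *
 *      queue_task_irq(&my_task, &tq_immediate);
 *      mark_bh(IMMEDIATE_BH);
 *
 * so that my_task's routine runs the next time the immediate bottom half
 * is executed.
 */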

/*
 * queue_task_irq_off: put the bottom half handler "bh_pointer" on the list
 * "bh_list".  You may call this function only when interrupts are off.
 */
_INLINE_ void queue_task_irq_off(struct tq_struct *bh_pointer,
                                 task_queue *bh_list)
{
        if (!(bh_pointer->sync & 1)) {
                bh_pointer->sync = 1;
                bh_pointer->next = *bh_list;
                *bh_list = bh_pointer;
        }
}
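
/*
 * For example, a caller that has already disabled interrupts (mirroring
 * what queue_task() below does internally) could use this hypothetical
 * sequence, with my_task as in the sketches above:
 *
 *      unsigned long flags;
 *
 *      save_flags(flags);
 *      cli();
 *      queue_task_irq_off(&my_task, &tq_timer);
 *      restore_flags(flags);
 */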

/*
 * queue_task: as queue_task_irq, but can be called from anywhere.
 */
_INLINE_ void queue_task(struct tq_struct *bh_pointer,
                         task_queue *bh_list)
{
        if (!set_bit(0,&bh_pointer->sync)) {
                unsigned long flags;
                save_flags(flags);
                cli();
                bh_pointer->next = *bh_list;
                *bh_list = bh_pointer;
                restore_flags(flags);
        }
}
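
/*
 * Because queue_task() disables interrupts around the list update itself,
 * it may be called from ordinary process context as well, e.g. (with the
 * hypothetical my_task from the sketches above):
 *
 *      queue_task(&my_task, &tq_scheduler);
 *
 * Entries on tq_scheduler are run from the scheduler, entries on tq_timer
 * on the next timer tick, and entries on tq_immediate the next time the
 * immediate bottom half is marked and run.
 */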

/*
 * Call all "bottom halves" on a given list.
 */
_INLINE_ void run_task_queue(task_queue *list)
{
        register struct tq_struct *save_p;
        register struct tq_struct *p;
        void *arg;
        void (*f) (void *);

        while(1) {
                p = xchg_ptr(list,&tq_last);
                if(p == &tq_last)
                        break;

                do {
                        arg    = p->data;
                        f      = p->routine;
                        save_p = p->next;
                        p->sync = 0;
                        (*f)(arg);
                        p = save_p;
                } while(p != &tq_last);
        }
}
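
/*
 * A private queue declared with DECLARE_TASK_QUEUE() can be drained at any
 * convenient point by the code that owns it, e.g. (with the hypothetical
 * my_queue from the sketch above):
 *
 *      run_task_queue(&my_queue);
 *
 * Each pass atomically takes the whole list (leaving it empty, i.e.
 * pointing at tq_last), clears each handler's sync flag, calls the
 * handlers in turn, and loops until no new entries have been queued.
 */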

#undef _INLINE_

#endif /* _LINUX_TQUEUE_H */