/*
 * tqueue.h --- task queue handling for Linux.
 *
 * Mostly based on a proposed bottom-half replacement code written by
 * Kai Petzke, wpp@marie.physik.tu-berlin.de.
 *
 * Modified for use in the Linux kernel by Theodore Ts'o,
 * tytso@mit.edu. Any bugs are my fault, not Kai's.
 *
 * The original comment follows below.
 */

#ifndef _LINUX_TQUEUE_H
#define _LINUX_TQUEUE_H

#ifdef INCLUDE_INLINE_FUNCS
#define _INLINE_ extern
#else
#define _INLINE_ extern __inline__
#endif

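/*
 * The _INLINE_ trick above implies that exactly one source file in the
 * kernel defines INCLUDE_INLINE_FUNCS before including this header:
 * there the functions below (and tq_last) are emitted as ordinary
 * external definitions, while every other includer gets inlines.
 * Roughly:
 *
 *	#define INCLUDE_INLINE_FUNCS
 *	#include <linux/tqueue.h>
 */
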
/*
 * New proposed "bottom half" handlers:
 * (C) 1994 Kai Petzke, wpp@marie.physik.tu-berlin.de
 *
 * Advantages:
 * - Bottom halves are implemented as a linked list. You can have as
 *   many of them as you want.
 * - No more scanning of a bit field is required upon call of a bottom half.
 * - Support for chained bottom half lists. The run_task_queue() function
 *   can be used as a bottom half handler. This is for example useful for
 *   bottom halves that want to be delayed until the next clock tick.
 *
 * Problems:
 * - The queue_task_irq() inline function is only atomic with respect to
 *   itself. Problems can occur when queue_task_irq() is called from a
 *   normal system call and an interrupt comes in. No problems occur when
 *   queue_task_irq() is called from an interrupt or bottom half and is
 *   itself interrupted, as run_task_queue() will not be executed/continued
 *   before the last interrupt returns. If in doubt, use queue_task(), not
 *   queue_task_irq().
 * - Bottom halves are called in the reverse of the order in which they
 *   were linked into the list.
 */
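
/*
 * For example (an illustrative sketch; "my_task", "do_work" and
 * "my_interrupt" are not part of this header), an interrupt handler may
 * use the cheaper queue_task_irq(), while process context should use
 * queue_task():
 *
 *	static void do_work(void *data);
 *	static struct tq_struct my_task = { NULL, 0, do_work, NULL };
 *
 *	void my_interrupt(int irq, struct pt_regs *regs)
 *	{
 *		queue_task_irq(&my_task, &tq_timer);
 *	}
 *
 *	void my_syscall_path(void)
 *	{
 *		queue_task(&my_task, &tq_timer);	(safe anywhere)
 *	}
 */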

struct tq_struct {
	struct tq_struct *next;		/* linked list of active bh's */
	int sync;			/* must be initialized to zero */
	void (*routine)(void *);	/* function to call */
	void *data;			/* argument to function */
};

typedef struct tq_struct * task_queue;

#define DECLARE_TASK_QUEUE(q) task_queue q = &tq_last

extern struct tq_struct tq_last;
extern task_queue tq_timer;

#ifdef INCLUDE_INLINE_FUNCS
struct tq_struct tq_last = {
	&tq_last, 0, 0, 0
};
#endif

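/*
 * An empty queue thus points at tq_last, and tq_last points at itself,
 * which is how run_task_queue() detects the end of a list. For
 * instance ("my_queue" is an illustrative name):
 *
 *	DECLARE_TASK_QUEUE(my_queue);
 *
 * expands to
 *
 *	task_queue my_queue = &tq_last;
 *
 * i.e. a valid, empty bottom half list.
 */
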
/*
 * To implement your own list of active bottom halves, use the following
 * two definitions:
 *
 * struct tq_struct *my_bh = &tq_last;
 * struct tq_struct run_my_bh = {
 *	0, 0, (void (*)(void *)) run_task_queue, &my_bh
 * };
 *
 * To activate a bottom half on your list, use:
 *
 *	queue_task(tq_pointer, &my_bh);
 *
 * To run the bottom halves on your list, put them on the immediate list by:
 *
 *	queue_task(&run_my_bh, &tq_immediate);
 *
 * This allows you to do deferred processing. For example, you could
 * have a bottom half list tq_timer, which is marked active by the timer
 * interrupt.
 */
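
/*
 * Putting the pieces above together (a sketch only; none of these names
 * exist in the kernel, and tq_timer stands in for whichever parent list
 * you chain onto):
 *
 *	DECLARE_TASK_QUEUE(my_bh);
 *	struct tq_struct run_my_bh = {
 *		0, 0, (void (*)(void *)) run_task_queue, &my_bh
 *	};
 *	struct tq_struct my_task = { 0, 0, do_work, my_data };
 *
 *	queue_task(&my_task, &my_bh);		mark my_task active
 *	queue_task(&run_my_bh, &tq_timer);	drain my_bh on the next tick
 *
 * On the next tick the timer bottom half runs run_my_bh, whose routine
 * is run_task_queue() with &my_bh as its argument, which in turn calls
 * do_work(my_data).
 */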

/*
 * queue_task_irq: put the bottom half handler "bh_pointer" on the list
 * "bh_list". You may call this function only from an interrupt
 * handler or a bottom half handler.
 */
_INLINE_ void queue_task_irq(struct tq_struct *bh_pointer,
			     task_queue *bh_list)
{
	int l1;

	__asm__ __volatile__ (
	"btsl $0,%1\n\t"	/* bottom half already marked? */
	"jc 1f\n\t"
	"leal %2,%3\n\t"	/* address of the "next" field of bh_struct */
	"xchgl %3,%0\n\t"	/* link bottom half into list */
	"movl %3,%2\n1:"	/* save the pointer to next bottom half */
	: "=m" (*bh_list), "=m" (bh_pointer->sync), "=m" (bh_pointer->next),
	  "=r" (l1) );
}

/*
 * queue_task_irq_off: put the bottom half handler "bh_pointer" on the list
 * "bh_list". You may call this function only when interrupts are off.
 */
_INLINE_ void queue_task_irq_off(struct tq_struct *bh_pointer,
				 task_queue *bh_list)
{
	int l1;

	__asm__ __volatile__ (
	"testl $1,%1\n\t"	/* bottom half already marked? */
	"jne 1f\n\t"
	"movl $1,%1\n\t"	/* mark it */
	"leal %2,%3\n\t"	/* address of the "next" field of bh_struct */
	"xchgl %3,%0\n\t"	/* link bottom half into list */
	"movl %3,%2\n1:"	/* save the pointer to next bottom half */
	: "=m" (*bh_list), "=m" (bh_pointer->sync), "=m" (bh_pointer->next),
	  "=r" (l1) );
}


/*
 * queue_task: as queue_task_irq, but can be called from anywhere.
 */
_INLINE_ void queue_task(struct tq_struct *bh_pointer,
			 task_queue *bh_list)
{
	int l1;

	__asm__ __volatile__ (
	"btsl $0,%1\n\t"	/* bottom half already marked? */
	"jc 1f\n\t"
	"leal %2,%3\n\t"	/* address of the "next" field of bh_struct */
	"pushfl\n\t"		/* save interrupt flag */
	"cli\n\t"		/* turn off interrupts */
	"xchgl %3,%0\n\t"	/* link bottom half into list */
	"movl %3,%2\n\t"	/* now the linking step is really atomic! */
	"popfl\n1:"		/* restore interrupt flag */
	: "=m" (*bh_list), "=m" (bh_pointer->sync), "=m" (bh_pointer->next),
	  "=r" (l1) );
}

/*
 * Call all "bottom halves" on a given list.
 */
_INLINE_ void run_task_queue(task_queue *list)
{
	register struct tq_struct *save_p;
	register struct tq_struct *p;
	void *arg;
	void (*f) (void *);

	while (1) {
		/* atomically grab the whole list, leaving it empty */
		p = &tq_last;
		__asm__ __volatile__("xchgl %0,%2" : "=r" (p) :
				     "0" (p), "m" (*list) : "memory");
		if (p == &tq_last)
			break;

		do {
			arg    = p->data;
			f      = p->routine;
			save_p = p->next;
			p->sync = 0;	/* allow the handler to re-queue itself */
			(*f)(arg);
			p = save_p;
		} while (p != &tq_last);
	}
}
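
/*
 * A typical use (sketch): the timer bottom half can drain its queue on
 * every tick with
 *
 *	run_task_queue(&tq_timer);
 *
 * Each tq_struct runs once per activation; its sync flag is cleared
 * before its routine is called, so the routine may re-queue the same
 * tq_struct (e.g. via queue_task()) to run again later.
 */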

#undef _INLINE_

#endif /* _LINUX_TQUEUE_H */