This source file includes following definitions.
- add_to_runqueue
- del_from_runqueue
- wake_up_process
- process_timeout
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- immediate_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/tqueue.h>
27 #include <linux/resource.h>
28 #include <linux/mm.h>
29
30 #include <asm/system.h>
31 #include <asm/io.h>
32 #include <asm/segment.h>
33 #include <asm/pgtable.h>
34
35 #define TIMER_IRQ 0
36
37 #include <linux/timex.h>
38
39
40
41
/* Length of a timer tick in microseconds, and the current wall-clock time. */
long tick = 1000000 / HZ;
volatile struct timeval xtime;		/* updated from the timer interrupt */
int tickadj = 500/HZ;			/* max us adjusted per tick by adjtime */

/* Task queues run from bottom-half / scheduler context. */
DECLARE_TASK_QUEUE(tq_timer);		/* run from timer_bh */
DECLARE_TASK_QUEUE(tq_immediate);	/* run from immediate_bh */
DECLARE_TASK_QUEUE(tq_scheduler);	/* run at the top of schedule() */

/*
 * Phase/frequency-lock loop (NTP) state, adjusted via adjtimex() and
 * consumed by second_overflow() / do_timer() below.
 */
int time_state = TIME_BAD;	/* clock synchronization status */
int time_status = STA_UNSYNC;	/* clock status bits */
long time_offset = 0;		/* time adjustment (us) */
long time_constant = 0;		/* PLL time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance (ppm) */
long time_precision = 1;	/* clock precision (us) */
long time_maxerror = 0x70000000;/* maximum error */
long time_esterror = 0x70000000;/* estimated error */
long time_phase = 0;		/* phase offset (scaled us) */
long time_freq = 0;		/* frequency offset (scaled ppm) */
long time_adj = 0;		/* tick adjustment (scaled 1/HZ) */
long time_reftime = 0;		/* time at last adjustment (s) */

long time_adjust = 0;		/* outstanding adjtime() correction (us) */
long time_adjust_step = 0;	/* slice of time_adjust applied this tick */

int need_resched = 0;		/* set when schedule() should run soon */
unsigned long event = 0;

extern int _setitimer(int, struct itimerval *, struct itimerval *);
/* Kernel profiling: one counter per (1 << prof_shift) bytes of kernel text. */
unsigned long * prof_buffer = NULL;
unsigned long prof_len = 0;
unsigned long prof_shift = 0;

#define _S(nr) (1<<((nr)-1))	/* signal number -> signal mask bit */

extern void mem_use(void);

extern int timer_interrupt(void);

/* Static storage for the initial (idle) task; everything else is dynamic. */
static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
unsigned long init_user_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;

struct mm_struct init_mm = INIT_MM;
struct task_struct init_task = INIT_TASK;

unsigned long volatile jiffies=0;	/* ticks since boot */

struct task_struct *current = &init_task;	/* task now executing */
struct task_struct *last_task_used_math = NULL;	/* current FPU-state owner */

struct task_struct * task[NR_TASKS] = {&init_task, };

struct kernel_stat kstat = { 0 };
/*
 * Link task p into the circular run-queue headed by init_task.
 * Caller must have interrupts disabled.  If the newcomer has clearly
 * more quantum left than the current task, request a reschedule.
 */
static inline void add_to_runqueue(struct task_struct * p)
{
#if 1	/* sanity check: cheap enough to leave enabled */
	if (p->next_run || p->prev_run) {
		printk("task already on run-queue\n");
		return;
	}
#endif
	if (p->counter > current->counter + 3)
		need_resched = 1;
	nr_running++;
	/* splice p in directly after the init_task list head */
	(p->next_run = init_task.next_run)->prev_run = p;
	p->prev_run = &init_task;
	init_task.next_run = p;
}
117
/*
 * Unlink task p from the run-queue.  Caller must have interrupts
 * disabled.  The idle task (init_task) is the permanent list head and
 * is never removed; trying to do so is reported (rate-limited to 5
 * messages) and ignored.
 */
static inline void del_from_runqueue(struct task_struct * p)
{
	struct task_struct *next = p->next_run;
	struct task_struct *prev = p->prev_run;

#if 1	/* sanity check: cheap enough to leave enabled */
	if (!next || !prev) {
		printk("task not on run-queue\n");
		return;
	}
#endif
	if (p == &init_task) {
		static int nr = 0;
		if (nr < 5) {
			nr++;
			printk("idle task may not sleep\n");
		}
		return;
	}
	nr_running--;
	next->prev_run = prev;
	prev->next_run = next;
	/* clear the links so the debug check above can catch reuse */
	p->next_run = NULL;
	p->prev_run = NULL;
}
143
144
145
146
147
148
149
150
151
/*
 * Make task p runnable and put it on the run-queue (if it is not
 * already there).  Safe to call from interrupt context: the whole
 * operation runs with interrupts disabled.
 */
inline void wake_up_process(struct task_struct * p)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	p->state = TASK_RUNNING;
	if (!p->next_run)	/* next_run != NULL means already queued */
		add_to_runqueue(p);
	restore_flags(flags);
}
163
/*
 * Timer callback used by schedule(): __data is the task that asked to
 * sleep with a timeout.  Clear its timeout and wake it.
 */
static void process_timeout(unsigned long __data)
{
	struct task_struct * p = (struct task_struct *) __data;

	p->timeout = 0;
	wake_up_process(p);
}
171
172
173
174
175
176
177
178
179
180
181
/*
 * The scheduler proper.  Decide whether the current task may keep
 * running, pick the runnable task with the largest remaining counter,
 * and switch to it.  If the current task went to sleep with a pending
 * timeout, a one-shot timer is armed around the context switch so the
 * task is woken when the timeout expires.
 *
 * Must not be called from interrupt context.
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * next;
	unsigned long timeout = 0;

	if (intr_count) {
		printk("Aiee: scheduling in interrupt\n");
		return;
	}
	run_task_queue(&tq_scheduler);

	need_resched = 0;
	cli();
	switch (current->state) {
	case TASK_INTERRUPTIBLE:
		/* a pending unblocked signal makes the task runnable again */
		if (current->signal & ~current->blocked)
			goto makerunnable;
		timeout = current->timeout;
		if (timeout && (timeout <= jiffies)) {
			/* timeout already expired: don't sleep at all */
			current->timeout = 0;
			timeout = 0;
	makerunnable:
			current->state = TASK_RUNNING;
			break;
		}
		/* fall through: really going to sleep */
	default:
		del_from_runqueue(current);
		/* fall through */
	case TASK_RUNNING:
	}
	p = init_task.next_run;
	sti();

	/*
	 * Pick the runnable task with the highest remaining counter.
	 * Defaults to the idle task when the run-queue is empty.
	 */
	c = -1000;
	next = &init_task;
	while (p != &init_task) {
		if (p->counter > c)
			c = p->counter, next = p;
		p = p->next_run;
	}

	/* all runnable counters exhausted: recharge every task's quantum */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
	if (current != next) {
		struct timer_list timer;

		kstat.context_swtch++;
		if (timeout) {
			/*
			 * NOTE: the timer lives on this stack; that is safe
			 * because it is deleted right after we run again.
			 */
			init_timer(&timer);
			timer.expires = timeout;
			timer.data = (unsigned long) current;
			timer.function = process_timeout;
			add_timer(&timer);
		}
		switch_to(next);
		/* we are back: cancel the wakeup timer if it has not fired */
		if (timeout)
			del_timer(&timer);
	}
}
253
/*
 * pause(2): sleep until a signal arrives.  The state must be set
 * before calling schedule() so a wakeup cannot be missed.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
260
261
262
263
264
265
266
267
268
/*
 * Wake every sleeper on wait queue q, whether it sleeps interruptibly
 * or not.  Walks the circular queue once; a broken (non-circular)
 * queue is reported and the walk aborted rather than looping forever.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE))
				wake_up_process(p);
		}
		if (!tmp->next) {
			/* corrupted queue: dump diagnostics and bail out */
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk("        q = %p\n",q);
			printk("       *q = %p\n",*q);
			printk("      tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
293
/*
 * Like wake_up(), but only wakes tasks sleeping in TASK_INTERRUPTIBLE;
 * uninterruptible sleepers are deliberately left alone.
 */
void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE)
				wake_up_process(p);
		}
		if (!tmp->next) {
			/* corrupted queue: dump diagnostics and bail out */
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk("        q = %p\n",q);
			printk("       *q = %p\n",*q);
			printk("      tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
317
/*
 * Slow path of down(): sleep uninterruptibly until the semaphore count
 * becomes positive.  The state is set before each count check so a
 * wake_up between the check and schedule() is not lost.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
330
/*
 * Common helper for sleep_on()/interruptible_sleep_on(): put the
 * current task on wait queue p in the given sleep state, schedule
 * away, and clean up on wakeup.  Interrupt state is restored on exit.
 * The idle task must never sleep.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");
	/* set the state before queueing so a wakeup cannot be missed */
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
348
/* Sleep on queue p; signals may wake the task early. */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}
353
/* Sleep on queue p, ignoring signals, until an explicit wake_up. */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
358
359
360
361
362
/*
 * Doubly-linked list of pending dynamic timers, kept sorted by expiry.
 * The head is a sentinel with an "infinite" (~0) expiry so the
 * insertion scan in add_timer() always terminates.
 */
static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
#define SLOW_BUT_DEBUGGING_TIMERS 1	/* enable list sanity checks below */
365
/*
 * Insert timer into the sorted pending-timer list.  The list is walked
 * and spliced with interrupts disabled.  With the debugging option on,
 * a timer whose link fields are non-NULL (i.e. apparently still queued
 * or uninitialized) is rejected with a diagnostic.
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	if (timer->next || timer->prev) {
		printk("add_timer() called with non-zero list from %p\n",
			__builtin_return_address(0));
		return;
	}
#endif
	p = &timer_head;
	save_flags(flags);
	cli();
	/* find the first entry expiring no earlier than this timer */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	/* splice the timer in before p */
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}
390
/*
 * Remove timer from the pending list if it is queued.
 * Returns 1 if the timer was found and removed, 0 otherwise.
 * The debugging variant scans the whole list to verify membership and
 * complains about timers with stale link fields; the fast variant
 * trusts timer->next as the "is queued" flag.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	struct timer_list * p;

	p = &timer_head;
	save_flags(flags);
	cli();
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			return 1;
		}
	}
	if (timer->next || timer->prev)
		printk("del_timer() called from %p with timer not initialized\n",
			__builtin_return_address(0));
	restore_flags(flags);
	return 0;
#else
	save_flags(flags);
	cli();
	if (timer->next) {
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		restore_flags(flags);
		return 1;
	}
	restore_flags(flags);
	return 0;
#endif
}
428
unsigned long timer_active = 0;		/* bitmask of armed timer_table slots */
struct timer_struct timer_table[32];	/* old-style static timers, one per bit */

/*
 * Load averages over 1, 5 and 15 minutes, in fixed point (FIXED_1 per
 * active task); maintained by calc_load() below.
 */
unsigned long avenrun[3] = { 0,0,0 };
439
440
441
442
443 static unsigned long count_active_tasks(void)
444 {
445 struct task_struct **p;
446 unsigned long nr = 0;
447
448 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
449 if (*p && ((*p)->state == TASK_RUNNING ||
450 (*p)->state == TASK_UNINTERRUPTIBLE ||
451 (*p)->state == TASK_SWAPPING))
452 nr += FIXED_1;
453 return nr;
454 }
455
/*
 * Recompute the load averages.  Called once per tick from do_timer();
 * the static countdown makes the real work run only once every
 * LOAD_FREQ(+1) ticks.
 */
static inline void calc_load(void)
{
	unsigned long active_tasks;
	static int count = LOAD_FREQ;

	if (count-- > 0)
		return;
	count = LOAD_FREQ;
	active_tasks = count_active_tasks();
	CALC_LOAD(avenrun[0], EXP_1, active_tasks);
	CALC_LOAD(avenrun[1], EXP_5, active_tasks);
	CALC_LOAD(avenrun[2], EXP_15, active_tasks);
}
469
470
471
472
473
474
475
476
477
478
/*
 * Run once per second (from do_timer() when tv_usec wraps) to handle
 * the slow parts of the NTP phase/frequency-lock loop: grow the
 * maximum-error estimate, step through the leap-second state machine,
 * and fold the phase and frequency corrections into time_adj, the
 * scaled per-tick adjustment applied by do_timer().
 */
static void second_overflow(void)
{
	long ltemp;

	/* Bump the maximum error estimate, saturating at 0x70000000. */
	time_maxerror = (0x70000000-time_maxerror <
			 time_tolerance >> SHIFT_USEC) ?
		0x70000000 : (time_maxerror + (time_tolerance >> SHIFT_USEC));

	/*
	 * Leap-second state machine.  INS inserts 23:59:60 by stepping
	 * the clock back one second at midnight UTC; DEL removes
	 * 23:59:59 by stepping it forward.
	 */
	switch (time_state) {

	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;

	case TIME_INS:
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_state = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 UTC\n");
		}
		break;

	case TIME_DEL:
		if ((xtime.tv_sec + 1) % 86400 == 0) {
			xtime.tv_sec++;
			time_state = TIME_WAIT;
			printk("Clock: deleting leap second 23:59:59 UTC\n");
		}
		break;

	case TIME_OOP:
		time_state = TIME_WAIT;
		break;

	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}

	/*
	 * Convert the outstanding phase offset into this second's
	 * time_adj contribution.  Unless in frequency-lock mode the
	 * offset is damped by the PLL time constant, and the per-second
	 * slew is capped at MAXPHASE/MINSEC.
	 */
	if (time_offset < 0) {
		ltemp = -time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) <<
				SHIFT_UPDATE;
		time_offset += ltemp;
		time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ -
				      SHIFT_UPDATE);
	} else {
		ltemp = time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) <<
				SHIFT_UPDATE;
		time_offset -= ltemp;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ -
				     SHIFT_UPDATE);
	}

	/*
	 * If the PPS signal has been absent for PPS_VALID seconds,
	 * declare its statistics invalid.
	 */
	pps_valid++;
	if (pps_valid == PPS_VALID) {
		pps_jitter = MAXTIME;
		pps_stabil = MAXFREQ;
		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
				 STA_PPSWANDER | STA_PPSERROR);
	}
	/* Fold in the frequency correction (kernel PLL + PPS discipline). */
	ltemp = time_freq + pps_freq;
	if (ltemp < 0)
		time_adj -= -ltemp >>
			(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
	else
		time_adj += ltemp >>
			(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

	/* NOTE(review): the +/- 25% fudge below compensates for HZ==100
	   not being a power of two in the shift arithmetic above. */
	if (time_adj < 0)
		time_adj -= -time_adj >> 2;
	else
		time_adj += time_adj >> 2;
}
590
591
592
593
/*
 * Timer bottom half: run every expired dynamic timer (unlinking each
 * under cli, then calling its handler with interrupts enabled), then
 * run any expired old-style timer_table entries.
 */
static void timer_bh(void * unused)
{
	unsigned long mask;
	struct timer_struct *tp;
	struct timer_list * timer;

	cli();
	while ((timer = timer_head.next) != &timer_head && timer->expires <= jiffies) {
		/* capture fn/data before unlinking, then call with irqs on */
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();
		fn(data);
		cli();
	}
	sti();

	/* old-style static timers: one bit of timer_active per slot */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;
		tp->fn();
		sti();
	}
}
625
/* Bottom half that drains the timer task queue. */
void tqueue_bh(void * unused)
{
	run_task_queue(&tq_timer);
}
630
/* Bottom half that drains the immediate task queue. */
void immediate_bh(void * unused)
{
	run_task_queue(&tq_immediate);
}
635
636
637
638
639
640
641
/*
 * The timer interrupt handler: advance xtime by one (NTP-adjusted)
 * tick, keep the CMOS clock in step, do per-task accounting and
 * profiling, enforce CPU rlimits, deliver interval-timer signals,
 * charge the running task's quantum, and mark the timer/tqueue bottom
 * halves when there is deferred work.
 */
static void do_timer(int irq, struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;

	static long last_rtc_update=0;
	extern int set_rtc_mmss(unsigned long);

	long ltemp, psecs;

	/*
	 * Accumulate the scaled NTP adjustment; when a whole
	 * microsecond (FINEUSEC) has built up, fold it into this tick.
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase >= FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust)
	{
		/*
		 * Apply at most +/- tickadj microseconds of the pending
		 * adjtime() correction per tick, so the clock slews
		 * gradually instead of stepping.
		 */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;

	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();
	}

	/*
	 * If the clock is synchronized, update the CMOS clock about once
	 * every 11 minutes, near the middle of a second (the CMOS write
	 * only takes whole seconds).  On failure, retry in ~60 seconds.
	 */
	if (time_state != TIME_BAD && xtime.tv_sec > last_rtc_update + 660 &&
	    xtime.tv_usec > 500000 - (tick >> 1) &&
	    xtime.tv_usec < 500000 + (tick >> 1))
		if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			last_rtc_update = xtime.tv_sec - 600;

	jiffies++;
	calc_load();
	if (user_mode(regs)) {
		current->utime++;
		if (current != task[0]) {
			/* priority < 15 means the task has been niced */
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}
		/* ITIMER_VIRTUAL counts down only while in user mode */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		current->stime++;
		if(current != task[0])
			kstat.cpu_system++;
		/* kernel profiling: bump the counter for this text address */
		if (prof_buffer && current != task[0]) {
			extern int _stext;
			unsigned long ip = instruction_pointer(regs);
			ip -= (unsigned long) &_stext;
			ip >>= prof_shift;
			if (ip < prof_len)
				prof_buffer[ip]++;
		}
	}

	/* RLIMIT_CPU: SIGKILL at the hard limit, SIGXCPU at the soft
	   limit and every 5 CPU-seconds thereafter. */
	if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
	    (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
		send_sig(SIGKILL, current, 1);
	if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
	    (((current->stime + current->utime) % HZ) == 0)) {
		psecs = (current->stime + current->utime) / HZ;

		if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
			send_sig(SIGXCPU, current, 1);

		else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
		         ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
			send_sig(SIGXCPU, current, 1);
	}

	/* charge the quantum; the idle task never forces a reschedule here */
	if (current != task[0] && 0 > --current->counter) {
		current->counter = 0;
		need_resched = 1;
	}

	/* ITIMER_PROF counts down in both user and kernel mode */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
	/* any expired old-style timers? run them from the bottom half */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	cli();
	if (timer_head.next->expires <= jiffies)
		mark_bh(TIMER_BH);
	if (tq_timer != &tq_last)
		mark_bh(TQUEUE_BH);
	sti();
}
781
782 asmlinkage unsigned int sys_alarm(unsigned int seconds)
783 {
784 struct itimerval it_new, it_old;
785 unsigned int oldalarm;
786
787 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
788 it_new.it_value.tv_sec = seconds;
789 it_new.it_value.tv_usec = 0;
790 _setitimer(ITIMER_REAL, &it_new, &it_old);
791 oldalarm = it_old.it_value.tv_sec;
792
793
794 if (it_old.it_value.tv_usec)
795 oldalarm++;
796 return oldalarm;
797 }
798
/* Return the calling process' pid. */
asmlinkage int sys_getpid(void)
{
	return current->pid;
}
803
/* Return the pid of the original parent (p_opptr). */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}
808
/* Return the real user id of the caller. */
asmlinkage int sys_getuid(void)
{
	return current->uid;
}
813
/* Return the effective user id of the caller. */
asmlinkage int sys_geteuid(void)
{
	return current->euid;
}
818
/* Return the real group id of the caller. */
asmlinkage int sys_getgid(void)
{
	return current->gid;
}
823
/* Return the effective group id of the caller. */
asmlinkage int sys_getegid(void)
{
	return current->egid;
}
828
829 asmlinkage int sys_nice(long increment)
830 {
831 int newprio;
832
833 if (increment < 0 && !suser())
834 return -EPERM;
835 newprio = current->priority - increment;
836 if (newprio < 1)
837 newprio = 1;
838 if (newprio > 35)
839 newprio = 35;
840 current->priority = newprio;
841 return 0;
842 }
843
/*
 * Print one line of the show_state() table for task p: state, saved
 * PC, free kernel-stack estimate, pid, and family links.  nr is the
 * task-array slot; the current task's slot is printed negated.
 */
static void show_task(int nr,struct task_struct * p)
{
	unsigned long free;
	static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
	if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
		/* NOTE(review): non-literal printk format; safe only
		   because stat_nam entries contain no '%'. */
		printk(stat_nam[p->state]);
	else
		printk(" ");
#if ((~0UL) == 0xffffffff)
	if (p == current)
		printk(" current  ");
	else
		printk(" %08lX ", thread_saved_pc(&p->tss));
#else
	if (p == current)
		printk("   current task   ");
	else
		printk(" %016lx ", thread_saved_pc(&p->tss));
#endif
	/* estimate unused stack: scan for the first non-zero word */
	for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
		if (((unsigned long *)p->kernel_stack_page)[free])
			break;
	}
	printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
		printk("      ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);
	else
		printk("       ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);
	else
		printk("\n");
}
883
/*
 * Dump a one-line summary of every existing task (used for debugging,
 * e.g. from the SysRq/keyboard handler).
 */
void show_state(void)
{
	int i;

#if ((~0UL) == 0xffffffff)
	printk("\n"
	       "                         free                        sibling\n");
	printk("  task             PC     stack   pid father child younger older\n");
#else
	printk("\n"
	       "                                 free                        sibling\n");
	printk("  task                 PC         stack   pid father child younger older\n");
#endif
	for (i=0 ; i<NR_TASKS ; i++)
		if (task[i])
			show_task(i,task[i]);
}
901
/*
 * Scheduler/timer initialization: install the bottom-half handlers,
 * claim the timer interrupt, and enable the bottom halves.
 */
void sched_init(void)
{
	bh_base[TIMER_BH].routine = timer_bh;
	bh_base[TQUEUE_BH].routine = tqueue_bh;
	bh_base[IMMEDIATE_BH].routine = immediate_bh;
	if (request_irq(TIMER_IRQ, do_timer, 0, "timer") != 0)
		panic("Could not allocate timer IRQ!");
	enable_bh(TIMER_BH);
	enable_bh(TQUEUE_BH);
	enable_bh(IMMEDIATE_BH);
}
912 }