This source file includes the following definitions.
- math_state_restore
- math_emulate
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- immediate_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/tqueue.h>
27 #include <linux/resource.h>
28
29 #include <asm/system.h>
30 #include <asm/io.h>
31 #include <asm/segment.h>
32
33 #define TIMER_IRQ 0
34
35 #include <linux/timex.h>
36
37
38
39
40 long tick = 1000000 / HZ;
41 volatile struct timeval xtime;
42 int tickadj = 500/HZ;
43
44 DECLARE_TASK_QUEUE(tq_timer);
45 DECLARE_TASK_QUEUE(tq_immediate);
46
47
48
49
50 int time_status = TIME_BAD;
51 long time_offset = 0;
52 long time_constant = 0;
53 long time_tolerance = MAXFREQ;
54 long time_precision = 1;
55 long time_maxerror = 0x70000000;
56 long time_esterror = 0x70000000;
57 long time_phase = 0;
58 long time_freq = 0;
59 long time_adj = 0;
60 long time_reftime = 0;
61
62 long time_adjust = 0;
63 long time_adjust_step = 0;
64
65 int need_resched = 0;
66 unsigned long event = 0;
67
68
69
70
71 char hard_math = 0;
72 char x86 = 0;
73 char x86_model = 0;
74 char x86_mask = 0;
75 int x86_capability = 0;
76 int fdiv_bug = 0;
77
78 char x86_vendor_id[13] = "Unknown";
79
80 char ignore_irq13 = 0;
81 char wp_works_ok = 0;
82 char hlt_works_ok = 1;
83
84
85
86
87 int EISA_bus = 0;
88
89 extern int _setitimer(int, struct itimerval *, struct itimerval *);
90 unsigned long * prof_buffer = NULL;
91 unsigned long prof_len = 0;
92
93 #define _S(nr) (1<<((nr)-1))
94
95 extern void mem_use(void);
96
97 extern int timer_interrupt(void);
98 asmlinkage int system_call(void);
99
100 static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
101 static struct vm_area_struct init_mmap = INIT_MMAP;
102 struct task_struct init_task = INIT_TASK;
103
104 unsigned long volatile jiffies=0;
105
106 struct task_struct *current = &init_task;
107 struct task_struct *last_task_used_math = NULL;
108
109 struct task_struct * task[NR_TASKS] = {&init_task, };
110
111 long user_stack [ PAGE_SIZE>>2 ] = { STACK_MAGIC, };
112
113 struct {
114 long * a;
115 short b;
116 } stack_start = { & user_stack [PAGE_SIZE>>2] , KERNEL_DS };
117
118 struct kernel_stat kstat = { 0 };
119
120
121
122
123
124
125
126
127 asmlinkage void math_state_restore(void)
128 {
129 __asm__ __volatile__("clts");
130 if (last_task_used_math == current)
131 return;
132 timer_table[COPRO_TIMER].expires = jiffies+50;
133 timer_active |= 1<<COPRO_TIMER;
134 if (last_task_used_math)
135 __asm__("fnsave %0":"=m" (last_task_used_math->tss.i387));
136 else
137 __asm__("fnclex");
138 last_task_used_math = current;
139 if (current->used_math) {
140 __asm__("frstor %0": :"m" (current->tss.i387));
141 } else {
142 __asm__("fninit");
143 current->used_math=1;
144 }
145 timer_active &= ~(1<<COPRO_TIMER);
146 }
147
148 #ifndef CONFIG_MATH_EMULATION
149
150 asmlinkage void math_emulate(long arg)
151 {
152 printk("math-emulation not enabled and no coprocessor found.\n");
153 printk("killing %s.\n",current->comm);
154 send_sig(SIGFPE,current,1);
155 schedule();
156 }
157
158 #endif
159
160 unsigned long itimer_ticks = 0;
161 unsigned long itimer_next = ~0;
162
163
164
165
166
167
168
169
170
171
172
173
174
175 asmlinkage void schedule(void)
176 {
177 int c;
178 struct task_struct * p;
179 struct task_struct * next;
180 unsigned long ticks;
181
182
183
184 if (intr_count) {
185 printk("Aiee: scheduling in interrupt\n");
186 intr_count = 0;
187 }
188 cli();
189 ticks = itimer_ticks;
190 itimer_ticks = 0;
191 itimer_next = ~0;
192 sti();
193 need_resched = 0;
194 p = &init_task;
195 for (;;) {
196 if ((p = p->next_task) == &init_task)
197 goto confuse_gcc1;
198 if (ticks && p->it_real_value) {
199 if (p->it_real_value <= ticks) {
200 send_sig(SIGALRM, p, 1);
201 if (!p->it_real_incr) {
202 p->it_real_value = 0;
203 goto end_itimer;
204 }
205 do {
206 p->it_real_value += p->it_real_incr;
207 } while (p->it_real_value <= ticks);
208 }
209 p->it_real_value -= ticks;
210 if (p->it_real_value < itimer_next)
211 itimer_next = p->it_real_value;
212 }
213 end_itimer:
214 if (p->state != TASK_INTERRUPTIBLE)
215 continue;
216 if (p->signal & ~p->blocked) {
217 p->state = TASK_RUNNING;
218 continue;
219 }
220 if (p->timeout && p->timeout <= jiffies) {
221 p->timeout = 0;
222 p->state = TASK_RUNNING;
223 }
224 }
225 confuse_gcc1:
226
227
228 #if 0
229
230
231
232
233 if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
234 current->counter < current->priority*2) {
235 ++current->counter;
236 }
237 #endif
238 c = -1000;
239 next = p = &init_task;
240 for (;;) {
241 if ((p = p->next_task) == &init_task)
242 goto confuse_gcc2;
243 if (p->state == TASK_RUNNING && p->counter > c)
244 c = p->counter, next = p;
245 }
246 confuse_gcc2:
247 if (!c) {
248 for_each_task(p)
249 p->counter = (p->counter >> 1) + p->priority;
250 }
251 if (current == next)
252 return;
253 kstat.context_swtch++;
254 switch_to(next);
255
256 if(current->debugreg[7]){
257 loaddebug(0);
258 loaddebug(1);
259 loaddebug(2);
260 loaddebug(3);
261 loaddebug(6);
262 };
263 }
264
265 asmlinkage int sys_pause(void)
266 {
267 current->state = TASK_INTERRUPTIBLE;
268 schedule();
269 return -ERESTARTNOHAND;
270 }
271
272
273
274
275
276
277
278
279
280 void wake_up(struct wait_queue **q)
281 {
282 struct wait_queue *tmp;
283 struct task_struct * p;
284
285 if (!q || !(tmp = *q))
286 return;
287 do {
288 if ((p = tmp->task) != NULL) {
289 if ((p->state == TASK_UNINTERRUPTIBLE) ||
290 (p->state == TASK_INTERRUPTIBLE)) {
291 p->state = TASK_RUNNING;
292 if (p->counter > current->counter + 3)
293 need_resched = 1;
294 }
295 }
296 if (!tmp->next) {
297 printk("wait_queue is bad (eip = %p)\n",
298 __builtin_return_address(0));
299 printk(" q = %p\n",q);
300 printk(" *q = %p\n",*q);
301 printk(" tmp = %p\n",tmp);
302 break;
303 }
304 tmp = tmp->next;
305 } while (tmp != *q);
306 }
307
308 void wake_up_interruptible(struct wait_queue **q)
309 {
310 struct wait_queue *tmp;
311 struct task_struct * p;
312
313 if (!q || !(tmp = *q))
314 return;
315 do {
316 if ((p = tmp->task) != NULL) {
317 if (p->state == TASK_INTERRUPTIBLE) {
318 p->state = TASK_RUNNING;
319 if (p->counter > current->counter + 3)
320 need_resched = 1;
321 }
322 }
323 if (!tmp->next) {
324 printk("wait_queue is bad (eip = %p)\n",
325 __builtin_return_address(0));
326 printk(" q = %p\n",q);
327 printk(" *q = %p\n",*q);
328 printk(" tmp = %p\n",tmp);
329 break;
330 }
331 tmp = tmp->next;
332 } while (tmp != *q);
333 }
334
335 void __down(struct semaphore * sem)
336 {
337 struct wait_queue wait = { current, NULL };
338 add_wait_queue(&sem->wait, &wait);
339 current->state = TASK_UNINTERRUPTIBLE;
340 while (sem->count <= 0) {
341 schedule();
342 current->state = TASK_UNINTERRUPTIBLE;
343 }
344 current->state = TASK_RUNNING;
345 remove_wait_queue(&sem->wait, &wait);
346 }
347
348 static inline void __sleep_on(struct wait_queue **p, int state)
349 {
350 unsigned long flags;
351 struct wait_queue wait = { current, NULL };
352
353 if (!p)
354 return;
355 if (current == task[0])
356 panic("task[0] trying to sleep");
357 current->state = state;
358 add_wait_queue(p, &wait);
359 save_flags(flags);
360 sti();
361 schedule();
362 remove_wait_queue(p, &wait);
363 restore_flags(flags);
364 }
365
366 void interruptible_sleep_on(struct wait_queue **p)
367 {
368 __sleep_on(p,TASK_INTERRUPTIBLE);
369 }
370
371 void sleep_on(struct wait_queue **p)
372 {
373 __sleep_on(p,TASK_UNINTERRUPTIBLE);
374 }
375
376
377
378
379
380 static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
381 #define SLOW_BUT_DEBUGGING_TIMERS 1
382
383 void add_timer(struct timer_list * timer)
384 {
385 unsigned long flags;
386 struct timer_list *p;
387
388 #if SLOW_BUT_DEBUGGING_TIMERS
389 if (timer->next || timer->prev) {
390 printk("add_timer() called with non-zero list from %p\n",
391 __builtin_return_address(0));
392 return;
393 }
394 #endif
395 p = &timer_head;
396 timer->expires += jiffies;
397 save_flags(flags);
398 cli();
399 do {
400 p = p->next;
401 } while (timer->expires > p->expires);
402 timer->next = p;
403 timer->prev = p->prev;
404 p->prev = timer;
405 timer->prev->next = timer;
406 restore_flags(flags);
407 }
408
409 int del_timer(struct timer_list * timer)
410 {
411 unsigned long flags;
412 #if SLOW_BUT_DEBUGGING_TIMERS
413 struct timer_list * p;
414
415 p = &timer_head;
416 save_flags(flags);
417 cli();
418 while ((p = p->next) != &timer_head) {
419 if (p == timer) {
420 timer->next->prev = timer->prev;
421 timer->prev->next = timer->next;
422 timer->next = timer->prev = NULL;
423 restore_flags(flags);
424 timer->expires -= jiffies;
425 return 1;
426 }
427 }
428 if (timer->next || timer->prev)
429 printk("del_timer() called from %p with timer not initialized\n",
430 __builtin_return_address(0));
431 restore_flags(flags);
432 return 0;
433 #else
434 save_flags(flags);
435 cli();
436 if (timer->next) {
437 timer->next->prev = timer->prev;
438 timer->prev->next = timer->next;
439 timer->next = timer->prev = NULL;
440 restore_flags(flags);
441 timer->expires -= jiffies;
442 return 1;
443 }
444 restore_flags(flags);
445 return 0;
446 #endif
447 }
448
449 unsigned long timer_active = 0;
450 struct timer_struct timer_table[32];
451
452
453
454
455
456
457
458 unsigned long avenrun[3] = { 0,0,0 };
459
460
461
462
463 static unsigned long count_active_tasks(void)
464 {
465 struct task_struct **p;
466 unsigned long nr = 0;
467
468 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
469 if (*p && ((*p)->state == TASK_RUNNING ||
470 (*p)->state == TASK_UNINTERRUPTIBLE ||
471 (*p)->state == TASK_SWAPPING))
472 nr += FIXED_1;
473 return nr;
474 }
475
476 static inline void calc_load(void)
477 {
478 unsigned long active_tasks;
479 static int count = LOAD_FREQ;
480
481 if (count-- > 0)
482 return;
483 count = LOAD_FREQ;
484 active_tasks = count_active_tasks();
485 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
486 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
487 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
488 }
489
490
491
492
493
494
495
496
497
498
499
500 static void second_overflow(void)
501 {
502 long ltemp;
503
504 static long last_rtc_update=0;
505 extern int set_rtc_mmss(unsigned long);
506
507
508 time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
509 0x70000000 : (time_maxerror + time_tolerance);
510
511
512 if (time_offset < 0) {
513 ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
514 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
515 time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
516 time_adj = - time_adj;
517 } else if (time_offset > 0) {
518 ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
519 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
520 time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
521 } else {
522 time_adj = 0;
523 }
524
525 time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
526 + FINETUNE;
527
528
529 switch (time_status) {
530 case TIME_INS:
531
532 if (xtime.tv_sec % 86400 == 0) {
533 xtime.tv_sec--;
534 time_status = TIME_OOP;
535 printk("Clock: inserting leap second 23:59:60 GMT\n");
536 }
537 break;
538
539 case TIME_DEL:
540
541 if (xtime.tv_sec % 86400 == 86399) {
542 xtime.tv_sec++;
543 time_status = TIME_OK;
544 printk("Clock: deleting leap second 23:59:59 GMT\n");
545 }
546 break;
547
548 case TIME_OOP:
549 time_status = TIME_OK;
550 break;
551 }
552 if (xtime.tv_sec > last_rtc_update + 660)
553 if (set_rtc_mmss(xtime.tv_sec) == 0)
554 last_rtc_update = xtime.tv_sec;
555 else
556 last_rtc_update = xtime.tv_sec - 600;
557 }
558
559
560
561
562 static void timer_bh(void * unused)
563 {
564 unsigned long mask;
565 struct timer_struct *tp;
566 struct timer_list * timer;
567
568 cli();
569 while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
570 void (*fn)(unsigned long) = timer->function;
571 unsigned long data = timer->data;
572 timer->next->prev = timer->prev;
573 timer->prev->next = timer->next;
574 timer->next = timer->prev = NULL;
575 sti();
576 fn(data);
577 cli();
578 }
579 sti();
580
581 for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
582 if (mask > timer_active)
583 break;
584 if (!(mask & timer_active))
585 continue;
586 if (tp->expires > jiffies)
587 continue;
588 timer_active &= ~mask;
589 tp->fn();
590 sti();
591 }
592 }
593
594 void tqueue_bh(void * unused)
595 {
596 run_task_queue(&tq_timer);
597 }
598
599 void immediate_bh(void * unused)
600 {
601 run_task_queue(&tq_immediate);
602 }
603
604
605
606
607
608
609
610 static void do_timer(struct pt_regs * regs)
611 {
612 unsigned long mask;
613 struct timer_struct *tp;
614
615 long ltemp, psecs;
616
617
618
619
620 time_phase += time_adj;
621 if (time_phase < -FINEUSEC) {
622 ltemp = -time_phase >> SHIFT_SCALE;
623 time_phase += ltemp << SHIFT_SCALE;
624 xtime.tv_usec += tick + time_adjust_step - ltemp;
625 }
626 else if (time_phase > FINEUSEC) {
627 ltemp = time_phase >> SHIFT_SCALE;
628 time_phase -= ltemp << SHIFT_SCALE;
629 xtime.tv_usec += tick + time_adjust_step + ltemp;
630 } else
631 xtime.tv_usec += tick + time_adjust_step;
632
633 if (time_adjust)
634 {
635
636
637
638
639
640
641
642
643
644 if (time_adjust > tickadj)
645 time_adjust_step = tickadj;
646 else if (time_adjust < -tickadj)
647 time_adjust_step = -tickadj;
648 else
649 time_adjust_step = time_adjust;
650
651
652 time_adjust -= time_adjust_step;
653 }
654 else
655 time_adjust_step = 0;
656
657 if (xtime.tv_usec >= 1000000) {
658 xtime.tv_usec -= 1000000;
659 xtime.tv_sec++;
660 second_overflow();
661 }
662
663 jiffies++;
664 calc_load();
665 if ((VM_MASK & regs->eflags) || (3 & regs->cs)) {
666 current->utime++;
667 if (current != task[0]) {
668 if (current->priority < 15)
669 kstat.cpu_nice++;
670 else
671 kstat.cpu_user++;
672 }
673
674 if (current->it_virt_value && !(--current->it_virt_value)) {
675 current->it_virt_value = current->it_virt_incr;
676 send_sig(SIGVTALRM,current,1);
677 }
678 } else {
679 current->stime++;
680 if(current != task[0])
681 kstat.cpu_system++;
682 #ifdef CONFIG_PROFILE
683 if (prof_buffer && current != task[0]) {
684 unsigned long eip = regs->eip;
685 eip >>= CONFIG_PROFILE_SHIFT;
686 if (eip < prof_len)
687 prof_buffer[eip]++;
688 }
689 #endif
690 }
691
692
693
694 if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
695 (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
696 send_sig(SIGKILL, current, 1);
697 if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
698 (((current->stime + current->utime) % HZ) == 0)) {
699 psecs = (current->stime + current->utime) / HZ;
700
701 if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
702 send_sig(SIGXCPU, current, 1);
703
704 else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
705 ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
706 send_sig(SIGXCPU, current, 1);
707 }
708
709 if (current != task[0] && 0 > --current->counter) {
710 current->counter = 0;
711 need_resched = 1;
712 }
713
714 if (current->it_prof_value && !(--current->it_prof_value)) {
715 current->it_prof_value = current->it_prof_incr;
716 send_sig(SIGPROF,current,1);
717 }
718 for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
719 if (mask > timer_active)
720 break;
721 if (!(mask & timer_active))
722 continue;
723 if (tp->expires > jiffies)
724 continue;
725 mark_bh(TIMER_BH);
726 }
727 cli();
728 itimer_ticks++;
729 if (itimer_ticks > itimer_next)
730 need_resched = 1;
731 if (timer_head.next->expires < jiffies)
732 mark_bh(TIMER_BH);
733 if (tq_timer != &tq_last)
734 mark_bh(TQUEUE_BH);
735 sti();
736 }
737
738 asmlinkage int sys_alarm(long seconds)
739 {
740 struct itimerval it_new, it_old;
741
742 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
743 it_new.it_value.tv_sec = seconds;
744 it_new.it_value.tv_usec = 0;
745 _setitimer(ITIMER_REAL, &it_new, &it_old);
746 return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
747 }
748
749 asmlinkage int sys_getpid(void)
750 {
751 return current->pid;
752 }
753
754 asmlinkage int sys_getppid(void)
755 {
756 return current->p_opptr->pid;
757 }
758
759 asmlinkage int sys_getuid(void)
760 {
761 return current->uid;
762 }
763
764 asmlinkage int sys_geteuid(void)
765 {
766 return current->euid;
767 }
768
769 asmlinkage int sys_getgid(void)
770 {
771 return current->gid;
772 }
773
774 asmlinkage int sys_getegid(void)
775 {
776 return current->egid;
777 }
778
779 asmlinkage int sys_nice(long increment)
780 {
781 int newprio;
782
783 if (increment < 0 && !suser())
784 return -EPERM;
785 newprio = current->priority - increment;
786 if (newprio < 1)
787 newprio = 1;
788 if (newprio > 35)
789 newprio = 35;
790 current->priority = newprio;
791 return 0;
792 }
793
794 static void show_task(int nr,struct task_struct * p)
795 {
796 unsigned long free;
797 static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
798
799 printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
800 if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
801 printk(stat_nam[p->state]);
802 else
803 printk(" ");
804 if (p == current)
805 printk(" current ");
806 else
807 printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);
808 for (free = 1; free < 1024 ; free++) {
809 if (((unsigned long *)p->kernel_stack_page)[free])
810 break;
811 }
812 printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
813 if (p->p_cptr)
814 printk("%5d ", p->p_cptr->pid);
815 else
816 printk(" ");
817 if (p->p_ysptr)
818 printk("%7d", p->p_ysptr->pid);
819 else
820 printk(" ");
821 if (p->p_osptr)
822 printk(" %5d\n", p->p_osptr->pid);
823 else
824 printk("\n");
825 }
826
827 void show_state(void)
828 {
829 int i;
830
831 printk(" free sibling\n");
832 printk(" task PC stack pid father child younger older\n");
833 for (i=0 ; i<NR_TASKS ; i++)
834 if (task[i])
835 show_task(i,task[i]);
836 }
837
838 void sched_init(void)
839 {
840 int i;
841 struct desc_struct * p;
842
843 bh_base[TIMER_BH].routine = timer_bh;
844 bh_base[TQUEUE_BH].routine = tqueue_bh;
845 bh_base[IMMEDIATE_BH].routine = immediate_bh;
846 if (sizeof(struct sigaction) != 16)
847 panic("Struct sigaction MUST be 16 bytes");
848 set_tss_desc(gdt+FIRST_TSS_ENTRY,&init_task.tss);
849 set_ldt_desc(gdt+FIRST_LDT_ENTRY,&default_ldt,1);
850 set_system_gate(0x80,&system_call);
851 p = gdt+2+FIRST_TSS_ENTRY;
852 for(i=1 ; i<NR_TASKS ; i++) {
853 task[i] = NULL;
854 p->a=p->b=0;
855 p++;
856 p->a=p->b=0;
857 p++;
858 }
859
860 __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
861 load_TR(0);
862 load_ldt(0);
863 outb_p(0x34,0x43);
864 outb_p(LATCH & 0xff , 0x40);
865 outb(LATCH >> 8 , 0x40);
866 if (request_irq(TIMER_IRQ,(void (*)(int)) do_timer, 0, "timer") != 0)
867 panic("Could not allocate timer IRQ!");
868 }