This source file includes the following definitions.
- sys_ni_syscall
- math_state_restore
- math_emulate
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/sys.h>
21 #include <linux/fdreg.h>
22 #include <linux/errno.h>
23 #include <linux/time.h>
24 #include <linux/ptrace.h>
25 #include <linux/segment.h>
26 #include <linux/delay.h>
27 #include <linux/interrupt.h>
28
29 #include <asm/system.h>
30 #include <asm/io.h>
31 #include <asm/segment.h>
32
33 #define TIMER_IRQ 0
34
35 #include <linux/timex.h>
36
37
38
39
/*
 * Timekeeping state (see <linux/timex.h> for the scaling constants).
 */
long tick = 1000000 / HZ;	/* timer interrupt period, in microseconds */
volatile struct timeval xtime;	/* the current wall-clock time */
int tickadj = 500/HZ;		/* max microseconds adjtime() may slew per tick */

/*
 * NTP-style clock-discipline (phase-locked loop) variables,
 * updated by second_overflow() and consumed by do_timer().
 */
int time_status = TIME_BAD;	/* clock synchronization status */
long time_offset = 0;		/* time offset still to be amortized (scaled us) */
long time_constant = 0;		/* PLL time constant (shift count) */
long time_tolerance = MAXFREQ;	/* frequency tolerance */
long time_precision = 1;	/* clock precision */
long time_maxerror = 0x70000000;/* maximum error bound, grown each second */
long time_esterror = 0x70000000;/* estimated error */
long time_phase = 0;		/* phase accumulator (scaled us) */
long time_freq = 0;		/* frequency correction term */
long time_adj = 0;		/* per-tick phase adjustment (scaled) */
long time_reftime = 0;		/* NOTE(review): set nowhere in this file — presumably time of last update; confirm */

long time_adjust = 0;		/* outstanding adjtime() correction (us) */
long time_adjust_step = 0;	/* slice of time_adjust applied this tick */

int need_resched = 0;		/* set when schedule() should be invoked */

/*
 * CPU capability flags — NOTE(review): presumably filled in by early
 * boot/setup code, not visible in this file; confirm.
 */
int hard_math = 0;		/* non-zero if an FPU is present */
int x86 = 0;			/* CPU family */
int ignore_irq13 = 0;		/* FPU error reporting quirk flag */
int wp_works_ok = 0;		/* write-protect works in supervisor mode */

int EISA_bus = 0;		/* non-zero on EISA machines */

extern int _setitimer(int, struct itimerval *, struct itimerval *);
unsigned long * prof_buffer = NULL;	/* kernel profiling histogram (see do_timer) */
unsigned long prof_len = 0;		/* number of entries in prof_buffer */
80
#define _S(nr) (1<<((nr)-1))	/* signal number -> signal bit */

extern void mem_use(void);

extern int timer_interrupt(void);
asmlinkage int system_call(void);

/* Kernel-mode stack for task 0; first word is an overflow canary. */
static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
struct task_struct init_task = INIT_TASK;

unsigned long volatile jiffies=0;	/* ticks since boot, bumped by do_timer() */

struct task_struct *current = &init_task;	/* the currently running task */
struct task_struct *last_task_used_math = NULL;	/* task whose state is in the FPU */

struct task_struct * task[NR_TASKS] = {&init_task, };	/* all task slots */

/* Initial stack; first word is an overflow canary (see show_task scan). */
long user_stack [ PAGE_SIZE>>2 ] = { STACK_MAGIC, };

/* Initial %esp/%ss pair — NOTE(review): presumably loaded by the boot
 * assembly; confirm against head.S. */
struct {
	long * a;
	short b;
} stack_start = { & user_stack [PAGE_SIZE>>2] , KERNEL_DS };

/* Kernel statistics, all counters start at zero. */
struct kernel_stat kstat =
	{ 0, 0, 0, { 0, 0, 0, 0 }, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
107
108
109
110
111
112 #ifdef __cplusplus
113 extern "C" {
114 #endif
115
/*
 * Stub used for unimplemented system-call slots: always fails.
 */
int sys_ni_syscall(void)
{
	const int err = -EINVAL;

	return err;
}
120
/*
 * The system-call dispatch table, indexed by syscall number (the value
 * placed in %eax by user space before int 0x80).  Order here defines
 * the syscall ABI, so entries must never be reordered or removed.
 */
fn_ptr sys_call_table[] = { sys_setup, sys_exit, sys_fork, sys_read,
	sys_write, sys_open, sys_close, sys_waitpid, sys_creat, sys_link,
	sys_unlink, sys_execve, sys_chdir, sys_time, sys_mknod, sys_chmod,
	sys_chown, sys_break, sys_stat, sys_lseek, sys_getpid, sys_mount,
	sys_umount, sys_setuid, sys_getuid, sys_stime, sys_ptrace, sys_alarm,
	sys_fstat, sys_pause, sys_utime, sys_stty, sys_gtty, sys_access,
	sys_nice, sys_ftime, sys_sync, sys_kill, sys_rename, sys_mkdir,
	sys_rmdir, sys_dup, sys_pipe, sys_times, sys_prof, sys_brk, sys_setgid,
	sys_getgid, sys_signal, sys_geteuid, sys_getegid, sys_acct, sys_phys,
	sys_lock, sys_ioctl, sys_fcntl, sys_mpx, sys_setpgid, sys_ulimit,
	sys_olduname, sys_umask, sys_chroot, sys_ustat, sys_dup2, sys_getppid,
	sys_getpgrp, sys_setsid, sys_sigaction, sys_sgetmask, sys_ssetmask,
	sys_setreuid,sys_setregid, sys_sigsuspend, sys_sigpending,
	sys_sethostname, sys_setrlimit, sys_getrlimit, sys_getrusage,
	sys_gettimeofday, sys_settimeofday, sys_getgroups, sys_setgroups,
	sys_select, sys_symlink, sys_lstat, sys_readlink, sys_uselib,
	sys_swapon, sys_reboot, sys_readdir, sys_mmap, sys_munmap, sys_truncate,
	sys_ftruncate, sys_fchmod, sys_fchown, sys_getpriority, sys_setpriority,
	sys_profil, sys_statfs, sys_fstatfs, sys_ioperm, sys_socketcall,
	sys_syslog, sys_setitimer, sys_getitimer, sys_newstat, sys_newlstat,
	sys_newfstat, sys_uname, sys_iopl, sys_vhangup, sys_idle, sys_vm86,
	sys_wait4, sys_swapoff, sys_sysinfo, sys_ipc, sys_fsync, sys_sigreturn,
	sys_clone, sys_setdomainname, sys_newuname, sys_modify_ldt,
	sys_adjtimex, sys_mprotect, sys_sigprocmask, sys_create_module,
	sys_init_module, sys_delete_module, sys_get_kernel_syms, sys_quotactl,
	sys_getpgid, sys_fchdir, sys_bdflush };

/* Number of entries in sys_call_table (used to bounds-check %eax). */
int NR_syscalls = sizeof(sys_call_table)/sizeof(fn_ptr);
150
151 #ifdef __cplusplus
152 }
153 #endif
154
155
156
157
158
159
160
161
/*
 * Lazy FPU context switch: called on the first FPU use after a task
 * switch (device-not-available fault).  Saves the previous owner's
 * i387 state and loads (or initializes) the current task's state.
 * Must not be preempted while the FPU contents are in flux.
 */
asmlinkage void math_state_restore(void)
{
	__asm__ __volatile__("clts");	/* clear TS so FPU use no longer faults */
	if (last_task_used_math == current)
		return;			/* FPU already holds our state */
	/* Arm the coprocessor timer while we touch the FPU —
	 * NOTE(review): presumably a watchdog for a wedged coprocessor; confirm. */
	timer_table[COPRO_TIMER].expires = jiffies+50;
	timer_active |= 1<<COPRO_TIMER;
	if (last_task_used_math)
		__asm__("fnsave %0":"=m" (last_task_used_math->tss.i387));
	else
		__asm__("fnclex");	/* no previous owner: just clear exceptions */
	last_task_used_math = current;
	if (current->used_math) {
		__asm__("frstor %0": :"m" (current->tss.i387));	/* reload saved state */
	} else {
		__asm__("fninit");	/* first FPU use by this task: clean slate */
		current->used_math=1;
	}
	timer_active &= ~(1<<COPRO_TIMER);	/* disarm the watchdog again */
}
182
#ifndef CONFIG_MATH_EMULATION

/*
 * Stub used when the kernel is built without math emulation but an FPU
 * instruction traps on a machine with no coprocessor: there is nothing
 * we can do except kill the process with SIGFPE.
 */
asmlinkage void math_emulate(long arg)
{
	printk("math-emulation not enabled and no coprocessor found.\n");
	printk("killing %s.\n",current->comm);
	send_sig(SIGFPE,current,1);
	schedule();	/* give the (fatal) signal a chance to be delivered */
}

#endif /* CONFIG_MATH_EMULATION */
194
unsigned long itimer_ticks = 0;	/* ticks accumulated since schedule() last ran the itimers */
unsigned long itimer_next = ~0;	/* smallest it_real_value seen on the last schedule() pass */
static unsigned long lost_ticks = 0;	/* ticks that arrived while the head timer was already due */
198
199
200
201
202
203
204
205
206
207
208
209
210
/*
 * 'schedule()' is the scheduler function.  It first delivers any
 * expired ITIMER_REAL timers and wakes sleepers whose signal or
 * timeout is pending, then picks the runnable task with the largest
 * remaining time slice and switches to it.  When every runnable task
 * has exhausted its slice, all counters are recharged (sleepers keep
 * half their remainder, so they get a boost).
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * next;
	unsigned long ticks;

	/* Scheduling from interrupt context is a bug; report and patch up. */
	if (intr_count) {
		printk("Aiee: scheduling in interrupt\n");
		intr_count = 0;
	}
	/* Atomically collect the ticks do_timer() accumulated for us. */
	cli();
	ticks = itimer_ticks;
	itimer_ticks = 0;
	itimer_next = ~0;
	sti();
	need_resched = 0;
	p = &init_task;
	/* Pass 1: run itimers and wake tasks, over the circular task list. */
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc1;
		if (ticks && p->it_real_value) {
			if (p->it_real_value <= ticks) {
				send_sig(SIGALRM, p, 1);
				if (!p->it_real_incr) {
					/* one-shot timer: stays disarmed */
					p->it_real_value = 0;
					goto end_itimer;
				}
				/* periodic timer: advance past the elapsed ticks */
				do {
					p->it_real_value += p->it_real_incr;
				} while (p->it_real_value <= ticks);
			}
			p->it_real_value -= ticks;
			/* track the soonest remaining itimer */
			if (p->it_real_value < itimer_next)
				itimer_next = p->it_real_value;
		}
end_itimer:
		if (p->state != TASK_INTERRUPTIBLE)
			continue;
		if (p->signal & ~p->blocked) {
			/* a deliverable signal makes the sleeper runnable */
			p->state = TASK_RUNNING;
			continue;
		}
		if (p->timeout && p->timeout <= jiffies) {
			/* sleep timeout expired */
			p->timeout = 0;
			p->state = TASK_RUNNING;
		}
	}
confuse_gcc1:

	/* this is the scheduler proper: */
#if 0
	/* Disabled experiment: give tasks that go to sleep a small
	 * priority boost.  Smoother for some loads but can be unfair. */
	if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
	    current->counter < current->priority*2) {
		++current->counter;
	}
#endif
	/* Pass 2: pick the runnable task with the largest counter.
	 * c starts at -1 so init_task (counter 0) is the fallback. */
	c = -1;
	next = p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc2;
		if (p->state == TASK_RUNNING && p->counter > c)
			c = p->counter, next = p;
	}
confuse_gcc2:
	/* Every runnable task is out of quantum: recharge all counters. */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
	if(current != next)
		kstat.context_swtch++;
	switch_to(next);
	/* Re-arm the hardware debug registers for the incoming task. */
	if(current->debugreg[7]){
		loaddebug(0);
		loaddebug(1);
		loaddebug(2);
		loaddebug(3);
		loaddebug(6);
	};
}
299
/*
 * pause(): suspend the caller until a signal arrives.  The state must
 * be set before calling schedule() so an intervening wakeup is not
 * lost, and pause() always returns -ERESTARTNOHAND because it can
 * only ever complete via a signal.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
306
307
308
309
310
311
312
313
314
/*
 * Wake every task on the circular wait queue *q, regardless of whether
 * it sleeps interruptibly or uninterruptibly.  Sets need_resched when
 * a woken task has more quantum left than the caller.  A queue whose
 * circularity is broken (NULL next) is reported and abandoned; the
 * [-1] read presumably fetches the caller's return address when q is
 * on the stack — NOTE(review): confirm.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;		/* no queue, or queue is empty */
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE)) {
				p->state = TASK_RUNNING;
				if (p->counter > current->counter)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			/* corrupted queue: dump diagnostics and stop walking */
			printk("wait_queue is bad (eip = %08lx)\n",((unsigned long *) q)[-1]);
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
341
/*
 * Like wake_up(), but only wakes tasks sleeping in TASK_INTERRUPTIBLE;
 * uninterruptible sleepers on the same queue are left alone.
 */
void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;		/* no queue, or queue is empty */
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE) {
				p->state = TASK_RUNNING;
				if (p->counter > current->counter)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			/* corrupted queue: dump diagnostics and stop walking */
			printk("wait_queue is bad (eip = %08lx)\n",((unsigned long *) q)[-1]);
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
367
/*
 * Semaphore contention slow path: sleep uninterruptibly until the
 * count goes positive.  The task state is re-set inside the loop
 * because any wakeup (even a spurious one) leaves us TASK_RUNNING.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
380
/*
 * Common helper for sleep_on()/interruptible_sleep_on(): enqueue the
 * caller on wait queue *p in the given state and schedule away.
 * Interrupts are enabled while asleep (so the wakeup can happen) and
 * the caller's interrupt flag is restored on return.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");	/* the idle task must never sleep */
	current->state = state;		/* set state before queueing: no lost wakeups */
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
398
/* Sleep on *p until woken; signals also wake the sleeper. */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}
403
/* Sleep on *p until explicitly woken; signals are ignored. */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
408
/* Head of the dynamic timer list.  The list is delta-encoded: each
 * entry's 'expires' is relative to the sum of all entries before it. */
static struct timer_list * next_timer = NULL;

/*
 * Insert 'timer' into the delta-encoded timer list, keeping it sorted
 * by absolute expiry.  On entry timer->expires is the relative delay;
 * the deltas of the surrounding entries are rebased accordingly.
 * The list walk runs with interrupts disabled.
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list ** p;

	if (!timer)
		return;
	timer->next = NULL;
	p = &next_timer;
	save_flags(flags);
	cli();
	while (*p) {
		if ((*p)->expires > timer->expires) {
			/* insert before *p: its delta becomes relative to us */
			(*p)->expires -= timer->expires;
			timer->next = *p;
			break;
		}
		/* walk past *p: make our expiry relative to it */
		timer->expires -= (*p)->expires;
		p = &(*p)->next;
	}
	*p = timer;
	restore_flags(flags);
}
434
/*
 * Remove 'timer' from the timer list.  Returns 1 and restores
 * timer->expires to the full remaining time (undoing the delta
 * encoding); returns 0 if the timer was not queued.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
	unsigned long expires = 0;	/* sum of deltas walked so far */
	struct timer_list **p;

	p = &next_timer;
	save_flags(flags);
	cli();
	while (*p) {
		if (*p == timer) {
			if ((*p = timer->next) != NULL)
				(*p)->expires += timer->expires;	/* hand our delta to the successor */
			timer->expires += expires;	/* make our expiry absolute again */
			restore_flags(flags);
			return 1;
		}
		expires += (*p)->expires;
		p = &(*p)->next;
	}
	restore_flags(flags);
	return 0;
}
458
unsigned long timer_active = 0;		/* bitmask of armed timer_table slots */
struct timer_struct timer_table[32];	/* old-style static kernel timers */

/*
 * The three load averages (1, 5, 15 minutes — see EXP_1/EXP_5/EXP_15
 * in calc_load), kept as FIXED_1 fixed-point values.
 */
unsigned long avenrun[3] = { 0,0,0 };
469
470
471
472
473 static unsigned long count_active_tasks(void)
474 {
475 struct task_struct **p;
476 unsigned long nr = 0;
477
478 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
479 if (*p && ((*p)->state == TASK_RUNNING ||
480 (*p)->state == TASK_UNINTERRUPTIBLE ||
481 (*p)->state == TASK_SWAPPING))
482 nr += FIXED_1;
483 return nr;
484 }
485
/*
 * Called from do_timer() every tick; every LOAD_FREQ ticks it folds the
 * current active-task count into the three exponentially damped load
 * averages via CALC_LOAD.
 */
static inline void calc_load(void)
{
	unsigned long active_tasks;	/* fixed-point, from count_active_tasks() */
	static int count = LOAD_FREQ;	/* ticks until the next recalculation */

	if (count-- > 0)
		return;
	count = LOAD_FREQ;
	active_tasks = count_active_tasks();
	CALC_LOAD(avenrun[0], EXP_1, active_tasks);
	CALC_LOAD(avenrun[1], EXP_5, active_tasks);
	CALC_LOAD(avenrun[2], EXP_15, active_tasks);
}
499
500
501
502
503
504
505
506
507
508
509
/*
 * Runs once per second (from do_timer, when tv_usec wraps): advances
 * the NTP clock discipline — grows the error bound, derives this
 * second's per-tick phase adjustment from the outstanding offset,
 * handles pending leap seconds, and writes the software clock back to
 * the CMOS RTC roughly every 11 minutes.
 */
static void second_overflow(void)
{
	long ltemp;
	/* last time the CMOS clock was updated */
	static long last_rtc_update=0;
	extern int set_rtc_mmss(unsigned long);

	/* Grow the maximum-error bound, saturating at 0x70000000. */
	time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
	    0x70000000 : (time_maxerror + time_tolerance);

	/* PLL: amortize a slice of time_offset into time_adj for this second. */
	if (time_offset < 0) {
		ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
		time_adj = - time_adj;
	} else if (time_offset > 0) {
		ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
	} else {
		time_adj = 0;
	}

	/* Fold in the frequency correction and the compile-time FINETUNE. */
	time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
	    + FINETUNE;

	/* Handle any pending leap second announced via time_status. */
	switch (time_status) {
	case TIME_INS:
		/* insert a second at midnight (tv_sec is a multiple of a day) */
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_status = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 GMT\n");
		}
		break;

	case TIME_DEL:
		/* delete the last second of the day */
		if (xtime.tv_sec % 86400 == 86399) {
			xtime.tv_sec++;
			time_status = TIME_OK;
			printk("Clock: deleting leap second 23:59:59 GMT\n");
		}
		break;

	case TIME_OOP:
		time_status = TIME_OK;	/* leap second has been processed */
		break;
	}
	/* Sync the CMOS RTC at most once per 660 seconds. */
	if (xtime.tv_sec > last_rtc_update + 660)
		if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
}
566
567
568
569
/*
 * Timer bottom half, run after do_timer() has called mark_bh(TIMER_BH):
 * fires every entry at the head of the dynamic timer list whose delta
 * has reached zero, then every expired slot of the static timer_table.
 * Handlers themselves run with interrupts enabled; only the list
 * manipulation is protected by cli().
 */
static void timer_bh(void * unused)
{
	unsigned long mask;
	struct timer_struct *tp;

	cli();
	while (next_timer && next_timer->expires == 0) {
		/* Snapshot the handler, unlink, then call it unlocked —
		 * the handler may well call add_timer() itself. */
		void (*fn)(unsigned long) = next_timer->function;
		unsigned long data = next_timer->data;
		next_timer = next_timer->next;
		sti();
		fn(data);
		cli();
	}
	sti();

	/* Old-style timers: one bit of timer_active per timer_table slot. */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;		/* no higher bits can be set */
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;	/* not due yet */
		timer_active &= ~mask;	/* disarm before firing */
		tp->fn();
		sti();
	}
}
598
599
600
601
602
603
604
/*
 * The timer interrupt handler (IRQ 0, HZ times per second):
 *  - advances xtime by one tick plus the NTP phase/adjtime corrections,
 *  - calls second_overflow() when a full second has elapsed,
 *  - bumps jiffies, the load average and per-task CPU accounting,
 *  - decrements the virtual/profiling interval timers,
 *  - ages the timer lists and marks the timer bottom half when due.
 */
static void do_timer(struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;

	long ltemp;

	/* Accumulate the scaled phase adjustment; once it exceeds one
	 * microsecond (FINEUSEC) in either direction, fold the whole
	 * microseconds into this tick's xtime increment. */
	time_phase += time_adj;
	if (time_phase < -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase > FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust)
	{
		/*
		 * An adjtime() slew is outstanding: apply at most
		 * +/- tickadj microseconds of it on this tick, and
		 * subtract what we applied from the remainder.
		 */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;

	/* Carry microseconds into seconds; run the once-a-second work. */
	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();
	}

	jiffies++;
	calc_load();
	/* VM86 mode or a user-mode CS: charge the tick to user time. */
	if ((VM_MASK & regs->eflags) || (3 & regs->cs)) {
		current->utime++;
		if (current != task[0]) {
			if (current->priority < 15)
				kstat.cpu_nice++;	/* niced processes */
			else
				kstat.cpu_user++;
		}
		/* ITIMER_VIRTUAL only ticks while in user mode. */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		current->stime++;
		if(current != task[0])
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		/* Kernel profiling: histogram of interrupted kernel EIPs. */
		if (prof_buffer && current != task[0]) {
			unsigned long eip = regs->eip;
			eip >>= 2;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}
	/* Quantum used up (the idle task never keeps the CPU). */
	if (current == task[0] || (--current->counter)<=0) {
		current->counter=0;
		need_resched = 1;
	}
	/* ITIMER_PROF ticks in both user and system mode. */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
	/* Flag the bottom half if any static timer_table slot is due. */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	cli();
	itimer_ticks++;
	if (itimer_ticks > itimer_next)
		need_resched = 1;	/* an ITIMER_REAL is due: force schedule() */
	/* Age the head of the delta-encoded dynamic timer list. */
	if (next_timer) {
		if (next_timer->expires) {
			next_timer->expires--;
			if (!next_timer->expires)
				mark_bh(TIMER_BH);
		} else {
			/* head already due but the bh hasn't run yet */
			lost_ticks++;
			mark_bh(TIMER_BH);
		}
	}
	sti();
}
720
/*
 * alarm(seconds): arm a one-shot ITIMER_REAL timer and return the
 * whole seconds that were left on the previous alarm.
 * NOTE(review): tv_usec / 1000000 is always 0 (usec < 1000000), so any
 * fractional second remaining is truncated rather than rounded up —
 * presumably intentional here; confirm before changing.
 */
asmlinkage int sys_alarm(long seconds)
{
	struct itimerval it_new, it_old;

	it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
	it_new.it_value.tv_sec = seconds;
	it_new.it_value.tv_usec = 0;
	_setitimer(ITIMER_REAL, &it_new, &it_old);
	return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
}
731
/* getpid(): return the caller's process id. */
asmlinkage int sys_getpid(void)
{
	return current->pid;
}
736
/* getppid(): return the pid of the caller's original parent (p_opptr). */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}
741
/* getuid(): return the caller's real user id. */
asmlinkage int sys_getuid(void)
{
	return current->uid;
}
746
/* geteuid(): return the caller's effective user id. */
asmlinkage int sys_geteuid(void)
{
	return current->euid;
}
751
/* getgid(): return the caller's real group id. */
asmlinkage int sys_getgid(void)
{
	return current->gid;
}
756
/* getegid(): return the caller's effective group id. */
asmlinkage int sys_getegid(void)
{
	return current->egid;
}
761
762 asmlinkage int sys_nice(long increment)
763 {
764 int newprio;
765
766 if (increment < 0 && !suser())
767 return -EPERM;
768 newprio = current->priority - increment;
769 if (newprio < 1)
770 newprio = 1;
771 if (newprio > 35)
772 newprio = 35;
773 current->priority = newprio;
774 return 0;
775 }
776
777 static void show_task(int nr,struct task_struct * p)
778 {
779 unsigned long free;
780 static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
781
782 printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
783 if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
784 printk(stat_nam[p->state]);
785 else
786 printk(" ");
787 if (p == current)
788 printk(" current ");
789 else
790 printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);
791 for (free = 1; free < 1024 ; free++) {
792 if (((unsigned long *)p->kernel_stack_page)[free])
793 break;
794 }
795 printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
796 if (p->p_cptr)
797 printk("%5d ", p->p_cptr->pid);
798 else
799 printk(" ");
800 if (p->p_ysptr)
801 printk("%7d", p->p_ysptr->pid);
802 else
803 printk(" ");
804 if (p->p_osptr)
805 printk(" %5d\n", p->p_osptr->pid);
806 else
807 printk("\n");
808 }
809
810 void show_state(void)
811 {
812 int i;
813
814 printk(" free sibling\n");
815 printk(" task PC stack pid father child younger older\n");
816 for (i=0 ; i<NR_TASKS ; i++)
817 if (task[i])
818 show_task(i,task[i]);
819 }
820
/*
 * One-time scheduler initialisation: install the timer bottom half,
 * set up the GDT descriptors for task 0 and clear the rest, hook the
 * int 0x80 system-call gate, program the 8253 timer chip for HZ
 * interrupts per second, and claim IRQ 0 for do_timer().
 */
void sched_init(void)
{
	int i;
	struct desc_struct * p;

	bh_base[TIMER_BH].routine = timer_bh;
	/* The sigaction layout is relied on elsewhere (asm, user ABI). */
	if (sizeof(struct sigaction) != 16)
		panic("Struct sigaction MUST be 16 bytes");
	set_tss_desc(gdt+FIRST_TSS_ENTRY,&init_task.tss);
	set_ldt_desc(gdt+FIRST_LDT_ENTRY,&default_ldt,1);
	set_system_gate(0x80,&system_call);	/* int 0x80 = system-call entry */
	p = gdt+2+FIRST_TSS_ENTRY;
	/* Clear the TSS+LDT descriptor pair of every other task slot. */
	for(i=1 ; i<NR_TASKS ; i++) {
		task[i] = NULL;
		p->a=p->b=0;
		p++;
		p->a=p->b=0;
		p++;
	}
	/* Clear NT (bit 14 of EFLAGS) so a stray iret behaves sanely. */
	__asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
	load_TR(0);
	load_ldt(0);
	/* Program 8253 channel 0: mode 2 (rate generator), LSB then MSB. */
	outb_p(0x34,0x43);
	outb_p(LATCH & 0xff , 0x40);	/* LSB of the divisor */
	outb(LATCH >> 8 , 0x40);	/* MSB of the divisor */
	if (request_irq(TIMER_IRQ,(void (*)(int)) do_timer)!=0)
		panic("Could not allocate timer IRQ!");
}