This source file includes following definitions.
- math_state_restore
- math_emulate
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/segment.h>
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/tqueue.h>
28
29 #include <asm/system.h>
30 #include <asm/io.h>
31 #include <asm/segment.h>
32
33 #define TIMER_IRQ 0
34
35 #include <linux/timex.h>
36
37
38
39
/*
 * Timer and clock state: `tick' is the tick length in microseconds,
 * `xtime' the current wall-clock time, and `tickadj' the maximum
 * adjustment (in usec) that clock slewing may apply per tick.
 */
long tick = 1000000 / HZ;
volatile struct timeval xtime;
int tickadj = 500/HZ;

/* Task queue drained from the timer bottom half (see tqueue_bh). */
DECLARE_TASK_QUEUE(tq_timer);

/*
 * Phase-locked-loop state for the NTP-style clock discipline
 * (adjtimex); names follow the conventions of <linux/timex.h>.
 */
int time_status = TIME_BAD;	/* clock synchronization status */
long time_offset = 0;		/* time adjustment (us) */
long time_constant = 0;		/* PLL time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance (ppm) */
long time_precision = 1;	/* clock precision (us) */
long time_maxerror = 0x70000000;/* maximum error */
long time_esterror = 0x70000000;/* estimated error */
long time_phase = 0;		/* phase offset (scaled us) */
long time_freq = 0;		/* frequency offset (scaled ppm) */
long time_adj = 0;		/* tick adjustment (scaled) */
long time_reftime = 0;		/* time at last adjustment (s) */

/* Outstanding adjtime() correction and the per-tick step applied from it. */
long time_adjust = 0;
long time_adjust_step = 0;

int need_resched = 0;		/* set when schedule() should run soon */
unsigned long event = 0;	/* global event counter */

/*
 * CPU capability flags, filled in by the boot-time probes.
 * Defaults assume the most conservative hardware until proven otherwise.
 */
int hard_math = 0;		/* nonzero if an FPU/coprocessor is present */
int x86 = 0;			/* CPU family */
int ignore_irq13 = 0;		/* FPU errors arrive via exception 16, not IRQ13 */
int wp_works_ok = 0;		/* supervisor-mode write protect works (486+) */
int hlt_works_ok = 1;		/* `hlt' is safe to execute when idle */

int EISA_bus = 0;		/* nonzero if an EISA bus was detected */

extern int _setitimer(int, struct itimerval *, struct itimerval *);
/* Kernel profiling buffer: one counter per 4 bytes of kernel text. */
unsigned long * prof_buffer = NULL;
unsigned long prof_len = 0;

/* Convert a signal number (1-based) to its bit in a signal mask. */
#define _S(nr) (1<<((nr)-1))

extern void mem_use(void);

extern int timer_interrupt(void);
asmlinkage int system_call(void);

/* Kernel stack and mm/task state for the initial (idle) task, task[0]. */
static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
struct task_struct init_task = INIT_TASK;

unsigned long volatile jiffies=0;	/* clock ticks since boot */

struct task_struct *current = &init_task;	/* the running task */
struct task_struct *last_task_used_math = NULL;	/* current owner of the FPU state */

struct task_struct * task[NR_TASKS] = {&init_task, };

/* Initial stack used by task 0 before it switches to user mode. */
long user_stack [ PAGE_SIZE>>2 ] = { STACK_MAGIC, };

/* Stack pointer / segment pair loaded by the boot code (head.S). */
struct {
	long * a;
	short b;
} stack_start = { & user_stack [PAGE_SIZE>>2] , KERNEL_DS };

struct kernel_stat kstat = { 0 };	/* kernel statistics (context switches, CPU time, ...) */
111
112
113
114
115
116
117
118
/*
 * Lazy FPU context switch: invoked on the first FPU instruction (TS
 * fault) after a task switch.  Saves the coprocessor state of the
 * previous FPU owner and restores (or initializes) that of `current'.
 */
asmlinkage void math_state_restore(void)
{
	__asm__ __volatile__("clts");	/* allow FPU use: clear TS in CR0 */
	if (last_task_used_math == current)
		return;			/* our state is still loaded */
	/*
	 * Arm a watchdog timer around the fnsave/frstor sequence in case
	 * the coprocessor wedges mid-operation; cleared again below.
	 */
	timer_table[COPRO_TIMER].expires = jiffies+50;
	timer_active |= 1<<COPRO_TIMER;
	if (last_task_used_math)
		__asm__("fnsave %0":"=m" (last_task_used_math->tss.i387));
	else
		__asm__("fnclex");	/* no previous owner: just clear exceptions */
	last_task_used_math = current;
	if (current->used_math) {
		__asm__("frstor %0": :"m" (current->tss.i387));
	} else {
		/* first FPU use by this task: start from a clean state */
		__asm__("fninit");
		current->used_math=1;
	}
	timer_active &= ~(1<<COPRO_TIMER);
}
139
#ifndef CONFIG_MATH_EMULATION

/*
 * Stub used when the kernel is built without math emulation: a task
 * executing an FPU instruction on a machine with no coprocessor gets
 * SIGFPE instead of emulation, then we reschedule so the signal can be
 * acted upon.
 */
asmlinkage void math_emulate(long arg)
{
	printk("math-emulation not enabled and no coprocessor found.\n");
	printk("killing %s.\n",current->comm);
	send_sig(SIGFPE,current,1);
	schedule();
}

#endif
151
/* Ticks accumulated since schedule() last processed interval timers,
 * and the number of ticks until the earliest ITIMER_REAL expiry
 * (~0 = none pending). */
unsigned long itimer_ticks = 0;
unsigned long itimer_next = ~0;
154
155
156
157
158
159
160
161
162
163
164
165
166
/*
 * The scheduler.
 *
 * Pass 1 walks the task ring to (a) deliver ITIMER_REAL expirations
 * using the tick count accumulated by do_timer() and (b) wake
 * interruptible sleepers that have a pending signal or an expired
 * timeout.  Pass 2 picks the runnable task with the largest remaining
 * counter; when all runnable counters are exhausted, every task's
 * counter is recharged from its priority before switching.
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * next;
	unsigned long ticks;

	/* Scheduling from interrupt context is a bug: report and recover. */
	if (intr_count) {
		printk("Aiee: scheduling in interrupt\n");
		intr_count = 0;
	}
	/* Atomically consume the itimer ticks accumulated by do_timer(). */
	cli();
	ticks = itimer_ticks;
	itimer_ticks = 0;
	itimer_next = ~0;
	sti();
	need_resched = 0;
	p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc1;	/* walked the whole ring */
		if (ticks && p->it_real_value) {
			if (p->it_real_value <= ticks) {
				send_sig(SIGALRM, p, 1);
				if (!p->it_real_incr) {
					/* one-shot timer: disarm it */
					p->it_real_value = 0;
					goto end_itimer;
				}
				/* periodic timer: step past all missed periods */
				do {
					p->it_real_value += p->it_real_incr;
				} while (p->it_real_value <= ticks);
			}
			p->it_real_value -= ticks;
			/* remember the earliest pending expiry */
			if (p->it_real_value < itimer_next)
				itimer_next = p->it_real_value;
		}
end_itimer:
		if (p->state != TASK_INTERRUPTIBLE)
			continue;
		/* wake if a non-blocked signal is pending ... */
		if (p->signal & ~p->blocked) {
			p->state = TASK_RUNNING;
			continue;
		}
		/* ... or if the sleep timeout has expired */
		if (p->timeout && p->timeout <= jiffies) {
			p->timeout = 0;
			p->state = TASK_RUNNING;
		}
	}
confuse_gcc1:

	/* this is the scheduler proper: */
#if 0
	/* disabled: would give a small counter boost to a task that
	 * voluntarily gives up the CPU early */
	if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
	    current->counter < current->priority*2) {
		++current->counter;
	}
#endif
	c = -1000;	/* lower than any real counter, so something is picked */
	next = p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc2;
		if (p->state == TASK_RUNNING && p->counter > c)
			c = p->counter, next = p;
	}
confuse_gcc2:
	/* all runnable counters used up: recharge everyone from priority */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
	if (current == next)
		return;
	kstat.context_swtch++;
	switch_to(next);
	/* Reload the hardware debug registers for the incoming task. */
	if(current->debugreg[7]){
		loaddebug(0);
		loaddebug(1);
		loaddebug(2);
		loaddebug(3);
		loaddebug(6);
	};
}
256
/*
 * pause() system call: sleep until a signal arrives.  The state must be
 * set before calling schedule().  Returns -ERESTARTNOHAND so the call
 * is not restarted after the signal is handled.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
263
264
265
266
267
268
269
270
271
/*
 * Wake every task on the (circular) wait queue *q, whether sleeping
 * interruptibly or uninterruptibly.  A broken, non-circular queue is
 * reported with diagnostics instead of being followed forever.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;		/* no queue, or the queue is empty */
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE)) {
				p->state = TASK_RUNNING;
				/* ask for a reschedule if the woken task
				 * clearly deserves the CPU more than we do */
				if (p->counter > current->counter + 3)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			/* corrupted queue: dump state and stop walking */
			printk("wait_queue is bad (eip = %08lx)\n",((unsigned long *) q)[-1]);
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
298
/*
 * Like wake_up(), but only wakes tasks sleeping in TASK_INTERRUPTIBLE;
 * uninterruptible sleepers on the same queue are left alone.
 */
void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;		/* no queue, or the queue is empty */
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE) {
				p->state = TASK_RUNNING;
				/* reschedule if the woken task has much more
				 * time slice left than the current one */
				if (p->counter > current->counter + 3)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			/* corrupted (non-circular) queue: report and bail */
			printk("wait_queue is bad (eip = %08lx)\n",((unsigned long *) q)[-1]);
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
324
/*
 * Slow path of semaphore down(): called when the count was found
 * non-positive.  Sleep uninterruptibly until up() raises the count.
 * The state is re-set to TASK_UNINTERRUPTIBLE before each re-test so a
 * wakeup between the test and the sleep is not lost.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
337
/*
 * Common helper for sleep_on()/interruptible_sleep_on(): enqueue the
 * current task on *p in the given sleep state and schedule away.
 * Interrupts are enabled while asleep; the caller's flags are restored
 * before returning.  The idle task must never sleep.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");
	current->state = state;	/* set state before queueing: no lost wakeup */
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
355
/* Sleep on *p until woken; a signal also wakes the task. */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}
360
/* Sleep on *p until explicitly woken; signals are ignored meanwhile. */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
365
366
367
368
369
/* Sentinel head of the doubly-linked, expiry-ordered timer list;
 * its ~0 expiry guarantees the insertion scan always terminates. */
static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
/* Keep the slower, sanity-checked list handling enabled for now. */
#define SLOW_BUT_DEBUGGING_TIMERS 1
372
/*
 * Insert a timer into the expiry-ordered timer list.  timer->expires is
 * passed relative to now and converted to absolute jiffies here.  Must
 * not be called on a timer that is already linked in.
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	if (timer->next || timer->prev) {
		/* [-1] digs the caller's return address off the stack */
		printk("add_timer() called with non-zero list from %08lx\n",
		       ((unsigned long *) &timer)[-1]);
		return;
	}
#endif
	p = &timer_head;
	timer->expires += jiffies;	/* relative -> absolute */
	save_flags(flags);
	cli();
	/* find the first entry expiring no earlier than this timer */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	/* link the new timer in just before p */
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}
398
/*
 * Remove a timer from the timer list if it is pending.  Returns 1 if
 * the timer was unlinked (expires is converted back to a relative
 * value), 0 if it was not on the list.  The debugging variant verifies
 * the timer is actually present before touching its links.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	struct timer_list * p;

	p = &timer_head;
	save_flags(flags);
	cli();
	/* walk the whole list to confirm the timer is really queued */
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			timer->expires -= jiffies;	/* absolute -> relative */
			return 1;
		}
	}
	/* not found, yet it claims to be linked: report the caller */
	if (timer->next || timer->prev)
		printk("del_timer() called from %08lx with timer not initialized\n",
		       ((unsigned long *) &timer)[-1]);
	restore_flags(flags);
	return 0;
#else
	save_flags(flags);
	cli();
	if (timer->next) {
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		restore_flags(flags);
		timer->expires -= jiffies;	/* absolute -> relative */
		return 1;
	}
	restore_flags(flags);
	return 0;
#endif
}
438
unsigned long timer_active = 0;		/* bitmask of armed old-style timers */
struct timer_struct timer_table[32];	/* one entry per bit of timer_active */

/* 1-, 5- and 15-minute load averages in fixed point (FIXED_1 == 1.0). */
unsigned long avenrun[3] = { 0,0,0 };
449
450
451
452
/*
 * Count the tasks that contribute to the load average: running,
 * uninterruptibly sleeping, or being swapped.  The loop runs from
 * LAST_TASK down to (but excluding) FIRST_TASK, so the idle task is not
 * counted.  Returns the count scaled by FIXED_1 for CALC_LOAD.
 */
static unsigned long count_active_tasks(void)
{
	struct task_struct **p;
	unsigned long nr = 0;

	for(p = &LAST_TASK; p > &FIRST_TASK; --p)
		if (*p && ((*p)->state == TASK_RUNNING ||
			   (*p)->state == TASK_UNINTERRUPTIBLE ||
			   (*p)->state == TASK_SWAPPING))
			nr += FIXED_1;
	return nr;
}
465
466 static inline void calc_load(void)
467 {
468 unsigned long active_tasks;
469 static int count = LOAD_FREQ;
470
471 if (count-- > 0)
472 return;
473 count = LOAD_FREQ;
474 active_tasks = count_active_tasks();
475 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
476 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
477 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
478 }
479
480
481
482
483
484
485
486
487
488
489
/*
 * Runs once per second (from do_timer when tv_usec wraps): bumps the
 * NTP maximum-error estimate, converts the remaining time offset into a
 * per-tick phase adjustment, handles leap-second insertion/deletion,
 * and periodically writes the system time back to the CMOS RTC.
 */
static void second_overflow(void)
{
	long ltemp;

	static long last_rtc_update=0;
	extern int set_rtc_mmss(unsigned long);

	/* grow the maximum error, saturating at 0x70000000 */
	time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
	    0x70000000 : (time_maxerror + time_tolerance);

	/* turn the outstanding offset into this second's tick adjustment */
	if (time_offset < 0) {
		ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
		time_adj = - time_adj;
	} else if (time_offset > 0) {
		ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
	} else {
		time_adj = 0;
	}

	/* fold in the frequency correction and the fine-tune constant */
	time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
	    + FINETUNE;

	/* leap-second state machine */
	switch (time_status) {
	case TIME_INS:
		/* insert a second at midnight UTC */
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_status = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 GMT\n");
		}
		break;

	case TIME_DEL:
		/* delete the last second of the day */
		if (xtime.tv_sec % 86400 == 86399) {
			xtime.tv_sec++;
			time_status = TIME_OK;
			printk("Clock: deleting leap second 23:59:59 GMT\n");
		}
		break;

	case TIME_OOP:
		time_status = TIME_OK;
		break;
	}
	/* update the CMOS clock roughly every 11 minutes; on failure,
	 * retry in about a minute (the `else' binds to the inner `if',
	 * which is the intended reading) */
	if (xtime.tv_sec > last_rtc_update + 660)
		if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			last_rtc_update = xtime.tv_sec - 600;
}
548
549
550
551
/*
 * Timer bottom half: run all expired new-style timers from the
 * timer_head list, then all expired old-style timer_table entries.
 * Timer functions themselves are called with interrupts enabled.
 */
static void timer_bh(void * unused)
{
	unsigned long mask;
	struct timer_struct *tp;
	struct timer_list * timer;

	cli();
	while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		/* unlink before calling: the handler may re-add the timer */
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();
		fn(data);
		cli();
	}
	sti();

	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;		/* no higher active bits remain */
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;	/* one-shot: disarm before running */
		tp->fn();
		sti();
	}
}
583
/* Bottom half that drains the timer task queue (tq_timer). */
void tqueue_bh(void * unused)
{
	run_task_queue(&tq_timer);
}
588
589
590
591
592
593
594
/*
 * The timer interrupt handler proper, run HZ times per second with
 * `regs' describing the interrupted context.  Advances xtime (with NTP
 * phase corrections and adjtime slewing), bumps jiffies and the load
 * average, does per-task CPU accounting and interval timers, and marks
 * the timer/tqueue bottom halves when they have work.
 */
static void do_timer(struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;

	long ltemp;

	/*
	 * Accumulate the scaled phase adjustment; when it exceeds one
	 * microsecond, fold the whole microseconds into this tick's
	 * xtime increment.
	 */
	time_phase += time_adj;
	if (time_phase < -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase > FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust)
	{
		/*
		 * adjtime() slewing: move at most `tickadj' microseconds
		 * per tick toward the requested total adjustment, and
		 * subtract what we applied from the amount outstanding.
		 */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;

	/* second rollover: also drives the once-per-second NTP machinery */
	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();
	}

	jiffies++;
	calc_load();
	/* user (or vm86) mode vs. kernel mode accounting */
	if ((VM_MASK & regs->eflags) || (3 & regs->cs)) {
		current->utime++;
		if (current != task[0]) {
			/* NOTE(review): priority < 15 is counted as "nice"
			 * time -- presumably the default priority is 15 */
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}
		/* ITIMER_VIRTUAL runs down only while in user mode */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		current->stime++;
		if(current != task[0])
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		/* kernel profiling: count a hit for the interrupted EIP */
		if (prof_buffer && current != task[0]) {
			unsigned long eip = regs->eip;
			eip >>= 2;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}
	/* time slice used up?  (the idle task is exempt) */
	if (current != task[0] && 0 > --current->counter) {
		current->counter = 0;
		need_resched = 1;
	}
	/* ITIMER_PROF runs down in both user and kernel mode */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
	/* old-style timers: just flag the bottom half; it runs them later */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	cli();
	itimer_ticks++;
	if (itimer_ticks > itimer_next)
		need_resched = 1;	/* make schedule() deliver itimers soon */
	if (timer_head.next->expires < jiffies)
		mark_bh(TIMER_BH);
	if (tq_timer != &tq_last)
		mark_bh(TQUEUE_BH);
	sti();
}
704
705 asmlinkage int sys_alarm(long seconds)
706 {
707 struct itimerval it_new, it_old;
708
709 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
710 it_new.it_value.tv_sec = seconds;
711 it_new.it_value.tv_usec = 0;
712 _setitimer(ITIMER_REAL, &it_new, &it_old);
713 return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
714 }
715
/* Trivial identity getters; `current' is always a valid task. */

asmlinkage int sys_getpid(void)
{
	return current->pid;
}

/* NOTE(review): uses p_opptr (original parent), presumably so a tracer
 * temporarily re-parenting the task does not change getppid() -- confirm. */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}

asmlinkage int sys_getuid(void)
{
	return current->uid;	/* real uid */
}

asmlinkage int sys_geteuid(void)
{
	return current->euid;	/* effective uid */
}

asmlinkage int sys_getgid(void)
{
	return current->gid;	/* real gid */
}

asmlinkage int sys_getegid(void)
{
	return current->egid;	/* effective gid */
}
745
746 asmlinkage int sys_nice(long increment)
747 {
748 int newprio;
749
750 if (increment < 0 && !suser())
751 return -EPERM;
752 newprio = current->priority - increment;
753 if (newprio < 1)
754 newprio = 1;
755 if (newprio > 35)
756 newprio = 35;
757 current->priority = newprio;
758 return 0;
759 }
760
/*
 * Print one line of debugging state for a task: command name, slot
 * number (negated for the current task), scheduler state, saved PC,
 * free kernel-stack estimate, pid and family relationships.
 */
static void show_task(int nr,struct task_struct * p)
{
	unsigned long free;
	static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
	if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[p->state]);
	else
		printk(" ");
	if (p == current)
		printk(" current ");
	else
		/* NOTE(review): tss.esp[3] appears to hold the saved return
		 * PC of a switched-out task -- confirm against switch_to() */
		printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);
	/* estimate free stack: scan up from the bottom of the kernel stack
	 * page until the first word that has been written to */
	for (free = 1; free < 1024 ; free++) {
		if (((unsigned long *)p->kernel_stack_page)[free])
			break;
	}
	printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);	/* youngest child */
	else
		printk(" ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);	/* younger sibling */
	else
		printk(" ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);	/* older sibling */
	else
		printk("\n");
}
793
794 void show_state(void)
795 {
796 int i;
797
798 printk(" free sibling\n");
799 printk(" task PC stack pid father child younger older\n");
800 for (i=0 ; i<NR_TASKS ; i++)
801 if (task[i])
802 show_task(i,task[i]);
803 }
804
/*
 * One-time scheduler and timer initialization: install the bottom-half
 * handlers, set up TSS/LDT descriptors, hook the int 0x80 system-call
 * gate, program the 8253 PIT for HZ interrupts per second, and claim
 * IRQ 0 for do_timer().
 */
void sched_init(void)
{
	int i;
	struct desc_struct * p;

	bh_base[TIMER_BH].routine = timer_bh;
	bh_base[TQUEUE_BH].routine = tqueue_bh;
	/* assembly entry code hard-codes this size */
	if (sizeof(struct sigaction) != 16)
		panic("Struct sigaction MUST be 16 bytes");
	set_tss_desc(gdt+FIRST_TSS_ENTRY,&init_task.tss);
	set_ldt_desc(gdt+FIRST_LDT_ENTRY,&default_ldt,1);
	set_system_gate(0x80,&system_call);
	/* zero the TSS/LDT descriptor pair for every other task slot */
	p = gdt+2+FIRST_TSS_ENTRY;
	for(i=1 ; i<NR_TASKS ; i++) {
		task[i] = NULL;
		p->a=p->b=0;
		p++;
		p->a=p->b=0;
		p++;
	}
	/* clear NT in EFLAGS so iret doesn't attempt a hardware task switch */
	__asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
	load_TR(0);
	load_ldt(0);
	/* 8253 channel 0: control word 0x34 = binary, mode 2 (rate
	 * generator), LSB then MSB; LATCH gives HZ interrupts per second */
	outb_p(0x34,0x43);
	outb_p(LATCH & 0xff , 0x40);	/* LSB */
	outb(LATCH >> 8 , 0x40);	/* MSB */
	if (request_irq(TIMER_IRQ,(void (*)(int)) do_timer, 0, "timer") != 0)
		panic("Could not allocate timer IRQ!");
}
834 }