This source file includes following definitions.
- math_state_restore
- math_emulate
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/segment.h>
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/tqueue.h>
28
29 #include <asm/system.h>
30 #include <asm/io.h>
31 #include <asm/segment.h>
32
33 #define TIMER_IRQ 0
34
35 #include <linux/timex.h>
36
37
38
39
long tick = 1000000 / HZ;	/* length of a timer tick in microseconds */
volatile struct timeval xtime;	/* the current wall-clock time */
int tickadj = 500/HZ;		/* max clock slew per tick, microseconds */

DECLARE_TASK_QUEUE(tq_timer);	/* task queue run by tqueue_bh() */

/*
 * Phase-locked-loop (NTP-style) clock discipline variables; the
 * SHIFT_* scaling constants come from <linux/timex.h>.
 */
int time_status = TIME_BAD;	/* clock synchronization status */
long time_offset = 0;		/* remaining time offset to slew (scaled us) */
long time_constant = 0;		/* PLL time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance */
long time_precision = 1;	/* clock precision */
long time_maxerror = 0x70000000;	/* maximum error bound */
long time_esterror = 0x70000000;	/* estimated error bound */
long time_phase = 0;		/* phase accumulator (scaled us) */
long time_freq = 0;		/* frequency offset (scaled) */
long time_adj = 0;		/* per-tick phase adjustment (scaled) */
long time_reftime = 0;		/* time of last adjustment */

long time_adjust = 0;		/* outstanding adjtime() correction (us) */
long time_adjust_step = 0;	/* slice of time_adjust applied this tick */

int need_resched = 0;		/* set to request a call to schedule() */
unsigned long event = 0;

/* CPU/FPU capability flags -- presumably filled in by early boot
   code; they are only declared, never written, in this file. */
int hard_math = 0;
int x86 = 0;
int ignore_irq13 = 0;
int wp_works_ok = 0;
int hlt_works_ok = 1;

int EISA_bus = 0;

extern int _setitimer(int, struct itimerval *, struct itimerval *);
unsigned long * prof_buffer = NULL;	/* kernel profiling buffer (see CONFIG_PROFILE use in do_timer) */
unsigned long prof_len = 0;

#define _S(nr) (1<<((nr)-1))	/* signal number -> signal-mask bit */

extern void mem_use(void);

extern int timer_interrupt(void);
asmlinkage int system_call(void);

/* kernel stack for the initial task; first word is a stack-overrun canary */
static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
struct task_struct init_task = INIT_TASK;

unsigned long volatile jiffies=0;	/* ticks since boot, bumped in do_timer() */

struct task_struct *current = &init_task;	/* the currently running task */
struct task_struct *last_task_used_math = NULL;	/* lazy-FPU context owner */

struct task_struct * task[NR_TASKS] = {&init_task, };

long user_stack [ PAGE_SIZE>>2 ] = { STACK_MAGIC, };

/* initial stack pointer / segment pair loaded at boot */
struct {
	long * a;
	short b;
} stack_start = { & user_stack [PAGE_SIZE>>2] , KERNEL_DS };

struct kernel_stat kstat = { 0 };
110
111
112
113
114
115
116
117
/*
 * Lazy FPU context switch: called on the first coprocessor use after
 * a task switch.  'clts' clears the TS flag so further FPU
 * instructions stop trapping; the old owner's state is saved and the
 * new owner's state is restored (or freshly initialized).
 */
asmlinkage void math_state_restore(void)
{
	__asm__ __volatile__("clts");
	if (last_task_used_math == current)
		return;
	/* Arm a watchdog around the save/restore in case the
	   coprocessor wedges; cleared again below on success. */
	timer_table[COPRO_TIMER].expires = jiffies+50;
	timer_active |= 1<<COPRO_TIMER;
	if (last_task_used_math)
		__asm__("fnsave %0":"=m" (last_task_used_math->tss.i387));
	else
		__asm__("fnclex");
	last_task_used_math = current;
	if (current->used_math) {
		__asm__("frstor %0": :"m" (current->tss.i387));
	} else {
		/* first FPU use ever by this task: start from a clean state */
		__asm__("fninit");
		current->used_math=1;
	}
	timer_active &= ~(1<<COPRO_TIMER);
}
138
139 #ifndef CONFIG_MATH_EMULATION
140
/*
 * Stub used when the kernel is built without the FPU emulator: an FPU
 * instruction on a machine with no coprocessor is fatal for the
 * offending process (SIGFPE), and we reschedule away from it.
 */
asmlinkage void math_emulate(long arg)
{
	printk("math-emulation not enabled and no coprocessor found.\n");
	printk("killing %s.\n",current->comm);
	send_sig(SIGFPE,current,1);
	schedule();
}
148
149 #endif
150
/* ITIMER_REAL bookkeeping: ticks accumulated since the last call to
   schedule(), and the smallest pending it_real_value seen there. */
unsigned long itimer_ticks = 0;
unsigned long itimer_next = ~0;
153
154
155
156
157
158
159
160
161
162
163
164
165
/*
 * The scheduler.  First pass over the task list: deliver expired
 * ITIMER_REAL timers (SIGALRM) and wake interruptible sleepers that
 * have a pending unblocked signal or an expired timeout.  Second
 * pass: pick the runnable task with the largest remaining counter;
 * if every runnable counter is 0, re-age all counters from priority.
 * Finally switch to the chosen task and reload its debug registers.
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * next;
	unsigned long ticks;

	/* scheduling from interrupt context is a bug: complain and
	   patch up intr_count so we can limp on */
	if (intr_count) {
		printk("Aiee: scheduling in interrupt\n");
		intr_count = 0;
	}
	/* atomically consume the ticks accumulated by do_timer() */
	cli();
	ticks = itimer_ticks;
	itimer_ticks = 0;
	itimer_next = ~0;
	sti();
	need_resched = 0;
	p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc1;
		if (ticks && p->it_real_value) {
			if (p->it_real_value <= ticks) {
				send_sig(SIGALRM, p, 1);
				if (!p->it_real_incr) {
					/* one-shot timer: disarm it */
					p->it_real_value = 0;
					goto end_itimer;
				}
				/* periodic: advance past the consumed ticks */
				do {
					p->it_real_value += p->it_real_incr;
				} while (p->it_real_value <= ticks);
			}
			p->it_real_value -= ticks;
			/* track the nearest pending expiry for do_timer() */
			if (p->it_real_value < itimer_next)
				itimer_next = p->it_real_value;
		}
end_itimer:
		if (p->state != TASK_INTERRUPTIBLE)
			continue;
		if (p->signal & ~p->blocked) {
			p->state = TASK_RUNNING;
			continue;
		}
		if (p->timeout && p->timeout <= jiffies) {
			p->timeout = 0;
			p->state = TASK_RUNNING;
		}
	}
confuse_gcc1:

	/* this is the scheduler proper: */
#if 0
	/* disabled: would give tasks that sleep a slightly higher
	   priority, but can be very unfair in some cases */
	if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
		current->counter < current->priority*2) {
		++current->counter;
	}
#endif
	c = -1000;
	next = p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc2;
		if (p->state == TASK_RUNNING && p->counter > c)
			c = p->counter, next = p;
	}
confuse_gcc2:
	/* all runnable counters exhausted: re-age everyone */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
	if (current == next)
		return;
	kstat.context_swtch++;
	switch_to(next);
	/* after the switch, reload the new task's debug registers if
	   any hardware breakpoints are enabled (dr7 non-zero) */
	if(current->debugreg[7]){
		loaddebug(0);
		loaddebug(1);
		loaddebug(2);
		loaddebug(3);
		loaddebug(6);
	};
}
255
/*
 * pause(): sleep interruptibly until a signal arrives.  Returns
 * -ERESTARTNOHAND so the call is not restarted after the handler.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
262
263
264
265
266
267
268
269
270
/*
 * Wake every task on the circular wait queue *q, both interruptible
 * and uninterruptible sleepers.  A woken task with noticeably more
 * quantum left than the current task triggers a reschedule request.
 * A NULL next link means the ring is corrupt: dump diagnostics
 * (the [-1] hack fishes the caller's return address off the stack)
 * and stop walking.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE)) {
				p->state = TASK_RUNNING;
				if (p->counter > current->counter + 3)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			printk("wait_queue is bad (eip = %08lx)\n",((unsigned long *) q)[-1]);
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
297
/*
 * Like wake_up(), but only wakes tasks in TASK_INTERRUPTIBLE sleep;
 * uninterruptible sleepers on the queue are left alone.  Same
 * corruption check as wake_up() on a NULL next link.
 */
void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE) {
				p->state = TASK_RUNNING;
				if (p->counter > current->counter + 3)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			printk("wait_queue is bad (eip = %08lx)\n",((unsigned long *) q)[-1]);
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
323
/*
 * Semaphore contention slow path: sleep uninterruptibly until the
 * count goes positive.  The state is re-set before every retest
 * because a wake_up may make us runnable without the count actually
 * being available to us.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
336
/*
 * Common helper for sleep_on()/interruptible_sleep_on(): queue the
 * current task on *p in the given sleep state and schedule away.
 * Interrupts are enabled while asleep; the caller's flags are
 * restored afterwards.  task[0] must never sleep.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");
	/* set the state before queueing so a wake_up between the two
	   cannot be lost */
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
354
/* Sleep on *p until woken or a signal arrives. */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}
359
/* Sleep on *p, ignoring signals, until explicitly woken. */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
364
365
366
367
368
/* Head of the expiry-sorted circular doubly-linked timer list.  It is
   a sentinel whose ~0 expiry sorts after every real timer, so the
   insertion scan in add_timer() always terminates. */
static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
#define SLOW_BUT_DEBUGGING_TIMERS 1	/* extra sanity checks in add/del_timer */
371
/*
 * Insert a timer into the expiry-sorted circular list.  The caller
 * sets timer->expires as a delay relative to now; it is converted to
 * an absolute jiffies value here.  The list is only modified with
 * interrupts blocked.
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	/* a pending timer must not be added twice */
	if (timer->next || timer->prev) {
		printk("add_timer() called with non-zero list from %08lx\n",
			((unsigned long *) &timer)[-1]);
		return;
	}
#endif
	p = &timer_head;
	timer->expires += jiffies;
	save_flags(flags);
	cli();
	/* find the first entry expiring no earlier than we do
	   (terminates at the ~0-expiry head sentinel) */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	/* link in just before p */
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}
397
/*
 * Remove a timer from the pending list.  Returns 1 if it was found
 * and unlinked (with timer->expires converted back to a relative
 * delay), 0 if it was not pending.  The list walk/unlink runs with
 * interrupts blocked.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	/* debugging variant: verify the timer is really on the list */
	struct timer_list * p;

	p = &timer_head;
	save_flags(flags);
	cli();
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			/* hand back the unexpired remainder */
			timer->expires -= jiffies;
			return 1;
		}
	}
	/* non-NULL links on a timer that is not listed means trouble */
	if (timer->next || timer->prev)
		printk("del_timer() called from %08lx with timer not initialized\n",
			((unsigned long *) &timer)[-1]);
	restore_flags(flags);
	return 0;
#else
	/* fast variant: trust timer->next as the "is pending" flag */
	save_flags(flags);
	cli();
	if (timer->next) {
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		restore_flags(flags);
		timer->expires -= jiffies;
		return 1;
	}
	restore_flags(flags);
	return 0;
#endif
}
437
/* Old-style static timer table: one slot per well-known timer; a set
   bit in timer_active marks the corresponding slot as pending. */
unsigned long timer_active = 0;
struct timer_struct timer_table[32];

/* Load averages in FIXED_1 fixed-point, exponentially decayed over
   three windows; maintained by calc_load(). */
unsigned long avenrun[3] = { 0,0,0 };
448
449
450
451
/*
 * Count "active" tasks -- running, uninterruptible or swapping -- in
 * FIXED_1 fixed-point units for the load average.  The loop bound
 * (p > &FIRST_TASK) deliberately excludes task[0].
 */
static unsigned long count_active_tasks(void)
{
	struct task_struct **p;
	unsigned long nr = 0;

	for(p = &LAST_TASK; p > &FIRST_TASK; --p)
		if (*p && ((*p)->state == TASK_RUNNING ||
			   (*p)->state == TASK_UNINTERRUPTIBLE ||
			   (*p)->state == TASK_SWAPPING))
			nr += FIXED_1;
	return nr;
}
464
/*
 * Called from do_timer() every tick; every LOAD_FREQ ticks it folds
 * the current active-task count into the three decaying averages.
 */
static inline void calc_load(void)
{
	unsigned long active_tasks;	/* fixed-point (FIXED_1 units) */
	static int count = LOAD_FREQ;

	if (count-- > 0)
		return;
	count = LOAD_FREQ;
	active_tasks = count_active_tasks();
	CALC_LOAD(avenrun[0], EXP_1, active_tasks);
	CALC_LOAD(avenrun[1], EXP_5, active_tasks);
	CALC_LOAD(avenrun[2], EXP_15, active_tasks);
}
478
479
480
481
482
483
484
485
486
487
488
/*
 * Runs once per second from do_timer().  Ages the NTP maximum-error
 * bound, computes the per-tick phase adjustment (time_adj) for the
 * next second, handles scheduled leap seconds at midnight, and
 * periodically writes the software clock back to the CMOS RTC.
 */
static void second_overflow(void)
{
	long ltemp;
	/* time the CMOS clock was last updated */
	static long last_rtc_update=0;
	extern int set_rtc_mmss(unsigned long);

	/* grow the maximum error bound, saturating at 0x70000000 */
	time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
	    0x70000000 : (time_maxerror + time_tolerance);

	/* PLL: convert the remaining offset into next second's
	   per-tick adjustment, and consume that much of the offset */
	if (time_offset < 0) {
		ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
		time_adj = - time_adj;
	} else if (time_offset > 0) {
		ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
	} else {
		time_adj = 0;
	}

	time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
	    + FINETUNE;

	/* leap-second processing at day boundaries */
	switch (time_status) {
	case TIME_INS:
		/* insert a second: repeat the day's last second */
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_status = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 GMT\n");
		}
		break;

	case TIME_DEL:
		/* delete a second: skip the day's last second */
		if (xtime.tv_sec % 86400 == 86399) {
			xtime.tv_sec++;
			time_status = TIME_OK;
			printk("Clock: deleting leap second 23:59:59 GMT\n");
		}
		break;

	case TIME_OOP:
		time_status = TIME_OK;
		break;
	}
	/* Update the RTC roughly every 11 minutes.  NOTE: the else
	   binds to the inner if -- on a failed update, pretend the
	   last update was 600 s ago so we retry in about a minute. */
	if (xtime.tv_sec > last_rtc_update + 660)
	  if (set_rtc_mmss(xtime.tv_sec) == 0)
	    last_rtc_update = xtime.tv_sec;
	  else
	    last_rtc_update = xtime.tv_sec - 600;
}
547
548
549
550
/*
 * Timer bottom half: run every expired timer_list entry (handlers
 * execute with interrupts enabled; only the list manipulation itself
 * is done under cli), then run every expired old-style timer_table
 * slot.
 */
static void timer_bh(void * unused)
{
	unsigned long mask;
	struct timer_struct *tp;
	struct timer_list * timer;

	cli();
	while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		/* unlink before calling so the handler may re-add it */
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();
		fn(data);
		cli();
	}
	sti();

	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;	/* one-shot: clear before running */
		tp->fn();
		sti();
	}
}
582
/* TQUEUE bottom half: drain everything queued on tq_timer. */
void tqueue_bh(void * unused)
{
	run_task_queue(&tq_timer);
}
587
588
589
590
591
592
593
/*
 * The timer tick handler.  Advances xtime (applying the NTP phase
 * adjustment and any adjtime() slewing), bumps jiffies, charges the
 * tick to user or system time, decrements the interval timers and
 * the scheduling quantum, and marks the timer/tqueue bottom halves
 * when there is expired work to do.
 */
static void do_timer(struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;

	long ltemp;

	/* Accumulate the phase adjustment; once it amounts to a whole
	   (scaled) microsecond, fold it into this tick's advance. */
	time_phase += time_adj;
	if (time_phase < -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase > FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust)
	{
		/* adjtime() in progress: slew by at most tickadj
		   microseconds per tick, in the requested direction,
		   until the outstanding adjustment is used up */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		/* consume this tick's slice of the adjustment */
		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;

	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();
	}

	jiffies++;
	calc_load();
	/* charge the tick to user time if the interrupted code ran in
	   vm86 mode or outside ring 0, otherwise to system time */
	if ((VM_MASK & regs->eflags) || (3 & regs->cs)) {
		current->utime++;
		if (current != task[0]) {
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}
		/* ITIMER_VIRTUAL only runs down during user time */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		current->stime++;
		if(current != task[0])
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		/* bump the profiling bucket for the interrupted EIP */
		if (prof_buffer && current != task[0]) {
			unsigned long eip = regs->eip;
			eip >>= 2;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}
	/* quantum used up: ask for a reschedule (task[0] is exempt) */
	if (current != task[0] && 0 > --current->counter) {
		current->counter = 0;
		need_resched = 1;
	}
	/* ITIMER_PROF runs down in both user and system time */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
	/* mark the bottom half if any timer_table slot has expired */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	cli();
	itimer_ticks++;
	/* an ITIMER_REAL is due: schedule() delivers the SIGALRMs */
	if (itimer_ticks > itimer_next)
		need_resched = 1;
	/* head sentinel expiry is ~0, so an empty list never fires */
	if (timer_head.next->expires < jiffies)
		mark_bh(TIMER_BH);
	if (tq_timer != &tq_last)
		mark_bh(TQUEUE_BH);
	sti();
}
703
704 asmlinkage int sys_alarm(long seconds)
705 {
706 struct itimerval it_new, it_old;
707
708 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
709 it_new.it_value.tv_sec = seconds;
710 it_new.it_value.tv_usec = 0;
711 _setitimer(ITIMER_REAL, &it_new, &it_old);
712 return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
713 }
714
/* getpid(): return the calling task's process id. */
asmlinkage int sys_getpid(void)
{
	return current->pid;
}
719
/* getppid(): return the pid of the original parent (p_opptr). */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}
724
/* getuid(): return the real user id. */
asmlinkage int sys_getuid(void)
{
	return current->uid;
}
729
/* geteuid(): return the effective user id. */
asmlinkage int sys_geteuid(void)
{
	return current->euid;
}
734
/* getgid(): return the real group id. */
asmlinkage int sys_getgid(void)
{
	return current->gid;
}
739
/* getegid(): return the effective group id. */
asmlinkage int sys_getegid(void)
{
	return current->egid;
}
744
745 asmlinkage int sys_nice(long increment)
746 {
747 int newprio;
748
749 if (increment < 0 && !suser())
750 return -EPERM;
751 newprio = current->priority - increment;
752 if (newprio < 1)
753 newprio = 1;
754 if (newprio > 35)
755 newprio = 35;
756 current->priority = newprio;
757 return 0;
758 }
759
760 static void show_task(int nr,struct task_struct * p)
761 {
762 unsigned long free;
763 static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
764
765 printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
766 if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
767 printk(stat_nam[p->state]);
768 else
769 printk(" ");
770 if (p == current)
771 printk(" current ");
772 else
773 printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);
774 for (free = 1; free < 1024 ; free++) {
775 if (((unsigned long *)p->kernel_stack_page)[free])
776 break;
777 }
778 printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
779 if (p->p_cptr)
780 printk("%5d ", p->p_cptr->pid);
781 else
782 printk(" ");
783 if (p->p_ysptr)
784 printk("%7d", p->p_ysptr->pid);
785 else
786 printk(" ");
787 if (p->p_osptr)
788 printk(" %5d\n", p->p_osptr->pid);
789 else
790 printk("\n");
791 }
792
/*
 * Dump a one-line summary of every existing task to the console.
 */
void show_state(void)
{
	int i;

	printk(" free sibling\n");
	printk(" task PC stack pid father child younger older\n");
	for (i=0 ; i<NR_TASKS ; i++)
		if (task[i])
			show_task(i,task[i]);
}
803
/*
 * One-time scheduler initialization: install the timer/tqueue bottom
 * halves, set up the initial task's TSS/LDT descriptors and the
 * system-call gate, clear the per-task descriptor slots, program the
 * timer chip for HZ interrupts, and claim the timer IRQ.
 */
void sched_init(void)
{
	int i;
	struct desc_struct * p;

	bh_base[TIMER_BH].routine = timer_bh;
	bh_base[TQUEUE_BH].routine = tqueue_bh;
	if (sizeof(struct sigaction) != 16)
		panic("Struct sigaction MUST be 16 bytes");
	set_tss_desc(gdt+FIRST_TSS_ENTRY,&init_task.tss);
	set_ldt_desc(gdt+FIRST_LDT_ENTRY,&default_ldt,1);
	set_system_gate(0x80,&system_call);	/* int 0x80 system-call entry */
	/* clear the TSS/LDT descriptor pair of every other task slot */
	p = gdt+2+FIRST_TSS_ENTRY;
	for(i=1 ; i<NR_TASKS ; i++) {
		task[i] = NULL;
		p->a=p->b=0;
		p++;
		p->a=p->b=0;
		p++;
	}
	/* clear NT in EFLAGS so task switching behaves */
	__asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
	load_TR(0);
	load_ldt(0);
	/* program timer channel 0 for periodic interrupts at HZ */
	outb_p(0x34,0x43);		/* mode/command: channel 0, LSB/MSB, mode 2 */
	outb_p(LATCH & 0xff , 0x40);	/* LSB of the LATCH divisor */
	outb(LATCH >> 8 , 0x40);	/* MSB of the LATCH divisor */
	if (request_irq(TIMER_IRQ,(void (*)(int)) do_timer)!=0)
		panic("Could not allocate timer IRQ!");
}