This source file includes following definitions.
- math_state_restore
- math_emulate
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- immediate_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/tqueue.h>
27 #include <linux/resource.h>
28
29 #include <asm/system.h>
30 #include <asm/io.h>
31 #include <asm/segment.h>
32
33 #define TIMER_IRQ 0
34
35 #include <linux/timex.h>
36
37
38
39
/* Length of a timer tick in microseconds, and the current wall-clock time. */
40 long tick = 1000000 / HZ;
41 volatile struct timeval xtime;
/* Max usec adjustment applied per tick by adjtime()-style slewing. */
42 int tickadj = 500/HZ;
43
/* Task queues run from bottom halves: tq_timer each tick, tq_immediate ASAP. */
44 DECLARE_TASK_QUEUE(tq_timer);
45 DECLARE_TASK_QUEUE(tq_immediate);
46
47
48
49
/* Kernel clock-discipline (NTP adjtimex) state variables. */
50 int time_status = TIME_BAD;
51 long time_offset = 0;
52 long time_constant = 0;
53 long time_tolerance = MAXFREQ;
54 long time_precision = 1;
55 long time_maxerror = 0x70000000;
56 long time_esterror = 0x70000000;
57 long time_phase = 0;
58 long time_freq = 0;
59 long time_adj = 0;
60 long time_reftime = 0;
61
/* Remaining adjtime() correction, and the per-tick step applied from it. */
62 long time_adjust = 0;
63 long time_adjust_step = 0;
64
/* Set when a reschedule should happen on return to user mode. */
65 int need_resched = 0;
66 unsigned long event = 0;
67
68
69
70
/* CPU capability flags filled in during boot-time detection. */
71 int hard_math = 0;
72 int x86 = 0;
73 int ignore_irq13 = 0;
74 int wp_works_ok = 0;
75 int hlt_works_ok = 1;
76
77
78
79
80 int EISA_bus = 0;
81
/* Kernel profiling buffer (enabled via CONFIG_PROFILE / boot option). */
82 extern int _setitimer(int, struct itimerval *, struct itimerval *);
83 unsigned long * prof_buffer = NULL;
84 unsigned long prof_len = 0;
85
86 #define _S(nr) (1<<((nr)-1))
87
88 extern void mem_use(void);
89
90 extern int timer_interrupt(void);
91 asmlinkage int system_call(void);
92
/* Static stack and task structure for the initial (idle) task, task[0]. */
93 static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
94 static struct vm_area_struct init_mmap = INIT_MMAP;
95 struct task_struct init_task = INIT_TASK;
96
/* Ticks since boot; incremented by do_timer() on every timer interrupt. */
97 unsigned long volatile jiffies=0;
98
99 struct task_struct *current = &init_task;
/* Task whose context is live in the FPU (lazy FPU switching). */
100 struct task_struct *last_task_used_math = NULL;
101
102 struct task_struct * task[NR_TASKS] = {&init_task, };
103
/* Boot-time stack, handed to the CPU via stack_start at startup. */
104 long user_stack [ PAGE_SIZE>>2 ] = { STACK_MAGIC, };
105
106 struct {
107 long * a;
108 short b;
109 } stack_start = { & user_stack [PAGE_SIZE>>2] , KERNEL_DS };
110
/* System-wide accounting counters exported through /proc. */
111 struct kernel_stat kstat = { 0 };
112
113
114
115
116
117
118
119
/*
 * Lazy FPU context switch, entered on the first coprocessor instruction
 * a task executes after a task switch (device-not-available trap).
 * Saves the 387 state of the previous FPU owner and loads, or freshly
 * initializes, the state of 'current'.
 */
120 asmlinkage void math_state_restore(void)
121 {
/* Clear CR0.TS so FPU instructions stop trapping for this task. */
122 __asm__ __volatile__("clts");
/* Still the FPU owner: registers already hold our state. */
123 if (last_task_used_math == current)
124 return;
/* Arm a watchdog around the save/restore in case the coprocessor
   wedges (covers broken 387 error-reporting setups). */
125 timer_table[COPRO_TIMER].expires = jiffies+50;
126 timer_active |= 1<<COPRO_TIMER;
/* Save the previous owner's state, or just clear pending exceptions
   if no task has used the FPU yet. */
127 if (last_task_used_math)
128 __asm__("fnsave %0":"=m" (last_task_used_math->tss.i387));
129 else
130 __asm__("fnclex");
131 last_task_used_math = current;
/* Restore saved state, or initialize the FPU on first use. */
132 if (current->used_math) {
133 __asm__("frstor %0": :"m" (current->tss.i387));
134 } else {
135 __asm__("fninit");
136 current->used_math=1;
137 }
/* Survived: disarm the watchdog. */
138 timer_active &= ~(1<<COPRO_TIMER);
139 }
140
141 #ifndef CONFIG_MATH_EMULATION
142
/*
 * Stub used when math emulation is not compiled in: a task that executes
 * an FPU instruction on FPU-less hardware gets SIGFPE and is scheduled
 * away (the signal is delivered on return to user mode).
 */
143 asmlinkage void math_emulate(long arg)
144 {
145 printk("math-emulation not enabled and no coprocessor found.\n");
146 printk("killing %s.\n",current->comm);
147 send_sig(SIGFPE,current,1);
148 schedule();
149 }
150
151 #endif
152
/* Ticks accumulated since schedule() last processed real-interval timers,
   and the soonest it_real_value among all tasks (~0 = none pending). */
153 unsigned long itimer_ticks = 0;
154 unsigned long itimer_next = ~0;
155
156
157
158
159
160
161
162
163
164
165
166
167
/*
 * The scheduler proper.  One pass over the task list delivers expired
 * ITIMER_REAL timers and wakes tasks whose signals or timeouts are due;
 * a second pass picks the runnable task with the largest remaining
 * counter and switches to it.  Must not be called from interrupts.
 */
168 asmlinkage void schedule(void)
169 {
170 int c;
171 struct task_struct * p;
172 struct task_struct * next;
173 unsigned long ticks;
174
175
176
/* Scheduling with intr_count set means a bug somewhere; complain and
   force the count back to zero so we can limp on. */
177 if (intr_count) {
178 printk("Aiee: scheduling in interrupt\n");
179 intr_count = 0;
180 }
/* Atomically grab and reset the tick count accumulated by do_timer(). */
181 cli();
182 ticks = itimer_ticks;
183 itimer_ticks = 0;
184 itimer_next = ~0;
185 sti();
186 need_resched = 0;
/* Pass 1: walk the circular task list (init_task is the anchor). */
187 p = &init_task;
188 for (;;) {
189 if ((p = p->next_task) == &init_task)
190 goto confuse_gcc1;
/* Deliver SIGALRM for an expired ITIMER_REAL and re-arm it. */
191 if (ticks && p->it_real_value) {
192 if (p->it_real_value <= ticks) {
193 send_sig(SIGALRM, p, 1);
/* One-shot timer: clear it and skip the decrement below. */
194 if (!p->it_real_incr) {
195 p->it_real_value = 0;
196 goto end_itimer;
197 }
/* Periodic: advance past all missed periods. */
198 do {
199 p->it_real_value += p->it_real_incr;
200 } while (p->it_real_value <= ticks);
201 }
202 p->it_real_value -= ticks;
/* Track the earliest pending expiry for do_timer(). */
203 if (p->it_real_value < itimer_next)
204 itimer_next = p->it_real_value;
205 }
206 end_itimer:
/* Wake interruptible sleepers with pending signals or timeouts. */
207 if (p->state != TASK_INTERRUPTIBLE)
208 continue;
209 if (p->signal & ~p->blocked) {
210 p->state = TASK_RUNNING;
211 continue;
212 }
213 if (p->timeout && p->timeout <= jiffies) {
214 p->timeout = 0;
215 p->state = TASK_RUNNING;
216 }
217 }
218 confuse_gcc1:
219
220
221 #if 0
222
223
224
225
226 if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
227 current->counter < current->priority*2) {
228 ++current->counter;
229 }
230 #endif
/* Pass 2: choose the runnable task with the highest counter.  Idle
   (init_task) is the default winner when nothing else is runnable. */
231 c = -1000;
232 next = p = &init_task;
233 for (;;) {
234 if ((p = p->next_task) == &init_task)
235 goto confuse_gcc2;
236 if (p->state == TASK_RUNNING && p->counter > c)
237 c = p->counter, next = p;
238 }
239 confuse_gcc2:
/* All runnable counters exhausted: recharge everyone, weighting by
   priority (sleepers keep half their old counter as a bonus). */
240 if (!c) {
241 for_each_task(p)
242 p->counter = (p->counter >> 1) + p->priority;
243 }
244 if (current == next)
245 return;
246 kstat.context_swtch++;
247 switch_to(next);
248
/* Runs in the NEW task's context: reload its hardware debug registers
   if it has any breakpoints enabled (DR7 non-zero). */
249 if(current->debugreg[7]){
250 loaddebug(0);
251 loaddebug(1);
252 loaddebug(2);
253 loaddebug(3);
254 loaddebug(6);
255 };
256 }
257
/*
 * pause(2): sleep interruptibly until a signal arrives.  Returns
 * -ERESTARTNOHAND so the syscall is never restarted after the handler.
 */
258 asmlinkage int sys_pause(void)
259 {
260 current->state = TASK_INTERRUPTIBLE;
261 schedule();
262 return -ERESTARTNOHAND;
263 }
264
265
266
267
268
269
270
271
272
/*
 * Wake every task (interruptible or uninterruptible) on the circular
 * wait queue *q.  Entries are left on the queue; the woken tasks remove
 * themselves.  Sets need_resched if a woken task clearly deserves the
 * CPU more than the current one.
 */
273 void wake_up(struct wait_queue **q)
274 {
275 struct wait_queue *tmp;
276 struct task_struct * p;
277
278 if (!q || !(tmp = *q))
279 return;
280 do {
281 if ((p = tmp->task) != NULL) {
282 if ((p->state == TASK_UNINTERRUPTIBLE) ||
283 (p->state == TASK_INTERRUPTIBLE)) {
284 p->state = TASK_RUNNING;
/* Preempt if the woken task has noticeably more quantum left. */
285 if (p->counter > current->counter + 3)
286 need_resched = 1;
287 }
288 }
/* A NULL next means the circular list is corrupt: report and bail
   rather than dereference it. */
289 if (!tmp->next) {
290 printk("wait_queue is bad (eip = %p)\n",
291 __builtin_return_address(0));
292 printk(" q = %p\n",q);
293 printk(" *q = %p\n",*q);
294 printk(" tmp = %p\n",tmp);
295 break;
296 }
297 tmp = tmp->next;
298 } while (tmp != *q);
299 }
300
/*
 * Like wake_up(), but only wakes tasks in TASK_INTERRUPTIBLE sleep;
 * uninterruptible sleepers on the same queue are left alone.
 */
301 void wake_up_interruptible(struct wait_queue **q)
302 {
303 struct wait_queue *tmp;
304 struct task_struct * p;
305
306 if (!q || !(tmp = *q))
307 return;
308 do {
309 if ((p = tmp->task) != NULL) {
310 if (p->state == TASK_INTERRUPTIBLE) {
311 p->state = TASK_RUNNING;
/* Preempt if the woken task has noticeably more quantum left. */
312 if (p->counter > current->counter + 3)
313 need_resched = 1;
314 }
315 }
/* Corrupt (non-circular) queue: report and stop walking. */
316 if (!tmp->next) {
317 printk("wait_queue is bad (eip = %p)\n",
318 __builtin_return_address(0));
319 printk(" q = %p\n",q);
320 printk(" *q = %p\n",*q);
321 printk(" tmp = %p\n",tmp);
322 break;
323 }
324 tmp = tmp->next;
325 } while (tmp != *q);
326 }
327
/*
 * Semaphore slow path: called when down() found the count unavailable.
 * The state is set to TASK_UNINTERRUPTIBLE *before* each count check so
 * a wake_up between the check and the schedule() is not lost.
 */
328 void __down(struct semaphore * sem)
329 {
330 struct wait_queue wait = { current, NULL };
331 add_wait_queue(&sem->wait, &wait);
332 current->state = TASK_UNINTERRUPTIBLE;
333 while (sem->count <= 0) {
334 schedule();
/* Re-set the state: schedule()/wake_up left us TASK_RUNNING. */
335 current->state = TASK_UNINTERRUPTIBLE;
336 }
337 current->state = TASK_RUNNING;
338 remove_wait_queue(&sem->wait, &wait);
339 }
340
/*
 * Common helper for sleep_on()/interruptible_sleep_on(): queue the
 * current task on *p in the given sleep state, enable interrupts, and
 * schedule away.  The caller's interrupt-flag state is restored on
 * return.  The wait entry lives on this stack frame, hence the
 * mandatory remove_wait_queue() before returning.
 */
341 static inline void __sleep_on(struct wait_queue **p, int state)
342 {
343 unsigned long flags;
344 struct wait_queue wait = { current, NULL };
345
346 if (!p)
347 return;
/* The idle task must never sleep: nothing could ever run again. */
348 if (current == task[0])
349 panic("task[0] trying to sleep");
/* Set the state before queueing so a concurrent wake_up is not lost. */
350 current->state = state;
351 add_wait_queue(p, &wait);
352 save_flags(flags);
353 sti();
354 schedule();
355 remove_wait_queue(p, &wait);
356 restore_flags(flags);
357 }
358
/* Sleep on *p until woken; signals also terminate the sleep. */
359 void interruptible_sleep_on(struct wait_queue **p)
360 {
361 __sleep_on(p,TASK_INTERRUPTIBLE);
362 }
363
/* Sleep on *p until explicitly woken; signals are ignored meanwhile. */
364 void sleep_on(struct wait_queue **p)
365 {
366 __sleep_on(p,TASK_UNINTERRUPTIBLE);
367 }
368
369
370
371
372
/* Anchor of the doubly-linked, expiry-sorted timer list; its ~0 expiry
   makes it a sentinel that add_timer's insertion scan always stops at. */
373 static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
/* Non-zero enables the paranoid list checks in add_timer()/del_timer(). */
374 #define SLOW_BUT_DEBUGGING_TIMERS 1
375
/*
 * Insert a timer into the expiry-sorted list.  timer->expires is given
 * as a relative tick count and converted to an absolute jiffies value
 * here.  The list walk and splice run with interrupts off to protect
 * against timer_bh()/do_timer().
 */
376 void add_timer(struct timer_list * timer)
377 {
378 unsigned long flags;
379 struct timer_list *p;
380
381 #if SLOW_BUT_DEBUGGING_TIMERS
/* An already-linked timer would corrupt the list: refuse and report. */
382 if (timer->next || timer->prev) {
383 printk("add_timer() called with non-zero list from %p\n",
384 __builtin_return_address(0));
385 return;
386 }
387 #endif
388 p = &timer_head;
389 timer->expires += jiffies;
390 save_flags(flags);
391 cli();
/* Find the first entry expiring no earlier than us; the sentinel's
   ~0 expiry guarantees termination. */
392 do {
393 p = p->next;
394 } while (timer->expires > p->expires);
/* Splice in just before p. */
395 timer->next = p;
396 timer->prev = p->prev;
397 p->prev = timer;
398 timer->prev->next = timer;
399 restore_flags(flags);
400 }
401
/*
 * Remove a timer from the list.  Returns 1 if the timer was pending
 * (and converts expires back to a relative count), 0 otherwise.  The
 * debugging build verifies the timer is actually on the list before
 * touching its links.
 */
402 int del_timer(struct timer_list * timer)
403 {
404 unsigned long flags;
405 #if SLOW_BUT_DEBUGGING_TIMERS
406 struct timer_list * p;
407
408 p = &timer_head;
409 save_flags(flags);
410 cli();
/* Scan the whole list for this timer before unlinking it. */
411 while ((p = p->next) != &timer_head) {
412 if (p == timer) {
413 timer->next->prev = timer->prev;
414 timer->prev->next = timer->next;
415 timer->next = timer->prev = NULL;
416 restore_flags(flags);
/* Hand back the remaining time as a relative count. */
417 timer->expires -= jiffies;
418 return 1;
419 }
420 }
/* Not on the list but with live links: caller forgot to zero it. */
421 if (timer->next || timer->prev)
422 printk("del_timer() called from %p with timer not initialized\n",
423 __builtin_return_address(0));
424 restore_flags(flags);
425 return 0;
426 #else
/* Fast build: trust timer->next as the "is pending" indicator. */
427 save_flags(flags);
428 cli();
429 if (timer->next) {
430 timer->next->prev = timer->prev;
431 timer->prev->next = timer->next;
432 timer->next = timer->prev = NULL;
433 restore_flags(flags);
434 timer->expires -= jiffies;
435 return 1;
436 }
437 restore_flags(flags);
438 return 0;
439 #endif
440 }
441
/* Bitmask of armed static timers, and the 32-slot static timer table
   (one slot per TIMER_* index, e.g. COPRO_TIMER). */
442 unsigned long timer_active = 0;
443 struct timer_struct timer_table[32];
444
445
446
447
448
449
450
/* 1-, 5- and 15-minute load averages in FIXED_1 fixed-point form. */
451 unsigned long avenrun[3] = { 0,0,0 };
452
453
454
455
456 static unsigned long count_active_tasks(void)
457 {
458 struct task_struct **p;
459 unsigned long nr = 0;
460
461 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
462 if (*p && ((*p)->state == TASK_RUNNING ||
463 (*p)->state == TASK_UNINTERRUPTIBLE ||
464 (*p)->state == TASK_SWAPPING))
465 nr += FIXED_1;
466 return nr;
467 }
468
469 static inline void calc_load(void)
470 {
471 unsigned long active_tasks;
472 static int count = LOAD_FREQ;
473
474 if (count-- > 0)
475 return;
476 count = LOAD_FREQ;
477 active_tasks = count_active_tasks();
478 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
479 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
480 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
481 }
482
483
484
485
486
487
488
489
490
491
492
/*
 * Once-per-second clock discipline: bumps the maximum error bound,
 * computes the phase-lock adjustment (time_adj) that do_timer() will
 * spread over the next second's ticks, handles leap-second insertion
 * and deletion, and periodically writes the software clock back to the
 * CMOS RTC.  Follows the Mills kernel PLL model used by adjtimex.
 */
493 static void second_overflow(void)
494 {
495 long ltemp;
496
497 static long last_rtc_update=0;
498 extern int set_rtc_mmss(unsigned long);
499
500
/* Grow the max error estimate by the frequency tolerance, saturating. */
501 time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
502 0x70000000 : (time_maxerror + time_tolerance);
503
504
/* Phase-locked loop: amortize time_offset into a per-tick adjustment,
   with the loop stiffness set by time_constant. */
505 if (time_offset < 0) {
506 ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
507 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
508 time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
509 time_adj = - time_adj;
510 } else if (time_offset > 0) {
511 ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
512 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
513 time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
514 } else {
515 time_adj = 0;
516 }
517
/* Fold in the long-term frequency correction plus a fixed trim. */
518 time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
519 + FINETUNE;
520
521
/* Leap-second state machine, driven by adjtimex status. */
522 switch (time_status) {
523 case TIME_INS:
/* Insert 23:59:60 at midnight UTC by stepping back one second. */
524
525 if (xtime.tv_sec % 86400 == 0) {
526 xtime.tv_sec--;
527 time_status = TIME_OOP;
528 printk("Clock: inserting leap second 23:59:60 GMT\n");
529 }
530 break;
531
532 case TIME_DEL:
/* Delete 23:59:59 by skipping ahead one second. */
533
534 if (xtime.tv_sec % 86400 == 86399) {
535 xtime.tv_sec++;
536 time_status = TIME_OK;
537 printk("Clock: deleting leap second 23:59:59 GMT\n");
538 }
539 break;
540
541 case TIME_OOP:
/* The inserted leap second has elapsed. */
542 time_status = TIME_OK;
543 break;
544 }
/* Sync the CMOS RTC at most every ~11 minutes; on failure retry again
   in 60 seconds by pretending the last update was 600s ago. */
545 if (xtime.tv_sec > last_rtc_update + 660)
546 if (set_rtc_mmss(xtime.tv_sec) == 0)
547 last_rtc_update = xtime.tv_sec;
548 else
549 last_rtc_update = xtime.tv_sec - 600;
550 }
551
552
553
554
/*
 * Timer bottom half: runs with interrupts enabled, outside hard
 * interrupt context.  First drains expired dynamic timers from the
 * sorted list, then runs any expired entries in the static timer table.
 */
555 static void timer_bh(void * unused)
556 {
557 unsigned long mask;
558 struct timer_struct *tp;
559 struct timer_list * timer;
560
/* List manipulation happens with interrupts off, but each handler is
   called with interrupts back on. */
561 cli();
562 while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
/* Copy function/data before unlinking: the handler may re-add or
   free the timer. */
563 void (*fn)(unsigned long) = timer->function;
564 unsigned long data = timer->data;
565 timer->next->prev = timer->prev;
566 timer->prev->next = timer->next;
567 timer->next = timer->prev = NULL;
568 sti();
569 fn(data);
570 cli();
571 }
572 sti();
573
/* Static table: one bit of timer_active per slot; stop as soon as the
   mask passes the highest armed bit. */
574 for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
575 if (mask > timer_active)
576 break;
577 if (!(mask & timer_active))
578 continue;
579 if (tp->expires > jiffies)
580 continue;
/* Disarm before calling: the handler may re-arm itself. */
581 timer_active &= ~mask;
582 tp->fn();
583 sti();
584 }
585 }
586
/* Bottom half that drains the per-tick task queue (tq_timer). */
587 void tqueue_bh(void * unused)
588 {
589 run_task_queue(&tq_timer);
590 }
591
/* Bottom half that drains the "run as soon as possible" task queue. */
592 void immediate_bh(void * unused)
593 {
594 run_task_queue(&tq_immediate);
595 }
596
597
598
599
600
601
602
/*
 * The timer interrupt handler proper (hard IRQ context).  Advances
 * xtime with NTP phase/adjtime corrections, bumps jiffies, does
 * per-task CPU accounting and rlimit/itimer checks, and marks the
 * timer/tqueue bottom halves when there is deferred work.
 */
603 static void do_timer(struct pt_regs * regs)
604 {
605 unsigned long mask;
606 struct timer_struct *tp;
607
608 long ltemp, psecs;
609
610
611
/* Advance the NTP phase accumulator and fold whole microseconds of
   correction into this tick's xtime increment. */
612
613 time_phase += time_adj;
614 if (time_phase < -FINEUSEC) {
615 ltemp = -time_phase >> SHIFT_SCALE;
616 time_phase += ltemp << SHIFT_SCALE;
617 xtime.tv_usec += tick + time_adjust_step - ltemp;
618 }
619 else if (time_phase > FINEUSEC) {
620 ltemp = time_phase >> SHIFT_SCALE;
621 time_phase -= ltemp << SHIFT_SCALE;
622 xtime.tv_usec += tick + time_adjust_step + ltemp;
623 } else
624 xtime.tv_usec += tick + time_adjust_step;
625
/* adjtime(): slew at most +/- tickadj microseconds per tick until the
   requested offset is used up. */
626 if (time_adjust)
627 {
628
629
630
631
632
633
634
635
636
637 if (time_adjust > tickadj)
638 time_adjust_step = tickadj;
639 else if (time_adjust < -tickadj)
640 time_adjust_step = -tickadj;
641 else
642 time_adjust_step = time_adjust;
643
644
645 time_adjust -= time_adjust_step;
646 }
647 else
648 time_adjust_step = 0;
649
/* Carry microseconds into seconds; once per second run the clock
   discipline machinery. */
650 if (xtime.tv_usec >= 1000000) {
651 xtime.tv_usec -= 1000000;
652 xtime.tv_sec++;
653 second_overflow();
654 }
655
656 jiffies++;
657 calc_load();
/* Charge the tick to user or system time depending on where the CPU
   was interrupted (VM86 mode counts as user). */
658 if ((VM_MASK & regs->eflags) || (3 & regs->cs)) {
659 current->utime++;
660 if (current != task[0]) {
661 if (current->priority < 15)
662 kstat.cpu_nice++;
663 else
664 kstat.cpu_user++;
665 }
666
/* ITIMER_VIRTUAL ticks only while in user mode. */
667 if (current->it_virt_value && !(--current->it_virt_value)) {
668 current->it_virt_value = current->it_virt_incr;
669 send_sig(SIGVTALRM,current,1);
670 }
671 } else {
672 current->stime++;
673 if(current != task[0])
674 kstat.cpu_system++;
675 #ifdef CONFIG_PROFILE
/* Kernel profiling: bucket the interrupted EIP. */
676 if (prof_buffer && current != task[0]) {
677 unsigned long eip = regs->eip;
678 eip >>= 2;
679 if (eip < prof_len)
680 prof_buffer[eip]++;
681 }
682 #endif
683 }
684
685
/* RLIMIT_CPU enforcement: SIGKILL at the hard limit; SIGXCPU at the
   soft limit and then every 5 seconds past it. */
686
687 if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
688 (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
689 send_sig(SIGKILL, current, 1);
690 if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
691 (((current->stime + current->utime) % HZ) == 0)) {
692 psecs = (current->stime + current->utime) / HZ;
693
694 if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
695 send_sig(SIGXCPU, current, 1);
696
697 else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
698 ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
699 send_sig(SIGXCPU, current, 1);
700 }
701
/* Quantum expired (idle task excepted): ask for a reschedule. */
702 if (current != task[0] && 0 > --current->counter) {
703 current->counter = 0;
704 need_resched = 1;
705 }
706
/* ITIMER_PROF ticks in both user and system time. */
707 if (current->it_prof_value && !(--current->it_prof_value)) {
708 current->it_prof_value = current->it_prof_incr;
709 send_sig(SIGPROF,current,1);
710 }
/* Any expired static timer means the timer bottom half must run. */
711 for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
712 if (mask > timer_active)
713 break;
714 if (!(mask & timer_active))
715 continue;
716 if (tp->expires > jiffies)
717 continue;
718 mark_bh(TIMER_BH);
719 }
720 cli();
/* Tell schedule() it has real-interval timers to process. */
721 itimer_ticks++;
722 if (itimer_ticks > itimer_next)
723 need_resched = 1;
/* Expired dynamic timers or queued tq_timer work also need the BHs. */
724 if (timer_head.next->expires < jiffies)
725 mark_bh(TIMER_BH);
726 if (tq_timer != &tq_last)
727 mark_bh(TQUEUE_BH);
728 sti();
729 }
730
731 asmlinkage int sys_alarm(long seconds)
732 {
733 struct itimerval it_new, it_old;
734
735 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
736 it_new.it_value.tv_sec = seconds;
737 it_new.it_value.tv_usec = 0;
738 _setitimer(ITIMER_REAL, &it_new, &it_old);
739 return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
740 }
741
/* getpid(2): PID of the calling process. */
742 asmlinkage int sys_getpid(void)
743 {
744 return current->pid;
745 }
746
/* getppid(2): PID of the original parent (p_opptr survives reparenting
   by ptrace). */
747 asmlinkage int sys_getppid(void)
748 {
749 return current->p_opptr->pid;
750 }
751
/* getuid(2): real user ID. */
752 asmlinkage int sys_getuid(void)
753 {
754 return current->uid;
755 }
756
/* geteuid(2): effective user ID. */
757 asmlinkage int sys_geteuid(void)
758 {
759 return current->euid;
760 }
761
/* getgid(2): real group ID. */
762 asmlinkage int sys_getgid(void)
763 {
764 return current->gid;
765 }
766
/* getegid(2): effective group ID. */
767 asmlinkage int sys_getegid(void)
768 {
769 return current->egid;
770 }
771
772 asmlinkage int sys_nice(long increment)
773 {
774 int newprio;
775
776 if (increment < 0 && !suser())
777 return -EPERM;
778 newprio = current->priority - increment;
779 if (newprio < 1)
780 newprio = 1;
781 if (newprio > 35)
782 newprio = 35;
783 current->priority = newprio;
784 return 0;
785 }
786
/*
 * Print one row of the show_state() table for task 'p' in slot 'nr':
 * command name, slot (negated for the current task), state letter,
 * saved PC, free kernel-stack bytes, pid, and the family-tree pids.
 */
787 static void show_task(int nr,struct task_struct * p)
788 {
789 unsigned long free;
790 static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
791
792 printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
793 if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
794 printk(stat_nam[p->state]);
795 else
796 printk(" ");
/* The current task has no meaningful saved PC in its TSS. */
797 if (p == current)
798 printk(" current ");
799 else
800 printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);
/* Estimate untouched stack: scan up from the bottom for the first
   non-zero word (the stack grows downward). */
801 for (free = 1; free < 1024 ; free++) {
802 if (((unsigned long *)p->kernel_stack_page)[free])
803 break;
804 }
805 printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
806 if (p->p_cptr)
807 printk("%5d ", p->p_cptr->pid);
808 else
809 printk(" ");
810 if (p->p_ysptr)
811 printk("%7d", p->p_ysptr->pid);
812 else
813 printk(" ");
814 if (p->p_osptr)
815 printk(" %5d\n", p->p_osptr->pid);
816 else
817 printk("\n");
818 }
819
820 void show_state(void)
821 {
822 int i;
823
824 printk(" free sibling\n");
825 printk(" task PC stack pid father child younger older\n");
826 for (i=0 ; i<NR_TASKS ; i++)
827 if (task[i])
828 show_task(i,task[i]);
829 }
830
/*
 * One-time scheduler initialization at boot: installs the bottom-half
 * handlers, sets up the init task's TSS/LDT descriptors and the syscall
 * gate, clears the per-task descriptor slots, programs the PIT for HZ
 * interrupts, and claims the timer IRQ.
 */
831 void sched_init(void)
832 {
833 int i;
834 struct desc_struct * p;
835
836 bh_base[TIMER_BH].routine = timer_bh;
837 bh_base[TQUEUE_BH].routine = tqueue_bh;
838 bh_base[IMMEDIATE_BH].routine = immediate_bh;
/* Assembly entry code hard-codes a 16-byte sigaction layout. */
839 if (sizeof(struct sigaction) != 16)
840 panic("Struct sigaction MUST be 16 bytes");
841 set_tss_desc(gdt+FIRST_TSS_ENTRY,&init_task.tss);
842 set_ldt_desc(gdt+FIRST_LDT_ENTRY,&default_ldt,1);
/* int 0x80 is the system-call gate. */
843 set_system_gate(0x80,&system_call);
/* Zero the TSS+LDT descriptor pair for every non-init task slot. */
844 p = gdt+2+FIRST_TSS_ENTRY;
845 for(i=1 ; i<NR_TASKS ; i++) {
846 task[i] = NULL;
847 p->a=p->b=0;
848 p++;
849 p->a=p->b=0;
850 p++;
851 }
852
/* Clear EFLAGS.NT so the first task switch doesn't fault. */
853 __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
854 load_TR(0);
855 load_ldt(0);
/* Program PIT channel 0, mode 2 (rate generator), LSB then MSB of
   LATCH, giving HZ interrupts per second. */
856 outb_p(0x34,0x43);
857 outb_p(LATCH & 0xff , 0x40);
858 outb(LATCH >> 8 , 0x40);
859 if (request_irq(TIMER_IRQ,(void (*)(int)) do_timer, 0, "timer") != 0)
860 panic("Could not allocate timer IRQ!");
861 }
861 }