This source file includes following definitions.
- math_state_restore
- math_emulate
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- immediate_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/segment.h>
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/tqueue.h>
28 #include <linux/resource.h>
29
30 #include <asm/system.h>
31 #include <asm/io.h>
32 #include <asm/segment.h>
33
34 #define TIMER_IRQ 0
35
36 #include <linux/timex.h>
37
38
39
40
long tick = 1000000 / HZ;	/* timer interrupt period, in microseconds */
volatile struct timeval xtime;	/* the current wall-clock time */
int tickadj = 500/HZ;		/* microseconds slewed per tick by adjtime() */

/* task queues run from the timer and "immediate" bottom halves below */
DECLARE_TASK_QUEUE(tq_timer);
DECLARE_TASK_QUEUE(tq_immediate);

/*
 * Phase-locked-loop (NTP-style) clock discipline variables; consumed
 * by second_overflow() and do_timer() below.
 */
int time_status = TIME_BAD;	/* clock synchronization status */
long time_offset = 0;		/* remaining phase offset to slew out */
long time_constant = 0;		/* PLL time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance */
long time_precision = 1;	/* clock precision */
long time_maxerror = 0x70000000;/* maximum error bound */
long time_esterror = 0x70000000;/* estimated error bound */
long time_phase = 0;		/* phase accumulator (scaled, see SHIFT_SCALE) */
long time_freq = 0;		/* frequency offset */
long time_adj = 0;		/* per-tick phase adjustment, set once per second */
long time_reftime = 0;		/* time at last adjustment */

long time_adjust = 0;		/* outstanding adjtime() correction, microseconds */
long time_adjust_step = 0;	/* slice of time_adjust applied this tick */

int need_resched = 0;		/* set when schedule() should run at next opportunity */
unsigned long event = 0;
68
69
70
71
/* CPU capability flags — presumably filled in by boot/setup code (not
 * visible in this file); defaults here are the conservative values. */
int hard_math = 0;	/* non-zero if an FPU is present */
int x86 = 0;		/* CPU generation */
int ignore_irq13 = 0;	/* non-zero if FPU errors report via exception, not IRQ13 */
int wp_works_ok = 0;	/* non-zero if the WP bit is honoured in kernel mode */
int hlt_works_ok = 1;	/* cleared on CPUs where HLT misbehaves */

int EISA_bus = 0;	/* non-zero if an EISA bus was detected */

extern int _setitimer(int, struct itimerval *, struct itimerval *);
/* kernel profiling buffer used by do_timer() under CONFIG_PROFILE:
 * one counter per 4 bytes of kernel text (eip >> 2 indexes it) */
unsigned long * prof_buffer = NULL;
unsigned long prof_len = 0;
86
#define _S(nr) (1<<((nr)-1))	/* signal number -> bit in a signal mask */

extern void mem_use(void);

extern int timer_interrupt(void);
asmlinkage int system_call(void);

/* kernel stack for the initial (idle) task; the STACK_MAGIC marker in
 * the first slot lets overflow be detected (see show_task's scan) */
static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
struct task_struct init_task = INIT_TASK;

unsigned long volatile jiffies=0;	/* ticks since boot, bumped by do_timer() */

struct task_struct *current = &init_task;	/* the currently running task */
struct task_struct *last_task_used_math = NULL;	/* task whose state the FPU holds */

struct task_struct * task[NR_TASKS] = {&init_task, };

/* stack used by task 0; also guarded by STACK_MAGIC */
long user_stack [ PAGE_SIZE>>2 ] = { STACK_MAGIC, };

/* initial esp/ss pair — presumably loaded by the startup assembly
 * (not visible here); layout must match what that code expects */
struct {
	long * a;
	short b;
} stack_start = { & user_stack [PAGE_SIZE>>2] , KERNEL_DS };

struct kernel_stat kstat = { 0 };
113
114
115
116
117
118
119
120
/*
 * Lazy FPU context switch: hand the FPU to the current task.  The
 * clts/fnsave/frstor pattern suggests this runs from the "device not
 * available" trap on the first FPU use after a task switch — confirm
 * against the trap entry code.  A watchdog slot in timer_table guards
 * the window while the FPU state is in transit.
 */
asmlinkage void math_state_restore(void)
{
	/* clear TS so FPU instructions stop trapping */
	__asm__ __volatile__("clts");
	if (last_task_used_math == current)
		return;		/* FPU already holds our state */
	/* arm the coprocessor watchdog while state is being swapped */
	timer_table[COPRO_TIMER].expires = jiffies+50;
	timer_active |= 1<<COPRO_TIMER;
	if (last_task_used_math)
		/* save the previous owner's FPU context */
		__asm__("fnsave %0":"=m" (last_task_used_math->tss.i387));
	else
		__asm__("fnclex");	/* no previous owner: just clear exceptions */
	last_task_used_math = current;
	if (current->used_math) {
		/* reload this task's saved FPU context */
		__asm__("frstor %0": :"m" (current->tss.i387));
	} else {
		/* first FPU use by this task: start from a clean state */
		__asm__("fninit");
		current->used_math=1;
	}
	timer_active &= ~(1<<COPRO_TIMER);	/* disarm the watchdog */
}
141
#ifndef CONFIG_MATH_EMULATION

/*
 * Stub used when the math emulator is not configured in: any task
 * that executes an FPU instruction on an FPU-less machine ends up
 * here, gets SIGFPE, and we reschedule so the (normally fatal)
 * signal is acted on before returning to the faulting instruction.
 */
asmlinkage void math_emulate(long arg)
{
	printk("math-emulation not enabled and no coprocessor found.\n");
	printk("killing %s.\n",current->comm);
	send_sig(SIGFPE,current,1);
	schedule();
}

#endif
153
unsigned long itimer_ticks = 0;	/* ticks accumulated since schedule() last ran the itimer scan */
unsigned long itimer_next = ~0;	/* soonest pending it_real expiry seen by schedule() */
156
157
158
159
160
161
162
163
164
165
166
167
168
/*
 * 'schedule()' is the scheduler proper.  Two passes over the task
 * list: pass 1 delivers expired ITIMER_REAL timers (SIGALRM) and
 * wakes interruptible sleepers that have a pending unblocked signal
 * or an expired timeout; pass 2 picks the runnable task with the
 * largest remaining counter and switches to it.  The "confuse_gcc"
 * labels only break the loops out-of-line.
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * next;
	unsigned long ticks;

	if (intr_count) {
		/* should never happen: called from interrupt context.
		 * Complain and clear the count so we can limp on. */
		printk("Aiee: scheduling in interrupt\n");
		intr_count = 0;
	}
	/* atomically consume the ticks accumulated by do_timer() */
	cli();
	ticks = itimer_ticks;
	itimer_ticks = 0;
	itimer_next = ~0;
	sti();
	need_resched = 0;
	p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc1;
		if (ticks && p->it_real_value) {
			if (p->it_real_value <= ticks) {
				send_sig(SIGALRM, p, 1);
				if (!p->it_real_incr) {
					/* one-shot timer: disarm, and skip
					 * the "-= ticks" below */
					p->it_real_value = 0;
					goto end_itimer;
				}
				/* periodic: advance until past 'ticks' */
				do {
					p->it_real_value += p->it_real_incr;
				} while (p->it_real_value <= ticks);
			}
			p->it_real_value -= ticks;
			/* track the soonest pending expiry for do_timer() */
			if (p->it_real_value < itimer_next)
				itimer_next = p->it_real_value;
		}
end_itimer:
		if (p->state != TASK_INTERRUPTIBLE)
			continue;
		if (p->signal & ~p->blocked) {
			/* pending unblocked signal: wake the task */
			p->state = TASK_RUNNING;
			continue;
		}
		if (p->timeout && p->timeout <= jiffies) {
			/* sleep timeout expired */
			p->timeout = 0;
			p->state = TASK_RUNNING;
		}
	}
confuse_gcc1:

	/* this is the scheduler proper: */
#if 0
	/* disabled experiment: give tasks that go to sleep a small
	 * counter bonus */
	if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
		current->counter < current->priority*2) {
		++current->counter;
	}
#endif
	c = -1000;
	next = p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc2;
		if (p->state == TASK_RUNNING && p->counter > c)
			c = p->counter, next = p;
	}
confuse_gcc2:
	if (!c) {
		/* every runnable task has exhausted its quantum:
		 * recharge all counters; sleepers keep half of what
		 * they had as an interactivity bonus */
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
	if (current == next)
		return;		/* no switch needed */
	kstat.context_swtch++;
	switch_to(next);
	/* execution resumes here when this task is switched back in;
	 * restore its hardware debug registers if it uses them */
	if(current->debugreg[7]){
		loaddebug(0);
		loaddebug(1);
		loaddebug(2);
		loaddebug(3);
		loaddebug(6);
	};
}
258
/*
 * pause(2): sleep until a signal arrives.  The state must be set
 * BEFORE calling schedule() so a signal delivered in between simply
 * makes the task runnable again rather than being lost.  Always
 * returns -ERESTARTNOHAND (the syscall is only ever ended by a
 * signal and must not be restarted automatically).
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
265
266
267
268
269
270
271
272
273
/*
 * Wake every task on the wait queue: both interruptible and
 * uninterruptible sleepers become runnable.  The queue is a circular
 * list, so a NULL ->next means it has been corrupted; that is
 * reported and the walk abandoned instead of looping forever.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;		/* no queue, or queue empty */
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE)) {
				p->state = TASK_RUNNING;
				/* ask for a reschedule if the woken task
				 * has noticeably more quantum left */
				if (p->counter > current->counter + 3)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			/* corrupted queue: dump diagnostics and bail */
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
301
/*
 * Like wake_up(), but only tasks sleeping in TASK_INTERRUPTIBLE are
 * woken; uninterruptible sleepers are left undisturbed.
 */
void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;		/* no queue, or queue empty */
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE) {
				p->state = TASK_RUNNING;
				/* ask for a reschedule if the woken task
				 * has noticeably more quantum left */
				if (p->counter > current->counter + 3)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			/* corrupted circular queue: report and bail */
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
328
/*
 * Semaphore slow path: sleep uninterruptibly until the count goes
 * positive.  The state must be re-set to TASK_UNINTERRUPTIBLE after
 * every schedule(), because an up() elsewhere wakes us (making us
 * TASK_RUNNING) even if another task wins the count.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
341
/*
 * Common body of sleep_on()/interruptible_sleep_on(): queue the
 * current task on *p in the given sleep state and schedule until
 * woken.  Interrupts are enabled across the sleep (so the wakeup
 * source can fire) but the caller's interrupt-flag state is restored
 * afterwards.  The sequence state -> enqueue -> sti -> schedule is
 * order-critical; do not rearrange.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");	/* the idle task may never sleep */
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
359
/* sleep on queue p; any signal wakes the task */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}

/* sleep on queue p; only an explicit wake_up() ends the sleep */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
369
370
371
372
373
/* head of the circular, expiry-sorted doubly-linked timer list; its
 * expires value of ~0 acts as a sentinel terminating add_timer()'s
 * insertion scan */
static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
#define SLOW_BUT_DEBUGGING_TIMERS 1	/* compile list-sanity checks into add/del_timer */
376
/*
 * Insert a timer into the global sorted timer list.  On entry
 * timer->expires holds a RELATIVE delay in jiffies; it is converted
 * to an absolute expiry here.  The list splice runs with interrupts
 * off, since do_timer()/timer_bh() inspect the same list.
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	if (timer->next || timer->prev) {
		/* already linked: re-adding would corrupt the list */
		printk("add_timer() called with non-zero list from %p\n",
			__builtin_return_address(0));
		return;
	}
#endif
	p = &timer_head;
	timer->expires += jiffies;	/* relative -> absolute */
	save_flags(flags);
	cli();
	/* find the first entry expiring no earlier than us; the head's
	 * ~0 sentinel guarantees the scan terminates */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	/* link the new timer in just before p */
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}
402
/*
 * Remove a timer from the pending list.  Returns 1 if the timer was
 * queued and has been unlinked, 0 otherwise.  On success
 * timer->expires is converted back to a relative delay, so the timer
 * may be re-armed with add_timer().
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	struct timer_list * p;

	p = &timer_head;
	save_flags(flags);
	cli();
	/* debugging variant: walk the list to prove the timer is on it */
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			timer->expires -= jiffies;	/* absolute -> relative */
			return 1;
		}
	}
	/* not on the list, but its links are non-NULL: caller bug */
	if (timer->next || timer->prev)
		printk("del_timer() called from %p with timer not initialized\n",
			__builtin_return_address(0));
	restore_flags(flags);
	return 0;
#else
	/* fast variant: a non-NULL ->next means "queued" */
	save_flags(flags);
	cli();
	if (timer->next) {
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		restore_flags(flags);
		timer->expires -= jiffies;	/* absolute -> relative */
		return 1;
	}
	restore_flags(flags);
	return 0;
#endif
}
442
unsigned long timer_active = 0;		/* bitmask: which timer_table slots are armed */
struct timer_struct timer_table[32];	/* old-style static timers, one per bit of timer_active */

/*
 * Load-average accumulators (fixed point, FIXED_1 scale), updated
 * every LOAD_FREQ ticks by calc_load() below; conventionally the
 * three slots are short/medium/long-term averages.
 */
unsigned long avenrun[3] = { 0,0,0 };
453
454
455
456
457 static unsigned long count_active_tasks(void)
458 {
459 struct task_struct **p;
460 unsigned long nr = 0;
461
462 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
463 if (*p && ((*p)->state == TASK_RUNNING ||
464 (*p)->state == TASK_UNINTERRUPTIBLE ||
465 (*p)->state == TASK_SWAPPING))
466 nr += FIXED_1;
467 return nr;
468 }
469
470 static inline void calc_load(void)
471 {
472 unsigned long active_tasks;
473 static int count = LOAD_FREQ;
474
475 if (count-- > 0)
476 return;
477 count = LOAD_FREQ;
478 active_tasks = count_active_tasks();
479 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
480 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
481 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
482 }
483
484
485
486
487
488
489
490
491
492
493
/*
 * second_overflow() - called from do_timer() once per second when
 * xtime.tv_usec wraps.  Runs the NTP-style phase-locked loop (turns
 * the remaining time_offset into a per-tick time_adj), handles leap
 * seconds, and periodically writes the software clock back to the
 * CMOS RTC.
 */
static void second_overflow(void)
{
	long ltemp;

	/* last time the CMOS clock got updated */
	static long last_rtc_update=0;
	extern int set_rtc_mmss(unsigned long);

	/* grow the maximum-error bound, saturating at 0x70000000 */
	time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
	    0x70000000 : (time_maxerror + time_tolerance);

	/* PLL: derive this second's per-tick adjustment from the
	 * remaining phase offset, and fold the correction back in */
	if (time_offset < 0) {
		ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
		time_adj = - time_adj;
	} else if (time_offset > 0) {
		ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
	} else {
		time_adj = 0;
	}

	/* add the frequency correction */
	time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
	    + FINETUNE;

	/* leap-second state machine, keyed on midnight (86400 s/day) */
	switch (time_status) {
	case TIME_INS:
		/* insert a leap second: repeat the day's last second */
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_status = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 GMT\n");
		}
		break;

	case TIME_DEL:
		/* delete a leap second: skip the day's last second */
		if (xtime.tv_sec % 86400 == 86399) {
			xtime.tv_sec++;
			time_status = TIME_OK;
			printk("Clock: deleting leap second 23:59:59 GMT\n");
		}
		break;

	case TIME_OOP:
		time_status = TIME_OK;	/* insertion completed */
		break;
	}
	/* sync the CMOS clock roughly every 11 minutes (660 s); on
	 * failure retry in ~60 s.  NOTE: the else binds to the INNER
	 * if — intentional, though it reads like a dangling-else. */
	if (xtime.tv_sec > last_rtc_update + 660)
		if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			last_rtc_update = xtime.tv_sec - 600;
}
552
553
554
555
/*
 * The timer bottom half: runs expired timer_list timers (new style)
 * and then expired timer_table entries (old style).  Interrupts are
 * disabled only while the lists themselves are examined/unlinked —
 * do_timer() touches them from interrupt context — and re-enabled
 * around each handler call.
 */
static void timer_bh(void * unused)
{
	unsigned long mask;
	struct timer_struct *tp;
	struct timer_list * timer;

	cli();
	while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
		/* capture fn/data, then unlink BEFORE calling: the
		 * handler is free to re-add the timer */
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();
		fn(data);
		cli();
	}
	sti();

	/* old-style static timers: one bit of timer_active each */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;		/* no higher bits set */
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;	/* one-shot: clear before running */
		tp->fn();
		sti();
	}
}
587
/* bottom half: run everything queued on tq_timer (per-tick work) */
void tqueue_bh(void * unused)
{
	run_task_queue(&tq_timer);
}

/* bottom half: run everything queued on tq_immediate */
void immediate_bh(void * unused)
{
	run_task_queue(&tq_immediate);
}
597
598
599
600
601
602
603
/*
 * do_timer() - the timer interrupt handler.  Advances the wall clock
 * (applying the NTP phase correction and any adjtime() slew), bumps
 * jiffies, updates the load average, charges the tick to the current
 * task (user vs system), handles the virtual/profiling itimers and
 * RLIMIT_CPU, and marks the timer/tqueue bottom halves when anything
 * has expired.
 */
static void do_timer(struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;

	long ltemp, psecs;

	/* accumulate the per-tick phase adjustment; when it exceeds a
	 * whole (scaled) microsecond, move that into xtime.tv_usec */
	time_phase += time_adj;
	if (time_phase < -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase > FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust)
	{
		/* an adjtime() is in progress: slew by at most tickadj
		 * microseconds per tick, in the requested direction,
		 * until the whole adjustment has been consumed */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		/* consume this tick's slice of the adjustment */
		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;

	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();	/* once-per-second bookkeeping */
	}

	jiffies++;
	calc_load();
	/* charge the tick: vm86 mode or CPL 3 counts as user time */
	if ((VM_MASK & regs->eflags) || (3 & regs->cs)) {
		current->utime++;
		if (current != task[0]) {
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}
		/* the virtual itimer only ticks in user mode */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		current->stime++;
		if(current != task[0])
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		/* kernel profiling: one counter per 4 bytes of text */
		if (prof_buffer && current != task[0]) {
			unsigned long eip = regs->eip;
			eip >>= 2;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}

	/* CPU-time limits: SIGKILL at the hard limit, SIGXCPU when the
	 * soft limit is reached and every 5 seconds past it */
	if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
	    (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
		send_sig(SIGKILL, current, 1);
	if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
	    (((current->stime + current->utime) % HZ) == 0)) {
		psecs = (current->stime + current->utime) / HZ;
		/* send SIGXCPU the moment we reach the soft limit... */
		if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
			send_sig(SIGXCPU, current, 1);
		/* ...and every five seconds thereafter */
		else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
		    ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
			send_sig(SIGXCPU, current, 1);
	}

	/* quantum exhausted: request a reschedule (never for idle) */
	if (current != task[0] && 0 > --current->counter) {
		current->counter = 0;
		need_resched = 1;
	}

	/* the profiling itimer ticks in both user and system time */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
	/* any old-style timer_table entry expired? */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	cli();
	itimer_ticks++;
	if (itimer_ticks > itimer_next)
		need_resched = 1;	/* schedule() must deliver an ITIMER_REAL */
	if (timer_head.next->expires < jiffies)
		mark_bh(TIMER_BH);
	if (tq_timer != &tq_last)
		mark_bh(TQUEUE_BH);
	sti();
}
731
732 asmlinkage int sys_alarm(long seconds)
733 {
734 struct itimerval it_new, it_old;
735
736 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
737 it_new.it_value.tv_sec = seconds;
738 it_new.it_value.tv_usec = 0;
739 _setitimer(ITIMER_REAL, &it_new, &it_old);
740 return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
741 }
742
/* trivial identity getters: each returns one field of the current task */

asmlinkage int sys_getpid(void)
{
	return current->pid;		/* process id */
}

asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;	/* original parent's pid */
}

asmlinkage int sys_getuid(void)
{
	return current->uid;		/* real user id */
}

asmlinkage int sys_geteuid(void)
{
	return current->euid;		/* effective user id */
}

asmlinkage int sys_getgid(void)
{
	return current->gid;		/* real group id */
}

asmlinkage int sys_getegid(void)
{
	return current->egid;		/* effective group id */
}
772
773 asmlinkage int sys_nice(long increment)
774 {
775 int newprio;
776
777 if (increment < 0 && !suser())
778 return -EPERM;
779 newprio = current->priority - increment;
780 if (newprio < 1)
781 newprio = 1;
782 if (newprio > 35)
783 newprio = 35;
784 current->priority = newprio;
785 return 0;
786 }
787
/*
 * Print one row of the show_state() table for task p: command name,
 * task slot number (negated for the running task), state letter,
 * saved PC, untouched kernel-stack estimate, pid, and the pids of
 * its family links (father/child/younger/older siblings).
 */
static void show_task(int nr,struct task_struct * p)
{
	unsigned long free;
	static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
	if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
		/* non-literal format string, but safe: the state names
		 * above contain no '%' */
		printk(stat_nam[p->state]);
	else
		printk(" ");
	if (p == current)
		printk(" current ");
	else
		/* NOTE(review): fourth long above the saved esp —
		 * presumably the task's saved PC; verify against the
		 * context-switch layout */
		printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);
	/* count leading zero longs on the kernel stack page: memory the
	 * stack has never grown into (reported below scaled to bytes) */
	for (free = 1; free < 1024 ; free++) {
		if (((unsigned long *)p->kernel_stack_page)[free])
			break;
	}
	printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
		printk(" ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);
	else
		printk(" ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);
	else
		printk("\n");
}
820
821 void show_state(void)
822 {
823 int i;
824
825 printk(" free sibling\n");
826 printk(" task PC stack pid father child younger older\n");
827 for (i=0 ; i<NR_TASKS ; i++)
828 if (task[i])
829 show_task(i,task[i]);
830 }
831
/*
 * One-time scheduler/timer initialization: install the bottom-half
 * handlers, build the GDT descriptors for task 0's TSS and LDT,
 * hook the int 0x80 system-call gate, program the interval timer
 * for HZ interrupts, and claim the timer IRQ.
 */
void sched_init(void)
{
	int i;
	struct desc_struct * p;

	bh_base[TIMER_BH].routine = timer_bh;
	bh_base[TQUEUE_BH].routine = tqueue_bh;
	bh_base[IMMEDIATE_BH].routine = immediate_bh;
	/* presumably the assembly entry code hard-codes this size —
	 * hence the hard panic rather than a warning */
	if (sizeof(struct sigaction) != 16)
		panic("Struct sigaction MUST be 16 bytes");
	set_tss_desc(gdt+FIRST_TSS_ENTRY,&init_task.tss);
	set_ldt_desc(gdt+FIRST_LDT_ENTRY,&default_ldt,1);
	set_system_gate(0x80,&system_call);	/* int 0x80 = system call */
	/* clear the TSS/LDT descriptor pair of every other task slot */
	p = gdt+2+FIRST_TSS_ENTRY;
	for(i=1 ; i<NR_TASKS ; i++) {
		task[i] = NULL;
		p->a=p->b=0;
		p++;
		p->a=p->b=0;
		p++;
	}
	/* clear EFLAGS.NT so an IRET never attempts a task return */
	__asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
	load_TR(0);
	load_ldt(0);
	/* program PIT channel 0 (command 0x34: binary, mode 2 rate
	 * generator, LSB then MSB) for HZ interrupts per second */
	outb_p(0x34,0x43);
	outb_p(LATCH & 0xff , 0x40);	/* divisor LSB */
	outb(LATCH >> 8 , 0x40);	/* divisor MSB */
	if (request_irq(TIMER_IRQ,(void (*)(int)) do_timer, 0, "timer") != 0)
		panic("Could not allocate timer IRQ!");
}
862 }