This source file includes the following definitions.
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- immediate_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/tqueue.h>
27 #include <linux/resource.h>
28 #include <linux/mm.h>
29
30 #include <asm/system.h>
31 #include <asm/io.h>
32 #include <asm/segment.h>
33 #include <asm/pgtable.h>
34
35 #define TIMER_IRQ 0
36
37 #include <linux/timex.h>
38
39
40
41
42 long tick = 1000000 / HZ;
43 volatile struct timeval xtime;
44 int tickadj = 500/HZ;
45
46 DECLARE_TASK_QUEUE(tq_timer);
47 DECLARE_TASK_QUEUE(tq_immediate);
48 DECLARE_TASK_QUEUE(tq_scheduler);
49
50
51
52
53 int time_status = TIME_BAD;
54 long time_offset = 0;
55 long time_constant = 0;
56 long time_tolerance = MAXFREQ;
57 long time_precision = 1;
58 long time_maxerror = 0x70000000;
59 long time_esterror = 0x70000000;
60 long time_phase = 0;
61 long time_freq = 0;
62 long time_adj = 0;
63 long time_reftime = 0;
64
65 long time_adjust = 0;
66 long time_adjust_step = 0;
67
68 int need_resched = 0;
69 unsigned long event = 0;
70
71 extern int _setitimer(int, struct itimerval *, struct itimerval *);
72 unsigned long * prof_buffer = NULL;
73 unsigned long prof_len = 0;
74
75 #define _S(nr) (1<<((nr)-1))
76
77 extern void mem_use(void);
78
79 extern int timer_interrupt(void);
80
81 static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
82 unsigned long init_user_stack[1024] = { STACK_MAGIC, };
83 static struct vm_area_struct init_mmap = INIT_MMAP;
84 struct task_struct init_task = INIT_TASK;
85
86 unsigned long volatile jiffies=0;
87
88 struct task_struct *current = &init_task;
89 struct task_struct *last_task_used_math = NULL;
90
91 struct task_struct * task[NR_TASKS] = {&init_task, };
92
93 struct kernel_stat kstat = { 0 };
94
95 unsigned long itimer_ticks = 0;
96 unsigned long itimer_next = ~0;
97
98
99
100
101
102
103
104
105
106
107
108
109
110 asmlinkage void schedule(void)
111 {
112 int c;
113 struct task_struct * p;
114 struct task_struct * next;
115 unsigned long ticks;
116
117
118
119 if (intr_count) {
120 printk("Aiee: scheduling in interrupt\n");
121 intr_count = 0;
122 }
123 run_task_queue(&tq_scheduler);
124 cli();
125 ticks = itimer_ticks;
126 itimer_ticks = 0;
127 itimer_next = ~0;
128 sti();
129 need_resched = 0;
130 nr_running = 0;
131 p = &init_task;
132 for (;;) {
133 if ((p = p->next_task) == &init_task)
134 goto confuse_gcc1;
135 if (ticks && p->it_real_value) {
136 if (p->it_real_value <= ticks) {
137 send_sig(SIGALRM, p, 1);
138 if (!p->it_real_incr) {
139 p->it_real_value = 0;
140 goto end_itimer;
141 }
142 do {
143 p->it_real_value += p->it_real_incr;
144 } while (p->it_real_value <= ticks);
145 }
146 p->it_real_value -= ticks;
147 if (p->it_real_value < itimer_next)
148 itimer_next = p->it_real_value;
149 }
150 end_itimer:
151 if (p->state != TASK_INTERRUPTIBLE)
152 continue;
153 if (p->signal & ~p->blocked) {
154 p->state = TASK_RUNNING;
155 continue;
156 }
157 if (p->timeout && p->timeout <= jiffies) {
158 p->timeout = 0;
159 p->state = TASK_RUNNING;
160 }
161 }
162 confuse_gcc1:
163
164
165 #if 0
166
167
168
169
170 if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
171 current->counter < current->priority*2) {
172 ++current->counter;
173 }
174 #endif
175 c = -1000;
176 next = p = &init_task;
177 for (;;) {
178 if ((p = p->next_task) == &init_task)
179 goto confuse_gcc2;
180 if (p->state == TASK_RUNNING) {
181 nr_running++;
182 if (p->counter > c)
183 c = p->counter, next = p;
184 }
185 }
186 confuse_gcc2:
187 if (!c) {
188 for_each_task(p)
189 p->counter = (p->counter >> 1) + p->priority;
190 }
191 if (current == next)
192 return;
193 kstat.context_swtch++;
194 switch_to(next);
195 }
196
197 asmlinkage int sys_pause(void)
198 {
199 current->state = TASK_INTERRUPTIBLE;
200 schedule();
201 return -ERESTARTNOHAND;
202 }
203
204
205
206
207
208
209
210
211
212 void wake_up(struct wait_queue **q)
213 {
214 struct wait_queue *tmp;
215 struct task_struct * p;
216
217 if (!q || !(tmp = *q))
218 return;
219 do {
220 if ((p = tmp->task) != NULL) {
221 if ((p->state == TASK_UNINTERRUPTIBLE) ||
222 (p->state == TASK_INTERRUPTIBLE)) {
223 p->state = TASK_RUNNING;
224 if (p->counter > current->counter + 3)
225 need_resched = 1;
226 }
227 }
228 if (!tmp->next) {
229 printk("wait_queue is bad (eip = %p)\n",
230 __builtin_return_address(0));
231 printk(" q = %p\n",q);
232 printk(" *q = %p\n",*q);
233 printk(" tmp = %p\n",tmp);
234 break;
235 }
236 tmp = tmp->next;
237 } while (tmp != *q);
238 }
239
240 void wake_up_interruptible(struct wait_queue **q)
241 {
242 struct wait_queue *tmp;
243 struct task_struct * p;
244
245 if (!q || !(tmp = *q))
246 return;
247 do {
248 if ((p = tmp->task) != NULL) {
249 if (p->state == TASK_INTERRUPTIBLE) {
250 p->state = TASK_RUNNING;
251 if (p->counter > current->counter + 3)
252 need_resched = 1;
253 }
254 }
255 if (!tmp->next) {
256 printk("wait_queue is bad (eip = %p)\n",
257 __builtin_return_address(0));
258 printk(" q = %p\n",q);
259 printk(" *q = %p\n",*q);
260 printk(" tmp = %p\n",tmp);
261 break;
262 }
263 tmp = tmp->next;
264 } while (tmp != *q);
265 }
266
267 void __down(struct semaphore * sem)
268 {
269 struct wait_queue wait = { current, NULL };
270 add_wait_queue(&sem->wait, &wait);
271 current->state = TASK_UNINTERRUPTIBLE;
272 while (sem->count <= 0) {
273 schedule();
274 current->state = TASK_UNINTERRUPTIBLE;
275 }
276 current->state = TASK_RUNNING;
277 remove_wait_queue(&sem->wait, &wait);
278 }
279
280 static inline void __sleep_on(struct wait_queue **p, int state)
281 {
282 unsigned long flags;
283 struct wait_queue wait = { current, NULL };
284
285 if (!p)
286 return;
287 if (current == task[0])
288 panic("task[0] trying to sleep");
289 current->state = state;
290 add_wait_queue(p, &wait);
291 save_flags(flags);
292 sti();
293 schedule();
294 remove_wait_queue(p, &wait);
295 restore_flags(flags);
296 }
297
298 void interruptible_sleep_on(struct wait_queue **p)
299 {
300 __sleep_on(p,TASK_INTERRUPTIBLE);
301 }
302
303 void sleep_on(struct wait_queue **p)
304 {
305 __sleep_on(p,TASK_UNINTERRUPTIBLE);
306 }
307
308
309
310
311
312 static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
313 #define SLOW_BUT_DEBUGGING_TIMERS 1
314
315 void add_timer(struct timer_list * timer)
316 {
317 unsigned long flags;
318 struct timer_list *p;
319
320 #if SLOW_BUT_DEBUGGING_TIMERS
321 if (timer->next || timer->prev) {
322 printk("add_timer() called with non-zero list from %p\n",
323 __builtin_return_address(0));
324 return;
325 }
326 #endif
327 p = &timer_head;
328 timer->expires += jiffies;
329 save_flags(flags);
330 cli();
331 do {
332 p = p->next;
333 } while (timer->expires > p->expires);
334 timer->next = p;
335 timer->prev = p->prev;
336 p->prev = timer;
337 timer->prev->next = timer;
338 restore_flags(flags);
339 }
340
341 int del_timer(struct timer_list * timer)
342 {
343 unsigned long flags;
344 #if SLOW_BUT_DEBUGGING_TIMERS
345 struct timer_list * p;
346
347 p = &timer_head;
348 save_flags(flags);
349 cli();
350 while ((p = p->next) != &timer_head) {
351 if (p == timer) {
352 timer->next->prev = timer->prev;
353 timer->prev->next = timer->next;
354 timer->next = timer->prev = NULL;
355 restore_flags(flags);
356 timer->expires -= jiffies;
357 return 1;
358 }
359 }
360 if (timer->next || timer->prev)
361 printk("del_timer() called from %p with timer not initialized\n",
362 __builtin_return_address(0));
363 restore_flags(flags);
364 return 0;
365 #else
366 save_flags(flags);
367 cli();
368 if (timer->next) {
369 timer->next->prev = timer->prev;
370 timer->prev->next = timer->next;
371 timer->next = timer->prev = NULL;
372 restore_flags(flags);
373 timer->expires -= jiffies;
374 return 1;
375 }
376 restore_flags(flags);
377 return 0;
378 #endif
379 }
380
381 unsigned long timer_active = 0;
382 struct timer_struct timer_table[32];
383
384
385
386
387
388
389
390 unsigned long avenrun[3] = { 0,0,0 };
391
392
393
394
395 static unsigned long count_active_tasks(void)
396 {
397 struct task_struct **p;
398 unsigned long nr = 0;
399
400 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
401 if (*p && ((*p)->state == TASK_RUNNING ||
402 (*p)->state == TASK_UNINTERRUPTIBLE ||
403 (*p)->state == TASK_SWAPPING))
404 nr += FIXED_1;
405 return nr;
406 }
407
408 static inline void calc_load(void)
409 {
410 unsigned long active_tasks;
411 static int count = LOAD_FREQ;
412
413 if (count-- > 0)
414 return;
415 count = LOAD_FREQ;
416 active_tasks = count_active_tasks();
417 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
418 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
419 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
420 }
421
422
423
424
425
426
427
428
429
430
431
432 static void second_overflow(void)
433 {
434 long ltemp;
435
436
437 time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
438 0x70000000 : (time_maxerror + time_tolerance);
439
440
441 if (time_offset < 0) {
442 ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
443 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
444 time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
445 time_adj = - time_adj;
446 } else if (time_offset > 0) {
447 ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
448 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
449 time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
450 } else {
451 time_adj = 0;
452 }
453
454 time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
455 + FINETUNE;
456
457
458 switch (time_status) {
459 case TIME_INS:
460
461 if (xtime.tv_sec % 86400 == 0) {
462 xtime.tv_sec--;
463 time_status = TIME_OOP;
464 printk("Clock: inserting leap second 23:59:60 UTC\n");
465 }
466 break;
467
468 case TIME_DEL:
469
470 if (xtime.tv_sec % 86400 == 86399) {
471 xtime.tv_sec++;
472 time_status = TIME_OK;
473 printk("Clock: deleting leap second 23:59:59 UTC\n");
474 }
475 break;
476
477 case TIME_OOP:
478 time_status = TIME_OK;
479 break;
480 }
481 }
482
483
484
485
486 static void timer_bh(void * unused)
487 {
488 unsigned long mask;
489 struct timer_struct *tp;
490 struct timer_list * timer;
491
492 cli();
493 while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
494 void (*fn)(unsigned long) = timer->function;
495 unsigned long data = timer->data;
496 timer->next->prev = timer->prev;
497 timer->prev->next = timer->next;
498 timer->next = timer->prev = NULL;
499 sti();
500 fn(data);
501 cli();
502 }
503 sti();
504
505 for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
506 if (mask > timer_active)
507 break;
508 if (!(mask & timer_active))
509 continue;
510 if (tp->expires > jiffies)
511 continue;
512 timer_active &= ~mask;
513 tp->fn();
514 sti();
515 }
516 }
517
518 void tqueue_bh(void * unused)
519 {
520 run_task_queue(&tq_timer);
521 }
522
523 void immediate_bh(void * unused)
524 {
525 run_task_queue(&tq_immediate);
526 }
527
528
529
530
531
532
533
534 static void do_timer(int irq, struct pt_regs * regs)
535 {
536 unsigned long mask;
537 struct timer_struct *tp;
538
539 static long last_rtc_update=0;
540 extern int set_rtc_mmss(unsigned long);
541
542 long ltemp, psecs;
543
544
545
546
547 time_phase += time_adj;
548 if (time_phase < -FINEUSEC) {
549 ltemp = -time_phase >> SHIFT_SCALE;
550 time_phase += ltemp << SHIFT_SCALE;
551 xtime.tv_usec += tick + time_adjust_step - ltemp;
552 }
553 else if (time_phase > FINEUSEC) {
554 ltemp = time_phase >> SHIFT_SCALE;
555 time_phase -= ltemp << SHIFT_SCALE;
556 xtime.tv_usec += tick + time_adjust_step + ltemp;
557 } else
558 xtime.tv_usec += tick + time_adjust_step;
559
560 if (time_adjust)
561 {
562
563
564
565
566
567
568
569
570
571 if (time_adjust > tickadj)
572 time_adjust_step = tickadj;
573 else if (time_adjust < -tickadj)
574 time_adjust_step = -tickadj;
575 else
576 time_adjust_step = time_adjust;
577
578
579 time_adjust -= time_adjust_step;
580 }
581 else
582 time_adjust_step = 0;
583
584 if (xtime.tv_usec >= 1000000) {
585 xtime.tv_usec -= 1000000;
586 xtime.tv_sec++;
587 second_overflow();
588 }
589
590
591
592
593
594 if (time_status != TIME_BAD && xtime.tv_sec > last_rtc_update + 660 &&
595 xtime.tv_usec > 500000 - (tick >> 1) &&
596 xtime.tv_usec < 500000 + (tick >> 1))
597 if (set_rtc_mmss(xtime.tv_sec) == 0)
598 last_rtc_update = xtime.tv_sec;
599 else
600 last_rtc_update = xtime.tv_sec - 600;
601
602 jiffies++;
603 calc_load();
604 if (user_mode(regs)) {
605 current->utime++;
606 if (current != task[0]) {
607 if (current->priority < 15)
608 kstat.cpu_nice++;
609 else
610 kstat.cpu_user++;
611 }
612
613 if (current->it_virt_value && !(--current->it_virt_value)) {
614 current->it_virt_value = current->it_virt_incr;
615 send_sig(SIGVTALRM,current,1);
616 }
617 } else {
618 current->stime++;
619 if(current != task[0])
620 kstat.cpu_system++;
621 #ifdef CONFIG_PROFILE
622 if (prof_buffer && current != task[0]) {
623 extern int _stext;
624 unsigned long eip = regs->eip - (unsigned long) &_stext;
625 eip >>= CONFIG_PROFILE_SHIFT;
626 if (eip < prof_len)
627 prof_buffer[eip]++;
628 }
629 #endif
630 }
631
632
633
634 if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
635 (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
636 send_sig(SIGKILL, current, 1);
637 if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
638 (((current->stime + current->utime) % HZ) == 0)) {
639 psecs = (current->stime + current->utime) / HZ;
640
641 if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
642 send_sig(SIGXCPU, current, 1);
643
644 else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
645 ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
646 send_sig(SIGXCPU, current, 1);
647 }
648
649 if (current != task[0] && 0 > --current->counter) {
650 current->counter = 0;
651 need_resched = 1;
652 }
653
654 if (current->it_prof_value && !(--current->it_prof_value)) {
655 current->it_prof_value = current->it_prof_incr;
656 send_sig(SIGPROF,current,1);
657 }
658 for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
659 if (mask > timer_active)
660 break;
661 if (!(mask & timer_active))
662 continue;
663 if (tp->expires > jiffies)
664 continue;
665 mark_bh(TIMER_BH);
666 }
667 cli();
668 itimer_ticks++;
669 if (itimer_ticks > itimer_next)
670 need_resched = 1;
671 if (timer_head.next->expires < jiffies)
672 mark_bh(TIMER_BH);
673 if (tq_timer != &tq_last)
674 mark_bh(TQUEUE_BH);
675 sti();
676 }
677
678 asmlinkage int sys_alarm(long seconds)
679 {
680 struct itimerval it_new, it_old;
681
682 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
683 it_new.it_value.tv_sec = seconds;
684 it_new.it_value.tv_usec = 0;
685 _setitimer(ITIMER_REAL, &it_new, &it_old);
686 return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
687 }
688
689 asmlinkage int sys_getpid(void)
690 {
691 return current->pid;
692 }
693
694 asmlinkage int sys_getppid(void)
695 {
696 return current->p_opptr->pid;
697 }
698
699 asmlinkage int sys_getuid(void)
700 {
701 return current->uid;
702 }
703
704 asmlinkage int sys_geteuid(void)
705 {
706 return current->euid;
707 }
708
709 asmlinkage int sys_getgid(void)
710 {
711 return current->gid;
712 }
713
714 asmlinkage int sys_getegid(void)
715 {
716 return current->egid;
717 }
718
719 asmlinkage int sys_nice(long increment)
720 {
721 int newprio;
722
723 if (increment < 0 && !suser())
724 return -EPERM;
725 newprio = current->priority - increment;
726 if (newprio < 1)
727 newprio = 1;
728 if (newprio > 35)
729 newprio = 35;
730 current->priority = newprio;
731 return 0;
732 }
733
734 static void show_task(int nr,struct task_struct * p)
735 {
736 unsigned long free;
737 static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
738
739 printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
740 if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
741 printk(stat_nam[p->state]);
742 else
743 printk(" ");
744 #if ((~0UL) == 0xffffffff)
745 if (p == current)
746 printk(" current ");
747 else
748 printk(" %08lX ", thread_saved_pc(&p->tss));
749 #else
750 if (p == current)
751 printk(" current task ");
752 else
753 printk(" %016lx ", thread_saved_pc(&p->tss));
754 #endif
755 for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
756 if (((unsigned long *)p->kernel_stack_page)[free])
757 break;
758 }
759 printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
760 if (p->p_cptr)
761 printk("%5d ", p->p_cptr->pid);
762 else
763 printk(" ");
764 if (p->p_ysptr)
765 printk("%7d", p->p_ysptr->pid);
766 else
767 printk(" ");
768 if (p->p_osptr)
769 printk(" %5d\n", p->p_osptr->pid);
770 else
771 printk("\n");
772 }
773
774 void show_state(void)
775 {
776 int i;
777
778 #if ((~0UL) == 0xffffffff)
779 printk("\n"
780 " free sibling\n");
781 printk(" task PC stack pid father child younger older\n");
782 #else
783 printk("\n"
784 " free sibling\n");
785 printk(" task PC stack pid father child younger older\n");
786 #endif
787 for (i=0 ; i<NR_TASKS ; i++)
788 if (task[i])
789 show_task(i,task[i]);
790 }
791
792 void sched_init(void)
793 {
794 bh_base[TIMER_BH].routine = timer_bh;
795 bh_base[TQUEUE_BH].routine = tqueue_bh;
796 bh_base[IMMEDIATE_BH].routine = immediate_bh;
797 if (request_irq(TIMER_IRQ, do_timer, 0, "timer") != 0)
798 panic("Could not allocate timer IRQ!");
799 enable_bh(TIMER_BH);
800 enable_bh(TQUEUE_BH);
801 enable_bh(IMMEDIATE_BH);
802 }