This source file includes following definitions.
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- immediate_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/tqueue.h>
27 #include <linux/resource.h>
28 #include <linux/mm.h>
29
30 #include <asm/system.h>
31 #include <asm/io.h>
32 #include <asm/segment.h>
33 #include <asm/pgtable.h>
34
35 #define TIMER_IRQ 0
36
37 #include <linux/timex.h>
38
39
40
41
42 long tick = 1000000 / HZ;
43 volatile struct timeval xtime;
44 int tickadj = 500/HZ;
45
46 DECLARE_TASK_QUEUE(tq_timer);
47 DECLARE_TASK_QUEUE(tq_immediate);
48
49
50
51
52 int time_status = TIME_BAD;
53 long time_offset = 0;
54 long time_constant = 0;
55 long time_tolerance = MAXFREQ;
56 long time_precision = 1;
57 long time_maxerror = 0x70000000;
58 long time_esterror = 0x70000000;
59 long time_phase = 0;
60 long time_freq = 0;
61 long time_adj = 0;
62 long time_reftime = 0;
63
64 long time_adjust = 0;
65 long time_adjust_step = 0;
66
67 int need_resched = 0;
68 unsigned long event = 0;
69
70 extern int _setitimer(int, struct itimerval *, struct itimerval *);
71 unsigned long * prof_buffer = NULL;
72 unsigned long prof_len = 0;
73
74 #define _S(nr) (1<<((nr)-1))
75
76 extern void mem_use(void);
77
78 extern int timer_interrupt(void);
79
80 static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
81 unsigned long init_user_stack[1024] = { STACK_MAGIC, };
82 static struct vm_area_struct init_mmap = INIT_MMAP;
83 struct task_struct init_task = INIT_TASK;
84
85 unsigned long volatile jiffies=0;
86
87 struct task_struct *current = &init_task;
88 struct task_struct *last_task_used_math = NULL;
89
90 struct task_struct * task[NR_TASKS] = {&init_task, };
91
92 struct kernel_stat kstat = { 0 };
93
94 unsigned long itimer_ticks = 0;
95 unsigned long itimer_next = ~0;
96
97
98
99
100
101
102
103
104
105
106
107
108
/*
 * 'schedule()' is the scheduler proper: it picks the runnable task with
 * the largest remaining time-slice ('counter') and switches to it.  On
 * the way it also delivers any expired ITIMER_REAL timers and wakes up
 * interruptible sleepers with a pending signal or expired timeout.
 *
 * Must not be called from interrupt context: if intr_count is set we
 * print a warning and force it back to zero so we can limp on.
 */
asmlinkage void schedule(void)
{
	int c;				/* best counter value found so far */
	struct task_struct * p;
	struct task_struct * next;	/* task we will switch to */
	unsigned long ticks;		/* itimer ticks accumulated since last schedule */

	/* scheduling from an interrupt handler is a bug: complain loudly */
	if (intr_count) {
		printk("Aiee: scheduling in interrupt\n");
		intr_count = 0;
	}
	/* atomically grab-and-reset the global itimer tick count */
	cli();
	ticks = itimer_ticks;
	itimer_ticks = 0;
	itimer_next = ~0;
	sti();
	need_resched = 0;
	nr_running = 0;

	/*
	 * Pass 1: walk the circular task list (rooted at init_task).
	 * Deliver ITIMER_REAL expiries and wake interruptible tasks
	 * whose signal or timeout is pending.
	 */
	p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc1;
		if (ticks && p->it_real_value) {
			if (p->it_real_value <= ticks) {
				send_sig(SIGALRM, p, 1);
				if (!p->it_real_incr) {
					/* one-shot timer: disarm it */
					p->it_real_value = 0;
					goto end_itimer;
				}
				/* periodic timer: advance past 'ticks' */
				do {
					p->it_real_value += p->it_real_incr;
				} while (p->it_real_value <= ticks);
			}
			p->it_real_value -= ticks;
			/* remember the soonest expiry system-wide */
			if (p->it_real_value < itimer_next)
				itimer_next = p->it_real_value;
		}
end_itimer:
		if (p->state != TASK_INTERRUPTIBLE)
			continue;
		/* pending unblocked signal: make the task runnable */
		if (p->signal & ~p->blocked) {
			p->state = TASK_RUNNING;
			continue;
		}
		/* sleep timeout expired: make the task runnable */
		if (p->timeout && p->timeout <= jiffies) {
			p->timeout = 0;
			p->state = TASK_RUNNING;
		}
	}
confuse_gcc1:

#if 0
	/* disabled: give the previously-running task a small counter
	 * bonus if it is still runnable */
	if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
		current->counter < current->priority*2) {
		++current->counter;
	}
#endif
	/*
	 * Pass 2: pick the runnable task with the highest counter.
	 * init_task (the default for 'next') runs when nothing else can.
	 */
	c = -1000;
	next = p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc2;
		if (p->state == TASK_RUNNING) {
			nr_running++;
			if (p->counter > c)
				c = p->counter, next = p;
		}
	}
confuse_gcc2:
	/* every runnable task has used its slice: recharge all counters
	 * (sleepers keep half their old value as a wakeup bonus) */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
	if (current == next)
		return;
	kstat.context_swtch++;
	switch_to(next);
}
194
/*
 * sys_pause() - suspend the caller until a signal arrives.
 * Returns -ERESTARTNOHAND so the syscall is not restarted after the
 * signal handler runs.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
201
202
203
204
205
206
207
208
209
/*
 * wake_up() - make every task sleeping on wait-queue *q runnable.
 * Wakes both TASK_UNINTERRUPTIBLE and TASK_INTERRUPTIBLE sleepers.
 * If a woken task has a noticeably larger time-slice than the current
 * one, a reschedule is requested.  The queue is circular; a NULL
 * 'next' pointer means it is corrupt and is reported.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE)) {
				p->state = TASK_RUNNING;
				/* woken task deserves the CPU more than us */
				if (p->counter > current->counter + 3)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			/* non-circular queue: corrupt — complain and bail */
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
237
/*
 * wake_up_interruptible() - like wake_up(), but only wakes tasks in
 * TASK_INTERRUPTIBLE sleep; uninterruptible sleepers are left alone.
 */
void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE) {
				p->state = TASK_RUNNING;
				/* woken task deserves the CPU more than us */
				if (p->counter > current->counter + 3)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			/* non-circular queue: corrupt — complain and bail */
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
264
/*
 * __down() - slow path of a semaphore down(): block on sem->wait until
 * the count goes positive.  Sleeps uninterruptibly; the count is
 * re-tested each time the task is woken.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		/* a wake_up set us RUNNING: drop back to sleeping state
		 * before re-testing the count */
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
277
/*
 * __sleep_on() - common helper for sleep_on()/interruptible_sleep_on().
 * Puts the current task into 'state' on wait-queue *p, enables
 * interrupts and schedules away; on wakeup it removes itself from the
 * queue and restores the caller's interrupt-flag state.
 * The idle task (task[0]) must never sleep.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");
	/* set state BEFORE queueing, so a wake_up between add_wait_queue
	 * and schedule() is not lost */
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
295
/* interruptible_sleep_on() - sleep on *p; signals can wake the task. */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}
300
/* sleep_on() - sleep on *p uninterruptibly until explicitly woken. */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
305
306
307
308
309
310 static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
311 #define SLOW_BUT_DEBUGGING_TIMERS 1
312
/*
 * add_timer() - insert 'timer' into the global sorted timer list.
 * timer->expires is relative on entry and is converted to an absolute
 * jiffies value here.  The list (rooted at timer_head) is kept sorted
 * by expiry time; the sentinel's ~0 expiry terminates the scan.
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	/* debugging: refuse a timer that still appears to be queued */
	if (timer->next || timer->prev) {
		printk("add_timer() called with non-zero list from %p\n",
			__builtin_return_address(0));
		return;
	}
#endif
	p = &timer_head;
	timer->expires += jiffies;	/* relative -> absolute */
	save_flags(flags);
	cli();
	/* find the first entry that expires after us */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	/* link the timer in just before p */
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}
338
/*
 * del_timer() - remove 'timer' from the timer list if it is queued.
 * Returns 1 when the timer was pending (and converts timer->expires
 * back to a relative value), 0 otherwise.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	struct timer_list * p;

	/* debugging version: verify the timer really is on the list */
	p = &timer_head;
	save_flags(flags);
	cli();
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			/* NOTE(review): expires is rewritten after interrupts
			 * are re-enabled — looks racy vs. a re-add; confirm */
			timer->expires -= jiffies;
			return 1;
		}
	}
	if (timer->next || timer->prev)
		printk("del_timer() called from %p with timer not initialized\n",
			__builtin_return_address(0));
	restore_flags(flags);
	return 0;
#else
	/* fast version: a non-NULL next pointer means it is queued */
	save_flags(flags);
	cli();
	if (timer->next) {
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		restore_flags(flags);
		timer->expires -= jiffies;
		return 1;
	}
	restore_flags(flags);
	return 0;
#endif
}
378
379 unsigned long timer_active = 0;
380 struct timer_struct timer_table[32];
381
382
383
384
385
386
387
388 unsigned long avenrun[3] = { 0,0,0 };
389
390
391
392
393 static unsigned long count_active_tasks(void)
394 {
395 struct task_struct **p;
396 unsigned long nr = 0;
397
398 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
399 if (*p && ((*p)->state == TASK_RUNNING ||
400 (*p)->state == TASK_UNINTERRUPTIBLE ||
401 (*p)->state == TASK_SWAPPING))
402 nr += FIXED_1;
403 return nr;
404 }
405
/*
 * calc_load() - called once per tick; every LOAD_FREQ ticks it folds
 * the current active-task count into the 1/5/15-minute load averages
 * in avenrun[] via the exponential-decay CALC_LOAD macro.
 */
static inline void calc_load(void)
{
	unsigned long active_tasks;	/* fixed point (FIXED_1 units) */
	static int count = LOAD_FREQ;	/* ticks until the next update */

	if (count-- > 0)
		return;
	count = LOAD_FREQ;
	active_tasks = count_active_tasks();
	CALC_LOAD(avenrun[0], EXP_1, active_tasks);
	CALC_LOAD(avenrun[1], EXP_5, active_tasks);
	CALC_LOAD(avenrun[2], EXP_15, active_tasks);
}
419
420
421
422
423
424
425
426
427
428
429
/*
 * second_overflow() - called from do_timer() whenever xtime.tv_sec
 * advances.  Runs the once-per-second part of the NTP kernel
 * phase-locked loop: it ages the maximum clock-error estimate,
 * derives the per-tick adjustment (time_adj) from the current phase
 * offset and frequency estimate, and handles leap-second insertion or
 * deletion at UTC day boundaries.
 */
static void second_overflow(void)
{
	long ltemp;

	/* grow the maximum error estimate, saturating at 0x70000000 */
	time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
	    0x70000000 : (time_maxerror + time_tolerance);

	/* turn the phase offset into a per-tick adjustment, and move
	 * time_offset toward zero by the amount we will apply */
	if (time_offset < 0) {
		ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
		time_adj = - time_adj;
	} else if (time_offset > 0) {
		ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
	} else {
		time_adj = 0;
	}

	/* fold in the frequency correction and the fixed tuning term */
	time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
	    + FINETUNE;

	/* leap-second handling (86400 seconds per UTC day) */
	switch (time_status) {
	case TIME_INS:
		/* insert a leap second at midnight UTC */
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_status = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 UTC\n");
		}
		break;

	case TIME_DEL:
		/* delete the last second of the UTC day */
		if (xtime.tv_sec % 86400 == 86399) {
			xtime.tv_sec++;
			time_status = TIME_OK;
			printk("Clock: deleting leap second 23:59:59 UTC\n");
		}
		break;

	case TIME_OOP:
		/* the inserted leap second has now passed */
		time_status = TIME_OK;
		break;
	}
}
480
481
482
483
/*
 * timer_bh() - the timer bottom half, run after do_timer() marks it.
 * First runs every expired timer_list entry (each is unlinked under
 * cli() before its handler is called with interrupts enabled, so the
 * handler may safely re-add it), then runs any expired old-style
 * timer_table entry whose bit is set in timer_active.
 */
static void timer_bh(void * unused)
{
	unsigned long mask;
	struct timer_struct *tp;
	struct timer_list * timer;

	cli();
	while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		/* unlink before calling, so the handler may re-add it */
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();
		fn(data);
		cli();
	}
	sti();

	/* old-style static timer table: one timer_active bit per slot */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;
		tp->fn();
		sti();
	}
}
515
/* tqueue_bh() - bottom half that drains the tq_timer task queue. */
void tqueue_bh(void * unused)
{
	run_task_queue(&tq_timer);
}
520
/* immediate_bh() - bottom half that drains the tq_immediate task queue. */
void immediate_bh(void * unused)
{
	run_task_queue(&tq_immediate);
}
525
526
527
528
529
530
531
/*
 * do_timer() - the timer interrupt handler (IRQ 0), called HZ times a
 * second.  Advances xtime with the NTP phase/frequency corrections and
 * any adjtime() slew, updates jiffies and the load average, charges the
 * current task for the tick (user or system time), delivers virtual and
 * profiling interval timers and CPU-rlimit signals, handles time-slice
 * expiry, and marks the timer/tqueue bottom halves when work is due.
 */
static void do_timer(int irq, struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;

	/* for the periodic CMOS-RTC resync below */
	static long last_rtc_update=0;
	extern int set_rtc_mmss(unsigned long);

	long ltemp, psecs;

	/* Advance the time-of-day clock by one tick, applying the NTP
	 * phase adjustment (time_adj, SHIFT_SCALE fixed point) and any
	 * adjtime() slew (time_adjust_step). */
	time_phase += time_adj;
	if (time_phase < -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase > FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust)
	{
		/* An adjtime() slew is in progress: move at most
		 * 'tickadj' microseconds per tick toward the target and
		 * consume that amount from the outstanding adjustment. */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;

	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();
	}

	/* If the clock is synchronized, copy it out to the CMOS RTC
	 * roughly every 11 minutes (660 s), near the middle of a second.
	 * On failure, back last_rtc_update off so we retry in ~60 s. */
	if (time_status != TIME_BAD && xtime.tv_sec > last_rtc_update + 660 &&
	    xtime.tv_usec > 500000 - (tick >> 1) &&
	    xtime.tv_usec < 500000 + (tick >> 1))
	  if (set_rtc_mmss(xtime.tv_sec) == 0)
	    last_rtc_update = xtime.tv_sec;
	  else
	    last_rtc_update = xtime.tv_sec - 600;

	jiffies++;
	calc_load();
	if (user_mode(regs)) {
		/* tick spent in user mode */
		current->utime++;
		if (current != task[0]) {
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}
		/* ITIMER_VIRTUAL only counts down in user mode */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		/* tick spent in kernel mode */
		current->stime++;
		if(current != task[0])
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		/* kernel profiling: bucket the interrupted EIP */
		if (prof_buffer && current != task[0]) {
			unsigned long eip = regs->eip;
			eip >>= CONFIG_PROFILE_SHIFT;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}

	/* Enforce the CPU-time rlimits: SIGKILL at the hard limit,
	 * SIGXCPU at the soft limit and every 5 seconds beyond it. */
	if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
	    (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
		send_sig(SIGKILL, current, 1);
	if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
	    (((current->stime + current->utime) % HZ) == 0)) {
		psecs = (current->stime + current->utime) / HZ;
		/* exactly at the soft limit: first warning */
		if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
			send_sig(SIGXCPU, current, 1);
		/* past the soft limit: repeat every five seconds */
		else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
		         ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
			send_sig(SIGXCPU, current, 1);
	}

	/* time slice used up: ask for a reschedule (task[0] is exempt) */
	if (current != task[0] && 0 > --current->counter) {
		current->counter = 0;
		need_resched = 1;
	}

	/* ITIMER_PROF counts down on every tick, user or kernel */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
	/* any expired timer_table entry means the bottom half must run */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	cli();
	itimer_ticks++;
	if (itimer_ticks > itimer_next)
		need_resched = 1;	/* an ITIMER_REAL is due: let schedule() deliver it */
	if (timer_head.next->expires < jiffies)
		mark_bh(TIMER_BH);
	if (tq_timer != &tq_last)
		mark_bh(TQUEUE_BH);
	sti();
}
674
675 asmlinkage int sys_alarm(long seconds)
676 {
677 struct itimerval it_new, it_old;
678
679 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
680 it_new.it_value.tv_sec = seconds;
681 it_new.it_value.tv_usec = 0;
682 _setitimer(ITIMER_REAL, &it_new, &it_old);
683 return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
684 }
685
/* sys_getpid() - return the calling process's PID. */
asmlinkage int sys_getpid(void)
{
	return current->pid;
}
690
/* sys_getppid() - return the PID of the original parent (p_opptr). */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}
695
/* sys_getuid() - return the caller's real user ID. */
asmlinkage int sys_getuid(void)
{
	return current->uid;
}
700
/* sys_geteuid() - return the caller's effective user ID. */
asmlinkage int sys_geteuid(void)
{
	return current->euid;
}
705
/* sys_getgid() - return the caller's real group ID. */
asmlinkage int sys_getgid(void)
{
	return current->gid;
}
710
/* sys_getegid() - return the caller's effective group ID. */
asmlinkage int sys_getegid(void)
{
	return current->egid;
}
715
716 asmlinkage int sys_nice(long increment)
717 {
718 int newprio;
719
720 if (increment < 0 && !suser())
721 return -EPERM;
722 newprio = current->priority - increment;
723 if (newprio < 1)
724 newprio = 1;
725 if (newprio > 35)
726 newprio = 35;
727 current->priority = newprio;
728 return 0;
729 }
730
731 static void show_task(int nr,struct task_struct * p)
732 {
733 unsigned long free;
734 static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
735
736 printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
737 if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
738 printk(stat_nam[p->state]);
739 else
740 printk(" ");
741 #ifdef __i386__
742 if (p == current)
743 printk(" current ");
744 else
745 printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);
746 #endif
747 for (free = 1; free < 1024 ; free++) {
748 if (((unsigned long *)p->kernel_stack_page)[free])
749 break;
750 }
751 printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
752 if (p->p_cptr)
753 printk("%5d ", p->p_cptr->pid);
754 else
755 printk(" ");
756 if (p->p_ysptr)
757 printk("%7d", p->p_ysptr->pid);
758 else
759 printk(" ");
760 if (p->p_osptr)
761 printk(" %5d\n", p->p_osptr->pid);
762 else
763 printk("\n");
764 }
765
/*
 * show_state() - debugging dump: print a header followed by one
 * show_task() line for every occupied slot in the task array.
 */
void show_state(void)
{
	int i;

	printk(" free sibling\n");
	printk(" task PC stack pid father child younger older\n");
	for (i=0 ; i<NR_TASKS ; i++)
		if (task[i])
			show_task(i,task[i]);
}
776
/*
 * sched_init() - boot-time scheduler/timer setup: install the timer,
 * task-queue and immediate bottom-half handlers, claim the timer
 * interrupt (IRQ 0, panicking on failure) and enable the three
 * bottom halves.
 */
void sched_init(void)
{
	bh_base[TIMER_BH].routine = timer_bh;
	bh_base[TQUEUE_BH].routine = tqueue_bh;
	bh_base[IMMEDIATE_BH].routine = immediate_bh;
	if (request_irq(TIMER_IRQ, do_timer, 0, "timer") != 0)
		panic("Could not allocate timer IRQ!");
	enable_bh(TIMER_BH);
	enable_bh(TQUEUE_BH);
	enable_bh(IMMEDIATE_BH);
}