This source file includes following definitions.
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- immediate_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/tqueue.h>
27 #include <linux/resource.h>
28
29 #include <asm/system.h>
30 #include <asm/io.h>
31 #include <asm/segment.h>
32
33 #define TIMER_IRQ 0
34
35 #include <linux/timex.h>
36
37
38
39
long tick = 1000000 / HZ;		/* timer interrupt period in microseconds */
volatile struct timeval xtime;		/* the current wall-clock time */
int tickadj = 500/HZ;			/* max microseconds adjusted per tick by adjtime */

/* task queues drained by the timer and immediate bottom halves */
DECLARE_TASK_QUEUE(tq_timer);
DECLARE_TASK_QUEUE(tq_immediate);

/*
 * Kernel clock-discipline (NTP PLL style) state.
 * NOTE(review): semantics presumably follow the PLL model declared in
 * linux/timex.h -- confirm against that header.
 */
int time_status = TIME_BAD;		/* clock synchronization status */
long time_offset = 0;			/* remaining time adjustment (us) */
long time_constant = 0;			/* PLL time constant */
long time_tolerance = MAXFREQ;		/* frequency tolerance */
long time_precision = 1;		/* clock precision (us) */
long time_maxerror = 0x70000000;	/* maximum error bound */
long time_esterror = 0x70000000;	/* estimated error */
long time_phase = 0;			/* phase offset (scaled) */
long time_freq = 0;			/* frequency offset (scaled) */
long time_adj = 0;			/* per-tick adjustment (scaled) */
long time_reftime = 0;			/* time at last adjustment */

long time_adjust = 0;			/* outstanding adjtime() correction (us) */
long time_adjust_step = 0;		/* slice of time_adjust applied this tick */

int need_resched = 0;			/* set when schedule() should run soon */
unsigned long event = 0;

extern int _setitimer(int, struct itimerval *, struct itimerval *);
unsigned long * prof_buffer = NULL;	/* kernel profiling histogram (CONFIG_PROFILE) */
unsigned long prof_len = 0;		/* number of slots in prof_buffer */

#define _S(nr) (1<<((nr)-1))

extern void mem_use(void);

extern int timer_interrupt(void);

/* statically allocated stacks and task structure for the initial task */
static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
unsigned long init_user_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
struct task_struct init_task = INIT_TASK;

unsigned long volatile jiffies=0;	/* clock ticks since boot */

struct task_struct *current = &init_task;	/* the currently running task */
struct task_struct *last_task_used_math = NULL;	/* owner of the FPU context */

struct task_struct * task[NR_TASKS] = {&init_task, };

struct kernel_stat kstat = { 0 };

/* ticks accumulated by do_timer() for ITIMER_REAL delivery in schedule() */
unsigned long itimer_ticks = 0;
unsigned long itimer_next = ~0;		/* soonest-expiring real itimer seen */
94
95
96
97
98
99
100
101
102
103
104
105
106
/*
 * schedule() is the main scheduler entry point.
 *
 * First pass over the task list: deliver accumulated ITIMER_REAL ticks
 * (sending SIGALRM where due) and make TASK_INTERRUPTIBLE tasks runnable
 * if they have pending unblocked signals or an expired timeout.
 * Second pass: pick the TASK_RUNNING task with the largest remaining
 * counter and switch to it; when all counters are exhausted, recharge
 * every task from its priority.
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * next;
	unsigned long ticks;

	/* scheduling from interrupt context is a bug; complain and recover */
	if (intr_count) {
		printk("Aiee: scheduling in interrupt\n");
		intr_count = 0;
	}
	/* atomically consume the tick count accumulated by do_timer() */
	cli();
	ticks = itimer_ticks;
	itimer_ticks = 0;
	itimer_next = ~0;
	sti();
	need_resched = 0;
	p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc1;
		if (ticks && p->it_real_value) {
			if (p->it_real_value <= ticks) {
				send_sig(SIGALRM, p, 1);
				if (!p->it_real_incr) {
					/* one-shot itimer: disarm it */
					p->it_real_value = 0;
					goto end_itimer;
				}
				/* periodic itimer: advance past the elapsed ticks */
				do {
					p->it_real_value += p->it_real_incr;
				} while (p->it_real_value <= ticks);
			}
			p->it_real_value -= ticks;
			/* track the soonest-expiring real itimer */
			if (p->it_real_value < itimer_next)
				itimer_next = p->it_real_value;
		}
end_itimer:
		if (p->state != TASK_INTERRUPTIBLE)
			continue;
		/* a pending unblocked signal makes the task runnable */
		if (p->signal & ~p->blocked) {
			p->state = TASK_RUNNING;
			continue;
		}
		/* an expired sleep timeout also wakes the task */
		if (p->timeout && p->timeout <= jiffies) {
			p->timeout = 0;
			p->state = TASK_RUNNING;
		}
	}
confuse_gcc1:

	/* this is the scheduler proper */
#if 0
	/* disabled: small counter bonus for the current task while it is
	   in a runnable/uninterruptible state */
	if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
	    current->counter < current->priority*2) {
		++current->counter;
	}
#endif
	c = -1000;
	next = p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc2;
		if (p->state == TASK_RUNNING && p->counter > c)
			c = p->counter, next = p;
	}
confuse_gcc2:
	/* all runnable counters used up: recharge everybody */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
	if (current == next)
		return;
	kstat.context_swtch++;
	switch_to(next);
}
188
/*
 * pause() system call: sleep until a signal arrives.  Returns
 * -ERESTARTNOHAND so the call is not restarted after signal delivery.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
195
196
197
198
199
200
201
202
203
/*
 * Wake every task on the (circular) wait queue, whether it sleeps
 * interruptibly or uninterruptibly.  Sets need_resched when a woken
 * task has noticeably more timeslice left than the current one.
 * A NULL ->next link means the queue is corrupt: report and stop.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE)) {
				p->state = TASK_RUNNING;
				/* woken task deserves the CPU more than us? */
				if (p->counter > current->counter + 3)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			/* corrupt queue: dump diagnostics and bail out */
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
231
/*
 * Like wake_up(), but only wakes tasks sleeping in TASK_INTERRUPTIBLE;
 * uninterruptible sleepers are left alone.
 */
void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE) {
				p->state = TASK_RUNNING;
				/* woken task deserves the CPU more than us? */
				if (p->counter > current->counter + 3)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			/* corrupt queue: dump diagnostics and bail out */
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
258
/*
 * Semaphore contention slow path: sleep uninterruptibly on the
 * semaphore's wait queue until the count goes positive.  The state
 * is re-set to TASK_UNINTERRUPTIBLE before each re-check because a
 * wakeup may race with another contender taking the semaphore.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
271
/*
 * Put the current task to sleep on wait queue *p in the given state.
 * Interrupts are enabled while asleep; the caller's interrupt flags
 * are restored afterwards.  The idle task (task[0]) must never sleep.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");
	/* set state before queueing so a wakeup cannot be lost */
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
289
/* Sleep on *p until woken; a signal also ends the sleep. */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}
294
/* Sleep on *p until explicitly woken; signals do not interrupt. */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
299
300
301
302
303
/*
 * Doubly-linked, expiry-sorted list of dynamic kernel timers.  The
 * head's ~0 expiry acts as a sentinel so insertion never walks off
 * the end of the list.
 */
static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
#define SLOW_BUT_DEBUGGING_TIMERS 1
306
/*
 * Insert a timer into the expiry-sorted timer list.  timer->expires
 * is relative on entry and is converted to an absolute jiffies value.
 * Must not be called on a timer that is already queued (checked only
 * when SLOW_BUT_DEBUGGING_TIMERS is enabled).
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	if (timer->next || timer->prev) {
		printk("add_timer() called with non-zero list from %p\n",
			__builtin_return_address(0));
		return;
	}
#endif
	p = &timer_head;
	timer->expires += jiffies;	/* relative -> absolute */
	save_flags(flags);
	cli();
	/* find the first entry expiring no earlier than this timer;
	   the sentinel head (~0) guarantees termination */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	/* link in just before p */
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}
332
/*
 * Remove a timer from the list if it is queued.  Returns 1 (and
 * converts timer->expires back to a relative value) when the timer
 * was found, 0 otherwise.  The debugging variant walks the whole
 * list to verify the timer really is linked in before touching it.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	struct timer_list * p;

	p = &timer_head;
	save_flags(flags);
	cli();
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			timer->expires -= jiffies;	/* absolute -> relative */
			return 1;
		}
	}
	if (timer->next || timer->prev)
		printk("del_timer() called from %p with timer not initialized\n",
			__builtin_return_address(0));
	restore_flags(flags);
	return 0;
#else
	save_flags(flags);
	cli();
	if (timer->next) {
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		restore_flags(flags);
		timer->expires -= jiffies;	/* absolute -> relative */
		return 1;
	}
	restore_flags(flags);
	return 0;
#endif
}
372
unsigned long timer_active = 0;		/* bitmask of armed timer_table slots */
struct timer_struct timer_table[32];	/* old-style static kernel timers */

/*
 * Load averages over 1, 5 and 15 minutes, in FIXED_1 fixed-point
 * units, maintained from the timer tick by calc_load().
 */
unsigned long avenrun[3] = { 0,0,0 };
383
384
385
386
387 static unsigned long count_active_tasks(void)
388 {
389 struct task_struct **p;
390 unsigned long nr = 0;
391
392 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
393 if (*p && ((*p)->state == TASK_RUNNING ||
394 (*p)->state == TASK_UNINTERRUPTIBLE ||
395 (*p)->state == TASK_SWAPPING))
396 nr += FIXED_1;
397 return nr;
398 }
399
400 static inline void calc_load(void)
401 {
402 unsigned long active_tasks;
403 static int count = LOAD_FREQ;
404
405 if (count-- > 0)
406 return;
407 count = LOAD_FREQ;
408 active_tasks = count_active_tasks();
409 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
410 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
411 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
412 }
413
414
415
416
417
418
419
420
421
422
423
/*
 * Called once per second from do_timer() when xtime.tv_usec wraps.
 * Runs the NTP-style clock discipline: grows the maximum error bound,
 * derives the per-tick phase adjustment (time_adj) from time_offset
 * and time_freq, handles leap-second insertion/deletion at day
 * boundaries, and writes the system time back to the CMOS clock every
 * ~11 minutes while the clock is synchronized.
 */
static void second_overflow(void)
{
	long ltemp;

	static long last_rtc_update=0;
	extern int set_rtc_mmss(unsigned long);

	/* bump the maximum error, saturating at 0x70000000 */
	time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
	    0x70000000 : (time_maxerror + time_tolerance);

	/* turn the remaining offset into a per-tick slew; the +/-1 and
	   shift dance implements a rounded divide by the PLL gain */
	if (time_offset < 0) {
		ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
		time_adj = - time_adj;
	} else if (time_offset > 0) {
		ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
	} else {
		time_adj = 0;
	}

	/* add the frequency correction plus the constant tuning term */
	time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
	    + FINETUNE;

	/* leap-second handling at the 86400-second day boundary */
	switch (time_status) {
	case TIME_INS:
		/* insert 23:59:60 by stepping the clock back one second */
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_status = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 GMT\n");
		}
		break;

	case TIME_DEL:
		/* delete 23:59:59 by stepping the clock ahead one second */
		if (xtime.tv_sec % 86400 == 86399) {
			xtime.tv_sec++;
			time_status = TIME_OK;
			printk("Clock: deleting leap second 23:59:59 GMT\n");
		}
		break;

	case TIME_OOP:
		/* leap second in progress has completed */
		time_status = TIME_OK;
		break;
	}
	/* update the CMOS clock every ~11 minutes while synchronized;
	   on failure, retry roughly 60 seconds from now */
	if (time_status != TIME_BAD && xtime.tv_sec > last_rtc_update + 660)
	  if (set_rtc_mmss(xtime.tv_sec) == 0)
	    last_rtc_update = xtime.tv_sec;
	  else
	    last_rtc_update = xtime.tv_sec - 600;
}
482
483
484
485
/*
 * Timer bottom half: run expired dynamic timer_list entries (with
 * interrupts enabled during each handler), then any expired old-style
 * timer_table entries.
 */
static void timer_bh(void * unused)
{
	unsigned long mask;
	struct timer_struct *tp;
	struct timer_list * timer;

	cli();
	while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		/* unlink before calling: the handler may re-add the timer */
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();
		fn(data);
		cli();
	}
	sti();

	/* mask walks one bit per timer_table slot until past the highest
	   armed bit */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;	/* one-shot: disarm before running */
		tp->fn();
		sti();
	}
}
517
/* Bottom half that drains the timer task queue. */
void tqueue_bh(void * unused)
{
	run_task_queue(&tq_timer);
}
522
/* Bottom half that drains the immediate task queue. */
void immediate_bh(void * unused)
{
	run_task_queue(&tq_immediate);
}
527
528
529
530
531
532
533
/*
 * The timer interrupt handler.  Advances xtime by one tick (applying
 * the NTP phase adjustment and any adjtime() slew), updates jiffies
 * and the load average, does per-task CPU accounting, itimer and
 * RLIMIT_CPU processing, and marks the timer/tqueue bottom halves
 * when there is work for them.
 */
static void do_timer(int irq, struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;

	long ltemp, psecs;

	/* advance the phase accumulator and fold any whole microseconds
	   it has accumulated into this tick's xtime increment */
	time_phase += time_adj;
	if (time_phase < -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase > FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust)
	{
		/* spread an outstanding adjtime() correction over many
		   ticks, at most tickadj microseconds per tick, so the
		   clock slews smoothly instead of stepping */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		/* consume the part applied this tick */
		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;

	if (xtime.tv_usec >= 1000000) {
	    xtime.tv_usec -= 1000000;
	    xtime.tv_sec++;
	    second_overflow();
	}

	jiffies++;
	calc_load();
	if (user_mode(regs)) {
		current->utime++;
		if (current != task[0]) {
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}
		/* ITIMER_VIRTUAL counts down only in user mode */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		current->stime++;
		if(current != task[0])
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		/* kernel-mode profiling: bump the histogram slot for eip */
		if (prof_buffer && current != task[0]) {
			unsigned long eip = regs->eip;
			eip >>= CONFIG_PROFILE_SHIFT;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}

	/* enforce RLIMIT_CPU: SIGKILL at the hard limit, SIGXCPU when the
	   soft limit is hit and every 5 seconds thereafter */
	if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
	    (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
		send_sig(SIGKILL, current, 1);
	if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
	    (((current->stime + current->utime) % HZ) == 0)) {
		psecs = (current->stime + current->utime) / HZ;
		/* send SIGXCPU exactly when the soft limit is reached */
		if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
			send_sig(SIGXCPU, current, 1);
		/* ... and every five seconds past it */
		else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
		        ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
			send_sig(SIGXCPU, current, 1);
	}

	/* timeslice exhausted: request a reschedule (task[0] is exempt) */
	if (current != task[0] && 0 > --current->counter) {
		current->counter = 0;
		need_resched = 1;
	}

	/* ITIMER_PROF counts down in both user and system time */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
	/* any expired timer_table slot means the timer bh must run */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	cli();
	itimer_ticks++;
	if (itimer_ticks > itimer_next)
		need_resched = 1;	/* schedule() delivers the itimers */
	if (timer_head.next->expires < jiffies)
		mark_bh(TIMER_BH);
	if (tq_timer != &tq_last)
		mark_bh(TQUEUE_BH);
	sti();
}
661
662 asmlinkage int sys_alarm(long seconds)
663 {
664 struct itimerval it_new, it_old;
665
666 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
667 it_new.it_value.tv_sec = seconds;
668 it_new.it_value.tv_usec = 0;
669 _setitimer(ITIMER_REAL, &it_new, &it_old);
670 return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
671 }
672
/* Trivial getters returning identifiers of the current task. */

asmlinkage int sys_getpid(void)
{
	return current->pid;
}

/* NOTE(review): reads p_opptr (original parent) rather than p_pptr --
   presumably deliberate so ptrace reparenting is not visible; confirm. */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}

asmlinkage int sys_getuid(void)
{
	return current->uid;
}

asmlinkage int sys_geteuid(void)
{
	return current->euid;
}

asmlinkage int sys_getgid(void)
{
	return current->gid;
}

asmlinkage int sys_getegid(void)
{
	return current->egid;
}
702
703 asmlinkage int sys_nice(long increment)
704 {
705 int newprio;
706
707 if (increment < 0 && !suser())
708 return -EPERM;
709 newprio = current->priority - increment;
710 if (newprio < 1)
711 newprio = 1;
712 if (newprio > 35)
713 newprio = 35;
714 current->priority = newprio;
715 return 0;
716 }
717
/*
 * Print one debug line for a task: command name, slot number (negated
 * for the current task), state letter, saved PC, an estimate of free
 * kernel stack, pid, and parent/child/sibling pids.
 */
static void show_task(int nr,struct task_struct * p)
{
	unsigned long free;
	static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
	if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[p->state]);
	else
		printk(" ");
#ifdef __i386__
	if (p == current)
		printk(" current ");
	else
		printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);
#endif
	/* estimate free stack: scan up from the bottom of the kernel
	   stack page for the first nonzero word */
	for (free = 1; free < 1024 ; free++) {
		if (((unsigned long *)p->kernel_stack_page)[free])
			break;
	}
	/* free is in longs; << 2 converts to bytes (assumes 4-byte long) */
	printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
		printk(" ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);
	else
		printk(" ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);
	else
		printk("\n");
}
752
753 void show_state(void)
754 {
755 int i;
756
757 printk(" free sibling\n");
758 printk(" task PC stack pid father child younger older\n");
759 for (i=0 ; i<NR_TASKS ; i++)
760 if (task[i])
761 show_task(i,task[i]);
762 }
763
/*
 * Scheduler initialization: register the timer, tqueue and immediate
 * bottom-half handlers and claim the timer interrupt line.
 */
void sched_init(void)
{
	bh_base[TIMER_BH].routine = timer_bh;
	bh_base[TQUEUE_BH].routine = tqueue_bh;
	bh_base[IMMEDIATE_BH].routine = immediate_bh;
	if (request_irq(TIMER_IRQ, do_timer, 0, "timer") != 0)
		panic("Could not allocate timer IRQ!");
}
771 }