This source file includes following definitions.
- add_to_runqueue
- del_from_runqueue
- wake_up_process
- process_timeout
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- immediate_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/tqueue.h>
27 #include <linux/resource.h>
28 #include <linux/mm.h>
29
30 #include <asm/system.h>
31 #include <asm/io.h>
32 #include <asm/segment.h>
33 #include <asm/pgtable.h>
34
35 #define TIMER_IRQ 0
36
37 #include <linux/timex.h>
38
39
40
41
/* Microseconds per timer tick; HZ is the tick rate in Hz. */
42 long tick = 1000000 / HZ;
/* The current time of day; updated from the timer interrupt. */
43 volatile struct timeval xtime;
/* Max clock adjustment applied per tick by adjtime (usecs). */
44 int tickadj = 500/HZ;
45
/* Task queues run from the timer bh, the immediate bh, and schedule(). */
46 DECLARE_TASK_QUEUE(tq_timer);
47 DECLARE_TASK_QUEUE(tq_immediate);
48 DECLARE_TASK_QUEUE(tq_scheduler);
49
50
51
52
/* Clock-discipline (NTP-style) state variables; presumably per the
 * kernel phase-locked-loop clock model — see second_overflow(). */
53 int time_status = TIME_BAD;
54 long time_offset = 0;
55 long time_constant = 0;
56 long time_tolerance = MAXFREQ;
57 long time_precision = 1;
58 long time_maxerror = 0x70000000;
59 long time_esterror = 0x70000000;
60 long time_phase = 0;
61 long time_freq = 0;
62 long time_adj = 0;
63 long time_reftime = 0;
64
/* Remaining adjtime() correction and the slice applied this tick. */
65 long time_adjust = 0;
66 long time_adjust_step = 0;
67
/* Set when a reschedule is wanted; checked on return to user mode. */
68 int need_resched = 0;
69 unsigned long event = 0;
70
71 extern int _setitimer(int, struct itimerval *, struct itimerval *);
/* Kernel profiling buffer (CONFIG_PROFILE); filled in do_timer(). */
72 unsigned long * prof_buffer = NULL;
73 unsigned long prof_len = 0;
74
/* Convert a signal number (1-based) to its bit in a signal mask. */
75 #define _S(nr) (1<<((nr)-1))
76
77 extern void mem_use(void);
78
79 extern int timer_interrupt(void);
80
/* Static storage for task 0 (the idle/init task).  STACK_MAGIC in
 * the first word is used to detect stack overrun. */
81 static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
82 unsigned long init_user_stack[1024] = { STACK_MAGIC, };
83 static struct vm_area_struct init_mmap = INIT_MMAP;
84 static struct mm_struct init_mm = INIT_MM;
85 static struct fs_struct init_fs = INIT_FS;
86 static struct files_struct init_files = INIT_FILES;
87 static struct sigaction init_sigaction[32] = { {0,}, };
88 struct task_struct init_task = INIT_TASK;
89
/* Ticks since boot; incremented once per timer interrupt. */
90 unsigned long volatile jiffies=0;
91
/* The task currently executing on the CPU. */
92 struct task_struct *current = &init_task;
/* Owner of the FPU state, for lazy FPU context switching. */
93 struct task_struct *last_task_used_math = NULL;
94
/* Global task table; slot 0 is the idle task. */
95 struct task_struct * task[NR_TASKS] = {&init_task, };
96
/* System-wide accounting counters (context switches, cpu time, ...). */
97 struct kernel_stat kstat = { 0 };
/*
 * Add task p to the run-queue, a doubly linked circular list
 * anchored at init_task (the idle task).  Caller must have
 * interrupts disabled: the list is also touched from interrupt
 * context via wake_up_process().
 */
99 static inline void add_to_runqueue(struct task_struct * p)
100 {
101 #if 1
/* Debug check: a task already on the queue has non-NULL links. */
102 if (p->next_run || p->prev_run) {
103 printk("task already on run-queue\n");
104 return;
105 }
106 #endif
/* If the newcomer has a clearly larger remaining time-slice than
 * the running task, request a reschedule. */
107 if (p->counter > current->counter + 3)
108 need_resched = 1;
109 nr_running++;
/* Splice p in right after the list head (init_task). */
110 (p->next_run = init_task.next_run)->prev_run = p;
111 p->prev_run = &init_task;
112 init_task.next_run = p;
113 }
114
/*
 * Unlink task p from the circular run-queue.  Caller must have
 * interrupts disabled.  The idle task anchors the list and must
 * never be removed.
 */
115 static inline void del_from_runqueue(struct task_struct * p)
116 {
117 struct task_struct *next = p->next_run;
118 struct task_struct *prev = p->prev_run;
119
120 #if 1
/* Debug check: an unqueued task has NULL run-list links. */
121 if (!next || !prev) {
122 printk("task not on run-queue\n");
123 return;
124 }
125 #endif
126 if (p == &init_task) {
/* Rate-limit the complaint to the first five occurrences. */
127 static int nr = 0;
128 if (nr < 5) {
129 nr++;
130 printk("idle task may not sleep\n");
131 }
132 return;
133 }
134 nr_running--;
135 next->prev_run = prev;
136 prev->next_run = next;
/* Clear the links so the debug checks above can detect misuse. */
137 p->next_run = NULL;
138 p->prev_run = NULL;
139 }
140
141
142
143
144
145
146
147
148
/*
 * Make task p runnable and queue it if it is not already on the
 * run-queue.  Interrupts are disabled (and the previous flag state
 * restored) because this is called from both process and interrupt
 * context (e.g. process_timeout()).
 */
149 inline void wake_up_process(struct task_struct * p)
150 {
151 unsigned long flags;
152
153 save_flags(flags);
154 cli();
155 p->state = TASK_RUNNING;
/* next_run != NULL means p is already queued; don't add twice. */
156 if (!p->next_run)
157 add_to_runqueue(p);
158 restore_flags(flags);
159 }
160
161 static void process_timeout(unsigned long __data)
162 {
163 struct task_struct * p = (struct task_struct *) __data;
164
165 p->timeout = 0;
166 wake_up_process(p);
167 }
168
169
170
171
172
173
174
175
176
177
178
/*
 * The scheduler: pick the runnable task with the largest remaining
 * time-slice ('counter') and switch to it.  Must not be called
 * from interrupt context.
 */
179 asmlinkage void schedule(void)
180 {
181 int c;
182 struct task_struct * p;
183 struct task_struct * next;
184 unsigned long timeout = 0;
185
186
187
188 if (intr_count) {
189 printk("Aiee: scheduling in interrupt\n");
190 return;
191 }
/* Run work that drivers queued for scheduler time. */
192 run_task_queue(&tq_scheduler);
193
194 need_resched = 0;
195 cli();
/* Decide whether the current task stays on the run-queue. */
196 switch (current->state) {
197 case TASK_INTERRUPTIBLE:
/* A pending unblocked signal makes the sleeper runnable again. */
198 if (current->signal & ~current->blocked)
199 goto makerunnable;
200 timeout = current->timeout;
/* An already-expired timeout likewise keeps the task running;
 * a pending one is remembered and armed below. */
201 if (timeout && (timeout <= jiffies)) {
202 current->timeout = 0;
203 timeout = 0;
204 makerunnable:
205 current->state = TASK_RUNNING;
206 break;
207 }
208 default:
/* Any other non-running state: take it off the run-queue. */
209 del_from_runqueue(current);
210 case TASK_RUNNING:
211 }
212 p = init_task.next_run;
213 sti();
214
215
216
217
218
219
220
/* Scan the run-queue for the highest 'counter'; the idle task is
 * the fallback when nothing else is runnable. */
221 c = -1000;
222 next = &init_task;
223 while (p != &init_task) {
224 if (p->counter > c)
225 c = p->counter, next = p;
226 p = p->next_run;
227 }
228
229
/* All runnable tasks used up their slice: recharge every task,
 * carrying half the old counter and adding its priority. */
230 if (!c) {
231 for_each_task(p)
232 p->counter = (p->counter >> 1) + p->priority;
233 }
234 if (current != next) {
235 struct timer_list timer;
236
237 kstat.context_swtch++;
/* Arm a stack-local timer so an interruptible sleep with a
 * timeout is woken by process_timeout(); it is deleted again
 * once this task regains the CPU after switch_to(). */
238 if (timeout) {
239 init_timer(&timer);
240 timer.expires = timeout;
241 timer.data = (unsigned long) current;
242 timer.function = process_timeout;
243 add_timer(&timer);
244 }
245 switch_to(next);
246 if (timeout)
247 del_timer(&timer);
248 }
249 }
250
/*
 * pause(2): sleep interruptibly until a signal arrives.  The state
 * is set before schedule() so a wake-up cannot be missed; always
 * returns -ERESTARTNOHAND (the syscall is never auto-restarted).
 */
251 asmlinkage int sys_pause(void)
252 {
253 current->state = TASK_INTERRUPTIBLE;
254 schedule();
255 return -ERESTARTNOHAND;
256 }
257
258
259
260
261
262
263
264
265
/*
 * Wake every task on wait-queue q: both TASK_INTERRUPTIBLE and
 * TASK_UNINTERRUPTIBLE sleepers are made runnable.  The queue is
 * circular; a NULL 'next' indicates a corrupted queue and is
 * reported rather than followed.
 */
266 void wake_up(struct wait_queue **q)
267 {
268 struct wait_queue *tmp;
269 struct task_struct * p;
270
271 if (!q || !(tmp = *q))
272 return;
273 do {
274 if ((p = tmp->task) != NULL) {
275 if ((p->state == TASK_UNINTERRUPTIBLE) ||
276 (p->state == TASK_INTERRUPTIBLE))
277 wake_up_process(p);
278 }
/* Corruption guard: dump diagnostic state and bail out. */
279 if (!tmp->next) {
280 printk("wait_queue is bad (eip = %p)\n",
281 __builtin_return_address(0));
282 printk(" q = %p\n",q);
283 printk(" *q = %p\n",*q);
284 printk(" tmp = %p\n",tmp);
285 break;
286 }
287 tmp = tmp->next;
288 } while (tmp != *q);
289 }
290
/*
 * Like wake_up(), but wakes only TASK_INTERRUPTIBLE sleepers;
 * uninterruptible sleepers on the same queue are left alone.
 */
291 void wake_up_interruptible(struct wait_queue **q)
292 {
293 struct wait_queue *tmp;
294 struct task_struct * p;
295
296 if (!q || !(tmp = *q))
297 return;
298 do {
299 if ((p = tmp->task) != NULL) {
300 if (p->state == TASK_INTERRUPTIBLE)
301 wake_up_process(p);
302 }
/* Corruption guard: dump diagnostic state and bail out. */
303 if (!tmp->next) {
304 printk("wait_queue is bad (eip = %p)\n",
305 __builtin_return_address(0));
306 printk(" q = %p\n",q);
307 printk(" *q = %p\n",*q);
308 printk(" tmp = %p\n",tmp);
309 break;
310 }
311 tmp = tmp->next;
312 } while (tmp != *q);
313 }
314
/*
 * Semaphore down() slow path: sleep uninterruptibly until the
 * count becomes positive.  The task state is set back to
 * TASK_UNINTERRUPTIBLE before each re-test of sem->count so a
 * wake-up arriving between the test and schedule() is not lost.
 */
315 void __down(struct semaphore * sem)
316 {
317 struct wait_queue wait = { current, NULL };
318 add_wait_queue(&sem->wait, &wait);
319 current->state = TASK_UNINTERRUPTIBLE;
320 while (sem->count <= 0) {
321 schedule();
322 current->state = TASK_UNINTERRUPTIBLE;
323 }
324 current->state = TASK_RUNNING;
325 remove_wait_queue(&sem->wait, &wait);
326 }
327
/*
 * Common helper for sleep_on()/interruptible_sleep_on(): put the
 * current task on wait-queue p in the given state, enable
 * interrupts so the wake-up source can run, and schedule.  The
 * wait-queue entry lives on this stack frame and is removed again
 * before returning; the caller's interrupt flag is restored.
 */
328 static inline void __sleep_on(struct wait_queue **p, int state)
329 {
330 unsigned long flags;
331 struct wait_queue wait = { current, NULL };
332
333 if (!p)
334 return;
/* The idle task must never block. */
335 if (current == task[0])
336 panic("task[0] trying to sleep");
/* State is set before queueing/schedule so a wake-up between here
 * and schedule() simply flips us back to TASK_RUNNING. */
337 current->state = state;
338 add_wait_queue(p, &wait);
339 save_flags(flags);
340 sti();
341 schedule();
342 remove_wait_queue(p, &wait);
343 restore_flags(flags);
344 }
345
/* Sleep on queue p until woken; signals can interrupt the sleep. */
346 void interruptible_sleep_on(struct wait_queue **p)
347 {
348 __sleep_on(p,TASK_INTERRUPTIBLE);
349 }
350
/* Sleep on queue p until explicitly woken; signals are ignored. */
351 void sleep_on(struct wait_queue **p)
352 {
353 __sleep_on(p,TASK_UNINTERRUPTIBLE);
354 }
355
356
357
358
359
/* Dummy head of the kernel timer list, kept sorted by ascending
 * expiry; the head's own expiry (~0) acts as the stop sentinel for
 * the insertion scan in add_timer(). */
360 static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
/* Compile-time switch: keep extra sanity checks in add/del_timer. */
361 #define SLOW_BUT_DEBUGGING_TIMERS 1

/*
 * Insert 'timer' into the sorted timer list.  The list walk and
 * splice run with interrupts disabled since timer_bh()/do_timer()
 * touch the same list from interrupt context.
 */
363 void add_timer(struct timer_list * timer)
364 {
365 unsigned long flags;
366 struct timer_list *p;
367
368 #if SLOW_BUT_DEBUGGING_TIMERS
/* A timer being added must not already be linked anywhere. */
369 if (timer->next || timer->prev) {
370 printk("add_timer() called with non-zero list from %p\n",
371 __builtin_return_address(0));
372 return;
373 }
374 #endif
375 p = &timer_head;
376 save_flags(flags);
377 cli();
/* Find the first entry expiring no earlier than this timer; the
 * sentinel head (~0 expiry) guarantees termination. */
378 do {
379 p = p->next;
380 } while (timer->expires > p->expires);
/* Splice the timer in just before p. */
381 timer->next = p;
382 timer->prev = p->prev;
383 p->prev = timer;
384 timer->prev->next = timer;
385 restore_flags(flags);
386 }
387
/*
 * Remove 'timer' from the timer list if it is pending.  Returns 1
 * if the timer was found and unlinked, 0 otherwise.  Runs with
 * interrupts disabled.  The debugging variant walks the whole list
 * to verify the timer really is linked; the fast variant trusts
 * the timer's own 'next' pointer.
 */
388 int del_timer(struct timer_list * timer)
389 {
390 unsigned long flags;
391 #if SLOW_BUT_DEBUGGING_TIMERS
392 struct timer_list * p;
393
394 p = &timer_head;
395 save_flags(flags);
396 cli();
397 while ((p = p->next) != &timer_head) {
398 if (p == timer) {
399 timer->next->prev = timer->prev;
400 timer->prev->next = timer->next;
/* Reset the links so a later add_timer() passes its sanity check. */
401 timer->next = timer->prev = NULL;
402 restore_flags(flags);
403 return 1;
404 }
405 }
/* Not on the list but links are set: caller passed a stale timer. */
406 if (timer->next || timer->prev)
407 printk("del_timer() called from %p with timer not initialized\n",
408 __builtin_return_address(0));
409 restore_flags(flags);
410 return 0;
411 #else
412 save_flags(flags);
413 cli();
414 if (timer->next) {
415 timer->next->prev = timer->prev;
416 timer->prev->next = timer->next;
417 timer->next = timer->prev = NULL;
418 restore_flags(flags);
419 return 1;
420 }
421 restore_flags(flags);
422 return 0;
423 #endif
424 }
425
/* Bitmask of active slots in the old-style static timer table;
 * bit n corresponds to timer_table[n]. */
426 unsigned long timer_active = 0;
427 struct timer_struct timer_table[32];
428
429
430
431
432
433
434
/* Load averages over three intervals, in FIXED_1 fixed point;
 * maintained by calc_load(). */
435 unsigned long avenrun[3] = { 0,0,0 };
436
437
438
439
440 static unsigned long count_active_tasks(void)
441 {
442 struct task_struct **p;
443 unsigned long nr = 0;
444
445 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
446 if (*p && ((*p)->state == TASK_RUNNING ||
447 (*p)->state == TASK_UNINTERRUPTIBLE ||
448 (*p)->state == TASK_SWAPPING))
449 nr += FIXED_1;
450 return nr;
451 }
452
/*
 * Update the 1/5/15 load averages.  Called once per tick from
 * do_timer(); the static countdown makes the real work happen only
 * every LOAD_FREQ ticks.  CALC_LOAD applies an exponential decay
 * (EXP_1/EXP_5/EXP_15) toward the current active-task count.
 */
453 static inline void calc_load(void)
454 {
455 unsigned long active_tasks;
456 static int count = LOAD_FREQ;
457
458 if (count-- > 0)
459 return;
460 count = LOAD_FREQ;
461 active_tasks = count_active_tasks();
462 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
463 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
464 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
465 }
466
467
468
469
470
471
472
473
474
475
476
/*
 * Run once per second (when tv_usec wraps in do_timer) to service
 * the kernel clock-discipline state: grow the maximum error bound,
 * convert the remaining phase offset into a per-tick adjustment,
 * and handle pending leap seconds.  NOTE(review): this appears to
 * follow the NTP phase-locked-loop kernel model (Mills/RFC 1589) —
 * confirm against that specification before changing the shifts.
 */
477 static void second_overflow(void)
478 {
479 long ltemp;
480
/* Error bound grows by the frequency tolerance each second,
 * saturating at 0x70000000. */
482 time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
483 0x70000000 : (time_maxerror + time_tolerance);
484
/* Turn a fraction of the remaining offset into time_adj, the
 * signed per-tick phase correction, and consume that much of
 * time_offset.  Negative and positive offsets are symmetric. */
486 if (time_offset < 0) {
487 ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
488 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
489 time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
490 time_adj = - time_adj;
491 } else if (time_offset > 0) {
492 ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
493 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
494 time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
495 } else {
496 time_adj = 0;
497 }
498
/* Fold in the frequency correction and a compile-time fine tune. */
499 time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
500 + FINETUNE;
501
/* Leap-second state machine, keyed to midnight UTC. */
503 switch (time_status) {
504 case TIME_INS:
/* Insert a leap second at the end of the day (23:59:60). */
506 if (xtime.tv_sec % 86400 == 0) {
507 xtime.tv_sec--;
508 time_status = TIME_OOP;
509 printk("Clock: inserting leap second 23:59:60 UTC\n");
510 }
511 break;
512
513 case TIME_DEL:
/* Delete the last second of the day (skip 23:59:59). */
515 if (xtime.tv_sec % 86400 == 86399) {
516 xtime.tv_sec++;
517 time_status = TIME_OK;
518 printk("Clock: deleting leap second 23:59:59 UTC\n");
519 }
520 break;
521
522 case TIME_OOP:
/* Leap second done; back to normal. */
523 time_status = TIME_OK;
524 break;
525 }
526 }
527
528
529
530
/*
 * Timer bottom half: run expired dynamic timers from the sorted
 * timer list, then run expired old-style timer_table entries.
 * Each dynamic timer is unlinked under cli(), then its handler is
 * called with interrupts enabled.
 */
531 static void timer_bh(void * unused)
532 {
533 unsigned long mask;
534 struct timer_struct *tp;
535 struct timer_list * timer;
536
537 cli();
/* List is sorted, so we can stop at the first unexpired timer. */
538 while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
/* Snapshot fn/data before unlinking: after sti() the timer
 * structure may be re-armed or freed by its own handler. */
539 void (*fn)(unsigned long) = timer->function;
540 unsigned long data = timer->data;
541 timer->next->prev = timer->prev;
542 timer->prev->next = timer->next;
543 timer->next = timer->prev = NULL;
544 sti();
545 fn(data);
546 cli();
547 }
548 sti();
549
/* Old-style static timers: one bit of timer_active per slot. */
550 for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
551 if (mask > timer_active)
552 break;
553 if (!(mask & timer_active))
554 continue;
555 if (tp->expires > jiffies)
556 continue;
/* One-shot: clear the active bit before invoking the handler. */
557 timer_active &= ~mask;
558 tp->fn();
559 sti();
560 }
561 }
562
/* Bottom half that drains the tq_timer task queue (marked from
 * do_timer() when the queue is non-empty). */
563 void tqueue_bh(void * unused)
564 {
565 run_task_queue(&tq_timer);
566 }
567
/* Bottom half that drains the tq_immediate task queue. */
568 void immediate_bh(void * unused)
569 {
570 run_task_queue(&tq_immediate);
571 }
572
573
574
575
576
577
578
/*
 * The timer interrupt handler, run once per tick: advance xtime
 * with the clock-discipline corrections, update the RTC when the
 * software clock is trusted, do per-task CPU accounting, enforce
 * RLIMIT_CPU, expire the running task's time-slice, and mark the
 * timer/tqueue bottom halves when there is expired work.
 */
579 static void do_timer(int irq, struct pt_regs * regs)
580 {
581 unsigned long mask;
582 struct timer_struct *tp;
583
584 static long last_rtc_update=0;
585 extern int set_rtc_mmss(unsigned long);
586
587 long ltemp, psecs;
588
/* Accumulate the per-tick phase adjustment; when it exceeds one
 * scaled microsecond in either direction, fold whole microseconds
 * into xtime together with the tick and any adjtime() step. */
592 time_phase += time_adj;
593 if (time_phase < -FINEUSEC) {
594 ltemp = -time_phase >> SHIFT_SCALE;
595 time_phase += ltemp << SHIFT_SCALE;
596 xtime.tv_usec += tick + time_adjust_step - ltemp;
597 }
598 else if (time_phase > FINEUSEC) {
599 ltemp = time_phase >> SHIFT_SCALE;
600 time_phase -= ltemp << SHIFT_SCALE;
601 xtime.tv_usec += tick + time_adjust_step + ltemp;
602 } else
603 xtime.tv_usec += tick + time_adjust_step;
604
605 if (time_adjust)
606 {
607
608
609
610
611
612
613
614
615
/* Apply the outstanding adjtime() correction gradually: at most
 * +/- tickadj microseconds per tick. */
616 if (time_adjust > tickadj)
617 time_adjust_step = tickadj;
618 else if (time_adjust < -tickadj)
619 time_adjust_step = -tickadj;
620 else
621 time_adjust_step = time_adjust;
622
623
624 time_adjust -= time_adjust_step;
625 }
626 else
627 time_adjust_step = 0;
628
/* Carry microseconds into seconds; once per second run the
 * clock-discipline housekeeping. */
629 if (xtime.tv_usec >= 1000000) {
630 xtime.tv_usec -= 1000000;
631 xtime.tv_sec++;
632 second_overflow();
633 }
634
/* If the software clock is synchronized, write it back to the CMOS
 * RTC about every 11 minutes, near the middle of a second (the RTC
 * update presumably lands on the half-second — see set_rtc_mmss).
 * On failure, retry roughly 60 seconds later (660 - 600). */
639 if (time_status != TIME_BAD && xtime.tv_sec > last_rtc_update + 660 &&
640 xtime.tv_usec > 500000 - (tick >> 1) &&
641 xtime.tv_usec < 500000 + (tick >> 1))
642 if (set_rtc_mmss(xtime.tv_sec) == 0)
643 last_rtc_update = xtime.tv_sec;
644 else
645 last_rtc_update = xtime.tv_sec - 600;
646
647 jiffies++;
648 calc_load();
/* CPU-time accounting: charge user or system time depending on the
 * interrupted context; the idle task (task[0]) is not charged. */
649 if (user_mode(regs)) {
650 current->utime++;
651 if (current != task[0]) {
/* priority < 15 is treated as a niced task for accounting. */
652 if (current->priority < 15)
653 kstat.cpu_nice++;
654 else
655 kstat.cpu_user++;
656 }
657
/* ITIMER_VIRTUAL ticks only in user mode. */
658 if (current->it_virt_value && !(--current->it_virt_value)) {
659 current->it_virt_value = current->it_virt_incr;
660 send_sig(SIGVTALRM,current,1);
661 }
662 } else {
663 current->stime++;
664 if(current != task[0])
665 kstat.cpu_system++;
666 #ifdef CONFIG_PROFILE
/* Kernel profiling: bucket the interrupted kernel PC. */
667 if (prof_buffer && current != task[0]) {
668 extern int _stext;
669 unsigned long eip = regs->eip - (unsigned long) &_stext;
670 eip >>= CONFIG_PROFILE_SHIFT;
671 if (eip < prof_len)
672 prof_buffer[eip]++;
673 }
674 #endif
675 }
676
/* RLIMIT_CPU: hard limit kills, soft limit sends SIGXCPU once and
 * then every 5 seconds past the limit. */
679 if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
680 (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
681 send_sig(SIGKILL, current, 1);
682 if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
683 (((current->stime + current->utime) % HZ) == 0)) {
684 psecs = (current->stime + current->utime) / HZ;
685
686 if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
687 send_sig(SIGXCPU, current, 1);
688
689 else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
690 ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
691 send_sig(SIGXCPU, current, 1);
692 }
693
/* Time-slice accounting: when the counter runs out, request a
 * reschedule (the idle task never reschedules this way). */
694 if (current != task[0] && 0 > --current->counter) {
695 current->counter = 0;
696 need_resched = 1;
697 }
698
/* ITIMER_PROF ticks in both user and system time. */
699 if (current->it_prof_value && !(--current->it_prof_value)) {
700 current->it_prof_value = current->it_prof_incr;
701 send_sig(SIGPROF,current,1);
702 }
/* If any static timer has expired, schedule the timer bh; the
 * actual handlers run from timer_bh(), not in this interrupt. */
703 for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
704 if (mask > timer_active)
705 break;
706 if (!(mask & timer_active))
707 continue;
708 if (tp->expires > jiffies)
709 continue;
710 mark_bh(TIMER_BH);
711 }
712 cli();
/* Same for dynamic timers and the tq_timer task queue. */
713 if (timer_head.next->expires < jiffies)
714 mark_bh(TIMER_BH);
715 if (tq_timer != &tq_last)
716 mark_bh(TQUEUE_BH);
717 sti();
718 }
719
720 asmlinkage unsigned int sys_alarm(unsigned int seconds)
721 {
722 struct itimerval it_new, it_old;
723 unsigned int oldalarm;
724
725 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
726 it_new.it_value.tv_sec = seconds;
727 it_new.it_value.tv_usec = 0;
728 _setitimer(ITIMER_REAL, &it_new, &it_old);
729 oldalarm = it_old.it_value.tv_sec;
730
731
732 if (it_old.it_value.tv_usec)
733 oldalarm++;
734 return oldalarm;
735 }
736
737 asmlinkage int sys_getpid(void)
738 {
739 return current->pid;
740 }
741
742 asmlinkage int sys_getppid(void)
743 {
744 return current->p_opptr->pid;
745 }
746
747 asmlinkage int sys_getuid(void)
748 {
749 return current->uid;
750 }
751
752 asmlinkage int sys_geteuid(void)
753 {
754 return current->euid;
755 }
756
757 asmlinkage int sys_getgid(void)
758 {
759 return current->gid;
760 }
761
762 asmlinkage int sys_getegid(void)
763 {
764 return current->egid;
765 }
766
767 asmlinkage int sys_nice(long increment)
768 {
769 int newprio;
770
771 if (increment < 0 && !suser())
772 return -EPERM;
773 newprio = current->priority - increment;
774 if (newprio < 1)
775 newprio = 1;
776 if (newprio > 35)
777 newprio = 35;
778 current->priority = newprio;
779 return 0;
780 }
781
/*
 * Print one line of per-task debug state for show_state(): name,
 * slot number (negated for the current task), state letter, saved
 * PC, free kernel stack estimate, and the pid family tree.
 */
782 static void show_task(int nr,struct task_struct * p)
783 {
784 unsigned long free;
785 static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
786
787 printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
788 if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
789 printk(stat_nam[p->state]);
790 else
791 printk(" ");
/* Pointer width decides the PC column format (32- vs 64-bit). */
792 #if ((~0UL) == 0xffffffff)
793 if (p == current)
794 printk(" current ");
795 else
796 printk(" %08lX ", thread_saved_pc(&p->tss));
797 #else
798 if (p == current)
799 printk(" current task ");
800 else
801 printk(" %016lx ", thread_saved_pc(&p->tss));
802 #endif
/* Estimate unused kernel stack by scanning for the first non-zero
 * word from the bottom of the stack page. */
803 for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
804 if (((unsigned long *)p->kernel_stack_page)[free])
805 break;
806 }
807 printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
/* Family links: youngest child, younger sibling, older sibling. */
808 if (p->p_cptr)
809 printk("%5d ", p->p_cptr->pid);
810 else
811 printk(" ");
812 if (p->p_ysptr)
813 printk("%7d", p->p_ysptr->pid);
814 else
815 printk(" ");
816 if (p->p_osptr)
817 printk(" %5d\n", p->p_osptr->pid);
818 else
819 printk("\n");
820 }
821
/*
 * Dump a one-line summary of every task in the task table,
 * preceded by a column header matched to the pointer width used by
 * show_task().
 */
822 void show_state(void)
823 {
824 int i;
825
826 #if ((~0UL) == 0xffffffff)
827 printk("\n"
828 " free sibling\n");
829 printk(" task PC stack pid father child younger older\n");
830 #else
831 printk("\n"
832 " free sibling\n");
833 printk(" task PC stack pid father child younger older\n");
834 #endif
835 for (i=0 ; i<NR_TASKS ; i++)
836 if (task[i])
837 show_task(i,task[i]);
838 }
839
/*
 * Boot-time scheduler setup: install the timer, tqueue and
 * immediate bottom-half handlers, claim the timer interrupt
 * (TIMER_IRQ / IRQ 0), and enable the bottom halves.  Panics if
 * the timer IRQ cannot be obtained, since nothing works without a
 * tick.
 */
840 void sched_init(void)
841 {
842 bh_base[TIMER_BH].routine = timer_bh;
843 bh_base[TQUEUE_BH].routine = tqueue_bh;
844 bh_base[IMMEDIATE_BH].routine = immediate_bh;
845 if (request_irq(TIMER_IRQ, do_timer, 0, "timer") != 0)
846 panic("Could not allocate timer IRQ!");
847 enable_bh(TIMER_BH);
848 enable_bh(TQUEUE_BH);
849 enable_bh(IMMEDIATE_BH);
850 }