This source file includes the following definitions:
- add_to_runqueue
- del_from_runqueue
- wake_up_process
- process_timeout
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- immediate_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
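/*
 * Scheduler and timer implementation: scheduling primitives (schedule,
 * sleep_on, wake_up, the timer lists and bottom halves) plus a number of
 * simple system calls such as getpid(), alarm() and nice().
 */
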
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/tqueue.h>
27 #include <linux/resource.h>
28 #include <linux/mm.h>
29
30 #include <asm/system.h>
31 #include <asm/io.h>
32 #include <asm/segment.h>
33 #include <asm/pgtable.h>
34
35 #define TIMER_IRQ 0
36
37 #include <linux/timex.h>
38
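/*
 * Kernel time-keeping variables: 'tick' is the timer interrupt period in
 * microseconds, 'xtime' is the current time of day, and 'tickadj' is the
 * maximum slew applied per tick while an adjtime() adjustment is pending.
 */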
42 long tick = 1000000 / HZ;
43 volatile struct timeval xtime;
44 int tickadj = 500/HZ;
45
46 DECLARE_TASK_QUEUE(tq_timer);
47 DECLARE_TASK_QUEUE(tq_immediate);
48 DECLARE_TASK_QUEUE(tq_scheduler);
49
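/*
 * Phase-locked loop state for NTP-style time discipline (clock status,
 * phase and frequency offsets, error estimates); used by second_overflow()
 * and do_timer() below, and presumably updated from adjtimex().
 */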
53 int time_status = TIME_BAD;
54 long time_offset = 0;
55 long time_constant = 0;
56 long time_tolerance = MAXFREQ;
57 long time_precision = 1;
58 long time_maxerror = 0x70000000;
59 long time_esterror = 0x70000000;
60 long time_phase = 0;
61 long time_freq = 0;
62 long time_adj = 0;
63 long time_reftime = 0;
64
65 long time_adjust = 0;
66 long time_adjust_step = 0;
67
68 int need_resched = 0;
69 unsigned long event = 0;
70
71 extern int _setitimer(int, struct itimerval *, struct itimerval *);
72 unsigned long * prof_buffer = NULL;
73 unsigned long prof_len = 0;
74
75 #define _S(nr) (1<<((nr)-1))
76
77 extern void mem_use(void);
78
79 extern int timer_interrupt(void);
80
81 static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
82 unsigned long init_user_stack[1024] = { STACK_MAGIC, };
83 static struct vm_area_struct init_mmap = INIT_MMAP;
84 struct task_struct init_task = INIT_TASK;
85
86 unsigned long volatile jiffies=0;
87
88 struct task_struct *current = &init_task;
89 struct task_struct *last_task_used_math = NULL;
90
91 struct task_struct * task[NR_TASKS] = {&init_task, };
92
93 struct kernel_stat kstat = { 0 };
94
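/*
 * The run queue is a circular, doubly linked list threaded through
 * next_run/prev_run with init_task (the idle task) as its head.  New
 * runnable tasks are inserted right after the head; a reschedule is
 * requested when the newcomer has noticeably more time left than the
 * current task.  The idle task itself may never be taken off the queue.
 */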
95 static inline void add_to_runqueue(struct task_struct * p)
96 {
97 #if 1
98 if (p->next_run || p->prev_run) {
99 printk("task already on run-queue\n");
100 return;
101 }
102 #endif
103 if (p->counter > current->counter + 3)
104 need_resched = 1;
105 nr_running++;
106 (p->next_run = init_task.next_run)->prev_run = p;
107 p->prev_run = &init_task;
108 init_task.next_run = p;
109 }
110
111 static inline void del_from_runqueue(struct task_struct * p)
112 {
113 struct task_struct *next = p->next_run;
114 struct task_struct *prev = p->prev_run;
115
116 #if 1
117 if (!next || !prev) {
118 printk("task not on run-queue\n");
119 return;
120 }
121 #endif
122 if (p == &init_task) {
123 static int nr = 0;
124 if (nr < 5) {
125 nr++;
126 printk("idle task may not sleep\n");
127 }
128 return;
129 }
130 nr_running--;
131 next->prev_run = prev;
132 prev->next_run = next;
133 p->next_run = NULL;
134 p->prev_run = NULL;
135 }
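
/*
 * Wake up a process: mark it TASK_RUNNING and put it on the run queue if
 * it is not already there.  Interrupts are blocked around the update, so
 * this is safe to call from interrupt handlers and from timer callbacks
 * such as process_timeout() below.
 */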
145 inline void wake_up_process(struct task_struct * p)
146 {
147 unsigned long flags;
148
149 save_flags(flags);
150 cli();
151 p->state = TASK_RUNNING;
152 if (!p->next_run)
153 add_to_runqueue(p);
154 restore_flags(flags);
155 }
156
157 static void process_timeout(unsigned long __data)
158 {
159 struct task_struct * p = (struct task_struct *) __data;
160
161 p->timeout = 0;
162 wake_up_process(p);
163 }
164
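/*
 * 'schedule()' is the scheduler function.  It makes interruptible tasks
 * with a pending unblocked signal or an expired timeout runnable again,
 * removes any other non-running task from the run queue, and then picks
 * the runnable task with the largest remaining time slice ('counter').
 * Scheduling from interrupt context is a bug, which is why intr_count is
 * checked (and noisily reset) below.
 */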
175 asmlinkage void schedule(void)
176 {
177 int c;
178 struct task_struct * p;
179 struct task_struct * next;
180 unsigned long timeout = 0;
181
182
183
184 if (intr_count) {
185 printk("Aiee: scheduling in interrupt\n");
186 intr_count = 0;
187 }
188 run_task_queue(&tq_scheduler);
189
190 need_resched = 0;
191 cli();
192 switch (current->state) {
193 case TASK_INTERRUPTIBLE:
194 if (current->signal & ~current->blocked)
195 goto makerunnable;
196 timeout = current->timeout;
197 if (timeout && (timeout <= jiffies)) {
198 current->timeout = 0;
199 timeout = 0;
200 makerunnable:
201 current->state = TASK_RUNNING;
202 break;
203 }
204 default:
205 del_from_runqueue(current);
206 case TASK_RUNNING:
207 }
208 p = init_task.next_run;
209 sti();
210
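/*
 * This is the scheduler proper: pick the runnable task with the largest
 * counter.  Interrupts are enabled again, so new tasks may be added to
 * the front of the run queue while we scan it.  If every runnable task
 * has used up its counter, recharge all counters from each task's
 * priority.
 */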
217 c = -1000;
218 next = &init_task;
219 while (p != &init_task) {
220 if (p->counter > c)
221 c = p->counter, next = p;
222 p = p->next_run;
223 }
224
225
226 if (!c) {
227 for_each_task(p)
228 p->counter = (p->counter >> 1) + p->priority;
229 }
230 if (current != next) {
231 struct timer_list timer;
232
233 kstat.context_swtch++;
234 if (timeout) {
235 init_timer(&timer);
236 timer.expires = timeout - jiffies;
237 timer.data = (unsigned long) current;
238 timer.function = process_timeout;
239 add_timer(&timer);
240 }
241 switch_to(next);
242 if (timeout)
243 del_timer(&timer);
244 }
245 }
246
247 asmlinkage int sys_pause(void)
248 {
249 current->state = TASK_INTERRUPTIBLE;
250 schedule();
251 return -ERESTARTNOHAND;
252 }
253
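/*
 * wake_up() wakes every sleeping (interruptible or uninterruptible) task
 * on the wait queue; wake_up_interruptible() only wakes the interruptible
 * ones.  Stopped tasks are left alone.  A corrupted (non-circular) queue
 * is reported rather than followed off the end.
 */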
262 void wake_up(struct wait_queue **q)
263 {
264 struct wait_queue *tmp;
265 struct task_struct * p;
266
267 if (!q || !(tmp = *q))
268 return;
269 do {
270 if ((p = tmp->task) != NULL) {
271 if ((p->state == TASK_UNINTERRUPTIBLE) ||
272 (p->state == TASK_INTERRUPTIBLE))
273 wake_up_process(p);
274 }
275 if (!tmp->next) {
276 printk("wait_queue is bad (eip = %p)\n",
277 __builtin_return_address(0));
278 printk(" q = %p\n",q);
279 printk(" *q = %p\n",*q);
280 printk(" tmp = %p\n",tmp);
281 break;
282 }
283 tmp = tmp->next;
284 } while (tmp != *q);
285 }
286
287 void wake_up_interruptible(struct wait_queue **q)
288 {
289 struct wait_queue *tmp;
290 struct task_struct * p;
291
292 if (!q || !(tmp = *q))
293 return;
294 do {
295 if ((p = tmp->task) != NULL) {
296 if (p->state == TASK_INTERRUPTIBLE)
297 wake_up_process(p);
298 }
299 if (!tmp->next) {
300 printk("wait_queue is bad (eip = %p)\n",
301 __builtin_return_address(0));
302 printk(" q = %p\n",q);
303 printk(" *q = %p\n",*q);
304 printk(" tmp = %p\n",tmp);
305 break;
306 }
307 tmp = tmp->next;
308 } while (tmp != *q);
309 }
310
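/*
 * __down() is the semaphore slow path, presumably entered from down()
 * when the semaphore is not immediately available: sleep uninterruptibly
 * on sem->wait until the count becomes positive (up() is expected to
 * raise the count and wake the queue).
 */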
311 void __down(struct semaphore * sem)
312 {
313 struct wait_queue wait = { current, NULL };
314 add_wait_queue(&sem->wait, &wait);
315 current->state = TASK_UNINTERRUPTIBLE;
316 while (sem->count <= 0) {
317 schedule();
318 current->state = TASK_UNINTERRUPTIBLE;
319 }
320 current->state = TASK_RUNNING;
321 remove_wait_queue(&sem->wait, &wait);
322 }
323
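/*
 * __sleep_on() puts the current task to sleep on the given wait queue in
 * the given state; interrupts are re-enabled while sleeping and the old
 * flags are restored afterwards.  The idle task (task[0]) must never
 * sleep.
 */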
324 static inline void __sleep_on(struct wait_queue **p, int state)
325 {
326 unsigned long flags;
327 struct wait_queue wait = { current, NULL };
328
329 if (!p)
330 return;
331 if (current == task[0])
332 panic("task[0] trying to sleep");
333 current->state = state;
334 add_wait_queue(p, &wait);
335 save_flags(flags);
336 sti();
337 schedule();
338 remove_wait_queue(p, &wait);
339 restore_flags(flags);
340 }
341
342 void interruptible_sleep_on(struct wait_queue **p)
343 {
344 __sleep_on(p,TASK_INTERRUPTIBLE);
345 }
346
347 void sleep_on(struct wait_queue **p)
348 {
349 __sleep_on(p,TASK_UNINTERRUPTIBLE);
350 }
351
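/*
 * Head of the doubly linked, expiry-sorted timer list.  Its 'expires'
 * field is ~0 (the largest possible value), so the insertion scan in
 * add_timer() below always terminates at the head.
 */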
356 static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
357 #define SLOW_BUT_DEBUGGING_TIMERS 1
358
359 void add_timer(struct timer_list * timer)
360 {
361 unsigned long flags;
362 struct timer_list *p;
363
364 #if SLOW_BUT_DEBUGGING_TIMERS
365 if (timer->next || timer->prev) {
366 printk("add_timer() called with non-zero list from %p\n",
367 __builtin_return_address(0));
368 return;
369 }
370 #endif
371 p = &timer_head;
372 timer->expires += jiffies;
373 save_flags(flags);
374 cli();
375 do {
376 p = p->next;
377 } while (timer->expires > p->expires);
378 timer->next = p;
379 timer->prev = p->prev;
380 p->prev = timer;
381 timer->prev->next = timer;
382 restore_flags(flags);
383 }
384
385 int del_timer(struct timer_list * timer)
386 {
387 unsigned long flags;
388 #if SLOW_BUT_DEBUGGING_TIMERS
389 struct timer_list * p;
390
391 p = &timer_head;
392 save_flags(flags);
393 cli();
394 while ((p = p->next) != &timer_head) {
395 if (p == timer) {
396 timer->next->prev = timer->prev;
397 timer->prev->next = timer->next;
398 timer->next = timer->prev = NULL;
399 restore_flags(flags);
400 timer->expires -= jiffies;
401 return 1;
402 }
403 }
404 if (timer->next || timer->prev)
405 printk("del_timer() called from %p with timer not initialized\n",
406 __builtin_return_address(0));
407 restore_flags(flags);
408 return 0;
409 #else
410 save_flags(flags);
411 cli();
412 if (timer->next) {
413 timer->next->prev = timer->prev;
414 timer->prev->next = timer->next;
415 timer->next = timer->prev = NULL;
416 restore_flags(flags);
417 timer->expires -= jiffies;
418 return 1;
419 }
420 restore_flags(flags);
421 return 0;
422 #endif
423 }
424
425 unsigned long timer_active = 0;
426 struct timer_struct timer_table[32];
427
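/*
 * avenrun[] holds the 1, 5 and 15 minute load averages as fixed-point
 * numbers (FIXED_1 scaling).  count_active_tasks() counts running,
 * uninterruptible and swapping tasks; calc_load() folds that count into
 * the averages once every LOAD_FREQ ticks.
 */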
434 unsigned long avenrun[3] = { 0,0,0 };
435
439 static unsigned long count_active_tasks(void)
440 {
441 struct task_struct **p;
442 unsigned long nr = 0;
443
444 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
445 if (*p && ((*p)->state == TASK_RUNNING ||
446 (*p)->state == TASK_UNINTERRUPTIBLE ||
447 (*p)->state == TASK_SWAPPING))
448 nr += FIXED_1;
449 return nr;
450 }
451
452 static inline void calc_load(void)
453 {
454 unsigned long active_tasks;
455 static int count = LOAD_FREQ;
456
457 if (count-- > 0)
458 return;
459 count = LOAD_FREQ;
460 active_tasks = count_active_tasks();
461 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
462 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
463 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
464 }
465
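/*
 * second_overflow() runs once per second, from do_timer(), when the
 * microsecond counter wraps.  It bumps the estimated maximum clock error,
 * recomputes the per-tick phase adjustment (time_adj) from the
 * phase-locked loop state, and inserts or deletes a leap second at the
 * end of the UTC day when time_status asks for one.
 */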
476 static void second_overflow(void)
477 {
478 long ltemp;
479
480
481 time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
482 0x70000000 : (time_maxerror + time_tolerance);
483
484
485 if (time_offset < 0) {
486 ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
487 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
488 time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
489 time_adj = - time_adj;
490 } else if (time_offset > 0) {
491 ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
492 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
493 time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
494 } else {
495 time_adj = 0;
496 }
497
498 time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
499 + FINETUNE;
500
501
502 switch (time_status) {
503 case TIME_INS:
504
505 if (xtime.tv_sec % 86400 == 0) {
506 xtime.tv_sec--;
507 time_status = TIME_OOP;
508 printk("Clock: inserting leap second 23:59:60 UTC\n");
509 }
510 break;
511
512 case TIME_DEL:
513
514 if (xtime.tv_sec % 86400 == 86399) {
515 xtime.tv_sec++;
516 time_status = TIME_OK;
517 printk("Clock: deleting leap second 23:59:59 UTC\n");
518 }
519 break;
520
521 case TIME_OOP:
522 time_status = TIME_OK;
523 break;
524 }
525 }
526
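/*
 * Timer bottom half: run every timer_list entry whose expiry time has
 * passed (the list is sorted, so we stop at the first entry still in the
 * future), then run any expired entries of the old-style timer_table[].
 */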
530 static void timer_bh(void * unused)
531 {
532 unsigned long mask;
533 struct timer_struct *tp;
534 struct timer_list * timer;
535
536 cli();
537 while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
538 void (*fn)(unsigned long) = timer->function;
539 unsigned long data = timer->data;
540 timer->next->prev = timer->prev;
541 timer->prev->next = timer->next;
542 timer->next = timer->prev = NULL;
543 sti();
544 fn(data);
545 cli();
546 }
547 sti();
548
549 for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
550 if (mask > timer_active)
551 break;
552 if (!(mask & timer_active))
553 continue;
554 if (tp->expires > jiffies)
555 continue;
556 timer_active &= ~mask;
557 tp->fn();
558 sti();
559 }
560 }
561
562 void tqueue_bh(void * unused)
563 {
564 run_task_queue(&tq_timer);
565 }
566
567 void immediate_bh(void * unused)
568 {
569 run_task_queue(&tq_immediate);
570 }
571
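/*
 * do_timer() is the timer interrupt handler.  It advances xtime (applying
 * the phase/frequency correction and any adjtime() slew), updates jiffies,
 * the load average and per-process CPU accounting, delivers the virtual
 * and profiling interval-timer signals, enforces RLIMIT_CPU, and marks
 * the timer and task-queue bottom halves when there is work for them.
 */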
578 static void do_timer(int irq, struct pt_regs * regs)
579 {
580 unsigned long mask;
581 struct timer_struct *tp;
582
583 static long last_rtc_update=0;
584 extern int set_rtc_mmss(unsigned long);
585
586 long ltemp, psecs;
587
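/*
 * Accumulate the per-tick phase adjustment; once roughly a microsecond of
 * phase (FINEUSEC, in SHIFT_SCALE fixed point) has built up, fold the
 * whole microseconds into xtime.tv_usec together with the nominal tick
 * length and the adjtime() slew for this tick.
 */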
591 time_phase += time_adj;
592 if (time_phase < -FINEUSEC) {
593 ltemp = -time_phase >> SHIFT_SCALE;
594 time_phase += ltemp << SHIFT_SCALE;
595 xtime.tv_usec += tick + time_adjust_step - ltemp;
596 }
597 else if (time_phase > FINEUSEC) {
598 ltemp = time_phase >> SHIFT_SCALE;
599 time_phase -= ltemp << SHIFT_SCALE;
600 xtime.tv_usec += tick + time_adjust_step + ltemp;
601 } else
602 xtime.tv_usec += tick + time_adjust_step;
603
604 if (time_adjust)
605 {
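/*
 * An adjtime() correction is in progress (time_adjust was presumably set
 * via adjtimex()): slew the clock by at most tickadj microseconds per
 * tick, in the right direction, until the requested adjustment has been
 * used up.
 */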
615 if (time_adjust > tickadj)
616 time_adjust_step = tickadj;
617 else if (time_adjust < -tickadj)
618 time_adjust_step = -tickadj;
619 else
620 time_adjust_step = time_adjust;
621
622
623 time_adjust -= time_adjust_step;
624 }
625 else
626 time_adjust_step = 0;
627
628 if (xtime.tv_usec >= 1000000) {
629 xtime.tv_usec -= 1000000;
630 xtime.tv_sec++;
631 second_overflow();
632 }
633
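/*
 * If the clock is externally synchronized, copy the system time back to
 * the CMOS RTC roughly every 11 minutes (660 s), as close as possible to
 * the middle of the second; on failure, retry in about 60 seconds.
 */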
638 if (time_status != TIME_BAD && xtime.tv_sec > last_rtc_update + 660 &&
639 xtime.tv_usec > 500000 - (tick >> 1) &&
640 xtime.tv_usec < 500000 + (tick >> 1))
641 if (set_rtc_mmss(xtime.tv_sec) == 0)
642 last_rtc_update = xtime.tv_sec;
643 else
644 last_rtc_update = xtime.tv_sec - 600;
645
646 jiffies++;
647 calc_load();
648 if (user_mode(regs)) {
649 current->utime++;
650 if (current != task[0]) {
651 if (current->priority < 15)
652 kstat.cpu_nice++;
653 else
654 kstat.cpu_user++;
655 }
656
657 if (current->it_virt_value && !(--current->it_virt_value)) {
658 current->it_virt_value = current->it_virt_incr;
659 send_sig(SIGVTALRM,current,1);
660 }
661 } else {
662 current->stime++;
663 if(current != task[0])
664 kstat.cpu_system++;
665 #ifdef CONFIG_PROFILE
666 if (prof_buffer && current != task[0]) {
667 extern int _stext;
668 unsigned long eip = regs->eip - (unsigned long) &_stext;
669 eip >>= CONFIG_PROFILE_SHIFT;
670 if (eip < prof_len)
671 prof_buffer[eip]++;
672 }
673 #endif
674 }
675
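/*
 * Enforce CPU-time limits: SIGKILL once the hard limit is reached,
 * SIGXCPU at the soft limit and again every five seconds beyond it.
 */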
678 if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
679 (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
680 send_sig(SIGKILL, current, 1);
681 if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
682 (((current->stime + current->utime) % HZ) == 0)) {
683 psecs = (current->stime + current->utime) / HZ;
684
685 if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
686 send_sig(SIGXCPU, current, 1);
687
688 else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
689 ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
690 send_sig(SIGXCPU, current, 1);
691 }
692
693 if (current != task[0] && 0 > --current->counter) {
694 current->counter = 0;
695 need_resched = 1;
696 }
697
698 if (current->it_prof_value && !(--current->it_prof_value)) {
699 current->it_prof_value = current->it_prof_incr;
700 send_sig(SIGPROF,current,1);
701 }
702 for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
703 if (mask > timer_active)
704 break;
705 if (!(mask & timer_active))
706 continue;
707 if (tp->expires > jiffies)
708 continue;
709 mark_bh(TIMER_BH);
710 }
711 cli();
712 if (timer_head.next->expires < jiffies)
713 mark_bh(TIMER_BH);
714 if (tq_timer != &tq_last)
715 mark_bh(TQUEUE_BH);
716 sti();
717 }
718
719 asmlinkage int sys_alarm(long seconds)
720 {
721 struct itimerval it_new, it_old;
722
723 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
724 it_new.it_value.tv_sec = seconds;
725 it_new.it_value.tv_usec = 0;
726 _setitimer(ITIMER_REAL, &it_new, &it_old);
727 return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
728 }
729
730 asmlinkage int sys_getpid(void)
731 {
732 return current->pid;
733 }
734
735 asmlinkage int sys_getppid(void)
736 {
737 return current->p_opptr->pid;
738 }
739
740 asmlinkage int sys_getuid(void)
741 {
742 return current->uid;
743 }
744
745 asmlinkage int sys_geteuid(void)
746 {
747 return current->euid;
748 }
749
750 asmlinkage int sys_getgid(void)
751 {
752 return current->gid;
753 }
754
755 asmlinkage int sys_getegid(void)
756 {
757 return current->egid;
758 }
759
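/*
 * nice(): a positive increment lowers the caller's priority; only the
 * superuser may pass a negative increment.  The internal priority value
 * is clamped to the range 1..35.
 */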
760 asmlinkage int sys_nice(long increment)
761 {
762 int newprio;
763
764 if (increment < 0 && !suser())
765 return -EPERM;
766 newprio = current->priority - increment;
767 if (newprio < 1)
768 newprio = 1;
769 if (newprio > 35)
770 newprio = 35;
771 current->priority = newprio;
772 return 0;
773 }
774
775 static void show_task(int nr,struct task_struct * p)
776 {
777 unsigned long free;
778 static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
779
780 printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
781 if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
782 printk(stat_nam[p->state]);
783 else
784 printk(" ");
785 #if ((~0UL) == 0xffffffff)
786 if (p == current)
787 printk(" current ");
788 else
789 printk(" %08lX ", thread_saved_pc(&p->tss));
790 #else
791 if (p == current)
792 printk(" current task ");
793 else
794 printk(" %016lx ", thread_saved_pc(&p->tss));
795 #endif
796 for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
797 if (((unsigned long *)p->kernel_stack_page)[free])
798 break;
799 }
800 printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
801 if (p->p_cptr)
802 printk("%5d ", p->p_cptr->pid);
803 else
804 printk(" ");
805 if (p->p_ysptr)
806 printk("%7d", p->p_ysptr->pid);
807 else
808 printk(" ");
809 if (p->p_osptr)
810 printk(" %5d\n", p->p_osptr->pid);
811 else
812 printk("\n");
813 }
814
815 void show_state(void)
816 {
817 int i;
818
819 #if ((~0UL) == 0xffffffff)
820 printk("\n"
821 " free sibling\n");
822 printk(" task PC stack pid father child younger older\n");
823 #else
824 printk("\n"
825 " free sibling\n");
826 printk(" task PC stack pid father child younger older\n");
827 #endif
828 for (i=0 ; i<NR_TASKS ; i++)
829 if (task[i])
830 show_task(i,task[i]);
831 }
832
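/*
 * sched_init(): install the timer, task-queue and immediate bottom-half
 * handlers, attach do_timer() to TIMER_IRQ and enable the bottom halves.
 */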
833 void sched_init(void)
834 {
835 bh_base[TIMER_BH].routine = timer_bh;
836 bh_base[TQUEUE_BH].routine = tqueue_bh;
837 bh_base[IMMEDIATE_BH].routine = immediate_bh;
838 if (request_irq(TIMER_IRQ, do_timer, 0, "timer") != 0)
839 panic("Could not allocate timer IRQ!");
840 enable_bh(TIMER_BH);
841 enable_bh(TQUEUE_BH);
842 enable_bh(IMMEDIATE_BH);
843 }