This source file includes following definitions.
- add_to_runqueue
- del_from_runqueue
- wake_up_process
- process_timeout
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- immediate_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/tqueue.h>
27 #include <linux/resource.h>
28 #include <linux/mm.h>
29
30 #include <asm/system.h>
31 #include <asm/io.h>
32 #include <asm/segment.h>
33 #include <asm/pgtable.h>
34
35 #define TIMER_IRQ 0
36
37 #include <linux/timex.h>
38
39
40
41
long tick = 1000000 / HZ;	/* timer interrupt period, in microseconds */
volatile struct timeval xtime;	/* the current wall-clock time */
int tickadj = 500/HZ;		/* microseconds slewed per tick by adjtime() */

DECLARE_TASK_QUEUE(tq_timer);		/* tasks run from the timer bottom half */
DECLARE_TASK_QUEUE(tq_immediate);	/* tasks run from the immediate bottom half */
DECLARE_TASK_QUEUE(tq_scheduler);	/* tasks run on entry to schedule() */

/*
 * Phase/frequency-lock loop state for NTP-style clock discipline,
 * consumed by second_overflow() and do_timer() below.
 */
int time_status = TIME_BAD;	/* clock synchronization status */
long time_offset = 0;		/* remaining time adjustment (us) */
long time_constant = 0;		/* PLL time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance (ppm) */
long time_precision = 1;	/* clock precision (us) */
long time_maxerror = 0x70000000;/* maximum error estimate */
long time_esterror = 0x70000000;/* estimated error */
long time_phase = 0;		/* phase offset (scaled us) */
long time_freq = 0;		/* frequency offset (scaled ppm) */
long time_adj = 0;		/* per-tick adjustment (scaled) */
long time_reftime = 0;		/* time at last adjustment (s) */

long time_adjust = 0;		/* outstanding adjtime() correction (us) */
long time_adjust_step = 0;	/* slice of time_adjust applied this tick */

int need_resched = 0;		/* set when the current task should yield */
unsigned long event = 0;

extern int _setitimer(int, struct itimerval *, struct itimerval *);
unsigned long * prof_buffer = NULL;	/* kernel profiling histogram, if enabled */
unsigned long prof_len = 0;		/* number of slots in prof_buffer */
unsigned long prof_shift = 0;		/* PC is shifted right by this to index it */
75
/* Convert a 1-based signal number to its bit in a signal mask. */
#define _S(nr) (1<<((nr)-1))

extern void mem_use(void);

extern int timer_interrupt(void);

/* Statically allocated state for the initial (idle) task, task[0]. */
static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
unsigned long init_user_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;

struct mm_struct init_mm = INIT_MM;
struct task_struct init_task = INIT_TASK;

/* Ticks since boot; incremented by do_timer() on every timer interrupt. */
unsigned long volatile jiffies=0;

struct task_struct *current = &init_task;	/* the task now running */
struct task_struct *last_task_used_math = NULL;	/* current owner of the FPU state */

struct task_struct * task[NR_TASKS] = {&init_task, };	/* all tasks, by slot */

struct kernel_stat kstat = { 0 };	/* accounting statistics */
100
/*
 * Link task p at the head of the run-queue, a circular doubly
 * linked list anchored at init_task.  Must be called with
 * interrupts disabled.  If p has noticeably more time slice left
 * than the current task (margin of 3 ticks), request a reschedule.
 */
static inline void add_to_runqueue(struct task_struct * p)
{
#if 1	/* sanity check: cheap, so left enabled */
	if (p->next_run || p->prev_run) {
		printk("task already on run-queue\n");
		return;
	}
#endif
	if (p->counter > current->counter + 3)
		need_resched = 1;
	nr_running++;
	(p->next_run = init_task.next_run)->prev_run = p;
	p->prev_run = &init_task;
	init_task.next_run = p;
}
116
/*
 * Unlink task p from the circular run-queue.  Must be called with
 * interrupts disabled.  The idle task (init_task) anchors the list
 * and is never removed: the scheduler always needs it to fall back
 * on when the run-queue is otherwise empty.
 */
static inline void del_from_runqueue(struct task_struct * p)
{
	struct task_struct *next = p->next_run;
	struct task_struct *prev = p->prev_run;

#if 1	/* sanity check: cheap, so left enabled */
	if (!next || !prev) {
		printk("task not on run-queue\n");
		return;
	}
#endif
	if (p == &init_task) {
		static int nr = 0;
		/* complain only a few times, to avoid flooding the log */
		if (nr < 5) {
			nr++;
			printk("idle task may not sleep\n");
		}
		return;
	}
	nr_running--;
	next->prev_run = prev;
	prev->next_run = next;
	p->next_run = NULL;
	p->prev_run = NULL;
}
142
143
144
145
146
147
148
149
150
151 inline void wake_up_process(struct task_struct * p)
152 {
153 unsigned long flags;
154
155 save_flags(flags);
156 cli();
157 p->state = TASK_RUNNING;
158 if (!p->next_run)
159 add_to_runqueue(p);
160 restore_flags(flags);
161 }
162
163 static void process_timeout(unsigned long __data)
164 {
165 struct task_struct * p = (struct task_struct *) __data;
166
167 p->timeout = 0;
168 wake_up_process(p);
169 }
170
171
172
173
174
175
176
177
178
179
180
181 asmlinkage void schedule(void)
182 {
183 int c;
184 struct task_struct * p;
185 struct task_struct * next;
186 unsigned long timeout = 0;
187
188
189
190 if (intr_count) {
191 printk("Aiee: scheduling in interrupt\n");
192 return;
193 }
194 run_task_queue(&tq_scheduler);
195
196 need_resched = 0;
197 cli();
198 switch (current->state) {
199 case TASK_INTERRUPTIBLE:
200 if (current->signal & ~current->blocked)
201 goto makerunnable;
202 timeout = current->timeout;
203 if (timeout && (timeout <= jiffies)) {
204 current->timeout = 0;
205 timeout = 0;
206 makerunnable:
207 current->state = TASK_RUNNING;
208 break;
209 }
210 default:
211 del_from_runqueue(current);
212 case TASK_RUNNING:
213 }
214 p = init_task.next_run;
215 sti();
216
217
218
219
220
221
222
223 c = -1000;
224 next = &init_task;
225 while (p != &init_task) {
226 if (p->counter > c)
227 c = p->counter, next = p;
228 p = p->next_run;
229 }
230
231
232 if (!c) {
233 for_each_task(p)
234 p->counter = (p->counter >> 1) + p->priority;
235 }
236 if (current != next) {
237 struct timer_list timer;
238
239 kstat.context_swtch++;
240 if (timeout) {
241 init_timer(&timer);
242 timer.expires = timeout;
243 timer.data = (unsigned long) current;
244 timer.function = process_timeout;
245 add_timer(&timer);
246 }
247 switch_to(next);
248 if (timeout)
249 del_timer(&timer);
250 }
251 }
252
/*
 * pause(2): block interruptibly until a signal arrives.  Returns
 * -ERESTARTNOHAND so the syscall is not restarted after the signal
 * handler runs.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
259
260
261
262
263
264
265
266
267
268 void wake_up(struct wait_queue **q)
269 {
270 struct wait_queue *tmp;
271 struct task_struct * p;
272
273 if (!q || !(tmp = *q))
274 return;
275 do {
276 if ((p = tmp->task) != NULL) {
277 if ((p->state == TASK_UNINTERRUPTIBLE) ||
278 (p->state == TASK_INTERRUPTIBLE))
279 wake_up_process(p);
280 }
281 if (!tmp->next) {
282 printk("wait_queue is bad (eip = %p)\n",
283 __builtin_return_address(0));
284 printk(" q = %p\n",q);
285 printk(" *q = %p\n",*q);
286 printk(" tmp = %p\n",tmp);
287 break;
288 }
289 tmp = tmp->next;
290 } while (tmp != *q);
291 }
292
293 void wake_up_interruptible(struct wait_queue **q)
294 {
295 struct wait_queue *tmp;
296 struct task_struct * p;
297
298 if (!q || !(tmp = *q))
299 return;
300 do {
301 if ((p = tmp->task) != NULL) {
302 if (p->state == TASK_INTERRUPTIBLE)
303 wake_up_process(p);
304 }
305 if (!tmp->next) {
306 printk("wait_queue is bad (eip = %p)\n",
307 __builtin_return_address(0));
308 printk(" q = %p\n",q);
309 printk(" *q = %p\n",*q);
310 printk(" tmp = %p\n",tmp);
311 break;
312 }
313 tmp = tmp->next;
314 } while (tmp != *q);
315 }
316
/*
 * Semaphore slow path: called when down() finds the count already
 * <= 0.  The task parks itself on the semaphore's wait queue and
 * sleeps uninterruptibly until up() raises the count.  Note the
 * ordering: the state is set to TASK_UNINTERRUPTIBLE *before* the
 * count is tested, so a wake-up arriving between the test and
 * schedule() just makes the task runnable again — no lost wakeup.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		/* woken but the count may already be gone: re-sleep */
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
329
/*
 * Common body of sleep_on()/interruptible_sleep_on(): put the
 * current task into the given sleep state, park it on wait queue
 * *p, and schedule.  Interrupts are enabled across schedule() and
 * the caller's interrupt flag is restored on return.  The state is
 * set before sti()/schedule(), so a wake_up() from an interrupt in
 * that window simply flips the task back to TASK_RUNNING.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");	/* the idle task must stay runnable */
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
347
/* Sleep on queue *p until woken; delivery of a signal also wakes the task. */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}

/* Sleep on queue *p until an explicit wake_up(); signals are ignored. */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
357
358
359
360
361
/*
 * The timer list is kept sorted by ascending expiry.  timer_head is
 * a dummy element whose expires field is ~0, so it serves both as
 * list head and as the sentinel that terminates add_timer()'s scan.
 */
static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
#define SLOW_BUT_DEBUGGING_TIMERS 1	/* enable extra (slower) timer sanity checks */
364
/*
 * Insert a timer into the global sorted timer list.  The caller
 * must have set timer->expires, ->function and ->data.  The scan
 * needs no end-of-list test because timer_head.expires is ~0 and
 * always stops it.  List manipulation is done with interrupts off;
 * the caller's interrupt flag is restored afterwards.
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	if (timer->next || timer->prev) {
		printk("add_timer() called with non-zero list from %p\n",
			__builtin_return_address(0));
		return;
	}
#endif
	p = &timer_head;
	save_flags(flags);
	cli();
	/* find the first entry expiring no earlier than this timer */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	/* link the timer in just before p */
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}
389
/*
 * Remove a timer from the list if it is pending.  Returns 1 when
 * the timer was found and unlinked, 0 when it was not queued.  The
 * debugging variant walks the whole list to verify membership and
 * complains about half-initialized link pointers; the fast variant
 * trusts timer->next as the "is queued" flag.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	struct timer_list * p;

	p = &timer_head;
	save_flags(flags);
	cli();
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			return 1;
		}
	}
	/* not on the list, yet its link fields are non-NULL: caller bug */
	if (timer->next || timer->prev)
		printk("del_timer() called from %p with timer not initialized\n",
			__builtin_return_address(0));
	restore_flags(flags);
	return 0;
#else
	save_flags(flags);
	cli();
	if (timer->next) {
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		restore_flags(flags);
		return 1;
	}
	restore_flags(flags);
	return 0;
#endif
}
427
unsigned long timer_active = 0;		/* bitmask of pending timer_table entries */
struct timer_struct timer_table[32];	/* old-style static timers, one per bit */

/*
 * Load averages over 1, 5 and 15 minutes, held as FIXED_1
 * fixed-point values and refreshed by calc_load() every
 * LOAD_FREQ ticks.
 */
unsigned long avenrun[3] = { 0,0,0 };
438
439
440
441
442 static unsigned long count_active_tasks(void)
443 {
444 struct task_struct **p;
445 unsigned long nr = 0;
446
447 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
448 if (*p && ((*p)->state == TASK_RUNNING ||
449 (*p)->state == TASK_UNINTERRUPTIBLE ||
450 (*p)->state == TASK_SWAPPING))
451 nr += FIXED_1;
452 return nr;
453 }
454
455 static inline void calc_load(void)
456 {
457 unsigned long active_tasks;
458 static int count = LOAD_FREQ;
459
460 if (count-- > 0)
461 return;
462 count = LOAD_FREQ;
463 active_tasks = count_active_tasks();
464 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
465 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
466 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
467 }
468
469
470
471
472
473
474
475
476
477
478
/*
 * Run once per second (from do_timer() when tv_usec wraps) to
 * update the NTP clock-discipline state: grow the maximum error
 * estimate, run the phase-lock loop to derive the per-tick
 * adjustment (time_adj) that slews the clock toward time_offset,
 * fold in the frequency correction, and handle a pending leap
 * second at the end of the UTC day.
 */
static void second_overflow(void)
{
	long ltemp;

	/* bump the maximum error estimate by the frequency tolerance */
	time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
		0x70000000 : (time_maxerror + time_tolerance);

	/* PLL: compute this second's scaled per-tick phase adjustment */
	if (time_offset < 0) {
		ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
		time_adj = - time_adj;
	} else if (time_offset > 0) {
		ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
	} else {
		time_adj = 0;
	}

	/* fold in the frequency correction and fine tuning */
	time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
		+ FINETUNE;

	/* leap-second handling at UTC midnight */
	switch (time_status) {
	case TIME_INS:
		/* insert 23:59:60 by stepping the clock back one second */
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_status = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 UTC\n");
		}
		break;

	case TIME_DEL:
		/* delete 23:59:59 by stepping the clock forward one second */
		if (xtime.tv_sec % 86400 == 86399) {
			xtime.tv_sec++;
			time_status = TIME_OK;
			printk("Clock: deleting leap second 23:59:59 UTC\n");
		}
		break;

	case TIME_OOP:
		/* leap second in progress last second; back to normal */
		time_status = TIME_OK;
		break;
	}
}
529
530
531
532
/*
 * Timer bottom half: run everything that has expired.  New-style
 * timer_list entries are unlinked with interrupts off, but each
 * handler runs with interrupts enabled.  Then the old-style
 * timer_table entries are dispatched from the timer_active bitmask.
 */
static void timer_bh(void * unused)
{
	unsigned long mask;
	struct timer_struct *tp;
	struct timer_list * timer;

	cli();
	while ((timer = timer_head.next) != &timer_head && timer->expires <= jiffies) {
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		/* unlink before calling: the handler may re-add the timer */
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();
		fn(data);
		cli();
	}
	sti();

	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;		/* no higher bits set: done */
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;	/* one-shot: clear before running */
		tp->fn();
		sti();
	}
}
564
/* Bottom half: run the tasks queued on tq_timer. */
void tqueue_bh(void * unused)
{
	run_task_queue(&tq_timer);
}

/* Bottom half: run the tasks queued on tq_immediate. */
void immediate_bh(void * unused)
{
	run_task_queue(&tq_immediate);
}
574
575
576
577
578
579
580
/*
 * The timer interrupt handler.  Advances xtime by one tick (with
 * NTP phase and adjtime() corrections), updates jiffies and the
 * load average, optionally writes the RTC back when the clock is
 * externally synchronized, charges CPU time to the current task,
 * enforces RLIMIT_CPU, runs the interval timers, decrements the
 * time slice, and marks the timer / task-queue bottom halves when
 * deferred work is pending.  Runs in interrupt context.
 */
static void do_timer(int irq, struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;
	/* last time the CMOS clock got written back */
	static long last_rtc_update=0;
	extern int set_rtc_mmss(unsigned long);

	long ltemp, psecs;

	/*
	 * Advance the NTP phase accumulator; once it exceeds one
	 * microsecond (FINEUSEC, scaled), fold whole microseconds
	 * into this tick's xtime increment.
	 */
	time_phase += time_adj;
	if (time_phase < -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase > FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust)
	{
		/*
		 * adjtime() slewing: apply at most tickadj microseconds
		 * of the outstanding correction on each tick, preserving
		 * its sign.
		 */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		/* reduce the outstanding correction by this tick's step */
		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;

	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();
	}

	/*
	 * With an externally synchronized clock, write the CMOS RTC
	 * back about every 11 minutes, near the 500 ms mark so the
	 * RTC lands on the half second.
	 */
	if (time_status != TIME_BAD && xtime.tv_sec > last_rtc_update + 660 &&
	    xtime.tv_usec > 500000 - (tick >> 1) &&
	    xtime.tv_usec < 500000 + (tick >> 1))
		if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			last_rtc_update = xtime.tv_sec - 600;	/* retry in ~60 s */

	jiffies++;
	calc_load();
	if (user_mode(regs)) {
		current->utime++;
		if (current != task[0]) {
			/* priority below 15 means the task was niced */
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}
		/* the virtual interval timer only ticks in user mode */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		current->stime++;
		if(current != task[0])
			kstat.cpu_system++;
		/* kernel profiling: bump the histogram slot for this PC */
		if (prof_buffer && current != task[0]) {
			extern int _stext;
			unsigned long ip = instruction_pointer(regs);
			ip -= (unsigned long) &_stext;
			ip >>= prof_shift;
			if (ip < prof_len)
				prof_buffer[ip]++;
		}
	}

	/*
	 * RLIMIT_CPU: SIGKILL at the hard limit; SIGXCPU when the
	 * soft limit is reached and every 5 CPU-seconds thereafter.
	 */
	if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
	    (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
		send_sig(SIGKILL, current, 1);
	if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
	    (((current->stime + current->utime) % HZ) == 0)) {
		psecs = (current->stime + current->utime) / HZ;
		/* exactly at the soft limit */
		if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
			send_sig(SIGXCPU, current, 1);
		/* and every five seconds past it */
		else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
		    ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
			send_sig(SIGXCPU, current, 1);
	}

	/* time slice used up: ask for a reschedule (idle task excepted) */
	if (current != task[0] && 0 > --current->counter) {
		current->counter = 0;
		need_resched = 1;
	}
	/* the profiling interval timer ticks in both user and kernel mode */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
	/* any expired old-style timer defers to the timer bottom half */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	cli();
	if (timer_head.next->expires <= jiffies)
		mark_bh(TIMER_BH);
	if (tq_timer != &tq_last)
		mark_bh(TQUEUE_BH);
	sti();
}
720
721 asmlinkage unsigned int sys_alarm(unsigned int seconds)
722 {
723 struct itimerval it_new, it_old;
724 unsigned int oldalarm;
725
726 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
727 it_new.it_value.tv_sec = seconds;
728 it_new.it_value.tv_usec = 0;
729 _setitimer(ITIMER_REAL, &it_new, &it_old);
730 oldalarm = it_old.it_value.tv_sec;
731
732
733 if (it_old.it_value.tv_usec)
734 oldalarm++;
735 return oldalarm;
736 }
737
/* getpid(2): return the calling task's process id. */
asmlinkage int sys_getpid(void)
{
	return current->pid;
}

/* getppid(2): return the pid of the original parent (p_opptr). */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}

/* getuid(2): return the real user id. */
asmlinkage int sys_getuid(void)
{
	return current->uid;
}

/* geteuid(2): return the effective user id. */
asmlinkage int sys_geteuid(void)
{
	return current->euid;
}

/* getgid(2): return the real group id. */
asmlinkage int sys_getgid(void)
{
	return current->gid;
}

/* getegid(2): return the effective group id. */
asmlinkage int sys_getegid(void)
{
	return current->egid;
}
767
768 asmlinkage int sys_nice(long increment)
769 {
770 int newprio;
771
772 if (increment < 0 && !suser())
773 return -EPERM;
774 newprio = current->priority - increment;
775 if (newprio < 1)
776 newprio = 1;
777 if (newprio > 35)
778 newprio = 35;
779 current->priority = newprio;
780 return 0;
781 }
782
783 static void show_task(int nr,struct task_struct * p)
784 {
785 unsigned long free;
786 static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
787
788 printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
789 if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
790 printk(stat_nam[p->state]);
791 else
792 printk(" ");
793 #if ((~0UL) == 0xffffffff)
794 if (p == current)
795 printk(" current ");
796 else
797 printk(" %08lX ", thread_saved_pc(&p->tss));
798 #else
799 if (p == current)
800 printk(" current task ");
801 else
802 printk(" %016lx ", thread_saved_pc(&p->tss));
803 #endif
804 for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
805 if (((unsigned long *)p->kernel_stack_page)[free])
806 break;
807 }
808 printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
809 if (p->p_cptr)
810 printk("%5d ", p->p_cptr->pid);
811 else
812 printk(" ");
813 if (p->p_ysptr)
814 printk("%7d", p->p_ysptr->pid);
815 else
816 printk(" ");
817 if (p->p_osptr)
818 printk(" %5d\n", p->p_osptr->pid);
819 else
820 printk("\n");
821 }
822
823 void show_state(void)
824 {
825 int i;
826
827 #if ((~0UL) == 0xffffffff)
828 printk("\n"
829 " free sibling\n");
830 printk(" task PC stack pid father child younger older\n");
831 #else
832 printk("\n"
833 " free sibling\n");
834 printk(" task PC stack pid father child younger older\n");
835 #endif
836 for (i=0 ; i<NR_TASKS ; i++)
837 if (task[i])
838 show_task(i,task[i]);
839 }
840
/*
 * Scheduler/timer initialization, called once at boot: register
 * the timer, task-queue and immediate bottom-half handlers, claim
 * the timer interrupt, and enable the three bottom halves.
 */
void sched_init(void)
{
	bh_base[TIMER_BH].routine = timer_bh;
	bh_base[TQUEUE_BH].routine = tqueue_bh;
	bh_base[IMMEDIATE_BH].routine = immediate_bh;
	if (request_irq(TIMER_IRQ, do_timer, 0, "timer") != 0)
		panic("Could not allocate timer IRQ!");
	enable_bh(TIMER_BH);
	enable_bh(TQUEUE_BH);
	enable_bh(IMMEDIATE_BH);
}