This source file includes following definitions.
- add_to_runqueue
- del_from_runqueue
- wake_up_process
- process_timeout
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- immediate_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/tqueue.h>
27 #include <linux/resource.h>
28 #include <linux/mm.h>
29
30 #include <asm/system.h>
31 #include <asm/io.h>
32 #include <asm/segment.h>
33 #include <asm/pgtable.h>
34
35 #define TIMER_IRQ 0
36
37 #include <linux/timex.h>
38
39
40
41
/*
 * Timekeeping and scheduler globals.  The long time_* variables are the
 * kernel's NTP phase-locked-loop state, consumed by second_overflow()
 * and do_timer().
 */
long tick = 1000000 / HZ;	/* length of a timer tick in usec */
volatile struct timeval xtime;	/* current time of day */
int tickadj = 500/HZ;		/* max usec slewed per tick by adjtime() */

/* task queues drained from the bottom-half handlers and schedule() */
DECLARE_TASK_QUEUE(tq_timer);
DECLARE_TASK_QUEUE(tq_immediate);
DECLARE_TASK_QUEUE(tq_scheduler);

int time_status = TIME_BAD;	/* clock sync status; TIME_BAD until set */
long time_offset = 0;		/* remaining time offset to slew (scaled us) */
long time_constant = 0;		/* PLL time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance */
long time_precision = 1;	/* clock precision (us) */
long time_maxerror = 0x70000000;	/* maximum error bound */
long time_esterror = 0x70000000;	/* estimated error */
long time_phase = 0;		/* phase offset (SHIFT_SCALE fixed point) */
long time_freq = 0;		/* frequency offset */
long time_adj = 0;		/* per-tick phase adjustment */
long time_reftime = 0;		/* time at last adjustment (s) */

long time_adjust = 0;		/* outstanding adjtime() correction (us) */
long time_adjust_step = 0;	/* usec actually applied to xtime this tick */

int need_resched = 0;		/* set when schedule() should run soon */
unsigned long event = 0;

extern int _setitimer(int, struct itimerval *, struct itimerval *);
unsigned long * prof_buffer = NULL;	/* kernel profiling buffer (CONFIG_PROFILE) */
unsigned long prof_len = 0;

#define _S(nr) (1<<((nr)-1))

extern void mem_use(void);

extern int timer_interrupt(void);

/* statically allocated stacks and bootstrap state for the initial task */
static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
unsigned long init_user_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct sigaction init_sigaction[32] = { {0,}, };

struct mm_struct init_mm = INIT_MM;
struct task_struct init_task = INIT_TASK;	/* task[0]: the idle task; also the run-queue anchor */

unsigned long volatile jiffies=0;	/* ticks since boot */

struct task_struct *current = &init_task;	/* the task now executing */
struct task_struct *last_task_used_math = NULL;	/* lazy-FPU context owner */

struct task_struct * task[NR_TASKS] = {&init_task, };

struct kernel_stat kstat = { 0 };
99
/*
 * Insert task p at the head of the run queue, a circular doubly-linked
 * list anchored at init_task.  Caller must have interrupts disabled.
 * If the newcomer has noticeably more quantum left than the current
 * task, request a reschedule.
 */
static inline void add_to_runqueue(struct task_struct * p)
{
#if 1	/* sanity check; cheap enough to leave enabled */
	if (p->next_run || p->prev_run) {
		printk("task already on run-queue\n");
		return;
	}
#endif
	if (p->counter > current->counter + 3)
		need_resched = 1;
	nr_running++;
	/* splice p in right after init_task */
	(p->next_run = init_task.next_run)->prev_run = p;
	p->prev_run = &init_task;
	init_task.next_run = p;
}
115
/*
 * Unlink task p from the run queue.  Caller must have interrupts
 * disabled.  The idle task (init_task) anchors the list and must
 * never be removed; attempts to do so are reported (rate-limited to
 * five messages) and ignored.
 */
static inline void del_from_runqueue(struct task_struct * p)
{
	struct task_struct *next = p->next_run;
	struct task_struct *prev = p->prev_run;

#if 1	/* sanity check; cheap enough to leave enabled */
	if (!next || !prev) {
		printk("task not on run-queue\n");
		return;
	}
#endif
	if (p == &init_task) {
		static int nr = 0;
		if (nr < 5) {
			nr++;
			printk("idle task may not sleep\n");
		}
		return;
	}
	nr_running--;
	next->prev_run = prev;
	prev->next_run = next;
	/* clear the links so "on run-queue" checks work */
	p->next_run = NULL;
	p->prev_run = NULL;
}
141
142
143
144
145
146
147
148
149
/*
 * Make task p runnable and put it on the run queue if it is not
 * already there.  Safe from interrupt context: the state change and
 * queue insertion are done with interrupts disabled.
 */
inline void wake_up_process(struct task_struct * p)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	p->state = TASK_RUNNING;
	if (!p->next_run)
		add_to_runqueue(p);
	restore_flags(flags);
}
161
162 static void process_timeout(unsigned long __data)
163 {
164 struct task_struct * p = (struct task_struct *) __data;
165
166 p->timeout = 0;
167 wake_up_process(p);
168 }
169
170
171
172
173
174
175
176
177
178
179
/*
 * The scheduler proper.  Decides whether the current task should give
 * up the CPU and, if so, picks the runnable task with the largest
 * remaining counter (time slice).  When every runnable task has
 * exhausted its counter, all counters are recharged from priority.
 *
 * NOTE: the switch below deliberately abuses fall-through and a goto
 * into the TASK_INTERRUPTIBLE case; the statement order around
 * cli()/sti() matters, so do not "clean it up".
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * next;
	unsigned long timeout = 0;

	/* scheduling from interrupt context would corrupt state */
	if (intr_count) {
		printk("Aiee: scheduling in interrupt\n");
		return;
	}
	run_task_queue(&tq_scheduler);

	need_resched = 0;
	cli();
	switch (current->state) {
		case TASK_INTERRUPTIBLE:
			/* a pending, unblocked signal makes us runnable now */
			if (current->signal & ~current->blocked)
				goto makerunnable;
			timeout = current->timeout;
			if (timeout && (timeout <= jiffies)) {
				/* timeout already expired: run, don't sleep */
				current->timeout = 0;
				timeout = 0;
		makerunnable:
				current->state = TASK_RUNNING;
				break;
			}
			/* fallthrough: still interruptibly sleeping */
		default:
			del_from_runqueue(current);
			/* fallthrough */
		case TASK_RUNNING:
	}
	p = init_task.next_run;
	sti();

	/* pick the runnable task with the highest remaining counter;
	   the idle task (c = -1000 baseline) is the fallback */
	c = -1000;
	next = &init_task;
	while (p != &init_task) {
		if (p->counter > c)
			c = p->counter, next = p;
		p = p->next_run;
	}

	/* every runnable slice used up: recharge ALL tasks, so sleepers
	   accumulate a (bounded) credit */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
	if (current != next) {
		struct timer_list timer;	/* lives on our stack across the switch */

		kstat.context_swtch++;
		if (timeout) {
			init_timer(&timer);
			timer.expires = timeout;
			timer.data = (unsigned long) current;
			timer.function = process_timeout;
			add_timer(&timer);
		}
		switch_to(next);
		/* we only get here after being rescheduled */
		if (timeout)
			del_timer(&timer);
	}
}
251
/*
 * pause(2): sleep interruptibly until a signal arrives.  Always
 * returns -ERESTARTNOHAND so the syscall is not restarted after the
 * signal handler runs.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
258
259
260
261
262
263
264
265
266
/*
 * Wake every task on wait queue *q, whether it sleeps interruptibly
 * or not.  The queue is a circular singly-linked list; a NULL ->next
 * means it has been corrupted, which is reported (with the caller's
 * address) instead of looping forever.  Safe from interrupt context;
 * wake_up_process() does its own cli/sti.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE))
				wake_up_process(p);
		}
		if (!tmp->next) {
			/* corrupted queue: dump diagnostics and bail */
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
291
/*
 * Like wake_up(), but only wakes tasks in TASK_INTERRUPTIBLE sleep;
 * uninterruptible sleepers are left alone.  Same circular-list walk
 * and corruption bail-out as wake_up().
 */
void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE)
				wake_up_process(p);
		}
		if (!tmp->next) {
			/* corrupted queue: dump diagnostics and bail */
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
315
/*
 * Semaphore contention slow path: sleep uninterruptibly on the
 * semaphore's wait queue until up() raises the count.  The state is
 * re-set to TASK_UNINTERRUPTIBLE inside the loop because the wake-up
 * made us TASK_RUNNING before we re-checked the count.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
328
/*
 * Common body of sleep_on()/interruptible_sleep_on(): put the current
 * task on wait queue *p in the given sleep state and schedule away.
 * Interrupts are enabled around schedule() (a wake_up from an
 * interrupt must be able to get in) and the caller's flags are
 * restored afterwards.  The idle task must never sleep.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
346
/* Sleep on *p until woken; signals also wake the sleeper. */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}
351
/* Sleep on *p until explicitly woken; signals do not interrupt. */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
356
357
358
359
360
/*
 * Dynamic timers live in a circular doubly-linked list anchored at
 * timer_head, sorted by expiry.  The anchor's expires field is ~0 so
 * it compares greater than any real timer and terminates the scan.
 */
static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
#define SLOW_BUT_DEBUGGING_TIMERS 1

/*
 * Insert 'timer' into the sorted timer list under interrupt
 * protection.  With SLOW_BUT_DEBUGGING_TIMERS, re-adding a timer that
 * is already linked is reported and refused.
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	if (timer->next || timer->prev) {
		printk("add_timer() called with non-zero list from %p\n",
			__builtin_return_address(0));
		return;
	}
#endif
	p = &timer_head;
	save_flags(flags);
	cli();
	/* walk until the first entry expiring at or after us */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	/* link in just before p */
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}
388
/*
 * Remove 'timer' from the timer list if it is pending.  Returns 1 if
 * the timer was found and unlinked, 0 if it was not pending.  The
 * debugging variant scans the whole list to verify membership and
 * complains about timers whose links are non-NULL yet not listed.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	struct timer_list * p;

	p = &timer_head;
	save_flags(flags);
	cli();
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			return 1;
		}
	}
	/* linked but not on the list: stale/corrupt timer links */
	if (timer->next || timer->prev)
		printk("del_timer() called from %p with timer not initialized\n",
			__builtin_return_address(0));
	restore_flags(flags);
	return 0;
#else
	save_flags(flags);
	cli();
	if (timer->next) {
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		restore_flags(flags);
		return 1;
	}
	restore_flags(flags);
	return 0;
#endif
}
426
unsigned long timer_active = 0;		/* bitmask of armed timer_table slots */
struct timer_struct timer_table[32];	/* old-style static timers, one per bit */

/*
 * 1-, 5- and 15-minute load averages in FIXED_1 fixed point,
 * maintained by calc_load().
 */
unsigned long avenrun[3] = { 0,0,0 };
437
438
439
440
441 static unsigned long count_active_tasks(void)
442 {
443 struct task_struct **p;
444 unsigned long nr = 0;
445
446 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
447 if (*p && ((*p)->state == TASK_RUNNING ||
448 (*p)->state == TASK_UNINTERRUPTIBLE ||
449 (*p)->state == TASK_SWAPPING))
450 nr += FIXED_1;
451 return nr;
452 }
453
454 static inline void calc_load(void)
455 {
456 unsigned long active_tasks;
457 static int count = LOAD_FREQ;
458
459 if (count-- > 0)
460 return;
461 count = LOAD_FREQ;
462 active_tasks = count_active_tasks();
463 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
464 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
465 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
466 }
467
468
469
470
471
472
473
474
475
476
477
/*
 * Called from do_timer() whenever xtime rolls over a second.
 * Updates the NTP phase-locked-loop state: grows the maximum error
 * bound, converts the remaining time_offset into a per-tick phase
 * adjustment (time_adj), folds in the frequency correction, and
 * handles pending leap seconds.  (Presumably follows David Mills'
 * kernel PLL model, cf. RFC 1589 — verify against linux/timex.h.)
 */
static void second_overflow(void)
{
	long ltemp;

	/* error bound grows by the tolerance each second, saturating */
	time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
	    0x70000000 : (time_maxerror + time_tolerance);

	/* turn the signed offset into a per-tick slew, consuming the
	   part of the offset this second's slew will correct */
	if (time_offset < 0) {
		ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
		time_adj = - time_adj;
	} else if (time_offset > 0) {
		ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
	} else {
		time_adj = 0;
	}

	/* add the frequency correction and the compiled-in trim */
	time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
	    + FINETUNE;

	/* leap second processing at day boundaries */
	switch (time_status) {
		case TIME_INS:
		/* insert: repeat the last second of the UTC day */
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_status = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 UTC\n");
		}
		break;

		case TIME_DEL:
		/* delete: skip the last second of the UTC day */
		if (xtime.tv_sec % 86400 == 86399) {
			xtime.tv_sec++;
			time_status = TIME_OK;
			printk("Clock: deleting leap second 23:59:59 UTC\n");
		}
		break;

		case TIME_OOP:
		/* leap second done; back to normal */
		time_status = TIME_OK;
		break;
	}
}
528
529
530
531
/*
 * Timer bottom half, run (with interrupts enabled) after do_timer()
 * marked TIMER_BH.  First fires all expired dynamic timers: each is
 * unlinked under cli(), then its handler runs with interrupts on, so
 * the list head is re-read every iteration.  Then fires any expired
 * static timer_table entries.
 */
static void timer_bh(void * unused)
{
	unsigned long mask;
	struct timer_struct *tp;
	struct timer_list * timer;

	cli();
	while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		/* unlink before calling: the handler may re-add it */
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();
		fn(data);
		cli();
	}
	sti();

	/* static timers: one bit of timer_active per timer_table slot */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;
		tp->fn();
		sti();
	}
}
563
/* Bottom half that drains the tq_timer task queue. */
void tqueue_bh(void * unused)
{
	run_task_queue(&tq_timer);
}
568
/* Bottom half that drains the tq_immediate task queue. */
void immediate_bh(void * unused)
{
	run_task_queue(&tq_immediate);
}
573
574
575
576
577
578
579
/*
 * The timer interrupt handler (IRQ 0), run once per tick.  In order:
 * advance xtime by one tick (applying the NTP phase adjustment and
 * any pending adjtime() slew), handle second overflow, opportunistically
 * sync the CMOS RTC, bump jiffies, account the tick to the current
 * task (user/system, interval timers, rlimits, time-slice), and mark
 * the timer/tqueue bottom halves when there is work for them.
 */
static void do_timer(int irq, struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;

	static long last_rtc_update=0;
	extern int set_rtc_mmss(unsigned long);

	long ltemp, psecs;

	/* accumulate the per-tick phase adjustment; when it exceeds one
	   scaled microsecond, fold whole microseconds into xtime */
	time_phase += time_adj;
	if (time_phase < -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase > FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust)
	{
		/* adjtime() in progress: slew by at most tickadj usec
		   per tick, in the direction of the remaining request */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		/* consume the part we will apply this tick */
		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;

	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();
	}

	/* if synchronized, write the clock back to the CMOS RTC about
	   every 11 minutes, near the middle of a second (the RTC update
	   window); on failure retry roughly 60 seconds later */
	if (time_status != TIME_BAD && xtime.tv_sec > last_rtc_update + 660 &&
	    xtime.tv_usec > 500000 - (tick >> 1) &&
	    xtime.tv_usec < 500000 + (tick >> 1))
		if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			last_rtc_update = xtime.tv_sec - 600;

	jiffies++;
	calc_load();
	if (user_mode(regs)) {
		/* tick charged to user time; the idle task is not
		   counted in the cpu statistics */
		current->utime++;
		if (current != task[0]) {
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}

		/* virtual interval timer only runs in user mode */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		current->stime++;
		if(current != task[0])
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		/* kernel PC sampling into the profiling buffer */
		if (prof_buffer && current != task[0]) {
			extern int _stext;
			unsigned long eip = regs->eip - (unsigned long) &_stext;
			eip >>= CONFIG_PROFILE_SHIFT;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}

	/* enforce RLIMIT_CPU: SIGKILL at the hard limit, SIGXCPU at the
	   soft limit and every 5 CPU-seconds thereafter */
	if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
	    (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
		send_sig(SIGKILL, current, 1);
	if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
	    (((current->stime + current->utime) % HZ) == 0)) {
		psecs = (current->stime + current->utime) / HZ;

		if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
			send_sig(SIGXCPU, current, 1);

		else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
		    ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
			send_sig(SIGXCPU, current, 1);
	}

	/* time slice used up (idle task excepted): ask for a reschedule */
	if (current != task[0] && 0 > --current->counter) {
		current->counter = 0;
		need_resched = 1;
	}

	/* profiling interval timer ticks in both user and kernel mode */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
	/* any expired static timer means the bottom half has work */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	cli();
	if (timer_head.next->expires < jiffies)
		mark_bh(TIMER_BH);
	if (tq_timer != &tq_last)
		mark_bh(TQUEUE_BH);
	sti();
}
720
721 asmlinkage unsigned int sys_alarm(unsigned int seconds)
722 {
723 struct itimerval it_new, it_old;
724 unsigned int oldalarm;
725
726 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
727 it_new.it_value.tv_sec = seconds;
728 it_new.it_value.tv_usec = 0;
729 _setitimer(ITIMER_REAL, &it_new, &it_old);
730 oldalarm = it_old.it_value.tv_sec;
731
732
733 if (it_old.it_value.tv_usec)
734 oldalarm++;
735 return oldalarm;
736 }
737
/* getpid(2): return the caller's process id. */
asmlinkage int sys_getpid(void)
{
	return current->pid;
}
742
/* getppid(2): return the pid of the original parent (p_opptr). */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}
747
/* getuid(2): return the caller's real user id. */
asmlinkage int sys_getuid(void)
{
	return current->uid;
}
752
/* geteuid(2): return the caller's effective user id. */
asmlinkage int sys_geteuid(void)
{
	return current->euid;
}
757
/* getgid(2): return the caller's real group id. */
asmlinkage int sys_getgid(void)
{
	return current->gid;
}
762
/* getegid(2): return the caller's effective group id. */
asmlinkage int sys_getegid(void)
{
	return current->egid;
}
767
768 asmlinkage int sys_nice(long increment)
769 {
770 int newprio;
771
772 if (increment < 0 && !suser())
773 return -EPERM;
774 newprio = current->priority - increment;
775 if (newprio < 1)
776 newprio = 1;
777 if (newprio > 35)
778 newprio = 35;
779 current->priority = newprio;
780 return 0;
781 }
782
783 static void show_task(int nr,struct task_struct * p)
784 {
785 unsigned long free;
786 static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
787
788 printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
789 if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
790 printk(stat_nam[p->state]);
791 else
792 printk(" ");
793 #if ((~0UL) == 0xffffffff)
794 if (p == current)
795 printk(" current ");
796 else
797 printk(" %08lX ", thread_saved_pc(&p->tss));
798 #else
799 if (p == current)
800 printk(" current task ");
801 else
802 printk(" %016lx ", thread_saved_pc(&p->tss));
803 #endif
804 for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
805 if (((unsigned long *)p->kernel_stack_page)[free])
806 break;
807 }
808 printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
809 if (p->p_cptr)
810 printk("%5d ", p->p_cptr->pid);
811 else
812 printk(" ");
813 if (p->p_ysptr)
814 printk("%7d", p->p_ysptr->pid);
815 else
816 printk(" ");
817 if (p->p_osptr)
818 printk(" %5d\n", p->p_osptr->pid);
819 else
820 printk("\n");
821 }
822
/*
 * Dump a one-line summary of every task in the task array (typically
 * triggered from the SysRq/keyboard debug path).
 * NOTE(review): the two #if branches below print identical headers
 * here; the 64-bit branch presumably once had a wider PC column —
 * verify against the original source.
 */
void show_state(void)
{
	int i;

#if ((~0UL) == 0xffffffff)
	printk("\n"
	       " free sibling\n");
	printk(" task PC stack pid father child younger older\n");
#else
	printk("\n"
	       " free sibling\n");
	printk(" task PC stack pid father child younger older\n");
#endif
	for (i=0 ; i<NR_TASKS ; i++)
		if (task[i])
			show_task(i,task[i]);
}
840
/*
 * One-time scheduler setup at boot: install the timer, task-queue and
 * immediate bottom-half handlers, claim the timer interrupt (IRQ 0,
 * see TIMER_IRQ) and enable the three bottom halves.  Panics if the
 * timer IRQ cannot be obtained, since nothing works without it.
 */
void sched_init(void)
{
	bh_base[TIMER_BH].routine = timer_bh;
	bh_base[TQUEUE_BH].routine = tqueue_bh;
	bh_base[IMMEDIATE_BH].routine = immediate_bh;
	if (request_irq(TIMER_IRQ, do_timer, 0, "timer") != 0)
		panic("Could not allocate timer IRQ!");
	enable_bh(TIMER_BH);
	enable_bh(TQUEUE_BH);
	enable_bh(IMMEDIATE_BH);
}