This source file includes the following definitions.
- add_to_runqueue
- del_from_runqueue
- wake_up_process
- process_timeout
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- immediate_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/tqueue.h>
27 #include <linux/resource.h>
28 #include <linux/mm.h>
29
30 #include <asm/system.h>
31 #include <asm/io.h>
32 #include <asm/segment.h>
33 #include <asm/pgtable.h>
34
35 #define TIMER_IRQ 0
36
37 #include <linux/timex.h>
38
39
40
41
42 long tick = 1000000 / HZ;
43 volatile struct timeval xtime;
44 int tickadj = 500/HZ;
45
46 DECLARE_TASK_QUEUE(tq_timer);
47 DECLARE_TASK_QUEUE(tq_immediate);
48 DECLARE_TASK_QUEUE(tq_scheduler);
49
50
51
52
53 int time_status = TIME_BAD;
54 long time_offset = 0;
55 long time_constant = 0;
56 long time_tolerance = MAXFREQ;
57 long time_precision = 1;
58 long time_maxerror = 0x70000000;
59 long time_esterror = 0x70000000;
60 long time_phase = 0;
61 long time_freq = 0;
62 long time_adj = 0;
63 long time_reftime = 0;
64
65 long time_adjust = 0;
66 long time_adjust_step = 0;
67
68 int need_resched = 0;
69 unsigned long event = 0;
70
71 extern int _setitimer(int, struct itimerval *, struct itimerval *);
72 unsigned long * prof_buffer = NULL;
73 unsigned long prof_len = 0;
74
75 #define _S(nr) (1<<((nr)-1))
76
77 extern void mem_use(void);
78
79 extern int timer_interrupt(void);
80
81 static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
82 unsigned long init_user_stack[1024] = { STACK_MAGIC, };
83 static struct vm_area_struct init_mmap = INIT_MMAP;
84 struct task_struct init_task = INIT_TASK;
85
86 unsigned long volatile jiffies=0;
87
88 struct task_struct *current = &init_task;
89 struct task_struct *last_task_used_math = NULL;
90
91 struct task_struct * task[NR_TASKS] = {&init_task, };
92
93 struct kernel_stat kstat = { 0 };
94
95 static inline void add_to_runqueue(struct task_struct * p)
96 {
97 #if 1
98 if (p->next_run || p->prev_run) {
99 printk("task already on run-queue\n");
100 return;
101 }
102 #endif
103 if (p->counter > current->counter + 3)
104 need_resched = 1;
105 nr_running++;
106 (p->next_run = init_task.next_run)->prev_run = p;
107 p->prev_run = &init_task;
108 init_task.next_run = p;
109 }
110
111 static inline void del_from_runqueue(struct task_struct * p)
112 {
113 struct task_struct *next = p->next_run;
114 struct task_struct *prev = p->prev_run;
115
116 #if 1
117 if (!next || !prev) {
118 printk("task not on run-queue\n");
119 return;
120 }
121 #endif
122 if (p == &init_task) {
123 printk("idle task may not sleep\n");
124 return;
125 }
126 nr_running--;
127 next->prev_run = prev;
128 prev->next_run = next;
129 p->next_run = NULL;
130 p->prev_run = NULL;
131 }
132
133
134
135
136
137
138
139
140
141 inline void wake_up_process(struct task_struct * p)
142 {
143 unsigned long flags;
144
145 save_flags(flags);
146 cli();
147 p->state = TASK_RUNNING;
148 if (!p->next_run)
149 add_to_runqueue(p);
150 restore_flags(flags);
151 }
152
153 static void process_timeout(unsigned long __data)
154 {
155 struct task_struct * p = (struct task_struct *) __data;
156
157 p->timeout = 0;
158 wake_up_process(p);
159 }
160
161
162
163
164
165
166
167
168
169
170
171 asmlinkage void schedule(void)
172 {
173 int c;
174 struct task_struct * p;
175 struct task_struct * next;
176 unsigned long timeout = 0;
177
178
179
180 if (intr_count) {
181 printk("Aiee: scheduling in interrupt\n");
182 intr_count = 0;
183 }
184 run_task_queue(&tq_scheduler);
185
186 need_resched = 0;
187 cli();
188 switch (current->state) {
189 case TASK_INTERRUPTIBLE:
190 if (current->signal & ~current->blocked)
191 goto makerunnable;
192 timeout = current->timeout;
193 if (timeout && (timeout <= jiffies)) {
194 current->timeout = 0;
195 timeout = 0;
196 makerunnable:
197 current->state = TASK_RUNNING;
198 break;
199 }
200 default:
201 del_from_runqueue(current);
202 case TASK_RUNNING:
203 }
204 p = init_task.next_run;
205 sti();
206
207
208
209
210
211
212
213 c = -1000;
214 next = &init_task;
215 while (p != &init_task) {
216 if (p->counter > c)
217 c = p->counter, next = p;
218 p = p->next_run;
219 }
220
221
222 if (!c) {
223 for_each_task(p)
224 p->counter = (p->counter >> 1) + p->priority;
225 }
226 if (current != next) {
227 struct timer_list timer;
228
229 kstat.context_swtch++;
230 if (timeout) {
231 init_timer(&timer);
232 timer.expires = timeout - jiffies;
233 timer.data = (unsigned long) current;
234 timer.function = process_timeout;
235 add_timer(&timer);
236 }
237 switch_to(next);
238 if (timeout)
239 del_timer(&timer);
240 }
241 }
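
schedule() above arms a one-shot timer_list whose handler is process_timeout() whenever the sleeping task has a non-zero current->timeout, so a bounded sleep needs no extra code in the caller. A minimal sketch of how driver code of this vintage could use it (the calling context and the 2-second value are illustrative, not from this file):

        /* Sleep until woken, but for at most 2 seconds; a pending
         * signal also wakes us because the state is TASK_INTERRUPTIBLE. */
        current->timeout = jiffies + 2*HZ;
        current->state = TASK_INTERRUPTIBLE;
        schedule();
        /* current->timeout was cleared by process_timeout() if the timer fired. */
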
242
243 asmlinkage int sys_pause(void)
244 {
245 current->state = TASK_INTERRUPTIBLE;
246 schedule();
247 return -ERESTARTNOHAND;
248 }
249
250
251
252
253
254
255
256
257
258 void wake_up(struct wait_queue **q)
259 {
260 struct wait_queue *tmp;
261 struct task_struct * p;
262
263 if (!q || !(tmp = *q))
264 return;
265 do {
266 if ((p = tmp->task) != NULL) {
267 if ((p->state == TASK_UNINTERRUPTIBLE) ||
268 (p->state == TASK_INTERRUPTIBLE))
269 wake_up_process(p);
270 }
271 if (!tmp->next) {
272 printk("wait_queue is bad (eip = %p)\n",
273 __builtin_return_address(0));
274 printk(" q = %p\n",q);
275 printk(" *q = %p\n",*q);
276 printk(" tmp = %p\n",tmp);
277 break;
278 }
279 tmp = tmp->next;
280 } while (tmp != *q);
281 }
282
283 void wake_up_interruptible(struct wait_queue **q)
284 {
285 struct wait_queue *tmp;
286 struct task_struct * p;
287
288 if (!q || !(tmp = *q))
289 return;
290 do {
291 if ((p = tmp->task) != NULL) {
292 if (p->state == TASK_INTERRUPTIBLE)
293 wake_up_process(p);
294 }
295 if (!tmp->next) {
296 printk("wait_queue is bad (eip = %p)\n",
297 __builtin_return_address(0));
298 printk(" q = %p\n",q);
299 printk(" *q = %p\n",*q);
300 printk(" tmp = %p\n",tmp);
301 break;
302 }
303 tmp = tmp->next;
304 } while (tmp != *q);
305 }
306
307 void __down(struct semaphore * sem)
308 {
309 struct wait_queue wait = { current, NULL };
310 add_wait_queue(&sem->wait, &wait);
311 current->state = TASK_UNINTERRUPTIBLE;
312 while (sem->count <= 0) {
313 schedule();
314 current->state = TASK_UNINTERRUPTIBLE;
315 }
316 current->state = TASK_RUNNING;
317 remove_wait_queue(&sem->wait, &wait);
318 }
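
__down() is only the blocking slow path; the fast-path down()/up() inlines live in the architecture header (<asm/semaphore.h>) and fall through to __down() when the count is already taken. A hedged usage sketch, assuming the MUTEX initialiser from that header (my_sem and the critical section are illustrative):

        static struct semaphore my_sem = MUTEX;    /* count starts at 1 */

        down(&my_sem);      /* sleeps in __down() if already held */
        /* ... code that must not run concurrently ... */
        up(&my_sem);        /* wakes the next waiter queued on my_sem.wait */
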
319
320 static inline void __sleep_on(struct wait_queue **p, int state)
321 {
322 unsigned long flags;
323 struct wait_queue wait = { current, NULL };
324
325 if (!p)
326 return;
327 if (current == task[0])
328 panic("task[0] trying to sleep");
329 current->state = state;
330 add_wait_queue(p, &wait);
331 save_flags(flags);
332 sti();
333 schedule();
334 remove_wait_queue(p, &wait);
335 restore_flags(flags);
336 }
337
338 void interruptible_sleep_on(struct wait_queue **p)
339 {
340 __sleep_on(p,TASK_INTERRUPTIBLE);
341 }
342
343 void sleep_on(struct wait_queue **p)
344 {
345 __sleep_on(p,TASK_UNINTERRUPTIBLE);
346 }
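
The sleep_on()/wake_up() pair is the basic blocking primitive used throughout drivers of this era. A minimal producer/consumer sketch (the wait queue, flag and calling context are illustrative, not part of this file); the condition is re-tested in a loop because a wake-up does not guarantee the condition still holds when the sleeper runs:

        static struct wait_queue *data_wait = NULL;
        static volatile int data_ready = 0;

        /* reader: block until the interrupt handler signals data */
        while (!data_ready)
                interruptible_sleep_on(&data_wait);
        data_ready = 0;

        /* interrupt handler: publish data and wake any sleeper */
        data_ready = 1;
        wake_up_interruptible(&data_wait);

In real drivers of this period the test and the sleep_on() call were usually made with interrupts disabled, so that a wake-up arriving between the test and the enqueue could not be lost.
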
347
348
349
350
351
352 static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
353 #define SLOW_BUT_DEBUGGING_TIMERS 1
354
355 void add_timer(struct timer_list * timer)
356 {
357 unsigned long flags;
358 struct timer_list *p;
359
360 #if SLOW_BUT_DEBUGGING_TIMERS
361 if (timer->next || timer->prev) {
362 printk("add_timer() called with non-zero list from %p\n",
363 __builtin_return_address(0));
364 return;
365 }
366 #endif
367 p = &timer_head;
368 timer->expires += jiffies;
369 save_flags(flags);
370 cli();
371 do {
372 p = p->next;
373 } while (timer->expires > p->expires);
374 timer->next = p;
375 timer->prev = p->prev;
376 p->prev = timer;
377 timer->prev->next = timer;
378 restore_flags(flags);
379 }
380
381 int del_timer(struct timer_list * timer)
382 {
383 unsigned long flags;
384 #if SLOW_BUT_DEBUGGING_TIMERS
385 struct timer_list * p;
386
387 p = &timer_head;
388 save_flags(flags);
389 cli();
390 while ((p = p->next) != &timer_head) {
391 if (p == timer) {
392 timer->next->prev = timer->prev;
393 timer->prev->next = timer->next;
394 timer->next = timer->prev = NULL;
395 restore_flags(flags);
396 timer->expires -= jiffies;
397 return 1;
398 }
399 }
400 if (timer->next || timer->prev)
401 printk("del_timer() called from %p with timer not initialized\n",
402 __builtin_return_address(0));
403 restore_flags(flags);
404 return 0;
405 #else
406 save_flags(flags);
407 cli();
408 if (timer->next) {
409 timer->next->prev = timer->prev;
410 timer->prev->next = timer->next;
411 timer->next = timer->prev = NULL;
412 restore_flags(flags);
413 timer->expires -= jiffies;
414 return 1;
415 }
416 restore_flags(flags);
417 return 0;
418 #endif
419 }
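
Note that add_timer() above takes a relative expiry: it adds jiffies to timer->expires before sorting the timer into the list, and del_timer() converts it back on removal. A hedged sketch of the calling convention (the handler name and the 5-second value are illustrative):

        static void my_timeout(unsigned long data)
        {
                printk("timer %lu expired\n", data);
        }

        static struct timer_list my_timer;

        init_timer(&my_timer);
        my_timer.expires = 5*HZ;        /* relative; add_timer() adds jiffies */
        my_timer.data = 7;
        my_timer.function = my_timeout;
        add_timer(&my_timer);
        /* ... later, if the awaited event happened first ... */
        del_timer(&my_timer);           /* returns 1 if it was still pending */

The handler runs from timer_bh() with interrupts enabled, not from the timer interrupt itself.
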
420
421 unsigned long timer_active = 0;
422 struct timer_struct timer_table[32];
423
424
425
426
427
428
429
430 unsigned long avenrun[3] = { 0,0,0 };
431
432
433
434
435 static unsigned long count_active_tasks(void)
436 {
437 struct task_struct **p;
438 unsigned long nr = 0;
439
440 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
441 if (*p && ((*p)->state == TASK_RUNNING ||
442 (*p)->state == TASK_UNINTERRUPTIBLE ||
443 (*p)->state == TASK_SWAPPING))
444 nr += FIXED_1;
445 return nr;
446 }
447
448 static inline void calc_load(void)
449 {
450 unsigned long active_tasks;
451 static int count = LOAD_FREQ;
452
453 if (count-- > 0)
454 return;
455 count = LOAD_FREQ;
456 active_tasks = count_active_tasks();
457 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
458 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
459 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
460 }
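
calc_load() maintains the 1-, 5- and 15-minute load averages as fixed-point exponential moving averages, recomputed once every LOAD_FREQ ticks. CALC_LOAD comes from <linux/sched.h>; in kernels of this vintage it amounts to the recurrence sketched below (shown for reference only, constants assumed from that header):

        /* load = load*e + n*(1 - e), in FSHIFT-bit fixed point:
         *   CALC_LOAD(load, exp, n):
         *       load *= exp;
         *       load += n * (FIXED_1 - exp);
         *       load >>= FSHIFT;
         * with FIXED_1 = 1 << FSHIFT, and exp chosen so the average decays
         * over 1, 5 or 15 minutes (EXP_1, EXP_5, EXP_15). */
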
461
462
463
464
465
466
467
468
469
470
471
472 static void second_overflow(void)
473 {
474 long ltemp;
475
476
477 time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
478 0x70000000 : (time_maxerror + time_tolerance);
479
480
481 if (time_offset < 0) {
482 ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
483 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
484 time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
485 time_adj = - time_adj;
486 } else if (time_offset > 0) {
487 ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
488 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
489 time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
490 } else {
491 time_adj = 0;
492 }
493
494 time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
495 + FINETUNE;
496
497
498 switch (time_status) {
499 case TIME_INS:
500
501 if (xtime.tv_sec % 86400 == 0) {
502 xtime.tv_sec--;
503 time_status = TIME_OOP;
504 printk("Clock: inserting leap second 23:59:60 UTC\n");
505 }
506 break;
507
508 case TIME_DEL:
509
510 if (xtime.tv_sec % 86400 == 86399) {
511 xtime.tv_sec++;
512 time_status = TIME_OK;
513 printk("Clock: deleting leap second 23:59:59 UTC\n");
514 }
515 break;
516
517 case TIME_OOP:
518 time_status = TIME_OK;
519 break;
520 }
521 }
522
523
524
525
526 static void timer_bh(void * unused)
527 {
528 unsigned long mask;
529 struct timer_struct *tp;
530 struct timer_list * timer;
531
532 cli();
533 while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
534 void (*fn)(unsigned long) = timer->function;
535 unsigned long data = timer->data;
536 timer->next->prev = timer->prev;
537 timer->prev->next = timer->next;
538 timer->next = timer->prev = NULL;
539 sti();
540 fn(data);
541 cli();
542 }
543 sti();
544
545 for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
546 if (mask > timer_active)
547 break;
548 if (!(mask & timer_active))
549 continue;
550 if (tp->expires > jiffies)
551 continue;
552 timer_active &= ~mask;
553 tp->fn();
554 sti();
555 }
556 }
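
Besides the dynamic timer_list chain, timer_bh() also services the older static timer_table[] slots, which do_timer() below merely flags via mark_bh(TIMER_BH). A hedged sketch of that static style (the FOO_TIMER slot and handler are hypothetical; real slot numbers are assigned in <linux/timer.h>):

        timer_table[FOO_TIMER].fn = foo_times_out;      /* void (*fn)(void) */
        timer_table[FOO_TIMER].expires = jiffies + HZ/2;
        timer_active |= 1 << FOO_TIMER;                 /* arm the slot */
        /* ... */
        timer_active &= ~(1 << FOO_TIMER);              /* disarm before it fires */
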
557
558 void tqueue_bh(void * unused)
559 {
560 run_task_queue(&tq_timer);
561 }
562
563 void immediate_bh(void * unused)
564 {
565 run_task_queue(&tq_immediate);
566 }
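
tq_timer and tq_immediate are drained here in bottom-half context once the corresponding bottom half has been marked (do_timer() marks TQUEUE_BH; whoever queues immediate work marks IMMEDIATE_BH). A minimal sketch of deferring work out of an interrupt handler, assuming queue_task() and struct tq_struct as declared in <linux/tqueue.h> of this era (the routine and data are illustrative):

        static void my_deferred_work(void *data)
        {
                /* runs later with interrupts enabled, outside the handler */
        }

        static struct tq_struct my_tq = { NULL, 0, my_deferred_work, NULL };

        /* inside the interrupt handler: */
        queue_task(&my_tq, &tq_immediate);
        mark_bh(IMMEDIATE_BH);
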
567
568
569
570
571
572
573
574 static void do_timer(int irq, struct pt_regs * regs)
575 {
576 unsigned long mask;
577 struct timer_struct *tp;
578
579 static long last_rtc_update=0;
580 extern int set_rtc_mmss(unsigned long);
581
582 long ltemp, psecs;
583
584
585
586
587 time_phase += time_adj;
588 if (time_phase < -FINEUSEC) {
589 ltemp = -time_phase >> SHIFT_SCALE;
590 time_phase += ltemp << SHIFT_SCALE;
591 xtime.tv_usec += tick + time_adjust_step - ltemp;
592 }
593 else if (time_phase > FINEUSEC) {
594 ltemp = time_phase >> SHIFT_SCALE;
595 time_phase -= ltemp << SHIFT_SCALE;
596 xtime.tv_usec += tick + time_adjust_step + ltemp;
597 } else
598 xtime.tv_usec += tick + time_adjust_step;
599
600 if (time_adjust)
601 {
602
603
604
605
606
607
608
609
610
611 if (time_adjust > tickadj)
612 time_adjust_step = tickadj;
613 else if (time_adjust < -tickadj)
614 time_adjust_step = -tickadj;
615 else
616 time_adjust_step = time_adjust;
617
618
619 time_adjust -= time_adjust_step;
620 }
621 else
622 time_adjust_step = 0;
623
624 if (xtime.tv_usec >= 1000000) {
625 xtime.tv_usec -= 1000000;
626 xtime.tv_sec++;
627 second_overflow();
628 }
629
630
631
632
633
634 if (time_status != TIME_BAD && xtime.tv_sec > last_rtc_update + 660 &&
635 xtime.tv_usec > 500000 - (tick >> 1) &&
636 xtime.tv_usec < 500000 + (tick >> 1))
637 if (set_rtc_mmss(xtime.tv_sec) == 0)
638 last_rtc_update = xtime.tv_sec;
639 else
640 last_rtc_update = xtime.tv_sec - 600;
641
642 jiffies++;
643 calc_load();
644 if (user_mode(regs)) {
645 current->utime++;
646 if (current != task[0]) {
647 if (current->priority < 15)
648 kstat.cpu_nice++;
649 else
650 kstat.cpu_user++;
651 }
652
653 if (current->it_virt_value && !(--current->it_virt_value)) {
654 current->it_virt_value = current->it_virt_incr;
655 send_sig(SIGVTALRM,current,1);
656 }
657 } else {
658 current->stime++;
659 if(current != task[0])
660 kstat.cpu_system++;
661 #ifdef CONFIG_PROFILE
662 if (prof_buffer && current != task[0]) {
663 extern int _stext;
664 unsigned long eip = regs->eip - (unsigned long) &_stext;
665 eip >>= CONFIG_PROFILE_SHIFT;
666 if (eip < prof_len)
667 prof_buffer[eip]++;
668 }
669 #endif
670 }
671
672
673
674 if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
675 (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
676 send_sig(SIGKILL, current, 1);
677 if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
678 (((current->stime + current->utime) % HZ) == 0)) {
679 psecs = (current->stime + current->utime) / HZ;
680
681 if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
682 send_sig(SIGXCPU, current, 1);
683
684 else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
685 ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
686 send_sig(SIGXCPU, current, 1);
687 }
688
689 if (current != task[0] && 0 > --current->counter) {
690 current->counter = 0;
691 need_resched = 1;
692 }
693
694 if (current->it_prof_value && !(--current->it_prof_value)) {
695 current->it_prof_value = current->it_prof_incr;
696 send_sig(SIGPROF,current,1);
697 }
698 for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
699 if (mask > timer_active)
700 break;
701 if (!(mask & timer_active))
702 continue;
703 if (tp->expires > jiffies)
704 continue;
705 mark_bh(TIMER_BH);
706 }
707 cli();
708 if (timer_head.next->expires < jiffies)
709 mark_bh(TIMER_BH);
710 if (tq_timer != &tq_last)
711 mark_bh(TQUEUE_BH);
712 sti();
713 }
714
715 asmlinkage int sys_alarm(long seconds)
716 {
717 struct itimerval it_new, it_old;
718
719 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
720 it_new.it_value.tv_sec = seconds;
721 it_new.it_value.tv_usec = 0;
722 _setitimer(ITIMER_REAL, &it_new, &it_old);
723 return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
724 }
725
726 asmlinkage int sys_getpid(void)
727 {
728 return current->pid;
729 }
730
731 asmlinkage int sys_getppid(void)
732 {
733 return current->p_opptr->pid;
734 }
735
736 asmlinkage int sys_getuid(void)
737 {
738 return current->uid;
739 }
740
741 asmlinkage int sys_geteuid(void)
742 {
743 return current->euid;
744 }
745
746 asmlinkage int sys_getgid(void)
747 {
748 return current->gid;
749 }
750
751 asmlinkage int sys_getegid(void)
752 {
753 return current->egid;
754 }
755
756 asmlinkage int sys_nice(long increment)
757 {
758 int newprio;
759
760 if (increment < 0 && !suser())
761 return -EPERM;
762 newprio = current->priority - increment;
763 if (newprio < 1)
764 newprio = 1;
765 if (newprio > 35)
766 newprio = 35;
767 current->priority = newprio;
768 return 0;
769 }
770
771 static void show_task(int nr,struct task_struct * p)
772 {
773 unsigned long free;
774 static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
775
776 printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
777 if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
778 printk(stat_nam[p->state]);
779 else
780 printk(" ");
781 #if ((~0UL) == 0xffffffff)
782 if (p == current)
783 printk(" current ");
784 else
785 printk(" %08lX ", thread_saved_pc(&p->tss));
786 #else
787 if (p == current)
788 printk(" current task ");
789 else
790 printk(" %016lx ", thread_saved_pc(&p->tss));
791 #endif
792 for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
793 if (((unsigned long *)p->kernel_stack_page)[free])
794 break;
795 }
796 printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
797 if (p->p_cptr)
798 printk("%5d ", p->p_cptr->pid);
799 else
800 printk(" ");
801 if (p->p_ysptr)
802 printk("%7d", p->p_ysptr->pid);
803 else
804 printk(" ");
805 if (p->p_osptr)
806 printk(" %5d\n", p->p_osptr->pid);
807 else
808 printk("\n");
809 }
810
811 void show_state(void)
812 {
813 int i;
814
815 #if ((~0UL) == 0xffffffff)
816 printk("\n"
817 " free sibling\n");
818 printk(" task PC stack pid father child younger older\n");
819 #else
820 printk("\n"
821 " free sibling\n");
822 printk(" task PC stack pid father child younger older\n");
823 #endif
824 for (i=0 ; i<NR_TASKS ; i++)
825 if (task[i])
826 show_task(i,task[i]);
827 }
828
829 void sched_init(void)
830 {
831 bh_base[TIMER_BH].routine = timer_bh;
832 bh_base[TQUEUE_BH].routine = tqueue_bh;
833 bh_base[IMMEDIATE_BH].routine = immediate_bh;
834 if (request_irq(TIMER_IRQ, do_timer, 0, "timer") != 0)
835 panic("Could not allocate timer IRQ!");
836 enable_bh(TIMER_BH);
837 enable_bh(TQUEUE_BH);
838 enable_bh(IMMEDIATE_BH);
839 }