This source file includes following definitions.
- add_to_runqueue
- del_from_runqueue
- wake_up_process
- process_timeout
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- immediate_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/tqueue.h>
27 #include <linux/resource.h>
28 #include <linux/mm.h>
29
30 #include <asm/system.h>
31 #include <asm/io.h>
32 #include <asm/segment.h>
33 #include <asm/pgtable.h>
34
35 #include <linux/timex.h>
36
37
38
39
/* Length of a clock tick in microseconds, and the current wall-clock
 * time, maintained by do_timer().  tickadj bounds how many microseconds
 * an adjtime() correction may slew per tick. */
long tick = 1000000 / HZ;
volatile struct timeval xtime;
int tickadj = 500/HZ;

/* Task queues run from the bottom halves and from schedule(). */
DECLARE_TASK_QUEUE(tq_timer);
DECLARE_TASK_QUEUE(tq_immediate);
DECLARE_TASK_QUEUE(tq_scheduler);

/* NTP phase-locked-loop state; consumed by second_overflow() and
 * do_timer().  See <linux/timex.h> for the scaling conventions. */
int time_state = TIME_BAD;	/* leap-second state machine */
int time_status = STA_UNSYNC;	/* clock status bits */
long time_offset = 0;		/* time adjustment (scaled us) */
long time_constant = 0;		/* PLL time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance */
long time_precision = 1;	/* clock precision (us) */
long time_maxerror = 0x70000000;/* maximum error estimate */
long time_esterror = 0x70000000;/* estimated error */
long time_phase = 0;		/* phase offset (scaled us) */
long time_freq = 0;		/* frequency offset (scaled ppm) */
long time_adj = 0;		/* per-tick adjustment (scaled) */
long time_reftime = 0;		/* time at last adjustment (s) */

/* Outstanding adjtime() correction, and the slice of it applied
 * on each tick by do_timer(). */
long time_adjust = 0;
long time_adjust_step = 0;

int need_resched = 0;		/* set when schedule() should run soon */
unsigned long event = 0;

extern int _setitimer(int, struct itimerval *, struct itimerval *);
/* Kernel profiling histogram (filled in by do_timer() when enabled). */
unsigned long * prof_buffer = NULL;
unsigned long prof_len = 0;
unsigned long prof_shift = 0;

#define _S(nr) (1<<((nr)-1))

extern void mem_use(void);

/* Statically allocated state for the initial (idle) task, task[0]. */
static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
unsigned long init_user_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;

struct mm_struct init_mm = INIT_MM;
struct task_struct init_task = INIT_TASK;

unsigned long volatile jiffies=0;	/* clock ticks since boot */

struct task_struct *current = &init_task;	/* the running task */
struct task_struct *last_task_used_math = NULL;	/* FPU-state owner */

struct task_struct * task[NR_TASKS] = {&init_task, };

struct kernel_stat kstat = { 0 };
/*
 * Link task p onto the head of the run-queue, a circular doubly-linked
 * list anchored at init_task.  Caller must hold interrupts off.
 * Also requests a reschedule if the newcomer has a clearly larger
 * remaining time-slice than the current task.
 */
static inline void add_to_runqueue(struct task_struct * p)
{
#if 1	/* sanity test: reject a task already queued */
	if (p->next_run || p->prev_run) {
		printk("task already on run-queue\n");
		return;
	}
#endif
	/* Hysteresis of 3 ticks avoids a reschedule storm. */
	if (p->counter > current->counter + 3)
		need_resched = 1;
	nr_running++;
	(p->next_run = init_task.next_run)->prev_run = p;
	p->prev_run = &init_task;
	init_task.next_run = p;
}
113
/*
 * Unlink task p from the run-queue.  init_task (the idle task) is
 * never removed: it anchors the circular list and must always be
 * runnable.  Caller must hold interrupts off.
 */
static inline void del_from_runqueue(struct task_struct * p)
{
	struct task_struct *next = p->next_run;
	struct task_struct *prev = p->prev_run;

#if 1	/* sanity test: reject a task that is not queued */
	if (!next || !prev) {
		printk("task not on run-queue\n");
		return;
	}
#endif
	if (p == &init_task) {
		/* Rate-limit the complaint to the first five occurrences. */
		static int nr = 0;
		if (nr < 5) {
			nr++;
			printk("idle task may not sleep\n");
		}
		return;
	}
	nr_running--;
	next->prev_run = prev;
	prev->next_run = next;
	p->next_run = NULL;
	p->prev_run = NULL;
}
139
140
141
142
143
144
145
146
147
/*
 * Make task p runnable: set TASK_RUNNING and put it on the run-queue
 * if it is not already there.  Interrupts are disabled around the
 * update (and restored afterwards), so this is usable from interrupt
 * context and from timer handlers.
 */
inline void wake_up_process(struct task_struct * p)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	p->state = TASK_RUNNING;
	/* next_run non-NULL means the task is already queued. */
	if (!p->next_run)
		add_to_runqueue(p);
	restore_flags(flags);
}
159
160 static void process_timeout(unsigned long __data)
161 {
162 struct task_struct * p = (struct task_struct *) __data;
163
164 p->timeout = 0;
165 wake_up_process(p);
166 }
167
168
169
170
171
172
173
174
175
176
177
/*
 * The main scheduler entry point.  Deactivates the current task if it
 * is no longer runnable, picks the runnable task with the largest
 * remaining time-slice ('counter'), recharges all counters once every
 * runnable task has exhausted its slice, and context-switches to the
 * winner.  A pending current->timeout is turned into a one-shot
 * kernel timer that brackets the switch.
 *
 * Must not be called from interrupt context.
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * next;
	unsigned long timeout = 0;

	if (intr_count) {
		printk("Aiee: scheduling in interrupt\n");
		return;
	}
	run_task_queue(&tq_scheduler);

	need_resched = 0;
	cli();
	switch (current->state) {
		case TASK_INTERRUPTIBLE:
			/* A pending unblocked signal makes us runnable again. */
			if (current->signal & ~current->blocked)
				goto makerunnable;
			timeout = current->timeout;
			if (timeout && (timeout <= jiffies)) {
				/* Timeout already expired: keep running. */
				current->timeout = 0;
				timeout = 0;
		makerunnable:
				current->state = TASK_RUNNING;
				break;
			}
			/* still asleep: fall through and dequeue */
		default:
			del_from_runqueue(current);
		case TASK_RUNNING:
	}
	p = init_task.next_run;
	sti();

	/*
	 * Select the runnable task with the highest counter; init_task
	 * (idle) is the fallback when nothing else is runnable.  c starts
	 * well below any real counter value.
	 */
	c = -1000;
	next = &init_task;
	while (p != &init_task) {
		if (p->counter > c)
			c = p->counter, next = p;
		p = p->next_run;
	}

	/* All slices spent: recharge every task, favouring high priority
	 * and carrying over half of any unused slice. */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
	if (current != next) {
		struct timer_list timer;

		kstat.context_swtch++;
		if (timeout) {
			/* Arm the wake-up for a timed sleep. */
			init_timer(&timer);
			timer.expires = timeout;
			timer.data = (unsigned long) current;
			timer.function = process_timeout;
			add_timer(&timer);
		}
		switch_to(next);
		/* Back again: cancel the timer if it has not fired yet. */
		if (timeout)
			del_timer(&timer);
	}
}
249
/*
 * pause(2): sleep interruptibly until a signal is delivered.
 * Returns -ERESTARTNOHAND so the call is interrupted (not restarted)
 * once the signal handler has run.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
256
257
258
259
260
261
262
263
264
/*
 * Wake every task sleeping on wait queue *q, whether it sleeps
 * interruptibly or uninterruptibly.  The queue is circular and singly
 * linked through ->next; a NULL ->next indicates a corrupted queue,
 * which is logged (with the caller's address) and aborts the walk.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE))
				wake_up_process(p);
		}
		if (!tmp->next) {
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
289
/*
 * Like wake_up(), but wakes only tasks in TASK_INTERRUPTIBLE sleep;
 * uninterruptible sleepers are left alone.  Same corruption check
 * and bail-out as wake_up().
 */
void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE)
				wake_up_process(p);
		}
		if (!tmp->next) {
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
313
314 void __down(struct semaphore * sem)
315 {
316 struct wait_queue wait = { current, NULL };
317 add_wait_queue(&sem->wait, &wait);
318 current->state = TASK_UNINTERRUPTIBLE;
319 while (sem->count <= 0) {
320 schedule();
321 current->state = TASK_UNINTERRUPTIBLE;
322 }
323 current->state = TASK_RUNNING;
324 remove_wait_queue(&sem->wait, &wait);
325 }
326
/*
 * Common helper for sleep_on()/interruptible_sleep_on().  Marks the
 * current task with 'state', adds it to wait queue *p, enables
 * interrupts and schedules away; on wake-up the task is removed from
 * the queue and the caller's interrupt flags are restored.
 * task[0] (the idle task) must never sleep.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");
	/* State is set before queueing so a wake_up() that runs after
	 * add_wait_queue() makes us runnable again. */
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
344
/* Sleep on wait queue *p; signals may cut the sleep short. */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}
349
/* Sleep on wait queue *p uninterruptibly (signals are ignored). */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
354
355
356
357
358
/*
 * The kernel timer list is doubly linked and kept sorted by expiry.
 * timer_head is a sentinel whose expires = ~0 always sorts last, so
 * insertion scans need no end-of-list test.
 */
static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
#define SLOW_BUT_DEBUGGING_TIMERS 1
361
/*
 * Insert a timer into the sorted timer list, in expiry order.
 * The scan and splice run with interrupts disabled; the sentinel's
 * ~0 expiry guarantees termination.  With SLOW_BUT_DEBUGGING_TIMERS
 * enabled, a timer whose link fields are non-NULL (i.e. apparently
 * already queued) is rejected with a diagnostic.
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	if (timer->next || timer->prev) {
		printk("add_timer() called with non-zero list from %p\n",
			__builtin_return_address(0));
		return;
	}
#endif
	p = &timer_head;
	save_flags(flags);
	cli();
	/* Find the first entry expiring no earlier than this timer. */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	/* Splice the timer in just before p. */
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}
386
/*
 * Remove a timer from the list.  Returns 1 if the timer was queued
 * and has been unlinked, 0 otherwise.  The debugging variant walks
 * the whole list to verify the timer is genuinely on it and warns
 * about timers with stale (non-NULL) link fields that were not found.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	struct timer_list * p;

	p = &timer_head;
	save_flags(flags);
	cli();
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			return 1;
		}
	}
	if (timer->next || timer->prev)
		printk("del_timer() called from %p with timer not initialized\n",
			__builtin_return_address(0));
	restore_flags(flags);
	return 0;
#else
	save_flags(flags);
	cli();
	/* A queued timer always has a non-NULL next (sentinel list). */
	if (timer->next) {
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		restore_flags(flags);
		return 1;
	}
	restore_flags(flags);
	return 0;
#endif
}
424
/* Old-style static timer table: one slot per well-known user, with a
 * bitmask of active entries.  Expired slots are run from timer_bh(). */
unsigned long timer_active = 0;
struct timer_struct timer_table[32];

/* 1/5/15-minute load averages as fixed-point values (see CALC_LOAD
 * and FIXED_1); updated from calc_load(). */
unsigned long avenrun[3] = { 0,0,0 };
435
436
437
438
439 static unsigned long count_active_tasks(void)
440 {
441 struct task_struct **p;
442 unsigned long nr = 0;
443
444 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
445 if (*p && ((*p)->state == TASK_RUNNING ||
446 (*p)->state == TASK_UNINTERRUPTIBLE ||
447 (*p)->state == TASK_SWAPPING))
448 nr += FIXED_1;
449 return nr;
450 }
451
/*
 * Called on every timer tick; recomputes the 1/5/15-minute load
 * averages once every LOAD_FREQ ticks.  The static countdown makes
 * the common per-tick path a single decrement-and-return.
 */
static inline void calc_load(void)
{
	unsigned long active_tasks;	/* fixed-point, scaled by FIXED_1 */
	static int count = LOAD_FREQ;

	if (count-- > 0)
		return;
	count = LOAD_FREQ;
	active_tasks = count_active_tasks();
	CALC_LOAD(avenrun[0], EXP_1, active_tasks);
	CALC_LOAD(avenrun[1], EXP_5, active_tasks);
	CALC_LOAD(avenrun[2], EXP_15, active_tasks);
}
465
466
467
468
469
470
471
472
473
474
/*
 * Runs once per second from do_timer() when xtime.tv_usec wraps.
 * Implements the NTP leap-second state machine and computes time_adj,
 * the scaled per-tick phase/frequency correction that do_timer()
 * applies on every tick.
 */
static void second_overflow(void)
{
	long ltemp;

	/* Grow the maximum-error estimate, saturating at 0x70000000. */
	time_maxerror = (0x70000000-time_maxerror <
			 time_tolerance >> SHIFT_USEC) ?
		0x70000000 : (time_maxerror + (time_tolerance >> SHIFT_USEC));

	/*
	 * Leap-second handling: TIME_INS inserts a second at midnight
	 * UTC, TIME_DEL deletes one, and TIME_OOP/TIME_WAIT step the
	 * machine back to TIME_OK once the status bits are cleared.
	 */
	switch (time_state) {

	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;

	case TIME_INS:
		if (xtime.tv_sec % 86400 == 0) {	/* 00:00:00 UTC */
			xtime.tv_sec--;
			time_state = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 UTC\n");
		}
		break;

	case TIME_DEL:
		if ((xtime.tv_sec + 1) % 86400 == 0) {
			xtime.tv_sec++;
			time_state = TIME_WAIT;
			printk("Clock: deleting leap second 23:59:59 UTC\n");
		}
		break;

	case TIME_OOP:
		time_state = TIME_WAIT;
		break;

	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}

	/*
	 * Phase adjustment for the next second: take a bounded fraction
	 * of time_offset (attenuated by the PLL time constant unless in
	 * frequency-lock mode), consume it from time_offset, and convert
	 * it into a per-tick value in time_adj.  The two arms differ
	 * only in sign handling.
	 */
	if (time_offset < 0) {
		ltemp = -time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) <<
				SHIFT_UPDATE;
		time_offset += ltemp;
		time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ -
				      SHIFT_UPDATE);
	} else {
		ltemp = time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) <<
				SHIFT_UPDATE;
		time_offset -= ltemp;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ -
				     SHIFT_UPDATE);
	}

	/*
	 * Age the PPS signal: after PPS_VALID seconds without a pulse,
	 * reset the jitter/stability estimates and clear the PPS status
	 * bits.
	 */
	pps_valid++;
	if (pps_valid == PPS_VALID) {
		pps_jitter = MAXTIME;
		pps_stabil = MAXFREQ;
		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
				 STA_PPSWANDER | STA_PPSERROR);
	}
	/* Fold the frequency correction (PLL + PPS discipline) into
	 * the per-tick adjustment. */
	ltemp = time_freq + pps_freq;
	if (ltemp < 0)
		time_adj -= -ltemp >>
			(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
	else
		time_adj += ltemp >>
			(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

	/* 25% fix-up of time_adj — presumably compensating for SHIFT_HZ
	 * only approximating HZ as a power of two; TODO confirm against
	 * the NTP kernel-model documentation. */
	if (time_adj < 0)
		time_adj -= -time_adj >> 2;
	else
		time_adj += time_adj >> 2;
}
586
587
588
589
/*
 * Timer bottom half.  First runs every expired entry on the sorted
 * timer list: each is unlinked with interrupts disabled and its
 * handler then called with interrupts enabled.  Afterwards, expired
 * old-style timer_table entries are run the same way.
 */
static void timer_bh(void * unused)
{
	unsigned long mask;
	struct timer_struct *tp;
	struct timer_list * timer;

	cli();
	while ((timer = timer_head.next) != &timer_head && timer->expires <= jiffies) {
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		/* Unlink before calling: the handler may re-add itself. */
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();
		fn(data);
		cli();
	}
	sti();

	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;	/* one-shot: deactivate first */
		tp->fn();
		sti();
	}
}
621
/* Bottom half that drains the tq_timer task queue. */
void tqueue_bh(void * unused)
{
	run_task_queue(&tq_timer);
}
626
/* Bottom half that drains the tq_immediate task queue. */
void immediate_bh(void * unused)
{
	run_task_queue(&tq_immediate);
}
631
/*
 * The clock interrupt handler.  Advances xtime by one tick (plus the
 * NTP and adjtime() corrections), bumps jiffies, updates the load
 * average, accounts CPU time to the current task, enforces
 * RLIMIT_CPU, runs down the time-slice and interval timers, and marks
 * the timer/tqueue bottom halves when they have work.
 */
void do_timer(struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;
	long ltemp, psecs;

	/*
	 * Advance wall-clock time by one tick, folding in the scaled
	 * NTP phase adjustment (time_adj) and this tick's adjtime()
	 * step.  Whole microseconds accumulated in time_phase are
	 * transferred to tv_usec.
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase >= FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust)
	{
		/*
		 * Spread an outstanding adjtime() correction over many
		 * ticks, at most tickadj microseconds per tick, so the
		 * clock slews rather than steps.
		 */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		/* Consume this tick's slice of the correction. */
		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;

	/* Roll microseconds into seconds; run the once-per-second work. */
	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();
	}

	jiffies++;
	calc_load();
	if (user_mode(regs)) {
		/* Tick landed in user mode: charge user (or nice) time. */
		current->utime++;
		if (current != task[0]) {
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}
		/* ITIMER_VIRTUAL: fire and re-arm when it reaches zero. */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		/* Tick landed in kernel mode: charge system time. */
		current->stime++;
		if(current != task[0])
			kstat.cpu_system++;
		/* Kernel profiling: bump the histogram bucket for the
		 * interrupted kernel PC (offset from _stext). */
		if (prof_buffer && current != task[0]) {
			extern int _stext;
			unsigned long ip = instruction_pointer(regs);
			ip -= (unsigned long) &_stext;
			ip >>= prof_shift;
			if (ip < prof_len)
				prof_buffer[ip]++;
		}
	}

	/* RLIMIT_CPU: SIGKILL at the hard limit; SIGXCPU when the soft
	 * limit is reached and every five CPU-seconds thereafter. */
	if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
	    (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
		send_sig(SIGKILL, current, 1);
	if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
	    (((current->stime + current->utime) % HZ) == 0)) {
		psecs = (current->stime + current->utime) / HZ;

		if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
			send_sig(SIGXCPU, current, 1);

		else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
		         ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
			send_sig(SIGXCPU, current, 1);
	}

	/* Time-slice expired: ask for a reschedule (idle task exempt). */
	if (current != task[0] && 0 > --current->counter) {
		current->counter = 0;
		need_resched = 1;
	}

	/* ITIMER_PROF: fire and re-arm when it reaches zero. */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
	/* Flag the timer bottom half if any old-style table entry expired. */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	cli();
	/* Likewise for the sorted timer list and queued timer tasks. */
	if (timer_head.next->expires <= jiffies)
		mark_bh(TIMER_BH);
	if (tq_timer != &tq_last)
		mark_bh(TQUEUE_BH);
	sti();
}
755
756 asmlinkage unsigned int sys_alarm(unsigned int seconds)
757 {
758 struct itimerval it_new, it_old;
759 unsigned int oldalarm;
760
761 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
762 it_new.it_value.tv_sec = seconds;
763 it_new.it_value.tv_usec = 0;
764 _setitimer(ITIMER_REAL, &it_new, &it_old);
765 oldalarm = it_old.it_value.tv_sec;
766
767
768 if (it_old.it_value.tv_usec)
769 oldalarm++;
770 return oldalarm;
771 }
772
/* getpid(2): the caller's process id. */
asmlinkage int sys_getpid(void)
{
	return current->pid;
}

/* getppid(2): pid of the original parent (p_opptr). */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}

/* getuid(2): real user id. */
asmlinkage int sys_getuid(void)
{
	return current->uid;
}

/* geteuid(2): effective user id. */
asmlinkage int sys_geteuid(void)
{
	return current->euid;
}

/* getgid(2): real group id. */
asmlinkage int sys_getgid(void)
{
	return current->gid;
}

/* getegid(2): effective group id. */
asmlinkage int sys_getegid(void)
{
	return current->egid;
}
802
803 asmlinkage int sys_nice(long increment)
804 {
805 int newprio;
806
807 if (increment < 0 && !suser())
808 return -EPERM;
809 newprio = current->priority - increment;
810 if (newprio < 1)
811 newprio = 1;
812 if (newprio > 35)
813 newprio = 35;
814 current->priority = newprio;
815 return 0;
816 }
817
/*
 * Print one line of the show_state() table for task p: command name,
 * task-slot number (negated for the current task), state letter,
 * saved PC, free kernel-stack estimate, pid, and family pids.
 */
static void show_task(int nr,struct task_struct * p)
{
	unsigned long free;
	static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
	if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
		/* NOTE(review): non-literal printk format string; the
		 * table entries contain no '%' so it is harmless here. */
		printk(stat_nam[p->state]);
	else
		printk(" ");
#if ((~0UL) == 0xffffffff)	/* 32-bit longs: 8-digit PC */
	if (p == current)
		printk(" current ");
	else
		printk(" %08lX ", thread_saved_pc(&p->tss));
#else				/* 64-bit longs: 16-digit PC */
	if (p == current)
		printk(" current task ");
	else
		printk(" %016lx ", thread_saved_pc(&p->tss));
#endif
	/* Estimate free stack: scan up from the bottom of the kernel
	 * stack page to the first non-zero word. */
	for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
		if (((unsigned long *)p->kernel_stack_page)[free])
			break;
	}
	printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
		printk(" ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);
	else
		printk(" ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);
	else
		printk("\n");
}
857
/*
 * Debugging aid: dump a one-line summary of every task in the system
 * via show_task().  The 32/64-bit header branches differ only to
 * match the PC column width printed by show_task().
 */
void show_state(void)
{
	int i;

#if ((~0UL) == 0xffffffff)
	printk("\n"
	       " free sibling\n");
	printk(" task PC stack pid father child younger older\n");
#else
	printk("\n"
	       " free sibling\n");
	printk(" task PC stack pid father child younger older\n");
#endif
	for (i=0 ; i<NR_TASKS ; i++)
		if (task[i])
			show_task(i,task[i]);
}
875
876 void sched_init(void)
877 {
878 bh_base[TIMER_BH].routine = timer_bh;
879 bh_base[TQUEUE_BH].routine = tqueue_bh;
880 bh_base[IMMEDIATE_BH].routine = immediate_bh;
881 enable_bh(TIMER_BH);
882 enable_bh(TQUEUE_BH);
883 enable_bh(IMMEDIATE_BH);
884 }