This source file includes the following definitions:
- add_to_runqueue
- del_from_runqueue
- move_last_runqueue
- wake_up_process
- process_timeout
- goodness
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- immediate_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init

/*
 *  linux/kernel/sched.c
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/fdreg.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/ptrace.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/tqueue.h>
#include <linux/resource.h>
#include <linux/mm.h>
#include <linux/smp.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/pgtable.h>

#include <linux/timex.h>

/*
 * kernel variables
 */
long tick = 1000000 / HZ;               /* timer interrupt period */
volatile struct timeval xtime;          /* the current time */
int tickadj = 500/HZ;                   /* microsecs */

DECLARE_TASK_QUEUE(tq_timer);
DECLARE_TASK_QUEUE(tq_immediate);
DECLARE_TASK_QUEUE(tq_scheduler);

/*
 * phase-locked loop variables (NTP adjtimex(2) support)
 */
int time_state = TIME_BAD;              /* clock synchronization status */
int time_status = STA_UNSYNC;           /* clock status bits */
long time_offset = 0;                   /* time adjustment (us) */
long time_constant = 0;                 /* pll time constant */
long time_tolerance = MAXFREQ;          /* frequency tolerance (ppm) */
long time_precision = 1;                /* clock precision (us) */
long time_maxerror = 0x70000000;        /* maximum error */
long time_esterror = 0x70000000;        /* estimated error */
long time_phase = 0;                    /* phase offset (scaled us) */
long time_freq = 0;                     /* frequency offset (scaled ppm) */
long time_adj = 0;                      /* tick adjust (scaled 1 / HZ) */
long time_reftime = 0;                  /* time at last adjustment (s) */

long time_adjust = 0;                   /* outstanding adjtime() correction (us) */
long time_adjust_step = 0;              /* per-tick slice of time_adjust */

int need_resched = 0;
unsigned long event = 0;

extern int _setitimer(int, struct itimerval *, struct itimerval *);
unsigned long * prof_buffer = NULL;     /* kernel profiling buffer */
unsigned long prof_len = 0;
unsigned long prof_shift = 0;

#define _S(nr) (1<<((nr)-1))

extern void mem_use(void);

/*
 * The init task and its statically allocated state.
 */
static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
unsigned long init_user_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;

struct mm_struct init_mm = INIT_MM;
struct task_struct init_task = INIT_TASK;

unsigned long volatile jiffies=0;

struct task_struct *current_set[NR_CPUS];
struct task_struct *last_task_used_math = NULL;

struct task_struct * task[NR_TASKS] = {&init_task, };

struct kernel_stat kstat = { 0 };

/*
 * The run-queue is a circular doubly linked list anchored at init_task:
 * add_to_runqueue() inserts at the back, and asks for a reschedule if
 * the new task looks more deserving than the current one.
 */
static inline void add_to_runqueue(struct task_struct * p)
{
#if 1   /* sanity tests */
        if (p->next_run || p->prev_run) {
                printk("task already on run-queue\n");
                return;
        }
#endif
        if (p->counter > current->counter + 3)
                need_resched = 1;
        nr_running++;
        (p->prev_run = init_task.prev_run)->next_run = p;
        p->next_run = &init_task;
        init_task.prev_run = p;
}

static inline void del_from_runqueue(struct task_struct * p)
{
        struct task_struct *next = p->next_run;
        struct task_struct *prev = p->prev_run;

#if 1   /* sanity tests */
        if (!next || !prev) {
                printk("task not on run-queue\n");
                return;
        }
#endif
        if (p == &init_task) {
                static int nr = 0;
                if (nr < 5) {
                        nr++;
                        printk("idle task may not sleep\n");
                }
                return;
        }
        nr_running--;
        next->prev_run = prev;
        prev->next_run = next;
        p->next_run = NULL;
        p->prev_run = NULL;
}

static inline void move_last_runqueue(struct task_struct * p)
{
        struct task_struct *next = p->next_run;
        struct task_struct *prev = p->prev_run;

        /* unlink, then re-insert at the back of the queue */
        next->prev_run = prev;
        prev->next_run = next;
        (p->prev_run = init_task.prev_run)->next_run = p;
        p->next_run = &init_task;
        init_task.prev_run = p;
}

/*
 * Wake up a process. Put it on the run-queue if it's not
 * already there.  The "current" process is always on the
 * run-queue (except when the actual re-schedule is in
 * progress), and as such you're allowed to do the simpler
 * "current->state = TASK_RUNNING" to mark yourself runnable
 * without the overhead of this.
 */
inline void wake_up_process(struct task_struct * p)
{
        unsigned long flags;

        save_flags(flags);
        cli();
        p->state = TASK_RUNNING;
        if (!p->next_run)
                add_to_runqueue(p);
        restore_flags(flags);
}

/* Timer callback used by schedule() to bound an interruptible sleep. */
static void process_timeout(unsigned long __data)
{
        struct task_struct * p = (struct task_struct *) __data;

        p->timeout = 0;
        wake_up_process(p);
}
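
Together these two routines support bounded sleeps: whenever current->timeout is set, schedule() arms a timer_list whose handler is process_timeout(). A minimal sketch of the calling idiom (the surrounding driver function is hypothetical):

static void wait_for_event(void)                /* hypothetical caller */
{
        current->timeout = jiffies + 2*HZ;      /* absolute expiry, in ticks */
        current->state = TASK_INTERRUPTIBLE;
        schedule();                             /* arms the timer; removes it on wakeup */
        if (!current->timeout)
                printk("timed out\n");          /* process_timeout() zeroed it */
}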

/*
 * This is the function that decides how desirable a process is..
 * You can weigh different processes against each other depending
 * on what CPU they've run on lately etc to try to handle cache
 * and TLB miss penalties.
 *
 * Return values:
 *       -1000: never select this
 *           0: out of time, recalculate counters (but it might still be
 *              selected)
 *         +ve: "goodness" value (the larger, the better)
 *       +1000: realtime process, select this.
 */
static inline int goodness(struct task_struct * p, int this_cpu)
{
        int weight;

#ifdef __SMP__
        /* We are not permitted to run a task someone else is running */
        if (p->processor != NO_PROC_ID)
                return -1000;
#endif

        /*
         * Realtime process, select the first one on the
         * runqueue (taking priorities within processes
         * into account).
         */
        if (p->policy != SCHED_OTHER)
                return 1000 + p->priority;

        /*
         * Give the process a first-approximation goodness value
         * according to the number of clock-ticks it has left.
         *
         * Don't do any other calculations if the time slice is
         * over..
         */
        weight = p->counter;
        if (weight) {

#ifdef __SMP__
                /* Give a largish advantage to the same processor...   */
                /* (this is equivalent to penalizing other processors) */
                if (p->last_processor == this_cpu)
                        weight += PROC_CHANGE_PENALTY;
#endif

                /* .. and a slight advantage to the current process */
                if (p == current)
                        weight += 1;
        }

        return weight;
}
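
For example, a SCHED_FIFO task with priority 50 always scores 1050 and outranks every SCHED_OTHER task, whose score can never exceed its remaining counter ticks plus the small same-CPU and current-process bonuses; a task whose slice is exhausted scores 0, which is what triggers the counter-recalculation loop in schedule() below.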

/*
 *  'schedule()' is the scheduler function. It's a very simple and nice
 * scheduler: it's not perfect, but certainly works for most things.
 *
 *   NOTE!!  Task 0 is the 'idle' task, which gets called when no other
 * tasks can run. It can not be killed, and it cannot sleep. The 'state'
 * information in task[0] is never used.
 */
asmlinkage void schedule(void)
{
        int c;
        struct task_struct * p;
        struct task_struct * next;
        unsigned long timeout = 0;
        int this_cpu=smp_processor_id();

        if (intr_count) {
                printk("Aiee: scheduling in interrupt\n");
                return;
        }
        run_task_queue(&tq_scheduler);

        need_resched = 0;
        cli();
        /* move an exhausted RR process to be last.. */
        if (!current->counter && current->policy == SCHED_RR) {
                current->counter = current->priority;
                move_last_runqueue(current);
        }
        switch (current->state) {
                case TASK_INTERRUPTIBLE:
                        if (current->signal & ~current->blocked)
                                goto makerunnable;
                        timeout = current->timeout;
                        if (timeout && (timeout <= jiffies)) {
                                current->timeout = 0;
                                timeout = 0;
                makerunnable:
                                current->state = TASK_RUNNING;
                                break;
                        }
                default:
                        del_from_runqueue(current);
                case TASK_RUNNING:;
        }
        p = init_task.next_run;
        sti();

#ifdef __SMP__
        /*
         *      This is safe as we do not permit re-entry of schedule()
         */
        current->processor = NO_PROC_ID;
#endif

        /*
         * Note! there may appear new tasks on the run-queue during this, as
         * interrupts are enabled. However, they will be put on front of the
         * list, so our list starting at "p" is essentially fixed.
         */
        /* this is the scheduler proper: */
        c = -1000;
        next = &init_task;
        while (p != &init_task) {
                int weight = goodness(p, this_cpu);
                if (weight > c)
                        c = weight, next = p;
                p = p->next_run;
        }

        /* if all runnable processes have "counter == 0", re-calculate counters */
        if (!c) {
                for_each_task(p)
                        p->counter = (p->counter >> 1) + p->priority;
        }
#ifdef __SMP__
        /*
         *      Context switching between two idle threads is pointless.
         */
        if(!current->pid && !next->pid)
                next=current;
        /*
         *      Allocate process to CPU
         */
        next->processor = this_cpu;
        next->last_processor = this_cpu;
#endif
        if (current != next) {
                struct timer_list timer;

                kstat.context_swtch++;
                if (timeout) {
                        init_timer(&timer);
                        timer.expires = timeout;
                        timer.data = (unsigned long) current;
                        timer.function = process_timeout;
                        add_timer(&timer);
                }
                switch_to(next);
                if (timeout)
                        del_timer(&timer);
        }
}

asmlinkage int sys_pause(void)
{
        current->state = TASK_INTERRUPTIBLE;
        schedule();
        return -ERESTARTNOHAND;
}

/*
 * wake_up doesn't wake up stopped processes - they have to be awakened
 * with signals or similar.
 *
 * Note that this doesn't need cli-sti pairs: interrupts may not change
 * the wait-queue structures directly, but only call wake_up() to wake
 * a process. The process itself must remove the queue once it has woken.
 */
void wake_up(struct wait_queue **q)
{
        struct wait_queue *tmp;
        struct task_struct * p;

        if (!q || !(tmp = *q))
                return;
        do {
                if ((p = tmp->task) != NULL) {
                        if ((p->state == TASK_UNINTERRUPTIBLE) ||
                            (p->state == TASK_INTERRUPTIBLE))
                                wake_up_process(p);
                }
                if (!tmp->next) {
                        printk("wait_queue is bad (eip = %p)\n",
                                __builtin_return_address(0));
                        printk("        q = %p\n",q);
                        printk("       *q = %p\n",*q);
                        printk("      tmp = %p\n",tmp);
                        break;
                }
                tmp = tmp->next;
        } while (tmp != *q);
}

void wake_up_interruptible(struct wait_queue **q)
{
        struct wait_queue *tmp;
        struct task_struct * p;

        if (!q || !(tmp = *q))
                return;
        do {
                if ((p = tmp->task) != NULL) {
                        if (p->state == TASK_INTERRUPTIBLE)
                                wake_up_process(p);
                }
                if (!tmp->next) {
                        printk("wait_queue is bad (eip = %p)\n",
                                __builtin_return_address(0));
                        printk("        q = %p\n",q);
                        printk("       *q = %p\n",*q);
                        printk("      tmp = %p\n",tmp);
                        break;
                }
                tmp = tmp->next;
        } while (tmp != *q);
}

/*
 * Semaphore slow path: reached from the down() inline when the count
 * went non-positive. Sleep uninterruptibly until the count is positive.
 */
void __down(struct semaphore * sem)
{
        struct wait_queue wait = { current, NULL };
        add_wait_queue(&sem->wait, &wait);
        current->state = TASK_UNINTERRUPTIBLE;
        while (sem->count <= 0) {
                schedule();
                current->state = TASK_UNINTERRUPTIBLE;
        }
        current->state = TASK_RUNNING;
        remove_wait_queue(&sem->wait, &wait);
}
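
__down() is only the contended slow path behind the down()/up() inlines. A sketch of typical use, assuming the conventional MUTEX initializer for a count-of-one semaphore (all names hypothetical):

static struct semaphore frob_sem = MUTEX;       /* count starts at 1 */

static void frob(void)
{
        down(&frob_sem);        /* ends up in __down() if the count is already <= 0 */
        /* ... critical section ... */
        up(&frob_sem);          /* raises the count and wakes a sleeper on frob_sem.wait */
}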

static inline void __sleep_on(struct wait_queue **p, int state)
{
        unsigned long flags;
        struct wait_queue wait = { current, NULL };

        if (!p)
                return;
        if (current == task[0])
                panic("task[0] trying to sleep");
        current->state = state;
        add_wait_queue(p, &wait);
        save_flags(flags);
        sti();
        schedule();
        remove_wait_queue(p, &wait);
        restore_flags(flags);
}

void interruptible_sleep_on(struct wait_queue **p)
{
        __sleep_on(p,TASK_INTERRUPTIBLE);
}

void sleep_on(struct wait_queue **p)
{
        __sleep_on(p,TASK_UNINTERRUPTIBLE);
}
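
sleep_on()/wake_up() form the classic wait-for-event idiom of this kernel generation. A sketch with hypothetical queue and flag names; note the loop, since wake_up() wakes every sleeper on the queue and the condition must be re-checked:

static struct wait_queue * data_wait = NULL;    /* hypothetical */
static volatile int data_ready = 0;

static void consumer(void)
{
        while (!data_ready)                     /* re-check after every wakeup */
                interruptible_sleep_on(&data_wait);
}

static void producer(void)                      /* e.g. called from an interrupt handler */
{
        data_ready = 1;
        wake_up_interruptible(&data_wait);
}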

/*
 * The head for the timer-list has an "expires" field of MAX_UINT,
 * and the sorting routine counts on this..
 */
static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
#define SLOW_BUT_DEBUGGING_TIMERS 1

void add_timer(struct timer_list * timer)
{
        unsigned long flags;
        struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
        if (timer->next || timer->prev) {
                printk("add_timer() called with non-zero list from %p\n",
                        __builtin_return_address(0));
                return;
        }
#endif
        p = &timer_head;
        save_flags(flags);
        cli();
        /* keep the list sorted by increasing expiry time */
        do {
                p = p->next;
        } while (timer->expires > p->expires);
        timer->next = p;
        timer->prev = p->prev;
        p->prev = timer;
        timer->prev->next = timer;
        restore_flags(flags);
}

int del_timer(struct timer_list * timer)
{
        unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
        struct timer_list * p;

        p = &timer_head;
        save_flags(flags);
        cli();
        while ((p = p->next) != &timer_head) {
                if (p == timer) {
                        timer->next->prev = timer->prev;
                        timer->prev->next = timer->next;
                        timer->next = timer->prev = NULL;
                        restore_flags(flags);
                        return 1;
                }
        }
        if (timer->next || timer->prev)
                printk("del_timer() called from %p with timer not initialized\n",
                        __builtin_return_address(0));
        restore_flags(flags);
        return 0;
#else
        save_flags(flags);
        cli();
        if (timer->next) {
                timer->next->prev = timer->prev;
                timer->prev->next = timer->next;
                timer->next = timer->prev = NULL;
                restore_flags(flags);
                return 1;
        }
        restore_flags(flags);
        return 0;
#endif
}
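
These are the same calls schedule() makes above to bound a sleep; a stand-alone sketch of arming a one-shot kernel timer (callback and timer names hypothetical):

static void ping(unsigned long data)            /* runs from timer_bh() */
{
        printk("timer fired, data=%lu\n", data);
}

static struct timer_list ping_timer;

static void arm_ping(void)                      /* hypothetical caller */
{
        init_timer(&ping_timer);                /* zero next/prev so add_timer() accepts it */
        ping_timer.expires = jiffies + HZ;      /* absolute time: one second from now */
        ping_timer.data = 0;
        ping_timer.function = ping;
        add_timer(&ping_timer);
        /* later, if it has not fired yet, del_timer(&ping_timer) returns 1 */
}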

unsigned long timer_active = 0;
struct timer_struct timer_table[32];

/*
 * The load average is a fixed-point number: FIXED_1 represents 1.0.
 * It is sampled every LOAD_FREQ ticks by calc_load() below.
 */
unsigned long avenrun[3] = { 0,0,0 };

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
        struct task_struct **p;
        unsigned long nr = 0;

        for(p = &LAST_TASK; p > &FIRST_TASK; --p)
                if (*p && ((*p)->state == TASK_RUNNING ||
                           (*p)->state == TASK_UNINTERRUPTIBLE ||
                           (*p)->state == TASK_SWAPPING))
                        nr += FIXED_1;
#ifdef __SMP__
        nr -= (smp_num_cpus-1)*FIXED_1;
#endif
        return nr;
}

static inline void calc_load(void)
{
        unsigned long active_tasks;     /* fixed-point */
        static int count = LOAD_FREQ;

        if (count-- > 0)
                return;
        count = LOAD_FREQ;
        active_tasks = count_active_tasks();
        CALC_LOAD(avenrun[0], EXP_1, active_tasks);
        CALC_LOAD(avenrun[1], EXP_5, active_tasks);
        CALC_LOAD(avenrun[2], EXP_15, active_tasks);
}
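
CALC_LOAD (defined in <linux/sched.h>) is a fixed-point exponential moving average: with FIXED_1 == 1<<FSHIFT standing for 1.0, each update computes

        load = (load*exp + n*(FIXED_1 - exp)) >> FSHIFT;

with exp one of EXP_1, EXP_5, EXP_15. avenrun[] thus holds the 1-, 5- and 15-minute load averages scaled by FIXED_1, and readers (e.g. /proc/loadavg) divide back down for display.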

/*
 * this routine handles the overflow of the microsecond field
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 */
static void second_overflow(void)
{
        long ltemp;

        /* Bump the maxerror field */
        time_maxerror = (0x70000000-time_maxerror <
                         time_tolerance >> SHIFT_USEC) ?
            0x70000000 : (time_maxerror + (time_tolerance >> SHIFT_USEC));

        /*
         * Leap second processing. If in leap-insert state at
         * the end of the day, the system clock is set back one
         * second; if in leap-delete state, the system clock is
         * set ahead one second.
         */
        switch (time_state) {

        case TIME_OK:
                if (time_status & STA_INS)
                        time_state = TIME_INS;
                else if (time_status & STA_DEL)
                        time_state = TIME_DEL;
                break;

        case TIME_INS:
                if (xtime.tv_sec % 86400 == 0) {
                        xtime.tv_sec--;
                        time_state = TIME_OOP;
                        printk("Clock: inserting leap second 23:59:60 UTC\n");
                }
                break;

        case TIME_DEL:
                if ((xtime.tv_sec + 1) % 86400 == 0) {
                        xtime.tv_sec++;
                        time_state = TIME_WAIT;
                        printk("Clock: deleting leap second 23:59:59 UTC\n");
                }
                break;

        case TIME_OOP:
                time_state = TIME_WAIT;
                break;

        case TIME_WAIT:
                if (!(time_status & (STA_INS | STA_DEL)))
                        time_state = TIME_OK;
        }

        /*
         * Compute the phase adjustment for the next second. In PLL mode,
         * the offset is reduced by a factor depending on the time constant;
         * in FLL mode it is used directly. In either mode, the per-second
         * adjustment is clamped so the correction is spread over at least
         * MINSEC seconds.
         */
        if (time_offset < 0) {
                ltemp = -time_offset;
                if (!(time_status & STA_FLL))
                        ltemp >>= SHIFT_KG + time_constant;
                if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
                        ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
                time_offset += ltemp;
                time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
        } else {
                ltemp = time_offset;
                if (!(time_status & STA_FLL))
                        ltemp >>= SHIFT_KG + time_constant;
                if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
                        ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
                time_offset -= ltemp;
                time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
        }

        /*
         * Add the frequency correction (PLL plus PPS, if present) to the
         * per-tick phase adjustment. The PPS watchdog invalidates the PPS
         * signal after PPS_VALID seconds without an update.
         */
        pps_valid++;
        if (pps_valid == PPS_VALID) {
                pps_jitter = MAXTIME;
                pps_stabil = MAXFREQ;
                time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
                                 STA_PPSWANDER | STA_PPSERROR);
        }
        ltemp = time_freq + pps_freq;
        if (ltemp < 0)
                time_adj -= -ltemp >>
                    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
        else
                time_adj += ltemp >>
                    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

#if HZ == 100
        /* compensate for (HZ==100) != 128: add 25% to get ~125, only 3% error */
        if (time_adj < 0)
                time_adj -= -time_adj >> 2;
        else
                time_adj += time_adj >> 2;
#endif
}

/*
 * disregard lost ticks for now.. We don't care enough.
 */
static void timer_bh(void * unused)
{
        unsigned long mask;
        struct timer_struct *tp;
        struct timer_list * timer;

        cli();
        while ((timer = timer_head.next) != &timer_head && timer->expires <= jiffies) {
                void (*fn)(unsigned long) = timer->function;
                unsigned long data = timer->data;
                timer->next->prev = timer->prev;
                timer->prev->next = timer->next;
                timer->next = timer->prev = NULL;
                sti();
                fn(data);
                cli();
        }
        sti();

        for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
                if (mask > timer_active)
                        break;
                if (!(mask & timer_active))
                        continue;
                if (tp->expires > jiffies)
                        continue;
                timer_active &= ~mask;
                tp->fn();
                sti();
        }
}

void tqueue_bh(void * unused)
{
        run_task_queue(&tq_timer);
}

void immediate_bh(void * unused)
{
        run_task_queue(&tq_immediate);
}
void do_timer(struct pt_regs * regs)
{
        unsigned long mask;
        struct timer_struct *tp;
        long ltemp, psecs;

        /*
         * Advance the phase; once it accumulates to a whole microsecond,
         * fold it into the tick.
         */
        time_phase += time_adj;
        if (time_phase <= -FINEUSEC) {
                ltemp = -time_phase >> SHIFT_SCALE;
                time_phase += ltemp << SHIFT_SCALE;
                xtime.tv_usec += tick + time_adjust_step - ltemp;
        }
        else if (time_phase >= FINEUSEC) {
                ltemp = time_phase >> SHIFT_SCALE;
                time_phase -= ltemp << SHIFT_SCALE;
                xtime.tv_usec += tick + time_adjust_step + ltemp;
        } else
                xtime.tv_usec += tick + time_adjust_step;

        if (time_adjust) {
                /* We are doing an adjtime thing.
                 *
                 * Modify the value of the tick for next time.
                 * Note that a positive delta means we want the clock
                 * to run fast. This means that the tick should be bigger
                 *
                 * Limit the amount of the step for *next* tick to be
                 * in the range -tickadj .. +tickadj
                 */
                if (time_adjust > tickadj)
                        time_adjust_step = tickadj;
                else if (time_adjust < -tickadj)
                        time_adjust_step = -tickadj;
                else
                        time_adjust_step = time_adjust;

                /* Reduce the amount of time left by this step */
                time_adjust -= time_adjust_step;
        }
        else
                time_adjust_step = 0;

        if (xtime.tv_usec >= 1000000) {
                xtime.tv_usec -= 1000000;
                xtime.tv_sec++;
                second_overflow();
        }

        jiffies++;
        calc_load();
        if (user_mode(regs)) {
                current->utime++;
                if (current->pid) {
                        if (current->priority < DEF_PRIORITY)
                                kstat.cpu_nice++;
                        else
                                kstat.cpu_user++;
                }
                /* Update ITIMER_VIRT for the current task when in user mode */
                if (current->it_virt_value && !(--current->it_virt_value)) {
                        current->it_virt_value = current->it_virt_incr;
                        send_sig(SIGVTALRM,current,1);
                }
        } else {
                current->stime++;
                if(current->pid)
                        kstat.cpu_system++;
                if (prof_buffer && current->pid) {
                        extern int _stext;
                        unsigned long ip = instruction_pointer(regs);
                        ip -= (unsigned long) &_stext;
                        ip >>= prof_shift;
                        if (ip < prof_len)
                                prof_buffer[ip]++;
                }
        }

        /*
         * Check the CPU resource limits of the process.
         */
        if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
            (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
                send_sig(SIGKILL, current, 1);
        if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
            (((current->stime + current->utime) % HZ) == 0)) {
                psecs = (current->stime + current->utime) / HZ;
                /* send SIGXCPU when the soft limit is first reached.. */
                if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
                        send_sig(SIGXCPU, current, 1);
                /* .. and every five seconds thereafter. */
                else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
                        ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
                        send_sig(SIGXCPU, current, 1);
        }

        if (current->pid && 0 > --current->counter) {
                current->counter = 0;
                need_resched = 1;
        }
        /* Update ITIMER_PROF for the current task */
        if (current->it_prof_value && !(--current->it_prof_value)) {
                current->it_prof_value = current->it_prof_incr;
                send_sig(SIGPROF,current,1);
        }
        for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
                if (mask > timer_active)
                        break;
                if (!(mask & timer_active))
                        continue;
                if (tp->expires > jiffies)
                        continue;
                mark_bh(TIMER_BH);
        }
        cli();
        if (timer_head.next->expires <= jiffies)
                mark_bh(TIMER_BH);
        if (tq_timer != &tq_last)
                mark_bh(TQUEUE_BH);
        sti();
}

asmlinkage unsigned int sys_alarm(unsigned int seconds)
{
        struct itimerval it_new, it_old;
        unsigned int oldalarm;

        it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
        it_new.it_value.tv_sec = seconds;
        it_new.it_value.tv_usec = 0;
        _setitimer(ITIMER_REAL, &it_new, &it_old);
        oldalarm = it_old.it_value.tv_sec;
        /* ehhh.. We can't return 0 if we have an alarm pending.. */
        /* And we'd better return too much than too little anyway */
        if (it_old.it_value.tv_usec)
                oldalarm++;
        return oldalarm;
}
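
Because alarm() is layered on ITIMER_REAL, the rounding above means callers always see the remaining time rounded up: alarm(10) followed 3.5 seconds later by alarm(0) returns 7, never 6.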

asmlinkage int sys_getpid(void)
{
        return current->pid;
}

asmlinkage int sys_getppid(void)
{
        return current->p_opptr->pid;
}

asmlinkage int sys_getuid(void)
{
        return current->uid;
}

asmlinkage int sys_geteuid(void)
{
        return current->euid;
}

asmlinkage int sys_getgid(void)
{
        return current->gid;
}

asmlinkage int sys_getegid(void)
{
        return current->egid;
}

asmlinkage int sys_nice(int increment)
{
        unsigned long newprio;
        int increase = 0;

        newprio = increment;
        if (increment < 0) {
                if (!suser())
                        return -EPERM;
                newprio = -increment;
                increase = 1;
        }
        if (newprio > 40)
                newprio = 40;
        /*
         * "Normalize" the priority: traditional unix nice values
         * run -20..20, but linux uses the length of the time slice
         * instead, so map the 0..40 range onto 0..DEF_PRIORITY*2
         * with rounding.
         */
        newprio = (newprio * DEF_PRIORITY + 10) / 20;
        increment = newprio;
        if (increase)
                increment = -increment;
        newprio = current->priority - increment;
        if (newprio < 1)
                newprio = 1;
        if (newprio > DEF_PRIORITY*2)
                newprio = DEF_PRIORITY*2;
        current->priority = newprio;
        return 0;
}
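
Worked through with DEF_PRIORITY == 20 (the HZ == 100 default): nice(10) yields newprio = (10*20 + 10)/20 = 10, so the caller's priority drops from the default 20 to 10, halving its time slice; a superuser's nice(-10) adds the same 10 back, and the result is always clamped to the range 1..DEF_PRIORITY*2.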

static void show_task(int nr,struct task_struct * p)
{
        unsigned long free;
        static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

        printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
        if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
                printk(stat_nam[p->state]);
        else
                printk(" ");
#if ((~0UL) == 0xffffffff)
        if (p == current)
                printk(" current  ");
        else
                printk(" %08lX ", thread_saved_pc(&p->tss));
#else
        if (p == current)
                printk("   current task   ");
        else
                printk(" %016lx ", thread_saved_pc(&p->tss));
#endif
        /* scan the kernel stack page for the high-water mark */
        for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
                if (((unsigned long *)p->kernel_stack_page)[free])
                        break;
        }
        printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
        if (p->p_cptr)
                printk("%5d ", p->p_cptr->pid);
        else
                printk("      ");
        if (p->p_ysptr)
                printk("%7d", p->p_ysptr->pid);
        else
                printk("       ");
        if (p->p_osptr)
                printk(" %5d\n", p->p_osptr->pid);
        else
                printk("\n");
}

void show_state(void)
{
        int i;

#if ((~0UL) == 0xffffffff)
        printk("\n"
               "                         free                        sibling\n");
        printk("  task             PC   stack   pid father child younger older\n");
#else
        printk("\n"
               "                                 free                        sibling\n");
        printk("  task                 PC       stack   pid father child younger older\n");
#endif
        for (i=0 ; i<NR_TASKS ; i++)
                if (task[i])
                        show_task(i,task[i]);
}

void sched_init(void)
{
        /*
         *      We have to do a little magic to get the first
         *      process right in SMP mode.
         */
        int cpu=smp_processor_id();
        current_set[cpu]=&init_task;
#ifdef __SMP__
        init_task.processor=cpu;
#endif
        bh_base[TIMER_BH].routine = timer_bh;
        bh_base[TQUEUE_BH].routine = tqueue_bh;
        bh_base[IMMEDIATE_BH].routine = immediate_bh;
        enable_bh(TIMER_BH);
        enable_bh(TQUEUE_BH);
        enable_bh(IMMEDIATE_BH);
}