This source file includes following definitions.
- add_to_runqueue
- del_from_runqueue
- wake_up_process
- process_timeout
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- immediate_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/tqueue.h>
27 #include <linux/resource.h>
28 #include <linux/mm.h>
29 #include <linux/smp.h>
30
31 #include <asm/system.h>
32 #include <asm/io.h>
33 #include <asm/segment.h>
34 #include <asm/pgtable.h>
35
36 #include <linux/timex.h>
37
38
39
40
/* Length of a timer tick in microseconds, and the current wall-clock time. */
long tick = 1000000 / HZ;
volatile struct timeval xtime;		/* updated by do_timer() */
int tickadj = 500/HZ;			/* max microseconds adjusted per tick */

/* Task queues run by the timer, immediate and scheduler bottom halves. */
DECLARE_TASK_QUEUE(tq_timer);
DECLARE_TASK_QUEUE(tq_immediate);
DECLARE_TASK_QUEUE(tq_scheduler);

/*
 * Phase-locked-loop state for NTP-style timekeeping (see second_overflow()
 * and <linux/timex.h>).
 */
int time_state = TIME_BAD;	/* clock synchronization state machine */
int time_status = STA_UNSYNC;	/* clock status bits */
long time_offset = 0;		/* remaining phase adjustment */
long time_constant = 0;		/* PLL time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance */
long time_precision = 1;	/* clock precision */
long time_maxerror = 0x70000000;/* maximum error estimate */
long time_esterror = 0x70000000;/* estimated error */
long time_phase = 0;		/* phase accumulator (scaled, see do_timer) */
long time_freq = 0;		/* frequency offset */
long time_adj = 0;		/* per-tick adjustment set by second_overflow */
long time_reftime = 0;		/* time of last adjustment */

long time_adjust = 0;		/* outstanding adjtime() correction (us) */
long time_adjust_step = 0;	/* portion applied during the current tick */

int need_resched = 0;		/* set when schedule() should run soon */
unsigned long event = 0;

extern int _setitimer(int, struct itimerval *, struct itimerval *);

/* Kernel profiling buffer; buckets are indexed by (PC - _stext) >> prof_shift. */
unsigned long * prof_buffer = NULL;
unsigned long prof_len = 0;
unsigned long prof_shift = 0;

#define _S(nr) (1<<((nr)-1))

extern void mem_use(void);

/*
 * Static storage for the initial (idle) task: its kernel/user stacks
 * (guarded by STACK_MAGIC) and all structures INIT_TASK refers to.
 */
static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
unsigned long init_user_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;

struct mm_struct init_mm = INIT_MM;
struct task_struct init_task = INIT_TASK;

unsigned long volatile jiffies=0;	/* clock ticks since boot */

/* Per-CPU pointer to the task currently executing on that CPU. */
struct task_struct *current_set[NR_CPUS];
struct task_struct *last_task_used_math = NULL;	/* lazy-FPU context owner */

struct task_struct * task[NR_TASKS] = {&init_task, };

struct kernel_stat kstat = { 0 };
/*
 * Link task p into the circular run-queue headed by init_task.
 * Callers run with interrupts disabled (see wake_up_process()).
 */
static inline void add_to_runqueue(struct task_struct * p)
{
#if 1	/* sanity check: catch double insertion */
	if (p->next_run || p->prev_run) {
		printk("task already on run-queue\n");
		return;
	}
#endif
	/* Ask for a reschedule if the newcomer clearly outranks current. */
	if (p->counter > current->counter + 3)
		need_resched = 1;
	nr_running++;
	/* Insert immediately after the list head (init_task). */
	(p->next_run = init_task.next_run)->prev_run = p;
	p->prev_run = &init_task;
	init_task.next_run = p;
}
114
/*
 * Unlink task p from the run-queue.  Refuses to remove the idle task
 * (init_task doubles as the list head, so removing it would destroy
 * the queue); that complaint is rate-limited to five messages.
 */
static inline void del_from_runqueue(struct task_struct * p)
{
	struct task_struct *next = p->next_run;
	struct task_struct *prev = p->prev_run;

#if 1	/* sanity check: catch removal of an unqueued task */
	if (!next || !prev) {
		printk("task not on run-queue\n");
		return;
	}
#endif
	if (p == &init_task) {
		static int nr = 0;
		if (nr < 5) {
			nr++;
			printk("idle task may not sleep\n");
		}
		return;
	}
	nr_running--;
	next->prev_run = prev;
	prev->next_run = next;
	/* NULL links mark the task as off-queue for the checks above. */
	p->next_run = NULL;
	p->prev_run = NULL;
}
140
141
142
143
144
145
146
147
148
149 inline void wake_up_process(struct task_struct * p)
150 {
151 unsigned long flags;
152
153 save_flags(flags);
154 cli();
155 p->state = TASK_RUNNING;
156 if (!p->next_run)
157 add_to_runqueue(p);
158 restore_flags(flags);
159 }
160
161 static void process_timeout(unsigned long __data)
162 {
163 struct task_struct * p = (struct task_struct *) __data;
164
165 p->timeout = 0;
166 wake_up_process(p);
167 }
168
169
170
171
172
173
174
175
176
177
178
/*
 * 'schedule()' is the scheduler proper: pick the runnable task with
 * the largest remaining time-slice ('counter') and switch to it.
 * If the current task sleeps interruptibly with a timeout, a timer
 * on this stack frame wakes it via process_timeout().
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * next;
	unsigned long timeout = 0;

#ifdef CONFIG_SMP_DEBUG
	int proc=smp_processor_id();
	if(active_kernel_processor!=proc)
		panic("active kernel processor set wrongly! %d not %d\n", active_kernel_processor,proc);
#endif

	/* Rescheduling from interrupt context is a bug: complain and bail. */
	if (intr_count) {
		printk("Aiee: scheduling in interrupt\n");
		return;
	}
	run_task_queue(&tq_scheduler);

	need_resched = 0;
	cli();
	/* Take a sleeping current task off the run-queue. */
	switch (current->state) {
		case TASK_INTERRUPTIBLE:
			/* A pending unblocked signal keeps it runnable. */
			if (current->signal & ~current->blocked)
				goto makerunnable;
			timeout = current->timeout;
			if (timeout && (timeout <= jiffies)) {
				/* Timeout already expired: no sleep at all. */
				current->timeout = 0;
				timeout = 0;
		makerunnable:
				current->state = TASK_RUNNING;
				break;
			}
			/* fallthrough: really going to sleep */
		default:
			del_from_runqueue(current);
			/* fallthrough */
		case TASK_RUNNING:
	}
	p = init_task.next_run;
	sti();

#ifdef CONFIG_SMP
	/* Release this task's CPU binding while it is not running. */
	current->processor = NO_PROC_ID;
#endif

	/*
	 * Scan the run-queue for the task with the highest counter.
	 * The idle task is the fallback when nothing else is runnable
	 * (c = -1000 also outranks any expired counter).
	 */
	c = -1000;
	next = &init_task;
	while (p != &init_task) {
#ifdef CONFIG_SMP
		/* Skip tasks currently executing on another CPU. */
		if (p->processor != NO_PROC_ID) {
			p = p->next_run;
			continue;
		}
#endif
		if (p->counter > c)
			c = p->counter, next = p;
		p = p->next_run;
	}

	/* Every runnable slice is used up: recharge all counters. */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
#ifdef CONFIG_SMP
	/* If both current and the choice are idle tasks, keep this CPU's. */
	if(!current->pid && !next->pid)
		next=current;

	/* Claim the chosen task for this processor. */
	next->processor = smp_processor_id();

#endif
	if (current != next) {
		/* Stack-resident timer bounding an interruptible sleep. */
		struct timer_list timer;

		kstat.context_swtch++;
		if (timeout) {
			init_timer(&timer);
			timer.expires = timeout;
			timer.data = (unsigned long) current;
			timer.function = process_timeout;
			add_timer(&timer);
		}
		switch_to(next);
		/* Running again: cancel the timer if it has not fired. */
		if (timeout)
			del_timer(&timer);
	}
}
284
/*
 * pause(2): sleep interruptibly until a signal arrives.  The
 * -ERESTARTNOHAND return tells the signal code how to complete the
 * interrupted call.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
291
292
293
294
295
296
297
298
299
/*
 * Wake every task sleeping on wait-queue *q, whether the sleep is
 * interruptible or not.  The queue is a circular singly-linked list;
 * a NULL next pointer means the list is corrupt, which is reported
 * and the walk abandoned.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;		/* nobody waiting */
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE))
				wake_up_process(p);
		}
		if (!tmp->next) {
			/* Broken circular list: dump diagnostics and stop. */
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
324
/*
 * Like wake_up(), but only wakes tasks in TASK_INTERRUPTIBLE sleep;
 * uninterruptible sleepers are left alone.
 */
void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;		/* nobody waiting */
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE)
				wake_up_process(p);
		}
		if (!tmp->next) {
			/* Broken circular list: dump diagnostics and stop. */
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
348
/*
 * Semaphore contention path: sleep uninterruptibly on sem->wait until
 * the count becomes positive.  The state is set to UNINTERRUPTIBLE
 * before each count check so a wake-up between the check and the
 * schedule() is not lost.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		/* Re-arm the sleep state before re-testing the count. */
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
361
/*
 * Common helper for sleep_on()/interruptible_sleep_on(): queue the
 * current task on *p in the given state and schedule.  Interrupts are
 * enabled while sleeping; the caller's flags are restored afterwards.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	/* The idle task must never sleep. */
	if (current == task[0])
		panic("task[0] trying to sleep");
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
379
/* Sleep on *p until woken; a signal also ends the sleep. */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}
384
/* Sleep on *p until explicitly woken; signals do not end the sleep. */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
389
390
391
392
393
/*
 * Doubly-linked list of pending timers, sorted by expiry time.  The
 * head is a sentinel whose ~0 expiry guarantees that the insertion
 * scan in add_timer() always terminates.
 */
static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
#define SLOW_BUT_DEBUGGING_TIMERS 1
396
/*
 * Insert a timer into the sorted pending list.  The list manipulation
 * runs with interrupts disabled; the sentinel head (expires == ~0)
 * bounds the scan.
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	/* Reject timers whose links suggest they are already queued. */
	if (timer->next || timer->prev) {
		printk("add_timer() called with non-zero list from %p\n",
			__builtin_return_address(0));
		return;
	}
#endif
	p = &timer_head;
	save_flags(flags);
	cli();
	/* Find the first entry that expires no earlier than this timer. */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	/* Link the new timer in front of it. */
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}
421
/*
 * Remove a timer from the pending list.  Returns 1 if it was queued
 * and has been unlinked, 0 otherwise.  The debugging variant walks
 * the whole list to verify membership before touching the links.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	struct timer_list * p;

	p = &timer_head;
	save_flags(flags);
	cli();
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			/* Found it: unlink and clear the links. */
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			return 1;
		}
	}
	/* Not on the list yet links are non-NULL: report the anomaly. */
	if (timer->next || timer->prev)
		printk("del_timer() called from %p with timer not initialized\n",
			__builtin_return_address(0));
	restore_flags(flags);
	return 0;
#else
	/* Fast variant: trust timer->next as the "is queued" flag. */
	save_flags(flags);
	cli();
	if (timer->next) {
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		restore_flags(flags);
		return 1;
	}
	restore_flags(flags);
	return 0;
#endif
}
459
/* Old-style static timers: one bit of timer_active per timer_table slot. */
unsigned long timer_active = 0;
struct timer_struct timer_table[32];

/*
 * Load averages: fixed-point (FIXED_1) exponentially-decaying
 * averages over three windows, updated by calc_load().
 */
unsigned long avenrun[3] = { 0,0,0 };
470
471
472
473
474 static unsigned long count_active_tasks(void)
475 {
476 struct task_struct **p;
477 unsigned long nr = 0;
478
479 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
480 if (*p && ((*p)->state == TASK_RUNNING ||
481 (*p)->state == TASK_UNINTERRUPTIBLE ||
482 (*p)->state == TASK_SWAPPING))
483 nr += FIXED_1;
484 #ifdef CONFIG_SMP
485 nr-=(smp_num_cpus-1)*FIXED_1;
486 #endif
487 return nr;
488 }
489
490 static inline void calc_load(void)
491 {
492 unsigned long active_tasks;
493 static int count = LOAD_FREQ;
494
495 if (count-- > 0)
496 return;
497 count = LOAD_FREQ;
498 active_tasks = count_active_tasks();
499 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
500 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
501 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
502 }
503
504
505
506
507
508
509
510
511
512
/*
 * Called from do_timer() whenever xtime.tv_usec wraps past a second.
 * Runs the per-second part of the NTP machinery: error aging, the
 * leap-second state machine, phase-offset amortization and frequency
 * correction.  The result is left in time_adj for do_timer() to apply
 * each tick.
 */
static void second_overflow(void)
{
	long ltemp;

	/* Age the maximum-error estimate, saturating at 0x70000000. */
	time_maxerror = (0x70000000-time_maxerror <
			 time_tolerance >> SHIFT_USEC) ?
		0x70000000 : (time_maxerror + (time_tolerance >> SHIFT_USEC));

	/*
	 * Leap-second state machine: STA_INS/STA_DEL request inserting
	 * or deleting a second at the end of a UTC day; TIME_OOP and
	 * TIME_WAIT sequence the return to TIME_OK.
	 */
	switch (time_state) {

	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;

	case TIME_INS:
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_state = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 UTC\n");
		}
		break;

	case TIME_DEL:
		if ((xtime.tv_sec + 1) % 86400 == 0) {
			xtime.tv_sec++;
			time_state = TIME_WAIT;
			printk("Clock: deleting leap second 23:59:59 UTC\n");
		}
		break;

	case TIME_OOP:
		time_state = TIME_WAIT;
		break;

	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}

	/*
	 * Compute next second's phase adjustment: reduce the offset
	 * exponentially (unless in frequency-lock mode) and bound it by
	 * MAXPHASE/MINSEC.  The sign of time_offset picks the branch.
	 */
	if (time_offset < 0) {
		ltemp = -time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) <<
				SHIFT_UPDATE;
		time_offset += ltemp;
		time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ -
				      SHIFT_UPDATE);
	} else {
		ltemp = time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) <<
				SHIFT_UPDATE;
		time_offset -= ltemp;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ -
				     SHIFT_UPDATE);
	}

	/*
	 * Age the PPS signal: if no pulses arrive for PPS_VALID
	 * seconds, discard the PPS frequency/jitter estimates.
	 */
	pps_valid++;
	if (pps_valid == PPS_VALID) {
		pps_jitter = MAXTIME;
		pps_stabil = MAXFREQ;
		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
				 STA_PPSWANDER | STA_PPSERROR);
	}
	/* Fold the frequency correction into the per-tick adjustment. */
	ltemp = time_freq + pps_freq;
	if (ltemp < 0)
		time_adj -= -ltemp >>
			(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
	else
		time_adj += ltemp >>
			(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

	/* Scale time_adj up by 25% — presumably compensating for
	   SHIFT_HZ only approximating HZ; see <linux/timex.h>. */
	if (time_adj < 0)
		time_adj -= -time_adj >> 2;
	else
		time_adj += time_adj >> 2;
}
624
625
626
627
/*
 * Timer bottom half: run every expired timer_list timer and every
 * expired timer_table entry.  Handlers are invoked with interrupts
 * enabled; the list itself is only touched with interrupts off.
 */
static void timer_bh(void * unused)
{
	unsigned long mask;
	struct timer_struct *tp;
	struct timer_list * timer;

	cli();
	/* The list is expiry-sorted: stop at the first unexpired timer. */
	while ((timer = timer_head.next) != &timer_head && timer->expires <= jiffies) {
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		/* Unlink before calling: the handler may re-add the timer. */
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();
		fn(data);
		cli();
	}
	sti();

	/* Old-style timers: one active bit per timer_table slot. */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		/* One-shot: clear the bit before running the handler. */
		timer_active &= ~mask;
		tp->fn();
		sti();
	}
}
659
/* TQUEUE_BH handler: drain the timer task queue. */
void tqueue_bh(void * unused)
{
	run_task_queue(&tq_timer);
}
664
/* IMMEDIATE_BH handler: drain the immediate task queue. */
void immediate_bh(void * unused)
{
	run_task_queue(&tq_immediate);
}
669
/*
 * The timer interrupt handler: advance xtime by one (NTP-adjusted)
 * tick, charge the tick to the current task, enforce CPU rlimits and
 * interval timers, and mark the timer/tqueue bottom halves when they
 * have work to do.
 */
void do_timer(struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;
	long ltemp, psecs;

	/*
	 * Advance the scaled phase accumulator and derive this tick's
	 * length: the base tick, plus the adjtime() step, plus/minus any
	 * whole microseconds that accumulated in time_phase.
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase >= FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust) {
		/*
		 * Spread an outstanding adjtime() correction over many
		 * ticks, at most tickadj microseconds per tick, so the
		 * clock slews instead of stepping.
		 */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		/* Reduce the outstanding correction by this tick's step. */
		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;

	/* Carry microseconds into seconds; run the per-second NTP code. */
	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();
	}

	jiffies++;
	calc_load();
	if (user_mode(regs)) {
		/* Charge the tick as user time (nice vs. user split on
		   priority); pid 0 (idle) is not counted. */
		current->utime++;
		if (current->pid) {
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}
		/* Tick down the virtual (user-time) interval timer. */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		current->stime++;
		if(current->pid)
			kstat.cpu_system++;
		/* Kernel profiling: bucket the interrupted kernel PC. */
		if (prof_buffer && current->pid) {
			extern int _stext;
			unsigned long ip = instruction_pointer(regs);
			ip -= (unsigned long) &_stext;
			ip >>= prof_shift;
			if (ip < prof_len)
				prof_buffer[ip]++;
		}
	}

	/*
	 * CPU-time rlimits: SIGKILL at the hard limit; SIGXCPU at the
	 * soft limit and every five seconds beyond it (checked once per
	 * second of accumulated CPU time).
	 */
	if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
	    (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
		send_sig(SIGKILL, current, 1);
	if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
	    (((current->stime + current->utime) % HZ) == 0)) {
		psecs = (current->stime + current->utime) / HZ;

		if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
			send_sig(SIGXCPU, current, 1);

		else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
		    ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
			send_sig(SIGXCPU, current, 1);
	}

	/* Time-slice expired: request a reschedule (idle task exempt). */
	if (current->pid && 0 > --current->counter) {
		current->counter = 0;
		need_resched = 1;
	}

	/* Tick down the profiling interval timer. */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
	/* Mark TIMER_BH if any old-style timer_table entry expired... */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	cli();
	/* ...or if the earliest timer_list entry expired. */
	if (timer_head.next->expires <= jiffies)
		mark_bh(TIMER_BH);
	if (tq_timer != &tq_last)
		mark_bh(TQUEUE_BH);
	sti();
}
792
793 asmlinkage unsigned int sys_alarm(unsigned int seconds)
794 {
795 struct itimerval it_new, it_old;
796 unsigned int oldalarm;
797
798 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
799 it_new.it_value.tv_sec = seconds;
800 it_new.it_value.tv_usec = 0;
801 _setitimer(ITIMER_REAL, &it_new, &it_old);
802 oldalarm = it_old.it_value.tv_sec;
803
804
805 if (it_old.it_value.tv_usec)
806 oldalarm++;
807 return oldalarm;
808 }
809
/* Trivial syscalls returning identity fields of the current task. */

asmlinkage int sys_getpid(void)
{
	return current->pid;
}

/* getppid(2): pid of the original parent. */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}

asmlinkage int sys_getuid(void)
{
	return current->uid;
}

asmlinkage int sys_geteuid(void)
{
	return current->euid;
}

asmlinkage int sys_getgid(void)
{
	return current->gid;
}

asmlinkage int sys_getegid(void)
{
	return current->egid;
}
839
840 asmlinkage int sys_nice(long increment)
841 {
842 int newprio;
843
844 if (increment < 0 && !suser())
845 return -EPERM;
846 newprio = current->priority - increment;
847 if (newprio < 1)
848 newprio = 1;
849 if (newprio > 35)
850 newprio = 35;
851 current->priority = newprio;
852 return 0;
853 }
854
/*
 * Print one row of the show_state() table for task p: command name,
 * task-array slot (negated when p is the current task), state letter,
 * saved PC, an estimate of free kernel stack, and the pids of its
 * parent/child/sibling links.
 */
static void show_task(int nr,struct task_struct * p)
{
	unsigned long free;
	static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
	/* Unknown states print as a blank instead of indexing past the table. */
	if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[p->state]);
	else
		printk(" ");
#if ((~0UL) == 0xffffffff)
	if (p == current)
		printk(" current ");
	else
		printk(" %08lX ", thread_saved_pc(&p->tss));
#else
	if (p == current)
		printk(" current task ");
	else
		printk(" %016lx ", thread_saved_pc(&p->tss));
#endif
	/* Scan up from the stack base for the first used (non-zero) word
	   to estimate how much kernel stack was never touched. */
	for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
		if (((unsigned long *)p->kernel_stack_page)[free])
			break;
	}
	printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
		printk(" ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);
	else
		printk(" ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);
	else
		printk("\n");
}
894
895 void show_state(void)
896 {
897 int i;
898
899 #if ((~0UL) == 0xffffffff)
900 printk("\n"
901 " free sibling\n");
902 printk(" task PC stack pid father child younger older\n");
903 #else
904 printk("\n"
905 " free sibling\n");
906 printk(" task PC stack pid father child younger older\n");
907 #endif
908 for (i=0 ; i<NR_TASKS ; i++)
909 if (task[i])
910 show_task(i,task[i]);
911 }
912
913 void sched_init(void)
914 {
915
916
917
918
919 int cpu=smp_processor_id();
920 current_set[cpu]=&init_task;
921 #ifdef CONFIG_SMP
922 init_task.processor=cpu;
923 #endif
924 bh_base[TIMER_BH].routine = timer_bh;
925 bh_base[TQUEUE_BH].routine = tqueue_bh;
926 bh_base[IMMEDIATE_BH].routine = immediate_bh;
927 enable_bh(TIMER_BH);
928 enable_bh(TQUEUE_BH);
929 enable_bh(IMMEDIATE_BH);
930 }