This source file includes following definitions.
- add_to_runqueue
- del_from_runqueue
- move_last_runqueue
- wake_up_process
- process_timeout
- goodness
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- run_timer_list
- run_old_timers
- tqueue_bh
- immediate_bh
- count_active_tasks
- calc_load
- second_overflow
- update_wall_time_one_tick
- update_wall_time
- do_process_times
- do_it_virt
- do_it_prof
- update_one_process
- update_process_times
- timer_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- find_process_by_pid
- setscheduler
- sys_sched_setscheduler
- sys_sched_setparam
- sys_sched_getscheduler
- sys_sched_getparam
- sys_sched_yield
- sys_sched_get_priority_max
- sys_sched_get_priority_min
- sys_sched_rr_get_interval
- timespectojiffies
- jiffiestotimespec
- sys_nanosleep
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/signal.h>
15 #include <linux/sched.h>
16 #include <linux/timer.h>
17 #include <linux/kernel.h>
18 #include <linux/kernel_stat.h>
19 #include <linux/fdreg.h>
20 #include <linux/errno.h>
21 #include <linux/time.h>
22 #include <linux/ptrace.h>
23 #include <linux/delay.h>
24 #include <linux/interrupt.h>
25 #include <linux/tqueue.h>
26 #include <linux/resource.h>
27 #include <linux/mm.h>
28 #include <linux/smp.h>
29
30 #include <asm/system.h>
31 #include <asm/io.h>
32 #include <asm/segment.h>
33 #include <asm/pgtable.h>
34 #include <asm/mmu_context.h>
35
36 #include <linux/timex.h>
37
38
39
40
41
42 int securelevel = 0;
43
44 long tick = 1000000 / HZ;
45 volatile struct timeval xtime;
46 int tickadj = 500/HZ;
47
48 DECLARE_TASK_QUEUE(tq_timer);
49 DECLARE_TASK_QUEUE(tq_immediate);
50 DECLARE_TASK_QUEUE(tq_scheduler);
51
52
53
54
55 int time_state = TIME_BAD;
56 int time_status = STA_UNSYNC | STA_PLL;
57 long time_offset = 0;
58 long time_constant = 2;
59 long time_tolerance = MAXFREQ;
60 long time_precision = 1;
61 long time_maxerror = 0x70000000;
62 long time_esterror = 0x70000000;
63 long time_phase = 0;
64 long time_freq = 0;
65 long time_adj = 0;
66 long time_reftime = 0;
67
68 long time_adjust = 0;
69 long time_adjust_step = 0;
70
71 int need_resched = 0;
72 unsigned long event = 0;
73
74 extern int _setitimer(int, struct itimerval *, struct itimerval *);
75 unsigned int * prof_buffer = NULL;
76 unsigned long prof_len = 0;
77 unsigned long prof_shift = 0;
78
79 #define _S(nr) (1<<((nr)-1))
80
81 extern void mem_use(void);
82
83 static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
84 unsigned long init_user_stack[1024] = { STACK_MAGIC, };
85 static struct vm_area_struct init_mmap = INIT_MMAP;
86 static struct fs_struct init_fs = INIT_FS;
87 static struct files_struct init_files = INIT_FILES;
88 static struct signal_struct init_signals = INIT_SIGNALS;
89
90 struct mm_struct init_mm = INIT_MM;
91 struct task_struct init_task = INIT_TASK;
92
93 unsigned long volatile jiffies=0;
94
95 struct task_struct *current_set[NR_CPUS];
96 struct task_struct *last_task_used_math = NULL;
97
98 struct task_struct * task[NR_TASKS] = {&init_task, };
99
100 struct kernel_stat kstat = { 0 };
101
/*
 * Insert task p at the tail of the circular run-queue anchored at
 * init_task, and bump nr_running.  Callers run with interrupts
 * disabled (see wake_up_process()).
 */
static inline void add_to_runqueue(struct task_struct * p)
{
#ifdef __SMP__
	int cpu=smp_processor_id();
#endif
#if 1	/* sanity check: next_run/prev_run are NULL while not queued */
	if (p->next_run || p->prev_run) {
		printk("task already on run-queue\n");
		return;
	}
#endif
	/* Preempt the current task if the newcomer clearly outranks it. */
	if (p->counter > current->counter + 3)
		need_resched = 1;
	nr_running++;
	/* Link in just before the init_task anchor (i.e. at the tail). */
	(p->prev_run = init_task.prev_run)->next_run = p;
	p->next_run = &init_task;
	init_task.prev_run = p;
#ifdef __SMP__
	/* Bit 31 of smp_process_available is used as a spin lock on it. */
	while(set_bit(31,&smp_process_available));
#if 0
	{
		while(test_bit(31,&smp_process_available))
		{
			if(clear_bit(cpu,&smp_invalidate_needed))
			{
				local_flush_tlb();
				set_bit(cpu,&cpu_callin_map[0]);
			}
		}
	}
#endif
	smp_process_available++;
	clear_bit(31,&smp_process_available);
	/* Kick an idle CPU (one whose current task is pid 0) so it
	   reschedules and can pick this task up. */
	if ((0!=p->pid) && smp_threads_ready)
	{
		int i;
		for (i=0;i<smp_num_cpus;i++)
		{
			if (0==current_set[cpu_logical_map[i]]->pid)
			{
				smp_message_pass(cpu_logical_map[i], MSG_RESCHEDULE, 0L, 0);
				break;
			}
		}
	}
#endif
}
150
151 static inline void del_from_runqueue(struct task_struct * p)
152 {
153 struct task_struct *next = p->next_run;
154 struct task_struct *prev = p->prev_run;
155
156 #if 1
157 if (!next || !prev) {
158 printk("task not on run-queue\n");
159 return;
160 }
161 #endif
162 if (p == &init_task) {
163 static int nr = 0;
164 if (nr < 5) {
165 nr++;
166 printk("idle task may not sleep\n");
167 }
168 return;
169 }
170 nr_running--;
171 next->prev_run = prev;
172 prev->next_run = next;
173 p->next_run = NULL;
174 p->prev_run = NULL;
175 }
176
177 static inline void move_last_runqueue(struct task_struct * p)
178 {
179 struct task_struct *next = p->next_run;
180 struct task_struct *prev = p->prev_run;
181
182
183 next->prev_run = prev;
184 prev->next_run = next;
185
186 p->next_run = &init_task;
187 prev = init_task.prev_run;
188 init_task.prev_run = p;
189 p->prev_run = prev;
190 prev->next_run = p;
191 }
192
193
194
195
196
197
198
199
200
/*
 * Make task p runnable: set TASK_RUNNING and enqueue it unless it is
 * already on the run-queue.  Interrupts are disabled around the
 * update, so this is safe from any context.
 */
inline void wake_up_process(struct task_struct * p)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	p->state = TASK_RUNNING;
	if (!p->next_run)
		add_to_runqueue(p);
	restore_flags(flags);
}
212
/*
 * Timer callback armed by schedule() for sleeps with a timeout:
 * __data is the sleeping task.  Clear its timeout and wake it.
 */
static void process_timeout(unsigned long __data)
{
	struct task_struct * p = (struct task_struct *) __data;

	p->timeout = 0;
	wake_up_process(p);
}
220
221
222
223
224
225
226
227
228
229
230
231
232
233
/*
 * Compute the scheduling desirability of task p from this_cpu's
 * point of view:
 *   -1000              never pick (already running on another CPU)
 *   0                  quantum exhausted
 *   > 0                the larger the better
 *   1000 + rt_priority realtime task, always preferred
 */
static inline int goodness(struct task_struct * p, struct task_struct * prev, int this_cpu)
{
	int weight;

#ifdef __SMP__
	/* A task currently owned by some CPU may not be chosen here. */
	if (p->processor != NO_PROC_ID)
		return -1000;
#endif

	/* Realtime policies outrank all SCHED_OTHER tasks. */
	if (p->policy != SCHED_OTHER)
		return 1000 + p->rt_priority;

	/*
	 * Base weight is the remaining time slice, plus small bonuses
	 * for CPU affinity and for keeping the same task running.
	 */
	weight = p->counter;
	if (weight) {

#ifdef __SMP__
		/* Large bonus for the CPU this task last ran on
		   (cache still warm). */
		if (p->last_processor == this_cpu)
			weight += PROC_CHANGE_PENALTY;
#endif

		/* Slight edge for the current task: switch is cheaper. */
		if (p == prev)
			weight += 1;
	}

	return weight;
}
276
277
278
279
280
281
282
283
284
285
286
/*
 * 'schedule()' is the scheduler proper: run pending bottom halves
 * and the scheduler task queue, decide whether the current task
 * stays runnable, pick the runnable task with the highest goodness()
 * and switch to it.  Must never be called from interrupt context.
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * prev, * next;
	unsigned long timeout = 0;
	int this_cpu=smp_processor_id();

	if (intr_count)
		goto scheduling_in_interrupt;

	/* Run any pending bottom halves first. */
	if (bh_active & bh_mask) {
		intr_count = 1;
		do_bottom_half();
		intr_count = 0;
	}

	run_task_queue(&tq_scheduler);

	need_resched = 0;
	prev = current;
	cli();
	/* A SCHED_RR task that used up its quantum gets a fresh one
	   and goes to the back of the queue. */
	if (!prev->counter && prev->policy == SCHED_RR) {
		prev->counter = prev->priority;
		move_last_runqueue(prev);
	}
	/*
	 * Decide what to do with the outgoing task.  Note the
	 * deliberate fallthroughs: an interruptible sleeper with a
	 * pending signal or an expired timeout is made runnable;
	 * any other non-running state is dequeued.
	 */
	switch (prev->state) {
		case TASK_INTERRUPTIBLE:
			if (prev->signal & ~prev->blocked)
				goto makerunnable;
			timeout = prev->timeout;
			if (timeout && (timeout <= jiffies)) {
				prev->timeout = 0;
				timeout = 0;
		makerunnable:
				prev->state = TASK_RUNNING;
				break;
			}
		default:
			del_from_runqueue(prev);
		case TASK_RUNNING:
	}
	p = init_task.next_run;
	sti();

#ifdef __SMP__
	/* Release this CPU: prev may be picked up by another CPU. */
	prev->processor = NO_PROC_ID;
#endif

	/*
	 * Scan the whole run-queue for the best candidate; the idle
	 * task is the fallback when nothing else is runnable.
	 */
	c = -1000;
	next = &init_task;
	while (p != &init_task) {
		int weight = goodness(p, prev, this_cpu);
		if (weight > c)
			c = weight, next = p;
		p = p->next_run;
	}

	/* All runnable tasks out of quantum: recharge everyone,
	   carrying over half of any unused slice. */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
#ifdef __SMP__
	/* Don't swap one idle thread for another. */
	if(!prev->pid && !next->pid)
		next=prev;

	/* Claim the CPU for the chosen task. */
	next->processor = this_cpu;
	next->last_processor = this_cpu;

#endif
#ifdef __SMP_PROF__
	/* Track which CPUs are running their idle task. */
	if (0==next->pid)
		set_bit(this_cpu,&smp_idle_map);
	else
		clear_bit(this_cpu,&smp_idle_map);
#endif
	if (prev != next) {
		struct timer_list timer;

		kstat.context_swtch++;
		/* If prev sleeps with a timeout, arm a timer that will
		   wake it via process_timeout(). */
		if (timeout) {
			init_timer(&timer);
			timer.expires = timeout;
			timer.data = (unsigned long) prev;
			timer.function = process_timeout;
			add_timer(&timer);
		}
		get_mmu_context(next);
		switch_to(prev,next);
		/* We only get here once prev runs again. */
		if (timeout)
			del_timer(&timer);
	}
	return;

scheduling_in_interrupt:
	printk("Aiee: scheduling in interrupt\n");
}
405
406 #ifndef __alpha__
407
408
409
410
411
/*
 * pause(2): sleep interruptibly until a signal arrives.
 * -ERESTARTNOHAND lets the signal code decide whether to restart
 * the call or return -EINTR to user space.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
418
419 #endif
420
421
422
423
424
425
426
427
428
/*
 * Wake every task on wait queue *q that is sleeping interruptibly
 * or uninterruptibly.  A broken (non-circular) queue is reported
 * rather than looped over forever.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE))
				wake_up_process(p);
		}
		if (!tmp->next) {
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
453
/*
 * Like wake_up(), but only tasks in TASK_INTERRUPTIBLE sleep are
 * woken; uninterruptible sleepers are left alone.
 */
void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE)
				wake_up_process(p);
		}
		if (!tmp->next) {
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
477
/*
 * Slow path of semaphore down(): sleep uninterruptibly on the
 * semaphore's wait queue until the count becomes positive.  The
 * state is re-set before each re-check because a wakeup may race
 * with another contender taking the semaphore first.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
490
/*
 * Common helper for sleep_on()/interruptible_sleep_on(): put the
 * current task on wait queue *p in the given sleep state and call
 * schedule().  Interrupts are enabled while asleep; the caller's
 * flags are restored afterwards.  The idle task may never sleep.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
508
/* Sleep on *p until woken or a signal arrives. */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}
513
/* Sleep on *p uninterruptibly until explicitly woken. */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
518
519
520
521
522
523 static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
524 #define SLOW_BUT_DEBUGGING_TIMERS 0
525
/*
 * Insert a timer into the global list, kept sorted by expiry time.
 * The head sentinel's expiry of ~0 guarantees the scan terminates.
 * The timer must not already be pending.
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	if (timer->next || timer->prev) {
		printk("add_timer() called with non-zero list from %p\n",
			__builtin_return_address(0));
		return;
	}
#endif
	p = &timer_head;
	save_flags(flags);
	cli();
	/* Find the first entry expiring no earlier than this one. */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	/* Link in just before it. */
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}
550
/*
 * Remove a timer from the list.  Returns 1 if the timer was pending,
 * 0 otherwise.  The debugging variant walks the whole list to verify
 * membership before unlinking.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	struct timer_list * p;

	p = &timer_head;
	save_flags(flags);
	cli();
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			return 1;
		}
	}
	/* Non-NULL links on a timer not found in the list means the
	   caller handed us garbage. */
	if (timer->next || timer->prev)
		printk("del_timer() called from %p with timer not initialized\n",
			__builtin_return_address(0));
	restore_flags(flags);
	return 0;
#else
	struct timer_list * next;
	int ret = 0;
	save_flags(flags);
	cli();
	/* A non-NULL next pointer means the timer is queued. */
	if ((next = timer->next) != NULL) {
		(next->prev = timer->prev)->next = next;
		timer->next = timer->prev = NULL;
		ret = 1;
	}
	restore_flags(flags);
	return ret;
#endif
}
588
/*
 * Expire every timer whose time has come.  Called from timer_bh()
 * with interrupts disabled.  Each timer is unlinked before its
 * handler runs (so the handler may re-add it), and interrupts are
 * enabled around the handler call.
 */
static inline void run_timer_list(void)
{
	struct timer_list * timer;

	while ((timer = timer_head.next) != &timer_head && timer->expires <= jiffies) {
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();
		fn(data);
		cli();
	}
}
604
/*
 * Run the legacy static timer_table[] timers.  Each bit of
 * timer_active flags a pending slot; expired slots are deactivated
 * before their handler runs.  The scan stops early once no higher
 * active bits remain.
 */
static inline void run_old_timers(void)
{
	struct timer_struct *tp;
	unsigned long mask;

	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;
		tp->fn();
		sti();
	}
}
622
/* Bottom half: drain the tq_timer task queue (marked by do_timer()). */
void tqueue_bh(void)
{
	run_task_queue(&tq_timer);
}
627
/* Bottom half: drain the tq_immediate task queue. */
void immediate_bh(void)
{
	run_task_queue(&tq_immediate);
}
632
633 unsigned long timer_active = 0;
634 struct timer_struct timer_table[32];
635
636
637
638
639
640
641
642 unsigned long avenrun[3] = { 0,0,0 };
643
644
645
646
/*
 * Count tasks that contribute to the load average — running,
 * uninterruptible or swapping — in FIXED_1 fixed-point units.
 * Task slot 0 (the idle task) is excluded by the loop bounds; on
 * SMP the additional per-CPU idle threads are discounted too.
 */
static unsigned long count_active_tasks(void)
{
	struct task_struct **p;
	unsigned long nr = 0;

	for(p = &LAST_TASK; p > &FIRST_TASK; --p)
		if (*p && ((*p)->state == TASK_RUNNING ||
			   (*p)->state == TASK_UNINTERRUPTIBLE ||
			   (*p)->state == TASK_SWAPPING))
			nr += FIXED_1;
#ifdef __SMP__
	nr-=(smp_num_cpus-1)*FIXED_1;
#endif
	return nr;
}
662
/*
 * Age the 1/5/15-minute load averages.  'ticks' is the number of
 * clock ticks since the previous call; the averages are recomputed
 * once every LOAD_FREQ ticks.
 */
static inline void calc_load(unsigned long ticks)
{
	unsigned long active_tasks;	/* fixed-point, FIXED_1 units */
	static int count = LOAD_FREQ;

	count -= ticks;
	if (count < 0) {
		count += LOAD_FREQ;
		active_tasks = count_active_tasks();
		CALC_LOAD(avenrun[0], EXP_1, active_tasks);
		CALC_LOAD(avenrun[1], EXP_5, active_tasks);
		CALC_LOAD(avenrun[2], EXP_15, active_tasks);
	}
}
677
678
679
680
681
682
683
684
685
686
/*
 * Called from update_wall_time() each time the seconds hand
 * advances.  Runs the NTP leap-second state machine and folds the
 * phase (time_offset) and frequency (time_freq + pps_freq)
 * corrections into time_adj, the signed per-tick adjustment that
 * update_wall_time_one_tick() applies.
 */
static void second_overflow(void)
{
	long ltemp;

	/* Grow the maximum error estimate, saturating at 0x70000000. */
	time_maxerror = (0x70000000-time_maxerror <
			 time_tolerance >> SHIFT_USEC) ?
	    0x70000000 : (time_maxerror + (time_tolerance >> SHIFT_USEC));

	/*
	 * Leap-second state machine.  STA_INS/STA_DEL request the
	 * insertion/deletion of a second at the next day boundary
	 * (xtime.tv_sec a multiple of 86400); the OOP/WAIT states
	 * sequence back to TIME_OK once the request bits clear.
	 */
	switch (time_state) {

	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;

	case TIME_INS:
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_state = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 UTC\n");
		}
		break;

	case TIME_DEL:
		if ((xtime.tv_sec + 1) % 86400 == 0) {
			xtime.tv_sec++;
			time_state = TIME_WAIT;
			printk("Clock: deleting leap second 23:59:59 UTC\n");
		}
		break;

	case TIME_OOP:
		/* The inserted leap second is over. */
		time_state = TIME_WAIT;
		break;

	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}

	/*
	 * Phase adjustment for the coming second: take a fraction of
	 * time_offset (scaled down by the PLL time constant unless in
	 * frequency-lock (STA_FLL) mode), clamp it to the maximum
	 * slew rate, and convert it to time_adj's per-tick
	 * SHIFT_SCALE representation.
	 */
	if (time_offset < 0) {
		ltemp = -time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) <<
			    SHIFT_UPDATE;
		time_offset += ltemp;
		time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ -
		    SHIFT_UPDATE);
	} else {
		ltemp = time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) <<
			    SHIFT_UPDATE;
		time_offset -= ltemp;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ -
		    SHIFT_UPDATE);
	}

	/*
	 * PPS aging: if no pulse-per-second update arrived for
	 * PPS_VALID seconds, declare the PPS data stale and clear the
	 * related status bits.
	 */
	pps_valid++;
	if (pps_valid == PPS_VALID) {
		pps_jitter = MAXTIME;
		pps_stabil = MAXFREQ;
		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
		    STA_PPSWANDER | STA_PPSERROR);
	}
	/* Fold in the frequency correction. */
	ltemp = time_freq + pps_freq;
	if (ltemp < 0)
		time_adj -= -ltemp >>
		    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
	else
		time_adj += ltemp >>
		    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

#if HZ == 100
	/* Compensate for HZ==100 not being a power of two: scale the
	   adjustment up by 25%. */
	if (time_adj < 0)
		time_adj -= -time_adj >> 2;
	else
		time_adj += time_adj >> 2;
#endif
}
801
/*
 * Advance xtime by one tick, applying the NTP phase adjustment
 * (time_adj, accumulated in SHIFT_SCALE fixed point in time_phase)
 * plus any adjtime() slew currently in progress (time_adjust_step).
 */
static void update_wall_time_one_tick(void)
{
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		/* Accumulated phase reached whole (negative) microseconds. */
		long ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase >= FINEUSEC) {
		/* Accumulated phase reached whole (positive) microseconds. */
		long ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust) {
		/*
		 * Spread an outstanding adjtime() offset over many
		 * ticks, slewing by at most tickadj microseconds per
		 * tick (sign-preserving).
		 */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		/* Reduce the remaining adjustment by this tick's step. */
		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;
}
844
845
846
847
848
849
850
851
/*
 * Apply 'ticks' tick-updates to xtime and carry microsecond overflow
 * into the seconds field.  The do/while requires ticks > 0; the only
 * caller, timer_bh(), guarantees that.
 * NOTE(review): only one second of carry is handled per call, which
 * assumes fewer than HZ ticks accumulate between bottom-half runs —
 * confirm.
 */
static void update_wall_time(unsigned long ticks)
{
	do {
		ticks--;
		update_wall_time_one_tick();
	} while (ticks);

	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();
	}
}
865
/*
 * Charge 'user' and 'system' ticks to task p and enforce RLIMIT_CPU:
 * SIGXCPU exactly when a whole-second boundary past the soft limit
 * is crossed, SIGKILL once the hard limit is exceeded.  psecs is the
 * task's total CPU time in seconds.
 */
static inline void do_process_times(struct task_struct *p,
	unsigned long user, unsigned long system)
{
	long psecs;

	p->utime += user;
	p->stime += system;

	psecs = (p->stime + p->utime) / HZ;
	if (psecs > p->rlim[RLIMIT_CPU].rlim_cur) {
		/* Send SIGXCPU only on the exact second boundary. */
		if (psecs * HZ == p->stime + p->utime)
			send_sig(SIGXCPU, p, 1);
		/* Past the hard limit there is no mercy. */
		if (psecs > p->rlim[RLIMIT_CPU].rlim_max)
			send_sig(SIGKILL, p, 1);
	}
}
884
/*
 * Tick down the ITIMER_VIRTUAL countdown by 'ticks'.  On expiry
 * deliver SIGVTALRM and reload from it_virt_incr (periodic timers).
 */
static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
{
	unsigned long it_virt = p->it_virt_value;

	if (it_virt) {
		if (it_virt <= ticks) {
			it_virt = ticks + p->it_virt_incr;
			send_sig(SIGVTALRM, p, 1);
		}
		p->it_virt_value = it_virt - ticks;
	}
}
897
/*
 * Tick down the ITIMER_PROF countdown by 'ticks'.  On expiry deliver
 * SIGPROF and reload from it_prof_incr (periodic timers).
 */
static inline void do_it_prof(struct task_struct * p, unsigned long ticks)
{
	unsigned long it_prof = p->it_prof_value;

	if (it_prof) {
		if (it_prof <= ticks) {
			it_prof = ticks + p->it_prof_incr;
			send_sig(SIGPROF, p, 1);
		}
		p->it_prof_value = it_prof - ticks;
	}
}
910
/*
 * Per-task tick accounting: process CPU times/rlimits, the virtual
 * interval timer (charged user ticks only) and the profiling timer
 * (charged all ticks).
 */
static __inline__ void update_one_process(struct task_struct *p,
	unsigned long ticks, unsigned long user, unsigned long system)
{
	do_process_times(p, user, system);
	do_it_virt(p, user);
	do_it_prof(p, ticks);
}
918
/*
 * Distribute 'ticks' elapsed clock ticks ('system' of them spent in
 * kernel mode) to the running task(s): CPU accounting, time-slice
 * expiry and resched marking.  On SMP every CPU's current task is
 * charged, not just the local one.
 */
static void update_process_times(unsigned long ticks, unsigned long system)
{
#ifndef __SMP__
	struct task_struct * p = current;
	unsigned long user = ticks - system;
	if (p->pid) {
		/* Charge the time slice and request a resched when
		   the slice runs out. */
		p->counter -= ticks;
		if (p->counter < 0) {
			p->counter = 0;
			need_resched = 1;
		}
		if (p->priority < DEF_PRIORITY)
			kstat.cpu_nice += user;
		else
			kstat.cpu_user += user;
		kstat.cpu_system += system;
	}
	update_one_process(p, ticks, user, system);
#else
	int cpu,j;
	cpu = smp_processor_id();
	for (j=0;j<smp_num_cpus;j++)
	{
		int i = cpu_logical_map[j];
		struct task_struct *p;

#ifdef __SMP_PROF__
		if (test_bit(i,&smp_idle_map))
			smp_idle_count[i]++;
#endif
		p = current_set[i];
		/* Is this CPU running a real process (not idle)? */
		if (p->pid) {
			/* Only the local CPU knows its user/system
			   split; remote CPUs are assumed to be in
			   user mode unless they hold the kernel lock. */
			unsigned long utime = ticks;
			unsigned long stime = 0;
			if (cpu == i) {
				utime = ticks-system;
				stime = system;
			} else if (smp_proc_in_lock[i]) {
				utime = 0;
				stime = ticks;
			}
			update_one_process(p, ticks, utime, stime);

			p->counter -= ticks;
			if (p->counter >= 0)
				continue;
			p->counter = 0;
		} else {
			/* Idle CPU: only bother rescheduling it when
			   runnable work is actually available. */
			if (!(0x7fffffff & smp_process_available))
				continue;
		}
		/* Time slice over (or idle with work pending). */
		if (i==cpu)
			need_resched = 1;
		else
			smp_message_pass(i, MSG_RESCHEDULE, 0L, 0);
	}
#endif
}
986
987 static unsigned long lost_ticks = 0;
988 static unsigned long lost_ticks_system = 0;
989
/*
 * The timer bottom half: run both timer mechanisms, then consume the
 * tick counts accumulated by do_timer() to update the load average,
 * the wall clock and per-process accounting.  The counters are
 * snapshotted and cleared with interrupts off.
 */
static void timer_bh(void)
{
	unsigned long ticks, system;

	run_old_timers();

	cli();
	run_timer_list();
	ticks = lost_ticks;
	lost_ticks = 0;
	system = lost_ticks_system;
	lost_ticks_system = 0;
	sti();

	if (ticks) {
		calc_load(ticks);
		update_wall_time(ticks);
		update_process_times(ticks, system);
	}
}
1010
1011
1012
1013
1014
1015
1016 #if HZ > 100
1017 #define should_run_timers(x) ((x) >= HZ/100)
1018 #else
1019 #define should_run_timers(x) (1)
1020 #endif
1021
/*
 * Hardware timer interrupt handler: advance jiffies and the
 * lost-tick counters, mark the timer bottom half, and take a kernel
 * profiling sample when the interrupt hit kernel mode.
 */
void do_timer(struct pt_regs * regs)
{
	(*(unsigned long *)&jiffies)++;	/* cast drops volatile for the increment */
	lost_ticks++;
	if (should_run_timers(lost_ticks))
		mark_bh(TIMER_BH);
	if (!user_mode(regs)) {
		lost_ticks_system++;
		/* Bucket the interrupted kernel PC into prof_buffer. */
		if (prof_buffer && current->pid) {
			extern int _stext;
			unsigned long ip = instruction_pointer(regs);
			ip -= (unsigned long) &_stext;
			ip >>= prof_shift;
			if (ip < prof_len)
				prof_buffer[ip]++;
		}
	}
	if (tq_timer)
		mark_bh(TQUEUE_BH);
}
1042
1043 #ifndef __alpha__
1044
1045
1046
1047
1048
/*
 * alarm(2), implemented on top of ITIMER_REAL.  Returns the number
 * of seconds remaining on any previous alarm, rounding a partial
 * second up.
 */
asmlinkage unsigned int sys_alarm(unsigned int seconds)
{
	struct itimerval it_new, it_old;
	unsigned int oldalarm;

	it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
	it_new.it_value.tv_sec = seconds;
	it_new.it_value.tv_usec = 0;
	_setitimer(ITIMER_REAL, &it_new, &it_old);
	oldalarm = it_old.it_value.tv_sec;
	/* ehhh.. We can't return 0 if we have an alarm pending..	*/
	/* And we'd better return too much than too little anyway */
	if (it_old.it_value.tv_usec)
		oldalarm++;
	return oldalarm;
}
1065
1066
1067
1068
1069
/* getpid(2): pid of the calling task. */
asmlinkage int sys_getpid(void)
{
	return current->pid;
}
1074
/* getppid(2): pid of the original parent (p_opptr). */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}
1079
/* getuid(2): real user id of the calling task. */
asmlinkage int sys_getuid(void)
{
	return current->uid;
}
1084
/* geteuid(2): effective user id of the calling task. */
asmlinkage int sys_geteuid(void)
{
	return current->euid;
}
1089
/* getgid(2): real group id of the calling task. */
asmlinkage int sys_getgid(void)
{
	return current->gid;
}
1094
/* getegid(2): effective group id of the calling task. */
asmlinkage int sys_getegid(void)
{
	return current->egid;
}
1099
1100
1101
1102
1103
1104
1105 asmlinkage int sys_nice(int increment)
1106 {
1107 unsigned long newprio;
1108 int increase = 0;
1109
1110 newprio = increment;
1111 if (increment < 0) {
1112 if (!suser())
1113 return -EPERM;
1114 newprio = -increment;
1115 increase = 1;
1116 }
1117 if (newprio > 40)
1118 newprio = 40;
1119
1120
1121
1122
1123
1124
1125
1126 newprio = (newprio * DEF_PRIORITY + 10) / 20;
1127 increment = newprio;
1128 if (increase)
1129 increment = -increment;
1130 newprio = current->priority - increment;
1131 if (newprio < 1)
1132 newprio = 1;
1133 if (newprio > DEF_PRIORITY*2)
1134 newprio = DEF_PRIORITY*2;
1135 current->priority = newprio;
1136 return 0;
1137 }
1138
1139 #endif
1140
1141 static struct task_struct *find_process_by_pid(pid_t pid) {
1142 struct task_struct *p, *q;
1143
1144 if (pid == 0)
1145 p = current;
1146 else {
1147 p = 0;
1148 for_each_task(q) {
1149 if (q && q->pid == pid) {
1150 p = q;
1151 break;
1152 }
1153 }
1154 }
1155 return p;
1156 }
1157
/*
 * Common backend for sched_setscheduler(2)/sched_setparam(2).
 * policy < 0 means "keep the target's current policy" (used by
 * sched_setparam).  Validates the policy/priority combination,
 * checks permissions, installs the new settings, requeues the task
 * and reschedules.
 */
static int setscheduler(pid_t pid, int policy,
			struct sched_param *param)
{
	int error;
	struct sched_param lp;
	struct task_struct *p;

	if (!param || pid < 0)
		return -EINVAL;

	error = verify_area(VERIFY_READ, param, sizeof(struct sched_param));
	if (error)
		return error;
	memcpy_fromfs(&lp, param, sizeof(struct sched_param));

	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	if (policy < 0)
		policy = p->policy;
	else if (policy != SCHED_FIFO && policy != SCHED_RR &&
		 policy != SCHED_OTHER)
		return -EINVAL;

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are 1..99;
	 * SCHED_OTHER requires priority 0.
	 */
	if (lp.sched_priority < 0 || lp.sched_priority > 99)
		return -EINVAL;
	if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
		return -EINVAL;

	/* Realtime policies, and other users' tasks, require root. */
	if ((policy == SCHED_FIFO || policy == SCHED_RR) && !suser())
		return -EPERM;
	if ((current->euid != p->euid) && (current->euid != p->uid) &&
	    !suser())
		return -EPERM;

	p->policy = policy;
	p->rt_priority = lp.sched_priority;
	/* Requeue so the new policy/priority takes effect at once. */
	if (p->next_run)
		move_last_runqueue(p);
	schedule();

	return 0;
}
1206
/* sched_setscheduler(2): set both policy and parameters. */
asmlinkage int sys_sched_setscheduler(pid_t pid, int policy,
				      struct sched_param *param)
{
	return setscheduler(pid, policy, param);
}
1212
/* sched_setparam(2): set parameters, keep the current policy (-1). */
asmlinkage int sys_sched_setparam(pid_t pid, struct sched_param *param)
{
	return setscheduler(pid, -1, param);
}
1217
/* sched_getscheduler(2): return the target task's policy. */
asmlinkage int sys_sched_getscheduler(pid_t pid)
{
	struct task_struct *p;

	if (pid < 0)
		return -EINVAL;

	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	return p->policy;
}
1231
/*
 * sched_getparam(2): copy the target task's realtime priority out
 * to user space.
 */
asmlinkage int sys_sched_getparam(pid_t pid, struct sched_param *param)
{
	int error;
	struct task_struct *p;
	struct sched_param lp;

	if (!param || pid < 0)
		return -EINVAL;

	error = verify_area(VERIFY_WRITE, param, sizeof(struct sched_param));
	if (error)
		return error;

	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	lp.sched_priority = p->rt_priority;
	memcpy_tofs(param, &lp, sizeof(struct sched_param));

	return 0;
}
1254
/*
 * sched_yield(2): move the caller to the end of the run-queue.
 * NOTE(review): no schedule() here — presumably the actual switch
 * happens on the syscall return path; confirm against the arch's
 * ret-from-syscall code.
 */
asmlinkage int sys_sched_yield(void)
{
	move_last_runqueue(current);

	return 0;
}
1261
1262 asmlinkage int sys_sched_get_priority_max(int policy)
1263 {
1264 switch (policy) {
1265 case SCHED_FIFO:
1266 case SCHED_RR:
1267 return 99;
1268 case SCHED_OTHER:
1269 return 0;
1270 }
1271
1272 return -EINVAL;
1273 }
1274
1275 asmlinkage int sys_sched_get_priority_min(int policy)
1276 {
1277 switch (policy) {
1278 case SCHED_FIFO:
1279 case SCHED_RR:
1280 return 1;
1281 case SCHED_OTHER:
1282 return 0;
1283 }
1284
1285 return -EINVAL;
1286 }
1287
/*
 * sched_rr_get_interval(2): not implemented — always returns
 * -ENOSYS after validating the user buffer.
 * NOTE(review): the code after the early return (filling and copying
 * 't') is intentionally dead, left as a stub for a future
 * implementation.
 */
asmlinkage int sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
{
	int error;
	struct timespec t;

	error = verify_area(VERIFY_WRITE, interval, sizeof(struct timespec));
	if (error)
		return error;

	t.tv_sec = 0;
	t.tv_nsec = 0;	/* <-- Linus, please fill correct value in here */
	return -ENOSYS;
	memcpy_tofs(interval, &t, sizeof(struct timespec));

	return 0;
}
1304
1305
1306
1307
1308
1309 static unsigned long timespectojiffies(struct timespec *value)
1310 {
1311 unsigned long sec = (unsigned) value->tv_sec;
1312 long nsec = value->tv_nsec;
1313
1314 if (sec > (LONG_MAX / HZ))
1315 return LONG_MAX;
1316 nsec += 1000000000L / HZ - 1;
1317 nsec /= 1000000000L / HZ;
1318 return HZ * sec + nsec;
1319 }
1320
1321 static void jiffiestotimespec(unsigned long jiffies, struct timespec *value)
1322 {
1323 value->tv_nsec = (jiffies % HZ) * (1000000000L / HZ);
1324 value->tv_sec = jiffies / HZ;
1325 return;
1326 }
1327
/*
 * nanosleep(2).  Very short requests (< 2 ms) from realtime tasks
 * busy-wait in udelay() for precision; everything else sleeps
 * interruptibly on a jiffy timeout.  On early (signal) wakeup the
 * remaining time is written to rmtp (if supplied) and -EINTR is
 * returned.
 */
asmlinkage int sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
{
	int error;
	struct timespec t;
	unsigned long expire;

	error = verify_area(VERIFY_READ, rqtp, sizeof(struct timespec));
	if (error)
		return error;
	memcpy_fromfs(&t, rqtp, sizeof(struct timespec));
	if (rmtp) {
		error = verify_area(VERIFY_WRITE, rmtp,
				    sizeof(struct timespec));
		if (error)
			return error;
	}

	if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
		return -EINVAL;

	if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
	    current->policy != SCHED_OTHER) {
		/*
		 * Short delays from realtime tasks are served by
		 * busy-waiting for better precision.
		 */
		udelay((t.tv_nsec + 999) / 1000);
		return 0;
	}

	/* '+ (t.tv_sec || t.tv_nsec)' adds one jiffy so a non-zero
	   request never sleeps shorter than asked. */
	expire = timespectojiffies(&t) + (t.tv_sec || t.tv_nsec) + jiffies;
	current->timeout = expire;
	current->state = TASK_INTERRUPTIBLE;
	schedule();

	if (expire > jiffies) {
		/* Woken early by a signal: report the time left,
		   minus the safety jiffy added above. */
		if (rmtp) {
			jiffiestotimespec(expire - jiffies -
					  (expire > jiffies + 1), &t);
			memcpy_tofs(rmtp, &t, sizeof(struct timespec));
		}
		return -EINTR;
	}

	return 0;
}
1374
/*
 * Print one row of the show_state() table for task p: name, slot
 * number (negative marks the current task), state letter, saved PC,
 * an estimate of free kernel stack, pid and family links.
 */
static void show_task(int nr,struct task_struct * p)
{
	unsigned long free;
	static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
	if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[p->state]);
	else
		printk(" ");
#if ((~0UL) == 0xffffffff)
	if (p == current)
		printk(" current ");
	else
		printk(" %08lX ", thread_saved_pc(&p->tss));
#else
	if (p == current)
		printk(" current task ");
	else
		printk(" %016lx ", thread_saved_pc(&p->tss));
#endif
	/* Estimate unused stack: count untouched (zero) words from the
	   bottom of the kernel stack page upward. */
	for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
		if (((unsigned long *)p->kernel_stack_page)[free])
			break;
	}
	printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
		printk(" ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);
	else
		printk(" ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);
	else
		printk("\n");
}
1414
/*
 * Dump a one-line summary of every task in the task[] table via
 * show_task().  The header differs for 32- vs 64-bit pointer widths.
 */
void show_state(void)
{
	int i;

#if ((~0UL) == 0xffffffff)
	printk("\n"
	       " free sibling\n");
	printk(" task PC stack pid father child younger older\n");
#else
	printk("\n"
	       " free sibling\n");
	printk(" task PC stack pid father child younger older\n");
#endif
	for (i=0 ; i<NR_TASKS ; i++)
		if (task[i])
			show_task(i,task[i]);
}
1432
/*
 * Boot-time scheduler initialization: record init_task as the boot
 * CPU's current task and register the timer, task-queue and
 * immediate bottom halves.
 */
void sched_init(void)
{
	int cpu=smp_processor_id();
	current_set[cpu]=&init_task;
#ifdef __SMP__
	init_task.processor=cpu;
#endif
	init_bh(TIMER_BH, timer_bh);
	init_bh(TQUEUE_BH, tqueue_bh);
	init_bh(IMMEDIATE_BH, immediate_bh);
}