This source file includes following definitions.
- add_to_runqueue
- del_from_runqueue
- move_last_runqueue
- wake_up_process
- process_timeout
- goodness
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- run_timer_list
- run_old_timers
- tqueue_bh
- immediate_bh
- count_active_tasks
- calc_load
- second_overflow
- update_wall_time_one_tick
- update_wall_time
- do_process_times
- do_it_virt
- do_it_prof
- update_one_process
- update_process_times
- timer_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- find_process_by_pid
- setscheduler
- sys_sched_setscheduler
- sys_sched_setparam
- sys_sched_getscheduler
- sys_sched_getparam
- sys_sched_yield
- sys_sched_get_priority_max
- sys_sched_get_priority_min
- sys_sched_rr_get_interval
- timespectojiffies
- jiffiestotimespec
- sys_nanosleep
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/signal.h>
15 #include <linux/sched.h>
16 #include <linux/timer.h>
17 #include <linux/kernel.h>
18 #include <linux/kernel_stat.h>
19 #include <linux/fdreg.h>
20 #include <linux/errno.h>
21 #include <linux/time.h>
22 #include <linux/ptrace.h>
23 #include <linux/delay.h>
24 #include <linux/interrupt.h>
25 #include <linux/tqueue.h>
26 #include <linux/resource.h>
27 #include <linux/mm.h>
28 #include <linux/smp.h>
29
30 #include <asm/system.h>
31 #include <asm/io.h>
32 #include <asm/segment.h>
33 #include <asm/pgtable.h>
34 #include <asm/mmu_context.h>
35
36 #include <linux/timex.h>
37
38
39
40
41
int securelevel = 0;			/* system security level */

long tick = 1000000 / HZ;		/* timer interrupt period in usec */
volatile struct timeval xtime;		/* the current wall-clock time */
int tickadj = 500/HZ;			/* usec to adjust per tick (adjtime) */

/* task queues run from the timer bottom half, the immediate bottom
 * half, and from schedule() respectively */
DECLARE_TASK_QUEUE(tq_timer);
DECLARE_TASK_QUEUE(tq_immediate);
DECLARE_TASK_QUEUE(tq_scheduler);

/*
 * Phase-locked-loop state for the NTP clock discipline (adjtimex).
 */
int time_state = TIME_BAD;	/* clock synchronization status */
int time_status = STA_UNSYNC | STA_PLL;	/* clock status bits */
long time_offset = 0;		/* time adjustment (us) */
long time_constant = 2;		/* pll time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance (ppm) */
long time_precision = 1;	/* clock precision (us) */
long time_maxerror = 0x70000000;/* maximum error */
long time_esterror = 0x70000000;/* estimated error */
long time_phase = 0;		/* phase offset (scaled us) */
long time_freq = 0;		/* frequency offset (scaled ppm) */
long time_adj = 0;		/* tick adjust (scaled 1 / HZ) */
long time_reftime = 0;		/* time at last adjustment (s) */

long time_adjust = 0;		/* remaining adjtime() correction */
long time_adjust_step = 0;	/* per-tick slice of that correction */

int need_resched = 0;		/* set when schedule() should be called */
unsigned long event = 0;

extern int _setitimer(int, struct itimerval *, struct itimerval *);
/* kernel profiling buffer: histogram of kernel PCs, see do_timer() */
unsigned int * prof_buffer = NULL;
unsigned long prof_len = 0;
unsigned long prof_shift = 0;

#define _S(nr) (1<<((nr)-1))

extern void mem_use(void);

/*
 * Statically allocated stacks and resource structures for the initial
 * task (the idle task / task[0]).
 */
static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
unsigned long init_user_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;

struct mm_struct init_mm = INIT_MM;
struct task_struct init_task = INIT_TASK;

/* ticks since boot; incremented by do_timer() at interrupt time */
unsigned long volatile jiffies=0;

struct task_struct *current_set[NR_CPUS];	/* task running on each CPU */
struct task_struct *last_task_used_math = NULL;	/* FPU lazy-switch owner */

struct task_struct * task[NR_TASKS] = {&init_task, };

struct kernel_stat kstat = { 0 };
/*
 * Append task p to the circular run-queue anchored at init_task and,
 * on SMP, try to kick a CPU running its idle task so the new task is
 * picked up.  Must be called with interrupts disabled (see callers).
 */
static inline void add_to_runqueue(struct task_struct * p)
{
#ifdef __SMP__
	int cpu=smp_processor_id();
#endif
#if 1	/* sanity check: catch double insertion */
	if (p->next_run || p->prev_run) {
		printk("task already on run-queue\n");
		return;
	}
#endif
	/* preempt if the newcomer has noticeably more quantum left */
	if (p->counter > current->counter + 3)
		need_resched = 1;
	nr_running++;
	/* link p in just before the init_task sentinel (queue tail) */
	(p->prev_run = init_task.prev_run)->next_run = p;
	p->next_run = &init_task;
	init_task.prev_run = p;
#ifdef __SMP__
	/* bit 31 acts as a spinlock protecting smp_process_available */
	while(set_bit(31,&smp_process_available));
#if 0
	{
		while(test_bit(31,&smp_process_available))
		{
			if(clear_bit(cpu,&smp_invalidate_needed))
			{
				local_invalidate();
				set_bit(cpu,&cpu_callin_map[0]);
			}
		}
	}
#endif
	smp_process_available++;
	clear_bit(31,&smp_process_available);
	if ((0!=p->pid) && smp_threads_ready)
	{
		int i;
		/* find a CPU whose current task is the idle task (pid 0)
		 * and ask it to reschedule */
		for (i=0;i<=smp_top_cpu;i++)
		{
			if (cpu_number_map[i]==-1)
				continue;
			if (0==current_set[i]->pid)
			{
				smp_message_pass(i, MSG_RESCHEDULE, 0L, 0);
				break;
			}
		}
	}
#endif
}
152
/*
 * Unlink task p from the run-queue.  The idle task must never be
 * removed (it anchors the list), so that case is reported (rate
 * limited to 5 messages) and ignored.  Interrupts must be disabled.
 */
static inline void del_from_runqueue(struct task_struct * p)
{
	struct task_struct *next = p->next_run;
	struct task_struct *prev = p->prev_run;

#if 1	/* sanity check: catch removal of an unqueued task */
	if (!next || !prev) {
		printk("task not on run-queue\n");
		return;
	}
#endif
	if (p == &init_task) {
		static int nr = 0;
		if (nr < 5) {
			nr++;
			printk("idle task may not sleep\n");
		}
		return;
	}
	nr_running--;
	next->prev_run = prev;
	prev->next_run = next;
	/* NULL pointers mark the task as off the queue */
	p->next_run = NULL;
	p->prev_run = NULL;
}
178
179 static inline void move_last_runqueue(struct task_struct * p)
180 {
181 struct task_struct *next = p->next_run;
182 struct task_struct *prev = p->prev_run;
183
184 next->prev_run = prev;
185 prev->next_run = next;
186 (p->prev_run = init_task.prev_run)->next_run = p;
187 p->next_run = &init_task;
188 init_task.prev_run = p;
189 }
190
191
192
193
194
195
196
197
198
/*
 * Make task p runnable and put it on the run-queue if it is not
 * already there.  Safe to call from interrupt context: interrupts
 * are disabled around the state change and queue insertion.
 */
inline void wake_up_process(struct task_struct * p)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	p->state = TASK_RUNNING;
	if (!p->next_run)		/* NULL next_run == not queued */
		add_to_runqueue(p);
	restore_flags(flags);
}
210
211 static void process_timeout(unsigned long __data)
212 {
213 struct task_struct * p = (struct task_struct *) __data;
214
215 p->timeout = 0;
216 wake_up_process(p);
217 }
218
219
220
221
222
223
224
225
226
227
228
229
230
231
/*
 * Rate candidate task p for selection by schedule() on this_cpu.
 * Larger is better.  Return values:
 *   -1000              never pick (SMP: task is active on a CPU)
 *   1000 + rt_priority realtime tasks always beat SCHED_OTHER
 *   counter-based      remaining quantum, plus small bonuses
 */
static inline int goodness(struct task_struct * p, struct task_struct * prev, int this_cpu)
{
	int weight;

#ifdef __SMP__
	/* NOTE(review): this rejects tasks whose processor field is SET
	 * (!= NO_PROC_ID), i.e. tasks currently owned by some CPU */
	if (p->processor != NO_PROC_ID)
		return -1000;
#endif

	/* realtime tasks are ranked purely by rt_priority */
	if (p->policy != SCHED_OTHER)
		return 1000 + p->rt_priority;

	/* timesharing: weight is the remaining time slice */
	weight = p->counter;
	if (weight) {

#ifdef __SMP__
		/* prefer the CPU the task last ran on (cache warmth) */
		if (p->last_processor == this_cpu)
			weight += PROC_CHANGE_PENALTY;
#endif

		/* slight bias towards staying with the current task */
		if (p == prev)
			weight += 1;
	}

	return weight;
}
274
275
276
277
278
279
280
281
282
283
284
/*
 * The main scheduler.  Handles pending bottom halves, decides what the
 * current task's new state should be, picks the best runnable task via
 * goodness(), recharges counters when every runnable task has used its
 * quantum, and switches to the chosen task.  A pending interruptible
 * timeout is implemented with an on-stack timer around switch_to().
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * prev, * next;
	unsigned long timeout = 0;
	int this_cpu=smp_processor_id();

	/* scheduling from interrupt context is a bug */
	if (intr_count) {
		printk("Aiee: scheduling in interrupt\n");
		return;
	}
	/* run any pending bottom halves first */
	if (bh_active & bh_mask) {
		intr_count = 1;
		do_bottom_half();
		intr_count = 0;
	}
	run_task_queue(&tq_scheduler);

	need_resched = 0;
	prev = current;
	cli();
	/* round-robin realtime task used up its slice: recharge and
	 * move it to the back of the queue */
	if (!prev->counter && prev->policy == SCHED_RR) {
		prev->counter = prev->priority;
		move_last_runqueue(prev);
	}
	switch (prev->state) {
		case TASK_INTERRUPTIBLE:
			/* pending signal or expired timeout makes it runnable */
			if (prev->signal & ~prev->blocked)
				goto makerunnable;
			timeout = prev->timeout;
			if (timeout && (timeout <= jiffies)) {
				prev->timeout = 0;
				timeout = 0;
		makerunnable:
				prev->state = TASK_RUNNING;
				break;
			}
		default:
			/* any non-running state: drop off the run-queue */
			del_from_runqueue(prev);
		case TASK_RUNNING:
	}
	p = init_task.next_run;
	sti();

#ifdef __SMP__
	/* release prev so another CPU may pick it up */
	prev->processor = NO_PROC_ID;
#endif

	/* scan the run-queue for the task with the best goodness();
	 * the idle task (goodness floor) is the fallback */
	c = -1000;
	next = &init_task;
	while (p != &init_task) {
		int weight = goodness(p, prev, this_cpu);
		if (weight > c)
			c = weight, next = p;
		p = p->next_run;
	}

	/* everyone runnable has used up their quantum: recharge all */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
#ifdef __SMP__
	/* idle-to-idle handoff: just keep running this CPU's idle task */
	if(!prev->pid && !next->pid)
		next=prev;
	next->processor = this_cpu;
	next->last_processor = this_cpu;
#endif
#ifdef __SMP_PROF__
	/* track which CPUs are idle for the SMP profiler */
	if (0==next->pid)
		set_bit(this_cpu,&smp_idle_map);
	else
		clear_bit(this_cpu,&smp_idle_map);
#endif
	if (prev != next) {
		struct timer_list timer;

		kstat.context_swtch++;
		/* arm an on-stack timer so a sleeping prev is woken when
		 * its interruptible timeout expires */
		if (timeout) {
			init_timer(&timer);
			timer.expires = timeout;
			timer.data = (unsigned long) prev;
			timer.function = process_timeout;
			add_timer(&timer);
		}
		get_mmu_context(next);
		switch_to(prev,next);
		/* we only get here once prev runs again; cancel the timer */
		if (timeout)
			del_timer(&timer);
	}
}
399
400 #ifndef __alpha__
401
402
403
404
405
/*
 * pause(2): sleep interruptibly until a signal arrives.  Always
 * returns -ERESTARTNOHAND so the syscall is not auto-restarted.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
412
413 #endif
414
415
416
417
418
419
420
421
422
/*
 * Wake every task on wait queue q that is sleeping either
 * interruptibly or uninterruptibly.  A broken (non-circular) queue
 * is reported with diagnostics and the scan is aborted.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE))
				wake_up_process(p);
		}
		/* circular list: a NULL next means corruption */
		if (!tmp->next) {
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk("        q = %p\n",q);
			printk("       *q = %p\n",*q);
			printk("      tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
447
/*
 * Like wake_up(), but only wakes tasks sleeping in
 * TASK_INTERRUPTIBLE; uninterruptible sleepers are left alone.
 */
void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE)
				wake_up_process(p);
		}
		/* circular list: a NULL next means corruption */
		if (!tmp->next) {
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk("        q = %p\n",q);
			printk("       *q = %p\n",*q);
			printk("      tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
471
/*
 * Semaphore contention slow path: sleep uninterruptibly on the
 * semaphore's wait queue until count becomes positive.  The state is
 * re-set to TASK_UNINTERRUPTIBLE after each wakeup because wake_up()
 * left us TASK_RUNNING.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
484
/*
 * Common helper for sleep_on()/interruptible_sleep_on(): put the
 * current task on wait queue p in the given sleep state and call
 * schedule().  Interrupt flags are preserved across the sleep; the
 * idle task is never allowed to sleep.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();			/* let the wakeup interrupt in */
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
502
/* Sleep on *p until woken; signals interrupt the sleep. */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}
507
/* Sleep on *p until woken; signals do not interrupt the sleep. */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
512
513
514
515
516
/* Sorted doubly-linked list of kernel timers.  The head is a sentinel
 * with expires = ~0 so insertion scans always terminate. */
static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
#define SLOW_BUT_DEBUGGING_TIMERS 0
519
/*
 * Insert timer into the expiry-sorted timer list.  Interrupts are
 * disabled around the list walk and splice.  With the debug option
 * enabled, re-adding a queued timer is detected and refused.
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	if (timer->next || timer->prev) {
		printk("add_timer() called with non-zero list from %p\n",
			__builtin_return_address(0));
		return;
	}
#endif
	p = &timer_head;
	save_flags(flags);
	cli();
	/* walk until the first entry expiring no earlier than us;
	 * the sentinel (expires = ~0) guarantees termination */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	/* splice in just before p */
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}
544
/*
 * Remove timer from the timer list if it is queued.  Returns 1 when
 * the timer was pending and got removed, 0 otherwise.  The debug
 * variant scans the whole list to validate the pointer.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	struct timer_list * p;

	p = &timer_head;
	save_flags(flags);
	cli();
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			return 1;
		}
	}
	if (timer->next || timer->prev)
		printk("del_timer() called from %p with timer not initialized\n",
			__builtin_return_address(0));
	restore_flags(flags);
	return 0;
#else
	struct timer_list * next;
	int ret = 0;
	save_flags(flags);
	cli();
	/* a non-NULL next pointer means the timer is queued */
	if ((next = timer->next) != NULL) {
		(next->prev = timer->prev)->next = next;
		timer->next = timer->prev = NULL;
		ret = 1;
	}
	restore_flags(flags);
	return ret;
#endif
}
582
/*
 * Run all expired timers at the head of the sorted list.  Called with
 * interrupts disabled; each handler runs with interrupts enabled and
 * the timer is unlinked before its function is invoked.
 */
static inline void run_timer_list(void)
{
	struct timer_list * timer;

	while ((timer = timer_head.next) != &timer_head && timer->expires <= jiffies) {
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();		/* handlers run with interrupts on */
		fn(data);
		cli();		/* re-protect the list for the next pass */
	}
}
598
/*
 * Run expired entries of the old static timer_table[].  timer_active
 * is a bitmask of armed slots; a slot's bit is cleared before its
 * handler is called.
 */
static inline void run_old_timers(void)
{
	struct timer_struct *tp;
	unsigned long mask;

	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)	/* no higher bits set: done */
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;
		tp->fn();
		sti();
	}
}
616
/* Bottom half that drains the tq_timer task queue. */
void tqueue_bh(void)
{
	run_task_queue(&tq_timer);
}
621
/* Bottom half that drains the tq_immediate task queue. */
void immediate_bh(void)
{
	run_task_queue(&tq_immediate);
}
626
unsigned long timer_active = 0;		/* bitmask of armed old-style timers */
struct timer_struct timer_table[32];	/* the old static timer slots */

/*
 * Load averages (1, 5, 15 min) as fixed-point values; updated by
 * calc_load() every LOAD_FREQ ticks.
 */
unsigned long avenrun[3] = { 0,0,0 };
637
638
639
640
/*
 * Count tasks contributing to the load average (running,
 * uninterruptible or swapping), scaled by FIXED_1.  On SMP the
 * per-CPU idle tasks are subtracted out.
 */
static unsigned long count_active_tasks(void)
{
	struct task_struct **p;
	unsigned long nr = 0;

	for(p = &LAST_TASK; p > &FIRST_TASK; --p)
		if (*p && ((*p)->state == TASK_RUNNING ||
			   (*p)->state == TASK_UNINTERRUPTIBLE ||
			   (*p)->state == TASK_SWAPPING))
			nr += FIXED_1;
#ifdef __SMP__
	nr-=(smp_num_cpus-1)*FIXED_1;
#endif
	return nr;
}
656
/*
 * Update the three load averages.  Called from timer_bh with the
 * number of ticks elapsed; recomputes every LOAD_FREQ ticks using the
 * exponential-decay CALC_LOAD macro.
 */
static inline void calc_load(unsigned long ticks)
{
	unsigned long active_tasks;
	static int count = LOAD_FREQ;	/* ticks until the next update */

	count -= ticks;
	if (count < 0) {
		count += LOAD_FREQ;
		active_tasks = count_active_tasks();
		CALC_LOAD(avenrun[0], EXP_1, active_tasks);
		CALC_LOAD(avenrun[1], EXP_5, active_tasks);
		CALC_LOAD(avenrun[2], EXP_15, active_tasks);
	}
}
671
672
673
674
675
676
677
678
679
680
/*
 * Called once per second from update_wall_time() to run the NTP clock
 * discipline: bump the maximum error estimate, step the leap-second
 * state machine, and recompute time_adj (the per-tick phase/frequency
 * correction) from time_offset and time_freq.
 */
static void second_overflow(void)
{
	long ltemp;

	/* grow the maximum error bound, saturating at 0x70000000 */
	time_maxerror = (0x70000000-time_maxerror <
			 time_tolerance >> SHIFT_USEC) ?
	    0x70000000 : (time_maxerror + (time_tolerance >> SHIFT_USEC));

	/*
	 * Leap-second state machine: INS inserts a second at the end of
	 * the UTC day, DEL deletes one; OOP/WAIT sequence back to OK.
	 */
	switch (time_state) {

	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;

	case TIME_INS:
		if (xtime.tv_sec % 86400 == 0) {	/* UTC midnight */
			xtime.tv_sec--;
			time_state = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 UTC\n");
		}
		break;

	case TIME_DEL:
		if ((xtime.tv_sec + 1) % 86400 == 0) {
			xtime.tv_sec++;
			time_state = TIME_WAIT;
			printk("Clock: deleting leap second 23:59:59 UTC\n");
		}
		break;

	case TIME_OOP:
		/* leap second in progress; settle next second */
		time_state = TIME_WAIT;
		break;

	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}

	/*
	 * Fold a fraction of the remaining phase offset into time_adj,
	 * clamped to MAXPHASE/MINSEC; symmetric for either sign.
	 */
	if (time_offset < 0) {
		ltemp = -time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) <<
			    SHIFT_UPDATE;
		time_offset += ltemp;
		time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ -
		    SHIFT_UPDATE);
	} else {
		ltemp = time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) <<
			    SHIFT_UPDATE;
		time_offset -= ltemp;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ -
		    SHIFT_UPDATE);
	}

	/* age the PPS signal; after PPS_VALID seconds without one,
	 * clear the PPS status bits */
	pps_valid++;
	if (pps_valid == PPS_VALID) {
		pps_jitter = MAXTIME;
		pps_stabil = MAXFREQ;
		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
				 STA_PPSWANDER | STA_PPSERROR);
	}
	/* add the frequency correction (PLL plus PPS) to time_adj */
	ltemp = time_freq + pps_freq;
	if (ltemp < 0)
		time_adj -= -ltemp >>
		    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
	else
		time_adj += ltemp >>
		    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

#if HZ == 100
	/* compensate for HZ==100 vs the shift-based approximation of 128 */
	if (time_adj < 0)
		time_adj -= -time_adj >> 2;
	else
		time_adj += time_adj >> 2;
#endif
}
795
/*
 * Advance xtime by one tick, applying the scaled NTP phase
 * adjustment (time_adj) and the current adjtime() slew step.
 * Called with interrupts disabled.
 */
static void update_wall_time_one_tick(void)
{
	/* accumulate the scaled phase adjustment; whole microseconds
	 * spill over into xtime.tv_usec */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		long ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase >= FINEUSEC) {
		long ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust) {
		/* adjtime() slew: consume at most +-tickadj usec per
		 * tick so the clock never jumps */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		/* reduce the remaining requested adjustment */
		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;
}
838
839
840
841
842
843
844
845
846 static void update_wall_time(unsigned long ticks)
847 {
848 do {
849 ticks--;
850 update_wall_time_one_tick();
851 } while (ticks);
852
853 if (xtime.tv_usec >= 1000000) {
854 xtime.tv_usec -= 1000000;
855 xtime.tv_sec++;
856 second_overflow();
857 }
858 }
859
/*
 * Charge 'user' and 'system' ticks to task p, update the global CPU
 * accounting, and enforce the RLIMIT_CPU soft (SIGXCPU, once per
 * second over the limit) and hard (SIGKILL) limits.
 */
static inline void do_process_times(struct task_struct *p,
	unsigned long user, unsigned long system)
{
	long psecs;

	p->utime += user;
	/* niced tasks' user time is accounted as "nice" time */
	if (p->priority < DEF_PRIORITY)
		kstat.cpu_nice += user;
	else
		kstat.cpu_user += user;
	kstat.cpu_system += system;
	p->stime += system;

	psecs = (p->stime + p->utime) / HZ;
	if (psecs > p->rlim[RLIMIT_CPU].rlim_cur) {
		/* send SIGXCPU only on exact second boundaries */
		if (psecs * HZ == p->stime + p->utime)
			send_sig(SIGXCPU, p, 1);
		/* past the hard limit: kill the process */
		if (psecs > p->rlim[RLIMIT_CPU].rlim_max)
			send_sig(SIGKILL, p, 1);
	}
}
883
/*
 * Decrement p's ITIMER_VIRTUAL by 'ticks' of user time; on expiry
 * send SIGVTALRM and re-arm from it_virt_incr (an increment of 0
 * leaves the timer disabled).
 */
static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
{
	unsigned long it_virt = p->it_virt_value;

	if (it_virt) {
		if (it_virt <= ticks) {
			it_virt = ticks + p->it_virt_incr;
			send_sig(SIGVTALRM, p, 1);
		}
		p->it_virt_value = it_virt - ticks;
	}
}
896
/*
 * Decrement p's ITIMER_PROF by 'ticks'; on expiry send SIGPROF and
 * re-arm from it_prof_incr (an increment of 0 disables the timer).
 */
static inline void do_it_prof(struct task_struct * p, unsigned long ticks)
{
	unsigned long it_prof = p->it_prof_value;

	if (it_prof) {
		if (it_prof <= ticks) {
			it_prof = ticks + p->it_prof_incr;
			send_sig(SIGPROF, p, 1);
		}
		p->it_prof_value = it_prof - ticks;
	}
}
909
/*
 * Per-task tick accounting: charge CPU time and run the interval
 * timers.  Note ITIMER_VIRTUAL counts only user ticks while
 * ITIMER_PROF counts all ticks.
 */
static __inline__ void update_one_process(struct task_struct *p,
	unsigned long ticks, unsigned long user, unsigned long system)
{
	do_process_times(p, user, system);
	do_it_virt(p, user);
	do_it_prof(p, ticks);
}
917
/*
 * Distribute 'ticks' elapsed ticks ('system' of them in kernel mode)
 * to the running task(s).  UP charges current only; SMP walks all
 * CPUs, charging each CPU's current task and requesting a reschedule
 * where a quantum ran out (or where an idle CPU could pick up work).
 */
static void update_process_times(unsigned long ticks, unsigned long system)
{
#ifndef  __SMP__
	struct task_struct * p = current;
	if (p->pid) {		/* never charge the idle task */
		p->counter -= ticks;
		if (p->counter < 0) {
			p->counter = 0;
			need_resched = 1;
		}

		update_one_process(p, ticks, ticks-system, system);
	}
#else
	int cpu,i;
	cpu = smp_processor_id();
	for (i=0;i<=smp_top_cpu;i++)
	{
		struct task_struct *p;

		if(cpu_number_map[i]==-1)
			continue;
#ifdef __SMP_PROF__
		if (test_bit(i,&smp_idle_map))
			smp_idle_count[i]++;
#endif
		p = current_set[i];

		if (p->pid) {
			/* only this CPU knows its split of user/system
			 * time; other CPUs are charged heuristically */
			unsigned long utime = ticks;
			unsigned long stime = 0;
			if (cpu == i) {
				utime = ticks-system;
				stime = system;
			} else if (smp_proc_in_lock[i]) {
				utime = 0;
				stime = ticks;
			}
			update_one_process(p, ticks, utime, stime);

			p->counter -= ticks;
			if (p->counter >= 0)
				continue;
			p->counter = 0;
		} else {
			/* idle CPU: only reschedule if work is available */
			if (!(0x7fffffff & smp_process_available))
				continue;
		}

		if (i==cpu)
			need_resched = 1;
		else
			smp_message_pass(i, MSG_RESCHEDULE, 0L, 0);
	}
#endif
}
981
/* Ticks accumulated by do_timer() since timer_bh last ran; consumed
 * with interrupts disabled in timer_bh(). */
static unsigned long lost_ticks = 0;
static unsigned long lost_ticks_system = 0;	/* of those, in kernel mode */
984
/*
 * The timer bottom half: run both timer mechanisms, then consume the
 * ticks accumulated by do_timer() to update the load average, the
 * wall clock and per-process accounting.
 */
static void timer_bh(void)
{
	unsigned long ticks, system;

	run_old_timers();

	cli();
	run_timer_list();
	/* atomically take and reset the tick counters */
	ticks = lost_ticks;
	lost_ticks = 0;
	system = lost_ticks_system;
	lost_ticks_system = 0;
	sti();

	if (ticks) {
		calc_load(ticks);
		update_wall_time(ticks);
		update_process_times(ticks, system);
	}
}
1005
1006
1007
1008
1009
1010
1011 #if HZ > 100
1012 #define should_run_timers(x) ((x) >= HZ/100)
1013 #else
1014 #define should_run_timers(x) (1)
1015 #endif
1016
/*
 * The timer interrupt handler: bump jiffies and the lost-tick count,
 * schedule the timer (and, if needed, tqueue) bottom halves, and
 * sample the kernel PC into the profiling buffer for system-mode
 * ticks.  Must do minimal work; the rest happens in timer_bh().
 */
void do_timer(struct pt_regs * regs)
{
	/* cast away volatile for the increment */
	(*(unsigned long *)&jiffies)++;
	lost_ticks++;
	if (should_run_timers(lost_ticks))
		mark_bh(TIMER_BH);
	if (!user_mode(regs)) {
		lost_ticks_system++;
		/* kernel profiling: histogram of interrupted kernel PCs */
		if (prof_buffer && current->pid) {
			extern int _stext;
			unsigned long ip = instruction_pointer(regs);
			ip -= (unsigned long) &_stext;
			ip >>= prof_shift;
			if (ip < prof_len)
				prof_buffer[ip]++;
		}
	}
	if (tq_timer != &tq_last)
		mark_bh(TQUEUE_BH);
}
1037
1038 #ifndef __alpha__
1039
1040
1041
1042
1043
/*
 * alarm(2), implemented on top of ITIMER_REAL.  Returns the number of
 * seconds remaining on the previous alarm, rounded up to the next
 * whole second.
 */
asmlinkage unsigned int sys_alarm(unsigned int seconds)
{
	struct itimerval it_new, it_old;
	unsigned int oldalarm;

	it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
	it_new.it_value.tv_sec = seconds;
	it_new.it_value.tv_usec = 0;
	_setitimer(ITIMER_REAL, &it_new, &it_old);
	oldalarm = it_old.it_value.tv_sec;

	/* round any fractional second up */
	if (it_old.it_value.tv_usec)
		oldalarm++;
	return oldalarm;
}
1060
1061
1062
1063
1064
/* getpid(2): return the calling process's pid. */
asmlinkage int sys_getpid(void)
{
	return current->pid;
}
1069
/* getppid(2): return the pid of the original parent (p_opptr). */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}
1074
/* getuid(2): return the real user id. */
asmlinkage int sys_getuid(void)
{
	return current->uid;
}
1079
/* geteuid(2): return the effective user id. */
asmlinkage int sys_geteuid(void)
{
	return current->euid;
}
1084
/* getgid(2): return the real group id. */
asmlinkage int sys_getgid(void)
{
	return current->gid;
}
1089
/* getegid(2): return the effective group id. */
asmlinkage int sys_getegid(void)
{
	return current->egid;
}
1094
1095
1096
1097
1098
1099
/*
 * nice(2): adjust the current task's static priority.  The external
 * nice range (+-40) is rescaled to the internal priority range
 * (1 .. DEF_PRIORITY*2); only the superuser may raise priority.
 */
asmlinkage int sys_nice(int increment)
{
	unsigned long newprio;
	int increase = 0;

	newprio = increment;
	if (increment < 0) {
		if (!suser())
			return -EPERM;
		newprio = -increment;
		increase = 1;
	}
	/* clamp the requested change to the nice range */
	if (newprio > 40)
		newprio = 40;

	/* rescale from nice units to internal priority units,
	 * rounding to the nearest step */
	newprio = (newprio * DEF_PRIORITY + 10) / 20;
	increment = newprio;
	if (increase)
		increment = -increment;
	/* higher priority value == more CPU, so subtract */
	newprio = current->priority - increment;
	if (newprio < 1)
		newprio = 1;
	if (newprio > DEF_PRIORITY*2)
		newprio = DEF_PRIORITY*2;
	current->priority = newprio;
	return 0;
}
1133
1134 #endif
1135
1136 static struct task_struct *find_process_by_pid(pid_t pid) {
1137 struct task_struct *p, *q;
1138
1139 if (pid == 0)
1140 p = current;
1141 else {
1142 p = 0;
1143 for_each_task(q) {
1144 if (q && q->pid == pid) {
1145 p = q;
1146 break;
1147 }
1148 }
1149 }
1150 return p;
1151 }
1152
/*
 * Common worker for sched_setscheduler(2)/sched_setparam(2): validate
 * the user-supplied policy and priority, check permissions, then set
 * the target task's policy/rt_priority, requeue it, and reschedule.
 * policy < 0 means "keep the task's current policy" (setparam).
 */
static int setscheduler(pid_t pid, int policy,
			struct sched_param *param)
{
	int error;
	struct sched_param lp;
	struct task_struct *p;

	if (!param || pid < 0)
		return -EINVAL;

	error = verify_area(VERIFY_READ, param, sizeof(struct sched_param));
	if (error)
		return error;
	memcpy_fromfs(&lp, param, sizeof(struct sched_param));

	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	if (policy < 0)
		policy = p->policy;
	else if (policy != SCHED_FIFO && policy != SCHED_RR &&
		 policy != SCHED_OTHER)
		return -EINVAL;

	/* valid priorities: 1..99 for realtime, exactly 0 for OTHER */
	if (lp.sched_priority < 0 || lp.sched_priority > 99)
		return -EINVAL;
	if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
		return -EINVAL;

	/* only root may select a realtime policy */
	if ((policy == SCHED_FIFO || policy == SCHED_RR) && !suser())
		return -EPERM;
	/* only root or a matching euid may change another task */
	if ((current->euid != p->euid) && (current->euid != p->uid) &&
	    !suser())
		return -EPERM;

	p->policy = policy;
	p->rt_priority = lp.sched_priority;
	/* requeue if the task is runnable, then let the new settings
	 * take effect immediately */
	if (p->next_run)
		move_last_runqueue(p);
	schedule();

	return 0;
}
1201
/* sched_setscheduler(2): set both policy and parameters. */
asmlinkage int sys_sched_setscheduler(pid_t pid, int policy,
				      struct sched_param *param)
{
	return setscheduler(pid, policy, param);
}
1207
/* sched_setparam(2): change parameters only (policy -1 = keep). */
asmlinkage int sys_sched_setparam(pid_t pid, struct sched_param *param)
{
	return setscheduler(pid, -1, param);
}
1212
/* sched_getscheduler(2): return the task's scheduling policy. */
asmlinkage int sys_sched_getscheduler(pid_t pid)
{
	struct task_struct *p;

	if (pid < 0)
		return -EINVAL;

	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	return p->policy;
}
1226
/*
 * sched_getparam(2): copy the task's realtime priority out to the
 * user-supplied sched_param structure.
 */
asmlinkage int sys_sched_getparam(pid_t pid, struct sched_param *param)
{
	int error;
	struct task_struct *p;
	struct sched_param lp;

	if (!param || pid < 0)
		return -EINVAL;

	error = verify_area(VERIFY_WRITE, param, sizeof(struct sched_param));
	if (error)
		return error;

	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	lp.sched_priority = p->rt_priority;
	memcpy_tofs(param, &lp, sizeof(struct sched_param));

	return 0;
}
1249
/*
 * sched_yield(2): move the caller to the back of the run-queue.
 * NOTE(review): does not set need_resched, so the actual switch
 * happens at the next scheduling point — confirm this is intended.
 */
asmlinkage int sys_sched_yield(void)
{
	move_last_runqueue(current);

	return 0;
}
1256
1257 asmlinkage int sys_sched_get_priority_max(int policy)
1258 {
1259 switch (policy) {
1260 case SCHED_FIFO:
1261 case SCHED_RR:
1262 return 99;
1263 case SCHED_OTHER:
1264 return 0;
1265 }
1266
1267 return -EINVAL;
1268 }
1269
1270 asmlinkage int sys_sched_get_priority_min(int policy)
1271 {
1272 switch (policy) {
1273 case SCHED_FIFO:
1274 case SCHED_RR:
1275 return 1;
1276 case SCHED_OTHER:
1277 return 0;
1278 }
1279
1280 return -EINVAL;
1281 }
1282
1283 asmlinkage int sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
1284 {
1285 int error;
1286 struct timespec t;
1287
1288 error = verify_area(VERIFY_WRITE, interval, sizeof(struct timespec));
1289 if (error)
1290 return error;
1291
1292 t.tv_sec = 0;
1293 t.tv_nsec = 0;
1294 return -ENOSYS;
1295 memcpy_tofs(interval, &t, sizeof(struct timespec));
1296
1297 return 0;
1298 }
1299
1300
1301
1302
1303
/*
 * Convert a timespec to a jiffy count, rounding the nanosecond part
 * up so the caller never sleeps less than requested.  Saturates at
 * LONG_MAX for very large second values.
 */
static unsigned long timespectojiffies(struct timespec *value)
{
	unsigned long sec = (unsigned) value->tv_sec;
	long nsec = value->tv_nsec;

	if (sec > (LONG_MAX / HZ))
		return LONG_MAX;
	nsec += 1000000000L / HZ - 1;	/* round up to the next jiffy */
	nsec /= 1000000000L / HZ;
	return HZ * sec + nsec;
}
1315
1316 static void jiffiestotimespec(unsigned long jiffies, struct timespec *value)
1317 {
1318 value->tv_nsec = (jiffies % HZ) * (1000000000L / HZ);
1319 value->tv_sec = jiffies / HZ;
1320 return;
1321 }
1322
/*
 * nanosleep(2): sleep for the requested interval.  Very short sleeps
 * (<= 2 ms) by realtime tasks busy-wait with udelay() for accuracy;
 * everything else sleeps interruptibly on a jiffy timeout.  If the
 * sleep is cut short, the remaining time is written to rmtp (when
 * given) and -EINTR is returned.
 */
asmlinkage int sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
{
	int error;
	struct timespec t;
	unsigned long expire;

	error = verify_area(VERIFY_READ, rqtp, sizeof(struct timespec));
	if (error)
		return error;
	memcpy_fromfs(&t, rqtp, sizeof(struct timespec));
	/* validate rmtp up front so a late fault cannot lose the sleep */
	if (rmtp) {
		error = verify_area(VERIFY_WRITE, rmtp,
				    sizeof(struct timespec));
		if (error)
			return error;
	}

	if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
		return -EINVAL;

	/* short busy-wait path for realtime tasks */
	if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
	    current->policy != SCHED_OTHER) {
		udelay((t.tv_nsec + 999) / 1000);
		return 0;
	}

	/* non-zero requests get one extra jiffy so we never return early */
	expire = timespectojiffies(&t) + (t.tv_sec || t.tv_nsec) + jiffies;
	current->timeout = expire;
	current->state = TASK_INTERRUPTIBLE;
	schedule();

	/* woken before the timeout: interrupted by a signal */
	if (expire > jiffies) {
		if (rmtp) {
			jiffiestotimespec(expire - jiffies -
					  (expire > jiffies + 1), &t);
			memcpy_tofs(rmtp, &t, sizeof(struct timespec));
		}
		return -EINTR;
	}

	return 0;
}
1369
/*
 * Print one line of debugging state for task p: name, state, saved
 * PC, free kernel stack estimate, pid and family links.  nr is the
 * task[] slot; the current task is shown with a negated slot number.
 */
static void show_task(int nr,struct task_struct * p)
{
	unsigned long free;
	static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
	if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[p->state]);
	else
		printk(" ");
#if ((~0UL) == 0xffffffff)
	if (p == current)
		printk(" current  ");
	else
		printk(" %08lX ", thread_saved_pc(&p->tss));
#else
	if (p == current)
		printk("   current task   ");
	else
		printk(" %016lx ", thread_saved_pc(&p->tss));
#endif
	/* estimate free stack by scanning for the first non-zero word */
	for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
		if (((unsigned long *)p->kernel_stack_page)[free])
			break;
	}
	printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
		printk("      ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);
	else
		printk("       ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);
	else
		printk("\n");
}
1409
/*
 * Dump the state of every task in the system (debugging aid, e.g.
 * from SysRq).  The header width depends on the pointer size.
 */
void show_state(void)
{
	int i;

#if ((~0UL) == 0xffffffff)
	printk("\n"
	       "                         free                        sibling\n");
	printk("  task             PC    stack   pid father child younger older\n");
#else
	printk("\n"
	       "                                 free                        sibling\n");
	printk("  task                 PC        stack   pid father child younger older\n");
#endif
	for (i=0 ; i<NR_TASKS ; i++)
		if (task[i])
			show_task(i,task[i]);
}
1427
/*
 * Boot-time scheduler initialization: register this CPU's idle task
 * in current_set[] and install the timer, task-queue and immediate
 * bottom-half handlers.
 */
void sched_init(void)
{
	int cpu=smp_processor_id();
	current_set[cpu]=&init_task;
#ifdef __SMP__
	init_task.processor=cpu;
#endif
	init_bh(TIMER_BH, timer_bh);
	init_bh(TQUEUE_BH, tqueue_bh);
	init_bh(IMMEDIATE_BH, immediate_bh);
}