This source file includes the following definitions:
- add_to_runqueue
- del_from_runqueue
- move_last_runqueue
- wake_up_process
- process_timeout
- goodness
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- run_timer_list
- run_old_timers
- tqueue_bh
- immediate_bh
- count_active_tasks
- calc_load
- second_overflow
- update_wall_time_one_tick
- update_wall_time
- do_process_times
- do_it_virt
- do_it_prof
- update_one_process
- update_process_times
- timer_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- find_process_by_pid
- setscheduler
- sys_sched_setscheduler
- sys_sched_setparam
- sys_sched_getscheduler
- sys_sched_getparam
- sys_sched_yield
- sys_sched_get_priority_max
- sys_sched_get_priority_min
- sys_sched_rr_get_interval
- timespectojiffies
- jiffiestotimespec
- sys_nanosleep
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16 #include <linux/signal.h>
17 #include <linux/sched.h>
18 #include <linux/timer.h>
19 #include <linux/kernel.h>
20 #include <linux/kernel_stat.h>
21 #include <linux/fdreg.h>
22 #include <linux/errno.h>
23 #include <linux/time.h>
24 #include <linux/ptrace.h>
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/tqueue.h>
28 #include <linux/resource.h>
29 #include <linux/mm.h>
30 #include <linux/smp.h>
31
32 #include <asm/system.h>
33 #include <asm/io.h>
34 #include <asm/segment.h>
35 #include <asm/pgtable.h>
36 #include <asm/mmu_context.h>
37
38 #include <linux/timex.h>
39
40
41
42
43
44 int securelevel = 0;
45
46 long tick = 1000000 / HZ;
47 volatile struct timeval xtime;
48 int tickadj = 500/HZ;
49
50 DECLARE_TASK_QUEUE(tq_timer);
51 DECLARE_TASK_QUEUE(tq_immediate);
52 DECLARE_TASK_QUEUE(tq_scheduler);
53
54
55
56
57
58 int time_state = TIME_ERROR;
59 int time_status = STA_UNSYNC;
60 long time_offset = 0;
61 long time_constant = 2;
62 long time_tolerance = MAXFREQ;
63 long time_precision = 1;
64 long time_maxerror = MAXPHASE;
65 long time_esterror = MAXPHASE;
66 long time_phase = 0;
67 long time_freq = 0;
68 long time_adj = 0;
69 long time_reftime = 0;
70
71 long time_adjust = 0;
72 long time_adjust_step = 0;
73
74 int need_resched = 0;
75 unsigned long event = 0;
76
77 extern int _setitimer(int, struct itimerval *, struct itimerval *);
78 unsigned int * prof_buffer = NULL;
79 unsigned long prof_len = 0;
80 unsigned long prof_shift = 0;
81
82 #define _S(nr) (1<<((nr)-1))
83
84 extern void mem_use(void);
85
86 static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
87 unsigned long init_user_stack[1024] = { STACK_MAGIC, };
88 static struct vm_area_struct init_mmap = INIT_MMAP;
89 static struct fs_struct init_fs = INIT_FS;
90 static struct files_struct init_files = INIT_FILES;
91 static struct signal_struct init_signals = INIT_SIGNALS;
92
93 struct mm_struct init_mm = INIT_MM;
94 struct task_struct init_task = INIT_TASK;
95
96 unsigned long volatile jiffies=0;
97
98 struct task_struct *current_set[NR_CPUS];
99 struct task_struct *last_task_used_math = NULL;
100
101 struct task_struct * task[NR_TASKS] = {&init_task, };
102
103 struct kernel_stat kstat = { 0 };
104
105 static inline void add_to_runqueue(struct task_struct * p)
106 {
107 #ifdef __SMP__
108 int cpu=smp_processor_id();
109 #endif
110 #if 1
111 if (p->next_run || p->prev_run) {
112 printk("task already on run-queue\n");
113 return;
114 }
115 #endif
116 if (p->counter > current->counter + 3)
117 need_resched = 1;
118 nr_running++;
119 (p->prev_run = init_task.prev_run)->next_run = p;
120 p->next_run = &init_task;
121 init_task.prev_run = p;
122 #ifdef __SMP__
123
124 while(set_bit(31,&smp_process_available))
125 {
126 while(test_bit(31,&smp_process_available))
127 {
128 if(clear_bit(cpu,&smp_invalidate_needed))
129 {
130 local_flush_tlb();
131 set_bit(cpu,&cpu_callin_map[0]);
132 }
133 }
134 }
135 smp_process_available++;
136 clear_bit(31,&smp_process_available);
137 if ((0!=p->pid) && smp_threads_ready)
138 {
139 int i;
140 for (i=0;i<smp_num_cpus;i++)
141 {
142 if (0==current_set[cpu_logical_map[i]]->pid)
143 {
144 smp_message_pass(cpu_logical_map[i], MSG_RESCHEDULE, 0L, 0);
145 break;
146 }
147 }
148 }
149 #endif
150 }
151
152 static inline void del_from_runqueue(struct task_struct * p)
153 {
154 struct task_struct *next = p->next_run;
155 struct task_struct *prev = p->prev_run;
156
157 #if 1
158 if (!next || !prev) {
159 printk("task not on run-queue\n");
160 return;
161 }
162 #endif
163 if (p == &init_task) {
164 static int nr = 0;
165 if (nr < 5) {
166 nr++;
167 printk("idle task may not sleep\n");
168 }
169 return;
170 }
171 nr_running--;
172 next->prev_run = prev;
173 prev->next_run = next;
174 p->next_run = NULL;
175 p->prev_run = NULL;
176 }
177
178 static inline void move_last_runqueue(struct task_struct * p)
179 {
180 struct task_struct *next = p->next_run;
181 struct task_struct *prev = p->prev_run;
182
183
184 next->prev_run = prev;
185 prev->next_run = next;
186
187 p->next_run = &init_task;
188 prev = init_task.prev_run;
189 init_task.prev_run = p;
190 p->prev_run = prev;
191 prev->next_run = p;
192 }
193
194
195
196
197
198
199
200
201
202 inline void wake_up_process(struct task_struct * p)
203 {
204 unsigned long flags;
205
206 save_flags(flags);
207 cli();
208 p->state = TASK_RUNNING;
209 if (!p->next_run)
210 add_to_runqueue(p);
211 restore_flags(flags);
212 }
213
214 static void process_timeout(unsigned long __data)
215 {
216 struct task_struct * p = (struct task_struct *) __data;
217
218 p->timeout = 0;
219 wake_up_process(p);
220 }
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235 static inline int goodness(struct task_struct * p, struct task_struct * prev, int this_cpu)
236 {
237 int weight;
238
239 #ifdef __SMP__
240
241 if (p->processor != NO_PROC_ID)
242 return -1000;
243 #ifdef PAST_2_0
244
245 if (p->processor_mask && !(p->processor_mask & (1<<this_cpu)))
246 return -1000;
247 #endif
248 #endif
249
250
251
252
253
254
255 if (p->policy != SCHED_OTHER)
256 return 1000 + p->rt_priority;
257
258
259
260
261
262
263
264
265 weight = p->counter;
266 if (weight) {
267
268 #ifdef __SMP__
269
270
271 if (p->last_processor == this_cpu)
272 weight += PROC_CHANGE_PENALTY;
273 #endif
274
275
276 if (p == prev)
277 weight += 1;
278 }
279
280 return weight;
281 }
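In short (my reading of the code above, not original commentary): any SCHED_FIFO or SCHED_RR task outranks every SCHED_OTHER task, since 1000 + rt_priority always exceeds a normal task's weight; among SCHED_OTHER tasks the weight is the remaining time slice p->counter, plus 1 if the task is the one that was just running (to favour staying put), plus PROC_CHANGE_PENALTY on SMP when it last ran on this CPU. A task with an exhausted counter scores 0, which is what triggers the counter-recharge loop further down in schedule() once every runnable task has reached 0.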
282
283
284
285
286
287
288
289
290
291
292
293 asmlinkage void schedule(void)
294 {
295 int c;
296 struct task_struct * p;
297 struct task_struct * prev, * next;
298 unsigned long timeout = 0;
299 int this_cpu=smp_processor_id();
300
301
302
303 if (intr_count)
304 goto scheduling_in_interrupt;
305
306 if (bh_active & bh_mask) {
307 intr_count = 1;
308 do_bottom_half();
309 intr_count = 0;
310 }
311
312 run_task_queue(&tq_scheduler);
313
314 need_resched = 0;
315 prev = current;
316 cli();
317
318 if (!prev->counter && prev->policy == SCHED_RR) {
319 prev->counter = prev->priority;
320 move_last_runqueue(prev);
321 }
322 switch (prev->state) {
323 case TASK_INTERRUPTIBLE:
324 if (prev->signal & ~prev->blocked)
325 goto makerunnable;
326 timeout = prev->timeout;
327 if (timeout && (timeout <= jiffies)) {
328 prev->timeout = 0;
329 timeout = 0;
330 makerunnable:
331 prev->state = TASK_RUNNING;
332 break;
333 }
334 default:
335 del_from_runqueue(prev);
336 case TASK_RUNNING:
337 }
338 p = init_task.next_run;
339 sti();
340
341 #ifdef __SMP__
342
343
344
345 prev->processor = NO_PROC_ID;
346 #define idle_task (task[cpu_number_map[this_cpu]])
347 #else
348 #define idle_task (&init_task)
349 #endif
350
351
352
353
354
355
356
357 c = -1000;
358 next = idle_task;
359 while (p != &init_task) {
360 int weight = goodness(p, prev, this_cpu);
361 if (weight > c)
362 c = weight, next = p;
363 p = p->next_run;
364 }
365
366
367 if (!c) {
368 for_each_task(p)
369 p->counter = (p->counter >> 1) + p->priority;
370 }
371 #ifdef __SMP__
372
373
374
375
376 next->processor = this_cpu;
377 next->last_processor = this_cpu;
378 #endif
379 #ifdef __SMP_PROF__
380
381 if (0==next->pid)
382 set_bit(this_cpu,&smp_idle_map);
383 else
384 clear_bit(this_cpu,&smp_idle_map);
385 #endif
386 if (prev != next) {
387 struct timer_list timer;
388
389 kstat.context_swtch++;
390 if (timeout) {
391 init_timer(&timer);
392 timer.expires = timeout;
393 timer.data = (unsigned long) prev;
394 timer.function = process_timeout;
395 add_timer(&timer);
396 }
397 get_mmu_context(next);
398 switch_to(prev,next);
399 if (timeout)
400 del_timer(&timer);
401 }
402 return;
403
404 scheduling_in_interrupt:
405 printk("Aiee: scheduling in interrupt %p\n",
406 __builtin_return_address(0));
407 }
408
409 #ifndef __alpha__
410
411
412
413
414
415 asmlinkage int sys_pause(void)
416 {
417 current->state = TASK_INTERRUPTIBLE;
418 schedule();
419 return -ERESTARTNOHAND;
420 }
421
422 #endif
423
424
425
426
427
428
429
430
431
432 void wake_up(struct wait_queue **q)
433 {
434 struct wait_queue *tmp;
435 struct task_struct * p;
436
437 if (!q || !(tmp = *q))
438 return;
439 do {
440 if ((p = tmp->task) != NULL) {
441 if ((p->state == TASK_UNINTERRUPTIBLE) ||
442 (p->state == TASK_INTERRUPTIBLE))
443 wake_up_process(p);
444 }
445 if (!tmp->next) {
446 printk("wait_queue is bad (eip = %p)\n",
447 __builtin_return_address(0));
448 printk(" q = %p\n",q);
449 printk(" *q = %p\n",*q);
450 printk(" tmp = %p\n",tmp);
451 break;
452 }
453 tmp = tmp->next;
454 } while (tmp != *q);
455 }
456
457 void wake_up_interruptible(struct wait_queue **q)
458 {
459 struct wait_queue *tmp;
460 struct task_struct * p;
461
462 if (!q || !(tmp = *q))
463 return;
464 do {
465 if ((p = tmp->task) != NULL) {
466 if (p->state == TASK_INTERRUPTIBLE)
467 wake_up_process(p);
468 }
469 if (!tmp->next) {
470 printk("wait_queue is bad (eip = %p)\n",
471 __builtin_return_address(0));
472 printk(" q = %p\n",q);
473 printk(" *q = %p\n",*q);
474 printk(" tmp = %p\n",tmp);
475 break;
476 }
477 tmp = tmp->next;
478 } while (tmp != *q);
479 }
480
481 void __down(struct semaphore * sem)
482 {
483 struct wait_queue wait = { current, NULL };
484 add_wait_queue(&sem->wait, &wait);
485 current->state = TASK_UNINTERRUPTIBLE;
486 while (sem->count <= 0) {
487 schedule();
488 current->state = TASK_UNINTERRUPTIBLE;
489 }
490 current->state = TASK_RUNNING;
491 remove_wait_queue(&sem->wait, &wait);
492 }
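A minimal usage sketch (not part of the original file; the MUTEX initializer and the down()/up() wrappers come from <asm/semaphore.h>, and my_sem/do_protected_work are illustrative names). __down() above is only the slow path that down() falls into when the semaphore is already held:

static struct semaphore my_sem = MUTEX;		/* count initialised to 1 */

static void do_protected_work(void)
{
	down(&my_sem);		/* may call __down() and sleep uninterruptibly */
	/* ... critical section: only one process at a time gets here ... */
	up(&my_sem);		/* wakes the next sleeper, if any */
}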
493
494 static inline void __sleep_on(struct wait_queue **p, int state)
495 {
496 unsigned long flags;
497 struct wait_queue wait = { current, NULL };
498
499 if (!p)
500 return;
501 if (current == task[0])
502 panic("task[0] trying to sleep");
503 current->state = state;
504 add_wait_queue(p, &wait);
505 save_flags(flags);
506 sti();
507 schedule();
508 remove_wait_queue(p, &wait);
509 restore_flags(flags);
510 }
511
512 void interruptible_sleep_on(struct wait_queue **p)
513 {
514 __sleep_on(p,TASK_INTERRUPTIBLE);
515 }
516
517 void sleep_on(struct wait_queue **p)
518 {
519 __sleep_on(p,TASK_UNINTERRUPTIBLE);
520 }
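A sketch of the usual driver pattern built on these primitives (illustrative names my_wait, my_data_ready, wait_for_data and data_arrived; not part of the original file): a process-context reader sleeps on a 2.0-style wait queue until an interrupt handler reports data and wakes it.

static struct wait_queue *my_wait = NULL;	/* wait queue head, 2.0 style */
static volatile int my_data_ready = 0;

static int wait_for_data(void)			/* process context */
{
	while (!my_data_ready) {
		interruptible_sleep_on(&my_wait);
		if (current->signal & ~current->blocked)
			return -ERESTARTSYS;	/* woken by a signal, not by data */
	}
	my_data_ready = 0;
	return 0;
}

static void data_arrived(void)			/* interrupt context */
{
	my_data_ready = 1;
	wake_up_interruptible(&my_wait);
}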
521
522
523
524
525
526 static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
527 #define SLOW_BUT_DEBUGGING_TIMERS 0
528
529 void add_timer(struct timer_list * timer)
530 {
531 unsigned long flags;
532 struct timer_list *p;
533
534 #if SLOW_BUT_DEBUGGING_TIMERS
535 if (timer->next || timer->prev) {
536 printk("add_timer() called with non-zero list from %p\n",
537 __builtin_return_address(0));
538 return;
539 }
540 #endif
541 p = &timer_head;
542 save_flags(flags);
543 cli();
544 do {
545 p = p->next;
546 } while (timer->expires > p->expires);
547 timer->next = p;
548 timer->prev = p->prev;
549 p->prev = timer;
550 timer->prev->next = timer;
551 restore_flags(flags);
552 }
553
554 int del_timer(struct timer_list * timer)
555 {
556 int ret = 0;
557 if (timer->next) {
558 unsigned long flags;
559 struct timer_list * next;
560 save_flags(flags);
561 cli();
562 if ((next = timer->next) != NULL) {
563 (next->prev = timer->prev)->next = next;
564 timer->next = timer->prev = NULL;
565 ret = 1;
566 }
567 restore_flags(flags);
568 }
569 return ret;
570 }
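A one-shot timer sketch against the new-style timer list above (my_timer, my_timer_fn and the two-second delay are illustrative, not from the original file). Note that expires holds an absolute jiffies value, exactly as schedule() uses it for process timeouts:

static struct timer_list my_timer;

static void my_timer_fn(unsigned long data)
{
	printk("timer fired, data=%lu\n", data);	/* runs from timer_bh() */
}

static void arm_my_timer(void)
{
	init_timer(&my_timer);			/* clears next/prev so add_timer() accepts it */
	my_timer.expires = jiffies + 2*HZ;	/* absolute expiry, two seconds from now */
	my_timer.data = 42;
	my_timer.function = my_timer_fn;
	add_timer(&my_timer);
}

static void cancel_my_timer(void)
{
	if (!del_timer(&my_timer))
		printk("timer had already expired\n");
}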
571
572 static inline void run_timer_list(void)
573 {
574 struct timer_list * timer;
575
576 while ((timer = timer_head.next) != &timer_head && timer->expires <= jiffies) {
577 void (*fn)(unsigned long) = timer->function;
578 unsigned long data = timer->data;
579 timer->next->prev = timer->prev;
580 timer->prev->next = timer->next;
581 timer->next = timer->prev = NULL;
582 sti();
583 fn(data);
584 cli();
585 }
586 }
587
588 static inline void run_old_timers(void)
589 {
590 struct timer_struct *tp;
591 unsigned long mask;
592
593 for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
594 if (mask > timer_active)
595 break;
596 if (!(mask & timer_active))
597 continue;
598 if (tp->expires > jiffies)
599 continue;
600 timer_active &= ~mask;
601 tp->fn();
602 sti();
603 }
604 }
605
606 void tqueue_bh(void)
607 {
608 run_task_queue(&tq_timer);
609 }
610
611 void immediate_bh(void)
612 {
613 run_task_queue(&tq_immediate);
614 }
615
616 unsigned long timer_active = 0;
617 struct timer_struct timer_table[32];
618
619
620
621
622
623
624
625 unsigned long avenrun[3] = { 0,0,0 };
626
627
628
629
630 static unsigned long count_active_tasks(void)
631 {
632 struct task_struct **p;
633 unsigned long nr = 0;
634
635 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
636 if (*p && ((*p)->state == TASK_RUNNING ||
637 (*p)->state == TASK_UNINTERRUPTIBLE ||
638 (*p)->state == TASK_SWAPPING))
639 nr += FIXED_1;
640 #ifdef __SMP__
641 nr-=(smp_num_cpus-1)*FIXED_1;
642 #endif
643 return nr;
644 }
645
646 static inline void calc_load(unsigned long ticks)
647 {
648 unsigned long active_tasks;
649 static int count = LOAD_FREQ;
650
651 count -= ticks;
652 if (count < 0) {
653 count += LOAD_FREQ;
654 active_tasks = count_active_tasks();
655 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
656 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
657 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
658 }
659 }
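A worked step of the fixed-point averaging, assuming the usual constants from <linux/sched.h> of this era (FSHIFT = 11, FIXED_1 = 2048, EXP_1 = 1884, LOAD_FREQ = 5*HZ):

/*
 * CALC_LOAD(load, exp, n) computes  load = (load*exp + n*(FIXED_1-exp)) >> FSHIFT.
 * Starting from avenrun[0] = 0 with one runnable task (active_tasks = 2048):
 *
 *	first 5-second update:   (0*1884   + 2048*164) >> 11 = 164   (about 0.08)
 *	second update:           (164*1884 + 2048*164) >> 11 = 314   (about 0.15)
 *
 * and so on, approaching 2048 exponentially, i.e. a displayed load of 1.00.
 */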
660
661
662
663
664
665
666
667
668
669
670 static void second_overflow(void)
671 {
672 long ltemp;
673
674
675 time_maxerror += time_tolerance >> SHIFT_USEC;
676 if ( time_maxerror > MAXPHASE )
677 time_maxerror = MAXPHASE;
678
679
680
681
682
683
684
685
686
687
688 switch (time_state) {
689
690 case TIME_OK:
691 if (time_status & STA_INS)
692 time_state = TIME_INS;
693 else if (time_status & STA_DEL)
694 time_state = TIME_DEL;
695 break;
696
697 case TIME_INS:
698 if (xtime.tv_sec % 86400 == 0) {
699 xtime.tv_sec--;
700 time_state = TIME_OOP;
701 printk("Clock: inserting leap second 23:59:60 UTC\n");
702 }
703 break;
704
705 case TIME_DEL:
706 if ((xtime.tv_sec + 1) % 86400 == 0) {
707 xtime.tv_sec++;
708 time_state = TIME_WAIT;
709 printk("Clock: deleting leap second 23:59:59 UTC\n");
710 }
711 break;
712
713 case TIME_OOP:
714 time_state = TIME_WAIT;
715 break;
716
717 case TIME_WAIT:
718 if (!(time_status & (STA_INS | STA_DEL)))
719 time_state = TIME_OK;
720 }
721
722
723
724
725
726
727
728
729
730
731 if (time_offset < 0) {
732 ltemp = -time_offset;
733 if (!(time_status & STA_FLL))
734 ltemp >>= SHIFT_KG + time_constant;
735 if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
736 ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
737 time_offset += ltemp;
738 time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
739 } else {
740 ltemp = time_offset;
741 if (!(time_status & STA_FLL))
742 ltemp >>= SHIFT_KG + time_constant;
743 if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
744 ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
745 time_offset -= ltemp;
746 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
747 }
748
749
750
751
752
753
754
755
756 pps_valid++;
757 if (pps_valid == PPS_VALID) {
758 pps_jitter = MAXTIME;
759 pps_stabil = MAXFREQ;
760 time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
761 STA_PPSWANDER | STA_PPSERROR);
762 }
763 ltemp = time_freq + pps_freq;
764 if (ltemp < 0)
765 time_adj -= -ltemp >>
766 (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
767 else
768 time_adj += ltemp >>
769 (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
770
771 #if HZ == 100
772
773 if (time_adj < 0)
774 time_adj -= -time_adj >> 2;
775 else
776 time_adj += time_adj >> 2;
777 #endif
778 }
779
780
781 static void update_wall_time_one_tick(void)
782 {
783
784
785
786
787 time_phase += time_adj;
788 if (time_phase <= -FINEUSEC) {
789 long ltemp = -time_phase >> SHIFT_SCALE;
790 time_phase += ltemp << SHIFT_SCALE;
791 xtime.tv_usec += tick + time_adjust_step - ltemp;
792 }
793 else if (time_phase >= FINEUSEC) {
794 long ltemp = time_phase >> SHIFT_SCALE;
795 time_phase -= ltemp << SHIFT_SCALE;
796 xtime.tv_usec += tick + time_adjust_step + ltemp;
797 } else
798 xtime.tv_usec += tick + time_adjust_step;
799
800 if (time_adjust) {
801
802
803
804
805
806
807
808
809
810 if (time_adjust > tickadj)
811 time_adjust_step = tickadj;
812 else if (time_adjust < -tickadj)
813 time_adjust_step = -tickadj;
814 else
815 time_adjust_step = time_adjust;
816
817
818 time_adjust -= time_adjust_step;
819 }
820 else
821 time_adjust_step = 0;
822 }
823
824
825
826
827
828
829
830
831 static void update_wall_time(unsigned long ticks)
832 {
833 do {
834 ticks--;
835 update_wall_time_one_tick();
836 } while (ticks);
837
838 if (xtime.tv_usec >= 1000000) {
839 xtime.tv_usec -= 1000000;
840 xtime.tv_sec++;
841 second_overflow();
842 }
843 }
844
845 static inline void do_process_times(struct task_struct *p,
846 unsigned long user, unsigned long system)
847 {
848 long psecs;
849
850 p->utime += user;
851 p->stime += system;
852
853 psecs = (p->stime + p->utime) / HZ;
854 if (psecs > p->rlim[RLIMIT_CPU].rlim_cur) {
855
856 if (psecs * HZ == p->stime + p->utime)
857 send_sig(SIGXCPU, p, 1);
858
859 if (psecs > p->rlim[RLIMIT_CPU].rlim_max)
860 send_sig(SIGKILL, p, 1);
861 }
862 }
863
864 static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
865 {
866 unsigned long it_virt = p->it_virt_value;
867
868 if (it_virt) {
869 if (it_virt <= ticks) {
870 it_virt = ticks + p->it_virt_incr;
871 send_sig(SIGVTALRM, p, 1);
872 }
873 p->it_virt_value = it_virt - ticks;
874 }
875 }
876
877 static inline void do_it_prof(struct task_struct * p, unsigned long ticks)
878 {
879 unsigned long it_prof = p->it_prof_value;
880
881 if (it_prof) {
882 if (it_prof <= ticks) {
883 it_prof = ticks + p->it_prof_incr;
884 send_sig(SIGPROF, p, 1);
885 }
886 p->it_prof_value = it_prof - ticks;
887 }
888 }
889
890 static __inline__ void update_one_process(struct task_struct *p,
891 unsigned long ticks, unsigned long user, unsigned long system)
892 {
893 do_process_times(p, user, system);
894 do_it_virt(p, user);
895 do_it_prof(p, ticks);
896 }
897
898 static void update_process_times(unsigned long ticks, unsigned long system)
899 {
900 #ifndef __SMP__
901 struct task_struct * p = current;
902 unsigned long user = ticks - system;
903 if (p->pid) {
904 p->counter -= ticks;
905 if (p->counter < 0) {
906 p->counter = 0;
907 need_resched = 1;
908 }
909 if (p->priority < DEF_PRIORITY)
910 kstat.cpu_nice += user;
911 else
912 kstat.cpu_user += user;
913 kstat.cpu_system += system;
914 }
915 update_one_process(p, ticks, user, system);
916 #else
917 int cpu,j;
918 cpu = smp_processor_id();
919 for (j=0;j<smp_num_cpus;j++)
920 {
921 int i = cpu_logical_map[j];
922 struct task_struct *p;
923
924 #ifdef __SMP_PROF__
925 if (test_bit(i,&smp_idle_map))
926 smp_idle_count[i]++;
927 #endif
928 p = current_set[i];
929
930
931
932 if (p->pid) {
933
934 unsigned long utime = ticks;
935 unsigned long stime = 0;
936 if (cpu == i) {
937 utime = ticks-system;
938 stime = system;
939 } else if (smp_proc_in_lock[i]) {
940 utime = 0;
941 stime = ticks;
942 }
943 update_one_process(p, ticks, utime, stime);
944
945 p->counter -= ticks;
946 if (p->counter >= 0)
947 continue;
948 p->counter = 0;
949 } else {
950
951
952
953
954 if (!(0x7fffffff & smp_process_available))
955 continue;
956 }
957
958 if (i==cpu)
959 need_resched = 1;
960 else
961 smp_message_pass(i, MSG_RESCHEDULE, 0L, 0);
962 }
963 #endif
964 }
965
966 static unsigned long lost_ticks = 0;
967 static unsigned long lost_ticks_system = 0;
968
969 static void timer_bh(void)
970 {
971 unsigned long ticks, system;
972
973 run_old_timers();
974
975 cli();
976 run_timer_list();
977 ticks = lost_ticks;
978 lost_ticks = 0;
979 system = lost_ticks_system;
980 lost_ticks_system = 0;
981 sti();
982
983 if (ticks) {
984 calc_load(ticks);
985 update_wall_time(ticks);
986 update_process_times(ticks, system);
987 }
988 }
989
990
991
992
993
994
995 #if HZ > 100
996 #define should_run_timers(x) ((x) >= HZ/100)
997 #else
998 #define should_run_timers(x) (1)
999 #endif
1000
1001 void do_timer(struct pt_regs * regs)
1002 {
1003 (*(unsigned long *)&jiffies)++;	/* strip the 'volatile' qualifier so gcc can bump jiffies in place */
1004 lost_ticks++;
1005 if (should_run_timers(lost_ticks))
1006 mark_bh(TIMER_BH);
1007 if (!user_mode(regs)) {
1008 lost_ticks_system++;
1009 if (prof_buffer && current->pid) {
1010 extern int _stext;
1011 unsigned long ip = instruction_pointer(regs);
1012 ip -= (unsigned long) &_stext;
1013 ip >>= prof_shift;
1014 if (ip < prof_len)
1015 prof_buffer[ip]++;
1016 }
1017 }
1018 if (tq_timer)
1019 mark_bh(TQUEUE_BH);
1020 }
1021
1022 #ifndef __alpha__
1023
1024
1025
1026
1027
1028 asmlinkage unsigned int sys_alarm(unsigned int seconds)
1029 {
1030 struct itimerval it_new, it_old;
1031 unsigned int oldalarm;
1032
1033 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
1034 it_new.it_value.tv_sec = seconds;
1035 it_new.it_value.tv_usec = 0;
1036 _setitimer(ITIMER_REAL, &it_new, &it_old);
1037 oldalarm = it_old.it_value.tv_sec;
1038
1039
1040 if (it_old.it_value.tv_usec)
1041 oldalarm++;
1042 return oldalarm;
1043 }
1044
1045
1046
1047
1048
1049 asmlinkage int sys_getpid(void)
1050 {
1051 return current->pid;
1052 }
1053
1054 asmlinkage int sys_getppid(void)
1055 {
1056 return current->p_opptr->pid;
1057 }
1058
1059 asmlinkage int sys_getuid(void)
1060 {
1061 return current->uid;
1062 }
1063
1064 asmlinkage int sys_geteuid(void)
1065 {
1066 return current->euid;
1067 }
1068
1069 asmlinkage int sys_getgid(void)
1070 {
1071 return current->gid;
1072 }
1073
1074 asmlinkage int sys_getegid(void)
1075 {
1076 return current->egid;
1077 }
1078
1079
1080
1081
1082
1083
1084 asmlinkage int sys_nice(int increment)
1085 {
1086 unsigned long newprio;
1087 int increase = 0;
1088
1089 newprio = increment;
1090 if (increment < 0) {
1091 if (!suser())
1092 return -EPERM;
1093 newprio = -increment;
1094 increase = 1;
1095 }
1096 if (newprio > 40)
1097 newprio = 40;
1098
1099
1100
1101
1102
1103
1104
1105 newprio = (newprio * DEF_PRIORITY + 10) / 20;
1106 increment = newprio;
1107 if (increase)
1108 increment = -increment;
1109 newprio = current->priority - increment;
1110 if (newprio < 1)
1111 newprio = 1;
1112 if (newprio > DEF_PRIORITY*2)
1113 newprio = DEF_PRIORITY*2;
1114 current->priority = newprio;
1115 return 0;
1116 }
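To make the scaling concrete (assuming HZ = 100, so DEF_PRIORITY = 20*HZ/100 = 20): sys_nice(10) maps the increment through (10*20 + 10)/20 = 10 and lowers current->priority from the default 20 to 10, i.e. half the default time slice, while sys_nice(-10), which requires superuser rights, raises it to 30. The final clamps keep priority between 1 and 40 (DEF_PRIORITY*2).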
1117
1118 #endif
1119
1120 static struct task_struct *find_process_by_pid(pid_t pid) {
1121 struct task_struct *p, *q;
1122
1123 if (pid == 0)
1124 p = current;
1125 else {
1126 p = 0;
1127 for_each_task(q) {
1128 if (q && q->pid == pid) {
1129 p = q;
1130 break;
1131 }
1132 }
1133 }
1134 return p;
1135 }
1136
1137 static int setscheduler(pid_t pid, int policy,
1138 struct sched_param *param)
1139 {
1140 int error;
1141 struct sched_param lp;
1142 struct task_struct *p;
1143
1144 if (!param || pid < 0)
1145 return -EINVAL;
1146
1147 error = verify_area(VERIFY_READ, param, sizeof(struct sched_param));
1148 if (error)
1149 return error;
1150 memcpy_fromfs(&lp, param, sizeof(struct sched_param));
1151
1152 p = find_process_by_pid(pid);
1153 if (!p)
1154 return -ESRCH;
1155
1156 if (policy < 0)
1157 policy = p->policy;
1158 else if (policy != SCHED_FIFO && policy != SCHED_RR &&
1159 policy != SCHED_OTHER)
1160 return -EINVAL;
1161
1162
1163
1164
1165
1166 if (lp.sched_priority < 0 || lp.sched_priority > 99)
1167 return -EINVAL;
1168 if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
1169 return -EINVAL;
1170
1171 if ((policy == SCHED_FIFO || policy == SCHED_RR) && !suser())
1172 return -EPERM;
1173 if ((current->euid != p->euid) && (current->euid != p->uid) &&
1174 !suser())
1175 return -EPERM;
1176
1177 p->policy = policy;
1178 p->rt_priority = lp.sched_priority;
1179 if (p->next_run)
1180 move_last_runqueue(p);
1181 schedule();
1182
1183 return 0;
1184 }
1185
1186 asmlinkage int sys_sched_setscheduler(pid_t pid, int policy,
1187 struct sched_param *param)
1188 {
1189 return setscheduler(pid, policy, param);
1190 }
1191
1192 asmlinkage int sys_sched_setparam(pid_t pid, struct sched_param *param)
1193 {
1194 return setscheduler(pid, -1, param);
1195 }
1196
1197 asmlinkage int sys_sched_getscheduler(pid_t pid)
1198 {
1199 struct task_struct *p;
1200
1201 if (pid < 0)
1202 return -EINVAL;
1203
1204 p = find_process_by_pid(pid);
1205 if (!p)
1206 return -ESRCH;
1207
1208 return p->policy;
1209 }
1210
1211 asmlinkage int sys_sched_getparam(pid_t pid, struct sched_param *param)
1212 {
1213 int error;
1214 struct task_struct *p;
1215 struct sched_param lp;
1216
1217 if (!param || pid < 0)
1218 return -EINVAL;
1219
1220 error = verify_area(VERIFY_WRITE, param, sizeof(struct sched_param));
1221 if (error)
1222 return error;
1223
1224 p = find_process_by_pid(pid);
1225 if (!p)
1226 return -ESRCH;
1227
1228 lp.sched_priority = p->rt_priority;
1229 memcpy_tofs(param, &lp, sizeof(struct sched_param));
1230
1231 return 0;
1232 }
1233
1234 asmlinkage int sys_sched_yield(void)
1235 {
1236 move_last_runqueue(current);
1237
1238 return 0;
1239 }
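A user-space sketch of how these calls are reached (ordinary libc/POSIX usage, not part of the kernel file): the priority must be 1..99 for SCHED_RR or SCHED_FIFO and 0 for SCHED_OTHER, matching the checks in setscheduler() above.

#include <stdio.h>
#include <sched.h>

int main(void)
{
	struct sched_param sp;

	sp.sched_priority = 10;			/* valid range for SCHED_RR is 1..99 */
	if (sched_setscheduler(0, SCHED_RR, &sp) < 0)
		perror("sched_setscheduler");	/* EPERM unless running as root */

	sched_yield();				/* ends up in sys_sched_yield() above */
	return 0;
}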
1240
1241 asmlinkage int sys_sched_get_priority_max(int policy)
1242 {
1243 switch (policy) {
1244 case SCHED_FIFO:
1245 case SCHED_RR:
1246 return 99;
1247 case SCHED_OTHER:
1248 return 0;
1249 }
1250
1251 return -EINVAL;
1252 }
1253
1254 asmlinkage int sys_sched_get_priority_min(int policy)
1255 {
1256 switch (policy) {
1257 case SCHED_FIFO:
1258 case SCHED_RR:
1259 return 1;
1260 case SCHED_OTHER:
1261 return 0;
1262 }
1263
1264 return -EINVAL;
1265 }
1266
1267 asmlinkage int sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
1268 {
1269 int error;
1270 struct timespec t;
1271
1272 error = verify_area(VERIFY_WRITE, interval, sizeof(struct timespec));
1273 if (error)
1274 return error;
1275
1276 t.tv_sec = 0;
1277 t.tv_nsec = 0;
1278 return -ENOSYS;	/* reporting the RR time quantum is not implemented yet; the copy below is never reached */
1279 memcpy_tofs(interval, &t, sizeof(struct timespec));
1280
1281 return 0;
1282 }
1283
1284
1285
1286
1287
1288 static unsigned long timespectojiffies(struct timespec *value)
1289 {
1290 unsigned long sec = (unsigned) value->tv_sec;
1291 long nsec = value->tv_nsec;
1292
1293 if (sec > (LONG_MAX / HZ))
1294 return LONG_MAX;
1295 nsec += 1000000000L / HZ - 1;
1296 nsec /= 1000000000L / HZ;
1297 return HZ * sec + nsec;
1298 }
1299
1300 static void jiffiestotimespec(unsigned long jiffies, struct timespec *value)
1301 {
1302 value->tv_nsec = (jiffies % HZ) * (1000000000L / HZ);
1303 value->tv_sec = jiffies / HZ;
1304 return;
1305 }
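A worked conversion, assuming HZ = 100 (the i386 default, so one jiffy is 10 ms):

/*
 * timespectojiffies() for a 1.5 s request (tv_sec = 1, tv_nsec = 500000000):
 *	nsec = 500000000 + 9999999 = 509999999;  509999999 / 10000000 = 50
 *	result = 100*1 + 50 = 150 jiffies
 *
 * jiffiestotimespec(150, &t) gives back t.tv_sec = 1, t.tv_nsec = 500000000.
 * Rounding nsec up guarantees the sleep is never shorter than requested.
 */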
1306
1307 asmlinkage int sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
1308 {
1309 int error;
1310 struct timespec t;
1311 unsigned long expire;
1312
1313 error = verify_area(VERIFY_READ, rqtp, sizeof(struct timespec));
1314 if (error)
1315 return error;
1316 memcpy_fromfs(&t, rqtp, sizeof(struct timespec));
1317 if (rmtp) {
1318 error = verify_area(VERIFY_WRITE, rmtp,
1319 sizeof(struct timespec));
1320 if (error)
1321 return error;
1322 }
1323
1324 if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
1325 return -EINVAL;
1326
1327 if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
1328 current->policy != SCHED_OTHER) {
1329
1330
1331
1332
1333 udelay((t.tv_nsec + 999) / 1000);
1334 return 0;
1335 }
1336
1337 expire = timespectojiffies(&t) + (t.tv_sec || t.tv_nsec) + jiffies;	/* add one jiffy for any non-zero request so the sleep never ends early */
1338 current->timeout = expire;
1339 current->state = TASK_INTERRUPTIBLE;
1340 schedule();
1341
1342 if (expire > jiffies) {
1343 if (rmtp) {
1344 jiffiestotimespec(expire - jiffies -
1345 (expire > jiffies + 1), &t);
1346 memcpy_tofs(rmtp, &t, sizeof(struct timespec));
1347 }
1348 return -EINTR;
1349 }
1350
1351 return 0;
1352 }
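A user-space counterpart (plain POSIX usage, illustrative only), showing how the rmtp value written above lets a caller resume a sleep that was cut short by a signal:

#include <stdio.h>
#include <errno.h>
#include <time.h>

int main(void)
{
	struct timespec req = { 2, 500000000L };	/* 2.5 seconds */
	struct timespec rem;

	while (nanosleep(&req, &rem) < 0 && errno == EINTR)
		req = rem;		/* interrupted: retry with the remaining time */
	return 0;
}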
1353
1354 static void show_task(int nr,struct task_struct * p)
1355 {
1356 unsigned long free;
1357 static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
1358
1359 printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
1360 if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
1361 printk(stat_nam[p->state]);
1362 else
1363 printk(" ");
1364 #if ((~0UL) == 0xffffffff)
1365 if (p == current)
1366 printk(" current ");
1367 else
1368 printk(" %08lX ", thread_saved_pc(&p->tss));
1369 #else
1370 if (p == current)
1371 printk(" current task ");
1372 else
1373 printk(" %016lx ", thread_saved_pc(&p->tss));
1374 #endif
1375 for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
1376 if (((unsigned long *)p->kernel_stack_page)[free])
1377 break;
1378 }
1379 printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
1380 if (p->p_cptr)
1381 printk("%5d ", p->p_cptr->pid);
1382 else
1383 printk(" ");
1384 if (p->p_ysptr)
1385 printk("%7d", p->p_ysptr->pid);
1386 else
1387 printk(" ");
1388 if (p->p_osptr)
1389 printk(" %5d\n", p->p_osptr->pid);
1390 else
1391 printk("\n");
1392 }
1393
1394 void show_state(void)
1395 {
1396 int i;
1397
1398 #if ((~0UL) == 0xffffffff)
1399 printk("\n"
1400 " free sibling\n");
1401 printk(" task PC stack pid father child younger older\n");
1402 #else
1403 printk("\n"
1404 " free sibling\n");
1405 printk(" task PC stack pid father child younger older\n");
1406 #endif
1407 for (i=0 ; i<NR_TASKS ; i++)
1408 if (task[i])
1409 show_task(i,task[i]);
1410 }
1411
1412 void sched_init(void)
1413 {
1414
1415
1416
1417
1418 int cpu=smp_processor_id();
1419 #ifndef __SMP__
1420 current_set[cpu]=&init_task;
1421 #else
1422 init_task.processor=cpu;
1423 for(cpu = 0; cpu < NR_CPUS; cpu++)
1424 current_set[cpu] = &init_task;
1425 #endif
1426 init_bh(TIMER_BH, timer_bh);
1427 init_bh(TQUEUE_BH, tqueue_bh);
1428 init_bh(IMMEDIATE_BH, immediate_bh);
1429 }