This source file includes following definitions.
- add_to_runqueue
- del_from_runqueue
- move_last_runqueue
- wake_up_process
- process_timeout
- goodness
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- run_timer_list
- run_old_timers
- tqueue_bh
- immediate_bh
- count_active_tasks
- calc_load
- second_overflow
- update_wall_time_one_tick
- update_wall_time
- do_process_times
- do_it_virt
- do_it_prof
- update_one_process
- update_process_times
- timer_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- find_process_by_pid
- setscheduler
- sys_sched_setscheduler
- sys_sched_setparam
- sys_sched_getscheduler
- sys_sched_getparam
- sys_sched_yield
- sys_sched_get_priority_max
- sys_sched_get_priority_min
- sys_sched_rr_get_interval
- timespectojiffies
- jiffiestotimespec
- sys_nanosleep
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/signal.h>
15 #include <linux/sched.h>
16 #include <linux/timer.h>
17 #include <linux/kernel.h>
18 #include <linux/kernel_stat.h>
19 #include <linux/fdreg.h>
20 #include <linux/errno.h>
21 #include <linux/time.h>
22 #include <linux/ptrace.h>
23 #include <linux/delay.h>
24 #include <linux/interrupt.h>
25 #include <linux/tqueue.h>
26 #include <linux/resource.h>
27 #include <linux/mm.h>
28 #include <linux/smp.h>
29
30 #include <asm/system.h>
31 #include <asm/io.h>
32 #include <asm/segment.h>
33 #include <asm/pgtable.h>
34 #include <asm/mmu_context.h>
35
36 #include <linux/timex.h>
37
38
39
40
41
int securelevel = 0;			/* system security level */

long tick = 1000000 / HZ;		/* timer interrupt period (us) */
volatile struct timeval xtime;		/* the current wall-clock time */
int tickadj = 500/HZ;			/* microsecs adjusted per adjtime() tick */

DECLARE_TASK_QUEUE(tq_timer);
DECLARE_TASK_QUEUE(tq_immediate);
DECLARE_TASK_QUEUE(tq_scheduler);

/*
 * phase-locked-loop (NTP) variables, maintained by second_overflow()
 */
int time_state = TIME_BAD;		/* clock synchronization status */
int time_status = STA_UNSYNC | STA_PLL;	/* clock status bits */
long time_offset = 0;			/* time adjustment (scaled us) */
long time_constant = 2;			/* PLL time constant */
long time_tolerance = MAXFREQ;		/* frequency tolerance (scaled ppm) */
long time_precision = 1;		/* clock precision (us) */
long time_maxerror = 0x70000000;	/* maximum error */
long time_esterror = 0x70000000;	/* estimated error */
long time_phase = 0;			/* phase offset (scaled us) */
long time_freq = 0;			/* frequency offset (scaled ppm) */
long time_adj = 0;			/* tick adjust (scaled 1/HZ) */
long time_reftime = 0;			/* time at last adjustment (s) */

long time_adjust = 0;			/* outstanding adjtime() slew */
long time_adjust_step = 0;		/* per-tick portion of the slew */

int need_resched = 0;			/* set when schedule() should run */
unsigned long event = 0;

extern int _setitimer(int, struct itimerval *, struct itimerval *);
unsigned int * prof_buffer = NULL;	/* kernel profiling buckets */
unsigned long prof_len = 0;
unsigned long prof_shift = 0;

#define _S(nr) (1<<((nr)-1))

extern void mem_use(void);

/* stacks for the initial task; STACK_MAGIC at the base detects overflow */
static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
unsigned long init_user_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;

struct mm_struct init_mm = INIT_MM;
struct task_struct init_task = INIT_TASK;	/* task 0, the idle task */

unsigned long volatile jiffies=0;	/* ticks since boot */

struct task_struct *current_set[NR_CPUS];	/* running task per CPU */
struct task_struct *last_task_used_math = NULL;

struct task_struct * task[NR_TASKS] = {&init_task, };

struct kernel_stat kstat = { 0 };
101
/*
 * Link a task onto the circular run-queue anchored at init_task
 * (new tasks go to the tail, just before init_task).  Must be
 * called with interrupts disabled.
 */
static inline void add_to_runqueue(struct task_struct * p)
{
#ifdef __SMP__
	int cpu=smp_processor_id();
#endif
#if 1	/* sanity checking */
	if (p->next_run || p->prev_run) {
		printk("task already on run-queue\n");
		return;
	}
#endif
	/* preempt the current task if the newcomer has clearly more
	 * of its time slice left */
	if (p->counter > current->counter + 3)
		need_resched = 1;
	nr_running++;
	(p->prev_run = init_task.prev_run)->next_run = p;
	p->next_run = &init_task;
	init_task.prev_run = p;
#ifdef __SMP__
	/* bit 31 acts as a lock around the smp_process_available
	 * counter; spin until we own it (safe only under cli()) */
	while(set_bit(31,&smp_process_available));
#if 0
	{
		while(test_bit(31,&smp_process_available))
		{
			if(clear_bit(cpu,&smp_invalidate_needed))
			{
				local_invalidate();
				set_bit(cpu,&cpu_callin_map[0]);
			}
		}
	}
#endif
	smp_process_available++;
	clear_bit(31,&smp_process_available);
	/* kick an idle CPU (one running pid 0) to pick this task up */
	if ((0!=p->pid) && smp_threads_ready)
	{
		int i;
		for (i=0;i<smp_num_cpus;i++)
		{
			if (0==current_set[cpu_logical_map[i]]->pid)
			{
				smp_message_pass(cpu_logical_map[i], MSG_RESCHEDULE, 0L, 0);
				break;
			}
		}
	}
#endif
}
150
151 static inline void del_from_runqueue(struct task_struct * p)
152 {
153 struct task_struct *next = p->next_run;
154 struct task_struct *prev = p->prev_run;
155
156 #if 1
157 if (!next || !prev) {
158 printk("task not on run-queue\n");
159 return;
160 }
161 #endif
162 if (p == &init_task) {
163 static int nr = 0;
164 if (nr < 5) {
165 nr++;
166 printk("idle task may not sleep\n");
167 }
168 return;
169 }
170 nr_running--;
171 next->prev_run = prev;
172 prev->next_run = next;
173 p->next_run = NULL;
174 p->prev_run = NULL;
175 }
176
177 static inline void move_last_runqueue(struct task_struct * p)
178 {
179 struct task_struct *next = p->next_run;
180 struct task_struct *prev = p->prev_run;
181
182 next->prev_run = prev;
183 prev->next_run = next;
184 (p->prev_run = init_task.prev_run)->next_run = p;
185 p->next_run = &init_task;
186 init_task.prev_run = p;
187 }
188
189
190
191
192
193
194
195
196
/*
 * Wake up a process: mark it runnable and put it on the run-queue
 * if it is not already there.  Interrupts are disabled around the
 * state change, so this is safe to call from interrupt context.
 */
inline void wake_up_process(struct task_struct * p)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	p->state = TASK_RUNNING;
	if (!p->next_run)	/* not linked => not yet on the run-queue */
		add_to_runqueue(p);
	restore_flags(flags);
}
208
209 static void process_timeout(unsigned long __data)
210 {
211 struct task_struct * p = (struct task_struct *) __data;
212
213 p->timeout = 0;
214 wake_up_process(p);
215 }
216
217
218
219
220
221
222
223
224
225
226
227
228
229
/*
 * Decide how desirable a process is to run next, weighing processes
 * against each other (including CPU affinity on SMP).
 *
 * Return values:
 *	-1000: never select this (already running on another CPU)
 *	    0: time slice used up, lowest value for SCHED_OTHER
 *	  +ve: "goodness" value (the larger, the better)
 *	+1000 and up: realtime process, select this
 */
static inline int goodness(struct task_struct * p, struct task_struct * prev, int this_cpu)
{
	int weight;

#ifdef __SMP__
	/* We are not permitted to take a process already running elsewhere */
	if (p->processor != NO_PROC_ID)
		return -1000;
#endif

	/*
	 * Realtime processes always beat SCHED_OTHER ones; among
	 * themselves they are ordered by rt_priority.
	 */
	if (p->policy != SCHED_OTHER)
		return 1000 + p->rt_priority;

	/*
	 * First approximation: the number of clock ticks the process
	 * has left in its slice.  Skip the refinements if the slice
	 * is exhausted.
	 */
	weight = p->counter;
	if (weight) {

#ifdef __SMP__
		/* Give a largish advantage to the processor the task
		 * last ran on (cache/TLB warmth). */
		if (p->last_processor == this_cpu)
			weight += PROC_CHANGE_PENALTY;
#endif

		/* .. and a slight advantage to the current process */
		if (p == prev)
			weight += 1;
	}

	return weight;
}
272
273
274
275
276
277
278
279
280
281
282
/*
 * 'schedule()' is the scheduler function proper.  It picks the most
 * desirable runnable task (see goodness()) and switches to it.
 *
 * NOTE!  Task 0 is the 'idle' task, which gets called when no other
 * task can run.  It cannot be killed and it cannot sleep; the queue
 * anchored at init_task IS the run-queue.
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * prev, * next;
	unsigned long timeout = 0;
	int this_cpu=smp_processor_id();

	/* scheduling from interrupt context is a fatal mistake */
	if (intr_count) {
		printk("Aiee: scheduling in interrupt\n");
		return;
	}
	/* flush any pending bottom halves first */
	if (bh_active & bh_mask) {
		intr_count = 1;
		do_bottom_half();
		intr_count = 0;
	}
	run_task_queue(&tq_scheduler);

	need_resched = 0;
	prev = current;
	cli();
	/* an exhausted SCHED_RR task gets a fresh slice and goes to
	 * the back of the queue */
	if (!prev->counter && prev->policy == SCHED_RR) {
		prev->counter = prev->priority;
		move_last_runqueue(prev);
	}
	switch (prev->state) {
		case TASK_INTERRUPTIBLE:
			/* a pending unblocked signal or an expired
			 * timeout makes the task runnable again */
			if (prev->signal & ~prev->blocked)
				goto makerunnable;
			timeout = prev->timeout;
			if (timeout && (timeout <= jiffies)) {
				prev->timeout = 0;
				timeout = 0;
		makerunnable:
				prev->state = TASK_RUNNING;
				break;
			}
			/* otherwise fall through: really going to sleep */
		default:
			del_from_runqueue(prev);
		case TASK_RUNNING:
	}
	p = init_task.next_run;
	sti();

#ifdef __SMP__
	/* safe, as schedule() cannot be re-entered */
	prev->processor = NO_PROC_ID;
#endif

	/*
	 * Note: new tasks may appear on the run-queue while interrupts
	 * are enabled, but they are added in front of init_task, so the
	 * list starting at "p" stays fixed for our scan.
	 */
	c = -1000;
	next = &init_task;
	while (p != &init_task) {
		int weight = goodness(p, prev, this_cpu);
		if (weight > c)
			c = weight, next = p;
		p = p->next_run;
	}

	/* every runnable task is out of quantum: recharge all counters */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
#ifdef __SMP__
	/* never swap one CPU's idle task for another's */
	if(!prev->pid && !next->pid)
		next=prev;
	/* allocate the chosen process to this CPU */
	next->processor = this_cpu;
	next->last_processor = this_cpu;
#endif
#ifdef __SMP_PROF__
	/* mark whether this CPU is about to idle */
	if (0==next->pid)
		set_bit(this_cpu,&smp_idle_map);
	else
		clear_bit(this_cpu,&smp_idle_map);
#endif
	if (prev != next) {
		struct timer_list timer;

		kstat.context_swtch++;
		/* the wake-up timer lives on prev's kernel stack; that
		 * is valid because prev does not return from switch_to()
		 * until it is rescheduled, at which point it deletes
		 * the timer below */
		if (timeout) {
			init_timer(&timer);
			timer.expires = timeout;
			timer.data = (unsigned long) prev;
			timer.function = process_timeout;
			add_timer(&timer);
		}
		get_mmu_context(next);
		switch_to(prev,next);
		if (timeout)
			del_timer(&timer);
	}
}
397
398 #ifndef __alpha__
399
400
401
402
403
/*
 * Suspend the caller until a signal arrives.  -ERESTARTNOHAND tells
 * the signal code not to restart the syscall after the handler runs,
 * so user space sees -EINTR as pause() requires.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
410
411 #endif
412
413
414
415
416
417
418
419
420
421 void wake_up(struct wait_queue **q)
422 {
423 struct wait_queue *tmp;
424 struct task_struct * p;
425
426 if (!q || !(tmp = *q))
427 return;
428 do {
429 if ((p = tmp->task) != NULL) {
430 if ((p->state == TASK_UNINTERRUPTIBLE) ||
431 (p->state == TASK_INTERRUPTIBLE))
432 wake_up_process(p);
433 }
434 if (!tmp->next) {
435 printk("wait_queue is bad (eip = %p)\n",
436 __builtin_return_address(0));
437 printk(" q = %p\n",q);
438 printk(" *q = %p\n",*q);
439 printk(" tmp = %p\n",tmp);
440 break;
441 }
442 tmp = tmp->next;
443 } while (tmp != *q);
444 }
445
446 void wake_up_interruptible(struct wait_queue **q)
447 {
448 struct wait_queue *tmp;
449 struct task_struct * p;
450
451 if (!q || !(tmp = *q))
452 return;
453 do {
454 if ((p = tmp->task) != NULL) {
455 if (p->state == TASK_INTERRUPTIBLE)
456 wake_up_process(p);
457 }
458 if (!tmp->next) {
459 printk("wait_queue is bad (eip = %p)\n",
460 __builtin_return_address(0));
461 printk(" q = %p\n",q);
462 printk(" *q = %p\n",*q);
463 printk(" tmp = %p\n",tmp);
464 break;
465 }
466 tmp = tmp->next;
467 } while (tmp != *q);
468 }
469
/*
 * Semaphore slow path: sleep uninterruptibly until the count goes
 * positive.  The state is re-set to TASK_UNINTERRUPTIBLE after each
 * schedule() to close the race with an up() that wakes us between
 * the count test and going back to sleep.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
482
/*
 * Common helper for sleep_on()/interruptible_sleep_on(): queue the
 * caller (wait entry lives on its kernel stack) and schedule away.
 * The state is set before enqueueing, so a wake-up arriving right
 * after add_wait_queue() is not lost.  Interrupts are enabled for
 * the sleep and the caller's flags restored afterwards.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
500
/* Sleep on a wait queue until woken; a signal also wakes us up. */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}
505
/* Sleep on a wait queue until explicitly woken (signals ignored). */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
510
511
512
513
514
/*
 * Head of the new-style timer list, kept sorted by increasing expiry.
 * The head's ~0 expiry is a sentinel, so add_timer()'s insertion
 * loop always terminates.
 */
static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
#define SLOW_BUT_DEBUGGING_TIMERS 0
517
/*
 * Insert a timer into the expiry-sorted timer list, with interrupts
 * disabled across the list manipulation.
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	if (timer->next || timer->prev) {
		printk("add_timer() called with non-zero list from %p\n",
			__builtin_return_address(0));
		return;
	}
#endif
	p = &timer_head;
	save_flags(flags);
	cli();
	/* find the first timer expiring no earlier than ours; the
	 * head's ~0 expiry guarantees termination */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	/* link in just before it */
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}
542
/*
 * Remove a timer from the list if it is still pending.  Returns 1
 * when the timer was queued, 0 otherwise.  The debugging variant
 * walks the whole list to verify the timer really is on it.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	struct timer_list * p;

	p = &timer_head;
	save_flags(flags);
	cli();
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			return 1;
		}
	}
	/* not found on the list but links are set: caller bug */
	if (timer->next || timer->prev)
		printk("del_timer() called from %p with timer not initialized\n",
			__builtin_return_address(0));
	restore_flags(flags);
	return 0;
#else
	struct timer_list * next;
	int ret = 0;
	save_flags(flags);
	cli();
	/* a NULL next link means the timer is not queued */
	if ((next = timer->next) != NULL) {
		(next->prev = timer->prev)->next = next;
		timer->next = timer->prev = NULL;
		ret = 1;
	}
	restore_flags(flags);
	return ret;
#endif
}
580
/*
 * Run all expired timers off the sorted list.  Entered with
 * interrupts disabled; each timer is unlinked first (handlers may
 * re-add themselves) and its function runs with interrupts enabled.
 */
static inline void run_timer_list(void)
{
	struct timer_list * timer;

	while ((timer = timer_head.next) != &timer_head && timer->expires <= jiffies) {
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();
		fn(data);
		cli();	/* protect the next list-head test */
	}
}
596
/*
 * Run expired old-style timer_table timers.  Each table slot is
 * flagged by a bit in timer_active; the bit is cleared before the
 * handler runs so a handler may safely re-arm its own slot.
 */
static inline void run_old_timers(void)
{
	struct timer_struct *tp;
	unsigned long mask;

	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;	/* no higher bits can be set */
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;
		tp->fn();
		sti();
	}
}
614
/* Bottom half that drains the tq_timer task queue (marked by do_timer()). */
void tqueue_bh(void)
{
	run_task_queue(&tq_timer);
}
619
/* Bottom half that drains the tq_immediate task queue. */
void immediate_bh(void)
{
	run_task_queue(&tq_immediate);
}
624
unsigned long timer_active = 0;		/* bitmask of armed timer_table slots */
struct timer_struct timer_table[32];	/* old-style static timers */

/*
 * The load average: three fixed-point (FIXED_1) exponentially
 * decaying averages over 1, 5 and 15 minutes, updated by
 * calc_load().  The name avenrun[] is the traditional one.
 */
unsigned long avenrun[3] = { 0,0,0 };
635
636
637
638
639 static unsigned long count_active_tasks(void)
640 {
641 struct task_struct **p;
642 unsigned long nr = 0;
643
644 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
645 if (*p && ((*p)->state == TASK_RUNNING ||
646 (*p)->state == TASK_UNINTERRUPTIBLE ||
647 (*p)->state == TASK_SWAPPING))
648 nr += FIXED_1;
649 #ifdef __SMP__
650 nr-=(smp_num_cpus-1)*FIXED_1;
651 #endif
652 return nr;
653 }
654
655 static inline void calc_load(unsigned long ticks)
656 {
657 unsigned long active_tasks;
658 static int count = LOAD_FREQ;
659
660 count -= ticks;
661 if (count < 0) {
662 count += LOAD_FREQ;
663 active_tasks = count_active_tasks();
664 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
665 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
666 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
667 }
668 }
669
670
671
672
673
674
675
676
677
678
/*
 * Called once per second from update_wall_time(): runs the NTP
 * leap-second state machine and recomputes the per-tick phase and
 * frequency adjustments (time_adj) for the next second.  The PLL
 * arithmetic follows Dave Mills' kernel clock model.
 */
static void second_overflow(void)
{
	long ltemp;

	/* Bump the maxerror field, saturating at 0x70000000 */
	time_maxerror = (0x70000000-time_maxerror <
			time_tolerance >> SHIFT_USEC) ?
	0x70000000 : (time_maxerror + (time_tolerance >> SHIFT_USEC));

	/*
	 * Leap-second processing: insertion repeats/deletes skip the
	 * last second of the day (86400-second boundary).
	 */
	switch (time_state) {

	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;

	case TIME_INS:
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;	/* repeat the last second */
			time_state = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 UTC\n");
		}
		break;

	case TIME_DEL:
		if ((xtime.tv_sec + 1) % 86400 == 0) {
			xtime.tv_sec++;	/* skip the last second */
			time_state = TIME_WAIT;
			printk("Clock: deleting leap second 23:59:59 UTC\n");
		}
		break;

	case TIME_OOP:
		/* the inserted second has now been consumed */
		time_state = TIME_WAIT;
		break;

	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}

	/*
	 * Compute the phase adjustment for the next second: take a
	 * fraction of time_offset (scaled by the PLL time constant
	 * unless in frequency-lock mode) and clamp the slew rate.
	 */
	if (time_offset < 0) {
		ltemp = -time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) <<
				SHIFT_UPDATE;
		time_offset += ltemp;
		time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ -
				SHIFT_UPDATE);
	} else {
		ltemp = time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) <<
				SHIFT_UPDATE;
		time_offset -= ltemp;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ -
				SHIFT_UPDATE);
	}

	/*
	 * PPS signal quality monitoring: after PPS_VALID seconds
	 * without an update, declare the PPS signal invalid.
	 */
	pps_valid++;
	if (pps_valid == PPS_VALID) {
		pps_jitter = MAXTIME;
		pps_stabil = MAXFREQ;
		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
				STA_PPSWANDER | STA_PPSERROR);
	}
	/* fold the frequency correction into the tick adjustment */
	ltemp = time_freq + pps_freq;
	if (ltemp < 0)
		time_adj -= -ltemp >>
			(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
	else
		time_adj += ltemp >>
			(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

#if HZ == 100
	/* compensate for HZ==100 not being a power of two: scale
	 * time_adj by roughly 100/128 via +25% */
	if (time_adj < 0)
		time_adj -= -time_adj >> 2;
	else
		time_adj += time_adj >> 2;
#endif
}
793
/*
 * Advance the wall clock by one tick, applying the scaled NTP phase
 * adjustment (time_adj) plus any adjtime() slew (time_adjust_step).
 */
static void update_wall_time_one_tick(void)
{
	/* accumulate the scaled phase; whenever it reaches a whole
	 * microsecond (FINEUSEC), move that into xtime */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		long ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase >= FINEUSEC) {
		long ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust) {
		/*
		 * We are doing an adjtime thing: compute the step for
		 * the *next* tick.  A positive delta means the clock
		 * should run fast, i.e. the tick gets bigger.  The
		 * step is limited to the range -tickadj..+tickadj.
		 */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		/* reduce the outstanding adjustment accordingly */
		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;
}
836
837
838
839
840
841
842
843
/*
 * Advance the wall clock by "ticks" ticks (usually 1; more only if
 * bottom-half processing was delayed) and handle the rollover of
 * the microseconds field into seconds.
 */
static void update_wall_time(unsigned long ticks)
{
	do {
		ticks--;
		update_wall_time_one_tick();
	} while (ticks);

	/* NOTE(review): a single "if" assumes at most one second of
	 * microseconds accumulates per call — holds while the batched
	 * tick count stays well below HZ. */
	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();
	}
}
857
/*
 * Charge "user" and "system" ticks to task p — both the global
 * kstat counters and the per-process utime/stime — and enforce the
 * RLIMIT_CPU soft (SIGXCPU) and hard (SIGKILL) limits.
 */
static inline void do_process_times(struct task_struct *p,
	unsigned long user, unsigned long system)
{
	long psecs;

	p->utime += user;
	/* niced tasks are accounted under cpu_nice, not cpu_user */
	if (p->priority < DEF_PRIORITY)
		kstat.cpu_nice += user;
	else
		kstat.cpu_user += user;
	kstat.cpu_system += system;
	p->stime += system;

	psecs = (p->stime + p->utime) / HZ;
	if (psecs > p->rlim[RLIMIT_CPU].rlim_cur) {
		/* send SIGXCPU only on the tick that crosses a whole
		 * second boundary, so it arrives once per second */
		if (psecs * HZ == p->stime + p->utime)
			send_sig(SIGXCPU, p, 1);
		/* past the hard limit: kill unconditionally */
		if (psecs > p->rlim[RLIMIT_CPU].rlim_max)
			send_sig(SIGKILL, p, 1);
	}
}
881
882 static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
883 {
884 unsigned long it_virt = p->it_virt_value;
885
886 if (it_virt) {
887 if (it_virt <= ticks) {
888 it_virt = ticks + p->it_virt_incr;
889 send_sig(SIGVTALRM, p, 1);
890 }
891 p->it_virt_value = it_virt - ticks;
892 }
893 }
894
895 static inline void do_it_prof(struct task_struct * p, unsigned long ticks)
896 {
897 unsigned long it_prof = p->it_prof_value;
898
899 if (it_prof) {
900 if (it_prof <= ticks) {
901 it_prof = ticks + p->it_prof_incr;
902 send_sig(SIGPROF, p, 1);
903 }
904 p->it_prof_value = it_prof - ticks;
905 }
906 }
907
/*
 * Charge a batch of ticks to one task: "user"/"system" split the
 * batch for accounting, the virtual interval timer runs only in
 * user time, and the profiling timer counts all ticks.
 */
static __inline__ void update_one_process(struct task_struct *p,
	unsigned long ticks, unsigned long user, unsigned long system)
{
	do_process_times(p, user, system);
	do_it_virt(p, user);
	do_it_prof(p, ticks);
}
915
/*
 * Distribute "ticks" elapsed ticks ("system" of which hit kernel
 * mode on the interrupted CPU) to the running task(s), decrement
 * their quanta and request rescheduling where a quantum ran out.
 */
static void update_process_times(unsigned long ticks, unsigned long system)
{
#ifndef __SMP__
	struct task_struct * p = current;
	if (p->pid) {	/* never charge the idle task */
		p->counter -= ticks;
		if (p->counter < 0) {
			p->counter = 0;
			need_resched = 1;
		}

		update_one_process(p, ticks, ticks-system, system);
	}
#else
	int cpu,j;
	cpu = smp_processor_id();
	/* only this CPU takes the timer interrupt, so it accounts
	 * for what is running on every CPU */
	for (j=0;j<smp_num_cpus;j++)
	{
		int i = cpu_logical_map[j];
		struct task_struct *p;

#ifdef __SMP_PROF__
		if (test_bit(i,&smp_idle_map))
			smp_idle_count[i]++;
#endif
		p = current_set[i];

		/*
		 * Do we have a real process on this CPU?
		 */
		if (p->pid) {
			/* assume a remote CPU runs user mode, unless
			 * it holds the kernel lock */
			unsigned long utime = ticks;
			unsigned long stime = 0;
			if (cpu == i) {
				utime = ticks-system;
				stime = system;
			} else if (smp_proc_in_lock[i]) {
				utime = 0;
				stime = ticks;
			}
			update_one_process(p, ticks, utime, stime);

			p->counter -= ticks;
			if (p->counter >= 0)
				continue;
			p->counter = 0;
		} else {
			/*
			 * Idle processor found: is there anything it
			 * could pick up?
			 */
			if (!(0x7fffffff & smp_process_available))
				continue;
		}
		/* quantum expired or work available: reschedule */
		if (i==cpu)
			need_resched = 1;
		else
			smp_message_pass(i, MSG_RESCHEDULE, 0L, 0);
	}
#endif
}
978
/* ticks accumulated by do_timer() since timer_bh() last ran;
 * lost_ticks_system counts the subset that hit kernel mode */
static unsigned long lost_ticks = 0;
static unsigned long lost_ticks_system = 0;
981
/*
 * Timer bottom half: run both timer flavours, then fold the ticks
 * batched up by do_timer() into the load average, the wall clock
 * and per-process accounting.
 */
static void timer_bh(void)
{
	unsigned long ticks, system;

	run_old_timers();

	cli();
	run_timer_list();
	/* snapshot and reset the tick counters atomically w.r.t.
	 * the timer interrupt */
	ticks = lost_ticks;
	lost_ticks = 0;
	system = lost_ticks_system;
	lost_ticks_system = 0;
	sti();

	if (ticks) {
		calc_load(ticks);
		update_wall_time(ticks);
		update_process_times(ticks, system);
	}
}
1002
1003
1004
1005
1006
1007
1008 #if HZ > 100
1009 #define should_run_timers(x) ((x) >= HZ/100)
1010 #else
1011 #define should_run_timers(x) (1)
1012 #endif
1013
/*
 * The timer interrupt handler proper: runs once per clock tick.
 * Only bumps counters, samples the profiler and marks bottom
 * halves; the heavy lifting happens later in timer_bh().
 */
void do_timer(struct pt_regs * regs)
{
	/* the cast strips volatile so the increment compiles to a
	 * single op; NOTE(review): assumes no concurrent writer */
	(*(unsigned long *)&jiffies)++;
	lost_ticks++;
	if (should_run_timers(lost_ticks))
		mark_bh(TIMER_BH);
	if (!user_mode(regs)) {
		lost_ticks_system++;	/* this tick hit kernel mode */
		/* kernel profiling: bucket the interrupted PC,
		 * relative to _stext, scaled by prof_shift */
		if (prof_buffer && current->pid) {
			extern int _stext;
			unsigned long ip = instruction_pointer(regs);
			ip -= (unsigned long) &_stext;
			ip >>= prof_shift;
			if (ip < prof_len)
				prof_buffer[ip]++;
		}
	}
	if (tq_timer != &tq_last)	/* tq_timer non-empty? */
		mark_bh(TQUEUE_BH);
}
1034
1035 #ifndef __alpha__
1036
1037
1038
1039
1040
1041 asmlinkage unsigned int sys_alarm(unsigned int seconds)
1042 {
1043 struct itimerval it_new, it_old;
1044 unsigned int oldalarm;
1045
1046 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
1047 it_new.it_value.tv_sec = seconds;
1048 it_new.it_value.tv_usec = 0;
1049 _setitimer(ITIMER_REAL, &it_new, &it_old);
1050 oldalarm = it_old.it_value.tv_sec;
1051
1052
1053 if (it_old.it_value.tv_usec)
1054 oldalarm++;
1055 return oldalarm;
1056 }
1057
1058
1059
1060
1061
/*
 * Trivial identity/credential getters.  None of these can fail, so
 * there is no error path.
 */
asmlinkage int sys_getpid(void)
{
	return current->pid;
}

/* return the pid of the original parent (p_opptr) */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}

asmlinkage int sys_getuid(void)
{
	return current->uid;
}

asmlinkage int sys_geteuid(void)
{
	return current->euid;
}

asmlinkage int sys_getgid(void)
{
	return current->gid;
}

asmlinkage int sys_getegid(void)
{
	return current->egid;
}
1091
1092
1093
1094
1095
1096
/*
 * nice(): adjust the caller's static priority.  Only the superuser
 * may lower the nice value (increase priority).  The traditional
 * -40..40 nice range is rescaled onto the internal 1..DEF_PRIORITY*2
 * priority range.
 */
asmlinkage int sys_nice(int increment)
{
	unsigned long newprio;
	int increase = 0;

	newprio = increment;
	if (increment < 0) {
		if (!suser())
			return -EPERM;
		newprio = -increment;	/* work with the magnitude */
		increase = 1;
	}
	if (newprio > 40)
		newprio = 40;
	/*
	 * Scale the nice magnitude (0..40) to internal priority
	 * units: multiply by DEF_PRIORITY/20, rounding to nearest.
	 */
	newprio = (newprio * DEF_PRIORITY + 10) / 20;
	increment = newprio;
	if (increase)
		increment = -increment;	/* restore the sign */
	newprio = current->priority - increment;
	/* clamp to the valid internal priority range */
	if (newprio < 1)
		newprio = 1;
	if (newprio > DEF_PRIORITY*2)
		newprio = DEF_PRIORITY*2;
	current->priority = newprio;
	return 0;
}
1130
1131 #endif
1132
1133 static struct task_struct *find_process_by_pid(pid_t pid) {
1134 struct task_struct *p, *q;
1135
1136 if (pid == 0)
1137 p = current;
1138 else {
1139 p = 0;
1140 for_each_task(q) {
1141 if (q && q->pid == pid) {
1142 p = q;
1143 break;
1144 }
1145 }
1146 }
1147 return p;
1148 }
1149
/*
 * Common worker for sched_setscheduler()/sched_setparam().  A
 * negative "policy" means "keep the task's current policy".
 */
static int setscheduler(pid_t pid, int policy,
	struct sched_param *param)
{
	int error;
	struct sched_param lp;
	struct task_struct *p;

	if (!param || pid < 0)
		return -EINVAL;

	error = verify_area(VERIFY_READ, param, sizeof(struct sched_param));
	if (error)
		return error;
	memcpy_fromfs(&lp, param, sizeof(struct sched_param));

	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	if (policy < 0)
		policy = p->policy;
	else if (policy != SCHED_FIFO && policy != SCHED_RR &&
		policy != SCHED_OTHER)
		return -EINVAL;

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are 1..99;
	 * the only valid priority for SCHED_OTHER is 0 (and vice
	 * versa, encoded by the inequality test below).
	 */
	if (lp.sched_priority < 0 || lp.sched_priority > 99)
		return -EINVAL;
	if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
		return -EINVAL;

	/* realtime policies and other users' tasks need superuser */
	if ((policy == SCHED_FIFO || policy == SCHED_RR) && !suser())
		return -EPERM;
	if ((current->euid != p->euid) && (current->euid != p->uid) &&
	    !suser())
		return -EPERM;

	p->policy = policy;
	p->rt_priority = lp.sched_priority;
	if (p->next_run)	/* runnable: requeue at the tail */
		move_last_runqueue(p);
	schedule();

	return 0;
}
1198
/* Set both the scheduling policy and RT priority of a process. */
asmlinkage int sys_sched_setscheduler(pid_t pid, int policy,
	struct sched_param *param)
{
	return setscheduler(pid, policy, param);
}

/* Change only the RT priority; -1 keeps the current policy. */
asmlinkage int sys_sched_setparam(pid_t pid, struct sched_param *param)
{
	return setscheduler(pid, -1, param);
}
1209
1210 asmlinkage int sys_sched_getscheduler(pid_t pid)
1211 {
1212 struct task_struct *p;
1213
1214 if (pid < 0)
1215 return -EINVAL;
1216
1217 p = find_process_by_pid(pid);
1218 if (!p)
1219 return -ESRCH;
1220
1221 return p->policy;
1222 }
1223
1224 asmlinkage int sys_sched_getparam(pid_t pid, struct sched_param *param)
1225 {
1226 int error;
1227 struct task_struct *p;
1228 struct sched_param lp;
1229
1230 if (!param || pid < 0)
1231 return -EINVAL;
1232
1233 error = verify_area(VERIFY_WRITE, param, sizeof(struct sched_param));
1234 if (error)
1235 return error;
1236
1237 p = find_process_by_pid(pid);
1238 if (!p)
1239 return -ESRCH;
1240
1241 lp.sched_priority = p->rt_priority;
1242 memcpy_tofs(param, &lp, sizeof(struct sched_param));
1243
1244 return 0;
1245 }
1246
1247 asmlinkage int sys_sched_yield(void)
1248 {
1249 move_last_runqueue(current);
1250
1251 return 0;
1252 }
1253
1254 asmlinkage int sys_sched_get_priority_max(int policy)
1255 {
1256 switch (policy) {
1257 case SCHED_FIFO:
1258 case SCHED_RR:
1259 return 99;
1260 case SCHED_OTHER:
1261 return 0;
1262 }
1263
1264 return -EINVAL;
1265 }
1266
1267 asmlinkage int sys_sched_get_priority_min(int policy)
1268 {
1269 switch (policy) {
1270 case SCHED_FIFO:
1271 case SCHED_RR:
1272 return 1;
1273 case SCHED_OTHER:
1274 return 0;
1275 }
1276
1277 return -EINVAL;
1278 }
1279
/*
 * Report the SCHED_RR timeslice for a process.
 *
 * NOTE(review): deliberately unimplemented — the timeslice value is
 * never filled in, so we bail out with -ENOSYS, and the copy-out
 * below is intentionally unreachable until the real RR interval is
 * supplied here.
 */
asmlinkage int sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
{
	int error;
	struct timespec t;

	error = verify_area(VERIFY_WRITE, interval, sizeof(struct timespec));
	if (error)
		return error;

	t.tv_sec = 0;
	t.tv_nsec = 0;
	return -ENOSYS;
	memcpy_tofs(interval, &t, sizeof(struct timespec));

	return 0;
}
1296
1297
1298
1299
1300
1301 static unsigned long timespectojiffies(struct timespec *value)
1302 {
1303 unsigned long sec = (unsigned) value->tv_sec;
1304 long nsec = value->tv_nsec;
1305
1306 if (sec > (LONG_MAX / HZ))
1307 return LONG_MAX;
1308 nsec += 1000000000L / HZ - 1;
1309 nsec /= 1000000000L / HZ;
1310 return HZ * sec + nsec;
1311 }
1312
1313 static void jiffiestotimespec(unsigned long jiffies, struct timespec *value)
1314 {
1315 value->tv_nsec = (jiffies % HZ) * (1000000000L / HZ);
1316 value->tv_sec = jiffies / HZ;
1317 return;
1318 }
1319
/*
 * nanosleep(): sleep for the requested interval; if interrupted by
 * a signal, write the remaining time to *rmtp and return -EINTR.
 */
asmlinkage int sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
{
	int error;
	struct timespec t;
	unsigned long expire;

	error = verify_area(VERIFY_READ, rqtp, sizeof(struct timespec));
	if (error)
		return error;
	memcpy_fromfs(&t, rqtp, sizeof(struct timespec));
	/* validate rmtp up front so an interrupted sleep can always
	 * report the remaining time */
	if (rmtp) {
		error = verify_area(VERIFY_WRITE, rmtp,
			sizeof(struct timespec));
		if (error)
			return error;
	}

	if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
		return -EINVAL;

	/*
	 * Short requests (<= 2 ms) from realtime tasks are busy-waited
	 * for better-than-one-jiffy resolution.
	 */
	if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
	    current->policy != SCHED_OTHER) {
		udelay((t.tv_nsec + 999) / 1000);
		return 0;
	}

	/* add one jiffy for any nonzero request so we never sleep
	 * less than asked for */
	expire = timespectojiffies(&t) + (t.tv_sec || t.tv_nsec) + jiffies;
	current->timeout = expire;
	current->state = TASK_INTERRUPTIBLE;
	schedule();

	if (expire > jiffies) {
		/* woken early (signal): report the time still to go */
		if (rmtp) {
			jiffiestotimespec(expire - jiffies -
				(expire > jiffies + 1), &t);
			memcpy_tofs(rmtp, &t, sizeof(struct timespec));
		}
		return -EINTR;
	}

	return 0;
}
1366
1367 static void show_task(int nr,struct task_struct * p)
1368 {
1369 unsigned long free;
1370 static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
1371
1372 printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
1373 if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
1374 printk(stat_nam[p->state]);
1375 else
1376 printk(" ");
1377 #if ((~0UL) == 0xffffffff)
1378 if (p == current)
1379 printk(" current ");
1380 else
1381 printk(" %08lX ", thread_saved_pc(&p->tss));
1382 #else
1383 if (p == current)
1384 printk(" current task ");
1385 else
1386 printk(" %016lx ", thread_saved_pc(&p->tss));
1387 #endif
1388 for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
1389 if (((unsigned long *)p->kernel_stack_page)[free])
1390 break;
1391 }
1392 printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
1393 if (p->p_cptr)
1394 printk("%5d ", p->p_cptr->pid);
1395 else
1396 printk(" ");
1397 if (p->p_ysptr)
1398 printk("%7d", p->p_ysptr->pid);
1399 else
1400 printk(" ");
1401 if (p->p_osptr)
1402 printk(" %5d\n", p->p_osptr->pid);
1403 else
1404 printk("\n");
1405 }
1406
/*
 * Dump one line per existing task (see show_task()).  The header
 * differs between 32- and 64-bit builds only in column widths.
 */
void show_state(void)
{
	int i;

#if ((~0UL) == 0xffffffff)
	printk("\n"
	" free sibling\n");
	printk(" task PC stack pid father child younger older\n");
#else
	printk("\n"
	" free sibling\n");
	printk(" task PC stack pid father child younger older\n");
#endif
	for (i=0 ; i<NR_TASKS ; i++)
		if (task[i])
			show_task(i,task[i]);
}
1424
/*
 * Boot-time scheduler initialization: make init_task the running
 * task on the boot CPU and register the timer-related bottom halves.
 */
void sched_init(void)
{
	/*
	 * We have to do a little magic to get the first process
	 * right in SMP mode: record which CPU we boot on.
	 */
	int cpu=smp_processor_id();
	current_set[cpu]=&init_task;
#ifdef __SMP__
	init_task.processor=cpu;
#endif
	init_bh(TIMER_BH, timer_bh);
	init_bh(TQUEUE_BH, tqueue_bh);
	init_bh(IMMEDIATE_BH, immediate_bh);
}