This source file includes the following definitions:
- add_to_runqueue
- del_from_runqueue
- move_last_runqueue
- wake_up_process
- process_timeout
- goodness
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- run_timer_list
- run_old_timers
- tqueue_bh
- immediate_bh
- count_active_tasks
- calc_load
- second_overflow
- update_wall_time_one_tick
- update_wall_time
- do_process_times
- do_it_virt
- do_it_prof
- update_one_process
- update_process_times
- timer_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- find_process_by_pid
- setscheduler
- sys_sched_setscheduler
- sys_sched_setparam
- sys_sched_getscheduler
- sys_sched_getparam
- sys_sched_yield
- sys_sched_get_priority_max
- sys_sched_get_priority_min
- sys_sched_rr_get_interval
- timespectojiffies
- jiffiestotimespec
- sys_nanosleep
- show_task
- show_state
- sched_init
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/fdreg.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/ptrace.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/tqueue.h>
#include <linux/resource.h>
#include <linux/mm.h>
#include <linux/smp.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

#include <linux/timex.h>

int securelevel = 0;

long tick = 1000000 / HZ;	/* timer interrupt period (usec) */
volatile struct timeval xtime;	/* the current time */
int tickadj = 500/HZ;		/* adjtime() slew per tick (usec) */

DECLARE_TASK_QUEUE(tq_timer);
DECLARE_TASK_QUEUE(tq_immediate);
DECLARE_TASK_QUEUE(tq_scheduler);

/* NTP phase-locked loop variables (see <linux/timex.h>) */
int time_state = TIME_BAD;	/* clock synchronization status */
int time_status = STA_UNSYNC | STA_PLL;	/* clock status bits */
long time_offset = 0;		/* time adjustment (us) */
long time_constant = 2;		/* pll time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance (ppm) */
long time_precision = 1;	/* clock precision (us) */
long time_maxerror = 0x70000000;	/* maximum error */
long time_esterror = 0x70000000;	/* estimated error */
long time_phase = 0;		/* phase offset (scaled us) */
long time_freq = 0;		/* frequency offset (scaled ppm) */
long time_adj = 0;		/* tick adjust (scaled 1 / HZ) */
long time_reftime = 0;		/* time at last adjustment (s) */

long time_adjust = 0;
long time_adjust_step = 0;

int need_resched = 0;
unsigned long event = 0;

extern int _setitimer(int, struct itimerval *, struct itimerval *);
unsigned int * prof_buffer = NULL;
unsigned long prof_len = 0;
unsigned long prof_shift = 0;

#define _S(nr) (1<<((nr)-1))

extern void mem_use(void);

static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
unsigned long init_user_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;

struct mm_struct init_mm = INIT_MM;
struct task_struct init_task = INIT_TASK;

unsigned long volatile jiffies=0;

struct task_struct *current_set[NR_CPUS];
struct task_struct *last_task_used_math = NULL;

struct task_struct * task[NR_TASKS] = {&init_task, };

struct kernel_stat kstat = { 0 };
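
/*
 * Editor's note: the run-queue below is a circular doubly-linked list
 * threaded through task_struct.next_run/prev_run, with init_task (the
 * idle task) serving as the list head.  add_to_runqueue() inserts at
 * the tail, and a NULL next_run pointer means "not queued".
 */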
static inline void add_to_runqueue(struct task_struct * p)
{
#ifdef __SMP__
	int cpu=smp_processor_id();
#endif
#if 1	/* sanity tests */
	if (p->next_run || p->prev_run) {
		printk("task already on run-queue\n");
		return;
	}
#endif
	if (p->counter > current->counter + 3)
		need_resched = 1;
	nr_running++;
	(p->prev_run = init_task.prev_run)->next_run = p;
	p->next_run = &init_task;
	init_task.prev_run = p;
#ifdef __SMP__
	/* set_bit() returns the old value: spin until we own bit 31 */
	while(set_bit(31,&smp_process_available));
#if 0
	{
		while(test_bit(31,&smp_process_available))
		{
			if(clear_bit(cpu,&smp_invalidate_needed))
			{
				local_invalidate();
				set_bit(cpu,&cpu_callin_map[0]);
			}
		}
	}
#endif
	smp_process_available++;
	clear_bit(31,&smp_process_available);
	if ((0!=p->pid) && smp_threads_ready)
	{
		int i;
		for (i=0;i<smp_num_cpus;i++)
		{
			/* kick an idle CPU, if there is one */
			if (0==current_set[cpu_logical_map[i]]->pid)
			{
				smp_message_pass(cpu_logical_map[i], MSG_RESCHEDULE, 0L, 0);
				break;
			}
		}
	}
#endif
}

static inline void del_from_runqueue(struct task_struct * p)
{
	struct task_struct *next = p->next_run;
	struct task_struct *prev = p->prev_run;

#if 1	/* sanity tests */
	if (!next || !prev) {
		printk("task not on run-queue\n");
		return;
	}
#endif
	if (p == &init_task) {
		static int nr = 0;
		if (nr < 5) {
			nr++;
			printk("idle task may not sleep\n");
		}
		return;
	}
	nr_running--;
	next->prev_run = prev;
	prev->next_run = next;
	p->next_run = NULL;
	p->prev_run = NULL;
}

static inline void move_last_runqueue(struct task_struct * p)
{
	struct task_struct *next = p->next_run;
	struct task_struct *prev = p->prev_run;

	next->prev_run = prev;
	prev->next_run = next;
	(p->prev_run = init_task.prev_run)->next_run = p;
	p->next_run = &init_task;
	init_task.prev_run = p;
}

/*
 * Wake up a process.  Put it on the run-queue if it's not already
 * there.  "current" is always on the run-queue (except while a
 * re-schedule is in progress), so a running task can simply set
 * current->state = TASK_RUNNING without calling this.
 */
inline void wake_up_process(struct task_struct * p)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	p->state = TASK_RUNNING;
	if (!p->next_run)
		add_to_runqueue(p);
	restore_flags(flags);
}

static void process_timeout(unsigned long __data)
{
	struct task_struct * p = (struct task_struct *) __data;

	p->timeout = 0;
	wake_up_process(p);
}

/*
 * goodness() decides how desirable a process is.  Processes can be
 * weighed against each other depending on what CPU they have run on
 * lately, to account for cache and TLB miss penalties.
 *
 * Return values:
 *	-1000: never select this
 *	    0: out of time, recheck counters (but it might still be selected)
 *	  +ve: "goodness" value (the larger, the better)
 *	+1000: realtime process, select this.
 */
static inline int goodness(struct task_struct * p, struct task_struct * prev, int this_cpu)
{
	int weight;

#ifdef __SMP__
	/* We are not permitted to run a task someone else is running */
	if (p->processor != NO_PROC_ID)
		return -1000;
#endif

	/* Realtime processes go ahead of everything else, ordered by
	   rt_priority. */
	if (p->policy != SCHED_OTHER)
		return 1000 + p->rt_priority;

	/*
	 * Give the process a first-approximation goodness value
	 * according to the number of clock-ticks it has left.
	 * Don't do any other calculations if the time slice is over.
	 */
	weight = p->counter;
	if (weight) {
#ifdef __SMP__
		/* Give a large advantage to staying on the same processor */
		if (p->last_processor == this_cpu)
			weight += PROC_CHANGE_PENALTY;
#endif

		/* ..and a slight advantage to the current process */
		if (p == prev)
			weight += 1;
	}

	return weight;
}

/*
 * schedule() is the scheduler function.  It is a very simple and nice
 * scheduler: it's not perfect, but certainly works for most things.
 *
 * NOTE!!  Task 0 is the 'idle' task, which gets called when no other
 * tasks can run.  It can not be killed, and it cannot sleep.  The
 * 'state' information in task[0] is never used.
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * prev, * next;
	unsigned long timeout = 0;
	int this_cpu=smp_processor_id();

	if (intr_count) {
		printk("Aiee: scheduling in interrupt\n");
		return;
	}
	if (bh_active & bh_mask) {
		intr_count = 1;
		do_bottom_half();
		intr_count = 0;
	}
	run_task_queue(&tq_scheduler);

	need_resched = 0;
	prev = current;
	cli();
	/* move an exhausted RR process to be last.. */
	if (!prev->counter && prev->policy == SCHED_RR) {
		prev->counter = prev->priority;
		move_last_runqueue(prev);
	}
	switch (prev->state) {
		case TASK_INTERRUPTIBLE:
			if (prev->signal & ~prev->blocked)
				goto makerunnable;
			timeout = prev->timeout;
			if (timeout && (timeout <= jiffies)) {
				prev->timeout = 0;
				timeout = 0;
		makerunnable:
				prev->state = TASK_RUNNING;
				break;
			}
		default:
			del_from_runqueue(prev);
		case TASK_RUNNING:
			break;
	}
	p = init_task.next_run;
	sti();

#ifdef __SMP__
	/* this is safe as we do not permit re-entry of schedule() */
	prev->processor = NO_PROC_ID;
#endif

	/*
	 * Note! there may appear new tasks on the run-queue during this,
	 * as interrupts are enabled.  However, they will be put on front
	 * of the list, so our list starting at "p" is essentially fixed.
	 */
	c = -1000;
	next = &init_task;
	while (p != &init_task) {
		int weight = goodness(p, prev, this_cpu);
		if (weight > c)
			c = weight, next = p;
		p = p->next_run;
	}

	/* if all runnable processes have "counter == 0", re-calculate counters */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
#ifdef __SMP__
	/* context switching between two idle threads is pointless */
	if(!prev->pid && !next->pid)
		next=prev;

	/* allocate the chosen process to this CPU */
	next->processor = this_cpu;
	next->last_processor = this_cpu;

#endif
#ifdef __SMP_PROF__
	/* mark whether this processor is about to run the idle task */
	if (0==next->pid)
		set_bit(this_cpu,&smp_idle_map);
	else
		clear_bit(this_cpu,&smp_idle_map);
#endif
	if (prev != next) {
		struct timer_list timer;

		kstat.context_swtch++;
		if (timeout) {
			init_timer(&timer);
			timer.expires = timeout;
			timer.data = (unsigned long) prev;
			timer.function = process_timeout;
			add_timer(&timer);
		}
		get_mmu_context(next);
		switch_to(prev,next);
		if (timeout)
			del_timer(&timer);
	}
}
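
/*
 * Editor's note: the re-calculation loop above gives every task
 * counter' = counter/2 + priority, so the counter of a task that keeps
 * sleeping converges geometrically towards 2*priority.  Interactive
 * tasks therefore wake up with a larger remaining slice, and hence a
 * better goodness() value, than CPU hogs.
 */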

#ifndef __alpha__

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

/*
 * wake_up doesn't wake up stopped processes - they have to be awakened
 * with signals or similar.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE))
				wake_up_process(p);
		}
		if (!tmp->next) {
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk("        q = %p\n",q);
			printk("       *q = %p\n",*q);
			printk("      tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}

void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE)
				wake_up_process(p);
		}
		if (!tmp->next) {
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk("        q = %p\n",q);
			printk("       *q = %p\n",*q);
			printk("      tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}

void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}

static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}

void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}

void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
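
/*
 * Editor's note (illustrative sketch, not part of the kernel): the
 * common "sleep with timeout" pattern built from the primitives above.
 * The names my_queue and wait_for_event_with_timeout are hypothetical.
 * schedule() arms a timer from current->timeout for TASK_INTERRUPTIBLE
 * sleeps, and process_timeout() clears p->timeout when the timer
 * fires, so a zero timeout after waking means the sleep timed out.
 */
#if 0	/* example only */
static struct wait_queue *my_queue = NULL;

static int wait_for_event_with_timeout(void)
{
	current->timeout = jiffies + HZ;	/* give up after ~1 second */
	interruptible_sleep_on(&my_queue);
	if (!current->timeout)
		return -ETIMEDOUT;		/* timer fired: timed out */
	current->timeout = 0;			/* cancel leftover timeout */
	return 0;	/* woken by wake_up_interruptible(&my_queue) */
}
#endif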

/*
 * The head for the timer-list has an "expires" field of MAX_UINT,
 * and the sorting routine counts on this..
 */
static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
#define SLOW_BUT_DEBUGGING_TIMERS 0

void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	if (timer->next || timer->prev) {
		printk("add_timer() called with non-zero list from %p\n",
			__builtin_return_address(0));
		return;
	}
#endif
	p = &timer_head;
	save_flags(flags);
	cli();
	/* the list is kept sorted by expiry time; find our spot */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}

int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	struct timer_list * p;

	p = &timer_head;
	save_flags(flags);
	cli();
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			return 1;
		}
	}
	if (timer->next || timer->prev)
		printk("del_timer() called from %p with timer not initialized\n",
			__builtin_return_address(0));
	restore_flags(flags);
	return 0;
#else
	struct timer_list * next;
	int ret = 0;
	save_flags(flags);
	cli();
	if ((next = timer->next) != NULL) {
		(next->prev = timer->prev)->next = next;
		timer->next = timer->prev = NULL;
		ret = 1;
	}
	restore_flags(flags);
	return ret;
#endif
}

static inline void run_timer_list(void)
{
	struct timer_list * timer;

	while ((timer = timer_head.next) != &timer_head && timer->expires <= jiffies) {
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();
		fn(data);
		cli();
	}
}
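
/*
 * Editor's note (illustrative sketch, not part of the kernel): typical
 * use of the new-style timer list above.  my_timer and my_timer_fn are
 * hypothetical names; note that expires is absolute, in jiffies.
 */
#if 0	/* example only */
static void my_timer_fn(unsigned long data)
{
	printk("timer fired, data=%lu\n", data);
}

static struct timer_list my_timer;

static void start_my_timer(void)
{
	init_timer(&my_timer);			/* next = prev = NULL */
	my_timer.expires = jiffies + 2*HZ;	/* about two seconds out */
	my_timer.data = 42;
	my_timer.function = my_timer_fn;
	add_timer(&my_timer);	/* del_timer(&my_timer) would cancel it */
}
#endif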

static inline void run_old_timers(void)
{
	struct timer_struct *tp;
	unsigned long mask;

	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;
		tp->fn();
		sti();
	}
}

void tqueue_bh(void)
{
	run_task_queue(&tq_timer);
}

void immediate_bh(void)
{
	run_task_queue(&tq_immediate);
}

unsigned long timer_active = 0;
struct timer_struct timer_table[32];

/*
 * The average number of runnable processes, sampled every LOAD_FREQ
 * ticks and kept as fixed-point numbers (FIXED_1 == 1.0).
 */
unsigned long avenrun[3] = { 0,0,0 };

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
	struct task_struct **p;
	unsigned long nr = 0;

	for(p = &LAST_TASK; p > &FIRST_TASK; --p)
		if (*p && ((*p)->state == TASK_RUNNING ||
			   (*p)->state == TASK_UNINTERRUPTIBLE ||
			   (*p)->state == TASK_SWAPPING))
			nr += FIXED_1;
#ifdef __SMP__
	nr-=(smp_num_cpus-1)*FIXED_1;	/* don't count the per-CPU idle threads */
#endif
	return nr;
}

static inline void calc_load(unsigned long ticks)
{
	unsigned long active_tasks;
	static int count = LOAD_FREQ;

	count -= ticks;
	if (count < 0) {
		count += LOAD_FREQ;
		active_tasks = count_active_tasks();
		CALC_LOAD(avenrun[0], EXP_1, active_tasks);
		CALC_LOAD(avenrun[1], EXP_5, active_tasks);
		CALC_LOAD(avenrun[2], EXP_15, active_tasks);
	}
}
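
/*
 * Editor's note: CALC_LOAD(load, exp, n) implements an exponentially
 * decaying average in FIXED_1 fixed point:
 *
 *	load = (load * exp + n * (FIXED_1 - exp)) >> FSHIFT;
 *
 * With samples every 5 seconds, EXP_1, EXP_5 and EXP_15 are the decay
 * factors e^(-5/60), e^(-5/300) and e^(-5/900) scaled by FIXED_1,
 * giving the familiar 1, 5 and 15 minute load averages.
 */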

/*
 * This routine handles the overflow of the microsecond field.
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 */
static void second_overflow(void)
{
	long ltemp;

	/* bump the maxerror field */
	time_maxerror = (0x70000000-time_maxerror <
			 time_tolerance >> SHIFT_USEC) ?
	    0x70000000 : (time_maxerror + (time_tolerance >> SHIFT_USEC));

	/*
	 * Leap second processing.  If in leap-insert state at the end
	 * of the day, the system clock is set back one second; if in
	 * leap-delete state, the system clock is set ahead one second.
	 */
	switch (time_state) {

	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;

	case TIME_INS:
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_state = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 UTC\n");
		}
		break;

	case TIME_DEL:
		if ((xtime.tv_sec + 1) % 86400 == 0) {
			xtime.tv_sec++;
			time_state = TIME_WAIT;
			printk("Clock: deleting leap second 23:59:59 UTC\n");
		}
		break;

	case TIME_OOP:
		time_state = TIME_WAIT;
		break;

	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}

	/*
	 * Compute the phase adjustment for the next second.  In PLL
	 * mode, the offset is reduced by a fixed factor times the time
	 * constant; in FLL mode the offset is used directly.  In either
	 * case, the maximum phase adjustment per second is clamped so
	 * as to spread the adjustment over not more than the number of
	 * seconds between updates.
	 */
	if (time_offset < 0) {
		ltemp = -time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
		time_offset += ltemp;
		time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
	} else {
		ltemp = time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
		time_offset -= ltemp;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
	}

	/*
	 * Compute the frequency estimate and additional phase adjustment
	 * due to frequency error for the next second.  When the PPS
	 * signal is engaged, gnaw on the watchdog counter and update the
	 * frequency computed by the pll and the PPS signal.
	 */
	pps_valid++;
	if (pps_valid == PPS_VALID) {
		pps_jitter = MAXTIME;
		pps_stabil = MAXFREQ;
		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
				 STA_PPSWANDER | STA_PPSERROR);
	}
	ltemp = time_freq + pps_freq;
	if (ltemp < 0)
		time_adj -= -ltemp >>
		    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
	else
		time_adj += ltemp >>
		    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

#if HZ == 100
	/* compensate for (HZ==100) != (1 << SHIFT_HZ) */
	if (time_adj < 0)
		time_adj -= -time_adj >> 2;
	else
		time_adj += time_adj >> 2;
#endif
}

static void update_wall_time_one_tick(void)
{
	/*
	 * Advance the phase; once it amounts to whole microseconds,
	 * carry them into the tick.
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		long ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase >= FINEUSEC) {
		long ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust) {
		/*
		 * We are doing an adjtime thing: modify the effective
		 * tick length for the *next* tick, by at most tickadj
		 * usecs.  Because of this, the tick must never be
		 * changed elsewhere (hence the separate
		 * time_adjust_step).
		 */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		/* reduce the remaining adjustment by this step */
		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;
}
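
/*
 * Editor's note: time_phase accumulates fractional microseconds in
 * units of 1/(1 << SHIFT_SCALE) us.  Each tick adds time_adj; whole
 * microseconds are carried into xtime.tv_usec together with the
 * nominal tick length and any adjtime() slew in time_adjust_step.
 */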

/*
 * Using a loop looks inefficient, but "ticks" is usually just one
 * (we shouldn't be losing ticks; we do it this way mainly for
 * interrupt latency reasons, not because we expect many lost ticks).
 */
static void update_wall_time(unsigned long ticks)
{
	do {
		ticks--;
		update_wall_time_one_tick();
	} while (ticks);

	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();
	}
}

static inline void do_process_times(struct task_struct *p,
	unsigned long user, unsigned long system)
{
	long psecs;

	p->utime += user;
	if (p->priority < DEF_PRIORITY)
		kstat.cpu_nice += user;
	else
		kstat.cpu_user += user;
	kstat.cpu_system += system;
	p->stime += system;

	psecs = (p->stime + p->utime) / HZ;
	if (psecs > p->rlim[RLIMIT_CPU].rlim_cur) {
		/* Send SIGXCPU every second.. */
		if (psecs * HZ == p->stime + p->utime)
			send_sig(SIGXCPU, p, 1);
		/* and SIGKILL when we go over max.. */
		if (psecs > p->rlim[RLIMIT_CPU].rlim_max)
			send_sig(SIGKILL, p, 1);
	}
}

static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
{
	unsigned long it_virt = p->it_virt_value;

	if (it_virt) {
		if (it_virt <= ticks) {
			it_virt = ticks + p->it_virt_incr;
			send_sig(SIGVTALRM, p, 1);
		}
		p->it_virt_value = it_virt - ticks;
	}
}

static inline void do_it_prof(struct task_struct * p, unsigned long ticks)
{
	unsigned long it_prof = p->it_prof_value;

	if (it_prof) {
		if (it_prof <= ticks) {
			it_prof = ticks + p->it_prof_incr;
			send_sig(SIGPROF, p, 1);
		}
		p->it_prof_value = it_prof - ticks;
	}
}

static __inline__ void update_one_process(struct task_struct *p,
	unsigned long ticks, unsigned long user, unsigned long system)
{
	do_process_times(p, user, system);
	do_it_virt(p, user);	/* the virtual timer runs in user time only */
	do_it_prof(p, ticks);	/* the profiling timer runs in both */
}

static void update_process_times(unsigned long ticks, unsigned long system)
{
#ifndef __SMP__
	struct task_struct * p = current;
	if (p->pid) {
		p->counter -= ticks;
		if (p->counter < 0) {
			p->counter = 0;
			need_resched = 1;
		}
	}
	update_one_process(p, ticks, ticks-system, system);
#else
	int cpu,j;
	cpu = smp_processor_id();
	for (j=0;j<smp_num_cpus;j++)
	{
		int i = cpu_logical_map[j];
		struct task_struct *p;

#ifdef __SMP_PROF__
		if (test_bit(i,&smp_idle_map))
			smp_idle_count[i]++;
#endif
		p = current_set[i];
		/*
		 * Do we have a real process?
		 */
		if (p->pid) {
			/* assume user-mode process */
			unsigned long utime = ticks;
			unsigned long stime = 0;
			if (cpu == i) {
				utime = ticks-system;
				stime = system;
			} else if (smp_proc_in_lock[i]) {
				utime = 0;
				stime = ticks;
			}
			update_one_process(p, ticks, utime, stime);

			p->counter -= ticks;
			if (p->counter >= 0)
				continue;
			p->counter = 0;
		} else {
			/*
			 * Idle processor found, do we have anything
			 * we could run?
			 */
			if (!(0x7fffffff & smp_process_available))
				continue;
		}
		/* Ok, we should reschedule, do the magic */
		if (i==cpu)
			need_resched = 1;
		else
			smp_message_pass(i, MSG_RESCHEDULE, 0L, 0);
	}
#endif
}

static unsigned long lost_ticks = 0;
static unsigned long lost_ticks_system = 0;

static void timer_bh(void)
{
	unsigned long ticks, system;

	run_old_timers();

	cli();
	run_timer_list();
	ticks = lost_ticks;
	lost_ticks = 0;
	system = lost_ticks_system;
	lost_ticks_system = 0;
	sti();

	if (ticks) {
		calc_load(ticks);
		update_wall_time(ticks);
		update_process_times(ticks, system);
	}
}

/*
 * Run the bottom half stuff only about 100 times a second:
 * we'd just use up unnecessary CPU time for timer handling otherwise.
 */
#if HZ > 100
#define should_run_timers(x) ((x) >= HZ/100)
#else
#define should_run_timers(x) (1)
#endif

void do_timer(struct pt_regs * regs)
{
	(*(unsigned long *)&jiffies)++;
	lost_ticks++;
	if (should_run_timers(lost_ticks))
		mark_bh(TIMER_BH);
	if (!user_mode(regs)) {
		lost_ticks_system++;
		if (prof_buffer && current->pid) {
			extern int _stext;
			unsigned long ip = instruction_pointer(regs);
			ip -= (unsigned long) &_stext;
			ip >>= prof_shift;
			if (ip < prof_len)
				prof_buffer[ip]++;
		}
	}
	if (tq_timer != &tq_last)
		mark_bh(TQUEUE_BH);
}
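
/*
 * Editor's note: do_timer() runs from the hardware tick with interrupts
 * disabled and only bumps jiffies/lost_ticks, samples the profiler and
 * marks bottom halves; the heavy lifting (timer lists, wall clock and
 * process accounting) happens later in timer_bh(), which consumes the
 * accumulated lost_ticks and lost_ticks_system counts.
 */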

#ifndef __alpha__

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned int sys_alarm(unsigned int seconds)
{
	struct itimerval it_new, it_old;
	unsigned int oldalarm;

	it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
	it_new.it_value.tv_sec = seconds;
	it_new.it_value.tv_usec = 0;
	_setitimer(ITIMER_REAL, &it_new, &it_old);
	oldalarm = it_old.it_value.tv_sec;
	/* ehhh.. We can't return 0 if we have an alarm pending.. */
	/* And we'd better return too much than too little anyway */
	if (it_old.it_value.tv_usec)
		oldalarm++;
	return oldalarm;
}

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */
asmlinkage int sys_getpid(void)
{
	return current->pid;
}

asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}

asmlinkage int sys_getuid(void)
{
	return current->uid;
}

asmlinkage int sys_geteuid(void)
{
	return current->euid;
}

asmlinkage int sys_getgid(void)
{
	return current->gid;
}

asmlinkage int sys_getegid(void)
{
	return current->egid;
}

/*
 * This has been replaced by sys_setpriority.  Maybe it should be
 * moved into the arch dependent tree for those ports that require
 * it for backwards compatibility?
 */
asmlinkage int sys_nice(int increment)
{
	unsigned long newprio;
	int increase = 0;

	newprio = increment;
	if (increment < 0) {
		if (!suser())
			return -EPERM;
		newprio = -increment;
		increase = 1;
	}
	if (newprio > 40)
		newprio = 40;
	/*
	 * do a "normalization" of the priority (traditionally
	 * unix nice values are -20..20, linux doesn't really
	 * use that kind of thing, but uses the length of the
	 * timeslice instead).  The rounding is why we want to
	 * avoid negative values.
	 */
	newprio = (newprio * DEF_PRIORITY + 10) / 20;
	increment = newprio;
	if (increase)
		increment = -increment;
	newprio = current->priority - increment;
	if (newprio < 1)
		newprio = 1;
	if (newprio > DEF_PRIORITY*2)
		newprio = DEF_PRIORITY*2;
	current->priority = newprio;
	return 0;
}

#endif
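
/*
 * Editor's note: assuming DEF_PRIORITY works out to 20 ticks (its
 * usual value at HZ == 100), the [-40,40] nice range maps onto the
 * 1..40 priority/timeslice scale: nice(10) gives
 * (10*20 + 10)/20 == 10 priority points off the current value,
 * clamped to [1, DEF_PRIORITY*2].
 */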

static struct task_struct *find_process_by_pid(pid_t pid)
{
	struct task_struct *p, *q;

	if (pid == 0)
		p = current;
	else {
		p = NULL;
		for_each_task(q) {
			if (q->pid == pid) {
				p = q;
				break;
			}
		}
	}
	return p;
}

static int setscheduler(pid_t pid, int policy,
			struct sched_param *param)
{
	int error;
	struct sched_param lp;
	struct task_struct *p;

	if (!param || pid < 0)
		return -EINVAL;

	error = verify_area(VERIFY_READ, param, sizeof(struct sched_param));
	if (error)
		return error;
	memcpy_fromfs(&lp, param, sizeof(struct sched_param));

	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	if (policy < 0)
		policy = p->policy;
	else if (policy != SCHED_FIFO && policy != SCHED_RR &&
		 policy != SCHED_OTHER)
		return -EINVAL;

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are 1..99,
	 * valid priority for SCHED_OTHER is 0.
	 */
	if (lp.sched_priority < 0 || lp.sched_priority > 99)
		return -EINVAL;
	if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
		return -EINVAL;

	if ((policy == SCHED_FIFO || policy == SCHED_RR) && !suser())
		return -EPERM;
	if ((current->euid != p->euid) && (current->euid != p->uid) &&
	    !suser())
		return -EPERM;

	p->policy = policy;
	p->rt_priority = lp.sched_priority;
	if (p->next_run)
		move_last_runqueue(p);
	schedule();

	return 0;
}

asmlinkage int sys_sched_setscheduler(pid_t pid, int policy,
				      struct sched_param *param)
{
	return setscheduler(pid, policy, param);
}

asmlinkage int sys_sched_setparam(pid_t pid, struct sched_param *param)
{
	return setscheduler(pid, -1, param);
}

asmlinkage int sys_sched_getscheduler(pid_t pid)
{
	struct task_struct *p;

	if (pid < 0)
		return -EINVAL;

	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	return p->policy;
}

asmlinkage int sys_sched_getparam(pid_t pid, struct sched_param *param)
{
	int error;
	struct task_struct *p;
	struct sched_param lp;

	if (!param || pid < 0)
		return -EINVAL;

	error = verify_area(VERIFY_WRITE, param, sizeof(struct sched_param));
	if (error)
		return error;

	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	lp.sched_priority = p->rt_priority;
	memcpy_tofs(param, &lp, sizeof(struct sched_param));

	return 0;
}

asmlinkage int sys_sched_yield(void)
{
	move_last_runqueue(current);

	return 0;
}
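
/*
 * Editor's note: sys_sched_yield() only moves the caller to the tail
 * of the run-queue; it neither calls schedule() nor sets need_resched,
 * so the yield actually takes effect at the next reschedule point.
 */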

asmlinkage int sys_sched_get_priority_max(int policy)
{
	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		return 99;
	case SCHED_OTHER:
		return 0;
	}

	return -EINVAL;
}

asmlinkage int sys_sched_get_priority_min(int policy)
{
	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		return 1;
	case SCHED_OTHER:
		return 0;
	}

	return -EINVAL;
}

asmlinkage int sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
{
	int error;
	struct timespec t;

	error = verify_area(VERIFY_WRITE, interval, sizeof(struct timespec));
	if (error)
		return error;

	/* not yet implemented: bail out before copying the dummy value */
	t.tv_sec = 0;
	t.tv_nsec = 0;
	return -ENOSYS;
	memcpy_tofs(interval, &t, sizeof(struct timespec));

	return 0;
}

/*
 * change timespec to jiffies, trying to avoid the most obvious
 * overflows, and back again.
 */
static unsigned long timespectojiffies(struct timespec *value)
{
	unsigned long sec = (unsigned) value->tv_sec;
	long nsec = value->tv_nsec;

	if (sec > (LONG_MAX / HZ))
		return LONG_MAX;
	nsec += 1000000000L / HZ - 1;	/* round up to the next jiffy */
	nsec /= 1000000000L / HZ;
	return HZ * sec + nsec;
}

static void jiffiestotimespec(unsigned long jiffies, struct timespec *value)
{
	value->tv_nsec = (jiffies % HZ) * (1000000000L / HZ);
	value->tv_sec = jiffies / HZ;
	return;
}

asmlinkage int sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
{
	int error;
	struct timespec t;
	unsigned long expire;

	error = verify_area(VERIFY_READ, rqtp, sizeof(struct timespec));
	if (error)
		return error;
	memcpy_fromfs(&t, rqtp, sizeof(struct timespec));
	if (rmtp) {
		error = verify_area(VERIFY_WRITE, rmtp,
				    sizeof(struct timespec));
		if (error)
			return error;
	}

	if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
		return -EINVAL;

	if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
	    current->policy != SCHED_OTHER) {
		/*
		 * Short delay requests up to 2 ms will be handled with
		 * high precision by a busy wait for all real-time
		 * processes.
		 */
		udelay((t.tv_nsec + 999) / 1000);
		return 0;
	}

	/* the extra jiffy guarantees we sleep at least the requested time */
	expire = timespectojiffies(&t) + (t.tv_sec || t.tv_nsec) + jiffies;
	current->timeout = expire;
	current->state = TASK_INTERRUPTIBLE;
	schedule();

	if (expire > jiffies) {
		if (rmtp) {
			jiffiestotimespec(expire - jiffies -
					  (expire > jiffies + 1), &t);
			memcpy_tofs(rmtp, &t, sizeof(struct timespec));
		}
		return -EINTR;
	}

	return 0;
}

static void show_task(int nr,struct task_struct * p)
{
	unsigned long free;
	static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
	if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[p->state]);
	else
		printk(" ");
#if ((~0UL) == 0xffffffff)
	if (p == current)
		printk(" current  ");
	else
		printk(" %08lX ", thread_saved_pc(&p->tss));
#else
	if (p == current)
		printk("   current task   ");
	else
		printk(" %016lx ", thread_saved_pc(&p->tss));
#endif
	/* scan for the deepest zero on the kernel stack to estimate free space */
	for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
		if (((unsigned long *)p->kernel_stack_page)[free])
			break;
	}
	printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
		printk("      ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);
	else
		printk("       ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);
	else
		printk("\n");
}

void show_state(void)
{
	int i;

#if ((~0UL) == 0xffffffff)
	printk("\n"
	       "                         free                        sibling\n");
	printk("  task             PC    stack   pid father child younger older\n");
#else
	printk("\n"
	       "                                 free                        sibling\n");
	printk("  task                 PC        stack   pid father child younger older\n");
#endif
	for (i=0 ; i<NR_TASKS ; i++)
		if (task[i])
			show_task(i,task[i]);
}

void sched_init(void)
{
	/*
	 * We have to do a little magic to get the first
	 * process right in SMP mode.
	 */
	int cpu=smp_processor_id();
	current_set[cpu]=&init_task;
#ifdef __SMP__
	init_task.processor=cpu;
#endif
	init_bh(TIMER_BH, timer_bh);
	init_bh(TQUEUE_BH, tqueue_bh);
	init_bh(IMMEDIATE_BH, immediate_bh);
}