This source file includes the following definitions:
- add_to_runqueue
- del_from_runqueue
- move_last_runqueue
- wake_up_process
- process_timeout
- goodness
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- run_timer_list
- run_old_timers
- tqueue_bh
- immediate_bh
- count_active_tasks
- calc_load
- second_overflow
- update_wall_time_one_tick
- update_wall_time
- do_process_times
- do_it_virt
- do_it_prof
- update_one_process
- update_process_times
- timer_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- find_process_by_pid
- setscheduler
- sys_sched_setscheduler
- sys_sched_setparam
- sys_sched_getscheduler
- sys_sched_getparam
- sys_sched_yield
- sys_sched_get_priority_max
- sys_sched_get_priority_min
- sys_sched_rr_get_interval
- timespectojiffies
- jiffiestotimespec
- sys_nanosleep
- show_task
- show_state
- sched_init
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/fdreg.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/ptrace.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/tqueue.h>
#include <linux/resource.h>
#include <linux/mm.h>
#include <linux/smp.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

#include <linux/timex.h>

int securelevel = 0;			/* system security level */

long tick = 1000000 / HZ;		/* timer interrupt period, in usecs */
volatile struct timeval xtime;		/* the current time */
int tickadj = 500/HZ;			/* microsecs adjusted per clock tick */

DECLARE_TASK_QUEUE(tq_timer);
DECLARE_TASK_QUEUE(tq_immediate);
DECLARE_TASK_QUEUE(tq_scheduler);

/*
 * Phase/frequency-locked loop variables for the NTP kernel clock
 * discipline; see <linux/timex.h> and second_overflow() below.
 */
int time_state = TIME_BAD;		/* clock synchronization status */
int time_status = STA_UNSYNC | STA_PLL;	/* clock status bits */
long time_offset = 0;			/* time adjustment (us) */
long time_constant = 2;			/* pll time constant */
long time_tolerance = MAXFREQ;		/* frequency tolerance (ppm) */
long time_precision = 1;		/* clock precision (us) */
long time_maxerror = 0x70000000;	/* maximum error */
long time_esterror = 0x70000000;	/* estimated error */
long time_phase = 0;			/* phase offset (scaled us) */
long time_freq = 0;			/* frequency offset (scaled ppm) */
long time_adj = 0;			/* tick adjust (scaled 1 / HZ) */
long time_reftime = 0;			/* time at last adjustment (s) */

long time_adjust = 0;			/* outstanding adjtime() correction */
long time_adjust_step = 0;		/* per-tick slice of time_adjust */

int need_resched = 0;
unsigned long event = 0;

extern int _setitimer(int, struct itimerval *, struct itimerval *);
unsigned int * prof_buffer = NULL;
unsigned long prof_len = 0;
unsigned long prof_shift = 0;

#define _S(nr) (1<<((nr)-1))

extern void mem_use(void);

static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
unsigned long init_user_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;

struct mm_struct init_mm = INIT_MM;
struct task_struct init_task = INIT_TASK;

unsigned long volatile jiffies = 0;

struct task_struct *current_set[NR_CPUS];
struct task_struct *last_task_used_math = NULL;

struct task_struct * task[NR_TASKS] = {&init_task, };

struct kernel_stat kstat = { 0 };

/*
 * The run queue is a circular doubly-linked list with init_task (the
 * idle task) serving as the list head.
 */
static inline void add_to_runqueue(struct task_struct * p)
{
#ifdef __SMP__
	int cpu = smp_processor_id();
#endif
#if 1	/* sanity check */
	if (p->next_run || p->prev_run) {
		printk("task already on run-queue\n");
		return;
	}
#endif
	if (p->counter > current->counter + 3)
		need_resched = 1;
	nr_running++;
	(p->prev_run = init_task.prev_run)->next_run = p;
	p->next_run = &init_task;
	init_task.prev_run = p;
#ifdef __SMP__
	/* spin on bit 31, which serves as a lock for the counter below */
	while (set_bit(31, &smp_process_available))
		;
#if 0
	{
		while (test_bit(31, &smp_process_available))
		{
			if (clear_bit(cpu, &smp_invalidate_needed))
			{
				local_flush_tlb();
				set_bit(cpu, &cpu_callin_map[0]);
			}
		}
	}
#endif
	smp_process_available++;
	clear_bit(31, &smp_process_available);
	if ((0 != p->pid) && smp_threads_ready)
	{
		/* kick an idle CPU, if there is one, to pick up the new task */
		int i;
		for (i = 0; i < smp_num_cpus; i++)
		{
			if (0 == current_set[cpu_logical_map[i]]->pid)
			{
				smp_message_pass(cpu_logical_map[i], MSG_RESCHEDULE, 0L, 0);
				break;
			}
		}
	}
#endif
}

static inline void del_from_runqueue(struct task_struct * p)
{
	struct task_struct *next = p->next_run;
	struct task_struct *prev = p->prev_run;

#if 1	/* sanity check */
	if (!next || !prev) {
		printk("task not on run-queue\n");
		return;
	}
#endif
	if (p == &init_task) {
		static int nr = 0;
		if (nr < 5) {
			nr++;
			printk("idle task may not sleep\n");
		}
		return;
	}
	nr_running--;
	next->prev_run = prev;
	prev->next_run = next;
	p->next_run = NULL;
	p->prev_run = NULL;
}

static inline void move_last_runqueue(struct task_struct * p)
{
	struct task_struct *next = p->next_run;
	struct task_struct *prev = p->prev_run;

	next->prev_run = prev;
	prev->next_run = next;
	(p->prev_run = init_task.prev_run)->next_run = p;
	p->next_run = &init_task;
	init_task.prev_run = p;
}

/*
 * Wake up a process: mark it runnable and put it on the run queue if
 * it is not already there.
 */
inline void wake_up_process(struct task_struct * p)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	p->state = TASK_RUNNING;
	if (!p->next_run)
		add_to_runqueue(p);
	restore_flags(flags);
}

/* Timer handler used by schedule() to implement bounded sleeps. */
static void process_timeout(unsigned long __data)
{
	struct task_struct * p = (struct task_struct *) __data;

	p->timeout = 0;
	wake_up_process(p);
}
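
/*
 * Illustrative sketch (not part of this file): process_timeout() is
 * what makes bounded sleeps work.  A caller arms the sleep by setting
 * current->timeout before calling schedule(); schedule() then starts a
 * timer whose handler is process_timeout(), exactly as sys_nanosleep()
 * does further down in this file.  The helper name below is made up:
 */
#if 0
static void wait_up_to_one_second(void)
{
	current->timeout = jiffies + HZ;	/* wake up after ~1 second */
	current->state = TASK_INTERRUPTIBLE;
	schedule();		/* returns on wake_up(), signal, or timeout */
}
#endif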

/*
 * Decide how desirable a process is.  Return values:
 *	-1000: never select this
 *	    0: out of time, recheck scheduler (but it might still be selected)
 *	  +ve: "goodness" value (the larger, the better)
 *	+1000: realtime process, select this.
 */
static inline int goodness(struct task_struct * p, struct task_struct * prev, int this_cpu)
{
	int weight;

#ifdef __SMP__
	/* We are not permitted to take a process already running elsewhere. */
	if (p->processor != NO_PROC_ID)
		return -1000;
#endif

	/*
	 * Realtime processes are always preferred: their weight is the
	 * realtime priority, offset well above any normal task.
	 */
	if (p->policy != SCHED_OTHER)
		return 1000 + p->rt_priority;

	/*
	 * First approximation: the number of clock ticks the process has
	 * left in this scheduling epoch.  A process with no ticks left is
	 * never selected (weight 0).
	 */
	weight = p->counter;
	if (weight) {
#ifdef __SMP__
		/* Give a largish advantage to the same processor
		   (equivalent to penalizing the other processors). */
		if (p->last_processor == this_cpu)
			weight += PROC_CHANGE_PENALTY;
#endif
		/* ...and a slight advantage to the current process */
		if (p == prev)
			weight += 1;
	}

	return weight;
}
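
/*
 * Worked example (illustrative, assuming PROC_CHANGE_PENALTY is 20):
 * a SCHED_FIFO task with rt_priority 50 scores 1000 + 50 = 1050 and
 * beats any SCHED_OTHER task.  A SCHED_OTHER task with 5 ticks left
 * that last ran on this CPU scores 5 + 20 = 25 under SMP, while an
 * otherwise identical task that last ran elsewhere scores only 5; the
 * previously running task gets one extra point as a tie-breaker.
 */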

/*
 * 'schedule()' is the scheduler function.  It scans the run queue for
 * the runnable process with the highest "goodness" and switches to it.
 * It must not be called from interrupt context.
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * prev, * next;
	unsigned long timeout = 0;
	int this_cpu = smp_processor_id();

	if (intr_count)
		goto scheduling_in_interrupt;

	if (bh_active & bh_mask) {
		intr_count = 1;
		do_bottom_half();
		intr_count = 0;
	}

	run_task_queue(&tq_scheduler);

	need_resched = 0;
	prev = current;
	cli();
	/* move an exhausted RR process to be last in its queue */
	if (!prev->counter && prev->policy == SCHED_RR) {
		prev->counter = prev->priority;
		move_last_runqueue(prev);
	}
	switch (prev->state) {
		case TASK_INTERRUPTIBLE:
			if (prev->signal & ~prev->blocked)
				goto makerunnable;
			timeout = prev->timeout;
			if (timeout && (timeout <= jiffies)) {
				prev->timeout = 0;
				timeout = 0;
		makerunnable:
				prev->state = TASK_RUNNING;
				break;
			}
		default:
			del_from_runqueue(prev);
		case TASK_RUNNING:
			break;
	}
	p = init_task.next_run;
	sti();

#ifdef __SMP__
	/* this is safe as we do not permit re-entry of schedule() */
	prev->processor = NO_PROC_ID;
#endif

	/*
	 * Note! there may appear new tasks on the run-queue during this,
	 * as interrupts are enabled.  However, they will be put on the
	 * front of the list, so our list starting at "p" is essentially
	 * fixed.
	 */
	c = -1000;
	next = &init_task;
	while (p != &init_task) {
		int weight = goodness(p, prev, this_cpu);
		if (weight > c)
			c = weight, next = p;
		p = p->next_run;
	}

	/* if all runnable processes have "counter == 0", re-calculate counters */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
#ifdef __SMP__
	/* context switching between two idle threads is pointless */
	if (!prev->pid && !next->pid)
		next = prev;
	/* allocate the chosen process to this processor */
	next->processor = this_cpu;
	next->last_processor = this_cpu;
#endif
#ifdef __SMP_PROF__
	/* mark this processor as running an idle thread (or not) */
	if (0 == next->pid)
		set_bit(this_cpu, &smp_idle_map);
	else
		clear_bit(this_cpu, &smp_idle_map);
#endif
	if (prev != next) {
		struct timer_list timer;

		kstat.context_swtch++;
		if (timeout) {
			init_timer(&timer);
			timer.expires = timeout;
			timer.data = (unsigned long) prev;
			timer.function = process_timeout;
			add_timer(&timer);
		}
		get_mmu_context(next);
		switch_to(prev, next);
		if (timeout)
			del_timer(&timer);
	}
	return;

scheduling_in_interrupt:
	printk("Aiee: scheduling in interrupt\n");
}

#ifndef __alpha__

/*
 * pause(): sleep interruptibly until a signal arrives.  (Not needed on
 * the Alpha, which can implement it in user space.)
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

/*
 * wake_up doesn't wake up stopped processes - they have to be awakened
 * with signals or similar.
 *
 * Note that this does not need cli/sti pairs: the actual removal from
 * the wait queue is handled by the woken process itself.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE))
				wake_up_process(p);
		}
		if (!tmp->next) {
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk("        q = %p\n", q);
			printk("       *q = %p\n", *q);
			printk("      tmp = %p\n", tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}

void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE)
				wake_up_process(p);
		}
		if (!tmp->next) {
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk("        q = %p\n", q);
			printk("       *q = %p\n", *q);
			printk("      tmp = %p\n", tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}

/*
 * Slow path of the semaphore "down" operation: sleep uninterruptibly
 * until the semaphore count goes positive.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
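
/*
 * Illustrative sketch (not part of this file): __down() is only the
 * contended slow path; callers use the down()/up() wrappers from
 * <asm/semaphore.h>, which fall through to __down() when the count
 * shows the semaphore is already held.  Assumes the MUTEX initializer
 * from that header:
 */
#if 0
static struct semaphore my_lock = MUTEX;	/* count initialized to 1 */

static void critical_section(void)
{
	down(&my_lock);		/* may sleep uninterruptibly in __down() */
	/* ... touch shared data ... */
	up(&my_lock);		/* wakes a sleeper via sem->wait */
}
#endif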

static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}

void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p, TASK_INTERRUPTIBLE);
}

void sleep_on(struct wait_queue **p)
{
	__sleep_on(p, TASK_UNINTERRUPTIBLE);
}
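
/*
 * Illustrative sketch (not part of this file): the classic pairing of
 * these primitives.  A consumer sleeps on a wait queue until a
 * producer (often an interrupt handler) wakes it; the condition must
 * be re-tested after waking, both because wake_up() wakes every
 * sleeper and because an interruptible sleep also ends on a signal:
 */
#if 0
static struct wait_queue *data_wait = NULL;
static int data_ready = 0;

static void consumer(void)
{
	while (!data_ready)
		interruptible_sleep_on(&data_wait);
	data_ready = 0;
}

static void producer(void)	/* e.g. called from an interrupt */
{
	data_ready = 1;
	wake_up_interruptible(&data_wait);
}
#endif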

/*
 * The head of the timer list has an "expires" field of MAX_UINT,
 * and the sorting routine counts on this..
 */
static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
#define SLOW_BUT_DEBUGGING_TIMERS 0

void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	if (timer->next || timer->prev) {
		printk("add_timer() called with non-zero list from %p\n",
			__builtin_return_address(0));
		return;
	}
#endif
	p = &timer_head;
	save_flags(flags);
	cli();
	/* keep the list sorted by expiry time */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}

int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	struct timer_list * p;

	p = &timer_head;
	save_flags(flags);
	cli();
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			return 1;
		}
	}
	if (timer->next || timer->prev)
		printk("del_timer() called from %p with timer not initialized\n",
			__builtin_return_address(0));
	restore_flags(flags);
	return 0;
#else
	struct timer_list * next;
	int ret = 0;
	save_flags(flags);
	cli();
	if ((next = timer->next) != NULL) {
		(next->prev = timer->prev)->next = next;
		timer->next = timer->prev = NULL;
		ret = 1;
	}
	restore_flags(flags);
	return ret;
#endif
}
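
/*
 * Illustrative sketch (not part of this file): typical driver use of
 * these timers.  The timer must be (re)initialized before each
 * add_timer(), and del_timer() is safe to call whether or not the
 * timer is still pending (it returns 1 only if it was).  All names
 * below are made up:
 */
#if 0
static struct timer_list my_timer;

static void my_timer_fn(unsigned long data)
{
	printk("timer fired, data=%lu\n", data);
}

static void start_half_second_timer(void)
{
	init_timer(&my_timer);
	my_timer.expires = jiffies + HZ/2;
	my_timer.data = 42;
	my_timer.function = my_timer_fn;
	add_timer(&my_timer);
}
#endif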

static inline void run_timer_list(void)
{
	struct timer_list * timer;

	while ((timer = timer_head.next) != &timer_head && timer->expires <= jiffies) {
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();
		fn(data);
		cli();
	}
}

static inline void run_old_timers(void)
{
	struct timer_struct *tp;
	unsigned long mask;

	for (mask = 1, tp = timer_table+0 ; mask ; tp++, mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;
		tp->fn();
		sti();
	}
}

void tqueue_bh(void)
{
	run_task_queue(&tq_timer);
}

void immediate_bh(void)
{
	run_task_queue(&tq_immediate);
}
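
/*
 * Illustrative sketch (not part of this file): deferring work into one
 * of the task queues drained above.  tq_immediate runs at the next
 * opportunity once IMMEDIATE_BH is marked; tq_timer is drained from
 * tqueue_bh() on the next timer tick.  A sketch of the conventional
 * usage, with made-up names:
 */
#if 0
static void my_deferred_work(void *data)
{
	/* runs in bottom-half context, interrupts enabled */
}

static struct tq_struct my_task = { NULL, 0, my_deferred_work, NULL };

static void defer_from_interrupt(void)
{
	queue_task(&my_task, &tq_immediate);
	mark_bh(IMMEDIATE_BH);
}
#endif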

unsigned long timer_active = 0;
struct timer_struct timer_table[32];

/*
 * The load average: the number of runnable (and uninterruptibly
 * sleeping) processes, exponentially averaged over 1, 5 and 15
 * minutes, kept as fixed-point numbers with FSHIFT fractional bits.
 */
unsigned long avenrun[3] = { 0, 0, 0 };

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
	struct task_struct **p;
	unsigned long nr = 0;

	for (p = &LAST_TASK; p > &FIRST_TASK; --p)
		if (*p && ((*p)->state == TASK_RUNNING ||
			   (*p)->state == TASK_UNINTERRUPTIBLE ||
			   (*p)->state == TASK_SWAPPING))
			nr += FIXED_1;
#ifdef __SMP__
	nr -= (smp_num_cpus - 1) * FIXED_1;	/* don't count the per-CPU idle threads */
#endif
	return nr;
}

static inline void calc_load(unsigned long ticks)
{
	unsigned long active_tasks;	/* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;
	if (count < 0) {
		count += LOAD_FREQ;
		active_tasks = count_active_tasks();
		CALC_LOAD(avenrun[0], EXP_1, active_tasks);
		CALC_LOAD(avenrun[1], EXP_5, active_tasks);
		CALC_LOAD(avenrun[2], EXP_15, active_tasks);
	}
}
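
/*
 * Worked example (illustrative): with FIXED_1 representing 1.0, every
 * LOAD_FREQ ticks CALC_LOAD folds the current task count n into an
 * exponentially-weighted moving average, roughly
 *
 *	load = (load * exp + n * (FIXED_1 - exp)) >> FSHIFT
 *
 * where exp < FIXED_1 is the per-interval decay factor (EXP_1, EXP_5
 * or EXP_15).  With one runnable task forever, avenrun converges to
 * FIXED_1 and /proc/loadavg reports 1.00.
 */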

/*
 * This routine handles the overflow of the microsecond field:
 * leap-second bookkeeping plus the NTP phase and frequency
 * adjustments, run once per second.
 */
static void second_overflow(void)
{
	long ltemp;

	/* Bump the maxerror field */
	time_maxerror = (0x70000000 - time_maxerror <
			 time_tolerance >> SHIFT_USEC) ?
		0x70000000 : (time_maxerror + (time_tolerance >> SHIFT_USEC));

	/*
	 * Leap second processing.  If in leap-insert state at the end of
	 * the day, the system clock is set back one second; if in
	 * leap-delete state, it is set ahead one second.
	 */
	switch (time_state) {

	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;

	case TIME_INS:
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_state = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 UTC\n");
		}
		break;

	case TIME_DEL:
		if ((xtime.tv_sec + 1) % 86400 == 0) {
			xtime.tv_sec++;
			time_state = TIME_WAIT;
			printk("Clock: deleting leap second 23:59:59 UTC\n");
		}
		break;

	case TIME_OOP:
		time_state = TIME_WAIT;
		break;

	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}

	/*
	 * Compute the phase adjustment for the next second.  In PLL mode
	 * the offset is reduced by a factor depending on the time
	 * constant; in FLL mode it is used directly.  In either mode the
	 * per-second adjustment is clamped to MAXPHASE/MINSEC.
	 */
	if (time_offset < 0) {
		ltemp = -time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
		time_offset += ltemp;
		time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
	} else {
		ltemp = time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
		time_offset -= ltemp;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
	}

	/*
	 * Add in the frequency estimate.  When the PPS signal has been
	 * absent for too long, gnaw on the watchdog counter and clear the
	 * PPS status bits.
	 */
	pps_valid++;
	if (pps_valid == PPS_VALID) {
		pps_jitter = MAXTIME;
		pps_stabil = MAXFREQ;
		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
				 STA_PPSWANDER | STA_PPSERROR);
	}
	ltemp = time_freq + pps_freq;
	if (ltemp < 0)
		time_adj -= -ltemp >>
			(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
	else
		time_adj += ltemp >>
			(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

#if HZ == 100
	/* compensate for (HZ==100) != 128: add 25% to get 125, leaving
	   only a 2.3% error */
	if (time_adj < 0)
		time_adj -= -time_adj >> 2;
	else
		time_adj += time_adj >> 2;
#endif
}

/*
 * Advance xtime by one tick, applying the NTP phase adjustment
 * (time_adj) and any outstanding adjtime() correction.
 */
static void update_wall_time_one_tick(void)
{
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		long ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase >= FINEUSEC) {
		long ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust) {
		/*
		 * We are doing an adjtime thing.  Limit the amount of the
		 * step for *next* tick to the range -tickadj..+tickadj.
		 */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		/* Reduce the amount of adjustment left to do */
		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;
}

/*
 * Using a loop looks inefficient, but "ticks" is usually just one (we
 * shouldn't be losing ticks; the loop is mainly for interrupt-latency
 * reasons, not because we expect lots of lost timer ticks).
 */
static void update_wall_time(unsigned long ticks)
{
	do {
		ticks--;
		update_wall_time_one_tick();
	} while (ticks);

	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();
	}
}

static inline void do_process_times(struct task_struct *p,
	unsigned long user, unsigned long system)
{
	long psecs;

	p->utime += user;
	p->stime += system;

	psecs = (p->stime + p->utime) / HZ;
	if (psecs > p->rlim[RLIMIT_CPU].rlim_cur) {
		/* Send SIGXCPU every second.. */
		if (psecs * HZ == p->stime + p->utime)
			send_sig(SIGXCPU, p, 1);
		/* and SIGKILL when we go over max.. */
		if (psecs > p->rlim[RLIMIT_CPU].rlim_max)
			send_sig(SIGKILL, p, 1);
	}
}

static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
{
	unsigned long it_virt = p->it_virt_value;

	if (it_virt) {
		if (it_virt <= ticks) {
			it_virt = ticks + p->it_virt_incr;
			send_sig(SIGVTALRM, p, 1);
		}
		p->it_virt_value = it_virt - ticks;
	}
}

static inline void do_it_prof(struct task_struct * p, unsigned long ticks)
{
	unsigned long it_prof = p->it_prof_value;

	if (it_prof) {
		if (it_prof <= ticks) {
			it_prof = ticks + p->it_prof_incr;
			send_sig(SIGPROF, p, 1);
		}
		p->it_prof_value = it_prof - ticks;
	}
}

static __inline__ void update_one_process(struct task_struct *p,
	unsigned long ticks, unsigned long user, unsigned long system)
{
	do_process_times(p, user, system);
	do_it_virt(p, user);	/* the virtual timer runs on user time only */
	do_it_prof(p, ticks);
}

static void update_process_times(unsigned long ticks, unsigned long system)
{
#ifndef __SMP__
	struct task_struct * p = current;
	unsigned long user = ticks - system;
	if (p->pid) {
		p->counter -= ticks;
		if (p->counter < 0) {
			p->counter = 0;
			need_resched = 1;
		}
		if (p->priority < DEF_PRIORITY)
			kstat.cpu_nice += user;
		else
			kstat.cpu_user += user;
		kstat.cpu_system += system;
	}
	update_one_process(p, ticks, user, system);
#else
	int cpu, j;
	cpu = smp_processor_id();
	for (j = 0; j < smp_num_cpus; j++)
	{
		int i = cpu_logical_map[j];
		struct task_struct *p;

#ifdef __SMP_PROF__
		if (test_bit(i, &smp_idle_map))
			smp_idle_count[i]++;
#endif
		p = current_set[i];
		/*
		 * Do we have a real process?
		 */
		if (p->pid) {
			/* assume user-mode time unless we know better */
			unsigned long utime = ticks;
			unsigned long stime = 0;
			if (cpu == i) {
				utime = ticks - system;
				stime = system;
			} else if (smp_proc_in_lock[i]) {
				utime = 0;
				stime = ticks;
			}
			update_one_process(p, ticks, utime, stime);

			p->counter -= ticks;
			if (p->counter >= 0)
				continue;
			p->counter = 0;
		} else {
			/*
			 * Idle processor found: do we have anything
			 * it could run?
			 */
			if (!(0x7fffffff & smp_process_available))
				continue;
		}
		/* Ok, we should reschedule */
		if (i == cpu)
			need_resched = 1;
		else
			smp_message_pass(i, MSG_RESCHEDULE, 0L, 0);
	}
#endif
}

static unsigned long lost_ticks = 0;
static unsigned long lost_ticks_system = 0;

static void timer_bh(void)
{
	unsigned long ticks, system;

	run_old_timers();

	cli();
	run_timer_list();
	ticks = lost_ticks;
	lost_ticks = 0;
	system = lost_ticks_system;
	lost_ticks_system = 0;
	sti();

	if (ticks) {
		calc_load(ticks);
		update_wall_time(ticks);
		update_process_times(ticks, system);
	}
}

/*
 * Run the bottom-half stuff only about 100 times a second, so that
 * machines with a high timer frequency don't pay for it on every tick.
 */
#if HZ > 100
#define should_run_timers(x) ((x) >= HZ/100)
#else
#define should_run_timers(x) (1)
#endif

void do_timer(struct pt_regs * regs)
{
	(*(unsigned long *)&jiffies)++;
	lost_ticks++;
	if (should_run_timers(lost_ticks))
		mark_bh(TIMER_BH);
	if (!user_mode(regs)) {
		lost_ticks_system++;
		/* kernel-mode profiling: bump the histogram bucket for this PC */
		if (prof_buffer && current->pid) {
			extern int _stext;
			unsigned long ip = instruction_pointer(regs);
			ip -= (unsigned long) &_stext;
			ip >>= prof_shift;
			if (ip < prof_len)
				prof_buffer[ip]++;
		}
	}
	if (tq_timer)
		mark_bh(TQUEUE_BH);
}

#ifndef __alpha__

/*
 * For backwards compatibility?  This can be done in libc, so the Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned int sys_alarm(unsigned int seconds)
{
	struct itimerval it_new, it_old;
	unsigned int oldalarm;

	it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
	it_new.it_value.tv_sec = seconds;
	it_new.it_value.tv_usec = 0;
	_setitimer(ITIMER_REAL, &it_new, &it_old);
	oldalarm = it_old.it_value.tv_sec;
	/* ehhh.. We can't return 0 if we have an alarm pending;
	   better to return too much than too little anyway */
	if (it_old.it_value.tv_usec)
		oldalarm++;
	return oldalarm;
}

/*
 * The Alpha uses getxpid, getxuid and getxgid instead.
 */
asmlinkage int sys_getpid(void)
{
	return current->pid;
}

asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}

asmlinkage int sys_getuid(void)
{
	return current->uid;
}

asmlinkage int sys_geteuid(void)
{
	return current->euid;
}

asmlinkage int sys_getgid(void)
{
	return current->gid;
}

asmlinkage int sys_getegid(void)
{
	return current->egid;
}

/*
 * This has been replaced by sys_setpriority.  It is kept for
 * backward compatibility.
 */
asmlinkage int sys_nice(int increment)
{
	unsigned long newprio;
	int increase = 0;

	newprio = increment;
	if (increment < 0) {
		if (!suser())
			return -EPERM;
		newprio = -increment;
		increase = 1;
	}
	if (newprio > 40)
		newprio = 40;
	/*
	 * "Normalize" the priority: traditional Unix nice values are
	 * -20..20, but Linux expresses priority as the length of the
	 * timeslice in ticks, so map the 40-step nice range onto the
	 * DEF_PRIORITY*2 range.  The rounding is why we work with the
	 * absolute value.
	 */
	newprio = (newprio * DEF_PRIORITY + 10) / 20;
	increment = newprio;
	if (increase)
		increment = -increment;
	newprio = current->priority - increment;
	if (newprio < 1)
		newprio = 1;
	if (newprio > DEF_PRIORITY*2)
		newprio = DEF_PRIORITY*2;
	current->priority = newprio;
	return 0;
}
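
/*
 * Worked example (illustrative, assuming HZ=100 so DEF_PRIORITY is 20):
 * nice(10) gives newprio = (10*20+10)/20 = 10, so the caller's priority
 * drops by 10 ticks; nice(-20) (root only) gives (20*20+10)/20 = 20 and
 * raises it by 20, clamped to the 1..2*DEF_PRIORITY range.
 */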

#endif

static struct task_struct *find_process_by_pid(pid_t pid)
{
	struct task_struct *p, *q;

	if (pid == 0)
		p = current;
	else {
		p = NULL;
		for_each_task(q) {
			if (q && q->pid == pid) {
				p = q;
				break;
			}
		}
	}
	return p;
}

static int setscheduler(pid_t pid, int policy,
			struct sched_param *param)
{
	int error;
	struct sched_param lp;
	struct task_struct *p;

	if (!param || pid < 0)
		return -EINVAL;

	error = verify_area(VERIFY_READ, param, sizeof(struct sched_param));
	if (error)
		return error;
	memcpy_fromfs(&lp, param, sizeof(struct sched_param));

	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	if (policy < 0)
		policy = p->policy;
	else if (policy != SCHED_FIFO && policy != SCHED_RR &&
		 policy != SCHED_OTHER)
		return -EINVAL;

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are 1..99; the
	 * only valid priority for SCHED_OTHER is 0.
	 */
	if (lp.sched_priority < 0 || lp.sched_priority > 99)
		return -EINVAL;
	if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
		return -EINVAL;

	if ((policy == SCHED_FIFO || policy == SCHED_RR) && !suser())
		return -EPERM;
	if ((current->euid != p->euid) && (current->euid != p->uid) &&
	    !suser())
		return -EPERM;

	p->policy = policy;
	p->rt_priority = lp.sched_priority;
	if (p->next_run)
		move_last_runqueue(p);
	schedule();

	return 0;
}
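
/*
 * Illustrative user-level sketch (not part of this file): the checks
 * above implement the POSIX interface, e.g.
 *
 *	struct sched_param sp;
 *	sp.sched_priority = 50;
 *	if (sched_setscheduler(0, SCHED_FIFO, &sp) < 0)
 *		perror("sched_setscheduler");
 *
 * which makes the calling process (pid 0 means "current") realtime
 * FIFO; it fails with EPERM for non-root callers and EINVAL for a
 * priority outside 1..99.
 */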

asmlinkage int sys_sched_setscheduler(pid_t pid, int policy,
				      struct sched_param *param)
{
	return setscheduler(pid, policy, param);
}

asmlinkage int sys_sched_setparam(pid_t pid, struct sched_param *param)
{
	return setscheduler(pid, -1, param);
}

asmlinkage int sys_sched_getscheduler(pid_t pid)
{
	struct task_struct *p;

	if (pid < 0)
		return -EINVAL;

	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	return p->policy;
}

asmlinkage int sys_sched_getparam(pid_t pid, struct sched_param *param)
{
	int error;
	struct task_struct *p;
	struct sched_param lp;

	if (!param || pid < 0)
		return -EINVAL;

	error = verify_area(VERIFY_WRITE, param, sizeof(struct sched_param));
	if (error)
		return error;

	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	lp.sched_priority = p->rt_priority;
	memcpy_tofs(param, &lp, sizeof(struct sched_param));

	return 0;
}

asmlinkage int sys_sched_yield(void)
{
	/* simply move the running process to the end of the run queue */
	move_last_runqueue(current);

	return 0;
}

asmlinkage int sys_sched_get_priority_max(int policy)
{
	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		return 99;
	case SCHED_OTHER:
		return 0;
	}

	return -EINVAL;
}

asmlinkage int sys_sched_get_priority_min(int policy)
{
	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		return 1;
	case SCHED_OTHER:
		return 0;
	}

	return -EINVAL;
}

asmlinkage int sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
{
	int error;
	struct timespec t;

	error = verify_area(VERIFY_WRITE, interval, sizeof(struct timespec));
	if (error)
		return error;

	/*
	 * Not yet implemented: the round-robin interval is never filled
	 * in, so report -ENOSYS.  The copy-out below is dead code until
	 * a real value is computed here.
	 */
	t.tv_sec = 0;
	t.tv_nsec = 0;
	return -ENOSYS;
	memcpy_tofs(interval, &t, sizeof(struct timespec));

	return 0;
}

/*
 * Convert between struct timespec and jiffies, rounding the requested
 * sleep length up so that we never sleep too short.
 */
static unsigned long timespectojiffies(struct timespec *value)
{
	unsigned long sec = (unsigned) value->tv_sec;
	long nsec = value->tv_nsec;

	if (sec > (LONG_MAX / HZ))
		return LONG_MAX;
	nsec += 1000000000L / HZ - 1;	/* round up to the next whole tick */
	nsec /= 1000000000L / HZ;
	return HZ * sec + nsec;
}

static void jiffiestotimespec(unsigned long jiffies, struct timespec *value)
{
	value->tv_nsec = (jiffies % HZ) * (1000000000L / HZ);
	value->tv_sec = jiffies / HZ;
	return;
}
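
/*
 * Worked example (illustrative, assuming HZ=100, i.e. 10 ms ticks):
 * timespectojiffies() rounds up, so {0, 1 ns} already yields 1 jiffy
 * and {0, 25000000 ns} yields 3; jiffiestotimespec(3, ...) converts
 * back to {0, 30000000 ns}.
 */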

asmlinkage int sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
{
	int error;
	struct timespec t;
	unsigned long expire;

	error = verify_area(VERIFY_READ, rqtp, sizeof(struct timespec));
	if (error)
		return error;
	memcpy_fromfs(&t, rqtp, sizeof(struct timespec));
	if (rmtp) {
		error = verify_area(VERIFY_WRITE, rmtp,
				    sizeof(struct timespec));
		if (error)
			return error;
	}

	if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
		return -EINVAL;

	if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
	    current->policy != SCHED_OTHER) {
		/*
		 * Short delay requests up to 2 ms are handled with high
		 * precision by a busy wait for all real-time processes.
		 */
		udelay((t.tv_nsec + 999) / 1000);
		return 0;
	}

	expire = timespectojiffies(&t) + (t.tv_sec || t.tv_nsec) + jiffies;
	current->timeout = expire;
	current->state = TASK_INTERRUPTIBLE;
	schedule();

	if (expire > jiffies) {
		/* woken early (by a signal): report the time remaining */
		if (rmtp) {
			jiffiestotimespec(expire - jiffies -
					  (expire > jiffies + 1), &t);
			memcpy_tofs(rmtp, &t, sizeof(struct timespec));
		}
		return -EINTR;
	}

	return 0;
}

static void show_task(int nr, struct task_struct * p)
{
	unsigned long free;
	static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
	if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[p->state]);
	else
		printk(" ");
#if ((~0UL) == 0xffffffff)
	if (p == current)
		printk(" current  ");
	else
		printk(" %08lX ", thread_saved_pc(&p->tss));
#else
	if (p == current)
		printk("   current task   ");
	else
		printk(" %016lx ", thread_saved_pc(&p->tss));
#endif
	/* scan the kernel stack for the high-water mark of unused longs */
	for (free = 1; free < PAGE_SIZE/sizeof(long); free++) {
		if (((unsigned long *)p->kernel_stack_page)[free])
			break;
	}
	printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
		printk("      ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);
	else
		printk("       ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);
	else
		printk("\n");
}

void show_state(void)
{
	int i;

#if ((~0UL) == 0xffffffff)
	printk("\n"
	       "                        free                        sibling\n");
	printk("  task            PC    stack   pid father child younger older\n");
#else
	printk("\n"
	       "                                free                        sibling\n");
	printk("  task                PC        stack   pid father child younger older\n");
#endif
	for (i = 0; i < NR_TASKS; i++)
		if (task[i])
			show_task(i, task[i]);
}

void sched_init(void)
{
	/*
	 * We have to do a little magic to get the first process right in
	 * SMP mode: record which processor boots us.
	 */
	int cpu = smp_processor_id();
	current_set[cpu] = &init_task;
#ifdef __SMP__
	init_task.processor = cpu;
#endif
	init_bh(TIMER_BH, timer_bh);
	init_bh(TQUEUE_BH, tqueue_bh);
	init_bh(IMMEDIATE_BH, immediate_bh);
}