This source file includes the following definitions:
- add_to_runqueue
- del_from_runqueue
- move_last_runqueue
- wake_up_process
- process_timeout
- goodness
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- run_timer_list
- run_old_timers
- tqueue_bh
- immediate_bh
- count_active_tasks
- calc_load
- second_overflow
- update_wall_time_one_tick
- update_wall_time
- do_process_times
- do_it_virt
- do_it_prof
- update_one_process
- update_process_times
- timer_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- find_process_by_pid
- setscheduler
- sys_sched_setscheduler
- sys_sched_setparam
- sys_sched_getscheduler
- sys_sched_getparam
- sys_sched_yield
- sys_sched_get_priority_max
- sys_sched_get_priority_min
- sys_sched_rr_get_interval
- timespectojiffies
- jiffiestotimespec
- sys_nanosleep
- show_task
- show_state
- sched_init

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/fdreg.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/ptrace.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/tqueue.h>
#include <linux/resource.h>
#include <linux/mm.h>
#include <linux/smp.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

#include <linux/timex.h>

int securelevel = 0;                    /* system security level */

long tick = 1000000 / HZ;               /* timer interrupt period */
volatile struct timeval xtime;          /* the current time */
int tickadj = 500/HZ;                   /* microsecs */

DECLARE_TASK_QUEUE(tq_timer);
DECLARE_TASK_QUEUE(tq_immediate);
DECLARE_TASK_QUEUE(tq_scheduler);

/*
 * phase-lock loop variables
 */
int time_state = TIME_ERROR;    /* clock synchronization status */
int time_status = STA_UNSYNC;   /* clock status bits */
long time_offset = 0;           /* time adjustment (us) */
long time_constant = 2;         /* pll time constant */
long time_tolerance = MAXFREQ;  /* frequency tolerance (ppm) */
long time_precision = 1;        /* clock precision (us) */
long time_maxerror = MAXPHASE;  /* maximum error (us) */
long time_esterror = MAXPHASE;  /* estimated error (us) */
long time_phase = 0;            /* phase offset (scaled us) */
long time_freq = 0;             /* frequency offset (scaled ppm) */
long time_adj = 0;              /* tick adjust (scaled 1 / HZ) */
long time_reftime = 0;          /* time at last adjustment (s) */

long time_adjust = 0;
long time_adjust_step = 0;

int need_resched = 0;
unsigned long event = 0;

extern int _setitimer(int, struct itimerval *, struct itimerval *);
unsigned int * prof_buffer = NULL;
unsigned long prof_len = 0;
unsigned long prof_shift = 0;

#define _S(nr) (1<<((nr)-1))

extern void mem_use(void);

static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
unsigned long init_user_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;

struct mm_struct init_mm = INIT_MM;
struct task_struct init_task = INIT_TASK;

unsigned long volatile jiffies = 0;

struct task_struct *current_set[NR_CPUS];
struct task_struct *last_task_used_math = NULL;

struct task_struct * task[NR_TASKS] = {&init_task, };

struct kernel_stat kstat = { 0 };

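/*
 * The run-queue is a circular doubly-linked list of runnable tasks,
 * threaded through next_run/prev_run with init_task serving as the
 * list head.  The helpers below do the raw list manipulation and are
 * normally called with interrupts disabled.
 */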
static inline void add_to_runqueue(struct task_struct * p)
{
#ifdef __SMP__
        int cpu=smp_processor_id();
#endif
#if 1   /* sanity testing */
        if (p->next_run || p->prev_run) {
                printk("task already on run-queue\n");
                return;
        }
#endif
        if (p->counter > current->counter + 3)
                need_resched = 1;
        nr_running++;
        (p->prev_run = init_task.prev_run)->next_run = p;
        p->next_run = &init_task;
        init_task.prev_run = p;
#ifdef __SMP__
        /* spin until we own bit 31, which serializes updates of the
           process-available count */
        while(set_bit(31,&smp_process_available));
#if 0
        {
                while(test_bit(31,&smp_process_available))
                {
                        if(clear_bit(cpu,&smp_invalidate_needed))
                        {
                                local_flush_tlb();
                                set_bit(cpu,&cpu_callin_map[0]);
                        }
                }
        }
#endif
        smp_process_available++;
        clear_bit(31,&smp_process_available);
        if ((0!=p->pid) && smp_threads_ready)
        {
                int i;
                for (i=0;i<smp_num_cpus;i++)
                {
                        /* kick an idle CPU, if there is one, to pick
                           up the new task */
                        if (0==current_set[cpu_logical_map[i]]->pid)
                        {
                                smp_message_pass(cpu_logical_map[i], MSG_RESCHEDULE, 0L, 0);
                                break;
                        }
                }
        }
#endif
}

static inline void del_from_runqueue(struct task_struct * p)
{
        struct task_struct *next = p->next_run;
        struct task_struct *prev = p->prev_run;

#if 1   /* sanity testing */
        if (!next || !prev) {
                printk("task not on run-queue\n");
                return;
        }
#endif
        if (p == &init_task) {
                static int nr = 0;
                if (nr < 5) {
                        nr++;
                        printk("idle task may not sleep\n");
                }
                return;
        }
        nr_running--;
        next->prev_run = prev;
        prev->next_run = next;
        p->next_run = NULL;
        p->prev_run = NULL;
}

static inline void move_last_runqueue(struct task_struct * p)
{
        struct task_struct *next = p->next_run;
        struct task_struct *prev = p->prev_run;

        /* remove from list */
        next->prev_run = prev;
        prev->next_run = next;
        /* add back to list */
        p->next_run = &init_task;
        prev = init_task.prev_run;
        init_task.prev_run = p;
        p->prev_run = prev;
        prev->next_run = p;
}

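/*
 * Wake up a process.  Put it on the run-queue if it's not already
 * there.  The "current" process is always on the run-queue (except
 * when the actual re-schedule is in progress), so it can mark itself
 * runnable with a simple "current->state = TASK_RUNNING" without the
 * overhead of this function.
 */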
inline void wake_up_process(struct task_struct * p)
{
        unsigned long flags;

        save_flags(flags);
        cli();
        p->state = TASK_RUNNING;
        if (!p->next_run)
                add_to_runqueue(p);
        restore_flags(flags);
}

/* timer callback: the timeout has expired, make the task runnable again */
static void process_timeout(unsigned long __data)
{
        struct task_struct * p = (struct task_struct *) __data;

        p->timeout = 0;
        wake_up_process(p);
}

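/*
 * This is the function that decides how desirable a process is.
 * You can weigh different processes against each other depending
 * on what CPU they've run on lately etc to try to handle cache
 * and TLB miss penalties.
 *
 * Return values:
 *       -1000: never select this
 *           0: out of time, recheck counters (but it might still be selected)
 *         +ve: "goodness" value (the larger, the better)
 *       +1000: realtime process, select this
 */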
static inline int goodness(struct task_struct * p, struct task_struct * prev, int this_cpu)
{
        int weight;

#ifdef __SMP__
        /* We are not permitted to pick a task that is already running */
        if (p->processor != NO_PROC_ID)
                return -1000;
#endif

        /*
         * Realtime processes always rank above SCHED_OTHER ones,
         * ordered among themselves by rt_priority.
         */
        if (p->policy != SCHED_OTHER)
                return 1000 + p->rt_priority;

        /*
         * Give the process a first-approximation goodness value
         * according to the number of clock-ticks it has left.
         *
         * Don't do any other calculations if the time slice is over.
         */
        weight = p->counter;
        if (weight) {

#ifdef __SMP__
                /* Give a largish advantage to the same processor...   */
                /* (this is equivalent to penalizing other processors) */
                if (p->last_processor == this_cpu)
                        weight += PROC_CHANGE_PENALTY;
#endif

                /* ...and a slight advantage to the current process */
                if (p == prev)
                        weight += 1;
        }

        return weight;
}

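/*
 * 'schedule()' is the scheduler function.  It's a very simple and nice
 * scheduler: it's not perfect, but certainly works for most things.
 *
 * NOTE!!  Task 0 is the 'idle' task, which gets called when no other
 * tasks can run.  It can not be killed, and it cannot sleep.  The 'state'
 * information in task[0] is never used.
 */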
asmlinkage void schedule(void)
{
        int c;
        struct task_struct * p;
        struct task_struct * prev, * next;
        unsigned long timeout = 0;
        int this_cpu=smp_processor_id();

        /* check alarm, wake up any interruptible tasks that have got a signal */

        if (intr_count)
                goto scheduling_in_interrupt;

        if (bh_active & bh_mask) {
                intr_count = 1;
                do_bottom_half();
                intr_count = 0;
        }

        run_task_queue(&tq_scheduler);

        need_resched = 0;
        prev = current;
        cli();
        /* move an exhausted RR process to be last.. */
        if (!prev->counter && prev->policy == SCHED_RR) {
                prev->counter = prev->priority;
                move_last_runqueue(prev);
        }
        switch (prev->state) {
                case TASK_INTERRUPTIBLE:
                        if (prev->signal & ~prev->blocked)
                                goto makerunnable;
                        timeout = prev->timeout;
                        if (timeout && (timeout <= jiffies)) {
                                prev->timeout = 0;
                                timeout = 0;
                makerunnable:
                                prev->state = TASK_RUNNING;
                                break;
                        }
                default:
                        del_from_runqueue(prev);
                case TASK_RUNNING:
        }
        p = init_task.next_run;
        sti();

#ifdef __SMP__
        /*
         * This is safe as we do not permit re-entry of schedule()
         */
        prev->processor = NO_PROC_ID;
#define idle_task (task[this_cpu])
#else
#define idle_task (&init_task)
#endif

        /*
         * Note! there may appear new tasks on the run-queue during this,
         * as interrupts are enabled.  However, they will be put on front
         * of the list, so our list starting at "p" is essentially fixed.
         */
        /* this is the scheduler proper: */
        c = -1000;
        next = idle_task;
        while (p != &init_task) {
                int weight = goodness(p, prev, this_cpu);
                if (weight > c)
                        c = weight, next = p;
                p = p->next_run;
        }

        /* if all runnable processes have "counter == 0", re-calculate counters */
        if (!c) {
                for_each_task(p)
                        p->counter = (p->counter >> 1) + p->priority;
        }
#ifdef __SMP__
        /*
         * Allocate process to CPU
         */
        next->processor = this_cpu;
        next->last_processor = this_cpu;
#endif
#ifdef __SMP_PROF__
        /* mark processor running an idle thread */
        if (0==next->pid)
                set_bit(this_cpu,&smp_idle_map);
        else
                clear_bit(this_cpu,&smp_idle_map);
#endif
        if (prev != next) {
                struct timer_list timer;

                kstat.context_swtch++;
                if (timeout) {
                        init_timer(&timer);
                        timer.expires = timeout;
                        timer.data = (unsigned long) prev;
                        timer.function = process_timeout;
                        add_timer(&timer);
                }
                get_mmu_context(next);
                switch_to(prev,next);
                if (timeout)
                        del_timer(&timer);
        }
        return;

scheduling_in_interrupt:
        printk("Aiee: scheduling in interrupt %p\n",
                __builtin_return_address(0));
}

#ifndef __alpha__

/*
 * The Alpha doesn't need a separate pause() system call (libc can
 * implement it); all other ports use this generic version.
 */
asmlinkage int sys_pause(void)
{
        current->state = TASK_INTERRUPTIBLE;
        schedule();
        return -ERESTARTNOHAND;
}

#endif

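/*
 * wake_up doesn't wake up stopped processes - they have to be awakened
 * with signals or similar.
 *
 * Note that this doesn't need cli-sti pairs: interrupts may not change
 * the wait-queue structures directly, but only call wake_up() to wake
 * a process.  The process itself must remove the queue once it has woken.
 */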
void wake_up(struct wait_queue **q)
{
        struct wait_queue *tmp;
        struct task_struct * p;

        if (!q || !(tmp = *q))
                return;
        do {
                if ((p = tmp->task) != NULL) {
                        if ((p->state == TASK_UNINTERRUPTIBLE) ||
                            (p->state == TASK_INTERRUPTIBLE))
                                wake_up_process(p);
                }
                if (!tmp->next) {
                        printk("wait_queue is bad (eip = %p)\n",
                                __builtin_return_address(0));
                        printk("        q = %p\n",q);
                        printk("       *q = %p\n",*q);
                        printk("      tmp = %p\n",tmp);
                        break;
                }
                tmp = tmp->next;
        } while (tmp != *q);
}

void wake_up_interruptible(struct wait_queue **q)
{
        struct wait_queue *tmp;
        struct task_struct * p;

        if (!q || !(tmp = *q))
                return;
        do {
                if ((p = tmp->task) != NULL) {
                        if (p->state == TASK_INTERRUPTIBLE)
                                wake_up_process(p);
                }
                if (!tmp->next) {
                        printk("wait_queue is bad (eip = %p)\n",
                                __builtin_return_address(0));
                        printk("        q = %p\n",q);
                        printk("       *q = %p\n",*q);
                        printk("      tmp = %p\n",tmp);
                        break;
                }
                tmp = tmp->next;
        } while (tmp != *q);
}

void __down(struct semaphore * sem)
{
        struct wait_queue wait = { current, NULL };
        add_wait_queue(&sem->wait, &wait);
        current->state = TASK_UNINTERRUPTIBLE;
        while (sem->count <= 0) {
                schedule();
                current->state = TASK_UNINTERRUPTIBLE;
        }
        current->state = TASK_RUNNING;
        remove_wait_queue(&sem->wait, &wait);
}

static inline void __sleep_on(struct wait_queue **p, int state)
{
        unsigned long flags;
        struct wait_queue wait = { current, NULL };

        if (!p)
                return;
        if (current == task[0])
                panic("task[0] trying to sleep");
        current->state = state;
        add_wait_queue(p, &wait);
        save_flags(flags);
        sti();
        schedule();
        remove_wait_queue(p, &wait);
        restore_flags(flags);
}

void interruptible_sleep_on(struct wait_queue **p)
{
        __sleep_on(p,TASK_INTERRUPTIBLE);
}

void sleep_on(struct wait_queue **p)
{
        __sleep_on(p,TASK_UNINTERRUPTIBLE);
}

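/*
 * A minimal sketch of how the sleep_on()/wake_up() pair is typically
 * used by a driver.  The names my_queue, my_event_pending and the
 * producer side are hypothetical, not from this file:
 *
 *      static struct wait_queue *my_queue = NULL;
 *
 *      while (!my_event_pending)          -- consumer side
 *              sleep_on(&my_queue);       -- block until woken
 *
 *      my_event_pending = 1;              -- producer/interrupt side
 *      wake_up(&my_queue);                -- make all sleepers runnable
 *
 * Note the re-test of the condition in a loop: wake_up() wakes every
 * sleeper, so a task must check that the event it waited for really
 * happened before proceeding.
 */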

/*
 * Timers are kept in a single doubly-linked list, sorted by expiry
 * time.  timer_head, with its expiry of ~0 ("never"), acts both as
 * the list head and as a sentinel that terminates the insertion loop
 * in add_timer().
 */
static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
#define SLOW_BUT_DEBUGGING_TIMERS 0

void add_timer(struct timer_list * timer)
{
        unsigned long flags;
        struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
        if (timer->next || timer->prev) {
                printk("add_timer() called with non-zero list from %p\n",
                        __builtin_return_address(0));
                return;
        }
#endif
        p = &timer_head;
        save_flags(flags);
        cli();
        do {
                p = p->next;
        } while (timer->expires > p->expires);
        timer->next = p;
        timer->prev = p->prev;
        p->prev = timer;
        timer->prev->next = timer;
        restore_flags(flags);
}

int del_timer(struct timer_list * timer)
{
        int ret = 0;
        if (timer->next) {
                unsigned long flags;
                struct timer_list * next;
                save_flags(flags);
                cli();
                if ((next = timer->next) != NULL) {
                        (next->prev = timer->prev)->next = next;
                        timer->next = timer->prev = NULL;
                        ret = 1;
                }
                restore_flags(flags);
        }
        return ret;
}
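
/*
 * A minimal sketch of typical add_timer()/del_timer() usage.  The
 * names my_timer and my_timeout_handler are hypothetical, not part
 * of this file:
 *
 *      static struct timer_list my_timer;
 *      static void my_timeout_handler(unsigned long data);
 *
 *      init_timer(&my_timer);
 *      my_timer.expires = jiffies + 10*HZ;      -- fire in ~10 seconds
 *      my_timer.data = 0;                       -- cookie passed to handler
 *      my_timer.function = my_timeout_handler;
 *      add_timer(&my_timer);
 *      ...
 *      del_timer(&my_timer);                    -- cancel if still pending
 */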

static inline void run_timer_list(void)
{
        struct timer_list * timer;

        while ((timer = timer_head.next) != &timer_head && timer->expires <= jiffies) {
                void (*fn)(unsigned long) = timer->function;
                unsigned long data = timer->data;
                timer->next->prev = timer->prev;
                timer->prev->next = timer->next;
                timer->next = timer->prev = NULL;
                sti();
                fn(data);
                cli();
        }
}

static inline void run_old_timers(void)
{
        struct timer_struct *tp;
        unsigned long mask;

        for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
                if (mask > timer_active)
                        break;
                if (!(mask & timer_active))
                        continue;
                if (tp->expires > jiffies)
                        continue;
                timer_active &= ~mask;
                tp->fn();
                sti();
        }
}

void tqueue_bh(void)
{
        run_task_queue(&tq_timer);
}

void immediate_bh(void)
{
        run_task_queue(&tq_immediate);
}

unsigned long timer_active = 0;
struct timer_struct timer_table[32];

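/*
 * The 1, 5 and 15 minute load averages, kept as fixed-point numbers
 * (FIXED_1 == 1.0).  calc_load() below updates them every LOAD_FREQ
 * ticks from the count of running, uninterruptible and swapping tasks.
 */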
unsigned long avenrun[3] = { 0,0,0 };

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
        struct task_struct **p;
        unsigned long nr = 0;

        for(p = &LAST_TASK; p > &FIRST_TASK; --p)
                if (*p && ((*p)->state == TASK_RUNNING ||
                           (*p)->state == TASK_UNINTERRUPTIBLE ||
                           (*p)->state == TASK_SWAPPING))
                        nr += FIXED_1;
#ifdef __SMP__
        nr-=(smp_num_cpus-1)*FIXED_1;
#endif
        return nr;
}

static inline void calc_load(unsigned long ticks)
{
        unsigned long active_tasks;     /* fixed-point */
        static int count = LOAD_FREQ;

        count -= ticks;
        if (count < 0) {
                count += LOAD_FREQ;
                active_tasks = count_active_tasks();
                CALC_LOAD(avenrun[0], EXP_1, active_tasks);
                CALC_LOAD(avenrun[1], EXP_5, active_tasks);
                CALC_LOAD(avenrun[2], EXP_15, active_tasks);
        }
}

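/*
 * This routine handles the overflow of the microsecond field.
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 */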
static void second_overflow(void)
{
        long ltemp;

        /* Bump the maxerror field */
        time_maxerror += time_tolerance >> SHIFT_USEC;
        if ( time_maxerror > MAXPHASE )
                time_maxerror = MAXPHASE;

        /*
         * Leap second processing.  If in leap-insert state at the end
         * of the day, the system clock is set back one second; if in
         * leap-delete state, the system clock is set ahead one second.
         */
        switch (time_state) {

        case TIME_OK:
                if (time_status & STA_INS)
                        time_state = TIME_INS;
                else if (time_status & STA_DEL)
                        time_state = TIME_DEL;
                break;

        case TIME_INS:
                if (xtime.tv_sec % 86400 == 0) {
                        xtime.tv_sec--;
                        time_state = TIME_OOP;
                        printk("Clock: inserting leap second 23:59:60 UTC\n");
                }
                break;

        case TIME_DEL:
                if ((xtime.tv_sec + 1) % 86400 == 0) {
                        xtime.tv_sec++;
                        time_state = TIME_WAIT;
                        printk("Clock: deleting leap second 23:59:59 UTC\n");
                }
                break;

        case TIME_OOP:
                time_state = TIME_WAIT;
                break;

        case TIME_WAIT:
                if (!(time_status & (STA_INS | STA_DEL)))
                        time_state = TIME_OK;
        }

        /*
         * Compute the phase adjustment for the next second.  In PLL
         * mode, the offset is reduced by a fixed factor times the time
         * constant.  In FLL mode the offset is used directly.  In
         * either mode, the maximum phase adjustment for each second is
         * clamped so as to spread the adjustment over not more than
         * the number of seconds between updates.
         */
        if (time_offset < 0) {
                ltemp = -time_offset;
                if (!(time_status & STA_FLL))
                        ltemp >>= SHIFT_KG + time_constant;
                if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
                        ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
                time_offset += ltemp;
                time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
        } else {
                ltemp = time_offset;
                if (!(time_status & STA_FLL))
                        ltemp >>= SHIFT_KG + time_constant;
                if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
                        ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
                time_offset -= ltemp;
                time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
        }

        /*
         * Compute the frequency estimate and additional phase
         * adjustment due to frequency error for the next second.  When
         * the PPS signal is engaged, gnaw on the watchdog counter and
         * update the frequency computed by the pll and the PPS signal.
         */
        pps_valid++;
        if (pps_valid == PPS_VALID) {
                pps_jitter = MAXTIME;
                pps_stabil = MAXFREQ;
                time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
                                 STA_PPSWANDER | STA_PPSERROR);
        }
        ltemp = time_freq + pps_freq;
        if (ltemp < 0)
                time_adj -= -ltemp >>
                    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
        else
                time_adj += ltemp >>
                    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

#if HZ == 100
        /* compensate for (HZ==100) != 128: add 25% to get 125; only 3% error */
        if (time_adj < 0)
                time_adj -= -time_adj >> 2;
        else
                time_adj += time_adj >> 2;
#endif
}

static void update_wall_time_one_tick(void)
{
        /*
         * Advance the phase; once it amounts to a whole microsecond,
         * fold it into the tick below.
         */
        time_phase += time_adj;
        if (time_phase <= -FINEUSEC) {
                long ltemp = -time_phase >> SHIFT_SCALE;
                time_phase += ltemp << SHIFT_SCALE;
                xtime.tv_usec += tick + time_adjust_step - ltemp;
        }
        else if (time_phase >= FINEUSEC) {
                long ltemp = time_phase >> SHIFT_SCALE;
                time_phase -= ltemp << SHIFT_SCALE;
                xtime.tv_usec += tick + time_adjust_step + ltemp;
        } else
                xtime.tv_usec += tick + time_adjust_step;

        if (time_adjust) {
                /*
                 * We are doing an adjtime thing.
                 *
                 * Modify the value of the tick for next time.  Note
                 * that a positive delta means we want the clock to run
                 * fast, so the tick should be bigger.
                 *
                 * Limit the amount of the step for *next* tick to be
                 * in the range -tickadj .. +tickadj.
                 */
                if (time_adjust > tickadj)
                        time_adjust_step = tickadj;
                else if (time_adjust < -tickadj)
                        time_adjust_step = -tickadj;
                else
                        time_adjust_step = time_adjust;

                /* Reduce the amount of time left by this step */
                time_adjust -= time_adjust_step;
        }
        else
                time_adjust_step = 0;
}

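/*
 * Using a loop looks inefficient, but "ticks" is usually just one
 * (we shouldn't be losing ticks; the loop exists mainly for interrupt
 * latency reasons, not because we expect lots of lost timer ticks).
 */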
static void update_wall_time(unsigned long ticks)
{
        do {
                ticks--;
                update_wall_time_one_tick();
        } while (ticks);

        if (xtime.tv_usec >= 1000000) {
                xtime.tv_usec -= 1000000;
                xtime.tv_sec++;
                second_overflow();
        }
}

static inline void do_process_times(struct task_struct *p,
        unsigned long user, unsigned long system)
{
        long psecs;

        p->utime += user;
        p->stime += system;

        psecs = (p->stime + p->utime) / HZ;
        if (psecs > p->rlim[RLIMIT_CPU].rlim_cur) {
                /* Send SIGXCPU every second.. */
                if (psecs * HZ == p->stime + p->utime)
                        send_sig(SIGXCPU, p, 1);
                /* and SIGKILL when we go over max.. */
                if (psecs > p->rlim[RLIMIT_CPU].rlim_max)
                        send_sig(SIGKILL, p, 1);
        }
}

static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
{
        unsigned long it_virt = p->it_virt_value;

        if (it_virt) {
                if (it_virt <= ticks) {
                        it_virt = ticks + p->it_virt_incr;
                        send_sig(SIGVTALRM, p, 1);
                }
                p->it_virt_value = it_virt - ticks;
        }
}

static inline void do_it_prof(struct task_struct * p, unsigned long ticks)
{
        unsigned long it_prof = p->it_prof_value;

        if (it_prof) {
                if (it_prof <= ticks) {
                        it_prof = ticks + p->it_prof_incr;
                        send_sig(SIGPROF, p, 1);
                }
                p->it_prof_value = it_prof - ticks;
        }
}

static __inline__ void update_one_process(struct task_struct *p,
        unsigned long ticks, unsigned long user, unsigned long system)
{
        do_process_times(p, user, system);
        do_it_virt(p, user);
        do_it_prof(p, ticks);
}

static void update_process_times(unsigned long ticks, unsigned long system)
{
#ifndef __SMP__
        struct task_struct * p = current;
        unsigned long user = ticks - system;
        if (p->pid) {
                p->counter -= ticks;
                if (p->counter < 0) {
                        p->counter = 0;
                        need_resched = 1;
                }
                if (p->priority < DEF_PRIORITY)
                        kstat.cpu_nice += user;
                else
                        kstat.cpu_user += user;
                kstat.cpu_system += system;
        }
        update_one_process(p, ticks, user, system);
#else
        int cpu,j;
        cpu = smp_processor_id();
        for (j=0;j<smp_num_cpus;j++)
        {
                int i = cpu_logical_map[j];
                struct task_struct *p;

#ifdef __SMP_PROF__
                if (test_bit(i,&smp_idle_map))
                        smp_idle_count[i]++;
#endif
                p = current_set[i];
                /*
                 * Do we have a real process?
                 */
                if (p->pid) {
                        /* assume user-mode process */
                        unsigned long utime = ticks;
                        unsigned long stime = 0;
                        if (cpu == i) {
                                utime = ticks-system;
                                stime = system;
                        } else if (smp_proc_in_lock[i]) {
                                utime = 0;
                                stime = ticks;
                        }
                        update_one_process(p, ticks, utime, stime);

                        p->counter -= ticks;
                        if (p->counter >= 0)
                                continue;
                        p->counter = 0;
                } else {
                        /*
                         * Idle processor found, do we have anything
                         * we could run?
                         */
                        if (!(0x7fffffff & smp_process_available))
                                continue;
                }
                /* time for a reschedule on this processor */
                if (i==cpu)
                        need_resched = 1;
                else
                        smp_message_pass(i, MSG_RESCHEDULE, 0L, 0);
        }
#endif
}

static unsigned long lost_ticks = 0;
static unsigned long lost_ticks_system = 0;

static void timer_bh(void)
{
        unsigned long ticks, system;

        run_old_timers();

        cli();
        run_timer_list();
        ticks = lost_ticks;
        lost_ticks = 0;
        system = lost_ticks_system;
        lost_ticks_system = 0;
        sti();

        if (ticks) {
                calc_load(ticks);
                update_wall_time(ticks);
                update_process_times(ticks, system);
        }
}

/*
 * Mark the timer bottom half for execution at most about 100 times a
 * second: on HZ > 100 machines, wait until HZ/100 ticks have
 * accumulated before running it.
 */
#if HZ > 100
#define should_run_timers(x) ((x) >= HZ/100)
#else
#define should_run_timers(x) (1)
#endif

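/*
 * do_timer() is the timer interrupt handler proper: it bumps jiffies,
 * accumulates lost ticks, does kernel profiling when in system mode,
 * and defers the heavier bookkeeping to the bottom halves above.
 */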
void do_timer(struct pt_regs * regs)
{
        (*(unsigned long *)&jiffies)++;
        lost_ticks++;
        if (should_run_timers(lost_ticks))
                mark_bh(TIMER_BH);
        if (!user_mode(regs)) {
                lost_ticks_system++;
                if (prof_buffer && current->pid) {
                        extern int _stext;
                        unsigned long ip = instruction_pointer(regs);
                        ip -= (unsigned long) &_stext;
                        ip >>= prof_shift;
                        if (ip < prof_len)
                                prof_buffer[ip]++;
                }
        }
        if (tq_timer)
                mark_bh(TQUEUE_BH);
}

#ifndef __alpha__

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned int sys_alarm(unsigned int seconds)
{
        struct itimerval it_new, it_old;
        unsigned int oldalarm;

        it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
        it_new.it_value.tv_sec = seconds;
        it_new.it_value.tv_usec = 0;
        _setitimer(ITIMER_REAL, &it_new, &it_old);
        oldalarm = it_old.it_value.tv_sec;
        /* ehhh.. We can't return 0 if we have an alarm pending..     */
        /* And we'd better return too much than too little anyway    */
        if (it_old.it_value.tv_usec)
                oldalarm++;
        return oldalarm;
}

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.
 */
asmlinkage int sys_getpid(void)
{
        return current->pid;
}

asmlinkage int sys_getppid(void)
{
        return current->p_opptr->pid;
}

asmlinkage int sys_getuid(void)
{
        return current->uid;
}

asmlinkage int sys_geteuid(void)
{
        return current->euid;
}

asmlinkage int sys_getgid(void)
{
        return current->gid;
}

asmlinkage int sys_getegid(void)
{
        return current->egid;
}

/*
 * This has been replaced by sys_setpriority.  It is kept for
 * backward compatibility on ports that still need it.
 */
asmlinkage int sys_nice(int increment)
{
        unsigned long newprio;
        int increase = 0;

        newprio = increment;
        if (increment < 0) {
                if (!suser())
                        return -EPERM;
                newprio = -increment;
                increase = 1;
        }
        if (newprio > 40)
                newprio = 40;
        /*
         * Normalize: traditional Unix nice values run -20..20, but
         * Linux scales the priority to the length of the timeslice,
         * so map 0..40 onto 0..2*DEF_PRIORITY with rounding.
         */
        newprio = (newprio * DEF_PRIORITY + 10) / 20;
        increment = newprio;
        if (increase)
                increment = -increment;
        newprio = current->priority - increment;
        if (newprio < 1)
                newprio = 1;
        if (newprio > DEF_PRIORITY*2)
                newprio = DEF_PRIORITY*2;
        current->priority = newprio;
        return 0;
}

#endif

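/*
 * Map a pid onto a task_struct: pid 0 means the current process.
 * Returns NULL if no process with that pid exists.
 */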
static struct task_struct *find_process_by_pid(pid_t pid)
{
        struct task_struct *p, *q;

        if (pid == 0)
                p = current;
        else {
                p = NULL;
                for_each_task(q) {
                        if (q && q->pid == pid) {
                                p = q;
                                break;
                        }
                }
        }
        return p;
}

static int setscheduler(pid_t pid, int policy,
                        struct sched_param *param)
{
        int error;
        struct sched_param lp;
        struct task_struct *p;

        if (!param || pid < 0)
                return -EINVAL;

        error = verify_area(VERIFY_READ, param, sizeof(struct sched_param));
        if (error)
                return error;
        memcpy_fromfs(&lp, param, sizeof(struct sched_param));

        p = find_process_by_pid(pid);
        if (!p)
                return -ESRCH;

        if (policy < 0)
                policy = p->policy;
        else if (policy != SCHED_FIFO && policy != SCHED_RR &&
                 policy != SCHED_OTHER)
                return -EINVAL;

        /*
         * Valid priorities for SCHED_FIFO and SCHED_RR are 1..99,
         * and the valid priority for SCHED_OTHER is 0.
         */
        if (lp.sched_priority < 0 || lp.sched_priority > 99)
                return -EINVAL;
        if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
                return -EINVAL;

        if ((policy == SCHED_FIFO || policy == SCHED_RR) && !suser())
                return -EPERM;
        if ((current->euid != p->euid) && (current->euid != p->uid) &&
            !suser())
                return -EPERM;

        p->policy = policy;
        p->rt_priority = lp.sched_priority;
        if (p->next_run)
                move_last_runqueue(p);
        schedule();

        return 0;
}

asmlinkage int sys_sched_setscheduler(pid_t pid, int policy,
                                      struct sched_param *param)
{
        return setscheduler(pid, policy, param);
}

asmlinkage int sys_sched_setparam(pid_t pid, struct sched_param *param)
{
        return setscheduler(pid, -1, param);
}

asmlinkage int sys_sched_getscheduler(pid_t pid)
{
        struct task_struct *p;

        if (pid < 0)
                return -EINVAL;

        p = find_process_by_pid(pid);
        if (!p)
                return -ESRCH;

        return p->policy;
}

asmlinkage int sys_sched_getparam(pid_t pid, struct sched_param *param)
{
        int error;
        struct task_struct *p;
        struct sched_param lp;

        if (!param || pid < 0)
                return -EINVAL;

        error = verify_area(VERIFY_WRITE, param, sizeof(struct sched_param));
        if (error)
                return error;

        p = find_process_by_pid(pid);
        if (!p)
                return -ESRCH;

        lp.sched_priority = p->rt_priority;
        memcpy_tofs(param, &lp, sizeof(struct sched_param));

        return 0;
}

asmlinkage int sys_sched_yield(void)
{
        /* simply go to the back of the run-queue */
        move_last_runqueue(current);

        return 0;
}

asmlinkage int sys_sched_get_priority_max(int policy)
{
        switch (policy) {
        case SCHED_FIFO:
        case SCHED_RR:
                return 99;
        case SCHED_OTHER:
                return 0;
        }

        return -EINVAL;
}

asmlinkage int sys_sched_get_priority_min(int policy)
{
        switch (policy) {
        case SCHED_FIFO:
        case SCHED_RR:
                return 1;
        case SCHED_OTHER:
                return 0;
        }

        return -EINVAL;
}

asmlinkage int sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
{
        int error;
        struct timespec t;

        error = verify_area(VERIFY_WRITE, interval, sizeof(struct timespec));
        if (error)
                return error;

        /* Not implemented yet: tell the caller so.  The copy below is
           dead code until a real interval is computed here. */
        t.tv_sec = 0;
        t.tv_nsec = 0;
        return -ENOSYS;
        memcpy_tofs(interval, &t, sizeof(struct timespec));

        return 0;
}

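/*
 * Convert a timespec to jiffies (rounding up) and back, trying to
 * avoid the most obvious overflows: anything too large to represent
 * saturates to LONG_MAX.
 */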
static unsigned long timespectojiffies(struct timespec *value)
{
        unsigned long sec = (unsigned) value->tv_sec;
        long nsec = value->tv_nsec;

        if (sec > (LONG_MAX / HZ))
                return LONG_MAX;
        nsec += 1000000000L / HZ - 1;
        nsec /= 1000000000L / HZ;
        return HZ * sec + nsec;
}

static void jiffiestotimespec(unsigned long jiffies, struct timespec *value)
{
        value->tv_nsec = (jiffies % HZ) * (1000000000L / HZ);
        value->tv_sec = jiffies / HZ;
        return;
}

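/*
 * nanosleep: very short (sub-2ms) requests from real-time tasks are
 * served by busy-waiting (udelay), since a timer-based sleep could not
 * be anywhere near that precise; everything else goes through the
 * ordinary timeout/schedule() path.
 */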
asmlinkage int sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
{
        int error;
        struct timespec t;
        unsigned long expire;

        error = verify_area(VERIFY_READ, rqtp, sizeof(struct timespec));
        if (error)
                return error;
        memcpy_fromfs(&t, rqtp, sizeof(struct timespec));
        if (rmtp) {
                error = verify_area(VERIFY_WRITE, rmtp,
                                    sizeof(struct timespec));
                if (error)
                        return error;
        }

        if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
                return -EINVAL;

        if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
            current->policy != SCHED_OTHER) {
                /*
                 * Short delay requests up to 2 ms will be handled with
                 * high precision by a busy wait for all real-time processes.
                 */
                udelay((t.tv_nsec + 999) / 1000);
                return 0;
        }

        expire = timespectojiffies(&t) + (t.tv_sec || t.tv_nsec) + jiffies;
        current->timeout = expire;
        current->state = TASK_INTERRUPTIBLE;
        schedule();

        if (expire > jiffies) {
                if (rmtp) {
                        jiffiestotimespec(expire - jiffies -
                                          (expire > jiffies + 1), &t);
                        memcpy_tofs(rmtp, &t, sizeof(struct timespec));
                }
                return -EINTR;
        }

        return 0;
}

static void show_task(int nr,struct task_struct * p)
{
        unsigned long free;
        static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

        printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
        if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
                printk(stat_nam[p->state]);
        else
                printk(" ");
#if ((~0UL) == 0xffffffff)
        if (p == current)
                printk(" current  ");
        else
                printk(" %08lX ", thread_saved_pc(&p->tss));
#else
        if (p == current)
                printk("   current task   ");
        else
                printk(" %016lx ", thread_saved_pc(&p->tss));
#endif
        /* scan the kernel stack page for the deepest untouched word */
        for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
                if (((unsigned long *)p->kernel_stack_page)[free])
                        break;
        }
        printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
        if (p->p_cptr)
                printk("%5d ", p->p_cptr->pid);
        else
                printk("      ");
        if (p->p_ysptr)
                printk("%7d", p->p_ysptr->pid);
        else
                printk("       ");
        if (p->p_osptr)
                printk(" %5d\n", p->p_osptr->pid);
        else
                printk("\n");
}

void show_state(void)
{
        int i;

#if ((~0UL) == 0xffffffff)
        printk("\n"
               "                         free                        sibling\n");
        printk("  task             PC   stack   pid father child younger older\n");
#else
        printk("\n"
               "                                 free                        sibling\n");
        printk("  task                 PC       stack   pid father child younger older\n");
#endif
        for (i=0 ; i<NR_TASKS ; i++)
                if (task[i])
                        show_task(i,task[i]);
}

void sched_init(void)
{
        /*
         * We have to do a little magic to get the first
         * process right in SMP mode.
         */
        int cpu=smp_processor_id();
#ifndef __SMP__
        current_set[cpu]=&init_task;
#else
        init_task.processor=cpu;
        for(cpu = 0; cpu < NR_CPUS; cpu++)
                current_set[cpu] = &init_task;
#endif
        init_bh(TIMER_BH, timer_bh);
        init_bh(TQUEUE_BH, tqueue_bh);
        init_bh(IMMEDIATE_BH, immediate_bh);
}