This source file includes following definitions.
- add_to_runqueue
- del_from_runqueue
- move_last_runqueue
- wake_up_process
- process_timeout
- goodness
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- immediate_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- find_process_by_pid
- setscheduler
- sys_sched_setscheduler
- sys_sched_setparam
- sys_sched_getscheduler
- sys_sched_getparam
- sys_sched_yield
- sys_sched_get_priority_max
- sys_sched_get_priority_min
- sys_sched_rr_get_interval
- timespectojiffies
- jiffiestotimespec
- sys_nanosleep
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/signal.h>
15 #include <linux/sched.h>
16 #include <linux/timer.h>
17 #include <linux/kernel.h>
18 #include <linux/kernel_stat.h>
19 #include <linux/fdreg.h>
20 #include <linux/errno.h>
21 #include <linux/time.h>
22 #include <linux/ptrace.h>
23 #include <linux/delay.h>
24 #include <linux/interrupt.h>
25 #include <linux/tqueue.h>
26 #include <linux/resource.h>
27 #include <linux/mm.h>
28 #include <linux/smp.h>
29
30 #include <asm/system.h>
31 #include <asm/io.h>
32 #include <asm/segment.h>
33 #include <asm/pgtable.h>
34 #include <asm/mmu_context.h>
35
36 #include <linux/timex.h>
37
38
39
40
41
/* Kernel "secure level"; 0 means no extra restrictions. */
int securelevel = 0;

long tick = 1000000 / HZ;	/* timer interrupt period, in microseconds */
volatile struct timeval xtime;	/* the current wall-clock time */
int tickadj = 500/HZ;		/* usec adjusted per tick while slewing (see do_timer) */

/* Task queues run by the bottom halves / scheduler below. */
DECLARE_TASK_QUEUE(tq_timer);
DECLARE_TASK_QUEUE(tq_immediate);
DECLARE_TASK_QUEUE(tq_scheduler);

/*
 * NTP kernel phase-locked-loop state (read/written by second_overflow()
 * and do_timer(); presumably also by adjtimex() elsewhere -- confirm).
 */
int time_state = TIME_BAD;	/* leap-second state machine */
int time_status = STA_UNSYNC | STA_PLL;	/* clock status bits */
long time_offset = 0;		/* phase offset still to be amortized */
long time_constant = 2;		/* PLL time constant (shift count) */
long time_tolerance = MAXFREQ;	/* frequency tolerance */
long time_precision = 1;	/* clock precision */
long time_maxerror = 0x70000000;/* maximum error estimate */
long time_esterror = 0x70000000;/* estimated error */
long time_phase = 0;		/* sub-microsecond phase accumulator */
long time_freq = 0;		/* frequency offset */
long time_adj = 0;		/* per-tick adjustment, set by second_overflow() */
long time_reftime = 0;		/* time of last adjustment (unused here) */

long time_adjust = 0;		/* adjtime() amount still outstanding */
long time_adjust_step = 0;	/* per-tick slew step derived from the above */

int need_resched = 0;		/* set when schedule() should run on return */
unsigned long event = 0;

extern int _setitimer(int, struct itimerval *, struct itimerval *);
/* Kernel profiling buffer (histogram of kernel PCs), set up elsewhere. */
unsigned int * prof_buffer = NULL;
unsigned long prof_len = 0;
unsigned long prof_shift = 0;

#define _S(nr) (1<<((nr)-1))

extern void mem_use(void);

/* Static state of the initial (idle) task, pid 0. */
static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
unsigned long init_user_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;

struct mm_struct init_mm = INIT_MM;
struct task_struct init_task = INIT_TASK;

/* Ticks since boot; incremented once per timer interrupt in do_timer(). */
unsigned long volatile jiffies=0;

/* Per-CPU "currently running task" pointers. */
struct task_struct *current_set[NR_CPUS];
struct task_struct *last_task_used_math = NULL;

/* Global task table; slot 0 is the idle task. */
struct task_struct * task[NR_TASKS] = {&init_task, };

struct kernel_stat kstat = { 0 };
101
102 static inline void add_to_runqueue(struct task_struct * p)
103 {
104 #if 1
105 if (p->next_run || p->prev_run) {
106 printk("task already on run-queue\n");
107 return;
108 }
109 #endif
110 if (p->counter > current->counter + 3)
111 need_resched = 1;
112 nr_running++;
113 (p->prev_run = init_task.prev_run)->next_run = p;
114 p->next_run = &init_task;
115 init_task.prev_run = p;
116 #ifdef __SMP__
117
118 while(set_bit(31,&smp_process_available))
119 while(test_bit(31,&smp_process_available));
120 smp_process_available++;
121 clear_bit(31,&smp_process_available);
122 if ((0!=p->pid) && smp_threads_ready){
123 int i, found=0;
124 for (i=0;i<smp_num_cpus;i++)
125 if (0==current_set[i]->pid) {
126 smp_message_pass(i, MSG_RESCHEDULE, 0L, 0);
127 break;
128 }
129 }
130 #endif
131 }
132
133 static inline void del_from_runqueue(struct task_struct * p)
134 {
135 struct task_struct *next = p->next_run;
136 struct task_struct *prev = p->prev_run;
137
138 #if 1
139 if (!next || !prev) {
140 printk("task not on run-queue\n");
141 return;
142 }
143 #endif
144 if (p == &init_task) {
145 static int nr = 0;
146 if (nr < 5) {
147 nr++;
148 printk("idle task may not sleep\n");
149 }
150 return;
151 }
152 nr_running--;
153 next->prev_run = prev;
154 prev->next_run = next;
155 p->next_run = NULL;
156 p->prev_run = NULL;
157 }
158
159 static inline void move_last_runqueue(struct task_struct * p)
160 {
161 struct task_struct *next = p->next_run;
162 struct task_struct *prev = p->prev_run;
163
164 next->prev_run = prev;
165 prev->next_run = next;
166 (p->prev_run = init_task.prev_run)->next_run = p;
167 p->next_run = &init_task;
168 init_task.prev_run = p;
169 }
170
171
172
173
174
175
176
177
178
179 inline void wake_up_process(struct task_struct * p)
180 {
181 unsigned long flags;
182
183 save_flags(flags);
184 cli();
185 p->state = TASK_RUNNING;
186 if (!p->next_run)
187 add_to_runqueue(p);
188 restore_flags(flags);
189 }
190
191 static void process_timeout(unsigned long __data)
192 {
193 struct task_struct * p = (struct task_struct *) __data;
194
195 p->timeout = 0;
196 wake_up_process(p);
197 }
198
199
200
201
202
203
204
205
206
207
208
209
210
211
/*
 * Rate a candidate task for selection by schedule().  Larger is
 * better.  Realtime tasks always beat SCHED_OTHER tasks; among
 * SCHED_OTHER tasks the remaining timeslice (counter) dominates,
 * with small bonuses for cache affinity (SMP) and for being the
 * previously running task.
 */
static inline int goodness(struct task_struct * p, struct task_struct * prev, int this_cpu)
{
	int weight;

#ifdef __SMP__
	/* already running on some CPU: not a candidate */
	if (p->processor != NO_PROC_ID)
		return -1000;
#endif

	/* realtime tasks rank above every SCHED_OTHER task */
	if (p->policy != SCHED_OTHER)
		return 1000 + p->rt_priority;

	/* timeslice left is the base weight; 0 means "used up" */
	weight = p->counter;
	if (weight) {

#ifdef __SMP__
		/* prefer the CPU this task last ran on (cache warmth) */
		if (p->last_processor == this_cpu)
			weight += PROC_CHANGE_PENALTY;
#endif

		/* small bonus for not switching at all */
		if (p == prev)
			weight += 1;
	}

	return weight;
}
254
255
256
257
258
259
260
261
262
263
264
/*
 * The scheduler proper.  Runs pending bottom halves and the
 * tq_scheduler queue, takes the current task off the run-queue if it
 * is going to sleep, picks the runnable task with the highest
 * goodness(), and context-switches to it.  A timed interruptible
 * sleep is implemented with an on-stack timer armed just before the
 * switch and deleted after we run again.
 *
 * NOTE: must not be called from interrupt context.
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * prev, * next;
	unsigned long timeout = 0;
	int this_cpu=smp_processor_id();

	if (intr_count) {
		printk("Aiee: scheduling in interrupt\n");
		return;
	}
	/* run any pending bottom halves first */
	if (bh_active & bh_mask) {
		intr_count = 1;
		do_bottom_half();
		intr_count = 0;
	}
	run_task_queue(&tq_scheduler);

	need_resched = 0;
	prev = current;
	cli();
	/* an exhausted round-robin task gets a fresh slice and goes last */
	if (!prev->counter && prev->policy == SCHED_RR) {
		prev->counter = prev->priority;
		move_last_runqueue(prev);
	}
	switch (prev->state) {
		case TASK_INTERRUPTIBLE:
			/* pending signal or expired timeout: stay runnable */
			if (prev->signal & ~prev->blocked)
				goto makerunnable;
			timeout = prev->timeout;
			if (timeout && (timeout <= jiffies)) {
				prev->timeout = 0;
				timeout = 0;
		makerunnable:
				prev->state = TASK_RUNNING;
				break;
			}
		default:
			/* going to sleep: off the run-queue */
			del_from_runqueue(prev);
		case TASK_RUNNING:
	}
	p = init_task.next_run;
	sti();

#ifdef __SMP__
	/* safe: schedule() is not re-entered */
	prev->processor = NO_PROC_ID;
#endif

	/*
	 * New tasks may appear on the run-queue now that interrupts are
	 * enabled, but they are inserted at the tail, so the list
	 * starting at "p" is effectively fixed for this scan.
	 */
	c = -1000;
	next = &init_task;
	while (p != &init_task) {
		int weight = goodness(p, prev, this_cpu);
		if (weight > c)
			c = weight, next = p;
		p = p->next_run;
	}

	/* all runnable counters are 0: recharge everyone's timeslice */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
#ifdef __SMP__
	/* idle-to-idle switch: just keep running this CPU's idle task */
	if(!prev->pid && !next->pid)
		next=prev;

	next->processor = this_cpu;
	next->last_processor = this_cpu;

#endif
#ifdef __SMP_PROF__
	/* track which CPUs are running their idle thread */
	if (0==next->pid)
		set_bit(this_cpu,&smp_idle_map);
	else
		clear_bit(this_cpu,&smp_idle_map);
#endif
	if (prev != next) {
		struct timer_list timer;

		kstat.context_swtch++;
		/* arm the wakeup timer for a timed interruptible sleep */
		if (timeout) {
			init_timer(&timer);
			timer.expires = timeout;
			timer.data = (unsigned long) prev;
			timer.function = process_timeout;
			add_timer(&timer);
		}
		get_mmu_context(next);
		switch_to(prev,next);
		/* we are running again: the on-stack timer must go */
		if (timeout)
			del_timer(&timer);
	}
}
379
380 #ifndef __alpha__
381
382
383
384
385
/*
 * pause(): sleep interruptibly until a signal arrives.  Returns
 * -ERESTARTNOHAND so the call is not restarted after the signal
 * handler runs.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
392
393 #endif
394
395
396
397
398
399
400
401
402
/*
 * Wake every task on the circular wait-queue *q, whether it sleeps
 * interruptibly or uninterruptibly.  A queue whose links do not close
 * back on *q is reported as corrupt and the walk is abandoned.
 * NOTE(review): *q is re-read each iteration; presumably callers hold
 * interrupts off or otherwise protect the queue -- confirm.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE))
				wake_up_process(p);
		}
		if (!tmp->next) {
			/* broken queue: dump diagnostics and bail out */
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
427
/*
 * Like wake_up(), but only wakes tasks sleeping in
 * TASK_INTERRUPTIBLE; uninterruptible sleepers are left alone.
 * Same corrupt-queue diagnostics and the same locking expectation
 * as wake_up().
 */
void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE)
				wake_up_process(p);
		}
		if (!tmp->next) {
			/* broken queue: dump diagnostics and bail out */
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
451
/*
 * Slow path of semaphore down(): queue ourselves on the semaphore's
 * wait-queue and sleep uninterruptibly until the count is positive.
 * The task state is re-set to TASK_UNINTERRUPTIBLE before every
 * re-check so a wake-up arriving between the check and schedule()
 * is not lost.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
464
/*
 * Common body of sleep_on()/interruptible_sleep_on(): put the current
 * task on wait-queue *p in the given sleep state and schedule away
 * until woken.  The state is set before the task is queued, so a
 * wake_up() racing with the enqueue cannot be missed.  The idle task
 * must never sleep.  Interrupts are enabled across the sleep and the
 * caller's flag state restored afterwards.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
482
/* Sleep on *p until woken; signals also wake us. */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}

/* Sleep on *p until explicitly woken; signals are ignored. */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
492
493
494
495
496
/*
 * The timer list is a doubly-linked list kept sorted by expiry time.
 * timer_head is a sentinel with expires = ~0, so every real timer
 * sorts before it and insertion loops always terminate.
 */
static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
#define SLOW_BUT_DEBUGGING_TIMERS 0

/*
 * Insert a timer into the sorted timer list, with interrupts
 * disabled.  The timer must not already be on a list.
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	if (timer->next || timer->prev) {
		printk("add_timer() called with non-zero list from %p\n",
			__builtin_return_address(0));
		return;
	}
#endif
	p = &timer_head;
	save_flags(flags);
	cli();
	/* walk forward to the first entry expiring no earlier than us */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	/* splice in just before p */
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}
524
/*
 * Remove a timer from the timer list if it is pending.  Returns 1 if
 * the timer was removed, 0 if it was not on the list (already expired
 * or never added -- its next pointer is NULL then).  Runs with
 * interrupts disabled.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	/* debugging variant: verify the timer is really on the list */
	struct timer_list * p;

	p = &timer_head;
	save_flags(flags);
	cli();
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			return 1;
		}
	}
	if (timer->next || timer->prev)
		printk("del_timer() called from %p with timer not initialized\n",
			__builtin_return_address(0));
	restore_flags(flags);
	return 0;
#else
	/* fast variant: trust the timer's own link pointers */
	struct timer_list * next;
	int ret = 0;
	save_flags(flags);
	cli();
	if ((next = timer->next) != NULL) {
		(next->prev = timer->prev)->next = next;
		timer->next = timer->prev = NULL;
		ret = 1;
	}
	restore_flags(flags);
	return ret;
#endif
}
562
/* Old-style static timers: a 32-bit active mask plus a fixed table. */
unsigned long timer_active = 0;
struct timer_struct timer_table[32];

/*
 * Load averages (1, 5 and 15 minutes) in fixed point; maintained by
 * calc_load() below.
 */
unsigned long avenrun[3] = { 0,0,0 };
573
574
575
576
/*
 * Count the tasks that contribute to the load average (running,
 * uninterruptible, or swapping), in FIXED_1 fixed-point units.
 * The loop bound deliberately excludes FIRST_TASK so the idle task
 * is never counted; on SMP the extra per-CPU idle contributions are
 * subtracted off as well.
 */
static unsigned long count_active_tasks(void)
{
	struct task_struct **p;
	unsigned long nr = 0;

	for(p = &LAST_TASK; p > &FIRST_TASK; --p)
		if (*p && ((*p)->state == TASK_RUNNING ||
			   (*p)->state == TASK_UNINTERRUPTIBLE ||
			   (*p)->state == TASK_SWAPPING))
			nr += FIXED_1;
#ifdef __SMP__
	nr-=(smp_num_cpus-1)*FIXED_1;
#endif
	return nr;
}
592
593 static inline void calc_load(void)
594 {
595 unsigned long active_tasks;
596 static int count = LOAD_FREQ;
597
598 if (count-- > 0)
599 return;
600 count = LOAD_FREQ;
601 active_tasks = count_active_tasks();
602 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
603 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
604 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
605 }
606
607
608
609
610
611
612
613
614
615
/*
 * Called once per second from do_timer() when xtime.tv_sec rolls
 * over.  Bumps the maximum error estimate, runs the leap-second
 * state machine, and computes time_adj -- the scaled phase/frequency
 * correction that do_timer() applies on every tick during the coming
 * second (the NTP kernel PLL model).
 */
static void second_overflow(void)
{
	long ltemp;

	/* grow the maximum error estimate, saturating at 0x70000000 */
	time_maxerror = (0x70000000-time_maxerror <
			 time_tolerance >> SHIFT_USEC) ?
	    0x70000000 : (time_maxerror + (time_tolerance >> SHIFT_USEC));

	/* leap-second state machine, driven by STA_INS/STA_DEL */
	switch (time_state) {

	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;

	case TIME_INS:
		/* insert a leap second at midnight UTC (86400 s/day) */
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_state = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 UTC\n");
		}
		break;

	case TIME_DEL:
		/* delete a leap second at midnight UTC */
		if ((xtime.tv_sec + 1) % 86400 == 0) {
			xtime.tv_sec++;
			time_state = TIME_WAIT;
			printk("Clock: deleting leap second 23:59:59 UTC\n");
		}
		break;

	case TIME_OOP:
		time_state = TIME_WAIT;
		break;

	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}

	/*
	 * Amortize a fraction of the remaining phase offset into
	 * time_adj for the next second, clamped to MAXPHASE/MINSEC.
	 * In PLL mode (no STA_FLL) the fraction shrinks with the
	 * time constant.
	 */
	if (time_offset < 0) {
		ltemp = -time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) <<
			    SHIFT_UPDATE;
		time_offset += ltemp;
		time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ -
		    SHIFT_UPDATE);
	} else {
		ltemp = time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) <<
			    SHIFT_UPDATE;
		time_offset -= ltemp;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ -
		    SHIFT_UPDATE);
	}

	/*
	 * PPS bookkeeping: if no PPS update has arrived for PPS_VALID
	 * seconds, declare the PPS signal invalid.
	 */
	pps_valid++;
	if (pps_valid == PPS_VALID) {
		pps_jitter = MAXTIME;
		pps_stabil = MAXFREQ;
		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
				 STA_PPSWANDER | STA_PPSERROR);
	}
	/* fold the frequency offset (PLL + PPS) into time_adj */
	ltemp = time_freq + pps_freq;
	if (ltemp < 0)
		time_adj -= -ltemp >>
		    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
	else
		time_adj += ltemp >>
		    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

#if HZ == 100
	/* SHIFT_HZ assumes a power of two; for HZ==100 add ~25% */
	if (time_adj < 0)
		time_adj -= -time_adj >> 2;
	else
		time_adj += time_adj >> 2;
#endif
}
730
731
732
733
/*
 * Timer bottom half: run all expired timers.  New-style timer_list
 * entries are unlinked with interrupts disabled but their handlers
 * run with interrupts enabled; old-style timer_table entries are
 * then run from their 32-bit active mask.
 */
static void timer_bh(void)
{
	unsigned long mask;
	struct timer_struct *tp;
	struct timer_list * timer;

	cli();
	/* sorted list: stop at the first not-yet-expired entry */
	while ((timer = timer_head.next) != &timer_head && timer->expires <= jiffies) {
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		/* unlink before calling so the handler may re-add it */
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();
		fn(data);
		cli();
	}
	sti();

	/* old-style static timers: one bit of timer_active per slot */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;
		tp->fn();
		sti();
	}
}
765
/* TQUEUE_BH handler: run the timer task queue. */
void tqueue_bh(void)
{
	run_task_queue(&tq_timer);
}

/* IMMEDIATE_BH handler: run the immediate task queue. */
void immediate_bh(void)
{
	run_task_queue(&tq_immediate);
}
775
/*
 * The timer interrupt handler proper.  Advances xtime by one tick
 * (applying the NTP phase correction time_adj and any adjtime()
 * slewing), calls second_overflow() on second rollover, bumps
 * jiffies and the load average, does per-process CPU accounting
 * (utime/stime, RLIMIT_CPU, interval timers, kernel profiling,
 * timeslice expiry), and finally marks the bottom halves that
 * need to run.
 */
void do_timer(struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;
	long ltemp, psecs;
#ifdef __SMP__
	int cpu,i;
#endif

	/*
	 * Advance the sub-microsecond phase accumulator; when it
	 * exceeds one microsecond (FINEUSEC, scaled), fold whole
	 * microseconds into this tick's xtime advance.
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase >= FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust) {
		/*
		 * adjtime() slewing: move at most tickadj usec per
		 * tick toward the requested total adjustment.
		 */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		/* reduce the outstanding adjustment by this step */
		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;

	/* second rollover: normalize and run the once-a-second work */
	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();
	}

	jiffies++;
	calc_load();
#ifndef __SMP__
	if (user_mode(regs)) {
		current->utime++;
		/* pid 0 (idle) is not charged to user/nice time */
		if (current->pid) {
			if (current->priority < DEF_PRIORITY)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}
		/* ITIMER_VIRTUAL: counts down in user mode only */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		current->stime++;
		if(current->pid)
			kstat.cpu_system++;
		/* kernel PC profiling histogram */
		if (prof_buffer && current->pid) {
			extern int _stext;
			unsigned long ip = instruction_pointer(regs);
			ip -= (unsigned long) &_stext;
			ip >>= prof_shift;
			if (ip < prof_len)
				prof_buffer[ip]++;
		}
	}
	/* RLIMIT_CPU: hard limit kills, soft limit nags with SIGXCPU */
	if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
	    (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
		send_sig(SIGKILL, current, 1);
	if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
	    (((current->stime + current->utime) % HZ) == 0)) {
		psecs = (current->stime + current->utime) / HZ;
		/* send SIGXCPU when the soft limit is first reached .. */
		if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
			send_sig(SIGXCPU, current, 1);
		/* .. and every five seconds thereafter. */
		else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
		    ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
			send_sig(SIGXCPU, current, 1);
	}
	/* timeslice used up: ask for a reschedule (not for idle) */
	if (current->pid && 0 > --current->counter) {
		current->counter = 0;
		need_resched = 1;
	}
	/* ITIMER_PROF: counts down in both user and kernel mode */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
#else
	/* SMP: do the same accounting for every CPU's current task */
	cpu = smp_processor_id();
	for (i=0;i<(0==smp_num_cpus?1:smp_num_cpus);i++){
#ifdef __SMP_PROF__
		if (test_bit(i,&smp_idle_map)) smp_idle_count[i]++;
#endif
		/* "user" if it is our CPU in user mode, or another
		 * CPU not currently inside the kernel lock */
		if (((cpu==i) && user_mode(regs)) ||
		    ((cpu!=i) && 0==smp_proc_in_lock[i])) {
			current_set[i]->utime++;
			if (current_set[i]->pid) {
				if (current_set[i]->priority < DEF_PRIORITY)
					kstat.cpu_nice++;
				else
					kstat.cpu_user++;
			}
			if (current_set[i]->it_virt_value && !(--current_set[i]->it_virt_value)) {
				current_set[i]->it_virt_value = current_set[i]->it_virt_incr;
				send_sig(SIGVTALRM,current_set[i],1);
			}
		} else {
			current_set[i]->stime++;
			if(current_set[i]->pid)
				kstat.cpu_system++;
			if (prof_buffer && current_set[i]->pid) {
				extern int _stext;
				unsigned long ip = instruction_pointer(regs);
				ip -= (unsigned long) &_stext;
				ip >>= prof_shift;
				if (ip < prof_len)
					prof_buffer[ip]++;
			}
		}
		/* RLIMIT_CPU, as in the UP case above */
		if ((current_set[i]->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
		    (((current_set[i]->stime + current_set[i]->utime) / HZ) >=
		    current_set[i]->rlim[RLIMIT_CPU].rlim_max))
			send_sig(SIGKILL, current_set[i], 1);
		if ((current_set[i]->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
		    (((current_set[i]->stime + current_set[i]->utime) % HZ) == 0)) {
			psecs = (current_set[i]->stime + current_set[i]->utime) / HZ;
			if (psecs == current_set[i]->rlim[RLIMIT_CPU].rlim_cur)
				send_sig(SIGXCPU, current_set[i], 1);
			else if ((psecs > current_set[i]->rlim[RLIMIT_CPU].rlim_cur) &&
			    ((psecs - current_set[i]->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
				send_sig(SIGXCPU, current_set[i], 1);
		}
		/* timeslice expiry: reschedule locally or kick that CPU */
		if (current_set[i]->pid && 0 > --current_set[i]->counter) {
			current_set[i]->counter = 0;
			if (i==cpu)
				need_resched = 1;
			else
				smp_message_pass(i, MSG_RESCHEDULE, 0L, 0);
		} else
		/* idle CPU while work is available: kick it too */
		if ((0==current_set[i]->pid) && (0x7fffffff & smp_process_available)){
			if (cpu==i)
				need_resched = 1;
			else
				smp_message_pass(i, MSG_RESCHEDULE, 0L, 0);
		}
		if (current_set[i]->it_prof_value && !(--current_set[i]->it_prof_value)) {
			current_set[i]->it_prof_value = current_set[i]->it_prof_incr;
			send_sig(SIGPROF,current_set[i],1);
		}
	}
#endif

	/* old-style static timers: any expired slot needs TIMER_BH */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	/* sorted list head expired, or timer task queue non-empty */
	if (timer_head.next->expires <= jiffies)
		mark_bh(TIMER_BH);
	if (tq_timer != &tq_last)
		mark_bh(TQUEUE_BH);
}
975
976 #ifndef __alpha__
977
978
979
980
981
/*
 * alarm(): arm the ITIMER_REAL timer for 'seconds' seconds (0
 * cancels it) and return how many seconds were left on the previous
 * alarm, rounding any leftover microseconds up to a full second.
 */
asmlinkage unsigned int sys_alarm(unsigned int seconds)
{
	struct itimerval it_new, it_old;
	unsigned int oldalarm;

	it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
	it_new.it_value.tv_sec = seconds;
	it_new.it_value.tv_usec = 0;
	_setitimer(ITIMER_REAL, &it_new, &it_old);
	oldalarm = it_old.it_value.tv_sec;

	/* round leftover microseconds up to a whole second */
	if (it_old.it_value.tv_usec)
		oldalarm++;
	return oldalarm;
}
998
999
1000
1001
1002
/* getpid(): return the calling process's pid. */
asmlinkage int sys_getpid(void)
{
	return current->pid;
}

/* getppid(): return the pid of the original parent. */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}

/* getuid(): return the real user id. */
asmlinkage int sys_getuid(void)
{
	return current->uid;
}

/* geteuid(): return the effective user id. */
asmlinkage int sys_geteuid(void)
{
	return current->euid;
}

/* getgid(): return the real group id. */
asmlinkage int sys_getgid(void)
{
	return current->gid;
}

/* getegid(): return the effective group id. */
asmlinkage int sys_getegid(void)
{
	return current->egid;
}
1032
1033
1034
1035
1036
1037
1038 asmlinkage int sys_nice(int increment)
1039 {
1040 unsigned long newprio;
1041 int increase = 0;
1042
1043 newprio = increment;
1044 if (increment < 0) {
1045 if (!suser())
1046 return -EPERM;
1047 newprio = -increment;
1048 increase = 1;
1049 }
1050 if (newprio > 40)
1051 newprio = 40;
1052
1053
1054
1055
1056
1057
1058
1059 newprio = (newprio * DEF_PRIORITY + 10) / 20;
1060 increment = newprio;
1061 if (increase)
1062 increment = -increment;
1063 newprio = current->priority - increment;
1064 if (newprio < 1)
1065 newprio = 1;
1066 if (newprio > DEF_PRIORITY*2)
1067 newprio = DEF_PRIORITY*2;
1068 current->priority = newprio;
1069 return 0;
1070 }
1071
1072 #endif
1073
1074 static struct task_struct *find_process_by_pid(pid_t pid) {
1075 struct task_struct *p, *q;
1076
1077 if (pid == 0)
1078 p = current;
1079 else {
1080 p = 0;
1081 for_each_task(q) {
1082 if (q && q->pid == pid) {
1083 p = q;
1084 break;
1085 }
1086 }
1087 }
1088 return p;
1089 }
1090
/*
 * Common backend for sys_sched_setscheduler() and
 * sys_sched_setparam().  policy < 0 means "keep the current policy".
 * Copies and validates the user's sched_param, checks permissions
 * (realtime policies and foreign processes require superuser),
 * installs the new policy/rt_priority, requeues the task and
 * reschedules.
 */
static int setscheduler(pid_t pid, int policy,
			struct sched_param *param)
{
	int error;
	struct sched_param lp;
	struct task_struct *p;

	if (!param || pid < 0)
		return -EINVAL;

	/* copy the parameter block from user space */
	error = verify_area(VERIFY_READ, param, sizeof(struct sched_param));
	if (error)
		return error;
	memcpy_fromfs(&lp, param, sizeof(struct sched_param));

	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	if (policy < 0)
		policy = p->policy;
	else if (policy != SCHED_FIFO && policy != SCHED_RR &&
		 policy != SCHED_OTHER)
		return -EINVAL;

	/*
	 * Valid priorities are 1..99 for SCHED_FIFO/SCHED_RR and
	 * exactly 0 for SCHED_OTHER.
	 */
	if (lp.sched_priority < 0 || lp.sched_priority > 99)
		return -EINVAL;
	if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
		return -EINVAL;

	if ((policy == SCHED_FIFO || policy == SCHED_RR) && !suser())
		return -EPERM;
	if ((current->euid != p->euid) && (current->euid != p->uid) &&
	    !suser())
		return -EPERM;

	p->policy = policy;
	p->rt_priority = lp.sched_priority;
	/* requeue so the new priority takes effect, then reschedule */
	if (p->next_run)
		move_last_runqueue(p);
	schedule();

	return 0;
}
1139
/* sched_setscheduler(): set both policy and priority. */
asmlinkage int sys_sched_setscheduler(pid_t pid, int policy,
				      struct sched_param *param)
{
	return setscheduler(pid, policy, param);
}

/* sched_setparam(): change priority only (policy -1 = keep). */
asmlinkage int sys_sched_setparam(pid_t pid, struct sched_param *param)
{
	return setscheduler(pid, -1, param);
}
1150
1151 asmlinkage int sys_sched_getscheduler(pid_t pid)
1152 {
1153 struct task_struct *p;
1154
1155 if (pid < 0)
1156 return -EINVAL;
1157
1158 p = find_process_by_pid(pid);
1159 if (!p)
1160 return -ESRCH;
1161
1162 return p->policy;
1163 }
1164
/*
 * sched_getparam(): copy the target task's realtime priority out to
 * the user-supplied sched_param.
 */
asmlinkage int sys_sched_getparam(pid_t pid, struct sched_param *param)
{
	int error;
	struct task_struct *p;
	struct sched_param lp;

	if (!param || pid < 0)
		return -EINVAL;

	error = verify_area(VERIFY_WRITE, param, sizeof(struct sched_param));
	if (error)
		return error;

	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	lp.sched_priority = p->rt_priority;
	memcpy_tofs(param, &lp, sizeof(struct sched_param));

	return 0;
}
1187
/*
 * sched_yield(): move the caller to the tail of the run-queue so
 * other runnable tasks get a turn first.
 * NOTE(review): runs without cli() and does not itself call
 * schedule(); presumably a reschedule happens on syscall return --
 * confirm against the arch syscall path.
 */
asmlinkage int sys_sched_yield(void)
{
	move_last_runqueue(current);

	return 0;
}
1194
1195 asmlinkage int sys_sched_get_priority_max(int policy)
1196 {
1197 switch (policy) {
1198 case SCHED_FIFO:
1199 case SCHED_RR:
1200 return 99;
1201 case SCHED_OTHER:
1202 return 0;
1203 }
1204
1205 return -EINVAL;
1206 }
1207
1208 asmlinkage int sys_sched_get_priority_min(int policy)
1209 {
1210 switch (policy) {
1211 case SCHED_FIFO:
1212 case SCHED_RR:
1213 return 1;
1214 case SCHED_OTHER:
1215 return 0;
1216 }
1217
1218 return -EINVAL;
1219 }
1220
1221 asmlinkage int sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
1222 {
1223 int error;
1224 struct timespec t;
1225
1226 error = verify_area(VERIFY_WRITE, interval, sizeof(struct timespec));
1227 if (error)
1228 return error;
1229
1230 t.tv_sec = 0;
1231 t.tv_nsec = 0;
1232 return -ENOSYS;
1233 memcpy_tofs(interval, &t, sizeof(struct timespec));
1234
1235 return 0;
1236 }
1237
1238
1239
1240
1241
1242 static unsigned long timespectojiffies(struct timespec *value)
1243 {
1244 unsigned long sec = (unsigned) value->tv_sec;
1245 long nsec = value->tv_nsec;
1246
1247 if (sec > (LONG_MAX / HZ))
1248 return LONG_MAX;
1249 nsec += 1000000000L / HZ - 1;
1250 nsec /= 1000000000L / HZ;
1251 return HZ * sec + nsec;
1252 }
1253
1254 static void jiffiestotimespec(unsigned long jiffies, struct timespec *value)
1255 {
1256 value->tv_nsec = (jiffies % HZ) * (1000000000L / HZ);
1257 value->tv_sec = jiffies / HZ;
1258 return;
1259 }
1260
/*
 * nanosleep(): sleep for *rqtp.  Realtime tasks get a precise busy
 * wait for very short (<= 2 ms) requests; everyone else sleeps
 * interruptibly with a jiffy timeout.  If woken early by a signal,
 * the remaining time is written to *rmtp (when non-NULL) and -EINTR
 * is returned.
 */
asmlinkage int sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
{
	int error;
	struct timespec t;
	unsigned long expire;

	error = verify_area(VERIFY_READ, rqtp, sizeof(struct timespec));
	if (error)
		return error;
	memcpy_fromfs(&t, rqtp, sizeof(struct timespec));
	/* validate rmtp up front so a late fault cannot lose the sleep */
	if (rmtp) {
		error = verify_area(VERIFY_WRITE, rmtp,
				    sizeof(struct timespec));
		if (error)
			return error;
	}

	if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
		return -EINVAL;

	if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
	    current->policy != SCHED_OTHER) {
		/*
		 * Short delays from realtime tasks are served with a
		 * busy wait for precision.
		 */
		udelay((t.tv_nsec + 999) / 1000);
		return 0;
	}

	/* add one jiffy for any non-zero request (round up) */
	expire = timespectojiffies(&t) + (t.tv_sec || t.tv_nsec) + jiffies;
	current->timeout = expire;
	current->state = TASK_INTERRUPTIBLE;
	schedule();

	if (expire > jiffies) {
		/* woken early (signal): report the time still left */
		if (rmtp) {
			jiffiestotimespec(expire - jiffies -
					  (expire > jiffies + 1), &t);
			memcpy_tofs(rmtp, &t, sizeof(struct timespec));
		}
		return -EINTR;
	}

	return 0;
}
1307
/*
 * Print one line of the show_state() dump for task p in slot nr:
 * name, slot (negated for the current task), state letter, saved PC,
 * free kernel-stack estimate, pid, parent pid, and sibling pids.
 */
static void show_task(int nr,struct task_struct * p)
{
	unsigned long free;
	static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
	if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[p->state]);
	else
		printk(" ");
#if ((~0UL) == 0xffffffff)
	/* 32-bit: 8-digit PC field */
	if (p == current)
		printk(" current ");
	else
		printk(" %08lX ", thread_saved_pc(&p->tss));
#else
	/* 64-bit: 16-digit PC field */
	if (p == current)
		printk(" current task ");
	else
		printk(" %016lx ", thread_saved_pc(&p->tss));
#endif
	/* scan from the stack base for the first clobbered word */
	for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
		if (((unsigned long *)p->kernel_stack_page)[free])
			break;
	}
	printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
		printk(" ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);
	else
		printk(" ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);
	else
		printk("\n");
}
1347
/*
 * Dump a one-line summary of every task in the task table
 * (typically triggered from SysRq / debugging paths).
 */
void show_state(void)
{
	int i;

#if ((~0UL) == 0xffffffff)
	/* 32-bit header layout */
	printk("\n"
	       " free sibling\n");
	printk(" task PC stack pid father child younger older\n");
#else
	/* 64-bit header layout */
	printk("\n"
	       " free sibling\n");
	printk(" task PC stack pid father child younger older\n");
#endif
	for (i=0 ; i<NR_TASKS ; i++)
		if (task[i])
			show_task(i,task[i]);
}
1365
/*
 * Boot-time scheduler initialization: register the boot CPU's
 * current task (the init/idle task) and install the timer, task
 * queue and immediate bottom-half handlers.
 */
void sched_init(void)
{
	int cpu=smp_processor_id();
	current_set[cpu]=&init_task;
#ifdef __SMP__
	init_task.processor=cpu;
#endif
	init_bh(TIMER_BH, timer_bh);
	init_bh(TQUEUE_BH, tqueue_bh);
	init_bh(IMMEDIATE_BH, immediate_bh);
}