This source file includes following definitions.
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- immediate_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/tqueue.h>
27 #include <linux/resource.h>
28 #include <linux/mm.h>
29
30 #include <asm/system.h>
31 #include <asm/io.h>
32 #include <asm/segment.h>
33
34 #define TIMER_IRQ 0
35
36 #include <linux/timex.h>
37
38
39
40
long tick = 1000000 / HZ;	/* timer-interrupt period in microseconds */
volatile struct timeval xtime;	/* the current wall-clock time */
int tickadj = 500/HZ;		/* max microseconds slewed per tick by adjtime() */

DECLARE_TASK_QUEUE(tq_timer);
DECLARE_TASK_QUEUE(tq_immediate);

/*
 * Phase-locked-loop variables for the NTP kernel clock discipline
 * (consumed by second_overflow()/do_timer() below).
 */
int time_status = TIME_BAD;	/* clock synchronization status */
long time_offset = 0;		/* time offset still to be corrected (us) */
long time_constant = 0;		/* PLL time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance */
long time_precision = 1;	/* clock precision (us) */
long time_maxerror = 0x70000000;/* maximum error bound */
long time_esterror = 0x70000000;/* estimated error bound */
long time_phase = 0;		/* phase offset, scaled by SHIFT_SCALE */
long time_freq = 0;		/* frequency (drift) offset, scaled */
long time_adj = 0;		/* per-tick adjustment computed each second */
long time_reftime = 0;		/* time at last adjustment */

long time_adjust = 0;		/* remaining adjtime() correction (us) */
long time_adjust_step = 0;	/* slew applied on the current tick (us) */

int need_resched = 0;		/* set when schedule() should be called soon */
unsigned long event = 0;

extern int _setitimer(int, struct itimerval *, struct itimerval *);
unsigned long * prof_buffer = NULL;	/* kernel profiling histogram buckets */
unsigned long prof_len = 0;		/* number of buckets in prof_buffer */

#define _S(nr) (1<<((nr)-1))	/* signal number -> signal mask bit */

extern void mem_use(void);

extern int timer_interrupt(void);

/* statically allocated stacks and mm state for the initial task */
static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
unsigned long init_user_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
struct task_struct init_task = INIT_TASK;

unsigned long volatile jiffies=0;	/* ticks since boot */

struct task_struct *current = &init_task;	/* the task now on the CPU */
struct task_struct *last_task_used_math = NULL;	/* owner of the FPU context */

struct task_struct * task[NR_TASKS] = {&init_task, };

struct kernel_stat kstat = { 0 };

/* ticks accumulated since the last ITIMER_REAL sweep in schedule(),
   and the smallest pending it_real_value seen during that sweep */
unsigned long itimer_ticks = 0;
unsigned long itimer_next = ~0;
95
96
97
98
99
100
101
102
103
104
105
106
107
/*
 * 'schedule()' is the scheduler proper.  It first sweeps all tasks,
 * delivering expired ITIMER_REAL timers and waking interruptible
 * sleepers with pending signals or elapsed timeouts, then picks the
 * runnable task with the largest remaining 'counter' (time slice)
 * and switches to it.  init_task (the idle task) is the fallback
 * when nothing else is runnable.
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * next;
	unsigned long ticks;

	/* calling schedule() from interrupt context is a bug; complain
	   and clear intr_count so we can limp on */
	if (intr_count) {
		printk("Aiee: scheduling in interrupt\n");
		intr_count = 0;
	}
	/* atomically consume the ticks accumulated by do_timer() */
	cli();
	ticks = itimer_ticks;
	itimer_ticks = 0;
	itimer_next = ~0;
	sti();
	need_resched = 0;
	p = &init_task;
	/* pass 1: itimer delivery and wake-ups, over the circular
	   next_task list (terminates back at init_task) */
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc1;
		if (ticks && p->it_real_value) {
			if (p->it_real_value <= ticks) {
				send_sig(SIGALRM, p, 1);
				if (!p->it_real_incr) {
					/* one-shot timer: disarm */
					p->it_real_value = 0;
					goto end_itimer;
				}
				/* periodic: advance past the consumed ticks */
				do {
					p->it_real_value += p->it_real_incr;
				} while (p->it_real_value <= ticks);
			}
			p->it_real_value -= ticks;
			/* track the earliest pending itimer for do_timer() */
			if (p->it_real_value < itimer_next)
				itimer_next = p->it_real_value;
		}
end_itimer:
		if (p->state != TASK_INTERRUPTIBLE)
			continue;
		/* wake on a pending unblocked signal... */
		if (p->signal & ~p->blocked) {
			p->state = TASK_RUNNING;
			continue;
		}
		/* ...or on an expired sleep timeout */
		if (p->timeout && p->timeout <= jiffies) {
			p->timeout = 0;
			p->state = TASK_RUNNING;
		}
	}
confuse_gcc1:

	/* this is the scheduler proper: */
#if 0
	/* disabled experiment: give a task that is going to sleep a
	   slightly bigger slice next time around */
	if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
	    current->counter < current->priority*2) {
		++current->counter;
	}
#endif
	/* pass 2: choose the runnable task with the highest counter;
	   defaults to init_task (idle) when nothing is runnable */
	c = -1000;
	next = p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc2;
		if (p->state == TASK_RUNNING && p->counter > c)
			c = p->counter, next = p;
	}
confuse_gcc2:
	/* every runnable task has used its slice: recharge all
	   counters (sleepers accumulate up to ~2*priority) */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
	if (current == next)
		return;
	kstat.context_swtch++;
	switch_to(next);
}
189
/*
 * pause(): sleep interruptibly until a signal arrives.  Returns
 * -ERESTARTNOHAND so the call is not restarted after the handler.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
196
197
198
199
200
201
202
203
204
205 void wake_up(struct wait_queue **q)
206 {
207 struct wait_queue *tmp;
208 struct task_struct * p;
209
210 if (!q || !(tmp = *q))
211 return;
212 do {
213 if ((p = tmp->task) != NULL) {
214 if ((p->state == TASK_UNINTERRUPTIBLE) ||
215 (p->state == TASK_INTERRUPTIBLE)) {
216 p->state = TASK_RUNNING;
217 if (p->counter > current->counter + 3)
218 need_resched = 1;
219 }
220 }
221 if (!tmp->next) {
222 printk("wait_queue is bad (eip = %p)\n",
223 __builtin_return_address(0));
224 printk(" q = %p\n",q);
225 printk(" *q = %p\n",*q);
226 printk(" tmp = %p\n",tmp);
227 break;
228 }
229 tmp = tmp->next;
230 } while (tmp != *q);
231 }
232
233 void wake_up_interruptible(struct wait_queue **q)
234 {
235 struct wait_queue *tmp;
236 struct task_struct * p;
237
238 if (!q || !(tmp = *q))
239 return;
240 do {
241 if ((p = tmp->task) != NULL) {
242 if (p->state == TASK_INTERRUPTIBLE) {
243 p->state = TASK_RUNNING;
244 if (p->counter > current->counter + 3)
245 need_resched = 1;
246 }
247 }
248 if (!tmp->next) {
249 printk("wait_queue is bad (eip = %p)\n",
250 __builtin_return_address(0));
251 printk(" q = %p\n",q);
252 printk(" *q = %p\n",*q);
253 printk(" tmp = %p\n",tmp);
254 break;
255 }
256 tmp = tmp->next;
257 } while (tmp != *q);
258 }
259
/*
 * Slow path of down(): sleep uninterruptibly until the semaphore
 * count goes positive.  The task state is set to
 * TASK_UNINTERRUPTIBLE *before* each test of the count so that a
 * wake_up() racing between the test and schedule() is not lost.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		/* re-arm the sleep state before re-testing */
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
272
/*
 * Common helper for sleep_on()/interruptible_sleep_on(): queue the
 * current task on *p in the given sleep state and call the
 * scheduler.  Interrupts are enabled across schedule() and the
 * caller's flag state is restored afterwards.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	/* the idle task must never sleep */
	if (current == task[0])
		panic("task[0] trying to sleep");
	/* state is set before queueing so a wake-up arriving between
	   add_wait_queue() and schedule() is not lost */
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
290
/* Sleep on *p; signals may wake the task. */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}
295
/* Sleep on *p uninterruptibly; only wake_up() can end the sleep. */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
300
301
302
303
304
/* Sentinel head of the circular, doubly-linked timer list, kept
   sorted by expiry; its ~0 expiry guarantees sorted walks stop here. */
static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
/* non-zero: add_timer()/del_timer() do extra sanity checks */
#define SLOW_BUT_DEBUGGING_TIMERS 1
307
/*
 * Insert a timer into the global timer list, which is kept sorted
 * by absolute expiry.  timer->expires is relative (in jiffies) on
 * entry and is converted to an absolute value here.
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	/* a timer that is still linked must not be added again */
	if (timer->next || timer->prev) {
		printk("add_timer() called with non-zero list from %p\n",
			__builtin_return_address(0));
		return;
	}
#endif
	p = &timer_head;
	timer->expires += jiffies;
	save_flags(flags);
	cli();
	/* find the first entry expiring no earlier than this timer;
	   timer_head (expires == ~0) always terminates the walk */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	/* link the new timer in just before p */
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}
333
/*
 * Remove a timer from the timer list.  Returns 1 if the timer was
 * pending (timer->expires is converted back to the remaining
 * relative time), 0 if it was not queued.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	struct timer_list * p;

	/* debugging variant: verify the timer really is on the list
	   before unlinking it */
	p = &timer_head;
	save_flags(flags);
	cli();
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			/* hand back the unexpired interval */
			timer->expires -= jiffies;
			return 1;
		}
	}
	/* not found on the list but still linked: caller bug */
	if (timer->next || timer->prev)
		printk("del_timer() called from %p with timer not initialized\n",
			__builtin_return_address(0));
	restore_flags(flags);
	return 0;
#else
	/* fast variant: trust timer->next as the "is pending" flag */
	save_flags(flags);
	cli();
	if (timer->next) {
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		restore_flags(flags);
		timer->expires -= jiffies;
		return 1;
	}
	restore_flags(flags);
	return 0;
#endif
}
373
/* Bitmask of active entries in timer_table below. */
unsigned long timer_active = 0;
/* Old-style static timers; one slot per bit of timer_active. */
struct timer_struct timer_table[32];

/*
 * Load averages over 1, 5 and 15 minutes, in fixed-point FIXED_1
 * units; maintained by calc_load() on the timer tick.
 */
unsigned long avenrun[3] = { 0,0,0 };
384
385
386
387
388 static unsigned long count_active_tasks(void)
389 {
390 struct task_struct **p;
391 unsigned long nr = 0;
392
393 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
394 if (*p && ((*p)->state == TASK_RUNNING ||
395 (*p)->state == TASK_UNINTERRUPTIBLE ||
396 (*p)->state == TASK_SWAPPING))
397 nr += FIXED_1;
398 return nr;
399 }
400
/*
 * Update the load averages.  Called on every tick from do_timer();
 * the static countdown makes the real work run only once every
 * LOAD_FREQ ticks.
 */
static inline void calc_load(void)
{
	unsigned long active_tasks;	/* fixed-point, FIXED_1 units */
	static int count = LOAD_FREQ;

	if (count-- > 0)
		return;
	count = LOAD_FREQ;
	active_tasks = count_active_tasks();
	/* exponentially decaying averages for 1, 5 and 15 minutes */
	CALC_LOAD(avenrun[0], EXP_1, active_tasks);
	CALC_LOAD(avenrun[1], EXP_5, active_tasks);
	CALC_LOAD(avenrun[2], EXP_15, active_tasks);
}
414
415
416
417
418
419
420
421
422
423
424
/*
 * Called once per second (when xtime.tv_usec wraps): runs the NTP
 * phase-locked-loop bookkeeping for the next second, handles leap
 * second insertion/deletion, and periodically writes the software
 * clock back to the CMOS RTC.
 */
static void second_overflow(void)
{
	long ltemp;

	/* last time the CMOS clock got updated */
	static long last_rtc_update=0;
	extern int set_rtc_mmss(unsigned long);

	/* grow the maximum-error bound, saturating at 0x70000000 */
	time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
	    0x70000000 : (time_maxerror + time_tolerance);

	/* PLL: derive this second's per-tick adjustment (time_adj)
	   from the remaining offset, and consume that much offset */
	if (time_offset < 0) {
		ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
		time_adj = - time_adj;
	} else if (time_offset > 0) {
		ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
	} else {
		time_adj = 0;
	}

	/* add the frequency (drift) correction and local fine tune */
	time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
	    + FINETUNE;

	/* leap-second state machine */
	switch (time_status) {
	case TIME_INS:
		/* insert a 23:59:60 at the end of the UTC day */
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_status = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 GMT\n");
		}
		break;

	case TIME_DEL:
		/* skip 23:59:59 at the end of the UTC day */
		if (xtime.tv_sec % 86400 == 86399) {
			xtime.tv_sec++;
			time_status = TIME_OK;
			printk("Clock: deleting leap second 23:59:59 GMT\n");
		}
		break;

	case TIME_OOP:
		/* the inserted leap second has passed */
		time_status = TIME_OK;
		break;
	}
	/* when synchronized, update the RTC roughly every 11 min;
	   after a failed write, retry 60 s later (660 - 600) */
	if (time_status != TIME_BAD && xtime.tv_sec > last_rtc_update + 660)
	  if (set_rtc_mmss(xtime.tv_sec) == 0)
	    last_rtc_update = xtime.tv_sec;
	  else
	    last_rtc_update = xtime.tv_sec - 600;	/* else binds to inner if */
}
483
484
485
486
/*
 * Timer bottom half: runs all expired timer_list timers (handlers
 * are called with interrupts enabled), then any expired old-style
 * timer_table entries.
 */
static void timer_bh(void * unused)
{
	unsigned long mask;
	struct timer_struct *tp;
	struct timer_list * timer;

	cli();
	/* the list is sorted by expiry, so stop at the first live
	   timer; timer_head (expires == ~0) terminates the walk */
	while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		/* unlink before calling: the handler may re-add it */
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();
		fn(data);
		cli();
	}
	sti();

	/* old-style static timers flagged in the timer_active mask */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;
		tp->fn();
		sti();
	}
}
518
/* Bottom half that drains the tq_timer task queue. */
void tqueue_bh(void * unused)
{
	run_task_queue(&tq_timer);
}
523
/* Bottom half that drains the tq_immediate task queue. */
void immediate_bh(void * unused)
{
	run_task_queue(&tq_immediate);
}
528
529
530
531
532
533
534
/*
 * The timer-tick interrupt handler (IRQ 0).  Advances the software
 * clock xtime (applying NTP phase and adjtime() corrections), bumps
 * jiffies and the load average, does per-task CPU accounting,
 * ITIMER_VIRTUAL/ITIMER_PROF delivery and RLIMIT_CPU enforcement,
 * charges the running task's time slice, and marks the TIMER and
 * TQUEUE bottom halves when they have pending work.
 */
static void do_timer(int irq, struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;

	long ltemp, psecs;

	/* Advance the accumulated phase; whole (SHIFT_SCALE-scaled)
	   microseconds are folded into the length of this tick. */
	time_phase += time_adj;
	if (time_phase < -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase > FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust)
	{
		/* An adjtime() slew is in progress: move at most
		   tickadj microseconds per tick, in the requested
		   direction, until the adjustment is consumed. */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		/* reduce the remaining requested adjustment */
		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;

	/* carry microseconds over into a new second */
	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();
	}

	jiffies++;
	calc_load();
	if (user_mode(regs)) {
		/* tick charged to user (or nice) time */
		current->utime++;
		if (current != task[0]) {
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}
		/* ITIMER_VIRTUAL only runs down in user mode */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		/* tick charged to system time */
		current->stime++;
		if(current != task[0])
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		/* kernel profiling: credit the interrupted EIP's bucket */
		if (prof_buffer && current != task[0]) {
			unsigned long eip = regs->eip;
			eip >>= CONFIG_PROFILE_SHIFT;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}

	/* RLIMIT_CPU: SIGKILL at the hard limit; SIGXCPU at the soft
	   limit and every 5 CPU-seconds thereafter, checked once per
	   whole second of consumed CPU time */
	if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
	    (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
		send_sig(SIGKILL, current, 1);
	if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
	    (((current->stime + current->utime) % HZ) == 0)) {
		psecs = (current->stime + current->utime) / HZ;
		/* send when we reach the soft limit... */
		if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
			send_sig(SIGXCPU, current, 1);
		/* ...and every five seconds past it */
		else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
		    ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
			send_sig(SIGXCPU, current, 1);
	}

	/* one tick of the running task's slice is gone */
	if (current != task[0] && 0 > --current->counter) {
		current->counter = 0;
		need_resched = 1;
	}

	/* ITIMER_PROF counts down in user and system time alike */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
	/* any expired timer_table entry requires the bottom half */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	cli();
	itimer_ticks++;
	/* an ITIMER_REAL came due: force a pass through schedule() */
	if (itimer_ticks > itimer_next)
		need_resched = 1;
	if (timer_head.next->expires < jiffies)
		mark_bh(TIMER_BH);
	if (tq_timer != &tq_last)
		mark_bh(TQUEUE_BH);
	sti();
}
662
663 asmlinkage int sys_alarm(long seconds)
664 {
665 struct itimerval it_new, it_old;
666
667 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
668 it_new.it_value.tv_sec = seconds;
669 it_new.it_value.tv_usec = 0;
670 _setitimer(ITIMER_REAL, &it_new, &it_old);
671 return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
672 }
673
/* Return the process id of the calling task. */
asmlinkage int sys_getpid(void)
{
	return current->pid;
}
678
/* Return the pid of the caller's original parent (p_opptr). */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}
683
/* Return the real user id of the calling task. */
asmlinkage int sys_getuid(void)
{
	return current->uid;
}
688
/* Return the effective user id of the calling task. */
asmlinkage int sys_geteuid(void)
{
	return current->euid;
}
693
/* Return the real group id of the calling task. */
asmlinkage int sys_getgid(void)
{
	return current->gid;
}
698
/* Return the effective group id of the calling task. */
asmlinkage int sys_getegid(void)
{
	return current->egid;
}
703
704 asmlinkage int sys_nice(long increment)
705 {
706 int newprio;
707
708 if (increment < 0 && !suser())
709 return -EPERM;
710 newprio = current->priority - increment;
711 if (newprio < 1)
712 newprio = 1;
713 if (newprio > 35)
714 newprio = 35;
715 current->priority = newprio;
716 return 0;
717 }
718
/*
 * Print one line of the show_state() table for task p: command
 * name, slot number (negated when p is the current task), state
 * letter, PC, free kernel-stack estimate, and the pids of its
 * parent, child and siblings.
 */
static void show_task(int nr,struct task_struct * p)
{
	unsigned long free;
	/* one letter per task state value */
	static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
	if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[p->state]);	/* fixed one-char strings, no '%' */
	else
		printk(" ");
#ifdef __i386__
	if (p == current)
		printk(" current ");
	else
		printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);
#endif
	/* find the stack low-water mark: first non-zero word above
	   the base of the kernel stack page */
	for (free = 1; free < 1024 ; free++) {
		if (((unsigned long *)p->kernel_stack_page)[free])
			break;
	}
	/* free<<2 converts words to bytes (assumes 4-byte longs, i386) */
	printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
		printk(" ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);
	else
		printk(" ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);
	else
		printk("\n");
}
753
754 void show_state(void)
755 {
756 int i;
757
758 printk(" free sibling\n");
759 printk(" task PC stack pid father child younger older\n");
760 for (i=0 ; i<NR_TASKS ; i++)
761 if (task[i])
762 show_task(i,task[i]);
763 }
764
/*
 * Initialize the scheduler: install the timer, task-queue and
 * immediate bottom-half handlers and claim the timer interrupt
 * (IRQ 0); failure to get the IRQ is fatal.
 */
void sched_init(void)
{
	bh_base[TIMER_BH].routine = timer_bh;
	bh_base[TQUEUE_BH].routine = tqueue_bh;
	bh_base[IMMEDIATE_BH].routine = immediate_bh;
	if (request_irq(TIMER_IRQ, do_timer, 0, "timer") != 0)
		panic("Could not allocate timer IRQ!");
}