This source file includes following definitions.
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- immediate_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/tqueue.h>
27 #include <linux/resource.h>
28
29 #include <asm/system.h>
30 #include <asm/io.h>
31 #include <asm/segment.h>
32
33 #define TIMER_IRQ 0
34
35 #include <linux/timex.h>
36
37
38
39
/* Length of a clock tick in microseconds, and the current time of day
 * (advanced by do_timer). */
long tick = 1000000 / HZ;
volatile struct timeval xtime;		/* the current time */
int tickadj = 500/HZ;			/* max microsecs adjtime() slews per tick */

/* Task queues drained by the TQUEUE/IMMEDIATE bottom halves below. */
DECLARE_TASK_QUEUE(tq_timer);
DECLARE_TASK_QUEUE(tq_immediate);

/*
 * Phase-locked-loop clock discipline state (used by second_overflow()
 * and do_timer()); values are in the scaled units of <linux/timex.h>.
 */
int time_status = TIME_BAD;	/* clock synchronization status */
long time_offset = 0;		/* time adjustment (us) */
long time_constant = 0;		/* pll time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance (ppm) */
long time_precision = 1;	/* clock precision (us) */
long time_maxerror = 0x70000000;/* maximum error */
long time_esterror = 0x70000000;/* estimated error */
long time_phase = 0;		/* phase offset (scaled us) */
long time_freq = 0;		/* frequency offset (scaled ppm) */
long time_adj = 0;		/* tick adjustment (scaled us) */
long time_reftime = 0;		/* time at last adjustment (s) */

long time_adjust = 0;		/* outstanding adjtime() correction (us) */
long time_adjust_step = 0;	/* correction applied to the clock this tick */

int need_resched = 0;		/* set when schedule() should run soon */
unsigned long event = 0;

/*
 * CPU identification flags.  NOTE(review): presumably filled in by the
 * boot/identification code elsewhere — not visible in this file.
 */
char hard_math = 0;
char x86 = 0;
char x86_model = 0;
char x86_mask = 0;
int x86_capability = 0;
int fdiv_bug = 0;

char x86_vendor_id[13] = "Unknown";

char ignore_irq13 = 0;
char wp_works_ok = 0;
char hlt_works_ok = 1;

int EISA_bus = 0;

extern int _setitimer(int, struct itimerval *, struct itimerval *);
unsigned long * prof_buffer = NULL;	/* kernel profiling histogram (CONFIG_PROFILE) */
unsigned long prof_len = 0;

/* Map a signal number to its bit in a signal mask. */
#define _S(nr) (1<<((nr)-1))

extern void mem_use(void);

extern int timer_interrupt(void);

/* Kernel/user stacks of the initial task.  STACK_MAGIC in word 0 lets
 * show_task() estimate how much stack was never touched. */
static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
unsigned long init_user_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
struct task_struct init_task = INIT_TASK;	/* the hand-built first task */

unsigned long volatile jiffies=0;	/* clock ticks since boot */

struct task_struct *current = &init_task;	/* the task now executing */
struct task_struct *last_task_used_math = NULL;	/* current owner of the FPU state */

struct task_struct * task[NR_TASKS] = {&init_task, };	/* every task, by slot */

struct kernel_stat kstat = { 0 };	/* cpu/context-switch statistics */

/* Ticks accumulated since schedule() last serviced the ITIMER_REAL
 * timers, and the smallest pending it_real_value seen (in ticks). */
unsigned long itimer_ticks = 0;
unsigned long itimer_next = ~0;
/*
 * schedule() is the main scheduler entry point.
 *
 * Pass 1 walks every task: it delivers expired ITIMER_REAL timers and
 * wakes interruptible sleepers whose signals or timeouts are due.
 * Pass 2 picks the runnable task with the largest remaining time
 * slice ('counter') and switches to it.  The goto labels exist to
 * steer gcc's code generation for the list walks.
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * next;
	unsigned long ticks;

	/* Scheduling from interrupt context is a bug: report it and
	 * clear intr_count so the system can limp along. */
	if (intr_count) {
		printk("Aiee: scheduling in interrupt\n");
		intr_count = 0;
	}
	/* Atomically take and reset the ticks accumulated for the
	 * real-time interval timers. */
	cli();
	ticks = itimer_ticks;
	itimer_ticks = 0;
	itimer_next = ~0;
	sti();
	need_resched = 0;
	p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc1;
		if (ticks && p->it_real_value) {
			if (p->it_real_value <= ticks) {
				send_sig(SIGALRM, p, 1);
				if (!p->it_real_incr) {
					/* one-shot timer: disarm it */
					p->it_real_value = 0;
					goto end_itimer;
				}
				/* periodic timer: advance past the ticks
				 * that already elapsed */
				do {
					p->it_real_value += p->it_real_incr;
				} while (p->it_real_value <= ticks);
			}
			p->it_real_value -= ticks;
			/* remember the nearest pending expiry */
			if (p->it_real_value < itimer_next)
				itimer_next = p->it_real_value;
		}
end_itimer:
		if (p->state != TASK_INTERRUPTIBLE)
			continue;
		/* a pending unblocked signal makes the task runnable */
		if (p->signal & ~p->blocked) {
			p->state = TASK_RUNNING;
			continue;
		}
		/* so does an expired sleep timeout */
		if (p->timeout && p->timeout <= jiffies) {
			p->timeout = 0;
			p->state = TASK_RUNNING;
		}
	}
confuse_gcc1:

	/* this is the scheduler proper: */
#if 0
	/* disabled experiment: boost tasks that voluntarily sleep */
	if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
	    current->counter < current->priority*2) {
		++current->counter;
	}
#endif
	/* Pick the runnable task with the highest counter.  c starts
	 * below any real counter, and init_task is the fallback when
	 * nothing else is runnable. */
	c = -1000;
	next = p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc2;
		if (p->state == TASK_RUNNING && p->counter > c)
			c = p->counter, next = p;
	}
confuse_gcc2:
	/* Every runnable task has used up its slice: recharge all
	 * counters (sleepers keep half their remaining slice). */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
	if (current == next)
		return;
	kstat.context_swtch++;
	switch_to(next);
}
209
/*
 * pause() system call: sleep until a signal is delivered.  Returns
 * -ERESTARTNOHAND so the call is not restarted after the signal.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
216
217
218
219
220
221
222
223
224
225 void wake_up(struct wait_queue **q)
226 {
227 struct wait_queue *tmp;
228 struct task_struct * p;
229
230 if (!q || !(tmp = *q))
231 return;
232 do {
233 if ((p = tmp->task) != NULL) {
234 if ((p->state == TASK_UNINTERRUPTIBLE) ||
235 (p->state == TASK_INTERRUPTIBLE)) {
236 p->state = TASK_RUNNING;
237 if (p->counter > current->counter + 3)
238 need_resched = 1;
239 }
240 }
241 if (!tmp->next) {
242 printk("wait_queue is bad (eip = %p)\n",
243 __builtin_return_address(0));
244 printk(" q = %p\n",q);
245 printk(" *q = %p\n",*q);
246 printk(" tmp = %p\n",tmp);
247 break;
248 }
249 tmp = tmp->next;
250 } while (tmp != *q);
251 }
252
253 void wake_up_interruptible(struct wait_queue **q)
254 {
255 struct wait_queue *tmp;
256 struct task_struct * p;
257
258 if (!q || !(tmp = *q))
259 return;
260 do {
261 if ((p = tmp->task) != NULL) {
262 if (p->state == TASK_INTERRUPTIBLE) {
263 p->state = TASK_RUNNING;
264 if (p->counter > current->counter + 3)
265 need_resched = 1;
266 }
267 }
268 if (!tmp->next) {
269 printk("wait_queue is bad (eip = %p)\n",
270 __builtin_return_address(0));
271 printk(" q = %p\n",q);
272 printk(" *q = %p\n",*q);
273 printk(" tmp = %p\n",tmp);
274 break;
275 }
276 tmp = tmp->next;
277 } while (tmp != *q);
278 }
279
/*
 * Slow path of semaphore down(): sleep uninterruptibly until the
 * count goes positive.  The state is set back to TASK_UNINTERRUPTIBLE
 * before every re-test of the count, closing the race with an up()
 * that wakes us between the test and the next schedule().
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
292
/*
 * Common helper for sleep_on()/interruptible_sleep_on(): enqueue the
 * current task on the wait queue, sleep in the given state, and
 * dequeue on wakeup.  Interrupts are enabled while asleep; the
 * caller's interrupt flag is restored before returning.  The state is
 * set before enqueueing so a wake_up() cannot be lost in between.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	/* the idle/init task must never sleep */
	if (current == task[0])
		panic("task[0] trying to sleep");
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
310
/* Sleep on the wait queue until woken or a signal arrives. */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}
315
/* Sleep on the wait queue until explicitly woken (signals ignored). */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
320
321
322
323
324
/*
 * Doubly linked, expiry-sorted list of pending dynamic timers.  The
 * head is its own sentinel with an "infinite" (~0) expiry, so sorted
 * insertion in add_timer() always terminates.
 */
static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
#define SLOW_BUT_DEBUGGING_TIMERS 1
327
/*
 * Queue a timer.  timer->expires is relative (ticks from now) on
 * entry and is converted to an absolute jiffies value here.  The
 * list is kept sorted by expiry; interrupts are disabled around the
 * actual link-in.
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	/* catch adding a timer that is already queued */
	if (timer->next || timer->prev) {
		printk("add_timer() called with non-zero list from %p\n",
			__builtin_return_address(0));
		return;
	}
#endif
	p = &timer_head;
	timer->expires += jiffies;	/* relative -> absolute */
	save_flags(flags);
	cli();
	/* find the first entry expiring after this timer; the
	 * sentinel's ~0 expiry guarantees the loop stops */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	/* link the timer in just before p */
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}
353
/*
 * Remove a timer from the pending list.  Returns 1 if the timer was
 * queued (its expires field is converted back to a relative value),
 * 0 if it was not pending.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	struct timer_list * p;

	p = &timer_head;
	save_flags(flags);
	cli();
	/* debugging variant: verify the timer really is on the list
	 * before unlinking it */
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			timer->expires -= jiffies;	/* absolute -> relative */
			return 1;
		}
	}
	/* not on the list, yet its links are non-NULL: complain */
	if (timer->next || timer->prev)
		printk("del_timer() called from %p with timer not initialized\n",
			__builtin_return_address(0));
	restore_flags(flags);
	return 0;
#else
	/* fast variant: trust timer->next as the "is queued" flag */
	save_flags(flags);
	cli();
	if (timer->next) {
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		restore_flags(flags);
		timer->expires -= jiffies;	/* absolute -> relative */
		return 1;
	}
	restore_flags(flags);
	return 0;
#endif
}
393
/* The old-style static kernel timers: one bit of timer_active per
 * timer_table[] slot. */
unsigned long timer_active = 0;
struct timer_struct timer_table[32];

/*
 * Load-average state: the 1, 5 and 15 minute averages as fixed-point
 * values (see FIXED_1 / CALC_LOAD), updated by calc_load().
 */
unsigned long avenrun[3] = { 0,0,0 };
404
405
406
407
408 static unsigned long count_active_tasks(void)
409 {
410 struct task_struct **p;
411 unsigned long nr = 0;
412
413 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
414 if (*p && ((*p)->state == TASK_RUNNING ||
415 (*p)->state == TASK_UNINTERRUPTIBLE ||
416 (*p)->state == TASK_SWAPPING))
417 nr += FIXED_1;
418 return nr;
419 }
420
421 static inline void calc_load(void)
422 {
423 unsigned long active_tasks;
424 static int count = LOAD_FREQ;
425
426 if (count-- > 0)
427 return;
428 count = LOAD_FREQ;
429 active_tasks = count_active_tasks();
430 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
431 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
432 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
433 }
434
435
436
437
438
439
440
441
442
443
444
/*
 * Called from do_timer() once per second.  Grows the maximum clock
 * error, computes the phase/frequency correction (time_adj) to apply
 * over the coming second, handles leap-second insertion/deletion,
 * and copies the software clock to the CMOS RTC roughly every 11
 * minutes.
 */
static void second_overflow(void)
{
	long ltemp;

	/* last time the cmos clock got written back */
	static long last_rtc_update=0;
	extern int set_rtc_mmss(unsigned long);

	/* bump the maximum error, saturating well below LONG_MAX */
	time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
	    0x70000000 : (time_maxerror + time_tolerance);

	/* fold part of time_offset into time_adj; the shifts implement
	 * the PLL loop filter (see <linux/timex.h> for the scaling) */
	if (time_offset < 0) {
		ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
		time_adj = - time_adj;
	} else if (time_offset > 0) {
		ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
	} else {
		time_adj = 0;
	}

	/* add the frequency correction plus a constant fine-tune term */
	time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
	    + FINETUNE;

	/* leap-second processing, driven by the time daemon's status */
	switch (time_status) {
	case TIME_INS:
		/* insert 23:59:60 at the end of the UTC day */
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_status = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 GMT\n");
		}
		break;

	case TIME_DEL:
		/* skip 23:59:59 at the end of the UTC day */
		if (xtime.tv_sec % 86400 == 86399) {
			xtime.tv_sec++;
			time_status = TIME_OK;
			printk("Clock: deleting leap second 23:59:59 GMT\n");
		}
		break;

	case TIME_OOP:
		/* leap second done; back to normal */
		time_status = TIME_OK;
		break;
	}
	/* Write the RTC about every 11 minutes; on failure retry in 60s.
	 * (NB: the else correctly binds to the inner if.) */
	if (xtime.tv_sec > last_rtc_update + 660)
		if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			last_rtc_update = xtime.tv_sec - 600;
}
503
504
505
506
/*
 * Timer bottom half: fire every expired timer on the dynamic timer
 * list, then every expired slot of the static timer_table.  Handlers
 * run with interrupts enabled; the list itself is only manipulated
 * with interrupts off.
 */
static void timer_bh(void * unused)
{
	unsigned long mask;
	struct timer_struct *tp;
	struct timer_list * timer;

	cli();
	while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		/* unlink before calling: the handler may re-add it */
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();
		fn(data);
		cli();
	}
	sti();

	/* static timers: one bit of timer_active per table slot */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;	/* clear before running: fn may rearm */
		tp->fn();
		sti();
	}
}
538
/* TQUEUE bottom half: run everything queued on tq_timer. */
void tqueue_bh(void * unused)
{
	run_task_queue(&tq_timer);
}
543
/* IMMEDIATE bottom half: run everything queued on tq_immediate. */
void immediate_bh(void * unused)
{
	run_task_queue(&tq_immediate);
}
548
549
550
551
552
553
554
/*
 * The timer interrupt handler, invoked HZ times per second.  It
 * advances xtime (applying the PLL phase correction and any pending
 * adjtime() slew), charges the tick to the current task, enforces
 * RLIMIT_CPU, decrements the virtual/profiling interval timers, and
 * marks the timer bottom halves for execution.
 */
static void do_timer(struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;

	long ltemp, psecs;

	/* Advance the phase accumulator and step tv_usec by one tick,
	 * corrected by whole scaled-out microseconds of phase. */
	time_phase += time_adj;
	if (time_phase < -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase > FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust)
	{
		/* adjtime() slewing: apply at most tickadj microseconds
		 * of the outstanding correction per tick, in the right
		 * direction. */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		/* reduce the outstanding correction by what we applied */
		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;

	/* carry microseconds into seconds */
	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();	/* once-per-second housekeeping */
	}

	jiffies++;
	calc_load();
	/* Charge the tick as user or system time depending on where
	 * the interrupt hit (vm86 mode counts as user). */
	if ((VM_MASK & regs->eflags) || (3 & regs->cs)) {
		current->utime++;
		if (current != task[0]) {
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}
		/* ITIMER_VIRTUAL only runs down in user mode */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		current->stime++;
		if(current != task[0])
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		/* kernel profiling: bump the bucket for the interrupted
		 * kernel eip */
		if (prof_buffer && current != task[0]) {
			unsigned long eip = regs->eip;
			eip >>= CONFIG_PROFILE_SHIFT;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}

	/* RLIMIT_CPU: SIGKILL at the hard limit; SIGXCPU at the soft
	 * limit and every 5 CPU-seconds past it (checked only on whole
	 * CPU-second boundaries). */
	if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
	    (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
		send_sig(SIGKILL, current, 1);
	if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
	    (((current->stime + current->utime) % HZ) == 0)) {
		psecs = (current->stime + current->utime) / HZ;
		/* send when we go past the soft limit */
		if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
			send_sig(SIGXCPU, current, 1);
		/* and every five seconds thereafter */
		else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
		    ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
			send_sig(SIGXCPU, current, 1);
	}

	/* time slice used up: ask for a reschedule (not for task[0]) */
	if (current != task[0] && 0 > --current->counter) {
		current->counter = 0;
		need_resched = 1;
	}

	/* ITIMER_PROF runs down in both user and system mode */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
	/* any expired static timers? defer them to the bottom half */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	cli();
	itimer_ticks++;
	/* an ITIMER_REAL is due: schedule() will deliver it */
	if (itimer_ticks > itimer_next)
		need_resched = 1;
	/* dynamic timers expired? (sentinel expiry ~0 keeps this safe
	 * on an empty list) */
	if (timer_head.next->expires < jiffies)
		mark_bh(TIMER_BH);
	if (tq_timer != &tq_last)
		mark_bh(TQUEUE_BH);
	sti();
}
682
683 asmlinkage int sys_alarm(long seconds)
684 {
685 struct itimerval it_new, it_old;
686
687 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
688 it_new.it_value.tv_sec = seconds;
689 it_new.it_value.tv_usec = 0;
690 _setitimer(ITIMER_REAL, &it_new, &it_old);
691 return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
692 }
693
/* getpid(): return the process id of the current task. */
asmlinkage int sys_getpid(void)
{
	return current->pid;
}

/* getppid(): return the pid of the current task's original parent. */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}

/* getuid(): return the real user id of the current task. */
asmlinkage int sys_getuid(void)
{
	return current->uid;
}

/* geteuid(): return the effective user id of the current task. */
asmlinkage int sys_geteuid(void)
{
	return current->euid;
}

/* getgid(): return the real group id of the current task. */
asmlinkage int sys_getgid(void)
{
	return current->gid;
}

/* getegid(): return the effective group id of the current task. */
asmlinkage int sys_getegid(void)
{
	return current->egid;
}
723
724 asmlinkage int sys_nice(long increment)
725 {
726 int newprio;
727
728 if (increment < 0 && !suser())
729 return -EPERM;
730 newprio = current->priority - increment;
731 if (newprio < 1)
732 newprio = 1;
733 if (newprio > 35)
734 newprio = 35;
735 current->priority = newprio;
736 return 0;
737 }
738
/*
 * Print a one-line summary of one task: name, slot number (negated
 * for the current task), state letter, saved PC, estimated free
 * kernel stack, pid, and family (father/child/sibling) pids.
 */
static void show_task(int nr,struct task_struct * p)
{
	unsigned long free;
	static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
	if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[p->state]);
	else
		printk(" ");
#ifdef __i386__
	/* for the running task the saved esp is stale, so don't print it */
	if (p == current)
		printk(" current ");
	else
		printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);
#endif
	/* Estimate untouched stack: scan up from the bottom of the
	 * kernel stack page for the first non-zero word (assumes
	 * unused stack is still zero — NOTE(review): heuristic). */
	for (free = 1; free < 1024 ; free++) {
		if (((unsigned long *)p->kernel_stack_page)[free])
			break;
	}
	printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
		printk("      ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);
	else
		printk("       ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);
	else
		printk("\n");
}
773
774 void show_state(void)
775 {
776 int i;
777
778 printk(" free sibling\n");
779 printk(" task PC stack pid father child younger older\n");
780 for (i=0 ; i<NR_TASKS ; i++)
781 if (task[i])
782 show_task(i,task[i]);
783 }
784
/*
 * Scheduler initialization: register the timer-related bottom
 * halves, program the 8253 PIT for HZ interrupts per second, and
 * claim the timer interrupt line.
 */
void sched_init(void)
{
	bh_base[TIMER_BH].routine = timer_bh;
	bh_base[TQUEUE_BH].routine = tqueue_bh;
	bh_base[IMMEDIATE_BH].routine = immediate_bh;
	/* code elsewhere apparently depends on this exact layout —
	 * the panic guards it (NOTE(review): confirm against the
	 * low-level signal code) */
	if (sizeof(struct sigaction) != 16)
		panic("Struct sigaction MUST be 16 bytes");
	/* PIT: channel 0, mode 2 (rate generator), binary, LSB+MSB */
	outb_p(0x34,0x43);
	outb_p(LATCH & 0xff , 0x40);	/* low byte of the divisor */
	outb(LATCH >> 8 , 0x40);	/* high byte of the divisor */
	if (request_irq(TIMER_IRQ,(void (*)(int)) do_timer, 0, "timer") != 0)
		panic("Could not allocate timer IRQ!");
}
797 }