This source file includes following definitions.
- math_state_restore
- math_emulate
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- immediate_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/tqueue.h>
27 #include <linux/resource.h>
28
29 #include <asm/system.h>
30 #include <asm/io.h>
31 #include <asm/segment.h>
32
33 #define TIMER_IRQ 0
34
35 #include <linux/timex.h>
36
37
38
39
40 long tick = 1000000 / HZ;
41 volatile struct timeval xtime;
42 int tickadj = 500/HZ;
43
44 DECLARE_TASK_QUEUE(tq_timer);
45 DECLARE_TASK_QUEUE(tq_immediate);
46
47
48
49
50 int time_status = TIME_BAD;
51 long time_offset = 0;
52 long time_constant = 0;
53 long time_tolerance = MAXFREQ;
54 long time_precision = 1;
55 long time_maxerror = 0x70000000;
56 long time_esterror = 0x70000000;
57 long time_phase = 0;
58 long time_freq = 0;
59 long time_adj = 0;
60 long time_reftime = 0;
61
62 long time_adjust = 0;
63 long time_adjust_step = 0;
64
65 int need_resched = 0;
66 unsigned long event = 0;
67
68
69
70
71 int hard_math = 0;
72 int x86 = 0;
73 int fdiv_bug = 0;
74
75 char x86_vendor_id[13] = "Unknown";
76
77 int ignore_irq13 = 0;
78 int wp_works_ok = 0;
79 int hlt_works_ok = 1;
80
81
82
83
84 int EISA_bus = 0;
85
86 extern int _setitimer(int, struct itimerval *, struct itimerval *);
87 unsigned long * prof_buffer = NULL;
88 unsigned long prof_len = 0;
89
90 #define _S(nr) (1<<((nr)-1))
91
92 extern void mem_use(void);
93
94 extern int timer_interrupt(void);
95 asmlinkage int system_call(void);
96
97 static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
98 static struct vm_area_struct init_mmap = INIT_MMAP;
99 struct task_struct init_task = INIT_TASK;
100
101 unsigned long volatile jiffies=0;
102
103 struct task_struct *current = &init_task;
104 struct task_struct *last_task_used_math = NULL;
105
106 struct task_struct * task[NR_TASKS] = {&init_task, };
107
108 long user_stack [ PAGE_SIZE>>2 ] = { STACK_MAGIC, };
109
110 struct {
111 long * a;
112 short b;
113 } stack_start = { & user_stack [PAGE_SIZE>>2] , KERNEL_DS };
114
115 struct kernel_stat kstat = { 0 };
116
117
118
119
120
121
122
123
/*
 * Lazy FPU context switch: called on the first coprocessor use after a
 * task switch.  Saves the FPU state of the previous FPU owner and loads
 * (or initializes) the state of the current task.
 */
asmlinkage void math_state_restore(void)
{
	__asm__ __volatile__("clts");	/* clear TS so FPU ops no longer fault */
	if (last_task_used_math == current)
		return;		/* state already belongs to us */
	/* arm the coprocessor watchdog timer while we touch the FPU */
	timer_table[COPRO_TIMER].expires = jiffies+50;
	timer_active |= 1<<COPRO_TIMER;
	if (last_task_used_math)
		/* save the previous owner's full FPU state into its TSS */
		__asm__("fnsave %0":"=m" (last_task_used_math->tss.i387));
	else
		__asm__("fnclex");	/* nobody owned it: just clear exceptions */
	last_task_used_math = current;
	if (current->used_math) {
		/* restore this task's previously saved FPU state */
		__asm__("frstor %0": :"m" (current->tss.i387));
	} else {
		/* first FPU use by this task: start from a clean state */
		__asm__("fninit");
		current->used_math=1;
	}
	timer_active &= ~(1<<COPRO_TIMER);	/* disarm the watchdog */
}
144
#ifndef CONFIG_MATH_EMULATION

/*
 * Stub used when the kernel is built without FP emulation: a task that
 * executes an FPU instruction on a machine with no coprocessor cannot
 * proceed, so it is killed with SIGFPE.
 */
asmlinkage void math_emulate(long arg)
{
	printk("math-emulation not enabled and no coprocessor found.\n");
	printk("killing %s.\n",current->comm);
	send_sig(SIGFPE,current,1);
	/* let the fatal signal be delivered; we never return to the FP insn */
	schedule();
}

#endif
156
157 unsigned long itimer_ticks = 0;
158 unsigned long itimer_next = ~0;
159
160
161
162
163
164
165
166
167
168
169
170
171
/*
 * The main scheduler.  Two passes over the circular task list:
 *   1) deliver expired real-time interval timers and wake tasks whose
 *      signals or timeouts are pending;
 *   2) pick the runnable task with the largest remaining counter.
 * The 'confuse_gcc' labels exist only to break out of the for(;;) loops.
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * next;
	unsigned long ticks;

	/* scheduling from interrupt context is a bug; report and recover */
	if (intr_count) {
		printk("Aiee: scheduling in interrupt\n");
		intr_count = 0;
	}
	/* atomically grab and reset the pending itimer tick count */
	cli();
	ticks = itimer_ticks;
	itimer_ticks = 0;
	itimer_next = ~0;
	sti();
	need_resched = 0;
	p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc1;	/* full circle: pass 1 done */
		/* age this task's ITIMER_REAL by the ticks we collected */
		if (ticks && p->it_real_value) {
			if (p->it_real_value <= ticks) {
				send_sig(SIGALRM, p, 1);
				if (!p->it_real_incr) {
					/* one-shot timer: disarm it */
					p->it_real_value = 0;
					goto end_itimer;
				}
				/* periodic: advance past the elapsed ticks */
				do {
					p->it_real_value += p->it_real_incr;
				} while (p->it_real_value <= ticks);
			}
			p->it_real_value -= ticks;
			/* track the soonest pending itimer expiry */
			if (p->it_real_value < itimer_next)
				itimer_next = p->it_real_value;
		}
end_itimer:
		if (p->state != TASK_INTERRUPTIBLE)
			continue;
		/* pending unblocked signal wakes an interruptible sleeper */
		if (p->signal & ~p->blocked) {
			p->state = TASK_RUNNING;
			continue;
		}
		/* expired sleep timeout also wakes it */
		if (p->timeout && p->timeout <= jiffies) {
			p->timeout = 0;
			p->state = TASK_RUNNING;
		}
	}
confuse_gcc1:

#if 0
	/* disabled experiment: give sleeping tasks a small counter boost */
	if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
	    current->counter < current->priority*2) {
		++current->counter;
	}
#endif
	/* pass 2: pick the runnable task with the highest counter */
	c = -1000;
	next = p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc2;
		if (p->state == TASK_RUNNING && p->counter > c)
			c = p->counter, next = p;
	}
confuse_gcc2:
	/* all runnable counters exhausted: recharge everyone from priority */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
	if (current == next)
		return;		/* no switch needed */
	kstat.context_swtch++;
	switch_to(next);
	/* we resume here when switched back in; reload hw debug registers */
	if(current->debugreg[7]){
		loaddebug(0);
		loaddebug(1);
		loaddebug(2);
		loaddebug(3);
		loaddebug(6);
	};
}
261
/*
 * pause(2): sleep interruptibly until a signal arrives.  The state must
 * be set before calling schedule() so a signal can wake us.  Returns
 * -ERESTARTNOHAND so the syscall is not restarted after the handler.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
268
269
270
271
272
273
274
275
276
/*
 * Wake every task on the circular wait queue *q, whether it sleeps
 * interruptibly or uninterruptibly.  Sets need_resched when a woken
 * task clearly deserves the CPU more than the current one.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;		/* no queue, or queue empty */
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE)) {
				p->state = TASK_RUNNING;
				/* preempt if the woken task is well ahead of us */
				if (p->counter > current->counter + 3)
					need_resched = 1;
			}
		}
		/* circular list: a NULL next means the queue is corrupt */
		if (!tmp->next) {
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk("        q = %p\n",q);
			printk("       *q = %p\n",*q);
			printk("      tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
304
/*
 * Like wake_up(), but wakes only tasks in TASK_INTERRUPTIBLE sleep,
 * leaving uninterruptible sleepers alone.
 */
void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;		/* no queue, or queue empty */
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE) {
				p->state = TASK_RUNNING;
				/* preempt if the woken task is well ahead of us */
				if (p->counter > current->counter + 3)
					need_resched = 1;
			}
		}
		/* circular list: a NULL next means the queue is corrupt */
		if (!tmp->next) {
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk("        q = %p\n",q);
			printk("       *q = %p\n",*q);
			printk("      tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
331
/*
 * Semaphore slow path: sleep uninterruptibly until the count becomes
 * positive.  The state is set to TASK_UNINTERRUPTIBLE *before* testing
 * the count each time around, so an up() between the test and the
 * schedule() is not lost (it flips us back to TASK_RUNNING).
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		/* re-arm before re-testing to avoid a missed wakeup */
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
344
/*
 * Common helper for sleep_on()/interruptible_sleep_on(): enqueue the
 * current task on wait queue *p in the given sleep state and schedule
 * away until woken.  Interrupt flags are saved and restored around the
 * sleep; interrupts are enabled while we wait.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	/* the idle/init task must never sleep */
	if (current == task[0])
		panic("task[0] trying to sleep");
	/* set the state before queueing so a wakeup cannot be missed */
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
362
/* Sleep on *p in TASK_INTERRUPTIBLE state (signals can wake us). */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}
367
/* Sleep on *p in TASK_UNINTERRUPTIBLE state (only wake_up() wakes us). */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
372
373
374
375
376
377 static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
378 #define SLOW_BUT_DEBUGGING_TIMERS 1
379
/*
 * Insert a dynamic timer into the doubly linked, expiry-sorted list
 * headed by timer_head.  timer->expires is relative on entry and is
 * converted to an absolute jiffies value here.  The list walk and
 * splice are done with interrupts off.
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	/* a timer already on a list would corrupt both lists if re-added */
	if (timer->next || timer->prev) {
		printk("add_timer() called with non-zero list from %p\n",
			__builtin_return_address(0));
		return;
	}
#endif
	p = &timer_head;
	timer->expires += jiffies;	/* relative -> absolute */
	save_flags(flags);
	cli();
	/* find the first entry expiring at or after this timer
	 * (the head's expires of ~0 guarantees termination) */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	/* splice in just before p */
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}
405
/*
 * Remove a timer from the list.  Returns 1 if it was found and removed
 * (timer->expires is converted back to a relative value), 0 if it was
 * not pending.  The debugging variant verifies the timer is actually on
 * the list before unlinking; the fast variant trusts timer->next.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	struct timer_list * p;

	p = &timer_head;
	save_flags(flags);
	cli();
	/* walk the list to prove the timer is really queued */
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			timer->expires -= jiffies;	/* absolute -> relative */
			return 1;
		}
	}
	/* not on the list but links are non-NULL: caller error */
	if (timer->next || timer->prev)
		printk("del_timer() called from %p with timer not initialized\n",
			__builtin_return_address(0));
	restore_flags(flags);
	return 0;
#else
	save_flags(flags);
	cli();
	if (timer->next) {
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		restore_flags(flags);
		timer->expires -= jiffies;	/* absolute -> relative */
		return 1;
	}
	restore_flags(flags);
	return 0;
#endif
}
445
446 unsigned long timer_active = 0;
447 struct timer_struct timer_table[32];
448
449
450
451
452
453
454
455 unsigned long avenrun[3] = { 0,0,0 };
456
457
458
459
460 static unsigned long count_active_tasks(void)
461 {
462 struct task_struct **p;
463 unsigned long nr = 0;
464
465 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
466 if (*p && ((*p)->state == TASK_RUNNING ||
467 (*p)->state == TASK_UNINTERRUPTIBLE ||
468 (*p)->state == TASK_SWAPPING))
469 nr += FIXED_1;
470 return nr;
471 }
472
/*
 * Update the 1/5/15-minute load averages.  Called once per tick; the
 * static countdown makes the real work happen only every LOAD_FREQ
 * ticks.  CALC_LOAD applies the exponential decay with the EXP_*
 * constants into avenrun[].
 */
static inline void calc_load(void)
{
	unsigned long active_tasks;
	static int count = LOAD_FREQ;	/* ticks until next recalculation */

	if (count-- > 0)
		return;
	count = LOAD_FREQ;
	active_tasks = count_active_tasks();
	CALC_LOAD(avenrun[0], EXP_1, active_tasks);
	CALC_LOAD(avenrun[1], EXP_5, active_tasks);
	CALC_LOAD(avenrun[2], EXP_15, active_tasks);
}
486
487
488
489
490
491
492
493
494
495
496
/*
 * Run once per wall-clock second (from do_timer when tv_usec wraps).
 * Implements the phase-locked-loop adjustments of the kernel
 * timekeeping model: grows the maximum error bound, computes the
 * per-tick phase adjustment time_adj from time_offset and time_freq,
 * handles leap-second insertion/deletion, and periodically writes the
 * system time back to the CMOS RTC.
 */
static void second_overflow(void)
{
	long ltemp;

	static long last_rtc_update=0;
	extern int set_rtc_mmss(unsigned long);

	/* error bound grows by the tolerance each second, saturating */
	time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
	    0x70000000 : (time_maxerror + time_tolerance);

	/* convert the remaining clock offset into a per-tick phase
	 * adjustment (time_adj), consuming part of time_offset */
	if (time_offset < 0) {
		ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
		time_adj = - time_adj;
	} else if (time_offset > 0) {
		ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
	} else {
		time_adj = 0;
	}

	/* add the frequency correction plus the compile-time fine tune */
	time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
	    + FINETUNE;

	/* leap-second state machine */
	switch (time_status) {
	case TIME_INS:
		/* insert a second at midnight UTC */
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_status = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 GMT\n");
		}
		break;

	case TIME_DEL:
		/* delete the last second of the day */
		if (xtime.tv_sec % 86400 == 86399) {
			xtime.tv_sec++;
			time_status = TIME_OK;
			printk("Clock: deleting leap second 23:59:59 GMT\n");
		}
		break;

	case TIME_OOP:
		time_status = TIME_OK;
		break;
	}
	/* write the time to the CMOS RTC about every 11 minutes; the else
	 * binds to the inner if: on failure, retry roughly a minute later */
	if (xtime.tv_sec > last_rtc_update + 660)
		if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			last_rtc_update = xtime.tv_sec - 600;
}
555
556
557
558
/*
 * Timer bottom half: runs expired dynamic timers from the sorted
 * timer_head list, then expired static timer_table entries.  Each list
 * timer is unlinked under cli() but its handler runs with interrupts
 * enabled; the function pointer and data are copied out first because
 * the handler may re-queue or free the timer.
 */
static void timer_bh(void * unused)
{
	unsigned long mask;
	struct timer_struct *tp;
	struct timer_list * timer;

	cli();
	while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		/* unlink before running: handler may rearm the timer */
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();
		fn(data);
		cli();
	}
	sti();

	/* now the old-style static timer table, one bit of timer_active per slot */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;		/* no higher bits set: done */
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;	/* one-shot: clear before running */
		tp->fn();
		sti();
	}
}
590
/* Bottom half that drains the per-tick task queue. */
void tqueue_bh(void * unused)
{
	run_task_queue(&tq_timer);
}
595
/* Bottom half that drains the immediate task queue. */
void immediate_bh(void * unused)
{
	run_task_queue(&tq_immediate);
}
600
601
602
603
604
605
606
/*
 * The timer interrupt handler proper (hooked to IRQ 0 in sched_init).
 * Advances xtime with phase/adjtime corrections, bumps jiffies, does
 * per-task accounting (utime/stime, virtual and profiling itimers,
 * RLIMIT_CPU enforcement, time-slice expiry), and marks the timer and
 * task-queue bottom halves when there is deferred work.
 */
static void do_timer(struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;

	long ltemp, psecs;

	/* apply the NTP phase adjustment accumulated in time_adj; when the
	 * phase accumulator overflows one microsecond (FINEUSEC), fold the
	 * whole microseconds into this tick's xtime increment */
	time_phase += time_adj;
	if (time_phase < -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase > FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust)
	{
		/* adjtime() slewing: move at most tickadj microseconds per
		 * tick toward the requested total adjustment */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		/* consume the part applied this tick */
		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;

	/* carry microseconds into seconds; second_overflow runs the
	 * once-per-second timekeeping work */
	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();
	}

	jiffies++;
	calc_load();
	/* interrupted user mode (or vm86): charge user time */
	if ((VM_MASK & regs->eflags) || (3 & regs->cs)) {
		current->utime++;
		if (current != task[0]) {
			if (current->priority < 15)
				kstat.cpu_nice++;	/* niced task */
			else
				kstat.cpu_user++;
		}
		/* ITIMER_VIRTUAL counts down only in user mode */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		/* interrupted kernel mode: charge system time */
		current->stime++;
		if(current != task[0])
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		/* kernel profiling: histogram of interrupted EIPs */
		if (prof_buffer && current != task[0]) {
			unsigned long eip = regs->eip;
			eip >>= 2;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}

	/* RLIMIT_CPU: SIGKILL at the hard limit; SIGXCPU at the soft limit
	 * and then every 5 CPU-seconds beyond it */
	if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
	    (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
		send_sig(SIGKILL, current, 1);
	if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
	    (((current->stime + current->utime) % HZ) == 0)) {
		psecs = (current->stime + current->utime) / HZ;

		if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
			send_sig(SIGXCPU, current, 1);

		else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
		         ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
			send_sig(SIGXCPU, current, 1);
	}

	/* time slice used up: request a reschedule (task[0] is exempt) */
	if (current != task[0] && 0 > --current->counter) {
		current->counter = 0;
		need_resched = 1;
	}

	/* ITIMER_PROF counts down in both user and kernel mode */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
	/* any expired static timer means the timer bottom half must run */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	cli();
	itimer_ticks++;
	/* an itimer is due: force a trip through schedule() to deliver it */
	if (itimer_ticks > itimer_next)
		need_resched = 1;
	/* expired dynamic timers also need the timer bottom half */
	if (timer_head.next->expires < jiffies)
		mark_bh(TIMER_BH);
	if (tq_timer != &tq_last)
		mark_bh(TQUEUE_BH);
	sti();
}
734
/*
 * alarm(2), implemented on top of setitimer(ITIMER_REAL).  Returns the
 * number of seconds the previous alarm had left.
 * NOTE(review): tv_usec / 1000000 is 0 for any valid usec value, so a
 * partial remaining second is truncated rather than rounded up — looks
 * intentional-but-lossy; confirm against the desired alarm() semantics.
 */
asmlinkage int sys_alarm(long seconds)
{
	struct itimerval it_new, it_old;

	/* one-shot timer: no interval, value = requested seconds */
	it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
	it_new.it_value.tv_sec = seconds;
	it_new.it_value.tv_usec = 0;
	_setitimer(ITIMER_REAL, &it_new, &it_old);
	return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
}
745
/* getpid(2): return the calling task's process id. */
asmlinkage int sys_getpid(void)
{
	return current->pid;
}
750
/* getppid(2): return the pid of the original parent (p_opptr). */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}
755
/* getuid(2): return the real user id. */
asmlinkage int sys_getuid(void)
{
	return current->uid;
}
760
/* geteuid(2): return the effective user id. */
asmlinkage int sys_geteuid(void)
{
	return current->euid;
}
765
/* getgid(2): return the real group id. */
asmlinkage int sys_getgid(void)
{
	return current->gid;
}
770
/* getegid(2): return the effective group id. */
asmlinkage int sys_getegid(void)
{
	return current->egid;
}
775
776 asmlinkage int sys_nice(long increment)
777 {
778 int newprio;
779
780 if (increment < 0 && !suser())
781 return -EPERM;
782 newprio = current->priority - increment;
783 if (newprio < 1)
784 newprio = 1;
785 if (newprio > 35)
786 newprio = 35;
787 current->priority = newprio;
788 return 0;
789 }
790
/*
 * Print one line of the show_state() table for task p at slot nr:
 * command, slot (negated for the current task), state letter, saved PC,
 * free kernel-stack bytes, pid, parent pid, and sibling pids.
 */
static void show_task(int nr,struct task_struct * p)
{
	unsigned long free;
	static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
	if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[p->state]);
	else
		printk(" ");	/* unknown state value */
	if (p == current)
		printk(" current  ");
	else
		/* saved EIP from the task's kernel stack frame */
		printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);
	/* estimate free stack: scan from the bottom for the first
	 * non-zero word on the kernel stack page */
	for (free = 1; free < 1024 ; free++) {
		if (((unsigned long *)p->kernel_stack_page)[free])
			break;
	}
	printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
		printk("      ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);
	else
		printk("       ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);
	else
		printk("\n");
}
823
/*
 * Dump a one-line summary of every task in the system (debugging aid,
 * typically wired to a SysRq-style key).
 */
void show_state(void)
{
	int i;

	printk("                         free                        sibling\n");
	printk("  task             PC   stack   pid father child younger older\n");
	for (i=0 ; i<NR_TASKS ; i++)
		if (task[i])
			show_task(i,task[i]);
}
834
/*
 * One-time scheduler initialization at boot: registers the bottom-half
 * handlers, sets up the GDT descriptors for task 0's TSS and LDT,
 * installs the system-call gate, clears the remaining task slots and
 * descriptors, programs the 8254 timer chip, and claims IRQ 0 for
 * do_timer.
 */
void sched_init(void)
{
	int i;
	struct desc_struct * p;

	bh_base[TIMER_BH].routine = timer_bh;
	bh_base[TQUEUE_BH].routine = tqueue_bh;
	bh_base[IMMEDIATE_BH].routine = immediate_bh;
	/* assembly entry code hard-codes this layout */
	if (sizeof(struct sigaction) != 16)
		panic("Struct sigaction MUST be 16 bytes");
	set_tss_desc(gdt+FIRST_TSS_ENTRY,&init_task.tss);
	set_ldt_desc(gdt+FIRST_LDT_ENTRY,&default_ldt,1);
	set_system_gate(0x80,&system_call);	/* int 0x80 = syscall entry */
	/* zero the TSS and LDT descriptor pairs of the remaining slots */
	p = gdt+2+FIRST_TSS_ENTRY;
	for(i=1 ; i<NR_TASKS ; i++) {
		task[i] = NULL;
		p->a=p->b=0;
		p++;
		p->a=p->b=0;
		p++;
	}
	/* clear NT (nested task) in EFLAGS so iret behaves normally */
	__asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
	load_TR(0);
	load_ldt(0);
	/* program 8254 channel 0: binary, mode 3, LSB/MSB, HZ rate */
	outb_p(0x34,0x43);
	outb_p(LATCH & 0xff , 0x40);	/* divisor low byte */
	outb(LATCH >> 8 , 0x40);	/* divisor high byte */
	if (request_irq(TIMER_IRQ,(void (*)(int)) do_timer, 0, "timer") != 0)
		panic("Could not allocate timer IRQ!");
}
865 }