This source file includes the following definitions:
- math_state_restore
- math_emulate
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/segment.h>
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/tqueue.h>
28
29 #include <asm/system.h>
30 #include <asm/io.h>
31 #include <asm/segment.h>
32
33 #define TIMER_IRQ 0
34
35 #include <linux/timex.h>
36
37
38
39
40 long tick = 1000000 / HZ;
41 volatile struct timeval xtime;
42 int tickadj = 500/HZ;
43
44 DECLARE_TASK_QUEUE(tq_timer);
45
46
47
48
49 int time_status = TIME_BAD;
50 long time_offset = 0;
51 long time_constant = 0;
52 long time_tolerance = MAXFREQ;
53 long time_precision = 1;
54 long time_maxerror = 0x70000000;
55 long time_esterror = 0x70000000;
56 long time_phase = 0;
57 long time_freq = 0;
58 long time_adj = 0;
59 long time_reftime = 0;
60
61 long time_adjust = 0;
62 long time_adjust_step = 0;
63
64 int need_resched = 0;
65
66
67
68
69 int hard_math = 0;
70 int x86 = 0;
71 int ignore_irq13 = 0;
72 int wp_works_ok = 0;
73
74
75
76
77 int EISA_bus = 0;
78
79 extern int _setitimer(int, struct itimerval *, struct itimerval *);
80 unsigned long * prof_buffer = NULL;
81 unsigned long prof_len = 0;
82
83 #define _S(nr) (1<<((nr)-1))
84
85 extern void mem_use(void);
86
87 extern int timer_interrupt(void);
88 asmlinkage int system_call(void);
89
90 static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
91 struct task_struct init_task = INIT_TASK;
92
93 unsigned long volatile jiffies=0;
94
95 struct task_struct *current = &init_task;
96 struct task_struct *last_task_used_math = NULL;
97
98 struct task_struct * task[NR_TASKS] = {&init_task, };
99
100 long user_stack [ PAGE_SIZE>>2 ] = { STACK_MAGIC, };
101
102 struct {
103 long * a;
104 short b;
105 } stack_start = { & user_stack [PAGE_SIZE>>2] , KERNEL_DS };
106
107 struct kernel_stat kstat = { 0 };
108
109
110
111
112
113
114
115
/*
 * Lazy FPU context switch: called on a coprocessor "device not
 * available" fault.  Saves the previous FPU owner's state (if any)
 * and loads the current task's state, so FP context is only swapped
 * when a task actually uses floating point.
 */
asmlinkage void math_state_restore(void)
{
	__asm__ __volatile__("clts");	/* clear TS so FP insns stop faulting */
	if (last_task_used_math == current)
		return;			/* our state is already in the FPU */
	/* Arm a watchdog around the fnsave/frstor sequence in case the
	   coprocessor wedges (COPRO_TIMER handler is elsewhere -- its
	   exact recovery action isn't visible here). */
	timer_table[COPRO_TIMER].expires = jiffies+50;
	timer_active |= 1<<COPRO_TIMER;
	if (last_task_used_math)
		__asm__("fnsave %0":"=m" (last_task_used_math->tss.i387));
	else
		__asm__("fnclex");	/* no previous owner: just clear exceptions */
	last_task_used_math = current;
	if (current->used_math) {
		__asm__("frstor %0": :"m" (current->tss.i387));
	} else {
		/* first FP use by this task: initialise a clean FPU */
		__asm__("fninit");
		current->used_math=1;
	}
	timer_active &= ~(1<<COPRO_TIMER);	/* disarm the watchdog */
}
136
137 #ifndef CONFIG_MATH_EMULATION
138
/*
 * Stub used when the kernel is built without math emulation: a task
 * that executes an FP instruction on a machine with no coprocessor is
 * killed with SIGFPE.  schedule() is called because the signal may
 * make the task non-runnable immediately.
 */
asmlinkage void math_emulate(long arg)
{
	printk("math-emulation not enabled and no coprocessor found.\n");
	printk("killing %s.\n",current->comm);
	send_sig(SIGFPE,current,1);
	schedule();
}
146
147 #endif
148
149 unsigned long itimer_ticks = 0;
150 unsigned long itimer_next = ~0;
151
152
153
154
155
156
157
158
159
160
161
162
163
/*
 * The scheduler proper.  Two passes over the circular task list:
 * pass 1 delivers pending ITIMER_REAL ticks and wakes interruptible
 * sleepers whose signals or timeouts have arrived; pass 2 picks the
 * runnable task with the largest remaining time-slice counter.
 * The 'confuse_gcc' labels work around an old gcc loop-optimisation
 * problem -- presumably; TODO confirm against contemporary notes.
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * next;
	unsigned long ticks;

	/* Scheduling from interrupt context is a kernel bug; complain
	   and clear intr_count so we can limp on. */
	if (intr_count) {
		printk("Aiee: scheduling in interrupt\n");
		intr_count = 0;
	}
	/* Atomically consume the itimer ticks accumulated by do_timer(). */
	cli();
	ticks = itimer_ticks;
	itimer_ticks = 0;
	itimer_next = ~0;
	sti();
	need_resched = 0;
	p = &init_task;
	/* Pass 1: itimer delivery and wakeups. */
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc1;
		if (ticks && p->it_real_value) {
			if (p->it_real_value <= ticks) {
				send_sig(SIGALRM, p, 1);
				if (!p->it_real_incr) {
					/* one-shot timer: disarm it */
					p->it_real_value = 0;
					goto end_itimer;
				}
				/* periodic timer: step forward until it
				   is past the elapsed tick count */
				do {
					p->it_real_value += p->it_real_incr;
				} while (p->it_real_value <= ticks);
			}
			p->it_real_value -= ticks;
			/* track the soonest expiry for do_timer()'s
			   need_resched shortcut */
			if (p->it_real_value < itimer_next)
				itimer_next = p->it_real_value;
		}
end_itimer:
		if (p->state != TASK_INTERRUPTIBLE)
			continue;
		/* wake on a pending unblocked signal ... */
		if (p->signal & ~p->blocked) {
			p->state = TASK_RUNNING;
			continue;
		}
		/* ... or on an expired sleep timeout */
		if (p->timeout && p->timeout <= jiffies) {
			p->timeout = 0;
			p->state = TASK_RUNNING;
		}
	}
confuse_gcc1:

#if 0
	/* (disabled) partial time-slice credit for tasks that slept */
	if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
		current->counter < current->priority*2) {
		++current->counter;
	}
#endif
	/* Pass 2: select the runnable task with the largest counter. */
	c = -1000;
	next = p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc2;
		if (p->state == TASK_RUNNING && p->counter > c)
			c = p->counter, next = p;
	}
confuse_gcc2:
	/* Every runnable task has used its slice: recharge all counters
	   (sleepers keep half their old value as a bonus). */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
	if (current == next)
		return;
	kstat.context_swtch++;
	switch_to(next);
	/* Execution resumes here when this task is switched back in;
	   reload the hardware debug registers if it uses them. */
	if(current->debugreg[7]){
		loaddebug(0);
		loaddebug(1);
		loaddebug(2);
		loaddebug(3);
		loaddebug(6);
	};
}
253
/*
 * pause() system call: sleep interruptibly until a signal arrives.
 * Always returns -ERESTARTNOHAND so the syscall is not restarted
 * after the signal is handled.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
260
261
262
263
264
265
266
267
268
/*
 * Wake every task on the wait queue, whether sleeping interruptibly
 * or uninterruptibly.  The queue is a circular singly-linked list;
 * a NULL next pointer means the queue is corrupt, which is reported
 * (with the caller's return address) rather than looped on forever.
 * Safe to call from interrupt context.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;		/* no queue, or queue is empty */
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE)) {
				p->state = TASK_RUNNING;
				/* ask for a reschedule if the woken task
				   has more time-slice left than we do */
				if (p->counter > current->counter)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			/* broken (non-circular) list: dump diagnostics;
			   q[-1] is the caller's return address on the stack */
			printk("wait_queue is bad (eip = %08lx)\n",((unsigned long *) q)[-1]);
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
295
/*
 * Like wake_up(), but only wakes tasks in TASK_INTERRUPTIBLE sleep;
 * uninterruptible sleepers are left alone.  Same circular-list walk
 * and same corruption diagnostics.
 */
void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;		/* no queue, or queue is empty */
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE) {
				p->state = TASK_RUNNING;
				/* reschedule if the woken task deserves
				   the CPU more than the current one */
				if (p->counter > current->counter)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			/* broken (non-circular) list: dump diagnostics */
			printk("wait_queue is bad (eip = %08lx)\n",((unsigned long *) q)[-1]);
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
321
/*
 * Semaphore slow path: called when down() found the semaphore
 * unavailable.  Sleeps uninterruptibly on the semaphore's wait queue
 * until the count goes positive.  State is re-set to UNINTERRUPTIBLE
 * inside the loop because schedule() returns with us TASK_RUNNING.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };	/* on-stack queue entry */
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
334
/*
 * Common body of sleep_on()/interruptible_sleep_on(): put the current
 * task on wait queue *p in the given sleep state and schedule away.
 * Interrupts are enabled across schedule() and the caller's interrupt
 * flag is restored afterwards.  task[0] (the idle task) must never
 * sleep, since there would be nothing left to run.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };	/* on-stack queue entry */

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");
	/* set the state before queueing so a wake_up between the two
	   cannot be lost */
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
352
/* Sleep on *p until woken; signals also end the sleep. */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}
357
/* Sleep on *p until explicitly woken; signals are ignored. */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
362
363
364
365
366
367 static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
368 #define SLOW_BUT_DEBUGGING_TIMERS 1
369
/*
 * Insert a timer into the doubly-linked timer list, kept sorted by
 * absolute expiry time.  timer->expires is relative on entry and is
 * converted to jiffies-absolute here.  The sentinel timer_head has
 * expires == ~0, so the insertion scan always terminates.
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	/* catch double-adds: a queued timer has non-NULL links;
	   &timer[-1] is the caller's return address on the stack */
	if (timer->next || timer->prev) {
		printk("add_timer() called with non-zero list from %08lx\n",
			((unsigned long *) &timer)[-1]);
		return;
	}
#endif
	p = &timer_head;
	timer->expires += jiffies;	/* relative -> absolute */
	save_flags(flags);
	cli();				/* list is also walked from timer_bh */
	/* find the first entry expiring no earlier than this timer */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	/* link in before p */
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}
395
/*
 * Remove a timer from the timer list.  Returns 1 if the timer was
 * queued (and converts expires back to a relative value), 0 if not.
 * The debugging variant scans the whole list to verify the timer is
 * really on it; the fast variant trusts the non-NULL next pointer.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	struct timer_list * p;

	p = &timer_head;
	save_flags(flags);
	cli();
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			/* unlink and re-relativise the expiry */
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			timer->expires -= jiffies;
			return 1;
		}
	}
	/* not found on the list but its links look queued: complain,
	   reporting the caller's return address */
	if (timer->next || timer->prev)
		printk("del_timer() called from %08lx with timer not initialized\n",
			((unsigned long *) &timer)[-1]);
	restore_flags(flags);
	return 0;
#else
	save_flags(flags);
	cli();
	if (timer->next) {
		/* queued: unlink and re-relativise the expiry */
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		restore_flags(flags);
		timer->expires -= jiffies;
		return 1;
	}
	restore_flags(flags);
	return 0;
#endif
}
435
436 unsigned long timer_active = 0;
437 struct timer_struct timer_table[32];
438
439
440
441
442
443
444
445 unsigned long avenrun[3] = { 0,0,0 };
446
447
448
449
450 static unsigned long count_active_tasks(void)
451 {
452 struct task_struct **p;
453 unsigned long nr = 0;
454
455 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
456 if (*p && ((*p)->state == TASK_RUNNING ||
457 (*p)->state == TASK_UNINTERRUPTIBLE ||
458 (*p)->state == TASK_SWAPPING))
459 nr += FIXED_1;
460 return nr;
461 }
462
463 static inline void calc_load(void)
464 {
465 unsigned long active_tasks;
466 static int count = LOAD_FREQ;
467
468 if (count-- > 0)
469 return;
470 count = LOAD_FREQ;
471 active_tasks = count_active_tasks();
472 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
473 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
474 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
475 }
476
477
478
479
480
481
482
483
484
485
486
/*
 * Called from do_timer() once per second (when tv_usec wraps).
 * Runs the NTP-style clock discipline: grows the maximum error
 * estimate, computes the per-tick phase adjustment from the current
 * offset, handles pending leap seconds, and periodically writes the
 * software clock back to the CMOS RTC.
 */
static void second_overflow(void)
{
	long ltemp;

	static long last_rtc_update=0;
	extern int set_rtc_mmss(unsigned long);

	/* The error estimate grows by the tolerance each second,
	   saturating at 0x70000000. */
	time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
	    0x70000000 : (time_maxerror + time_tolerance);

	/* Turn the remaining phase offset into a per-tick adjustment
	   (time_adj), amortised over SHIFT_KG + time_constant.  The
	   +1/-1 games keep the shift rounding toward zero for both
	   signs of the offset. */
	if (time_offset < 0) {
		ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
		time_adj = - time_adj;
	} else if (time_offset > 0) {
		ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
	} else {
		time_adj = 0;
	}

	/* Add the frequency correction and the compile-time FINETUNE. */
	time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
	    + FINETUNE;

	/* Leap-second handling at 23:59:59 UTC (86400-second days). */
	switch (time_status) {
		case TIME_INS:
		/* insert a second: step back at midnight */
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_status = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 GMT\n");
		}
		break;

		case TIME_DEL:
		/* delete a second: skip the last second of the day */
		if (xtime.tv_sec % 86400 == 86399) {
			xtime.tv_sec++;
			time_status = TIME_OK;
			printk("Clock: deleting leap second 23:59:59 GMT\n");
		}
		break;

		case TIME_OOP:
		/* the inserted leap second is over */
		time_status = TIME_OK;
		break;
	}
	/* Refresh the CMOS RTC roughly every 11 minutes; on failure,
	   retry in about a minute (the else binds to the inner if). */
	if (xtime.tv_sec > last_rtc_update + 660)
	  if (set_rtc_mmss(xtime.tv_sec) == 0)
	    last_rtc_update = xtime.tv_sec;
	  else
	    last_rtc_update = xtime.tv_sec - 600;
}
545
546
547
548
/*
 * Bottom half for TIMER_BH: runs expired timers with interrupts
 * enabled.  First the sorted dynamic timer list (dequeue under cli,
 * call the handler under sti), then the legacy static timer_table
 * entries whose bit in timer_active is set.
 */
static void timer_bh(void * unused)
{
	unsigned long mask;
	struct timer_struct *tp;
	struct timer_list * timer;

	cli();
	/* The list is sorted, so stop at the first unexpired entry. */
	while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
		/* copy fn/data before unlinking: the handler may free
		   or re-add the timer */
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();
		fn(data);	/* run the handler with interrupts on */
		cli();
	}
	sti();

	/* Legacy static timers: one bit of timer_active per slot. */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;			/* no higher bits set */
		if (!(mask & timer_active))
			continue;		/* slot not armed */
		if (tp->expires > jiffies)
			continue;		/* not yet due */
		timer_active &= ~mask;		/* disarm before calling */
		tp->fn();
		sti();
	}
}
580
/* Bottom half for TQUEUE_BH: drain the timer task queue. */
void tqueue_bh(void * unused)
{
	run_task_queue(&tq_timer);
}
585
586
587
588
589
590
591
/*
 * The timer interrupt handler proper (hooked to IRQ 0 in sched_init).
 * Advances xtime with NTP phase/adjtime corrections, bumps jiffies,
 * does per-task CPU accounting and time-slice expiry, fires the
 * virtual/profiling itimers, and marks the timer bottom halves to run.
 */
static void do_timer(struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;

	long ltemp;

	/* Accumulate the scaled per-tick phase adjustment; when it
	   exceeds +/-FINEUSEC, fold whole microseconds into xtime. */
	time_phase += time_adj;
	if (time_phase < -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase > FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	/* adjtime() support: slew by at most tickadj usec per tick. */
	if (time_adjust)
	{
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		/* consume the part applied this tick */
		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;

	/* second boundary: carry into tv_sec and run the once-a-second
	   clock discipline */
	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();
	}

	jiffies++;
	calc_load();
	/* VM86 mode or user CS means the tick hit user code. */
	if ((VM_MASK & regs->eflags) || (3 & regs->cs)) {
		current->utime++;
		if (current != task[0]) {
			/* priority below 15 means the task was niced */
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}
		/* ITIMER_VIRTUAL counts down only in user time */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		current->stime++;
		if(current != task[0])
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		/* kernel profiling: histogram of EIP >> 2 */
		if (prof_buffer && current != task[0]) {
			unsigned long eip = regs->eip;
			eip >>= 2;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}
	/* time-slice expired (never preempt the idle task this way) */
	if (current != task[0] && 0 > --current->counter) {
		current->counter = 0;
		need_resched = 1;
	}
	/* ITIMER_PROF counts down in both user and system time */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
	/* any static timer due?  defer the actual call to timer_bh */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	cli();
	/* reschedule when the next ITIMER_REAL expiry has been reached;
	   schedule() consumes itimer_ticks and delivers the signals */
	itimer_ticks++;
	if (itimer_ticks > itimer_next)
		need_resched = 1;
	if (timer_head.next->expires < jiffies)
		mark_bh(TIMER_BH);
	if (tq_timer != &tq_last)
		mark_bh(TQUEUE_BH);
	sti();
}
701
702 asmlinkage int sys_alarm(long seconds)
703 {
704 struct itimerval it_new, it_old;
705
706 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
707 it_new.it_value.tv_sec = seconds;
708 it_new.it_value.tv_usec = 0;
709 _setitimer(ITIMER_REAL, &it_new, &it_old);
710 return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
711 }
712
/* getpid(): return the current task's process id. */
asmlinkage int sys_getpid(void)
{
	return current->pid;
}
717
/* getppid(): return the pid of the original parent (p_opptr). */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}
722
/* getuid(): return the real user id. */
asmlinkage int sys_getuid(void)
{
	return current->uid;
}
727
/* geteuid(): return the effective user id. */
asmlinkage int sys_geteuid(void)
{
	return current->euid;
}
732
/* getgid(): return the real group id. */
asmlinkage int sys_getgid(void)
{
	return current->gid;
}
737
/* getegid(): return the effective group id. */
asmlinkage int sys_getegid(void)
{
	return current->egid;
}
742
743 asmlinkage int sys_nice(long increment)
744 {
745 int newprio;
746
747 if (increment < 0 && !suser())
748 return -EPERM;
749 newprio = current->priority - increment;
750 if (newprio < 1)
751 newprio = 1;
752 if (newprio > 35)
753 newprio = 35;
754 current->priority = newprio;
755 return 0;
756 }
757
/*
 * Print one line of the show_state() table for task p in slot nr:
 * command name, slot (negated for the current task), state letter,
 * saved PC, free kernel-stack bytes, pid, and family pids.
 */
static void show_task(int nr,struct task_struct * p)
{
	unsigned long free;
	static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
	if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[p->state]);
	else
		printk(" ");		/* unknown state value */
	if (p == current)
		printk(" current ");	/* its saved esp is stale */
	else
		printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);
	/* estimate free stack: scan up from the STACK_MAGIC end until
	   the first word that has been written */
	for (free = 1; free < 1024 ; free++) {
		if (((unsigned long *)p->kernel_stack_page)[free])
			break;
	}
	printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);		/* youngest child */
	else
		printk(" ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);		/* younger sibling */
	else
		printk(" ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);	/* older sibling */
	else
		printk("\n");
}
790
/*
 * Dump a process table to the console (one show_task() line per
 * occupied task slot).  Used for debugging, e.g. from SysRq-style
 * hooks elsewhere in the kernel.
 */
void show_state(void)
{
	int i;

	printk(" free sibling\n");
	printk(" task PC stack pid father child younger older\n");
	for (i=0 ; i<NR_TASKS ; i++)
		if (task[i])
			show_task(i,task[i]);
}
801
/*
 * One-time scheduler initialisation at boot: install the timer and
 * task-queue bottom halves, set up the GDT entries for task 0's TSS
 * and LDT (clearing the slots for all other tasks), hook the system
 * call gate, program the 8253 PIT for HZ interrupts, and claim IRQ 0
 * for do_timer().
 */
void sched_init(void)
{
	int i;
	struct desc_struct * p;

	bh_base[TIMER_BH].routine = timer_bh;
	bh_base[TQUEUE_BH].routine = tqueue_bh;
	/* entry.S indexes the sigaction table by <<4, so the struct
	   size is hard-wired to 16 bytes */
	if (sizeof(struct sigaction) != 16)
		panic("Struct sigaction MUST be 16 bytes");
	set_tss_desc(gdt+FIRST_TSS_ENTRY,&init_task.tss);
	set_ldt_desc(gdt+FIRST_LDT_ENTRY,&default_ldt,1);
	set_system_gate(0x80,&system_call);
	/* clear the TSS+LDT descriptor pair for every other task slot */
	p = gdt+2+FIRST_TSS_ENTRY;
	for(i=1 ; i<NR_TASKS ; i++) {
		task[i] = NULL;
		p->a=p->b=0;
		p++;
		p->a=p->b=0;
		p++;
	}
	/* Clear NT (bit 14 of EFLAGS) so iret does not do a task switch */
	__asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
	load_TR(0);
	load_ldt(0);
	/* program PIT channel 0: mode 2 (rate generator), LSB then MSB */
	outb_p(0x34,0x43);
	outb_p(LATCH & 0xff , 0x40);
	outb(LATCH >> 8 , 0x40);
	if (request_irq(TIMER_IRQ,(void (*)(int)) do_timer)!=0)
		panic("Could not allocate timer IRQ!");
}