This source file includes following definitions.
- math_state_restore
- math_emulate
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/segment.h>
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/tqueue.h>
28
29 #include <asm/system.h>
30 #include <asm/io.h>
31 #include <asm/segment.h>
32
/* The PC timer (8253 channel 0) is wired to IRQ 0. */
#define TIMER_IRQ 0

#include <linux/timex.h>

/* Timekeeping state, updated from the timer interrupt. */
long tick = 1000000 / HZ;	/* length of one tick, in usec */
volatile struct timeval xtime;	/* the current time of day */
int tickadj = 500/HZ;		/* usec slewed per tick by adjtime() */

/* Queue of deferred functions run from the timer bottom half. */
DECLARE_TASK_QUEUE(tq_timer);

/*
 * phase-lock loop variables (adjtimex/NTP state, consumed by
 * second_overflow() and do_timer() below)
 */
int time_status = TIME_BAD;	/* clock synchronization status */
long time_offset = 0;		/* remaining time adjustment */
long time_constant = 0;		/* PLL time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance */
long time_precision = 1;	/* clock precision */
long time_maxerror = 0x70000000;/* maximum error */
long time_esterror = 0x70000000;/* estimated error */
long time_phase = 0;		/* phase offset (scaled usec) */
long time_freq = 0;		/* frequency offset */
long time_adj = 0;		/* per-tick adjustment computed each second */
long time_reftime = 0;		/* time at last adjustment */

long time_adjust = 0;		/* outstanding adjtime() correction, usec */
long time_adjust_step = 0;	/* usec actually applied this tick */

int need_resched = 0;		/* set when schedule() should be called ASAP */
unsigned long event = 0;

/* CPU/FPU capabilities, detected by the boot code. */
int hard_math = 0;		/* nonzero if a coprocessor is present */
int x86 = 0;			/* CPU family (3 = 386, 4 = 486, ...) */
int ignore_irq13 = 0;		/* nonzero if exception 16 works for FPU errors */
int wp_works_ok = 0;		/* nonzero if supervisor-mode WP faults work */

int EISA_bus = 0;		/* nonzero if this is an EISA machine */

extern int _setitimer(int, struct itimerval *, struct itimerval *);
unsigned long * prof_buffer = NULL;	/* kernel profiling histogram (CONFIG_PROFILE) */
unsigned long prof_len = 0;		/* number of entries in prof_buffer */

/* Convert a signal number to its bit in a signal mask. */
#define _S(nr) (1<<((nr)-1))

extern void mem_use(void);

extern int timer_interrupt(void);
asmlinkage int system_call(void);

/* Stack and task structure of the initial (idle/swapper) task. */
static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
struct task_struct init_task = INIT_TASK;

unsigned long volatile jiffies=0;	/* clock ticks since boot */

struct task_struct *current = &init_task;	/* the task now running */
struct task_struct *last_task_used_math = NULL;	/* owner of the FPU state, if any */

struct task_struct * task[NR_TASKS] = {&init_task, };	/* global task table */

/* Kernel stack used before the first task switch (see stack_start). */
long user_stack [ PAGE_SIZE>>2 ] = { STACK_MAGIC, };

/* Initial esp/ss pair loaded by the boot code. */
struct {
	long * a;
	short b;
} stack_start = { & user_stack [PAGE_SIZE>>2] , KERNEL_DS };

struct kernel_stat kstat = { 0 };	/* system-wide accounting counters */
116
/*
 * Lazy FPU context switch: entered on the first FPU instruction after
 * a task switch (the "device not available" trap).  Saves the FPU
 * state of the previous owner and loads (or initializes) the state
 * of 'current'.
 */
asmlinkage void math_state_restore(void)
{
	__asm__ __volatile__("clts");		/* clear TS: FPU insns no longer trap */
	if (last_task_used_math == current)
		return;				/* FPU already holds our state */
	/* Arm the coprocessor watchdog in case fnsave/frstor wedges on a
	   buggy FPU; it is disarmed again at the end of this function. */
	timer_table[COPRO_TIMER].expires = jiffies+50;
	timer_active |= 1<<COPRO_TIMER;
	if (last_task_used_math)
		__asm__("fnsave %0":"=m" (last_task_used_math->tss.i387));
	else
		__asm__("fnclex");		/* no previous owner: just clear exceptions */
	last_task_used_math = current;
	if (current->used_math) {
		__asm__("frstor %0": :"m" (current->tss.i387));
	} else {
		/* first FPU use by this task: start from a clean state */
		__asm__("fninit");
		current->used_math=1;
	}
	timer_active &= ~(1<<COPRO_TIMER);	/* disarm the watchdog */
}
137
#ifndef CONFIG_MATH_EMULATION

/*
 * Stub used when the kernel is built without FPU emulation: a task
 * that executes an FPU instruction on a machine with no coprocessor
 * is killed with SIGFPE (force flag set, so it cannot be blocked).
 */
asmlinkage void math_emulate(long arg)
{
	printk("math-emulation not enabled and no coprocessor found.\n");
	printk("killing %s.\n",current->comm);
	send_sig(SIGFPE,current,1);
	schedule();	/* don't return to the faulting instruction */
}

#endif
149
unsigned long itimer_ticks = 0;	/* ticks elapsed since the last schedule(): pending ITIMER_REAL time */
unsigned long itimer_next = ~0;	/* smallest remaining it_real_value seen last pass, in ticks */
152
153
154
155
156
157
158
159
160
161
162
163
164
/*
 * 'schedule()' is the scheduler proper.  It first walks the whole task
 * list delivering expired ITIMER_REAL timers and waking interruptible
 * sleepers (pending signal or expired timeout), then picks the runnable
 * task with the largest remaining time slice and switches to it.
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * next;
	unsigned long ticks;

	if (intr_count) {
		/* scheduling from interrupt context is a bug: complain,
		   then clear the count so we can limp onwards */
		printk("Aiee: scheduling in interrupt\n");
		intr_count = 0;
	}
	/* Atomically consume the ticks accumulated by do_timer() since
	   the last schedule(); they are charged to every ITIMER_REAL
	   in the pass below. */
	cli();
	ticks = itimer_ticks;
	itimer_ticks = 0;
	itimer_next = ~0;
	sti();
	need_resched = 0;
	p = &init_task;
	/* Pass 1: timers and wakeups over the circular task list. */
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc1;
		if (ticks && p->it_real_value) {
			if (p->it_real_value <= ticks) {
				send_sig(SIGALRM, p, 1);
				if (!p->it_real_incr) {
					/* one-shot timer has fired: disarm it */
					p->it_real_value = 0;
					goto end_itimer;
				}
				/* periodic timer: step forward by whole
				   periods until past the consumed ticks */
				do {
					p->it_real_value += p->it_real_incr;
				} while (p->it_real_value <= ticks);
			}
			p->it_real_value -= ticks;
			/* remember the nearest pending expiry */
			if (p->it_real_value < itimer_next)
				itimer_next = p->it_real_value;
		}
end_itimer:
		if (p->state != TASK_INTERRUPTIBLE)
			continue;
		if (p->signal & ~p->blocked) {
			/* a deliverable signal is pending: wake the task */
			p->state = TASK_RUNNING;
			continue;
		}
		if (p->timeout && p->timeout <= jiffies) {
			/* sleep timeout has expired */
			p->timeout = 0;
			p->state = TASK_RUNNING;
		}
	}
confuse_gcc1:

/* this is the scheduler proper: */
#if 0
	/* give processes that go to sleep a bit higher priority..
	   disabled: can be very unfair under some circumstances */
	if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
	    current->counter < current->priority*2) {
		++current->counter;
	}
#endif
	/* Pass 2: pick the runnable task with the largest counter.
	   init_task is the default, so the idle task runs when
	   nothing else is runnable. */
	c = -1000;
	next = p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc2;
		if (p->state == TASK_RUNNING && p->counter > c)
			c = p->counter, next = p;
	}
confuse_gcc2:
	if (!c) {
		/* every runnable task has exhausted its slice: recharge
		   all tasks (sleepers keep half their leftover credit) */
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
	if (current == next)
		return;
	kstat.context_swtch++;
	switch_to(next);
	/* after the switch: reload hardware debug registers if the new
	   'current' task uses them (dr7 nonzero) */
	if(current->debugreg[7]){
		loaddebug(0);
		loaddebug(1);
		loaddebug(2);
		loaddebug(3);
		loaddebug(6);
	};
}
254
/*
 * pause(2): sleep until a signal arrives.  Always returns
 * -ERESTARTNOHAND so the call is not restarted after the
 * signal handler runs.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
261
262
263
264
265
266
267
268
269
/*
 * wake_up() makes every task on the circular wait queue *q runnable,
 * whether it sleeps interruptibly or uninterruptibly.  The queue is
 * not locked here: callers rely on cli() or on interrupt context.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;		/* no queue, or queue is empty */
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE)) {
				p->state = TASK_RUNNING;
				/* reschedule soon if the woken task clearly
				   deserves the CPU more than we do */
				if (p->counter > current->counter + 3)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			/* queue is corrupted (not circular): report and stop */
			printk("wait_queue is bad (eip = %08lx)\n",((unsigned long *) q)[-1]);
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
296
/*
 * Like wake_up(), but only wakes tasks sleeping interruptibly
 * (TASK_INTERRUPTIBLE); uninterruptible sleepers are left alone.
 */
void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;		/* no queue, or queue is empty */
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE) {
				p->state = TASK_RUNNING;
				/* reschedule soon if the woken task clearly
				   deserves the CPU more than we do */
				if (p->counter > current->counter + 3)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			/* queue is corrupted (not circular): report and stop */
			printk("wait_queue is bad (eip = %08lx)\n",((unsigned long *) q)[-1]);
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
322
/*
 * Slow path of down(): called when the semaphore count is <= 0.
 * Sleeps uninterruptibly until the count becomes positive.  The state
 * is re-set to TASK_UNINTERRUPTIBLE after every schedule() because a
 * wake_up() on sem->wait put us back to TASK_RUNNING.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
335
/*
 * Put the current task to sleep on wait queue *p in the given state.
 * The task state is set BEFORE the queue entry is linked in, so a
 * wake_up() racing with us cannot be lost.  Interrupts are enabled
 * across schedule(); the caller's flags are restored afterwards.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");	/* the idle task must never sleep */
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
353
/* Sleep on *p until woken or a signal arrives (interruptible sleep). */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}
358
/* Sleep on *p until explicitly woken; signals are ignored meanwhile. */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
363
364
365
366
367
/* Doubly-linked list of pending timers, sorted by expiry.  The head is
 * a self-linked sentinel whose ~0 expiry sorts after every real timer. */
static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
#define SLOW_BUT_DEBUGGING_TIMERS 1
370
/*
 * Insert 'timer' into the global timer list, which is kept sorted by
 * absolute expiry.  timer->expires is relative to now on entry and is
 * converted to absolute jiffies here (del_timer converts it back).
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	if (timer->next || timer->prev) {
		/* already queued: inserting again would corrupt the list */
		printk("add_timer() called with non-zero list from %08lx\n",
			((unsigned long *) &timer)[-1]);
		return;
	}
#endif
	p = &timer_head;
	timer->expires += jiffies;	/* relative -> absolute */
	save_flags(flags);
	cli();
	/* find the first entry expiring at or after us; the sentinel's
	   ~0 expiry guarantees the scan terminates */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	/* link in just before p */
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}
396
/*
 * Remove a timer from the pending list.  Returns 1 if it was queued
 * (and converts timer->expires back to a relative value), 0 otherwise.
 * The debugging variant scans the whole list to verify the timer is
 * really queued before unlinking it.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	struct timer_list * p;

	p = &timer_head;
	save_flags(flags);
	cli();
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			/* found it: unlink and clear the link fields */
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			timer->expires -= jiffies;	/* absolute -> relative */
			return 1;
		}
	}
	if (timer->next || timer->prev)
		printk("del_timer() called from %08lx with timer not initialized\n",
			((unsigned long *) &timer)[-1]);
	restore_flags(flags);
	return 0;
#else
	save_flags(flags);
	cli();
	if (timer->next) {
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		restore_flags(flags);
		timer->expires -= jiffies;	/* absolute -> relative */
		return 1;
	}
	restore_flags(flags);
	return 0;
#endif
}
436
unsigned long timer_active = 0;		/* bitmask of armed timer_table entries */
struct timer_struct timer_table[32];	/* old-style static timers, one bit each */

/*
 * Load-average data: avenrun[] holds the 1/5/15 minute averages as
 * fixed-point values (FIXED_1 scale), updated by calc_load() below.
 */
unsigned long avenrun[3] = { 0,0,0 };
447
448
449
450
451 static unsigned long count_active_tasks(void)
452 {
453 struct task_struct **p;
454 unsigned long nr = 0;
455
456 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
457 if (*p && ((*p)->state == TASK_RUNNING ||
458 (*p)->state == TASK_UNINTERRUPTIBLE ||
459 (*p)->state == TASK_SWAPPING))
460 nr += FIXED_1;
461 return nr;
462 }
463
464 static inline void calc_load(void)
465 {
466 unsigned long active_tasks;
467 static int count = LOAD_FREQ;
468
469 if (count-- > 0)
470 return;
471 count = LOAD_FREQ;
472 active_tasks = count_active_tasks();
473 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
474 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
475 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
476 }
477
478
479
480
481
482
483
484
485
486
487
/*
 * second_overflow() runs once per second from do_timer().  It ages the
 * clock error estimate, runs the NTP phase-locked loop to compute the
 * per-tick adjustment (time_adj) for the coming second, handles leap
 * second insertion/deletion, and keeps the CMOS RTC in step with the
 * system clock.
 */
static void second_overflow(void)
{
	long ltemp;
	/* last time the cmos clock got updated */
	static long last_rtc_update=0;
	extern int set_rtc_mmss(unsigned long);

	/* age the maximum-error estimate, saturating at 0x70000000 */
	time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
	  0x70000000 : (time_maxerror + time_tolerance);

	/* PLL: derive time_adj from the remaining time_offset, spreading
	   the correction over the next second's worth of ticks */
	if (time_offset < 0) {
		ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
		time_adj = - time_adj;	/* we are ahead: slow down */
	} else if (time_offset > 0) {
		ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
	} else {
		time_adj = 0;
	}

	/* fold in the frequency correction plus the compile-time tuning */
	time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
	    + FINETUNE;

	/* leap-second handling at UTC day boundaries */
	switch (time_status) {
		case TIME_INS:
		/* insert a leap second: repeat 23:59:59 */
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_status = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 GMT\n");
		}
		break;

		case TIME_DEL:
		/* delete a leap second: skip 23:59:59 */
		if (xtime.tv_sec % 86400 == 86399) {
			xtime.tv_sec++;
			time_status = TIME_OK;
			printk("Clock: deleting leap second 23:59:59 GMT\n");
		}
		break;

		case TIME_OOP:
		time_status = TIME_OK;	/* leap second is over */
		break;
	}
	/* update the CMOS RTC about every 11 minutes; on failure retry in
	   60s (NB: the else binds to the inner if, which is intended) */
	if (xtime.tv_sec > last_rtc_update + 660)
	  if (set_rtc_mmss(xtime.tv_sec) == 0)
	    last_rtc_update = xtime.tv_sec;
	  else
	    last_rtc_update = xtime.tv_sec - 600;
}
546
547
548
549
/*
 * Timer bottom half: runs (with interrupts enabled) after do_timer()
 * has marked TIMER_BH.  Fires expired dynamic timers from the sorted
 * timer_head list, then expired static timer_table entries.
 */
static void timer_bh(void * unused)
{
	unsigned long mask;
	struct timer_struct *tp;
	struct timer_list * timer;

	cli();
	/* the list is expiry-sorted, so stop at the first non-expired
	   entry; each timer is unlinked under cli() but its handler
	   runs with interrupts enabled */
	while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();
		fn(data);
		cli();
	}
	sti();

	/* static timer_table entries, one bit of timer_active each */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;		/* no higher bits set */
		if (!(mask & timer_active))
			continue;	/* slot not armed */
		if (tp->expires > jiffies)
			continue;	/* not expired yet */
		timer_active &= ~mask;	/* disarm before calling the handler */
		tp->fn();
		sti();
	}
}
581
/* Bottom half that drains the tq_timer task queue (see do_timer). */
void tqueue_bh(void * unused)
{
	run_task_queue(&tq_timer);
}
586
587
588
589
590
591
592
/*
 * The timer interrupt handler proper: advances xtime by one tick
 * (applying the NTP phase and adjtime corrections), bumps jiffies,
 * updates the load average, does CPU-time and interval-timer
 * accounting for the current task, and marks the timer/tqueue
 * bottom halves when deferred work is pending.
 */
static void do_timer(struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;

	long ltemp;

	/* Advance the NTP phase accumulator; once it exceeds one
	   (scaled) microsecond, fold whole microseconds into the tick.
	   time_adj was computed by second_overflow(). */
	time_phase += time_adj;
	if (time_phase < -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase > FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust)
	{
		/* adjtime() in progress: limit the step for the next tick
		   to the maximum slew rate (tickadj usec) in either
		   direction, and take it off the outstanding total */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		/* reduce the outstanding correction by this step */
		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;

	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();	/* once-per-second NTP/RTC work */
	}

	jiffies++;
	calc_load();
	/* user or vm86 mode: charge user/nice time; else system time */
	if ((VM_MASK & regs->eflags) || (3 & regs->cs)) {
		current->utime++;
		if (current != task[0]) {
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}
		/* ITIMER_VIRTUAL ticks only while in user mode */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		current->stime++;
		if(current != task[0])
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		/* kernel profiling: one histogram bucket per 4 bytes of text */
		if (prof_buffer && current != task[0]) {
			unsigned long eip = regs->eip;
			eip >>= 2;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}
	/* time slice used up: request a reschedule on return */
	if (current != task[0] && 0 > --current->counter) {
		current->counter = 0;
		need_resched = 1;
	}
	/* ITIMER_PROF ticks in both user and system mode */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
	/* mark the bottom half if any static timer has expired */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	cli();
	itimer_ticks++;
	if (itimer_ticks > itimer_next)
		need_resched = 1;	/* an ITIMER_REAL needs schedule()'s first pass */
	if (timer_head.next->expires < jiffies)
		mark_bh(TIMER_BH);	/* a dynamic timer has expired */
	if (tq_timer != &tq_last)
		mark_bh(TQUEUE_BH);	/* the timer task queue is non-empty */
	sti();
}
702
703 asmlinkage int sys_alarm(long seconds)
704 {
705 struct itimerval it_new, it_old;
706
707 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
708 it_new.it_value.tv_sec = seconds;
709 it_new.it_value.tv_usec = 0;
710 _setitimer(ITIMER_REAL, &it_new, &it_old);
711 return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
712 }
713
714 asmlinkage int sys_getpid(void)
715 {
716 return current->pid;
717 }
718
719 asmlinkage int sys_getppid(void)
720 {
721 return current->p_opptr->pid;
722 }
723
724 asmlinkage int sys_getuid(void)
725 {
726 return current->uid;
727 }
728
729 asmlinkage int sys_geteuid(void)
730 {
731 return current->euid;
732 }
733
734 asmlinkage int sys_getgid(void)
735 {
736 return current->gid;
737 }
738
739 asmlinkage int sys_getegid(void)
740 {
741 return current->egid;
742 }
743
744 asmlinkage int sys_nice(long increment)
745 {
746 int newprio;
747
748 if (increment < 0 && !suser())
749 return -EPERM;
750 newprio = current->priority - increment;
751 if (newprio < 1)
752 newprio = 1;
753 if (newprio > 35)
754 newprio = 35;
755 current->priority = newprio;
756 return 0;
757 }
758
/*
 * Print one row of the show_state() table for task p: command name,
 * task number (negated when it is the current task), one-letter state,
 * saved PC, free kernel stack, pid, and family relationships.
 */
static void show_task(int nr,struct task_struct * p)
{
	unsigned long free;
	static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
	if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[p->state]);	/* fixed one-letter strings, safe as format */
	else
		printk(" ");
	if (p == current)
		printk(" current ");
	else
		printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);	/* saved PC from the kernel stack */
	/* find the first used (non-zero) word above the bottom of the
	   kernel stack page: everything below it has never been touched */
	for (free = 1; free < 1024 ; free++) {
		if (((unsigned long *)p->kernel_stack_page)[free])
			break;
	}
	printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);		/* youngest child */
	else
		printk(" ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);		/* younger sibling */
	else
		printk(" ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);	/* older sibling */
	else
		printk("\n");
}
791
792 void show_state(void)
793 {
794 int i;
795
796 printk(" free sibling\n");
797 printk(" task PC stack pid father child younger older\n");
798 for (i=0 ; i<NR_TASKS ; i++)
799 if (task[i])
800 show_task(i,task[i]);
801 }
802
/*
 * One-time scheduler initialization, called at boot before interrupts
 * are enabled: registers the timer bottom halves, sets up the TSS/LDT
 * descriptors for every task slot, installs the int 0x80 system-call
 * gate, programs the 8253 timer chip, and claims the timer IRQ.
 */
void sched_init(void)
{
	int i;
	struct desc_struct * p;

	bh_base[TIMER_BH].routine = timer_bh;
	bh_base[TQUEUE_BH].routine = tqueue_bh;
	if (sizeof(struct sigaction) != 16)
		panic("Struct sigaction MUST be 16 bytes");
	set_tss_desc(gdt+FIRST_TSS_ENTRY,&init_task.tss);
	set_ldt_desc(gdt+FIRST_LDT_ENTRY,&default_ldt,1);
	set_system_gate(0x80,&system_call);
	/* clear the TSS/LDT descriptor pair of every other task slot */
	p = gdt+2+FIRST_TSS_ENTRY;
	for(i=1 ; i<NR_TASKS ; i++) {
		task[i] = NULL;
		p->a=p->b=0;
		p++;
		p->a=p->b=0;
		p++;
	}
	/* clear NT (nested task flag) so IRET behaves */
	__asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
	load_TR(0);
	load_ldt(0);
	/* program 8253 channel 0 as a rate generator at HZ */
	outb_p(0x34,0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
	outb_p(LATCH & 0xff , 0x40);	/* LSB of the divisor */
	outb(LATCH >> 8 , 0x40);	/* MSB of the divisor */
	if (request_irq(TIMER_IRQ,(void (*)(int)) do_timer)!=0)
		panic("Could not allocate timer IRQ!");
}
832 }