This source file includes following definitions.
- math_state_restore
- math_emulate
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- tqueue_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/segment.h>
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/tqueue.h>
28
29 #include <asm/system.h>
30 #include <asm/io.h>
31 #include <asm/segment.h>
32
#define TIMER_IRQ 0	/* the 8253 interval timer is wired to IRQ 0 */

#include <linux/timex.h>
36
37
38
39
long tick = 1000000 / HZ;	/* length of one timer tick, in microseconds */
volatile struct timeval xtime;	/* the current time of day */
int tickadj = 500/HZ;		/* max usecs slewed per tick by adjtime(2) */

DECLARE_TASK_QUEUE(tq_timer);	/* task queue drained by tqueue_bh() */

/*
 * Kernel clock discipline (NTP phase-lock loop) state, consumed by
 * second_overflow() and do_timer() below.
 */
int time_status = TIME_BAD;	/* clock synchronization status */
long time_offset = 0;		/* time adjustment, in scaled microseconds */
long time_constant = 0;		/* PLL time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance */
long time_precision = 1;	/* clock precision, in microseconds */
long time_maxerror = 0x70000000;/* maximum error bound */
long time_esterror = 0x70000000;/* estimated error bound */
long time_phase = 0;		/* phase accumulator (scaled, see SHIFT_SCALE) */
long time_freq = 0;		/* frequency offset (scaled) */
long time_adj = 0;		/* per-tick phase adjustment (scaled) */
long time_reftime = 0;		/* NOTE(review): set nowhere in this chunk */

long time_adjust = 0;		/* outstanding adjtime(2) correction, usecs */
long time_adjust_step = 0;	/* slice of time_adjust applied this tick */

int need_resched = 0;		/* set when schedule() should run soon */
65
66
67
68
/* CPU capability flags, presumably filled in by boot-time detection
 * code elsewhere (only read or defaulted here). */
int hard_math = 0;	/* nonzero if a math coprocessor is present */
int x86 = 0;		/* CPU family */
int ignore_irq13 = 0;	/* FPU error reporting mode flag */
int wp_works_ok = 0;	/* supervisor-mode write-protect works */

int EISA_bus = 0;	/* nonzero on EISA machines */

extern int _setitimer(int, struct itimerval *, struct itimerval *);
unsigned long * prof_buffer = NULL;	/* kernel profiling histogram (see do_timer) */
unsigned long prof_len = 0;		/* its length, in words */
82
#define _S(nr) (1<<((nr)-1))	/* signal number -> bit in a signal mask */

extern void mem_use(void);

extern int timer_interrupt(void);
asmlinkage int system_call(void);	/* int 0x80 entry point, installed in sched_init() */
89
90
91
92
93
/* Identity table for values 0..32 -- NOTE(review): no user is visible
 * in this chunk; confirm what indexes it before changing. */
static unsigned long ident_map[33] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
	13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
	23, 24, 25, 26, 27, 28, 29, 30, 31, 32
};

/* Kernel stack for task[0]; word 0 holds STACK_MAGIC as an overflow
 * sentinel (see the free-stack scan in show_task()). */
static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
struct task_struct init_task = INIT_TASK;	/* the hand-built first task */

unsigned long volatile jiffies=0;	/* ticks since boot; bumped in do_timer() */

struct task_struct *current = &init_task;	/* the task now executing */
struct task_struct *last_task_used_math = NULL;	/* current owner of the FPU state */

struct task_struct * task[NR_TASKS] = {&init_task, };	/* the task slot table */

long user_stack [ PAGE_SIZE>>2 ] = { STACK_MAGIC, };	/* boot-time stack */

/* Initial stack pointer/segment pair loaded at startup. */
struct {
	long * a;
	short b;
} stack_start = { & user_stack [PAGE_SIZE>>2] , KERNEL_DS };

struct kernel_stat kstat = { 0 };	/* system-wide accounting counters */
118
119
120
121
122
123
124
125
/*
 * Lazy FPU context switching: called on the first coprocessor use
 * after a task switch.  Saves the FPU state of the previous owner
 * (if any) and loads (or initializes) the state of 'current'.
 */
asmlinkage void math_state_restore(void)
{
	__asm__ __volatile__("clts");	/* clear TS so FPU insns stop trapping */
	if (last_task_used_math == current)
		return;		/* we already own the FPU state */
	/* Arm a watchdog around the save/restore -- NOTE(review):
	 * presumably COPRO_TIMER's handler recovers a wedged FPU;
	 * the handler is not visible in this chunk. */
	timer_table[COPRO_TIMER].expires = jiffies+50;
	timer_active |= 1<<COPRO_TIMER;
	if (last_task_used_math)
		__asm__("fnsave %0":"=m" (last_task_used_math->tss.i387));
	else
		__asm__("fnclex");	/* no owner: just clear pending exceptions */
	last_task_used_math = current;
	if (current->used_math) {
		__asm__("frstor %0": :"m" (current->tss.i387));
	} else {
		/* first FPU use by this task: start from a clean slate */
		__asm__("fninit");
		current->used_math=1;
	}
	timer_active &= ~(1<<COPRO_TIMER);	/* disarm the watchdog */
}
146
#ifndef CONFIG_MATH_EMULATION

/*
 * Stub used when the kernel is built without FPU emulation: a task
 * that executes a coprocessor instruction on a machine without one is
 * killed with SIGFPE.  schedule() lets the (fatal) signal take effect
 * instead of returning to re-execute the faulting instruction.
 */
asmlinkage void math_emulate(long arg)
{
	printk("math-emulation not enabled and no coprocessor found.\n");
	printk("killing %s.\n",current->comm);
	send_sig(SIGFPE,current,1);
	schedule();
}

#endif /* CONFIG_MATH_EMULATION */
158
unsigned long itimer_ticks = 0;	/* ticks since schedule() last walked the itimers */
unsigned long itimer_next = ~0;	/* ticks until the soonest ITIMER_REAL expiry */
static unsigned long lost_ticks = 0;	/* ticks the timer bh fell behind --
					 * NOTE(review): incremented in do_timer()
					 * but never consumed in this chunk */
162
163
164
165
166
167
168
169
170
171
172
173
174
/*
 * 'schedule()' is the scheduler proper.  First pass over the task
 * ring: deliver ITIMER_REAL expiries accumulated since the last call
 * (do_timer() only counts ticks into itimer_ticks) and wake any
 * TASK_INTERRUPTIBLE sleeper with a pending unblocked signal or an
 * elapsed timeout.  Second pass: pick the runnable task with the
 * largest remaining quantum ('counter'); when every runnable counter
 * is 0, recharge all tasks (half the old counter plus 'priority').
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * next;
	unsigned long ticks;

	/* Scheduling from interrupt context is a bug: report it and
	 * reset intr_count so we can limp onward. */
	if (intr_count) {
		printk("Aiee: scheduling in interrupt\n");
		intr_count = 0;
	}
	/* Atomically consume the tick count do_timer() accumulated. */
	cli();
	ticks = itimer_ticks;
	itimer_ticks = 0;
	itimer_next = ~0;	/* recomputed as the minimum below */
	sti();
	need_resched = 0;
	p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc1;	/* ring fully traversed */
		/* Age this task's real-time interval timer. */
		if (ticks && p->it_real_value) {
			if (p->it_real_value <= ticks) {
				send_sig(SIGALRM, p, 1);
				if (!p->it_real_incr) {
					/* one-shot: disarm */
					p->it_real_value = 0;
					goto end_itimer;
				}
				/* periodic: step past all missed periods */
				do {
					p->it_real_value += p->it_real_incr;
				} while (p->it_real_value <= ticks);
			}
			p->it_real_value -= ticks;
			if (p->it_real_value < itimer_next)
				itimer_next = p->it_real_value;
		}
end_itimer:
		if (p->state != TASK_INTERRUPTIBLE)
			continue;
		/* wake on a pending, unblocked signal... */
		if (p->signal & ~p->blocked) {
			p->state = TASK_RUNNING;
			continue;
		}
		/* ...or on an expired sleep timeout */
		if (p->timeout && p->timeout <= jiffies) {
			p->timeout = 0;
			p->state = TASK_RUNNING;
		}
	}
confuse_gcc1:

	/* this is the scheduler proper: */
#if 0
	/* (disabled) give the outgoing task a small counter boost so it
	 * is slightly favoured on the next reschedule */
	if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
	    current->counter < current->priority*2) {
		++current->counter;
	}
#endif
	c = -1000;	/* below any real counter value */
	next = p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc2;
		if (p->state == TASK_RUNNING && p->counter > c)
			c = p->counter, next = p;
	}
confuse_gcc2:
	/* every runnable task exhausted its quantum: recharge them all */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
	if (current == next)
		return;		/* no switch needed */
	kstat.context_swtch++;
	switch_to(next);
	/* We resume here as the new task: reload its hardware debug
	 * registers if it uses any (dr7 nonzero). */
	if(current->debugreg[7]){
		loaddebug(0);
		loaddebug(1);
		loaddebug(2);
		loaddebug(3);
		loaddebug(6);
	};
}
264
/*
 * pause(2): sleep interruptibly until a signal wakes us.  Returns
 * -ERESTARTNOHAND so signal delivery turns this into -EINTR rather
 * than restarting the call.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
271
272
273
274
275
276
277
278
279
/*
 * Wake every task on the circular wait queue *q, whether it sleeps
 * interruptibly or not.  If a woken task has more quantum left than
 * the current one, request a reschedule.  A broken (non-circular)
 * queue is reported and abandoned instead of being looped on forever.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;		/* no queue, or queue empty */
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE)) {
				p->state = TASK_RUNNING;
				if (p->counter > current->counter)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			/* corruption: dump the queue and give up */
			printk("wait_queue is bad (eip = %08lx)\n",((unsigned long *) q)[-1]);
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
306
/*
 * Like wake_up(), but only wakes tasks in TASK_INTERRUPTIBLE sleep;
 * uninterruptible sleepers on the same queue are left alone.
 */
void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;		/* no queue, or queue empty */
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE) {
				p->state = TASK_RUNNING;
				if (p->counter > current->counter)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			/* corruption: dump the queue and give up */
			printk("wait_queue is bad (eip = %08lx)\n",((unsigned long *) q)[-1]);
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
332
/*
 * Semaphore contention slow path: queue on the semaphore's wait list
 * and sleep uninterruptibly until the count goes positive.  The state
 * is set to TASK_UNINTERRUPTIBLE again after every schedule() because
 * a wake_up() leaves us TASK_RUNNING even if the count is still gone.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
345
/*
 * Common body of sleep_on()/interruptible_sleep_on(): put the current
 * task on wait queue *p in the given sleep state and call schedule().
 * Interrupts are enabled while we sleep; the caller's flags are
 * restored on the way out.  task[0] must never sleep.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");
	/* set the state before queueing, so a wake_up() between
	 * add_wait_queue() and schedule() is not lost */
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
363
/* Sleep on *p until woken; signals can also end the sleep. */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}
368
/* Sleep on *p until an explicit wake_up(); signals are ignored. */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
373
/* Head of the sorted, delta-encoded list of pending timers (see add_timer). */
static struct timer_list * next_timer = NULL;
375
/*
 * Insert a timer into the pending list.  The list is delta-encoded:
 * each entry's 'expires' is relative to the sum of all earlier
 * entries, so do_timer() only ever decrements the head.  The caller
 * supplies 'expires' as ticks from now; the walk below converts it to
 * a delta and rebases the successor when inserting in the middle.
 * Runs with interrupts off to keep the list consistent with do_timer().
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list ** p;

	if (!timer)
		return;
	timer->next = NULL;
	p = &next_timer;
	save_flags(flags);
	cli();
	while (*p) {
		if ((*p)->expires > timer->expires) {
			/* insert before *p: its delta becomes relative to us */
			(*p)->expires -= timer->expires;
			timer->next = *p;
			break;
		}
		/* skip *p: our delta becomes relative to it */
		timer->expires -= (*p)->expires;
		p = &(*p)->next;
	}
	*p = timer;
	restore_flags(flags);
}
399
/*
 * Remove a timer from the pending list.  Returns 1 if it was pending
 * (and rewrites timer->expires to the ticks that were still left, by
 * adding back the deltas accumulated ahead of it), 0 if it was not on
 * the list.  The successor inherits the removed entry's delta so the
 * rest of the list keeps its absolute expiry times.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
	unsigned long expires = 0;	/* sum of deltas walked so far */
	struct timer_list **p;

	p = &next_timer;
	save_flags(flags);
	cli();
	while (*p) {
		if (*p == timer) {
			if ((*p = timer->next) != NULL)
				(*p)->expires += timer->expires;	/* rebase successor */
			timer->expires += expires;	/* report remaining ticks */
			restore_flags(flags);
			return 1;
		}
		expires += (*p)->expires;
		p = &(*p)->next;
	}
	restore_flags(flags);
	return 0;
}
423
/* Old-style static timer slots: one bit of timer_active per entry of
 * timer_table; scanned by do_timer() and run by timer_bh(). */
unsigned long timer_active = 0;
struct timer_struct timer_table[32];

/*
 * Load averages over 1, 5 and 15 minutes, as FIXED_1 fixed-point
 * values maintained by calc_load() via the CALC_LOAD macro.
 */
unsigned long avenrun[3] = { 0,0,0 };
434
435
436
437
438 static unsigned long count_active_tasks(void)
439 {
440 struct task_struct **p;
441 unsigned long nr = 0;
442
443 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
444 if (*p && ((*p)->state == TASK_RUNNING ||
445 (*p)->state == TASK_UNINTERRUPTIBLE ||
446 (*p)->state == TASK_SWAPPING))
447 nr += FIXED_1;
448 return nr;
449 }
450
451 static inline void calc_load(void)
452 {
453 unsigned long active_tasks;
454 static int count = LOAD_FREQ;
455
456 if (count-- > 0)
457 return;
458 count = LOAD_FREQ;
459 active_tasks = count_active_tasks();
460 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
461 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
462 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
463 }
464
465
466
467
468
469
470
471
472
473
474
/*
 * Once-a-second clock housekeeping, called from do_timer() when
 * xtime.tv_sec advances: grows the maximum-error bound, derives the
 * per-tick phase adjustment (time_adj) that steers the clock toward
 * time_offset, handles pending leap-second insertion/deletion, and
 * tries to copy the software clock into the CMOS RTC if more than
 * 660 seconds have passed since the last successful update.
 */
static void second_overflow(void)
{
	long ltemp;

	static long last_rtc_update=0;
	extern int set_rtc_mmss(unsigned long);

	/* widen the error bound, saturating at 0x70000000 */
	time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
	    0x70000000 : (time_maxerror + time_tolerance);

	/* Convert the outstanding offset into a per-tick adjustment,
	 * and consume the amount that will be applied this second. */
	if (time_offset < 0) {
		ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
		time_adj = - time_adj;
	} else if (time_offset > 0) {
		ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
	} else {
		time_adj = 0;
	}
	/* fold in the frequency correction and the build-time trim */
	time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
	    + FINETUNE;

	/* Leap-second processing at the day boundary (86400 s). */
	switch (time_status) {
	case TIME_INS:
		/* insert a second: repeat 23:59:59 as 23:59:60 */
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_status = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 GMT\n");
		}
		break;

	case TIME_DEL:
		/* delete a second: skip 23:59:59 */
		if (xtime.tv_sec % 86400 == 86399) {
			xtime.tv_sec++;
			time_status = TIME_OK;
			printk("Clock: deleting leap second 23:59:59 GMT\n");
		}
		break;

	case TIME_OOP:
		/* the inserted second has elapsed */
		time_status = TIME_OK;
		break;
	}
	if (xtime.tv_sec > last_rtc_update + 660)
		if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		/* else: retry next second until it succeeds */
}
531
532
533
534
/*
 * Timer bottom half: pops every fully-elapsed timer off the
 * delta-encoded list and runs its handler, then runs every expired
 * entry of the old-style timer_table.  List manipulation happens with
 * interrupts off; handlers themselves run with interrupts on.
 */
static void timer_bh(void * unused)
{
	unsigned long mask;
	struct timer_struct *tp;

	cli();
	while (next_timer && next_timer->expires == 0) {
		/* unlink under cli(), then call with interrupts enabled */
		void (*fn)(unsigned long) = next_timer->function;
		unsigned long data = next_timer->data;
		next_timer = next_timer->next;
		sti();
		fn(data);
		cli();
	}
	sti();

	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;		/* no active timers at higher bits */
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;	/* not expired yet */
		timer_active &= ~mask;	/* one-shot: disarm before running */
		tp->fn();
		sti();
	}
}
563
/*
 * Task-queue bottom half: drains tq_timer.  Marked for execution by
 * do_timer() whenever the queue is non-empty.
 */
void tqueue_bh(void * unused)
{
	run_task_queue(&tq_timer);
}
568
569
570
571
572
573
574
/*
 * The timer interrupt handler: advances xtime (applying the NTP phase
 * adjustment and any adjtime(2) slew), bumps jiffies, maintains the
 * load average, charges the tick to user or system time, ticks the
 * virtual/profiling interval timers, decrements the running task's
 * quantum, and marks the timer and task-queue bottom halves when
 * anything expired.  Heavy work is deferred to timer_bh()/tqueue_bh().
 */
static void do_timer(struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;

	long ltemp;

	/* Accumulate the scaled phase adjustment; whenever it amounts
	 * to whole microseconds, fold them into this tick's advance. */
	time_phase += time_adj;
	if (time_phase < -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase > FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust)
	{
		/* adjtime(2) in progress: slew at most 'tickadj'
		 * microseconds per tick toward the requested offset,
		 * and consume that slice of the outstanding amount. */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;

	/* carry microseconds into seconds, with once-a-second housekeeping */
	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();
	}

	jiffies++;
	calc_load();
	/* Tick charged to user time if we interrupted user mode or vm86. */
	if ((VM_MASK & regs->eflags) || (3 & regs->cs)) {
		current->utime++;
		if (current != task[0]) {
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}
		/* the virtual itimer only runs while in user mode */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		current->stime++;
		if(current != task[0])
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		/* kernel profiling: bucket the interrupted EIP */
		if (prof_buffer && current != task[0]) {
			unsigned long eip = regs->eip;
			eip >>= 2;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}
	/* quantum used up: request a reschedule on the way out */
	if (current != task[0] && 0 > --current->counter) {
		current->counter = 0;
		need_resched = 1;
	}
	/* the profiling itimer ticks in both user and system mode */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
	/* defer expired timer_table entries to timer_bh() */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	cli();
	itimer_ticks++;
	if (itimer_ticks > itimer_next)
		need_resched = 1;	/* schedule() will deliver the SIGALRMs */
	/* age the head of the delta-encoded timer list */
	if (next_timer) {
		if (next_timer->expires) {
			next_timer->expires--;
			if (!next_timer->expires)
				mark_bh(TIMER_BH);
		} else {
			/* head already at zero: the bh is running behind */
			lost_ticks++;
			mark_bh(TIMER_BH);
		}
	}
	if (tq_timer != &tq_last)	/* tq_timer non-empty */
		mark_bh(TQUEUE_BH);
	sti();
}
692
693 asmlinkage int sys_alarm(long seconds)
694 {
695 struct itimerval it_new, it_old;
696
697 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
698 it_new.it_value.tv_sec = seconds;
699 it_new.it_value.tv_usec = 0;
700 _setitimer(ITIMER_REAL, &it_new, &it_old);
701 return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
702 }
703
/* getpid(2): return the calling task's process id. */
asmlinkage int sys_getpid(void)
{
	return current->pid;
}
708
/* getppid(2): return the pid of the original parent (p_opptr). */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}
713
/* getuid(2): return the real user id. */
asmlinkage int sys_getuid(void)
{
	return current->uid;
}
718
/* geteuid(2): return the effective user id. */
asmlinkage int sys_geteuid(void)
{
	return current->euid;
}
723
/* getgid(2): return the real group id. */
asmlinkage int sys_getgid(void)
{
	return current->gid;
}
728
/* getegid(2): return the effective group id. */
asmlinkage int sys_getegid(void)
{
	return current->egid;
}
733
734 asmlinkage int sys_nice(long increment)
735 {
736 int newprio;
737
738 if (increment < 0 && !suser())
739 return -EPERM;
740 newprio = current->priority - increment;
741 if (newprio < 1)
742 newprio = 1;
743 if (newprio > 35)
744 newprio = 35;
745 current->priority = newprio;
746 return 0;
747 }
748
/*
 * Print one row of the show_state() table for task p: command name,
 * slot number (negated for the running task), state letter, saved PC,
 * free kernel-stack bytes, and the pids of its relatives.
 */
static void show_task(int nr,struct task_struct * p)
{
	unsigned long free;
	static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
	if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[p->state]);
	else
		printk(" ");
	if (p == current)
		printk(" current ");	/* its saved esp is stale */
	else
		printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);
	/* count never-touched words above the STACK_MAGIC sentinel to
	 * estimate how much kernel stack was left unused */
	for (free = 1; free < 1024 ; free++) {
		if (((unsigned long *)p->kernel_stack_page)[free])
			break;
	}
	printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);		/* youngest child */
	else
		printk(" ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);		/* younger sibling */
	else
		printk(" ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);	/* older sibling */
	else
		printk("\n");
}
781
/*
 * Dump a one-line summary of every task in the task table; used for
 * debugging (e.g. from a console key or a kernel problem report).
 */
void show_state(void)
{
	int i;

	printk(" free sibling\n");
	printk(" task PC stack pid father child younger older\n");
	for (i=0 ; i<NR_TASKS ; i++)
		if (task[i])
			show_task(i,task[i]);
}
792
/*
 * Boot-time scheduler initialization: install the timer and
 * task-queue bottom halves, load the initial TSS/LDT descriptors,
 * hook the int 0x80 system-call gate, clear the descriptor pairs of
 * the unused task slots, clear the NT flag, program the 8253 channel 0
 * for HZ interrupts, and claim the timer IRQ.
 */
void sched_init(void)
{
	int i;
	struct desc_struct * p;

	bh_base[TIMER_BH].routine = timer_bh;
	bh_base[TQUEUE_BH].routine = tqueue_bh;
	/* signal-handling asm hard-codes the sigaction layout */
	if (sizeof(struct sigaction) != 16)
		panic("Struct sigaction MUST be 16 bytes");
	set_tss_desc(gdt+FIRST_TSS_ENTRY,&init_task.tss);
	set_ldt_desc(gdt+FIRST_LDT_ENTRY,&default_ldt,1);
	set_system_gate(0x80,&system_call);
	p = gdt+2+FIRST_TSS_ENTRY;
	for(i=1 ; i<NR_TASKS ; i++) {
		task[i] = NULL;
		p->a=p->b=0;	/* zero the slot's TSS descriptor... */
		p++;
		p->a=p->b=0;	/* ...and its LDT descriptor */
		p++;
	}
	/* Clear NT in EFLAGS so 'iret' never attempts a task switch. */
	__asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
	load_TR(0);
	load_ldt(0);
	/* 8253 channel 0: control word 0x34 = LSB/MSB, mode 2 (rate
	 * generator), binary; then the divisor, low byte first. */
	outb_p(0x34,0x43);
	outb_p(LATCH & 0xff , 0x40);	/* LSB */
	outb(LATCH >> 8 , 0x40);	/* MSB */
	if (request_irq(TIMER_IRQ,(void (*)(int)) do_timer)!=0)
		panic("Could not allocate timer IRQ!");
}