This source file includes following definitions.
- math_state_restore
- math_emulate
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/segment.h>
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27
28 #include <asm/system.h>
29 #include <asm/io.h>
30 #include <asm/segment.h>
31
32 #define TIMER_IRQ 0
33
34 #include <linux/timex.h>
35
36
37
38
/* Length of a clock tick in microseconds; tickadj is the maximum slew
 * (usecs) folded into a single tick by adjtime handling in do_timer(). */
long tick = 1000000 / HZ;
volatile struct timeval xtime;		/* current wall-clock time, advanced every tick */
int tickadj = 500/HZ;

/*
 * Kernel clock-discipline (PLL) state, consumed by second_overflow()
 * and do_timer() below.  Presumably set via an adjtimex-style syscall
 * defined elsewhere — not visible in this file.
 */
int time_status = TIME_BAD;	/* clock synchronization status */
long time_offset = 0;		/* time offset still to be amortized */
long time_constant = 0;		/* PLL time constant (stiffness) */
long time_tolerance = MAXFREQ;	/* frequency tolerance */
long time_precision = 1;	/* clock precision */
long time_maxerror = 0x70000000;	/* maximum error, saturated in second_overflow() */
long time_esterror = 0x70000000;	/* estimated error */
long time_phase = 0;		/* phase accumulator (scaled; see SHIFT_SCALE use) */
long time_freq = 0;		/* frequency offset term */
long time_adj = 0;		/* per-tick adjustment added to time_phase */
long time_reftime = 0;		/* time of last adjustment */

long time_adjust = 0;		/* outstanding adjtime() correction (usecs) */
long time_adjust_step = 0;	/* usecs of that correction applied this tick */

int need_resched = 0;		/* set when schedule() should run on return to user mode */

/* CPU/FPU capability flags, filled in by early boot code elsewhere. */
int hard_math = 0;		/* nonzero -> a math coprocessor is present */
int x86 = 0;			/* CPU family; set during boot */
int ignore_irq13 = 0;		/* presumably: FPU errors reported via exception, not IRQ13 — set elsewhere */
int wp_works_ok = 0;		/* presumably: supervisor-mode write-protect works — set elsewhere */

int EISA_bus = 0;		/* nonzero -> machine has an EISA bus */

extern int _setitimer(int, struct itimerval *, struct itimerval *);
unsigned long * prof_buffer = NULL;	/* kernel-profiling histogram (see do_timer) */
unsigned long prof_len = 0;		/* number of entries in prof_buffer */

#define _S(nr) (1<<((nr)-1))	/* signal number -> signal-mask bit */

extern void mem_use(void);

extern int timer_interrupt(void);
asmlinkage int system_call(void);

/* Kernel-mode stack for task[0]; STACK_MAGIC marks the far end so
 * overflow can be detected (cf. the free-stack scan in show_task). */
static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
struct task_struct init_task = INIT_TASK;

unsigned long volatile jiffies=0;	/* ticks since boot, incremented in do_timer() */

struct task_struct *current = &init_task;	/* the task now executing */
struct task_struct *last_task_used_math = NULL;	/* owner of the FPU state (lazy switching) */

struct task_struct * task[NR_TASKS] = {&init_task, };

/* Initial stack used before the first real process exists. */
long user_stack [ PAGE_SIZE>>2 ] = { STACK_MAGIC, };

/* Stack pointer + segment pair, presumably loaded by head/boot asm. */
struct {
	long * a;
	short b;
} stack_start = { & user_stack [PAGE_SIZE>>2] , KERNEL_DS };

struct kernel_stat kstat = { 0 };	/* system-wide accounting counters */
105
106
107
108
109
110
111
112
/*
 * Lazy FPU context switch: called on the first FPU use after a task
 * switch (device-not-available fault).  Saves the previous owner's
 * i387 state and loads (or initializes) the current task's state.
 */
asmlinkage void math_state_restore(void)
{
	__asm__ __volatile__("clts");	/* allow FPU ops, or we'd fault again */
	if (last_task_used_math == current)
		return;		/* state already belongs to us */
	/* Arm the coprocessor watchdog around the save/restore; its
	 * handler lives elsewhere — presumably recovery for a wedged FPU. */
	timer_table[COPRO_TIMER].expires = jiffies+50;
	timer_active |= 1<<COPRO_TIMER;
	if (last_task_used_math)
		__asm__("fnsave %0":"=m" (last_task_used_math->tss.i387));
	else
		__asm__("fnclex");	/* no owner: just clear pending exceptions */
	last_task_used_math = current;
	if (current->used_math) {
		__asm__("frstor %0": :"m" (current->tss.i387));
	} else {
		/* First FPU use by this task: start from a clean state. */
		__asm__("fninit");
		current->used_math=1;
	}
	timer_active &= ~(1<<COPRO_TIMER);	/* disarm the watchdog */
}
133
134 #ifndef CONFIG_MATH_EMULATION
135
/*
 * Stub used when the kernel was built without math emulation: a task
 * that executes an FPU instruction on a machine with no coprocessor
 * cannot make progress, so kill it with SIGFPE and reschedule.
 */
asmlinkage void math_emulate(long arg)
{
	printk("math-emulation not enabled and no coprocessor found.\n");
	printk("killing %s.\n",current->comm);
	send_sig(SIGFPE,current,1);
	schedule();
}
143
144 #endif
145
/* Ticks accumulated since schedule() last processed ITIMER_REAL timers,
 * and the smallest it_real_value seen on that pass (so do_timer can
 * flag need_resched when a timer comes due). */
unsigned long itimer_ticks = 0;
unsigned long itimer_next = ~0;
/* Ticks that arrived while the head of the timer delta queue had
 * already expired (counted in do_timer, head not yet run by timer_bh). */
static unsigned long lost_ticks = 0;
149
150
151
152
153
154
155
156
157
158
159
160
161
/*
 * 'schedule()' is the scheduler proper.  Pass 1 walks every task:
 * delivers expired ITIMER_REAL timers and wakes interruptible sleepers
 * whose signal or timeout has arrived.  Pass 2 picks the runnable task
 * with the largest remaining 'counter' and switches to it.
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * next;
	unsigned long ticks;

	/* Scheduling from interrupt context is a bug; report it and
	 * force intr_count back to a sane value so we can limp on. */
	if (intr_count) {
		printk("Aiee: scheduling in interrupt\n");
		intr_count = 0;
	}
	/* Atomically take and reset the tick count accumulated by
	 * do_timer() for real-interval-timer processing. */
	cli();
	ticks = itimer_ticks;
	itimer_ticks = 0;
	itimer_next = ~0;
	sti();
	need_resched = 0;
	p = &init_task;
	/* Pass 1: all tasks are on a circular next_task list rooted at
	 * init_task. */
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc1;
		if (ticks && p->it_real_value) {
			if (p->it_real_value <= ticks) {
				send_sig(SIGALRM, p, 1);
				if (!p->it_real_incr) {
					/* one-shot timer: disarm */
					p->it_real_value = 0;
					goto end_itimer;
				}
				/* periodic: advance past 'now', possibly
				 * skipping missed periods */
				do {
					p->it_real_value += p->it_real_incr;
				} while (p->it_real_value <= ticks);
			}
			p->it_real_value -= ticks;
			/* remember the soonest pending real timer */
			if (p->it_real_value < itimer_next)
				itimer_next = p->it_real_value;
		}
end_itimer:
		if (p->state != TASK_INTERRUPTIBLE)
			continue;
		/* pending, unblocked signal ends an interruptible sleep */
		if (p->signal & ~p->blocked) {
			p->state = TASK_RUNNING;
			continue;
		}
		/* so does an expired sleep timeout */
		if (p->timeout && p->timeout <= jiffies) {
			p->timeout = 0;
			p->state = TASK_RUNNING;
		}
	}
confuse_gcc1:

	/* (disabled experiment kept for reference) */
#if 0
	if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
		current->counter < current->priority*2) {
		++current->counter;
	}
#endif
	/* Pass 2: choose the runnable task with the highest counter.
	 * init_task (the idle task) is the fallback if nothing runs. */
	c = -1000;
	next = p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc2;
		if (p->state == TASK_RUNNING && p->counter > c)
			c = p->counter, next = p;
	}
confuse_gcc2:
	/* Every runnable task has exhausted its slice: recharge all
	 * counters (decayed history plus priority). */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
	if (current == next)
		return;
	kstat.context_swtch++;
	switch_to(next);
	/* We resume here when this task is scheduled again; reload the
	 * hardware debug registers if the incoming task uses them. */
	if(current->debugreg[7]){
		loaddebug(0);
		loaddebug(1);
		loaddebug(2);
		loaddebug(3);
		loaddebug(6);
	};
}
251
/*
 * pause(2): sleep interruptibly until a signal wakes us.  Returns
 * -ERESTARTNOHAND so the signal code converts it to -EINTR whenever a
 * handler was invoked (pause itself is never silently restarted).
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
258
259
260
261
262
263
264
265
266
/*
 * Wake every task on the circular wait queue *q, moving both
 * TASK_UNINTERRUPTIBLE and TASK_INTERRUPTIBLE sleepers to
 * TASK_RUNNING.  A queue with a broken link (NULL next) is reported
 * and the walk abandoned instead of looping forever.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;		/* no queue, or queue empty */
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE)) {
				p->state = TASK_RUNNING;
				/* woken task has more quantum left than we
				 * do: ask for a reschedule */
				if (p->counter > current->counter)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			/* NOTE(review): q[-1] is presumably the caller's
			 * return address when q lives on the stack — a
			 * debugging hint only, not guaranteed. */
			printk("wait_queue is bad (eip = %08lx)\n",((unsigned long *) q)[-1]);
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);	/* stop when we come full circle */
}
293
/*
 * Like wake_up(), but only wakes TASK_INTERRUPTIBLE sleepers;
 * uninterruptible sleepers on the same queue are left alone.
 */
void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;		/* no queue, or queue empty */
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE) {
				p->state = TASK_RUNNING;
				/* preempt if the woken task deserves the CPU */
				if (p->counter > current->counter)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			/* broken circular list — report and bail out
			 * (see the matching note in wake_up) */
			printk("wait_queue is bad (eip = %08lx)\n",((unsigned long *) q)[-1]);
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
319
/*
 * Semaphore contention slow path: sleep (uninterruptibly) on the
 * semaphore's wait queue until the count becomes positive, then
 * dequeue ourselves.  The wait entry lives on our stack, which is
 * safe because we do not return until it has been removed.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		/* re-arm before re-testing, so a wake-up between the
		 * test and the next schedule() is not lost */
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
332
/*
 * Common body of sleep_on()/interruptible_sleep_on(): queue an
 * on-stack wait entry on *p, sleep in the given state, and dequeue on
 * wake-up.  Interrupt flags are preserved around the sleep.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };	/* on-stack entry */

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");	/* idle must never sleep */
	/* set state before queueing, so a wake-up that runs between
	 * add_wait_queue() and schedule() makes schedule() return */
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();		/* sleep with interrupts enabled */
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
350
/* Sleep on queue p; a signal or wake_up{,_interruptible}() ends the
 * sleep (see the TASK_INTERRUPTIBLE handling in schedule()). */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}

/* Sleep on queue p until an explicit wake_up(); signals do not wake. */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
360
/* Head of the timer delta queue: each entry's 'expires' is relative to
 * the sum of all earlier entries, so do_timer() only decrements the
 * head each tick. */
static struct timer_list * next_timer = NULL;

/*
 * Insert 'timer' (whose expires is relative to now, in ticks) into
 * the delta queue, keeping the relative-expiry invariant: the new
 * entry's delta is reduced by each predecessor's delta, and its
 * successor's delta is reduced by the new entry's.
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list ** p;

	if (!timer)
		return;
	timer->next = NULL;
	p = &next_timer;
	save_flags(flags);
	cli();		/* the queue is also walked from the timer tick */
	while (*p) {
		if ((*p)->expires > timer->expires) {
			/* insert before *p; shrink its delta accordingly */
			(*p)->expires -= timer->expires;
			timer->next = *p;
			break;
		}
		/* skip past *p; our delta shrinks by its delta */
		timer->expires -= (*p)->expires;
		p = &(*p)->next;
	}
	*p = timer;
	restore_flags(flags);
}
386
/*
 * Remove 'timer' from the delta queue.  Returns 1 if it was queued
 * (timer->expires is rewritten to the absolute ticks that were left),
 * 0 if it was not found.  The successor inherits the removed delta so
 * the rest of the queue keeps its timing.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
	unsigned long expires = 0;	/* sum of deltas of predecessors */
	struct timer_list **p;

	p = &next_timer;
	save_flags(flags);
	cli();
	while (*p) {
		if (*p == timer) {
			/* unlink; give our delta to the next entry */
			if ((*p = timer->next) != NULL)
				(*p)->expires += timer->expires;
			/* report total remaining time back to the caller */
			timer->expires += expires;
			restore_flags(flags);
			return 1;
		}
		expires += (*p)->expires;
		p = &(*p)->next;
	}
	restore_flags(flags);
	return 0;
}
410
unsigned long timer_active = 0;		/* bitmask of armed timer_table slots */
struct timer_struct timer_table[32];	/* old-style static kernel timers */

/*
 * Load-average state: avenrun[] holds the 1, 5 and 15 minute load
 * averages as FIXED_1 fixed-point values, updated by calc_load().
 */
unsigned long avenrun[3] = { 0,0,0 };
421
422
423
424
425 static unsigned long count_active_tasks(void)
426 {
427 struct task_struct **p;
428 unsigned long nr = 0;
429
430 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
431 if (*p && ((*p)->state == TASK_RUNNING ||
432 (*p)->state == TASK_UNINTERRUPTIBLE ||
433 (*p)->state == TASK_SWAPPING))
434 nr += FIXED_1;
435 return nr;
436 }
437
/*
 * Called once per tick from do_timer(); every LOAD_FREQ ticks it
 * folds the current active-task count into the three exponentially
 * decaying load averages.
 */
static inline void calc_load(void)
{
	unsigned long active_tasks;	/* fixed-point count */
	static int count = LOAD_FREQ;	/* ticks until next update */

	if (count-- > 0)
		return;
	count = LOAD_FREQ;
	active_tasks = count_active_tasks();
	CALC_LOAD(avenrun[0], EXP_1, active_tasks);
	CALC_LOAD(avenrun[1], EXP_5, active_tasks);
	CALC_LOAD(avenrun[2], EXP_15, active_tasks);
}
451
452
453
454
455
456
457
458
459
460
461
/*
 * Run once per second from do_timer() when tv_usec wraps: advances the
 * clock-discipline PLL (computes the per-tick adjustment time_adj from
 * the outstanding time_offset), handles leap-second insertion and
 * deletion, and pushes the time to the CMOS RTC at most every 660 s.
 */
static void second_overflow(void)
{
	long ltemp;

	static long last_rtc_update=0;
	extern int set_rtc_mmss(unsigned long);

	/* Grow the maximum-error bound by the frequency tolerance,
	 * saturating at 0x70000000. */
	time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
	    0x70000000 : (time_maxerror + time_tolerance);

	/* PLL: convert the remaining offset into a per-tick phase
	 * adjustment, amortizing it over the next second.  The sign of
	 * time_adj matches the direction the clock must move. */
	if (time_offset < 0) {
		ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
		time_adj = - time_adj;
	} else if (time_offset > 0) {
		ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
	} else {
		time_adj = 0;
	}

	/* Add the frequency term (and compile-time fine tuning). */
	time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
	    + FINETUNE;

	/* Leap-second state machine, triggered at UTC midnight. */
	switch (time_status) {
		case TIME_INS:
		/* insert: repeat the last second of the day */
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_status = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 GMT\n");
		}
		break;

		case TIME_DEL:
		/* delete: skip the last second of the day */
		if (xtime.tv_sec % 86400 == 86399) {
			xtime.tv_sec++;
			time_status = TIME_OK;
			printk("Clock: deleting leap second 23:59:59 GMT\n");
		}
		break;

		case TIME_OOP:
		/* leap second in progress has now passed */
		time_status = TIME_OK;
		break;
	}
	/* Refresh the battery-backed RTC at most every 660 seconds;
	 * set_rtc_mmss (defined elsewhere) reports success with 0. */
	if (xtime.tv_sec > last_rtc_update + 660)
		if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
}
518
519
520
521
/*
 * Timer bottom half: runs expired dynamic timers from the delta queue
 * and expired static timer_table entries.  Handlers are invoked with
 * interrupts enabled; the queue itself is only touched under cli().
 */
static void timer_bh(void * unused)
{
	unsigned long mask;
	struct timer_struct *tp;

	cli();
	/* Head delta of 0 means the head has expired.  Copy fn/data and
	 * unlink while protected, then call the handler with interrupts
	 * on — the handler may re-add timers. */
	while (next_timer && next_timer->expires == 0) {
		void (*fn)(unsigned long) = next_timer->function;
		unsigned long data = next_timer->data;
		next_timer = next_timer->next;
		sti();
		fn(data);
		cli();
	}
	sti();

	/* Static table: one timer_active bit per slot; a slot is
	 * disarmed before its handler runs so it can re-arm itself. */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;		/* no higher bits can be set */
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;	/* not due yet */
		timer_active &= ~mask;
		tp->fn();
		sti();
	}
}
550
551
552
553
554
555
556
/*
 * The timer interrupt handler (IRQ0, HZ times per second): advances
 * xtime with PLL phase and adjtime corrections, updates jiffies and
 * the load average, charges CPU time to the current task, ticks the
 * virtual/profiling interval timers, and marks the timer bottom half
 * when any timer has come due.
 */
static void do_timer(struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;

	long ltemp;

	/* Accumulate the per-tick PLL phase adjustment; once it exceeds
	 * one microsecond (in SHIFT_SCALE units) fold whole usecs into
	 * xtime along with the tick and any adjtime slew. */
	time_phase += time_adj;
	if (time_phase < -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase > FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust)
	{
		/* adjtime(): slew at most +-tickadj usecs per tick so
		 * the clock stays monotonic and nearly smooth. */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;
		else
			time_adjust_step = time_adjust;

		/* consume the part applied this tick */
		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;

	/* Roll microseconds into seconds; once per second run the PLL
	 * second-overflow processing. */
	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();
	}

	jiffies++;
	calc_load();
	/* Interrupted user mode (or VM86): charge user time. */
	if ((VM_MASK & regs->eflags) || (3 & regs->cs)) {
		current->utime++;
		if (current != task[0]) {
			if (current->priority < 15)
				kstat.cpu_nice++;	/* niced task */
			else
				kstat.cpu_user++;
		}
		/* ITIMER_VIRTUAL ticks only while in user mode */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		/* Interrupted kernel mode: charge system time. */
		current->stime++;
		if(current != task[0])
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		/* kernel profiling: histogram of interrupted EIPs */
		if (prof_buffer && current != task[0]) {
			unsigned long eip = regs->eip;
			eip >>= 2;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}
	/* Burn one tick of the quantum; request a reschedule when it
	 * runs out (task[0], the idle task, is exempt). */
	if (current != task[0] && 0 > --current->counter) {
		current->counter = 0;
		need_resched = 1;
	}
	/* ITIMER_PROF ticks in both user and kernel mode */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
	/* Any expired static timer -> run timer_bh later. */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	cli();
	itimer_ticks++;
	/* a real interval timer is due: make schedule() run soon */
	if (itimer_ticks > itimer_next)
		need_resched = 1;
	if (next_timer) {
		if (next_timer->expires) {
			next_timer->expires--;
			if (!next_timer->expires)
				mark_bh(TIMER_BH);
		} else {
			/* head already expired but the bottom half has
			 * not run yet: count the lost tick */
			lost_ticks++;
			mark_bh(TIMER_BH);
		}
	}
	sti();
}
672
/*
 * alarm(2), implemented on top of ITIMER_REAL: arm a one-shot real
 * timer of 'seconds' seconds (0 cancels) and return the whole seconds
 * that were left on the previous timer.
 * NOTE(review): the tv_usec/1000000 term truncates, so any fractional
 * remainder of the old timer is rounded down, not up.
 */
asmlinkage int sys_alarm(long seconds)
{
	struct itimerval it_new, it_old;

	it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
	it_new.it_value.tv_sec = seconds;
	it_new.it_value.tv_usec = 0;
	_setitimer(ITIMER_REAL, &it_new, &it_old);
	return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
}
683
/* getpid(2): process id of the calling task. */
asmlinkage int sys_getpid(void)
{
	return current->pid;
}

/* getppid(2): pid of the original parent (p_opptr). */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}

/* getuid(2): real user id. */
asmlinkage int sys_getuid(void)
{
	return current->uid;
}

/* geteuid(2): effective user id. */
asmlinkage int sys_geteuid(void)
{
	return current->euid;
}

/* getgid(2): real group id. */
asmlinkage int sys_getgid(void)
{
	return current->gid;
}

/* getegid(2): effective group id. */
asmlinkage int sys_getegid(void)
{
	return current->egid;
}
713
714 asmlinkage int sys_nice(long increment)
715 {
716 int newprio;
717
718 if (increment < 0 && !suser())
719 return -EPERM;
720 newprio = current->priority - increment;
721 if (newprio < 1)
722 newprio = 1;
723 if (newprio > 35)
724 newprio = 35;
725 current->priority = newprio;
726 return 0;
727 }
728
/*
 * Print one row of the show_state() table for task p: command name,
 * slot number (negated when p is the current task), one-letter state,
 * saved PC, an estimate of unused kernel stack, and the pid/family
 * pids (father, child, younger and older siblings).
 */
static void show_task(int nr,struct task_struct * p)
{
	unsigned long free;
	/* state letters indexed by task state: Running, Sleeping,
	 * uninterruptible (D), Zombie, sTopped, sWapping */
	static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
	if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[p->state]);
	else
		printk(" ");
	if (p == current)
		printk(" current ");
	else
		/* NOTE(review): presumably the saved EIP on the task's
		 * kernel stack — depends on the switch layout, verify. */
		printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);
	/* Scan up from the base of the kernel stack page for the first
	 * non-zero word; free<<2 bytes were never touched (assumes the
	 * page started zeroed and the stack grows down). */
	for (free = 1; free < 1024 ; free++) {
		if (((unsigned long *)p->kernel_stack_page)[free])
			break;
	}
	printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
		printk(" ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);
	else
		printk(" ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);
	else
		printk("\n");
}
761
762 void show_state(void)
763 {
764 int i;
765
766 printk(" free sibling\n");
767 printk(" task PC stack pid father child younger older\n");
768 for (i=0 ; i<NR_TASKS ; i++)
769 if (task[i])
770 show_task(i,task[i]);
771 }
772
/*
 * One-time scheduler initialization: installs the timer bottom half,
 * sets up GDT descriptors and the int 0x80 system-call gate for task
 * 0, clears the remaining task slots, programs the 8253 PIT for HZ
 * interrupts, and claims the timer IRQ.
 */
void sched_init(void)
{
	int i;
	struct desc_struct * p;

	bh_base[TIMER_BH].routine = timer_bh;
	/* code elsewhere presumably relies on this exact layout */
	if (sizeof(struct sigaction) != 16)
		panic("Struct sigaction MUST be 16 bytes");
	set_tss_desc(gdt+FIRST_TSS_ENTRY,&init_task.tss);
	set_ldt_desc(gdt+FIRST_LDT_ENTRY,&default_ldt,1);
	set_system_gate(0x80,&system_call);	/* system-call entry */
	/* Clear the TSS+LDT descriptor pair of every other task slot. */
	p = gdt+2+FIRST_TSS_ENTRY;
	for(i=1 ; i<NR_TASKS ; i++) {
		task[i] = NULL;
		p->a=p->b=0;
		p++;
		p->a=p->b=0;
		p++;
	}
	/* Clear NT (EFLAGS bit 14, mask 0x4000) so an IRET never tries
	 * a hardware task switch. */
	__asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
	load_TR(0);
	load_ldt(0);
	/* 8253 PIT channel 0: control word 0x34 = channel 0, read/load
	 * LSB then MSB, mode 2 (rate generator); divisor LATCH gives an
	 * interrupt rate of HZ. */
	outb_p(0x34,0x43);
	outb_p(LATCH & 0xff , 0x40);	/* LSB */
	outb(LATCH >> 8 , 0x40);	/* MSB */
	if (request_irq(TIMER_IRQ,(void (*)(int)) do_timer)!=0)
		panic("Could not allocate timer IRQ!");
}