This source file includes following definitions.
- math_state_restore
- math_emulate
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __down
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/config.h>
15 #include <linux/signal.h>
16 #include <linux/sched.h>
17 #include <linux/timer.h>
18 #include <linux/kernel.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/fdreg.h>
21 #include <linux/errno.h>
22 #include <linux/time.h>
23 #include <linux/ptrace.h>
24 #include <linux/segment.h>
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27
28 #include <asm/system.h>
29 #include <asm/io.h>
30 #include <asm/segment.h>
31
32 #define TIMER_IRQ 0
33
34 #include <linux/timex.h>
35
36
37
38
39 long tick = 1000000 / HZ;
40 volatile struct timeval xtime;
41 int tickadj = 500/HZ;
42
43
44
45
46 int time_status = TIME_BAD;
47 long time_offset = 0;
48 long time_constant = 0;
49 long time_tolerance = MAXFREQ;
50 long time_precision = 1;
51 long time_maxerror = 0x70000000;
52 long time_esterror = 0x70000000;
53 long time_phase = 0;
54 long time_freq = 0;
55 long time_adj = 0;
56 long time_reftime = 0;
57
58 long time_adjust = 0;
59 long time_adjust_step = 0;
60
61 int need_resched = 0;
62
63
64
65
66 int hard_math = 0;
67 int x86 = 0;
68 int ignore_irq13 = 0;
69 int wp_works_ok = 0;
70
71
72
73
74 int EISA_bus = 0;
75
76 extern int _setitimer(int, struct itimerval *, struct itimerval *);
77 unsigned long * prof_buffer = NULL;
78 unsigned long prof_len = 0;
79
80 #define _S(nr) (1<<((nr)-1))
81
82 extern void mem_use(void);
83
84 extern int timer_interrupt(void);
85 asmlinkage int system_call(void);
86
87 static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
88 struct task_struct init_task = INIT_TASK;
89
90 unsigned long volatile jiffies=0;
91
92 struct task_struct *current = &init_task;
93 struct task_struct *last_task_used_math = NULL;
94
95 struct task_struct * task[NR_TASKS] = {&init_task, };
96
97 long user_stack [ PAGE_SIZE>>2 ] = { STACK_MAGIC, };
98
99 struct {
100 long * a;
101 short b;
102 } stack_start = { & user_stack [PAGE_SIZE>>2] , KERNEL_DS };
103
104 struct kernel_stat kstat = { 0 };
105
106
107
108
109
110
111
112
/*
 * Lazy FPU context switch: called on the first coprocessor use after a
 * task switch.  Saves the previous owner's FPU state and loads (or
 * initializes) the current task's state.
 */
asmlinkage void math_state_restore(void)
{
	__asm__ __volatile__("clts");	/* clear TS so FPU ops no longer trap */
	if (last_task_used_math == current)
		return;			/* state already belongs to us */
	/* Arm the coprocessor watchdog timer while we touch the FPU. */
	timer_table[COPRO_TIMER].expires = jiffies+50;
	timer_active |= 1<<COPRO_TIMER;
	if (last_task_used_math)
		/* save outgoing owner's full FPU state into its TSS */
		__asm__("fnsave %0":"=m" (last_task_used_math->tss.i387));
	else
		/* no owner: just clear any pending FPU exceptions */
		__asm__("fnclex");
	last_task_used_math = current;
	if (current->used_math) {
		/* restore this task's previously saved FPU state */
		__asm__("frstor %0": :"m" (current->tss.i387));
	} else {
		/* first FPU use by this task: start from a clean state */
		__asm__("fninit");
		current->used_math=1;
	}
	timer_active &= ~(1<<COPRO_TIMER);	/* disarm the watchdog */
}
133
#ifndef CONFIG_MATH_EMULATION

/*
 * Stub used when the kernel is built without FPU emulation: a task that
 * executes an FPU instruction on a machine with no coprocessor gets
 * SIGFPE and is rescheduled.  Never returns to the faulting instruction
 * usefully.
 */
asmlinkage void math_emulate(long arg)
{
	printk("math-emulation not enabled and no coprocessor found.\n");
	printk("killing %s.\n",current->comm);
	send_sig(SIGFPE,current,1);
	schedule();
}

#endif
145
146 unsigned long itimer_ticks = 0;
147 unsigned long itimer_next = ~0;
148 static unsigned long lost_ticks = 0;
149
150
151
152
153
154
155
156
157
158
159
160
161
/*
 * The main scheduler.  Two passes over the circular task list rooted at
 * init_task:
 *
 *  Pass 1: deliver ITIMER_REAL expirations (using the tick count
 *	accumulated by do_timer) and wake tasks whose signals are
 *	deliverable or whose timeout has passed.
 *  Pass 2: pick the runnable task with the largest remaining counter
 *	and switch to it.
 *
 * The "confuse_gcc" labels exist to break out of the for(;;) loops;
 * their exact placement is relied upon by the gotos.
 */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * next;
	unsigned long ticks;

	/* Scheduling from interrupt context is a bug: report and recover. */
	if (intr_count) {
		printk("Aiee: scheduling in interrupt\n");
		intr_count = 0;
	}
	/* Atomically consume the itimer ticks accumulated by do_timer(). */
	cli();
	ticks = itimer_ticks;
	itimer_ticks = 0;
	itimer_next = ~0;	/* recomputed below as the minimum it_real_value */
	sti();
	need_resched = 0;
	p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc1;	/* full circle: pass 1 done */
		if (ticks && p->it_real_value) {
			if (p->it_real_value <= ticks) {
				send_sig(SIGALRM, p, 1);
				if (!p->it_real_incr) {
					/* one-shot timer: clear and skip the
					 * itimer_next update below */
					p->it_real_value = 0;
					goto end_itimer;
				}
				/* periodic timer: advance past all the
				 * intervals covered by the elapsed ticks */
				do {
					p->it_real_value += p->it_real_incr;
				} while (p->it_real_value <= ticks);
			}
			p->it_real_value -= ticks;
			/* track the soonest pending itimer expiry */
			if (p->it_real_value < itimer_next)
				itimer_next = p->it_real_value;
		}
end_itimer:
		if (p->state != TASK_INTERRUPTIBLE)
			continue;
		/* wake on a deliverable (non-blocked) pending signal */
		if (p->signal & ~p->blocked) {
			p->state = TASK_RUNNING;
			continue;
		}
		/* wake on an expired sleep timeout */
		if (p->timeout && p->timeout <= jiffies) {
			p->timeout = 0;
			p->state = TASK_RUNNING;
		}
	}
confuse_gcc1:

	/* Disabled experiment: give sleeping tasks a small counter boost. */
#if 0
	if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
		current->counter < current->priority*2) {
		++current->counter;
	}
#endif
	/* Pass 2: choose the runnable task with the highest counter.
	 * c starts at -1 so a runnable task with counter 0 still beats
	 * the init_task default. */
	c = -1;
	next = p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc2;
		if (p->state == TASK_RUNNING && p->counter > c)
			c = p->counter, next = p;
	}
confuse_gcc2:
	/* All runnable counters exhausted: recharge every task.  Sleepers
	 * keep half their old counter, so they get a wakeup bonus. */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
	if(current != next)
		kstat.context_swtch++;
	switch_to(next);
	/* Reload hardware debug registers if the new task uses them
	 * (debugreg[7] is the x86 debug control register). */
	if(current->debugreg[7]){
		loaddebug(0);
		loaddebug(1);
		loaddebug(2);
		loaddebug(3);
		loaddebug(6);
	};
}
250
/*
 * pause() system call: sleep until a signal arrives.  Always returns
 * -ERESTARTNOHAND so the call is not restarted after the signal.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
257
258
259
260
261
262
263
264
265
/*
 * Wake every task on the circular wait queue *q, whether it sleeps
 * interruptibly or not.  If a woken task has a larger counter than the
 * current one, request a reschedule.  A broken (non-circular) queue is
 * reported rather than followed off the end.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;		/* no queue or queue empty */
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE)) {
				p->state = TASK_RUNNING;
				if (p->counter > current->counter)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			/* Corrupt queue.  The [-1] read presumably fishes a
			 * return address off the stack near q for debugging —
			 * intentional but fragile. */
			printk("wait_queue is bad (eip = %08lx)\n",((unsigned long *) q)[-1]);
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);	/* stop after one full lap */
}
292
/*
 * Like wake_up(), but only wakes tasks in TASK_INTERRUPTIBLE sleep;
 * uninterruptible sleepers on the same queue are left alone.
 */
void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;		/* no queue or queue empty */
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE) {
				p->state = TASK_RUNNING;
				if (p->counter > current->counter)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			/* Corrupt (non-circular) queue: dump state and bail. */
			printk("wait_queue is bad (eip = %08lx)\n",((unsigned long *) q)[-1]);
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);	/* stop after one full lap */
}
318
/*
 * Semaphore slow path: sleep uninterruptibly until sem->count becomes
 * positive.  The state is set to TASK_UNINTERRUPTIBLE *before* the count
 * is tested so a wake_up between the test and the schedule() is not lost.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };	/* on-stack queue entry */
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		/* wake_up set us RUNNING; go back to sleep if we lost the race */
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
331
/*
 * Common helper for sleep_on()/interruptible_sleep_on(): put the current
 * task on wait queue *p in the given sleep state and schedule away.
 * Interrupts are enabled across the schedule() and the caller's flags
 * are restored afterwards.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };	/* on-stack queue entry */

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");	/* idle task must never block */
	current->state = state;		/* set state before queueing: no lost wakeups */
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();				/* allow the wakeup interrupt to arrive */
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
349
/* Sleep on *p until woken; signals may also end the sleep. */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}
354
/* Sleep on *p until explicitly woken; signals do not end the sleep. */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
359
360 static struct timer_list * next_timer = NULL;
361
/*
 * Insert a timer into the singly linked delta list headed by next_timer.
 * Each node's 'expires' holds ticks *relative to the previous node*, so
 * only the head needs decrementing every tick.  On entry timer->expires
 * is an absolute tick count from now; it is converted to a delta here.
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list ** p;

	if (!timer)
		return;
	timer->next = NULL;
	p = &next_timer;
	save_flags(flags);
	cli();			/* the list is also walked from the timer tick */
	while (*p) {
		if ((*p)->expires > timer->expires) {
			/* insert before *p: absorb our delta from its delta */
			(*p)->expires -= timer->expires;
			timer->next = *p;
			break;
		}
		/* skip past *p: our remaining delta shrinks by its delta */
		timer->expires -= (*p)->expires;
		p = &(*p)->next;
	}
	*p = timer;
	restore_flags(flags);
}
385
/*
 * Remove a timer from the delta list.  Returns 1 if the timer was found
 * (with timer->expires restored to the ticks that were still remaining),
 * 0 if it was not on the list.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
	unsigned long expires = 0;	/* sum of deltas before the timer */
	struct timer_list **p;

	p = &next_timer;
	save_flags(flags);
	cli();			/* the list is also walked from the timer tick */
	while (*p) {
		if (*p == timer) {
			/* unlink; give our delta to the successor */
			if ((*p = timer->next) != NULL)
				(*p)->expires += timer->expires;
			/* convert the stored delta back to total remaining ticks */
			timer->expires += expires;
			restore_flags(flags);
			return 1;
		}
		expires += (*p)->expires;
		p = &(*p)->next;
	}
	restore_flags(flags);
	return 0;
}
409
410 unsigned long timer_active = 0;
411 struct timer_struct timer_table[32];
412
413
414
415
416
417
418
419 unsigned long avenrun[3] = { 0,0,0 };
420
421
422
423
424 static unsigned long count_active_tasks(void)
425 {
426 struct task_struct **p;
427 unsigned long nr = 0;
428
429 for(p = &LAST_TASK; p > &FIRST_TASK; --p)
430 if (*p && ((*p)->state == TASK_RUNNING ||
431 (*p)->state == TASK_UNINTERRUPTIBLE ||
432 (*p)->state == TASK_SWAPPING))
433 nr += FIXED_1;
434 return nr;
435 }
436
/*
 * Recompute the 1/5/15-minute load averages.  Called once per tick from
 * do_timer(); the static countdown limits the real work to once every
 * LOAD_FREQ ticks.
 */
static inline void calc_load(void)
{
	unsigned long active_tasks;	/* fixed-point count from count_active_tasks() */
	static int count = LOAD_FREQ;

	if (count-- > 0)
		return;		/* not yet time to update */
	count = LOAD_FREQ;
	active_tasks = count_active_tasks();
	/* exponential decay toward the current task count, three horizons */
	CALC_LOAD(avenrun[0], EXP_1, active_tasks);
	CALC_LOAD(avenrun[1], EXP_5, active_tasks);
	CALC_LOAD(avenrun[2], EXP_15, active_tasks);
}
450
451
452
453
454
455
456
457
458
459
460
/*
 * Housekeeping run once per second (from do_timer when tv_usec wraps):
 * grows the NTP error bound, converts the phase offset into a per-tick
 * adjustment, handles announced leap seconds, and periodically writes
 * the software clock back to the CMOS RTC.  The shift constants come
 * from <linux/timex.h> (kernel PLL, per the NTP kernel-clock model).
 */
static void second_overflow(void)
{
	long ltemp;

	static long last_rtc_update=0;
	extern int set_rtc_mmss(unsigned long);

	/* Maximum error grows by the frequency tolerance each second,
	 * saturating at 0x70000000. */
	time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
	    0x70000000 : (time_maxerror + time_tolerance);

	/* Turn the remaining phase offset into this second's adjustment,
	 * amortized by the loop time constant.  The +1/-(x+1) dance keeps
	 * ltemp >= 1 so progress is always made. */
	if (time_offset < 0) {
		ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
		time_adj = - time_adj;
	} else if (time_offset > 0) {
		ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
	} else {
		time_adj = 0;
	}

	/* Add the long-term frequency correction plus the compiled-in
	 * FINETUNE constant. */
	time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
	    + FINETUNE;

	/* Leap-second state machine (86400 s per day). */
	switch (time_status) {
		case TIME_INS:
		/* insert a second at midnight UTC by stepping back */
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_status = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 GMT\n");
		}
		break;

		case TIME_DEL:
		/* delete the last second of the day by stepping forward */
		if (xtime.tv_sec % 86400 == 86399) {
			xtime.tv_sec++;
			time_status = TIME_OK;
			printk("Clock: deleting leap second 23:59:59 GMT\n");
		}
		break;

		case TIME_OOP:
		/* the inserted second has now passed */
		time_status = TIME_OK;
		break;
	}
	/* Sync the CMOS clock roughly every 11 minutes (660 s). */
	if (xtime.tv_sec > last_rtc_update + 660)
	  if (set_rtc_mmss(xtime.tv_sec) == 0)
	    last_rtc_update = xtime.tv_sec;
}
517
518
519
520
/*
 * Timer bottom half: runs expired list timers and expired timer_table
 * entries.  Timer functions are called with interrupts enabled; the
 * list itself is only manipulated with interrupts off.
 */
static void timer_bh(void * unused)
{
	unsigned long mask;
	struct timer_struct *tp;

	cli();
	/* Delta list: every head node with a zero delta has expired. */
	while (next_timer && next_timer->expires == 0) {
		void (*fn)(unsigned long) = next_timer->function;
		unsigned long data = next_timer->data;
		next_timer = next_timer->next;	/* unlink before calling */
		sti();
		fn(data);	/* run the handler with interrupts on */
		cli();
	}
	sti();

	/* Static timer table: scan active slots, lowest bit first.  Once
	 * mask exceeds timer_active no higher slot can be active. */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;	/* one-shot: clear before running */
		tp->fn();
		sti();
	}
}
549
550
551
552
553
554
555
/*
 * The timer interrupt handler proper (hooked to IRQ 0 in sched_init).
 * Advances xtime with NTP phase/adjtime corrections, bumps jiffies,
 * updates load and per-task accounting, fires virtual/profile itimers,
 * and marks the timer bottom half when any timer has expired.
 * Runs in interrupt context; regs describes the interrupted context.
 */
static void do_timer(struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;

	long ltemp;

	/* Advance the clock by one tick, folding in the scaled NTP phase
	 * adjustment (time_adj) and any adjtime step.  time_phase holds
	 * the sub-microsecond residue in SHIFT_SCALE fixed point. */
	time_phase += time_adj;
	if (time_phase < -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase > FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust)
	{
		/* adjtime() in progress: slew by at most tickadj usec per
		 * tick, in the direction of the outstanding adjustment. */
		if (time_adjust > tickadj)
		  time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
		  time_adjust_step = -tickadj;
		else
		  time_adjust_step = time_adjust;

		/* consume the part applied this tick */
		time_adjust -= time_adjust_step;
	}
	else
		time_adjust_step = 0;

	/* carry microseconds into seconds; once-per-second housekeeping */
	if (xtime.tv_usec >= 1000000) {
	    xtime.tv_usec -= 1000000;
	    xtime.tv_sec++;
	    second_overflow();
	}

	jiffies++;
	calc_load();
	/* Charge the tick: user time if the CPU was in vm86 mode or ring 3,
	 * otherwise system time.  task[0] (idle) is never charged to the
	 * cpu_* statistics. */
	if ((VM_MASK & regs->eflags) || (3 & regs->cs)) {
		current->utime++;
		if (current != task[0]) {
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}
		/* ITIMER_VIRTUAL counts down only in user time */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		current->stime++;
		if(current != task[0])
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		/* kernel profiling: bucket the interrupted EIP */
		if (prof_buffer && current != task[0]) {
			unsigned long eip = regs->eip;
			eip >>= 2;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}
	/* timeslice used up (or idle task): ask for a reschedule */
	if (current == task[0] || (--current->counter)<=0) {
		current->counter=0;
		need_resched = 1;
	}
	/* ITIMER_PROF counts down in both user and system time */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
	/* mark the bottom half if any timer_table slot has expired */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	cli();
	/* itimer bookkeeping consumed by schedule() */
	itimer_ticks++;
	if (itimer_ticks > itimer_next)
		need_resched = 1;
	/* decrement the head of the delta timer list */
	if (next_timer) {
		if (next_timer->expires) {
			next_timer->expires--;
			if (!next_timer->expires)
				mark_bh(TIMER_BH);
		} else {
			/* head already expired but bh hasn't run yet */
			lost_ticks++;
			mark_bh(TIMER_BH);
		}
	}
	sti();
}
671
672 asmlinkage int sys_alarm(long seconds)
673 {
674 struct itimerval it_new, it_old;
675
676 it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
677 it_new.it_value.tv_sec = seconds;
678 it_new.it_value.tv_usec = 0;
679 _setitimer(ITIMER_REAL, &it_new, &it_old);
680 return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
681 }
682
/* getpid(): return the calling task's process id. */
asmlinkage int sys_getpid(void)
{
	return current->pid;
}
687
/* getppid(): return the pid of the parent (via the p_opptr link —
 * presumably the original parent; confirm against the task_struct docs). */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}
692
/* getuid(): return the calling task's real user id. */
asmlinkage int sys_getuid(void)
{
	return current->uid;
}
697
/* geteuid(): return the calling task's effective user id. */
asmlinkage int sys_geteuid(void)
{
	return current->euid;
}
702
/* getgid(): return the calling task's real group id. */
asmlinkage int sys_getgid(void)
{
	return current->gid;
}
707
/* getegid(): return the calling task's effective group id. */
asmlinkage int sys_getegid(void)
{
	return current->egid;
}
712
713 asmlinkage int sys_nice(long increment)
714 {
715 int newprio;
716
717 if (increment < 0 && !suser())
718 return -EPERM;
719 newprio = current->priority - increment;
720 if (newprio < 1)
721 newprio = 1;
722 if (newprio > 35)
723 newprio = 35;
724 current->priority = newprio;
725 return 0;
726 }
727
728 static void show_task(int nr,struct task_struct * p)
729 {
730 unsigned long free;
731 static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
732
733 printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
734 if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
735 printk(stat_nam[p->state]);
736 else
737 printk(" ");
738 if (p == current)
739 printk(" current ");
740 else
741 printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);
742 for (free = 1; free < 1024 ; free++) {
743 if (((unsigned long *)p->kernel_stack_page)[free])
744 break;
745 }
746 printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
747 if (p->p_cptr)
748 printk("%5d ", p->p_cptr->pid);
749 else
750 printk(" ");
751 if (p->p_ysptr)
752 printk("%7d", p->p_ysptr->pid);
753 else
754 printk(" ");
755 if (p->p_osptr)
756 printk(" %5d\n", p->p_osptr->pid);
757 else
758 printk("\n");
759 }
760
761 void show_state(void)
762 {
763 int i;
764
765 printk(" free sibling\n");
766 printk(" task PC stack pid father child younger older\n");
767 for (i=0 ; i<NR_TASKS ; i++)
768 if (task[i])
769 show_task(i,task[i]);
770 }
771
/*
 * One-time scheduler initialization: wires up the timer bottom half,
 * installs init_task's TSS and LDT descriptors plus the system-call
 * gate, clears the remaining task slots and descriptors, programs the
 * i8253/i8254 timer chip, and claims IRQ 0 for do_timer().
 */
void sched_init(void)
{
	int i;
	struct desc_struct * p;

	bh_base[TIMER_BH].routine = timer_bh;
	/* assembly entry code depends on this exact size */
	if (sizeof(struct sigaction) != 16)
		panic("Struct sigaction MUST be 16 bytes");
	set_tss_desc(gdt+FIRST_TSS_ENTRY,&init_task.tss);
	set_ldt_desc(gdt+FIRST_LDT_ENTRY,&default_ldt,1);
	set_system_gate(0x80,&system_call);	/* int 0x80 = syscall entry */
	/* clear the TSS/LDT descriptor pairs for all other task slots */
	p = gdt+2+FIRST_TSS_ENTRY;
	for(i=1 ; i<NR_TASKS ; i++) {
		task[i] = NULL;
		p->a=p->b=0;
		p++;
		p->a=p->b=0;
		p++;
	}
	/* Clear NT (bit 14) in EFLAGS so the LTR/task machinery behaves. */
	__asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
	load_TR(0);
	load_ldt(0);
	/* Program PIT channel 0: command 0x34 = lo/hi byte, mode 2 (rate
	 * generator), then the LATCH divisor for HZ interrupts/second. */
	outb_p(0x34,0x43);
	outb_p(LATCH & 0xff , 0x40);	/* low byte of divisor */
	outb(LATCH >> 8 , 0x40);	/* high byte of divisor */
	if (request_irq(TIMER_IRQ,(void (*)(int)) do_timer)!=0)
		panic("Could not allocate timer IRQ!");
}
800 }