This source file includes the following definitions:
- math_state_restore
- math_emulate
- schedule
- sys_pause
- wake_up
- wake_up_interruptible
- __sleep_on
- interruptible_sleep_on
- sleep_on
- add_timer
- del_timer
- count_active_tasks
- calc_load
- second_overflow
- timer_bh
- do_timer
- sys_alarm
- sys_getpid
- sys_getppid
- sys_getuid
- sys_geteuid
- sys_getgid
- sys_getegid
- sys_nice
- show_task
- show_state
- sched_init
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/sys.h>
#include <linux/fdreg.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/ptrace.h>
#include <linux/segment.h>
#include <linux/delay.h>
#include <linux/interrupt.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/segment.h>

#define TIMER_IRQ 0

#include <linux/timex.h>

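/* Timekeeping state: 'tick' is the length of a timer tick in microseconds,
 * 'xtime' is the current time of day, and 'tickadj' bounds how much a
 * single tick may be lengthened or shortened when an adjtime() adjustment
 * is being applied. */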
long tick = 1000000 / HZ;
volatile struct timeval xtime;
int tickadj = 500/HZ;

int time_status = TIME_BAD;
long time_offset = 0;
long time_constant = 0;
long time_tolerance = MAXFREQ;
long time_precision = 1;
long time_maxerror = 0x70000000;
long time_esterror = 0x70000000;
long time_phase = 0;
long time_freq = 0;
long time_adj = 0;
long time_reftime = 0;

long time_adjust = 0;

int need_resched = 0;

int hard_math = 0;
int x86 = 0;
int ignore_irq13 = 0;
int wp_works_ok = 0;

extern int _setitimer(int, struct itimerval *, struct itimerval *);
unsigned long * prof_buffer = NULL;
unsigned long prof_len = 0;

#define _S(nr) (1<<((nr)-1))

extern void mem_use(void);

extern int timer_interrupt(void);
asmlinkage int system_call(void);

static unsigned long init_kernel_stack[1024];
struct task_struct init_task = INIT_TASK;

unsigned long volatile jiffies = 0;

struct task_struct *current = &init_task;
struct task_struct *last_task_used_math = NULL;

struct task_struct * task[NR_TASKS] = {&init_task, };

long user_stack [ PAGE_SIZE>>2 ] ;

struct {
	long * a;
	short b;
} stack_start = { & user_stack [PAGE_SIZE>>2] , KERNEL_DS };

#ifdef __cplusplus
extern "C" {
#endif

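/* The system call dispatch table: entry i is the handler for system call
 * number i, invoked from the int 0x80 entry point in system_call(). */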
fn_ptr sys_call_table[] = { sys_setup, sys_exit, sys_fork, sys_read,
sys_write, sys_open, sys_close, sys_waitpid, sys_creat, sys_link,
sys_unlink, sys_execve, sys_chdir, sys_time, sys_mknod, sys_chmod,
sys_chown, sys_break, sys_stat, sys_lseek, sys_getpid, sys_mount,
sys_umount, sys_setuid, sys_getuid, sys_stime, sys_ptrace, sys_alarm,
sys_fstat, sys_pause, sys_utime, sys_stty, sys_gtty, sys_access,
sys_nice, sys_ftime, sys_sync, sys_kill, sys_rename, sys_mkdir,
sys_rmdir, sys_dup, sys_pipe, sys_times, sys_prof, sys_brk, sys_setgid,
sys_getgid, sys_signal, sys_geteuid, sys_getegid, sys_acct, sys_phys,
sys_lock, sys_ioctl, sys_fcntl, sys_mpx, sys_setpgid, sys_ulimit,
sys_olduname, sys_umask, sys_chroot, sys_ustat, sys_dup2, sys_getppid,
sys_getpgrp, sys_setsid, sys_sigaction, sys_sgetmask, sys_ssetmask,
sys_setreuid, sys_setregid, sys_sigsuspend, sys_sigpending,
sys_sethostname, sys_setrlimit, sys_getrlimit, sys_getrusage,
sys_gettimeofday, sys_settimeofday, sys_getgroups, sys_setgroups,
sys_select, sys_symlink, sys_lstat, sys_readlink, sys_uselib,
sys_swapon, sys_reboot, sys_readdir, sys_mmap, sys_munmap, sys_truncate,
sys_ftruncate, sys_fchmod, sys_fchown, sys_getpriority, sys_setpriority,
sys_profil, sys_statfs, sys_fstatfs, sys_ioperm, sys_socketcall,
sys_syslog, sys_setitimer, sys_getitimer, sys_newstat, sys_newlstat,
sys_newfstat, sys_uname, sys_iopl, sys_vhangup, sys_idle, sys_vm86,
sys_wait4, sys_swapoff, sys_sysinfo, sys_ipc, sys_fsync, sys_sigreturn,
sys_clone, sys_setdomainname, sys_newuname, sys_modify_ldt,
sys_adjtimex, sys_mprotect, sys_sigprocmask,
sys_create_module, sys_init_module, sys_delete_module, sys_get_kernel_syms };

int NR_syscalls = sizeof(sys_call_table)/sizeof(fn_ptr);

#ifdef __cplusplus
}
#endif

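/* math_state_restore() runs when a task touches the FPU after a context
 * switch (the TS flag traps the first floating-point instruction).  It
 * clears TS, saves the previous owner's FPU state into its TSS and
 * restores (or initializes) the current task's state.  The COPRO_TIMER
 * entry guards against a coprocessor that never completes the fnsave. */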
asmlinkage void math_state_restore(void)
{
	__asm__ __volatile__("clts");
	if (last_task_used_math == current)
		return;
	timer_table[COPRO_TIMER].expires = jiffies+50;
	timer_active |= 1<<COPRO_TIMER;
	if (last_task_used_math)
		__asm__("fnsave %0":"=m" (last_task_used_math->tss.i387));
	else
		__asm__("fnclex");
	last_task_used_math = current;
	if (current->used_math) {
		__asm__("frstor %0": :"m" (current->tss.i387));
	} else {
		__asm__("fninit");
		current->used_math = 1;
	}
	timer_active &= ~(1<<COPRO_TIMER);
}

#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
	printk("math-emulation not enabled and no coprocessor found.\n");
	printk("killing %s.\n",current->comm);
	send_sig(SIGFPE,current,1);
	schedule();
}

#endif

static unsigned long itimer_ticks = 0;
static unsigned long itimer_next = ~0;
static unsigned long lost_ticks = 0;

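/* schedule() is the scheduler proper.  It first charges any pending
 * ITIMER_REAL ticks to every task and wakes interruptible tasks whose
 * unblocked signals or timeouts are pending, then picks the runnable
 * task with the largest remaining 'counter'.  If every runnable counter
 * has reached zero, all counters are recharged from the task priorities
 * before switching. */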
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * next;
	unsigned long ticks;

	cli();
	ticks = itimer_ticks;
	itimer_ticks = 0;
	itimer_next = ~0;
	sti();
	need_resched = 0;
	p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc1;
		if (ticks && p->it_real_value) {
			if (p->it_real_value <= ticks) {
				send_sig(SIGALRM, p, 1);
				if (!p->it_real_incr) {
					p->it_real_value = 0;
					goto end_itimer;
				}
				do {
					p->it_real_value += p->it_real_incr;
				} while (p->it_real_value <= ticks);
			}
			p->it_real_value -= ticks;
			if (p->it_real_value < itimer_next)
				itimer_next = p->it_real_value;
		}
end_itimer:
		if (p->state != TASK_INTERRUPTIBLE)
			continue;
		if (p->signal & ~p->blocked) {
			p->state = TASK_RUNNING;
			continue;
		}
		if (p->timeout && p->timeout <= jiffies) {
			p->timeout = 0;
			p->state = TASK_RUNNING;
		}
	}
confuse_gcc1:

#if 0
	if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
	    current->counter < current->priority*2) {
		++current->counter;
	}
#endif
	c = -1;
	next = p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc2;
		if (p->state == TASK_RUNNING && p->counter > c)
			c = p->counter, next = p;
	}
confuse_gcc2:
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
	switch_to(next);

	if (current->debugreg[7]) {
		loaddebug(0);
		loaddebug(1);
		loaddebug(2);
		loaddebug(3);
		loaddebug(6);
	}
}

asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

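/* wake_up() makes every task on the given wait queue runnable;
 * wake_up_interruptible() only wakes tasks sleeping in
 * TASK_INTERRUPTIBLE.  Both set need_resched if a woken task has more
 * ticks left than the current one, and report a corrupted (non-circular)
 * queue instead of looping forever. */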
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE)) {
				p->state = TASK_RUNNING;
				if (p->counter > current->counter)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			printk("wait_queue is bad (eip = %08lx)\n",((unsigned long *) q)[-1]);
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}

void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE) {
				p->state = TASK_RUNNING;
				if (p->counter > current->counter)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			printk("wait_queue is bad (eip = %08lx)\n",((unsigned long *) q)[-1]);
			printk(" q = %p\n",q);
			printk(" *q = %p\n",*q);
			printk(" tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}

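/* __sleep_on() puts the current task on a wait queue in the given state
 * and calls schedule() with interrupts enabled; the two public wrappers
 * choose between interruptible and uninterruptible sleep. */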
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}

void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}

void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}

static struct timer_list * next_timer = NULL;

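/* The dynamic timer list is kept as a singly linked delta list: each
 * entry's 'expires' holds the ticks remaining after all earlier entries,
 * so do_timer() only ever decrements the head.  add_timer() and
 * del_timer() adjust the deltas of the neighbours they touch. */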
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list ** p;

	if (!timer)
		return;
	timer->next = NULL;
	p = &next_timer;
	save_flags(flags);
	cli();
	while (*p) {
		if ((*p)->expires > timer->expires) {
			(*p)->expires -= timer->expires;
			timer->next = *p;
			break;
		}
		timer->expires -= (*p)->expires;
		p = &(*p)->next;
	}
	*p = timer;
	restore_flags(flags);
}

int del_timer(struct timer_list * timer)
{
	unsigned long flags;
	unsigned long expires = 0;
	struct timer_list **p;

	p = &next_timer;
	save_flags(flags);
	cli();
	while (*p) {
		if (*p == timer) {
			if ((*p = timer->next) != NULL)
				(*p)->expires += timer->expires;
			timer->expires += expires;
			restore_flags(flags);
			return 1;
		}
		expires += (*p)->expires;
		p = &(*p)->next;
	}
	restore_flags(flags);
	return 0;
}

unsigned long timer_active = 0;
struct timer_struct timer_table[32];

unsigned long avenrun[3] = { 0,0,0 };

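/* Load-average bookkeeping: every LOAD_FREQ ticks calc_load() counts the
 * tasks that are running, uninterruptible or swapping and folds that
 * number (scaled by FIXED_1) into the 1, 5 and 15 minute averages in
 * avenrun[] via the CALC_LOAD exponential-decay macro. */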
static unsigned long count_active_tasks(void)
{
	struct task_struct **p;
	unsigned long nr = 0;

	for(p = &LAST_TASK; p > &FIRST_TASK; --p)
		if (*p && ((*p)->state == TASK_RUNNING ||
			   (*p)->state == TASK_UNINTERRUPTIBLE ||
			   (*p)->state == TASK_SWAPPING))
			nr += FIXED_1;
	return nr;
}

static inline void calc_load(void)
{
	unsigned long active_tasks;
	static int count = LOAD_FREQ;

	if (count-- > 0)
		return;
	count = LOAD_FREQ;
	active_tasks = count_active_tasks();
	CALC_LOAD(avenrun[0], EXP_1, active_tasks);
	CALC_LOAD(avenrun[1], EXP_5, active_tasks);
	CALC_LOAD(avenrun[2], EXP_15, active_tasks);
}

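/* second_overflow() runs once a second from do_timer().  It ages the
 * maximum clock error, converts the outstanding adjtimex() phase offset
 * into a per-tick adjustment (time_adj), handles leap-second insertion
 * and deletion, and writes the software clock back to the CMOS RTC at
 * most once every 660 seconds. */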
static void second_overflow(void)
{
	long ltemp;

	static long last_rtc_update = 0;
	extern int set_rtc_mmss(unsigned long);

	time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
	    0x70000000 : (time_maxerror + time_tolerance);

	if (time_offset < 0) {
		ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
		time_adj = - time_adj;
	} else if (time_offset > 0) {
		ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
	} else {
		time_adj = 0;
	}

	time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
	    + FINETUNE;

	switch (time_status) {
	case TIME_INS:
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			time_status = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 GMT\n");
		}
		break;

	case TIME_DEL:
		if (xtime.tv_sec % 86400 == 86399) {
			xtime.tv_sec++;
			time_status = TIME_OK;
			printk("Clock: deleting leap second 23:59:59 GMT\n");
		}
		break;

	case TIME_OOP:
		time_status = TIME_OK;
		break;
	}
	if (xtime.tv_sec > last_rtc_update + 660)
		if (set_rtc_mmss(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
}

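/* timer_bh() is the timer bottom half, run with interrupts enabled after
 * the timer interrupt.  It fires every dynamic timer whose delta has
 * reached zero, then every static timer_table entry whose 'expires' has
 * passed, clearing that entry's bit in timer_active. */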
static void timer_bh(void * unused)
{
	unsigned long mask;
	struct timer_struct *tp;

	cli();
	while (next_timer && next_timer->expires == 0) {
		void (*fn)(unsigned long) = next_timer->function;
		unsigned long data = next_timer->data;
		next_timer = next_timer->next;
		sti();
		fn(data);
		cli();
	}
	sti();

	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;
		tp->fn();
		sti();
	}
}

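/* do_timer() is the timer interrupt handler.  It advances xtime by one
 * (possibly adjtime-skewed) tick, increments jiffies, updates the load
 * average, charges user or system time to the current task, drives the
 * virtual and profiling interval timers, decrements the head of the
 * dynamic timer list and marks TIMER_BH whenever a timer needs running. */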
static void do_timer(struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;

	long ltemp;

	time_phase += time_adj;
	if (time_phase < -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick - ltemp;
	}
	else if (time_phase > FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + ltemp;
	} else
		xtime.tv_usec += tick;

	if (time_adjust) {
		if (time_adjust > tickadj)
			ltemp = tickadj;
		else if (time_adjust < -tickadj)
			ltemp = -tickadj;
		else
			ltemp = time_adjust;

		time_adjust -= ltemp;

		tick = 1000000/HZ + ltemp;
	}
	else
		tick = 1000000/HZ;

	if (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();
	}

	jiffies++;
	calc_load();
	if ((VM_MASK & regs->eflags) || (3 & regs->cs)) {
		current->utime++;

		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		current->stime++;
#ifdef CONFIG_PROFILE
		if (prof_buffer && current != task[0]) {
			unsigned long eip = regs->eip;
			eip >>= 2;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}
	if (current == task[0] || (--current->counter) <= 0) {
		current->counter = 0;
		need_resched = 1;
	}

	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	cli();
	itimer_ticks++;
	if (itimer_ticks > itimer_next)
		need_resched = 1;
	if (next_timer) {
		if (next_timer->expires) {
			next_timer->expires--;
			if (!next_timer->expires)
				mark_bh(TIMER_BH);
		} else {
			lost_ticks++;
			mark_bh(TIMER_BH);
		}
	}
	sti();
}

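/* alarm() is implemented on top of the ITIMER_REAL interval timer: the
 * request is converted to an itimerval and the seconds remaining on the
 * previous alarm are returned. */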
asmlinkage int sys_alarm(long seconds)
{
	struct itimerval it_new, it_old;

	it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
	it_new.it_value.tv_sec = seconds;
	it_new.it_value.tv_usec = 0;
	_setitimer(ITIMER_REAL, &it_new, &it_old);
	return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
}

asmlinkage int sys_getpid(void)
{
	return current->pid;
}

asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}

asmlinkage int sys_getuid(void)
{
	return current->uid;
}

asmlinkage int sys_geteuid(void)
{
	return current->euid;
}

asmlinkage int sys_getgid(void)
{
	return current->gid;
}

asmlinkage int sys_getegid(void)
{
	return current->egid;
}

asmlinkage int sys_nice(long increment)
{
	int newprio;

	if (increment < 0 && !suser())
		return -EPERM;
	newprio = current->priority - increment;
	if (newprio < 1)
		newprio = 1;
	if (newprio > 35)
		newprio = 35;
	current->priority = newprio;
	return 0;
}

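/* show_task()/show_state() print a one-line summary of every task slot
 * (state, saved kernel PC, free kernel stack, pid and family links) on
 * the console for debugging. */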
static void show_task(int nr,struct task_struct * p)
{
	static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
	if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[p->state]);
	else
		printk(" ");

	printk(" %08lX ", ((unsigned long *)p->tss.esp)[2]);
	printk("%5lu %5d %6d ",
		p->tss.esp - p->kernel_stack_page, p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
		printk(" ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);
	else
		printk(" ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);
	else
		printk("\n");
}

void show_state(void)
{
	int i;

	printk(" free sibling\n");
	printk(" task PC stack pid father child younger older\n");
	for (i=0 ; i<NR_TASKS ; i++)
		if (task[i])
			show_task(i,task[i]);
}

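/* sched_init() installs the timer bottom half, sets up the TSS and LDT
 * descriptors for task 0 in the GDT, hooks int 0x80 to system_call(),
 * clears the remaining task slots and their GDT descriptors, programs
 * channel 0 of the 8253/8254 interval timer for HZ interrupts and claims
 * the timer IRQ for do_timer(). */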
void sched_init(void)
{
	int i;
	struct desc_struct * p;

	bh_base[TIMER_BH].routine = timer_bh;
	if (sizeof(struct sigaction) != 16)
		panic("Struct sigaction MUST be 16 bytes");
	set_tss_desc(gdt+FIRST_TSS_ENTRY,&init_task.tss);
	set_ldt_desc(gdt+FIRST_LDT_ENTRY,&default_ldt,1);
	set_system_gate(0x80,&system_call);
	p = gdt+2+FIRST_TSS_ENTRY;
	for(i=1 ; i<NR_TASKS ; i++) {
		task[i] = NULL;
		p->a = p->b = 0;
		p++;
		p->a = p->b = 0;
		p++;
	}

	__asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
	load_TR(0);
	load_ldt(0);
	outb_p(0x34,0x43);
	outb_p(LATCH & 0xff , 0x40);
	outb(LATCH >> 8 , 0x40);
	if (request_irq(TIMER_IRQ,(void (*)(int)) do_timer) != 0)
		panic("Could not allocate timer IRQ!");
}