root/kernel/sched.c


DEFINITIONS

This source file includes the following definitions.
  1. math_state_restore
  2. math_emulate
  3. schedule
  4. sys_pause
  5. wake_up
  6. wake_up_interruptible
  7. __down
  8. __sleep_on
  9. interruptible_sleep_on
  10. sleep_on
  11. add_timer
  12. del_timer
  13. count_active_tasks
  14. calc_load
  15. second_overflow
  16. timer_bh
  17. tqueue_bh
  18. do_timer
  19. sys_alarm
  20. sys_getpid
  21. sys_getppid
  22. sys_getuid
  23. sys_geteuid
  24. sys_getgid
  25. sys_getegid
  26. sys_nice
  27. show_task
  28. show_state
  29. sched_init

   1 /*
   2  *  linux/kernel/sched.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  * 'sched.c' is the main kernel file. It contains scheduling primitives
    9  * (sleep_on, wake_up, schedule etc) as well as a number of simple system
   10  * call functions (like getpid(), which just extracts a field from
   11  * the current task).
  12  */
  13 
  14 #include <linux/config.h>
  15 #include <linux/signal.h>
  16 #include <linux/sched.h>
  17 #include <linux/timer.h>
  18 #include <linux/kernel.h>
  19 #include <linux/kernel_stat.h>
  20 #include <linux/fdreg.h>
  21 #include <linux/errno.h>
  22 #include <linux/time.h>
  23 #include <linux/ptrace.h>
  24 #include <linux/segment.h>
  25 #include <linux/delay.h>
  26 #include <linux/interrupt.h>
  27 #include <linux/tqueue.h>
  28 
  29 #include <asm/system.h>
  30 #include <asm/io.h>
  31 #include <asm/segment.h>
  32 
  33 #define TIMER_IRQ 0
  34 
  35 #include <linux/timex.h>
  36 
  37 /*
  38  * kernel variables
  39  */
  40 long tick = 1000000 / HZ;               /* timer interrupt period */
  41 volatile struct timeval xtime;          /* The current time */
  42 int tickadj = 500/HZ;                   /* microsecs */
  43 
  44 DECLARE_TASK_QUEUE(tq_timer);
  45 
  46 /*
  47  * phase-lock loop variables
  48  */
  49 int time_status = TIME_BAD;     /* clock synchronization status */
  50 long time_offset = 0;           /* time adjustment (us) */
  51 long time_constant = 0;         /* pll time constant */
  52 long time_tolerance = MAXFREQ;  /* frequency tolerance (ppm) */
  53 long time_precision = 1;        /* clock precision (us) */
  54 long time_maxerror = 0x70000000;/* maximum error */
  55 long time_esterror = 0x70000000;/* estimated error */
  56 long time_phase = 0;            /* phase offset (scaled us) */
  57 long time_freq = 0;             /* frequency offset (scaled ppm) */
  58 long time_adj = 0;              /* tick adjust (scaled 1 / HZ) */
  59 long time_reftime = 0;          /* time at last adjustment (s) */
  60 
  61 long time_adjust = 0;
  62 long time_adjust_step = 0;
  63 
  64 int need_resched = 0;
  65 
  66 /*
  67  * Tell us the machine setup..
  68  */
  69 int hard_math = 0;              /* set by boot/head.S */
  70 int x86 = 0;                    /* set by boot/head.S to 3 or 4 */
  71 int ignore_irq13 = 0;           /* set if exception 16 works */
  72 int wp_works_ok = 0;            /* set if paging hardware honours WP */ 
  73 
  74 /*
  75  * Bus types ..
  76  */
  77 int EISA_bus = 0;
  78 
  79 extern int _setitimer(int, struct itimerval *, struct itimerval *);
  80 unsigned long * prof_buffer = NULL;
  81 unsigned long prof_len = 0;
  82 
  83 #define _S(nr) (1<<((nr)-1))
  84 
  85 extern void mem_use(void);
  86 
  87 extern int timer_interrupt(void);
  88 asmlinkage int system_call(void);
  89 
  90 static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
  91 struct task_struct init_task = INIT_TASK;
  92 
  93 unsigned long volatile jiffies=0;
  94 
  95 struct task_struct *current = &init_task;
  96 struct task_struct *last_task_used_math = NULL;
  97 
  98 struct task_struct * task[NR_TASKS] = {&init_task, };
  99 
 100 long user_stack [ PAGE_SIZE>>2 ] = { STACK_MAGIC, };
 101 
 102 struct {
 103         long * a;
 104         short b;
 105         } stack_start = { & user_stack [PAGE_SIZE>>2] , KERNEL_DS };
 106 
 107 struct kernel_stat kstat = { 0 };
 108 
 109 /*
 110  *  'math_state_restore()' saves the current math information in the
  111  * old task's math state area, and loads the new state from the current task.
 112  *
 113  * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 114  * Don't touch unless you *really* know how it works.
 115  */
 116 asmlinkage void math_state_restore(void)
 117 {
 118         __asm__ __volatile__("clts");
 119         if (last_task_used_math == current)
 120                 return;
 121         timer_table[COPRO_TIMER].expires = jiffies+50;
 122         timer_active |= 1<<COPRO_TIMER; 
 123         if (last_task_used_math)
 124                 __asm__("fnsave %0":"=m" (last_task_used_math->tss.i387));
 125         else
 126                 __asm__("fnclex");
 127         last_task_used_math = current;
 128         if (current->used_math) {
 129                 __asm__("frstor %0": :"m" (current->tss.i387));
 130         } else {
 131                 __asm__("fninit");
 132                 current->used_math=1;
 133         }
 134         timer_active &= ~(1<<COPRO_TIMER);
 135 }
 136 
 137 #ifndef CONFIG_MATH_EMULATION
 138 
 139 asmlinkage void math_emulate(long arg)
 140 {
 141   printk("math-emulation not enabled and no coprocessor found.\n");
 142   printk("killing %s.\n",current->comm);
 143   send_sig(SIGFPE,current,1);
 144   schedule();
 145 }
 146 
 147 #endif /* CONFIG_MATH_EMULATION */
 148 
 149 unsigned long itimer_ticks = 0;
 150 unsigned long itimer_next = ~0;
 151 static unsigned long lost_ticks = 0;
 152 
 153 /*
 154  *  'schedule()' is the scheduler function. It's a very simple and nice
 155  * scheduler: it's not perfect, but certainly works for most things.
 156  * The one thing you might take a look at is the signal-handler code here.
 157  *
 158  *   NOTE!!  Task 0 is the 'idle' task, which gets called when no other
  159  * tasks can run. It cannot be killed, and it cannot sleep. The 'state'
 160  * information in task[0] is never used.
 161  *
 162  * The "confuse_gcc" goto is used only to get better assembly code..
  163  * Dijkstra probably hates me.
 164  */
 165 asmlinkage void schedule(void)
 166 {
 167         int c;
 168         struct task_struct * p;
 169         struct task_struct * next;
 170         unsigned long ticks;
 171 
 172 /* check alarm, wake up any interruptible tasks that have got a signal */
 173 
 174         if (intr_count) {
 175                 printk("Aiee: scheduling in interrupt\n");
 176                 intr_count = 0;
 177         }
 178         cli();
 179         ticks = itimer_ticks;
 180         itimer_ticks = 0;
 181         itimer_next = ~0;
 182         sti();
 183         need_resched = 0;
 184         p = &init_task;
 185         for (;;) {
 186                 if ((p = p->next_task) == &init_task)
 187                         goto confuse_gcc1;
 188                 if (ticks && p->it_real_value) {
 189                         if (p->it_real_value <= ticks) {
 190                                 send_sig(SIGALRM, p, 1);
 191                                 if (!p->it_real_incr) {
 192                                         p->it_real_value = 0;
 193                                         goto end_itimer;
 194                                 }
 195                                 do {
 196                                         p->it_real_value += p->it_real_incr;
 197                                 } while (p->it_real_value <= ticks);
 198                         }
 199                         p->it_real_value -= ticks;
 200                         if (p->it_real_value < itimer_next)
 201                                 itimer_next = p->it_real_value;
 202                 }
 203 end_itimer:
 204                 if (p->state != TASK_INTERRUPTIBLE)
 205                         continue;
 206                 if (p->signal & ~p->blocked) {
 207                         p->state = TASK_RUNNING;
 208                         continue;
 209                 }
 210                 if (p->timeout && p->timeout <= jiffies) {
 211                         p->timeout = 0;
 212                         p->state = TASK_RUNNING;
 213                 }
 214         }
 215 confuse_gcc1:
 216 
 217 /* this is the scheduler proper: */
 218 #if 0
 219         /* give processes that go to sleep a bit higher priority.. */
 220         /* This depends on the values for TASK_XXX */
 221         /* This gives smoother scheduling for some things, but */
 222         /* can be very unfair under some circumstances, so.. */
 223         if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
 224             current->counter < current->priority*2) {
 225                 ++current->counter;
 226         }
 227 #endif
 228         c = -1000;
 229         next = p = &init_task;
 230         for (;;) {
 231                 if ((p = p->next_task) == &init_task)
 232                         goto confuse_gcc2;
 233                 if (p->state == TASK_RUNNING && p->counter > c)
 234                         c = p->counter, next = p;
 235         }
 236 confuse_gcc2:
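              /*
               * If no runnable task has any time slice left (c == 0), recharge
               * every task's counter to counter/2 + priority.  A task that was
               * just running starts over at its priority, while a task that
               * keeps sleeping through successive recharges converges towards
               * roughly twice its priority (e.g. with priority 15: 15, 22, 26,
               * 28, ...), so it gets a modest boost when it finally wakes up.
               */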
 237         if (!c) {
 238                 for_each_task(p)
 239                         p->counter = (p->counter >> 1) + p->priority;
 240         }
 241         if (current == next)
 242                 return;
 243         kstat.context_swtch++;
 244         switch_to(next);
 245         /* Now maybe reload the debug registers */
 246         if(current->debugreg[7]){
 247                 loaddebug(0);
 248                 loaddebug(1);
 249                 loaddebug(2);
 250                 loaddebug(3);
 251                 loaddebug(6);
  252         }
 253 }
 254 
 255 asmlinkage int sys_pause(void)
 256 {
 257         current->state = TASK_INTERRUPTIBLE;
 258         schedule();
 259         return -ERESTARTNOHAND;
 260 }
 261 
 262 /*
 263  * wake_up doesn't wake up stopped processes - they have to be awakened
 264  * with signals or similar.
 265  *
 266  * Note that this doesn't need cli-sti pairs: interrupts may not change
 267  * the wait-queue structures directly, but only call wake_up() to wake
  268  * a process. The woken process must then remove its own entry from the queue.
 269  */
 270 void wake_up(struct wait_queue **q)
 271 {
 272         struct wait_queue *tmp;
 273         struct task_struct * p;
 274 
 275         if (!q || !(tmp = *q))
 276                 return;
 277         do {
 278                 if ((p = tmp->task) != NULL) {
 279                         if ((p->state == TASK_UNINTERRUPTIBLE) ||
 280                             (p->state == TASK_INTERRUPTIBLE)) {
 281                                 p->state = TASK_RUNNING;
 282                                 if (p->counter > current->counter)
 283                                         need_resched = 1;
 284                         }
 285                 }
 286                 if (!tmp->next) {
 287                         printk("wait_queue is bad (eip = %08lx)\n",((unsigned long *) q)[-1]);
 288                         printk("        q = %p\n",q);
 289                         printk("       *q = %p\n",*q);
 290                         printk("      tmp = %p\n",tmp);
 291                         break;
 292                 }
 293                 tmp = tmp->next;
 294         } while (tmp != *q);
 295 }
 296 
 297 void wake_up_interruptible(struct wait_queue **q)
 298 {
 299         struct wait_queue *tmp;
 300         struct task_struct * p;
 301 
 302         if (!q || !(tmp = *q))
 303                 return;
 304         do {
 305                 if ((p = tmp->task) != NULL) {
 306                         if (p->state == TASK_INTERRUPTIBLE) {
 307                                 p->state = TASK_RUNNING;
 308                                 if (p->counter > current->counter)
 309                                         need_resched = 1;
 310                         }
 311                 }
 312                 if (!tmp->next) {
 313                         printk("wait_queue is bad (eip = %08lx)\n",((unsigned long *) q)[-1]);
 314                         printk("        q = %p\n",q);
 315                         printk("       *q = %p\n",*q);
 316                         printk("      tmp = %p\n",tmp);
 317                         break;
 318                 }
 319                 tmp = tmp->next;
 320         } while (tmp != *q);
 321 }
 322 
 323 void __down(struct semaphore * sem)
 324 {
 325         struct wait_queue wait = { current, NULL };
 326         add_wait_queue(&sem->wait, &wait);
 327         current->state = TASK_UNINTERRUPTIBLE;
 328         while (sem->count <= 0) {
 329                 schedule();
 330                 current->state = TASK_UNINTERRUPTIBLE;
 331         }
 332         current->state = TASK_RUNNING;
 333         remove_wait_queue(&sem->wait, &wait);
 334 }
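
      /*
       * A minimal, disabled usage sketch for the semaphore slow path above,
       * assuming the down()/up() helpers from <asm/semaphore.h>, for which
       * __down() is the blocking fallback when the semaphore is already held.
       * The foo_* names are hypothetical.
       */
      #if 0
      static struct semaphore foo_sem;

      static void foo_sem_init(void)
      {
              foo_sem.count = 1;              /* one holder at a time */
              foo_sem.wait = NULL;            /* no waiters yet */
      }

      static void foo_critical(void)
      {
              down(&foo_sem);                 /* may sleep uninterruptibly in __down() */
              /* ... touch the shared resource ... */
              up(&foo_sem);                   /* release it and wake a waiter */
      }
      #endif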
 335 
 336 static inline void __sleep_on(struct wait_queue **p, int state)
 337 {
 338         unsigned long flags;
 339         struct wait_queue wait = { current, NULL };
 340 
 341         if (!p)
 342                 return;
 343         if (current == task[0])
 344                 panic("task[0] trying to sleep");
 345         current->state = state;
 346         add_wait_queue(p, &wait);
 347         save_flags(flags);
 348         sti();
 349         schedule();
 350         remove_wait_queue(p, &wait);
 351         restore_flags(flags);
 352 }
 353 
 354 void interruptible_sleep_on(struct wait_queue **p)
 355 {
 356         __sleep_on(p,TASK_INTERRUPTIBLE);
 357 }
 358 
 359 void sleep_on(struct wait_queue **p)
 360 {
 361         __sleep_on(p,TASK_UNINTERRUPTIBLE);
 362 }
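
      /*
       * A minimal, disabled sketch of the usual sleep_on()/wake_up() pattern:
       * a hypothetical driver blocks in its read routine until its interrupt
       * handler reports data.  The foo_* names are illustrative only.
       */
      #if 0
      static struct wait_queue * foo_wait = NULL;
      static int foo_data_ready = 0;

      static void foo_interrupt(int irq)
      {
              foo_data_ready = 1;
              wake_up_interruptible(&foo_wait);       /* interrupt side only wakes */
      }

      static int foo_read(void)
      {
              while (!foo_data_ready) {
                      interruptible_sleep_on(&foo_wait);
                      if (current->signal & ~current->blocked)
                              return -ERESTARTSYS;    /* woken by a signal instead */
              }
              foo_data_ready = 0;
              return 0;
      }
      #endif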
 363 
 364 static struct timer_list * next_timer = NULL;
 365 
 366 void add_timer(struct timer_list * timer)
 367 {
 368         unsigned long flags;
 369         struct timer_list ** p;
 370 
 371         if (!timer)
 372                 return;
 373         timer->next = NULL;
 374         p = &next_timer;
 375         save_flags(flags);
 376         cli();
 377         while (*p) {
 378                 if ((*p)->expires > timer->expires) {
 379                         (*p)->expires -= timer->expires;
 380                         timer->next = *p;
 381                         break;
 382                 }
 383                 timer->expires -= (*p)->expires;
 384                 p = &(*p)->next;
 385         }
 386         *p = timer;
 387         restore_flags(flags);
 388 }
 389 
 390 int del_timer(struct timer_list * timer)
 391 {
 392         unsigned long flags;
 393         unsigned long expires = 0;
 394         struct timer_list **p;
 395 
 396         p = &next_timer;
 397         save_flags(flags);
 398         cli();
 399         while (*p) {
 400                 if (*p == timer) {
 401                         if ((*p = timer->next) != NULL)
 402                                 (*p)->expires += timer->expires;
 403                         timer->expires += expires;
 404                         restore_flags(flags);
 405                         return 1;
 406                 }
 407                 expires += (*p)->expires;
 408                 p = &(*p)->next;
 409         }
 410         restore_flags(flags);
 411         return 0;
 412 }
 413 
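      /*
       * A minimal, disabled sketch of the dynamic timer_list interface above
       * (distinct from the static timer_table[]/timer_active mechanism
       * declared just below, which timer_bh() also services).  In this
       * version 'expires' is a tick count relative to now: add_timer() folds
       * it into the delta chain and do_timer() counts the head entry down,
       * so the timer fires when its delta reaches zero.  The foo_* names are
       * hypothetical.
       */
      #if 0
      static struct timer_list foo_timer;

      static void foo_timeout(unsigned long data)
      {
              printk("foo: request %lu timed out\n", data);
      }

      static void foo_arm_timeout(unsigned long request)
      {
              foo_timer.expires = 2*HZ;       /* roughly two seconds from now */
              foo_timer.data = request;       /* handed back to foo_timeout() */
              foo_timer.function = foo_timeout;
              add_timer(&foo_timer);
      }

      static void foo_cancel_timeout(void)
      {
              if (!del_timer(&foo_timer))
                      printk("foo: timeout had already fired\n");
      }
      #endif
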
 414 unsigned long timer_active = 0;
 415 struct timer_struct timer_table[32];
 416 
 417 /*
  418  * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 419  * imply that avenrun[] is the standard name for this kind of thing.
 420  * Nothing else seems to be standardized: the fractional size etc
 421  * all seem to differ on different machines.
 422  */
 423 unsigned long avenrun[3] = { 0,0,0 };
 424 
 425 /*
 426  * Nr of active tasks - counted in fixed-point numbers
 427  */
 428 static unsigned long count_active_tasks(void)
 429 {
 430         struct task_struct **p;
 431         unsigned long nr = 0;
 432 
 433         for(p = &LAST_TASK; p > &FIRST_TASK; --p)
 434                 if (*p && ((*p)->state == TASK_RUNNING ||
 435                            (*p)->state == TASK_UNINTERRUPTIBLE ||
 436                            (*p)->state == TASK_SWAPPING))
 437                         nr += FIXED_1;
 438         return nr;
 439 }
 440 
 441 static inline void calc_load(void)
 442 {
 443         unsigned long active_tasks; /* fixed-point */
 444         static int count = LOAD_FREQ;
 445 
 446         if (count-- > 0)
 447                 return;
 448         count = LOAD_FREQ;
 449         active_tasks = count_active_tasks();
 450         CALC_LOAD(avenrun[0], EXP_1, active_tasks);
 451         CALC_LOAD(avenrun[1], EXP_5, active_tasks);
 452         CALC_LOAD(avenrun[2], EXP_15, active_tasks);
 453 }
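
      /*
       * For reference: CALC_LOAD lives in <linux/sched.h> and amounts to
       * (roughly, the exact constants may differ)
       *
       *      load = (load * exp + n * (FIXED_1 - exp)) >> FSHIFT;
       *
       * a fixed-point exponentially decaying average with FIXED_1 == 1<<FSHIFT
       * standing for "1.0" and exp ~= FIXED_1 * e^(-interval/window); e.g.
       * EXP_1 ~= 2048 * e^(-5s/60s) ~= 1884 for the one-minute average when
       * FSHIFT is 11 and LOAD_FREQ is 5*HZ.  With exactly one task runnable
       * the average converges to FIXED_1, which userspace reports as 1.00.
       */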
 454 
 455 /*
 456  * this routine handles the overflow of the microsecond field
 457  *
 458  * The tricky bits of code to handle the accurate clock support
 459  * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 460  * They were originally developed for SUN and DEC kernels.
 461  * All the kudos should go to Dave for this stuff.
 462  *
 463  * These were ported to Linux by Philip Gladstone.
 464  */
 465 static void second_overflow(void)
 466 {
 467         long ltemp;
 468         /* last time the cmos clock got updated */
 469         static long last_rtc_update=0;
 470         extern int set_rtc_mmss(unsigned long);
 471 
 472         /* Bump the maxerror field */
 473         time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
 474           0x70000000 : (time_maxerror + time_tolerance);
 475 
 476         /* Run the PLL */
 477         if (time_offset < 0) {
 478                 ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
 479                 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
 480                 time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
 481                 time_adj = - time_adj;
 482         } else if (time_offset > 0) {
 483                 ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
 484                 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
 485                 time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
 486         } else {
 487                 time_adj = 0;
 488         }
 489 
 490         time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
 491             + FINETUNE;
 492 
 493         /* Handle the leap second stuff */
 494         switch (time_status) {
 495                 case TIME_INS:
 496                 /* ugly divide should be replaced */
 497                 if (xtime.tv_sec % 86400 == 0) {
 498                         xtime.tv_sec--; /* !! */
 499                         time_status = TIME_OOP;
 500                         printk("Clock: inserting leap second 23:59:60 GMT\n");
 501                 }
 502                 break;
 503 
 504                 case TIME_DEL:
 505                 /* ugly divide should be replaced */
 506                 if (xtime.tv_sec % 86400 == 86399) {
 507                         xtime.tv_sec++;
 508                         time_status = TIME_OK;
 509                         printk("Clock: deleting leap second 23:59:59 GMT\n");
 510                 }
 511                 break;
 512 
 513                 case TIME_OOP:
 514                 time_status = TIME_OK;
 515                 break;
 516         }
 517         if (xtime.tv_sec > last_rtc_update + 660)
 518           if (set_rtc_mmss(xtime.tv_sec) == 0)
 519             last_rtc_update = xtime.tv_sec;
 520           else
 521             last_rtc_update = xtime.tv_sec - 600; /* do it again in one min */
 522 }
 523 
 524 /*
 525  * disregard lost ticks for now.. We don't care enough.
 526  */
 527 static void timer_bh(void * unused)
 528 {
 529         unsigned long mask;
 530         struct timer_struct *tp;
 531 
 532         cli();
 533         while (next_timer && next_timer->expires == 0) {
 534                 void (*fn)(unsigned long) = next_timer->function;
 535                 unsigned long data = next_timer->data;
 536                 next_timer = next_timer->next;
 537                 sti();
 538                 fn(data);
 539                 cli();
 540         }
 541         sti();
 542         
 543         for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
 544                 if (mask > timer_active)
 545                         break;
 546                 if (!(mask & timer_active))
 547                         continue;
 548                 if (tp->expires > jiffies)
 549                         continue;
 550                 timer_active &= ~mask;
 551                 tp->fn();
 552                 sti();
 553         }
 554 }
 555 
 556 void tqueue_bh(void * unused)
 557 {
 558         run_task_queue(&tq_timer);
 559 }
 560 
 561 /*
 562  * The int argument is really a (struct pt_regs *), in case the
 563  * interrupt wants to know from where it was called. The timer
 564  * irq uses this to decide if it should update the user or system
 565  * times.
 566  */
 567 static void do_timer(struct pt_regs * regs)
 568 {
 569         unsigned long mask;
 570         struct timer_struct *tp;
 571 
 572         long ltemp;
 573 
  574         /* Advance the phase; once it adds up to a full microsecond,
  575          * advance the tick by that amount.
 576          */
 577         time_phase += time_adj;
 578         if (time_phase < -FINEUSEC) {
 579                 ltemp = -time_phase >> SHIFT_SCALE;
 580                 time_phase += ltemp << SHIFT_SCALE;
 581                 xtime.tv_usec += tick + time_adjust_step - ltemp;
 582         }
 583         else if (time_phase > FINEUSEC) {
 584                 ltemp = time_phase >> SHIFT_SCALE;
 585                 time_phase -= ltemp << SHIFT_SCALE;
 586                 xtime.tv_usec += tick + time_adjust_step + ltemp;
 587         } else
 588                 xtime.tv_usec += tick + time_adjust_step;
 589 
 590         if (time_adjust)
 591         {
 592             /* We are doing an adjtime thing. 
 593              *
 594              * Modify the value of the tick for next time.
 595              * Note that a positive delta means we want the clock
  596              * to run fast: the tick should then be bigger.
  597              *
  598              * Limit the size of the step for the *next* tick to
  599              * the range -tickadj .. +tickadj.
 600              */
 601              if (time_adjust > tickadj)
 602                time_adjust_step = tickadj;
 603              else if (time_adjust < -tickadj)
 604                time_adjust_step = -tickadj;
 605              else
 606                time_adjust_step = time_adjust;
 607              
  608             /* Reduce the remaining adjustment by this step */
 609             time_adjust -= time_adjust_step;
 610         }
 611         else
 612             time_adjust_step = 0;
 613 
 614         if (xtime.tv_usec >= 1000000) {
 615             xtime.tv_usec -= 1000000;
 616             xtime.tv_sec++;
 617             second_overflow();
 618         }
 619 
 620         jiffies++;
 621         calc_load();
 622         if ((VM_MASK & regs->eflags) || (3 & regs->cs)) {
 623                 current->utime++;
 624                 if (current != task[0]) {
 625                         if (current->priority < 15)
 626                                 kstat.cpu_nice++;
 627                         else
 628                                 kstat.cpu_user++;
 629                 }
 630                 /* Update ITIMER_VIRT for current task if not in a system call */
 631                 if (current->it_virt_value && !(--current->it_virt_value)) {
 632                         current->it_virt_value = current->it_virt_incr;
 633                         send_sig(SIGVTALRM,current,1);
 634                 }
 635         } else {
 636                 current->stime++;
 637                 if(current != task[0])
 638                         kstat.cpu_system++;
 639 #ifdef CONFIG_PROFILE
 640                 if (prof_buffer && current != task[0]) {
 641                         unsigned long eip = regs->eip;
 642                         eip >>= 2;
 643                         if (eip < prof_len)
 644                                 prof_buffer[eip]++;
 645                 }
 646 #endif
 647         }
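              /*
               * Charge the running task one tick of its time slice; once the
               * counter is used up, ask for a reschedule.  task[0], the idle
               * task, is never charged.
               */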
 648         if (current != task[0] && 0 > --current->counter) {
 649                 current->counter = 0;
 650                 need_resched = 1;
 651         }
 652         /* Update ITIMER_PROF for the current task */
 653         if (current->it_prof_value && !(--current->it_prof_value)) {
 654                 current->it_prof_value = current->it_prof_incr;
 655                 send_sig(SIGPROF,current,1);
 656         }
 657         for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
 658                 if (mask > timer_active)
 659                         break;
 660                 if (!(mask & timer_active))
 661                         continue;
 662                 if (tp->expires > jiffies)
 663                         continue;
 664                 mark_bh(TIMER_BH);
 665         }
 666         cli();
 667         itimer_ticks++;
 668         if (itimer_ticks > itimer_next)
 669                 need_resched = 1;
 670         if (next_timer) {
 671                 if (next_timer->expires) {
 672                         next_timer->expires--;
 673                         if (!next_timer->expires)
 674                                 mark_bh(TIMER_BH);
 675                 } else {
 676                         lost_ticks++;
 677                         mark_bh(TIMER_BH);
 678                 }
 679         }
 680         if (tq_timer != &tq_last)
 681                 mark_bh(TQUEUE_BH);
 682         sti();
 683 }
 684 
 685 asmlinkage int sys_alarm(long seconds)
 686 {
 687         struct itimerval it_new, it_old;
 688 
 689         it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
 690         it_new.it_value.tv_sec = seconds;
 691         it_new.it_value.tv_usec = 0;
 692         _setitimer(ITIMER_REAL, &it_new, &it_old);
 693         return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
 694 }
 695 
 696 asmlinkage int sys_getpid(void)
 697 {
 698         return current->pid;
 699 }
 700 
 701 asmlinkage int sys_getppid(void)
 702 {
 703         return current->p_opptr->pid;
 704 }
 705 
 706 asmlinkage int sys_getuid(void)
 707 {
 708         return current->uid;
 709 }
 710 
 711 asmlinkage int sys_geteuid(void)
 712 {
 713         return current->euid;
 714 }
 715 
 716 asmlinkage int sys_getgid(void)
 717 {
 718         return current->gid;
 719 }
 720 
 721 asmlinkage int sys_getegid(void)
 722 {
 723         return current->egid;
 724 }
 725 
 726 asmlinkage int sys_nice(long increment)
 727 {
 728         int newprio;
 729 
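              /*
               * 'priority' is the counter refill used by schedule() above: a
               * positive increment (being "nicer") lowers it, a negative one
               * (superuser only) raises it, and the result is clamped to the
               * range 1..35.
               */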
 730         if (increment < 0 && !suser())
 731                 return -EPERM;
 732         newprio = current->priority - increment;
 733         if (newprio < 1)
 734                 newprio = 1;
 735         if (newprio > 35)
 736                 newprio = 35;
 737         current->priority = newprio;
 738         return 0;
 739 }
 740 
 741 static void show_task(int nr,struct task_struct * p)
 742 {
 743         unsigned long free;
 744         static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
 745 
 746         printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
 747         if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
 748                 printk(stat_nam[p->state]);
 749         else
 750                 printk(" ");
 751         if (p == current)
 752                 printk(" current  ");
 753         else
 754                 printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);
 755         for (free = 1; free < 1024 ; free++) {
 756                 if (((unsigned long *)p->kernel_stack_page)[free])
 757                         break;
 758         }
 759         printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
 760         if (p->p_cptr)
 761                 printk("%5d ", p->p_cptr->pid);
 762         else
 763                 printk("      ");
 764         if (p->p_ysptr)
 765                 printk("%7d", p->p_ysptr->pid);
 766         else
 767                 printk("       ");
 768         if (p->p_osptr)
 769                 printk(" %5d\n", p->p_osptr->pid);
 770         else
 771                 printk("\n");
 772 }
 773 
 774 void show_state(void)
 775 {
 776         int i;
 777 
 778         printk("                         free                        sibling\n");
 779         printk("  task             PC    stack   pid father child younger older\n");
 780         for (i=0 ; i<NR_TASKS ; i++)
 781                 if (task[i])
 782                         show_task(i,task[i]);
 783 }
 784 
 785 void sched_init(void)
 786 {
 787         int i;
 788         struct desc_struct * p;
 789 
 790         bh_base[TIMER_BH].routine = timer_bh;
 791         bh_base[TQUEUE_BH].routine = tqueue_bh;
 792         if (sizeof(struct sigaction) != 16)
 793                 panic("Struct sigaction MUST be 16 bytes");
 794         set_tss_desc(gdt+FIRST_TSS_ENTRY,&init_task.tss);
 795         set_ldt_desc(gdt+FIRST_LDT_ENTRY,&default_ldt,1);
 796         set_system_gate(0x80,&system_call);
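              /*
               * Clear the TSS and LDT descriptors (two GDT entries per task)
               * for every task slot except init_task, whose descriptors were
               * set up just above.
               */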
 797         p = gdt+2+FIRST_TSS_ENTRY;
 798         for(i=1 ; i<NR_TASKS ; i++) {
 799                 task[i] = NULL;
 800                 p->a=p->b=0;
 801                 p++;
 802                 p->a=p->b=0;
 803                 p++;
 804         }
 805 /* Clear NT, so that we won't have troubles with that later on */
 806         __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
 807         load_TR(0);
 808         load_ldt(0);
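              /*
               * Program PIT channel 0 to raise TIMER_IRQ at HZ interrupts per
               * second.  LATCH is the PIT's 1.193180 MHz input clock divided
               * by HZ (about 11932 when HZ is 100).
               */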
 809         outb_p(0x34,0x43);              /* binary, mode 2, LSB/MSB, ch 0 */
 810         outb_p(LATCH & 0xff , 0x40);    /* LSB */
 811         outb(LATCH >> 8 , 0x40);        /* MSB */
 812         if (request_irq(TIMER_IRQ,(void (*)(int)) do_timer)!=0)
 813                 panic("Could not allocate timer IRQ!");
 814 }
