root/kernel/sched.c


DEFINITIONS

This source file includes the following definitions:
  1. add_to_runqueue
  2. del_from_runqueue
  3. wake_up_process
  4. process_timeout
  5. schedule
  6. sys_pause
  7. wake_up
  8. wake_up_interruptible
  9. __down
  10. __sleep_on
  11. interruptible_sleep_on
  12. sleep_on
  13. add_timer
  14. del_timer
  15. count_active_tasks
  16. calc_load
  17. second_overflow
  18. timer_bh
  19. tqueue_bh
  20. immediate_bh
  21. do_timer
  22. sys_alarm
  23. sys_getpid
  24. sys_getppid
  25. sys_getuid
  26. sys_geteuid
  27. sys_getgid
  28. sys_getegid
  29. sys_nice
  30. show_task
  31. show_state
  32. sched_init

/*
 *  linux/kernel/sched.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * 'sched.c' is the main kernel file. It contains scheduling primitives
 * (sleep_on, wakeup, schedule etc) as well as a number of simple system
 * call functions (like getpid(), which just extracts a field from
 * the current task).
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/fdreg.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/ptrace.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/tqueue.h>
#include <linux/resource.h>
#include <linux/mm.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/pgtable.h>

#include <linux/timex.h>

/*
 * kernel variables
 */
long tick = 1000000 / HZ;               /* timer interrupt period */
volatile struct timeval xtime;          /* The current time */
int tickadj = 500/HZ;                   /* microsecs */

DECLARE_TASK_QUEUE(tq_timer);
DECLARE_TASK_QUEUE(tq_immediate);
DECLARE_TASK_QUEUE(tq_scheduler);

/*
 * phase-lock loop variables
 */
int time_state = TIME_BAD;      /* clock synchronization status */
int time_status = STA_UNSYNC;   /* clock status bits */
long time_offset = 0;           /* time adjustment (us) */
long time_constant = 0;         /* pll time constant */
long time_tolerance = MAXFREQ;  /* frequency tolerance (ppm) */
long time_precision = 1;        /* clock precision (us) */
long time_maxerror = 0x70000000;/* maximum error */
long time_esterror = 0x70000000;/* estimated error */
long time_phase = 0;            /* phase offset (scaled us) */
long time_freq = 0;             /* frequency offset (scaled ppm) */
long time_adj = 0;              /* tick adjust (scaled 1 / HZ) */
long time_reftime = 0;          /* time at last adjustment (s) */

long time_adjust = 0;
long time_adjust_step = 0;

int need_resched = 0;
unsigned long event = 0;

extern int _setitimer(int, struct itimerval *, struct itimerval *);
unsigned long * prof_buffer = NULL;
unsigned long prof_len = 0;
unsigned long prof_shift = 0;

#define _S(nr) (1<<((nr)-1))

extern void mem_use(void);

static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
unsigned long init_user_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;

struct mm_struct init_mm = INIT_MM;
struct task_struct init_task = INIT_TASK;

unsigned long volatile jiffies=0;

struct task_struct *current = &init_task;
struct task_struct *last_task_used_math = NULL;

struct task_struct * task[NR_TASKS] = {&init_task, };

struct kernel_stat kstat = { 0 };

static inline void add_to_runqueue(struct task_struct * p)
{
#if 1   /* sanity tests */
        if (p->next_run || p->prev_run) {
                printk("task already on run-queue\n");
                return;
        }
#endif
        if (p->counter > current->counter + 3)
                need_resched = 1;
        nr_running++;
        (p->next_run = init_task.next_run)->prev_run = p;
        p->prev_run = &init_task;
        init_task.next_run = p;
}

static inline void del_from_runqueue(struct task_struct * p)
{
        struct task_struct *next = p->next_run;
        struct task_struct *prev = p->prev_run;

#if 1   /* sanity tests */
        if (!next || !prev) {
                printk("task not on run-queue\n");
                return;
        }
#endif
        if (p == &init_task) {
                static int nr = 0;
                if (nr < 5) {
                        nr++;
                        printk("idle task may not sleep\n");
                }
                return;
        }
        nr_running--;
        next->prev_run = prev;
        prev->next_run = next;
        p->next_run = NULL;
        p->prev_run = NULL;
}
 139 
 140 /*
 141  * Wake up a process. Put it on the run-queue if it's not
 142  * already there.  The "current" process is always on the
 143  * run-queue (except when the actual re-schedule is in
 144  * progress), and as such you're allowed to do the simpler
 145  * "current->state = TASK_RUNNING" to mark yourself runnable
 146  * without the overhead of this.
 147  */
 148 inline void wake_up_process(struct task_struct * p)
     /* [previous][next][first][last][top][bottom][index][help] */
 149 {
 150         unsigned long flags;
 151 
 152         save_flags(flags);
 153         cli();
 154         p->state = TASK_RUNNING;
 155         if (!p->next_run)
 156                 add_to_runqueue(p);
 157         restore_flags(flags);
 158 }
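
/*
 * Editor's sketch (not in the original source): a typical caller of
 * wake_up_process() from this era.  A driver records which task is
 * waiting on its hardware, and the interrupt handler wakes exactly
 * that task.  "my_dev_waiter" and "my_dev_interrupt" are hypothetical
 * names, and the handler signature assumes the 1.2-era request_irq()
 * contract.
 */
static struct task_struct * my_dev_waiter = NULL;

static void my_dev_interrupt(int irq, struct pt_regs * regs)
{
        if (my_dev_waiter)
                wake_up_process(my_dev_waiter); /* back onto the run-queue */
}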

static void process_timeout(unsigned long __data)
{
        struct task_struct * p = (struct task_struct *) __data;

        p->timeout = 0;
        wake_up_process(p);
}

/*
 *  'schedule()' is the scheduler function. It's a very simple and nice
 * scheduler: it's not perfect, but certainly works for most things.
 *
 * The goto is "interesting".
 *
 *   NOTE!!  Task 0 is the 'idle' task, which gets called when no other
 * tasks can run. It can not be killed, and it cannot sleep. The 'state'
 * information in task[0] is never used.
 */
asmlinkage void schedule(void)
{
        int c;
        struct task_struct * p;
        struct task_struct * next;
        unsigned long timeout = 0;

/* check alarm, wake up any interruptible tasks that have got a signal */

        if (intr_count) {
                printk("Aiee: scheduling in interrupt\n");
                return;
        }
        run_task_queue(&tq_scheduler);

        need_resched = 0;
        cli();
        switch (current->state) {
                case TASK_INTERRUPTIBLE:
                        if (current->signal & ~current->blocked)
                                goto makerunnable;
                        timeout = current->timeout;
                        if (timeout && (timeout <= jiffies)) {
                                current->timeout = 0;
                                timeout = 0;
                makerunnable:
                                current->state = TASK_RUNNING;
                                break;
                        }
                default:
                        del_from_runqueue(current);
                case TASK_RUNNING:
        }
        p = init_task.next_run;
        sti();

/*
 * Note! there may appear new tasks on the run-queue during this, as
 * interrupts are enabled. However, they will be put on front of the
 * list, so our list starting at "p" is essentially fixed.
 */
/* this is the scheduler proper: */
        c = -1000;
        next = &init_task;
        while (p != &init_task) {
                if (p->counter > c)
                        c = p->counter, next = p;
                p = p->next_run;
        }

        /* if all runnable processes have "counter == 0", re-calculate counters */
        if (!c) {
                for_each_task(p)
                        p->counter = (p->counter >> 1) + p->priority;
        }
        if (current != next) {
                struct timer_list timer;

                kstat.context_swtch++;
                if (timeout) {
                        init_timer(&timer);
                        timer.expires = timeout;
                        timer.data = (unsigned long) current;
                        timer.function = process_timeout;
                        add_timer(&timer);
                }
                switch_to(next);
                if (timeout)
                        del_timer(&timer);
        }
}
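
/*
 * Editor's sketch (not in the original source): sleeping with a
 * timeout via the current->timeout path that schedule() implements
 * above.  schedule() arms a one-shot timer around the context switch,
 * and process_timeout() puts the task back on the run-queue if nobody
 * else does first.  "my_wait_for_event" and "q" are hypothetical
 * driver names.
 */
static void my_wait_for_event(struct wait_queue ** q)
{
        struct wait_queue wait = { current, NULL };

        add_wait_queue(q, &wait);
        current->state = TASK_INTERRUPTIBLE;
        current->timeout = jiffies + 2*HZ;      /* give up after ~2 seconds */
        schedule();                             /* returns on wake_up(), signal or timeout */
        current->timeout = 0;                   /* may still be set if woken early */
        remove_wait_queue(q, &wait);
}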

asmlinkage int sys_pause(void)
{
        current->state = TASK_INTERRUPTIBLE;
        schedule();
        return -ERESTARTNOHAND;
}

/*
 * wake_up doesn't wake up stopped processes - they have to be awakened
 * with signals or similar.
 *
 * Note that this doesn't need cli-sti pairs: interrupts may not change
 * the wait-queue structures directly, but only call wake_up() to wake
 * a process. The process must remove itself from the queue once it has
 * woken.
 */
void wake_up(struct wait_queue **q)
{
        struct wait_queue *tmp;
        struct task_struct * p;

        if (!q || !(tmp = *q))
                return;
        do {
                if ((p = tmp->task) != NULL) {
                        if ((p->state == TASK_UNINTERRUPTIBLE) ||
                            (p->state == TASK_INTERRUPTIBLE))
                                wake_up_process(p);
                }
                if (!tmp->next) {
                        printk("wait_queue is bad (eip = %p)\n",
                                __builtin_return_address(0));
                        printk("        q = %p\n",q);
                        printk("       *q = %p\n",*q);
                        printk("      tmp = %p\n",tmp);
                        break;
                }
                tmp = tmp->next;
        } while (tmp != *q);
}

void wake_up_interruptible(struct wait_queue **q)
{
        struct wait_queue *tmp;
        struct task_struct * p;

        if (!q || !(tmp = *q))
                return;
        do {
                if ((p = tmp->task) != NULL) {
                        if (p->state == TASK_INTERRUPTIBLE)
                                wake_up_process(p);
                }
                if (!tmp->next) {
                        printk("wait_queue is bad (eip = %p)\n",
                                __builtin_return_address(0));
                        printk("        q = %p\n",q);
                        printk("       *q = %p\n",*q);
                        printk("      tmp = %p\n",tmp);
                        break;
                }
                tmp = tmp->next;
        } while (tmp != *q);
}

void __down(struct semaphore * sem)
{
        struct wait_queue wait = { current, NULL };
        add_wait_queue(&sem->wait, &wait);
        current->state = TASK_UNINTERRUPTIBLE;
        while (sem->count <= 0) {
                schedule();
                current->state = TASK_UNINTERRUPTIBLE;
        }
        current->state = TASK_RUNNING;
        remove_wait_queue(&sem->wait, &wait);
}
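
/*
 * Editor's sketch: __down() above is only the contended slow path.  In
 * this era the down()/up() inlines in <asm/semaphore.h> adjust
 * sem->count and fall into __down() when the semaphore is taken; up()
 * increments the count and wakes a sleeper via sem->wait.  Typical use
 * as a mutex (assuming the usual MUTEX initializer, { 1, NULL }):
 */
static struct semaphore my_lock = MUTEX;

static void my_critical_section(void)
{
        down(&my_lock);         /* may sleep in TASK_UNINTERRUPTIBLE */
        /* ... touch the shared data ... */
        up(&my_lock);
}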

static inline void __sleep_on(struct wait_queue **p, int state)
{
        unsigned long flags;
        struct wait_queue wait = { current, NULL };

        if (!p)
                return;
        if (current == task[0])
                panic("task[0] trying to sleep");
        current->state = state;
        add_wait_queue(p, &wait);
        save_flags(flags);
        sti();
        schedule();
        remove_wait_queue(p, &wait);
        restore_flags(flags);
}

void interruptible_sleep_on(struct wait_queue **p)
{
        __sleep_on(p,TASK_INTERRUPTIBLE);
}

void sleep_on(struct wait_queue **p)
{
        __sleep_on(p,TASK_UNINTERRUPTIBLE);
}
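
/*
 * Editor's sketch: the classic producer/consumer idiom built from the
 * primitives above.  A wait-queue head is just a NULL-initialized
 * pointer.  Because sleep_on()-style waiting can race with the wakeup,
 * the condition is re-tested in a loop; the signal check mirrors the
 * one schedule() itself performs.  All "my_*" names are hypothetical.
 */
static struct wait_queue * my_data_wait = NULL;
static volatile int my_data_ready = 0;

static int my_read_data(void)
{
        while (!my_data_ready) {
                interruptible_sleep_on(&my_data_wait);
                if (current->signal & ~current->blocked)
                        return -ERESTARTSYS;    /* woken by a signal */
        }
        my_data_ready = 0;
        return 0;
}

static void my_supply_data(void)        /* e.g. from an interrupt */
{
        my_data_ready = 1;
        wake_up_interruptible(&my_data_wait);
}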

/*
 * The head of the timer-list has an "expires" field of ~0 (the
 * maximum unsigned value), and the sorting routine counts on this..
 */
static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
#define SLOW_BUT_DEBUGGING_TIMERS 1

void add_timer(struct timer_list * timer)
{
        unsigned long flags;
        struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
        if (timer->next || timer->prev) {
                printk("add_timer() called with non-zero list from %p\n",
                        __builtin_return_address(0));
                return;
        }
#endif
        p = &timer_head;
        save_flags(flags);
        cli();
        do {
                p = p->next;
        } while (timer->expires > p->expires);
        timer->next = p;
        timer->prev = p->prev;
        p->prev = timer;
        timer->prev->next = timer;
        restore_flags(flags);
}

int del_timer(struct timer_list * timer)
{
        unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
        struct timer_list * p;

        p = &timer_head;
        save_flags(flags);
        cli();
        while ((p = p->next) != &timer_head) {
                if (p == timer) {
                        timer->next->prev = timer->prev;
                        timer->prev->next = timer->next;
                        timer->next = timer->prev = NULL;
                        restore_flags(flags);
                        return 1;
                }
        }
        if (timer->next || timer->prev)
                printk("del_timer() called from %p with timer not initialized\n",
                        __builtin_return_address(0));
        restore_flags(flags);
        return 0;
#else
        save_flags(flags);
        cli();
        if (timer->next) {
                timer->next->prev = timer->prev;
                timer->prev->next = timer->next;
                timer->next = timer->prev = NULL;
                restore_flags(flags);
                return 1;
        }
        restore_flags(flags);
        return 0;
#endif
}
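
/*
 * Editor's sketch: arming a one-shot kernel timer with the routines
 * above.  expires is in absolute jiffies, and the handler later runs
 * from timer_bh() with interrupts enabled.  "my_*" names are
 * hypothetical.
 */
static void my_timeout_fn(unsigned long data)
{
        printk("my_timer fired, data=%lu\n", data);
}

static struct timer_list my_timer;

static void my_arm_timer(void)
{
        init_timer(&my_timer);                  /* NULL next/prev, as add_timer() expects */
        my_timer.expires = jiffies + HZ;        /* one second from now */
        my_timer.data = 42;
        my_timer.function = my_timeout_fn;
        add_timer(&my_timer);
}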

unsigned long timer_active = 0;
struct timer_struct timer_table[32];

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 */
unsigned long avenrun[3] = { 0,0,0 };

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
        struct task_struct **p;
        unsigned long nr = 0;

        for(p = &LAST_TASK; p > &FIRST_TASK; --p)
                if (*p && ((*p)->state == TASK_RUNNING ||
                           (*p)->state == TASK_UNINTERRUPTIBLE ||
                           (*p)->state == TASK_SWAPPING))
                        nr += FIXED_1;
        return nr;
}

static inline void calc_load(void)
{
        unsigned long active_tasks; /* fixed-point */
        static int count = LOAD_FREQ;

        if (count-- > 0)
                return;
        count = LOAD_FREQ;
        active_tasks = count_active_tasks();
        CALC_LOAD(avenrun[0], EXP_1, active_tasks);
        CALC_LOAD(avenrun[1], EXP_5, active_tasks);
        CALC_LOAD(avenrun[2], EXP_15, active_tasks);
}
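
/*
 * Editor's note: CALC_LOAD is a fixed-point exponential decay.  In the
 * <linux/sched.h> of this vintage it reads (approximately):
 *
 *      #define FSHIFT  11
 *      #define FIXED_1 (1 << FSHIFT)
 *      #define CALC_LOAD(load, exp, n) \
 *              load *= exp; \
 *              load += n*(FIXED_1 - exp); \
 *              load >>= FSHIFT;
 *
 * i.e. load = load*e + n*(1 - e), with everything scaled by 2^11 and
 * e = exp(-5s/60s) ~ 0.92 ~ 1884/2048 for the one-minute average.
 * With one task permanently runnable, avenrun[0] climbs toward
 * FIXED_1 == 2048, which /proc/loadavg prints as 1.00.
 */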

/*
 * this routine handles the overflow of the microsecond field
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 *
 */
static void second_overflow(void)
{
    long ltemp;

    /* Bump the maxerror field */
    time_maxerror = (0x70000000-time_maxerror <
                     time_tolerance >> SHIFT_USEC) ?
        0x70000000 : (time_maxerror + (time_tolerance >> SHIFT_USEC));

    /*
     * Leap second processing. If in leap-insert state at
     * the end of the day, the system clock is set back one
     * second; if in leap-delete state, the system clock is
     * set ahead one second. The microtime() routine or
     * external clock driver will ensure that reported time
     * is always monotonic. The ugly divides should be
     * replaced.
     */
    switch (time_state) {

    case TIME_OK:
        if (time_status & STA_INS)
            time_state = TIME_INS;
        else if (time_status & STA_DEL)
            time_state = TIME_DEL;
        break;

    case TIME_INS:
        if (xtime.tv_sec % 86400 == 0) {
            xtime.tv_sec--;
            time_state = TIME_OOP;
            printk("Clock: inserting leap second 23:59:60 UTC\n");
        }
        break;

    case TIME_DEL:
        if ((xtime.tv_sec + 1) % 86400 == 0) {
            xtime.tv_sec++;
            time_state = TIME_WAIT;
            printk("Clock: deleting leap second 23:59:59 UTC\n");
        }
        break;

    case TIME_OOP:
        time_state = TIME_WAIT;
        break;

    case TIME_WAIT:
        if (!(time_status & (STA_INS | STA_DEL)))
            time_state = TIME_OK;
    }

    /*
     * Compute the phase adjustment for the next second. In
     * PLL mode, the offset is reduced by a fixed factor
     * times the time constant. In FLL mode the offset is
     * used directly. In either mode, the maximum phase
     * adjustment for each second is clamped so as to spread
     * the adjustment over not more than the number of
     * seconds between updates.
     */
    if (time_offset < 0) {
        ltemp = -time_offset;
        if (!(time_status & STA_FLL))
            ltemp >>= SHIFT_KG + time_constant;
        if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
            ltemp = (MAXPHASE / MINSEC) <<
                SHIFT_UPDATE;
        time_offset += ltemp;
        time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ -
                              SHIFT_UPDATE);
    } else {
        ltemp = time_offset;
        if (!(time_status & STA_FLL))
            ltemp >>= SHIFT_KG + time_constant;
        if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
            ltemp = (MAXPHASE / MINSEC) <<
                SHIFT_UPDATE;
        time_offset -= ltemp;
        time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ -
                             SHIFT_UPDATE);
    }

    /*
     * Compute the frequency estimate and additional phase
     * adjustment due to frequency error for the next
     * second. When the PPS signal is engaged, gnaw on the
     * watchdog counter and update the frequency computed by
     * the pll and the PPS signal.
     */
    pps_valid++;
    if (pps_valid == PPS_VALID) {
        pps_jitter = MAXTIME;
        pps_stabil = MAXFREQ;
        time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
                         STA_PPSWANDER | STA_PPSERROR);
    }
    ltemp = time_freq + pps_freq;
    if (ltemp < 0)
        time_adj -= -ltemp >>
            (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
    else
        time_adj += ltemp >>
            (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

    /* compensate for (HZ==100) != 128. Add 25% to get 125; => only 3% error */
    if (time_adj < 0)
        time_adj -= -time_adj >> 2;
    else
        time_adj += time_adj >> 2;
}
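
/*
 * Editor's note on the final 25% adjustment above: with HZ == 100 and
 * SHIFT_HZ == 7, the shifts divide by 128 where a division by HZ is
 * wanted.  Adding a quarter compensates:
 *
 *      x/128 + (x/128)/4  =  1.25*x/128  =  x/102.4
 *
 * which is within about 2.5% of the exact x/100 -- the "3% error" the
 * original comment rounds to.
 */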

/*
 * disregard lost ticks for now.. We don't care enough.
 */
static void timer_bh(void * unused)
{
        unsigned long mask;
        struct timer_struct *tp;
        struct timer_list * timer;

        cli();
        while ((timer = timer_head.next) != &timer_head && timer->expires <= jiffies) {
                void (*fn)(unsigned long) = timer->function;
                unsigned long data = timer->data;
                timer->next->prev = timer->prev;
                timer->prev->next = timer->next;
                timer->next = timer->prev = NULL;
                sti();
                fn(data);
                cli();
        }
        sti();

        for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
                if (mask > timer_active)
                        break;
                if (!(mask & timer_active))
                        continue;
                if (tp->expires > jiffies)
                        continue;
                timer_active &= ~mask;
                tp->fn();
                sti();
        }
}

void tqueue_bh(void * unused)
{
        run_task_queue(&tq_timer);
}

void immediate_bh(void * unused)
{
        run_task_queue(&tq_immediate);
}
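
/*
 * Editor's sketch: how work is usually put on the queues that
 * tqueue_bh() and immediate_bh() drain.  This assumes the 1.2-era
 * <linux/tqueue.h> layout of struct tq_struct (next, sync, routine,
 * data); "my_*" names are hypothetical.
 */
static void my_deferred_routine(void * data)
{
        /* runs from immediate_bh(), outside the original interrupt */
}

static struct tq_struct my_deferred = {
        NULL, 0, my_deferred_routine, NULL
};

static void my_interrupt_path(void)
{
        queue_task(&my_deferred, &tq_immediate);
        mark_bh(IMMEDIATE_BH);  /* run it on the way out of the next interrupt */
}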

void do_timer(struct pt_regs * regs)
{
        unsigned long mask;
        struct timer_struct *tp;
        long ltemp, psecs;

        /* Advance the phase; once it amounts to a whole microsecond,
         * fold that microsecond into the tick.
         */
        time_phase += time_adj;
        if (time_phase <= -FINEUSEC) {
                ltemp = -time_phase >> SHIFT_SCALE;
                time_phase += ltemp << SHIFT_SCALE;
                xtime.tv_usec += tick + time_adjust_step - ltemp;
        }
        else if (time_phase >= FINEUSEC) {
                ltemp = time_phase >> SHIFT_SCALE;
                time_phase -= ltemp << SHIFT_SCALE;
                xtime.tv_usec += tick + time_adjust_step + ltemp;
        } else
                xtime.tv_usec += tick + time_adjust_step;

        if (time_adjust)
        {
            /* We are doing an adjtime thing.
             *
             * Modify the value of the tick for next time.
             * Note that a positive delta means we want the clock
             * to run fast. This means that the tick should be bigger.
             *
             * Limit the amount of the step for *next* tick to be
             * in the range -tickadj .. +tickadj
             */
             if (time_adjust > tickadj)
               time_adjust_step = tickadj;
             else if (time_adjust < -tickadj)
               time_adjust_step = -tickadj;
             else
               time_adjust_step = time_adjust;

            /* Reduce by this step the amount of time left */
            time_adjust -= time_adjust_step;
        }
        else
            time_adjust_step = 0;

        if (xtime.tv_usec >= 1000000) {
            xtime.tv_usec -= 1000000;
            xtime.tv_sec++;
            second_overflow();
        }

        jiffies++;
        calc_load();
        if (user_mode(regs)) {
                current->utime++;
                if (current != task[0]) {
                        if (current->priority < 15)
                                kstat.cpu_nice++;
                        else
                                kstat.cpu_user++;
                }
                /* Update ITIMER_VIRT for current task if not in a system call */
                if (current->it_virt_value && !(--current->it_virt_value)) {
                        current->it_virt_value = current->it_virt_incr;
                        send_sig(SIGVTALRM,current,1);
                }
        } else {
                current->stime++;
                if(current != task[0])
                        kstat.cpu_system++;
                if (prof_buffer && current != task[0]) {
                        extern int _stext;
                        unsigned long ip = instruction_pointer(regs);
                        ip -= (unsigned long) &_stext;
                        ip >>= prof_shift;
                        if (ip < prof_len)
                                prof_buffer[ip]++;
                }
        }
        /*
         * check the cpu time limit on the process.
         */
        if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
            (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
                send_sig(SIGKILL, current, 1);
        if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
            (((current->stime + current->utime) % HZ) == 0)) {
                psecs = (current->stime + current->utime) / HZ;
                /* send when equal */
                if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
                        send_sig(SIGXCPU, current, 1);
                /* and every five seconds thereafter. */
                else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
                        ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
                        send_sig(SIGXCPU, current, 1);
        }

        if (current != task[0] && 0 > --current->counter) {
                current->counter = 0;
                need_resched = 1;
        }
        /* Update ITIMER_PROF for the current task */
        if (current->it_prof_value && !(--current->it_prof_value)) {
                current->it_prof_value = current->it_prof_incr;
                send_sig(SIGPROF,current,1);
        }
        for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
                if (mask > timer_active)
                        break;
                if (!(mask & timer_active))
                        continue;
                if (tp->expires > jiffies)
                        continue;
                mark_bh(TIMER_BH);
        }
        cli();
        if (timer_head.next->expires <= jiffies)
                mark_bh(TIMER_BH);
        if (tq_timer != &tq_last)
                mark_bh(TQUEUE_BH);
        sti();
}
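
/*
 * Editor's note: the counter handling above is the entire timeslice
 * mechanism.  Every tick takes one unit off current->counter; at zero,
 * need_resched requests a reschedule, and schedule() refills each task
 * with counter = counter/2 + priority once all runnable tasks have run
 * dry.  Assuming the apparent default priority of 15 and HZ == 100, a
 * full quantum is roughly 150 ms.
 */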

asmlinkage unsigned int sys_alarm(unsigned int seconds)
{
        struct itimerval it_new, it_old;
        unsigned int oldalarm;

        it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
        it_new.it_value.tv_sec = seconds;
        it_new.it_value.tv_usec = 0;
        _setitimer(ITIMER_REAL, &it_new, &it_old);
        oldalarm = it_old.it_value.tv_sec;
        /* ehhh.. We can't return 0 if we have an alarm pending.. */
        /* And we'd better return too much than too little anyway */
        if (it_old.it_value.tv_usec)
                oldalarm++;
        return oldalarm;
}
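
/*
 * Editor's note: alarm() is thus just a wrapper around ITIMER_REAL.
 * The rounding above means an alarm with 2.5 s left reports 3:
 * returning too much is harmless, while returning 0 would falsely
 * claim that no alarm is pending.
 */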

asmlinkage int sys_getpid(void)
{
        return current->pid;
}

asmlinkage int sys_getppid(void)
{
        return current->p_opptr->pid;
}

asmlinkage int sys_getuid(void)
{
        return current->uid;
}

asmlinkage int sys_geteuid(void)
{
        return current->euid;
}

asmlinkage int sys_getgid(void)
{
        return current->gid;
}

asmlinkage int sys_getegid(void)
{
        return current->egid;
}

asmlinkage int sys_nice(long increment)
{
        int newprio;

        if (increment < 0 && !suser())
                return -EPERM;
        newprio = current->priority - increment;
        if (newprio < 1)
                newprio = 1;
        if (newprio > 35)
                newprio = 35;
        current->priority = newprio;
        return 0;
}
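
/*
 * Editor's note: nice() here acts on priority directly rather than on
 * a stored nice value: nice(10) subtracts 10 from current->priority,
 * clamped to 1..35.  Since schedule() refills counters from priority,
 * dropping from 15 to 5 cuts the timeslice to a third.  Only the
 * superuser may pass a negative increment to grow it.
 */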

static void show_task(int nr,struct task_struct * p)
{
        unsigned long free;
        static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

        printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
        if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
                printk(stat_nam[p->state]);
        else
                printk(" ");
#if ((~0UL) == 0xffffffff)
        if (p == current)
                printk(" current  ");
        else
                printk(" %08lX ", thread_saved_pc(&p->tss));
#else
        if (p == current)
                printk("   current task   ");
        else
                printk(" %016lx ", thread_saved_pc(&p->tss));
#endif
        for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
                if (((unsigned long *)p->kernel_stack_page)[free])
                        break;
        }
        printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
        if (p->p_cptr)
                printk("%5d ", p->p_cptr->pid);
        else
                printk("      ");
        if (p->p_ysptr)
                printk("%7d", p->p_ysptr->pid);
        else
                printk("       ");
        if (p->p_osptr)
                printk(" %5d\n", p->p_osptr->pid);
        else
                printk("\n");
}

void show_state(void)
{
        int i;

#if ((~0UL) == 0xffffffff)
        printk("\n"
               "                         free                        sibling\n");
        printk("  task             PC    stack   pid father child younger older\n");
#else
        printk("\n"
               "                                 free                        sibling\n");
        printk("  task                 PC        stack   pid father child younger older\n");
#endif
        for (i=0 ; i<NR_TASKS ; i++)
                if (task[i])
                        show_task(i,task[i]);
}

void sched_init(void)
{
        bh_base[TIMER_BH].routine = timer_bh;
        bh_base[TQUEUE_BH].routine = tqueue_bh;
        bh_base[IMMEDIATE_BH].routine = immediate_bh;
        enable_bh(TIMER_BH);
        enable_bh(TQUEUE_BH);
        enable_bh(IMMEDIATE_BH);
}
