root/kernel/sched.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. add_to_runqueue
  2. del_from_runqueue
  3. wake_up_process
  4. process_timeout
  5. schedule
  6. sys_pause
  7. wake_up
  8. wake_up_interruptible
  9. __down
  10. __sleep_on
  11. interruptible_sleep_on
  12. sleep_on
  13. add_timer
  14. del_timer
  15. count_active_tasks
  16. calc_load
  17. second_overflow
  18. timer_bh
  19. tqueue_bh
  20. immediate_bh
  21. do_timer
  22. sys_alarm
  23. sys_getpid
  24. sys_getppid
  25. sys_getuid
  26. sys_geteuid
  27. sys_getgid
  28. sys_getegid
  29. sys_nice
  30. show_task
  31. show_state
  32. sched_init

   1 /*
   2  *  linux/kernel/sched.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  * 'sched.c' is the main kernel file. It contains scheduling primitives
   9  * (sleep_on, wakeup, schedule etc) as well as a number of simple system
  10  * call functions (type getpid(), which just extracts a field from
   11  * the current task).
  12  */
  13 
  14 #include <linux/config.h>
  15 #include <linux/signal.h>
  16 #include <linux/sched.h>
  17 #include <linux/timer.h>
  18 #include <linux/kernel.h>
  19 #include <linux/kernel_stat.h>
  20 #include <linux/fdreg.h>
  21 #include <linux/errno.h>
  22 #include <linux/time.h>
  23 #include <linux/ptrace.h>
  24 #include <linux/delay.h>
  25 #include <linux/interrupt.h>
  26 #include <linux/tqueue.h>
  27 #include <linux/resource.h>
  28 #include <linux/mm.h>
  29 
  30 #include <asm/system.h>
  31 #include <asm/io.h>
  32 #include <asm/segment.h>
  33 #include <asm/pgtable.h>
  34 
  35 #define TIMER_IRQ 0
  36 
  37 #include <linux/timex.h>
  38 
  39 /*
  40  * kernel variables
  41  */
   42 long tick = 1000000 / HZ;               /* timer interrupt period */
   43 volatile struct timeval xtime;          /* The current time */
   44 int tickadj = 500/HZ;                   /* microsecs */
   45 
/* task queues drained by the timer, immediate and scheduler bottom halves */
   46 DECLARE_TASK_QUEUE(tq_timer);
   47 DECLARE_TASK_QUEUE(tq_immediate);
   48 DECLARE_TASK_QUEUE(tq_scheduler);
   49 
   50 /*
   51  * phase-lock loop variables
   52  */
   53 int time_state = TIME_BAD;     /* clock synchronization status */
   54 int time_status = STA_UNSYNC;   /* clock status bits */
   55 long time_offset = 0;           /* time adjustment (us) */
   56 long time_constant = 0;         /* pll time constant */
   57 long time_tolerance = MAXFREQ;  /* frequency tolerance (ppm) */
   58 long time_precision = 1;        /* clock precision (us) */
   59 long time_maxerror = 0x70000000;/* maximum error */
   60 long time_esterror = 0x70000000;/* estimated error */
   61 long time_phase = 0;            /* phase offset (scaled us) */
   62 long time_freq = 0;             /* frequency offset (scaled ppm) */
   63 long time_adj = 0;              /* tick adjust (scaled 1 / HZ) */
   64 long time_reftime = 0;          /* time at last adjustment (s) */
   65 
/* adjtime(2) state: remaining correction and the per-tick step applied */
   66 long time_adjust = 0;
   67 long time_adjust_step = 0;
   68 
/* set whenever schedule() should be run as soon as it is safe to do so */
   69 int need_resched = 0;
   70 unsigned long event = 0;
   71 
   72 extern int _setitimer(int, struct itimerval *, struct itimerval *);
/* kernel profiling buffer; profiling is off while prof_buffer is NULL */
   73 unsigned long * prof_buffer = NULL;
   74 unsigned long prof_len = 0;
   75 unsigned long prof_shift = 0;
   76 
   77 #define _S(nr) (1<<((nr)-1))
   78 
   79 extern void mem_use(void);
   80 
   81 extern int timer_interrupt(void);
   82  
/* static data for task 0 (idle); STACK_MAGIC sentinels guard the stacks */
   83 static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
   84 unsigned long init_user_stack[1024] = { STACK_MAGIC, };
   85 static struct vm_area_struct init_mmap = INIT_MMAP;
   86 static struct fs_struct init_fs = INIT_FS;
   87 static struct files_struct init_files = INIT_FILES;
   88 static struct signal_struct init_signals = INIT_SIGNALS;
   89 
   90 struct mm_struct init_mm = INIT_MM;
   91 struct task_struct init_task = INIT_TASK;
   92 
   93 unsigned long volatile jiffies=0;
   94 
   95 struct task_struct *current = &init_task;
   96 struct task_struct *last_task_used_math = NULL;
   97 
/* every task in the system; slot 0 is always the idle task */
   98 struct task_struct * task[NR_TASKS] = {&init_task, };
   99 
  100 struct kernel_stat kstat = { 0 };
 101 
  102 static inline void add_to_runqueue(struct task_struct * p)
/*
 * Link task p at the front of the circular run-queue anchored at
 * init_task.  The list is not otherwise protected, so the caller
 * must have interrupts disabled.
 */
  103 {
  104 #if 1   /* sanity tests */
  105         if (p->next_run || p->prev_run) {
  106                 printk("task already on run-queue\n");
  107                 return;
  108         }
  109 #endif
        /* a clearly better candidate than the running task forces a resched */
  110         if (p->counter > current->counter + 3)
  111                 need_resched = 1;
  112         nr_running++;
  113         (p->next_run = init_task.next_run)->prev_run = p;
  114         p->prev_run = &init_task;
  115         init_task.next_run = p;
  116 }
 117 
  118 static inline void del_from_runqueue(struct task_struct * p)
/*
 * Unlink task p from the circular run-queue.  The idle task is never
 * removed: attempts are reported (at most five times) and ignored.
 * Caller must have interrupts disabled.
 */
  119 {
  120         struct task_struct *next = p->next_run;
  121         struct task_struct *prev = p->prev_run;
  122 
  123 #if 1   /* sanity tests */
  124         if (!next || !prev) {
  125                 printk("task not on run-queue\n");
  126                 return;
  127         }
  128 #endif
  129         if (p == &init_task) {
                /* rate-limit the complaint to the first five occurrences */
  130                 static int nr = 0;
  131                 if (nr < 5) {
  132                         nr++;
  133                         printk("idle task may not sleep\n");
  134                 }
  135                 return;
  136         }
  137         nr_running--;
  138         next->prev_run = prev;
  139         prev->next_run = next;
  140         p->next_run = NULL;
  141         p->prev_run = NULL;
  142 }
 143 
 144 /*
 145  * Wake up a process. Put it on the run-queue if it's not
 146  * already there.  The "current" process is always on the
 147  * run-queue (except when the actual re-schedule is in
 148  * progress), and as such you're allowed to do the simpler
 149  * "current->state = TASK_RUNNING" to mark yourself runnable
 150  * without the overhead of this.
 151  */
  152 inline void wake_up_process(struct task_struct * p)
/*
 * Make p runnable and put it on the run-queue if it is not already
 * there.  Interrupts are blocked around the state change and queue
 * insertion, so this is safe to call from interrupt context.
 */
  153 {
  154         unsigned long flags;
  155 
  156         save_flags(flags);
  157         cli();
  158         p->state = TASK_RUNNING;
  159         if (!p->next_run)
  160                 add_to_runqueue(p);
  161         restore_flags(flags);
  162 }
 163 
 164 static void process_timeout(unsigned long __data)
     /* [previous][next][first][last][top][bottom][index][help] */
 165 {
 166         struct task_struct * p = (struct task_struct *) __data;
 167 
 168         p->timeout = 0;
 169         wake_up_process(p);
 170 }
 171 
 172 /*
 173  *  'schedule()' is the scheduler function. It's a very simple and nice
 174  * scheduler: it's not perfect, but certainly works for most things.
 175  *
 176  * The goto is "interesting".
 177  *
 178  *   NOTE!!  Task 0 is the 'idle' task, which gets called when no other
 179  * tasks can run. It can not be killed, and it cannot sleep. The 'state'
 180  * information in task[0] is never used.
 181  */
 182 asmlinkage void schedule(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 183 {
 184         int c;
 185         struct task_struct * p;
 186         struct task_struct * next;
 187         unsigned long timeout = 0;
 188 
 189 /* check alarm, wake up any interruptible tasks that have got a signal */
 190 
 191         if (intr_count) {
 192                 printk("Aiee: scheduling in interrupt\n");
 193                 return;
 194         }
 195         run_task_queue(&tq_scheduler);
 196 
 197         need_resched = 0;
 198         cli();
 199         switch (current->state) {
 200                 case TASK_INTERRUPTIBLE:
 201                         if (current->signal & ~current->blocked)
 202                                 goto makerunnable;
 203                         timeout = current->timeout;
 204                         if (timeout && (timeout <= jiffies)) {
 205                                 current->timeout = 0;
 206                                 timeout = 0;
 207                 makerunnable:
 208                                 current->state = TASK_RUNNING;
 209                                 break;
 210                         }
 211                 default:
 212                         del_from_runqueue(current);
 213                 case TASK_RUNNING:
 214         }
 215         p = init_task.next_run;
 216         sti();
 217 
 218 /*
 219  * Note! there may appear new tasks on the run-queue during this, as
 220  * interrupts are enabled. However, they will be put on front of the
 221  * list, so our list starting at "p" is essentially fixed.
 222  */
 223 /* this is the scheduler proper: */
 224         c = -1000;
 225         next = &init_task;
 226         while (p != &init_task) {
 227                 if (p->counter > c)
 228                         c = p->counter, next = p;
 229                 p = p->next_run;
 230         }
 231 
 232         /* if all runnable processes have "counter == 0", re-calculate counters */
 233         if (!c) {
 234                 for_each_task(p)
 235                         p->counter = (p->counter >> 1) + p->priority;
 236         }
 237         if (current != next) {
 238                 struct timer_list timer;
 239 
 240                 kstat.context_swtch++;
 241                 if (timeout) {
 242                         init_timer(&timer);
 243                         timer.expires = timeout;
 244                         timer.data = (unsigned long) current;
 245                         timer.function = process_timeout;
 246                         add_timer(&timer);
 247                 }
 248                 switch_to(next);
 249                 if (timeout)
 250                         del_timer(&timer);
 251         }
 252 }
 253 
 254 asmlinkage int sys_pause(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 255 {
 256         current->state = TASK_INTERRUPTIBLE;
 257         schedule();
 258         return -ERESTARTNOHAND;
 259 }
 260 
 261 /*
 262  * wake_up doesn't wake up stopped processes - they have to be awakened
 263  * with signals or similar.
 264  *
 265  * Note that this doesn't need cli-sti pairs: interrupts may not change
 266  * the wait-queue structures directly, but only call wake_up() to wake
 267  * a process. The process itself must remove the queue once it has woken.
 268  */
  269 void wake_up(struct wait_queue **q)
/*
 * Walk the circular wait-queue at *q and wake every task sleeping in
 * the interruptible or uninterruptible state.  Stopped tasks are left
 * alone.  The queue is not modified here: each woken process removes
 * its own entry (see the comment above on why no cli/sti is needed).
 */
  270 {
  271         struct wait_queue *tmp;
  272         struct task_struct * p;
  273 
  274         if (!q || !(tmp = *q))
  275                 return;
  276         do {
  277                 if ((p = tmp->task) != NULL) {
  278                         if ((p->state == TASK_UNINTERRUPTIBLE) ||
  279                             (p->state == TASK_INTERRUPTIBLE))
  280                                 wake_up_process(p);
  281                 }
                /* a NULL next pointer means the circular list is corrupt */
  282                 if (!tmp->next) {
  283                         printk("wait_queue is bad (eip = %p)\n",
  284                                 __builtin_return_address(0));
  285                         printk("        q = %p\n",q);
  286                         printk("       *q = %p\n",*q);
  287                         printk("      tmp = %p\n",tmp);
  288                         break;
  289                 }
  290                 tmp = tmp->next;
  291         } while (tmp != *q);
  292 }
 293 
  294 void wake_up_interruptible(struct wait_queue **q)
/*
 * Like wake_up(), but only tasks in TASK_INTERRUPTIBLE sleep are
 * woken; uninterruptible sleepers stay asleep.
 */
  295 {
  296         struct wait_queue *tmp;
  297         struct task_struct * p;
  298 
  299         if (!q || !(tmp = *q))
  300                 return;
  301         do {
  302                 if ((p = tmp->task) != NULL) {
  303                         if (p->state == TASK_INTERRUPTIBLE)
  304                                 wake_up_process(p);
  305                 }
                /* a NULL next pointer means the circular list is corrupt */
  306                 if (!tmp->next) {
  307                         printk("wait_queue is bad (eip = %p)\n",
  308                                 __builtin_return_address(0));
  309                         printk("        q = %p\n",q);
  310                         printk("       *q = %p\n",*q);
  311                         printk("      tmp = %p\n",tmp);
  312                         break;
  313                 }
  314                 tmp = tmp->next;
  315         } while (tmp != *q);
  316 }
 317 
  318 void __down(struct semaphore * sem)
/*
 * Slow path of "down" on a semaphore: sleep uninterruptibly until
 * sem->count becomes positive.  The state is re-set to
 * TASK_UNINTERRUPTIBLE after every wakeup because a wake_up can
 * arrive while the count is still <= 0.
 */
  319 {
  320         struct wait_queue wait = { current, NULL };
  321         add_wait_queue(&sem->wait, &wait);
  322         current->state = TASK_UNINTERRUPTIBLE;
  323         while (sem->count <= 0) {
  324                 schedule();
  325                 current->state = TASK_UNINTERRUPTIBLE;
  326         }
  327         current->state = TASK_RUNNING;
  328         remove_wait_queue(&sem->wait, &wait);
  329 }
 330 
  331 static inline void __sleep_on(struct wait_queue **p, int state)
/*
 * Common helper for sleep_on()/interruptible_sleep_on(): queue the
 * current task on *p, set the requested sleep state and schedule.
 * Interrupts are enabled across the schedule() and the caller's flag
 * state is restored afterwards.  Task 0 (idle) must never sleep.
 */
  332 {
  333         unsigned long flags;
  334         struct wait_queue wait = { current, NULL };
  335 
  336         if (!p)
  337                 return;
  338         if (current == task[0])
  339                 panic("task[0] trying to sleep");
  340         current->state = state;
  341         add_wait_queue(p, &wait);
  342         save_flags(flags);
  343         sti();
  344         schedule();
  345         remove_wait_queue(p, &wait);
  346         restore_flags(flags);
  347 }
 348 
 349 void interruptible_sleep_on(struct wait_queue **p)
     /* [previous][next][first][last][top][bottom][index][help] */
 350 {
 351         __sleep_on(p,TASK_INTERRUPTIBLE);
 352 }
 353 
 354 void sleep_on(struct wait_queue **p)
     /* [previous][next][first][last][top][bottom][index][help] */
 355 {
 356         __sleep_on(p,TASK_UNINTERRUPTIBLE);
 357 }
 358 
  359 /*
  360  * The head for the timer-list has an "expires" field of MAX_UINT (~0),
  361  * and add_timer()'s insertion scan relies on this sentinel to
  362  * terminate without an explicit end-of-list test.
  363  */
  363 static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
/* when 1, add_timer()/del_timer() verify list membership (debug aid) */
  364 #define SLOW_BUT_DEBUGGING_TIMERS 1
 365 
  366 void add_timer(struct timer_list * timer)
/*
 * Insert a one-shot timer into the global list, kept sorted by
 * ascending "expires".  The scan terminates at timer_head's ~0
 * sentinel expiry.  Interrupts are disabled around the list update.
 * With SLOW_BUT_DEBUGGING_TIMERS, a timer that still looks linked is
 * rejected with a diagnostic instead of corrupting the list.
 */
  367 {
  368         unsigned long flags;
  369         struct timer_list *p;
  370 
  371 #if SLOW_BUT_DEBUGGING_TIMERS
  372         if (timer->next || timer->prev) {
  373                 printk("add_timer() called with non-zero list from %p\n",
  374                         __builtin_return_address(0));
  375                 return;
  376         }
  377 #endif
  378         p = &timer_head;
  379         save_flags(flags);
  380         cli();
  381         do {
  382                 p = p->next;
  383         } while (timer->expires > p->expires);
        /* link the new timer immediately before p */
  384         timer->next = p;
  385         timer->prev = p->prev;
  386         p->prev = timer;
  387         timer->prev->next = timer;
  388         restore_flags(flags);
  389 }
 390 
  391 int del_timer(struct timer_list * timer)
/*
 * Remove a pending timer from the global list.  Returns 1 if the
 * timer was found and unlinked, 0 if it was not pending.  The debug
 * variant walks the whole list to verify membership and complains
 * about a timer whose links are non-NULL but not actually queued;
 * the fast variant trusts timer->next as the "is queued" test.
 */
  392 {
  393         unsigned long flags;
  394 #if SLOW_BUT_DEBUGGING_TIMERS
  395         struct timer_list * p;
  396 
  397         p = &timer_head;
  398         save_flags(flags);
  399         cli();
  400         while ((p = p->next) != &timer_head) {
  401                 if (p == timer) {
  402                         timer->next->prev = timer->prev;
  403                         timer->prev->next = timer->next;
  404                         timer->next = timer->prev = NULL;
  405                         restore_flags(flags);
  406                         return 1;
  407                 }
  408         }
  409         if (timer->next || timer->prev)
  410                 printk("del_timer() called from %p with timer not initialized\n",
  411                         __builtin_return_address(0));
  412         restore_flags(flags);
  413         return 0;
  414 #else   
  415         save_flags(flags);
  416         cli();
  417         if (timer->next) {
  418                 timer->next->prev = timer->prev;
  419                 timer->prev->next = timer->next;
  420                 timer->next = timer->prev = NULL;
  421                 restore_flags(flags);
  422                 return 1;
  423         }
  424         restore_flags(flags);
  425         return 0;
  426 #endif
  427 }
 428 
/* old-style static timers: one bit in timer_active per timer_table slot */
  429 unsigned long timer_active = 0;
  430 struct timer_struct timer_table[32];
  431 
  432 /*
  433  * Hmm.. Changed this, as the GNU make sources (load.c) seems to
  434  * imply that avenrun[] is the standard name for this kind of thing.
  435  * Nothing else seems to be standardized: the fractional size etc
  436  * all seem to differ on different machines.
  437  */
  438 unsigned long avenrun[3] = { 0,0,0 };
 439 
 440 /*
 441  * Nr of active tasks - counted in fixed-point numbers
 442  */
 443 static unsigned long count_active_tasks(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 444 {
 445         struct task_struct **p;
 446         unsigned long nr = 0;
 447 
 448         for(p = &LAST_TASK; p > &FIRST_TASK; --p)
 449                 if (*p && ((*p)->state == TASK_RUNNING ||
 450                            (*p)->state == TASK_UNINTERRUPTIBLE ||
 451                            (*p)->state == TASK_SWAPPING))
 452                         nr += FIXED_1;
 453         return nr;
 454 }
 455 
 456 static inline void calc_load(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 457 {
 458         unsigned long active_tasks; /* fixed-point */
 459         static int count = LOAD_FREQ;
 460 
 461         if (count-- > 0)
 462                 return;
 463         count = LOAD_FREQ;
 464         active_tasks = count_active_tasks();
 465         CALC_LOAD(avenrun[0], EXP_1, active_tasks);
 466         CALC_LOAD(avenrun[1], EXP_5, active_tasks);
 467         CALC_LOAD(avenrun[2], EXP_15, active_tasks);
 468 }
 469 
 470 /*
 471  * this routine handles the overflow of the microsecond field
 472  *
 473  * The tricky bits of code to handle the accurate clock support
 474  * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 475  * They were originally developed for SUN and DEC kernels.
 476  * All the kudos should go to Dave for this stuff.
 477  *
 478  */
  479 static void second_overflow(void)
/*
 * Called from do_timer() each time xtime.tv_usec wraps past one
 * second: ages the clock error bounds, performs leap-second state
 * transitions, and computes time_adj, the phase/frequency correction
 * applied on every subsequent tick (Mills NTP kernel clock model).
 */
  480 {
  481     long ltemp;
  482 
  483     /* Bump the maxerror field */
  484     time_maxerror = (0x70000000-time_maxerror <
  485                      time_tolerance >> SHIFT_USEC) ?
  486         0x70000000 : (time_maxerror + (time_tolerance >> SHIFT_USEC));
  487 
  488     /*
  489      * Leap second processing. If in leap-insert state at
  490      * the end of the day, the system clock is set back one
  491      * second; if in leap-delete state, the system clock is
  492      * set ahead one second. The microtime() routine or
  493      * external clock driver will insure that reported time
  494      * is always monotonic. The ugly divides should be
  495      * replaced.
  496      */
  497     switch (time_state) {
  498 
  499     case TIME_OK:
  500         if (time_status & STA_INS)
  501             time_state = TIME_INS;
  502         else if (time_status & STA_DEL)
  503             time_state = TIME_DEL;
  504         break;
  505 
  506     case TIME_INS:
  507         if (xtime.tv_sec % 86400 == 0) {
  508             xtime.tv_sec--;
  509             time_state = TIME_OOP;
  510             printk("Clock: inserting leap second 23:59:60 UTC\n");
  511         }
  512         break;
  513 
  514     case TIME_DEL:
  515         if ((xtime.tv_sec + 1) % 86400 == 0) {
  516             xtime.tv_sec++;
  517             time_state = TIME_WAIT;
  518             printk("Clock: deleting leap second 23:59:59 UTC\n");
  519         }
  520         break;
  521 
  522     case TIME_OOP:
  523         time_state = TIME_WAIT;
  524         break;
  525 
  526     case TIME_WAIT:
  527         if (!(time_status & (STA_INS | STA_DEL)))
  528             time_state = TIME_OK;
  529     }
  530 
  531     /*
  532      * Compute the phase adjustment for the next second. In
  533      * PLL mode, the offset is reduced by a fixed factor
  534      * times the time constant. In FLL mode the offset is
  535      * used directly. In either mode, the maximum phase
  536      * adjustment for each second is clamped so as to spread
  537      * the adjustment over not more than the number of
  538      * seconds between updates.
  539      */
  540     if (time_offset < 0) {
  541         ltemp = -time_offset;
  542         if (!(time_status & STA_FLL))
  543             ltemp >>= SHIFT_KG + time_constant;
  544         if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
  545             ltemp = (MAXPHASE / MINSEC) <<
  546                 SHIFT_UPDATE;
  547         time_offset += ltemp;
  548         time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ -
  549                               SHIFT_UPDATE);
  550     } else {
  551         ltemp = time_offset;
  552         if (!(time_status & STA_FLL))
  553             ltemp >>= SHIFT_KG + time_constant;
  554         if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
  555             ltemp = (MAXPHASE / MINSEC) <<
  556                 SHIFT_UPDATE;
  557         time_offset -= ltemp;
  558         time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ -
  559                              SHIFT_UPDATE);
  560     }
  561 
  562     /*
  563      * Compute the frequency estimate and additional phase
  564      * adjustment due to frequency error for the next
  565      * second. When the PPS signal is engaged, gnaw on the
  566      * watchdog counter and update the frequency computed by
  567      * the pll and the PPS signal.
  568      */
  569     pps_valid++;
  570     if (pps_valid == PPS_VALID) {
        /* PPS signal lost: reset PPS state and clear its status bits */
  571         pps_jitter = MAXTIME;
  572         pps_stabil = MAXFREQ;
  573         time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
  574                          STA_PPSWANDER | STA_PPSERROR);
  575     }
  576     ltemp = time_freq + pps_freq;
  577     if (ltemp < 0)
  578         time_adj -= -ltemp >>
  579             (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
  580     else
  581         time_adj += ltemp >>
  582             (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
  583 
  584     /* compensate for (HZ==100) != 128. Add 25% to get 125; => only 3% error */
  585     if (time_adj < 0)
  586         time_adj -= -time_adj >> 2;
  587     else
  588         time_adj += time_adj >> 2;
  589 }
 590 
 591 /*
 592  * disregard lost ticks for now.. We don't care enough.
 593  */
  594 static void timer_bh(void * unused)
/*
 * Timer bottom half: run every expired timer_list timer, then every
 * expired old-style timer_table entry.  The list is only touched with
 * interrupts off, but interrupts are re-enabled while each handler
 * runs so handlers may take their time.
 */
  595 {
  596         unsigned long mask;
  597         struct timer_struct *tp;
  598         struct timer_list * timer;
  599 
  600         cli();
  601         while ((timer = timer_head.next) != &timer_head && timer->expires <= jiffies) {
                /* unlink first, then call the handler with interrupts on */
  602                 void (*fn)(unsigned long) = timer->function;
  603                 unsigned long data = timer->data;
  604                 timer->next->prev = timer->prev;
  605                 timer->prev->next = timer->next;
  606                 timer->next = timer->prev = NULL;
  607                 sti();
  608                 fn(data);
  609                 cli();
  610         }
  611         sti();
  612         
        /* old-style timers: one timer_active bit per timer_table slot */
  613         for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
  614                 if (mask > timer_active)
  615                         break;
  616                 if (!(mask & timer_active))
  617                         continue;
  618                 if (tp->expires > jiffies)
  619                         continue;
  620                 timer_active &= ~mask;
  621                 tp->fn();
  622                 sti();
  623         }
  624 }
 625 
 626 void tqueue_bh(void * unused)
     /* [previous][next][first][last][top][bottom][index][help] */
 627 {
 628         run_task_queue(&tq_timer);
 629 }
 630 
 631 void immediate_bh(void * unused)
     /* [previous][next][first][last][top][bottom][index][help] */
 632 {
 633         run_task_queue(&tq_immediate);
 634 }
 635 
 636 /*
 637  * The int argument is really a (struct pt_regs *), in case the
 638  * interrupt wants to know from where it was called. The timer
 639  * irq uses this to decide if it should update the user or system
 640  * times.
 641  */
  642 static void do_timer(int irq, struct pt_regs * regs)
/*
 * Timer interrupt handler: advances xtime (applying NTP phase and
 * adjtime corrections), occasionally writes the RTC back, bumps
 * jiffies, does per-task accounting/profiling/itimers, enforces the
 * RLIMIT_CPU limits, decrements the running task's time-slice and
 * marks the timer/tqueue bottom halves when there is work for them.
 */
  643 {
  644         unsigned long mask;
  645         struct timer_struct *tp;
  646         /* last time the cmos clock got updated */
  647         static long last_rtc_update=0;
  648         extern int set_rtc_mmss(unsigned long);
  649 
  650         long ltemp, psecs;
  651 
  652         /* Advance the phase, once it gets to one microsecond, then
  653          * advance the tick more.
  654          */
  655         time_phase += time_adj;
  656         if (time_phase <= -FINEUSEC) {
  657                 ltemp = -time_phase >> SHIFT_SCALE;
  658                 time_phase += ltemp << SHIFT_SCALE;
  659                 xtime.tv_usec += tick + time_adjust_step - ltemp;
  660         }
  661         else if (time_phase >= FINEUSEC) {
  662                 ltemp = time_phase >> SHIFT_SCALE;
  663                 time_phase -= ltemp << SHIFT_SCALE;
  664                 xtime.tv_usec += tick + time_adjust_step + ltemp;
  665         } else
  666                 xtime.tv_usec += tick + time_adjust_step;
  667 
  668         if (time_adjust)
  669         {
  670             /* We are doing an adjtime thing. 
  671              *
  672              * Modify the value of the tick for next time.
  673              * Note that a positive delta means we want the clock
  674              * to run fast. This means that the tick should be bigger
  675              *
  676              * Limit the amount of the step for *next* tick to be
  677              * in the range -tickadj .. +tickadj
  678              */
  679              if (time_adjust > tickadj)
  680                time_adjust_step = tickadj;
  681              else if (time_adjust < -tickadj)
  682                time_adjust_step = -tickadj;
  683              else
  684                time_adjust_step = time_adjust;
  685              
  686             /* Reduce by this step the amount of time left  */
  687             time_adjust -= time_adjust_step;
  688         }
  689         else
  690             time_adjust_step = 0;
  691 
  692         if (xtime.tv_usec >= 1000000) {
  693             xtime.tv_usec -= 1000000;
  694             xtime.tv_sec++;
  695             second_overflow();
  696         }
  697 
  698         /* If we have an externally synchronized Linux clock, then update
  699          * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
  700          * called as close as possible to 500 ms before the new second starts.
  701          */
  702         if (time_state != TIME_BAD && xtime.tv_sec > last_rtc_update + 660 &&
  703             xtime.tv_usec > 500000 - (tick >> 1) &&
  704             xtime.tv_usec < 500000 + (tick >> 1))
  705           if (set_rtc_mmss(xtime.tv_sec) == 0)
  706             last_rtc_update = xtime.tv_sec;
  707           else
  708             last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
  709 
  710         jiffies++;
  711         calc_load();
  712         if (user_mode(regs)) {
  713                 current->utime++;
  714                 if (current != task[0]) {
                        /* priority < 15 is treated as niced for accounting */
  715                         if (current->priority < 15)
  716                                 kstat.cpu_nice++;
  717                         else
  718                                 kstat.cpu_user++;
  719                 }
  720                 /* Update ITIMER_VIRT for current task if not in a system call */
  721                 if (current->it_virt_value && !(--current->it_virt_value)) {
  722                         current->it_virt_value = current->it_virt_incr;
  723                         send_sig(SIGVTALRM,current,1);
  724                 }
  725         } else {
  726                 current->stime++;
  727                 if(current != task[0])
  728                         kstat.cpu_system++;
                /* kernel-mode profiling: bucket the interrupted PC */
  729                 if (prof_buffer && current != task[0]) {
  730                         extern int _stext;
  731                         unsigned long ip = instruction_pointer(regs);
  732                         ip -= (unsigned long) &_stext;
  733                         ip >>= prof_shift;
  734                         if (ip < prof_len)
  735                                 prof_buffer[ip]++;
  736                 }
  737         }
  738         /*
  739          * check the cpu time limit on the process.
  740          */
  741         if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
  742             (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
  743                 send_sig(SIGKILL, current, 1);
  744         if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
  745             (((current->stime + current->utime) % HZ) == 0)) {
  746                 psecs = (current->stime + current->utime) / HZ;
  747                 /* send when equal */
  748                 if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
  749                         send_sig(SIGXCPU, current, 1);
  750                 /* and every five seconds thereafter. */
  751                 else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
  752                         ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
  753                         send_sig(SIGXCPU, current, 1);
  754         }
  755 
        /* time-slice expired: ask for a reschedule (idle task excepted) */
  756         if (current != task[0] && 0 > --current->counter) {
  757                 current->counter = 0;
  758                 need_resched = 1;
  759         }
  760         /* Update ITIMER_PROF for the current task */
  761         if (current->it_prof_value && !(--current->it_prof_value)) {
  762                 current->it_prof_value = current->it_prof_incr;
  763                 send_sig(SIGPROF,current,1);
  764         }
        /* any expired old-style or list timers? run timer_bh later */
  765         for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
  766                 if (mask > timer_active)
  767                         break;
  768                 if (!(mask & timer_active))
  769                         continue;
  770                 if (tp->expires > jiffies)
  771                         continue;
  772                 mark_bh(TIMER_BH);
  773         }
  774         cli();
  775         if (timer_head.next->expires <= jiffies)
  776                 mark_bh(TIMER_BH);
  777         if (tq_timer != &tq_last)
  778                 mark_bh(TQUEUE_BH);
  779         sti();
  780 }
 781 
 782 asmlinkage unsigned int sys_alarm(unsigned int seconds)
     /* [previous][next][first][last][top][bottom][index][help] */
 783 {
 784         struct itimerval it_new, it_old;
 785         unsigned int oldalarm;
 786 
 787         it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
 788         it_new.it_value.tv_sec = seconds;
 789         it_new.it_value.tv_usec = 0;
 790         _setitimer(ITIMER_REAL, &it_new, &it_old);
 791         oldalarm = it_old.it_value.tv_sec;
 792         /* ehhh.. We can't return 0 if we have an alarm pending.. */
 793         /* And we'd better return too much than too little anyway */
 794         if (it_old.it_value.tv_usec)
 795                 oldalarm++;
 796         return oldalarm;
 797 }
 798 
 799 asmlinkage int sys_getpid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 800 {
 801         return current->pid;
 802 }
 803 
 804 asmlinkage int sys_getppid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 805 {
 806         return current->p_opptr->pid;
 807 }
 808 
 809 asmlinkage int sys_getuid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 810 {
 811         return current->uid;
 812 }
 813 
 814 asmlinkage int sys_geteuid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 815 {
 816         return current->euid;
 817 }
 818 
 819 asmlinkage int sys_getgid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 820 {
 821         return current->gid;
 822 }
 823 
 824 asmlinkage int sys_getegid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 825 {
 826         return current->egid;
 827 }
 828 
 829 asmlinkage int sys_nice(long increment)
     /* [previous][next][first][last][top][bottom][index][help] */
 830 {
 831         int newprio;
 832 
 833         if (increment < 0 && !suser())
 834                 return -EPERM;
 835         newprio = current->priority - increment;
 836         if (newprio < 1)
 837                 newprio = 1;
 838         if (newprio > 35)
 839                 newprio = 35;
 840         current->priority = newprio;
 841         return 0;
 842 }
 843 
  844 static void show_task(int nr,struct task_struct * p)
/*
 * Print one line of the show_state() table for task p: command name,
 * task number (negated for the current task), state letter, saved PC,
 * an estimate of free kernel stack (bytes below the first non-zero
 * word), pid, parent pid and sibling pids.  The #if picks the PC
 * format for 32- vs 64-bit longs.
 */
  845 {
  846         unsigned long free;
  847         static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
  848 
  849         printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
  850         if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
                /* NOTE(review): non-literal format string; safe only while
                 * stat_nam entries contain no '%' characters */
  851                 printk(stat_nam[p->state]);
  852         else
  853                 printk(" ");
  854 #if ((~0UL) == 0xffffffff)
  855         if (p == current)
  856                 printk(" current  ");
  857         else
  858                 printk(" %08lX ", thread_saved_pc(&p->tss));
  859 #else
  860         if (p == current)
  861                 printk("   current task   ");
  862         else
  863                 printk(" %016lx ", thread_saved_pc(&p->tss));
  864 #endif
        /* scan up from the stack base for the first word ever written */
  865         for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
  866                 if (((unsigned long *)p->kernel_stack_page)[free])
  867                         break;
  868         }
  869         printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
  870         if (p->p_cptr)
  871                 printk("%5d ", p->p_cptr->pid);
  872         else
  873                 printk("      ");
  874         if (p->p_ysptr)
  875                 printk("%7d", p->p_ysptr->pid);
  876         else
  877                 printk("       ");
  878         if (p->p_osptr)
  879                 printk(" %5d\n", p->p_osptr->pid);
  880         else
  881                 printk("\n");
  882 }
 883 
  884 void show_state(void)
/*
 * Dump a one-line summary of every task via show_task().  The #if
 * selects column headers matching the 32- or 64-bit PC format used
 * by show_task().
 */
  885 {
  886         int i;
  887 
  888 #if ((~0UL) == 0xffffffff)
  889         printk("\n"
  890                "                         free                        sibling\n");
  891         printk("  task             PC    stack   pid father child younger older\n");
  892 #else
  893         printk("\n"
  894                "                                 free                        sibling\n");
  895         printk("  task                 PC        stack   pid father child younger older\n");
  896 #endif
  897         for (i=0 ; i<NR_TASKS ; i++)
  898                 if (task[i])
  899                         show_task(i,task[i]);
  900 }
 901 
  902 void sched_init(void)
/*
 * One-time scheduler/timer initialization at boot: install the
 * timer, tqueue and immediate bottom-half handlers, claim the timer
 * interrupt (panic if that fails - nothing works without it), and
 * enable the three bottom halves.
 */
  903 {
  904         bh_base[TIMER_BH].routine = timer_bh;
  905         bh_base[TQUEUE_BH].routine = tqueue_bh;
  906         bh_base[IMMEDIATE_BH].routine = immediate_bh;
  907         if (request_irq(TIMER_IRQ, do_timer, 0, "timer") != 0)
  908                 panic("Could not allocate timer IRQ!");
  909         enable_bh(TIMER_BH);
  910         enable_bh(TQUEUE_BH);
  911         enable_bh(IMMEDIATE_BH);
  912 }

/* [previous][next][first][last][top][bottom][index][help] */