root/kernel/sched.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. add_to_runqueue
  2. del_from_runqueue
  3. wake_up_process
  4. process_timeout
  5. schedule
  6. sys_pause
  7. wake_up
  8. wake_up_interruptible
  9. __down
  10. __sleep_on
  11. interruptible_sleep_on
  12. sleep_on
  13. add_timer
  14. del_timer
  15. count_active_tasks
  16. calc_load
  17. second_overflow
  18. timer_bh
  19. tqueue_bh
  20. immediate_bh
  21. do_timer
  22. sys_alarm
  23. sys_getpid
  24. sys_getppid
  25. sys_getuid
  26. sys_geteuid
  27. sys_getgid
  28. sys_getegid
  29. sys_nice
  30. show_task
  31. show_state
  32. sched_init

   1 /*
   2  *  linux/kernel/sched.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  * 'sched.c' is the main kernel file. It contains scheduling primitives
   9  * (sleep_on, wakeup, schedule etc) as well as a number of simple system
  10  * call functions (type getpid(), which just extracts a field from
  11  * current-task
  12  */
  13 
  14 #include <linux/config.h>
  15 #include <linux/signal.h>
  16 #include <linux/sched.h>
  17 #include <linux/timer.h>
  18 #include <linux/kernel.h>
  19 #include <linux/kernel_stat.h>
  20 #include <linux/fdreg.h>
  21 #include <linux/errno.h>
  22 #include <linux/time.h>
  23 #include <linux/ptrace.h>
  24 #include <linux/delay.h>
  25 #include <linux/interrupt.h>
  26 #include <linux/tqueue.h>
  27 #include <linux/resource.h>
  28 #include <linux/mm.h>
  29 
  30 #include <asm/system.h>
  31 #include <asm/io.h>
  32 #include <asm/segment.h>
  33 #include <asm/pgtable.h>
  34 
  35 #define TIMER_IRQ 0
  36 
  37 #include <linux/timex.h>
  38 
   39 /*
   40  * kernel variables
   41  */
   42 long tick = 1000000 / HZ;               /* timer interrupt period */
   43 volatile struct timeval xtime;          /* The current time */
   44 int tickadj = 500/HZ;                   /* microsecs */
   45 
      /*
       * Task queues: tq_timer is run from the timer bottom half (tqueue_bh),
       * tq_immediate from immediate_bh, and tq_scheduler at every schedule().
       */
   46 DECLARE_TASK_QUEUE(tq_timer);
   47 DECLARE_TASK_QUEUE(tq_immediate);
   48 DECLARE_TASK_QUEUE(tq_scheduler);
   49 
   50 /*
   51  * phase-lock loop variables
   52  */
   53 int time_status = TIME_BAD;     /* clock synchronization status */
   54 long time_offset = 0;           /* time adjustment (us) */
   55 long time_constant = 0;         /* pll time constant */
   56 long time_tolerance = MAXFREQ;  /* frequency tolerance (ppm) */
   57 long time_precision = 1;        /* clock precision (us) */
   58 long time_maxerror = 0x70000000;/* maximum error */
   59 long time_esterror = 0x70000000;/* estimated error */
   60 long time_phase = 0;            /* phase offset (scaled us) */
   61 long time_freq = 0;             /* frequency offset (scaled ppm) */
   62 long time_adj = 0;              /* tick adjust (scaled 1 / HZ) */
   63 long time_reftime = 0;          /* time at last adjustment (s) */
   64 
      /* adjtime() state: total correction outstanding, and the per-tick step */
   65 long time_adjust = 0;
   66 long time_adjust_step = 0;
   67 
      /* set to ask for a reschedule on return to user mode; cleared in schedule() */
   68 int need_resched = 0;
   69 unsigned long event = 0;
   70 
   71 extern int _setitimer(int, struct itimerval *, struct itimerval *);
   72 unsigned long * prof_buffer = NULL;
   73 unsigned long prof_len = 0;
   74 
   75 #define _S(nr) (1<<((nr)-1))
   76 
   77 extern void mem_use(void);
   78 
   79 extern int timer_interrupt(void);
   80  
      /*
       * Static stacks and mm for task 0 (the idle/init task).  The first
       * word is STACK_MAGIC — show_task() scans the kernel stack page for
       * the first non-zero word to estimate free stack space.
       */
   81 static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
   82 unsigned long init_user_stack[1024] = { STACK_MAGIC, };
   83 static struct vm_area_struct init_mmap = INIT_MMAP;
   84 struct task_struct init_task = INIT_TASK;
   85 
   86 unsigned long volatile jiffies=0;
   87 
   88 struct task_struct *current = &init_task;
   89 struct task_struct *last_task_used_math = NULL;
   90 
   91 struct task_struct * task[NR_TASKS] = {&init_task, };
   92 
   93 struct kernel_stat kstat = { 0 };
   95 static inline void add_to_runqueue(struct task_struct * p)
      /* [previous][next][first][last][top][bottom][index][help] */
   96 {
      /*
       * Insert "p" at the head of the doubly-linked circular run-queue,
       * which is anchored at init_task.  Called with interrupts disabled
       * (see wake_up_process).
       */
   97 #if 1   /* sanity tests */
   98         if (p->next_run || p->prev_run) {
   99                 printk("task already on run-queue\n");
  100                 return;
  101         }
  102 #endif
      /* Ask for a reschedule if the newcomer clearly deserves the CPU more. */
  103         if (p->counter > current->counter + 3)
  104                 need_resched = 1;
  105         nr_running++;
      /* Link p in between init_task and the old first element. */
  106         (p->next_run = init_task.next_run)->prev_run = p;
  107         p->prev_run = &init_task;
  108         init_task.next_run = p;
  109 }
 110 
  111 static inline void del_from_runqueue(struct task_struct * p)
      /* [previous][next][first][last][top][bottom][index][help] */
  112 {
      /*
       * Unlink "p" from the circular run-queue and clear its link fields
       * (NULL next_run marks a task as "not on the run-queue").  The idle
       * task (init_task) is the list anchor and may never be removed.
       */
  113         struct task_struct *next = p->next_run;
  114         struct task_struct *prev = p->prev_run;
  115 
  116 #if 1   /* sanity tests */
  117         if (!next || !prev) {
  118                 printk("task not on run-queue\n");
  119                 return;
  120         }
  121 #endif
  122         if (p == &init_task) {
  123                 printk("idle task may not sleep\n");
  124                 return;
  125         }
  126         nr_running--;
  127         next->prev_run = prev;
  128         prev->next_run = next;
  129         p->next_run = NULL;
  130         p->prev_run = NULL;
  131 }
 132 
  133 /*
  134  * Wake up a process. Put it on the run-queue if it's not
  135  * already there.  The "current" process is always on the
  136  * run-queue (except when the actual re-schedule is in
  137  * progress), and as such you're allowed to do the simpler
  138  * "current->state = TASK_RUNNING" to mark yourself runnable
  139  * without the overhead of this.
  140  */
  141 inline void wake_up_process(struct task_struct * p)
      /* [previous][next][first][last][top][bottom][index][help] */
  142 {
  143         unsigned long flags;
  144 
      /* Disable interrupts so the state change and queue insertion are atomic. */
  145         save_flags(flags);
  146         cli();
  147         p->state = TASK_RUNNING;
      /* next_run == NULL means the task is not on the run-queue yet. */
  148         if (!p->next_run)
  149                 add_to_runqueue(p);
  150         restore_flags(flags);
  151 }
 152 
 153 static void process_timeout(unsigned long __data)
     /* [previous][next][first][last][top][bottom][index][help] */
 154 {
 155         struct task_struct * p = (struct task_struct *) __data;
 156 
 157         p->timeout = 0;
 158         wake_up_process(p);
 159 }
 160 
  161 /*
  162  *  'schedule()' is the scheduler function. It's a very simple and nice
  163  * scheduler: it's not perfect, but certainly works for most things.
  164  *
  165  * The goto is "interesting".
  166  *
  167  *   NOTE!!  Task 0 is the 'idle' task, which gets called when no other
  168  * tasks can run. It can not be killed, and it cannot sleep. The 'state'
  169  * information in task[0] is never used.
  170  */
  171 asmlinkage void schedule(void)
      /* [previous][next][first][last][top][bottom][index][help] */
  172 {
  173         int c;
  174         struct task_struct * p;
  175         struct task_struct * next;
  176         unsigned long timeout = 0;
  177 
  178 /* check alarm, wake up any interruptible tasks that have got a signal */
  179 
      /* Scheduling from interrupt context is a bug; report and limp on. */
  180         if (intr_count) {
  181                 printk("Aiee: scheduling in interrupt\n");
  182                 intr_count = 0;
  183         }
  184         run_task_queue(&tq_scheduler);
  185 
  186         need_resched = 0;
  187         cli();
      /*
       * Decide whether "current" stays runnable.  Note the control flow:
       *  - TASK_INTERRUPTIBLE with a pending unblocked signal, or an
       *    already-expired timeout, jumps to makerunnable and stays queued;
       *  - otherwise TASK_INTERRUPTIBLE *falls through* into default and is
       *    removed from the run-queue (with "timeout" carrying any pending
       *    wakeup time);
       *  - TASK_RUNNING stays on the run-queue untouched.
       */
  188         switch (current->state) {
  189                 case TASK_INTERRUPTIBLE:
  190                         if (current->signal & ~current->blocked)
  191                                 goto makerunnable;
  192                         timeout = current->timeout;
  193                         if (timeout && (timeout <= jiffies)) {
  194                                 current->timeout = 0;
  195                                 timeout = 0;
  196                 makerunnable:
  197                                 current->state = TASK_RUNNING;
  198                                 break;
  199                         }
  200                 default:
  201                         del_from_runqueue(current);
  202                 case TASK_RUNNING:
  203         }
  204         p = init_task.next_run;
  205         sti();
  206 
  207 /*
  208  * Note! there may appear new tasks on the run-queue during this, as
  209  * interrupts are enabled. However, they will be put on front of the
  210  * list, so our list starting at "p" is essentially fixed.
  211  */
  212 /* this is the scheduler proper: */
      /* Pick the runnable task with the largest remaining time slice. */
  213         c = -1000;
  214         next = &init_task;
  215         while (p != &init_task) {
  216                 if (p->counter > c)
  217                         c = p->counter, next = p;
  218                 p = p->next_run;
  219         }
  220 
  221         /* if all runnable processes have "counter == 0", re-calculate counters */
  222         if (!c) {
  223                 for_each_task(p)
  224                         p->counter = (p->counter >> 1) + p->priority;
  225         }
  226         if (current != next) {
      /*
       * The wakeup timer lives on this kernel stack frame; it is armed
       * before the context switch and removed right after we are switched
       * back in, so the frame is guaranteed to still be alive.
       */
  227                 struct timer_list timer;
  228 
  229                 kstat.context_swtch++;
  230                 if (timeout) {
  231                         init_timer(&timer);
  232                         timer.expires = timeout - jiffies;
  233                         timer.data = (unsigned long) current;
  234                         timer.function = process_timeout;
  235                         add_timer(&timer);
  236                 }
  237                 switch_to(next);
  238                 if (timeout)
  239                         del_timer(&timer);
  240         }
  241 }
 242 
  243 asmlinkage int sys_pause(void)
      /* [previous][next][first][last][top][bottom][index][help] */
  244 {
      /* Sleep until a signal arrives; pause(2) always returns -ERESTARTNOHAND. */
  245         current->state = TASK_INTERRUPTIBLE;
  246         schedule();
  247         return -ERESTARTNOHAND;
  248 }
 249 
  250 /*
  251  * wake_up doesn't wake up stopped processes - they have to be awakened
  252  * with signals or similar.
  253  *
  254  * Note that this doesn't need cli-sti pairs: interrupts may not change
  255  * the wait-queue structures directly, but only call wake_up() to wake
  256  * a process. The process itself must remove the queue once it has woken.
  257  */
  258 void wake_up(struct wait_queue **q)
      /* [previous][next][first][last][top][bottom][index][help] */
  259 {
  260         struct wait_queue *tmp;
  261         struct task_struct * p;
  262 
  263         if (!q || !(tmp = *q))
  264                 return;
      /*
       * Walk the circular wait-queue and wake every sleeping (interruptible
       * or uninterruptible) task.  A NULL next pointer means the circular
       * list is corrupt; dump diagnostics and bail out.
       */
  265         do {
  266                 if ((p = tmp->task) != NULL) {
  267                         if ((p->state == TASK_UNINTERRUPTIBLE) ||
  268                             (p->state == TASK_INTERRUPTIBLE))
  269                                 wake_up_process(p);
  270                 }
  271                 if (!tmp->next) {
  272                         printk("wait_queue is bad (eip = %p)\n",
  273                                 __builtin_return_address(0));
  274                         printk("        q = %p\n",q);
  275                         printk("       *q = %p\n",*q);
  276                         printk("      tmp = %p\n",tmp);
  277                         break;
  278                 }
  279                 tmp = tmp->next;
  280         } while (tmp != *q);
  281 }
 282 
  283 void wake_up_interruptible(struct wait_queue **q)
      /* [previous][next][first][last][top][bottom][index][help] */
  284 {
      /*
       * Identical to wake_up() except that only TASK_INTERRUPTIBLE sleepers
       * are woken; uninterruptible sleepers on the same queue are left alone.
       */
  285         struct wait_queue *tmp;
  286         struct task_struct * p;
  287 
  288         if (!q || !(tmp = *q))
  289                 return;
  290         do {
  291                 if ((p = tmp->task) != NULL) {
  292                         if (p->state == TASK_INTERRUPTIBLE)
  293                                 wake_up_process(p);
  294                 }
  295                 if (!tmp->next) {
  296                         printk("wait_queue is bad (eip = %p)\n",
  297                                 __builtin_return_address(0));
  298                         printk("        q = %p\n",q);
  299                         printk("       *q = %p\n",*q);
  300                         printk("      tmp = %p\n",tmp);
  301                         break;
  302                 }
  303                 tmp = tmp->next;
  304         } while (tmp != *q);
  305 }
 306 
  307 void __down(struct semaphore * sem)
      /* [previous][next][first][last][top][bottom][index][help] */
  308 {
      /*
       * Semaphore slow path: sleep (uninterruptibly) until sem->count
       * becomes positive.  The state is set to TASK_UNINTERRUPTIBLE
       * *before* each count check, so an up() that fires in between
       * simply makes the subsequent schedule() return immediately —
       * no wakeup can be lost.
       */
  309         struct wait_queue wait = { current, NULL };
  310         add_wait_queue(&sem->wait, &wait);
  311         current->state = TASK_UNINTERRUPTIBLE;
  312         while (sem->count <= 0) {
  313                 schedule();
  314                 current->state = TASK_UNINTERRUPTIBLE;
  315         }
  316         current->state = TASK_RUNNING;
  317         remove_wait_queue(&sem->wait, &wait);
  318 }
 319 
  320 static inline void __sleep_on(struct wait_queue **p, int state)
      /* [previous][next][first][last][top][bottom][index][help] */
  321 {
      /*
       * Common body of sleep_on()/interruptible_sleep_on(): put the caller
       * on wait-queue *p in the given sleep state and schedule away.  The
       * wait_queue entry lives on this stack frame and is removed before
       * returning.  Interrupt flags are restored to the caller's state.
       */
  322         unsigned long flags;
  323         struct wait_queue wait = { current, NULL };
  324 
  325         if (!p)
  326                 return;
  327         if (current == task[0])
  328                 panic("task[0] trying to sleep");
      /* Set the state before queueing so a racing wake_up() is not lost. */
  329         current->state = state;
  330         add_wait_queue(p, &wait);
  331         save_flags(flags);
  332         sti();
  333         schedule();
  334         remove_wait_queue(p, &wait);
  335         restore_flags(flags);
  336 }
 337 
  338 void interruptible_sleep_on(struct wait_queue **p)
      /* [previous][next][first][last][top][bottom][index][help] */
  339 {
      /* Sleep on *p; signals (and wake_up_interruptible) terminate the sleep. */
  340         __sleep_on(p,TASK_INTERRUPTIBLE);
  341 }
 342 
  343 void sleep_on(struct wait_queue **p)
      /* [previous][next][first][last][top][bottom][index][help] */
  344 {
      /* Sleep on *p uninterruptibly: only an explicit wake_up() ends the sleep. */
  345         __sleep_on(p,TASK_UNINTERRUPTIBLE);
  346 }
 347 
  348 /*
  349  * The head for the timer-list has a "expires" field of MAX_UINT,
  350  * and the sorting routine counts on this..
  351  */
      /* Circular, expires-sorted doubly-linked list of pending kernel timers. */
  352 static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
      /* Non-zero selects the list-scanning (debug-checked) del_timer() variant. */
  353 #define SLOW_BUT_DEBUGGING_TIMERS 1
 354 
  355 void add_timer(struct timer_list * timer)
      /* [previous][next][first][last][top][bottom][index][help] */
  356 {
      /*
       * Insert "timer" into the expires-sorted timer list.  On entry
       * timer->expires is *relative* (in jiffies); it is converted to an
       * absolute time here.  The head's ~0 expires value guarantees the
       * insertion scan terminates.
       */
  357         unsigned long flags;
  358         struct timer_list *p;
  359 
  360 #if SLOW_BUT_DEBUGGING_TIMERS
  361         if (timer->next || timer->prev) {
  362                 printk("add_timer() called with non-zero list from %p\n",
  363                         __builtin_return_address(0));
  364                 return;
  365         }
  366 #endif
  367         p = &timer_head;
  368         timer->expires += jiffies;
  369         save_flags(flags);
  370         cli();
      /* Find the first entry that expires no earlier than this timer. */
  371         do {
  372                 p = p->next;
  373         } while (timer->expires > p->expires);
  374         timer->next = p;
  375         timer->prev = p->prev;
  376         p->prev = timer;
  377         timer->prev->next = timer;
  378         restore_flags(flags);
  379 }
 380 
  381 int del_timer(struct timer_list * timer)
      /* [previous][next][first][last][top][bottom][index][help] */
  382 {
      /*
       * Remove "timer" from the timer list.  Returns 1 if the timer was
       * pending (and converts its expires field back to a relative value),
       * 0 if it was not queued.  The debug variant scans the whole list to
       * verify the timer really is on it; the fast variant just trusts the
       * non-NULL next pointer.
       */
  383         unsigned long flags;
  384 #if SLOW_BUT_DEBUGGING_TIMERS
  385         struct timer_list * p;
  386 
  387         p = &timer_head;
  388         save_flags(flags);
  389         cli();
  390         while ((p = p->next) != &timer_head) {
  391                 if (p == timer) {
  392                         timer->next->prev = timer->prev;
  393                         timer->prev->next = timer->next;
  394                         timer->next = timer->prev = NULL;
  395                         restore_flags(flags);
  396                         timer->expires -= jiffies;
  397                         return 1;
  398                 }
  399         }
      /* Not on the list but with stale link fields: report the caller. */
  400         if (timer->next || timer->prev)
  401                 printk("del_timer() called from %p with timer not initialized\n",
  402                         __builtin_return_address(0));
  403         restore_flags(flags);
  404         return 0;
  405 #else   
  406         save_flags(flags);
  407         cli();
  408         if (timer->next) {
  409                 timer->next->prev = timer->prev;
  410                 timer->prev->next = timer->next;
  411                 timer->next = timer->prev = NULL;
  412                 restore_flags(flags);
  413                 timer->expires -= jiffies;
  414                 return 1;
  415         }
  416         restore_flags(flags);
  417         return 0;
  418 #endif
  419 }
 420 
      /* Bitmask of active entries in the old-style static timer_table[]. */
  421 unsigned long timer_active = 0;
  422 struct timer_struct timer_table[32];
  423 
  424 /*
  425  * Hmm.. Changed this, as the GNU make sources (load.c) seems to
  426  * imply that avenrun[] is the standard name for this kind of thing.
  427  * Nothing else seems to be standardized: the fractional size etc
  428  * all seem to differ on different machines.
  429  */
      /* 1-, 5- and 15-minute load averages as fixed-point numbers. */
  430 unsigned long avenrun[3] = { 0,0,0 };
 431 
 432 /*
 433  * Nr of active tasks - counted in fixed-point numbers
 434  */
 435 static unsigned long count_active_tasks(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 436 {
 437         struct task_struct **p;
 438         unsigned long nr = 0;
 439 
 440         for(p = &LAST_TASK; p > &FIRST_TASK; --p)
 441                 if (*p && ((*p)->state == TASK_RUNNING ||
 442                            (*p)->state == TASK_UNINTERRUPTIBLE ||
 443                            (*p)->state == TASK_SWAPPING))
 444                         nr += FIXED_1;
 445         return nr;
 446 }
 447 
  448 static inline void calc_load(void)
      /* [previous][next][first][last][top][bottom][index][help] */
  449 {
      /*
       * Called once per tick from do_timer(); recomputes the exponentially
       * decaying load averages every LOAD_FREQ ticks using the static
       * countdown "count".
       */
  450         unsigned long active_tasks; /* fixed-point */
  451         static int count = LOAD_FREQ;
  452 
  453         if (count-- > 0)
  454                 return;
  455         count = LOAD_FREQ;
  456         active_tasks = count_active_tasks();
  457         CALC_LOAD(avenrun[0], EXP_1, active_tasks);
  458         CALC_LOAD(avenrun[1], EXP_5, active_tasks);
  459         CALC_LOAD(avenrun[2], EXP_15, active_tasks);
  460 }
 461 
  462 /*
  463  * this routine handles the overflow of the microsecond field
  464  *
  465  * The tricky bits of code to handle the accurate clock support
  466  * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
  467  * They were originally developed for SUN and DEC kernels.
  468  * All the kudos should go to Dave for this stuff.
  469  *
  470  * These were ported to Linux by Philip Gladstone.
  471  */
  472 static void second_overflow(void)
      /* [previous][next][first][last][top][bottom][index][help] */
  473 {
  474         long ltemp;
  475 
  476         /* Bump the maxerror field */
  477         time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
  478           0x70000000 : (time_maxerror + time_tolerance);
  479 
      /*
       * Run the PLL: derive the per-tick phase adjustment (time_adj) from
       * the remaining offset, and consume the corresponding amount of
       * time_offset.  The asymmetric +/-1 handling keeps the rounding
       * symmetric for negative offsets.
       */
  480         /* Run the PLL */
  481         if (time_offset < 0) {
  482                 ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
  483                 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
  484                 time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
  485                 time_adj = - time_adj;
  486         } else if (time_offset > 0) {
  487                 ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
  488                 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
  489                 time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
  490         } else {
  491                 time_adj = 0;
  492         }
  493 
  494         time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
  495             + FINETUNE;
  496 
  497         /* Handle the leap second stuff */
  498         switch (time_status) {
  499                 case TIME_INS:
  500                 /* ugly divide should be replaced */
  501                 if (xtime.tv_sec % 86400 == 0) {
  502                         xtime.tv_sec--; /* !! */
  503                         time_status = TIME_OOP;
  504                         printk("Clock: inserting leap second 23:59:60 UTC\n");
  505                 }
  506                 break;
  507 
  508                 case TIME_DEL:
  509                 /* ugly divide should be replaced */
  510                 if (xtime.tv_sec % 86400 == 86399) {
  511                         xtime.tv_sec++;
  512                         time_status = TIME_OK;
  513                         printk("Clock: deleting leap second 23:59:59 UTC\n");
  514                 }
  515                 break;
  516 
  517                 case TIME_OOP:
  518                 time_status = TIME_OK;
  519                 break;
  520         }
  521 }
 522 
  523 /*
  524  * disregard lost ticks for now.. We don't care enough.
  525  */
  526 static void timer_bh(void * unused)
      /* [previous][next][first][last][top][bottom][index][help] */
  527 {
      /*
       * Timer bottom half.  First runs all expired dynamic timers from the
       * sorted list (each timer is unlinked under cli, but its handler is
       * called with interrupts enabled), then runs expired entries from
       * the old-style static timer_table.
       */
  528         unsigned long mask;
  529         struct timer_struct *tp;
  530         struct timer_list * timer;
  531 
  532         cli();
  533         while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
      /* Snapshot function+data before unlinking; the handler may re-add it. */
  534                 void (*fn)(unsigned long) = timer->function;
  535                 unsigned long data = timer->data;
  536                 timer->next->prev = timer->prev;
  537                 timer->prev->next = timer->next;
  538                 timer->next = timer->prev = NULL;
  539                 sti();
  540                 fn(data);
  541                 cli();
  542         }
  543         sti();
  544         
      /* mask doubles each iteration, walking timer_table bit by bit. */
  545         for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
  546                 if (mask > timer_active)
  547                         break;
  548                 if (!(mask & timer_active))
  549                         continue;
  550                 if (tp->expires > jiffies)
  551                         continue;
  552                 timer_active &= ~mask;
  553                 tp->fn();
  554                 sti();
  555         }
  556 }
 557 
  558 void tqueue_bh(void * unused)
      /* [previous][next][first][last][top][bottom][index][help] */
  559 {
      /* Bottom half: run everything queued on tq_timer since the last tick. */
  560         run_task_queue(&tq_timer);
  561 }
 562 
  563 void immediate_bh(void * unused)
      /* [previous][next][first][last][top][bottom][index][help] */
  564 {
      /* Bottom half: drain the "run as soon as possible" task queue. */
  565         run_task_queue(&tq_immediate);
  566 }
 567 
  568 /*
  569  * The int argument is really a (struct pt_regs *), in case the
  570  * interrupt wants to know from where it was called. The timer
  571  * irq uses this to decide if it should update the user or system
  572  * times.
  573  */
  574 static void do_timer(int irq, struct pt_regs * regs)
      /* [previous][next][first][last][top][bottom][index][help] */
  575 {
      /*
       * Timer interrupt handler: advances xtime by one (NTP/adjtime-
       * corrected) tick, updates jiffies and the load average, does
       * per-task accounting (utime/stime, itimers, CPU rlimits, time
       * slice), and marks the timer/tqueue bottom halves for execution.
       */
  576         unsigned long mask;
  577         struct timer_struct *tp;
  578         /* last time the cmos clock got updated */
  579         static long last_rtc_update=0;
  580         extern int set_rtc_mmss(unsigned long);
  581 
  582         long ltemp, psecs;
  583 
  584         /* Advance the phase, once it gets to one microsecond, then
  585          * advance the tick more.
  586          */
  587         time_phase += time_adj;
  588         if (time_phase < -FINEUSEC) {
  589                 ltemp = -time_phase >> SHIFT_SCALE;
  590                 time_phase += ltemp << SHIFT_SCALE;
  591                 xtime.tv_usec += tick + time_adjust_step - ltemp;
  592         }
  593         else if (time_phase > FINEUSEC) {
  594                 ltemp = time_phase >> SHIFT_SCALE;
  595                 time_phase -= ltemp << SHIFT_SCALE;
  596                 xtime.tv_usec += tick + time_adjust_step + ltemp;
  597         } else
  598                 xtime.tv_usec += tick + time_adjust_step;
  599 
  600         if (time_adjust)
  601         {
  602             /* We are doing an adjtime thing. 
  603              *
  604              * Modify the value of the tick for next time.
  605              * Note that a positive delta means we want the clock
  606              * to run fast. This means that the tick should be bigger
  607              *
  608              * Limit the amount of the step for *next* tick to be
  609              * in the range -tickadj .. +tickadj
  610              */
  611              if (time_adjust > tickadj)
  612                time_adjust_step = tickadj;
  613              else if (time_adjust < -tickadj)
  614                time_adjust_step = -tickadj;
  615              else
  616                time_adjust_step = time_adjust;
  617              
  618             /* Reduce by this step the amount of time left  */
  619             time_adjust -= time_adjust_step;
  620         }
  621         else
  622             time_adjust_step = 0;
  623 
      /* Carry microseconds into seconds; this also drives the NTP PLL. */
  624         if (xtime.tv_usec >= 1000000) {
  625             xtime.tv_usec -= 1000000;
  626             xtime.tv_sec++;
  627             second_overflow();
  628         }
  629 
  630         /* If we have an externally synchronized Linux clock, then update
  631          * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
  632          * called as close as possible to 500 ms before the new second starts.
  633          */
  634         if (time_status != TIME_BAD && xtime.tv_sec > last_rtc_update + 660 &&
  635             xtime.tv_usec > 500000 - (tick >> 1) &&
  636             xtime.tv_usec < 500000 + (tick >> 1))
  637           if (set_rtc_mmss(xtime.tv_sec) == 0)
  638             last_rtc_update = xtime.tv_sec;
  639           else
  640             last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
  641 
  642         jiffies++;
  643         calc_load();
      /* Charge the tick to user or system time, skipping the idle task. */
  644         if (user_mode(regs)) {
  645                 current->utime++;
  646                 if (current != task[0]) {
  647                         if (current->priority < 15)
  648                                 kstat.cpu_nice++;
  649                         else
  650                                 kstat.cpu_user++;
  651                 }
  652                 /* Update ITIMER_VIRT for current task if not in a system call */
  653                 if (current->it_virt_value && !(--current->it_virt_value)) {
  654                         current->it_virt_value = current->it_virt_incr;
  655                         send_sig(SIGVTALRM,current,1);
  656                 }
  657         } else {
  658                 current->stime++;
  659                 if(current != task[0])
  660                         kstat.cpu_system++;
  661 #ifdef CONFIG_PROFILE
  662                 if (prof_buffer && current != task[0]) {
  663                         extern int _stext;
  664                         unsigned long eip = regs->eip - (unsigned long) &_stext;
  665                         eip >>= CONFIG_PROFILE_SHIFT;
  666                         if (eip < prof_len)
  667                                 prof_buffer[eip]++;
  668                 }
  669 #endif
  670         }
  671         /*
  672          * check the cpu time limit on the process.
  673          */
  674         if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
  675             (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
  676                 send_sig(SIGKILL, current, 1);
  677         if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
  678             (((current->stime + current->utime) % HZ) == 0)) {
  679                 psecs = (current->stime + current->utime) / HZ;
  680                 /* send when equal */
  681                 if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
  682                         send_sig(SIGXCPU, current, 1);
  683                 /* and every five seconds thereafter. */
  684                 else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
  685                         ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
  686                         send_sig(SIGXCPU, current, 1);
  687         }
  688 
      /* Time slice accounting: a depleted counter requests a reschedule. */
  689         if (current != task[0] && 0 > --current->counter) {
  690                 current->counter = 0;
  691                 need_resched = 1;
  692         }
  693         /* Update ITIMER_PROF for the current task */
  694         if (current->it_prof_value && !(--current->it_prof_value)) {
  695                 current->it_prof_value = current->it_prof_incr;
  696                 send_sig(SIGPROF,current,1);
  697         }
      /* If any static or list timer has expired, run timer_bh later. */
  698         for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
  699                 if (mask > timer_active)
  700                         break;
  701                 if (!(mask & timer_active))
  702                         continue;
  703                 if (tp->expires > jiffies)
  704                         continue;
  705                 mark_bh(TIMER_BH);
  706         }
  707         cli();
      /* NOTE(review): strict "<" matches timer_bh's expiry test, but a timer
       * expiring exactly now waits one extra tick — confirm intentional. */
  708         if (timer_head.next->expires < jiffies)
  709                 mark_bh(TIMER_BH);
  710         if (tq_timer != &tq_last)
  711                 mark_bh(TQUEUE_BH);
  712         sti();
  713 }
 714 
 715 asmlinkage int sys_alarm(long seconds)
     /* [previous][next][first][last][top][bottom][index][help] */
 716 {
 717         struct itimerval it_new, it_old;
 718 
 719         it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
 720         it_new.it_value.tv_sec = seconds;
 721         it_new.it_value.tv_usec = 0;
 722         _setitimer(ITIMER_REAL, &it_new, &it_old);
 723         return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
 724 }
 725 
  726 asmlinkage int sys_getpid(void)
      /* [previous][next][first][last][top][bottom][index][help] */
  727 {
      /* getpid(2): return the calling process's pid. */
  728         return current->pid;
  729 }
 730 
  731 asmlinkage int sys_getppid(void)
      /* [previous][next][first][last][top][bottom][index][help] */
  732 {
      /* getppid(2): return the pid of the original parent (p_opptr). */
  733         return current->p_opptr->pid;
  734 }
 735 
  736 asmlinkage int sys_getuid(void)
      /* [previous][next][first][last][top][bottom][index][help] */
  737 {
      /* getuid(2): return the real user id. */
  738         return current->uid;
  739 }
 740 
  741 asmlinkage int sys_geteuid(void)
      /* [previous][next][first][last][top][bottom][index][help] */
  742 {
      /* geteuid(2): return the effective user id. */
  743         return current->euid;
  744 }
 745 
  746 asmlinkage int sys_getgid(void)
      /* [previous][next][first][last][top][bottom][index][help] */
  747 {
      /* getgid(2): return the real group id. */
  748         return current->gid;
  749 }
 750 
  751 asmlinkage int sys_getegid(void)
      /* [previous][next][first][last][top][bottom][index][help] */
  752 {
      /* getegid(2): return the effective group id. */
  753         return current->egid;
  754 }
 755 
 756 asmlinkage int sys_nice(long increment)
     /* [previous][next][first][last][top][bottom][index][help] */
 757 {
 758         int newprio;
 759 
 760         if (increment < 0 && !suser())
 761                 return -EPERM;
 762         newprio = current->priority - increment;
 763         if (newprio < 1)
 764                 newprio = 1;
 765         if (newprio > 35)
 766                 newprio = 35;
 767         current->priority = newprio;
 768         return 0;
 769 }
 770 
  771 static void show_task(int nr,struct task_struct * p)
      /* [previous][next][first][last][top][bottom][index][help] */
  772 {
      /*
       * Print one row of the show_state() table for task "p" (slot "nr";
       * the current task's slot is printed negated).  Columns: name, slot,
       * state letter, saved PC, free kernel stack estimate, pid, parent,
       * child and sibling pids.
       */
  773         unsigned long free;
  774         static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
  775 
  776         printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
  777         if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
  778                 printk(stat_nam[p->state]);
  779         else
  780                 printk(" ");
      /* Pointer width decides the PC column format (32- vs 64-bit build). */
  781 #if ((~0UL) == 0xffffffff)
  782         if (p == current)
  783                 printk(" current  ");
  784         else
  785                 printk(" %08lX ", thread_saved_pc(&p->tss));
  786 #else
  787         if (p == current)
  788                 printk("   current task   ");
  789         else
  790                 printk(" %016lx ", thread_saved_pc(&p->tss));
  791 #endif
      /* Estimate free stack: scan up from the bottom for the first used word. */
  792         for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
  793                 if (((unsigned long *)p->kernel_stack_page)[free])
  794                         break;
  795         }
  796         printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
  797         if (p->p_cptr)
  798                 printk("%5d ", p->p_cptr->pid);
  799         else
  800                 printk("      ");
  801         if (p->p_ysptr)
  802                 printk("%7d", p->p_ysptr->pid);
  803         else
  804                 printk("       ");
  805         if (p->p_osptr)
  806                 printk(" %5d\n", p->p_osptr->pid);
  807         else
  808                 printk("\n");
  809 }
 810 
  811 void show_state(void)
      /* [previous][next][first][last][top][bottom][index][help] */
  812 {
      /* Dump a one-line summary of every task slot via show_task(). */
  813         int i;
  814 
  815 #if ((~0UL) == 0xffffffff)
  816         printk("\n"
  817                "                         free                        sibling\n");
  818         printk("  task             PC    stack   pid father child younger older\n");
  819 #else
  820         printk("\n"
  821                "                                 free                        sibling\n");
  822         printk("  task                 PC        stack   pid father child younger older\n");
  823 #endif
  824         for (i=0 ; i<NR_TASKS ; i++)
  825                 if (task[i])
  826                         show_task(i,task[i]);
  827 }
 828 
  829 void sched_init(void)
      /* [previous][next][first][last][top][bottom][index][help] */
  830 {
      /*
       * Boot-time scheduler setup: install and enable the timer, tqueue
       * and immediate bottom halves, and claim the timer interrupt.
       */
  831         bh_base[TIMER_BH].routine = timer_bh;
  832         bh_base[TQUEUE_BH].routine = tqueue_bh;
  833         bh_base[IMMEDIATE_BH].routine = immediate_bh;
  834         if (request_irq(TIMER_IRQ, do_timer, 0, "timer") != 0)
  835                 panic("Could not allocate timer IRQ!");
  836         enable_bh(TIMER_BH);
  837         enable_bh(TQUEUE_BH);
  838         enable_bh(IMMEDIATE_BH);
  839 }

/* [previous][next][first][last][top][bottom][index][help] */