root/kernel/sched.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. schedule
  2. sys_pause
  3. wake_up
  4. wake_up_interruptible
  5. __down
  6. __sleep_on
  7. interruptible_sleep_on
  8. sleep_on
  9. add_timer
  10. del_timer
  11. count_active_tasks
  12. calc_load
  13. second_overflow
  14. timer_bh
  15. tqueue_bh
  16. immediate_bh
  17. do_timer
  18. sys_alarm
  19. sys_getpid
  20. sys_getppid
  21. sys_getuid
  22. sys_geteuid
  23. sys_getgid
  24. sys_getegid
  25. sys_nice
  26. show_task
  27. show_state
  28. sched_init

   1 /*
   2  *  linux/kernel/sched.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  * 'sched.c' is the main kernel file. It contains scheduling primitives
   9  * (sleep_on, wakeup, schedule etc) as well as a number of simple system
  10  * call functions (type getpid(), which just extracts a field from
 * the current task).
  12  */
  13 
  14 #include <linux/config.h>
  15 #include <linux/signal.h>
  16 #include <linux/sched.h>
  17 #include <linux/timer.h>
  18 #include <linux/kernel.h>
  19 #include <linux/kernel_stat.h>
  20 #include <linux/fdreg.h>
  21 #include <linux/errno.h>
  22 #include <linux/time.h>
  23 #include <linux/ptrace.h>
  24 #include <linux/delay.h>
  25 #include <linux/interrupt.h>
  26 #include <linux/tqueue.h>
  27 #include <linux/resource.h>
  28 #include <linux/mm.h>
  29 
  30 #include <asm/system.h>
  31 #include <asm/io.h>
  32 #include <asm/segment.h>
  33 #include <asm/pgtable.h>
  34 
  35 #define TIMER_IRQ 0
  36 
  37 #include <linux/timex.h>
  38 
/*
 * kernel variables
 */
long tick = 1000000 / HZ;               /* timer interrupt period */
volatile struct timeval xtime;          /* The current time */
int tickadj = 500/HZ;                   /* microsecs: max adjtime slew per tick */

/* task queues run by the timer and immediate bottom halves */
DECLARE_TASK_QUEUE(tq_timer);
DECLARE_TASK_QUEUE(tq_immediate);

/*
 * phase-lock loop variables (NTP clock discipline, see second_overflow())
 */
int time_status = TIME_BAD;     /* clock synchronization status */
long time_offset = 0;           /* time adjustment (us) */
long time_constant = 0;         /* pll time constant */
long time_tolerance = MAXFREQ;  /* frequency tolerance (ppm) */
long time_precision = 1;        /* clock precision (us) */
long time_maxerror = 0x70000000;/* maximum error */
long time_esterror = 0x70000000;/* estimated error */
long time_phase = 0;            /* phase offset (scaled us) */
long time_freq = 0;             /* frequency offset (scaled ppm) */
long time_adj = 0;              /* tick adjust (scaled 1 / HZ) */
long time_reftime = 0;          /* time at last adjustment (s) */

long time_adjust = 0;           /* adjtime() delta still to be applied (us) */
long time_adjust_step = 0;      /* portion of time_adjust applied this tick */

int need_resched = 0;           /* set when schedule() should be called soon */
unsigned long event = 0;
  69 
extern int _setitimer(int, struct itimerval *, struct itimerval *);
/* kernel profiling buffer (see CONFIG_PROFILE use in do_timer()) */
unsigned long * prof_buffer = NULL;
unsigned long prof_len = 0;

#define _S(nr) (1<<((nr)-1))    /* signal number -> signal bitmask */

extern void mem_use(void);

extern int timer_interrupt(void);

/* static stacks for task 0; STACK_MAGIC in word 0 is the overflow sentinel */
static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
unsigned long init_user_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
struct task_struct init_task = INIT_TASK;

unsigned long volatile jiffies=0;       /* ticks since boot */

struct task_struct *current = &init_task;
struct task_struct *last_task_used_math = NULL;

struct task_struct * task[NR_TASKS] = {&init_task, };

struct kernel_stat kstat = { 0 };

/* ticks accumulated for ITIMER_REAL delivery, consumed by schedule() */
unsigned long itimer_ticks = 0;
unsigned long itimer_next = ~0;         /* earliest pending it_real expiry */
  97 /*
  98  *  'schedule()' is the scheduler function. It's a very simple and nice
  99  * scheduler: it's not perfect, but certainly works for most things.
 100  * The one thing you might take a look at is the signal-handler code here.
 101  *
 102  *   NOTE!!  Task 0 is the 'idle' task, which gets called when no other
 103  * tasks can run. It can not be killed, and it cannot sleep. The 'state'
 104  * information in task[0] is never used.
 105  *
 106  * The "confuse_gcc" goto is used only to get better assembly code..
 107  * Dijkstra probably hates me.
 108  */
asmlinkage void schedule(void)
{
	int c;
	struct task_struct * p;
	struct task_struct * next;
	unsigned long ticks;

/* check alarm, wake up any interruptible tasks that have got a signal */

	if (intr_count) {
		/* scheduling from interrupt context is a kernel bug;
		   complain and clear intr_count so we can limp on */
		printk("Aiee: scheduling in interrupt\n");
		intr_count = 0;
	}
	/* atomically fetch-and-clear the tick count accumulated by
	   do_timer() since the last schedule() */
	cli();
	ticks = itimer_ticks;
	itimer_ticks = 0;
	itimer_next = ~0;	/* recomputed as the minimum below */
	sti();
	need_resched = 0;
	p = &init_task;
	/* First pass over all tasks: deliver expired ITIMER_REAL timers
	   and wake interruptible sleepers that have a pending unblocked
	   signal or an expired timeout. */
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc1;
		if (ticks && p->it_real_value) {
			if (p->it_real_value <= ticks) {
				send_sig(SIGALRM, p, 1);
				if (!p->it_real_incr) {
					/* one-shot timer: disarm */
					p->it_real_value = 0;
					goto end_itimer;
				}
				/* periodic timer: step forward until it is
				   past the ticks we are consuming (missed
				   periods collapse into one SIGALRM) */
				do {
					p->it_real_value += p->it_real_incr;
				} while (p->it_real_value <= ticks);
			}
			p->it_real_value -= ticks;
			/* keep track of the earliest remaining expiry */
			if (p->it_real_value < itimer_next)
				itimer_next = p->it_real_value;
		}
end_itimer:
		if (p->state != TASK_INTERRUPTIBLE)
			continue;
		if (p->signal & ~p->blocked) {
			/* unblocked signal pending: make it runnable */
			p->state = TASK_RUNNING;
			continue;
		}
		if (p->timeout && p->timeout <= jiffies) {
			/* timed sleep expired */
			p->timeout = 0;
			p->state = TASK_RUNNING;
		}
	}
confuse_gcc1:

/* this is the scheduler proper: */
#if 0
	/* give processes that go to sleep a bit higher priority.. */
	/* This depends on the values for TASK_XXX */
	/* This gives smoother scheduling for some things, but */
	/* can be very unfair under some circumstances, so.. */
	if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
	    current->counter < current->priority*2) {
		++current->counter;
	}
#endif
	/* Second pass: pick the runnable task with the largest remaining
	   time slice (counter); init_task (the idle task) is the default. */
	c = -1000;
	next = p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc2;
		if (p->state == TASK_RUNNING && p->counter > c)
			c = p->counter, next = p;
	}
confuse_gcc2:
	if (!c) {
		/* every runnable task has used up its slice: recharge all
		   counters (sleepers keep half their old value as a bonus) */
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
	if (current == next)
		return;
	kstat.context_swtch++;
	switch_to(next);
}
 190 
asmlinkage int sys_pause(void)
{
	/* Sleep until a signal arrives.  The state must be set before
	   calling schedule() so an intervening wakeup is not lost. */
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	/* pause() always returns -1/EINTR; ERESTARTNOHAND tells the
	   signal code never to restart this call */
	return -ERESTARTNOHAND;
}
 197 
 198 /*
 199  * wake_up doesn't wake up stopped processes - they have to be awakened
 200  * with signals or similar.
 201  *
 202  * Note that this doesn't need cli-sti pairs: interrupts may not change
 203  * the wait-queue structures directly, but only call wake_up() to wake
 204  * a process. The process itself must remove the queue once it has woken.
 205  */
/*
 * Wake every sleeper (interruptible or not) on the circular wait queue.
 * May be called from interrupt context: it only flips task states and
 * never modifies the queue links themselves.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	/* NULL queue pointer or empty queue: nothing to do */
	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE)) {
				p->state = TASK_RUNNING;
				/* ask for a reschedule if the woken task has
				   a clearly larger slice than the current one */
				if (p->counter > current->counter + 3)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			/* broken circular list: dump diagnostics and bail
			   out instead of dereferencing NULL / looping */
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk("        q = %p\n",q);
			printk("       *q = %p\n",*q);
			printk("      tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
 233 
/*
 * Like wake_up(), but only wakes TASK_INTERRUPTIBLE sleepers;
 * uninterruptible sleepers on the same queue are left alone.
 */
void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	/* NULL queue pointer or empty queue: nothing to do */
	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE) {
				p->state = TASK_RUNNING;
				/* ask for a reschedule if the woken task has
				   a clearly larger slice than the current one */
				if (p->counter > current->counter + 3)
					need_resched = 1;
			}
		}
		if (!tmp->next) {
			/* broken circular list: dump diagnostics and bail
			   out instead of dereferencing NULL / looping */
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk("        q = %p\n",q);
			printk("       *q = %p\n",*q);
			printk("      tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
 260 
/*
 * Slow path of down(): the semaphore count was not positive, so sleep
 * until it is.  The state is re-set to TASK_UNINTERRUPTIBLE on every
 * loop iteration because wake_up() marks us TASK_RUNNING, yet another
 * task may win the race for the semaphore before we run.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
 273 
/*
 * Common helper for sleep_on()/interruptible_sleep_on(): put the current
 * task on wait queue *p in the given state and schedule away.  The state
 * is set before add_wait_queue()/sti() so a wakeup cannot be missed;
 * interrupts are restored to their previous state on return.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	if (current == task[0])
		panic("task[0] trying to sleep");	/* idle task must never sleep */
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();		/* allow the wakeup interrupt while we sleep */
	schedule();
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
 291 
/* Sleep on queue *p until woken; signals also wake the task. */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}
 296 
/* Sleep on queue *p until explicitly woken; ignores signals. */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
 301 
/*
 * The head for the timer-list has a "expires" field of MAX_UINT,
 * and the sorting routine counts on this..
 */
static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
/* when non-zero, add_timer()/del_timer() verify list membership */
#define SLOW_BUT_DEBUGGING_TIMERS 1
 308 
/*
 * Insert a timer into the expiry-sorted doubly-linked timer list.
 * timer->expires is relative on entry and converted to absolute jiffies.
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	/* a timer with live links is already queued: refuse to re-add it */
	if (timer->next || timer->prev) {
		printk("add_timer() called with non-zero list from %p\n",
			__builtin_return_address(0));
		return;
	}
#endif
	p = &timer_head;
	timer->expires += jiffies;	/* relative -> absolute */
	save_flags(flags);
	cli();
	/* find the first entry expiring later; the head's ~0 expiry
	   guarantees the loop terminates */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	/* link the new timer in just before p */
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}
 334 
/*
 * Remove a timer from the list.  Returns 1 if the timer was pending
 * (and restores timer->expires to a relative value), 0 otherwise.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	struct timer_list * p;

	p = &timer_head;
	save_flags(flags);
	cli();
	/* debugging version: walk the list to prove the timer is really
	   queued before unlinking it */
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			/* back to a relative timeout, so the caller can
			   re-add_timer() it.
			   NOTE(review): jiffies may advance between
			   restore_flags() and this subtraction - confirm
			   that one tick of slack is acceptable here */
			timer->expires -= jiffies;
			return 1;
		}
	}
	if (timer->next || timer->prev)
		printk("del_timer() called from %p with timer not initialized\n",
			__builtin_return_address(0));
	restore_flags(flags);
	return 0;
#else
	save_flags(flags);
	cli();
	/* fast version: trust the links; a queued timer has next != NULL */
	if (timer->next) {
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		restore_flags(flags);
		timer->expires -= jiffies;
		return 1;
	}
	restore_flags(flags);
	return 0;
#endif
}
 374 
/* old-style static timer table: bit n of timer_active arms timer_table[n] */
unsigned long timer_active = 0;
struct timer_struct timer_table[32];

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seems to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 */
unsigned long avenrun[3] = { 0,0,0 };	/* 1, 5 and 15 minute load averages */

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
 389 static unsigned long count_active_tasks(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 390 {
 391         struct task_struct **p;
 392         unsigned long nr = 0;
 393 
 394         for(p = &LAST_TASK; p > &FIRST_TASK; --p)
 395                 if (*p && ((*p)->state == TASK_RUNNING ||
 396                            (*p)->state == TASK_UNINTERRUPTIBLE ||
 397                            (*p)->state == TASK_SWAPPING))
 398                         nr += FIXED_1;
 399         return nr;
 400 }
 401 
/*
 * Recompute the load averages.  Called once per tick from do_timer();
 * the static countdown makes the expensive part run only every
 * LOAD_FREQ ticks.
 */
static inline void calc_load(void)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	if (count-- > 0)
		return;
	count = LOAD_FREQ;
	active_tasks = count_active_tasks();
	/* exponentially-decaying averages over 1, 5 and 15 minutes */
	CALC_LOAD(avenrun[0], EXP_1, active_tasks);
	CALC_LOAD(avenrun[1], EXP_5, active_tasks);
	CALC_LOAD(avenrun[2], EXP_15, active_tasks);
}
 415 
 416 /*
 417  * this routine handles the overflow of the microsecond field
 418  *
 419  * The tricky bits of code to handle the accurate clock support
 420  * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 421  * They were originally developed for SUN and DEC kernels.
 422  * All the kudos should go to Dave for this stuff.
 423  *
 424  * These were ported to Linux by Philip Gladstone.
 425  */
static void second_overflow(void)
{
	long ltemp;

	/* Bump the maxerror field (clock uncertainty grows by the
	   frequency tolerance every second, saturating at 0x70000000) */
	time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
	  0x70000000 : (time_maxerror + time_tolerance);

	/* Run the PLL: convert the remaining offset into a per-tick
	   adjustment (time_adj) and consume that much of time_offset.
	   The +1/-1 and the sign handling keep the step non-zero and
	   rounded towards zero for negative offsets. */
	if (time_offset < 0) {
		ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
		time_adj = - time_adj;
	} else if (time_offset > 0) {
		ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
	} else {
		time_adj = 0;
	}

	/* fold in the frequency correction and the constant kernel bias */
	time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
	    + FINETUNE;

	/* Handle the leap second stuff */
	switch (time_status) {
		case TIME_INS:
		/* ugly divide should be replaced */
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--; /* !! insert 23:59:60 by repeating a second */
			time_status = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 UTC\n");
		}
		break;

		case TIME_DEL:
		/* ugly divide should be replaced */
		if (xtime.tv_sec % 86400 == 86399) {
			xtime.tv_sec++;	/* skip 23:59:59 */
			time_status = TIME_OK;
			printk("Clock: deleting leap second 23:59:59 UTC\n");
		}
		break;

		case TIME_OOP:
		/* leap second in progress has completed */
		time_status = TIME_OK;
		break;
	}
}
 476 
 477 /*
 478  * disregard lost ticks for now.. We don't care enough.
 479  */
/*
 * Timer bottom half: run expired dynamic timers (timer_head list) and
 * then the old-style static timer table.  List manipulation happens
 * under cli(); each handler itself runs with interrupts enabled.
 */
static void timer_bh(void * unused)
{
	unsigned long mask;
	struct timer_struct *tp;
	struct timer_list * timer;

	cli();
	while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
		/* snapshot handler and argument, then unlink before
		   calling, so the handler may safely re-add the timer */
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();
		fn(data);
		cli();
	}
	sti();
	
	/* static timer table: bit n of timer_active arms timer_table[n] */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;		/* no higher bits can be set */
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;	/* one-shot: disarm before running */
		tp->fn();
		sti();	/* NOTE(review): interrupts already appear enabled
			   here - this sti() looks redundant; confirm */
	}
}
 511 
/* Bottom half that drains the timer task queue. */
void tqueue_bh(void * unused)
{
	run_task_queue(&tq_timer);
}
 516 
/* Bottom half that drains the immediate task queue. */
void immediate_bh(void * unused)
{
	run_task_queue(&tq_immediate);
}
 521 
 522 /*
 523  * The int argument is really a (struct pt_regs *), in case the
 524  * interrupt wants to know from where it was called. The timer
 525  * irq uses this to decide if it should update the user or system
 526  * times.
 527  */
/*
 * Timer interrupt handler: advances xtime with NTP phase/adjtime
 * corrections, updates jiffies and load averages, does per-task CPU
 * accounting, rlimit and itimer checks, and marks bottom halves to run.
 */
static void do_timer(int irq, struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;
	/* last time the cmos clock got updated */
	static long last_rtc_update=0;
	extern int set_rtc_mmss(unsigned long);

	long ltemp, psecs;

	/* Advance the phase, once it gets to one microsecond, then
	 * advance the tick more.
	 */
	time_phase += time_adj;
	if (time_phase < -FINEUSEC) {
		/* fold whole (scaled) microseconds out of the phase
		   accumulator into this tick's usec increment */
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase > FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust)
	{
	    /* We are doing an adjtime thing. 
	     *
	     * Modify the value of the tick for next time.
	     * Note that a positive delta means we want the clock
	     * to run fast. This means that the tick should be bigger
	     *
	     * Limit the amount of the step for *next* tick to be
	     * in the range -tickadj .. +tickadj
	     */
	     if (time_adjust > tickadj)
	       time_adjust_step = tickadj;
	     else if (time_adjust < -tickadj)
	       time_adjust_step = -tickadj;
	     else
	       time_adjust_step = time_adjust;
	     
	    /* Reduce by this step the amount of time left  */
	    time_adjust -= time_adjust_step;
	}
	else
	    time_adjust_step = 0;

	/* microsecond overflow: carry into the seconds field and run
	   the once-per-second NTP machinery */
	if (xtime.tv_usec >= 1000000) {
	    xtime.tv_usec -= 1000000;
	    xtime.tv_sec++;
	    second_overflow();
	}

	/* If we have an externally synchronized Linux clock, then update
	 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
	 * called as close as possible to 500 ms before the new second starts.
	 */
	if (time_status != TIME_BAD && xtime.tv_sec > last_rtc_update + 660 &&
	    xtime.tv_usec > 500000 - (tick >> 1) &&
	    xtime.tv_usec < 500000 + (tick >> 1))
	  if (set_rtc_mmss(xtime.tv_sec) == 0)
	    last_rtc_update = xtime.tv_sec;
	  else
	    last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
	    /* (the else above binds to the inner if - intended here) */

	jiffies++;
	calc_load();
	if (user_mode(regs)) {
		/* tick charged to user time */
		current->utime++;
		if (current != task[0]) {
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}
		/* Update ITIMER_VIRT for current task if not in a system call */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		/* tick charged to system time */
		current->stime++;
		if(current != task[0])
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		/* kernel profiling: histogram of interrupted kernel EIPs */
		if (prof_buffer && current != task[0]) {
			unsigned long eip = regs->eip;
			eip >>= CONFIG_PROFILE_SHIFT;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}
	/*
	 * check the cpu time limit on the process.
	 */
	if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
	    (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
		send_sig(SIGKILL, current, 1);
	if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
	    (((current->stime + current->utime) % HZ) == 0)) {
		psecs = (current->stime + current->utime) / HZ;
		/* send when equal */
		if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
			send_sig(SIGXCPU, current, 1);
		/* and every five seconds thereafter. */
		else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
			((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
			send_sig(SIGXCPU, current, 1);
	}

	/* time slice used up: request a reschedule on return to user mode */
	if (current != task[0] && 0 > --current->counter) {
		current->counter = 0;
		need_resched = 1;
	}
	/* Update ITIMER_PROF for the current task */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
	/* any expired entry in the static timer table? then run timer_bh */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	cli();
	itimer_ticks++;
	/* an ITIMER_REAL is due: force a trip through schedule() */
	if (itimer_ticks > itimer_next)
		need_resched = 1;
	if (timer_head.next->expires < jiffies)
		mark_bh(TIMER_BH);
	if (tq_timer != &tq_last)
		mark_bh(TQUEUE_BH);
	sti();
}
 670 
 671 asmlinkage int sys_alarm(long seconds)
     /* [previous][next][first][last][top][bottom][index][help] */
 672 {
 673         struct itimerval it_new, it_old;
 674 
 675         it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
 676         it_new.it_value.tv_sec = seconds;
 677         it_new.it_value.tv_usec = 0;
 678         _setitimer(ITIMER_REAL, &it_new, &it_old);
 679         return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
 680 }
 681 
/* Return the PID of the calling process. */
asmlinkage int sys_getpid(void)
{
	return current->pid;
}
 686 
/* Return the PID of the original parent (p_opptr) of the caller. */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}
 691 
/* Return the real user ID of the calling process. */
asmlinkage int sys_getuid(void)
{
	return current->uid;
}
 696 
/* Return the effective user ID of the calling process. */
asmlinkage int sys_geteuid(void)
{
	return current->euid;
}
 701 
/* Return the real group ID of the calling process. */
asmlinkage int sys_getgid(void)
{
	return current->gid;
}
 706 
/* Return the effective group ID of the calling process. */
asmlinkage int sys_getegid(void)
{
	return current->egid;
}
 711 
 712 asmlinkage int sys_nice(long increment)
     /* [previous][next][first][last][top][bottom][index][help] */
 713 {
 714         int newprio;
 715 
 716         if (increment < 0 && !suser())
 717                 return -EPERM;
 718         newprio = current->priority - increment;
 719         if (newprio < 1)
 720                 newprio = 1;
 721         if (newprio > 35)
 722                 newprio = 35;
 723         current->priority = newprio;
 724         return 0;
 725 }
 726 
/*
 * Print one line of the show_state() table for task p (slot nr):
 * name, pid (negated for the current task), state letter, PC, free
 * kernel stack estimate, and the family pids.
 */
static void show_task(int nr,struct task_struct * p)
{
	unsigned long free;
	static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
	/* state letter; the names contain no '%' so passing them as the
	   format string is safe here */
	if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[p->state]);
	else
		printk(" ");
#ifdef __i386__
	if (p == current)
		printk(" current  ");
	else
		printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);
#endif
	/* estimate free kernel stack: index of the first non-zero word
	   above the bottom of the stack page (stack grows down) */
	for (free = 1; free < 1024 ; free++) {
		if (((unsigned long *)p->kernel_stack_page)[free])
			break;
	}
	printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
		printk("      ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);
	else
		printk("       ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);
	else
		printk("\n");
}
 761 
/* Dump a one-line summary of every existing task to the console. */
void show_state(void)
{
	int i;

	printk("                         free                        sibling\n");
	printk("  task             PC    stack   pid father child younger older\n");
	for (i=0 ; i<NR_TASKS ; i++)
		if (task[i])
			show_task(i,task[i]);
}
 772 
/*
 * Scheduler/timer initialization: install the bottom-half handlers,
 * claim the timer interrupt, and enable the bottom halves.
 */
void sched_init(void)
{
	bh_base[TIMER_BH].routine = timer_bh;
	bh_base[TQUEUE_BH].routine = tqueue_bh;
	bh_base[IMMEDIATE_BH].routine = immediate_bh;
	/* the timer tick is vital: without it nothing schedules */
	if (request_irq(TIMER_IRQ, do_timer, 0, "timer") != 0)
		panic("Could not allocate timer IRQ!");
	enable_bh(TIMER_BH);
	enable_bh(TQUEUE_BH);
	enable_bh(IMMEDIATE_BH);
}

/* [previous][next][first][last][top][bottom][index][help] */