root/kernel/sched.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. schedule
  2. sys_pause
  3. wake_up
  4. wake_up_interruptible
  5. __down
  6. __sleep_on
  7. interruptible_sleep_on
  8. sleep_on
  9. add_timer
  10. del_timer
  11. count_active_tasks
  12. calc_load
  13. second_overflow
  14. timer_bh
  15. tqueue_bh
  16. immediate_bh
  17. do_timer
  18. sys_alarm
  19. sys_getpid
  20. sys_getppid
  21. sys_getuid
  22. sys_geteuid
  23. sys_getgid
  24. sys_getegid
  25. sys_nice
  26. show_task
  27. show_state
  28. sched_init

   1 /*
   2  *  linux/kernel/sched.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  * 'sched.c' is the main kernel file. It contains scheduling primitives
   9  * (sleep_on, wakeup, schedule etc) as well as a number of simple system
  10  * call functions (type getpid(), which just extracts a field from
   11  * the current task)
  12  */
  13 
  14 #include <linux/config.h>
  15 #include <linux/signal.h>
  16 #include <linux/sched.h>
  17 #include <linux/timer.h>
  18 #include <linux/kernel.h>
  19 #include <linux/kernel_stat.h>
  20 #include <linux/fdreg.h>
  21 #include <linux/errno.h>
  22 #include <linux/time.h>
  23 #include <linux/ptrace.h>
  24 #include <linux/delay.h>
  25 #include <linux/interrupt.h>
  26 #include <linux/tqueue.h>
  27 #include <linux/resource.h>
  28 
  29 #include <asm/system.h>
  30 #include <asm/io.h>
  31 #include <asm/segment.h>
  32 
  33 #define TIMER_IRQ 0
  34 
  35 #include <linux/timex.h>
  36 
  37 /*
  38  * kernel variables
  39  */
  40 long tick = 1000000 / HZ;               /* timer interrupt period */
  41 volatile struct timeval xtime;          /* The current time */
  42 int tickadj = 500/HZ;                   /* microsecs */
  43 
  44 DECLARE_TASK_QUEUE(tq_timer);
  45 DECLARE_TASK_QUEUE(tq_immediate);
  46 
  47 /*
  48  * phase-lock loop variables
  49  */
  50 int time_status = TIME_BAD;     /* clock synchronization status */
  51 long time_offset = 0;           /* time adjustment (us) */
  52 long time_constant = 0;         /* pll time constant */
  53 long time_tolerance = MAXFREQ;  /* frequency tolerance (ppm) */
  54 long time_precision = 1;        /* clock precision (us) */
  55 long time_maxerror = 0x70000000;/* maximum error */
  56 long time_esterror = 0x70000000;/* estimated error */
  57 long time_phase = 0;            /* phase offset (scaled us) */
  58 long time_freq = 0;             /* frequency offset (scaled ppm) */
  59 long time_adj = 0;              /* tick adjust (scaled 1 / HZ) */
  60 long time_reftime = 0;          /* time at last adjustment (s) */
  61 
  62 long time_adjust = 0;
  63 long time_adjust_step = 0;
  64 
  65 int need_resched = 0;
  66 unsigned long event = 0;
  67 
  68 /*
  69  * Tell us the machine setup..
  70  */
  71 char hard_math = 0;             /* set by boot/head.S */
  72 char x86 = 0;                   /* set by boot/head.S to 3 or 4 */
  73 char x86_model = 0;             /* set by boot/head.S */
  74 char x86_mask = 0;              /* set by boot/head.S */
  75 int x86_capability = 0;         /* set by boot/head.S */
  76 int fdiv_bug = 0;               /* set if Pentium(TM) with FP bug */
  77 
  78 char x86_vendor_id[13] = "Unknown";
  79 
  80 char ignore_irq13 = 0;          /* set if exception 16 works */
  81 char wp_works_ok = 0;           /* set if paging hardware honours WP */ 
  82 char hlt_works_ok = 1;          /* set if the "hlt" instruction works */
  83 
  84 /*
  85  * Bus types ..
  86  */
  87 int EISA_bus = 0;
  88 
  89 extern int _setitimer(int, struct itimerval *, struct itimerval *);
  90 unsigned long * prof_buffer = NULL;
  91 unsigned long prof_len = 0;
  92 
  93 #define _S(nr) (1<<((nr)-1))
  94 
  95 extern void mem_use(void);
  96 
  97 extern int timer_interrupt(void);
  98  
  99 static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
 100 unsigned long init_user_stack[1024] = { STACK_MAGIC, };
 101 static struct vm_area_struct init_mmap = INIT_MMAP;
 102 struct task_struct init_task = INIT_TASK;
 103 
 104 unsigned long volatile jiffies=0;
 105 
 106 struct task_struct *current = &init_task;
 107 struct task_struct *last_task_used_math = NULL;
 108 
 109 struct task_struct * task[NR_TASKS] = {&init_task, };
 110 
 111 struct kernel_stat kstat = { 0 };
 112 
 113 unsigned long itimer_ticks = 0;
 114 unsigned long itimer_next = ~0;
 115 
 116 /*
 117  *  'schedule()' is the scheduler function. It's a very simple and nice
 118  * scheduler: it's not perfect, but certainly works for most things.
 119  * The one thing you might take a look at is the signal-handler code here.
 120  *
 121  *   NOTE!!  Task 0 is the 'idle' task, which gets called when no other
 122  * tasks can run. It can not be killed, and it cannot sleep. The 'state'
 123  * information in task[0] is never used.
 124  *
 125  * The "confuse_gcc" goto is used only to get better assembly code..
 126  * Dijkstra probably hates me.
 127  */
asmlinkage void schedule(void)
{
	int c;				/* best 'counter' value seen so far */
	struct task_struct * p;
	struct task_struct * next;	/* task we will switch to */
	unsigned long ticks;		/* itimer ticks accumulated since last pass */

/* check alarm, wake up any interruptible tasks that have got a signal */

	/* Scheduling from interrupt context is a bug; report it and
	 * clear the count so we can limp on instead of wedging. */
	if (intr_count) {
		printk("Aiee: scheduling in interrupt\n");
		intr_count = 0;
	}
	/* Atomically consume the pending itimer tick count; itimer_next
	 * (the earliest pending expiry) is recomputed in the walk below. */
	cli();
	ticks = itimer_ticks;
	itimer_ticks = 0;
	itimer_next = ~0;
	sti();
	need_resched = 0;
	p = &init_task;
	/* First pass over every task: deliver ITIMER_REAL expiries and
	 * wake interruptible sleepers with a pending signal or an
	 * elapsed timeout. */
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc1;
		if (ticks && p->it_real_value) {
			if (p->it_real_value <= ticks) {
				send_sig(SIGALRM, p, 1);
				if (!p->it_real_incr) {
					/* one-shot timer: disarm it */
					p->it_real_value = 0;
					goto end_itimer;
				}
				/* periodic timer: step past all periods
				 * covered by the elapsed ticks */
				do {
					p->it_real_value += p->it_real_incr;
				} while (p->it_real_value <= ticks);
			}
			p->it_real_value -= ticks;
			/* remember the earliest remaining expiry */
			if (p->it_real_value < itimer_next)
				itimer_next = p->it_real_value;
		}
end_itimer:
		if (p->state != TASK_INTERRUPTIBLE)
			continue;
		/* pending unblocked signal: make the task runnable */
		if (p->signal & ~p->blocked) {
			p->state = TASK_RUNNING;
			continue;
		}
		/* sleep timeout has elapsed: make the task runnable */
		if (p->timeout && p->timeout <= jiffies) {
			p->timeout = 0;
			p->state = TASK_RUNNING;
		}
	}
confuse_gcc1:

/* this is the scheduler proper: */
#if 0
	/* give processes that go to sleep a bit higher priority.. */
	/* This depends on the values for TASK_XXX */
	/* This gives smoother scheduling for some things, but */
	/* can be very unfair under some circumstances, so.. */
	if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
	    current->counter < current->priority*2) {
		++current->counter;
	}
#endif
	/* Second pass: pick the runnable task with the largest counter.
	 * init_task (the idle task) is the default when nothing runs. */
	c = -1000;
	next = p = &init_task;
	for (;;) {
		if ((p = p->next_task) == &init_task)
			goto confuse_gcc2;
		if (p->state == TASK_RUNNING && p->counter > c)
			c = p->counter, next = p;
	}
confuse_gcc2:
	/* Every runnable counter is exhausted: recharge all tasks.
	 * Sleepers keep half their old counter, so interactive tasks
	 * get a boost when they next run. */
	if (!c) {
		for_each_task(p)
			p->counter = (p->counter >> 1) + p->priority;
	}
	if (current == next)
		return;
	kstat.context_swtch++;
	switch_to(next);
}
 209 
/*
 * pause() system call: sleep until a signal arrives.
 * -ERESTARTNOHAND tells the signal code to return -EINTR to the
 * caller after the handler runs, instead of restarting the call.
 */
asmlinkage int sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
 216 
 217 /*
 218  * wake_up doesn't wake up stopped processes - they have to be awakened
 219  * with signals or similar.
 220  *
 221  * Note that this doesn't need cli-sti pairs: interrupts may not change
 222  * the wait-queue structures directly, but only call wake_up() to wake
 223  * a process. The process itself must remove the queue once it has woken.
 224  */
/*
 * Wake every task on wait-queue *q that is sleeping interruptibly
 * or uninterruptibly; stopped tasks are left alone (see the comment
 * above).  The circular list is only traversed here, never edited:
 * each sleeper removes its own entry after waking.
 */
void wake_up(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if ((p->state == TASK_UNINTERRUPTIBLE) ||
			    (p->state == TASK_INTERRUPTIBLE)) {
				p->state = TASK_RUNNING;
				/* woken task clearly deserves the CPU
				 * more than we do: reschedule soon */
				if (p->counter > current->counter + 3)
					need_resched = 1;
			}
		}
		/* a NULL link means the list is not circular any more;
		 * report the corruption and bail out rather than loop */
		if (!tmp->next) {
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk("        q = %p\n",q);
			printk("       *q = %p\n",*q);
			printk("      tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
 252 
/*
 * Like wake_up(), but only wakes tasks in TASK_INTERRUPTIBLE sleep;
 * uninterruptible sleepers keep waiting for their specific event.
 */
void wake_up_interruptible(struct wait_queue **q)
{
	struct wait_queue *tmp;
	struct task_struct * p;

	if (!q || !(tmp = *q))
		return;
	do {
		if ((p = tmp->task) != NULL) {
			if (p->state == TASK_INTERRUPTIBLE) {
				p->state = TASK_RUNNING;
				/* woken task clearly deserves the CPU
				 * more than we do: reschedule soon */
				if (p->counter > current->counter + 3)
					need_resched = 1;
			}
		}
		/* non-circular list = corruption; report and bail out */
		if (!tmp->next) {
			printk("wait_queue is bad (eip = %p)\n",
				__builtin_return_address(0));
			printk("        q = %p\n",q);
			printk("       *q = %p\n",*q);
			printk("      tmp = %p\n",tmp);
			break;
		}
		tmp = tmp->next;
	} while (tmp != *q);
}
 279 
/*
 * Semaphore contention slow path, entered when the semaphore count
 * was already <= 0.  Sleep uninterruptibly on sem->wait until an
 * up() raises the count and wakes us.  The task state is set back
 * to TASK_UNINTERRUPTIBLE before each re-test of the count, so a
 * wake_up() arriving between the test and schedule() is not lost.
 */
void __down(struct semaphore * sem)
{
	struct wait_queue wait = { current, NULL };
	add_wait_queue(&sem->wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	while (sem->count <= 0) {
		schedule();
		current->state = TASK_UNINTERRUPTIBLE;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
 292 
/*
 * Common helper for sleep_on()/interruptible_sleep_on(): put the
 * current task to sleep in 'state' on wait-queue *p until wake_up()
 * (or, for TASK_INTERRUPTIBLE, a signal/timeout seen by schedule())
 * makes it runnable again.  The caller's interrupt flag is saved
 * and restored; interrupts are enabled while we are scheduled out.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
	unsigned long flags;
	struct wait_queue wait = { current, NULL };

	if (!p)
		return;
	/* task 0 is the idle task and must never sleep */
	if (current == task[0])
		panic("task[0] trying to sleep");
	/* mark ourselves sleeping, then enqueue on the wait list */
	current->state = state;
	add_wait_queue(p, &wait);
	save_flags(flags);
	sti();
	schedule();
	/* woken up: dequeue our on-stack wait entry ourselves */
	remove_wait_queue(p, &wait);
	restore_flags(flags);
}
 310 
/* Sleep on *p interruptibly: signals and timeouts also wake us. */
void interruptible_sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_INTERRUPTIBLE);
}
 315 
/* Sleep on *p uninterruptibly: only an explicit wake_up() ends it. */
void sleep_on(struct wait_queue **p)
{
	__sleep_on(p,TASK_UNINTERRUPTIBLE);
}
 320 
 321 /*
 322  * The head for the timer-list has a "expires" field of MAX_UINT,
 323  * and the sorting routine counts on this..
 324  */
 325 static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
 326 #define SLOW_BUT_DEBUGGING_TIMERS 1
 327 
/*
 * Insert a timer into the global sorted timer list.  On entry
 * timer->expires is relative (ticks from now); it is converted to
 * absolute jiffies here.  The list stays sorted by expiry, and the
 * head sentinel's expires of ~0 guarantees the scan terminates.
 * The list manipulation runs with interrupts disabled.
 */
void add_timer(struct timer_list * timer)
{
	unsigned long flags;
	struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
	/* debugging: refuse a timer whose links suggest it is
	 * already queued */
	if (timer->next || timer->prev) {
		printk("add_timer() called with non-zero list from %p\n",
			__builtin_return_address(0));
		return;
	}
#endif
	p = &timer_head;
	timer->expires += jiffies;	/* relative -> absolute */
	save_flags(flags);
	cli();
	/* find the first entry that expires after this one */
	do {
		p = p->next;
	} while (timer->expires > p->expires);
	/* link the new timer in just before p */
	timer->next = p;
	timer->prev = p->prev;
	p->prev = timer;
	timer->prev->next = timer;
	restore_flags(flags);
}
 353 
/*
 * Remove a timer from the sorted timer list.  Returns 1 if the
 * timer was pending and has been removed, 0 otherwise.  On success
 * timer->expires is converted back from absolute jiffies to a
 * relative count, undoing what add_timer() did.
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
	struct timer_list * p;

	p = &timer_head;
	save_flags(flags);
	cli();
	/* debugging version: walk the whole list to verify the timer
	 * really is queued before unlinking it */
	while ((p = p->next) != &timer_head) {
		if (p == timer) {
			timer->next->prev = timer->prev;
			timer->prev->next = timer->next;
			timer->next = timer->prev = NULL;
			restore_flags(flags);
			timer->expires -= jiffies;	/* back to relative */
			return 1;
		}
	}
	/* not on the list, yet its links are non-NULL: caller passed
	 * a stale or uninitialized timer */
	if (timer->next || timer->prev)
		printk("del_timer() called from %p with timer not initialized\n",
			__builtin_return_address(0));
	restore_flags(flags);
	return 0;
#else	
	/* fast version: trust timer->next as the "is pending" flag */
	save_flags(flags);
	cli();
	if (timer->next) {
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		restore_flags(flags);
		timer->expires -= jiffies;	/* back to relative */
		return 1;
	}
	restore_flags(flags);
	return 0;
#endif
}
 393 
 394 unsigned long timer_active = 0;
 395 struct timer_struct timer_table[32];
 396 
 397 /*
 398  * Hmm.. Changed this, as the GNU make sources (load.c) seems to
 399  * imply that avenrun[] is the standard name for this kind of thing.
 400  * Nothing else seems to be standardized: the fractional size etc
 401  * all seem to differ on different machines.
 402  */
 403 unsigned long avenrun[3] = { 0,0,0 };
 404 
 405 /*
 406  * Nr of active tasks - counted in fixed-point numbers
 407  */
 408 static unsigned long count_active_tasks(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 409 {
 410         struct task_struct **p;
 411         unsigned long nr = 0;
 412 
 413         for(p = &LAST_TASK; p > &FIRST_TASK; --p)
 414                 if (*p && ((*p)->state == TASK_RUNNING ||
 415                            (*p)->state == TASK_UNINTERRUPTIBLE ||
 416                            (*p)->state == TASK_SWAPPING))
 417                         nr += FIXED_1;
 418         return nr;
 419 }
 420 
 421 static inline void calc_load(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 422 {
 423         unsigned long active_tasks; /* fixed-point */
 424         static int count = LOAD_FREQ;
 425 
 426         if (count-- > 0)
 427                 return;
 428         count = LOAD_FREQ;
 429         active_tasks = count_active_tasks();
 430         CALC_LOAD(avenrun[0], EXP_1, active_tasks);
 431         CALC_LOAD(avenrun[1], EXP_5, active_tasks);
 432         CALC_LOAD(avenrun[2], EXP_15, active_tasks);
 433 }
 434 
 435 /*
 436  * this routine handles the overflow of the microsecond field
 437  *
 438  * The tricky bits of code to handle the accurate clock support
 439  * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 440  * They were originally developed for SUN and DEC kernels.
 441  * All the kudos should go to Dave for this stuff.
 442  *
 443  * These were ported to Linux by Philip Gladstone.
 444  */
static void second_overflow(void)
{
	long ltemp;
	/* last time the cmos clock got updated */
	static long last_rtc_update=0;
	extern int set_rtc_mmss(unsigned long);

	/* Bump the maxerror field (saturating at 0x70000000) */
	time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
	  0x70000000 : (time_maxerror + time_tolerance);

	/* Run the PLL: convert part of the remaining offset into a
	 * per-tick adjustment (time_adj) and consume it from
	 * time_offset.  The two branches are mirror images for
	 * negative/positive offsets. */
	if (time_offset < 0) {
		ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
		time_adj = - time_adj;
	} else if (time_offset > 0) {
		ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
		time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
	} else {
		time_adj = 0;
	}

	/* add the frequency correction and the constant tuning term */
	time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
	    + FINETUNE;

	/* Handle the leap second stuff */
	switch (time_status) {
		case TIME_INS:
		/* ugly divide should be replaced */
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--; /* !! */
			time_status = TIME_OOP;
			printk("Clock: inserting leap second 23:59:60 GMT\n");
		}
		break;

		case TIME_DEL:
		/* ugly divide should be replaced */
		if (xtime.tv_sec % 86400 == 86399) {
			xtime.tv_sec++;
			time_status = TIME_OK;
			printk("Clock: deleting leap second 23:59:59 GMT\n");
		}
		break;

		case TIME_OOP:
		time_status = TIME_OK;
		break;
	}
	/* Write the system time back to the CMOS clock at most once
	 * every ~11 minutes.  NOTE: the 'else' binds to the inner
	 * 'if' — on a failed update we back-date last_rtc_update so
	 * the write is retried in about a minute. */
	if (xtime.tv_sec > last_rtc_update + 660)
	  if (set_rtc_mmss(xtime.tv_sec) == 0)
	    last_rtc_update = xtime.tv_sec;
	  else
	    last_rtc_update = xtime.tv_sec - 600; /* do it again in one min */
}
 503 
 504 /*
 505  * disregard lost ticks for now.. We don't care enough.
 506  */
/*
 * Timer bottom half: runs after the timer interrupt proper, with
 * interrupts enabled.  Fires all expired dynamic (timer_list)
 * timers and all expired old-style timer_table entries.
 */
static void timer_bh(void * unused)
{
	unsigned long mask;
	struct timer_struct *tp;
	struct timer_list * timer;

	cli();
	/* Pop expired timers off the front of the sorted list (head
	 * first = earliest expiry).  Each timer is unlinked under
	 * cli(), but its handler runs with interrupts enabled. */
	while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
		void (*fn)(unsigned long) = timer->function;
		unsigned long data = timer->data;
		timer->next->prev = timer->prev;
		timer->prev->next = timer->next;
		timer->next = timer->prev = NULL;
		sti();
		fn(data);
		cli();
	}
	sti();
	
	/* Run expired static timer_table entries.  timer_active is a
	 * bitmask of armed slots; a slot is disarmed before its
	 * handler is called so it does not re-fire. */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		timer_active &= ~mask;
		tp->fn();
		sti();
	}
}
 538 
/* Bottom half: run work deferred onto tq_timer by the timer tick. */
void tqueue_bh(void * unused)
{
	run_task_queue(&tq_timer);
}
 543 
/* Bottom half: run work queued on tq_immediate as soon as possible. */
void immediate_bh(void * unused)
{
	run_task_queue(&tq_immediate);
}
 548 
 549 /*
 550  * The int argument is really a (struct pt_regs *), in case the
 551  * interrupt wants to know from where it was called. The timer
 552  * irq uses this to decide if it should update the user or system
 553  * times.
 554  */
static void do_timer(struct pt_regs * regs)
{
	unsigned long mask;
	struct timer_struct *tp;

	long ltemp, psecs;

	/* Advance the phase, once it gets to one microsecond, then
	 * advance the tick more.
	 */
	time_phase += time_adj;
	if (time_phase < -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step - ltemp;
	}
	else if (time_phase > FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += tick + time_adjust_step + ltemp;
	} else
		xtime.tv_usec += tick + time_adjust_step;

	if (time_adjust)
	{
	    /* We are doing an adjtime thing. 
	     *
	     * Modify the value of the tick for next time.
	     * Note that a positive delta means we want the clock
	     * to run fast. This means that the tick should be bigger
	     *
	     * Limit the amount of the step for *next* tick to be
	     * in the range -tickadj .. +tickadj
	     */
	     if (time_adjust > tickadj)
	       time_adjust_step = tickadj;
	     else if (time_adjust < -tickadj)
	       time_adjust_step = -tickadj;
	     else
	       time_adjust_step = time_adjust;
	     
	    /* Reduce by this step the amount of time left  */
	    time_adjust -= time_adjust_step;
	}
	else
	    time_adjust_step = 0;

	/* microseconds overflowed into a full second: carry into
	 * tv_sec and run the once-per-second PLL/leap machinery */
	if (xtime.tv_usec >= 1000000) {
	    xtime.tv_usec -= 1000000;
	    xtime.tv_sec++;
	    second_overflow();
	}

	jiffies++;
	calc_load();
	/* Charge the tick to user or system time and the matching
	 * kstat bucket; task 0 (idle) is never charged. */
	if (user_mode(regs)) {
		current->utime++;
		if (current != task[0]) {
			/* priority below 15 means the task was niced */
			if (current->priority < 15)
				kstat.cpu_nice++;
			else
				kstat.cpu_user++;
		}
		/* Update ITIMER_VIRT for current task if not in a system call */
		if (current->it_virt_value && !(--current->it_virt_value)) {
			current->it_virt_value = current->it_virt_incr;
			send_sig(SIGVTALRM,current,1);
		}
	} else {
		current->stime++;
		if(current != task[0])
			kstat.cpu_system++;
#ifdef CONFIG_PROFILE
		/* kernel profiling: bucket the interrupted kernel EIP */
		if (prof_buffer && current != task[0]) {
			unsigned long eip = regs->eip;
			eip >>= CONFIG_PROFILE_SHIFT;
			if (eip < prof_len)
				prof_buffer[eip]++;
		}
#endif
	}
	/*
	 * check the cpu time limit on the process.
	 */
	if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
	    (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
		send_sig(SIGKILL, current, 1);
	if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
	    (((current->stime + current->utime) % HZ) == 0)) {
		psecs = (current->stime + current->utime) / HZ;
		/* send when equal */
		if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
			send_sig(SIGXCPU, current, 1);
		/* and every five seconds thereafter. */
		else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
			((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
			send_sig(SIGXCPU, current, 1);
	}

	/* timeslice used up: request a reschedule on return */
	if (current != task[0] && 0 > --current->counter) {
		current->counter = 0;
		need_resched = 1;
	}
	/* Update ITIMER_PROF for the current task */
	if (current->it_prof_value && !(--current->it_prof_value)) {
		current->it_prof_value = current->it_prof_incr;
		send_sig(SIGPROF,current,1);
	}
	/* any expired timer_table slot: defer it to the timer BH */
	for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
		if (mask > timer_active)
			break;
		if (!(mask & timer_active))
			continue;
		if (tp->expires > jiffies)
			continue;
		mark_bh(TIMER_BH);
	}
	cli();
	/* itimer bookkeeping consumed by schedule(); also hand any
	 * expired dynamic timers and pending tq_timer work to the
	 * bottom halves */
	itimer_ticks++;
	if (itimer_ticks > itimer_next)
		need_resched = 1;
	if (timer_head.next->expires < jiffies)
		mark_bh(TIMER_BH);
	if (tq_timer != &tq_last)
		mark_bh(TQUEUE_BH);
	sti();
}
 682 
 683 asmlinkage int sys_alarm(long seconds)
     /* [previous][next][first][last][top][bottom][index][help] */
 684 {
 685         struct itimerval it_new, it_old;
 686 
 687         it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
 688         it_new.it_value.tv_sec = seconds;
 689         it_new.it_value.tv_usec = 0;
 690         _setitimer(ITIMER_REAL, &it_new, &it_old);
 691         return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
 692 }
 693 
/* getpid(): return the calling task's process id. */
asmlinkage int sys_getpid(void)
{
	return current->pid;
}
 698 
/* getppid(): return the pid of the original parent (p_opptr). */
asmlinkage int sys_getppid(void)
{
	return current->p_opptr->pid;
}
 703 
/* getuid(): return the caller's real user id. */
asmlinkage int sys_getuid(void)
{
	return current->uid;
}
 708 
/* geteuid(): return the caller's effective user id. */
asmlinkage int sys_geteuid(void)
{
	return current->euid;
}
 713 
/* getgid(): return the caller's real group id. */
asmlinkage int sys_getgid(void)
{
	return current->gid;
}
 718 
/* getegid(): return the caller's effective group id. */
asmlinkage int sys_getegid(void)
{
	return current->egid;
}
 723 
 724 asmlinkage int sys_nice(long increment)
     /* [previous][next][first][last][top][bottom][index][help] */
 725 {
 726         int newprio;
 727 
 728         if (increment < 0 && !suser())
 729                 return -EPERM;
 730         newprio = current->priority - increment;
 731         if (newprio < 1)
 732                 newprio = 1;
 733         if (newprio > 35)
 734                 newprio = 35;
 735         current->priority = newprio;
 736         return 0;
 737 }
 738 
/*
 * Print one line describing task 'p' (slot 'nr' of task[]) for
 * show_state(): name, slot, state letter, saved PC, free kernel
 * stack, pid, and family links.  The current task's slot number
 * is printed negated so it stands out.
 */
static void show_task(int nr,struct task_struct * p)
{
	unsigned long free;
	/* one-letter state names indexed by task state value */
	static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
	if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[p->state]);
	else
		printk(" ");
#ifdef __i386__
	/* the saved PC is read from a fixed slot on the task's
	 * kernel stack (i386 context-switch layout) */
	if (p == current)
		printk(" current  ");
	else
		printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);
#endif
	/* scan upward for the first used (non-zero) word on the
	 * kernel stack page to estimate free stack; the '<< 2'
	 * below converts words to bytes (assumes 4-byte longs) */
	for (free = 1; free < 1024 ; free++) {
		if (((unsigned long *)p->kernel_stack_page)[free])
			break;
	}
	printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
	/* family links: blanks keep the columns aligned when a
	 * relative is missing */
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
		printk("      ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);
	else
		printk("       ");
	if (p->p_osptr)
		printk(" %5d\n", p->p_osptr->pid);
	else
		printk("\n");
}
 773 
 774 void show_state(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 775 {
 776         int i;
 777 
 778         printk("                         free                        sibling\n");
 779         printk("  task             PC    stack   pid father child younger older\n");
 780         for (i=0 ; i<NR_TASKS ; i++)
 781                 if (task[i])
 782                         show_task(i,task[i]);
 783 }
 784 
/*
 * Initialize the scheduler: install the bottom-half handlers,
 * program the PIT channel 0 to fire at HZ, and claim the timer
 * interrupt.
 */
void sched_init(void)
{
	bh_base[TIMER_BH].routine = timer_bh;
	bh_base[TQUEUE_BH].routine = tqueue_bh;
	bh_base[IMMEDIATE_BH].routine = immediate_bh;
	/* low-level signal handling hard-codes this struct layout */
	if (sizeof(struct sigaction) != 16)
		panic("Struct sigaction MUST be 16 bytes");
	/* program the 8253/8254 timer chip: LATCH counts per tick */
	outb_p(0x34,0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
	outb_p(LATCH & 0xff , 0x40);	/* LSB */
	outb(LATCH >> 8 , 0x40);	/* MSB */
	if (request_irq(TIMER_IRQ,(void (*)(int)) do_timer, 0, "timer") != 0)
		panic("Could not allocate timer IRQ!");
}

/* [previous][next][first][last][top][bottom][index][help] */