root/kernel/sched.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. schedule
  2. sys_pause
  3. wake_up
  4. wake_up_interruptible
  5. __down
  6. __sleep_on
  7. interruptible_sleep_on
  8. sleep_on
  9. add_timer
  10. del_timer
  11. count_active_tasks
  12. calc_load
  13. second_overflow
  14. timer_bh
  15. tqueue_bh
  16. immediate_bh
  17. do_timer
  18. sys_alarm
  19. sys_getpid
  20. sys_getppid
  21. sys_getuid
  22. sys_geteuid
  23. sys_getgid
  24. sys_getegid
  25. sys_nice
  26. show_task
  27. show_state
  28. sched_init

   1 /*
   2  *  linux/kernel/sched.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  * 'sched.c' is the main kernel file. It contains scheduling primitives
   9  * (sleep_on, wakeup, schedule etc) as well as a number of simple system
  10  * call functions (type getpid(), which just extracts a field from
  11  * current-task
  12  */
  13 
  14 #include <linux/config.h>
  15 #include <linux/signal.h>
  16 #include <linux/sched.h>
  17 #include <linux/timer.h>
  18 #include <linux/kernel.h>
  19 #include <linux/kernel_stat.h>
  20 #include <linux/fdreg.h>
  21 #include <linux/errno.h>
  22 #include <linux/time.h>
  23 #include <linux/ptrace.h>
  24 #include <linux/delay.h>
  25 #include <linux/interrupt.h>
  26 #include <linux/tqueue.h>
  27 #include <linux/resource.h>
  28 #include <linux/mm.h>
  29 
  30 #include <asm/system.h>
  31 #include <asm/io.h>
  32 #include <asm/segment.h>
  33 #include <asm/pgtable.h>
  34 
  35 #define TIMER_IRQ 0
  36 
  37 #include <linux/timex.h>
  38 
  39 /*
  40  * kernel variables
  41  */
  42 long tick = 1000000 / HZ;               /* timer interrupt period */
  43 volatile struct timeval xtime;          /* The current time */
  44 int tickadj = 500/HZ;                   /* microsecs */
  45 
  46 DECLARE_TASK_QUEUE(tq_timer);
  47 DECLARE_TASK_QUEUE(tq_immediate);
  48 DECLARE_TASK_QUEUE(tq_scheduler);
  49 
  50 /*
  51  * phase-lock loop variables
  52  */
  53 int time_status = TIME_BAD;     /* clock synchronization status */
  54 long time_offset = 0;           /* time adjustment (us) */
  55 long time_constant = 0;         /* pll time constant */
  56 long time_tolerance = MAXFREQ;  /* frequency tolerance (ppm) */
  57 long time_precision = 1;        /* clock precision (us) */
  58 long time_maxerror = 0x70000000;/* maximum error */
  59 long time_esterror = 0x70000000;/* estimated error */
  60 long time_phase = 0;            /* phase offset (scaled us) */
  61 long time_freq = 0;             /* frequency offset (scaled ppm) */
  62 long time_adj = 0;              /* tick adjust (scaled 1 / HZ) */
  63 long time_reftime = 0;          /* time at last adjustment (s) */
  64 
  65 long time_adjust = 0;
  66 long time_adjust_step = 0;
  67 
  68 int need_resched = 0;
  69 unsigned long event = 0;
  70 
  71 extern int _setitimer(int, struct itimerval *, struct itimerval *);
  72 unsigned long * prof_buffer = NULL;
  73 unsigned long prof_len = 0;
  74 
  75 #define _S(nr) (1<<((nr)-1))
  76 
  77 extern void mem_use(void);
  78 
  79 extern int timer_interrupt(void);
  80  
  81 static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
  82 unsigned long init_user_stack[1024] = { STACK_MAGIC, };
  83 static struct vm_area_struct init_mmap = INIT_MMAP;
  84 struct task_struct init_task = INIT_TASK;
  85 
  86 unsigned long volatile jiffies=0;
  87 
  88 struct task_struct *current = &init_task;
  89 struct task_struct *last_task_used_math = NULL;
  90 
  91 struct task_struct * task[NR_TASKS] = {&init_task, };
  92 
  93 struct kernel_stat kstat = { 0 };
  94 
  95 unsigned long itimer_ticks = 0;
  96 unsigned long itimer_next = ~0;
  97 
  98 /*
  99  *  'schedule()' is the scheduler function. It's a very simple and nice
 100  * scheduler: it's not perfect, but certainly works for most things.
 101  * The one thing you might take a look at is the signal-handler code here.
 102  *
 103  *   NOTE!!  Task 0 is the 'idle' task, which gets called when no other
 104  * tasks can run. It can not be killed, and it cannot sleep. The 'state'
 105  * information in task[0] is never used.
 106  *
 107  * The "confuse_gcc" goto is used only to get better assembly code..
 108  * Dijkstra probably hates me.
 109  */
 110 asmlinkage void schedule(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 111 {
 112         int c;
 113         struct task_struct * p;
 114         struct task_struct * next;
 115         unsigned long ticks;
 116 
 117 /* check alarm, wake up any interruptible tasks that have got a signal */
 118 
 119         if (intr_count) {
 120                 printk("Aiee: scheduling in interrupt\n");
 121                 intr_count = 0;
 122         }
 123         run_task_queue(&tq_scheduler);
 124         cli();
 125         ticks = itimer_ticks;
 126         itimer_ticks = 0;
 127         itimer_next = ~0;
 128         sti();
 129         need_resched = 0;
 130         nr_running = 0;
 131         p = &init_task;
 132         for (;;) {
 133                 if ((p = p->next_task) == &init_task)
 134                         goto confuse_gcc1;
 135                 if (ticks && p->it_real_value) {
 136                         if (p->it_real_value <= ticks) {
 137                                 send_sig(SIGALRM, p, 1);
 138                                 if (!p->it_real_incr) {
 139                                         p->it_real_value = 0;
 140                                         goto end_itimer;
 141                                 }
 142                                 do {
 143                                         p->it_real_value += p->it_real_incr;
 144                                 } while (p->it_real_value <= ticks);
 145                         }
 146                         p->it_real_value -= ticks;
 147                         if (p->it_real_value < itimer_next)
 148                                 itimer_next = p->it_real_value;
 149                 }
 150 end_itimer:
 151                 if (p->state != TASK_INTERRUPTIBLE)
 152                         continue;
 153                 if (p->signal & ~p->blocked) {
 154                         p->state = TASK_RUNNING;
 155                         continue;
 156                 }
 157                 if (p->timeout && p->timeout <= jiffies) {
 158                         p->timeout = 0;
 159                         p->state = TASK_RUNNING;
 160                 }
 161         }
 162 confuse_gcc1:
 163 
 164 /* this is the scheduler proper: */
 165 #if 0
 166         /* give processes that go to sleep a bit higher priority.. */
 167         /* This depends on the values for TASK_XXX */
 168         /* This gives smoother scheduling for some things, but */
 169         /* can be very unfair under some circumstances, so.. */
 170         if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
 171             current->counter < current->priority*2) {
 172                 ++current->counter;
 173         }
 174 #endif
 175         c = -1000;
 176         next = p = &init_task;
 177         for (;;) {
 178                 if ((p = p->next_task) == &init_task)
 179                         goto confuse_gcc2;
 180                 if (p->state == TASK_RUNNING) {
 181                         nr_running++;
 182                         if (p->counter > c)
 183                                 c = p->counter, next = p;
 184                 }
 185         }
 186 confuse_gcc2:
 187         if (!c) {
 188                 for_each_task(p)
 189                         p->counter = (p->counter >> 1) + p->priority;
 190         }
 191         if (current == next)
 192                 return;
 193         kstat.context_swtch++;
 194         switch_to(next);
 195 }
 196 
 197 asmlinkage int sys_pause(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 198 {
 199         current->state = TASK_INTERRUPTIBLE;
 200         schedule();
 201         return -ERESTARTNOHAND;
 202 }
 203 
 204 /*
 205  * wake_up doesn't wake up stopped processes - they have to be awakened
 206  * with signals or similar.
 207  *
 208  * Note that this doesn't need cli-sti pairs: interrupts may not change
 209  * the wait-queue structures directly, but only call wake_up() to wake
 210  * a process. The process itself must remove the queue once it has woken.
 211  */
 212 void wake_up(struct wait_queue **q)
     /* [previous][next][first][last][top][bottom][index][help] */
 213 {
 214         struct wait_queue *tmp;
 215         struct task_struct * p;
 216 
 217         if (!q || !(tmp = *q))
 218                 return;
 219         do {
 220                 if ((p = tmp->task) != NULL) {
 221                         if ((p->state == TASK_UNINTERRUPTIBLE) ||
 222                             (p->state == TASK_INTERRUPTIBLE)) {
 223                                 p->state = TASK_RUNNING;
 224                                 if (p->counter > current->counter + 3)
 225                                         need_resched = 1;
 226                         }
 227                 }
 228                 if (!tmp->next) {
 229                         printk("wait_queue is bad (eip = %p)\n",
 230                                 __builtin_return_address(0));
 231                         printk("        q = %p\n",q);
 232                         printk("       *q = %p\n",*q);
 233                         printk("      tmp = %p\n",tmp);
 234                         break;
 235                 }
 236                 tmp = tmp->next;
 237         } while (tmp != *q);
 238 }
 239 
 240 void wake_up_interruptible(struct wait_queue **q)
     /* [previous][next][first][last][top][bottom][index][help] */
 241 {
 242         struct wait_queue *tmp;
 243         struct task_struct * p;
 244 
 245         if (!q || !(tmp = *q))
 246                 return;
 247         do {
 248                 if ((p = tmp->task) != NULL) {
 249                         if (p->state == TASK_INTERRUPTIBLE) {
 250                                 p->state = TASK_RUNNING;
 251                                 if (p->counter > current->counter + 3)
 252                                         need_resched = 1;
 253                         }
 254                 }
 255                 if (!tmp->next) {
 256                         printk("wait_queue is bad (eip = %p)\n",
 257                                 __builtin_return_address(0));
 258                         printk("        q = %p\n",q);
 259                         printk("       *q = %p\n",*q);
 260                         printk("      tmp = %p\n",tmp);
 261                         break;
 262                 }
 263                 tmp = tmp->next;
 264         } while (tmp != *q);
 265 }
 266 
 267 void __down(struct semaphore * sem)
     /* [previous][next][first][last][top][bottom][index][help] */
 268 {
 269         struct wait_queue wait = { current, NULL };
 270         add_wait_queue(&sem->wait, &wait);
 271         current->state = TASK_UNINTERRUPTIBLE;
 272         while (sem->count <= 0) {
 273                 schedule();
 274                 current->state = TASK_UNINTERRUPTIBLE;
 275         }
 276         current->state = TASK_RUNNING;
 277         remove_wait_queue(&sem->wait, &wait);
 278 }
 279 
 280 static inline void __sleep_on(struct wait_queue **p, int state)
     /* [previous][next][first][last][top][bottom][index][help] */
 281 {
 282         unsigned long flags;
 283         struct wait_queue wait = { current, NULL };
 284 
 285         if (!p)
 286                 return;
 287         if (current == task[0])
 288                 panic("task[0] trying to sleep");
 289         current->state = state;
 290         add_wait_queue(p, &wait);
 291         save_flags(flags);
 292         sti();
 293         schedule();
 294         remove_wait_queue(p, &wait);
 295         restore_flags(flags);
 296 }
 297 
 298 void interruptible_sleep_on(struct wait_queue **p)
     /* [previous][next][first][last][top][bottom][index][help] */
 299 {
 300         __sleep_on(p,TASK_INTERRUPTIBLE);
 301 }
 302 
 303 void sleep_on(struct wait_queue **p)
     /* [previous][next][first][last][top][bottom][index][help] */
 304 {
 305         __sleep_on(p,TASK_UNINTERRUPTIBLE);
 306 }
 307 
 308 /*
 309  * The head for the timer-list has a "expires" field of MAX_UINT,
 310  * and the sorting routine counts on this..
 311  */
 312 static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
 313 #define SLOW_BUT_DEBUGGING_TIMERS 1
 314 
 315 void add_timer(struct timer_list * timer)
     /* [previous][next][first][last][top][bottom][index][help] */
 316 {
 317         unsigned long flags;
 318         struct timer_list *p;
 319 
 320 #if SLOW_BUT_DEBUGGING_TIMERS
 321         if (timer->next || timer->prev) {
 322                 printk("add_timer() called with non-zero list from %p\n",
 323                         __builtin_return_address(0));
 324                 return;
 325         }
 326 #endif
 327         p = &timer_head;
 328         timer->expires += jiffies;
 329         save_flags(flags);
 330         cli();
 331         do {
 332                 p = p->next;
 333         } while (timer->expires > p->expires);
 334         timer->next = p;
 335         timer->prev = p->prev;
 336         p->prev = timer;
 337         timer->prev->next = timer;
 338         restore_flags(flags);
 339 }
 340 
 341 int del_timer(struct timer_list * timer)
     /* [previous][next][first][last][top][bottom][index][help] */
 342 {
 343         unsigned long flags;
 344 #if SLOW_BUT_DEBUGGING_TIMERS
 345         struct timer_list * p;
 346 
 347         p = &timer_head;
 348         save_flags(flags);
 349         cli();
 350         while ((p = p->next) != &timer_head) {
 351                 if (p == timer) {
 352                         timer->next->prev = timer->prev;
 353                         timer->prev->next = timer->next;
 354                         timer->next = timer->prev = NULL;
 355                         restore_flags(flags);
 356                         timer->expires -= jiffies;
 357                         return 1;
 358                 }
 359         }
 360         if (timer->next || timer->prev)
 361                 printk("del_timer() called from %p with timer not initialized\n",
 362                         __builtin_return_address(0));
 363         restore_flags(flags);
 364         return 0;
 365 #else   
 366         save_flags(flags);
 367         cli();
 368         if (timer->next) {
 369                 timer->next->prev = timer->prev;
 370                 timer->prev->next = timer->next;
 371                 timer->next = timer->prev = NULL;
 372                 restore_flags(flags);
 373                 timer->expires -= jiffies;
 374                 return 1;
 375         }
 376         restore_flags(flags);
 377         return 0;
 378 #endif
 379 }
 380 
 381 unsigned long timer_active = 0;
 382 struct timer_struct timer_table[32];
 383 
 384 /*
 385  * Hmm.. Changed this, as the GNU make sources (load.c) seems to
 386  * imply that avenrun[] is the standard name for this kind of thing.
 387  * Nothing else seems to be standardized: the fractional size etc
 388  * all seem to differ on different machines.
 389  */
 390 unsigned long avenrun[3] = { 0,0,0 };
 391 
 392 /*
 393  * Nr of active tasks - counted in fixed-point numbers
 394  */
 395 static unsigned long count_active_tasks(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 396 {
 397         struct task_struct **p;
 398         unsigned long nr = 0;
 399 
 400         for(p = &LAST_TASK; p > &FIRST_TASK; --p)
 401                 if (*p && ((*p)->state == TASK_RUNNING ||
 402                            (*p)->state == TASK_UNINTERRUPTIBLE ||
 403                            (*p)->state == TASK_SWAPPING))
 404                         nr += FIXED_1;
 405         return nr;
 406 }
 407 
 408 static inline void calc_load(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 409 {
 410         unsigned long active_tasks; /* fixed-point */
 411         static int count = LOAD_FREQ;
 412 
 413         if (count-- > 0)
 414                 return;
 415         count = LOAD_FREQ;
 416         active_tasks = count_active_tasks();
 417         CALC_LOAD(avenrun[0], EXP_1, active_tasks);
 418         CALC_LOAD(avenrun[1], EXP_5, active_tasks);
 419         CALC_LOAD(avenrun[2], EXP_15, active_tasks);
 420 }
 421 
 422 /*
 423  * this routine handles the overflow of the microsecond field
 424  *
 425  * The tricky bits of code to handle the accurate clock support
 426  * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 427  * They were originally developed for SUN and DEC kernels.
 428  * All the kudos should go to Dave for this stuff.
 429  *
 430  * These were ported to Linux by Philip Gladstone.
 431  */
 432 static void second_overflow(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 433 {
 434         long ltemp;
 435 
 436         /* Bump the maxerror field */
 437         time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
 438           0x70000000 : (time_maxerror + time_tolerance);
 439 
 440         /* Run the PLL */
 441         if (time_offset < 0) {
 442                 ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
 443                 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
 444                 time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
 445                 time_adj = - time_adj;
 446         } else if (time_offset > 0) {
 447                 ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
 448                 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
 449                 time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
 450         } else {
 451                 time_adj = 0;
 452         }
 453 
 454         time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
 455             + FINETUNE;
 456 
 457         /* Handle the leap second stuff */
 458         switch (time_status) {
 459                 case TIME_INS:
 460                 /* ugly divide should be replaced */
 461                 if (xtime.tv_sec % 86400 == 0) {
 462                         xtime.tv_sec--; /* !! */
 463                         time_status = TIME_OOP;
 464                         printk("Clock: inserting leap second 23:59:60 UTC\n");
 465                 }
 466                 break;
 467 
 468                 case TIME_DEL:
 469                 /* ugly divide should be replaced */
 470                 if (xtime.tv_sec % 86400 == 86399) {
 471                         xtime.tv_sec++;
 472                         time_status = TIME_OK;
 473                         printk("Clock: deleting leap second 23:59:59 UTC\n");
 474                 }
 475                 break;
 476 
 477                 case TIME_OOP:
 478                 time_status = TIME_OK;
 479                 break;
 480         }
 481 }
 482 
 483 /*
 484  * disregard lost ticks for now.. We don't care enough.
 485  */
 486 static void timer_bh(void * unused)
     /* [previous][next][first][last][top][bottom][index][help] */
 487 {
 488         unsigned long mask;
 489         struct timer_struct *tp;
 490         struct timer_list * timer;
 491 
 492         cli();
 493         while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
 494                 void (*fn)(unsigned long) = timer->function;
 495                 unsigned long data = timer->data;
 496                 timer->next->prev = timer->prev;
 497                 timer->prev->next = timer->next;
 498                 timer->next = timer->prev = NULL;
 499                 sti();
 500                 fn(data);
 501                 cli();
 502         }
 503         sti();
 504         
 505         for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
 506                 if (mask > timer_active)
 507                         break;
 508                 if (!(mask & timer_active))
 509                         continue;
 510                 if (tp->expires > jiffies)
 511                         continue;
 512                 timer_active &= ~mask;
 513                 tp->fn();
 514                 sti();
 515         }
 516 }
 517 
 518 void tqueue_bh(void * unused)
     /* [previous][next][first][last][top][bottom][index][help] */
 519 {
 520         run_task_queue(&tq_timer);
 521 }
 522 
 523 void immediate_bh(void * unused)
     /* [previous][next][first][last][top][bottom][index][help] */
 524 {
 525         run_task_queue(&tq_immediate);
 526 }
 527 
 528 /*
 529  * The int argument is really a (struct pt_regs *), in case the
 530  * interrupt wants to know from where it was called. The timer
 531  * irq uses this to decide if it should update the user or system
 532  * times.
 533  */
 534 static void do_timer(int irq, struct pt_regs * regs)
     /* [previous][next][first][last][top][bottom][index][help] */
 535 {
 536         unsigned long mask;
 537         struct timer_struct *tp;
 538         /* last time the cmos clock got updated */
 539         static long last_rtc_update=0;
 540         extern int set_rtc_mmss(unsigned long);
 541 
 542         long ltemp, psecs;
 543 
 544         /* Advance the phase, once it gets to one microsecond, then
 545          * advance the tick more.
 546          */
 547         time_phase += time_adj;
 548         if (time_phase < -FINEUSEC) {
 549                 ltemp = -time_phase >> SHIFT_SCALE;
 550                 time_phase += ltemp << SHIFT_SCALE;
 551                 xtime.tv_usec += tick + time_adjust_step - ltemp;
 552         }
 553         else if (time_phase > FINEUSEC) {
 554                 ltemp = time_phase >> SHIFT_SCALE;
 555                 time_phase -= ltemp << SHIFT_SCALE;
 556                 xtime.tv_usec += tick + time_adjust_step + ltemp;
 557         } else
 558                 xtime.tv_usec += tick + time_adjust_step;
 559 
 560         if (time_adjust)
 561         {
 562             /* We are doing an adjtime thing. 
 563              *
 564              * Modify the value of the tick for next time.
 565              * Note that a positive delta means we want the clock
 566              * to run fast. This means that the tick should be bigger
 567              *
 568              * Limit the amount of the step for *next* tick to be
 569              * in the range -tickadj .. +tickadj
 570              */
 571              if (time_adjust > tickadj)
 572                time_adjust_step = tickadj;
 573              else if (time_adjust < -tickadj)
 574                time_adjust_step = -tickadj;
 575              else
 576                time_adjust_step = time_adjust;
 577              
 578             /* Reduce by this step the amount of time left  */
 579             time_adjust -= time_adjust_step;
 580         }
 581         else
 582             time_adjust_step = 0;
 583 
 584         if (xtime.tv_usec >= 1000000) {
 585             xtime.tv_usec -= 1000000;
 586             xtime.tv_sec++;
 587             second_overflow();
 588         }
 589 
 590         /* If we have an externally synchronized Linux clock, then update
 591          * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
 592          * called as close as possible to 500 ms before the new second starts.
 593          */
 594         if (time_status != TIME_BAD && xtime.tv_sec > last_rtc_update + 660 &&
 595             xtime.tv_usec > 500000 - (tick >> 1) &&
 596             xtime.tv_usec < 500000 + (tick >> 1))
 597           if (set_rtc_mmss(xtime.tv_sec) == 0)
 598             last_rtc_update = xtime.tv_sec;
 599           else
 600             last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
 601 
 602         jiffies++;
 603         calc_load();
 604         if (user_mode(regs)) {
 605                 current->utime++;
 606                 if (current != task[0]) {
 607                         if (current->priority < 15)
 608                                 kstat.cpu_nice++;
 609                         else
 610                                 kstat.cpu_user++;
 611                 }
 612                 /* Update ITIMER_VIRT for current task if not in a system call */
 613                 if (current->it_virt_value && !(--current->it_virt_value)) {
 614                         current->it_virt_value = current->it_virt_incr;
 615                         send_sig(SIGVTALRM,current,1);
 616                 }
 617         } else {
 618                 current->stime++;
 619                 if(current != task[0])
 620                         kstat.cpu_system++;
 621 #ifdef CONFIG_PROFILE
 622                 if (prof_buffer && current != task[0]) {
 623                         unsigned long eip = regs->eip;
 624                         eip >>= CONFIG_PROFILE_SHIFT;
 625                         if (eip < prof_len)
 626                                 prof_buffer[eip]++;
 627                 }
 628 #endif
 629         }
 630         /*
 631          * check the cpu time limit on the process.
 632          */
 633         if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
 634             (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
 635                 send_sig(SIGKILL, current, 1);
 636         if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
 637             (((current->stime + current->utime) % HZ) == 0)) {
 638                 psecs = (current->stime + current->utime) / HZ;
 639                 /* send when equal */
 640                 if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
 641                         send_sig(SIGXCPU, current, 1);
 642                 /* and every five seconds thereafter. */
 643                 else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
 644                         ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
 645                         send_sig(SIGXCPU, current, 1);
 646         }
 647 
 648         if (current != task[0] && 0 > --current->counter) {
 649                 current->counter = 0;
 650                 need_resched = 1;
 651         }
 652         /* Update ITIMER_PROF for the current task */
 653         if (current->it_prof_value && !(--current->it_prof_value)) {
 654                 current->it_prof_value = current->it_prof_incr;
 655                 send_sig(SIGPROF,current,1);
 656         }
 657         for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
 658                 if (mask > timer_active)
 659                         break;
 660                 if (!(mask & timer_active))
 661                         continue;
 662                 if (tp->expires > jiffies)
 663                         continue;
 664                 mark_bh(TIMER_BH);
 665         }
 666         cli();
 667         itimer_ticks++;
 668         if (itimer_ticks > itimer_next)
 669                 need_resched = 1;
 670         if (timer_head.next->expires < jiffies)
 671                 mark_bh(TIMER_BH);
 672         if (tq_timer != &tq_last)
 673                 mark_bh(TQUEUE_BH);
 674         sti();
 675 }
 676 
 677 asmlinkage int sys_alarm(long seconds)
     /* [previous][next][first][last][top][bottom][index][help] */
 678 {
 679         struct itimerval it_new, it_old;
 680 
 681         it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
 682         it_new.it_value.tv_sec = seconds;
 683         it_new.it_value.tv_usec = 0;
 684         _setitimer(ITIMER_REAL, &it_new, &it_old);
 685         return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
 686 }
 687 
 688 asmlinkage int sys_getpid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 689 {
 690         return current->pid;
 691 }
 692 
 693 asmlinkage int sys_getppid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 694 {
 695         return current->p_opptr->pid;
 696 }
 697 
 698 asmlinkage int sys_getuid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 699 {
 700         return current->uid;
 701 }
 702 
 703 asmlinkage int sys_geteuid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 704 {
 705         return current->euid;
 706 }
 707 
 708 asmlinkage int sys_getgid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 709 {
 710         return current->gid;
 711 }
 712 
 713 asmlinkage int sys_getegid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 714 {
 715         return current->egid;
 716 }
 717 
 718 asmlinkage int sys_nice(long increment)
     /* [previous][next][first][last][top][bottom][index][help] */
 719 {
 720         int newprio;
 721 
 722         if (increment < 0 && !suser())
 723                 return -EPERM;
 724         newprio = current->priority - increment;
 725         if (newprio < 1)
 726                 newprio = 1;
 727         if (newprio > 35)
 728                 newprio = 35;
 729         current->priority = newprio;
 730         return 0;
 731 }
 732 
 733 static void show_task(int nr,struct task_struct * p)
     /* [previous][next][first][last][top][bottom][index][help] */
 734 {
 735         unsigned long free;
 736         static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
 737 
 738         printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
 739         if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
 740                 printk(stat_nam[p->state]);
 741         else
 742                 printk(" ");
 743 #ifdef __i386__
 744         if (p == current)
 745                 printk(" current  ");
 746         else
 747                 printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);
 748 #endif
 749         for (free = 1; free < 1024 ; free++) {
 750                 if (((unsigned long *)p->kernel_stack_page)[free])
 751                         break;
 752         }
 753         printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
 754         if (p->p_cptr)
 755                 printk("%5d ", p->p_cptr->pid);
 756         else
 757                 printk("      ");
 758         if (p->p_ysptr)
 759                 printk("%7d", p->p_ysptr->pid);
 760         else
 761                 printk("       ");
 762         if (p->p_osptr)
 763                 printk(" %5d\n", p->p_osptr->pid);
 764         else
 765                 printk("\n");
 766 }
 767 
 768 void show_state(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 769 {
 770         int i;
 771 
 772         printk("                         free                        sibling\n");
 773         printk("  task             PC    stack   pid father child younger older\n");
 774         for (i=0 ; i<NR_TASKS ; i++)
 775                 if (task[i])
 776                         show_task(i,task[i]);
 777 }
 778 
 779 void sched_init(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 780 {
 781         bh_base[TIMER_BH].routine = timer_bh;
 782         bh_base[TQUEUE_BH].routine = tqueue_bh;
 783         bh_base[IMMEDIATE_BH].routine = immediate_bh;
 784         if (request_irq(TIMER_IRQ, do_timer, 0, "timer") != 0)
 785                 panic("Could not allocate timer IRQ!");
 786         enable_bh(TIMER_BH);
 787         enable_bh(TQUEUE_BH);
 788         enable_bh(IMMEDIATE_BH);
 789 }

/* [previous][next][first][last][top][bottom][index][help] */