root/kernel/sched.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. add_to_runqueue
  2. del_from_runqueue
  3. wake_up_process
  4. process_timeout
  5. schedule
  6. sys_pause
  7. wake_up
  8. wake_up_interruptible
  9. __down
  10. __sleep_on
  11. interruptible_sleep_on
  12. sleep_on
  13. add_timer
  14. del_timer
  15. count_active_tasks
  16. calc_load
  17. second_overflow
  18. timer_bh
  19. tqueue_bh
  20. immediate_bh
  21. do_timer
  22. sys_alarm
  23. sys_getpid
  24. sys_getppid
  25. sys_getuid
  26. sys_geteuid
  27. sys_getgid
  28. sys_getegid
  29. sys_nice
  30. show_task
  31. show_state
  32. sched_init

   1 /*
   2  *  linux/kernel/sched.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  * 'sched.c' is the main kernel file. It contains scheduling primitives
   9  * (sleep_on, wakeup, schedule etc) as well as a number of simple system
  10  * call functions (type getpid(), which just extracts a field from
  11  * current-task
  12  */
  13 
  14 #include <linux/config.h>
  15 #include <linux/signal.h>
  16 #include <linux/sched.h>
  17 #include <linux/timer.h>
  18 #include <linux/kernel.h>
  19 #include <linux/kernel_stat.h>
  20 #include <linux/fdreg.h>
  21 #include <linux/errno.h>
  22 #include <linux/time.h>
  23 #include <linux/ptrace.h>
  24 #include <linux/delay.h>
  25 #include <linux/interrupt.h>
  26 #include <linux/tqueue.h>
  27 #include <linux/resource.h>
  28 #include <linux/mm.h>
  29 
  30 #include <asm/system.h>
  31 #include <asm/io.h>
  32 #include <asm/segment.h>
  33 #include <asm/pgtable.h>
  34 
  35 #define TIMER_IRQ 0
  36 
  37 #include <linux/timex.h>
  38 
  39 /*
  40  * kernel variables
  41  */
   42 long tick = 1000000 / HZ;               /* timer interrupt period */
   43 volatile struct timeval xtime;          /* The current time */
   44 int tickadj = 500/HZ;                   /* microsecs */
   45 
   46 DECLARE_TASK_QUEUE(tq_timer);
   47 DECLARE_TASK_QUEUE(tq_immediate);
   48 DECLARE_TASK_QUEUE(tq_scheduler);
   49 
   50 /*
   51  * phase-lock loop variables
   52  */
   53 int time_status = TIME_BAD;     /* clock synchronization status */
   54 long time_offset = 0;           /* time adjustment (us) */
   55 long time_constant = 0;         /* pll time constant */
   56 long time_tolerance = MAXFREQ;  /* frequency tolerance (ppm) */
   57 long time_precision = 1;        /* clock precision (us) */
   58 long time_maxerror = 0x70000000;/* maximum error */
   59 long time_esterror = 0x70000000;/* estimated error */
   60 long time_phase = 0;            /* phase offset (scaled us) */
   61 long time_freq = 0;             /* frequency offset (scaled ppm) */
   62 long time_adj = 0;              /* tick adjust (scaled 1 / HZ) */
   63 long time_reftime = 0;          /* time at last adjustment (s) */
   64 
   65 long time_adjust = 0;           /* adjtime() correction left to apply (us); consumed by do_timer() */
   66 long time_adjust_step = 0;      /* per-tick slice of the above, clamped to +/- tickadj */
   67 
   68 int need_resched = 0;           /* set when schedule() should run at the next opportunity */
   69 unsigned long event = 0;        /* presumably a global event counter -- not referenced in this file */
   70 
   71 extern int _setitimer(int, struct itimerval *, struct itimerval *); /* used by sys_alarm() */
   72 unsigned long * prof_buffer = NULL;     /* kernel profiling histogram (see do_timer) */
   73 unsigned long prof_len = 0;             /* number of slots in prof_buffer */
   74 unsigned long prof_shift = 0;           /* eip >> prof_shift indexes prof_buffer */
   75 
   76 #define _S(nr) (1<<((nr)-1))
   77 
   78 extern void mem_use(void);
   79 
   80 extern int timer_interrupt(void);
   81  
   82 static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
   83 unsigned long init_user_stack[1024] = { STACK_MAGIC, };
   84 static struct vm_area_struct init_mmap = INIT_MMAP;
   85 static struct fs_struct init_fs = INIT_FS;
   86 static struct files_struct init_files = INIT_FILES;
   87 static struct sigaction init_sigaction[32] = { {0,}, };
   88 
   89 struct mm_struct init_mm = INIT_MM;
   90 struct task_struct init_task = INIT_TASK;
   91 
   92 unsigned long volatile jiffies=0;       /* ticks since boot; incremented by do_timer() */
   93 
   94 struct task_struct *current = &init_task;
   95 struct task_struct *last_task_used_math = NULL;
   96 
   97 struct task_struct * task[NR_TASKS] = {&init_task, };
   98 
   99 struct kernel_stat kstat = { 0 };
 100 
  101 static inline void add_to_runqueue(struct task_struct * p)
    /* [previous][next][first][last][top][bottom][index][help] */
/*
 * Link 'p' into the doubly-linked circular run-queue headed by
 * init_task (the idle task).  New entries go at the front of the list.
 * NOTE(review): callers appear to hold cli() around this (see
 * wake_up_process and schedule) -- confirm before adding new callers.
 */
  102 {
  103 #if 1   /* sanity tests */
  104         if (p->next_run || p->prev_run) {
  105                 printk("task already on run-queue\n");
  106                 return;
  107         }
  108 #endif
        /* if the woken task has noticeably more quantum left than the
         * current one, ask for a reschedule soon */
  109         if (p->counter > current->counter + 3)
  110                 need_resched = 1;
  111         nr_running++;
  112         (p->next_run = init_task.next_run)->prev_run = p;
  113         p->prev_run = &init_task;
  114         init_task.next_run = p;
  115 }
 116 
  117 static inline void del_from_runqueue(struct task_struct * p)
    /* [previous][next][first][last][top][bottom][index][help] */
/*
 * Unlink 'p' from the run-queue.  Refuses to remove the idle task
 * (init_task), which must always stay runnable; that complaint is
 * rate-limited to five printk's.
 */
  118 {
  119         struct task_struct *next = p->next_run;
  120         struct task_struct *prev = p->prev_run;
  121 
  122 #if 1   /* sanity tests */
  123         if (!next || !prev) {
  124                 printk("task not on run-queue\n");
  125                 return;
  126         }
  127 #endif
  128         if (p == &init_task) {
  129                 static int nr = 0;
  130                 if (nr < 5) {
  131                         nr++;
  132                         printk("idle task may not sleep\n");
  133                 }
  134                 return;
  135         }
  136         nr_running--;
  137         next->prev_run = prev;
  138         prev->next_run = next;
        /* NULL pointers mark "not queued" for the sanity tests above
         * and for wake_up_process() */
  139         p->next_run = NULL;
  140         p->prev_run = NULL;
  141 }
 142 
 143 /*
 144  * Wake up a process. Put it on the run-queue if it's not
 145  * already there.  The "current" process is always on the
 146  * run-queue (except when the actual re-schedule is in
 147  * progress), and as such you're allowed to do the simpler
 148  * "current->state = TASK_RUNNING" to mark yourself runnable
 149  * without the overhead of this.
 150  */
  151 inline void wake_up_process(struct task_struct * p)
    /* [previous][next][first][last][top][bottom][index][help] */
/*
 * Mark 'p' runnable and put it on the run-queue if it is not already
 * there (p->next_run == NULL means "not queued").  Interrupt-safe:
 * the queue is manipulated with interrupts disabled.
 */
  152 {
  153         unsigned long flags;
  154 
  155         save_flags(flags);
  156         cli();
  157         p->state = TASK_RUNNING;
  158         if (!p->next_run)
  159                 add_to_runqueue(p);
  160         restore_flags(flags);
  161 }
 162 
  163 static void process_timeout(unsigned long __data)
    /* [previous][next][first][last][top][bottom][index][help] */
/*
 * Timer callback armed by schedule() for a task sleeping with a
 * timeout: '__data' is the task pointer.  Clears the timeout and
 * wakes the task.
 */
  164 {
  165         struct task_struct * p = (struct task_struct *) __data;
  166 
  167         p->timeout = 0;
  168         wake_up_process(p);
  169 }
 170 
 171 /*
 172  *  'schedule()' is the scheduler function. It's a very simple and nice
 173  * scheduler: it's not perfect, but certainly works for most things.
 174  *
 175  * The goto is "interesting".
 176  *
 177  *   NOTE!!  Task 0 is the 'idle' task, which gets called when no other
 178  * tasks can run. It can not be killed, and it cannot sleep. The 'state'
 179  * information in task[0] is never used.
 180  */
  181 asmlinkage void schedule(void)
    /* [previous][next][first][last][top][bottom][index][help] */
/*
 * Pick the runnable task with the largest remaining 'counter' and
 * switch to it.  Must not be called from interrupt context.
 */
  182 {
  183         int c;
  184         struct task_struct * p;
  185         struct task_struct * next;
  186         unsigned long timeout = 0;
  187 
  188 /* check alarm, wake up any interruptible tasks that have got a signal */
  189 
  190         if (intr_count) {
  191                 printk("Aiee: scheduling in interrupt\n");
  192                 return;
  193         }
  194         run_task_queue(&tq_scheduler);
  195 
  196         need_resched = 0;
  197         cli();
        /* TASK_INTERRUPTIBLE deliberately falls through to the default
         * case (and thus off the run-queue) unless a pending unblocked
         * signal or an already-expired timeout makes it runnable again;
         * TASK_RUNNING stays on the queue. */
  198         switch (current->state) {
  199                 case TASK_INTERRUPTIBLE:
  200                         if (current->signal & ~current->blocked)
  201                                 goto makerunnable;
  202                         timeout = current->timeout;
  203                         if (timeout && (timeout <= jiffies)) {
  204                                 current->timeout = 0;
  205                                 timeout = 0;
  206                 makerunnable:
  207                                 current->state = TASK_RUNNING;
  208                                 break;
  209                         }
  210                 default:
  211                         del_from_runqueue(current);
  212                 case TASK_RUNNING:
  213         }
  214         p = init_task.next_run;
  215         sti();
  216 
  217 /*
  218  * Note! there may appear new tasks on the run-queue during this, as
  219  * interrupts are enabled. However, they will be put on front of the
  220  * list, so our list starting at "p" is essentially fixed.
  221  */
  222 /* this is the scheduler proper: */
  223         c = -1000;
  224         next = &init_task;
  225         while (p != &init_task) {
  226                 if (p->counter > c)
  227                         c = p->counter, next = p;
  228                 p = p->next_run;
  229         }
  230 
  231         /* if all runnable processes have "counter == 0", re-calculate counters */
  232         if (!c) {
  233                 for_each_task(p)
  234                         p->counter = (p->counter >> 1) + p->priority;
  235         }
  236         if (current != next) {
                /* the timeout timer lives on this stack frame: it is armed
                 * before the switch and removed once this task runs again */
  237                 struct timer_list timer;
  238 
  239                 kstat.context_swtch++;
  240                 if (timeout) {
  241                         init_timer(&timer);
  242                         timer.expires = timeout;
  243                         timer.data = (unsigned long) current;
  244                         timer.function = process_timeout;
  245                         add_timer(&timer);
  246                 }
  247                 switch_to(next);
  248                 if (timeout)
  249                         del_timer(&timer);
  250         }
  251 }
 252 
  253 asmlinkage int sys_pause(void)
    /* [previous][next][first][last][top][bottom][index][help] */
/*
 * pause(2): sleep interruptibly until a signal arrives; always returns
 * -ERESTARTNOHAND so the syscall is not restarted after the handler.
 */
  254 {
  255         current->state = TASK_INTERRUPTIBLE;
  256         schedule();
  257         return -ERESTARTNOHAND;
  258 }
 259 
 260 /*
 261  * wake_up doesn't wake up stopped processes - they have to be awakened
 262  * with signals or similar.
 263  *
 264  * Note that this doesn't need cli-sti pairs: interrupts may not change
 265  * the wait-queue structures directly, but only call wake_up() to wake
 266  * a process. The process itself must remove the queue once it has woken.
 267  */
  268 void wake_up(struct wait_queue **q)
    /* [previous][next][first][last][top][bottom][index][help] */
/*
 * Wake every sleeping (interruptible or uninterruptible) task on the
 * circular wait-queue '*q'.  A NULL 'next' link means the queue is
 * corrupt: dump diagnostics and bail out instead of looping forever.
 */
  269 {
  270         struct wait_queue *tmp;
  271         struct task_struct * p;
  272 
  273         if (!q || !(tmp = *q))
  274                 return;
  275         do {
  276                 if ((p = tmp->task) != NULL) {
  277                         if ((p->state == TASK_UNINTERRUPTIBLE) ||
  278                             (p->state == TASK_INTERRUPTIBLE))
  279                                 wake_up_process(p);
  280                 }
  281                 if (!tmp->next) {
  282                         printk("wait_queue is bad (eip = %p)\n",
  283                                 __builtin_return_address(0));
  284                         printk("        q = %p\n",q);
  285                         printk("       *q = %p\n",*q);
  286                         printk("      tmp = %p\n",tmp);
  287                         break;
  288                 }
  289                 tmp = tmp->next;
  290         } while (tmp != *q);
  291 }
 292 
  293 void wake_up_interruptible(struct wait_queue **q)
    /* [previous][next][first][last][top][bottom][index][help] */
/*
 * Like wake_up(), but wakes only tasks in TASK_INTERRUPTIBLE;
 * uninterruptible sleepers are left alone.
 */
  294 {
  295         struct wait_queue *tmp;
  296         struct task_struct * p;
  297 
  298         if (!q || !(tmp = *q))
  299                 return;
  300         do {
  301                 if ((p = tmp->task) != NULL) {
  302                         if (p->state == TASK_INTERRUPTIBLE)
  303                                 wake_up_process(p);
  304                 }
  305                 if (!tmp->next) {
  306                         printk("wait_queue is bad (eip = %p)\n",
  307                                 __builtin_return_address(0));
  308                         printk("        q = %p\n",q);
  309                         printk("       *q = %p\n",*q);
  310                         printk("      tmp = %p\n",tmp);
  311                         break;
  312                 }
  313                 tmp = tmp->next;
  314         } while (tmp != *q);
  315 }
 316 
  317 void __down(struct semaphore * sem)
    /* [previous][next][first][last][top][bottom][index][help] */
/*
 * Slow path of semaphore down(): sleep until sem->count goes positive.
 * The state is set to TASK_UNINTERRUPTIBLE *before* each test of the
 * count, so a wake_up() arriving between the test and schedule() is
 * not lost.
 */
  318 {
  319         struct wait_queue wait = { current, NULL };
  320         add_wait_queue(&sem->wait, &wait);
  321         current->state = TASK_UNINTERRUPTIBLE;
  322         while (sem->count <= 0) {
  323                 schedule();
  324                 current->state = TASK_UNINTERRUPTIBLE;
  325         }
  326         current->state = TASK_RUNNING;
  327         remove_wait_queue(&sem->wait, &wait);
  328 }
 329 
  330 static inline void __sleep_on(struct wait_queue **p, int state)
    /* [previous][next][first][last][top][bottom][index][help] */
/*
 * Common body of sleep_on()/interruptible_sleep_on(): put the caller
 * on wait-queue 'p' in the given state and schedule away.  The idle
 * task (task[0]) may never sleep.  Interrupt flags are restored on
 * return.
 */
  331 {
  332         unsigned long flags;
  333         struct wait_queue wait = { current, NULL };
  334 
  335         if (!p)
  336                 return;
  337         if (current == task[0])
  338                 panic("task[0] trying to sleep");
        /* state is set before queueing so a wake-up cannot be lost */
  339         current->state = state;
  340         add_wait_queue(p, &wait);
  341         save_flags(flags);
  342         sti();
  343         schedule();
  344         remove_wait_queue(p, &wait);
  345         restore_flags(flags);
  346 }
 347 
  348 void interruptible_sleep_on(struct wait_queue **p)
    /* [previous][next][first][last][top][bottom][index][help] */
/* Sleep on 'p', wakeable by signals (TASK_INTERRUPTIBLE). */
  349 {
  350         __sleep_on(p,TASK_INTERRUPTIBLE);
  351 }
 352 
  353 void sleep_on(struct wait_queue **p)
    /* [previous][next][first][last][top][bottom][index][help] */
/* Sleep on 'p', not wakeable by signals (TASK_UNINTERRUPTIBLE). */
  354 {
  355         __sleep_on(p,TASK_UNINTERRUPTIBLE);
  356 }
 357 
 358 /*
 359  * The head for the timer-list has a "expires" field of MAX_UINT,
 360  * and the sorting routine counts on this..
 361  */
  362 static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
  363 #define SLOW_BUT_DEBUGGING_TIMERS 1
  364 
  365 void add_timer(struct timer_list * timer)
    /* [previous][next][first][last][top][bottom][index][help] */
/*
 * Insert 'timer' into the list sorted by ascending 'expires'.  The
 * search cannot run off the end because the head sentinel's expires
 * is ~0 (see comment above timer_head).  Runs with interrupts off.
 */
  366 {
  367         unsigned long flags;
  368         struct timer_list *p;
  369 
  370 #if SLOW_BUT_DEBUGGING_TIMERS
        /* a timer with live list pointers is already pending -- refuse */
  371         if (timer->next || timer->prev) {
  372                 printk("add_timer() called with non-zero list from %p\n",
  373                         __builtin_return_address(0));
  374                 return;
  375         }
  376 #endif
  377         p = &timer_head;
  378         save_flags(flags);
  379         cli();
  380         do {
  381                 p = p->next;
  382         } while (timer->expires > p->expires);
  383         timer->next = p;
  384         timer->prev = p->prev;
  385         p->prev = timer;
  386         timer->prev->next = timer;
  387         restore_flags(flags);
  388 }
 389 
  390 int del_timer(struct timer_list * timer)
    /* [previous][next][first][last][top][bottom][index][help] */
/*
 * Remove a pending timer.  Returns 1 if the timer was found and
 * unlinked, 0 otherwise.  The debugging variant walks the whole list
 * to verify the timer is really on it; the fast variant trusts
 * timer->next as the "pending" indicator.
 */
  391 {
  392         unsigned long flags;
  393 #if SLOW_BUT_DEBUGGING_TIMERS
  394         struct timer_list * p;
  395 
  396         p = &timer_head;
  397         save_flags(flags);
  398         cli();
  399         while ((p = p->next) != &timer_head) {
  400                 if (p == timer) {
  401                         timer->next->prev = timer->prev;
  402                         timer->prev->next = timer->next;
  403                         timer->next = timer->prev = NULL;
  404                         restore_flags(flags);
  405                         return 1;
  406                 }
  407         }
        /* not on the list, yet its pointers are non-NULL: caller bug */
  408         if (timer->next || timer->prev)
  409                 printk("del_timer() called from %p with timer not initialized\n",
  410                         __builtin_return_address(0));
  411         restore_flags(flags);
  412         return 0;
  413 #else   
  414         save_flags(flags);
  415         cli();
  416         if (timer->next) {
  417                 timer->next->prev = timer->prev;
  418                 timer->prev->next = timer->next;
  419                 timer->next = timer->prev = NULL;
  420                 restore_flags(flags);
  421                 return 1;
  422         }
  423         restore_flags(flags);
  424         return 0;
  425 #endif
  426 }
 427 
  428 unsigned long timer_active = 0;         /* bitmask of pending slots in timer_table */
  429 struct timer_struct timer_table[32];    /* old-style static timers, one per bit */
  430 
  431 /*
  432  * Hmm.. Changed this, as the GNU make sources (load.c) seems to
  433  * imply that avenrun[] is the standard name for this kind of thing.
  434  * Nothing else seems to be standardized: the fractional size etc
  435  * all seem to differ on different machines.
  436  */
  437 unsigned long avenrun[3] = { 0,0,0 };   /* 1-, 5- and 15-minute load averages */
  438 
  439 /*
  440  * Nr of active tasks - counted in fixed-point numbers
  441  */
  442 static unsigned long count_active_tasks(void)
    /* [previous][next][first][last][top][bottom][index][help] */
/*
 * Count running, uninterruptible and swapping tasks, scaled by
 * FIXED_1 so the result feeds straight into CALC_LOAD().  task[0]
 * (FIRST_TASK) is excluded: the loop stops before reaching it.
 */
  443 {
  444         struct task_struct **p;
  445         unsigned long nr = 0;
  446 
  447         for(p = &LAST_TASK; p > &FIRST_TASK; --p)
  448                 if (*p && ((*p)->state == TASK_RUNNING ||
  449                            (*p)->state == TASK_UNINTERRUPTIBLE ||
  450                            (*p)->state == TASK_SWAPPING))
  451                         nr += FIXED_1;
  452         return nr;
  453 }
 454 
  455 static inline void calc_load(void)
    /* [previous][next][first][last][top][bottom][index][help] */
/*
 * Called every tick from do_timer(); only does real work once every
 * LOAD_FREQ ticks, when it folds the active-task count into the
 * three exponentially-decaying load averages.
 */
  456 {
  457         unsigned long active_tasks; /* fixed-point */
  458         static int count = LOAD_FREQ;
  459 
  460         if (count-- > 0)
  461                 return;
  462         count = LOAD_FREQ;
  463         active_tasks = count_active_tasks();
  464         CALC_LOAD(avenrun[0], EXP_1, active_tasks);
  465         CALC_LOAD(avenrun[1], EXP_5, active_tasks);
  466         CALC_LOAD(avenrun[2], EXP_15, active_tasks);
  467 }
 468 
 469 /*
 470  * this routine handles the overflow of the microsecond field
 471  *
 472  * The tricky bits of code to handle the accurate clock support
 473  * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 474  * They were originally developed for SUN and DEC kernels.
 475  * All the kudos should go to Dave for this stuff.
 476  *
 477  * These were ported to Linux by Philip Gladstone.
 478  */
  479 static void second_overflow(void)
    /* [previous][next][first][last][top][bottom][index][help] */
/*
 * Called once per second from do_timer() when tv_usec wraps.  Grows
 * the maximum-error estimate, runs one step of the NTP phase-lock
 * loop (computing time_adj, the per-tick phase correction consumed
 * by do_timer), and drives the leap-second state machine.
 */
  480 {
  481         long ltemp;
  482 
  483         /* Bump the maxerror field */
  484         time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
  485           0x70000000 : (time_maxerror + time_tolerance);
  486 
  487         /* Run the PLL */
  488         if (time_offset < 0) {
  489                 ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
  490                 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
  491                 time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
  492                 time_adj = - time_adj;
  493         } else if (time_offset > 0) {
  494                 ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
  495                 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
  496                 time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
  497         } else {
  498                 time_adj = 0;
  499         }
  500 
        /* fold in the frequency correction on top of the phase term */
  501         time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
  502             + FINETUNE;
  503 
  504         /* Handle the leap second stuff */
  505         switch (time_status) {
  506                 case TIME_INS:
  507                 /* ugly divide should be replaced */
  508                 if (xtime.tv_sec % 86400 == 0) {
  509                         xtime.tv_sec--; /* !! */
  510                         time_status = TIME_OOP;
  511                         printk("Clock: inserting leap second 23:59:60 UTC\n");
  512                 }
  513                 break;
  514 
  515                 case TIME_DEL:
  516                 /* ugly divide should be replaced */
  517                 if (xtime.tv_sec % 86400 == 86399) {
  518                         xtime.tv_sec++;
  519                         time_status = TIME_OK;
  520                         printk("Clock: deleting leap second 23:59:59 UTC\n");
  521                 }
  522                 break;
  523 
  524                 case TIME_OOP:
  525                 time_status = TIME_OK;
  526                 break;
  527         }
  528 }
 529 
 530 /*
 531  * disregard lost ticks for now.. We don't care enough.
 532  */
  533 static void timer_bh(void * unused)
    /* [previous][next][first][last][top][bottom][index][help] */
/*
 * Bottom half for TIMER_BH: runs every expired dynamic timer (the
 * sorted timer_head list), then the expired slots of the old-style
 * static timer_table.  List manipulation is done with interrupts off;
 * each handler runs with interrupts enabled.
 */
  534 {
  535         unsigned long mask;
  536         struct timer_struct *tp;
  537         struct timer_list * timer;
  538 
  539         cli();
        /* the list is expiry-sorted, so stop at the first future timer;
         * unlink before calling so the handler may re-add the timer */
  540         while ((timer = timer_head.next) != &timer_head && timer->expires <= jiffies) {
  541                 void (*fn)(unsigned long) = timer->function;
  542                 unsigned long data = timer->data;
  543                 timer->next->prev = timer->prev;
  544                 timer->prev->next = timer->next;
  545                 timer->next = timer->prev = NULL;
  546                 sti();
  547                 fn(data);
  548                 cli();
  549         }
  550         sti();
  551         
        /* old-style static timers: one timer_active bit per table slot */
  552         for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
  553                 if (mask > timer_active)
  554                         break;
  555                 if (!(mask & timer_active))
  556                         continue;
  557                 if (tp->expires > jiffies)
  558                         continue;
  559                 timer_active &= ~mask;
  560                 tp->fn();
  561                 sti();
  562         }
  563 }
 564 
  565 void tqueue_bh(void * unused)
    /* [previous][next][first][last][top][bottom][index][help] */
/* Bottom half for TQUEUE_BH: drain the tq_timer task queue. */
  566 {
  567         run_task_queue(&tq_timer);
  568 }
 569 
  570 void immediate_bh(void * unused)
    /* [previous][next][first][last][top][bottom][index][help] */
/* Bottom half for IMMEDIATE_BH: drain the tq_immediate task queue. */
  571 {
  572         run_task_queue(&tq_immediate);
  573 }
 574 
 575 /*
 576  * The int argument is really a (struct pt_regs *), in case the
 577  * interrupt wants to know from where it was called. The timer
 578  * irq uses this to decide if it should update the user or system
 579  * times.
 580  */
  581 static void do_timer(int irq, struct pt_regs * regs)
    /* [previous][next][first][last][top][bottom][index][help] */
/*
 * Timer interrupt handler (registered for TIMER_IRQ in sched_init).
 * Advances xtime with the NTP phase/adjtime corrections, updates
 * jiffies and the load average, does per-process CPU accounting and
 * interval timers, enforces RLIMIT_CPU, expires the running task's
 * quantum, and marks the timer/tqueue bottom halves for execution.
 * 'regs' tells us whether the tick hit user or kernel mode.
 */
  582 {
  583         unsigned long mask;
  584         struct timer_struct *tp;
  585         /* last time the cmos clock got updated */
  586         static long last_rtc_update=0;
  587         extern int set_rtc_mmss(unsigned long);
  588 
  589         long ltemp, psecs;
  590 
  591         /* Advance the phase, once it gets to one microsecond, then
  592          * advance the tick more.
  593          */
  594         time_phase += time_adj;
  595         if (time_phase < -FINEUSEC) {
  596                 ltemp = -time_phase >> SHIFT_SCALE;
  597                 time_phase += ltemp << SHIFT_SCALE;
  598                 xtime.tv_usec += tick + time_adjust_step - ltemp;
  599         }
  600         else if (time_phase > FINEUSEC) {
  601                 ltemp = time_phase >> SHIFT_SCALE;
  602                 time_phase -= ltemp << SHIFT_SCALE;
  603                 xtime.tv_usec += tick + time_adjust_step + ltemp;
  604         } else
  605                 xtime.tv_usec += tick + time_adjust_step;
  606 
  607         if (time_adjust)
  608         {
  609             /* We are doing an adjtime thing. 
  610              *
  611              * Modify the value of the tick for next time.
  612              * Note that a positive delta means we want the clock
  613              * to run fast. This means that the tick should be bigger
  614              *
  615              * Limit the amount of the step for *next* tick to be
  616              * in the range -tickadj .. +tickadj
  617              */
  618              if (time_adjust > tickadj)
  619                time_adjust_step = tickadj;
  620              else if (time_adjust < -tickadj)
  621                time_adjust_step = -tickadj;
  622              else
  623                time_adjust_step = time_adjust;
  624              
  625             /* Reduce by this step the amount of time left  */
  626             time_adjust -= time_adjust_step;
  627         }
  628         else
  629             time_adjust_step = 0;
  630 
        /* microseconds wrapped: a new second, run the once-per-second work */
  631         if (xtime.tv_usec >= 1000000) {
  632             xtime.tv_usec -= 1000000;
  633             xtime.tv_sec++;
  634             second_overflow();
  635         }
  636 
  637         /* If we have an externally synchronized Linux clock, then update
  638          * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
  639          * called as close as possible to 500 ms before the new second starts.
  640          */
  641         if (time_status != TIME_BAD && xtime.tv_sec > last_rtc_update + 660 &&
  642             xtime.tv_usec > 500000 - (tick >> 1) &&
  643             xtime.tv_usec < 500000 + (tick >> 1))
  644           if (set_rtc_mmss(xtime.tv_sec) == 0)
  645             last_rtc_update = xtime.tv_sec;
  646           else
  647             last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
  648 
  649         jiffies++;
  650         calc_load();
        /* charge this tick to user or system time, and to the right
         * kernel_stat bucket (task[0], the idle task, is never charged) */
  651         if (user_mode(regs)) {
  652                 current->utime++;
  653                 if (current != task[0]) {
  654                         if (current->priority < 15)
  655                                 kstat.cpu_nice++;
  656                         else
  657                                 kstat.cpu_user++;
  658                 }
  659                 /* Update ITIMER_VIRT for current task if not in a system call */
  660                 if (current->it_virt_value && !(--current->it_virt_value)) {
  661                         current->it_virt_value = current->it_virt_incr;
  662                         send_sig(SIGVTALRM,current,1);
  663                 }
  664         } else {
  665                 current->stime++;
  666                 if(current != task[0])
  667                         kstat.cpu_system++;
                /* kernel profiling: bump the histogram slot for this eip */
  668                 if (prof_buffer && current != task[0]) {
  669                         extern int _stext;
  670                         unsigned long eip = regs->eip - (unsigned long) &_stext;
  671                         eip >>= prof_shift;
  672                         if (eip < prof_len)
  673                                 prof_buffer[eip]++;
  674                 }
  675         }
  676         /*
  677          * check the cpu time limit on the process.
  678          */
  679         if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
  680             (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
  681                 send_sig(SIGKILL, current, 1);
  682         if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
  683             (((current->stime + current->utime) % HZ) == 0)) {
  684                 psecs = (current->stime + current->utime) / HZ;
  685                 /* send when equal */
  686                 if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
  687                         send_sig(SIGXCPU, current, 1);
  688                 /* and every five seconds thereafter. */
  689                 else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
  690                         ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
  691                         send_sig(SIGXCPU, current, 1);
  692         }
  693 
        /* quantum used up: request a reschedule (never for the idle task) */
  694         if (current != task[0] && 0 > --current->counter) {
  695                 current->counter = 0;
  696                 need_resched = 1;
  697         }
  698         /* Update ITIMER_PROF for the current task */
  699         if (current->it_prof_value && !(--current->it_prof_value)) {
  700                 current->it_prof_value = current->it_prof_incr;
  701                 send_sig(SIGPROF,current,1);
  702         }
        /* defer actually running expired timers to the TIMER_BH bottom half */
  703         for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
  704                 if (mask > timer_active)
  705                         break;
  706                 if (!(mask & timer_active))
  707                         continue;
  708                 if (tp->expires > jiffies)
  709                         continue;
  710                 mark_bh(TIMER_BH);
  711         }
  712         cli();
  713         if (timer_head.next->expires <= jiffies)
  714                 mark_bh(TIMER_BH);
  715         if (tq_timer != &tq_last)
  716                 mark_bh(TQUEUE_BH);
  717         sti();
  718 }
 719 
  720 asmlinkage unsigned int sys_alarm(unsigned int seconds)
    /* [previous][next][first][last][top][bottom][index][help] */
/*
 * alarm(2): arm ITIMER_REAL for 'seconds' (0 cancels), implemented on
 * top of _setitimer().  Returns the whole seconds left on the previous
 * alarm, rounded up so a pending alarm never reports 0.
 */
  721 {
  722         struct itimerval it_new, it_old;
  723         unsigned int oldalarm;
  724 
  725         it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
  726         it_new.it_value.tv_sec = seconds;
  727         it_new.it_value.tv_usec = 0;
  728         _setitimer(ITIMER_REAL, &it_new, &it_old);
  729         oldalarm = it_old.it_value.tv_sec;
  730         /* ehhh.. We can't return 0 if we have an alarm pending.. */
  731         /* And we'd better return too much than too little anyway */
  732         if (it_old.it_value.tv_usec)
  733                 oldalarm++;
  734         return oldalarm;
  735 }
 736 
  737 asmlinkage int sys_getpid(void)
    /* [previous][next][first][last][top][bottom][index][help] */
/* getpid(2): return the calling process's pid. */
  738 {
  739         return current->pid;
  740 }
 741 
  742 asmlinkage int sys_getppid(void)
    /* [previous][next][first][last][top][bottom][index][help] */
/* getppid(2): return the pid of the original parent (p_opptr). */
  743 {
  744         return current->p_opptr->pid;
  745 }
 746 
  747 asmlinkage int sys_getuid(void)
    /* [previous][next][first][last][top][bottom][index][help] */
/* getuid(2): return the real user id. */
  748 {
  749         return current->uid;
  750 }
 751 
  752 asmlinkage int sys_geteuid(void)
    /* [previous][next][first][last][top][bottom][index][help] */
/* geteuid(2): return the effective user id. */
  753 {
  754         return current->euid;
  755 }
 756 
  757 asmlinkage int sys_getgid(void)
    /* [previous][next][first][last][top][bottom][index][help] */
/* getgid(2): return the real group id. */
  758 {
  759         return current->gid;
  760 }
 761 
  762 asmlinkage int sys_getegid(void)
    /* [previous][next][first][last][top][bottom][index][help] */
/* getegid(2): return the effective group id. */
  763 {
  764         return current->egid;
  765 }
 766 
  767 asmlinkage int sys_nice(long increment)
    /* [previous][next][first][last][top][bottom][index][help] */
/*
 * nice(2): lower (or, for the superuser, raise) the caller's scheduling
 * priority by 'increment'.  The resulting priority is clamped to the
 * range 1..35; only the superuser may pass a negative increment.
 */
  768 {
  769         int newprio;
  770 
  771         if (increment < 0 && !suser())
  772                 return -EPERM;
  773         newprio = current->priority - increment;
  774         if (newprio < 1)
  775                 newprio = 1;
  776         if (newprio > 35)
  777                 newprio = 35;
  778         current->priority = newprio;
  779         return 0;
  780 }
 781 
  782 static void show_task(int nr,struct task_struct * p)
    /* [previous][next][first][last][top][bottom][index][help] */
/*
 * Print one row of the show_state() table for task 'p' at slot 'nr'
 * (negated for the current task): name, state letter, saved PC, free
 * kernel stack, pid, and the parent/child/sibling pids.
 */
  783 {
  784         unsigned long free;
  785         static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
  786 
  787         printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
  788         if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
  789                 printk(stat_nam[p->state]);
  790         else
  791                 printk(" ");
        /* field widths depend on whether 'unsigned long' is 32 or 64 bits */
  792 #if ((~0UL) == 0xffffffff)
  793         if (p == current)
  794                 printk(" current  ");
  795         else
  796                 printk(" %08lX ", thread_saved_pc(&p->tss));
  797 #else
  798         if (p == current)
  799                 printk("   current task   ");
  800         else
  801                 printk(" %016lx ", thread_saved_pc(&p->tss));
  802 #endif
        /* estimate unused stack: count zero words from the bottom of the
         * kernel stack page up to the first non-zero word */
  803         for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
  804                 if (((unsigned long *)p->kernel_stack_page)[free])
  805                         break;
  806         }
  807         printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
  808         if (p->p_cptr)
  809                 printk("%5d ", p->p_cptr->pid);
  810         else
  811                 printk("      ");
  812         if (p->p_ysptr)
  813                 printk("%7d", p->p_ysptr->pid);
  814         else
  815                 printk("       ");
  816         if (p->p_osptr)
  817                 printk(" %5d\n", p->p_osptr->pid);
  818         else
  819                 printk("\n");
  820 }
 821 
  822 void show_state(void)
    /* [previous][next][first][last][top][bottom][index][help] */
/*
 * Dump a one-line summary of every task in the task[] array via
 * show_task().  Header layout matches the 32- vs 64-bit PC column
 * widths used there.
 */
  823 {
  824         int i;
  825 
  826 #if ((~0UL) == 0xffffffff)
  827         printk("\n"
  828                "                         free                        sibling\n");
  829         printk("  task             PC    stack   pid father child younger older\n");
  830 #else
  831         printk("\n"
  832                "                                 free                        sibling\n");
  833         printk("  task                 PC        stack   pid father child younger older\n");
  834 #endif
  835         for (i=0 ; i<NR_TASKS ; i++)
  836                 if (task[i])
  837                         show_task(i,task[i]);
  838 }
 839 
  840 void sched_init(void)
    /* [previous][next][first][last][top][bottom][index][help] */
/*
 * Boot-time scheduler setup: install the three bottom-half handlers,
 * claim the timer interrupt for do_timer() (fatal if unavailable),
 * and enable the bottom halves.
 */
  841 {
  842         bh_base[TIMER_BH].routine = timer_bh;
  843         bh_base[TQUEUE_BH].routine = tqueue_bh;
  844         bh_base[IMMEDIATE_BH].routine = immediate_bh;
  845         if (request_irq(TIMER_IRQ, do_timer, 0, "timer") != 0)
  846                 panic("Could not allocate timer IRQ!");
  847         enable_bh(TIMER_BH);
  848         enable_bh(TQUEUE_BH);
  849         enable_bh(IMMEDIATE_BH);
  850 }

/* [previous][next][first][last][top][bottom][index][help] */