root/kernel/sched.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions:
  1. math_state_restore
  2. math_emulate
  3. schedule
  4. sys_pause
  5. wake_up
  6. wake_up_interruptible
  7. __down
  8. __sleep_on
  9. interruptible_sleep_on
  10. sleep_on
  11. add_timer
  12. del_timer
  13. count_active_tasks
  14. calc_load
  15. second_overflow
  16. timer_bh
  17. tqueue_bh
  18. immediate_bh
  19. do_timer
  20. sys_alarm
  21. sys_getpid
  22. sys_getppid
  23. sys_getuid
  24. sys_geteuid
  25. sys_getgid
  26. sys_getegid
  27. sys_nice
  28. show_task
  29. show_state
  30. sched_init

   1 /*
   2  *  linux/kernel/sched.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  * 'sched.c' is the main kernel file. It contains scheduling primitives
   9  * (sleep_on, wakeup, schedule etc) as well as a number of simple system
  10  * call functions (type getpid(), which just extracts a field from
  11  * current-task
  12  */
  13 
  14 #include <linux/config.h>
  15 #include <linux/signal.h>
  16 #include <linux/sched.h>
  17 #include <linux/timer.h>
  18 #include <linux/kernel.h>
  19 #include <linux/kernel_stat.h>
  20 #include <linux/fdreg.h>
  21 #include <linux/errno.h>
  22 #include <linux/time.h>
  23 #include <linux/ptrace.h>
  24 #include <linux/segment.h>
  25 #include <linux/delay.h>
  26 #include <linux/interrupt.h>
  27 #include <linux/tqueue.h>
  28 #include <linux/resource.h>
  29 
  30 #include <asm/system.h>
  31 #include <asm/io.h>
  32 #include <asm/segment.h>
  33 
#define TIMER_IRQ 0     /* the PIT output is wired to IRQ 0 on the PC */

#include <linux/timex.h>

/*
 * kernel variables
 */
long tick = 1000000 / HZ;               /* timer interrupt period (us) */
volatile struct timeval xtime;          /* The current time */
int tickadj = 500/HZ;                   /* microsecs adjtime() may slew per tick */

/* task queues drained by the TQUEUE_BH / IMMEDIATE_BH bottom halves */
DECLARE_TASK_QUEUE(tq_timer);
DECLARE_TASK_QUEUE(tq_immediate);

/*
 * phase-lock loop variables (kernel NTP PLL; consumed by second_overflow()
 * and do_timer())
 */
int time_status = TIME_BAD;     /* clock synchronization status */
long time_offset = 0;           /* time adjustment (us) */
long time_constant = 0;         /* pll time constant */
long time_tolerance = MAXFREQ;  /* frequency tolerance (ppm) */
long time_precision = 1;        /* clock precision (us) */
long time_maxerror = 0x70000000;/* maximum error */
long time_esterror = 0x70000000;/* estimated error */
long time_phase = 0;            /* phase offset (scaled us) */
long time_freq = 0;             /* frequency offset (scaled ppm) */
long time_adj = 0;              /* tick adjust (scaled 1 / HZ) */
long time_reftime = 0;          /* time at last adjustment (s) */

long time_adjust = 0;           /* outstanding adjtime() correction (us) */
long time_adjust_step = 0;      /* per-tick slew currently being applied */

int need_resched = 0;           /* set when schedule() should run on return */
unsigned long event = 0;
  68 
/*
 * Tell us the machine setup..
 */
int hard_math = 0;              /* set by boot/head.S */
int x86 = 0;                    /* set by boot/head.S to 3 or 4 */
int ignore_irq13 = 0;           /* set if exception 16 works */
int wp_works_ok = 0;            /* set if paging hardware honours WP */
int hlt_works_ok = 1;           /* set if the "hlt" instruction works */

/*
 * Bus types ..
 */
int EISA_bus = 0;

extern int _setitimer(int, struct itimerval *, struct itimerval *);
unsigned long * prof_buffer = NULL;     /* kernel profiling histogram (CONFIG_PROFILE) */
unsigned long prof_len = 0;             /* number of slots in prof_buffer */

/* map a 1-based signal number to its bit in a signal mask */
#define _S(nr) (1<<((nr)-1))

extern void mem_use(void);

extern int timer_interrupt(void);
asmlinkage int system_call(void);

/* kernel stack for the initial (idle) task; STACK_MAGIC at the bottom
 * lets overflow be detected */
static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
static struct vm_area_struct init_mmap = INIT_MMAP;
struct task_struct init_task = INIT_TASK;

unsigned long volatile jiffies=0;       /* ticks since boot */

struct task_struct *current = &init_task;       /* the running task */
struct task_struct *last_task_used_math = NULL; /* current owner of the FPU state */

struct task_struct * task[NR_TASKS] = {&init_task, };

long user_stack [ PAGE_SIZE>>2 ] = { STACK_MAGIC, };

/* initial stack pointer + data segment, loaded by the boot code */
struct {
        long * a;
        short b;
        } stack_start = { & user_stack [PAGE_SIZE>>2] , KERNEL_DS };

struct kernel_stat kstat = { 0 };
/*
 *  'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
        /* clear TS so FPU instructions stop faulting */
        __asm__ __volatile__("clts");
        if (last_task_used_math == current)
                return;
        /* arm a watchdog around the fnsave/frstor sequence in case the
         * coprocessor wedges (50 jiffies); disarmed again at the end */
        timer_table[COPRO_TIMER].expires = jiffies+50;
        timer_active |= 1<<COPRO_TIMER;
        if (last_task_used_math)
                /* save the previous owner's FPU context into its TSS */
                __asm__("fnsave %0":"=m" (last_task_used_math->tss.i387));
        else
                __asm__("fnclex");
        last_task_used_math = current;
        if (current->used_math) {
                __asm__("frstor %0": :"m" (current->tss.i387));
        } else {
                /* first FPU use by this task: start from a clean state */
                __asm__("fninit");
                current->used_math=1;
        }
        timer_active &= ~(1<<COPRO_TIMER);
}
 141 
 142 #ifndef CONFIG_MATH_EMULATION
 143 
 144 asmlinkage void math_emulate(long arg)
     /* [previous][next][first][last][top][bottom][index][help] */
 145 {
 146   printk("math-emulation not enabled and no coprocessor found.\n");
 147   printk("killing %s.\n",current->comm);
 148   send_sig(SIGFPE,current,1);
 149   schedule();
 150 }
 151 
 152 #endif /* CONFIG_MATH_EMULATION */
 153 
unsigned long itimer_ticks = 0; /* ticks accumulated for ITIMER_REAL delivery */
unsigned long itimer_next = ~0; /* ticks until the next ITIMER_REAL expiry */
 156 
/*
 *  'schedule()' is the scheduler function. It's a very simple and nice
 * scheduler: it's not perfect, but certainly works for most things.
 * The one thing you might take a look at is the signal-handler code here.
 *
 *   NOTE!!  Task 0 is the 'idle' task, which gets called when no other
 * tasks can run. It can not be killed, and it cannot sleep. The 'state'
 * information in task[0] is never used.
 *
 * The "confuse_gcc" goto is used only to get better assembly code..
 * Dijkstra probably hates me.
 */
asmlinkage void schedule(void)
{
        int c;
        struct task_struct * p;
        struct task_struct * next;
        unsigned long ticks;

/* check alarm, wake up any interruptible tasks that have got a signal */

        if (intr_count) {
                /* should never happen: scheduling from interrupt context */
                printk("Aiee: scheduling in interrupt\n");
                intr_count = 0;
        }
        /* atomically consume the ticks accumulated by do_timer() */
        cli();
        ticks = itimer_ticks;
        itimer_ticks = 0;
        itimer_next = ~0;
        sti();
        need_resched = 0;
        p = &init_task;
        /* first pass over all tasks: deliver expired ITIMER_REAL signals
         * and wake interruptible sleepers with pending signals/timeouts */
        for (;;) {
                if ((p = p->next_task) == &init_task)
                        goto confuse_gcc1;
                if (ticks && p->it_real_value) {
                        if (p->it_real_value <= ticks) {
                                send_sig(SIGALRM, p, 1);
                                if (!p->it_real_incr) {
                                        /* one-shot timer: disarm it */
                                        p->it_real_value = 0;
                                        goto end_itimer;
                                }
                                /* periodic timer: advance past the ticks
                                 * we owe, possibly several periods */
                                do {
                                        p->it_real_value += p->it_real_incr;
                                } while (p->it_real_value <= ticks);
                        }
                        p->it_real_value -= ticks;
                        /* remember the earliest pending expiry */
                        if (p->it_real_value < itimer_next)
                                itimer_next = p->it_real_value;
                }
end_itimer:
                if (p->state != TASK_INTERRUPTIBLE)
                        continue;
                /* pending unblocked signal: make the sleeper runnable */
                if (p->signal & ~p->blocked) {
                        p->state = TASK_RUNNING;
                        continue;
                }
                /* timed sleep whose deadline has passed: wake it */
                if (p->timeout && p->timeout <= jiffies) {
                        p->timeout = 0;
                        p->state = TASK_RUNNING;
                }
        }
confuse_gcc1:

/* this is the scheduler proper: */
#if 0
        /* give processes that go to sleep a bit higher priority.. */
        /* This depends on the values for TASK_XXX */
        /* This gives smoother scheduling for some things, but */
        /* can be very unfair under some circumstances, so.. */
        if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
            current->counter < current->priority*2) {
                ++current->counter;
        }
#endif
        /* second pass: pick the runnable task with the largest remaining
         * time slice (counter); falls back to init_task (idle) if none */
        c = -1000;
        next = p = &init_task;
        for (;;) {
                if ((p = p->next_task) == &init_task)
                        goto confuse_gcc2;
                if (p->state == TASK_RUNNING && p->counter > c)
                        c = p->counter, next = p;
        }
confuse_gcc2:
        /* every runnable slice is used up: recharge all counters,
         * weighted by priority (sleepers keep half their old counter) */
        if (!c) {
                for_each_task(p)
                        p->counter = (p->counter >> 1) + p->priority;
        }
        if (current == next)
                return;
        kstat.context_swtch++;
        switch_to(next);
        /* Now maybe reload the debug registers */
        if(current->debugreg[7]){
                loaddebug(0);
                loaddebug(1);
                loaddebug(2);
                loaddebug(3);
                loaddebug(6);
        };
}
 258 
 259 asmlinkage int sys_pause(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 260 {
 261         current->state = TASK_INTERRUPTIBLE;
 262         schedule();
 263         return -ERESTARTNOHAND;
 264 }
 265 
/*
 * wake_up doesn't wake up stopped processes - they have to be awakened
 * with signals or similar.
 *
 * Note that this doesn't need cli-sti pairs: interrupts may not change
 * the wait-queue structures directly, but only call wake_up() to wake
 * a process. The process itself must remove the queue once it has woken.
 */
void wake_up(struct wait_queue **q)
{
        struct wait_queue *tmp;
        struct task_struct * p;

        if (!q || !(tmp = *q))
                return;
        /* the queue is a circular singly-linked list; walk it once,
         * waking both interruptible and uninterruptible sleepers */
        do {
                if ((p = tmp->task) != NULL) {
                        if ((p->state == TASK_UNINTERRUPTIBLE) ||
                            (p->state == TASK_INTERRUPTIBLE)) {
                                p->state = TASK_RUNNING;
                                /* ask for a reschedule if the woken task
                                 * has a clearly larger time slice left */
                                if (p->counter > current->counter + 3)
                                        need_resched = 1;
                        }
                }
                if (!tmp->next) {
                        /* corrupted queue (broken circular link):
                         * report the caller and bail out */
                        printk("wait_queue is bad (eip = %p)\n",
                                __builtin_return_address(0));
                        printk("        q = %p\n",q);
                        printk("       *q = %p\n",*q);
                        printk("      tmp = %p\n",tmp);
                        break;
                }
                tmp = tmp->next;
        } while (tmp != *q);
}
 301 
/*
 * Like wake_up(), but only TASK_INTERRUPTIBLE sleepers are woken;
 * uninterruptible sleepers stay asleep.  Same lockless circular-list
 * walk and the same corruption check.
 */
void wake_up_interruptible(struct wait_queue **q)
{
        struct wait_queue *tmp;
        struct task_struct * p;

        if (!q || !(tmp = *q))
                return;
        do {
                if ((p = tmp->task) != NULL) {
                        if (p->state == TASK_INTERRUPTIBLE) {
                                p->state = TASK_RUNNING;
                                /* ask for a reschedule if the woken task
                                 * has a clearly larger time slice left */
                                if (p->counter > current->counter + 3)
                                        need_resched = 1;
                        }
                }
                if (!tmp->next) {
                        /* corrupted queue: report the caller and bail out */
                        printk("wait_queue is bad (eip = %p)\n",
                                __builtin_return_address(0));
                        printk("        q = %p\n",q);
                        printk("       *q = %p\n",*q);
                        printk("      tmp = %p\n",tmp);
                        break;
                }
                tmp = tmp->next;
        } while (tmp != *q);
}
 328 
/*
 * Slow path of down(): called when the semaphore count is already
 * <= 0.  Sleeps uninterruptibly on sem->wait until the count goes
 * positive.
 */
void __down(struct semaphore * sem)
{
        struct wait_queue wait = { current, NULL };
        add_wait_queue(&sem->wait, &wait);
        /* set the state BEFORE testing the count: a concurrent wake-up
         * then simply flips us back to TASK_RUNNING instead of being
         * lost between the test and schedule() */
        current->state = TASK_UNINTERRUPTIBLE;
        while (sem->count <= 0) {
                schedule();
                /* re-arm the sleep state before re-testing the count */
                current->state = TASK_UNINTERRUPTIBLE;
        }
        current->state = TASK_RUNNING;
        remove_wait_queue(&sem->wait, &wait);
}
 341 
/*
 * Common helper for sleep_on()/interruptible_sleep_on(): queue the
 * current task on *p, sleep in the given state, then dequeue.
 * Interrupts are enabled while asleep; the caller's interrupt flag is
 * restored on return.
 */
static inline void __sleep_on(struct wait_queue **p, int state)
{
        unsigned long flags;
        struct wait_queue wait = { current, NULL };

        if (!p)
                return;
        /* the idle task must never sleep */
        if (current == task[0])
                panic("task[0] trying to sleep");
        /* mark ourselves asleep before queueing, so a wake_up() racing
         * with us just makes us runnable again */
        current->state = state;
        add_wait_queue(p, &wait);
        save_flags(flags);
        sti();
        schedule();
        remove_wait_queue(p, &wait);
        restore_flags(flags);
}
 359 
/* Sleep on *p in TASK_INTERRUPTIBLE state: signals can wake us early. */
void interruptible_sleep_on(struct wait_queue **p)
{
        __sleep_on(p,TASK_INTERRUPTIBLE);
}
 364 
/* Sleep on *p in TASK_UNINTERRUPTIBLE state: only a wake_up() on the
 * queue (or an expired timeout noticed by schedule()) wakes us. */
void sleep_on(struct wait_queue **p)
{
        __sleep_on(p,TASK_UNINTERRUPTIBLE);
}
 369 
/*
 * The head for the timer-list has a "expires" field of MAX_UINT,
 * and the sorting routine counts on this..
 */
static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
/* non-zero: add_timer()/del_timer() sanity-check list membership (slower) */
#define SLOW_BUT_DEBUGGING_TIMERS 1
 376 
/*
 * Insert a dynamic timer into the expiry-sorted doubly-linked timer
 * list.  timer->expires is RELATIVE (in jiffies) on entry and is made
 * absolute here; the timer must not already be pending.
 */
void add_timer(struct timer_list * timer)
{
        unsigned long flags;
        struct timer_list *p;

#if SLOW_BUT_DEBUGGING_TIMERS
        /* a pending timer has non-NULL links: refuse to double-add */
        if (timer->next || timer->prev) {
                printk("add_timer() called with non-zero list from %p\n",
                        __builtin_return_address(0));
                return;
        }
#endif
        p = &timer_head;
        /* convert the relative expiry to an absolute jiffies value */
        timer->expires += jiffies;
        save_flags(flags);
        cli();
        /* find the first entry expiring no earlier than us; the head's
         * ~0 expiry guarantees the loop terminates */
        do {
                p = p->next;
        } while (timer->expires > p->expires);
        /* link the new timer in just before p */
        timer->next = p;
        timer->prev = p->prev;
        p->prev = timer;
        timer->prev->next = timer;
        restore_flags(flags);
}
 402 
/*
 * Remove a timer from the pending list.  Returns 1 if the timer was
 * pending (its expires field is converted back to a relative value,
 * so it could be re-added), 0 otherwise.
 */
int del_timer(struct timer_list * timer)
{
        unsigned long flags;
#if SLOW_BUT_DEBUGGING_TIMERS
        struct timer_list * p;

        /* debugging variant: walk the list to verify the timer is
         * really on it before unlinking */
        p = &timer_head;
        save_flags(flags);
        cli();
        while ((p = p->next) != &timer_head) {
                if (p == timer) {
                        timer->next->prev = timer->prev;
                        timer->prev->next = timer->next;
                        timer->next = timer->prev = NULL;
                        restore_flags(flags);
                        /* hand back a relative expiry, matching what
                         * add_timer() expects on entry */
                        timer->expires -= jiffies;
                        return 1;
                }
        }
        /* non-NULL links but not on the list: the timer was corrupted
         * or never initialized */
        if (timer->next || timer->prev)
                printk("del_timer() called from %p with timer not initialized\n",
                        __builtin_return_address(0));
        restore_flags(flags);
        return 0;
#else
        /* fast variant: trust timer->next as the "pending" indicator */
        save_flags(flags);
        cli();
        if (timer->next) {
                timer->next->prev = timer->prev;
                timer->prev->next = timer->next;
                timer->next = timer->prev = NULL;
                restore_flags(flags);
                timer->expires -= jiffies;
                return 1;
        }
        restore_flags(flags);
        return 0;
#endif
}
 442 
unsigned long timer_active = 0;         /* bitmask of armed timer_table entries */
struct timer_struct timer_table[32];    /* old-style static timers, one per bit */

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seems to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 */
unsigned long avenrun[3] = { 0,0,0 };   /* 1-, 5- and 15-minute load averages */
 453 
 454 /*
 455  * Nr of active tasks - counted in fixed-point numbers
 456  */
 457 static unsigned long count_active_tasks(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 458 {
 459         struct task_struct **p;
 460         unsigned long nr = 0;
 461 
 462         for(p = &LAST_TASK; p > &FIRST_TASK; --p)
 463                 if (*p && ((*p)->state == TASK_RUNNING ||
 464                            (*p)->state == TASK_UNINTERRUPTIBLE ||
 465                            (*p)->state == TASK_SWAPPING))
 466                         nr += FIXED_1;
 467         return nr;
 468 }
 469 
 470 static inline void calc_load(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 471 {
 472         unsigned long active_tasks; /* fixed-point */
 473         static int count = LOAD_FREQ;
 474 
 475         if (count-- > 0)
 476                 return;
 477         count = LOAD_FREQ;
 478         active_tasks = count_active_tasks();
 479         CALC_LOAD(avenrun[0], EXP_1, active_tasks);
 480         CALC_LOAD(avenrun[1], EXP_5, active_tasks);
 481         CALC_LOAD(avenrun[2], EXP_15, active_tasks);
 482 }
 483 
/*
 * this routine handles the overflow of the microsecond field
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 *
 * These were ported to Linux by Philip Gladstone.
 */
static void second_overflow(void)
{
        long ltemp;
        /* last time the cmos clock got updated */
        static long last_rtc_update=0;
        extern int set_rtc_mmss(unsigned long);

        /* Bump the maxerror field, saturating at 0x70000000 */
        time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
          0x70000000 : (time_maxerror + time_tolerance);

        /* Run the PLL: derive the per-tick phase adjustment (time_adj)
         * from the current offset, scaled by the PLL time constant,
         * and consume the corresponding part of time_offset. */
        if (time_offset < 0) {
                ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
                time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
                time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
                time_adj = - time_adj;
        } else if (time_offset > 0) {
                ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
                time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
                time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
        } else {
                time_adj = 0;
        }

        /* add in the frequency correction and the fine-tune constant */
        time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
            + FINETUNE;

        /* Handle the leap second stuff */
        switch (time_status) {
                case TIME_INS:
                /* ugly divide should be replaced */
                if (xtime.tv_sec % 86400 == 0) {
                        xtime.tv_sec--; /* !! */
                        time_status = TIME_OOP;
                        printk("Clock: inserting leap second 23:59:60 GMT\n");
                }
                break;

                case TIME_DEL:
                /* ugly divide should be replaced */
                if (xtime.tv_sec % 86400 == 86399) {
                        xtime.tv_sec++;
                        time_status = TIME_OK;
                        printk("Clock: deleting leap second 23:59:59 GMT\n");
                }
                break;

                case TIME_OOP:
                time_status = TIME_OK;
                break;
        }
        /* Sync the CMOS clock roughly every 11 minutes.  NOTE: the
         * "else" binds to the INNER if, as the indentation suggests:
         * on a failed set_rtc_mmss() the next attempt is in ~1 minute. */
        if (xtime.tv_sec > last_rtc_update + 660)
          if (set_rtc_mmss(xtime.tv_sec) == 0)
            last_rtc_update = xtime.tv_sec;
          else
            last_rtc_update = xtime.tv_sec - 600; /* do it again in one min */
}
 552 
/*
 * disregard lost ticks for now.. We don't care enough.
 */
static void timer_bh(void * unused)
{
        unsigned long mask;
        struct timer_struct *tp;
        struct timer_list * timer;

        /* Run expired dynamic timers.  The sorted list is scanned with
         * interrupts off, but each handler runs with them enabled. */
        cli();
        while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
                void (*fn)(unsigned long) = timer->function;
                unsigned long data = timer->data;
                /* unlink before calling: the handler may re-add it */
                timer->next->prev = timer->prev;
                timer->prev->next = timer->next;
                timer->next = timer->prev = NULL;
                sti();
                fn(data);
                cli();
        }
        sti();

        /* Run expired old-style timer_table entries: one bit of
         * timer_active per slot, cleared before calling the handler */
        for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
                if (mask > timer_active)
                        break;
                if (!(mask & timer_active))
                        continue;
                if (tp->expires > jiffies)
                        continue;
                timer_active &= ~mask;
                tp->fn();
                sti();
        }
}
 587 
/* TQUEUE_BH handler: run everything queued on tq_timer. */
void tqueue_bh(void * unused)
{
        run_task_queue(&tq_timer);
}
 592 
/* IMMEDIATE_BH handler: run everything queued on tq_immediate. */
void immediate_bh(void * unused)
{
        run_task_queue(&tq_immediate);
}
 597 
/*
 * The int argument is really a (struct pt_regs *), in case the
 * interrupt wants to know from where it was called. The timer
 * irq uses this to decide if it should update the user or system
 * times.
 */
static void do_timer(struct pt_regs * regs)
{
        unsigned long mask;
        struct timer_struct *tp;

        long ltemp, psecs;

        /* Advance the phase, once it gets to one microsecond, then
         * advance the tick more.
         */
        time_phase += time_adj;
        if (time_phase < -FINEUSEC) {
                ltemp = -time_phase >> SHIFT_SCALE;
                time_phase += ltemp << SHIFT_SCALE;
                xtime.tv_usec += tick + time_adjust_step - ltemp;
        }
        else if (time_phase > FINEUSEC) {
                ltemp = time_phase >> SHIFT_SCALE;
                time_phase -= ltemp << SHIFT_SCALE;
                xtime.tv_usec += tick + time_adjust_step + ltemp;
        } else
                xtime.tv_usec += tick + time_adjust_step;

        if (time_adjust)
        {
            /* We are doing an adjtime thing. 
             *
             * Modify the value of the tick for next time.
             * Note that a positive delta means we want the clock
             * to run fast. This means that the tick should be bigger
             *
             * Limit the amount of the step for *next* tick to be
             * in the range -tickadj .. +tickadj
             */
             if (time_adjust > tickadj)
               time_adjust_step = tickadj;
             else if (time_adjust < -tickadj)
               time_adjust_step = -tickadj;
             else
               time_adjust_step = time_adjust;
             
            /* Reduce by this step the amount of time left  */
            time_adjust -= time_adjust_step;
        }
        else
            time_adjust_step = 0;

        /* carry microseconds into seconds and run the once-a-second work */
        if (xtime.tv_usec >= 1000000) {
            xtime.tv_usec -= 1000000;
            xtime.tv_sec++;
            second_overflow();
        }

        jiffies++;
        calc_load();
        /* Interrupted user mode (or vm86 mode): account user time. */
        if ((VM_MASK & regs->eflags) || (3 & regs->cs)) {
                current->utime++;
                if (current != task[0]) {
                        /* priority < 15 means the task was niced */
                        if (current->priority < 15)
                                kstat.cpu_nice++;
                        else
                                kstat.cpu_user++;
                }
                /* Update ITIMER_VIRT for current task if not in a system call */
                if (current->it_virt_value && !(--current->it_virt_value)) {
                        current->it_virt_value = current->it_virt_incr;
                        send_sig(SIGVTALRM,current,1);
                }
        } else {
                /* Interrupted kernel mode: account system time. */
                current->stime++;
                if(current != task[0])
                        kstat.cpu_system++;
#ifdef CONFIG_PROFILE
                /* bump the profiling histogram slot for the kernel EIP */
                if (prof_buffer && current != task[0]) {
                        unsigned long eip = regs->eip;
                        eip >>= 2;
                        if (eip < prof_len)
                                prof_buffer[eip]++;
                }
#endif
        }
        /*
         * check the cpu time limit on the process.
         */
        if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
            (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
                send_sig(SIGKILL, current, 1);
        /* soft limit: SIGXCPU when first reached, then every 5 seconds */
        if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
            (((current->stime + current->utime) % HZ) == 0)) {
                psecs = (current->stime + current->utime) / HZ;
                /* send when equal */
                if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
                        send_sig(SIGXCPU, current, 1);
                /* and every five seconds thereafter. */
                else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
                        ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
                        send_sig(SIGXCPU, current, 1);
        }

        /* time slice used up (task[0], the idle task, is exempt) */
        if (current != task[0] && 0 > --current->counter) {
                current->counter = 0;
                need_resched = 1;
        }
        /* Update ITIMER_PROF for the current task */
        if (current->it_prof_value && !(--current->it_prof_value)) {
                current->it_prof_value = current->it_prof_incr;
                send_sig(SIGPROF,current,1);
        }
        /* schedule the timer bottom half if any timer_table entry expired */
        for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
                if (mask > timer_active)
                        break;
                if (!(mask & timer_active))
                        continue;
                if (tp->expires > jiffies)
                        continue;
                mark_bh(TIMER_BH);
        }
        cli();
        /* force a schedule() pass once the earliest ITIMER_REAL is due */
        itimer_ticks++;
        if (itimer_ticks > itimer_next)
                need_resched = 1;
        if (timer_head.next->expires < jiffies)
                mark_bh(TIMER_BH);
        /* NOTE(review): presumably tq_timer == &tq_last means the queue
         * is empty - confirm against linux/tqueue.h */
        if (tq_timer != &tq_last)
                mark_bh(TQUEUE_BH);
        sti();
}
 731 
 732 asmlinkage int sys_alarm(long seconds)
     /* [previous][next][first][last][top][bottom][index][help] */
 733 {
 734         struct itimerval it_new, it_old;
 735 
 736         it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
 737         it_new.it_value.tv_sec = seconds;
 738         it_new.it_value.tv_usec = 0;
 739         _setitimer(ITIMER_REAL, &it_new, &it_old);
 740         return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
 741 }
 742 
 743 asmlinkage int sys_getpid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 744 {
 745         return current->pid;
 746 }
 747 
 748 asmlinkage int sys_getppid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 749 {
 750         return current->p_opptr->pid;
 751 }
 752 
 753 asmlinkage int sys_getuid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 754 {
 755         return current->uid;
 756 }
 757 
 758 asmlinkage int sys_geteuid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 759 {
 760         return current->euid;
 761 }
 762 
 763 asmlinkage int sys_getgid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 764 {
 765         return current->gid;
 766 }
 767 
 768 asmlinkage int sys_getegid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 769 {
 770         return current->egid;
 771 }
 772 
 773 asmlinkage int sys_nice(long increment)
     /* [previous][next][first][last][top][bottom][index][help] */
 774 {
 775         int newprio;
 776 
 777         if (increment < 0 && !suser())
 778                 return -EPERM;
 779         newprio = current->priority - increment;
 780         if (newprio < 1)
 781                 newprio = 1;
 782         if (newprio > 35)
 783                 newprio = 35;
 784         current->priority = newprio;
 785         return 0;
 786 }
 787 
/*
 * Print a one-line summary of task p (slot nr in task[]): command
 * name, slot, state letter, saved PC, unused kernel stack, pid and
 * family pids.  Used by show_state() for debugging dumps.
 */
static void show_task(int nr,struct task_struct * p)
{
        unsigned long free;
        /* one letter per TASK_* state, indexed by p->state */
        static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

        /* the currently running task is shown with a negated slot number */
        printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
        if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
                printk(stat_nam[p->state]);
        else
                printk(" ");
        if (p == current)
                printk(" current  ");
        else
                /* NOTE(review): presumably the saved PC is the fourth
                 * word on the task's kernel stack - confirm against the
                 * switch_to()/TSS frame layout */
                printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);
        /* count still-zero words from the stack bottom upwards to
         * estimate how much kernel stack was never touched */
        for (free = 1; free < 1024 ; free++) {
                if (((unsigned long *)p->kernel_stack_page)[free])
                        break;
        }
        /* free is in words; << 2 converts to bytes */
        printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
        if (p->p_cptr)
                printk("%5d ", p->p_cptr->pid);
        else
                printk("      ");
        if (p->p_ysptr)
                printk("%7d", p->p_ysptr->pid);
        else
                printk("       ");
        if (p->p_osptr)
                printk(" %5d\n", p->p_osptr->pid);
        else
                printk("\n");
}
 820 
 821 void show_state(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 822 {
 823         int i;
 824 
 825         printk("                         free                        sibling\n");
 826         printk("  task             PC    stack   pid father child younger older\n");
 827         for (i=0 ; i<NR_TASKS ; i++)
 828                 if (task[i])
 829                         show_task(i,task[i]);
 830 }
 831 
/*
 * Boot-time scheduler initialization: install the bottom-half
 * handlers, set up GDT descriptors for task 0, hook the system-call
 * gate, program the 8253 timer channel 0 and claim the timer IRQ.
 */
void sched_init(void)
{
        int i;
        struct desc_struct * p;

        bh_base[TIMER_BH].routine = timer_bh;
        bh_base[TQUEUE_BH].routine = tqueue_bh;
        bh_base[IMMEDIATE_BH].routine = immediate_bh;
        /* the assembly entry code hard-codes the sigaction layout */
        if (sizeof(struct sigaction) != 16)
                panic("Struct sigaction MUST be 16 bytes");
        set_tss_desc(gdt+FIRST_TSS_ENTRY,&init_task.tss);
        set_ldt_desc(gdt+FIRST_LDT_ENTRY,&default_ldt,1);
        /* int 0x80 is the system-call entry point */
        set_system_gate(0x80,&system_call);
        /* clear the TSS/LDT descriptor pair of every other task slot */
        p = gdt+2+FIRST_TSS_ENTRY;
        for(i=1 ; i<NR_TASKS ; i++) {
                task[i] = NULL;
                p->a=p->b=0;
                p++;
                p->a=p->b=0;
                p++;
        }
/* Clear NT, so that we won't have troubles with that later on */
        __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
        load_TR(0);
        load_ldt(0);
        outb_p(0x34,0x43);              /* binary, mode 2, LSB/MSB, ch 0 */
        outb_p(LATCH & 0xff , 0x40);    /* LSB */
        outb(LATCH >> 8 , 0x40);        /* MSB */
        if (request_irq(TIMER_IRQ,(void (*)(int)) do_timer, 0, "timer") != 0)
                panic("Could not allocate timer IRQ!");
}

/* [previous][next][first][last][top][bottom][index][help] */