root/kernel/sched.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. math_state_restore
  2. math_emulate
  3. schedule
  4. sys_pause
  5. wake_up
  6. wake_up_interruptible
  7. __down
  8. __sleep_on
  9. interruptible_sleep_on
  10. sleep_on
  11. add_timer
  12. del_timer
  13. count_active_tasks
  14. calc_load
  15. second_overflow
  16. timer_bh
  17. tqueue_bh
  18. immediate_bh
  19. do_timer
  20. sys_alarm
  21. sys_getpid
  22. sys_getppid
  23. sys_getuid
  24. sys_geteuid
  25. sys_getgid
  26. sys_getegid
  27. sys_nice
  28. show_task
  29. show_state
  30. sched_init

   1 /*
   2  *  linux/kernel/sched.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  * 'sched.c' is the main kernel file. It contains scheduling primitives
   9  * (sleep_on, wakeup, schedule etc) as well as a number of simple system
  10  * call functions (type getpid(), which just extracts a field from
  11  * current-task
  12  */
  13 
  14 #include <linux/config.h>
  15 #include <linux/signal.h>
  16 #include <linux/sched.h>
  17 #include <linux/timer.h>
  18 #include <linux/kernel.h>
  19 #include <linux/kernel_stat.h>
  20 #include <linux/fdreg.h>
  21 #include <linux/errno.h>
  22 #include <linux/time.h>
  23 #include <linux/ptrace.h>
  24 #include <linux/delay.h>
  25 #include <linux/interrupt.h>
  26 #include <linux/tqueue.h>
  27 #include <linux/resource.h>
  28 
  29 #include <asm/system.h>
  30 #include <asm/io.h>
  31 #include <asm/segment.h>
  32 
  33 #define TIMER_IRQ 0
  34 
  35 #include <linux/timex.h>
  36 
  37 /*
  38  * kernel variables
  39  */
  40 long tick = 1000000 / HZ;               /* timer interrupt period */
  41 volatile struct timeval xtime;          /* The current time */
  42 int tickadj = 500/HZ;                   /* microsecs */
  43 
  44 DECLARE_TASK_QUEUE(tq_timer);
  45 DECLARE_TASK_QUEUE(tq_immediate);
  46 
  47 /*
  48  * phase-lock loop variables
  49  * (NTP-style clock discipline; consumed by second_overflow()/do_timer())
  50  */
  50 int time_status = TIME_BAD;     /* clock synchronization status */
  51 long time_offset = 0;           /* time adjustment (us) */
  52 long time_constant = 0;         /* pll time constant */
  53 long time_tolerance = MAXFREQ;  /* frequency tolerance (ppm) */
  54 long time_precision = 1;        /* clock precision (us) */
  55 long time_maxerror = 0x70000000;/* maximum error */
  56 long time_esterror = 0x70000000;/* estimated error */
  57 long time_phase = 0;            /* phase offset (scaled us) */
  58 long time_freq = 0;             /* frequency offset (scaled ppm) */
  59 long time_adj = 0;              /* tick adjust (scaled 1 / HZ) */
  60 long time_reftime = 0;          /* time at last adjustment (s) */
  61 
  62 long time_adjust = 0;           /* adjtime() budget still to apply (us) */
  63 long time_adjust_step = 0;      /* per-tick slice of time_adjust, set in do_timer() */
  64 
     /* Set to request a reschedule (see wake_up()/do_timer()); cleared in schedule(). */
  65 int need_resched = 0;
     /* NOTE(review): 'event' is not referenced in this file — presumably used elsewhere. */
  66 unsigned long event = 0;
  67 
  68 /*
  69  * Tell us the machine setup..
  70  */
  71 char hard_math = 0;             /* set by boot/head.S */
  72 char x86 = 0;                   /* set by boot/head.S to 3 or 4 */
  73 char x86_model = 0;             /* set by boot/head.S */
  74 char x86_mask = 0;              /* set by boot/head.S */
  75 int x86_capability = 0;         /* set by boot/head.S */
  76 int fdiv_bug = 0;               /* set if Pentium(TM) with FP bug */
  77 
  78 char x86_vendor_id[13] = "Unknown";
  79 
  80 char ignore_irq13 = 0;          /* set if exception 16 works */
  81 char wp_works_ok = 0;           /* set if paging hardware honours WP */ 
  82 char hlt_works_ok = 1;          /* set if the "hlt" instruction works */
  83 
  84 /*
  85  * Bus types ..
  86  */
  87 int EISA_bus = 0;
  88 
     /* Implemented elsewhere (kernel/itimer.c, presumably) — used by sys_alarm(). */
  89 extern int _setitimer(int, struct itimerval *, struct itimerval *);
     /* Kernel profiling histogram, bumped from do_timer() under CONFIG_PROFILE. */
  90 unsigned long * prof_buffer = NULL;
  91 unsigned long prof_len = 0;
  92 
     /* Convert a signal number (1-based) into its bit in a signal mask. */
  93 #define _S(nr) (1<<((nr)-1))
  94 
  95 extern void mem_use(void);
  96 
  97 extern int timer_interrupt(void);
  98 asmlinkage int system_call(void);
  99 
     /* Kernel stack for task 0; STACK_MAGIC sentinel detects overflow. */
 100 static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
 101 static struct vm_area_struct init_mmap = INIT_MMAP;
 102 struct task_struct init_task = INIT_TASK;
 103 
 104 unsigned long volatile jiffies=0;
 105 
 106 struct task_struct *current = &init_task;
     /* Task that currently owns the FPU state, or NULL (see math_state_restore). */
 107 struct task_struct *last_task_used_math = NULL;
 108 
 109 struct task_struct * task[NR_TASKS] = {&init_task, };
 110 
 111 long user_stack [ PAGE_SIZE>>2 ] = { STACK_MAGIC, };
 112 
     /* Initial stack pointer + segment; NOTE(review): layout presumably matched by head.S. */
 113 struct {
 114         long * a;
 115         short b;
 116         } stack_start = { & user_stack [PAGE_SIZE>>2] , KERNEL_DS };
 117 
 118 struct kernel_stat kstat = { 0 };
 119 
 120 /*
 121  *  'math_state_restore()' saves the current math information in the
 122  * old math state array, and gets the new ones from the current task
 123  *
 124  * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 125  * Don't touch unless you *really* know how it works.
 126  */
 127 asmlinkage void math_state_restore(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 128 {
             /* Clear CR0.TS so FPU instructions stop faulting. */
 129         __asm__ __volatile__("clts");
             /* FPU context already belongs to this task: nothing to swap. */
 130         if (last_task_used_math == current)
 131                 return;
             /* Arm the coprocessor watchdog while the fnsave/frstor is in flight. */
 132         timer_table[COPRO_TIMER].expires = jiffies+50;
 133         timer_active |= 1<<COPRO_TIMER; 
 134         if (last_task_used_math)
 135                 __asm__("fnsave %0":"=m" (last_task_used_math->tss.i387));
 136         else
 137                 __asm__("fnclex");
 138         last_task_used_math = current;
             /* Restore a previously saved context, or initialise a fresh one. */
 139         if (current->used_math) {
 140                 __asm__("frstor %0": :"m" (current->tss.i387));
 141         } else {
 142                 __asm__("fninit");
 143                 current->used_math=1;
 144         }
 145         timer_active &= ~(1<<COPRO_TIMER);
 146 }
 147 
 148 #ifndef CONFIG_MATH_EMULATION
 149 
     /*
      * Stub used when the kernel is built without the FP emulator: there is
      * no way to run FP instructions, so kill the offending task with SIGFPE.
      */
 150 asmlinkage void math_emulate(long arg)
     /* [previous][next][first][last][top][bottom][index][help] */
 151 {
 152   printk("math-emulation not enabled and no coprocessor found.\n");
 153   printk("killing %s.\n",current->comm);
 154   send_sig(SIGFPE,current,1);
     /* Give the signal a chance to be acted upon before returning. */
 155   schedule();
 156 }
 157 
 158 #endif /* CONFIG_MATH_EMULATION */
 159 
     /* Ticks accumulated since schedule() last ran the ITIMER_REAL pass. */
 160 unsigned long itimer_ticks = 0;
     /* Smallest it_real_value seen; do_timer() forces a resched once passed. */
 161 unsigned long itimer_next = ~0;
 162 
 163 /*
 164  *  'schedule()' is the scheduler function. It's a very simple and nice
 165  * scheduler: it's not perfect, but certainly works for most things.
 166  * The one thing you might take a look at is the signal-handler code here.
 167  *
 168  *   NOTE!!  Task 0 is the 'idle' task, which gets called when no other
 169  * tasks can run. It can not be killed, and it cannot sleep. The 'state'
 170  * information in task[0] is never used.
 171  *
 172  * The "confuse_gcc" goto is used only to get better assembly code..
 173  * Dijkstra probably hates me.
 174  */
 175 asmlinkage void schedule(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 176 {
 177         int c;
 178         struct task_struct * p;
 179         struct task_struct * next;
 180         unsigned long ticks;
 181 
 182 /* check alarm, wake up any interruptible tasks that have got a signal */
 183 
 184         if (intr_count) {
 185                 printk("Aiee: scheduling in interrupt\n");
 186                 intr_count = 0;
 187         }
             /* Atomically consume the tick count accumulated by do_timer(). */
 188         cli();
 189         ticks = itimer_ticks;
 190         itimer_ticks = 0;
 191         itimer_next = ~0;
 192         sti();
 193         need_resched = 0;
 194         p = &init_task;
             /*
              * Pass 1 over the circular task list: expire ITIMER_REAL timers
              * (delivering SIGALRM and re-arming periodic ones), and wake
              * TASK_INTERRUPTIBLE tasks with a pending unblocked signal or an
              * expired timeout.
              */
 195         for (;;) {
 196                 if ((p = p->next_task) == &init_task)
 197                         goto confuse_gcc1;
 198                 if (ticks && p->it_real_value) {
 199                         if (p->it_real_value <= ticks) {
 200                                 send_sig(SIGALRM, p, 1);
 201                                 if (!p->it_real_incr) {
                                             /* one-shot timer: disarm */
 202                                         p->it_real_value = 0;
 203                                         goto end_itimer;
 204                                 }
                                     /* periodic: advance past the ticks we owe */
 205                                 do {
 206                                         p->it_real_value += p->it_real_incr;
 207                                 } while (p->it_real_value <= ticks);
 208                         }
 209                         p->it_real_value -= ticks;
 210                         if (p->it_real_value < itimer_next)
 211                                 itimer_next = p->it_real_value;
 212                 }
 213 end_itimer:
 214                 if (p->state != TASK_INTERRUPTIBLE)
 215                         continue;
 216                 if (p->signal & ~p->blocked) {
 217                         p->state = TASK_RUNNING;
 218                         continue;
 219                 }
 220                 if (p->timeout && p->timeout <= jiffies) {
 221                         p->timeout = 0;
 222                         p->state = TASK_RUNNING;
 223                 }
 224         }
 225 confuse_gcc1:
 226 
 227 /* this is the scheduler proper: */
 228 #if 0
 229         /* give processes that go to sleep a bit higher priority.. */
 230         /* This depends on the values for TASK_XXX */
 231         /* This gives smoother scheduling for some things, but */
 232         /* can be very unfair under some circumstances, so.. */
 233         if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
 234             current->counter < current->priority*2) {
 235                 ++current->counter;
 236         }
 237 #endif
             /* Pass 2: pick the runnable task with the largest remaining counter. */
 238         c = -1000;
 239         next = p = &init_task;
 240         for (;;) {
 241                 if ((p = p->next_task) == &init_task)
 242                         goto confuse_gcc2;
 243                 if (p->state == TASK_RUNNING && p->counter > c)
 244                         c = p->counter, next = p;
 245         }
 246 confuse_gcc2:
             /* Every runnable counter is 0: recharge all tasks from their priority. */
 247         if (!c) {
 248                 for_each_task(p)
 249                         p->counter = (p->counter >> 1) + p->priority;
 250         }
 251         if (current == next)
 252                 return;
 253         kstat.context_swtch++;
 254         switch_to(next);
 255         /* Now maybe reload the debug registers */
 256         if(current->debugreg[7]){
 257                 loaddebug(0);
 258                 loaddebug(1);
 259                 loaddebug(2);
 260                 loaddebug(3);
 261                 loaddebug(6);
 262         };
 263 }
 264 
     /*
      * pause(2): sleep interruptibly until a signal arrives.  The return
      * value is only seen once a signal handler has run, hence -ERESTARTNOHAND.
      */
 265 asmlinkage int sys_pause(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 266 {
 267         current->state = TASK_INTERRUPTIBLE;
 268         schedule();
 269         return -ERESTARTNOHAND;
 270 }
 271 
 272 /*
 273  * wake_up doesn't wake up stopped processes - they have to be awakened
 274  * with signals or similar.
 275  *
 276  * Note that this doesn't need cli-sti pairs: interrupts may not change
 277  * the wait-queue structures directly, but only call wake_up() to wake
 278  * a process. The process itself must remove the queue once it has woken.
 279  */
 280 void wake_up(struct wait_queue **q)
     /* [previous][next][first][last][top][bottom][index][help] */
 281 {
 282         struct wait_queue *tmp;
 283         struct task_struct * p;
 284 
 285         if (!q || !(tmp = *q))
 286                 return;
             /* Walk the circular wait queue, making every sleeper runnable. */
 287         do {
 288                 if ((p = tmp->task) != NULL) {
 289                         if ((p->state == TASK_UNINTERRUPTIBLE) ||
 290                             (p->state == TASK_INTERRUPTIBLE)) {
 291                                 p->state = TASK_RUNNING;
                                     /* woken task clearly more deserving: resched soon */
 292                                 if (p->counter > current->counter + 3)
 293                                         need_resched = 1;
 294                         }
 295                 }
                     /* A NULL link means the circular list is corrupt: report and bail. */
 296                 if (!tmp->next) {
 297                         printk("wait_queue is bad (eip = %p)\n",
 298                                 __builtin_return_address(0));
 299                         printk("        q = %p\n",q);
 300                         printk("       *q = %p\n",*q);
 301                         printk("      tmp = %p\n",tmp);
 302                         break;
 303                 }
 304                 tmp = tmp->next;
 305         } while (tmp != *q);
 306 }
 307 
     /*
      * Same as wake_up(), but only tasks in TASK_INTERRUPTIBLE are woken;
      * uninterruptible sleepers are left alone.
      */
 308 void wake_up_interruptible(struct wait_queue **q)
     /* [previous][next][first][last][top][bottom][index][help] */
 309 {
 310         struct wait_queue *tmp;
 311         struct task_struct * p;
 312 
 313         if (!q || !(tmp = *q))
 314                 return;
 315         do {
 316                 if ((p = tmp->task) != NULL) {
 317                         if (p->state == TASK_INTERRUPTIBLE) {
 318                                 p->state = TASK_RUNNING;
 319                                 if (p->counter > current->counter + 3)
 320                                         need_resched = 1;
 321                         }
 322                 }
                     /* Corrupt (non-circular) queue: dump diagnostics and stop. */
 323                 if (!tmp->next) {
 324                         printk("wait_queue is bad (eip = %p)\n",
 325                                 __builtin_return_address(0));
 326                         printk("        q = %p\n",q);
 327                         printk("       *q = %p\n",*q);
 328                         printk("      tmp = %p\n",tmp);
 329                         break;
 330                 }
 331                 tmp = tmp->next;
 332         } while (tmp != *q);
 333 }
 334 
     /*
      * Slow path of semaphore down(): sleep uninterruptibly on sem->wait
      * until the count becomes positive.  State is re-set to
      * TASK_UNINTERRUPTIBLE after each wakeup to close the race between
      * being woken and re-testing the count.
      */
 335 void __down(struct semaphore * sem)
     /* [previous][next][first][last][top][bottom][index][help] */
 336 {
 337         struct wait_queue wait = { current, NULL };
 338         add_wait_queue(&sem->wait, &wait);
 339         current->state = TASK_UNINTERRUPTIBLE;
 340         while (sem->count <= 0) {
 341                 schedule();
 342                 current->state = TASK_UNINTERRUPTIBLE;
 343         }
 344         current->state = TASK_RUNNING;
 345         remove_wait_queue(&sem->wait, &wait);
 346 }
 347 
     /*
      * Common helper for sleep_on()/interruptible_sleep_on(): put the
      * current task on wait queue *p in the given state, schedule away,
      * and remove it again on wakeup.  Interrupt flags are preserved
      * around the sleep; interrupts are enabled while asleep.
      */
 348 static inline void __sleep_on(struct wait_queue **p, int state)
     /* [previous][next][first][last][top][bottom][index][help] */
 349 {
 350         unsigned long flags;
 351         struct wait_queue wait = { current, NULL };
 352 
 353         if (!p)
 354                 return;
             /* The idle task must never sleep. */
 355         if (current == task[0])
 356                 panic("task[0] trying to sleep");
 357         current->state = state;
 358         add_wait_queue(p, &wait);
 359         save_flags(flags);
 360         sti();
 361         schedule();
 362         remove_wait_queue(p, &wait);
 363         restore_flags(flags);
 364 }
 365 
     /* Sleep on *p until woken; signals also wake the task. */
 366 void interruptible_sleep_on(struct wait_queue **p)
     /* [previous][next][first][last][top][bottom][index][help] */
 367 {
 368         __sleep_on(p,TASK_INTERRUPTIBLE);
 369 }
 370 
     /* Sleep on *p until explicitly woken; signals do not interrupt. */
 371 void sleep_on(struct wait_queue **p)
     /* [previous][next][first][last][top][bottom][index][help] */
 372 {
 373         __sleep_on(p,TASK_UNINTERRUPTIBLE);
 374 }
 375 
 376 /*
 377  * The head for the timer-list has a "expires" field of MAX_UINT,
 378  * and the sorting routine counts on this..
 379  */
 380 static struct timer_list timer_head = { &timer_head, &timer_head, ~0, 0, NULL };
     /* Non-zero enables list-membership checking in add_timer()/del_timer(). */
 381 #define SLOW_BUT_DEBUGGING_TIMERS 1
 382 
     /*
      * Insert a timer into the sorted doubly-linked timer list.
      * timer->expires is relative on entry and converted to an absolute
      * jiffies value here.  The sentinel head (expires == ~0) guarantees
      * the insertion scan terminates.
      */
 383 void add_timer(struct timer_list * timer)
     /* [previous][next][first][last][top][bottom][index][help] */
 384 {
 385         unsigned long flags;
 386         struct timer_list *p;
 387 
 388 #if SLOW_BUT_DEBUGGING_TIMERS
             /* Catch double-adds: a free timer must have NULL links. */
 389         if (timer->next || timer->prev) {
 390                 printk("add_timer() called with non-zero list from %p\n",
 391                         __builtin_return_address(0));
 392                 return;
 393         }
 394 #endif
 395         p = &timer_head;
 396         timer->expires += jiffies;
 397         save_flags(flags);
 398         cli();
             /* Find the first entry expiring no earlier than this timer. */
 399         do {
 400                 p = p->next;
 401         } while (timer->expires > p->expires);
 402         timer->next = p;
 403         timer->prev = p->prev;
 404         p->prev = timer;
 405         timer->prev->next = timer;
 406         restore_flags(flags);
 407 }
 408 
     /*
      * Remove a timer from the list.  Returns 1 if the timer was pending
      * (and converts timer->expires back to a relative value), 0 otherwise.
      * The debugging variant scans the whole list to verify membership
      * before unlinking.
      */
 409 int del_timer(struct timer_list * timer)
     /* [previous][next][first][last][top][bottom][index][help] */
 410 {
 411         unsigned long flags;
 412 #if SLOW_BUT_DEBUGGING_TIMERS
 413         struct timer_list * p;
 414 
 415         p = &timer_head;
 416         save_flags(flags);
 417         cli();
 418         while ((p = p->next) != &timer_head) {
 419                 if (p == timer) {
 420                         timer->next->prev = timer->prev;
 421                         timer->prev->next = timer->next;
 422                         timer->next = timer->prev = NULL;
 423                         restore_flags(flags);
                             /* give back the remaining relative time */
 424                         timer->expires -= jiffies;
 425                         return 1;
 426                 }
 427         }
             /* Not on the list but links are non-NULL: the timer is corrupt. */
 428         if (timer->next || timer->prev)
 429                 printk("del_timer() called from %p with timer not initialized\n",
 430                         __builtin_return_address(0));
 431         restore_flags(flags);
 432         return 0;
 433 #else   
 434         save_flags(flags);
 435         cli();
 436         if (timer->next) {
 437                 timer->next->prev = timer->prev;
 438                 timer->prev->next = timer->next;
 439                 timer->next = timer->prev = NULL;
 440                 restore_flags(flags);
 441                 timer->expires -= jiffies;
 442                 return 1;
 443         }
 444         restore_flags(flags);
 445         return 0;
 446 #endif
 447 }
 448 
     /* Bitmask of armed static timers; bit n corresponds to timer_table[n]. */
 449 unsigned long timer_active = 0;
 450 struct timer_struct timer_table[32];
 451 
 452 /*
 453  * Hmm.. Changed this, as the GNU make sources (load.c) seems to
 454  * imply that avenrun[] is the standard name for this kind of thing.
 455  * Nothing else seems to be standardized: the fractional size etc
 456  * all seem to differ on different machines.
 457  */
     /* 1/5/15-minute load averages, fixed-point (see CALC_LOAD in calc_load()). */
 458 unsigned long avenrun[3] = { 0,0,0 };
 459 
 460 /*
 461  * Nr of active tasks - counted in fixed-point numbers
 462  * (each running/uninterruptible/swapping task contributes FIXED_1;
 463  *  task 0, the idle task, is deliberately not counted)
 464  */
 463 static unsigned long count_active_tasks(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 464 {
 465         struct task_struct **p;
 466         unsigned long nr = 0;
 467 
 468         for(p = &LAST_TASK; p > &FIRST_TASK; --p)
 469                 if (*p && ((*p)->state == TASK_RUNNING ||
 470                            (*p)->state == TASK_UNINTERRUPTIBLE ||
 471                            (*p)->state == TASK_SWAPPING))
 472                         nr += FIXED_1;
 473         return nr;
 474 }
 475 
     /*
      * Called once per tick from do_timer(); every LOAD_FREQ ticks it folds
      * the current active-task count into the three exponentially decaying
      * load averages.
      */
 476 static inline void calc_load(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 477 {
 478         unsigned long active_tasks; /* fixed-point */
 479         static int count = LOAD_FREQ;
 480 
 481         if (count-- > 0)
 482                 return;
 483         count = LOAD_FREQ;
 484         active_tasks = count_active_tasks();
 485         CALC_LOAD(avenrun[0], EXP_1, active_tasks);
 486         CALC_LOAD(avenrun[1], EXP_5, active_tasks);
 487         CALC_LOAD(avenrun[2], EXP_15, active_tasks);
 488 }
 489 
 490 /*
 491  * this routine handles the overflow of the microsecond field
 492  *
 493  * The tricky bits of code to handle the accurate clock support
 494  * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 495  * They were originally developed for SUN and DEC kernels.
 496  * All the kudos should go to Dave for this stuff.
 497  *
 498  * These were ported to Linux by Philip Gladstone.
 499  */
 500 static void second_overflow(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 501 {
 502         long ltemp;
 503         /* last time the cmos clock got updated */
 504         static long last_rtc_update=0;
 505         extern int set_rtc_mmss(unsigned long);
 506 
 507         /* Bump the maxerror field (saturating at 0x70000000) */
 508         time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
 509           0x70000000 : (time_maxerror + time_tolerance);
 510 
 511         /* Run the PLL: derive the per-tick phase adjustment (time_adj)
 512          * from the remaining offset, and consume the corresponding
 513          * amount from time_offset. */
 514         if (time_offset < 0) {
 515                 ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
 516                 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
 517                 time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
 518                 time_adj = - time_adj;
 519         } else if (time_offset > 0) {
 520                 ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
 521                 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
 522                 time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
 523         } else {
 524                 time_adj = 0;
 525         }
 526 
             /* Add in the frequency discipline plus a constant trim. */
 527         time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
 528             + FINETUNE;
 529 
 530         /* Handle the leap second stuff */
 531         switch (time_status) {
 532                 case TIME_INS:
 533                 /* ugly divide should be replaced */
 534                 if (xtime.tv_sec % 86400 == 0) {
 535                         xtime.tv_sec--; /* !! */
 536                         time_status = TIME_OOP;
 537                         printk("Clock: inserting leap second 23:59:60 GMT\n");
 538                 }
 539                 break;
 540 
 541                 case TIME_DEL:
 542                 /* ugly divide should be replaced */
 543                 if (xtime.tv_sec % 86400 == 86399) {
 544                         xtime.tv_sec++;
 545                         time_status = TIME_OK;
 546                         printk("Clock: deleting leap second 23:59:59 GMT\n");
 547                 }
 548                 break;
 549 
 550                 case TIME_OOP:
 551                 time_status = TIME_OK;
 552                 break;
 553         }
             /* Sync the CMOS clock at most every ~11 minutes; on failure retry
              * in about a minute (the else pairs with the inner if). */
 554         if (xtime.tv_sec > last_rtc_update + 660)
 555           if (set_rtc_mmss(xtime.tv_sec) == 0)
 556             last_rtc_update = xtime.tv_sec;
 557           else
 558             last_rtc_update = xtime.tv_sec - 600; /* do it again in one min */
 559 }
 558 
 559 /*
 560  * disregard lost ticks for now.. We don't care enough.
 561  *
 562  * Bottom half that runs expired dynamic timers (the timer_head list)
 563  * and expired static timers (timer_table/timer_active).
 564  */
 562 static void timer_bh(void * unused)
     /* [previous][next][first][last][top][bottom][index][help] */
 563 {
 564         unsigned long mask;
 565         struct timer_struct *tp;
 566         struct timer_list * timer;
 567 
 568         cli();
             /* Pop expired list timers; handlers run with interrupts enabled.
              * NOTE(review): 'expires < jiffies' (not <=) delays firing by one tick. */
 569         while ((timer = timer_head.next) != &timer_head && timer->expires < jiffies) {
 570                 void (*fn)(unsigned long) = timer->function;
 571                 unsigned long data = timer->data;
 572                 timer->next->prev = timer->prev;
 573                 timer->prev->next = timer->next;
 574                 timer->next = timer->prev = NULL;
 575                 sti();
 576                 fn(data);
 577                 cli();
 578         }
 579         sti();
 580         
             /* Static timers: the mask walk stops past the highest armed bit. */
 581         for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
 582                 if (mask > timer_active)
 583                         break;
 584                 if (!(mask & timer_active))
 585                         continue;
 586                 if (tp->expires > jiffies)
 587                         continue;
 588                 timer_active &= ~mask;
 589                 tp->fn();
 590                 sti();
 591         }
 592 }
 593 
     /* Bottom half: run everything queued on tq_timer. */
 594 void tqueue_bh(void * unused)
     /* [previous][next][first][last][top][bottom][index][help] */
 595 {
 596         run_task_queue(&tq_timer);
 597 }
 598 
     /* Bottom half: run everything queued on tq_immediate. */
 599 void immediate_bh(void * unused)
     /* [previous][next][first][last][top][bottom][index][help] */
 600 {
 601         run_task_queue(&tq_immediate);
 602 }
 603 
 604 /*
 605  * The int argument is really a (struct pt_regs *), in case the
 606  * interrupt wants to know from where it was called. The timer
 607  * irq uses this to decide if it should update the user or system
 608  * times.
 609  */
 610 static void do_timer(struct pt_regs * regs)
     /* [previous][next][first][last][top][bottom][index][help] */
 611 {
 612         unsigned long mask;
 613         struct timer_struct *tp;
 614 
 615         long ltemp, psecs;
 616 
 617         /* Advance the phase, once it gets to one microsecond, then
 618          * advance the tick more.
 619          */
 620         time_phase += time_adj;
 621         if (time_phase < -FINEUSEC) {
 622                 ltemp = -time_phase >> SHIFT_SCALE;
 623                 time_phase += ltemp << SHIFT_SCALE;
 624                 xtime.tv_usec += tick + time_adjust_step - ltemp;
 625         }
 626         else if (time_phase > FINEUSEC) {
 627                 ltemp = time_phase >> SHIFT_SCALE;
 628                 time_phase -= ltemp << SHIFT_SCALE;
 629                 xtime.tv_usec += tick + time_adjust_step + ltemp;
 630         } else
 631                 xtime.tv_usec += tick + time_adjust_step;
 632 
 633         if (time_adjust)
 634         {
 635             /* We are doing an adjtime thing. 
 636              *
 637              * Modify the value of the tick for next time.
 638              * Note that a positive delta means we want the clock
 639              * to run fast. This means that the tick should be bigger
 640              *
 641              * Limit the amount of the step for *next* tick to be
 642              * in the range -tickadj .. +tickadj
 643              */
 644              if (time_adjust > tickadj)
 645                time_adjust_step = tickadj;
 646              else if (time_adjust < -tickadj)
 647                time_adjust_step = -tickadj;
 648              else
 649                time_adjust_step = time_adjust;
 650              
 651             /* Reduce by this step the amount of time left  */
 652             time_adjust -= time_adjust_step;
 653         }
 654         else
 655             time_adjust_step = 0;
 656 
             /* Carry microseconds into seconds, running the NTP machinery. */
 657         if (xtime.tv_usec >= 1000000) {
 658             xtime.tv_usec -= 1000000;
 659             xtime.tv_sec++;
 660             second_overflow();
 661         }
 662 
 663         jiffies++;
 664         calc_load();
             /* VM86 mode or CPL 3 means the tick hit user-mode execution. */
 665         if ((VM_MASK & regs->eflags) || (3 & regs->cs)) {
 666                 current->utime++;
 667                 if (current != task[0]) {
 668                         if (current->priority < 15)
 669                                 kstat.cpu_nice++;
 670                         else
 671                                 kstat.cpu_user++;
 672                 }
 673                 /* Update ITIMER_VIRT for current task if not in a system call */
 674                 if (current->it_virt_value && !(--current->it_virt_value)) {
 675                         current->it_virt_value = current->it_virt_incr;
 676                         send_sig(SIGVTALRM,current,1);
 677                 }
 678         } else {
 679                 current->stime++;
 680                 if(current != task[0])
 681                         kstat.cpu_system++;
 682 #ifdef CONFIG_PROFILE
 683                 if (prof_buffer && current != task[0]) {
 684                         unsigned long eip = regs->eip;
 685                         eip >>= CONFIG_PROFILE_SHIFT;
 686                         if (eip < prof_len)
 687                                 prof_buffer[eip]++;
 688                 }
 689 #endif
 690         }
 691         /*
 692          * check the cpu time limit on the process.
 693          */
 694         if ((current->rlim[RLIMIT_CPU].rlim_max != RLIM_INFINITY) &&
 695             (((current->stime + current->utime) / HZ) >= current->rlim[RLIMIT_CPU].rlim_max))
 696                 send_sig(SIGKILL, current, 1);
 697         if ((current->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) &&
 698             (((current->stime + current->utime) % HZ) == 0)) {
 699                 psecs = (current->stime + current->utime) / HZ;
 700                 /* send when equal */
 701                 if (psecs == current->rlim[RLIMIT_CPU].rlim_cur)
 702                         send_sig(SIGXCPU, current, 1);
 703                 /* and every five seconds thereafter. */
 704                 else if ((psecs > current->rlim[RLIMIT_CPU].rlim_cur) &&
 705                         ((psecs - current->rlim[RLIMIT_CPU].rlim_cur) % 5) == 0)
 706                         send_sig(SIGXCPU, current, 1);
 707         }
 708 
             /* Time-slice accounting: out of quantum means reschedule. */
 709         if (current != task[0] && 0 > --current->counter) {
 710                 current->counter = 0;
 711                 need_resched = 1;
 712         }
 713         /* Update ITIMER_PROF for the current task */
 714         if (current->it_prof_value && !(--current->it_prof_value)) {
 715                 current->it_prof_value = current->it_prof_incr;
 716                 send_sig(SIGPROF,current,1);
 717         }
             /* Any expired static timer: defer the work to timer_bh. */
 718         for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
 719                 if (mask > timer_active)
 720                         break;
 721                 if (!(mask & timer_active))
 722                         continue;
 723                 if (tp->expires > jiffies)
 724                         continue;
 725                 mark_bh(TIMER_BH);
 726         }
 727         cli();
 728         itimer_ticks++;
 729         if (itimer_ticks > itimer_next)
 730                 need_resched = 1;
 731         if (timer_head.next->expires < jiffies)
 732                 mark_bh(TIMER_BH);
 733         if (tq_timer != &tq_last)
 734                 mark_bh(TQUEUE_BH);
 735         sti();
 736 }
 737 
 738 asmlinkage int sys_alarm(long seconds)
     /* [previous][next][first][last][top][bottom][index][help] */
 739 {
 740         struct itimerval it_new, it_old;
 741 
 742         it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
 743         it_new.it_value.tv_sec = seconds;
 744         it_new.it_value.tv_usec = 0;
 745         _setitimer(ITIMER_REAL, &it_new, &it_old);
 746         return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
 747 }
 748 
     /* getpid(2): return the caller's process id. */
 749 asmlinkage int sys_getpid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 750 {
 751         return current->pid;
 752 }
 753 
     /* getppid(2): return the pid of the original parent (p_opptr). */
 754 asmlinkage int sys_getppid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 755 {
 756         return current->p_opptr->pid;
 757 }
 758 
     /* getuid(2): return the real user id. */
 759 asmlinkage int sys_getuid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 760 {
 761         return current->uid;
 762 }
 763 
     /* geteuid(2): return the effective user id. */
 764 asmlinkage int sys_geteuid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 765 {
 766         return current->euid;
 767 }
 768 
     /* getgid(2): return the real group id. */
 769 asmlinkage int sys_getgid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 770 {
 771         return current->gid;
 772 }
 773 
     /* getegid(2): return the effective group id. */
 774 asmlinkage int sys_getegid(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 775 {
 776         return current->egid;
 777 }
 778 
 779 asmlinkage int sys_nice(long increment)
     /* [previous][next][first][last][top][bottom][index][help] */
 780 {
 781         int newprio;
 782 
 783         if (increment < 0 && !suser())
 784                 return -EPERM;
 785         newprio = current->priority - increment;
 786         if (newprio < 1)
 787                 newprio = 1;
 788         if (newprio > 35)
 789                 newprio = 35;
 790         current->priority = newprio;
 791         return 0;
 792 }
 793 
     /*
      * Print one line of the show_state() table for task p (slot nr):
      * command, slot (negated for the current task), state letter, PC,
      * free kernel-stack bytes, pid, and family pids.
      */
 794 static void show_task(int nr,struct task_struct * p)
     /* [previous][next][first][last][top][bottom][index][help] */
 795 {
 796         unsigned long free;
 797         static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
 798 
 799         printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
 800         if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
 801                 printk(stat_nam[p->state]);
 802         else
 803                 printk(" ");
 804         if (p == current)
 805                 printk(" current  ");
 806         else
 807                 printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);
             /* Scan up from the stack base for the first clobbered word to
              * estimate how much kernel stack was never touched. */
 808         for (free = 1; free < 1024 ; free++) {
 809                 if (((unsigned long *)p->kernel_stack_page)[free])
 810                         break;
 811         }
 812         printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
 813         if (p->p_cptr)
 814                 printk("%5d ", p->p_cptr->pid);
 815         else
 816                 printk("      ");
 817         if (p->p_ysptr)
 818                 printk("%7d", p->p_ysptr->pid);
 819         else
 820                 printk("       ");
 821         if (p->p_osptr)
 822                 printk(" %5d\n", p->p_osptr->pid);
 823         else
 824                 printk("\n");
 825 }
 826 
 827 void show_state(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 828 {
 829         int i;
 830 
 831         printk("                         free                        sibling\n");
 832         printk("  task             PC    stack   pid father child younger older\n");
 833         for (i=0 ; i<NR_TASKS ; i++)
 834                 if (task[i])
 835                         show_task(i,task[i]);
 836 }
 837 
     /*
      * One-time scheduler initialisation: install the bottom halves,
      * set up task 0's TSS/LDT descriptors and the syscall gate, clear
      * the remaining task slots and GDT entries, program the 8253
      * channel 0 for periodic HZ interrupts, and claim the timer IRQ.
      */
 838 void sched_init(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 839 {
 840         int i;
 841         struct desc_struct * p;
 842 
 843         bh_base[TIMER_BH].routine = timer_bh;
 844         bh_base[TQUEUE_BH].routine = tqueue_bh;
 845         bh_base[IMMEDIATE_BH].routine = immediate_bh;
             /* NOTE(review): assembly elsewhere presumably hard-codes this size. */
 846         if (sizeof(struct sigaction) != 16)
 847                 panic("Struct sigaction MUST be 16 bytes");
 848         set_tss_desc(gdt+FIRST_TSS_ENTRY,&init_task.tss);
 849         set_ldt_desc(gdt+FIRST_LDT_ENTRY,&default_ldt,1);
 850         set_system_gate(0x80,&system_call);
             /* Zero the TSS+LDT descriptor pair for every other task slot. */
 851         p = gdt+2+FIRST_TSS_ENTRY;
 852         for(i=1 ; i<NR_TASKS ; i++) {
 853                 task[i] = NULL;
 854                 p->a=p->b=0;
 855                 p++;
 856                 p->a=p->b=0;
 857                 p++;
 858         }
 859 /* Clear NT, so that we won't have troubles with that later on */
 860         __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
 861         load_TR(0);
 862         load_ldt(0);
 863         outb_p(0x34,0x43);              /* binary, mode 2, LSB/MSB, ch 0 */
 864         outb_p(LATCH & 0xff , 0x40);    /* LSB */
 865         outb(LATCH >> 8 , 0x40);        /* MSB */
 866         if (request_irq(TIMER_IRQ,(void (*)(int)) do_timer, 0, "timer") != 0)
 867                 panic("Could not allocate timer IRQ!");
 868 }

/* [previous][next][first][last][top][bottom][index][help] */