root/kernel/sched.c


DEFINITIONS

This source file includes the following definitions:
  1. math_state_restore
  2. math_emulate
  3. schedule
  4. sys_pause
  5. wake_up
  6. wake_up_interruptible
  7. __down
  8. __sleep_on
  9. interruptible_sleep_on
  10. sleep_on
  11. add_timer
  12. del_timer
  13. count_active_tasks
  14. calc_load
  15. second_overflow
  16. timer_bh
  17. do_timer
  18. sys_alarm
  19. sys_getpid
  20. sys_getppid
  21. sys_getuid
  22. sys_geteuid
  23. sys_getgid
  24. sys_getegid
  25. sys_nice
  26. show_task
  27. show_state
  28. sched_init

   1 /*
   2  *  linux/kernel/sched.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  * 'sched.c' is the main kernel file. It contains scheduling primitives
   9  * (sleep_on, wakeup, schedule etc) as well as a number of simple system
  10  * call functions (such as getpid(), which just extracts a field from
  11  * the current task).
  12  */
  13 
  14 #include <linux/config.h>
  15 #include <linux/signal.h>
  16 #include <linux/sched.h>
  17 #include <linux/timer.h>
  18 #include <linux/kernel.h>
  19 #include <linux/kernel_stat.h>
  20 #include <linux/fdreg.h>
  21 #include <linux/errno.h>
  22 #include <linux/time.h>
  23 #include <linux/ptrace.h>
  24 #include <linux/segment.h>
  25 #include <linux/delay.h>
  26 #include <linux/interrupt.h>
  27 
  28 #include <asm/system.h>
  29 #include <asm/io.h>
  30 #include <asm/segment.h>
  31 
  32 #define TIMER_IRQ 0
  33 
  34 #include <linux/timex.h>
  35 
  36 /*
  37  * kernel variables
  38  */
  39 long tick = 1000000 / HZ;               /* timer interrupt period */
  40 volatile struct timeval xtime;          /* The current time */
  41 int tickadj = 500/HZ;                   /* microsecs */
  42 
  43 /*
  44  * phase-lock loop variables
  45  */
  46 int time_status = TIME_BAD;     /* clock synchronization status */
  47 long time_offset = 0;           /* time adjustment (us) */
  48 long time_constant = 0;         /* pll time constant */
  49 long time_tolerance = MAXFREQ;  /* frequency tolerance (ppm) */
  50 long time_precision = 1;        /* clock precision (us) */
  51 long time_maxerror = 0x70000000;/* maximum error */
  52 long time_esterror = 0x70000000;/* estimated error */
  53 long time_phase = 0;            /* phase offset (scaled us) */
  54 long time_freq = 0;             /* frequency offset (scaled ppm) */
  55 long time_adj = 0;              /* tick adjust (scaled 1 / HZ) */
  56 long time_reftime = 0;          /* time at last adjustment (s) */
  57 
  58 long time_adjust = 0;
  59 long time_adjust_step = 0;
  60 
  61 int need_resched = 0;
  62 
  63 /*
  64  * Tell us the machine setup..
  65  */
  66 int hard_math = 0;              /* set by boot/head.S */
  67 int x86 = 0;                    /* set by boot/head.S to 3 or 4 */
  68 int ignore_irq13 = 0;           /* set if exception 16 works */
  69 int wp_works_ok = 0;            /* set if paging hardware honours WP */ 
  70 
  71 /*
  72  * Bus types ..
  73  */
  74 int EISA_bus = 0;
  75 
  76 extern int _setitimer(int, struct itimerval *, struct itimerval *);
  77 unsigned long * prof_buffer = NULL;
  78 unsigned long prof_len = 0;
  79 
  80 #define _S(nr) (1<<((nr)-1))
  81 
  82 extern void mem_use(void);
  83 
  84 extern int timer_interrupt(void);
  85 asmlinkage int system_call(void);
  86 
  87 static unsigned long init_kernel_stack[1024] = { STACK_MAGIC, };
  88 struct task_struct init_task = INIT_TASK;
  89 
  90 unsigned long volatile jiffies=0;
  91 
  92 struct task_struct *current = &init_task;
  93 struct task_struct *last_task_used_math = NULL;
  94 
  95 struct task_struct * task[NR_TASKS] = {&init_task, };
  96 
  97 long user_stack [ PAGE_SIZE>>2 ] = { STACK_MAGIC, };
  98 
  99 struct {
 100         long * a;
 101         short b;
 102         } stack_start = { & user_stack [PAGE_SIZE>>2] , KERNEL_DS };
 103 
 104 struct kernel_stat kstat = { 0 };
 105 
 106 /*
 107  *  'math_state_restore()' saves the current math information in the
 108  * old math state array, and gets the new ones from the current task
 109  *
 110  * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 111  * Don't touch unless you *really* know how it works.
 112  */
 113 asmlinkage void math_state_restore(void)
 114 {
 115         __asm__ __volatile__("clts");
 116         if (last_task_used_math == current)
 117                 return;
 118         timer_table[COPRO_TIMER].expires = jiffies+50;
 119         timer_active |= 1<<COPRO_TIMER; 
 120         if (last_task_used_math)
 121                 __asm__("fnsave %0":"=m" (last_task_used_math->tss.i387));
 122         else
 123                 __asm__("fnclex");
 124         last_task_used_math = current;
 125         if (current->used_math) {
 126                 __asm__("frstor %0": :"m" (current->tss.i387));
 127         } else {
 128                 __asm__("fninit");
 129                 current->used_math=1;
 130         }
 131         timer_active &= ~(1<<COPRO_TIMER);
 132 }
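
/*
 * Background sketch of the lazy-FPU scheme the function above implements
 * (the trap entry code and switch_to() live elsewhere, not in this file):
 * a context switch leaves TS set in CR0, so the first coprocessor
 * instruction a task executes afterwards raises "device not available",
 * whose handler calls math_state_restore(). The clts clears TS, fnsave
 * parks the previous owner's i387 state in its TSS, and frstor/fninit
 * loads or initialises state for the current task -- so FPU context is
 * only moved when a task actually touches the FPU.
 */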
 133 
 134 #ifndef CONFIG_MATH_EMULATION
 135 
 136 asmlinkage void math_emulate(long arg)
 137 {
 138   printk("math-emulation not enabled and no coprocessor found.\n");
 139   printk("killing %s.\n",current->comm);
 140   send_sig(SIGFPE,current,1);
 141   schedule();
 142 }
 143 
 144 #endif /* CONFIG_MATH_EMULATION */
 145 
 146 unsigned long itimer_ticks = 0;
 147 unsigned long itimer_next = ~0;
 148 static unsigned long lost_ticks = 0;
 149 
 150 /*
 151  *  'schedule()' is the scheduler function. It's a very simple and nice
 152  * scheduler: it's not perfect, but certainly works for most things.
 153  * The one thing you might take a look at is the signal-handler code here.
 154  *
 155  *   NOTE!!  Task 0 is the 'idle' task, which gets called when no other
 156  * tasks can run. It cannot be killed, and it cannot sleep. The 'state'
 157  * information in task[0] is never used.
 158  *
 159  * The "confuse_gcc" goto is used only to get better assembly code..
 160  * Dijkstra probably hates me.
 161  */
 162 asmlinkage void schedule(void)
 163 {
 164         int c;
 165         struct task_struct * p;
 166         struct task_struct * next;
 167         unsigned long ticks;
 168 
 169 /* check alarm, wake up any interruptible tasks that have got a signal */
 170 
 171         if (intr_count) {
 172                 printk("Aiee: scheduling in interrupt\n");
 173                 intr_count = 0;
 174         }
 175         cli();
 176         ticks = itimer_ticks;
 177         itimer_ticks = 0;
 178         itimer_next = ~0;
 179         sti();
 180         need_resched = 0;
 181         p = &init_task;
 182         for (;;) {
 183                 if ((p = p->next_task) == &init_task)
 184                         goto confuse_gcc1;
 185                 if (ticks && p->it_real_value) {
 186                         if (p->it_real_value <= ticks) {
 187                                 send_sig(SIGALRM, p, 1);
 188                                 if (!p->it_real_incr) {
 189                                         p->it_real_value = 0;
 190                                         goto end_itimer;
 191                                 }
 192                                 do {
 193                                         p->it_real_value += p->it_real_incr;
 194                                 } while (p->it_real_value <= ticks);
 195                         }
 196                         p->it_real_value -= ticks;
 197                         if (p->it_real_value < itimer_next)
 198                                 itimer_next = p->it_real_value;
 199                 }
 200 end_itimer:
 201                 if (p->state != TASK_INTERRUPTIBLE)
 202                         continue;
 203                 if (p->signal & ~p->blocked) {
 204                         p->state = TASK_RUNNING;
 205                         continue;
 206                 }
 207                 if (p->timeout && p->timeout <= jiffies) {
 208                         p->timeout = 0;
 209                         p->state = TASK_RUNNING;
 210                 }
 211         }
 212 confuse_gcc1:
 213 
 214 /* this is the scheduler proper: */
 215 #if 0
 216         /* give processes that go to sleep a bit higher priority.. */
 217         /* This depends on the values for TASK_XXX */
 218         /* This gives smoother scheduling for some things, but */
 219         /* can be very unfair under some circumstances, so.. */
 220         if (TASK_UNINTERRUPTIBLE >= (unsigned) current->state &&
 221             current->counter < current->priority*2) {
 222                 ++current->counter;
 223         }
 224 #endif
 225         c = -1000;
 226         next = p = &init_task;
 227         for (;;) {
 228                 if ((p = p->next_task) == &init_task)
 229                         goto confuse_gcc2;
 230                 if (p->state == TASK_RUNNING && p->counter > c)
 231                         c = p->counter, next = p;
 232         }
 233 confuse_gcc2:
 234         if (!c) {
 235                 for_each_task(p)
 236                         p->counter = (p->counter >> 1) + p->priority;
 237         }
 238         if (current == next)
 239                 return;
 240         kstat.context_swtch++;
 241         switch_to(next);
 242         /* Now maybe reload the debug registers */
 243         if(current->debugreg[7]){
 244                 loaddebug(0);
 245                 loaddebug(1);
 246                 loaddebug(2);
 247                 loaddebug(3);
 248                 loaddebug(6);
 249         };
 250 }
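
/*
 * A worked note on the recalculation above: when no runnable task has any
 * counter left, every task (runnable or not) gets
 *
 *         counter = counter/2 + priority
 *
 * Iterating that recurrence gives c/2 + p, then c/4 + 3p/2, and so on,
 * converging towards 2*priority. A task that has been sleeping therefore
 * wakes up with close to twice the timeslice of a task that keeps burning
 * its counter, which is how this scheduler favours interactive jobs.
 */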
 251 
 252 asmlinkage int sys_pause(void)
 253 {
 254         current->state = TASK_INTERRUPTIBLE;
 255         schedule();
 256         return -ERESTARTNOHAND;
 257 }
 258 
 259 /*
 260  * wake_up doesn't wake up stopped processes - they have to be awakened
 261  * with signals or similar.
 262  *
 263  * Note that this doesn't need cli-sti pairs: interrupts may not change
 264  * the wait-queue structures directly, but only call wake_up() to wake
 265  * a process. The process itself must remove its entry from the queue once it has woken.
 266  */
 267 void wake_up(struct wait_queue **q)
 268 {
 269         struct wait_queue *tmp;
 270         struct task_struct * p;
 271 
 272         if (!q || !(tmp = *q))
 273                 return;
 274         do {
 275                 if ((p = tmp->task) != NULL) {
 276                         if ((p->state == TASK_UNINTERRUPTIBLE) ||
 277                             (p->state == TASK_INTERRUPTIBLE)) {
 278                                 p->state = TASK_RUNNING;
 279                                 if (p->counter > current->counter)
 280                                         need_resched = 1;
 281                         }
 282                 }
 283                 if (!tmp->next) {
 284                         printk("wait_queue is bad (eip = %08lx)\n",((unsigned long *) q)[-1]);
 285                         printk("        q = %p\n",q);
 286                         printk("       *q = %p\n",*q);
 287                         printk("      tmp = %p\n",tmp);
 288                         break;
 289                 }
 290                 tmp = tmp->next;
 291         } while (tmp != *q);
 292 }
 293 
 294 void wake_up_interruptible(struct wait_queue **q)
 295 {
 296         struct wait_queue *tmp;
 297         struct task_struct * p;
 298 
 299         if (!q || !(tmp = *q))
 300                 return;
 301         do {
 302                 if ((p = tmp->task) != NULL) {
 303                         if (p->state == TASK_INTERRUPTIBLE) {
 304                                 p->state = TASK_RUNNING;
 305                                 if (p->counter > current->counter)
 306                                         need_resched = 1;
 307                         }
 308                 }
 309                 if (!tmp->next) {
 310                         printk("wait_queue is bad (eip = %08lx)\n",((unsigned long *) q)[-1]);
 311                         printk("        q = %p\n",q);
 312                         printk("       *q = %p\n",*q);
 313                         printk("      tmp = %p\n",tmp);
 314                         break;
 315                 }
 316                 tmp = tmp->next;
 317         } while (tmp != *q);
 318 }
 319 
 320 void __down(struct semaphore * sem)
 321 {
 322         struct wait_queue wait = { current, NULL };
 323         add_wait_queue(&sem->wait, &wait);
 324         current->state = TASK_UNINTERRUPTIBLE;
 325         while (sem->count <= 0) {
 326                 schedule();
 327                 current->state = TASK_UNINTERRUPTIBLE;
 328         }
 329         current->state = TASK_RUNNING;
 330         remove_wait_queue(&sem->wait, &wait);
 331 }
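
/*
 * Illustrative use of the slow path above (a sketch, not code from this
 * file): the down()/up() helpers declared alongside struct semaphore in
 * the headers adjust sem->count and fall back to __down() only when the
 * semaphore is already held; MUTEX is assumed to be the usual
 * count-of-one initialiser.
 */
#if 0   /* example only */
static struct semaphore buf_sem = MUTEX;

static void example_critical_section(void)
{
        down(&buf_sem);         /* blocks in __down() if already held */
        /* ... touch the shared data ... */
        up(&buf_sem);           /* wakes waiters queued on buf_sem.wait */
}
#endif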
 332 
 333 static inline void __sleep_on(struct wait_queue **p, int state)
 334 {
 335         unsigned long flags;
 336         struct wait_queue wait = { current, NULL };
 337 
 338         if (!p)
 339                 return;
 340         if (current == task[0])
 341                 panic("task[0] trying to sleep");
 342         current->state = state;
 343         add_wait_queue(p, &wait);
 344         save_flags(flags);
 345         sti();
 346         schedule();
 347         remove_wait_queue(p, &wait);
 348         restore_flags(flags);
 349 }
 350 
 351 void interruptible_sleep_on(struct wait_queue **p)
 352 {
 353         __sleep_on(p,TASK_INTERRUPTIBLE);
 354 }
 355 
 356 void sleep_on(struct wait_queue **p)
 357 {
 358         __sleep_on(p,TASK_UNINTERRUPTIBLE);
 359 }
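
/*
 * Typical driver-style use of the primitives above (a sketch only; the
 * wait queue, flag and handler names are hypothetical, and a real driver
 * must close the race between testing the flag and sleeping, e.g. with
 * cli()/sti() around the check):
 */
#if 0   /* example only */
static struct wait_queue * example_waitq = NULL;
static volatile int example_data_ready = 0;

static void example_read(void)                  /* process context */
{
        while (!example_data_ready)
                sleep_on(&example_waitq);       /* uninterruptible wait */
        example_data_ready = 0;
        /* ... copy the data out ... */
}

static void example_interrupt(int irq)          /* interrupt context */
{
        example_data_ready = 1;
        wake_up(&example_waitq);                /* marks sleepers runnable */
}
#endif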
 360 
 361 static struct timer_list * next_timer = NULL;
 362 
 363 void add_timer(struct timer_list * timer)
 364 {
 365         unsigned long flags;
 366         struct timer_list ** p;
 367 
 368         if (!timer)
 369                 return;
 370         timer->next = NULL;
 371         p = &next_timer;
 372         save_flags(flags);
 373         cli();
 374         while (*p) {
 375                 if ((*p)->expires > timer->expires) {
 376                         (*p)->expires -= timer->expires;
 377                         timer->next = *p;
 378                         break;
 379                 }
 380                 timer->expires -= (*p)->expires;
 381                 p = &(*p)->next;
 382         }
 383         *p = timer;
 384         restore_flags(flags);
 385 }
 386 
 387 int del_timer(struct timer_list * timer)
 388 {
 389         unsigned long flags;
 390         unsigned long expires = 0;
 391         struct timer_list **p;
 392 
 393         p = &next_timer;
 394         save_flags(flags);
 395         cli();
 396         while (*p) {
 397                 if (*p == timer) {
 398                         if ((*p = timer->next) != NULL)
 399                                 (*p)->expires += timer->expires;
 400                         timer->expires += expires;
 401                         restore_flags(flags);
 402                         return 1;
 403                 }
 404                 expires += (*p)->expires;
 405                 p = &(*p)->next;
 406         }
 407         restore_flags(flags);
 408         return 0;
 409 }
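
/*
 * The queue managed above is a delta list (illustrative numbers): timers
 * due in 10, 15 and 22 jiffies are stored with expires values 10, 5 and 7,
 * each entry holding its delay relative to the previous one. add_timer()
 * subtracts as it walks to find the insertion point, do_timer() only has
 * to decrement the head's expires once per tick, and timer_bh() then runs
 * every leading entry whose delta has reached zero. del_timer() hands the
 * removed entry's remaining delta to its successor so the rest of the
 * queue keeps meaning the same absolute times.
 */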
 410 
 411 unsigned long timer_active = 0;
 412 struct timer_struct timer_table[32];
 413 
 414 /*
 415  * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 416  * imply that avenrun[] is the standard name for this kind of thing.
 417  * Nothing else seems to be standardized: the fractional size etc
 418  * all seem to differ on different machines.
 419  */
 420 unsigned long avenrun[3] = { 0,0,0 };
 421 
 422 /*
 423  * Nr of active tasks - counted in fixed-point numbers
 424  */
 425 static unsigned long count_active_tasks(void)
 426 {
 427         struct task_struct **p;
 428         unsigned long nr = 0;
 429 
 430         for(p = &LAST_TASK; p > &FIRST_TASK; --p)
 431                 if (*p && ((*p)->state == TASK_RUNNING ||
 432                            (*p)->state == TASK_UNINTERRUPTIBLE ||
 433                            (*p)->state == TASK_SWAPPING))
 434                         nr += FIXED_1;
 435         return nr;
 436 }
 437 
 438 static inline void calc_load(void)
 439 {
 440         unsigned long active_tasks; /* fixed-point */
 441         static int count = LOAD_FREQ;
 442 
 443         if (count-- > 0)
 444                 return;
 445         count = LOAD_FREQ;
 446         active_tasks = count_active_tasks();
 447         CALC_LOAD(avenrun[0], EXP_1, active_tasks);
 448         CALC_LOAD(avenrun[1], EXP_5, active_tasks);
 449         CALC_LOAD(avenrun[2], EXP_15, active_tasks);
 450 }
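
/*
 * What CALC_LOAD amounts to (the macro and the EXP_* and FIXED_1 constants
 * are in <linux/sched.h>): every LOAD_FREQ ticks -- about five seconds --
 * each average is updated as a fixed-point exponential decay,
 *
 *      load = (load * exp + active * (FIXED_1 - exp)) >> FSHIFT
 *
 * the integer form of  load = load*e^(-5s/T) + n*(1 - e^(-5s/T))  with
 * T = 60, 300 and 900 seconds for the 1, 5 and 15 minute figures.
 * count_active_tasks() already returns the task count scaled by FIXED_1,
 * so avenrun[] holds the familiar load averages in fixed point.
 */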
 451 
 452 /*
 453  * this routine handles the overflow of the microsecond field
 454  *
 455  * The tricky bits of code to handle the accurate clock support
 456  * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 457  * They were originally developed for SUN and DEC kernels.
 458  * All the kudos should go to Dave for this stuff.
 459  *
 460  * These were ported to Linux by Philip Gladstone.
 461  */
 462 static void second_overflow(void)
 463 {
 464         long ltemp;
 465         /* last time the cmos clock got updated */
 466         static long last_rtc_update=0;
 467         extern int set_rtc_mmss(unsigned long);
 468 
 469         /* Bump the maxerror field */
 470         time_maxerror = (0x70000000-time_maxerror < time_tolerance) ?
 471           0x70000000 : (time_maxerror + time_tolerance);
 472 
 473         /* Run the PLL */
 474         if (time_offset < 0) {
 475                 ltemp = (-(time_offset+1) >> (SHIFT_KG + time_constant)) + 1;
 476                 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
 477                 time_offset += (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
 478                 time_adj = - time_adj;
 479         } else if (time_offset > 0) {
 480                 ltemp = ((time_offset-1) >> (SHIFT_KG + time_constant)) + 1;
 481                 time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
 482                 time_offset -= (time_adj * HZ) >> (SHIFT_SCALE - SHIFT_UPDATE);
 483         } else {
 484                 time_adj = 0;
 485         }
 486 
 487         time_adj += (time_freq >> (SHIFT_KF + SHIFT_HZ - SHIFT_SCALE))
 488             + FINETUNE;
 489 
 490         /* Handle the leap second stuff */
 491         switch (time_status) {
 492                 case TIME_INS:
 493                 /* ugly divide should be replaced */
 494                 if (xtime.tv_sec % 86400 == 0) {
 495                         xtime.tv_sec--; /* !! */
 496                         time_status = TIME_OOP;
 497                         printk("Clock: inserting leap second 23:59:60 GMT\n");
 498                 }
 499                 break;
 500 
 501                 case TIME_DEL:
 502                 /* ugly divide should be replaced */
 503                 if (xtime.tv_sec % 86400 == 86399) {
 504                         xtime.tv_sec++;
 505                         time_status = TIME_OK;
 506                         printk("Clock: deleting leap second 23:59:59 GMT\n");
 507                 }
 508                 break;
 509 
 510                 case TIME_OOP:
 511                 time_status = TIME_OK;
 512                 break;
 513         }
 514         if (xtime.tv_sec > last_rtc_update + 660)
 515           if (set_rtc_mmss(xtime.tv_sec) == 0)
 516             last_rtc_update = xtime.tv_sec;
 517 }
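
/*
 * Rough picture of the PLL step above: time_offset is the remaining phase
 * error, in scaled microseconds, set from userspace via adjtimex(). Once a
 * second this routine peels off a fraction of it -- roughly
 * time_offset >> (SHIFT_KG + time_constant) -- rescales that to a per-tick
 * amount and leaves it in time_adj together with the long-term frequency
 * correction time_freq. do_timer() then adds time_adj into time_phase on
 * every tick, so the error is slewed out smoothly rather than by stepping
 * the clock.
 */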
 518 
 519 /*
 520  * disregard lost ticks for now.. We don't care enough.
 521  */
 522 static void timer_bh(void * unused)
 523 {
 524         unsigned long mask;
 525         struct timer_struct *tp;
 526 
 527         cli();
 528         while (next_timer && next_timer->expires == 0) {
 529                 void (*fn)(unsigned long) = next_timer->function;
 530                 unsigned long data = next_timer->data;
 531                 next_timer = next_timer->next;
 532                 sti();
 533                 fn(data);
 534                 cli();
 535         }
 536         sti();
 537         
 538         for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
 539                 if (mask > timer_active)
 540                         break;
 541                 if (!(mask & timer_active))
 542                         continue;
 543                 if (tp->expires > jiffies)
 544                         continue;
 545                 timer_active &= ~mask;
 546                 tp->fn();
 547                 sti();
 548         }
 549 }
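
/*
 * A summary of the two timer flavours handled above: next_timer is the
 * dynamic delta list of struct timer_list entries added with add_timer(),
 * while timer_table[] plus the timer_active bitmask is a small static set
 * of struct timer_struct slots indexed by well-known numbers such as
 * COPRO_TIMER. do_timer() only does per-tick bookkeeping in interrupt
 * context and mark_bh(TIMER_BH)s this routine, so the handler functions
 * themselves run later, with interrupts enabled, from the bottom-half pass.
 */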
 550 
 551 /*
 552  * The int argument is really a (struct pt_regs *), in case the
 553  * interrupt wants to know from where it was called. The timer
 554  * irq uses this to decide if it should update the user or system
 555  * times.
 556  */
 557 static void do_timer(struct pt_regs * regs)
 558 {
 559         unsigned long mask;
 560         struct timer_struct *tp;
 561 
 562         long ltemp;
 563 
 564         /* Advance the phase; once a whole microsecond has accumulated,
 565          * fold it into this tick's advance of xtime.tv_usec.
 566          */
 567         time_phase += time_adj;
 568         if (time_phase < -FINEUSEC) {
 569                 ltemp = -time_phase >> SHIFT_SCALE;
 570                 time_phase += ltemp << SHIFT_SCALE;
 571                 xtime.tv_usec += tick + time_adjust_step - ltemp;
 572         }
 573         else if (time_phase > FINEUSEC) {
 574                 ltemp = time_phase >> SHIFT_SCALE;
 575                 time_phase -= ltemp << SHIFT_SCALE;
 576                 xtime.tv_usec += tick + time_adjust_step + ltemp;
 577         } else
 578                 xtime.tv_usec += tick + time_adjust_step;
 579 
 580         if (time_adjust)
 581         {
 582             /* We are doing an adjtime thing. 
 583              *
 584              * Modify the value of the tick for next time.
 585              * Note that a positive delta means we want the clock
 586              * to run fast. This means that the tick should be bigger
 587              *
 588              * Limit the amount of the step for *next* tick to be
 589              * in the range -tickadj .. +tickadj
 590              */
 591              if (time_adjust > tickadj)
 592                time_adjust_step = tickadj;
 593              else if (time_adjust < -tickadj)
 594                time_adjust_step = -tickadj;
 595              else
 596                time_adjust_step = time_adjust;
 597              
 598             /* Reduce by this step the amount of time left  */
 599             time_adjust -= time_adjust_step;
 600         }
 601         else
 602             time_adjust_step = 0;
 603 
 604         if (xtime.tv_usec >= 1000000) {
 605             xtime.tv_usec -= 1000000;
 606             xtime.tv_sec++;
 607             second_overflow();
 608         }
 609 
 610         jiffies++;
 611         calc_load();
 612         if ((VM_MASK & regs->eflags) || (3 & regs->cs)) {
 613                 current->utime++;
 614                 if (current != task[0]) {
 615                         if (current->priority < 15)
 616                                 kstat.cpu_nice++;
 617                         else
 618                                 kstat.cpu_user++;
 619                 }
 620                 /* Update ITIMER_VIRT for current task if not in a system call */
 621                 if (current->it_virt_value && !(--current->it_virt_value)) {
 622                         current->it_virt_value = current->it_virt_incr;
 623                         send_sig(SIGVTALRM,current,1);
 624                 }
 625         } else {
 626                 current->stime++;
 627                 if(current != task[0])
 628                         kstat.cpu_system++;
 629 #ifdef CONFIG_PROFILE
 630                 if (prof_buffer && current != task[0]) {
 631                         unsigned long eip = regs->eip;
 632                         eip >>= 2;
 633                         if (eip < prof_len)
 634                                 prof_buffer[eip]++;
 635                 }
 636 #endif
 637         }
 638         if (current != task[0] && 0 > --current->counter) {
 639                 current->counter = 0;
 640                 need_resched = 1;
 641         }
 642         /* Update ITIMER_PROF for the current task */
 643         if (current->it_prof_value && !(--current->it_prof_value)) {
 644                 current->it_prof_value = current->it_prof_incr;
 645                 send_sig(SIGPROF,current,1);
 646         }
 647         for (mask = 1, tp = timer_table+0 ; mask ; tp++,mask += mask) {
 648                 if (mask > timer_active)
 649                         break;
 650                 if (!(mask & timer_active))
 651                         continue;
 652                 if (tp->expires > jiffies)
 653                         continue;
 654                 mark_bh(TIMER_BH);
 655         }
 656         cli();
 657         itimer_ticks++;
 658         if (itimer_ticks > itimer_next)
 659                 need_resched = 1;
 660         if (next_timer) {
 661                 if (next_timer->expires) {
 662                         next_timer->expires--;
 663                         if (!next_timer->expires)
 664                                 mark_bh(TIMER_BH);
 665                 } else {
 666                         lost_ticks++;
 667                         mark_bh(TIMER_BH);
 668                 }
 669         }
 670         sti();
 671 }
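
/*
 * Scale of the adjtime slewing above, worked through for the usual i386
 * HZ of 100: tick = 1000000/HZ = 10000 us and tickadj = 500/HZ = 5 us, so
 * each tick may be stretched or shrunk by at most 5 us. Slewing away a
 * full 1 s adjustment therefore takes 1000000/5 = 200000 ticks, i.e. about
 * 2000 seconds of wall time -- adjtime() corrections are deliberately
 * gentle compared with stepping the clock.
 */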
 672 
 673 asmlinkage int sys_alarm(long seconds)
 674 {
 675         struct itimerval it_new, it_old;
 676 
 677         it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
 678         it_new.it_value.tv_sec = seconds;
 679         it_new.it_value.tv_usec = 0;
 680         _setitimer(ITIMER_REAL, &it_new, &it_old);
 681         return(it_old.it_value.tv_sec + (it_old.it_value.tv_usec / 1000000));
 682 }
 683 
 684 asmlinkage int sys_getpid(void)
 685 {
 686         return current->pid;
 687 }
 688 
 689 asmlinkage int sys_getppid(void)
 690 {
 691         return current->p_opptr->pid;
 692 }
 693 
 694 asmlinkage int sys_getuid(void)
 695 {
 696         return current->uid;
 697 }
 698 
 699 asmlinkage int sys_geteuid(void)
 700 {
 701         return current->euid;
 702 }
 703 
 704 asmlinkage int sys_getgid(void)
 705 {
 706         return current->gid;
 707 }
 708 
 709 asmlinkage int sys_getegid(void)
 710 {
 711         return current->egid;
 712 }
 713 
 714 asmlinkage int sys_nice(long increment)
 715 {
 716         int newprio;
 717 
 718         if (increment < 0 && !suser())
 719                 return -EPERM;
 720         newprio = current->priority - increment;
 721         if (newprio < 1)
 722                 newprio = 1;
 723         if (newprio > 35)
 724                 newprio = 35;
 725         current->priority = newprio;
 726         return 0;
 727 }
 728 
 729 static void show_task(int nr,struct task_struct * p)
 730 {
 731         unsigned long free;
 732         static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
 733 
 734         printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
 735         if (((unsigned) p->state) < sizeof(stat_nam)/sizeof(char *))
 736                 printk(stat_nam[p->state]);
 737         else
 738                 printk(" ");
 739         if (p == current)
 740                 printk(" current  ");
 741         else
 742                 printk(" %08lX ", ((unsigned long *)p->tss.esp)[3]);
 743         for (free = 1; free < 1024 ; free++) {
 744                 if (((unsigned long *)p->kernel_stack_page)[free])
 745                         break;
 746         }
 747         printk("%5lu %5d %6d ", free << 2, p->pid, p->p_pptr->pid);
 748         if (p->p_cptr)
 749                 printk("%5d ", p->p_cptr->pid);
 750         else
 751                 printk("      ");
 752         if (p->p_ysptr)
 753                 printk("%7d", p->p_ysptr->pid);
 754         else
 755                 printk("       ");
 756         if (p->p_osptr)
 757                 printk(" %5d\n", p->p_osptr->pid);
 758         else
 759                 printk("\n");
 760 }
 761 
 762 void show_state(void)
 763 {
 764         int i;
 765 
 766         printk("                         free                        sibling\n");
 767         printk("  task             PC    stack   pid father child younger older\n");
 768         for (i=0 ; i<NR_TASKS ; i++)
 769                 if (task[i])
 770                         show_task(i,task[i]);
 771 }
 772 
 773 void sched_init(void)
 774 {
 775         int i;
 776         struct desc_struct * p;
 777 
 778         bh_base[TIMER_BH].routine = timer_bh;
 779         if (sizeof(struct sigaction) != 16)
 780                 panic("Struct sigaction MUST be 16 bytes");
 781         set_tss_desc(gdt+FIRST_TSS_ENTRY,&init_task.tss);
 782         set_ldt_desc(gdt+FIRST_LDT_ENTRY,&default_ldt,1);
 783         set_system_gate(0x80,&system_call);
 784         p = gdt+2+FIRST_TSS_ENTRY;
 785         for(i=1 ; i<NR_TASKS ; i++) {
 786                 task[i] = NULL;
 787                 p->a=p->b=0;
 788                 p++;
 789                 p->a=p->b=0;
 790                 p++;
 791         }
 792 /* Clear NT, so that we won't have troubles with that later on */
 793         __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
 794         load_TR(0);
 795         load_ldt(0);
 796         outb_p(0x34,0x43);              /* binary, mode 2, LSB/MSB, ch 0 */
 797         outb_p(LATCH & 0xff , 0x40);    /* LSB */
 798         outb(LATCH >> 8 , 0x40);        /* MSB */
 799         if (request_irq(TIMER_IRQ,(void (*)(int)) do_timer)!=0)
 800                 panic("Could not allocate timer IRQ!");
 801 }
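
/*
 * A note on the three outb()s above: command 0x34 to port 0x43 sets PIT
 * channel 0 to binary counting, mode 2 (rate generator) and LSB-then-MSB
 * loading, and LATCH (defined in the included headers as roughly
 * 1193180/HZ) is the divisor that makes the 1.19318 MHz PIT input clock
 * roll over HZ times per second. With HZ = 100 that is a divisor of about
 * 11932, i.e. a timer interrupt on IRQ 0 every ~10 ms, which is exactly
 * what request_irq(TIMER_IRQ, ...) wires to do_timer() just above.
 */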
