root/include/linux/sched.h


DEFINITIONS

This source file includes the following definitions:
  1. suser
  2. file_from_fd
  3. add_wait_queue
  4. remove_wait_queue
  5. select_wait
  6. down
  7. up

   1 #ifndef _LINUX_SCHED_H
   2 #define _LINUX_SCHED_H
   3 
   4 /*
   5  * define DEBUG if you want the wait-queues to have some extra
   6  * debugging code. It's not normally used, but might catch some
   7  * wait-queue coding errors.
   8  *
   9  *  #define DEBUG
  10  */
  11 
  12 #include <asm/param.h>  /* for HZ */
  13 
  14 extern unsigned long intr_count;
  15 extern unsigned long event;
  16 
  17 #include <linux/binfmts.h>
  18 #include <linux/personality.h>
  19 #include <linux/tasks.h>
  20 #include <linux/kernel.h>
  21 #include <asm/system.h>
  22 #include <asm/page.h>
  23 
  24 #include <linux/smp.h>
  25 #include <linux/tty.h>
  26 #include <linux/sem.h>
  27 
  28 /*
  29  * cloning flags:
  30  */
  31 #define CSIGNAL         0x000000ff      /* signal mask to be sent at exit */
  32 #define CLONE_VM        0x00000100      /* set if VM shared between processes */
  33 #define CLONE_FS        0x00000200      /* set if fs info shared between processes */
  34 #define CLONE_FILES     0x00000400      /* set if open files shared between processes */
  35 #define CLONE_SIGHAND   0x00000800      /* set if signal handlers shared */
  36 #define CLONE_PID       0x00001000      /* set if pid shared */
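
/*
 * Illustration only: how these bits combine.  A plain fork() passes
 * just the exit signal in the CSIGNAL bits, while a thread-style clone
 * additionally shares the address space, fs info, open files and
 * signal handlers with its parent.  SIGCHLD comes from
 * <linux/signal.h>, which is included further down.
 */
#define FORK_FLAGS_SKETCH       (SIGCHLD)
#define THREAD_FLAGS_SKETCH     (CLONE_VM | CLONE_FS | CLONE_FILES | \
                                 CLONE_SIGHAND | SIGCHLD)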
  37 
  38 /*
   39  * These are the constants used for the fixed-point load-average
  40  * counting. Some notes:
  41  *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
  42  *    a load-average precision of 10 bits integer + 11 bits fractional
  43  *  - if you want to count load-averages more often, you need more
  44  *    precision, or rounding will get you. With 2-second counting freq,
  45  *    the EXP_n values would be 1981, 2034 and 2043 if still using only
  46  *    11 bit fractions.
  47  */
  48 extern unsigned long avenrun[];         /* Load averages */
  49 
  50 #define FSHIFT          11              /* nr of bits of precision */
  51 #define FIXED_1         (1<<FSHIFT)     /* 1.0 as fixed-point */
  52 #define LOAD_FREQ       (5*HZ)          /* 5 sec intervals */
  53 #define EXP_1           1884            /* 1/exp(5sec/1min) as fixed-point */
  54 #define EXP_5           2014            /* 1/exp(5sec/5min) */
  55 #define EXP_15          2037            /* 1/exp(5sec/15min) */
  56 
  57 #define CALC_LOAD(load,exp,n) \
  58         load *= exp; \
  59         load += n*(FIXED_1-exp); \
  60         load >>= FSHIFT;
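
/*
 * Illustration only: applying CALC_LOAD.  The timer code would call
 * something like this once every LOAD_FREQ ticks, passing the number
 * of runnable tasks scaled by FIXED_1.  The EXP_n values are simply
 * FIXED_1 * exp(-5sec/interval), e.g. EXP_1 = 2048 * exp(-5/60)
 * ~ 2048 * 0.920 ~ 1884.
 */
extern inline void calc_load_sketch(unsigned long active_tasks)
{
        /* active_tasks: runnable task count multiplied by FIXED_1 */
        CALC_LOAD(avenrun[0], EXP_1, active_tasks);
        CALC_LOAD(avenrun[1], EXP_5, active_tasks);
        CALC_LOAD(avenrun[2], EXP_15, active_tasks);
}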
  61 
  62 #define CT_TO_SECS(x)   ((x) / HZ)
  63 #define CT_TO_USECS(x)  (((x) % HZ) * 1000000/HZ)
  64 
  65 extern int nr_running, nr_tasks;
  66 extern int last_pid;
  67 
  68 #define FIRST_TASK task[0]
  69 #define LAST_TASK task[NR_TASKS-1]
  70 
  71 #include <linux/head.h>
  72 #include <linux/fs.h>
  73 #include <linux/signal.h>
  74 #include <linux/time.h>
  75 #include <linux/param.h>
  76 #include <linux/resource.h>
  77 #include <linux/vm86.h>
  78 #include <linux/math_emu.h>
  79 #include <linux/ptrace.h>
  80 #include <linux/timer.h>
  81 
  82 #include <asm/processor.h>
  83 
  84 #define TASK_RUNNING            0
  85 #define TASK_INTERRUPTIBLE      1
  86 #define TASK_UNINTERRUPTIBLE    2
  87 #define TASK_ZOMBIE             3
  88 #define TASK_STOPPED            4
  89 #define TASK_SWAPPING           5
  90 
  91 /*
  92  * Scheduling policies
  93  */
  94 #define SCHED_OTHER             0
  95 #define SCHED_FIFO              1
  96 #define SCHED_RR                2
  97 
  98 struct sched_param {
  99         int sched_priority;
 100 };
 101 
 102 #ifndef NULL
 103 #define NULL ((void *) 0)
 104 #endif
 105 
 106 #ifdef __KERNEL__
 107 
 108 extern void sched_init(void);
 109 extern void show_state(void);
 110 extern void trap_init(void);
 111 
 112 asmlinkage void schedule(void);
 113 
  114 /* per-process table of open files */
 115 struct files_struct {
  116         /* reference count: number of tasks sharing this table */
 117         int count;
 118         /* bit mask to close fds on exec */
 119         fd_set close_on_exec;
  120         /* fd[i] is the file structure behind descriptor i, or NULL;
  121          * a process has at most NR_OPEN descriptors */
 122         struct file * fd[NR_OPEN];
 123 };
 124 
 125 #define INIT_FILES { \
 126         1, \
 127         { { 0, } }, \
 128         { NULL, } \
 129 }
 130 
 131 struct fs_struct {
 132         int count;
 133         unsigned short umask;
 134         struct inode * root, * pwd;
 135 };
 136 
 137 #define INIT_FS { \
 138         1, \
 139         0022, \
 140         NULL, NULL \
 141 }
 142 
 143 struct mm_struct {
 144         int count;
 145         pgd_t * pgd;
 146         unsigned long context;
 147         unsigned long start_code, end_code, start_data, end_data;
 148         unsigned long start_brk, brk, start_stack, start_mmap;
 149         unsigned long arg_start, arg_end, env_start, env_end;
 150         unsigned long rss, total_vm, locked_vm;
 151         unsigned long def_flags;
 152         struct vm_area_struct * mmap;
 153         struct vm_area_struct * mmap_avl;
 154 };
 155 
 156 #define INIT_MM { \
 157                 1, \
 158                 swapper_pg_dir, \
 159                 0, \
 160                 0, 0, 0, 0, \
 161                 0, 0, 0, 0, \
 162                 0, 0, 0, 0, \
 163                 0, 0, 0, \
 164                 0, \
 165                 &init_mmap, &init_mmap }
 166 
 167 struct signal_struct {
 168         int count;
 169         struct sigaction action[32];
 170 };
 171 
 172 #define INIT_SIGNALS { \
 173                 1, \
 174                 { {0,}, } }
 175 
 176 struct task_struct {
  177 /* these offsets are hardcoded in assembly code - don't move or reorder them */
 178         volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
 179         long counter;
 180         long priority;
 181         unsigned long signal;
 182         unsigned long blocked;  /* bitmap of masked signals */
 183         unsigned long flags;    /* per process flags, defined below */
 184         int errno;
 185         long debugreg[8];  /* Hardware debugging registers */
 186         struct exec_domain *exec_domain;
 187 /* various fields */
 188         struct linux_binfmt *binfmt;
 189         struct task_struct *next_task, *prev_task;
 190         struct task_struct *next_run,  *prev_run;
 191         unsigned long saved_kernel_stack;
 192         unsigned long kernel_stack_page;
 193         int exit_code, exit_signal;
  194         /* execution personality (see <linux/personality.h>) */
 195         unsigned long personality;
 196         int dumpable:1;
 197         int did_exec:1;
  198         /* an int here, though pid_t would be the more natural type */
 199         int pid;
 200         int pgrp;
 201         int tty_old_pgrp;
 202         int session;
 203         /* boolean value for session group leader */
 204         int leader;
 205         int     groups[NGROUPS];
 206         /* 
 207          * pointers to (original) parent process, youngest child, younger sibling,
 208          * older sibling, respectively.  (p->father can be replaced with 
 209          * p->p_pptr->pid)
 210          */
 211         struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr;
 212         struct wait_queue *wait_chldexit;       /* for wait4() */
 213         unsigned short uid,euid,suid,fsuid;
 214         unsigned short gid,egid,sgid,fsgid;
 215         unsigned long timeout, policy, rt_priority;
 216         unsigned long it_real_value, it_prof_value, it_virt_value;
 217         unsigned long it_real_incr, it_prof_incr, it_virt_incr;
 218         struct timer_list real_timer;
 219         long utime, stime, cutime, cstime, start_time;
 220 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
 221         unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;
 222         int swappable:1;
 223         unsigned long swap_address;
 224         unsigned long old_maj_flt;      /* old value of maj_flt */
 225         unsigned long dec_flt;          /* page fault count of the last time */
 226         unsigned long swap_cnt;         /* number of pages to swap on next pass */
 227 /* limits */
 228         struct rlimit rlim[RLIM_NLIMITS];
 229         unsigned short used_math;
 230         char comm[16];
 231 /* file system info */
 232         int link_count;
 233         struct tty_struct *tty; /* NULL if no tty */
 234 /* ipc stuff */
 235         struct sem_undo *semundo;
 236         struct sem_queue *semsleeping;
 237 /* ldt for this task - used by Wine.  If NULL, default_ldt is used */
 238         struct desc_struct *ldt;
 239 /* tss for this task */
 240         struct thread_struct tss;
 241 /* filesystem information */
 242         struct fs_struct *fs;
 243 /* open file information */
 244         struct files_struct *files;
 245 /* memory management info */
 246         struct mm_struct *mm;
 247 /* signal handlers */
 248         struct signal_struct *sig;
 249 #ifdef __SMP__
 250         int processor;
 251         int last_processor;
 252         int lock_depth;         /* Lock depth. We can context switch in and out of holding a syscall kernel lock... */  
 253 #endif  
 254 };
 255 
 256 /*
 257  * Per process flags
 258  */
 259 #define PF_ALIGNWARN    0x00000001      /* Print alignment warning msgs */
 260                                         /* Not implemented yet, only for 486*/
 261 #define PF_PTRACED      0x00000010      /* set if ptrace (0) has been called. */
 262 #define PF_TRACESYS     0x00000020      /* tracing system calls */
 263 #define PF_FORKNOEXEC   0x00000040      /* forked but didn't exec */
 264 #define PF_SUPERPRIV    0x00000100      /* used super-user privileges */
 265 #define PF_DUMPCORE     0x00000200      /* dumped core */
 266 #define PF_SIGNALED     0x00000400      /* killed by a signal */
 267 
 268 #define PF_STARTING     0x00000100      /* being created */
 269 #define PF_EXITING      0x00000200      /* getting shut down */
 270 
 271 #define PF_USEDFPU      0x00100000      /* Process used the FPU this quantum (SMP only) */
 272 #define PF_DTRACE       0x00200000      /* delayed trace (used on m68k) */
 273 
 274 /*
  275  * Limit the stack to some sane default: root can always
  276  * increase this limit if needed.  8MB seems reasonable.
 277  */
 278 #define _STK_LIM        (8*1024*1024)
 279 
 280 #define DEF_PRIORITY    (20*HZ/100)     /* 200 ms time slices */
 281 
 282 /*
  283  * INIT_TASK is used to set up the first task table; touch it at
  284  * your own risk! Base=0, limit=0x1fffff (=2MB)
 285  */
 286 #define INIT_TASK \
 287 /* state etc */ { 0,DEF_PRIORITY,DEF_PRIORITY,0,0,0,0, \
 288 /* debugregs */ { 0, },            \
 289 /* exec domain */&default_exec_domain, \
 290 /* binfmt */    NULL, \
 291 /* schedlink */ &init_task,&init_task, &init_task, &init_task, \
 292 /* stack */     0,(unsigned long) &init_kernel_stack, \
 293 /* ec,brk... */ 0,0,0,0,0, \
 294 /* pid etc.. */ 0,0,0,0,0, \
 295 /* suppl grps*/ {NOGROUP,}, \
 296 /* proc links*/ &init_task,&init_task,NULL,NULL,NULL,NULL, \
 297 /* uid etc */   0,0,0,0,0,0,0,0, \
 298 /* timeout */   0,SCHED_OTHER,0,0,0,0,0,0,0, \
 299 /* timer */     { NULL, NULL, 0, 0, it_real_fn }, \
 300 /* utime */     0,0,0,0,0, \
 301 /* flt */       0,0,0,0,0,0, \
 302 /* swp */       0,0,0,0,0, \
 303 /* rlimits */   INIT_RLIMITS, \
 304 /* math */      0, \
 305 /* comm */      "swapper", \
 306 /* fs info */   0,NULL, \
 307 /* ipc */       NULL, NULL, \
 308 /* ldt */       NULL, \
 309 /* tss */       INIT_TSS, \
 310 /* fs */        &init_fs, \
 311 /* files */     &init_files, \
 312 /* mm */        &init_mm, \
 313 /* signals */   &init_signals, \
 314 }
 315 
 316 extern struct   mm_struct init_mm;
 317 extern struct task_struct init_task;
 318 extern struct task_struct *task[NR_TASKS];
 319 extern struct task_struct *last_task_used_math;
 320 extern struct task_struct *current_set[NR_CPUS];
 321 /*
 322  *      On a single processor system this comes out as current_set[0] when cpp
 323  *      has finished with it, which gcc will optimise away.
 324  */
 325 #define current (0+current_set[smp_processor_id()])     /* Current on this processor */
 326 extern unsigned long volatile jiffies;
 327 extern unsigned long itimer_ticks;
 328 extern unsigned long itimer_next;
 329 extern struct timeval xtime;
 330 extern int need_resched;
 331 extern void do_timer(struct pt_regs *);
 332 
 333 extern unsigned int * prof_buffer;
 334 extern unsigned long prof_len;
 335 extern unsigned long prof_shift;
 336 
 337 extern int securelevel; /* system security level */
 338 
 339 #define CURRENT_TIME (xtime.tv_sec)
 340 
 341 extern void sleep_on(struct wait_queue ** p);
 342 extern void interruptible_sleep_on(struct wait_queue ** p);
 343 extern void wake_up(struct wait_queue ** p);
 344 extern void wake_up_interruptible(struct wait_queue ** p);
 345 extern void wake_up_process(struct task_struct * tsk);
 346 
 347 extern void notify_parent(struct task_struct * tsk);
 348 extern void force_sig(unsigned long sig,struct task_struct * p);
 349 extern int send_sig(unsigned long sig,struct task_struct * p,int priv);
 350 extern int in_group_p(gid_t grp);
 351 
 352 extern int request_irq(unsigned int irq,
 353                        void (*handler)(int, void *, struct pt_regs *),
 354                        unsigned long flags, 
 355                        const char *device,
 356                        void *dev_id);
 357 extern void free_irq(unsigned int irq, void *dev_id);
 358 
 359 /*
  360  * This has now become a routine instead of a macro; it sets a flag if
 361  * it returns true (to do BSD-style accounting where the process is flagged
 362  * if it uses root privs). The implication of this is that you should do
 363  * normal permissions checks first, and check suser() last.
 364  */
 365 extern inline int suser(void)
 366 {
 367         if (current->euid == 0) {
 368                 current->flags |= PF_SUPERPRIV;
 369                 return 1;
 370         }
 371         return 0;
 372 }
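
/*
 * Illustration only, following the ordering the comment above asks
 * for: do the ordinary ownership check first and fall back on suser()
 * last, so PF_SUPERPRIV is only set when root privilege is what
 * actually allowed the operation.  The inode field names are those of
 * <linux/fs.h>.
 */
extern inline int owner_or_suser_sketch(struct inode * inode)
{
        if (current->fsuid == inode->i_uid)     /* normal check first */
                return 1;
        return suser();                         /* root override last */
}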
 373 
 374 extern void copy_thread(int, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
 375 extern void flush_thread(void);
 376 extern void exit_thread(void);
 377 
 378 extern void exit_mm(struct task_struct *);
 379 extern void exit_fs(struct task_struct *);
 380 extern void exit_files(struct task_struct *);
 381 extern void exit_sighand(struct task_struct *);
 382 extern void release_thread(struct task_struct *);
 383 
 384 extern int do_execve(char *, char **, char **, struct pt_regs *);
 385 extern int do_fork(unsigned long, unsigned long, struct pt_regs *);
 386 
 387 
 388 /* See if we have a valid user level fd.
 389  * If it makes sense, return the file structure it references.
 390  * Otherwise return NULL.
 391  */
 392 extern inline struct file *file_from_fd(const unsigned int fd)
 393 {
 394 
 395         if (fd >= NR_OPEN)
 396                 return NULL;
 397         /* either valid or null */
 398         return current->files->fd[fd];
 399 }
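
/*
 * Illustration only: typical use in a system call is to validate the
 * descriptor before touching it.  The function name is made up; EBADF
 * comes from <linux/errno.h>.
 */
extern inline int frob_fd_sketch(unsigned int fd)
{
        struct file * file = file_from_fd(fd);

        if (!file)
                return -EBADF;
        /* ... operate on file->f_inode, file->f_pos, ... */
        return 0;
}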
 400         
 401 /*
 402  * The wait-queues are circular lists, and you have to be *very* sure
 403  * to keep them correct. Use only these two functions to add/remove
 404  * entries in the queues.
 405  */
 406 extern inline void add_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
 407 {
 408         unsigned long flags;
 409 
 410 #ifdef DEBUG
 411         if (wait->next) {
 412                 __label__ here;
 413                 unsigned long pc;
 414                 pc = (unsigned long) &&here;
 415               here:
 416                 printk("add_wait_queue (%08lx): wait->next = %08lx\n",pc,(unsigned long) wait->next);
 417         }
 418 #endif
 419         save_flags(flags);
 420         cli();
 421         if (!*p) {
 422                 wait->next = wait;
 423                 *p = wait;
 424         } else {
 425                 wait->next = (*p)->next;
 426                 (*p)->next = wait;
 427         }
 428         restore_flags(flags);
 429 }
 430 
 431 extern inline void remove_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
 432 {
 433         unsigned long flags;
 434         struct wait_queue * tmp;
 435 #ifdef DEBUG
 436         unsigned long ok = 0;
 437 #endif
 438 
 439         save_flags(flags);
 440         cli();
 441         if ((*p == wait) &&
 442 #ifdef DEBUG
 443             (ok = 1) &&
 444 #endif
 445             ((*p = wait->next) == wait)) {
 446                 *p = NULL;
 447         } else {
 448                 tmp = wait;
 449                 while (tmp->next != wait) {
 450                         tmp = tmp->next;
 451 #ifdef DEBUG
 452                         if (tmp == *p)
 453                                 ok = 1;
 454 #endif
 455                 }
 456                 tmp->next = wait->next;
 457         }
 458         wait->next = NULL;
 459         restore_flags(flags);
 460 #ifdef DEBUG
 461         if (!ok) {
 462                 __label__ here;
 463                 ok = (unsigned long) &&here;
 464                 printk("removed wait_queue not on list.\n");
 465                 printk("list = %08lx, queue = %08lx\n",(unsigned long) p, (unsigned long) wait);
 466               here:
 467                 printk("eip = %08lx\n",ok);
 468         }
 469 #endif
 470 }
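
/*
 * Illustration only: the canonical way the two primitives are used
 * together (roughly what sleep_on()/wake_up() do).  Put a wait_queue
 * entry for the current task on the list, mark the task non-runnable,
 * and let schedule() switch away until someone calls wake_up() on the
 * same list.
 */
extern inline void wait_for_event_sketch(struct wait_queue ** p)
{
        struct wait_queue wait = { current, NULL };

        current->state = TASK_UNINTERRUPTIBLE;
        add_wait_queue(p, &wait);
        schedule();                     /* resumed by wake_up(p) */
        remove_wait_queue(p, &wait);
}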
 471 
 472 extern inline void select_wait(struct wait_queue ** wait_address, select_table * p)
 473 {
 474         struct select_table_entry * entry;
 475 
 476         if (!p || !wait_address)
 477                 return;
 478         if (p->nr >= __MAX_SELECT_TABLE_ENTRIES)
 479                 return;
 480         entry = p->entry + p->nr;
 481         entry->wait_address = wait_address;
 482         entry->wait.task = current;
 483         entry->wait.next = NULL;
 484         add_wait_queue(wait_address,&entry->wait);
 485         p->nr++;
 486 }
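
/*
 * Illustration only: how a driver's select() method typically calls
 * select_wait().  my_data_ready() and my_read_wait are assumed names;
 * SEL_IN comes from <linux/fs.h>.  Returning 1 means "ready now";
 * returning 0 after select_wait() lets do_select() sleep until the
 * driver wakes my_read_wait up.
 */
extern int my_data_ready(void);                 /* assumed helper */
extern struct wait_queue * my_read_wait;        /* assumed queue head */

extern inline int my_select_sketch(struct inode * inode, struct file * file,
                                   int sel_type, select_table * wait)
{
        if (sel_type != SEL_IN)
                return 0;
        if (my_data_ready())
                return 1;
        select_wait(&my_read_wait, wait);
        return 0;
}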
 487 
 488 extern void __down(struct semaphore * sem);
 489 
 490 /*
 491  * These are not yet interrupt-safe
 492  */
 493 extern inline void down(struct semaphore * sem)
 494 {
 495         if (sem->count <= 0)
 496                 __down(sem);
 497         sem->count--;
 498 }
 499 
 500 extern inline void up(struct semaphore * sem)
 501 {
 502         sem->count++;
 503         wake_up(&sem->wait);
 504 }       
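
/*
 * Illustration only: down()/up() used to serialize access to shared
 * data.  my_sem is assumed to be a struct semaphore initialized
 * elsewhere with count = 1, so it behaves as a sleeping mutex.
 */
extern struct semaphore my_sem;                 /* assumed, count == 1 */

extern inline void touch_shared_data_sketch(void)
{
        down(&my_sem);          /* sleep here if someone else holds it */
        /* ... critical section ... */
        up(&my_sem);            /* wake up one waiter, if any */
}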
 505 
 506 #define REMOVE_LINKS(p) do { unsigned long flags; \
 507         save_flags(flags) ; cli(); \
 508         (p)->next_task->prev_task = (p)->prev_task; \
 509         (p)->prev_task->next_task = (p)->next_task; \
 510         restore_flags(flags); \
 511         if ((p)->p_osptr) \
 512                 (p)->p_osptr->p_ysptr = (p)->p_ysptr; \
 513         if ((p)->p_ysptr) \
 514                 (p)->p_ysptr->p_osptr = (p)->p_osptr; \
 515         else \
 516                 (p)->p_pptr->p_cptr = (p)->p_osptr; \
 517         } while (0)
 518 
 519 #define SET_LINKS(p) do { unsigned long flags; \
 520         save_flags(flags); cli(); \
 521         (p)->next_task = &init_task; \
 522         (p)->prev_task = init_task.prev_task; \
 523         init_task.prev_task->next_task = (p); \
 524         init_task.prev_task = (p); \
 525         restore_flags(flags); \
 526         (p)->p_ysptr = NULL; \
 527         if (((p)->p_osptr = (p)->p_pptr->p_cptr) != NULL) \
 528                 (p)->p_osptr->p_ysptr = p; \
 529         (p)->p_pptr->p_cptr = p; \
 530         } while (0)
 531 
 532 #define for_each_task(p) \
 533         for (p = &init_task ; (p = p->next_task) != &init_task ; )
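
/*
 * Illustration only: walking every task with for_each_task(), here to
 * look a task up by pid.  Note that the loop advances past init_task
 * before the first test, so the idle task (pid 0) itself is never
 * visited.
 */
extern inline struct task_struct * find_task_by_pid_sketch(int pid)
{
        struct task_struct * p;

        for_each_task(p) {
                if (p->pid == pid)
                        return p;
        }
        return NULL;
}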
 534 
 535 #endif /* __KERNEL__ */
 536 
 537 #endif
