root/arch/i386/kernel/process.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. disable_hlt
  2. enable_hlt
  3. hard_idle
  4. sys_idle
  5. sys_idle
  6. cpu_idle
  7. kb_wait
  8. hard_reset_now
  9. show_regs
  10. exit_thread
  11. flush_thread
  12. release_thread
  13. copy_thread
  14. dump_fpu
  15. dump_thread
  16. sys_fork
  17. sys_clone
  18. sys_execve

   1 /*
   2  *  linux/arch/i386/kernel/process.c
   3  *
   4  *  Copyright (C) 1995  Linus Torvalds
   5  */
   6 
   7 /*
   8  * This file handles the architecture-dependent parts of process handling..
   9  */
  10 
  11 #define __KERNEL_SYSCALLS__
  12 #include <stdarg.h>
  13 
  14 #include <linux/errno.h>
  15 #include <linux/sched.h>
  16 #include <linux/kernel.h>
  17 #include <linux/mm.h>
  18 #include <linux/stddef.h>
  19 #include <linux/unistd.h>
  20 #include <linux/ptrace.h>
  21 #include <linux/malloc.h>
  22 #include <linux/ldt.h>
  23 #include <linux/user.h>
  24 #include <linux/a.out.h>
  25 #include <linux/interrupt.h>
  26 #include <linux/config.h>
  27 #include <linux/unistd.h>
  28 
  29 #include <asm/segment.h>
  30 #include <asm/pgtable.h>
  31 #include <asm/system.h>
  32 #include <asm/io.h>
  33 #include <linux/smp.h>
  34 
  35 
  36 asmlinkage void ret_from_sys_call(void) __asm__("ret_from_sys_call");
  37 
  38 #ifdef CONFIG_APM
  39 extern int  apm_do_idle(void);
  40 extern void apm_do_busy(void);
  41 #endif
  42 
  43 static int hlt_counter=0;
  44 
  45 #define HARD_IDLE_TIMEOUT (HZ / 3)
  46 
  47 void disable_hlt(void)
     /* [previous][next][first][last][top][bottom][index][help] */
  48 {
  49         hlt_counter++;
  50 }
  51 
  52 void enable_hlt(void)
     /* [previous][next][first][last][top][bottom][index][help] */
  53 {
  54         hlt_counter--;
  55 }
  56 
  57 #ifndef __SMP__
  58 
/*
 * Deep idle, entered from sys_idle() once the CPU has been idle longer
 * than HARD_IDLE_TIMEOUT.  Spins until a reschedule is pending, halting
 * the CPU (and, with CONFIG_APM, letting the APM BIOS idle it) between
 * checks.
 */
static void hard_idle(void)
{
        while (!need_resched) {
                if (hlt_works_ok && !hlt_counter) {
#ifdef CONFIG_APM
                                /* If the APM BIOS is not enabled, or there
                                 is an error calling the idle routine, we
                                 should hlt if possible.  We need to check
                                 need_resched again because an interrupt
                                 may have occurred in apm_do_idle(). */
                        start_bh_atomic();
                        if (!apm_do_idle() && !need_resched)
                                __asm__("hlt");
                        end_bh_atomic();
#else
                        __asm__("hlt");
#endif
                }
                if (need_resched) 
                        break;
                schedule();
        }
#ifdef CONFIG_APM
        /* Leaving deep idle: tell the APM BIOS the CPU is busy again. */
        apm_do_busy();
#endif
}
  85 
  86 /*
  87  * The idle loop on a uniprocessor i386..
  88  */
  89  
/*
 * The idle loop on a uniprocessor i386: only the idle task (pid 0) may
 * enter.  Uses a light hlt for short idle stretches and switches to
 * hard_idle() after HARD_IDLE_TIMEOUT ticks of continuous idleness.
 * Never returns (except with -EPERM for a non-idle caller).
 */
asmlinkage int sys_idle(void)
{
        unsigned long start_idle = 0;   /* jiffies when this idle stretch began */

        if (current->pid != 0)
                return -EPERM;
        /* endless idle loop with no priority at all */
        current->counter = -100;
        for (;;) 
        {
                /*
                 *      We are locked at this point. So we can safely call
                 *      the APM bios knowing only one CPU at a time will do
                 *      so.
                 */
                if (!start_idle) 
                        start_idle = jiffies;
                if (jiffies - start_idle > HARD_IDLE_TIMEOUT) 
                {
                        /* Idle for a while already: go to deep idle. */
                        hard_idle();
                } 
                else 
                {
                        /* Light idle: one hlt, woken by any interrupt. */
                        if (hlt_works_ok && !hlt_counter && !need_resched)
                                __asm__("hlt");
                }
                /* A pending reschedule ends the current idle stretch. */
                if (need_resched) 
                        start_idle = 0;
                schedule();
        }
}
 121 
 122 #else
 123 
 124 /*
 125  *      In the SMP world we hlt outside of kernel syscall rather than within
 126  *      so as to get the right locking semantics.
 127  */
 128  
/*
 * SMP flavour of sys_idle(): the actual hlt happens outside the kernel
 * in cpu_idle() (see comment above), so this just drops priority and
 * reschedules.  Only the idle task (pid 0) may call it.
 */
asmlinkage int sys_idle(void)
{
        if(current->pid != 0)
                return -EPERM;
#ifdef __SMP_PROF__
        /* Fold the spin count accumulated during syscalls into this
           CPU's idle-spin statistics. */
        smp_spins_sys_idle[smp_processor_id()]+=
          smp_spins_syscall_cur[smp_processor_id()];
#endif
        /* No priority at all; just yield the CPU. */
        current->counter= -100;
        schedule();
        return 0;
}
 141 
 142 /*
 143  *      This is being executed in task 0 'user space'.
 144  */
 145 
/*
 * Per-CPU idle loop, executed in task 0 'user space' on SMP.  Halts this
 * CPU when its hlt works, hlt is not inhibited, and no reschedule is
 * pending, then calls idle() — presumably the idle-syscall stub pulled in
 * via __KERNEL_SYSCALLS__, landing in sys_idle() above; confirm against
 * the unistd.h stubs.  Never returns.
 */
int cpu_idle(void *unused)
{
        while(1)
        {
                if(cpu_data[smp_processor_id()].hlt_works_ok && !hlt_counter && !need_resched)
                        __asm("hlt");
                idle();
        }
}
 155 
 156 #endif
 157 
 158 /*
 159  * This routine reboots the machine by asking the keyboard
 160  * controller to pulse the reset-line low. We try that for a while,
 161  * and if it doesn't work, we do some other stupid things.
 162  */
 163 static long no_idt[2] = {0, 0};
 164 
 165 static inline void kb_wait(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 166 {
 167         int i;
 168 
 169         for (i=0; i<0x10000; i++)
 170                 if ((inb_p(0x64) & 0x02) == 0)
 171                         break;
 172 }
 173 
void hard_reset_now(void)
{
        int i, j;

        sti();
/* rebooting needs to touch the page at absolute addr 0 */
        pg0[0] = 7;
        /* 0x1234 at BIOS address 0x472 requests a warm boot — NOTE(review):
           standard PC BIOS convention; verify against the BIOS data area docs. */
        *((unsigned short *)0x472) = 0x1234;
        for (;;) {
                /* Repeatedly ask the keyboard controller to pulse the
                   CPU reset line, with a crude busy-wait between tries. */
                for (i=0; i<100; i++) {
                        kb_wait();
                        for(j = 0; j < 100000 ; j++)
                                /* nothing */;
                        outb(0xfe,0x64);         /* pulse reset low */
                }
                /* Keyboard reset did not take: load an empty IDT so the
                   next interrupt or fault triple-faults and resets the CPU. */
                __asm__ __volatile__("\tlidt %0": "=m" (no_idt));
        }
}
 192 
/* Dump a saved register frame to the kernel log (used for oops output). */
void show_regs(struct pt_regs * regs)
{
        printk("\n");
        printk("EIP: %04x:[<%08lx>]",0xffff & regs->cs,regs->eip);
        /* A nonzero RPL in CS means the frame came from user mode, in
           which case the saved SS:ESP are valid and worth printing. */
        if (regs->cs & 3)
                printk(" ESP: %04x:%08lx",0xffff & regs->ss,regs->esp);
        printk(" EFLAGS: %08lx\n",regs->eflags);
        printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
                regs->eax,regs->ebx,regs->ecx,regs->edx);
        printk("ESI: %08lx EDI: %08lx EBP: %08lx",
                regs->esi, regs->edi, regs->ebp);
        printk(" DS: %04x ES: %04x FS: %04x GS: %04x\n",
                0xffff & regs->ds,0xffff & regs->es,
                0xffff & regs->fs,0xffff & regs->gs);
}
 208 
 209 /*
 210  * Free current thread data structures etc..
 211  */
 212 
/*
 * Release the architecture-specific parts of the exiting task: lazy FPU
 * ownership, the local segment registers, and the per-process LDT (which
 * copy_thread allocated with vmalloc, hence vfree here).
 */
void exit_thread(void)
{
        /* forget lazy i387 state */
        if (last_task_used_math == current)
                last_task_used_math = NULL;
        /* forget local segments */
        __asm__ __volatile__("mov %w0,%%fs ; mov %w0,%%gs ; lldt %w0"
                : /* no outputs */
                : "r" (0));
        current->tss.ldt = 0;
        if (current->ldt) {
                /* Clear the pointer before freeing so nothing can see a
                   stale LDT pointer. */
                void * ldt = current->ldt;
                current->ldt = NULL;
                vfree(ldt);
        }
}
 229 
 230 void flush_thread(void)
     /* [previous][next][first][last][top][bottom][index][help] */
 231 {
 232         int i;
 233 
 234         if (current->ldt) {
 235                 free_page((unsigned long) current->ldt);
 236                 current->ldt = NULL;
 237                 for (i=1 ; i<NR_TASKS ; i++) {
 238                         if (task[i] == current)  {
 239                                 set_ldt_desc(gdt+(i<<1)+
 240                                              FIRST_LDT_ENTRY,&default_ldt, 1);
 241                                 load_ldt(i);
 242                         }
 243                 }       
 244         }
 245 
 246         for (i=0 ; i<8 ; i++)
 247                 current->debugreg[i] = 0;
 248 }
 249 
/* Final per-architecture cleanup when a dead task's struct is released.
   Nothing left to free on i386 — exit_thread() already released the
   LDT and FPU state. */
void release_thread(struct task_struct *dead_task)
{
}
 253 
/*
 * Set up the new task's TSS and kernel stack for fork/clone.
 * 'nr' is the task slot (indexes the GDT for TSS/LDT descriptors),
 * 'esp' the child's user stack pointer, 'p' the new task (already a
 * byte copy of the parent), 'regs' the parent's saved register frame.
 */
void copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
        struct task_struct * p, struct pt_regs * regs)
{
        int i;
        struct pt_regs * childregs;

        p->tss.es = KERNEL_DS;
        p->tss.cs = KERNEL_CS;
        p->tss.ss = KERNEL_DS;
        p->tss.ds = KERNEL_DS;
        p->tss.fs = USER_DS;
        p->tss.gs = KERNEL_DS;
        p->tss.ss0 = KERNEL_DS;
        p->tss.esp0 = p->kernel_stack_page + PAGE_SIZE;
        p->tss.tr = _TSS(nr);
        /* Place a copy of the parent's register frame at the top of the
           child's kernel stack; the child resumes in ret_from_sys_call. */
        childregs = ((struct pt_regs *) (p->kernel_stack_page + PAGE_SIZE)) - 1;
        p->tss.esp = (unsigned long) childregs;
        p->tss.eip = (unsigned long) ret_from_sys_call;
        *childregs = *regs;
        childregs->eax = 0;             /* child sees fork() return 0 */
        childregs->esp = esp;
        p->tss.back_link = 0;
        p->tss.eflags = regs->eflags & 0xffffcfff;      /* iopl is always 0 for a new process */
        p->tss.ldt = _LDT(nr);
        /* p->ldt was byte-copied from the parent; if the parent had a
           private LDT, give the child its own vmalloc'd copy.  On
           allocation failure p->ldt becomes NULL and the child silently
           falls back to the default LDT below. */
        if (p->ldt) {
                p->ldt = (struct desc_struct*) vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
                if (p->ldt != NULL)
                        memcpy(p->ldt, current->ldt, LDT_ENTRIES*LDT_ENTRY_SIZE);
        }
        set_tss_desc(gdt+(nr<<1)+FIRST_TSS_ENTRY,&(p->tss));
        if (p->ldt)
                set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY,p->ldt, 512);
        else
                set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY,&default_ldt, 1);
        /* Point the TSS at its io_bitmap and set every bit: user-mode
           I/O-port access is denied by default. */
        p->tss.bitmap = offsetof(struct thread_struct,io_bitmap);
        for (i = 0; i < IO_BITMAP_SIZE+1 ; i++) /* IO bitmap is actually SIZE+1 */
                p->tss.io_bitmap[i] = ~0;
        /* If the parent currently owns the FPU, snapshot its state into
           the child's TSS and restore it for the parent. */
        if (last_task_used_math == current)
                __asm__("clts ; fnsave %0 ; frstor %0":"=m" (p->tss.i387));
}
 294 
 295 /*
 296  * fill in the fpu structure for a core dump..
 297  */
/*
 * Fill in the fpu structure for a core dump.
 * Returns nonzero when *fpu was filled with valid 387 state.
 */
int dump_fpu (struct user_i387_struct* fpu)
{
        int fpvalid;

/* Flag indicating the math stuff is valid. We don't support this for the
   soft-float routines yet */
        if (hard_math) {
                if ((fpvalid = current->used_math) != 0) {
                        /* State still lives in the FPU registers: save it
                           straight into the caller's buffer; otherwise copy
                           the saved image from the TSS. */
                        if (last_task_used_math == current)
                                __asm__("clts ; fnsave %0": :"m" (*fpu));
                        else
                                memcpy(fpu,&current->tss.i387.hard,sizeof(*fpu));
                }
        } else {
                /* we should dump the emulator state here, but we need to
                   convert it into standard 387 format first.. */
                fpvalid = 0;
        }

        return fpvalid;
}
 319 
 320 /*
 321  * fill in the user structure for a core dump..
 322  */
/*
 * Fill in the user structure for a core dump: segment sizes (in pages),
 * debug registers, the register frame and the FPU state.
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
        int i;

/* changed the size calculations - should hopefully work better. lbt */
        dump->magic = CMAGIC;
        dump->start_code = 0;
        /* Stack start rounded down to a page boundary. */
        dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
        dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
        /* Data size: brk rounded up to a page, minus the text pages. */
        dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
        dump->u_dsize -= dump->u_tsize;
        dump->u_ssize = 0;
        for (i = 0; i < 8; i++)
                dump->u_debugreg[i] = current->debugreg[i];  

        /* Stack pages from the (rounded) stack pointer up to TASK_SIZE. */
        if (dump->start_stack < TASK_SIZE)
                dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

        dump->regs = *regs;

        dump->u_fpvalid = dump_fpu (&dump->i387);
}
 345 
 346 asmlinkage int sys_fork(struct pt_regs regs)
     /* [previous][next][first][last][top][bottom][index][help] */
 347 {
 348         return do_fork(SIGCHLD, regs.esp, &regs);
 349 }
 350 
 351 asmlinkage int sys_clone(struct pt_regs regs)
     /* [previous][next][first][last][top][bottom][index][help] */
 352 {
 353         unsigned long clone_flags;
 354         unsigned long newsp;
 355 
 356         clone_flags = regs.ebx;
 357         newsp = regs.ecx;
 358         if (!newsp)
 359                 newsp = regs.esp;
 360         return do_fork(clone_flags, newsp, &regs);
 361 }
 362 
 363 /*
 364  * sys_execve() executes a new program.
 365  */
 366 asmlinkage int sys_execve(struct pt_regs regs)
     /* [previous][next][first][last][top][bottom][index][help] */
 367 {
 368         int error;
 369         char * filename;
 370 
 371         error = getname((char *) regs.ebx, &filename);
 372         if (error)
 373                 return error;
 374         error = do_execve(filename, (char **) regs.ecx, (char **) regs.edx, &regs);
 375         putname(filename);
 376         return error;
 377 }

/* [previous][next][first][last][top][bottom][index][help] */