root/arch/i386/kernel/process.c


DEFINITIONS

This source file includes the following definitions.
  1. disable_hlt
  2. enable_hlt
  3. hard_idle
  4. sys_idle (uniprocessor)
  5. sys_idle (SMP)
  6. cpu_idle
  7. kb_wait
  8. hard_reset_now
  9. show_regs
  10. exit_thread
  11. flush_thread
  12. release_thread
  13. copy_thread
  14. dump_fpu
  15. dump_thread
  16. sys_fork
  17. sys_clone
  18. sys_execve

/*
 *  linux/arch/i386/kernel/process.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#define __KERNEL_SYSCALLS__
#include <stdarg.h>

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/malloc.h>
#include <linux/ldt.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/config.h>

#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <linux/smp.h>


asmlinkage void ret_from_sys_call(void) __asm__("ret_from_sys_call");

#ifdef CONFIG_APM
extern int  apm_do_idle(void);
extern void apm_do_busy(void);
#endif

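/*
 * hlt_counter lets other kernel code temporarily forbid the idle loop
 * from using the "hlt" instruction; disable_hlt()/enable_hlt() calls
 * nest, and hlt is only used while the counter is zero.
 */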
static int hlt_counter=0;

#define HARD_IDLE_TIMEOUT (HZ / 3)

void disable_hlt(void)
{
        hlt_counter++;
}

void enable_hlt(void)
{
        hlt_counter--;
}

#ifndef __SMP__

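/*
 * Deeper idle state: once the CPU has been idle for a while, sys_idle()
 * switches to this loop, which uses the APM BIOS idle call when
 * configured and plain hlt otherwise, rechecking need_resched as it goes.
 */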
static void hard_idle(void)
{
        while (!need_resched) {
                if (hlt_works_ok && !hlt_counter) {
#ifdef CONFIG_APM
                        /* If the APM BIOS is not enabled, or there
                           is an error calling the idle routine, we
                           should hlt if possible.  We need to check
                           need_resched again because an interrupt
                           may have occurred in apm_do_idle(). */
                        start_bh_atomic();
                        if (!apm_do_idle() && !need_resched)
                                __asm__("hlt");
                        end_bh_atomic();
#else
                        __asm__("hlt");
#endif
                }
                if (need_resched)
                        break;
                schedule();
        }
#ifdef CONFIG_APM
        apm_do_busy();
#endif
}

/*
 * The idle loop on a uniprocessor i386..
 */

asmlinkage int sys_idle(void)
{
        unsigned long start_idle = 0;

        if (current->pid != 0)
                return -EPERM;
        /* endless idle loop with no priority at all */
        current->counter = -100;
        for (;;)
        {
                /*
                 *      We are locked at this point. So we can safely call
                 *      the APM bios knowing only one CPU at a time will do
                 *      so.
                 */
                if (!start_idle)
                        start_idle = jiffies;
                if (jiffies - start_idle > HARD_IDLE_TIMEOUT)
                {
                        hard_idle();
                }
                else
                {
                        if (hlt_works_ok && !hlt_counter && !need_resched)
                                __asm__("hlt");
                }
                if (need_resched)
                        start_idle = 0;
                schedule();
        }
}

#else

/*
 *      In the SMP world we hlt outside of kernel syscall rather than within
 *      so as to get the right locking semantics.
 */

asmlinkage int sys_idle(void)
{
        if(current->pid != 0)
                return -EPERM;
        current->counter= -100;
        schedule();
        return 0;
}

/*
 *      This is being executed in task 0 'user space'.
 */

int cpu_idle(void *unused)
{
        while(1)
        {
                if(cpu_data[smp_processor_id()].hlt_works_ok && !hlt_counter && !need_resched)
                        __asm__("hlt");
                idle();
        }
}

#endif

/*
 * This routine reboots the machine by asking the keyboard
 * controller to pulse the reset-line low. We try that for a while,
 * and if it doesn't work, we do some other stupid things.
 */
static long no_idt[2] = {0, 0};

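/*
 * Wait for the keyboard controller to be ready for another command:
 * poll the status port (0x64) until the input-buffer-full bit clears,
 * giving up after a bounded number of reads.
 */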
static inline void kb_wait(void)
{
        int i;

        for (i=0; i<0x10000; i++)
                if ((inb_p(0x64) & 0x02) == 0)
                        break;
}

void hard_reset_now(void)
{
        int i, j;

        sti();
/* rebooting needs to touch the page at absolute addr 0 */
        pg0[0] = 7;
        *((unsigned short *)0x472) = 0x1234;
        for (;;) {
                for (i=0; i<100; i++) {
                        kb_wait();
                        for(j = 0; j < 100000 ; j++)
                                /* nothing */;
                        outb(0xfe,0x64);         /* pulse reset low */
                }
                __asm__ __volatile__("\tlidt %0": "=m" (no_idt));
        }
}

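/*
 * Print the saved register state from a struct pt_regs; user-mode
 * SS:ESP are only printed when the saved CS is a user segment.
 */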
void show_regs(struct pt_regs * regs)
{
        printk("\n");
        printk("EIP: %04x:[<%08lx>]",0xffff & regs->cs,regs->eip);
        if (regs->cs & 3)
                printk(" ESP: %04x:%08lx",0xffff & regs->ss,regs->esp);
        printk(" EFLAGS: %08lx\n",regs->eflags);
        printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
                regs->eax,regs->ebx,regs->ecx,regs->edx);
        printk("ESI: %08lx EDI: %08lx EBP: %08lx",
                regs->esi, regs->edi, regs->ebp);
        printk(" DS: %04x ES: %04x FS: %04x GS: %04x\n",
                0xffff & regs->ds,0xffff & regs->es,
                0xffff & regs->fs,0xffff & regs->gs);
}

/*
 * Free current thread data structures etc..
 */

void exit_thread(void)
{
        /* forget lazy i387 state */
        if (last_task_used_math == current)
                last_task_used_math = NULL;
        /* forget local segments */
        __asm__ __volatile__("mov %w0,%%fs ; mov %w0,%%gs ; lldt %w0"
                : /* no outputs */
                : "r" (0));
        current->tss.ldt = 0;
        if (current->ldt) {
                void * ldt = current->ldt;
                current->ldt = NULL;
                vfree(ldt);
        }
}

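/*
 * Called when a process execs a new image: free the private LDT,
 * point this task's LDT descriptor in the GDT back at the default
 * LDT, and clear the debug registers.
 */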
void flush_thread(void)
{
        int i;

        if (current->ldt) {
                vfree(current->ldt);
                current->ldt = NULL;
                for (i=1 ; i<NR_TASKS ; i++) {
                        if (task[i] == current)  {
                                set_ldt_desc(gdt+(i<<1)+
                                             FIRST_LDT_ENTRY,&default_ldt, 1);
                                load_ldt(i);
                        }
                }
        }

        for (i=0 ; i<8 ; i++)
                current->debugreg[i] = 0;
}

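/*
 * Called when the task is finally reaped; nothing extra to free on
 * the i386, since exit_thread() already released the LDT.
 */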
void release_thread(struct task_struct *dead_task)
{
}

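/*
 * Set up the TSS and kernel stack of a newly forked task: the child
 * gets a copy of the parent's register frame with eax forced to 0
 * (fork() returns 0 in the child) and starts executing at
 * ret_from_sys_call.
 */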
void copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
        struct task_struct * p, struct pt_regs * regs)
{
        int i;
        struct pt_regs * childregs;

        p->tss.es = KERNEL_DS;
        p->tss.cs = KERNEL_CS;
        p->tss.ss = KERNEL_DS;
        p->tss.ds = KERNEL_DS;
        p->tss.fs = USER_DS;
        p->tss.gs = KERNEL_DS;
        p->tss.ss0 = KERNEL_DS;
        p->tss.esp0 = p->kernel_stack_page + PAGE_SIZE;
        p->tss.tr = _TSS(nr);
        childregs = ((struct pt_regs *) (p->kernel_stack_page + PAGE_SIZE)) - 1;
        p->tss.esp = (unsigned long) childregs;
        p->tss.eip = (unsigned long) ret_from_sys_call;
        *childregs = *regs;
        childregs->eax = 0;
        childregs->esp = esp;
        p->tss.back_link = 0;
        p->tss.eflags = regs->eflags & 0xffffcfff;      /* iopl is always 0 for a new process */
        p->tss.ldt = _LDT(nr);
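        /*
         * p->ldt was copied from the parent along with the rest of the
         * task_struct, so a non-NULL value means the parent has a private
         * LDT; give the child its own copy rather than sharing the pointer.
         */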
        if (p->ldt) {
                p->ldt = (struct desc_struct*) vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
                if (p->ldt != NULL)
                        memcpy(p->ldt, current->ldt, LDT_ENTRIES*LDT_ENTRY_SIZE);
        }
        set_tss_desc(gdt+(nr<<1)+FIRST_TSS_ENTRY,&(p->tss));
        if (p->ldt)
                set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY,p->ldt, 512);
        else
                set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY,&default_ldt, 1);
        p->tss.bitmap = offsetof(struct thread_struct,io_bitmap);
        for (i = 0; i < IO_BITMAP_SIZE+1 ; i++) /* IO bitmap is actually SIZE+1 */
                p->tss.io_bitmap[i] = ~0;
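        /*
         * If the parent currently owns the FPU, save its state into the
         * child's TSS and reload it, so both tasks see the same i387 state.
         */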
        if (last_task_used_math == current)
                __asm__("clts ; fnsave %0 ; frstor %0":"=m" (p->tss.i387));
}

/*
 * fill in the fpu structure for a core dump..
 */
int dump_fpu (struct user_i387_struct* fpu)
{
        int fpvalid;

/* Flag indicating the math stuff is valid. We don't support this for the
   soft-float routines yet */
        if (hard_math) {
                if ((fpvalid = current->used_math) != 0) {
                        if (last_task_used_math == current)
                                __asm__("clts ; fnsave %0": :"m" (*fpu));
                        else
                                memcpy(fpu,&current->tss.i387.hard,sizeof(*fpu));
                }
        } else {
                /* we should dump the emulator state here, but we need to
                   convert it into standard 387 format first.. */
                fpvalid = 0;
        }

        return fpvalid;
}

/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
        int i;

/* changed the size calculations - should hopefully work better. lbt */
        dump->magic = CMAGIC;
        dump->start_code = 0;
        dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
        dump->u_tsize = ((unsigned long) current->mm->end_code) >> 12;
        dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> 12;
        dump->u_dsize -= dump->u_tsize;
        dump->u_ssize = 0;
        for (i = 0; i < 8; i++)
                dump->u_debugreg[i] = current->debugreg[i];

        if (dump->start_stack < TASK_SIZE) {
                dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> 12;
        }

        dump->regs = *regs;

        dump->u_fpvalid = dump_fpu (&dump->i387);
}

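/*
 * i386 entry point for fork(): the full register frame is passed by
 * value so do_fork() can copy it into the child.
 */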
asmlinkage int sys_fork(struct pt_regs regs)
{
        return do_fork(SIGCHLD, regs.esp, &regs);
}

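/*
 * clone() entry point: ebx holds the clone flags and ecx the new
 * stack pointer for the child; if no stack is given the child keeps
 * using the parent's current esp.
 */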
asmlinkage int sys_clone(struct pt_regs regs)
{
        unsigned long clone_flags;
        unsigned long newsp;

        clone_flags = regs.ebx;
        newsp = regs.ecx;
        if (!newsp)
                newsp = regs.esp;
        return do_fork(clone_flags, newsp, &regs);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
        int error;
        char * filename;

        error = getname((char *) regs.ebx, &filename);
        if (error)
                return error;
        error = do_execve(filename, (char **) regs.ecx, (char **) regs.edx, &regs);
        putname(filename);
        return error;
}
