arch/i386/kernel/process.c


DEFINITIONS

This source file includes the following definitions:
  1. disable_hlt
  2. enable_hlt
  3. hard_idle
  4. sys_idle (uniprocessor version)
  5. sys_idle (SMP version)
  6. cpu_idle
  7. kb_wait
  8. hard_reset_now
  9. show_regs
  10. exit_thread
  11. flush_thread
  12. release_thread
  13. copy_thread
  14. dump_fpu
  15. dump_thread
  16. sys_fork
  17. sys_clone
  18. sys_execve

/*
 *  linux/arch/i386/kernel/process.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#define __KERNEL_SYSCALLS__
#include <stdarg.h>

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/malloc.h>
#include <linux/ldt.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/config.h>
#include <linux/unistd.h>

#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <linux/smp.h>


asmlinkage void ret_from_sys_call(void) __asm__("ret_from_sys_call");

#ifdef CONFIG_APM
extern int  apm_do_idle(void);
extern void apm_do_busy(void);
#endif

static int hlt_counter=0;

#define HARD_IDLE_TIMEOUT (HZ / 3)
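/*
 * The uniprocessor idle loop below only drops into hard_idle() (which
 * may also call the APM BIOS) after the CPU has been idle for more than
 * HARD_IDLE_TIMEOUT jiffies, i.e. roughly a third of a second.
 */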

void disable_hlt(void)
{
        hlt_counter++;
}

void enable_hlt(void)
{
        hlt_counter--;
}
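/*
 * Note: disable_hlt()/enable_hlt() nest via hlt_counter.  A driver that
 * cannot tolerate the CPU sitting in "hlt" (the floppy driver brackets
 * its DMA transfers this way, for example) does:
 *
 *      disable_hlt();
 *      ... hlt-sensitive work ...
 *      enable_hlt();
 *
 * The idle loops below only execute "hlt" while hlt_counter is zero.
 */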

#ifndef __SMP__

static void hard_idle(void)
{
        while (!need_resched) {
                if (hlt_works_ok && !hlt_counter) {
#ifdef CONFIG_APM
                                /* If the APM BIOS is not enabled, or there
                                 is an error calling the idle routine, we
                                 should hlt if possible.  We need to check
                                 need_resched again because an interrupt
                                 may have occurred in apm_do_idle(). */
                        start_bh_atomic();
                        if (!apm_do_idle() && !need_resched)
                                __asm__("hlt");
                        end_bh_atomic();
#else
                        __asm__("hlt");
#endif
                }
                if (need_resched)
                        break;
                schedule();
        }
#ifdef CONFIG_APM
        apm_do_busy();
#endif
}

/*
 * The idle loop on a uniprocessor i386..
 */

asmlinkage int sys_idle(void)
{
        unsigned long start_idle = 0;

        if (current->pid != 0)
                return -EPERM;
        /* endless idle loop with no priority at all */
        current->counter = -100;
        for (;;)
        {
                /*
                 *      We are locked at this point. So we can safely call
                 *      the APM bios knowing only one CPU at a time will do
                 *      so.
                 */
                if (!start_idle)
                        start_idle = jiffies;
                if (jiffies - start_idle > HARD_IDLE_TIMEOUT)
                {
                        hard_idle();
                }
                else
                {
                        if (hlt_works_ok && !hlt_counter && !need_resched)
                                __asm__("hlt");
                }
                if (need_resched)
                        start_idle = 0;
                schedule();
        }
}

#else

/*
 *      In the SMP world we hlt outside of kernel syscall rather than within
 *      so as to get the right locking semantics.
 */

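/*
 * Put another way: this SMP kernel holds a single global kernel lock for
 * the whole of a system call, so sys_idle() itself only drops the
 * priority and schedules.  The actual "hlt" happens in cpu_idle() below,
 * outside of any system call and therefore without the kernel lock held.
 */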
asmlinkage int sys_idle(void)
{
        if(current->pid != 0)
                return -EPERM;
#ifdef __SMP_PROF__
        smp_spins_sys_idle[smp_processor_id()]+=
          smp_spins_syscall_cur[smp_processor_id()];
#endif
        current->counter= -100;
        schedule();
        return 0;
}

/*
 *      This is being executed in task 0 'user space'.
 */

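/*
 * A rough sketch of the protocol used below, as far as it can be read
 * from this file: the low 31 bits of smp_process_available count
 * processes that have become runnable and are on offer to idle CPUs,
 * while bit 31 serves as a test-and-set lock (set_bit() returns the old
 * bit value).  While spinning for that lock a CPU still has to honour
 * pending TLB-invalidate requests in smp_invalidate_needed, otherwise
 * the CPU holding the lock could wait for us forever.  Once a process
 * has been claimed, idle() - the in-kernel system call stub made
 * available by the __KERNEL_SYSCALLS__ define at the top of this file -
 * enters sys_idle() and schedules.
 */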
int cpu_idle(void *unused)
{
        while(1)
        {
                if(cpu_data[smp_processor_id()].hlt_works_ok && !hlt_counter && !need_resched)
                        __asm("hlt");
                if(0==(0x7fffffff & smp_process_available))
                        continue;
                while(0x80000000 & smp_process_available);
                cli();
                while(set_bit(31,&smp_process_available))
                        while(test_bit(31,&smp_process_available))
                {
                        /*
                         *      Oops.. This is kind of important in some cases...
                         */
                        if(clear_bit(smp_processor_id(), &smp_invalidate_needed))
                                local_flush_tlb();
                }
                if (0==(0x7fffffff & smp_process_available)){
                        clear_bit(31,&smp_process_available);
                        sti();
                        continue;
                }
                smp_process_available--;
                clear_bit(31,&smp_process_available);
                sti();
                idle();
        }
}

#endif

/*
 * This routine reboots the machine by asking the keyboard
 * controller to pulse the reset-line low. We try that for a while,
 * and if it doesn't work, we do some other stupid things.
 */
static long no_idt[2] = {0, 0};

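/*
 * Wait (bounded) for the keyboard controller's input buffer to drain:
 * bit 1 of the status register at port 0x64 stays set while the
 * controller has unread input, so a new command must not be sent until
 * it clears (or we give up after 0x10000 polls).
 */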
static inline void kb_wait(void)
{
        int i;

        for (i=0; i<0x10000; i++)
                if ((inb_p(0x64) & 0x02) == 0)
                        break;
}

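/*
 * How the reset is actually done: pg0[0] = 7 maps the page at physical
 * address 0 (present and writable) so the BIOS "warm boot" flag at
 * 0x472 can be written; the value 0x1234 there tells the BIOS to skip
 * the memory test on the next boot.  Writing 0xfe to the keyboard
 * controller's command port 0x64 pulses the CPU reset line.  If that
 * never takes effect, loading an empty IDT (no_idt) guarantees that the
 * next interrupt or exception triple-faults the processor, which also
 * resets it.
 */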
void hard_reset_now(void)
{
        int i, j;

        sti();
/* rebooting needs to touch the page at absolute addr 0 */
        pg0[0] = 7;
        *((unsigned short *)0x472) = 0x1234;
        for (;;) {
                for (i=0; i<100; i++) {
                        kb_wait();
                        for(j = 0; j < 100000 ; j++)
                                /* nothing */;
                        outb(0xfe,0x64);         /* pulse reset low */
                }
                __asm__ __volatile__("\tlidt %0": "=m" (no_idt));
        }
}

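/*
 * Note that the saved SS:ESP pair in the pt_regs frame is only valid if
 * the trap came from user mode (the CPU pushes them only on a privilege
 * level change), hence the "regs->cs & 3" test before printing it.
 */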
void show_regs(struct pt_regs * regs)
{
        printk("\n");
        printk("EIP: %04x:[<%08lx>]",0xffff & regs->cs,regs->eip);
        if (regs->cs & 3)
                printk(" ESP: %04x:%08lx",0xffff & regs->ss,regs->esp);
        printk(" EFLAGS: %08lx\n",regs->eflags);
        printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
                regs->eax,regs->ebx,regs->ecx,regs->edx);
        printk("ESI: %08lx EDI: %08lx EBP: %08lx",
                regs->esi, regs->edi, regs->ebp);
        printk(" DS: %04x ES: %04x FS: %04x GS: %04x\n",
                0xffff & regs->ds,0xffff & regs->es,
                0xffff & regs->fs,0xffff & regs->gs);
}

/*
 * Free current thread data structures etc..
 */

void exit_thread(void)
{
        /* forget lazy i387 state */
        if (last_task_used_math == current)
                last_task_used_math = NULL;
        /* forget local segments */
        __asm__ __volatile__("mov %w0,%%fs ; mov %w0,%%gs ; lldt %w0"
                : /* no outputs */
                : "r" (0));
        current->tss.ldt = 0;
        if (current->ldt) {
                void * ldt = current->ldt;
                current->ldt = NULL;
                vfree(ldt);
        }
}

void flush_thread(void)
{
        int i;

        if (current->ldt) {
                free_page((unsigned long) current->ldt);
                current->ldt = NULL;
                for (i=1 ; i<NR_TASKS ; i++) {
                        if (task[i] == current)  {
                                set_ldt_desc(gdt+(i<<1)+
                                             FIRST_LDT_ENTRY,&default_ldt, 1);
                                load_ldt(i);
                        }
                }
        }

        for (i=0 ; i<8 ; i++)
                current->debugreg[i] = 0;
}

void release_thread(struct task_struct *dead_task)
{
}

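/*
 * Set up the new task's TSS and kernel stack.  The child gets a copy of
 * the parent's register frame on its own kernel stack, with eax forced
 * to 0 (so fork() returns 0 in the child) and esp set to the requested
 * user stack.  tss.eip points at ret_from_sys_call, so the first time
 * the child is switched to it simply "returns" to user mode.  The iopl
 * bits (12-13) of eflags are cleared: a new process never inherits I/O
 * privilege.  If the parent had a private LDT it is duplicated for the
 * child, and the parent's FPU state is copied into the child if the FPU
 * still holds it (lazy FPU switching).
 */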
void copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
        struct task_struct * p, struct pt_regs * regs)
{
        int i;
        struct pt_regs * childregs;

        p->tss.es = KERNEL_DS;
        p->tss.cs = KERNEL_CS;
        p->tss.ss = KERNEL_DS;
        p->tss.ds = KERNEL_DS;
        p->tss.fs = USER_DS;
        p->tss.gs = KERNEL_DS;
        p->tss.ss0 = KERNEL_DS;
        p->tss.esp0 = p->kernel_stack_page + PAGE_SIZE;
        p->tss.tr = _TSS(nr);
        childregs = ((struct pt_regs *) (p->kernel_stack_page + PAGE_SIZE)) - 1;
        p->tss.esp = (unsigned long) childregs;
        p->tss.eip = (unsigned long) ret_from_sys_call;
        *childregs = *regs;
        childregs->eax = 0;
        childregs->esp = esp;
        p->tss.back_link = 0;
        p->tss.eflags = regs->eflags & 0xffffcfff;      /* iopl is always 0 for a new process */
        p->tss.ldt = _LDT(nr);
        if (p->ldt) {
                p->ldt = (struct desc_struct*) vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
                if (p->ldt != NULL)
                        memcpy(p->ldt, current->ldt, LDT_ENTRIES*LDT_ENTRY_SIZE);
        }
        set_tss_desc(gdt+(nr<<1)+FIRST_TSS_ENTRY,&(p->tss));
        if (p->ldt)
                set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY,p->ldt, 512);
        else
                set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY,&default_ldt, 1);
        p->tss.bitmap = offsetof(struct thread_struct,io_bitmap);
        for (i = 0; i < IO_BITMAP_SIZE+1 ; i++) /* IO bitmap is actually SIZE+1 */
                p->tss.io_bitmap[i] = ~0;
        if (last_task_used_math == current)
                __asm__("clts ; fnsave %0 ; frstor %0":"=m" (p->tss.i387));
}

/*
 * fill in the fpu structure for a core dump..
 */
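/*
 * With lazy FPU switching the live register state may still be sitting
 * in the FPU: if last_task_used_math == current it has to be saved with
 * fnsave first, otherwise the copy already in tss.i387.hard is current.
 */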
int dump_fpu (struct user_i387_struct* fpu)
{
        int fpvalid;

/* Flag indicating the math stuff is valid. We don't support this for the
   soft-float routines yet */
        if (hard_math) {
                if ((fpvalid = current->used_math) != 0) {
                        if (last_task_used_math == current)
                                __asm__("clts ; fnsave %0": :"m" (*fpu));
                        else
                                memcpy(fpu,&current->tss.i387.hard,sizeof(*fpu));
                }
        } else {
                /* we should dump the emulator state here, but we need to
                   convert it into standard 387 format first.. */
                fpvalid = 0;
        }

        return fpvalid;
}

/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
        int i;

/* changed the size calculations - should hopefully work better. lbt */
        dump->magic = CMAGIC;
        dump->start_code = 0;
        dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
        dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
        dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
        dump->u_dsize -= dump->u_tsize;
        dump->u_ssize = 0;
        for (i = 0; i < 8; i++)
                dump->u_debugreg[i] = current->debugreg[i];

        if (dump->start_stack < TASK_SIZE)
                dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

        dump->regs = *regs;

        dump->u_fpvalid = dump_fpu (&dump->i387);
}

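/*
 * The three wrappers below are entered from the assembly system-call
 * code, which has already saved the full register frame on the kernel
 * stack; declaring it as a "struct pt_regs" value parameter lets C read
 * and modify that frame directly.  Following the i386 system-call
 * convention, the user's arguments arrive in ebx, ecx and edx.
 */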
asmlinkage int sys_fork(struct pt_regs regs)
{
        return do_fork(SIGCHLD, regs.esp, &regs);
}

asmlinkage int sys_clone(struct pt_regs regs)
{
        unsigned long clone_flags;
        unsigned long newsp;

        clone_flags = regs.ebx;
        newsp = regs.ecx;
        if (!newsp)
                newsp = regs.esp;
        return do_fork(clone_flags, newsp, &regs);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
        int error;
        char * filename;

        error = getname((char *) regs.ebx, &filename);
        if (error)
                return error;
        error = do_execve(filename, (char **) regs.ecx, (char **) regs.edx, &regs);
        putname(filename);
        return error;
}
