root/arch/i386/kernel/traps.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. console_verbose
  2. die_if_kernel
  3. DO_ERROR
  4. do_nmi
  5. do_debug
  6. math_error
  7. do_coprocessor_error
  8. math_state_restore
  9. math_emulate
  10. trap_init

   1 /*
   2  *  linux/arch/i386/traps.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  * 'Traps.c' handles hardware traps and faults after we have saved some
   9  * state in 'asm.s'. Currently mostly a debugging-aid, will be extended
  10  * to mainly kill the offending process (probably by giving it a signal,
  11  * but possibly by killing it outright if necessary).
  12  */
  13 #include <linux/config.h>
  14 #include <linux/head.h>
  15 #include <linux/sched.h>
  16 #include <linux/kernel.h>
  17 #include <linux/string.h>
  18 #include <linux/errno.h>
  19 #include <linux/ptrace.h>
  20 #include <linux/config.h>
  21 #include <linux/timer.h>
  22 #include <linux/mm.h>
  23 
  24 #include <asm/system.h>
  25 #include <asm/segment.h>
  26 #include <asm/io.h>
  27 
/* Low-level entry stubs -- presumably defined in the assembly entry code
 * (see the header comment about asm.s).  system_call is installed on the
 * int 0x80 gate and lcall7 on the default LDT's call gate in trap_init(). */
asmlinkage int system_call(void);
asmlinkage void lcall7(void);
/* All-zero shared default LDT descriptor; installed into the GDT by trap_init(). */
struct desc_struct default_ldt = { 0, 0 };
  31 
/*
 * Raise the console log level to its maximum so that everything the
 * oops code below prints actually reaches the console.
 */
static inline void console_verbose(void)
{
        extern int console_loglevel;
        console_loglevel = 15;
}
  37 
/*
 * Template for the simple trap handlers: record the trap number and
 * error code in the task's TSS, queue the signal, and let
 * die_if_kernel() print an oops (and kill us) if the trap happened in
 * kernel mode.  'tsk' is normally 'current'; the coprocessor segment
 * overrun handler passes 'last_task_used_math' instead (see below).
 */
#define DO_ERROR(trapnr, signr, str, name, tsk) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
        tsk->tss.error_code = error_code; \
        tsk->tss.trap_no = trapnr; \
        force_sig(signr, tsk); \
        die_if_kernel(str,regs,error_code); \
}
  46 
/*
 * Read one byte from 'addr' through an arbitrary segment selector
 * 'seg', by temporarily loading the selector into %fs.  Lets the oops
 * code below peek at a process' stack and code regardless of which
 * data segment it was running with.
 */
#define get_seg_byte(seg,addr) ({ \
register unsigned char __res; \
__asm__("push %%fs;mov %%ax,%%fs;movb %%fs:%2,%%al;pop %%fs" \
        :"=a" (__res):"0" (seg),"m" (*(addr))); \
__res;})

/* As get_seg_byte(), but fetches a 32-bit longword. */
#define get_seg_long(seg,addr) ({ \
register unsigned long __res; \
__asm__("push %%fs;mov %%ax,%%fs;movl %%fs:%2,%%eax;pop %%fs" \
        :"=a" (__res):"0" (seg),"m" (*(addr))); \
__res;})

/* Return the current %fs segment selector. */
#define _fs() ({ \
register unsigned short __res; \
__asm__("mov %%fs,%%ax":"=a" (__res):); \
__res;})
  63 
void page_exception(void);

/* Per-vector entry points; the bodies live in the assembly entry code
 * (which saves the register state) and then call the do_*() handlers
 * defined in this file.  Wired into the IDT by trap_init() below. */
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void reserved(void);
asmlinkage void alignment_check(void);

/* How many stack longwords die_if_kernel() dumps in an oops. */
int kstack_depth_to_print = 24;

/*
 * These constants are for searching for possible module text
 * segments.  VMALLOC_OFFSET comes from mm/vmalloc.c; MODULE_RANGE is
 * a guess of how much space is likely to be vmalloced.
 */
#define VMALLOC_OFFSET (8*1024*1024)
#define MODULE_RANGE (8*1024*1024)
  94 
/*
 * Print a full register dump, raw stack dump, heuristic backtrace and
 * code dump for a trap taken in kernel mode, then kill the current
 * process.  Returns immediately -- doing nothing -- if the trap came
 * from user space (VM86 mode or CPL 3); the caller is expected to
 * deliver a signal in that case.
 */
/*static*/ void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
        int i;
        unsigned long esp;
        unsigned short ss;
        unsigned long *stack, addr, module_start, module_end;
        extern char start_kernel, _etext;

        /* For a same-privilege trap the CPU pushes no esp/ss, so the
           pre-trap stack top is the address of the esp slot itself. */
        esp = (unsigned long) &regs->esp;
        ss = KERNEL_DS;
        if ((regs->eflags & VM_MASK) || (3 & regs->cs) == 3)
                return;
        /* Only reachable for CPL 1/2 (CPL 3 returned above): here the
           CPU did switch stacks, so use the pushed esp/ss instead. */
        if (regs->cs & 3) {
                esp = regs->esp;
                ss = regs->ss;
        }
        console_verbose();
        printk("%s: %04lx\n", str, err & 0xffff);
        printk("CPU:    %d\n", smp_processor_id());
        printk("EIP:    %04x:[<%08lx>]\nEFLAGS: %08lx\n", 0xffff & regs->cs,regs->eip,regs->eflags);
        printk("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
                regs->eax, regs->ebx, regs->ecx, regs->edx);
        printk("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
                regs->esi, regs->edi, regs->ebp, esp);
        printk("ds: %04x   es: %04x   fs: %04x   gs: %04x   ss: %04x\n",
                regs->ds, regs->es, regs->fs, regs->gs, ss);
        /* Task register selector; printed below as "process nr". */
        store_TR(i);
        if (STACK_MAGIC != *(unsigned long *)current->kernel_stack_page)
                printk("Corrupted stack page\n");
        printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)\nStack: ",
                current->comm, current->pid, 0xffff & i, current->kernel_stack_page);
        /* Raw stack dump: up to kstack_depth_to_print longwords,
           stopping at the next page boundary (end of kernel stack). */
        stack = (unsigned long *) esp;
        for(i=0; i < kstack_depth_to_print; i++) {
                if (((long) stack & 4095) == 0)
                        break;
                if (i && ((i % 8) == 0))
                        printk("\n       ");
                printk("%08lx ", get_seg_long(ss,stack++));
        }
        /* Heuristic backtrace: print every stack word that could be a
           return address into kernel text or module (vmalloc) space. */
        printk("\nCall Trace: ");
        stack = (unsigned long *) esp;
        i = 1;
        module_start = ((high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1));
        module_end = module_start + MODULE_RANGE;
        while (((long) stack & 4095) != 0) {
                addr = get_seg_long(ss, stack++);
                /*
                 * If the address is either in the text segment of the
                 * kernel, or in the region which contains vmalloc'ed
                 * memory, it *may* be the address of a calling
                 * routine; if so, print it so that someone tracing
                 * down the cause of the crash will be able to figure
                 * out the call path that was taken.
                 */
                if (((addr >= (unsigned long) &start_kernel) &&
                     (addr <= (unsigned long) &_etext)) ||
                    ((addr >= module_start) && (addr <= module_end))) {
                        if (i && ((i % 8) == 0))
                                printk("\n       ");
                        printk("[<%08lx>] ", addr);
                        i++;
                }
        }
        /* Dump the 20 code bytes at the faulting EIP. */
        printk("\nCode: ");
        for(i=0;i<20;i++)
                printk("%02x ",0xff & get_seg_byte(regs->cs,(i+(char *)regs->eip)));
        printk("\n");
        do_exit(SIGSEGV);
}
 164 
/*
 * The straightforward trap handlers.  Vectors 1 (debug), 2 (NMI),
 * 13 (general protection), 14 (page fault) and 16 (coprocessor error)
 * have hand-written handlers elsewhere in this file.  The coprocessor
 * segment overrun is charged to last_task_used_math rather than
 * current -- presumably because on UP that task's FPU state is the
 * live one (see math_error() below).
 */
DO_ERROR( 0, SIGFPE,  "divide error", divide_error, current)
DO_ERROR( 3, SIGTRAP, "int3", int3, current)
DO_ERROR( 4, SIGSEGV, "overflow", overflow, current)
DO_ERROR( 5, SIGSEGV, "bounds", bounds, current)
DO_ERROR( 6, SIGILL,  "invalid operand", invalid_op, current)
DO_ERROR( 7, SIGSEGV, "device not available", device_not_available, current)
DO_ERROR( 8, SIGSEGV, "double fault", double_fault, current)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun, last_task_used_math)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS, current)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present, current)
DO_ERROR(12, SIGBUS,  "stack segment", stack_segment, current)
DO_ERROR(15, SIGSEGV, "reserved", reserved, current)
DO_ERROR(17, SIGSEGV, "alignment check", alignment_check, current)
 178 
/*
 * General protection fault (trap 13).  GP faults raised while virtual
 * 8086 mode is active are part of normal vm86 emulation and are handed
 * off wholesale; otherwise oops if in kernel mode, else record the
 * trap and kill the task with SIGSEGV.
 */
asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
{
        if (regs->eflags & VM_MASK) {
                handle_vm86_fault((struct vm86_regs *) regs, error_code);
                return;
        }
        /* In kernel mode this prints an oops and never returns. */
        die_if_kernel("general protection",regs,error_code);
        current->tss.error_code = error_code;
        current->tss.trap_no = 13;
        force_sig(SIGSEGV, current);    
}
 190 
/*
 * NMI handler.  With CONFIG_SMP_NMI_INVAL the NMI is routed to
 * smp_flush_tlb_rcv() -- presumably the receive side of an SMP TLB
 * flush.  Otherwise it indicates a hardware problem, which we log
 * (unless CONFIG_IGNORE_NMI) and then try to keep running.
 */
asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
{
#ifdef CONFIG_SMP_NMI_INVAL
        smp_flush_tlb_rcv();
#else
#ifndef CONFIG_IGNORE_NMI
        printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
        printk("You probably have a hardware problem with your RAM chips or a\n");
        printk("power saving mode enabled.\n");
#endif  
#endif
}
 203 
/*
 * Debug exception (trap 1): hardware breakpoints and single-stepping.
 * VM86-mode traps go to the vm86 emulation; otherwise the task gets
 * SIGTRAP.  A kernel-mode trap clears %db7 so the breakpoint cannot
 * re-fire, and execution continues.
 */
asmlinkage void do_debug(struct pt_regs * regs, long error_code)
{
        if (regs->eflags & VM_MASK) {
                handle_vm86_debug((struct vm86_regs *) regs, error_code);
                return;
        }
        force_sig(SIGTRAP, current);
        current->tss.trap_no = 1;
        current->tss.error_code = error_code;
        if ((regs->cs & 3) == 0) {
                /* If this is a kernel mode trap, then reset db7 and allow us to continue */
                __asm__("movl %0,%%db7"
                        : /* no output */
                        : "r" (0));
                return;
        }
        /* Only user-mode traps reach this point (kernel mode returned
           above), and die_if_kernel() returns immediately for user
           mode, so this call is effectively defensive. */
        die_if_kernel("debug",regs,error_code);
}
 222 
/*
 * Allow the process which triggered the interrupt to recover the error
 * condition.
 *  - the status word is saved in the cs selector.
 *  - the tag word is saved in the operand selector.
 *  - the status word is then cleared and the tags all set to Empty.
 *
 * This will give sufficient information for complete recovery provided that
 * the affected process knows or can deduce the code and data segments
 * which were in force when the exception condition arose.
 *
 * Note that we play around with the 'TS' bit to hopefully get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void)
{
        struct task_struct * task;

        /* Clear TS so we can touch the FPU without re-faulting. */
        clts();
#ifdef __SMP__
        /* On SMP the FPU always holds the current task's state (see
           math_state_restore() below). */
        task = current;
#else
        /* On UP, lazy FPU switching means the FPU may still hold some
           other task's state; that task owns the exception. */
        task = last_task_used_math;
        last_task_used_math = NULL;
        if (!task) {
                /* No owner at all: just clear the pending exception. */
                __asm__("fnclex");
                return;
        }
#endif
        /*
         *      Save the info for the exception handler
         */
        __asm__ __volatile__("fnsave %0":"=m" (task->tss.i387.hard));
        task->flags&=~PF_USEDFPU;
        stts();

        force_sig(SIGFPE, task);
        task->tss.trap_no = 16;
        task->tss.error_code = 0;
}
 264 
/*
 * Exception 16 entry for FPU errors.  All real work happens in
 * math_error().  Setting ignore_irq13 presumably tells the IRQ13
 * handler that native exception-16 reporting works on this CPU, so
 * the redundant interrupt can be dropped -- confirm against the IRQ
 * setup code.
 */
asmlinkage void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
        ignore_irq13 = 1;
        math_error();
}
 270 
/*
 *  'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
        __asm__ __volatile__("clts");           /* Allow maths ops (or we recurse) */

/*
 *      SMP is actually simpler than uniprocessor for once. Because
 *      we can't pull the delayed FPU switching trick Linus does
 *      we simply have to do the restore each context switch and
 *      set the flag. switch_to() will always save the state in
 *      case we swap processors. We also don't use the coprocessor
 *      timer - IRQ 13 mode isn't used with SMP machines (thank god).
 */
#ifndef __SMP__
        if (last_task_used_math == current)
                return;         /* the FPU already holds our state */
        if (last_task_used_math)
                /* Save the previous owner's state before taking over. */
                __asm__("fnsave %0":"=m" (last_task_used_math->tss.i387));
        else
                __asm__("fnclex");
        last_task_used_math = current;
#endif

        if(current->used_math)
                __asm__("frstor %0": :"m" (current->tss.i387));
        else
        {
                /*
                 *      Our first FPU usage, clean the chip.
                 */
                __asm__("fninit");
                current->used_math = 1;
        }
        current->flags|=PF_USEDFPU;             /* So we fnsave on switch_to() */
}
 312 
#ifndef CONFIG_MATH_EMULATION

/*
 * Stub used when no math emulator is compiled in: a task that raises
 * an FPU trap on a machine without a coprocessor is killed with
 * SIGFPE.  The schedule() call lets the signal take effect before we
 * would return to the faulting instruction.
 */
asmlinkage void math_emulate(long arg)
{
  printk("math-emulation not enabled and no coprocessor found.\n");
  printk("killing %s.\n",current->comm);
  force_sig(SIGFPE,current);
  schedule();
}

#endif /* CONFIG_MATH_EMULATION */
 324 
/*
 * Install all exception/interrupt gates, detect EISA, and set up the
 * per-task TSS/LDT descriptor slots in the GDT.
 * NOTE(review): the 'smptrap' guard suggests additional processors
 * re-enter here and only clear NT / reload the LDT -- confirm against
 * the SMP startup code.
 */
void trap_init(void)
{
        int i;
        struct desc_struct * p;
        static int smptrap=0;
        
        if(smptrap)
        {
                /* Not the first caller: just clear NT and load LDT 0. */
                __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
                load_ldt(0);
                return;
        }
        smptrap++;
        /* EISA machines carry the "EISA" signature at this BIOS address. */
        if (strncmp((char*)0x0FFFD9, "EISA", 4) == 0)
                EISA_bus = 1;
        /* Call gate in the default LDT for lcall7 entry (presumably
           iBCS2 binary compatibility -- see lcall7 in the entry code). */
        set_call_gate(&default_ldt,lcall7);
        set_trap_gate(0,&divide_error);
        set_trap_gate(1,&debug);
        set_trap_gate(2,&nmi);
        set_system_gate(3,&int3);       /* int3-5 can be called from all */
        set_system_gate(4,&overflow);
        set_system_gate(5,&bounds);
        set_trap_gate(6,&invalid_op);
        set_trap_gate(7,&device_not_available);
        set_trap_gate(8,&double_fault);
        set_trap_gate(9,&coprocessor_segment_overrun);
        set_trap_gate(10,&invalid_TSS);
        set_trap_gate(11,&segment_not_present);
        set_trap_gate(12,&stack_segment);
        set_trap_gate(13,&general_protection);
        set_trap_gate(14,&page_fault);
        set_trap_gate(15,&reserved);
        set_trap_gate(16,&coprocessor_error);
        set_trap_gate(17,&alignment_check);
        /* Remaining vectors are unused: trap them as "reserved". */
        for (i=18;i<48;i++)
                set_trap_gate(i,&reserved);
        /* int 0x80 system-call entry, callable from user mode. */
        set_system_gate(0x80,&system_call);
/* set up GDT task & ldt entries */
        p = gdt+FIRST_TSS_ENTRY;
        set_tss_desc(p, &init_task.tss);
        p++;
        set_ldt_desc(p, &default_ldt, 1);
        p++;
        /* Zero the TSS/LDT descriptor pair for every other task slot. */
        for(i=1 ; i<NR_TASKS ; i++) {
                p->a=p->b=0;
                p++;
                p->a=p->b=0;
                p++;
        }
/* Clear NT, so that we won't have troubles with that later on */
        __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
        load_TR(0);
        load_ldt(0);
}

/* [previous][next][first][last][top][bottom][index][help] */