root/arch/i386/kernel/traps.c


DEFINITIONS

This source file includes the following definitions:
  1. console_verbose
  2. die_if_kernel
  3. DO_ERROR
  4. do_nmi
  5. do_debug
  6. math_error
  7. do_coprocessor_error
  8. math_state_restore
  9. math_emulate
  10. trap_init

   1 /*
   2  *  linux/arch/i386/traps.c
   3  *
   4  *  Copyright (C) 1991, 1992  Linus Torvalds
   5  */
   6 
   7 /*
   8  * 'Traps.c' handles hardware traps and faults after we have saved some
   9  * state in 'asm.s'. Currently mostly a debugging-aid, will be extended
  10  * to mainly kill the offending process (probably by giving it a signal,
  11  * but possibly by killing it outright if necessary).
  12  */
  13 #include <linux/head.h>
  14 #include <linux/sched.h>
  15 #include <linux/kernel.h>
  16 #include <linux/string.h>
  17 #include <linux/errno.h>
  18 #include <linux/ptrace.h>
  19 #include <linux/config.h>
  20 #include <linux/timer.h>
  21 
  22 #include <asm/system.h>
  23 #include <asm/segment.h>
  24 #include <asm/io.h>
  25 
  26 asmlinkage int system_call(void);
  27 asmlinkage void lcall7(void);
  28 struct desc_struct default_ldt;
  29 
  30 static inline void console_verbose(void)
  31 {
  32         extern int console_loglevel;
  33         console_loglevel = 15;
  34 }
  35 
  36 #define DO_ERROR(trapnr, signr, str, name, tsk) \
  37 asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
  38 { \
  39         tsk->tss.error_code = error_code; \
  40         tsk->tss.trap_no = trapnr; \
  41         if (signr == SIGTRAP && current->flags & PF_PTRACED) \
  42                 current->blocked &= ~(1 << (SIGTRAP-1)); \
  43         send_sig(signr, tsk, 1); \
  44         die_if_kernel(str,regs,error_code); \
  45 }
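/*
 * For reference, each DO_ERROR() invocation further down expands to a small
 * trap handler.  A sketch of the expansion for the first one, the divide
 * error, looks roughly like this:
 *
 *      asmlinkage void do_divide_error(struct pt_regs * regs, long error_code)
 *      {
 *              current->tss.error_code = error_code;
 *              current->tss.trap_no = 0;
 *              if (SIGFPE == SIGTRAP && current->flags & PF_PTRACED)
 *                      current->blocked &= ~(1 << (SIGTRAP-1));
 *              send_sig(SIGFPE, current, 1);
 *              die_if_kernel("divide error", regs, error_code);
 *      }
 */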
  46 
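/*
 * The helpers below peek at memory through an arbitrary segment: they
 * temporarily load the given selector into %fs, read a byte (get_seg_byte)
 * or a long (get_seg_long) through it, and restore %fs afterwards; _fs()
 * simply returns the current %fs selector.  die_if_kernel() uses them to
 * dump the faulting stack and the code bytes at EIP.
 */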
  47 #define get_seg_byte(seg,addr) ({ \
  48 register unsigned char __res; \
  49 __asm__("push %%fs;mov %%ax,%%fs;movb %%fs:%2,%%al;pop %%fs" \
  50         :"=a" (__res):"0" (seg),"m" (*(addr))); \
  51 __res;})
  52 
  53 #define get_seg_long(seg,addr) ({ \
  54 register unsigned long __res; \
  55 __asm__("push %%fs;mov %%ax,%%fs;movl %%fs:%2,%%eax;pop %%fs" \
  56         :"=a" (__res):"0" (seg),"m" (*(addr))); \
  57 __res;})
  58 
  59 #define _fs() ({ \
  60 register unsigned short __res; \
  61 __asm__("mov %%fs,%%ax":"=a" (__res):); \
  62 __res;})
  63 
  64 void page_exception(void);
  65 
  66 asmlinkage void divide_error(void);
  67 asmlinkage void debug(void);
  68 asmlinkage void nmi(void);
  69 asmlinkage void int3(void);
  70 asmlinkage void overflow(void);
  71 asmlinkage void bounds(void);
  72 asmlinkage void invalid_op(void);
  73 asmlinkage void device_not_available(void);
  74 asmlinkage void double_fault(void);
  75 asmlinkage void coprocessor_segment_overrun(void);
  76 asmlinkage void invalid_TSS(void);
  77 asmlinkage void segment_not_present(void);
  78 asmlinkage void stack_segment(void);
  79 asmlinkage void general_protection(void);
  80 asmlinkage void page_fault(void);
  81 asmlinkage void coprocessor_error(void);
  82 asmlinkage void reserved(void);
  83 asmlinkage void alignment_check(void);
  84 
  85 int kstack_depth_to_print = 24;
  86 
  87 /*
  88  * These constants are for searching for possible module text
  89  * segments.  VMALLOC_OFFSET comes from mm/vmalloc.c; MODULE_RANGE is
  90  * a guess of how much space is likely to be vmalloced.
  91  */
  92 #define VMALLOC_OFFSET (8*1024*1024)
  93 #define MODULE_RANGE (8*1024*1024)
  94 
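/*
 * die_if_kernel(): for traps taken in kernel mode this raises the console
 * log level and dumps the register state, a raw stack dump, a heuristic
 * call trace (every stack word that points into the kernel text or into
 * the presumed module area) and the code bytes at EIP, then exits the
 * current process with SIGSEGV.  Traps taken in user or vm86 mode return
 * immediately and are handled by the callers' signal delivery.
 */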
  95 /*static*/ void die_if_kernel(char * str, struct pt_regs * regs, long err)
  96 {
  97         int i;
  98         unsigned long esp;
  99         unsigned short ss;
 100         unsigned long *stack, addr, module_start, module_end;
 101         extern char start_kernel, etext;
 102 
 103         esp = (unsigned long) &regs->esp;
 104         ss = KERNEL_DS;
 105         if ((regs->eflags & VM_MASK) || (3 & regs->cs) == 3)
 106                 return;
 107         if (regs->cs & 3) {
 108                 esp = regs->esp;
 109                 ss = regs->ss;
 110         }
 111         console_verbose();
 112         printk("%s: %04lx\n", str, err & 0xffff);
 113         printk("EIP:    %04x:%08lx\nEFLAGS: %08lx\n", 0xffff & regs->cs,regs->eip,regs->eflags);
 114         printk("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
 115                 regs->eax, regs->ebx, regs->ecx, regs->edx);
 116         printk("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
 117                 regs->esi, regs->edi, regs->ebp, esp);
 118         printk("ds: %04x   es: %04x   fs: %04x   gs: %04x   ss: %04x\n",
 119                 regs->ds, regs->es, regs->fs, regs->gs, ss);
 120         store_TR(i);
 121         if (STACK_MAGIC != *(unsigned long *)current->kernel_stack_page)
 122                 printk("Corrupted stack page\n");
 123         printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)\nStack: ",
 124                 current->comm, current->pid, 0xffff & i, current->kernel_stack_page);
 125         stack = (unsigned long *) esp;
 126         for(i=0; i < kstack_depth_to_print; i++) {
 127                 if (((long) stack & 4095) == 0)
 128                         break;
 129                 if (i && ((i % 8) == 0))
 130                         printk("\n       ");
 131                 printk("%08lx ", get_seg_long(ss,stack++));
 132         }
 133         printk("\nCall Trace: ");
 134         stack = (unsigned long *) esp;
 135         i = 1;
 136         module_start = ((high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1));
 137         module_end = module_start + MODULE_RANGE;
 138         while (((long) stack & 4095) != 0) {
 139                 addr = get_seg_long(ss, stack++);
 140                 /*
 141                  * If the address is either in the text segment of the
 142                  * kernel, or in the region which contains vmalloc'ed
 143                  * memory, it *may* be the address of a calling
 144                  * routine; if so, print it so that someone tracing
 145                  * down the cause of the crash will be able to figure
 146                  * out the call path that was taken.
 147                  */
 148                 if (((addr >= (unsigned long) &start_kernel) &&
 149                      (addr <= (unsigned long) &etext)) ||
 150                     ((addr >= module_start) && (addr <= module_end))) {
 151                         if (i && ((i % 8) == 0))
 152                                 printk("\n       ");
 153                         printk("%08lx ", addr);
 154                         i++;
 155                 }
 156         }
 157         printk("\nCode: ");
 158         for(i=0;i<20;i++)
 159                 printk("%02x ",0xff & get_seg_byte(regs->cs,(i+(char *)regs->eip)));
 160         printk("\n");
 161         do_exit(SIGSEGV);
 162 }
 163 
 164 DO_ERROR( 0, SIGFPE,  "divide error", divide_error, current)
 165 DO_ERROR( 3, SIGTRAP, "int3", int3, current)
 166 DO_ERROR( 4, SIGSEGV, "overflow", overflow, current)
 167 DO_ERROR( 5, SIGSEGV, "bounds", bounds, current)
 168 DO_ERROR( 6, SIGILL,  "invalid operand", invalid_op, current)
 169 DO_ERROR( 7, SIGSEGV, "device not available", device_not_available, current)
 170 DO_ERROR( 8, SIGSEGV, "double fault", double_fault, current)
 171 DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun, last_task_used_math)
 172 DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS, current)
 173 DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present, current)
 174 DO_ERROR(12, SIGBUS,  "stack segment", stack_segment, current)
 175 DO_ERROR(15, SIGSEGV, "reserved", reserved, current)
 176 DO_ERROR(17, SIGSEGV, "alignment check", alignment_check, current)
 177 
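/*
 * General protection faults: faults raised in vm86 mode are passed to the
 * vm86 emulation, kernel-mode faults die via die_if_kernel(), and user
 * processes normally get SIGSEGV -- or SIGILL when the faulting opcode is
 * INT, HLT, CLI or STI.
 */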
 178 asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
 179 {
 180         int signr = SIGSEGV;
 181 
 182         if (regs->eflags & VM_MASK) {
 183                 handle_vm86_fault((struct vm86_regs *) regs, error_code);
 184                 return;
 185         }
 186         die_if_kernel("general protection",regs,error_code);
 187         switch (get_seg_byte(regs->cs, (char *)regs->eip)) {
 188                 case 0xCD: /* INT */
 189                 case 0xF4: /* HLT */
 190                 case 0xFA: /* CLI */
 191                 case 0xFB: /* STI */
 192                         signr = SIGILL;
 193         }
 194         current->tss.error_code = error_code;
 195         current->tss.trap_no = 13;
 196         send_sig(signr, current, 1);    
 197 }
 198 
 199 asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
 200 {
 201         printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
 202         printk("You probably have a hardware problem with your RAM chips\n");
 203 }
 204 
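/*
 * Debug traps: vm86-mode traps go to the vm86 debug handler, user-mode
 * traps deliver SIGTRAP (unblocking it first for ptraced tasks), and
 * kernel-mode traps just clear %db7 so execution can continue.
 */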
 205 asmlinkage void do_debug(struct pt_regs * regs, long error_code)
 206 {
 207         if (regs->eflags & VM_MASK) {
 208                 handle_vm86_debug((struct vm86_regs *) regs, error_code);
 209                 return;
 210         }
 211         if (current->flags & PF_PTRACED)
 212                 current->blocked &= ~(1 << (SIGTRAP-1));
 213         send_sig(SIGTRAP, current, 1);
 214         current->tss.trap_no = 1;
 215         current->tss.error_code = error_code;
 216         if ((regs->cs & 3) == 0) {
 217                 /* If this is a kernel mode trap, then reset db7 and allow us to continue */
 218                 __asm__("movl %0,%%db7"
 219                         : /* no output */
 220                         : "r" (0));
 221                 return;
 222         }
 223         die_if_kernel("debug",regs,error_code);
 224 }
 225 
 226 /*
 227  * Allow the process which triggered the interrupt to recover the error
 228  * condition.
 229  *  - the status word is saved in the cs selector.
 230  *  - the tag word is saved in the operand selector.
 231  *  - the status word is then cleared and the tags all set to Empty.
 232  *
 233  * This will give sufficient information for complete recovery provided that
 234  * the affected process knows or can deduce the code and data segments
 235  * which were in force when the exception condition arose.
 236  *
 237  * Note that we play around with the 'TS' bit to hopefully get
 238  * the correct behaviour even in the presence of the asynchronous
 239  * IRQ13 behaviour
 240  */
 241 void math_error(void)
 242 {
 243         struct i387_hard_struct * env;
 244 
 245         clts();
 246         if (!last_task_used_math) {
 247                 __asm__("fnclex");
 248                 return;
 249         }
 250         env = &last_task_used_math->tss.i387.hard;
 251         send_sig(SIGFPE, last_task_used_math, 1);
 252         last_task_used_math->tss.trap_no = 16;
 253         last_task_used_math->tss.error_code = 0;
 254         __asm__ __volatile__("fnsave %0":"=m" (*env));
 255         last_task_used_math = NULL;
 256         stts();
 257         env->fcs = (env->swd & 0x0000ffff) | (env->fcs & 0xffff0000);
 258         env->fos = env->twd;
 259         env->swd &= 0xffff3800;
 260         env->twd = 0xffffffff;
 261 }
 262 
 263 asmlinkage void do_coprocessor_error(struct pt_regs * regs, long error_code)
 264 {
 265         ignore_irq13 = 1;
 266         math_error();
 267 }
 268 
 269 /*
 270  *  'math_state_restore()' saves the current math information in the
 271  * old math state array, and gets the new ones from the current task
 272  *
 273  * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 274  * Don't touch unless you *really* know how it works.
 275  */
 276 asmlinkage void math_state_restore(void)
 277 {
 278         __asm__ __volatile__("clts");
 279         if (last_task_used_math == current)
 280                 return;
 281         timer_table[COPRO_TIMER].expires = jiffies+50;
 282         timer_active |= 1<<COPRO_TIMER; 
 283         if (last_task_used_math)
 284                 __asm__("fnsave %0":"=m" (last_task_used_math->tss.i387));
 285         else
 286                 __asm__("fnclex");
 287         last_task_used_math = current;
 288         if (current->used_math) {
 289                 __asm__("frstor %0": :"m" (current->tss.i387));
 290         } else {
 291                 __asm__("fninit");
 292                 current->used_math=1;
 293         }
 294         timer_active &= ~(1<<COPRO_TIMER);
 295 }
 296 
 297 #ifndef CONFIG_MATH_EMULATION
 298 
 299 asmlinkage void math_emulate(long arg)
 300 {
 301   printk("math-emulation not enabled and no coprocessor found.\n");
 302   printk("killing %s.\n",current->comm);
 303   send_sig(SIGFPE,current,1);
 304   schedule();
 305 }
 306 
 307 #endif /* CONFIG_MATH_EMULATION */
 308 
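/*
 * trap_init(): fill in the IDT -- trap gates for the ordinary exceptions,
 * system gates for int3/into/bound and the 0x80 system call so that user
 * code may invoke them directly -- point the default LDT's call gate at
 * lcall7, set up the TSS and LDT descriptors in the GDT, clear the NT
 * flag and load the initial task register and LDT.
 */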
 309 void trap_init(void)
 310 {
 311         int i;
 312         struct desc_struct * p;
 313 
 314         set_call_gate(&default_ldt,lcall7);
 315         set_trap_gate(0,&divide_error);
 316         set_trap_gate(1,&debug);
 317         set_trap_gate(2,&nmi);
 318         set_system_gate(3,&int3);       /* int3-5 can be called from all */
 319         set_system_gate(4,&overflow);
 320         set_system_gate(5,&bounds);
 321         set_trap_gate(6,&invalid_op);
 322         set_trap_gate(7,&device_not_available);
 323         set_trap_gate(8,&double_fault);
 324         set_trap_gate(9,&coprocessor_segment_overrun);
 325         set_trap_gate(10,&invalid_TSS);
 326         set_trap_gate(11,&segment_not_present);
 327         set_trap_gate(12,&stack_segment);
 328         set_trap_gate(13,&general_protection);
 329         set_trap_gate(14,&page_fault);
 330         set_trap_gate(15,&reserved);
 331         set_trap_gate(16,&coprocessor_error);
 332         set_trap_gate(17,&alignment_check);
 333         for (i=18;i<48;i++)
 334                 set_trap_gate(i,&reserved);
 335         set_system_gate(0x80,&system_call);
 336 /* set up GDT task & ldt entries */
 337         p = gdt+FIRST_TSS_ENTRY;
 338         set_tss_desc(p, &init_task.tss);
 339         p++;
 340         set_ldt_desc(p, &default_ldt, 1);
 341         p++;
 342         for(i=1 ; i<NR_TASKS ; i++) {
 343                 p->a=p->b=0;
 344                 p++;
 345                 p->a=p->b=0;
 346                 p++;
 347         }
 348 /* Clear NT, so that we won't have troubles with that later on */
 349         __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
 350         load_TR(0);
 351         load_ldt(0);
 352 }
