root/arch/i386/kernel/vm86.c


DEFINITIONS

This source file includes the following definitions:
  1. save_v86_state
  2. mark_screen_rdonly
  3. sys_vm86
  4. return_to_32bit
  5. set_IF
  6. clear_IF
  7. clear_TF
  8. set_vflags_long
  9. set_vflags_short
  10. get_vflags
  11. is_revectored
  12. do_int
  13. handle_vm86_debug
  14. handle_vm86_fault

/*
 *  linux/arch/i386/kernel/vm86.c
 *
 *  Copyright (C) 1994  Linus Torvalds
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/mm.h>

#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/io.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Hopefully these problems do not actually matter for anything.
 */

/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)        (((unsigned char *)&((regs)->eax))[0])
#define AH(regs)        (((unsigned char *)&((regs)->eax))[1])
#define IP(regs)        (*(unsigned short *)&((regs)->eip))
#define SP(regs)        (*(unsigned short *)&((regs)->esp))

/*
 * virtual flags (16- and 32-bit versions)
 */
#define VFLAGS  (*(unsigned short *)&(current->tss.v86flags))
#define VEFLAGS (current->tss.v86flags)

#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK       (0xDD5)
#define RETURN_MASK     (0xDFF)

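/*
 * Copy the vm86 register state back to the user-space vm86_struct when
 * leaving vm86 mode, restore the kernel stack pointer saved by
 * sys_vm86(), and return the 32-bit pt_regs frame that was active
 * before the vm86() call.
 */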
asmlinkage struct pt_regs * save_v86_state(struct vm86_regs * regs)
{
        unsigned long tmp;

        if (!current->tss.vm86_info) {
                printk("no vm86_info: BAD\n");
                do_exit(SIGSEGV);
        }
        set_flags(regs->eflags, VEFLAGS, VIF_MASK | current->tss.v86mask);
        memcpy_tofs(&current->tss.vm86_info->regs,regs,sizeof(*regs));
        put_fs_long(current->tss.screen_bitmap,&current->tss.vm86_info->screen_bitmap);
        tmp = current->tss.esp0;
        current->tss.esp0 = current->saved_kernel_stack;
        current->saved_kernel_stack = 0;
        return (struct pt_regs *) tmp;
}

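/*
 * Called when VM86_SCREEN_BITMAP is set: write-protect the 32 pages
 * that map 0xA0000-0xBFFFF (the VGA window) so that vm86 writes to
 * screen memory fault, letting dirty screen pages be tracked through
 * tss.screen_bitmap.
 */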
static void mark_screen_rdonly(struct task_struct * tsk)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int i;

        pgd = pgd_offset(tsk->mm, 0xA0000);
        if (pgd_none(*pgd))
                return;
        if (pgd_bad(*pgd)) {
                printk("vm86: bad pgd entry [%p]:%08lx\n", pgd, pgd_val(*pgd));
                pgd_clear(pgd);
                return;
        }
        pmd = pmd_offset(pgd, 0xA0000);
        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                printk("vm86: bad pmd entry [%p]:%08lx\n", pmd, pmd_val(*pmd));
                pmd_clear(pmd);
                return;
        }
        pte = pte_offset(pmd, 0xA0000);
        for (i = 0; i < 32; i++) {
                if (pte_present(*pte))
                        set_pte(pte, pte_wrprotect(*pte));
                pte++;
        }
        invalidate();
}

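/*
 * The vm86() system call: copy in the vm86_struct, sanitize the
 * segment registers and eflags the user supplied, save the 32-bit
 * kernel stack state, and enter vm86 mode by switching %esp to the
 * new register frame and jumping through ret_from_sys_call.
 */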
asmlinkage int sys_vm86(struct vm86_struct * v86)
{
        struct vm86_struct info;
        struct pt_regs * pt_regs = (struct pt_regs *) &v86;
        int error;

        if (current->saved_kernel_stack)
                return -EPERM;
        /* v86 must be readable (now) and writable (for save_v86_state) */
        error = verify_area(VERIFY_WRITE,v86,sizeof(*v86));
        if (error)
                return error;
        memcpy_fromfs(&info,v86,sizeof(info));
/*
 * make sure the vm86() system call doesn't try to do anything silly
 */
        info.regs.__null_ds = 0;
        info.regs.__null_es = 0;
        info.regs.__null_fs = 0;
        info.regs.__null_gs = 0;
/*
 * The eflags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
        VEFLAGS = info.regs.eflags;
        info.regs.eflags &= SAFE_MASK;
        info.regs.eflags |= pt_regs->eflags & ~SAFE_MASK;
        info.regs.eflags |= VM_MASK;

        switch (info.cpu_type) {
                case CPU_286:
                        current->tss.v86mask = 0;
                        break;
                case CPU_386:
                        current->tss.v86mask = NT_MASK | IOPL_MASK;
                        break;
                case CPU_486:
                        current->tss.v86mask = AC_MASK | NT_MASK | IOPL_MASK;
                        break;
                default:
                        current->tss.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
                        break;
        }

/*
 * Save old state, set default return value (%eax) to 0
 */
        pt_regs->eax = 0;
        current->saved_kernel_stack = current->tss.esp0;
        current->tss.esp0 = (unsigned long) pt_regs;
        current->tss.vm86_info = v86;

        current->tss.screen_bitmap = info.screen_bitmap;
        if (info.flags & VM86_SCREEN_BITMAP)
                mark_screen_rdonly(current);
        __asm__ __volatile__("movl %0,%%esp\n\t"
                "jmp ret_from_sys_call"
                : /* no outputs */
                :"r" (&info.regs));
        return 0;
}

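/*
 * Leave vm86 mode for good: save the vm86 state, stuff the return code
 * into the 32-bit frame's %eax, and resume on the saved kernel stack.
 * This never returns to the caller.
 */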
static inline void return_to_32bit(struct vm86_regs * regs16, int retval)
{
        struct pt_regs * regs32;

        regs32 = save_v86_state(regs16);
        regs32->eax = retval;
        __asm__ __volatile__("movl %0,%%esp\n\t"
                "jmp ret_from_sys_call"
                : : "r" (regs32));
}

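/*
 * Emulation of the virtual interrupt flag: cli/sti executed in vm86
 * mode trap to the kernel (see handle_vm86_fault below) and only
 * toggle the VIF bit in v86flags. If an interrupt is pending (VIP set)
 * when vm86 code re-enables interrupts, return to 32-bit mode with
 * VM86_STI so it can be delivered.
 */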
static inline void set_IF(struct vm86_regs * regs)
{
        VEFLAGS |= VIF_MASK;
        if (VEFLAGS & VIP_MASK)
                return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct vm86_regs * regs)
{
        VEFLAGS &= ~VIF_MASK;
}

static inline void clear_TF(struct vm86_regs * regs)
{
        regs->eflags &= ~TF_MASK;
}

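/*
 * Split a flag image written by vm86 code (via popf/popfd/iret) into
 * the virtually-handled bits, kept in v86flags, and the safe bits that
 * go into the real eflags; a set IF is routed through set_IF() above.
 */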
static inline void set_vflags_long(unsigned long eflags, struct vm86_regs * regs)
{
        set_flags(VEFLAGS, eflags, current->tss.v86mask);
        set_flags(regs->eflags, eflags, SAFE_MASK);
        if (eflags & IF_MASK)
                set_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct vm86_regs * regs)
{
        set_flags(VFLAGS, flags, current->tss.v86mask);
        set_flags(regs->eflags, flags, SAFE_MASK);
        if (flags & IF_MASK)
                set_IF(regs);
}

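/*
 * Compose the flag image that vm86 code should see: the real
 * arithmetic flags, IF taken from the virtual VIF bit, and the
 * CPU-type-dependent bits (NT/IOPL/AC/ID) from v86flags.
 */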
static inline unsigned long get_vflags(struct vm86_regs * regs)
{
        unsigned long flags = regs->eflags & RETURN_MASK;

        if (VEFLAGS & VIF_MASK)
                flags |= IF_MASK;
        return flags | (VEFLAGS & current->tss.v86mask);
}

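/*
 * Test bit 'nr' in a user-space 256-bit revectoring bitmap (accessed
 * through %fs, which addresses user space in kernel mode). Returns
 * non-zero, i.e. "treat as revectored", if the bitmap cannot be read.
 */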
static inline int is_revectored(int nr, struct revectored_struct * bitmap)
{
        if (verify_area(VERIFY_READ, bitmap, 256/8) < 0)
                return 1;
        __asm__ __volatile__("btl %2,%%fs:%1\n\tsbbl %0,%0"
                :"=r" (nr)
                :"m" (*bitmap),"r" (nr));
        return nr;
}

/*
 * Boy are these ugly, but we need to do the correct 16-bit arithmetic.
 * Gcc makes a mess of it, so we do it inline and use non-obvious calling
 * conventions..
 */
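/*
 * All of these access the vm86 stack through %fs a byte at a time, and
 * adjust only the low 16 bits of the pointer ("decw"/"incw") so that
 * sp wraps around within the 64kB segment like a real 8086 stack.
 */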
#define pushb(base, ptr, val) \
__asm__ __volatile__( \
        "decw %w0\n\t" \
        "movb %2,%%fs:0(%1,%0)" \
        : "=r" (ptr) \
        : "r" (base), "q" (val), "0" (ptr))

#define pushw(base, ptr, val) \
__asm__ __volatile__( \
        "decw %w0\n\t" \
        "movb %h2,%%fs:0(%1,%0)\n\t" \
        "decw %w0\n\t" \
        "movb %b2,%%fs:0(%1,%0)" \
        : "=r" (ptr) \
        : "r" (base), "q" (val), "0" (ptr))

#define pushl(base, ptr, val) \
__asm__ __volatile__( \
        "decw %w0\n\t" \
        "rorl $16,%2\n\t" \
        "movb %h2,%%fs:0(%1,%0)\n\t" \
        "decw %w0\n\t" \
        "movb %b2,%%fs:0(%1,%0)\n\t" \
        "decw %w0\n\t" \
        "rorl $16,%2\n\t" \
        "movb %h2,%%fs:0(%1,%0)\n\t" \
        "decw %w0\n\t" \
        "movb %b2,%%fs:0(%1,%0)" \
        : "=r" (ptr) \
        : "r" (base), "q" (val), "0" (ptr))

#define popb(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
        "movb %%fs:0(%1,%0),%b2\n\t" \
        "incw %w0" \
        : "=r" (ptr), "=r" (base), "=q" (__res) \
        : "0" (ptr), "1" (base), "2" (0)); \
__res; })

#define popw(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
        "movb %%fs:0(%1,%0),%b2\n\t" \
        "incw %w0\n\t" \
        "movb %%fs:0(%1,%0),%h2\n\t" \
        "incw %w0" \
        : "=r" (ptr), "=r" (base), "=q" (__res) \
        : "0" (ptr), "1" (base), "2" (0)); \
__res; })

#define popl(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
        "movb %%fs:0(%1,%0),%b2\n\t" \
        "incw %w0\n\t" \
        "movb %%fs:0(%1,%0),%h2\n\t" \
        "incw %w0\n\t" \
        "rorl $16,%2\n\t" \
        "movb %%fs:0(%1,%0),%b2\n\t" \
        "incw %w0\n\t" \
        "movb %%fs:0(%1,%0),%h2\n\t" \
        "incw %w0\n\t" \
        "rorl $16,%2" \
        : "=r" (ptr), "=r" (base), "=q" (__res) \
        : "0" (ptr), "1" (base)); \
__res; })

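/*
 * Emulate a real-mode software interrupt, unless it is revectored or
 * would enter the BIOS: push the flag image, cs and ip on the vm86
 * stack and vector through the real-mode interrupt table at address 0.
 */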
static void do_int(struct vm86_regs *regs, int i, unsigned char * ssp, unsigned long sp)
{
        unsigned short *intr_ptr, seg;

        if (regs->cs == BIOSSEG)
                goto cannot_handle;
        if (is_revectored(i, &current->tss.vm86_info->int_revectored))
                goto cannot_handle;
        if (i==0x21 && is_revectored(AH(regs),&current->tss.vm86_info->int21_revectored))
                goto cannot_handle;
        intr_ptr = (unsigned short *) (i << 2);
        if (verify_area(VERIFY_READ, intr_ptr, 4) < 0)
                goto cannot_handle;
        seg = get_fs_word(intr_ptr+1);
        if (seg == BIOSSEG)
                goto cannot_handle;
        pushw(ssp, sp, get_vflags(regs));
        pushw(ssp, sp, regs->cs);
        pushw(ssp, sp, IP(regs));
        regs->cs = seg;
        SP(regs) -= 6;
        IP(regs) = get_fs_word(intr_ptr+0);
        clear_TF(regs);
        clear_IF(regs);
        return;

cannot_handle:
        return_to_32bit(regs, VM86_INTx + (i << 8));
}

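/*
 * Debug exception in vm86 mode: instead of reflecting int 1 into the
 * vm86 program, report it as a SIGTRAP so that debuggers keep working.
 */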
void handle_vm86_debug(struct vm86_regs * regs, long error_code)
{
#if 0
        do_int(regs, 1, (unsigned char *) (regs->ss << 4), SP(regs));
#else
        if (current->flags & PF_PTRACED)
                current->blocked &= ~(1 << (SIGTRAP-1));
        send_sig(SIGTRAP, current, 1);
        current->tss.trap_no = 1;
        current->tss.error_code = error_code;
#endif
}

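/*
 * General protection fault in vm86 mode: decode the faulting opcode
 * and emulate the handful of privileged instructions (pushf/popf, int,
 * iret, cli/sti, with or without an operand-size prefix) that play
 * with the interrupt flag. Anything else ends the vm86 session with
 * VM86_UNKNOWN.
 */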
void handle_vm86_fault(struct vm86_regs * regs, long error_code)
{
        unsigned char *csp, *ssp;
        unsigned long ip, sp;

        csp = (unsigned char *) (regs->cs << 4);
        ssp = (unsigned char *) (regs->ss << 4);
        sp = SP(regs);
        ip = IP(regs);

        switch (popb(csp, ip)) {

        /* operand size override */
        case 0x66:
                switch (popb(csp, ip)) {

                /* pushfd */
                case 0x9c:
                        SP(regs) -= 4;
                        IP(regs) += 2;
                        pushl(ssp, sp, get_vflags(regs));
                        return;

                /* popfd */
                case 0x9d:
                        SP(regs) += 4;
                        IP(regs) += 2;
                        set_vflags_long(popl(ssp, sp), regs);
                        return;

                /* iretd */
                case 0xcf:
                        SP(regs) += 12;
                        IP(regs) = (unsigned short)popl(ssp, sp);
                        regs->cs = (unsigned short)popl(ssp, sp);
                        set_vflags_long(popl(ssp, sp), regs);
                        return;

                /* without this, an unknown prefixed opcode would fall
                   through and be mistaken for pushf below */
                default:
                        return_to_32bit(regs, VM86_UNKNOWN);
                }

        /* pushf */
        case 0x9c:
                SP(regs) -= 2;
                IP(regs)++;
                pushw(ssp, sp, get_vflags(regs));
                return;

        /* popf */
        case 0x9d:
                SP(regs) += 2;
                IP(regs)++;
                set_vflags_short(popw(ssp, sp), regs);
                return;

        /* int xx */
        case 0xcd:
                IP(regs) += 2;
                do_int(regs, popb(csp, ip), ssp, sp);
                return;

        /* iret */
        case 0xcf:
                SP(regs) += 6;
                IP(regs) = popw(ssp, sp);
                regs->cs = popw(ssp, sp);
                set_vflags_short(popw(ssp, sp), regs);
                return;

        /* cli */
        case 0xfa:
                IP(regs)++;
                clear_IF(regs);
                return;

        /* sti */
        /*
         * Damn. This is incorrect: the 'sti' instruction should actually
         * enable interrupts after the /next/ instruction. Not good.
         *
         * Probably needs some horsing around with the TF flag. Aiee..
         */
        case 0xfb:
                IP(regs)++;
                set_IF(regs);
                return;

        default:
                return_to_32bit(regs, VM86_UNKNOWN);
        }
}
