root/arch/i386/kernel/vm86.c

DEFINITIONS

This source file includes the following definitions:
  1. save_v86_state
  2. mark_screen_rdonly
  3. sys_vm86
  4. return_to_32bit
  5. set_IF
  6. clear_IF
  7. clear_TF
  8. set_vflags_long
  9. set_vflags_short
  10. get_vflags
  11. is_revectored
  12. do_int
  13. handle_vm86_debug
  14. handle_vm86_fault

/*
 *  linux/kernel/vm86.c
 *
 *  Copyright (C) 1994  Linus Torvalds
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/mm.h>

#include <asm/segment.h>
#include <asm/io.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet..
 *
 * Hopefully these problems do not actually matter for anything.
 */
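
/*
 * Example of the first problem: real-mode code switches stacks with
 * a sequence like
 *
 *      mov ss,ax
 *      mov sp,bx
 *
 * On a real x86 no interrupt can arrive between the two instructions,
 * so the stack is never observed half-switched.  In v86 mode the
 * "mov sp,bx" may itself cause a page fault, at which point ss:sp is
 * inconsistent.
 */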

/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)        (((unsigned char *)&((regs)->eax))[0])
#define AH(regs)        (((unsigned char *)&((regs)->eax))[1])
#define IP(regs)        (*(unsigned short *)&((regs)->eip))
#define SP(regs)        (*(unsigned short *)&((regs)->esp))

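/*
 * These alias the low bytes/words of the saved 32-bit registers in
 * place: on little-endian x86, if regs->eax == 0x00001234 then
 *
 *      AL(regs) == 0x34, AH(regs) == 0x12,
 *
 * and an assignment such as "AH(regs) = 0x4c" rewrites only bits
 * 8-15, leaving eax == 0x00004c34.
 */
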
/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS  (*(unsigned short *)&(current->tss.v86flags))
#define VEFLAGS (current->tss.v86flags)

#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK       (0xDD5)
#define RETURN_MASK     (0xDFF)

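/*
 * set_flags(X,new,mask) takes the bits selected by 'mask' from 'new'
 * and leaves the rest of X alone.  SAFE_MASK (0xDD5) selects the
 * flags a v86 task may change freely: CF, PF, AF, ZF, SF, TF, DF and
 * OF - notably not IF or IOPL.  RETURN_MASK (0xDFF) additionally
 * includes the reserved low bits and is what get_vflags() reports
 * back; IF (bit 9) is still excluded and synthesized from VIF.
 */
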
asmlinkage struct pt_regs * save_v86_state(struct vm86_regs * regs)
{
        unsigned long tmp;

        if (!current->tss.vm86_info) {
                printk("no vm86_info: BAD\n");
                do_exit(SIGSEGV);
        }
        set_flags(regs->eflags, VEFLAGS, VIF_MASK | current->tss.v86mask);
        memcpy_tofs(&current->tss.vm86_info->regs,regs,sizeof(*regs));
        put_fs_long(current->tss.screen_bitmap,&current->tss.vm86_info->screen_bitmap);
        tmp = current->tss.esp0;
        current->tss.esp0 = current->saved_kernel_stack;
        current->saved_kernel_stack = 0;
        return (struct pt_regs *) tmp;
}

static void mark_screen_rdonly(struct task_struct * tsk)
{
        pgd_t *pg_dir;

        pg_dir = PAGE_DIR_OFFSET(tsk, 0);
        if (!pgd_none(*pg_dir)) {
                pte_t *pg_table;
                int i;

                if (pgd_bad(*pg_dir)) {
                        printk("vm86: bad page table directory entry %08lx\n", pgd_val(*pg_dir));
                        pgd_clear(pg_dir);
                        return;
                }
                pg_table = (pte_t *) pgd_page(*pg_dir);
                pg_table += 0xA0000 >> PAGE_SHIFT;
                for (i = 0 ; i < 32 ; i++) {
                        if (pte_present(*pg_table))
                                *pg_table = pte_wrprotect(*pg_table);
                        pg_table++;
                }
        }
}

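/*
 * The arithmetic above: 0xA0000 >> PAGE_SHIFT is the page table index
 * of the start of the VGA window, and the 32 4kB pages that follow
 * cover 0xA0000-0xBFFFF, i.e. the whole 128kB of legacy video memory.
 * Write-protecting them makes every screen store fault, which is what
 * lets tss.screen_bitmap track the pages the v86 task has touched.
 */
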
asmlinkage int sys_vm86(struct vm86_struct * v86)
{
        struct vm86_struct info;
        struct pt_regs * pt_regs = (struct pt_regs *) &v86;
        int error;

        if (current->saved_kernel_stack)
                return -EPERM;
        /* v86 must be readable (now) and writable (for save_v86_state) */
        error = verify_area(VERIFY_WRITE,v86,sizeof(*v86));
        if (error)
                return error;
        memcpy_fromfs(&info,v86,sizeof(info));
/*
 * make sure the vm86() system call doesn't try to do anything silly
 */
        info.regs.__null_ds = 0;
        info.regs.__null_es = 0;
        info.regs.__null_fs = 0;
        info.regs.__null_gs = 0;
/*
 * The eflags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
        VEFLAGS = info.regs.eflags;
        info.regs.eflags &= SAFE_MASK;
        info.regs.eflags |= pt_regs->eflags & ~SAFE_MASK;
        info.regs.eflags |= VM_MASK;

        switch (info.cpu_type) {
                case CPU_286:
                        current->tss.v86mask = 0;
                        break;
                case CPU_386:
                        current->tss.v86mask = NT_MASK | IOPL_MASK;
                        break;
                case CPU_486:
                        current->tss.v86mask = AC_MASK | NT_MASK | IOPL_MASK;
                        break;
                default:
                        current->tss.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
                        break;
        }
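
/*
 * tss.v86mask holds the eflags bits the emulated CPU type is allowed
 * to play with on top of SAFE_MASK: a virtual 286 gets none, a 386
 * adds NT and IOPL, a 486 adds AC, and anything newer also gets ID.
 */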

/*
 * Save old state, set default return value (%eax) to 0
 */
        pt_regs->eax = 0;
        current->saved_kernel_stack = current->tss.esp0;
        current->tss.esp0 = (unsigned long) pt_regs;
        current->tss.vm86_info = v86;

        current->tss.screen_bitmap = info.screen_bitmap;
        if (info.flags & VM86_SCREEN_BITMAP)
                mark_screen_rdonly(current);
        __asm__ __volatile__("movl %0,%%esp\n\t"
                "jmp ret_from_sys_call"
                : /* no outputs */
                :"r" (&info.regs));
        return 0;
}
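
/*
 * Note that sys_vm86() never returns normally: the asm above points
 * %esp at info.regs and jumps straight to ret_from_sys_call, so the
 * register-restore path pops the v86 register image and the final
 * iret drops into v86 mode.  The "return 0" exists only to satisfy
 * the compiler; control comes back via save_v86_state() instead.
 */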

static inline void return_to_32bit(struct vm86_regs * regs16, int retval)
{
        struct pt_regs * regs32;

        regs32 = save_v86_state(regs16);
        regs32->eax = retval;
        __asm__ __volatile__("movl %0,%%esp\n\t"
                "jmp ret_from_sys_call"
                : : "r" (regs32));
}

static inline void set_IF(struct vm86_regs * regs)
{
        VEFLAGS |= VIF_MASK;
        if (VEFLAGS & VIP_MASK)
                return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct vm86_regs * regs)
{
        VEFLAGS &= ~VIF_MASK;
}

static inline void clear_TF(struct vm86_regs * regs)
{
        regs->eflags &= ~TF_MASK;
}

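/*
 * The real IF is never changed on behalf of the v86 task: set_IF()
 * and clear_IF() only toggle the virtual interrupt flag (VIF) in
 * tss.v86flags.  If the monitor has marked an interrupt as pending
 * (VIP), set_IF() immediately bounces back to 32-bit mode with
 * VM86_STI so the pending event can be delivered.
 */
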
static inline void set_vflags_long(unsigned long eflags, struct vm86_regs * regs)
{
        set_flags(VEFLAGS, eflags, current->tss.v86mask);
        set_flags(regs->eflags, eflags, SAFE_MASK);
        if (eflags & IF_MASK)
                set_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct vm86_regs * regs)
{
        set_flags(VFLAGS, flags, current->tss.v86mask);
        set_flags(regs->eflags, flags, SAFE_MASK);
        if (flags & IF_MASK)
                set_IF(regs);
}

static inline unsigned long get_vflags(struct vm86_regs * regs)
{
        unsigned long flags = regs->eflags & RETURN_MASK;

        if (VEFLAGS & VIF_MASK)
                flags |= IF_MASK;
        return flags | (VEFLAGS & current->tss.v86mask);
}

static inline int is_revectored(int nr, struct revectored_struct * bitmap)
{
        __asm__ __volatile__("btl %2,%%fs:%1\n\tsbbl %0,%0"
                :"=r" (nr)
                :"m" (*bitmap),"r" (nr));
        return nr;
}
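
/*
 * The btl/sbbl pair is a branchless bit test: "btl" copies bit 'nr'
 * of the user-space bitmap (%fs points at the user data segment)
 * into the carry flag, and "sbbl %0,%0" computes nr - nr - CF, i.e.
 * 0 if the bit was clear and -1 if it was set.
 */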

/*
 * Boy are these ugly, but we need to do the correct 16-bit arithmetic.
 * Gcc makes a mess of it, so we do it inline and use non-obvious calling
 * conventions..
 */
#define pushb(base, ptr, val) \
__asm__ __volatile__( \
        "decw %w0\n\t" \
        "movb %2,%%fs:0(%1,%0)" \
        : "=r" (ptr) \
        : "r" (base), "q" (val), "0" (ptr))

#define pushw(base, ptr, val) \
__asm__ __volatile__( \
        "decw %w0\n\t" \
        "movb %h2,%%fs:0(%1,%0)\n\t" \
        "decw %w0\n\t" \
        "movb %b2,%%fs:0(%1,%0)" \
        : "=r" (ptr) \
        : "r" (base), "q" (val), "0" (ptr))

#define pushl(base, ptr, val) \
__asm__ __volatile__( \
        "decw %w0\n\t" \
        "rorl $16,%2\n\t" \
        "movb %h2,%%fs:0(%1,%0)\n\t" \
        "decw %w0\n\t" \
        "movb %b2,%%fs:0(%1,%0)\n\t" \
        "decw %w0\n\t" \
        "rorl $16,%2\n\t" \
        "movb %h2,%%fs:0(%1,%0)\n\t" \
        "decw %w0\n\t" \
        "movb %b2,%%fs:0(%1,%0)" \
        : "=r" (ptr) \
        : "r" (base), "q" (val), "0" (ptr))

#define popb(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
        "movb %%fs:0(%1,%0),%b2\n\t" \
        "incw %w0" \
        : "=r" (ptr), "=r" (base), "=q" (__res) \
        : "0" (ptr), "1" (base), "2" (0)); \
__res; })

#define popw(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
        "movb %%fs:0(%1,%0),%b2\n\t" \
        "incw %w0\n\t" \
        "movb %%fs:0(%1,%0),%h2\n\t" \
        "incw %w0" \
        : "=r" (ptr), "=r" (base), "=q" (__res) \
        : "0" (ptr), "1" (base), "2" (0)); \
__res; })

#define popl(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
        "movb %%fs:0(%1,%0),%b2\n\t" \
        "incw %w0\n\t" \
        "movb %%fs:0(%1,%0),%h2\n\t" \
        "incw %w0\n\t" \
        "rorl $16,%2\n\t" \
        "movb %%fs:0(%1,%0),%b2\n\t" \
        "incw %w0\n\t" \
        "movb %%fs:0(%1,%0),%h2\n\t" \
        "incw %w0\n\t" \
        "rorl $16,%2" \
        : "=r" (ptr), "=r" (base), "=q" (__res) \
        : "0" (ptr), "1" (base)); \
__res; })
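
/*
 * The point of the "decw"/"incw" above is that only the low 16 bits
 * of the stack offset are stepped, so it wraps at 64kB exactly like
 * sp does on a real-mode push/pop.  pushw(base, ptr, val), for
 * instance, stores the high byte and then the low byte of 'val' at
 * %fs:(base+ptr), decrementing ptr (mod 0x10000) before each store.
 */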

static void do_int(struct vm86_regs *regs, int i, unsigned char * ssp, unsigned long sp)
{
        unsigned short seg = get_fs_word((void *) ((i<<2)+2));

        if (seg == BIOSSEG || regs->cs == BIOSSEG ||
            is_revectored(i, &current->tss.vm86_info->int_revectored))
                return_to_32bit(regs, VM86_INTx + (i << 8));
        if (i==0x21 && is_revectored(AH(regs),&current->tss.vm86_info->int21_revectored))
                return_to_32bit(regs, VM86_INTx + (i << 8));
        pushw(ssp, sp, get_vflags(regs));
        pushw(ssp, sp, regs->cs);
        pushw(ssp, sp, IP(regs));
        regs->cs = seg;
        SP(regs) -= 6;
        IP(regs) = get_fs_word((void *) (i<<2));
        clear_TF(regs);
        clear_IF(regs);
        return;
}
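
/*
 * Afterwards the 16-bit stack looks exactly as a real-mode INT would
 * leave it (sp being the value of SP(regs) on entry):
 *
 *      ss:sp-2    FLAGS (from get_vflags())
 *      ss:sp-4    CS    return segment
 *      ss:sp-6    IP    return offset
 *
 * while cs:ip now point at the handler taken from the real-mode
 * vector table entry at 0:(i*4).
 */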

void handle_vm86_debug(struct vm86_regs * regs, long error_code)
{
#if 0
        do_int(regs, 1, (unsigned char *) (regs->ss << 4), SP(regs));
#else
        if (current->flags & PF_PTRACED)
                current->blocked &= ~(1 << (SIGTRAP-1));
        send_sig(SIGTRAP, current, 1);
        current->tss.trap_no = 1;
        current->tss.error_code = error_code;
#endif
}

void handle_vm86_fault(struct vm86_regs * regs, long error_code)
{
        unsigned char *csp, *ssp;
        unsigned long ip, sp;

        csp = (unsigned char *) (regs->cs << 4);
        ssp = (unsigned char *) (regs->ss << 4);
        sp = SP(regs);
        ip = IP(regs);

        switch (popb(csp, ip)) {

        /* operand size override */
        case 0x66:
                switch (popb(csp, ip)) {

                /* pushfd */
                case 0x9c:
                        SP(regs) -= 4;
                        IP(regs) += 2;
                        pushl(ssp, sp, get_vflags(regs));
                        return;

                /* popfd */
                case 0x9d:
                        SP(regs) += 4;
                        IP(regs) += 2;
                        set_vflags_long(popl(ssp, sp), regs);
                        return;
                }
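                /*
                 * note: any other opcode after the 0x66 prefix falls
                 * through into the 16-bit "pushf" case below
                 */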

        /* pushf */
        case 0x9c:
                SP(regs) -= 2;
                IP(regs)++;
                pushw(ssp, sp, get_vflags(regs));
                return;

        /* popf */
        case 0x9d:
                SP(regs) += 2;
                IP(regs)++;
                set_vflags_short(popw(ssp, sp), regs);
                return;

        /* int 3 */
        case 0xcc:
                IP(regs)++;
                do_int(regs, 3, ssp, sp);
                return;

        /* int xx */
        case 0xcd:
                IP(regs) += 2;
                do_int(regs, popb(csp, ip), ssp, sp);
                return;

        /* iret */
        case 0xcf:
                SP(regs) += 6;
                IP(regs) = popw(ssp, sp);
                regs->cs = popw(ssp, sp);
                set_vflags_short(popw(ssp, sp), regs);
                return;

        /* cli */
        case 0xfa:
                IP(regs)++;
                clear_IF(regs);
                return;

        /* sti */
        /*
         * Damn. This is incorrect: the 'sti' instruction should actually
         * enable interrupts after the /next/ instruction. Not good.
         *
         * Probably needs some horsing around with the TF flag. Aiee..
         */
        case 0xfb:
                IP(regs)++;
                set_IF(regs);
                return;

        default:
                return_to_32bit(regs, VM86_UNKNOWN);
        }
}
