root/arch/i386/kernel/vm86.c


DEFINITIONS

This source file includes the following definitions.
  1. save_v86_state
  2. mark_screen_rdonly
  3. sys_vm86
  4. return_to_32bit
  5. set_IF
  6. clear_IF
  7. clear_TF
  8. set_vflags_long
  9. set_vflags_short
  10. get_vflags
  11. is_revectored
  12. do_int
  13. handle_vm86_debug
  14. handle_vm86_fault

   1 /*
   2  *  linux/kernel/vm86.c
   3  *
   4  *  Copyright (C) 1994  Linus Torvalds
   5  */
   6 #include <linux/errno.h>
   7 #include <linux/sched.h>
   8 #include <linux/kernel.h>
   9 #include <linux/signal.h>
  10 #include <linux/string.h>
  11 #include <linux/ptrace.h>
  12 
  13 #include <asm/segment.h>
  14 #include <asm/io.h>
  15 
  16 /*
  17  * Known problems:
  18  *
  19  * Interrupt handling is not guaranteed:
  20  * - a real x86 will disable all interrupts for one instruction
  21  *   after a "mov ss,xx" to make stack handling atomic even without
  22  *   the 'lss' instruction. We can't guarantee this in v86 mode,
  23  *   as the next instruction might result in a page fault or similar.
  24  * - a real x86 will have interrupts disabled for one instruction
  25  *   past the 'sti' that enables them. We don't bother with all the
  26  *   details yet..
  27  *
  28  * Hopefully these problems do not actually matter for anything.
  29  */
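      /*
       * For illustration: the classic real-mode stack switch that relies on
       * the one-instruction interrupt shadow looks roughly like
       *
       *         mov   ss,ax        ; real hardware holds interrupts off...
       *         mov   sp,bx        ; ...until after this instruction
       *
       * In v86 mode nothing prevents an interrupt (or a page fault on the
       * second instruction) from arriving between the two, which is the
       * gap described above.
       */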
  30 
  31 /*
  32  * 8- and 16-bit register defines..
  33  */
  34 #define AL(regs)        (((unsigned char *)&((regs)->eax))[0])
  35 #define AH(regs)        (((unsigned char *)&((regs)->eax))[1])
  36 #define IP(regs)        (*(unsigned short *)&((regs)->eip))
  37 #define SP(regs)        (*(unsigned short *)&((regs)->esp))
  38 
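      /*
       * Example: on little-endian x86, if regs->eax == 0x12345678 then
       * AL(regs) is 0x78 and AH(regs) is 0x56.  IP(regs) and SP(regs)
       * alias the low 16 bits of eip and esp, which is all a v86 program
       * ever sees of them.
       */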
  39 /*
  40  * virtual flags (16 and 32-bit versions)
  41  */
  42 #define VFLAGS  (*(unsigned short *)&(current->tss.v86flags))
  43 #define VEFLAGS (current->tss.v86flags)
  44 
  45 #define set_flags(X,new,mask) \
  46 ((X) = ((X) & ~(mask)) | ((new) & (mask)))
  47 
  48 #define SAFE_MASK       (0xDD5)
  49 #define RETURN_MASK     (0xDFF)
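      /*
       * set_flags(X,new,mask) keeps the bits of X outside 'mask' and takes
       * the bits inside 'mask' from 'new'.  SAFE_MASK (0xDD5) covers the
       * flags a v86 program may change directly: CF, PF, AF, ZF, SF, TF, DF
       * and OF - notably not IF, IOPL or NT.  RETURN_MASK (0xDFF) selects
       * the real eflags bits reported by get_vflags(); IF is left out of it
       * because the virtual IF from VEFLAGS is substituted instead.
       */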
  50 
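      /*
       * save_v86_state() is called when the task leaves v86 mode: it merges
       * the virtual flags into the saved eflags, copies the final register
       * image and screen bitmap back to the caller's vm86_struct, restores
       * the kernel stack pointer saved by sys_vm86(), and returns the 32-bit
       * pt_regs so ret_from_sys_call resumes the original vm86() caller.
       */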
  51 asmlinkage struct pt_regs * save_v86_state(struct vm86_regs * regs)
  52 {
  53         unsigned long tmp;
  54 
  55         if (!current->tss.vm86_info) {
  56                 printk("no vm86_info: BAD\n");
  57                 do_exit(SIGSEGV);
  58         }
  59         set_flags(regs->eflags, VEFLAGS, VIF_MASK | current->tss.v86mask);
  60         memcpy_tofs(&current->tss.vm86_info->regs,regs,sizeof(*regs));
  61         put_fs_long(current->tss.screen_bitmap,&current->tss.vm86_info->screen_bitmap);
  62         tmp = current->tss.esp0;
  63         current->tss.esp0 = current->saved_kernel_stack;
  64         current->saved_kernel_stack = 0;
  65         return (struct pt_regs *) tmp;
  66 }
  67 
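      /*
       * mark_screen_rdonly() write-protects the 32 page table entries
       * covering 0xA0000-0xBFFFF (the VGA window), so that stores to video
       * memory fault first and can presumably be tracked through
       * screen_bitmap by the fault handling code elsewhere.  Only the first
       * page directory entry is examined, as the whole area lies below 4MB.
       */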
  68 static void mark_screen_rdonly(struct task_struct * tsk)
  69 {
  70         pgd_t *pg_dir;
  71 
  72         pg_dir = PAGE_DIR_OFFSET(tsk, 0);
  73         if (!pgd_none(*pg_dir)) {
  74                 pte_t *pg_table;
  75                 int i;
  76 
  77                 if (pgd_bad(*pg_dir)) {
  78                         printk("vm86: bad page table directory entry %08lx\n", pgd_val(*pg_dir));
  79                         pgd_clear(pg_dir);
  80                         return;
  81                 }
  82                 pg_table = (pte_t *) pgd_page(*pg_dir);
  83                 pg_table += 0xA0000 >> PAGE_SHIFT;
  84                 for (i = 0 ; i < 32 ; i++) {
  85                         if (pte_present(*pg_table))
  86                                 *pg_table = pte_wrprotect(*pg_table);
  87                         pg_table++;
  88                 }
  89         }
  90 }
  91 
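      /*
       * sys_vm86() switches the calling task into virtual-8086 mode: it
       * copies the vm86_struct from user space, clears the protected-mode
       * segment registers, sanitizes eflags so the task cannot grant itself
       * IOPL or a real IF, saves the current kernel stack pointer, and then
       * jumps to ret_from_sys_call with the v86 register image as the new
       * stack frame.
       */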
  92 asmlinkage int sys_vm86(struct vm86_struct * v86)
  93 {
  94         struct vm86_struct info;
  95         struct pt_regs * pt_regs = (struct pt_regs *) &v86;
  96         int error;
  97 
  98         if (current->saved_kernel_stack)
  99                 return -EPERM;
 100         /* v86 must be readable (now) and writable (for save_v86_state) */
 101         error = verify_area(VERIFY_WRITE,v86,sizeof(*v86));
 102         if (error)
 103                 return error;
 104         memcpy_fromfs(&info,v86,sizeof(info));
 105 /*
 106  * make sure the vm86() system call doesn't try to do anything silly
 107  */
 108         info.regs.__null_ds = 0;
 109         info.regs.__null_es = 0;
 110         info.regs.__null_fs = 0;
 111         info.regs.__null_gs = 0;
 112 /*
 113  * The eflags register is also special: we cannot trust that the user
 114  * has set it up safely, so this makes sure interrupt etc flags are
 115  * inherited from protected mode.
 116  */
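      /*
       * For example, if the caller passes eflags 0x3246 (IOPL=3, IF, ZF, PF)
       * while the 32-bit task runs with eflags 0x0246, the lines below keep
       * only ZF and PF from the caller, inherit IF and IOPL=0 from protected
       * mode and force VM, giving 0x20246; the caller's IF survives only as
       * the virtual IF in VEFLAGS, and its attempt at IOPL 3 is dropped.
       */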
 117         VEFLAGS = info.regs.eflags;
 118         info.regs.eflags &= SAFE_MASK;
 119         info.regs.eflags |= pt_regs->eflags & ~SAFE_MASK;
 120         info.regs.eflags |= VM_MASK;
 121 
 122         switch (info.cpu_type) {
 123                 case CPU_286:
 124                         current->tss.v86mask = 0;
 125                         break;
 126                 case CPU_386:
 127                         current->tss.v86mask = NT_MASK | IOPL_MASK;
 128                         break;
 129                 case CPU_486:
 130                         current->tss.v86mask = AC_MASK | NT_MASK | IOPL_MASK;
 131                         break;
 132                 default:
 133                         current->tss.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
 134                         break;
 135         }
 136 
 137 /*
 138  * Save old state, set default return value (%eax) to 0
 139  */
 140         pt_regs->eax = 0;
 141         current->saved_kernel_stack = current->tss.esp0;
 142         current->tss.esp0 = (unsigned long) pt_regs;
 143         current->tss.vm86_info = v86;
 144 
 145         current->tss.screen_bitmap = info.screen_bitmap;
 146         if (info.flags & VM86_SCREEN_BITMAP)
 147                 mark_screen_rdonly(current);
 148         __asm__ __volatile__("movl %0,%%esp\n\t"
 149                 "jmp ret_from_sys_call"
 150                 : /* no outputs */
 151                 :"r" (&info.regs));
 152         return 0;
 153 }
 154 
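      /*
       * return_to_32bit() abandons the v86 instruction stream: it saves the
       * v86 state back to the user's vm86_struct, stores 'retval' as the
       * value the original vm86() call will return, and jumps back to
       * ret_from_sys_call on the saved 32-bit stack.
       */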
 155 static inline void return_to_32bit(struct vm86_regs * regs16, int retval)
 156 {
 157         struct pt_regs * regs32;
 158 
 159         regs32 = save_v86_state(regs16);
 160         regs32->eax = retval;
 161         __asm__ __volatile__("movl %0,%%esp\n\t"
 162                 "jmp ret_from_sys_call"
 163                 : : "r" (regs32));
 164 }
 165 
 166 static inline void set_IF(struct vm86_regs * regs)
 167 {
 168         VEFLAGS |= VIF_MASK;
 169         if (VEFLAGS & VIP_MASK)
 170                 return_to_32bit(regs, VM86_STI);
 171 }
 172 
 173 static inline void clear_IF(struct vm86_regs * regs)
 174 {
 175         VEFLAGS &= ~VIF_MASK;
 176 }
 177 
 178 static inline void clear_TF(struct vm86_regs * regs)
 179 {
 180         regs->eflags &= ~TF_MASK;
 181 }
 182 
 183 static inline void set_vflags_long(unsigned long eflags, struct vm86_regs * regs)
 184 {
 185         set_flags(VEFLAGS, eflags, current->tss.v86mask);
 186         set_flags(regs->eflags, eflags, SAFE_MASK);
 187         if (eflags & IF_MASK)
 188                 set_IF(regs);
 189 }
 190 
 191 static inline void set_vflags_short(unsigned short flags, struct vm86_regs * regs)
 192 {
 193         set_flags(VFLAGS, flags, current->tss.v86mask);
 194         set_flags(regs->eflags, flags, SAFE_MASK);
 195         if (flags & IF_MASK)
 196                 set_IF(regs);
 197 }
 198 
 199 static inline unsigned long get_vflags(struct vm86_regs * regs)
 200 {
 201         unsigned long flags = regs->eflags & RETURN_MASK;
 202 
 203         if (VEFLAGS & VIF_MASK)
 204                 flags |= IF_MASK;
 205         return flags | (VEFLAGS & current->tss.v86mask);
 206 }
 207 
 208 static inline int is_revectored(int nr, struct revectored_struct * bitmap)
 209 {
 210         __asm__ __volatile__("btl %2,%%fs:%1\n\tsbbl %0,%0"
 211                 :"=r" (nr)
 212                 :"m" (*bitmap),"r" (nr));
 213         return nr;
 214 }
 215 
 216 /*
 217  * Boy are these ugly, but we need to do the correct 16-bit arithmetic.
 218  * Gcc makes a mess of it, so we do it inline and use non-obvious calling
 219  * conventions..
 220  */
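      /*
       * In plain C the 16-bit push is roughly (sketch only, ignoring the
       * fs segment override used to reach user space):
       *
       *         sp = (unsigned short)(sp - 1); base[sp] = val >> 8;
       *         sp = (unsigned short)(sp - 1); base[sp] = val & 0xff;
       *
       * i.e. only the low 16 bits of the offset wrap around, exactly as a
       * real-mode SP would, while the segment base stays fixed.
       */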
 221 #define pushb(base, ptr, val) \
 222 __asm__ __volatile__( \
 223         "decw %w0\n\t" \
 224         "movb %2,%%fs:0(%1,%0)" \
 225         : "=r" (ptr) \
 226         : "r" (base), "q" (val), "0" (ptr))
 227 
 228 #define pushw(base, ptr, val) \
 229 __asm__ __volatile__( \
 230         "decw %w0\n\t" \
 231         "movb %h2,%%fs:0(%1,%0)\n\t" \
 232         "decw %w0\n\t" \
 233         "movb %b2,%%fs:0(%1,%0)" \
 234         : "=r" (ptr) \
 235         : "r" (base), "q" (val), "0" (ptr))
 236 
 237 #define pushl(base, ptr, val) \
 238 __asm__ __volatile__( \
 239         "decw %w0\n\t" \
 240         "rorl $16,%2\n\t" \
 241         "movb %h2,%%fs:0(%1,%0)\n\t" \
 242         "decw %w0\n\t" \
 243         "movb %b2,%%fs:0(%1,%0)\n\t" \
 244         "decw %w0\n\t" \
 245         "rorl $16,%2\n\t" \
 246         "movb %h2,%%fs:0(%1,%0)\n\t" \
 247         "decw %w0\n\t" \
 248         "movb %b2,%%fs:0(%1,%0)" \
 249         : "=r" (ptr) \
 250         : "r" (base), "q" (val), "0" (ptr))
 251 
 252 #define popb(base, ptr) \
 253 ({ unsigned long __res; \
 254 __asm__ __volatile__( \
 255         "movb %%fs:0(%1,%0),%b2\n\t" \
 256         "incw %w0" \
 257         : "=r" (ptr), "=r" (base), "=q" (__res) \
 258         : "0" (ptr), "1" (base), "2" (0)); \
 259 __res; })
 260 
 261 #define popw(base, ptr) \
 262 ({ unsigned long __res; \
 263 __asm__ __volatile__( \
 264         "movb %%fs:0(%1,%0),%b2\n\t" \
 265         "incw %w0\n\t" \
 266         "movb %%fs:0(%1,%0),%h2\n\t" \
 267         "incw %w0" \
 268         : "=r" (ptr), "=r" (base), "=q" (__res) \
 269         : "0" (ptr), "1" (base), "2" (0)); \
 270 __res; })
 271 
 272 #define popl(base, ptr) \
 273 ({ unsigned long __res; \
 274 __asm__ __volatile__( \
 275         "movb %%fs:0(%1,%0),%b2\n\t" \
 276         "incw %w0\n\t" \
 277         "movb %%fs:0(%1,%0),%h2\n\t" \
 278         "incw %w0\n\t" \
 279         "rorl $16,%2\n\t" \
 280         "movb %%fs:0(%1,%0),%b2\n\t" \
 281         "incw %w0\n\t" \
 282         "movb %%fs:0(%1,%0),%h2\n\t" \
 283         "incw %w0\n\t" \
 284         "rorl $16,%2" \
 285         : "=r" (ptr), "=r" (base), "=q" (__res) \
 286         : "0" (ptr), "1" (base)); \
 287 __res; })
 288 
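      /*
       * do_int() emulates a real-mode software interrupt: it fetches the
       * handler's CS:IP from the interrupt vector table at 0000:i*4, pushes
       * FLAGS, CS and IP on the v86 stack, clears TF and the virtual IF and
       * vectors to the handler.  Interrupts marked in int_revectored (or
       * anything involving BIOSSEG, or INT 0x21 functions marked in
       * int21_revectored) are bounced back to the 32-bit monitor as
       * VM86_INTx instead.
       */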
 289 static void do_int(struct vm86_regs *regs, int i, unsigned char * ssp, unsigned long sp)
 290 {
 291         unsigned short seg = get_fs_word((void *) ((i<<2)+2));
 292 
 293         if (seg == BIOSSEG || regs->cs == BIOSSEG ||
 294             is_revectored(i, &current->tss.vm86_info->int_revectored))
 295                 return_to_32bit(regs, VM86_INTx + (i << 8));
 296         if (i==0x21 && is_revectored(AH(regs),&current->tss.vm86_info->int21_revectored))
 297                 return_to_32bit(regs, VM86_INTx + (i << 8));
 298         pushw(ssp, sp, get_vflags(regs));
 299         pushw(ssp, sp, regs->cs);
 300         pushw(ssp, sp, IP(regs));
 301         regs->cs = seg;
 302         SP(regs) -= 6;
 303         IP(regs) = get_fs_word((void *) (i<<2));
 304         clear_TF(regs);
 305         clear_IF(regs);
 306         return;
 307 }
 308 
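      /*
       * handle_vm86_debug() is called for a debug trap taken in v86 mode.
       * Rather than reflecting INT 1 into the v86 program (the disabled
       * alternative below), it delivers SIGTRAP to the task, unblocking
       * the signal first if the task is being ptraced.
       */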
 309 void handle_vm86_debug(struct vm86_regs * regs, long error_code)
 310 {
 311 #if 0
 312         do_int(regs, 1, (unsigned char *) (regs->ss << 4), SP(regs));
 313 #else
 314         if (current->flags & PF_PTRACED)
 315                 current->blocked &= ~(1 << (SIGTRAP-1));
 316         send_sig(SIGTRAP, current, 1);
 317         current->tss.trap_no = 1;
 318         current->tss.error_code = error_code;
 319 #endif
 320 }
 321 
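      /*
       * handle_vm86_fault() is called (from the fault handling code outside
       * this file) when a v86 task executes an IOPL-sensitive instruction.
       * It decodes the instruction at CS:IP and emulates pushf/popf (with or
       * without an operand size prefix), int n, iret, cli and sti against
       * the virtual interrupt flag; anything else is handed back to the
       * 32-bit monitor as VM86_UNKNOWN.
       */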
 322 void handle_vm86_fault(struct vm86_regs * regs, long error_code)
 323 {
 324         unsigned char *csp, *ssp;
 325         unsigned long ip, sp;
 326 
 327         csp = (unsigned char *) (regs->cs << 4);
 328         ssp = (unsigned char *) (regs->ss << 4);
 329         sp = SP(regs);
 330         ip = IP(regs);
 331 
 332         switch (popb(csp, ip)) {
 333 
 334         /* operand size override */
 335         case 0x66:
 336                 switch (popb(csp, ip)) {
 337 
 338                 /* pushfd */
 339                 case 0x9c:
 340                         SP(regs) -= 4;
 341                         IP(regs) += 2;
 342                         pushl(ssp, sp, get_vflags(regs));
 343                         return;
 344 
 345                 /* popfd */
 346                 case 0x9d:
 347                         SP(regs) += 4;
 348                         IP(regs) += 2;
 349                         set_vflags_long(popl(ssp, sp), regs);
 350                         return;
 351                 }
 352 
 353         /* pushf */
 354         case 0x9c:
 355                 SP(regs) -= 2;
 356                 IP(regs)++;
 357                 pushw(ssp, sp, get_vflags(regs));
 358                 return;
 359 
 360         /* popf */
 361         case 0x9d:
 362                 SP(regs) += 2;
 363                 IP(regs)++;
 364                 set_vflags_short(popw(ssp, sp), regs);
 365                 return;
 366 
 367         /* int 3 */
 368         case 0xcc:
 369                 IP(regs)++;
 370                 do_int(regs, 3, ssp, sp);
 371                 return;
 372 
 373         /* int xx */
 374         case 0xcd:
 375                 IP(regs) += 2;
 376                 do_int(regs, popb(csp, ip), ssp, sp);
 377                 return;
 378 
 379         /* iret */
 380         case 0xcf:
 381                 SP(regs) += 6;
 382                 IP(regs) = popw(ssp, sp);
 383                 regs->cs = popw(ssp, sp);
 384                 set_vflags_short(popw(ssp, sp), regs);
 385                 return;
 386 
 387         /* cli */
 388         case 0xfa:
 389                 IP(regs)++;
 390                 clear_IF(regs);
 391                 return;
 392 
 393         /* sti */
 394         /*
 395          * Damn. This is incorrect: the 'sti' instruction should actually
 396          * enable interrupts after the /next/ instruction. Not good.
 397          *
 398          * Probably needs some horsing around with the TF flag. Aiee..
 399          */
 400         case 0xfb:
 401                 IP(regs)++;
 402                 set_IF(regs);
 403                 return;
 404 
 405         default:
 406                 return_to_32bit(regs, VM86_UNKNOWN);
 407         }
 408 }
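      /*
       * A v86 monitor (dosemu, for instance) would typically fill in a
       * vm86_struct with 16-bit CS:IP, SS:SP and eflags for the virtual
       * task, invoke this system call directly, and then dispatch on the
       * value vm86() eventually returns: VM86_INTx + (n << 8) for a
       * revectored interrupt n, VM86_STI when the program enabled virtual
       * interrupts while one was pending, VM86_UNKNOWN for an instruction
       * that could not be emulated.
       */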
