root/kernel/vm86.c

DEFINITIONS

This source file includes the following definitions.
  1. save_v86_state
  2. mark_screen_rdonly
  3. sys_vm86
  4. return_to_32bit
  5. set_IF
  6. clear_IF
  7. clear_TF
  8. set_vflags_long
  9. set_vflags_short
  10. get_vflags
  11. is_revectored
  12. do_int
  13. handle_vm86_debug
  14. handle_vm86_fault

/*
 *  linux/kernel/vm86.c
 *
 *  Copyright (C) 1994  Linus Torvalds
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/ptrace.h>

#include <asm/segment.h>
#include <asm/io.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet..
 *
 * Hopefully these problems do not actually matter for anything.
 */

/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)        (((unsigned char *)&((regs)->eax))[0])
#define AH(regs)        (((unsigned char *)&((regs)->eax))[1])
#define IP(regs)        (*(unsigned short *)&((regs)->eip))
#define SP(regs)        (*(unsigned short *)&((regs)->esp))

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS  (*(unsigned short *)&(current->v86flags))
#define VEFLAGS (current->v86flags)

#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

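/*
 * SAFE_MASK covers the eflags bits a vm86 task may change directly:
 * CF, PF, AF, ZF, SF, TF, DF and OF.  IF, IOPL and NT are deliberately
 * excluded and stay under kernel control.  RETURN_MASK selects the real
 * eflags bits that get_vflags() reports back; the IF bit it reports is
 * taken from the virtual VIF rather than from the real eflags.
 */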
#define SAFE_MASK       (0xDD5)
#define RETURN_MASK     (0xDFF)

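/*
 * Leave vm86 mode: copy the vm86 register image and the screen bitmap
 * back into the user's vm86_struct, restore the kernel stack pointer
 * saved in saved_kernel_stack, and return the 32-bit pt_regs frame that
 * was current when vm86() was entered so the caller can resume normal
 * protected-mode execution.
 */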
asmlinkage struct pt_regs * save_v86_state(struct vm86_regs * regs)
{
        unsigned long tmp;

        if (!current->vm86_info) {
                printk("no vm86_info: BAD\n");
                do_exit(SIGSEGV);
        }
        set_flags(regs->eflags, VEFLAGS, VIF_MASK | current->v86mask);
        memcpy_tofs(&current->vm86_info->regs,regs,sizeof(*regs));
        put_fs_long(current->screen_bitmap,&current->vm86_info->screen_bitmap);
        tmp = current->tss.esp0;
        current->tss.esp0 = current->saved_kernel_stack;
        current->saved_kernel_stack = 0;
        return (struct pt_regs *) tmp;
}

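/*
 * Write-protect the 32 pages covering 0xA0000-0xBFFFF (the VGA/MDA video
 * memory window): follow cr3 to the first page-directory entry, which maps
 * the low 4MB where the vm86 address space lives, and clear the RW bit on
 * the page-table entries that are present.  Used for VM86_SCREEN_BITMAP,
 * so that later writes to screen memory fault and can be tracked (the
 * bitmap itself is maintained elsewhere).
 */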
static void mark_screen_rdonly(struct task_struct * tsk)
{
        unsigned long tmp;
        unsigned long *pg_table;

        if ((tmp = tsk->tss.cr3) != 0) {
                tmp = *(unsigned long *) tmp;
                if (tmp & PAGE_PRESENT) {
                        tmp &= PAGE_MASK;
                        pg_table = (0xA0000 >> PAGE_SHIFT) + (unsigned long *) tmp;
                        tmp = 32;
                        while (tmp--) {
                                if (PAGE_PRESENT & *pg_table)
                                        *pg_table &= ~PAGE_RW;
                                pg_table++;
                        }
                }
        }
}

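/*
 * Rough usage sketch (not part of this file; the vm86() wrapper and the
 * register values are illustrative only).  A user program fills in a
 * struct vm86_struct with real-mode register values and calls vm86();
 * the call only "returns" once something happens that cannot be handled
 * transparently in v86 mode, with a VM86_* code in %eax, e.g.
 * VM86_INTx + (n << 8) for a revectored interrupt n:
 *
 *        struct vm86_struct v86;
 *        memset(&v86, 0, sizeof(v86));
 *        v86.cpu_type = CPU_386;
 *        v86.regs.cs  = 0x1000;
 *        v86.regs.eip = 0x0000;
 *        v86.regs.ss  = 0x2000;
 *        v86.regs.esp = 0xfffe;
 *        ret = vm86(&v86);
 */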
asmlinkage int sys_vm86(struct vm86_struct * v86)
{
        struct vm86_struct info;
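        /*
         * The asmlinkage calling convention passes the system call
         * argument on the kernel stack, right on top of the register
         * frame saved by the system-call entry code, so the address of
         * the first argument doubles as a pointer to that pt_regs frame.
         */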
        struct pt_regs * pt_regs = (struct pt_regs *) &v86;
        int error;

        if (current->saved_kernel_stack)
                return -EPERM;
        /* v86 must be readable (now) and writable (for save_v86_state) */
        error = verify_area(VERIFY_WRITE,v86,sizeof(*v86));
        if (error)
                return error;
        memcpy_fromfs(&info,v86,sizeof(info));
/*
 * make sure the vm86() system call doesn't try to do anything silly
 */
        info.regs.__null_ds = 0;
        info.regs.__null_es = 0;
        info.regs.__null_fs = 0;
        info.regs.__null_gs = 0;
/*
 * The eflags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
        VEFLAGS = info.regs.eflags;
        info.regs.eflags &= SAFE_MASK;
        info.regs.eflags |= pt_regs->eflags & ~SAFE_MASK;
        info.regs.eflags |= VM_MASK;

        switch (info.cpu_type) {
                case CPU_286:
                        current->v86mask = 0;
                        break;
                case CPU_386:
                        current->v86mask = NT_MASK | IOPL_MASK;
                        break;
                case CPU_486:
                        current->v86mask = AC_MASK | NT_MASK | IOPL_MASK;
                        break;
                default:
                        current->v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
                        break;
        }

/*
 * Save old state, set default return value (%eax) to 0
 */
        pt_regs->eax = 0;
        current->saved_kernel_stack = current->tss.esp0;
        current->tss.esp0 = (unsigned long) pt_regs;
        current->vm86_info = v86;

        current->screen_bitmap = info.screen_bitmap;
        if (info.flags & VM86_SCREEN_BITMAP)
                mark_screen_rdonly(current);
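        /*
         * Switch the stack pointer to the vm86 register image and take
         * the normal system-call return path with it: the iret in
         * ret_from_sys_call then drops into v86 mode, since VM_MASK was
         * set in the image's eflags above.  The "return 0" below is never
         * reached; the call only returns later via return_to_32bit().
         */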
        __asm__ __volatile__("movl %0,%%esp\n\t"
                "jmp ret_from_sys_call"
                : /* no outputs */
                :"r" (&info.regs));
        return 0;
}

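/*
 * Leave v86 mode for good: save the vm86 state back to user space, place
 * the VM86_* return code in the saved %eax and go out through
 * ret_from_sys_call on the original 32-bit register frame, so the vm86()
 * system call finally appears to return 'retval' to its caller.
 */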
static inline void return_to_32bit(struct vm86_regs * regs16, int retval)
{
        struct pt_regs * regs32;

        regs32 = save_v86_state(regs16);
        regs32->eax = retval;
        __asm__ __volatile__("movl %0,%%esp\n\t"
                "jmp ret_from_sys_call"
                : : "r" (regs32));
}

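/*
 * Virtual interrupt flag handling: the real IF stays set so the kernel
 * keeps receiving interrupts, while the vm86 task's own notion of IF is
 * kept as VIF in VEFLAGS.  If VIP ("virtual interrupt pending") has been
 * set, typically by the 32-bit monitor via the eflags it passed in, an
 * emulated 'sti' returns to the monitor immediately with VM86_STI.
 */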
static inline void set_IF(struct vm86_regs * regs)
{
        VEFLAGS |= VIF_MASK;
        if (VEFLAGS & VIP_MASK)
                return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct vm86_regs * regs)
{
        VEFLAGS &= ~VIF_MASK;
}

static inline void clear_TF(struct vm86_regs * regs)
{
        regs->eflags &= ~TF_MASK;
}

static inline void set_vflags_long(unsigned long eflags, struct vm86_regs * regs)
{
        set_flags(VEFLAGS, eflags, current->v86mask);
        set_flags(regs->eflags, eflags, SAFE_MASK);
        if (eflags & IF_MASK)
                set_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct vm86_regs * regs)
{
        set_flags(VFLAGS, flags, current->v86mask);
        set_flags(regs->eflags, flags, SAFE_MASK);
        if (flags & IF_MASK)
                set_IF(regs);
}

static inline unsigned long get_vflags(struct vm86_regs * regs)
{
        unsigned long flags = regs->eflags & RETURN_MASK;

        if (VEFLAGS & VIF_MASK)
                flags |= IF_MASK;
        return flags | (VEFLAGS & current->v86mask);
}

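/*
 * Test bit 'nr' of a user-space interrupt bitmap (addressed through the
 * %fs user-data segment): 'btl' copies the bit into the carry flag and
 * 'sbbl %0,%0' turns that into 0 (not revectored) or -1 (revectored).
 */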
static inline int is_revectored(int nr, struct revectored_struct * bitmap)
{
        __asm__ __volatile__("btl %2,%%fs:%1\n\tsbbl %0,%0"
                :"=r" (nr)
                :"m" (*bitmap),"r" (nr));
        return nr;
}

/*
 * Boy are these ugly, but we need to do the correct 16-bit arithmetic.
 * Gcc makes a mess of it, so we do it inline and use non-obvious calling
 * conventions..
 */
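/*
 * In these macros 'base' is the flat address of the real-mode segment
 * (segment << 4) and 'ptr' is the 16-bit offset within it; user memory is
 * reached through the %fs segment override, which addresses the user data
 * segment while in the kernel.  Doing 'decw'/'incw' on only the low word
 * of the offset makes the stack pointer wrap at 64K, just as it would on
 * a real 8086 stack.
 */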
#define pushb(base, ptr, val) \
__asm__ __volatile__( \
        "decw %w0\n\t" \
        "movb %2,%%fs:0(%1,%0)" \
        : "=r" (ptr) \
        : "r" (base), "q" (val), "0" (ptr))

#define pushw(base, ptr, val) \
__asm__ __volatile__( \
        "decw %w0\n\t" \
        "movb %h2,%%fs:0(%1,%0)\n\t" \
        "decw %w0\n\t" \
        "movb %b2,%%fs:0(%1,%0)" \
        : "=r" (ptr) \
        : "r" (base), "q" (val), "0" (ptr))

#define pushl(base, ptr, val) \
__asm__ __volatile__( \
        "decw %w0\n\t" \
        "rorl $16,%2\n\t" \
        "movb %h2,%%fs:0(%1,%0)\n\t" \
        "decw %w0\n\t" \
        "movb %b2,%%fs:0(%1,%0)\n\t" \
        "decw %w0\n\t" \
        "rorl $16,%2\n\t" \
        "movb %h2,%%fs:0(%1,%0)\n\t" \
        "decw %w0\n\t" \
        "movb %b2,%%fs:0(%1,%0)" \
        : "=r" (ptr) \
        : "r" (base), "q" (val), "0" (ptr))

#define popb(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
        "movb %%fs:0(%1,%0),%b2\n\t" \
        "incw %w0" \
        : "=r" (ptr), "=r" (base), "=q" (__res) \
        : "0" (ptr), "1" (base), "2" (0)); \
__res; })

#define popw(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
        "movb %%fs:0(%1,%0),%b2\n\t" \
        "incw %w0\n\t" \
        "movb %%fs:0(%1,%0),%h2\n\t" \
        "incw %w0" \
        : "=r" (ptr), "=r" (base), "=q" (__res) \
        : "0" (ptr), "1" (base), "2" (0)); \
__res; })

#define popl(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
        "movb %%fs:0(%1,%0),%b2\n\t" \
        "incw %w0\n\t" \
        "movb %%fs:0(%1,%0),%h2\n\t" \
        "incw %w0\n\t" \
        "rorl $16,%2\n\t" \
        "movb %%fs:0(%1,%0),%b2\n\t" \
        "incw %w0\n\t" \
        "movb %%fs:0(%1,%0),%h2\n\t" \
        "incw %w0\n\t" \
        "rorl $16,%2" \
        : "=r" (ptr), "=r" (base), "=q" (__res) \
        : "0" (ptr), "1" (base)); \
__res; })

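/*
 * Reflect a software interrupt into the vm86 task: fetch the handler's
 * cs:ip from the real-mode interrupt vector table at linear address i*4
 * (offset word first, segment word at i*4+2), push FLAGS, CS and IP on
 * the guest stack and redirect execution to the handler with TF and IF
 * cleared, just as a real-mode 'int' would.  Vectors pointing into the
 * BIOS segment, and interrupts the 32-bit monitor has marked as
 * revectored, are bounced back to it as VM86_INTx instead.
 */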
static void do_int(struct vm86_regs *regs, int i, unsigned char * ssp, unsigned long sp)
{
        unsigned short seg = get_fs_word((void *) ((i<<2)+2));

        if (seg == BIOSSEG || regs->cs == BIOSSEG ||
            is_revectored(i, &current->vm86_info->int_revectored))
                return_to_32bit(regs, VM86_INTx + (i << 8));
        if (i==0x21 && is_revectored(AH(regs),&current->vm86_info->int21_revectored))
                return_to_32bit(regs, VM86_INTx + (i << 8));
        pushw(ssp, sp, get_vflags(regs));
        pushw(ssp, sp, regs->cs);
        pushw(ssp, sp, IP(regs));
        regs->cs = seg;
        SP(regs) -= 6;
        IP(regs) = get_fs_word((void *) (i<<2));
        clear_TF(regs);
        clear_IF(regs);
        return;
}

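/*
 * Debug trap (int 1) taken in v86 mode.  Rather than reflecting it into
 * the vm86 task (the disabled code path below), report it as a SIGTRAP,
 * unblocking the signal first when the task is being ptraced so that a
 * debugger always gets to see it.
 */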
void handle_vm86_debug(struct vm86_regs * regs, long error_code)
{
#if 0
        do_int(regs, 1, (unsigned char *) (regs->ss << 4), SP(regs));
#else
        if (current->flags & PF_PTRACED)
                current->blocked &= ~(1 << (SIGTRAP-1));
        send_sig(SIGTRAP, current, 1);
        current->tss.trap_no = 1;
        current->tss.error_code = error_code;
#endif
}

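/*
 * General protection fault from v86 mode.  The task runs with IOPL below
 * 3, so the IF-sensitive instructions (pushf/popf, int, iret, cli, sti)
 * trap to the kernel; decode the faulting instruction at cs:ip and emulate
 * it against the virtual interrupt flag.  Anything else is handed back to
 * the 32-bit monitor as VM86_UNKNOWN.
 */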
void handle_vm86_fault(struct vm86_regs * regs, long error_code)
{
        unsigned char *csp, *ssp;
        unsigned long ip, sp;

        csp = (unsigned char *) (regs->cs << 4);
        ssp = (unsigned char *) (regs->ss << 4);
        sp = SP(regs);
        ip = IP(regs);

        switch (popb(csp, ip)) {

        /* operand size override */
        case 0x66:
                switch (popb(csp, ip)) {

                /* pushfd */
                case 0x9c:
                        SP(regs) -= 4;
                        IP(regs) += 2;
                        pushl(ssp, sp, get_vflags(regs));
                        return;

                /* popfd */
                case 0x9d:
                        SP(regs) += 4;
                        IP(regs) += 2;
                        set_vflags_long(popl(ssp, sp), regs);
                        return;
                }
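                /*
                 * Note: any other opcode after the 0x66 prefix falls
                 * through into the 16-bit pushf case below.
                 */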

        /* pushf */
        case 0x9c:
                SP(regs) -= 2;
                IP(regs)++;
                pushw(ssp, sp, get_vflags(regs));
                return;

        /* popf */
        case 0x9d:
                SP(regs) += 2;
                IP(regs)++;
                set_vflags_short(popw(ssp, sp), regs);
                return;

        /* int 3 */
        case 0xcc:
                IP(regs)++;
                do_int(regs, 3, ssp, sp);
                return;

        /* int xx */
        case 0xcd:
                IP(regs) += 2;
                do_int(regs, popb(csp, ip), ssp, sp);
                return;

        /* iret */
        case 0xcf:
                SP(regs) += 6;
                IP(regs) = popw(ssp, sp);
                regs->cs = popw(ssp, sp);
                set_vflags_short(popw(ssp, sp), regs);
                return;

        /* cli */
        case 0xfa:
                IP(regs)++;
                clear_IF(regs);
                return;

        /* sti */
        /*
         * Damn. This is incorrect: the 'sti' instruction should actually
         * enable interrupts after the /next/ instruction. Not good.
         *
         * Probably needs some horsing around with the TF flag. Aiee..
         */
        case 0xfb:
                IP(regs)++;
                set_IF(regs);
                return;

        default:
                return_to_32bit(regs, VM86_UNKNOWN);
        }
}
