root/kernel/vm86.c


DEFINITIONS

This source file includes the following definitions:
  1. save_v86_state
  2. mark_screen_rdonly
  3. sys_vm86
  4. return_to_32bit
  5. set_IF
  6. clear_IF
  7. clear_TF
  8. set_vflags_long
  9. set_vflags_short
  10. get_vflags
  11. do_int
  12. handle_vm86_fault

/*
 *  linux/kernel/vm86.c
 *
 *  Copyright (C) 1994  Linus Torvalds
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/mm.h>	/* PAGE_PRESENT, PAGE_RW, PAGE_MASK, PAGE_SHIFT */

#include <asm/segment.h>
#include <asm/io.h>

/*
 * 16-bit register defines..
 */
#define IP(regs)        (*(unsigned short *)&((regs)->eip))
#define SP(regs)        (*(unsigned short *)&((regs)->esp))

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS(regs)    (*(unsigned short *)&(current->v86flags))
#define VEFLAGS(regs)   (current->v86flags)

#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

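/*
 * SAFE_MASK (0xDD5) covers the arithmetic flags CF, PF, AF, ZF, SF
 * and OF, plus TF and DF: the eflags bits a vm86 task may change
 * freely.  IF, IOPL, NT etc. stay under control of the kernel.
 */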
#define SAFE_MASK       (0xDD5)

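/*
 * Leave vm86 mode: copy the vm86 register image and the screen bitmap
 * back to the user-space vm86_struct, restore the saved kernel stack
 * pointer, and return the pt_regs frame of the original vm86() caller
 * so that the system-call return path resumes 32-bit execution.
 */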
asmlinkage struct pt_regs * save_v86_state(struct vm86_regs * regs)
{
        unsigned long tmp;

        if (!current->vm86_info) {
                printk("no vm86_info: BAD\n");
                do_exit(SIGSEGV);
        }
        memcpy_tofs(&current->vm86_info->regs,regs,sizeof(*regs));
        put_fs_long(current->screen_bitmap,&current->vm86_info->screen_bitmap);
        tmp = current->tss.esp0;
        current->tss.esp0 = current->saved_kernel_stack;
        current->saved_kernel_stack = 0;
        return (struct pt_regs *) tmp;
}

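/*
 * Write-protect the 32 pages that map the VGA window at 0xA0000-0xBFFFF
 * (entries 0xA0-0xBF of the first page table), so that stores to screen
 * memory fault and can be tracked through the screen bitmap.
 */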
static void mark_screen_rdonly(struct task_struct * tsk)
{
        unsigned long tmp;
        unsigned long *pg_table;

        if ((tmp = tsk->tss.cr3) != 0) {
                tmp = *(unsigned long *) tmp;
                if (tmp & PAGE_PRESENT) {
                        tmp &= PAGE_MASK;
                        pg_table = (0xA0000 >> PAGE_SHIFT) + (unsigned long *) tmp;
                        tmp = 32;
                        while (tmp--) {
                                if (PAGE_PRESENT & *pg_table)
                                        *pg_table &= ~PAGE_RW;
                                pg_table++;
                        }
                }
        }
}

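/*
 * Note the pt_regs trick below: the first system-call argument is the
 * saved %ebx slot at the very start of the register frame on the
 * kernel stack, so &v86 doubles as a pointer to the caller's pt_regs.
 * The old esp0 is stashed in saved_kernel_stack so save_v86_state()
 * can undo the stack switch later.
 */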
asmlinkage int sys_vm86(struct vm86_struct * v86)
{
        struct vm86_struct info;
        struct pt_regs * pt_regs = (struct pt_regs *) &v86;
        int error;

        if (current->saved_kernel_stack)
                return -EPERM;
        /* v86 must be readable (now) and writable (for save_v86_state) */
        error = verify_area(VERIFY_WRITE,v86,sizeof(*v86));
        if (error)
                return error;
        memcpy_fromfs(&info,v86,sizeof(info));
/*
 * make sure the vm86() system call doesn't try to do anything silly
 */
        info.regs.__null_ds = 0;
        info.regs.__null_es = 0;
        info.regs.__null_fs = 0;
        info.regs.__null_gs = 0;
/*
 * The eflags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
        current->v86flags = info.regs.eflags;
        info.regs.eflags &= SAFE_MASK;
        info.regs.eflags |= ~SAFE_MASK & pt_regs->eflags;
        info.regs.eflags |= VM_MASK;

        switch (info.cpu_type) {
                case CPU_286:
                        current->v86mask = 0;
                        break;
                case CPU_386:
                        current->v86mask = NT_MASK | IOPL_MASK;
                        break;
                case CPU_486:
                        current->v86mask = AC_MASK | NT_MASK | IOPL_MASK;
                        break;
                default:
                        current->v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
                        break;
        }

/*
 * Save old state, set default return value (%eax) to 0
 */
        pt_regs->eax = 0;
        current->saved_kernel_stack = current->tss.esp0;
        current->tss.esp0 = (unsigned long) pt_regs;
        current->vm86_info = v86;

        current->screen_bitmap = info.screen_bitmap;
        if (info.flags & VM86_SCREEN_BITMAP)
                mark_screen_rdonly(current);
        __asm__ __volatile__("movl %0,%%esp\n\t"
                "jmp ret_from_sys_call"
                : /* no outputs */
                :"r" (&info.regs));
        return 0;
}

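/*
 * Illustrative only (not part of this file): a user-space monitor
 * would enter vm86 mode roughly like this, with cs:ip and ss:sp
 * pointing at code and stack it has mapped below 1MB:
 *
 *	struct vm86_struct vm;
 *	memset(&vm, 0, sizeof(vm));
 *	vm.cpu_type = CPU_386;
 *	vm.regs.cs = 0x1000;  vm.regs.eip = 0x0000;
 *	vm.regs.ss = 0x2000;  vm.regs.esp = 0xfffe;
 *	vm86(&vm);
 *
 * Control comes back through return_to_32bit(), with the reason code
 * in %eax (VM86_INTx, VM86_STI, VM86_UNKNOWN, ...).
 */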
static inline void return_to_32bit(struct vm86_regs * regs16, int retval)
{
        struct pt_regs * regs32;

        regs32 = save_v86_state(regs16);
        regs32->eax = retval;
        __asm__ __volatile__("movl %0,%%esp\n\t"
                "jmp ret_from_sys_call"
                : : "r" (regs32));
}

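/*
 * The real IF must stay set while a vm86 task runs, so STI/CLI are
 * emulated on a virtual interrupt flag (VIF) kept in
 * current->v86flags.  If an interrupt is pending (VIP) when the task
 * sets VIF, we return to the 32-bit monitor with VM86_STI so it can
 * deliver the pending interrupt.
 */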
static inline void set_IF(struct vm86_regs * regs)
{
        current->v86flags |= VIF_MASK;
        if (current->v86flags & VIP_MASK)
                return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct vm86_regs * regs)
{
        current->v86flags &= ~VIF_MASK;
}

static inline void clear_TF(struct vm86_regs * regs)
{
        regs->eflags &= ~TF_MASK;
}

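/*
 * The helpers below split a flag image loaded by the vm86 task across
 * two places: the SAFE_MASK bits go into the real regs->eflags, while
 * IF (as VIF) and the cpu_type-dependent v86mask bits live only in
 * current->v86flags.  get_vflags() reassembles the image the task
 * expects to see.
 */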
static inline void set_vflags_long(unsigned long eflags, struct vm86_regs * regs)
{
        set_flags(VEFLAGS(regs), eflags, current->v86mask);
        set_flags(regs->eflags, eflags, SAFE_MASK);
        if (eflags & IF_MASK)
                set_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct vm86_regs * regs)
{
        set_flags(VFLAGS(regs), flags, current->v86mask);
        set_flags(regs->eflags, flags, SAFE_MASK);
        if (flags & IF_MASK)
                set_IF(regs);
}

static inline unsigned long get_vflags(struct vm86_regs * regs)
{
        unsigned long flags = regs->eflags & SAFE_MASK;

        if (current->v86flags & VIF_MASK)
                flags |= IF_MASK;
        return flags | (VEFLAGS(regs) & current->v86mask);
}

/*
 * Boy are these ugly, but we need to do the correct 16-bit arithmetic.
 * Gcc makes a mess of it, so we do it inline and use non-obvious calling
 * conventions..
 */
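/*
 * Only the 16-bit pointer register is stepped (decw/incw), so the
 * stack and instruction pointers wrap around within their 64K segment
 * as real-mode arithmetic requires; the %fs segment override makes
 * each byte access go through the user data segment.
 */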
#define pushb(base, ptr, val) \
__asm__ __volatile__( \
        "decw %w0\n\t" \
        "movb %2,%%fs:0(%1,%0)" \
        : "=r" (ptr) \
        : "r" (base), "q" (val), "0" (ptr))

#define pushw(base, ptr, val) \
__asm__ __volatile__( \
        "decw %w0\n\t" \
        "movb %h2,%%fs:0(%1,%0)\n\t" \
        "decw %w0\n\t" \
        "movb %b2,%%fs:0(%1,%0)" \
        : "=r" (ptr) \
        : "r" (base), "q" (val), "0" (ptr))

#define pushl(base, ptr, val) \
__asm__ __volatile__( \
        "decw %w0\n\t" \
        "rorl $16,%2\n\t" \
        "movb %h2,%%fs:0(%1,%0)\n\t" \
        "decw %w0\n\t" \
        "movb %b2,%%fs:0(%1,%0)\n\t" \
        "decw %w0\n\t" \
        "rorl $16,%2\n\t" \
        "movb %h2,%%fs:0(%1,%0)\n\t" \
        "decw %w0\n\t" \
        "movb %b2,%%fs:0(%1,%0)" \
        : "=r" (ptr) \
        : "r" (base), "q" (val), "0" (ptr))

#define popb(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
        "movb %%fs:0(%1,%0),%b2\n\t" \
        "incw %w0" \
        : "=r" (ptr), "=r" (base), "=r" (__res) \
        : "0" (ptr), "1" (base), "2" (0)); \
__res; })

#define popw(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
        "movb %%fs:0(%1,%0),%b2\n\t" \
        "incw %w0\n\t" \
        "movb %%fs:0(%1,%0),%h2\n\t" \
        "incw %w0" \
        : "=r" (ptr), "=r" (base), "=r" (__res) \
        : "0" (ptr), "1" (base), "2" (0)); \
__res; })

#define popl(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
        "movb %%fs:0(%1,%0),%b2\n\t" \
        "incw %w0\n\t" \
        "movb %%fs:0(%1,%0),%h2\n\t" \
        "incw %w0\n\t" \
        "rorl $16,%2\n\t" \
        "movb %%fs:0(%1,%0),%b2\n\t" \
        "incw %w0\n\t" \
        "movb %%fs:0(%1,%0),%h2\n\t" \
        "incw %w0\n\t" \
        "rorl $16,%2" \
        : "=r" (ptr), "=r" (base), "=r" (__res) \
        : "0" (ptr), "1" (base)); \
__res; })

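/*
 * Emulate a real-mode software interrupt: push flags, cs and ip on the
 * vm86 stack and vector through the interrupt table at linear address
 * 0.  Interrupts that are revectored, or whose handler lives in the
 * BIOS segment, are bounced back to the 32-bit monitor instead
 * (VM86_INTx, with the interrupt number in bits 8-15 of the return
 * value).
 */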
static void do_int(struct vm86_regs *regs, int i, unsigned char * ssp, unsigned long sp)
{
        unsigned short seg = get_fs_word((void *) ((i<<2)+2));

        if (seg == BIOSSEG || regs->cs == BIOSSEG ||
            is_revectored(i, &current->vm86_info->int_revectored))
                return_to_32bit(regs, VM86_INTx + (i << 8));
        /* for DOS int 0x21, the function number is in %ah (bits 8-15) */
        if (i==0x21 && is_revectored((regs->eax >> 8) & 0xff,&current->vm86_info->int21_revectored)) {
                return_to_32bit(regs, VM86_INTx + (i << 8));
        }
        pushw(ssp, sp, get_vflags(regs));
        pushw(ssp, sp, regs->cs);
        pushw(ssp, sp, IP(regs));
        regs->cs = seg;
        SP(regs) -= 6;
        IP(regs) = get_fs_word((void *) (i<<2));
        clear_TF(regs);
        clear_IF(regs);
        return;
}

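/*
 * Called from the general protection fault handler when a vm86 task
 * hits a privileged or IOPL-sensitive instruction: fetch the opcode at
 * cs:ip and emulate the few instructions (pushf/popf, int, iret,
 * cli/sti) that touch the interrupt flag.  Anything else is handed
 * back to the 32-bit monitor as VM86_UNKNOWN.
 */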
void handle_vm86_fault(struct vm86_regs * regs, long error_code)
{
        unsigned char *csp, *ssp;
        unsigned long ip, sp;

        csp = (unsigned char *) (regs->cs << 4);
        ssp = (unsigned char *) (regs->ss << 4);
        sp = SP(regs);
        ip = IP(regs);

        switch (popb(csp, ip)) {

        /* operand size override */
        case 0x66:
                switch (popb(csp, ip)) {

                /* pushfd */
                case 0x9c:
                        SP(regs) -= 4;
                        IP(regs) += 2;
                        pushl(ssp, sp, get_vflags(regs));
                        return;

                /* popfd */
                case 0x9d:
                        SP(regs) += 4;
                        IP(regs) += 2;
                        set_vflags_long(popl(ssp, sp), regs);
                        return;
                }
                /* any other 0x66-prefixed opcode falls through to the
                   16-bit pushf case below */

        /* pushf */
        case 0x9c:
                SP(regs) -= 2;
                IP(regs)++;
                pushw(ssp, sp, get_vflags(regs));
                return;

        /* popf */
        case 0x9d:
                SP(regs) += 2;
                IP(regs)++;
                set_vflags_short(popw(ssp, sp), regs);
                return;

        /* int 3 */
        case 0xcc:
                IP(regs)++;
                do_int(regs, 3, ssp, sp);
                return;

        /* int xx */
        case 0xcd:
                IP(regs) += 2;
                do_int(regs, popb(csp, ip), ssp, sp);
                return;

        /* iret */
        case 0xcf:
                SP(regs) += 6;
                IP(regs) = popw(ssp, sp);
                regs->cs = popw(ssp, sp);
                set_vflags_short(popw(ssp, sp), regs);
                return;

        /* cli */
        case 0xfa:
                IP(regs)++;
                clear_IF(regs);
                return;

        /* sti */
        case 0xfb:
                IP(regs)++;
                set_IF(regs);
                return;

        default:
                return_to_32bit(regs, VM86_UNKNOWN);
        }
}
