This source file includes the following definitions:
- save_v86_state
- mark_screen_rdonly
- sys_vm86
- return_to_32bit
- set_IF
- clear_IF
- clear_TF
- set_vflags_long
- set_vflags_short
- get_vflags
- is_revectored
- do_int
- handle_vm86_debug
- handle_vm86_fault

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/mm.h>

#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/io.h>
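
/* 8- and 16-bit views of the low halves of the saved 32-bit registers. */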
#define AL(regs) (((unsigned char *)&((regs)->eax))[0])
#define AH(regs) (((unsigned char *)&((regs)->eax))[1])
#define IP(regs) (*(unsigned short *)&((regs)->eip))
#define SP(regs) (*(unsigned short *)&((regs)->esp))
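
/*
 * Virtual flags: the vm86 task's eflags image lives in the TSS; VFLAGS
 * and VEFLAGS are its 16- and 32-bit views.
 */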
#define VFLAGS (*(unsigned short *)&(current->tss.v86flags))
#define VEFLAGS (current->tss.v86flags)

#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK (0xDD5)
#define RETURN_MASK (0xDFF)
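
/*
 * save_v86_state() is called when the vm86 task returns to 32-bit mode:
 * it copies the vm86 register image and screen bitmap back to the user's
 * vm86_struct and restores the saved 32-bit kernel stack.
 */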
asmlinkage struct pt_regs * save_v86_state(struct vm86_regs * regs)
{
	unsigned long tmp;

	if (!current->tss.vm86_info) {
		printk("no vm86_info: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->eflags, VEFLAGS, VIF_MASK | current->tss.v86mask);
	memcpy_tofs(&current->tss.vm86_info->regs,regs,sizeof(*regs));
	put_fs_long(current->tss.screen_bitmap,&current->tss.vm86_info->screen_bitmap);
	tmp = current->tss.esp0;
	current->tss.esp0 = current->saved_kernel_stack;
	current->saved_kernel_stack = 0;
	return (struct pt_regs *) tmp;
}
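
/*
 * Write-protect the 32 pages at 0xA0000 (the VGA graphics window) so
 * that stores to screen memory fault; used for the VM86_SCREEN_BITMAP
 * option.
 */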
static void mark_screen_rdonly(struct task_struct * tsk)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int i;

	pgd = pgd_offset(tsk->mm, 0xA0000);
	if (pgd_none(*pgd))
		return;
	if (pgd_bad(*pgd)) {
		printk("vm86: bad pgd entry [%p]:%08lx\n", pgd, pgd_val(*pgd));
		pgd_clear(pgd);
		return;
	}
	pmd = pmd_offset(pgd, 0xA0000);
	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		printk("vm86: bad pmd entry [%p]:%08lx\n", pmd, pmd_val(*pmd));
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, 0xA0000);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	invalidate();
}
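
/*
 * sys_vm86() switches the task into vm86 mode.  It saves the 32-bit
 * kernel stack, installs the user-supplied register image and jumps
 * directly to ret_from_sys_call; control comes back to 32-bit mode only
 * through return_to_32bit()/save_v86_state().
 */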
asmlinkage int sys_vm86(struct vm86_struct * v86)
{
	struct vm86_struct info;
	struct pt_regs * pt_regs = (struct pt_regs *) &v86;
	int error;

	if (current->saved_kernel_stack)
		return -EPERM;

	error = verify_area(VERIFY_WRITE,v86,sizeof(*v86));
	if (error)
		return error;
	memcpy_fromfs(&info,v86,sizeof(info));
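
	/*
	 * The __null_* slots occupy the positions where pt_regs keeps the
	 * protected-mode segment registers: force them to the null selector.
	 */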
	info.regs.__null_ds = 0;
	info.regs.__null_es = 0;
	info.regs.__null_fs = 0;
	info.regs.__null_gs = 0;
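
	/*
	 * The user-supplied eflags cannot be trusted: take only the
	 * SAFE_MASK bits from user space, inherit the rest from the
	 * kernel's eflags, and force the VM bit on.
	 */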
	VEFLAGS = info.regs.eflags;
	info.regs.eflags &= SAFE_MASK;
	info.regs.eflags |= pt_regs->eflags & ~SAFE_MASK;
	info.regs.eflags |= VM_MASK;

	switch (info.cpu_type) {
		case CPU_286:
			current->tss.v86mask = 0;
			break;
		case CPU_386:
			current->tss.v86mask = NT_MASK | IOPL_MASK;
			break;
		case CPU_486:
			current->tss.v86mask = AC_MASK | NT_MASK | IOPL_MASK;
			break;
		default:
			current->tss.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
			break;
	}
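
	/*
	 * Save the old kernel stack and set the default return value (%eax)
	 * to zero; anything else comes back through return_to_32bit().
	 */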
	pt_regs->eax = 0;
	current->saved_kernel_stack = current->tss.esp0;
	current->tss.esp0 = (unsigned long) pt_regs;
	current->tss.vm86_info = v86;

	current->tss.screen_bitmap = info.screen_bitmap;
	if (info.flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(current);
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"jmp ret_from_sys_call"
		:
		:"r" (&info.regs));
	return 0;
}

static inline void return_to_32bit(struct vm86_regs * regs16, int retval)
{
	struct pt_regs * regs32;

	regs32 = save_v86_state(regs16);
	regs32->eax = retval;
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"jmp ret_from_sys_call"
		: : "r" (regs32));
}
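
/*
 * The interrupt flag is only simulated for the vm86 task.  If a virtual
 * interrupt is pending (VIP) when the task sets IF, we return to the
 * 32-bit monitor so it can deliver the interrupt.
 */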
static inline void set_IF(struct vm86_regs * regs)
{
	VEFLAGS |= VIF_MASK;
	if (VEFLAGS & VIP_MASK)
		return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct vm86_regs * regs)
{
	VEFLAGS &= ~VIF_MASK;
}

static inline void clear_TF(struct vm86_regs * regs)
{
	regs->eflags &= ~TF_MASK;
}

static inline void set_vflags_long(unsigned long eflags, struct vm86_regs * regs)
{
	set_flags(VEFLAGS, eflags, current->tss.v86mask);
	set_flags(regs->eflags, eflags, SAFE_MASK);
	if (eflags & IF_MASK)
		set_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct vm86_regs * regs)
{
	set_flags(VFLAGS, flags, current->tss.v86mask);
	set_flags(regs->eflags, flags, SAFE_MASK);
	if (flags & IF_MASK)
		set_IF(regs);
}

static inline unsigned long get_vflags(struct vm86_regs * regs)
{
	unsigned long flags = regs->eflags & RETURN_MASK;

	if (VEFLAGS & VIF_MASK)
		flags |= IF_MASK;
	return flags | (VEFLAGS & current->tss.v86mask);
}
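
/*
 * is_revectored() tests bit nr of a 256-bit bitmap in user space.  A set
 * bit (or an unreadable bitmap) means the 32-bit monitor wants to handle
 * that interrupt itself rather than have it emulated here.
 */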
static inline int is_revectored(int nr, struct revectored_struct * bitmap)
{
	if (verify_area(VERIFY_READ, bitmap, 256/8) < 0)
		return 1;
	__asm__ __volatile__("btl %2,%%fs:%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap),"r" (nr));
	return nr;
}
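
/*
 * 16-bit stack access helpers.  Offsets must wrap at 64K, so the pointer
 * arithmetic is done with 16-bit inc/dec in inline asm; all memory
 * accesses go through the %fs segment (user space).
 */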
#define pushb(base, ptr, val) \
__asm__ __volatile__( \
	"decw %w0\n\t" \
	"movb %2,%%fs:0(%1,%0)" \
	: "=r" (ptr) \
	: "r" (base), "q" (val), "0" (ptr))

#define pushw(base, ptr, val) \
__asm__ __volatile__( \
	"decw %w0\n\t" \
	"movb %h2,%%fs:0(%1,%0)\n\t" \
	"decw %w0\n\t" \
	"movb %b2,%%fs:0(%1,%0)" \
	: "=r" (ptr) \
	: "r" (base), "q" (val), "0" (ptr))

#define pushl(base, ptr, val) \
__asm__ __volatile__( \
	"decw %w0\n\t" \
	"rorl $16,%2\n\t" \
	"movb %h2,%%fs:0(%1,%0)\n\t" \
	"decw %w0\n\t" \
	"movb %b2,%%fs:0(%1,%0)\n\t" \
	"decw %w0\n\t" \
	"rorl $16,%2\n\t" \
	"movb %h2,%%fs:0(%1,%0)\n\t" \
	"decw %w0\n\t" \
	"movb %b2,%%fs:0(%1,%0)" \
	: "=r" (ptr) \
	: "r" (base), "q" (val), "0" (ptr))

#define popb(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
	"movb %%fs:0(%1,%0),%b2\n\t" \
	"incw %w0" \
	: "=r" (ptr), "=r" (base), "=q" (__res) \
	: "0" (ptr), "1" (base), "2" (0)); \
__res; })

#define popw(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
	"movb %%fs:0(%1,%0),%b2\n\t" \
	"incw %w0\n\t" \
	"movb %%fs:0(%1,%0),%h2\n\t" \
	"incw %w0" \
	: "=r" (ptr), "=r" (base), "=q" (__res) \
	: "0" (ptr), "1" (base), "2" (0)); \
__res; })

#define popl(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
	"movb %%fs:0(%1,%0),%b2\n\t" \
	"incw %w0\n\t" \
	"movb %%fs:0(%1,%0),%h2\n\t" \
	"incw %w0\n\t" \
	"rorl $16,%2\n\t" \
	"movb %%fs:0(%1,%0),%b2\n\t" \
	"incw %w0\n\t" \
	"movb %%fs:0(%1,%0),%h2\n\t" \
	"incw %w0\n\t" \
	"rorl $16,%2" \
	: "=r" (ptr), "=r" (base), "=q" (__res) \
	: "0" (ptr), "1" (base)); \
__res; })
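
/*
 * do_int() emulates a real-mode INT: push flags, cs and ip on the vm86
 * stack and vector through the real-mode IVT at 0:0.  Revectored
 * interrupts and anything that would enter the BIOS are bounced to the
 * 32-bit monitor instead.
 */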
static void do_int(struct vm86_regs *regs, int i, unsigned char * ssp, unsigned long sp)
{
	unsigned short *intr_ptr, seg;

	if (regs->cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &current->tss.vm86_info->int_revectored))
		goto cannot_handle;
	if (i==0x21 && is_revectored(AH(regs),&current->tss.vm86_info->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned short *) (i << 2);
	if (verify_area(VERIFY_READ, intr_ptr, 4) < 0)
		goto cannot_handle;
	seg = get_fs_word(intr_ptr+1);
	if (seg == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs));
	pushw(ssp, sp, regs->cs);
	pushw(ssp, sp, IP(regs));
	regs->cs = seg;
	SP(regs) -= 6;
	IP(regs) = get_fs_word(intr_ptr+0);
	clear_TF(regs);
	clear_IF(regs);
	return;

cannot_handle:
	return_to_32bit(regs, VM86_INTx + (i << 8));
}

void handle_vm86_debug(struct vm86_regs * regs, long error_code)
{
#if 0
	do_int(regs, 1, (unsigned char *) (regs->ss << 4), SP(regs));
#else
	if (current->flags & PF_PTRACED)
		current->blocked &= ~(1 << (SIGTRAP-1));
	send_sig(SIGTRAP, current, 1);
	current->tss.trap_no = 1;
	current->tss.error_code = error_code;
#endif
}
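
/*
 * handle_vm86_fault() emulates the small set of privileged instructions
 * (pushf/popf, int, iret, cli/sti, with and without the 0x66 operand-size
 * prefix) that fault in vm86 mode.
 */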
void handle_vm86_fault(struct vm86_regs * regs, long error_code)
{
	unsigned char *csp, *ssp;
	unsigned long ip, sp;

	csp = (unsigned char *) (regs->cs << 4);
	ssp = (unsigned char *) (regs->ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	switch (popb(csp, ip)) {
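
	/* operand-size prefix: 32-bit pushf/popf/iret follow */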
	case 0x66:
		switch (popb(csp, ip)) {
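
		/* pushfd */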
		case 0x9c:
			SP(regs) -= 4;
			IP(regs) += 2;
			pushl(ssp, sp, get_vflags(regs));
			return;
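
		/* popfd */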
		case 0x9d:
			SP(regs) += 4;
			IP(regs) += 2;
			set_vflags_long(popl(ssp, sp), regs);
			return;
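
		/* iretd */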
		case 0xcf:
			SP(regs) += 12;
			IP(regs) = (unsigned short)popl(ssp, sp);
			regs->cs = (unsigned short)popl(ssp, sp);
			set_vflags_long(popl(ssp, sp), regs);
			return;
		}
		break;
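
	/* pushf */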
	case 0x9c:
		SP(regs) -= 2;
		IP(regs)++;
		pushw(ssp, sp, get_vflags(regs));
		return;
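
	/* popf */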
	case 0x9d:
		SP(regs) += 2;
		IP(regs)++;
		set_vflags_short(popw(ssp, sp), regs);
		return;
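
	/* int xx */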
	case 0xcd:
		IP(regs) += 2;
		do_int(regs, popb(csp, ip), ssp, sp);
		return;
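
	/* iret */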
	case 0xcf:
		SP(regs) += 6;
		IP(regs) = popw(ssp, sp);
		regs->cs = popw(ssp, sp);
		set_vflags_short(popw(ssp, sp), regs);
		return;
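
	/* cli */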
	case 0xfa:
		IP(regs)++;
		clear_IF(regs);
		return;
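
	/*
	 * sti: a real CPU keeps interrupts disabled for one more
	 * instruction after sti; this emulation sets the virtual IF
	 * immediately instead.
	 */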
	case 0xfb:
		IP(regs)++;
		set_IF(regs);
		return;
	}
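
	/* everything else is bounced back to the 32-bit monitor */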
	return_to_32bit(regs, VM86_UNKNOWN);
}
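
/*
 * Usage sketch (illustration only, not part of this file): a minimal,
 * hypothetical user-space caller, assuming a vm86() library wrapper that
 * reaches sys_vm86() with a struct vm86_struct pointer.  The segment
 * values below are made-up examples.
 *
 *	struct vm86_struct v86;
 *
 *	memset(&v86, 0, sizeof(v86));
 *	v86.cpu_type = CPU_386;
 *	v86.regs.cs = 0x1000;		entry point 0x1000:0x0000
 *	v86.regs.eip = 0;
 *	v86.regs.ss = 0x2000;		real-mode stack at 0x2000:0xfffe
 *	v86.regs.esp = 0xfffe;
 *	vm86(&v86);			returns only via return_to_32bit()
 */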