This source file includes the following definitions:
- save_v86_state
- mark_screen_rdonly
- sys_vm86
- return_to_32bit
- set_IF
- clear_IF
- clear_TF
- set_vflags_long
- set_vflags_short
- get_vflags
- is_revectored
- do_int
- handle_vm86_debug
- handle_vm86_fault
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/ptrace.h>

#include <asm/segment.h>
#include <asm/io.h>

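/*
 * Register accessors and flag masks: AL/AH/IP/SP pick sub-registers
 * out of a struct vm86_regs; VFLAGS/VEFLAGS are the task's virtual
 * flags; SAFE_MASK selects the eflags bits user space may set
 * directly, RETURN_MASK the bits reported back by get_vflags().
 */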
#define AL(regs)	(((unsigned char *)&((regs)->eax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->eax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->eip))
#define SP(regs)	(*(unsigned short *)&((regs)->esp))

#define VFLAGS	(*(unsigned short *)&(current->v86flags))
#define VEFLAGS	(current->v86flags)

#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0x40DD5)
#define RETURN_MASK	(0x40DFF)

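/*
 * save_v86_state() copies the vm86 register image back into the
 * user-supplied vm86_struct, restores the saved 32-bit kernel stack
 * and returns a pointer to the 32-bit pt_regs saved by sys_vm86().
 */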
asmlinkage struct pt_regs * save_v86_state(struct vm86_regs * regs)
{
	unsigned long tmp;

	if (!current->vm86_info) {
		printk("no vm86_info: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->eflags, VEFLAGS, VIF_MASK | current->v86mask);
	memcpy_tofs(&current->vm86_info->regs,regs,sizeof(*regs));
	put_fs_long(current->screen_bitmap,&current->vm86_info->screen_bitmap);
	tmp = current->tss.esp0;
	current->tss.esp0 = current->saved_kernel_stack;
	current->saved_kernel_stack = 0;
	return (struct pt_regs *) tmp;
}

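/*
 * mark_screen_rdonly() write-protects the 32 pages covering the VGA
 * window at 0xA0000-0xBFFFF, so that screen accesses fault and can
 * be tracked through the screen_bitmap.
 */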
static void mark_screen_rdonly(struct task_struct * tsk)
{
	unsigned long tmp;
	unsigned long *pg_table;

	if ((tmp = tsk->tss.cr3) != 0) {
		tmp = *(unsigned long *) tmp;
		if (tmp & PAGE_PRESENT) {
			tmp &= PAGE_MASK;
			pg_table = (0xA0000 >> PAGE_SHIFT) + (unsigned long *) tmp;
			tmp = 32;
			while (tmp--) {
				if (PAGE_PRESENT & *pg_table)
					*pg_table &= ~PAGE_RW;
				pg_table++;
			}
		}
	}
}

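/*
 * sys_vm86() is the vm86() system call: it copies the register image
 * from user space, sanitizes segments and eflags, remembers the
 * 32-bit kernel stack and drops into virtual-8086 mode through
 * ret_from_sys_call. The call does not return normally; control
 * comes back through save_v86_state() when the monitor must step in.
 *
 * A rough user-space sketch (hypothetical values, not part of this file):
 *
 *	static struct vm86_struct v86;
 *	v86.cpu_type = CPU_486;
 *	v86.regs.cs = 0x1000; v86.regs.eip = 0;      // real-mode entry point
 *	v86.regs.ss = 0x2000; v86.regs.esp = 0xfffe;
 *	vm86(&v86);         // comes back with a VM86_xxx return value
 */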
asmlinkage int sys_vm86(struct vm86_struct * v86)
{
	struct vm86_struct info;
	struct pt_regs * pt_regs = (struct pt_regs *) &v86;
	int error;

	if (current->saved_kernel_stack)
		return -EPERM;

	error = verify_area(VERIFY_WRITE,v86,sizeof(*v86));
	if (error)
		return error;
	memcpy_fromfs(&info,v86,sizeof(info));

	/* clear the protected-mode segment slots: vm86 mode does not use them */
	info.regs.__null_ds = 0;
	info.regs.__null_es = 0;
	info.regs.__null_fs = 0;
	info.regs.__null_gs = 0;

	/*
	 * The user-supplied eflags cannot be trusted: keep only the
	 * SAFE_MASK bits, inherit the rest from protected mode, and
	 * set the VM bit to enter virtual-8086 mode.
	 */
	VEFLAGS = info.regs.eflags;
	info.regs.eflags &= SAFE_MASK;
	info.regs.eflags |= pt_regs->eflags & ~SAFE_MASK;
	info.regs.eflags |= VM_MASK;

	switch (info.cpu_type) {
	case CPU_286:
		current->v86mask = 0;
		break;
	case CPU_386:
		current->v86mask = NT_MASK | IOPL_MASK;
		break;
	case CPU_486:
		current->v86mask = AC_MASK | NT_MASK | IOPL_MASK;
		break;
	default:
		current->v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
		break;
	}

	/* save the old kernel stack; %eax defaults to 0 on return */
	pt_regs->eax = 0;
	current->saved_kernel_stack = current->tss.esp0;
	current->tss.esp0 = (unsigned long) pt_regs;
	current->vm86_info = v86;

	current->screen_bitmap = info.screen_bitmap;
	if (info.flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(current);
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"jmp ret_from_sys_call"
		: /* no outputs */
		:"r" (&info.regs));
	return 0;
}

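/*
 * return_to_32bit() leaves vm86 mode for good: it saves the vm86
 * state and resumes the 32-bit caller of vm86() with 'retval' as
 * the system call's return value.
 */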
static inline void return_to_32bit(struct vm86_regs * regs16, int retval)
{
	struct pt_regs * regs32;

	regs32 = save_v86_state(regs16);
	regs32->eax = retval;
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"jmp ret_from_sys_call"
		: : "r" (regs32));
}

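/*
 * Virtual interrupt flag handling: cli/sti in vm86 mode only toggle
 * VIF in the virtual flags. If an interrupt is pending (VIP set)
 * when the flag is turned back on, we return to 32-bit mode with
 * VM86_STI so the monitor can deliver it.
 */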
static inline void set_IF(struct vm86_regs * regs)
{
	VEFLAGS |= VIF_MASK;
	if (VEFLAGS & VIP_MASK)
		return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct vm86_regs * regs)
{
	VEFLAGS &= ~VIF_MASK;
}

static inline void clear_TF(struct vm86_regs * regs)
{
	regs->eflags &= ~TF_MASK;
}

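/*
 * Move flag values between the 16/32-bit flag images and the task's
 * virtual flags: only SAFE_MASK bits reach the real eflags, the
 * CPU-type dependent bits live in current->v86flags, and IF is
 * emulated through set_IF()/clear_IF() above.
 */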
static inline void set_vflags_long(unsigned long eflags, struct vm86_regs * regs)
{
	set_flags(VEFLAGS, eflags, current->v86mask);
	set_flags(regs->eflags, eflags, SAFE_MASK);
	if (eflags & IF_MASK)
		set_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct vm86_regs * regs)
{
	set_flags(VFLAGS, flags, current->v86mask);
	set_flags(regs->eflags, flags, SAFE_MASK);
	if (flags & IF_MASK)
		set_IF(regs);
}

static inline unsigned long get_vflags(struct vm86_regs * regs)
{
	unsigned long flags = regs->eflags & RETURN_MASK;

	if (VEFLAGS & VIF_MASK)
		flags |= IF_MASK;
	return flags | (VEFLAGS & current->v86mask);
}

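/*
 * is_revectored() tests bit nr in a user-space bitmap; interrupts
 * marked there are not reflected into the vm86 program but reported
 * back to the 32-bit monitor instead.
 */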
static inline int is_revectored(int nr, struct revectored_struct * bitmap)
{
	__asm__ __volatile__("btl %2,%%fs:%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap),"r" (nr));
	return nr;
}

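/*
 * The macros below push to and pop from the vm86 stack through the
 * user data segment (%fs), one byte at a time with 16-bit pointer
 * arithmetic, so stack wraparound within the 64kB segment behaves
 * as on a real 8086.
 */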
#define pushb(base, ptr, val) \
__asm__ __volatile__( \
	"decw %w0\n\t" \
	"movb %2,%%fs:0(%1,%0)" \
	: "=r" (ptr) \
	: "r" (base), "q" (val), "0" (ptr))

#define pushw(base, ptr, val) \
__asm__ __volatile__( \
	"decw %w0\n\t" \
	"movb %h2,%%fs:0(%1,%0)\n\t" \
	"decw %w0\n\t" \
	"movb %b2,%%fs:0(%1,%0)" \
	: "=r" (ptr) \
	: "r" (base), "q" (val), "0" (ptr))

#define pushl(base, ptr, val) \
__asm__ __volatile__( \
	"decw %w0\n\t" \
	"rorl $16,%2\n\t" \
	"movb %h2,%%fs:0(%1,%0)\n\t" \
	"decw %w0\n\t" \
	"movb %b2,%%fs:0(%1,%0)\n\t" \
	"decw %w0\n\t" \
	"rorl $16,%2\n\t" \
	"movb %h2,%%fs:0(%1,%0)\n\t" \
	"decw %w0\n\t" \
	"movb %b2,%%fs:0(%1,%0)" \
	: "=r" (ptr) \
	: "r" (base), "q" (val), "0" (ptr))

#define popb(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
	"movb %%fs:0(%1,%0),%b2\n\t" \
	"incw %w0" \
	: "=r" (ptr), "=r" (base), "=q" (__res) \
	: "0" (ptr), "1" (base), "2" (0)); \
__res; })

#define popw(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
	"movb %%fs:0(%1,%0),%b2\n\t" \
	"incw %w0\n\t" \
	"movb %%fs:0(%1,%0),%h2\n\t" \
	"incw %w0" \
	: "=r" (ptr), "=r" (base), "=q" (__res) \
	: "0" (ptr), "1" (base), "2" (0)); \
__res; })

#define popl(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
	"movb %%fs:0(%1,%0),%b2\n\t" \
	"incw %w0\n\t" \
	"movb %%fs:0(%1,%0),%h2\n\t" \
	"incw %w0\n\t" \
	"rorl $16,%2\n\t" \
	"movb %%fs:0(%1,%0),%b2\n\t" \
	"incw %w0\n\t" \
	"movb %%fs:0(%1,%0),%h2\n\t" \
	"incw %w0\n\t" \
	"rorl $16,%2" \
	: "=r" (ptr), "=r" (base), "=q" (__res) \
	: "0" (ptr), "1" (base)); \
__res; })

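/*
 * do_int() reflects a software interrupt into the vm86 program:
 * unless the vector is revectored or points into the BIOS segment,
 * it pushes flags/cs/ip on the vm86 stack and jumps through the
 * real-mode interrupt vector table at linear address 0.
 */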
static void do_int(struct vm86_regs *regs, int i, unsigned char * ssp, unsigned long sp)
{
	unsigned short seg = get_fs_word((void *) ((i<<2)+2));

	if (seg == BIOSSEG || regs->cs == BIOSSEG ||
	    is_revectored(i, &current->vm86_info->int_revectored))
		return_to_32bit(regs, VM86_INTx + (i << 8));
	if (i==0x21 && is_revectored(AH(regs),&current->vm86_info->int21_revectored))
		return_to_32bit(regs, VM86_INTx + (i << 8));
	pushw(ssp, sp, get_vflags(regs));
	pushw(ssp, sp, regs->cs);
	pushw(ssp, sp, IP(regs));
	regs->cs = seg;
	SP(regs) -= 6;
	IP(regs) = get_fs_word((void *) (i<<2));
	clear_TF(regs);
	clear_IF(regs);
	return;
}

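/*
 * The debug trap (int 1) in vm86 mode is not reflected to the vm86
 * program; a SIGTRAP is sent instead so that single-stepping under
 * a debugger works.
 */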
void handle_vm86_debug(struct vm86_regs * regs, long error_code)
{
#if 0
	do_int(regs, 1, (unsigned char *) (regs->ss << 4), SP(regs));
#else
	if (current->flags & PF_PTRACED)
		current->blocked &= ~(1 << (SIGTRAP-1));
	send_sig(SIGTRAP, current, 1);
	current->tss.trap_no = 1;
	current->tss.error_code = error_code;
#endif
}

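/*
 * handle_vm86_fault() is called on a general protection fault in
 * vm86 mode: it decodes the faulting instruction and emulates the
 * IOPL-sensitive ones (pushf/popf, int, iret, cli/sti). Anything
 * unrecognized ends the vm86 session with VM86_UNKNOWN.
 */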
void handle_vm86_fault(struct vm86_regs * regs, long error_code)
{
	unsigned char *csp, *ssp;
	unsigned long ip, sp;

	csp = (unsigned char *) (regs->cs << 4);
	ssp = (unsigned char *) (regs->ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	switch (popb(csp, ip)) {

	/* operand-size override prefix */
	case 0x66:
		switch (popb(csp, ip)) {

		/* pushfd */
		case 0x9c:
			SP(regs) -= 4;
			IP(regs) += 2;
			pushl(ssp, sp, get_vflags(regs));
			return;

		/* popfd */
		case 0x9d:
			SP(regs) += 4;
			IP(regs) += 2;
			set_vflags_long(popl(ssp, sp), regs);
			return;
		}
		/* fall through for other 0x66-prefixed opcodes */

	/* pushf */
	case 0x9c:
		SP(regs) -= 2;
		IP(regs)++;
		pushw(ssp, sp, get_vflags(regs));
		return;

	/* popf */
	case 0x9d:
		SP(regs) += 2;
		IP(regs)++;
		set_vflags_short(popw(ssp, sp), regs);
		return;

	/* int 3 */
	case 0xcc:
		IP(regs)++;
		do_int(regs, 3, ssp, sp);
		return;

	/* int xx */
	case 0xcd:
		IP(regs) += 2;
		do_int(regs, popb(csp, ip), ssp, sp);
		return;

	/* iret */
	case 0xcf:
		SP(regs) += 6;
		IP(regs) = popw(ssp, sp);
		regs->cs = popw(ssp, sp);
		set_vflags_short(popw(ssp, sp), regs);
		return;

	/* cli */
	case 0xfa:
		IP(regs)++;
		clear_IF(regs);
		return;

	/* sti */
	/*
	 * A real 'sti' keeps interrupts disabled for one more
	 * instruction; here the virtual interrupt flag is simply
	 * enabled at once.
	 */
	case 0xfb:
		IP(regs)++;
		set_IF(regs);
		return;

	default:
		return_to_32bit(regs, VM86_UNKNOWN);
	}
}