This source file includes following definitions.
- save_v86_state
- mark_screen_rdonly
- sys_vm86
- return_to_32bit
- set_IF
- clear_IF
- clear_TF
- set_vflags_long
- set_vflags_short
- get_vflags
- do_int
- handle_vm86_fault
1
2
3
4
5
6 #include <linux/errno.h>
7 #include <linux/sched.h>
8 #include <linux/kernel.h>
9 #include <linux/signal.h>
10 #include <linux/string.h>
11 #include <linux/ptrace.h>
12
13 #include <asm/segment.h>
14 #include <asm/io.h>
15
16
17
18
/*
 * 16-bit views of the 32-bit registers: vm86 code runs with 16-bit
 * IP/SP, so only the low word of eip/esp is significant.
 */
#define IP(regs) (*(unsigned short *)&((regs)->eip))
#define SP(regs) (*(unsigned short *)&((regs)->esp))

/*
 * Virtual flags of the vm86 task, kept in current->v86flags.  The
 * "regs" argument is unused; it is kept only for symmetry with IP/SP.
 */
#define VFLAGS(regs) (*(unsigned short *)&(current->v86flags))
#define VEFLAGS(regs) (current->v86flags)

/* Replace the bits of X selected by mask with the matching bits of new. */
#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

/*
 * Eflags bits the vm86 task may read/write directly (CF, PF, AF, ZF,
 * SF, TF, DF, OF); IF, IOPL, VM and the rest stay under kernel control.
 */
#define SAFE_MASK (0xDD5)
32
33 asmlinkage struct pt_regs * save_v86_state(struct vm86_regs * regs)
34 {
35 unsigned long tmp;
36
37 if (!current->vm86_info) {
38 printk("no vm86_info: BAD\n");
39 do_exit(SIGSEGV);
40 }
41 memcpy_tofs(¤t->vm86_info->regs,regs,sizeof(*regs));
42 put_fs_long(current->screen_bitmap,¤t->vm86_info->screen_bitmap);
43 tmp = current->tss.esp0;
44 current->tss.esp0 = current->saved_kernel_stack;
45 current->saved_kernel_stack = 0;
46 return (struct pt_regs *) tmp;
47 }
48
49 static void mark_screen_rdonly(struct task_struct * tsk)
50 {
51 unsigned long tmp;
52 unsigned long *pg_table;
53
54 if ((tmp = tsk->tss.cr3) != 0) {
55 tmp = *(unsigned long *) tmp;
56 if (tmp & PAGE_PRESENT) {
57 tmp &= PAGE_MASK;
58 pg_table = (0xA0000 >> PAGE_SHIFT) + (unsigned long *) tmp;
59 tmp = 32;
60 while (tmp--) {
61 if (PAGE_PRESENT & *pg_table)
62 *pg_table &= ~PAGE_RW;
63 pg_table++;
64 }
65 }
66 }
67 }
68
/*
 * Enter vm86 mode.  "v86" points at a user-space vm86_struct holding
 * the initial real-mode register image.
 *
 * Stack-layout trick: the address of the "v86" argument slot on the
 * kernel stack is also the address of the saved pt_regs frame, so
 * "pt_regs" below aliases the caller's 32-bit register state.
 *
 * On success this does not return through the normal path: it switches
 * the stack to the vm86 register image and jumps to ret_from_sys_call.
 * Control comes back to 32-bit mode only via save_v86_state().
 */
asmlinkage int sys_vm86(struct vm86_struct * v86)
{
	struct vm86_struct info;
	struct pt_regs * pt_regs = (struct pt_regs *) &v86;
	int error;

	/* refuse nested vm86: the saved-stack slot is already in use */
	if (current->saved_kernel_stack)
		return -EPERM;

	error = verify_area(VERIFY_WRITE,v86,sizeof(*v86));
	if (error)
		return error;
	memcpy_fromfs(&info,v86,sizeof(info));

	/* clear the segment pad words so the iret frame is well-defined */
	info.regs.__null_ds = 0;
	info.regs.__null_es = 0;
	info.regs.__null_fs = 0;
	info.regs.__null_gs = 0;

	/*
	 * Virtualize the flags: the task's full flag image is kept in
	 * v86flags, while the real eflags takes only the SAFE_MASK bits
	 * from user space, keeps the rest from the current kernel frame,
	 * and always has VM set.
	 */
	current->v86flags = info.regs.eflags;
	info.regs.eflags &= SAFE_MASK;
	info.regs.eflags |= ~SAFE_MASK & pt_regs->eflags;
	info.regs.eflags |= VM_MASK;

	/* which extra eflags bits the emulated CPU type exposes */
	switch (info.cpu_type) {
		case CPU_286:
			current->v86mask = 0;
			break;
		case CPU_386:
			current->v86mask = NT_MASK | IOPL_MASK;
			break;
		case CPU_486:
			current->v86mask = AC_MASK | NT_MASK | IOPL_MASK;
			break;
		default:
			current->v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
			break;
	}

	/*
	 * sys_vm86()'s eventual return value is delivered through the
	 * saved eax when return_to_32bit() resumes this frame; clear it
	 * now so a plain resume reports 0.
	 */
	pt_regs->eax = 0;
	current->saved_kernel_stack = current->tss.esp0;
	current->tss.esp0 = (unsigned long) pt_regs;
	current->vm86_info = v86;

	current->screen_bitmap = info.screen_bitmap;
	if (info.flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(current);
	/* switch the stack to the vm86 image and "return" into vm86 mode */
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"jmp ret_from_sys_call"
		:
		:"r" (&info.regs));
	return 0;	/* not reached */
}
131
/*
 * Abort vm86 emulation: save the vm86 state back to user space, store
 * "retval" as sys_vm86()'s return value (via the saved eax), and jump
 * back into the 32-bit system-call return path.  Never returns.
 */
static inline void return_to_32bit(struct vm86_regs * regs16, int retval)
{
	struct pt_regs * regs32;

	regs32 = save_v86_state(regs16);
	regs32->eax = retval;
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"jmp ret_from_sys_call"
		: : "r" (regs32));
}
142
143 static inline void set_IF(struct vm86_regs * regs)
144 {
145 current->v86flags |= VIF_MASK;
146 if (current->v86flags & VIP_MASK)
147 return_to_32bit(regs, VM86_STI);
148 }
149
150 static inline void clear_IF(struct vm86_regs * regs)
151 {
152 current->v86flags &= ~VIF_MASK;
153 }
154
155 static inline void clear_TF(struct vm86_regs * regs)
156 {
157 regs->eflags &= ~TF_MASK;
158 }
159
160 static inline void set_vflags_long(unsigned long eflags, struct vm86_regs * regs)
161 {
162 set_flags(VEFLAGS(regs), eflags, current->v86mask);
163 set_flags(regs->eflags, eflags, SAFE_MASK);
164 if (eflags & IF_MASK)
165 set_IF(regs);
166 }
167
168 static inline void set_vflags_short(unsigned short flags, struct vm86_regs * regs)
169 {
170 set_flags(VFLAGS(regs), flags, current->v86mask);
171 set_flags(regs->eflags, flags, SAFE_MASK);
172 if (flags & IF_MASK)
173 set_IF(regs);
174 }
175
176 static inline unsigned long get_vflags(struct vm86_regs * regs)
177 {
178 unsigned long flags = regs->eflags & SAFE_MASK;
179
180 if (current->v86flags & VIF_MASK)
181 flags |= IF_MASK;
182 return flags | (VEFLAGS(regs) & current->v86mask);
183 }
184
185
186
187
188
189
/*
 * Raw access to the vm86 task's memory through the fs segment (which
 * addresses user space from the kernel).  "base" is the segment base
 * (seg << 4) and "ptr" a 16-bit offset into it; the decw/incw on the
 * offset register wrap at 64K exactly like a real-mode CPU.
 *
 * push* pre-decrement and store, updating "ptr" in place; pop* load,
 * post-increment, and yield the value read.  Multi-byte values are
 * moved a byte at a time, in little-endian order, using the %b/%h
 * sub-register operand modifiers (and rorl to reach the high half of
 * a 32-bit value).
 */
#define pushb(base, ptr, val) \
__asm__ __volatile__( \
	"decw %w0\n\t" \
	"movb %2,%%fs:0(%1,%0)" \
	: "=r" (ptr) \
	: "r" (base), "q" (val), "0" (ptr))

#define pushw(base, ptr, val) \
__asm__ __volatile__( \
	"decw %w0\n\t" \
	"movb %h2,%%fs:0(%1,%0)\n\t" \
	"decw %w0\n\t" \
	"movb %b2,%%fs:0(%1,%0)" \
	: "=r" (ptr) \
	: "r" (base), "q" (val), "0" (ptr))

#define pushl(base, ptr, val) \
__asm__ __volatile__( \
	"decw %w0\n\t" \
	"rorl $16,%2\n\t" \
	"movb %h2,%%fs:0(%1,%0)\n\t" \
	"decw %w0\n\t" \
	"movb %b2,%%fs:0(%1,%0)\n\t" \
	"decw %w0\n\t" \
	"rorl $16,%2\n\t" \
	"movb %h2,%%fs:0(%1,%0)\n\t" \
	"decw %w0\n\t" \
	"movb %b2,%%fs:0(%1,%0)" \
	: "=r" (ptr) \
	: "r" (base), "q" (val), "0" (ptr))

#define popb(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
	"movb %%fs:0(%1,%0),%b2\n\t" \
	"incw %w0" \
	: "=r" (ptr), "=r" (base), "=r" (__res) \
	: "0" (ptr), "1" (base), "2" (0)); \
__res; })

#define popw(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
	"movb %%fs:0(%1,%0),%b2\n\t" \
	"incw %w0\n\t" \
	"movb %%fs:0(%1,%0),%h2\n\t" \
	"incw %w0" \
	: "=r" (ptr), "=r" (base), "=r" (__res) \
	: "0" (ptr), "1" (base), "2" (0)); \
__res; })

#define popl(base, ptr) \
({ unsigned long __res; \
__asm__ __volatile__( \
	"movb %%fs:0(%1,%0),%b2\n\t" \
	"incw %w0\n\t" \
	"movb %%fs:0(%1,%0),%h2\n\t" \
	"incw %w0\n\t" \
	"rorl $16,%2\n\t" \
	"movb %%fs:0(%1,%0),%b2\n\t" \
	"incw %w0\n\t" \
	"movb %%fs:0(%1,%0),%h2\n\t" \
	"incw %w0\n\t" \
	"rorl $16,%2" \
	: "=r" (ptr), "=r" (base), "=r" (__res) \
	: "0" (ptr), "1" (base)); \
__res; })
257
258 static void do_int(struct vm86_regs *regs, int i, unsigned char * ssp, unsigned long sp)
259 {
260 unsigned short seg = get_fs_word((void *) ((i<<2)+2));
261
262 if (seg == BIOSSEG || regs->cs == BIOSSEG ||
263 is_revectored(i, ¤t->vm86_info->int_revectored))
264 return_to_32bit(regs, VM86_INTx + (i << 8));
265 if (i==0x21 && is_revectored((regs->eax >> 4) & 0xff,¤t->vm86_info->int21_revectored)) {
266 return_to_32bit(regs, VM86_INTx + (i << 8));
267 }
268 pushw(ssp, sp, get_vflags(regs));
269 pushw(ssp, sp, regs->cs);
270 pushw(ssp, sp, IP(regs));
271 regs->cs = seg;
272 SP(regs) -= 6;
273 IP(regs) = get_fs_word((void *) (i<<2));
274 clear_TF(regs);
275 clear_IF(regs);
276 return;
277 }
278
279
280 void handle_vm86_fault(struct vm86_regs * regs, long error_code)
281 {
282 unsigned char *csp, *ssp;
283 unsigned long ip, sp;
284
285 csp = (unsigned char *) (regs->cs << 4);
286 ssp = (unsigned char *) (regs->ss << 4);
287 sp = SP(regs);
288 ip = IP(regs);
289
290 switch (popb(csp, ip)) {
291
292
293 case 0x66:
294 switch (popb(csp, ip)) {
295
296
297 case 0x9c:
298 SP(regs) -= 4;
299 IP(regs) += 2;
300 pushl(ssp, sp, get_vflags(regs));
301 return;
302
303
304 case 0x9d:
305 SP(regs) += 4;
306 IP(regs) += 2;
307 set_vflags_long(popl(ssp, sp), regs);
308 return;
309 }
310
311
312 case 0x9c:
313 SP(regs) -= 2;
314 IP(regs)++;
315 pushw(ssp, sp, get_vflags(regs));
316 return;
317
318
319 case 0x9d:
320 SP(regs) += 2;
321 IP(regs)++;
322 set_vflags_short(popw(ssp, sp), regs);
323 return;
324
325
326 case 0xcc:
327 IP(regs)++;
328 do_int(regs, 3, ssp, sp);
329 return;
330
331
332 case 0xcd:
333 IP(regs) += 2;
334 do_int(regs, popb(csp, ip), ssp, sp);
335 return;
336
337
338 case 0xcf:
339 SP(regs) += 6;
340 IP(regs) = popw(ssp, sp);
341 regs->cs = popw(ssp, sp);
342 set_vflags_short(popw(ssp, sp), regs);
343 return;
344
345
346 case 0xfa:
347 IP(regs)++;
348 clear_IF(regs);
349 return;
350
351
352 case 0xfb:
353 IP(regs)++;
354 set_IF(regs);
355 return;
356
357 default:
358 return_to_32bit(regs, VM86_UNKNOWN);
359 }
360 }