This source file includes following definitions.
- disable_hlt
- enable_hlt
- hard_idle
- sys_idle
- sys_idle
- cpu_idle
- kb_wait
- hard_reset_now
- show_regs
- exit_thread
- flush_thread
- release_thread
- copy_thread
- dump_fpu
- dump_thread
- sys_fork
- sys_clone
- sys_execve
1
2
3
4
5
6
7
8
9
10
11 #define __KERNEL_SYSCALLS__
12 #include <stdarg.h>
13
14 #include <linux/errno.h>
15 #include <linux/sched.h>
16 #include <linux/kernel.h>
17 #include <linux/mm.h>
18 #include <linux/stddef.h>
19 #include <linux/unistd.h>
20 #include <linux/ptrace.h>
21 #include <linux/malloc.h>
22 #include <linux/ldt.h>
23 #include <linux/user.h>
24 #include <linux/a.out.h>
25 #include <linux/interrupt.h>
26 #include <linux/config.h>
27 #include <linux/unistd.h>
28
29 #include <asm/segment.h>
30 #include <asm/pgtable.h>
31 #include <asm/system.h>
32 #include <asm/io.h>
33 #include <linux/smp.h>
34
35
36 asmlinkage void ret_from_sys_call(void) __asm__("ret_from_sys_call");
37
38 #ifdef CONFIG_APM
39 extern int apm_do_idle(void);
40 extern void apm_do_busy(void);
41 #endif
42
43 static int hlt_counter=0;
44
45 #define HARD_IDLE_TIMEOUT (HZ / 3)
46
47 void disable_hlt(void)
48 {
49 hlt_counter++;
50 }
51
52 void enable_hlt(void)
53 {
54 hlt_counter--;
55 }
56
57 #ifndef __SMP__
58
/*
 * Deep idle, entered by sys_idle() once the CPU has been idle for
 * longer than HARD_IDLE_TIMEOUT.  Halts the processor between
 * reschedule checks; with APM compiled in, idle time is first
 * offered to the APM BIOS.
 */
static void hard_idle(void)
{
	while (!need_resched) {
		if (hlt_works_ok && !hlt_counter) {
#ifdef CONFIG_APM
			/*
			 * Keep bottom halves from running while control may
			 * be handed to the APM BIOS; only HLT ourselves if
			 * the BIOS declined and nothing became runnable in
			 * the meantime.
			 */
			start_bh_atomic();
			if (!apm_do_idle() && !need_resched)
				__asm__("hlt");
			end_bh_atomic();
#else
			__asm__("hlt");
#endif
		}
		if (need_resched)
			break;
		schedule();
	}
#ifdef CONFIG_APM
	apm_do_busy();	/* tell the APM BIOS we are busy again */
#endif
}
85
86
87
88
89
/*
 * The uniprocessor idle loop.  Only the idle task (pid 0) may call
 * this; it never returns.  Starts with plain HLT-per-iteration and
 * escalates to hard_idle() after HARD_IDLE_TIMEOUT jiffies of
 * uninterrupted idleness.
 */
asmlinkage int sys_idle(void)
{
	unsigned long start_idle = 0;

	/* Refuse everyone but the idle task. */
	if (current->pid != 0)
		return -EPERM;

	/* Lowest possible priority: any runnable task preempts us. */
	current->counter = -100;
	for (;;)
	{
		/* Record when the current idle period began. */
		if (!start_idle)
			start_idle = jiffies;
		if (jiffies - start_idle > HARD_IDLE_TIMEOUT)
		{
			hard_idle();
		}
		else
		{
			/* Light idle: single HLT if nothing is runnable. */
			if (hlt_works_ok && !hlt_counter && !need_resched)
				__asm__("hlt");
		}
		/* A pending reschedule ends this idle period. */
		if (need_resched)
			start_idle = 0;
		schedule();
	}
}
121
122 #else
123
124
125
126
127
128
129 asmlinkage int sys_idle(void)
130 {
131 if(current->pid != 0)
132 return -EPERM;
133 current->counter= -100;
134 schedule();
135 return 0;
136 }
137
138
139
140
141
/*
 * Per-CPU idle loop used on SMP: HLT whenever this processor
 * supports it, HLT is not inhibited, and nothing is runnable,
 * then call idle().  NOTE(review): idle() is presumably the
 * kernel-syscall stub for sys_idle() pulled in via
 * __KERNEL_SYSCALLS__ -- confirm against linux/unistd.h.
 */
int cpu_idle(void *unused)
{
	while(1)
	{
		if(cpu_data[smp_processor_id()].hlt_works_ok && !hlt_counter && !need_resched)
			__asm("hlt");
		idle();
	}
}
151
152 #endif
153
154
155
156
157
158
159 static long no_idt[2] = {0, 0};
160
/*
 * Poll the keyboard controller status port (0x64) until its input
 * buffer is empty (bit 1 clear), giving up after 0x10000 reads so
 * a dead controller cannot hang the reboot path.
 */
static inline void kb_wait(void)
{
	int tries = 0x10000;

	while (tries-- > 0) {
		if (!(inb_p(0x64) & 0x02))
			break;
	}
}
169
/*
 * Reboot the machine the hard way: repeatedly pulse the keyboard
 * controller's CPU-reset line (8042 command 0xfe), and if the CPU
 * somehow survives, load an empty IDT so the next interrupt
 * triple-faults the processor.  Never returns.
 */
void hard_reset_now(void)
{
	int i, j;

	sti();
	/* Make page 0 writable again so the BIOS data area can be set.
	 * NOTE(review): 7 = present|rw|user in the page table -- verify. */
	pg0[0] = 7;
	/* 0x1234 at 0x472 requests a warm boot from the BIOS. */
	*((unsigned short *)0x472) = 0x1234;
	for (;;) {
		for (i=0; i<100; i++) {
			kb_wait();
			for(j = 0; j < 100000 ; j++)
				;	/* crude settle delay */
			outb(0xfe,0x64);	/* pulse CPU reset line */
		}
		/* Fallback: null IDT => triple fault => hardware reset. */
		__asm__ __volatile__("\tlidt %0": "=m" (no_idt));
	}
}
188
/*
 * Print the saved register frame *regs to the console.  The stack
 * segment/pointer are only printed when the saved CS privilege
 * bits show the trap came from outside kernel mode (CS & 3).
 */
void show_regs(struct pt_regs * regs)
{
	printk("\n");
	printk("EIP: %04x:[<%08lx>]",0xffff & regs->cs,regs->eip);
	if (regs->cs & 3)
		printk(" ESP: %04x:%08lx",0xffff & regs->ss,regs->esp);
	printk(" EFLAGS: %08lx\n",regs->eflags);
	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->eax,regs->ebx,regs->ecx,regs->edx);
	printk("ESI: %08lx EDI: %08lx EBP: %08lx",
		regs->esi, regs->edi, regs->ebp);
	printk(" DS: %04x ES: %04x FS: %04x GS: %04x\n",
		0xffff & regs->ds,0xffff & regs->es,
		0xffff & regs->fs,0xffff & regs->gs);
}
204
205
206
207
208
/*
 * Tear down the architecture-specific state of the exiting task:
 * drop lazy-FPU ownership, load null selectors into %fs/%gs and
 * the LDT register, and free any private LDT the task allocated.
 */
void exit_thread(void)
{
	/* Forget lazy FPU ownership so no one restores our state. */
	if (last_task_used_math == current)
		last_task_used_math = NULL;
	/* Null out the user data segments and the task's LDT. */
	__asm__ __volatile__("mov %w0,%%fs ; mov %w0,%%gs ; lldt %w0"
		:
		: "r" (0));
	current->tss.ldt = 0;
	if (current->ldt) {
		/* Clear the pointer before freeing the table. */
		void * ldt = current->ldt;
		current->ldt = NULL;
		vfree(ldt);
	}
}
225
/*
 * Reset per-thread state for exec(): discard any private LDT,
 * pointing every GDT slot belonging to this task back at the
 * default LDT, and clear the hardware debug registers.
 */
void flush_thread(void)
{
	int i;

	if (current->ldt) {
		/* NOTE(review): the private LDT is vmalloc()'d in
		 * copy_thread() and vfree()'d in exit_thread(), but
		 * free_page()'d here -- confirm this mismatch is
		 * intentional before touching it. */
		free_page((unsigned long) current->ldt);
		current->ldt = NULL;
		for (i=1 ; i<NR_TASKS ; i++) {
			if (task[i] == current) {
				set_ldt_desc(gdt+(i<<1)+
					FIRST_LDT_ENTRY,&default_ldt, 1);
				load_ldt(i);
			}
		}
	}
	/* A fresh image starts with no hardware breakpoints. */
	for (i=0 ; i<8 ; i++)
		current->debugreg[i] = 0;
}
245
/*
 * Architecture hook called when a dead task's resources are
 * released.  Nothing to do on i386: all per-thread state lives in
 * the task_struct/TSS and is reclaimed elsewhere.
 */
void release_thread(struct task_struct *dead_task)
{
}
249
/*
 * Set up the architecture-specific state of a freshly forked task:
 * initialize its TSS, build a child register frame so the child
 * resumes at ret_from_sys_call with eax == 0, duplicate the
 * parent's private LDT if it has one, and install the TSS/LDT
 * descriptors in the GDT slot pair for task `nr`.
 *
 * nr:    task slot index (selects the GDT descriptor pair)
 * esp:   user stack pointer for the child
 * p:     the new (already memcpy'd-from-parent) task_struct
 * regs:  the parent's saved register frame at syscall entry
 */
void copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
	struct task_struct * p, struct pt_regs * regs)
{
	int i;
	struct pt_regs * childregs;

	/* Kernel-mode segment setup for the new TSS. */
	p->tss.es = KERNEL_DS;
	p->tss.cs = KERNEL_CS;
	p->tss.ss = KERNEL_DS;
	p->tss.ds = KERNEL_DS;
	p->tss.fs = USER_DS;
	p->tss.gs = KERNEL_DS;
	p->tss.ss0 = KERNEL_DS;
	/* Ring-0 stack starts at the top of the kernel stack page. */
	p->tss.esp0 = p->kernel_stack_page + PAGE_SIZE;
	p->tss.tr = _TSS(nr);
	/* Place a copy of the parent's register frame at the top of
	 * the child's kernel stack; the child "returns" through it. */
	childregs = ((struct pt_regs *) (p->kernel_stack_page + PAGE_SIZE)) - 1;
	p->tss.esp = (unsigned long) childregs;
	p->tss.eip = (unsigned long) ret_from_sys_call;
	*childregs = *regs;
	childregs->eax = 0;		/* fork() returns 0 in the child */
	childregs->esp = esp;		/* child's user stack */
	p->tss.back_link = 0;
	/* Mask the IOPL/NT bits out of the child's eflags. */
	p->tss.eflags = regs->eflags & 0xffffcfff;
	p->tss.ldt = _LDT(nr);
	/* p was copied from current, so p->ldt != NULL means the
	 * parent has a private LDT: give the child its own copy. */
	if (p->ldt) {
		p->ldt = (struct desc_struct*) vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
		if (p->ldt != NULL)
			memcpy(p->ldt, current->ldt, LDT_ENTRIES*LDT_ENTRY_SIZE);
	}
	set_tss_desc(gdt+(nr<<1)+FIRST_TSS_ENTRY,&(p->tss));
	if (p->ldt)
		set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY,p->ldt, 512);
	else
		set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY,&default_ldt, 1);
	p->tss.bitmap = offsetof(struct thread_struct,io_bitmap);
	/* All-ones I/O bitmap: no user I/O port access by default. */
	for (i = 0; i < IO_BITMAP_SIZE+1 ; i++)
		p->tss.io_bitmap[i] = ~0;
	/* If the parent owns the FPU, snapshot its state into the
	 * child's TSS and restore it for the parent (fnsave clears
	 * the FPU, frstor puts it back). */
	if (last_task_used_math == current)
		__asm__("clts ; fnsave %0 ; frstor %0":"=m" (p->tss.i387));
}
290
291
292
293
294 int dump_fpu (struct user_i387_struct* fpu)
295 {
296 int fpvalid;
297
298
299
300 if (hard_math) {
301 if ((fpvalid = current->used_math) != 0) {
302 if (last_task_used_math == current)
303 __asm__("clts ; fnsave %0": :"m" (*fpu));
304 else
305 memcpy(fpu,¤t->tss.i387.hard,sizeof(*fpu));
306 }
307 } else {
308
309
310 fpvalid = 0;
311 }
312
313 return fpvalid;
314 }
315
316
317
318
/*
 * Fill in an a.out-style `struct user` core-dump header for the
 * current task from its register frame: a.out magic, text/data/
 * stack sizes in pages, debug registers, registers, and FPU state.
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	int i;

	dump->magic = CMAGIC;
	dump->start_code = 0;
	/* Stack start rounded down to a page boundary. */
	dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
	/* Sizes in 4K pages (>> 12); data size excludes text. */
	dump->u_tsize = ((unsigned long) current->mm->end_code) >> 12;
	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> 12;
	dump->u_dsize -= dump->u_tsize;
	dump->u_ssize = 0;
	for (i = 0; i < 8; i++)
		dump->u_debugreg[i] = current->debugreg[i];

	/* Stack size: pages from the stack pointer to the top of the
	 * user address space (only if esp is a sane user address). */
	if (dump->start_stack < TASK_SIZE) {
		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> 12;
	}

	dump->regs = *regs;

	dump->u_fpvalid = dump_fpu (&dump->i387);
}
342
343 asmlinkage int sys_fork(struct pt_regs regs)
344 {
345 return do_fork(SIGCHLD, regs.esp, ®s);
346 }
347
348 asmlinkage int sys_clone(struct pt_regs regs)
349 {
350 unsigned long clone_flags;
351 unsigned long newsp;
352
353 clone_flags = regs.ebx;
354 newsp = regs.ecx;
355 if (!newsp)
356 newsp = regs.esp;
357 return do_fork(clone_flags, newsp, ®s);
358 }
359
360
361
362
363 asmlinkage int sys_execve(struct pt_regs regs)
364 {
365 int error;
366 char * filename;
367
368 error = getname((char *) regs.ebx, &filename);
369 if (error)
370 return error;
371 error = do_execve(filename, (char **) regs.ecx, (char **) regs.edx, ®s);
372 putname(filename);
373 return error;
374 }