This source file includes following definitions.
- disable_hlt
- enable_hlt
- hard_idle
- sys_idle
- sys_idle
- cpu_idle
- kb_wait
- hard_reset_now
- show_regs
- exit_thread
- flush_thread
- release_thread
- copy_thread
- dump_fpu
- dump_thread
- sys_fork
- sys_clone
- sys_execve
1
2
3
4
5
6
7
8
9
10
11 #define __KERNEL_SYSCALLS__
12 #include <stdarg.h>
13
14 #include <linux/errno.h>
15 #include <linux/sched.h>
16 #include <linux/kernel.h>
17 #include <linux/mm.h>
18 #include <linux/stddef.h>
19 #include <linux/unistd.h>
20 #include <linux/ptrace.h>
21 #include <linux/malloc.h>
22 #include <linux/ldt.h>
23 #include <linux/user.h>
24 #include <linux/a.out.h>
25 #include <linux/interrupt.h>
26 #include <linux/config.h>
27 #include <linux/unistd.h>
28
29 #include <asm/segment.h>
30 #include <asm/pgtable.h>
31 #include <asm/system.h>
32 #include <asm/io.h>
33 #include <linux/smp.h>
34
35
36 asmlinkage void ret_from_sys_call(void) __asm__("ret_from_sys_call");
37
38 #ifdef CONFIG_APM
39 extern int apm_do_idle(void);
40 extern void apm_do_busy(void);
41 #endif
42
/*
 * A non-zero hlt_counter means the idle loops must not use the "hlt"
 * instruction; callers bump it around code sequences where halting
 * the CPU would be unsafe.
 */
static int hlt_counter=0;

#define HARD_IDLE_TIMEOUT (HZ / 3)

/* Forbid "hlt" in the idle loops (nestable with enable_hlt()). */
void disable_hlt(void)
{
	++hlt_counter;
}

/* Undo one disable_hlt(); "hlt" is allowed again at zero. */
void enable_hlt(void)
{
	--hlt_counter;
}
56
57 #ifndef __SMP__
58
/*
 * Deep idle path, entered once the CPU has been idle for a while
 * (see HARD_IDLE_TIMEOUT use in sys_idle).  Loops until a reschedule
 * is needed, halting the CPU whenever that is permitted.
 */
static void hard_idle(void)
{
	while (!need_resched) {
		if (hlt_works_ok && !hlt_counter) {
#ifdef CONFIG_APM
			/*
			 * With APM, let the BIOS power-manage the CPU first.
			 * Bottom halves are held atomic so the need_resched
			 * re-check and the "hlt" are not raced by bh
			 * processing; plain "hlt" is only the fallback when
			 * apm_do_idle() reports it did nothing.
			 */
			start_bh_atomic();
			if (!apm_do_idle() && !need_resched)
				__asm__("hlt");
			end_bh_atomic();
#else
			__asm__("hlt");
#endif
		}
		if (need_resched)
			break;
		schedule();
	}
#ifdef CONFIG_APM
	apm_do_busy();	/* tell APM we are busy again */
#endif
}
85
86
87
88
89
/*
 * Idle loop for the idle task (pid 0) on uniprocessor kernels.
 * Starts with a light "hlt" idle and escalates to hard_idle() once
 * the CPU has been continuously idle for more than HARD_IDLE_TIMEOUT
 * jiffies.  Never returns for pid 0; -EPERM for anyone else.
 */
asmlinkage int sys_idle(void)
{
	unsigned long start_idle = 0;	/* jiffies when this idle stretch began */

	/* Only the idle task may enter the idle loop. */
	if (current->pid != 0)
		return -EPERM;

	/* Lowest possible priority: any runnable task preempts us. */
	current->counter = -100;
	for (;;)
	{
		if (!start_idle)
			start_idle = jiffies;
		if (jiffies - start_idle > HARD_IDLE_TIMEOUT)
		{
			hard_idle();
		}
		else
		{
			/* Light idle: halt until the next interrupt. */
			if (hlt_works_ok && !hlt_counter && !need_resched)
				__asm__("hlt");
		}
		/* Work arrived: restart the idle-time measurement. */
		if (need_resched)
			start_idle = 0;
		schedule();
	}
}
121
122 #else
123
124
125
126
127
128
129 asmlinkage int sys_idle(void)
130 {
131 if(current->pid != 0)
132 return -EPERM;
133 #ifdef __SMP_PROF__
134 smp_spins_sys_idle[smp_processor_id()]+=
135 smp_spins_syscall_cur[smp_processor_id()];
136 #endif
137 current->counter= -100;
138 schedule();
139 return 0;
140 }
141
142
143
144
145
146 int cpu_idle(void *unused)
147 {
148 while(1)
149 {
150 if(cpu_data[smp_processor_id()].hlt_works_ok && !hlt_counter && !need_resched)
151 __asm("hlt");
152 idle();
153 }
154 }
155
156 #endif
157
158
159
160
161
162
/* An all-zero IDT descriptor: loading it guarantees a triple fault. */
static long no_idt[2] = {0, 0};

/*
 * Poll (bounded) until the keyboard controller's input buffer drains,
 * i.e. bit 1 of status port 0x64 clears; give up after 0x10000 polls
 * so a dead controller cannot hang the reset path.
 */
static inline void kb_wait(void)
{
	int attempts;

	for (attempts = 0; attempts < 0x10000; attempts++) {
		if (!(inb_p(0x64) & 0x02))
			break;
	}
}
173
/*
 * Reboot the machine the hard way: ask the keyboard controller to
 * pulse the CPU reset line, falling back to a triple fault via an
 * empty IDT if that does not take.
 */
void hard_reset_now(void)
{
	int i, j;

	sti();
	/* Map the first page so the real-mode reset vector is reachable.
	   NOTE(review): 7 presumably = present|rw|user PTE flags — confirm. */
	pg0[0] = 7;
	/* 0x1234 at BIOS location 0x472 requests a warm boot. */
	*((unsigned short *)0x472) = 0x1234;
	for (;;) {
		for (i=0; i<100; i++) {
			kb_wait();
			for(j = 0; j < 100000 ; j++)	/* crude delay loop */
				;
			outb(0xfe,0x64);	/* pulse the CPU reset line */
		}
		/* Last resort: load an empty IDT and triple-fault.
		   NOTE(review): "=m" looks like it should be an input
		   constraint ("m") — lidt only reads the operand. */
		__asm__ __volatile__("\tlidt %0": "=m" (no_idt));
	}
}
192
/*
 * Dump the register frame *regs to the console, oops-style.  The
 * stack segment/pointer are only printed when the frame came from
 * user mode (CPL in the low two bits of cs is non-zero).
 */
void show_regs(struct pt_regs * regs)
{
	printk("\n");
	printk("EIP: %04x:[<%08lx>]",0xffff & regs->cs,regs->eip);
	if (regs->cs & 3)
		printk(" ESP: %04x:%08lx",0xffff & regs->ss,regs->esp);
	printk(" EFLAGS: %08lx\n",regs->eflags);
	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->eax,regs->ebx,regs->ecx,regs->edx);
	printk("ESI: %08lx EDI: %08lx EBP: %08lx",
		regs->esi, regs->edi, regs->ebp);
	printk(" DS: %04x ES: %04x FS: %04x GS: %04x\n",
		0xffff & regs->ds,0xffff & regs->es,
		0xffff & regs->fs,0xffff & regs->gs);
}
208
209
210
211
212
213 void exit_thread(void)
214 {
215
216 if (last_task_used_math == current)
217 last_task_used_math = NULL;
218
219 __asm__ __volatile__("mov %w0,%%fs ; mov %w0,%%gs ; lldt %w0"
220 :
221 : "r" (0));
222 current->tss.ldt = 0;
223 if (current->ldt) {
224 void * ldt = current->ldt;
225 current->ldt = NULL;
226 vfree(ldt);
227 }
228 }
229
230 void flush_thread(void)
231 {
232 int i;
233
234 if (current->ldt) {
235 free_page((unsigned long) current->ldt);
236 current->ldt = NULL;
237 for (i=1 ; i<NR_TASKS ; i++) {
238 if (task[i] == current) {
239 set_ldt_desc(gdt+(i<<1)+
240 FIRST_LDT_ENTRY,&default_ldt, 1);
241 load_ldt(i);
242 }
243 }
244 }
245
246 for (i=0 ; i<8 ; i++)
247 current->debugreg[i] = 0;
248 }
249
/*
 * Free architecture-specific resources of a dead task.  Nothing to do
 * on i386: the per-thread state is already torn down in exit_thread().
 */
void release_thread(struct task_struct *dead_task)
{
}
253
/*
 * Set up the TSS and kernel stack of a freshly forked task so it
 * starts life in ret_from_sys_call with a copy of the parent's user
 * register frame, returning 0 in %eax and running on stack 'esp'.
 *
 * nr:          task slot index (selects the GDT TSS/LDT descriptors)
 * clone_flags: unused here (NOTE(review): LDT sharing is not keyed
 *              off CLONE_* flags in this version — confirm intent)
 * esp:         user stack pointer for the child
 * p:           the child task (already a byte copy of the parent)
 * regs:        parent's user-mode register frame
 */
void copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
	struct task_struct * p, struct pt_regs * regs)
{
	int i;
	struct pt_regs * childregs;

	/* Kernel-mode segment state for the new task. */
	p->tss.es = KERNEL_DS;
	p->tss.cs = KERNEL_CS;
	p->tss.ss = KERNEL_DS;
	p->tss.ds = KERNEL_DS;
	p->tss.fs = USER_DS;
	p->tss.gs = KERNEL_DS;
	p->tss.ss0 = KERNEL_DS;
	p->tss.esp0 = p->kernel_stack_page + PAGE_SIZE;
	p->tss.tr = _TSS(nr);
	/* Place a copy of the parent's register frame at the top of the
	   child's kernel stack; the child resumes in ret_from_sys_call. */
	childregs = ((struct pt_regs *) (p->kernel_stack_page + PAGE_SIZE)) - 1;
	p->tss.esp = (unsigned long) childregs;
	p->tss.eip = (unsigned long) ret_from_sys_call;
	*childregs = *regs;
	childregs->eax = 0;		/* the child sees fork() return 0 */
	childregs->esp = esp;
	p->tss.back_link = 0;
	/* NOTE(review): mask 0xffffcfff clears eflags bits 12-13 (IOPL)
	   in the child — assumed intent; verify against the i386 manual. */
	p->tss.eflags = regs->eflags & 0xffffcfff;
	p->tss.ldt = _LDT(nr);
	/* p is a byte copy of the parent, so a non-NULL p->ldt means the
	   parent has a private LDT: give the child its own copy of it. */
	if (p->ldt) {
		p->ldt = (struct desc_struct*) vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
		if (p->ldt != NULL)
			memcpy(p->ldt, current->ldt, LDT_ENTRIES*LDT_ENTRY_SIZE);
	}
	set_tss_desc(gdt+(nr<<1)+FIRST_TSS_ENTRY,&(p->tss));
	if (p->ldt)
		set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY,p->ldt, 512);
	else
		set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY,&default_ldt, 1);
	/* No I/O port permissions: bitmap offset points into the thread
	   struct and every bitmap bit is set (access denied). */
	p->tss.bitmap = offsetof(struct thread_struct,io_bitmap);
	for (i = 0; i < IO_BITMAP_SIZE+1 ; i++)
		p->tss.io_bitmap[i] = ~0;
	/* If we currently own the FPU, snapshot its live state into the
	   child and restore it for ourselves. */
	if (last_task_used_math == current)
		__asm__("clts ; fnsave %0 ; frstor %0":"=m" (p->tss.i387));
}
294
295
296
297
298 int dump_fpu (struct user_i387_struct* fpu)
299 {
300 int fpvalid;
301
302
303
304 if (hard_math) {
305 if ((fpvalid = current->used_math) != 0) {
306 if (last_task_used_math == current)
307 __asm__("clts ; fnsave %0": :"m" (*fpu));
308 else
309 memcpy(fpu,¤t->tss.i387.hard,sizeof(*fpu));
310 }
311 } else {
312
313
314 fpvalid = 0;
315 }
316
317 return fpvalid;
318 }
319
320
321
322
/*
 * Fill in an a.out-style 'struct user' core-dump header from the
 * current task and the given register frame: text/data/stack sizes
 * in pages, debug registers, the user registers and the FPU image.
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	int i;

	dump->magic = CMAGIC;
	dump->start_code = 0;
	/* Page-align the stack start downwards. */
	dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
	/* Data size: pages up to the (rounded-up) brk, minus the text. */
	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
	dump->u_dsize -= dump->u_tsize;
	dump->u_ssize = 0;
	for (i = 0; i < 8; i++)
		dump->u_debugreg[i] = current->debugreg[i];

	if (dump->start_stack < TASK_SIZE)
		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

	dump->regs = *regs;

	/* u_fpvalid flags whether the i387 image is meaningful. */
	dump->u_fpvalid = dump_fpu (&dump->i387);
}
345
346 asmlinkage int sys_fork(struct pt_regs regs)
347 {
348 return do_fork(SIGCHLD, regs.esp, ®s);
349 }
350
351 asmlinkage int sys_clone(struct pt_regs regs)
352 {
353 unsigned long clone_flags;
354 unsigned long newsp;
355
356 clone_flags = regs.ebx;
357 newsp = regs.ecx;
358 if (!newsp)
359 newsp = regs.esp;
360 return do_fork(clone_flags, newsp, ®s);
361 }
362
363
364
365
366 asmlinkage int sys_execve(struct pt_regs regs)
367 {
368 int error;
369 char * filename;
370
371 error = getname((char *) regs.ebx, &filename);
372 if (error)
373 return error;
374 error = do_execve(filename, (char **) regs.ecx, (char **) regs.edx, ®s);
375 putname(filename);
376 return error;
377 }