This source file includes the following definitions:
- disable_hlt
- enable_hlt
- hard_idle
- sys_idle
- kb_wait
- hard_reset_now
- show_regs
- exit_thread
- flush_thread
- release_thread
- copy_thread
- dump_fpu
- dump_thread
- sys_fork
- sys_clone
- sys_execve
/*
 *  linux/arch/i386/kernel/process.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/malloc.h>
#include <linux/ldt.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/config.h>

#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <linux/smp.h>

asmlinkage void ret_from_sys_call(void) __asm__("ret_from_sys_call");

#ifdef CONFIG_APM
extern int apm_do_idle(void);
extern void apm_do_busy(void);
#endif

static int hlt_counter = 0;

#define HARD_IDLE_TIMEOUT (HZ / 3)

void disable_hlt(void)
{
	hlt_counter++;
}

void enable_hlt(void)
{
	hlt_counter--;
}

static void hard_idle(void)
{
	while (!need_resched) {
		if (hlt_works_ok && !hlt_counter) {
#ifdef CONFIG_APM
			/*
			 * Let the APM BIOS idle the CPU if it can; fall back
			 * to a plain "hlt" when it declines.  Bottom halves
			 * are kept atomic around the call so pending work
			 * cannot be delivered while we sleep.
			 */
			start_bh_atomic();
			if (!apm_do_idle() && !need_resched)
				__asm__("hlt");
			end_bh_atomic();
#else
			__asm__("hlt");
#endif
		}
		if (need_resched)
			break;
		schedule();
	}
#ifdef CONFIG_APM
	apm_do_busy();
#endif
}

/*
 * The idle loop on a uniprocessor i386..
 */
asmlinkage int sys_idle(void)
{
#ifndef __SMP__
	unsigned long start_idle = 0;
#endif

	/* Only the initial task (pid 0) is allowed to idle. */
	if (current->pid != 0)
		return -EPERM;

#ifdef __SMP__
	/*
	 * Sanity checks: the idle task must run on the CPU that currently
	 * holds the kernel, with exactly one syscall in progress and the
	 * kernel lock held exactly once.
	 */
	if (smp_processor_id() != active_kernel_processor)
		panic("CPU is %d, kernel CPU is %d in sys_idle!\n",
			smp_processor_id(), active_kernel_processor);
	if (syscall_count != 1)
		printk("sys_idle: syscall count is not 1 (%ld)\n", syscall_count);
	if (kernel_counter != 1) {
		printk("CPU %d, sys_idle, kernel_counter is %ld\n",
			smp_processor_id(), kernel_counter);
		if (!kernel_counter)
			panic("kernel locking botch");
	}
	/* On SMP, just give up the CPU with minimal priority and return. */
	current->counter = -100;
	schedule();
	return 0;
#endif

	/* Endless idle loop with no priority at all.  Once the CPU has been
	 * idle longer than HARD_IDLE_TIMEOUT (HZ/3, i.e. a third of a
	 * second), switch from a plain "hlt" to hard_idle(). */
	current->counter = -100;
	for (;;) {
#ifdef __SMP__
		if (cpu_data[smp_processor_id()].hlt_works_ok && !hlt_counter && !need_resched)
			__asm__("hlt");
#else
		if (!start_idle)
			start_idle = jiffies;
		if (jiffies - start_idle > HARD_IDLE_TIMEOUT) {
			hard_idle();
		} else {
			if (hlt_works_ok && !hlt_counter && !need_resched)
				__asm__("hlt");
		}
		if (need_resched)
			start_idle = 0;
#endif
		schedule();
	}
}

/*
 * This routine reboots the machine by asking the keyboard controller
 * to pulse the reset line low.  If that keeps failing, it loads an
 * empty interrupt descriptor table, so the next interrupt triple-faults
 * the CPU and forces a reset.
 */
static long no_idt[2] = {0, 0};

static inline void kb_wait(void)
{
	int i;

	/* Wait (bounded) for the keyboard controller's input buffer to drain. */
	for (i = 0; i < 0x10000; i++)
		if ((inb_p(0x64) & 0x02) == 0)
			break;
}

void hard_reset_now(void)
{
	int i, j;

	sti();
	/* Rebooting needs to touch the page at absolute address 0. */
	pg0[0] = 7;
	/* 0x1234 at BIOS location 0x472 requests a warm boot. */
	*((unsigned short *)0x472) = 0x1234;
	for (;;) {
		for (i = 0; i < 100; i++) {
			kb_wait();
			for (j = 0; j < 100000; j++)
				/* delay */;
			outb(0xfe, 0x64);	/* pulse reset low */
		}
		/* Last resort: empty IDT, so any interrupt triple-faults. */
		__asm__ __volatile__("\tlidt %0": "=m" (no_idt));
	}
}

void show_regs(struct pt_regs * regs)
{
	printk("\n");
	printk("EIP: %04x:[<%08lx>]", 0xffff & regs->cs, regs->eip);
	if (regs->cs & 3)
		printk(" ESP: %04x:%08lx", 0xffff & regs->ss, regs->esp);
	printk(" EFLAGS: %08lx\n", regs->eflags);
	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->eax, regs->ebx, regs->ecx, regs->edx);
	printk("ESI: %08lx EDI: %08lx EBP: %08lx",
		regs->esi, regs->edi, regs->ebp);
	printk(" DS: %04x ES: %04x FS: %04x GS: %04x\n",
		0xffff & regs->ds, 0xffff & regs->es,
		0xffff & regs->fs, 0xffff & regs->gs);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* Forget lazy FPU state. */
	if (last_task_used_math == current)
		last_task_used_math = NULL;
	/* Forget the per-task segments: load null fs/gs and a null LDT. */
	__asm__ __volatile__("mov %w0,%%fs ; mov %w0,%%gs ; lldt %w0"
		: /* no outputs */
		: "r" (0));
	current->tss.ldt = 0;
	if (current->ldt) {
		void * ldt = current->ldt;
		current->ldt = NULL;
		vfree(ldt);
	}
}

void flush_thread(void)
{
	int i;

	if (current->ldt) {
		/* The LDT is allocated with vmalloc() (see copy_thread),
		 * so release it with vfree(), and point every task slot
		 * that refers to us back at the default LDT. */
		void * ldt = current->ldt;
		current->ldt = NULL;
		vfree(ldt);
		for (i = 1; i < NR_TASKS; i++) {
			if (task[i] == current) {
				set_ldt_desc(gdt+(i<<1)+
					FIRST_LDT_ENTRY, &default_ldt, 1);
				load_ldt(i);
			}
		}
	}

	/* Clear the hardware debug registers for the fresh image. */
	for (i = 0; i < 8; i++)
		current->debugreg[i] = 0;
}

void release_thread(struct task_struct *dead_task)
{
	/* Nothing to release on i386. */
}

void copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
	struct task_struct * p, struct pt_regs * regs)
{
	int i;
	struct pt_regs * childregs;

	p->tss.es = KERNEL_DS;
	p->tss.cs = KERNEL_CS;
	p->tss.ss = KERNEL_DS;
	p->tss.ds = KERNEL_DS;
	p->tss.fs = USER_DS;
	p->tss.gs = KERNEL_DS;
	p->tss.ss0 = KERNEL_DS;
	p->tss.esp0 = p->kernel_stack_page + PAGE_SIZE;
	p->tss.tr = _TSS(nr);
	/* Build a pt_regs frame at the top of the child's kernel stack;
	 * the child starts life in ret_from_sys_call with eax = 0, so
	 * fork/clone returns 0 in the child. */
	childregs = ((struct pt_regs *) (p->kernel_stack_page + PAGE_SIZE)) - 1;
	p->tss.esp = (unsigned long) childregs;
	p->tss.eip = (unsigned long) ret_from_sys_call;
	*childregs = *regs;
	childregs->eax = 0;
	childregs->esp = esp;
	p->tss.back_link = 0;
	p->tss.eflags = regs->eflags & 0xffffcfff;	/* IOPL is always 0 for a new process */
	p->tss.ldt = _LDT(nr);
	if (p->ldt) {
		/* p->ldt was inherited by the task-struct copy: if the
		 * parent has a private LDT, give the child its own copy. */
		p->ldt = (struct desc_struct*) vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
		if (p->ldt != NULL)
			memcpy(p->ldt, current->ldt, LDT_ENTRIES*LDT_ENTRY_SIZE);
	}
	set_tss_desc(gdt+(nr<<1)+FIRST_TSS_ENTRY, &(p->tss));
	if (p->ldt)
		set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY, p->ldt, 512);
	else
		set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY, &default_ldt, 1);
	p->tss.bitmap = offsetof(struct thread_struct, io_bitmap);
	/* An all-ones I/O bitmap denies the child access to every port. */
	for (i = 0; i < IO_BITMAP_SIZE+1; i++)
		p->tss.io_bitmap[i] = ~0;
	/* If we own the FPU, flush our state into the child's TSS
	 * (and restore it, since fnsave clears the FPU). */
	if (last_task_used_math == current)
		__asm__("clts ; fnsave %0 ; frstor %0" : "=m" (p->tss.i387));
}

/*
 * Fill in the FPU structure for a core dump..
 */
int dump_fpu(struct user_i387_struct* fpu)
{
	int fpvalid;

	if (hard_math) {
		if ((fpvalid = current->used_math) != 0) {
			if (last_task_used_math == current)
				__asm__("clts ; fnsave %0" : : "m" (*fpu));
			else
				memcpy(fpu, &current->tss.i387.hard, sizeof(*fpu));
		}
	} else {
		/* We should dump the emulator state here, but it would
		 * have to be converted into standard 387 format first.. */
		fpvalid = 0;
	}

	return fpvalid;
}

/*
 * Fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	int i;

	dump->magic = CMAGIC;
	dump->start_code = 0;
	dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
	dump->u_tsize = ((unsigned long) current->mm->end_code) >> 12;
	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> 12;
	dump->u_dsize -= dump->u_tsize;
	dump->u_ssize = 0;
	for (i = 0; i < 8; i++)
		dump->u_debugreg[i] = current->debugreg[i];

	if (dump->start_stack < TASK_SIZE)
		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> 12;

	dump->regs = *regs;

	dump->u_fpvalid = dump_fpu(&dump->i387);
}

asmlinkage int sys_fork(struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.esp, &regs);
}

asmlinkage int sys_clone(struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;

	clone_flags = regs.ebx;
	newsp = regs.ecx;
	if (!newsp)
		newsp = regs.esp;	/* no new stack given: share the parent's */
	return do_fork(clone_flags, newsp, &regs);
}
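
/*
 * Usage sketch (not part of this file): how user space reaches
 * sys_clone() above.  On i386 the syscall arguments travel in
 * registers, so clone_flags lands in regs.ebx and the new stack
 * pointer in regs.ecx -- passing 0 for the stack makes the child
 * run on the parent's stack, as the newsp fallback above shows.
 * The wrapper assumes <asm/unistd.h> of this tree defines __NR_clone.
 */
#if 0	/* illustrative only */
#include <asm/unistd.h>

static inline int clone_syscall(unsigned long flags, void *child_stack)
{
	int res;

	__asm__ __volatile__("int $0x80"
		: "=a" (res)			/* return value in EAX */
		: "0" (__NR_clone),		/* syscall number */
		  "b" (flags),			/* -> regs.ebx (clone_flags) */
		  "c" (child_stack));		/* -> regs.ecx (newsp) */
	return res;
}
#endif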

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
	int error;
	char * filename;

	error = getname((char *) regs.ebx, &filename);
	if (error)
		return error;
	error = do_execve(filename, (char **) regs.ecx, (char **) regs.edx, &regs);
	putname(filename);
	return error;
}
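
/*
 * Usage sketch (not part of this file): the register convention that
 * sys_execve() above decodes.  The path pointer arrives in regs.ebx,
 * argv in regs.ecx and envp in regs.edx.  __NR_execve is assumed to
 * come from <asm/unistd.h>.
 */
#if 0	/* illustrative only */
#include <asm/unistd.h>

static inline int execve_syscall(const char *path, char **argv, char **envp)
{
	int res;

	__asm__ __volatile__("int $0x80"
		: "=a" (res)		/* -errno on failure; does not return on success */
		: "0" (__NR_execve),	/* syscall number */
		  "b" (path),		/* -> regs.ebx (filename) */
		  "c" (argv),		/* -> regs.ecx (argv) */
		  "d" (envp));		/* -> regs.edx (envp) */
	return res;
}
#endif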