This source file includes the following definitions:
- find_empty_process
- copy_fd
- dup_mmap
- sys_fork
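
The child is set up so that fork() returns 0 in the child and the child's
pid in the parent: sys_fork() clears eax in the child's saved register
frame (childregs->eax = 0) and returns p->pid to the caller.  A minimal
user-space sketch of that contract (standard libc, not part of this file):

        #include <stdio.h>
        #include <sys/types.h>
        #include <unistd.h>

        int main(void)
        {
                pid_t pid = fork();     /* enters the kernel's sys_fork() */

                if (pid < 0) {          /* e.g. -EAGAIN from find_empty_process() */
                        perror("fork");
                        return 1;
                }
                if (pid == 0)
                        printf("child: fork() returned 0\n");
                else
                        printf("parent: child pid is %d\n", (int) pid);
                return 0;
        }
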
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/segment.h>
#include <linux/ptrace.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/system.h>

asmlinkage void ret_from_sys_call(void) __asm__("ret_from_sys_call");

#define MAX_TASKS_PER_USER (NR_TASKS/2)
#define MIN_TASKS_LEFT_FOR_ROOT 4

extern int shm_fork(struct task_struct *, struct task_struct *);
long last_pid=0;
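
/*
 * find_empty_process() returns the index of a free slot in the task[]
 * array and advances last_pid until it is not in use as a pid or process
 * group.  Non-root users are refused once only MIN_TASKS_LEFT_FOR_ROOT
 * free slots remain, or when they already own more than
 * MAX_TASKS_PER_USER tasks; in that case -EAGAIN is returned.
 */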
static int find_empty_process(void)
{
        int free_task;
        int i, tasks_free;
        int this_user_tasks;

repeat:
        if ((++last_pid) & 0xffff8000)
                last_pid=1;
        this_user_tasks = 0;
        tasks_free = 0;
        free_task = -EAGAIN;
        i = NR_TASKS;
        while (--i > 0) {
                if (!task[i]) {
                        free_task = i;
                        tasks_free++;
                        continue;
                }
                if (task[i]->uid == current->uid)
                        this_user_tasks++;
                if (task[i]->pid == last_pid || task[i]->pgrp == last_pid)
                        goto repeat;
        }
        if (tasks_free <= MIN_TASKS_LEFT_FOR_ROOT ||
            this_user_tasks > MAX_TASKS_PER_USER)
                if (current->uid)
                        return -EAGAIN;
        return free_task;
}
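
/*
 * copy_fd() hands the child a private struct file for one open
 * descriptor: a new struct file is allocated and copied from the old
 * one, the inode's reference count is raised and the driver's open()
 * routine is called again.  If open() fails, the inode is released and
 * NULL is returned.  Only used when the clone flags include COPYFD.
 */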
static struct file * copy_fd(struct file * old_file)
{
        struct file * new_file = get_empty_filp();
        int error;

        if (new_file) {
                memcpy(new_file,old_file,sizeof(struct file));
                new_file->f_count = 1;
                if (new_file->f_inode)
                        new_file->f_inode->i_count++;
                if (new_file->f_op && new_file->f_op->open) {
                        error = new_file->f_op->open(new_file->f_inode,new_file);
                        if (error) {
                                iput(new_file->f_inode);
                                new_file->f_count = 0;
                                new_file = NULL;
                        }
                }
        }
        return new_file;
}
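
/*
 * dup_mmap() duplicates the parent's chain of vm_area_structs for the
 * child: each area is copied, re-owned by the child, linked onto its
 * mmap list, and the backing inode's reference count is raised.  The
 * area holding the parent's stack is remembered in tsk->stk_vma.
 */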
int dup_mmap(struct task_struct * tsk)
{
        struct vm_area_struct * mpnt, **p, *tmp;

        tsk->mmap = NULL;
        tsk->stk_vma = NULL;
        p = &tsk->mmap;
        for (mpnt = current->mmap ; mpnt ; mpnt = mpnt->vm_next) {
                tmp = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
                if (!tmp)
                        return -ENOMEM;
                *tmp = *mpnt;
                tmp->vm_task = tsk;
                tmp->vm_next = NULL;
                if (tmp->vm_inode)
                        tmp->vm_inode->i_count++;
                *p = tmp;
                p = &tmp->vm_next;
                if (current->stk_vma == mpnt)
                        tsk->stk_vma = tmp;
        }
        return 0;
}
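
/*
 * IS_CLONE is true when sys_fork() was entered through the clone system
 * call rather than fork.  copy_vm() either gives the child its own copy
 * of the page tables (COPYVM set) or lets it share the parent's.
 */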
#define IS_CLONE (regs.orig_eax == __NR_clone)
#define copy_vm(p) ((clone_flags & COPYVM)?copy_page_tables(p):clone_page_tables(p))
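
/*
 * sys_fork() services both fork and clone.  It builds a complete copy of
 * the current task: task_struct, kernel stack, TSS, LDT, memory map and
 * file table.  The parent gets the child's pid as the return value; the
 * child wakes up in ret_from_sys_call with eax forced to 0.
 */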
asmlinkage int sys_fork(struct pt_regs regs)
{
        struct pt_regs * childregs;
        struct task_struct *p;
        int i,nr;
        struct file *f;
        unsigned long clone_flags = COPYVM | SIGCHLD;

        if(!(p = (struct task_struct*)__get_free_page(GFP_KERNEL)))
                goto bad_fork;
        nr = find_empty_process();
        if (nr < 0)
                goto bad_fork_free;
        task[nr] = p;
        *p = *current;
        p->kernel_stack_page = 0;
        p->state = TASK_UNINTERRUPTIBLE;
        p->flags &= ~(PF_PTRACED|PF_TRACESYS);
        p->pid = last_pid;
        p->swappable = 1;
        p->p_pptr = p->p_opptr = current;
        p->p_cptr = NULL;
        SET_LINKS(p);
        p->signal = 0;
        p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
        p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
        p->leader = 0;
        p->utime = p->stime = 0;
        p->cutime = p->cstime = 0;
        p->min_flt = p->maj_flt = 0;
        p->cmin_flt = p->cmaj_flt = 0;
        p->start_time = jiffies;
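
        /*
         * Give the child its own kernel stack and a TSS that lets it
         * resume in kernel mode at ret_from_sys_call, with a register
         * frame copied from the parent but eax cleared, so that fork()
         * returns 0 in the child.
         */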
        if (!(p->kernel_stack_page = __get_free_page(GFP_KERNEL)))
                goto bad_fork_cleanup;
        p->tss.es = KERNEL_DS;
        p->tss.cs = KERNEL_CS;
        p->tss.ss = KERNEL_DS;
        p->tss.ds = KERNEL_DS;
        p->tss.fs = USER_DS;
        p->tss.gs = KERNEL_DS;
        p->tss.ss0 = KERNEL_DS;
        p->tss.esp0 = p->kernel_stack_page + PAGE_SIZE;
        p->tss.tr = _TSS(nr);
        childregs = ((struct pt_regs *) (p->kernel_stack_page + PAGE_SIZE)) - 1;
        p->tss.esp = (unsigned long) childregs;
        p->tss.eip = (unsigned long) ret_from_sys_call;
        *childregs = regs;
        childregs->eax = 0;
        p->tss.back_link = 0;
        p->tss.eflags = regs.eflags & 0xffffcfff;
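        /*
         * For clone, ebx carries the new user stack pointer and ecx the
         * clone flags; if the child keeps the parent's stack, the VM is
         * copied rather than shared.
         */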
        if (IS_CLONE) {
                if (regs.ebx)
                        childregs->esp = regs.ebx;
                clone_flags = regs.ecx;
                if (childregs->esp == regs.esp)
                        clone_flags |= COPYVM;
        }
        p->exit_signal = clone_flags & CSIGNAL;
        p->tss.ldt = _LDT(nr);
        if (p->ldt) {
                if ((p->ldt = (struct desc_struct*) __get_free_page(GFP_KERNEL)) != NULL)
                        memcpy(p->ldt, current->ldt, PAGE_SIZE);
        }
        p->tss.bitmap = offsetof(struct tss_struct,io_bitmap);
        for (i = 0; i < IO_BITMAP_SIZE+1 ; i++)
                p->tss.io_bitmap[i] = ~0;
        if (last_task_used_math == current)
                __asm__("clts ; fnsave %0 ; frstor %0":"=m" (p->tss.i387));
        p->semun = NULL; p->shm = NULL;
        if (copy_vm(p) || shm_fork(current, p))
                goto bad_fork_cleanup;
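        /*
         * With COPYFD every open file gets a private struct file through
         * copy_fd(); otherwise parent and child share the struct file
         * and only its reference count is raised.
         */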
        if (clone_flags & COPYFD) {
                for (i=0; i<NR_OPEN;i++)
                        if ((f = p->filp[i]) != NULL)
                                p->filp[i] = copy_fd(f);
        } else {
                for (i=0; i<NR_OPEN;i++)
                        if ((f = p->filp[i]) != NULL)
                                f->f_count++;
        }
        if (current->pwd)
                current->pwd->i_count++;
        if (current->root)
                current->root->i_count++;
        if (current->executable)
                current->executable->i_count++;
        dup_mmap(p);
        set_tss_desc(gdt+(nr<<1)+FIRST_TSS_ENTRY,&(p->tss));
        if (p->ldt)
                set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY,p->ldt, 512);
        else
                set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY,&default_ldt, 1);

        p->counter = current->counter >> 1;
        p->state = TASK_RUNNING;
        return p->pid;
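/*
 * Error unwinding: give back the task slot, unlink the child from the
 * process lists, and free the kernel stack page and the task_struct
 * page allocated above.
 */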
bad_fork_cleanup:
        task[nr] = NULL;
        REMOVE_LINKS(p);
        free_page(p->kernel_stack_page);
bad_fork_free:
        free_page((long) p);
bad_fork:
        return -EAGAIN;
}