This source file includes the following definitions (a short usage sketch follows the list):
- find_empty_process
- get_pid
- dup_mmap
- copy_mm
- copy_fs
- copy_files
- copy_sighand
- do_fork
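do_fork() at the bottom of this file is not called directly from user space; the architecture-specific fork and clone entry points collect the clone flags, the new user stack pointer and the saved register frame and hand them to do_fork(). A minimal sketch of such wrappers, assuming the i386-style conventions of this kernel generation (the names sys_fork/sys_clone and the register usage are taken from that port, not from this file):

    /* Hypothetical arch-level wrappers, shown only to illustrate how
     * do_fork()'s three arguments are supplied; the real versions live
     * in the per-architecture process.c. */
    asmlinkage int sys_fork(struct pt_regs regs)
    {
            return do_fork(SIGCHLD, regs.esp, &regs);
    }

    asmlinkage int sys_clone(struct pt_regs regs)
    {
            unsigned long clone_flags = regs.ebx;   /* clone flags passed in %ebx */
            unsigned long newsp = regs.ecx;         /* child stack pointer in %ecx */

            if (!newsp)
                    newsp = regs.esp;               /* default: reuse the caller's stack pointer */
            return do_fork(clone_flags, newsp, &regs);
    }

On success do_fork() returns the child's pid to the parent; on failure it returns a negative error code (-EAGAIN or -ENOMEM).
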
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/malloc.h>
#include <linux/ldt.h>
#include <linux/smp.h>

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/pgtable.h>

int nr_tasks=1;
int nr_running=1;
unsigned long int total_forks=0;

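/*
 * Find a free slot in the task[] array.  Non-root callers are refused
 * once only MIN_TASKS_LEFT_FOR_ROOT slots remain, and are checked
 * against their RLIMIT_NPROC limit.  Returns the slot index or -EAGAIN.
 */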
static inline int find_empty_process(void)
{
        int i;
        struct task_struct *p;

        if (nr_tasks >= NR_TASKS - MIN_TASKS_LEFT_FOR_ROOT) {
                if (current->uid)
                        return -EAGAIN;
        }
        if (current->uid) {
                long max_tasks = current->rlim[RLIMIT_NPROC].rlim_cur;

                if (max_tasks < nr_tasks) {
                        for_each_task (p) {
                                if (p->uid == current->uid)
                                        if (--max_tasks < 0)
                                                return -EAGAIN;
                        }
                }
        }
        for (i = 0 ; i < NR_TASKS ; i++) {
                if (!task[i])
                        return i;
        }
        return -EAGAIN;
}

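/*
 * Pick a pid for the new task.  With CLONE_PID the child simply reuses
 * the parent's pid; otherwise scan for a 15-bit value that is not in
 * use as a pid, process group or session id by any existing task.
 */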
static int get_pid(unsigned long flags)
{
        static int last_pid = 0;
        struct task_struct *p;

        if (flags & CLONE_PID)
                return current->pid;
repeat:
        if ((++last_pid) & 0xffff8000)
                last_pid = 1;
        for_each_task (p) {
                if (p->pid == last_pid ||
                    p->pgrp == last_pid ||
                    p->session == last_pid)
                        goto repeat;
        }
        return last_pid;
}

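/*
 * Duplicate every vm_area_struct of the current address space into the
 * new mm: take extra references on mapped inodes, link shared mappings
 * into the inode's share ring, and copy the page tables.
 */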
static inline int dup_mmap(struct mm_struct * mm)
{
        struct vm_area_struct * mpnt, **p, *tmp;

        mm->mmap = NULL;
        p = &mm->mmap;
        for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
                tmp = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
                if (!tmp) {
                        exit_mmap(mm);
                        return -ENOMEM;
                }
                *tmp = *mpnt;
                tmp->vm_flags &= ~VM_LOCKED;
                tmp->vm_mm = mm;
                tmp->vm_next = NULL;
                if (tmp->vm_inode) {
                        tmp->vm_inode->i_count++;

                        /* link the copy into the inode's circular share list,
                           right after the original area */
                        tmp->vm_next_share->vm_prev_share = tmp;
                        mpnt->vm_next_share = tmp;
                        tmp->vm_prev_share = mpnt;
                }
                if (tmp->vm_ops && tmp->vm_ops->open)
                        tmp->vm_ops->open(tmp);
                if (copy_page_range(mm, current->mm, tmp)) {
                        exit_mmap(mm);
                        return -ENOMEM;
                }
                *p = tmp;
                p = &tmp->vm_next;
        }
        build_mmap_avl(mm);
        return 0;
}

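/*
 * Share the address space when CLONE_VM is set; otherwise allocate a
 * fresh mm_struct and page tables and duplicate the parent's mappings.
 */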
static inline int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
        if (clone_flags & CLONE_VM) {
                SET_PAGE_DIR(tsk, current->mm->pgd);
                current->mm->count++;
                return 0;
        }
        tsk->mm = kmalloc(sizeof(*tsk->mm), GFP_KERNEL);
        if (!tsk->mm)
                return -1;
        *tsk->mm = *current->mm;
        tsk->mm->count = 1;
        tsk->mm->def_flags = 0;
        tsk->min_flt = tsk->maj_flt = 0;
        tsk->cmin_flt = tsk->cmaj_flt = 0;
        tsk->nswap = tsk->cnswap = 0;
        if (new_page_tables(tsk))
                return -1;
        if (dup_mmap(tsk->mm)) {
                free_page_tables(tsk);
                return -1;
        }
        return 0;
}

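/*
 * Share the fs_struct (root, pwd, umask) when CLONE_FS is set;
 * otherwise allocate a copy and grab references to root and pwd.
 */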
static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
{
        if (clone_flags & CLONE_FS) {
                current->fs->count++;
                return 0;
        }
        tsk->fs = kmalloc(sizeof(*tsk->fs), GFP_KERNEL);
        if (!tsk->fs)
                return -1;
        tsk->fs->count = 1;
        tsk->fs->umask = current->fs->umask;
        if ((tsk->fs->root = current->fs->root))
                tsk->fs->root->i_count++;
        if ((tsk->fs->pwd = current->fs->pwd))
                tsk->fs->pwd->i_count++;
        return 0;
}

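/*
 * Share the open-file table when CLONE_FILES is set; otherwise copy the
 * descriptor table and close-on-exec bits, bumping each file's f_count.
 */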
static inline int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
        int i;

        if (clone_flags & CLONE_FILES) {
                current->files->count++;
                return 0;
        }
        tsk->files = kmalloc(sizeof(*tsk->files), GFP_KERNEL);
        if (!tsk->files)
                return -1;
        tsk->files->count = 1;
        memcpy(&tsk->files->close_on_exec, &current->files->close_on_exec,
               sizeof(tsk->files->close_on_exec));
        for (i = 0; i < NR_OPEN; i++) {
                struct file * f = current->files->fd[i];
                if (f)
                        f->f_count++;
                tsk->files->fd[i] = f;
        }
        return 0;
}

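/*
 * Share the signal handler table when CLONE_SIGHAND is set; otherwise
 * allocate a new one and copy the parent's signal actions.
 */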
static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
{
        if (clone_flags & CLONE_SIGHAND) {
                current->sig->count++;
                return 0;
        }
        tsk->sig = kmalloc(sizeof(*tsk->sig), GFP_KERNEL);
        if (!tsk->sig)
                return -1;
        tsk->sig->count = 1;
        memcpy(tsk->sig->action, current->sig->action, sizeof(tsk->sig->action));
        return 0;
}
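
/*
 * do_fork() is the low-level worker for the fork and clone system calls:
 * it allocates the task_struct and kernel stack, clones the caller's
 * state according to clone_flags, and wakes up the new child.
 */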
int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
{
        int nr;
        int error = -ENOMEM;
        unsigned long new_stack;
        struct task_struct *p;

        p = (struct task_struct *) kmalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
                goto bad_fork;
        new_stack = alloc_kernel_stack();
        if (!new_stack)
                goto bad_fork_free_p;
        error = -EAGAIN;
        nr = find_empty_process();
        if (nr < 0)
                goto bad_fork_free_stack;

        *p = *current;

        if (p->exec_domain && p->exec_domain->use_count)
                (*p->exec_domain->use_count)++;
        if (p->binfmt && p->binfmt->use_count)
                (*p->binfmt->use_count)++;

        p->did_exec = 0;
        p->swappable = 0;
        p->kernel_stack_page = new_stack;
        *(unsigned long *) p->kernel_stack_page = STACK_MAGIC;
        p->state = TASK_UNINTERRUPTIBLE;
        p->flags &= ~(PF_PTRACED|PF_TRACESYS|PF_SUPERPRIV);
        p->flags |= PF_FORKNOEXEC;
        p->pid = get_pid(clone_flags);
        p->next_run = NULL;
        p->prev_run = NULL;
        p->p_pptr = p->p_opptr = current;
        p->p_cptr = NULL;
        p->signal = 0;
        p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
        p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
        init_timer(&p->real_timer);
        p->real_timer.data = (unsigned long) p;
        p->leader = 0;
        p->tty_old_pgrp = 0;
        p->utime = p->stime = 0;
        p->cutime = p->cstime = 0;
#ifdef __SMP__
        p->processor = NO_PROC_ID;
        p->lock_depth = 1;
#endif
        p->start_time = jiffies;
        task[nr] = p;
        SET_LINKS(p);
        nr_tasks++;

        error = -ENOMEM;
        /* copy all the process information */
        if (copy_files(clone_flags, p))
                goto bad_fork_cleanup;
        if (copy_fs(clone_flags, p))
                goto bad_fork_cleanup_files;
        if (copy_sighand(clone_flags, p))
                goto bad_fork_cleanup_fs;
        if (copy_mm(clone_flags, p))
                goto bad_fork_cleanup_sighand;
        copy_thread(nr, clone_flags, usp, p, regs);
        p->semundo = NULL;

        /* ok, the child is now fully set up */
        p->swappable = 1;
        p->exit_signal = clone_flags & CSIGNAL;
        p->counter = current->counter >> 1;
        wake_up_process(p);
        ++total_forks;
        return p->pid;

bad_fork_cleanup_sighand:
        exit_sighand(p);
bad_fork_cleanup_fs:
        exit_fs(p);
bad_fork_cleanup_files:
        exit_files(p);
bad_fork_cleanup:
        if (p->exec_domain && p->exec_domain->use_count)
                (*p->exec_domain->use_count)--;
        if (p->binfmt && p->binfmt->use_count)
                (*p->binfmt->use_count)--;
        task[nr] = NULL;
        REMOVE_LINKS(p);
        nr_tasks--;
bad_fork_free_stack:
        free_kernel_stack(new_stack);
bad_fork_free_p:
        kfree(p);
bad_fork:
        return error;
}