This source file includes the following definitions:
- find_empty_process
- get_pid
- dup_mmap
- copy_mm
- copy_fs
- copy_files
- copy_sighand
- do_fork
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/malloc.h>
#include <linux/ldt.h>
#include <linux/smp.h>

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/pgtable.h>

int nr_tasks=1;
int nr_running=1;

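/*
 * Find a free slot in the task[] array for a new process.  Fails with
 * -EAGAIN if the system-wide task limit (less the slots reserved for
 * root) or the per-user RLIMIT_NPROC limit would be exceeded.
 */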
static inline int find_empty_process(void)
{
        int i;
        struct task_struct *p;

        if (nr_tasks >= NR_TASKS - MIN_TASKS_LEFT_FOR_ROOT) {
                if (current->uid)
                        return -EAGAIN;
        }
        if (current->uid) {
                long max_tasks = current->rlim[RLIMIT_NPROC].rlim_cur;

                if (max_tasks < nr_tasks) {
                        for_each_task (p) {
                                if (p->uid == current->uid)
                                        if (--max_tasks < 0)
                                                return -EAGAIN;
                        }
                }
        }
        for (i = 0 ; i < NR_TASKS ; i++) {
                if (!task[i])
                        return i;
        }
        return -EAGAIN;
}

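/*
 * Pick a pid for the new task.  With CLONE_PID the child shares the
 * parent's pid; otherwise scan for a value (wrapping back to 1 below
 * 0x8000) that is not already in use as a pid, process group or
 * session id.
 */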
static int get_pid(unsigned long flags)
{
        static int last_pid = 0;
        struct task_struct *p;

        if (flags & CLONE_PID)
                return current->pid;
repeat:
        if ((++last_pid) & 0xffff8000)
                last_pid=1;
        for_each_task (p) {
                if (p->pid == last_pid ||
                    p->pgrp == last_pid ||
                    p->session == last_pid)
                        goto repeat;
        }
        return last_pid;
}

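/*
 * Duplicate the parent's vma list into the new mm: copy each vma,
 * take an extra reference on any backing inode, link the copy into
 * the inode's sharing list, and copy the corresponding page tables.
 */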
static inline int dup_mmap(struct mm_struct * mm)
{
        struct vm_area_struct * mpnt, **p, *tmp;

        mm->mmap = NULL;
        p = &mm->mmap;
        for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
                tmp = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
                if (!tmp) {
                        exit_mmap(mm);
                        return -ENOMEM;
                }
                *tmp = *mpnt;
                tmp->vm_flags &= ~VM_LOCKED;
                tmp->vm_mm = mm;
                tmp->vm_next = NULL;
                if (tmp->vm_inode) {
                        tmp->vm_inode->i_count++;

                        /* insert the copy into the inode's share list, right after mpnt */
                        tmp->vm_next_share->vm_prev_share = tmp;
                        mpnt->vm_next_share = tmp;
                        tmp->vm_prev_share = mpnt;
                }
                if (tmp->vm_ops && tmp->vm_ops->open)
                        tmp->vm_ops->open(tmp);
                if (copy_page_range(mm, current->mm, tmp)) {
                        exit_mmap(mm);
                        return -ENOMEM;
                }
                *p = tmp;
                p = &tmp->vm_next;
        }
        build_mmap_avl(mm);
        return 0;
}

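/*
 * Set up memory management for the child.  With CLONE_VM the parent's
 * mm (and page directory) is shared; otherwise allocate a new mm,
 * build fresh page tables and duplicate the parent's mappings.
 */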
static inline int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
        if (clone_flags & CLONE_VM) {
                SET_PAGE_DIR(tsk, current->mm->pgd);
                current->mm->count++;
                return 0;
        }
        tsk->mm = kmalloc(sizeof(*tsk->mm), GFP_KERNEL);
        if (!tsk->mm)
                return -1;
        *tsk->mm = *current->mm;
        tsk->mm->count = 1;
        tsk->mm->def_flags = 0;
        tsk->min_flt = tsk->maj_flt = 0;
        tsk->cmin_flt = tsk->cmaj_flt = 0;
        tsk->nswap = tsk->cnswap = 0;
        if (new_page_tables(tsk))
                return -1;
        if (dup_mmap(tsk->mm)) {
                free_page_tables(tsk);
                return -1;
        }
        return 0;
}

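/*
 * Set up filesystem state (root, cwd, umask).  With CLONE_FS the
 * parent's fs_struct is shared; otherwise allocate a copy and take
 * extra references on the root and working-directory inodes.
 */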
static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
{
        if (clone_flags & CLONE_FS) {
                current->fs->count++;
                return 0;
        }
        tsk->fs = kmalloc(sizeof(*tsk->fs), GFP_KERNEL);
        if (!tsk->fs)
                return -1;
        tsk->fs->count = 1;
        tsk->fs->umask = current->fs->umask;
        if ((tsk->fs->root = current->fs->root))
                tsk->fs->root->i_count++;
        if ((tsk->fs->pwd = current->fs->pwd))
                tsk->fs->pwd->i_count++;
        return 0;
}

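/*
 * Set up the open-file table.  With CLONE_FILES the parent's table is
 * shared; otherwise allocate a copy, duplicate the close-on-exec bits
 * and take an extra reference on every open file.
 */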
static inline int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
        int i;

        if (clone_flags & CLONE_FILES) {
                current->files->count++;
                return 0;
        }
        tsk->files = kmalloc(sizeof(*tsk->files), GFP_KERNEL);
        if (!tsk->files)
                return -1;
        tsk->files->count = 1;
        memcpy(&tsk->files->close_on_exec, &current->files->close_on_exec,
                sizeof(tsk->files->close_on_exec));
        for (i = 0; i < NR_OPEN; i++) {
                struct file * f = current->files->fd[i];
                if (f)
                        f->f_count++;
                tsk->files->fd[i] = f;
        }
        return 0;
}

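/*
 * Set up signal handlers.  With CLONE_SIGHAND the parent's handler
 * table is shared; otherwise allocate a copy of the action array.
 */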
static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
{
        if (clone_flags & CLONE_SIGHAND) {
                current->sig->count++;
                return 0;
        }
        tsk->sig = kmalloc(sizeof(*tsk->sig), GFP_KERNEL);
        if (!tsk->sig)
                return -1;
        tsk->sig->count = 1;
        memcpy(tsk->sig->action, current->sig->action, sizeof(tsk->sig->action));
        return 0;
}

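/*
 * do_fork() is the main fork routine: it allocates a task_struct and
 * kernel stack, claims a free task[] slot, copies the parent's state,
 * duplicates (or shares) the file, fs, signal and mm structures, sets
 * up the new thread and finally makes the child runnable.
 */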
int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
{
        int nr;
        int error = -ENOMEM;
        unsigned long new_stack;
        struct task_struct *p;

        p = (struct task_struct *) kmalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
                goto bad_fork;
        new_stack = alloc_kernel_stack();
        if (!new_stack)
                goto bad_fork_free_p;
        error = -EAGAIN;
        nr = find_empty_process();
        if (nr < 0)
                goto bad_fork_free_stack;

        *p = *current;

        if (p->exec_domain && p->exec_domain->use_count)
                (*p->exec_domain->use_count)++;
        if (p->binfmt && p->binfmt->use_count)
                (*p->binfmt->use_count)++;

        p->did_exec = 0;
        p->swappable = 0;
        p->kernel_stack_page = new_stack;
        *(unsigned long *) p->kernel_stack_page = STACK_MAGIC;
        p->state = TASK_UNINTERRUPTIBLE;
        p->flags &= ~(PF_PTRACED|PF_TRACESYS);
        p->pid = get_pid(clone_flags);
        p->next_run = NULL;
        p->prev_run = NULL;
        p->p_pptr = p->p_opptr = current;
        p->p_cptr = NULL;
        p->signal = 0;
        p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
        p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
        init_timer(&p->real_timer);
        p->real_timer.data = (unsigned long) p;
        p->leader = 0;
        p->tty_old_pgrp = 0;
        p->utime = p->stime = 0;
        p->cutime = p->cstime = 0;
#ifdef __SMP__
        p->processor = NO_PROC_ID;
        p->lock_depth = 1;
#endif
        p->start_time = jiffies;
        task[nr] = p;
        SET_LINKS(p);
        nr_tasks++;

        error = -ENOMEM;

        if (copy_files(clone_flags, p))
                goto bad_fork_cleanup;
        if (copy_fs(clone_flags, p))
                goto bad_fork_cleanup_files;
        if (copy_sighand(clone_flags, p))
                goto bad_fork_cleanup_fs;
        if (copy_mm(clone_flags, p))
                goto bad_fork_cleanup_sighand;
        copy_thread(nr, clone_flags, usp, p, regs);
        p->semundo = NULL;

        /* ok, the child is fully set up: make it swappable and runnable */
        p->swappable = 1;
        p->exit_signal = clone_flags & CSIGNAL;
        p->counter = current->counter >> 1;
        wake_up_process(p);     /* do this last */
        return p->pid;

bad_fork_cleanup_sighand:
        exit_sighand(p);
bad_fork_cleanup_fs:
        exit_fs(p);
bad_fork_cleanup_files:
        exit_files(p);
bad_fork_cleanup:
        if (p->exec_domain && p->exec_domain->use_count)
                (*p->exec_domain->use_count)--;
        if (p->binfmt && p->binfmt->use_count)
                (*p->binfmt->use_count)--;
        task[nr] = NULL;
        REMOVE_LINKS(p);
        nr_tasks--;
bad_fork_free_stack:
        free_kernel_stack(new_stack);
bad_fork_free_p:
        kfree(p);
bad_fork:
        return error;
}