This source file includes following definitions.
- find_empty_process
- dup_mmap
- copy_mm
- copy_fs
- copy_files
- copy_sighand
- do_fork
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/errno.h>
15 #include <linux/sched.h>
16 #include <linux/kernel.h>
17 #include <linux/mm.h>
18 #include <linux/stddef.h>
19 #include <linux/unistd.h>
20 #include <linux/ptrace.h>
21 #include <linux/malloc.h>
22 #include <linux/ldt.h>
23
24 #include <asm/segment.h>
25 #include <asm/system.h>
26
27
28
29
30
31
32
33
34
35
36
37
/*
 * Everything fork() must duplicate for the child is carved out of a
 * single free page: the task_struct itself followed by the signal,
 * fs, files and mm structures.  do_fork() allocates all of it with one
 * __get_free_page() call and frees it with one free_page().
 *
 * The copy_* helpers below take extra references on this page via
 * mem_map[MAP_NR(u)] for each embedded structure that ends up used.
 * NOTE(review): this assumes the whole struct fits in one page --
 * TODO confirm sizeof(struct allocation_struct) <= PAGE_SIZE.
 */
struct allocation_struct {
	struct task_struct tsk;		/* must stay first: &alloc->tsk == alloc (see free_page in do_fork) */
	struct sigaction sigaction[32];	/* per-signal handler table */
	struct fs_struct fs;		/* root/cwd/umask state */
	struct files_struct files;	/* open file descriptor table */
	struct mm_struct mm;		/* address-space bookkeeping */
};

int nr_tasks=1;		/* task slots in use (the initial task counts) */
int nr_running=1;	/* runnable tasks */
long last_pid=0;	/* last pid handed out; advanced by find_empty_process() */
49
50 static int find_empty_process(void)
51 {
52 int free_task;
53 int i, tasks_free;
54 int this_user_tasks;
55
56 repeat:
57 if ((++last_pid) & 0xffff8000)
58 last_pid=1;
59 this_user_tasks = 0;
60 tasks_free = 0;
61 free_task = -EAGAIN;
62 i = NR_TASKS;
63 while (--i > 0) {
64 if (!task[i]) {
65 free_task = i;
66 tasks_free++;
67 continue;
68 }
69 if (task[i]->uid == current->uid)
70 this_user_tasks++;
71 if (task[i]->pid == last_pid || task[i]->pgrp == last_pid ||
72 task[i]->session == last_pid)
73 goto repeat;
74 }
75 if (tasks_free <= MIN_TASKS_LEFT_FOR_ROOT ||
76 this_user_tasks > current->rlim[RLIMIT_NPROC].rlim_cur)
77 if (current->uid)
78 return -EAGAIN;
79 return free_task;
80 }
81
/*
 * Duplicate the caller's vma list into the new mm.  Each vma is
 * shallow-copied; inode-backed vmas are additionally linked into the
 * inode's circular share ring right after the parent's vma, so all
 * mappings of the same inode stay discoverable.
 *
 * Returns 0 on success, or -ENOMEM if a vma cannot be allocated (in
 * which case the partial copy is torn down via exit_mmap()).
 */
static int dup_mmap(struct mm_struct * mm)
{
	struct vm_area_struct * mpnt, **p, *tmp;

	mm->mmap = NULL;
	p = &mm->mmap;	/* tail pointer: builds the new list in parent order */
	for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
		tmp = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
		if (!tmp) {
			exit_mmap(mm);	/* undo everything copied so far */
			return -ENOMEM;
		}
		*tmp = *mpnt;	/* struct copy: inherits flags, ops, offset, share links */
		tmp->vm_mm = mm;
		tmp->vm_next = NULL;
		if (tmp->vm_inode) {
			tmp->vm_inode->i_count++;
			/* insert tmp into the share ring just after mpnt;
			   the struct copy above left tmp->vm_next_share
			   equal to mpnt->vm_next_share, so this splices
			   tmp between mpnt and mpnt's old successor */
			tmp->vm_next_share->vm_prev_share = tmp;
			mpnt->vm_next_share = tmp;
			tmp->vm_prev_share = mpnt;
		}
		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);
		*p = tmp;
		p = &tmp->vm_next;
	}
	build_mmap_avl(mm);	/* rebuild the AVL index over the new list */
	return 0;
}
112
/*
 * Set up the child's mm.  With CLONE_VM parent and child share the
 * mm_struct and page tables; otherwise the mm is copied into the
 * allocation page and the page tables and vma list are duplicated.
 *
 * Returns 0 on success, -1 on failure.
 */
static int copy_mm(unsigned long clone_flags, struct allocation_struct * u)
{
	if (clone_flags & CLONE_VM) {
		if (clone_page_tables(&u->tsk))
			return -1;
		current->mm->count++;
		/* pin the page holding the shared mm_struct */
		mem_map[MAP_NR(current->mm)]++;
		return 0;
	}
	u->tsk.mm = &u->mm;
	u->mm = *current->mm;	/* struct copy of the parent's mm */
	u->mm.count = 1;
	/* the child starts with clean page-fault accounting */
	u->mm.min_flt = u->mm.maj_flt = 0;
	u->mm.cmin_flt = u->mm.cmaj_flt = 0;
	if (copy_page_tables(&u->tsk))
		return -1;
	if (dup_mmap(&u->mm))
		return -1;
	/* extra reference on the allocation page for the embedded mm */
	mem_map[MAP_NR(u)]++;
	return 0;
}
134
135 static void copy_fs(unsigned long clone_flags, struct allocation_struct * u)
136 {
137 if (clone_flags & CLONE_FS) {
138 current->fs->count++;
139 mem_map[MAP_NR(current->fs)]++;
140 return;
141 }
142 u->tsk.fs = &u->fs;
143 u->fs = *current->fs;
144 u->fs.count = 1;
145 if (u->fs.pwd)
146 u->fs.pwd->i_count++;
147 if (u->fs.root)
148 u->fs.root->i_count++;
149 mem_map[MAP_NR(u)]++;
150 }
151
152 static void copy_files(unsigned long clone_flags, struct allocation_struct * u)
153 {
154 int i;
155
156 if (clone_flags & CLONE_FILES) {
157 current->files->count++;
158 mem_map[MAP_NR(current->files)]++;
159 return;
160 }
161 u->tsk.files = &u->files;
162 u->files = *current->files;
163 u->files.count = 1;
164 for (i = 0; i < NR_OPEN; i++) {
165 struct file * f = u->files.fd[i];
166 if (f)
167 f->f_count++;
168 }
169 mem_map[MAP_NR(u)]++;
170 }
171
172 static void copy_sighand(unsigned long clone_flags, struct allocation_struct * u)
173 {
174 if (clone_flags & CLONE_SIGHAND) {
175 mem_map[MAP_NR(current->sigaction)]++;
176 return;
177 }
178 u->tsk.sigaction = u->sigaction;
179 memcpy(u->sigaction, current->sigaction, sizeof(u->sigaction));
180 mem_map[MAP_NR(u)]++;
181 }
182
183
184
185
186
187
/*
 * do_fork() -- the guts of fork/clone.  Allocates the per-process
 * allocation page and a kernel stack, finds a free task slot, starts
 * the child as a byte copy of the parent, then selectively copies or
 * shares the mm/files/fs/signal state according to clone_flags.
 *
 * Returns the child's pid, or -EAGAIN on any failure.
 */
int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
{
	int nr;
	unsigned long new_stack;
	struct task_struct *p;
	struct allocation_struct *alloc;

	alloc = (struct allocation_struct *) __get_free_page(GFP_KERNEL);
	if (!alloc)
		goto bad_fork;
	p = &alloc->tsk;	/* tsk is first in the page, so p == alloc */
	new_stack = get_free_page(GFP_KERNEL);
	if (!new_stack)
		goto bad_fork_free;
	nr = find_empty_process();	/* also advances last_pid for the child */
	if (nr < 0)
		goto bad_fork_free;

	*p = *current;		/* start as an exact copy of the parent */

	/* the copy inherited these module pointers: take our own refs */
	if (p->exec_domain && p->exec_domain->use_count)
		(*p->exec_domain->use_count)++;
	if (p->binfmt && p->binfmt->use_count)
		(*p->binfmt->use_count)++;

	p->did_exec = 0;
	p->kernel_stack_page = new_stack;
	/* magic cookie at the stack base for overflow detection */
	*(unsigned long *) p->kernel_stack_page = STACK_MAGIC;
	/* keep the child off the run queue until setup is complete */
	p->state = TASK_UNINTERRUPTIBLE;
	p->flags &= ~(PF_PTRACED|PF_TRACESYS);	/* tracing is not inherited */
	p->pid = last_pid;
	p->next_run = NULL;
	p->prev_run = NULL;
	p->p_pptr = p->p_opptr = current;
	p->p_cptr = NULL;
	/* pending signals, interval timers and accounting start fresh */
	p->signal = 0;
	p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
	p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
	init_timer(&p->real_timer);
	p->real_timer.data = (unsigned long) p;
	p->leader = 0;		/* session leadership is not inherited */
	p->tty_old_pgrp = 0;
	p->utime = p->stime = 0;
	p->cutime = p->cstime = 0;
	p->start_time = jiffies;
	task[nr] = p;
	SET_LINKS(p);		/* hook into the parent/sibling lists */
	nr_tasks++;


	copy_thread(nr, clone_flags, usp, p, regs);
	if (copy_mm(clone_flags, alloc))
		goto bad_fork_cleanup;
	p->semundo = NULL;
	copy_files(clone_flags, alloc);
	copy_fs(clone_flags, alloc);
	copy_sighand(clone_flags, alloc);

	/* fully set up: the swapper may now touch the child's mm */
	p->mm->swappable = 1;
	p->exit_signal = clone_flags & CSIGNAL;
	/* child gets half the parent's remaining timeslice */
	p->counter = current->counter >> 1;
	wake_up_process(p);
	return p->pid;
bad_fork_cleanup:
	task[nr] = NULL;
	REMOVE_LINKS(p);
	nr_tasks--;
	/* NOTE(review): the exec_domain/binfmt use_count increments above
	   are not undone on this path -- TODO confirm intended */
bad_fork_free:
	free_page(new_stack);	/* new_stack may be 0 here; free_page(0) assumed safe -- confirm */
	free_page((long) p);
bad_fork:
	return -EAGAIN;
}