This source file includes the following definitions.
- find_empty_process
- copy_fd
- dup_mmap
- sys_fork
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/errno.h>
15 #include <linux/sched.h>
16 #include <linux/kernel.h>
17 #include <linux/mm.h>
18 #include <linux/stddef.h>
19 #include <linux/unistd.h>
20 #include <linux/segment.h>
21 #include <linux/ptrace.h>
22 #include <linux/malloc.h>
23 #include <linux/ldt.h>
24
25 #include <asm/segment.h>
26 #include <asm/system.h>
27
28 asmlinkage void ret_from_sys_call(void) __asm__("ret_from_sys_call");
29
30
31
32 #define MAX_TASKS_PER_USER (NR_TASKS/2)
33 #define MIN_TASKS_LEFT_FOR_ROOT 4
34
35 extern int shm_fork(struct task_struct *, struct task_struct *);
36 long last_pid=0;
37
38 static int find_empty_process(void)
39 {
40 int free_task;
41 int i, tasks_free;
42 int this_user_tasks;
43
44 repeat:
45 if ((++last_pid) & 0xffff8000)
46 last_pid=1;
47 this_user_tasks = 0;
48 tasks_free = 0;
49 free_task = -EAGAIN;
50 i = NR_TASKS;
51 while (--i > 0) {
52 if (!task[i]) {
53 free_task = i;
54 tasks_free++;
55 continue;
56 }
57 if (task[i]->uid == current->uid)
58 this_user_tasks++;
59 if (task[i]->pid == last_pid || task[i]->pgrp == last_pid)
60 goto repeat;
61 }
62 if (tasks_free <= MIN_TASKS_LEFT_FOR_ROOT ||
63 this_user_tasks > MAX_TASKS_PER_USER)
64 if (current->uid)
65 return -EAGAIN;
66 return free_task;
67 }
68
69 static struct file * copy_fd(struct file * old_file)
70 {
71 struct file * new_file = get_empty_filp();
72 int error;
73
74 if (new_file) {
75 memcpy(new_file,old_file,sizeof(struct file));
76 new_file->f_count = 1;
77 if (new_file->f_inode)
78 new_file->f_inode->i_count++;
79 if (new_file->f_op && new_file->f_op->open) {
80 error = new_file->f_op->open(new_file->f_inode,new_file);
81 if (error) {
82 iput(new_file->f_inode);
83 new_file->f_count = 0;
84 new_file = NULL;
85 }
86 }
87 }
88 return new_file;
89 }
90
91 int dup_mmap(struct task_struct * tsk)
92 {
93 struct vm_area_struct * mpnt, **p, *tmp;
94
95 tsk->mmap = NULL;
96 tsk->stk_vma = NULL;
97 p = &tsk->mmap;
98 for (mpnt = current->mmap ; mpnt ; mpnt = mpnt->vm_next) {
99 tmp = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
100 if (!tmp)
101 return -ENOMEM;
102 *tmp = *mpnt;
103 tmp->vm_task = tsk;
104 tmp->vm_next = NULL;
105 if (tmp->vm_inode)
106 tmp->vm_inode->i_count++;
107 *p = tmp;
108 p = &tmp->vm_next;
109 if (current->stk_vma == mpnt)
110 tsk->stk_vma = tmp;
111 }
112 return 0;
113 }
114
115 #define IS_CLONE (regs.orig_eax == __NR_clone)
116 #define copy_vm(p) ((clone_flags & COPYVM)?copy_page_tables(p):clone_page_tables(p))
117
118
119
120
121
122
/*
 * Create a new process as a near-copy of current.  This entry point
 * services both fork() and clone() (IS_CLONE checks orig_eax); for
 * clone, ebx optionally supplies a new user stack pointer and ecx the
 * clone_flags.  Returns the child's pid to the parent, 0 to the child
 * (childregs->eax is cleared below), or -EAGAIN on failure.
 */
asmlinkage int sys_fork(struct pt_regs regs)
{
	struct pt_regs * childregs;
	struct task_struct *p;
	int i,nr;
	struct file *f;
	unsigned long clone_flags = COPYVM | SIGCHLD;	/* plain fork() defaults */

	/* the task_struct lives in one freshly allocated page */
	if(!(p = (struct task_struct*)__get_free_page(GFP_KERNEL)))
		goto bad_fork;
	nr = find_empty_process();
	if (nr < 0)
		goto bad_fork_free;
	task[nr] = p;
	/* start from a byte copy of the parent, then fix up what must differ */
	*p = *current;
	p->kernel_stack_page = 0;
	/* not runnable until setup completes (see TASK_RUNNING at the end) */
	p->state = TASK_UNINTERRUPTIBLE;
	p->flags &= ~(PF_PTRACED|PF_TRACESYS);	/* tracing is not inherited */
	p->pid = last_pid;
	p->swappable = 1;
	p->p_pptr = p->p_opptr = current;
	p->p_cptr = NULL;
	SET_LINKS(p);
	/* child starts with clean pending signals, timers and accounting */
	p->signal = 0;
	p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
	p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
	p->leader = 0;		/* session leadership is not inherited */
	p->utime = p->stime = 0;
	p->cutime = p->cstime = 0;
	p->min_flt = p->maj_flt = 0;
	p->cmin_flt = p->cmaj_flt = 0;
	p->start_time = jiffies;



	if (!(p->kernel_stack_page = __get_free_page(GFP_KERNEL)))
		goto bad_fork_cleanup;
	/* build the child's TSS: kernel segments, fresh kernel stack */
	p->tss.es = KERNEL_DS;
	p->tss.cs = KERNEL_CS;
	p->tss.ss = KERNEL_DS;
	p->tss.ds = KERNEL_DS;
	p->tss.fs = USER_DS;
	p->tss.gs = KERNEL_DS;
	p->tss.ss0 = KERNEL_DS;
	p->tss.esp0 = p->kernel_stack_page + PAGE_SIZE;
	p->tss.tr = _TSS(nr);
	/* place a copy of the parent's registers at the top of the child's
	 * kernel stack; the child resumes at ret_from_sys_call on it */
	childregs = ((struct pt_regs *) (p->kernel_stack_page + PAGE_SIZE)) - 1;
	p->tss.esp = (unsigned long) childregs;
	p->tss.eip = (unsigned long) ret_from_sys_call;
	*childregs = regs;
	childregs->eax = 0;	/* fork() returns 0 in the child */
	p->tss.back_link = 0;
	/* mask 0xffffcfff clears the IOPL bits (12-13) in the child's eflags */
	p->tss.eflags = regs.eflags & 0xffffcfff;
	if (IS_CLONE) {
		if (regs.ebx)
			childregs->esp = regs.ebx;	/* caller-supplied user stack */
		clone_flags = regs.ecx;
		/* sharing the same stack while sharing the VM is unworkable,
		 * so force a VM copy in that case */
		if (childregs->esp == regs.esp)
			clone_flags |= COPYVM;
	}
	p->exit_signal = clone_flags & CSIGNAL;
	p->tss.ldt = _LDT(nr);
	/* p->ldt was inherited from current by the struct copy above; give
	 * the child its own copy of a private LDT if the parent has one */
	if (p->ldt) {
		p->ldt = (struct desc_struct*) vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
		if (p->ldt != NULL)
			memcpy(p->ldt, current->ldt, LDT_ENTRIES*LDT_ENTRY_SIZE);
	}
	p->tss.bitmap = offsetof(struct tss_struct,io_bitmap);
	/* all-ones bitmap = no I/O ports allowed from user mode */
	for (i = 0; i < IO_BITMAP_SIZE+1 ; i++)
		p->tss.io_bitmap[i] = ~0;
	/* if the parent owns the FPU, snapshot its state for the child */
	if (last_task_used_math == current)
		__asm__("clts ; fnsave %0 ; frstor %0":"=m" (p->tss.i387));
	p->semun = NULL; p->shm = NULL;
	if (copy_vm(p) || shm_fork(current, p))
		goto bad_fork_cleanup;
	if (clone_flags & COPYFD) {
		/* private duplicates of every open file.
		 * NOTE(review): copy_fd() can return NULL on failure, which is
		 * stored without further handling — confirm this is intended. */
		for (i=0; i<NR_OPEN;i++)
			if ((f = p->filp[i]) != NULL)
				p->filp[i] = copy_fd(f);
	} else {
		/* share the parent's file structures, just bump refcounts */
		for (i=0; i<NR_OPEN;i++)
			if ((f = p->filp[i]) != NULL)
				f->f_count++;
	}
	/* the struct copy shared these inode pointers; take references */
	if (current->pwd)
		current->pwd->i_count++;
	if (current->root)
		current->root->i_count++;
	if (current->executable)
		current->executable->i_count++;
	/* NOTE(review): dup_mmap() can fail with -ENOMEM but its return
	 * value is ignored here — verify the failure path is acceptable. */
	dup_mmap(p);
	set_tss_desc(gdt+(nr<<1)+FIRST_TSS_ENTRY,&(p->tss));
	if (p->ldt)
		set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY,p->ldt, 512);
	else
		set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY,&default_ldt, 1);

	/* child gets half the parent's remaining timeslice */
	p->counter = current->counter >> 1;
	p->state = TASK_RUNNING;	/* now visible to the scheduler */
	return p->pid;
bad_fork_cleanup:
	task[nr] = NULL;
	REMOVE_LINKS(p);
	free_page(p->kernel_stack_page);
bad_fork_free:
	free_page((long) p);
bad_fork:
	return -EAGAIN;
}