This source file includes the following definitions:
- find_empty_process
- copy_fd
- dup_mmap
- sys_fork
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/segment.h>
#include <linux/ptrace.h>
#include <linux/malloc.h>
#include <linux/ldt.h>

#include <asm/segment.h>
#include <asm/system.h>

asmlinkage void ret_from_sys_call(void) __asm__("ret_from_sys_call");

#define MAX_TASKS_PER_USER (NR_TASKS/2)
#define MIN_TASKS_LEFT_FOR_ROOT 4

extern int shm_fork(struct task_struct *, struct task_struct *);
long last_pid=0;

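/*
 * find_empty_process: pick an unused slot in the task[] array and a fresh
 * pid in last_pid.  The search restarts whenever the candidate pid is
 * still in use as a pid, process group or session id.  Per-user task
 * limits and a small reserve of slots for root are enforced here too.
 */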
static int find_empty_process(void)
{
        int free_task;
        int i, tasks_free;
        int this_user_tasks;

repeat:
        if ((++last_pid) & 0xffff8000)
                last_pid=1;
        this_user_tasks = 0;
        tasks_free = 0;
        free_task = -EAGAIN;
        i = NR_TASKS;
        while (--i > 0) {
                if (!task[i]) {
                        free_task = i;
                        tasks_free++;
                        continue;
                }
                if (task[i]->uid == current->uid)
                        this_user_tasks++;
                if (task[i]->pid == last_pid || task[i]->pgrp == last_pid ||
                    task[i]->session == last_pid)
                        goto repeat;
        }
        if (tasks_free <= MIN_TASKS_LEFT_FOR_ROOT ||
            this_user_tasks > MAX_TASKS_PER_USER)
                if (current->uid)
                        return -EAGAIN;
        return free_task;
}

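/*
 * copy_fd: duplicate one open file structure, used by sys_fork() when the
 * COPYFD clone flag is set.  The copy gets its own reference count, takes
 * an extra reference on the inode, and the driver's open() method (if any)
 * is called for it; on failure the copy is discarded and NULL is returned.
 */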
static struct file * copy_fd(struct file * old_file)
{
        struct file * new_file = get_empty_filp();
        int error;

        if (new_file) {
                memcpy(new_file,old_file,sizeof(struct file));
                new_file->f_count = 1;
                if (new_file->f_inode)
                        new_file->f_inode->i_count++;
                if (new_file->f_op && new_file->f_op->open) {
                        error = new_file->f_op->open(new_file->f_inode,new_file);
                        if (error) {
                                iput(new_file->f_inode);
                                new_file->f_count = 0;
                                new_file = NULL;
                        }
                }
        }
        return new_file;
}

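/*
 * dup_mmap: give the child its own copy of the parent's vm_area_struct
 * list.  Each area is duplicated with kmalloc(), re-pointed at the new
 * task, and any backing inode gets an extra reference.  The child's
 * stack area pointer (stk_vma) is fixed up to the copied entry.
 */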
int dup_mmap(struct task_struct * tsk)
{
        struct vm_area_struct * mpnt, **p, *tmp;

        tsk->mmap = NULL;
        tsk->stk_vma = NULL;
        p = &tsk->mmap;
        for (mpnt = current->mmap ; mpnt ; mpnt = mpnt->vm_next) {
                tmp = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
                if (!tmp)
                        return -ENOMEM;
                *tmp = *mpnt;
                tmp->vm_task = tsk;
                tmp->vm_next = NULL;
                if (tmp->vm_inode)
                        tmp->vm_inode->i_count++;
                *p = tmp;
                p = &tmp->vm_next;
                if (current->stk_vma == mpnt)
                        tsk->stk_vma = tmp;
        }
        return 0;
}

#define IS_CLONE (regs.orig_eax == __NR_clone)
#define copy_vm(p) ((clone_flags & COPYVM)?copy_page_tables(p):clone_page_tables(p))

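/*
 * sys_fork: entry point for both fork() and clone().  A new task_struct
 * is built in a free page, most of the current task is copied into it,
 * the child's TSS and kernel stack are set up so that it resumes at
 * ret_from_sys_call with eax == 0, and the address space, SysV shared
 * memory, open files, directories and LDT are either copied or shared
 * according to the clone flags.
 */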
asmlinkage int sys_fork(struct pt_regs regs)
{
        struct pt_regs * childregs;
        struct task_struct *p;
        int i,nr;
        struct file *f;
        unsigned long clone_flags = COPYVM | SIGCHLD;

        if(!(p = (struct task_struct*)__get_free_page(GFP_KERNEL)))
                goto bad_fork;
        nr = find_empty_process();
        if (nr < 0)
                goto bad_fork_free;
        task[nr] = p;
        *p = *current;
        p->kernel_stack_page = 0;
        p->state = TASK_UNINTERRUPTIBLE;
        p->flags &= ~(PF_PTRACED|PF_TRACESYS);
        p->pid = last_pid;
        p->swappable = 1;
        p->p_pptr = p->p_opptr = current;
        p->p_cptr = NULL;
        SET_LINKS(p);
        p->signal = 0;
        p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
        p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
        p->leader = 0;
        p->utime = p->stime = 0;
        p->cutime = p->cstime = 0;
        p->min_flt = p->maj_flt = 0;
        p->cmin_flt = p->cmaj_flt = 0;
        p->start_time = jiffies;

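        /*
         * Give the child its own kernel stack and TSS.  The parent's saved
         * registers are copied to the top of the new kernel stack and eax
         * is cleared there, so the child sees fork() return 0.
         */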
        if (!(p->kernel_stack_page = __get_free_page(GFP_KERNEL)))
                goto bad_fork_cleanup;
        p->tss.es = KERNEL_DS;
        p->tss.cs = KERNEL_CS;
        p->tss.ss = KERNEL_DS;
        p->tss.ds = KERNEL_DS;
        p->tss.fs = USER_DS;
        p->tss.gs = KERNEL_DS;
        p->tss.ss0 = KERNEL_DS;
        p->tss.esp0 = p->kernel_stack_page + PAGE_SIZE;
        p->tss.tr = _TSS(nr);
        childregs = ((struct pt_regs *) (p->kernel_stack_page + PAGE_SIZE)) - 1;
        p->tss.esp = (unsigned long) childregs;
        p->tss.eip = (unsigned long) ret_from_sys_call;
        *childregs = regs;
        childregs->eax = 0;
        p->tss.back_link = 0;
        p->tss.eflags = regs.eflags & 0xffffcfff;
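        /*
         * For clone(): ebx optionally carries a new user stack pointer and
         * ecx the clone flags.  If the child keeps the parent's stack, the
         * address space must be copied rather than shared.
         */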
        if (IS_CLONE) {
                if (regs.ebx)
                        childregs->esp = regs.ebx;
                clone_flags = regs.ecx;
                if (childregs->esp == regs.esp)
                        clone_flags |= COPYVM;
        }
        p->exit_signal = clone_flags & CSIGNAL;
        p->tss.ldt = _LDT(nr);
        if (p->ldt) {
                p->ldt = (struct desc_struct*) vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
                if (p->ldt != NULL)
                        memcpy(p->ldt, current->ldt, LDT_ENTRIES*LDT_ENTRY_SIZE);
        }
        p->tss.bitmap = offsetof(struct tss_struct,io_bitmap);
        for (i = 0; i < IO_BITMAP_SIZE+1 ; i++)
                p->tss.io_bitmap[i] = ~0;
        if (last_task_used_math == current)
                __asm__("clts ; fnsave %0 ; frstor %0":"=m" (p->tss.i387));
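        /*
         * Copy or share the page tables (copy_vm) and attach any SysV
         * shared memory segments, then either duplicate (COPYFD) or share
         * the open file table, and take extra references on the working
         * directory, root directory and executable inode.
         */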
        p->semun = NULL; p->shm = NULL;
        if (copy_vm(p) || shm_fork(current, p))
                goto bad_fork_cleanup;
        if (clone_flags & COPYFD) {
                for (i=0; i<NR_OPEN;i++)
                        if ((f = p->filp[i]) != NULL)
                                p->filp[i] = copy_fd(f);
        } else {
                for (i=0; i<NR_OPEN;i++)
                        if ((f = p->filp[i]) != NULL)
                                f->f_count++;
        }
        if (current->pwd)
                current->pwd->i_count++;
        if (current->root)
                current->root->i_count++;
        if (current->executable)
                current->executable->i_count++;
        dup_mmap(p);
        set_tss_desc(gdt+(nr<<1)+FIRST_TSS_ENTRY,&(p->tss));
        if (p->ldt)
                set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY,p->ldt, 512);
        else
                set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY,&default_ldt, 1);
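
        /*
         * The child starts with half of the parent's remaining time slice;
         * setting TASK_RUNNING is done last, after which the child is
         * schedulable and the parent returns the new pid.
         */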
        p->counter = current->counter >> 1;
        p->state = TASK_RUNNING;
        return p->pid;
bad_fork_cleanup:
        task[nr] = NULL;
        REMOVE_LINKS(p);
        free_page(p->kernel_stack_page);
bad_fork_free:
        free_page((long) p);
bad_fork:
        return -EAGAIN;
}