This source file includes following definitions.
- find_empty_process
- copy_fd
- dup_mmap
- sys_fork
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/errno.h>
15 #include <linux/sched.h>
16 #include <linux/kernel.h>
17 #include <linux/mm.h>
18 #include <linux/stddef.h>
19 #include <linux/unistd.h>
20 #include <linux/segment.h>
21 #include <linux/ptrace.h>
22
23 #include <asm/segment.h>
24 #include <asm/system.h>
25
26 asmlinkage void ret_from_sys_call(void) __asm__("ret_from_sys_call");
27
28
29
30 #define MAX_TASKS_PER_USER (NR_TASKS/2)
31 #define MIN_TASKS_LEFT_FOR_ROOT 4
32
33 extern int shm_fork(struct task_struct *, struct task_struct *);
34 long last_pid=0;
35
36 static int find_empty_process(void)
37 {
38 int i, task_nr, tasks_free;
39 int this_user_tasks;
40
41 repeat:
42 if ((++last_pid) & 0xffff8000)
43 last_pid=1;
44 this_user_tasks = 0;
45 for(i=0 ; i < NR_TASKS ; i++) {
46 if (!task[i])
47 continue;
48 if (task[i]->uid == current->uid)
49 this_user_tasks++;
50 if (task[i]->pid == last_pid || task[i]->pgrp == last_pid)
51 goto repeat;
52 }
53 if (this_user_tasks > MAX_TASKS_PER_USER && current->uid)
54 return -EAGAIN;
55
56
57
58 tasks_free = 0; task_nr = 0;
59 for (i=NR_TASKS-1; i > 0; i--) {
60 if (!task[i]) {
61 tasks_free++;
62 task_nr = i;
63 }
64 }
65 if (tasks_free <= MIN_TASKS_LEFT_FOR_ROOT && current->uid)
66 return -EAGAIN;
67 return task_nr;
68 }
69
70 static struct file * copy_fd(struct file * old_file)
71 {
72 struct file * new_file = get_empty_filp();
73 int error;
74
75 if (new_file) {
76 memcpy(new_file,old_file,sizeof(struct file));
77 new_file->f_count = 1;
78 if (new_file->f_inode)
79 new_file->f_inode->i_count++;
80 if (new_file->f_op && new_file->f_op->open) {
81 error = new_file->f_op->open(new_file->f_inode,new_file);
82 if (error) {
83 iput(new_file->f_inode);
84 new_file->f_count = 0;
85 new_file = NULL;
86 }
87 }
88 }
89 return new_file;
90 }
91
92 int dup_mmap(struct task_struct * tsk)
93 {
94 struct vm_area_struct * mpnt, **p, *tmp;
95
96 tsk->mmap = NULL;
97 p = &tsk->mmap;
98 for (mpnt = current->mmap ; mpnt ; mpnt = mpnt->vm_next) {
99 tmp = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
100 if (!tmp)
101 return -ENOMEM;
102 *tmp = *mpnt;
103 tmp->vm_task = tsk;
104 tmp->vm_next = NULL;
105 if (tmp->vm_inode)
106 tmp->vm_inode->i_count++;
107 *p = tmp;
108 p = &tmp->vm_next;
109 }
110 return 0;
111 }
112
113 #define IS_CLONE (regs.orig_eax == __NR_clone)
114 #define copy_vm(p) ((clone_flags & COPYVM)?copy_page_tables(p):clone_page_tables(p))
115
116
117
118
119
120
/*
 * sys_fork() implements both fork() and clone(); which one is running
 * is decided by IS_CLONE (the syscall number saved in regs.orig_eax).
 *
 * Returns the child's pid to the parent; the child itself starts life
 * in ret_from_sys_call with eax == 0 (cleared below).  -EAGAIN on any
 * failure.
 */
asmlinkage int sys_fork(struct pt_regs regs)
{
	struct pt_regs * childregs;
	struct task_struct *p;
	int i,nr;
	struct file *f;
	unsigned long clone_flags = COPYVM | SIGCHLD;	/* plain fork() semantics */

	/* the child's task_struct lives in one free page */
	if(!(p = (struct task_struct*)__get_free_page(GFP_KERNEL)))
		goto bad_fork;
	nr = find_empty_process();	/* also picks last_pid for the child */
	if (nr < 0)
		goto bad_fork_free;
	task[nr] = p;
	/* start from a byte copy of the parent, then fix up what differs */
	*p = *current;
	p->kernel_stack_page = 0;
	p->state = TASK_UNINTERRUPTIBLE;	/* not runnable until fully set up */
	p->flags &= ~(PF_PTRACED|PF_TRACESYS);	/* tracing is not inherited */
	p->pid = last_pid;
	p->swappable = 1;
	p->p_pptr = p->p_opptr = current;	/* parent and original parent */
	p->p_cptr = NULL;			/* no children yet */
	SET_LINKS(p);				/* hook into the process lists */
	p->signal = 0;				/* no pending signals */
	/* interval timers and time accounting start from zero */
	p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
	p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
	p->leader = 0;		/* session leadership is not inherited */
	p->utime = p->stime = 0;
	p->cutime = p->cstime = 0;
	p->min_flt = p->maj_flt = 0;
	p->cmin_flt = p->cmaj_flt = 0;
	p->start_time = jiffies;

	if (!(p->kernel_stack_page = __get_free_page(GFP_KERNEL)))
		goto bad_fork_cleanup;
	/* build the child's TSS: it starts in kernel mode... */
	p->tss.es = KERNEL_DS;
	p->tss.cs = KERNEL_CS;
	p->tss.ss = KERNEL_DS;
	p->tss.ds = KERNEL_DS;
	p->tss.fs = USER_DS;
	p->tss.gs = KERNEL_DS;
	p->tss.ss0 = KERNEL_DS;
	p->tss.esp0 = p->kernel_stack_page + PAGE_SIZE;	/* top of kernel stack */
	p->tss.tr = _TSS(nr);
	/* ...with a copy of the parent's user register frame at the stack top */
	childregs = ((struct pt_regs *) (p->kernel_stack_page + PAGE_SIZE)) - 1;
	p->tss.esp = (unsigned long) childregs;
	p->tss.eip = (unsigned long) ret_from_sys_call;	/* child resumes here */
	*childregs = regs;
	childregs->eax = 0;		/* fork() returns 0 in the child */
	p->tss.back_link = 0;
	p->tss.eflags = regs.eflags & 0xffffcfff;	/* clear NT and IOPL bits */
	if (IS_CLONE) {
		/* clone(): ebx = optional new user stack, ecx = clone flags */
		if (regs.ebx)
			childregs->esp = regs.ebx;
		clone_flags = regs.ecx;
		/* sharing the parent's stack without COPYVM would corrupt it */
		if (childregs->esp == regs.esp)
			clone_flags |= COPYVM;
	}
	p->exit_signal = clone_flags & CSIGNAL;	/* signal sent to parent on exit */
	p->tss.ldt = _LDT(nr);
	if (p->ldt) {
		/* parent had a private LDT: give the child its own copy.
		 * On allocation failure p->ldt goes NULL and the child
		 * falls back to default_ldt below. */
		if ((p->ldt = (struct desc_struct*) __get_free_page(GFP_KERNEL)) != NULL)
			memcpy(p->ldt, current->ldt, PAGE_SIZE);
	}
	p->tss.bitmap = offsetof(struct tss_struct,io_bitmap);
	/* all bits set: no user-mode I/O port access by default */
	for (i = 0; i < IO_BITMAP_SIZE+1 ; i++)
		p->tss.io_bitmap[i] = ~0;
	/* if the FPU holds the parent's state, snapshot it into the child */
	if (last_task_used_math == current)
		__asm__("clts ; fnsave %0 ; frstor %0":"=m" (p->tss.i387));
	p->semun = NULL; p->shm = NULL;
	if (copy_vm(p) || shm_fork(current, p))
		goto bad_fork_cleanup;
	if (clone_flags & COPYFD) {
		/* private copies of every open file (re-runs driver open()) */
		for (i=0; i<NR_OPEN;i++)
			if ((f = p->filp[i]) != NULL)
				p->filp[i] = copy_fd(f);
	} else {
		/* plain fork: share the file structures, bump refcounts */
		for (i=0; i<NR_OPEN;i++)
			if ((f = p->filp[i]) != NULL)
				f->f_count++;
	}
	/* child holds its own references on cwd, root and executable inodes */
	if (current->pwd)
		current->pwd->i_count++;
	if (current->root)
		current->root->i_count++;
	if (current->executable)
		current->executable->i_count++;
	/* NOTE(review): return value ignored — on -ENOMEM the child runs
	 * with an incomplete vma list; confirm this should abort the fork */
	dup_mmap(p);
	/* publish the child's TSS and LDT descriptors in the GDT */
	set_tss_desc(gdt+(nr<<1)+FIRST_TSS_ENTRY,&(p->tss));
	if (p->ldt)
		set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY,p->ldt, 512);
	else
		set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY,&default_ldt, 1);

	p->counter = current->counter >> 1;	/* split the timeslice with the child */
	p->state = TASK_RUNNING;	/* only now may the scheduler pick it up */
	return p->pid;
bad_fork_cleanup:
	/* NOTE(review): a private LDT page allocated above is not freed
	 * on this path — looks like a page leak; confirm */
	task[nr] = NULL;
	REMOVE_LINKS(p);
	free_page(p->kernel_stack_page);
bad_fork_free:
	free_page((long) p);
bad_fork:
	return -EAGAIN;
}