This source file includes the following definitions:
- check_range
- mem_read
- mem_write
- mem_lseek
- mem_mmap
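/*
 * Read, write, seek and mmap support for the per-process "mem" entry in
 * the proc filesystem, which exposes another task's address space.  The
 * target task's pid is encoded in the upper bits of the inode number
 * (i_ino >> 16).
 */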
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/segment.h>
#include <asm/io.h>
#include <asm/pgtable.h>

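/*
 * Defining mem_write to NULL compiles out the write implementation below
 * (the #ifndef mem_write block) and leaves the write slot in
 * proc_mem_operations as a NULL pointer.
 */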
#define mem_write NULL

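/*
 * check_range() walks the target task's VMAs starting at addr and returns
 * how many of the requested bytes are covered by contiguous, readable
 * mappings (at most count), or -EACCES if none are.
 */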
static int check_range(struct task_struct * tsk, unsigned long addr, int count)
{
        struct vm_area_struct *vma;
        int retval;

        vma = find_vma(tsk, addr);
        if (!vma)
                return -EACCES;
        if (vma->vm_start > addr)
                return -EACCES;
        if (!(vma->vm_flags & VM_READ))
                return -EACCES;
        while ((retval = vma->vm_end - addr) < count) {
                struct vm_area_struct *next = vma->vm_next;
                if (!next)
                        break;
                if (vma->vm_end != next->vm_start)
                        break;
                if (!(next->vm_flags & VM_READ))
                        break;
                vma = next;
        }
        if (retval > count)
                retval = count;
        return retval;
}

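/*
 * mem_read() implements read() on the mem file: the target pid is taken
 * from the upper bits of the inode number, the range is validated with
 * check_range(), and the data is copied out page by page by walking the
 * target's page tables directly.  The copy stops early at a missing page
 * or a pending signal.
 */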
static int mem_read(struct inode * inode, struct file * file, char * buf, int count)
{
        pgd_t *page_dir;
        pmd_t *page_middle;
        pte_t pte;
        char * page;
        struct task_struct * tsk;
        unsigned long addr, pid;
        char *tmp;
        int i;

        if (count < 0)
                return -EINVAL;
        pid = inode->i_ino;
        pid >>= 16;
        tsk = NULL;
        for (i = 1 ; i < NR_TASKS ; i++)
                if (task[i] && task[i]->pid == pid) {
                        tsk = task[i];
                        break;
                }
        if (!tsk)
                return -EACCES;
        addr = file->f_pos;
        count = check_range(tsk, addr, count);
        if (count < 0)
                return count;
        tmp = buf;
        while (count > 0) {
                if (current->signal & ~current->blocked)
                        break;
                page_dir = pgd_offset(tsk,addr);
                if (pgd_none(*page_dir))
                        break;
                if (pgd_bad(*page_dir)) {
                        printk("Bad page dir entry %08lx\n", pgd_val(*page_dir));
                        pgd_clear(page_dir);
                        break;
                }
                page_middle = pmd_offset(page_dir,addr);
                if (pmd_none(*page_middle))
                        break;
                if (pmd_bad(*page_middle)) {
                        printk("Bad page middle entry %08lx\n", pmd_val(*page_middle));
                        pmd_clear(page_middle);
                        break;
                }
                pte = *pte_offset(page_middle,addr);
                if (!pte_present(pte))
                        break;
                page = (char *) pte_page(pte) + (addr & ~PAGE_MASK);
                i = PAGE_SIZE-(addr & ~PAGE_MASK);
                if (i > count)
                        i = count;
                memcpy_tofs(tmp, page, i);
                addr += i;
                tmp += i;
                count -= i;
        }
        file->f_pos = addr;
        return tmp-buf;
}

#ifndef mem_write

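/*
 * mem_write() is the mirror image of mem_read(): it copies user data into
 * the target task's pages, stopping at pages that are absent or not
 * writable.  It is only built when mem_write has not been defined to NULL
 * above.
 */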
static int mem_write(struct inode * inode, struct file * file, char * buf, int count)
{
        pgd_t *page_dir;
        pmd_t *page_middle;
        pte_t pte;
        char * page;
        struct task_struct * tsk;
        unsigned long addr, pid;
        char *tmp;
        int i;

        if (count < 0)
                return -EINVAL;
        addr = file->f_pos;
        pid = inode->i_ino;
        pid >>= 16;
        tsk = NULL;
        for (i = 1 ; i < NR_TASKS ; i++)
                if (task[i] && task[i]->pid == pid) {
                        tsk = task[i];
                        break;
                }
        if (!tsk)
                return -EACCES;
        tmp = buf;
        while (count > 0) {
                if (current->signal & ~current->blocked)
                        break;
                page_dir = pgd_offset(tsk,addr);
                if (pgd_none(*page_dir))
                        break;
                if (pgd_bad(*page_dir)) {
                        printk("Bad page dir entry %08lx\n", pgd_val(*page_dir));
                        pgd_clear(page_dir);
                        break;
                }
                page_middle = pmd_offset(page_dir,addr);
                if (pmd_none(*page_middle))
                        break;
                if (pmd_bad(*page_middle)) {
                        printk("Bad page middle entry %08lx\n", pmd_val(*page_middle));
                        pmd_clear(page_middle);
                        break;
                }
                pte = *pte_offset(page_middle,addr);
                if (!pte_present(pte))
                        break;
                if (!pte_write(pte))
                        break;
                page = (char *) pte_page(pte) + (addr & ~PAGE_MASK);
                i = PAGE_SIZE-(addr & ~PAGE_MASK);
                if (i > count)
                        i = count;
                memcpy_fromfs(page, tmp, i);
                addr += i;
                tmp += i;
                count -= i;
        }
        file->f_pos = addr;
        if (tmp != buf)
                return tmp-buf;
        if (current->signal & ~current->blocked)
                return -ERESTARTSYS;
        return 0;
}

#endif

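/*
 * mem_lseek() supports absolute (SEEK_SET) and relative (SEEK_CUR) seeks
 * only; seeking from the end is rejected with -EINVAL.
 */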
static int mem_lseek(struct inode * inode, struct file * file, off_t offset, int orig)
{
        switch (orig) {
                case 0:
                        file->f_pos = offset;
                        return file->f_pos;
                case 1:
                        file->f_pos += offset;
                        return file->f_pos;
                default:
                        return -EINVAL;
        }
}

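/*
 * mem_mmap() maps a range of the target task's address space directly into
 * the calling process.  It makes two passes over the source range: the
 * first validates that every page can be shared, the second copies the
 * page table entries and raises the page reference counts.
 */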
int mem_mmap(struct inode * inode, struct file * file,
             struct vm_area_struct * vma)
{
        struct task_struct *tsk;
        pgd_t *src_dir, *dest_dir;
        pmd_t *src_middle, *dest_middle;
        pte_t *src_table, *dest_table;
        unsigned long stmp, dtmp;
        struct vm_area_struct *src_vma = NULL;
        int i;

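        /* Locate the target task: its pid is recovered from the inode number. */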
        tsk = NULL;
        for (i = 1 ; i < NR_TASKS ; i++)
                if (task[i] && task[i]->pid == (inode->i_ino >> 16)) {
                        tsk = task[i];
                        break;
                }

        if (!tsk)
                return -EACCES;

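        /*
         * First pass: make sure every source page in the requested range
         * lies in a suitable VMA and is already set up in the page tables.
         */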
        src_vma = tsk->mm->mmap;
        stmp = vma->vm_offset;
        while (stmp < vma->vm_offset + (vma->vm_end - vma->vm_start)) {
                while (src_vma && stmp > src_vma->vm_end)
                        src_vma = src_vma->vm_next;
                if (!src_vma || (src_vma->vm_flags & VM_SHM))
                        return -EINVAL;

                src_dir = pgd_offset(tsk, stmp);
                if (pgd_none(*src_dir))
                        return -EINVAL;
                if (pgd_bad(*src_dir)) {
                        printk("Bad source page dir entry %08lx\n", pgd_val(*src_dir));
                        return -EINVAL;
                }
                src_middle = pmd_offset(src_dir, stmp);
                if (pmd_none(*src_middle))
                        return -EINVAL;
                if (pmd_bad(*src_middle)) {
                        printk("Bad source page middle entry %08lx\n", pmd_val(*src_middle));
                        return -EINVAL;
                }
                src_table = pte_offset(src_middle, stmp);
                if (pte_none(*src_table))
                        return -EINVAL;

                if (stmp < src_vma->vm_start) {
                        if (!(src_vma->vm_flags & VM_GROWSDOWN))
                                return -EINVAL;
                        if (src_vma->vm_end - stmp > current->rlim[RLIMIT_STACK].rlim_cur)
                                return -EINVAL;
                }
                stmp += PAGE_SIZE;
        }

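        /*
         * Second pass: allocate destination page tables and duplicate the
         * source ptes into the caller's address space, bumping each page's
         * reference count so the frames are shared rather than copied.
         */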
        src_vma = tsk->mm->mmap;
        stmp = vma->vm_offset;
        dtmp = vma->vm_start;

        while (dtmp < vma->vm_end) {
                while (src_vma && stmp > src_vma->vm_end)
                        src_vma = src_vma->vm_next;

                src_dir = pgd_offset(tsk, stmp);
                src_middle = pmd_offset(src_dir, stmp);
                src_table = pte_offset(src_middle, stmp);

                dest_dir = pgd_offset(current, dtmp);
                dest_middle = pmd_alloc(dest_dir, dtmp);
                if (!dest_middle)
                        return -ENOMEM;
                dest_table = pte_alloc(dest_middle, dtmp);
                if (!dest_table)
                        return -ENOMEM;

                if (!pte_present(*src_table))
                        do_no_page(src_vma, stmp, 1);

                if ((vma->vm_flags & VM_WRITE) && !pte_write(*src_table))
                        do_wp_page(src_vma, stmp, 1);

                set_pte(src_table, pte_mkdirty(*src_table));
                set_pte(dest_table, *src_table);
                mem_map[MAP_NR(pte_page(*src_table))]++;

                stmp += PAGE_SIZE;
                dtmp += PAGE_SIZE;
        }

        invalidate();
        return 0;
}

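/*
 * File operations for the proc mem file: only the lseek, read, write and
 * mmap slots are filled in (and write is NULL whenever mem_write is
 * defined away above); all other slots are left NULL.
 */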
static struct file_operations proc_mem_operations = {
        mem_lseek,
        mem_read,
        mem_write,
        NULL,
        NULL,
        NULL,
        mem_mmap,
        NULL,
        NULL,
        NULL
};

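/*
 * Inode operations for the proc mem file: only the default file
 * operations pointer is set; every other operation is left NULL.
 */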
struct inode_operations proc_mem_inode_operations = {
        &proc_mem_operations,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL
};