This source file includes the following definitions:
- mem_read
- mem_write
- mem_lseek
- mem_mmap
1
2
3
4
5
6
7 #include <linux/types.h>
8 #include <linux/errno.h>
9 #include <linux/sched.h>
10 #include <linux/kernel.h>
11 #include <linux/mm.h>
12
13 #include <asm/page.h>
14 #include <asm/segment.h>
15 #include <asm/io.h>
16 #include <asm/pgtable.h>
17
18
19
20
21
22
23
24 #define mem_write NULL
25
26 static int mem_read(struct inode * inode, struct file * file,char * buf, int count)
27 {
28 pgd_t *pgdir;
29 pte_t pte;
30 char * page;
31 struct task_struct * tsk;
32 unsigned long addr, pid;
33 char *tmp;
34 int i;
35
36 if (count < 0)
37 return -EINVAL;
38 pid = inode->i_ino;
39 pid >>= 16;
40 tsk = NULL;
41 for (i = 1 ; i < NR_TASKS ; i++)
42 if (task[i] && task[i]->pid == pid) {
43 tsk = task[i];
44 break;
45 }
46 if (!tsk)
47 return -EACCES;
48 addr = file->f_pos;
49 tmp = buf;
50 while (count > 0) {
51 if (current->signal & ~current->blocked)
52 break;
53 pgdir = PAGE_DIR_OFFSET(tsk,addr);
54 if (pgd_none(*pgdir))
55 break;
56 if (pgd_bad(*pgdir)) {
57 printk("Bad page dir entry %08lx\n", pgd_val(*pgdir));
58 pgd_clear(pgdir);
59 break;
60 }
61 pte = *(pte_t *) (PAGE_PTR(addr) + pgd_page(*pgdir));
62 if (!pte_present(pte))
63 break;
64 page = (char *) pte_page(pte) + (addr & ~PAGE_MASK);
65 i = PAGE_SIZE-(addr & ~PAGE_MASK);
66 if (i > count)
67 i = count;
68 memcpy_tofs(tmp, page, i);
69 addr += i;
70 tmp += i;
71 count -= i;
72 }
73 file->f_pos = addr;
74 return tmp-buf;
75 }
76
77 #ifndef mem_write
78
79 static int mem_write(struct inode * inode, struct file * file,char * buf, int count)
80 {
81 pgd_t * pgdir;
82 pte_t * pte;
83 char * page;
84 struct task_struct * tsk;
85 unsigned long addr, pid;
86 char *tmp;
87 int i;
88
89 if (count < 0)
90 return -EINVAL;
91 addr = file->f_pos;
92 pid = inode->i_ino;
93 pid >>= 16;
94 tsk = NULL;
95 for (i = 1 ; i < NR_TASKS ; i++)
96 if (task[i] && task[i]->pid == pid) {
97 tsk = task[i];
98 break;
99 }
100 if (!tsk)
101 return -EACCES;
102 tmp = buf;
103 while (count > 0) {
104 if (current->signal & ~current->blocked)
105 break;
106 pgdir = PAGE_DIR_OFFSET(tsk,addr);
107 if (pgd_none(*pgdir))
108 break;
109 if (pgd_bad(*pgdir)) {
110 printk("Bad page dir entry %08lx\n", pgd_val(*pgdir));
111 pgd_clear(pgdir);
112 break;
113 }
114 pte = *(pte_t *) (PAGE_PTR(addr) + pgd_page(*pgdir));
115 if (!pte_present(pte))
116 break;
117 if (!pte_write(pte))
118 break;
119 page = (char *) pte_page(pte) + (addr & ~PAGE_MASK);
120 i = PAGE_SIZE-(addr & ~PAGE_MASK);
121 if (i > count)
122 i = count;
123 memcpy_fromfs(page, tmp, i);
124 addr += i;
125 tmp += i;
126 count -= i;
127 }
128 file->f_pos = addr;
129 if (tmp != buf)
130 return tmp-buf;
131 if (current->signal & ~current->blocked)
132 return -ERESTARTSYS;
133 return 0;
134 }
135
136 #endif
137
138 static int mem_lseek(struct inode * inode, struct file * file, off_t offset, int orig)
139 {
140 switch (orig) {
141 case 0:
142 file->f_pos = offset;
143 return file->f_pos;
144 case 1:
145 file->f_pos += offset;
146 return file->f_pos;
147 default:
148 return -EINVAL;
149 }
150 }
151
152
153
154
/*
 * Map a range of another process' address space into the current
 * process (mmap on /proc/<pid>/mem).  Works in two passes:
 * pass 1 walks the whole source range and validates that every page
 * is mapped and permitted; pass 2 re-walks it and duplicates the
 * page-table entries into the current process, bumping the page
 * reference counts so the frames are shared.
 */
155 int mem_mmap(struct inode * inode, struct file * file,
156 	struct vm_area_struct * vma)
157 {
158 struct task_struct *tsk;
159 pgd_t *src_dir, *dest_dir;
160 pte_t *src_table, *dest_table;
161 unsigned long stmp, dtmp;
162 struct vm_area_struct *src_vma = NULL;
163 int i;
164
165
166
/* find the target task; its pid is in the high 16 bits of the inode number */
167 tsk = NULL;
168 for (i = 1 ; i < NR_TASKS ; i++)
169 if (task[i] && task[i]->pid == (inode->i_ino >> 16)) {
170 tsk = task[i];
171 src_vma = task[i]->mm->mmap;
172 break;
173 }
174
175 if (!tsk)
176 return -EACCES;
177
178
179
180
181
182
/*
 * Pass 1: validate the entire source range [vm_offset,
 * vm_offset + (vm_end - vm_start)) before touching any page tables.
 */
183 stmp = vma->vm_offset;
184 while (stmp < vma->vm_offset + (vma->vm_end - vma->vm_start)) {
/* advance to the source vma covering stmp; reject gaps and SHM areas */
185 while (src_vma && stmp > src_vma->vm_end)
186 src_vma = src_vma->vm_next;
187 if (!src_vma || (src_vma->vm_flags & VM_SHM))
188 return -EINVAL;
189
190 src_dir = PAGE_DIR_OFFSET(tsk, stmp);
191 if (pgd_none(*src_dir))
192 return -EINVAL;
193 if (pgd_bad(*src_dir)) {
194 printk("Bad source page dir entry %08lx\n", pgd_val(*src_dir));
195 return -EINVAL;
196 }
197
198 src_table = (pte_t *)(pgd_page(*src_dir) + PAGE_PTR(stmp));
199 if (pte_none(*src_table))
200 return -EINVAL;
201
/* below the vma start is only legal for a grow-down (stack) area
 * whose extent stays within the caller's stack rlimit */
202 if (stmp < src_vma->vm_start) {
203 if (!(src_vma->vm_flags & VM_GROWSDOWN))
204 return -EINVAL;
205 if (src_vma->vm_end - stmp > current->rlim[RLIMIT_STACK].rlim_cur)
206 return -EINVAL;
207 }
208 stmp += PAGE_SIZE;
209 }
210
/*
 * Pass 2: copy the entries.  NOTE(review): this pass re-derives
 * src_table without repeating the pgd_none/pgd_bad checks — it relies
 * on pass 1 having just validated the identical range.
 */
211 src_vma = task[i]->mm->mmap;
212 stmp = vma->vm_offset;
213 dtmp = vma->vm_start;
214
215 while (dtmp < vma->vm_end) {
216 while (src_vma && stmp > src_vma->vm_end)
217 src_vma = src_vma->vm_next;
218
219 src_dir = PAGE_DIR_OFFSET(tsk, stmp);
220 src_table = (pte_t *) (pgd_page(*src_dir) + PAGE_PTR(stmp));
221
222 dest_dir = PAGE_DIR_OFFSET(current, dtmp);
223
/* allocate a page table for the destination if none exists yet;
 * the pgd is re-checked after the allocation and the page freed if
 * someone populated the slot in the meantime */
224 if (pgd_none(*dest_dir)) {
225 unsigned long page = get_free_page(GFP_KERNEL);
226 if (!page)
227 return -ENOMEM;
228 if (pgd_none(*dest_dir)) {
229 pgd_set(dest_dir, (pte_t *) page);
230 } else {
231 free_page(page);
232 }
233 }
234
235 if (pgd_bad(*dest_dir)) {
236 printk("Bad dest directory entry %08lx\n", pgd_val(*dest_dir));
237 return -EINVAL;
238 }
239
240 dest_table = (pte_t *) (pgd_page(*dest_dir) + PAGE_PTR(dtmp));
241
/* fault the source page in (and break copy-on-write if we will map
 * it writable) before sharing the entry */
242 if (!pte_present(*src_table))
243 do_no_page(src_vma, stmp, 1);
244
245 if ((vma->vm_flags & VM_WRITE) && !pte_write(*src_table))
246 do_wp_page(src_vma, stmp, 1);
247
/* NOTE(review): the source pte is marked dirty unconditionally here —
 * presumably so the shared frame is never treated as clean/discardable;
 * confirm against the mm swap-out logic */
248 *src_table = pte_mkdirty(*src_table);
249 *dest_table = *src_table;
/* the frame is now referenced by both processes */
250 mem_map[MAP_NR(pte_page(*src_table))]++;
251
252 stmp += PAGE_SIZE;
253 dtmp += PAGE_SIZE;
254 }
255
/* presumably flushes the TLB after the page-table edits — defined in asm headers */
256 invalidate();
257 return 0;
258 }
259
/*
 * File operations for /proc/<pid>/mem.  Positional initializer for
 * struct file_operations (declared elsewhere); the non-NULL slots
 * correspond to the handlers defined above.  Note that mem_write is
 * #defined to NULL above, so writes are currently disabled.
 * NOTE(review): slot order assumed to be lseek, read, write, readdir,
 * select, ioctl, mmap, open, release, fsync — confirm against the
 * struct file_operations declaration in linux/fs.h.
 */
260 static struct file_operations proc_mem_operations = {
261 mem_lseek,
262 mem_read,
263 mem_write,
264 NULL,
265 NULL,
266 NULL,
267 mem_mmap,
268 NULL,
269 NULL,
270 NULL
271 };
272
/*
 * Inode operations for /proc/<pid>/mem.  Only the default file
 * operations pointer (first slot) is set, wiring in the table above;
 * every directory-style operation is NULL since this inode is a
 * plain file, not a directory.
 */
273 struct inode_operations proc_mem_inode_operations = {
274 &proc_mem_operations,
275 NULL,
276 NULL,
277 NULL,
278 NULL,
279 NULL,
280 NULL,
281 NULL,
282 NULL,
283 NULL,
284 NULL,
285 NULL,
286 NULL,
287 NULL,
288 NULL
289 };