This source file includes the following definitions:
- mem_read
- mem_write
- mem_lseek
- mem_mmap
1
2
3
4
5
6
7 #include <linux/types.h>
8 #include <linux/errno.h>
9 #include <linux/sched.h>
10 #include <linux/kernel.h>
11 #include <linux/mm.h>
12
13 #include <asm/page.h>
14 #include <asm/segment.h>
15 #include <asm/io.h>
16
17
18
19
20
21
22
/*
 * Writing is disabled: defining mem_write as NULL makes the
 * "#ifndef mem_write" guard below skip the real implementation, and
 * the file_operations table then gets a NULL write slot.
 */
#define mem_write NULL
24
/*
 * Read bytes from another process's address space (/proc/<pid>/mem).
 *
 * The target pid is encoded in the upper 16 bits of the proc inode
 * number.  Returns the number of bytes copied to user space, -EINVAL
 * for a negative count, or -EACCES when no task with that pid exists.
 *
 * The loop copies page by page and stops early (short read) when a
 * pending unblocked signal arrives or when the target page directory
 * entry / pte is missing, bad, or not present — no fault is taken on
 * the target's behalf.
 */
static int mem_read(struct inode * inode, struct file * file,char * buf, int count)
{
	pgd_t *pgdir;
	pte_t pte;
	char * page;
	struct task_struct * tsk;
	unsigned long addr, pid;
	char *tmp;
	int i;

	if (count < 0)
		return -EINVAL;
	/* pid lives in the upper 16 bits of the proc inode number */
	pid = inode->i_ino;
	pid >>= 16;
	tsk = NULL;
	for (i = 1 ; i < NR_TASKS ; i++)
		if (task[i] && task[i]->pid == pid) {
			tsk = task[i];
			break;
		}
	if (!tsk)
		return -EACCES;
	addr = file->f_pos;
	tmp = buf;
	while (count > 0) {
		/* stay interruptible: stop on a pending unblocked signal */
		if (current->signal & ~current->blocked)
			break;
		pgdir = PAGE_DIR_OFFSET(tsk,addr);
		if (pgd_none(*pgdir))
			break;
		if (pgd_bad(*pgdir)) {
			printk("Bad page dir entry %08lx\n", pgd_val(*pgdir));
			pgd_clear(pgdir);
			break;
		}
		/* fetch the pte for addr out of the target's page table */
		pte = *(pte_t *) (PAGE_PTR(addr) + pgd_page(*pgdir));
		if (!pte_present(pte))
			break;
		/* copy at most up to the end of the current page */
		page = (char *) pte_page(pte) + (addr & ~PAGE_MASK);
		i = PAGE_SIZE-(addr & ~PAGE_MASK);
		if (i > count)
			i = count;
		memcpy_tofs(tmp, page, i);
		addr += i;
		tmp += i;
		count -= i;
	}
	file->f_pos = addr;
	return tmp-buf;
}
75
76 #ifndef mem_write
77
78 static int mem_write(struct inode * inode, struct file * file,char * buf, int count)
79 {
80 pgd_t * pgdir;
81 pte_t * pte;
82 char * page;
83 struct task_struct * tsk;
84 unsigned long addr, pid;
85 char *tmp;
86 int i;
87
88 if (count < 0)
89 return -EINVAL;
90 addr = file->f_pos;
91 pid = inode->i_ino;
92 pid >>= 16;
93 tsk = NULL;
94 for (i = 1 ; i < NR_TASKS ; i++)
95 if (task[i] && task[i]->pid == pid) {
96 tsk = task[i];
97 break;
98 }
99 if (!tsk)
100 return -EACCES;
101 tmp = buf;
102 while (count > 0) {
103 if (current->signal & ~current->blocked)
104 break;
105 pgdir = PAGE_DIR_OFFSET(tsk,addr);
106 if (pgd_none(*pgdir))
107 break;
108 if (pgd_bad(*pgdir)) {
109 printk("Bad page dir entry %08lx\n", pgd_val(*pgdir));
110 pgd_clear(pgdir);
111 break;
112 }
113 pte = *(pte_t *) (PAGE_PTR(addr) + pgd_page(*pgdir));
114 if (!pte_present(pte))
115 break;
116 if (!pte_write(pte))
117 break;
118 page = (char *) pte_page(pte) + (addr & ~PAGE_MASK);
119 i = PAGE_SIZE-(addr & ~PAGE_MASK);
120 if (i > count)
121 i = count;
122 memcpy_fromfs(page, tmp, i);
123 addr += i;
124 tmp += i;
125 count -= i;
126 }
127 file->f_pos = addr;
128 if (tmp != buf)
129 return tmp-buf;
130 if (current->signal & ~current->blocked)
131 return -ERESTARTSYS;
132 return 0;
133 }
134
135 #endif
136
137 static int mem_lseek(struct inode * inode, struct file * file, off_t offset, int orig)
138 {
139 switch (orig) {
140 case 0:
141 file->f_pos = offset;
142 return file->f_pos;
143 case 1:
144 file->f_pos += offset;
145 return file->f_pos;
146 default:
147 return -EINVAL;
148 }
149 }
150
151
152
153
/*
 * Implement mmap() on /proc/<pid>/mem: share the target process's page
 * frames directly into the caller's address space.
 *
 * Works in two passes over the requested range:
 *   pass 1 validates that every source page lies in a (non-SHM) vma of
 *          the target and has sane page tables, returning -EINVAL on
 *          any violation before anything is touched;
 *   pass 2 installs each source pte into the caller's page tables,
 *          faulting pages in / breaking copy-on-write in the source as
 *          needed, and takes an extra reference on each shared frame.
 *
 * Returns 0 on success, -EACCES if no task matches the pid encoded in
 * the upper 16 bits of the inode number, -EINVAL on validation
 * failure, or -ENOMEM if a page-table page cannot be allocated.
 */
int mem_mmap(struct inode * inode, struct file * file,
	     struct vm_area_struct * vma)
{
	struct task_struct *tsk;
	pgd_t *src_dir, *dest_dir;
	pte_t *src_table, *dest_table;
	unsigned long stmp, dtmp;
	struct vm_area_struct *src_vma = NULL;
	int i;

	/* locate the target task; pid is in the upper 16 bits of i_ino */
	tsk = NULL;
	for (i = 1 ; i < NR_TASKS ; i++)
		if (task[i] && task[i]->pid == (inode->i_ino >> 16)) {
			tsk = task[i];
			src_vma = task[i]->mm->mmap;
			break;
		}

	if (!tsk)
		return -EACCES;

	/*
	 * Pass 1: walk the source range and reject the whole mapping if
	 * any page is unmapped, in a shared-memory vma, or backed by
	 * missing/bad page tables.
	 */
	stmp = vma->vm_offset;
	while (stmp < vma->vm_offset + (vma->vm_end - vma->vm_start)) {
		while (src_vma && stmp > src_vma->vm_end)
			src_vma = src_vma->vm_next;
		if (!src_vma || (src_vma->vm_flags & VM_SHM))
			return -EINVAL;

		src_dir = PAGE_DIR_OFFSET(tsk, stmp);
		if (pgd_none(*src_dir))
			return -EINVAL;
		if (pgd_bad(*src_dir)) {
			printk("Bad source page dir entry %08lx\n", pgd_val(*src_dir));
			return -EINVAL;
		}

		src_table = (pte_t *)(pgd_page(*src_dir) + PAGE_PTR(stmp));
		if (pte_none(*src_table))
			return -EINVAL;

		/*
		 * Address below the vma start: only acceptable for a
		 * growable (stack) vma within the caller's stack rlimit.
		 */
		if (stmp < src_vma->vm_start) {
			if (!(src_vma->vm_flags & VM_GROWSDOWN))
				return -EINVAL;
			if (src_vma->vm_end - stmp > current->rlim[RLIMIT_STACK].rlim_cur)
				return -EINVAL;
		}
		stmp += PAGE_SIZE;
	}

	/* Pass 2: rewind and actually share the pages.
	 * (i still indexes the task found by the search loop above.) */
	src_vma = task[i]->mm->mmap;
	stmp = vma->vm_offset;
	dtmp = vma->vm_start;

	while (dtmp < vma->vm_end) {
		while (src_vma && stmp > src_vma->vm_end)
			src_vma = src_vma->vm_next;

		src_dir = PAGE_DIR_OFFSET(tsk, stmp);
		src_table = (pte_t *) (pgd_page(*src_dir) + PAGE_PTR(stmp));

		dest_dir = PAGE_DIR_OFFSET(current, dtmp);

		/* allocate a page-table page for the destination if needed */
		if (pgd_none(*dest_dir)) {
			unsigned long page = get_free_page(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
			/* get_free_page may have slept: re-check before installing
			 * so a concurrently-created table is not clobbered */
			if (pgd_none(*dest_dir)) {
				pgd_set(dest_dir, (pte_t *) page);
			} else {
				free_page(page);
			}
		}

		if (pgd_bad(*dest_dir)) {
			printk("Bad dest directory entry %08lx\n", pgd_val(*dest_dir));
			return -EINVAL;
		}

		dest_table = (pte_t *) (pgd_page(*dest_dir) + PAGE_PTR(dtmp));

		/* fault the source page in if it is not resident */
		if (!pte_present(*src_table))
			do_no_page(src_vma, stmp, 1);

		/* break COW in the source when a writable mapping is wanted */
		if ((vma->vm_flags & VM_WRITE) && !pte_write(*src_table))
			do_wp_page(src_vma, stmp, 1);

		/* NOTE(review): the pte is force-marked dirty before sharing —
		 * presumably so the shared frame is never discarded as clean;
		 * verify against the mm swap-out logic of this tree. */
		*src_table = pte_mkdirty(*src_table);
		*dest_table = *src_table;
		/* extra reference for the new mapping */
		mem_map[MAP_NR(pte_page(*src_table))]++;

		stmp += PAGE_SIZE;
		dtmp += PAGE_SIZE;
	}

	/* flush the TLB so the new translations take effect */
	invalidate();
	return 0;
}
258
/*
 * File operations for /proc/<pid>/mem.  Positional initializer —
 * slot order assumed to follow this tree's struct file_operations:
 * lseek, read, write, readdir, select, ioctl, mmap, open, release,
 * fsync (TODO confirm against the fs.h in use).
 */
static struct file_operations proc_mem_operations = {
	mem_lseek,
	mem_read,
	mem_write,	/* expands to NULL via the define above */
	NULL,		/* readdir - not a directory */
	NULL,		/* select */
	NULL,		/* ioctl */
	mem_mmap,
	NULL,		/* open */
	NULL,		/* release */
	NULL		/* fsync */
};
271
/*
 * Inode operations for /proc/<pid>/mem: only the default file
 * operations are provided; every directory/link/address-space hook is
 * NULL (positional initializer — verify slot order against the
 * struct inode_operations declaration in this tree's fs.h).
 */
struct inode_operations proc_mem_inode_operations = {
	&proc_mem_operations,	/* default file operations */
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL
};