This source file includes following definitions.
- file_mmap_nopage
- file_mmap_sync_page
- file_mmap_sync
- file_mmap_unmap
- file_mmap_close
- file_mmap_swapout
- generic_mmap
1
2
3
4
5
6
7
8
9
10
11
12 #include <linux/stat.h>
13 #include <linux/sched.h>
14 #include <linux/kernel.h>
15 #include <linux/mm.h>
16 #include <linux/shm.h>
17 #include <linux/errno.h>
18 #include <linux/mman.h>
19 #include <linux/string.h>
20 #include <linux/malloc.h>
21
22 #include <asm/segment.h>
23 #include <asm/system.h>
24
25
26
27
28
29
30 static unsigned long file_mmap_nopage(struct vm_area_struct * area, unsigned long address,
31 unsigned long page, int no_share)
32 {
33 struct inode * inode = area->vm_inode;
34 unsigned int block;
35 int nr[8];
36 int i, *p;
37
38 address &= PAGE_MASK;
39 block = address - area->vm_start + area->vm_offset;
40 block >>= inode->i_sb->s_blocksize_bits;
41 i = PAGE_SIZE >> inode->i_sb->s_blocksize_bits;
42 p = nr;
43 do {
44 *p = bmap(inode,block);
45 i--;
46 block++;
47 p++;
48 } while (i > 0);
49 return bread_page(page, inode->i_dev, nr, inode->i_sb->s_blocksize, no_share);
50 }
51
52
53
54
55
56
57
58
59
60
61
62
/*
 * Push one mapped page back to the file by dirtying its buffers.
 *
 * If the page is still backed by buffer-cache buffers, mark every
 * buffer in the page's buffer ring dirty so the normal buffer
 * write-back will flush them to disk.  A page with no buffer ring
 * (e.g. a private copy) cannot be synced yet — just complain.
 *
 * 'offset' is only used for the diagnostic printk.
 */
static inline void file_mmap_sync_page(struct vm_area_struct * vma,
	unsigned long offset,
	unsigned long page)
{
	struct buffer_head * bh;

	bh = buffer_pages[MAP_NR(page)];
	if (bh) {
		/* b_this_page links the page's buffers into a circular
		 * list — walk it once around, dirtying each buffer. */
		struct buffer_head * tmp = bh;
		do {
			mark_buffer_dirty(tmp, 0);
			tmp = tmp->b_this_page;
		} while (tmp != bh);
		return;
	}
	/* no buffers behind this page: nothing we can write back */
	printk("msync: %ld: [%08lx]\n", offset, page);
	printk("Can't handle non-shared page yet\n");
	return;
}
84
/*
 * Sync (and optionally invalidate) the pages of a shared file
 * mapping in [start, start + size).
 *
 * Walks the current process' page directory and page tables; for
 * every present and dirty page it:
 *   1. takes an extra reference (mem_map[]++) so the page survives
 *      while we work on it,
 *   2. either clears the pte and frees the mapping's reference
 *      (MS_INVALIDATE) or just clears the pte's dirty bit,
 *   3. dirties the page's buffers via file_mmap_sync_page(),
 *   4. drops the extra reference.
 * Finishes with a TLB invalidate() since ptes were modified.
 *
 * NOTE(review): 'start' is converted to an offset within the vma
 * (start -= vma->vm_start) before the loop, so the value handed to
 * file_mmap_sync_page() is a vma-relative offset.
 */
static void file_mmap_sync(struct vm_area_struct * vma, unsigned long start,
	size_t size, unsigned int flags)
{
	unsigned long page_dir;
	unsigned long *page_table, *dir;
	unsigned long poff, pcnt, pc;

	size = size >> PAGE_SHIFT;	/* bytes -> pages */
	dir = PAGE_DIR_OFFSET(current->tss.cr3,start);
	/* index of the first pte within the first page table */
	poff = (start >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
	start -= vma->vm_start;
	/* pcnt = ptes to handle in the current page table */
	if ((pcnt = PTRS_PER_PAGE - poff) > size)
		pcnt = size;

	for ( ; size > 0; ++dir, size -= pcnt,
		pcnt = (size > PTRS_PER_PAGE ? PTRS_PER_PAGE : size)) {
		if (!(PAGE_PRESENT & (page_dir = *dir))) {
			/* missing page table: skip its whole range */
			if (page_dir)
				printk("file_mmap_sync: bad page directory.\n");
			poff = 0;
			start += pcnt*PAGE_SIZE;
			continue;
		}
		page_table = (unsigned long *)(PAGE_MASK & page_dir);
		if (poff) {
			/* partial first table: start mid-way through it */
			page_table += poff;
			poff = 0;
		}
		for (pc = pcnt; pc--; page_table++, start += PAGE_SIZE) {
			unsigned long page = *page_table;
			if (!(page & PAGE_PRESENT))
				continue;
			if (!(page & PAGE_DIRTY))
				continue;
			/* extra ref: keep the page alive across the sync */
			mem_map[MAP_NR(page)]++;
			if (flags & MS_INVALIDATE) {
				*page_table = 0;
				free_page(page);	/* drop the mapping's ref */
			} else
				*page_table = page & ~PAGE_DIRTY;
			file_mmap_sync_page(vma, start, page);
			free_page(page);	/* drop our extra ref */
		}
	}
	invalidate();	/* ptes changed: flush the TLB */
	return;
}
132
133
134
135
136 static void file_mmap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
137 {
138 if (vma->vm_page_prot & PAGE_RW)
139 file_mmap_sync(vma, start, len, MS_ASYNC);
140 }
141
142
143
144
145 static void file_mmap_close(struct vm_area_struct * vma)
146 {
147 if (vma->vm_page_prot & PAGE_RW)
148 file_mmap_sync(vma, vma->vm_start, vma->vm_end - vma->vm_start, MS_ASYNC);
149 }
150
151
152
153
154
155
156
157
/*
 * Swap-out hook for shared file mappings.
 *
 * Writing the page contents back to the file on swap-out is not
 * implemented; complain and clear the pte.  NOTE(review): any data
 * in the page appears to be discarded here — no write-back or
 * reference drop is visible in this function.
 */
void file_mmap_swapout(struct vm_area_struct * vma,
	unsigned long offset,
	unsigned long *pte)
{
	printk("swapout not implemented on shared files..\n");
	*pte = 0;
}
165
166
167
168
169
170
/*
 * vm_ops for shared, writable file mappings: faults are filled from
 * the file and dirty pages are written back on unmap/close/msync.
 * Slot names below are inferred from the functions installed —
 * verify against struct vm_operations_struct in <linux/mm.h>.
 */
static struct vm_operations_struct file_shared_mmap = {
	NULL,			/* open */
	file_mmap_close,	/* close: sync whole vma */
	file_mmap_unmap,	/* unmap: sync the unmapped range */
	NULL,			/* protect */
	file_mmap_sync,		/* sync (msync) */
	NULL,			/* advise */
	file_mmap_nopage,	/* nopage: read page from file */
	NULL,			/* wppage */
	file_mmap_swapout,	/* swapout (not really implemented) */
	NULL,			/* swapin */
};
183
184
185
186
187
188
189
/*
 * vm_ops for private (or read-only shared) file mappings: only the
 * fault-in handler is needed, since modifications never go back to
 * the file.  Slot names inferred — verify against
 * struct vm_operations_struct in <linux/mm.h>.
 */
static struct vm_operations_struct file_private_mmap = {
	NULL,			/* open */
	NULL,			/* close */
	NULL,			/* unmap */
	NULL,			/* protect */
	NULL,			/* sync */
	NULL,			/* advise */
	file_mmap_nopage,	/* nopage: read page from file */
	NULL,			/* wppage */
	NULL,			/* swapout */
	NULL,			/* swapin */
};
202
203
204 int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
205 {
206 struct vm_operations_struct * ops;
207
208 if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
209 return -EINVAL;
210 if (!inode->i_sb || !S_ISREG(inode->i_mode))
211 return -EACCES;
212 if (!inode->i_op || !inode->i_op->bmap)
213 return -ENOEXEC;
214 ops = &file_private_mmap;
215 if (vma->vm_flags & VM_SHARED) {
216 if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
217 ops = &file_shared_mmap;
218 }
219 if (!IS_RDONLY(inode)) {
220 inode->i_atime = CURRENT_TIME;
221 inode->i_dirt = 1;
222 }
223 vma->vm_inode = inode;
224 inode->i_count++;
225 vma->vm_ops = ops;
226 return 0;
227 }