This source file includes following definitions.
- invalidate_inode_pages
- truncate_inode_pages
- shrink_mmap
- page_unuse
- update_vm_cache
- add_to_page_cache
- try_to_read_ahead
- __wait_on_page
- generic_file_readahead
- generic_file_read
- fill_page
- filemap_nopage
- do_write_page
- filemap_write_page
- filemap_swapout
- filemap_swapin
- filemap_sync_pte
- filemap_sync_pte_range
- filemap_sync_pmd_range
- filemap_sync
- filemap_unmap
- generic_file_mmap
- msync_interval
- sys_msync
1
2
3
4
5
6
7
8
9
10
11
12 #include <linux/stat.h>
13 #include <linux/sched.h>
14 #include <linux/kernel.h>
15 #include <linux/mm.h>
16 #include <linux/shm.h>
17 #include <linux/errno.h>
18 #include <linux/mman.h>
19 #include <linux/string.h>
20 #include <linux/malloc.h>
21 #include <linux/fs.h>
22 #include <linux/locks.h>
23 #include <linux/pagemap.h>
24 #include <linux/swap.h>
25
26 #include <asm/segment.h>
27 #include <asm/system.h>
28 #include <asm/pgtable.h>
29
30
31
32
33
34
35
36
/*
 * Global page-cache bookkeeping: number of pages currently in the cache,
 * and the hash table of struct page entries, looked up by (inode, offset)
 * via find_page()/add_page_to_hash_queue().
 */
unsigned long page_cache_size = 0;
struct page * page_hash_table[PAGE_HASH_SIZE];
39
40
41
42
43
44
45
46
47
48
/*
 * Invalidate the in-core pages attached to an inode: walk inode->i_pages,
 * unlink each unlocked page from the inode's page list and the page-cache
 * hash, and drop the cache's reference.  Pages with I/O in flight
 * (PageLocked) are skipped and stay on the list - NOTE(review): the
 * caller presumably tolerates leftover locked pages; confirm.
 */
void invalidate_inode_pages(struct inode * inode)
{
	struct page ** p;
	struct page * page;

	p = &inode->i_pages;
	while ((page = *p) != NULL) {
		/* don't touch pages with I/O in progress */
		if (PageLocked(page)) {
			p = &page->next;
			continue;
		}
		/* unlink from the inode's doubly-linked page list... */
		inode->i_nrpages--;
		if ((*p = page->next) != NULL)
			(*p)->prev = page->prev;
		page->dirty = 0;
		page->next = NULL;
		page->prev = NULL;
		/* ...and from the hash, then drop the cache reference */
		remove_page_from_hash_queue(page);
		page->inode = NULL;
		free_page(page_address(page));
		continue;
	}
}
72
73
74
75
76
/*
 * Throw away all cached pages at or beyond file offset 'start' (used on
 * truncate), and zero the tail of a page that straddles the new size so
 * stale data cannot reappear through the cache.
 */
void truncate_inode_pages(struct inode * inode, unsigned long start)
{
	struct page ** p;
	struct page * page;

repeat:
	p = &inode->i_pages;
	while ((page = *p) != NULL) {
		unsigned long offset = page->offset;

		/* page lies wholly beyond the new size: remove it */
		if (offset >= start) {
			/* can't free a page with I/O pending - wait, then
			   rescan from the top since the list may have
			   changed while we slept */
			if (PageLocked(page)) {
				wait_on_page(page);
				goto repeat;
			}
			inode->i_nrpages--;
			if ((*p = page->next) != NULL)
				(*p)->prev = page->prev;
			page->dirty = 0;
			page->next = NULL;
			page->prev = NULL;
			remove_page_from_hash_queue(page);
			page->inode = NULL;
			free_page(page_address(page));
			continue;
		}
		p = &page->next;
		/* partial page: zero everything from 'start' to page end */
		offset = start - offset;
		if (offset < PAGE_SIZE)
			memset((void *) (offset + page_address(page)), 0, PAGE_SIZE - offset);
	}
}
111
/*
 * Try to reclaim one page's worth of memory from the page cache or the
 * buffer cache.  Scans mem_map clock-style, resuming where the previous
 * call stopped; 'priority' bounds how many pages are examined (lower
 * value => longer scan) and 'dma' restricts the scan to DMA-capable
 * pages.  Returns 1 if something was freed, 0 otherwise.
 */
int shrink_mmap(int priority, int dma)
{
	static int clock = 0;	/* clock hand, persists across calls */
	struct page * page;
	unsigned long limit = MAP_NR(high_memory);
	struct buffer_head *tmp, *bh;

	priority = (limit<<2) >> priority;
	page = mem_map + clock;
	do {
		priority--;
		if (PageLocked(page))
			goto next;
		if (dma && !PageDMA(page))
			goto next;

		/* Fold per-buffer Touched bits into the page-level
		   referenced bit, so recently used buffer pages get a
		   second chance below */
		bh = page->buffers;
		if (bh) {
			tmp = bh;
			do {
				if (buffer_touched(tmp)) {
					clear_bit(BH_Touched, &tmp->b_state);
					set_bit(PG_referenced, &page->flags);
				}
				tmp = tmp->b_this_page;
			} while (tmp != bh);
		}

		switch (page->count) {
			case 1:
				/* second-chance algorithm: referenced pages
				   merely lose the bit this pass */
				if (clear_bit(PG_referenced, &page->flags))
					break;

				/* page-cache page with no other users: free it */
				if (page->inode) {
					remove_page_from_hash_queue(page);
					remove_page_from_inode_queue(page);
					free_page(page_address(page));
					return 1;
				}

				/* otherwise try to release its buffers */
				if (bh && try_to_free_buffer(bh, &bh, 6))
					return 1;
				break;

			default:
				/* shared page: can't free, mark referenced */
				set_bit(PG_referenced, &page->flags);
				/* fall through */
			case 0:
				/* already free: nothing to do */
		}
next:
		page++;
		clock++;
		/* wrap the clock hand at the end of mem_map */
		if (clock >= limit) {
			clock = 0;
			page = mem_map;
		}
	} while (priority > 0);
	return 0;
}
183
184
185
186
187
188
189
190 unsigned long page_unuse(unsigned long page)
191 {
192 struct page * p = mem_map + MAP_NR(page);
193 int count = p->count;
194
195 if (count != 2)
196 return count;
197 if (!p->inode)
198 return count;
199 remove_page_from_hash_queue(p);
200 remove_page_from_inode_queue(p);
201 free_page(page);
202 return 1;
203 }
204
205
206
207
208
/*
 * Keep the page cache coherent with a write: copy 'count' bytes written
 * at file position 'pos' into any cached pages that overlap the range,
 * so subsequent reads through the cache see the new data.
 */
void update_vm_cache(struct inode * inode, unsigned long pos, const char * buf, int count)
{
	unsigned long offset, len;

	offset = (pos & ~PAGE_MASK);	/* offset within the first page */
	pos = pos & PAGE_MASK;		/* page-aligned position */
	len = PAGE_SIZE - offset;	/* bytes going into this page */
	do {
		struct page * page;

		if (len > count)
			len = count;
		page = find_page(inode, pos);
		if (page) {
			unsigned long addr;

			/* let any pending read finish before overwriting */
			wait_on_page(page);
			addr = page_address(page);
			memcpy((void *) (offset + addr), buf, len);
			free_page(addr);	/* drop find_page's reference */
		}
		count -= len;
		buf += len;
		/* all subsequent pages are copied whole, from offset 0 */
		len = PAGE_SIZE;
		offset = 0;
		pos += PAGE_SIZE;
	} while (count);
}
237
/*
 * Insert 'page' into the page cache for (inode, offset): take a cache
 * reference, reset the uptodate/error state, record the file offset,
 * and link the page onto the inode's list and the global hash table.
 */
static inline void add_to_page_cache(struct page * page,
	struct inode * inode, unsigned long offset)
{
	page->count++;
	page->flags &= ~((1 << PG_uptodate) | (1 << PG_error));
	page->offset = offset;	/* must be set before hashing */
	add_page_to_inode_queue(inode, page);
	add_page_to_hash_queue(inode, page);
}
247
248
249
250
251
252
/*
 * Opportunistically start reading one page at 'offset' into the cache.
 * 'page_cache' is an optional pre-allocated spare page; if the read
 * consumes it, 0 is returned, otherwise the (possibly freshly
 * allocated) spare is handed back for the caller to reuse or free.
 */
static unsigned long try_to_read_ahead(struct inode * inode, unsigned long offset, unsigned long page_cache)
{
	struct page * page;

	offset &= PAGE_MASK;
	if (!page_cache) {
		page_cache = __get_free_page(GFP_KERNEL);
		if (!page_cache)
			return 0;	/* out of memory - silently skip */
	}
	/* nothing to read beyond end of file */
	if (offset >= inode->i_size)
		return page_cache;
#if 1
	page = find_page(inode, offset);
	if (page) {
		/* already cached: just drop find_page's reference */
		page->count--;
		return page_cache;
	}
	/* hand the spare page to the cache and start the read on it */
	page = mem_map + MAP_NR(page_cache);
	add_to_page_cache(page, inode, offset);
	inode->i_op->readpage(inode, page);
	free_page(page_cache);	/* the cache keeps its own reference */
	return 0;
#else
	return page_cache;
#endif
}
283
284
285
286
/*
 * Sleep until the page is unlocked (its I/O has completed).  Takes a
 * temporary reference so the page cannot be freed while we sleep, and
 * kicks the disk task queue so queued I/O actually makes progress.
 */
void __wait_on_page(struct page *page)
{
	struct wait_queue wait = { current, NULL };

	page->count++;
	add_wait_queue(&page->wait, &wait);
repeat:
	run_task_queue(&tq_disk);
	/* set task state BEFORE re-testing the lock bit, to avoid
	   losing a wakeup that arrives in between */
	current->state = TASK_UNINTERRUPTIBLE;
	if (PageLocked(page)) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&page->wait, &wait);
	page->count--;
	current->state = TASK_RUNNING;
}
304
305
306
307
308
309
310
311
312
313
/* Bounds (in bytes) for the per-file read-ahead window f_ramax */
#define MAX_READAHEAD (PAGE_SIZE*8)
#define MIN_READAHEAD (PAGE_SIZE)
316
/*
 * Issue read-ahead around 'pos' and then wait for 'page' (the page the
 * caller actually wants) if it is still locked.  Two modes:
 *  - synchronous miss: 'page' is locked (being read), so read ahead
 *    starting from it while we would block anyway;
 *  - asynchronous: extend the previous read-ahead window recorded in
 *    filp->f_rapos/f_ralen/f_ramax, but only if reads look sequential.
 * Returns the (possibly consumed) spare page_cache.
 */
static inline unsigned long generic_file_readahead(struct file * filp, struct inode * inode,
	int try_async, unsigned long pos, struct page * page,
	unsigned long page_cache)
{
	unsigned long max_ahead, ahead;
	unsigned long rapos, ppos;

	ppos = pos & PAGE_MASK;

	/* synchronous case: read ahead from the page being waited on */
	if (PageLocked(page)) {
		max_ahead = filp->f_ramax;
		rapos = ppos;
	}
	/* asynchronous case: continue the previous window */
	else {
		/* restart one page before the recorded window end */
		rapos = filp->f_rapos & PAGE_MASK;
		if (rapos) rapos -= PAGE_SIZE;

		/* only read ahead if the current position falls inside
		   the previous window, i.e. access really is sequential */
		if (try_async == 1 && pos <= filp->f_rapos &&
			 pos + filp->f_ralen >= filp->f_rapos) {
			struct page *a_page;

			max_ahead = filp->f_ramax + PAGE_SIZE;

			if (rapos < inode->i_size) {
				a_page = find_page(inode, rapos);
				if (a_page) {
					/* earlier read-ahead still in
					   flight: don't pile on more */
					if (PageLocked(a_page))
						max_ahead = 0;
					a_page->count--;
				}
			}
			else
				max_ahead = 0;
			try_async = 2;
		}
		else {
			max_ahead = 0;
		}
	}

	/* fire off up to max_ahead bytes of read-ahead */
	ahead = 0;
	while (ahead < max_ahead) {
		ahead += PAGE_SIZE;
		page_cache = try_to_read_ahead(inode, rapos + ahead, page_cache);
	}

	/* Record the new window length; in the async case start the I/O
	   now so it overlaps with the caller copying data out */
	if (ahead > 0) {
		filp->f_ralen = ahead;
		if (try_async == 2) {
			run_task_queue(&tq_disk);
			try_async = 1;
		}
	}

	/* new end of the read-ahead window */
	filp->f_rapos = rapos + ahead + PAGE_SIZE;

	/* finally wait for the page the caller is actually after */
	if (PageLocked(page)) {
		__wait_on_page(page);
	}
	return page_cache;
}
422
423
/*
 * Generic read through the page cache: satisfy up to 'count' bytes at
 * filp->f_pos from cached pages, reading pages in (with read-ahead)
 * as needed.  Returns the number of bytes read, or a negative errno
 * if nothing could be read at all.
 */
int generic_file_read(struct inode * inode, struct file * filp, char * buf, int count)
{
	int error, read;
	unsigned long pos, page_cache;
	int try_async;

	if (count <= 0)
		return 0;
	error = 0;
	read = 0;
	page_cache = 0;	/* spare free page, reused across iterations */

	pos = filp->f_pos;

	/* If this read falls inside the previous read-ahead window,
	   treat it as a continuation of sequential reading */
	if (pos <= filp->f_rapos && pos + filp->f_ralen >= filp->f_rapos) {
		filp->f_reada = 1;
	}
	/* small, never-started, or forward-seeked reads reset read-ahead */
	else if (pos+count < MIN_READAHEAD || !filp->f_rapos ||
		pos > filp->f_rapos) {
		filp->f_reada = 0;
	}

	/* sequential: keep growing the window; otherwise start over */
	if (filp->f_reada) {
		try_async = 1;
		filp->f_ramax += filp->f_ramax;	/* double the window */
	}
	else {
		try_async = 0;
		filp->f_rapos = 0;
		filp->f_ralen = 0;
		filp->f_ramax = 0;
	}

	/* window must at least cover this request, clamped to
	   [MIN_READAHEAD, MAX_READAHEAD] */
	if (filp->f_ramax < count)
		filp->f_ramax = count & PAGE_MASK;

	if (filp->f_ramax < MIN_READAHEAD)
		filp->f_ramax = MIN_READAHEAD;
	else if (filp->f_ramax > MAX_READAHEAD)
		filp->f_ramax = MAX_READAHEAD;

	for (;;) {
		struct page *page;
		unsigned long offset, addr, nr;

		if (pos >= inode->i_size)
			break;
		offset = pos & ~PAGE_MASK;
		nr = PAGE_SIZE - offset;

		/* fast path: page already in the cache */
		page = find_page(inode, pos & PAGE_MASK);
		if (page)
			goto found_page;

		/* miss: use the spare page if we already have one... */
		if (page_cache)
			goto new_page;

		/* ...otherwise allocate one (may sleep) */
		error = -ENOMEM;
		page_cache = __get_free_page(GFP_KERNEL);
		if (!page_cache)
			break;
		error = 0;

		/* re-check after the possible sleep in the allocator */
		if (pos >= inode->i_size)
			break;
		page = find_page(inode, pos & PAGE_MASK);
		if (!page)
			goto new_page;

found_page:
		addr = page_address(page);
		if (nr > count)
			nr = count;

		/* read ahead, and wait for this page if it's locked */
		page_cache = generic_file_readahead(filp, inode, try_async, pos, page, page_cache);

		if (!PageUptodate(page))
			goto read_page;
		/* clip the copy to end-of-file */
		if (nr > inode->i_size - pos)
			nr = inode->i_size - pos;
		memcpy_tofs(buf, (void *) (addr + offset), nr);
		free_page(addr);
		buf += nr;
		pos += nr;
		read += nr;
		count -= nr;
		if (count)
			continue;
		break;


new_page:
		/* hand the spare page over to the page cache */
		addr = page_cache;
		page = mem_map + MAP_NR(page_cache);
		page_cache = 0;
		add_to_page_cache(page, inode, pos & PAGE_MASK);

read_page:
		/* start the actual I/O; on success loop back and wait */
		error = inode->i_op->readpage(inode, page);
		if (!error)
			goto found_page;
		free_page(addr);
		break;
	}

	filp->f_pos = pos;
	filp->f_reada = 1;
	if (page_cache)
		free_page(page_cache);	/* unused spare */
	if (!IS_RDONLY(inode)) {
		inode->i_atime = CURRENT_TIME;
		inode->i_dirt = 1;
	}
	/* nothing read: report the last error (0 if EOF) */
	if (!read)
		read = error;
	return read;
}
595
596
597
598
599
/*
 * Return the (referenced, up-to-date) virtual address of the cache page
 * for (inode, offset), reading it in if necessary; 0 on allocation
 * failure.  When real I/O was needed, also read ahead the next page.
 */
static inline unsigned long fill_page(struct inode * inode, unsigned long offset)
{
	struct page * page;
	unsigned long new_page;

	page = find_page(inode, offset);
	if (page)
		goto found_page_dont_free;
	new_page = __get_free_page(GFP_KERNEL);
	/* the allocation may have slept: someone else may have added
	   the page meanwhile, so look again */
	page = find_page(inode, offset);
	if (page)
		goto found_page;
	if (!new_page)
		return 0;
	page = mem_map + MAP_NR(new_page);
	new_page = 0;
	add_to_page_cache(page, inode, offset);
	inode->i_op->readpage(inode, page);
	/* while this page's I/O runs, start reading the next one */
	if (PageLocked(page))
		new_page = try_to_read_ahead(inode, offset + PAGE_SIZE, 0);
found_page:
	if (new_page)
		free_page(new_page);	/* spare was not needed */
found_page_dont_free:
	wait_on_page(page);
	return page_address(page);
}
627
628
629
630
631
632
/*
 * Page-fault (nopage) handler for file mappings: translate the faulting
 * address to a file offset and pull the page in through the cache.  For
 * private faults (no_share) a private copy is returned instead so that
 * writes do not modify the shared cache page.
 */
static unsigned long filemap_nopage(struct vm_area_struct * area, unsigned long address, int no_share)
{
	unsigned long offset;
	struct inode * inode = area->vm_inode;
	unsigned long page;

	offset = (address & PAGE_MASK) - area->vm_start + area->vm_offset;
	/* shared fault past EOF fails (but allow access from another
	   mm, e.g. ptrace, to proceed) */
	if (offset >= inode->i_size && (area->vm_flags & VM_SHARED) && area->vm_mm == current->mm)
		return 0;

	page = fill_page(inode, offset);
	if (page && no_share) {
		/* private mapping: hand back a copy, drop the cache ref */
		unsigned long new_page = __get_free_page(GFP_KERNEL);
		if (new_page)
			memcpy((void *) new_page, (void *) page, PAGE_SIZE);
		free_page(page);
		return new_page;
	}
	return page;
}
653
654
655
656
657
/*
 * Write one page of data back to the file via its write() op, sourcing
 * from kernel space.  For regular files the write is clipped at i_size.
 * Returns 0 on success, -EIO on a short or failed write.
 */
static inline int do_write_page(struct inode * inode, struct file * file,
	const char * page, unsigned long offset)
{
	int old_fs, retval;
	unsigned long size;

	size = offset + PAGE_SIZE;
	/* don't write past the end of a regular file */
	if (S_ISREG(inode->i_mode)) {
		if (size > inode->i_size)
			size = inode->i_size;
		/* page starts entirely beyond EOF - shouldn't happen */
		if (size < offset)
			return -EIO;
	}
	size -= offset;
	/* data lives in kernel space: temporarily switch the fs segment
	   so the fs write path's user-copy works on a kernel buffer */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	retval = -EIO;
	if (size == file->f_op->write(inode, file, (const char *) page, size))
		retval = 0;
	set_fs(old_fs);
	return retval;
}
682
/*
 * Write a dirty mapped page back to its file.  If the page still has
 * buffer heads, simply marking them dirty lets the normal buffer
 * writeback handle it later; otherwise build a temporary struct file
 * and write synchronously under the inode semaphore.
 */
static int filemap_write_page(struct vm_area_struct * vma,
	unsigned long offset,
	unsigned long page)
{
	int result;
	struct file file;
	struct inode * inode;
	struct buffer_head * bh;

	bh = mem_map[MAP_NR(page)].buffers;
	if (bh) {
		/* cheap path: mark the page's buffers dirty and let the
		   buffer cache write them out */
		struct buffer_head * tmp = bh;
		do {
			mark_buffer_dirty(tmp, 0);
			tmp = tmp->b_this_page;
		} while (tmp != bh);
		return 0;
	}

	inode = vma->vm_inode;
	file.f_op = inode->i_op->default_file_ops;
	if (!file.f_op->write)
		return -EIO;
	file.f_mode = 3;	/* read/write */
	file.f_flags = 0;
	file.f_count = 1;
	file.f_inode = inode;
	file.f_pos = offset;
	file.f_reada = 0;

	/* serialize with other writers on the inode */
	down(&inode->i_sem);
	result = do_write_page(inode, &file, (const char *) page, offset);
	up(&inode->i_sem);
	return result;
}
719
720
721
722
723
724
725
726
727
728
729
730
/*
 * "Swap out" a page of a shared file mapping: the data goes back to the
 * file, not to swap.  While the write is in progress the pte holds a
 * placeholder SHM swap entry; it is cleared afterwards only if nothing
 * faulted the page back in meanwhile.
 */
int filemap_swapout(struct vm_area_struct * vma,
	unsigned long offset,
	pte_t *page_table)
{
	int error;
	unsigned long page = pte_page(*page_table);
	unsigned long entry = SWP_ENTRY(SHM_SWP_TYPE, MAP_NR(page));

	flush_cache_page(vma, (offset + vma->vm_start - vma->vm_offset));
	set_pte(page_table, __pte(entry));
	flush_tlb_page(vma, (offset + vma->vm_start - vma->vm_offset));
	error = filemap_write_page(vma, offset, page);
	/* only tear down the pte if it still holds our placeholder */
	if (pte_val(*page_table) == entry)
		pte_clear(page_table);
	return error;
}
747
748
749
750
751
752
753
754 static pte_t filemap_swapin(struct vm_area_struct * vma,
755 unsigned long offset,
756 unsigned long entry)
757 {
758 unsigned long page = SWP_OFFSET(entry);
759
760 mem_map[page].count++;
761 page = (page << PAGE_SHIFT) + PAGE_OFFSET;
762 return mk_pte(page,vma->vm_page_prot);
763 }
764
765
/*
 * Sync one pte of a shared file mapping.  Without MS_INVALIDATE, a
 * present and dirty page is marked clean and written back.  With
 * MS_INVALIDATE the pte is torn down as well; if the flags are
 * *exactly* MS_INVALIDATE (or the page is clean) the page is dropped
 * without writing.  Returns 0 or a writeback error.
 */
static inline int filemap_sync_pte(pte_t * ptep, struct vm_area_struct *vma,
	unsigned long address, unsigned int flags)
{
	pte_t pte = *ptep;
	unsigned long page;
	int error;

	if (!(flags & MS_INVALIDATE)) {
		if (!pte_present(pte))
			return 0;
		if (!pte_dirty(pte))
			return 0;
		/* clean the pte but keep the mapping; take an extra page
		   reference so the page stays valid during the write */
		flush_cache_page(vma, address);
		set_pte(ptep, pte_mkclean(pte));
		flush_tlb_page(vma, address);
		page = pte_page(pte);
		mem_map[MAP_NR(page)].count++;
	} else {
		if (pte_none(pte))
			return 0;
		/* invalidate: remove the pte entirely */
		flush_cache_page(vma, address);
		pte_clear(ptep);
		flush_tlb_page(vma, address);
		if (!pte_present(pte)) {
			/* it was a swap entry - just release it */
			swap_free(pte_val(pte));
			return 0;
		}
		page = pte_page(pte);
		/* clean page, or invalidate-only: drop without writing */
		if (!pte_dirty(pte) || flags == MS_INVALIDATE) {
			free_page(page);
			return 0;
		}
	}
	error = filemap_write_page(vma, address - vma->vm_start + vma->vm_offset, page);
	free_page(page);	/* drop the reference taken above */
	return error;
}
803
/*
 * Sync every pte under one pmd that covers [address, address+size) of
 * the vma.  'offset' carries the translation back to absolute virtual
 * addresses once 'address' is made pmd-relative.  Per-pte errors are
 * OR-ed into the return value.
 */
static inline int filemap_sync_pte_range(pmd_t * pmd,
	unsigned long address, unsigned long size,
	struct vm_area_struct *vma, unsigned long offset, unsigned int flags)
{
	pte_t * pte;
	unsigned long end;
	int error;

	if (pmd_none(*pmd))
		return 0;
	if (pmd_bad(*pmd)) {
		printk("filemap_sync_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
		pmd_clear(pmd);
		return 0;
	}
	pte = pte_offset(pmd, address);
	/* switch to pmd-relative addressing, clipped to this pmd */
	offset += address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	error = 0;
	do {
		error |= filemap_sync_pte(pte, vma, address + offset, flags);
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
	return error;
}
833
/*
 * Sync all pmds under one pgd covering [address, address+size) of the
 * vma, delegating each pmd to filemap_sync_pte_range.  Per-range
 * errors are OR-ed into the return value.
 */
static inline int filemap_sync_pmd_range(pgd_t * pgd,
	unsigned long address, unsigned long size,
	struct vm_area_struct *vma, unsigned int flags)
{
	pmd_t * pmd;
	unsigned long offset, end;
	int error;

	if (pgd_none(*pgd))
		return 0;
	if (pgd_bad(*pgd)) {
		printk("filemap_sync_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd));
		pgd_clear(pgd);
		return 0;
	}
	pmd = pmd_offset(pgd, address);
	/* remember the absolute base, then work pmd-relative,
	   clipped to this pgd's span */
	offset = address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	error = 0;
	do {
		error |= filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return error;
}
863
/*
 * Walk the current mm's page tables over [address, address+size) of a
 * shared file mapping and write back (and/or invalidate, per 'flags')
 * every mapped page.  Returns the OR of all per-page errors.
 */
static int filemap_sync(struct vm_area_struct * vma, unsigned long address,
	size_t size, unsigned int flags)
{
	pgd_t * dir;
	unsigned long end = address + size;
	int error = 0;

	dir = pgd_offset(current->mm, address);
	flush_cache_range(vma->vm_mm, end - size, end);
	while (address < end) {
		error |= filemap_sync_pmd_range(dir, address, end - address, vma, flags);
		/* advance to the next pgd boundary */
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	flush_tlb_range(vma->vm_mm, end - size, end);
	return error;
}
881
882
883
884
/*
 * unmap operation for shared file mappings: asynchronously write back
 * any dirty pages in the range being unmapped.
 */
static void filemap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
{
	filemap_sync(vma, start, len, MS_ASYNC);
}
889
890
891
892
893
894
/*
 * Operations for writable shared file mappings: these must be able to
 * write dirty pages back to the file, so unmap/sync/swapout/swapin are
 * provided in addition to the fault handler.
 * NOTE(review): the slot labels below assume the vm_operations_struct
 * layout of this kernel generation - confirm against <linux/mm.h>.
 */
static struct vm_operations_struct file_shared_mmap = {
	NULL,			/* open */
	NULL,			/* close */
	filemap_unmap,		/* unmap */
	NULL,			/* protect */
	filemap_sync,		/* sync */
	NULL,			/* advise */
	filemap_nopage,		/* nopage */
	NULL,			/* wppage */
	filemap_swapout,	/* swapout */
	filemap_swapin,		/* swapin */
};
907
908
909
910
911
912
913
/*
 * Operations for private (or read-only) file mappings: nothing is ever
 * written back to the file, so the fault handler is all that's needed.
 * NOTE(review): slot labels assume the same vm_operations_struct layout
 * as file_shared_mmap - confirm against <linux/mm.h>.
 */
static struct vm_operations_struct file_private_mmap = {
	NULL,			/* open */
	NULL,			/* close */
	NULL,			/* unmap */
	NULL,			/* protect */
	NULL,			/* sync */
	NULL,			/* advise */
	filemap_nopage,		/* nopage */
	NULL,			/* wppage */
	NULL,			/* swapout */
	NULL,			/* swapin */
};
926
927
928 int generic_file_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
929 {
930 struct vm_operations_struct * ops;
931
932 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
933 ops = &file_shared_mmap;
934
935
936 if (vma->vm_offset & (PAGE_SIZE - 1))
937 return -EINVAL;
938 } else {
939 ops = &file_private_mmap;
940 if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
941 return -EINVAL;
942 }
943 if (!inode->i_sb || !S_ISREG(inode->i_mode))
944 return -EACCES;
945 if (!inode->i_op || !inode->i_op->readpage)
946 return -ENOEXEC;
947 if (!IS_RDONLY(inode)) {
948 inode->i_atime = CURRENT_TIME;
949 inode->i_dirt = 1;
950 }
951 vma->vm_inode = inode;
952 inode->i_count++;
953 vma->vm_ops = ops;
954 return 0;
955 }
956
957
958
959
960
961
962 static int msync_interval(struct vm_area_struct * vma,
963 unsigned long start, unsigned long end, int flags)
964 {
965 if (!vma->vm_inode)
966 return 0;
967 if (vma->vm_ops->sync) {
968 int error;
969 error = vma->vm_ops->sync(vma, start, end-start, flags);
970 if (error)
971 return error;
972 if (flags & MS_SYNC)
973 return file_fsync(vma->vm_inode, NULL);
974 return 0;
975 }
976 return 0;
977 }
978
/*
 * msync(2): flush changes in the mapped range [start, start+len) back
 * to the underlying files.  The range may span several vmas; holes in
 * the mapping make the call return -EFAULT, but every interval that
 * does exist is synced before the error is reported.
 */
asmlinkage int sys_msync(unsigned long start, size_t len, int flags)
{
	unsigned long end;
	struct vm_area_struct * vma;
	int unmapped_error, error;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = (len + ~PAGE_MASK) & PAGE_MASK;	/* round up to pages */
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
		return -EINVAL;
	if (end == start)
		return 0;

	/* walk the vma list covering [start, end) */
	vma = find_vma(current, start);
	unmapped_error = 0;
	for (;;) {
		/* ran past the last vma: the range wasn't fully mapped */
		if (!vma)
			return -EFAULT;
		/* hole before this vma: remember it, keep syncing */
		if (start < vma->vm_start) {
			unmapped_error = -EFAULT;
			start = vma->vm_start;
		}
		/* range ends inside this vma: sync the last piece */
		if (end <= vma->vm_end) {
			if (start < end) {
				error = msync_interval(vma, start, end, flags);
				if (error)
					return error;
			}
			return unmapped_error;
		}
		/* range continues past this vma: sync it whole, advance */
		error = msync_interval(vma, start, vma->vm_end, flags);
		if (error)
			return error;
		start = vma->vm_end;
		vma = vma->vm_next;
	}
}